
Merge remote-tracking branch 'upstream/develop' into moreTests

cl-refactor
CJentzsch, 10 years ago
commit f0057144da
  1. CMakeLists.txt (13)
  2. abi/CMakeLists.txt (2)
  3. alethzero/MainWin.cpp (4)
  4. alethzero/NatspecHandler.h (7)
  5. cmake/EthCompilerSettings.cmake (7)
  6. cmake/EthDependencies.cmake (6)
  7. cmake/FindRocksDB.cmake (49)
  8. eth/main.cpp (13)
  9. ethminer/MinerAux.h (11)
  10. ethvm/CMakeLists.txt (2)
  11. exp/CMakeLists.txt (2)
  12. libdevcore/CMakeLists.txt (2)
  13. libdevcore/Common.cpp (2)
  14. libdevcore/CommonIO.cpp (21)
  15. libdevcore/CommonIO.h (9)
  16. libdevcore/TrieDB.h (15)
  17. libdevcore/db.h (36)
  18. libdevcore/vector_ref.h (8)
  19. libdevcrypto/CMakeLists.txt (4)
  20. libdevcrypto/Common.cpp (129)
  21. libdevcrypto/Common.h (27)
  22. libdevcrypto/Exceptions.h (35)
  23. libdevcrypto/OverlayDB.cpp (3)
  24. libdevcrypto/OverlayDB.h (7)
  25. libdevcrypto/SecretStore.cpp (124)
  26. libdevcrypto/SecretStore.h (50)
  27. libethash-cl/ethash_cl_miner.cpp (71)
  28. libethash-cl/ethash_cl_miner.h (3)
  29. libethcore/Common.h (6)
  30. libethcore/Ethash.cpp (3)
  31. libethcore/Ethash.h (3)
  32. libethcore/Farm.h (3)
  33. libethcore/Params.cpp (6)
  34. libethereum/BlockChain.cpp (12)
  35. libethereum/BlockChain.h (9)
  36. libethereum/BlockChainSync.cpp (800)
  37. libethereum/BlockChainSync.h (278)
  38. libethereum/BlockDetails.h (7)
  39. libethereum/BlockQueue.cpp (87)
  40. libethereum/BlockQueue.h (5)
  41. libethereum/CMakeLists.txt (3)
  42. libethereum/CanonBlockChain.h (6)
  43. libethereum/Client.cpp (37)
  44. libethereum/Client.h (1)
  45. libethereum/CommonNet.h (6)
  46. libethereum/EthereumHost.cpp (603)
  47. libethereum/EthereumHost.h (75)
  48. libethereum/EthereumPeer.cpp (45)
  49. libethereum/EthereumPeer.h (13)
  50. libethereum/State.h (2)
  51. libethereum/TransactionQueue.cpp (3)
  52. libethereum/TransactionQueue.h (3)
  53. libevm/VM.cpp (58)
  54. libjsconsole/JSConsole.cpp (7)
  55. libjsconsole/JSConsole.h (7)
  56. libp2p/CMakeLists.txt (2)
  57. libp2p/Host.cpp (3)
  58. libp2p/Session.cpp (19)
  59. libp2p/Session.h (7)
  60. libsolidity/AST.cpp (43)
  61. libsolidity/ArrayUtils.cpp (56)
  62. libsolidity/Compiler.cpp (40)
  63. libsolidity/CompilerUtils.cpp (64)
  64. libsolidity/CompilerUtils.h (5)
  65. libsolidity/ExpressionCompiler.cpp (55)
  66. libsolidity/NameAndTypeResolver.cpp (8)
  67. libsolidity/Types.cpp (62)
  68. libsolidity/Types.h (32)
  69. libweb3jsonrpc/CMakeLists.txt (3)
  70. libweb3jsonrpc/JsonHelper.h (2)
  71. libweb3jsonrpc/WebThreeStubServer.cpp (11)
  72. libweb3jsonrpc/WebThreeStubServer.h (17)
  73. libwebthree/CMakeLists.txt (4)
  74. libwhisper/BloomFilter.cpp (36)
  75. libwhisper/BloomFilter.h (63)
  76. libwhisper/CMakeLists.txt (4)
  77. libwhisper/Common.cpp (4)
  78. libwhisper/Common.h (4)
  79. mix/CodeModel.cpp (12)
  80. mix/CodeModel.h (2)
  81. mix/ContractCallDataEncoder.cpp (106)
  82. mix/ContractCallDataEncoder.h (5)
  83. mix/MixClient.h (1)
  84. mix/QVariableDeclaration.h (1)
  85. mix/SolidityType.h (1)
  86. mix/qml/QIntTypeView.qml (3)
  87. mix/qml/QStringTypeView.qml (22)
  88. mix/qml/StructView.qml (2)
  89. mix/qml/js/InputValidator.js (113)
  90. mix/qml/js/ProjectModel.js (9)
  91. neth/CMakeLists.txt (4)
  92. rlp/CMakeLists.txt (3)
  93. test/TestHelper.cpp (6)
  94. test/TestHelper.h (2)
  95. test/TestUtils.cpp (11)
  96. test/TestUtils.h (9)
  97. test/libdevcrypto/SecretStore.cpp (138)
  98. test/libdevcrypto/crypto.cpp (4)
  99. test/libethereum/stateOriginal.cpp (6)
  100. test/libp2p/capability.cpp (4)

13
CMakeLists.txt

@@ -30,7 +30,7 @@ option(JSONRPC "Build with jsonprc. default on" ON)
 option(FATDB "Build with ability to list entries in the Trie. Doubles DB size, slows everything down, but good for looking at state diffs and trie contents." OFF)
 option(USENPM "Use npm to recompile ethereum.js if it was changed" OFF)
 option(PROFILING "Build in support for profiling" OFF)
+option(ROCKSDB "Use rocksdb rather than leveldb" OFF)
 set(BUNDLE "none" CACHE STRING "Predefined bundle of software to build (none, full, user, tests, minimal).")
 option(MINER "Build the CLI miner component" ON)
@@ -196,6 +196,7 @@ eth_format_option(MINER)
 eth_format_option(USENPM)
 eth_format_option(PROFILING)
 eth_format_option(SOLIDITY)
+eth_format_option(ROCKSDB)
 eth_format_option(GUI)
 eth_format_option(TESTS)
 eth_format_option(NOBOOST)
@@ -311,6 +312,7 @@ message("-- PROFILING Profiling support ${PROFILIN
 message("-- FATDB Full database exploring ${FATDB}")
 message("-- JSONRPC JSON-RPC support ${JSONRPC}")
 message("-- USENPM Javascript source building ${USENPM}")
+message("-- ROCKSDB Prefer rocksdb to leveldb ${ROCKSDB}")
 message("------------------------------------------------------------- components")
 message("-- MINER Build miner ${MINER}")
 message("-- ETHKEY Build wallet tools ${ETHKEY}")
@@ -337,6 +339,15 @@ include(EthExecutableHelper)
 createBuildInfo()
+if (ROCKSDB AND ROCKSDB_FOUND)
+	set(DB_INCLUDE_DIRS ${ROCKSDB_INCLUDE_DIRS})
+	set(DB_LIBRARIES ${ROCKSDB_LIBRARIES})
+	add_definitions(-DETH_ROCKSDB)
+else()
+	set(DB_INCLUDE_DIRS ${LEVELDB_INCLUDE_DIRS})
+	set(DB_LIBRARIES ${LEVELDB_LIBRARIES})
+endif()
 if (EVMJIT)
 	set(EVMJIT_CPP TRUE) # include CPP-JIT connector
 	add_subdirectory(evmjit)

2
abi/CMakeLists.txt

@@ -4,7 +4,7 @@ set(CMAKE_AUTOMOC OFF)
 aux_source_directory(. SRC_LIST)
 include_directories(BEFORE ..)
-include_directories(${LEVELDB_INCLUDE_DIRS})
+include_directories(${DB_INCLUDE_DIRS})
 set(EXECUTABLE abi)

4
alethzero/MainWin.cpp

@@ -1157,7 +1157,7 @@ void Main::refreshNetwork()
 	auto ns = web3()->nodes();
 	for (p2p::Peer const& i: ns)
-		ui->nodes->insertItem(sessions.count(i.id) ? 0 : ui->nodes->count(), QString("[%1 %3] %2 - ( =%5s | /%4s%6 ) - *%7 $%8")
+		ui->nodes->insertItem(sessions.count(i.id) ? 0 : ui->nodes->count(), QString("[%1 %3] %2 - ( %4 ) - *%5")
 			.arg(QString::fromStdString(i.id.abridged()))
 			.arg(QString::fromStdString(i.endpoint.address.to_string()))
 			.arg(i.id == web3()->id() ? "self" : sessions.count(i.id) ? sessions[i.id] : "disconnected")
@@ -1253,7 +1253,7 @@ void Main::refreshBlockCount()
 	BlockQueueStatus b = ethereum()->blockQueueStatus();
 	SyncStatus sync = ethereum()->syncStatus();
 	QString syncStatus = EthereumHost::stateName(sync.state);
-	if (sync.state == SyncState::HashesParallel || sync.state == SyncState::HashesSingle)
+	if (sync.state == SyncState::Hashes)
 		syncStatus += QString(": %1/%2%3").arg(sync.hashesReceived).arg(sync.hashesEstimated ? "~" : "").arg(sync.hashesTotal);
 	if (sync.state == SyncState::Blocks || sync.state == SyncState::NewBlocks)
 		syncStatus += QString(": %1/%2").arg(sync.blocksReceived).arg(sync.blocksTotal);

7
alethzero/NatspecHandler.h

@@ -22,16 +22,11 @@
 #pragma once
-#pragma warning(push)
-#pragma warning(disable: 4100 4267)
-#include <leveldb/db.h>
-#pragma warning(pop)
+#include <libdevcore/db.h>
 #include <json/json.h>
 #include <libdevcore/FixedHash.h>
 #include "Context.h"
-namespace ldb = leveldb;
 class NatspecHandler: public NatSpecFace
 {
 public:

7
cmake/EthCompilerSettings.cmake

@@ -34,11 +34,16 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
 	# disable unknown pragma warning (4068)
 	# disable unsafe function warning (4996)
 	# disable decorated name length exceeded, name was truncated (4503)
+	# disable conversion from 'size_t' to 'type', possible loss of data (4267)
+	# disable qualifier applied to function type has no meaning; ignored (4180)
+	# disable C++ exception specification ignored except to indicate a function is not __declspec(nothrow) (4290)
+	# disable conversion from 'type1' to 'type2', possible loss of data (4244)
+	# disable forcing value to bool 'true' or 'false' (performance warning) (4800)
 	# disable warning C4535: calling _set_se_translator() requires /EHa (for boost tests)
 	# declare Windows XP requirement
 	# undefine windows.h MAX && MIN macros cause it cause conflicts with std::min && std::max functions
 	# define miniupnp static library
-	add_compile_options(/MP /EHsc /wd4068 /wd4996 /wd4503 -D_WIN32_WINNT=0x0501 /DNOMINMAX /DMINIUPNP_STATICLIB)
+	add_compile_options(/MP /EHsc /wd4068 /wd4996 /wd4503 /wd4267 /wd4180 /wd4290 /wd4244 /wd4800 -D_WIN32_WINNT=0x0501 /DNOMINMAX /DMINIUPNP_STATICLIB)
 	# disable empty object file warning
 	set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /ignore:4221")
 	# warning LNK4075: ignoring '/EDITANDCONTINUE' due to '/SAFESEH' specification

6
cmake/EthDependencies.cmake

@@ -49,6 +49,12 @@ find_package (LevelDB REQUIRED)
 message(" - LevelDB header: ${LEVELDB_INCLUDE_DIRS}")
 message(" - LevelDB lib: ${LEVELDB_LIBRARIES}")
+find_package (RocksDB)
+if (ROCKSDB_FOUND)
+	message(" - RocksDB header: ${ROCKSDB_INCLUDE_DIRS}")
+	message(" - RocksDB lib: ${ROCKSDB_LIBRARIES}")
+endif()
 if (JSCONSOLE)
 	find_package (v8 REQUIRED)
 	message(" - v8 header: ${V8_INCLUDE_DIRS}")

49
cmake/FindRocksDB.cmake

@@ -0,0 +1,49 @@
# Find rocksdb
#
# Find the rocksdb includes and library
#
# if you nee to add a custom library search path, do it via via CMAKE_PREFIX_PATH
#
# This module defines
# ROCKSDB_INCLUDE_DIRS, where to find header, etc.
# ROCKSDB_LIBRARIES, the libraries needed to use rocksdb.
# ROCKSDB_FOUND, If false, do not try to use rocksdb.
# only look in default directories
find_path(
ROCKSDB_INCLUDE_DIR
NAMES rocksdb/db.h
DOC "rocksdb include dir"
)
find_library(
ROCKSDB_LIBRARY
NAMES rocksdb
DOC "rocksdb library"
)
set(ROCKSDB_INCLUDE_DIRS ${ROCKSDB_INCLUDE_DIR})
set(ROCKSDB_LIBRARIES ${ROCKSDB_LIBRARY})
# debug library on windows
# same naming convention as in qt (appending debug library with d)
# boost is using the same "hack" as us with "optimized" and "debug"
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
find_library(
ROCKSDB_LIBRARY_DEBUG
NAMES rocksdbd
DOC "rocksdb debug library"
)
set(ROCKSDB_LIBRARIES optimized ${ROCKSDB_LIBRARIES} debug ${ROCKSDB_LIBRARY_DEBUG})
endif()
# handle the QUIETLY and REQUIRED arguments and set ROCKSDB_FOUND to TRUE
# if all listed variables are TRUE, hide their existence from configuration view
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(rocksdb DEFAULT_MSG
ROCKSDB_INCLUDE_DIR ROCKSDB_LIBRARY)
mark_as_advanced (ROCKSDB_INCLUDE_DIR ROCKSDB_LIBRARY)

13
eth/main.cpp

@@ -764,7 +764,8 @@ int main(int argc, char** argv)
 			case ImportResult::Success: good++; break;
 			case ImportResult::AlreadyKnown: alreadyHave++; break;
 			case ImportResult::UnknownParent: unknownParent++; break;
-			case ImportResult::FutureTime: futureTime++; break;
+			case ImportResult::FutureTimeUnknown: unknownParent++; futureTime++; break;
+			case ImportResult::FutureTimeKnown: futureTime++; break;
 			default: bad++; break;
 			}
 		}
@@ -835,12 +836,13 @@ int main(int argc, char** argv)
 		cout << "Networking disabled. To start, use netstart or pass -b or a remote host." << endl;
 #if ETH_JSONRPC || !ETH_TRUE
-	shared_ptr<WebThreeStubServer> jsonrpcServer;
+	shared_ptr<dev::WebThreeStubServer> jsonrpcServer;
 	unique_ptr<jsonrpc::AbstractServerConnector> jsonrpcConnector;
 	if (jsonrpc > -1)
 	{
 		jsonrpcConnector = unique_ptr<jsonrpc::AbstractServerConnector>(new jsonrpc::HttpServer(jsonrpc, "", "", SensibleHttpThreads));
-		jsonrpcServer = shared_ptr<WebThreeStubServer>(new WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared<SimpleAccountHolder>([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector<KeyPair>(), keyManager, *gasPricer));
+		jsonrpcServer = shared_ptr<dev::WebThreeStubServer>(new dev::WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared<SimpleAccountHolder>([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector<KeyPair>(), keyManager, *gasPricer));
+		jsonrpcServer->setMiningBenefactorChanger([&](Address const& a) { beneficiary = a; });
 		jsonrpcServer->StartListening();
 		if (jsonAdmin.empty())
 			jsonAdmin = jsonrpcServer->newSession(SessionPermissions{{Priviledge::Admin}});
@@ -995,7 +997,8 @@ int main(int argc, char** argv)
 				if (jsonrpc < 0)
 					jsonrpc = SensibleHttpPort;
 				jsonrpcConnector = unique_ptr<jsonrpc::AbstractServerConnector>(new jsonrpc::HttpServer(jsonrpc, "", "", SensibleHttpThreads));
-				jsonrpcServer = shared_ptr<WebThreeStubServer>(new WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared<SimpleAccountHolder>([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector<KeyPair>(), keyManager, *gasPricer));
+				jsonrpcServer = shared_ptr<dev::WebThreeStubServer>(new dev::WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared<SimpleAccountHolder>([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector<KeyPair>(), keyManager, *gasPricer));
+				jsonrpcServer->setMiningBenefactorChanger([&](Address const& a) { beneficiary = a; });
 				jsonrpcServer->StartListening();
 				if (jsonAdmin.empty())
 					jsonAdmin = jsonrpcServer->newSession(SessionPermissions{{Priviledge::Admin}});
@@ -1742,7 +1745,7 @@ int main(int argc, char** argv)
 	JSConsole console(web3, make_shared<SimpleAccountHolder>([&](){return web3.ethereum();}, getAccountPassword, keyManager));
 	while (!g_exit)
 	{
-		console.repl();
+		console.readExpression();
 		stopMiningAfterXBlocks(c, n, mining);
 	}
 #endif

11
ethminer/MinerAux.h

@@ -134,8 +134,6 @@ public:
 			m_clAllowCPU = true;
 		else if (arg == "--cl-extragpu-mem" && i + 1 < argc)
 			m_extraGPUMemory = 1000000 * stol(argv[++i]);
-		else if (arg == "--force-single-chunk")
-			m_forceSingleChunk = true;
 		else if (arg == "--phone-home" && i + 1 < argc)
 		{
 			string m = argv[++i];
@@ -273,7 +271,6 @@ public:
 				m_openclDevice,
 				m_clAllowCPU,
 				m_extraGPUMemory,
-				m_forceSingleChunk,
 				m_currentBlock
 				))
 			{
@@ -318,10 +315,9 @@ public:
 			<< " --opencl-device <n> When mining using -G/--opencl use OpenCL device n (default: 0)." << endl
 			<< " -t, --mining-threads <n> Limit number of CPU/GPU miners to n (default: use everything available on selected platform)" << endl
 			<< " --allow-opencl-cpu Allows CPU to be considered as an OpenCL device if the OpenCL platform supports it." << endl
-			<< " --list-devices List the detected OpenCL devices and exit." <<endl
-			<< " --current-block Let the miner know the current block number at configuration time. Will help determine DAG size and required GPU memory." <<endl
-			<< " --cl-extragpu-mem Set the memory (in MB) you believe your GPU requires for stuff other than mining. Windows rendering e.t.c.." <<endl
-			<< " --force-single-chunk Force DAG uploading in a single chunk against OpenCL's judgement. Use at your own risk." <<endl
+			<< " --list-devices List the detected OpenCL devices and exit." << endl
+			<< " --current-block Let the miner know the current block number at configuration time. Will help determine DAG size and required GPU memory." << endl
+			<< " --cl-extragpu-mem Set the memory (in MB) you believe your GPU requires for stuff other than mining. Windows rendering e.t.c.." << endl
 			;
 	}
@@ -510,7 +506,6 @@ private:
 	unsigned m_miningThreads = UINT_MAX;
 	bool m_shouldListDevices = false;
 	bool m_clAllowCPU = false;
-	bool m_forceSingleChunk = false;
 	boost::optional<uint64_t> m_currentBlock;
 	// default value is 350MB of GPU memory for other stuff (windows system rendering, e.t.c.)
 	unsigned m_extraGPUMemory = 350000000;

2
ethvm/CMakeLists.txt

@@ -4,7 +4,7 @@ set(CMAKE_AUTOMOC OFF)
 aux_source_directory(. SRC_LIST)
 include_directories(BEFORE ..)
-include_directories(${LEVELDB_INCLUDE_DIRS})
+include_directories(${DB_INCLUDE_DIRS})
 set(EXECUTABLE ethvm)

2
exp/CMakeLists.txt

@@ -5,7 +5,7 @@ aux_source_directory(. SRC_LIST)
 include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
 include_directories(BEFORE ..)
-include_directories(${LEVELDB_INCLUDE_DIRS})
+include_directories(${DB_INCLUDE_DIRS})
 set(EXECUTABLE exp)

2
libdevcore/CMakeLists.txt

@@ -15,6 +15,7 @@ aux_source_directory(. SRC_LIST)
 include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
 include_directories(BEFORE ..)
 include_directories(${Boost_INCLUDE_DIRS})
+include_directories(${DB_INCLUDE_DIRS})
 set(EXECUTABLE devcore)
@@ -26,6 +27,7 @@ target_link_libraries(${EXECUTABLE} ${Boost_THREAD_LIBRARIES})
 target_link_libraries(${EXECUTABLE} ${Boost_SYSTEM_LIBRARIES})
 target_link_libraries(${EXECUTABLE} ${Boost_FILESYSTEM_LIBRARIES})
 target_link_libraries(${EXECUTABLE} ${JSONCPP_LIBRARIES})
+target_link_libraries(${EXECUTABLE} ${DB_LIBRARIES})
 # transitive dependencies for windows executables
 if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")

2
libdevcore/Common.cpp

@@ -28,7 +28,7 @@ using namespace dev;
 namespace dev
 {
-char const* Version = "0.9.26";
+char const* Version = "0.9.27";
 const u256 UndefinedU256 = ~(u256)0;

21
libdevcore/CommonIO.cpp

@@ -23,13 +23,14 @@
 #include <iostream>
 #include <cstdlib>
 #include <fstream>
-#include "Exceptions.h"
 #include <stdio.h>
 #ifdef _WIN32
 #include <windows.h>
 #else
 #include <termios.h>
 #endif
+#include <boost/filesystem.hpp>
+#include "Exceptions.h"
 using namespace std;
 using namespace dev;
@@ -95,9 +96,23 @@ string dev::contentsString(string const& _file)
 	return contentsGeneric<string>(_file);
 }
-void dev::writeFile(std::string const& _file, bytesConstRef _data)
+void dev::writeFile(std::string const& _file, bytesConstRef _data, bool _writeDeleteRename)
 {
-	ofstream(_file, ios::trunc|ios::binary).write((char const*)_data.data(), _data.size());
+	if (_writeDeleteRename)
+	{
+		namespace fs = boost::filesystem;
+		fs::path tempPath = fs::unique_path(_file + "-%%%%%%");
+		writeFile(tempPath.string(), _data, false);
+		// will delete _file if it exists
+		fs::rename(tempPath, _file);
+	}
+	else
+	{
+		ofstream s(_file, ios::trunc | ios::binary);
+		s.write(reinterpret_cast<char const*>(_data.data()), _data.size());
+		if (!s)
+			BOOST_THROW_EXCEPTION(FileError());
+	}
 }
 std::string dev::getPassword(std::string const& _prompt)

9
libdevcore/CommonIO.h

@@ -56,10 +56,13 @@ std::string contentsString(std::string const& _file);
 bytesRef contentsNew(std::string const& _file, bytesRef _dest = bytesRef());
 /// Write the given binary data into the given file, replacing the file if it pre-exists.
-void writeFile(std::string const& _file, bytesConstRef _data);
+/// Throws exception on error.
+/// @param _writeDeleteRename useful not to lose any data: If set, first writes to another file in
+/// the same directory and then moves that file.
+void writeFile(std::string const& _file, bytesConstRef _data, bool _writeDeleteRename = false);
 /// Write the given binary data into the given file, replacing the file if it pre-exists.
-inline void writeFile(std::string const& _file, bytes const& _data) { writeFile(_file, bytesConstRef(&_data)); }
-inline void writeFile(std::string const& _file, std::string const& _data) { writeFile(_file, bytesConstRef(_data)); }
+inline void writeFile(std::string const& _file, bytes const& _data, bool _writeDeleteRename = false) { writeFile(_file, bytesConstRef(&_data), _writeDeleteRename); }
+inline void writeFile(std::string const& _file, std::string const& _data, bool _writeDeleteRename = false) { writeFile(_file, bytesConstRef(_data), _writeDeleteRename); }
 /// Nicely renders the given bytes to a string, optionally as HTML.
 /// @a _bytes: bytes array to be rendered as string. @a _width of a bytes line.
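A minimal usage sketch of the new write-then-rename mode (the file name is made up for illustration; only the writeFile overloads declared above are assumed):

#include <libdevcore/Common.h>
#include <libdevcore/CommonIO.h>

int main()
{
	dev::bytes data{0xde, 0xad, 0xbe, 0xef};
	// With _writeDeleteRename set, the data is first written to a temporary sibling
	// file and then renamed over "keys.json", so a crash mid-write cannot leave a
	// truncated target behind.
	dev::writeFile("keys.json", data, true);
	return 0;
}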

15
libdevcore/TrieDB.h

@@ -21,19 +21,14 @@
 #pragma once
-#pragma warning(push)
-#pragma warning(disable: 4100 4267)
-#include <leveldb/db.h>
-#pragma warning(pop)
 #include <memory>
-#include <libdevcore/Common.h>
-#include <libdevcore/Log.h>
-#include <libdevcore/Exceptions.h>
-#include <libdevcore/SHA3.h>
+#include "db.h"
+#include "Common.h"
+#include "Log.h"
+#include "Exceptions.h"
+#include "SHA3.h"
 #include "MemoryDB.h"
 #include "TrieCommon.h"
-namespace ldb = leveldb;
 namespace dev
 {

36
libdevcore/db.h

@@ -0,0 +1,36 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file DB.h
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#if ETH_ROCKSDB || !ETH_TRUE
#include <rocksdb/db.h>
#include <rocksdb/write_batch.h>
namespace ldb = rocksdb;
#else
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
namespace ldb = leveldb;
#endif
#pragma warning(pop)
#define DEV_LDB 1
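A rough sketch of what the ldb alias buys client code (the path and key are illustrative; it relies on LevelDB and RocksDB sharing the same basic Open/Put/Get API):

#include <libdevcore/db.h>
#include <string>

int main()
{
	ldb::DB* db = nullptr;
	ldb::Options options;
	options.create_if_missing = true;
	// The same code compiles against leveldb, or against rocksdb when ETH_ROCKSDB is defined.
	ldb::Status status = ldb::DB::Open(options, "./testdb", &db);
	if (status.ok())
	{
		db->Put(ldb::WriteOptions(), "key", "value");
		std::string value;
		db->Get(ldb::ReadOptions(), "key", &value);
	}
	delete db;
	return 0;
}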

8
libdevcore/vector_ref.h

@@ -31,8 +31,8 @@ public:
 	vector_ref(typename std::conditional<std::is_const<_T>::value, std::vector<typename std::remove_const<_T>::type> const*, std::vector<_T>*>::type _data): m_data(_data->data()), m_count(_data->size()) {}
 	/// Creates a new vector_ref pointing to the data part of a string (given as reference).
 	vector_ref(typename std::conditional<std::is_const<_T>::value, std::string const&, std::string&>::type _data): m_data(reinterpret_cast<_T*>(_data.data())), m_count(_data.size() / sizeof(_T)) {}
-#ifdef STORAGE_LEVELDB_INCLUDE_DB_H_
-	vector_ref(leveldb::Slice const& _s): m_data(reinterpret_cast<_T*>(_s.data())), m_count(_s.size() / sizeof(_T)) {}
+#if DEV_LDB
+	vector_ref(ldb::Slice const& _s): m_data(reinterpret_cast<_T*>(_s.data())), m_count(_s.size() / sizeof(_T)) {}
 #endif
 	explicit operator bool() const { return m_data && m_count; }
@@ -77,8 +77,8 @@ public:
 	bool operator==(vector_ref<_T> const& _cmp) const { return m_data == _cmp.m_data && m_count == _cmp.m_count; }
 	bool operator!=(vector_ref<_T> const& _cmp) const { return !operator==(_cmp); }
-#ifdef STORAGE_LEVELDB_INCLUDE_DB_H_
-	operator leveldb::Slice() const { return leveldb::Slice((char const*)m_data, m_count * sizeof(_T)); }
+#if DEV_LDB
+	operator ldb::Slice() const { return ldb::Slice((char const*)m_data, m_count * sizeof(_T)); }
 #endif
 	void reset() { m_data = nullptr; m_count = 0; }
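A small sketch of what the DEV_LDB-guarded members enable: passing dev byte ranges straight into the database API and back. It assumes libdevcore/db.h is included before vector_ref.h so that DEV_LDB and the ldb alias are visible.

#include <libdevcore/db.h>
#include <libdevcore/Common.h>

int main()
{
	dev::bytes buffer{1, 2, 3, 4};
	dev::bytesConstRef ref(&buffer);
	ldb::Slice slice = ref;          // uses operator ldb::Slice() above
	dev::bytesConstRef back(slice);  // uses vector_ref(ldb::Slice const&) above
	return back.size() == buffer.size() ? 0 : 1;
}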

4
libdevcrypto/CMakeLists.txt

@@ -12,7 +12,7 @@ aux_source_directory(. SRC_LIST)
 include_directories(BEFORE ..)
 include_directories(${Boost_INCLUDE_DIRS})
 include_directories(${CRYPTOPP_INCLUDE_DIRS})
-include_directories(${LEVELDB_INCLUDE_DIRS})
+include_directories(${DB_INCLUDE_DIRS})
 set(EXECUTABLE devcrypto)
@@ -20,7 +20,7 @@ file(GLOB HEADERS "*.h")
 add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS})
 target_link_libraries(${EXECUTABLE} ${Boost_FILESYSTEM_LIBRARIES})
-target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
+target_link_libraries(${EXECUTABLE} ${DB_LIBRARIES})
 target_link_libraries(${EXECUTABLE} ${CRYPTOPP_LIBRARIES})
 target_link_libraries(${EXECUTABLE} scrypt)
 target_link_libraries(${EXECUTABLE} devcore)

129
libdevcrypto/Common.cpp

@@ -31,6 +31,7 @@
#include <libdevcore/FileSystem.h> #include <libdevcore/FileSystem.h>
#include "AES.h" #include "AES.h"
#include "CryptoPP.h" #include "CryptoPP.h"
#include "Exceptions.h"
using namespace std; using namespace std;
using namespace dev; using namespace dev;
using namespace dev::crypto; using namespace dev::crypto;
@@ -178,15 +179,35 @@ bool dev::verify(Public const& _p, Signature const& _s, h256 const& _hash)
bytes dev::pbkdf2(string const& _pass, bytes const& _salt, unsigned _iterations, unsigned _dkLen) bytes dev::pbkdf2(string const& _pass, bytes const& _salt, unsigned _iterations, unsigned _dkLen)
{ {
bytes ret(_dkLen); bytes ret(_dkLen);
PKCS5_PBKDF2_HMAC<SHA256> pbkdf; if (PKCS5_PBKDF2_HMAC<SHA256>().DeriveKey(
pbkdf.DeriveKey(ret.data(), ret.size(), 0, (byte*)_pass.data(), _pass.size(), _salt.data(), _salt.size(), _iterations); ret.data(),
ret.size(),
0,
reinterpret_cast<byte const*>(_pass.data()),
_pass.size(),
_salt.data(),
_salt.size(),
_iterations
) != _iterations)
BOOST_THROW_EXCEPTION(CryptoException() << errinfo_comment("Key derivation failed."));
return ret; return ret;
} }
bytes dev::scrypt(std::string const& _pass, bytes const& _salt, uint64_t _n, uint32_t _r, uint32_t _p, unsigned _dkLen) bytes dev::scrypt(std::string const& _pass, bytes const& _salt, uint64_t _n, uint32_t _r, uint32_t _p, unsigned _dkLen)
{ {
bytes ret(_dkLen); bytes ret(_dkLen);
libscrypt_scrypt((uint8_t const*)_pass.data(), _pass.size(), _salt.data(), _salt.size(), _n, _r, _p, ret.data(), ret.size()); if (libscrypt_scrypt(
reinterpret_cast<uint8_t const*>(_pass.data()),
_pass.size(),
_salt.data(),
_salt.size(),
_n,
_r,
_p,
ret.data(),
ret.size()
) != 0)
BOOST_THROW_EXCEPTION(CryptoException() << errinfo_comment("Key derivation failed."));
return ret; return ret;
} }
@@ -233,42 +254,84 @@ h256 crypto::kdf(Secret const& _priv, h256 const& _hash)
return s; return s;
} }
h256 Nonce::get(bool _commit) mutex Nonce::s_x;
static string s_seedFile;
h256 Nonce::get()
{ {
// todo: atomic efface bit, periodic save, kdf, rr, rng // todo: atomic efface bit, periodic save, kdf, rr, rng
// todo: encrypt // todo: encrypt
static h256 s_seed; Guard l(Nonce::s_x);
static string s_seedFile(getDataDir() + "/seed"); return Nonce::singleton().next();
static mutex s_x; }
Guard l(s_x);
if (!s_seed) void Nonce::reset()
{
Guard l(Nonce::s_x);
Nonce::singleton().resetInternal();
}
void Nonce::setSeedFilePath(string const& _filePath)
{
s_seedFile = _filePath;
}
Nonce::~Nonce()
{
Guard l(Nonce::s_x);
if (m_value)
// this might throw
resetInternal();
}
Nonce& Nonce::singleton()
{
static Nonce s;
return s;
}
void Nonce::initialiseIfNeeded()
{
if (m_value)
return;
bytes b = contents(seedFile());
if (b.size() == 32)
memcpy(m_value.data(), b.data(), 32);
else
{ {
static Nonce s_nonce; // todo: replace w/entropy from user and system
bytes b = contents(s_seedFile); std::mt19937_64 s_eng(time(0) + chrono::high_resolution_clock::now().time_since_epoch().count());
if (b.size() == 32) std::uniform_int_distribution<uint16_t> d(0, 255);
memcpy(s_seed.data(), b.data(), 32); for (unsigned i = 0; i < 32; ++i)
else m_value[i] = byte(d(s_eng));
{
// todo: replace w/entropy from user and system
std::mt19937_64 s_eng(time(0) + chrono::high_resolution_clock::now().time_since_epoch().count());
std::uniform_int_distribution<uint16_t> d(0, 255);
for (unsigned i = 0; i < 32; ++i)
s_seed[i] = (byte)d(s_eng);
}
if (!s_seed)
BOOST_THROW_EXCEPTION(InvalidState());
// prevent seed reuse if process terminates abnormally
writeFile(s_seedFile, bytes());
} }
h256 prev(s_seed); if (!m_value)
sha3(prev.ref(), s_seed.ref()); BOOST_THROW_EXCEPTION(InvalidState());
if (_commit)
writeFile(s_seedFile, s_seed.asBytes()); // prevent seed reuse if process terminates abnormally
return std::move(s_seed); // this might throw
writeFile(seedFile(), bytes());
} }
Nonce::~Nonce() h256 Nonce::next()
{
initialiseIfNeeded();
m_value = sha3(m_value);
return m_value;
}
void Nonce::resetInternal()
{
// this might throw
next();
writeFile(seedFile(), m_value.asBytes());
m_value = h256();
}
string const& Nonce::seedFile()
{ {
Nonce::get(true); if (s_seedFile.empty())
s_seedFile = getDataDir() + "/seed";
return s_seedFile;
} }

27
libdevcrypto/Common.h

@@ -24,6 +24,7 @@
 #pragma once
+#include <mutex>
 #include <libdevcore/Common.h>
 #include <libdevcore/FixedHash.h>
 #include <libdevcore/Exceptions.h>
@@ -180,14 +181,36 @@ struct InvalidState: public dev::Exception {};
 h256 kdf(Secret const& _priv, h256 const& _hash);
 /**
- * @brief Generator for nonce material
+ * @brief Generator for nonce material.
  */
 struct Nonce
 {
-	static h256 get(bool _commit = false);
+	/// Returns the next nonce (might be read from a file).
+	static h256 get();
+	/// Stores the current nonce in a file and resets Nonce to the uninitialised state.
+	static void reset();
+	/// Sets the location of the seed file to a non-default place. Used for testing.
+	static void setSeedFilePath(std::string const& _filePath);
 private:
 	Nonce() {}
 	~Nonce();
+	/// @returns the singleton instance.
+	static Nonce& singleton();
+	/// Reads the last seed from the seed file.
+	void initialiseIfNeeded();
+	/// @returns the next nonce.
+	h256 next();
+	/// Stores the current seed in the seed file.
+	void resetInternal();
+	/// @returns the path of the seed file.
+	static std::string const& seedFile();
+	/// Mutex for the singleton object.
+	/// @note Every access to any private function has to be guarded by this mutex.
+	static std::mutex s_x;
+	h256 m_value;
 };
 }
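A usage sketch of the reworked interface above (the seed-file path is illustrative, set the way a test might):

#include <libdevcrypto/Common.h>

using namespace dev;
using namespace dev::crypto;

int main()
{
	Nonce::setSeedFilePath("/tmp/eth-nonce-seed");  // optional; defaults to <datadir>/seed
	h256 a = Nonce::get();  // loads or creates the seed, then hashes it forward
	h256 b = Nonce::get();  // a and b differ: every call advances the chain
	Nonce::reset();         // persists the current value and clears the in-memory state
	return a == b ? 1 : 0;
}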

35
libdevcrypto/Exceptions.h

@@ -0,0 +1,35 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file Exceptions.h
* @author Christian <c@ethdev.com>
* @date 2016
*/
#pragma once
#include <libdevcore/Exceptions.h>
namespace dev
{
namespace crypto
{
/// Rare malfunction of cryptographic functions.
DEV_SIMPLE_EXCEPTION(CryptoException);
}
}
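Since the generated exception derives from dev::Exception (and thus boost::exception), callers can attach and read context the usual way. A rough sketch, assuming dev::errinfo_comment from libdevcore/Exceptions.h:

#include <iostream>
#include <boost/throw_exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <libdevcrypto/Exceptions.h>

int main()
{
	try
	{
		// The crypto routines above throw this when key derivation or encryption fails.
		BOOST_THROW_EXCEPTION(dev::crypto::CryptoException() << dev::errinfo_comment("Key derivation failed."));
	}
	catch (dev::crypto::CryptoException const& _e)
	{
		std::cerr << boost::diagnostic_information(_e) << std::endl;
	}
	return 0;
}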

3
libdevcrypto/OverlayDB.cpp

@@ -20,8 +20,7 @@
 */
 #include <thread>
-#include <leveldb/db.h>
-#include <leveldb/write_batch.h>
+#include <libdevcore/db.h>
 #include <libdevcore/Common.h>
 #include "OverlayDB.h"
 using namespace std;

7
libdevcrypto/OverlayDB.h

@@ -21,16 +21,11 @@
 #pragma once
-#pragma warning(push)
-#pragma warning(disable: 4100 4267)
-#include <leveldb/db.h>
-#pragma warning(pop)
 #include <memory>
+#include <libdevcore/db.h>
 #include <libdevcore/Common.h>
 #include <libdevcore/Log.h>
 #include <libdevcore/MemoryDB.h>
-namespace ldb = leveldb;
 namespace dev
 {

124
libdevcrypto/SecretStore.cpp

@@ -29,6 +29,7 @@
#include <libdevcore/SHA3.h> #include <libdevcore/SHA3.h>
#include <libdevcore/FileSystem.h> #include <libdevcore/FileSystem.h>
#include <test/JsonSpiritHeaders.h> #include <test/JsonSpiritHeaders.h>
#include <libdevcrypto/Exceptions.h>
using namespace std; using namespace std;
using namespace dev; using namespace dev;
namespace js = json_spirit; namespace js = json_spirit;
@@ -36,7 +37,8 @@ namespace fs = boost::filesystem;
static const int c_keyFileVersion = 3; static const int c_keyFileVersion = 3;
static js::mValue upgraded(std::string const& _s) /// Upgrade the json-format to the current version.
static js::mValue upgraded(string const& _s)
{ {
js::mValue v; js::mValue v;
js::read_string(_s, v); js::read_string(_s, v);
@@ -84,35 +86,34 @@ static js::mValue upgraded(std::string const& _s)
return js::mValue(); return js::mValue();
} }
SecretStore::SecretStore(std::string const& _path): m_path(_path) SecretStore::SecretStore(string const& _path): m_path(_path)
{ {
load(); load();
} }
SecretStore::~SecretStore() bytes SecretStore::secret(h128 const& _uuid, function<string()> const& _pass, bool _useCache) const
{ {
}
bytes SecretStore::secret(h128 const& _uuid, function<std::string()> const& _pass, bool _useCache) const
{
(void)_pass;
auto rit = m_cached.find(_uuid); auto rit = m_cached.find(_uuid);
if (_useCache && rit != m_cached.end()) if (_useCache && rit != m_cached.end())
return rit->second; return rit->second;
auto it = m_keys.find(_uuid); auto it = m_keys.find(_uuid);
if (it == m_keys.end()) bytes key;
return bytes(); if (it != m_keys.end())
bytes key = decrypt(it->second.first, _pass()); {
if (!key.empty()) key = decrypt(it->second.encryptedKey, _pass());
m_cached[_uuid] = key; if (!key.empty())
m_cached[_uuid] = key;
}
return key; return key;
} }
h128 SecretStore::importSecret(bytes const& _s, std::string const& _pass) h128 SecretStore::importSecret(bytes const& _s, string const& _pass)
{ {
h128 r = h128::random(); h128 r;
EncryptedKey key{encrypt(_s, _pass), string()};
r = h128::random();
m_cached[r] = _s; m_cached[r] = _s;
m_keys[r] = make_pair(encrypt(_s, _pass), std::string()); m_keys[r] = move(key);
save(); save();
return r; return r;
} }
@@ -122,7 +123,7 @@ void SecretStore::kill(h128 const& _uuid)
m_cached.erase(_uuid); m_cached.erase(_uuid);
if (m_keys.count(_uuid)) if (m_keys.count(_uuid))
{ {
boost::filesystem::remove(m_keys[_uuid].second); fs::remove(m_keys[_uuid].filename);
m_keys.erase(_uuid); m_keys.erase(_uuid);
} }
} }
@@ -132,50 +133,50 @@ void SecretStore::clearCache() const
m_cached.clear(); m_cached.clear();
} }
void SecretStore::save(std::string const& _keysPath) void SecretStore::save(string const& _keysPath)
{ {
fs::path p(_keysPath); fs::path p(_keysPath);
boost::filesystem::create_directories(p); fs::create_directories(p);
for (auto& k: m_keys) for (auto& k: m_keys)
{ {
std::string uuid = toUUID(k.first); string uuid = toUUID(k.first);
std::string filename = (p / uuid).string() + ".json"; string filename = (p / uuid).string() + ".json";
js::mObject v; js::mObject v;
js::mValue crypto; js::mValue crypto;
js::read_string(k.second.first, crypto); js::read_string(k.second.encryptedKey, crypto);
v["crypto"] = crypto; v["crypto"] = crypto;
v["id"] = uuid; v["id"] = uuid;
v["version"] = c_keyFileVersion; v["version"] = c_keyFileVersion;
writeFile(filename, js::write_string(js::mValue(v), true)); writeFile(filename, js::write_string(js::mValue(v), true));
if (!k.second.second.empty() && k.second.second != filename) swap(k.second.filename, filename);
boost::filesystem::remove(k.second.second); if (!filename.empty() && !fs::equivalent(filename, k.second.filename))
k.second.second = filename; fs::remove(filename);
} }
} }
void SecretStore::load(std::string const& _keysPath) void SecretStore::load(string const& _keysPath)
{ {
fs::path p(_keysPath); fs::path p(_keysPath);
boost::filesystem::create_directories(p); fs::create_directories(p);
for (fs::directory_iterator it(p); it != fs::directory_iterator(); ++it) for (fs::directory_iterator it(p); it != fs::directory_iterator(); ++it)
if (is_regular_file(it->path())) if (fs::is_regular_file(it->path()))
readKey(it->path().string(), true); readKey(it->path().string(), true);
} }
h128 SecretStore::readKey(std::string const& _file, bool _deleteFile) h128 SecretStore::readKey(string const& _file, bool _takeFileOwnership)
{ {
cnote << "Reading" << _file; cnote << "Reading" << _file;
return readKeyContent(contentsString(_file), _deleteFile ? _file : string()); return readKeyContent(contentsString(_file), _takeFileOwnership ? _file : string());
} }
h128 SecretStore::readKeyContent(std::string const& _content, std::string const& _file) h128 SecretStore::readKeyContent(string const& _content, string const& _file)
{ {
js::mValue u = upgraded(_content); js::mValue u = upgraded(_content);
if (u.type() == js::obj_type) if (u.type() == js::obj_type)
{ {
js::mObject& o = u.get_obj(); js::mObject& o = u.get_obj();
auto uuid = fromUUID(o["id"].get_str()); auto uuid = fromUUID(o["id"].get_str());
m_keys[uuid] = make_pair(js::write_string(o["crypto"], false), _file); m_keys[uuid] = EncryptedKey{js::write_string(o["crypto"], false), _file};
return uuid; return uuid;
} }
else else
@@ -183,62 +184,63 @@ h128 SecretStore::readKeyContent(std::string const& _content, std::string const&
return h128(); return h128();
} }
bool SecretStore::recode(h128 const& _uuid, string const& _newPass, std::function<std::string()> const& _pass, KDF _kdf) bool SecretStore::recode(h128 const& _uuid, string const& _newPass, function<string()> const& _pass, KDF _kdf)
{ {
// cdebug << "recode:" << toUUID(_uuid);
bytes s = secret(_uuid, _pass, true); bytes s = secret(_uuid, _pass, true);
if (s.empty()) if (s.empty())
return false; return false;
m_keys[_uuid].first = encrypt(s, _newPass, _kdf); m_cached.erase(_uuid);
m_keys[_uuid].encryptedKey = encrypt(s, _newPass, _kdf);
save(); save();
return true; return true;
} }
std::string SecretStore::encrypt(bytes const& _v, std::string const& _pass, KDF _kdf) static bytes deriveNewKey(string const& _pass, KDF _kdf, js::mObject& o_ret)
{ {
js::mObject ret;
// KDF info
unsigned dklen = 32; unsigned dklen = 32;
unsigned iterations = 1 << 18;
bytes salt = h256::random().asBytes(); bytes salt = h256::random().asBytes();
bytes derivedKey;
if (_kdf == KDF::Scrypt) if (_kdf == KDF::Scrypt)
{ {
unsigned iterations = 262144;
unsigned p = 1; unsigned p = 1;
unsigned r = 8; unsigned r = 8;
ret["kdf"] = "scrypt"; o_ret["kdf"] = "scrypt";
{ {
js::mObject params; js::mObject params;
params["n"] = (int64_t)iterations; params["n"] = int64_t(iterations);
params["r"] = (int)r; params["r"] = int(r);
params["p"] = (int)p; params["p"] = int(p);
params["dklen"] = (int)dklen; params["dklen"] = int(dklen);
params["salt"] = toHex(salt); params["salt"] = toHex(salt);
ret["kdfparams"] = params; o_ret["kdfparams"] = params;
} }
derivedKey = scrypt(_pass, salt, iterations, r, p, dklen); return scrypt(_pass, salt, iterations, r, p, dklen);
} }
else else
{ {
unsigned iterations = 262144; o_ret["kdf"] = "pbkdf2";
ret["kdf"] = "pbkdf2";
{ {
js::mObject params; js::mObject params;
params["prf"] = "hmac-sha256"; params["prf"] = "hmac-sha256";
params["c"] = (int)iterations; params["c"] = int(iterations);
params["salt"] = toHex(salt); params["salt"] = toHex(salt);
params["dklen"] = (int)dklen; params["dklen"] = int(dklen);
ret["kdfparams"] = params; o_ret["kdfparams"] = params;
} }
derivedKey = pbkdf2(_pass, salt, iterations, dklen); return pbkdf2(_pass, salt, iterations, dklen);
} }
// cdebug << "derivedKey" << toHex(derivedKey); }
string SecretStore::encrypt(bytes const& _v, string const& _pass, KDF _kdf)
{
js::mObject ret;
bytes derivedKey = deriveNewKey(_pass, _kdf, ret);
if (derivedKey.empty())
BOOST_THROW_EXCEPTION(crypto::CryptoException() << errinfo_comment("Key derivation failed."));
// cipher info
ret["cipher"] = "aes-128-ctr"; ret["cipher"] = "aes-128-ctr";
h128 key(derivedKey, h128::AlignLeft); h128 key(derivedKey, h128::AlignLeft);
// cdebug << "cipherKey" << key.hex();
h128 iv = h128::random(); h128 iv = h128::random();
{ {
js::mObject params; js::mObject params;
@@ -248,18 +250,18 @@ std::string SecretStore::encrypt(bytes const& _v, std::string const& _pass, KDF
// cipher text // cipher text
bytes cipherText = encryptSymNoAuth(key, iv, &_v); bytes cipherText = encryptSymNoAuth(key, iv, &_v);
if (cipherText.empty())
BOOST_THROW_EXCEPTION(crypto::CryptoException() << errinfo_comment("Key encryption failed."));
ret["ciphertext"] = toHex(cipherText); ret["ciphertext"] = toHex(cipherText);
// and mac. // and mac.
h256 mac = sha3(ref(derivedKey).cropped(16, 16).toBytes() + cipherText); h256 mac = sha3(ref(derivedKey).cropped(16, 16).toBytes() + cipherText);
// cdebug << "macBody" << toHex(ref(derivedKey).cropped(16, 16).toBytes() + cipherText);
// cdebug << "mac" << mac.hex();
ret["mac"] = toHex(mac.ref()); ret["mac"] = toHex(mac.ref());
return js::write_string((js::mValue)ret, true); return js::write_string(js::mValue(ret), true);
} }
bytes SecretStore::decrypt(std::string const& _v, std::string const& _pass) bytes SecretStore::decrypt(string const& _v, string const& _pass)
{ {
js::mObject o; js::mObject o;
{ {

50
libdevcrypto/SecretStore.h

@@ -35,41 +35,81 @@ enum class KDF {
Scrypt, Scrypt,
}; };
/**
* Manages encrypted keys stored in a certain directory on disk. The keys are read into memory
* and changes to the keys are automatically synced to the directory.
* Each file stores exactly one key in a specific JSON format whose file name is derived from the
* UUID of the key.
* @note that most of the functions here affect the filesystem and throw exceptions on failure,
* and they also throw exceptions upon rare malfunction in the cryptographic functions.
*/
class SecretStore class SecretStore
{ {
public: public:
/// Construct a new SecretStore and read all keys in the given directory.
SecretStore(std::string const& _path = defaultPath()); SecretStore(std::string const& _path = defaultPath());
~SecretStore();
/// @returns the secret key stored by the given @a _uuid.
/// @param _pass function that returns the password for the key.
/// @param _useCache if true, allow previously decrypted keys to be returned directly.
bytes secret(h128 const& _uuid, std::function<std::string()> const& _pass, bool _useCache = true) const; bytes secret(h128 const& _uuid, std::function<std::string()> const& _pass, bool _useCache = true) const;
/// Imports the (encrypted) key stored in the file @a _file and copies it to the managed directory.
h128 importKey(std::string const& _file) { auto ret = readKey(_file, false); if (ret) save(); return ret; } h128 importKey(std::string const& _file) { auto ret = readKey(_file, false); if (ret) save(); return ret; }
/// Imports the (encrypted) key contained in the json formatted @a _content and stores it in
/// the managed directory.
h128 importKeyContent(std::string const& _content) { auto ret = readKeyContent(_content, std::string()); if (ret) save(); return ret; } h128 importKeyContent(std::string const& _content) { auto ret = readKeyContent(_content, std::string()); if (ret) save(); return ret; }
/// Imports the decrypted key given by @a _s and stores it, encrypted with
/// (a key derived from) the password @a _pass.
h128 importSecret(bytes const& _s, std::string const& _pass); h128 importSecret(bytes const& _s, std::string const& _pass);
/// Decrypts and re-encrypts the key identified by @a _uuid.
bool recode(h128 const& _uuid, std::string const& _newPass, std::function<std::string()> const& _pass, KDF _kdf = KDF::Scrypt); bool recode(h128 const& _uuid, std::string const& _newPass, std::function<std::string()> const& _pass, KDF _kdf = KDF::Scrypt);
/// Removes the key specified by @a _uuid from both memory and disk.
void kill(h128 const& _uuid); void kill(h128 const& _uuid);
/// Returns the uuids of all stored keys.
std::vector<h128> keys() const { return keysOf(m_keys); } std::vector<h128> keys() const { return keysOf(m_keys); }
// Clear any cached keys. /// Clears all cached decrypted keys. The passwords have to be supplied in order to retrieve
/// secrets again after calling this function.
void clearCache() const; void clearCache() const;
// Doesn't save(). /// Import the key from the file @a _file, but do not copy it to the managed directory yet.
h128 readKey(std::string const& _file, bool _deleteFile); /// @param _takeFileOwnership if true, deletes the file if it is not the canonical file for the
/// key (derived from its uuid).
h128 readKey(std::string const& _file, bool _takeFileOwnership);
/// Import the key contained in the json-encoded @a _content, but do not store it in the
/// managed directory.
/// @param _file if given, assume this file contains @a _content and delete it later, if it is
/// not the canonical file for the key (derived from the uuid).
h128 readKeyContent(std::string const& _content, std::string const& _file = std::string()); h128 readKeyContent(std::string const& _content, std::string const& _file = std::string());
/// Store all keys in the directory @a _keysPath.
void save(std::string const& _keysPath); void save(std::string const& _keysPath);
/// Store all keys in the managed directory.
void save() { save(m_path); } void save() { save(m_path); }
/// @returns the default path for the managed directory.
static std::string defaultPath() { return getDataDir("web3") + "/keys"; } static std::string defaultPath() { return getDataDir("web3") + "/keys"; }
private: private:
struct EncryptedKey
{
std::string encryptedKey;
std::string filename;
};
/// Loads all keys in the given directory.
void load(std::string const& _keysPath); void load(std::string const& _keysPath);
void load() { load(m_path); } void load() { load(m_path); }
/// Encrypts @a _v with a key derived from @a _pass or the empty string on error.
static std::string encrypt(bytes const& _v, std::string const& _pass, KDF _kdf = KDF::Scrypt); static std::string encrypt(bytes const& _v, std::string const& _pass, KDF _kdf = KDF::Scrypt);
/// Decrypts @a _v with a key derived from @a _pass or the empty byte array on error.
static bytes decrypt(std::string const& _v, std::string const& _pass); static bytes decrypt(std::string const& _v, std::string const& _pass);
/// Stores decrypted keys by uuid.
mutable std::unordered_map<h128, bytes> m_cached; mutable std::unordered_map<h128, bytes> m_cached;
std::unordered_map<h128, std::pair<std::string, std::string>> m_keys; /// Stores encrypted keys together with the file they were loaded from by uuid.
std::unordered_map<h128, EncryptedKey> m_keys;
std::string m_path; std::string m_path;
}; };
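A short end-to-end sketch of the documented interface (the directory and passwords are illustrative only):

#include <iostream>
#include <string>
#include <libdevcrypto/SecretStore.h>

using namespace dev;

int main()
{
	SecretStore store("/tmp/test-keys");              // reads any keys already in the directory
	bytes secret(32, 0x42);
	h128 uuid = store.importSecret(secret, "password");
	// The password is requested lazily; decrypted keys are cached until clearCache().
	bytes roundTrip = store.secret(uuid, []() { return std::string("password"); });
	std::cout << (roundTrip == secret ? "ok" : "mismatch") << std::endl;
	// Re-encrypt under a new password (keeps the default scrypt KDF).
	store.recode(uuid, "newpassword", []() { return std::string("password"); });
	return 0;
}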

71
libethash-cl/ethash_cl_miner.cpp

@@ -140,12 +140,10 @@ unsigned ethash_cl_miner::getNumDevices(unsigned _platformId)
bool ethash_cl_miner::configureGPU( bool ethash_cl_miner::configureGPU(
bool _allowCPU, bool _allowCPU,
unsigned _extraGPUMemory, unsigned _extraGPUMemory,
bool _forceSingleChunk,
boost::optional<uint64_t> _currentBlock boost::optional<uint64_t> _currentBlock
) )
{ {
s_allowCPU = _allowCPU; s_allowCPU = _allowCPU;
s_forceSingleChunk = _forceSingleChunk;
s_extraRequiredGPUMem = _extraGPUMemory; s_extraRequiredGPUMem = _extraGPUMemory;
// by default let's only consider the DAG of the first epoch // by default let's only consider the DAG of the first epoch
uint64_t dagSize = _currentBlock ? ethash_get_datasize(*_currentBlock) : 1073739904U; uint64_t dagSize = _currentBlock ? ethash_get_datasize(*_currentBlock) : 1073739904U;
@@ -174,7 +172,6 @@ bool ethash_cl_miner::configureGPU(
} }
bool ethash_cl_miner::s_allowCPU = false; bool ethash_cl_miner::s_allowCPU = false;
bool ethash_cl_miner::s_forceSingleChunk = false;
unsigned ethash_cl_miner::s_extraRequiredGPUMem; unsigned ethash_cl_miner::s_extraRequiredGPUMem;
bool ethash_cl_miner::searchForAllDevices(function<bool(cl::Device const&)> _callback) bool ethash_cl_miner::searchForAllDevices(function<bool(cl::Device const&)> _callback)
@@ -288,23 +285,6 @@ bool ethash_cl_miner::init(
string device_version = device.getInfo<CL_DEVICE_VERSION>(); string device_version = device.getInfo<CL_DEVICE_VERSION>();
ETHCL_LOG("Using device: " << device.getInfo<CL_DEVICE_NAME>().c_str() << "(" << device_version.c_str() << ")"); ETHCL_LOG("Using device: " << device.getInfo<CL_DEVICE_NAME>().c_str() << "(" << device_version.c_str() << ")");
// configure chunk number depending on max allocateable memory
cl_ulong result;
device.getInfo(CL_DEVICE_MAX_MEM_ALLOC_SIZE, &result);
if (s_forceSingleChunk || result >= _dagSize)
{
m_dagChunksNum = 1;
ETHCL_LOG(
((result <= _dagSize && s_forceSingleChunk) ? "Forcing single chunk. Good luck!\n" : "") <<
"Using 1 big chunk. Max OpenCL allocateable memory is " << result
);
}
else
{
m_dagChunksNum = 4;
ETHCL_LOG("Using 4 chunks. Max OpenCL allocateable memory is " << result);
}
if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0) if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0)
{ {
ETHCL_LOG("OpenCL 1.0 is not supported."); ETHCL_LOG("OpenCL 1.0 is not supported.");
@@ -341,31 +321,32 @@ bool ethash_cl_miner::init(
ETHCL_LOG("Printing program log"); ETHCL_LOG("Printing program log");
ETHCL_LOG(program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str()); ETHCL_LOG(program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str());
} }
catch (cl::Error err) catch (cl::Error const& err)
{ {
ETHCL_LOG(program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str()); ETHCL_LOG(program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str());
return false; return false;
} }
if (m_dagChunksNum == 1)
{
ETHCL_LOG("Loading single big chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash");
m_search_kernel = cl::Kernel(program, "ethash_search");
}
else
{
ETHCL_LOG("Loading chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash_chunks");
m_search_kernel = cl::Kernel(program, "ethash_search_chunks");
}
// create buffer for dag // create buffer for dag
if (m_dagChunksNum == 1) try
{ {
ETHCL_LOG("Creating one big buffer"); m_dagChunksNum = 1;
m_dagChunks.push_back(cl::Buffer(m_context, CL_MEM_READ_ONLY, _dagSize)); m_dagChunks.push_back(cl::Buffer(m_context, CL_MEM_READ_ONLY, _dagSize));
ETHCL_LOG("Created one big buffer for the DAG");
} }
else catch (cl::Error const& err)
{
int errCode = err.err();
if (errCode != CL_INVALID_BUFFER_SIZE || errCode != CL_MEM_OBJECT_ALLOCATION_FAILURE)
ETHCL_LOG("Allocating single buffer failed with: " << err.what() << "(" << errCode << ")");
cl_ulong result;
device.getInfo(CL_DEVICE_MAX_MEM_ALLOC_SIZE, &result);
ETHCL_LOG(
"Failed to allocate 1 big chunk. Max allocateable memory is "
<< result << ". Trying to allocate 4 chunks."
);
// The OpenCL kernel has a hard coded number of 4 chunks at the moment
m_dagChunksNum = 4;
for (unsigned i = 0; i < m_dagChunksNum; i++) for (unsigned i = 0; i < m_dagChunksNum; i++)
{ {
// TODO Note: If we ever change to _dagChunksNum other than 4, then the size would need recalculation // TODO Note: If we ever change to _dagChunksNum other than 4, then the size would need recalculation
@ -376,6 +357,20 @@ bool ethash_cl_miner::init(
(i == 3) ? (_dagSize - 3 * ((_dagSize >> 9) << 7)) : (_dagSize >> 9) << 7 (i == 3) ? (_dagSize - 3 * ((_dagSize >> 9) << 7)) : (_dagSize >> 9) << 7
)); ));
} }
}
if (m_dagChunksNum == 1)
{
ETHCL_LOG("Loading single big chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash");
m_search_kernel = cl::Kernel(program, "ethash_search");
}
else
{
ETHCL_LOG("Loading chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash_chunks");
m_search_kernel = cl::Kernel(program, "ethash_search_chunks");
}
// create buffer for header // create buffer for header
ETHCL_LOG("Creating buffer for header."); ETHCL_LOG("Creating buffer for header.");
@ -410,7 +405,7 @@ bool ethash_cl_miner::init(
m_search_buf[i] = cl::Buffer(m_context, CL_MEM_WRITE_ONLY, (c_max_search_results + 1) * sizeof(uint32_t)); m_search_buf[i] = cl::Buffer(m_context, CL_MEM_WRITE_ONLY, (c_max_search_results + 1) * sizeof(uint32_t));
} }
} }
catch (cl::Error err) catch (cl::Error const& err)
{ {
ETHCL_LOG(err.what() << "(" << err.err() << ")"); ETHCL_LOG(err.what() << "(" << err.err() << ")");
return false; return false;
@ -504,7 +499,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
pre_return_event.wait(); pre_return_event.wait();
#endif #endif
} }
catch (cl::Error err) catch (cl::Error const& err)
{ {
ETHCL_LOG(err.what() << "(" << err.err() << ")"); ETHCL_LOG(err.what() << "(" << err.err() << ")");
} }

3
libethash-cl/ethash_cl_miner.h

@ -44,7 +44,6 @@ public:
static bool configureGPU( static bool configureGPU(
bool _allowCPU, bool _allowCPU,
unsigned _extraGPUMemory, unsigned _extraGPUMemory,
bool _forceSingleChunk,
boost::optional<uint64_t> _currentBlock boost::optional<uint64_t> _currentBlock
); );
@ -79,8 +78,6 @@ private:
unsigned m_workgroup_size; unsigned m_workgroup_size;
bool m_opencl_1_1; bool m_opencl_1_1;
/// Force dag upload to GPU in a single chunk even if OpenCL thinks you can't do it. Use at your own risk.
static bool s_forceSingleChunk;
/// Allow CPU to appear as an OpenCL device or not. Default is false /// Allow CPU to appear as an OpenCL device or not. Default is false
static bool s_allowCPU; static bool s_allowCPU;
/// GPU memory required for other things, like window rendering e.t.c. /// GPU memory required for other things, like window rendering e.t.c.

6
libethcore/Common.h

@ -47,7 +47,8 @@ extern const unsigned c_databaseVersion;
enum class Network enum class Network
{ {
Olympic = 0, Olympic = 0,
Frontier = 1 Frontier = 1,
Turbo = 2
}; };
extern const Network c_network; extern const Network c_network;
@ -100,7 +101,8 @@ enum class ImportResult
{ {
Success = 0, Success = 0,
UnknownParent, UnknownParent,
FutureTime, FutureTimeKnown,
FutureTimeUnknown,
AlreadyInChain, AlreadyInChain,
AlreadyKnown, AlreadyKnown,
Malformed, Malformed,
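The split of FutureTime into FutureTimeKnown and FutureTimeUnknown mirrors the parent check added to BlockQueue::import further down: a future-timestamped block whose parent is already queued, draining, or on-chain reports FutureTimeKnown, otherwise FutureTimeUnknown so the sync code can treat it like an unknown-parent block. A minimal sketch of that decision with simplified types (not the library's API):
// Illustrative only; the real check lives in BlockQueue::import.
enum class ImportResult { Success, UnknownParent, FutureTimeKnown, FutureTimeUnknown, AlreadyInChain, AlreadyKnown, Malformed };

template <class HashSet, class Chain, class Hash>
ImportResult classifyFutureBlock(HashSet const& _ready, HashSet const& _draining, Chain const& _bc, Hash const& _parent)
{
	bool unknownParent = !_ready.count(_parent) && !_draining.count(_parent) && !_bc.isKnown(_parent);
	return unknownParent ? ImportResult::FutureTimeUnknown : ImportResult::FutureTimeKnown;
}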

3
libethcore/Ethash.cpp

@ -389,13 +389,12 @@ bool Ethash::GPUMiner::configureGPU(
unsigned _deviceId, unsigned _deviceId,
bool _allowCPU, bool _allowCPU,
unsigned _extraGPUMemory, unsigned _extraGPUMemory,
bool _forceSingleChunk,
boost::optional<uint64_t> _currentBlock boost::optional<uint64_t> _currentBlock
) )
{ {
s_platformId = _platformId; s_platformId = _platformId;
s_deviceId = _deviceId; s_deviceId = _deviceId;
return ethash_cl_miner::configureGPU(_allowCPU, _extraGPUMemory, _forceSingleChunk, _currentBlock); return ethash_cl_miner::configureGPU(_allowCPU, _extraGPUMemory, _currentBlock);
} }
#endif #endif

3
libethcore/Ethash.h

@ -88,7 +88,7 @@ public:
static unsigned instances() { return s_numInstances > 0 ? s_numInstances : std::thread::hardware_concurrency(); } static unsigned instances() { return s_numInstances > 0 ? s_numInstances : std::thread::hardware_concurrency(); }
static std::string platformInfo(); static std::string platformInfo();
static void listDevices() {} static void listDevices() {}
static bool configureGPU(unsigned, unsigned, bool, unsigned, bool, boost::optional<uint64_t>) { return false; } static bool configureGPU(unsigned, unsigned, bool, unsigned, boost::optional<uint64_t>) { return false; }
static void setNumInstances(unsigned _instances) { s_numInstances = std::min<unsigned>(_instances, std::thread::hardware_concurrency()); } static void setNumInstances(unsigned _instances) { s_numInstances = std::min<unsigned>(_instances, std::thread::hardware_concurrency()); }
protected: protected:
void kickOff() override void kickOff() override
@ -122,7 +122,6 @@ public:
unsigned _deviceId, unsigned _deviceId,
bool _allowCPU, bool _allowCPU,
unsigned _extraGPUMemory, unsigned _extraGPUMemory,
bool _forceSingleChunk,
boost::optional<uint64_t> _currentBlock boost::optional<uint64_t> _currentBlock
); );
static void setNumInstances(unsigned _instances) { s_numInstances = std::min<unsigned>(_instances, getNumDevices()); } static void setNumInstances(unsigned _instances) { s_numInstances = std::min<unsigned>(_instances, getNumDevices()); }

3
libethcore/Farm.h

@ -68,6 +68,7 @@ public:
void setWork(WorkPackage const& _wp) void setWork(WorkPackage const& _wp)
{ {
WriteGuard l(x_minerWork); WriteGuard l(x_minerWork);
cdebug << "Farm::setWork()";
if (_wp.headerHash == m_work.headerHash) if (_wp.headerHash == m_work.headerHash)
return; return;
m_work = _wp; m_work = _wp;
@ -94,6 +95,7 @@ public:
void stop() void stop()
{ {
WriteGuard l(x_minerWork); WriteGuard l(x_minerWork);
cdebug << "Farm::stop()";
m_miners.clear(); m_miners.clear();
m_work.reset(); m_work.reset();
m_isMining = false; m_isMining = false;
@ -175,6 +177,7 @@ private:
bool start() bool start()
{ {
WriteGuard l(x_minerWork); WriteGuard l(x_minerWork);
cdebug << "start()";
if (!m_miners.empty() && !!std::dynamic_pointer_cast<MinerType>(m_miners[0])) if (!m_miners.empty() && !!std::dynamic_pointer_cast<MinerType>(m_miners[0]))
return true; return true;
m_miners.clear(); m_miners.clear();

6
libethcore/Params.cpp

@ -31,12 +31,12 @@ namespace eth
//--- BEGIN: AUTOGENERATED FROM github.com/ethereum/common/params.json //--- BEGIN: AUTOGENERATED FROM github.com/ethereum/common/params.json
u256 const c_genesisDifficulty = 131072; u256 const c_genesisDifficulty = 131072;
u256 const c_maximumExtraDataSize = 1024; u256 const c_maximumExtraDataSize = 1024;
u256 const c_genesisGasLimit = 3141592; u256 const c_genesisGasLimit = c_network == Network::Turbo ? 100000000 : 3141592;
u256 const c_minGasLimit = 125000; u256 const c_minGasLimit = c_network == Network::Turbo ? 100000000 : 125000;
u256 const c_gasLimitBoundDivisor = 1024; u256 const c_gasLimitBoundDivisor = 1024;
u256 const c_minimumDifficulty = 131072; u256 const c_minimumDifficulty = 131072;
u256 const c_difficultyBoundDivisor = 2048; u256 const c_difficultyBoundDivisor = 2048;
u256 const c_durationLimit = c_network == Network::Olympic ? 8 : 12; u256 const c_durationLimit = c_network == Network::Turbo ? 2 : c_network == Network::Olympic ? 8 : 12;
//--- END: AUTOGENERATED FROM /feeStructure.json //--- END: AUTOGENERATED FROM /feeStructure.json
} }

12
libethereum/BlockChain.cpp

@ -24,8 +24,6 @@
#if ETH_PROFILING_GPERF #if ETH_PROFILING_GPERF
#include <gperftools/profiler.h> #include <gperftools/profiler.h>
#endif #endif
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
#include <boost/timer.hpp> #include <boost/timer.hpp>
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <test/JsonSpiritHeaders.h> #include <test/JsonSpiritHeaders.h>
@ -305,7 +303,7 @@ LastHashes BlockChain::lastHashes(unsigned _n) const
return m_lastLastHashes; return m_lastLastHashes;
} }
tuple<ImportRoute, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max) tuple<ImportRoute, bool, unsigned> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max)
{ {
// _bq.tick(*this); // _bq.tick(*this);
@ -315,6 +313,7 @@ tuple<ImportRoute, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _sta
h256s fresh; h256s fresh;
h256s dead; h256s dead;
h256s badBlocks; h256s badBlocks;
unsigned count = 0;
for (VerifiedBlock const& block: blocks) for (VerifiedBlock const& block: blocks)
if (!badBlocks.empty()) if (!badBlocks.empty())
badBlocks.push_back(block.verified.info.hash()); badBlocks.push_back(block.verified.info.hash());
@ -328,6 +327,7 @@ tuple<ImportRoute, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _sta
r = import(block.verified, _stateDB, ImportRequirements::Default & ~ImportRequirements::ValidNonce & ~ImportRequirements::CheckUncles); r = import(block.verified, _stateDB, ImportRequirements::Default & ~ImportRequirements::ValidNonce & ~ImportRequirements::CheckUncles);
fresh += r.liveBlocks; fresh += r.liveBlocks;
dead += r.deadBlocks; dead += r.deadBlocks;
++count;
} }
catch (dev::eth::UnknownParent) catch (dev::eth::UnknownParent)
{ {
@ -353,7 +353,7 @@ tuple<ImportRoute, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _sta
badBlocks.push_back(block.verified.info.hash()); badBlocks.push_back(block.verified.info.hash());
} }
} }
return make_tuple(ImportRoute{dead, fresh}, _bq.doneDrain(badBlocks)); return make_tuple(ImportRoute{dead, fresh}, _bq.doneDrain(badBlocks), count);
} }
pair<ImportResult, ImportRoute> BlockChain::attemptImport(bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir) noexcept pair<ImportResult, ImportRoute> BlockChain::attemptImport(bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir) noexcept
@ -372,7 +372,7 @@ pair<ImportResult, ImportRoute> BlockChain::attemptImport(bytes const& _block, O
} }
catch (FutureTime&) catch (FutureTime&)
{ {
return make_pair(ImportResult::FutureTime, ImportRoute()); return make_pair(ImportResult::FutureTimeKnown, ImportRoute());
} }
catch (Exception& ex) catch (Exception& ex)
{ {
@ -1122,6 +1122,6 @@ VerifiedBlockRef BlockChain::verifyBlock(bytes const& _block, function<void(Exce
++i; ++i;
} }
res.block = bytesConstRef(&_block); res.block = bytesConstRef(&_block);
return move(res); return res;
} }

9
libethereum/BlockChain.h

@ -21,15 +21,11 @@
#pragma once #pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <deque> #include <deque>
#include <chrono> #include <chrono>
#include <unordered_map> #include <unordered_map>
#include <unordered_set> #include <unordered_set>
#include <libdevcore/db.h>
#include <libdevcore/Log.h> #include <libdevcore/Log.h>
#include <libdevcore/Exceptions.h> #include <libdevcore/Exceptions.h>
#include <libdevcore/Guards.h> #include <libdevcore/Guards.h>
@ -41,7 +37,6 @@
#include "Transaction.h" #include "Transaction.h"
#include "BlockQueue.h" #include "BlockQueue.h"
#include "VerifiedBlock.h" #include "VerifiedBlock.h"
namespace ldb = leveldb;
namespace std namespace std
{ {
@ -117,7 +112,7 @@ public:
/// Sync the chain with any incoming blocks. All blocks should, if processed in order. /// Sync the chain with any incoming blocks. All blocks should, if processed in order.
/// @returns fresh blocks, dead blocks and true iff there are additional blocks to be processed waiting. /// @returns fresh blocks, dead blocks and true iff there are additional blocks to be processed waiting.
std::tuple<ImportRoute, bool> sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max); std::tuple<ImportRoute, bool, unsigned> sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max);
/// Attempt to import the given block directly into the CanonBlockChain and sync with the state DB. /// Attempt to import the given block directly into the CanonBlockChain and sync with the state DB.
/// @returns the block hashes of any blocks that came into/went out of the canonical block chain. /// @returns the block hashes of any blocks that came into/went out of the canonical block chain.

800
libethereum/BlockChainSync.cpp

@ -0,0 +1,800 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file BlockChainSync.cpp
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#include "BlockChainSync.h"
#include <chrono>
#include <libdevcore/Common.h>
#include <libp2p/Host.h>
#include <libp2p/Session.h>
#include <libethcore/Exceptions.h>
#include <libethcore/Params.h>
#include "BlockChain.h"
#include "BlockQueue.h"
#include "EthereumPeer.h"
#include "EthereumHost.h"
#include "DownloadMan.h"
using namespace std;
using namespace dev;
using namespace dev::eth;
using namespace p2p;
unsigned const c_chainReorgSize = 30000;
BlockChainSync::BlockChainSync(EthereumHost& _host):
m_host(_host)
{
m_bqRoomAvailable = host().bq().onRoomAvailable([this]()
{
RecursiveGuard l(x_sync);
continueSync();
});
}
BlockChainSync::~BlockChainSync()
{
RecursiveGuard l(x_sync);
abortSync();
}
DownloadMan const& BlockChainSync::downloadMan() const
{
return host().downloadMan();
}
DownloadMan& BlockChainSync::downloadMan()
{
return host().downloadMan();
}
void BlockChainSync::abortSync()
{
DEV_INVARIANT_CHECK;
host().foreachPeer([this](EthereumPeer* _p) { onPeerAborting(_p); return true; });
downloadMan().resetToChain(h256s());
DEV_INVARIANT_CHECK;
}
void BlockChainSync::onPeerStatus(EthereumPeer* _peer)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
if (_peer->m_genesisHash != host().chain().genesisHash())
_peer->disable("Invalid genesis hash");
else if (_peer->m_protocolVersion != host().protocolVersion() && _peer->m_protocolVersion != EthereumHost::c_oldProtocolVersion)
_peer->disable("Invalid protocol version.");
else if (_peer->m_networkId != host().networkId())
_peer->disable("Invalid network identifier.");
else if (_peer->session()->info().clientVersion.find("/v0.7.0/") != string::npos)
_peer->disable("Blacklisted client version.");
else if (host().isBanned(_peer->session()->id()))
_peer->disable("Peer banned for previous bad behaviour.");
else
{
unsigned hashes = estimatedHashes();
_peer->m_expectedHashes = hashes;
onNewPeer(_peer);
}
DEV_INVARIANT_CHECK;
}
unsigned BlockChainSync::estimatedHashes() const
{
BlockInfo block = host().chain().info();
time_t lastBlockTime = (block.hash() == host().chain().genesisHash()) ? 1428192000 : (time_t)block.timestamp;
time_t now = time(0);
unsigned blockCount = c_chainReorgSize;
if (lastBlockTime > now)
clog(NetWarn) << "Clock skew? Latest block is in the future";
else
blockCount += (now - lastBlockTime) / (unsigned)c_durationLimit;
clog(NetAllDetail) << "Estimated hashes: " << blockCount;
return blockCount;
}
void BlockChainSync::requestBlocks(EthereumPeer* _peer)
{
if (host().bq().knownFull())
{
clog(NetAllDetail) << "Waiting for block queue before downloading blocks";
pauseSync();
_peer->setIdle();
return;
}
_peer->requestBlocks();
if (_peer->m_asking != Asking::Blocks) //nothing to download
{
peerDoneBlocks(_peer);
if (downloadMan().isComplete())
completeSync();
return;
}
}
void BlockChainSync::onPeerBlocks(EthereumPeer* _peer, RLP const& _r)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
unsigned itemCount = _r.itemCount();
clog(NetMessageSummary) << "Blocks (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreBlocks");
_peer->setIdle();
if (m_state != SyncState::Blocks && m_state != SyncState::NewBlocks)
clog(NetWarn) << "Unexpected Blocks received!";
if (m_state == SyncState::Waiting)
{
clog(NetAllDetail) << "Ignored blocks while waiting";
return;
}
if (itemCount == 0)
{
// Got to this peer's latest block - just give up.
peerDoneBlocks(_peer);
if (downloadMan().isComplete())
completeSync();
return;
}
unsigned success = 0;
unsigned future = 0;
unsigned unknown = 0;
unsigned got = 0;
unsigned repeated = 0;
u256 maxUnknownNumber = 0;
h256 maxUnknown;
for (unsigned i = 0; i < itemCount; ++i)
{
auto h = BlockInfo::headerHash(_r[i].data());
if (_peer->m_sub.noteBlock(h))
{
_peer->addRating(10);
switch (host().bq().import(_r[i].data(), host().chain()))
{
case ImportResult::Success:
success++;
break;
case ImportResult::Malformed:
case ImportResult::BadChain:
_peer->disable("Malformed block received.");
return;
case ImportResult::FutureTimeKnown:
future++;
break;
case ImportResult::AlreadyInChain:
case ImportResult::AlreadyKnown:
got++;
break;
case ImportResult::FutureTimeUnknown:
future++; //Fall through
case ImportResult::UnknownParent:
{
unknown++;
if (m_state == SyncState::NewBlocks)
{
BlockInfo bi;
bi.populateFromHeader(_r[i][0]);
if (bi.number > maxUnknownNumber)
{
maxUnknownNumber = bi.number;
maxUnknown = h;
}
}
break;
}
default:;
}
}
else
{
_peer->addRating(0); // -1?
repeated++;
}
}
clog(NetMessageSummary) << dec << success << "imported OK," << unknown << "with unknown parents," << future << "with future timestamps," << got << " already known," << repeated << " repeats received.";
if (host().bq().unknownFull())
{
clog(NetWarn) << "Too many unknown blocks, restarting sync";
restartSync();
return;
}
if (m_state == SyncState::NewBlocks && unknown > 0)
{
completeSync();
resetSyncFor(_peer, maxUnknown, std::numeric_limits<u256>::max()); //TODO: proper total difficulty
}
if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks)
{
if (downloadMan().isComplete())
completeSync();
else
requestBlocks(_peer); // Some of the blocks might have been downloaded by helping peers, proceed anyway
}
DEV_INVARIANT_CHECK;
}
void BlockChainSync::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r)
{
DEV_INVARIANT_CHECK;
RecursiveGuard l(x_sync);
auto h = BlockInfo::headerHash(_r[0].data());
clog(NetMessageSummary) << "NewBlock: " << h;
if (_r.itemCount() != 2)
_peer->disable("NewBlock without 2 data fields.");
else
{
switch (host().bq().import(_r[0].data(), host().chain()))
{
case ImportResult::Success:
_peer->addRating(100);
break;
case ImportResult::FutureTimeKnown:
//TODO: Rating dependent on how far in future it is.
break;
case ImportResult::Malformed:
case ImportResult::BadChain:
_peer->disable("Malformed block received.");
return;
case ImportResult::AlreadyInChain:
case ImportResult::AlreadyKnown:
break;
case ImportResult::FutureTimeUnknown:
case ImportResult::UnknownParent:
clog(NetMessageSummary) << "Received block with no known parent. Resyncing...";
resetSyncFor(_peer, h, _r[1].toInt<u256>());
break;
default:;
}
DEV_GUARDED(_peer->x_knownBlocks)
_peer->m_knownBlocks.insert(h);
}
DEV_INVARIANT_CHECK;
}
PV60Sync::PV60Sync(EthereumHost& _host):
BlockChainSync(_host)
{
resetSync();
}
SyncStatus PV60Sync::status() const
{
RecursiveGuard l(x_sync);
SyncStatus res;
res.state = m_state;
if (m_state == SyncState::Hashes)
{
res.hashesTotal = m_estimatedHashes;
res.hashesReceived = static_cast<unsigned>(m_syncingNeededBlocks.size());
res.hashesEstimated = true;
}
else if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks || m_state == SyncState::Waiting)
{
res.blocksTotal = downloadMan().chainSize();
res.blocksReceived = downloadMan().blocksGot().size();
}
return res;
}
void PV60Sync::setState(EthereumPeer* _peer, SyncState _s, bool _isSyncing, bool _needHelp)
{
bool changedState = (m_state != _s);
m_state = _s;
if (_isSyncing != (m_syncer == _peer) || (_isSyncing && changedState))
changeSyncer(_isSyncing ? _peer : nullptr, _needHelp);
else if (_s == SyncState::Idle)
changeSyncer(nullptr, _needHelp);
assert(!!m_syncer || _s == SyncState::Idle);
}
void PV60Sync::resetSync()
{
m_syncingLatestHash = h256();
m_syncingLastReceivedHash = h256();
m_syncingTotalDifficulty = 0;
m_syncingNeededBlocks.clear();
}
void PV60Sync::restartSync()
{
resetSync();
host().bq().clear();
if (isSyncing())
transition(m_syncer, SyncState::Idle);
}
void PV60Sync::completeSync()
{
if (isSyncing())
transition(m_syncer, SyncState::Idle);
}
void PV60Sync::pauseSync()
{
if (isSyncing())
setState(m_syncer, SyncState::Waiting, true);
}
void PV60Sync::continueSync()
{
transition(m_syncer, SyncState::Blocks);
}
void PV60Sync::onNewPeer(EthereumPeer* _peer)
{
setNeedsSyncing(_peer, _peer->m_latestHash, _peer->m_totalDifficulty);
}
void PV60Sync::transition(EthereumPeer* _peer, SyncState _s, bool _force, bool _needHelp)
{
clog(NetMessageSummary) << "Transition!" << EthereumHost::stateName(_s) << "from" << EthereumHost::stateName(m_state) << ", " << (isSyncing(_peer) ? "syncing" : "holding") << (needsSyncing(_peer) ? "& needed" : "");
if (m_state == SyncState::Idle && _s != SyncState::Idle)
_peer->m_requireTransactions = true;
RLPStream s;
if (_s == SyncState::Hashes)
{
if (m_state == SyncState::Idle)
{
if (isSyncing(_peer))
clog(NetWarn) << "Bad state: not asking for Hashes, yet syncing!";
m_syncingLatestHash = _peer->m_latestHash;
m_syncingTotalDifficulty = _peer->m_totalDifficulty;
setState(_peer, _s, true);
_peer->requestHashes(m_syncingLastReceivedHash ? m_syncingLastReceivedHash : m_syncingLatestHash);
DEV_INVARIANT_CHECK;
return;
}
else if (m_state == SyncState::Hashes)
{
if (!isSyncing(_peer))
clog(NetWarn) << "Bad state: asking for Hashes yet not syncing!";
setState(_peer, _s, true);
_peer->requestHashes(m_syncingLastReceivedHash);
DEV_INVARIANT_CHECK;
return;
}
}
else if (_s == SyncState::Blocks)
{
if (m_state == SyncState::Hashes)
{
if (!isSyncing(_peer))
{
clog(NetWarn) << "Bad state: asking for Hashes yet not syncing!";
return;
}
if (shouldGrabBlocks(_peer))
{
clog(NetNote) << "Difficulty of hashchain HIGHER. Grabbing" << m_syncingNeededBlocks.size() << "blocks [latest now" << m_syncingLatestHash << ", was" << host().latestBlockSent() << "]";
downloadMan().resetToChain(m_syncingNeededBlocks);
resetSync();
}
else
{
clog(NetNote) << "Difficulty of hashchain not HIGHER. Ignoring.";
resetSync();
setState(_peer, SyncState::Idle, false);
return;
}
assert (isSyncing(_peer));
}
// run through into...
if (m_state == SyncState::Idle || m_state == SyncState::Hashes || m_state == SyncState::Blocks || m_state == SyncState::Waiting)
{
// Looks like it's the best yet for total difficulty. Set to download.
setState(_peer, SyncState::Blocks, isSyncing(_peer), _needHelp); // will kick off other peers to help if available.
requestBlocks(_peer);
DEV_INVARIANT_CHECK;
return;
}
}
else if (_s == SyncState::NewBlocks)
{
if (m_state != SyncState::Idle && m_state != SyncState::NewBlocks && m_state != SyncState::Waiting)
clog(NetWarn) << "Bad state: Asking new blocks while syncing!";
else
{
setState(_peer, SyncState::NewBlocks, true, _needHelp);
requestBlocks(_peer);
DEV_INVARIANT_CHECK;
return;
}
}
else if (_s == SyncState::Waiting)
{
if (m_state != SyncState::Blocks && m_state != SyncState::NewBlocks && m_state != SyncState::Hashes && m_state != SyncState::Waiting)
clog(NetWarn) << "Bad state: Entering waiting state while not downloading blocks!";
else
{
setState(_peer, SyncState::Waiting, isSyncing(_peer), _needHelp);
return;
}
}
else if (_s == SyncState::Idle)
{
host().foreachPeer([this](EthereumPeer* _p) { _p->setIdle(); return true; });
if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks)
{
clog(NetNote) << "Finishing blocks fetch...";
// a bit overkill given that the other nodes may yet have the needed blocks, but better to be safe than sorry.
if (isSyncing(_peer))
noteDoneBlocks(_peer, _force);
// NOTE: need to notify of giving up on chain-hashes, too, altering state as necessary.
_peer->m_sub.doneFetch();
_peer->setIdle();
setState(_peer, SyncState::Idle, false);
}
else if (m_state == SyncState::Hashes)
{
clog(NetNote) << "Finishing hashes fetch...";
setState(_peer, SyncState::Idle, false);
}
// Otherwise it's fine. We don't care if it's Nothing->Nothing.
DEV_INVARIANT_CHECK;
return;
}
clog(NetWarn) << "Invalid state transition:" << EthereumHost::stateName(_s) << "from" << EthereumHost::stateName(m_state) << ", " << (isSyncing(_peer) ? "syncing" : "holding") << (needsSyncing(_peer) ? "& needed" : "");
}
void PV60Sync::resetSyncFor(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td)
{
setNeedsSyncing(_peer, _latestHash, _td);
}
void PV60Sync::setNeedsSyncing(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td)
{
_peer->m_latestHash = _latestHash;
_peer->m_totalDifficulty = _td;
if (_peer->m_latestHash)
noteNeedsSyncing(_peer);
_peer->session()->addNote("sync", string(isSyncing(_peer) ? "ongoing" : "holding") + (needsSyncing(_peer) ? " & needed" : ""));
}
bool PV60Sync::needsSyncing(EthereumPeer* _peer) const
{
return !!_peer->m_latestHash;
}
bool PV60Sync::isSyncing(EthereumPeer* _peer) const
{
return m_syncer == _peer;
}
bool PV60Sync::shouldGrabBlocks(EthereumPeer* _peer) const
{
auto td = _peer->m_totalDifficulty;
auto lh = _peer->m_latestHash;
auto ctd = host().chain().details().totalDifficulty;
if (m_syncingNeededBlocks.empty())
return false;
clog(NetNote) << "Should grab blocks? " << td << "vs" << ctd << ";" << m_syncingNeededBlocks.size() << " blocks, ends" << m_syncingNeededBlocks.back();
if (td < ctd || (td == ctd && host().chain().currentHash() == lh))
return false;
return true;
}
void PV60Sync::attemptSync(EthereumPeer* _peer)
{
if (m_state != SyncState::Idle)
{
clog(NetAllDetail) << "Can't sync with this peer - outstanding asks.";
return;
}
// if already done this, then ignore.
if (!needsSyncing(_peer))
{
clog(NetAllDetail) << "Already synced with this peer.";
return;
}
unsigned n = host().chain().number();
u256 td = host().chain().details().totalDifficulty;
if (host().bq().isActive())
td += host().bq().difficulty();
clog(NetAllDetail) << "Attempt chain-grab? Latest:" << (m_syncingLastReceivedHash ? m_syncingLastReceivedHash : m_syncingLatestHash) << ", number:" << n << ", TD:" << td << " versus " << _peer->m_totalDifficulty;
if (td >= _peer->m_totalDifficulty)
{
clog(NetAllDetail) << "No. Our chain is better.";
resetNeedsSyncing(_peer);
transition(_peer, SyncState::Idle);
}
else
{
clog(NetAllDetail) << "Yes. Their chain is better.";
m_estimatedHashes = _peer->m_expectedHashes - c_chainReorgSize;
transition(_peer, SyncState::Hashes);
}
}
void PV60Sync::noteNeedsSyncing(EthereumPeer* _peer)
{
// if already downloading hash-chain, ignore.
if (isSyncing())
{
clog(NetAllDetail) << "Sync in progress: Just set to help out.";
if (m_state == SyncState::Blocks)
requestBlocks(_peer);
}
else
// otherwise check to see if we should be downloading...
attemptSync(_peer);
}
void PV60Sync::changeSyncer(EthereumPeer* _syncer, bool _needHelp)
{
if (_syncer)
clog(NetAllDetail) << "Changing syncer to" << _syncer->session()->socketId();
else
clog(NetAllDetail) << "Clearing syncer.";
m_syncer = _syncer;
if (isSyncing())
{
if (_needHelp && (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks))
host().foreachPeer([&](EthereumPeer* _p)
{
clog(NetNote) << "Getting help with downloading blocks";
if (_p != _syncer && _p->m_asking == Asking::Nothing)
transition(_p, m_state);
return true;
});
}
else
{
// start grabbing next hash chain if there is one.
host().foreachPeer([this](EthereumPeer* _p)
{
attemptSync(_p);
return !isSyncing();
});
if (!isSyncing())
{
if (m_state != SyncState::Idle)
setState(_syncer, SyncState::Idle);
clog(NetNote) << "No more peers to sync with.";
}
}
assert(!!m_syncer || m_state == SyncState::Idle);
}
void PV60Sync::peerDoneBlocks(EthereumPeer* _peer)
{
noteDoneBlocks(_peer, false);
}
void PV60Sync::noteDoneBlocks(EthereumPeer* _peer, bool _clemency)
{
resetNeedsSyncing(_peer);
if (downloadMan().isComplete())
{
// Done our chain-get.
clog(NetNote) << "Chain download complete.";
// 1/100th for each useful block hash.
_peer->addRating(downloadMan().chainSize() / 100);
downloadMan().reset();
}
else if (isSyncing(_peer))
{
if (_clemency)
clog(NetNote) << "Chain download failed. Aborted while incomplete.";
else
{
// Done our chain-get.
clog(NetWarn) << "Chain download failed. Peer with blocks didn't have them all. This peer is bad and should be punished.";
clog(NetWarn) << downloadMan().remaining();
clog(NetWarn) << "WOULD BAN.";
// m_banned.insert(_peer->session()->id()); // We know who you are!
// _peer->disable("Peer sent hashes but was unable to provide the blocks.");
}
resetSync();
downloadMan().reset();
transition(_peer, SyncState::Idle);
}
_peer->m_sub.doneFetch();
}
void PV60Sync::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
_peer->setIdle();
if (!isSyncing(_peer))
{
clog(NetMessageSummary) << "Ignoring hashes since not syncing";
return;
}
if (_hashes.size() == 0)
{
transition(_peer, SyncState::Blocks);
return;
}
unsigned knowns = 0;
unsigned unknowns = 0;
for (unsigned i = 0; i < _hashes.size(); ++i)
{
auto h = _hashes[i];
auto status = host().bq().blockStatus(h);
if (status == QueueStatus::Importing || status == QueueStatus::Ready || host().chain().isKnown(h))
{
clog(NetMessageSummary) << "block hash ready:" << h << ". Start blocks download...";
assert (isSyncing(_peer));
transition(_peer, SyncState::Blocks);
return;
}
else if (status == QueueStatus::Bad)
{
cwarn << "block hash bad!" << h << ". Bailing...";
transition(_peer, SyncState::Idle);
return;
}
else if (status == QueueStatus::Unknown)
{
unknowns++;
m_syncingNeededBlocks.push_back(h);
}
else
knowns++;
m_syncingLastReceivedHash = h;
}
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns; now at" << m_syncingLastReceivedHash;
if (m_syncingNeededBlocks.size() > _peer->m_expectedHashes)
{
_peer->disable("Too many hashes");
restartSync();
return;
}
// run through - ask for more.
transition(_peer, SyncState::Hashes);
DEV_INVARIANT_CHECK;
}
void PV60Sync::onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
if (isSyncing())
{
clog(NetMessageSummary) << "Ignoring since we're already downloading.";
return;
}
clog(NetMessageDetail) << "Not syncing and new block hash discovered: syncing without help.";
unsigned knowns = 0;
unsigned unknowns = 0;
for (auto const& h: _hashes)
{
_peer->addRating(1);
DEV_GUARDED(_peer->x_knownBlocks)
_peer->m_knownBlocks.insert(h);
auto status = host().bq().blockStatus(h);
if (status == QueueStatus::Importing || status == QueueStatus::Ready || host().chain().isKnown(h))
knowns++;
else if (status == QueueStatus::Bad)
{
cwarn << "block hash bad!" << h << ". Bailing...";
return;
}
else if (status == QueueStatus::Unknown)
{
unknowns++;
m_syncingNeededBlocks.push_back(h);
}
else
knowns++;
}
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns";
if (unknowns > 0)
{
clog(NetNote) << "Not syncing and new block hash discovered: syncing without help.";
downloadMan().resetToChain(m_syncingNeededBlocks);
resetSync();
transition(_peer, SyncState::NewBlocks, false, false);
}
DEV_INVARIANT_CHECK;
}
void PV60Sync::abortSync(EthereumPeer* _peer)
{
// Can't check invariants here since the peer is already removed from the list and the state is not updated yet.
if (isSyncing(_peer))
{
host().foreachPeer([this](EthereumPeer* _p) { _p->setIdle(); return true; });
transition(_peer, SyncState::Idle, true);
}
DEV_INVARIANT_CHECK;
}
void PV60Sync::onPeerAborting(EthereumPeer* _peer)
{
RecursiveGuard l(x_sync);
// Can't check invariants here since the peer is already removed from the list and the state is not updated yet.
abortSync(_peer);
DEV_INVARIANT_CHECK;
}
bool PV60Sync::invariants() const
{
if (m_state == SyncState::Idle && !!m_syncer)
return false;
if (m_state != SyncState::Idle && !m_syncer)
return false;
if (m_state == SyncState::Hashes)
{
bool hashes = false;
host().foreachPeer([&](EthereumPeer* _p) { if (_p->m_asking == Asking::Hashes) hashes = true; return !hashes; });
if (!hashes)
return false;
if (!m_syncingLatestHash)
return false;
if (m_syncingNeededBlocks.empty() != (!m_syncingLastReceivedHash))
return false;
}
if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks)
{
bool blocks = false;
host().foreachPeer([&](EthereumPeer* _p) { if (_p->m_asking == Asking::Blocks) blocks = true; return !blocks; });
if (!blocks)
return false;
if (downloadMan().isComplete())
return false;
}
if (m_state == SyncState::Idle)
{
bool busy = false;
host().foreachPeer([&](EthereumPeer* _p) { if (_p->m_asking != Asking::Nothing && _p->m_asking != Asking::State) busy = true; return !busy; });
if (busy)
return false;
}
if (m_state == SyncState::Waiting && !host().bq().isActive())
return false;
return true;
}
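As a worked example of the estimate in estimatedHashes() above: with c_chainReorgSize = 30000 and a (Frontier) c_durationLimit of 12 seconds, a node whose head block is one day (86400 s) old would expect roughly 30000 + 86400 / 12 = 37200 hashes. A self-contained restatement of the calculation, with the constants hard-coded purely for illustration:
#include <ctime>

unsigned estimateHashCount(std::time_t _lastBlockTime, std::time_t _now)
{
	unsigned blockCount = 30000;                             // c_chainReorgSize
	if (_lastBlockTime <= _now)                              // otherwise: clock skew, keep the floor
		blockCount += unsigned(_now - _lastBlockTime) / 12;  // c_durationLimit (Frontier)
	return blockCount;                                       // one day behind: 30000 + 86400 / 12 = 37200
}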

278
libethereum/BlockChainSync.h

@ -0,0 +1,278 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file BlockChainSync.h
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#pragma once
#include <mutex>
#include <libdevcore/Guards.h>
#include <libdevcore/RangeMask.h>
#include <libethcore/Common.h>
#include <libp2p/Common.h>
#include "CommonNet.h"
#include "DownloadMan.h"
namespace dev
{
class RLPStream;
namespace eth
{
class EthereumHost;
class BlockQueue;
class EthereumPeer;
/**
* @brief Base BlockChain synchronization strategy class.
* Syncs to peers and keeps up to date. The base class handles block downloading but does not contain any details of the state transfer logic.
*/
class BlockChainSync: public HasInvariants
{
public:
BlockChainSync(EthereumHost& _host);
virtual ~BlockChainSync();
void abortSync(); ///< Abort all sync activity
DownloadMan const& downloadMan() const;
DownloadMan& downloadMan();
/// @returns true if sync is in progress
virtual bool isSyncing() const = 0;
/// Called by peer to report status
virtual void onPeerStatus(EthereumPeer* _peer);
/// Called by peer once it has new blocks during sync
virtual void onPeerBlocks(EthereumPeer* _peer, RLP const& _r);
/// Called by peer once it has new blocks
virtual void onPeerNewBlock(EthereumPeer* _peer, RLP const& _r);
/// Called by peer once it has new hashes
virtual void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes) = 0;
/// Called by peer once it has another sequential block of hashes during sync
virtual void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes) = 0;
/// Called by peer when it is disconnecting
virtual void onPeerAborting(EthereumPeer* _peer) = 0;
/// @returns Synchronization status
virtual SyncStatus status() const = 0;
static char const* stateName(SyncState _s) { return s_stateNames[static_cast<int>(_s)]; }
protected:
//To be implemented in derived classes:
/// New valid peer appears
virtual void onNewPeer(EthereumPeer* _peer) = 0;
/// Peer done downloading blocks
virtual void peerDoneBlocks(EthereumPeer* _peer) = 0;
/// Resume downloading after waiting state
virtual void continueSync() = 0;
/// Restart sync
virtual void restartSync() = 0;
/// Called after all blocks have been downloaded
virtual void completeSync() = 0;
/// Enter waiting state
virtual void pauseSync() = 0;
/// Restart sync for given peer
virtual void resetSyncFor(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td) = 0;
EthereumHost& host() { return m_host; }
EthereumHost const& host() const { return m_host; }
/// Estimates max number of hashes peers can give us.
unsigned estimatedHashes() const;
/// Request blocks from peer if needed
void requestBlocks(EthereumPeer* _peer);
protected:
Handler m_bqRoomAvailable; ///< Triggered once the block queue has room for more blocks
mutable RecursiveMutex x_sync;
SyncState m_state = SyncState::Idle; ///< Current sync state
unsigned m_estimatedHashes = 0; ///< Number of estimated hashes for the last peer over PV60. Used for status reporting only.
private:
static char const* const s_stateNames[static_cast<int>(SyncState::Size)];
bool invariants() const override = 0;
EthereumHost& m_host;
HashDownloadMan m_hashMan;
};
/**
* @brief Synchronization over PV60. Selects a single peer and tries to download hashes from it. After the hash download is complete,
* syncs to peers and keeps up to date.
*/
/**
* Transitions:
*
* Idle->Hashes
* Triggered when:
* * A new peer appears that we can sync to
* * Transition to Idle while there are peers we can sync to
* Effects:
* * Set chain sync (m_syncingTotalDifficulty, m_syncingLatestHash, m_syncer)
* * Requests hashes from m_syncer
*
* Hashes->Idle
* Triggered when:
* * Received too many hashes
* * Received 0 total hashes from m_syncer
* * m_syncer aborts
* Effects:
* In case of too many hashes sync is reset
*
* Hashes->Blocks
* Triggered when:
* * Received known hash from m_syncer
* * Received 0 hashes from m_syncer and m_syncingTotalBlocks not empty
* Effects:
* * Set up download manager, clear m_syncingTotalBlocks. Set all peers to help with downloading if they can
*
* Blocks->Idle
* Triggered when:
* * m_syncer aborts
* * m_syncer does not have required block
* * All blocks downloaded
* * Block queue is full of unknown blocks
* Effects:
* * Download manager is reset
*
* Blocks->Waiting
* Triggered when:
* * Block queue is full of known blocks
* Effects:
* * Stop requesting blocks from peers
*
* Waiting->Blocks
* Triggered when:
* * Block queue has space for new blocks
* Effects:
* * Continue requesting blocks from peers
*
* Idle->NewBlocks
* Triggered when:
* * New block hashes arrive
* Effects:
* * Set up download manager, clear m_syncingTotalBlocks. Download blocks from a single peer. If downloaded blocks have unknown parents, set the peer to sync
*
* NewBlocks->Idle
* Triggered when:
* * m_syncer aborts
* * m_syncer does not have required block
* * All new blocks downloaded
* * Block queue is full of unknown blocks
* Effects:
* * Download manager is reset
*
*/
class PV60Sync: public BlockChainSync
{
public:
PV60Sync(EthereumHost& _host);
/// @returns true if sync is in progress
bool isSyncing() const override { return !!m_syncer; }
/// Called by peer once it has new hashes
void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes) override;
/// Called by peer once it has another sequential block of hashes during sync
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes) override;
/// Called by peer when it is disconnecting
void onPeerAborting(EthereumPeer* _peer) override;
/// @returns Sync status
SyncStatus status() const override;
protected:
void onNewPeer(EthereumPeer* _peer) override;
void continueSync() override;
void peerDoneBlocks(EthereumPeer* _peer) override;
void restartSync() override;
void completeSync() override;
void pauseSync() override;
void resetSyncFor(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td) override;
private:
/// Transition sync state in a particular direction. @param _peer Peer that is responsible for state transfer
void transition(EthereumPeer* _peer, SyncState _s, bool _force = false, bool _needHelp = true);
/// Reset peer syncing requirements state.
void resetNeedsSyncing(EthereumPeer* _peer) { setNeedsSyncing(_peer, h256(), 0); }
/// Update peer syncing requirements state.
void setNeedsSyncing(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td);
/// Do we presently need syncing with this peer?
bool needsSyncing(EthereumPeer* _peer) const;
/// Check whether the session should bother grabbing blocks from a peer.
bool shouldGrabBlocks(EthereumPeer* _peer) const;
/// Attempt to begin syncing with the peer; first check the peer has a more difficult chain to download, then start asking for hashes, then move to blocks
void attemptSync(EthereumPeer* _peer);
/// Update our syncing state
void setState(EthereumPeer* _peer, SyncState _s, bool _isSyncing = false, bool _needHelp = false);
/// Check if peer is main syncer
bool isSyncing(EthereumPeer* _peer) const;
/// Check if we need (re-)syncing with the peer.
void noteNeedsSyncing(EthereumPeer* _who);
/// Set main syncing peer
void changeSyncer(EthereumPeer* _syncer, bool _needHelp);
/// Called when peer done downloading blocks
void noteDoneBlocks(EthereumPeer* _who, bool _clemency);
/// Abort syncing for peer
void abortSync(EthereumPeer* _peer);
/// Reset hash chain syncing
void resetSync();
bool invariants() const override;
h256s m_syncingNeededBlocks; ///< The blocks that we should download from this peer.
h256 m_syncingLastReceivedHash; ///< Hash most recently received from peer.
h256 m_syncingLatestHash; ///< Latest block's hash of the peer we are syncing to, as of the current sync.
u256 m_syncingTotalDifficulty; ///< Latest block's total difficulty of the peer we are syncing to, as of the current sync.
// TODO: switch to weak_ptr
EthereumPeer* m_syncer = nullptr; ///< Peer we are currently syncing with
};
}
}
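A condensed restatement of the core invariant PV60Sync::invariants() enforces between m_state and m_syncer, useful when reading transition() above; the struct and its names are illustrative only, not part of the library:
// Simplified illustration of the state/syncer invariant; not library code.
enum class SyncState { Idle, Waiting, Hashes, Blocks, NewBlocks, Size };

struct SyncerInvariant
{
	SyncState state = SyncState::Idle;
	void const* syncer = nullptr;   // stand-in for EthereumPeer*

	bool holds() const
	{
		if (state == SyncState::Idle && syncer)
			return false;           // idle implies no syncer is selected
		if (state != SyncState::Idle && !syncer)
			return false;           // any active state implies a syncer
		return true;
	}
};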

7
libethereum/BlockDetails.h

@ -22,15 +22,10 @@
#pragma once #pragma once
#include <unordered_map> #include <unordered_map>
#pragma warning(push) #include <libdevcore/db.h>
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <libdevcore/Log.h> #include <libdevcore/Log.h>
#include <libdevcore/RLP.h> #include <libdevcore/RLP.h>
#include "TransactionReceipt.h" #include "TransactionReceipt.h"
namespace ldb = leveldb;
namespace dev namespace dev
{ {

87
libethereum/BlockQueue.cpp

@ -36,6 +36,7 @@ const char* BlockQueueChannel::name() { return EthOrange "[]>"; }
#else #else
const char* BlockQueueChannel::name() { return EthOrange "▣┅▶"; } const char* BlockQueueChannel::name() { return EthOrange "▣┅▶"; }
#endif #endif
const char* BlockQueueTraceChannel::name() { return EthOrange "▣ ▶"; }
size_t const c_maxKnownCount = 100000; size_t const c_maxKnownCount = 100000;
size_t const c_maxKnownSize = 128 * 1024 * 1024; size_t const c_maxKnownSize = 128 * 1024 * 1024;
@ -81,6 +82,8 @@ void BlockQueue::clear()
m_unknownCount = 0; m_unknownCount = 0;
m_knownSize = 0; m_knownSize = 0;
m_knownCount = 0; m_knownCount = 0;
m_difficulty = 0;
m_drainingDifficulty = 0;
} }
void BlockQueue::verifierBody() void BlockQueue::verifierBody()
@ -181,14 +184,14 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
// Check if we already know this block. // Check if we already know this block.
h256 h = BlockInfo::headerHash(_block); h256 h = BlockInfo::headerHash(_block);
cblockq << "Queuing block" << h << "for import..."; clog(BlockQueueTraceChannel) << "Queuing block" << h << "for import...";
UpgradableGuard l(m_lock); UpgradableGuard l(m_lock);
if (m_readySet.count(h) || m_drainingSet.count(h) || m_unknownSet.count(h) || m_knownBad.count(h)) if (m_readySet.count(h) || m_drainingSet.count(h) || m_unknownSet.count(h) || m_knownBad.count(h))
{ {
// Already know about this one. // Already know about this one.
cblockq << "Already known."; clog(BlockQueueTraceChannel) << "Already known.";
return ImportResult::AlreadyKnown; return ImportResult::AlreadyKnown;
} }
@ -226,10 +229,12 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
time_t bit = (unsigned)bi.timestamp; time_t bit = (unsigned)bi.timestamp;
if (strftime(buf, 24, "%X", localtime(&bit)) == 0) if (strftime(buf, 24, "%X", localtime(&bit)) == 0)
buf[0] = '\0'; // empty if case strftime fails buf[0] = '\0'; // empty if case strftime fails
cblockq << "OK - queued for future [" << bi.timestamp << "vs" << time(0) << "] - will wait until" << buf; clog(BlockQueueTraceChannel) << "OK - queued for future [" << bi.timestamp << "vs" << time(0) << "] - will wait until" << buf;
m_unknownSize += _block.size(); m_unknownSize += _block.size();
m_unknownCount++; m_unknownCount++;
return ImportResult::FutureTime; m_difficulty += bi.difficulty;
bool unknown = !m_readySet.count(bi.parentHash) && !m_drainingSet.count(bi.parentHash) && !_bc.isKnown(bi.parentHash);
return unknown ? ImportResult::FutureTimeUnknown : ImportResult::FutureTimeKnown;
} }
else else
{ {
@ -244,10 +249,11 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
else if (!m_readySet.count(bi.parentHash) && !m_drainingSet.count(bi.parentHash) && !_bc.isKnown(bi.parentHash)) else if (!m_readySet.count(bi.parentHash) && !m_drainingSet.count(bi.parentHash) && !_bc.isKnown(bi.parentHash))
{ {
// We don't know the parent (yet) - queue it up for later. It'll get resent to us if we find out about its ancestry later on. // We don't know the parent (yet) - queue it up for later. It'll get resent to us if we find out about its ancestry later on.
cblockq << "OK - queued as unknown parent:" << bi.parentHash; clog(BlockQueueTraceChannel) << "OK - queued as unknown parent:" << bi.parentHash;
m_unknown.insert(make_pair(bi.parentHash, make_pair(h, _block.toBytes()))); m_unknown.insert(make_pair(bi.parentHash, make_pair(h, _block.toBytes())));
m_unknownSet.insert(h); m_unknownSet.insert(h);
m_unknownSize += _block.size(); m_unknownSize += _block.size();
m_difficulty += bi.difficulty;
m_unknownCount++; m_unknownCount++;
return ImportResult::UnknownParent; return ImportResult::UnknownParent;
@ -255,12 +261,13 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
else else
{ {
// If valid, append to blocks. // If valid, append to blocks.
cblockq << "OK - ready for chain insertion."; clog(BlockQueueTraceChannel) << "OK - ready for chain insertion.";
DEV_GUARDED(m_verification) DEV_GUARDED(m_verification)
m_unverified.push_back(UnverifiedBlock { h, bi.parentHash, _block.toBytes() }); m_unverified.push_back(UnverifiedBlock { h, bi.parentHash, _block.toBytes() });
m_moreToVerify.notify_one(); m_moreToVerify.notify_one();
m_readySet.insert(h); m_readySet.insert(h);
m_knownSize += _block.size(); m_knownSize += _block.size();
m_difficulty += bi.difficulty;
m_knownCount++; m_knownCount++;
noteReady_WITH_LOCK(h); noteReady_WITH_LOCK(h);
@ -350,13 +357,16 @@ bool BlockQueue::doneDrain(h256s const& _bad)
WriteGuard l(m_lock); WriteGuard l(m_lock);
DEV_INVARIANT_CHECK; DEV_INVARIANT_CHECK;
m_drainingSet.clear(); m_drainingSet.clear();
m_difficulty -= m_drainingDifficulty;
m_drainingDifficulty = 0;
if (_bad.size()) if (_bad.size())
{ {
// at least one of them was bad. // at least one of them was bad.
m_knownBad += _bad; m_knownBad += _bad;
for (h256 const& b : _bad) for (h256 const& b : _bad)
updateBad(b); updateBad(b);
} return !m_readySet.empty(); }
return !m_readySet.empty();
} }
void BlockQueue::tick(BlockChain const& _bc) void BlockQueue::tick(BlockChain const& _bc)
@ -427,32 +437,35 @@ bool BlockQueue::unknownFull() const
void BlockQueue::drain(VerifiedBlocks& o_out, unsigned _max) void BlockQueue::drain(VerifiedBlocks& o_out, unsigned _max)
{ {
WriteGuard l(m_lock); bool wasFull = false;
DEV_INVARIANT_CHECK; DEV_WRITE_GUARDED(m_lock)
if (m_drainingSet.empty())
{ {
bool wasFull = knownFull(); DEV_INVARIANT_CHECK;
DEV_GUARDED(m_verification) wasFull = knownFull();
if (m_drainingSet.empty())
{ {
o_out.resize(min<unsigned>(_max, m_verified.size())); m_drainingDifficulty = 0;
for (unsigned i = 0; i < o_out.size(); ++i) DEV_GUARDED(m_verification)
swap(o_out[i], m_verified[i]); {
m_verified.erase(m_verified.begin(), advanced(m_verified.begin(), o_out.size())); o_out.resize(min<unsigned>(_max, m_verified.size()));
} for (unsigned i = 0; i < o_out.size(); ++i)
for (auto const& bs: o_out) swap(o_out[i], m_verified[i]);
{ m_verified.erase(m_verified.begin(), advanced(m_verified.begin(), o_out.size()));
// TODO: @optimise use map<h256, bytes> rather than vector<bytes> & set<h256>. }
auto h = bs.verified.info.hash(); for (auto const& bs: o_out)
m_drainingSet.insert(h); {
m_readySet.erase(h); // TODO: @optimise use map<h256, bytes> rather than vector<bytes> & set<h256>.
m_knownSize -= bs.verified.block.size(); auto h = bs.verified.info.hash();
m_knownCount--; m_drainingSet.insert(h);
m_drainingDifficulty += bs.verified.info.difficulty;
m_readySet.erase(h);
m_knownSize -= bs.verified.block.size();
m_knownCount--;
}
} }
if (wasFull && !knownFull())
m_onRoomAvailable();
} }
if (wasFull && !knownFull())
m_onRoomAvailable();
} }
bool BlockQueue::invariants() const bool BlockQueue::invariants() const
@ -524,3 +537,19 @@ std::ostream& dev::eth::operator<<(std::ostream& _out, BlockQueueStatus const& _
return _out; return _out;
} }
u256 BlockQueue::difficulty() const
{
UpgradableGuard l(m_lock);
return m_difficulty;
}
bool BlockQueue::isActive() const
{
UpgradableGuard l(m_lock);
if (m_readySet.empty() && m_drainingSet.empty())
DEV_GUARDED(m_verification)
if (m_verified.empty() && m_verifying.empty() && m_unverified.empty())
return false;
return true;
}
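The difficulty counters added above follow a simple conservation rule: import() adds each queued block's difficulty to m_difficulty, drain() accumulates the drained portion into m_drainingDifficulty, and doneDrain() subtracts it once the chain has consumed those blocks. A minimal sketch of that bookkeeping, with types simplified and locking omitted (the real class guards these members with m_lock):
// Illustrative bookkeeping only; not the BlockQueue implementation.
#include <cstdint>

struct QueueDifficulty
{
	uint64_t queued = 0;     // m_difficulty: total difficulty of queued blocks
	uint64_t draining = 0;   // m_drainingDifficulty: portion currently draining

	void onImport(uint64_t _d) { queued += _d; }              // import(): block enters the queue
	void onDrain(uint64_t _d) { draining += _d; }             // drain(): block handed to the chain
	void onDoneDrain() { queued -= draining; draining = 0; }  // doneDrain(): drained blocks consumed
};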

5
libethereum/BlockQueue.h

@ -42,6 +42,7 @@ namespace eth
class BlockChain; class BlockChain;
struct BlockQueueChannel: public LogChannel { static const char* name(); static const int verbosity = 4; }; struct BlockQueueChannel: public LogChannel { static const char* name(); static const int verbosity = 4; };
struct BlockQueueTraceChannel: public LogChannel { static const char* name(); static const int verbosity = 7; };
#define cblockq dev::LogOutputStream<dev::eth::BlockQueueChannel, true>() #define cblockq dev::LogOutputStream<dev::eth::BlockQueueChannel, true>()
struct BlockQueueStatus struct BlockQueueStatus
@ -117,6 +118,8 @@ public:
bool knownFull() const; bool knownFull() const;
bool unknownFull() const; bool unknownFull() const;
u256 difficulty() const; // Total difficulty of queued blocks
bool isActive() const;
private: private:
struct UnverifiedBlock struct UnverifiedBlock
@ -158,6 +161,8 @@ private:
std::atomic<size_t> m_knownSize; ///< Tracks total size in bytes of all known blocks; std::atomic<size_t> m_knownSize; ///< Tracks total size in bytes of all known blocks;
std::atomic<size_t> m_unknownCount; ///< Tracks total count of unknown blocks. Used to avoid additional syncing std::atomic<size_t> m_unknownCount; ///< Tracks total count of unknown blocks. Used to avoid additional syncing
std::atomic<size_t> m_knownCount; ///< Tracks total count of known blocks. Used to avoid additional syncing std::atomic<size_t> m_knownCount; ///< Tracks total count of known blocks. Used to avoid additional syncing
u256 m_difficulty; ///< Total difficulty of blocks in the queue
u256 m_drainingDifficulty; ///< Total difficulty of blocks in draining
}; };
std::ostream& operator<<(std::ostream& _out, BlockQueueStatus const& _s); std::ostream& operator<<(std::ostream& _out, BlockQueueStatus const& _s);

3
libethereum/CMakeLists.txt

@ -12,7 +12,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTATICLIB")
aux_source_directory(. SRC_LIST) aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..) include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS}) include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS}) include_directories(${Boost_INCLUDE_DIRS})
if (JSONRPC) if (JSONRPC)
include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS}) include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
@ -31,7 +31,6 @@ target_link_libraries(${EXECUTABLE} whisper)
target_link_libraries(${EXECUTABLE} p2p) target_link_libraries(${EXECUTABLE} p2p)
target_link_libraries(${EXECUTABLE} devcrypto) target_link_libraries(${EXECUTABLE} devcrypto)
target_link_libraries(${EXECUTABLE} ethcore) target_link_libraries(${EXECUTABLE} ethcore)
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${Boost_REGEX_LIBRARIES}) target_link_libraries(${EXECUTABLE} ${Boost_REGEX_LIBRARIES})
target_link_libraries(${EXECUTABLE} secp256k1) target_link_libraries(${EXECUTABLE} secp256k1)
if (JSONRPC) if (JSONRPC)

6
libethereum/CanonBlockChain.h

@ -21,11 +21,6 @@
#pragma once #pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <mutex> #include <mutex>
#include <libdevcore/Log.h> #include <libdevcore/Log.h>
#include <libdevcore/Exceptions.h> #include <libdevcore/Exceptions.h>
@ -35,7 +30,6 @@
#include "BlockDetails.h" #include "BlockDetails.h"
#include "Account.h" #include "Account.h"
#include "BlockChain.h" #include "BlockChain.h"
namespace ldb = leveldb;
namespace dev namespace dev
{ {

37
libethereum/Client.cpp

@ -355,6 +355,14 @@ bool Client::isSyncing() const
return false; return false;
} }
bool Client::isMajorSyncing() const
{
// TODO: only return true if it is actually doing a proper chain sync.
if (auto h = m_host.lock())
return h->isSyncing();
return false;
}
void Client::startedWorking() void Client::startedWorking()
{ {
// Synchronise the state according to the head of the block chain. // Synchronise the state according to the head of the block chain.
@ -612,22 +620,23 @@ bool Client::submitWork(ProofOfWork::Solution const& _solution)
} }
unsigned static const c_syncMin = 1; unsigned static const c_syncMin = 1;
unsigned static const c_syncMax = 100; unsigned static const c_syncMax = 1000;
double static const c_targetDuration = 1; double static const c_targetDuration = 1;
void Client::syncBlockQueue() void Client::syncBlockQueue()
{ {
ImportRoute ir;
cwork << "BQ ==> CHAIN ==> STATE"; cwork << "BQ ==> CHAIN ==> STATE";
ImportRoute ir;
unsigned count;
boost::timer t; boost::timer t;
tie(ir, m_syncBlockQueue) = m_bc.sync(m_bq, m_stateDB, m_syncAmount); tie(ir, m_syncBlockQueue, count) = m_bc.sync(m_bq, m_stateDB, m_syncAmount);
double elapsed = t.elapsed(); double elapsed = t.elapsed();
cnote << m_syncAmount << "blocks imported in" << unsigned(elapsed * 1000) << "ms (" << (m_syncAmount / elapsed) << "blocks/s)"; cnote << count << "blocks imported in" << unsigned(elapsed * 1000) << "ms (" << (count / elapsed) << "blocks/s)";
if (elapsed > c_targetDuration * 1.1 && m_syncAmount > c_syncMin) if (elapsed > c_targetDuration * 1.1 && count > c_syncMin)
m_syncAmount = max(c_syncMin, m_syncAmount * 9 / 10); m_syncAmount = max(c_syncMin, count * 9 / 10);
else if (elapsed < c_targetDuration * 0.9 && m_syncAmount < c_syncMax) else if (count == m_syncAmount && elapsed < c_targetDuration * 0.9 && m_syncAmount < c_syncMax)
m_syncAmount = min(c_syncMax, m_syncAmount * 11 / 10 + 1); m_syncAmount = min(c_syncMax, m_syncAmount * 11 / 10 + 1);
if (ir.liveBlocks.empty()) if (ir.liveBlocks.empty())
return; return;
@ -673,10 +682,10 @@ void Client::onChainChanged(ImportRoute const& _ir)
// insert transactions that we are declaring the dead part of the chain // insert transactions that we are declaring the dead part of the chain
for (auto const& h: _ir.deadBlocks) for (auto const& h: _ir.deadBlocks)
{ {
clog(ClientNote) << "Dead block:" << h; clog(ClientTrace) << "Dead block:" << h;
for (auto const& t: m_bc.transactions(h)) for (auto const& t: m_bc.transactions(h))
{ {
clog(ClientNote) << "Resubmitting dead-block transaction " << Transaction(t, CheckTransaction::None); clog(ClientTrace) << "Resubmitting dead-block transaction " << Transaction(t, CheckTransaction::None);
m_tq.import(t, TransactionQueue::ImportCallback(), IfDropped::Retry); m_tq.import(t, TransactionQueue::ImportCallback(), IfDropped::Retry);
} }
} }
@ -684,10 +693,10 @@ void Client::onChainChanged(ImportRoute const& _ir)
// remove transactions from m_tq nicely rather than relying on out of date nonce later on. // remove transactions from m_tq nicely rather than relying on out of date nonce later on.
for (auto const& h: _ir.liveBlocks) for (auto const& h: _ir.liveBlocks)
{ {
clog(ClientChat) << "Live block:" << h; clog(ClientTrace) << "Live block:" << h;
for (auto const& th: m_bc.transactionHashes(h)) for (auto const& th: m_bc.transactionHashes(h))
{ {
clog(ClientNote) << "Safely dropping transaction " << th; clog(ClientTrace) << "Safely dropping transaction " << th;
m_tq.drop(th); m_tq.drop(th);
} }
} }
@ -701,7 +710,7 @@ void Client::onChainChanged(ImportRoute const& _ir)
// RESTART MINING // RESTART MINING
if (!m_bq.items().first) if (!isMajorSyncing())
{ {
bool preChanged = false; bool preChanged = false;
State newPreMine; State newPreMine;
@ -723,7 +732,7 @@ void Client::onChainChanged(ImportRoute const& _ir)
DEV_READ_GUARDED(x_postMine) DEV_READ_GUARDED(x_postMine)
for (auto const& t: m_postMine.pending()) for (auto const& t: m_postMine.pending())
{ {
clog(ClientNote) << "Resubmitting post-mine transaction " << t; clog(ClientTrace) << "Resubmitting post-mine transaction " << t;
auto ir = m_tq.import(t, TransactionQueue::ImportCallback(), IfDropped::Retry); auto ir = m_tq.import(t, TransactionQueue::ImportCallback(), IfDropped::Retry);
if (ir != ImportResult::Success) if (ir != ImportResult::Success)
onTransactionQueueReady(); onTransactionQueueReady();
@ -764,7 +773,7 @@ void Client::startMining()
void Client::rejigMining() void Client::rejigMining()
{ {
if ((wouldMine() || remoteActive()) && !m_bq.items().first && (!isChainBad() || mineOnBadChain()) /*&& (forceMining() || transactionsWaiting())*/) if ((wouldMine() || remoteActive()) && !isMajorSyncing() && (!isChainBad() || mineOnBadChain()) /*&& (forceMining() || transactionsWaiting())*/)
{ {
cnote << "Rejigging mining..."; cnote << "Rejigging mining...";
DEV_WRITE_GUARDED(x_working) DEV_WRITE_GUARDED(x_working)

1
libethereum/Client.h

@ -219,6 +219,7 @@ public:
DownloadMan const* downloadMan() const; DownloadMan const* downloadMan() const;
bool isSyncing() const; bool isSyncing() const;
bool isMajorSyncing() const;
/// Sets the network id. /// Sets the network id.
void setNetworkId(u256 _n); void setNetworkId(u256 _n);
/// Clears pending transactions. Just for debug use. /// Clears pending transactions. Just for debug use.

6
libethereum/CommonNet.h

@ -80,10 +80,8 @@ enum class Asking
enum class SyncState enum class SyncState
{ {
Idle, ///< Initial chain sync complete. Waiting for new packets Idle, ///< Initial chain sync complete. Waiting for new packets
WaitingQueue, ///< Block downloading paused. Waiting for block queue to process blocks and free space Waiting, ///< Block downloading paused. Waiting for block queue to process blocks and free space
HashesNegotiate, ///< Waiting for first hashes to arrive Hashes, ///< Downloading hashes from multiple peers over
HashesSingle, ///< Locked on and downloading hashes from a single peer
HashesParallel, ///< Downloading hashes from multiple peers over
Blocks, ///< Downloading blocks Blocks, ///< Downloading blocks
NewBlocks, ///< Downloading blocks learned from NewHashes packet NewBlocks, ///< Downloading blocks learned from NewHashes packet

603
libethereum/EthereumHost.cpp

@ -33,6 +33,8 @@
#include "BlockQueue.h" #include "BlockQueue.h"
#include "EthereumPeer.h" #include "EthereumPeer.h"
#include "DownloadMan.h" #include "DownloadMan.h"
#include "BlockChainSync.h"
using namespace std; using namespace std;
using namespace dev; using namespace dev;
using namespace dev::eth; using namespace dev::eth;
@ -41,7 +43,7 @@ using namespace p2p;
unsigned const EthereumHost::c_oldProtocolVersion = 60; //TODO: remove this once v61+ is common unsigned const EthereumHost::c_oldProtocolVersion = 60; //TODO: remove this once v61+ is common
unsigned const c_chainReorgSize = 30000; unsigned const c_chainReorgSize = 30000;
char const* const EthereumHost::s_stateNames[static_cast<int>(SyncState::Size)] = {"Idle", "WaitingQueue", "HashesNegotiate", "HashesSingle", "HashesParallel", "Blocks", "NewBlocks" }; char const* const EthereumHost::s_stateNames[static_cast<int>(SyncState::Size)] = {"Idle", "Waiting", "Hashes", "Blocks", "NewBlocks" };
EthereumHost::EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId): EthereumHost::EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId):
HostCapability<EthereumPeer>(), HostCapability<EthereumPeer>(),
@ -51,15 +53,11 @@ EthereumHost::EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQu
m_bq (_bq), m_bq (_bq),
m_networkId (_networkId) m_networkId (_networkId)
{ {
setState(SyncState::HashesNegotiate);
m_latestBlockSent = _ch.currentHash(); m_latestBlockSent = _ch.currentHash();
m_hashMan.reset(m_chain.number() + 1);
m_bqRoomAvailable = m_bq.onRoomAvailable([this](){ m_continueSync = true; });
} }
EthereumHost::~EthereumHost() EthereumHost::~EthereumHost()
{ {
foreachPeer([](EthereumPeer* _p) { _p->abortSync(); });
} }
bool EthereumHost::ensureInitialised() bool EthereumHost::ensureInitialised()
@ -79,31 +77,13 @@ bool EthereumHost::ensureInitialised()
void EthereumHost::reset()
{
-	foreachPeer([](EthereumPeer* _p) { _p->abortSync(); });
-	m_man.resetToChain(h256s());
-	m_hashMan.reset(m_chain.number() + 1);
-	setState(SyncState::HashesNegotiate);
-	m_syncingLatestHash = h256();
-	m_syncingTotalDifficulty = 0;
+	Guard l(x_sync);
+	if (m_sync)
+		m_sync->abortSync();
+	m_sync.reset();
	m_latestBlockSent = h256();
	m_transactionsSent.clear();
-	m_hashes.clear();
}
-
-void EthereumHost::resetSyncTo(h256 const& _h)
-{
-	setState(SyncState::HashesNegotiate);
-	m_syncingLatestHash = _h;
-}
-
-void EthereumHost::setState(SyncState _s)
-{
-	if (m_state != _s)
-	{
-		clog(NetAllDetail) << "SyncState changed from " << stateName(m_state) << " to " << stateName(_s);
-		m_state = _s;
-	}
-}

void EthereumHost::doWork()
@ -125,14 +105,7 @@ void EthereumHost::doWork()
} }
} }
-	if (m_continueSync)
-	{
-		m_continueSync = false;
-		RecursiveGuard l(x_sync);
-		continueSync();
-	}
-	foreachPeer([](EthereumPeer* _p) { _p->tick(); });
+	foreachPeer([](EthereumPeer* _p) { _p->tick(); return true; });
// return netChange; // return netChange;
// TODO: Figure out what to do with netChange. // TODO: Figure out what to do with netChange.
@ -174,24 +147,28 @@ void EthereumHost::maintainTransactions()
cnote << "Sent" << n << "transactions to " << _p->session()->info().clientVersion; cnote << "Sent" << n << "transactions to " << _p->session()->info().clientVersion;
} }
_p->m_requireTransactions = false; _p->m_requireTransactions = false;
return true;
}); });
} }
-void EthereumHost::foreachPeer(std::function<void(EthereumPeer*)> const& _f) const
+void EthereumHost::foreachPeer(std::function<bool(EthereumPeer*)> const& _f) const
{
	foreachPeerPtr([&](std::shared_ptr<EthereumPeer> _p)
	{
		if (_p)
-			_f(_p.get());
+			return _f(_p.get());
+		return true;
	});
}

-void EthereumHost::foreachPeerPtr(std::function<void(std::shared_ptr<EthereumPeer>)> const& _f) const
+void EthereumHost::foreachPeerPtr(std::function<bool(std::shared_ptr<EthereumPeer>)> const& _f) const
{
	for (auto s: peerSessions())
-		_f(s.first->cap<EthereumPeer>());
+		if (!_f(s.first->cap<EthereumPeer>()))
+			return;
	for (auto s: peerSessions(c_oldProtocolVersion)) //TODO: remove once v61+ is common
-		_f(s.first->cap<EthereumPeer>(c_oldProtocolVersion));
+		if (!_f(s.first->cap<EthereumPeer>(c_oldProtocolVersion)))
+			return;
}
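Both helpers now take a bool-returning visitor so the enumeration can stop early (sync() below uses this to stop as soon as a v61-capable peer has been found). A reduced sketch of that pattern, with a plain vector standing in for the real peer sessions:

#include <functional>
#include <vector>

// Early-exit peer iteration: the visitor returns false to stop enumerating.
// This is only a shape sketch; the real code walks HostCapability sessions.
template <class Peer>
void forEach(std::vector<Peer*> const& _peers, std::function<bool(Peer*)> const& _f)
{
	for (Peer* p: _peers)
		if (p && !_f(p))
			return; // visitor asked to stop
}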
tuple<vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<Session>>> EthereumHost::randomSelection(unsigned _percent, std::function<bool(EthereumPeer*)> const& _allow) tuple<vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<Session>>> EthereumHost::randomSelection(unsigned _percent, std::function<bool(EthereumPeer*)> const& _allow)
@ -263,348 +240,63 @@ void EthereumHost::maintainBlocks(h256 const& _currentHash)
} }
} }
void EthereumHost::onPeerStatus(EthereumPeer* _peer) BlockChainSync& EthereumHost::sync()
{ {
RecursiveGuard l(x_sync); if (m_sync)
DEV_INVARIANT_CHECK; return *m_sync; // We only chose sync strategy once
if (_peer->m_genesisHash != m_chain.genesisHash())
_peer->disable("Invalid genesis hash"); bool pv61 = false;
else if (_peer->m_protocolVersion != protocolVersion() && _peer->m_protocolVersion != c_oldProtocolVersion) foreachPeer([&](EthereumPeer* _p)
_peer->disable("Invalid protocol version.");
else if (_peer->m_networkId != networkId())
_peer->disable("Invalid network identifier.");
else if (_peer->session()->info().clientVersion.find("/v0.7.0/") != string::npos)
_peer->disable("Blacklisted client version.");
else if (isBanned(_peer->session()->id()))
_peer->disable("Peer banned for previous bad behaviour.");
else
{ {
unsigned estimatedHashes = estimateHashes(); if (_p->m_protocolVersion == protocolVersion())
if (_peer->m_protocolVersion == protocolVersion()) pv61 = true;
{ return !pv61;
if (_peer->m_latestBlockNumber > m_chain.number()) });
_peer->m_expectedHashes = (unsigned)_peer->m_latestBlockNumber - m_chain.number(); m_sync.reset(pv61 ? new PV60Sync(*this) : new PV60Sync(*this));
if (_peer->m_expectedHashes > estimatedHashes) return *m_sync;
_peer->disable("Too many hashes");
else if (needHashes() && m_hashMan.chainSize() < _peer->m_expectedHashes)
m_hashMan.resetToRange(m_chain.number() + 1, _peer->m_expectedHashes);
}
else
_peer->m_expectedHashes = estimatedHashes;
continueSync(_peer);
DEV_INVARIANT_CHECK;
}
} }
unsigned EthereumHost::estimateHashes() void EthereumHost::onPeerStatus(EthereumPeer* _peer)
{ {
BlockInfo block = m_chain.info(); Guard l(x_sync);
time_t lastBlockTime = (block.hash() == m_chain.genesisHash()) ? 1428192000 : (time_t)block.timestamp; sync().onPeerStatus(_peer);
time_t now = time(0);
unsigned blockCount = c_chainReorgSize;
if (lastBlockTime > now)
clog(NetWarn) << "Clock skew? Latest block is in the future";
else
blockCount += (now - lastBlockTime) / (unsigned)c_durationLimit;
clog(NetAllDetail) << "Estimated hashes: " << blockCount;
return blockCount;
} }
void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes) void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes)
{ {
RecursiveGuard l(x_sync); Guard l(x_sync);
DEV_INVARIANT_CHECK; sync().onPeerHashes(_peer, _hashes);
if (_peer->m_syncHashNumber > 0)
_peer->m_syncHashNumber += _hashes.size();
_peer->setAsking(Asking::Nothing);
onPeerHashes(_peer, _hashes, false);
}
void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool _complete)
{
if (_hashes.empty())
{
_peer->m_hashSub.doneFetch();
continueSync();
return;
}
bool syncByNumber = _peer->m_syncHashNumber;
if (!syncByNumber && !_complete && _peer->m_syncHash != m_syncingLatestHash)
{
// Obsolete hashes, discard
continueSync(_peer);
return;
}
unsigned knowns = 0;
unsigned unknowns = 0;
h256s neededBlocks;
unsigned firstNumber = _peer->m_syncHashNumber - _hashes.size();
for (unsigned i = 0; i < _hashes.size(); ++i)
{
_peer->addRating(1);
auto h = _hashes[i];
auto status = m_bq.blockStatus(h);
if (status == QueueStatus::Importing || status == QueueStatus::Ready || m_chain.isKnown(h))
{
clog(NetMessageSummary) << "Block hash already known:" << h;
if (!syncByNumber)
{
m_hashes += neededBlocks;
clog(NetMessageSummary) << "Start blocks download...";
onPeerDoneHashes(_peer, true);
return;
}
}
else if (status == QueueStatus::Bad)
{
cwarn << "block hash bad!" << h << ". Bailing...";
_peer->setIdle();
return;
}
else if (status == QueueStatus::Unknown)
{
unknowns++;
neededBlocks.push_back(h);
}
else
knowns++;
if (!syncByNumber)
m_syncingLatestHash = h;
else
_peer->m_hashSub.noteHash(firstNumber + i, 1);
}
if (syncByNumber)
{
m_man.appendToChain(neededBlocks); // Append to download manager immediatelly
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns";
}
else
{
m_hashes += neededBlocks; // Append to local list
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns; now at" << m_syncingLatestHash;
}
if (_complete)
{
clog(NetMessageSummary) << "Start new blocks download...";
m_syncingLatestHash = h256();
setState(SyncState::NewBlocks);
m_man.resetToChain(m_hashes);
m_hashes.clear();
m_hashMan.reset(m_chain.number() + 1);
continueSync(_peer);
}
else if (syncByNumber && m_hashMan.isComplete())
{
// Done our chain-get.
clog(NetNote) << "Hashes download complete.";
onPeerDoneHashes(_peer, false);
}
else if (m_hashes.size() > _peer->m_expectedHashes)
{
_peer->disable("Too many hashes");
m_hashes.clear();
m_syncingLatestHash = h256();
setState(SyncState::HashesNegotiate);
continueSync(); ///Try with some other peer, keep the chain
}
else
continueSync(_peer); /// Grab next hashes
DEV_INVARIANT_CHECK;
}
void EthereumHost::onPeerDoneHashes(EthereumPeer* _peer, bool _localChain)
{
assert(_peer->m_asking == Asking::Nothing);
m_syncingLatestHash = h256();
setState(SyncState::Blocks);
if (_peer->m_protocolVersion != protocolVersion() || _localChain)
{
m_man.resetToChain(m_hashes);
_peer->addRating(m_man.chainSize() / 100); //TODO: what about other peers?
}
m_hashMan.reset(m_chain.number() + 1);
m_hashes.clear();
continueSync();
} }
void EthereumHost::onPeerBlocks(EthereumPeer* _peer, RLP const& _r) void EthereumHost::onPeerBlocks(EthereumPeer* _peer, RLP const& _r)
{ {
RecursiveGuard l(x_sync); Guard l(x_sync);
DEV_INVARIANT_CHECK; sync().onPeerBlocks(_peer, _r);
_peer->setAsking(Asking::Nothing);
unsigned itemCount = _r.itemCount();
clog(NetMessageSummary) << "Blocks (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreBlocks");
if (itemCount == 0)
{
// Got to this peer's latest block - just give up.
clog(NetNote) << "Finishing blocks fetch...";
// NOTE: need to notify of giving up on chain-hashes, too, altering state as necessary.
_peer->m_sub.doneFetch();
_peer->setIdle();
return;
}
unsigned success = 0;
unsigned future = 0;
unsigned unknown = 0;
unsigned got = 0;
unsigned repeated = 0;
u256 maxDifficulty = 0;
h256 maxUnknown;
for (unsigned i = 0; i < itemCount; ++i)
{
auto h = BlockInfo::headerHash(_r[i].data());
if (_peer->m_sub.noteBlock(h))
{
_peer->addRating(10);
switch (m_bq.import(_r[i].data(), m_chain))
{
case ImportResult::Success:
success++;
break;
case ImportResult::Malformed:
case ImportResult::BadChain:
_peer->disable("Malformed block received.");
return;
case ImportResult::FutureTime:
future++;
break;
case ImportResult::AlreadyInChain:
case ImportResult::AlreadyKnown:
got++;
break;
case ImportResult::UnknownParent:
{
unknown++;
BlockInfo bi;
bi.populateFromHeader(_r[i][0]);
if (bi.difficulty > maxDifficulty)
{
maxDifficulty = bi.difficulty;
maxUnknown = h;
}
break;
}
default:;
}
}
else
{
_peer->addRating(0); // -1?
repeated++;
}
}
clog(NetMessageSummary) << dec << success << "imported OK," << unknown << "with unknown parents," << future << "with future timestamps," << got << " already known," << repeated << " repeats received.";
if (m_state == SyncState::NewBlocks && unknown > 0)
{
_peer->m_latestHash = maxUnknown;
_peer->m_totalDifficulty = maxDifficulty;
if (peerShouldGrabChain(_peer))
resetSyncTo(maxUnknown);
}
continueSync(_peer);
DEV_INVARIANT_CHECK;
} }
void EthereumHost::onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes) void EthereumHost::onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes)
{ {
RecursiveGuard l(x_sync); Guard l(x_sync);
DEV_INVARIANT_CHECK; sync().onPeerNewHashes(_peer, _hashes);
if (isSyncing() || _peer->isConversing())
{
clog(NetMessageSummary) << "Ignoring new hashes since we're already downloading.";
return;
}
clog(NetNote) << "New block hash discovered: syncing without help.";
_peer->m_syncHashNumber = 0;
onPeerHashes(_peer, _hashes, true);
DEV_INVARIANT_CHECK;
} }
void EthereumHost::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r) void EthereumHost::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r)
{ {
RecursiveGuard l(x_sync); Guard l(x_sync);
DEV_INVARIANT_CHECK; sync().onPeerNewBlock(_peer, _r);
if ((isSyncing() || _peer->isConversing()))
{
clog(NetMessageSummary) << "Ignoring new blocks since we're already downloading.";
return;
}
auto h = BlockInfo::headerHash(_r[0].data());
clog(NetMessageSummary) << "NewBlock: " << h;
if (_r.itemCount() != 2)
_peer->disable("NewBlock without 2 data fields.");
else
{
bool sync = false;
switch (m_bq.import(_r[0].data(), m_chain))
{
case ImportResult::Success:
_peer->addRating(100);
break;
case ImportResult::FutureTime:
//TODO: Rating dependent on how far in future it is.
break;
case ImportResult::Malformed:
case ImportResult::BadChain:
_peer->disable("Malformed block received.");
return;
case ImportResult::AlreadyInChain:
case ImportResult::AlreadyKnown:
break;
case ImportResult::UnknownParent:
if (h)
{
u256 difficulty = _r[1].toInt<u256>();
if (m_syncingTotalDifficulty < difficulty)
{
_peer->m_latestHash = h;
_peer->m_totalDifficulty = difficulty;
if (peerShouldGrabChain(_peer))
{
clog(NetMessageSummary) << "Received block with no known parent. Resyncing...";
resetSyncTo(h);;
sync = true;
}
}
}
break;
default:;
}
DEV_GUARDED(_peer->x_knownBlocks)
_peer->m_knownBlocks.insert(h);
if (sync)
continueSync(_peer);
}
DEV_INVARIANT_CHECK;
} }
void EthereumHost::onPeerTransactions(EthereumPeer* _peer, RLP const& _r) void EthereumHost::onPeerTransactions(EthereumPeer* _peer, RLP const& _r)
{ {
if (_peer->isCriticalSyncing())
{
clog(NetAllDetail) << "Ignoring transaction from peer we are syncing with";
return;
}
unsigned itemCount = _r.itemCount(); unsigned itemCount = _r.itemCount();
clog(NetAllDetail) << "Transactions (" << dec << itemCount << "entries)"; clog(NetAllDetail) << "Transactions (" << dec << itemCount << "entries)";
Guard l(_peer->x_knownTransactions); Guard l(_peer->x_knownTransactions);
for (unsigned i = 0; i < itemCount; ++i) for (unsigned i = 0; i < min<unsigned>(itemCount, 256); ++i) // process 256 transactions at most. TODO: much better solution.
{ {
auto h = sha3(_r[i].data()); auto h = sha3(_r[i].data());
_peer->m_knownTransactions.insert(h); _peer->m_knownTransactions.insert(h);
@ -629,202 +321,23 @@ void EthereumHost::onPeerTransactions(EthereumPeer* _peer, RLP const& _r)
void EthereumHost::onPeerAborting(EthereumPeer* _peer) void EthereumHost::onPeerAborting(EthereumPeer* _peer)
{ {
RecursiveGuard l(x_sync); Guard l(x_sync);
if (_peer->isConversing()) if (m_sync)
{ m_sync->onPeerAborting(_peer);
_peer->setIdle();
// if (_peer->isCriticalSyncing())
_peer->setRude();
continueSync();
}
DEV_INVARIANT_CHECK;
}
void EthereumHost::continueSync()
{
if (m_state == SyncState::WaitingQueue)
setState(m_lastActiveState);
clog(NetAllDetail) << "Continuing sync for all peers";
foreachPeer([&](EthereumPeer* _p)
{
if (_p->m_asking == Asking::Nothing)
continueSync(_p);
});
}
void EthereumHost::continueSync(EthereumPeer* _peer)
{
DEV_INVARIANT_CHECK;
assert(_peer->m_asking == Asking::Nothing);
bool otherPeerV60Sync = false;
bool otherPeerV61Sync = false;
if (needHashes())
{
if (!peerShouldGrabChain(_peer))
{
_peer->setIdle();
return;
}
foreachPeer([&](EthereumPeer* _p)
{
if (_p != _peer && _p->m_asking == Asking::Hashes)
{
if (_p->m_protocolVersion != protocolVersion())
otherPeerV60Sync = true; // Already have a peer downloading hash chain with old protocol, do nothing
else
otherPeerV61Sync = true; // Already have a peer downloading hash chain with V61+ protocol, join if supported
}
});
if (otherPeerV60Sync && !m_hashes.empty())
{
/// Downloading from other peer with v60 protocol, nothing else we can do
_peer->setIdle();
return;
}
if (otherPeerV61Sync && _peer->m_protocolVersion != protocolVersion())
{
/// Downloading from other peer with v61+ protocol which this peer does not support,
_peer->setIdle();
return;
}
if (_peer->m_protocolVersion == protocolVersion() && !m_hashMan.isComplete())
{
setState(SyncState::HashesParallel);
_peer->requestHashes(); /// v61+ and not catching up to a particular hash
}
else
{
// Restart/continue sync in single peer mode
if (!m_syncingLatestHash)
{
m_syncingLatestHash =_peer->m_latestHash;
m_syncingTotalDifficulty = _peer->m_totalDifficulty;
}
if (_peer->m_totalDifficulty >= m_syncingTotalDifficulty)
{
_peer->requestHashes(m_syncingLatestHash);
setState(SyncState::HashesSingle);
m_estimatedHashes = _peer->m_expectedHashes - (_peer->m_protocolVersion == protocolVersion() ? 0 : c_chainReorgSize);
}
else
_peer->setIdle();
}
}
else if (needBlocks())
{
if (m_man.isComplete())
{
// Done our chain-get.
setState(SyncState::Idle);
clog(NetNote) << "Chain download complete.";
// 1/100th for each useful block hash.
_peer->addRating(m_man.chainSize() / 100); //TODO: what about other peers?
m_man.reset();
_peer->setIdle();
return;
}
else if (peerCanHelp(_peer))
{
// Check block queue status
if (m_bq.unknownFull())
{
clog(NetWarn) << "Too many unknown blocks, restarting sync";
m_bq.clear();
reset();
continueSync();
}
else if (m_bq.knownFull())
{
clog(NetAllDetail) << "Waiting for block queue before downloading blocks";
m_lastActiveState = m_state;
setState(SyncState::WaitingQueue);
_peer->setIdle();
}
else
_peer->requestBlocks();
}
}
else
_peer->setIdle();
DEV_INVARIANT_CHECK;
}
bool EthereumHost::peerCanHelp(EthereumPeer* _peer) const
{
(void)_peer;
return true;
}
bool EthereumHost::peerShouldGrabBlocks(EthereumPeer* _peer) const
{
// this is only good for deciding whether to go ahead and grab a particular peer's hash chain,
// yet it's being used in determining whether to allow a peer help with downloading an existing
// chain of blocks.
auto td = _peer->m_totalDifficulty;
auto lh = m_syncingLatestHash;
auto ctd = m_chain.details().totalDifficulty;
clog(NetAllDetail) << "Should grab blocks? " << td << "vs" << ctd;
if (td < ctd || (td == ctd && m_chain.currentHash() == lh))
return false;
return true;
}
bool EthereumHost::peerShouldGrabChain(EthereumPeer* _peer) const
{
h256 c = m_chain.currentHash();
unsigned n = m_chain.number();
u256 td = m_chain.details().totalDifficulty;
clog(NetAllDetail) << "Attempt chain-grab? Latest:" << c << ", number:" << n << ", TD:" << td << " versus " << _peer->m_totalDifficulty;
if (td >= _peer->m_totalDifficulty)
{
clog(NetAllDetail) << "No. Our chain is better.";
return false;
}
else
{
clog(NetAllDetail) << "Yes. Their chain is better.";
return true;
}
} }
bool EthereumHost::isSyncing() const bool EthereumHost::isSyncing() const
{ {
return m_state != SyncState::Idle; Guard l(x_sync);
if (!m_sync)
return false;
return m_sync->isSyncing();
} }
SyncStatus EthereumHost::status() const SyncStatus EthereumHost::status() const
{ {
RecursiveGuard l(x_sync); Guard l(x_sync);
SyncStatus res; if (!m_sync)
res.state = m_state; return SyncStatus();
if (m_state == SyncState::HashesParallel) return m_sync->status();
{
res.hashesReceived = m_hashMan.hashesGot().size();
res.hashesTotal = m_hashMan.chainSize();
}
else if (m_state == SyncState::HashesSingle)
{
res.hashesTotal = m_estimatedHashes;
res.hashesReceived = static_cast<unsigned>(m_hashes.size());
res.hashesEstimated = true;
}
else if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks || m_state == SyncState::WaitingQueue)
{
res.blocksTotal = m_man.chainSize();
res.blocksReceived = m_man.blocksGot().size();
}
return res;
}
bool EthereumHost::invariants() const
{
if (m_state == SyncState::HashesNegotiate && !m_hashes.empty())
return false;
if (needBlocks() && (m_syncingLatestHash || !m_hashes.empty()))
return false;
return true;
} }

75
libethereum/EthereumHost.h

@ -48,16 +48,16 @@ namespace eth
class TransactionQueue; class TransactionQueue;
class BlockQueue; class BlockQueue;
class BlockChainSync;
/** /**
* @brief The EthereumHost class * @brief The EthereumHost class
* @warning None of this is thread-safe. You have been warned. * @warning None of this is thread-safe. You have been warned.
* @doWork Syncs to peers and sends new blocks and transactions. * @doWork Syncs to peers and sends new blocks and transactions.
*/ */
class EthereumHost: public p2p::HostCapability<EthereumPeer>, Worker, HasInvariants class EthereumHost: public p2p::HostCapability<EthereumPeer>, Worker
{ {
public: public:
/// Start server, but don't listen. /// Start server, but don't listen.
EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId); EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId);
@ -71,82 +71,60 @@ public:
void reset(); void reset();
DownloadMan const& downloadMan() const { return m_man; } DownloadMan const& downloadMan() const { return m_man; }
DownloadMan& downloadMan() { return m_man; }
bool isSyncing() const; bool isSyncing() const;
bool isBanned(p2p::NodeId const& _id) const { return !!m_banned.count(_id); } bool isBanned(p2p::NodeId const& _id) const { return !!m_banned.count(_id); }
void noteNewTransactions() { m_newTransactions = true; } void noteNewTransactions() { m_newTransactions = true; }
void noteNewBlocks() { m_newBlocks = true; } void noteNewBlocks() { m_newBlocks = true; }
void onPeerStatus(EthereumPeer* _peer); ///< Called by peer to report status BlockChain const& chain() const { return m_chain; }
void onPeerBlocks(EthereumPeer* _peer, RLP const& _r); ///< Called by peer once it has new blocks during syn BlockQueue& bq() { return m_bq; }
void onPeerNewBlock(EthereumPeer* _peer, RLP const& _r); ///< Called by peer once it has new blocks BlockQueue const& bq() const { return m_bq; }
void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes); ///< Called by peer once it has new hashes
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes); ///< Called by peer once it has another sequential block of hashes during sync
void onPeerTransactions(EthereumPeer* _peer, RLP const& _r); ///< Called by peer when it has new transactions
void onPeerAborting(EthereumPeer* _peer); ///< Called by peer when it is disconnecting
DownloadMan& downloadMan() { return m_man; }
HashDownloadMan& hashDownloadMan() { return m_hashMan; }
BlockChain const& chain() { return m_chain; }
SyncStatus status() const; SyncStatus status() const;
h256 latestBlockSent() { return m_latestBlockSent; }
static char const* stateName(SyncState _s) { return s_stateNames[static_cast<int>(_s)]; } static char const* stateName(SyncState _s) { return s_stateNames[static_cast<int>(_s)]; }
static unsigned const c_oldProtocolVersion; static unsigned const c_oldProtocolVersion;
void foreachPeerPtr(std::function<bool(std::shared_ptr<EthereumPeer>)> const& _f) const;
void foreachPeer(std::function<bool(EthereumPeer*)> const& _f) const;
void onPeerStatus(EthereumPeer* _peer);
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes);
void onPeerBlocks(EthereumPeer* _peer, RLP const& _r);
void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes);
void onPeerNewBlock(EthereumPeer* _peer, RLP const& _r);
void onPeerTransactions(EthereumPeer* _peer, RLP const& _r);
void onPeerAborting(EthereumPeer* _peer);
private: private:
static char const* const s_stateNames[static_cast<int>(SyncState::Size)]; static char const* const s_stateNames[static_cast<int>(SyncState::Size)];
std::tuple<std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<p2p::Session>>> randomSelection(unsigned _percent = 25, std::function<bool(EthereumPeer*)> const& _allow = [](EthereumPeer const*){ return true; }); std::tuple<std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<p2p::Session>>> randomSelection(unsigned _percent = 25, std::function<bool(EthereumPeer*)> const& _allow = [](EthereumPeer const*){ return true; });
void foreachPeerPtr(std::function<void(std::shared_ptr<EthereumPeer>)> const& _f) const;
void foreachPeer(std::function<void(EthereumPeer*)> const& _f) const;
void resetSyncTo(h256 const& _h);
bool needHashes() const { return m_state == SyncState::HashesNegotiate || m_state == SyncState::HashesSingle || m_state == SyncState::HashesParallel; }
bool needBlocks() const { return m_state == SyncState::Blocks || m_state == SyncState::NewBlocks; }
/// Sync with the BlockChain. It might contain one of our mined blocks, we might have new candidates from the network. /// Sync with the BlockChain. It might contain one of our mined blocks, we might have new candidates from the network.
void doWork(); virtual void doWork() override;
void maintainTransactions(); void maintainTransactions();
void maintainBlocks(h256 const& _currentBlock); void maintainBlocks(h256 const& _currentBlock);
/// Get a bunch of needed blocks.
/// Removes them from our list of needed blocks.
/// @returns empty if there's no more blocks left to fetch, otherwise the blocks to fetch.
h256Hash neededBlocks(h256Hash const& _exclude);
/// Check to see if the network peer-state initialisation has happened. /// Check to see if the network peer-state initialisation has happened.
bool isInitialised() const { return (bool)m_latestBlockSent; } bool isInitialised() const { return (bool)m_latestBlockSent; }
/// Initialises the network peer-state, doing the stuff that needs to be once-only. @returns true if it really was first. /// Initialises the network peer-state, doing the stuff that needs to be once-only. @returns true if it really was first.
bool ensureInitialised(); bool ensureInitialised();
virtual void onStarting() { startWorking(); } virtual void onStarting() override { startWorking(); }
virtual void onStopping() { stopWorking(); } virtual void onStopping() override { stopWorking(); }
void continueSync(); /// Find something to do for all peers
void continueSync(EthereumPeer* _peer); /// Find some work to do for a peer
void onPeerDoneHashes(EthereumPeer* _peer, bool _new); /// Called when done downloading hashes from peer
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool _complete);
bool peerShouldGrabBlocks(EthereumPeer* _peer) const;
bool peerShouldGrabChain(EthereumPeer* _peer) const;
bool peerCanHelp(EthereumPeer* _peer) const;
unsigned estimateHashes();
void estimatePeerHashes(EthereumPeer* _peer);
void setState(SyncState _s);
bool invariants() const override; BlockChainSync& sync();
BlockChain const& m_chain; BlockChain const& m_chain;
TransactionQueue& m_tq; ///< Maintains a list of incoming transactions not yet in a block on the blockchain. TransactionQueue& m_tq; ///< Maintains a list of incoming transactions not yet in a block on the blockchain.
BlockQueue& m_bq; ///< Maintains a list of incoming blocks not yet on the blockchain (to be imported). BlockQueue& m_bq; ///< Maintains a list of incoming blocks not yet on the blockchain (to be imported).
Handler m_bqRoomAvailable;
u256 m_networkId; u256 m_networkId;
DownloadMan m_man;
HashDownloadMan m_hashMan;
h256 m_latestBlockSent; h256 m_latestBlockSent;
h256Hash m_transactionsSent; h256Hash m_transactionsSent;
@ -155,14 +133,9 @@ private:
bool m_newTransactions = false; bool m_newTransactions = false;
bool m_newBlocks = false; bool m_newBlocks = false;
mutable RecursiveMutex x_sync; mutable Mutex x_sync;
SyncState m_state = SyncState::Idle; ///< Current sync state DownloadMan m_man;
SyncState m_lastActiveState = SyncState::Idle; ///< Saved state before entering waiting queue mode std::unique_ptr<BlockChainSync> m_sync;
h256 m_syncingLatestHash; ///< Latest block's hash, as of the current sync.
u256 m_syncingTotalDifficulty; ///< Latest block's total difficulty, as of the current sync.
h256s m_hashes; ///< List of hashes with unknown block numbers. Used for PV60 chain downloading and catching up to a particular unknown
unsigned m_estimatedHashes = 0; ///< Number of estimated hashes for the last peer over PV60. Used for status reporting only.
bool m_continueSync = false; ///< True when the block queue has processed a block; we should restart grabbing blocks.
}; };
} }

45
libethereum/EthereumPeer.cpp

@ -30,15 +30,28 @@
#include "EthereumHost.h" #include "EthereumHost.h"
#include "TransactionQueue.h" #include "TransactionQueue.h"
#include "BlockQueue.h" #include "BlockQueue.h"
#include "BlockChainSync.h"
using namespace std; using namespace std;
using namespace dev; using namespace dev;
using namespace dev::eth; using namespace dev::eth;
using namespace p2p; using namespace p2p;
string toString(Asking _a)
{
switch (_a)
{
case Asking::Blocks: return "Blocks";
case Asking::Hashes: return "Hashes";
case Asking::Nothing: return "Nothing";
case Asking::State: return "State";
}
return "?";
}
EthereumPeer::EthereumPeer(Session* _s, HostCapabilityFace* _h, unsigned _i, CapDesc const& _cap): EthereumPeer::EthereumPeer(Session* _s, HostCapabilityFace* _h, unsigned _i, CapDesc const& _cap):
Capability(_s, _h, _i), Capability(_s, _h, _i),
m_sub(host()->downloadMan()), m_sub(host()->downloadMan()),
m_hashSub(host()->hashDownloadMan()),
m_peerCapabilityVersion(_cap.second) m_peerCapabilityVersion(_cap.second)
{ {
session()->addNote("manners", isRude() ? "RUDE" : "nice"); session()->addNote("manners", isRude() ? "RUDE" : "nice");
@ -48,6 +61,11 @@ EthereumPeer::EthereumPeer(Session* _s, HostCapabilityFace* _h, unsigned _i, Cap
EthereumPeer::~EthereumPeer() EthereumPeer::~EthereumPeer()
{ {
if (m_asking != Asking::Nothing)
{
cnote << "Peer aborting while being asked for " << ::toString(m_asking);
setRude();
}
abortSync(); abortSync();
} }
@ -58,13 +76,18 @@ bool EthereumPeer::isRude() const
unsigned EthereumPeer::askOverride() const unsigned EthereumPeer::askOverride() const
{ {
std::string static const badGeth = "Geth/v0.9.27";
if (session()->info().clientVersion.substr(0, badGeth.size()) == badGeth)
return 1;
bytes const& d = repMan().data(*session(), name()); bytes const& d = repMan().data(*session(), name());
return d.empty() ? c_maxBlocksAsk : RLP(d).toInt<unsigned>(RLP::LaisezFaire); return d.empty() ? c_maxBlocksAsk : RLP(d).toInt<unsigned>(RLP::LaisezFaire);
} }
void EthereumPeer::setRude() void EthereumPeer::setRude()
{ {
auto old = askOverride();
repMan().setData(*session(), name(), rlp(askOverride() / 2 + 1)); repMan().setData(*session(), name(), rlp(askOverride() / 2 + 1));
cnote << "Rude behaviour; askOverride now" << askOverride() << ", was" << old;
repMan().noteRude(*session(), name()); repMan().noteRude(*session(), name());
session()->addNote("manners", "RUDE"); session()->addNote("manners", "RUDE");
} }
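setRude() now logs the change, but the back-off itself is the same as before: each incident roughly halves the per-request block allowance stored in the reputation manager. Written as a plain function (illustrative only, not part of the patch):

// Back-off rule matching setRude() above: halve the allowance, never hit zero.
unsigned nextAskOverride(unsigned _current)
{
	return _current / 2 + 1;
}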
@ -83,22 +106,8 @@ EthereumHost* EthereumPeer::host() const
* Possible asking/syncing states for two peers: * Possible asking/syncing states for two peers:
*/ */
string toString(Asking _a)
{
switch (_a)
{
case Asking::Blocks: return "Blocks";
case Asking::Hashes: return "Hashes";
case Asking::Nothing: return "Nothing";
case Asking::State: return "State";
}
return "?";
}
void EthereumPeer::setIdle() void EthereumPeer::setIdle()
{ {
m_sub.doneFetch();
m_hashSub.doneFetch();
setAsking(Asking::Nothing); setAsking(Asking::Nothing);
} }
@ -120,14 +129,14 @@ void EthereumPeer::requestStatus()
sealAndSend(s); sealAndSend(s);
} }
void EthereumPeer::requestHashes() void EthereumPeer::requestHashes(u256 _number, unsigned _count)
{ {
assert(m_asking == Asking::Nothing); assert(m_asking == Asking::Nothing);
m_syncHashNumber = m_hashSub.nextFetch(c_maxHashesAsk); m_syncHashNumber = _number;
m_syncHash = h256(); m_syncHash = h256();
setAsking(Asking::Hashes); setAsking(Asking::Hashes);
RLPStream s; RLPStream s;
prep(s, GetBlockHashesByNumberPacket, 2) << m_syncHashNumber << c_maxHashesAsk; prep(s, GetBlockHashesByNumberPacket, 2) << m_syncHashNumber << _count;
clog(NetMessageDetail) << "Requesting block hashes for numbers " << m_syncHashNumber << "-" << m_syncHashNumber + c_maxHashesAsk - 1; clog(NetMessageDetail) << "Requesting block hashes for numbers " << m_syncHashNumber << "-" << m_syncHashNumber + c_maxHashesAsk - 1;
sealAndSend(s); sealAndSend(s);
} }

13
libethereum/EthereumPeer.h

@ -50,6 +50,9 @@ namespace eth
class EthereumPeer: public p2p::Capability class EthereumPeer: public p2p::Capability
{ {
friend class EthereumHost; //TODO: remove this friend class EthereumHost; //TODO: remove this
friend class BlockChainSync; //TODO: remove this
friend class PV60Sync; //TODO: remove this
friend class PV61Sync; //TODO: remove this
public: public:
/// Basic constructor. /// Basic constructor.
@ -73,8 +76,8 @@ public:
/// Abort sync and reset fetch /// Abort sync and reset fetch
void setIdle(); void setIdle();
/// Request hashes. Uses hash download manager to get hash number. v61+ protocol version only /// Request hashes by number. v61+ protocol version only
void requestHashes(); void requestHashes(u256 _number, unsigned _count);
/// Request hashes for given parent hash. /// Request hashes for given parent hash.
void requestHashes(h256 const& _lastHash); void requestHashes(h256 const& _lastHash);
@ -138,18 +141,16 @@ private:
h256 m_genesisHash; ///< Peer's genesis hash h256 m_genesisHash; ///< Peer's genesis hash
u256 m_latestBlockNumber; ///< Number of the latest block this peer has u256 m_latestBlockNumber; ///< Number of the latest block this peer has
/// This is built as we ask for hashes. Once no more hashes are given, we present this to the /// This is built as we ask for hashes. Once no more hashes are given, we present this to the
/// host who initialises the DownloadMan and m_sub becomes active for us to begin asking for blocks. /// host who initialises the DownloadMan and m_sub becomes active for us to begin asking for blocks.
unsigned m_expectedHashes = 0; ///< Estimated upper bound of hashes to expect from this peer. unsigned m_expectedHashes = 0; ///< Estimated upper bound of hashes to expect from this peer.
unsigned m_syncHashNumber = 0; ///< Number of latest hash we sync to (PV61+) u256 m_syncHashNumber = 0; ///< Number of latest hash we sync to (PV61+)
h256 m_syncHash; ///< Latest hash we sync to (PV60) h256 m_syncHash; ///< Latest hash we sync to (PV60)
/// Once we're asking for blocks, this becomes in use. /// Once we're asking for blocks, this becomes in use.
DownloadSub m_sub; DownloadSub m_sub;
/// Once we're asking for hashes, this becomes in use.
HashDownloadSub m_hashSub;
u256 m_peerCapabilityVersion; ///< Protocol version this peer supports received as capability u256 m_peerCapabilityVersion; ///< Protocol version this peer supports received as capability
/// Have we received a GetTransactions packet that we haven't yet answered? /// Have we received a GetTransactions packet that we haven't yet answered?
bool m_requireTransactions = false; bool m_requireTransactions = false;

2
libethereum/State.h

@ -206,6 +206,8 @@ public:
return false; return false;
PoW::assignResult(_result, m_currentBlock); PoW::assignResult(_result, m_currentBlock);
if (!PoW::verify(m_currentBlock))
return false;
cnote << "Completed" << m_currentBlock.headerHash(WithoutNonce) << m_currentBlock.nonce << m_currentBlock.difficulty << PoW::verify(m_currentBlock); cnote << "Completed" << m_currentBlock.headerHash(WithoutNonce) << m_currentBlock.nonce << m_currentBlock.difficulty << PoW::verify(m_currentBlock);

3
libethereum/TransactionQueue.cpp

@ -29,6 +29,7 @@ using namespace dev;
using namespace dev::eth; using namespace dev::eth;
const char* TransactionQueueChannel::name() { return EthCyan "┉┅▶"; } const char* TransactionQueueChannel::name() { return EthCyan "┉┅▶"; }
const char* TransactionQueueTraceChannel::name() { return EthCyan " ┅▶"; }
ImportResult TransactionQueue::import(bytesConstRef _transactionRLP, ImportCallback const& _cb, IfDropped _ik) ImportResult TransactionQueue::import(bytesConstRef _transactionRLP, ImportCallback const& _cb, IfDropped _ik)
{ {
@ -115,7 +116,7 @@ ImportResult TransactionQueue::manageImport_WITH_LOCK(h256 const& _h, Transactio
m_known.insert(_h); m_known.insert(_h);
if (_cb) if (_cb)
m_callbacks[_h] = _cb; m_callbacks[_h] = _cb;
ctxq << "Queued vaguely legit-looking transaction" << _h; clog(TransactionQueueTraceChannel) << "Queued vaguely legit-looking transaction" << _h;
m_onReady(); m_onReady();
} }
catch (Exception const& _e) catch (Exception const& _e)

3
libethereum/TransactionQueue.h

@ -36,7 +36,8 @@ namespace eth
class BlockChain; class BlockChain;
struct TransactionQueueChannel: public LogChannel { static const char* name(); static const int verbosity = 4; }; struct TransactionQueueChannel: public LogChannel { static const char* name(); static const int verbosity = 4; };
#define ctxq dev::LogOutputStream<dev::eth::TransactionQueueChannel, true>() struct TransactionQueueTraceChannel: public LogChannel { static const char* name(); static const int verbosity = 7; };
#define ctxq dev::LogOutputStream<dev::eth::TransactionQueueTraceChannel, true>()
enum class IfDropped { Ignore, Retry }; enum class IfDropped { Ignore, Retry };

58
libevm/VM.cpp

@ -202,6 +202,24 @@ bytesConstRef VM::execImpl(u256& io_gas, ExtVMFace& _ext, OnOpFunc const& _onOp)
return nextPC; return nextPC;
}; };
auto copyDataToMemory = [](bytesConstRef _data, decltype(m_stack)& _stack, decltype(m_temp)& _memory)
{
auto offset = static_cast<size_t>(_stack.back());
_stack.pop_back();
bigint bigIndex = _stack.back();
auto index = static_cast<size_t>(bigIndex);
_stack.pop_back();
auto size = static_cast<size_t>(_stack.back());
_stack.pop_back();
size_t sizeToBeCopied = bigIndex + size > _data.size() ? _data.size() < bigIndex ? 0 : _data.size() - index : size;
if (sizeToBeCopied > 0)
std::memcpy(_memory.data() + offset, _data.data() + index, sizeToBeCopied);
if (size > sizeToBeCopied)
std::memset(_memory.data() + offset + sizeToBeCopied, 0, size - sizeToBeCopied);
};
m_steps = 0; m_steps = 0;
for (auto nextPC = m_curPC + 1; true; m_curPC = nextPC, nextPC = m_curPC + 1, ++m_steps) for (auto nextPC = m_curPC + 1; true; m_curPC = nextPC, nextPC = m_curPC + 1, ++m_steps)
{ {
@ -364,44 +382,16 @@ bytesConstRef VM::execImpl(u256& io_gas, ExtVMFace& _ext, OnOpFunc const& _onOp)
m_stack.back() = _ext.codeAt(asAddress(m_stack.back())).size(); m_stack.back() = _ext.codeAt(asAddress(m_stack.back())).size();
break; break;
case Instruction::CALLDATACOPY: case Instruction::CALLDATACOPY:
copyDataToMemory(_ext.data, m_stack, m_temp);
break;
case Instruction::CODECOPY: case Instruction::CODECOPY:
copyDataToMemory(&_ext.code, m_stack, m_temp);
break;
case Instruction::EXTCODECOPY: case Instruction::EXTCODECOPY:
{ {
Address a; auto a = asAddress(m_stack.back());
if (inst == Instruction::EXTCODECOPY)
{
a = asAddress(m_stack.back());
m_stack.pop_back();
}
unsigned offset = (unsigned)m_stack.back();
m_stack.pop_back();
u256 index = m_stack.back();
m_stack.pop_back();
unsigned size = (unsigned)m_stack.back();
m_stack.pop_back(); m_stack.pop_back();
unsigned sizeToBeCopied; copyDataToMemory(&_ext.codeAt(a), m_stack, m_temp);
switch(inst)
{
case Instruction::CALLDATACOPY:
sizeToBeCopied = index + (bigint)size > (u256)_ext.data.size() ? (u256)_ext.data.size() < index ? 0 : _ext.data.size() - (unsigned)index : size;
memcpy(m_temp.data() + offset, _ext.data.data() + (unsigned)index, sizeToBeCopied);
break;
case Instruction::CODECOPY:
sizeToBeCopied = index + (bigint)size > (u256)_ext.code.size() ? (u256)_ext.code.size() < index ? 0 : _ext.code.size() - (unsigned)index : size;
memcpy(m_temp.data() + offset, _ext.code.data() + (unsigned)index, sizeToBeCopied);
break;
case Instruction::EXTCODECOPY:
sizeToBeCopied = index + (bigint)size > (u256)_ext.codeAt(a).size() ? (u256)_ext.codeAt(a).size() < index ? 0 : _ext.codeAt(a).size() - (unsigned)index : size;
memcpy(m_temp.data() + offset, _ext.codeAt(a).data() + (unsigned)index, sizeToBeCopied);
break;
default:
// this is unreachable, but if someone introduces a bug in the future, he may get here.
assert(false);
BOOST_THROW_EXCEPTION(InvalidOpcode() << errinfo_comment("CALLDATACOPY, CODECOPY or EXTCODECOPY instruction requested."));
break;
}
memset(m_temp.data() + offset + sizeToBeCopied, 0, size - sizeToBeCopied);
break;
} }
case Instruction::GASPRICE: case Instruction::GASPRICE:
m_stack.push_back(_ext.gasPrice); m_stack.push_back(_ext.gasPrice);
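The three copy opcodes above (CALLDATACOPY, CODECOPY, EXTCODECOPY) now share one bounded-copy helper. A stand-alone sketch of the copy/zero-fill rule it implements; the real lambda pops its operands from the EVM stack, whereas this version takes them as parameters and assumes the destination buffer is already sized:

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Copy up to _count bytes of _data starting at _srcOff into _mem at _memOff,
// zero-filling whatever the source cannot provide, without reading past the
// end of the source. _mem must already hold at least _memOff + _count bytes.
void boundedCopy(std::vector<uint8_t>& _mem, size_t _memOff,
                 uint8_t const* _data, size_t _dataSize,
                 size_t _srcOff, size_t _count)
{
	size_t available = _srcOff < _dataSize ? std::min(_count, _dataSize - _srcOff) : 0;
	if (available > 0)
		std::memcpy(_mem.data() + _memOff, _data + _srcOff, available);
	if (_count > available)
		std::memset(_mem.data() + _memOff + available, 0, _count - available);
}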

7
libjsconsole/JSConsole.cpp

@ -39,12 +39,11 @@ JSConsole::JSConsole(WebThreeDirect& _web3, shared_ptr<AccountHolder> const& _ac
m_printer(m_engine) m_printer(m_engine)
{ {
m_jsonrpcConnector.reset(new JSV8Connector(m_engine)); m_jsonrpcConnector.reset(new JSV8Connector(m_engine));
m_jsonrpcServer.reset(new WebThreeStubServer(*m_jsonrpcConnector.get(), _web3, _accounts, vector<KeyPair>())); (void)_web3; (void)_accounts;
// m_jsonrpcServer.reset(new WebThreeStubServer(*m_jsonrpcConnector.get(), _web3, _accounts, vector<KeyPair>()));
} }
JSConsole::~JSConsole() {} void JSConsole::readExpression() const
void JSConsole::repl() const
{ {
string cmd = ""; string cmd = "";
g_logPost = [](std::string const& a, char const*) { cout << "\r \r" << a << endl << flush; rl_forced_update_display(); }; g_logPost = [](std::string const& a, char const*) { cout << "\r \r" << a << endl << flush; rl_forced_update_display(); };

7
libjsconsole/JSConsole.h

@ -25,7 +25,7 @@
#include <libjsengine/JSV8Engine.h> #include <libjsengine/JSV8Engine.h>
#include <libjsengine/JSV8Printer.h> #include <libjsengine/JSV8Printer.h>
class WebThreeStubServer; namespace dev { class WebThreeStubServer; }
namespace jsonrpc { class AbstractServerConnector; } namespace jsonrpc { class AbstractServerConnector; }
namespace dev namespace dev
@ -39,15 +39,14 @@ class JSConsole
{ {
public: public:
JSConsole(WebThreeDirect& _web3, std::shared_ptr<AccountHolder> const& _accounts); JSConsole(WebThreeDirect& _web3, std::shared_ptr<AccountHolder> const& _accounts);
~JSConsole(); void readExpression() const;
void repl() const;
private: private:
std::string promptForIndentionLevel(int _i) const; std::string promptForIndentionLevel(int _i) const;
JSV8Engine m_engine; JSV8Engine m_engine;
JSV8Printer m_printer; JSV8Printer m_printer;
std::unique_ptr<WebThreeStubServer> m_jsonrpcServer; std::unique_ptr<dev::WebThreeStubServer> m_jsonrpcServer;
std::unique_ptr<jsonrpc::AbstractServerConnector> m_jsonrpcConnector; std::unique_ptr<jsonrpc::AbstractServerConnector> m_jsonrpcConnector;
}; };

2
libp2p/CMakeLists.txt

@ -14,7 +14,7 @@ aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..) include_directories(BEFORE ..)
# we may not use it in libp2p, but one of our dependecies is including leveldb in header file # we may not use it in libp2p, but one of our dependecies is including leveldb in header file
# and windows is failing to build without that # and windows is failing to build without that
include_directories(${LEVELDB_INCLUDE_DIRS}) include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS}) include_directories(${Boost_INCLUDE_DIRS})
if (MINIUPNPC_FOUND) if (MINIUPNPC_FOUND)

3
libp2p/Host.cpp

@ -580,7 +580,8 @@ PeerSessionInfos Host::peerSessionInfo() const
for (auto& i: m_sessions) for (auto& i: m_sessions)
if (auto j = i.second.lock()) if (auto j = i.second.lock())
if (j->isConnected()) if (j->isConnected())
ret.push_back(j->m_info); DEV_GUARDED(j->x_info)
ret.push_back(j->m_info);
return ret; return ret;
} }

19
libp2p/Session.cpp

@ -44,7 +44,8 @@ Session::Session(Host* _h, RLPXFrameCoder* _io, std::shared_ptr<RLPXSocket> cons
{ {
m_peer->m_lastDisconnect = NoDisconnect; m_peer->m_lastDisconnect = NoDisconnect;
m_lastReceived = m_connect = chrono::steady_clock::now(); m_lastReceived = m_connect = chrono::steady_clock::now();
m_info.socketId = m_socket->ref().native_handle(); DEV_GUARDED(x_info)
m_info.socketId = m_socket->ref().native_handle();
} }
Session::~Session() Session::~Session()
@ -187,9 +188,12 @@ bool Session::interpret(PacketType _t, RLP const& _r)
break; break;
} }
case PongPacket: case PongPacket:
m_info.lastPing = std::chrono::steady_clock::now() - m_ping; {
DEV_GUARDED(x_info)
m_info.lastPing = std::chrono::steady_clock::now() - m_ping;
clog(NetTriviaSummary) << "Latency: " << chrono::duration_cast<chrono::milliseconds>(m_info.lastPing).count() << " ms"; clog(NetTriviaSummary) << "Latency: " << chrono::duration_cast<chrono::milliseconds>(m_info.lastPing).count() << " ms";
break; break;
}
case GetPeersPacket: case GetPeersPacket:
// Disabled for interop testing. // Disabled for interop testing.
// GetPeers/PeersPacket will be modified to only exchange new nodes which it's peers are interested in. // GetPeers/PeersPacket will be modified to only exchange new nodes which it's peers are interested in.
@ -382,11 +386,12 @@ void Session::drop(DisconnectReason _reason)
void Session::disconnect(DisconnectReason _reason) void Session::disconnect(DisconnectReason _reason)
{ {
clog(NetConnect) << "Disconnecting (our reason:" << reasonOf(_reason) << ")"; clog(NetConnect) << "Disconnecting (our reason:" << reasonOf(_reason) << ")";
StructuredLogger::p2pDisconnected( DEV_GUARDED(x_info)
m_info.id.abridged(), StructuredLogger::p2pDisconnected(
m_peer->endpoint, // TODO: may not be 100% accurate m_info.id.abridged(),
m_server->peerCount() m_peer->endpoint, // TODO: may not be 100% accurate
); m_server->peerCount()
);
if (m_socket->ref().is_open()) if (m_socket->ref().is_open())
{ {
RLPStream s; RLPStream s;

7
libp2p/Session.h

@ -67,7 +67,7 @@ public:
bool isConnected() const { return m_socket->ref().is_open(); } bool isConnected() const { return m_socket->ref().is_open(); }
NodeId id() const; NodeId id() const;
unsigned socketId() const { return m_info.socketId; } unsigned socketId() const { Guard l(x_info); return m_info.socketId; }
template <class PeerCap> template <class PeerCap>
std::shared_ptr<PeerCap> cap() const { try { return std::static_pointer_cast<PeerCap>(m_capabilities.at(std::make_pair(PeerCap::name(), PeerCap::version()))); } catch (...) { return nullptr; } } std::shared_ptr<PeerCap> cap() const { try { return std::static_pointer_cast<PeerCap>(m_capabilities.at(std::make_pair(PeerCap::name(), PeerCap::version()))); } catch (...) { return nullptr; } }
@ -81,9 +81,9 @@ public:
int rating() const; int rating() const;
void addRating(int _r); void addRating(int _r);
void addNote(std::string const& _k, std::string const& _v) { m_info.notes[_k] = _v; } void addNote(std::string const& _k, std::string const& _v) { Guard l(x_info); m_info.notes[_k] = _v; }
PeerSessionInfo const& info() const { return m_info; } PeerSessionInfo info() const { Guard l(x_info); return m_info; }
void ensureNodesRequested(); void ensureNodesRequested();
void serviceNodesRequest(); void serviceNodesRequest();
@ -119,6 +119,7 @@ private:
std::shared_ptr<Peer> m_peer; ///< The Peer object. std::shared_ptr<Peer> m_peer; ///< The Peer object.
bool m_dropped = false; ///< If true, we've already divested ourselves of this peer. We're just waiting for the reads & writes to fail before the shared_ptr goes OOS and the destructor kicks in. bool m_dropped = false; ///< If true, we've already divested ourselves of this peer. We're just waiting for the reads & writes to fail before the shared_ptr goes OOS and the destructor kicks in.
mutable Mutex x_info;
PeerSessionInfo m_info; ///< Dynamic information about this peer. PeerSessionInfo m_info; ///< Dynamic information about this peer.
bool m_theyRequestedNodes = false; ///< Has the peer requested nodes from us without receiveing an answer from us? bool m_theyRequestedNodes = false; ///< Has the peer requested nodes from us without receiveing an answer from us?

43
libsolidity/AST.cpp

@ -21,6 +21,7 @@
*/ */
#include <algorithm> #include <algorithm>
#include <functional>
#include <boost/range/adaptor/reversed.hpp> #include <boost/range/adaptor/reversed.hpp>
#include <libsolidity/Utils.h> #include <libsolidity/Utils.h>
#include <libsolidity/AST.h> #include <libsolidity/AST.h>
@ -434,23 +435,29 @@ void StructDefinition::checkMemberTypes() const
void StructDefinition::checkRecursion() const void StructDefinition::checkRecursion() const
{ {
set<StructDefinition const*> definitionsSeen; using StructPointer = StructDefinition const*;
vector<StructDefinition const*> queue = {this}; using StructPointersSet = set<StructPointer>;
while (!queue.empty()) function<void(StructPointer,StructPointersSet const&)> check = [&](StructPointer _struct, StructPointersSet const& _parents)
{ {
StructDefinition const* def = queue.back(); if (_parents.count(_struct))
queue.pop_back(); BOOST_THROW_EXCEPTION(
if (definitionsSeen.count(def)) ParserError() <<
BOOST_THROW_EXCEPTION(ParserError() << errinfo_sourceLocation(def->getLocation()) errinfo_sourceLocation(_struct->getLocation()) <<
<< errinfo_comment("Recursive struct definition.")); errinfo_comment("Recursive struct definition.")
definitionsSeen.insert(def); );
for (ASTPointer<VariableDeclaration> const& member: def->getMembers()) set<StructDefinition const*> parents = _parents;
parents.insert(_struct);
for (ASTPointer<VariableDeclaration> const& member: _struct->getMembers())
if (member->getType()->getCategory() == Type::Category::Struct) if (member->getType()->getCategory() == Type::Category::Struct)
{ {
UserDefinedTypeName const& typeName = dynamic_cast<UserDefinedTypeName const&>(*member->getTypeName()); auto const& typeName = dynamic_cast<UserDefinedTypeName const&>(*member->getTypeName());
queue.push_back(&dynamic_cast<StructDefinition const&>(*typeName.getReferencedDeclaration())); check(
&dynamic_cast<StructDefinition const&>(*typeName.getReferencedDeclaration()),
parents
);
} }
} };
check(this, {});
} }
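checkRecursion() now walks the member graph with a recursive lambda that carries the set of ancestor structs: reaching the same struct twice via different paths is fine, but a struct that reaches itself is an error. A generic stand-alone version of the same check, using strings in place of the Solidity AST types:

#include <functional>
#include <map>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

// Member graph: struct name -> names of struct-typed members.
using Graph = std::map<std::string, std::vector<std::string>>;

// Throws if _node can reach itself through its members (a recursive definition).
void checkNoRecursion(Graph const& _g, std::string const& _node)
{
	std::function<void(std::string const&, std::set<std::string> const&)> check =
		[&](std::string const& _n, std::set<std::string> const& _parents)
	{
		if (_parents.count(_n))
			throw std::runtime_error("Recursive struct definition: " + _n);
		std::set<std::string> parents = _parents;
		parents.insert(_n);
		auto it = _g.find(_n);
		if (it != _g.end())
			for (auto const& member: it->second)
				check(member, parents);
	};
	check(_node, {});
}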
TypePointer EnumDefinition::getType(ContractDefinition const*) const TypePointer EnumDefinition::getType(ContractDefinition const*) const
@ -919,7 +926,7 @@ void MemberAccess::checkTypeRequirements(TypePointers const* _argumentTypes)
{ {
auto const& arrayType(dynamic_cast<ArrayType const&>(type)); auto const& arrayType(dynamic_cast<ArrayType const&>(type));
m_isLValue = (*m_memberName == "length" && m_isLValue = (*m_memberName == "length" &&
arrayType.location() != ReferenceType::Location::CallData && arrayType.isDynamicallySized()); arrayType.location() != DataLocation::CallData && arrayType.isDynamicallySized());
} }
else else
m_isLValue = false; m_isLValue = false;
@ -942,7 +949,7 @@ void IndexAccess::checkTypeRequirements(TypePointers const*)
m_type = make_shared<FixedBytesType>(1); m_type = make_shared<FixedBytesType>(1);
else else
m_type = type.getBaseType(); m_type = type.getBaseType();
m_isLValue = type.location() != ReferenceType::Location::CallData; m_isLValue = type.location() != DataLocation::CallData;
break; break;
} }
case Type::Category::Mapping: case Type::Category::Mapping:
@ -959,7 +966,7 @@ void IndexAccess::checkTypeRequirements(TypePointers const*)
{ {
TypeType const& type = dynamic_cast<TypeType const&>(*m_base->getType()); TypeType const& type = dynamic_cast<TypeType const&>(*m_base->getType());
if (!m_index) if (!m_index)
m_type = make_shared<TypeType>(make_shared<ArrayType>(ReferenceType::Location::Memory, type.getActualType())); m_type = make_shared<TypeType>(make_shared<ArrayType>(DataLocation::Memory, type.getActualType()));
else else
{ {
m_index->checkTypeRequirements(nullptr); m_index->checkTypeRequirements(nullptr);
@ -967,7 +974,9 @@ void IndexAccess::checkTypeRequirements(TypePointers const*)
if (!length) if (!length)
BOOST_THROW_EXCEPTION(m_index->createTypeError("Integer constant expected.")); BOOST_THROW_EXCEPTION(m_index->createTypeError("Integer constant expected."));
m_type = make_shared<TypeType>(make_shared<ArrayType>( m_type = make_shared<TypeType>(make_shared<ArrayType>(
ReferenceType::Location::Memory, type.getActualType(), length->literalValue(nullptr))); DataLocation::Memory, type.getActualType(),
length->literalValue(nullptr)
));
} }
break; break;
} }

56
libsolidity/ArrayUtils.cpp

@ -38,10 +38,10 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
// need to leave "target_ref target_byte_off" on the stack at the end // need to leave "target_ref target_byte_off" on the stack at the end
// stack layout: [source_ref] [source_byte_off] [source length] target_ref target_byte_off (top) // stack layout: [source_ref] [source_byte_off] [source length] target_ref target_byte_off (top)
solAssert(_targetType.location() == ReferenceType::Location::Storage, ""); solAssert(_targetType.location() == DataLocation::Storage, "");
solAssert( solAssert(
_sourceType.location() == ReferenceType::Location::CallData || _sourceType.location() == DataLocation::CallData ||
_sourceType.location() == ReferenceType::Location::Storage, _sourceType.location() == DataLocation::Storage,
"Given array location not implemented." "Given array location not implemented."
); );
@ -51,7 +51,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
// TODO unroll loop for small sizes // TODO unroll loop for small sizes
bool sourceIsStorage = _sourceType.location() == ReferenceType::Location::Storage; bool sourceIsStorage = _sourceType.location() == DataLocation::Storage;
bool directCopy = sourceIsStorage && sourceBaseType->isValueType() && *sourceBaseType == *targetBaseType; bool directCopy = sourceIsStorage && sourceBaseType->isValueType() && *sourceBaseType == *targetBaseType;
bool haveByteOffsetSource = !directCopy && sourceIsStorage && sourceBaseType->getStorageBytes() <= 16; bool haveByteOffsetSource = !directCopy && sourceIsStorage && sourceBaseType->getStorageBytes() <= 16;
bool haveByteOffsetTarget = !directCopy && targetBaseType->getStorageBytes() <= 16; bool haveByteOffsetTarget = !directCopy && targetBaseType->getStorageBytes() <= 16;
@ -69,7 +69,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
m_context << eth::Instruction::POP; m_context << eth::Instruction::POP;
// stack: target_ref source_ref [source_length] // stack: target_ref source_ref [source_length]
// retrieve source length // retrieve source length
if (_sourceType.location() != ReferenceType::Location::CallData || !_sourceType.isDynamicallySized()) if (_sourceType.location() != DataLocation::CallData || !_sourceType.isDynamicallySized())
retrieveLength(_sourceType); // otherwise, length is already there retrieveLength(_sourceType); // otherwise, length is already there
// stack: target_ref source_ref source_length // stack: target_ref source_ref source_length
m_context << eth::Instruction::DUP3; m_context << eth::Instruction::DUP3;
@ -82,7 +82,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
if (sourceBaseType->getCategory() == Type::Category::Mapping) if (sourceBaseType->getCategory() == Type::Category::Mapping)
{ {
solAssert(targetBaseType->getCategory() == Type::Category::Mapping, ""); solAssert(targetBaseType->getCategory() == Type::Category::Mapping, "");
solAssert(_sourceType.location() == ReferenceType::Location::Storage, ""); solAssert(_sourceType.location() == DataLocation::Storage, "");
// nothing to copy // nothing to copy
m_context m_context
<< eth::Instruction::POP << eth::Instruction::POP << eth::Instruction::POP << eth::Instruction::POP
@ -106,7 +106,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
eth::AssemblyItem copyLoopEndWithoutByteOffset = m_context.newTag(); eth::AssemblyItem copyLoopEndWithoutByteOffset = m_context.newTag();
m_context.appendConditionalJumpTo(copyLoopEndWithoutByteOffset); m_context.appendConditionalJumpTo(copyLoopEndWithoutByteOffset);
if (_sourceType.location() == ReferenceType::Location::Storage && _sourceType.isDynamicallySized()) if (_sourceType.location() == DataLocation::Storage && _sourceType.isDynamicallySized())
CompilerUtils(m_context).computeHashStatic(); CompilerUtils(m_context).computeHashStatic();
// stack: target_ref target_data_end source_length target_data_pos source_data_pos // stack: target_ref target_data_end source_length target_data_pos source_data_pos
m_context << eth::Instruction::SWAP2; m_context << eth::Instruction::SWAP2;
@ -155,7 +155,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
// checking is easier. // checking is easier.
// stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset] // stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
m_context << eth::dupInstruction(3 + byteOffsetSize); m_context << eth::dupInstruction(3 + byteOffsetSize);
if (_sourceType.location() == ReferenceType::Location::Storage) if (_sourceType.location() == DataLocation::Storage)
{ {
if (haveByteOffsetSource) if (haveByteOffsetSource)
m_context << eth::Instruction::DUP2; m_context << eth::Instruction::DUP2;
@ -231,7 +231,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
void ArrayUtils::clearArray(ArrayType const& _type) const void ArrayUtils::clearArray(ArrayType const& _type) const
{ {
unsigned stackHeightStart = m_context.getStackHeight(); unsigned stackHeightStart = m_context.getStackHeight();
solAssert(_type.location() == ReferenceType::Location::Storage, ""); solAssert(_type.location() == DataLocation::Storage, "");
if (_type.getBaseType()->getStorageBytes() < 32) if (_type.getBaseType()->getStorageBytes() < 32)
{ {
solAssert(_type.getBaseType()->isValueType(), "Invalid storage size for non-value type."); solAssert(_type.getBaseType()->isValueType(), "Invalid storage size for non-value type.");
@ -286,7 +286,7 @@ void ArrayUtils::clearArray(ArrayType const& _type) const
void ArrayUtils::clearDynamicArray(ArrayType const& _type) const void ArrayUtils::clearDynamicArray(ArrayType const& _type) const
{ {
solAssert(_type.location() == ReferenceType::Location::Storage, ""); solAssert(_type.location() == DataLocation::Storage, "");
solAssert(_type.isDynamicallySized(), ""); solAssert(_type.isDynamicallySized(), "");
unsigned stackHeightStart = m_context.getStackHeight(); unsigned stackHeightStart = m_context.getStackHeight();
@ -314,7 +314,7 @@ void ArrayUtils::clearDynamicArray(ArrayType const& _type) const
void ArrayUtils::resizeDynamicArray(const ArrayType& _type) const void ArrayUtils::resizeDynamicArray(const ArrayType& _type) const
{ {
solAssert(_type.location() == ReferenceType::Location::Storage, ""); solAssert(_type.location() == DataLocation::Storage, "");
solAssert(_type.isDynamicallySized(), ""); solAssert(_type.isDynamicallySized(), "");
if (!_type.isByteArray() && _type.getBaseType()->getStorageBytes() < 32) if (!_type.isByteArray() && _type.getBaseType()->getStorageBytes() < 32)
solAssert(_type.getBaseType()->isValueType(), "Invalid storage size for non-value type."); solAssert(_type.getBaseType()->isValueType(), "Invalid storage size for non-value type.");
@ -399,7 +399,7 @@ void ArrayUtils::clearStorageLoop(Type const& _type) const
void ArrayUtils::convertLengthToSize(ArrayType const& _arrayType, bool _pad) const void ArrayUtils::convertLengthToSize(ArrayType const& _arrayType, bool _pad) const
{ {
if (_arrayType.location() == ReferenceType::Location::Storage) if (_arrayType.location() == DataLocation::Storage)
{ {
if (_arrayType.getBaseType()->getStorageSize() <= 1) if (_arrayType.getBaseType()->getStorageSize() <= 1)
{ {
@ -437,13 +437,13 @@ void ArrayUtils::retrieveLength(ArrayType const& _arrayType) const
m_context << eth::Instruction::DUP1; m_context << eth::Instruction::DUP1;
switch (_arrayType.location()) switch (_arrayType.location())
{ {
case ReferenceType::Location::CallData: case DataLocation::CallData:
// length is stored on the stack // length is stored on the stack
break; break;
case ReferenceType::Location::Memory: case DataLocation::Memory:
m_context << eth::Instruction::MLOAD; m_context << eth::Instruction::MLOAD;
break; break;
case ReferenceType::Location::Storage: case DataLocation::Storage:
m_context << eth::Instruction::SLOAD; m_context << eth::Instruction::SLOAD;
break; break;
} }
@ -452,16 +452,16 @@ void ArrayUtils::retrieveLength(ArrayType const& _arrayType) const
void ArrayUtils::accessIndex(ArrayType const& _arrayType) const void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
{ {
ReferenceType::Location location = _arrayType.location(); DataLocation location = _arrayType.location();
eth::Instruction load = eth::Instruction load =
location == ReferenceType::Location::Storage ? eth::Instruction::SLOAD : location == DataLocation::Storage ? eth::Instruction::SLOAD :
location == ReferenceType::Location::Memory ? eth::Instruction::MLOAD : location == DataLocation::Memory ? eth::Instruction::MLOAD :
eth::Instruction::CALLDATALOAD; eth::Instruction::CALLDATALOAD;
// retrieve length // retrieve length
if (!_arrayType.isDynamicallySized()) if (!_arrayType.isDynamicallySized())
m_context << _arrayType.getLength(); m_context << _arrayType.getLength();
else if (location == ReferenceType::Location::CallData) else if (location == DataLocation::CallData)
// length is stored on the stack // length is stored on the stack
m_context << eth::Instruction::SWAP1; m_context << eth::Instruction::SWAP1;
else else
@ -476,20 +476,20 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
m_context << eth::Instruction::SWAP1; m_context << eth::Instruction::SWAP1;
if (_arrayType.isDynamicallySized()) if (_arrayType.isDynamicallySized())
{ {
if (location == ReferenceType::Location::Storage) if (location == DataLocation::Storage)
CompilerUtils(m_context).computeHashStatic(); CompilerUtils(m_context).computeHashStatic();
else if (location == ReferenceType::Location::Memory) else if (location == DataLocation::Memory)
m_context << u256(32) << eth::Instruction::ADD; m_context << u256(32) << eth::Instruction::ADD;
} }
// stack: <index> <data_ref> // stack: <index> <data_ref>
switch (location) switch (location)
{ {
case ReferenceType::Location::CallData: case DataLocation::CallData:
if (!_arrayType.isByteArray()) if (!_arrayType.isByteArray())
m_context {
<< eth::Instruction::SWAP1 m_context << eth::Instruction::SWAP1;
<< _arrayType.getBaseType()->getCalldataEncodedSize() m_context << _arrayType.getBaseType()->getCalldataEncodedSize() << eth::Instruction::MUL;
<< eth::Instruction::MUL; }
m_context << eth::Instruction::ADD; m_context << eth::Instruction::ADD;
if (_arrayType.getBaseType()->isValueType()) if (_arrayType.getBaseType()->isValueType())
CompilerUtils(m_context).loadFromMemoryDynamic( CompilerUtils(m_context).loadFromMemoryDynamic(
@ -499,7 +499,7 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
false false
); );
break; break;
case ReferenceType::Location::Storage: case DataLocation::Storage:
m_context << eth::Instruction::SWAP1; m_context << eth::Instruction::SWAP1;
if (_arrayType.getBaseType()->getStorageBytes() <= 16) if (_arrayType.getBaseType()->getStorageBytes() <= 16)
{ {
@ -527,7 +527,7 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
m_context << eth::Instruction::ADD << u256(0); m_context << eth::Instruction::ADD << u256(0);
} }
break; break;
case ReferenceType::Location::Memory: case DataLocation::Memory:
solAssert(false, "Memory lvalues not yet implemented."); solAssert(false, "Memory lvalues not yet implemented.");
} }
} }
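Note: the accessIndex hunk above picks SLOAD, MLOAD or CALLDATALOAD purely from the array's new DataLocation value. Below is a minimal, self-contained sketch of that dispatch; only the DataLocation names mirror the enum introduced in libsolidity/Types.h further down, while the Instruction enum and selectLoad() are illustrative stand-ins rather than compiler code.

#include <cassert>
#include <iostream>

// DataLocation mirrors the enum added in libsolidity/Types.h; Instruction and
// selectLoad() are simplified stand-ins for illustration only.
enum class DataLocation { Storage, CallData, Memory };
enum class Instruction { SLOAD, MLOAD, CALLDATALOAD };

// Same decision as in ArrayUtils::accessIndex: the load opcode depends only on
// where the array's data lives.
Instruction selectLoad(DataLocation _location)
{
	switch (_location)
	{
	case DataLocation::Storage: return Instruction::SLOAD;
	case DataLocation::Memory: return Instruction::MLOAD;
	case DataLocation::CallData: return Instruction::CALLDATALOAD;
	}
	assert(false);
	return Instruction::SLOAD;
}

int main()
{
	std::cout << (selectLoad(DataLocation::Storage) == Instruction::SLOAD) << "\n";        // 1
	std::cout << (selectLoad(DataLocation::CallData) == Instruction::CALLDATALOAD) << "\n"; // 1
}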

40
libsolidity/Compiler.cpp

@ -245,21 +245,35 @@ void Compiler::appendCalldataUnpacker(
{ {
// We do not check the calldata size, everything is zero-padded // We do not check the calldata size, everything is zero-padded
//@todo this does not yet support nested arrays
if (_startOffset == u256(-1)) if (_startOffset == u256(-1))
_startOffset = u256(CompilerUtils::dataStartOffset); _startOffset = u256(CompilerUtils::dataStartOffset);
m_context << _startOffset; m_context << _startOffset;
for (TypePointer const& type: _typeParameters) for (TypePointer const& type: _typeParameters)
{ {
// stack: v1 v2 ... v(k-1) mem_offset
switch (type->getCategory()) switch (type->getCategory())
{ {
case Type::Category::Array: case Type::Category::Array:
{ {
auto const& arrayType = dynamic_cast<ArrayType const&>(*type); auto const& arrayType = dynamic_cast<ArrayType const&>(*type);
if (arrayType.location() == ReferenceType::Location::CallData) solAssert(arrayType.location() != DataLocation::Storage, "");
solAssert(!arrayType.getBaseType()->isDynamicallySized(), "Nested arrays not yet implemented.");
if (_fromMemory)
{
solAssert(arrayType.location() == DataLocation::Memory, "");
// compute data pointer
//@todo once we support nested arrays, this offset needs to be dynamic.
m_context << eth::Instruction::DUP1 << _startOffset << eth::Instruction::ADD;
m_context << eth::Instruction::SWAP1 << u256(0x20) << eth::Instruction::ADD;
}
else
{ {
solAssert(!_fromMemory, ""); // first load from calldata and potentially convert to memory if arrayType is memory
if (type->isDynamicallySized()) TypePointer calldataType = arrayType.copyForLocation(DataLocation::CallData, false);
if (calldataType->isDynamicallySized())
{ {
// put on stack: data_pointer length // put on stack: data_pointer length
CompilerUtils(m_context).loadFromMemoryDynamic(IntegerType(256), !_fromMemory); CompilerUtils(m_context).loadFromMemoryDynamic(IntegerType(256), !_fromMemory);
@ -276,17 +290,17 @@ void Compiler::appendCalldataUnpacker(
{ {
// leave the pointer on the stack // leave the pointer on the stack
m_context << eth::Instruction::DUP1; m_context << eth::Instruction::DUP1;
m_context << u256(type->getCalldataEncodedSize()) << eth::Instruction::ADD; m_context << u256(calldataType->getCalldataEncodedSize()) << eth::Instruction::ADD;
}
if (arrayType.location() == DataLocation::Memory)
{
// copy to memory
// move calldata type up again
CompilerUtils(m_context).moveIntoStack(calldataType->getSizeOnStack());
CompilerUtils(m_context).convertType(*calldataType, arrayType);
// fetch next pointer again
CompilerUtils(m_context).moveToStackTop(arrayType.getSizeOnStack());
} }
}
else
{
solAssert(arrayType.location() == ReferenceType::Location::Memory, "");
// compute data pointer
m_context << eth::Instruction::DUP1 << _startOffset << eth::Instruction::ADD;
if (!_fromMemory)
solAssert(false, "Not yet implemented.");
m_context << eth::Instruction::SWAP1 << u256(0x20) << eth::Instruction::ADD;
} }
break; break;
} }
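Note: the reworked unpacker above first builds a calldata-located copy of the declared array type via copyForLocation, decodes the pointer/length according to the calldata layout, and only then converts to memory when the parameter is declared memory. The sketch below models that two-step idea with toy types; ToyArrayType and its copyForLocation are illustrative stand-ins, not the libsolidity classes.

#include <iostream>
#include <memory>

// Toy model of the pattern in the calldata unpacker: treat the encoded argument
// as calldata first, then copy to memory only if the declared location asks for it.
enum class DataLocation { Storage, CallData, Memory };

struct ToyArrayType
{
	DataLocation location;
	bool dynamicallySized;

	std::shared_ptr<ToyArrayType> copyForLocation(DataLocation _loc) const
	{
		auto copy = std::make_shared<ToyArrayType>(*this);
		copy->location = _loc;
		return copy;
	}
};

int main()
{
	ToyArrayType declared{DataLocation::Memory, true}; // e.g. a dynamically sized memory parameter
	auto calldataView = declared.copyForLocation(DataLocation::CallData);
	// step 1: decode according to the calldata layout
	std::cout << "decode as calldata: " << (calldataView->location == DataLocation::CallData) << "\n";
	// step 2: copy to memory because the declared location is memory
	std::cout << "copy to memory: " << (declared.location == DataLocation::Memory) << "\n";
}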

64
libsolidity/CompilerUtils.cpp

@ -107,16 +107,18 @@ void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBound
auto const& type = dynamic_cast<ArrayType const&>(_type); auto const& type = dynamic_cast<ArrayType const&>(_type);
solAssert(type.isByteArray(), "Non byte arrays not yet implemented here."); solAssert(type.isByteArray(), "Non byte arrays not yet implemented here.");
if (type.location() == ReferenceType::Location::CallData) if (type.location() == DataLocation::CallData)
{ {
if (!type.isDynamicallySized())
m_context << type.getLength();
// stack: target source_offset source_len // stack: target source_offset source_len
m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5; m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5;
// stack: target source_offset source_len source_len source_offset target // stack: target source_offset source_len source_len source_offset target
m_context << eth::Instruction::CALLDATACOPY; m_context << eth::Instruction::CALLDATACOPY;
m_context << eth::Instruction::DUP3 << eth::Instruction::ADD; m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
m_context << eth::Instruction::SWAP2 << eth::Instruction::POP << eth::Instruction::POP; m_context << eth::Instruction::SWAP2 << eth::Instruction::POP << eth::Instruction::POP;
} }
else if (type.location() == ReferenceType::Location::Memory) else if (type.location() == DataLocation::Memory)
{ {
// memcpy using the built-in contract // memcpy using the built-in contract
ArrayUtils(m_context).retrieveLength(type); ArrayUtils(m_context).retrieveLength(type);
@ -183,7 +185,7 @@ void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBound
} }
else else
{ {
solAssert(type.location() == ReferenceType::Location::Storage, ""); solAssert(type.location() == DataLocation::Storage, "");
m_context << eth::Instruction::POP; // remove offset, arrays always start new slot m_context << eth::Instruction::POP; // remove offset, arrays always start new slot
m_context << eth::Instruction::DUP1 << eth::Instruction::SLOAD; m_context << eth::Instruction::DUP1 << eth::Instruction::SLOAD;
// stack here: memory_offset storage_offset length_bytes // stack here: memory_offset storage_offset length_bytes
@ -274,10 +276,16 @@ void CompilerUtils::encodeToMemory(
else else
{ {
copyToStackTop(argSize - stackPos + dynPointers + 2, _givenTypes[i]->getSizeOnStack()); copyToStackTop(argSize - stackPos + dynPointers + 2, _givenTypes[i]->getSizeOnStack());
if (targetType->isValueType())
convertType(*_givenTypes[i], *targetType, true);
solAssert(!!targetType, "Externalable type expected."); solAssert(!!targetType, "Externalable type expected.");
storeInMemoryDynamic(*targetType, _padToWordBoundaries); TypePointer type = targetType;
if (
_givenTypes[i]->dataStoredIn(DataLocation::Storage) ||
_givenTypes[i]->dataStoredIn(DataLocation::CallData)
)
type = _givenTypes[i]; // delay conversion
else
convertType(*_givenTypes[i], *targetType, true);
storeInMemoryDynamic(*type, _padToWordBoundaries);
} }
stackPos += _givenTypes[i]->getSizeOnStack(); stackPos += _givenTypes[i]->getSizeOnStack();
} }
@ -304,13 +312,13 @@ void CompilerUtils::encodeToMemory(
// stack: ... <end_of_mem> <value...> // stack: ... <end_of_mem> <value...>
// copy length to memory // copy length to memory
m_context << eth::dupInstruction(1 + arrayType.getSizeOnStack()); m_context << eth::dupInstruction(1 + arrayType.getSizeOnStack());
if (arrayType.location() == ReferenceType::Location::CallData) if (arrayType.location() == DataLocation::CallData)
m_context << eth::Instruction::DUP2; // length is on stack m_context << eth::Instruction::DUP2; // length is on stack
else if (arrayType.location() == ReferenceType::Location::Storage) else if (arrayType.location() == DataLocation::Storage)
m_context << eth::Instruction::DUP3 << eth::Instruction::SLOAD; m_context << eth::Instruction::DUP3 << eth::Instruction::SLOAD;
else else
{ {
solAssert(arrayType.location() == ReferenceType::Location::Memory, ""); solAssert(arrayType.location() == DataLocation::Memory, "");
m_context << eth::Instruction::DUP2 << eth::Instruction::MLOAD; m_context << eth::Instruction::DUP2 << eth::Instruction::MLOAD;
} }
// stack: ... <end_of_mem> <value...> <end_of_mem'> <length> // stack: ... <end_of_mem> <value...> <end_of_mem'> <length>
@ -432,18 +440,18 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
ArrayType const& targetType = dynamic_cast<ArrayType const&>(_targetType); ArrayType const& targetType = dynamic_cast<ArrayType const&>(_targetType);
switch (targetType.location()) switch (targetType.location())
{ {
case ReferenceType::Location::Storage: case DataLocation::Storage:
// Other cases are done explicitly in LValue::storeValue, and only possible by assignment. // Other cases are done explicitly in LValue::storeValue, and only possible by assignment.
solAssert( solAssert(
targetType.isPointer() && targetType.isPointer() &&
typeOnStack.location() == ReferenceType::Location::Storage, typeOnStack.location() == DataLocation::Storage,
"Invalid conversion to storage type." "Invalid conversion to storage type."
); );
break; break;
case ReferenceType::Location::Memory: case DataLocation::Memory:
{ {
// Copy the array to a free position in memory, unless it is already in memory. // Copy the array to a free position in memory, unless it is already in memory.
if (typeOnStack.location() != ReferenceType::Location::Memory) if (typeOnStack.location() != DataLocation::Memory)
{ {
// stack: <source ref> (variably sized) // stack: <source ref> (variably sized)
unsigned stackSize = typeOnStack.getSizeOnStack(); unsigned stackSize = typeOnStack.getSizeOnStack();
@ -452,7 +460,7 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
// stack: <mem start> <source ref> (variably sized) // stack: <mem start> <source ref> (variably sized)
if (targetType.isDynamicallySized()) if (targetType.isDynamicallySized())
{ {
bool fromStorage = (typeOnStack.location() == ReferenceType::Location::Storage); bool fromStorage = (typeOnStack.location() == DataLocation::Storage);
// store length // store length
if (fromStorage) if (fromStorage)
{ {
@ -483,11 +491,25 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
// Stack <mem start> <mem end> // Stack <mem start> <mem end>
storeFreeMemoryPointer(); storeFreeMemoryPointer();
} }
else if (typeOnStack.location() == ReferenceType::Location::CallData) else if (typeOnStack.location() == DataLocation::CallData)
{ {
// Stack: <offset> <length> // Stack: <offset> [<length>]
//@todo // length is present if dynamically sized
solAssert(false, "Not yet implemented."); fetchFreeMemoryPointer();
moveIntoStack(typeOnStack.getSizeOnStack());
// stack: memptr calldataoffset [<length>]
if (typeOnStack.isDynamicallySized())
{
solAssert(targetType.isDynamicallySized(), "");
m_context << eth::Instruction::DUP3 << eth::Instruction::DUP2;
storeInMemoryDynamic(IntegerType(256));
moveIntoStack(typeOnStack.getSizeOnStack());
}
else
m_context << eth::Instruction::DUP2 << eth::Instruction::SWAP1;
// stack: mem_ptr mem_data_ptr calldataoffset [<length>]
storeInMemoryDynamic(typeOnStack);
storeFreeMemoryPointer();
} }
// nothing to do for memory to memory // nothing to do for memory to memory
break; break;
@ -504,8 +526,8 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
auto& targetType = dynamic_cast<StructType const&>(_targetType); auto& targetType = dynamic_cast<StructType const&>(_targetType);
auto& stackType = dynamic_cast<StructType const&>(_typeOnStack); auto& stackType = dynamic_cast<StructType const&>(_typeOnStack);
solAssert( solAssert(
targetType.location() == ReferenceType::Location::Storage && targetType.location() == DataLocation::Storage &&
stackType.location() == ReferenceType::Location::Storage, stackType.location() == DataLocation::Storage,
"Non-storage structs not yet implemented." "Non-storage structs not yet implemented."
); );
solAssert( solAssert(

5
libsolidity/CompilerUtils.h

@ -99,8 +99,9 @@ public:
bool _copyDynamicDataInPlace = false bool _copyDynamicDataInPlace = false
); );
/// Appends code for an implicit or explicit type conversion. For now this comprises only erasing /// Appends code for an implicit or explicit type conversion. This includes erasing higher
/// higher-order bits (@see appendHighBitCleanup) when widening integer. /// order bits (@see appendHighBitCleanup) when widening an integer, but also copying to memory
/// if a reference type is converted from calldata or storage to memory.
/// If @a _cleanupNeeded, high order bits cleanup is also done if no type conversion would be /// If @a _cleanupNeeded, high order bits cleanup is also done if no type conversion would be
/// necessary. /// necessary.
void convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded = false); void convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded = false);
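Note: the updated convertType documentation covers the new calldata-to-memory copy shown in the CompilerUtils.cpp hunk: for a dynamically sized array, the length is written as a 32-byte word at the free memory pointer, followed by the data itself. The sketch below only illustrates that resulting memory layout; writeMemoryArray is a hypothetical helper, not compiler code.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical helper illustrating the layout produced by the calldata->memory
// conversion: a 32-byte big-endian length word followed by the raw array bytes.
std::vector<uint8_t> writeMemoryArray(std::vector<uint8_t> const& _data)
{
	std::vector<uint8_t> mem(32 + _data.size(), 0);
	uint64_t len = _data.size();
	for (unsigned i = 0; i < 8; ++i)          // length in the low-order bytes of the word
		mem[31 - i] = uint8_t(len >> (8 * i));
	std::memcpy(mem.data() + 32, _data.data(), _data.size());
	return mem;
}

int main()
{
	auto mem = writeMemoryArray({'a', 'b', 'c'});
	std::cout << mem.size() << " bytes total, length word = "
	          << unsigned(mem[31]) << "\n"; // 35 bytes total, length word = 3
}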

55
libsolidity/ExpressionCompiler.cpp

@ -109,34 +109,40 @@ void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const&
} }
unsigned retSizeOnStack = 0; unsigned retSizeOnStack = 0;
solAssert(accessorType.getReturnParameterTypes().size() >= 1, ""); solAssert(accessorType.getReturnParameterTypes().size() >= 1, "");
auto const& returnTypes = accessorType.getReturnParameterTypes();
if (StructType const* structType = dynamic_cast<StructType const*>(returnType.get())) if (StructType const* structType = dynamic_cast<StructType const*>(returnType.get()))
{ {
// remove offset // remove offset
m_context << eth::Instruction::POP; m_context << eth::Instruction::POP;
auto const& names = accessorType.getReturnParameterNames(); auto const& names = accessorType.getReturnParameterNames();
auto const& types = accessorType.getReturnParameterTypes();
// struct // struct
for (size_t i = 0; i < names.size(); ++i) for (size_t i = 0; i < names.size(); ++i)
{ {
if (types[i]->getCategory() == Type::Category::Mapping || types[i]->getCategory() == Type::Category::Array) if (returnTypes[i]->getCategory() == Type::Category::Mapping)
continue; continue;
if (auto arrayType = dynamic_cast<ArrayType const*>(returnTypes[i].get()))
if (!arrayType->isByteArray())
continue;
pair<u256, unsigned> const& offsets = structType->getStorageOffsetsOfMember(names[i]); pair<u256, unsigned> const& offsets = structType->getStorageOffsetsOfMember(names[i]);
m_context << eth::Instruction::DUP1 << u256(offsets.first) << eth::Instruction::ADD << u256(offsets.second); m_context << eth::Instruction::DUP1 << u256(offsets.first) << eth::Instruction::ADD << u256(offsets.second);
StorageItem(m_context, *types[i]).retrieveValue(SourceLocation(), true); TypePointer memberType = structType->getMemberType(names[i]);
solAssert(types[i]->getSizeOnStack() == 1, "Returning struct elements with stack size != 1 is not yet implemented."); StorageItem(m_context, *memberType).retrieveValue(SourceLocation(), true);
m_context << eth::Instruction::SWAP1; utils().convertType(*memberType, *returnTypes[i]);
retSizeOnStack += types[i]->getSizeOnStack(); utils().moveToStackTop(returnTypes[i]->getSizeOnStack());
retSizeOnStack += returnTypes[i]->getSizeOnStack();
} }
// remove slot // remove slot
m_context << eth::Instruction::POP; m_context << eth::Instruction::POP;
} }
else else
{ {
// simple value // simple value or array
solAssert(accessorType.getReturnParameterTypes().size() == 1, ""); solAssert(returnTypes.size() == 1, "");
StorageItem(m_context, *returnType).retrieveValue(SourceLocation(), true); StorageItem(m_context, *returnType).retrieveValue(SourceLocation(), true);
retSizeOnStack = returnType->getSizeOnStack(); utils().convertType(*returnType, *returnTypes.front());
retSizeOnStack = returnTypes.front()->getSizeOnStack();
} }
solAssert(retSizeOnStack == utils().getSizeOnStack(returnTypes), "");
solAssert(retSizeOnStack <= 15, "Stack is too deep."); solAssert(retSizeOnStack <= 15, "Stack is too deep.");
m_context << eth::dupInstruction(retSizeOnStack + 1); m_context << eth::dupInstruction(retSizeOnStack + 1);
m_context.appendJump(eth::AssemblyItem::JumpType::OutOfFunction); m_context.appendJump(eth::AssemblyItem::JumpType::OutOfFunction);
@ -146,10 +152,13 @@ bool ExpressionCompiler::visit(Assignment const& _assignment)
{ {
CompilerContext::LocationSetter locationSetter(m_context, _assignment); CompilerContext::LocationSetter locationSetter(m_context, _assignment);
_assignment.getRightHandSide().accept(*this); _assignment.getRightHandSide().accept(*this);
if (_assignment.getType()->isValueType()) TypePointer type = _assignment.getRightHandSide().getType();
utils().convertType(*_assignment.getRightHandSide().getType(), *_assignment.getType()); if (!_assignment.getType()->dataStoredIn(DataLocation::Storage))
// We need this conversion mostly in the case of compound assignments. For non-value types {
// the conversion is done in LValue::storeValue. utils().convertType(*type, *_assignment.getType());
type = _assignment.getType();
}
_assignment.getLeftHandSide().accept(*this); _assignment.getLeftHandSide().accept(*this);
solAssert(!!m_currentLValue, "LValue not retrieved."); solAssert(!!m_currentLValue, "LValue not retrieved.");
@ -175,7 +184,7 @@ bool ExpressionCompiler::visit(Assignment const& _assignment)
m_context << eth::swapInstruction(itemSize + lvalueSize) << eth::Instruction::POP; m_context << eth::swapInstruction(itemSize + lvalueSize) << eth::Instruction::POP;
} }
} }
m_currentLValue->storeValue(*_assignment.getRightHandSide().getType(), _assignment.getLocation()); m_currentLValue->storeValue(*type, _assignment.getLocation());
m_currentLValue.reset(); m_currentLValue.reset();
return false; return false;
} }
@ -709,10 +718,10 @@ void ExpressionCompiler::endVisit(MemberAccess const& _memberAccess)
else else
switch (type.location()) switch (type.location())
{ {
case ReferenceType::Location::CallData: case DataLocation::CallData:
m_context << eth::Instruction::SWAP1 << eth::Instruction::POP; m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
break; break;
case ReferenceType::Location::Storage: case DataLocation::Storage:
setLValue<StorageArrayLength>(_memberAccess, type); setLValue<StorageArrayLength>(_memberAccess, type);
break; break;
default: default:
@ -755,13 +764,13 @@ bool ExpressionCompiler::visit(IndexAccess const& _indexAccess)
solAssert(_indexAccess.getIndexExpression(), "Index expression expected."); solAssert(_indexAccess.getIndexExpression(), "Index expression expected.");
// remove storage byte offset // remove storage byte offset
if (arrayType.location() == ReferenceType::Location::Storage) if (arrayType.location() == DataLocation::Storage)
m_context << eth::Instruction::POP; m_context << eth::Instruction::POP;
_indexAccess.getIndexExpression()->accept(*this); _indexAccess.getIndexExpression()->accept(*this);
// stack layout: <base_ref> [<length>] <index> // stack layout: <base_ref> [<length>] <index>
ArrayUtils(m_context).accessIndex(arrayType); ArrayUtils(m_context).accessIndex(arrayType);
if (arrayType.location() == ReferenceType::Location::Storage) if (arrayType.location() == DataLocation::Storage)
{ {
if (arrayType.isByteArray()) if (arrayType.isByteArray())
{ {
@ -1119,14 +1128,10 @@ void ExpressionCompiler::appendExternalFunctionCall(
void ExpressionCompiler::appendExpressionCopyToMemory(Type const& _expectedType, Expression const& _expression) void ExpressionCompiler::appendExpressionCopyToMemory(Type const& _expectedType, Expression const& _expression)
{ {
solAssert(_expectedType.isValueType(), "Not implemented for non-value types.");
_expression.accept(*this); _expression.accept(*this);
if (_expectedType.isValueType()) utils().convertType(*_expression.getType(), _expectedType, true);
{ utils().storeInMemoryDynamic(_expectedType);
utils().convertType(*_expression.getType(), _expectedType, true);
utils().storeInMemoryDynamic(_expectedType);
}
else
utils().storeInMemoryDynamic(*_expression.getType()->mobileType());
} }
void ExpressionCompiler::setLValueFromDeclaration(Declaration const& _declaration, Expression const& _expression) void ExpressionCompiler::setLValueFromDeclaration(Declaration const& _declaration, Expression const& _expression)
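Note: the Assignment hunk above converts the right-hand side to the left-hand type up front only when the target does not live in storage; for storage targets the conversion is delayed so that LValue::storeValue can perform it as part of the copy. The sketch below models only that decision; all names are illustrative stand-ins.

#include <iostream>
#include <string>

// Toy model of the decision in the assignment visitor: convert eagerly unless
// the assigned-to type is a storage reference, where storeValue converts later.
enum class DataLocation { Storage, CallData, Memory };

struct ToyType
{
	std::string name;
	bool isReference;
	DataLocation location;

	bool dataStoredIn(DataLocation _loc) const { return isReference && location == _loc; }
};

ToyType typeUsedForStore(ToyType const& _rhs, ToyType const& _lhs)
{
	if (_lhs.dataStoredIn(DataLocation::Storage))
		return _rhs; // delay conversion: storeValue copies from the RHS type
	return _lhs;     // convert now, then store a value already of the LHS type
}

int main()
{
	ToyType rhs{"uint8", false, DataLocation::Memory};
	ToyType lhsValue{"uint256", false, DataLocation::Memory};
	ToyType lhsStorage{"bytes storage ref", true, DataLocation::Storage};
	std::cout << typeUsedForStore(rhs, lhsValue).name << "\n";   // uint256
	std::cout << typeUsedForStore(rhs, lhsStorage).name << "\n"; // uint8
}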

8
libsolidity/NameAndTypeResolver.cpp

@ -439,7 +439,7 @@ void ReferencesResolver::endVisit(VariableDeclaration& _variable)
"Location has to be calldata for external functions " "Location has to be calldata for external functions "
"(remove the \"memory\" or \"storage\" keyword)." "(remove the \"memory\" or \"storage\" keyword)."
)); ));
type = ref->copyForLocation(ReferenceType::Location::CallData, true); type = ref->copyForLocation(DataLocation::CallData, true);
} }
else if (_variable.isCallableParameter() && _variable.getScope()->isPublic()) else if (_variable.isCallableParameter() && _variable.getScope()->isPublic())
{ {
@ -449,7 +449,7 @@ void ReferencesResolver::endVisit(VariableDeclaration& _variable)
"Location has to be memory for publicly visible functions " "Location has to be memory for publicly visible functions "
"(remove the \"storage\" keyword)." "(remove the \"storage\" keyword)."
)); ));
type = ref->copyForLocation(ReferenceType::Location::Memory, true); type = ref->copyForLocation(DataLocation::Memory, true);
} }
else else
{ {
@ -458,8 +458,8 @@ void ReferencesResolver::endVisit(VariableDeclaration& _variable)
bool isPointer = !_variable.isStateVariable(); bool isPointer = !_variable.isStateVariable();
type = ref->copyForLocation( type = ref->copyForLocation(
loc == Location::Memory ? loc == Location::Memory ?
ReferenceType::Location::Memory : DataLocation::Memory :
ReferenceType::Location::Storage, DataLocation::Storage,
isPointer isPointer
); );
} }

62
libsolidity/Types.cpp

@ -144,12 +144,13 @@ TypePointer Type::fromElementaryTypeName(Token::Value _typeToken)
else if (_typeToken == Token::Bool) else if (_typeToken == Token::Bool)
return make_shared<BoolType>(); return make_shared<BoolType>();
else if (_typeToken == Token::Bytes) else if (_typeToken == Token::Bytes)
return make_shared<ArrayType>(ReferenceType::Location::Storage); return make_shared<ArrayType>(DataLocation::Storage);
else if (_typeToken == Token::String) else if (_typeToken == Token::String)
return make_shared<ArrayType>(ReferenceType::Location::Storage, true); return make_shared<ArrayType>(DataLocation::Storage, true);
else else
BOOST_THROW_EXCEPTION(InternalCompilerError() << errinfo_comment("Unable to convert elementary typename " + BOOST_THROW_EXCEPTION(InternalCompilerError() << errinfo_comment(
std::string(Token::toString(_typeToken)) + " to type.")); "Unable to convert elementary typename " + std::string(Token::toString(_typeToken)) + " to type."
));
} }
TypePointer Type::fromElementaryTypeName(string const& _name) TypePointer Type::fromElementaryTypeName(string const& _name)
@ -180,7 +181,7 @@ TypePointer Type::fromMapping(ElementaryTypeName& _keyType, TypeName& _valueType
if (!valueType) if (!valueType)
BOOST_THROW_EXCEPTION(_valueType.createTypeError("Invalid type name.")); BOOST_THROW_EXCEPTION(_valueType.createTypeError("Invalid type name."));
// Convert value type to storage reference. // Convert value type to storage reference.
valueType = ReferenceType::copyForLocationIfReference(ReferenceType::Location::Storage, valueType); valueType = ReferenceType::copyForLocationIfReference(DataLocation::Storage, valueType);
return make_shared<MappingType>(keyType, valueType); return make_shared<MappingType>(keyType, valueType);
} }
@ -198,10 +199,10 @@ TypePointer Type::fromArrayTypeName(TypeName& _baseTypeName, Expression* _length
auto const* length = dynamic_cast<IntegerConstantType const*>(_length->getType().get()); auto const* length = dynamic_cast<IntegerConstantType const*>(_length->getType().get());
if (!length) if (!length)
BOOST_THROW_EXCEPTION(_length->createTypeError("Invalid array length.")); BOOST_THROW_EXCEPTION(_length->createTypeError("Invalid array length."));
return make_shared<ArrayType>(ReferenceType::Location::Storage, baseType, length->literalValue(nullptr)); return make_shared<ArrayType>(DataLocation::Storage, baseType, length->literalValue(nullptr));
} }
else else
return make_shared<ArrayType>(ReferenceType::Location::Storage, baseType); return make_shared<ArrayType>(DataLocation::Storage, baseType);
} }
TypePointer Type::forLiteral(Literal const& _literal) TypePointer Type::forLiteral(Literal const& _literal)
@ -670,7 +671,7 @@ TypePointer ContractType::unaryOperatorResult(Token::Value _operator) const
return _operator == Token::Delete ? make_shared<VoidType>() : TypePointer(); return _operator == Token::Delete ? make_shared<VoidType>() : TypePointer();
} }
TypePointer ReferenceType::copyForLocationIfReference(Location _location, TypePointer const& _type) TypePointer ReferenceType::copyForLocationIfReference(DataLocation _location, TypePointer const& _type)
{ {
if (auto type = dynamic_cast<ReferenceType const*>(_type.get())) if (auto type = dynamic_cast<ReferenceType const*>(_type.get()))
return type->copyForLocation(_location, false); return type->copyForLocation(_location, false);
@ -686,11 +687,11 @@ string ReferenceType::stringForReferencePart() const
{ {
switch (m_location) switch (m_location)
{ {
case Location::Storage: case DataLocation::Storage:
return string("storage ") + (m_isPointer ? "pointer" : "ref"); return string("storage ") + (m_isPointer ? "pointer" : "ref");
case Location::CallData: case DataLocation::CallData:
return "calldata"; return "calldata";
case Location::Memory: case DataLocation::Memory:
return "memory"; return "memory";
} }
solAssert(false, ""); solAssert(false, "");
@ -705,11 +706,11 @@ bool ArrayType::isImplicitlyConvertibleTo(const Type& _convertTo) const
if (convertTo.isByteArray() != isByteArray() || convertTo.isString() != isString()) if (convertTo.isByteArray() != isByteArray() || convertTo.isString() != isString())
return false; return false;
// memory/calldata to storage can be converted, but only to a direct storage reference // memory/calldata to storage can be converted, but only to a direct storage reference
if (convertTo.location() == Location::Storage && location() != Location::Storage && convertTo.isPointer()) if (convertTo.location() == DataLocation::Storage && location() != DataLocation::Storage && convertTo.isPointer())
return false; return false;
if (convertTo.location() == Location::CallData && location() != convertTo.location()) if (convertTo.location() == DataLocation::CallData && location() != convertTo.location())
return false; return false;
if (convertTo.location() == Location::Storage && !convertTo.isPointer()) if (convertTo.location() == DataLocation::Storage && !convertTo.isPointer())
{ {
// Less restrictive conversion, since we need to copy anyway. // Less restrictive conversion, since we need to copy anyway.
if (!getBaseType()->isImplicitlyConvertibleTo(*convertTo.getBaseType())) if (!getBaseType()->isImplicitlyConvertibleTo(*convertTo.getBaseType()))
@ -788,10 +789,10 @@ u256 ArrayType::getStorageSize() const
unsigned ArrayType::getSizeOnStack() const unsigned ArrayType::getSizeOnStack() const
{ {
if (m_location == Location::CallData) if (m_location == DataLocation::CallData)
// offset [length] (stack top) // offset [length] (stack top)
return 1 + (isDynamicallySized() ? 1 : 0); return 1 + (isDynamicallySized() ? 1 : 0);
else if (m_location == Location::Storage) else if (m_location == DataLocation::Storage)
// storage_key storage_offset // storage_key storage_offset
return 2; return 2;
else else
@ -828,12 +829,12 @@ TypePointer ArrayType::externalType() const
return TypePointer(); return TypePointer();
if (isDynamicallySized()) if (isDynamicallySized())
return std::make_shared<ArrayType>(Location::CallData, m_baseType->externalType()); return std::make_shared<ArrayType>(DataLocation::CallData, m_baseType->externalType());
else else
return std::make_shared<ArrayType>(Location::CallData, m_baseType->externalType(), m_length); return std::make_shared<ArrayType>(DataLocation::CallData, m_baseType->externalType(), m_length);
} }
TypePointer ArrayType::copyForLocation(ReferenceType::Location _location, bool _isPointer) const TypePointer ArrayType::copyForLocation(DataLocation _location, bool _isPointer) const
{ {
auto copy = make_shared<ArrayType>(_location); auto copy = make_shared<ArrayType>(_location);
copy->m_isPointer = _isPointer; copy->m_isPointer = _isPointer;
@ -949,9 +950,9 @@ bool StructType::isImplicitlyConvertibleTo(const Type& _convertTo) const
return false; return false;
auto& convertTo = dynamic_cast<StructType const&>(_convertTo); auto& convertTo = dynamic_cast<StructType const&>(_convertTo);
// memory/calldata to storage can be converted, but only to a direct storage reference // memory/calldata to storage can be converted, but only to a direct storage reference
if (convertTo.location() == Location::Storage && location() != Location::Storage && convertTo.isPointer()) if (convertTo.location() == DataLocation::Storage && location() != DataLocation::Storage && convertTo.isPointer())
return false; return false;
if (convertTo.location() == Location::CallData && location() != convertTo.location()) if (convertTo.location() == DataLocation::CallData && location() != convertTo.location())
return false; return false;
return this->m_struct == convertTo.m_struct; return this->m_struct == convertTo.m_struct;
} }
@ -1009,7 +1010,7 @@ MemberList const& StructType::getMembers() const
return *m_members; return *m_members;
} }
TypePointer StructType::copyForLocation(ReferenceType::Location _location, bool _isPointer) const TypePointer StructType::copyForLocation(DataLocation _location, bool _isPointer) const
{ {
auto copy = make_shared<StructType>(m_struct); auto copy = make_shared<StructType>(m_struct);
copy->m_location = _location; copy->m_location = _location;
@ -1115,6 +1116,9 @@ FunctionType::FunctionType(VariableDeclaration const& _varDecl):
} }
else if (auto arrayType = dynamic_cast<ArrayType const*>(returnType.get())) else if (auto arrayType = dynamic_cast<ArrayType const*>(returnType.get()))
{ {
if (arrayType->isByteArray())
// Return byte arrays as a whole.
break;
returnType = arrayType->getBaseType(); returnType = arrayType->getBaseType();
paramNames.push_back(""); paramNames.push_back("");
paramTypes.push_back(make_shared<IntegerType>(256)); paramTypes.push_back(make_shared<IntegerType>(256));
@ -1128,15 +1132,21 @@ FunctionType::FunctionType(VariableDeclaration const& _varDecl):
if (auto structType = dynamic_cast<StructType const*>(returnType.get())) if (auto structType = dynamic_cast<StructType const*>(returnType.get()))
{ {
for (auto const& member: structType->getMembers()) for (auto const& member: structType->getMembers())
if (member.type->getCategory() != Category::Mapping && member.type->getCategory() != Category::Array) if (member.type->getCategory() != Category::Mapping)
{ {
retParamNames.push_back(member.name); if (auto arrayType = dynamic_cast<ArrayType const*>(member.type.get()))
if (!arrayType->isByteArray())
continue;
retParams.push_back(member.type); retParams.push_back(member.type);
retParamNames.push_back(member.name);
} }
} }
else else
{ {
retParams.push_back(returnType); retParams.push_back(ReferenceType::copyForLocationIfReference(
DataLocation::Memory,
returnType
));
retParamNames.push_back(""); retParamNames.push_back("");
} }
@ -1549,7 +1559,7 @@ MagicType::MagicType(MagicType::Kind _kind):
{"sender", make_shared<IntegerType>(0, IntegerType::Modifier::Address)}, {"sender", make_shared<IntegerType>(0, IntegerType::Modifier::Address)},
{"gas", make_shared<IntegerType>(256)}, {"gas", make_shared<IntegerType>(256)},
{"value", make_shared<IntegerType>(256)}, {"value", make_shared<IntegerType>(256)},
{"data", make_shared<ArrayType>(ReferenceType::Location::CallData)}, {"data", make_shared<ArrayType>(DataLocation::CallData)},
{"sig", make_shared<FixedBytesType>(4)} {"sig", make_shared<FixedBytesType>(4)}
}); });
break; break;
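Note: in the FunctionType(VariableDeclaration) hunks above, automatically generated accessors now skip mapping members and non-byte arrays inside structs, but return byte arrays whole. The sketch below is a compact stand-alone version of that filter over a toy member list; the Category values and Member struct are illustrative stand-ins, not the libsolidity types.

#include <iostream>
#include <string>
#include <vector>

// Illustrative member filter for accessor return types: mappings are skipped,
// and arrays are skipped unless they are byte arrays.
enum class Category { Value, Mapping, Array };

struct Member
{
	std::string name;
	Category category;
	bool isByteArray; // only meaningful for Category::Array
};

bool returnedByAccessor(Member const& _m)
{
	if (_m.category == Category::Mapping)
		return false;
	if (_m.category == Category::Array && !_m.isByteArray)
		return false;
	return true;
}

int main()
{
	std::vector<Member> members{
		{"balance", Category::Value, false},
		{"lookup", Category::Mapping, false},
		{"history", Category::Array, false},
		{"payload", Category::Array, true}, // byte array: returned as a whole
	};
	for (auto const& m: members)
		if (returnedByAccessor(m))
			std::cout << m.name << "\n"; // balance, payload
}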

32
libsolidity/Types.h

@ -44,6 +44,8 @@ using FunctionTypePointer = std::shared_ptr<FunctionType const>;
using TypePointers = std::vector<TypePointer>; using TypePointers = std::vector<TypePointer>;
enum class DataLocation { Storage, CallData, Memory };
/** /**
* Helper class to compute storage offsets of members of structs and contracts. * Helper class to compute storage offsets of members of structs and contracts.
*/ */
@ -202,6 +204,9 @@ public:
/// This returns the corresponding integer type for IntegerConstantTypes and the pointer type /// This returns the corresponding integer type for IntegerConstantTypes and the pointer type
/// for storage reference types. /// for storage reference types.
virtual TypePointer mobileType() const { return shared_from_this(); } virtual TypePointer mobileType() const { return shared_from_this(); }
/// @returns true if this is a non-value type and the data of this type is stored at the
/// given location.
virtual bool dataStoredIn(DataLocation) const { return false; }
/// Returns the list of all members of this type. Default implementation: no members. /// Returns the list of all members of this type. Default implementation: no members.
virtual MemberList const& getMembers() const { return EmptyMemberList; } virtual MemberList const& getMembers() const { return EmptyMemberList; }
@ -365,15 +370,15 @@ public:
class ReferenceType: public Type class ReferenceType: public Type
{ {
public: public:
enum class Location { Storage, CallData, Memory }; explicit ReferenceType(DataLocation _location): m_location(_location) {}
explicit ReferenceType(Location _location): m_location(_location) {} DataLocation location() const { return m_location; }
Location location() const { return m_location; }
/// @returns a copy of this type with location (recursively) changed to @a _location, /// @returns a copy of this type with location (recursively) changed to @a _location,
/// whereas isPointer is only shallowly changed - the deep copy is always a bound reference. /// whereas isPointer is only shallowly changed - the deep copy is always a bound reference.
virtual TypePointer copyForLocation(Location _location, bool _isPointer) const = 0; virtual TypePointer copyForLocation(DataLocation _location, bool _isPointer) const = 0;
virtual TypePointer mobileType() const override { return copyForLocation(m_location, true); } virtual TypePointer mobileType() const override { return copyForLocation(m_location, true); }
virtual bool dataStoredIn(DataLocation _location) const override { return m_location == _location; }
/// Storage references can be pointers or bound references. In general, local variables are of /// Storage references can be pointers or bound references. In general, local variables are of
/// pointer type, state variables are bound references. Assignments to pointers or deleting /// pointer type, state variables are bound references. Assignments to pointers or deleting
@ -389,14 +394,14 @@ public:
/// @returns a copy of @a _type having the same location as this (and is not a pointer type) /// @returns a copy of @a _type having the same location as this (and is not a pointer type)
/// if _type is a reference type and an unmodified copy of _type otherwise. /// if _type is a reference type and an unmodified copy of _type otherwise.
/// This function is mostly useful to modify inner types appropriately. /// This function is mostly useful to modify inner types appropriately.
static TypePointer copyForLocationIfReference(Location _location, TypePointer const& _type); static TypePointer copyForLocationIfReference(DataLocation _location, TypePointer const& _type);
protected: protected:
TypePointer copyForLocationIfReference(TypePointer const& _type) const; TypePointer copyForLocationIfReference(TypePointer const& _type) const;
/// @returns a human-readable description of the reference part of the type. /// @returns a human-readable description of the reference part of the type.
std::string stringForReferencePart() const; std::string stringForReferencePart() const;
Location m_location = Location::Storage; DataLocation m_location = DataLocation::Storage;
bool m_isPointer = true; bool m_isPointer = true;
}; };
@ -413,20 +418,20 @@ public:
virtual Category getCategory() const override { return Category::Array; } virtual Category getCategory() const override { return Category::Array; }
/// Constructor for a byte array ("bytes") and string. /// Constructor for a byte array ("bytes") and string.
explicit ArrayType(Location _location, bool _isString = false): explicit ArrayType(DataLocation _location, bool _isString = false):
ReferenceType(_location), ReferenceType(_location),
m_arrayKind(_isString ? ArrayKind::String : ArrayKind::Bytes), m_arrayKind(_isString ? ArrayKind::String : ArrayKind::Bytes),
m_baseType(std::make_shared<FixedBytesType>(1)) m_baseType(std::make_shared<FixedBytesType>(1))
{ {
} }
/// Constructor for a dynamically sized array type ("type[]") /// Constructor for a dynamically sized array type ("type[]")
ArrayType(Location _location, TypePointer const& _baseType): ArrayType(DataLocation _location, TypePointer const& _baseType):
ReferenceType(_location), ReferenceType(_location),
m_baseType(copyForLocationIfReference(_baseType)) m_baseType(copyForLocationIfReference(_baseType))
{ {
} }
/// Constructor for a fixed-size array type ("type[20]") /// Constructor for a fixed-size array type ("type[20]")
ArrayType(Location _location, TypePointer const& _baseType, u256 const& _length): ArrayType(DataLocation _location, TypePointer const& _baseType, u256 const& _length):
ReferenceType(_location), ReferenceType(_location),
m_baseType(copyForLocationIfReference(_baseType)), m_baseType(copyForLocationIfReference(_baseType)),
m_hasDynamicLength(false), m_hasDynamicLength(false),
@ -454,7 +459,7 @@ public:
TypePointer const& getBaseType() const { solAssert(!!m_baseType, ""); return m_baseType;} TypePointer const& getBaseType() const { solAssert(!!m_baseType, ""); return m_baseType;}
u256 const& getLength() const { return m_length; } u256 const& getLength() const { return m_length; }
TypePointer copyForLocation(Location _location, bool _isPointer) const override; TypePointer copyForLocation(DataLocation _location, bool _isPointer) const override;
private: private:
/// String is interpreted as a subtype of Bytes. /// String is interpreted as a subtype of Bytes.
@ -533,7 +538,7 @@ public:
virtual Category getCategory() const override { return Category::Struct; } virtual Category getCategory() const override { return Category::Struct; }
explicit StructType(StructDefinition const& _struct): explicit StructType(StructDefinition const& _struct):
//@todo only storage until we have non-storage structs //@todo only storage until we have non-storage structs
ReferenceType(Location::Storage), m_struct(_struct) {} ReferenceType(DataLocation::Storage), m_struct(_struct) {}
virtual bool isImplicitlyConvertibleTo(const Type& _convertTo) const override; virtual bool isImplicitlyConvertibleTo(const Type& _convertTo) const override;
virtual TypePointer unaryOperatorResult(Token::Value _operator) const override; virtual TypePointer unaryOperatorResult(Token::Value _operator) const override;
virtual bool operator==(Type const& _other) const override; virtual bool operator==(Type const& _other) const override;
@ -544,7 +549,7 @@ public:
virtual MemberList const& getMembers() const override; virtual MemberList const& getMembers() const override;
TypePointer copyForLocation(Location _location, bool _isPointer) const override; TypePointer copyForLocation(DataLocation _location, bool _isPointer) const override;
std::pair<u256, unsigned> const& getStorageOffsetsOfMember(std::string const& _name) const; std::pair<u256, unsigned> const& getStorageOffsetsOfMember(std::string const& _name) const;
@ -636,8 +641,11 @@ public:
FunctionTypePointer externalFunctionType() const; FunctionTypePointer externalFunctionType() const;
virtual TypePointer externalType() const override { return externalFunctionType(); } virtual TypePointer externalType() const override { return externalFunctionType(); }
/// Creates the type of a function.
explicit FunctionType(FunctionDefinition const& _function, bool _isInternal = true); explicit FunctionType(FunctionDefinition const& _function, bool _isInternal = true);
/// Creates the accessor function type of a state variable.
explicit FunctionType(VariableDeclaration const& _varDecl); explicit FunctionType(VariableDeclaration const& _varDecl);
/// Creates the function type of an event.
explicit FunctionType(EventDefinition const& _event); explicit FunctionType(EventDefinition const& _event);
FunctionType( FunctionType(
strings const& _parameterTypes, strings const& _parameterTypes,
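Note: the Types.h changes above replace the nested ReferenceType::Location with a free-standing DataLocation enum and add Type::dataStoredIn() so callers can ask where a type's data lives without a dynamic_cast. Below is a minimal sketch of that interface shape; the toy Type and ReferenceType classes only mirror the pattern and are not the real class hierarchy.

#include <iostream>
#include <memory>

// Toy classes mirroring the interface pattern: the base Type answers
// dataStoredIn() with false, reference types answer based on their location.
enum class DataLocation { Storage, CallData, Memory };

class Type
{
public:
	virtual ~Type() = default;
	virtual bool dataStoredIn(DataLocation) const { return false; }
};

class ReferenceType: public Type
{
public:
	explicit ReferenceType(DataLocation _location): m_location(_location) {}
	DataLocation location() const { return m_location; }
	bool dataStoredIn(DataLocation _location) const override { return m_location == _location; }
protected:
	DataLocation m_location;
};

int main()
{
	std::shared_ptr<Type> value = std::make_shared<Type>();
	std::shared_ptr<Type> array = std::make_shared<ReferenceType>(DataLocation::Storage);
	std::cout << value->dataStoredIn(DataLocation::Storage) << "\n"; // 0
	std::cout << array->dataStoredIn(DataLocation::Storage) << "\n"; // 1
}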

3
libweb3jsonrpc/CMakeLists.txt

@ -13,7 +13,7 @@ include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
include_directories(BEFORE ..) include_directories(BEFORE ..)
include_directories(${MHD_INCLUDE_DIRS}) include_directories(${MHD_INCLUDE_DIRS})
include_directories(${JSON_RPC_CPP_INCLUDE_DIRS}) include_directories(${JSON_RPC_CPP_INCLUDE_DIRS})
include_directories(${LEVELDB_INCLUDE_DIRS}) include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS}) include_directories(${Boost_INCLUDE_DIRS})
set(EXECUTABLE web3jsonrpc) set(EXECUTABLE web3jsonrpc)
@ -22,7 +22,6 @@ file(GLOB HEADERS "*.h")
add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS}) add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS})
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${JSONCPP_LIBRARIES}) target_link_libraries(${EXECUTABLE} ${JSONCPP_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${JSON_RPC_CPP_SERVER_LIBRARIES}) target_link_libraries(${EXECUTABLE} ${JSON_RPC_CPP_SERVER_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${MHD_LIBRARIES}) target_link_libraries(${EXECUTABLE} ${MHD_LIBRARIES})

2
libweb3jsonrpc/JsonHelper.h

@ -43,7 +43,7 @@ namespace eth
{ {
class Transaction; class Transaction;
class BlockDetails; struct BlockDetails;
class Interface; class Interface;
using Transactions = std::vector<Transaction>; using Transactions = std::vector<Transaction>;
using UncleHashes = h256s; using UncleHashes = h256s;

11
libweb3jsonrpc/WebThreeStubServer.cpp

@ -205,7 +205,16 @@ Json::Value WebThreeStubServer::admin_eth_newAccount(Json::Value const& _info, s
bool WebThreeStubServer::admin_eth_setMiningBenefactor(std::string const& _uuidOrAddress, std::string const& _session) bool WebThreeStubServer::admin_eth_setMiningBenefactor(std::string const& _uuidOrAddress, std::string const& _session)
{ {
ADMIN; ADMIN;
(void)_uuidOrAddress; Address a;
h128 uuid = fromUUID(_uuidOrAddress);
if (uuid)
a = m_keyMan.address(uuid);
else if (isHash<Address>(_uuidOrAddress))
a = Address(_uuidOrAddress);
else
throw jsonrpc::JsonRpcException("Invalid UUID or address");
if (m_setMiningBenefactor)
m_setMiningBenefactor(a);
return true; return true;
} }
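Note: admin_eth_setMiningBenefactor above now accepts either a key UUID or a raw address and resolves both to an Address before invoking the callback, throwing a JSON-RPC error otherwise. The sketch below shows only that either/or dispatch; looksLikeUuid and looksLikeAddress are hypothetical helpers standing in for fromUUID and isHash<Address>.

#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical helpers for illustration; only the dispatch shape matters here.
bool looksLikeUuid(std::string const& _s) { return _s.find('-') != std::string::npos; }
bool looksLikeAddress(std::string const& _s) { return _s.size() == 40 || _s.size() == 42; }

std::string resolveBenefactor(std::string const& _uuidOrAddress)
{
	if (looksLikeUuid(_uuidOrAddress))
		return "address looked up from key manager for " + _uuidOrAddress;
	if (looksLikeAddress(_uuidOrAddress))
		return "address parsed directly: " + _uuidOrAddress;
	throw std::invalid_argument("Invalid UUID or address");
}

int main()
{
	std::cout << resolveBenefactor("0123456789abcdef0123456789abcdef01234567") << "\n";
	std::cout << resolveBenefactor("0552cea7-30b6-4b55-8e87-b1ca0b0d2a5d") << "\n";
}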

17
libweb3jsonrpc/WebThreeStubServer.h

@ -23,11 +23,7 @@
#pragma once #pragma once
#pragma warning(push) #include <libdevcore/db.h>
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include "WebThreeStubServerBase.h" #include "WebThreeStubServerBase.h"
namespace dev namespace dev
@ -60,6 +56,8 @@ public:
std::string newSession(SessionPermissions const& _p); std::string newSession(SessionPermissions const& _p);
void addSession(std::string const& _session, SessionPermissions const& _p) { m_sessions[_session] = _p; } void addSession(std::string const& _session, SessionPermissions const& _p) { m_sessions[_session] = _p; }
virtual void setMiningBenefactorChanger(std::function<void(Address const&)> const& _f) { m_setMiningBenefactor = _f; }
private: private:
virtual bool hasPriviledgeLevel(std::string const& _session, Priviledge _l) const override { auto it = m_sessions.find(_session); return it != m_sessions.end() && it->second.priviledges.count(_l); } virtual bool hasPriviledgeLevel(std::string const& _session, Priviledge _l) const override { auto it = m_sessions.find(_session); return it != m_sessions.end() && it->second.priviledges.count(_l); }
@ -80,9 +78,9 @@ private:
virtual std::string admin_eth_blockQueueFirstUnknown(std::string const& _session) override; virtual std::string admin_eth_blockQueueFirstUnknown(std::string const& _session) override;
virtual bool admin_eth_blockQueueRetryUnknown(std::string const& _session) override; virtual bool admin_eth_blockQueueRetryUnknown(std::string const& _session) override;
virtual bool admin_eth_setMiningBenefactor(std::string const& _uuidOrAddress, std::string const& _session) override;
virtual Json::Value admin_eth_allAccounts(std::string const& _session) override; virtual Json::Value admin_eth_allAccounts(std::string const& _session) override;
virtual Json::Value admin_eth_newAccount(const Json::Value& _info, std::string const& _session) override; virtual Json::Value admin_eth_newAccount(const Json::Value& _info, std::string const& _session) override;
virtual bool admin_eth_setMiningBenefactor(std::string const& _uuidOrAddress, std::string const& _session) override;
virtual Json::Value admin_eth_inspect(std::string const& _address, std::string const& _session) override; virtual Json::Value admin_eth_inspect(std::string const& _address, std::string const& _session) override;
virtual Json::Value admin_eth_reprocess(std::string const& _blockNumberOrHash, std::string const& _session) override; virtual Json::Value admin_eth_reprocess(std::string const& _blockNumberOrHash, std::string const& _session) override;
virtual Json::Value admin_eth_vmTrace(std::string const& _blockNumberOrHash, int _txIndex, std::string const& _session) override; virtual Json::Value admin_eth_vmTrace(std::string const& _blockNumberOrHash, int _txIndex, std::string const& _session) override;
@ -97,10 +95,11 @@ private:
dev::WebThreeDirect& m_web3; dev::WebThreeDirect& m_web3;
dev::eth::KeyManager& m_keyMan; dev::eth::KeyManager& m_keyMan;
dev::eth::TrivialGasPricer& m_gp; dev::eth::TrivialGasPricer& m_gp;
leveldb::ReadOptions m_readOptions; ldb::ReadOptions m_readOptions;
leveldb::WriteOptions m_writeOptions; ldb::WriteOptions m_writeOptions;
leveldb::DB* m_db; ldb::DB* m_db;
std::function<void(Address const&)> m_setMiningBenefactor;
std::unordered_map<std::string, SessionPermissions> m_sessions; std::unordered_map<std::string, SessionPermissions> m_sessions;
}; };

4
libwebthree/CMakeLists.txt

@ -12,7 +12,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTATICLIB")
aux_source_directory(. SRC_LIST) aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..) include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS}) include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS}) include_directories(${Boost_INCLUDE_DIRS})
set(EXECUTABLE webthree) set(EXECUTABLE webthree)
@ -21,8 +21,6 @@ file(GLOB HEADERS "*.h")
add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS}) add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS})
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ethereum) target_link_libraries(${EXECUTABLE} ethereum)
target_link_libraries(${EXECUTABLE} evm) target_link_libraries(${EXECUTABLE} evm)
target_link_libraries(${EXECUTABLE} lll) target_link_libraries(${EXECUTABLE} lll)

36
libwhisper/BloomFilter.cpp

@ -25,40 +25,4 @@ using namespace std;
using namespace dev; using namespace dev;
using namespace dev::shh; using namespace dev::shh;
static unsigned const c_mask[] = { 1, 2, 4, 8, 16, 32, 64, 128 };
void TopicBloomFilter::addRaw(AbridgedTopic const& _h)
{
*this |= _h;
for (unsigned i = 0; i < CounterSize; ++i)
if (isBitSet(_h, i))
{
if (m_refCounter[i] != numeric_limits<uint16_t>::max())
m_refCounter[i]++;
else
BOOST_THROW_EXCEPTION(Overflow());
}
}
void TopicBloomFilter::removeRaw(AbridgedTopic const& _h)
{
for (unsigned i = 0; i < CounterSize; ++i)
if (isBitSet(_h, i))
{
if (m_refCounter[i])
m_refCounter[i]--;
if (!m_refCounter[i])
(*this)[i / 8] &= ~c_mask[i % 8];
}
}
bool TopicBloomFilter::isBitSet(AbridgedTopic const& _h, unsigned _index)
{
unsigned iByte = _index / 8;
unsigned iBit = _index % 8;
return (_h[iByte] & c_mask[iBit]) != 0;
}

63
libwhisper/BloomFilter.h

@ -28,30 +28,71 @@ namespace dev
namespace shh namespace shh
{ {
class TopicBloomFilter: public AbridgedTopic template <unsigned N>
class TopicBloomFilterBase: public FixedHash<N>
{ {
public: public:
TopicBloomFilter() { init(); } TopicBloomFilterBase() { init(); }
TopicBloomFilter(AbridgedTopic const& _h): AbridgedTopic(_h) { init(); } TopicBloomFilterBase(FixedHash<N> const& _h): FixedHash<N>(_h) { init(); }
void addBloom(AbridgedTopic const& _h) { addRaw(_h.template bloomPart<BitsPerBloom, 4>()); } void addBloom(dev::shh::AbridgedTopic const& _h) { addRaw(_h.template bloomPart<BitsPerBloom, N>()); }
void removeBloom(AbridgedTopic const& _h) { removeRaw(_h.template bloomPart<BitsPerBloom, 4>()); } void removeBloom(dev::shh::AbridgedTopic const& _h) { removeRaw(_h.template bloomPart<BitsPerBloom, N>()); }
bool containsBloom(AbridgedTopic const& _h) const { return contains(_h.template bloomPart<BitsPerBloom, 4>()); } bool containsBloom(dev::shh::AbridgedTopic const& _h) const { return this->contains(_h.template bloomPart<BitsPerBloom, N>()); }
void addRaw(AbridgedTopic const& _h); void addRaw(FixedHash<N> const& _h);
void removeRaw(AbridgedTopic const& _h); void removeRaw(FixedHash<N> const& _h);
bool containsRaw(AbridgedTopic const& _h) const { return contains(_h); } bool containsRaw(FixedHash<N> const& _h) const { return this->contains(_h); }
enum { BitsPerBloom = 3 }; enum { BitsPerBloom = 3 };
private: private:
void init() { for (unsigned i = 0; i < CounterSize; ++i) m_refCounter[i] = 0; } void init() { for (unsigned i = 0; i < CounterSize; ++i) m_refCounter[i] = 0; }
static bool isBitSet(AbridgedTopic const& _h, unsigned _index); static bool isBitSet(FixedHash<N> const& _h, unsigned _index);
enum { CounterSize = 8 * TopicBloomFilter::size }; enum { CounterSize = 8 * TopicBloomFilterBase::size };
std::array<uint16_t, CounterSize> m_refCounter; std::array<uint16_t, CounterSize> m_refCounter;
}; };
static unsigned const c_powerOfTwoBitMmask[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
template <unsigned N>
void TopicBloomFilterBase<N>::addRaw(FixedHash<N> const& _h)
{
*this |= _h;
for (unsigned i = 0; i < CounterSize; ++i)
if (isBitSet(_h, i))
{
if (m_refCounter[i] != std::numeric_limits<uint16_t>::max())
m_refCounter[i]++;
else
BOOST_THROW_EXCEPTION(Overflow());
}
}
template <unsigned N>
void TopicBloomFilterBase<N>::removeRaw(FixedHash<N> const& _h)
{
for (unsigned i = 0; i < CounterSize; ++i)
if (isBitSet(_h, i))
{
if (m_refCounter[i])
m_refCounter[i]--;
if (!m_refCounter[i])
(*this)[i / 8] &= ~c_powerOfTwoBitMmask[i % 8];
}
}
template <unsigned N>
bool TopicBloomFilterBase<N>::isBitSet(FixedHash<N> const& _h, unsigned _index)
{
unsigned iByte = _index / 8;
unsigned iBit = _index % 8;
return (_h[iByte] & c_powerOfTwoBitMmask[iBit]) != 0;
}
using TopicBloomFilter = TopicBloomFilterBase<TopicBloomFilterSize>;
} }
} }
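
The templated filter above keeps one 16-bit reference counter per bloom bit, so removing a topic only clears a bit once no other added topic still references it. The following is a minimal standalone sketch of that counting scheme, not the libwhisper API: it uses a plain std::bitset and hypothetical names (CountingBloom, add, remove, contains) purely for illustration, and omits the Overflow exception on counter saturation.

#include <array>
#include <bitset>
#include <cstdint>
#include <iostream>

// Simplified stand-in for the reference-counted bloom filter:
// 64 bloom bits, one counter per bit (overflow handling omitted).
struct CountingBloom
{
    static constexpr unsigned c_bits = 64;
    std::bitset<c_bits> bits;
    std::array<uint16_t, c_bits> refs{};

    void add(std::bitset<c_bits> const& _h)
    {
        bits |= _h;
        for (unsigned i = 0; i < c_bits; ++i)
            if (_h[i])
                ++refs[i];
    }
    void remove(std::bitset<c_bits> const& _h)
    {
        for (unsigned i = 0; i < c_bits; ++i)
            if (_h[i])
            {
                if (refs[i])
                    --refs[i];
                if (!refs[i])
                    bits[i] = false; // clear only once no topic references the bit
            }
    }
    bool contains(std::bitset<c_bits> const& _h) const { return (bits & _h) == _h; }
};

int main()
{
    CountingBloom f;
    std::bitset<64> a(0xA), b(0x3); // a sets bits {1,3}, b sets bits {0,1}
    f.add(a);
    f.add(b);
    f.remove(a);                        // bit 1 stays set: b still references it
    std::cout << f.contains(b) << "\n"; // 1
    std::cout << f.contains(a) << "\n"; // 0
}

A filter without the counters would have to clear bit 1 when removing a, breaking lookups for b; the per-bit counters are what make removal safe.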

4
libwhisper/CMakeLists.txt

@ -12,7 +12,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTATICLIB")
aux_source_directory(. SRC_LIST) aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..) include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS}) include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS}) include_directories(${Boost_INCLUDE_DIRS})
set(EXECUTABLE whisper) set(EXECUTABLE whisper)
@ -21,8 +21,6 @@ file(GLOB HEADERS "*.h")
add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS}) add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS})
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ethcore) target_link_libraries(${EXECUTABLE} ethcore)
target_link_libraries(${EXECUTABLE} devcrypto) target_link_libraries(${EXECUTABLE} devcrypto)
target_link_libraries(${EXECUTABLE} devcore) target_link_libraries(${EXECUTABLE} devcore)

4
libwhisper/Common.cpp

@ -95,9 +95,9 @@ TopicFilter::TopicFilter(RLP const& _r)
} }
} }
AbridgedTopic TopicFilter::exportBloom() const FixedHash<TopicBloomFilter::size> TopicFilter::exportBloom() const
{ {
AbridgedTopic ret; FixedHash<TopicBloomFilter::size> ret;
for (TopicMask const& t: m_topicMasks) for (TopicMask const& t: m_topicMasks)
for (auto const& i: t) for (auto const& i: t)
ret |= i.first.template bloomPart<TopicBloomFilter::BitsPerBloom, TopicBloomFilter::size>(); ret |= i.first.template bloomPart<TopicBloomFilter::BitsPerBloom, TopicBloomFilter::size>();

4
libwhisper/Common.h

@ -59,6 +59,8 @@ enum WhisperPacket
PacketCount PacketCount
}; };
enum { TopicBloomFilterSize = 8 };
using AbridgedTopic = FixedHash<4>; using AbridgedTopic = FixedHash<4>;
using Topic = h256; using Topic = h256;
@ -105,7 +107,7 @@ public:
void streamRLP(RLPStream& _s) const { _s << m_topicMasks; } void streamRLP(RLPStream& _s) const { _s << m_topicMasks; }
h256 sha3() const; h256 sha3() const;
bool matches(Envelope const& _m) const; bool matches(Envelope const& _m) const;
AbridgedTopic exportBloom() const; FixedHash<TopicBloomFilterSize> exportBloom() const;
private: private:
TopicMasks m_topicMasks; TopicMasks m_topicMasks;

12
mix/CodeModel.cpp

@ -493,9 +493,18 @@ dev::bytes const& CodeModel::getStdContractCode(const QString& _contractName, co
return m_compiledContracts.at(_contractName); return m_compiledContracts.at(_contractName);
} }
void CodeModel::retrieveSubType(SolidityType& _wrapperType, dev::solidity::Type const* _type)
{
if (_type->getCategory() == Type::Category::Array)
{
ArrayType const* arrayType = dynamic_cast<ArrayType const*>(_type);
_wrapperType.baseType = std::make_shared<dev::mix::SolidityType const>(nodeType(arrayType->getBaseType().get()));
}
}
SolidityType CodeModel::nodeType(dev::solidity::Type const* _type) SolidityType CodeModel::nodeType(dev::solidity::Type const* _type)
{ {
SolidityType r { SolidityType::Type::UnsignedInteger, 32, 1, false, false, QString::fromStdString(_type->toString()), std::vector<SolidityDeclaration>(), std::vector<QString>() }; SolidityType r { SolidityType::Type::UnsignedInteger, 32, 1, false, false, QString::fromStdString(_type->toString()), std::vector<SolidityDeclaration>(), std::vector<QString>(), nullptr };
if (!_type) if (!_type)
return r; return r;
switch (_type->getCategory()) switch (_type->getCategory())
@ -536,6 +545,7 @@ SolidityType CodeModel::nodeType(dev::solidity::Type const* _type)
r.count = static_cast<unsigned>(array->getLength()); r.count = static_cast<unsigned>(array->getLength());
r.dynamicSize = _type->isDynamicallySized(); r.dynamicSize = _type->isDynamicallySized();
r.array = true; r.array = true;
retrieveSubType(r, _type);
} }
break; break;
case Type::Category::Enum: case Type::Category::Enum:
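
With baseType now populated for array types, a nested array can be unwound by following the chain of element types. A small standalone illustration of that idea follows; TypeInfo and the type names are hypothetical stand-ins, not the mix::SolidityType struct itself.

#include <iostream>
#include <memory>
#include <string>

// Minimal stand-in for the wrapper type: only the fields needed here.
struct TypeInfo
{
    std::string name;
    bool array;
    std::shared_ptr<TypeInfo const> baseType; // element type when array == true
};

int main()
{
    auto elem = std::make_shared<TypeInfo const>(TypeInfo{"uint256", false, nullptr});
    auto inner = std::make_shared<TypeInfo const>(TypeInfo{"uint256[2]", true, elem});
    TypeInfo outer{"uint256[2][]", true, inner};

    // Walk from the outermost type down to the scalar element.
    for (TypeInfo const* t = &outer; t; t = t->array ? t->baseType.get() : nullptr)
        std::cout << t->name << "\n";
}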

2
mix/CodeModel.h

@ -224,6 +224,8 @@ public:
Q_INVOKABLE void unregisterContractSrc(QString const& _documentId); Q_INVOKABLE void unregisterContractSrc(QString const& _documentId);
/// Convert solidity type info to mix type /// Convert solidity type info to mix type
static SolidityType nodeType(dev::solidity::Type const* _type); static SolidityType nodeType(dev::solidity::Type const* _type);
/// Retrieve subtype
static void retrieveSubType(SolidityType& _wrapperType, dev::solidity::Type const* _type);
/// Check if given location belongs to contract or function /// Check if given location belongs to contract or function
bool isContractOrFunctionLocation(dev::SourceLocation const& _location); bool isContractOrFunctionLocation(dev::SourceLocation const& _location);
/// Get funciton name by location /// Get funciton name by location

106
mix/ContractCallDataEncoder.cpp

@ -20,9 +20,12 @@
* Ethereum IDE client. * Ethereum IDE client.
*/ */
#include <QDebug> #include <vector>
#include <QMap> #include <QMap>
#include <QStringList> #include <QStringList>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <libethcore/CommonJS.h> #include <libethcore/CommonJS.h>
#include <libsolidity/AST.h> #include <libsolidity/AST.h>
#include "QVariableDeclaration.h" #include "QVariableDeclaration.h"
@ -39,13 +42,18 @@ bytes ContractCallDataEncoder::encodedData()
bytes r(m_encodedData); bytes r(m_encodedData);
size_t headerSize = m_encodedData.size() & ~0x1fUL; //ignore any prefix that is not 32-byte aligned size_t headerSize = m_encodedData.size() & ~0x1fUL; //ignore any prefix that is not 32-byte aligned
//apply offsets //apply offsets
for (auto const& p: m_offsetMap) for (auto const& p: m_dynamicOffsetMap)
{
vector_ref<byte> offsetRef(m_dynamicData.data() + p.first, 32);
toBigEndian(p.second + headerSize, offsetRef); //add header size minus signature hash
}
for (auto const& p: m_staticOffsetMap)
{ {
vector_ref<byte> offsetRef(r.data() + p.first, 32); vector_ref<byte> offsetRef(r.data() + p.first, 32);
toBigEndian<size_t, vector_ref<byte>>(p.second + headerSize, offsetRef); //add header size minus signature hash toBigEndian(p.second + headerSize, offsetRef); //add header size minus signature hash
} }
if (m_dynamicData.size() > 0)
r.insert(r.end(), m_dynamicData.begin(), m_dynamicData.end()); r.insert(r.end(), m_dynamicData.begin(), m_dynamicData.end());
return r; return r;
} }
@ -55,54 +63,69 @@ void ContractCallDataEncoder::encode(QFunctionDefinition const* _function)
m_encodedData.insert(m_encodedData.end(), hash.begin(), hash.end()); m_encodedData.insert(m_encodedData.end(), hash.begin(), hash.end());
} }
void ContractCallDataEncoder::encode(QVariant const& _data, SolidityType const& _type) void ContractCallDataEncoder::encodeArray(QJsonArray const& _array, SolidityType const& _type, bytes& _content)
{ {
u256 count = 1; size_t offsetStart = _content.size();
QStringList strList; if (_type.dynamicSize)
if (_type.array)
{ {
if (_data.type() == QVariant::String) bytes count = bytes(32);
strList = _data.toString().split(",", QString::SkipEmptyParts); //TODO: proper parsing toBigEndian((u256)_array.size(), count);
else _content += count; //reserved space for count
strList = _data.toStringList(); }
count = strList.count();
int k = 0;
for (QJsonValue const& c: _array)
{
if (c.isArray())
{
if (_type.baseType->dynamicSize)
m_dynamicOffsetMap.push_back(std::make_pair(m_dynamicData.size() + offsetStart + 32 + k * 32, m_dynamicData.size() + _content.size()));
encodeArray(c.toArray(), *_type.baseType, _content);
}
else
{
// encode single item
if (c.isDouble())
encodeSingleItem(QString::number(c.toDouble()), _type, _content);
else if (c.isString())
encodeSingleItem(c.toString(), _type, _content);
}
k++;
} }
else }
strList.append(_data.toString());
if (_type.dynamicSize) void ContractCallDataEncoder::encode(QVariant const& _data, SolidityType const& _type)
{
if (_type.dynamicSize && (_type.type == SolidityType::Type::Bytes || _type.type == SolidityType::Type::String))
{ {
bytes empty(32); bytes empty(32);
size_t sizePos = m_dynamicData.size(); size_t sizePos = m_dynamicData.size();
m_dynamicData += empty; //reserve space for count m_dynamicData += empty; //reserve space for count
if (_type.type == SolidityType::Type::Bytes) u256 count = encodeSingleItem(_data.toString(), _type, m_dynamicData);
count = encodeSingleItem(_data.toString(), _type, m_dynamicData);
else
{
count = strList.count();
for (auto const& item: strList)
encodeSingleItem(item, _type, m_dynamicData);
}
vector_ref<byte> sizeRef(m_dynamicData.data() + sizePos, 32); vector_ref<byte> sizeRef(m_dynamicData.data() + sizePos, 32);
toBigEndian(count, sizeRef); toBigEndian(count, sizeRef);
m_offsetMap.push_back(std::make_pair(m_encodedData.size(), sizePos)); m_staticOffsetMap.push_back(std::make_pair(m_encodedData.size(), sizePos));
m_encodedData += empty; //reserve space for offset m_encodedData += empty; //reserve space for offset
} }
else else if (_type.array)
{ {
if (_type.array) bytes content;
count = _type.count; size_t size = m_encodedData.size();
int c = static_cast<int>(count); if (_type.dynamicSize)
if (strList.size() > c) {
strList.erase(strList.begin() + c, strList.end()); m_encodedData += bytes(32); // reserve space for offset
else m_staticOffsetMap.push_back(std::make_pair(size, m_dynamicData.size()));
while (strList.size() < c) }
strList.append(QString()); QJsonDocument jsonDoc = QJsonDocument::fromJson(_data.toString().toUtf8());
encodeArray(jsonDoc.array(), _type, content);
for (auto const& item: strList) if (!_type.dynamicSize)
encodeSingleItem(item, _type, m_encodedData); m_encodedData.insert(m_encodedData.end(), content.begin(), content.end());
else
m_dynamicData.insert(m_dynamicData.end(), content.begin(), content.end());
} }
else
encodeSingleItem(_data.toString(), _type, m_encodedData);
} }
unsigned ContractCallDataEncoder::encodeSingleItem(QString const& _data, SolidityType const& _type, bytes& _dest) unsigned ContractCallDataEncoder::encodeSingleItem(QString const& _data, SolidityType const& _type, bytes& _dest)
@ -207,6 +230,13 @@ QString ContractCallDataEncoder::toString(dev::bytes const& _b)
return QString::fromStdString(dev::toJS(_b)); return QString::fromStdString(dev::toJS(_b));
} }
QString ContractCallDataEncoder::toChar(dev::bytes const& _b)
{
QString str;
asString(_b, str);
return str;
}
QVariant ContractCallDataEncoder::decode(SolidityType const& _type, bytes const& _value) QVariant ContractCallDataEncoder::decode(SolidityType const& _type, bytes const& _value)
{ {
@ -220,6 +250,8 @@ QVariant ContractCallDataEncoder::decode(SolidityType const& _type, bytes const&
return QVariant::fromValue(toString(decodeBool(rawParam))); return QVariant::fromValue(toString(decodeBool(rawParam)));
else if (type == QSolidityType::Type::Bytes || type == QSolidityType::Type::Hash) else if (type == QSolidityType::Type::Bytes || type == QSolidityType::Type::Hash)
return QVariant::fromValue(toString(decodeBytes(rawParam))); return QVariant::fromValue(toString(decodeBytes(rawParam)));
else if (type == QSolidityType::Type::String)
return QVariant::fromValue(toChar(decodeBytes(rawParam)));
else if (type == QSolidityType::Type::Struct) else if (type == QSolidityType::Type::Struct)
return QVariant::fromValue(QString("struct")); //TODO return QVariant::fromValue(QString("struct")); //TODO
else if (type == QSolidityType::Type::Address) else if (type == QSolidityType::Type::Address)
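
The reworked encoder builds a static head (m_encodedData) and a dynamic tail (m_dynamicData), reserving 32-byte slots for offsets and patching them once the head size is known. Below is a minimal standalone sketch of that head/tail patching for a single dynamic array with one element; writeWord, the variable names, and the layout are simplified assumptions for illustration, not mix's encoder API.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

using bytes = std::vector<uint8_t>;

// Write _v big-endian into the 32-byte word starting at _dest.
static void writeWord(uint8_t* _dest, uint64_t _v)
{
    for (int i = 31; i >= 0; --i)
    {
        _dest[i] = static_cast<uint8_t>(_v & 0xff);
        _v >>= 8;
    }
}

int main()
{
    bytes head, tail;
    std::vector<std::pair<size_t, size_t>> offsets; // (slot position in head, target position in tail)

    // Head: one reserved 32-byte slot that will point into the dynamic tail.
    offsets.emplace_back(head.size(), tail.size());
    head.resize(head.size() + 32);

    // Tail: length word (1 element) followed by one 32-byte element (value 7).
    tail.resize(tail.size() + 32);
    writeWord(tail.data() + tail.size() - 32, 1);
    tail.resize(tail.size() + 32);
    writeWord(tail.data() + tail.size() - 32, 7);

    // Patch the reserved slots now that the head size is known, then append the tail.
    for (auto const& p: offsets)
        writeWord(head.data() + p.first, p.second + head.size());
    head.insert(head.end(), tail.begin(), tail.end());

    std::cout << "encoded size: " << head.size() << "\n"; // 96 bytes
}

The same two-pass idea is what m_staticOffsetMap and m_dynamicOffsetMap implement: offsets cannot be written until all static arguments have been encoded, so they are recorded first and filled in by encodedData().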

5
mix/ContractCallDataEncoder.h

@ -71,11 +71,14 @@ private:
QString toString(bool _b); QString toString(bool _b);
QString toString(dev::bytes const& _b); QString toString(dev::bytes const& _b);
bool asString(dev::bytes const& _b, QString& _str); bool asString(dev::bytes const& _b, QString& _str);
void encodeArray(QJsonArray const& _array, SolidityType const& _type, bytes& _content);
QString toChar(dev::bytes const& _b);
private: private:
bytes m_encodedData; bytes m_encodedData;
bytes m_dynamicData; bytes m_dynamicData;
std::vector<std::pair<size_t, size_t>> m_offsetMap; std::vector<std::pair<size_t, size_t>> m_dynamicOffsetMap;
std::vector<std::pair<size_t, size_t>> m_staticOffsetMap;
}; };
} }

1
mix/MixClient.h

@ -59,6 +59,7 @@ public:
dev::eth::ExecutionResult call(Address const& _secret, u256 _value, Address _dest, bytes const& _data, u256 _gas, u256 _gasPrice, eth::BlockNumber _blockNumber = eth::PendingBlock, eth::FudgeFactor _ff = eth::FudgeFactor::Strict) override; dev::eth::ExecutionResult call(Address const& _secret, u256 _value, Address _dest, bytes const& _data, u256 _gas, u256 _gasPrice, eth::BlockNumber _blockNumber = eth::PendingBlock, eth::FudgeFactor _ff = eth::FudgeFactor::Strict) override;
dev::eth::ExecutionResult create(Address const& _secret, u256 _value, bytes const& _data = bytes(), u256 _gas = 10000, u256 _gasPrice = 10 * eth::szabo, eth::BlockNumber _blockNumber = eth::PendingBlock, eth::FudgeFactor _ff = eth::FudgeFactor::Strict) override; dev::eth::ExecutionResult create(Address const& _secret, u256 _value, bytes const& _data = bytes(), u256 _gas = 10000, u256 _gasPrice = 10 * eth::szabo, eth::BlockNumber _blockNumber = eth::PendingBlock, eth::FudgeFactor _ff = eth::FudgeFactor::Strict) override;
using ClientBase::submitTransaction;
void submitTransaction(Secret _secret, u256 _value, Address _dest, bytes const& _data, u256 _gas, u256 _gasPrice, bool _gasAuto); void submitTransaction(Secret _secret, u256 _value, Address _dest, bytes const& _data, u256 _gas, u256 _gasPrice, bool _gasAuto);
Address submitTransaction(Secret _secret, u256 _endowment, bytes const& _init, u256 _gas, u256 _gasPrice, bool _gasAuto); Address submitTransaction(Secret _secret, u256 _endowment, bytes const& _init, u256 _gas, u256 _gasPrice, bool _gasAuto);
dev::eth::ExecutionResult call(Address const& _secret, u256 _value, Address _dest, bytes const& _data, u256 _gas, u256 _gasPrice, eth::BlockNumber _blockNumber, bool _gasAuto, eth::FudgeFactor _ff = eth::FudgeFactor::Strict); dev::eth::ExecutionResult call(Address const& _secret, u256 _value, Address _dest, bytes const& _data, u256 _gas, u256 _gasPrice, eth::BlockNumber _blockNumber, bool _gasAuto, eth::FudgeFactor _ff = eth::FudgeFactor::Strict);

1
mix/QVariableDeclaration.h

@ -60,6 +60,7 @@ public:
Bool, Bool,
Address, Address,
Bytes, Bytes,
String,
Enum, Enum,
Struct Struct
}; };

1
mix/SolidityType.h

@ -57,6 +57,7 @@ struct SolidityType
QString name; QString name;
std::vector<SolidityDeclaration> members; //for struct std::vector<SolidityDeclaration> members; //for struct
std::vector<QString> enumNames; //for enum std::vector<QString> enumNames; //for enum
std::shared_ptr<SolidityType const> baseType;
}; };
struct SolidityDeclaration struct SolidityDeclaration

3
mix/qml/QIntTypeView.qml

@ -33,6 +33,3 @@ Item
} }
} }
} }

22
mix/qml/QStringTypeView.qml

@ -5,25 +5,31 @@ Item
property alias value: textinput.text property alias value: textinput.text
property alias readOnly: textinput.readOnly property alias readOnly: textinput.readOnly
id: editRoot id: editRoot
height: 20
width: readOnly ? textinput.implicitWidth : 150 width: readOnly ? textinput.implicitWidth : 150
SourceSansProBold DebuggerPaneStyle {
{ id: dbgStyle
id: boldFont
} }
Rectangle { Rectangle {
anchors.fill: parent anchors.fill: parent
radius: 4 radius: 4
TextInput { TextInput {
anchors.verticalCenter: parent.verticalCenter
id: textinput id: textinput
text: value font.family: dbgStyle.general.basicFont
clip: true clip: true
anchors.fill: parent
wrapMode: Text.WrapAnywhere
font.family: boldFont.name
selectByMouse: true selectByMouse: true
text: value
anchors.fill: parent
font.pointSize: dbgStyle.general.basicFontSize
color: dbgStyle.general.basicColor
MouseArea {
id: mouseArea
anchors.fill: parent
hoverEnabled: true
onClicked: textinput.forceActiveFocus()
}
} }
} }
} }

2
mix/qml/StructView.qml

@ -72,7 +72,7 @@ Column
return Qt.createComponent("qrc:/qml/QIntTypeView.qml"); return Qt.createComponent("qrc:/qml/QIntTypeView.qml");
else if (t === QSolidityType.Bool) else if (t === QSolidityType.Bool)
return Qt.createComponent("qrc:/qml/QBoolTypeView.qml"); return Qt.createComponent("qrc:/qml/QBoolTypeView.qml");
else if (t === QSolidityType.Bytes) else if (t === QSolidityType.Bytes || t === QSolidityType.String)
return Qt.createComponent("qrc:/qml/QStringTypeView.qml"); return Qt.createComponent("qrc:/qml/QStringTypeView.qml");
else if (t === QSolidityType.Hash) else if (t === QSolidityType.Hash)
return Qt.createComponent("qrc:/qml/QHashTypeView.qml"); return Qt.createComponent("qrc:/qml/QHashTypeView.qml");

113
mix/qml/js/InputValidator.js

@ -1,27 +1,20 @@
Qt.include("QEtherHelper.js") Qt.include("QEtherHelper.js")
var nbRegEx = new RegExp('^[0-9]+$'); var nbRegEx;
var arrayRegEx;
var capturenbRegEx;
function validate(model, values) function validate(model, values)
{ {
var inError = []; var inError = [];
for (var k in model) for (var k in model)
{ {
init()
if (values[model[k].name]) if (values[model[k].name])
{ {
var type = model[k].type.name; var type = model[k].type.name;
var res; var value = values[model[k].name];
if (isContractType(type)) var res = check(type, value)
res = validateAddress(type, values[model[k].name]);
else if (type.indexOf("int") !== -1)
res = validateInt(type, values[model[k].name]);
else if (type.indexOf("bytes") !== -1)
res = validateBytes(type, values[model[k].name]);
else if (type.indexOf("bool") !== -1)
res = validateBool(type, values[model[k].name]);
else if (type.indexOf("address") !== -1)
res = validateAddress(type, values[model[k].name]);
else
res.valid = true;
if (!res.valid) if (!res.valid)
inError.push({ type: type, value: values, message: res.message }); inError.push({ type: type, value: values, message: res.message });
} }
@ -29,6 +22,96 @@ function validate(model, values)
return inError; return inError;
} }
function init()
{
nbRegEx = new RegExp('^[0-9]+$');
arrayRegEx = new RegExp('\\[[^\\]]*\\]', "g");
capturenbRegEx = new RegExp("[0-9]+");
}
function check(type, value)
{
var res = { valid: true, message : "" }
if (isContractType(type))
res = validateAddress(type, value);
else if (isArray(type))
res = validateArray(type, value);
else if (type.indexOf("int") !== -1)
res = validateInt(type, value);
else if (type.indexOf("bytes") !== -1)
res = validateBytes(type, value);
else if (type.indexOf("bool") !== -1)
res = validateBool(type, value);
else if (type.indexOf("address") !== -1)
res = validateAddress(type, value);
else
{
res.valid = true
res.message = ""
}
return res;
}
function isArray(_type)
{
return arrayRegEx.test(_type);
}
function checkArrayRecursively(_type, _dim, _array)
{
if (_array instanceof Array)
{
if (_dim.length === 0)
return { valid: false, message: "Your input contains too many dimensions" }
var size = -1
var infinite = false
if (_dim === "[]")
infinite = true
else
size = parseInt(capturenbRegEx.exec(_dim[0]))
if (_array.length > size && !infinite)
return { valid: false, message: "Array size does not correspond. Should be " + _dim[0] }
if (_array.length > 0)
{
var _newdim = _dim.slice(0)
_newdim.splice(0, 1)
for (var i = 0; i < _array.length; i++)
{
var ret = checkArrayRecursively(_type, _newdim, _array[i])
if (!ret.valid)
return ret
}
}
return { valid: true, message: "" }
}
else
{
if (_dim.length > 0)
return { valid: false, message: "Your input contains too few dimensions" }
if (typeof(_array) === "number")
_array = '' + _array + ''
return check(_type, _array)
}
}
function validateArray(_type, _value)
{
try
{
_value = JSON.parse(_value)
}
catch (e)
{
return { valid: false, message: "Input must be JSON formatted like [1,5,3,9] or [[4,9],[4,9],[4,9],[4,9]]" }
}
var dim = _type.match(arrayRegEx)
dim.reverse();
for (var k = 0; k < dim.length; k++)
_type = _type.replace(dim[k], "")
_type = _type.replace(/calldata/g, "")
return checkArrayRecursively(_type, dim, _value)
}
function isContractType(_type) function isContractType(_type)
{ {
for (var k in Object.keys(codeModel.contracts)) for (var k in Object.keys(codeModel.contracts))
@ -100,7 +183,7 @@ function validateAddress(_type, _value)
function validateBytes(_type, _value) function validateBytes(_type, _value)
{ {
var ret = { valid: true, message: "" } var ret = { valid: true, message: "" }
if (_value.length > parseInt(_type.replace("bytes", "")) ) if (_type !== "bytes" && _value.length > parseInt(_type.replace("bytes", "")) )
{ {
ret.valid = false; ret.valid = false;
ret.message = _type + " should not contains more than " + _type.replace("bytes", "") + " characters"; ret.message = _type + " should not contains more than " + _type.replace("bytes", "") + " characters";

9
mix/qml/js/ProjectModel.js

@ -254,8 +254,10 @@ function doCreateProject(title, path) {
files: [ contractsFile, indexFile ] files: [ contractsFile, indexFile ]
}; };
//TODO: copy from template //TODO: copy from template
fileIo.writeFile(dirPath + indexFile, htmlTemplate); if (!fileIo.fileExists(dirPath + indexFile))
fileIo.writeFile(dirPath + contractsFile, contractTemplate); fileIo.writeFile(dirPath + indexFile, htmlTemplate);
if (!fileIo.fileExists(dirPath + contractsFile))
fileIo.writeFile(dirPath + contractsFile, contractTemplate);
newProject(projectData); newProject(projectData);
var json = JSON.stringify(projectData, null, "\t"); var json = JSON.stringify(projectData, null, "\t");
fileIo.writeFile(projectFile, json); fileIo.writeFile(projectFile, json);
@ -342,7 +344,8 @@ function newContract() {
function createAndAddFile(name, extension, content) { function createAndAddFile(name, extension, content) {
var fileName = generateFileName(name, extension); var fileName = generateFileName(name, extension);
var filePath = projectPath + fileName; var filePath = projectPath + fileName;
fileIo.writeFile(filePath, content); if (!fileIo.fileExists(filePath))
fileIo.writeFile(filePath, content);
var id = addFile(fileName); var id = addFile(fileName);
saveProjectFile(); saveProjectFile();
documentAdded(id); documentAdded(id);

4
neth/CMakeLists.txt

@ -4,7 +4,7 @@ aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..) include_directories(BEFORE ..)
include_directories(${JSON_RPC_CPP_INCLUDE_DIRS}) include_directories(${JSON_RPC_CPP_INCLUDE_DIRS})
include_directories(${LEVELDB_INCLUDE_DIRS}) include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS}) include_directories(${Boost_INCLUDE_DIRS})
set(EXECUTABLE neth) set(EXECUTABLE neth)
@ -13,8 +13,6 @@ add_executable(${EXECUTABLE} ${SRC_LIST})
add_dependencies(${EXECUTABLE} BuildInfo.h) add_dependencies(${EXECUTABLE} BuildInfo.h)
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
if (JSONRPC) if (JSONRPC)
target_link_libraries(${EXECUTABLE} web3jsonrpc) target_link_libraries(${EXECUTABLE} web3jsonrpc)
endif() endif()

3
rlp/CMakeLists.txt

@ -4,13 +4,14 @@ set(CMAKE_AUTOMOC OFF)
aux_source_directory(. SRC_LIST) aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..) include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS}) include_directories(${DB_INCLUDE_DIRS})
set(EXECUTABLE rlp) set(EXECUTABLE rlp)
add_executable(${EXECUTABLE} ${SRC_LIST}) add_executable(${EXECUTABLE} ${SRC_LIST})
target_link_libraries(${EXECUTABLE} devcrypto) target_link_libraries(${EXECUTABLE} devcrypto)
target_link_libraries(${EXECUTABLE} ${DB_LIBRARIES})
if (APPLE) if (APPLE)
install(TARGETS ${EXECUTABLE} DESTINATION bin) install(TARGETS ${EXECUTABLE} DESTINATION bin)

6
test/TestHelper.cpp

@ -754,6 +754,10 @@ Options::Options()
checkState = true; checkState = true;
else if (arg == "--wallet") else if (arg == "--wallet")
wallet = true; wallet = true;
else if (arg == "--nonetwork")
nonetwork = true;
else if (arg == "--nodag")
nodag = true;
else if (arg == "--all") else if (arg == "--all")
{ {
performance = true; performance = true;
@ -761,7 +765,7 @@ Options::Options()
memory = true; memory = true;
inputLimits = true; inputLimits = true;
bigData = true; bigData = true;
wallet= true; wallet = true;
} }
else if (arg == "--singletest" && i + 1 < argc) else if (arg == "--singletest" && i + 1 < argc)
{ {

2
test/TestHelper.h

@ -223,6 +223,8 @@ public:
bool inputLimits = false; bool inputLimits = false;
bool bigData = false; bool bigData = false;
bool wallet = false; bool wallet = false;
bool nonetwork = false;
bool nodag = false;
/// @} /// @}
/// Get reference to options /// Get reference to options

11
test/TestUtils.cpp

@ -22,6 +22,7 @@
#include <thread> #include <thread>
#include <boost/test/unit_test.hpp> #include <boost/test/unit_test.hpp>
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <libdevcrypto/Common.h>
#include <libtestutils/Common.h> #include <libtestutils/Common.h>
#include <libtestutils/BlockChainLoader.h> #include <libtestutils/BlockChainLoader.h>
#include <libtestutils/FixedClient.h> #include <libtestutils/FixedClient.h>
@ -116,3 +117,13 @@ void ParallelClientBaseFixture::enumerateClients(std::function<void(Json::Value
}); });
}); });
} }
MoveNonceToTempDir::MoveNonceToTempDir()
{
crypto::Nonce::setSeedFilePath(m_dir.path() + "/seed");
}
MoveNonceToTempDir::~MoveNonceToTempDir()
{
crypto::Nonce::reset();
}

9
test/TestUtils.h

@ -24,6 +24,7 @@
#include <functional> #include <functional>
#include <string> #include <string>
#include <json/json.h> #include <json/json.h>
#include <libdevcore/TransientDirectory.h>
#include <libethereum/BlockChain.h> #include <libethereum/BlockChain.h>
#include <libethereum/ClientBase.h> #include <libethereum/ClientBase.h>
@ -78,5 +79,13 @@ struct JsonRpcFixture: public ClientBaseFixture
}; };
struct MoveNonceToTempDir
{
MoveNonceToTempDir();
~MoveNonceToTempDir();
private:
TransientDirectory m_dir;
};
} }
} }

138
test/libdevcrypto/SecretStore.cpp

@ -29,11 +29,16 @@
#include <libdevcore/TrieDB.h> #include <libdevcore/TrieDB.h>
#include <libdevcore/TrieHash.h> #include <libdevcore/TrieHash.h>
#include "MemTrie.h" #include "MemTrie.h"
#include "../TestHelper.h" #include <test/TestHelper.h>
#include <test/TestUtils.h>
using namespace std; using namespace std;
using namespace dev; using namespace dev;
using namespace dev::test;
namespace js = json_spirit; namespace js = json_spirit;
namespace fs = boost::filesystem;
BOOST_GLOBAL_FIXTURE( MoveNonceToTempDir )
BOOST_AUTO_TEST_SUITE(KeyStore) BOOST_AUTO_TEST_SUITE(KeyStore)
@ -52,7 +57,8 @@ BOOST_AUTO_TEST_CASE(basic_tests)
{ {
cnote << i.first; cnote << i.first;
js::mObject& o = i.second.get_obj(); js::mObject& o = i.second.get_obj();
SecretStore store("."); TransientDirectory tmpDir;
SecretStore store(tmpDir.path());
h128 u = store.readKeyContent(js::write_string(o["json"], false)); h128 u = store.readKeyContent(js::write_string(o["json"], false));
cdebug << "read uuid" << u; cdebug << "read uuid" << u;
bytes s = store.secret(u, [&](){ return o["password"].get_str(); }); bytes s = store.secret(u, [&](){ return o["password"].get_str(); });
@ -61,4 +67,132 @@ BOOST_AUTO_TEST_CASE(basic_tests)
} }
} }
BOOST_AUTO_TEST_CASE(import_key_from_file)
{
// Imports a key from an external file. Tests that the imported key is there
// and that the external file is not deleted.
TransientDirectory importDir;
string importFile = importDir.path() + "/import.json";
TransientDirectory storeDir;
string keyData = R"({
"version": 3,
"crypto": {
"ciphertext": "d69313b6470ac1942f75d72ebf8818a0d484ac78478a132ee081cd954d6bd7a9",
"cipherparams": { "iv": "ffffffffffffffffffffffffffffffff" },
"kdf": "pbkdf2",
"kdfparams": { "dklen": 32, "c": 262144, "prf": "hmac-sha256", "salt": "c82ef14476014cbf438081a42709e2ed" },
"mac": "cf6bfbcc77142a22c4a908784b4a16f1023a1d0e2aff404c20158fa4f1587177",
"cipher": "aes-128-ctr",
"version": 1
},
"id": "abb67040-8dbe-0dad-fc39-2b082ef0ee5f"
})";
string password = "bar";
string priv = "0202020202020202020202020202020202020202020202020202020202020202";
writeFile(importFile, keyData);
h128 uuid;
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 0);
uuid = store.importKey(importFile);
BOOST_CHECK(!!uuid);
BOOST_CHECK(contentsString(importFile) == keyData);
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return password; })));
BOOST_CHECK_EQUAL(store.keys().size(), 1);
}
fs::remove(importFile);
// now do it again to check whether SecretStore properly stored it on disk
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 1);
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return password; })));
}
}
BOOST_AUTO_TEST_CASE(import_secret)
{
for (string const& password: {"foobar", ""})
{
TransientDirectory storeDir;
string priv = "0202020202020202020202020202020202020202020202020202020202020202";
h128 uuid;
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 0);
uuid = store.importSecret(fromHex(priv), password);
BOOST_CHECK(!!uuid);
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return password; })));
BOOST_CHECK_EQUAL(store.keys().size(), 1);
}
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 1);
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return password; })));
}
}
}
BOOST_AUTO_TEST_CASE(wrong_password)
{
TransientDirectory storeDir;
SecretStore store(storeDir.path());
string password = "foobar";
string priv = "0202020202020202020202020202020202020202020202020202020202020202";
h128 uuid;
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 0);
uuid = store.importSecret(fromHex(priv), password);
BOOST_CHECK(!!uuid);
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return password; })));
BOOST_CHECK_EQUAL(store.keys().size(), 1);
// password will not be queried
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return "abcdefg"; })));
}
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 1);
BOOST_CHECK(store.secret(uuid, [&](){ return "abcdefg"; }).empty());
}
}
BOOST_AUTO_TEST_CASE(recode)
{
TransientDirectory storeDir;
SecretStore store(storeDir.path());
string password = "foobar";
string changedPassword = "abcdefg";
string priv = "0202020202020202020202020202020202020202020202020202020202020202";
h128 uuid;
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 0);
uuid = store.importSecret(fromHex(priv), password);
BOOST_CHECK(!!uuid);
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return password; })));
BOOST_CHECK_EQUAL(store.keys().size(), 1);
}
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 1);
BOOST_CHECK(store.secret(uuid, [&](){ return "abcdefg"; }).empty());
BOOST_CHECK(store.recode(uuid, changedPassword, [&](){ return password; }));
BOOST_CHECK_EQUAL(store.keys().size(), 1);
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return changedPassword; })));
store.clearCache();
BOOST_CHECK(store.secret(uuid, [&](){ return password; }).empty());
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return changedPassword; })));
}
{
SecretStore store(storeDir.path());
BOOST_CHECK_EQUAL(store.keys().size(), 1);
BOOST_CHECK(store.secret(uuid, [&](){ return password; }).empty());
BOOST_CHECK_EQUAL(priv, toHex(store.secret(uuid, [&](){ return changedPassword; })));
}
}
BOOST_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE_END()

4
test/libdevcrypto/crypto.cpp

@ -31,12 +31,16 @@
#include <libdevcore/SHA3.h> #include <libdevcore/SHA3.h>
#include <libdevcrypto/ECDHE.h> #include <libdevcrypto/ECDHE.h>
#include <libdevcrypto/CryptoPP.h> #include <libdevcrypto/CryptoPP.h>
#include <test/TestUtils.h>
using namespace std; using namespace std;
using namespace dev; using namespace dev;
using namespace dev::test;
using namespace dev::crypto; using namespace dev::crypto;
using namespace CryptoPP; using namespace CryptoPP;
BOOST_GLOBAL_FIXTURE( MoveNonceToTempDir )
BOOST_AUTO_TEST_SUITE(devcrypto) BOOST_AUTO_TEST_SUITE(devcrypto)
static Secp256k1 s_secp256k1; static Secp256k1 s_secp256k1;

6
test/libethereum/stateOriginal.cpp

@ -27,7 +27,8 @@
#include <libethereum/State.h> #include <libethereum/State.h>
#include <libethcore/Farm.h> #include <libethcore/Farm.h>
#include <libethereum/Defaults.h> #include <libethereum/Defaults.h>
#include "../TestHelper.h" #include <test/TestHelper.h>
using namespace std; using namespace std;
using namespace dev; using namespace dev;
using namespace dev::eth; using namespace dev::eth;
@ -48,6 +49,9 @@ BOOST_AUTO_TEST_CASE(Basic)
BOOST_AUTO_TEST_CASE(Complex) BOOST_AUTO_TEST_CASE(Complex)
{ {
if (test::Options::get().nodag)
return;
cnote << "Testing State..."; cnote << "Testing State...";
KeyPair me = sha3("Gav Wood"); KeyPair me = sha3("Gav Wood");

4
test/libp2p/capability.cpp

@ -27,6 +27,7 @@ along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
#include <libp2p/Session.h> #include <libp2p/Session.h>
#include <libp2p/Capability.h> #include <libp2p/Capability.h>
#include <libp2p/HostCapability.h> #include <libp2p/HostCapability.h>
#include <test/TestHelper.h>
using namespace std; using namespace std;
using namespace dev; using namespace dev;
@ -98,6 +99,9 @@ BOOST_FIXTURE_TEST_SUITE(p2pCapability, P2PFixture)
BOOST_AUTO_TEST_CASE(capability) BOOST_AUTO_TEST_CASE(capability)
{ {
if (test::Options::get().nonetwork)
return;
VerbosityHolder verbosityHolder(10); VerbosityHolder verbosityHolder(10);
cnote << "Testing Capability..."; cnote << "Testing Capability...";

Some files were not shown because too many files changed in this diff
