
Merge remote-tracking branch 'up/develop' into designDebugPanel

cl-refactor · yann300 committed 10 years ago · commit c0cab68327
100 changed files (lines changed shown in parentheses):

  1. CMakeLists.txt (16)
  2. abi/CMakeLists.txt (2)
  3. alethzero/MainWin.cpp (50)
  4. alethzero/MainWin.h (2)
  5. alethzero/NatspecHandler.h (7)
  6. alethzero/OurWebThreeStubServer.cpp (2)
  7. alethzero/Transact.cpp (7)
  8. cmake/EthCompilerSettings.cmake (17)
  9. cmake/EthDependencies.cmake (6)
  10. cmake/FindRocksDB.cmake (49)
  11. eth/main.cpp (21)
  12. ethkey/KeyAux.h (20)
  13. ethminer/MinerAux.h (11)
  14. ethvm/CMakeLists.txt (2)
  15. exp/CMakeLists.txt (2)
  16. getcoverage.sh (63)
  17. libdevcore/CMakeLists.txt (2)
  18. libdevcore/Common.cpp (2)
  19. libdevcore/Common.h (21)
  20. libdevcore/CommonData.cpp (4)
  21. libdevcore/CommonData.h (14)
  22. libdevcore/CommonIO.cpp (79)
  23. libdevcore/CommonIO.h (15)
  24. libdevcore/TrieDB.h (15)
  25. libdevcore/db.h (36)
  26. libdevcore/vector_ref.h (33)
  27. libdevcrypto/AES.h (2)
  28. libdevcrypto/CMakeLists.txt (4)
  29. libdevcrypto/Common.cpp (129)
  30. libdevcrypto/Common.h (27)
  31. libdevcrypto/Exceptions.h (35)
  32. libdevcrypto/OverlayDB.cpp (3)
  33. libdevcrypto/OverlayDB.h (7)
  34. libdevcrypto/SecretStore.cpp (124)
  35. libdevcrypto/SecretStore.h (50)
  36. libethash-cl/CMakeLists.txt (21)
  37. libethash-cl/bin2h.cmake (100)
  38. libethash-cl/ethash_cl_miner.cpp (71)
  39. libethash-cl/ethash_cl_miner.h (3)
  40. libethash-cl/ethash_cl_miner_kernel.cl (24)
  41. libethash/endian.h (3)
  42. libethash/internal.c (4)
  43. libethash/internal.h (21)
  44. libethcore/Common.h (6)
  45. libethcore/Ethash.cpp (3)
  46. libethcore/Ethash.h (3)
  47. libethcore/Farm.h (3)
  48. libethcore/KeyManager.cpp (169)
  49. libethcore/KeyManager.h (68)
  50. libethcore/Params.cpp (6)
  51. libethereum/BlockChain.cpp (24)
  52. libethereum/BlockChain.h (16)
  53. libethereum/BlockChainSync.cpp (800)
  54. libethereum/BlockChainSync.h (278)
  55. libethereum/BlockDetails.h (7)
  56. libethereum/BlockQueue.cpp (87)
  57. libethereum/BlockQueue.h (5)
  58. libethereum/CMakeLists.txt (3)
  59. libethereum/CanonBlockChain.h (6)
  60. libethereum/Client.cpp (45)
  61. libethereum/Client.h (1)
  62. libethereum/CommonNet.h (6)
  63. libethereum/EthereumHost.cpp (593)
  64. libethereum/EthereumHost.h (75)
  65. libethereum/EthereumPeer.cpp (46)
  66. libethereum/EthereumPeer.h (13)
  67. libethereum/State.h (2)
  68. libethereum/TransactionQueue.cpp (3)
  69. libethereum/TransactionQueue.h (3)
  70. libevm/VM.cpp (58)
  71. libevmasm/Assembly.cpp (10)
  72. libevmasm/AssemblyItem.cpp (7)
  73. libevmasm/AssemblyItem.h (9)
  74. libjsconsole/JSConsole.cpp (7)
  75. libjsconsole/JSConsole.h (7)
  76. liblll/CodeFragment.cpp (2)
  77. libp2p/CMakeLists.txt (2)
  78. libp2p/Host.cpp (5)
  79. libp2p/Session.cpp (19)
  80. libp2p/Session.h (7)
  81. libsolidity/AST.cpp (43)
  82. libsolidity/ArrayUtils.cpp (56)
  83. libsolidity/Compiler.cpp (40)
  84. libsolidity/CompilerUtils.cpp (64)
  85. libsolidity/CompilerUtils.h (5)
  86. libsolidity/ExpressionCompiler.cpp (55)
  87. libsolidity/NameAndTypeResolver.cpp (8)
  88. libsolidity/Types.cpp (62)
  89. libsolidity/Types.h (32)
  90. libtestutils/Common.cpp (2)
  91. libweb3jsonrpc/AccountHolder.cpp (2)
  92. libweb3jsonrpc/AccountHolder.h (1)
  93. libweb3jsonrpc/CMakeLists.txt (3)
  94. libweb3jsonrpc/JsonHelper.h (2)
  95. libweb3jsonrpc/WebThreeStubServer.cpp (23)
  96. libweb3jsonrpc/WebThreeStubServer.h (17)
  97. libwebthree/CMakeLists.txt (4)
  98. libwhisper/BloomFilter.cpp (28)
  99. libwhisper/BloomFilter.h (103)
  100. libwhisper/CMakeLists.txt (4)

16
CMakeLists.txt

@ -30,6 +30,7 @@ option(JSONRPC "Build with jsonprc. default on" ON)
option(FATDB "Build with ability to list entries in the Trie. Doubles DB size, slows everything down, but good for looking at state diffs and trie contents." OFF)
option(USENPM "Use npm to recompile ethereum.js if it was changed" OFF)
option(PROFILING "Build in support for profiling" OFF)
option(ROCKSDB "Use rocksdb rather than leveldb" OFF)
set(BUNDLE "none" CACHE STRING "Predefined bundle of software to build (none, full, user, tests, minimal).")
option(MINER "Build the CLI miner component" ON)
@ -40,6 +41,7 @@ option(TOOLS "Build the tools components" ON)
option(NCURSES "Build the NCurses components" OFF)
option(GUI "Build GUI components (AlethZero, Mix)" ON)
option(TESTS "Build the tests." ON)
option(NOBOOST "No use of boost macros in test functions" OFF)
option(EVMJIT "Build just-in-time compiler for EVM code (requires LLVM)" OFF)
option(ETHASHCL "Build in support for GPU mining via OpenCL" OFF)
option(JSCONSOLE "Build in javascript console" OFF)
@ -82,6 +84,7 @@ function(configureProject)
add_definitions(-DETH_CURL)
endif()
add_definitions(-DNOBOOST)
add_definitions(-DETH_TRUE)
endfunction()
@ -193,8 +196,10 @@ eth_format_option(MINER)
eth_format_option(USENPM)
eth_format_option(PROFILING)
eth_format_option(SOLIDITY)
eth_format_option(ROCKSDB)
eth_format_option(GUI)
eth_format_option(TESTS)
eth_format_option(NOBOOST)
eth_format_option(TOOLS)
eth_format_option(ETHASHCL)
eth_format_option(JSCONSOLE)
@ -307,6 +312,7 @@ message("-- PROFILING Profiling support ${PROFILIN
message("-- FATDB Full database exploring ${FATDB}")
message("-- JSONRPC JSON-RPC support ${JSONRPC}")
message("-- USENPM Javascript source building ${USENPM}")
message("-- ROCKSDB Prefer rocksdb to leveldb ${ROCKSDB}")
message("------------------------------------------------------------- components")
message("-- MINER Build miner ${MINER}")
message("-- ETHKEY Build wallet tools ${ETHKEY}")
@ -316,6 +322,7 @@ message("-- SERPENT Build Serpent language components ${SERPENT}
message("-- GUI Build GUI components ${GUI}")
message("-- NCURSES Build NCurses components ${NCURSES}")
message("-- TESTS Build tests ${TESTS}")
message("-- NOBOOST No BOOST macros in test functions ${NOBOOST}")
message("-- ETHASHCL Build OpenCL components (experimental!) ${ETHASHCL}")
message("-- JSCONSOLE Build with javascript console ${JSCONSOLE}")
message("-- EVMJIT Build LLVM-based JIT EVM (experimental!) ${EVMJIT}")
@ -332,6 +339,15 @@ include(EthExecutableHelper)
createBuildInfo()
if (ROCKSDB AND ROCKSDB_FOUND)
set(DB_INCLUDE_DIRS ${ROCKSDB_INCLUDE_DIRS})
set(DB_LIBRARIES ${ROCKSDB_LIBRARIES})
add_definitions(-DETH_ROCKSDB)
else()
set(DB_INCLUDE_DIRS ${LEVELDB_INCLUDE_DIRS})
set(DB_LIBRARIES ${LEVELDB_LIBRARIES})
endif()
if (EVMJIT)
set(EVMJIT_CPP TRUE) # include CPP-JIT connector
add_subdirectory(evmjit)

2
abi/CMakeLists.txt

@ -4,7 +4,7 @@ set(CMAKE_AUTOMOC OFF)
aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
set(EXECUTABLE abi)

50
alethzero/MainWin.cpp

@ -235,7 +235,7 @@ Main::Main(QWidget *parent) :
// ui->webView->page()->settings()->setAttribute(QWebEngineSettings::DeveloperExtrasEnabled, true);
// QWebEngineInspector* inspector = new QWebEngineInspector();
// inspector->setPage(page);
setBeneficiary(*m_keyManager.accounts().begin());
setBeneficiary(m_keyManager.accounts().front());
ethereum()->setDefault(LatestBlock);
@ -430,9 +430,9 @@ void Main::installBalancesWatch()
// TODO: Update for new currencies reg.
for (unsigned i = 0; i < ethereum()->stateAt(coinsAddr, PendingBlock); ++i)
altCoins.push_back(right160(ethereum()->stateAt(coinsAddr, i + 1)));
for (auto const& i: m_keyManager.accounts())
for (auto const& address: m_keyManager.accounts())
for (auto c: altCoins)
tf.address(c).topic(0, h256(i, h256::AlignRight));
tf.address(c).topic(0, h256(address, h256::AlignRight));
uninstallWatch(m_balancesFilter);
m_balancesFilter = installWatch(tf, [=](LocalisedLogEntries const&){ onBalancesChange(); });
@ -501,7 +501,7 @@ void Main::load(QString _s)
void Main::on_newTransaction_triggered()
{
m_transact->setEnvironment(m_keyManager.accounts(), ethereum(), &m_natSpecDB);
m_transact->setEnvironment(m_keyManager.accountsHash(), ethereum(), &m_natSpecDB);
m_transact->show();
}
@ -735,18 +735,17 @@ void Main::writeSettings()
s.setValue("windowState", saveState());
}
Secret Main::retrieveSecret(Address const& _a) const
Secret Main::retrieveSecret(Address const& _address) const
{
auto info = m_keyManager.accountDetails()[_a];
while (true)
{
Secret s = m_keyManager.secret(_a, [&](){
Secret s = m_keyManager.secret(_address, [&](){
QDialog d;
Ui_GetPassword gp;
gp.setupUi(&d);
d.setWindowTitle("Unlock Account");
gp.label->setText(QString("Enter the password for the account %2 (%1).").arg(QString::fromStdString(_a.abridged())).arg(QString::fromStdString(info.first)));
gp.entry->setPlaceholderText("Hint: " + QString::fromStdString(info.second));
gp.label->setText(QString("Enter the password for the account %2 (%1).").arg(QString::fromStdString(_address.abridged())).arg(QString::fromStdString(m_keyManager.accountName(_address))));
gp.entry->setPlaceholderText("Hint: " + QString::fromStdString(m_keyManager.passwordHint(_address)));
return d.exec() == QDialog::Accepted ? gp.entry->text().toStdString() : string();
});
if (s || QMessageBox::warning(nullptr, "Unlock Account", "The password you gave is incorrect for this key.", QMessageBox::Retry, QMessageBox::Cancel) == QMessageBox::Cancel)
@ -770,7 +769,7 @@ void Main::readSettings(bool _skipGeometry)
for (unsigned i = 0; i < b.size() / sizeof(Secret); ++i)
{
memcpy(&k, b.data() + i * sizeof(Secret), sizeof(Secret));
if (!m_keyManager.accounts().count(KeyPair(k).address()))
if (!m_keyManager.hasAccount(KeyPair(k).address()))
m_keyManager.import(k, "Imported (UNSAFE) key.");
}
}
@ -858,7 +857,7 @@ void Main::on_importKey_triggered()
if (b.size() == 32)
{
auto k = KeyPair(h256(b));
if (!m_keyManager.accounts().count(k.address()))
if (!m_keyManager.hasAccount(k.address()))
{
QString s = QInputDialog::getText(this, "Import Account Key", "Enter this account's name");
if (QMessageBox::question(this, "Additional Security?", "Would you like to use additional security for this key? This lets you protect it with a different password to other keys, but also means you must re-enter the key's password every time you wish to use the account.", QMessageBox::Yes, QMessageBox::No) == QMessageBox::Yes)
@ -939,7 +938,7 @@ void Main::on_claimPresale_triggered()
}
cnote << k.address();
if (!m_keyManager.accounts().count(k.address()))
if (!m_keyManager.hasAccount(k.address()))
ethereum()->submitTransaction(k.sec(), ethereum()->balanceAt(k.address()) - gasPrice() * c_txGas, m_beneficiary, {}, c_txGas, gasPrice());
else
QMessageBox::warning(this, "Already Have Key", "Could not import the secret key: we already own this account.");
@ -1110,13 +1109,13 @@ void Main::refreshBalances()
// cdebug << n << addr << denom << sha3(h256(n).asBytes());
altCoins[addr] = make_tuple(fromRaw(n), 0, denom);
}*/
for (pair<Address, std::pair<std::string, std::string>> const& i: m_keyManager.accountDetails())
for (auto const& address: m_keyManager.accounts())
{
u256 b = ethereum()->balanceAt(i.first);
QListWidgetItem* li = new QListWidgetItem(QString("%4 %2: %1 [%3]").arg(formatBalance(b).c_str()).arg(QString::fromStdString(render(i.first))).arg((unsigned)ethereum()->countAt(i.first)).arg(QString::fromStdString(i.second.first)), ui->ourAccounts);
li->setData(Qt::UserRole, QByteArray((char const*)i.first.data(), Address::size));
u256 b = ethereum()->balanceAt(address);
QListWidgetItem* li = new QListWidgetItem(QString("%4 %2: %1 [%3]").arg(formatBalance(b).c_str()).arg(QString::fromStdString(render(address))).arg((unsigned)ethereum()->countAt(address)).arg(QString::fromStdString(m_keyManager.accountName(address))), ui->ourAccounts);
li->setData(Qt::UserRole, QByteArray((char const*)address.data(), Address::size));
li->setFlags(Qt::ItemIsUserCheckable | Qt::ItemIsEnabled | Qt::ItemIsSelectable);
li->setCheckState(m_beneficiary == i.first ? Qt::Checked : Qt::Unchecked);
li->setCheckState(m_beneficiary == address ? Qt::Checked : Qt::Unchecked);
totalBalance += b;
// for (auto& c: altCoins)
@ -1158,7 +1157,7 @@ void Main::refreshNetwork()
auto ns = web3()->nodes();
for (p2p::Peer const& i: ns)
ui->nodes->insertItem(sessions.count(i.id) ? 0 : ui->nodes->count(), QString("[%1 %3] %2 - ( =%5s | /%4s%6 ) - *%7 $%8")
ui->nodes->insertItem(sessions.count(i.id) ? 0 : ui->nodes->count(), QString("[%1 %3] %2 - ( %4 ) - *%5")
.arg(QString::fromStdString(i.id.abridged()))
.arg(QString::fromStdString(i.endpoint.address.to_string()))
.arg(i.id == web3()->id() ? "self" : sessions.count(i.id) ? sessions[i.id] : "disconnected")
@ -1254,7 +1253,7 @@ void Main::refreshBlockCount()
BlockQueueStatus b = ethereum()->blockQueueStatus();
SyncStatus sync = ethereum()->syncStatus();
QString syncStatus = EthereumHost::stateName(sync.state);
if (sync.state == SyncState::HashesParallel || sync.state == SyncState::HashesSingle)
if (sync.state == SyncState::Hashes)
syncStatus += QString(": %1/%2%3").arg(sync.hashesReceived).arg(sync.hashesEstimated ? "~" : "").arg(sync.hashesTotal);
if (sync.state == SyncState::Blocks || sync.state == SyncState::NewBlocks)
syncStatus += QString(": %1/%2").arg(sync.blocksReceived).arg(sync.blocksTotal);
@ -2094,9 +2093,8 @@ void Main::on_killAccount_triggered()
{
auto hba = ui->ourAccounts->currentItem()->data(Qt::UserRole).toByteArray();
Address h((byte const*)hba.data(), Address::ConstructFromPointer);
auto k = m_keyManager.accountDetails()[h];
QString s = QInputDialog::getText(this, QString::fromStdString("Kill Account " + k.first + "?!"),
QString::fromStdString("Account " + k.first + " (" + render(h) + ") has " + formatBalance(ethereum()->balanceAt(h)) + " in it.\r\nIt, and any contract that this account can access, will be lost forever if you continue. Do NOT continue unless you know what you are doing.\n"
QString s = QInputDialog::getText(this, QString::fromStdString("Kill Account " + m_keyManager.accountName(h) + "?!"),
QString::fromStdString("Account " + m_keyManager.accountName(h) + " (" + render(h) + ") has " + formatBalance(ethereum()->balanceAt(h)) + " in it.\r\nIt, and any contract that this account can access, will be lost forever if you continue. Do NOT continue unless you know what you are doing.\n"
"Are you sure you want to continue? \r\n If so, type 'YES' to confirm."),
QLineEdit::Normal, "NO");
if (s != "YES")
@ -2104,10 +2102,10 @@ void Main::on_killAccount_triggered()
m_keyManager.kill(h);
if (m_keyManager.accounts().empty())
m_keyManager.import(Secret::random(), "Default account");
m_beneficiary = *m_keyManager.accounts().begin();
m_beneficiary = m_keyManager.accounts().front();
keysChanged();
if (m_beneficiary == h)
setBeneficiary(*m_keyManager.accounts().begin());
setBeneficiary(m_keyManager.accounts().front());
}
}
@ -2128,7 +2126,7 @@ void Main::on_reencryptKey_triggered()
return;
try {
auto pw = [&](){
auto p = QInputDialog::getText(this, "Re-Encrypt Key", "Enter the original password for this key.\nHint: " + QString::fromStdString(m_keyManager.hint(a)), QLineEdit::Password, QString()).toStdString();
auto p = QInputDialog::getText(this, "Re-Encrypt Key", "Enter the original password for this key.\nHint: " + QString::fromStdString(m_keyManager.passwordHint(a)), QLineEdit::Password, QString()).toStdString();
if (p.empty())
throw PasswordUnknown();
return p;
@ -2151,7 +2149,7 @@ void Main::on_reencryptAll_triggered()
try {
for (Address const& a: m_keyManager.accounts())
while (!m_keyManager.recode(a, SemanticPassword::Existing, [&](){
auto p = QInputDialog::getText(nullptr, "Re-Encrypt Key", QString("Enter the original password for key %1.\nHint: %2").arg(QString::fromStdString(pretty(a))).arg(QString::fromStdString(m_keyManager.hint(a))), QLineEdit::Password, QString()).toStdString();
auto p = QInputDialog::getText(nullptr, "Re-Encrypt Key", QString("Enter the original password for key %1.\nHint: %2").arg(QString::fromStdString(pretty(a))).arg(QString::fromStdString(m_keyManager.passwordHint(a))), QLineEdit::Password, QString()).toStdString();
if (p.empty())
throw PasswordUnknown();
return p;

2
alethzero/MainWin.h

@ -96,7 +96,7 @@ public:
dev::eth::KeyManager& keyManager() override { return m_keyManager; }
bool doConfirm();
dev::Secret retrieveSecret(dev::Address const& _a) const override;
dev::Secret retrieveSecret(dev::Address const& _address) const override;
public slots:
void load(QString _file);

7
alethzero/NatspecHandler.h

@ -22,16 +22,11 @@
#pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <libdevcore/db.h>
#include <json/json.h>
#include <libdevcore/FixedHash.h>
#include "Context.h"
namespace ldb = leveldb;
class NatspecHandler: public NatSpecFace
{
public:

2
alethzero/OurWebThreeStubServer.cpp

@ -136,7 +136,7 @@ void OurAccountHolder::doValidations()
AddressHash OurAccountHolder::realAccounts() const
{
return m_main->keyManager().accounts();
return m_main->keyManager().accountsHash();
}
bool OurAccountHolder::validateTransaction(TransactionSkeleton const& _t, bool _toProxy)

7
alethzero/Transact.cpp

@ -77,11 +77,10 @@ void Transact::setEnvironment(AddressHash const& _accounts, dev::eth::Client* _e
auto old = ui->from->currentIndex();
ui->from->clear();
for (auto const& i: m_accounts)
for (auto const& address: m_accounts)
{
auto d = m_context->keyManager().accountDetails()[i];
u256 b = ethereum()->balanceAt(i, PendingBlock);
QString s = QString("%4 %2: %1").arg(formatBalance(b).c_str()).arg(QString::fromStdString(m_context->render(i))).arg(QString::fromStdString(d.first));
u256 b = ethereum()->balanceAt(address, PendingBlock);
QString s = QString("%4 %2: %1").arg(formatBalance(b).c_str()).arg(QString::fromStdString(m_context->render(address))).arg(QString::fromStdString(m_context->keyManager().accountName(address)));
ui->from->addItem(s);
}
if (old > -1 && old < ui->from->count())

17
cmake/EthCompilerSettings.cmake

@ -34,17 +34,21 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
# disable unknown pragma warning (4068)
# disable unsafe function warning (4996)
# disable decorated name length exceeded, name was truncated (4503)
# disable conversion from 'size_t' to 'type', possible loss of data (4267)
# disable qualifier applied to function type has no meaning; ignored (4180)
# disable C++ exception specification ignored except to indicate a function is not __declspec(nothrow) (4290)
# disable conversion from 'type1' to 'type2', possible loss of data (4244)
# disable forcing value to bool 'true' or 'false' (performance warning) (4800)
# disable warning C4535: calling _set_se_translator() requires /EHa (for boost tests)
# declare Windows XP requirement
# undefine windows.h MAX && MIN macros cause it cause conflicts with std::min && std::max functions
# define miniupnp static library
add_compile_options(/MP /EHsc /wd4068 /wd4996 /wd4503 -D_WIN32_WINNT=0x0501 /DNOMINMAX /DMINIUPNP_STATICLIB)
add_compile_options(/MP /EHsc /wd4068 /wd4996 /wd4503 /wd4267 /wd4180 /wd4290 /wd4244 /wd4800 -D_WIN32_WINNT=0x0501 /DNOMINMAX /DMINIUPNP_STATICLIB)
# disable empty object file warning
set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /ignore:4221")
# warning LNK4075: ignoring '/EDITANDCONTINUE' due to '/SAFESEH' specification
# warning LNK4099: pdb was not found with lib
# stack size 16MB
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /ignore:4099,4075 /STACK:33554432")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /ignore:4099,4075")
# windows likes static
if (NOT ETH_STATIC)
@ -64,6 +68,13 @@ if (PROFILING AND (("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU") OR ("${CMAKE_CXX_C
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lprofiler")
endif ()
if (PROFILING AND (("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")))
set(CMAKE_CXX_FLAGS "-g --coverage ${CMAKE_CXX_FLAGS}")
set(CMAKE_C_FLAGS "-g --coverage ${CMAKE_C_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "--coverage ${CMAKE_SHARED_LINKER_FLAGS} -lprofiler")
set(CMAKE_EXE_LINKER_FLAGS "--coverage ${CMAKE_EXE_LINKER_FLAGS} -lprofiler")
endif ()
if (("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU") OR ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang"))
option(USE_LD_GOLD "Use GNU gold linker" ON)
if (USE_LD_GOLD)

6
cmake/EthDependencies.cmake

@ -49,6 +49,12 @@ find_package (LevelDB REQUIRED)
message(" - LevelDB header: ${LEVELDB_INCLUDE_DIRS}")
message(" - LevelDB lib: ${LEVELDB_LIBRARIES}")
find_package (RocksDB)
if (ROCKSDB_FOUND)
message(" - RocksDB header: ${ROCKSDB_INCLUDE_DIRS}")
message(" - RocksDB lib: ${ROCKSDB_LIBRARIES}")
endif()
if (JSCONSOLE)
find_package (v8 REQUIRED)
message(" - v8 header: ${V8_INCLUDE_DIRS}")

49
cmake/FindRocksDB.cmake

@ -0,0 +1,49 @@
# Find rocksdb
#
# Find the rocksdb includes and library
#
# if you nee to add a custom library search path, do it via via CMAKE_PREFIX_PATH
#
# This module defines
# ROCKSDB_INCLUDE_DIRS, where to find header, etc.
# ROCKSDB_LIBRARIES, the libraries needed to use rocksdb.
# ROCKSDB_FOUND, If false, do not try to use rocksdb.
# only look in default directories
find_path(
ROCKSDB_INCLUDE_DIR
NAMES rocksdb/db.h
DOC "rocksdb include dir"
)
find_library(
ROCKSDB_LIBRARY
NAMES rocksdb
DOC "rocksdb library"
)
set(ROCKSDB_INCLUDE_DIRS ${ROCKSDB_INCLUDE_DIR})
set(ROCKSDB_LIBRARIES ${ROCKSDB_LIBRARY})
# debug library on windows
# same naming convention as in qt (appending debug library with d)
# boost is using the same "hack" as us with "optimized" and "debug"
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
find_library(
ROCKSDB_LIBRARY_DEBUG
NAMES rocksdbd
DOC "rocksdb debug library"
)
set(ROCKSDB_LIBRARIES optimized ${ROCKSDB_LIBRARIES} debug ${ROCKSDB_LIBRARY_DEBUG})
endif()
# handle the QUIETLY and REQUIRED arguments and set ROCKSDB_FOUND to TRUE
# if all listed variables are TRUE, hide their existence from configuration view
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(rocksdb DEFAULT_MSG
ROCKSDB_INCLUDE_DIR ROCKSDB_LIBRARY)
mark_as_advanced (ROCKSDB_INCLUDE_DIR ROCKSDB_LIBRARY)

21
eth/main.cpp

@ -690,7 +690,7 @@ int main(int argc, char** argv)
return ret;
};
auto getAccountPassword = [&](Address const& a){
return getPassword("Enter password for address " + keyManager.accountDetails()[a].first + " (" + a.abridged() + "; hint:" + keyManager.accountDetails()[a].second + "): ");
return getPassword("Enter password for address " + keyManager.accountName(a) + " (" + a.abridged() + "; hint:" + keyManager.passwordHint(a) + "): ");
};
StructuredLogger::get().initialize(structuredLogging, structuredLoggingFormat, structuredLoggingURL);
@ -764,7 +764,8 @@ int main(int argc, char** argv)
case ImportResult::Success: good++; break;
case ImportResult::AlreadyKnown: alreadyHave++; break;
case ImportResult::UnknownParent: unknownParent++; break;
case ImportResult::FutureTime: futureTime++; break;
case ImportResult::FutureTimeUnknown: unknownParent++; futureTime++; break;
case ImportResult::FutureTimeKnown: futureTime++; break;
default: bad++; break;
}
}
@ -835,12 +836,13 @@ int main(int argc, char** argv)
cout << "Networking disabled. To start, use netstart or pass -b or a remote host." << endl;
#if ETH_JSONRPC || !ETH_TRUE
shared_ptr<WebThreeStubServer> jsonrpcServer;
shared_ptr<dev::WebThreeStubServer> jsonrpcServer;
unique_ptr<jsonrpc::AbstractServerConnector> jsonrpcConnector;
if (jsonrpc > -1)
{
jsonrpcConnector = unique_ptr<jsonrpc::AbstractServerConnector>(new jsonrpc::HttpServer(jsonrpc, "", "", SensibleHttpThreads));
jsonrpcServer = shared_ptr<WebThreeStubServer>(new WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared<SimpleAccountHolder>([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector<KeyPair>(), keyManager, *gasPricer));
jsonrpcServer = shared_ptr<dev::WebThreeStubServer>(new dev::WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared<SimpleAccountHolder>([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector<KeyPair>(), keyManager, *gasPricer));
jsonrpcServer->setMiningBenefactorChanger([&](Address const& a) { beneficiary = a; });
jsonrpcServer->StartListening();
if (jsonAdmin.empty())
jsonAdmin = jsonrpcServer->newSession(SessionPermissions{{Priviledge::Admin}});
@ -995,7 +997,8 @@ int main(int argc, char** argv)
if (jsonrpc < 0)
jsonrpc = SensibleHttpPort;
jsonrpcConnector = unique_ptr<jsonrpc::AbstractServerConnector>(new jsonrpc::HttpServer(jsonrpc, "", "", SensibleHttpThreads));
jsonrpcServer = shared_ptr<WebThreeStubServer>(new WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared<SimpleAccountHolder>([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector<KeyPair>(), keyManager, *gasPricer));
jsonrpcServer = shared_ptr<dev::WebThreeStubServer>(new dev::WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared<SimpleAccountHolder>([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector<KeyPair>(), keyManager, *gasPricer));
jsonrpcServer->setMiningBenefactorChanger([&](Address const& a) { beneficiary = a; });
jsonrpcServer->StartListening();
if (jsonAdmin.empty())
jsonAdmin = jsonrpcServer->newSession(SessionPermissions{{Priviledge::Admin}});
@ -1136,10 +1139,10 @@ int main(int argc, char** argv)
{
cout << "Accounts:" << endl;
u256 total = 0;
for (auto const& i: keyManager.accountDetails())
for (auto const& address: keyManager.accounts())
{
auto b = c->balanceAt(i.first);
cout << ((i.first == signingKey) ? "SIGNING " : " ") << ((i.first == beneficiary) ? "COINBASE " : " ") << i.second.first << " (" << i.first << "): " << formatBalance(b) << " = " << b << " wei" << endl;
auto b = c->balanceAt(address);
cout << ((address == signingKey) ? "SIGNING " : " ") << ((address == beneficiary) ? "COINBASE " : " ") << keyManager.accountName(address) << " (" << address << "): " << formatBalance(b) << " = " << b << " wei" << endl;
total += b;
}
cout << "Total: " << formatBalance(total) << " = " << total << " wei" << endl;
@ -1742,7 +1745,7 @@ int main(int argc, char** argv)
JSConsole console(web3, make_shared<SimpleAccountHolder>([&](){return web3.ethereum();}, getAccountPassword, keyManager));
while (!g_exit)
{
console.repl();
console.readExpression();
stopMiningAfterXBlocks(c, n, mining);
}
#endif

20
ethkey/KeyAux.h

@ -44,7 +44,7 @@ class BadArgument: public Exception {};
string getAccountPassword(KeyManager& keyManager, Address const& a)
{
return getPassword("Enter password for address " + keyManager.accountDetails()[a].first + " (" + a.abridged() + "; hint:" + keyManager.accountDetails()[a].second + "): ");
return getPassword("Enter password for address " + keyManager.accountName(a) + " (" + a.abridged() + "; hint:" + keyManager.passwordHint(a) + "): ");
}
string createPassword(std::string const& _prompt)
@ -221,26 +221,26 @@ public:
break;
}
case OperationMode::ImportBare:
for (string const& i: m_inputs)
for (string const& input: m_inputs)
{
h128 u;
bytes b;
b = fromHex(i);
b = fromHex(input);
if (b.size() != 32)
{
std::string s = contentsString(i);
std::string s = contentsString(input);
b = fromHex(s);
if (b.size() != 32)
u = store.importKey(i);
u = store.importKey(input);
}
if (!u && b.size() == 32)
u = store.importSecret(b, lockPassword(toAddress(Secret(b)).abridged()));
if (!u)
{
cerr << "Cannot import " << i << " not a file or secret." << endl;
cerr << "Cannot import " << input << " not a file or secret." << endl;
continue;
}
cout << "Successfully imported " << i << " as " << toUUID(u);
cout << "Successfully imported " << input << " as " << toUUID(u);
}
break;
case OperationMode::InspectBare:
@ -359,20 +359,18 @@ public:
nonIcap.push_back(u);
else
{
std::pair<std::string, std::string> info = wallet.accountDetails()[a];
cout << toUUID(u) << " " << a.abridged();
cout << " " << ICAP(a).encoded();
cout << " " << info.first << endl;
cout << " " << wallet.accountName(a) << endl;
}
else
bare.push_back(u);
for (auto const& u: nonIcap)
if (Address a = wallet.address(u))
{
std::pair<std::string, std::string> info = wallet.accountDetails()[a];
cout << toUUID(u) << " " << a.abridged();
cout << " (Not ICAP) ";
cout << " " << info.first << endl;
cout << " " << wallet.accountName(a) << endl;
}
for (auto const& u: bare)
cout << toUUID(u) << " (Bare)" << endl;

11
ethminer/MinerAux.h

@ -134,8 +134,6 @@ public:
m_clAllowCPU = true;
else if (arg == "--cl-extragpu-mem" && i + 1 < argc)
m_extraGPUMemory = 1000000 * stol(argv[++i]);
else if (arg == "--force-single-chunk")
m_forceSingleChunk = true;
else if (arg == "--phone-home" && i + 1 < argc)
{
string m = argv[++i];
@ -273,7 +271,6 @@ public:
m_openclDevice,
m_clAllowCPU,
m_extraGPUMemory,
m_forceSingleChunk,
m_currentBlock
))
{
@ -318,10 +315,9 @@ public:
<< " --opencl-device <n> When mining using -G/--opencl use OpenCL device n (default: 0)." << endl
<< " -t, --mining-threads <n> Limit number of CPU/GPU miners to n (default: use everything available on selected platform)" << endl
<< " --allow-opencl-cpu Allows CPU to be considered as an OpenCL device if the OpenCL platform supports it." << endl
<< " --list-devices List the detected OpenCL devices and exit." <<endl
<< " --current-block Let the miner know the current block number at configuration time. Will help determine DAG size and required GPU memory." <<endl
<< " --cl-extragpu-mem Set the memory (in MB) you believe your GPU requires for stuff other than mining. Windows rendering e.t.c.." <<endl
<< " --force-single-chunk Force DAG uploading in a single chunk against OpenCL's judgement. Use at your own risk." <<endl
<< " --list-devices List the detected OpenCL devices and exit." << endl
<< " --current-block Let the miner know the current block number at configuration time. Will help determine DAG size and required GPU memory." << endl
<< " --cl-extragpu-mem Set the memory (in MB) you believe your GPU requires for stuff other than mining. Windows rendering e.t.c.." << endl
;
}
@ -510,7 +506,6 @@ private:
unsigned m_miningThreads = UINT_MAX;
bool m_shouldListDevices = false;
bool m_clAllowCPU = false;
bool m_forceSingleChunk = false;
boost::optional<uint64_t> m_currentBlock;
// default value is 350MB of GPU memory for other stuff (windows system rendering, e.t.c.)
unsigned m_extraGPUMemory = 350000000;

2
ethvm/CMakeLists.txt

@ -4,7 +4,7 @@ set(CMAKE_AUTOMOC OFF)
aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
set(EXECUTABLE ethvm)

2
exp/CMakeLists.txt

@ -5,7 +5,7 @@ aux_source_directory(. SRC_LIST)
include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
set(EXECUTABLE exp)

63
getcoverage.sh

@ -0,0 +1,63 @@
#!/bin/bash
CPP_ETHEREUM_PATH=$(pwd)
BUILD_DIR=$CPP_ETHEREUM_PATH/build
TEST_MODE=""
for i in "$@"
do
case $i in
-builddir)
shift
((i++))
BUILD_DIR=${!i}
shift
;;
--all)
TEST_MODE="--all"
shift
;;
esac
done
which $BUILD_DIR/test/testeth >/dev/null 2>&1
if [ $? != 0 ]
then
echo "You need to compile and build ethereum with cmake -DPROFILING option to the build dir!"
exit;
fi
OUTPUT_DIR=$BUILD_DIR/test/coverage
if which lcov >/dev/null; then
if which genhtml >/dev/null; then
echo Cleaning previous report...
if [ -d "$OUTPUT_DIR" ]; then
rm -r $OUTPUT_DIR
fi
mkdir $OUTPUT_DIR
lcov --directory $BUILD_DIR --zerocounters
lcov --capture --initial --directory $BUILD_DIR --output-file $OUTPUT_DIR/coverage_base.info
echo Running testeth...
$CPP_ETHEREUM_PATH/build/test/testeth $TEST_MODE
$CPP_ETHEREUM_PATH/build/test/testeth -t StateTests --jit $TEST_MODE
$CPP_ETHEREUM_PATH/build/test/testeth -t VMTests --jit $TEST_MODE
echo Prepearing coverage info...
lcov --capture --directory $BUILD_DIR --output-file $OUTPUT_DIR/coverage_test.info
lcov --add-tracefile $OUTPUT_DIR/coverage_base.info --add-tracefile $OUTPUT_DIR/coverage_test.info --output-file $OUTPUT_DIR/coverage_all.info
lcov --extract $OUTPUT_DIR/coverage_all.info *cpp-ethereum/* --output-file $OUTPUT_DIR/coverage_export.info
genhtml $OUTPUT_DIR/coverage_export.info --output-directory $OUTPUT_DIR/testeth
else
echo genhtml not found
exit;
fi
else
echo lcov not found
exit;
fi
echo "Coverage info should be located at: $OUTPUT_DIR/testeth"
echo "Opening index..."
xdg-open $OUTPUT_DIR/testeth/index.html &

2
libdevcore/CMakeLists.txt

@ -15,6 +15,7 @@ aux_source_directory(. SRC_LIST)
include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
include_directories(BEFORE ..)
include_directories(${Boost_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
set(EXECUTABLE devcore)
@ -26,6 +27,7 @@ target_link_libraries(${EXECUTABLE} ${Boost_THREAD_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${Boost_SYSTEM_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${Boost_FILESYSTEM_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${JSONCPP_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${DB_LIBRARIES})
# transitive dependencies for windows executables
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")

2
libdevcore/Common.cpp

@ -28,7 +28,7 @@ using namespace dev;
namespace dev
{
char const* Version = "0.9.26";
char const* Version = "0.9.27";
const u256 UndefinedU256 = ~(u256)0;

21
libdevcore/Common.h

@ -113,25 +113,27 @@ static const u256 Invalid256 = ~(u256)0;
static const bytes NullBytes;
static const std::map<u256, u256> EmptyMapU256U256;
/// Interprets @a _u as a two's complement signed number and returns the resulting s256.
inline s256 u2s(u256 _u)
{
static const bigint c_end = (bigint)1 << 256;
static const u256 c_send = (u256)1 << 255;
if (_u < c_send)
return (s256)_u;
else
return (s256)-(c_end - _u);
static const bigint c_end = bigint(1) << 256;
if (boost::multiprecision::bit_test(_u, 255))
return s256(-(c_end - _u));
else
return s256(_u);
}
/// @returns the two's complement signed representation of the signed number _u.
inline u256 s2u(s256 _u)
{
static const bigint c_end = (bigint)1 << 256;
static const bigint c_end = bigint(1) << 256;
if (_u >= 0)
return (u256)_u;
return u256(_u);
else
return (u256)(c_end + _u);
return u256(c_end + _u);
}
/// @returns the smallest n >= 0 such that (1 << n) >= _x
inline unsigned int toLog2(u256 _x)
{
unsigned ret;
@ -139,6 +141,7 @@ inline unsigned int toLog2(u256 _x)
return ret;
}
/// @returns the absolute distance between _a and _b.
template <class N>
inline N diff(N const& _a, N const& _b)
{
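The libdevcore/Common.h hunk above rewrites u2s/s2u, the conversions between the unsigned 256-bit word type and its two's complement signed interpretation. A minimal standalone sketch of the same conversion follows; the Boost.Multiprecision aliases are my assumption (they mirror how libdevcore defines bigint/u256/s256), so treat this as an illustration, not code from the patch.

#include <boost/multiprecision/cpp_int.hpp>
#include <cassert>

using bigint = boost::multiprecision::cpp_int;
using u256 = boost::multiprecision::number<boost::multiprecision::cpp_int_backend<256, 256,
	boost::multiprecision::unsigned_magnitude, boost::multiprecision::unchecked, void>>;
using s256 = boost::multiprecision::number<boost::multiprecision::cpp_int_backend<256, 256,
	boost::multiprecision::signed_magnitude, boost::multiprecision::unchecked, void>>;

// Interpret _u as a two's complement signed number (mirrors the new u2s above).
s256 u2s(u256 _u)
{
	static bigint const c_end = bigint(1) << 256;
	if (boost::multiprecision::bit_test(_u, 255))	// sign bit set => negative value
		return s256(-(c_end - _u));
	return s256(_u);
}

// Encode a signed number back into its two's complement word (mirrors the new s2u above).
u256 s2u(s256 _u)
{
	static bigint const c_end = bigint(1) << 256;
	if (_u >= 0)
		return u256(_u);
	return u256(c_end + _u);
}

int main()
{
	u256 allOnes = ~u256(0);			// 0xff...ff, the largest 256-bit word
	assert(u2s(allOnes) == -1);			// reads as -1 in two's complement
	assert(s2u(s256(-1)) == allOnes);	// the inverse restores the word
	assert(s2u(u2s(u256(42))) == 42);	// non-negative values pass through unchanged
}

The asserts spell out the property the rewrite preserves: the all-ones word reads as -1 and the mapping round-trips.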

4
libdevcore/CommonData.cpp

@ -93,7 +93,7 @@ bytes dev::fromHex(std::string const& _s, WhenError _throw)
if (h != -1)
ret.push_back(h);
else if (_throw == WhenError::Throw)
throw BadHexCharacter();
BOOST_THROW_EXCEPTION(BadHexCharacter());
else
return bytes();
}
@ -104,7 +104,7 @@ bytes dev::fromHex(std::string const& _s, WhenError _throw)
if (h != -1 && l != -1)
ret.push_back((byte)(h * 16 + l));
else if (_throw == WhenError::Throw)
throw BadHexCharacter();
BOOST_THROW_EXCEPTION(BadHexCharacter());
else
return bytes();
}

14
libdevcore/CommonData.h

@ -25,6 +25,7 @@
#include <vector>
#include <algorithm>
#include <unordered_set>
#include <type_traits>
#include <cstring>
#include <string>
@ -68,11 +69,6 @@ int fromHex(char _i, WhenError _throw);
/// If _throw = ThrowType::DontThrow, it replaces bad hex characters with 0's, otherwise it will throw an exception.
bytes fromHex(std::string const& _s, WhenError _throw = WhenError::DontThrow);
#if 0
std::string toBase58(bytesConstRef _data);
bytes fromBase58(std::string const& _s);
#endif
/// Converts byte array to a string containing the same (binary) data. Unless
/// the byte array happens to contain ASCII data, this won't be printable.
inline std::string asString(bytes const& _b)
@ -258,7 +254,7 @@ template <class T, class U> std::set<T>& operator+=(std::set<T>& _a, U const& _b
return _a;
}
/// Insert the contents of a container into an unordered_st
/// Insert the contents of a container into an unordered_set
template <class T, class U> std::unordered_set<T>& operator+=(std::unordered_set<T>& _a, U const& _b)
{
for (auto const& i: _b)
@ -280,6 +276,12 @@ template <class T, class U> std::set<T> operator+(std::set<T> _a, U const& _b)
return _a += _b;
}
/// Insert the contents of a container into an unordered_set
template <class T, class U> std::unordered_set<T> operator+(std::unordered_set<T> _a, U const& _b)
{
return _a += _b;
}
/// Concatenate the contents of a container onto a vector
template <class T, class U> std::vector<T> operator+(std::vector<T> _a, U const& _b)
{

79
libdevcore/CommonIO.cpp

@ -23,13 +23,14 @@
#include <iostream>
#include <cstdlib>
#include <fstream>
#include "Exceptions.h"
#include <stdio.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <termios.h>
#endif
#include <boost/filesystem.hpp>
#include "Exceptions.h"
using namespace std;
using namespace dev;
@ -64,64 +65,54 @@ string dev::memDump(bytes const& _bytes, unsigned _width, bool _html)
return ret.str();
}
// Don't forget to delete[] later.
bytesRef dev::contentsNew(std::string const& _file, bytesRef _dest)
template <typename _T>
inline _T contentsGeneric(std::string const& _file)
{
_T ret;
size_t const c_elementSize = sizeof(typename _T::value_type);
std::ifstream is(_file, std::ifstream::binary);
if (!is)
return bytesRef();
return ret;
// get length of file:
is.seekg (0, is.end);
is.seekg(0, is.end);
streamoff length = is.tellg();
if (length == 0) // return early, MSVC does not like reading 0 bytes
return bytesRef();
if (!_dest.empty() && _dest.size() != (unsigned)length)
return bytesRef();
is.seekg (0, is.beg);
bytesRef ret = _dest.empty() ? bytesRef(new byte[length], length) : _dest;
is.read((char*)ret.data(), length);
is.close();
if (length == 0)
return ret; // do not read empty file (MSVC does not like it)
is.seekg(0, is.beg);
ret.resize((length + c_elementSize - 1) / c_elementSize);
is.read(const_cast<char*>(reinterpret_cast<char const*>(ret.data())), length);
return ret;
}
bytes dev::contents(std::string const& _file)
bytes dev::contents(string const& _file)
{
std::ifstream is(_file, std::ifstream::binary);
if (!is)
return bytes();
// get length of file:
is.seekg (0, is.end);
streamoff length = is.tellg();
if (length == 0) // return early, MSVC does not like reading 0 bytes
return bytes();
is.seekg (0, is.beg);
bytes ret(length);
is.read((char*)ret.data(), length);
is.close();
return ret;
return contentsGeneric<bytes>(_file);
}
string dev::contentsString(std::string const& _file)
string dev::contentsString(string const& _file)
{
std::ifstream is(_file, std::ifstream::binary);
if (!is)
return string();
// get length of file:
is.seekg (0, is.end);
streamoff length = is.tellg();
if (length == 0) // return early, MSVC does not like reading 0 bytes
return string();
is.seekg (0, is.beg);
string ret;
ret.resize(length);
is.read((char*)ret.data(), length);
is.close();
return ret;
return contentsGeneric<string>(_file);
}
void dev::writeFile(std::string const& _file, bytesConstRef _data)
void dev::writeFile(std::string const& _file, bytesConstRef _data, bool _writeDeleteRename)
{
ofstream(_file, ios::trunc|ios::binary).write((char const*)_data.data(), _data.size());
if (_writeDeleteRename)
{
namespace fs = boost::filesystem;
fs::path tempPath = fs::unique_path(_file + "-%%%%%%");
writeFile(tempPath.string(), _data, false);
// will delete _file if it exists
fs::rename(tempPath, _file);
}
else
{
ofstream s(_file, ios::trunc | ios::binary);
s.write(reinterpret_cast<char const*>(_data.data()), _data.size());
if (!s)
BOOST_THROW_EXCEPTION(FileError());
}
}
std::string dev::getPassword(std::string const& _prompt)

15
libdevcore/CommonIO.h

@ -42,20 +42,27 @@
namespace dev
{
/// Requests the user to enter a password on the console.
std::string getPassword(std::string const& _prompt);
/// Retrieve and returns the contents of the given file. If the file doesn't exist or isn't readable, returns an empty bytes.
/// Retrieve and returns the contents of the given file.
/// If the file doesn't exist or isn't readable, returns an empty container / bytes.
bytes contents(std::string const& _file);
/// Retrieve and returns the contents of the given file as a std::string.
/// If the file doesn't exist or isn't readable, returns an empty container / bytes.
std::string contentsString(std::string const& _file);
/// Retrieve and returns the allocated contents of the given file; if @_dest is given, don't allocate, use it directly.
/// If the file doesn't exist or isn't readable, returns bytesRef(). Don't forget to delete [] the returned value's data when finished.
bytesRef contentsNew(std::string const& _file, bytesRef _dest = bytesRef());
/// Write the given binary data into the given file, replacing the file if it pre-exists.
void writeFile(std::string const& _file, bytesConstRef _data);
/// Throws exception on error.
/// @param _writeDeleteRename useful not to lose any data: If set, first writes to another file in
/// the same directory and then moves that file.
void writeFile(std::string const& _file, bytesConstRef _data, bool _writeDeleteRename = false);
/// Write the given binary data into the given file, replacing the file if it pre-exists.
inline void writeFile(std::string const& _file, bytes const& _data) { writeFile(_file, bytesConstRef(&_data)); }
inline void writeFile(std::string const& _file, std::string const& _data) { writeFile(_file, bytesConstRef(_data)); }
inline void writeFile(std::string const& _file, bytes const& _data, bool _writeDeleteRename = false) { writeFile(_file, bytesConstRef(&_data), _writeDeleteRename); }
inline void writeFile(std::string const& _file, std::string const& _data, bool _writeDeleteRename = false) { writeFile(_file, bytesConstRef(_data), _writeDeleteRename); }
/// Nicely renders the given bytes to a string, optionally as HTML.
/// @a _bytes: bytes array to be rendered as string. @a _width of a bytes line.
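The new writeFile overload documented above can write through a temporary file and rename it over the target, so an interrupted write cannot leave a truncated file behind. Below is a rough standalone sketch of that write-then-rename pattern; the function name writeFileSafely and the error text are mine, and Boost.Filesystem is assumed, so this is an illustration rather than the library's implementation.

#include <boost/filesystem.hpp>
#include <fstream>
#include <stdexcept>
#include <string>

namespace fs = boost::filesystem;

// Write _data to _file via a temporary sibling file, then rename it into place.
void writeFileSafely(std::string const& _file, std::string const& _data)
{
	// Writing to a unique temporary first means a crash cannot truncate the real target.
	fs::path tempPath = fs::unique_path(_file + "-%%%%%%");
	{
		std::ofstream s(tempPath.string(), std::ios::trunc | std::ios::binary);
		s.write(_data.data(), static_cast<std::streamsize>(_data.size()));
		if (!s)
			throw std::runtime_error("could not write " + tempPath.string());
	}
	// rename() replaces _file if it already exists, as the patch's own comment notes.
	fs::rename(tempPath, _file);
}

int main()
{
	writeFileSafely("/tmp/example-keyfile.json", "{}");
}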

15
libdevcore/TrieDB.h

@ -21,19 +21,14 @@
#pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <memory>
#include <libdevcore/Common.h>
#include <libdevcore/Log.h>
#include <libdevcore/Exceptions.h>
#include <libdevcore/SHA3.h>
#include "db.h"
#include "Common.h"
#include "Log.h"
#include "Exceptions.h"
#include "SHA3.h"
#include "MemoryDB.h"
#include "TrieCommon.h"
namespace ldb = leveldb;
namespace dev
{

36
libdevcore/db.h

@ -0,0 +1,36 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file DB.h
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#if ETH_ROCKSDB || !ETH_TRUE
#include <rocksdb/db.h>
#include <rocksdb/write_batch.h>
namespace ldb = rocksdb;
#else
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
namespace ldb = leveldb;
#endif
#pragma warning(pop)
#define DEV_LDB 1
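The new libdevcore/db.h funnels both key-value stores through the single ldb namespace alias, selected by ETH_ROCKSDB, so callers are written once against either backend. A hypothetical caller could look like the sketch below; the database path and key are made up, and only calls that exist with the same signatures in both LevelDB and RocksDB are used.

#include <libdevcore/db.h>
#include <cassert>
#include <string>

int main()
{
	ldb::DB* db = nullptr;
	ldb::Options o;
	o.create_if_missing = true;

	// The same calls compile against LevelDB or RocksDB, depending on ETH_ROCKSDB.
	ldb::Status status = ldb::DB::Open(o, "/tmp/eth-db-example", &db);
	assert(status.ok());

	db->Put(ldb::WriteOptions(), "key", "value");
	std::string value;
	db->Get(ldb::ReadOptions(), "key", &value);
	assert(value == "value");

	delete db;
}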

33
libdevcore/vector_ref.h

@ -9,6 +9,9 @@
namespace dev
{
/**
* A modifiable reference to an existing object or vector in memory.
*/
template <class _T>
class vector_ref
{
@ -17,34 +20,50 @@ public:
using element_type = _T;
using mutable_value_type = typename std::conditional<std::is_const<_T>::value, typename std::remove_const<_T>::type, _T>::type;
static_assert(std::is_pod<value_type>::value, "vector_ref can only be used with PODs due to its low-level treatment of data.");
vector_ref(): m_data(nullptr), m_count(0) {}
/// Creates a new vector_ref to point to @a _count elements starting at @a _data.
vector_ref(_T* _data, size_t _count): m_data(_data), m_count(_count) {}
/// Creates a new vector_ref pointing to the data part of a string (given as pointer).
vector_ref(typename std::conditional<std::is_const<_T>::value, std::string const*, std::string*>::type _data): m_data(reinterpret_cast<_T*>(_data->data())), m_count(_data->size() / sizeof(_T)) {}
/// Creates a new vector_ref pointing to the data part of a vector (given as pointer).
vector_ref(typename std::conditional<std::is_const<_T>::value, std::vector<typename std::remove_const<_T>::type> const*, std::vector<_T>*>::type _data): m_data(_data->data()), m_count(_data->size()) {}
vector_ref(typename std::conditional<std::is_const<_T>::value, std::string const&, std::string&>::type _data): m_data((_T*)_data.data()), m_count(_data.size() / sizeof(_T)) {}
#ifdef STORAGE_LEVELDB_INCLUDE_DB_H_
vector_ref(leveldb::Slice const& _s): m_data(reinterpret_cast<_T*>(_s.data())), m_count(_s.size() / sizeof(_T)) {}
/// Creates a new vector_ref pointing to the data part of a string (given as reference).
vector_ref(typename std::conditional<std::is_const<_T>::value, std::string const&, std::string&>::type _data): m_data(reinterpret_cast<_T*>(_data.data())), m_count(_data.size() / sizeof(_T)) {}
#if DEV_LDB
vector_ref(ldb::Slice const& _s): m_data(reinterpret_cast<_T*>(_s.data())), m_count(_s.size() / sizeof(_T)) {}
#endif
explicit operator bool() const { return m_data && m_count; }
bool contentsEqual(std::vector<mutable_value_type> const& _c) const { return _c.size() == m_count && !memcmp(_c.data(), m_data, m_count); }
bool contentsEqual(std::vector<mutable_value_type> const& _c) const { if (!m_data || m_count == 0) return _c.empty(); else return _c.size() == m_count && !memcmp(_c.data(), m_data, m_count * sizeof(_T)); }
std::vector<mutable_value_type> toVector() const { return std::vector<mutable_value_type>(m_data, m_data + m_count); }
std::vector<unsigned char> toBytes() const { return std::vector<unsigned char>(reinterpret_cast<unsigned char const*>(m_data), reinterpret_cast<unsigned char const*>(m_data) + m_count * sizeof(_T)); }
std::string toString() const { return std::string((char const*)m_data, ((char const*)m_data) + m_count * sizeof(_T)); }
template <class _T2> explicit operator vector_ref<_T2>() const { assert(m_count * sizeof(_T) / sizeof(_T2) * sizeof(_T2) / sizeof(_T) == m_count); return vector_ref<_T2>(reinterpret_cast<_T2*>(m_data), m_count * sizeof(_T) / sizeof(_T2)); }
operator vector_ref<_T const>() const { return vector_ref<_T const>(m_data, m_count); }
_T* data() const { return m_data; }
/// @returns the number of elements referenced (not necessarily number of bytes).
size_t count() const { return m_count; }
/// @returns the number of elements referenced (not necessarily number of bytes).
size_t size() const { return m_count; }
bool empty() const { return !m_count; }
vector_ref<_T> next() const { return vector_ref<_T>(m_data + m_count, m_count); }
/// @returns a new vector_ref pointing at the next chunk of @a size() elements.
vector_ref<_T> next() const { if (!m_data) return *this; else return vector_ref<_T>(m_data + m_count, m_count); }
/// @returns a new vector_ref which is a shifted and shortened view of the original data.
/// If this goes out of bounds in any way, returns an empty vector_ref.
/// If @a _count is ~size_t(0), extends the view to the end of the data.
vector_ref<_T> cropped(size_t _begin, size_t _count) const { if (m_data && _begin + _count <= m_count) return vector_ref<_T>(m_data + _begin, _count == ~size_t(0) ? m_count - _begin : _count); else return vector_ref<_T>(); }
/// @returns a new vector_ref which is a shifted view of the original data (not going beyond it).
vector_ref<_T> cropped(size_t _begin) const { if (m_data && _begin <= m_count) return vector_ref<_T>(m_data + _begin, m_count - _begin); else return vector_ref<_T>(); }
void retarget(_T* _d, size_t _s) { m_data = _d; m_count = _s; }
void retarget(std::vector<_T> const& _t) { m_data = _t.data(); m_count = _t.size(); }
template <class T> bool overlapsWith(vector_ref<T> _t) const { void const* f1 = data(); void const* t1 = data() + size(); void const* f2 = _t.data(); void const* t2 = _t.data() + _t.size(); return f1 < t2 && t1 > f2; }
/// Copies the contents of this vector_ref to the contents of @a _t, up to the max size of @a _t.
void copyTo(vector_ref<typename std::remove_const<_T>::type> _t) const { if (overlapsWith(_t)) memmove(_t.data(), m_data, std::min(_t.size(), m_count) * sizeof(_T)); else memcpy(_t.data(), m_data, std::min(_t.size(), m_count) * sizeof(_T)); }
/// Copies the contents of this vector_ref to the contents of @a _t, and zeros further trailing elements in @a _t.
void populate(vector_ref<typename std::remove_const<_T>::type> _t) const { copyTo(_t); memset(_t.data() + m_count, 0, std::max(_t.size(), m_count) - m_count); }
_T* begin() { return m_data; }
@ -58,8 +77,8 @@ public:
bool operator==(vector_ref<_T> const& _cmp) const { return m_data == _cmp.m_data && m_count == _cmp.m_count; }
bool operator!=(vector_ref<_T> const& _cmp) const { return !operator==(_cmp); }
#ifdef STORAGE_LEVELDB_INCLUDE_DB_H_
operator leveldb::Slice() const { return leveldb::Slice((char const*)m_data, m_count * sizeof(_T)); }
#if DEV_LDB
operator ldb::Slice() const { return ldb::Slice((char const*)m_data, m_count * sizeof(_T)); }
#endif
void reset() { m_data = nullptr; m_count = 0; }
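The expanded vector_ref comments above pin down the cropped(), copyTo() and next() semantics, including the out-of-bounds and empty cases. The following usage sketch assumes libdevcore/vector_ref.h from this tree is on the include path; the buffer contents are arbitrary example data.

#include <libdevcore/vector_ref.h>
#include <cassert>
#include <vector>

int main()
{
	std::vector<unsigned char> buf{0, 1, 2, 3, 4, 5, 6, 7};
	dev::vector_ref<unsigned char const> whole(&buf);

	// cropped(begin, count) is a bounded view; an out-of-range request yields an empty ref.
	auto mid = whole.cropped(2, 4);
	assert(mid.size() == 4 && mid.data()[0] == 2);
	assert(whole.cropped(6, 4).empty());

	// copyTo() copies at most the destination's size; populate() would also zero the tail.
	std::vector<unsigned char> out(8, 0xff);
	dev::vector_ref<unsigned char> target(&out);
	mid.copyTo(target);
	assert(out[0] == 2 && out[3] == 5);
}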

2
libdevcrypto/AES.h

@ -76,6 +76,8 @@ public:
/// Adjust mac interval. Next mac will be xored with value.
void adjustInterval(unsigned _interval) { m_macInterval = _interval; }
unsigned getMacInterval() { return m_macInterval;}
private:
AuthenticatedStream(AuthenticatedStream const&) = delete;
AuthenticatedStream& operator=(AuthenticatedStream const&) = delete;

4
libdevcrypto/CMakeLists.txt

@ -12,7 +12,7 @@ aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
include_directories(${Boost_INCLUDE_DIRS})
include_directories(${CRYPTOPP_INCLUDE_DIRS})
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
set(EXECUTABLE devcrypto)
@ -20,7 +20,7 @@ file(GLOB HEADERS "*.h")
add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS})
target_link_libraries(${EXECUTABLE} ${Boost_FILESYSTEM_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${DB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${CRYPTOPP_LIBRARIES})
target_link_libraries(${EXECUTABLE} scrypt)
target_link_libraries(${EXECUTABLE} devcore)

129
libdevcrypto/Common.cpp

@ -31,6 +31,7 @@
#include <libdevcore/FileSystem.h>
#include "AES.h"
#include "CryptoPP.h"
#include "Exceptions.h"
using namespace std;
using namespace dev;
using namespace dev::crypto;
@ -178,15 +179,35 @@ bool dev::verify(Public const& _p, Signature const& _s, h256 const& _hash)
bytes dev::pbkdf2(string const& _pass, bytes const& _salt, unsigned _iterations, unsigned _dkLen)
{
bytes ret(_dkLen);
PKCS5_PBKDF2_HMAC<SHA256> pbkdf;
pbkdf.DeriveKey(ret.data(), ret.size(), 0, (byte*)_pass.data(), _pass.size(), _salt.data(), _salt.size(), _iterations);
if (PKCS5_PBKDF2_HMAC<SHA256>().DeriveKey(
ret.data(),
ret.size(),
0,
reinterpret_cast<byte const*>(_pass.data()),
_pass.size(),
_salt.data(),
_salt.size(),
_iterations
) != _iterations)
BOOST_THROW_EXCEPTION(CryptoException() << errinfo_comment("Key derivation failed."));
return ret;
}
bytes dev::scrypt(std::string const& _pass, bytes const& _salt, uint64_t _n, uint32_t _r, uint32_t _p, unsigned _dkLen)
{
bytes ret(_dkLen);
libscrypt_scrypt((uint8_t const*)_pass.data(), _pass.size(), _salt.data(), _salt.size(), _n, _r, _p, ret.data(), ret.size());
if (libscrypt_scrypt(
reinterpret_cast<uint8_t const*>(_pass.data()),
_pass.size(),
_salt.data(),
_salt.size(),
_n,
_r,
_p,
ret.data(),
ret.size()
) != 0)
BOOST_THROW_EXCEPTION(CryptoException() << errinfo_comment("Key derivation failed."));
return ret;
}
@ -233,42 +254,84 @@ h256 crypto::kdf(Secret const& _priv, h256 const& _hash)
return s;
}
h256 Nonce::get(bool _commit)
mutex Nonce::s_x;
static string s_seedFile;
h256 Nonce::get()
{
// todo: atomic efface bit, periodic save, kdf, rr, rng
// todo: encrypt
static h256 s_seed;
static string s_seedFile(getDataDir() + "/seed");
static mutex s_x;
Guard l(s_x);
if (!s_seed)
Guard l(Nonce::s_x);
return Nonce::singleton().next();
}
void Nonce::reset()
{
Guard l(Nonce::s_x);
Nonce::singleton().resetInternal();
}
void Nonce::setSeedFilePath(string const& _filePath)
{
s_seedFile = _filePath;
}
Nonce::~Nonce()
{
Guard l(Nonce::s_x);
if (m_value)
// this might throw
resetInternal();
}
Nonce& Nonce::singleton()
{
static Nonce s;
return s;
}
void Nonce::initialiseIfNeeded()
{
if (m_value)
return;
bytes b = contents(seedFile());
if (b.size() == 32)
memcpy(m_value.data(), b.data(), 32);
else
{
static Nonce s_nonce;
bytes b = contents(s_seedFile);
if (b.size() == 32)
memcpy(s_seed.data(), b.data(), 32);
else
{
// todo: replace w/entropy from user and system
std::mt19937_64 s_eng(time(0) + chrono::high_resolution_clock::now().time_since_epoch().count());
std::uniform_int_distribution<uint16_t> d(0, 255);
for (unsigned i = 0; i < 32; ++i)
s_seed[i] = (byte)d(s_eng);
}
if (!s_seed)
BOOST_THROW_EXCEPTION(InvalidState());
// prevent seed reuse if process terminates abnormally
writeFile(s_seedFile, bytes());
// todo: replace w/entropy from user and system
std::mt19937_64 s_eng(time(0) + chrono::high_resolution_clock::now().time_since_epoch().count());
std::uniform_int_distribution<uint16_t> d(0, 255);
for (unsigned i = 0; i < 32; ++i)
m_value[i] = byte(d(s_eng));
}
h256 prev(s_seed);
sha3(prev.ref(), s_seed.ref());
if (_commit)
writeFile(s_seedFile, s_seed.asBytes());
return std::move(s_seed);
if (!m_value)
BOOST_THROW_EXCEPTION(InvalidState());
// prevent seed reuse if process terminates abnormally
// this might throw
writeFile(seedFile(), bytes());
}
Nonce::~Nonce()
h256 Nonce::next()
{
initialiseIfNeeded();
m_value = sha3(m_value);
return m_value;
}
void Nonce::resetInternal()
{
// this might throw
next();
writeFile(seedFile(), m_value.asBytes());
m_value = h256();
}
string const& Nonce::seedFile()
{
Nonce::get(true);
if (s_seedFile.empty())
s_seedFile = getDataDir() + "/seed";
return s_seedFile;
}

27
libdevcrypto/Common.h

@ -24,6 +24,7 @@
#pragma once
#include <mutex>
#include <libdevcore/Common.h>
#include <libdevcore/FixedHash.h>
#include <libdevcore/Exceptions.h>
@ -180,14 +181,36 @@ struct InvalidState: public dev::Exception {};
h256 kdf(Secret const& _priv, h256 const& _hash);
/**
* @brief Generator for nonce material
* @brief Generator for nonce material.
*/
struct Nonce
{
static h256 get(bool _commit = false);
/// Returns the next nonce (might be read from a file).
static h256 get();
/// Stores the current nonce in a file and resets Nonce to the uninitialised state.
static void reset();
/// Sets the location of the seed file to a non-default place. Used for testing.
static void setSeedFilePath(std::string const& _filePath);
private:
Nonce() {}
~Nonce();
/// @returns the singleton instance.
static Nonce& singleton();
/// Reads the last seed from the seed file.
void initialiseIfNeeded();
/// @returns the next nonce.
h256 next();
/// Stores the current seed in the seed file.
void resetInternal();
/// @returns the path of the seed file.
static std::string const& seedFile();
/// Mutex for the singleton object.
/// @note Every access to any private function has to be guarded by this mutex.
static std::mutex s_x;
h256 m_value;
};
}
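Taken together with the libdevcrypto/Common.cpp hunk above, the Nonce rework replaces the old get(bool _commit) entry point with a mutex-guarded singleton plus explicit reset() and setSeedFilePath(). A hypothetical caller is sketched below; the seed-file path is invented, and both using-directives are included because the enclosing namespace of Nonce is not visible in this hunk.

#include <libdevcrypto/Common.h>
#include <iostream>

using namespace dev;
using namespace dev::crypto;

int main()
{
	// Optional: point the seed file somewhere writable (the patch adds this hook for tests).
	Nonce::setSeedFilePath("/tmp/eth-nonce-seed");

	// Each call hashes the previous value, so consecutive nonces never repeat.
	h256 a = Nonce::get();
	h256 b = Nonce::get();
	std::cout << (a == b ? "unexpected repeat" : "nonces differ") << std::endl;

	// Persist the current value to the seed file and drop the in-memory state.
	Nonce::reset();
}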

35
libdevcrypto/Exceptions.h

@ -0,0 +1,35 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file Exceptions.h
* @author Christian <c@ethdev.com>
* @date 2016
*/
#pragma once
#include <libdevcore/Exceptions.h>
namespace dev
{
namespace crypto
{
/// Rare malfunction of cryptographic functions.
DEV_SIMPLE_EXCEPTION(CryptoException);
}
}

3
libdevcrypto/OverlayDB.cpp

@ -20,8 +20,7 @@
*/
#include <thread>
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
#include <libdevcore/db.h>
#include <libdevcore/Common.h>
#include "OverlayDB.h"
using namespace std;

7
libdevcrypto/OverlayDB.h

@ -21,16 +21,11 @@
#pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <memory>
#include <libdevcore/db.h>
#include <libdevcore/Common.h>
#include <libdevcore/Log.h>
#include <libdevcore/MemoryDB.h>
namespace ldb = leveldb;
namespace dev
{

124
libdevcrypto/SecretStore.cpp

@ -29,6 +29,7 @@
#include <libdevcore/SHA3.h>
#include <libdevcore/FileSystem.h>
#include <test/JsonSpiritHeaders.h>
#include <libdevcrypto/Exceptions.h>
using namespace std;
using namespace dev;
namespace js = json_spirit;
@ -36,7 +37,8 @@ namespace fs = boost::filesystem;
static const int c_keyFileVersion = 3;
static js::mValue upgraded(std::string const& _s)
/// Upgrade the json-format to the current version.
static js::mValue upgraded(string const& _s)
{
js::mValue v;
js::read_string(_s, v);
@ -84,35 +86,34 @@ static js::mValue upgraded(std::string const& _s)
return js::mValue();
}
SecretStore::SecretStore(std::string const& _path): m_path(_path)
SecretStore::SecretStore(string const& _path): m_path(_path)
{
load();
}
SecretStore::~SecretStore()
bytes SecretStore::secret(h128 const& _uuid, function<string()> const& _pass, bool _useCache) const
{
}
bytes SecretStore::secret(h128 const& _uuid, function<std::string()> const& _pass, bool _useCache) const
{
(void)_pass;
auto rit = m_cached.find(_uuid);
if (_useCache && rit != m_cached.end())
return rit->second;
auto it = m_keys.find(_uuid);
if (it == m_keys.end())
return bytes();
bytes key = decrypt(it->second.first, _pass());
if (!key.empty())
m_cached[_uuid] = key;
bytes key;
if (it != m_keys.end())
{
key = decrypt(it->second.encryptedKey, _pass());
if (!key.empty())
m_cached[_uuid] = key;
}
return key;
}
h128 SecretStore::importSecret(bytes const& _s, std::string const& _pass)
h128 SecretStore::importSecret(bytes const& _s, string const& _pass)
{
h128 r = h128::random();
h128 r;
EncryptedKey key{encrypt(_s, _pass), string()};
r = h128::random();
m_cached[r] = _s;
m_keys[r] = make_pair(encrypt(_s, _pass), std::string());
m_keys[r] = move(key);
save();
return r;
}
@ -122,7 +123,7 @@ void SecretStore::kill(h128 const& _uuid)
m_cached.erase(_uuid);
if (m_keys.count(_uuid))
{
boost::filesystem::remove(m_keys[_uuid].second);
fs::remove(m_keys[_uuid].filename);
m_keys.erase(_uuid);
}
}
@ -132,50 +133,50 @@ void SecretStore::clearCache() const
m_cached.clear();
}
void SecretStore::save(std::string const& _keysPath)
void SecretStore::save(string const& _keysPath)
{
fs::path p(_keysPath);
boost::filesystem::create_directories(p);
fs::create_directories(p);
for (auto& k: m_keys)
{
std::string uuid = toUUID(k.first);
std::string filename = (p / uuid).string() + ".json";
string uuid = toUUID(k.first);
string filename = (p / uuid).string() + ".json";
js::mObject v;
js::mValue crypto;
js::read_string(k.second.first, crypto);
js::read_string(k.second.encryptedKey, crypto);
v["crypto"] = crypto;
v["id"] = uuid;
v["version"] = c_keyFileVersion;
writeFile(filename, js::write_string(js::mValue(v), true));
if (!k.second.second.empty() && k.second.second != filename)
boost::filesystem::remove(k.second.second);
k.second.second = filename;
swap(k.second.filename, filename);
if (!filename.empty() && !fs::equivalent(filename, k.second.filename))
fs::remove(filename);
}
}
void SecretStore::load(std::string const& _keysPath)
void SecretStore::load(string const& _keysPath)
{
fs::path p(_keysPath);
boost::filesystem::create_directories(p);
fs::create_directories(p);
for (fs::directory_iterator it(p); it != fs::directory_iterator(); ++it)
if (is_regular_file(it->path()))
if (fs::is_regular_file(it->path()))
readKey(it->path().string(), true);
}
h128 SecretStore::readKey(std::string const& _file, bool _deleteFile)
h128 SecretStore::readKey(string const& _file, bool _takeFileOwnership)
{
cnote << "Reading" << _file;
return readKeyContent(contentsString(_file), _deleteFile ? _file : string());
return readKeyContent(contentsString(_file), _takeFileOwnership ? _file : string());
}
h128 SecretStore::readKeyContent(std::string const& _content, std::string const& _file)
h128 SecretStore::readKeyContent(string const& _content, string const& _file)
{
js::mValue u = upgraded(_content);
if (u.type() == js::obj_type)
{
js::mObject& o = u.get_obj();
auto uuid = fromUUID(o["id"].get_str());
m_keys[uuid] = make_pair(js::write_string(o["crypto"], false), _file);
m_keys[uuid] = EncryptedKey{js::write_string(o["crypto"], false), _file};
return uuid;
}
else
@ -183,62 +184,63 @@ h128 SecretStore::readKeyContent(std::string const& _content, std::string const&
return h128();
}
bool SecretStore::recode(h128 const& _uuid, string const& _newPass, std::function<std::string()> const& _pass, KDF _kdf)
bool SecretStore::recode(h128 const& _uuid, string const& _newPass, function<string()> const& _pass, KDF _kdf)
{
// cdebug << "recode:" << toUUID(_uuid);
bytes s = secret(_uuid, _pass, true);
if (s.empty())
return false;
m_keys[_uuid].first = encrypt(s, _newPass, _kdf);
m_cached.erase(_uuid);
m_keys[_uuid].encryptedKey = encrypt(s, _newPass, _kdf);
save();
return true;
}
std::string SecretStore::encrypt(bytes const& _v, std::string const& _pass, KDF _kdf)
static bytes deriveNewKey(string const& _pass, KDF _kdf, js::mObject& o_ret)
{
js::mObject ret;
// KDF info
unsigned dklen = 32;
unsigned iterations = 1 << 18;
bytes salt = h256::random().asBytes();
bytes derivedKey;
if (_kdf == KDF::Scrypt)
{
unsigned iterations = 262144;
unsigned p = 1;
unsigned r = 8;
ret["kdf"] = "scrypt";
o_ret["kdf"] = "scrypt";
{
js::mObject params;
params["n"] = (int64_t)iterations;
params["r"] = (int)r;
params["p"] = (int)p;
params["dklen"] = (int)dklen;
params["n"] = int64_t(iterations);
params["r"] = int(r);
params["p"] = int(p);
params["dklen"] = int(dklen);
params["salt"] = toHex(salt);
ret["kdfparams"] = params;
o_ret["kdfparams"] = params;
}
derivedKey = scrypt(_pass, salt, iterations, r, p, dklen);
return scrypt(_pass, salt, iterations, r, p, dklen);
}
else
{
unsigned iterations = 262144;
ret["kdf"] = "pbkdf2";
o_ret["kdf"] = "pbkdf2";
{
js::mObject params;
params["prf"] = "hmac-sha256";
params["c"] = (int)iterations;
params["c"] = int(iterations);
params["salt"] = toHex(salt);
params["dklen"] = (int)dklen;
ret["kdfparams"] = params;
params["dklen"] = int(dklen);
o_ret["kdfparams"] = params;
}
derivedKey = pbkdf2(_pass, salt, iterations, dklen);
return pbkdf2(_pass, salt, iterations, dklen);
}
// cdebug << "derivedKey" << toHex(derivedKey);
}
string SecretStore::encrypt(bytes const& _v, string const& _pass, KDF _kdf)
{
js::mObject ret;
bytes derivedKey = deriveNewKey(_pass, _kdf, ret);
if (derivedKey.empty())
BOOST_THROW_EXCEPTION(crypto::CryptoException() << errinfo_comment("Key derivation failed."));
// cipher info
ret["cipher"] = "aes-128-ctr";
h128 key(derivedKey, h128::AlignLeft);
// cdebug << "cipherKey" << key.hex();
h128 iv = h128::random();
{
js::mObject params;
@ -248,18 +250,18 @@ std::string SecretStore::encrypt(bytes const& _v, std::string const& _pass, KDF
// cipher text
bytes cipherText = encryptSymNoAuth(key, iv, &_v);
if (cipherText.empty())
BOOST_THROW_EXCEPTION(crypto::CryptoException() << errinfo_comment("Key encryption failed."));
ret["ciphertext"] = toHex(cipherText);
// and mac.
h256 mac = sha3(ref(derivedKey).cropped(16, 16).toBytes() + cipherText);
// cdebug << "macBody" << toHex(ref(derivedKey).cropped(16, 16).toBytes() + cipherText);
// cdebug << "mac" << mac.hex();
ret["mac"] = toHex(mac.ref());
return js::write_string((js::mValue)ret, true);
return js::write_string(js::mValue(ret), true);
}
bytes SecretStore::decrypt(std::string const& _v, std::string const& _pass)
bytes SecretStore::decrypt(string const& _v, string const& _pass)
{
js::mObject o;
{

50
libdevcrypto/SecretStore.h

@ -35,41 +35,81 @@ enum class KDF {
Scrypt,
};
/**
* Manages encrypted keys stored in a certain directory on disk. The keys are read into memory
* and changes to the keys are automatically synced to the directory.
* Each file stores exactly one key in a specific JSON format whose file name is derived from the
* UUID of the key.
* @note Most of the functions here affect the filesystem and throw exceptions on failure;
* they can also throw upon rare malfunctions in the cryptographic functions.
*/
class SecretStore
{
public:
/// Construct a new SecretStore and read all keys in the given directory.
SecretStore(std::string const& _path = defaultPath());
~SecretStore();
/// @returns the secret key stored by the given @a _uuid.
/// @param _pass function that returns the password for the key.
/// @param _useCache if true, allow previously decrypted keys to be returned directly.
bytes secret(h128 const& _uuid, std::function<std::string()> const& _pass, bool _useCache = true) const;
/// Imports the (encrypted) key stored in the file @a _file and copies it to the managed directory.
h128 importKey(std::string const& _file) { auto ret = readKey(_file, false); if (ret) save(); return ret; }
/// Imports the (encrypted) key contained in the json formatted @a _content and stores it in
/// the managed directory.
h128 importKeyContent(std::string const& _content) { auto ret = readKeyContent(_content, std::string()); if (ret) save(); return ret; }
/// Imports the decrypted key given by @a _s and stores it, encrypted with
/// (a key derived from) the password @a _pass.
h128 importSecret(bytes const& _s, std::string const& _pass);
/// Decrypts and re-encrypts the key identified by @a _uuid.
bool recode(h128 const& _uuid, std::string const& _newPass, std::function<std::string()> const& _pass, KDF _kdf = KDF::Scrypt);
/// Removes the key specified by @a _uuid from both memory and disk.
void kill(h128 const& _uuid);
/// Returns the uuids of all stored keys.
std::vector<h128> keys() const { return keysOf(m_keys); }
// Clear any cached keys.
/// Clears all cached decrypted keys. The passwords have to be supplied in order to retrieve
/// secrets again after calling this function.
void clearCache() const;
// Doesn't save().
h128 readKey(std::string const& _file, bool _deleteFile);
/// Import the key from the file @a _file, but do not copy it to the managed directory yet.
/// @param _takeFileOwnership if true, deletes the file if it is not the canonical file for the
/// key (derived from its uuid).
h128 readKey(std::string const& _file, bool _takeFileOwnership);
/// Import the key contained in the json-encoded @a _content, but do not store it in the
/// managed directory.
/// @param _file if given, assume this file contains @a _content and delete it later, if it is
/// not the canonical file for the key (derived from the uuid).
h128 readKeyContent(std::string const& _content, std::string const& _file = std::string());
/// Store all keys in the directory @a _keysPath.
void save(std::string const& _keysPath);
/// Store all keys in the managed directory.
void save() { save(m_path); }
/// @returns the default path for the managed directory.
static std::string defaultPath() { return getDataDir("web3") + "/keys"; }
private:
struct EncryptedKey
{
std::string encryptedKey;
std::string filename;
};
/// Loads all keys in the given directory.
void load(std::string const& _keysPath);
void load() { load(m_path); }
/// Encrypts @a _v with a key derived from @a _pass; returns the empty string on error.
static std::string encrypt(bytes const& _v, std::string const& _pass, KDF _kdf = KDF::Scrypt);
/// Decrypts @a _v with a key derived from @a _pass; returns the empty byte array on error.
static bytes decrypt(std::string const& _v, std::string const& _pass);
/// Stores decrypted keys by uuid.
mutable std::unordered_map<h128, bytes> m_cached;
std::unordered_map<h128, std::pair<std::string, std::string>> m_keys;
/// Stores encrypted keys together with the file they were loaded from by uuid.
std::unordered_map<h128, EncryptedKey> m_keys;
std::string m_path;
};
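A rough sketch of the documented SecretStore workflow, using only the public API declared above; the key directory and password are invented for the example and the snippet is not part of the diff:
#include <libdevcrypto/SecretStore.h>
#include <iostream>
#include <string>
int main()
{
	dev::SecretStore store("/tmp/example-keys");                    // hypothetical key directory
	dev::bytes secret(32, 0x42);                                    // toy secret material
	dev::h128 id = store.importSecret(secret, "example password");  // encrypts and writes a key file
	dev::bytes back = store.secret(id, []() { return std::string("example password"); });
	std::cout << (back == secret) << std::endl;                     // prints 1
	store.kill(id);                                                 // removes the key from memory and disk
	return 0;
}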

21
libethash-cl/CMakeLists.txt

@ -1,12 +1,23 @@
set(EXECUTABLE ethash-cl)
include(bin2h.cmake)
bin2h(SOURCE_FILE ethash_cl_miner_kernel.cl
VARIABLE_NAME ethash_cl_miner_kernel
HEADER_FILE ${CMAKE_CURRENT_BINARY_DIR}/ethash_cl_miner_kernel.h)
# A custom command and target to turn the OpenCL kernel into a byte array header
# The normal build depends on it, so if the kernel file changes,
# a rebuild of libethash-cl is triggered
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/ethash_cl_miner_kernel.h
COMMAND ${CMAKE_COMMAND} ARGS
-DBIN2H_SOURCE_FILE="${CMAKE_CURRENT_SOURCE_DIR}/ethash_cl_miner_kernel.cl"
-DBIN2H_VARIABLE_NAME=ethash_cl_miner_kernel
-DBIN2H_HEADER_FILE="${CMAKE_CURRENT_BINARY_DIR}/ethash_cl_miner_kernel.h"
-P "${CMAKE_CURRENT_SOURCE_DIR}/bin2h.cmake"
COMMENT "Generating OpenCL Kernel Byte Array"
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/ethash_cl_miner_kernel.cl
)
add_custom_target(clbin2h DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/ethash_cl_miner_kernel.h ${CMAKE_CURRENT_SOURCE_DIR}/ethash_cl_miner_kernel.cl)
aux_source_directory(. SRC_LIST)
file(GLOB HEADERS "*.h")
file(GLOB OUR_HEADERS "*.h")
set(HEADERS ${OUR_HEADERS} ${CMAKE_CURRENT_BINARY_DIR}/ethash_cl_miner_kernel.h)
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${OpenCL_INCLUDE_DIRS})

100
libethash-cl/bin2h.cmake

@ -6,31 +6,31 @@ include(CMakeParseArguments)
# VARIABLE - The name of the CMake variable holding the string.
# AT_COLUMN - The column position at which string will be wrapped.
function(WRAP_STRING)
set(oneValueArgs VARIABLE AT_COLUMN)
cmake_parse_arguments(WRAP_STRING "${options}" "${oneValueArgs}" "" ${ARGN})
set(oneValueArgs VARIABLE AT_COLUMN)
cmake_parse_arguments(WRAP_STRING "${options}" "${oneValueArgs}" "" ${ARGN})
string(LENGTH ${${WRAP_STRING_VARIABLE}} stringLength)
math(EXPR offset "0")
string(LENGTH ${${WRAP_STRING_VARIABLE}} stringLength)
math(EXPR offset "0")
while(stringLength GREATER 0)
while(stringLength GREATER 0)
if(stringLength GREATER ${WRAP_STRING_AT_COLUMN})
math(EXPR length "${WRAP_STRING_AT_COLUMN}")
else()
math(EXPR length "${stringLength}")
endif()
if(stringLength GREATER ${WRAP_STRING_AT_COLUMN})
math(EXPR length "${WRAP_STRING_AT_COLUMN}")
else()
math(EXPR length "${stringLength}")
endif()
string(SUBSTRING ${${WRAP_STRING_VARIABLE}} ${offset} ${length} line)
set(lines "${lines}\n${line}")
string(SUBSTRING ${${WRAP_STRING_VARIABLE}} ${offset} ${length} line)
set(lines "${lines}\n${line}")
math(EXPR stringLength "${stringLength} - ${length}")
math(EXPR offset "${offset} + ${length}")
endwhile()
math(EXPR stringLength "${stringLength} - ${length}")
math(EXPR offset "${offset} + ${length}")
endwhile()
set(${WRAP_STRING_VARIABLE} "${lines}" PARENT_SCOPE)
set(${WRAP_STRING_VARIABLE} "${lines}" PARENT_SCOPE)
endfunction()
# Function to embed contents of a file as byte array in C/C++ header file(.h). The header file
# Script to embed contents of a file as byte array in C/C++ header file(.h). The header file
# will contain a byte array and integer variable holding the size of the array.
# Parameters
# SOURCE_FILE - The path of source file whose contents will be embedded in the header file.
@ -42,45 +42,41 @@ endfunction()
# useful if the source file is a text file and we want to use the file contents
# as string. But the size variable holds size of the byte array without this
# null byte.
# Usage:
# bin2h(SOURCE_FILE "Logo.png" HEADER_FILE "Logo.h" VARIABLE_NAME "LOGO_PNG")
function(BIN2H)
set(options APPEND NULL_TERMINATE)
set(oneValueArgs SOURCE_FILE VARIABLE_NAME HEADER_FILE)
cmake_parse_arguments(BIN2H "${options}" "${oneValueArgs}" "" ${ARGN})
set(options APPEND NULL_TERMINATE)
set(oneValueArgs SOURCE_FILE VARIABLE_NAME HEADER_FILE)
# cmake_parse_arguments(BIN2H "${options}" "${oneValueArgs}" "" ${ARGN})
# reads source file contents as hex string
file(READ ${BIN2H_SOURCE_FILE} hexString HEX)
string(LENGTH ${hexString} hexStringLength)
# reads source file contents as hex string
file(READ ${BIN2H_SOURCE_FILE} hexString HEX)
string(LENGTH ${hexString} hexStringLength)
# appends null byte if asked
if(BIN2H_NULL_TERMINATE)
set(hexString "${hexString}00")
endif()
# appends null byte if asked
if(BIN2H_NULL_TERMINATE)
set(hexString "${hexString}00")
endif()
# wraps the hex string into multiple lines at column 32(i.e. 16 bytes per line)
wrap_string(VARIABLE hexString AT_COLUMN 32)
math(EXPR arraySize "${hexStringLength} / 2")
# wraps the hex string into multiple lines at column 32(i.e. 16 bytes per line)
wrap_string(VARIABLE hexString AT_COLUMN 32)
math(EXPR arraySize "${hexStringLength} / 2")
# adds '0x' prefix and comma suffix before and after every byte respectively
string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1, " arrayValues ${hexString})
# removes trailing comma
string(REGEX REPLACE ", $" "" arrayValues ${arrayValues})
# adds '0x' prefix and comma suffix before and after every byte respectively
string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1, " arrayValues ${hexString})
# removes trailing comma
string(REGEX REPLACE ", $" "" arrayValues ${arrayValues})
# converts the variable name into proper C identifier
IF (${CMAKE_VERSION} GREATER 2.8.10) # fix for legacy cmake
string(MAKE_C_IDENTIFIER "${BIN2H_VARIABLE_NAME}" BIN2H_VARIABLE_NAME)
ENDIF()
string(TOUPPER "${BIN2H_VARIABLE_NAME}" BIN2H_VARIABLE_NAME)
# converts the variable name into proper C identifier
IF (${CMAKE_VERSION} GREATER 2.8.10) # fix for legacy cmake
string(MAKE_C_IDENTIFIER "${BIN2H_VARIABLE_NAME}" BIN2H_VARIABLE_NAME)
ENDIF()
string(TOUPPER "${BIN2H_VARIABLE_NAME}" BIN2H_VARIABLE_NAME)
# declares byte array and the length variables
set(arrayDefinition "const unsigned char ${BIN2H_VARIABLE_NAME}[] = { ${arrayValues} };")
set(arraySizeDefinition "const size_t ${BIN2H_VARIABLE_NAME}_SIZE = ${arraySize};")
# declares byte array and the length variables
set(arrayDefinition "const unsigned char ${BIN2H_VARIABLE_NAME}[] = { ${arrayValues} };")
set(arraySizeDefinition "const size_t ${BIN2H_VARIABLE_NAME}_SIZE = ${arraySize};")
set(declarations "${arrayDefinition}\n\n${arraySizeDefinition}\n\n")
if(BIN2H_APPEND)
file(APPEND ${BIN2H_HEADER_FILE} "${declarations}")
else()
file(WRITE ${BIN2H_HEADER_FILE} "${declarations}")
endif()
endfunction()
set(declarations "${arrayDefinition}\n\n${arraySizeDefinition}\n\n")
if(BIN2H_APPEND)
file(APPEND ${BIN2H_HEADER_FILE} "${declarations}")
else()
file(WRITE ${BIN2H_HEADER_FILE} "${declarations}")
endif()

71
libethash-cl/ethash_cl_miner.cpp

@ -140,12 +140,10 @@ unsigned ethash_cl_miner::getNumDevices(unsigned _platformId)
bool ethash_cl_miner::configureGPU(
bool _allowCPU,
unsigned _extraGPUMemory,
bool _forceSingleChunk,
boost::optional<uint64_t> _currentBlock
)
{
s_allowCPU = _allowCPU;
s_forceSingleChunk = _forceSingleChunk;
s_extraRequiredGPUMem = _extraGPUMemory;
// by default let's only consider the DAG of the first epoch
uint64_t dagSize = _currentBlock ? ethash_get_datasize(*_currentBlock) : 1073739904U;
@ -174,7 +172,6 @@ bool ethash_cl_miner::configureGPU(
}
bool ethash_cl_miner::s_allowCPU = false;
bool ethash_cl_miner::s_forceSingleChunk = false;
unsigned ethash_cl_miner::s_extraRequiredGPUMem;
bool ethash_cl_miner::searchForAllDevices(function<bool(cl::Device const&)> _callback)
@ -288,23 +285,6 @@ bool ethash_cl_miner::init(
string device_version = device.getInfo<CL_DEVICE_VERSION>();
ETHCL_LOG("Using device: " << device.getInfo<CL_DEVICE_NAME>().c_str() << "(" << device_version.c_str() << ")");
// configure chunk number depending on max allocateable memory
cl_ulong result;
device.getInfo(CL_DEVICE_MAX_MEM_ALLOC_SIZE, &result);
if (s_forceSingleChunk || result >= _dagSize)
{
m_dagChunksNum = 1;
ETHCL_LOG(
((result <= _dagSize && s_forceSingleChunk) ? "Forcing single chunk. Good luck!\n" : "") <<
"Using 1 big chunk. Max OpenCL allocateable memory is " << result
);
}
else
{
m_dagChunksNum = 4;
ETHCL_LOG("Using 4 chunks. Max OpenCL allocateable memory is " << result);
}
if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0)
{
ETHCL_LOG("OpenCL 1.0 is not supported.");
@ -341,31 +321,32 @@ bool ethash_cl_miner::init(
ETHCL_LOG("Printing program log");
ETHCL_LOG(program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str());
}
catch (cl::Error err)
catch (cl::Error const& err)
{
ETHCL_LOG(program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str());
return false;
}
if (m_dagChunksNum == 1)
{
ETHCL_LOG("Loading single big chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash");
m_search_kernel = cl::Kernel(program, "ethash_search");
}
else
{
ETHCL_LOG("Loading chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash_chunks");
m_search_kernel = cl::Kernel(program, "ethash_search_chunks");
}
// create buffer for dag
if (m_dagChunksNum == 1)
try
{
ETHCL_LOG("Creating one big buffer");
m_dagChunksNum = 1;
m_dagChunks.push_back(cl::Buffer(m_context, CL_MEM_READ_ONLY, _dagSize));
ETHCL_LOG("Created one big buffer for the DAG");
}
else
catch (cl::Error const& err)
{
int errCode = err.err();
if (errCode != CL_INVALID_BUFFER_SIZE && errCode != CL_MEM_OBJECT_ALLOCATION_FAILURE)
ETHCL_LOG("Allocating single buffer failed with: " << err.what() << "(" << errCode << ")");
cl_ulong result;
device.getInfo(CL_DEVICE_MAX_MEM_ALLOC_SIZE, &result);
ETHCL_LOG(
"Failed to allocate 1 big chunk. Max allocateable memory is "
<< result << ". Trying to allocate 4 chunks."
);
// The OpenCL kernel has a hard coded number of 4 chunks at the moment
m_dagChunksNum = 4;
for (unsigned i = 0; i < m_dagChunksNum; i++)
{
// TODO Note: If we ever change to _dagChunksNum other than 4, then the size would need recalculation
@ -376,6 +357,20 @@ bool ethash_cl_miner::init(
(i == 3) ? (_dagSize - 3 * ((_dagSize >> 9) << 7)) : (_dagSize >> 9) << 7
));
}
}
if (m_dagChunksNum == 1)
{
ETHCL_LOG("Loading single big chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash");
m_search_kernel = cl::Kernel(program, "ethash_search");
}
else
{
ETHCL_LOG("Loading chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash_chunks");
m_search_kernel = cl::Kernel(program, "ethash_search_chunks");
}
// create buffer for header
ETHCL_LOG("Creating buffer for header.");
@ -410,7 +405,7 @@ bool ethash_cl_miner::init(
m_search_buf[i] = cl::Buffer(m_context, CL_MEM_WRITE_ONLY, (c_max_search_results + 1) * sizeof(uint32_t));
}
}
catch (cl::Error err)
catch (cl::Error const& err)
{
ETHCL_LOG(err.what() << "(" << err.err() << ")");
return false;
@ -504,7 +499,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
pre_return_event.wait();
#endif
}
catch (cl::Error err)
catch (cl::Error const& err)
{
ETHCL_LOG(err.what() << "(" << err.err() << ")");
}

3
libethash-cl/ethash_cl_miner.h

@ -44,7 +44,6 @@ public:
static bool configureGPU(
bool _allowCPU,
unsigned _extraGPUMemory,
bool _forceSingleChunk,
boost::optional<uint64_t> _currentBlock
);
@ -79,8 +78,6 @@ private:
unsigned m_workgroup_size;
bool m_opencl_1_1;
/// Force dag upload to GPU in a single chunk even if OpenCL thinks you can't do it. Use at your own risk.
static bool s_forceSingleChunk;
/// Allow CPU to appear as an OpenCL device or not. Default is false
static bool s_allowCPU;
/// GPU memory required for other things, like window rendering etc.

24
libethash-cl/ethash_cl_miner_kernel.cl

@ -36,7 +36,7 @@ __constant uint2 const Keccak_f1600_RC[24] = {
(uint2)(0x80008008, 0x80000000),
};
void keccak_f1600_round(uint2* a, uint r, uint out_size)
static void keccak_f1600_round(uint2* a, uint r, uint out_size)
{
#if !__ENDIAN_LITTLE__
for (uint i = 0; i != 25; ++i)
@ -152,7 +152,7 @@ void keccak_f1600_round(uint2* a, uint r, uint out_size)
#endif
}
void keccak_f1600_no_absorb(ulong* a, uint in_size, uint out_size, uint isolate)
static void keccak_f1600_no_absorb(ulong* a, uint in_size, uint out_size, uint isolate)
{
for (uint i = in_size; i != 25; ++i)
{
@ -194,17 +194,17 @@ void keccak_f1600_no_absorb(ulong* a, uint in_size, uint out_size, uint isolate)
#define countof(x) (sizeof(x) / sizeof(x[0]))
uint fnv(uint x, uint y)
static uint fnv(uint x, uint y)
{
return x * FNV_PRIME ^ y;
}
uint4 fnv4(uint4 x, uint4 y)
static uint4 fnv4(uint4 x, uint4 y)
{
return x * FNV_PRIME ^ y;
}
uint fnv_reduce(uint4 v)
static uint fnv_reduce(uint4 v)
{
return fnv(fnv(fnv(v.x, v.y), v.z), v.w);
}
@ -227,7 +227,7 @@ typedef union
uint4 uint4s[128 / sizeof(uint4)];
} hash128_t;
hash64_t init_hash(__constant hash32_t const* header, ulong nonce, uint isolate)
static hash64_t init_hash(__constant hash32_t const* header, ulong nonce, uint isolate)
{
hash64_t init;
uint const init_size = countof(init.ulongs);
@ -243,7 +243,7 @@ hash64_t init_hash(__constant hash32_t const* header, ulong nonce, uint isolate)
return init;
}
uint inner_loop_chunks(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, __global hash128_t const* g_dag1, __global hash128_t const* g_dag2, __global hash128_t const* g_dag3, uint isolate)
static uint inner_loop_chunks(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, __global hash128_t const* g_dag1, __global hash128_t const* g_dag2, __global hash128_t const* g_dag3, uint isolate)
{
uint4 mix = init;
@ -277,7 +277,7 @@ uint inner_loop_chunks(uint4 init, uint thread_id, __local uint* share, __global
uint inner_loop(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, uint isolate)
static uint inner_loop(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, uint isolate)
{
uint4 mix = init;
@ -311,7 +311,7 @@ uint inner_loop(uint4 init, uint thread_id, __local uint* share, __global hash12
}
hash32_t final_hash(hash64_t const* init, hash32_t const* mix, uint isolate)
static hash32_t final_hash(hash64_t const* init, hash32_t const* mix, uint isolate)
{
ulong state[25];
@ -330,7 +330,7 @@ hash32_t final_hash(hash64_t const* init, hash32_t const* mix, uint isolate)
return hash;
}
hash32_t compute_hash_simple(
static hash32_t compute_hash_simple(
__constant hash32_t const* g_header,
__global hash128_t const* g_dag,
ulong nonce,
@ -383,7 +383,7 @@ typedef union
} compute_hash_share;
hash32_t compute_hash(
static hash32_t compute_hash(
__local compute_hash_share* share,
__constant hash32_t const* g_header,
__global hash128_t const* g_dag,
@ -427,7 +427,7 @@ hash32_t compute_hash(
}
hash32_t compute_hash_chunks(
static hash32_t compute_hash_chunks(
__local compute_hash_share* share,
__constant hash32_t const* g_header,
__global hash128_t const* g_dag,

3
libethash/endian.h

@ -32,6 +32,9 @@
#include <libkern/OSByteOrder.h>
#define ethash_swap_u32(input_) OSSwapInt32(input_)
#define ethash_swap_u64(input_) OSSwapInt64(input_)
#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
#define ethash_swap_u32(input_) bswap32(input_)
#define ethash_swap_u64(input_) bswap64(input_)
#else // posix
#include <byteswap.h>
#define ethash_swap_u32(input_) __bswap_32(input_)

4
libethash/internal.c

@ -284,13 +284,13 @@ bool ethash_quick_check_difficulty(
ethash_h256_t const* header_hash,
uint64_t const nonce,
ethash_h256_t const* mix_hash,
ethash_h256_t const* difficulty
ethash_h256_t const* boundary
)
{
ethash_h256_t return_hash;
ethash_quick_hash(&return_hash, header_hash, nonce, mix_hash);
return ethash_check_difficulty(&return_hash, difficulty);
return ethash_check_difficulty(&return_hash, boundary);
}
ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed)

21
libethash/internal.h

@ -46,27 +46,36 @@ static inline void ethash_h256_reset(ethash_h256_t* hash)
memset(hash, 0, 32);
}
// Returns if hash is less than or equal to difficulty
// Returns if hash is less than or equal to boundary (2^256/difficulty)
static inline bool ethash_check_difficulty(
ethash_h256_t const* hash,
ethash_h256_t const* difficulty
ethash_h256_t const* boundary
)
{
// Difficulty is big endian
// Boundary is big endian
for (int i = 0; i < 32; i++) {
if (ethash_h256_get(hash, i) == ethash_h256_get(difficulty, i)) {
if (ethash_h256_get(hash, i) == ethash_h256_get(boundary, i)) {
continue;
}
return ethash_h256_get(hash, i) < ethash_h256_get(difficulty, i);
return ethash_h256_get(hash, i) < ethash_h256_get(boundary, i);
}
return true;
}
/**
* Difficulty quick check for PoW pre-verification
*
* @param header_hash The hash of the header
* @param nonce The block's nonce
* @param mix_hash The mix digest hash
* @param boundary The boundary is defined as (2^256 / difficulty)
* @return true for successful pre-verification and false otherwise
*/
bool ethash_quick_check_difficulty(
ethash_h256_t const* header_hash,
uint64_t const nonce,
ethash_h256_t const* mix_hash,
ethash_h256_t const* difficulty
ethash_h256_t const* boundary
);
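To make the boundary relation concrete, here is a small illustrative helper (not from the diff) using libdevcore's u256/bigint arithmetic; it assumes a non-zero difficulty:
#include <libdevcore/Common.h>
// The byte-wise big-endian comparison above is equivalent to interpreting the
// hash as a 256-bit number and comparing it against boundary = 2^256 / difficulty.
bool meetsDifficulty(dev::u256 const& _hashAsNumber, dev::u256 const& _difficulty)
{
	dev::u256 boundary = dev::u256((dev::bigint(1) << 256) / dev::bigint(_difficulty));
	return _hashAsNumber <= boundary;
}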
struct ethash_light {

6
libethcore/Common.h

@ -47,7 +47,8 @@ extern const unsigned c_databaseVersion;
enum class Network
{
Olympic = 0,
Frontier = 1
Frontier = 1,
Turbo = 2
};
extern const Network c_network;
@ -100,7 +101,8 @@ enum class ImportResult
{
Success = 0,
UnknownParent,
FutureTime,
FutureTimeKnown,
FutureTimeUnknown,
AlreadyInChain,
AlreadyKnown,
Malformed,

3
libethcore/Ethash.cpp

@ -389,13 +389,12 @@ bool Ethash::GPUMiner::configureGPU(
unsigned _deviceId,
bool _allowCPU,
unsigned _extraGPUMemory,
bool _forceSingleChunk,
boost::optional<uint64_t> _currentBlock
)
{
s_platformId = _platformId;
s_deviceId = _deviceId;
return ethash_cl_miner::configureGPU(_allowCPU, _extraGPUMemory, _forceSingleChunk, _currentBlock);
return ethash_cl_miner::configureGPU(_allowCPU, _extraGPUMemory, _currentBlock);
}
#endif

3
libethcore/Ethash.h

@ -88,7 +88,7 @@ public:
static unsigned instances() { return s_numInstances > 0 ? s_numInstances : std::thread::hardware_concurrency(); }
static std::string platformInfo();
static void listDevices() {}
static bool configureGPU(unsigned, unsigned, bool, unsigned, bool, boost::optional<uint64_t>) { return false; }
static bool configureGPU(unsigned, unsigned, bool, unsigned, boost::optional<uint64_t>) { return false; }
static void setNumInstances(unsigned _instances) { s_numInstances = std::min<unsigned>(_instances, std::thread::hardware_concurrency()); }
protected:
void kickOff() override
@ -122,7 +122,6 @@ public:
unsigned _deviceId,
bool _allowCPU,
unsigned _extraGPUMemory,
bool _forceSingleChunk,
boost::optional<uint64_t> _currentBlock
);
static void setNumInstances(unsigned _instances) { s_numInstances = std::min<unsigned>(_instances, getNumDevices()); }

3
libethcore/Farm.h

@ -68,6 +68,7 @@ public:
void setWork(WorkPackage const& _wp)
{
WriteGuard l(x_minerWork);
cdebug << "Farm::setWork()";
if (_wp.headerHash == m_work.headerHash)
return;
m_work = _wp;
@ -94,6 +95,7 @@ public:
void stop()
{
WriteGuard l(x_minerWork);
cdebug << "Farm::stop()";
m_miners.clear();
m_work.reset();
m_isMining = false;
@ -175,6 +177,7 @@ private:
bool start()
{
WriteGuard l(x_minerWork);
cdebug << "start()";
if (!m_miners.empty() && !!std::dynamic_pointer_cast<MinerType>(m_miners[0]))
return true;
m_miners.clear();

169
libethcore/KeyManager.cpp

@ -31,7 +31,7 @@ using namespace dev;
using namespace eth;
namespace fs = boost::filesystem;
KeyManager::KeyManager(std::string const& _keysFile, std::string const& _secretsPath):
KeyManager::KeyManager(string const& _keysFile, string const& _secretsPath):
m_keysFile(_keysFile), m_store(_secretsPath)
{}
@ -43,13 +43,13 @@ bool KeyManager::exists() const
return !contents(m_keysFile + ".salt").empty() && !contents(m_keysFile).empty();
}
void KeyManager::create(std::string const& _pass)
void KeyManager::create(string const& _pass)
{
m_password = asString(h256::random().asBytes());
m_defaultPasswordDeprecated = asString(h256::random().asBytes());
write(_pass, m_keysFile);
}
bool KeyManager::recode(Address const& _address, std::string const& _newPass, std::string const& _hint, std::function<string()> const& _pass, KDF _kdf)
bool KeyManager::recode(Address const& _address, string const& _newPass, string const& _hint, function<string()> const& _pass, KDF _kdf)
{
noteHint(_newPass, _hint);
h128 u = uuid(_address);
@ -61,10 +61,10 @@ bool KeyManager::recode(Address const& _address, std::string const& _newPass, st
return true;
}
bool KeyManager::recode(Address const& _address, SemanticPassword _newPass, std::function<string()> const& _pass, KDF _kdf)
bool KeyManager::recode(Address const& _address, SemanticPassword _newPass, function<string()> const& _pass, KDF _kdf)
{
h128 u = uuid(_address);
std::string p;
string p;
if (_newPass == SemanticPassword::Existing)
p = getPassword(u, _pass);
else if (_newPass == SemanticPassword::Master)
@ -75,41 +75,47 @@ bool KeyManager::recode(Address const& _address, SemanticPassword _newPass, std:
return recode(_address, p, string(), _pass, _kdf);
}
bool KeyManager::load(std::string const& _pass)
bool KeyManager::load(string const& _pass)
{
try {
try
{
bytes salt = contents(m_keysFile + ".salt");
bytes encKeys = contents(m_keysFile);
m_key = h128(pbkdf2(_pass, salt, 262144, 16));
bytes bs = decryptSymNoAuth(m_key, h128(), &encKeys);
m_keysFileKey = h128(pbkdf2(_pass, salt, 262144, 16));
bytes bs = decryptSymNoAuth(m_keysFileKey, h128(), &encKeys);
RLP s(bs);
unsigned version = (unsigned)s[0];
unsigned version = unsigned(s[0]);
if (version == 1)
{
for (auto const& i: s[1])
{
m_keyInfo[m_addrLookup[(Address)i[0]] = (h128)i[1]] = KeyInfo((h256)i[2], (std::string)i[3]);
// cdebug << toString((Address)i[0]) << toString((h128)i[1]) << toString((h256)i[2]) << (std::string)i[3];
h128 uuid(i[1]);
Address addr(i[0]);
m_addrLookup[addr] = uuid;
m_keyInfo[uuid] = KeyInfo(h256(i[2]), string(i[3]));
// cdebug << toString(addr) << toString(uuid) << toString((h256)i[2]) << (string)i[3];
}
for (auto const& i: s[2])
m_passwordInfo[(h256)i[0]] = (std::string)i[1];
m_password = (string)s[3];
m_passwordHint[h256(i[0])] = string(i[1]);
m_defaultPasswordDeprecated = string(s[3]);
}
// cdebug << hashPassword(m_password) << toHex(m_password);
m_cachedPasswords[hashPassword(m_password)] = m_password;
cachePassword(m_defaultPasswordDeprecated);
// cdebug << hashPassword(asString(m_key.ref())) << m_key.hex();
m_cachedPasswords[hashPassword(asString(m_key.ref()))] = asString(m_key.ref());
cachePassword(asString(m_keysFileKey.ref()));
// cdebug << hashPassword(_pass) << _pass;
m_cachedPasswords[m_master = hashPassword(_pass)] = _pass;
m_master = hashPassword(_pass);
cachePassword(_pass);
return true;
}
catch (...) {
catch (...)
{
return false;
}
}
Secret KeyManager::secret(Address const& _address, function<std::string()> const& _pass) const
Secret KeyManager::secret(Address const& _address, function<string()> const& _pass) const
{
auto it = m_addrLookup.find(_address);
if (it == m_addrLookup.end())
@ -117,12 +123,12 @@ Secret KeyManager::secret(Address const& _address, function<std::string()> const
return secret(it->second, _pass);
}
Secret KeyManager::secret(h128 const& _uuid, function<std::string()> const& _pass) const
Secret KeyManager::secret(h128 const& _uuid, function<string()> const& _pass) const
{
return Secret(m_store.secret(_uuid, [&](){ return getPassword(_uuid, _pass); }));
}
std::string KeyManager::getPassword(h128 const& _uuid, function<std::string()> const& _pass) const
string KeyManager::getPassword(h128 const& _uuid, function<string()> const& _pass) const
{
auto kit = m_keyInfo.find(_uuid);
h256 ph;
@ -131,19 +137,19 @@ std::string KeyManager::getPassword(h128 const& _uuid, function<std::string()> c
return getPassword(ph, _pass);
}
std::string KeyManager::getPassword(h256 const& _passHash, function<std::string()> const& _pass) const
string KeyManager::getPassword(h256 const& _passHash, function<string()> const& _pass) const
{
auto it = m_cachedPasswords.find(_passHash);
if (it != m_cachedPasswords.end())
return it->second;
for (unsigned i = 0; i< 10; ++i)
for (unsigned i = 0; i < 10; ++i)
{
std::string p = _pass();
string p = _pass();
if (p.empty())
break;
if (hashPassword(p) == _passHash || _passHash == UnknownPassword)
if (_passHash == UnknownPassword || hashPassword(p) == _passHash)
{
m_cachedPasswords[hashPassword(p)] = p;
cachePassword(p);
return p;
}
}
@ -166,20 +172,20 @@ Address KeyManager::address(h128 const& _uuid) const
return Address();
}
h128 KeyManager::import(Secret const& _s, string const& _info, std::string const& _pass, string const& _passInfo)
h128 KeyManager::import(Secret const& _s, string const& _accountName, string const& _pass, string const& _passwordHint)
{
Address addr = KeyPair(_s).address();
auto passHash = hashPassword(_pass);
m_cachedPasswords[passHash] = _pass;
m_passwordInfo[passHash] = _passInfo;
cachePassword(_pass);
m_passwordHint[passHash] = _passwordHint;
auto uuid = m_store.importSecret(_s.asBytes(), _pass);
m_keyInfo[uuid] = KeyInfo{passHash, _info};
m_keyInfo[uuid] = KeyInfo{passHash, _accountName};
m_addrLookup[addr] = uuid;
write(m_keysFile);
return uuid;
}
void KeyManager::importExisting(h128 const& _uuid, std::string const& _info, std::string const& _pass, std::string const& _passInfo)
void KeyManager::importExisting(h128 const& _uuid, string const& _info, string const& _pass, string const& _passwordHint)
{
bytes key = m_store.secret(_uuid, [&](){ return _pass; });
if (key.empty())
@ -187,17 +193,17 @@ void KeyManager::importExisting(h128 const& _uuid, std::string const& _info, std
Address a = KeyPair(Secret(key)).address();
auto passHash = hashPassword(_pass);
if (!m_cachedPasswords.count(passHash))
m_cachedPasswords[passHash] = _pass;
importExisting(_uuid, _info, a, passHash, _passInfo);
cachePassword(_pass);
importExisting(_uuid, _info, a, passHash, _passwordHint);
}
void KeyManager::importExisting(h128 const& _uuid, std::string const& _info, Address const& _address, h256 const& _passHash, std::string const& _passInfo)
void KeyManager::importExisting(h128 const& _uuid, string const& _accountName, Address const& _address, h256 const& _passHash, string const& _passwordHint)
{
if (!m_passwordInfo.count(_passHash))
m_passwordInfo[_passHash] = _passInfo;
if (!m_passwordHint.count(_passHash))
m_passwordHint[_passHash] = _passwordHint;
m_addrLookup[_address] = _uuid;
m_keyInfo[_uuid].passHash = _passHash;
m_keyInfo[_uuid].info = _info;
m_keyInfo[_uuid].accountName = _accountName;
write(m_keysFile);
}
@ -209,67 +215,92 @@ void KeyManager::kill(Address const& _a)
m_store.kill(id);
}
AddressHash KeyManager::accounts() const
Addresses KeyManager::accounts() const
{
AddressHash ret;
Addresses ret;
ret.reserve(m_addrLookup.size());
for (auto const& i: m_addrLookup)
if (m_keyInfo.count(i.second) > 0)
ret.insert(i.first);
ret.push_back(i.first);
return ret;
}
std::unordered_map<Address, std::pair<std::string, std::string>> KeyManager::accountDetails() const
bool KeyManager::hasAccount(const Address& _address) const
{
std::unordered_map<Address, std::pair<std::string, std::string>> ret;
for (auto const& i: m_addrLookup)
if (m_keyInfo.count(i.second) > 0)
ret[i.first] = make_pair(m_keyInfo.count(i.second) ? m_keyInfo.at(i.second).info : "", m_keyInfo.count(i.second) && m_passwordInfo.count(m_keyInfo.at(i.second).passHash) ? m_passwordInfo.at(m_keyInfo.at(i.second).passHash) : "");
return ret;
return m_addrLookup.count(_address) && m_keyInfo.count(m_addrLookup.at(_address));
}
string const& KeyManager::accountName(Address const& _address) const
{
try
{
return m_keyInfo.at(m_addrLookup.at(_address)).accountName;
}
catch (...)
{
return EmptyString;
}
}
string const& KeyManager::passwordHint(Address const& _address) const
{
try
{
return m_passwordHint.at(m_keyInfo.at(m_addrLookup.at(_address)).passHash);
}
catch (...)
{
return EmptyString;
}
}
h256 KeyManager::hashPassword(std::string const& _pass) const
h256 KeyManager::hashPassword(string const& _pass) const
{
// TODO SECURITY: store this a bit more securely; Scrypt perhaps?
return h256(pbkdf2(_pass, asBytes(m_password), 262144, 32));
return h256(pbkdf2(_pass, asBytes(m_defaultPasswordDeprecated), 262144, 32));
}
void KeyManager::cachePassword(string const& _password) const
{
m_cachedPasswords[hashPassword(_password)] = _password;
}
bool KeyManager::write(std::string const& _keysFile) const
bool KeyManager::write(string const& _keysFile) const
{
if (!m_key)
if (!m_keysFileKey)
return false;
write(m_key, _keysFile);
write(m_keysFileKey, _keysFile);
return true;
}
void KeyManager::write(std::string const& _pass, std::string const& _keysFile) const
void KeyManager::write(string const& _pass, string const& _keysFile) const
{
bytes salt = h256::random().asBytes();
writeFile(_keysFile + ".salt", salt);
auto key = h128(pbkdf2(_pass, salt, 262144, 16));
m_cachedPasswords[hashPassword(_pass)] = _pass;
cachePassword(_pass);
m_master = hashPassword(_pass);
write(key, _keysFile);
}
void KeyManager::write(h128 const& _key, std::string const& _keysFile) const
void KeyManager::write(h128 const& _key, string const& _keysFile) const
{
RLPStream s(4);
s << 1;
s.appendList(m_addrLookup.size());
for (auto const& i: m_addrLookup)
if (m_keyInfo.count(i.second))
{
auto ki = m_keyInfo.at(i.second);
s.appendList(4) << i.first << i.second << ki.passHash << ki.info;
}
s.appendList(m_passwordInfo.size());
for (auto const& i: m_passwordInfo)
s << 1; // version
s.appendList(accounts().size());
for (auto const& address: accounts())
{
h128 id = uuid(address);
auto const& ki = m_keyInfo.at(id);
s.appendList(4) << address << id << ki.passHash << ki.accountName;
}
s.appendList(m_passwordHint.size());
for (auto const& i: m_passwordHint)
s.appendList(2) << i.first << i.second;
s.append(m_password);
s.append(m_defaultPasswordDeprecated);
writeFile(_keysFile, encryptSymNoAuth(_key, h128(), &s.out()));
m_key = _key;
m_cachedPasswords[hashPassword(defaultPassword())] = defaultPassword();
m_keysFileKey = _key;
cachePassword(defaultPassword());
}

68
libethcore/KeyManager.h

@ -23,8 +23,9 @@
#include <functional>
#include <mutex>
#include <libdevcrypto/SecretStore.h>
#include <libdevcore/FileSystem.h>
#include <libdevcore/CommonData.h>
#include <libdevcrypto/SecretStore.h>
namespace dev
{
@ -35,14 +36,17 @@ class PasswordUnknown: public Exception {};
struct KeyInfo
{
KeyInfo() = default;
KeyInfo(h256 const& _passHash, std::string const& _info): passHash(_passHash), info(_info) {}
KeyInfo(h256 const& _passHash, std::string const& _accountName): passHash(_passHash), accountName(_accountName) {}
h256 passHash; ///< Hash of the password or h256() if unknown.
std::string info; ///< Name of the key, or JSON key info if begins with '{'.
/// Hash of the password or h256() / UnknownPassword if unknown.
h256 passHash;
/// Name of the key, or JSON key info if begins with '{'.
std::string accountName;
};
static const h256 UnknownPassword;
static const auto DontKnowThrow = [](){ throw PasswordUnknown(); return std::string(); };
static h256 const UnknownPassword;
/// Password query function that never returns a password.
static auto const DontKnowThrow = [](){ throw PasswordUnknown(); return std::string(); };
enum class SemanticPassword
{
@ -53,12 +57,15 @@ enum class SemanticPassword
// TODO: This one is specifically for Ethereum, but we can make it generic in due course.
// TODO: hidden-partition style key-store.
/**
* @brief High-level manager of keys for Ethereum.
* @brief High-level manager of password-encrypted keys for Ethereum.
* Usage:
*
* Call exists() to check whether there is already a database. If so, get the master password from
* the user and call load() with it. If not, get a new master password from the user (get them to type
* it twice and keep some hint around!) and call create() with it.
*
* Uses a "key file" (and a corresponding .salt file) that contains encrypted information about the keys and
* a directory called "secrets path" that contains a file for each key.
*/
class KeyManager
{
@ -75,25 +82,37 @@ public:
void save(std::string const& _pass) const { write(_pass, m_keysFile); }
void notePassword(std::string const& _pass) { m_cachedPasswords[hashPassword(_pass)] = _pass; }
void noteHint(std::string const& _pass, std::string const& _hint) { if (!_hint.empty()) m_passwordInfo[hashPassword(_pass)] = _hint; }
void noteHint(std::string const& _pass, std::string const& _hint) { if (!_hint.empty()) m_passwordHint[hashPassword(_pass)] = _hint; }
bool haveHint(std::string const& _pass) const { auto h = hashPassword(_pass); return m_cachedPasswords.count(h) && !m_cachedPasswords.at(h).empty(); }
AddressHash accounts() const;
std::unordered_map<Address, std::pair<std::string, std::string>> accountDetails() const;
std::string const& hint(Address const& _a) const { try { return m_passwordInfo.at(m_keyInfo.at(m_addrLookup.at(_a)).passHash); } catch (...) { return EmptyString; } }
/// @returns the list of account addresses.
Addresses accounts() const;
/// @returns a hashset of all account addresses.
AddressHash accountsHash() const { return AddressHash() + accounts(); }
bool hasAccount(Address const& _address) const;
/// @returns the human-readable name or json-encoded info of the account for the given address.
std::string const& accountName(Address const& _address) const;
/// @returns the password hint for the account for the given address.
std::string const& passwordHint(Address const& _address) const;
/// @returns the uuid of the key for the address @a _a or the empty hash on error.
h128 uuid(Address const& _a) const;
/// @returns the address corresponding to the key with uuid @a _uuid or the zero address on error.
Address address(h128 const& _uuid) const;
h128 import(Secret const& _s, std::string const& _info, std::string const& _pass, std::string const& _passInfo);
h128 import(Secret const& _s, std::string const& _info) { return import(_s, _info, defaultPassword(), std::string()); }
h128 import(Secret const& _s, std::string const& _accountName, std::string const& _pass, std::string const& _passwordHint);
h128 import(Secret const& _s, std::string const& _accountName) { return import(_s, _accountName, defaultPassword(), std::string()); }
SecretStore& store() { return m_store; }
void importExisting(h128 const& _uuid, std::string const& _info, std::string const& _pass, std::string const& _passInfo);
void importExisting(h128 const& _uuid, std::string const& _info) { importExisting(_uuid, _info, defaultPassword(), std::string()); }
void importExisting(h128 const& _uuid, std::string const& _info, Address const& _addr, h256 const& _passHash = h256(), std::string const& _passInfo = std::string());
void importExisting(h128 const& _uuid, std::string const& _accountName, std::string const& _pass, std::string const& _passwordHint);
void importExisting(h128 const& _uuid, std::string const& _accountName) { importExisting(_uuid, _accountName, defaultPassword(), std::string()); }
void importExisting(h128 const& _uuid, std::string const& _accountName, Address const& _addr, h256 const& _passHash = h256(), std::string const& _passwordHint = std::string());
/// @returns the secret key associated with an address provided the password query
/// function @a _pass or the zero-secret key on error.
Secret secret(Address const& _address, std::function<std::string()> const& _pass = DontKnowThrow) const;
/// @returns the secret key associated with the uuid of a key provided the password query
/// function @a _pass or the zero-secret key on error.
Secret secret(h128 const& _uuid, std::function<std::string()> const& _pass = DontKnowThrow) const;
bool recode(Address const& _address, SemanticPassword _newPass, std::function<std::string()> const& _pass = DontKnowThrow, KDF _kdf = KDF::Scrypt);
@ -110,6 +129,9 @@ private:
std::string defaultPassword(std::function<std::string()> const& _pass = DontKnowThrow) const { return getPassword(m_master, _pass); }
h256 hashPassword(std::string const& _pass) const;
/// Stores the password by its hash in the password cache.
void cachePassword(std::string const& _password) const;
// Only use if previously loaded ok.
// @returns false if it wasn't previously loaded ok.
bool write() const { return write(m_keysFile); }
@ -118,11 +140,15 @@ private:
void write(h128 const& _key, std::string const& _keysFile) const;
// Ethereum keys.
/// Mapping address -> key uuid.
std::unordered_map<Address, h128> m_addrLookup;
/// Mapping key uuid -> key info.
std::unordered_map<h128, KeyInfo> m_keyInfo;
std::unordered_map<h256, std::string> m_passwordInfo;
/// Mapping password hash -> password hint.
std::unordered_map<h256, std::string> m_passwordHint;
// Passwords that we're storing.
// Passwords that we're storing. Mapping password hash -> password.
mutable std::unordered_map<h256, std::string> m_cachedPasswords;
// DEPRECATED.
@ -130,10 +156,10 @@ private:
// Now the default password is based off the key of the keys file directly, so this is redundant
// except for the fact that people have existing keys stored with it. Leave for now until/unless
// we have an upgrade strategy.
std::string m_password;
std::string m_defaultPasswordDeprecated;
mutable std::string m_keysFile;
mutable h128 m_key;
mutable h128 m_keysFileKey;
mutable h256 m_master;
SecretStore m_store;
};
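A sketch of the exists()/load()/create() flow described in the class comment above; paths and passwords are invented, and KeyPair::create() is assumed from libdevcrypto — the snippet is not part of the diff:
#include <libdevcrypto/Common.h>
#include <libethcore/KeyManager.h>
int main()
{
	dev::eth::KeyManager km("/tmp/keys.info", "/tmp/keys");  // hypothetical keys file and secrets path
	if (km.exists())
		km.load("master password");                          // open the existing database
	else
		km.create("master password");                        // start a fresh one
	dev::KeyPair kp = dev::KeyPair::create();                // new random key
	km.import(kp.secret(), "my account");                    // stored under the default (master) password
	return km.accounts().empty() ? 1 : 0;                    // at least one account is now known
}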

6
libethcore/Params.cpp

@ -31,12 +31,12 @@ namespace eth
//--- BEGIN: AUTOGENERATED FROM github.com/ethereum/common/params.json
u256 const c_genesisDifficulty = 131072;
u256 const c_maximumExtraDataSize = 1024;
u256 const c_genesisGasLimit = 3141592;
u256 const c_minGasLimit = 125000;
u256 const c_genesisGasLimit = c_network == Network::Turbo ? 100000000 : 3141592;
u256 const c_minGasLimit = c_network == Network::Turbo ? 100000000 : 125000;
u256 const c_gasLimitBoundDivisor = 1024;
u256 const c_minimumDifficulty = 131072;
u256 const c_difficultyBoundDivisor = 2048;
u256 const c_durationLimit = c_network == Network::Olympic ? 8 : 12;
u256 const c_durationLimit = c_network == Network::Turbo ? 2 : c_network == Network::Olympic ? 8 : 12;
//--- END: AUTOGENERATED FROM /feeStructure.json
}

24
libethereum/BlockChain.cpp

@ -24,8 +24,6 @@
#if ETH_PROFILING_GPERF
#include <gperftools/profiler.h>
#endif
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
#include <boost/timer.hpp>
#include <boost/filesystem.hpp>
#include <test/JsonSpiritHeaders.h>
@ -305,7 +303,7 @@ LastHashes BlockChain::lastHashes(unsigned _n) const
return m_lastLastHashes;
}
tuple<h256s, h256s, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max)
tuple<ImportRoute, bool, unsigned> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max)
{
// _bq.tick(*this);
@ -315,6 +313,7 @@ tuple<h256s, h256s, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _st
h256s fresh;
h256s dead;
h256s badBlocks;
unsigned count = 0;
for (VerifiedBlock const& block: blocks)
if (!badBlocks.empty())
badBlocks.push_back(block.verified.info.hash());
@ -326,8 +325,9 @@ tuple<h256s, h256s, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _st
ImportRoute r;
DEV_TIMED_ABOVE(Block import, 500)
r = import(block.verified, _stateDB, ImportRequirements::Default & ~ImportRequirements::ValidNonce & ~ImportRequirements::CheckUncles);
fresh += r.first;
dead += r.second;
fresh += r.liveBlocks;
dead += r.deadBlocks;
++count;
}
catch (dev::eth::UnknownParent)
{
@ -353,7 +353,7 @@ tuple<h256s, h256s, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _st
badBlocks.push_back(block.verified.info.hash());
}
}
return make_tuple(fresh, dead, _bq.doneDrain(badBlocks));
return make_tuple(ImportRoute{dead, fresh}, _bq.doneDrain(badBlocks), count);
}
pair<ImportResult, ImportRoute> BlockChain::attemptImport(bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir) noexcept
@ -364,21 +364,21 @@ pair<ImportResult, ImportRoute> BlockChain::attemptImport(bytes const& _block, O
}
catch (UnknownParent&)
{
return make_pair(ImportResult::UnknownParent, make_pair(h256s(), h256s()));
return make_pair(ImportResult::UnknownParent, ImportRoute());
}
catch (AlreadyHaveBlock&)
{
return make_pair(ImportResult::AlreadyKnown, make_pair(h256s(), h256s()));
return make_pair(ImportResult::AlreadyKnown, ImportRoute());
}
catch (FutureTime&)
{
return make_pair(ImportResult::FutureTime, make_pair(h256s(), h256s()));
return make_pair(ImportResult::FutureTimeKnown, ImportRoute());
}
catch (Exception& ex)
{
if (m_onBad)
m_onBad(ex);
return make_pair(ImportResult::Malformed, make_pair(h256s(), h256s()));
return make_pair(ImportResult::Malformed, ImportRoute());
}
}
@ -699,7 +699,7 @@ ImportRoute BlockChain::import(VerifiedBlockRef const& _block, OverlayDB const&
dead.push_back(h);
else
fresh.push_back(h);
return make_pair(fresh, dead);
return ImportRoute{dead, fresh};
}
void BlockChain::clearBlockBlooms(unsigned _begin, unsigned _end)
@ -1122,6 +1122,6 @@ VerifiedBlockRef BlockChain::verifyBlock(bytes const& _block, function<void(Exce
++i;
}
res.block = bytesConstRef(&_block);
return move(res);
return res;
}

16
libethereum/BlockChain.h

@ -21,15 +21,11 @@
#pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <deque>
#include <chrono>
#include <unordered_map>
#include <unordered_set>
#include <libdevcore/db.h>
#include <libdevcore/Log.h>
#include <libdevcore/Exceptions.h>
#include <libdevcore/Guards.h>
@ -41,7 +37,6 @@
#include "Transaction.h"
#include "BlockQueue.h"
#include "VerifiedBlock.h"
namespace ldb = leveldb;
namespace std
{
@ -80,7 +75,12 @@ ldb::Slice toSlice(h256 const& _h, unsigned _sub = 0);
using BlocksHash = std::unordered_map<h256, bytes>;
using TransactionHashes = h256s;
using UncleHashes = h256s;
using ImportRoute = std::pair<h256s, h256s>;
struct ImportRoute
{
h256s deadBlocks;
h256s liveBlocks;
};
enum {
ExtraDetails = 0,
@ -112,7 +112,7 @@ public:
/// Sync the chain with any incoming blocks. All blocks should be processed in order.
/// @returns fresh blocks, dead blocks and true iff there are additional blocks waiting to be processed.
std::tuple<h256s, h256s, bool> sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max);
std::tuple<ImportRoute, bool, unsigned> sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max);
/// Attempt to import the given block directly into the CanonBlockChain and sync with the state DB.
/// @returns the block hashes of any blocks that came into/went out of the canonical block chain.
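For illustration, a hedged sketch (not part of the diff) of how a caller might consume the new sync() return type; the BlockChain, BlockQueue and OverlayDB references are assumed to exist already:
#include <tuple>
#include <libethereum/BlockChain.h>
unsigned drainQueue(dev::eth::BlockChain& _bc, dev::eth::BlockQueue& _bq, dev::OverlayDB const& _stateDB)
{
	dev::eth::ImportRoute route;
	bool moreInQueue = false;
	unsigned imported = 0;
	std::tie(route, moreInQueue, imported) = _bc.sync(_bq, _stateDB, 100);
	for (auto const& h: route.liveBlocks)
		(void)h; // entered the canonical chain
	for (auto const& h: route.deadBlocks)
		(void)h; // left the canonical chain
	return imported;
}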

800
libethereum/BlockChainSync.cpp

@ -0,0 +1,800 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file BlockChainSync.cpp
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#include "BlockChainSync.h"
#include <chrono>
#include <libdevcore/Common.h>
#include <libp2p/Host.h>
#include <libp2p/Session.h>
#include <libethcore/Exceptions.h>
#include <libethcore/Params.h>
#include "BlockChain.h"
#include "BlockQueue.h"
#include "EthereumPeer.h"
#include "EthereumHost.h"
#include "DownloadMan.h"
using namespace std;
using namespace dev;
using namespace dev::eth;
using namespace p2p;
unsigned const c_chainReorgSize = 30000;
BlockChainSync::BlockChainSync(EthereumHost& _host):
m_host(_host)
{
m_bqRoomAvailable = host().bq().onRoomAvailable([this]()
{
RecursiveGuard l(x_sync);
continueSync();
});
}
BlockChainSync::~BlockChainSync()
{
RecursiveGuard l(x_sync);
abortSync();
}
DownloadMan const& BlockChainSync::downloadMan() const
{
return host().downloadMan();
}
DownloadMan& BlockChainSync::downloadMan()
{
return host().downloadMan();
}
void BlockChainSync::abortSync()
{
DEV_INVARIANT_CHECK;
host().foreachPeer([this](EthereumPeer* _p) { onPeerAborting(_p); return true; });
downloadMan().resetToChain(h256s());
DEV_INVARIANT_CHECK;
}
void BlockChainSync::onPeerStatus(EthereumPeer* _peer)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
if (_peer->m_genesisHash != host().chain().genesisHash())
_peer->disable("Invalid genesis hash");
else if (_peer->m_protocolVersion != host().protocolVersion() && _peer->m_protocolVersion != EthereumHost::c_oldProtocolVersion)
_peer->disable("Invalid protocol version.");
else if (_peer->m_networkId != host().networkId())
_peer->disable("Invalid network identifier.");
else if (_peer->session()->info().clientVersion.find("/v0.7.0/") != string::npos)
_peer->disable("Blacklisted client version.");
else if (host().isBanned(_peer->session()->id()))
_peer->disable("Peer banned for previous bad behaviour.");
else
{
unsigned hashes = estimatedHashes();
_peer->m_expectedHashes = hashes;
onNewPeer(_peer);
}
DEV_INVARIANT_CHECK;
}
unsigned BlockChainSync::estimatedHashes() const
{
BlockInfo block = host().chain().info();
time_t lastBlockTime = (block.hash() == host().chain().genesisHash()) ? 1428192000 : (time_t)block.timestamp;
time_t now = time(0);
unsigned blockCount = c_chainReorgSize;
if (lastBlockTime > now)
clog(NetWarn) << "Clock skew? Latest block is in the future";
else
blockCount += (now - lastBlockTime) / (unsigned)c_durationLimit;
clog(NetAllDetail) << "Estimated hashes: " << blockCount;
return blockCount;
}
void BlockChainSync::requestBlocks(EthereumPeer* _peer)
{
if (host().bq().knownFull())
{
clog(NetAllDetail) << "Waiting for block queue before downloading blocks";
pauseSync();
_peer->setIdle();
return;
}
_peer->requestBlocks();
if (_peer->m_asking != Asking::Blocks) //nothing to download
{
peerDoneBlocks(_peer);
if (downloadMan().isComplete())
completeSync();
return;
}
}
void BlockChainSync::onPeerBlocks(EthereumPeer* _peer, RLP const& _r)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
unsigned itemCount = _r.itemCount();
clog(NetMessageSummary) << "Blocks (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreBlocks");
_peer->setIdle();
if (m_state != SyncState::Blocks && m_state != SyncState::NewBlocks)
clog(NetWarn) << "Unexpected Blocks received!";
if (m_state == SyncState::Waiting)
{
clog(NetAllDetail) << "Ignored blocks while waiting";
return;
}
if (itemCount == 0)
{
// Got to this peer's latest block - just give up.
peerDoneBlocks(_peer);
if (downloadMan().isComplete())
completeSync();
return;
}
unsigned success = 0;
unsigned future = 0;
unsigned unknown = 0;
unsigned got = 0;
unsigned repeated = 0;
u256 maxUnknownNumber = 0;
h256 maxUnknown;
for (unsigned i = 0; i < itemCount; ++i)
{
auto h = BlockInfo::headerHash(_r[i].data());
if (_peer->m_sub.noteBlock(h))
{
_peer->addRating(10);
switch (host().bq().import(_r[i].data(), host().chain()))
{
case ImportResult::Success:
success++;
break;
case ImportResult::Malformed:
case ImportResult::BadChain:
_peer->disable("Malformed block received.");
return;
case ImportResult::FutureTimeKnown:
future++;
break;
case ImportResult::AlreadyInChain:
case ImportResult::AlreadyKnown:
got++;
break;
case ImportResult::FutureTimeUnknown:
future++; //Fall through
case ImportResult::UnknownParent:
{
unknown++;
if (m_state == SyncState::NewBlocks)
{
BlockInfo bi;
bi.populateFromHeader(_r[i][0]);
if (bi.number > maxUnknownNumber)
{
maxUnknownNumber = bi.number;
maxUnknown = h;
}
}
break;
}
default:;
}
}
else
{
_peer->addRating(0); // -1?
repeated++;
}
}
clog(NetMessageSummary) << dec << success << "imported OK," << unknown << "with unknown parents," << future << "with future timestamps," << got << " already known," << repeated << " repeats received.";
if (host().bq().unknownFull())
{
clog(NetWarn) << "Too many unknown blocks, restarting sync";
restartSync();
return;
}
if (m_state == SyncState::NewBlocks && unknown > 0)
{
completeSync();
resetSyncFor(_peer, maxUnknown, std::numeric_limits<u256>::max()); //TODO: proper total difficulty
}
if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks)
{
if (downloadMan().isComplete())
completeSync();
else
requestBlocks(_peer); // Some of the blocks might have been downloaded by helping peers, proceed anyway
}
DEV_INVARIANT_CHECK;
}
void BlockChainSync::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r)
{
DEV_INVARIANT_CHECK;
RecursiveGuard l(x_sync);
auto h = BlockInfo::headerHash(_r[0].data());
clog(NetMessageSummary) << "NewBlock: " << h;
if (_r.itemCount() != 2)
_peer->disable("NewBlock without 2 data fields.");
else
{
switch (host().bq().import(_r[0].data(), host().chain()))
{
case ImportResult::Success:
_peer->addRating(100);
break;
case ImportResult::FutureTimeKnown:
//TODO: Rating dependent on how far in future it is.
break;
case ImportResult::Malformed:
case ImportResult::BadChain:
_peer->disable("Malformed block received.");
return;
case ImportResult::AlreadyInChain:
case ImportResult::AlreadyKnown:
break;
case ImportResult::FutureTimeUnknown:
case ImportResult::UnknownParent:
clog(NetMessageSummary) << "Received block with no known parent. Resyncing...";
resetSyncFor(_peer, h, _r[1].toInt<u256>());
break;
default:;
}
DEV_GUARDED(_peer->x_knownBlocks)
_peer->m_knownBlocks.insert(h);
}
DEV_INVARIANT_CHECK;
}
PV60Sync::PV60Sync(EthereumHost& _host):
BlockChainSync(_host)
{
resetSync();
}
SyncStatus PV60Sync::status() const
{
RecursiveGuard l(x_sync);
SyncStatus res;
res.state = m_state;
if (m_state == SyncState::Hashes)
{
res.hashesTotal = m_estimatedHashes;
res.hashesReceived = static_cast<unsigned>(m_syncingNeededBlocks.size());
res.hashesEstimated = true;
}
else if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks || m_state == SyncState::Waiting)
{
res.blocksTotal = downloadMan().chainSize();
res.blocksReceived = downloadMan().blocksGot().size();
}
return res;
}
void PV60Sync::setState(EthereumPeer* _peer, SyncState _s, bool _isSyncing, bool _needHelp)
{
bool changedState = (m_state != _s);
m_state = _s;
if (_isSyncing != (m_syncer == _peer) || (_isSyncing && changedState))
changeSyncer(_isSyncing ? _peer : nullptr, _needHelp);
else if (_s == SyncState::Idle)
changeSyncer(nullptr, _needHelp);
assert(!!m_syncer || _s == SyncState::Idle);
}
void PV60Sync::resetSync()
{
m_syncingLatestHash = h256();
m_syncingLastReceivedHash = h256();
m_syncingTotalDifficulty = 0;
m_syncingNeededBlocks.clear();
}
void PV60Sync::restartSync()
{
resetSync();
host().bq().clear();
if (isSyncing())
transition(m_syncer, SyncState::Idle);
}
void PV60Sync::completeSync()
{
if (isSyncing())
transition(m_syncer, SyncState::Idle);
}
void PV60Sync::pauseSync()
{
if (isSyncing())
setState(m_syncer, SyncState::Waiting, true);
}
void PV60Sync::continueSync()
{
transition(m_syncer, SyncState::Blocks);
}
void PV60Sync::onNewPeer(EthereumPeer* _peer)
{
setNeedsSyncing(_peer, _peer->m_latestHash, _peer->m_totalDifficulty);
}
void PV60Sync::transition(EthereumPeer* _peer, SyncState _s, bool _force, bool _needHelp)
{
clog(NetMessageSummary) << "Transition!" << EthereumHost::stateName(_s) << "from" << EthereumHost::stateName(m_state) << ", " << (isSyncing(_peer) ? "syncing" : "holding") << (needsSyncing(_peer) ? "& needed" : "");
if (m_state == SyncState::Idle && _s != SyncState::Idle)
_peer->m_requireTransactions = true;
RLPStream s;
if (_s == SyncState::Hashes)
{
if (m_state == SyncState::Idle)
{
if (isSyncing(_peer))
clog(NetWarn) << "Bad state: not asking for Hashes, yet syncing!";
m_syncingLatestHash = _peer->m_latestHash;
m_syncingTotalDifficulty = _peer->m_totalDifficulty;
setState(_peer, _s, true);
_peer->requestHashes(m_syncingLastReceivedHash ? m_syncingLastReceivedHash : m_syncingLatestHash);
DEV_INVARIANT_CHECK;
return;
}
else if (m_state == SyncState::Hashes)
{
if (!isSyncing(_peer))
clog(NetWarn) << "Bad state: asking for Hashes yet not syncing!";
setState(_peer, _s, true);
_peer->requestHashes(m_syncingLastReceivedHash);
DEV_INVARIANT_CHECK;
return;
}
}
else if (_s == SyncState::Blocks)
{
if (m_state == SyncState::Hashes)
{
if (!isSyncing(_peer))
{
clog(NetWarn) << "Bad state: asking for Hashes yet not syncing!";
return;
}
if (shouldGrabBlocks(_peer))
{
clog(NetNote) << "Difficulty of hashchain HIGHER. Grabbing" << m_syncingNeededBlocks.size() << "blocks [latest now" << m_syncingLatestHash << ", was" << host().latestBlockSent() << "]";
downloadMan().resetToChain(m_syncingNeededBlocks);
resetSync();
}
else
{
clog(NetNote) << "Difficulty of hashchain not HIGHER. Ignoring.";
resetSync();
setState(_peer, SyncState::Idle, false);
return;
}
assert (isSyncing(_peer));
}
// run through into...
if (m_state == SyncState::Idle || m_state == SyncState::Hashes || m_state == SyncState::Blocks || m_state == SyncState::Waiting)
{
// Looks like it's the best yet for total difficulty. Set to download.
setState(_peer, SyncState::Blocks, isSyncing(_peer), _needHelp); // will kick off other peers to help if available.
requestBlocks(_peer);
DEV_INVARIANT_CHECK;
return;
}
}
else if (_s == SyncState::NewBlocks)
{
if (m_state != SyncState::Idle && m_state != SyncState::NewBlocks && m_state != SyncState::Waiting)
clog(NetWarn) << "Bad state: Asking new blocks while syncing!";
else
{
setState(_peer, SyncState::NewBlocks, true, _needHelp);
requestBlocks(_peer);
DEV_INVARIANT_CHECK;
return;
}
}
else if (_s == SyncState::Waiting)
{
if (m_state != SyncState::Blocks && m_state != SyncState::NewBlocks && m_state != SyncState::Hashes && m_state != SyncState::Waiting)
clog(NetWarn) << "Bad state: Entering waiting state while not downloading blocks!";
else
{
setState(_peer, SyncState::Waiting, isSyncing(_peer), _needHelp);
return;
}
}
else if (_s == SyncState::Idle)
{
host().foreachPeer([this](EthereumPeer* _p) { _p->setIdle(); return true; });
if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks)
{
clog(NetNote) << "Finishing blocks fetch...";
// a bit overkill given that the other nodes may yet have the needed blocks, but better to be safe than sorry.
if (isSyncing(_peer))
noteDoneBlocks(_peer, _force);
// NOTE: need to notify of giving up on chain-hashes, too, altering state as necessary.
_peer->m_sub.doneFetch();
_peer->setIdle();
setState(_peer, SyncState::Idle, false);
}
else if (m_state == SyncState::Hashes)
{
clog(NetNote) << "Finishing hashes fetch...";
setState(_peer, SyncState::Idle, false);
}
// Otherwise it's fine. We don't care if it's Nothing->Nothing.
DEV_INVARIANT_CHECK;
return;
}
clog(NetWarn) << "Invalid state transition:" << EthereumHost::stateName(_s) << "from" << EthereumHost::stateName(m_state) << ", " << (isSyncing(_peer) ? "syncing" : "holding") << (needsSyncing(_peer) ? "& needed" : "");
}
void PV60Sync::resetSyncFor(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td)
{
setNeedsSyncing(_peer, _latestHash, _td);
}
void PV60Sync::setNeedsSyncing(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td)
{
_peer->m_latestHash = _latestHash;
_peer->m_totalDifficulty = _td;
if (_peer->m_latestHash)
noteNeedsSyncing(_peer);
_peer->session()->addNote("sync", string(isSyncing(_peer) ? "ongoing" : "holding") + (needsSyncing(_peer) ? " & needed" : ""));
}
bool PV60Sync::needsSyncing(EthereumPeer* _peer) const
{
return !!_peer->m_latestHash;
}
bool PV60Sync::isSyncing(EthereumPeer* _peer) const
{
return m_syncer == _peer;
}
bool PV60Sync::shouldGrabBlocks(EthereumPeer* _peer) const
{
auto td = _peer->m_totalDifficulty;
auto lh = _peer->m_latestHash;
auto ctd = host().chain().details().totalDifficulty;
if (m_syncingNeededBlocks.empty())
return false;
clog(NetNote) << "Should grab blocks? " << td << "vs" << ctd << ";" << m_syncingNeededBlocks.size() << " blocks, ends" << m_syncingNeededBlocks.back();
if (td < ctd || (td == ctd && host().chain().currentHash() == lh))
return false;
return true;
}
void PV60Sync::attemptSync(EthereumPeer* _peer)
{
if (m_state != SyncState::Idle)
{
clog(NetAllDetail) << "Can't sync with this peer - outstanding asks.";
return;
}
// if already done this, then ignore.
if (!needsSyncing(_peer))
{
clog(NetAllDetail) << "Already synced with this peer.";
return;
}
unsigned n = host().chain().number();
u256 td = host().chain().details().totalDifficulty;
if (host().bq().isActive())
td += host().bq().difficulty();
clog(NetAllDetail) << "Attempt chain-grab? Latest:" << (m_syncingLastReceivedHash ? m_syncingLastReceivedHash : m_syncingLatestHash) << ", number:" << n << ", TD:" << td << " versus " << _peer->m_totalDifficulty;
if (td >= _peer->m_totalDifficulty)
{
clog(NetAllDetail) << "No. Our chain is better.";
resetNeedsSyncing(_peer);
transition(_peer, SyncState::Idle);
}
else
{
clog(NetAllDetail) << "Yes. Their chain is better.";
m_estimatedHashes = _peer->m_expectedHashes - c_chainReorgSize;
transition(_peer, SyncState::Hashes);
}
}
void PV60Sync::noteNeedsSyncing(EthereumPeer* _peer)
{
// if already downloading hash-chain, ignore.
if (isSyncing())
{
clog(NetAllDetail) << "Sync in progress: Just set to help out.";
if (m_state == SyncState::Blocks)
requestBlocks(_peer);
}
else
// otherwise check to see if we should be downloading...
attemptSync(_peer);
}
void PV60Sync::changeSyncer(EthereumPeer* _syncer, bool _needHelp)
{
if (_syncer)
clog(NetAllDetail) << "Changing syncer to" << _syncer->session()->socketId();
else
clog(NetAllDetail) << "Clearing syncer.";
m_syncer = _syncer;
if (isSyncing())
{
if (_needHelp && (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks))
host().foreachPeer([&](EthereumPeer* _p)
{
clog(NetNote) << "Getting help with downloading blocks";
if (_p != _syncer && _p->m_asking == Asking::Nothing)
transition(_p, m_state);
return true;
});
}
else
{
// start grabbing next hash chain if there is one.
host().foreachPeer([this](EthereumPeer* _p)
{
attemptSync(_p);
return !isSyncing();
});
if (!isSyncing())
{
if (m_state != SyncState::Idle)
setState(_syncer, SyncState::Idle);
clog(NetNote) << "No more peers to sync with.";
}
}
assert(!!m_syncer || m_state == SyncState::Idle);
}
void PV60Sync::peerDoneBlocks(EthereumPeer* _peer)
{
noteDoneBlocks(_peer, false);
}
void PV60Sync::noteDoneBlocks(EthereumPeer* _peer, bool _clemency)
{
resetNeedsSyncing(_peer);
if (downloadMan().isComplete())
{
// Done our chain-get.
clog(NetNote) << "Chain download complete.";
// 1/100th for each useful block hash.
_peer->addRating(downloadMan().chainSize() / 100);
downloadMan().reset();
}
else if (isSyncing(_peer))
{
if (_clemency)
clog(NetNote) << "Chain download failed. Aborted while incomplete.";
else
{
// Done our chain-get.
clog(NetWarn) << "Chain download failed. Peer with blocks didn't have them all. This peer is bad and should be punished.";
clog(NetWarn) << downloadMan().remaining();
clog(NetWarn) << "WOULD BAN.";
// m_banned.insert(_peer->session()->id()); // We know who you are!
// _peer->disable("Peer sent hashes but was unable to provide the blocks.");
}
resetSync();
downloadMan().reset();
transition(_peer, SyncState::Idle);
}
_peer->m_sub.doneFetch();
}
void PV60Sync::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
_peer->setIdle();
if (!isSyncing(_peer))
{
clog(NetMessageSummary) << "Ignoring hashes since not syncing";
return;
}
if (_hashes.size() == 0)
{
transition(_peer, SyncState::Blocks);
return;
}
unsigned knowns = 0;
unsigned unknowns = 0;
for (unsigned i = 0; i < _hashes.size(); ++i)
{
auto h = _hashes[i];
auto status = host().bq().blockStatus(h);
if (status == QueueStatus::Importing || status == QueueStatus::Ready || host().chain().isKnown(h))
{
clog(NetMessageSummary) << "block hash ready:" << h << ". Start blocks download...";
assert (isSyncing(_peer));
transition(_peer, SyncState::Blocks);
return;
}
else if (status == QueueStatus::Bad)
{
cwarn << "block hash bad!" << h << ". Bailing...";
transition(_peer, SyncState::Idle);
return;
}
else if (status == QueueStatus::Unknown)
{
unknowns++;
m_syncingNeededBlocks.push_back(h);
}
else
knowns++;
m_syncingLastReceivedHash = h;
}
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns; now at" << m_syncingLastReceivedHash;
if (m_syncingNeededBlocks.size() > _peer->m_expectedHashes)
{
_peer->disable("Too many hashes");
restartSync();
return;
}
// run through - ask for more.
transition(_peer, SyncState::Hashes);
DEV_INVARIANT_CHECK;
}
void PV60Sync::onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
if (isSyncing())
{
clog(NetMessageSummary) << "Ignoring since we're already downloading.";
return;
}
clog(NetMessageDetail) << "Not syncing and new block hash discovered: syncing without help.";
unsigned knowns = 0;
unsigned unknowns = 0;
for (auto const& h: _hashes)
{
_peer->addRating(1);
DEV_GUARDED(_peer->x_knownBlocks)
_peer->m_knownBlocks.insert(h);
auto status = host().bq().blockStatus(h);
if (status == QueueStatus::Importing || status == QueueStatus::Ready || host().chain().isKnown(h))
knowns++;
else if (status == QueueStatus::Bad)
{
cwarn << "block hash bad!" << h << ". Bailing...";
return;
}
else if (status == QueueStatus::Unknown)
{
unknowns++;
m_syncingNeededBlocks.push_back(h);
}
else
knowns++;
}
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns";
if (unknowns > 0)
{
clog(NetNote) << "Not syncing and new block hash discovered: syncing without help.";
downloadMan().resetToChain(m_syncingNeededBlocks);
resetSync();
transition(_peer, SyncState::NewBlocks, false, false);
}
DEV_INVARIANT_CHECK;
}
void PV60Sync::abortSync(EthereumPeer* _peer)
{
// Can't check invariants here since the peer is already removed from the list and the state is not updated yet.
if (isSyncing(_peer))
{
host().foreachPeer([this](EthereumPeer* _p) { _p->setIdle(); return true; });
transition(_peer, SyncState::Idle, true);
}
DEV_INVARIANT_CHECK;
}
void PV60Sync::onPeerAborting(EthereumPeer* _peer)
{
RecursiveGuard l(x_sync);
// Can't check invariants here since the peer is already removed from the list and the state is not updated yet.
abortSync(_peer);
DEV_INVARIANT_CHECK;
}
bool PV60Sync::invariants() const
{
if (m_state == SyncState::Idle && !!m_syncer)
return false;
if (m_state != SyncState::Idle && !m_syncer)
return false;
if (m_state == SyncState::Hashes)
{
bool hashes = false;
host().foreachPeer([&](EthereumPeer* _p) { if (_p->m_asking == Asking::Hashes) hashes = true; return !hashes; });
if (!hashes)
return false;
if (!m_syncingLatestHash)
return false;
if (m_syncingNeededBlocks.empty() != (!m_syncingLastReceivedHash))
return false;
}
if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks)
{
bool blocks = false;
host().foreachPeer([&](EthereumPeer* _p) { if (_p->m_asking == Asking::Blocks) blocks = true; return !blocks; });
if (!blocks)
return false;
if (downloadMan().isComplete())
return false;
}
if (m_state == SyncState::Idle)
{
bool busy = false;
host().foreachPeer([&](EthereumPeer* _p) { if (_p->m_asking != Asking::Nothing && _p->m_asking != Asking::State) busy = true; return !busy; });
if (busy)
return false;
}
if (m_state == SyncState::Waiting && !host().bq().isActive())
return false;
return true;
}
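As a rough worked example of the estimate computed by BlockChainSync::estimatedHashes() above (a sketch only, not part of this diff; the block-time figure is an assumption, not the value of c_durationLimit):
#include <ctime>

// Reorg allowance plus however many blocks the network has probably produced
// since our latest block; mirrors estimatedHashes() above.
unsigned estimateHashes(std::time_t lastBlockTime, std::time_t now,
                        unsigned chainReorgSize, unsigned targetBlockTime)
{
    unsigned blockCount = chainReorgSize;
    if (lastBlockTime <= now)  // otherwise: clock skew, keep just the reorg allowance
        blockCount += unsigned(now - lastBlockTime) / targetBlockTime;
    return blockCount;
}
// A node one day behind, with the 30000-block reorg allowance and an assumed
// ~15 s block time: 30000 + 86400 / 15 = 35760 hashes expected at most.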

278
libethereum/BlockChainSync.h

@ -0,0 +1,278 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file BlockChainSync.h
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#pragma once
#include <mutex>
#include <libdevcore/Guards.h>
#include <libdevcore/RangeMask.h>
#include <libethcore/Common.h>
#include <libp2p/Common.h>
#include "CommonNet.h"
#include "DownloadMan.h"
namespace dev
{
class RLPStream;
namespace eth
{
class EthereumHost;
class BlockQueue;
class EthereumPeer;
/**
* @brief Base BlockChain synchronization strategy class.
* Syncs to peers and keeps up to date. Base class handles block downloading but does not contain any details on state transfer logic.
*/
class BlockChainSync: public HasInvariants
{
public:
BlockChainSync(EthereumHost& _host);
virtual ~BlockChainSync();
void abortSync(); ///< Abort all sync activity
DownloadMan const& downloadMan() const;
DownloadMan& downloadMan();
/// @returns true if sync is in progress
virtual bool isSyncing() const = 0;
/// Called by peer to report status
virtual void onPeerStatus(EthereumPeer* _peer);
/// Called by peer once it has new blocks during sync
virtual void onPeerBlocks(EthereumPeer* _peer, RLP const& _r);
/// Called by peer once it has new blocks
virtual void onPeerNewBlock(EthereumPeer* _peer, RLP const& _r);
/// Called by peer once it has new hashes
virtual void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes) = 0;
/// Called by peer once it has another sequential block of hashes during sync
virtual void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes) = 0;
/// Called by peer when it is disconnecting
virtual void onPeerAborting(EthereumPeer* _peer) = 0;
/// @returns Synchronization status
virtual SyncStatus status() const = 0;
static char const* stateName(SyncState _s) { return s_stateNames[static_cast<int>(_s)]; }
protected:
//To be implemented in derived classes:
/// New valid peer appears
virtual void onNewPeer(EthereumPeer* _peer) = 0;
/// Peer done downloading blocks
virtual void peerDoneBlocks(EthereumPeer* _peer) = 0;
/// Resume downloading after the waiting state
virtual void continueSync() = 0;
/// Restart sync
virtual void restartSync() = 0;
/// Called after all blocks have been downloaded
virtual void completeSync() = 0;
/// Enter waiting state
virtual void pauseSync() = 0;
/// Restart sync for given peer
virtual void resetSyncFor(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td) = 0;
EthereumHost& host() { return m_host; }
EthereumHost const& host() const { return m_host; }
/// Estimates max number of hashes peers can give us.
unsigned estimatedHashes() const;
/// Request blocks from peer if needed
void requestBlocks(EthereumPeer* _peer);
protected:
Handler m_bqRoomAvailable; ///< Triggered once the block queue has room available
mutable RecursiveMutex x_sync;
SyncState m_state = SyncState::Idle; ///< Current sync state
unsigned m_estimatedHashes = 0; ///< Number of estimated hashes for the last peer over PV60. Used for status reporting only.
private:
static char const* const s_stateNames[static_cast<int>(SyncState::Size)];
bool invariants() const override = 0;
EthereumHost& m_host;
HashDownloadMan m_hashMan;
};
/**
* @brief Synchronization over PV60. Selects a single peer and tries to download hashes from it. After hash download is complete,
* syncs to peers and keeps up to date.
*/
/**
* Transitions:
*
* Idle->Hashes
* Triggered when:
* * A new peer appears that we can sync to
* A transition to Idle happens while there are peers we can sync to
* Effects:
* * Set chain sync (m_syncingTotalDifficulty, m_syncingLatestHash, m_syncer)
* * Requests hashes from m_syncer
*
* Hashes->Idle
* Triggered when:
* * Received too many hashes
* * Received 0 total hashes from m_syncer
* * m_syncer aborts
* Effects:
* In case of too many hashes sync is reset
*
* Hashes->Blocks
* Triggered when:
* * Received known hash from m_syncer
* Received 0 hashes from m_syncer and m_syncingNeededBlocks not empty
* Effects:
* Set up download manager, clear m_syncingNeededBlocks. Set all peers to help with downloading if they can
*
* Blocks->Idle
* Triggered when:
* * m_syncer aborts
* * m_syncer does not have required block
* * All blocks downloaded
* Block queue is full with unknown blocks
* Effects:
* * Download manager is reset
*
* Blocks->Waiting
* Triggered when:
* * Block queue is full with known blocks
* Effects:
* * Stop requesting blocks from peers
*
* Waiting->Blocks
* Triggered when:
* * Block queue has space for new blocks
* Effects:
* * Continue requesting blocks from peers
*
* Idle->NewBlocks
* Triggered when:
* * New block hashes arrive
* Effects:
* Set up download manager, clear m_syncingNeededBlocks. Download blocks from a single peer. If downloaded blocks have unknown parents, set the peer to sync
*
* NewBlocks->Idle
* Triggered when:
* * m_syncer aborts
* * m_syncer does not have required block
* * All new blocks downloaded
* Block queue is full with unknown blocks
* Effects:
* * Download manager is reset
*
*/
class PV60Sync: public BlockChainSync
{
public:
PV60Sync(EthereumHost& _host);
/// @returns true if sync is in progress
bool isSyncing() const override { return !!m_syncer; }
/// Called by peer once it has new hashes
void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes) override;
/// Called by peer once it has another sequential block of hashes during sync
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes) override;
/// Called by peer when it is disconnecting
void onPeerAborting(EthereumPeer* _peer) override;
/// @returns Sync status
SyncStatus status() const override;
protected:
void onNewPeer(EthereumPeer* _peer) override;
void continueSync() override;
void peerDoneBlocks(EthereumPeer* _peer) override;
void restartSync() override;
void completeSync() override;
void pauseSync() override;
void resetSyncFor(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td) override;
private:
/// Transition sync state in a particular direction. @param _peer Peer that is responsible for state transfer
void transition(EthereumPeer* _peer, SyncState _s, bool _force = false, bool _needHelp = true);
/// Reset peer syncing requirements state.
void resetNeedsSyncing(EthereumPeer* _peer) { setNeedsSyncing(_peer, h256(), 0); }
/// Update peer syncing requirements state.
void setNeedsSyncing(EthereumPeer* _peer, h256 const& _latestHash, u256 const& _td);
/// Do we presently need syncing with this peer?
bool needsSyncing(EthereumPeer* _peer) const;
/// Check whether the session should bother grabbing blocks from a peer.
bool shouldGrabBlocks(EthereumPeer* _peer) const;
/// Attempt to begin syncing with the peer; first check the peer has a more difficult chain to download, then start asking for hashes, then move to blocks
void attemptSync(EthereumPeer* _peer);
/// Update our syncing state
void setState(EthereumPeer* _peer, SyncState _s, bool _isSyncing = false, bool _needHelp = false);
/// Check if peer is main syncer
bool isSyncing(EthereumPeer* _peer) const;
/// Check if we need (re-)syncing with the peer.
void noteNeedsSyncing(EthereumPeer* _who);
/// Set main syncing peer
void changeSyncer(EthereumPeer* _syncer, bool _needHelp);
/// Called when peer done downloading blocks
void noteDoneBlocks(EthereumPeer* _who, bool _clemency);
/// Abort syncing for peer
void abortSync(EthereumPeer* _peer);
/// Reset hash chain syncing
void resetSync();
bool invariants() const override;
h256s m_syncingNeededBlocks; ///< The blocks that we should download from this peer.
h256 m_syncingLastReceivedHash; ///< Hash most recently received from peer.
h256 m_syncingLatestHash; ///< Latest block's hash of the peer we are syncing to, as of the current sync.
u256 m_syncingTotalDifficulty; ///< Latest block's total difficulty of the peer we are syncing to, as of the current sync.
// TODO: switch to weak_ptr
EthereumPeer* m_syncer = nullptr; ///< Peer we are currently syncing with
};
}
}
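For readers of the transition table documented above, here is a compact sketch (not part of this diff) of which transitions PV60Sync::transition() accepts; it assumes the trimmed-down SyncState enum from CommonNet.h shown later in this commit:
// Anything outside this table is logged as an invalid transition and ignored.
bool isAcceptedTransition(SyncState from, SyncState to)
{
    switch (to)
    {
    case SyncState::Hashes:
        return from == SyncState::Idle || from == SyncState::Hashes;
    case SyncState::Blocks:
        return from == SyncState::Idle || from == SyncState::Hashes
            || from == SyncState::Blocks || from == SyncState::Waiting;
    case SyncState::NewBlocks:
        return from == SyncState::Idle || from == SyncState::NewBlocks
            || from == SyncState::Waiting;
    case SyncState::Waiting:
        return from == SyncState::Hashes || from == SyncState::Blocks
            || from == SyncState::NewBlocks || from == SyncState::Waiting;
    case SyncState::Idle:
        return true;  // any state may drop back to Idle (abort, completion, restart)
    default:
        return false;
    }
}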

7
libethereum/BlockDetails.h

@ -22,15 +22,10 @@
#pragma once
#include <unordered_map>
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <libdevcore/db.h>
#include <libdevcore/Log.h>
#include <libdevcore/RLP.h>
#include "TransactionReceipt.h"
namespace ldb = leveldb;
namespace dev
{

87
libethereum/BlockQueue.cpp

@ -36,6 +36,7 @@ const char* BlockQueueChannel::name() { return EthOrange "[]>"; }
#else
const char* BlockQueueChannel::name() { return EthOrange "▣┅▶"; }
#endif
const char* BlockQueueTraceChannel::name() { return EthOrange "▣ ▶"; }
size_t const c_maxKnownCount = 100000;
size_t const c_maxKnownSize = 128 * 1024 * 1024;
@ -81,6 +82,8 @@ void BlockQueue::clear()
m_unknownCount = 0;
m_knownSize = 0;
m_knownCount = 0;
m_difficulty = 0;
m_drainingDifficulty = 0;
}
void BlockQueue::verifierBody()
@ -181,14 +184,14 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
// Check if we already know this block.
h256 h = BlockInfo::headerHash(_block);
cblockq << "Queuing block" << h << "for import...";
clog(BlockQueueTraceChannel) << "Queuing block" << h << "for import...";
UpgradableGuard l(m_lock);
if (m_readySet.count(h) || m_drainingSet.count(h) || m_unknownSet.count(h) || m_knownBad.count(h))
{
// Already know about this one.
cblockq << "Already known.";
clog(BlockQueueTraceChannel) << "Already known.";
return ImportResult::AlreadyKnown;
}
@ -226,10 +229,12 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
time_t bit = (unsigned)bi.timestamp;
if (strftime(buf, 24, "%X", localtime(&bit)) == 0)
buf[0] = '\0'; // empty in case strftime fails
cblockq << "OK - queued for future [" << bi.timestamp << "vs" << time(0) << "] - will wait until" << buf;
clog(BlockQueueTraceChannel) << "OK - queued for future [" << bi.timestamp << "vs" << time(0) << "] - will wait until" << buf;
m_unknownSize += _block.size();
m_unknownCount++;
return ImportResult::FutureTime;
m_difficulty += bi.difficulty;
bool unknown = !m_readySet.count(bi.parentHash) && !m_drainingSet.count(bi.parentHash) && !_bc.isKnown(bi.parentHash);
return unknown ? ImportResult::FutureTimeUnknown : ImportResult::FutureTimeKnown;
}
else
{
@ -244,10 +249,11 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
else if (!m_readySet.count(bi.parentHash) && !m_drainingSet.count(bi.parentHash) && !_bc.isKnown(bi.parentHash))
{
// We don't know the parent (yet) - queue it up for later. It'll get resent to us if we find out about its ancestry later on.
cblockq << "OK - queued as unknown parent:" << bi.parentHash;
clog(BlockQueueTraceChannel) << "OK - queued as unknown parent:" << bi.parentHash;
m_unknown.insert(make_pair(bi.parentHash, make_pair(h, _block.toBytes())));
m_unknownSet.insert(h);
m_unknownSize += _block.size();
m_difficulty += bi.difficulty;
m_unknownCount++;
return ImportResult::UnknownParent;
@ -255,12 +261,13 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
else
{
// If valid, append to blocks.
cblockq << "OK - ready for chain insertion.";
clog(BlockQueueTraceChannel) << "OK - ready for chain insertion.";
DEV_GUARDED(m_verification)
m_unverified.push_back(UnverifiedBlock { h, bi.parentHash, _block.toBytes() });
m_moreToVerify.notify_one();
m_readySet.insert(h);
m_knownSize += _block.size();
m_difficulty += bi.difficulty;
m_knownCount++;
noteReady_WITH_LOCK(h);
@ -350,13 +357,16 @@ bool BlockQueue::doneDrain(h256s const& _bad)
WriteGuard l(m_lock);
DEV_INVARIANT_CHECK;
m_drainingSet.clear();
m_difficulty -= m_drainingDifficulty;
m_drainingDifficulty = 0;
if (_bad.size())
{
// at least one of them was bad.
m_knownBad += _bad;
for (h256 const& b : _bad)
updateBad(b);
} return !m_readySet.empty();
}
return !m_readySet.empty();
}
void BlockQueue::tick(BlockChain const& _bc)
@ -427,32 +437,35 @@ bool BlockQueue::unknownFull() const
void BlockQueue::drain(VerifiedBlocks& o_out, unsigned _max)
{
WriteGuard l(m_lock);
DEV_INVARIANT_CHECK;
if (m_drainingSet.empty())
bool wasFull = false;
DEV_WRITE_GUARDED(m_lock)
{
bool wasFull = knownFull();
DEV_GUARDED(m_verification)
DEV_INVARIANT_CHECK;
wasFull = knownFull();
if (m_drainingSet.empty())
{
o_out.resize(min<unsigned>(_max, m_verified.size()));
for (unsigned i = 0; i < o_out.size(); ++i)
swap(o_out[i], m_verified[i]);
m_verified.erase(m_verified.begin(), advanced(m_verified.begin(), o_out.size()));
}
for (auto const& bs: o_out)
{
// TODO: @optimise use map<h256, bytes> rather than vector<bytes> & set<h256>.
auto h = bs.verified.info.hash();
m_drainingSet.insert(h);
m_readySet.erase(h);
m_knownSize -= bs.verified.block.size();
m_knownCount--;
m_drainingDifficulty = 0;
DEV_GUARDED(m_verification)
{
o_out.resize(min<unsigned>(_max, m_verified.size()));
for (unsigned i = 0; i < o_out.size(); ++i)
swap(o_out[i], m_verified[i]);
m_verified.erase(m_verified.begin(), advanced(m_verified.begin(), o_out.size()));
}
for (auto const& bs: o_out)
{
// TODO: @optimise use map<h256, bytes> rather than vector<bytes> & set<h256>.
auto h = bs.verified.info.hash();
m_drainingSet.insert(h);
m_drainingDifficulty += bs.verified.info.difficulty;
m_readySet.erase(h);
m_knownSize -= bs.verified.block.size();
m_knownCount--;
}
}
if (wasFull && !knownFull())
m_onRoomAvailable();
}
if (wasFull && !knownFull())
m_onRoomAvailable();
}
bool BlockQueue::invariants() const
@ -524,3 +537,19 @@ std::ostream& dev::eth::operator<<(std::ostream& _out, BlockQueueStatus const& _
return _out;
}
u256 BlockQueue::difficulty() const
{
UpgradableGuard l(m_lock);
return m_difficulty;
}
bool BlockQueue::isActive() const
{
UpgradableGuard l(m_lock);
if (m_readySet.empty() && m_drainingSet.empty())
DEV_GUARDED(m_verification)
if (m_verified.empty() && m_verifying.empty() && m_unverified.empty())
return false;
return true;
}
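A small sketch (not part of this diff) of how the new BlockQueue::difficulty() and isActive() accessors feed the sync decision in PV60Sync::attemptSync() above; queued-but-not-yet-imported work counts towards our own total difficulty, so we do not restart a hash download for blocks we are about to import anyway:
// Fragment assuming the usual libethereum headers; mirrors attemptSync() above.
u256 effectiveTotalDifficulty(BlockChain const& chain, BlockQueue const& bq)
{
    u256 td = chain.details().totalDifficulty;
    if (bq.isActive())          // queue still holds, verifies or drains blocks
        td += bq.difficulty();  // total difficulty of everything queued
    return td;
}
// A peer is only worth syncing to when its reported total difficulty exceeds this.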

5
libethereum/BlockQueue.h

@ -42,6 +42,7 @@ namespace eth
class BlockChain;
struct BlockQueueChannel: public LogChannel { static const char* name(); static const int verbosity = 4; };
struct BlockQueueTraceChannel: public LogChannel { static const char* name(); static const int verbosity = 7; };
#define cblockq dev::LogOutputStream<dev::eth::BlockQueueChannel, true>()
struct BlockQueueStatus
@ -117,6 +118,8 @@ public:
bool knownFull() const;
bool unknownFull() const;
u256 difficulty() const; // Total difficulty of queued blocks
bool isActive() const;
private:
struct UnverifiedBlock
@ -158,6 +161,8 @@ private:
std::atomic<size_t> m_knownSize; ///< Tracks total size in bytes of all known blocks;
std::atomic<size_t> m_unknownCount; ///< Tracks total count of unknown blocks. Used to avoid additional syncing
std::atomic<size_t> m_knownCount; ///< Tracks total count of known blocks. Used to avoid additional syncing
u256 m_difficulty; ///< Total difficulty of blocks in the queue
u256 m_drainingDifficulty; ///< Total difficulty of blocks in draining
};
std::ostream& operator<<(std::ostream& _out, BlockQueueStatus const& _s);

3
libethereum/CMakeLists.txt

@ -12,7 +12,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTATICLIB")
aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS})
if (JSONRPC)
include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
@ -31,7 +31,6 @@ target_link_libraries(${EXECUTABLE} whisper)
target_link_libraries(${EXECUTABLE} p2p)
target_link_libraries(${EXECUTABLE} devcrypto)
target_link_libraries(${EXECUTABLE} ethcore)
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${Boost_REGEX_LIBRARIES})
target_link_libraries(${EXECUTABLE} secp256k1)
if (JSONRPC)

6
libethereum/CanonBlockChain.h

@ -21,11 +21,6 @@
#pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <mutex>
#include <libdevcore/Log.h>
#include <libdevcore/Exceptions.h>
@ -35,7 +30,6 @@
#include "BlockDetails.h"
#include "Account.h"
#include "BlockChain.h"
namespace ldb = leveldb;
namespace dev
{

45
libethereum/Client.cpp

@ -355,6 +355,14 @@ bool Client::isSyncing() const
return false;
}
bool Client::isMajorSyncing() const
{
// TODO: only return true if it is actually doing a proper chain sync.
if (auto h = m_host.lock())
return h->isSyncing();
return false;
}
void Client::startedWorking()
{
// Synchronise the state according to the head of the block chain.
@ -612,24 +620,25 @@ bool Client::submitWork(ProofOfWork::Solution const& _solution)
}
unsigned static const c_syncMin = 1;
unsigned static const c_syncMax = 100;
unsigned static const c_syncMax = 1000;
double static const c_targetDuration = 1;
void Client::syncBlockQueue()
{
ImportRoute ir;
cwork << "BQ ==> CHAIN ==> STATE";
ImportRoute ir;
unsigned count;
boost::timer t;
tie(ir.first, ir.second, m_syncBlockQueue) = m_bc.sync(m_bq, m_stateDB, m_syncAmount);
tie(ir, m_syncBlockQueue, count) = m_bc.sync(m_bq, m_stateDB, m_syncAmount);
double elapsed = t.elapsed();
cnote << m_syncAmount << "blocks imported in" << unsigned(elapsed * 1000) << "ms (" << (m_syncAmount / elapsed) << "blocks/s)";
cnote << count << "blocks imported in" << unsigned(elapsed * 1000) << "ms (" << (count / elapsed) << "blocks/s)";
if (elapsed > c_targetDuration * 1.1 && m_syncAmount > c_syncMin)
m_syncAmount = max(c_syncMin, m_syncAmount * 9 / 10);
else if (elapsed < c_targetDuration * 0.9 && m_syncAmount < c_syncMax)
if (elapsed > c_targetDuration * 1.1 && count > c_syncMin)
m_syncAmount = max(c_syncMin, count * 9 / 10);
else if (count == m_syncAmount && elapsed < c_targetDuration * 0.9 && m_syncAmount < c_syncMax)
m_syncAmount = min(c_syncMax, m_syncAmount * 11 / 10 + 1);
if (ir.first.empty())
if (ir.liveBlocks.empty())
return;
onChainChanged(ir);
}
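The reworked batch sizing above aims at roughly one second per import batch; the following sketch (not part of this diff) restates the rule with the constants taken from the new c_syncMin, c_syncMax and c_targetDuration values:
#include <algorithm>

// Shrink to 90% of what was actually imported when a batch ran long; grow by ~10%
// only when a full batch finished fast; stay within [1, 1000].
unsigned nextSyncAmount(unsigned current, unsigned imported, double elapsedSeconds)
{
    unsigned const syncMin = 1;
    unsigned const syncMax = 1000;
    double const targetDuration = 1.0;
    if (elapsedSeconds > targetDuration * 1.1 && imported > syncMin)
        return std::max(syncMin, imported * 9 / 10);
    if (imported == current && elapsedSeconds < targetDuration * 0.9 && current < syncMax)
        return std::min(syncMax, current * 11 / 10 + 1);
    return current;
}
// e.g. a 100-block batch fully imported in 0.5 s grows the next batch to 111 blocks.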
@ -671,23 +680,23 @@ void Client::syncTransactionQueue()
void Client::onChainChanged(ImportRoute const& _ir)
{
// insert transactions that we are declaring the dead part of the chain
for (auto const& h: _ir.second)
for (auto const& h: _ir.deadBlocks)
{
clog(ClientNote) << "Dead block:" << h;
clog(ClientTrace) << "Dead block:" << h;
for (auto const& t: m_bc.transactions(h))
{
clog(ClientNote) << "Resubmitting dead-block transaction " << Transaction(t, CheckTransaction::None);
clog(ClientTrace) << "Resubmitting dead-block transaction " << Transaction(t, CheckTransaction::None);
m_tq.import(t, TransactionQueue::ImportCallback(), IfDropped::Retry);
}
}
// remove transactions from m_tq nicely rather than relying on out of date nonce later on.
for (auto const& h: _ir.first)
for (auto const& h: _ir.liveBlocks)
{
clog(ClientChat) << "Live block:" << h;
clog(ClientTrace) << "Live block:" << h;
for (auto const& th: m_bc.transactionHashes(h))
{
clog(ClientNote) << "Safely dropping transaction " << th;
clog(ClientTrace) << "Safely dropping transaction " << th;
m_tq.drop(th);
}
}
@ -696,12 +705,12 @@ void Client::onChainChanged(ImportRoute const& _ir)
h->noteNewBlocks();
h256Hash changeds;
for (auto const& h: _ir.first)
for (auto const& h: _ir.liveBlocks)
appendFromNewBlock(h, changeds);
// RESTART MINING
if (!m_bq.items().first)
if (!isMajorSyncing())
{
bool preChanged = false;
State newPreMine;
@ -723,7 +732,7 @@ void Client::onChainChanged(ImportRoute const& _ir)
DEV_READ_GUARDED(x_postMine)
for (auto const& t: m_postMine.pending())
{
clog(ClientNote) << "Resubmitting post-mine transaction " << t;
clog(ClientTrace) << "Resubmitting post-mine transaction " << t;
auto ir = m_tq.import(t, TransactionQueue::ImportCallback(), IfDropped::Retry);
if (ir != ImportResult::Success)
onTransactionQueueReady();
@ -764,7 +773,7 @@ void Client::startMining()
void Client::rejigMining()
{
if ((wouldMine() || remoteActive()) && !m_bq.items().first && (!isChainBad() || mineOnBadChain()) /*&& (forceMining() || transactionsWaiting())*/)
if ((wouldMine() || remoteActive()) && !isMajorSyncing() && (!isChainBad() || mineOnBadChain()) /*&& (forceMining() || transactionsWaiting())*/)
{
cnote << "Rejigging mining...";
DEV_WRITE_GUARDED(x_working)

1
libethereum/Client.h

@ -219,6 +219,7 @@ public:
DownloadMan const* downloadMan() const;
bool isSyncing() const;
bool isMajorSyncing() const;
/// Sets the network id.
void setNetworkId(u256 _n);
/// Clears pending transactions. Just for debug use.

6
libethereum/CommonNet.h

@ -80,10 +80,8 @@ enum class Asking
enum class SyncState
{
Idle, ///< Initial chain sync complete. Waiting for new packets
WaitingQueue, ///< Block downloading paused. Waiting for block queue to process blocks and free space
HashesNegotiate, ///< Waiting for first hashes to arrive
HashesSingle, ///< Locked on and downloading hashes from a single peer
HashesParallel, ///< Downloading hashes from multiple peers over
Waiting, ///< Block downloading paused. Waiting for block queue to process blocks and free space
Hashes, ///< Downloading hashes from multiple peers over
Blocks, ///< Downloading blocks
NewBlocks, ///< Downloading blocks learned from NewHashes packet

593
libethereum/EthereumHost.cpp

@ -33,6 +33,8 @@
#include "BlockQueue.h"
#include "EthereumPeer.h"
#include "DownloadMan.h"
#include "BlockChainSync.h"
using namespace std;
using namespace dev;
using namespace dev::eth;
@ -41,7 +43,7 @@ using namespace p2p;
unsigned const EthereumHost::c_oldProtocolVersion = 60; //TODO: remove this once v61+ is common
unsigned const c_chainReorgSize = 30000;
char const* const EthereumHost::s_stateNames[static_cast<int>(SyncState::Size)] = {"Idle", "WaitingQueue", "HashesNegotiate", "HashesSingle", "HashesParallel", "Blocks", "NewBlocks" };
char const* const EthereumHost::s_stateNames[static_cast<int>(SyncState::Size)] = {"Idle", "Waiting", "Hashes", "Blocks", "NewBlocks" };
EthereumHost::EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId):
HostCapability<EthereumPeer>(),
@ -51,15 +53,11 @@ EthereumHost::EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQu
m_bq (_bq),
m_networkId (_networkId)
{
setState(SyncState::HashesNegotiate);
m_latestBlockSent = _ch.currentHash();
m_hashMan.reset(m_chain.number() + 1);
m_bqRoomAvailable = m_bq.onRoomAvailable([this](){ m_continueSync = true; });
}
EthereumHost::~EthereumHost()
{
foreachPeer([](EthereumPeer* _p) { _p->abortSync(); });
}
bool EthereumHost::ensureInitialised()
@ -79,31 +77,13 @@ bool EthereumHost::ensureInitialised()
void EthereumHost::reset()
{
foreachPeer([](EthereumPeer* _p) { _p->abortSync(); });
m_man.resetToChain(h256s());
m_hashMan.reset(m_chain.number() + 1);
setState(SyncState::HashesNegotiate);
m_syncingLatestHash = h256();
m_syncingTotalDifficulty = 0;
Guard l(x_sync);
if (m_sync)
m_sync->abortSync();
m_sync.reset();
m_latestBlockSent = h256();
m_transactionsSent.clear();
m_hashes.clear();
}
void EthereumHost::resetSyncTo(h256 const& _h)
{
setState(SyncState::HashesNegotiate);
m_syncingLatestHash = _h;
}
void EthereumHost::setState(SyncState _s)
{
if (m_state != _s)
{
clog(NetAllDetail) << "SyncState changed from " << stateName(m_state) << " to " << stateName(_s);
m_state = _s;
}
}
void EthereumHost::doWork()
@ -125,14 +105,7 @@ void EthereumHost::doWork()
}
}
if (m_continueSync)
{
m_continueSync = false;
RecursiveGuard l(x_sync);
continueSync();
}
foreachPeer([](EthereumPeer* _p) { _p->tick(); });
foreachPeer([](EthereumPeer* _p) { _p->tick(); return true; });
// return netChange;
// TODO: Figure out what to do with netChange.
@ -174,24 +147,28 @@ void EthereumHost::maintainTransactions()
cnote << "Sent" << n << "transactions to " << _p->session()->info().clientVersion;
}
_p->m_requireTransactions = false;
return true;
});
}
void EthereumHost::foreachPeer(std::function<void(EthereumPeer*)> const& _f) const
void EthereumHost::foreachPeer(std::function<bool(EthereumPeer*)> const& _f) const
{
foreachPeerPtr([&](std::shared_ptr<EthereumPeer> _p)
{
if (_p)
_f(_p.get());
return _f(_p.get());
return true;
});
}
void EthereumHost::foreachPeerPtr(std::function<void(std::shared_ptr<EthereumPeer>)> const& _f) const
void EthereumHost::foreachPeerPtr(std::function<bool(std::shared_ptr<EthereumPeer>)> const& _f) const
{
for (auto s: peerSessions())
_f(s.first->cap<EthereumPeer>());
if (!_f(s.first->cap<EthereumPeer>()))
return;
for (auto s: peerSessions(c_oldProtocolVersion)) //TODO: remove once v61+ is common
_f(s.first->cap<EthereumPeer>(c_oldProtocolVersion));
if (!_f(s.first->cap<EthereumPeer>(c_oldProtocolVersion)))
return;
}
tuple<vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<Session>>> EthereumHost::randomSelection(unsigned _percent, std::function<bool(EthereumPeer*)> const& _allow)
@ -263,334 +240,63 @@ void EthereumHost::maintainBlocks(h256 const& _currentHash)
}
}
void EthereumHost::onPeerStatus(EthereumPeer* _peer)
BlockChainSync& EthereumHost::sync()
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
if (_peer->m_genesisHash != m_chain.genesisHash())
_peer->disable("Invalid genesis hash");
else if (_peer->m_protocolVersion != protocolVersion() && _peer->m_protocolVersion != c_oldProtocolVersion)
_peer->disable("Invalid protocol version.");
else if (_peer->m_networkId != networkId())
_peer->disable("Invalid network identifier.");
else if (_peer->session()->info().clientVersion.find("/v0.7.0/") != string::npos)
_peer->disable("Blacklisted client version.");
else if (isBanned(_peer->session()->id()))
_peer->disable("Peer banned for previous bad behaviour.");
else
if (m_sync)
return *m_sync; // We only chose sync strategy once
bool pv61 = false;
foreachPeer([&](EthereumPeer* _p)
{
unsigned estimatedHashes = estimateHashes();
if (_peer->m_protocolVersion == protocolVersion())
{
if (_peer->m_latestBlockNumber > m_chain.number())
_peer->m_expectedHashes = (unsigned)_peer->m_latestBlockNumber - m_chain.number();
if (_peer->m_expectedHashes > estimatedHashes)
_peer->disable("Too many hashes");
else if (needHashes() && m_hashMan.chainSize() < _peer->m_expectedHashes)
m_hashMan.resetToRange(m_chain.number() + 1, _peer->m_expectedHashes);
}
else
_peer->m_expectedHashes = estimatedHashes;
continueSync(_peer);
}
DEV_INVARIANT_CHECK;
if (_p->m_protocolVersion == protocolVersion())
pv61 = true;
return !pv61;
});
m_sync.reset(pv61 ? new PV60Sync(*this) : new PV60Sync(*this));
return *m_sync;
}
unsigned EthereumHost::estimateHashes()
void EthereumHost::onPeerStatus(EthereumPeer* _peer)
{
BlockInfo block = m_chain.info();
time_t lastBlockTime = (block.hash() == m_chain.genesisHash()) ? 1428192000 : (time_t)block.timestamp;
time_t now = time(0);
unsigned blockCount = c_chainReorgSize;
if (lastBlockTime > now)
clog(NetWarn) << "Clock skew? Latest block is in the future";
else
blockCount += (now - lastBlockTime) / (unsigned)c_durationLimit;
clog(NetAllDetail) << "Estimated hashes: " << blockCount;
return blockCount;
Guard l(x_sync);
sync().onPeerStatus(_peer);
}
void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes)
{
RecursiveGuard l(x_sync);
if (_peer->m_syncHashNumber > 0)
_peer->m_syncHashNumber += _hashes.size();
_peer->setAsking(Asking::Nothing);
onPeerHashes(_peer, _hashes, false);
}
void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool _complete)
{
DEV_INVARIANT_CHECK;
if (_hashes.empty())
{
_peer->m_hashSub.doneFetch();
continueSync();
return;
}
bool syncByNumber = _peer->m_syncHashNumber;
if (!syncByNumber && !_complete && _peer->m_syncHash != m_syncingLatestHash)
{
// Obsolete hashes, discard
continueSync(_peer);
return;
}
unsigned knowns = 0;
unsigned unknowns = 0;
h256s neededBlocks;
unsigned firstNumber = _peer->m_syncHashNumber - _hashes.size();
for (unsigned i = 0; i < _hashes.size(); ++i)
{
_peer->addRating(1);
auto h = _hashes[i];
auto status = m_bq.blockStatus(h);
if (status == QueueStatus::Importing || status == QueueStatus::Ready || m_chain.isKnown(h))
{
clog(NetMessageSummary) << "Block hash already known:" << h;
if (!syncByNumber)
{
m_hashes += neededBlocks;
clog(NetMessageSummary) << "Start blocks download...";
onPeerDoneHashes(_peer, true);
return;
}
}
else if (status == QueueStatus::Bad)
{
cwarn << "block hash bad!" << h << ". Bailing...";
_peer->setIdle();
return;
}
else if (status == QueueStatus::Unknown)
{
unknowns++;
neededBlocks.push_back(h);
}
else
knowns++;
if (!syncByNumber)
m_syncingLatestHash = h;
else
_peer->m_hashSub.noteHash(firstNumber + i, 1);
}
if (syncByNumber)
{
m_man.appendToChain(neededBlocks); // Append to download manager immediately
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns";
}
else
{
m_hashes += neededBlocks; // Append to local list
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns; now at" << m_syncingLatestHash;
}
if (_complete)
{
clog(NetMessageSummary) << "Start new blocks download...";
m_syncingLatestHash = h256();
setState(SyncState::NewBlocks);
m_man.resetToChain(m_hashes);
m_hashes.clear();
m_hashMan.reset(m_chain.number() + 1);
continueSync(_peer);
}
else if (syncByNumber && m_hashMan.isComplete())
{
// Done our chain-get.
clog(NetNote) << "Hashes download complete.";
onPeerDoneHashes(_peer, false);
}
else if (m_hashes.size() > _peer->m_expectedHashes)
{
_peer->disable("Too many hashes");
m_hashes.clear();
m_syncingLatestHash = h256();
setState(SyncState::HashesNegotiate);
continueSync(); ///Try with some other peer, keep the chain
}
else
continueSync(_peer); /// Grab next hashes
DEV_INVARIANT_CHECK;
}
void EthereumHost::onPeerDoneHashes(EthereumPeer* _peer, bool _localChain)
{
assert(_peer->m_asking == Asking::Nothing);
m_syncingLatestHash = h256();
setState(SyncState::Blocks);
if (_peer->m_protocolVersion != protocolVersion() || _localChain)
{
m_man.resetToChain(m_hashes);
_peer->addRating(m_man.chainSize() / 100); //TODO: what about other peers?
}
m_hashMan.reset(m_chain.number() + 1);
m_hashes.clear();
continueSync();
Guard l(x_sync);
sync().onPeerHashes(_peer, _hashes);
}
void EthereumHost::onPeerBlocks(EthereumPeer* _peer, RLP const& _r)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
_peer->setAsking(Asking::Nothing);
unsigned itemCount = _r.itemCount();
clog(NetMessageSummary) << "Blocks (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreBlocks");
if (itemCount == 0)
{
// Got to this peer's latest block - just give up.
clog(NetNote) << "Finishing blocks fetch...";
// NOTE: need to notify of giving up on chain-hashes, too, altering state as necessary.
_peer->m_sub.doneFetch();
_peer->setIdle();
return;
}
unsigned success = 0;
unsigned future = 0;
unsigned unknown = 0;
unsigned got = 0;
unsigned repeated = 0;
h256 lastUnknown;
for (unsigned i = 0; i < itemCount; ++i)
{
auto h = BlockInfo::headerHash(_r[i].data());
if (_peer->m_sub.noteBlock(h))
{
_peer->addRating(10);
switch (m_bq.import(_r[i].data(), m_chain))
{
case ImportResult::Success:
success++;
break;
case ImportResult::Malformed:
case ImportResult::BadChain:
_peer->disable("Malformed block received.");
return;
case ImportResult::FutureTime:
future++;
break;
case ImportResult::AlreadyInChain:
case ImportResult::AlreadyKnown:
got++;
break;
case ImportResult::UnknownParent:
lastUnknown = h;
unknown++;
break;
default:;
}
}
else
{
_peer->addRating(0); // -1?
repeated++;
}
}
clog(NetMessageSummary) << dec << success << "imported OK," << unknown << "with unknown parents," << future << "with future timestamps," << got << " already known," << repeated << " repeats received.";
if (m_state == SyncState::NewBlocks && unknown > 0)
{
_peer->m_latestHash = lastUnknown;
resetSyncTo(lastUnknown);
}
continueSync(_peer);
DEV_INVARIANT_CHECK;
Guard l(x_sync);
sync().onPeerBlocks(_peer, _r);
}
void EthereumHost::onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
if (isSyncing() || _peer->isConversing())
{
clog(NetMessageSummary) << "Ignoring new hashes since we're already downloading.";
return;
}
clog(NetNote) << "New block hash discovered: syncing without help.";
_peer->m_syncHashNumber = 0;
onPeerHashes(_peer, _hashes, true);
DEV_INVARIANT_CHECK;
Guard l(x_sync);
sync().onPeerNewHashes(_peer, _hashes);
}
void EthereumHost::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r)
{
RecursiveGuard l(x_sync);
DEV_INVARIANT_CHECK;
if ((isSyncing() || _peer->isConversing()) && m_state != SyncState::NewBlocks)
{
clog(NetMessageSummary) << "Ignoring new blocks since we're already downloading.";
return;
}
auto h = BlockInfo::headerHash(_r[0].data());
clog(NetMessageSummary) << "NewBlock: " << h;
if (_r.itemCount() != 2)
_peer->disable("NewBlock without 2 data fields.");
else
{
bool sync = false;
switch (m_bq.import(_r[0].data(), m_chain))
{
case ImportResult::Success:
_peer->addRating(100);
break;
case ImportResult::FutureTime:
//TODO: Rating dependent on how far in future it is.
break;
case ImportResult::Malformed:
case ImportResult::BadChain:
_peer->disable("Malformed block received.");
return;
case ImportResult::AlreadyInChain:
case ImportResult::AlreadyKnown:
break;
case ImportResult::UnknownParent:
if (h)
{
u256 difficulty = _r[1].toInt<u256>();
if (m_syncingTotalDifficulty < difficulty)
{
clog(NetMessageSummary) << "Received block with no known parent. Resyncing...";
_peer->m_latestHash = h;
_peer->m_totalDifficulty = difficulty;
resetSyncTo(h);;
sync = true;
}
}
break;
default:;
}
DEV_GUARDED(_peer->x_knownBlocks)
_peer->m_knownBlocks.insert(h);
if (sync)
continueSync();
}
DEV_INVARIANT_CHECK;
Guard l(x_sync);
sync().onPeerNewBlock(_peer, _r);
}
void EthereumHost::onPeerTransactions(EthereumPeer* _peer, RLP const& _r)
{
if (_peer->isCriticalSyncing())
{
clog(NetAllDetail) << "Ignoring transaction from peer we are syncing with";
return;
}
unsigned itemCount = _r.itemCount();
clog(NetAllDetail) << "Transactions (" << dec << itemCount << "entries)";
Guard l(_peer->x_knownTransactions);
for (unsigned i = 0; i < itemCount; ++i)
for (unsigned i = 0; i < min<unsigned>(itemCount, 256); ++i) // process 256 transactions at most. TODO: much better solution.
{
auto h = sha3(_r[i].data());
_peer->m_knownTransactions.insert(h);
@ -615,206 +321,23 @@ void EthereumHost::onPeerTransactions(EthereumPeer* _peer, RLP const& _r)
void EthereumHost::onPeerAborting(EthereumPeer* _peer)
{
RecursiveGuard l(x_sync);
if (_peer->isConversing())
{
_peer->setIdle();
// if (_peer->isCriticalSyncing())
_peer->setRude();
continueSync();
}
}
void EthereumHost::continueSync()
{
if (m_state == SyncState::WaitingQueue)
setState(m_lastActiveState);
clog(NetAllDetail) << "Continuing sync for all peers";
foreachPeer([&](EthereumPeer* _p)
{
if (_p->m_asking == Asking::Nothing)
continueSync(_p);
});
}
void EthereumHost::continueSync(EthereumPeer* _peer)
{
DEV_INVARIANT_CHECK;
assert(_peer->m_asking == Asking::Nothing);
bool otherPeerV60Sync = false;
bool otherPeerV61Sync = false;
if (needHashes())
{
if (!peerShouldGrabChain(_peer))
{
_peer->setIdle();
return;
}
foreachPeer([&](EthereumPeer* _p)
{
if (_p != _peer && _p->m_asking == Asking::Hashes)
{
if (_p->m_protocolVersion != protocolVersion())
otherPeerV60Sync = true; // Already have a peer downloading hash chain with old protocol, do nothing
else
otherPeerV61Sync = true; // Already have a peer downloading hash chain with V61+ protocol, join if supported
}
});
if (otherPeerV60Sync && !m_hashes.empty())
{
/// Downloading from other peer with v60 protocol, nothing else we can do
_peer->setIdle();
return;
}
if (otherPeerV61Sync && _peer->m_protocolVersion != protocolVersion())
{
/// Downloading from other peer with v61+ protocol which this peer does not support,
_peer->setIdle();
return;
}
if (_peer->m_protocolVersion == protocolVersion() && !m_hashMan.isComplete())
{
setState(SyncState::HashesParallel);
_peer->requestHashes(); /// v61+ and not catching up to a particular hash
}
else
{
// Restart/continue sync in single peer mode
if (!m_syncingLatestHash)
{
m_syncingLatestHash =_peer->m_latestHash;
m_syncingTotalDifficulty = _peer->m_totalDifficulty;
}
if (_peer->m_totalDifficulty >= m_syncingTotalDifficulty)
{
_peer->requestHashes(m_syncingLatestHash);
setState(SyncState::HashesSingle);
m_estimatedHashes = _peer->m_expectedHashes - (_peer->m_protocolVersion == protocolVersion() ? 0 : c_chainReorgSize);
}
else
_peer->setIdle();
}
}
else if (needBlocks())
{
if (m_man.isComplete())
{
// Done our chain-get.
setState(SyncState::Idle);
clog(NetNote) << "Chain download complete.";
// 1/100th for each useful block hash.
_peer->addRating(m_man.chainSize() / 100); //TODO: what about other peers?
m_man.reset();
_peer->setIdle();
return;
}
else if (peerCanHelp(_peer))
{
// Check block queue status
if (m_bq.unknownFull())
{
clog(NetWarn) << "Too many unknown blocks, restarting sync";
m_bq.clear();
reset();
continueSync();
}
else if (m_bq.knownFull())
{
clog(NetAllDetail) << "Waiting for block queue before downloading blocks";
m_lastActiveState = m_state;
setState(SyncState::WaitingQueue);
_peer->setIdle();
}
else
_peer->requestBlocks();
}
}
else
_peer->setIdle();
DEV_INVARIANT_CHECK;
}
bool EthereumHost::peerCanHelp(EthereumPeer* _peer) const
{
(void)_peer;
return true;
}
bool EthereumHost::peerShouldGrabBlocks(EthereumPeer* _peer) const
{
// this is only good for deciding whether to go ahead and grab a particular peer's hash chain,
// yet it's being used in determining whether to allow a peer help with downloading an existing
// chain of blocks.
auto td = _peer->m_totalDifficulty;
auto lh = m_syncingLatestHash;
auto ctd = m_chain.details().totalDifficulty;
clog(NetAllDetail) << "Should grab blocks? " << td << "vs" << ctd;
if (td < ctd || (td == ctd && m_chain.currentHash() == lh))
return false;
return true;
}
bool EthereumHost::peerShouldGrabChain(EthereumPeer* _peer) const
{
// Early exit if this peer has proved unreliable.
if (_peer->isRude())
return false;
h256 c = m_chain.currentHash();
unsigned n = m_chain.number();
u256 td = m_chain.details().totalDifficulty;
clog(NetAllDetail) << "Attempt chain-grab? Latest:" << c << ", number:" << n << ", TD:" << td << " versus " << _peer->m_totalDifficulty;
if (td >= _peer->m_totalDifficulty)
{
clog(NetAllDetail) << "No. Our chain is better.";
return false;
}
else
{
clog(NetAllDetail) << "Yes. Their chain is better.";
return true;
}
Guard l(x_sync);
if (m_sync)
m_sync->onPeerAborting(_peer);
}
bool EthereumHost::isSyncing() const
{
return m_state != SyncState::Idle;
Guard l(x_sync);
if (!m_sync)
return false;
return m_sync->isSyncing();
}
SyncStatus EthereumHost::status() const
{
RecursiveGuard l(x_sync);
SyncStatus res;
res.state = m_state;
if (m_state == SyncState::HashesParallel)
{
res.hashesReceived = m_hashMan.hashesGot().size();
res.hashesTotal = m_hashMan.chainSize();
}
else if (m_state == SyncState::HashesSingle)
{
res.hashesTotal = m_estimatedHashes;
res.hashesReceived = static_cast<unsigned>(m_hashes.size());
res.hashesEstimated = true;
}
else if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks || m_state == SyncState::WaitingQueue)
{
res.blocksTotal = m_man.chainSize();
res.blocksReceived = m_man.blocksGot().size();
}
return res;
}
bool EthereumHost::invariants() const
{
if (m_state == SyncState::HashesNegotiate && !m_hashes.empty())
return false;
if (needBlocks() && (m_syncingLatestHash || !m_hashes.empty()))
return false;
return true;
Guard l(x_sync);
if (!m_sync)
return SyncStatus();
return m_sync->status();
}
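
For reference, the chain-grab decision above reduces to a total-difficulty comparison plus an early exit for misbehaving peers. Below is a minimal standalone sketch of that test, using hypothetical plain integer types instead of the dev::u256 and peer/session objects in the real code:

#include <cstdint>
#include <iostream>

// Hypothetical, simplified stand-ins for the chain and peer state used above.
struct ChainInfo { uint64_t totalDifficulty; };
struct PeerInfo  { uint64_t totalDifficulty; bool rude; };

// Mirrors the decision in EthereumHost::peerShouldGrabChain: never sync from a
// peer that has proved unreliable, and only sync if its chain has strictly more work.
bool shouldGrabChain(ChainInfo const& ours, PeerInfo const& peer)
{
    if (peer.rude)
        return false;                                        // early exit for rude peers
    return peer.totalDifficulty > ours.totalDifficulty;      // their chain is better
}

int main()
{
    std::cout << shouldGrabChain({1000}, {1200, false}) << "\n"; // 1: grab their chain
    std::cout << shouldGrabChain({1000}, {1200, true})  << "\n"; // 0: rude peer, skip
    std::cout << shouldGrabChain({1000}, {1000, false}) << "\n"; // 0: no better than ours
}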

75
libethereum/EthereumHost.h

@ -48,16 +48,16 @@ namespace eth
class TransactionQueue;
class BlockQueue;
class BlockChainSync;
/**
* @brief The EthereumHost class
* @warning None of this is thread-safe. You have been warned.
* @doWork Syncs to peers and sends new blocks and transactions.
*/
class EthereumHost: public p2p::HostCapability<EthereumPeer>, Worker, HasInvariants
class EthereumHost: public p2p::HostCapability<EthereumPeer>, Worker
{
public:
/// Start server, but don't listen.
EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId);
@ -71,82 +71,60 @@ public:
void reset();
DownloadMan const& downloadMan() const { return m_man; }
DownloadMan& downloadMan() { return m_man; }
bool isSyncing() const;
bool isBanned(p2p::NodeId const& _id) const { return !!m_banned.count(_id); }
void noteNewTransactions() { m_newTransactions = true; }
void noteNewBlocks() { m_newBlocks = true; }
void onPeerStatus(EthereumPeer* _peer); ///< Called by peer to report status
void onPeerBlocks(EthereumPeer* _peer, RLP const& _r); ///< Called by peer once it has new blocks during sync
void onPeerNewBlock(EthereumPeer* _peer, RLP const& _r); ///< Called by peer once it has new blocks
void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes); ///< Called by peer once it has new hashes
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes); ///< Called by peer once it has another sequential block of hashes during sync
void onPeerTransactions(EthereumPeer* _peer, RLP const& _r); ///< Called by peer when it has new transactions
void onPeerAborting(EthereumPeer* _peer); ///< Called by peer when it is disconnecting
DownloadMan& downloadMan() { return m_man; }
HashDownloadMan& hashDownloadMan() { return m_hashMan; }
BlockChain const& chain() { return m_chain; }
BlockChain const& chain() const { return m_chain; }
BlockQueue& bq() { return m_bq; }
BlockQueue const& bq() const { return m_bq; }
SyncStatus status() const;
h256 latestBlockSent() { return m_latestBlockSent; }
static char const* stateName(SyncState _s) { return s_stateNames[static_cast<int>(_s)]; }
static unsigned const c_oldProtocolVersion;
void foreachPeerPtr(std::function<bool(std::shared_ptr<EthereumPeer>)> const& _f) const;
void foreachPeer(std::function<bool(EthereumPeer*)> const& _f) const;
void onPeerStatus(EthereumPeer* _peer);
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes);
void onPeerBlocks(EthereumPeer* _peer, RLP const& _r);
void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes);
void onPeerNewBlock(EthereumPeer* _peer, RLP const& _r);
void onPeerTransactions(EthereumPeer* _peer, RLP const& _r);
void onPeerAborting(EthereumPeer* _peer);
private:
static char const* const s_stateNames[static_cast<int>(SyncState::Size)];
std::tuple<std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<p2p::Session>>> randomSelection(unsigned _percent = 25, std::function<bool(EthereumPeer*)> const& _allow = [](EthereumPeer const*){ return true; });
void foreachPeerPtr(std::function<void(std::shared_ptr<EthereumPeer>)> const& _f) const;
void foreachPeer(std::function<void(EthereumPeer*)> const& _f) const;
void resetSyncTo(h256 const& _h);
bool needHashes() const { return m_state == SyncState::HashesNegotiate || m_state == SyncState::HashesSingle || m_state == SyncState::HashesParallel; }
bool needBlocks() const { return m_state == SyncState::Blocks || m_state == SyncState::NewBlocks; }
/// Sync with the BlockChain. It might contain one of our mined blocks, we might have new candidates from the network.
void doWork();
virtual void doWork() override;
void maintainTransactions();
void maintainBlocks(h256 const& _currentBlock);
/// Get a bunch of needed blocks.
/// Removes them from our list of needed blocks.
/// @returns empty if there's no more blocks left to fetch, otherwise the blocks to fetch.
h256Hash neededBlocks(h256Hash const& _exclude);
/// Check to see if the network peer-state initialisation has happened.
bool isInitialised() const { return (bool)m_latestBlockSent; }
/// Initialises the network peer-state, doing the stuff that needs to be once-only. @returns true if it really was first.
bool ensureInitialised();
virtual void onStarting() { startWorking(); }
virtual void onStopping() { stopWorking(); }
void continueSync(); /// Find something to do for all peers
void continueSync(EthereumPeer* _peer); /// Find some work to do for a peer
void onPeerDoneHashes(EthereumPeer* _peer, bool _new); /// Called when done downloading hashes from peer
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool _complete);
bool peerShouldGrabBlocks(EthereumPeer* _peer) const;
bool peerShouldGrabChain(EthereumPeer* _peer) const;
bool peerCanHelp(EthereumPeer* _peer) const;
unsigned estimateHashes();
void estimatePeerHashes(EthereumPeer* _peer);
void setState(SyncState _s);
virtual void onStarting() override { startWorking(); }
virtual void onStopping() override { stopWorking(); }
bool invariants() const override;
BlockChainSync& sync();
BlockChain const& m_chain;
TransactionQueue& m_tq; ///< Maintains a list of incoming transactions not yet in a block on the blockchain.
BlockQueue& m_bq; ///< Maintains a list of incoming blocks not yet on the blockchain (to be imported).
Handler m_bqRoomAvailable;
u256 m_networkId;
DownloadMan m_man;
HashDownloadMan m_hashMan;
h256 m_latestBlockSent;
h256Hash m_transactionsSent;
@ -155,14 +133,9 @@ private:
bool m_newTransactions = false;
bool m_newBlocks = false;
mutable RecursiveMutex x_sync;
SyncState m_state = SyncState::Idle; ///< Current sync state
SyncState m_lastActiveState = SyncState::Idle; ///< Saved state before entering waiting queue mode
h256 m_syncingLatestHash; ///< Latest block's hash, as of the current sync.
u256 m_syncingTotalDifficulty; ///< Latest block's total difficulty, as of the current sync.
h256s m_hashes; ///< List of hashes with unknown block numbers. Used for PV60 chain downloading and catching up to a particular unknown
unsigned m_estimatedHashes = 0; ///< Number of estimated hashes for the last peer over PV60. Used for status reporting only.
bool m_continueSync = false; ///< True when the block queue has processed a block; we should restart grabbing blocks.
mutable Mutex x_sync;
DownloadMan m_man;
std::unique_ptr<BlockChainSync> m_sync;
};
}

46
libethereum/EthereumPeer.cpp

@ -30,15 +30,28 @@
#include "EthereumHost.h"
#include "TransactionQueue.h"
#include "BlockQueue.h"
#include "BlockChainSync.h"
using namespace std;
using namespace dev;
using namespace dev::eth;
using namespace p2p;
string toString(Asking _a)
{
switch (_a)
{
case Asking::Blocks: return "Blocks";
case Asking::Hashes: return "Hashes";
case Asking::Nothing: return "Nothing";
case Asking::State: return "State";
}
return "?";
}
EthereumPeer::EthereumPeer(Session* _s, HostCapabilityFace* _h, unsigned _i, CapDesc const& _cap):
Capability(_s, _h, _i),
m_sub(host()->downloadMan()),
m_hashSub(host()->hashDownloadMan()),
m_peerCapabilityVersion(_cap.second)
{
session()->addNote("manners", isRude() ? "RUDE" : "nice");
@ -48,6 +61,11 @@ EthereumPeer::EthereumPeer(Session* _s, HostCapabilityFace* _h, unsigned _i, Cap
EthereumPeer::~EthereumPeer()
{
if (m_asking != Asking::Nothing)
{
cnote << "Peer aborting while being asked for " << ::toString(m_asking);
setRude();
}
abortSync();
}
@ -58,13 +76,18 @@ bool EthereumPeer::isRude() const
unsigned EthereumPeer::askOverride() const
{
std::string static const badGeth = "Geth/v0.9.27";
if (session()->info().clientVersion.substr(0, badGeth.size()) == badGeth)
return 1;
bytes const& d = repMan().data(*session(), name());
return d.empty() ? c_maxBlocksAsk : RLP(d).toInt<unsigned>(RLP::LaisezFaire);
}
void EthereumPeer::setRude()
{
auto old = askOverride();
repMan().setData(*session(), name(), rlp(askOverride() / 2 + 1));
cnote << "Rude behaviour; askOverride now" << askOverride() << ", was" << old;
repMan().noteRude(*session(), name());
session()->addNote("manners", "RUDE");
}
@ -83,22 +106,8 @@ EthereumHost* EthereumPeer::host() const
* Possible asking/syncing states for two peers:
*/
string toString(Asking _a)
{
switch (_a)
{
case Asking::Blocks: return "Blocks";
case Asking::Hashes: return "Hashes";
case Asking::Nothing: return "Nothing";
case Asking::State: return "State";
}
return "?";
}
void EthereumPeer::setIdle()
{
m_sub.doneFetch();
m_hashSub.doneFetch();
setAsking(Asking::Nothing);
}
@ -106,6 +115,7 @@ void EthereumPeer::requestStatus()
{
assert(m_asking == Asking::Nothing);
setAsking(Asking::State);
m_requireTransactions = true;
RLPStream s;
bool latest = m_peerCapabilityVersion == host()->protocolVersion();
prep(s, StatusPacket, latest ? 6 : 5)
@ -119,14 +129,14 @@ void EthereumPeer::requestStatus()
sealAndSend(s);
}
void EthereumPeer::requestHashes()
void EthereumPeer::requestHashes(u256 _number, unsigned _count)
{
assert(m_asking == Asking::Nothing);
m_syncHashNumber = m_hashSub.nextFetch(c_maxHashesAsk);
m_syncHashNumber = _number;
m_syncHash = h256();
setAsking(Asking::Hashes);
RLPStream s;
prep(s, GetBlockHashesByNumberPacket, 2) << m_syncHashNumber << c_maxHashesAsk;
prep(s, GetBlockHashesByNumberPacket, 2) << m_syncHashNumber << _count;
clog(NetMessageDetail) << "Requesting block hashes for numbers " << m_syncHashNumber << "-" << m_syncHashNumber + _count - 1;
sealAndSend(s);
}

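The setRude() path above halves the number of blocks the peer will be asked for next time, with a floor of one. A small self-contained sketch of that backoff follows; askOverride here is a hypothetical plain integer rather than the RLP-backed reputation entry used by repMan():

#include <iostream>

// Hypothetical stand-in for the per-peer ask limit stored via the reputation manager.
unsigned halveAskOverride(unsigned current)
{
    // Same arithmetic as setRude(): integer-halve and add one, so the limit
    // decays 128 -> 65 -> 33 -> ... and never drops below 1.
    return current / 2 + 1;
}

int main()
{
    unsigned ask = 128;
    for (int i = 0; i < 8; ++i)
    {
        ask = halveAskOverride(ask);
        std::cout << ask << " ";       // 65 33 17 9 5 3 2 2
    }
    std::cout << "\n";
}
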
13
libethereum/EthereumPeer.h

@ -50,6 +50,9 @@ namespace eth
class EthereumPeer: public p2p::Capability
{
friend class EthereumHost; //TODO: remove this
friend class BlockChainSync; //TODO: remove this
friend class PV60Sync; //TODO: remove this
friend class PV61Sync; //TODO: remove this
public:
/// Basic constructor.
@ -73,8 +76,8 @@ public:
/// Abort sync and reset fetch
void setIdle();
/// Request hashes. Uses hash download manager to get hash number. v61+ protocol version only
void requestHashes();
/// Request hashes by number. v61+ protocol version only
void requestHashes(u256 _number, unsigned _count);
/// Request hashes for given parent hash.
void requestHashes(h256 const& _lastHash);
@ -138,18 +141,16 @@ private:
h256 m_genesisHash; ///< Peer's genesis hash
u256 m_latestBlockNumber; ///< Number of the latest block this peer has
/// This is built as we ask for hashes. Once no more hashes are given, we present this to the
/// host who initialises the DownloadMan and m_sub becomes active for us to begin asking for blocks.
unsigned m_expectedHashes = 0; ///< Estimated upper bound of hashes to expect from this peer.
unsigned m_syncHashNumber = 0; ///< Number of latest hash we sync to (PV61+)
u256 m_syncHashNumber = 0; ///< Number of latest hash we sync to (PV61+)
h256 m_syncHash; ///< Latest hash we sync to (PV60)
/// Once we're asking for blocks, this becomes in use.
DownloadSub m_sub;
/// Once we're asking for hashes, this becomes in use.
HashDownloadSub m_hashSub;
u256 m_peerCapabilityVersion; ///< Protocol version this peer supports received as capability
/// Have we received a GetTransactions packet that we haven't yet answered?
bool m_requireTransactions = false;

2
libethereum/State.h

@ -206,6 +206,8 @@ public:
return false;
PoW::assignResult(_result, m_currentBlock);
if (!PoW::verify(m_currentBlock))
return false;
cnote << "Completed" << m_currentBlock.headerHash(WithoutNonce) << m_currentBlock.nonce << m_currentBlock.difficulty << PoW::verify(m_currentBlock);

3
libethereum/TransactionQueue.cpp

@ -29,6 +29,7 @@ using namespace dev;
using namespace dev::eth;
const char* TransactionQueueChannel::name() { return EthCyan "┉┅▶"; }
const char* TransactionQueueTraceChannel::name() { return EthCyan " ┅▶"; }
ImportResult TransactionQueue::import(bytesConstRef _transactionRLP, ImportCallback const& _cb, IfDropped _ik)
{
@ -115,7 +116,7 @@ ImportResult TransactionQueue::manageImport_WITH_LOCK(h256 const& _h, Transactio
m_known.insert(_h);
if (_cb)
m_callbacks[_h] = _cb;
ctxq << "Queued vaguely legit-looking transaction" << _h;
clog(TransactionQueueTraceChannel) << "Queued vaguely legit-looking transaction" << _h;
m_onReady();
}
catch (Exception const& _e)

3
libethereum/TransactionQueue.h

@ -36,7 +36,8 @@ namespace eth
class BlockChain;
struct TransactionQueueChannel: public LogChannel { static const char* name(); static const int verbosity = 4; };
#define ctxq dev::LogOutputStream<dev::eth::TransactionQueueChannel, true>()
struct TransactionQueueTraceChannel: public LogChannel { static const char* name(); static const int verbosity = 7; };
#define ctxq dev::LogOutputStream<dev::eth::TransactionQueueTraceChannel, true>()
enum class IfDropped { Ignore, Retry };

58
libevm/VM.cpp

@ -202,6 +202,24 @@ bytesConstRef VM::execImpl(u256& io_gas, ExtVMFace& _ext, OnOpFunc const& _onOp)
return nextPC;
};
auto copyDataToMemory = [](bytesConstRef _data, decltype(m_stack)& _stack, decltype(m_temp)& _memory)
{
auto offset = static_cast<size_t>(_stack.back());
_stack.pop_back();
bigint bigIndex = _stack.back();
auto index = static_cast<size_t>(bigIndex);
_stack.pop_back();
auto size = static_cast<size_t>(_stack.back());
_stack.pop_back();
size_t sizeToBeCopied = bigIndex + size > _data.size() ? _data.size() < bigIndex ? 0 : _data.size() - index : size;
if (sizeToBeCopied > 0)
std::memcpy(_memory.data() + offset, _data.data() + index, sizeToBeCopied);
if (size > sizeToBeCopied)
std::memset(_memory.data() + offset + sizeToBeCopied, 0, size - sizeToBeCopied);
};
m_steps = 0;
for (auto nextPC = m_curPC + 1; true; m_curPC = nextPC, nextPC = m_curPC + 1, ++m_steps)
{
@ -364,44 +382,16 @@ bytesConstRef VM::execImpl(u256& io_gas, ExtVMFace& _ext, OnOpFunc const& _onOp)
m_stack.back() = _ext.codeAt(asAddress(m_stack.back())).size();
break;
case Instruction::CALLDATACOPY:
copyDataToMemory(_ext.data, m_stack, m_temp);
break;
case Instruction::CODECOPY:
copyDataToMemory(&_ext.code, m_stack, m_temp);
break;
case Instruction::EXTCODECOPY:
{
Address a;
if (inst == Instruction::EXTCODECOPY)
{
a = asAddress(m_stack.back());
m_stack.pop_back();
}
unsigned offset = (unsigned)m_stack.back();
m_stack.pop_back();
u256 index = m_stack.back();
m_stack.pop_back();
unsigned size = (unsigned)m_stack.back();
auto a = asAddress(m_stack.back());
m_stack.pop_back();
unsigned sizeToBeCopied;
switch(inst)
{
case Instruction::CALLDATACOPY:
sizeToBeCopied = index + (bigint)size > (u256)_ext.data.size() ? (u256)_ext.data.size() < index ? 0 : _ext.data.size() - (unsigned)index : size;
memcpy(m_temp.data() + offset, _ext.data.data() + (unsigned)index, sizeToBeCopied);
break;
case Instruction::CODECOPY:
sizeToBeCopied = index + (bigint)size > (u256)_ext.code.size() ? (u256)_ext.code.size() < index ? 0 : _ext.code.size() - (unsigned)index : size;
memcpy(m_temp.data() + offset, _ext.code.data() + (unsigned)index, sizeToBeCopied);
break;
case Instruction::EXTCODECOPY:
sizeToBeCopied = index + (bigint)size > (u256)_ext.codeAt(a).size() ? (u256)_ext.codeAt(a).size() < index ? 0 : _ext.codeAt(a).size() - (unsigned)index : size;
memcpy(m_temp.data() + offset, _ext.codeAt(a).data() + (unsigned)index, sizeToBeCopied);
break;
default:
// this is unreachable, but if someone introduces a bug in the future, he may get here.
assert(false);
BOOST_THROW_EXCEPTION(InvalidOpcode() << errinfo_comment("CALLDATACOPY, CODECOPY or EXTCODECOPY instruction requested."));
break;
}
memset(m_temp.data() + offset + sizeToBeCopied, 0, size - sizeToBeCopied);
break;
copyDataToMemory(&_ext.codeAt(a), m_stack, m_temp);
}
case Instruction::GASPRICE:
m_stack.push_back(_ext.gasPrice);

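The copyDataToMemory lambda introduced above clamps the copy so that reads never run past the end of the source data and zero-fills the remainder of the destination. A minimal sketch of that clamping with ordinary containers (the real code operates on the EVM stack, a bigint index, and already-expanded VM memory; the helper name here is just a mirror):

#include <algorithm>
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical helper mirroring the CALLDATACOPY/CODECOPY/EXTCODECOPY semantics:
// copy `size` bytes of `data` starting at `index` into `mem` at `offset`,
// writing zeroes for any part of the range beyond the end of `data`.
// `mem` is assumed to be large enough; the real VM grows memory beforehand.
void copyDataToMemory(std::vector<unsigned char> const& data,
                      size_t offset, size_t index, size_t size,
                      std::vector<unsigned char>& mem)
{
    size_t copied = index >= data.size() ? 0 : std::min(size, data.size() - index);
    if (copied > 0)
        std::memcpy(mem.data() + offset, data.data() + index, copied);
    if (size > copied)
        std::memset(mem.data() + offset + copied, 0, size - copied);
}

int main()
{
    std::vector<unsigned char> data{1, 2, 3};
    std::vector<unsigned char> mem(8, 0xff);
    copyDataToMemory(data, 0, 1, 4, mem);          // copies {2,3}, zero-fills 2 bytes
    for (auto b: mem) std::cout << int(b) << " ";  // 2 3 0 0 255 255 255 255
    std::cout << "\n";
}
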
10
libevmasm/Assembly.cpp

@ -292,16 +292,6 @@ void Assembly::injectStart(AssemblyItem const& _i)
m_items.insert(m_items.begin(), _i);
}
inline bool matches(AssemblyItemsConstRef _a, AssemblyItemsConstRef _b)
{
if (_a.size() != _b.size())
return false;
for (unsigned i = 0; i < _a.size(); ++i)
if (!_a[i].match(_b[i]))
return false;
return true;
}
struct OptimiserChannel: public LogChannel { static const char* name() { return "OPT"; } static const int verbosity = 12; };
#define copt dev::LogOutputStream<OptimiserChannel, true>()

7
libevmasm/AssemblyItem.cpp

@ -126,10 +126,3 @@ ostream& dev::eth::operator<<(ostream& _out, AssemblyItem const& _item)
}
return _out;
}
ostream& dev::eth::operator<<(ostream& _out, AssemblyItemsConstRef _i)
{
for (AssemblyItem const& i: _i)
_out << i;
return _out;
}

9
libevmasm/AssemblyItem.h

@ -98,11 +98,14 @@ private:
};
using AssemblyItems = std::vector<AssemblyItem>;
using AssemblyItemsConstRef = vector_ref<AssemblyItem const>;
std::ostream& operator<<(std::ostream& _out, AssemblyItem const& _item);
std::ostream& operator<<(std::ostream& _out, AssemblyItemsConstRef _i);
inline std::ostream& operator<<(std::ostream& _out, AssemblyItems const& _i) { return operator<<(_out, AssemblyItemsConstRef(&_i)); }
inline std::ostream& operator<<(std::ostream& _out, AssemblyItems const& _items)
{
for (AssemblyItem const& item: _items)
_out << item;
return _out;
}
}
}

7
libjsconsole/JSConsole.cpp

@ -39,12 +39,11 @@ JSConsole::JSConsole(WebThreeDirect& _web3, shared_ptr<AccountHolder> const& _ac
m_printer(m_engine)
{
m_jsonrpcConnector.reset(new JSV8Connector(m_engine));
m_jsonrpcServer.reset(new WebThreeStubServer(*m_jsonrpcConnector.get(), _web3, _accounts, vector<KeyPair>()));
(void)_web3; (void)_accounts;
// m_jsonrpcServer.reset(new WebThreeStubServer(*m_jsonrpcConnector.get(), _web3, _accounts, vector<KeyPair>()));
}
JSConsole::~JSConsole() {}
void JSConsole::repl() const
void JSConsole::readExpression() const
{
string cmd = "";
g_logPost = [](std::string const& a, char const*) { cout << "\r \r" << a << endl << flush; rl_forced_update_display(); };

7
libjsconsole/JSConsole.h

@ -25,7 +25,7 @@
#include <libjsengine/JSV8Engine.h>
#include <libjsengine/JSV8Printer.h>
class WebThreeStubServer;
namespace dev { class WebThreeStubServer; }
namespace jsonrpc { class AbstractServerConnector; }
namespace dev
@ -39,15 +39,14 @@ class JSConsole
{
public:
JSConsole(WebThreeDirect& _web3, std::shared_ptr<AccountHolder> const& _accounts);
~JSConsole();
void repl() const;
void readExpression() const;
private:
std::string promptForIndentionLevel(int _i) const;
JSV8Engine m_engine;
JSV8Printer m_printer;
std::unique_ptr<WebThreeStubServer> m_jsonrpcServer;
std::unique_ptr<dev::WebThreeStubServer> m_jsonrpcServer;
std::unique_ptr<jsonrpc::AbstractServerConnector> m_jsonrpcConnector;
};

2
liblll/CodeFragment.cpp

@ -196,7 +196,7 @@ void CodeFragment::constructOperation(sp::utree const& _t, CompilerState& _s)
{
if (_t.size() != 2)
error<IncorrectParameterCount>();
m_asm.append(CodeFragment::compile(asString(contents(firstAsString())), _s).m_asm);
m_asm.append(CodeFragment::compile(contentsString(firstAsString()), _s).m_asm);
}
else if (us == "SET")
{

2
libp2p/CMakeLists.txt

@ -14,7 +14,7 @@ aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
# we may not use it in libp2p, but one of our dependencies is including leveldb in a header file
# and windows is failing to build without that
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS})
if (MINIUPNPC_FOUND)

5
libp2p/Host.cpp

@ -391,7 +391,7 @@ void Host::runAcceptor()
{
if (peerCount() > 9 * m_idealPeerCount)
{
clog(NetConnect) << "Dropping incoming connect due to maximum peer count (2 * ideal peer count): " << socket->remoteEndpoint();
clog(NetConnect) << "Dropping incoming connect due to maximum peer count (9 * ideal peer count): " << socket->remoteEndpoint();
socket->close();
if (ec.value() < 1)
runAcceptor();
@ -580,7 +580,8 @@ PeerSessionInfos Host::peerSessionInfo() const
for (auto& i: m_sessions)
if (auto j = i.second.lock())
if (j->isConnected())
ret.push_back(j->m_info);
DEV_GUARDED(j->x_info)
ret.push_back(j->m_info);
return ret;
}

19
libp2p/Session.cpp

@ -44,7 +44,8 @@ Session::Session(Host* _h, RLPXFrameCoder* _io, std::shared_ptr<RLPXSocket> cons
{
m_peer->m_lastDisconnect = NoDisconnect;
m_lastReceived = m_connect = chrono::steady_clock::now();
m_info.socketId = m_socket->ref().native_handle();
DEV_GUARDED(x_info)
m_info.socketId = m_socket->ref().native_handle();
}
Session::~Session()
@ -187,9 +188,12 @@ bool Session::interpret(PacketType _t, RLP const& _r)
break;
}
case PongPacket:
m_info.lastPing = std::chrono::steady_clock::now() - m_ping;
{
DEV_GUARDED(x_info)
m_info.lastPing = std::chrono::steady_clock::now() - m_ping;
clog(NetTriviaSummary) << "Latency: " << chrono::duration_cast<chrono::milliseconds>(m_info.lastPing).count() << " ms";
break;
}
case GetPeersPacket:
// Disabled for interop testing.
// GetPeers/PeersPacket will be modified to only exchange new nodes which it's peers are interested in.
@ -382,11 +386,12 @@ void Session::drop(DisconnectReason _reason)
void Session::disconnect(DisconnectReason _reason)
{
clog(NetConnect) << "Disconnecting (our reason:" << reasonOf(_reason) << ")";
StructuredLogger::p2pDisconnected(
m_info.id.abridged(),
m_peer->endpoint, // TODO: may not be 100% accurate
m_server->peerCount()
);
DEV_GUARDED(x_info)
StructuredLogger::p2pDisconnected(
m_info.id.abridged(),
m_peer->endpoint, // TODO: may not be 100% accurate
m_server->peerCount()
);
if (m_socket->ref().is_open())
{
RLPStream s;

7
libp2p/Session.h

@ -67,7 +67,7 @@ public:
bool isConnected() const { return m_socket->ref().is_open(); }
NodeId id() const;
unsigned socketId() const { return m_info.socketId; }
unsigned socketId() const { Guard l(x_info); return m_info.socketId; }
template <class PeerCap>
std::shared_ptr<PeerCap> cap() const { try { return std::static_pointer_cast<PeerCap>(m_capabilities.at(std::make_pair(PeerCap::name(), PeerCap::version()))); } catch (...) { return nullptr; } }
@ -81,9 +81,9 @@ public:
int rating() const;
void addRating(int _r);
void addNote(std::string const& _k, std::string const& _v) { m_info.notes[_k] = _v; }
void addNote(std::string const& _k, std::string const& _v) { Guard l(x_info); m_info.notes[_k] = _v; }
PeerSessionInfo const& info() const { return m_info; }
PeerSessionInfo info() const { Guard l(x_info); return m_info; }
void ensureNodesRequested();
void serviceNodesRequest();
@ -119,6 +119,7 @@ private:
std::shared_ptr<Peer> m_peer; ///< The Peer object.
bool m_dropped = false; ///< If true, we've already divested ourselves of this peer. We're just waiting for the reads & writes to fail before the shared_ptr goes OOS and the destructor kicks in.
mutable Mutex x_info;
PeerSessionInfo m_info; ///< Dynamic information about this peer.
bool m_theyRequestedNodes = false; ///< Has the peer requested nodes from us without receiving an answer from us?

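The Session changes above switch the m_info accessors from handing out references to copying the structure under the newly added x_info mutex. A minimal sketch of that pattern follows; Info and its fields are hypothetical stand-ins for PeerSessionInfo, and std::mutex stands in for the project's Mutex/Guard types:

#include <map>
#include <mutex>
#include <string>

// Hypothetical stand-in for PeerSessionInfo.
struct Info
{
    unsigned socketId = 0;
    std::map<std::string, std::string> notes;
};

class Session
{
public:
    // Writers and readers both take the lock; info() returns a snapshot by value
    // so callers never observe a concurrently mutated object.
    void addNote(std::string const& k, std::string const& v)
    {
        std::lock_guard<std::mutex> l(x_info);
        m_info.notes[k] = v;
    }
    Info info() const
    {
        std::lock_guard<std::mutex> l(x_info);
        return m_info;
    }

private:
    mutable std::mutex x_info;   // guards m_info, like the new x_info member above
    Info m_info;
};

int main()
{
    Session s;
    s.addNote("manners", "nice");
    return s.info().notes.size() == 1 ? 0 : 1;  // the snapshot contains the note
}
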
43
libsolidity/AST.cpp

@ -21,6 +21,7 @@
*/
#include <algorithm>
#include <functional>
#include <boost/range/adaptor/reversed.hpp>
#include <libsolidity/Utils.h>
#include <libsolidity/AST.h>
@ -434,23 +435,29 @@ void StructDefinition::checkMemberTypes() const
void StructDefinition::checkRecursion() const
{
set<StructDefinition const*> definitionsSeen;
vector<StructDefinition const*> queue = {this};
while (!queue.empty())
using StructPointer = StructDefinition const*;
using StructPointersSet = set<StructPointer>;
function<void(StructPointer,StructPointersSet const&)> check = [&](StructPointer _struct, StructPointersSet const& _parents)
{
StructDefinition const* def = queue.back();
queue.pop_back();
if (definitionsSeen.count(def))
BOOST_THROW_EXCEPTION(ParserError() << errinfo_sourceLocation(def->getLocation())
<< errinfo_comment("Recursive struct definition."));
definitionsSeen.insert(def);
for (ASTPointer<VariableDeclaration> const& member: def->getMembers())
if (_parents.count(_struct))
BOOST_THROW_EXCEPTION(
ParserError() <<
errinfo_sourceLocation(_struct->getLocation()) <<
errinfo_comment("Recursive struct definition.")
);
set<StructDefinition const*> parents = _parents;
parents.insert(_struct);
for (ASTPointer<VariableDeclaration> const& member: _struct->getMembers())
if (member->getType()->getCategory() == Type::Category::Struct)
{
UserDefinedTypeName const& typeName = dynamic_cast<UserDefinedTypeName const&>(*member->getTypeName());
queue.push_back(&dynamic_cast<StructDefinition const&>(*typeName.getReferencedDeclaration()));
auto const& typeName = dynamic_cast<UserDefinedTypeName const&>(*member->getTypeName());
check(
&dynamic_cast<StructDefinition const&>(*typeName.getReferencedDeclaration()),
parents
);
}
}
};
check(this, {});
}
TypePointer EnumDefinition::getType(ContractDefinition const*) const
@ -919,7 +926,7 @@ void MemberAccess::checkTypeRequirements(TypePointers const* _argumentTypes)
{
auto const& arrayType(dynamic_cast<ArrayType const&>(type));
m_isLValue = (*m_memberName == "length" &&
arrayType.location() != ReferenceType::Location::CallData && arrayType.isDynamicallySized());
arrayType.location() != DataLocation::CallData && arrayType.isDynamicallySized());
}
else
m_isLValue = false;
@ -942,7 +949,7 @@ void IndexAccess::checkTypeRequirements(TypePointers const*)
m_type = make_shared<FixedBytesType>(1);
else
m_type = type.getBaseType();
m_isLValue = type.location() != ReferenceType::Location::CallData;
m_isLValue = type.location() != DataLocation::CallData;
break;
}
case Type::Category::Mapping:
@ -959,7 +966,7 @@ void IndexAccess::checkTypeRequirements(TypePointers const*)
{
TypeType const& type = dynamic_cast<TypeType const&>(*m_base->getType());
if (!m_index)
m_type = make_shared<TypeType>(make_shared<ArrayType>(ReferenceType::Location::Memory, type.getActualType()));
m_type = make_shared<TypeType>(make_shared<ArrayType>(DataLocation::Memory, type.getActualType()));
else
{
m_index->checkTypeRequirements(nullptr);
@ -967,7 +974,9 @@ void IndexAccess::checkTypeRequirements(TypePointers const*)
if (!length)
BOOST_THROW_EXCEPTION(m_index->createTypeError("Integer constant expected."));
m_type = make_shared<TypeType>(make_shared<ArrayType>(
ReferenceType::Location::Memory, type.getActualType(), length->literalValue(nullptr)));
DataLocation::Memory, type.getActualType(),
length->literalValue(nullptr)
));
}
break;
}

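The rewritten checkRecursion above tracks only the structs on the current descent path (_parents) instead of a single global seen-set, so a struct may be reused in several branches while genuine cycles are still rejected. A small standalone sketch of the same path-based depth-first search over a hypothetical adjacency map of struct names:

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using Graph = std::map<std::string, std::vector<std::string>>; // struct -> member struct types

// Path-based DFS: a node is an error only if it appears among its own ancestors,
// so "diamond" reuse of a struct by two different members is accepted.
bool hasRecursion(Graph const& g, std::string const& node, std::set<std::string> parents = {})
{
    if (parents.count(node))
        return true;                      // node lies on its own path: recursive definition
    parents.insert(node);                 // copy per branch, like `parents = _parents` above
    auto it = g.find(node);
    if (it == g.end())
        return false;
    for (auto const& member: it->second)
        if (hasRecursion(g, member, parents))
            return true;
    return false;
}

int main()
{
    Graph diamond{{"A", {"B", "C"}}, {"B", {"D"}}, {"C", {"D"}}, {"D", {}}};
    Graph cycle{{"A", {"B"}}, {"B", {"A"}}};
    std::cout << hasRecursion(diamond, "A") << "\n"; // 0: shared member struct, no cycle
    std::cout << hasRecursion(cycle, "A") << "\n";   // 1: A -> B -> A
}
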
56
libsolidity/ArrayUtils.cpp

@ -38,10 +38,10 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
// need to leave "target_ref target_byte_off" on the stack at the end
// stack layout: [source_ref] [source_byte_off] [source length] target_ref target_byte_off (top)
solAssert(_targetType.location() == ReferenceType::Location::Storage, "");
solAssert(_targetType.location() == DataLocation::Storage, "");
solAssert(
_sourceType.location() == ReferenceType::Location::CallData ||
_sourceType.location() == ReferenceType::Location::Storage,
_sourceType.location() == DataLocation::CallData ||
_sourceType.location() == DataLocation::Storage,
"Given array location not implemented."
);
@ -51,7 +51,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
// TODO unroll loop for small sizes
bool sourceIsStorage = _sourceType.location() == ReferenceType::Location::Storage;
bool sourceIsStorage = _sourceType.location() == DataLocation::Storage;
bool directCopy = sourceIsStorage && sourceBaseType->isValueType() && *sourceBaseType == *targetBaseType;
bool haveByteOffsetSource = !directCopy && sourceIsStorage && sourceBaseType->getStorageBytes() <= 16;
bool haveByteOffsetTarget = !directCopy && targetBaseType->getStorageBytes() <= 16;
@ -69,7 +69,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
m_context << eth::Instruction::POP;
// stack: target_ref source_ref [source_length]
// retrieve source length
if (_sourceType.location() != ReferenceType::Location::CallData || !_sourceType.isDynamicallySized())
if (_sourceType.location() != DataLocation::CallData || !_sourceType.isDynamicallySized())
retrieveLength(_sourceType); // otherwise, length is already there
// stack: target_ref source_ref source_length
m_context << eth::Instruction::DUP3;
@ -82,7 +82,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
if (sourceBaseType->getCategory() == Type::Category::Mapping)
{
solAssert(targetBaseType->getCategory() == Type::Category::Mapping, "");
solAssert(_sourceType.location() == ReferenceType::Location::Storage, "");
solAssert(_sourceType.location() == DataLocation::Storage, "");
// nothing to copy
m_context
<< eth::Instruction::POP << eth::Instruction::POP
@ -106,7 +106,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
eth::AssemblyItem copyLoopEndWithoutByteOffset = m_context.newTag();
m_context.appendConditionalJumpTo(copyLoopEndWithoutByteOffset);
if (_sourceType.location() == ReferenceType::Location::Storage && _sourceType.isDynamicallySized())
if (_sourceType.location() == DataLocation::Storage && _sourceType.isDynamicallySized())
CompilerUtils(m_context).computeHashStatic();
// stack: target_ref target_data_end source_length target_data_pos source_data_pos
m_context << eth::Instruction::SWAP2;
@ -155,7 +155,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
// checking is easier.
// stack: target_ref target_data_end source_data_pos target_data_pos source_data_end [target_byte_offset] [source_byte_offset]
m_context << eth::dupInstruction(3 + byteOffsetSize);
if (_sourceType.location() == ReferenceType::Location::Storage)
if (_sourceType.location() == DataLocation::Storage)
{
if (haveByteOffsetSource)
m_context << eth::Instruction::DUP2;
@ -231,7 +231,7 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
void ArrayUtils::clearArray(ArrayType const& _type) const
{
unsigned stackHeightStart = m_context.getStackHeight();
solAssert(_type.location() == ReferenceType::Location::Storage, "");
solAssert(_type.location() == DataLocation::Storage, "");
if (_type.getBaseType()->getStorageBytes() < 32)
{
solAssert(_type.getBaseType()->isValueType(), "Invalid storage size for non-value type.");
@ -286,7 +286,7 @@ void ArrayUtils::clearArray(ArrayType const& _type) const
void ArrayUtils::clearDynamicArray(ArrayType const& _type) const
{
solAssert(_type.location() == ReferenceType::Location::Storage, "");
solAssert(_type.location() == DataLocation::Storage, "");
solAssert(_type.isDynamicallySized(), "");
unsigned stackHeightStart = m_context.getStackHeight();
@ -314,7 +314,7 @@ void ArrayUtils::clearDynamicArray(ArrayType const& _type) const
void ArrayUtils::resizeDynamicArray(const ArrayType& _type) const
{
solAssert(_type.location() == ReferenceType::Location::Storage, "");
solAssert(_type.location() == DataLocation::Storage, "");
solAssert(_type.isDynamicallySized(), "");
if (!_type.isByteArray() && _type.getBaseType()->getStorageBytes() < 32)
solAssert(_type.getBaseType()->isValueType(), "Invalid storage size for non-value type.");
@ -399,7 +399,7 @@ void ArrayUtils::clearStorageLoop(Type const& _type) const
void ArrayUtils::convertLengthToSize(ArrayType const& _arrayType, bool _pad) const
{
if (_arrayType.location() == ReferenceType::Location::Storage)
if (_arrayType.location() == DataLocation::Storage)
{
if (_arrayType.getBaseType()->getStorageSize() <= 1)
{
@ -437,13 +437,13 @@ void ArrayUtils::retrieveLength(ArrayType const& _arrayType) const
m_context << eth::Instruction::DUP1;
switch (_arrayType.location())
{
case ReferenceType::Location::CallData:
case DataLocation::CallData:
// length is stored on the stack
break;
case ReferenceType::Location::Memory:
case DataLocation::Memory:
m_context << eth::Instruction::MLOAD;
break;
case ReferenceType::Location::Storage:
case DataLocation::Storage:
m_context << eth::Instruction::SLOAD;
break;
}
@ -452,16 +452,16 @@ void ArrayUtils::retrieveLength(ArrayType const& _arrayType) const
void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
{
ReferenceType::Location location = _arrayType.location();
DataLocation location = _arrayType.location();
eth::Instruction load =
location == ReferenceType::Location::Storage ? eth::Instruction::SLOAD :
location == ReferenceType::Location::Memory ? eth::Instruction::MLOAD :
location == DataLocation::Storage ? eth::Instruction::SLOAD :
location == DataLocation::Memory ? eth::Instruction::MLOAD :
eth::Instruction::CALLDATALOAD;
// retrieve length
if (!_arrayType.isDynamicallySized())
m_context << _arrayType.getLength();
else if (location == ReferenceType::Location::CallData)
else if (location == DataLocation::CallData)
// length is stored on the stack
m_context << eth::Instruction::SWAP1;
else
@ -476,20 +476,20 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
m_context << eth::Instruction::SWAP1;
if (_arrayType.isDynamicallySized())
{
if (location == ReferenceType::Location::Storage)
if (location == DataLocation::Storage)
CompilerUtils(m_context).computeHashStatic();
else if (location == ReferenceType::Location::Memory)
else if (location == DataLocation::Memory)
m_context << u256(32) << eth::Instruction::ADD;
}
// stack: <index> <data_ref>
switch (location)
{
case ReferenceType::Location::CallData:
case DataLocation::CallData:
if (!_arrayType.isByteArray())
m_context
<< eth::Instruction::SWAP1
<< _arrayType.getBaseType()->getCalldataEncodedSize()
<< eth::Instruction::MUL;
{
m_context << eth::Instruction::SWAP1;
m_context << _arrayType.getBaseType()->getCalldataEncodedSize() << eth::Instruction::MUL;
}
m_context << eth::Instruction::ADD;
if (_arrayType.getBaseType()->isValueType())
CompilerUtils(m_context).loadFromMemoryDynamic(
@ -499,7 +499,7 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
false
);
break;
case ReferenceType::Location::Storage:
case DataLocation::Storage:
m_context << eth::Instruction::SWAP1;
if (_arrayType.getBaseType()->getStorageBytes() <= 16)
{
@ -527,7 +527,7 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
m_context << eth::Instruction::ADD << u256(0);
}
break;
case ReferenceType::Location::Memory:
case DataLocation::Memory:
solAssert(false, "Memory lvalues not yet implemented.");
}
}

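In the calldata branch of accessIndex above, the element address is the data offset plus the index multiplied by the statically known encoded size (the SWAP1/MUL/ADD sequence). A trivial sketch of that address arithmetic with hypothetical numbers:

#include <cstdint>
#include <iostream>

int main()
{
    // Hypothetical values: where the array data starts in calldata and a 32-byte element.
    uint64_t dataOffset = 0x44;          // array data offset in calldata
    uint64_t encodedSize = 32;           // getCalldataEncodedSize() of the base type
    uint64_t index = 3;
    uint64_t elementOffset = dataOffset + index * encodedSize; // SWAP1; MUL; ADD above
    std::cout << "0x" << std::hex << elementOffset << "\n";    // 0xa4
}
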
40
libsolidity/Compiler.cpp

@ -245,21 +245,35 @@ void Compiler::appendCalldataUnpacker(
{
// We do not check the calldata size, everything is zero-padded
//@todo this does not yet support nested arrays
if (_startOffset == u256(-1))
_startOffset = u256(CompilerUtils::dataStartOffset);
m_context << _startOffset;
for (TypePointer const& type: _typeParameters)
{
// stack: v1 v2 ... v(k-1) mem_offset
switch (type->getCategory())
{
case Type::Category::Array:
{
auto const& arrayType = dynamic_cast<ArrayType const&>(*type);
if (arrayType.location() == ReferenceType::Location::CallData)
solAssert(arrayType.location() != DataLocation::Storage, "");
solAssert(!arrayType.getBaseType()->isDynamicallySized(), "Nested arrays not yet implemented.");
if (_fromMemory)
{
solAssert(arrayType.location() == DataLocation::Memory, "");
// compute data pointer
//@todo once we support nested arrays, this offset needs to be dynamic.
m_context << eth::Instruction::DUP1 << _startOffset << eth::Instruction::ADD;
m_context << eth::Instruction::SWAP1 << u256(0x20) << eth::Instruction::ADD;
}
else
{
solAssert(!_fromMemory, "");
if (type->isDynamicallySized())
// first load from calldata and potentially convert to memory if arrayType is memory
TypePointer calldataType = arrayType.copyForLocation(DataLocation::CallData, false);
if (calldataType->isDynamicallySized())
{
// put on stack: data_pointer length
CompilerUtils(m_context).loadFromMemoryDynamic(IntegerType(256), !_fromMemory);
@ -276,17 +290,17 @@ void Compiler::appendCalldataUnpacker(
{
// leave the pointer on the stack
m_context << eth::Instruction::DUP1;
m_context << u256(type->getCalldataEncodedSize()) << eth::Instruction::ADD;
m_context << u256(calldataType->getCalldataEncodedSize()) << eth::Instruction::ADD;
}
if (arrayType.location() == DataLocation::Memory)
{
// copy to memory
// move calldata type up again
CompilerUtils(m_context).moveIntoStack(calldataType->getSizeOnStack());
CompilerUtils(m_context).convertType(*calldataType, arrayType);
// fetch next pointer again
CompilerUtils(m_context).moveToStackTop(arrayType.getSizeOnStack());
}
}
else
{
solAssert(arrayType.location() == ReferenceType::Location::Memory, "");
// compute data pointer
m_context << eth::Instruction::DUP1 << _startOffset << eth::Instruction::ADD;
if (!_fromMemory)
solAssert(false, "Not yet implemented.");
m_context << eth::Instruction::SWAP1 << u256(0x20) << eth::Instruction::ADD;
}
break;
}

64
libsolidity/CompilerUtils.cpp

@ -107,16 +107,18 @@ void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBound
auto const& type = dynamic_cast<ArrayType const&>(_type);
solAssert(type.isByteArray(), "Non byte arrays not yet implemented here.");
if (type.location() == ReferenceType::Location::CallData)
if (type.location() == DataLocation::CallData)
{
if (!type.isDynamicallySized())
m_context << type.getLength();
// stack: target source_offset source_len
m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5;
// stack: target source_offset source_len source_len source_offset target
m_context << eth::Instruction::CALLDATACOPY;
m_context << eth::Instruction::DUP3 << eth::Instruction::ADD;
m_context << eth::Instruction::SWAP2 << eth::Instruction::POP << eth::Instruction::POP;
}
else if (type.location() == ReferenceType::Location::Memory)
else if (type.location() == DataLocation::Memory)
{
// memcpy using the built-in contract
ArrayUtils(m_context).retrieveLength(type);
@ -183,7 +185,7 @@ void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBound
}
else
{
solAssert(type.location() == ReferenceType::Location::Storage, "");
solAssert(type.location() == DataLocation::Storage, "");
m_context << eth::Instruction::POP; // remove offset, arrays always start new slot
m_context << eth::Instruction::DUP1 << eth::Instruction::SLOAD;
// stack here: memory_offset storage_offset length_bytes
@ -274,10 +276,16 @@ void CompilerUtils::encodeToMemory(
else
{
copyToStackTop(argSize - stackPos + dynPointers + 2, _givenTypes[i]->getSizeOnStack());
if (targetType->isValueType())
convertType(*_givenTypes[i], *targetType, true);
solAssert(!!targetType, "Externalable type expected.");
storeInMemoryDynamic(*targetType, _padToWordBoundaries);
TypePointer type = targetType;
if (
_givenTypes[i]->dataStoredIn(DataLocation::Storage) ||
_givenTypes[i]->dataStoredIn(DataLocation::CallData)
)
type = _givenTypes[i]; // delay conversion
else
convertType(*_givenTypes[i], *targetType, true);
storeInMemoryDynamic(*type, _padToWordBoundaries);
}
stackPos += _givenTypes[i]->getSizeOnStack();
}
@ -304,13 +312,13 @@ void CompilerUtils::encodeToMemory(
// stack: ... <end_of_mem> <value...>
// copy length to memory
m_context << eth::dupInstruction(1 + arrayType.getSizeOnStack());
if (arrayType.location() == ReferenceType::Location::CallData)
if (arrayType.location() == DataLocation::CallData)
m_context << eth::Instruction::DUP2; // length is on stack
else if (arrayType.location() == ReferenceType::Location::Storage)
else if (arrayType.location() == DataLocation::Storage)
m_context << eth::Instruction::DUP3 << eth::Instruction::SLOAD;
else
{
solAssert(arrayType.location() == ReferenceType::Location::Memory, "");
solAssert(arrayType.location() == DataLocation::Memory, "");
m_context << eth::Instruction::DUP2 << eth::Instruction::MLOAD;
}
// stack: ... <end_of_mem> <value...> <end_of_mem'> <length>
@ -432,18 +440,18 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
ArrayType const& targetType = dynamic_cast<ArrayType const&>(_targetType);
switch (targetType.location())
{
case ReferenceType::Location::Storage:
case DataLocation::Storage:
// Other cases are done explicitly in LValue::storeValue, and only possible by assignment.
solAssert(
targetType.isPointer() &&
typeOnStack.location() == ReferenceType::Location::Storage,
typeOnStack.location() == DataLocation::Storage,
"Invalid conversion to storage type."
);
break;
case ReferenceType::Location::Memory:
case DataLocation::Memory:
{
// Copy the array to a free position in memory, unless it is already in memory.
if (typeOnStack.location() != ReferenceType::Location::Memory)
if (typeOnStack.location() != DataLocation::Memory)
{
// stack: <source ref> (variably sized)
unsigned stackSize = typeOnStack.getSizeOnStack();
@ -452,7 +460,7 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
// stack: <mem start> <source ref> (variably sized)
if (targetType.isDynamicallySized())
{
bool fromStorage = (typeOnStack.location() == ReferenceType::Location::Storage);
bool fromStorage = (typeOnStack.location() == DataLocation::Storage);
// store length
if (fromStorage)
{
@ -483,11 +491,25 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
// Stack <mem start> <mem end>
storeFreeMemoryPointer();
}
else if (typeOnStack.location() == ReferenceType::Location::CallData)
else if (typeOnStack.location() == DataLocation::CallData)
{
// Stack: <offset> <length>
//@todo
solAssert(false, "Not yet implemented.");
// Stack: <offset> [<length>]
// length is present if dynamically sized
fetchFreeMemoryPointer();
moveIntoStack(typeOnStack.getSizeOnStack());
// stack: memptr calldataoffset [<length>]
if (typeOnStack.isDynamicallySized())
{
solAssert(targetType.isDynamicallySized(), "");
m_context << eth::Instruction::DUP3 << eth::Instruction::DUP2;
storeInMemoryDynamic(IntegerType(256));
moveIntoStack(typeOnStack.getSizeOnStack());
}
else
m_context << eth::Instruction::DUP2 << eth::Instruction::SWAP1;
// stack: mem_ptr mem_data_ptr calldataoffset [<length>]
storeInMemoryDynamic(typeOnStack);
storeFreeMemoryPointer();
}
// nothing to do for memory to memory
break;
@ -504,8 +526,8 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
auto& targetType = dynamic_cast<StructType const&>(_targetType);
auto& stackType = dynamic_cast<StructType const&>(_typeOnStack);
solAssert(
targetType.location() == ReferenceType::Location::Storage &&
stackType.location() == ReferenceType::Location::Storage,
targetType.location() == DataLocation::Storage &&
stackType.location() == DataLocation::Storage,
"Non-storage structs not yet implemented."
);
solAssert(

5
libsolidity/CompilerUtils.h

@ -99,8 +99,9 @@ public:
bool _copyDynamicDataInPlace = false
);
/// Appends code for an implicit or explicit type conversion. For now this comprises only erasing
/// higher-order bits (@see appendHighBitCleanup) when widening integer.
/// Appends code for an implicit or explicit type conversion. This includes erasing higher-order
/// bits (@see appendHighBitCleanup) when widening an integer, but also copying to memory if a
/// reference type is converted from calldata or storage to memory.
/// If @a _cleanupNeeded, high order bits cleanup is also done if no type conversion would be
/// necessary.
void convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded = false);

55
libsolidity/ExpressionCompiler.cpp

@ -109,34 +109,40 @@ void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const&
}
unsigned retSizeOnStack = 0;
solAssert(accessorType.getReturnParameterTypes().size() >= 1, "");
auto const& returnTypes = accessorType.getReturnParameterTypes();
if (StructType const* structType = dynamic_cast<StructType const*>(returnType.get()))
{
// remove offset
m_context << eth::Instruction::POP;
auto const& names = accessorType.getReturnParameterNames();
auto const& types = accessorType.getReturnParameterTypes();
// struct
for (size_t i = 0; i < names.size(); ++i)
{
if (types[i]->getCategory() == Type::Category::Mapping || types[i]->getCategory() == Type::Category::Array)
if (returnTypes[i]->getCategory() == Type::Category::Mapping)
continue;
if (auto arrayType = dynamic_cast<ArrayType const*>(returnTypes[i].get()))
if (!arrayType->isByteArray())
continue;
pair<u256, unsigned> const& offsets = structType->getStorageOffsetsOfMember(names[i]);
m_context << eth::Instruction::DUP1 << u256(offsets.first) << eth::Instruction::ADD << u256(offsets.second);
StorageItem(m_context, *types[i]).retrieveValue(SourceLocation(), true);
solAssert(types[i]->getSizeOnStack() == 1, "Returning struct elements with stack size != 1 is not yet implemented.");
m_context << eth::Instruction::SWAP1;
retSizeOnStack += types[i]->getSizeOnStack();
TypePointer memberType = structType->getMemberType(names[i]);
StorageItem(m_context, *memberType).retrieveValue(SourceLocation(), true);
utils().convertType(*memberType, *returnTypes[i]);
utils().moveToStackTop(returnTypes[i]->getSizeOnStack());
retSizeOnStack += returnTypes[i]->getSizeOnStack();
}
// remove slot
m_context << eth::Instruction::POP;
}
else
{
// simple value
solAssert(accessorType.getReturnParameterTypes().size() == 1, "");
// simple value or array
solAssert(returnTypes.size() == 1, "");
StorageItem(m_context, *returnType).retrieveValue(SourceLocation(), true);
retSizeOnStack = returnType->getSizeOnStack();
utils().convertType(*returnType, *returnTypes.front());
retSizeOnStack = returnTypes.front()->getSizeOnStack();
}
solAssert(retSizeOnStack == utils().getSizeOnStack(returnTypes), "");
solAssert(retSizeOnStack <= 15, "Stack is too deep.");
m_context << eth::dupInstruction(retSizeOnStack + 1);
m_context.appendJump(eth::AssemblyItem::JumpType::OutOfFunction);
@ -146,10 +152,13 @@ bool ExpressionCompiler::visit(Assignment const& _assignment)
{
CompilerContext::LocationSetter locationSetter(m_context, _assignment);
_assignment.getRightHandSide().accept(*this);
if (_assignment.getType()->isValueType())
utils().convertType(*_assignment.getRightHandSide().getType(), *_assignment.getType());
// We need this conversion mostly in the case of compound assignments. For non-value types
// the conversion is done in LValue::storeValue.
TypePointer type = _assignment.getRightHandSide().getType();
if (!_assignment.getType()->dataStoredIn(DataLocation::Storage))
{
utils().convertType(*type, *_assignment.getType());
type = _assignment.getType();
}
_assignment.getLeftHandSide().accept(*this);
solAssert(!!m_currentLValue, "LValue not retrieved.");
@ -175,7 +184,7 @@ bool ExpressionCompiler::visit(Assignment const& _assignment)
m_context << eth::swapInstruction(itemSize + lvalueSize) << eth::Instruction::POP;
}
}
m_currentLValue->storeValue(*_assignment.getRightHandSide().getType(), _assignment.getLocation());
m_currentLValue->storeValue(*type, _assignment.getLocation());
m_currentLValue.reset();
return false;
}
@ -709,10 +718,10 @@ void ExpressionCompiler::endVisit(MemberAccess const& _memberAccess)
else
switch (type.location())
{
case ReferenceType::Location::CallData:
case DataLocation::CallData:
m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
break;
case ReferenceType::Location::Storage:
case DataLocation::Storage:
setLValue<StorageArrayLength>(_memberAccess, type);
break;
default:
@ -755,13 +764,13 @@ bool ExpressionCompiler::visit(IndexAccess const& _indexAccess)
solAssert(_indexAccess.getIndexExpression(), "Index expression expected.");
// remove storage byte offset
if (arrayType.location() == ReferenceType::Location::Storage)
if (arrayType.location() == DataLocation::Storage)
m_context << eth::Instruction::POP;
_indexAccess.getIndexExpression()->accept(*this);
// stack layout: <base_ref> [<length>] <index>
ArrayUtils(m_context).accessIndex(arrayType);
if (arrayType.location() == ReferenceType::Location::Storage)
if (arrayType.location() == DataLocation::Storage)
{
if (arrayType.isByteArray())
{
@ -1119,14 +1128,10 @@ void ExpressionCompiler::appendExternalFunctionCall(
void ExpressionCompiler::appendExpressionCopyToMemory(Type const& _expectedType, Expression const& _expression)
{
solAssert(_expectedType.isValueType(), "Not implemented for non-value types.");
_expression.accept(*this);
if (_expectedType.isValueType())
{
utils().convertType(*_expression.getType(), _expectedType, true);
utils().storeInMemoryDynamic(_expectedType);
}
else
utils().storeInMemoryDynamic(*_expression.getType()->mobileType());
utils().convertType(*_expression.getType(), _expectedType, true);
utils().storeInMemoryDynamic(_expectedType);
}
void ExpressionCompiler::setLValueFromDeclaration(Declaration const& _declaration, Expression const& _expression)

8
libsolidity/NameAndTypeResolver.cpp

@ -439,7 +439,7 @@ void ReferencesResolver::endVisit(VariableDeclaration& _variable)
"Location has to be calldata for external functions "
"(remove the \"memory\" or \"storage\" keyword)."
));
type = ref->copyForLocation(ReferenceType::Location::CallData, true);
type = ref->copyForLocation(DataLocation::CallData, true);
}
else if (_variable.isCallableParameter() && _variable.getScope()->isPublic())
{
@ -449,7 +449,7 @@ void ReferencesResolver::endVisit(VariableDeclaration& _variable)
"Location has to be memory for publicly visible functions "
"(remove the \"storage\" keyword)."
));
type = ref->copyForLocation(ReferenceType::Location::Memory, true);
type = ref->copyForLocation(DataLocation::Memory, true);
}
else
{
@ -458,8 +458,8 @@ void ReferencesResolver::endVisit(VariableDeclaration& _variable)
bool isPointer = !_variable.isStateVariable();
type = ref->copyForLocation(
loc == Location::Memory ?
ReferenceType::Location::Memory :
ReferenceType::Location::Storage,
DataLocation::Memory :
DataLocation::Storage,
isPointer
);
}

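The ReferencesResolver change above defaults the data location of reference-type parameters: calldata for external functions, memory for publicly visible ones, and the declared keyword otherwise. A simplified, hypothetical mirror of that defaulting rule (the flag names are invented for illustration; the real code inspects the VariableDeclaration and its scope):

#include <iostream>

enum class DataLocation { Storage, CallData, Memory };

// Hypothetical condensed form of the logic in ReferencesResolver::endVisit(VariableDeclaration&).
DataLocation defaultLocation(bool isExternalCallableParam, bool isPublicCallableParam, bool declaredMemory)
{
    if (isExternalCallableParam)
        return DataLocation::CallData;   // external functions read reference args from calldata
    if (isPublicCallableParam)
        return DataLocation::Memory;     // publicly visible callables copy reference args to memory
    return declaredMemory ? DataLocation::Memory : DataLocation::Storage;
}

int main()
{
    std::cout << int(defaultLocation(true, false, false)) << "\n";  // 1: calldata
    std::cout << int(defaultLocation(false, true, false)) << "\n";  // 2: memory
    std::cout << int(defaultLocation(false, false, false)) << "\n"; // 0: storage
}
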
62
libsolidity/Types.cpp

@ -144,12 +144,13 @@ TypePointer Type::fromElementaryTypeName(Token::Value _typeToken)
else if (_typeToken == Token::Bool)
return make_shared<BoolType>();
else if (_typeToken == Token::Bytes)
return make_shared<ArrayType>(ReferenceType::Location::Storage);
return make_shared<ArrayType>(DataLocation::Storage);
else if (_typeToken == Token::String)
return make_shared<ArrayType>(ReferenceType::Location::Storage, true);
return make_shared<ArrayType>(DataLocation::Storage, true);
else
BOOST_THROW_EXCEPTION(InternalCompilerError() << errinfo_comment("Unable to convert elementary typename " +
std::string(Token::toString(_typeToken)) + " to type."));
BOOST_THROW_EXCEPTION(InternalCompilerError() << errinfo_comment(
"Unable to convert elementary typename " + std::string(Token::toString(_typeToken)) + " to type."
));
}
TypePointer Type::fromElementaryTypeName(string const& _name)
@ -180,7 +181,7 @@ TypePointer Type::fromMapping(ElementaryTypeName& _keyType, TypeName& _valueType
if (!valueType)
BOOST_THROW_EXCEPTION(_valueType.createTypeError("Invalid type name."));
// Convert value type to storage reference.
valueType = ReferenceType::copyForLocationIfReference(ReferenceType::Location::Storage, valueType);
valueType = ReferenceType::copyForLocationIfReference(DataLocation::Storage, valueType);
return make_shared<MappingType>(keyType, valueType);
}
@ -198,10 +199,10 @@ TypePointer Type::fromArrayTypeName(TypeName& _baseTypeName, Expression* _length
auto const* length = dynamic_cast<IntegerConstantType const*>(_length->getType().get());
if (!length)
BOOST_THROW_EXCEPTION(_length->createTypeError("Invalid array length."));
return make_shared<ArrayType>(ReferenceType::Location::Storage, baseType, length->literalValue(nullptr));
return make_shared<ArrayType>(DataLocation::Storage, baseType, length->literalValue(nullptr));
}
else
return make_shared<ArrayType>(ReferenceType::Location::Storage, baseType);
return make_shared<ArrayType>(DataLocation::Storage, baseType);
}
TypePointer Type::forLiteral(Literal const& _literal)
@ -670,7 +671,7 @@ TypePointer ContractType::unaryOperatorResult(Token::Value _operator) const
return _operator == Token::Delete ? make_shared<VoidType>() : TypePointer();
}
TypePointer ReferenceType::copyForLocationIfReference(Location _location, TypePointer const& _type)
TypePointer ReferenceType::copyForLocationIfReference(DataLocation _location, TypePointer const& _type)
{
if (auto type = dynamic_cast<ReferenceType const*>(_type.get()))
return type->copyForLocation(_location, false);
@ -686,11 +687,11 @@ string ReferenceType::stringForReferencePart() const
{
switch (m_location)
{
case Location::Storage:
case DataLocation::Storage:
return string("storage ") + (m_isPointer ? "pointer" : "ref");
case Location::CallData:
case DataLocation::CallData:
return "calldata";
case Location::Memory:
case DataLocation::Memory:
return "memory";
}
solAssert(false, "");
@ -705,11 +706,11 @@ bool ArrayType::isImplicitlyConvertibleTo(const Type& _convertTo) const
if (convertTo.isByteArray() != isByteArray() || convertTo.isString() != isString())
return false;
// memory/calldata to storage can be converted, but only to a direct storage reference
if (convertTo.location() == Location::Storage && location() != Location::Storage && convertTo.isPointer())
if (convertTo.location() == DataLocation::Storage && location() != DataLocation::Storage && convertTo.isPointer())
return false;
if (convertTo.location() == Location::CallData && location() != convertTo.location())
if (convertTo.location() == DataLocation::CallData && location() != convertTo.location())
return false;
if (convertTo.location() == Location::Storage && !convertTo.isPointer())
if (convertTo.location() == DataLocation::Storage && !convertTo.isPointer())
{
// Less restrictive conversion, since we need to copy anyway.
if (!getBaseType()->isImplicitlyConvertibleTo(*convertTo.getBaseType()))
@ -788,10 +789,10 @@ u256 ArrayType::getStorageSize() const
unsigned ArrayType::getSizeOnStack() const
{
if (m_location == Location::CallData)
if (m_location == DataLocation::CallData)
// offset [length] (stack top)
return 1 + (isDynamicallySized() ? 1 : 0);
else if (m_location == Location::Storage)
else if (m_location == DataLocation::Storage)
// storage_key storage_offset
return 2;
else
@ -828,12 +829,12 @@ TypePointer ArrayType::externalType() const
return TypePointer();
if (isDynamicallySized())
return std::make_shared<ArrayType>(Location::CallData, m_baseType->externalType());
return std::make_shared<ArrayType>(DataLocation::CallData, m_baseType->externalType());
else
return std::make_shared<ArrayType>(Location::CallData, m_baseType->externalType(), m_length);
return std::make_shared<ArrayType>(DataLocation::CallData, m_baseType->externalType(), m_length);
}
TypePointer ArrayType::copyForLocation(ReferenceType::Location _location, bool _isPointer) const
TypePointer ArrayType::copyForLocation(DataLocation _location, bool _isPointer) const
{
auto copy = make_shared<ArrayType>(_location);
copy->m_isPointer = _isPointer;
@ -949,9 +950,9 @@ bool StructType::isImplicitlyConvertibleTo(const Type& _convertTo) const
return false;
auto& convertTo = dynamic_cast<StructType const&>(_convertTo);
// memory/calldata to storage can be converted, but only to a direct storage reference
if (convertTo.location() == Location::Storage && location() != Location::Storage && convertTo.isPointer())
if (convertTo.location() == DataLocation::Storage && location() != DataLocation::Storage && convertTo.isPointer())
return false;
if (convertTo.location() == Location::CallData && location() != convertTo.location())
if (convertTo.location() == DataLocation::CallData && location() != convertTo.location())
return false;
return this->m_struct == convertTo.m_struct;
}
@ -1009,7 +1010,7 @@ MemberList const& StructType::getMembers() const
return *m_members;
}
TypePointer StructType::copyForLocation(ReferenceType::Location _location, bool _isPointer) const
TypePointer StructType::copyForLocation(DataLocation _location, bool _isPointer) const
{
auto copy = make_shared<StructType>(m_struct);
copy->m_location = _location;
@ -1115,6 +1116,9 @@ FunctionType::FunctionType(VariableDeclaration const& _varDecl):
}
else if (auto arrayType = dynamic_cast<ArrayType const*>(returnType.get()))
{
if (arrayType->isByteArray())
// Return byte arrays as a whole.
break;
returnType = arrayType->getBaseType();
paramNames.push_back("");
paramTypes.push_back(make_shared<IntegerType>(256));
@ -1128,15 +1132,21 @@ FunctionType::FunctionType(VariableDeclaration const& _varDecl):
if (auto structType = dynamic_cast<StructType const*>(returnType.get()))
{
for (auto const& member: structType->getMembers())
if (member.type->getCategory() != Category::Mapping && member.type->getCategory() != Category::Array)
if (member.type->getCategory() != Category::Mapping)
{
retParamNames.push_back(member.name);
if (auto arrayType = dynamic_cast<ArrayType const*>(member.type.get()))
if (!arrayType->isByteArray())
continue;
retParams.push_back(member.type);
retParamNames.push_back(member.name);
}
}
else
{
retParams.push_back(returnType);
retParams.push_back(ReferenceType::copyForLocationIfReference(
DataLocation::Memory,
returnType
));
retParamNames.push_back("");
}
@@ -1549,7 +1559,7 @@ MagicType::MagicType(MagicType::Kind _kind):
{"sender", make_shared<IntegerType>(0, IntegerType::Modifier::Address)},
{"gas", make_shared<IntegerType>(256)},
{"value", make_shared<IntegerType>(256)},
{"data", make_shared<ArrayType>(ReferenceType::Location::CallData)},
{"data", make_shared<ArrayType>(DataLocation::CallData)},
{"sig", make_shared<FixedBytesType>(4)}
});
break;

32
libsolidity/Types.h

@@ -44,6 +44,8 @@ using FunctionTypePointer = std::shared_ptr<FunctionType const>;
using TypePointers = std::vector<TypePointer>;
enum class DataLocation { Storage, CallData, Memory };
/**
* Helper class to compute storage offsets of members of structs and contracts.
*/
@@ -202,6 +204,9 @@ public:
/// This returns the corresponding integer type for IntegerConstantTypes and the pointer type
/// for storage reference types.
virtual TypePointer mobileType() const { return shared_from_this(); }
/// @returns true if this is a non-value type and the data of this type is stored at the
/// given location.
virtual bool dataStoredIn(DataLocation) const { return false; }
/// Returns the list of all members of this type. Default implementation: no members.
virtual MemberList const& getMembers() const { return EmptyMemberList; }
@@ -365,15 +370,15 @@ public:
class ReferenceType: public Type
{
public:
enum class Location { Storage, CallData, Memory };
explicit ReferenceType(Location _location): m_location(_location) {}
Location location() const { return m_location; }
explicit ReferenceType(DataLocation _location): m_location(_location) {}
DataLocation location() const { return m_location; }
/// @returns a copy of this type with location (recursively) changed to @a _location,
/// whereas isPointer is only shallowly changed - the deep copy is always a bound reference.
virtual TypePointer copyForLocation(Location _location, bool _isPointer) const = 0;
virtual TypePointer copyForLocation(DataLocation _location, bool _isPointer) const = 0;
virtual TypePointer mobileType() const override { return copyForLocation(m_location, true); }
virtual bool dataStoredIn(DataLocation _location) const override { return m_location == _location; }
/// Storage references can be pointers or bound references. In general, local variables are of
/// pointer type, state variables are bound references. Assignments to pointers or deleting
@@ -389,14 +394,14 @@ public:
/// @returns a copy of @a _type having the same location as this (and is not a pointer type)
/// if _type is a reference type and an unmodified copy of _type otherwise.
/// This function is mostly useful to modify inner types appropriately.
static TypePointer copyForLocationIfReference(Location _location, TypePointer const& _type);
static TypePointer copyForLocationIfReference(DataLocation _location, TypePointer const& _type);
protected:
TypePointer copyForLocationIfReference(TypePointer const& _type) const;
/// @returns a human-readable description of the reference part of the type.
std::string stringForReferencePart() const;
Location m_location = Location::Storage;
DataLocation m_location = DataLocation::Storage;
bool m_isPointer = true;
};
@@ -413,20 +418,20 @@ public:
virtual Category getCategory() const override { return Category::Array; }
/// Constructor for a byte array ("bytes") and string.
explicit ArrayType(Location _location, bool _isString = false):
explicit ArrayType(DataLocation _location, bool _isString = false):
ReferenceType(_location),
m_arrayKind(_isString ? ArrayKind::String : ArrayKind::Bytes),
m_baseType(std::make_shared<FixedBytesType>(1))
{
}
/// Constructor for a dynamically sized array type ("type[]")
ArrayType(Location _location, TypePointer const& _baseType):
ArrayType(DataLocation _location, TypePointer const& _baseType):
ReferenceType(_location),
m_baseType(copyForLocationIfReference(_baseType))
{
}
/// Constructor for a fixed-size array type ("type[20]")
ArrayType(Location _location, TypePointer const& _baseType, u256 const& _length):
ArrayType(DataLocation _location, TypePointer const& _baseType, u256 const& _length):
ReferenceType(_location),
m_baseType(copyForLocationIfReference(_baseType)),
m_hasDynamicLength(false),
@@ -454,7 +459,7 @@ public:
TypePointer const& getBaseType() const { solAssert(!!m_baseType, ""); return m_baseType;}
u256 const& getLength() const { return m_length; }
TypePointer copyForLocation(Location _location, bool _isPointer) const override;
TypePointer copyForLocation(DataLocation _location, bool _isPointer) const override;
private:
/// String is interpreted as a subtype of Bytes.
@@ -533,7 +538,7 @@ public:
virtual Category getCategory() const override { return Category::Struct; }
explicit StructType(StructDefinition const& _struct):
//@todo only storage until we have non-storage structs
ReferenceType(Location::Storage), m_struct(_struct) {}
ReferenceType(DataLocation::Storage), m_struct(_struct) {}
virtual bool isImplicitlyConvertibleTo(const Type& _convertTo) const override;
virtual TypePointer unaryOperatorResult(Token::Value _operator) const override;
virtual bool operator==(Type const& _other) const override;
@@ -544,7 +549,7 @@ public:
virtual MemberList const& getMembers() const override;
TypePointer copyForLocation(Location _location, bool _isPointer) const override;
TypePointer copyForLocation(DataLocation _location, bool _isPointer) const override;
std::pair<u256, unsigned> const& getStorageOffsetsOfMember(std::string const& _name) const;
@@ -636,8 +641,11 @@ public:
FunctionTypePointer externalFunctionType() const;
virtual TypePointer externalType() const override { return externalFunctionType(); }
/// Creates the type of a function.
explicit FunctionType(FunctionDefinition const& _function, bool _isInternal = true);
/// Creates the accessor function type of a state variable.
explicit FunctionType(VariableDeclaration const& _varDecl);
/// Creates the function type of an event.
explicit FunctionType(EventDefinition const& _event);
FunctionType(
strings const& _parameterTypes,
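
For orientation, here is a minimal sketch of the relocated enum in use, built only from the declarations shown above; the include path, the wrapper function and the variable names are assumptions, not part of this commit.

#include <memory>
#include <libsolidity/Types.h>   // assumed include path

using namespace dev::solidity;

// Sketch: a dynamically sized byte array ("bytes") in memory, re-homed into storage.
void dataLocationSketch()
{
	auto memBytes = std::make_shared<ArrayType>(DataLocation::Memory);
	// copyForLocation() changes the location recursively; the pointer flag only shallowly.
	TypePointer storagePtr = memBytes->copyForLocation(DataLocation::Storage, true);
	bool inStorage = storagePtr->dataStoredIn(DataLocation::Storage); // true
	bool inMemory = storagePtr->dataStoredIn(DataLocation::Memory);   // false
	(void)inStorage; (void)inMemory;
}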

2
libtestutils/Common.cpp

@@ -59,7 +59,7 @@ Json::Value dev::test::loadJsonFromFile(std::string const& _path)
{
Json::Reader reader;
Json::Value result;
string s = asString(dev::contents(_path));
string s = dev::contentsString(_path);
if (!s.length())
ctest << "Contents of " + _path + " is empty. Have you cloned the 'tests' repo branch develop and set ETHEREUM_TEST_PATH to its path?";
else

2
libweb3jsonrpc/AccountHolder.cpp

@@ -103,7 +103,7 @@ void AccountHolder::clearQueue(int _id)
AddressHash SimpleAccountHolder::realAccounts() const
{
return m_keyManager.accounts();
return m_keyManager.accountsHash();
}
void SimpleAccountHolder::authenticate(dev::eth::TransactionSkeleton const& _t)

1
libweb3jsonrpc/AccountHolder.h

@@ -48,7 +48,6 @@ class AccountHolder
public:
explicit AccountHolder(std::function<Interface*()> const& _client): m_client(_client) {}
// easiest to return keyManager.addresses();
virtual AddressHash realAccounts() const = 0;
// use m_web3's submitTransaction
// or use AccountHolder::queueTransaction(_t) to accept

3
libweb3jsonrpc/CMakeLists.txt

@@ -13,7 +13,7 @@ include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
include_directories(BEFORE ..)
include_directories(${MHD_INCLUDE_DIRS})
include_directories(${JSON_RPC_CPP_INCLUDE_DIRS})
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS})
set(EXECUTABLE web3jsonrpc)
@@ -22,7 +22,6 @@ file(GLOB HEADERS "*.h")
add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS})
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${JSONCPP_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${JSON_RPC_CPP_SERVER_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${MHD_LIBRARIES})

2
libweb3jsonrpc/JsonHelper.h

@@ -43,7 +43,7 @@ namespace eth
{
class Transaction;
class BlockDetails;
struct BlockDetails;
class Interface;
using Transactions = std::vector<Transaction>;
using UncleHashes = h256s;

23
libweb3jsonrpc/WebThreeStubServer.cpp

@@ -157,19 +157,19 @@ Json::Value WebThreeStubServer::admin_eth_allAccounts(std::string const& _sessio
u256 total = 0;
u256 pendingtotal = 0;
Address beneficiary;
for (auto const& i: m_keyMan.accountDetails())
for (auto const& address: m_keyMan.accounts())
{
auto pending = m_web3.ethereum()->balanceAt(i.first, PendingBlock);
auto latest = m_web3.ethereum()->balanceAt(i.first, LatestBlock);
auto pending = m_web3.ethereum()->balanceAt(address, PendingBlock);
auto latest = m_web3.ethereum()->balanceAt(address, LatestBlock);
Json::Value a;
if (i.first == beneficiary)
if (address == beneficiary)
a["beneficiary"] = true;
a["address"] = toJS(i.first);
a["address"] = toJS(address);
a["balance"] = toJS(latest);
a["nicebalance"] = formatBalance(latest);
a["pending"] = toJS(pending);
a["nicepending"] = formatBalance(pending);
ret["accounts"][i.second.first] = a;
ret["accounts"][m_keyMan.accountName(address)] = a;
total += latest;
pendingtotal += pending;
}
@@ -205,7 +205,16 @@ Json::Value WebThreeStubServer::admin_eth_newAccount(Json::Value const& _info, s
bool WebThreeStubServer::admin_eth_setMiningBenefactor(std::string const& _uuidOrAddress, std::string const& _session)
{
ADMIN;
(void)_uuidOrAddress;
Address a;
h128 uuid = fromUUID(_uuidOrAddress);
if (uuid)
a = m_keyMan.address(uuid);
else if (isHash<Address>(_uuidOrAddress))
a = Address(_uuidOrAddress);
else
throw jsonrpc::JsonRpcException("Invalid UUID or address");
if (m_setMiningBenefactor)
m_setMiningBenefactor(a);
return true;
}
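
The argument handling above accepts either a key UUID or a raw address; restated here as a hypothetical free function for clarity (the helper name and the includes are assumptions; fromUUID, isHash and KeyManager::address are the calls used in the hunk):

#include <stdexcept>
#include <string>
#include <libethcore/KeyManager.h>   // assumed include path

using namespace dev;
using namespace dev::eth;

// Hypothetical helper mirroring admin_eth_setMiningBenefactor's lookup.
Address resolveBenefactor(KeyManager& _keyMan, std::string const& _uuidOrAddress)
{
	h128 uuid = fromUUID(_uuidOrAddress);
	if (uuid)
		return _keyMan.address(uuid);       // UUID names a key known to the KeyManager
	if (isHash<Address>(_uuidOrAddress))
		return Address(_uuidOrAddress);     // otherwise expect a 20-byte hex address
	throw std::invalid_argument("expected a key UUID or an address");
}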

17
libweb3jsonrpc/WebThreeStubServer.h

@@ -23,11 +23,7 @@
#pragma once
#pragma warning(push)
#pragma warning(disable: 4100 4267)
#include <leveldb/db.h>
#pragma warning(pop)
#include <libdevcore/db.h>
#include "WebThreeStubServerBase.h"
namespace dev
@@ -60,6 +56,8 @@ public:
std::string newSession(SessionPermissions const& _p);
void addSession(std::string const& _session, SessionPermissions const& _p) { m_sessions[_session] = _p; }
virtual void setMiningBenefactorChanger(std::function<void(Address const&)> const& _f) { m_setMiningBenefactor = _f; }
private:
virtual bool hasPriviledgeLevel(std::string const& _session, Priviledge _l) const override { auto it = m_sessions.find(_session); return it != m_sessions.end() && it->second.priviledges.count(_l); }
@@ -80,9 +78,9 @@ private:
virtual std::string admin_eth_blockQueueFirstUnknown(std::string const& _session) override;
virtual bool admin_eth_blockQueueRetryUnknown(std::string const& _session) override;
virtual bool admin_eth_setMiningBenefactor(std::string const& _uuidOrAddress, std::string const& _session) override;
virtual Json::Value admin_eth_allAccounts(std::string const& _session) override;
virtual Json::Value admin_eth_newAccount(const Json::Value& _info, std::string const& _session) override;
virtual bool admin_eth_setMiningBenefactor(std::string const& _uuidOrAddress, std::string const& _session) override;
virtual Json::Value admin_eth_inspect(std::string const& _address, std::string const& _session) override;
virtual Json::Value admin_eth_reprocess(std::string const& _blockNumberOrHash, std::string const& _session) override;
virtual Json::Value admin_eth_vmTrace(std::string const& _blockNumberOrHash, int _txIndex, std::string const& _session) override;
@@ -97,10 +95,11 @@ private:
dev::WebThreeDirect& m_web3;
dev::eth::KeyManager& m_keyMan;
dev::eth::TrivialGasPricer& m_gp;
leveldb::ReadOptions m_readOptions;
leveldb::WriteOptions m_writeOptions;
leveldb::DB* m_db;
ldb::ReadOptions m_readOptions;
ldb::WriteOptions m_writeOptions;
ldb::DB* m_db;
std::function<void(Address const&)> m_setMiningBenefactor;
std::unordered_map<std::string, SessionPermissions> m_sessions;
};
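
The switch from leveldb:: to ldb:: works because <libdevcore/db.h> (touched elsewhere in this commit but not shown here) presumably hides the concrete key-value store behind an ldb namespace alias, roughly along these lines; the guard macro is an assumption and the real header may differ:

#pragma once

#pragma warning(push)
#pragma warning(disable: 4100 4267)
#if ETH_ROCKSDB                 // hypothetical guard; the real macro may differ
#include <rocksdb/db.h>
namespace ldb = rocksdb;
#else
#include <leveldb/db.h>
namespace ldb = leveldb;
#endif
#pragma warning(pop)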

4
libwebthree/CMakeLists.txt

@@ -12,7 +12,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTATICLIB")
aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS})
set(EXECUTABLE webthree)
@@ -21,8 +21,6 @@ file(GLOB HEADERS "*.h")
add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS})
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ethereum)
target_link_libraries(${EXECUTABLE} evm)
target_link_libraries(${EXECUTABLE} lll)

28
libwhisper/BloomFilter.cpp

@@ -0,0 +1,28 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file BloomFilter.cpp
* @author Vladislav Gluhovsky <vlad@ethdev.com>
* @date June 2015
*/
#include "BloomFilter.h"
using namespace std;
using namespace dev;
using namespace dev::shh;

103
libwhisper/BloomFilter.h

@@ -0,0 +1,103 @@
/*
This file is part of cpp-ethereum.
cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file BloomFilter.h
* @author Vladislav Gluhovsky <vlad@ethdev.com>
* @date June 2015
*/
#pragma once
#include "Common.h"
namespace dev
{
namespace shh
{
template <unsigned N>
class TopicBloomFilterBase: public FixedHash<N>
{
public:
TopicBloomFilterBase() { init(); }
TopicBloomFilterBase(FixedHash<N> const& _h): FixedHash<N>(_h) { init(); }
void addBloom(dev::shh::AbridgedTopic const& _h) { addRaw(_h.template bloomPart<BitsPerBloom, N>()); }
void removeBloom(dev::shh::AbridgedTopic const& _h) { removeRaw(_h.template bloomPart<BitsPerBloom, N>()); }
bool containsBloom(dev::shh::AbridgedTopic const& _h) const { return this->contains(_h.template bloomPart<BitsPerBloom, N>()); }
void addRaw(FixedHash<N> const& _h);
void removeRaw(FixedHash<N> const& _h);
bool containsRaw(FixedHash<N> const& _h) const { return this->contains(_h); }
enum { BitsPerBloom = 3 };
private:
void init() { for (unsigned i = 0; i < CounterSize; ++i) m_refCounter[i] = 0; }
static bool isBitSet(FixedHash<N> const& _h, unsigned _index);
enum { CounterSize = 8 * TopicBloomFilterBase::size };
std::array<uint16_t, CounterSize> m_refCounter;
};
static unsigned const c_powerOfTwoBitMmask[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
template <unsigned N>
void TopicBloomFilterBase<N>::addRaw(FixedHash<N> const& _h)
{
*this |= _h;
for (unsigned i = 0; i < CounterSize; ++i)
if (isBitSet(_h, i))
{
if (m_refCounter[i] != std::numeric_limits<uint16_t>::max())
m_refCounter[i]++;
else
BOOST_THROW_EXCEPTION(Overflow());
}
}
template <unsigned N>
void TopicBloomFilterBase<N>::removeRaw(FixedHash<N> const& _h)
{
for (unsigned i = 0; i < CounterSize; ++i)
if (isBitSet(_h, i))
{
if (m_refCounter[i])
m_refCounter[i]--;
if (!m_refCounter[i])
(*this)[i / 8] &= ~c_powerOfTwoBitMmask[i % 8];
}
}
template <unsigned N>
bool TopicBloomFilterBase<N>::isBitSet(FixedHash<N> const& _h, unsigned _index)
{
unsigned iByte = _index / 8;
unsigned iBit = _index % 8;
return (_h[iByte] & c_powerOfTwoBitMmask[iBit]) != 0;
}
using TopicBloomFilter = TopicBloomFilterBase<TopicBloomFilterSize>;
}
}
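
A short usage sketch of the new filter, showing the reference counting that keeps a bloom bit set until the last topic referencing it is removed; the include path, the wrapper function and the literal topic value are made up, the rest uses only the interface declared above:

#include <libwhisper/BloomFilter.h>   // assumed include path

using namespace dev::shh;

void bloomFilterSketch()
{
	TopicBloomFilter filter;
	AbridgedTopic topic("0xdeadbeef");          // hypothetical 4-byte topic

	filter.addBloom(topic);                     // sets the topic's bloom bits, counters at 1
	filter.addBloom(topic);                     // same bits again, counters at 2
	filter.removeBloom(topic);                  // counters back to 1, bits stay set
	bool still = filter.containsBloom(topic);   // true
	filter.removeBloom(topic);                  // last reference gone, bits cleared
	bool gone = filter.containsBloom(topic);    // false
	(void)still; (void)gone;
}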

4
libwhisper/CMakeLists.txt

@@ -12,7 +12,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTATICLIB")
aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${DB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS})
set(EXECUTABLE whisper)
@@ -21,8 +21,6 @@ file(GLOB HEADERS "*.h")
add_library(${EXECUTABLE} ${SRC_LIST} ${HEADERS})
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ethcore)
target_link_libraries(${EXECUTABLE} devcrypto)
target_link_libraries(${EXECUTABLE} devcore)

Some files were not shown because too many files changed in this diff
