diff --git a/CMakeLists.txt b/CMakeLists.txt
index a5c4fe930..fd3188eae 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -37,7 +37,7 @@ option(ETHKEY "Build the CLI key manager component" ON)
option(SOLIDITY "Build the Solidity language components" ON)
option(SERPENT "Build the Serpent language components" ON)
option(TOOLS "Build the tools components" ON)
-option(NCURSES "Build the NCurses components" ON)
+option(NCURSES "Build the NCurses components" OFF)
option(GUI "Build GUI components (AlethZero, Mix)" ON)
option(TESTS "Build the tests." ON)
option(EVMJIT "Build just-in-time compiler for EVM code (requires LLVM)" OFF)
@@ -222,7 +222,7 @@ elseif (BUNDLE STREQUAL "full")
set(SOLIDITY ON)
set(USENPM ON)
set(GUI ON)
- set(NCURSES ${DECENT_PLATFORM})
+# set(NCURSES ${DECENT_PLATFORM})
set(TOOLS ON)
set(TESTS ON)
set(FATDB ON)
@@ -249,7 +249,7 @@ elseif (BUNDLE STREQUAL "user")
set(SOLIDITY OFF)
set(USENPM OFF)
set(GUI ON)
- set(NCURSES ${DECENT_PLATFORM})
+# set(NCURSES ${DECENT_PLATFORM})
set(TOOLS ON)
set(TESTS OFF)
elseif (BUNDLE STREQUAL "wallet")
@@ -421,6 +421,7 @@ if (TOOLS)
add_subdirectory(rlp)
add_subdirectory(abi)
+ add_subdirectory(ethvm)
add_subdirectory(eth)
if("x${CMAKE_BUILD_TYPE}" STREQUAL "xDebug")
diff --git a/alethzero/DappLoader.cpp b/alethzero/DappLoader.cpp
index b2249ae5b..662a1afbc 100644
--- a/alethzero/DappLoader.cpp
+++ b/alethzero/DappLoader.cpp
@@ -108,6 +108,7 @@ void DappLoader::downloadComplete(QNetworkReply* _reply)
//inject web3 js
QByteArray content = "\n");
content.append(_reply->readAll());
QString contentType = _reply->header(QNetworkRequest::ContentTypeHeader).toString();
diff --git a/alethzero/DappLoader.h b/alethzero/DappLoader.h
index f2d3ee5e5..39176e750 100644
--- a/alethzero/DappLoader.h
+++ b/alethzero/DappLoader.h
@@ -78,6 +78,8 @@ public:
///@param _uri Page Uri
void loadPage(QString const& _uri);
+ void setSessionKey(std::string const& _s) { m_sessionKey = _s; }
+
signals:
void dappReady(Dapp& _dapp);
void pageReady(QByteArray const& _content, QString const& _mimeType, QUrl const& _uri);
@@ -99,5 +101,6 @@ private:
std::set<QUrl> m_pageUrls;
QByteArray m_web3Js;
dev::Address m_nameReg;
+ std::string m_sessionKey;
};
diff --git a/alethzero/Main.ui b/alethzero/Main.ui
index efb6256af..f798437e6 100644
--- a/alethzero/Main.ui
+++ b/alethzero/Main.ui
@@ -44,7 +44,14 @@
0 bytes used
+     <item>
+      <widget class="QLabel" name="syncStatus">
+       <property name="text">
+        <string/>
+       </property>
+      </widget>
+     </item>
diff --git a/alethzero/MainWin.cpp b/alethzero/MainWin.cpp
index 8cde476c3..e88086e0d 100644
--- a/alethzero/MainWin.cpp
+++ b/alethzero/MainWin.cpp
@@ -198,6 +198,7 @@ Main::Main(QWidget *parent) :
statusBar()->addPermanentWidget(ui->balance);
statusBar()->addPermanentWidget(ui->peerCount);
statusBar()->addPermanentWidget(ui->mineStatus);
+ statusBar()->addPermanentWidget(ui->syncStatus);
statusBar()->addPermanentWidget(ui->chainStatus);
statusBar()->addPermanentWidget(ui->blockCount);
@@ -209,7 +210,9 @@ Main::Main(QWidget *parent) :
m_webThree.reset(new WebThreeDirect(string("AlethZero/v") + dev::Version + "/" DEV_QUOTED(ETH_BUILD_TYPE) "/" DEV_QUOTED(ETH_BUILD_PLATFORM), getDataDir(), WithExisting::Trust, {"eth"/*, "shh"*/}, p2p::NetworkPreferences(), network));
m_httpConnector.reset(new jsonrpc::HttpServer(SensibleHttpPort, "", "", dev::SensibleHttpThreads));
- m_server.reset(new OurWebThreeStubServer(*m_httpConnector, *web3(), this));
+ auto w3ss = new OurWebThreeStubServer(*m_httpConnector, this);
+ m_server.reset(w3ss);
+ auto sessionKey = w3ss->newSession({true});
connect(&*m_server, SIGNAL(onNewId(QString)), SLOT(addNewId(QString)));
m_server->setIdentities(keysAsVector(owned()));
m_server->StartListening();
@@ -226,12 +229,16 @@ Main::Main(QWidget *parent) :
m_dappHost.reset(new DappHost(8081));
m_dappLoader = new DappLoader(this, web3(), getNameReg());
+ m_dappLoader->setSessionKey(sessionKey);
connect(m_dappLoader, &DappLoader::dappReady, this, &Main::dappLoaded);
connect(m_dappLoader, &DappLoader::pageReady, this, &Main::pageLoaded);
// ui->webView->page()->settings()->setAttribute(QWebEngineSettings::DeveloperExtrasEnabled, true);
// QWebEngineInspector* inspector = new QWebEngineInspector();
// inspector->setPage(page);
setBeneficiary(*m_keyManager.accounts().begin());
+
+ ethereum()->setDefault(LatestBlock);
+
readSettings();
m_transact = new Transact(this, this);
@@ -240,6 +247,10 @@ Main::Main(QWidget *parent) :
#if !ETH_FATDB
removeDockWidget(ui->dockWidget_accounts);
+#endif
+#if !ETH_EVMJIT
+ ui->jitvm->setEnabled(false);
+ ui->jitvm->setChecked(false);
#endif
installWatches();
startTimer(100);
@@ -805,6 +816,7 @@ void Main::readSettings(bool _skipGeometry)
ui->usePrivate->setChecked(m_privateChain.size());
ui->verbosity->setValue(s.value("verbosity", 1).toInt());
ui->jitvm->setChecked(s.value("jitvm", true).toBool());
+ on_jitvm_triggered();
ui->urlEdit->setText(s.value("url", "about:blank").toString()); //http://gavwood.com/gavcoin.html
on_urlEdit_returnPressed();
@@ -1240,9 +1252,15 @@ void Main::refreshBlockCount()
{
auto d = ethereum()->blockChain().details();
BlockQueueStatus b = ethereum()->blockQueueStatus();
- HashChainStatus h = ethereum()->hashChainStatus();
- ui->chainStatus->setText(QString("%9/%10%11 hashes %3 ready %4 verifying %5 unverified %6 future %7 unknown %8 bad %1 #%2")
- .arg(m_privateChain.size() ? "[" + m_privateChain + "] " : "testnet").arg(d.number).arg(b.verified).arg(b.verifying).arg(b.unverified).arg(b.future).arg(b.unknown).arg(b.bad).arg(h.received).arg(h.estimated ? "~" : "").arg(h.total));
+ SyncStatus sync = ethereum()->syncStatus();
+ QString syncStatus = EthereumHost::stateName(sync.state);
+ if (sync.state == SyncState::HashesParallel || sync.state == SyncState::HashesSingle)
+ syncStatus += QString(": %1/%2%3").arg(sync.hashesReceived).arg(sync.hashesEstimated ? "~" : "").arg(sync.hashesTotal);
+ if (sync.state == SyncState::Blocks || sync.state == SyncState::NewBlocks)
+ syncStatus += QString(": %1/%2").arg(sync.blocksReceived).arg(sync.blocksTotal);
+ ui->syncStatus->setText(syncStatus);
+ ui->chainStatus->setText(QString("%3 importing %4 ready %5 verifying %6 unverified %7 future %8 unknown %9 bad %1 #%2")
+ .arg(m_privateChain.size() ? "[" + m_privateChain + "] " : "testnet").arg(d.number).arg(b.importing).arg(b.verified).arg(b.verifying).arg(b.unverified).arg(b.future).arg(b.unknown).arg(b.bad));
}
void Main::on_turboMining_triggered()
diff --git a/alethzero/OurWebThreeStubServer.cpp b/alethzero/OurWebThreeStubServer.cpp
index 7e9836818..39ab69a19 100644
--- a/alethzero/OurWebThreeStubServer.cpp
+++ b/alethzero/OurWebThreeStubServer.cpp
@@ -31,10 +31,9 @@ using namespace dev::eth;
OurWebThreeStubServer::OurWebThreeStubServer(
jsonrpc::AbstractServerConnector& _conn,
- WebThreeDirect& _web3,
Main* _main
):
- WebThreeStubServer(_conn, _web3, make_shared<OurAccountHolder>(_web3, _main), _main->owned().toVector().toStdVector()),
+ WebThreeStubServer(_conn, *_main->web3(), make_shared<OurAccountHolder>(_main), _main->owned().toVector().toStdVector(), _main->keyManager()),
m_main(_main)
{
}
@@ -46,12 +45,8 @@ string OurWebThreeStubServer::shh_newIdentity()
return toJS(kp.pub());
}
-OurAccountHolder::OurAccountHolder(
- WebThreeDirect& _web3,
- Main* _main
-):
- AccountHolder([=](){ return m_web3->ethereum(); }),
- m_web3(&_web3),
+OurAccountHolder::OurAccountHolder(Main* _main):
+ AccountHolder([=](){ return _main->ethereum(); }),
m_main(_main)
{
connect(_main, SIGNAL(poll()), this, SLOT(doValidations()));
@@ -135,7 +130,7 @@ void OurAccountHolder::doValidations()
else
// sign and submit.
if (Secret s = m_main->retrieveSecret(t.from))
- m_web3->ethereum()->submitTransaction(s, t);
+ m_main->ethereum()->submitTransaction(s, t);
}
}
@@ -155,7 +150,7 @@ bool OurAccountHolder::validateTransaction(TransactionSkeleton const& _t, bool _
return showCreationNotice(_t, _toProxy);
}
- h256 contractCodeHash = m_web3->ethereum()->postState().codeHash(_t.to);
+ h256 contractCodeHash = m_main->ethereum()->postState().codeHash(_t.to);
if (contractCodeHash == EmptySHA3)
{
// recipient has no code - nothing special about this transaction, show basic value transfer info
diff --git a/alethzero/OurWebThreeStubServer.h b/alethzero/OurWebThreeStubServer.h
index a07188b2d..43985b640 100644
--- a/alethzero/OurWebThreeStubServer.h
+++ b/alethzero/OurWebThreeStubServer.h
@@ -34,10 +34,7 @@ class OurAccountHolder: public QObject, public dev::eth::AccountHolder
Q_OBJECT
public:
- OurAccountHolder(
- dev::WebThreeDirect& _web3,
- Main* _main
- );
+ OurAccountHolder(Main* _main);
public slots:
void doValidations();
@@ -59,7 +56,6 @@ private:
std::queue m_queued;
dev::Mutex x_queued;
- dev::WebThreeDirect* m_web3;
Main* m_main;
};
@@ -70,7 +66,6 @@ class OurWebThreeStubServer: public QObject, public WebThreeStubServer
public:
OurWebThreeStubServer(
jsonrpc::AbstractServerConnector& _conn,
- dev::WebThreeDirect& _web3,
Main* main
);
diff --git a/cmake/EthCompilerSettings.cmake b/cmake/EthCompilerSettings.cmake
index eb8588ceb..53535a489 100644
--- a/cmake/EthCompilerSettings.cmake
+++ b/cmake/EthCompilerSettings.cmake
@@ -21,7 +21,7 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -DETH_DEBUG")
set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os -DNDEBUG -DETH_RELEASE")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG -DETH_RELEASE")
- set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g -DETH_DEBUG")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g -DETH_RELEASE")
if ("${CMAKE_SYSTEM_NAME}" MATCHES "Linux")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++ -fcolor-diagnostics -Qunused-arguments -DBOOST_ASIO_HAS_CLANG_LIBCXX")
diff --git a/eth/main.cpp b/eth/main.cpp
index 5497a2cda..4b23ef62a 100644
--- a/eth/main.cpp
+++ b/eth/main.cpp
@@ -93,6 +93,7 @@ void interactiveHelp()
<< " accounts Gives information on all owned accounts (balances, mining beneficiary and default signer)." << endl
<< " newaccount Creates a new account with the given name." << endl
<< " transact Execute a given transaction." << endl
+ << " transactnonce Execute a given transaction with a specified nonce." << endl
<< " txcreate Execute a given contract creation transaction." << endl
<< " send Execute a given transaction with current secret." << endl
<< " contract Create a new contract with current secret." << endl
@@ -108,6 +109,7 @@ void interactiveHelp()
<< " exportconfig Export the config (.RLP) to the path provided." << endl
<< " importconfig Import the config (.RLP) from the path provided." << endl
<< " inspect Dumps a contract to /.evm." << endl
+ << " reprocess Reprocess a given block." << endl
<< " dumptrace Dumps a transaction trace" << endl << "to . should be one of pretty, standard, standard+." << endl
<< " dumpreceipt Dumps a transation receipt." << endl
<< " exit Exits the application." << endl;
@@ -124,6 +126,7 @@ void help()
#if ETH_JSONRPC || !ETH_TRUE
<< " -j,--json-rpc Enable JSON-RPC server (default: off)." << endl
<< " --json-rpc-port Specify JSON-RPC server port (implies '-j', default: " << SensibleHttpPort << ")." << endl
+ << " --admin Specify admin session key for JSON-RPC (default: auto-generated and printed at startup)." << endl
#endif
<< " -K,--kill First kill the blockchain." << endl
<< " -R,--rebuild Rebuild the blockchain from the existing database." << endl
@@ -286,6 +289,7 @@ int main(int argc, char** argv)
#if ETH_JSONRPC
int jsonrpc = -1;
#endif
+ string jsonAdmin;
bool upnp = true;
WithExisting killChain = WithExisting::Trust;
bool jit = false;
@@ -597,6 +601,8 @@ int main(int argc, char** argv)
jsonrpc = jsonrpc == -1 ? SensibleHttpPort : jsonrpc;
else if (arg == "--json-rpc-port" && i + 1 < argc)
jsonrpc = atoi(argv[++i]);
+ else if (arg == "--json-admin" && i + 1 < argc)
+ jsonAdmin = argv[++i];
#endif
#if ETH_JSCONSOLE
else if (arg == "--console")
@@ -679,7 +685,7 @@ int main(int argc, char** argv)
VMFactory::setKind(jit ? VMKind::JIT : VMKind::Interpreter);
auto netPrefs = publicIP.empty() ? NetworkPreferences(listenIP ,listenPort, upnp) : NetworkPreferences(publicIP, listenIP ,listenPort, upnp);
auto nodesState = contents((dbPath.size() ? dbPath : getDataDir()) + "/network.rlp");
- std::string clientImplString = "++eth/" + clientName + "v" + dev::Version + "/" DEV_QUOTED(ETH_BUILD_TYPE) "/" DEV_QUOTED(ETH_BUILD_PLATFORM) + (jit ? "/JIT" : "");
+ std::string clientImplString = "++eth/" + clientName + "v" + dev::Version + "-" + string(DEV_QUOTED(ETH_COMMIT_HASH)).substr(0, 8) + (ETH_CLEAN_REPO ? "" : "*") + "/" DEV_QUOTED(ETH_BUILD_TYPE) "/" DEV_QUOTED(ETH_BUILD_PLATFORM) + (jit ? "/JIT" : "");
dev::WebThreeDirect web3(
clientImplString,
dbPath,
@@ -806,14 +812,14 @@ int main(int argc, char** argv)
cout << "Transaction Signer: " << signingKey << endl;
cout << "Mining Benefactor: " << beneficiary << endl;
- web3.startNetwork();
- cout << "Node ID: " << web3.enode() << endl;
- if (bootstrap)
- for (auto const& i: Host::pocHosts())
- web3.requirePeer(i.first, i.second);
- if (remoteHost.size())
- web3.addNode(p2p::NodeId(), remoteHost + ":" + toString(remotePort));
+ if (bootstrap || !remoteHost.empty())
+ {
+ web3.startNetwork();
+ cout << "Node ID: " << web3.enode() << endl;
+ }
+ else
+ cout << "Networking disabled. To start, use netstart or pass -b or a remote host." << endl;
#if ETH_JSONRPC || !ETH_TRUE
shared_ptr jsonrpcServer;
@@ -821,11 +827,22 @@ int main(int argc, char** argv)
if (jsonrpc > -1)
{
jsonrpcConnector = unique_ptr(new jsonrpc::HttpServer(jsonrpc, "", "", SensibleHttpThreads));
- jsonrpcServer = shared_ptr(new WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared([&](){return web3.ethereum();}, getAccountPassword, keyManager), vector()));
+ jsonrpcServer = shared_ptr(new WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared([&](){return web3.ethereum();}, getAccountPassword, keyManager), vector(), keyManager));
jsonrpcServer->StartListening();
+ if (jsonAdmin.empty())
+ jsonAdmin = jsonrpcServer->newSession(SessionPermissions{true});
+ else
+ jsonrpcServer->addSession(jsonAdmin, SessionPermissions{true});
+ cout << "JSONRPC Admin Session Key: " << jsonAdmin << endl;
}
#endif
+ if (bootstrap)
+ for (auto const& i: Host::pocHosts())
+ web3.requirePeer(i.first, i.second);
+ if (!remoteHost.empty())
+ web3.addNode(p2p::NodeId(), remoteHost + ":" + toString(remotePort));
+
signal(SIGABRT, &sighandler);
signal(SIGTERM, &sighandler);
signal(SIGINT, &sighandler);
@@ -965,8 +982,13 @@ int main(int argc, char** argv)
if (jsonrpc < 0)
jsonrpc = SensibleHttpPort;
jsonrpcConnector = unique_ptr(new jsonrpc::HttpServer(jsonrpc, "", "", SensibleHttpThreads));
- jsonrpcServer = shared_ptr(new WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared([&](){return web3.ethereum();}, getAccountPassword, keyManager), vector()));
+ jsonrpcServer = shared_ptr(new WebThreeStubServer(*jsonrpcConnector.get(), web3, make_shared([&](){ return web3.ethereum(); }, getAccountPassword, keyManager), vector(), keyManager));
jsonrpcServer->StartListening();
+ if (jsonAdmin.empty())
+ jsonAdmin = jsonrpcServer->newSession(SessionPermissions{true});
+ else
+ jsonrpcServer->addSession(jsonAdmin, SessionPermissions{true});
+ cout << "JSONRPC Admin Session Key: " << jsonAdmin << endl;
}
else if (cmd == "jsonstop")
{
@@ -1175,6 +1197,75 @@ int main(int argc, char** argv)
else
cwarn << "Require parameters: submitTransaction ADDRESS AMOUNT GASPRICE GAS SECRET DATA";
}
+
+ else if (c && cmd == "transactnonce")
+ {
+ auto const& bc =c->blockChain();
+ auto h = bc.currentHash();
+ auto blockData = bc.block(h);
+ BlockInfo info(blockData);
+ if (iss.peek() != -1)
+ {
+ string hexAddr;
+ u256 amount;
+ u256 gasPrice;
+ u256 gas;
+ string sechex;
+ string sdata;
+ u256 nonce;
+
+ iss >> hexAddr >> amount >> gasPrice >> gas >> sechex >> sdata >> nonce;
+
+ if (!gasPrice)
+ gasPrice = gasPricer->bid(priority);
+
+ cnote << "Data:";
+ cnote << sdata;
+ bytes data = dev::eth::parseData(sdata);
+ cnote << "Bytes:";
+ string sbd = asString(data);
+ bytes bbd = asBytes(sbd);
+ stringstream ssbd;
+ ssbd << bbd;
+ cnote << ssbd.str();
+ int ssize = sechex.length();
+ int size = hexAddr.length();
+ u256 minGas = (u256)Transaction::gasRequired(data, 0);
+ if (size < 40)
+ {
+ if (size > 0)
+ cwarn << "Invalid address length:" << size;
+ }
+ else if (gas < minGas)
+ cwarn << "Minimum gas amount is" << minGas;
+ else if (ssize < 40)
+ {
+ if (ssize > 0)
+ cwarn << "Invalid secret length:" << ssize;
+ }
+ else
+ {
+ try
+ {
+ Secret secret = h256(fromHex(sechex));
+ Address dest = h160(fromHex(hexAddr));
+ c->submitTransaction(secret, amount, dest, data, gas, gasPrice, nonce);
+ }
+ catch (BadHexCharacter& _e)
+ {
+ cwarn << "invalid hex character, transaction rejected";
+ cwarn << boost::diagnostic_information(_e);
+ }
+ catch (...)
+ {
+ cwarn << "transaction rejected";
+ }
+ }
+ }
+ else
+ cwarn << "Require parameters: submitTransaction ADDRESS AMOUNT GASPRICE GAS SECRET DATA NONCE";
+ }
+
else if (c && cmd == "txcreate")
{
auto const& bc =c->blockChain();
@@ -1403,6 +1494,22 @@ int main(int argc, char** argv)
cout << "Hex: " << toHex(rb) << endl;
cout << r << endl;
}
+ else if (c && cmd == "reprocess")
+ {
+ string block;
+ iss >> block;
+ h256 blockHash;
+ try
+ {
+ if (block.size() == 64 || block.size() == 66)
+ blockHash = h256(block);
+ else
+ blockHash = c->blockChain().numberHash(stoi(block));
+ c->state(blockHash);
+ }
+ catch (...)
+ {}
+ }
else if (c && cmd == "dumptrace")
{
unsigned block;
diff --git a/ethminer/CMakeLists.txt b/ethminer/CMakeLists.txt
index 3ec92fc14..df828bc47 100644
--- a/ethminer/CMakeLists.txt
+++ b/ethminer/CMakeLists.txt
@@ -22,6 +22,10 @@ target_link_libraries(${EXECUTABLE} ${Boost_REGEX_LIBRARIES})
if (JSONRPC)
target_link_libraries(${EXECUTABLE} ${JSON_RPC_CPP_CLIENT_LIBRARIES})
+ target_link_libraries(${EXECUTABLE} ${CURL_LIBRARIES})
+ if (DEFINED WIN32 AND NOT DEFINED CMAKE_COMPILER_IS_MINGW)
+ eth_copy_dlls(${EXECUTABLE} CURL_DLLS)
+ endif()
endif()
target_link_libraries(${EXECUTABLE} ethcore)
diff --git a/ethminer/MinerAux.h b/ethminer/MinerAux.h
index 245b97ceb..6de3913e4 100644
--- a/ethminer/MinerAux.h
+++ b/ethminer/MinerAux.h
@@ -30,6 +30,7 @@
#include
#include
+#include
#include
#include
@@ -97,11 +98,11 @@ public:
if ((arg == "-F" || arg == "--farm") && i + 1 < argc)
{
mode = OperationMode::Farm;
- farmURL = argv[++i];
+ m_farmURL = argv[++i];
}
else if (arg == "--farm-recheck" && i + 1 < argc)
try {
- farmRecheckPeriod = stol(argv[++i]);
+ m_farmRecheckPeriod = stol(argv[++i]);
}
catch (...)
{
@@ -110,7 +111,7 @@ public:
}
else if (arg == "--opencl-platform" && i + 1 < argc)
try {
- openclPlatform = stol(argv[++i]);
+ m_openclPlatform = stol(argv[++i]);
}
catch (...)
{
@@ -119,8 +120,8 @@ public:
}
else if (arg == "--opencl-device" && i + 1 < argc)
try {
- openclDevice = stol(argv[++i]);
- miningThreads = 1;
+ m_openclDevice = stol(argv[++i]);
+ m_miningThreads = 1;
}
catch (...)
{
@@ -128,21 +129,20 @@ public:
throw BadArgument();
}
else if (arg == "--list-devices")
- {
- ProofOfWork::GPUMiner::listDevices();
- exit(0);
- }
- else if (arg == "--use-chunks")
- {
- dagChunks = 4;
- }
+ m_shouldListDevices = true;
+ else if (arg == "--allow-opencl-cpu")
+ m_clAllowCPU = true;
+ else if (arg == "--cl-extragpu-mem" && i + 1 < argc)
+ m_extraGPUMemory = 1000000 * stol(argv[++i]);
+ else if (arg == "--force-single-chunk")
+ m_forceSingleChunk = true;
else if (arg == "--phone-home" && i + 1 < argc)
{
string m = argv[++i];
if (isTrue(m))
- phoneHome = true;
+ m_phoneHome = true;
else if (isFalse(m))
- phoneHome = false;
+ m_phoneHome = false;
else
{
cerr << "Bad " << arg << " option: " << m << endl;
@@ -151,7 +151,7 @@ public:
}
else if (arg == "--benchmark-warmup" && i + 1 < argc)
try {
- benchmarkWarmup = stol(argv[++i]);
+ m_benchmarkWarmup = stol(argv[++i]);
}
catch (...)
{
@@ -160,7 +160,7 @@ public:
}
else if (arg == "--benchmark-trial" && i + 1 < argc)
try {
- benchmarkTrial = stol(argv[++i]);
+ m_benchmarkTrial = stol(argv[++i]);
}
catch (...)
{
@@ -169,7 +169,7 @@ public:
}
else if (arg == "--benchmark-trials" && i + 1 < argc)
try {
- benchmarkTrials = stol(argv[++i]);
+ m_benchmarkTrials = stol(argv[++i]);
}
catch (...)
{
@@ -179,21 +179,12 @@ public:
else if (arg == "-C" || arg == "--cpu")
m_minerType = MinerType::CPU;
else if (arg == "-G" || arg == "--opencl")
- {
- if (!ProofOfWork::GPUMiner::haveSufficientMemory())
- {
- cout << "No GPU device with sufficient memory was found. Defaulting to CPU" << endl;
- m_minerType = MinerType::CPU;
- }
- else
- {
- m_minerType = MinerType::GPU;
- miningThreads = 1;
- }
- }
+ m_minerType = MinerType::GPU;
+ else if (arg == "--current-block" && i + 1 < argc)
+ m_currentBlock = stol(argv[++i]);
else if (arg == "--no-precompute")
{
- precompute = false;
+ m_precompute = false;
}
else if ((arg == "-D" || arg == "--create-dag") && i + 1 < argc)
{
@@ -201,7 +192,7 @@ public:
mode = OperationMode::DAGInit;
try
{
- initDAG = stol(m);
+ m_initDAG = stol(m);
}
catch (...)
{
@@ -251,7 +242,7 @@ public:
else if ((arg == "-t" || arg == "--mining-threads") && i + 1 < argc)
{
try {
- miningThreads = stol(argv[++i]);
+ m_miningThreads = stol(argv[++i]);
}
catch (...)
{
@@ -266,21 +257,36 @@ public:
void execute()
{
+ if (m_shouldListDevices)
+ {
+ ProofOfWork::GPUMiner::listDevices();
+ exit(0);
+ }
+
if (m_minerType == MinerType::CPU)
- ProofOfWork::CPUMiner::setNumInstances(miningThreads);
+ ProofOfWork::CPUMiner::setNumInstances(m_miningThreads);
else if (m_minerType == MinerType::GPU)
{
- ProofOfWork::GPUMiner::setDefaultPlatform(openclPlatform);
- ProofOfWork::GPUMiner::setDefaultDevice(openclDevice);
- ProofOfWork::GPUMiner::setNumInstances(miningThreads);
- ProofOfWork::GPUMiner::setDagChunks(dagChunks);
+ ProofOfWork::GPUMiner::setNumInstances(m_miningThreads);
+ if (!ProofOfWork::GPUMiner::configureGPU(
+ m_openclPlatform,
+ m_openclDevice,
+ m_clAllowCPU,
+ m_extraGPUMemory,
+ m_forceSingleChunk,
+ m_currentBlock
+ ))
+ {
+ cout << "No GPU device with sufficient memory was found. Can't GPU mine. Remove the -G argument" << endl;
+ exit(1);
+ }
}
if (mode == OperationMode::DAGInit)
- doInitDAG(initDAG);
+ doInitDAG(m_initDAG);
else if (mode == OperationMode::Benchmark)
- doBenchmark(m_minerType, phoneHome, benchmarkWarmup, benchmarkTrial, benchmarkTrials);
+ doBenchmark(m_minerType, m_phoneHome, m_benchmarkWarmup, m_benchmarkTrial, m_benchmarkTrials);
else if (mode == OperationMode::Farm)
- doFarm(m_minerType, farmURL, farmRecheckPeriod);
+ doFarm(m_minerType, m_farmURL, m_farmRecheckPeriod);
}
static void streamHelp(ostream& _out)
@@ -311,7 +317,11 @@ public:
<< " --opencl-platform When mining using -G/--opencl use OpenCL platform n (default: 0)." << endl
<< " --opencl-device When mining using -G/--opencl use OpenCL device n (default: 0)." << endl
<< " -t, --mining-threads Limit number of CPU/GPU miners to n (default: use everything available on selected platform)" << endl
- << " --use-chunks When using GPU mining upload the DAG to the GPU in 4 chunks. " << endl
+ << " --allow-opencl-cpu Allows CPU to be considered as an OpenCL device if the OpenCL platform supports it." << endl
+ << " --list-devices List the detected OpenCL devices and exit." < m_currentBlock;
+ // default value is 350MB of GPU memory for other stuff (windows system rendering, e.t.c.)
+ unsigned m_extraGPUMemory = 350000000;
/// DAG initialisation param.
- unsigned initDAG = 0;
+ unsigned m_initDAG = 0;
/// Benchmarking params
- bool phoneHome = true;
- unsigned benchmarkWarmup = 3;
- unsigned benchmarkTrial = 3;
- unsigned benchmarkTrials = 5;
+ bool m_phoneHome = true;
+ unsigned m_benchmarkWarmup = 3;
+ unsigned m_benchmarkTrial = 3;
+ unsigned m_benchmarkTrials = 5;
/// Farm params
- string farmURL = "http://127.0.0.1:8545";
- unsigned farmRecheckPeriod = 500;
- bool precompute = true;
+ string m_farmURL = "http://127.0.0.1:8545";
+ unsigned m_farmRecheckPeriod = 500;
+ bool m_precompute = true;
};
diff --git a/ethvm/CMakeLists.txt b/ethvm/CMakeLists.txt
new file mode 100644
index 000000000..ed093061c
--- /dev/null
+++ b/ethvm/CMakeLists.txt
@@ -0,0 +1,19 @@
+cmake_policy(SET CMP0015 NEW)
+set(CMAKE_AUTOMOC OFF)
+
+aux_source_directory(. SRC_LIST)
+
+include_directories(BEFORE ..)
+include_directories(${LEVELDB_INCLUDE_DIRS})
+
+set(EXECUTABLE ethvm)
+
+add_executable(${EXECUTABLE} ${SRC_LIST})
+
+target_link_libraries(${EXECUTABLE} ethereum)
+
+if (APPLE)
+ install(TARGETS ${EXECUTABLE} DESTINATION bin)
+else()
+ eth_install_executable(${EXECUTABLE})
+endif()
diff --git a/ethvm/main.cpp b/ethvm/main.cpp
new file mode 100644
index 000000000..08a1b4508
--- /dev/null
+++ b/ethvm/main.cpp
@@ -0,0 +1,200 @@
+/*
+ This file is part of cpp-ethereum.
+
+ cpp-ethereum is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ cpp-ethereum is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with cpp-ethereum. If not, see .
+*/
+/** @file main.cpp
+ * @author Gav Wood
+ * @date 2014
+ * EVM Execution tool.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+using namespace std;
+using namespace dev;
+using namespace eth;
+
+void help()
+{
+ cout
+ << "Usage ethvm [trace|stats|output] (|--)" << endl
+ << "Transaction options:" << endl
+ << " --value Transaction should transfer the wei (default: 0)." << endl
+ << " --gas Transaction should be given gas (default: block gas limit)." << endl
+ << " --gas-price Transaction's gas price' should be (default: 0)." << endl
+ << " --sender Transaction sender should be (default: 0000...0069)." << endl
+ << " --origin Transaction origin should be (default: 0000...0069)." << endl
+#if ETH_EVMJIT || !ETH_TRUE
+ << endl
+ << "VM options:" << endl
+ << " -J,--jit Enable LLVM VM (default: off)." << endl
+ << " --smart Enable smart VM (default: off)." << endl
+#endif
+ << endl
+ << "Options for trace:" << endl
+ << " --flat Minimal whitespace in the JSON." << endl
+ << " --mnemonics Show instruction mnemonics in the trace (non-standard)." << endl
+ << endl
+ << "General options:" << endl
+ << " -V,--version Show the version and exit." << endl
+ << " -h,--help Show this help message and exit." << endl;
+ exit(0);
+}
+
+void version()
+{
+ cout << "ethvm version " << dev::Version << endl;
+ cout << "By Gav Wood, 2015." << endl;
+ cout << "Build: " << DEV_QUOTED(ETH_BUILD_PLATFORM) << "/" << DEV_QUOTED(ETH_BUILD_TYPE) << endl;
+ exit(0);
+}
+
+enum class Mode
+{
+ Trace,
+ Statistics,
+ OutputOnly
+};
+
+int main(int argc, char** argv)
+{
+ string incoming = "--";
+
+ Mode mode = Mode::Statistics;
+ State state;
+ Address sender = Address(69);
+ Address origin = Address(69);
+ u256 value = 0;
+ u256 gas = state.gasLimitRemaining();
+ u256 gasPrice = 0;
+ bool styledJson = true;
+ StandardTrace st;
+
+ for (int i = 1; i < argc; ++i)
+ {
+ string arg = argv[i];
+ if (arg == "-h" || arg == "--help")
+ help();
+ else if (arg == "-V" || arg == "--version")
+ version();
+#if ETH_EVMJIT
+ else if (arg == "-J" || arg == "--jit")
+ VMFactory::setKind(VMKind::JIT);
+ else if (arg == "--smart")
+ VMFactory::setKind(VMKind::Smart);
+#endif
+ else if (arg == "--mnemonics")
+ st.setShowMnemonics();
+ else if (arg == "--flat")
+ styledJson = false;
+ else if (arg == "--value" && i + 1 < argc)
+ value = u256(argv[++i]);
+ else if (arg == "--sender" && i + 1 < argc)
+ sender = Address(argv[++i]);
+ else if (arg == "--origin" && i + 1 < argc)
+ origin = Address(argv[++i]);
+ else if (arg == "--gas" && i + 1 < argc)
+ gas = u256(argv[++i]);
+ else if (arg == "--gas-price" && i + 1 < argc)
+ gasPrice = u256(argv[++i]);
+ else if (arg == "--value" && i + 1 < argc)
+ value = u256(argv[++i]);
+ else if (arg == "--value" && i + 1 < argc)
+ value = u256(argv[++i]);
+ else if (arg == "stats")
+ mode = Mode::Statistics;
+ else if (arg == "output")
+ mode = Mode::OutputOnly;
+ else if (arg == "trace")
+ mode = Mode::Trace;
+ else
+ incoming = arg;
+ }
+
+ bytes code;
+ if (incoming == "--" || incoming.empty())
+ for (int i = cin.get(); i != -1; i = cin.get())
+ code.push_back((char)i);
+ else
+ code = contents(incoming);
+ bytes data = fromHex(boost::trim_copy(asString(code)));
+ if (data.empty())
+ data = code;
+
+ state.addBalance(sender, value);
+ Executive executive(state, eth::LastHashes(), 0);
+ ExecutionResult res;
+ executive.setResultRecipient(res);
+ Transaction t = eth::Transaction(value, gasPrice, gas, data, 0);
+ t.forceSender(sender);
+
+ unordered_map> counts;
+ unsigned total = 0;
+ bigint memTotal;
+ auto onOp = [&](uint64_t step, Instruction inst, bigint m, bigint gasCost, bigint gas, VM* vm, ExtVMFace const* extVM) {
+ if (mode == Mode::Statistics)
+ {
+ counts[(byte)inst].first++;
+ counts[(byte)inst].second += gasCost;
+ total++;
+ if (m > 0)
+ memTotal = m;
+ }
+ else if (mode == Mode::Trace)
+ st(step, inst, m, gasCost, gas, vm, extVM);
+ };
+
+ executive.initialize(t);
+ executive.create(sender, value, gasPrice, gas, &data, origin);
+ boost::timer timer;
+ executive.go(onOp);
+ double execTime = timer.elapsed();
+ executive.finalize();
+ bytes output = std::move(res.output);
+
+ if (mode == Mode::Statistics)
+ {
+ cout << "Gas used: " << res.gasUsed << " (+" << t.gasRequired() << " for transaction, -" << res.gasRefunded << " refunded)" << endl;
+ cout << "Output: " << toHex(output) << endl;
+ LogEntries logs = executive.logs();
+ cout << logs.size() << " logs" << (logs.empty() ? "." : ":") << endl;
+ for (LogEntry const& l: logs)
+ {
+ cout << " " << l.address.hex() << ": " << toHex(t.data()) << endl;
+ for (h256 const& t: l.topics)
+ cout << " " << t.hex() << endl;
+ }
+
+ cout << total << " operations in " << execTime << " seconds." << endl;
+ cout << "Maximum memory usage: " << memTotal * 32 << " bytes" << endl;
+ cout << "Expensive operations:" << endl;
+ for (auto const& c: {Instruction::SSTORE, Instruction::SLOAD, Instruction::CALL, Instruction::CREATE, Instruction::CALLCODE, Instruction::MSTORE8, Instruction::MSTORE, Instruction::MLOAD, Instruction::SHA3})
+ if (!!counts[(byte)c].first)
+ cout << " " << instructionInfo(c).name << " x " << counts[(byte)c].first << " (" << counts[(byte)c].second << " gas)" << endl;
+ }
+ else if (mode == Mode::Trace)
+ cout << st.json(styledJson);
+ else if (mode == Mode::OutputOnly)
+ cout << toHex(output);
+
+ return 0;
+}
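The stats mode above aggregates per-opcode execution counts and gas through the per-step callback handed to executive.go(). Below is a minimal standalone sketch of that tallying idea; the types here are plain stand-ins (a raw opcode byte and integer gas), not the libethereum Instruction/u256 API.

```cpp
// Minimal sketch of the per-opcode tally ethvm's stats mode keeps,
// using stand-in types instead of libethereum's Instruction/u256.
#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <utility>

int main()
{
	// opcode byte -> (times executed, total gas charged)
	std::unordered_map<uint8_t, std::pair<uint64_t, uint64_t>> counts;
	uint64_t total = 0;

	// In ethvm a lambda like this is passed to Executive::go(); here we feed it a fake trace.
	auto onOp = [&](uint8_t opcode, uint64_t gasCost)
	{
		counts[opcode].first++;
		counts[opcode].second += gasCost;
		total++;
	};

	// Pretend the VM executed PUSH1, PUSH1, SSTORE (0x60, 0x60, 0x55).
	onOp(0x60, 3);
	onOp(0x60, 3);
	onOp(0x55, 20000);

	std::cout << total << " operations\n";
	for (auto const& c: counts)
		std::cout << "op 0x" << std::hex << int(c.first) << std::dec
		          << " x " << c.second.first << " (" << c.second.second << " gas)\n";
}
```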
diff --git a/evmjit/libevmjit-cpp/JitVM.cpp b/evmjit/libevmjit-cpp/JitVM.cpp
index 0d6a6e00a..68161526d 100644
--- a/evmjit/libevmjit-cpp/JitVM.cpp
+++ b/evmjit/libevmjit-cpp/JitVM.cpp
@@ -51,7 +51,7 @@ bytesConstRef JitVM::execImpl(u256& io_gas, ExtVMFace& _ext, OnOpFunc const& _on
m_data.timestamp = static_cast(_ext.currentBlock.timestamp);
m_data.code = _ext.code.data();
m_data.codeSize = _ext.code.size();
- m_data.codeHash = eth2llvm(sha3(_ext.code));
+ m_data.codeHash = eth2llvm(_ext.codeHash);
auto env = reinterpret_cast(&_ext);
auto exitCode = m_engine.run(&m_data, env);
diff --git a/evmjit/libevmjit/Array.cpp b/evmjit/libevmjit/Array.cpp
index 3266038db..0b511a058 100644
--- a/evmjit/libevmjit/Array.cpp
+++ b/evmjit/libevmjit/Array.cpp
@@ -9,8 +9,6 @@
#include "Runtime.h"
#include "Utils.h"
-#include // DEBUG only
-
namespace dev
{
namespace eth
@@ -269,52 +267,15 @@ void Array::extend(llvm::Value* _arrayPtr, llvm::Value* _size)
}
}
-namespace
-{
- struct AllocatedMemoryWatchdog
- {
- std::set allocatedMemory;
-
- ~AllocatedMemoryWatchdog()
- {
- if (!allocatedMemory.empty())
- {
- DLOG(mem) << allocatedMemory.size() << " MEM LEAKS!\n";
- for (auto&& leak : allocatedMemory)
- DLOG(mem) << "\t" << leak << "\n";
- }
- }
- };
-
- AllocatedMemoryWatchdog watchdog;
-}
-
extern "C"
{
- using namespace dev::eth::jit;
-
EXPORT void* ext_realloc(void* _data, size_t _size) noexcept
{
- //std::cerr << "REALLOC: " << _data << " [" << _size << "]" << std::endl;
- auto newData = std::realloc(_data, _size);
- if (_data != newData)
- {
- DLOG(mem) << "REALLOC: " << newData << " <- " << _data << " [" << _size << "]\n";
- watchdog.allocatedMemory.erase(_data);
- watchdog.allocatedMemory.insert(newData);
- }
- return newData;
+ return std::realloc(_data, _size);
}
EXPORT void ext_free(void* _data) noexcept
{
std::free(_data);
- if (_data)
- {
- DLOG(mem) << "FREE : " << _data << "\n";
- watchdog.allocatedMemory.erase(_data);
- }
}
-
-} // extern "C"
-
+}
diff --git a/evmjit/libevmjit/Cache.cpp b/evmjit/libevmjit/Cache.cpp
index 47a6386e9..42ccf44ac 100644
--- a/evmjit/libevmjit/Cache.cpp
+++ b/evmjit/libevmjit/Cache.cpp
@@ -1,5 +1,7 @@
#include "Cache.h"
+#include <mutex>
+
#include "preprocessor/llvm_includes_start.h"
#include
#include
@@ -23,6 +25,8 @@ namespace jit
namespace
{
+ using Guard = std::lock_guard<std::mutex>;
+ std::mutex x_cacheMutex;
CacheMode g_mode;
llvm::MemoryBuffer* g_lastObject;
ExecutionEngineListener* g_listener;
@@ -43,6 +47,9 @@ namespace
ObjectCache* Cache::getObjectCache(CacheMode _mode, ExecutionEngineListener* _listener)
{
static ObjectCache objectCache;
+
+ Guard g{x_cacheMutex};
+
g_mode = _mode;
g_listener = _listener;
return &objectCache;
@@ -50,6 +57,8 @@ ObjectCache* Cache::getObjectCache(CacheMode _mode, ExecutionEngineListener* _li
void Cache::clear()
{
+ Guard g{x_cacheMutex};
+
using namespace llvm::sys;
llvm::SmallString<256> cachePath;
path::system_temp_directory(false, cachePath);
@@ -62,6 +71,8 @@ void Cache::clear()
void Cache::preload(llvm::ExecutionEngine& _ee, std::unordered_map& _funcCache)
{
+ Guard g{x_cacheMutex};
+
// TODO: Cache dir should be in one place
using namespace llvm::sys;
llvm::SmallString<256> cachePath;
@@ -92,11 +103,14 @@ void Cache::preload(llvm::ExecutionEngine& _ee, std::unordered_map Cache::getObject(std::string const& id)
{
+ Guard g{x_cacheMutex};
+
if (g_mode != CacheMode::on && g_mode != CacheMode::read)
return nullptr;
- if (g_listener)
- g_listener->stateChanged(ExecState::CacheLoad);
+ // TODO: Disabled because is not thread-safe.
+ //if (g_listener)
+ // g_listener->stateChanged(ExecState::CacheLoad);
DLOG(cache) << id << ": search\n";
if (!CHECK(!g_lastObject))
@@ -136,12 +150,15 @@ std::unique_ptr Cache::getObject(std::string const& id)
void ObjectCache::notifyObjectCompiled(llvm::Module const* _module, llvm::MemoryBuffer const* _object)
{
+ Guard g{x_cacheMutex};
+
// Only in "on" and "write" mode
if (g_mode != CacheMode::on && g_mode != CacheMode::write)
return;
- if (g_listener)
- g_listener->stateChanged(ExecState::CacheWrite);
+ // TODO: Disabled because is not thread-safe.
+ // if (g_listener)
+ // g_listener->stateChanged(ExecState::CacheWrite);
auto&& id = _module->getModuleIdentifier();
llvm::SmallString<256> cachePath;
@@ -161,6 +178,8 @@ void ObjectCache::notifyObjectCompiled(llvm::Module const* _module, llvm::Memory
llvm::MemoryBuffer* ObjectCache::getObject(llvm::Module const* _module)
{
+ Guard g{x_cacheMutex};
+
DLOG(cache) << _module->getModuleIdentifier() << ": use\n";
auto o = g_lastObject;
g_lastObject = nullptr;
diff --git a/libdevcore/Base64.cpp b/libdevcore/Base64.cpp
index e36f8a18a..f97c82156 100644
--- a/libdevcore/Base64.cpp
+++ b/libdevcore/Base64.cpp
@@ -29,11 +29,13 @@
#include "Base64.h"
using namespace dev;
-static inline bool is_base64(byte c) {
+static inline bool is_base64(byte c)
+{
return (isalnum(c) || (c == '+') || (c == '/'));
}
-static inline byte find_base64_char_index(byte c) {
+static inline byte find_base64_char_index(byte c)
+{
if ('A' <= c && c <= 'Z') return c - 'A';
else if ('a' <= c && c <= 'z') return c - 'a' + 1 + find_base64_char_index('Z');
else if ('0' <= c && c <= '9') return c - '0' + 1 + find_base64_char_index('z');
@@ -42,7 +44,8 @@ static inline byte find_base64_char_index(byte c) {
else return 1 + find_base64_char_index('/');
}
-std::string dev::toBase64(bytesConstRef _in) {
+std::string dev::toBase64(bytesConstRef _in)
+{
static const char base64_chars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
@@ -91,7 +94,8 @@ std::string dev::toBase64(bytesConstRef _in) {
return ret;
}
-bytes dev::fromBase64(std::string const& encoded_string) {
+bytes dev::fromBase64(std::string const& encoded_string)
+{
auto in_len = encoded_string.size();
int i = 0;
int j = 0;
diff --git a/libdevcore/Common.cpp b/libdevcore/Common.cpp
index 3dc3fd280..22ea584c1 100644
--- a/libdevcore/Common.cpp
+++ b/libdevcore/Common.cpp
@@ -28,7 +28,7 @@ using namespace dev;
namespace dev
{
-char const* Version = "0.9.24";
+char const* Version = "0.9.26";
const u256 UndefinedU256 = ~(u256)0;
diff --git a/libdevcore/Common.h b/libdevcore/Common.h
index 453c17e6f..1ee83c794 100644
--- a/libdevcore/Common.h
+++ b/libdevcore/Common.h
@@ -181,7 +181,7 @@ private:
/// Scope guard for invariant check in a class derived from HasInvariants.
#if ETH_DEBUG
-#define DEV_INVARIANT_CHECK ::dev::InvariantChecker __dev_invariantCheck(this)
+#define DEV_INVARIANT_CHECK { ::dev::InvariantChecker __dev_invariantCheck(this); }
#else
#define DEV_INVARIANT_CHECK (void)0;
#endif
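Wrapping the macro body in its own braces changes when the invariant is verified: the checker is now constructed and destroyed at the macro site instead of living as a named local until the end of the enclosing scope, and the macro can appear more than once per block without redeclaring the guard. A minimal sketch of that scoping difference, using a stand-in checker rather than dev::InvariantChecker:

```cpp
#include <iostream>

struct Checker
{
	Checker()  { std::cout << "invariant checked (construction)\n"; }
	~Checker() { std::cout << "invariant checked (destruction)\n"; }
};

// Old form: a named local that survives to the end of the enclosing scope.
#define CHECK_SCOPED Checker invariantGuard
// New form: construct and destroy immediately, so the check runs right here.
#define CHECK_NOW { Checker invariantGuard; }

void oldStyle()
{
	CHECK_SCOPED;               // second check only runs when oldStyle() returns
	std::cout << "mutating state...\n";
}

void newStyle()
{
	CHECK_NOW                   // both checks run here, before the mutation
	std::cout << "mutating state...\n";
	CHECK_NOW                   // no redefinition: each use has its own scope
}

int main()
{
	oldStyle();
	newStyle();
}
```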
diff --git a/libdevcore/Exceptions.h b/libdevcore/Exceptions.h
index 7d6fae77c..b0bab7d81 100644
--- a/libdevcore/Exceptions.h
+++ b/libdevcore/Exceptions.h
@@ -41,11 +41,11 @@ private:
std::string m_message;
};
-#define DEV_SIMPLE_EXCEPTION(X) struct X: virtual Exception { public: X(): Exception(#X) {} }
+#define DEV_SIMPLE_EXCEPTION(X) struct X: virtual Exception { const char* what() const noexcept override { return #X; } }
/// Base class for all RLP exceptions.
struct RLPException: virtual Exception { RLPException(std::string _message = std::string()): Exception(_message) {} };
-#define DEV_SIMPLE_EXCEPTION_RLP(X) struct X: virtual RLPException { public: X(): RLPException(#X) {} }
+#define DEV_SIMPLE_EXCEPTION_RLP(X) struct X: virtual RLPException { const char* what() const noexcept override { return #X; } }
DEV_SIMPLE_EXCEPTION_RLP(BadCast);
DEV_SIMPLE_EXCEPTION_RLP(BadRLP);
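With this change the generated exception types no longer pass their name through the Exception/RLPException constructor; each type instead overrides what() to return its own name. A sketch of roughly what the macro now expands to, against a simplified stand-in base (the real dev::Exception also carries boost::exception machinery):

```cpp
#include <exception>
#include <iostream>
#include <string>
#include <utility>

// Simplified stand-in for dev::Exception.
struct Exception: virtual std::exception
{
	Exception(std::string _message = std::string()): m_message(std::move(_message)) {}
	const char* what() const noexcept override { return m_message.empty() ? std::exception::what() : m_message.c_str(); }
private:
	std::string m_message;
};

// What DEV_SIMPLE_EXCEPTION(BadHexCharacter) now expands to, roughly:
struct BadHexCharacter: virtual Exception
{
	const char* what() const noexcept override { return "BadHexCharacter"; }
};

int main()
{
	try { throw BadHexCharacter(); }
	catch (std::exception const& _e) { std::cout << _e.what() << "\n"; }  // prints "BadHexCharacter"
}
```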
diff --git a/libdevcore/FixedHash.h b/libdevcore/FixedHash.h
index 88bc0fe95..2d1822b2c 100644
--- a/libdevcore/FixedHash.h
+++ b/libdevcore/FixedHash.h
@@ -100,7 +100,7 @@ public:
FixedHash operator|(FixedHash const& _c) const { return FixedHash(*this) |= _c; }
FixedHash& operator&=(FixedHash const& _c) { for (unsigned i = 0; i < N; ++i) m_data[i] &= _c.m_data[i]; return *this; }
FixedHash operator&(FixedHash const& _c) const { return FixedHash(*this) &= _c; }
- FixedHash& operator~() { for (unsigned i = 0; i < N; ++i) m_data[i] = ~m_data[i]; return *this; }
+ FixedHash operator~() const { FixedHash ret; for (unsigned i = 0; i < N; ++i) ret[i] = ~m_data[i]; return ret; }
/// @returns true if all bytes in @a _c are set in this object.
bool contains(FixedHash const& _c) const { return (*this & _c) == _c; }
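The old operator~ flipped the bits of *this and returned a reference, so an expression such as `a & ~b` silently modified b; the new version is const and returns a fresh value. A small sketch of the corrected, non-mutating form on a simplified fixed-size byte array (not the real FixedHash template):

```cpp
#include <array>
#include <cassert>
#include <cstdint>

// Simplified stand-in for a 4-byte FixedHash.
struct Hash4
{
	std::array<uint8_t, 4> m_data{};

	// Non-mutating complement: build and return a new value, leave *this untouched.
	Hash4 operator~() const
	{
		Hash4 ret;
		for (unsigned i = 0; i < 4; ++i)
			ret.m_data[i] = static_cast<uint8_t>(~m_data[i]);
		return ret;
	}
};

int main()
{
	Hash4 b;
	b.m_data = {0x00, 0xff, 0x0f, 0xf0};
	Hash4 c = ~b;                   // with the old mutating operator~ this would also rewrite b
	assert(b.m_data[1] == 0xff);    // b is untouched
	assert(c.m_data[0] == 0xff && c.m_data[1] == 0x00);
	(void)c;
	return 0;
}
```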
diff --git a/libdevcore/Log.cpp b/libdevcore/Log.cpp
index 1e5c2d8ab..fde492f3b 100644
--- a/libdevcore/Log.cpp
+++ b/libdevcore/Log.cpp
@@ -40,7 +40,7 @@ mutex x_logOverride;
/// or equal to the currently output verbosity (g_logVerbosity).
static map s_logOverride;
-bool isLogVisible(std::type_info const* _ch, bool _default)
+bool dev::isChannelVisible(std::type_info const* _ch, bool _default)
{
Guard l(x_logOverride);
if (s_logOverride.count(_ch))
diff --git a/libdevcore/MemoryDB.cpp b/libdevcore/MemoryDB.cpp
index 2cf56475b..f71931bdd 100644
--- a/libdevcore/MemoryDB.cpp
+++ b/libdevcore/MemoryDB.cpp
@@ -32,7 +32,9 @@ const char* DBWarn::name() { return "TDB"; }
std::unordered_map MemoryDB::get() const
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
std::unordered_map ret;
for (auto const& i: m_main)
if (!m_enforceRefs || i.second.second > 0)
@@ -44,8 +46,10 @@ MemoryDB& MemoryDB::operator=(MemoryDB const& _c)
{
if (this == &_c)
return *this;
+#if DEV_GUARDED_DB
ReadGuard l(_c.x_this);
WriteGuard l2(x_this);
+#endif
m_main = _c.m_main;
m_aux = _c.m_aux;
return *this;
@@ -53,7 +57,9 @@ MemoryDB& MemoryDB::operator=(MemoryDB const& _c)
std::string MemoryDB::lookup(h256 const& _h) const
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
auto it = m_main.find(_h);
if (it != m_main.end())
{
@@ -67,7 +73,9 @@ std::string MemoryDB::lookup(h256 const& _h) const
bool MemoryDB::exists(h256 const& _h) const
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
auto it = m_main.find(_h);
if (it != m_main.end() && (!m_enforceRefs || it->second.second > 0))
return true;
@@ -76,7 +84,9 @@ bool MemoryDB::exists(h256 const& _h) const
void MemoryDB::insert(h256 const& _h, bytesConstRef _v)
{
+#if DEV_GUARDED_DB
WriteGuard l(x_this);
+#endif
auto it = m_main.find(_h);
if (it != m_main.end())
{
@@ -92,7 +102,9 @@ void MemoryDB::insert(h256 const& _h, bytesConstRef _v)
bool MemoryDB::kill(h256 const& _h)
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
if (m_main.count(_h))
{
if (m_main[_h].second > 0)
@@ -117,9 +129,38 @@ bool MemoryDB::kill(h256 const& _h)
return false;
}
+bytes MemoryDB::lookupAux(h256 const& _h) const
+{
+#if DEV_GUARDED_DB
+ ReadGuard l(x_this);
+#endif
+ auto it = m_aux.find(_h);
+ if (it != m_aux.end() && (!m_enforceRefs || it->second.second))
+ return it->second.first;
+ return bytes();
+}
+
+void MemoryDB::removeAux(h256 const& _h)
+{
+#if DEV_GUARDED_DB
+ WriteGuard l(x_this);
+#endif
+ m_aux[_h].second = false;
+}
+
+void MemoryDB::insertAux(h256 const& _h, bytesConstRef _v)
+{
+#if DEV_GUARDED_DB
+ WriteGuard l(x_this);
+#endif
+ m_aux[_h] = make_pair(_v.toBytes(), true);
+}
+
void MemoryDB::purge()
{
+#if DEV_GUARDED_DB
WriteGuard l(x_this);
+#endif
for (auto it = m_main.begin(); it != m_main.end(); )
if (it->second.second)
++it;
@@ -129,7 +170,9 @@ void MemoryDB::purge()
h256Hash MemoryDB::keys() const
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
h256Hash ret;
for (auto const& i: m_main)
if (i.second.second)
diff --git a/libdevcore/MemoryDB.h b/libdevcore/MemoryDB.h
index 169682815..a39c0efd0 100644
--- a/libdevcore/MemoryDB.h
+++ b/libdevcore/MemoryDB.h
@@ -57,14 +57,16 @@ public:
bool kill(h256 const& _h);
void purge();
- bytes lookupAux(h256 const& _h) const { ReadGuard l(x_this); auto it = m_aux.find(_h); if (it != m_aux.end() && (!m_enforceRefs || it->second.second)) return it->second.first; return bytes(); }
- void removeAux(h256 const& _h) { WriteGuard l(x_this); m_aux[_h].second = false; }
- void insertAux(h256 const& _h, bytesConstRef _v) { WriteGuard l(x_this); m_aux[_h] = make_pair(_v.toBytes(), true); }
+ bytes lookupAux(h256 const& _h) const;
+ void removeAux(h256 const& _h);
+ void insertAux(h256 const& _h, bytesConstRef _v);
h256Hash keys() const;
protected:
+#if DEV_GUARDED_DB
mutable SharedMutex x_this;
+#endif
std::unordered_map<h256, std::pair<std::string, unsigned>> m_main;
std::unordered_map<h256, std::pair<bytes, bool>> m_aux;
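The aux accessors move out of the header so their locking can be compiled in or out: when DEV_GUARDED_DB is set each method takes the shared mutex, otherwise the mutex member and the guards disappear entirely. A minimal sketch of that conditional-locking idiom, assuming a DEV_GUARDED_DB-style compile-time switch (names here are illustrative, not the libdevcore ones):

```cpp
#include <map>
#include <shared_mutex>
#include <string>
#include <utility>

// Set GUARDED_STORE to 1 for thread-safe accessors, 0 to drop the mutex entirely.
#ifndef GUARDED_STORE
#define GUARDED_STORE 1
#endif

class Store
{
public:
	void insert(int _k, std::string _v)
	{
#if GUARDED_STORE
		std::unique_lock<std::shared_mutex> l(x_this);
#endif
		m_data[_k] = std::move(_v);
	}

	std::string lookup(int _k) const
	{
#if GUARDED_STORE
		std::shared_lock<std::shared_mutex> l(x_this);
#endif
		auto it = m_data.find(_k);
		return it == m_data.end() ? std::string() : it->second;
	}

private:
#if GUARDED_STORE
	mutable std::shared_mutex x_this;
#endif
	std::map<int, std::string> m_data;
};

int main()
{
	Store s;
	s.insert(1, "one");
	return s.lookup(1) == "one" ? 0 : 1;
}
```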
diff --git a/libdevcore/vector_ref.h b/libdevcore/vector_ref.h
index 5e9bba3e8..b04d449b3 100644
--- a/libdevcore/vector_ref.h
+++ b/libdevcore/vector_ref.h
@@ -43,7 +43,8 @@ public:
vector_ref<_T> cropped(size_t _begin) const { if (m_data && _begin <= m_count) return vector_ref<_T>(m_data + _begin, m_count - _begin); else return vector_ref<_T>(); }
void retarget(_T* _d, size_t _s) { m_data = _d; m_count = _s; }
void retarget(std::vector<_T> const& _t) { m_data = _t.data(); m_count = _t.size(); }
- void copyTo(vector_ref<typename std::remove_const<_T>::type> _t) const { memcpy(_t.data(), m_data, std::min(_t.size(), m_count) * sizeof(_T)); }
+ template <class T> bool overlapsWith(vector_ref<T> _t) const { void const* f1 = data(); void const* t1 = data() + size(); void const* f2 = _t.data(); void const* t2 = _t.data() + _t.size(); return f1 < t2 && t1 > f2; }
+ void copyTo(vector_ref<typename std::remove_const<_T>::type> _t) const { if (overlapsWith(_t)) memmove(_t.data(), m_data, std::min(_t.size(), m_count) * sizeof(_T)); else memcpy(_t.data(), m_data, std::min(_t.size(), m_count) * sizeof(_T)); }
void populate(vector_ref<typename std::remove_const<_T>::type> _t) const { copyTo(_t); memset(_t.data() + m_count, 0, std::max(_t.size(), m_count) - m_count); }
_T* begin() { return m_data; }
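copyTo previously always used memcpy, which has undefined behaviour when source and destination overlap; the new overlapsWith check routes overlapping copies through memmove instead. A small standalone sketch of the same overlap test and dispatch on raw byte ranges, independent of vector_ref:

```cpp
#include <cassert>
#include <cstddef>
#include <cstring>

// True if [a, a+na) and [b, b+nb) share any bytes -- the same test
// vector_ref::overlapsWith performs on its data()/size() pair.
static bool overlaps(void const* _a, std::size_t _na, void const* _b, std::size_t _nb)
{
	char const* f1 = static_cast<char const*>(_a);
	char const* f2 = static_cast<char const*>(_b);
	return f1 < f2 + _nb && f2 < f1 + _na;
}

// Copy like the patched copyTo: memmove when the ranges overlap, memcpy otherwise.
static void safeCopy(void* _dst, void const* _src, std::size_t _n)
{
	if (overlaps(_dst, _n, _src, _n))
		std::memmove(_dst, _src, _n);
	else
		std::memcpy(_dst, _src, _n);
}

int main()
{
	char buf[] = "abcdef";
	safeCopy(buf + 1, buf, 5);   // overlapping shift right by one byte: needs memmove
	assert(std::memcmp(buf, "aabcde", 6) == 0);
	return 0;
}
```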
diff --git a/libdevcrypto/CryptoPP.cpp b/libdevcrypto/CryptoPP.cpp
index 9b550a666..40eae10f1 100644
--- a/libdevcrypto/CryptoPP.cpp
+++ b/libdevcrypto/CryptoPP.cpp
@@ -264,7 +264,6 @@ Public Secp256k1::recover(Signature _signature, bytesConstRef _message)
ECP::Element x;
{
- Guard l(x_curve);
m_curve.DecodePoint(x, encodedpoint, 33);
if (!m_curve.VerifyPoint(x))
return recovered;
@@ -286,7 +285,6 @@ Public Secp256k1::recover(Signature _signature, bytesConstRef _message)
ECP::Point p;
byte recoveredbytes[65];
{
- Guard l(x_curve);
// todo: make generator member
p = m_curve.CascadeMultiply(u2, x, u1, m_params.GetSubgroupGenerator());
m_curve.EncodePoint(recoveredbytes, p, false);
diff --git a/libdevcrypto/OverlayDB.cpp b/libdevcrypto/OverlayDB.cpp
index 05b9877ad..a6aa684f2 100644
--- a/libdevcrypto/OverlayDB.cpp
+++ b/libdevcrypto/OverlayDB.cpp
@@ -50,7 +50,9 @@ void OverlayDB::commit()
{
ldb::WriteBatch batch;
// cnote << "Committing nodes to disk DB:";
+#if DEV_GUARDED_DB
DEV_READ_GUARDED(x_this)
+#endif
{
for (auto const& i: m_main)
{
@@ -83,7 +85,9 @@ void OverlayDB::commit()
cwarn << "Sleeping for" << (i + 1) << "seconds, then retrying.";
this_thread::sleep_for(chrono::seconds(i + 1));
}
+#if DEV_GUARDED_DB
DEV_WRITE_GUARDED(x_this)
+#endif
{
m_aux.clear();
m_main.clear();
@@ -107,7 +111,9 @@ bytes OverlayDB::lookupAux(h256 const& _h) const
void OverlayDB::rollback()
{
+#if DEV_GUARDED_DB
WriteGuard l(x_this);
+#endif
m_main.clear();
}
diff --git a/libethash-cl/ethash_cl_miner.cpp b/libethash-cl/ethash_cl_miner.cpp
index 2bdcfcd9a..6c2f8269a 100644
--- a/libethash-cl/ethash_cl_miner.cpp
+++ b/libethash-cl/ethash_cl_miner.cpp
@@ -32,11 +32,11 @@
#include
#include
#include
+#include
#include "ethash_cl_miner.h"
#include "ethash_cl_miner_kernel.h"
#define ETHASH_BYTES 32
-#define ETHASH_CL_MINIMUM_MEMORY 2000000000
// workaround lame platforms
#if !CL_VERSION_1_2
@@ -51,12 +51,14 @@ using namespace std;
// TODO: If at any point we can use libdevcore in here then we should switch to using a LogChannel
#define ETHCL_LOG(_contents) cout << "[OPENCL]:" << _contents << endl
+// Types of OpenCL devices we are interested in
+#define ETHCL_QUERIED_DEVICE_TYPES (CL_DEVICE_TYPE_GPU | CL_DEVICE_TYPE_ACCELERATOR)
-static void add_definition(std::string& source, char const* id, unsigned value)
+static void addDefinition(string& _source, char const* _id, unsigned _value)
{
char buf[256];
- sprintf(buf, "#define %s %uu\n", id, value);
- source.insert(source.begin(), buf, buf + strlen(buf));
+ sprintf(buf, "#define %s %uu\n", _id, _value);
+ _source.insert(_source.begin(), buf, buf + strlen(buf));
}
ethash_cl_miner::search_hook::~search_hook() {}
@@ -71,44 +73,54 @@ ethash_cl_miner::~ethash_cl_miner()
finish();
}
-std::string ethash_cl_miner::platform_info(unsigned _platformId, unsigned _deviceId)
+string ethash_cl_miner::platform_info(unsigned _platformId, unsigned _deviceId)
{
- std::vector platforms;
+ vector platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
ETHCL_LOG("No OpenCL platforms found.");
- return std::string();
+ return string();
}
// get GPU device of the selected platform
- std::vector devices;
- unsigned platform_num = std::min(_platformId, platforms.size() - 1);
- platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
+ unsigned platform_num = min(_platformId, platforms.size() - 1);
+ vector devices = getDevices(platforms, _platformId);
if (devices.empty())
{
ETHCL_LOG("No OpenCL devices found.");
- return std::string();
+ return string();
}
// use selected default device
- unsigned device_num = std::min(_deviceId, devices.size() - 1);
+ unsigned device_num = min(_deviceId, devices.size() - 1);
cl::Device& device = devices[device_num];
- std::string device_version = device.getInfo();
+ string device_version = device.getInfo();
return "{ \"platform\": \"" + platforms[platform_num].getInfo() + "\", \"device\": \"" + device.getInfo() + "\", \"version\": \"" + device_version + "\" }";
}
-unsigned ethash_cl_miner::get_num_platforms()
+std::vector<cl::Device> ethash_cl_miner::getDevices(std::vector<cl::Platform> const& _platforms, unsigned _platformId)
{
- std::vector<cl::Platform> platforms;
+ vector<cl::Device> devices;
+ unsigned platform_num = min(_platformId, _platforms.size() - 1);
+ _platforms[platform_num].getDevices(
+ s_allowCPU ? CL_DEVICE_TYPE_ALL : ETHCL_QUERIED_DEVICE_TYPES,
+ &devices
+ );
+ return devices;
+}
+
+unsigned ethash_cl_miner::getNumPlatforms()
+{
+ vector platforms;
cl::Platform::get(&platforms);
return platforms.size();
}
-unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
+unsigned ethash_cl_miner::getNumDevices(unsigned _platformId)
{
- std::vector platforms;
+ vector platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
@@ -116,9 +128,7 @@ unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
return 0;
}
- std::vector devices;
- unsigned platform_num = std::min(_platformId, platforms.size() - 1);
- platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
+ vector devices = getDevices(platforms, _platformId);
if (devices.empty())
{
ETHCL_LOG("No OpenCL devices found.");
@@ -127,9 +137,49 @@ unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
return devices.size();
}
-bool ethash_cl_miner::haveSufficientGPUMemory()
+bool ethash_cl_miner::configureGPU(
+ bool _allowCPU,
+ unsigned _extraGPUMemory,
+ bool _forceSingleChunk,
+ boost::optional<uint64_t> _currentBlock
+)
{
- std::vector platforms;
+ s_allowCPU = _allowCPU;
+ s_forceSingleChunk = _forceSingleChunk;
+ s_extraRequiredGPUMem = _extraGPUMemory;
+ // by default let's only consider the DAG of the first epoch
+ uint64_t dagSize = _currentBlock ? ethash_get_datasize(*_currentBlock) : 1073739904U;
+ uint64_t requiredSize = dagSize + _extraGPUMemory;
+ return searchForAllDevices([&requiredSize](cl::Device const _device) -> bool
+ {
+ cl_ulong result;
+ _device.getInfo(CL_DEVICE_GLOBAL_MEM_SIZE, &result);
+ if (result >= requiredSize)
+ {
+ ETHCL_LOG(
+ "Found suitable OpenCL device [" << _device.getInfo()
+ << "] with " << result << " bytes of GPU memory"
+ );
+ return true;
+ }
+
+ ETHCL_LOG(
+ "OpenCL device " << _device.getInfo()
+ << " has insufficient GPU memory." << result <<
+ " bytes of memory found < " << requiredSize << " bytes of memory required"
+ );
+ return false;
+ }
+ );
+}
+
+bool ethash_cl_miner::s_allowCPU = false;
+bool ethash_cl_miner::s_forceSingleChunk = false;
+unsigned ethash_cl_miner::s_extraRequiredGPUMem;
+
+bool ethash_cl_miner::searchForAllDevices(function _callback)
+{
+ vector platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
@@ -137,50 +187,30 @@ bool ethash_cl_miner::haveSufficientGPUMemory()
return false;
}
for (unsigned i = 0; i < platforms.size(); ++i)
- if (haveSufficientGPUMemory(i))
+ if (searchForAllDevices(i, _callback))
return true;
return false;
}
-bool ethash_cl_miner::haveSufficientGPUMemory(unsigned _platformId)
+bool ethash_cl_miner::searchForAllDevices(unsigned _platformId, function _callback)
{
- std::vector platforms;
+ vector platforms;
cl::Platform::get(&platforms);
if (_platformId >= platforms.size())
return false;
- std::vector devices;
- unsigned platform_num = std::min(_platformId, platforms.size() - 1);
- platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
- if (devices.empty())
- return false;
-
+ vector devices = getDevices(platforms, _platformId);
for (cl::Device const& device: devices)
- {
- cl_ulong result;
- device.getInfo(CL_DEVICE_GLOBAL_MEM_SIZE, &result);
- if (result >= ETHASH_CL_MINIMUM_MEMORY)
- {
- ETHCL_LOG(
- "Found suitable OpenCL device [" << device.getInfo()
- << "] with " << result << " bytes of GPU memory"
- );
+ if (_callback(device))
return true;
- }
- else
- ETHCL_LOG(
- "OpenCL device " << device.getInfo()
- << " has insufficient GPU memory." << result <<
- " bytes of memory found < " << ETHASH_CL_MINIMUM_MEMORY << " bytes of memory required"
- );
- }
+
return false;
}
-void ethash_cl_miner::listDevices()
+void ethash_cl_miner::doForAllDevices(function _callback)
{
- std::vector platforms;
+ vector platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
@@ -188,26 +218,31 @@ void ethash_cl_miner::listDevices()
return;
}
for (unsigned i = 0; i < platforms.size(); ++i)
- listDevices(i);
+ doForAllDevices(i, _callback);
}
-void ethash_cl_miner::listDevices(unsigned _platformId)
+void ethash_cl_miner::doForAllDevices(unsigned _platformId, function _callback)
{
- std::vector platforms;
+ vector platforms;
cl::Platform::get(&platforms);
if (_platformId >= platforms.size())
return;
- std::string outString ="Listing OpenCL devices for platform " + to_string(_platformId) + "\n[deviceID] deviceName\n";
- std::vector devices;
- platforms[_platformId].getDevices(CL_DEVICE_TYPE_ALL, &devices);
- unsigned i = 0;
- std::string deviceString;
+ vector devices = getDevices(platforms, _platformId);
for (cl::Device const& device: devices)
- {
- outString += "[" + to_string(i) + "] " + device.getInfo() + "\n";
- ++i;
- }
+ _callback(device);
+}
+
+void ethash_cl_miner::listDevices()
+{
+ string outString ="\nListing OpenCL devices.\nFORMAT: [deviceID] deviceName\n";
+ unsigned int i = 0;
+ doForAllDevices([&outString, &i](cl::Device const _device)
+ {
+ outString += "[" + to_string(i) + "] " + _device.getInfo() + "\n";
+ ++i;
+ }
+ );
ETHCL_LOG(outString);
}
@@ -222,19 +257,13 @@ bool ethash_cl_miner::init(
uint64_t _dagSize,
unsigned workgroup_size,
unsigned _platformId,
- unsigned _deviceId,
- unsigned _dagChunksNum
+ unsigned _deviceId
)
{
- // for now due to the .cl kernels we can only have either 1 big chunk or 4 chunks
- assert(_dagChunksNum == 1 || _dagChunksNum == 4);
- // now create the number of chunk buffers
- m_dagChunksNum = _dagChunksNum;
-
// get all platforms
try
{
- std::vector platforms;
+ vector platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
@@ -243,12 +272,11 @@ bool ethash_cl_miner::init(
}
// use selected platform
- _platformId = std::min(_platformId, platforms.size() - 1);
+ _platformId = min(_platformId, platforms.size() - 1);
ETHCL_LOG("Using platform: " << platforms[_platformId].getInfo().c_str());
// get GPU device of the default platform
- std::vector devices;
- platforms[_platformId].getDevices(CL_DEVICE_TYPE_ALL, &devices);
+ vector devices = getDevices(platforms, _platformId);
if (devices.empty())
{
ETHCL_LOG("No OpenCL devices found.");
@@ -256,10 +284,27 @@ bool ethash_cl_miner::init(
}
// use selected device
- cl::Device& device = devices[std::min(_deviceId, devices.size() - 1)];
- std::string device_version = device.getInfo();
+ cl::Device& device = devices[min(_deviceId, devices.size() - 1)];
+ string device_version = device.getInfo();
ETHCL_LOG("Using device: " << device.getInfo().c_str() << "(" << device_version.c_str() << ")");
+ // configure chunk number depending on max allocateable memory
+ cl_ulong result;
+ device.getInfo(CL_DEVICE_MAX_MEM_ALLOC_SIZE, &result);
+ if (s_forceSingleChunk || result >= _dagSize)
+ {
+ m_dagChunksNum = 1;
+ ETHCL_LOG(
+ ((result <= _dagSize && s_forceSingleChunk) ? "Forcing single chunk. Good luck!\n" : "") <<
+ "Using 1 big chunk. Max OpenCL allocateable memory is " << result
+ );
+ }
+ else
+ {
+ m_dagChunksNum = 4;
+ ETHCL_LOG("Using 4 chunks. Max OpenCL allocateable memory is " << result);
+ }
+
if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0)
{
ETHCL_LOG("OpenCL 1.0 is not supported.");
@@ -269,7 +314,7 @@ bool ethash_cl_miner::init(
m_opencl_1_1 = true;
// create context
- m_context = cl::Context(std::vector(&device, &device + 1));
+ m_context = cl::Context(vector(&device, &device + 1));
m_queue = cl::CommandQueue(m_context, device);
// use requested workgroup size, but we require multiple of 8
@@ -278,11 +323,11 @@ bool ethash_cl_miner::init(
// patch source code
// note: ETHASH_CL_MINER_KERNEL is simply ethash_cl_miner_kernel.cl compiled
// into a byte array by bin2h.cmake. There is no need to load the file by hand in runtime
- std::string code(ETHASH_CL_MINER_KERNEL, ETHASH_CL_MINER_KERNEL + ETHASH_CL_MINER_KERNEL_SIZE);
- add_definition(code, "GROUP_SIZE", m_workgroup_size);
- add_definition(code, "DAG_SIZE", (unsigned)(_dagSize / ETHASH_MIX_BYTES));
- add_definition(code, "ACCESSES", ETHASH_ACCESSES);
- add_definition(code, "MAX_OUTPUTS", c_max_search_results);
+ string code(ETHASH_CL_MINER_KERNEL, ETHASH_CL_MINER_KERNEL + ETHASH_CL_MINER_KERNEL_SIZE);
+ addDefinition(code, "GROUP_SIZE", m_workgroup_size);
+ addDefinition(code, "DAG_SIZE", (unsigned)(_dagSize / ETHASH_MIX_BYTES));
+ addDefinition(code, "ACCESSES", ETHASH_ACCESSES);
+ addDefinition(code, "MAX_OUTPUTS", c_max_search_results);
//debugf("%s", code.c_str());
// create miner OpenCL program
@@ -301,7 +346,7 @@ bool ethash_cl_miner::init(
ETHCL_LOG(program.getBuildInfo(device).c_str());
return false;
}
- if (_dagChunksNum == 1)
+ if (m_dagChunksNum == 1)
{
ETHCL_LOG("Loading single big chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash");
@@ -315,13 +360,13 @@ bool ethash_cl_miner::init(
}
// create buffer for dag
- if (_dagChunksNum == 1)
+ if (m_dagChunksNum == 1)
{
ETHCL_LOG("Creating one big buffer");
m_dagChunks.push_back(cl::Buffer(m_context, CL_MEM_READ_ONLY, _dagSize));
}
else
- for (unsigned i = 0; i < _dagChunksNum; i++)
+ for (unsigned i = 0; i < m_dagChunksNum; i++)
{
// TODO Note: If we ever change to _dagChunksNum other than 4, then the size would need recalculation
ETHCL_LOG("Creating buffer for chunk " << i);
@@ -336,7 +381,7 @@ bool ethash_cl_miner::init(
ETHCL_LOG("Creating buffer for header.");
m_header = cl::Buffer(m_context, CL_MEM_READ_ONLY, 32);
- if (_dagChunksNum == 1)
+ if (m_dagChunksNum == 1)
{
ETHCL_LOG("Mapping one big chunk.");
m_queue.enqueueWriteBuffer(m_dagChunks[0], CL_TRUE, 0, _dagSize, _dag);
@@ -345,12 +390,12 @@ bool ethash_cl_miner::init(
{
// TODO Note: If we ever change to _dagChunksNum other than 4, then the size would need recalculation
void* dag_ptr[4];
- for (unsigned i = 0; i < _dagChunksNum; i++)
+ for (unsigned i = 0; i < m_dagChunksNum; i++)
{
ETHCL_LOG("Mapping chunk " << i);
dag_ptr[i] = m_queue.enqueueMapBuffer(m_dagChunks[i], true, m_opencl_1_1 ? CL_MAP_WRITE : CL_MAP_WRITE_INVALIDATE_REGION, 0, (i == 3) ? (_dagSize - 3 * ((_dagSize >> 9) << 7)) : (_dagSize >> 9) << 7);
}
- for (unsigned i = 0; i < _dagChunksNum; i++)
+ for (unsigned i = 0; i < m_dagChunksNum; i++)
{
memcpy(dag_ptr[i], (char *)_dag + i*((_dagSize >> 9) << 7), (i == 3) ? (_dagSize - 3 * ((_dagSize >> 9) << 7)) : (_dagSize >> 9) << 7);
m_queue.enqueueUnmapMemObject(m_dagChunks[i], dag_ptr[i]);
@@ -382,7 +427,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
uint64_t start_nonce;
unsigned buf;
};
- std::queue pending;
+ queue pending;
static uint32_t const c_zero = 0;
@@ -408,8 +453,8 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
m_search_kernel.setArg(argPos + 2, ~0u);
unsigned buf = 0;
- std::random_device engine;
- uint64_t start_nonce = std::uniform_int_distribution<uint64_t>()(engine);
+ random_device engine;
+ uint64_t start_nonce = uniform_int_distribution<uint64_t>()(engine);
for (;; start_nonce += c_search_batch_size)
{
// supply output buffer to kernel
@@ -432,7 +477,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
// could use pinned host pointer instead
uint32_t* results = (uint32_t*)m_queue.enqueueMapBuffer(m_search_buf[batch.buf], true, CL_MAP_READ, 0, (1 + c_max_search_results) * sizeof(uint32_t));
- unsigned num_found = std::min(results[0], c_max_search_results);
+ unsigned num_found = min(results[0], c_max_search_results);
uint64_t nonces[c_max_search_results];
for (unsigned i = 0; i != num_found; ++i)
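
With _dagChunksNum dropped from init()'s signature, the chunk count is now derived from the device's CL_DEVICE_MAX_MEM_ALLOC_SIZE (or forced via s_forceSingleChunk). A standalone sketch of that decision; chooseChunkCount is an illustrative helper name, not part of the patch:

    #include <cstdint>

    // One buffer if the device can allocate the whole DAG (or the user forces it),
    // otherwise the 4-chunk layout the .cl kernels expect.
    static unsigned chooseChunkCount(uint64_t _maxAlloc, uint64_t _dagSize, bool _forceSingleChunk)
    {
        return (_forceSingleChunk || _maxAlloc >= _dagSize) ? 1 : 4;
    }
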
diff --git a/libethash-cl/ethash_cl_miner.h b/libethash-cl/ethash_cl_miner.h
index 4d5317186..cc01b0057 100644
--- a/libethash-cl/ethash_cl_miner.h
+++ b/libethash-cl/ethash_cl_miner.h
@@ -12,6 +12,7 @@
#include "cl.hpp"
#endif
+#include
#include
#include
#include
@@ -32,21 +33,27 @@ public:
ethash_cl_miner();
~ethash_cl_miner();
- static unsigned get_num_platforms();
- static unsigned get_num_devices(unsigned _platformId = 0);
+ static bool searchForAllDevices(unsigned _platformId, std::function _callback);
+ static bool searchForAllDevices(std::function _callback);
+ static void doForAllDevices(unsigned _platformId, std::function _callback);
+ static void doForAllDevices(std::function _callback);
+ static unsigned getNumPlatforms();
+ static unsigned getNumDevices(unsigned _platformId = 0);
static std::string platform_info(unsigned _platformId = 0, unsigned _deviceId = 0);
- static bool haveSufficientGPUMemory();
- static bool haveSufficientGPUMemory(unsigned _platformId);
static void listDevices();
- static void listDevices(unsigned _platformId);
+ static bool configureGPU(
+ bool _allowCPU,
+ unsigned _extraGPUMemory,
+ bool _forceSingleChunk,
+ boost::optional _currentBlock
+ );
bool init(
uint8_t const* _dag,
uint64_t _dagSize,
unsigned workgroup_size = 64,
unsigned _platformId = 0,
- unsigned _deviceId = 0,
- unsigned _dagChunksNum = 1
+ unsigned _deviceId = 0
);
void finish();
void search(uint8_t const* header, uint64_t target, search_hook& hook);
@@ -55,17 +62,28 @@ public:
void search_chunk(uint8_t const* header, uint64_t target, search_hook& hook);
private:
+
+ static std::vector<cl::Device> getDevices(std::vector<cl::Platform> const& _platforms, unsigned _platformId);
+
enum { c_max_search_results = 63, c_num_buffers = 2, c_hash_batch_size = 1024, c_search_batch_size = 1024*256 };
cl::Context m_context;
cl::CommandQueue m_queue;
cl::Kernel m_hash_kernel;
cl::Kernel m_search_kernel;
- unsigned m_dagChunksNum;
+ unsigned int m_dagChunksNum;
std::vector<cl::Buffer> m_dagChunks;
cl::Buffer m_header;
cl::Buffer m_hash_buf[c_num_buffers];
cl::Buffer m_search_buf[c_num_buffers];
unsigned m_workgroup_size;
bool m_opencl_1_1;
+
+ /// Force dag upload to GPU in a single chunk even if OpenCL thinks you can't do it. Use at your own risk.
+ static bool s_forceSingleChunk;
+ /// Allow CPU to appear as an OpenCL device or not. Default is false
+ static bool s_allowCPU;
+ /// GPU memory required for other things, like window rendering, etc.
+ /// User can set it via the --cl-extragpu-mem argument.
+ static unsigned s_extraRequiredGPUMem;
};
diff --git a/libethash-cl/ethash_cl_miner_kernel.cl b/libethash-cl/ethash_cl_miner_kernel.cl
index 8567bb164..2143435ed 100644
--- a/libethash-cl/ethash_cl_miner_kernel.cl
+++ b/libethash-cl/ethash_cl_miner_kernel.cl
@@ -585,4 +585,4 @@ __kernel void ethash_search_chunks(
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
g_output[slot] = gid;
}
-}
\ No newline at end of file
+}
diff --git a/libethcore/Common.cpp b/libethcore/Common.cpp
index 63f4a19f9..618703e22 100644
--- a/libethcore/Common.cpp
+++ b/libethcore/Common.cpp
@@ -112,25 +112,26 @@ std::string formatBalance(bigint const& _b)
static void badBlockInfo(BlockInfo const& _bi, string const& _err)
{
- cwarn << EthRedBold << "========================================================================";
- cwarn << EthRedBold << "== Software Failure " + _err + string(max(0, 44 - _err.size()), ' ') + " ==";
+ string const c_line = EthReset EthOnMaroon + string(80, ' ');
+ string const c_border = EthReset EthOnMaroon + string(2, ' ') + EthReset EthMaroonBold;
+ string const c_space = c_border + string(76, ' ') + c_border;
+ stringstream ss;
+ ss << c_line << endl;
+ ss << c_space << endl;
+ ss << c_border + " Import Failure " + _err + string(max(0, 53 - _err.size()), ' ') + " " + c_border << endl;
+ ss << c_space << endl;
string bin = toString(_bi.number);
- cwarn << EthRedBold << ("== Guru Meditation #" + string(max(0, 8 - bin.size()), '0') + bin + "." + _bi.hash().abridged() + " ==");
- cwarn << EthRedBold << "========================================================================";
+ ss << c_border + (" Guru Meditation #" + string(max(0, 8 - bin.size()), '0') + bin + "." + _bi.hash().abridged() + " ") + c_border << endl;
+ ss << c_space << endl;
+ ss << c_line;
+ cwarn << "\n" + ss.str();
}
void badBlock(bytesConstRef _block, string const& _err)
{
- badBlockInfo(BlockInfo(_block, CheckNothing), _err);
- cwarn << " Block:" << toHex(_block);
- cwarn << " Block RLP:" << RLP(_block);
-}
-
-void badBlockHeader(bytesConstRef _header, string const& _err)
-{
- badBlockInfo(BlockInfo::fromHeader(_header, CheckNothing), _err);
- cwarn << " Header:" << toHex(_header);
- cwarn << " Header RLP:" << RLP(_header);;
+ BlockInfo bi;
+ DEV_IGNORE_EXCEPTIONS(bi = BlockInfo(_block, CheckNothing));
+ badBlockInfo(bi, _err);
}
}
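
badBlockInfo() now assembles the whole "Guru Meditation" box into a stringstream and emits it with a single cwarn. A rough, self-contained sketch of the same 80-column layout, with the Eth* colour macros replaced by plain characters:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Sketch only: builds a banner similar in shape to the one above, without colours.
    std::string guruBanner(std::string const& _err)
    {
        std::string const line(80, '=');
        std::stringstream ss;
        ss << line << "\n";
        ss << "  Import Failure  " << _err << "\n";
        ss << line;
        return ss.str();
    }

    int main() { std::cout << guruBanner("InvalidStateRoot") << "\n"; }
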
diff --git a/libethcore/Common.h b/libethcore/Common.h
index 87ebffab7..6f23cb0e8 100644
--- a/libethcore/Common.h
+++ b/libethcore/Common.h
@@ -85,6 +85,10 @@ using BlockNumber = unsigned;
static const BlockNumber LatestBlock = (BlockNumber)-2;
static const BlockNumber PendingBlock = (BlockNumber)-1;
+static const h256 LatestBlockHash = h256(2);
+static const h256 EarliestBlockHash = h256(1);
+static const h256 PendingBlockHash = h256(0);
+
enum class RelativeBlock: BlockNumber
{
@@ -119,28 +123,43 @@ struct ImportRequirements
class Signal
{
public:
+ using Callback = std::function<void()>;
+
class HandlerAux
{
friend class Signal;
public:
~HandlerAux() { if (m_s) m_s->m_fire.erase(m_i); m_s = nullptr; }
+ void reset() { m_s = nullptr; }
+ void fire() { m_h(); }
private:
- HandlerAux(unsigned _i, Signal* _s): m_i(_i), m_s(_s) {}
+ HandlerAux(unsigned _i, Signal* _s, Callback const& _h): m_i(_i), m_s(_s), m_h(_h) {}
unsigned m_i = 0;
Signal* m_s = nullptr;
+ Callback m_h;
};
- using Callback = std::function<void()>;
+ ~Signal()
+ {
+ for (auto const& h : m_fire)
+ h.second->reset();
+ }
- std::shared_ptr<HandlerAux> add(Callback const& _h) { auto n = m_fire.empty() ? 0 : (m_fire.rbegin()->first + 1); m_fire[n] = _h; return std::shared_ptr<HandlerAux>(new HandlerAux(n, this)); }
+ std::shared_ptr<HandlerAux> add(Callback const& _h)
+ {
+ auto n = m_fire.empty() ? 0 : (m_fire.rbegin()->first + 1);
+ auto h = std::shared_ptr<HandlerAux>(new HandlerAux(n, this, _h));
+ m_fire[n] = h;
+ return h;
+ }
- void operator()() { for (auto const& f: m_fire) f.second(); }
+ void operator()() { for (auto const& f: m_fire) f.second->fire(); }
private:
- std::map<unsigned, Callback> m_fire;
+ std::map<unsigned, std::shared_ptr<HandlerAux>> m_fire;
};
using Handler = std::shared_ptr<Signal::HandlerAux>;
@@ -156,8 +175,6 @@ struct TransactionSkeleton
u256 gasPrice = UndefinedU256;
};
-void badBlockHeader(bytesConstRef _header, std::string const& _err);
-inline void badBlockHeader(bytes const& _header, std::string const& _err) { badBlockHeader(&_header, _err); }
void badBlock(bytesConstRef _header, std::string const& _err);
inline void badBlock(bytes const& _header, std::string const& _err) { badBlock(&_header, _err); }
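
The Signal rework stores the callback inside HandlerAux and lets ~Signal() reset() every live handler, so neither destruction order leaves a dangling reference. A minimal usage sketch, assuming the class sits in namespace dev::eth as in Common.h:

    // Sketch only: handler lifetime with the reworked Signal.
    void signalExample()
    {
        dev::eth::Signal onSomething;
        auto h = onSomething.add([]{ /* react to the event */ });  // h keeps the subscription alive
        onSomething();   // fires the stored callback via HandlerAux::fire()
        h.reset();       // drops the subscription; ~Signal() would otherwise reset() it
    }
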
diff --git a/libethcore/Ethash.cpp b/libethcore/Ethash.cpp
index f715d6912..2c743f33b 100644
--- a/libethcore/Ethash.cpp
+++ b/libethcore/Ethash.cpp
@@ -285,7 +285,6 @@ private:
unsigned Ethash::GPUMiner::s_platformId = 0;
unsigned Ethash::GPUMiner::s_deviceId = 0;
unsigned Ethash::GPUMiner::s_numInstances = 0;
-unsigned Ethash::GPUMiner::s_dagChunks = 1;
Ethash::GPUMiner::GPUMiner(ConstructionInfo const& _ci):
Miner(_ci),
@@ -335,18 +334,19 @@ void Ethash::GPUMiner::workLoop()
EthashAux::FullType dag;
while (true)
{
- if ((dag = EthashAux::full(w.seedHash, false)))
+ if ((dag = EthashAux::full(w.seedHash, true)))
break;
if (shouldStop())
{
delete m_miner;
+ m_miner = nullptr;
return;
}
cnote << "Awaiting DAG";
this_thread::sleep_for(chrono::milliseconds(500));
}
bytesConstRef dagData = dag->data();
- m_miner->init(dagData.data(), dagData.size(), 32, s_platformId, device, s_dagChunks);
+ m_miner->init(dagData.data(), dagData.size(), 32, s_platformId, device);
}
uint64_t upper64OfBoundary = (uint64_t)(u64)((u256)w.boundary >> 192);
@@ -354,6 +354,8 @@ void Ethash::GPUMiner::workLoop()
}
catch (cl::Error const& _e)
{
+ delete m_miner;
+ m_miner = nullptr;
cwarn << "Error GPU mining: " << _e.what() << "(" << _e.err() << ")";
}
}
@@ -371,7 +373,7 @@ std::string Ethash::GPUMiner::platformInfo()
unsigned Ethash::GPUMiner::getNumDevices()
{
- return ethash_cl_miner::get_num_devices(s_platformId);
+ return ethash_cl_miner::getNumDevices(s_platformId);
}
void Ethash::GPUMiner::listDevices()
@@ -379,9 +381,18 @@ void Ethash::GPUMiner::listDevices()
return ethash_cl_miner::listDevices();
}
-bool Ethash::GPUMiner::haveSufficientMemory()
+bool Ethash::GPUMiner::configureGPU(
+ unsigned _platformId,
+ unsigned _deviceId,
+ bool _allowCPU,
+ unsigned _extraGPUMemory,
+ bool _forceSingleChunk,
+ boost::optional _currentBlock
+)
{
- return ethash_cl_miner::haveSufficientGPUMemory();
+ s_platformId = _platformId;
+ s_deviceId = _deviceId;
+ return ethash_cl_miner::configureGPU(_allowCPU, _extraGPUMemory, _forceSingleChunk, _currentBlock);
}
#endif
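
configureGPU() replaces the old haveSufficientMemory()/setDefaultPlatform()/setDefaultDevice() trio with a single call that also records the platform and device ids. A hypothetical caller might look like the following; the value type inside boost::optional is not visible in this diff, so an empty optional is passed:

    // Sketch only: selecting GPU mining with the consolidated configureGPU() call.
    bool ok = dev::eth::Ethash::GPUMiner::configureGPU(
        0,          // platform id
        0,          // device id
        false,      // do not let CPU devices pose as OpenCL GPUs
        64u << 20,  // extra GPU memory (bytes) to leave for the window system
        false,      // do not force a single DAG chunk
        {}          // no current block supplied
    );

When the build lacks OpenCL support, GPUMiner aliases CPUMiner, whose configureGPU() stub simply returns false.
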
diff --git a/libethcore/Ethash.h b/libethcore/Ethash.h
index 99df1dc71..a5a7856f1 100644
--- a/libethcore/Ethash.h
+++ b/libethcore/Ethash.h
@@ -87,11 +87,8 @@ public:
static unsigned instances() { return s_numInstances > 0 ? s_numInstances : std::thread::hardware_concurrency(); }
static std::string platformInfo();
- static void setDefaultPlatform(unsigned) {}
- static void setDagChunks(unsigned) {}
- static void setDefaultDevice(unsigned) {}
static void listDevices() {}
- static bool haveSufficientMemory() { return false; }
+ static bool configureGPU(unsigned, unsigned, bool, unsigned, bool, boost::optional) { return false; }
static void setNumInstances(unsigned _instances) { s_numInstances = std::min(_instances, std::thread::hardware_concurrency()); }
protected:
void kickOff() override
@@ -120,11 +117,15 @@ public:
static std::string platformInfo();
static unsigned getNumDevices();
static void listDevices();
- static bool haveSufficientMemory();
- static void setDefaultPlatform(unsigned _id) { s_platformId = _id; }
- static void setDefaultDevice(unsigned _id) { s_deviceId = _id; }
+ static bool configureGPU(
+ unsigned _platformId,
+ unsigned _deviceId,
+ bool _allowCPU,
+ unsigned _extraGPUMemory,
+ bool _forceSingleChunk,
+ boost::optional _currentBlock
+ );
static void setNumInstances(unsigned _instances) { s_numInstances = std::min(_instances, getNumDevices()); }
- static void setDagChunks(unsigned _dagChunks) { s_dagChunks = _dagChunks; }
protected:
void kickOff() override;
@@ -143,7 +144,6 @@ public:
static unsigned s_platformId;
static unsigned s_deviceId;
static unsigned s_numInstances;
- static unsigned s_dagChunks;
};
#else
using GPUMiner = CPUMiner;
diff --git a/libethcore/EthashAux.cpp b/libethcore/EthashAux.cpp
index 0c1e84ebc..51a606ff8 100644
--- a/libethcore/EthashAux.cpp
+++ b/libethcore/EthashAux.cpp
@@ -54,6 +54,11 @@ uint64_t EthashAux::cacheSize(BlockInfo const& _header)
return ethash_get_cachesize((uint64_t)_header.number);
}
+uint64_t EthashAux::dataSize(uint64_t _blockNumber)
+{
+ return ethash_get_datasize(_blockNumber);
+}
+
h256 EthashAux::seedHash(unsigned _number)
{
unsigned epoch = _number / ETHASH_EPOCH_LENGTH;
diff --git a/libethcore/EthashAux.h b/libethcore/EthashAux.h
index e6fed519f..47180bfd2 100644
--- a/libethcore/EthashAux.h
+++ b/libethcore/EthashAux.h
@@ -66,6 +66,7 @@ public:
static h256 seedHash(unsigned _number);
static uint64_t number(h256 const& _seedHash);
static uint64_t cacheSize(BlockInfo const& _header);
+ static uint64_t dataSize(uint64_t _blockNumber);
static LightType light(h256 const& _seedHash);
diff --git a/libethereum/BlockChain.cpp b/libethereum/BlockChain.cpp
index 3838dd362..bd6996a45 100644
--- a/libethereum/BlockChain.cpp
+++ b/libethereum/BlockChain.cpp
@@ -35,6 +35,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -42,13 +43,14 @@
#include "GenesisInfo.h"
#include "State.h"
#include "Defaults.h"
+
using namespace std;
using namespace dev;
using namespace dev::eth;
namespace js = json_spirit;
#define ETH_CATCH 1
-#define ETH_TIMED_IMPORTS 0
+#define ETH_TIMED_IMPORTS 1
#ifdef _WIN32
const char* BlockChainDebug::name() { return EthBlue "8" EthWhite " <>"; }
@@ -307,47 +309,50 @@ tuple BlockChain::sync(BlockQueue& _bq, OverlayDB const& _st
{
// _bq.tick(*this);
- vector<pair<BlockInfo, bytes>> blocks;
+ VerifiedBlocks blocks;
_bq.drain(blocks, _max);
h256s fresh;
h256s dead;
h256s badBlocks;
- for (auto const& block: blocks)
- {
- try
- {
- // Nonce & uncle nonces already verified in verification thread at this point.
- ImportRoute r;
- DEV_TIMED_ABOVE(Block import, 500)
- r = import(block.first, block.second, _stateDB, ImportRequirements::Default & ~ImportRequirements::ValidNonce & ~ImportRequirements::CheckUncles);
- fresh += r.first;
- dead += r.second;
- }
- catch (dev::eth::UnknownParent)
- {
- cwarn << "ODD: Import queue contains block with unknown parent." << LogTag::Error << boost::current_exception_diagnostic_information();
- // NOTE: don't reimport since the queue should guarantee everything in the right order.
- // Can't continue - chain bad.
- badBlocks.push_back(block.first.hash());
- }
- catch (dev::eth::FutureTime)
- {
- cwarn << "ODD: Import queue contains a block with future time." << LogTag::Error << boost::current_exception_diagnostic_information();
- // NOTE: don't reimport since the queue should guarantee everything in the past.
- // Can't continue - chain bad.
- badBlocks.push_back(block.first.hash());
- }
- catch (Exception& ex)
+ for (VerifiedBlock const& block: blocks)
+ if (!badBlocks.empty())
+ badBlocks.push_back(block.verified.info.hash());
+ else
{
- cnote << "Exception while importing block. Someone (Jeff? That you?) seems to be giving us dodgy blocks!" << LogTag::Error << diagnostic_information(ex);
- if (m_onBad)
- m_onBad(ex);
- // NOTE: don't reimport since the queue should guarantee everything in the right order.
- // Can't continue - chain bad.
- badBlocks.push_back(block.first.hash());
+ try
+ {
+ // Nonce & uncle nonces already verified in verification thread at this point.
+ ImportRoute r;
+ DEV_TIMED_ABOVE(Block import, 500)
+ r = import(block.verified, _stateDB, ImportRequirements::Default & ~ImportRequirements::ValidNonce & ~ImportRequirements::CheckUncles);
+ fresh += r.first;
+ dead += r.second;
+ }
+ catch (dev::eth::UnknownParent)
+ {
+ cwarn << "ODD: Import queue contains block with unknown parent.";// << LogTag::Error << boost::current_exception_diagnostic_information();
+ // NOTE: don't reimport since the queue should guarantee everything in the right order.
+ // Can't continue - chain bad.
+ badBlocks.push_back(block.verified.info.hash());
+ }
+ catch (dev::eth::FutureTime)
+ {
+ cwarn << "ODD: Import queue contains a block with future time.";// << LogTag::Error << boost::current_exception_diagnostic_information();
+ // NOTE: don't reimport since the queue should guarantee everything in the past.
+ // Can't continue - chain bad.
+ badBlocks.push_back(block.verified.info.hash());
+ }
+ catch (Exception& ex)
+ {
+// cnote << "Exception while importing block. Someone (Jeff? That you?) seems to be giving us dodgy blocks!";// << LogTag::Error << diagnostic_information(ex);
+ if (m_onBad)
+ m_onBad(ex);
+ // NOTE: don't reimport since the queue should guarantee everything in the right order.
+ // Can't continue - chain bad.
+ badBlocks.push_back(block.verified.info.hash());
+ }
}
- }
return make_tuple(fresh, dead, _bq.doneDrain(badBlocks));
}
@@ -355,7 +360,7 @@ pair BlockChain::attemptImport(bytes const& _block, O
{
try
{
- return make_pair(ImportResult::Success, import(_block, _stateDB, _ir));
+ return make_pair(ImportResult::Success, import(verifyBlock(_block, m_onBad, _ir), _stateDB, _ir));
}
catch (UnknownParent&)
{
@@ -380,29 +385,28 @@ pair BlockChain::attemptImport(bytes const& _block, O
ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, ImportRequirements::value _ir)
{
// VERIFY: populates from the block and checks the block is internally coherent.
- BlockInfo bi;
+ VerifiedBlockRef block;
#if ETH_CATCH
try
#endif
{
- bi.populate(&_block);
- bi.verifyInternals(&_block);
+ block = verifyBlock(_block, m_onBad);
}
#if ETH_CATCH
catch (Exception& ex)
{
- clog(BlockChainNote) << " Malformed block: " << diagnostic_information(ex);
+// clog(BlockChainNote) << " Malformed block: " << diagnostic_information(ex);
ex << errinfo_now(time(0));
ex << errinfo_block(_block);
throw;
}
#endif
- return import(bi, _block, _db, _ir);
+ return import(block, _db, _ir);
}
-ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, OverlayDB const& _db, ImportRequirements::value _ir)
+ImportRoute BlockChain::import(VerifiedBlockRef const& _block, OverlayDB const& _db, ImportRequirements::value _ir)
{
//@tidy This is a behemoth of a method - could do to be split into a few smaller ones.
@@ -417,28 +421,28 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
#endif
// Check block doesn't already exist first!
- if (isKnown(_bi.hash()) && (_ir & ImportRequirements::DontHave))
+ if (isKnown(_block.info.hash()) && (_ir & ImportRequirements::DontHave))
{
- clog(BlockChainNote) << _bi.hash() << ": Not new.";
+ clog(BlockChainNote) << _block.info.hash() << ": Not new.";
BOOST_THROW_EXCEPTION(AlreadyHaveBlock());
}
// Work out its number as the parent's number + 1
- if (!isKnown(_bi.parentHash))
+ if (!isKnown(_block.info.parentHash))
{
- clog(BlockChainNote) << _bi.hash() << ": Unknown parent " << _bi.parentHash;
+ clog(BlockChainNote) << _block.info.hash() << ": Unknown parent " << _block.info.parentHash;
// We don't know the parent (yet) - discard for now. It'll get resent to us if we find out about its ancestry later on.
BOOST_THROW_EXCEPTION(UnknownParent());
}
- auto pd = details(_bi.parentHash);
+ auto pd = details(_block.info.parentHash);
if (!pd)
{
auto pdata = pd.rlp();
clog(BlockChainDebug) << "Details is returning false despite block known:" << RLP(pdata);
- auto parentBlock = block(_bi.parentHash);
- clog(BlockChainDebug) << "isKnown:" << isKnown(_bi.parentHash);
- clog(BlockChainDebug) << "last/number:" << m_lastBlockNumber << m_lastBlockHash << _bi.number;
+ auto parentBlock = block(_block.info.parentHash);
+ clog(BlockChainDebug) << "isKnown:" << isKnown(_block.info.parentHash);
+ clog(BlockChainDebug) << "last/number:" << m_lastBlockNumber << m_lastBlockHash << _block.info.number;
clog(BlockChainDebug) << "Block:" << BlockInfo(parentBlock);
clog(BlockChainDebug) << "RLP:" << RLP(parentBlock);
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
@@ -446,14 +450,14 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
}
// Check it's not crazy
- if (_bi.timestamp > (u256)time(0))
+ if (_block.info.timestamp > (u256)time(0))
{
- clog(BlockChainChat) << _bi.hash() << ": Future time " << _bi.timestamp << " (now at " << time(0) << ")";
+ clog(BlockChainChat) << _block.info.hash() << ": Future time " << _block.info.timestamp << " (now at " << time(0) << ")";
// Block has a timestamp in the future. This is no good.
BOOST_THROW_EXCEPTION(FutureTime());
}
- clog(BlockChainChat) << "Attempting import of " << _bi.hash() << "...";
+ clog(BlockChainChat) << "Attempting import of " << _block.info.hash() << "...";
#if ETH_TIMED_IMPORTS
preliminaryChecks = t.elapsed();
@@ -473,7 +477,7 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
// Check transactions are valid and that they result in a state equivalent to our state_root.
// Get total difficulty increase and update state, checking it.
State s(_db);
- auto tdIncrease = s.enactOn(&_block, _bi, *this, _ir);
+ auto tdIncrease = s.enactOn(_block, *this, _ir);
BlockLogBlooms blb;
BlockReceipts br;
@@ -503,22 +507,22 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
// together with an "ensureCachedWithUpdatableLock(l)" method.
// This is safe in practice since the caches don't get flushed nearly often enough to be
// done here.
- details(_bi.parentHash);
+ details(_block.info.parentHash);
DEV_WRITE_GUARDED(x_details)
- m_details[_bi.parentHash].children.push_back(_bi.hash());
+ m_details[_block.info.parentHash].children.push_back(_block.info.hash());
#if ETH_TIMED_IMPORTS || !ETH_TRUE
collation = t.elapsed();
t.restart();
#endif
- blocksBatch.Put(toSlice(_bi.hash()), (ldb::Slice)ref(_block));
+ blocksBatch.Put(toSlice(_block.info.hash()), ldb::Slice(_block.block));
DEV_READ_GUARDED(x_details)
- extrasBatch.Put(toSlice(_bi.parentHash, ExtraDetails), (ldb::Slice)dev::ref(m_details[_bi.parentHash].rlp()));
+ extrasBatch.Put(toSlice(_block.info.parentHash, ExtraDetails), (ldb::Slice)dev::ref(m_details[_block.info.parentHash].rlp()));
- extrasBatch.Put(toSlice(_bi.hash(), ExtraDetails), (ldb::Slice)dev::ref(BlockDetails((unsigned)pd.number + 1, td, _bi.parentHash, {}).rlp()));
- extrasBatch.Put(toSlice(_bi.hash(), ExtraLogBlooms), (ldb::Slice)dev::ref(blb.rlp()));
- extrasBatch.Put(toSlice(_bi.hash(), ExtraReceipts), (ldb::Slice)dev::ref(br.rlp()));
+ extrasBatch.Put(toSlice(_block.info.hash(), ExtraDetails), (ldb::Slice)dev::ref(BlockDetails((unsigned)pd.number + 1, td, _block.info.parentHash, {}).rlp()));
+ extrasBatch.Put(toSlice(_block.info.hash(), ExtraLogBlooms), (ldb::Slice)dev::ref(blb.rlp()));
+ extrasBatch.Put(toSlice(_block.info.hash(), ExtraReceipts), (ldb::Slice)dev::ref(br.rlp()));
#if ETH_TIMED_IMPORTS || !ETH_TRUE
writing = t.elapsed();
@@ -533,23 +537,18 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
}
catch (Exception& ex)
{
- clog(BlockChainWarn) << " Malformed block: " << diagnostic_information(ex);
- clog(BlockChainWarn) << "Block: " << _bi.hash();
- clog(BlockChainWarn) << _bi;
- clog(BlockChainWarn) << "Block parent: " << _bi.parentHash;
- clog(BlockChainWarn) << BlockInfo(block(_bi.parentHash));
ex << errinfo_now(time(0));
- ex << errinfo_block(_block);
+ ex << errinfo_block(_block.block.toBytes());
throw;
}
#endif
StructuredLogger::chainReceivedNewBlock(
- _bi.headerHash(WithoutNonce).abridged(),
- _bi.nonce.abridged(),
+ _block.info.headerHash(WithoutNonce).abridged(),
+ _block.info.nonce.abridged(),
currentHash().abridged(),
"", // TODO: remote id ??
- _bi.parentHash.abridged()
+ _block.info.parentHash.abridged()
);
// cnote << "Parent " << bi.parentHash << " has " << details(bi.parentHash).children.size() << " children.";
@@ -562,8 +561,8 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
// don't include bi.hash() in treeRoute, since it's not yet in details DB...
// just tack it on afterwards.
unsigned commonIndex;
- tie(route, common, commonIndex) = treeRoute(last, _bi.parentHash);
- route.push_back(_bi.hash());
+ tie(route, common, commonIndex) = treeRoute(last, _block.info.parentHash);
+ route.push_back(_block.info.hash());
// Most of the time these two will be equal - only when we're doing a chain revert will they not be
if (common != last)
@@ -575,8 +574,8 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
for (auto i = route.rbegin(); i != route.rend() && *i != common; ++i)
{
BlockInfo tbi;
- if (*i == _bi.hash())
- tbi = _bi;
+ if (*i == _block.info.hash())
+ tbi = _block.info;
else
tbi = BlockInfo(block(*i));
@@ -603,7 +602,7 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
h256s newTransactionAddresses;
{
bytes blockBytes;
- RLP blockRLP(*i == _bi.hash() ? _block : (blockBytes = block(*i)));
+ RLP blockRLP(*i == _block.info.hash() ? _block.block : &(blockBytes = block(*i)));
TransactionAddress ta;
ta.blockHash = tbi.hash();
for (ta.index = 0; ta.index < blockRLP[1].itemCount(); ++ta.index)
@@ -619,17 +618,17 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
// FINALLY! change our best hash.
{
- newLastBlockHash = _bi.hash();
- newLastBlockNumber = (unsigned)_bi.number;
+ newLastBlockHash = _block.info.hash();
+ newLastBlockNumber = (unsigned)_block.info.number;
}
- clog(BlockChainNote) << " Imported and best" << td << " (#" << _bi.number << "). Has" << (details(_bi.parentHash).children.size() - 1) << "siblings. Route:" << route;
+ clog(BlockChainNote) << " Imported and best" << td << " (#" << _block.info.number << "). Has" << (details(_block.info.parentHash).children.size() - 1) << "siblings. Route:" << route;
StructuredLogger::chainNewHead(
- _bi.headerHash(WithoutNonce).abridged(),
- _bi.nonce.abridged(),
+ _block.info.headerHash(WithoutNonce).abridged(),
+ _block.info.nonce.abridged(),
currentHash().abridged(),
- _bi.parentHash.abridged()
+ _block.info.parentHash.abridged()
);
}
else
@@ -640,24 +639,26 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
m_blocksDB->Write(m_writeOptions, &blocksBatch);
m_extrasDB->Write(m_writeOptions, &extrasBatch);
- if (isKnown(_bi.hash()) && !details(_bi.hash()))
+#if ETH_PARANOIA || !ETH_TRUE
+ if (isKnown(_block.info.hash()) && !details(_block.info.hash()))
{
clog(BlockChainDebug) << "Known block just inserted has no details.";
- clog(BlockChainDebug) << "Block:" << _bi;
+ clog(BlockChainDebug) << "Block:" << _block.info;
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
exit(-1);
}
try {
- State canary(_db, *this, _bi.hash(), ImportRequirements::DontHave);
+ State canary(_db, *this, _block.info.hash(), ImportRequirements::DontHave);
}
catch (...)
{
clog(BlockChainDebug) << "Failed to initialise State object form imported block.";
- clog(BlockChainDebug) << "Block:" << _bi;
+ clog(BlockChainDebug) << "Block:" << _block.info;
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
exit(-1);
}
+#endif
if (m_lastBlockHash != newLastBlockHash)
DEV_WRITE_GUARDED(x_lastBlockHash)
@@ -673,12 +674,16 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
#if ETH_TIMED_IMPORTS
checkBest = t.elapsed();
- cnote << "Import took:" << total.elapsed();
- cnote << "preliminaryChecks:" << preliminaryChecks;
- cnote << "enactment:" << enactment;
- cnote << "collation:" << collation;
- cnote << "writing:" << writing;
- cnote << "checkBest:" << checkBest;
+ if (total.elapsed() > 1.0)
+ {
+ cnote << "SLOW IMPORT:" << _block.info.hash();
+ cnote << " Import took:" << total.elapsed();
+ cnote << " preliminaryChecks:" << preliminaryChecks;
+ cnote << " enactment:" << enactment;
+ cnote << " collation:" << collation;
+ cnote << " writing:" << writing;
+ cnote << " checkBest:" << checkBest;
+ }
#endif
if (!route.empty())
@@ -921,8 +926,8 @@ void BlockChain::checkConsistency()
delete it;
}
-static inline unsigned upow(unsigned a, unsigned b) { while (b-- > 0) a *= a; return a; }
-static inline unsigned ceilDiv(unsigned n, unsigned d) { return n / (n + d - 1); }
+static inline unsigned upow(unsigned a, unsigned b) { if (!b) return 1; while (--b > 0) a *= a; return a; }
+static inline unsigned ceilDiv(unsigned n, unsigned d) { return (n + d - 1) / d; }
//static inline unsigned floorDivPow(unsigned n, unsigned a, unsigned b) { return n / upow(a, b); }
//static inline unsigned ceilDivPow(unsigned n, unsigned a, unsigned b) { return ceilDiv(n, upow(a, b)); }
@@ -1060,3 +1065,63 @@ bytes BlockChain::block(h256 const& _hash) const
return m_blocks[_hash];
}
+
+VerifiedBlockRef BlockChain::verifyBlock(bytes const& _block, function<void(Exception&)> const& _onBad, ImportRequirements::value _ir)
+{
+ VerifiedBlockRef res;
+ try
+ {
+ Strictness strictness = Strictness::CheckEverything;
+ if (_ir & ~ImportRequirements::ValidNonce)
+ strictness = Strictness::IgnoreNonce;
+
+ res.info.populate(_block, strictness);
+ res.info.verifyInternals(&_block);
+ }
+ catch (Exception& ex)
+ {
+ ex << errinfo_now(time(0));
+ ex << errinfo_block(_block);
+ if (_onBad)
+ _onBad(ex);
+ throw;
+ }
+
+ RLP r(_block);
+ unsigned i = 0;
+ for (auto const& uncle: r[2])
+ {
+ try
+ {
+ BlockInfo().populateFromHeader(RLP(uncle.data()), CheckEverything);
+ }
+ catch (Exception& ex)
+ {
+ ex << errinfo_uncleIndex(i);
+ ex << errinfo_now(time(0));
+ ex << errinfo_block(_block);
+ if (_onBad)
+ _onBad(ex);
+ throw;
+ }
+ ++i;
+ }
+ i = 0;
+ for (auto const& tr: r[1])
+ {
+ try
+ {
+ res.transactions.push_back(Transaction(tr.data(), CheckTransaction::Everything));
+ }
+ catch (Exception& ex)
+ {
+ ex << errinfo_transactionIndex(i);
+ ex << errinfo_block(_block);
+ throw;
+ }
+ ++i;
+ }
+ res.block = bytesConstRef(&_block);
+ return move(res);
+}
+
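
One hunk above also repairs two helpers: ceilDiv() previously computed n / (n + d - 1) instead of a rounded-up division, and upow() returned its base unchanged for a zero exponent. A quick standalone check of the corrected versions:

    #include <cassert>

    static inline unsigned upow(unsigned a, unsigned b) { if (!b) return 1; while (--b > 0) a *= a; return a; }
    static inline unsigned ceilDiv(unsigned n, unsigned d) { return (n + d - 1) / d; }

    int main()
    {
        assert(ceilDiv(10, 4) == 3);  // old formula returned 10 / 13 == 0
        assert(upow(7, 0) == 1);      // old formula returned 7 for a zero exponent
        return 0;
    }
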
diff --git a/libethereum/BlockChain.h b/libethereum/BlockChain.h
index 06a22e27b..534173a9a 100644
--- a/libethereum/BlockChain.h
+++ b/libethereum/BlockChain.h
@@ -40,6 +40,7 @@
#include "Account.h"
#include "Transaction.h"
#include "BlockQueue.h"
+#include "VerifiedBlock.h"
namespace ldb = leveldb;
namespace std
@@ -120,7 +121,7 @@ public:
/// Import block into disk-backed DB
/// @returns the block hashes of any blocks that came into/went out of the canonical block chain.
ImportRoute import(bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir = ImportRequirements::Default);
- ImportRoute import(BlockInfo const& _bi, bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir = ImportRequirements::Default);
+ ImportRoute import(VerifiedBlockRef const& _block, OverlayDB const& _db, ImportRequirements::value _ir = ImportRequirements::Default);
/// Returns true if the given block is known (though not necessarily a part of the canon chain).
bool isKnown(h256 const& _hash) const;
@@ -143,6 +144,7 @@ public:
BlockLogBlooms logBlooms() const { return logBlooms(currentHash()); }
/// Get the transactions' receipts of a block (or the most recent mined if none given). Thread-safe.
+ /// Receipts are given in the same order as the transactions.
BlockReceipts receipts(h256 const& _hash) const { return queryExtras(_hash, m_receipts, x_receipts, NullBlockReceipts); }
BlockReceipts receipts() const { return receipts(currentHash()); }
@@ -256,6 +258,9 @@ public:
/// Deallocate unused data.
void garbageCollect(bool _force = false);
+ /// Verify block and prepare it for enactment
+ static VerifiedBlockRef verifyBlock(bytes const& _block, std::function<void(Exception&)> const& _onBad = std::function<void(Exception&)>(), ImportRequirements::value _ir = ImportRequirements::Default);
+
/// Change the function that is called with a bad block.
template <class T> void setOnBad(T const& _t) { m_onBad = _t; }
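
Block verification is now centralised in the static BlockChain::verifyBlock(), which populates and checks the header, every uncle header and every transaction before anything reaches import(). A sketch of a caller, assuming the bad-block callback takes an Exception& as the m_onBad handler in this diff does:

    // Sketch only: pre-verifying raw block bytes the way the queue's verifier threads now do.
    void verifyExample(dev::bytes const& _blockBytes)
    {
        using namespace dev;
        using namespace dev::eth;
        try
        {
            VerifiedBlockRef v = BlockChain::verifyBlock(_blockBytes, [](Exception& _ex) {
                cwarn << "Bad block:" << boost::diagnostic_information(_ex);
            });
            (void)v; // v.info, v.transactions and v.block are now ready for import()
        }
        catch (Exception const&)
        {
            // a header, uncle or transaction failed verification; reject the block
        }
    }
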
diff --git a/libethereum/BlockQueue.cpp b/libethereum/BlockQueue.cpp
index 07c9ba090..f142be62e 100644
--- a/libethereum/BlockQueue.cpp
+++ b/libethereum/BlockQueue.cpp
@@ -22,10 +22,10 @@
#include "BlockQueue.h"
#include
#include
-#include
#include
#include
#include "BlockChain.h"
+#include "VerifiedBlock.h"
#include "State.h"
using namespace std;
using namespace dev;
@@ -37,8 +37,16 @@ const char* BlockQueueChannel::name() { return EthOrange "[]>"; }
const char* BlockQueueChannel::name() { return EthOrange "▣┅▶"; }
#endif
+size_t const c_maxKnownCount = 100000;
+size_t const c_maxKnownSize = 128 * 1024 * 1024;
+size_t const c_maxUnknownCount = 100000;
+size_t const c_maxUnknownSize = 512 * 1024 * 1024; // Block size can be ~50kb
-BlockQueue::BlockQueue()
+BlockQueue::BlockQueue():
+ m_unknownSize(0),
+ m_knownSize(0),
+ m_unknownCount(0),
+ m_knownCount(0)
{
// Allow some room for other activity
unsigned verifierThreads = std::max(thread::hardware_concurrency(), 3U) - 2U;
@@ -57,11 +65,29 @@ BlockQueue::~BlockQueue()
i.join();
}
+void BlockQueue::clear()
+{
+ WriteGuard l(m_lock);
+ DEV_INVARIANT_CHECK;
+ Guard l2(m_verification);
+ m_readySet.clear();
+ m_drainingSet.clear();
+ m_verified.clear();
+ m_unverified.clear();
+ m_unknownSet.clear();
+ m_unknown.clear();
+ m_future.clear();
+ m_unknownSize = 0;
+ m_unknownCount = 0;
+ m_knownSize = 0;
+ m_knownCount = 0;
+}
+
void BlockQueue::verifierBody()
{
while (!m_deleting)
{
- std::pair<h256, bytes> work;
+ UnverifiedBlock work;
{
unique_lock l(m_verification);
@@ -71,51 +97,16 @@ void BlockQueue::verifierBody()
swap(work, m_unverified.front());
m_unverified.pop_front();
BlockInfo bi;
- bi.mixHash = work.first;
- m_verifying.push_back(make_pair(bi, bytes()));
+ bi.mixHash = work.hash;
+ bi.parentHash = work.parentHash;
+ m_verifying.push_back(VerifiedBlock { VerifiedBlockRef { bytesConstRef(), move(bi), Transactions() }, bytes() });
}
- std::pair<BlockInfo, bytes> res;
- swap(work.second, res.second);
+ VerifiedBlock res;
+ swap(work.block, res.blockData);
try
{
- try
- {
- res.first.populate(res.second, CheckEverything, work.first);
- res.first.verifyInternals(&res.second);
- }
- catch (Exception& ex)
- {
- clog(BlockChainNote) << " Malformed block: " << diagnostic_information(ex);
- badBlock(res.second, ex.what());
- ex << errinfo_now(time(0));
- ex << errinfo_block(res.second);
- if (m_onBad)
- m_onBad(ex);
- throw;
- }
-
- RLP r(&res.second);
- unsigned ii = 0;
- for (auto const& uncle: r[2])
- {
- try
- {
- BlockInfo().populateFromHeader(RLP(uncle.data()), CheckEverything);
- }
- catch (Exception& ex)
- {
- clog(BlockChainNote) << " Malformed block header: " << diagnostic_information(ex);
- badBlockHeader(uncle.data(), ex.what());
- ex << errinfo_uncleIndex(ii);
- ex << errinfo_now(time(0));
- ex << errinfo_block(res.second);
- if (m_onBad)
- m_onBad(ex);
- throw;
- }
- ++ii;
- }
+ res.verified = BlockChain::verifyBlock(res.blockData, m_onBad);
}
catch (...)
{
@@ -124,33 +115,46 @@ void BlockQueue::verifierBody()
// has to be this order as that's how invariants() assumes.
WriteGuard l2(m_lock);
unique_lock l(m_verification);
- m_readySet.erase(work.first);
- m_knownBad.insert(work.first);
+ m_readySet.erase(work.hash);
+ m_knownBad.insert(work.hash);
}
unique_lock l(m_verification);
for (auto it = m_verifying.begin(); it != m_verifying.end(); ++it)
- if (it->first.mixHash == work.first)
+ if (it->verified.info.mixHash == work.hash)
{
m_verifying.erase(it);
goto OK1;
}
- cwarn << "GAA BlockQueue corrupt: job cancelled but cannot be found in m_verifying queue.";
+ cwarn << "BlockQueue missing our job: was there a GM?";
OK1:;
continue;
}
bool ready = false;
{
+ WriteGuard l2(m_lock);
unique_lock l(m_verification);
- if (m_verifying.front().first.mixHash == work.first)
+ if (!m_verifying.empty() && m_verifying.front().verified.info.mixHash == work.hash)
{
// we're next!
m_verifying.pop_front();
- m_verified.push_back(move(res));
- while (m_verifying.size() && !m_verifying.front().second.empty())
+ if (m_knownBad.count(res.verified.info.parentHash))
{
- m_verified.push_back(move(m_verifying.front()));
+ m_readySet.erase(res.verified.info.hash());
+ m_knownBad.insert(res.verified.info.hash());
+ }
+ else
+ m_verified.push_back(move(res));
+ while (m_verifying.size() && !m_verifying.front().blockData.empty())
+ {
+ if (m_knownBad.count(m_verifying.front().verified.info.parentHash))
+ {
+ m_readySet.erase(m_verifying.front().verified.info.hash());
+ m_knownBad.insert(res.verified.info.hash());
+ }
+ else
+ m_verified.push_back(move(m_verifying.front()));
m_verifying.pop_front();
}
ready = true;
@@ -158,12 +162,12 @@ void BlockQueue::verifierBody()
else
{
for (auto& i: m_verifying)
- if (i.first.mixHash == work.first)
+ if (i.verified.info.mixHash == work.hash)
{
i = move(res);
goto OK;
}
- cwarn << "GAA BlockQueue corrupt: job finished but cannot be found in m_verifying queue.";
+ cwarn << "BlockQueue missing our job: was there a GM?";
OK:;
}
}
@@ -223,6 +227,8 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
if (strftime(buf, 24, "%X", localtime(&bit)) == 0)
buf[0] = '\0'; // empty if case strftime fails
cblockq << "OK - queued for future [" << bi.timestamp << "vs" << time(0) << "] - will wait until" << buf;
+ m_unknownSize += _block.size();
+ m_unknownCount++;
return ImportResult::FutureTime;
}
else
@@ -231,6 +237,7 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
if (m_knownBad.count(bi.parentHash))
{
m_knownBad.insert(bi.hash());
+ updateBad(bi.hash());
// bad parent; this is bad too, note it as such
return ImportResult::BadChain;
}
@@ -240,6 +247,8 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
cblockq << "OK - queued as unknown parent:" << bi.parentHash;
m_unknown.insert(make_pair(bi.parentHash, make_pair(h, _block.toBytes())));
m_unknownSet.insert(h);
+ m_unknownSize += _block.size();
+ m_unknownCount++;
return ImportResult::UnknownParent;
}
@@ -248,9 +257,11 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
// If valid, append to blocks.
cblockq << "OK - ready for chain insertion.";
DEV_GUARDED(m_verification)
- m_unverified.push_back(make_pair(h, _block.toBytes()));
+ m_unverified.push_back(UnverifiedBlock { h, bi.parentHash, _block.toBytes() });
m_moreToVerify.notify_one();
m_readySet.insert(h);
+ m_knownSize += _block.size();
+ m_knownCount++;
noteReady_WITH_LOCK(h);
@@ -259,30 +270,93 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
}
}
-bool BlockQueue::doneDrain(h256s const& _bad)
+void BlockQueue::updateBad(h256 const& _bad)
{
- WriteGuard l(m_lock);
DEV_INVARIANT_CHECK;
- m_drainingSet.clear();
- if (_bad.size())
+ DEV_GUARDED(m_verification)
{
- vector<pair<BlockInfo, bytes>> old;
- DEV_GUARDED(m_verification)
- swap(m_verified, old);
- for (auto& b: old)
+ collectUnknownBad(_bad);
+ bool moreBad = true;
+ while (moreBad)
{
- if (m_knownBad.count(b.first.parentHash))
- {
- m_knownBad.insert(b.first.hash());
- m_readySet.erase(b.first.hash());
- }
- else
- DEV_GUARDED(m_verification)
+ moreBad = false;
+ std::vector<VerifiedBlock> oldVerified;
+ swap(m_verified, oldVerified);
+ for (auto& b: oldVerified)
+ if (m_knownBad.count(b.verified.info.parentHash) || m_knownBad.count(b.verified.info.hash()))
+ {
+ m_knownBad.insert(b.verified.info.hash());
+ m_readySet.erase(b.verified.info.hash());
+ collectUnknownBad(b.verified.info.hash());
+ moreBad = true;
+ }
+ else
m_verified.push_back(std::move(b));
+
+ std::deque<UnverifiedBlock> oldUnverified;
+ swap(m_unverified, oldUnverified);
+ for (auto& b: oldUnverified)
+ if (m_knownBad.count(b.parentHash) || m_knownBad.count(b.hash))
+ {
+ m_knownBad.insert(b.hash);
+ m_readySet.erase(b.hash);
+ collectUnknownBad(b.hash);
+ moreBad = true;
+ }
+ else
+ m_unverified.push_back(std::move(b));
+
+ std::deque<VerifiedBlock> oldVerifying;
+ swap(m_verifying, oldVerifying);
+ for (auto& b: oldVerifying)
+ if (m_knownBad.count(b.verified.info.parentHash) || m_knownBad.count(b.verified.info.mixHash))
+ {
+ h256 const& h = b.blockData.size() != 0 ? b.verified.info.hash() : b.verified.info.mixHash;
+ m_knownBad.insert(h);
+ m_readySet.erase(h);
+ collectUnknownBad(h);
+ moreBad = true;
+ }
+ else
+ m_verifying.push_back(std::move(b));
}
}
- m_knownBad += _bad;
- return !m_readySet.empty();
+ DEV_INVARIANT_CHECK;
+}
+
+void BlockQueue::collectUnknownBad(h256 const& _bad)
+{
+ list<h256> badQueue(1, _bad);
+ while (!badQueue.empty())
+ {
+ auto r = m_unknown.equal_range(badQueue.front());
+ badQueue.pop_front();
+ for (auto it = r.first; it != r.second; ++it)
+ {
+ m_unknownSize -= it->second.second.size();
+ m_unknownCount--;
+ auto newBad = it->second.first;
+ m_unknownSet.erase(newBad);
+ m_knownBad.insert(newBad);
+ badQueue.push_back(newBad);
+ }
+ m_unknown.erase(r.first, r.second);
+ }
+
+}
+
+bool BlockQueue::doneDrain(h256s const& _bad)
+{
+ WriteGuard l(m_lock);
+ DEV_INVARIANT_CHECK;
+ m_drainingSet.clear();
+ if (_bad.size())
+ {
+ // at least one of them was bad.
+ m_knownBad += _bad;
+ for (h256 const& b : _bad)
+ updateBad(b);
+ }
+ return !m_readySet.empty();
}
void BlockQueue::tick(BlockChain const& _bc)
@@ -306,7 +380,11 @@ void BlockQueue::tick(BlockChain const& _bc)
DEV_INVARIANT_CHECK;
auto end = m_future.lower_bound(t);
for (auto i = m_future.begin(); i != end; ++i)
+ {
+ m_unknownSize -= i->second.second.size();
+ m_unknownCount--;
todo.push_back(move(i->second));
+ }
m_future.erase(m_future.begin(), end);
}
}
@@ -337,12 +415,24 @@ QueueStatus BlockQueue::blockStatus(h256 const& _h) const
QueueStatus::Unknown;
}
-void BlockQueue::drain(std::vector<std::pair<BlockInfo, bytes>>& o_out, unsigned _max)
+bool BlockQueue::knownFull() const
+{
+ return m_knownSize > c_maxKnownSize || m_knownCount > c_maxKnownCount;
+}
+
+bool BlockQueue::unknownFull() const
+{
+ return m_unknownSize > c_maxUnknownSize || m_unknownCount > c_maxUnknownCount;
+}
+
+void BlockQueue::drain(VerifiedBlocks& o_out, unsigned _max)
{
WriteGuard l(m_lock);
DEV_INVARIANT_CHECK;
+
if (m_drainingSet.empty())
{
+ bool wasFull = knownFull();
DEV_GUARDED(m_verification)
{
o_out.resize(min(_max, m_verified.size()));
@@ -353,11 +443,16 @@ void BlockQueue::drain(std::vector>& o_out, unsigned
for (auto const& bs: o_out)
{
// TODO: @optimise use map rather than vector & set.
- auto h = bs.first.hash();
+ auto h = bs.verified.info.hash();
m_drainingSet.insert(h);
m_readySet.erase(h);
+ m_knownSize -= bs.verified.block.size();
+ m_knownCount--;
}
+ if (wasFull && !knownFull())
+ m_onRoomAvailable();
}
+
}
bool BlockQueue::invariants() const
@@ -378,7 +473,11 @@ void BlockQueue::noteReady_WITH_LOCK(h256 const& _good)
for (auto it = r.first; it != r.second; ++it)
{
DEV_GUARDED(m_verification)
- m_unverified.push_back(it->second);
+ m_unverified.push_back(UnverifiedBlock { it->second.first, it->first, it->second.second });
+ m_knownSize += it->second.second.size();
+ m_knownCount++;
+ m_unknownSize -= it->second.second.size();
+ m_unknownCount--;
auto newReady = it->second.first;
m_unknownSet.erase(newReady);
m_readySet.insert(newReady);
@@ -389,6 +488,7 @@ void BlockQueue::noteReady_WITH_LOCK(h256 const& _good)
}
if (notify)
m_moreToVerify.notify_all();
+ DEV_INVARIANT_CHECK;
}
void BlockQueue::retryAllUnknown()
@@ -398,18 +498,23 @@ void BlockQueue::retryAllUnknown()
for (auto it = m_unknown.begin(); it != m_unknown.end(); ++it)
{
DEV_GUARDED(m_verification)
- m_unverified.push_back(it->second);
+ m_unverified.push_back(UnverifiedBlock { it->second.first, it->first, it->second.second });
auto newReady = it->second.first;
m_unknownSet.erase(newReady);
m_readySet.insert(newReady);
+ m_knownCount++;
m_moreToVerify.notify_one();
}
m_unknown.clear();
+ m_knownSize += m_unknownSize;
+ m_unknownSize = 0;
+ m_unknownCount = 0;
m_moreToVerify.notify_all();
}
std::ostream& dev::eth::operator<<(std::ostream& _out, BlockQueueStatus const& _bqs)
{
+ _out << "importing: " << _bqs.importing << endl;
_out << "verified: " << _bqs.verified << endl;
_out << "verifying: " << _bqs.verifying << endl;
_out << "unverified: " << _bqs.unverified << endl;
diff --git a/libethereum/BlockQueue.h b/libethereum/BlockQueue.h
index 3a299c5f6..8f079aa66 100644
--- a/libethereum/BlockQueue.h
+++ b/libethereum/BlockQueue.h
@@ -31,6 +31,7 @@
#include
#include
#include
+#include "VerifiedBlock.h"
namespace dev
{
@@ -45,6 +46,7 @@ struct BlockQueueChannel: public LogChannel { static const char* name(); static
struct BlockQueueStatus
{
+ size_t importing;
size_t verified;
size_t verifying;
size_t unverified;
@@ -74,14 +76,14 @@ public:
~BlockQueue();
/// Import a block into the queue.
- ImportResult import(bytesConstRef _tx, BlockChain const& _bc, bool _isOurs = false);
+ ImportResult import(bytesConstRef _block, BlockChain const& _bc, bool _isOurs = false);
/// Notes that time has moved on and some blocks that used to be "in the future" may now be valid.
void tick(BlockChain const& _bc);
/// Grabs at most @a _max of the blocks that are ready, giving them in the correct order for insertion into the chain.
/// Don't forget to call doneDrain() once you're done importing.
- void drain(std::vector<std::pair<BlockInfo, bytes>>& o_out, unsigned _max);
+ void drain(std::vector<VerifiedBlock>& o_out, unsigned _max);
/// Must be called after a drain() call. Notes that the drained blocks have been imported into the blockchain, so we can forget about them.
/// @returns true iff there are additional blocks ready to be processed.
@@ -97,27 +99,40 @@ public:
std::pair items() const { ReadGuard l(m_lock); return std::make_pair(m_readySet.size(), m_unknownSet.size()); }
/// Clear everything.
- void clear() { WriteGuard l(m_lock); DEV_INVARIANT_CHECK; Guard l2(m_verification); m_readySet.clear(); m_drainingSet.clear(); m_verified.clear(); m_unverified.clear(); m_unknownSet.clear(); m_unknown.clear(); m_future.clear(); }
+ void clear();
/// Return first block with an unknown parent.
h256 firstUnknown() const { ReadGuard l(m_lock); return m_unknownSet.size() ? *m_unknownSet.begin() : h256(); }
/// Get some information on the current status.
- BlockQueueStatus status() const { ReadGuard l(m_lock); Guard l2(m_verification); return BlockQueueStatus{m_verified.size(), m_verifying.size(), m_unverified.size(), m_future.size(), m_unknown.size(), m_knownBad.size()}; }
+ BlockQueueStatus status() const { ReadGuard l(m_lock); Guard l2(m_verification); return BlockQueueStatus{m_drainingSet.size(), m_verified.size(), m_verifying.size(), m_unverified.size(), m_future.size(), m_unknown.size(), m_knownBad.size()}; }
/// Get some information on the given block's status regarding us.
QueueStatus blockStatus(h256 const& _h) const;
template <class T> Handler onReady(T const& _t) { return m_onReady.add(_t); }
+ template <class T> Handler onRoomAvailable(T const& _t) { return m_onRoomAvailable.add(_t); }
template <class T> void setOnBad(T const& _t) { m_onBad = _t; }
+ bool knownFull() const;
+ bool unknownFull() const;
+
private:
+ struct UnverifiedBlock
+ {
+ h256 hash;
+ h256 parentHash;
+ bytes block;
+ };
+
void noteReady_WITH_LOCK(h256 const& _b);
bool invariants() const override;
void verifierBody();
+ void collectUnknownBad(h256 const& _bad);
+ void updateBad(h256 const& _bad);
mutable boost::shared_mutex m_lock; ///< General lock for the sets, m_future and m_unknown.
h256Hash m_drainingSet; ///< All blocks being imported.
@@ -127,17 +142,22 @@ private:
h256Hash m_knownBad; ///< Set of blocks that we know will never be valid.
std::multimap<unsigned, std::pair<h256, bytes>> m_future; ///< Set of blocks that are not yet valid. Ordered by timestamp
Signal m_onReady; ///< Called when a subsequent call to import blocks will return a non-empty container. Be nice and exit fast.
+ Signal m_onRoomAvailable; ///< Called when space for new blocks becomes available after a drain. Be nice and exit fast.
mutable Mutex m_verification; ///< Mutex that allows writing to m_verified, m_verifying and m_unverified.
std::condition_variable m_moreToVerify; ///< Signaled when m_unverified has a new entry.
- std::vector<std::pair<BlockInfo, bytes>> m_verified; ///< List of blocks, in correct order, verified and ready for chain-import.
- std::deque<std::pair<BlockInfo, bytes>> m_verifying; ///< List of blocks being verified; as long as the second component (bytes) is empty, it's not finished.
- std::deque<std::pair<h256, bytes>> m_unverified; ///< List of blocks, in correct order, ready for verification.
+ std::vector<VerifiedBlock> m_verified; ///< List of blocks, in correct order, verified and ready for chain-import.
+ std::deque<VerifiedBlock> m_verifying; ///< List of blocks being verified; as long as the block component (bytes) is empty, it's not finished.
+ std::deque<UnverifiedBlock> m_unverified; ///< List of blocks, in correct order, ready for verification.
std::vector<std::thread> m_verifiers; ///< Threads who only verify.
bool m_deleting = false; ///< Exit condition for verifiers.
- std::function m_onBad; ///< Called if we have a block that doesn't verify.
+ std::function m_onBad; ///< Called if we have a block that doesn't verify.
+ std::atomic<size_t> m_unknownSize; ///< Tracks total size in bytes of all unknown blocks
+ std::atomic<size_t> m_knownSize; ///< Tracks total size in bytes of all known blocks
+ std::atomic<size_t> m_unknownCount; ///< Tracks total count of unknown blocks. Used to avoid additional syncing
+ std::atomic<size_t> m_knownCount; ///< Tracks total count of known blocks. Used to avoid additional syncing
};
std::ostream& operator<<(std::ostream& _out, BlockQueueStatus const& _s);
diff --git a/libethereum/CMakeLists.txt b/libethereum/CMakeLists.txt
index 7d8f27ee7..6598e1bd7 100644
--- a/libethereum/CMakeLists.txt
+++ b/libethereum/CMakeLists.txt
@@ -36,6 +36,10 @@ target_link_libraries(${EXECUTABLE} ${Boost_REGEX_LIBRARIES})
target_link_libraries(${EXECUTABLE} secp256k1)
if (JSONRPC)
target_link_libraries(${EXECUTABLE} ${JSON_RPC_CPP_CLIENT_LIBRARIES})
+ target_link_libraries(${EXECUTABLE} ${CURL_LIBRARIES})
+ if (DEFINED WIN32 AND NOT DEFINED CMAKE_COMPILER_IS_MINGW)
+ eth_copy_dlls(${EXECUTABLE} CURL_DLLS)
+ endif()
endif()
if (CMAKE_COMPILER_IS_MINGW)
diff --git a/libethereum/Client.cpp b/libethereum/Client.cpp
index 1d3539f18..31c2e8026 100644
--- a/libethereum/Client.cpp
+++ b/libethereum/Client.cpp
@@ -87,87 +87,100 @@ void VersionChecker::setOk()
}
}
-void Client::onBadBlock(Exception& _ex)
+void Client::onBadBlock(Exception& _ex) const
{
// BAD BLOCK!!!
- bytes const& block = *boost::get_error_info