diff --git a/CMakeLists.txt b/CMakeLists.txt
index a5c4fe930..8a2ac51a5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -421,6 +421,7 @@ if (TOOLS)
add_subdirectory(rlp)
add_subdirectory(abi)
+ add_subdirectory(ethvm)
add_subdirectory(eth)
if("x${CMAKE_BUILD_TYPE}" STREQUAL "xDebug")
diff --git a/alethzero/MainWin.cpp b/alethzero/MainWin.cpp
index 93054fd67..4d96b77ff 100644
--- a/alethzero/MainWin.cpp
+++ b/alethzero/MainWin.cpp
@@ -240,6 +240,10 @@ Main::Main(QWidget *parent) :
#if !ETH_FATDB
removeDockWidget(ui->dockWidget_accounts);
+#endif
+#if !ETH_EVMJIT
+ ui->jitvm->setEnabled(false);
+ ui->jitvm->setChecked(false);
#endif
installWatches();
startTimer(100);
@@ -805,6 +809,7 @@ void Main::readSettings(bool _skipGeometry)
ui->usePrivate->setChecked(m_privateChain.size());
ui->verbosity->setValue(s.value("verbosity", 1).toInt());
ui->jitvm->setChecked(s.value("jitvm", true).toBool());
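+ // Apply the restored "jitvm" setting immediately by invoking the action's handler.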
+ on_jitvm_triggered();
ui->urlEdit->setText(s.value("url", "about:blank").toString()); //http://gavwood.com/gavcoin.html
on_urlEdit_returnPressed();
@@ -1240,7 +1245,9 @@ void Main::refreshBlockCount()
{
auto d = ethereum()->blockChain().details();
BlockQueueStatus b = ethereum()->blockQueueStatus();
- ui->chainStatus->setText(QString("%3 ready %4 verifying %5 unverified %6 future %7 unknown %8 bad %1 #%2").arg(m_privateChain.size() ? "[" + m_privateChain + "] " : "testnet").arg(d.number).arg(b.verified).arg(b.verifying).arg(b.unverified).arg(b.future).arg(b.unknown).arg(b.bad));
+ HashChainStatus h = ethereum()->hashChainStatus();
+ ui->chainStatus->setText(QString("%10/%11%12 hashes %3 importing %4 ready %5 verifying %6 unverified %7 future %8 unknown %9 bad %1 #%2")
+ .arg(m_privateChain.size() ? "[" + m_privateChain + "] " : "testnet").arg(d.number).arg(b.importing).arg(b.verified).arg(b.verifying).arg(b.unverified).arg(b.future).arg(b.unknown).arg(b.bad).arg(h.received).arg(h.estimated ? "~" : "").arg(h.total));
}
void Main::on_turboMining_triggered()
diff --git a/alethzero/MainWin.h b/alethzero/MainWin.h
index 193f8e364..efff89d2b 100644
--- a/alethzero/MainWin.h
+++ b/alethzero/MainWin.h
@@ -238,7 +238,7 @@ private:
void installNameRegWatch();
void installBalancesWatch();
- virtual void timerEvent(QTimerEvent*);
+ virtual void timerEvent(QTimerEvent*) override;
void refreshNetwork();
void refreshMining();
diff --git a/alethzero/NatspecHandler.h b/alethzero/NatspecHandler.h
index 7aeafec41..241df4e06 100644
--- a/alethzero/NatspecHandler.h
+++ b/alethzero/NatspecHandler.h
@@ -39,17 +39,17 @@ class NatspecHandler: public NatSpecFace
~NatspecHandler();
/// Stores locally in a levelDB a key value pair of contract code hash to natspec documentation
- void add(dev::h256 const& _contractHash, std::string const& _doc);
+ virtual void add(dev::h256 const& _contractHash, std::string const& _doc) override;
/// Retrieves the natspec documentation as a string given a contract code hash
std::string retrieve(dev::h256 const& _contractHash) const override;
/// Given a json natspec string and the transaction data return the user notice
- std::string getUserNotice(std::string const& json, const dev::bytes& _transactionData);
+ virtual std::string getUserNotice(std::string const& json, const dev::bytes& _transactionData) override;
/// Given a contract code hash and the transaction's data retrieve the natspec documention's
/// user notice for that transaction.
/// @returns The user notice or an empty string if no natspec for the contract exists
/// or if the existing natspec does not document the @c _methodName
- std::string getUserNotice(dev::h256 const& _contractHash, dev::bytes const& _transactionDacta);
+ virtual std::string getUserNotice(dev::h256 const& _contractHash, dev::bytes const& _transactionDacta) override;
private:
ldb::ReadOptions m_readOptions;
diff --git a/cmake/EthCompilerSettings.cmake b/cmake/EthCompilerSettings.cmake
index eb8588ceb..53535a489 100644
--- a/cmake/EthCompilerSettings.cmake
+++ b/cmake/EthCompilerSettings.cmake
@@ -21,7 +21,7 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -DETH_DEBUG")
set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os -DNDEBUG -DETH_RELEASE")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG -DETH_RELEASE")
- set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g -DETH_DEBUG")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g -DETH_RELEASE")
if ("${CMAKE_SYSTEM_NAME}" MATCHES "Linux")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++ -fcolor-diagnostics -Qunused-arguments -DBOOST_ASIO_HAS_CLANG_LIBCXX")
diff --git a/eth/main.cpp b/eth/main.cpp
index 9128d9391..5d70b3fe5 100644
--- a/eth/main.cpp
+++ b/eth/main.cpp
@@ -135,6 +135,7 @@ void help()
<< " --session-sign-key <address>  Sign all transactions with the key of the given address for this session only." << endl
<< " --master Give the master password for the key store." << endl
<< " --password Give a password for a private key." << endl
+ << " --sentinel Set the sentinel for reporting bad blocks or chain issues." << endl
<< endl
<< "Client transacting:" << endl
/*<< " -B,--block-fees Set the block fee profit in the reference unit e.g. ยข (default: 15)." << endl
@@ -147,6 +148,7 @@ void help()
<< " -a,--address Set the coinbase (mining payout) address to addr (default: auto)." << endl
<< " -m,--mining Enable mining, optionally for a specified number of blocks (default: off)" << endl
<< " -f,--force-mining Mine even when there are no transactions to mine (default: off)" << endl
+ << " --mine-on-wrong-chain Mine even when we know it's the wrong chain (default: off)" << endl
<< " -C,--cpu When mining, use the CPU." << endl
<< " -G,--opencl When mining use the GPU via OpenCL." << endl
<< " --opencl-platform When mining using -G/--opencl use OpenCL platform n (default: 0)." << endl
@@ -288,6 +290,7 @@ int main(int argc, char** argv)
bool upnp = true;
WithExisting killChain = WithExisting::Trust;
bool jit = false;
+ string sentinel;
/// Networking params.
string clientName;
@@ -303,6 +306,7 @@ int main(int argc, char** argv)
/// Mining params
unsigned mining = 0;
bool forceMining = false;
+ bool mineOnWrongChain = false;
Address signingKey;
Address sessionKey;
Address beneficiary = signingKey;
@@ -385,6 +389,10 @@ int main(int argc, char** argv)
mode = OperationMode::Export;
filename = argv[++i];
}
+ else if (arg == "--sentinel" && i + 1 < argc)
+ sentinel = argv[++i];
+ else if (arg == "--mine-on-wrong-chain")
+ mineOnWrongChain = true;
else if (arg == "--format" && i + 1 < argc)
{
string m = argv[++i];
@@ -680,6 +688,8 @@ int main(int argc, char** argv)
nodeMode == NodeMode::Full ? set<string>{"eth"/*, "shh"*/} : set<string>(),
netPrefs,
&nodesState);
+ web3.ethereum()->setMineOnBadChain(mineOnWrongChain);
+ web3.ethereum()->setSentinel(sentinel);
auto toNumber = [&](string const& s) -> unsigned {
if (s == "latest")
diff --git a/ethminer/CMakeLists.txt b/ethminer/CMakeLists.txt
index d364f6ed1..df828bc47 100644
--- a/ethminer/CMakeLists.txt
+++ b/ethminer/CMakeLists.txt
@@ -5,7 +5,10 @@ aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
include_directories(${Boost_INCLUDE_DIRS})
+if (JSONRPC)
+include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
include_directories(${JSON_RPC_CPP_INCLUDE_DIRS})
+endif()
set(EXECUTABLE ethminer)
diff --git a/ethminer/MinerAux.h b/ethminer/MinerAux.h
index 6a42dd774..476c810c2 100644
--- a/ethminer/MinerAux.h
+++ b/ethminer/MinerAux.h
@@ -127,9 +127,10 @@ public:
cerr << "Bad " << arg << " option: " << argv[i] << endl;
throw BadArgument();
}
- else if (arg == "--use-chunks")
+ else if (arg == "--list-devices")
{
- dagChunks = 4;
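+ // Enumerate the available OpenCL devices, print them and exit.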
+ ProofOfWork::GPUMiner::listDevices();
+ exit(0);
}
else if (arg == "--phone-home" && i + 1 < argc)
{
@@ -175,7 +176,7 @@ public:
m_minerType = MinerType::CPU;
else if (arg == "-G" || arg == "--opencl")
{
- if (!ProofOfWork::GPUMiner::haveSufficientGPUMemory())
+ if (!ProofOfWork::GPUMiner::configureGPU())
{
cout << "No GPU device with sufficient memory was found. Defaulting to CPU" << endl;
m_minerType = MinerType::CPU;
@@ -268,7 +269,6 @@ public:
ProofOfWork::GPUMiner::setDefaultPlatform(openclPlatform);
ProofOfWork::GPUMiner::setDefaultDevice(openclDevice);
ProofOfWork::GPUMiner::setNumInstances(miningThreads);
- ProofOfWork::GPUMiner::setDagChunks(dagChunks);
}
if (mode == OperationMode::DAGInit)
doInitDAG(initDAG);
@@ -306,7 +306,6 @@ public:
<< " --opencl-platform When mining using -G/--opencl use OpenCL platform n (default: 0)." << endl
<< " --opencl-device When mining using -G/--opencl use OpenCL device n (default: 0)." << endl
<< " -t, --mining-threads Limit number of CPU/GPU miners to n (default: use everything available on selected platform)" << endl
- << " --use-chunks When using GPU mining upload the DAG to the GPU in 4 chunks. " << endl
;
}
diff --git a/ethvm/CMakeLists.txt b/ethvm/CMakeLists.txt
new file mode 100644
index 000000000..ed093061c
--- /dev/null
+++ b/ethvm/CMakeLists.txt
@@ -0,0 +1,19 @@
+cmake_policy(SET CMP0015 NEW)
+set(CMAKE_AUTOMOC OFF)
+
+aux_source_directory(. SRC_LIST)
+
+include_directories(BEFORE ..)
+include_directories(${LEVELDB_INCLUDE_DIRS})
+
+set(EXECUTABLE ethvm)
+
+add_executable(${EXECUTABLE} ${SRC_LIST})
+
+target_link_libraries(${EXECUTABLE} ethereum)
+
+if (APPLE)
+ install(TARGETS ${EXECUTABLE} DESTINATION bin)
+else()
+ eth_install_executable(${EXECUTABLE})
+endif()
diff --git a/ethvm/main.cpp b/ethvm/main.cpp
new file mode 100644
index 000000000..08a1b4508
--- /dev/null
+++ b/ethvm/main.cpp
@@ -0,0 +1,200 @@
+/*
+ This file is part of cpp-ethereum.
+
+ cpp-ethereum is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ cpp-ethereum is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file main.cpp
+ * @author Gav Wood
+ * @date 2014
+ * EVM Execution tool.
+ */
+#include <iostream>
+#include <boost/algorithm/string.hpp>
+#include <boost/timer.hpp>
+#include <libdevcore/CommonIO.h>
+#include <libdevcore/CommonData.h>
+#include <libevmcore/Instruction.h>
+#include <libevm/VM.h>
+#include <libevm/VMFactory.h>
+#include <libethereum/State.h>
+#include <libethereum/Executive.h>
+using namespace std;
+using namespace dev;
+using namespace eth;
+
+void help()
+{
+ cout
+ << "Usage ethvm [trace|stats|output] (|--)" << endl
+ << "Transaction options:" << endl
+ << " --value Transaction should transfer the wei (default: 0)." << endl
+ << " --gas Transaction should be given gas (default: block gas limit)." << endl
+ << " --gas-price Transaction's gas price' should be (default: 0)." << endl
+ << " --sender Transaction sender should be (default: 0000...0069)." << endl
+ << " --origin Transaction origin should be (default: 0000...0069)." << endl
+#if ETH_EVMJIT || !ETH_TRUE
+ << endl
+ << "VM options:" << endl
+ << " -J,--jit Enable LLVM VM (default: off)." << endl
+ << " --smart Enable smart VM (default: off)." << endl
+#endif
+ << endl
+ << "Options for trace:" << endl
+ << " --flat Minimal whitespace in the JSON." << endl
+ << " --mnemonics Show instruction mnemonics in the trace (non-standard)." << endl
+ << endl
+ << "General options:" << endl
+ << " -V,--version Show the version and exit." << endl
+ << " -h,--help Show this help message and exit." << endl;
+ exit(0);
+}
+
+void version()
+{
+ cout << "ethvm version " << dev::Version << endl;
+ cout << "By Gav Wood, 2015." << endl;
+ cout << "Build: " << DEV_QUOTED(ETH_BUILD_PLATFORM) << "/" << DEV_QUOTED(ETH_BUILD_TYPE) << endl;
+ exit(0);
+}
+
+enum class Mode
+{
+ Trace,
+ Statistics,
+ OutputOnly
+};
+
+int main(int argc, char** argv)
+{
+ string incoming = "--";
+
+ Mode mode = Mode::Statistics;
+ State state;
+ Address sender = Address(69);
+ Address origin = Address(69);
+ u256 value = 0;
+ u256 gas = state.gasLimitRemaining();
+ u256 gasPrice = 0;
+ bool styledJson = true;
+ StandardTrace st;
+
+ for (int i = 1; i < argc; ++i)
+ {
+ string arg = argv[i];
+ if (arg == "-h" || arg == "--help")
+ help();
+ else if (arg == "-V" || arg == "--version")
+ version();
+#if ETH_EVMJIT
+ else if (arg == "-J" || arg == "--jit")
+ VMFactory::setKind(VMKind::JIT);
+ else if (arg == "--smart")
+ VMFactory::setKind(VMKind::Smart);
+#endif
+ else if (arg == "--mnemonics")
+ st.setShowMnemonics();
+ else if (arg == "--flat")
+ styledJson = false;
+ else if (arg == "--value" && i + 1 < argc)
+ value = u256(argv[++i]);
+ else if (arg == "--sender" && i + 1 < argc)
+ sender = Address(argv[++i]);
+ else if (arg == "--origin" && i + 1 < argc)
+ origin = Address(argv[++i]);
+ else if (arg == "--gas" && i + 1 < argc)
+ gas = u256(argv[++i]);
+ else if (arg == "--gas-price" && i + 1 < argc)
+ gasPrice = u256(argv[++i]);
+ else if (arg == "stats")
+ mode = Mode::Statistics;
+ else if (arg == "output")
+ mode = Mode::OutputOnly;
+ else if (arg == "trace")
+ mode = Mode::Trace;
+ else
+ incoming = arg;
+ }
+
+ bytes code;
+ if (incoming == "--" || incoming.empty())
+ for (int i = cin.get(); i != -1; i = cin.get())
+ code.push_back((char)i);
+ else
+ code = contents(incoming);
+ bytes data = fromHex(boost::trim_copy(asString(code)));
+ if (data.empty())
+ data = code;
+
+ state.addBalance(sender, value);
+ Executive executive(state, eth::LastHashes(), 0);
+ ExecutionResult res;
+ executive.setResultRecipient(res);
+ Transaction t = eth::Transaction(value, gasPrice, gas, data, 0);
+ t.forceSender(sender);
+
+ unordered_map<byte, pair<unsigned, bigint>> counts;
+ unsigned total = 0;
+ bigint memTotal;
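+ // Per-step EVM hook: in stats mode, tally how often each opcode runs and how much gas it
+ // costs, and record the most recently reported memory size (EVM memory only grows, so the
+ // final value is the peak); in trace mode, forward each step to the standard tracer.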
+ auto onOp = [&](uint64_t step, Instruction inst, bigint m, bigint gasCost, bigint gas, VM* vm, ExtVMFace const* extVM) {
+ if (mode == Mode::Statistics)
+ {
+ counts[(byte)inst].first++;
+ counts[(byte)inst].second += gasCost;
+ total++;
+ if (m > 0)
+ memTotal = m;
+ }
+ else if (mode == Mode::Trace)
+ st(step, inst, m, gasCost, gas, vm, extVM);
+ };
+
+ executive.initialize(t);
+ executive.create(sender, value, gasPrice, gas, &data, origin);
+ boost::timer timer;
+ executive.go(onOp);
+ double execTime = timer.elapsed();
+ executive.finalize();
+ bytes output = std::move(res.output);
+
+ if (mode == Mode::Statistics)
+ {
+ cout << "Gas used: " << res.gasUsed << " (+" << t.gasRequired() << " for transaction, -" << res.gasRefunded << " refunded)" << endl;
+ cout << "Output: " << toHex(output) << endl;
+ LogEntries logs = executive.logs();
+ cout << logs.size() << " logs" << (logs.empty() ? "." : ":") << endl;
+ for (LogEntry const& l: logs)
+ {
+ cout << " " << l.address.hex() << ": " << toHex(l.data) << endl;
+ for (h256 const& t: l.topics)
+ cout << " " << t.hex() << endl;
+ }
+
+ cout << total << " operations in " << execTime << " seconds." << endl;
+ cout << "Maximum memory usage: " << memTotal * 32 << " bytes" << endl;
+ cout << "Expensive operations:" << endl;
+ for (auto const& c: {Instruction::SSTORE, Instruction::SLOAD, Instruction::CALL, Instruction::CREATE, Instruction::CALLCODE, Instruction::MSTORE8, Instruction::MSTORE, Instruction::MLOAD, Instruction::SHA3})
+ if (!!counts[(byte)c].first)
+ cout << " " << instructionInfo(c).name << " x " << counts[(byte)c].first << " (" << counts[(byte)c].second << " gas)" << endl;
+ }
+ else if (mode == Mode::Trace)
+ cout << st.json(styledJson);
+ else if (mode == Mode::OutputOnly)
+ cout << toHex(output);
+
+ return 0;
+}
diff --git a/evmjit/libevmjit-cpp/JitVM.cpp b/evmjit/libevmjit-cpp/JitVM.cpp
index 0d6a6e00a..68161526d 100644
--- a/evmjit/libevmjit-cpp/JitVM.cpp
+++ b/evmjit/libevmjit-cpp/JitVM.cpp
@@ -51,7 +51,7 @@ bytesConstRef JitVM::execImpl(u256& io_gas, ExtVMFace& _ext, OnOpFunc const& _on
m_data.timestamp = static_cast<int64_t>(_ext.currentBlock.timestamp);
m_data.code = _ext.code.data();
m_data.codeSize = _ext.code.size();
- m_data.codeHash = eth2llvm(sha3(_ext.code));
+ m_data.codeHash = eth2llvm(_ext.codeHash);
auto env = reinterpret_cast<Env*>(&_ext);
auto exitCode = m_engine.run(&m_data, env);
diff --git a/evmjit/libevmjit/Array.cpp b/evmjit/libevmjit/Array.cpp
index 3266038db..0b511a058 100644
--- a/evmjit/libevmjit/Array.cpp
+++ b/evmjit/libevmjit/Array.cpp
@@ -9,8 +9,6 @@
#include "Runtime.h"
#include "Utils.h"
-#include // DEBUG only
-
namespace dev
{
namespace eth
@@ -269,52 +267,15 @@ void Array::extend(llvm::Value* _arrayPtr, llvm::Value* _size)
}
}
-namespace
-{
- struct AllocatedMemoryWatchdog
- {
- std::set allocatedMemory;
-
- ~AllocatedMemoryWatchdog()
- {
- if (!allocatedMemory.empty())
- {
- DLOG(mem) << allocatedMemory.size() << " MEM LEAKS!\n";
- for (auto&& leak : allocatedMemory)
- DLOG(mem) << "\t" << leak << "\n";
- }
- }
- };
-
- AllocatedMemoryWatchdog watchdog;
-}
-
extern "C"
{
- using namespace dev::eth::jit;
-
EXPORT void* ext_realloc(void* _data, size_t _size) noexcept
{
- //std::cerr << "REALLOC: " << _data << " [" << _size << "]" << std::endl;
- auto newData = std::realloc(_data, _size);
- if (_data != newData)
- {
- DLOG(mem) << "REALLOC: " << newData << " <- " << _data << " [" << _size << "]\n";
- watchdog.allocatedMemory.erase(_data);
- watchdog.allocatedMemory.insert(newData);
- }
- return newData;
+ return std::realloc(_data, _size);
}
EXPORT void ext_free(void* _data) noexcept
{
std::free(_data);
- if (_data)
- {
- DLOG(mem) << "FREE : " << _data << "\n";
- watchdog.allocatedMemory.erase(_data);
- }
}
-
-} // extern "C"
-
+}
diff --git a/evmjit/libevmjit/Cache.cpp b/evmjit/libevmjit/Cache.cpp
index 47a6386e9..42ccf44ac 100644
--- a/evmjit/libevmjit/Cache.cpp
+++ b/evmjit/libevmjit/Cache.cpp
@@ -1,5 +1,7 @@
#include "Cache.h"
+#include <mutex>
+
#include "preprocessor/llvm_includes_start.h"
#include
#include
@@ -23,6 +25,8 @@ namespace jit
namespace
{
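+ // x_cacheMutex guards the mutable globals below (g_mode, g_lastObject, g_listener) against concurrent access.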
+ using Guard = std::lock_guard<std::mutex>;
+ std::mutex x_cacheMutex;
CacheMode g_mode;
llvm::MemoryBuffer* g_lastObject;
ExecutionEngineListener* g_listener;
@@ -43,6 +47,9 @@ namespace
ObjectCache* Cache::getObjectCache(CacheMode _mode, ExecutionEngineListener* _listener)
{
static ObjectCache objectCache;
+
+ Guard g{x_cacheMutex};
+
g_mode = _mode;
g_listener = _listener;
return &objectCache;
@@ -50,6 +57,8 @@ ObjectCache* Cache::getObjectCache(CacheMode _mode, ExecutionEngineListener* _li
void Cache::clear()
{
+ Guard g{x_cacheMutex};
+
using namespace llvm::sys;
llvm::SmallString<256> cachePath;
path::system_temp_directory(false, cachePath);
@@ -62,6 +71,8 @@ void Cache::clear()
void Cache::preload(llvm::ExecutionEngine& _ee, std::unordered_map<std::string, uint64_t>& _funcCache)
{
+ Guard g{x_cacheMutex};
+
// TODO: Cache dir should be in one place
using namespace llvm::sys;
llvm::SmallString<256> cachePath;
@@ -92,11 +103,14 @@ void Cache::preload(llvm::ExecutionEngine& _ee, std::unordered_map Cache::getObject(std::string const& id)
{
+ Guard g{x_cacheMutex};
+
if (g_mode != CacheMode::on && g_mode != CacheMode::read)
return nullptr;
- if (g_listener)
- g_listener->stateChanged(ExecState::CacheLoad);
+ // TODO: Disabled because is not thread-safe.
+ //if (g_listener)
+ // g_listener->stateChanged(ExecState::CacheLoad);
DLOG(cache) << id << ": search\n";
if (!CHECK(!g_lastObject))
@@ -136,12 +150,15 @@ std::unique_ptr Cache::getObject(std::string const& id)
void ObjectCache::notifyObjectCompiled(llvm::Module const* _module, llvm::MemoryBuffer const* _object)
{
+ Guard g{x_cacheMutex};
+
// Only in "on" and "write" mode
if (g_mode != CacheMode::on && g_mode != CacheMode::write)
return;
- if (g_listener)
- g_listener->stateChanged(ExecState::CacheWrite);
+ // TODO: Disabled because is not thread-safe.
+ // if (g_listener)
+ // g_listener->stateChanged(ExecState::CacheWrite);
auto&& id = _module->getModuleIdentifier();
llvm::SmallString<256> cachePath;
@@ -161,6 +178,8 @@ void ObjectCache::notifyObjectCompiled(llvm::Module const* _module, llvm::Memory
llvm::MemoryBuffer* ObjectCache::getObject(llvm::Module const* _module)
{
+ Guard g{x_cacheMutex};
+
DLOG(cache) << _module->getModuleIdentifier() << ": use\n";
auto o = g_lastObject;
g_lastObject = nullptr;
diff --git a/json_spirit/json_spirit_writer_template.h b/json_spirit/json_spirit_writer_template.h
index dbd0f45da..5376ef476 100644
--- a/json_spirit/json_spirit_writer_template.h
+++ b/json_spirit/json_spirit_writer_template.h
@@ -25,13 +25,9 @@ namespace json_spirit
return 'A' - 10 + ch;
}
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-local-typedefs")
template< class String_type >
String_type non_printable_to_string( unsigned int c )
{
- typedef typename String_type::value_type Char_type;
-
String_type result( 6, '\\' );
result[1] = 'u';
@@ -43,7 +39,6 @@ namespace json_spirit
return result;
}
-#pragma GCC diagnostic pop
template< typename Char_type, class String_type >
bool add_esc_char( Char_type c, String_type& s )
diff --git a/libdevcore/Common.cpp b/libdevcore/Common.cpp
index 3dc3fd280..5fe2775d2 100644
--- a/libdevcore/Common.cpp
+++ b/libdevcore/Common.cpp
@@ -28,7 +28,7 @@ using namespace dev;
namespace dev
{
-char const* Version = "0.9.24";
+char const* Version = "0.9.25";
const u256 UndefinedU256 = ~(u256)0;
diff --git a/libdevcore/Common.h b/libdevcore/Common.h
index 453c17e6f..1ee83c794 100644
--- a/libdevcore/Common.h
+++ b/libdevcore/Common.h
@@ -181,7 +181,7 @@ private:
/// Scope guard for invariant check in a class derived from HasInvariants.
#if ETH_DEBUG
-#define DEV_INVARIANT_CHECK ::dev::InvariantChecker __dev_invariantCheck(this)
+#define DEV_INVARIANT_CHECK { ::dev::InvariantChecker __dev_invariantCheck(this); }
#else
#define DEV_INVARIANT_CHECK (void)0;
#endif
diff --git a/libdevcore/Exceptions.h b/libdevcore/Exceptions.h
index 025568efa..b0bab7d81 100644
--- a/libdevcore/Exceptions.h
+++ b/libdevcore/Exceptions.h
@@ -30,7 +30,8 @@
namespace dev
{
-// base class for all exceptions
+
+/// Base class for all exceptions.
struct Exception: virtual std::exception, virtual boost::exception
{
Exception(std::string _message = std::string()): m_message(std::move(_message)) {}
@@ -40,20 +41,26 @@ private:
std::string m_message;
};
-struct BadHexCharacter: virtual Exception {};
-struct RLPException: virtual Exception {};
-struct BadCast: virtual RLPException {};
-struct BadRLP: virtual RLPException {};
-struct OversizeRLP: virtual RLPException {};
-struct UndersizeRLP: virtual RLPException {};
-struct NoNetworking: virtual Exception {};
-struct NoUPnPDevice: virtual Exception {};
-struct RootNotFound: virtual Exception {};
-struct BadRoot: virtual Exception {};
-struct FileError: virtual Exception {};
-struct Overflow: virtual Exception {};
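+/// Generates a trivial exception type whose what() reports the exception's own name.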
+#define DEV_SIMPLE_EXCEPTION(X) struct X: virtual Exception { const char* what() const noexcept override { return #X; } }
+
+/// Base class for all RLP exceptions.
+struct RLPException: virtual Exception { RLPException(std::string _message = std::string()): Exception(_message) {} };
+#define DEV_SIMPLE_EXCEPTION_RLP(X) struct X: virtual RLPException { const char* what() const noexcept override { return #X; } }
+
+DEV_SIMPLE_EXCEPTION_RLP(BadCast);
+DEV_SIMPLE_EXCEPTION_RLP(BadRLP);
+DEV_SIMPLE_EXCEPTION_RLP(OversizeRLP);
+DEV_SIMPLE_EXCEPTION_RLP(UndersizeRLP);
+
+DEV_SIMPLE_EXCEPTION(BadHexCharacter);
+DEV_SIMPLE_EXCEPTION(NoNetworking);
+DEV_SIMPLE_EXCEPTION(NoUPnPDevice);
+DEV_SIMPLE_EXCEPTION(RootNotFound);
+DEV_SIMPLE_EXCEPTION(BadRoot);
+DEV_SIMPLE_EXCEPTION(FileError);
+DEV_SIMPLE_EXCEPTION(Overflow);
+DEV_SIMPLE_EXCEPTION(FailedInvariant);
struct InterfaceNotSupported: virtual Exception { public: InterfaceNotSupported(std::string _f): Exception("Interface " + _f + " not supported.") {} };
-struct FailedInvariant: virtual Exception {};
struct ExternalFunctionFailure: virtual Exception { public: ExternalFunctionFailure(std::string _f): Exception("Function " + _f + "() failed.") {} };
// error information to be added to exceptions
@@ -66,5 +73,7 @@ using errinfo_min = boost::error_info;
using errinfo_max = boost::error_info;
using RequirementError = boost::tuple;
using errinfo_hash256 = boost::error_info;
-using HashMismatchError = boost::tuple;
+using errinfo_required_h256 = boost::error_info;
+using errinfo_got_h256 = boost::error_info;
+using Hash256RequirementError = boost::tuple<errinfo_required_h256, errinfo_got_h256>;
}
diff --git a/libdevcore/Log.cpp b/libdevcore/Log.cpp
index f28a2c6b9..fde492f3b 100644
--- a/libdevcore/Log.cpp
+++ b/libdevcore/Log.cpp
@@ -40,6 +40,14 @@ mutex x_logOverride;
/// or equal to the currently output verbosity (g_logVerbosity).
static map<std::type_info const*, bool> s_logOverride;
+bool dev::isChannelVisible(std::type_info const* _ch, bool _default)
+{
+ Guard l(x_logOverride);
+ if (s_logOverride.count(_ch))
+ return s_logOverride[_ch];
+ return _default;
+}
+
LogOverrideAux::LogOverrideAux(std::type_info const* _ch, bool _value):
m_ch(_ch)
{
diff --git a/libdevcore/Log.h b/libdevcore/Log.h
index ce0db17fe..e732ac73c 100644
--- a/libdevcore/Log.h
+++ b/libdevcore/Log.h
@@ -73,6 +73,9 @@ public:
LogOverride(bool _value): LogOverrideAux(&typeid(Channel), _value) {}
};
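+/// @returns true if the given channel should currently be logged: a per-channel override wins, otherwise the channel's verbosity is compared against g_logVerbosity.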
+bool isChannelVisible(std::type_info const* _ch, bool _default);
+template <class Channel> bool isChannelVisible() { return isChannelVisible(&typeid(Channel), Channel::verbosity <= g_logVerbosity); }
+
/// Temporary changes system's verbosity for specific function. Restores the old verbosity when function returns.
/// Not thread-safe, use with caution!
struct VerbosityHolder
diff --git a/libdevcore/MemoryDB.cpp b/libdevcore/MemoryDB.cpp
index 2cf56475b..f71931bdd 100644
--- a/libdevcore/MemoryDB.cpp
+++ b/libdevcore/MemoryDB.cpp
@@ -32,7 +32,9 @@ const char* DBWarn::name() { return "TDB"; }
std::unordered_map<h256, std::string> MemoryDB::get() const
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
std::unordered_map<h256, std::string> ret;
for (auto const& i: m_main)
if (!m_enforceRefs || i.second.second > 0)
@@ -44,8 +46,10 @@ MemoryDB& MemoryDB::operator=(MemoryDB const& _c)
{
if (this == &_c)
return *this;
+#if DEV_GUARDED_DB
ReadGuard l(_c.x_this);
WriteGuard l2(x_this);
+#endif
m_main = _c.m_main;
m_aux = _c.m_aux;
return *this;
@@ -53,7 +57,9 @@ MemoryDB& MemoryDB::operator=(MemoryDB const& _c)
std::string MemoryDB::lookup(h256 const& _h) const
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
auto it = m_main.find(_h);
if (it != m_main.end())
{
@@ -67,7 +73,9 @@ std::string MemoryDB::lookup(h256 const& _h) const
bool MemoryDB::exists(h256 const& _h) const
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
auto it = m_main.find(_h);
if (it != m_main.end() && (!m_enforceRefs || it->second.second > 0))
return true;
@@ -76,7 +84,9 @@ bool MemoryDB::exists(h256 const& _h) const
void MemoryDB::insert(h256 const& _h, bytesConstRef _v)
{
+#if DEV_GUARDED_DB
WriteGuard l(x_this);
+#endif
auto it = m_main.find(_h);
if (it != m_main.end())
{
@@ -92,7 +102,9 @@ void MemoryDB::insert(h256 const& _h, bytesConstRef _v)
bool MemoryDB::kill(h256 const& _h)
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
if (m_main.count(_h))
{
if (m_main[_h].second > 0)
@@ -117,9 +129,38 @@ bool MemoryDB::kill(h256 const& _h)
return false;
}
+bytes MemoryDB::lookupAux(h256 const& _h) const
+{
+#if DEV_GUARDED_DB
+ ReadGuard l(x_this);
+#endif
+ auto it = m_aux.find(_h);
+ if (it != m_aux.end() && (!m_enforceRefs || it->second.second))
+ return it->second.first;
+ return bytes();
+}
+
+void MemoryDB::removeAux(h256 const& _h)
+{
+#if DEV_GUARDED_DB
+ WriteGuard l(x_this);
+#endif
+ m_aux[_h].second = false;
+}
+
+void MemoryDB::insertAux(h256 const& _h, bytesConstRef _v)
+{
+#if DEV_GUARDED_DB
+ WriteGuard l(x_this);
+#endif
+ m_aux[_h] = make_pair(_v.toBytes(), true);
+}
+
void MemoryDB::purge()
{
+#if DEV_GUARDED_DB
WriteGuard l(x_this);
+#endif
for (auto it = m_main.begin(); it != m_main.end(); )
if (it->second.second)
++it;
@@ -129,7 +170,9 @@ void MemoryDB::purge()
h256Hash MemoryDB::keys() const
{
+#if DEV_GUARDED_DB
ReadGuard l(x_this);
+#endif
h256Hash ret;
for (auto const& i: m_main)
if (i.second.second)
diff --git a/libdevcore/MemoryDB.h b/libdevcore/MemoryDB.h
index 169682815..a39c0efd0 100644
--- a/libdevcore/MemoryDB.h
+++ b/libdevcore/MemoryDB.h
@@ -57,14 +57,16 @@ public:
bool kill(h256 const& _h);
void purge();
- bytes lookupAux(h256 const& _h) const { ReadGuard l(x_this); auto it = m_aux.find(_h); if (it != m_aux.end() && (!m_enforceRefs || it->second.second)) return it->second.first; return bytes(); }
- void removeAux(h256 const& _h) { WriteGuard l(x_this); m_aux[_h].second = false; }
- void insertAux(h256 const& _h, bytesConstRef _v) { WriteGuard l(x_this); m_aux[_h] = make_pair(_v.toBytes(), true); }
+ bytes lookupAux(h256 const& _h) const;
+ void removeAux(h256 const& _h);
+ void insertAux(h256 const& _h, bytesConstRef _v);
h256Hash keys() const;
protected:
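+ // x_this exists only when DEV_GUARDED_DB is defined; in that configuration the accessors guard themselves with it.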
+#if DEV_GUARDED_DB
mutable SharedMutex x_this;
+#endif
std::unordered_map<h256, std::pair<std::string, unsigned>> m_main;
std::unordered_map<h256, std::pair<bytes, bool>> m_aux;
diff --git a/libdevcore/RangeMask.h b/libdevcore/RangeMask.h
index bdf00e687..7c402fc98 100644
--- a/libdevcore/RangeMask.h
+++ b/libdevcore/RangeMask.h
@@ -219,6 +219,14 @@ public:
return uit == m_ranges.end() ? m_all.second : uit->first;
}
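+ /// @returns the total number of elements covered by the ranges in this mask.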
+ size_t size() const
+ {
+ size_t c = 0;
+ for (auto const& r: this->m_ranges)
+ c += r.second - r.first;
+ return c;
+ }
+
private:
UnsignedRange m_all;
std::map m_ranges;
diff --git a/libdevcore/vector_ref.h b/libdevcore/vector_ref.h
index 5e9bba3e8..b04d449b3 100644
--- a/libdevcore/vector_ref.h
+++ b/libdevcore/vector_ref.h
@@ -43,7 +43,8 @@ public:
vector_ref<_T> cropped(size_t _begin) const { if (m_data && _begin <= m_count) return vector_ref<_T>(m_data + _begin, m_count - _begin); else return vector_ref<_T>(); }
void retarget(_T* _d, size_t _s) { m_data = _d; m_count = _s; }
void retarget(std::vector<_T> const& _t) { m_data = _t.data(); m_count = _t.size(); }
- void copyTo(vector_ref::type> _t) const { memcpy(_t.data(), m_data, std::min(_t.size(), m_count) * sizeof(_T)); }
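+ /// @returns true iff this view and @a _t reference overlapping memory; copyTo() uses memmove in that case.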
+ template <class T> bool overlapsWith(vector_ref<T> _t) const { void const* f1 = data(); void const* t1 = data() + size(); void const* f2 = _t.data(); void const* t2 = _t.data() + _t.size(); return f1 < t2 && t1 > f2; }
+ void copyTo(vector_ref<typename std::remove_const<_T>::type> _t) const { if (overlapsWith(_t)) memmove(_t.data(), m_data, std::min(_t.size(), m_count) * sizeof(_T)); else memcpy(_t.data(), m_data, std::min(_t.size(), m_count) * sizeof(_T)); }
void populate(vector_ref<typename std::remove_const<_T>::type> _t) const { copyTo(_t); memset(_t.data() + m_count, 0, std::max(_t.size(), m_count) - m_count); }
_T* begin() { return m_data; }
diff --git a/libdevcrypto/Common.cpp b/libdevcrypto/Common.cpp
index e68381427..4ebd6a04b 100644
--- a/libdevcrypto/Common.cpp
+++ b/libdevcrypto/Common.cpp
@@ -54,7 +54,7 @@ Public dev::toPublic(Secret const& _secret)
{
Public p;
s_secp256k1.toPublic(_secret, p);
- return std::move(p);
+ return p;
}
Address dev::toAddress(Public const& _public)
@@ -230,7 +230,7 @@ h256 crypto::kdf(Secret const& _priv, h256 const& _hash)
if (!s || !_hash || !_priv)
BOOST_THROW_EXCEPTION(InvalidState());
- return std::move(s);
+ return s;
}
h256 Nonce::get(bool _commit)
diff --git a/libdevcrypto/CryptoPP.cpp b/libdevcrypto/CryptoPP.cpp
index b701fed8d..40eae10f1 100644
--- a/libdevcrypto/CryptoPP.cpp
+++ b/libdevcrypto/CryptoPP.cpp
@@ -61,7 +61,7 @@ bytes Secp256k1::eciesKDF(Secret _z, bytes _s1, unsigned kdByteLen)
}
k.resize(kdByteLen);
- return move(k);
+ return k;
}
void Secp256k1::encryptECIES(Public const& _k, bytes& io_cipher)
@@ -264,7 +264,6 @@ Public Secp256k1::recover(Signature _signature, bytesConstRef _message)
ECP::Element x;
{
- Guard l(x_curve);
m_curve.DecodePoint(x, encodedpoint, 33);
if (!m_curve.VerifyPoint(x))
return recovered;
@@ -286,7 +285,6 @@ Public Secp256k1::recover(Signature _signature, bytesConstRef _message)
ECP::Point p;
byte recoveredbytes[65];
{
- Guard l(x_curve);
// todo: make generator member
p = m_curve.CascadeMultiply(u2, x, u1, m_params.GetSubgroupGenerator());
m_curve.EncodePoint(recoveredbytes, p, false);
diff --git a/libdevcrypto/CryptoPP.h b/libdevcrypto/CryptoPP.h
index ca8a2e6b5..377da8754 100644
--- a/libdevcrypto/CryptoPP.h
+++ b/libdevcrypto/CryptoPP.h
@@ -59,7 +59,7 @@ namespace crypto
using namespace CryptoPP;
-inline ECP::Point publicToPoint(Public const& _p) { Integer x(_p.data(), 32); Integer y(_p.data() + 32, 32); return std::move(ECP::Point(x,y)); }
+inline ECP::Point publicToPoint(Public const& _p) { Integer x(_p.data(), 32); Integer y(_p.data() + 32, 32); return ECP::Point(x,y); }
inline Integer secretToExponent(Secret const& _s) { return std::move(Integer(_s.data(), Secret::size)); }
diff --git a/libdevcrypto/OverlayDB.cpp b/libdevcrypto/OverlayDB.cpp
index 80c901635..a6aa684f2 100644
--- a/libdevcrypto/OverlayDB.cpp
+++ b/libdevcrypto/OverlayDB.cpp
@@ -50,7 +50,9 @@ void OverlayDB::commit()
{
ldb::WriteBatch batch;
// cnote << "Committing nodes to disk DB:";
+#if DEV_GUARDED_DB
DEV_READ_GUARDED(x_this)
+#endif
{
for (auto const& i: m_main)
{
@@ -83,7 +85,9 @@ void OverlayDB::commit()
cwarn << "Sleeping for" << (i + 1) << "seconds, then retrying.";
this_thread::sleep_for(chrono::seconds(i + 1));
}
+#if DEV_GUARDED_DB
DEV_WRITE_GUARDED(x_this)
+#endif
{
m_aux.clear();
m_main.clear();
@@ -95,7 +99,7 @@ bytes OverlayDB::lookupAux(h256 const& _h) const
{
bytes ret = MemoryDB::lookupAux(_h);
if (!ret.empty() || !m_db)
- return move(ret);
+ return ret;
std::string v;
bytes b = _h.asBytes();
b.push_back(255); // for aux
@@ -107,7 +111,9 @@ bytes OverlayDB::lookupAux(h256 const& _h) const
void OverlayDB::rollback()
{
+#if DEV_GUARDED_DB
WriteGuard l(x_this);
+#endif
m_main.clear();
}
@@ -116,7 +122,7 @@ std::string OverlayDB::lookup(h256 const& _h) const
std::string ret = MemoryDB::lookup(_h);
if (ret.empty() && m_db)
m_db->Get(m_readOptions, ldb::Slice((char const*)_h.data(), 32), &ret);
- return move(ret);
+ return ret;
}
bool OverlayDB::exists(h256 const& _h) const
diff --git a/libethash-cl/ethash_cl_miner.cpp b/libethash-cl/ethash_cl_miner.cpp
index f501d9642..1a5eaddc3 100644
--- a/libethash-cl/ethash_cl_miner.cpp
+++ b/libethash-cl/ethash_cl_miner.cpp
@@ -52,11 +52,11 @@ using namespace std;
// TODO: If at any point we can use libdevcore in here then we should switch to using a LogChannel
#define ETHCL_LOG(_contents) cout << "[OPENCL]:" << _contents << endl
-static void add_definition(std::string& source, char const* id, unsigned value)
+static void addDefinition(string& _source, char const* _id, unsigned _value)
{
char buf[256];
- sprintf(buf, "#define %s %uu\n", id, value);
- source.insert(source.begin(), buf, buf + strlen(buf));
+ sprintf(buf, "#define %s %uu\n", _id, _value);
+ _source.insert(_source.begin(), buf, buf + strlen(buf));
}
ethash_cl_miner::search_hook::~search_hook() {}
@@ -71,44 +71,44 @@ ethash_cl_miner::~ethash_cl_miner()
finish();
}
-std::string ethash_cl_miner::platform_info(unsigned _platformId, unsigned _deviceId)
+string ethash_cl_miner::platform_info(unsigned _platformId, unsigned _deviceId)
{
- std::vector platforms;
+ vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
ETHCL_LOG("No OpenCL platforms found.");
- return std::string();
+ return string();
}
// get GPU device of the selected platform
- std::vector devices;
- unsigned platform_num = std::min(_platformId, platforms.size() - 1);
+ vector<cl::Device> devices;
+ unsigned platform_num = min(_platformId, platforms.size() - 1);
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
ETHCL_LOG("No OpenCL devices found.");
- return std::string();
+ return string();
}
// use selected default device
- unsigned device_num = std::min(_deviceId, devices.size() - 1);
+ unsigned device_num = min(_deviceId, devices.size() - 1);
cl::Device& device = devices[device_num];
- std::string device_version = device.getInfo();
+ string device_version = device.getInfo<CL_DEVICE_VERSION>();
return "{ \"platform\": \"" + platforms[platform_num].getInfo<CL_PLATFORM_NAME>() + "\", \"device\": \"" + device.getInfo<CL_DEVICE_NAME>() + "\", \"version\": \"" + device_version + "\" }";
}
-unsigned ethash_cl_miner::get_num_platforms()
+unsigned ethash_cl_miner::getNumPlatforms()
{
- std::vector platforms;
+ vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
return platforms.size();
}
-unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
+unsigned ethash_cl_miner::getNumDevices(unsigned _platformId)
{
- std::vector platforms;
+ vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
@@ -116,8 +116,8 @@ unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
return 0;
}
- std::vector devices;
- unsigned platform_num = std::min(_platformId, platforms.size() - 1);
+ vector<cl::Device> devices;
+ unsigned platform_num = min(_platformId, platforms.size() - 1);
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
@@ -127,45 +127,100 @@ unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
return devices.size();
}
-bool ethash_cl_miner::haveSufficientGPUMemory(unsigned _platformId)
+bool ethash_cl_miner::configureGPU()
{
- std::vector platforms;
+ return searchForAllDevices([](cl::Device const _device) -> bool
+ {
+ cl_ulong result;
+ _device.getInfo(CL_DEVICE_GLOBAL_MEM_SIZE, &result);
+ if (result >= ETHASH_CL_MINIMUM_MEMORY)
+ {
+ ETHCL_LOG(
+ "Found suitable OpenCL device [" << _device.getInfo<CL_DEVICE_NAME>()
+ << "] with " << result << " bytes of GPU memory"
+ );
+ return true;
+ }
+
+ ETHCL_LOG(
+ "OpenCL device " << _device.getInfo<CL_DEVICE_NAME>()
+ << " has insufficient GPU memory: " << result <<
+ " bytes of memory found < " << ETHASH_CL_MINIMUM_MEMORY << " bytes of memory required"
+ );
+ return false;
+ }
+ );
+}
+
+bool ethash_cl_miner::searchForAllDevices(function<bool(cl::Device const&)> _callback)
+{
+ vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
ETHCL_LOG("No OpenCL platforms found.");
return false;
}
+ for (unsigned i = 0; i < platforms.size(); ++i)
+ if (searchForAllDevices(i, _callback))
+ return true;
- std::vector devices;
- unsigned platform_num = std::min(_platformId, platforms.size() - 1);
- platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
- if (devices.empty())
- {
- ETHCL_LOG("No OpenCL devices found.");
+ return false;
+}
+
+bool ethash_cl_miner::searchForAllDevices(unsigned _platformId, function<bool(cl::Device const&)> _callback)
+{
+ vector<cl::Platform> platforms;
+ cl::Platform::get(&platforms);
+ if (_platformId >= platforms.size())
return false;
- }
+ vector<cl::Device> devices;
+ platforms[_platformId].getDevices(CL_DEVICE_TYPE_ALL, &devices);
for (cl::Device const& device: devices)
+ if (_callback(device))
+ return true;
+
+ return false;
+}
+
+void ethash_cl_miner::doForAllDevices(function<void(cl::Device const&)> _callback)
+{
+ vector<cl::Platform> platforms;
+ cl::Platform::get(&platforms);
+ if (platforms.empty())
{
- cl_ulong result;
- device.getInfo(CL_DEVICE_GLOBAL_MEM_SIZE, &result);
- if (result >= ETHASH_CL_MINIMUM_MEMORY)
+ ETHCL_LOG("No OpenCL platforms found.");
+ return;
+ }
+ for (unsigned i = 0; i < platforms.size(); ++i)
+ doForAllDevices(i, _callback);
+}
+
+void ethash_cl_miner::doForAllDevices(unsigned _platformId, function<void(cl::Device const&)> _callback)
+{
+ vector<cl::Platform> platforms;
+ cl::Platform::get(&platforms);
+ if (_platformId >= platforms.size())
+ return;
+
+ vector<cl::Device> devices;
+ platforms[_platformId].getDevices(CL_DEVICE_TYPE_ALL, &devices);
+ for (cl::Device const& device: devices)
+ _callback(device);
+}
+
+void ethash_cl_miner::listDevices()
+{
+ string outString ="\nListing OpenCL devices.\nFORMAT: [deviceID] deviceName\n";
+ unsigned int i = 0;
+ doForAllDevices([&outString, &i](cl::Device const _device)
{
- ETHCL_LOG(
- "Found suitable OpenCL device [" << device.getInfo()
- << "] with " << result << " bytes of GPU memory"
- );
- return true;
+ outString += "[" + to_string(i) + "] " + _device.getInfo<CL_DEVICE_NAME>() + "\n";
+ ++i;
}
- else
- ETHCL_LOG(
- "OpenCL device " << device.getInfo()
- << " has insufficient GPU memory." << result <<
- " bytes of memory found < " << ETHASH_CL_MINIMUM_MEMORY << " bytes of memory required"
- );
- }
- return false;
+ );
+ ETHCL_LOG(outString);
}
void ethash_cl_miner::finish()
@@ -179,19 +234,13 @@ bool ethash_cl_miner::init(
uint64_t _dagSize,
unsigned workgroup_size,
unsigned _platformId,
- unsigned _deviceId,
- unsigned _dagChunksNum
+ unsigned _deviceId
)
{
- // for now due to the .cl kernels we can only have either 1 big chunk or 4 chunks
- assert(_dagChunksNum == 1 || _dagChunksNum == 4);
- // now create the number of chunk buffers
- m_dagChunksNum = _dagChunksNum;
-
// get all platforms
try
{
- std::vector platforms;
+ vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
@@ -200,11 +249,11 @@ bool ethash_cl_miner::init(
}
// use selected platform
- _platformId = std::min(_platformId, platforms.size() - 1);
+ _platformId = min(_platformId, platforms.size() - 1);
ETHCL_LOG("Using platform: " << platforms[_platformId].getInfo().c_str());
// get GPU device of the default platform
- std::vector devices;
+ vector<cl::Device> devices;
platforms[_platformId].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
@@ -213,10 +262,24 @@ bool ethash_cl_miner::init(
}
// use selected device
- cl::Device& device = devices[std::min(_deviceId, devices.size() - 1)];
- std::string device_version = device.getInfo();
+ cl::Device& device = devices[min(_deviceId, devices.size() - 1)];
+ string device_version = device.getInfo<CL_DEVICE_VERSION>();
ETHCL_LOG("Using device: " << device.getInfo().c_str() << "(" << device_version.c_str() << ")");
+ // configure chunk number depending on max allocateable memory
+ cl_ulong result;
+ device.getInfo(CL_DEVICE_MAX_MEM_ALLOC_SIZE, &result);
+ if (result >= ETHASH_CL_MINIMUM_MEMORY)
+ {
+ m_dagChunksNum = 1;
+ ETHCL_LOG("Using 1 big chunk. Max OpenCL allocatable memory is " << result);
+ }
+ else
+ {
+ m_dagChunksNum = 4;
+ ETHCL_LOG("Using 4 chunks. Max OpenCL allocatable memory is " << result);
+ }
+
if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0)
{
ETHCL_LOG("OpenCL 1.0 is not supported.");
@@ -226,7 +289,7 @@ bool ethash_cl_miner::init(
m_opencl_1_1 = true;
// create context
- m_context = cl::Context(std::vector(&device, &device + 1));
+ m_context = cl::Context(vector<cl::Device>(&device, &device + 1));
m_queue = cl::CommandQueue(m_context, device);
// use requested workgroup size, but we require multiple of 8
@@ -235,11 +298,11 @@ bool ethash_cl_miner::init(
// patch source code
// note: ETHASH_CL_MINER_KERNEL is simply ethash_cl_miner_kernel.cl compiled
// into a byte array by bin2h.cmake. There is no need to load the file by hand in runtime
- std::string code(ETHASH_CL_MINER_KERNEL, ETHASH_CL_MINER_KERNEL + ETHASH_CL_MINER_KERNEL_SIZE);
- add_definition(code, "GROUP_SIZE", m_workgroup_size);
- add_definition(code, "DAG_SIZE", (unsigned)(_dagSize / ETHASH_MIX_BYTES));
- add_definition(code, "ACCESSES", ETHASH_ACCESSES);
- add_definition(code, "MAX_OUTPUTS", c_max_search_results);
+ string code(ETHASH_CL_MINER_KERNEL, ETHASH_CL_MINER_KERNEL + ETHASH_CL_MINER_KERNEL_SIZE);
+ addDefinition(code, "GROUP_SIZE", m_workgroup_size);
+ addDefinition(code, "DAG_SIZE", (unsigned)(_dagSize / ETHASH_MIX_BYTES));
+ addDefinition(code, "ACCESSES", ETHASH_ACCESSES);
+ addDefinition(code, "MAX_OUTPUTS", c_max_search_results);
//debugf("%s", code.c_str());
// create miner OpenCL program
@@ -258,7 +321,7 @@ bool ethash_cl_miner::init(
ETHCL_LOG(program.getBuildInfo(device).c_str());
return false;
}
- if (_dagChunksNum == 1)
+ if (m_dagChunksNum == 1)
{
ETHCL_LOG("Loading single big chunk kernels");
m_hash_kernel = cl::Kernel(program, "ethash_hash");
@@ -272,13 +335,13 @@ bool ethash_cl_miner::init(
}
// create buffer for dag
- if (_dagChunksNum == 1)
+ if (m_dagChunksNum == 1)
{
ETHCL_LOG("Creating one big buffer");
m_dagChunks.push_back(cl::Buffer(m_context, CL_MEM_READ_ONLY, _dagSize));
}
else
- for (unsigned i = 0; i < _dagChunksNum; i++)
+ for (unsigned i = 0; i < m_dagChunksNum; i++)
{
// TODO Note: If we ever change to _dagChunksNum other than 4, then the size would need recalculation
ETHCL_LOG("Creating buffer for chunk " << i);
@@ -293,7 +356,7 @@ bool ethash_cl_miner::init(
ETHCL_LOG("Creating buffer for header.");
m_header = cl::Buffer(m_context, CL_MEM_READ_ONLY, 32);
- if (_dagChunksNum == 1)
+ if (m_dagChunksNum == 1)
{
ETHCL_LOG("Mapping one big chunk.");
m_queue.enqueueWriteBuffer(m_dagChunks[0], CL_TRUE, 0, _dagSize, _dag);
@@ -302,12 +365,12 @@ bool ethash_cl_miner::init(
{
// TODO Note: If we ever change to _dagChunksNum other than 4, then the size would need recalculation
void* dag_ptr[4];
- for (unsigned i = 0; i < _dagChunksNum; i++)
+ for (unsigned i = 0; i < m_dagChunksNum; i++)
{
ETHCL_LOG("Mapping chunk " << i);
dag_ptr[i] = m_queue.enqueueMapBuffer(m_dagChunks[i], true, m_opencl_1_1 ? CL_MAP_WRITE : CL_MAP_WRITE_INVALIDATE_REGION, 0, (i == 3) ? (_dagSize - 3 * ((_dagSize >> 9) << 7)) : (_dagSize >> 9) << 7);
}
- for (unsigned i = 0; i < _dagChunksNum; i++)
+ for (unsigned i = 0; i < m_dagChunksNum; i++)
{
memcpy(dag_ptr[i], (char *)_dag + i*((_dagSize >> 9) << 7), (i == 3) ? (_dagSize - 3 * ((_dagSize >> 9) << 7)) : (_dagSize >> 9) << 7);
m_queue.enqueueUnmapMemObject(m_dagChunks[i], dag_ptr[i]);
@@ -339,7 +402,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
uint64_t start_nonce;
unsigned buf;
};
- std::queue pending;
+ queue pending;
static uint32_t const c_zero = 0;
@@ -365,8 +428,8 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
m_search_kernel.setArg(argPos + 2, ~0u);
unsigned buf = 0;
- std::random_device engine;
- uint64_t start_nonce = std::uniform_int_distribution()(engine);
+ random_device engine;
+ uint64_t start_nonce = uniform_int_distribution<uint64_t>()(engine);
for (;; start_nonce += c_search_batch_size)
{
// supply output buffer to kernel
@@ -389,7 +452,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
// could use pinned host pointer instead
uint32_t* results = (uint32_t*)m_queue.enqueueMapBuffer(m_search_buf[batch.buf], true, CL_MAP_READ, 0, (1 + c_max_search_results) * sizeof(uint32_t));
- unsigned num_found = std::min(results[0], c_max_search_results);
+ unsigned num_found = min(results[0], c_max_search_results);
uint64_t nonces[c_max_search_results];
for (unsigned i = 0; i != num_found; ++i)
diff --git a/libethash-cl/ethash_cl_miner.h b/libethash-cl/ethash_cl_miner.h
index cdc4cf07f..4c986053a 100644
--- a/libethash-cl/ethash_cl_miner.h
+++ b/libethash-cl/ethash_cl_miner.h
@@ -32,18 +32,22 @@ public:
ethash_cl_miner();
~ethash_cl_miner();
- static unsigned get_num_platforms();
- static unsigned get_num_devices(unsigned _platformId = 0);
+ static bool searchForAllDevices(unsigned _platformId, std::function<bool(cl::Device const&)> _callback);
+ static bool searchForAllDevices(std::function<bool(cl::Device const&)> _callback);
+ static void doForAllDevices(unsigned _platformId, std::function<void(cl::Device const&)> _callback);
+ static void doForAllDevices(std::function<void(cl::Device const&)> _callback);
+ static unsigned getNumPlatforms();
+ static unsigned getNumDevices(unsigned _platformId = 0);
static std::string platform_info(unsigned _platformId = 0, unsigned _deviceId = 0);
- static bool haveSufficientGPUMemory(unsigned _platformId = 0);
+ static void listDevices();
+ static bool configureGPU();
bool init(
uint8_t const* _dag,
uint64_t _dagSize,
unsigned workgroup_size = 64,
unsigned _platformId = 0,
- unsigned _deviceId = 0,
- unsigned _dagChunksNum = 1
+ unsigned _deviceId = 0
);
void finish();
void search(uint8_t const* header, uint64_t target, search_hook& hook);
@@ -58,11 +62,12 @@ private:
cl::CommandQueue m_queue;
cl::Kernel m_hash_kernel;
cl::Kernel m_search_kernel;
- unsigned m_dagChunksNum;
+ unsigned int m_dagChunksNum;
std::vector<cl::Buffer> m_dagChunks;
cl::Buffer m_header;
cl::Buffer m_hash_buf[c_num_buffers];
cl::Buffer m_search_buf[c_num_buffers];
unsigned m_workgroup_size;
bool m_opencl_1_1;
+
};
diff --git a/libethcore/BlockInfo.cpp b/libethcore/BlockInfo.cpp
index 0e125b607..69da52b09 100644
--- a/libethcore/BlockInfo.cpp
+++ b/libethcore/BlockInfo.cpp
@@ -139,7 +139,6 @@ void BlockInfo::populateFromHeader(RLP const& _header, Strictness _s, h256 const
mixHash = _header[field = 13].toHash(RLP::VeryStrict);
nonce = _header[field = 14].toHash(RLP::VeryStrict);
}
-
catch (Exception const& _e)
{
_e << errinfo_name("invalid block header format") << BadFieldError(field, toHex(_header[field].data().toBytes()));
@@ -151,9 +150,26 @@ void BlockInfo::populateFromHeader(RLP const& _header, Strictness _s, h256 const
// check it hashes according to proof of work or that it's the genesis block.
if (_s == CheckEverything && parentHash && !ProofOfWork::verify(*this))
- BOOST_THROW_EXCEPTION(InvalidBlockNonce() << errinfo_hash256(headerHash(WithoutNonce)) << errinfo_nonce(nonce) << errinfo_difficulty(difficulty));
+ {
+ InvalidBlockNonce ex;
+ ex << errinfo_hash256(headerHash(WithoutNonce));
+ ex << errinfo_nonce(nonce);
+ ex << errinfo_difficulty(difficulty);
+ ex << errinfo_seedHash(seedHash());
+ ex << errinfo_target(boundary());
+ ex << errinfo_mixHash(mixHash);
+ Ethash::Result er = EthashAux::eval(seedHash(), headerHash(WithoutNonce), nonce);
+ ex << errinfo_ethashResult(make_tuple(er.value, er.mixHash));
+ BOOST_THROW_EXCEPTION(ex);
+ }
else if (_s == QuickNonce && parentHash && !ProofOfWork::preVerify(*this))
- BOOST_THROW_EXCEPTION(InvalidBlockNonce() << errinfo_hash256(headerHash(WithoutNonce)) << errinfo_nonce(nonce) << errinfo_difficulty(difficulty));
+ {
+ InvalidBlockNonce ex;
+ ex << errinfo_hash256(headerHash(WithoutNonce));
+ ex << errinfo_nonce(nonce);
+ ex << errinfo_difficulty(difficulty);
+ BOOST_THROW_EXCEPTION(ex);
+ }
if (_s != CheckNothing)
{
@@ -224,7 +240,7 @@ void BlockInfo::verifyInternals(bytesConstRef _block) const
for (auto const& t: txs)
cdebug << toHex(t);
- BOOST_THROW_EXCEPTION(InvalidTransactionsHash() << HashMismatchError(expectedRoot, transactionsRoot));
+ BOOST_THROW_EXCEPTION(InvalidTransactionsRoot() << Hash256RequirementError(expectedRoot, transactionsRoot));
}
clog(BlockInfoDiagnosticsChannel) << "Expected uncle hash:" << toString(sha3(root[2].data()));
if (sha3Uncles != sha3(root[2].data()))
diff --git a/libethcore/Common.cpp b/libethcore/Common.cpp
index 63f4a19f9..618703e22 100644
--- a/libethcore/Common.cpp
+++ b/libethcore/Common.cpp
@@ -112,25 +112,26 @@ std::string formatBalance(bigint const& _b)
static void badBlockInfo(BlockInfo const& _bi, string const& _err)
{
- cwarn << EthRedBold << "========================================================================";
- cwarn << EthRedBold << "== Software Failure " + _err + string(max(0, 44 - _err.size()), ' ') + " ==";
+ string const c_line = EthReset EthOnMaroon + string(80, ' ');
+ string const c_border = EthReset EthOnMaroon + string(2, ' ') + EthReset EthMaroonBold;
+ string const c_space = c_border + string(76, ' ') + c_border;
+ stringstream ss;
+ ss << c_line << endl;
+ ss << c_space << endl;
+ ss << c_border + " Import Failure " + _err + string(max(0, 53 - _err.size()), ' ') + " " + c_border << endl;
+ ss << c_space << endl;
string bin = toString(_bi.number);
- cwarn << EthRedBold << ("== Guru Meditation #" + string(max(0, 8 - bin.size()), '0') + bin + "." + _bi.hash().abridged() + " ==");
- cwarn << EthRedBold << "========================================================================";
+ ss << c_border + (" Guru Meditation #" + string(max(0, 8 - bin.size()), '0') + bin + "." + _bi.hash().abridged() + " ") + c_border << endl;
+ ss << c_space << endl;
+ ss << c_line;
+ cwarn << "\n" + ss.str();
}
void badBlock(bytesConstRef _block, string const& _err)
{
- badBlockInfo(BlockInfo(_block, CheckNothing), _err);
- cwarn << " Block:" << toHex(_block);
- cwarn << " Block RLP:" << RLP(_block);
-}
-
-void badBlockHeader(bytesConstRef _header, string const& _err)
-{
- badBlockInfo(BlockInfo::fromHeader(_header, CheckNothing), _err);
- cwarn << " Header:" << toHex(_header);
- cwarn << " Header RLP:" << RLP(_header);;
+ BlockInfo bi;
+ DEV_IGNORE_EXCEPTIONS(bi = BlockInfo(_block, CheckNothing));
+ badBlockInfo(bi, _err);
}
}
diff --git a/libethcore/Common.h b/libethcore/Common.h
index 87ebffab7..64eb6de29 100644
--- a/libethcore/Common.h
+++ b/libethcore/Common.h
@@ -85,6 +85,10 @@ using BlockNumber = unsigned;
static const BlockNumber LatestBlock = (BlockNumber)-2;
static const BlockNumber PendingBlock = (BlockNumber)-1;
+static const h256 LatestBlockHash = h256(2);
+static const h256 EarliestBlockHash = h256(1);
+static const h256 PendingBlockHash = h256(0);
+
enum class RelativeBlock: BlockNumber
{
@@ -156,8 +160,6 @@ struct TransactionSkeleton
u256 gasPrice = UndefinedU256;
};
-void badBlockHeader(bytesConstRef _header, std::string const& _err);
-inline void badBlockHeader(bytes const& _header, std::string const& _err) { badBlockHeader(&_header, _err); }
void badBlock(bytesConstRef _header, std::string const& _err);
inline void badBlock(bytes const& _header, std::string const& _err) { badBlock(&_header, _err); }
diff --git a/libethcore/Ethash.cpp b/libethcore/Ethash.cpp
index 0ea09a5bc..9ac4474ba 100644
--- a/libethcore/Ethash.cpp
+++ b/libethcore/Ethash.cpp
@@ -285,7 +285,6 @@ private:
unsigned Ethash::GPUMiner::s_platformId = 0;
unsigned Ethash::GPUMiner::s_deviceId = 0;
unsigned Ethash::GPUMiner::s_numInstances = 0;
-unsigned Ethash::GPUMiner::s_dagChunks = 1;
Ethash::GPUMiner::GPUMiner(ConstructionInfo const& _ci):
Miner(_ci),
@@ -335,18 +334,19 @@ void Ethash::GPUMiner::workLoop()
EthashAux::FullType dag;
while (true)
{
- if ((dag = EthashAux::full(w.seedHash, false)))
+ if ((dag = EthashAux::full(w.seedHash, true)))
break;
if (shouldStop())
{
delete m_miner;
+ m_miner = nullptr;
return;
}
cnote << "Awaiting DAG";
this_thread::sleep_for(chrono::milliseconds(500));
}
bytesConstRef dagData = dag->data();
- m_miner->init(dagData.data(), dagData.size(), 32, s_platformId, device, s_dagChunks);
+ m_miner->init(dagData.data(), dagData.size(), 32, s_platformId, device);
}
uint64_t upper64OfBoundary = (uint64_t)(u64)((u256)w.boundary >> 192);
@@ -354,6 +354,8 @@ void Ethash::GPUMiner::workLoop()
}
catch (cl::Error const& _e)
{
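+ // On an OpenCL error, discard the miner and clear the pointer so it is not reused.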
+ delete m_miner;
+ m_miner = nullptr;
cwarn << "Error GPU mining: " << _e.what() << "(" << _e.err() << ")";
}
}
@@ -364,11 +366,6 @@ void Ethash::GPUMiner::pause()
stopWorking();
}
-bool Ethash::GPUMiner::haveSufficientGPUMemory()
-{
- return ethash_cl_miner::haveSufficientGPUMemory(s_platformId);
-}
-
std::string Ethash::GPUMiner::platformInfo()
{
return ethash_cl_miner::platform_info(s_platformId, s_deviceId);
@@ -376,7 +373,17 @@ std::string Ethash::GPUMiner::platformInfo()
unsigned Ethash::GPUMiner::getNumDevices()
{
- return ethash_cl_miner::get_num_devices(s_platformId);
+ return ethash_cl_miner::getNumDevices(s_platformId);
+}
+
+void Ethash::GPUMiner::listDevices()
+{
+ return ethash_cl_miner::listDevices();
+}
+
+bool Ethash::GPUMiner::configureGPU()
+{
+ return ethash_cl_miner::configureGPU();
}
#endif
diff --git a/libethcore/Ethash.h b/libethcore/Ethash.h
index 68c21c609..49a8ae006 100644
--- a/libethcore/Ethash.h
+++ b/libethcore/Ethash.h
@@ -87,10 +87,11 @@ public:
static unsigned instances() { return s_numInstances > 0 ? s_numInstances : std::thread::hardware_concurrency(); }
static std::string platformInfo();
- static bool haveSufficientGPUMemory() { return false; }
static void setDefaultPlatform(unsigned) {}
static void setDagChunks(unsigned) {}
static void setDefaultDevice(unsigned) {}
+ static void listDevices() {}
+ static bool configureGPU() { return false; }
static void setNumInstances(unsigned _instances) { s_numInstances = std::min(_instances, std::thread::hardware_concurrency()); }
protected:
void kickOff() override
@@ -117,12 +118,12 @@ public:
static unsigned instances() { return s_numInstances > 0 ? s_numInstances : 1; }
static std::string platformInfo();
- static bool haveSufficientGPUMemory();
static unsigned getNumDevices();
+ static void listDevices();
+ static bool configureGPU();
static void setDefaultPlatform(unsigned _id) { s_platformId = _id; }
static void setDefaultDevice(unsigned _id) { s_deviceId = _id; }
static void setNumInstances(unsigned _instances) { s_numInstances = std::min(_instances, getNumDevices()); }
- static void setDagChunks(unsigned _dagChunks) { s_dagChunks = _dagChunks; }
protected:
void kickOff() override;
@@ -141,7 +142,6 @@ public:
static unsigned s_platformId;
static unsigned s_deviceId;
static unsigned s_numInstances;
- static unsigned s_dagChunks;
};
#else
using GPUMiner = CPUMiner;
diff --git a/libethcore/Exceptions.h b/libethcore/Exceptions.h
index 9362e4fed..b411ea416 100644
--- a/libethcore/Exceptions.h
+++ b/libethcore/Exceptions.h
@@ -35,46 +35,45 @@ using errinfo_field = boost::error_info;
using errinfo_data = boost::error_info;
using errinfo_nonce = boost::error_info;
using errinfo_difficulty = boost::error_info;
+using errinfo_target = boost::error_info;
+using errinfo_seedHash = boost::error_info;
+using errinfo_mixHash = boost::error_info;
+using errinfo_ethashResult = boost::error_info>;
using BadFieldError = boost::tuple;
-struct DatabaseAlreadyOpen: virtual dev::Exception {};
-struct OutOfGasBase: virtual dev::Exception {};
-struct NotEnoughAvailableSpace: virtual dev::Exception {};
-struct NotEnoughCash: virtual dev::Exception {};
-struct GasPriceTooLow: virtual dev::Exception {};
-struct BlockGasLimitReached: virtual dev::Exception {};
-struct NoSuchContract: virtual dev::Exception {};
-struct ContractAddressCollision: virtual dev::Exception {};
-struct FeeTooSmall: virtual dev::Exception {};
-struct TooMuchGasUsed: virtual dev::Exception {};
-struct ExtraDataTooBig: virtual dev::Exception {};
-struct InvalidSignature: virtual dev::Exception {};
-struct InvalidBlockFormat: virtual dev::Exception {};
-struct InvalidUnclesHash: virtual dev::Exception {};
-struct InvalidUncle: virtual dev::Exception {};
-struct TooManyUncles: virtual dev::Exception {};
-struct UncleTooOld: virtual dev::Exception {};
-struct UncleIsBrother: virtual dev::Exception {};
-struct UncleInChain: virtual dev::Exception {};
-struct DuplicateUncleNonce: virtual dev::Exception {};
-struct InvalidStateRoot: virtual dev::Exception {};
-struct InvalidGasUsed: virtual dev::Exception {};
-struct InvalidTransactionsHash: virtual dev::Exception {};
-struct InvalidTransaction: virtual dev::Exception {};
-struct InvalidDifficulty: virtual dev::Exception {};
-struct InvalidGasLimit: virtual dev::Exception {};
-struct InvalidTransactionGasUsed: virtual dev::Exception {};
-struct InvalidTransactionsStateRoot: virtual dev::Exception {};
-struct InvalidReceiptsStateRoot: virtual dev::Exception {};
-struct InvalidTimestamp: virtual dev::Exception {};
-struct InvalidLogBloom: virtual dev::Exception {};
-struct InvalidNonce: virtual dev::Exception {};
-struct InvalidBlockHeaderItemCount: virtual dev::Exception {};
-struct InvalidBlockNonce: virtual dev::Exception {};
-struct InvalidParentHash: virtual dev::Exception {};
-struct InvalidNumber: virtual dev::Exception {};
-struct InvalidContractAddress: virtual public dev::Exception {};
-struct DAGCreationFailure: virtual public dev::Exception {};
-struct DAGComputeFailure: virtual public dev::Exception {};
+DEV_SIMPLE_EXCEPTION(OutOfGasBase);
+DEV_SIMPLE_EXCEPTION(OutOfGasIntrinsic);
+DEV_SIMPLE_EXCEPTION(NotEnoughAvailableSpace);
+DEV_SIMPLE_EXCEPTION(NotEnoughCash);
+DEV_SIMPLE_EXCEPTION(GasPriceTooLow);
+DEV_SIMPLE_EXCEPTION(BlockGasLimitReached);
+DEV_SIMPLE_EXCEPTION(FeeTooSmall);
+DEV_SIMPLE_EXCEPTION(TooMuchGasUsed);
+DEV_SIMPLE_EXCEPTION(ExtraDataTooBig);
+DEV_SIMPLE_EXCEPTION(InvalidSignature);
+DEV_SIMPLE_EXCEPTION(InvalidBlockFormat);
+DEV_SIMPLE_EXCEPTION(InvalidUnclesHash);
+DEV_SIMPLE_EXCEPTION(TooManyUncles);
+DEV_SIMPLE_EXCEPTION(UncleTooOld);
+DEV_SIMPLE_EXCEPTION(UncleIsBrother);
+DEV_SIMPLE_EXCEPTION(UncleInChain);
+DEV_SIMPLE_EXCEPTION(InvalidStateRoot);
+DEV_SIMPLE_EXCEPTION(InvalidGasUsed);
+DEV_SIMPLE_EXCEPTION(InvalidTransactionsRoot);
+DEV_SIMPLE_EXCEPTION(InvalidDifficulty);
+DEV_SIMPLE_EXCEPTION(InvalidGasLimit);
+DEV_SIMPLE_EXCEPTION(InvalidReceiptsStateRoot);
+DEV_SIMPLE_EXCEPTION(InvalidTimestamp);
+DEV_SIMPLE_EXCEPTION(InvalidLogBloom);
+DEV_SIMPLE_EXCEPTION(InvalidNonce);
+DEV_SIMPLE_EXCEPTION(InvalidBlockHeaderItemCount);
+DEV_SIMPLE_EXCEPTION(InvalidBlockNonce);
+DEV_SIMPLE_EXCEPTION(InvalidParentHash);
+DEV_SIMPLE_EXCEPTION(InvalidNumber);
+
+DEV_SIMPLE_EXCEPTION(DatabaseAlreadyOpen);
+DEV_SIMPLE_EXCEPTION(DAGCreationFailure);
+DEV_SIMPLE_EXCEPTION(DAGComputeFailure);
+
}
}
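Aside: the DEV_SIMPLE_EXCEPTION form keeps each declaration to one line while still interoperating with boost::exception, so the errinfo_* tags above can be attached at the throw site. The macro itself lives in libdevcore; the expansion below is only a rough sketch of its shape, not the exact definition, and the names carry a Sketch suffix to make that clear:

// Sketch only: the general shape of DEV_SIMPLE_EXCEPTION and of attaching
// an errinfo_* hint when throwing. Details are assumptions.
#define DEV_SIMPLE_EXCEPTION_SKETCH(X) \
	struct X: virtual ::dev::Exception { const char* what() const noexcept override { return #X; } }

DEV_SIMPLE_EXCEPTION_SKETCH(InvalidTimestampSketch);

void checkTimestamp(u256 _blockTime, u256 _parentTime)
{
	if (_blockTime <= _parentTime)
		BOOST_THROW_EXCEPTION(InvalidTimestampSketch() << errinfo_now(time(0)));
}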
diff --git a/libethereum/BlockChain.cpp b/libethereum/BlockChain.cpp
index 67e42d7c8..480c7f977 100644
--- a/libethereum/BlockChain.cpp
+++ b/libethereum/BlockChain.cpp
@@ -35,6 +35,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -42,13 +43,14 @@
#include "GenesisInfo.h"
#include "State.h"
#include "Defaults.h"
+
using namespace std;
using namespace dev;
using namespace dev::eth;
namespace js = json_spirit;
#define ETH_CATCH 1
-#define ETH_TIMED_IMPORTS 0
+#define ETH_TIMED_IMPORTS 1
#ifdef _WIN32
const char* BlockChainDebug::name() { return EthBlue "8" EthWhite " <>"; }
@@ -307,38 +309,50 @@ tuple BlockChain::sync(BlockQueue& _bq, OverlayDB const& _st
{
// _bq.tick(*this);
- vector> blocks;
+ VerifiedBlocks blocks;
_bq.drain(blocks, _max);
h256s fresh;
h256s dead;
h256s badBlocks;
- for (auto const& block: blocks)
- {
- try
- {
- // Nonce & uncle nonces already verified in verification thread at this point.
- ImportRoute r;
- DEV_TIMED_ABOVE(Block import, 500)
- r = import(block.first, block.second, _stateDB, ImportRequirements::Default & ~ImportRequirements::ValidNonce & ~ImportRequirements::CheckUncles);
- fresh += r.first;
- dead += r.second;
- }
- catch (dev::eth::UnknownParent)
- {
- cwarn << "ODD: Import queue contains block with unknown parent." << LogTag::Error << boost::current_exception_diagnostic_information();
- // NOTE: don't reimport since the queue should guarantee everything in the right order.
- // Can't continue - chain bad.
- badBlocks.push_back(block.first.hash());
- }
- catch (Exception const& _e)
+ for (VerifiedBlock const& block: blocks)
+ if (!badBlocks.empty())
+ badBlocks.push_back(block.verified.info.hash());
+ else
{
- cnote << "Exception while importing block. Someone (Jeff? That you?) seems to be giving us dodgy blocks!" << LogTag::Error << diagnostic_information(_e);
- // NOTE: don't reimport since the queue should guarantee everything in the right order.
- // Can't continue - chain bad.
- badBlocks.push_back(block.first.hash());
+ try
+ {
+ // Nonce & uncle nonces already verified in verification thread at this point.
+ ImportRoute r;
+ DEV_TIMED_ABOVE(Block import, 500)
+ r = import(block.verified, _stateDB, ImportRequirements::Default & ~ImportRequirements::ValidNonce & ~ImportRequirements::CheckUncles);
+ fresh += r.first;
+ dead += r.second;
+ }
+ catch (dev::eth::UnknownParent)
+ {
+ cwarn << "ODD: Import queue contains block with unknown parent.";// << LogTag::Error << boost::current_exception_diagnostic_information();
+ // NOTE: don't reimport since the queue should guarantee everything in the right order.
+ // Can't continue - chain bad.
+ badBlocks.push_back(block.verified.info.hash());
+ }
+ catch (dev::eth::FutureTime)
+ {
+ cwarn << "ODD: Import queue contains a block with future time.";// << LogTag::Error << boost::current_exception_diagnostic_information();
+ // NOTE: don't reimport since the queue should guarantee everything in the past.
+ // Can't continue - chain bad.
+ badBlocks.push_back(block.verified.info.hash());
+ }
+ catch (Exception& ex)
+ {
+// cnote << "Exception while importing block. Someone (Jeff? That you?) seems to be giving us dodgy blocks!";// << LogTag::Error << diagnostic_information(ex);
+ if (m_onBad)
+ m_onBad(ex);
+ // NOTE: don't reimport since the queue should guarantee everything in the right order.
+ // Can't continue - chain bad.
+ badBlocks.push_back(block.verified.info.hash());
+ }
}
- }
return make_tuple(fresh, dead, _bq.doneDrain(badBlocks));
}
@@ -346,7 +360,7 @@ pair BlockChain::attemptImport(bytes const& _block, O
{
try
{
- return make_pair(ImportResult::Success, import(_block, _stateDB, _ir));
+ return make_pair(ImportResult::Success, import(verifyBlock(_block, m_onBad), _stateDB, _ir));
}
catch (UnknownParent&)
{
@@ -360,8 +374,10 @@ pair BlockChain::attemptImport(bytes const& _block, O
{
return make_pair(ImportResult::FutureTime, make_pair(h256s(), h256s()));
}
- catch (...)
+ catch (Exception& ex)
{
+ if (m_onBad)
+ m_onBad(ex);
return make_pair(ImportResult::Malformed, make_pair(h256s(), h256s()));
}
}
@@ -369,28 +385,28 @@ pair BlockChain::attemptImport(bytes const& _block, O
ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, ImportRequirements::value _ir)
{
// VERIFY: populates from the block and checks the block is internally coherent.
- BlockInfo bi;
+ VerifiedBlockRef block;
#if ETH_CATCH
try
#endif
{
- bi.populate(&_block);
- bi.verifyInternals(&_block);
+ block = verifyBlock(_block, m_onBad);
}
#if ETH_CATCH
- catch (Exception const& _e)
+ catch (Exception& ex)
{
- clog(BlockChainNote) << " Malformed block: " << diagnostic_information(_e);
- _e << errinfo_comment("Malformed block ");
+// clog(BlockChainNote) << " Malformed block: " << diagnostic_information(ex);
+ ex << errinfo_now(time(0));
+ ex << errinfo_block(_block);
throw;
}
#endif
- return import(bi, _block, _db, _ir);
+ return import(block, _db, _ir);
}
-ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, OverlayDB const& _db, ImportRequirements::value _ir)
+ImportRoute BlockChain::import(VerifiedBlockRef const& _block, OverlayDB const& _db, ImportRequirements::value _ir)
{
//@tidy This is a behemoth of a method - could do to be split into a few smaller ones.
@@ -405,28 +421,28 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
#endif
// Check block doesn't already exist first!
- if (isKnown(_bi.hash()) && (_ir & ImportRequirements::DontHave))
+ if (isKnown(_block.info.hash()) && (_ir & ImportRequirements::DontHave))
{
- clog(BlockChainNote) << _bi.hash() << ": Not new.";
+ clog(BlockChainNote) << _block.info.hash() << ": Not new.";
BOOST_THROW_EXCEPTION(AlreadyHaveBlock());
}
// Work out its number as the parent's number + 1
- if (!isKnown(_bi.parentHash))
+ if (!isKnown(_block.info.parentHash))
{
- clog(BlockChainNote) << _bi.hash() << ": Unknown parent " << _bi.parentHash;
+ clog(BlockChainNote) << _block.info.hash() << ": Unknown parent " << _block.info.parentHash;
// We don't know the parent (yet) - discard for now. It'll get resent to us if we find out about its ancestry later on.
BOOST_THROW_EXCEPTION(UnknownParent());
}
- auto pd = details(_bi.parentHash);
+ auto pd = details(_block.info.parentHash);
if (!pd)
{
auto pdata = pd.rlp();
clog(BlockChainDebug) << "Details is returning false despite block known:" << RLP(pdata);
- auto parentBlock = block(_bi.parentHash);
- clog(BlockChainDebug) << "isKnown:" << isKnown(_bi.parentHash);
- clog(BlockChainDebug) << "last/number:" << m_lastBlockNumber << m_lastBlockHash << _bi.number;
+ auto parentBlock = block(_block.info.parentHash);
+ clog(BlockChainDebug) << "isKnown:" << isKnown(_block.info.parentHash);
+ clog(BlockChainDebug) << "last/number:" << m_lastBlockNumber << m_lastBlockHash << _block.info.number;
clog(BlockChainDebug) << "Block:" << BlockInfo(parentBlock);
clog(BlockChainDebug) << "RLP:" << RLP(parentBlock);
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
@@ -434,14 +450,14 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
}
// Check it's not crazy
- if (_bi.timestamp > (u256)time(0))
+ if (_block.info.timestamp > (u256)time(0))
{
- clog(BlockChainChat) << _bi.hash() << ": Future time " << _bi.timestamp << " (now at " << time(0) << ")";
+ clog(BlockChainChat) << _block.info.hash() << ": Future time " << _block.info.timestamp << " (now at " << time(0) << ")";
// Block has a timestamp in the future. This is no good.
BOOST_THROW_EXCEPTION(FutureTime());
}
- clog(BlockChainChat) << "Attempting import of " << _bi.hash() << "...";
+ clog(BlockChainChat) << "Attempting import of " << _block.info.hash() << "...";
#if ETH_TIMED_IMPORTS
preliminaryChecks = t.elapsed();
@@ -461,7 +477,7 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
// Check transactions are valid and that they result in a state equivalent to our state_root.
// Get total difficulty increase and update state, checking it.
State s(_db);
- auto tdIncrease = s.enactOn(&_block, _bi, *this, _ir);
+ auto tdIncrease = s.enactOn(_block, *this, _ir);
BlockLogBlooms blb;
BlockReceipts br;
@@ -470,14 +486,8 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
blb.blooms.push_back(s.receipt(i).bloom());
br.receipts.push_back(s.receipt(i));
}
- try {
- s.cleanup(true);
- }
- catch (BadRoot)
- {
- cwarn << "BadRoot error. Retrying import later.";
- BOOST_THROW_EXCEPTION(FutureTime());
- }
+
+ s.cleanup(true);
td = pd.totalDifficulty + tdIncrease;
@@ -497,22 +507,22 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
// together with an "ensureCachedWithUpdatableLock(l)" method.
// This is safe in practice since the caches don't get flushed nearly often enough to be
// done here.
- details(_bi.parentHash);
+ details(_block.info.parentHash);
DEV_WRITE_GUARDED(x_details)
- m_details[_bi.parentHash].children.push_back(_bi.hash());
+ m_details[_block.info.parentHash].children.push_back(_block.info.hash());
#if ETH_TIMED_IMPORTS || !ETH_TRUE
collation = t.elapsed();
t.restart();
#endif
- blocksBatch.Put(toSlice(_bi.hash()), (ldb::Slice)ref(_block));
+ blocksBatch.Put(toSlice(_block.info.hash()), ldb::Slice(_block.block));
DEV_READ_GUARDED(x_details)
- extrasBatch.Put(toSlice(_bi.parentHash, ExtraDetails), (ldb::Slice)dev::ref(m_details[_bi.parentHash].rlp()));
+ extrasBatch.Put(toSlice(_block.info.parentHash, ExtraDetails), (ldb::Slice)dev::ref(m_details[_block.info.parentHash].rlp()));
- extrasBatch.Put(toSlice(_bi.hash(), ExtraDetails), (ldb::Slice)dev::ref(BlockDetails((unsigned)pd.number + 1, td, _bi.parentHash, {}).rlp()));
- extrasBatch.Put(toSlice(_bi.hash(), ExtraLogBlooms), (ldb::Slice)dev::ref(blb.rlp()));
- extrasBatch.Put(toSlice(_bi.hash(), ExtraReceipts), (ldb::Slice)dev::ref(br.rlp()));
+ extrasBatch.Put(toSlice(_block.info.hash(), ExtraDetails), (ldb::Slice)dev::ref(BlockDetails((unsigned)pd.number + 1, td, _block.info.parentHash, {}).rlp()));
+ extrasBatch.Put(toSlice(_block.info.hash(), ExtraLogBlooms), (ldb::Slice)dev::ref(blb.rlp()));
+ extrasBatch.Put(toSlice(_block.info.hash(), ExtraReceipts), (ldb::Slice)dev::ref(br.rlp()));
#if ETH_TIMED_IMPORTS || !ETH_TRUE
writing = t.elapsed();
@@ -520,30 +530,25 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
#endif
}
#if ETH_CATCH
- catch (InvalidNonce const& _e)
+ catch (BadRoot& ex)
{
- clog(BlockChainNote) << " Malformed block: " << diagnostic_information(_e);
- _e << errinfo_comment("Malformed block ");
- throw;
+ cwarn << "BadRoot error. Retrying import later.";
+ BOOST_THROW_EXCEPTION(FutureTime());
}
- catch (Exception const& _e)
+ catch (Exception& ex)
{
- clog(BlockChainWarn) << " Malformed block: " << diagnostic_information(_e);
- _e << errinfo_comment("Malformed block ");
- clog(BlockChainWarn) << "Block: " << _bi.hash();
- clog(BlockChainWarn) << _bi;
- clog(BlockChainWarn) << "Block parent: " << _bi.parentHash;
- clog(BlockChainWarn) << BlockInfo(block(_bi.parentHash));
+ ex << errinfo_now(time(0));
+ ex << errinfo_block(_block.block.toBytes());
throw;
}
#endif
StructuredLogger::chainReceivedNewBlock(
- _bi.headerHash(WithoutNonce).abridged(),
- _bi.nonce.abridged(),
+ _block.info.headerHash(WithoutNonce).abridged(),
+ _block.info.nonce.abridged(),
currentHash().abridged(),
"", // TODO: remote id ??
- _bi.parentHash.abridged()
+ _block.info.parentHash.abridged()
);
// cnote << "Parent " << bi.parentHash << " has " << details(bi.parentHash).children.size() << " children.";
@@ -556,8 +561,8 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
// don't include bi.hash() in treeRoute, since it's not yet in details DB...
// just tack it on afterwards.
unsigned commonIndex;
- tie(route, common, commonIndex) = treeRoute(last, _bi.parentHash);
- route.push_back(_bi.hash());
+ tie(route, common, commonIndex) = treeRoute(last, _block.info.parentHash);
+ route.push_back(_block.info.hash());
// Most of the time these two will be equal - only when we're doing a chain revert will they not be
if (common != last)
@@ -569,8 +574,8 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
for (auto i = route.rbegin(); i != route.rend() && *i != common; ++i)
{
BlockInfo tbi;
- if (*i == _bi.hash())
- tbi = _bi;
+ if (*i == _block.info.hash())
+ tbi = _block.info;
else
tbi = BlockInfo(block(*i));
@@ -597,7 +602,7 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
h256s newTransactionAddresses;
{
bytes blockBytes;
- RLP blockRLP(*i == _bi.hash() ? _block : (blockBytes = block(*i)));
+ RLP blockRLP(*i == _block.info.hash() ? _block.block : &(blockBytes = block(*i)));
TransactionAddress ta;
ta.blockHash = tbi.hash();
for (ta.index = 0; ta.index < blockRLP[1].itemCount(); ++ta.index)
@@ -613,17 +618,17 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
// FINALLY! change our best hash.
{
- newLastBlockHash = _bi.hash();
- newLastBlockNumber = (unsigned)_bi.number;
+ newLastBlockHash = _block.info.hash();
+ newLastBlockNumber = (unsigned)_block.info.number;
}
- clog(BlockChainNote) << " Imported and best" << td << " (#" << _bi.number << "). Has" << (details(_bi.parentHash).children.size() - 1) << "siblings. Route:" << route;
+ clog(BlockChainNote) << " Imported and best" << td << " (#" << _block.info.number << "). Has" << (details(_block.info.parentHash).children.size() - 1) << "siblings. Route:" << route;
StructuredLogger::chainNewHead(
- _bi.headerHash(WithoutNonce).abridged(),
- _bi.nonce.abridged(),
+ _block.info.headerHash(WithoutNonce).abridged(),
+ _block.info.nonce.abridged(),
currentHash().abridged(),
- _bi.parentHash.abridged()
+ _block.info.parentHash.abridged()
);
}
else
@@ -634,24 +639,26 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
m_blocksDB->Write(m_writeOptions, &blocksBatch);
m_extrasDB->Write(m_writeOptions, &extrasBatch);
- if (isKnown(_bi.hash()) && !details(_bi.hash()))
+#if ETH_PARANOIA || !ETH_TRUE
+ if (isKnown(_block.info.hash()) && !details(_block.info.hash()))
{
clog(BlockChainDebug) << "Known block just inserted has no details.";
- clog(BlockChainDebug) << "Block:" << _bi;
+ clog(BlockChainDebug) << "Block:" << _block.info;
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
exit(-1);
}
try {
- State canary(_db, *this, _bi.hash(), ImportRequirements::DontHave);
+ State canary(_db, *this, _block.info.hash(), ImportRequirements::DontHave);
}
catch (...)
{
clog(BlockChainDebug) << "Failed to initialise State object form imported block.";
- clog(BlockChainDebug) << "Block:" << _bi;
+ clog(BlockChainDebug) << "Block:" << _block.info;
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
exit(-1);
}
+#endif
if (m_lastBlockHash != newLastBlockHash)
DEV_WRITE_GUARDED(x_lastBlockHash)
@@ -667,12 +674,16 @@ ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, Overla
#if ETH_TIMED_IMPORTS
checkBest = t.elapsed();
- cnote << "Import took:" << total.elapsed();
- cnote << "preliminaryChecks:" << preliminaryChecks;
- cnote << "enactment:" << enactment;
- cnote << "collation:" << collation;
- cnote << "writing:" << writing;
- cnote << "checkBest:" << checkBest;
+ if (total.elapsed() > 1.0)
+ {
+ cnote << "SLOW IMPORT:" << _block.info.hash();
+ cnote << " Import took:" << total.elapsed();
+ cnote << " preliminaryChecks:" << preliminaryChecks;
+ cnote << " enactment:" << enactment;
+ cnote << " collation:" << collation;
+ cnote << " writing:" << writing;
+ cnote << " checkBest:" << checkBest;
+ }
#endif
if (!route.empty())
@@ -915,8 +926,8 @@ void BlockChain::checkConsistency()
delete it;
}
-static inline unsigned upow(unsigned a, unsigned b) { while (b-- > 0) a *= a; return a; }
-static inline unsigned ceilDiv(unsigned n, unsigned d) { return n / (n + d - 1); }
+static inline unsigned upow(unsigned a, unsigned b) { if (!b) return 1; while (--b > 0) a *= a; return a; }
+static inline unsigned ceilDiv(unsigned n, unsigned d) { return (n + d - 1) / d; }
//static inline unsigned floorDivPow(unsigned n, unsigned a, unsigned b) { return n / upow(a, b); }
//static inline unsigned ceilDivPow(unsigned n, unsigned a, unsigned b) { return ceilDiv(n, upow(a, b)); }
@@ -1054,3 +1065,59 @@ bytes BlockChain::block(h256 const& _hash) const
return m_blocks[_hash];
}
+
+VerifiedBlockRef BlockChain::verifyBlock(bytes const& _block, function const& _onBad)
+{
+ VerifiedBlockRef res;
+ try
+ {
+ res.info.populate(_block, CheckEverything);
+ res.info.verifyInternals(&_block);
+ }
+ catch (Exception& ex)
+ {
+ ex << errinfo_now(time(0));
+ ex << errinfo_block(_block);
+ if (_onBad)
+ _onBad(ex);
+ throw;
+ }
+
+ RLP r(_block);
+ unsigned i = 0;
+ for (auto const& uncle: r[2])
+ {
+ try
+ {
+ BlockInfo().populateFromHeader(RLP(uncle.data()), CheckEverything);
+ }
+ catch (Exception& ex)
+ {
+ ex << errinfo_uncleIndex(i);
+ ex << errinfo_now(time(0));
+ ex << errinfo_block(_block);
+ if (_onBad)
+ _onBad(ex);
+ throw;
+ }
+ ++i;
+ }
+ i = 0;
+ for (auto const& tr: r[1])
+ {
+ try
+ {
+ res.transactions.push_back(Transaction(tr.data(), CheckTransaction::Everything));
+ }
+ catch (Exception& ex)
+ {
+ ex << errinfo_transactionIndex(i);
+ ex << errinfo_block(_block);
+ throw;
+ }
+ ++i;
+ }
+ res.block = bytesConstRef(&_block);
+ return move(res);
+}
+
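Aside: after this change, header, uncle and transaction checks all funnel through the static verifyBlock(), which also decorates any exception with errinfo_block and errinfo_now before handing it to the optional callback (and then rethrows). A hedged usage sketch; the callback signature is inferred from the setOnBad() calls later in this patch, and the wrapper name and logging are illustrative:

// Sketch only: verifying raw block bytes ahead of import with a bad-block callback.
VerifiedBlockRef verifyForImport(bytes const& _blockBytes)
{
	return BlockChain::verifyBlock(_blockBytes, [](Exception& _ex) {
		// _ex already carries the errinfo_block / errinfo_now hints here;
		// the exception still propagates to the caller after this returns.
		cwarn << "Rejecting bad block:" << _ex.what();
	});
}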
diff --git a/libethereum/BlockChain.h b/libethereum/BlockChain.h
index a67ec9a9c..5f197cfee 100644
--- a/libethereum/BlockChain.h
+++ b/libethereum/BlockChain.h
@@ -40,6 +40,7 @@
#include "Account.h"
#include "Transaction.h"
#include "BlockQueue.h"
+#include "VerifiedBlock.h"
namespace ldb = leveldb;
namespace std
@@ -120,7 +121,7 @@ public:
/// Import block into disk-backed DB
/// @returns the block hashes of any blocks that came into/went out of the canonical block chain.
ImportRoute import(bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir = ImportRequirements::Default);
- ImportRoute import(BlockInfo const& _bi, bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir = ImportRequirements::Default);
+ ImportRoute import(VerifiedBlockRef const& _block, OverlayDB const& _db, ImportRequirements::value _ir = ImportRequirements::Default);
/// Returns true if the given block is known (though not necessarily a part of the canon chain).
bool isKnown(h256 const& _hash) const;
@@ -143,6 +144,7 @@ public:
BlockLogBlooms logBlooms() const { return logBlooms(currentHash()); }
/// Get the transactions' receipts of a block (or the most recent mined if none given). Thread-safe.
+ /// Receipts are given in the same order as the transactions.
BlockReceipts receipts(h256 const& _hash) const { return queryExtras(_hash, m_receipts, x_receipts, NullBlockReceipts); }
BlockReceipts receipts() const { return receipts(currentHash()); }
@@ -256,6 +258,12 @@ public:
/// Deallocate unused data.
void garbageCollect(bool _force = false);
+ /// Verify block and prepare it for enactment
+ static VerifiedBlockRef verifyBlock(bytes const& _block, std::function const& _onBad = std::function());
+
+ /// Change the function that is called with a bad block.
+ template void setOnBad(T const& _t) { m_onBad = _t; }
+
private:
static h256 chunkId(unsigned _level, unsigned _index) { return h256(_index * 0xff + _level); }
@@ -335,6 +343,8 @@ private:
ldb::ReadOptions m_readOptions;
ldb::WriteOptions m_writeOptions;
+ std::function m_onBad; ///< Called if we have a block that doesn't verify.
+
friend std::ostream& operator<<(std::ostream& _out, BlockChain const& _bc);
};
diff --git a/libethereum/BlockQueue.cpp b/libethereum/BlockQueue.cpp
index 360bf915e..f142be62e 100644
--- a/libethereum/BlockQueue.cpp
+++ b/libethereum/BlockQueue.cpp
@@ -22,10 +22,11 @@
#include "BlockQueue.h"
#include
#include
-#include
#include
#include
#include "BlockChain.h"
+#include "VerifiedBlock.h"
+#include "State.h"
using namespace std;
using namespace dev;
using namespace dev::eth;
@@ -36,8 +37,16 @@ const char* BlockQueueChannel::name() { return EthOrange "[]>"; }
const char* BlockQueueChannel::name() { return EthOrange "โฃโ
โถ"; }
#endif
+size_t const c_maxKnownCount = 100000;
+size_t const c_maxKnownSize = 128 * 1024 * 1024;
+size_t const c_maxUnknownCount = 100000;
+size_t const c_maxUnknownSize = 512 * 1024 * 1024; // Block size can be ~50kb
-BlockQueue::BlockQueue()
+BlockQueue::BlockQueue():
+ m_unknownSize(0),
+ m_knownSize(0),
+ m_unknownCount(0),
+ m_knownCount(0)
{
// Allow some room for other activity
unsigned verifierThreads = std::max(thread::hardware_concurrency(), 3U) - 2U;
@@ -56,11 +65,29 @@ BlockQueue::~BlockQueue()
i.join();
}
+void BlockQueue::clear()
+{
+ WriteGuard l(m_lock);
+ DEV_INVARIANT_CHECK;
+ Guard l2(m_verification);
+ m_readySet.clear();
+ m_drainingSet.clear();
+ m_verified.clear();
+ m_unverified.clear();
+ m_unknownSet.clear();
+ m_unknown.clear();
+ m_future.clear();
+ m_unknownSize = 0;
+ m_unknownCount = 0;
+ m_knownSize = 0;
+ m_knownCount = 0;
+}
+
void BlockQueue::verifierBody()
{
while (!m_deleting)
{
- std::pair work;
+ UnverifiedBlock work;
{
unique_lock l(m_verification);
@@ -70,63 +97,16 @@ void BlockQueue::verifierBody()
swap(work, m_unverified.front());
m_unverified.pop_front();
BlockInfo bi;
- bi.mixHash = work.first;
- m_verifying.push_back(make_pair(bi, bytes()));
+ bi.mixHash = work.hash;
+ bi.parentHash = work.parentHash;
+ m_verifying.push_back(VerifiedBlock { VerifiedBlockRef { bytesConstRef(), move(bi), Transactions() }, bytes() });
}
- std::pair res;
- swap(work.second, res.second);
- try {
- try {
- res.first.populate(res.second, CheckEverything, work.first);
- res.first.verifyInternals(&res.second);
- }
- catch (InvalidBlockNonce&)
- {
- badBlock(res.second, "Invalid block nonce");
- cwarn << " Nonce:" << res.first.nonce.hex();
- cwarn << " PoWHash:" << res.first.headerHash(WithoutNonce).hex();
- cwarn << " SeedHash:" << res.first.seedHash().hex();
- cwarn << " Target:" << res.first.boundary().hex();
- cwarn << " MixHash:" << res.first.mixHash.hex();
- Ethash::Result er = EthashAux::eval(res.first.seedHash(), res.first.headerHash(WithoutNonce), res.first.nonce);
- cwarn << " Ethash v:" << er.value.hex();
- cwarn << " Ethash mH:" << er.mixHash.hex();
- throw;
- }
- catch (Exception& _e)
- {
- badBlock(res.second, _e.what());
- throw;
- }
-
- RLP r(&res.second);
- for (auto const& uncle: r[2])
- {
- try
- {
- BlockInfo().populateFromHeader(RLP(uncle.data()), CheckEverything);
- }
- catch (InvalidNonce&)
- {
- badBlockHeader(uncle.data(), "Invalid uncle nonce");
- BlockInfo bi = BlockInfo::fromHeader(uncle.data(), CheckNothing);
- cwarn << " Nonce:" << bi.nonce.hex();
- cwarn << " PoWHash:" << bi.headerHash(WithoutNonce).hex();
- cwarn << " SeedHash:" << bi.seedHash().hex();
- cwarn << " Target:" << bi.boundary().hex();
- cwarn << " MixHash:" << bi.mixHash.hex();
- Ethash::Result er = EthashAux::eval(bi.seedHash(), bi.headerHash(WithoutNonce), bi.nonce);
- cwarn << " Ethash v:" << er.value.hex();
- cwarn << " Ethash mH:" << er.mixHash.hex();
- throw;
- }
- catch (Exception& _e)
- {
- badBlockHeader(uncle.data(), _e.what());
- throw;
- }
- }
+ VerifiedBlock res;
+ swap(work.block, res.blockData);
+ try
+ {
+ res.verified = BlockChain::verifyBlock(res.blockData, m_onBad);
}
catch (...)
{
@@ -135,33 +115,46 @@ void BlockQueue::verifierBody()
// has to be this order as that's how invariants() assumes.
WriteGuard l2(m_lock);
unique_lock l(m_verification);
- m_readySet.erase(work.first);
- m_knownBad.insert(work.first);
+ m_readySet.erase(work.hash);
+ m_knownBad.insert(work.hash);
}
unique_lock l(m_verification);
for (auto it = m_verifying.begin(); it != m_verifying.end(); ++it)
- if (it->first.mixHash == work.first)
+ if (it->verified.info.mixHash == work.hash)
{
m_verifying.erase(it);
goto OK1;
}
- cwarn << "GAA BlockQueue corrupt: job cancelled but cannot be found in m_verifying queue.";
+ cwarn << "BlockQueue missing our job: was there a GM?";
OK1:;
continue;
}
bool ready = false;
{
+ WriteGuard l2(m_lock);
unique_lock l(m_verification);
- if (m_verifying.front().first.mixHash == work.first)
+ if (!m_verifying.empty() && m_verifying.front().verified.info.mixHash == work.hash)
{
// we're next!
m_verifying.pop_front();
- m_verified.push_back(move(res));
- while (m_verifying.size() && !m_verifying.front().second.empty())
+ if (m_knownBad.count(res.verified.info.parentHash))
+ {
+ m_readySet.erase(res.verified.info.hash());
+ m_knownBad.insert(res.verified.info.hash());
+ }
+ else
+ m_verified.push_back(move(res));
+ while (m_verifying.size() && !m_verifying.front().blockData.empty())
{
- m_verified.push_back(move(m_verifying.front()));
+ if (m_knownBad.count(m_verifying.front().verified.info.parentHash))
+ {
+ m_readySet.erase(m_verifying.front().verified.info.hash());
+ m_knownBad.insert(res.verified.info.hash());
+ }
+ else
+ m_verified.push_back(move(m_verifying.front()));
m_verifying.pop_front();
}
ready = true;
@@ -169,12 +162,12 @@ void BlockQueue::verifierBody()
else
{
for (auto& i: m_verifying)
- if (i.first.mixHash == work.first)
+ if (i.verified.info.mixHash == work.hash)
{
i = move(res);
goto OK;
}
- cwarn << "GAA BlockQueue corrupt: job finished but cannot be found in m_verifying queue.";
+ cwarn << "BlockQueue missing our job: was there a GM?";
OK:;
}
}
@@ -234,6 +227,8 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
if (strftime(buf, 24, "%X", localtime(&bit)) == 0)
buf[0] = '\0'; // empty in case strftime fails
cblockq << "OK - queued for future [" << bi.timestamp << "vs" << time(0) << "] - will wait until" << buf;
+ m_unknownSize += _block.size();
+ m_unknownCount++;
return ImportResult::FutureTime;
}
else
@@ -242,6 +237,7 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
if (m_knownBad.count(bi.parentHash))
{
m_knownBad.insert(bi.hash());
+ updateBad(bi.hash());
// bad parent; this is bad too, note it as such
return ImportResult::BadChain;
}
@@ -251,6 +247,8 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
cblockq << "OK - queued as unknown parent:" << bi.parentHash;
m_unknown.insert(make_pair(bi.parentHash, make_pair(h, _block.toBytes())));
m_unknownSet.insert(h);
+ m_unknownSize += _block.size();
+ m_unknownCount++;
return ImportResult::UnknownParent;
}
@@ -259,9 +257,11 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
// If valid, append to blocks.
cblockq << "OK - ready for chain insertion.";
DEV_GUARDED(m_verification)
- m_unverified.push_back(make_pair(h, _block.toBytes()));
+ m_unverified.push_back(UnverifiedBlock { h, bi.parentHash, _block.toBytes() });
m_moreToVerify.notify_one();
m_readySet.insert(h);
+ m_knownSize += _block.size();
+ m_knownCount++;
noteReady_WITH_LOCK(h);
@@ -270,30 +270,93 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
}
}
-bool BlockQueue::doneDrain(h256s const& _bad)
+void BlockQueue::updateBad(h256 const& _bad)
{
- WriteGuard l(m_lock);
DEV_INVARIANT_CHECK;
- m_drainingSet.clear();
- if (_bad.size())
+ DEV_GUARDED(m_verification)
{
- vector> old;
- DEV_GUARDED(m_verification)
- swap(m_verified, old);
- for (auto& b: old)
+ collectUnknownBad(_bad);
+ bool moreBad = true;
+ while (moreBad)
{
- if (m_knownBad.count(b.first.parentHash))
- {
- m_knownBad.insert(b.first.hash());
- m_readySet.erase(b.first.hash());
- }
- else
- DEV_GUARDED(m_verification)
+ moreBad = false;
+ std::vector oldVerified;
+ swap(m_verified, oldVerified);
+ for (auto& b: oldVerified)
+ if (m_knownBad.count(b.verified.info.parentHash) || m_knownBad.count(b.verified.info.hash()))
+ {
+ m_knownBad.insert(b.verified.info.hash());
+ m_readySet.erase(b.verified.info.hash());
+ collectUnknownBad(b.verified.info.hash());
+ moreBad = true;
+ }
+ else
m_verified.push_back(std::move(b));
+
+ std::deque oldUnverified;
+ swap(m_unverified, oldUnverified);
+ for (auto& b: oldUnverified)
+ if (m_knownBad.count(b.parentHash) || m_knownBad.count(b.hash))
+ {
+ m_knownBad.insert(b.hash);
+ m_readySet.erase(b.hash);
+ collectUnknownBad(b.hash);
+ moreBad = true;
+ }
+ else
+ m_unverified.push_back(std::move(b));
+
+ std::deque oldVerifying;
+ swap(m_verifying, oldVerifying);
+ for (auto& b: oldVerifying)
+ if (m_knownBad.count(b.verified.info.parentHash) || m_knownBad.count(b.verified.info.mixHash))
+ {
+ h256 const& h = b.blockData.size() != 0 ? b.verified.info.hash() : b.verified.info.mixHash;
+ m_knownBad.insert(h);
+ m_readySet.erase(h);
+ collectUnknownBad(h);
+ moreBad = true;
+ }
+ else
+ m_verifying.push_back(std::move(b));
+ }
+ }
+ DEV_INVARIANT_CHECK;
+}
+
+void BlockQueue::collectUnknownBad(h256 const& _bad)
+{
+ list badQueue(1, _bad);
+ while (!badQueue.empty())
+ {
+ auto r = m_unknown.equal_range(badQueue.front());
+ badQueue.pop_front();
+ for (auto it = r.first; it != r.second; ++it)
+ {
+ m_unknownSize -= it->second.second.size();
+ m_unknownCount--;
+ auto newBad = it->second.first;
+ m_unknownSet.erase(newBad);
+ m_knownBad.insert(newBad);
+ badQueue.push_back(newBad);
}
+ m_unknown.erase(r.first, r.second);
}
- m_knownBad += _bad;
- return !m_readySet.empty();
+
+}
+
+bool BlockQueue::doneDrain(h256s const& _bad)
+{
+ WriteGuard l(m_lock);
+ DEV_INVARIANT_CHECK;
+ m_drainingSet.clear();
+ if (_bad.size())
+ {
+ // at least one of them was bad.
+ m_knownBad += _bad;
+ for (h256 const& b : _bad)
+ updateBad(b);
+ } return !m_readySet.empty();
}
void BlockQueue::tick(BlockChain const& _bc)
@@ -317,7 +380,11 @@ void BlockQueue::tick(BlockChain const& _bc)
DEV_INVARIANT_CHECK;
auto end = m_future.lower_bound(t);
for (auto i = m_future.begin(); i != end; ++i)
+ {
+ m_unknownSize -= i->second.second.size();
+ m_unknownCount--;
todo.push_back(move(i->second));
+ }
m_future.erase(m_future.begin(), end);
}
}
@@ -348,12 +415,24 @@ QueueStatus BlockQueue::blockStatus(h256 const& _h) const
QueueStatus::Unknown;
}
-void BlockQueue::drain(std::vector>& o_out, unsigned _max)
+bool BlockQueue::knownFull() const
+{
+ return m_knownSize > c_maxKnownSize || m_knownCount > c_maxKnownCount;
+}
+
+bool BlockQueue::unknownFull() const
+{
+ return m_unknownSize > c_maxUnknownSize || m_unknownCount > c_maxUnknownCount;
+}
+
+void BlockQueue::drain(VerifiedBlocks& o_out, unsigned _max)
{
WriteGuard l(m_lock);
DEV_INVARIANT_CHECK;
+
if (m_drainingSet.empty())
{
+ bool wasFull = knownFull();
DEV_GUARDED(m_verification)
{
o_out.resize(min(_max, m_verified.size()));
@@ -364,11 +443,16 @@ void BlockQueue::drain(std::vector>& o_out, unsigned
for (auto const& bs: o_out)
{
// TODO: @optimise use map rather than vector & set.
- auto h = bs.first.hash();
+ auto h = bs.verified.info.hash();
m_drainingSet.insert(h);
m_readySet.erase(h);
+ m_knownSize -= bs.verified.block.size();
+ m_knownCount--;
}
+ if (wasFull && !knownFull())
+ m_onRoomAvailable();
}
+
}
bool BlockQueue::invariants() const
@@ -389,7 +473,11 @@ void BlockQueue::noteReady_WITH_LOCK(h256 const& _good)
for (auto it = r.first; it != r.second; ++it)
{
DEV_GUARDED(m_verification)
- m_unverified.push_back(it->second);
+ m_unverified.push_back(UnverifiedBlock { it->second.first, it->first, it->second.second });
+ m_knownSize += it->second.second.size();
+ m_knownCount++;
+ m_unknownSize -= it->second.second.size();
+ m_unknownCount--;
auto newReady = it->second.first;
m_unknownSet.erase(newReady);
m_readySet.insert(newReady);
@@ -400,6 +488,7 @@ void BlockQueue::noteReady_WITH_LOCK(h256 const& _good)
}
if (notify)
m_moreToVerify.notify_all();
+ DEV_INVARIANT_CHECK;
}
void BlockQueue::retryAllUnknown()
@@ -409,18 +498,23 @@ void BlockQueue::retryAllUnknown()
for (auto it = m_unknown.begin(); it != m_unknown.end(); ++it)
{
DEV_GUARDED(m_verification)
- m_unverified.push_back(it->second);
+ m_unverified.push_back(UnverifiedBlock { it->second.first, it->first, it->second.second });
auto newReady = it->second.first;
m_unknownSet.erase(newReady);
m_readySet.insert(newReady);
+ m_knownCount++;
m_moreToVerify.notify_one();
}
m_unknown.clear();
+ m_knownSize += m_unknownSize;
+ m_unknownSize = 0;
+ m_unknownCount = 0;
m_moreToVerify.notify_all();
}
std::ostream& dev::eth::operator<<(std::ostream& _out, BlockQueueStatus const& _bqs)
{
+ _out << "importing: " << _bqs.importing << endl;
_out << "verified: " << _bqs.verified << endl;
_out << "verifying: " << _bqs.verifying << endl;
_out << "unverified: " << _bqs.unverified << endl;
diff --git a/libethereum/BlockQueue.h b/libethereum/BlockQueue.h
index 45043559b..8f079aa66 100644
--- a/libethereum/BlockQueue.h
+++ b/libethereum/BlockQueue.h
@@ -31,6 +31,7 @@
#include
#include
#include
+#include "VerifiedBlock.h"
namespace dev
{
@@ -45,6 +46,7 @@ struct BlockQueueChannel: public LogChannel { static const char* name(); static
struct BlockQueueStatus
{
+ size_t importing;
size_t verified;
size_t verifying;
size_t unverified;
@@ -74,14 +76,14 @@ public:
~BlockQueue();
/// Import a block into the queue.
- ImportResult import(bytesConstRef _tx, BlockChain const& _bc, bool _isOurs = false);
+ ImportResult import(bytesConstRef _block, BlockChain const& _bc, bool _isOurs = false);
/// Notes that time has moved on and some blocks that used to be "in the future" may now be valid.
void tick(BlockChain const& _bc);
/// Grabs at most @a _max of the blocks that are ready, giving them in the correct order for insertion into the chain.
/// Don't forget to call doneDrain() once you're done importing.
- void drain(std::vector>& o_out, unsigned _max);
+ void drain(std::vector& o_out, unsigned _max);
/// Must be called after a drain() call. Notes that the drained blocks have been imported into the blockchain, so we can forget about them.
/// @returns true iff there are additional blocks ready to be processed.
@@ -97,25 +99,40 @@ public:
std::pair items() const { ReadGuard l(m_lock); return std::make_pair(m_readySet.size(), m_unknownSet.size()); }
/// Clear everything.
- void clear() { WriteGuard l(m_lock); DEV_INVARIANT_CHECK; Guard l2(m_verification); m_readySet.clear(); m_drainingSet.clear(); m_verified.clear(); m_unverified.clear(); m_unknownSet.clear(); m_unknown.clear(); m_future.clear(); }
+ void clear();
/// Return first block with an unknown parent.
h256 firstUnknown() const { ReadGuard l(m_lock); return m_unknownSet.size() ? *m_unknownSet.begin() : h256(); }
/// Get some information on the current status.
- BlockQueueStatus status() const { ReadGuard l(m_lock); Guard l2(m_verification); return BlockQueueStatus{m_verified.size(), m_verifying.size(), m_unverified.size(), m_future.size(), m_unknown.size(), m_knownBad.size()}; }
+ BlockQueueStatus status() const { ReadGuard l(m_lock); Guard l2(m_verification); return BlockQueueStatus{m_drainingSet.size(), m_verified.size(), m_verifying.size(), m_unverified.size(), m_future.size(), m_unknown.size(), m_knownBad.size()}; }
/// Get some information on the given block's status regarding us.
QueueStatus blockStatus(h256 const& _h) const;
template Handler onReady(T const& _t) { return m_onReady.add(_t); }
+ template Handler onRoomAvailable(T const& _t) { return m_onRoomAvailable.add(_t); }
+
+ template void setOnBad(T const& _t) { m_onBad = _t; }
+
+ bool knownFull() const;
+ bool unknownFull() const;
private:
+ struct UnverifiedBlock
+ {
+ h256 hash;
+ h256 parentHash;
+ bytes block;
+ };
+
void noteReady_WITH_LOCK(h256 const& _b);
bool invariants() const override;
void verifierBody();
+ void collectUnknownBad(h256 const& _bad);
+ void updateBad(h256 const& _bad);
mutable boost::shared_mutex m_lock; ///< General lock for the sets, m_future and m_unknown.
h256Hash m_drainingSet; ///< All blocks being imported.
@@ -125,15 +142,22 @@ private:
h256Hash m_knownBad; ///< Set of blocks that we know will never be valid.
std::multimap> m_future; ///< Set of blocks that are not yet valid. Ordered by timestamp
Signal m_onReady; ///< Called when a subsequent call to import blocks will return a non-empty container. Be nice and exit fast.
+ Signal m_onRoomAvailable; ///< Called when space for new blocks becomes available after a drain. Be nice and exit fast.
mutable Mutex m_verification; ///< Mutex that allows writing to m_verified, m_verifying and m_unverified.
std::condition_variable m_moreToVerify; ///< Signaled when m_unverified has a new entry.
- std::vector> m_verified; ///< List of blocks, in correct order, verified and ready for chain-import.
- std::deque> m_verifying; ///< List of blocks being verified; as long as the second component (bytes) is empty, it's not finished.
- std::deque> m_unverified; ///< List of blocks, in correct order, ready for verification.
+ std::vector m_verified; ///< List of blocks, in correct order, verified and ready for chain-import.
+ std::deque m_verifying; ///< List of blocks being verified; as long as the block component (bytes) is empty, it's not finished.
+ std::deque m_unverified; ///< List of blocks, in correct order, ready for verification.
std::vector m_verifiers; ///< Threads who only verify.
bool m_deleting = false; ///< Exit condition for verifiers.
+
+ std::function m_onBad; ///< Called if we have a block that doesn't verify.
+ std::atomic m_unknownSize; ///< Tracks total size in bytes of all unknown blocks
+ std::atomic m_knownSize; ///< Tracks total size in bytes of all known blocks.
+ std::atomic m_unknownCount; ///< Tracks total count of unknown blocks. Used to avoid additional syncing
+ std::atomic m_knownCount; ///< Tracks total count of known blocks. Used to avoid additional syncing
};
std::ostream& operator<<(std::ostream& _out, BlockQueueStatus const& _s);
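Aside: the drain()/doneDrain() contract documented above is easiest to see as a small consumer loop; this mirrors, in simplified form, what BlockChain::sync() does earlier in the patch. Error handling and ImportRequirements are deliberately omitted, and the function name is invented for illustration:

// Sketch only: pulling verified blocks out of the queue and importing them.
void importReady(BlockQueue& _bq, BlockChain& _bc, OverlayDB const& _stateDB)
{
	VerifiedBlocks blocks;
	_bq.drain(blocks, 10);                     // take up to 10 blocks, in order
	h256s badBlocks;
	for (VerifiedBlock const& b: blocks)
		try
		{
			_bc.import(b.verified, _stateDB);
		}
		catch (...)
		{
			badBlocks.push_back(b.verified.info.hash());
		}
	_bq.doneDrain(badBlocks);                  // always pair a drain() with doneDrain()
}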
diff --git a/libethereum/CMakeLists.txt b/libethereum/CMakeLists.txt
index 8203402cb..6598e1bd7 100644
--- a/libethereum/CMakeLists.txt
+++ b/libethereum/CMakeLists.txt
@@ -14,6 +14,10 @@ aux_source_directory(. SRC_LIST)
include_directories(BEFORE ..)
include_directories(${LEVELDB_INCLUDE_DIRS})
include_directories(${Boost_INCLUDE_DIRS})
+if (JSONRPC)
+include_directories(BEFORE ${JSONCPP_INCLUDE_DIRS})
+include_directories(${JSON_RPC_CPP_INCLUDE_DIRS})
+endif()
set(EXECUTABLE ethereum)
@@ -30,6 +34,13 @@ target_link_libraries(${EXECUTABLE} ethcore)
target_link_libraries(${EXECUTABLE} ${LEVELDB_LIBRARIES})
target_link_libraries(${EXECUTABLE} ${Boost_REGEX_LIBRARIES})
target_link_libraries(${EXECUTABLE} secp256k1)
+if (JSONRPC)
+ target_link_libraries(${EXECUTABLE} ${JSON_RPC_CPP_CLIENT_LIBRARIES})
+ target_link_libraries(${EXECUTABLE} ${CURL_LIBRARIES})
+ if (DEFINED WIN32 AND NOT DEFINED CMAKE_COMPILER_IS_MINGW)
+ eth_copy_dlls(${EXECUTABLE} CURL_DLLS)
+ endif()
+endif()
if (CMAKE_COMPILER_IS_MINGW)
target_link_libraries(${EXECUTABLE} ssp shlwapi)
diff --git a/libethereum/Client.cpp b/libethereum/Client.cpp
index 9e2dfc0a6..5581d1071 100644
--- a/libethereum/Client.cpp
+++ b/libethereum/Client.cpp
@@ -25,9 +25,16 @@
#include
#include
#include
+#if ETH_JSONRPC || !ETH_TRUE
+#include
+#include
+#endif
#include
#include
#include
+#if ETH_JSONRPC || !ETH_TRUE
+#include "Sentinel.h"
+#endif
#include "Defaults.h"
#include "Executive.h"
#include "EthereumHost.h"
@@ -80,6 +87,114 @@ void VersionChecker::setOk()
}
}
+void Client::onBadBlock(Exception& _ex) const
+{
+ // BAD BLOCK!!!
+ bytes const* block = boost::get_error_info(_ex);
+ if (!block)
+ {
+ cwarn << "ODD: onBadBlock called but exception has no block in it.";
+ return;
+ }
+
+ badBlock(*block, _ex.what());
+
+#if ETH_JSONRPC || !ETH_TRUE
+ Json::Value report;
+
+ report["client"] = "cpp";
+ report["version"] = Version;
+ report["protocolVersion"] = c_protocolVersion;
+ report["databaseVersion"] = c_databaseVersion;
+ report["errortype"] = _ex.what();
+ report["block"] = toHex(*block);
+
+ // add the various hints.
+ if (unsigned const* uncleIndex = boost::get_error_info(_ex))
+ {
+ // uncle that failed.
+ report["hints"]["uncleIndex"] = *uncleIndex;
+ }
+ else if (unsigned const* txIndex = boost::get_error_info(_ex))
+ {
+ // transaction that failed.
+ report["hints"]["transactionIndex"] = *txIndex;
+ }
+ else
+ {
+ // general block failure.
+ }
+
+ if (string const* vmtraceJson = boost::get_error_info(_ex))
+ Json::Reader().parse(*vmtraceJson, report["hints"]["vmtrace"]);
+
+ if (vector const* receipts = boost::get_error_info(_ex))
+ {
+ report["hints"]["receipts"] = Json::arrayValue;
+ for (auto const& r: *receipts)
+ report["hints"]["receipts"].append(toHex(r));
+ }
+ if (h256Hash const* excluded = boost::get_error_info(_ex))
+ {
+ report["hints"]["unclesExcluded"] = Json::arrayValue;
+ for (auto const& r: h256Set() + *excluded)
+ report["hints"]["unclesExcluded"].append(Json::Value(r.hex()));
+ }
+
+#define DEV_HINT_ERRINFO(X) \
+ if (auto const* n = boost::get_error_info(_ex)) \
+ report["hints"][#X] = toString(*n)
+#define DEV_HINT_ERRINFO_HASH(X) \
+ if (auto const* n = boost::get_error_info(_ex)) \
+ report["hints"][#X] = n->hex()
+
+ DEV_HINT_ERRINFO_HASH(hash256);
+ DEV_HINT_ERRINFO(uncleNumber);
+ DEV_HINT_ERRINFO(currentNumber);
+ DEV_HINT_ERRINFO(now);
+ DEV_HINT_ERRINFO(invalidSymbol);
+ DEV_HINT_ERRINFO(wrongAddress);
+ DEV_HINT_ERRINFO(comment);
+ DEV_HINT_ERRINFO(min);
+ DEV_HINT_ERRINFO(max);
+ DEV_HINT_ERRINFO(name);
+ DEV_HINT_ERRINFO(field);
+ DEV_HINT_ERRINFO(data);
+ DEV_HINT_ERRINFO_HASH(nonce);
+ DEV_HINT_ERRINFO(difficulty);
+ DEV_HINT_ERRINFO(target);
+ DEV_HINT_ERRINFO_HASH(seedHash);
+ DEV_HINT_ERRINFO_HASH(mixHash);
+ if (tuple const* r = boost::get_error_info(_ex))
+ {
+ report["hints"]["ethashResult"]["value"] = get<0>(*r).hex();
+ report["hints"]["ethashResult"]["mixHash"] = get<1>(*r).hex();
+ }
+ DEV_HINT_ERRINFO(required);
+ DEV_HINT_ERRINFO(got);
+ DEV_HINT_ERRINFO_HASH(required_LogBloom);
+ DEV_HINT_ERRINFO_HASH(got_LogBloom);
+ DEV_HINT_ERRINFO_HASH(required_h256);
+ DEV_HINT_ERRINFO_HASH(got_h256);
+
+ cwarn << ("Report: \n" + Json::StyledWriter().write(report));
+
+ if (!m_sentinel.empty())
+ {
+ jsonrpc::HttpClient client(m_sentinel);
+ Sentinel rpc(client);
+ try
+ {
+ rpc.eth_badBlock(report);
+ }
+ catch (...)
+ {
+ cwarn << "Error reporting to sentinel. Sure the address" << m_sentinel << "is correct?";
+ }
+ }
+#endif
+}
+
void BasicGasPricer::update(BlockChain const& _bc)
{
unsigned c = 0;
@@ -174,7 +289,7 @@ Client::Client(p2p::Host* _extNet, std::string const& _dbPath, WithExisting _for
}
Client::Client(p2p::Host* _extNet, std::shared_ptr _gp, std::string const& _dbPath, WithExisting _forceAction, u256 _networkId):
- Worker("eth"),
+ Worker("eth", 0),
m_vc(_dbPath),
m_bc(_dbPath, max(m_vc.action(), _forceAction), [](unsigned d, unsigned t){ cerr << "REVISING BLOCKCHAIN: Processed " << d << " of " << t << "...\r"; }),
m_gp(_gp),
@@ -185,6 +300,8 @@ Client::Client(p2p::Host* _extNet, std::shared_ptr _gp, std::string c
m_lastGetWork = std::chrono::system_clock::now() - chrono::seconds(30);
m_tqReady = m_tq.onReady([=](){ this->onTransactionQueueReady(); }); // TODO: should read m_tq->onReady(thisThread, syncTransactionQueue);
m_bqReady = m_bq.onReady([=](){ this->onBlockQueueReady(); }); // TODO: should read m_bq->onReady(thisThread, syncBlockQueue);
+ m_bq.setOnBad([=](Exception& ex){ this->onBadBlock(ex); });
+ m_bc.setOnBad([=](Exception& ex){ this->onBadBlock(ex); });
m_farm.onSolutionFound([=](ProofOfWork::Solution const& s){ return this->submitWork(s); });
m_gp->update(m_bc);
@@ -206,6 +323,18 @@ Client::~Client()
stopWorking();
}
+static const Address c_canary("0x");
+
+bool Client::isChainBad() const
+{
+ return stateAt(c_canary, 0) != 0;
+}
+
+bool Client::isUpgradeNeeded() const
+{
+ return stateAt(c_canary, 0) == 2;
+}
+
void Client::setNetworkId(u256 _n)
{
if (auto h = m_host.lock())
@@ -299,21 +428,17 @@ void Client::killChain()
void Client::clearPending()
{
- h256Hash changeds;
DEV_WRITE_GUARDED(x_postMine)
{
if (!m_postMine.pending().size())
return;
-// for (unsigned i = 0; i < m_postMine.pending().size(); ++i)
-// appendFromNewPending(m_postMine.logBloom(i), changeds);
- changeds.insert(PendingChangedFilter);
m_tq.clear();
DEV_READ_GUARDED(x_preMine)
m_postMine = m_preMine;
}
startMining();
-
+ h256Hash changeds;
noteChanged(changeds);
}
@@ -336,47 +461,53 @@ static S& filtersStreamOut(S& _out, T const& _fs)
return _out;
}
-void Client::appendFromNewPending(TransactionReceipt const& _receipt, h256Hash& io_changed, h256 _transactionHash)
+void Client::appendFromNewPending(TransactionReceipt const& _receipt, h256Hash& io_changed, h256 _sha3)
{
Guard l(x_filtersWatches);
+ io_changed.insert(PendingChangedFilter);
+ m_specialFilters.at(PendingChangedFilter).push_back(_sha3);
for (pair& i: m_filters)
- if (i.second.filter.envelops(RelativeBlock::Pending, m_bc.number() + 1))
+ {
+ // acceptable number.
+ auto m = i.second.filter.matches(_receipt);
+ if (m.size())
{
- // acceptable number.
- auto m = i.second.filter.matches(_receipt);
- if (m.size())
- {
- // filter catches them
- for (LogEntry const& l: m)
- i.second.changes.push_back(LocalisedLogEntry(l, m_bc.number() + 1, _transactionHash));
- io_changed.insert(i.first);
- }
+ // filter catches them
+ for (LogEntry const& l: m)
+ i.second.changes.push_back(LocalisedLogEntry(l));
+ io_changed.insert(i.first);
}
+ }
}
void Client::appendFromNewBlock(h256 const& _block, h256Hash& io_changed)
{
// TODO: more precise check on whether the txs match.
auto d = m_bc.info(_block);
- auto br = m_bc.receipts(_block);
+ auto receipts = m_bc.receipts(_block).receipts;
Guard l(x_filtersWatches);
+ io_changed.insert(ChainChangedFilter);
+ m_specialFilters.at(ChainChangedFilter).push_back(_block);
for (pair& i: m_filters)
- if (i.second.filter.envelops(RelativeBlock::Latest, d.number) && i.second.filter.matches(d.logBloom))
- // acceptable number & looks like block may contain a matching log entry.
- for (size_t j = 0; j < br.receipts.size(); j++)
+ {
+ // acceptable number & looks like block may contain a matching log entry.
+ unsigned logIndex = 0;
+ for (size_t j = 0; j < receipts.size(); j++)
+ {
+ logIndex++;
+ auto tr = receipts[j];
+ auto m = i.second.filter.matches(tr);
+ if (m.size())
{
- auto tr = br.receipts[j];
- auto m = i.second.filter.matches(tr);
- if (m.size())
- {
- auto transactionHash = transaction(d.hash(), j).sha3();
- // filter catches them
- for (LogEntry const& l: m)
- i.second.changes.push_back(LocalisedLogEntry(l, (unsigned)d.number, transactionHash));
- io_changed.insert(i.first);
- }
+ auto transactionHash = transaction(d.hash(), j).sha3();
+ // filter catches them
+ for (LogEntry const& l: m)
+ i.second.changes.push_back(LocalisedLogEntry(l, d, transactionHash, j, logIndex));
+ io_changed.insert(i.first);
}
+ }
+ }
}
void Client::setForceMining(bool _enable)
@@ -448,6 +579,9 @@ ProofOfWork::WorkPackage Client::getWork()
bool oldShould = shouldServeWork();
m_lastGetWork = chrono::system_clock::now();
+ if (!m_mineOnBadChain && isChainBad())
+ return ProofOfWork::WorkPackage();
+
// if this request has made us bother to serve work, prep it now.
if (!oldShould && shouldServeWork())
onPostStateChanged();
@@ -480,13 +614,10 @@ bool Client::submitWork(ProofOfWork::Solution const& _solution)
void Client::syncBlockQueue()
{
ImportRoute ir;
-
cwork << "BQ ==> CHAIN ==> STATE";
- {
- tie(ir.first, ir.second, m_syncBlockQueue) = m_bc.sync(m_bq, m_stateDB, rand() % 90 + 10);
- if (ir.first.empty())
- return;
- }
+ tie(ir.first, ir.second, m_syncBlockQueue) = m_bc.sync(m_bq, m_stateDB, rand() % 10 + 5);
+ if (ir.first.empty())
+ return;
onChainChanged(ir);
}
@@ -511,7 +642,7 @@ void Client::syncTransactionQueue()
DEV_READ_GUARDED(x_postMine)
for (size_t i = 0; i < newPendingReceipts.size(); i++)
appendFromNewPending(newPendingReceipts[i], changeds, m_postMine.pending()[i].sha3());
- changeds.insert(PendingChangedFilter);
+
// Tell farm about new transaction (i.e. restartProofOfWork mining).
onPostStateChanged();
@@ -554,7 +685,6 @@ void Client::onChainChanged(ImportRoute const& _ir)
h256Hash changeds;
for (auto const& h: _ir.first)
appendFromNewBlock(h, changeds);
- changeds.insert(ChainChangedFilter);
// RESTART MINING
@@ -605,11 +735,22 @@ bool Client::remoteActive() const
void Client::onPostStateChanged()
{
- cnote << "Post state changed";
+ cnote << "Post state changed.";
+ rejigMining();
+ m_remoteWorking = false;
+}
- if (m_bq.items().first == 0 && (isMining() || remoteActive()))
+void Client::startMining()
+{
+ m_wouldMine = true;
+ rejigMining();
+}
+
+void Client::rejigMining()
+{
+ if ((wouldMine() || remoteActive()) && !m_bq.items().first && (!isChainBad() || mineOnBadChain()) /*&& (forceMining() || transactionsWaiting())*/)
{
- cnote << "Restarting mining...";
+ cnote << "Rejigging mining...";
DEV_WRITE_GUARDED(x_working)
m_working.commitToMine(m_bc);
DEV_READ_GUARDED(x_working)
@@ -618,20 +759,21 @@ void Client::onPostStateChanged()
m_postMine = m_working;
m_miningInfo = m_postMine.info();
}
- m_farm.setWork(m_miningInfo);
- Ethash::ensurePrecomputed(m_bc.number());
- }
- m_remoteWorking = false;
-}
+ if (m_wouldMine)
+ {
+ m_farm.setWork(m_miningInfo);
+ if (m_turboMining)
+ m_farm.startGPU();
+ else
+ m_farm.startCPU();
-void Client::startMining()
-{
- if (m_turboMining)
- m_farm.startGPU();
- else
- m_farm.startCPU();
- onPostStateChanged();
+ m_farm.setWork(m_miningInfo);
+ Ethash::ensurePrecomputed(m_bc.number());
+ }
+ }
+ if (!m_wouldMine)
+ m_farm.stop();
}
void Client::noteChanged(h256Hash const& _filters)
@@ -648,15 +790,18 @@ void Client::noteChanged(h256Hash const& _filters)
cwatch << "!!!" << w.first << w.second.id.abridged();
w.second.changes += m_filters.at(w.second.id).changes;
}
- else
- {
- cwatch << "!!!" << w.first << LogTag::Special << (w.second.id == PendingChangedFilter ? "pending" : w.second.id == ChainChangedFilter ? "chain" : "???");
- w.second.changes.push_back(LocalisedLogEntry(SpecialLogEntry, 0));
- }
+ else if (m_specialFilters.count(w.second.id))
+ for (h256 const& hash: m_specialFilters.at(w.second.id))
+ {
+ cwatch << "!!!" << w.first << LogTag::Special << (w.second.id == PendingChangedFilter ? "pending" : w.second.id == ChainChangedFilter ? "chain" : "???");
+ w.second.changes.push_back(LocalisedLogEntry(SpecialLogEntry, hash));
+ }
}
// clear the filters now.
for (auto& i: m_filters)
i.second.changes.clear();
+ for (auto& i: m_specialFilters)
+ i.second.clear();
}
void Client::doWork()
@@ -716,7 +861,16 @@ void Client::checkWatchGarbage()
State Client::asOf(h256 const& _block) const
{
- return State(m_stateDB, bc(), _block);
+ try
+ {
+ return State(m_stateDB, bc(), _block);
+ }
+ catch (Exception& ex)
+ {
+ ex << errinfo_block(bc().block(_block));
+ onBadBlock(ex);
+ return State();
+ }
}
void Client::prepareForTransaction()
@@ -746,3 +900,9 @@ void Client::flushTransactions()
{
doWork();
}
+
+HashChainStatus Client::hashChainStatus() const
+{
+ auto h = m_host.lock();
+ return h ? h->status() : HashChainStatus { 0, 0, false };
+}
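Aside: from an embedder's point of view the new Client hooks compose as follows. The sentinel URL and the helper name are placeholders, and construction of the client is assumed to have happened elsewhere:

// Sketch only: exercising the new bad-chain canary and sentinel hooks.
void configureNode(Client& _client)
{
	_client.setSentinel("http://localhost:8080");   // bad-block reports get POSTed here
	if (_client.isUpgradeNeeded())
		cwarn << "Canary says a client upgrade is needed.";
	if (_client.isChainBad() && !_client.mineOnBadChain())
		cwarn << "Canary says the chain is bad; mining will be suppressed.";
	_client.startMining();                          // routed through rejigMining()
}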
diff --git a/libethereum/Client.h b/libethereum/Client.h
index 90200c20b..cba93290b 100644
--- a/libethereum/Client.h
+++ b/libethereum/Client.h
@@ -143,7 +143,7 @@ public:
ExecutionResult call(Address _dest, bytes const& _data = bytes(), u256 _gas = 125000, u256 _value = 0, u256 _gasPrice = 1 * ether, Address const& _from = Address());
/// Get the remaining gas limit in this block.
- virtual u256 gasLimitRemaining() const { return m_postMine.gasLimitRemaining(); }
+ virtual u256 gasLimitRemaining() const override { return m_postMine.gasLimitRemaining(); }
// [PRIVATE API - only relevant for base clients, not available in general]
dev::eth::State state(unsigned _txi, h256 _block) const;
@@ -156,12 +156,14 @@ public:
CanonBlockChain const& blockChain() const { return m_bc; }
/// Get some information on the block queue.
BlockQueueStatus blockQueueStatus() const { return m_bq.status(); }
+ /// Get some information on the hash chain status.
+ HashChainStatus hashChainStatus() const;
/// Get the block queue.
BlockQueue const& blockQueue() const { return m_bq; }
// Mining stuff:
- void setAddress(Address _us) { WriteGuard l(x_preMine); m_preMine.setAddress(_us); }
+ virtual void setAddress(Address _us) override { WriteGuard l(x_preMine); m_preMine.setAddress(_us); }
/// Check block validity prior to mining.
bool miningParanoia() const { return m_paranoia; }
@@ -176,14 +178,26 @@ public:
/// Enable/disable GPU mining.
void setTurboMining(bool _enable = true) { m_turboMining = _enable; if (isMining()) startMining(); }
+ /// Check to see if we'd mine on an apparently bad chain.
+ bool mineOnBadChain() const { return m_mineOnBadChain; }
+ /// Set true if you want to mine even when the canary says you're on the wrong chain.
+ void setMineOnBadChain(bool _v) { m_mineOnBadChain = _v; }
+
+ /// @returns true if the canary says that the chain is bad.
+ bool isChainBad() const;
+ /// @returns true if the canary says that the client should be upgraded.
+ bool isUpgradeNeeded() const;
+
/// Start mining.
/// NOT thread-safe - call it & stopMining only from a single thread
void startMining() override;
/// Stop mining.
/// NOT thread-safe
- void stopMining() override { m_farm.stop(); }
+ void stopMining() override { m_wouldMine = false; rejigMining(); }
/// Are we mining now?
bool isMining() const override { return m_farm.isMining(); }
+ /// Would we like to be mining now?
+ bool wouldMine() const override { return m_wouldMine; }
/// The hashrate...
uint64_t hashrate() const override;
/// Check the progress of the mining.
@@ -215,6 +229,8 @@ public:
void retryUnkonwn() { m_bq.retryAllUnknown(); }
/// Get a report of activity.
ActivityReport activityReport() { ActivityReport ret; std::swap(m_report, ret); return ret; }
+ /// Set a JSONRPC server to which we can report bad blocks.
+ void setSentinel(std::string const& _server) { m_sentinel = _server; }
protected:
/// InterfaceStub methods
@@ -251,6 +267,9 @@ private:
/// Called when Worker is exiting.
void doneWorking() override;
+ /// Called when wouldMine(), turboMining(), isChainBad(), forceMining(), pendingTransactions() have changed.
+ void rejigMining();
+
/// Magically called when the chain has changed. An import route is provided.
/// Called by either submitWork() or in our main thread through syncBlockQueue().
void onChainChanged(ImportRoute const& _ir);
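rejigMining(), declared above, reacts to the flags introduced in this header (wouldMine, forceMining, mineOnBadChain, the canary checks), but its body is not shown in this diff. As a rough sketch only, and an assumption rather than the actual implementation, the decision it has to make boils down to:

// Hedged sketch: whether mining ought to be running, given the flags in this header.
// The real rejigMining() additionally has to (re)build the work package and start/stop the farm.
bool shouldMineSketch(bool _wouldMine, bool _forceMining, bool _havePendingTransactions,
                      bool _chainBad, bool _mineOnBadChain)
{
    return _wouldMine
        && (_forceMining || _havePendingTransactions)
        && (!_chainBad || _mineOnBadChain);
}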
@@ -280,6 +299,10 @@ private:
/// @returns true only if it's worth bothering to prep the mining block.
bool shouldServeWork() const { return m_bq.items().first == 0 && (isMining() || remoteActive()); }
+ /// Called when we have attempted to import a bad block.
+ /// @warning May be called from any thread.
+ void onBadBlock(Exception& _ex) const;
+
VersionChecker m_vc; ///< Dummy object to check & update the protocol version.
CanonBlockChain m_bc; ///< Maintains block database.
BlockQueue m_bq; ///< Maintains a list of incoming blocks not yet on the blockchain (to be imported).
@@ -304,8 +327,10 @@ private:
Handler m_tqReady;
Handler m_bqReady;
+ bool m_wouldMine = false; ///< True if we /should/ be mining.
bool m_turboMining = false; ///< Don't squander all of our time mining actually just sleeping.
bool m_forceMining = false; ///< Mine even when there are no transactions pending?
+ bool m_mineOnBadChain = false; ///< Mine even when the canary says it's a bad chain.
bool m_paranoia = false; ///< Should we be paranoid about our state?
mutable std::chrono::system_clock::time_point m_lastGarbageCollection;
@@ -319,6 +344,8 @@ private:
Mutex x_signalled;
std::atomic<bool> m_syncTransactionQueue = {false};
std::atomic<bool> m_syncBlockQueue = {false};
+
+ std::string m_sentinel;
};
}
diff --git a/libethereum/ClientBase.cpp b/libethereum/ClientBase.cpp
index 19f0fe737..38e92fcd7 100644
--- a/libethereum/ClientBase.cpp
+++ b/libethereum/ClientBase.cpp
@@ -186,8 +186,8 @@ LocalisedLogEntries ClientBase::logs(unsigned _watchId) const
LocalisedLogEntries ClientBase::logs(LogFilter const& _f) const
{
LocalisedLogEntries ret;
- unsigned begin = min(bc().number() + 1, (unsigned)_f.latest());
- unsigned end = min(bc().number(), min(begin, (unsigned)_f.earliest()));
+ unsigned begin = min(bc().number() + 1, (unsigned)numberFromHash(_f.latest()));
+ unsigned end = min(bc().number(), min(begin, (unsigned)numberFromHash(_f.earliest())));
// Handle pending transactions differently as they're not on the block chain.
if (begin > bc().number())
@@ -197,11 +197,10 @@ LocalisedLogEntries ClientBase::logs(LogFilter const& _f) const
{
// Might have a transaction that contains a matching log.
TransactionReceipt const& tr = temp.receipt(i);
- auto th = temp.pending()[i].sha3();
LogEntries le = _f.matches(tr);
if (le.size())
for (unsigned j = 0; j < le.size(); ++j)
- ret.insert(ret.begin(), LocalisedLogEntry(le[j], begin, th));
+ ret.insert(ret.begin(), LocalisedLogEntry(le[j]));
}
begin = bc().number();
}
@@ -216,20 +215,22 @@ LocalisedLogEntries ClientBase::logs(LogFilter const& _f) const
{
int total = 0;
auto h = bc().numberHash(n);
+ auto info = bc().info(h);
auto receipts = bc().receipts(h).receipts;
+ unsigned logIndex = 0;
for (size_t i = 0; i < receipts.size(); i++)
{
+ logIndex++;
TransactionReceipt receipt = receipts[i];
if (_f.matches(receipt.bloom()))
{
- auto info = bc().info(h);
auto th = transaction(info.hash(), i).sha3();
LogEntries le = _f.matches(receipt);
if (le.size())
{
total += le.size();
for (unsigned j = 0; j < le.size(); ++j)
- ret.insert(ret.begin(), LocalisedLogEntry(le[j], n, th));
+ ret.insert(ret.begin(), LocalisedLogEntry(le[j], info, th, i, logIndex));
}
}
@@ -328,6 +329,8 @@ LocalisedLogEntries ClientBase::checkWatch(unsigned _watchId)
BlockInfo ClientBase::blockInfo(h256 _hash) const
{
+ if (_hash == PendingBlockHash)
+ return preMine().info();
return BlockInfo(bc().block(_hash));
}
@@ -415,17 +418,16 @@ h256s ClientBase::pendingHashes() const
return h256s() + postMine().pendingHashes();
}
-
StateDiff ClientBase::diff(unsigned _txi, h256 _block) const
{
State st = asOf(_block);
- return st.fromPending(_txi).diff(st.fromPending(_txi + 1));
+ return st.fromPending(_txi).diff(st.fromPending(_txi + 1), true);
}
StateDiff ClientBase::diff(unsigned _txi, BlockNumber _block) const
{
State st = asOf(_block);
- return st.fromPending(_txi).diff(st.fromPending(_txi + 1));
+ return st.fromPending(_txi).diff(st.fromPending(_txi + 1), true);
}
Addresses ClientBase::addresses(BlockNumber _block) const
@@ -457,6 +459,24 @@ h256 ClientBase::hashFromNumber(BlockNumber _number) const
BlockNumber ClientBase::numberFromHash(h256 _blockHash) const
{
+ if (_blockHash == PendingBlockHash)
+ return bc().number() + 1;
+ else if (_blockHash == LatestBlockHash)
+ return bc().number();
+ else if (_blockHash == EarliestBlockHash)
+ return 0;
return bc().number(_blockHash);
}
+int ClientBase::compareBlockHashes(h256 _h1, h256 _h2) const
+{
+ BlockNumber n1 = numberFromHash(_h1);
+ BlockNumber n2 = numberFromHash(_h2);
+
+ if (n1 > n2) {
+ return 1;
+ } else if (n1 == n2) {
+ return 0;
+ }
+ return -1;
+}
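A small usage sketch for the new comparator (the helper below is hypothetical, not part of this change): because numberFromHash() now resolves the pseudo-hashes PendingBlockHash, LatestBlockHash and EarliestBlockHash, the comparison works uniformly for symbolic and concrete block references.

#include <libdevcore/FixedHash.h>
#include <libethereum/ClientBase.h>

// Hypothetical helper: true if block _a is not later than block _b, where either argument
// may be a real block hash or one of the pseudo-hashes handled above.
bool isNotLaterThan(dev::eth::ClientBase const& _c, dev::h256 const& _a, dev::h256 const& _b)
{
    return _c.compareBlockHashes(_a, _b) <= 0;
}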
diff --git a/libethereum/ClientBase.h b/libethereum/ClientBase.h
index 19bb2088d..8aa84101c 100644
--- a/libethereum/ClientBase.h
+++ b/libethereum/ClientBase.h
@@ -44,7 +44,7 @@ static const h256 PendingChangedFilter = u256(0);
static const h256 ChainChangedFilter = u256(1);
static const LogEntry SpecialLogEntry = LogEntry(Address(), h256s(), bytes());
-static const LocalisedLogEntry InitialChange(SpecialLogEntry, 0);
+static const LocalisedLogEntry InitialChange(SpecialLogEntry);
struct ClientWatch
{
@@ -118,6 +118,7 @@ public:
virtual h256 hashFromNumber(BlockNumber _number) const override;
virtual BlockNumber numberFromHash(h256 _blockHash) const override;
+ virtual int compareBlockHashes(h256 _h1, h256 _h2) const override;
virtual BlockInfo blockInfo(h256 _hash) const override;
virtual BlockDetails blockDetails(h256 _hash) const override;
virtual Transaction transaction(h256 _transactionHash) const override;
@@ -133,8 +134,8 @@ public:
virtual Transactions pending() const override;
virtual h256s pendingHashes() const override;
- ImportResult injectTransaction(bytes const& _rlp) override { prepareForTransaction(); return m_tq.import(_rlp); }
- ImportResult injectBlock(bytes const& _block);
+ virtual ImportResult injectTransaction(bytes const& _rlp) override { prepareForTransaction(); return m_tq.import(_rlp); }
+ virtual ImportResult injectBlock(bytes const& _block) override;
using Interface::diff;
virtual StateDiff diff(unsigned _txi, h256 _block) const override;
@@ -144,9 +145,6 @@ public:
virtual Addresses addresses(BlockNumber _block) const override;
virtual u256 gasLimitRemaining() const override;
- /// Set the coinbase address
- virtual void setAddress(Address _us) = 0;
-
/// Get the coinbase address
virtual Address address() const override;
@@ -155,6 +153,7 @@ public:
virtual void startMining() override { BOOST_THROW_EXCEPTION(InterfaceNotSupported("ClientBase::startMining")); }
virtual void stopMining() override { BOOST_THROW_EXCEPTION(InterfaceNotSupported("ClientBase::stopMining")); }
virtual bool isMining() const override { BOOST_THROW_EXCEPTION(InterfaceNotSupported("ClientBase::isMining")); }
+ virtual bool wouldMine() const override { BOOST_THROW_EXCEPTION(InterfaceNotSupported("ClientBase::wouldMine")); }
virtual uint64_t hashrate() const override { BOOST_THROW_EXCEPTION(InterfaceNotSupported("ClientBase::hashrate")); }
virtual MiningProgress miningProgress() const override { BOOST_THROW_EXCEPTION(InterfaceNotSupported("ClientBase::miningProgress")); }
virtual ProofOfWork::WorkPackage getWork() override { BOOST_THROW_EXCEPTION(InterfaceNotSupported("ClientBase::getWork")); }
@@ -178,6 +177,8 @@ protected:
// filters
mutable Mutex x_filtersWatches; ///< Our lock.
std::unordered_map<h256, InstalledFilter> m_filters; ///< The dictionary of filters that are active.
+ std::unordered_map<h256, h256s> m_specialFilters = std::unordered_map<h256, std::vector<h256>>{{PendingChangedFilter, {}}, {ChainChangedFilter, {}}};
+ ///< The dictionary of special filters and their additional data
std::map<unsigned, ClientWatch> m_watches; ///< Each and every watch - these reference a filter.
};
diff --git a/libethereum/CommonNet.h b/libethereum/CommonNet.h
index 8cf2647cf..2eb2d77c8 100644
--- a/libethereum/CommonNet.h
+++ b/libethereum/CommonNet.h
@@ -38,9 +38,9 @@ namespace eth
#if ETH_DEBUG
static const unsigned c_maxHashes = 2048; ///< Maximum number of hashes BlockHashes will ever send.
-static const unsigned c_maxHashesAsk = 256; ///< Maximum number of hashes GetBlockHashes will ever ask for.
+static const unsigned c_maxHashesAsk = 2048; ///< Maximum number of hashes GetBlockHashes will ever ask for.
static const unsigned c_maxBlocks = 128; ///< Maximum number of blocks Blocks will ever send.
-static const unsigned c_maxBlocksAsk = 8; ///< Maximum number of blocks we ask to receive in Blocks (when using GetChain).
+static const unsigned c_maxBlocksAsk = 128; ///< Maximum number of blocks we ask to receive in Blocks (when using GetChain).
static const unsigned c_maxPayload = 262144; ///< Maximum size of packet for us to send.
#else
static const unsigned c_maxHashes = 2048; ///< Maximum number of hashes BlockHashes will ever send.
@@ -84,5 +84,12 @@ enum class Syncing
Done
};
+struct HashChainStatus
+{
+ unsigned total;
+ unsigned received;
+ bool estimated;
+};
+
}
}
diff --git a/libethereum/DownloadMan.cpp b/libethereum/DownloadMan.cpp
index 3e33f3eb5..5e68e3c49 100644
--- a/libethereum/DownloadMan.cpp
+++ b/libethereum/DownloadMan.cpp
@@ -80,7 +80,6 @@ HashDownloadSub::HashDownloadSub(HashDownloadMan& _man): m_man(&_man)
{
WriteGuard l(m_man->x_subs);
m_asked = RangeMask(m_man->m_chainStart, m_man->m_chainStart + m_man->m_chainCount);
- m_attempted = RangeMask(m_man->m_chainStart, m_man->m_chainStart + m_man->m_chainCount);
m_man->m_subs.insert(this);
}
@@ -98,7 +97,6 @@ void HashDownloadSub::resetFetch()
Guard l(m_fetch);
m_remaining = 0;
m_asked = RangeMask<unsigned>(m_man->m_chainStart, m_man->m_chainStart + m_man->m_chainCount);
- m_attempted = RangeMask<unsigned>(m_man->m_chainStart, m_man->m_chainStart + m_man->m_chainCount);
}
unsigned HashDownloadSub::nextFetch(unsigned _n)
@@ -110,10 +108,9 @@ unsigned HashDownloadSub::nextFetch(unsigned _n)
if (!m_man || m_man->chainEmpty())
return 0;
- m_asked = (~(m_man->taken() + m_attempted)).lowest(_n);
+ m_asked = (~(m_man->taken())).lowest(_n);
if (m_asked.empty())
- m_asked = (~(m_man->taken(true) + m_attempted)).lowest(_n);
- m_attempted += m_asked;
+ m_asked = (~(m_man->taken(true))).lowest(_n);
return *m_asked.begin();
}
diff --git a/libethereum/DownloadMan.h b/libethereum/DownloadMan.h
index 3e1a071c9..0c27e84ea 100644
--- a/libethereum/DownloadMan.h
+++ b/libethereum/DownloadMan.h
@@ -187,7 +187,6 @@ public:
bool askedContains(unsigned _i) const { Guard l(m_fetch); return m_asked.contains(_i); }
RangeMask<unsigned> const& asked() const { return m_asked; }
- RangeMask<unsigned> const& attemped() const { return m_attempted; }
private:
void resetFetch(); // Called by DownloadMan when we need to reset the download.
@@ -196,7 +195,6 @@ private:
mutable Mutex m_fetch;
unsigned m_remaining;
RangeMask<unsigned> m_asked;
- RangeMask<unsigned> m_attempted;
};
class HashDownloadMan
@@ -255,6 +253,11 @@ public:
return m_got.full();
}
+ unsigned gotCount() const
+ {
+ return m_got.size();
+ }
+
size_t chainSize() const { ReadGuard l(m_lock); return m_chainCount; }
size_t chainEmpty() const { ReadGuard l(m_lock); return m_chainCount == 0; }
void foreachSub(std::function const& _f) const { ReadGuard l(x_subs); for(auto i: m_subs) _f(*i); }
@@ -274,3 +277,4 @@ private:
}
}
+
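The new gotCount() accessor exists purely so callers can report progress; together with the existing chainSize() it yields a completion ratio. A trivial, illustrative use (nothing below is part of the change itself):

#include <cstddef>

// Illustrative only: fraction of the hash chain downloaded so far,
// e.g. hashDownloadProgress(man.gotCount(), man.chainSize()).
double hashDownloadProgress(unsigned _got, std::size_t _chainSize)
{
    return _chainSize ? double(_got) / double(_chainSize) : 1.0;
}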
diff --git a/libethereum/EthereumHost.cpp b/libethereum/EthereumHost.cpp
index 7c9b730ea..3f0999508 100644
--- a/libethereum/EthereumHost.cpp
+++ b/libethereum/EthereumHost.cpp
@@ -39,6 +39,7 @@ using namespace dev::eth;
using namespace p2p;
unsigned const EthereumHost::c_oldProtocolVersion = 60; //TODO: remove this once v61+ is common
+unsigned const c_chainReorgSize = 30000;
EthereumHost::EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId):
HostCapability(),
@@ -50,6 +51,7 @@ EthereumHost::EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQu
{
m_latestBlockSent = _ch.currentHash();
m_hashMan.reset(m_chain.number() + 1);
+ m_bqRoomAvailable = m_bq.onRoomAvailable([this](){ m_continueSync = true; });
}
EthereumHost::~EthereumHost()
@@ -91,7 +93,7 @@ void EthereumHost::doWork()
bool netChange = ensureInitialised();
auto h = m_chain.currentHash();
// If we've finished our initial sync (including getting all the blocks into the chain so as to reduce invalid transactions), start trading transactions & blocks
- if (isSyncing() && m_chain.isKnown(m_latestBlockSent))
+ if (!isSyncing() && m_chain.isKnown(m_latestBlockSent))
{
if (m_newTransactions)
{
@@ -105,6 +107,12 @@ void EthereumHost::doWork()
}
}
+ if (m_continueSync)
+ {
+ m_continueSync = false;
+ continueSync();
+ }
+
foreachPeer([](EthereumPeer* _p) { _p->tick(); });
// return netChange;
@@ -144,6 +152,7 @@ void EthereumHost::maintainTransactions()
RLPStream ts;
_p->prep(ts, TransactionsPacket, n).appendRaw(b, n);
_p->sealAndSend(ts);
+ cnote << "Sent" << n << "transactions to " << _p->session()->info().clientVersion;
}
_p->m_requireTransactions = false;
});
@@ -237,7 +246,7 @@ void EthereumHost::maintainBlocks(h256 const& _currentHash)
void EthereumHost::onPeerStatus(EthereumPeer* _peer)
{
- Guard l(x_sync);
+ RecursiveGuard l(x_sync);
if (_peer->m_genesisHash != m_chain.genesisHash())
_peer->disable("Invalid genesis hash");
else if (_peer->m_protocolVersion != protocolVersion() && _peer->m_protocolVersion != c_oldProtocolVersion)
@@ -250,36 +259,43 @@ void EthereumHost::onPeerStatus(EthereumPeer* _peer)
_peer->disable("Peer banned for previous bad behaviour.");
else
{
- if (_peer->m_protocolVersion != protocolVersion())
- estimatePeerHashes(_peer);
- else if (_peer->m_latestBlockNumber > m_chain.number())
- _peer->m_expectedHashes = (unsigned)_peer->m_latestBlockNumber - m_chain.number() + 1000;
+ unsigned estimatedHashes = estimateHashes();
+ if (_peer->m_protocolVersion == protocolVersion())
+ {
+ if (_peer->m_latestBlockNumber > m_chain.number())
+ _peer->m_expectedHashes = (unsigned)_peer->m_latestBlockNumber - m_chain.number();
+ if (_peer->m_expectedHashes > estimatedHashes)
+ _peer->disable("Too many hashes");
+ else if (m_needSyncHashes && m_hashMan.chainSize() < _peer->m_expectedHashes)
+ m_hashMan.resetToRange(m_chain.number() + 1, _peer->m_expectedHashes);
+ }
else
- _peer->m_expectedHashes = 1000;
- if (m_hashMan.chainSize() < _peer->m_expectedHashes)
- m_hashMan.resetToRange(m_chain.number() + 1, _peer->m_expectedHashes);
+ _peer->m_expectedHashes = estimatedHashes;
continueSync(_peer);
}
}
-void EthereumHost::estimatePeerHashes(EthereumPeer* _peer)
+unsigned EthereumHost::estimateHashes()
{
BlockInfo block = m_chain.info();
time_t lastBlockTime = (block.hash() == m_chain.genesisHash()) ? 1428192000 : (time_t)block.timestamp;
time_t now = time(0);
- unsigned blockCount = 1000;
+ unsigned blockCount = c_chainReorgSize;
if (lastBlockTime > now)
clog(NetWarn) << "Clock skew? Latest block is in the future";
else
blockCount += (now - lastBlockTime) / (unsigned)c_durationLimit;
clog(NetAllDetail) << "Estimated hashes: " << blockCount;
- _peer->m_expectedHashes = blockCount;
+ return blockCount;
}
void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes)
{
- Guard l(x_sync);
- assert(_peer->m_asking == Asking::Nothing);
+ RecursiveGuard l(x_sync);
+ if (_peer->m_syncHashNumber > 0)
+ _peer->m_syncHashNumber += _hashes.size();
+
+ _peer->setAsking(Asking::Nothing);
onPeerHashes(_peer, _hashes, false);
}
@@ -287,13 +303,23 @@ void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool
{
if (_hashes.empty())
{
- onPeerDoneHashes(_peer, true);
+ _peer->m_hashSub.doneFetch();
+ continueSync();
return;
}
+
+ bool syncByNumber = _peer->m_syncHashNumber;
+ if (!syncByNumber && !_complete && _peer->m_syncHash != m_syncingLatestHash)
+ {
+ // Obsolete hashes, discard
+ continueSync(_peer);
+ return;
+ }
+
unsigned knowns = 0;
unsigned unknowns = 0;
h256s neededBlocks;
- bool syncByNumber = !m_syncingLatestHash;
+ unsigned firstNumber = _peer->m_syncHashNumber - _hashes.size();
for (unsigned i = 0; i < _hashes.size(); ++i)
{
_peer->addRating(1);
@@ -323,8 +349,11 @@ void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool
}
else
knowns++;
+
if (!syncByNumber)
m_syncingLatestHash = h;
+ else
+ _peer->m_hashSub.noteHash(firstNumber + i, 1);
}
if (syncByNumber)
{
@@ -339,7 +368,7 @@ void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool
if (_complete)
{
m_needSyncBlocks = true;
- continueSync(_peer);
+ continueSync();
}
else if (syncByNumber && m_hashMan.isComplete())
{
@@ -370,14 +399,15 @@ void EthereumHost::onPeerDoneHashes(EthereumPeer* _peer, bool _localChain)
{
m_man.resetToChain(m_hashes);
m_hashes.clear();
+ m_hashMan.reset(m_chain.number() + 1);
}
continueSync();
}
void EthereumHost::onPeerBlocks(EthereumPeer* _peer, RLP const& _r)
{
- Guard l(x_sync);
- assert(_peer->m_asking == Asking::Nothing);
+ RecursiveGuard l(x_sync);
+ _peer->setAsking(Asking::Nothing);
unsigned itemCount = _r.itemCount();
clog(NetMessageSummary) << "Blocks (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreBlocks");
@@ -386,6 +416,7 @@ void EthereumHost::onPeerBlocks(EthereumPeer* _peer, RLP const& _r)
// Got to this peer's latest block - just give up.
clog(NetNote) << "Finishing blocks fetch...";
// NOTE: need to notify of giving up on chain-hashes, too, altering state as necessary.
+ _peer->m_sub.doneFetch();
_peer->setIdle();
return;
}
@@ -437,35 +468,26 @@ void EthereumHost::onPeerBlocks(EthereumPeer* _peer, RLP const& _r)
}
clog(NetMessageSummary) << dec << success << "imported OK," << unknown << "with unknown parents," << future << "with future timestamps," << got << " already known," << repeated << " repeats received.";
-
- if (m_man.isComplete() && !m_needSyncHashes)
- {
- // Done our chain-get.
- m_needSyncBlocks = false;
- clog(NetNote) << "Chain download complete.";
- // 1/100th for each useful block hash.
- _peer->addRating(m_man.chainSize() / 100); //TODO: what about other peers?
- m_man.reset();
- }
continueSync(_peer);
}
void EthereumHost::onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes)
{
- Guard l(x_sync);
- if (isSyncing_UNSAFE())
+ RecursiveGuard l(x_sync);
+ if (isSyncing_UNSAFE() || _peer->isConversing())
{
clog(NetMessageSummary) << "Ignoring new hashes since we're already downloading.";
return;
}
clog(NetNote) << "New block hash discovered: syncing without help.";
+ _peer->m_syncHashNumber = 0;
onPeerHashes(_peer, _hashes, true);
}
void EthereumHost::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r)
{
- Guard l(x_sync);
- if (isSyncing_UNSAFE())
+ RecursiveGuard l(x_sync);
+ if (isSyncing_UNSAFE() || _peer->isConversing())
{
clog(NetMessageSummary) << "Ignoring new blocks since we're already downloading.";
return;
@@ -519,7 +541,7 @@ void EthereumHost::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r)
_peer->m_knownBlocks.insert(h);
if (sync)
- continueSync(_peer);
+ continueSync();
}
}
@@ -551,9 +573,21 @@ void EthereumHost::onPeerTransactions(EthereumPeer* _peer, RLP const& _r)
}
}
+void EthereumHost::onPeerAborting(EthereumPeer* _peer)
+{
+ RecursiveGuard l(x_sync);
+ if (_peer->isConversing())
+ {
+ _peer->setIdle();
+ if (_peer->isCriticalSyncing())
+ _peer->setRude();
+ continueSync();
+ }
+}
+
void EthereumHost::continueSync()
{
- clog(NetAllDetail) << "Getting help with downloading hashes and blocks";
+ clog(NetAllDetail) << "Continuing sync for all peers";
foreachPeer([&](EthereumPeer* _p)
{
if (_p->m_asking == Asking::Nothing)
@@ -564,22 +598,43 @@ void EthereumHost::continueSync()
void EthereumHost::continueSync(EthereumPeer* _peer)
{
assert(_peer->m_asking == Asking::Nothing);
- bool otherPeerSync = false;
- if (m_needSyncHashes && peerShouldGrabChain(_peer))
+ bool otherPeerV60Sync = false;
+ bool otherPeerV61Sync = false;
+ if (m_needSyncHashes)
{
+ if (!peerShouldGrabChain(_peer))
+ {
+ _peer->setIdle();
+ return;
+ }
+
foreachPeer([&](EthereumPeer* _p)
{
- if (_p != _peer && _p->m_asking == Asking::Hashes && _p->m_protocolVersion != protocolVersion())
- otherPeerSync = true; // Already have a peer downloading hash chain with old protocol, do nothing
+ if (_p != _peer && _p->m_asking == Asking::Hashes)
+ {
+ if (_p->m_protocolVersion != protocolVersion())
+ otherPeerV60Sync = true; // Already have a peer downloading hash chain with old protocol, do nothing
+ else
+ otherPeerV61Sync = true; // Already have a peer downloading hash chain with V61+ protocol, join if supported
+ }
});
- if (otherPeerSync)
+ if (otherPeerV60Sync && !m_hashes.empty())
+ {
+ /// Downloading from other peer with v60 protocol, nothing else we can do
+ _peer->setIdle();
+ return;
+ }
+ if (otherPeerV61Sync && _peer->m_protocolVersion != protocolVersion())
{
- /// Downloading from other peer with v60 protocol, nothing ese we can do
+ /// Downloading from other peer with v61+ protocol which this peer does not support; nothing else we can do
_peer->setIdle();
return;
}
- if (_peer->m_protocolVersion == protocolVersion() && !m_syncingLatestHash)
+ if (_peer->m_protocolVersion == protocolVersion() && !m_hashMan.isComplete())
+ {
+ m_syncingV61 = true;
_peer->requestHashes(); /// v61+ and not catching up to a particular hash
+ }
else
{
// Restart/continue sync in single peer mode
@@ -588,17 +643,63 @@ void EthereumHost::continueSync(EthereumPeer* _peer)
m_syncingLatestHash =_peer->m_latestHash;
m_syncingTotalDifficulty = _peer->m_totalDifficulty;
}
- _peer->requestHashes(m_syncingLatestHash);
+ if (_peer->m_totalDifficulty >= m_syncingTotalDifficulty)
+ {
+ _peer->requestHashes(m_syncingLatestHash);
+ m_syncingV61 = false;
+ m_estimatedHashes = _peer->m_expectedHashes;
+ }
+ else
+ _peer->setIdle();
+ }
+ }
+ else if (m_needSyncBlocks)
+ {
+ if (m_man.isComplete())
+ {
+ // Done our chain-get.
+ m_needSyncBlocks = false;
+ clog(NetNote) << "Chain download complete.";
+ // 1/100th for each useful block hash.
+ _peer->addRating(m_man.chainSize() / 100); //TODO: what about other peers?
+ m_man.reset();
+ _peer->setIdle();
+ return;
+ }
+ else if (peerCanHelp(_peer))
+ {
+ // Check block queue status
+ if (m_bq.unknownFull())
+ {
+ clog(NetWarn) << "Too many unknown blocks, restarting sync";
+ m_bq.clear();
+ reset();
+ continueSync();
+ }
+ else if (m_bq.knownFull())
+ {
+ clog(NetAllDetail) << "Waiting for block queue before downloading blocks";
+ _peer->setIdle();
+ }
+ else
+ _peer->requestBlocks();
}
}
- else if (m_needSyncBlocks && peerShouldGrabBlocks(_peer)) // Check if this peer can help with downloading blocks
- _peer->requestBlocks();
else
_peer->setIdle();
}
+bool EthereumHost::peerCanHelp(EthereumPeer* _peer) const
+{
+ (void)_peer;
+ return true;
+}
+
bool EthereumHost::peerShouldGrabBlocks(EthereumPeer* _peer) const
{
+ // This is only good for deciding whether to go ahead and grab a particular peer's hash chain,
+ // yet it's being used to determine whether to allow a peer to help with downloading an existing
+ // chain of blocks.
auto td = _peer->m_totalDifficulty;
auto lh = m_syncingLatestHash;
auto ctd = m_chain.details().totalDifficulty;
@@ -611,6 +712,10 @@ bool EthereumHost::peerShouldGrabBlocks(EthereumPeer* _peer) const
bool EthereumHost::peerShouldGrabChain(EthereumPeer* _peer) const
{
+ // Early exit if this peer has proved unreliable.
+ if (_peer->isRude())
+ return false;
+
h256 c = m_chain.currentHash();
unsigned n = m_chain.number();
u256 td = m_chain.details().totalDifficulty;
@@ -630,13 +735,14 @@ bool EthereumHost::peerShouldGrabChain(EthereumPeer* _peer) const
bool EthereumHost::isSyncing_UNSAFE() const
{
- /// We need actual peer information here to handle the case when we are the first ever peer on the network to mine.
- /// I.e. on a new private network the first node mining has noone to sync with and should start block propogation immediately.
- bool syncing = false;
- foreachPeer([&](EthereumPeer* _p)
- {
- if (_p->m_asking != Asking::Nothing)
- syncing = true;
- });
- return syncing;
+ return m_needSyncBlocks || m_needSyncHashes;
}
+
+HashChainStatus EthereumHost::status()
+{
+ RecursiveGuard l(x_sync);
+ if (m_syncingV61)
+ return HashChainStatus { static_cast<unsigned>(m_hashMan.chainSize()), static_cast<unsigned>(m_hashMan.gotCount()), false };
+ return HashChainStatus { m_estimatedHashes > 0 ? m_estimatedHashes - c_chainReorgSize : 0, static_cast<unsigned>(m_hashes.size()), m_estimatedHashes > 0 };
+}
+
diff --git a/libethereum/EthereumHost.h b/libethereum/EthereumHost.h
index 8ca815a17..17684fea1 100644
--- a/libethereum/EthereumHost.h
+++ b/libethereum/EthereumHost.h
@@ -70,8 +70,8 @@ public:
void reset();
DownloadMan const& downloadMan() const { return m_man; }
- bool isSyncing() const { Guard l(x_sync); return isSyncing_UNSAFE(); }
- bool isBanned(p2p::NodeId _id) const { return !!m_banned.count(_id); }
+ bool isSyncing() const { RecursiveGuard l(x_sync); return isSyncing_UNSAFE(); }
+ bool isBanned(p2p::NodeId const& _id) const { return !!m_banned.count(_id); }
void noteNewTransactions() { m_newTransactions = true; }
void noteNewBlocks() { m_newBlocks = true; }
@@ -82,10 +82,12 @@ public:
void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes); ///< Called by peer once it has new hashes
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes); ///< Called by peer once it has another sequential block of hashes during sync
void onPeerTransactions(EthereumPeer* _peer, RLP const& _r); ///< Called by peer when it has new transactions
+ void onPeerAborting(EthereumPeer* _peer); ///< Called by peer when it is disconnecting
DownloadMan& downloadMan() { return m_man; }
HashDownloadMan& hashDownloadMan() { return m_hashMan; }
BlockChain const& chain() { return m_chain; }
+ HashChainStatus status();
static unsigned const c_oldProtocolVersion;
@@ -122,11 +124,14 @@ private:
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool _complete);
bool peerShouldGrabBlocks(EthereumPeer* _peer) const;
bool peerShouldGrabChain(EthereumPeer* _peer) const;
+ bool peerCanHelp(EthereumPeer* _peer) const;
+ unsigned estimateHashes();
void estimatePeerHashes(EthereumPeer* _peer);
BlockChain const& m_chain;
TransactionQueue& m_tq; ///< Maintains a list of incoming transactions not yet in a block on the blockchain.
BlockQueue& m_bq; ///< Maintains a list of incoming blocks not yet on the blockchain (to be imported).
+ Handler m_bqRoomAvailable;
u256 m_networkId;
@@ -141,12 +146,15 @@ private:
bool m_newTransactions = false;
bool m_newBlocks = false;
- mutable Mutex x_sync;
+ mutable RecursiveMutex x_sync;
bool m_needSyncHashes = true; ///< Indicates if we still need to download hashes
bool m_needSyncBlocks = true; ///< Indicates if we still need to download some blocks
h256 m_syncingLatestHash; ///< Latest block's hash, as of the current sync.
u256 m_syncingTotalDifficulty; ///< Latest block's total difficulty, as of the current sync.
- h256s m_hashes; ///< List of hashes with unknown block numbers. Used for v60 chain downloading and catching up to a particular unknown
+ h256s m_hashes; ///< List of hashes with unknown block numbers. Used for PV60 chain downloading and catching up to a particular unknown hash.
+ unsigned m_estimatedHashes = 0; ///< Number of estimated hashes for the last peer over PV60. Used for status reporting only.
+ bool m_syncingV61 = false; ///< True if recent activity was over PV61+. Used for status reporting only.
+ bool m_continueSync = false; ///< True when the block queue has processed a block; we should restart grabbing blocks.
};
}
diff --git a/libethereum/EthereumPeer.cpp b/libethereum/EthereumPeer.cpp
index a332e5b93..7a30f1ad9 100644
--- a/libethereum/EthereumPeer.cpp
+++ b/libethereum/EthereumPeer.cpp
@@ -25,6 +25,7 @@
#include
#include