diff --git a/alethzero/Main.ui b/alethzero/Main.ui
index e0852142a..b5e22409d 100644
--- a/alethzero/Main.ui
+++ b/alethzero/Main.ui
@@ -185,6 +185,7 @@
+
diff --git a/alethzero/MainWin.cpp b/alethzero/MainWin.cpp
index 9f630f37d..191db6eb4 100644
--- a/alethzero/MainWin.cpp
+++ b/alethzero/MainWin.cpp
@@ -212,7 +212,7 @@ Main::Main(QWidget *parent) :
m_server->setIdentities(keysAsVector(owned()));
m_server->StartListening();
- WebPage* webPage= new WebPage(this);
+ WebPage* webPage = new WebPage(this);
m_webPage = webPage;
connect(webPage, &WebPage::consoleMessage, [this](QString const& _msg) { Main::addConsoleMessage(_msg, QString()); });
ui->webView->setPage(m_webPage);
@@ -368,6 +368,11 @@ Address Main::getCurrencies() const
return abiOut<Address>(ethereum()->call(c_newConfig, abiIn("lookup(uint256)", (u256)3)).output);
}
+bool Main::doConfirm()
+{
+ return ui->confirm->isChecked();
+}
+
void Main::installNameRegWatch()
{
uninstallWatch(m_nameRegFilter);
@@ -1151,7 +1156,7 @@ void Main::refreshBlockCount()
{
auto d = ethereum()->blockChain().details();
BlockQueueStatus b = ethereum()->blockQueueStatus();
- ui->chainStatus->setText(QString("%3 ready %4 future %5 unknown %6 bad %1 #%2").arg(m_privateChain.size() ? "[" + m_privateChain + "] " : "testnet").arg(d.number).arg(b.ready).arg(b.future).arg(b.unknown).arg(b.bad));
+ ui->chainStatus->setText(QString("%3 ready %4 verifying %5 unverified %6 future %7 unknown %8 bad %1 #%2").arg(m_privateChain.size() ? "[" + m_privateChain + "] " : "testnet").arg(d.number).arg(b.verified).arg(b.verifying).arg(b.unverified).arg(b.future).arg(b.unknown).arg(b.bad));
}
void Main::on_turboMining_triggered()
diff --git a/alethzero/MainWin.h b/alethzero/MainWin.h
index d9075d178..9f2082dd0 100644
--- a/alethzero/MainWin.h
+++ b/alethzero/MainWin.h
@@ -94,6 +94,7 @@ public:
dev::u256 gasPrice() const { return 10 * dev::eth::szabo; }
dev::eth::KeyManager& keyManager() override { return m_keyManager; }
+ bool doConfirm();
dev::Secret retrieveSecret(dev::Address const& _a) const override;
diff --git a/alethzero/OurWebThreeStubServer.cpp b/alethzero/OurWebThreeStubServer.cpp
index d3ee4f41b..7e9836818 100644
--- a/alethzero/OurWebThreeStubServer.cpp
+++ b/alethzero/OurWebThreeStubServer.cpp
@@ -146,6 +146,9 @@ AddressHash OurAccountHolder::realAccounts() const
bool OurAccountHolder::validateTransaction(TransactionSkeleton const& _t, bool _toProxy)
{
+ if (!m_main->doConfirm())
+ return true;
+
if (_t.creation)
{
// show notice concerning the creation code. TODO: this needs entering into natspec.
diff --git a/test/libdevcrypto/TrieHash.cpp b/libdevcrypto/TrieHash.cpp
similarity index 89%
rename from test/libdevcrypto/TrieHash.cpp
rename to libdevcrypto/TrieHash.cpp
index ccf12c162..0b02ce77f 100644
--- a/test/libdevcrypto/TrieHash.cpp
+++ b/libdevcrypto/TrieHash.cpp
@@ -20,8 +20,8 @@
*/
#include "TrieHash.h"
-
#include
+#include <libdevcrypto/TrieDB.h> // @TODO replace ASAP!
#include
#include
using namespace std;
@@ -197,4 +197,35 @@ h256 hash256(u256Map const& _s)
return sha3(s.out());
}
+/*h256 orderedTrieRoot(std::vector<bytes> const& _data)
+{
+ StringMap m;
+ unsigned j = 0;
+ for (auto i: _data)
+ m[asString(rlp(j++))] = asString(i);
+ return hash256(m);
+}*/
+
+h256 orderedTrieRoot(std::vector<bytesConstRef> const& _data)
+{
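+ // Build a transient trie keyed by each item's RLP-encoded index; its root is the ordered trie root of the list.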
+ MemoryDB db;
+ GenericTrieDB<MemoryDB> t(&db);
+ t.init();
+ unsigned j = 0;
+ for (auto i: _data)
+ t.insert(rlp(j++), i.toBytes());
+ return t.root();
+}
+
+h256 orderedTrieRoot(std::vector<bytes> const& _data)
+{
+ MemoryDB db;
+ GenericTrieDB<MemoryDB> t(&db);
+ t.init();
+ unsigned j = 0;
+ for (auto i: _data)
+ t.insert(rlp(j++), i);
+ return t.root();
+}
+
}
diff --git a/test/libdevcrypto/TrieHash.h b/libdevcrypto/TrieHash.h
similarity index 66%
rename from test/libdevcrypto/TrieHash.h
rename to libdevcrypto/TrieHash.h
index be1d84480..b0588fc38 100644
--- a/test/libdevcrypto/TrieHash.h
+++ b/libdevcrypto/TrieHash.h
@@ -31,4 +31,19 @@ bytes rlp256(StringMap const& _s);
h256 hash256(StringMap const& _s);
h256 hash256(u256Map const& _s);
+/*h256 orderedTrieRoot(std::vector<bytes> const& _data);
+
+template <class T, class U> inline h256 trieRootOver(unsigned _itemCount, T const& _getKey, U const& _getValue)
+{
+ StringMap m;
+ for (unsigned i = 0; i < _itemCount; ++i)
+ m[asString(_getKey(i))] = asString(_getValue(i));
+ return hash256(m);
+}*/
+
+using bytesMap = std::unordered_map<bytes, bytes>;
+
+h256 orderedTrieRoot(std::vector<bytes> const& _data);
+h256 orderedTrieRoot(std::vector<bytesConstRef> const& _data);
+
}
diff --git a/libethcore/BlockInfo.cpp b/libethcore/BlockInfo.cpp
index e70b16753..00a5108c4 100644
--- a/libethcore/BlockInfo.cpp
+++ b/libethcore/BlockInfo.cpp
@@ -22,6 +22,7 @@
#include
#include
#include
+#include <libdevcrypto/TrieHash.h>
#include
#include
#include "EthashAux.h"
@@ -173,6 +174,9 @@ void BlockInfo::populateFromHeader(RLP const& _header, Strictness _s, h256 const
void BlockInfo::populate(bytesConstRef _block, Strictness _s, h256 const& _h)
{
RLP root(_block);
+ if (!root.isList())
+ BOOST_THROW_EXCEPTION(InvalidBlockFormat() << errinfo_comment("block needs to be a list") << BadFieldError(0, _block.toString()));
+
RLP header = root[0];
if (!header.isList())
@@ -185,6 +189,8 @@ void BlockInfo::populate(bytesConstRef _block, Strictness _s, h256 const& _h)
BOOST_THROW_EXCEPTION(InvalidBlockFormat() << errinfo_comment("block uncles need to be a list") << BadFieldError(2, root[2].data().toString()));
}
+struct BlockInfoDiagnosticsChannel: public LogChannel { static const char* name() { return EthBlue "▧" EthWhite " ◌"; } static const int verbosity = 9; };
+
template <class T, class U> h256 trieRootOver(unsigned _itemCount, T const& _getKey, U const& _getValue)
{
MemoryDB db;
@@ -195,17 +201,41 @@ template <class T, class U> h256 trieRootOver(unsigned _itemCount, T const& _get
return t.root();
}
-struct BlockInfoDiagnosticsChannel: public LogChannel { static const char* name() { return EthBlue "▧" EthWhite " ◌"; } static const int verbosity = 9; };
-
void BlockInfo::verifyInternals(bytesConstRef _block) const
{
RLP root(_block);
auto txList = root[1];
auto expectedRoot = trieRootOver(txList.itemCount(), [&](unsigned i){ return rlp(i); }, [&](unsigned i){ return txList[i].data(); });
+
clog(BlockInfoDiagnosticsChannel) << "Expected trie root:" << toString(expectedRoot);
if (transactionsRoot != expectedRoot)
+ {
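+ // Roots disagree: recompute the root with a scratch trie and with orderedTrieRoot, and dump every transaction to help diagnose the mismatch.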
+ MemoryDB tm;
+ GenericTrieDB<MemoryDB> transactionsTrie(&tm);
+ transactionsTrie.init();
+
+ vector<bytesConstRef> txs;
+
+ for (unsigned i = 0; i < txList.itemCount(); ++i)
+ {
+ RLPStream k;
+ k << i;
+
+ transactionsTrie.insert(&k.out(), txList[i].data());
+
+ txs.push_back(txList[i].data());
+ cdebug << toHex(k.out()) << toHex(txList[i].data());
+ }
+ cdebug << "trieRootOver" << expectedRoot;
+ cdebug << "orderedTrieRoot" << orderedTrieRoot(txs);
+ cdebug << "TrieDB" << transactionsTrie.root();
+ cdebug << "Contents:";
+ for (auto const& t: txs)
+ cdebug << toHex(t);
+
BOOST_THROW_EXCEPTION(InvalidTransactionsHash() << HashMismatchError(expectedRoot, transactionsRoot));
+ }
clog(BlockInfoDiagnosticsChannel) << "Expected uncle hash:" << toString(sha3(root[2].data()));
if (sha3Uncles != sha3(root[2].data()))
BOOST_THROW_EXCEPTION(InvalidUnclesHash());
diff --git a/libethereum/BlockChain.cpp b/libethereum/BlockChain.cpp
index da0e56247..cbde3f14e 100644
--- a/libethereum/BlockChain.cpp
+++ b/libethereum/BlockChain.cpp
@@ -199,7 +199,7 @@ void BlockChain::close()
#define IGNORE_EXCEPTIONS(X) try { X; } catch (...) {}
-void BlockChain::rebuild(std::string const& _path, std::function<void(unsigned, unsigned)> const& _progress)
+void BlockChain::rebuild(std::string const& _path, std::function<void(unsigned, unsigned)> const& _progress, bool _prepPoW)
{
std::string path = _path.empty() ? Defaults::get()->m_dbPath : _path;
@@ -252,7 +252,8 @@ void BlockChain::rebuild(std::string const& _path, std::function<void(unsigned, unsigned)> const& _progress)
bytes b = block(queryExtras<BlockHash, ExtraBlockHash>(h256(u256(d)), m_blockHashes, x_blockHashes, NullBlockHash, oldExtrasDB).value);
BlockInfo bi(b);
- ProofOfWork::prep(bi);
+ if (_prepPoW)
+ ProofOfWork::prep(bi);
if (bi.parentHash != lastHash)
{
@@ -306,7 +307,7 @@ tuple<h256s, h256s, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _st
{
// _bq.tick(*this);
- vector<bytes> blocks;
+ vector<pair<BlockInfo, bytes>> blocks;
_bq.drain(blocks, _max);
h256s fresh;
@@ -316,7 +317,7 @@ tuple<h256s, h256s, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _st
{
try
{
- auto r = import(block, _stateDB);
+ auto r = import(block.first, block.second, _stateDB);
fresh += r.first;
dead += r.second;
}
@@ -325,14 +326,14 @@ tuple<h256s, h256s, bool> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _st
cwarn << "ODD: Import queue contains block with unknown parent." << LogTag::Error << boost::current_exception_diagnostic_information();
// NOTE: don't reimport since the queue should guarantee everything in the right order.
// Can't continue - chain bad.
- badBlocks.push_back(BlockInfo::headerHash(block));
+ badBlocks.push_back(block.first.hash());
}
catch (Exception const& _e)
{
cnote << "Exception while importing block. Someone (Jeff? That you?) seems to be giving us dodgy blocks!" << LogTag::Error << diagnostic_information(_e);
// NOTE: don't reimport since the queue should guarantee everything in the right order.
// Can't continue - chain bad.
- badBlocks.push_back(BlockInfo::headerHash(block));
+ badBlocks.push_back(block.first.hash());
}
}
return make_tuple(fresh, dead, _bq.doneDrain(badBlocks));
@@ -364,18 +365,6 @@ pair<ImportResult, ImportRoute> BlockChain::attemptImport(bytes const& _block, O
ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, ImportRequirements::value _ir)
{
- //@tidy This is a behemoth of a method - could do to be split into a few smaller ones.
-
-#if ETH_TIMED_IMPORTS
- boost::timer total;
- double preliminaryChecks;
- double enactment;
- double collation;
- double writing;
- double checkBest;
- boost::timer t;
-#endif
-
// VERIFY: populates from the block and checks the block is internally coherent.
BlockInfo bi;
@@ -383,11 +372,6 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
try
#endif
{
- RLP blockRLP(_block);
-
- if (!blockRLP.isList())
- BOOST_THROW_EXCEPTION(InvalidBlockFormat() << errinfo_comment("block header needs to be a list") << BadFieldError(0, blockRLP.data().toString()));
-
bi.populate(&_block);
bi.verifyInternals(&_block);
}
@@ -400,29 +384,46 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
}
#endif
+ return import(bi, _block, _db, _ir);
+}
+
+ImportRoute BlockChain::import(BlockInfo const& _bi, bytes const& _block, OverlayDB const& _db, ImportRequirements::value _ir)
+{
+ //@tidy This is a behemoth of a method - could do to be split into a few smaller ones.
+
+#if ETH_TIMED_IMPORTS
+ boost::timer total;
+ double preliminaryChecks;
+ double enactment;
+ double collation;
+ double writing;
+ double checkBest;
+ boost::timer t;
+#endif
+
// Check block doesn't already exist first!
- if (isKnown(bi.hash()) && (_ir & ImportRequirements::DontHave))
+ if (isKnown(_bi.hash()) && (_ir & ImportRequirements::DontHave))
{
- clog(BlockChainNote) << bi.hash() << ": Not new.";
+ clog(BlockChainNote) << _bi.hash() << ": Not new.";
BOOST_THROW_EXCEPTION(AlreadyHaveBlock());
}
// Work out its number as the parent's number + 1
- if (!isKnown(bi.parentHash))
+ if (!isKnown(_bi.parentHash))
{
- clog(BlockChainNote) << bi.hash() << ": Unknown parent " << bi.parentHash;
+ clog(BlockChainNote) << _bi.hash() << ": Unknown parent " << _bi.parentHash;
// We don't know the parent (yet) - discard for now. It'll get resent to us if we find out about its ancestry later on.
BOOST_THROW_EXCEPTION(UnknownParent());
}
- auto pd = details(bi.parentHash);
+ auto pd = details(_bi.parentHash);
if (!pd)
{
auto pdata = pd.rlp();
clog(BlockChainDebug) << "Details is returning false despite block known:" << RLP(pdata);
- auto parentBlock = block(bi.parentHash);
- clog(BlockChainDebug) << "isKnown:" << isKnown(bi.parentHash);
- clog(BlockChainDebug) << "last/number:" << m_lastBlockNumber << m_lastBlockHash << bi.number;
+ auto parentBlock = block(_bi.parentHash);
+ clog(BlockChainDebug) << "isKnown:" << isKnown(_bi.parentHash);
+ clog(BlockChainDebug) << "last/number:" << m_lastBlockNumber << m_lastBlockHash << _bi.number;
clog(BlockChainDebug) << "Block:" << BlockInfo(parentBlock);
clog(BlockChainDebug) << "RLP:" << RLP(parentBlock);
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
@@ -430,14 +431,14 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
}
// Check it's not crazy
- if (bi.timestamp > (u256)time(0))
+ if (_bi.timestamp > (u256)time(0))
{
- clog(BlockChainChat) << bi.hash() << ": Future time " << bi.timestamp << " (now at " << time(0) << ")";
+ clog(BlockChainChat) << _bi.hash() << ": Future time " << _bi.timestamp << " (now at " << time(0) << ")";
// Block has a timestamp in the future. This is no good.
BOOST_THROW_EXCEPTION(FutureTime());
}
- clog(BlockChainChat) << "Attempting import of " << bi.hash() << "...";
+ clog(BlockChainChat) << "Attempting import of " << _bi.hash() << "...";
#if ETH_TIMED_IMPORTS
preliminaryChecks = t.elapsed();
@@ -457,7 +458,7 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
// Check transactions are valid and that they result in a state equivalent to our state_root.
// Get total difficulty increase and update state, checking it.
State s(_db);
- auto tdIncrease = s.enactOn(&_block, bi, *this, _ir);
+ auto tdIncrease = s.enactOn(&_block, _bi, *this, _ir);
BlockLogBlooms blb;
BlockReceipts br;
@@ -493,22 +494,22 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
// together with an "ensureCachedWithUpdatableLock(l)" method.
// This is safe in practice since the caches don't get flushed nearly often enough to be
// done here.
- details(bi.parentHash);
+ details(_bi.parentHash);
DEV_WRITE_GUARDED(x_details)
- m_details[bi.parentHash].children.push_back(bi.hash());
+ m_details[_bi.parentHash].children.push_back(_bi.hash());
#if ETH_TIMED_IMPORTS || !ETH_TRUE
collation = t.elapsed();
t.restart();
#endif
- blocksBatch.Put(toSlice(bi.hash()), (ldb::Slice)ref(_block));
+ blocksBatch.Put(toSlice(_bi.hash()), (ldb::Slice)ref(_block));
DEV_READ_GUARDED(x_details)
- extrasBatch.Put(toSlice(bi.parentHash, ExtraDetails), (ldb::Slice)dev::ref(m_details[bi.parentHash].rlp()));
+ extrasBatch.Put(toSlice(_bi.parentHash, ExtraDetails), (ldb::Slice)dev::ref(m_details[_bi.parentHash].rlp()));
- extrasBatch.Put(toSlice(bi.hash(), ExtraDetails), (ldb::Slice)dev::ref(BlockDetails((unsigned)pd.number + 1, td, bi.parentHash, {}).rlp()));
- extrasBatch.Put(toSlice(bi.hash(), ExtraLogBlooms), (ldb::Slice)dev::ref(blb.rlp()));
- extrasBatch.Put(toSlice(bi.hash(), ExtraReceipts), (ldb::Slice)dev::ref(br.rlp()));
+ extrasBatch.Put(toSlice(_bi.hash(), ExtraDetails), (ldb::Slice)dev::ref(BlockDetails((unsigned)pd.number + 1, td, _bi.parentHash, {}).rlp()));
+ extrasBatch.Put(toSlice(_bi.hash(), ExtraLogBlooms), (ldb::Slice)dev::ref(blb.rlp()));
+ extrasBatch.Put(toSlice(_bi.hash(), ExtraReceipts), (ldb::Slice)dev::ref(br.rlp()));
#if ETH_TIMED_IMPORTS || !ETH_TRUE
writing = t.elapsed();
@@ -526,20 +527,20 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
{
clog(BlockChainWarn) << " Malformed block: " << diagnostic_information(_e);
_e << errinfo_comment("Malformed block ");
- clog(BlockChainWarn) << "Block: " << bi.hash();
- clog(BlockChainWarn) << bi;
- clog(BlockChainWarn) << "Block parent: " << bi.parentHash;
- clog(BlockChainWarn) << BlockInfo(block(bi.parentHash));
+ clog(BlockChainWarn) << "Block: " << _bi.hash();
+ clog(BlockChainWarn) << _bi;
+ clog(BlockChainWarn) << "Block parent: " << _bi.parentHash;
+ clog(BlockChainWarn) << BlockInfo(block(_bi.parentHash));
throw;
}
#endif
StructuredLogger::chainReceivedNewBlock(
- bi.headerHash(WithoutNonce).abridged(),
- bi.nonce.abridged(),
+ _bi.headerHash(WithoutNonce).abridged(),
+ _bi.nonce.abridged(),
currentHash().abridged(),
"", // TODO: remote id ??
- bi.parentHash.abridged()
+ _bi.parentHash.abridged()
);
// cnote << "Parent " << bi.parentHash << " has " << details(bi.parentHash).children.size() << " children.";
@@ -552,8 +553,8 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
// don't include bi.hash() in treeRoute, since it's not yet in details DB...
// just tack it on afterwards.
unsigned commonIndex;
- tie(route, common, commonIndex) = treeRoute(last, bi.parentHash);
- route.push_back(bi.hash());
+ tie(route, common, commonIndex) = treeRoute(last, _bi.parentHash);
+ route.push_back(_bi.hash());
// Most of the time these two will be equal - only when we're doing a chain revert will they not be
if (common != last)
@@ -565,8 +566,8 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
for (auto i = route.rbegin(); i != route.rend() && *i != common; ++i)
{
BlockInfo tbi;
- if (*i == bi.hash())
- tbi = bi;
+ if (*i == _bi.hash())
+ tbi = _bi;
else
tbi = BlockInfo(block(*i));
@@ -593,7 +594,7 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
h256s newTransactionAddresses;
{
bytes blockBytes;
- RLP blockRLP(*i == bi.hash() ? _block : (blockBytes = block(*i)));
+ RLP blockRLP(*i == _bi.hash() ? _block : (blockBytes = block(*i)));
TransactionAddress ta;
ta.blockHash = tbi.hash();
for (ta.index = 0; ta.index < blockRLP[1].itemCount(); ++ta.index)
@@ -609,17 +610,17 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
// FINALLY! change our best hash.
{
- newLastBlockHash = bi.hash();
- newLastBlockNumber = (unsigned)bi.number;
+ newLastBlockHash = _bi.hash();
+ newLastBlockNumber = (unsigned)_bi.number;
}
- clog(BlockChainNote) << " Imported and best" << td << " (#" << bi.number << "). Has" << (details(bi.parentHash).children.size() - 1) << "siblings. Route:" << route;
+ clog(BlockChainNote) << " Imported and best" << td << " (#" << _bi.number << "). Has" << (details(_bi.parentHash).children.size() - 1) << "siblings. Route:" << route;
StructuredLogger::chainNewHead(
- bi.headerHash(WithoutNonce).abridged(),
- bi.nonce.abridged(),
+ _bi.headerHash(WithoutNonce).abridged(),
+ _bi.nonce.abridged(),
currentHash().abridged(),
- bi.parentHash.abridged()
+ _bi.parentHash.abridged()
);
}
else
@@ -630,21 +631,21 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
m_blocksDB->Write(m_writeOptions, &blocksBatch);
m_extrasDB->Write(m_writeOptions, &extrasBatch);
- if (isKnown(bi.hash()) && !details(bi.hash()))
+ if (isKnown(_bi.hash()) && !details(_bi.hash()))
{
clog(BlockChainDebug) << "Known block just inserted has no details.";
- clog(BlockChainDebug) << "Block:" << bi;
+ clog(BlockChainDebug) << "Block:" << _bi;
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
exit(-1);
}
try {
- State canary(_db, *this, bi.hash(), ImportRequirements::DontHave);
+ State canary(_db, *this, _bi.hash(), ImportRequirements::DontHave);
}
catch (...)
{
clog(BlockChainDebug) << "Failed to initialise State object form imported block.";
- clog(BlockChainDebug) << "Block:" << bi;
+ clog(BlockChainDebug) << "Block:" << _bi;
clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
exit(-1);
}
diff --git a/libethereum/BlockChain.h b/libethereum/BlockChain.h
index 02f3f618a..a67ec9a9c 100644
--- a/libethereum/BlockChain.h
+++ b/libethereum/BlockChain.h
@@ -120,6 +120,7 @@ public:
/// Import block into disk-backed DB
/// @returns the block hashes of any blocks that came into/went out of the canonical block chain.
ImportRoute import(bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir = ImportRequirements::Default);
+ ImportRoute import(BlockInfo const& _bi, bytes const& _block, OverlayDB const& _stateDB, ImportRequirements::value _ir = ImportRequirements::Default);
/// Returns true if the given block is known (though not necessarily a part of the canon chain).
bool isKnown(h256 const& _hash) const;
@@ -210,7 +211,7 @@ public:
/// Run through database and verify all blocks by reevaluating.
/// Will call _progress with the progress in this operation first param done, second total.
- void rebuild(std::string const& _path, ProgressCallback const& _progress = std::function<void(unsigned, unsigned)>());
+ void rebuild(std::string const& _path, ProgressCallback const& _progress = std::function<void(unsigned, unsigned)>(), bool _prepPoW = false);
/** @returns a tuple of:
* - an vector of hashes of all blocks between @a _from and @a _to, all blocks are ordered first by a number of
diff --git a/libethereum/BlockQueue.cpp b/libethereum/BlockQueue.cpp
index 43d2b4cb8..1632b2fa3 100644
--- a/libethereum/BlockQueue.cpp
+++ b/libethereum/BlockQueue.cpp
@@ -20,7 +20,7 @@
*/
#include "BlockQueue.h"
-
+#include <thread>
#include
#include
#include
@@ -35,6 +35,106 @@ const char* BlockQueueChannel::name() { return EthOrange "[]>"; }
const char* BlockQueueChannel::name() { return EthOrange "▣┅▶"; }
#endif
+
+BlockQueue::BlockQueue()
+{
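+ // Spawn one verifier thread per hardware thread; each runs verifierBody() until m_deleting is set by the destructor.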
+ for (unsigned i = 0; i < thread::hardware_concurrency(); ++i)
+ m_verifiers.emplace_back([=](){
+ setThreadName("verifier" + toString(i));
+ this->verifierBody();
+ });
+}
+
+BlockQueue::~BlockQueue()
+{
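+ // Wake all verifier threads so they can observe m_deleting and exit before being joined.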
+ m_deleting = true;
+ m_moreToVerify.notify_all();
+ for (auto& i: m_verifiers)
+ i.join();
+}
+
+void BlockQueue::verifierBody()
+{
+ while (!m_deleting)
+ {
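+ // Wait for work, then move the oldest unverified block into m_verifying, reserving its slot with the header hash (stashed in mixHash).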
+ std::pair<h256, bytes> work;
+
+ {
+ unique_lock<Mutex> l(m_verification);
+ m_moreToVerify.wait(l, [&](){ return !m_unverified.empty() || m_deleting; });
+ swap(work, m_unverified.front());
+ m_unverified.pop_front();
+ BlockInfo bi;
+ bi.mixHash = work.first;
+ m_verifying.push_back(make_pair(bi, bytes()));
+ cdebug << "Verifying" << bi.mixHash;
+ }
+
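+ // Verify outside the lock; the block bytes are moved from the work item into the result to avoid a copy.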
+ std::pair<BlockInfo, bytes> res;
+ swap(work.second, res.second);
+ try {
+ res.first.populate(res.second, CheckEverything, work.first);
+ res.first.verifyInternals(&res.second);
+ }
+ catch (...)
+ {
+ // bad block.
+ {
+ // has to be this order as that's how invariants() assumes.
+ WriteGuard l2(m_lock);
+ unique_lock<Mutex> l(m_verification);
+ m_readySet.erase(work.first);
+ m_knownBad.insert(work.first);
+ }
+
+ unique_lock<Mutex> l(m_verification);
+ for (auto it = m_verifying.begin(); it != m_verifying.end(); ++it)
+ if (it->first.mixHash == work.first)
+ {
+ cdebug << "Cancel verifying" << work.first;
+ m_verifying.erase(it);
+ goto OK1;
+ }
+ cwarn << "GAA BlockQueue corrupt: job cancelled but cannot be found in m_verifying queue.";
+ OK1:;
+ continue;
+ }
+
+ bool ready = false;
+ {
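+ // Publish the result: if we are at the head of m_verifying, flush ourselves plus any already-finished successors into m_verified; otherwise park the result in place.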
+ unique_lock<Mutex> l(m_verification);
+ if (m_verifying.front().first.mixHash == work.first)
+ {
+ // we're next!
+ cdebug << "Verified" << work.first;
+ m_verifying.pop_front();
+ m_verified.push_back(move(res));
+ while (m_verifying.size() && !m_verifying.front().second.empty())
+ {
+ cdebug << "Pre-verified" << m_verifying.front().first.hash();
+ m_verified.push_back(move(m_verifying.front()));
+ m_verifying.pop_front();
+ }
+ ready = true;
+ }
+ else
+ {
+ for (auto& i: m_verifying)
+ if (i.first.mixHash == work.first)
+ {
+ cdebug << "Delay-verified" << work.first << " (replacing with " << res << ")";
+ i = move(res);
+ goto OK;
+ }
+ cwarn << "GAA BlockQueue corrupt: job finished but cannot be found in m_verifying queue.";
+ OK:;
+ }
+ }
+ if (ready)
+ m_onReady();
+ }
+}
+
ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, bool _isOurs)
{
// Check if we already know this block.
@@ -110,11 +210,13 @@ ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, boo
{
// If valid, append to blocks.
cblockq << "OK - ready for chain insertion.";
- m_ready.push_back(make_pair(h, _block.toBytes()));
+ DEV_GUARDED(m_verification)
+ m_unverified.push_back(make_pair(h, _block.toBytes()));
+ m_moreToVerify.notify_one();
m_readySet.insert(h);
noteReady_WITH_LOCK(h);
- m_onReady();
+
return ImportResult::Success;
}
}
@@ -127,18 +229,19 @@ bool BlockQueue::doneDrain(h256s const& _bad)
m_drainingSet.clear();
if (_bad.size())
{
- vector<pair<h256, bytes>> old;
- swap(m_ready, old);
+ vector<pair<BlockInfo, bytes>> old;
+ DEV_GUARDED(m_verification)
+ swap(m_verified, old);
for (auto& b: old)
{
- BlockInfo bi(b.second);
- if (m_knownBad.count(bi.parentHash))
+ if (m_knownBad.count(b.first.parentHash))
{
- m_knownBad.insert(b.first);
- m_readySet.erase(b.first);
+ m_knownBad.insert(b.first.hash());
+ m_readySet.erase(b.first.hash());
}
else
- m_ready.push_back(std::move(b));
+ DEV_GUARDED(m_verification)
+ m_verified.push_back(std::move(b));
}
}
m_knownBad += _bad;
@@ -197,62 +300,73 @@ QueueStatus BlockQueue::blockStatus(h256 const& _h) const
QueueStatus::Unknown;
}
-void BlockQueue::drain(std::vector<bytes>& o_out, unsigned _max)
+void BlockQueue::drain(std::vector<std::pair<BlockInfo, bytes>>& o_out, unsigned _max)
{
WriteGuard l(m_lock);
DEV_INVARIANT_CHECK;
if (m_drainingSet.empty())
{
- o_out.resize(min(_max, m_ready.size()));
- for (unsigned i = 0; i < o_out.size(); ++i)
- swap(o_out[i], m_ready[i].second);
- m_ready.erase(m_ready.begin(), advanced(m_ready.begin(), o_out.size()));
+ DEV_GUARDED(m_verification)
+ {
+ o_out.resize(min(_max, m_verified.size()));
+ for (unsigned i = 0; i < o_out.size(); ++i)
+ swap(o_out[i], m_verified[i]);
+ m_verified.erase(m_verified.begin(), advanced(m_verified.begin(), o_out.size()));
+ }
for (auto const& bs: o_out)
{
// TODO: @optimise use map rather than vector & set.
- auto h = BlockInfo::headerHash(bs);
+ auto h = bs.first.hash();
m_drainingSet.insert(h);
m_readySet.erase(h);
}
-// swap(o_out, m_ready);
-// swap(m_drainingSet, m_readySet);
}
}
bool BlockQueue::invariants() const
{
- return m_readySet.size() == m_ready.size();
+ Guard l(m_verification);
+ return m_readySet.size() == m_verified.size() + m_unverified.size() + m_verifying.size();
}
void BlockQueue::noteReady_WITH_LOCK(h256 const& _good)
{
DEV_INVARIANT_CHECK;
list<h256> goodQueue(1, _good);
+ bool notify = false;
while (!goodQueue.empty())
{
auto r = m_unknown.equal_range(goodQueue.front());
goodQueue.pop_front();
for (auto it = r.first; it != r.second; ++it)
{
- m_ready.push_back(it->second);
+ DEV_GUARDED(m_verification)
+ m_unverified.push_back(it->second);
auto newReady = it->second.first;
m_unknownSet.erase(newReady);
m_readySet.insert(newReady);
goodQueue.push_back(newReady);
+ notify = true;
}
m_unknown.erase(r.first, r.second);
}
+ if (notify)
+ m_moreToVerify.notify_all();
}
void BlockQueue::retryAllUnknown()
{
+ WriteGuard l(m_lock);
DEV_INVARIANT_CHECK;
for (auto it = m_unknown.begin(); it != m_unknown.end(); ++it)
{
- m_ready.push_back(it->second);
+ DEV_GUARDED(m_verification)
+ m_unverified.push_back(it->second);
auto newReady = it->second.first;
m_unknownSet.erase(newReady);
m_readySet.insert(newReady);
+ m_moreToVerify.notify_one();
}
m_unknown.clear();
+ m_moreToVerify.notify_all();
}
diff --git a/libethereum/BlockQueue.h b/libethereum/BlockQueue.h
index a4e44b390..ba515b409 100644
--- a/libethereum/BlockQueue.h
+++ b/libethereum/BlockQueue.h
@@ -21,12 +21,15 @@
#pragma once
+#include <condition_variable>
+#include <thread>
#include
#include
#include
#include
#include
#include
+#include <libethcore/BlockInfo.h>
namespace dev
{
@@ -41,7 +44,9 @@ struct BlockQueueChannel: public LogChannel { static const char* name(); static
struct BlockQueueStatus
{
- size_t ready;
+ size_t verified;
+ size_t verifying;
+ size_t unverified;
size_t future;
size_t unknown;
size_t bad;
@@ -64,6 +69,9 @@ enum class QueueStatus
class BlockQueue: HasInvariants
{
public:
+ BlockQueue();
+ ~BlockQueue();
+
/// Import a block into the queue.
ImportResult import(bytesConstRef _tx, BlockChain const& _bc, bool _isOurs = false);
@@ -72,7 +80,7 @@ public:
/// Grabs at most @a _max of the blocks that are ready, giving them in the correct order for insertion into the chain.
/// Don't forget to call doneDrain() once you're done importing.
- void drain(std::vector<bytes>& o_out, unsigned _max);
+ void drain(std::vector<std::pair<BlockInfo, bytes>>& o_out, unsigned _max);
/// Must be called after a drain() call. Notes that the drained blocks have been imported into the blockchain, so we can forget about them.
/// @returns true iff there are additional blocks ready to be processed.
@@ -85,16 +93,16 @@ public:
void retryAllUnknown();
/// Get information on the items queued.
- std::pair<unsigned, unsigned> items() const { ReadGuard l(m_lock); return std::make_pair(m_ready.size(), m_unknown.size()); }
+ std::pair<unsigned, unsigned> items() const { ReadGuard l(m_lock); return std::make_pair(m_verified.size(), m_unknown.size()); }
/// Clear everything.
- void clear() { WriteGuard l(m_lock); DEV_INVARIANT_CHECK; m_readySet.clear(); m_drainingSet.clear(); m_ready.clear(); m_unknownSet.clear(); m_unknown.clear(); m_future.clear(); }
+ void clear() { WriteGuard l(m_lock); DEV_INVARIANT_CHECK; Guard l2(m_verification); m_readySet.clear(); m_drainingSet.clear(); m_verified.clear(); m_unverified.clear(); m_unknownSet.clear(); m_unknown.clear(); m_future.clear(); }
/// Return first block with an unknown parent.
h256 firstUnknown() const { ReadGuard l(m_lock); return m_unknownSet.size() ? *m_unknownSet.begin() : h256(); }
/// Get some infomration on the current status.
- BlockQueueStatus status() const { ReadGuard l(m_lock); return BlockQueueStatus{m_ready.size(), m_future.size(), m_unknown.size(), m_knownBad.size()}; }
+ BlockQueueStatus status() const { ReadGuard l(m_lock); Guard l2(m_verification); return BlockQueueStatus{m_verified.size(), m_verifying.size(), m_unverified.size(), m_future.size(), m_unknown.size(), m_knownBad.size()}; }
/// Get some infomration on the given block's status regarding us.
QueueStatus blockStatus(h256 const& _h) const;
@@ -106,15 +114,25 @@ private:
bool invariants() const override;
- mutable boost::shared_mutex m_lock; ///< General lock.
+ void verifierBody();
+
+ mutable boost::shared_mutex m_lock; ///< General lock for the sets, m_future and m_unknown.
h256Hash m_drainingSet; ///< All blocks being imported.
- h256Hash m_readySet; ///< All blocks ready for chain-import.
- std::vector<std::pair<h256, bytes>> m_ready; ///< List of blocks, in correct order, ready for chain-import.
+ h256Hash m_readySet; ///< All blocks ready for chain import.
h256Hash m_unknownSet; ///< Set of all blocks whose parents are not ready/in-chain.
std::unordered_multimap<h256, std::pair<h256, bytes>> m_unknown; ///< For blocks that have an unknown parent; we map their parent hash to the block stuff, and insert once the block appears.
h256Hash m_knownBad; ///< Set of blocks that we know will never be valid.
std::multimap<unsigned, std::pair<h256, bytes>> m_future; ///< Set of blocks that are not yet valid. Ordered by timestamp
Signal m_onReady; ///< Called when a subsequent call to import blocks will return a non-empty container. Be nice and exit fast.
+
+ mutable Mutex m_verification; ///< Mutex that allows writing to m_verified, m_verifying and m_unverified.
+ std::condition_variable m_moreToVerify; ///< Signaled when m_unverified has a new entry.
+ std::vector<std::pair<BlockInfo, bytes>> m_verified; ///< List of blocks, in correct order, verified and ready for chain-import.
+ std::deque<std::pair<BlockInfo, bytes>> m_verifying; ///< List of blocks being verified; as long as the second component (bytes) is empty, it's not finished.
+ std::deque<std::pair<h256, bytes>> m_unverified; ///< List of blocks, in correct order, ready for verification.
+
+ std::vector<std::thread> m_verifiers; ///< Threads who only verify.
+ bool m_deleting = false; ///< Exit condition for verifiers.
};
}
diff --git a/libethereum/ClientBase.cpp b/libethereum/ClientBase.cpp
index cfa271cf6..8dc666bb5 100644
--- a/libethereum/ClientBase.cpp
+++ b/libethereum/ClientBase.cpp
@@ -57,7 +57,7 @@ void ClientBase::submitTransaction(Secret _secret, u256 _value, Address _dest, b
m_tq.import(t.rlp());
StructuredLogger::transactionReceived(t.sha3().abridged(), t.sender().abridged());
- cnote << "New transaction " << t;
+ cnote << "New transaction " << t << "(maxNonce for sender" << a << "is" << m_tq.maxNonce(a) << ")";
}
Address ClientBase::submitTransaction(Secret _secret, u256 _endowment, bytes const& _init, u256 _gas, u256 _gasPrice)
diff --git a/libethereum/State.cpp b/libethereum/State.cpp
index 654dca8d7..b9e548ea3 100644
--- a/libethereum/State.cpp
+++ b/libethereum/State.cpp
@@ -29,6 +29,7 @@
#include
#include
#include
+#include <libdevcrypto/TrieHash.h>
#include
#include
#include
@@ -516,10 +517,10 @@ pair<TransactionReceipts, bool> State::sync(BlockChain const& _bc, TransactionQu
cnote << i.first << "Dropping old transaction (nonce too low)";
_tq.drop(i.first);
}
- else if (got > req + 25)
+ else if (got > req + _tq.waiting(i.second.sender()))
{
// too new
- cnote << i.first << "Dropping new transaction (> 25 nonces ahead)";
+ cnote << i.first << "Dropping new transaction (too many nonces ahead)";
_tq.drop(i.first);
}
else
@@ -534,7 +535,12 @@ pair<TransactionReceipts, bool> State::sync(BlockChain const& _bc, TransactionQu
_tq.drop(i.first);
}
else
- _tq.setFuture(i);
+ {
+ // Temporarily no gas left in current block.
+ // OPTIMISE: could note this and then we don't evaluate until a block that does have the gas left.
+ // for now, just leave alone.
+// _tq.setFuture(i);
+ }
}
catch (Exception const& _e)
{
@@ -592,34 +598,33 @@ u256 State::enact(bytesConstRef _block, BlockChain const& _bc, ImportRequirement
LastHashes lh = _bc.lastHashes((unsigned)m_previousBlock.number);
RLP rlp(_block);
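+ // Receipt RLPs are collected here; the receipts root is recomputed below with orderedTrieRoot instead of a scratch trie.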
+ vector<bytes> receipts;
+
// All ok with the block generally. Play back the transactions now...
unsigned i = 0;
for (auto const& tr: rlp[1])
{
- RLPStream k;
- k << i;
-
- transactionsTrie.insert(&k.out(), tr.data());
execute(lh, Transaction(tr.data(), CheckTransaction::Everything));
-
- RLPStream receiptrlp;
- m_receipts.back().streamRLP(receiptrlp);
- receiptsTrie.insert(&k.out(), &receiptrlp.out());
+ RLPStream receiptRLP;
+ m_receipts.back().streamRLP(receiptRLP);
+ receipts.push_back(receiptRLP.out());
++i;
}
- if (receiptsTrie.root() != m_currentBlock.receiptsRoot)
+ auto receiptsRoot = orderedTrieRoot(receipts);
+
+ if (receiptsRoot != m_currentBlock.receiptsRoot)
{
cwarn << "Bad receipts state root.";
- cwarn << "Expected: " << toString(receiptsTrie.root()) << " received: " << toString(m_currentBlock.receiptsRoot);
+ cwarn << "Expected: " << toString(receiptsRoot) << " received: " << toString(m_currentBlock.receiptsRoot);
cwarn << "Block:" << toHex(_block);
cwarn << "Block RLP:" << rlp;
- cwarn << "Calculated: " << receiptsTrie.root();
+ cwarn << "Calculated: " << receiptsRoot;
for (unsigned j = 0; j < i; ++j)
{
RLPStream k;
k << j;
- auto b = asBytes(receiptsTrie.at(&k.out()));
+ auto b = receipts[j];
cwarn << j << ": ";
cwarn << "RLP: " << RLP(b);
cwarn << "Hex: " << toHex(b);
@@ -835,6 +840,9 @@ void State::commitToMine(BlockChain const& _bc)
}
}
+ // TODO: move over to using TrieHash
+
+
MemoryDB tm;
GenericTrieDB<MemoryDB> transactionsTrie(&tm);
transactionsTrie.init();
diff --git a/libethereum/TransactionQueue.cpp b/libethereum/TransactionQueue.cpp
index 11104705c..76c486d50 100644
--- a/libethereum/TransactionQueue.cpp
+++ b/libethereum/TransactionQueue.cpp
@@ -35,20 +35,26 @@ ImportResult TransactionQueue::import(bytesConstRef _transactionRLP, ImportCallb
// Check if we already know this transaction.
h256 h = sha3(_transactionRLP);
+ Transaction t;
+ ImportResult ir;
+ {
UpgradableGuard l(m_lock);
- auto ir = check_WITH_LOCK(h, _ik);
+ ir = check_WITH_LOCK(h, _ik);
if (ir != ImportResult::Success)
return ir;
try {
- Transaction t(_transactionRLP, CheckTransaction::Everything);
+ t = Transaction(_transactionRLP, CheckTransaction::Everything);
UpgradeGuard ul(l);
- return manageImport_WITH_LOCK(h, t, _cb);
+ ir = manageImport_WITH_LOCK(h, t, _cb);
}
catch (...) {
return ImportResult::Malformed;
}
+ }
+// cdebug << "import-END: Nonce of" << t.sender() << "now" << maxNonce(t.sender());
+ return ir;
}
ImportResult TransactionQueue::check_WITH_LOCK(h256 const& _h, IfDropped _ik)
@@ -67,15 +73,23 @@ ImportResult TransactionQueue::import(Transaction const& _transaction, ImportCal
// Check if we already know this transaction.
h256 h = _transaction.sha3(WithSignature);
- UpgradableGuard l(m_lock);
- // TODO: keep old transactions around and check in State for nonce validity
+// cdebug << "import-BEGIN: Nonce of sender" << maxNonce(_transaction.sender());
+ ImportResult ret;
+ {
+ UpgradableGuard l(m_lock);
+ // TODO: keep old transactions around and check in State for nonce validity
- auto ir = check_WITH_LOCK(h, _ik);
- if (ir != ImportResult::Success)
- return ir;
+ auto ir = check_WITH_LOCK(h, _ik);
+ if (ir != ImportResult::Success)
+ return ir;
- UpgradeGuard ul(l);
- return manageImport_WITH_LOCK(h, _transaction, _cb);
+ {
+ UpgradeGuard ul(l);
+ ret = manageImport_WITH_LOCK(h, _transaction, _cb);
+ }
+ }
+// cdebug << "import-END: Nonce of" << _transaction.sender() << "now" << maxNonce(_transaction.sender());
+ return ret;
}
ImportResult TransactionQueue::manageImport_WITH_LOCK(h256 const& _h, Transaction const& _transaction, ImportCallback const& _cb)
@@ -110,62 +124,95 @@ ImportResult TransactionQueue::manageImport_WITH_LOCK(h256 const& _h, Transactio
u256 TransactionQueue::maxNonce(Address const& _a) const
{
- cdebug << "txQ::maxNonce" << _a;
+// cdebug << "txQ::maxNonce" << _a;
ReadGuard l(m_lock);
u256 ret = 0;
auto r = m_senders.equal_range(_a);
for (auto it = r.first; it != r.second; ++it)
- {
- cdebug << it->first << "1+" << m_current.at(it->second).nonce();
- DEV_IGNORE_EXCEPTIONS(ret = max(ret, m_current.at(it->second).nonce() + 1));
- }
+ if (m_current.count(it->second))
+ {
+// cdebug << it->first << "1+" << m_current.at(it->second).nonce();
+ ret = max(ret, m_current.at(it->second).nonce() + 1);
+ }
+ else if (m_future.count(it->second))
+ {
+// cdebug << it->first << "1+" << m_future.at(it->second).nonce();
+ ret = max(ret, m_future.at(it->second).nonce() + 1);
+ }
+ else
+ {
+ cwarn << "ERRROR!!!!! m_senders references non-current transaction";
+ cwarn << "Sender" << it->first << "has transaction" << it->second;
+ cwarn << "Count of m_current for" << it->second << "is" << m_current.count(it->second);
+ }
return ret;
}
void TransactionQueue::insertCurrent_WITH_LOCK(std::pair<h256, Transaction> const& _p)
{
- cdebug << "txQ::insertCurrent" << _p.first << _p.second.sender() << _p.second.nonce();
+// cdebug << "txQ::insertCurrent" << _p.first << _p.second.sender() << _p.second.nonce();
m_senders.insert(make_pair(_p.second.sender(), _p.first));
+ if (m_current.count(_p.first))
+ cwarn << "Transaction hash" << _p.first << "already in current?!";
m_current.insert(_p);
}
-bool TransactionQueue::removeCurrent_WITH_LOCK(h256 const& _txHash)
+bool TransactionQueue::remove_WITH_LOCK(h256 const& _txHash)
{
- cdebug << "txQ::removeCurrent" << _txHash;
- if (m_current.count(_txHash))
+// cdebug << "txQ::remove" << _txHash;
+ for (std::unordered_map<h256, Transaction>* pool: { &m_current, &m_future })
{
- auto r = m_senders.equal_range(m_current[_txHash].sender());
- for (auto it = r.first; it != r.second; ++it)
- if (it->second == _txHash)
- {
- cdebug << "=> sender" << it->first;
- m_senders.erase(it);
- break;
- }
- cdebug << "=> nonce" << m_current[_txHash].nonce();
- m_current.erase(_txHash);
- return true;
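+ // If this pool holds the transaction, remove its m_senders entry and then erase it from the pool.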
+ auto pit = pool->find(_txHash);
+ if (pit != pool->end())
+ {
+ auto r = m_senders.equal_range(pit->second.sender());
+ for (auto i = r.first; i != r.second; ++i)
+ if (i->second == _txHash)
+ {
+ m_senders.erase(i);
+ break;
+ }
+ cdebug << "=> nonce" << pit->second.nonce();
+ pool->erase(pit);
+ return true;
+ }
}
return false;
}
+unsigned TransactionQueue::waiting(Address const& _a) const
+{
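+ // Count how many transactions from _a are queued (current or future); m_senders holds one entry per queued transaction.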
+ auto it = m_senders.equal_range(_a);
+ unsigned ret = 0;
+ for (auto i = it.first; i != it.second; ++i, ++ret) {}
+ return ret;
+}
+
void TransactionQueue::setFuture(std::pair<h256, Transaction> const& _t)
{
+// cdebug << "txQ::setFuture" << _t.first;
WriteGuard l(m_lock);
if (m_current.count(_t.first))
{
- m_unknown.insert(make_pair(_t.second.sender(), _t));
+ m_future.insert(_t);
m_current.erase(_t.first);
}
}
void TransactionQueue::noteGood(std::pair<h256, Transaction> const& _t)
{
+// cdebug << "txQ::noteGood" << _t.first;
WriteGuard l(m_lock);
- auto r = m_unknown.equal_range(_t.second.sender());
+ auto r = m_senders.equal_range(_t.second.sender());
for (auto it = r.first; it != r.second; ++it)
- m_current.insert(it->second);
- m_unknown.erase(r.first, r.second);
+ {
+ auto fit = m_future.find(it->second);
+ if (fit != m_future.end())
+ {
+ m_current.insert(*fit);
+ m_future.erase(fit);
+ }
+ }
}
void TransactionQueue::drop(h256 const& _txHash)
@@ -179,13 +226,5 @@ void TransactionQueue::drop(h256 const& _txHash)
m_dropped.insert(_txHash);
m_known.erase(_txHash);
- if (!removeCurrent_WITH_LOCK(_txHash))
- {
- for (auto i = m_unknown.begin(); i != m_unknown.end(); ++i)
- if (i->second.first == _txHash)
- {
- m_unknown.erase(i);
- break;
- }
- }
+ remove_WITH_LOCK(_txHash);
}
diff --git a/libethereum/TransactionQueue.h b/libethereum/TransactionQueue.h
index 50fcea574..e91a12360 100644
--- a/libethereum/TransactionQueue.h
+++ b/libethereum/TransactionQueue.h
@@ -55,14 +55,15 @@ public:
void drop(h256 const& _txHash);
+ unsigned waiting(Address const& _a) const;
std::unordered_map<h256, Transaction> transactions() const { ReadGuard l(m_lock); return m_current; }
- std::pair<unsigned, unsigned> items() const { ReadGuard l(m_lock); return std::make_pair(m_current.size(), m_unknown.size()); }
+ std::pair<unsigned, unsigned> items() const { ReadGuard l(m_lock); return std::make_pair(m_current.size(), m_future.size()); }
u256 maxNonce(Address const& _a) const;
void setFuture(std::pair<h256, Transaction> const& _t);
void noteGood(std::pair<h256, Transaction> const& _t);
- void clear() { WriteGuard l(m_lock); m_known.clear(); m_current.clear(); m_unknown.clear(); }
+ void clear() { WriteGuard l(m_lock); m_senders.clear(); m_known.clear(); m_current.clear(); m_future.clear(); }
template <class T> Handler onReady(T const& _t) { return m_onReady.add(_t); }
private:
@@ -70,15 +71,15 @@ private:
ImportResult manageImport_WITH_LOCK(h256 const& _h, Transaction const& _transaction, ImportCallback const& _cb);
void insertCurrent_WITH_LOCK(std::pair<h256, Transaction> const& _p);
- bool removeCurrent_WITH_LOCK(h256 const& _txHash);
+ bool remove_WITH_LOCK(h256 const& _txHash);
mutable SharedMutex m_lock; ///< General lock.
h256Hash m_known; ///< Hashes of transactions in both sets.
+ std::unordered_multimap<Address, h256> m_senders; ///< Mapping from the sender address to the transaction hash; useful for determining the nonce of a given sender.
std::unordered_map<h256, Transaction> m_current; ///< Map of SHA3(tx) to tx.
- std::unordered_multimap<Address, std::pair<h256, Transaction>> m_unknown; ///< For transactions that have a future nonce; we map their sender address to the tx stuff, and insert once the sender has a valid TX.
+ std::unordered_map<h256, Transaction> m_future; ///< For transactions that have a future nonce; we re-insert into current once the sender has a valid TX.
std::unordered_map<h256, std::function<void(ImportResult)>> m_callbacks; ///< Called once.
h256Hash m_dropped; ///< Transactions that have previously been dropped.
- std::multimap<Address, h256> m_senders; ///< Mapping from the sender address to the transaction hash; useful for determining the nonce of a given sender.
Signal m_onReady; ///< Called when a subsequent call to import transactions will return a non-empty container. Be nice and exit fast.
};
diff --git a/test/libdevcrypto/trie.cpp b/test/libdevcrypto/trie.cpp
index d41739a01..c53617772 100644
--- a/test/libdevcrypto/trie.cpp
+++ b/test/libdevcrypto/trie.cpp
@@ -28,7 +28,7 @@
#include "../JsonSpiritHeaders.h"
#include
#include
-#include "TrieHash.h"
+#include <libdevcrypto/TrieHash.h>
#include "MemTrie.h"
#include "../TestHelper.h"