
Merge branch 'develop' into pacify

cl-refactor
subtly 10 years ago
parent
commit
bb62ce62d7
  1. alethzero/Connect.cpp (5)
  2. alethzero/Connect.ui (3)
  3. alethzero/MainWin.cpp (2)
  4. libdevcore/Common.cpp (2)
  5. libethereum/BlockChain.cpp (247)
  6. libethereum/BlockChain.h (31)
  7. libethereum/BlockQueue.cpp (22)
  8. libethereum/BlockQueue.h (4)
  9. libethereum/CanonBlockChain.cpp (2)
  10. libethereum/CanonBlockChain.h (4)
  11. libethereum/Client.cpp (4)
  12. libethereum/ClientBase.cpp (8)
  13. libethereum/State.cpp (35)
  14. libweb3jsonrpc/WebThreeStubServer.cpp (5)
  15. libweb3jsonrpc/WebThreeStubServer.h (4)
  16. libwebthree/WebThree.h (2)

alethzero/Connect.cpp (5)

@@ -38,13 +38,14 @@ Connect::~Connect()
void Connect::setEnvironment(QStringList const& _nodes)
{
ui->host->addItems(_nodes);
if (ui->host->count() == 0)
ui->host->addItems(_nodes);
}
void Connect::reset()
{
ui->nodeId->clear();
ui->required->setChecked(false);
ui->required->setChecked(true);
}
QString Connect::host()

alethzero/Connect.ui (3)

@@ -59,6 +59,9 @@
<property name="text">
<string>Required (Always Connect to this Peer)</string>
</property>
<property name="checked">
<bool>true</bool>
</property>
<property name="tristate">
<bool>false</bool>
</property>

alethzero/MainWin.cpp (2)

@@ -285,7 +285,7 @@ void Main::onKeysChanged()
unsigned Main::installWatch(LogFilter const& _tf, WatchHandler const& _f)
{
auto ret = ethereum()->installWatch(_tf);
auto ret = ethereum()->installWatch(_tf, Reaping::Manual);
m_handlers[ret] = _f;
_f(LocalisedLogEntries());
return ret;

libdevcore/Common.cpp (2)

@@ -27,7 +27,7 @@ using namespace dev;
namespace dev
{
char const* Version = "0.9.2";
char const* Version = "0.9.3";
}

libethereum/BlockChain.cpp (247)

@@ -19,10 +19,10 @@
* @date 2014
*/
#include <leveldb/db.h>
#include "BlockChain.h"
#include <leveldb/db.h>
#include <boost/timer.hpp>
#include <boost/filesystem.hpp>
#include <test/JsonSpiritHeaders.h>
#include <libdevcore/Common.h>
@@ -43,6 +43,7 @@ using namespace dev::eth;
namespace js = json_spirit;
#define ETH_CATCH 1
#define ETH_TIMED_IMPORTS 0
std::ostream& dev::eth::operator<<(std::ostream& _out, BlockChain const& _bc)
{
@@ -174,6 +175,8 @@ void BlockChain::close()
m_blocks.clear();
}
#define IGNORE_EXCEPTIONS(X) try { X; } catch (...) {}
void BlockChain::rebuild(std::string const& _path, std::function<void(unsigned, unsigned)> const& _progress)
{
unsigned originalNumber = number();
@@ -181,6 +184,7 @@ void BlockChain::rebuild(std::string const& _path, std::function<void(unsigned,
// Keep extras DB around, but under a temp name
delete m_extrasDB;
m_extrasDB = nullptr;
IGNORE_EXCEPTIONS(boost::filesystem::remove_all(_path + "/details.old"));
boost::filesystem::rename(_path + "/details", _path + "/details.old");
ldb::DB* oldExtrasDB;
ldb::Options o;
@@ -189,7 +193,7 @@ void BlockChain::rebuild(std::string const& _path, std::function<void(unsigned,
ldb::DB::Open(o, _path + "/details", &m_extrasDB);
// Open a fresh state DB
OverlayDB db = State::openDB(_path, WithExisting::Kill);
State s(Address(), State::openDB(_path, WithExisting::Kill), BaseState::CanonGenesis);
// Clear all memos ready for replay.
m_details.clear();
@@ -201,11 +205,21 @@ void BlockChain::rebuild(std::string const& _path, std::function<void(unsigned,
m_lastLastHashes.clear();
m_lastBlockHash = genesisHash();
for (unsigned d = 0; d < originalNumber; ++d)
h256 lastHash = genesisHash();
for (unsigned d = 1; d < originalNumber; ++d)
{
try
{
import(block(queryExtras<BlockHash, ExtraBlockHash>(h256(u256(d)), m_blockHashes, x_blockHashes, NullBlockHash, oldExtrasDB).value), db);
bytes b = block(queryExtras<BlockHash, ExtraBlockHash>(h256(u256(d)), m_blockHashes, x_blockHashes, NullBlockHash, oldExtrasDB).value);
BlockInfo bi(b);
if (bi.parentHash != lastHash)
{
cwarn << "DISJOINT CHAIN DETECTED; " << bi.hash.abridged() << "#" << d << " -> parent is" << bi.parentHash.abridged() << "; expected" << lastHash.abridged() << "#" << (d - 1);
return;
}
lastHash = bi.hash;
import(b, s.db(), true);
}
catch (...)
{
@@ -258,7 +272,7 @@ h256s BlockChain::sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max
_bq.tick(*this);
vector<bytes> blocks;
_bq.drain(blocks);
_bq.drain(blocks, _max);
h256s ret;
for (auto const& block: blocks)
@@ -266,10 +280,7 @@ h256s BlockChain::sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max
try
{
for (auto h: import(block, _stateDB))
if (!_max--)
break;
else
ret.push_back(h);
ret.push_back(h);
}
catch (UnknownParent)
{
@@ -288,11 +299,11 @@ h256s BlockChain::sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max
return ret;
}
h256s BlockChain::attemptImport(bytes const& _block, OverlayDB const& _stateDB) noexcept
h256s BlockChain::attemptImport(bytes const& _block, OverlayDB const& _stateDB, bool _force) noexcept
{
try
{
return import(_block, _stateDB);
return import(_block, _stateDB, _force);
}
catch (...)
{
@@ -301,8 +312,20 @@ h256s BlockChain::attemptImport(bytes const& _block, OverlayDB const& _stateDB)
}
}
h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
h256s BlockChain::import(bytes const& _block, OverlayDB const& _db, bool _force)
{
//@tidy This is a behemoth of a method - could do to be split into a few smaller ones.
#if ETH_TIMED_IMPORTS
boost::timer total;
double preliminaryChecks;
double enactment;
double collation;
double writing;
double checkBest;
boost::timer t;
#endif
// VERIFY: populates from the block and checks the block is internally coherent.
BlockInfo bi;
@@ -329,7 +352,7 @@ h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
auto newHash = BlockInfo::headerHash(_block);
// Check block doesn't already exist first!
if (isKnown(newHash))
if (isKnown(newHash) && !_force)
{
clog(BlockChainNote) << newHash << ": Not new.";
BOOST_THROW_EXCEPTION(AlreadyHaveBlock());
@@ -362,6 +385,11 @@ h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
clog(BlockChainNote) << "Attempting import of " << newHash.abridged() << "...";
#if ETH_TIMED_IMPORTS
preliminaryChecks = t.elapsed();
t.restart();
#endif
u256 td;
#if ETH_CATCH
try
@@ -371,6 +399,7 @@ h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
// Get total difficulty increase and update state, checking it.
State s(bi.coinbaseAddress, _db);
auto tdIncrease = s.enactOn(&_block, bi, *this);
BlockLogBlooms blb;
BlockReceipts br;
for (unsigned i = 0; i < s.pending().size(); ++i)
@@ -381,6 +410,11 @@ h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
s.cleanup(true);
td = pd.totalDifficulty + tdIncrease;
#if ETH_TIMED_IMPORTS
enactment = t.elapsed();
t.restart();
#endif
#if ETH_PARANOIA
checkConsistency();
#endif
@@ -397,37 +431,6 @@ h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
m_details[newHash] = BlockDetails((unsigned)pd.number + 1, td, bi.parentHash, {});
m_details[bi.parentHash].children.push_back(newHash);
}
{
WriteGuard l(x_blockHashes);
m_blockHashes[h256(bi.number)].value = newHash;
}
h256s alteredBlooms;
{
WriteGuard l(x_blocksBlooms);
LogBloom blockBloom = bi.logBloom;
blockBloom.shiftBloom<3>(sha3(bi.coinbaseAddress.ref()));
unsigned index = (unsigned)bi.number;
for (unsigned level = 0; level < c_bloomIndexLevels; level++, index /= c_bloomIndexSize)
{
unsigned i = index / c_bloomIndexSize;
unsigned o = index % c_bloomIndexSize;
alteredBlooms.push_back(chunkId(level, i));
m_blocksBlooms[alteredBlooms.back()].blooms[o] |= blockBloom;
}
}
// Collate transaction hashes and remember who they were.
h256s newTransactionAddresses;
{
RLP blockRLP(_block);
TransactionAddress ta;
ta.blockHash = newHash;
WriteGuard l(x_transactionAddresses);
for (ta.index = 0; ta.index < blockRLP[1].itemCount(); ++ta.index)
{
newTransactionAddresses.push_back(sha3(blockRLP[1][ta.index].data()));
m_transactionAddresses[newTransactionAddresses.back()] = ta;
}
}
{
WriteGuard l(x_logBlooms);
m_logBlooms[newHash] = blb;
@@ -437,25 +440,27 @@ h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
m_receipts[newHash] = br;
}
#if ETH_TIMED_IMPORTS
collation = t.elapsed();
t.restart();
#endif
{
ReadGuard l1(x_blocksBlooms);
ReadGuard l2(x_details);
ReadGuard l3(x_blockHashes);
ReadGuard l4(x_receipts);
ReadGuard l5(x_logBlooms);
ReadGuard l6(x_transactionAddresses);
m_blocksDB->Put(m_writeOptions, toSlice(newHash), (ldb::Slice)ref(_block));
m_extrasDB->Put(m_writeOptions, toSlice(newHash, ExtraDetails), (ldb::Slice)dev::ref(m_details[newHash].rlp()));
m_extrasDB->Put(m_writeOptions, toSlice(bi.parentHash, ExtraDetails), (ldb::Slice)dev::ref(m_details[bi.parentHash].rlp()));
m_extrasDB->Put(m_writeOptions, toSlice(h256(bi.number), ExtraBlockHash), (ldb::Slice)dev::ref(m_blockHashes[h256(bi.number)].rlp()));
for (auto const& h: newTransactionAddresses)
m_extrasDB->Put(m_writeOptions, toSlice(h, ExtraTransactionAddress), (ldb::Slice)dev::ref(m_transactionAddresses[h].rlp()));
m_extrasDB->Put(m_writeOptions, toSlice(newHash, ExtraLogBlooms), (ldb::Slice)dev::ref(m_logBlooms[newHash].rlp()));
m_extrasDB->Put(m_writeOptions, toSlice(newHash, ExtraReceipts), (ldb::Slice)dev::ref(m_receipts[newHash].rlp()));
for (auto const& h: alteredBlooms)
m_extrasDB->Put(m_writeOptions, toSlice(h, ExtraBlocksBlooms), (ldb::Slice)dev::ref(m_blocksBlooms[h].rlp()));
}
#if ETH_TIMED_IMPORTS
writing = t.elapsed();
t.restart();
#endif
#if ETH_PARANOIA
checkConsistency();
#endif
@@ -493,16 +498,78 @@ h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
h256 last = currentHash();
if (td > details(last).totalDifficulty)
{
ret = treeRoute(last, newHash);
h256 common;
unsigned commonIndex;
tie(ret, common, commonIndex) = treeRoute(last, newHash);
{
WriteGuard l(x_lastBlockHash);
m_lastBlockHash = newHash;
}
noteCanonChanged();
m_extrasDB->Put(m_writeOptions, ldb::Slice("best"), ldb::Slice((char const*)&newHash, 32));
// Most of the time these two will be equal - only when we're doing a chain revert will they not be
if (common != last)
// If we are reverting previous blocks, we need to clear their blooms (in particular, to
// rebuild any higher level blooms that they contributed to).
clearBlockBlooms(number(common) + 1, number(last) + 1);
// Go through ret backwards until hash != last.parent and update m_transactionAddresses, m_blockHashes
for (auto i = ret.rbegin(); i != ret.rend() && *i != common; ++i)
{
auto b = block(*i);
BlockInfo bi(b);
// Collate logs into blooms.
h256s alteredBlooms;
{
LogBloom blockBloom = bi.logBloom;
blockBloom.shiftBloom<3>(sha3(bi.coinbaseAddress.ref()));
// Pre-memoize everything we need before locking x_blocksBlooms
for (unsigned level = 0, index = (unsigned)bi.number; level < c_bloomIndexLevels; level++, index /= c_bloomIndexSize)
blocksBlooms(chunkId(level, index / c_bloomIndexSize));
WriteGuard l(x_blocksBlooms);
for (unsigned level = 0, index = (unsigned)bi.number; level < c_bloomIndexLevels; level++, index /= c_bloomIndexSize)
{
unsigned i = index / c_bloomIndexSize;
unsigned o = index % c_bloomIndexSize;
alteredBlooms.push_back(chunkId(level, i));
m_blocksBlooms[alteredBlooms.back()].blooms[o] |= blockBloom;
}
}
// Collate transaction hashes and remember who they were.
h256s newTransactionAddresses;
{
RLP blockRLP(b);
TransactionAddress ta;
ta.blockHash = bi.hash;
WriteGuard l(x_transactionAddresses);
for (ta.index = 0; ta.index < blockRLP[1].itemCount(); ++ta.index)
{
newTransactionAddresses.push_back(sha3(blockRLP[1][ta.index].data()));
m_transactionAddresses[newTransactionAddresses.back()] = ta;
}
}
{
WriteGuard l(x_blockHashes);
m_blockHashes[h256(bi.number)].value = bi.hash;
}
// Update database with them.
ReadGuard l1(x_blocksBlooms);
ReadGuard l3(x_blockHashes);
ReadGuard l6(x_transactionAddresses);
for (auto const& h: alteredBlooms)
m_extrasDB->Put(m_writeOptions, toSlice(h, ExtraBlocksBlooms), (ldb::Slice)dev::ref(m_blocksBlooms[h].rlp()));
m_extrasDB->Put(m_writeOptions, toSlice(h256(bi.number), ExtraBlockHash), (ldb::Slice)dev::ref(m_blockHashes[h256(bi.number)].rlp()));
for (auto const& h: newTransactionAddresses)
m_extrasDB->Put(m_writeOptions, toSlice(h, ExtraTransactionAddress), (ldb::Slice)dev::ref(m_transactionAddresses[h].rlp()));
}
clog(BlockChainNote) << " Imported and best" << td << ". Has" << (details(bi.parentHash).children.size() - 1) << "siblings. Route:" << toString(ret);
noteCanonChanged();
StructuredLogger::chainNewHead(
bi.headerHash(WithoutNonce).abridged(),
bi.nonce.abridged(),
@@ -514,14 +581,67 @@ h256s BlockChain::import(bytes const& _block, OverlayDB const& _db)
{
clog(BlockChainNote) << " Imported but not best (oTD:" << details(last).totalDifficulty << " > TD:" << td << ")";
}
#if ETH_TIMED_IMPORTS
checkBest = t.elapsed();
cnote << "Import took:" << total.elapsed();
cnote << "preliminaryChecks:" << preliminaryChecks;
cnote << "enactment:" << enactment;
cnote << "collation:" << collation;
cnote << "writing:" << writing;
cnote << "checkBest:" << checkBest;
#endif
return ret;
}
h256s BlockChain::treeRoute(h256 const& _from, h256 const& _to, h256* o_common, bool _pre, bool _post) const
void BlockChain::clearBlockBlooms(unsigned _begin, unsigned _end)
{
// ... c c c c c c c c c c C o o o o o o
// ... /=15 /=21
// L0...| ' | ' | ' | ' | ' | ' | ' | 'b|x'x|x'x|x'e| /=11
// L1...| ' | ' | ' | ' b | x ' x | x ' e | /=6
// L2...| ' | ' b | x ' x | e /=3
// L3...| ' b | x ' e
// model: c_bloomIndexLevels = 4, c_bloomIndexSize = 2
// ... /=15 /=21
// L0...| ' ' ' | ' ' ' | ' ' ' | ' ' 'b|x'x'x'x|x'e' ' |
// L1...| ' ' ' b | x ' x ' e ' |
// L2...| b ' x ' e ' |
// model: c_bloomIndexLevels = 2, c_bloomIndexSize = 4
// algorithm doesn't have the best memoisation coherence, but eh well...
unsigned beginDirty = _begin;
unsigned endDirty = _end;
for (unsigned level = 0; level < c_bloomIndexLevels; level++, beginDirty /= c_bloomIndexSize, endDirty = (endDirty - 1) / c_bloomIndexSize + 1)
{
// compute earliest & latest index for each level, rebuild from previous levels.
for (unsigned item = beginDirty; item != endDirty; ++item)
{
unsigned bunch = item / c_bloomIndexSize;
unsigned offset = item % c_bloomIndexSize;
auto id = chunkId(level, bunch);
LogBloom acc;
if (!!level)
{
// rebuild the bloom from the previous (lower) level (if there is one).
auto lowerChunkId = chunkId(level - 1, item);
for (auto const& bloom: blocksBlooms(lowerChunkId).blooms)
acc |= bloom;
}
blocksBlooms(id); // make sure it has been memoized.
m_blocksBlooms[id].blooms[offset] = acc;
}
}
}
tuple<h256s, h256, unsigned> BlockChain::treeRoute(h256 const& _from, h256 const& _to, bool _common, bool _pre, bool _post) const
{
// cdebug << "treeRoute" << _from.abridged() << "..." << _to.abridged();
if (!_from || !_to)
return h256s();
return make_tuple(h256s(), h256(), 0);
h256s ret;
h256s back;
unsigned fn = details(_from).number;
@@ -553,20 +673,19 @@ h256s BlockChain::treeRoute(h256 const& _from, h256 const& _to, h256* o_common,
assert(to);
from = details(from).parent;
to = details(to).parent;
if (_pre)
if (_pre && (from != to || _common))
ret.push_back(from);
if (_post)
if (_post && (from != to || (!_pre && _common)))
back.push_back(to);
fn--;
tn--;
// cdebug << "from:" << fn << _from.abridged() << "; to:" << tn << _to.abridged();
}
if (o_common)
*o_common = from;
ret.reserve(ret.size() + back.size());
for (auto it = back.cbegin(); it != back.cend(); ++it)
unsigned i = ret.size() - (int)(_common && !ret.empty() && !back.empty());
for (auto it = back.rbegin(); it != back.rend(); ++it)
ret.push_back(*it);
return ret;
return make_tuple(ret, from, i);
}
void BlockChain::noteUsed(h256 const& _h, unsigned _extra) const
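The bloom re-indexing in the reorg loop above and clearBlockBlooms() both walk the same level/index arithmetic over c_bloomIndexLevels and c_bloomIndexSize. A minimal standalone sketch of just that arithmetic, assuming the diagram's second model (c_bloomIndexLevels = 2, c_bloomIndexSize = 4) rather than the library's real constants:

#include <cstdio>

// Assumed values taken from the "model" line in the clearBlockBlooms() comment,
// not from the real cpp-ethereum headers.
static const unsigned c_bloomIndexLevels = 2;
static const unsigned c_bloomIndexSize = 4;

int main()
{
    unsigned blockNumber = 21; // the "/=21" block marked in the ASCII diagram
    unsigned index = blockNumber;
    for (unsigned level = 0; level < c_bloomIndexLevels; ++level, index /= c_bloomIndexSize)
    {
        unsigned chunk = index / c_bloomIndexSize;  // which BlocksBlooms record, i.e. chunkId(level, chunk)
        unsigned offset = index % c_bloomIndexSize; // which bloom slot inside that record
        std::printf("level %u -> chunk %u, offset %u\n", level, chunk, offset);
    }
    return 0;
}

Reverting a block therefore touches one slot per level, which is exactly what clearBlockBlooms() rebuilds for the whole [_begin, _end) range during a chain revert.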

libethereum/BlockChain.h (31)

@@ -103,11 +103,11 @@ public:
/// Attempt to import the given block directly into the CanonBlockChain and sync with the state DB.
/// @returns the block hashes of any blocks that came into/went out of the canonical block chain.
h256s attemptImport(bytes const& _block, OverlayDB const& _stateDB) noexcept;
h256s attemptImport(bytes const& _block, OverlayDB const& _stateDB, bool _force = false) noexcept;
/// Import block into disk-backed DB
/// @returns the block hashes of any blocks that came into/went out of the canonical block chain.
h256s import(bytes const& _block, OverlayDB const& _stateDB);
h256s import(bytes const& _block, OverlayDB const& _stateDB, bool _force = false);
/// Returns true if the given block is known (though not necessarily a part of the canon chain).
bool isKnown(h256 const& _hash) const;
@@ -163,6 +163,7 @@ public:
*/
BlocksBlooms blocksBlooms(unsigned _level, unsigned _index) const { return blocksBlooms(chunkId(_level, _index)); }
BlocksBlooms blocksBlooms(h256 const& _chunkId) const { return queryExtras<BlocksBlooms, ExtraBlocksBlooms>(_chunkId, m_blocksBlooms, x_blocksBlooms, NullBlocksBlooms); }
void clearBlockBlooms(unsigned _begin, unsigned _end);
LogBloom blockBloom(unsigned _number) const { return blocksBlooms(chunkId(0, _number / c_bloomIndexSize)).blooms[_number % c_bloomIndexSize]; }
std::vector<unsigned> withBlockBloom(LogBloom const& _b, unsigned _earliest, unsigned _latest) const;
std::vector<unsigned> withBlockBloom(LogBloom const& _b, unsigned _earliest, unsigned _latest, unsigned _topLevel, unsigned _index) const;
@@ -194,21 +195,31 @@ public:
/// Will call _progress with the progress in this operation first param done, second total.
void rebuild(std::string const& _path, ProgressCallback const& _progress = std::function<void(unsigned, unsigned)>());
/** @returns the hash of all blocks between @a _from and @a _to, all blocks are ordered first by a number of
* blocks that are parent-to-child, then two sibling blocks, then a number of blocks that are child-to-parent.
/** @returns a tuple of:
* - a vector of hashes of all blocks between @a _from and @a _to, all blocks are ordered first by a number of
* blocks that are parent-to-child, then two sibling blocks, then a number of blocks that are child-to-parent;
* - the block hash of the latest common ancestor of both blocks;
* - the index where the latest common ancestor of both blocks would either be found or inserted, depending
* on whether it is included.
*
* If non-null, the h256 at @a o_common is set to the latest common ancestor of both blocks.
* @param _common if true, include the common ancestor in the returned vector.
* @param _pre if true, include all block hashes running from @a _from until the common ancestor in the returned vector.
* @param _post if true, include all block hashes running from the common ancestor until @a _to in the returned vector.
*
* e.g. if the block tree is 3a -> 2a -> 1a -> g and 2b -> 1b -> g (g is genesis, *a, *b are competing chains),
* then:
* @code
* treeRoute(3a, 2b) == { 3a, 2a, 1a, 1b, 2b }; // *o_common == g
* treeRoute(2a, 1a) == { 2a, 1a }; // *o_common == 1a
* treeRoute(1a, 2a) == { 1a, 2a }; // *o_common == 1a
* treeRoute(1b, 2a) == { 1b, 1a, 2a }; // *o_common == g
* treeRoute(3a, 2b, false) == make_tuple({ 3a, 2a, 1a, 1b, 2b }, g, 3);
* treeRoute(2a, 1a, false) == make_tuple({ 2a, 1a }, 1a, 1)
* treeRoute(1a, 2a, false) == make_tuple({ 1a, 2a }, 1a, 0)
* treeRoute(1b, 2a, false) == make_tuple({ 1b, 1a, 2a }, g, 1)
* treeRoute(3a, 2b, true) == make_tuple({ 3a, 2a, 1a, g, 1b, 2b }, g, 3);
* treeRoute(2a, 1a, true) == make_tuple({ 2a, 1a }, 1a, 1)
* treeRoute(1a, 2a, true) == make_tuple({ 1a, 2a }, 1a, 0)
* treeRoute(1b, 2a, true) == make_tuple({ 1b, g, 1a, 2a }, g, 1)
* @endcode
*/
h256s treeRoute(h256 const& _from, h256 const& _to, h256* o_common = nullptr, bool _pre = true, bool _post = true) const;
std::tuple<h256s, h256, unsigned> treeRoute(h256 const& _from, h256 const& _to, bool _common = true, bool _pre = true, bool _post = true) const;
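A caller-side sketch of how the new tuple is meant to be consumed; StubChain and the string hashes are stand-ins (the hard-coded result is simply the first row of the example table above), and the split at the returned index mirrors the tie(ret, common, commonIndex) unpacking in import():

#include <iostream>
#include <string>
#include <tuple>
#include <vector>

using Hash = std::string;          // stand-in for h256
using Hashes = std::vector<Hash>;  // stand-in for h256s

struct StubChain
{
    // Returns the documented result of treeRoute(3a, 2b, false) verbatim.
    std::tuple<Hashes, Hash, unsigned> treeRoute(Hash const&, Hash const&, bool) const
    {
        return std::make_tuple(Hashes{"3a", "2a", "1a", "1b", "2b"}, Hash("g"), 3u);
    }
};

int main()
{
    StubChain bc;
    Hashes route;
    Hash common;
    unsigned commonIndex;
    std::tie(route, common, commonIndex) = bc.treeRoute("3a", "2b", false);

    // Entries before commonIndex run from _from back towards the fork point,
    // entries from commonIndex onwards run forwards again up to _to.
    Hashes reverted(route.begin(), route.begin() + commonIndex);
    Hashes applied(route.begin() + commonIndex, route.end());
    std::cout << "revert " << reverted.size() << " block(s), apply " << applied.size()
              << " block(s), fork at " << common << "\n";
}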
struct Statistics
{

libethereum/BlockQueue.cpp (22)

@@ -114,13 +114,29 @@ void BlockQueue::tick(BlockChain const& _bc)
m_future.erase(m_future.begin(), m_future.upper_bound(t));
}
void BlockQueue::drain(std::vector<bytes>& o_out)
template <class T> T advanced(T _t, unsigned _n)
{
std::advance(_t, _n);
return _t;
}
void BlockQueue::drain(std::vector<bytes>& o_out, unsigned _max)
{
WriteGuard l(m_lock);
if (m_drainingSet.empty())
{
swap(o_out, m_ready);
swap(m_drainingSet, m_readySet);
o_out.resize(min<unsigned>(_max, m_ready.size()));
for (unsigned i = 0; i < o_out.size(); ++i)
swap(o_out[i], m_ready[i]);
m_ready.erase(m_ready.begin(), advanced(m_ready.begin(), o_out.size()));
for (auto const& bs: o_out)
{
auto h = sha3(bs);
m_drainingSet.insert(h);
m_readySet.erase(h);
}
// swap(o_out, m_ready);
// swap(m_drainingSet, m_readySet);
}
}

libethereum/BlockQueue.h (4)

@@ -61,9 +61,9 @@ public:
/// Notes that time has moved on and some blocks that used to be "in the future" may now be valid.
void tick(BlockChain const& _bc);
/// Grabs the blocks that are ready, giving them in the correct order for insertion into the chain.
/// Grabs at most @a _max of the blocks that are ready, giving them in the correct order for insertion into the chain.
/// Don't forget to call doneDrain() once you're done importing.
void drain(std::vector<bytes>& o_out);
void drain(std::vector<bytes>& o_out, unsigned _max);
/// Must be called after a drain() call. Notes that the drained blocks have been imported into the blockchain, so we can forget about them.
void doneDrain() { WriteGuard l(m_lock); m_drainingSet.clear(); }
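A small self-contained mock of the capped drain()/doneDrain() cycle the new signature enables, assuming strings in place of block bytes and std::hash in place of sha3; it is a sketch of the behaviour, not the library code:

#include <algorithm>
#include <functional>
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct MockQueue
{
    std::vector<std::string> ready;  // blocks verified and ready for import
    std::set<size_t> drainingSet;    // identities of blocks handed out but not yet acknowledged

    void drain(std::vector<std::string>& out, unsigned max)
    {
        if (!drainingSet.empty())
            return; // a previous drain has not been acknowledged with doneDrain() yet
        out.resize(std::min<size_t>(max, ready.size()));
        for (unsigned i = 0; i < out.size(); ++i)
            std::swap(out[i], ready[i]);
        ready.erase(ready.begin(), ready.begin() + out.size());
        for (auto const& b: out)
            drainingSet.insert(std::hash<std::string>{}(b));
    }

    void doneDrain() { drainingSet.clear(); }
};

int main()
{
    MockQueue q;
    q.ready = {"b1", "b2", "b3", "b4"};
    std::vector<std::string> batch;
    q.drain(batch, 2);   // hands out b1 and b2 only; b3 and b4 stay queued
    std::cout << batch.size() << " drained, " << q.ready.size() << " still ready\n";
    q.doneDrain();       // only now may the next drain() hand out b3 and b4
}

Capping the batch keeps an import pass bounded, which is why BlockChain::sync() now forwards its _max argument straight into drain().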

libethereum/CanonBlockChain.cpp (2)

@@ -92,6 +92,6 @@ bytes CanonBlockChain::createGenesisBlock()
return block.out();
}
CanonBlockChain::CanonBlockChain(std::string const& _path, WithExisting _we): BlockChain(CanonBlockChain::createGenesisBlock(), _path, _we)
CanonBlockChain::CanonBlockChain(std::string const& _path, WithExisting _we, ProgressCallback const& _pc): BlockChain(CanonBlockChain::createGenesisBlock(), _path, _we, _pc)
{
}

libethereum/CanonBlockChain.h (4)

@@ -55,8 +55,8 @@ std::map<Address, Account> const& genesisState();
class CanonBlockChain: public BlockChain
{
public:
CanonBlockChain(WithExisting _we = WithExisting::Trust): CanonBlockChain(std::string(), _we) {}
CanonBlockChain(std::string const& _path, WithExisting _we = WithExisting::Trust);
CanonBlockChain(WithExisting _we = WithExisting::Trust, ProgressCallback const& _pc = ProgressCallback()): CanonBlockChain(std::string(), _we, _pc) {}
CanonBlockChain(std::string const& _path, WithExisting _we = WithExisting::Trust, ProgressCallback const& _pc = ProgressCallback());
~CanonBlockChain() {}
/// @returns the genesis block header.

libethereum/Client.cpp (4)

@@ -120,7 +120,7 @@ void BasicGasPricer::update(BlockChain const& _bc)
Client::Client(p2p::Host* _extNet, std::string const& _dbPath, WithExisting _forceAction, u256 _networkId, int _miners):
Worker("eth"),
m_vc(_dbPath),
m_bc(_dbPath, max(m_vc.action(), _forceAction)),
m_bc(_dbPath, max(m_vc.action(), _forceAction), [](unsigned d, unsigned t){ cerr << "REVISING BLOCKCHAIN: Processed " << d << " of " << t << "..." << endl; }),
m_gp(new TrivialGasPricer),
m_stateDB(State::openDB(_dbPath, max(m_vc.action(), _forceAction))),
m_preMine(Address(), m_stateDB),
@@ -145,7 +145,7 @@ Client::Client(p2p::Host* _extNet, std::shared_ptr<GasPricer> _gp, std::string const& _dbPath, WithExisting _for
Client::Client(p2p::Host* _extNet, std::shared_ptr<GasPricer> _gp, std::string const& _dbPath, WithExisting _forceAction, u256 _networkId, int _miners):
Worker("eth"),
m_vc(_dbPath),
m_bc(_dbPath, max(m_vc.action(), _forceAction)),
m_bc(_dbPath, max(m_vc.action(), _forceAction), [](unsigned d, unsigned t){ cerr << "REVISING BLOCKCHAIN: Processed " << d << " of " << t << "...\r"; }),
m_gp(_gp),
m_stateDB(State::openDB(_dbPath, max(m_vc.action(), _forceAction))),
m_preMine(Address(), m_stateDB),
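The two constructors now thread a progress lambda into the chain database. A dependency-free sketch of that callback idiom, with fakeRebuild() standing in for BlockChain::rebuild() (the real routine only shares the std::function<void(unsigned, unsigned)> parameter):

#include <functional>
#include <iostream>

using ProgressCallback = std::function<void(unsigned, unsigned)>;

// Stand-in for BlockChain::rebuild(): does no real work, just reports (done, total).
void fakeRebuild(unsigned total, ProgressCallback const& progress)
{
    for (unsigned done = 0; done <= total; ++done)
        if (progress)
            progress(done, total);
}

int main()
{
    fakeRebuild(5, [](unsigned d, unsigned t) {
        std::cerr << "REVISING BLOCKCHAIN: Processed " << d << " of " << t << "...\r";
    });
    std::cerr << std::endl;
}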

libethereum/ClientBase.cpp (8)

@@ -265,7 +265,8 @@ LocalisedLogEntries ClientBase::peekWatch(unsigned _watchId) const
// cwatch << "peekWatch" << _watchId;
auto& w = m_watches.at(_watchId);
// cwatch << "lastPoll updated to " << chrono::duration_cast<chrono::seconds>(chrono::system_clock::now().time_since_epoch()).count();
w.lastPoll = chrono::system_clock::now();
if (w.lastPoll != chrono::system_clock::time_point::max())
w.lastPoll = chrono::system_clock::now();
return w.changes;
}
@@ -278,8 +279,9 @@ LocalisedLogEntries ClientBase::checkWatch(unsigned _watchId)
auto& w = m_watches.at(_watchId);
// cwatch << "lastPoll updated to " << chrono::duration_cast<chrono::seconds>(chrono::system_clock::now().time_since_epoch()).count();
std::swap(ret, w.changes);
w.lastPoll = chrono::system_clock::now();
if (w.lastPoll != chrono::system_clock::time_point::max())
w.lastPoll = chrono::system_clock::now();
return ret;
}
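A minimal sketch of the sentinel convention those two guards appear to rely on: a watch whose lastPoll sits at time_point::max() is treated as manually reaped (presumably what installWatch(_tf, Reaping::Manual) in MainWin.cpp sets up), so peeking or checking it must not overwrite the sentinel. The Watch struct here is a stand-in, not the library's watch type:

#include <chrono>

struct Watch
{
    // max() acts as the "never auto-reap" sentinel in this sketch.
    std::chrono::system_clock::time_point lastPoll = std::chrono::system_clock::time_point::max();
};

// Mirrors the guarded updates in peekWatch()/checkWatch() above.
void notePolled(Watch& w)
{
    if (w.lastPoll != std::chrono::system_clock::time_point::max())
        w.lastPoll = std::chrono::system_clock::now();
}

int main()
{
    Watch manual;              // keeps the sentinel, so age-based reaping never claims it
    notePolled(manual);
    Watch automatic;
    automatic.lastPoll = std::chrono::system_clock::now();
    notePolled(automatic);     // refreshed as before
}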

libethereum/State.cpp (35)

@@ -43,6 +43,7 @@ using namespace dev;
using namespace dev::eth;
#define ctrace clog(StateTrace)
#define ETH_TIMED_ENACTMENTS 0
static const u256 c_blockReward = 1500 * finney;
@@ -353,16 +354,48 @@ bool State::sync(BlockChain const& _bc, h256 _block, BlockInfo const& _bi)
u256 State::enactOn(bytesConstRef _block, BlockInfo const& _bi, BlockChain const& _bc)
{
#if ETH_TIMED_ENACTMENTS
boost::timer t;
double populateVerify;
double populateGrand;
double syncReset;
double enactment;
#endif
// Check family:
BlockInfo biParent(_bc.block(_bi.parentHash));
_bi.verifyParent(biParent);
#if ETH_TIMED_ENACTMENTS
populateVerify = t.elapsed();
t.restart();
#endif
BlockInfo biGrandParent;
if (biParent.number)
biGrandParent.populate(_bc.block(biParent.parentHash));
#if ETH_TIMED_ENACTMENTS
populateGrand = t.elapsed();
t.restart();
#endif
sync(_bc, _bi.parentHash);
resetCurrent();
#if ETH_TIMED_ENACTMENTS
syncReset = t.elapsed();
t.restart();
#endif
m_previousBlock = biParent;
return enact(_block, _bc);
auto ret = enact(_block, _bc);
#if ETH_TIMED_ENACTMENTS
enactment = t.elapsed();
cnote << "popVer/popGrand/syncReset/enactment = " << populateVerify << "/" << populateGrand << "/" << syncReset << "/" << enactment;
#endif
return ret;
}
map<Address, u256> State::addresses() const
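The ETH_TIMED_ENACTMENTS block above (like ETH_TIMED_IMPORTS in BlockChain.cpp) follows a simple stopwatch-per-stage pattern with boost::timer. A self-contained sketch of the same pattern using std::chrono instead of boost, with sleeps standing in for the real populate/enact work and ETH_TIMED as a stand-in flag:

#include <chrono>
#include <iostream>
#include <thread>

#define ETH_TIMED 1

int main()
{
#if ETH_TIMED
    auto start = std::chrono::steady_clock::now();
    auto t = start;
    auto lap = [&t]() {
        auto now = std::chrono::steady_clock::now();
        double s = std::chrono::duration<double>(now - t).count();
        t = now; // mirrors boost::timer's restart()
        return s;
    };
#endif

    std::this_thread::sleep_for(std::chrono::milliseconds(10)); // stand-in for populate/verify work
#if ETH_TIMED
    double populateVerify = lap();
#endif

    std::this_thread::sleep_for(std::chrono::milliseconds(20)); // stand-in for enactment work
#if ETH_TIMED
    double enactment = lap();
    std::cout << "popVer/enactment = " << populateVerify << "/" << enactment << "\n";
    std::cout << "total = " << std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count() << "\n";
#endif
}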

libweb3jsonrpc/WebThreeStubServer.cpp (5)

@@ -44,6 +44,11 @@ WebThreeStubServer::WebThreeStubServer(jsonrpc::AbstractServerConnector& _conn,
ldb::DB::Open(o, path, &m_db);
}
std::string WebThreeStubServer::web3_clientVersion()
{
return m_web3.clientVersion();
}
dev::eth::Interface* WebThreeStubServer::client()
{
return m_web3.ethereum();

libweb3jsonrpc/WebThreeStubServer.h (4)

@@ -42,7 +42,9 @@ class WebThreeStubServer: public dev::WebThreeStubServerBase, public dev::WebThr
{
public:
WebThreeStubServer(jsonrpc::AbstractServerConnector& _conn, dev::WebThreeDirect& _web3, std::vector<dev::KeyPair> const& _accounts);
virtual std::string web3_clientVersion();
private:
virtual dev::eth::Interface* client() override;
virtual std::shared_ptr<dev::shh::Interface> face() override;

libwebthree/WebThree.h (2)

@@ -127,6 +127,8 @@ public:
// Misc stuff:
std::string const& clientVersion() const { return m_clientVersion; }
void setClientVersion(std::string const& _name) { m_clientVersion = _name; }
// Network stuff:
