
Merge remote-tracking branch 'upstream/develop' into addTests

cl-refactor
CJentzsch committed 10 years ago
commit 36f3cba0c2
  1. alethzero/MainWin.cpp (6 changes)
  2. ethminer/MinerAux.h (12 changes)
  3. evmjit/libevmjit-cpp/Env.cpp (10 changes)
  4. libdevcore/TransientDirectory.cpp (4 changes)
  5. libethash-cl/ethash_cl_miner.cpp (65 changes)
  6. libethash-cl/ethash_cl_miner.h (1 change)
  7. libethash/internal.c (13 changes)
  8. libethash/io.c (21 changes)
  9. libethash/io.h (17 changes)
  10. libethcore/Ethash.cpp (5 changes)
  11. libethcore/Ethash.h (2 changes)
  12. libethcore/EthashAux.cpp (5 changes)
  13. libethereum/Client.cpp (3 changes)
  14. libethereum/EthereumHost.cpp (138 changes)
  15. libethereum/EthereumHost.h (9 changes)
  16. libethereum/EthereumPeer.cpp (20 changes)
  17. libp2p/HostCapability.cpp (6 changes)
  18. libp2p/HostCapability.h (4 changes)
  19. libp2p/Session.cpp (10 changes)
  20. libp2p/UDP.h (2 changes)
  21. libweb3jsonrpc/WebThreeStubServerBase.cpp (12 changes)
  22. libwhisper/Common.cpp (24 changes)
  23. libwhisper/Common.h (28 changes)
  24. libwhisper/Interface.h (24 changes)
  25. libwhisper/Message.cpp (20 changes)
  26. libwhisper/Message.h (20 changes)
  27. libwhisper/WhisperHost.cpp (4 changes)
  28. libwhisper/WhisperHost.h (6 changes)
  29. test/libethereum/TransactionTestsFiller/ttTransactionTestFiller.json (20 changes)
  30. test/libwhisper/whisperMessage.cpp (8 changes)

alethzero/MainWin.cpp (6 changes)

@ -1803,7 +1803,7 @@ void Main::on_accounts_doubleClicked()
}
}
static shh::FullTopic topicFromText(QString _s)
static shh::Topics topicFromText(QString _s)
{
shh::BuildTopic ret;
while (_s.size())
@ -2187,10 +2187,10 @@ void Main::refreshWhispers()
shh::Envelope const& e = w.second;
shh::Message m;
for (pair<Public, Secret> const& i: m_server->ids())
if (!!(m = e.open(shh::FullTopic(), i.second)))
if (!!(m = e.open(shh::Topics(), i.second)))
break;
if (!m)
m = e.open(shh::FullTopic());
m = e.open(shh::Topics());
QString msg;
if (m.from())

ethminer/MinerAux.h (12 changes)

@ -171,8 +171,16 @@ public:
m_minerType = MinerType::CPU;
else if (arg == "-G" || arg == "--opencl")
{
m_minerType = MinerType::GPU;
miningThreads = 1;
if (!ProofOfWork::GPUMiner::haveSufficientGPUMemory())
{
cout << "No GPU device with sufficient memory was found. Defaulting to CPU" << endl;
m_minerType = MinerType::CPU;
}
else
{
m_minerType = MinerType::GPU;
miningThreads = 1;
}
}
else if (arg == "--no-precompute")
{
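The `-G` branch above now degrades gracefully instead of assuming a usable GPU. Below is a minimal standalone sketch of that same decision, not code from this commit; the stub stands in for ProofOfWork::GPUMiner::haveSufficientGPUMemory(), the libethcore hook added later in this diff.

#include <iostream>

enum class MinerType { CPU, GPU };

// Stand-in for ProofOfWork::GPUMiner::haveSufficientGPUMemory(); the real check
// queries CL_DEVICE_GLOBAL_MEM_SIZE against the 2,000,000,000-byte minimum.
static bool haveSufficientGPUMemoryStub() { return false; }

int main()
{
    MinerType minerType = MinerType::CPU;
    unsigned miningThreads = 0;
    if (!haveSufficientGPUMemoryStub())
        std::cout << "No GPU device with sufficient memory was found. Defaulting to CPU" << std::endl;
    else
    {
        minerType = MinerType::GPU;
        miningThreads = 1;
    }
    std::cout << (minerType == MinerType::GPU ? "GPU" : "CPU") << " mining, " << miningThreads << " thread(s)\n";
}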

evmjit/libevmjit-cpp/Env.cpp (10 changes)

@ -54,7 +54,7 @@ extern "C"
if (_env->balance(_env->myAddress) >= endowment && _env->depth < 1024)
{
u256 gas = *io_gas;
h256 address(_env->create(endowment, gas, {_initBeg, _initSize}, {}), h256::AlignRight);
h256 address(_env->create(endowment, gas, {_initBeg, (size_t)_initSize}, {}), h256::AlignRight);
*io_gas = static_cast<int64_t>(gas);
*o_address = address;
}
@ -69,8 +69,8 @@ extern "C"
params.senderAddress = _env->myAddress;
params.receiveAddress = right160(*_receiveAddress);
params.codeAddress = right160(*_codeAddress);
params.data = {_inBeg, _inSize};
params.out = {_outBeg, _outSize};
params.data = {_inBeg, (size_t)_inSize};
params.out = {_outBeg, (size_t)_outSize};
params.onOp = {};
const auto isCall = params.receiveAddress == params.codeAddress; // OPT: The same address pointer can be used if not CODECALL
@ -102,7 +102,7 @@ extern "C"
EXPORT void env_sha3(byte* _begin, uint64_t _size, h256* o_hash)
{
auto hash = sha3({_begin, _size});
auto hash = sha3({_begin, (size_t)_size});
*o_hash = hash;
}
@ -130,7 +130,7 @@ extern "C"
if (_topic4)
topics.push_back(*_topic4);
_env->log(std::move(topics), {_beg, _size});
_env->log(std::move(topics), {_beg, (size_t)_size});
}
}
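The added (size_t) casts address brace-initialization: the EVMJIT callbacks pass sizes as uint64_t, and list-initializing a {pointer, size} view rejects the narrowing conversion where size_t is 32 bits wide. A reduced, self-contained illustration; SpanLike is only a stand-in for the bytesConstRef-style types used here.

#include <cstddef>
#include <cstdint>

struct SpanLike { unsigned char const* data; std::size_t size; };  // stand-in for bytesConstRef

void use(unsigned char const* begin, uint64_t size)
{
    // SpanLike v{begin, size};               // narrowing error on 32-bit targets
    SpanLike v{begin, (std::size_t)size};     // explicit cast, mirroring the change above
    (void)v;
}

int main() {}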

libdevcore/TransientDirectory.cpp (4 changes)

@ -46,7 +46,7 @@ TransientDirectory::~TransientDirectory()
{
boost::system::error_code ec;
boost::filesystem::remove_all(m_path, ec);
if (0 == ec)
if (!ec)
return;
// In some cases, antivirus running on Windows will scan all the newly created directories.
@ -57,6 +57,6 @@ TransientDirectory::~TransientDirectory()
ec.clear();
boost::filesystem::remove_all(m_path, ec);
if (ec != 0)
if (!ec)
cwarn << "Failed to delete directory '" << m_path << "': " << ec.message();
}
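The `0 == ec` and `ec != 0` comparisons above are replaced because boost::system::error_code is meant to be tested through its bool conversion: a default (zero) code means success and converts to false. A small self-contained check of the pattern, assuming only that Boost.Filesystem is available:

#include <boost/filesystem.hpp>
#include <boost/system/error_code.hpp>
#include <iostream>

int main()
{
    boost::system::error_code ec;
    boost::filesystem::remove_all("nonexistent-temp-dir", ec);
    if (!ec)                                  // no error reported
        std::cout << "removed (or nothing to remove)" << std::endl;
    else
        std::cout << "failed: " << ec.message() << std::endl;
}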

libethash-cl/ethash_cl_miner.cpp (65 changes)

@ -35,6 +35,7 @@
#include "ethash_cl_miner_kernel.h"
#define ETHASH_BYTES 32
#define ETHASH_CL_MINIMUM_MEMORY 2000000000
// workaround lame platforms
#if !CL_VERSION_1_2
@ -47,6 +48,9 @@
using namespace std;
// TODO: If at any point we can use libdevcore in here then we should switch to using a LogChannel
#define ETHCL_LOG(_contents) cout << "[OPENCL]:" << _contents << endl
static void add_definition(std::string& source, char const* id, unsigned value)
{
char buf[256];
@ -72,7 +76,7 @@ std::string ethash_cl_miner::platform_info(unsigned _platformId, unsigned _devic
cl::Platform::get(&platforms);
if (platforms.empty())
{
cout << "No OpenCL platforms found." << endl;
ETHCL_LOG("No OpenCL platforms found.");
return std::string();
}
@ -82,7 +86,7 @@ std::string ethash_cl_miner::platform_info(unsigned _platformId, unsigned _devic
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
cout << "No OpenCL devices found." << endl;
ETHCL_LOG("No OpenCL devices found.");
return std::string();
}
@ -107,7 +111,7 @@ unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
cl::Platform::get(&platforms);
if (platforms.empty())
{
cout << "No OpenCL platforms found." << endl;
ETHCL_LOG("No OpenCL platforms found.");
return 0;
}
@ -116,12 +120,53 @@ unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
cout << "No OpenCL devices found." << endl;
ETHCL_LOG("No OpenCL devices found.");
return 0;
}
return devices.size();
}
bool ethash_cl_miner::haveSufficientGPUMemory(unsigned _platformId)
{
std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
ETHCL_LOG("No OpenCL platforms found.");
return false;
}
std::vector<cl::Device> devices;
unsigned platform_num = std::min<unsigned>(_platformId, platforms.size() - 1);
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
ETHCL_LOG("No OpenCL devices found.");
return false;
}
for (cl::Device const& device: devices)
{
cl_ulong result;
device.getInfo(CL_DEVICE_GLOBAL_MEM_SIZE, &result);
if (result >= ETHASH_CL_MINIMUM_MEMORY)
{
ETHCL_LOG(
"Found suitable OpenCL device [" << device.getInfo<CL_DEVICE_NAME>()
<< "] with " << result << " bytes of GPU memory"
);
return true;
}
else
ETHCL_LOG(
"OpenCL device " << device.getInfo<CL_DEVICE_NAME>()
<< " has insufficient GPU memory." << result <<
" bytes of memory found < " << ETHASH_CL_MINIMUM_MEMORY << " bytes of memory required"
);
}
return false;
}
void ethash_cl_miner::finish()
{
if (m_queue())
@ -135,7 +180,7 @@ bool ethash_cl_miner::init(uint8_t const* _dag, uint64_t _dagSize, unsigned work
cl::Platform::get(&platforms);
if (platforms.empty())
{
cout << "No OpenCL platforms found." << endl;
ETHCL_LOG("No OpenCL platforms found.");
return false;
}
@ -143,25 +188,25 @@ bool ethash_cl_miner::init(uint8_t const* _dag, uint64_t _dagSize, unsigned work
_platformId = std::min<unsigned>(_platformId, platforms.size() - 1);
cout << "Using platform: " << platforms[_platformId].getInfo<CL_PLATFORM_NAME>().c_str() << endl;
ETHCL_LOG("Using platform: " << platforms[_platformId].getInfo<CL_PLATFORM_NAME>().c_str());
// get GPU device of the default platform
std::vector<cl::Device> devices;
platforms[_platformId].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
cout << "No OpenCL devices found." << endl;
ETHCL_LOG("No OpenCL devices found.");
return false;
}
// use selected device
cl::Device& device = devices[std::min<unsigned>(_deviceId, devices.size() - 1)];
std::string device_version = device.getInfo<CL_DEVICE_VERSION>();
cout << "Using device: " << device.getInfo<CL_DEVICE_NAME>().c_str() << "(" << device_version.c_str() << ")" << endl;
ETHCL_LOG("Using device: " << device.getInfo<CL_DEVICE_NAME>().c_str() << "(" << device_version.c_str() << ")");
if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0)
{
cout << "OpenCL 1.0 is not supported." << endl;
ETHCL_LOG("OpenCL 1.0 is not supported.");
return false;
}
if (strncmp("OpenCL 1.1", device_version.c_str(), 10) == 0)
@ -193,7 +238,7 @@ bool ethash_cl_miner::init(uint8_t const* _dag, uint64_t _dagSize, unsigned work
}
catch (cl::Error err)
{
cout << program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str();
ETHCL_LOG(program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str());
return false;
}
m_hash_kernel = cl::Kernel(program, "ethash_hash");

libethash-cl/ethash_cl_miner.h (1 change)

@ -35,6 +35,7 @@ public:
static unsigned get_num_platforms();
static unsigned get_num_devices(unsigned _platformId = 0);
static std::string platform_info(unsigned _platformId = 0, unsigned _deviceId = 0);
static bool haveSufficientGPUMemory(unsigned _platformId = 0);
bool init(uint8_t const* _dag, uint64_t _dagSize, unsigned workgroup_size = 64, unsigned _platformId = 0, unsigned _deviceId = 0);
void finish();
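A possible call site for the new static query, as a sketch rather than code from this commit; it only assumes the header above is on the include path and that platform 0 is the intended default, matching the declaration's default argument.

#include <iostream>
#include "ethash_cl_miner.h"

int main()
{
    // Returns true only if some OpenCL device reports at least
    // ETHASH_CL_MINIMUM_MEMORY (2,000,000,000 bytes) of global memory.
    if (!ethash_cl_miner::haveSufficientGPUMemory(0))
        std::cout << "No suitable GPU found; fall back to the CPU miner." << std::endl;
    return 0;
}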

libethash/internal.c (13 changes)

@ -364,6 +364,7 @@ static bool ethash_mmap(struct ethash_full* ret, FILE* f)
{
int fd;
char* mmapped_data;
errno = 0;
ret->file = f;
if ((fd = ethash_fileno(ret->file)) == -1) {
return false;
@ -400,38 +401,48 @@ ethash_full_t ethash_full_new_internal(
ret->file_size = (size_t)full_size;
switch (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, false)) {
case ETHASH_IO_FAIL:
// ethash_io_prepare will do all ETHASH_CRITICAL() logging in fail case
goto fail_free_full;
case ETHASH_IO_MEMO_MATCH:
if (!ethash_mmap(ret, f)) {
ETHASH_CRITICAL("mmap failure()");
goto fail_close_file;
}
return ret;
case ETHASH_IO_MEMO_SIZE_MISMATCH:
// if a DAG of same filename but unexpected size is found, silently force new file creation
if (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, true) != ETHASH_IO_MEMO_MISMATCH) {
ETHASH_CRITICAL("Could not recreate DAG file after finding existing DAG with unexpected size.");
goto fail_free_full;
}
// fallthrough to the mismatch case here, DO NOT go through match
case ETHASH_IO_MEMO_MISMATCH:
if (!ethash_mmap(ret, f)) {
ETHASH_CRITICAL("mmap failure()");
goto fail_close_file;
}
break;
}
if (!ethash_compute_full_data(ret->data, full_size, light, callback)) {
ETHASH_CRITICAL("Failure at computing DAG data.");
goto fail_free_full_data;
}
// after the DAG has been filled, we finalize it by writing the magic number at the beginning
if (fseek(f, 0, SEEK_SET) != 0) {
ETHASH_CRITICAL("Could not seek to DAG file start to write magic number.");
goto fail_free_full_data;
}
uint64_t const magic_num = ETHASH_DAG_MAGIC_NUM;
if (fwrite(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
ETHASH_CRITICAL("Could not write magic number to DAG's beginning.");
goto fail_free_full_data;
}
if (fflush(f) != 0) {// make sure the magic number IS there
ETHASH_CRITICAL("Could not flush memory mapped data to DAG file. Insufficient space?");
goto fail_free_full_data;
}
fflush(f); // make sure the magic number IS there
return ret;
fail_free_full_data:

libethash/io.c (21 changes)

@ -21,6 +21,7 @@
#include "io.h"
#include <string.h>
#include <stdio.h>
#include <errno.h>
enum ethash_io_rc ethash_io_prepare(
char const* dirname,
@ -32,15 +33,19 @@ enum ethash_io_rc ethash_io_prepare(
{
char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
enum ethash_io_rc ret = ETHASH_IO_FAIL;
// reset errno before io calls
errno = 0;
// assert directory exists
if (!ethash_mkdir(dirname)) {
ETHASH_CRITICAL("Could not create the ethash directory");
goto end;
}
ethash_io_mutable_name(ETHASH_REVISION, &seedhash, mutable_name);
char* tmpfile = ethash_io_create_filename(dirname, mutable_name, strlen(mutable_name));
if (!tmpfile) {
ETHASH_CRITICAL("Could not create the full DAG pathname");
goto end;
}
@ -52,6 +57,7 @@ enum ethash_io_rc ethash_io_prepare(
size_t found_size;
if (!ethash_file_size(f, &found_size)) {
fclose(f);
ETHASH_CRITICAL("Could not query size of DAG file: \"%s\"", tmpfile);
goto free_memo;
}
if (file_size != found_size - ETHASH_DAG_MAGIC_NUM_SIZE) {
@ -64,6 +70,7 @@ enum ethash_io_rc ethash_io_prepare(
if (fread(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
// I/O error
fclose(f);
ETHASH_CRITICAL("Could not read from DAG file: \"%s\"", tmpfile);
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
goto free_memo;
}
@ -80,15 +87,25 @@ enum ethash_io_rc ethash_io_prepare(
// file does not exist, will need to be created
f = ethash_fopen(tmpfile, "wb+");
if (!f) {
ETHASH_CRITICAL("Could not create DAG file: \"%s\"", tmpfile);
goto free_memo;
}
// make sure it's of the proper size
if (fseek(f, (long int)(file_size + ETHASH_DAG_MAGIC_NUM_SIZE - 1), SEEK_SET) != 0) {
fclose(f);
ETHASH_CRITICAL("Could not seek to the end of DAG file: \"%s\". Insufficient space?", tmpfile);
goto free_memo;
}
if (fputc('\n', f) == EOF) {
fclose(f);
ETHASH_CRITICAL("Could not write in the end of DAG file: \"%s\". Insufficient space?", tmpfile);
goto free_memo;
}
if (fflush(f) != 0) {
fclose(f);
ETHASH_CRITICAL("Could not flush at end of DAG file: \"%s\". Insufficient space?", tmpfile);
goto free_memo;
}
fputc('\n', f);
fflush(f);
ret = ETHASH_IO_MEMO_MISMATCH;
goto set_file;

libethash/io.h (17 changes)

@ -54,6 +54,23 @@ enum ethash_io_rc {
#define snprintf(...) sprintf_s(__VA_ARGS__)
#endif
/**
* Logs a critical error in important parts of ethash. Should mostly help
* figure out what kind of problem (I/O, memory e.t.c.) causes a NULL
* ethash_full_t
*/
#ifdef ETHASH_PRINT_CRITICAL_OUTPUT
#define ETHASH_CRITICAL(...) \
do \
{ \
printf("ETHASH CRITICAL ERROR: "__VA_ARGS__); \
printf("\n"); \
fflush(stdout); \
} while (0)
#else
#define ETHASH_CRITICAL(...)
#endif
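One detail worth noting about the macro above: the format string is glued to the "ETHASH CRITICAL ERROR: " prefix by adjacent string-literal concatenation, so the first argument must itself be a string literal. A self-contained stand-in with the same shape as the enabled branch (the DAG file name is hypothetical):

#include <stdio.h>

// Stand-in mirroring ETHASH_CRITICAL when ETHASH_PRINT_CRITICAL_OUTPUT is defined.
#define CRITICAL_STUB(...) \
    do \
    { \
        printf("ETHASH CRITICAL ERROR: " __VA_ARGS__); \
        printf("\n"); \
        fflush(stdout); \
    } while (0)

int main(void)
{
    const char* tmpfile = "full-R23-0123456789abcdef";  // hypothetical DAG file name
    CRITICAL_STUB("Could not create DAG file: \"%s\"", tmpfile);
    return 0;
}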
/**
* Prepares io for ethash
*

libethcore/Ethash.cpp (5 changes)

@ -363,6 +363,11 @@ void Ethash::GPUMiner::pause()
stopWorking();
}
bool Ethash::GPUMiner::haveSufficientGPUMemory()
{
return ethash_cl_miner::haveSufficientGPUMemory(s_platformId);
}
std::string Ethash::GPUMiner::platformInfo()
{
return ethash_cl_miner::platform_info(s_platformId, s_deviceId);

libethcore/Ethash.h (2 changes)

@ -87,6 +87,7 @@ public:
static unsigned instances() { return s_numInstances > 0 ? s_numInstances : std::thread::hardware_concurrency(); }
static std::string platformInfo();
static bool haveSufficientGPUMemory() { return false; }
static void setDefaultPlatform(unsigned) {}
static void setDefaultDevice(unsigned) {}
static void setNumInstances(unsigned _instances) { s_numInstances = std::min<unsigned>(_instances, std::thread::hardware_concurrency()); }
@ -115,6 +116,7 @@ public:
static unsigned instances() { return s_numInstances > 0 ? s_numInstances : 1; }
static std::string platformInfo();
static bool haveSufficientGPUMemory();
static unsigned getNumDevices();
static void setDefaultPlatform(unsigned _id) { s_platformId = _id; }
static void setDefaultDevice(unsigned _id) { s_deviceId = _id; }

libethcore/EthashAux.cpp (5 changes)

@ -137,7 +137,10 @@ EthashAux::FullAllocation::FullAllocation(ethash_light_t _light, ethash_callback
full = ethash_full_new(_light, _cb);
// cdebug << "Called OK.";
if (!full)
BOOST_THROW_EXCEPTION(ExternalFunctionFailure("ethash_full_new()"));
{
clog(DAGChannel) << "DAG Generation Failure. Reason: " << strerror(errno);
BOOST_THROW_EXCEPTION(ExternalFunctionFailure("ethash_full_new"));
}
}
EthashAux::FullAllocation::~FullAllocation()
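The new failure path above leans on the errno groundwork added in libethash (errno is reset before the I/O calls), so the C++ side can explain a NULL ethash_full_t with strerror. A reduced model of that pattern, with a stub standing in for ethash_full_new:

#include <cerrno>
#include <cstring>
#include <iostream>

// Stub standing in for ethash_full_new(); a real failure would leave errno set
// by the underlying fopen/fseek/mmap call.
static void* dagStub() { errno = ENOSPC; return nullptr; }

int main()
{
    errno = 0;
    if (!dagStub())
        std::cerr << "DAG Generation Failure. Reason: " << std::strerror(errno) << std::endl;
}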

libethereum/Client.cpp (3 changes)

@ -269,9 +269,11 @@ void Client::killChain()
{
WriteGuard l(x_postMine);
WriteGuard l2(x_preMine);
WriteGuard l3(x_working);
m_preMine = State();
m_postMine = State();
m_working = State();
m_stateDB = OverlayDB();
m_stateDB = State::openDB(Defaults::dbPath(), WithExisting::Kill);
@ -284,6 +286,7 @@ void Client::killChain()
if (auto h = m_host.lock())
h->reset();
startedWorking();
doWork();
startWorking();

libethereum/EthereumHost.cpp (138 changes)

@ -91,7 +91,7 @@ void EthereumHost::doWork()
bool netChange = ensureInitialised();
auto h = m_chain.currentHash();
// If we've finished our initial sync (including getting all the blocks into the chain so as to reduce invalid transactions), start trading transactions & blocks
if (!isSyncing() && m_chain.isKnown(m_latestBlockSent))
if (isSyncing() && m_chain.isKnown(m_latestBlockSent))
{
if (m_newTransactions)
{
@ -120,7 +120,7 @@ void EthereumHost::maintainTransactions()
for (auto const& i: ts)
{
bool unsent = !m_transactionsSent.count(i.first);
for (auto const& p: randomSelection(0, [&](EthereumPeer* p) { return p->m_requireTransactions || (unsent && !p->m_knownTransactions.count(i.first)); }).second)
for (auto const& p: get<1>(randomSelection(0, [&](EthereumPeer* p) { return p->m_requireTransactions || (unsent && !p->m_knownTransactions.count(i.first)); })))
peerTransactions[p].push_back(i.first);
}
for (auto const& t: ts)
@ -165,24 +165,32 @@ void EthereumHost::forEachPeerPtr(std::function<void(std::shared_ptr<EthereumPee
_f(s.first->cap<EthereumPeer>(c_oldProtocolVersion));
}
pair<vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<EthereumPeer>>> EthereumHost::randomSelection(unsigned _percent, std::function<bool(EthereumPeer*)> const& _allow)
tuple<vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<Session>>> EthereumHost::randomSelection(unsigned _percent, std::function<bool(EthereumPeer*)> const& _allow)
{
pair<vector<shared_ptr<EthereumPeer>>, vector<shared_ptr<EthereumPeer>>> ret;
forEachPeerPtr([&](shared_ptr<EthereumPeer> _p)
vector<shared_ptr<EthereumPeer>> chosen;
vector<shared_ptr<EthereumPeer>> allowed;
vector<shared_ptr<Session>> sessions;
auto const& ps = peerSessions();
allowed.reserve(ps.size());
for (auto const& j: ps)
{
if (_p && _allow(_p.get()))
ret.second.push_back(_p);
});
auto pp = j.first->cap<EthereumPeer>();
if (_allow(pp.get()))
{
allowed.push_back(move(pp));
sessions.push_back(move(j.first));
}
}
size_t size = (ret.second.size() * _percent + 99) / 100;
ret.second.reserve(size);
for (unsigned i = size; i-- && ret.second.size();)
chosen.reserve((ps.size() * _percent + 99) / 100);
for (unsigned i = (ps.size() * _percent + 99) / 100; i-- && allowed.size();)
{
unsigned n = rand() % ret.second.size();
ret.first.push_back(std::move(ret.second[n]));
ret.second.erase(ret.second.begin() + n);
unsigned n = rand() % allowed.size();
chosen.push_back(std::move(allowed[n]));
allowed.erase(allowed.begin() + n);
}
return ret;
return make_tuple(move(chosen), move(allowed), move(sessions));
}
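For readers following the call sites below: the selection itself is unchanged in spirit; only the return shape grew a third element (the matching sessions), so callers switch from .first/.second to get<0>/get<1>. A reduced, self-contained model of the ceil(_percent%) draw, with int standing in for the peer pointers:

#include <cstdlib>
#include <tuple>
#include <utility>
#include <vector>

// Pick roughly _percent% (rounded up) of the allowed set at random; return both
// the chosen items and whatever remains, mirroring get<0>/get<1> at the call sites.
std::tuple<std::vector<int>, std::vector<int>> randomSelectionModel(std::vector<int> allowed, unsigned _percent)
{
    std::vector<int> chosen;
    std::size_t size = (allowed.size() * _percent + 99) / 100;
    chosen.reserve(size);
    for (std::size_t i = size; i-- && allowed.size();)
    {
        unsigned n = std::rand() % allowed.size();
        chosen.push_back(allowed[n]);
        allowed.erase(allowed.begin() + n);
    }
    return std::make_tuple(std::move(chosen), std::move(allowed));
}

int main()
{
    auto r = randomSelectionModel({1, 2, 3, 4, 5, 6, 7, 8}, 25);
    return (int)std::get<0>(r).size();  // 2 of 8 chosen at 25%
}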
void EthereumHost::maintainBlocks(h256 const& _currentHash)
@ -200,7 +208,7 @@ void EthereumHost::maintainBlocks(h256 const& _currentHash)
h256s blocks = get<0>(m_chain.treeRoute(m_latestBlockSent, _currentHash, false, false, true));
auto s = randomSelection(25, [&](EthereumPeer* p){ DEV_GUARDED(p->x_knownBlocks) return !p->m_knownBlocks.count(_currentHash); return false; });
for (shared_ptr<EthereumPeer> const& p: s.first)
for (shared_ptr<EthereumPeer> const& p: get<0>(s))
for (auto const& b: blocks)
{
RLPStream ts;
@ -210,7 +218,7 @@ void EthereumHost::maintainBlocks(h256 const& _currentHash)
p->sealAndSend(ts);
p->m_knownBlocks.clear();
}
for (shared_ptr<EthereumPeer> const& p: s.second)
for (shared_ptr<EthereumPeer> const& p: get<1>(s))
{
RLPStream ts;
p->prep(ts, NewBlockHashesPacket, blocks.size());
@ -241,7 +249,6 @@ void EthereumHost::onPeerStatus(EthereumPeer* _peer)
_peer->disable("Peer banned for previous bad behaviour.");
else
{
_peer->m_protocolVersion = EthereumHost::c_oldProtocolVersion; //force V60 for now
if (_peer->m_protocolVersion != protocolVersion())
estimatePeerHashes(_peer);
else if (_peer->m_latestBlockNumber > m_chain.number())
@ -283,6 +290,7 @@ void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool
unsigned knowns = 0;
unsigned unknowns = 0;
h256s neededBlocks;
bool syncByNumber = !m_syncingLatestHash;
for (unsigned i = 0; i < _hashes.size(); ++i)
{
_peer->addRating(1);
@ -290,10 +298,14 @@ void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool
auto status = m_bq.blockStatus(h);
if (status == QueueStatus::Importing || status == QueueStatus::Ready || m_chain.isKnown(h))
{
clog(NetMessageSummary) << "block hash ready:" << h << ". Start blocks download...";
m_hashes += neededBlocks;
onPeerDoneHashes(_peer, true);
return;
clog(NetMessageSummary) << "Block hash already known:" << h;
if (!syncByNumber)
{
m_hashes += neededBlocks;
clog(NetMessageSummary) << "Start blocks download...";
onPeerDoneHashes(_peer, true);
return;
}
}
else if (status == QueueStatus::Bad)
{
@ -308,65 +320,25 @@ void EthereumHost::onPeerHashes(EthereumPeer* _peer, h256s const& _hashes, bool
}
else
knowns++;
m_syncingLatestHash = h;
}
m_hashes += neededBlocks;
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns; now at" << m_syncingLatestHash;
if (_complete)
{
m_needSyncBlocks = true;
continueSync(_peer);
if (!syncByNumber)
m_syncingLatestHash = h;
}
else if (m_hashes.size() > _peer->m_expectedHashes)
if (syncByNumber)
{
_peer->disable("Too many hashes");
m_hashes.clear();
m_syncingLatestHash = h256();
continueSync(); ///Try with some other peer, keep the chain
m_man.appendToChain(neededBlocks); // Append to download manager immediately
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns";
}
else
continueSync(_peer); /// Grab next hashes
}
void EthereumHost::onPeerHashes(EthereumPeer* _peer, unsigned /*_index*/, h256s const& _hashes)
{
Guard l(x_sync);
assert(_peer->m_asking == Asking::Nothing);
if (_hashes.empty())
{
onPeerDoneHashes(_peer, true);
return;
m_hashes += neededBlocks; // Append to local list
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns; now at" << m_syncingLatestHash;
}
unsigned knowns = 0;
unsigned unknowns = 0;
h256s neededBlocks;
for (unsigned i = 0; i < _hashes.size(); ++i)
if (_complete)
{
_peer->addRating(1);
auto h = _hashes[i];
auto status = m_bq.blockStatus(h);
if (status == QueueStatus::Importing || status == QueueStatus::Ready || m_chain.isKnown(h))
{
clog(NetWarn) << "block hash already known:" << h;
}
else if (status == QueueStatus::Bad)
{
clog(NetWarn) << "block hash bad!" << h << ". Bailing...";
_peer->setIdle();
return;
}
else if (status == QueueStatus::Unknown)
{
unknowns++;
neededBlocks.push_back(h);
}
else
knowns++;
m_needSyncBlocks = true;
continueSync(_peer);
}
m_man.appendToChain(neededBlocks);
clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns";
if (m_hashMan.isComplete())
else if (syncByNumber && m_hashMan.isComplete())
{
// Done our chain-get.
m_needSyncHashes = false;
@ -376,8 +348,15 @@ void EthereumHost::onPeerHashes(EthereumPeer* _peer, unsigned /*_index*/, h256s
m_hashMan.reset(m_chain.number() + 1);
continueSync();
}
else if (m_hashes.size() > _peer->m_expectedHashes)
{
_peer->disable("Too many hashes");
m_hashes.clear();
m_syncingLatestHash = h256();
continueSync(); ///Try with some other peer, keep the chain
}
else
continueSync(_peer);
continueSync(_peer); /// Grab next hashes
}
void EthereumHost::onPeerDoneHashes(EthereumPeer* _peer, bool _localChain)
@ -471,7 +450,7 @@ void EthereumHost::onPeerBlocks(EthereumPeer* _peer, RLP const& _r)
void EthereumHost::onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes)
{
Guard l(x_sync);
if (_peer->m_asking != Asking::Nothing)
if (isSyncing_UNSAFE())
{
clog(NetMessageSummary) << "Ignoring new hashes since we're already downloading.";
return;
@ -483,7 +462,7 @@ void EthereumHost::onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes)
void EthereumHost::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r)
{
Guard l(x_sync);
if (_peer->m_asking != Asking::Nothing)
if (isSyncing_UNSAFE())
{
clog(NetMessageSummary) << "Ignoring new blocks since we're already downloading.";
return;
@ -525,7 +504,7 @@ void EthereumHost::onPeerNewBlock(EthereumPeer* _peer, RLP const& _r)
_peer->m_totalDifficulty = difficulty;
m_needSyncHashes = true;
m_needSyncBlocks = true;
m_syncingLatestHash = _peer->m_latestHash;
m_syncingLatestHash = h;
sync = true;
}
}
@ -646,9 +625,10 @@ bool EthereumHost::peerShouldGrabChain(EthereumPeer* _peer) const
}
}
bool EthereumHost::isSyncing() const
bool EthereumHost::isSyncing_UNSAFE() const
{
Guard l(x_sync);
/// We need actual peer information here to handle the case when we are the first ever peer on the network to mine.
/// I.e. on a new private network the first node mining has no one to sync with and should start block propagation immediately.
bool syncing = false;
forEachPeer([&](EthereumPeer* _p)
{

libethereum/EthereumHost.h (9 changes)

@ -70,8 +70,7 @@ public:
void reset();
DownloadMan const& downloadMan() const { return m_man; }
bool isSyncing() const;
bool isSyncing() const { Guard l(x_sync); return isSyncing_UNSAFE(); }
bool isBanned(p2p::NodeId _id) const { return !!m_banned.count(_id); }
void noteNewTransactions() { m_newTransactions = true; }
@ -82,7 +81,6 @@ public:
void onPeerNewBlock(EthereumPeer* _peer, RLP const& _r); ///< Called by peer once it has new blocks
void onPeerNewHashes(EthereumPeer* _peer, h256s const& _hashes); ///< Called by peer once it has new hashes
void onPeerHashes(EthereumPeer* _peer, h256s const& _hashes); ///< Called by peer once it has another sequential block of hashes during sync
void onPeerHashes(EthereumPeer* _peer, unsigned _index, h256s const& _hashes); ///< Called by peer once it has a new ordered block of hashes starting with a particular number
void onPeerTransactions(EthereumPeer* _peer, RLP const& _r); ///< Called by peer when it has new transactions
DownloadMan& downloadMan() { return m_man; }
@ -90,10 +88,13 @@ public:
BlockChain const& chain() { return m_chain; }
static unsigned const c_oldProtocolVersion;
private:
std::pair<std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<EthereumPeer>>> randomSelection(unsigned _percent = 25, std::function<bool(EthereumPeer*)> const& _allow = [](EthereumPeer const*){ return true; });
std::tuple<std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<EthereumPeer>>, std::vector<std::shared_ptr<p2p::Session>>> randomSelection(unsigned _percent = 25, std::function<bool(EthereumPeer*)> const& _allow = [](EthereumPeer const*){ return true; });
void forEachPeerPtr(std::function<void(std::shared_ptr<EthereumPeer>)> const& _f) const;
void forEachPeer(std::function<void(EthereumPeer*)> const& _f) const;
bool isSyncing_UNSAFE() const;
/// Sync with the BlockChain. It might contain one of our mined blocks, we might have new candidates from the network.
void doWork();

libethereum/EthereumPeer.cpp (20 changes)

@ -40,7 +40,6 @@ EthereumPeer::EthereumPeer(Session* _s, HostCapabilityFace* _h, unsigned _i, Cap
m_hashSub(host()->hashDownloadMan()),
m_peerCapabilityVersion(_cap.second)
{
m_peerCapabilityVersion = EthereumHost::c_oldProtocolVersion;
m_syncHashNumber = host()->chain().number() + 1;
requestStatus();
}
@ -78,7 +77,6 @@ string toString(Asking _a)
return "?";
}
void EthereumPeer::setIdle()
{
m_sub.doneFetch();
@ -88,8 +86,7 @@ void EthereumPeer::setIdle()
void EthereumPeer::requestStatus()
{
if (m_asking != Asking::Nothing)
clog(NetWarn) << "Bad state: requesting state should be the first action";
assert(m_asking == Asking::Nothing);
setAsking(Asking::State);
RLPStream s;
bool latest = m_peerCapabilityVersion == host()->protocolVersion();
@ -106,22 +103,22 @@ void EthereumPeer::requestStatus()
void EthereumPeer::requestHashes()
{
if (m_asking == Asking::Blocks)
return;
assert(m_asking == Asking::Nothing);
m_syncHashNumber = m_hashSub.nextFetch(c_maxHashesAsk);
setAsking(Asking::Hashes);
RLPStream s;
prep(s, GetBlockHashesByNumberPacket, 2) << m_syncHashNumber << c_maxHashesAsk;
clog(NetMessageDetail) << "Requesting block hashes for numbers " << m_syncHashNumber << "-" << m_syncHashNumber + c_maxHashesAsk - 1;
sealAndSend(s);
}
void EthereumPeer::requestHashes(h256 const& _lastHash)
{
if (m_asking == Asking::Blocks)
return;
assert(m_asking == Asking::Nothing);
setAsking(Asking::Hashes);
RLPStream s;
prep(s, GetBlockHashesPacket, 2) << _lastHash << c_maxHashesAsk;
clog(NetMessageDetail) << "Requesting block hashes staring from " << _lastHash;
sealAndSend(s);
}
@ -212,7 +209,7 @@ bool EthereumPeer::interpret(unsigned _id, RLP const& _r)
u256 number256 = _r[0].toInt<u256>();
unsigned number = (unsigned) number256;
unsigned limit = _r[1].toInt<unsigned>();
clog(NetMessageSummary) << "GetBlockHashesByNumber (" << number << "-" << number + limit << ")";
clog(NetMessageSummary) << "GetBlockHashesByNumber (" << number << "-" << number + limit - 1 << ")";
RLPStream s;
if (number <= host()->chain().number())
{
@ -248,11 +245,8 @@ bool EthereumPeer::interpret(unsigned _id, RLP const& _r)
m_hashSub.noteHash(m_syncHashNumber + i, 1);
}
if (m_protocolVersion == host()->protocolVersion())
host()->onPeerHashes(this, m_syncHashNumber, hashes); // V61+, report hashes by number
else
host()->onPeerHashes(this, hashes);
m_syncHashNumber += itemCount;
host()->onPeerHashes(this, hashes);
break;
}
case GetBlocksPacket:

libp2p/HostCapability.cpp (6 changes)

@ -27,15 +27,15 @@ using namespace std;
using namespace dev;
using namespace dev::p2p;
std::vector<std::pair<std::shared_ptr<Session>,std::shared_ptr<Peer>>> HostCapabilityFace::peerSessions() const
std::vector<std::pair<std::shared_ptr<Session>, std::shared_ptr<Peer>>> HostCapabilityFace::peerSessions() const
{
return peerSessions(version());
}
std::vector<std::pair<std::shared_ptr<Session>,std::shared_ptr<Peer>>> HostCapabilityFace::peerSessions(u256 const& _version) const
std::vector<std::pair<std::shared_ptr<Session>, std::shared_ptr<Peer>>> HostCapabilityFace::peerSessions(u256 const& _version) const
{
RecursiveGuard l(m_host->x_sessions);
std::vector<std::pair<std::shared_ptr<Session>,std::shared_ptr<Peer>>> ret;
std::vector<std::pair<std::shared_ptr<Session>, std::shared_ptr<Peer>>> ret;
for (auto const& i: m_host->m_sessions)
if (std::shared_ptr<Session> s = i.second.lock())
if (s->m_capabilities.count(std::make_pair(name(), _version)))

libp2p/HostCapability.h (4 changes)

@ -45,8 +45,8 @@ public:
Host* host() const { return m_host; }
std::vector<std::pair<std::shared_ptr<Session>,std::shared_ptr<Peer>>> peerSessions() const;
std::vector<std::pair<std::shared_ptr<Session>,std::shared_ptr<Peer>>> peerSessions(u256 const& _version) const;
std::vector<std::pair<std::shared_ptr<Session>, std::shared_ptr<Peer>>> peerSessions() const;
std::vector<std::pair<std::shared_ptr<Session>, std::shared_ptr<Peer>>> peerSessions(u256 const& _version) const;
protected:
virtual std::string name() const = 0;

libp2p/Session.cpp (10 changes)

@ -319,10 +319,14 @@ void Session::send(bytes&& _msg)
void Session::write()
{
const bytes& bytes = m_writeQueue[0];
m_io->writeSingleFramePacket(&bytes, m_writeQueue[0]);
bytes const* out;
DEV_GUARDED(x_writeQueue)
{
m_io->writeSingleFramePacket(&m_writeQueue[0], m_writeQueue[0]);
out = &m_writeQueue[0];
}
auto self(shared_from_this());
ba::async_write(m_socket, ba::buffer(bytes), [this, self](boost::system::error_code ec, std::size_t /*length*/)
ba::async_write(m_socket, ba::buffer(*out), [this, self](boost::system::error_code ec, std::size_t /*length*/)
{
ThreadContext tc(info().id.abridged());
ThreadContext tc2(info().clientVersion);

libp2p/UDP.h (2 changes)

@ -99,7 +99,7 @@ struct UDPSocketFace
*/
struct UDPSocketEvents
{
virtual void onDisconnected(UDPSocketFace*) {};
virtual void onDisconnected(UDPSocketFace*) {}
virtual void onReceived(UDPSocketFace*, bi::udp::endpoint const& _from, bytesConstRef _packetData) = 0;
};

libweb3jsonrpc/WebThreeStubServerBase.cpp (12 changes)

@ -267,7 +267,7 @@ static shh::Envelope toSealed(Json::Value const& _json, shh::Message const& _m,
return _m.seal(_from, bt, ttl, workToProve);
}
static pair<shh::FullTopic, Public> toWatch(Json::Value const& _json)
static pair<shh::Topics, Public> toWatch(Json::Value const& _json)
{
shh::BuildTopic bt;
Public to;
@ -985,7 +985,7 @@ string WebThreeStubServerBase::shh_newFilter(Json::Value const& _json)
try
{
pair<shh::FullTopic, Public> w = toWatch(_json);
pair<shh::Topics, Public> w = toWatch(_json);
auto ret = face()->installWatch(w.first);
m_shhWatches.insert(make_pair(ret, w.second));
return toJS(ret);
@ -1025,10 +1025,10 @@ Json::Value WebThreeStubServerBase::shh_getFilterChanges(string const& _filterId
if (pub)
{
cwarn << "Silently decrypting message from identity" << pub << ": User validation hook goes here.";
m = e.open(face()->fullTopic(id), m_shhIds[pub]);
m = e.open(face()->fullTopics(id), m_shhIds[pub]);
}
else
m = e.open(face()->fullTopic(id));
m = e.open(face()->fullTopics(id));
if (!m)
continue;
ret.append(toJson(h, e, m));
@ -1058,10 +1058,10 @@ Json::Value WebThreeStubServerBase::shh_getMessages(string const& _filterId)
if (pub)
{
cwarn << "Silently decrypting message from identity" << pub << ": User validation hook goes here.";
m = e.open(face()->fullTopic(id), m_shhIds[pub]);
m = e.open(face()->fullTopics(id), m_shhIds[pub]);
}
else
m = e.open(face()->fullTopic(id));
m = e.open(face()->fullTopics(id));
if (!m)
continue;
ret.append(toJson(h, e, m));

libwhisper/Common.cpp (24 changes)

@ -28,26 +28,26 @@ using namespace dev;
using namespace dev::p2p;
using namespace dev::shh;
CollapsedTopicPart dev::shh::collapse(FullTopicPart const& _p)
AbridgedTopic dev::shh::abridge(Topic const& _p)
{
return CollapsedTopicPart(sha3(_p));
return AbridgedTopic(sha3(_p));
}
CollapsedTopic dev::shh::collapse(FullTopic const& _fullTopic)
AbridgedTopics dev::shh::abridge(Topics const& _topics)
{
CollapsedTopic ret;
ret.reserve(_fullTopic.size());
for (auto const& ft: _fullTopic)
ret.push_back(collapse(ft));
AbridgedTopics ret;
ret.reserve(_topics.size());
for (auto const& t : _topics)
ret.push_back(abridge(t));
return ret;
}
CollapsedTopic BuildTopic::toTopic() const
AbridgedTopics BuildTopic::toAbridgedTopics() const
{
CollapsedTopic ret;
AbridgedTopics ret;
ret.reserve(m_parts.size());
for (auto const& h: m_parts)
ret.push_back(collapse(h));
ret.push_back(abridge(h));
return ret;
}
@ -71,7 +71,7 @@ bool TopicFilter::matches(Envelope const& _e) const
for (unsigned i = 0; i < t.size(); ++i)
{
for (auto et: _e.topic())
if (((t[i].first ^ et) & t[i].second) == CollapsedTopicPart())
if (((t[i].first ^ et) & t[i].second) == AbridgedTopic())
goto NEXT_TOPICPART;
// failed to match topicmask against any topics: move on to next mask
goto NEXT_TOPICMASK;
@ -89,7 +89,7 @@ TopicMask BuildTopicMask::toTopicMask() const
TopicMask ret;
ret.reserve(m_parts.size());
for (auto const& h: m_parts)
ret.push_back(make_pair(collapse(h), ~CollapsedTopicPart()));
ret.push_back(make_pair(abridge(h), ~AbridgedTopic()));
return ret;
}

libwhisper/Common.h (28 changes)

@ -60,14 +60,14 @@ enum WhisperPacket
PacketCount
};
using CollapsedTopicPart = FixedHash<4>;
using FullTopicPart = h256;
using AbridgedTopic = FixedHash<4>;
using Topic = h256;
using CollapsedTopic = std::vector<CollapsedTopicPart>;
using FullTopic = h256s;
using AbridgedTopics = std::vector<AbridgedTopic>;
using Topics = h256s;
CollapsedTopicPart collapse(FullTopicPart const& _fullTopicPart);
CollapsedTopic collapse(FullTopic const& _fullTopic);
AbridgedTopic abridge(Topic const& _topic);
AbridgedTopics abridge(Topics const& _topics);
class BuildTopic
{
@ -80,10 +80,10 @@ public:
BuildTopic& shiftRaw(h256 const& _part) { m_parts.push_back(_part); return *this; }
operator CollapsedTopic() const { return toTopic(); }
operator FullTopic() const { return toFullTopic(); }
CollapsedTopic toTopic() const;
FullTopic toFullTopic() const { return m_parts; }
operator AbridgedTopics() const { return toAbridgedTopics(); }
operator Topics() const { return toTopics(); }
AbridgedTopics toAbridgedTopics() const;
Topics toTopics() const { return m_parts; }
protected:
BuildTopic& shiftBytes(bytes const& _b);
@ -91,14 +91,14 @@ protected:
h256s m_parts;
};
using TopicMask = std::vector<std::pair<CollapsedTopicPart, CollapsedTopicPart>>;
using TopicMask = std::vector<std::pair<AbridgedTopic, AbridgedTopic>>;
using TopicMasks = std::vector<TopicMask>;
class TopicFilter
{
public:
TopicFilter() {}
TopicFilter(FullTopic const& _m) { m_topicMasks.push_back(TopicMask()); for (auto const& h: _m) m_topicMasks.back().push_back(std::make_pair(collapse(h), h ? ~CollapsedTopicPart() : CollapsedTopicPart())); }
TopicFilter(Topics const& _m) { m_topicMasks.push_back(TopicMask()); for (auto const& h: _m) m_topicMasks.back().push_back(std::make_pair(abridge(h), h ? ~AbridgedTopic() : AbridgedTopic())); }
TopicFilter(TopicMask const& _m): m_topicMasks(1, _m) {}
TopicFilter(TopicMasks const& _m): m_topicMasks(_m) {}
TopicFilter(RLP const& _r)//: m_topicMasks(_r.toVector<std::vector<>>())
@ -132,9 +132,9 @@ public:
template <class T> BuildTopicMask& operator()(T const& _t) { shift(_t); return *this; }
operator TopicMask() const { return toTopicMask(); }
operator FullTopic() const { return toFullTopic(); }
operator Topics() const { return toTopics(); }
TopicMask toTopicMask() const;
FullTopic toFullTopic() const { return m_parts; }
Topics toTopics() const { return m_parts; }
};
}
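Since this rename touches every whisper file below, a small crib of the old and new aliases may help when reading the rest of the diff. The sketch uses stand-in types; in the real header Topic is h256, AbridgedTopic is FixedHash<4> (the 4-byte sha3 abridgement produced by abridge(), formerly collapse()).

#include <array>
#include <cstdint>
#include <vector>

using Topic          = std::array<uint8_t, 32>;        // was FullTopicPart (h256 in libdevcore)
using AbridgedTopic  = std::array<uint8_t, 4>;         // was CollapsedTopicPart (FixedHash<4>)
using Topics         = std::vector<Topic>;             // was FullTopic (h256s)
using AbridgedTopics = std::vector<AbridgedTopic>;     // was CollapsedTopic

int main() {}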

libwhisper/Interface.h (24 changes)

@ -38,19 +38,13 @@ namespace dev
namespace shh
{
/*struct TopicMask
{
Topic data;
Topic mask;
};*/
class Watch;
struct InstalledFilter
{
InstalledFilter(FullTopic const& _f): full(_f), filter(_f) {}
InstalledFilter(Topics const& _t): full(_t), filter(_t) {}
FullTopic full;
Topics full;
TopicFilter filter;
unsigned refCount = 1;
};
@ -71,8 +65,8 @@ public:
virtual void inject(Envelope const& _m, WhisperPeer* _from = nullptr) = 0;
virtual FullTopic const& fullTopic(unsigned _id) const = 0;
virtual unsigned installWatch(FullTopic const& _mask) = 0;
virtual Topics const& fullTopics(unsigned _id) const = 0;
virtual unsigned installWatch(Topics const& _filter) = 0;
virtual unsigned installWatchOnId(h256 _filterId) = 0;
virtual void uninstallWatch(unsigned _watchId) = 0;
virtual h256s peekWatch(unsigned _watchId) const = 0;
@ -81,10 +75,10 @@ public:
virtual Envelope envelope(h256 _m) const = 0;
void post(bytes const& _payload, FullTopic _topic, unsigned _ttl = 50, unsigned _workToProve = 50) { inject(Message(_payload).seal(_topic, _ttl, _workToProve)); }
void post(Public _to, bytes const& _payload, FullTopic _topic, unsigned _ttl = 50, unsigned _workToProve = 50) { inject(Message(_payload).sealTo(_to, _topic, _ttl, _workToProve)); }
void post(Secret _from, bytes const& _payload, FullTopic _topic, unsigned _ttl = 50, unsigned _workToProve = 50) { inject(Message(_payload).seal(_from, _topic, _ttl, _workToProve)); }
void post(Secret _from, Public _to, bytes const& _payload, FullTopic _topic, unsigned _ttl = 50, unsigned _workToProve = 50) { inject(Message(_payload).sealTo(_from, _to, _topic, _ttl, _workToProve)); }
void post(bytes const& _payload, Topics _topics, unsigned _ttl = 50, unsigned _workToProve = 50) { inject(Message(_payload).seal(_topics, _ttl, _workToProve)); }
void post(Public _to, bytes const& _payload, Topics _topics, unsigned _ttl = 50, unsigned _workToProve = 50) { inject(Message(_payload).sealTo(_to, _topics, _ttl, _workToProve)); }
void post(Secret _from, bytes const& _payload, Topics _topics, unsigned _ttl = 50, unsigned _workToProve = 50) { inject(Message(_payload).seal(_from, _topics, _ttl, _workToProve)); }
void post(Secret _from, Public _to, bytes const& _payload, Topics _topics, unsigned _ttl = 50, unsigned _workToProve = 50) { inject(Message(_payload).sealTo(_from, _to, _topics, _ttl, _workToProve)); }
};
struct WatshhChannel: public dev::LogChannel { static const char* name() { return "shh"; } static const int verbosity = 1; };
@ -106,7 +100,7 @@ class Watch: public boost::noncopyable
public:
Watch() {}
Watch(Interface& _c, FullTopic const& _f): m_c(&_c), m_id(_c.installWatch(_f)) {}
Watch(Interface& _c, Topics const& _t): m_c(&_c), m_id(_c.installWatch(_t)) {}
~Watch() { if (m_c) m_c->uninstallWatch(m_id); }
h256s check() { return m_c ? m_c->checkWatch(m_id) : h256s(); }

libwhisper/Message.cpp (20 changes)

@ -26,7 +26,7 @@ using namespace dev;
using namespace dev::p2p;
using namespace dev::shh;
Message::Message(Envelope const& _e, FullTopic const& _fk, Secret const& _s)
Message::Message(Envelope const& _e, Topics const& _t, Secret const& _s)
{
try
{
@ -35,7 +35,7 @@ Message::Message(Envelope const& _e, FullTopic const& _fk, Secret const& _s)
if (!decrypt(_s, &(_e.data()), b))
return;
else{}
else if (!openBroadcastEnvelope(_e, _fk, b))
else if (!openBroadcastEnvelope(_e, _t, b))
return;
if (populate(b))
@ -47,14 +47,14 @@ Message::Message(Envelope const& _e, FullTopic const& _fk, Secret const& _s)
}
}
bool Message::openBroadcastEnvelope(Envelope const& _e, FullTopic const& _fk, bytes& o_b)
bool Message::openBroadcastEnvelope(Envelope const& _e, Topics const& _fk, bytes& o_b)
{
// retrieve the key using the known topic and topicIndex.
unsigned topicIndex = 0;
Secret topicSecret;
// determine topicSecret/topicIndex from knowledge of the collapsed topics (which give the order) and our full-size filter topic.
CollapsedTopic knownTopic = collapse(_fk);
AbridgedTopics knownTopic = abridge(_fk);
for (unsigned ti = 0; ti < _fk.size() && !topicSecret; ++ti)
for (unsigned i = 0; i < _e.topic().size(); ++i)
if (_e.topic()[i] == knownTopic[ti])
@ -96,10 +96,10 @@ bool Message::populate(bytes const& _data)
return true;
}
Envelope Message::seal(Secret _from, FullTopic const& _fullTopic, unsigned _ttl, unsigned _workToProve) const
Envelope Message::seal(Secret _from, Topics const& _fullTopics, unsigned _ttl, unsigned _workToProve) const
{
CollapsedTopic topic = collapse(_fullTopic);
Envelope ret(time(0) + _ttl, _ttl, topic);
AbridgedTopics topics = abridge(_fullTopics);
Envelope ret(time(0) + _ttl, _ttl, topics);
bytes input(1 + m_payload.size());
input[0] = 0;
@ -121,7 +121,7 @@ Envelope Message::seal(Secret _from, FullTopic const& _fullTopic, unsigned _ttl,
// this message is for broadcast (could be read by anyone who knows at least one of the topics)
// create the shared secret for encrypting the payload, then encrypt the shared secret with each topic
Secret s = Secret::random();
for (h256 const& t : _fullTopic)
for (h256 const& t : _fullTopics)
{
h256 salt = h256::random();
ret.m_data += (generateGamma(t, salt) ^ s).asBytes();
@ -146,9 +146,9 @@ Envelope::Envelope(RLP const& _m)
m_nonce = _m[4].toInt<u256>();
}
Message Envelope::open(FullTopic const& _ft, Secret const& _s) const
Message Envelope::open(Topics const& _t, Secret const& _s) const
{
return Message(*this, _ft, _s);
return Message(*this, _t, _s);
}
unsigned Envelope::workProved() const

libwhisper/Message.h (20 changes)

@ -72,22 +72,22 @@ public:
unsigned sent() const { return m_expiry - m_ttl; }
unsigned expiry() const { return m_expiry; }
unsigned ttl() const { return m_ttl; }
CollapsedTopic const& topic() const { return m_topic; }
AbridgedTopics const& topic() const { return m_topic; }
bytes const& data() const { return m_data; }
Message open(FullTopic const& _ft, Secret const& _s = Secret()) const;
Message open(Topics const& _t, Secret const& _s = Secret()) const;
unsigned workProved() const;
void proveWork(unsigned _ms);
private:
Envelope(unsigned _exp, unsigned _ttl, CollapsedTopic const& _topic): m_expiry(_exp), m_ttl(_ttl), m_topic(_topic) {}
Envelope(unsigned _exp, unsigned _ttl, AbridgedTopics const& _topic): m_expiry(_exp), m_ttl(_ttl), m_topic(_topic) {}
unsigned m_expiry = 0;
unsigned m_ttl = 0;
u256 m_nonce;
CollapsedTopic m_topic;
AbridgedTopics m_topic;
bytes m_data;
};
@ -102,7 +102,7 @@ class Message
{
public:
Message() {}
Message(Envelope const& _e, FullTopic const& _ft, Secret const& _s = Secret());
Message(Envelope const& _e, Topics const& _t, Secret const& _s = Secret());
Message(bytes const& _payload): m_payload(_payload) {}
Message(bytesConstRef _payload): m_payload(_payload.toBytes()) {}
Message(bytes&& _payload) { std::swap(_payload, m_payload); }
@ -119,15 +119,15 @@ public:
operator bool() const { return !!m_payload.size() || m_from || m_to; }
/// Turn this message into a distributable Envelope.
Envelope seal(Secret _from, FullTopic const& _topic, unsigned _ttl = 50, unsigned _workToProve = 50) const;
Envelope seal(Secret _from, Topics const& _topics, unsigned _ttl = 50, unsigned _workToProve = 50) const;
// Overloads for skipping _from or specifying _to.
Envelope seal(FullTopic const& _topic, unsigned _ttl = 50, unsigned _workToProve = 50) const { return seal(Secret(), _topic, _ttl, _workToProve); }
Envelope sealTo(Public _to, FullTopic const& _topic, unsigned _ttl = 50, unsigned _workToProve = 50) { m_to = _to; return seal(Secret(), _topic, _ttl, _workToProve); }
Envelope sealTo(Secret _from, Public _to, FullTopic const& _topic, unsigned _ttl = 50, unsigned _workToProve = 50) { m_to = _to; return seal(_from, _topic, _ttl, _workToProve); }
Envelope seal(Topics const& _topics, unsigned _ttl = 50, unsigned _workToProve = 50) const { return seal(Secret(), _topics, _ttl, _workToProve); }
Envelope sealTo(Public _to, Topics const& _topics, unsigned _ttl = 50, unsigned _workToProve = 50) { m_to = _to; return seal(Secret(), _topics, _ttl, _workToProve); }
Envelope sealTo(Secret _from, Public _to, Topics const& _topics, unsigned _ttl = 50, unsigned _workToProve = 50) { m_to = _to; return seal(_from, _topics, _ttl, _workToProve); }
private:
bool populate(bytes const& _data);
bool openBroadcastEnvelope(Envelope const& _e, FullTopic const& _fk, bytes& o_b);
bool openBroadcastEnvelope(Envelope const& _e, Topics const& _t, bytes& o_b);
h256 generateGamma(h256 const& _key, h256 const& _salt) const { return sha3(_key ^ _salt); }
Public m_from;

libwhisper/WhisperHost.cpp (4 changes)

@ -103,11 +103,11 @@ unsigned WhisperHost::installWatchOnId(h256 _h)
return ret;
}
unsigned WhisperHost::installWatch(shh::FullTopic const& _ft)
unsigned WhisperHost::installWatch(shh::Topics const& _t)
{
Guard l(m_filterLock);
InstalledFilter f(_ft);
InstalledFilter f(_t);
h256 h = f.filter.sha3();
if (!m_filters.count(h))

libwhisper/WhisperHost.h (6 changes)

@ -40,7 +40,7 @@ namespace dev
namespace shh
{
static const FullTopic EmptyFullTopic;
static const Topics EmptyTopics;
class WhisperHost: public HostCapability<WhisperPeer>, public Interface, public Worker
{
@ -54,8 +54,8 @@ public:
virtual void inject(Envelope const& _e, WhisperPeer* _from = nullptr) override;
virtual FullTopic const& fullTopic(unsigned _id) const { try { return m_filters.at(m_watches.at(_id).id).full; } catch (...) { return EmptyFullTopic; } }
virtual unsigned installWatch(FullTopic const& _filter) override;
virtual Topics const& fullTopics(unsigned _id) const { try { return m_filters.at(m_watches.at(_id).id).full; } catch (...) { return EmptyTopics; } }
virtual unsigned installWatch(Topics const& _filter) override;
virtual unsigned installWatchOnId(h256 _filterId) override;
virtual void uninstallWatch(unsigned _watchId) override;
virtual h256s peekWatch(unsigned _watchId) const override { dev::Guard l(m_filterLock); try { return m_watches.at(_watchId).changes; } catch (...) { return h256s(); } }

test/libethereum/TransactionTestsFiller/ttTransactionTestFiller.json (20 changes)

@ -63,12 +63,12 @@
}
},
"V_overflow64bit" : {
"V_overflow64bitPlus27" : {
"expect" : "invalid",
"transaction" :
{
"data" : "0x5544",
"gasLimit" : "21000",
"gasLimit" : "22000",
"gasPrice" : "1",
"nonce" : "3",
"to" : "b94f5374fce5edbc8e2a8697c15331677e6ebf0b",
@ -79,6 +79,22 @@
}
},
"V_overflow64bitPlus28" : {
"expect" : "invalid",
"transaction" :
{
"data" : "0x5544",
"gasLimit" : "22000",
"gasPrice" : "1",
"nonce" : "3",
"to" : "b94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"value" : "10",
"v" : "18446744073709551644",
"r" : "0x98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a",
"s" : "0x8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3"
}
},
"V_overflow64bitSigned" : {
"expect" : "invalid",
"transaction" :

test/libwhisper/whisperMessage.cpp (8 changes)

@ -34,9 +34,9 @@ struct VerbosityHolder
int oldLogVerbosity;
};
FullTopic createRandomTopics(unsigned int i)
Topics createRandomTopics(unsigned int i)
{
FullTopic ret;
Topics ret;
h256 t(i);
for (int j = 0; j < 8; ++j)
@ -72,14 +72,14 @@ void comparePayloads(Message const& m1, Message const& m2)
void sealAndOpenSingleMessage(unsigned int i)
{
Secret zero;
FullTopic topics = createRandomTopics(i);
Topics topics = createRandomTopics(i);
bytes const payload = createRandomPayload(i);
Message m1(payload);
Envelope e = m1.seal(zero, topics, 1, 1);
for (auto const& t: topics)
{
FullTopic singleTopic;
Topics singleTopic;
singleTopic.push_back(t);
Message m2(e, singleTopic, zero);
comparePayloads(m1, m2);
