
Cleaned up some of the network code. Added HostSpec.

cl-refactor
Gav Wood committed 9 years ago
parent · commit a060c46b4c
26 changed files:
  1. BuildInfo.h.in (1)
  2. CMakeLists.txt (5)
  3. cmake/scripts/buildinfo.cmake (7)
  4. eth/main.cpp (4)
  5. libdevcore/Common.h (7)
  6. libdevcore/Log.h (39)
  7. libethcore/BlockInfo.cpp (1)
  8. libethereum/EthereumHost.h (4)
  9. libp2p/Common.cpp (58)
  10. libp2p/Common.h (73)
  11. libp2p/Host.cpp (52)
  12. libp2p/Host.h (39)
  13. libp2p/NodeTable.cpp (18)
  14. libp2p/NodeTable.h (40)
  15. libp2p/Peer.cpp (2)
  16. libp2p/Peer.h (2)
  17. libp2p/RLPxHandshake.h (4)
  18. libp2p/Session.cpp (4)
  19. libp2p/Session.h (2)
  20. libweb3jsonrpc/WebThreeStubServerBase.cpp (14)
  21. libwebthree/WebThree.cpp (8)
  22. libwebthree/WebThree.h (26)
  23. mix/Web3Server.cpp (14)
  24. test/libp2p/capability.cpp (4)
  25. test/libp2p/net.cpp (8)
  26. test/libp2p/peer.cpp (6)

BuildInfo.h.in (1)

@ -5,3 +5,4 @@
#define ETH_CLEAN_REPO @ETH_CLEAN_REPO@
#define ETH_BUILD_TYPE @ETH_BUILD_TYPE@
#define ETH_BUILD_PLATFORM @ETH_BUILD_PLATFORM@
#define ETH_FATDB @ETH_FATDB@

CMakeLists.txt (5)

@ -242,12 +242,14 @@ function(createBuildInfo)
set(_cmake_build_type "${CMAKE_CFG_INTDIR}")
endif()
message("createBuildInfo()")
# Generate header file containing useful build information
add_custom_target(BuildInfo.h ALL
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
COMMAND ${CMAKE_COMMAND} -DETH_SOURCE_DIR="${CMAKE_SOURCE_DIR}" -DETH_DST_DIR="${CMAKE_BINARY_DIR}"
-DETH_BUILD_TYPE="${_cmake_build_type}" -DETH_BUILD_PLATFORM="${ETH_BUILD_PLATFORM}"
-DPROJECT_VERSION="${PROJECT_VERSION}"
-DPROJECT_VERSION="${PROJECT_VERSION}" -DETH_FATDB="${FATDB}"
-P "${ETH_SCRIPTS_DIR}/buildinfo.cmake"
)
include_directories(${CMAKE_CURRENT_BINARY_DIR})
@ -375,6 +377,7 @@ message("-- CXXFLAGS: ${CMAKE_CXX_FLAGS}")
# this must be an include, as a function it would mess up with variable scope!
include(EthExecutableHelper)
message("creating build info...")
createBuildInfo()
if (ROCKSDB AND ROCKSDB_FOUND)

cmake/scripts/buildinfo.cmake (7)

@ -9,6 +9,12 @@
# example usage:
# cmake -DETH_SOURCE_DIR=. -DETH_DST_DIR=build -DETH_BUILD_TYPE=Debug -DETH_BUILD_PLATFORM=mac -P scripts/buildinfo.cmake
if (ETH_FATDB)
set(ETH_FATDB 1)
else()
set(ETH_FATDB 0)
endif()
if (NOT ETH_BUILD_TYPE)
set(ETH_BUILD_TYPE "unknown")
endif()
@ -41,6 +47,7 @@ set(INFILE "${ETH_SOURCE_DIR}/BuildInfo.h.in")
set(TMPFILE "${ETH_DST_DIR}/BuildInfo.h.tmp")
set(OUTFILE "${ETH_DST_DIR}/BuildInfo.h")
message("ETH_FATDB: ${ETH_FATDB}")
configure_file("${INFILE}" "${TMPFILE}")
include("${ETH_SOURCE_DIR}/cmake/EthUtils.cmake")

eth/main.cpp (4)

@ -314,7 +314,7 @@ int main(int argc, char** argv)
unsigned peers = 11;
unsigned peerStretch = 7;
std::map<NodeId, pair<NodeIPEndpoint,bool>> preferredNodes;
std::map<NodeID, pair<NodeIPEndpoint,bool>> preferredNodes;
bool bootstrap = true;
bool disableDiscovery = false;
bool pinning = false;
@ -1089,7 +1089,7 @@ int main(int argc, char** argv)
for (auto const& i: Host::pocHosts())
web3.requirePeer(i.first, i.second);
if (!remoteHost.empty())
web3.addNode(p2p::NodeId(), remoteHost + ":" + toString(remotePort));
web3.addNode(p2p::NodeID(), remoteHost + ":" + toString(remotePort));
signal(SIGABRT, &Client::exitHandler);
signal(SIGTERM, &Client::exitHandler);

libdevcore/Common.h (7)

@ -279,6 +279,13 @@ private:
#define DEV_TIMED_FUNCTION_ABOVE(MS) DEV_TIMED_SCOPE_ABOVE(__PRETTY_FUNCTION__, MS)
#endif
#ifdef _MSC_VER
// TODO.
#define DEV_UNUSED
#else
#define DEV_UNUSED __attribute__((unused))
#endif
enum class WithExisting: int
{
Trust = 0,

libdevcore/Log.h (39)

@ -111,11 +111,11 @@ std::string getThreadName();
/// The default logging channels. Each has an associated verbosity and three-letter prefix (name() ).
/// Channels should inherit from LogChannel and define name() and verbosity.
struct LogChannel { static const char* name(); static const int verbosity = 1; };
struct LogChannel { static const char* name(); static const int verbosity = 1; static const bool debug = true; };
struct LeftChannel: public LogChannel { static const char* name(); };
struct RightChannel: public LogChannel { static const char* name(); };
struct WarnChannel: public LogChannel { static const char* name(); static const int verbosity = 0; };
struct NoteChannel: public LogChannel { static const char* name(); };
struct WarnChannel: public LogChannel { static const char* name(); static const int verbosity = 0; static const bool debug = false; };
struct NoteChannel: public LogChannel { static const char* name(); static const bool debug = false; };
struct DebugChannel: public LogChannel { static const char* name(); static const int verbosity = 0; };
enum class LogTag
@ -259,30 +259,29 @@ public:
template <class T> LogOutputStream& operator<<(T const& _t) { if (Id::verbosity <= g_logVerbosity) { if (_AutoSpacing && m_sstr.str().size() && m_sstr.str().back() != ' ') m_sstr << " "; append(_t); } return *this; }
};
// Simple cout-like stream objects for accessing common log channels.
// Dirties the global namespace, but oh so convenient...
#define cnote dev::LogOutputStream<dev::NoteChannel, true>()
#define cwarn dev::LogOutputStream<dev::WarnChannel, true>()
// Null stream-like objects.
#define ndebug if (true) {} else dev::NullOutputStream()
#define nlog(X) if (true) {} else dev::NullOutputStream()
#define nslog(X) if (true) {} else dev::NullOutputStream()
// Kill debugging log channel when we're in release mode.
#if NDEBUG
#define cdebug ndebug
#else
#define cdebug dev::LogOutputStream<dev::DebugChannel, true>()
#endif
// Kill all logs when when NLOG is defined.
#if NLOG
#define clog(X) nlog(X)
#define cslog(X) nslog(X)
#else
#if NDEBUG
#define clog(X) if (X::debug) {} else dev::LogOutputStream<X, true>()
#define cslog(X) if (X::debug) {} else dev::LogOutputStream<X, false>()
#else
#define clog(X) dev::LogOutputStream<X, true>()
#define cslog(X) dev::LogOutputStream<X, false>()
#endif
#endif
// Simple cout-like stream objects for accessing common log channels.
// Dirties the global namespace, but oh so convenient...
#define cdebug clog(dev::DebugChannel)
#define cnote clog(dev::NoteChannel)
#define cwarn clog(dev::WarnChannel)
// Null stream-like objects.
#define ndebug if (true) {} else dev::NullOutputStream()
#define nlog(X) if (true) {} else dev::NullOutputStream()
#define nslog(X) if (true) {} else dev::NullOutputStream()
}
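For orientation, a minimal sketch (not part of the diff) of how a user-defined channel behaves under the reworked macros: a channel with debug = true is compiled out of NDEBUG builds by the clog/cslog null branch, while channels that set debug = false (NoteChannel, WarnChannel) keep logging in release. ExampleChannel below is hypothetical.

    #include <libdevcore/Log.h>

    // Hypothetical channel: shown at verbosity >= 2 and, because debug = true,
    // silenced entirely when NDEBUG is defined.
    struct ExampleChannel: public dev::LogChannel
    {
        static const char* name() { return "exa"; }
        static const int verbosity = 2;
        static const bool debug = true;
    };

    void logDemo()
    {
        clog(ExampleChannel) << "visible in debug builds only";
        cnote << "NoteChannel has debug = false, so this also appears in release builds";
        cwarn << "WarnChannel too, at verbosity 0";
    }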

libethcore/BlockInfo.cpp (1)

@ -20,6 +20,7 @@
*/
#include <libdevcore/Common.h>
#include <libdevcore/Log.h>
#include <libdevcore/RLP.h>
#include <libdevcore/TrieDB.h>
#include <libdevcore/TrieHash.h>

libethereum/EthereumHost.h (4)

@ -74,7 +74,7 @@ public:
DownloadMan const& downloadMan() const { return m_man; }
DownloadMan& downloadMan() { return m_man; }
bool isSyncing() const;
bool isBanned(p2p::NodeId const& _id) const { return !!m_banned.count(_id); }
bool isBanned(p2p::NodeID const& _id) const { return !!m_banned.count(_id); }
void noteNewTransactions() { m_newTransactions = true; }
void noteNewBlocks() { m_newBlocks = true; }
@ -129,7 +129,7 @@ private:
h256 m_latestBlockSent;
h256Hash m_transactionsSent;
std::unordered_set<p2p::NodeId> m_banned;
std::unordered_set<p2p::NodeID> m_banned;
bool m_newTransactions = false;
bool m_newBlocks = false;

libp2p/Common.cpp (58)

@ -20,6 +20,7 @@
*/
#include "Common.h"
#include "Network.h"
using namespace std;
using namespace dev;
using namespace dev::p2p;
@ -29,7 +30,7 @@ const unsigned dev::p2p::c_defaultIPPort = 30303;
static_assert(dev::p2p::c_protocolVersion == 4, "Replace v3 compatbility with v4 compatibility before updating network version.");
const dev::p2p::NodeIPEndpoint dev::p2p::UnspecifiedNodeIPEndpoint = NodeIPEndpoint(bi::address(), 0, 0);
const dev::p2p::Node dev::p2p::UnspecifiedNode = dev::p2p::Node(NodeId(), UnspecifiedNodeIPEndpoint);
const dev::p2p::Node dev::p2p::UnspecifiedNode = dev::p2p::Node(NodeID(), UnspecifiedNodeIPEndpoint);
bool dev::p2p::NodeIPEndpoint::test_allowLocal = false;
@ -199,7 +200,60 @@ void DeadlineOps::reap()
});
}
namespace dev {
Node::Node(NodeSpec const& _s, PeerType _p):
id(_s.id()),
endpoint(_s.nodeIPEndpoint()),
peerType(_p)
{}
NodeSpec::NodeSpec(string const& _user)
{
m_address = _user;
if (m_address.substr(0, 8) == "enode://" && m_address.find('@') == 136)
{
m_id = p2p::NodeID(m_address.substr(8, 128));
m_address = m_address.substr(137);
}
size_t colon = m_address.find_first_of(":");
if (colon != string::npos)
{
string ports = m_address.substr(colon + 1);
m_address = m_address.substr(0, colon);
size_t p2 = ports.find_first_of(".");
if (p2 != string::npos)
{
m_udpPort = stoi(ports.substr(p2 + 1));
m_tcpPort = stoi(ports.substr(0, p2));
}
else
m_tcpPort = m_udpPort = stoi(ports);
}
}
NodeIPEndpoint NodeSpec::nodeIPEndpoint() const
{
return NodeIPEndpoint(p2p::Network::resolveHost(m_address).address(), m_udpPort, m_tcpPort);
}
std::string NodeSpec::enode() const
{
string ret = m_address;
if (m_tcpPort)
if (m_udpPort && m_tcpPort != m_udpPort)
ret += ":" + toString(m_tcpPort) + "." + toString(m_udpPort);
else
ret += ":" + toString(m_tcpPort);
else if (m_udpPort)
ret += ":" + toString(m_udpPort);
if (m_id)
return "enode://" + m_id.hex() + "@" + ret;
return ret;
}
namespace dev
{
std::ostream& operator<<(std::ostream& _out, dev::p2p::NodeIPEndpoint const& _ep)
{
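Illustrative usage of the new NodeSpec (not from the patch; the key and ports below are dummies). It accepts the (enode://pubkey@)host(:port|:tcpport.udpport) forms documented in Common.h and converts between the string, the NodeID and the NodeIPEndpoint:

    #include <string>
    #include <libp2p/Common.h>
    using namespace dev::p2p;

    void nodeSpecDemo()
    {
        // enode URI with a dummy all-zero 128-hex-character public key.
        NodeSpec spec("enode://" + std::string(128, '0') + "@127.0.0.1:30305.30306");

        NodeID id = spec.id();                     // 512-bit key parsed from the URI
        NodeIPEndpoint ep = spec.nodeIPEndpoint(); // resolves the host; tcp 30305, udp 30306
        std::string uri = spec.enode();            // serialises back to an enode:// URI
        (void)id; (void)ep; (void)uri;
    }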

libp2p/Common.h (73)

@ -54,12 +54,12 @@ namespace p2p
extern const unsigned c_protocolVersion;
extern const unsigned c_defaultIPPort;
struct NodeIPEndpoint;
struct Node;
class NodeIPEndpoint;
class Node;
extern const NodeIPEndpoint UnspecifiedNodeIPEndpoint;
extern const Node UnspecifiedNode;
using NodeId = h512;
using NodeID = h512;
bool isPrivateAddress(bi::address const& _addressToCheck);
bool isPrivateAddress(std::string const& _addressToCheck);
@ -150,7 +150,7 @@ using CapDescs = std::vector<CapDesc>;
*/
struct PeerSessionInfo
{
NodeId const id;
NodeID const id;
std::string const clientVersion;
std::string const host;
unsigned short const port;
@ -163,11 +163,18 @@ struct PeerSessionInfo
using PeerSessionInfos = std::vector<PeerSessionInfo>;
enum class PeerType
{
Optional,
Required
};
/**
* @brief IPv4,UDP/TCP endpoints.
*/
struct NodeIPEndpoint
class NodeIPEndpoint
{
public:
enum RLPAppend
{
StreamList,
@ -181,10 +188,6 @@ struct NodeIPEndpoint
NodeIPEndpoint(bi::address _addr, uint16_t _udp, uint16_t _tcp): address(_addr), udpPort(_udp), tcpPort(_tcp) {}
NodeIPEndpoint(RLP const& _r) { interpretRLP(_r); }
bi::address address = bi::address();
uint16_t udpPort = 0;
uint16_t tcpPort = 0;
operator bi::udp::endpoint() const { return bi::udp::endpoint(address, udpPort); }
operator bi::tcp::endpoint() const { return bi::tcp::endpoint(address, tcpPort); }
@ -194,25 +197,61 @@ struct NodeIPEndpoint
void streamRLP(RLPStream& _s, RLPAppend _append = StreamList) const;
void interpretRLP(RLP const& _r);
// TODO: make private, give accessors and rename m_...
bi::address address;
uint16_t udpPort = 0;
uint16_t tcpPort = 0;
};
struct Node
struct NodeSpec
{
Node(Public _pubk, NodeIPEndpoint const& _ip, bool _required = false): id(_pubk), endpoint(_ip), required(_required) {}
NodeSpec() {}
/// Accepts user-readable strings of the form (enode://pubkey@)host({:port,:tcpport.udpport})
NodeSpec(std::string const& _user);
NodeSpec(std::string const& _addr, uint16_t _port, int _udpPort = -1):
m_address(_addr),
m_tcpPort(_port),
m_udpPort(_udpPort == -1 ? _port : (uint16_t)_udpPort)
{}
NodeID id() const { return m_id; }
virtual NodeId const& address() const { return id; }
NodeIPEndpoint nodeIPEndpoint() const;
std::string enode() const;
private:
std::string m_address;
uint16_t m_tcpPort = 0;
uint16_t m_udpPort = 0;
NodeID m_id;
};
class Node
{
public:
Node() = default;
Node(Node const&) = default;
Node(Public _publicKey, NodeIPEndpoint const& _ip, PeerType _peerType = PeerType::Optional): id(_publicKey), endpoint(_ip), peerType(_peerType) {}
Node(NodeSpec const& _s, PeerType _peerType = PeerType::Optional);
virtual NodeID const& address() const { return id; }
virtual Public const& publicKey() const { return id; }
NodeId id;
virtual operator bool() const { return (bool)id; }
// TODO: make private, give accessors and rename m_...
NodeID id;
/// Endpoints by which we expect to reach node.
// TODO: make private, give accessors and rename m_...
NodeIPEndpoint endpoint;
/// If true, node will not be removed from Node list.
// TODO: p2p implement
bool required = false;
virtual operator bool() const { return (bool)id; }
PeerType peerType = PeerType::Optional;
};
class DeadlineOps
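A short sketch (illustrative only, not from the commit) of how the old bool required maps onto the new PeerType on Node, via the Node(NodeSpec, PeerType) constructor declared above:

    #include <libp2p/Common.h>
    using namespace dev::p2p;

    void nodeFromSpec(NodeSpec const& _spec)
    {
        Node optional(_spec);                   // PeerType::Optional by default (old required = false)
        Node pinned(_spec, PeerType::Required); // old required = true
        bool keep = pinned.peerType == PeerType::Required;
        (void)optional; (void)keep;
    }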

libp2p/Host.cpp (52)

@ -50,7 +50,7 @@ std::chrono::milliseconds const c_keepAliveTimeOut = std::chrono::milliseconds(1
HostNodeTableHandler::HostNodeTableHandler(Host& _host): m_host(_host) {}
void HostNodeTableHandler::processEvent(NodeId const& _n, NodeTableEventType const& _e)
void HostNodeTableHandler::processEvent(NodeID const& _n, NodeTableEventType const& _e)
{
m_host.onNodeTableEvent(_n, _e);
}
@ -305,7 +305,7 @@ void Host::startPeerSession(Public const& _id, RLP const& _rlp, unique_ptr<RLPXF
StructuredLogger::p2pConnected(_id.abridged(), ps->m_peer->endpoint, ps->m_peer->m_lastConnected, clientVersion, peerCount());
}
void Host::onNodeTableEvent(NodeId const& _n, NodeTableEventType const& _e)
void Host::onNodeTableEvent(NodeID const& _n, NodeTableEventType const& _e)
{
if (_e == NodeEntryAdded)
{
@ -336,7 +336,7 @@ void Host::onNodeTableEvent(NodeId const& _n, NodeTableEventType const& _e)
{
clog(NetP2PNote) << "p2p.host.nodeTable.events.NodeEntryDropped " << _n;
RecursiveGuard l(x_sessions);
if (m_peers.count(_n) && !m_peers[_n]->required)
if (m_peers.count(_n) && m_peers[_n]->peerType == PeerType::Optional)
m_peers.erase(_n);
}
}
@ -450,7 +450,15 @@ std::unordered_map<Public, std::string> const& Host::pocHosts()
return c_ret;
}
void Host::addNode(NodeId const& _node, NodeIPEndpoint const& _endpoint)
void Host::addPeer(NodeSpec const& _s, PeerType _t)
{
if (_t == PeerType::Optional)
addNode(_s.id(), _s.nodeIPEndpoint());
else
requirePeer(_s.id(), _s.nodeIPEndpoint());
}
void Host::addNode(NodeID const& _node, NodeIPEndpoint const& _endpoint)
{
// return if network is stopped while waiting on Host::run() or nodeTable to start
while (!haveNetwork())
@ -466,12 +474,12 @@ void Host::addNode(NodeId const& _node, NodeIPEndpoint const& _endpoint)
m_nodeTable->addNode(Node(_node, _endpoint));
}
void Host::requirePeer(NodeId const& _n, NodeIPEndpoint const& _endpoint)
void Host::requirePeer(NodeID const& _n, NodeIPEndpoint const& _endpoint)
{
if (!m_run)
return;
Node node(_n, _endpoint, true);
Node node(_n, _endpoint, PeerType::Required);
if (_n)
{
// create or update m_peers entry
@ -481,7 +489,7 @@ void Host::requirePeer(NodeId const& _n, NodeIPEndpoint const& _endpoint)
{
p = m_peers[_n];
p->endpoint = node.endpoint;
p->required = true;
p->peerType = PeerType::Required;
}
else
{
@ -506,7 +514,7 @@ void Host::requirePeer(NodeId const& _n, NodeIPEndpoint const& _endpoint)
}
}
void Host::relinquishPeer(NodeId const& _node)
void Host::relinquishPeer(NodeID const& _node)
{
Guard l(x_requiredPeers);
if (m_requiredPeers.count(_node))
@ -524,7 +532,7 @@ void Host::connect(std::shared_ptr<Peer> const& _p)
return;
}
if (!!m_nodeTable && !m_nodeTable->haveNode(_p->id) && !_p->required)
if (!!m_nodeTable && !m_nodeTable->haveNode(_p->id) && _p->peerType == PeerType::Optional)
return;
// prevent concurrently connecting to a node
@ -638,7 +646,7 @@ void Host::run(boost::system::error_code const&)
for (auto const& p: m_peers)
{
bool haveSession = havePeerSession(p.second->id);
bool required = p.second->required;
bool required = p.second->peerType == PeerType::Required;
if (haveSession && required)
reqConn++;
else if (!haveSession && p.second->shouldReconnect() && (!m_netPrefs.pin || required))
@ -647,7 +655,7 @@ void Host::run(boost::system::error_code const&)
}
for (auto p: toConnect)
if (p->required && reqConn++ < m_idealPeerCount)
if (p->peerType == PeerType::Required && reqConn++ < m_idealPeerCount)
connect(p);
if (!m_netPrefs.pin)
@ -658,7 +666,7 @@ void Host::run(boost::system::error_code const&)
int openSlots = m_idealPeerCount - peerCount() - pendingCount + reqConn;
if (openSlots > 0)
for (auto p: toConnect)
if (!p->required && openSlots--)
if (p->peerType == PeerType::Optional && openSlots--)
connect(p);
}
@ -780,11 +788,11 @@ bytes Host::saveNetwork() const
continue;
// Only save peers which have connected within 2 days, with properly-advertised port and public IP address
if (chrono::system_clock::now() - p.m_lastConnected < chrono::seconds(3600 * 48) && !!p.endpoint && p.id != id() && (p.required || p.endpoint.isAllowed()))
if (chrono::system_clock::now() - p.m_lastConnected < chrono::seconds(3600 * 48) && !!p.endpoint && p.id != id() && (p.peerType == PeerType::Required || p.endpoint.isAllowed()))
{
network.appendList(11);
p.endpoint.streamRLP(network, NodeIPEndpoint::StreamInline);
network << p.id << p.required
network << p.id << (p.peerType == PeerType::Required ? true : false)
<< chrono::duration_cast<chrono::seconds>(p.m_lastConnected.time_since_epoch()).count()
<< chrono::duration_cast<chrono::seconds>(p.m_lastAttempted.time_since_epoch()).count()
<< p.m_failedAttempts << (unsigned)p.m_lastDisconnect << p.m_score << p.m_rating;
@ -843,13 +851,13 @@ void Host::restoreNetwork(bytesConstRef _b)
if (i.itemCount() == 4 || i.itemCount() == 11)
{
Node n((NodeId)i[3], NodeIPEndpoint(i));
Node n((NodeID)i[3], NodeIPEndpoint(i));
if (i.itemCount() == 4 && n.endpoint.isAllowed())
m_nodeTable->addNode(n);
else if (i.itemCount() == 11)
{
n.required = i[4].toInt<bool>();
if (!n.endpoint.isAllowed() && !n.required)
n.peerType = i[4].toInt<bool>() ? PeerType::Required : PeerType::Optional;
if (!n.endpoint.isAllowed() && n.peerType == PeerType::Optional)
continue;
shared_ptr<Peer> p = make_shared<Peer>(n);
p->m_lastConnected = chrono::system_clock::time_point(chrono::seconds(i[5].toInt<unsigned>()));
@ -859,7 +867,7 @@ void Host::restoreNetwork(bytesConstRef _b)
p->m_score = (int)i[9].toInt<unsigned>();
p->m_rating = (int)i[10].toInt<unsigned>();
m_peers[p->id] = p;
if (p->required)
if (p->peerType == PeerType::Required)
requirePeer(p->id, n.endpoint);
else
m_nodeTable->addNode(*p.get(), NodeTable::NodeRelation::Known);
@ -867,13 +875,13 @@ void Host::restoreNetwork(bytesConstRef _b)
}
else if (i.itemCount() == 3 || i.itemCount() == 10)
{
Node n((NodeId)i[2], NodeIPEndpoint(bi::address_v4(i[0].toArray<byte, 4>()), i[1].toInt<uint16_t>(), i[1].toInt<uint16_t>()));
Node n((NodeID)i[2], NodeIPEndpoint(bi::address_v4(i[0].toArray<byte, 4>()), i[1].toInt<uint16_t>(), i[1].toInt<uint16_t>()));
if (i.itemCount() == 3 && n.endpoint.isAllowed())
m_nodeTable->addNode(n);
else if (i.itemCount() == 10)
{
n.required = i[3].toInt<bool>();
if (!n.endpoint.isAllowed() && !n.required)
n.peerType = i[3].toInt<bool>() ? PeerType::Required : PeerType::Optional;
if (!n.endpoint.isAllowed() && n.peerType == PeerType::Optional)
continue;
shared_ptr<Peer> p = make_shared<Peer>(n);
p->m_lastConnected = chrono::system_clock::time_point(chrono::seconds(i[4].toInt<unsigned>()));
@ -883,7 +891,7 @@ void Host::restoreNetwork(bytesConstRef _b)
p->m_score = (int)i[8].toInt<unsigned>();
p->m_rating = (int)i[9].toInt<unsigned>();
m_peers[p->id] = p;
if (p->required)
if (p->peerType == PeerType::Required)
requirePeer(p->id, n.endpoint);
else
m_nodeTable->addNode(*p.get(), NodeTable::NodeRelation::Known);

libp2p/Host.h (39)

@ -47,11 +47,11 @@ namespace bi = ba::ip;
namespace std
{
template<> struct hash<pair<dev::p2p::NodeId, string>>
template<> struct hash<pair<dev::p2p::NodeID, string>>
{
size_t operator()(pair<dev::p2p::NodeId, string> const& _value) const
size_t operator()(pair<dev::p2p::NodeID, string> const& _value) const
{
size_t ret = hash<dev::p2p::NodeId>()(_value.first);
size_t ret = hash<dev::p2p::NodeID>()(_value.first);
return ret ^ (hash<string>()(_value.second) + 0x9e3779b9 + (ret << 6) + (ret >> 2));
}
};
@ -73,7 +73,7 @@ public:
Host const& host() const { return m_host; }
private:
virtual void processEvent(NodeId const& _n, NodeTableEventType const& _e);
virtual void processEvent(NodeID const& _n, NodeTableEventType const& _e);
Host& m_host;
};
@ -101,19 +101,19 @@ public:
bytes data(Session const& _s, std::string const& _subs) const;
private:
std::unordered_map<std::pair<p2p::NodeId, std::string>, Reputation> m_nodes; ///< Nodes that were impolite while syncing. We avoid syncing from these if possible.
std::unordered_map<std::pair<p2p::NodeID, std::string>, Reputation> m_nodes; ///< Nodes that were impolite while syncing. We avoid syncing from these if possible.
SharedMutex mutable x_nodes;
};
struct NodeInfo
{
NodeInfo() = default;
NodeInfo(NodeId const& _id, std::string const& _address, unsigned _port, std::string const& _version):
NodeInfo(NodeID const& _id, std::string const& _address, unsigned _port, std::string const& _version):
id(_id), address(_address), port(_port), version(_version) {}
std::string enode() const { return "enode://" + id.hex() + "@" + address + ":" + toString(port); }
NodeId id;
NodeID id;
std::string address;
unsigned port;
std::string version;
@ -156,17 +156,20 @@ public:
CapDescs caps() const { CapDescs ret; for (auto const& i: m_capabilities) ret.push_back(i.first); return ret; }
template <class T> std::shared_ptr<T> cap() const { try { return std::static_pointer_cast<T>(m_capabilities.at(std::make_pair(T::staticName(), T::staticVersion()))); } catch (...) { return nullptr; } }
/// Add a potential peer.
void addPeer(NodeSpec const& _s, PeerType _t);
/// Add node as a peer candidate. Node is added if discovery ping is successful and table has capacity.
void addNode(NodeId const& _node, NodeIPEndpoint const& _endpoint);
void addNode(NodeID const& _node, NodeIPEndpoint const& _endpoint);
/// Create Peer and attempt keeping peer connected.
void requirePeer(NodeId const& _node, NodeIPEndpoint const& _endpoint);
void requirePeer(NodeID const& _node, NodeIPEndpoint const& _endpoint);
/// Create Peer and attempt keeping peer connected.
void requirePeer(NodeId const& _node, bi::address const& _addr, unsigned short _udpPort, unsigned short _tcpPort) { requirePeer(_node, NodeIPEndpoint(_addr, _udpPort, _tcpPort)); }
void requirePeer(NodeID const& _node, bi::address const& _addr, unsigned short _udpPort, unsigned short _tcpPort) { requirePeer(_node, NodeIPEndpoint(_addr, _udpPort, _tcpPort)); }
/// Note peer as no longer being required.
void relinquishPeer(NodeId const& _node);
void relinquishPeer(NodeID const& _node);
/// Set ideal number of peers.
void setIdealPeerCount(unsigned _n) { m_idealPeerCount = _n; }
@ -216,10 +219,10 @@ public:
void startPeerSession(Public const& _id, RLP const& _hello, std::unique_ptr<RLPXFrameCoder>&& _io, std::shared_ptr<RLPXSocket> const& _s);
/// Get session by id
std::shared_ptr<Session> peerSession(NodeId const& _id) { RecursiveGuard l(x_sessions); return m_sessions.count(_id) ? m_sessions[_id].lock() : std::shared_ptr<Session>(); }
std::shared_ptr<Session> peerSession(NodeID const& _id) { RecursiveGuard l(x_sessions); return m_sessions.count(_id) ? m_sessions[_id].lock() : std::shared_ptr<Session>(); }
/// Get our current node ID.
NodeId id() const { return m_alias.pub(); }
NodeID id() const { return m_alias.pub(); }
/// Get the public TCP endpoint.
bi::tcp::endpoint const& tcpPublic() const { return m_tcpPublic; }
@ -231,7 +234,7 @@ public:
p2p::NodeInfo nodeInfo() const { return NodeInfo(id(), (networkPreferences().publicIPAddress.empty() ? m_tcpPublic.address().to_string() : networkPreferences().publicIPAddress), m_tcpPublic.port(), m_clientVersion); }
protected:
void onNodeTableEvent(NodeId const& _n, NodeTableEventType const& _e);
void onNodeTableEvent(NodeID const& _n, NodeTableEventType const& _e);
/// Deserialise the data and populate the set of known peers.
void restoreNetwork(bytesConstRef _b);
@ -241,7 +244,7 @@ private:
unsigned peerSlots(PeerSlotType _type) { return _type == Egress ? m_idealPeerCount : m_idealPeerCount * m_stretchPeers; }
bool havePeerSession(NodeId const& _id) { return !!peerSession(_id); }
bool havePeerSession(NodeID const& _id) { return !!peerSession(_id); }
/// Determines and sets m_tcpPublic to publicly advertised address.
void determinePublic();
@ -302,15 +305,15 @@ private:
std::shared_ptr<NodeTable> m_nodeTable; ///< Node table (uses kademlia-like discovery).
/// Shared storage of Peer objects. Peers are created or destroyed on demand by the Host. Active sessions maintain a shared_ptr to a Peer;
std::unordered_map<NodeId, std::shared_ptr<Peer>> m_peers;
std::unordered_map<NodeID, std::shared_ptr<Peer>> m_peers;
/// Peers we try to connect regardless of p2p network.
std::set<NodeId> m_requiredPeers;
std::set<NodeID> m_requiredPeers;
Mutex x_requiredPeers;
/// The nodes to which we are currently connected. Used by host to service peer requests and keepAlivePeers and for shutdown. (see run())
/// Mutable because we flush zombie entries (null-weakptrs) as regular maintenance from a const method.
mutable std::unordered_map<NodeId, std::weak_ptr<Session>> m_sessions;
mutable std::unordered_map<NodeID, std::weak_ptr<Session>> m_sessions;
mutable RecursiveMutex x_sessions;
std::list<std::weak_ptr<RLPXHandshake>> m_connecting; ///< Pending connections.
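Illustrative call sites for the new Host::addPeer, which dispatches a NodeSpec to addNode or requirePeer depending on PeerType (addresses and key below are placeholders, not from the commit):

    #include <string>
    #include <libp2p/Host.h>
    using namespace dev::p2p;

    void addPeersDemo(Host& _host)
    {
        // Discovery candidate: enters the node table if the ping succeeds.
        _host.addPeer(NodeSpec("203.0.113.10:30303"), PeerType::Optional);

        // Pinned peer: the Host keeps trying to stay connected (dummy key).
        _host.addPeer(NodeSpec("enode://" + std::string(128, '1') + "@203.0.113.11:30303"),
                      PeerType::Required);
    }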

libp2p/NodeTable.cpp (18)

@ -38,7 +38,7 @@ const char* NodeTableAllDetail::name() { return "=P="; }
const char* NodeTableEgress::name() { return ">>P"; }
const char* NodeTableIngress::name() { return "<<P"; }
NodeEntry::NodeEntry(NodeId const& _src, Public const& _pubk, NodeIPEndpoint const& _gw): Node(_pubk, _gw), distance(NodeTable::distance(_src, _pubk)) {}
NodeEntry::NodeEntry(NodeID const& _src, Public const& _pubk, NodeIPEndpoint const& _gw): Node(_pubk, _gw), distance(NodeTable::distance(_src, _pubk)) {}
NodeTable::NodeTable(ba::io_service& _io, KeyPair const& _alias, NodeIPEndpoint const& _endpoint, bool _enabled):
m_node(Node(_alias.pub(), _endpoint)),
@ -115,9 +115,9 @@ shared_ptr<NodeEntry> NodeTable::addNode(Node const& _node, NodeRelation _relati
return ret;
}
list<NodeId> NodeTable::nodes() const
list<NodeID> NodeTable::nodes() const
{
list<NodeId> nodes;
list<NodeID> nodes;
DEV_GUARDED(x_nodes)
for (auto& i: m_nodes)
nodes.push_back(i.second->id);
@ -135,24 +135,24 @@ list<NodeEntry> NodeTable::snapshot() const
return ret;
}
Node NodeTable::node(NodeId const& _id)
Node NodeTable::node(NodeID const& _id)
{
Guard l(x_nodes);
if (m_nodes.count(_id))
{
auto entry = m_nodes[_id];
return Node(_id, entry->endpoint, entry->required);
return Node(_id, entry->endpoint, entry->peerType);
}
return UnspecifiedNode;
}
shared_ptr<NodeEntry> NodeTable::nodeEntry(NodeId _id)
shared_ptr<NodeEntry> NodeTable::nodeEntry(NodeID _id)
{
Guard l(x_nodes);
return m_nodes.count(_id) ? m_nodes[_id] : shared_ptr<NodeEntry>();
}
void NodeTable::doDiscover(NodeId _node, unsigned _round, shared_ptr<set<shared_ptr<NodeEntry>>> _tried)
void NodeTable::doDiscover(NodeID _node, unsigned _round, shared_ptr<set<shared_ptr<NodeEntry>>> _tried)
{
// NOTE: ONLY called by doDiscovery!
@ -214,7 +214,7 @@ void NodeTable::doDiscover(NodeId _node, unsigned _round, shared_ptr<set<shared_
});
}
vector<shared_ptr<NodeEntry>> NodeTable::nearestNodeEntries(NodeId _target)
vector<shared_ptr<NodeEntry>> NodeTable::nearestNodeEntries(NodeID _target)
{
// send s_alpha FindNode packets to nodes we know, closest to target
static unsigned lastBin = s_bins - 1;
@ -611,7 +611,7 @@ void NodeTable::doDiscovery()
return;
clog(NodeTableEvent) << "performing random discovery";
NodeId randNodeId;
NodeID randNodeId;
crypto::Nonce::get().ref().copyTo(randNodeId.ref().cropped(0, h256::size));
crypto::Nonce::get().ref().copyTo(randNodeId.ref().cropped(h256::size, h256::size));
doDiscover(randNodeId);

libp2p/NodeTable.h (40)

@ -40,7 +40,7 @@ namespace p2p
*/
struct NodeEntry: public Node
{
NodeEntry(NodeId const& _src, Public const& _pubk, NodeIPEndpoint const& _gw);
NodeEntry(NodeID const& _src, Public const& _pubk, NodeIPEndpoint const& _gw);
unsigned const distance; ///< Node's distance (xor of _src as integer).
bool pending = true; ///< Node will be ignored until Pong is received
};
@ -56,13 +56,13 @@ class NodeTableEventHandler
{
friend class NodeTable;
public:
virtual void processEvent(NodeId const& _n, NodeTableEventType const& _e) = 0;
virtual void processEvent(NodeID const& _n, NodeTableEventType const& _e) = 0;
protected:
/// Called by NodeTable on behalf of an implementation (Host) to process new events without blocking nodetable.
void processEvents()
{
std::list<std::pair<NodeId, NodeTableEventType>> events;
std::list<std::pair<NodeID, NodeTableEventType>> events;
{
Guard l(x_events);
if (!m_nodeEventHandler.size())
@ -78,11 +78,11 @@ protected:
}
/// Called by NodeTable to append event.
virtual void appendEvent(NodeId _n, NodeTableEventType _e) { Guard l(x_events); m_nodeEventHandler.push_back(_n); m_events[_n] = _e; }
virtual void appendEvent(NodeID _n, NodeTableEventType _e) { Guard l(x_events); m_nodeEventHandler.push_back(_n); m_events[_n] = _e; }
Mutex x_events;
std::list<NodeId> m_nodeEventHandler;
std::unordered_map<NodeId, NodeTableEventType> m_events;
std::list<NodeID> m_nodeEventHandler;
std::unordered_map<NodeID, NodeTableEventType> m_events;
};
class NodeTable;
@ -123,8 +123,8 @@ class NodeTable: UDPSocketEvents, public std::enable_shared_from_this<NodeTable>
friend std::ostream& operator<<(std::ostream& _out, NodeTable const& _nodeTable);
using NodeSocket = UDPSocket<NodeTable, 1280>;
using TimePoint = std::chrono::steady_clock::time_point; ///< Steady time point.
using NodeIdTimePoint = std::pair<NodeId, TimePoint>;
using EvictionTimeout = std::pair<NodeIdTimePoint, NodeId>; ///< First NodeId (NodeIdTimePoint) may be evicted and replaced with second NodeId.
using NodeIdTimePoint = std::pair<NodeID, TimePoint>;
using EvictionTimeout = std::pair<NodeIdTimePoint, NodeID>; ///< First NodeID (NodeIdTimePoint) may be evicted and replaced with second NodeID.
public:
enum NodeRelation { Unknown = 0, Known };
@ -135,7 +135,7 @@ public:
~NodeTable();
/// Returns distance based on xor metric two node ids. Used by NodeEntry and NodeTable.
static unsigned distance(NodeId const& _a, NodeId const& _b) { u256 d = sha3(_a) ^ sha3(_b); unsigned ret; for (ret = 0; d >>= 1; ++ret) {}; return ret; }
static unsigned distance(NodeID const& _a, NodeID const& _b) { u256 d = sha3(_a) ^ sha3(_b); unsigned ret; for (ret = 0; d >>= 1; ++ret) {}; return ret; }
/// Set event handler for NodeEntryAdded and NodeEntryDropped events.
void setEventHandler(NodeTableEventHandler* _handler) { m_nodeEventHandler.reset(_handler); }
@ -143,11 +143,11 @@ public:
/// Called by implementation which provided handler to process NodeEntryAdded/NodeEntryDropped events. Events are coalesced by type whereby old events are ignored.
void processEvents();
/// Add node. Node will be pinged and empty shared_ptr is returned if node has never been seen or NodeId is empty.
/// Add node. Node will be pinged and empty shared_ptr is returned if node has never been seen or NodeID is empty.
std::shared_ptr<NodeEntry> addNode(Node const& _node, NodeRelation _relation = NodeRelation::Unknown);
/// Returns list of node ids active in node table.
std::list<NodeId> nodes() const;
std::list<NodeID> nodes() const;
/// Returns node count.
unsigned count() const { return m_nodes.size(); }
@ -156,10 +156,10 @@ public:
std::list<NodeEntry> snapshot() const;
/// Returns true if node id is in node table.
bool haveNode(NodeId const& _id) { Guard l(x_nodes); return m_nodes.count(_id) > 0; }
bool haveNode(NodeID const& _id) { Guard l(x_nodes); return m_nodes.count(_id) > 0; }
/// Returns the Node to the corresponding node id or the empty Node if that id is not found.
Node node(NodeId const& _id);
Node node(NodeID const& _id);
#if defined(BOOST_AUTO_TEST_SUITE) || defined(_MSC_VER) // MSVC includes access specifier in symbol name
protected:
@ -202,14 +202,14 @@ private:
NodeEntry center() const { return NodeEntry(m_node.id, m_node.publicKey(), m_node.endpoint); }
/// Used by asynchronous operations to return NodeEntry which is active and managed by node table.
std::shared_ptr<NodeEntry> nodeEntry(NodeId _id);
std::shared_ptr<NodeEntry> nodeEntry(NodeID _id);
/// Used to discovery nodes on network which are close to the given target.
/// Sends s_alpha concurrent requests to nodes nearest to target, for nodes nearest to target, up to s_maxSteps rounds.
void doDiscover(NodeId _target, unsigned _round = 0, std::shared_ptr<std::set<std::shared_ptr<NodeEntry>>> _tried = std::shared_ptr<std::set<std::shared_ptr<NodeEntry>>>());
void doDiscover(NodeID _target, unsigned _round = 0, std::shared_ptr<std::set<std::shared_ptr<NodeEntry>>> _tried = std::shared_ptr<std::set<std::shared_ptr<NodeEntry>>>());
/// Returns nodes from node table which are closest to target.
std::vector<std::shared_ptr<NodeEntry>> nearestNodeEntries(NodeId _target);
std::vector<std::shared_ptr<NodeEntry>> nearestNodeEntries(NodeID _target);
/// Asynchronously drops _leastSeen node if it doesn't reply and adds _new node, otherwise _new node is thrown away.
void evict(std::shared_ptr<NodeEntry> _leastSeen, std::shared_ptr<NodeEntry> _new);
@ -247,7 +247,7 @@ private:
Secret m_secret; ///< This nodes secret key.
mutable Mutex x_nodes; ///< LOCK x_state first if both locks are required. Mutable for thread-safe copy in nodes() const.
std::unordered_map<NodeId, std::shared_ptr<NodeEntry>> m_nodes; ///< Known Node Endpoints
std::unordered_map<NodeID, std::shared_ptr<NodeEntry>> m_nodes; ///< Known Node Endpoints
mutable Mutex x_state; ///< LOCK x_state first if both x_nodes and x_state locks are required.
std::array<NodeBucket, s_bins> m_state; ///< State of p2p node network.
@ -333,13 +333,13 @@ struct Pong: RLPXDatagram<Pong>
* Minimum Encoded Size: 21 bytes
* Maximum Encoded Size: 30 bytes
*
* target: NodeId of node. The responding node will send back nodes closest to the target.
* target: NodeID of node. The responding node will send back nodes closest to the target.
*
*/
struct FindNode: RLPXDatagram<FindNode>
{
FindNode(bi::udp::endpoint _ep): RLPXDatagram<FindNode>(_ep) {}
FindNode(bi::udp::endpoint _ep, NodeId _target): RLPXDatagram<FindNode>(_ep), target(_target), ts(futureFromEpoch(std::chrono::seconds(60))) {}
FindNode(bi::udp::endpoint _ep, NodeID _target): RLPXDatagram<FindNode>(_ep), target(_target), ts(futureFromEpoch(std::chrono::seconds(60))) {}
static const uint8_t type = 3;
@ -360,7 +360,7 @@ struct Neighbours: RLPXDatagram<Neighbours>
Neighbour(Node const& _node): endpoint(_node.endpoint), node(_node.id) {}
Neighbour(RLP const& _r): endpoint(_r) { node = h512(_r[3].toBytes()); }
NodeIPEndpoint endpoint;
NodeId node;
NodeID node;
void streamRLP(RLPStream& _s) const { _s.appendList(4); endpoint.streamRLP(_s, NodeIPEndpoint::StreamInline); _s << node; }
};
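Aside (not part of the diff): NodeTable::distance above is a log2-style XOR metric taken over sha3 of the two NodeIDs. The standalone helper below runs the same loop on plain integers to show what the returned bucket index means:

    // Index of the highest bit in which the two values differ; NodeTable applies
    // this loop to sha3(_a) ^ sha3(_b).
    unsigned logDistance(unsigned _a, unsigned _b)
    {
        unsigned d = _a ^ _b;
        unsigned ret = 0;
        for (; d >>= 1; ++ret) {}
        return ret;
    }
    // logDistance(0b1000, 0b0001) == 3: bit 3 is the highest differing bit.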

libp2p/Peer.cpp (2)

@ -38,7 +38,7 @@ bool Peer::shouldReconnect() const
unsigned Peer::fallbackSeconds() const
{
if (required)
if (peerType == PeerType::Required)
return 5;
switch (m_lastDisconnect)
{

libp2p/Peer.h (2)

@ -58,7 +58,7 @@ class Peer: public Node
public:
/// Construct Peer from Node.
Peer(Node const& _node): Node(_node.id, _node.endpoint, _node.required) {}
Peer(Node const& _node): Node(_node) {}
bool isOffline() const { return !m_session.lock(); }

libp2p/RLPxHandshake.h (4)

@ -66,7 +66,7 @@ public:
RLPXHandshake(Host* _host, std::shared_ptr<RLPXSocket> const& _socket): m_host(_host), m_originated(false), m_socket(_socket), m_idleTimer(m_socket->ref().get_io_service()) { crypto::Nonce::get().ref().copyTo(m_nonce.ref()); }
/// Setup outbound connection.
RLPXHandshake(Host* _host, std::shared_ptr<RLPXSocket> const& _socket, NodeId _remote): m_host(_host), m_remote(_remote), m_originated(true), m_socket(_socket), m_idleTimer(m_socket->ref().get_io_service()) { crypto::Nonce::get().ref().copyTo(m_nonce.ref()); }
RLPXHandshake(Host* _host, std::shared_ptr<RLPXSocket> const& _socket, NodeID _remote): m_host(_host), m_remote(_remote), m_originated(true), m_socket(_socket), m_idleTimer(m_socket->ref().get_io_service()) { crypto::Nonce::get().ref().copyTo(m_nonce.ref()); }
~RLPXHandshake() {}
@ -104,7 +104,7 @@ protected:
Host* m_host; ///< Host which provides m_alias, protocolVersion(), m_clientVersion, caps(), and TCP listenPort().
/// Node id of remote host for socket.
NodeId m_remote; ///< Public address of remote host.
NodeID m_remote; ///< Public address of remote host.
bool m_originated = false; ///< True if connection is outbound.
/// Buffers for encoded and decoded handshake phases

libp2p/Session.cpp (4)

@ -76,9 +76,9 @@ ReputationManager& Session::repMan() const
return m_server->repMan();
}
NodeId Session::id() const
NodeID Session::id() const
{
return m_peer ? m_peer->id : NodeId();
return m_peer ? m_peer->id : NodeID();
}
void Session::addRating(int _r)

libp2p/Session.h (2)

@ -65,7 +65,7 @@ public:
bool isConnected() const { return m_socket->ref().is_open(); }
NodeId id() const;
NodeID id() const;
unsigned socketId() const { Guard l(x_info); return m_info.socketId; }
template <class PeerCap>

libweb3jsonrpc/WebThreeStubServerBase.cpp (14)

@ -525,19 +525,7 @@ bool WebThreeStubServerBase::admin_net_stop(std::string const& _session)
bool WebThreeStubServerBase::admin_net_connect(std::string const& _node, std::string const& _session)
{
ADMIN;
p2p::NodeId id;
bi::tcp::endpoint ep;
if (_node.substr(0, 8) == "enode://" && _node.find('@') == 136)
{
id = p2p::NodeId(_node.substr(8, 128));
ep = p2p::Network::resolveHost(_node.substr(137));
}
else
ep = p2p::Network::resolveHost(_node);
if (ep == bi::tcp::endpoint())
return false;
network()->requirePeer(id, ep);
network()->addPeer(p2p::NodeSpec(_node), p2p::PeerType::Required);
return true;
}

libwebthree/WebThree.cpp (8)

@ -128,14 +128,18 @@ bytes WebThreeDirect::saveNetwork()
return m_net.saveNetwork();
}
void WebThreeDirect::addNode(NodeId const& _node, bi::tcp::endpoint const& _host)
void WebThreeDirect::addNode(NodeID const& _node, bi::tcp::endpoint const& _host)
{
m_net.addNode(_node, NodeIPEndpoint(_host.address(), _host.port(), _host.port()));
}
void WebThreeDirect::requirePeer(NodeId const& _node, bi::tcp::endpoint const& _host)
void WebThreeDirect::requirePeer(NodeID const& _node, bi::tcp::endpoint const& _host)
{
m_net.requirePeer(_node, NodeIPEndpoint(_host.address(), _host.port(), _host.port()));
}
void WebThreeDirect::addPeer(NodeSpec const& _s, PeerType _t)
{
m_net.addPeer(_s, _t);
}

libwebthree/WebThree.h (26)

@ -62,11 +62,14 @@ public:
/// Same as peers().size(), but more efficient.
virtual size_t peerCount() const = 0;
/// Generalised peer addition.
virtual void addPeer(p2p::NodeSpec const& _node, p2p::PeerType _t) = 0;
/// Add node to connect to.
virtual void addNode(p2p::NodeId const& _node, bi::tcp::endpoint const& _hostEndpoint) = 0;
virtual void addNode(p2p::NodeID const& _node, bi::tcp::endpoint const& _hostEndpoint) = 0;
/// Require connection to peer.
virtual void requirePeer(p2p::NodeId const& _node, bi::tcp::endpoint const& _endpoint) = 0;
virtual void requirePeer(p2p::NodeID const& _node, bi::tcp::endpoint const& _endpoint) = 0;
/// Save peers
virtual dev::bytes saveNetwork() = 0;
@ -79,7 +82,7 @@ public:
virtual p2p::NetworkPreferences const& networkPreferences() const = 0;
virtual void setNetworkPreferences(p2p::NetworkPreferences const& _n, bool _dropPeers) = 0;
virtual p2p::NodeId id() const = 0;
virtual p2p::NodeID id() const = 0;
/// Gets the nodes.
virtual p2p::Peers nodes() const = 0;
@ -147,23 +150,26 @@ public:
/// Same as peers().size(), but more efficient.
size_t peerCount() const override;
/// Generalised peer addition.
virtual void addPeer(p2p::NodeSpec const& _node, p2p::PeerType _t) override;
/// Add node to connect to.
virtual void addNode(p2p::NodeId const& _node, bi::tcp::endpoint const& _hostEndpoint) override;
virtual void addNode(p2p::NodeID const& _node, bi::tcp::endpoint const& _hostEndpoint) override;
/// Add node to connect to.
void addNode(p2p::NodeId const& _node, std::string const& _hostString) { addNode(_node, p2p::Network::resolveHost(_hostString)); }
void addNode(p2p::NodeID const& _node, std::string const& _hostString) { addNode(_node, p2p::Network::resolveHost(_hostString)); }
/// Add node to connect to.
void addNode(bi::tcp::endpoint const& _endpoint) { addNode(p2p::NodeId(), _endpoint); }
void addNode(bi::tcp::endpoint const& _endpoint) { addNode(p2p::NodeID(), _endpoint); }
/// Add node to connect to.
void addNode(std::string const& _hostString) { addNode(p2p::NodeId(), _hostString); }
void addNode(std::string const& _hostString) { addNode(p2p::NodeID(), _hostString); }
/// Require connection to peer.
void requirePeer(p2p::NodeId const& _node, bi::tcp::endpoint const& _endpoint) override;
void requirePeer(p2p::NodeID const& _node, bi::tcp::endpoint const& _endpoint) override;
/// Require connection to peer.
void requirePeer(p2p::NodeId const& _node, std::string const& _hostString) { requirePeer(_node, p2p::Network::resolveHost(_hostString)); }
void requirePeer(p2p::NodeID const& _node, std::string const& _hostString) { requirePeer(_node, p2p::Network::resolveHost(_hostString)); }
/// Save peers
dev::bytes saveNetwork() override;
@ -182,7 +188,7 @@ public:
p2p::NodeInfo nodeInfo() const override { return m_net.nodeInfo(); }
p2p::NodeId id() const override { return m_net.id(); }
p2p::NodeID id() const override { return m_net.id(); }
std::string enode() const override { return m_net.enode(); }
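Illustrative use of the reworked WebThreeDirect peer API (host strings are placeholders; not from the patch):

    #include <libwebthree/WebThree.h>
    using namespace dev;

    void webThreeDemo(WebThreeDirect& _web3)
    {
        // Generalised entry point added by this commit.
        _web3.addPeer(p2p::NodeSpec("198.51.100.7:30303"), p2p::PeerType::Optional);

        // Existing convenience overload, now keyed by NodeID; an empty id means "unknown".
        _web3.addNode(p2p::NodeID(), "198.51.100.8:30303");
    }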

mix/Web3Server.cpp (14)

@ -44,13 +44,19 @@ class EmptyNetwork : public dev::WebThreeNetworkFace
return 0;
}
void addNode(p2p::NodeId const& _node, bi::tcp::endpoint const& _hostEndpoint) override
void addPeer(p2p::NodeSpec const& _node, p2p::PeerType _t) override
{
(void)_node;
(void)_t;
}
void addNode(p2p::NodeID const& _node, bi::tcp::endpoint const& _hostEndpoint) override
{
(void)_node;
(void)_hostEndpoint;
}
void requirePeer(p2p::NodeId const& _node, bi::tcp::endpoint const& _endpoint) override
void requirePeer(p2p::NodeID const& _node, bi::tcp::endpoint const& _endpoint) override
{
(void)_node;
(void)_endpoint;
@ -87,9 +93,9 @@ class EmptyNetwork : public dev::WebThreeNetworkFace
std::string enode() const override { return ""; }
p2p::NodeId id() const override
p2p::NodeID id() const override
{
return p2p::NodeId();
return p2p::NodeID();
}
p2p::Peers nodes() const override

test/libp2p/capability.cpp (4)

@ -73,14 +73,14 @@ public:
TestHostCapability(): Worker("test") {}
virtual ~TestHostCapability() {}
void sendTestMessage(NodeId const& _id, int _x)
void sendTestMessage(NodeID const& _id, int _x)
{
for (auto i: peerSessions())
if (_id == i.second->id)
i.first->cap<TestCapability>().get()->sendTestMessage(_x);
}
std::pair<int, int> retrieveTestData(NodeId const& _id)
std::pair<int, int> retrieveTestData(NodeID const& _id)
{
int cnt = 0;
int checksum = 0;

test/libp2p/net.cpp (8)

@ -159,13 +159,13 @@ BOOST_AUTO_TEST_CASE(requestTimeout)
return;
using TimePoint = std::chrono::steady_clock::time_point;
using RequestTimeout = std::pair<NodeId, TimePoint>;
using RequestTimeout = std::pair<NodeID, TimePoint>;
std::chrono::milliseconds timeout(300);
std::list<RequestTimeout> timeouts;
NodeId nodeA(sha3("a"));
NodeId nodeB(sha3("b"));
NodeID nodeA(sha3("a"));
NodeID nodeB(sha3("b"));
timeouts.push_back(make_pair(nodeA, chrono::steady_clock::now()));
this_thread::sleep_for(std::chrono::milliseconds(100));
timeouts.push_back(make_pair(nodeB, chrono::steady_clock::now()));
@ -385,7 +385,7 @@ BOOST_AUTO_TEST_CASE(nodeTableReturnsUnspecifiedNode)
ba::io_service io;
NodeTable t(io, KeyPair::create(), NodeIPEndpoint(bi::address::from_string("127.0.0.1"), 30303, 30303));
if (Node n = t.node(NodeId()))
if (Node n = t.node(NodeID()))
BOOST_REQUIRE(false);
else
BOOST_REQUIRE(n == UnspecifiedNode);

test/libp2p/peer.cpp (6)

@ -217,15 +217,15 @@ BOOST_AUTO_TEST_CASE(emptySharedPeer)
shared_ptr<Peer> p;
BOOST_REQUIRE(!p);
std::map<NodeId, std::shared_ptr<Peer>> peers;
p = peers[NodeId()];
std::map<NodeID, std::shared_ptr<Peer>> peers;
p = peers[NodeID()];
BOOST_REQUIRE(!p);
p.reset(new Peer(UnspecifiedNode));
BOOST_REQUIRE(!p->id);
BOOST_REQUIRE(!*p);
p.reset(new Peer(Node(NodeId(EmptySHA3), UnspecifiedNodeIPEndpoint)));
p.reset(new Peer(Node(NodeID(EmptySHA3), UnspecifiedNodeIPEndpoint)));
BOOST_REQUIRE(!(!*p));
BOOST_REQUIRE(*p);
BOOST_REQUIRE(p);
