|
|
@@ -174,17 +174,25 @@ void Host::doneWorking()
void Host::startPeerSession(Public const& _id, RLP const& _rlp, RLPXFrameIO* _io, bi::tcp::endpoint _endpoint)
{
    // session maybe ingress or egress so m_peers and node table entries may not exist
    shared_ptr<Peer> p;
    if (!m_peers.count(_id))
    ETH_RECURSIVE_GUARDED(x_sessions)
    {
        p.reset(new Peer());
        p->id = _id;
        if (m_peers.count(_id))
            p = m_peers[_id];
        else
        {
            // peer doesn't exist, try to get port info from node table
            if (Node n = m_nodeTable->node(_id))
                p.reset(new Peer(n));
            else
                p.reset(new Peer(Node(_id, UnspecifiedNodeIPEndpoint)));
            m_peers[_id] = p;
        }
    }
    else
        p = m_peers[_id];
    if (p->isOffline())
        p->m_lastConnected = std::chrono::system_clock::now();
    p->endpoint.tcp.address(_endpoint.address());
    p->endpoint.address = _endpoint.address();

    auto protocolVersion = _rlp[0].toInt<unsigned>();
    auto clientVersion = _rlp[1].toString();
|
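Note: several hunks in this diff swap an explicit "RecursiveGuard l(x_sessions);" for an ETH_RECURSIVE_GUARDED(x_sessions) { ... } block. A minimal sketch of one way such a block-scoped guard macro can be written is below; the names and exact shape are stand-ins, not the project's actual definition in libdevcore.

    #include <mutex>

    // Bool-convertible RAII lock: 'b' lets the macro's for-statement run the
    // attached block exactly once while the mutex stays locked throughout.
    struct RecursiveGuardBool
    {
        explicit RecursiveGuardBool(std::recursive_mutex& _m): l(_m) {}
        bool b = true;
        std::unique_lock<std::recursive_mutex> l;
    };

    #define RECURSIVE_GUARDED(MUTEX) \
        for (RecursiveGuardBool eth_guard_(MUTEX); eth_guard_.b; eth_guard_.b = false)

    // usage:
    //   std::recursive_mutex x_sessions;
    //   RECURSIVE_GUARDED(x_sessions)
    //   {
    //       // m_peers is only touched while x_sessions is held
    //   }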
|
@@ -231,46 +239,32 @@ void Host::startPeerSession(Public const& _id, RLP const& _rlp, RLPXFrameIO* _io
    }

    clog(NetNote) << "p2p.host.peer.register" << _id.abridged();
    StructuredLogger::p2pConnected(_id.abridged(), ps->m_peer->peerEndpoint(), ps->m_peer->m_lastConnected, clientVersion, peerCount());
    StructuredLogger::p2pConnected(_id.abridged(), ps->m_peer->endpoint, ps->m_peer->m_lastConnected, clientVersion, peerCount());
}

void Host::onNodeTableEvent(NodeId const& _n, NodeTableEventType const& _e)
{

    if (_e == NodeEntryAdded)
    {
        clog(NetNote) << "p2p.host.nodeTable.events.nodeEntryAdded " << _n;

        auto n = m_nodeTable->node(_n);
        if (n)
        // only add iff node is in node table
        if (Node n = m_nodeTable->node(_n))
        {
            shared_ptr<Peer> p;
            ETH_RECURSIVE_GUARDED(x_sessions)
            {
                RecursiveGuard l(x_sessions);
                if (m_peers.count(_n))
                {
                    p = m_peers[_n];
                    p->endpoint = n.endpoint;
                }
                else
                {
                    // TODO p2p: construct peer from node
                    p.reset(new Peer());
                    p->id = _n;
                    p->endpoint = NodeIPEndpoint(n.endpoint.udp, n.endpoint.tcp);
                    p->required = n.required;
                    p.reset(new Peer(n));
                    m_peers[_n] = p;

                    clog(NetNote) << "p2p.host.peers.events.peersAdded " << _n << "udp:" << p->endpoint.udp.address() << "tcp:" << p->endpoint.tcp.address();
                    clog(NetNote) << "p2p.host.peers.events.peerAdded " << _n << p->endpoint;
                }
                p->endpoint.tcp = n.endpoint.tcp;
            }

            // TODO: Implement similar to discover. Attempt connecting to nodes
            // until ideal peer count is reached; if all nodes are tried,
            // repeat. Notably, this is an integrated process such that
            // when onNodeTableEvent occurs we should also update +/-
            // the list of nodes to be tried. Thus:
            // 1) externalize connection attempts
            // 2) attempt copies potentialPeer list
            // 3) replace this logic w/maintenance of potentialPeers
            if (peerCount() < m_idealPeerCount)
                connect(p);
        }
|
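Note: the new lookups above use the declare-in-condition form "if (Node n = m_nodeTable->node(_id))", which only compiles if Node is testable in a boolean context. A self-contained illustration of that shape follows; the stand-in type, its emptiness check, and the lookup function are assumptions for the example, not the real Node API.

    #include <iostream>
    #include <string>

    struct NodeSketch
    {
        std::string id;   // empty id stands in for "no such node"
        explicit operator bool() const { return !id.empty(); }
    };

    // hypothetical table lookup: returns a default-constructed (falsey) node on a miss
    NodeSketch lookupSketch(std::string const& _id)
    {
        return _id == "known" ? NodeSketch{_id} : NodeSketch{};
    }

    int main()
    {
        // n is scoped to the if/else and only used when the lookup succeeded
        if (NodeSketch n = lookupSketch("known"))
            std::cout << "found " << n.id << "\n";
        else
            std::cout << "fall back to an unspecified endpoint\n";
    }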
|
@@ -278,7 +272,6 @@ void Host::onNodeTableEvent(NodeId const& _n, NodeTableEventType const& _e)
    else if (_e == NodeEntryDropped)
    {
        clog(NetNote) << "p2p.host.nodeTable.events.NodeEntryDropped " << _n;

        RecursiveGuard l(x_sessions);
        m_peers.erase(_n);
    }
|
|
@@ -381,7 +374,7 @@ string Host::pocHost()
    return "poc-" + strs[1] + ".ethdev.com";
}

void Host::addNode(NodeId const& _node, bi::address const& _addr, unsigned short _udpNodePort, unsigned short _tcpPeerPort)
void Host::addNode(NodeId const& _node, NodeIPEndpoint const& _endpoint)
{
    // return if network is stopped while waiting on Host::run() or nodeTable to start
    while (!haveNetwork())
|
|
@@ -390,43 +383,33 @@ void Host::addNode(NodeId const& _node, bi::address const& _addr, unsigned short
        else
            return;

    if (_tcpPeerPort < 30300 || _tcpPeerPort > 30305)
        cwarn << "Non-standard port being recorded: " << _tcpPeerPort;
    if (_endpoint.tcpPort < 30300 || _endpoint.tcpPort > 30305)
        clog(NetConnect) << "Non-standard port being recorded: " << _endpoint.tcpPort;

    if (_tcpPeerPort >= /*49152*/32768)
    {
        cwarn << "Private port being recorded - setting to 0";
        _tcpPeerPort = 0;
    }

    if (m_nodeTable)
        m_nodeTable->addNode(Node(_node, NodeIPEndpoint(bi::udp::endpoint(_addr, _udpNodePort), bi::tcp::endpoint(_addr, _tcpPeerPort))));
        m_nodeTable->addNode(Node(_node, _endpoint));
}

void Host::requirePeer(NodeId const& _n, bi::address const& _udpAddr, unsigned short _udpPort, bi::address const& _tcpAddr, unsigned short _tcpPort)
void Host::requirePeer(NodeId const& _n, NodeIPEndpoint const& _endpoint)
{
    auto naddr = _udpAddr;
    auto paddr = _tcpAddr.is_unspecified() ? naddr : _tcpAddr;
    auto udp = bi::udp::endpoint(naddr, _udpPort);
    auto tcp = bi::tcp::endpoint(paddr, _tcpPort ? _tcpPort : _udpPort);
    Node node(_n, NodeIPEndpoint(udp, tcp));
    Node node(_n, _endpoint, true);
    if (_n)
    {
        // add or replace peer
        // create or update m_peers entry
        shared_ptr<Peer> p;
        ETH_RECURSIVE_GUARDED(x_sessions)
        {
            RecursiveGuard l(x_sessions);
            if (m_peers.count(_n))
            {
                p = m_peers[_n];
                p->endpoint = node.endpoint;
                p->required = true;
            }
            else
            {
                p.reset(new Peer());
                p->id = _n;
                p->required = true;
                p.reset(new Peer(node));
                m_peers[_n] = p;
            }
            p->endpoint.udp = node.endpoint.udp;
            p->endpoint.tcp = node.endpoint.tcp;
        }
        connect(p);
    }
|
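Note: these hunks collapse the separate UDP/TCP address and port arguments into a single NodeIPEndpoint. Judging only from the usage visible in this diff (the address, udpPort and tcpPort fields, the (address, udpPort, tcpPort) constructor, and isAllowed()), the amalgamated type looks roughly like the stand-in below; the field list mirrors the diff, but the isAllowed() rule shown is a guess, not the real implementation.

    #include <boost/asio/ip/address.hpp>
    #include <cstdint>

    namespace bi = boost::asio::ip;

    struct EndpointSketch
    {
        bi::address address;   // one address shared by UDP discovery and TCP p2p
        uint16_t udpPort = 0;  // node-table (discovery) port
        uint16_t tcpPort = 0;  // peer-session port

        // Plausible gate for what may be relayed or persisted (assumption):
        bool isAllowed() const { return !address.is_unspecified() && tcpPort != 0; }
    };

    // e.g. what Host::startedWorking() builds for the node table further down:
    //   EndpointSketch{bi::address::from_string("127.0.0.1"), 30303, 30303}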
|
@@ -441,7 +424,7 @@ void Host::requirePeer(NodeId const& _n, bi::address const& _udpAddr, unsigned s
        {
            if (!_ec && m_nodeTable)
                if (auto n = m_nodeTable->node(_n))
                    requirePeer(n.id, n.endpoint.udp.address(), n.endpoint.udp.port(), n.endpoint.tcp.address(), n.endpoint.tcp.port());
                    requirePeer(n.id, n.endpoint);
        });
    }
}
|
|
@@ -482,22 +465,23 @@ void Host::connect(std::shared_ptr<Peer> const& _p)
        m_pendingPeerConns.insert(nptr);
    }

    clog(NetConnect) << "Attempting connection to node" << _p->id.abridged() << "@" << _p->peerEndpoint() << "from" << id().abridged();
    bi::tcp::endpoint ep(_p->endpoint);
    clog(NetConnect) << "Attempting connection to node" << _p->id.abridged() << "@" << ep << "from" << id().abridged();
    auto socket = make_shared<RLPXSocket>(new bi::tcp::socket(m_ioService));
    socket->ref().async_connect(_p->peerEndpoint(), [=](boost::system::error_code const& ec)
    socket->ref().async_connect(ep, [=](boost::system::error_code const& ec)
    {
        _p->m_lastAttempted = std::chrono::system_clock::now();
        _p->m_failedAttempts++;

        if (ec)
        {
            clog(NetConnect) << "Connection refused to node" << _p->id.abridged() << "@" << _p->peerEndpoint() << "(" << ec.message() << ")";
            clog(NetConnect) << "Connection refused to node" << _p->id.abridged() << "@" << ep << "(" << ec.message() << ")";
            // Manually set error (session not present)
            _p->m_lastDisconnect = TCPError;
        }
        else
        {
            clog(NetConnect) << "Connecting to" << _p->id.abridged() << "@" << _p->peerEndpoint();
            clog(NetConnect) << "Connecting to" << _p->id.abridged() << "@" << ep;
            auto handshake = make_shared<RLPXHandshake>(this, socket, _p->id);
            {
                Guard l(x_connecting);
|
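Note: in the hunk above the endpoint is copied once into a local "ep" and that copy is what both the log lines and the async_connect completion handler use, rather than re-reading _p->peerEndpoint() after the operation was queued. A standalone sketch of the same capture-by-value pattern, using a plain asio socket instead of RLPXSocket:

    #include <boost/asio.hpp>
    #include <iostream>
    #include <memory>

    namespace bi = boost::asio::ip;

    void connectSketch(boost::asio::io_service& _io, bi::tcp::endpoint const& _peer)
    {
        auto socket = std::make_shared<bi::tcp::socket>(_io);
        bi::tcp::endpoint ep = _peer;  // copied once, captured by value below
        socket->async_connect(ep, [socket, ep](boost::system::error_code const& ec)
        {
            if (ec)
                std::cerr << "connection refused to " << ep << " (" << ec.message() << ")\n";
            else
                std::cerr << "connected to " << ep << "\n";
            // the shared_ptr capture keeps the socket alive until the handler runs
        });
    }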
|
@@ -637,7 +621,7 @@ void Host::startedWorking()
    else
        clog(NetNote) << "p2p.start.notice id:" << id().abridged() << "TCP Listen port is invalid or unavailable.";

    shared_ptr<NodeTable> nodeTable(new NodeTable(m_ioService, m_alias, bi::address::from_string(listenAddress()), listenPort()));
    shared_ptr<NodeTable> nodeTable(new NodeTable(m_ioService, m_alias, NodeIPEndpoint(bi::address::from_string(listenAddress()), listenPort(), listenPort())));
    nodeTable->setEventHandler(new HostNodeTableHandler(*this));
    m_nodeTable = nodeTable;
    restoreNetwork(&m_restoreNetwork);
|
|
@@ -696,14 +680,13 @@ bytes Host::saveNetwork() const
    {
        // Only save peers which have connected within 2 days, with properly-advertised port and public IP address
        // todo: e2e ipv6 support
        bi::tcp::endpoint endpoint(p.peerEndpoint());
        if (!endpoint.address().is_v4())
        if (!p.endpoint.address.is_v4())
            continue;

        if (chrono::system_clock::now() - p.m_lastConnected < chrono::seconds(3600 * 48) && endpoint.port() > 0 && endpoint.port() < /*49152*/32768 && p.id != id() && !isPrivateAddress(p.endpoint.udp.address()) && !isPrivateAddress(endpoint.address()))
        if (chrono::system_clock::now() - p.m_lastConnected < chrono::seconds(3600 * 48) && p.endpoint.tcpPort > 0 && p.id != id() && (p.required || p.endpoint.isAllowed()))
        {
            network.appendList(10);
            network << endpoint.address().to_v4().to_bytes() << endpoint.port() << p.id << p.required
            network << p.endpoint.address.to_v4().to_bytes() << p.endpoint.tcpPort << p.id << p.required
                << chrono::duration_cast<chrono::seconds>(p.m_lastConnected.time_since_epoch()).count()
                << chrono::duration_cast<chrono::seconds>(p.m_lastAttempted.time_since_epoch()).count()
                << p.m_failedAttempts << (unsigned)p.m_lastDisconnect << p.m_score << p.m_rating;
|
|
@@ -715,14 +698,14 @@ bytes Host::saveNetwork() const
    {
        auto state = m_nodeTable->snapshot();
        state.sort();
        for (auto const& s: state)
        for (auto const& entry: state)
        {
            network.appendList(3);
            if (s.endpoint.tcp.address().is_v4())
                network << s.endpoint.tcp.address().to_v4().to_bytes();
            if (entry.endpoint.address.is_v4())
                network << entry.endpoint.address.to_v4().to_bytes();
            else
                network << s.endpoint.tcp.address().to_v6().to_bytes();
            network << s.endpoint.tcp.port() << s.id;
                network << entry.endpoint.address.to_v6().to_bytes();
            network << entry.endpoint.tcpPort << entry.id;
            count++;
        }
    }
|
|
@@ -755,36 +738,28 @@ void Host::restoreNetwork(bytesConstRef _b)

        for (auto i: r[2])
        {
            // todo: e2e ipv6 support
            // bi::tcp::endpoint(bi::address_v6(i[0].toArray<byte, 16>()), i[1].toInt<short>());
            if (i[0].itemCount() != 4)
                continue;
            bi::tcp::endpoint tcp;
            bi::udp::endpoint udp;
            tcp = bi::tcp::endpoint(bi::address_v4(i[0].toArray<byte, 4>()), i[1].toInt<short>());
            udp = bi::udp::endpoint(bi::address_v4(i[0].toArray<byte, 4>()), i[1].toInt<short>());
            if (isPrivateAddress(tcp.address()) || isPrivateAddress(udp.address()))
                continue;

            auto id = (NodeId)i[2];
            if (i.itemCount() == 3)
                m_nodeTable->addNode(id, udp, tcp);
            // todo: ipv6, bi::address_v6(i[0].toArray<byte, 16>()
            Node n((NodeId)i[2], NodeIPEndpoint(bi::address_v4(i[0].toArray<byte, 4>()), i[1].toInt<uint16_t>(), i[1].toInt<uint16_t>()));
            if (i.itemCount() == 3 && n.endpoint.isAllowed())
                m_nodeTable->addNode(n);
            else if (i.itemCount() == 10)
            {
                shared_ptr<Peer> p = make_shared<Peer>();
                p->id = id;
                p->required = i[3].toInt<bool>();
                n.required = i[3].toInt<bool>();
                if (!n.endpoint.isAllowed() && !n.required)
                    continue;
                shared_ptr<Peer> p = make_shared<Peer>(n);
                p->m_lastConnected = chrono::system_clock::time_point(chrono::seconds(i[4].toInt<unsigned>()));
                p->m_lastAttempted = chrono::system_clock::time_point(chrono::seconds(i[5].toInt<unsigned>()));
                p->m_failedAttempts = i[6].toInt<unsigned>();
                p->m_lastDisconnect = (DisconnectReason)i[7].toInt<unsigned>();
                p->m_score = (int)i[8].toInt<unsigned>();
                p->m_rating = (int)i[9].toInt<unsigned>();
                p->endpoint.tcp = tcp;
                p->endpoint.udp = udp;
                m_peers[p->id] = p;
                if (p->required)
                    requirePeer(p->id, p->endpoint.udp.address(), p->endpoint.udp.port());
                    requirePeer(p->id, n.endpoint);
                else
                    m_nodeTable->addNode(*p.get());
            }
|
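Note: for reference, the peer records written by saveNetwork() and read back by this loop are 10-item RLP lists, while node-table entries are the 3-item variant (address bytes, port, NodeId). The index meanings below are read directly off the code in this diff:

    // i[0]  address bytes (IPv4)        i[5]  lastAttempted (unix seconds)
    // i[1]  tcpPort                     i[6]  failedAttempts
    // i[2]  NodeId                      i[7]  lastDisconnect (DisconnectReason)
    // i[3]  required flag               i[8]  score
    // i[4]  lastConnected (unix secs)   i[9]  rating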
|
|