// NOTE(review): incomplete fragment — the enclosing function and the body of the
// innermost `if` are outside this chunk; formatting restored per the commented
// reference copy of prunePeers() below. Confirm against the full function.
// first work out how many are old enough to kick off.
shared_ptr<Session> worst;
unsigned agedPeers = 0;
for (auto i: m_peers)
	if (!dc.count(i.first))
		if (auto p = i.second.lock())
			if (/*(m_mode != NodeMode::Host || p->m_caps != 0x01) &&*/ chrono::steady_clock::now() > p->m_connect + chrono::milliseconds(old)) // don't throw off new peers; peer-servers should never kick off other peer-servers.
#warning integration: todo grow/prune into 'maintainPeers' & evaluate reputation instead of availability. schedule via deadline timer.
//void Host::growPeers()
//{
// RecursiveGuard l(x_peers);
// int morePeers = (int)m_idealPeerCount - m_peers.size();
// if (morePeers > 0)
// {
// auto toTry = m_ready;
// if (!m_netPrefs.localNetworking)
// toTry -= m_private;
// set<NodeInfo> ns;
// for (auto i: toTry)
// if (m_nodes[m_nodesList[i]]->shouldReconnect())
// ns.insert(*m_nodes[m_nodesList[i]]);
//
// if (ns.size())
// for (NodeInfo const& i: ns)
// {
// connect(m_nodes[i.id]);
// if (!--morePeers)
// return;
// }
// else
// for (auto const& i: m_peers)
// if (auto p = i.second.lock())
// p->ensureNodesRequested();
// }
//}
//
//void Host::prunePeers()
//{
// RecursiveGuard l(x_peers);
// // We'll keep at most twice as many as is ideal, halving what counts as "too young to kill" until we get there.
// set<NodeId> dc;
// for (unsigned old = 15000; m_peers.size() - dc.size() > m_idealPeerCount * 2 && old > 100; old /= 2)
// if (m_peers.size() - dc.size() > m_idealPeerCount)
// {
// // look for worst peer to kick off
// // first work out how many are old enough to kick off.
// shared_ptr<Session> worst;
// unsigned agedPeers = 0;
// for (auto i: m_peers)
// if (!dc.count(i.first))
// if (auto p = i.second.lock())
// if (/*(m_mode != NodeMode::Host || p->m_caps != 0x01) &&*/ chrono::steady_clock::now() > p->m_connect + chrono::milliseconds(old)) // don't throw off new peers; peer-servers should never kick off other peer-servers.
std::unique_ptr<boost::asio::deadline_timer>m_timer;///< Timer which, when network is running, calls scheduler() every c_timerInterval ms.
staticconstunsignedc_timerInterval=100;///< Interval which m_timer is run when network is connected.
unsignedm_lastTick=0;///< Used by run() for scheduling; must not be mutated outside of run().
std::set<Node*>m_pendingNodeConns;/// Used only by connect(Node&) to limit concurrently connecting to same node. See connect(shared_ptr<Node>const&).
std::set<NodeInfo*>m_pendingNodeConns;/// Used only by connect(NodeInfo&) to limit concurrently connecting to same node. See connect(shared_ptr<NodeInfo>const&).
Mutexx_pendingNodeConns;
bi::tcp::endpointm_tcpPublic;///< Our public listening endpoint.
PeerInfom_info;///< Dynamic information about this peer.
unsignedm_protocolVersion=0;///< The protocol version of the peer.
std::shared_ptr<Node>m_node;///< The Node object. Might be null if we constructed using a bare address/port.
std::shared_ptr<NodeInfo>m_node;///< The NodeInfo object. Might be null if we constructed using a bare address/port.
bi::tcp::endpointm_manualEndpoint;///< The endpoint as specified by the constructor.
boolm_force=false;///< If true, ignore IDs being different. This could open you up to MitM attacks.
boolm_dropped=false;///< If true, we've already divested ourselves of this peer. We're just waiting for the reads & writes to fail before the shared_ptr goes OOS and the destructor kicks in.