Browse Source

Distinguish private and public methods of peer mgr

Give private methods a leading _
Remove dead code
patch-2
Neil Booth 7 years ago
parent
commit
79b98b2b54
  1. 354
      electrumx/server/peers.py

354
electrumx/server/peers.py

@ -89,13 +89,13 @@ class PeerSession(ClientSession):
def fail(self, request, reason):
self.logger.error(f'{request} failed: {reason}')
self.peer_mgr.set_verification_status(self.peer, self.kind, False)
self.peer_mgr._set_verification_status(self.peer, self.kind, False)
self.close()
def bad(self, reason):
self.logger.error(f'marking bad: {reason}')
self.peer.mark_bad()
self.peer_mgr.set_verification_status(self.peer, self.kind, False)
self.peer_mgr._set_verification_status(self.peer, self.kind, False)
self.close()
def on_version(self, request):
@ -212,7 +212,7 @@ class PeerSession(ClientSession):
self.bad('bad server.peers.subscribe response')
return
features = self.peer_mgr.features_to_register(self.peer, peers)
features = self.peer_mgr._features_to_register(self.peer, peers)
if features:
self.logger.info(f'registering ourself with "server.add_peer"')
self.send_request('server.add_peer', [features],
@ -231,7 +231,7 @@ class PeerSession(ClientSession):
'''
if not self.all_requests():
self.close()
self.peer_mgr.set_verification_status(self.peer, self.kind, True)
self.peer_mgr._set_verification_status(self.peer, self.kind, True)
class PeerManager(object):
@ -264,24 +264,12 @@ class PeerManager(object):
self.proxy = None
self.last_proxy_try = 0
def my_clearnet_peer(self):
def _my_clearnet_peer(self):
'''Returns the clearnet peer representing this server, if any.'''
clearnet = [peer for peer in self.myselves if not peer.is_tor]
return clearnet[0] if clearnet else None
def info(self):
'''The number of peers.'''
self.set_peer_statuses()
counter = Counter(peer.status for peer in self.peers)
return {
'bad': counter[PEER_BAD],
'good': counter[PEER_GOOD],
'never': counter[PEER_NEVER],
'stale': counter[PEER_STALE],
'total': len(self.peers),
}
def set_peer_statuses(self):
def _set_peer_statuses(self):
'''Set peer statuses.'''
cutoff = time.time() - STALE_SECS
for peer in self.peers:
@ -294,22 +282,7 @@ class PeerManager(object):
else:
peer.status = PEER_NEVER
def rpc_data(self):
'''Peer data for the peers RPC method.'''
self.set_peer_statuses()
descs = ['good', 'stale', 'never', 'bad']
def peer_data(peer):
data = peer.serialize()
data['status'] = descs[peer.status]
return data
def peer_key(peer):
return (peer.bad, -peer.last_good)
return [peer_data(peer) for peer in sorted(self.peers, key=peer_key)]
def features_to_register(self, peer, remote_peers):
def _features_to_register(self, peer, remote_peers):
'''If we should register ourselves to the remote peer, which has
reported the given list of known peers, return the clearnet
identity features to register, otherwise None.
@ -320,7 +293,7 @@ class PeerManager(object):
# are a non-public IP address, or to ourselves.
if not self.env.peer_announce or peer in self.myselves:
return None
my = self.my_clearnet_peer()
my = self._my_clearnet_peer()
if not my or not my.is_public:
return None
# Register if no matches, or ports have changed
@ -329,40 +302,7 @@ class PeerManager(object):
return None
return my.features
def add_peers(self, peers, limit=2, check_ports=False, source=None):
'''Add a limited number of peers that are not already present.'''
retry = False
new_peers = []
for peer in peers:
if not peer.is_public or (peer.is_tor and not self.proxy):
continue
matches = peer.matches(self.peers)
if not matches:
new_peers.append(peer)
elif check_ports:
for match in matches:
if match.check_ports(peer):
self.logger.info('ports changed for {}'.format(peer))
retry = True
if new_peers:
retry = True
source = source or new_peers[0].source
if limit:
random.shuffle(new_peers)
use_peers = new_peers[:limit]
else:
use_peers = new_peers
for n, peer in enumerate(use_peers):
self.logger.info('accepted new peer {:d}/{:d} {} from {} '
.format(n + 1, len(use_peers), peer, source))
self.peers.update(use_peers)
if retry:
self.retry_event.set()
def permit_new_onion_peer(self):
def _permit_new_onion_peer(self):
'''Accept a new onion peer only once per random time interval.'''
now = time.time()
if now < self.permit_onion_peer_time:
@ -370,81 +310,7 @@ class PeerManager(object):
self.permit_onion_peer_time = now + random.randrange(0, 1200)
return True
async def on_add_peer(self, features, source_info):
'''Add a peer (but only if the peer resolves to the source).'''
if not source_info:
self.logger.info('ignored add_peer request: no source info')
return False
source = source_info[0]
peers = Peer.peers_from_features(features, source)
if not peers:
self.logger.info('ignored add_peer request: no peers given')
return False
# Just look at the first peer, require it
peer = peers[0]
host = peer.host
if peer.is_tor:
permit = self.permit_new_onion_peer()
reason = 'rate limiting'
else:
try:
infos = await self.loop.getaddrinfo(host, 80,
type=socket.SOCK_STREAM)
except socket.gaierror:
permit = False
reason = 'address resolution failure'
else:
permit = any(source == info[-1][0] for info in infos)
reason = 'source-destination mismatch'
if permit:
self.logger.info('accepted add_peer request from {} for {}'
.format(source, host))
self.add_peers([peer], check_ports=True)
else:
self.logger.warning('rejected add_peer request from {} for {} ({})'
.format(source, host, reason))
return permit
def on_peers_subscribe(self, is_tor):
'''Returns the server peers as a list of (ip, host, details) tuples.
We return all peers we've connected to in the last day.
Additionally, if we don't have onion routing, we return a few
hard-coded onion servers.
'''
cutoff = time.time() - STALE_SECS
recent = [peer for peer in self.peers
if peer.last_good > cutoff and
not peer.bad and peer.is_public]
onion_peers = []
# Always report ourselves if valid (even if not public)
peers = set(myself for myself in self.myselves
if myself.last_good > cutoff)
# Bucket the clearnet peers and select up to two from each
buckets = defaultdict(list)
for peer in recent:
if peer.is_tor:
onion_peers.append(peer)
else:
buckets[peer.bucket()].append(peer)
for bucket_peers in buckets.values():
random.shuffle(bucket_peers)
peers.update(bucket_peers[:2])
# Add up to 20% onion peers (but up to 10 is OK anyway)
random.shuffle(onion_peers)
max_onion = 50 if is_tor else max(10, len(peers) // 4)
peers.update(onion_peers[:max_onion])
return [peer.to_tuple() for peer in peers]
def import_peers(self):
def _import_peers(self):
'''Import hard-coded peers from a file or the coin defaults.'''
self.add_peers(self.myselves)
@ -455,7 +321,7 @@ class PeerManager(object):
for real_name in coin_peers]
self.add_peers(peers, limit=None)
async def maybe_detect_proxy(self):
async def _maybe_detect_proxy(self):
'''Detect a proxy if we don't have one and some time has passed since
the last attempt.
@ -479,32 +345,19 @@ class PeerManager(object):
self.proxy = result
self.logger.info(f'detected {self.proxy}')
def proxy_peername(self):
'''Return the peername of the proxy, if there is a proxy, otherwise
None.'''
return self.proxy.peername if self.proxy else None
def start_peer_discovery(self):
if self.env.peer_discovery == self.env.PD_ON:
self.logger.info(f'beginning peer discovery. Force use of '
f'proxy: {self.env.force_proxy}')
self.tasks.create_task(self.peer_discovery_loop())
else:
self.logger.info('peer discovery is disabled')
async def peer_discovery_loop(self):
async def _peer_discovery_loop(self):
'''Main loop performing peer maintenance. This includes
1) Forgetting unreachable peers.
2) Verifying connectivity of new peers.
3) Retrying old peers at regular intervals.
'''
self.import_peers()
self._import_peers()
try:
while True:
await self.maybe_detect_proxy()
await self.retry_peers()
await self._maybe_detect_proxy()
await self._retry_peers()
timeout = self.loop.call_later(WAKEUP_SECS,
self.retry_event.set)
await self.retry_event.wait()
@ -515,12 +368,7 @@ class PeerManager(object):
session.abort()
await session.wait_closed()
def is_coin_onion_peer(self, peer):
'''Return true if this peer is a hard-coded onion peer.'''
return peer.is_tor and any(peer.host in real_name
for real_name in self.env.coin.PEERS)
async def retry_peers(self):
async def _retry_peers(self):
'''Retry peers that are close to getting stale.'''
# Exponential backoff of retries
now = time.time()
@ -542,11 +390,11 @@ class PeerManager(object):
peer.try_count += 1
pairs = peer.connection_port_pairs()
if peer.bad or not pairs:
self.maybe_forget_peer(peer)
self._maybe_forget_peer(peer)
else:
self.retry_peer(peer, pairs)
self._retry_peer(peer, pairs)
def retry_peer(self, peer, port_pairs):
def _retry_peer(self, peer, port_pairs):
peer.last_try = time.time()
kwargs = {'loop': self.loop}
@ -570,10 +418,10 @@ class PeerManager(object):
kwargs['local_addr'] = (host, None)
session = PeerSession(peer, self, kind, peer.host, port, **kwargs)
callback = partial(self.on_connected, peer, port_pairs)
callback = partial(self._on_connected, peer, port_pairs)
self.tasks.create_task(session.create_connection(), callback)
def on_connected(self, peer, port_pairs, task):
def _on_connected(self, peer, port_pairs, task):
'''Called when a connection attempt succeeds or fails.
If failed, close the session, log it and try remaining port pairs.
@ -584,11 +432,11 @@ class PeerManager(object):
self.logger.info(f'failed connecting to {peer} at {kind} port '
f'{port} in {elapsed:.1f}s: {task.exception()}')
if port_pairs:
self.retry_peer(peer, port_pairs)
self._retry_peer(peer, port_pairs)
else:
self.maybe_forget_peer(peer)
self._maybe_forget_peer(peer)
def set_verification_status(self, peer, kind, good):
def _set_verification_status(self, peer, kind, good):
'''Called when a verification succeeded or failed.'''
now = time.time()
if self.env.force_proxy or peer.is_tor:
@ -613,9 +461,9 @@ class PeerManager(object):
elif peer.host in match.features['hosts']:
match.update_features_from_peer(peer)
else:
self.maybe_forget_peer(peer)
self._maybe_forget_peer(peer)
def maybe_forget_peer(self, peer):
def _maybe_forget_peer(self, peer):
'''Forget the peer if appropriate, e.g. long-term unreachable.'''
if peer.last_good and not peer.bad:
try_limit = 10
@ -629,3 +477,153 @@ class PeerManager(object):
self.peers.discard(peer)
return forget
#
# External interface
#
def start_peer_discovery(self):
    '''Begin the peer discovery loop, unless disabled by the environment.'''
    env = self.env
    if env.peer_discovery != env.PD_ON:
        self.logger.info('peer discovery is disabled')
        return
    self.logger.info(f'beginning peer discovery. Force use of '
                     f'proxy: {env.force_proxy}')
    self.tasks.create_task(self._peer_discovery_loop())
def add_peers(self, peers, limit=2, check_ports=False, source=None):
    '''Add a limited number of peers that are not already present.'''
    wake_retry = False
    fresh = []
    for candidate in peers:
        # Ignore non-public peers, and onion peers when we have no proxy
        # to reach them through.
        if not candidate.is_public:
            continue
        if candidate.is_tor and not self.proxy:
            continue
        known = candidate.matches(self.peers)
        if not known:
            fresh.append(candidate)
            continue
        if check_ports:
            # Already known: note any port changes so it gets re-verified.
            for existing in known:
                if existing.check_ports(candidate):
                    self.logger.info('ports changed for {}'.format(candidate))
                    wake_retry = True
    if fresh:
        wake_retry = True
        source = source or fresh[0].source
        if limit:
            # Shuffle so we don't always favour the first peers offered.
            random.shuffle(fresh)
            accepted = fresh[:limit]
        else:
            accepted = fresh
        total = len(accepted)
        for seq, candidate in enumerate(accepted, start=1):
            self.logger.info('accepted new peer {:d}/{:d} {} from {} '
                             .format(seq, total, candidate, source))
        self.peers.update(accepted)
    if wake_retry:
        self.retry_event.set()
def info(self):
    '''The number of peers.'''
    self._set_peer_statuses()
    tally = Counter(peer.status for peer in self.peers)
    # Map the RPC labels onto the internal status codes.
    labelled = (('bad', PEER_BAD), ('good', PEER_GOOD),
                ('never', PEER_NEVER), ('stale', PEER_STALE))
    summary = {label: tally[code] for label, code in labelled}
    summary['total'] = len(self.peers)
    return summary
async def on_add_peer(self, features, source_info):
    '''Add a peer (but only if the peer resolves to the source).

    features: the features dictionary the remote server reported.
    source_info: the peer address info of the requesting connection;
    element 0 is the source address.  Returns True if the peer was
    accepted, False otherwise.
    '''
    if not source_info:
        self.logger.info('ignored add_peer request: no source info')
        return False
    source = source_info[0]
    peers = Peer.peers_from_features(features, source)
    if not peers:
        self.logger.info('ignored add_peer request: no peers given')
        return False

    # Just look at the first peer, require it
    peer = peers[0]
    host = peer.host
    if peer.is_tor:
        # Onion hosts cannot be resolved, so they are rate-limited instead.
        permit = self._permit_new_onion_peer()
        reason = 'rate limiting'
    else:
        try:
            infos = await self.loop.getaddrinfo(host, 80,
                                                type=socket.SOCK_STREAM)
        except socket.gaierror:
            permit = False
            reason = 'address resolution failure'
        else:
            # Require the claimed host to resolve back to the address the
            # request actually came from.
            permit = any(source == info[-1][0] for info in infos)
            reason = 'source-destination mismatch'

    if permit:
        self.logger.info('accepted add_peer request from {} for {}'
                         .format(source, host))
        self.add_peers([peer], check_ports=True)
    else:
        self.logger.warning('rejected add_peer request from {} for {} ({})'
                            .format(source, host, reason))
    return permit
def on_peers_subscribe(self, is_tor):
    '''Returns the server peers as a list of (ip, host, details) tuples.

    We return all peers we've connected to in the last day.
    Additionally, if we don't have onion routing, we return a few
    hard-coded onion servers.

    is_tor: whether the requesting session arrived over Tor; if so, a
    larger number of onion peers may be returned.
    '''
    # "Recent" means verified good within STALE_SECS and not marked bad.
    cutoff = time.time() - STALE_SECS
    recent = [peer for peer in self.peers
              if peer.last_good > cutoff and
              not peer.bad and peer.is_public]
    onion_peers = []
    # Always report ourselves if valid (even if not public)
    peers = set(myself for myself in self.myselves
                if myself.last_good > cutoff)
    # Bucket the clearnet peers and select up to two from each
    buckets = defaultdict(list)
    for peer in recent:
        if peer.is_tor:
            onion_peers.append(peer)
        else:
            buckets[peer.bucket()].append(peer)
    for bucket_peers in buckets.values():
        # Shuffle so the two chosen from each bucket vary between calls.
        random.shuffle(bucket_peers)
        peers.update(bucket_peers[:2])
    # Add up to 20% onion peers (but up to 10 is OK anyway)
    random.shuffle(onion_peers)
    max_onion = 50 if is_tor else max(10, len(peers) // 4)
    peers.update(onion_peers[:max_onion])
    return [peer.to_tuple() for peer in peers]
def proxy_peername(self):
    '''Return the peername of the proxy, if there is a proxy, otherwise
    None.'''
    if not self.proxy:
        return None
    return self.proxy.peername
def rpc_data(self):
    '''Peer data for the peers RPC method.'''
    self._set_peer_statuses()
    # Index of each label matches the numeric peer status code.
    status_names = ('good', 'stale', 'never', 'bad')
    # Good peers first; within each group, most recently good first.
    ordered = sorted(self.peers,
                     key=lambda peer: (peer.bad, -peer.last_good))
    result = []
    for peer in ordered:
        entry = peer.serialize()
        entry['status'] = status_names[peer.status]
        result.append(entry)
    return result

Loading…
Cancel
Save