
Merge branch 'release-0.8.1'

master  0.8.1
Neil Booth, 8 years ago
commit 93536c6a10
7 changed files:

  RELEASE-NOTES        (10 lines changed)
  docs/HOWTO.rst       (1 line changed)
  electrumx_server.py  (2 lines changed)
  server/irc.py        (5 lines changed)
  server/protocol.py   (72 lines changed)
  server/version.py    (2 lines changed)
  setup.py             (2 lines changed)

RELEASE-NOTES (10 lines changed)

@@ -1,4 +1,12 @@
-verion 0.8.0
+version 0.8.1
+-------------
+** NOTE: this version has a new Python package dependency: pylru
+
+- fix for IRC encoding killing IRC connection
+- add lru cache for history
+
+version 0.8.0
 ------------
 - stale connections are periodically closed. See docs/ENV-NOTES for

docs/HOWTO.rst (1 line changed)

@ -8,6 +8,7 @@ small - patches welcome.
+ Python3: ElectrumX uses asyncio. Python version >=3.5 is required. + Python3: ElectrumX uses asyncio. Python version >=3.5 is required.
+ plyvel: Python interface to LevelDB. I am using plyvel-0.9. + plyvel: Python interface to LevelDB. I am using plyvel-0.9.
+ pylru: Python LRU cache package. I'm using 1.0.9.
+ aiohttp: Python library for asynchronous HTTP. ElectrumX uses it for + aiohttp: Python library for asynchronous HTTP. ElectrumX uses it for
communication with the daemon. Version >= 1.0 required; I am communication with the daemon. Version >= 1.0 required; I am
using 1.0.5. using 1.0.5.

electrumx_server.py (2 lines changed)

@@ -41,7 +41,7 @@ def main_loop():
     def on_exception(loop, context):
         '''Suppress spurious messages it appears we cannot control.'''
         message = context.get('message')
-        if not loop.is_closed() and not message in SUPPRESS_MESSAGES:
+        if not message in SUPPRESS_MESSAGES:
             if not ('task' in context and
                     'accept_connection2()' in repr(context.get('task'))):
                 loop.default_exception_handler(context)
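For context, this handler only takes effect because the server installs it on the event loop with loop.set_exception_handler(). A minimal, runnable sketch of the pattern follows; the SUPPRESS_MESSAGES entry here is a placeholder, not the module's real list:

import asyncio

# Placeholder list; the real SUPPRESS_MESSAGES in electrumx_server.py
# names specific noisy asyncio/SSL messages.
SUPPRESS_MESSAGES = ['Fatal read error on socket transport']

def on_exception(loop, context):
    '''Suppress spurious messages it appears we cannot control.'''
    message = context.get('message')
    if message not in SUPPRESS_MESSAGES:
        # Fall back to asyncio's default reporting for everything else
        loop.default_exception_handler(context)

loop = asyncio.get_event_loop()
loop.set_exception_handler(on_exception)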

server/irc.py (5 lines changed)

@@ -70,6 +70,11 @@ class IRC(LoggedClass):

     async def join(self):
         import irc.client as irc_client
+        from jaraco.stream import buffer
+
+        # see https://pypi.python.org/pypi/irc under DecodingInput
+        irc_client.ServerConnection.buffer_class = \
+            buffer.LenientDecodingLineBuffer
         reactor = irc_client.Reactor()
         for event in ['welcome', 'join', 'quit', 'kick', 'whoreply',
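The fix replaces the irc package's default line buffer with jaraco.stream's LenientDecodingLineBuffer, which attempts UTF-8 and falls back to latin-1 rather than raising, so a single mis-encoded nick or message no longer kills the whole IRC connection. A rough sketch of the decoding behaviour (not the library's actual implementation):

def lenient_decode(raw: bytes) -> str:
    '''Decode one IRC line: prefer UTF-8, fall back to latin-1.

    Sketch of what buffer.LenientDecodingLineBuffer provides; the
    real class also handles splitting the byte stream into lines.
    '''
    try:
        return raw.decode('utf-8')
    except UnicodeDecodeError:
        # latin-1 maps every byte value, so this cannot raise
        return raw.decode('latin-1')

assert lenient_decode(b'caf\xc3\xa9') == 'café'   # valid UTF-8
assert lenient_decode(b'caf\xe9') == 'café'       # latin-1 fallback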

server/protocol.py (72 lines changed)

@@ -15,7 +15,9 @@ import ssl
 import time
 import traceback
 from collections import defaultdict, namedtuple
-from functools import partial
+from functools import partial, lru_cache

+import pylru

 from lib.hash import sha256, double_sha256, hash_to_str, hex_str_to_hash
 from lib.jsonrpc import JSONRPC, json_notification_payload
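pylru.lrucache, the new dependency, behaves like a dict with a fixed capacity that evicts the least-recently-used entry on overflow. A quick illustration; the capacity of 2 here is arbitrary (the server uses 512):

import pylru

cache = pylru.lrucache(2)
cache['a'] = 1
cache['b'] = 2
_ = cache['a']        # touching 'a' makes 'b' the least recently used
cache['c'] = 3        # inserting 'c' evicts 'b'
assert 'a' in cache and 'c' in cache and 'b' not in cache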
@@ -229,6 +231,7 @@ class ServerManager(util.LoggedClass):
         self.max_subs = env.max_subs
         self.subscription_count = 0
         self.next_stale_check = 0
+        self.history_cache = pylru.lrucache(512)
         self.futures = []
         env.max_send = max(350000, env.max_send)
         self.logger.info('session timeout: {:,d} seconds'
@@ -329,6 +332,25 @@ class ServerManager(util.LoggedClass):
             self.logger.info(json.dumps(self.server_summary()))
             self.next_log_sessions = time.time() + self.env.log_sessions

+    async def async_get_history(self, hash168):
+        if hash168 in self.history_cache:
+            return self.history_cache[hash168]
+
+        # History DoS limit. Each element of history is about 99
+        # bytes when encoded as JSON. This limits resource usage on
+        # bloated history requests, and uses a smaller divisor so
+        # large requests are logged before refusing them.
+        limit = self.env.max_send // 97
+        # Python 3.6: use async generators; update callers
+        history = []
+        for item in self.bp.get_history(hash168, limit=limit):
+            history.append(item)
+            if len(history) % 100 == 0:
+                await asyncio.sleep(0)
+
+        self.history_cache[hash168] = history
+        return history
+
     async def shutdown(self):
         '''Call to shutdown the servers. Returns when done.'''
         self.bp.shutdown()
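The relocated method combines two safeguards: a response-size cap derived from max_send (with the 350,000-byte floor set in the constructor above, 350000 // 97 gives a limit of 3,608 history items), and a cooperative yield every 100 items so one bloated history cannot monopolise the event loop. Hoisting it from Session onto ServerManager also lets the 512-entry pylru cache serve all sessions. A standalone sketch of the pattern, with a hypothetical stub standing in for the block processor's get_history():

import asyncio

def get_history(hash168, limit=None):
    '''Stub standing in for the block processor's history generator.'''
    for height in range(limit or 0):
        yield (b'\x00' * 32, height)    # fake (tx_hash, height) pairs

async def async_get_history(hash168, max_send=350000):
    limit = max_send // 97              # ~99 JSON bytes per element
    history = []
    for item in get_history(hash168, limit=limit):
        history.append(item)
        if len(history) % 100 == 0:
            await asyncio.sleep(0)      # yield so other sessions run
    return history

loop = asyncio.get_event_loop()
print(len(loop.run_until_complete(async_get_history(b'dummy'))))  # 3608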
@@ -351,8 +373,13 @@ class ServerManager(util.LoggedClass):
         self.logger.info('server listening sockets closed, waiting '
                          '{:d} seconds for socket cleanup'.format(secs))
         limit = time.time() + secs
-        while self.sessions and time.time() < limit:
-            await asyncio.sleep(4)
+        while self.sessions:
+            if time.time() < limit:
+                await asyncio.sleep(4)
+            else:
+                for session in list(self.sessions):
+                    self.close_session(session, hard=True)
+                await asyncio.sleep(0)
             self.logger.info('{:,d} sessions remaining'
                              .format(len(self.sessions)))
@@ -368,23 +395,26 @@ class ServerManager(util.LoggedClass):
             self.close_session(session)

     def remove_session(self, session):
-        self.subscription_count -= session.sub_count()
-        future = self.sessions.pop(session)
-        future.cancel()
+        # It might have been forcefully removed earlier by close_session()
+        if session in self.sessions:
+            self.subscription_count -= session.sub_count()
+            future = self.sessions.pop(session)
+            future.cancel()

-    def close_session(self, session):
+    def close_session(self, session, hard=False):
         '''Close the session's transport and cancel its future.'''
         session.transport.close()
         self.sessions[session].cancel()
-        return '{:d} disconnected'.format(session.id_)
+        if hard:
+            self.remove_session(session)
+            socket = session.transport.get_extra_info('socket')
+            socket.close()
+        return 'disconnected {:d}'.format(session.id_)

     def toggle_logging(self, session):
-        '''Close the session's transport and cancel its future.'''
+        '''Toggle logging of the session.'''
         session.log_me = not session.log_me
-        if session.log_me:
-            return 'logging {:d}'.format(session.id_)
-        else:
-            return 'not logging {:d}'.format(session.id_)
+        return 'log {:d}: {}'.format(session.id_, session.log_me)

     def clear_stale_sessions(self):
         '''Cut off sessions that haven't done anything for 10 minutes.'''
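Why a hard close touches the raw socket: transport.close() is graceful and only drops the connection after buffered outgoing data is flushed, so a stalled peer can keep a session alive indefinitely. Closing the socket returned by get_extra_info('socket') tears the connection down at once. A minimal sketch of the idea (hard_close is a hypothetical helper, not code from this commit):

def hard_close(transport):
    '''Force a connection down without waiting for buffers to drain.'''
    transport.close()                          # graceful: flush, then close
    sock = transport.get_extra_info('socket')  # raw socket behind the transport
    if sock is not None:                       # may be None for some transports
        sock.close()                           # immediate teardown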
@@ -395,10 +425,10 @@ class ServerManager(util.LoggedClass):
         stale = [session for session in self.sessions
                  if session.last_recv < cutoff]
         for session in stale:
-            self.close_session(session)
+            self.close_session(session, hard=True)
         if stale:
-            self.logger.info('dropped {:,d} stale connections'
-                             .format(len(stale)))
+            self.logger.info('dropped stale connections {}'
+                             .format([session.id_ for session in stale]))

     def new_subscription(self):
         if self.subscription_count >= self.max_subs:
@@ -716,7 +746,7 @@ class ElectrumX(Session):
         '''Returns status as 32 bytes.'''
         # Note history is ordered and mempool unordered in electrum-server
         # For mempool, height is -1 if unconfirmed txins, otherwise 0
-        history = await self.async_get_history(hash168)
+        history = await self.manager.async_get_history(hash168)
         mempool = self.manager.mempool_transactions(hash168)
         status = ''.join('{}:{:d}:'.format(hash_to_str(tx_hash), height)
@@ -761,7 +791,7 @@ class ElectrumX(Session):
     async def get_history(self, hash168):
         # Note history is ordered but unconfirmed is unordered in e-s
-        history = await self.async_get_history(hash168)
+        history = await self.manager.async_get_history(hash168)
         conf = [{'tx_hash': hash_to_str(tx_hash), 'height': height}
                 for tx_hash, height in history]
@@ -775,20 +805,6 @@ class ElectrumX(Session):
         count = min(next_height - start_height, chunk_size)
         return self.bp.read_headers(start_height, count).hex()

-    async def async_get_history(self, hash168):
-        # History DoS limit. Each element of history is about 99
-        # bytes when encoded as JSON. This limits resource usage on
-        # bloated history requests, and uses a smaller divisor so
-        # large requests are logged before refusing them.
-        limit = self.max_send // 97
-        # Python 3.6: use async generators; update callers
-        history = []
-        for item in self.bp.get_history(hash168, limit=limit):
-            history.append(item)
-            if len(history) % 100 == 0:
-                await asyncio.sleep(0)
-        return history
-
     async def get_utxos(self, hash168):
         # Python 3.6: use async generators; update callers
         utxos = []

server/version.py (2 lines changed)

@@ -1 +1 @@
-VERSION = "ElectrumX 0.8.0"
+VERSION = "ElectrumX 0.8.1"

setup.py (2 lines changed)

@@ -10,7 +10,7 @@ setuptools.setup(
     # "irc" package is only required if IRC connectivity is enabled
     # via environment variables, in which case I've tested with 15.0.4
     # "x11_hash" package (1.4) is required to sync DASH network.
-    install_requires=['plyvel', 'aiohttp >= 1'],
+    install_requires=['plyvel', 'pylru', 'aiohttp >= 1'],
     packages=setuptools.find_packages(),
     description='ElectrumX Server',
     author='Neil Booth',
