Browse Source

Merge branch 'release-0.7.10'

master 0.7.10
Neil Booth 8 years ago
parent
commit
1c3c8f61e3
  1. 7
      RELEASE-NOTES
  2. 0
      docs/ACKNOWLEDGEMENTS
  3. 0
      docs/AUTHORS
  4. 24
      docs/ENV-NOTES
  5. 4
      electrumx_rpc.py
  6. 36
      lib/coins.py
  7. 35
      lib/jsonrpc.py
  8. 9
      server/block_processor.py
  9. 5
      server/db.py
  10. 2
      server/env.py
  11. 19
      server/protocol.py
  12. 2
      server/version.py

7
docs/RELEASE-NOTES → RELEASE-NOTES

@ -1,3 +1,10 @@
version 0.7.10
--------------
- replaced MAX_HIST environment variable with MAX_SEND, see docs/ENV-NOTES.
Large requests are blocked and logged. The logs should help you determine
if the requests are genuine (perhaps requiring a higher MAX_SEND) or abuse.
version 0.7.9
-------------

0
ACKNOWLEDGEMENTS → docs/ACKNOWLEDGEMENTS

0
AUTHORS → docs/AUTHORS

24
docs/ENV-NOTES

@ -44,17 +44,21 @@ in ElectrumX are very cheap - they consume about 100 bytes of memory
each and are processed efficiently. I feel the defaults are low and
encourage you to raise them.
MAX_HIST - maximum number of historical transactions to serve for
a single address. The current Electrum protocol requires
address histories be served en-masse or not at all,
an obvious avenue for abuse. This limit is a
MAX_SEND - maximum size of a response message to send over the wire,
in bytes. Defaults to 350,000 and will treat smaller
values as the same because standard Electrum protocol
header chunk requests are nearly that large.
The Electrum protocol has a flaw in that address
histories must be served all at once or not at all,
an obvious avenue for abuse. MAX_SEND is a
stop-gap until the protocol is improved to admit
incremental history requests. The default value is
2,000 which should be ample for most legitimate
users. Increasing to around 10,000 is likely fine
but bear in mind one client can request multiple
addresses. I welcome your experiences and suggestions
for an appropriate value.
incremental history requests. Each history entry
is approximately 100 bytes so the default is
equivalent to a history limit of around 3,500
entries, which should be ample for most legitimate
users. Increasing by a single-digit factor is
likely fine but bear in mind one client can request
history for multiple addresses.
MAX_SUBS - maximum number of address subscriptions across all
sessions. Defaults to 250,000.
MAX_SESSION_SUBS - maximum number of address subscriptions permitted to a

4
electrumx_rpc.py

@ -30,7 +30,7 @@ class RPCClient(JSONRPC):
message = await f
except asyncio.TimeoutError:
future.cancel()
print('request timed out')
print('request timed out after {}s'.format(timeout))
else:
await self.handle_message(message)
@ -82,7 +82,7 @@ def main():
coro = loop.create_connection(RPCClient, 'localhost', args.port)
try:
transport, protocol = loop.run_until_complete(coro)
coro = protocol.send_and_wait(args.command[0], args.param, timeout=5)
coro = protocol.send_and_wait(args.command[0], args.param, timeout=15)
loop.run_until_complete(coro)
except OSError:
print('error connecting - is ElectrumX catching up or not running?')

36
lib/coins.py

@ -34,14 +34,10 @@ class Coin(object):
REORG_LIMIT=200
# Not sure if these are coin-specific
HEADER_LEN = 80
DEFAULT_RPC_PORT = 8332
RPC_URL_REGEX = re.compile('.+@[^:]+(:[0-9]+)?')
VALUE_PER_COIN = 100000000
CHUNK_SIZE=2016
STRANGE_VERBYTE = 0xff
# IRC Defaults
IRC_PREFIX = "E_"
IRC_CHANNEL = "#electrum"
IRC_SERVER = "irc.freenode.net"
IRC_PORT = 6667
@ -65,7 +61,7 @@ class Coin(object):
if not match:
raise CoinError('invalid daemon URL: "{}"'.format(url))
if match.groups()[0] is None:
url += ':{:d}'.format(cls.DEFAULT_RPC_PORT)
url += ':{:d}'.format(cls.RPC_PORT)
if not url.startswith('http://'):
url = 'http://' + url
return url + '/'
@ -215,9 +211,14 @@ class Coin(object):
return Base58.encode_check(payload)
@classmethod
def header_hashes(cls, header):
'''Given a header return the previous and current block hashes.'''
return header[4:36], double_sha256(header)
def header_hash(cls, header):
'''Given a header return hash'''
return double_sha256(header)
@classmethod
def header_prevhash(cls, header):
'''Given a header return previous hash'''
return header[4:36]
@classmethod
def read_block(cls, block):
@ -264,18 +265,20 @@ class Bitcoin(Coin):
TX_COUNT = 142791895
TX_COUNT_HEIGHT = 420976
TX_PER_BLOCK = 1600
IRC_PREFIX = "E_"
IRC_CHANNEL = "#electrum"
RPC_PORT = 8332
class BitcoinTestnet(Coin):
NAME = "Bitcoin"
class BitcoinTestnet(Bitcoin):
SHORTNAME = "XTN"
REORG_LIMIT = 2000
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = 0x6f
P2SH_VERBYTE = 0xc4
WIF_BYTE = 0xef
REORG_LIMIT = 2000
# Source: pycoin and others
@ -363,18 +366,17 @@ class Dash(Coin):
TX_COUNT_HEIGHT = 569399
TX_COUNT = 2157510
TX_PER_BLOCK = 4
DEFAULT_RPC_PORT = 9998
RPC_PORT = 9998
IRC_PREFIX = "D_"
IRC_CHANNEL = "#electrum-dash"
@classmethod
def header_hashes(cls, header):
'''Given a header return the previous and current block hashes.'''
def header_hash(cls, header):
'''Given a header return the hash.'''
import x11_hash
return header[4:36], x11_hash.getPoWHash(header)
return x11_hash.getPoWHash(header)
class DashTestnet(Dash):
NAME = "Dash"
SHORTNAME = "tDASH"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("3a805837")
@ -387,5 +389,5 @@ class DashTestnet(Dash):
TX_COUNT_HEIGHT = 101619
TX_COUNT = 132681
TX_PER_BLOCK = 1
DEFAULT_RPC_PORT = 19998
RPC_PORT = 19998
IRC_PREFIX = "d_"

35
lib/jsonrpc.py

@ -72,6 +72,10 @@ class JSONRPC(asyncio.Protocol, LoggedClass):
self.msg = msg
self.code = code
class LargeRequestError(Exception):
'''Raised if a large request was prevented from being sent.'''
def __init__(self):
super().__init__()
self.start = time.time()
@ -87,6 +91,20 @@ class JSONRPC(asyncio.Protocol, LoggedClass):
self.error_count = 0
self.peer_info = None
self.messages = asyncio.Queue()
# Sends longer than max_send are prevented, instead returning
# an oversized request error to the other end of the network
# connection. The request causing it is logged. Values under
# 1000 are treated as 1000.
self.max_send = 0
self.anon_logs = False
def peername(self, *, for_log=True):
'''Return the peer name of this connection.'''
if not self.peer_info:
return 'unknown'
if for_log and self.anon_logs:
return 'xx.xx.xx.xx:xx'
return '{}:{}'.format(self.peer_info[0], self.peer_info[1])
def connection_made(self, transport):
'''Handle an incoming client connection.'''
@ -175,9 +193,14 @@ class JSONRPC(asyncio.Protocol, LoggedClass):
self.logger.error(msg)
self.send_json_error(msg, self.INTERNAL_ERROR, payload.get('id'))
else:
self.send_count += 1
self.send_size += len(data)
self.transport.write(data)
if len(data) > max(1000, self.max_send):
self.send_json_error('request too large', self.INVALID_REQUEST,
payload.get('id'))
raise self.LargeRequestError
else:
self.send_count += 1
self.send_size += len(data)
self.transport.write(data)
async def handle_message(self, message):
'''Asynchronously handle a JSON request or response.
@ -190,7 +213,11 @@ class JSONRPC(asyncio.Protocol, LoggedClass):
payload = await self.single_payload(message)
if payload:
self.send_json(payload)
try:
self.send_json(payload)
except self.LargeRequestError:
self.logger.warning('blocked large request from {}: {}'
.format(self.peername(), message))
async def batch_payload(self, batch):
'''Return the JSON payload corresponding to a batch JSON request.'''

9
server/block_processor.py

@ -568,13 +568,12 @@ class BlockProcessor(server.db.DB):
# the UTXO cache uses the FS cache via get_tx_hash() to
# resolve compressed key collisions
header, tx_hashes, txs = self.coin.read_block(block)
prev_hash, header_hash = self.coin.header_hashes(header)
if prev_hash != self.tip:
if self.tip != self.coin.header_prevhash(header):
raise ChainReorg
touched = set()
self.fs_advance_block(header, tx_hashes, txs)
self.tip = header_hash
self.tip = self.coin.header_hash(header)
self.height += 1
undo_info = self.advance_txs(tx_hashes, txs, touched)
if self.daemon.cached_height() - self.height <= self.reorg_limit:
@ -636,14 +635,14 @@ class BlockProcessor(server.db.DB):
touched = set()
for block in blocks:
header, tx_hashes, txs = self.coin.read_block(block)
prev_hash, header_hash = self.coin.header_hashes(header)
header_hash = self.coin.header_hash(header)
if header_hash != self.tip:
raise ChainError('backup block {} is not tip {} at height {:,d}'
.format(hash_to_str(header_hash),
hash_to_str(self.tip), self.height))
self.backup_txs(tx_hashes, txs, touched)
self.tip = prev_hash
self.tip = self.coin.header_prevhash(header)
assert self.height >= 0
self.height -= 1
self.tx_counts.pop()

5
server/db.py

@ -16,7 +16,7 @@ from bisect import bisect_right
from collections import namedtuple
from lib.util import chunks, formatted_time, LoggedClass
from lib.hash import double_sha256, hash_to_str
from lib.hash import hash_to_str
from server.storage import open_db
from server.version import VERSION
@ -175,7 +175,8 @@ class DB(LoggedClass):
headers = self.fs_read_headers(height, count)
# FIXME: move to coins.py
hlen = self.coin.HEADER_LEN
return [double_sha256(header) for header in chunks(headers, hlen)]
return [self.coin.header_hash(header)
for header in chunks(headers, hlen)]
@staticmethod
def _resolve_limit(limit):

2
server/env.py

@ -45,7 +45,7 @@ class Env(LoggedClass):
self.donation_address = self.default('DONATION_ADDRESS', '')
self.db_engine = self.default('DB_ENGINE', 'leveldb')
# Server limits to help prevent DoS
self.max_hist = self.integer('MAX_HIST', 2000)
self.max_send = self.integer('MAX_SEND', 250000)
self.max_subs = self.integer('MAX_SUBS', 250000)
self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
# IRC

19
server/protocol.py

@ -227,6 +227,8 @@ class ServerManager(util.LoggedClass):
self.max_subs = env.max_subs
self.subscription_count = 0
self.futures = []
env.max_send = max(350000, env.max_send)
self.logger.info('max response size {:,d} bytes'.format(env.max_send))
self.logger.info('max subscriptions across all sessions: {:,d}'
.format(self.max_subs))
self.logger.info('max subscriptions per session: {:,d}'
@ -421,6 +423,8 @@ class Session(JSONRPC):
self.coin = bp.coin
self.kind = kind
self.client = 'unknown'
self.anon_logs = env.anon_logs
self.max_send = env.max_send
def connection_made(self, transport):
'''Handle an incoming client connection.'''
@ -463,14 +467,6 @@ class Session(JSONRPC):
self.logger.error('error handling request {}'.format(message))
traceback.print_exc()
def peername(self, *, for_log=True):
if not self.peer_info:
return 'unknown'
# Anonymize IP addresses that will be logged
if for_log and self.env.anon_logs:
return 'xx.xx.xx.xx:xx'
return '{}:{}'.format(self.peer_info[0], self.peer_info[1])
def sub_count(self):
return 0
@ -674,8 +670,11 @@ class ElectrumX(Session):
return self.bp.read_headers(start_height, count).hex()
async def async_get_history(self, hash168):
# Apply DoS limit
limit = self.env.max_hist
# History DoS limit. Each element of history is about 99
# bytes when encoded as JSON. This limits resource usage on
# bloated history requests, and uses a smaller divisor so
# large requests are logged before refusing them.
limit = self.max_send // 97
# Python 3.6: use async generators; update callers
history = []
for item in self.bp.get_history(hash168, limit=limit):

2
server/version.py

@ -1 +1 @@
VERSION = "ElectrumX 0.7.9"
VERSION = "ElectrumX 0.7.10"

Loading…
Cancel
Save