
Log large requests and reject them

master
Neil Booth, 8 years ago
commit 292073f2c7
  1. docs/ENV-NOTES (23 changes)
  2. lib/jsonrpc.py (35 changes)
  3. server/env.py (2 changes)
  4. server/protocol.py (19 changes)

docs/ENV-NOTES

@@ -44,17 +44,18 @@ in ElectrumX are very cheap - they consume about 100 bytes of memory
 each and are processed efficiently. I feel the defaults are low and
 encourage you to raise them.
-MAX_HIST - maximum number of historical transactions to serve for
-           a single address. The current Electrum protocol requires
-           address histories be served en-masse or not at all,
-           an obvious avenue for abuse. This limit is a
-           stop-gap until the protocol is improved to admit
-           incremental history requests. The default value is
-           2,000 which should be ample for most legitimate
-           users. Increasing to around 10,000 is likely fine
-           but bear in mind one client can request multiple
-           addresses. I welcome your experiences and suggestions
-           for an appropriate value.
+MAX_SEND - maximum size of a response message to send over the wire,
+           in bytes. Defaults to 250,000. The current Electrum
+           protocol has a flaw in that address histories must be
+           served all at once or not at all, an obvious avenue for
+           abuse. This limit is a stop-gap until the protocol is
+           improved to admit incremental history requests.
+           Each history entry is approximately 100 bytes so the
+           default is equivalent to a history limit of around 2,500
+           entries, which should be ample for most legitimate
+           users. Increasing by a single-digit factor is likely fine
+           but bear in mind one client can request history for
+           multiple addresses.
 MAX_SUBS - maximum number of address subscriptions across all
            sessions. Defaults to 250,000.
 MAX_SESSION_SUBS - maximum number of address subscriptions permitted to a
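
The MAX_SEND default and the 100-byte estimate above translate into a rough per-request history budget. A minimal sketch of that arithmetic (the variable names and the 100-byte figure are the estimates quoted in the note, not measured values):

    MAX_SEND = 250000           # default response size cap, in bytes
    APPROX_ENTRY_BYTES = 100    # estimated JSON size of one history entry

    print(MAX_SEND // APPROX_ENTRY_BYTES)   # 2500, the "around 2,500 entries" figure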

lib/jsonrpc.py

@@ -72,6 +72,10 @@ class JSONRPC(asyncio.Protocol, LoggedClass):
             self.msg = msg
             self.code = code
 
+    class LargeRequestError(Exception):
+        '''Raised if a large request was prevented from being sent.'''
+
     def __init__(self):
         super().__init__()
         self.start = time.time()
@@ -87,6 +91,20 @@ class JSONRPC(asyncio.Protocol, LoggedClass):
         self.error_count = 0
         self.peer_info = None
         self.messages = asyncio.Queue()
+        # Sends longer than max_send are prevented, instead returning
+        # an oversized request error to the other end of the network
+        # connection. The request causing it is logged. Values under
+        # 1000 are treated as 1000.
+        self.max_send = 0
+        self.anon_logs = False
+
+    def peername(self, *, for_log=True):
+        '''Return the peer name of this connection.'''
+        if not self.peer_info:
+            return 'unknown'
+        if for_log and self.anon_logs:
+            return 'xx.xx.xx.xx:xx'
+        return '{}:{}'.format(self.peer_info[0], self.peer_info[1])
 
     def connection_made(self, transport):
         '''Handle an incoming client connection.'''
@@ -175,9 +193,14 @@ class JSONRPC(asyncio.Protocol, LoggedClass):
             self.logger.error(msg)
             self.send_json_error(msg, self.INTERNAL_ERROR, payload.get('id'))
         else:
-            self.send_count += 1
-            self.send_size += len(data)
-            self.transport.write(data)
+            if len(data) > max(1000, self.max_send):
+                self.send_json_error('request too large', self.INVALID_REQUEST,
+                                     payload.get('id'))
+                raise self.LargeRequestError
+            else:
+                self.send_count += 1
+                self.send_size += len(data)
+                self.transport.write(data)
 
     async def handle_message(self, message):
         '''Asynchronously handle a JSON request or response.
@@ -190,7 +213,11 @@ class JSONRPC(asyncio.Protocol, LoggedClass):
             payload = await self.single_payload(message)
 
         if payload:
-            self.send_json(payload)
+            try:
+                self.send_json(payload)
+            except self.LargeRequestError:
+                self.logger.warning('blocked large request from {}: {}'
+                                    .format(self.peername(), message))
 
     async def batch_payload(self, batch):
         '''Return the JSON payload corresponding to a batch JSON request.'''
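
Taken together, the jsonrpc.py changes make send_json raise LargeRequestError instead of writing an oversized response, and handle_message catches the exception and logs the offending request. A stripped-down sketch of that control flow, with the transport and logger replaced by stand-ins for illustration (not the library code itself):

    class LargeRequestError(Exception):
        '''Raised if a large response was prevented from being sent.'''

    def send_data(data, max_send, send_error, write):
        '''Refuse to write a response larger than max(1000, max_send) bytes,
        mirroring the check added to send_json above.'''
        if len(data) > max(1000, max_send):
            send_error('request too large')
            raise LargeRequestError()
        write(data)

    # The caller logs and drops the response instead of killing the session,
    # as handle_message now does.
    try:
        send_data(b'x' * 2000, max_send=1500, send_error=print, write=print)
    except LargeRequestError:
        print('blocked large request')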

server/env.py

@@ -45,7 +45,7 @@ class Env(LoggedClass):
         self.donation_address = self.default('DONATION_ADDRESS', '')
         self.db_engine = self.default('DB_ENGINE', 'leveldb')
         # Server limits to help prevent DoS
-        self.max_hist = self.integer('MAX_HIST', 2000)
+        self.max_send = self.integer('MAX_SEND', 250000)
         self.max_subs = self.integer('MAX_SUBS', 250000)
         self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
         # IRC
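
With this change the old MAX_HIST variable is gone and MAX_SEND is the only knob. Env.integer is not shown in this diff, so the following is only a hedged approximation of how such a setting is typically read from the environment:

    import os

    def integer(name, default):
        '''Read an integer environment variable, falling back to a default.
        An approximation of Env.integer; the real helper may differ in
        its error handling.'''
        value = os.environ.get(name)
        return default if value is None else int(value)

    max_send = integer('MAX_SEND', 250000)   # e.g. export MAX_SEND=1000000 to raise the cap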

server/protocol.py

@@ -227,6 +227,8 @@ class ServerManager(util.LoggedClass):
         self.max_subs = env.max_subs
         self.subscription_count = 0
         self.futures = []
+        env.max_send = max(1000, env.max_send)
+        self.logger.info('max response size {:,d} bytes'.format(env.max_send))
         self.logger.info('max subscriptions across all sessions: {:,d}'
                          .format(self.max_subs))
         self.logger.info('max subscriptions per session: {:,d}'
@@ -421,6 +423,8 @@ class Session(JSONRPC):
         self.coin = bp.coin
         self.kind = kind
         self.client = 'unknown'
+        self.anon_logs = env.anon_logs
+        self.max_send = env.max_send
 
     def connection_made(self, transport):
         '''Handle an incoming client connection.'''
@@ -463,14 +467,6 @@ class Session(JSONRPC):
             self.logger.error('error handling request {}'.format(message))
             traceback.print_exc()
 
-    def peername(self, *, for_log=True):
-        if not self.peer_info:
-            return 'unknown'
-        # Anonymize IP addresses that will be logged
-        if for_log and self.env.anon_logs:
-            return 'xx.xx.xx.xx:xx'
-        return '{}:{}'.format(self.peer_info[0], self.peer_info[1])
-
     def sub_count(self):
         return 0
@@ -674,8 +670,11 @@ class ElectrumX(Session):
         return self.bp.read_headers(start_height, count).hex()
 
     async def async_get_history(self, hash168):
-        # Apply DoS limit
-        limit = self.env.max_hist
+        # History DoS limit. Each element of history is about 99
+        # bytes when encoded as JSON. This limits resource usage on
+        # bloated history requests, and uses a smaller divisor so
+        # large requests are logged before refusing them.
+        limit = self.max_send // 97
         # Python 3.6: use async generators; update callers
         history = []
         for item in self.bp.get_history(hash168, limit=limit):
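
The divisor of 97 sits a little below the roughly 99 bytes per JSON-encoded entry cited in the comment, so a history that fills the whole budget comes out slightly over max_send; the size check in send_json then rejects and logs it rather than quietly serving a huge reply. With the default setting the numbers work out as follows (the 99-byte figure is the estimate from the comment):

    max_send = 250000                 # default MAX_SEND
    limit = max_send // 97            # 2577 history entries fetched from the DB
    approx_response = limit * 99      # roughly 255,123 bytes of JSON
    print(limit, approx_response, approx_response > max_send)   # 2577 255123 True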
