Neil Booth
8 years ago
13 changed files with 506 additions and 554 deletions
@ -1,107 +0,0 @@ |
|||
# Copyright (c) 2016, Neil Booth |
|||
# |
|||
# All rights reserved. |
|||
# |
|||
# See the file "LICENCE" for information about the copyright |
|||
# and warranty status of this software. |
|||
|
|||
'''Server controller. |
|||
|
|||
Coordinates the parts of the server. Serves as a cache for |
|||
client-serving data such as histories. |
|||
''' |
|||
|
|||
import asyncio |
|||
import signal |
|||
import ssl |
|||
from functools import partial |
|||
|
|||
from server.daemon import Daemon |
|||
from server.block_processor import BlockProcessor |
|||
from server.protocol import ElectrumX, LocalRPC, JSONRPC |
|||
from lib.util import LoggedClass |
|||
|
|||
|
|||
class Controller(LoggedClass):
    '''Coordinates the parts of the server.

    Owns the Daemon and BlockProcessor instances, starts the listening
    servers once the chain is caught up, and installs signal handlers
    for a clean shutdown.
    '''

    def __init__(self, loop, env):
        '''Set up the controller.

        Creates the Daemon and BlockProcessor instances and primes the
        JSON RPC protocol class with them.
        '''
        super().__init__()
        self.loop = loop
        self.env = env
        self.coin = env.coin
        self.daemon = Daemon(env.daemon_url, env.debug)
        self.block_processor = BlockProcessor(env, self.daemon,
                                              on_update=self.on_update)
        JSONRPC.init(self.block_processor, self.daemon, self.coin)
        self.servers = []

    def start(self):
        '''Prime the event loop with asynchronous jobs.'''
        for coro in self.block_processor.coros():
            asyncio.ensure_future(coro)

        # Arrange a clean shutdown on SIGINT / SIGTERM
        for signame in ('SIGINT', 'SIGTERM'):
            handler = partial(self.on_signal, signame)
            self.loop.add_signal_handler(getattr(signal, signame), handler)

    async def on_update(self, height, touched):
        '''Called by the block processor when the chain advances.

        Lazily starts the listening servers on the first update, then
        notifies ElectrumX sessions of the new height and the touched
        addresses.
        '''
        if not self.servers:
            self.servers = await self.start_servers()
        ElectrumX.notify(height, touched)

    async def start_servers(self):
        '''Start listening on RPC, TCP and SSL ports.

        A server is started only for the ports specified in the
        environment.  Returns the list of listening servers.
        '''
        servers = []
        env = self.env

        if env.rpc_port is not None:
            host = 'localhost'
            rpc = await self.loop.create_server(LocalRPC, host, env.rpc_port)
            servers.append(rpc)
            self.logger.info('RPC server listening on {}:{:d}'
                             .format(host, env.rpc_port))

        protocol = partial(ElectrumX, env)
        if env.tcp_port is not None:
            tcp = await self.loop.create_server(protocol, env.host,
                                                env.tcp_port)
            servers.append(tcp)
            self.logger.info('TCP server listening on {}:{:d}'
                             .format(env.host, env.tcp_port))

        if env.ssl_port is not None:
            # FIXME: update if we want to require Python >= 3.5.3
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            context.load_cert_chain(env.ssl_certfile,
                                    keyfile=env.ssl_keyfile)
            tls = await self.loop.create_server(protocol, env.host,
                                                env.ssl_port, ssl=context)
            servers.append(tls)
            self.logger.info('SSL server listening on {}:{:d}'
                             .format(env.host, env.ssl_port))

        return servers

    def stop(self):
        '''Close the listening servers.'''
        for server in self.servers:
            server.close()

    def on_signal(self, signame):
        '''Call on receipt of a signal to cleanly shutdown.'''
        self.logger.warning('received {} signal, preparing to shut down'
                            .format(signame))
        for task in asyncio.Task.all_tasks(self.loop):
            task.cancel()
@ -0,0 +1,220 @@ |
|||
# Copyright (c) 2016, Neil Booth |
|||
# |
|||
# All rights reserved. |
|||
# |
|||
# See the file "LICENCE" for information about the copyright |
|||
# and warranty status of this software. |
|||
|
|||
'''Interface to the blockchain database.''' |
|||
|
|||
|
|||
import array |
|||
import ast |
|||
import os |
|||
import struct |
|||
from bisect import bisect_right |
|||
from collections import namedtuple |
|||
|
|||
from lib.util import chunks, LoggedClass |
|||
from lib.hash import double_sha256 |
|||
from server.storage import open_db |
|||
|
|||
# An unspent transaction output as yielded by DB.get_utxos().  Field
# order (tx_num first) means tuple comparison sorts UTXOs by position
# in the blockchain.
UTXO = namedtuple("UTXO", "tx_num tx_pos tx_hash height value")
|||
|
|||
class DB(LoggedClass):
    '''Simple wrapper of the backend database for querying.

    Performs no DB update, though the DB will be cleaned on opening if
    it was shutdown uncleanly.
    '''

    class DBError(Exception):
        '''Raised on database-layer errors.'''

    def __init__(self, env):
        super().__init__()
        self.env = env
        self.coin = env.coin

        self.logger.info('switching current directory to {}'
                         .format(env.db_dir))
        os.chdir(env.db_dir)

        # Open DB and metadata files.  Record some of its state.
        db_name = '{}-{}'.format(self.coin.NAME, self.coin.NET)
        self.db = open_db(db_name, env.db_engine)
        if self.db.is_new:
            self.logger.info('created new {} database {}'
                             .format(env.db_engine, db_name))
        else:
            self.logger.info('successfully opened {} database {}'
                             .format(env.db_engine, db_name))
        self.init_state_from_db()

        create = self.db_height == -1
        self.headers_file = self.open_file('headers', create)
        self.txcount_file = self.open_file('txcount', create)
        # Tx hashes are spread across fixed-size files of this many bytes
        self.tx_hash_file_size = 16 * 1024 * 1024

        # tx_counts[N] has the cumulative number of txs at the end of
        # height N.  So tx_counts[0] is 1 - the genesis coinbase
        self.tx_counts = array.array('I')
        self.txcount_file.seek(0)
        self.tx_counts.fromfile(self.txcount_file, self.db_height + 1)
        if self.tx_counts:
            assert self.db_tx_count == self.tx_counts[-1]
        else:
            assert self.db_tx_count == 0

    def init_state_from_db(self):
        '''Set the chain-state attributes: fresh defaults for a new DB,
        otherwise the values stored under the b'state' key.

        Raises DBError if the stored genesis hash does not match the
        coin's.
        '''
        if self.db.is_new:
            self.db_height = -1
            self.db_tx_count = 0
            self.db_tip = b'\0' * 32
            self.flush_count = 0
            self.utxo_flush_count = 0
            self.wall_time = 0
            self.first_sync = True
        else:
            state = self.db.get(b'state')
            state = ast.literal_eval(state.decode())
            if state['genesis'] != self.coin.GENESIS_HASH:
                # Bug fix: the state key is 'genesis'; the old code read
                # state['genesis_hash'] here and so raised KeyError
                # instead of the intended DBError.
                raise self.DBError('DB genesis hash {} does not match coin {}'
                                   .format(state['genesis'],
                                           self.coin.GENESIS_HASH))
            self.db_height = state['height']
            self.db_tx_count = state['tx_count']
            self.db_tip = state['tip']
            self.flush_count = state['flush_count']
            self.utxo_flush_count = state['utxo_flush_count']
            self.wall_time = state['wall_time']
            # Older DBs lack this key; assume an unfinished first sync
            self.first_sync = state.get('first_sync', True)

    def open_file(self, filename, create=False):
        '''Open the file name.  Return its handle.

        If the file does not exist it is created only when create is
        true; otherwise FileNotFoundError propagates.
        '''
        try:
            return open(filename, 'rb+')
        except FileNotFoundError:
            if create:
                return open(filename, 'wb+')
            raise

    def fs_read_headers(self, start, count):
        '''Return the raw concatenated headers for heights
        [start, start + count) read from the headers file.

        Raises DBError if any requested header is not on disk.
        '''
        disk_count = min(count, self.db_height + 1 - start)
        if start < 0 or count < 0 or disk_count != count:
            raise self.DBError('{:,d} headers starting at {:,d} not on disk'
                               .format(count, start))
        if disk_count:
            header_len = self.coin.HEADER_LEN
            self.headers_file.seek(start * header_len)
            return self.headers_file.read(disk_count * header_len)
        return b''

    def fs_tx_hash(self, tx_num):
        '''Return a pair (tx_hash, tx_height) for the given tx number.

        If the tx_height is not on disk, returns (None, tx_height).'''
        tx_height = bisect_right(self.tx_counts, tx_num)

        if tx_height > self.db_height:
            # Not yet flushed to disk; the hash is unknown.  (The old
            # code had an unreachable, unformatted raise after this
            # return; it has been removed.)
            return None, tx_height

        # Hashes are packed 32 bytes each across numbered files
        file_pos = tx_num * 32
        file_num, offset = divmod(file_pos, self.tx_hash_file_size)
        filename = 'hashes{:04d}'.format(file_num)
        with self.open_file(filename) as f:
            f.seek(offset)
            return f.read(32), tx_height

    def fs_block_hashes(self, height, count):
        '''Return the hashes of count blocks starting at height.'''
        headers = self.fs_read_headers(height, count)
        # FIXME: move to coins.py
        hlen = self.coin.HEADER_LEN
        return [double_sha256(header) for header in chunks(headers, hlen)]

    @staticmethod
    def _resolve_limit(limit):
        '''Normalize a user-supplied limit: None means unlimited (-1
        never reaches zero when decremented).'''
        if limit is None:
            return -1
        assert isinstance(limit, int) and limit >= 0
        return limit

    def get_history(self, hash168, limit=1000):
        '''Generator that yields (tx_hash, height) pairs of confirmed
        transactions that touched the address, earliest in the
        blockchain first.  Includes both spending and receiving
        transactions.  By default yields at most 1000 entries.
        Set limit to None to get them all.
        '''
        limit = self._resolve_limit(limit)
        prefix = b'H' + hash168
        for key, hist in self.db.iterator(prefix=prefix):
            a = array.array('I')
            a.frombytes(hist)
            for tx_num in a:
                if limit == 0:
                    return
                yield self.fs_tx_hash(tx_num)
                limit -= 1

    def get_balance(self, hash168):
        '''Returns the confirmed balance of an address.'''
        return sum(utxo.value for utxo in self.get_utxos(hash168, limit=None))

    def get_utxos(self, hash168, limit=1000):
        '''Generator that yields all UTXOs for an address sorted in no
        particular order.  By default yields at most 1000 entries.
        Set limit to None to get them all.
        '''
        limit = self._resolve_limit(limit)
        unpack = struct.unpack
        prefix = b'u' + hash168
        for k, v in self.db.iterator(prefix=prefix):
            # Last two key bytes are the little-endian output index
            (tx_pos,) = unpack('<H', k[-2:])

            # Each value is a sequence of 12-byte (tx_num, value) records
            for n in range(0, len(v), 12):
                if limit == 0:
                    return
                (tx_num,) = unpack('<I', v[n:n + 4])
                (value,) = unpack('<Q', v[n + 4:n + 12])
                tx_hash, height = self.fs_tx_hash(tx_num)
                yield UTXO(tx_num, tx_pos, tx_hash, height, value)
                limit -= 1

    def get_utxos_sorted(self, hash168):
        '''Returns all the UTXOs for an address sorted by height and
        position in the block.'''
        return sorted(self.get_utxos(hash168, limit=None))

    def get_utxo_hash168(self, tx_hash, index):
        '''Returns the hash168 for a UTXO.

        Returns None if the index is out of the packable range or the
        UTXO is not found.
        '''
        hash168 = None
        if 0 <= index <= 65535:
            idx_packed = struct.pack('<H', index)
            hash168 = self.hash168(tx_hash, idx_packed)
        return hash168

    def hash168(self, tx_hash, idx_packed):
        '''Return the hash168 paid to by the given TXO.

        Return None if not found.'''
        # NOTE(review): ADDR_TX_HASH_LEN is not defined in the visible
        # part of this file — confirm it is defined at module level.
        key = b'h' + tx_hash[:ADDR_TX_HASH_LEN] + idx_packed
        data = self.db.get(key)
        if data is None:
            return None

        # Unique match: a single 21-byte hash168 plus 4-byte tx number
        if len(data) == 25:
            return data[:21]

        assert len(data) % 25 == 0

        # Resolve the compressed key collision using the TX number
        for n in range(0, len(data), 25):
            tx_num, = struct.unpack('<I', data[n+21:n+25])
            my_hash, height = self.fs_tx_hash(tx_num)
            if my_hash == tx_hash:
                return data[n:n+21]

        raise self.DBError('could not resolve hash168 collision')
@ -1 +1 @@ |
|||
VERSION = "ElectrumX 0.2.1" |
|||
VERSION = "ElectrumX 0.2.2" |
|||
|
Loading…
Reference in new issue