diff --git a/AUTHORS b/AUTHORS
index 10c4bb7..722c701 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1 +1,2 @@
-Neil Booth: creator and maintainer
\ No newline at end of file
+Neil Booth: creator and maintainer
+Johann Bauer: backend DB abstraction
\ No newline at end of file
diff --git a/HOWTO.rst b/HOWTO.rst
index 2f7abd1..af5be2e 100644
--- a/HOWTO.rst
+++ b/HOWTO.rst
@@ -29,6 +29,19 @@
 metadata comes to just over 17GB.  Leveldb needs a bit more for brief
 periods, and the block chain is only getting longer, so I would
 recommend having at least 30-40GB free space.
 
+Database Engine
+===============
+
+You can choose between RocksDB, LevelDB or LMDB to store transaction
+information on disk.  Currently RocksDB seems to be the fastest, with
+LevelDB about 10% slower.  LMDB seems to be the slowest, though that
+may be down to a poor implementation or configuration.
+
+You will need to install one of:
+
++ `plyvel `_ for LevelDB
++ `pyrocksdb `_ for RocksDB
++ `lmdb `_ for LMDB
+
 Running
 =======
diff --git a/samples/scripts/NOTES b/samples/scripts/NOTES
index d38a327..54c9fca 100644
--- a/samples/scripts/NOTES
+++ b/samples/scripts/NOTES
@@ -31,18 +31,21 @@
 bit bigger than the combined cache size, because of Python overhead
 and also because leveldb can consume quite a lot of memory during
 UTXO flushing.  So these are rough numbers only:
 
-HIST_MB  - amount of history cache, in MB, to retain before flushing to
-           disk.  Default is 250; probably no benefit being much larger
-           as history is append-only and not searched.
-
-UTXO_MB  - amount of UTXO and history cache, in MB, to retain before
-           flushing to disk.  Default is 1000.  This may be too large
-           for small boxes or too small for machines with lots of RAM.
-           Larger caches generally perform better as there is
-           significant searching of the UTXO cache during indexing.
-           However, I don't see much benefit in my tests pushing this
-           too high, and in fact performance begins to fall.  My
-           machine has 24GB RAM; the slow down is probably because of
-           leveldb caching and Python GC effects.  However this may be
-           very dependent on hardware and you may have different
-           results.
\ No newline at end of file
+HIST_MB  - amount of history cache, in MB, to retain before flushing to
+           disk.  Default is 250; probably no benefit being much larger
+           as history is append-only and not searched.
+
+UTXO_MB  - amount of UTXO and history cache, in MB, to retain before
+           flushing to disk.  Default is 1000.  This may be too large
+           for small boxes or too small for machines with lots of RAM.
+           Larger caches generally perform better as there is
+           significant searching of the UTXO cache during indexing.
+           However, I don't see much benefit in my tests pushing this
+           too high, and in fact performance begins to fall.  My
+           machine has 24GB RAM; the slowdown is probably because of
+           leveldb caching and Python GC effects.  However, this may be
+           very dependent on hardware and you may have different
+           results.
+
+DB_ENGINE - database engine for the transaction database; one of
+            rocksdb, leveldb or lmdb
\ No newline at end of file
diff --git a/server/block_processor.py b/server/block_processor.py
index 6be4e7d..60cf37d 100644
--- a/server/block_processor.py
+++ b/server/block_processor.py
@@ -17,13 +17,12 @@
 from bisect import bisect_left
 from collections import defaultdict, namedtuple
 from functools import partial
 
-import plyvel
-
 from server.cache import FSCache, UTXOCache, NO_CACHE_ENTRY
 from server.daemon import DaemonError
 from lib.hash import hash_to_str
 from lib.script import ScriptPubKey
 from lib.util import chunks, LoggedClass
+from server.storage import LMDB, RocksDB, LevelDB, NoDatabaseException
 
 
 def formatted_time(t):
@@ -150,7 +149,7 @@ class BlockProcessor(LoggedClass):
         self.first_sync = True
 
         # Open DB and metadata files.  Record some of its state.
-        self.db = self.open_db(self.coin)
+        self.db = self.open_db(self.coin, env.db_engine)
         self.tx_count = self.db_tx_count
         self.height = self.db_height
         self.tip = self.db_tip
@@ -227,7 +226,7 @@ class BlockProcessor(LoggedClass):
                     await self.handle_chain_reorg()
                     self.have_caught_up = False
                     break
-                await asyncio.sleep(0) # Yield
+                await asyncio.sleep(0)  # Yield
 
             if self.height == self.daemon.cached_height():
                 await self.caught_up()
@@ -257,6 +256,7 @@ class BlockProcessor(LoggedClass):
         '''Return the list of hashes to back up because of a reorg.
 
         The hashes are returned in order of increasing height.'''
+
         def match_pos(hashes1, hashes2):
             for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
                 if hash1 == hash2:
@@ -286,17 +286,22 @@ class BlockProcessor(LoggedClass):
         return self.fs_cache.block_hashes(start, count)
 
-    def open_db(self, coin):
+    def open_db(self, coin, db_engine):
         db_name = '{}-{}'.format(coin.NAME, coin.NET)
+        db_engine_class = {
+            "leveldb": LevelDB,
+            "rocksdb": RocksDB,
+            "lmdb": LMDB
+        }[db_engine.lower()]
         try:
-            db = plyvel.DB(db_name, create_if_missing=False,
-                           error_if_exists=False, compression=None)
-        except:
-            db = plyvel.DB(db_name, create_if_missing=True,
-                           error_if_exists=True, compression=None)
-            self.logger.info('created new database {}'.format(db_name))
+            db = db_engine_class(db_name, create_if_missing=False,
+                                 error_if_exists=False, compression=None)
+        except NoDatabaseException:
+            db = db_engine_class(db_name, create_if_missing=True,
+                                 error_if_exists=True, compression=None)
+            self.logger.info('created new {} database {}'.format(db_engine, db_name))
         else:
-            self.logger.info('successfully opened database {}'.format(db_name))
+            self.logger.info('successfully opened {} database {}'.format(db_engine, db_name))
         self.read_state(db)
 
         return db
@@ -325,7 +330,7 @@ class BlockProcessor(LoggedClass):
         '''
         if self.flush_count < self.utxo_flush_count:
             raise ChainError('DB corrupt: flush_count < utxo_flush_count')
-        with self.db.write_batch(transaction=True) as batch:
+        with self.db.write_batch() as batch:
             if self.flush_count > self.utxo_flush_count:
                 self.logger.info('DB shut down uncleanly.  Scanning for '
                                  'excess history flushes...')
@@ -423,7 +428,7 @@ class BlockProcessor(LoggedClass):
 
         if self.height > self.db_height:
             self.fs_cache.flush(self.height, self.tx_count)
 
-        with self.db.write_batch(transaction=True) as batch:
+        with self.db.write_batch() as batch:
             # History first - fast and frees memory.  Flush state last
             # as it reads the wall time.
             if self.height > self.db_height:
@@ -669,7 +674,7 @@ class BlockProcessor(LoggedClass):
             if not tx.is_coinbase:
                 for txin in reversed(tx.inputs):
                     n -= 33
-                    undo_item = undo_info[n:n+33]
+                    undo_item = undo_info[n:n + 33]
                     put_utxo(txin.prev_hash + pack('
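
Note: the new server/storage.py module that block_processor.py now imports
(LMDB, RocksDB, LevelDB, NoDatabaseException) is not included in the hunks
above.  As a rough sketch of the interface that open_db() and the
write_batch() calls appear to assume — the Storage base class, the get/put
method names and the plyvel-based LevelDB body below are illustrative
guesses, not the module's actual contents:

    # Illustrative sketch only: the real server/storage.py is not part of
    # this diff.  Class names LevelDB/RocksDB/LMDB and NoDatabaseException
    # match the import in block_processor.py; everything else is assumed.

    class NoDatabaseException(Exception):
        '''Raised when an existing database cannot be opened.'''

    class Storage:
        '''Engine-neutral key-value interface used by BlockProcessor.'''

        def __init__(self, name, create_if_missing=False,
                     error_if_exists=False, compression=None):
            self.open(name, create_if_missing, error_if_exists, compression)

        def open(self, name, create_if_missing, error_if_exists, compression):
            raise NotImplementedError

        def get(self, key):
            raise NotImplementedError

        def put(self, key, value):
            raise NotImplementedError

        def write_batch(self):
            '''Return a context manager that batches writes atomically,
            replacing the plyvel-specific write_batch(transaction=True)
            calls removed above.'''
            raise NotImplementedError

    class LevelDB(Storage):
        '''LevelDB backend via plyvel.'''

        def open(self, name, create_if_missing, error_if_exists, compression):
            import plyvel
            try:
                self.db = plyvel.DB(name, create_if_missing=create_if_missing,
                                    error_if_exists=error_if_exists,
                                    compression=compression)
            except plyvel.Error:
                # Translate the engine-specific "no such database" error
                # into the engine-neutral exception open_db() catches.
                raise NoDatabaseException

        def get(self, key):
            return self.db.get(key)

        def put(self, key, value):
            self.db.put(key, value)

        def write_batch(self):
            return self.db.write_batch(transaction=True)

With this shape, the RocksDB and LMDB subclasses only need to map the same
four operations onto their own libraries, which is what lets open_db() pick
db_engine_class from a dict and treat all three engines uniformly.  The
env.db_engine value passed to open_db() presumably comes from the DB_ENGINE
environment variable documented in samples/scripts/NOTES above.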