Improve logging

master
Neil Booth, 8 years ago
commit 80fe427c89

1 changed file: server/block_processor.py (47 lines changed)

@@ -551,7 +551,6 @@ class BlockProcessor(server.db.DB):
             # as it reads the wall time.
             flush_history(batch)
             if flush_utxos:
-                self.fs_flush()
                 self.flush_utxos(batch)
             self.flush_state(batch)
             self.logger.info('committing transaction...')
@@ -604,7 +603,6 @@ class BlockProcessor(server.db.DB):
     def fs_flush(self):
         '''Flush the things stored on the filesystem.'''
-        flush_start = time.time()
         blocks_done = len(self.headers)
         prior_tx_count = (self.tx_counts[self.db_height]
                           if self.db_height >= 0 else 0)
@@ -649,8 +647,6 @@ class BlockProcessor(server.db.DB):
         self.tx_hashes = []
         self.headers = []
-        self.logger.info('FS flush took {:.1f} seconds'
-                         .format(time.time() - flush_start))

     def backup_history(self, batch, hash168s):
         self.logger.info('backing up history to height {:,d} tx_count {:,d}'
@@ -694,19 +690,16 @@ class BlockProcessor(server.db.DB):
         utxo_cache_size = len(self.utxo_cache) * 187
         db_cache_size = len(self.db_cache) * 105
         hist_cache_size = len(self.history) * 180 + self.history_size * 4
-        utxo_MB = (db_cache_size + utxo_cache_size) // one_MB
+        tx_hash_size = (self.tx_count - self.db_tx_count) * 74
+        utxo_MB = (db_deletes_size + utxo_cache_size + tx_hash_size) // one_MB
         hist_MB = hist_cache_size // one_MB

-        self.logger.info('cache stats at height {:,d} daemon height: {:,d}'
+        self.logger.info('UTXOs: {:,d} deletes: {:,d} '
+                         'UTXOs {:,d}MB hist {:,d}MB'
+                         .format(len(self.utxo_cache), len(self.db_deletes),
+                                 utxo_MB, hist_MB))
+        self.logger.info('our height: {:,d} daemon height: {:,d}'
                          .format(self.height, self.daemon.cached_height()))
-        self.logger.info(' entries: UTXO: {:,d} DB: {:,d} '
-                         'hist addrs: {:,d} hist size {:,d}'
-                         .format(len(self.utxo_cache),
-                                 len(self.db_cache),
-                                 len(self.history),
-                                 self.history_size))
-        self.logger.info(' size: {:,d}MB (UTXOs {:,d}MB hist {:,d}MB)'
-                         .format(utxo_MB + hist_MB, utxo_MB, hist_MB))
         return utxo_MB, hist_MB

     def undo_key(self, height):
@@ -989,9 +982,21 @@ class BlockProcessor(server.db.DB):
         # Care is needed because the writes generated by flushing the
         # UTXO state may have keys in common with our write cache or
         # may be in the DB already.
-        self.logger.info('flushing UTXOs: {:,d} txs and {:,d} blocks'
-                         .format(self.tx_count - self.db_tx_count,
-                                 self.height - self.db_height))
+        self.logger.info('flushing {:,d} blocks with {:,d} txs'
+                         .format(self.height - self.db_height,
+                                 self.tx_count - self.db_tx_count))
+        self.logger.info('UTXO cache adds: {:,d} spends: {:,d} '
+                         'DB spends: {:,d}'
+                         .format(len(self.utxo_cache) + self.utxo_cache_spends,
+                                 self.utxo_cache_spends,
+                                 len(self.db_deletes) // 2))
+
+        fs_flush_start = time.time()
+        self.fs_flush()
+        fs_flush_end = time.time()
+        self.logger.info('FS flush took {:.1f} seconds'
+                         .format(fs_flush_end - fs_flush_start))

         collisions = 0
         new_utxos = len(self.utxo_cache)
@@ -1022,11 +1027,6 @@ class BlockProcessor(server.db.DB):
-        adds = new_utxos + self.utxo_cache_spends
-        self.logger.info('UTXO cache adds: {:,d} spends: {:,d} '
-                         .format(adds, self.utxo_cache_spends))
-        self.logger.info('DB adds: {:,d} spends: {:,d}, collisions: {:,d}'
-                         .format(new_utxos, self.db_deletes, collisions))
         self.db_cache = {}
         self.utxo_cache_spends = self.db_deletes = 0
         self.utxo_flush_count = self.flush_count
@@ -1034,6 +1034,9 @@ class BlockProcessor(server.db.DB):
         self.db_height = self.height
         self.db_tip = self.tip

+        self.logger.info('UTXO flush took {:.1f} seconds'
+                         .format(time.time() - fs_flush_end))
+
     def read_headers(self, start, count):
         # Read some from disk
         disk_count = min(count, self.db_height + 1 - start)
