From 3f8b26291abd433d12dbbd83a82ac300620aea78 Mon Sep 17 00:00:00 2001
From: Neil Booth
Date: Fri, 18 Nov 2016 08:01:06 +0900
Subject: [PATCH 1/2] At startup only log some things during initial sync

---
 server/block_processor.py | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/server/block_processor.py b/server/block_processor.py
index 3761609..314a3ca 100644
--- a/server/block_processor.py
+++ b/server/block_processor.py
@@ -357,20 +357,19 @@ class BlockProcessor(server.db.DB):
         self.db_deletes = []
 
         # Log state
-        self.logger.info('{}/{} height: {:,d} tx count: {:,d} '
-                         'flush count: {:,d} utxo flush count: {:,d} '
-                         'sync time: {}'
-                         .format(self.coin.NAME, self.coin.NET, self.height,
-                                 self.tx_count, self.flush_count,
-                                 self.utxo_flush_count,
-                                 formatted_time(self.wall_time)))
-        self.logger.info('reorg limit of {:,d} blocks'
+        self.logger.info('coin: {}'.format(self.coin.NAME))
+        self.logger.info('network: {}'.format(self.coin.NET))
+        self.logger.info('height: {:,d}'.format(self.db_height))
+        self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
+        self.logger.info('reorg limit is {:,d} blocks'
                          .format(self.reorg_limit))
-        self.logger.info('flushing UTXO cache at {:,d} MB'
-                         .format(self.utxo_MB))
-        self.logger.info('flushing history cache at {:,d} MB'
-                         .format(self.hist_MB))
-
+        if self.first_sync:
+            self.logger.info('sync time so far: {}'
+                             .format(formatted_time(self.wall_time)))
+        self.logger.info('flushing UTXO cache at {:,d} MB'
+                         .format(self.utxo_MB))
+        self.logger.info('flushing history cache at {:,d} MB'
+                         .format(self.hist_MB))
         self.clean_db()
 
     async def main_loop(self):

From d2c575a24bd5f7c24f88d24d5b98cfda3904775a Mon Sep 17 00:00:00 2001
From: Neil Booth
Date: Fri, 18 Nov 2016 19:50:06 +0900
Subject: [PATCH 2/2] Better, more concise logs, particularly when caught up.

---
 server/block_processor.py | 66 ++++++++++++++++++---------------
 1 file changed, 31 insertions(+), 35 deletions(-)

diff --git a/server/block_processor.py b/server/block_processor.py
index 314a3ca..c8f679e 100644
--- a/server/block_processor.py
+++ b/server/block_processor.py
@@ -82,7 +82,8 @@ class Prefetcher(LoggedClass):
 
     async def main_loop(self):
         '''Loop forever polling for more blocks.'''
-        self.logger.info('starting daemon poll loop')
+        self.logger.info('catching up to daemon height {:,d}...'
+                         .format(await self.daemon.height()))
         while True:
             try:
                 if await self._caught_up():
@@ -117,6 +118,7 @@ class Prefetcher(LoggedClass):
         if self.queue_size >= self.target_cache_size:
             return [], 0
 
+        caught_up = self.daemon.cached_height() == self.fetched_height
         daemon_height = await self.daemon.height()
         cache_room = self.target_cache_size // self.ave_size
 
@@ -129,6 +131,11 @@ class Prefetcher(LoggedClass):
 
         first = self.fetched_height + 1
         hex_hashes = await self.daemon.block_hex_hashes(first, count)
+        if caught_up:
+            self.logger.info('new block height {:,d} hash {}'
+                             .format(first + count - 1,
+                                     hash_to_str(hex_hashes[-1])))
+
         blocks = await self.daemon.raw_blocks(hex_hashes)
 
         size = sum(len(block) for block in blocks)
@@ -353,7 +360,6 @@ class BlockProcessor(server.db.DB):
 
         # UTXO cache
         self.utxo_cache = {}
-        self.utxo_cache_spends = 0
         self.db_deletes = []
 
         # Log state
@@ -553,7 +559,6 @@ class BlockProcessor(server.db.DB):
         flush_start = time.time()
         last_flush = self.last_flush
        tx_diff = self.tx_count - self.last_flush_tx_count
-        show_stats = self.first_sync
 
         if self.height > self.db_height:
             assert flush_history is None
@@ -566,19 +571,18 @@ class BlockProcessor(server.db.DB):
             if flush_utxos:
                 self.flush_utxos(batch)
             self.flush_state(batch)
-            self.logger.info('committing transaction...')
 
         # Update and put the wall time again - otherwise we drop the
        # time it took to commit the batch
         self.flush_state(self.db)
 
-        self.logger.info('flush #{:,d} to height {:,d} txs: {:,d} '
-                         'took {:,.1f}s'
-                         .format(self.flush_count, self.height, self.tx_count,
-                                 self.last_flush - flush_start))
+        self.logger.info('flush #{:,d} took {:.1f}s. Height {:,d} txs: {:,d}'
+                         .format(self.flush_count,
+                                 self.last_flush - flush_start,
+                                 self.height, self.tx_count))
 
         # Catch-up stats
-        if show_stats:
+        if self.first_sync:
             daemon_height = self.daemon.cached_height()
             tx_per_sec = int(self.tx_count / self.wall_time)
             this_tx_per_sec = 1 + int(tx_diff / (self.last_flush - last_flush))
@@ -601,11 +605,9 @@ class BlockProcessor(server.db.DB):
                                      formatted_time(tx_est / this_tx_per_sec)))
 
     def flush_history(self, batch):
-        fs_flush_start = time.time()
+        fs_start = time.time()
         self.fs_flush()
-        fs_flush_end = time.time()
-        self.logger.info('FS flush took {:.1f} seconds'
-                         .format(fs_flush_end - fs_flush_start))
+        fs_end = time.time()
 
         flush_id = pack('>H', self.flush_count)
 
@@ -613,10 +615,11 @@ class BlockProcessor(server.db.DB):
             key = b'H' + hash168 + flush_id
             batch.put(key, hist.tobytes())
 
-        self.logger.info('flushed {:,d} history entries for {:,d} addrs '
-                         'in {:.1f}s'
-                         .format(self.history_size, len(self.history),
-                                 time.time() - fs_flush_end))
+        if self.first_sync:
+            self.logger.info('flushed to FS in {:.1f}s, history in {:.1f}s '
+                             'for {:,d} addrs'
+                             .format(fs_end - fs_start, time.time() - fs_end,
+                                     len(self.history)))
 
         self.history = defaultdict(partial(array.array, 'I'))
         self.history_size = 0
@@ -712,13 +715,10 @@ class BlockProcessor(server.db.DB):
         utxo_MB = (db_deletes_size + utxo_cache_size) // one_MB
         hist_MB = (hist_cache_size + tx_hash_size) // one_MB
 
-        self.logger.info('UTXOs: {:,d} deletes: {:,d} '
+        self.logger.info('our height: {:,d} daemon: {:,d} '
                          'UTXOs {:,d}MB hist {:,d}MB'
-                         .format(len(self.utxo_cache),
-                                 len(self.db_deletes) // 2,
+                         .format(self.height, self.daemon.cached_height(),
                                  utxo_MB, hist_MB))
-        self.logger.info('our height: {:,d} daemon height: {:,d}'
-                         .format(self.height, self.daemon.cached_height()))
         return utxo_MB, hist_MB
 
     def undo_key(self, height):
@@ -939,7 +939,6 @@ class BlockProcessor(server.db.DB):
             idx_packed = pack('
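
The sketch below is an editor-added illustration, not code from ElectrumX: the class SyncAwareLogger and its methods log_flush and log_new_block are hypothetical names. It distils the logging policy the two patches converge on, namely that verbose flush and catch-up statistics are emitted only while first_sync is true, while a caught-up server logs a single concise line per flush and per new block.

import logging
import time

logging.basicConfig(level=logging.INFO, format='%(name)s: %(message)s')
logger = logging.getLogger('BlockProcessor')


class SyncAwareLogger:
    '''Illustrative only: gates verbose output on a first_sync flag, in the
    same spirit as BlockProcessor.first_sync in the patches above.'''

    def __init__(self, first_sync):
        self.first_sync = first_sync   # True until the initial sync completes
        self.start_time = time.time()

    def log_flush(self, flush_count, flush_secs, height, tx_count):
        # The concise one-line flush summary is always emitted.
        logger.info('flush #{:,d} took {:.1f}s. Height {:,d} txs: {:,d}'
                    .format(flush_count, flush_secs, height, tx_count))
        # Catch-up statistics are only interesting during the initial sync.
        if self.first_sync:
            logger.info('sync time so far: {:.0f}s'
                        .format(time.time() - self.start_time))

    def log_new_block(self, height, block_hash):
        # Once caught up, one line per new block is enough.
        logger.info('new block height {:,d} hash {}'.format(height, block_hash))


if __name__ == '__main__':
    syncing = SyncAwareLogger(first_sync=True)
    syncing.log_flush(flush_count=12, flush_secs=3.2, height=150000, tx_count=4000000)

    caught_up = SyncAwareLogger(first_sync=False)
    caught_up.log_new_block(height=439000, block_hash='00' * 32)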