From d781a0f646f80f1bc43a76d22877324a6adaf246 Mon Sep 17 00:00:00 2001
From: Alexis Hernandez
Date: Sun, 24 Mar 2019 08:22:49 -0700
Subject: [PATCH] server: Remove hardcoded 1000 on the BlockHeaderCache

---
 .../app/com/xsn/explorer/cache/BlockHeaderCache.scala | 11 ++++++-----
 .../app/com/xsn/explorer/services/BlockService.scala  |  2 +-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/server/app/com/xsn/explorer/cache/BlockHeaderCache.scala b/server/app/com/xsn/explorer/cache/BlockHeaderCache.scala
index 4921b69..3bfc7b1 100644
--- a/server/app/com/xsn/explorer/cache/BlockHeaderCache.scala
+++ b/server/app/com/xsn/explorer/cache/BlockHeaderCache.scala
@@ -31,12 +31,13 @@ class BlockHeaderCache(cache: Cache[BlockHeaderCache.Key, BlockHeaderCache.Encod
   import BlockHeaderCache._
 
   def getOrSet(
-      key: Key)(
+      key: Key,
+      entrySize: Int)(
       f: => FutureApplicationResult[Value])(
       implicit ec: ExecutionContext,
       writes: Writes[Value]): FutureApplicationResult[EncodedValue] = {
 
-    if (isCacheable(key)) {
+    if (isCacheable(key, entrySize)) {
       get(key)
           .map(v => Future.successful(Good(v)))
           .getOrElse {
@@ -65,12 +66,12 @@ class BlockHeaderCache(cache: Cache[BlockHeaderCache.Key, BlockHeaderCache.Encod
   /**
    * A block header list is cacheable if all the following meet:
    * - The ordering is from oldest to newest
-   * - The limit is 1000
+   * - The limit matches the expected entry size
    * - The entry is not the last one (TODO)
    */
-  def isCacheable(key: Key): Boolean = {
+  def isCacheable(key: Key, entrySize: Int): Boolean = {
     key.orderingCondition == OrderingCondition.AscendingOrder &&
-        key.limit.int == 1000
+        key.limit.int == entrySize
   }
 
   def get(key: Key): Option[EncodedValue] = {
diff --git a/server/app/com/xsn/explorer/services/BlockService.scala b/server/app/com/xsn/explorer/services/BlockService.scala
index 40bb009..9ed80dd 100644
--- a/server/app/com/xsn/explorer/services/BlockService.scala
+++ b/server/app/com/xsn/explorer/services/BlockService.scala
@@ -64,7 +64,7 @@ class BlockService @Inject() (
       implicit writes: Writes[BlockHeader]) = {
 
     val cacheKey = BlockHeaderCache.Key(limit, orderingCondition, lastSeenHash)
-    blockHeaderCache.getOrSet(cacheKey) {
+    blockHeaderCache.getOrSet(cacheKey, maxHeadersPerQuery) {
       val result = for {
         headers <- blockDataHandler.getHeaders(limit, orderingCondition, lastSeenHash).toFutureOr
       } yield WrappedResult(headers)
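
Note (not part of the patch): the sketch below is a minimal, self-contained illustration of the cacheability rule this change introduces, using simplified stand-in types rather than the project's real Key/Limit/OrderingCondition classes. After the patch, a header-list query is cached only when it is ordered oldest-to-newest and its limit equals the caller-supplied entry size (the configured maxHeadersPerQuery in BlockService) instead of the literal 1000; the value 1000 used here for maxHeadersPerQuery is an assumption, since its configuration is not shown in the patch.

object CacheabilitySketch {

  sealed trait OrderingCondition
  object OrderingCondition {
    case object AscendingOrder extends OrderingCondition
    case object DescendingOrder extends OrderingCondition
  }

  final case class Limit(int: Int)
  final case class Key(limit: Limit, orderingCondition: OrderingCondition, lastSeenHash: Option[String])

  // Mirrors BlockHeaderCache.isCacheable(key, entrySize) after this patch:
  // ascending order and a limit equal to the expected entry size.
  def isCacheable(key: Key, entrySize: Int): Boolean = {
    key.orderingCondition == OrderingCondition.AscendingOrder &&
      key.limit.int == entrySize
  }

  def main(args: Array[String]): Unit = {
    val maxHeadersPerQuery = 1000 // assumed to come from configuration; not shown in the patch

    val fullPage = Key(Limit(maxHeadersPerQuery), OrderingCondition.AscendingOrder, None)
    val partialPage = Key(Limit(50), OrderingCondition.AscendingOrder, None)

    println(isCacheable(fullPage, maxHeadersPerQuery))    // true: cache this entry
    println(isCacheable(partialPage, maxHeadersPerQuery)) // false: served without caching
  }
}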