
Cache list of transaction IDs for blocks

Branch: tmp/parse_blk
Dagur Valberg Johannsson, 6 years ago
commit 2e9ac5c0d8
Changed files (lines changed):
  1. examples/index.rs (7)
  2. src/bin/electrs.rs (4)
  3. src/cache.rs (33)
  4. src/config.rs (7)
  5. src/daemon.rs (12)
  6. src/lib.rs (1)

examples/index.rs (7 changed lines)

@@ -6,16 +6,18 @@ extern crate error_chain;
 extern crate log;
 use electrs::{
-    config::Config, daemon::Daemon, errors::*, fake::FakeStore, index::Index, metrics::Metrics,
-    signal::Waiter,
+    cache::BlockTxIDsCache, config::Config, daemon::Daemon, errors::*, fake::FakeStore,
+    index::Index, metrics::Metrics, signal::Waiter,
 };
 use error_chain::ChainedError;
+use std::sync::Arc;
 fn run() -> Result<()> {
     let signal = Waiter::start();
     let config = Config::from_args();
     let metrics = Metrics::new(config.monitoring_addr);
     metrics.start();
+    let cache = Arc::new(BlockTxIDsCache::new(0));
     let daemon = Daemon::new(
         &config.daemon_dir,
@@ -23,6 +25,7 @@ fn run() -> Result<()> {
         config.cookie_getter(),
         config.network_type,
         signal.clone(),
+        cache,
         &metrics,
     )?;
     let fake_store = FakeStore {};

src/bin/electrs.rs (4 changed lines)

@@ -6,11 +6,13 @@ extern crate log;
 use error_chain::ChainedError;
 use std::process;
+use std::sync::Arc;
 use std::time::Duration;
 use electrs::{
     app::App,
     bulk,
+    cache::BlockTxIDsCache,
     config::Config,
     daemon::Daemon,
     errors::*,
@@ -26,6 +28,7 @@ fn run_server(config: &Config) -> Result<()> {
     let signal = Waiter::start();
     let metrics = Metrics::new(config.monitoring_addr);
     metrics.start();
+    let blocktxids_cache = Arc::new(BlockTxIDsCache::new(config.blocktxids_cache_size));
     let daemon = Daemon::new(
         &config.daemon_dir,
@@ -33,6 +36,7 @@ fn run_server(config: &Config) -> Result<()> {
         config.cookie_getter(),
         config.network_type,
         signal.clone(),
+        blocktxids_cache,
         &metrics,
     )?;
     // Perform initial indexing from local blk*.dat block files.

src/cache.rs (33 changed lines, new file)

@@ -0,0 +1,33 @@
+use crate::errors::*;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+use lru::LruCache;
+use std::sync::Mutex;
+pub struct BlockTxIDsCache {
+    map: Mutex<LruCache<Sha256dHash /* blockhash */, Vec<Sha256dHash /* txid */>>>,
+}
+impl BlockTxIDsCache {
+    pub fn new(capacity: usize) -> BlockTxIDsCache {
+        BlockTxIDsCache {
+            map: Mutex::new(LruCache::new(capacity)),
+        }
+    }
+    pub fn get_or_else<F>(
+        &self,
+        blockhash: &Sha256dHash,
+        load_txids_func: F,
+    ) -> Result<Vec<Sha256dHash>>
+    where
+        F: FnOnce() -> Result<Vec<Sha256dHash>>,
+    {
+        if let Some(txids) = self.map.lock().unwrap().get(blockhash) {
+            return Ok(txids.clone());
+        }
+        let txids = load_txids_func()?;
+        self.map.lock().unwrap().put(*blockhash, txids.clone());
+        Ok(txids)
+    }
+}
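
For orientation, here is a minimal sketch of how the new BlockTxIDsCache is meant to be used. The block hash and txid below are placeholder values computed on the spot, and the snippet assumes the electrs and bitcoin_hashes crates from this commit are available as dependencies; it is an illustration, not code from the commit.

use bitcoin_hashes::{sha256d::Hash as Sha256dHash, Hash};
use electrs::cache::BlockTxIDsCache;

fn main() -> electrs::errors::Result<()> {
    // Keep the txid lists of up to 100 blocks (the new --blocktxids-cache-size default).
    let cache = BlockTxIDsCache::new(100);

    let blockhash = Sha256dHash::hash(b"placeholder block");
    let txid = Sha256dHash::hash(b"placeholder tx");

    // First lookup: the closure runs (in the daemon this is a `getblock` RPC)
    // and its result is stored in the LRU map.
    let txids = cache.get_or_else(&blockhash, || Ok(vec![txid]))?;
    assert_eq!(txids, vec![txid]);

    // Second lookup for the same block hash: served from the cache, so the
    // closure is never invoked.
    let cached = cache.get_or_else(&blockhash, || {
        unreachable!("loader must not run on a cache hit")
    })?;
    assert_eq!(cached, vec![txid]);
    Ok(())
}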

src/config.rs (7 changed lines)

@@ -31,6 +31,7 @@ pub struct Config {
     pub tx_cache_size: usize,
     pub txid_limit: usize,
     pub server_banner: String,
+    pub blocktxids_cache_size: usize,
 }
 fn str_to_socketaddr(address: &str, what: &str) -> SocketAddr {
@@ -125,6 +126,11 @@ impl Config {
                 .help("Number of transactions to keep in for query LRU cache")
                 .default_value("10000") // should be enough for a small wallet.
         )
+        .arg(
+            Arg::with_name("blocktxids_cache_size")
+                .long("blocktxids-cache-size")
+                .help("Number of blocks to cache transaction IDs in LRU cache")
+                .default_value("100")) // Needs ~0.305MB per block at 10k txs each
         .arg(
             Arg::with_name("txid_limit")
                 .long("txid-limit")
@@ -227,6 +233,7 @@ impl Config {
             index_batch_size: value_t_or_exit!(m, "index_batch_size", usize),
             bulk_index_threads,
             tx_cache_size: value_t_or_exit!(m, "tx_cache_size", usize),
+            blocktxids_cache_size: value_t_or_exit!(m, "blocktxids_cache_size", usize),
             txid_limit: value_t_or_exit!(m, "txid_limit", usize),
             server_banner: value_t_or_exit!(m, "server_banner", String),
         };
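
The new flag makes the cache size tunable at startup. A hypothetical invocation (all other options left at their defaults) would look like:

    electrs --blocktxids-cache-size 200

The default of 100 follows from the size estimate in the comment above: a block with 10,000 transactions needs about 10,000 * 32 bytes, roughly 0.305 MB, of txids, so 100 cached blocks cost on the order of 30 MB in the worst case, and raising the flag to 200 roughly doubles that.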

src/daemon.rs (12 changed lines)

@@ -18,6 +18,7 @@ use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::Duration;
+use crate::cache::BlockTxIDsCache;
 use crate::errors::*;
 use crate::metrics::{HistogramOpts, HistogramVec, Metrics};
 use crate::signal::Waiter;
@@ -311,6 +312,7 @@ pub struct Daemon {
     conn: Mutex<Connection>,
     message_id: Counter, // for monotonic JSONRPC 'id'
     signal: Waiter,
+    blocktxids_cache: Arc<BlockTxIDsCache>,
     // monitoring
     latency: HistogramVec,
@@ -324,6 +326,7 @@ impl Daemon {
         cookie_getter: Arc<CookieGetter>,
         network: Network,
         signal: Waiter,
+        blocktxids_cache: Arc<BlockTxIDsCache>,
         metrics: &Metrics,
     ) -> Result<Daemon> {
         let daemon = Daemon {
@@ -335,6 +338,7 @@ impl Daemon {
                 signal.clone(),
             )?),
             message_id: Counter::new(),
+            blocktxids_cache: blocktxids_cache,
             signal: signal.clone(),
             latency: metrics.histogram_vec(
                 HistogramOpts::new("electrs_daemon_rpc", "Bitcoind RPC latency (in seconds)"),
@@ -376,6 +380,7 @@ impl Daemon {
             conn: Mutex::new(self.conn.lock().unwrap().reconnect()?),
             message_id: Counter::new(),
             signal: self.signal.clone(),
+            blocktxids_cache: Arc::clone(&self.blocktxids_cache),
             latency: self.latency.clone(),
             size: self.size.clone(),
         })
@@ -506,7 +511,7 @@ impl Daemon {
         Ok(block)
     }
-    pub fn getblocktxids(&self, blockhash: &Sha256dHash) -> Result<Vec<Sha256dHash>> {
+    fn load_blocktxids(&self, blockhash: &Sha256dHash) -> Result<Vec<Sha256dHash>> {
         self.request("getblock", json!([blockhash.to_hex(), /*verbose=*/ 1]))?
             .get("tx")
             .chain_err(|| "block missing txids")?
@@ -517,6 +522,11 @@ impl Daemon {
             .collect::<Result<Vec<Sha256dHash>>>()
     }
+    pub fn getblocktxids(&self, blockhash: &Sha256dHash) -> Result<Vec<Sha256dHash>> {
+        self.blocktxids_cache
+            .get_or_else(&blockhash, || self.load_blocktxids(blockhash))
+    }
     pub fn getblocks(&self, blockhashes: &[Sha256dHash]) -> Result<Vec<Block>> {
         let params_list: Vec<Value> = blockhashes
             .iter()
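
One property of the new wiring worth noting: get_or_else only holds the LruCache mutex while reading or writing the map, not while the loader closure runs, so the getblock RPC issued by load_blocktxids is made without the lock held. The sketch below illustrates this with the cache alone; the hash values are placeholders and the nested lookup merely stands in for work a loader might do, it is not code from the commit.

use bitcoin_hashes::{sha256d::Hash as Sha256dHash, Hash};
use electrs::cache::BlockTxIDsCache;
use std::sync::Arc;

fn main() -> electrs::errors::Result<()> {
    let cache = Arc::new(BlockTxIDsCache::new(10));
    let block_a = Sha256dHash::hash(b"block a"); // placeholder hashes
    let block_b = Sha256dHash::hash(b"block b");

    // Loading block_a consults the cache again for block_b. This would
    // deadlock if the (non-reentrant) Mutex were held across the loader;
    // it works because get_or_else re-acquires the lock per map access.
    let inner = Arc::clone(&cache);
    let txids = cache.get_or_else(&block_a, move || {
        inner.get_or_else(&block_b, || Ok(vec![]))
    })?;
    assert!(txids.is_empty());
    Ok(())
}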

src/lib.rs (1 changed line)

@@ -15,6 +15,7 @@ extern crate serde_json;
 pub mod app;
 pub mod bulk;
+pub mod cache;
 pub mod config;
 pub mod daemon;
 pub mod errors;
