
Use LRU cache (instead of an unbounded HashMap) for transaction caching

refactor-mempool
Roman Zeyde 6 years ago
commit 42ab247776
  1. Cargo.lock (7 changed lines)
  2. Cargo.toml (1 changed line)
  3. src/bin/electrs.rs (5 changed lines)
  4. src/config.rs (29 changed lines)
  5. src/lib.rs (1 changed line)
  6. src/query.rs (19 changed lines)

Cargo.lock (7 changed lines)

@@ -290,6 +290,7 @@ dependencies = [
 "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"lru 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "page_size 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "prometheus 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -512,6 +513,11 @@ dependencies = [
 "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "lru"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "make-cmd"
 version = "0.1.0"
@@ -1094,6 +1100,7 @@ dependencies = [
 "checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2"
 "checksum librocksdb-sys 5.14.2 (registry+https://github.com/rust-lang/crates.io-index)" = "474d805d72e23a06310fa5201dfe182dc4b80ab1f18bb2823c1ac17ff9dcbaa2"
 "checksum log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f"
+"checksum lru 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "5fb41c1934bda881f2ab7ad8afa2ec25b8e0453563bfb09854bf3c319b1c96c3"
 "checksum make-cmd 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a8ca8afbe8af1785e09636acb5a41e08a765f5f0340568716c18a8700ba3c0d3"
 "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
 "checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a"

Cargo.toml (1 changed line)

@@ -24,6 +24,7 @@ glob = "0.2"
 hex = "0.3"
 libc = "0.2"
 log = "0.4"
+lru = "0.1"
 num_cpus = "1.0"
 page_size = "0.4"
 prometheus = "0.4"
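
For context, a minimal standalone sketch of what the new lru dependency provides. This example is not part of the commit; it only uses the LruCache::new / get / put calls that also appear in the src/query.rs diff below, and it assumes the usual least-recently-used eviction once capacity is exceeded:

extern crate lru;

use lru::LruCache;

fn main() {
    // Capacity is a plain usize in lru 0.1, matching TransactionCache::new(capacity) below.
    let mut cache: LruCache<u32, String> = LruCache::new(2);
    cache.put(1, "one".to_string());
    cache.put(2, "two".to_string());
    cache.put(3, "three".to_string()); // over capacity: the least recently used entry (key 1) is evicted
    assert!(cache.get(&1).is_none());
    assert_eq!(cache.get(&3).map(String::as_str), Some("three"));
}

Unlike the HashMap it replaces, the cache therefore keeps at most a fixed number of transactions in memory.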

src/bin/electrs.rs (5 changed lines)

@@ -16,7 +16,7 @@ use electrs::{
     errors::*,
     index::Index,
     metrics::Metrics,
-    query::Query,
+    query::{Query, TransactionCache},
     rpc::RPC,
     signal::Waiter,
     store::{full_compaction, is_fully_compacted, DBStore},
@@ -54,7 +54,8 @@ fn run_server(config: &Config) -> Result<()> {
     }.enable_compaction(); // enable auto compactions before starting incremental index updates.

     let app = App::new(store, index, daemon)?;
-    let query = Query::new(app.clone(), &metrics);
+    let tx_cache = TransactionCache::new(config.tx_cache_size);
+    let query = Query::new(app.clone(), &metrics, tx_cache);

     let mut server = None; // Electrum RPC server
     loop {

src/config.rs (29 changed lines)

@@ -14,17 +14,19 @@ use errors::*;

 #[derive(Debug)]
 pub struct Config {
+    // See below for the documentation of each field:
     pub log: stderrlog::StdErrLog,
-    pub network_type: Network, // bitcoind JSONRPC endpoint
-    pub db_path: PathBuf, // RocksDB directory path
-    pub daemon_dir: PathBuf, // Bitcoind data directory
-    pub daemon_rpc_addr: SocketAddr, // for connecting Bitcoind JSONRPC
-    pub cookie: Option<String>, // for bitcoind JSONRPC authentication ("USER:PASSWORD")
-    pub electrum_rpc_addr: SocketAddr, // for serving Electrum clients
-    pub monitoring_addr: SocketAddr, // for Prometheus monitoring
-    pub jsonrpc_import: bool, // slower initial indexing, for low-memory systems
-    pub index_batch_size: usize, // number of blocks to index in parallel
-    pub bulk_index_threads: usize, // number of threads to use for bulk indexing
+    pub network_type: Network,
+    pub db_path: PathBuf,
+    pub daemon_dir: PathBuf,
+    pub daemon_rpc_addr: SocketAddr,
+    pub cookie: Option<String>,
+    pub electrum_rpc_addr: SocketAddr,
+    pub monitoring_addr: SocketAddr,
+    pub jsonrpc_import: bool,
+    pub index_batch_size: usize,
+    pub bulk_index_threads: usize,
+    pub tx_cache_size: usize,
 }

 impl Config {
@@ -101,6 +103,12 @@ impl Config {
                     .help("Number of threads used for bulk indexing (default: use the # of CPUs)")
                     .default_value("0")
             )
+            .arg(
+                Arg::with_name("tx_cache_size")
+                    .long("tx-cache-size")
+                    .help("Number of transactions to keep in for query LRU cache")
+                    .default_value("10000") // should be enough for a small wallet.
+            )
             .get_matches();

         let network_name = m.value_of("network").unwrap_or("mainnet");
let network_name = m.value_of("network").unwrap_or("mainnet");
@@ -184,6 +192,7 @@ impl Config {
             jsonrpc_import: m.is_present("jsonrpc_import"),
             index_batch_size: value_t_or_exit!(m, "index_batch_size", usize),
             bulk_index_threads,
+            tx_cache_size: value_t_or_exit!(m, "tx_cache_size", usize),
         };
         eprintln!("{:?}", config);
         config
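
A self-contained sketch of the flag-parsing pattern added above, assuming the clap 2.x API that Arg::with_name and value_t_or_exit! come from; the "example" app name and the cleaned-up help string are illustrative only:

#[macro_use]
extern crate clap;

use clap::{App, Arg};

fn main() {
    // Mirrors how --tx-cache-size is declared and then parsed into Config::tx_cache_size.
    let m = App::new("example")
        .arg(
            Arg::with_name("tx_cache_size")
                .long("tx-cache-size")
                .help("Number of transactions to keep in the query LRU cache")
                .default_value("10000")
        )
        .get_matches();
    let tx_cache_size = value_t_or_exit!(m, "tx_cache_size", usize);
    println!("tx_cache_size = {}", tx_cache_size);
}

Running with --tx-cache-size 50000 bounds the cache at 50,000 transactions; omitting the flag falls back to the 10000 default, and a non-numeric value makes value_t_or_exit! report an error and exit.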

src/lib.rs (1 changed line)

@@ -9,6 +9,7 @@ extern crate dirs;
 extern crate glob;
 extern crate hex;
 extern crate libc;
+extern crate lru;
 extern crate num_cpus;
 extern crate page_size;
 extern crate prometheus;

src/query.rs (19 changed lines)

@@ -4,8 +4,9 @@ use bitcoin::network::serialize::deserialize;
 use bitcoin::util::hash::Sha256dHash;
 use crypto::digest::Digest;
 use crypto::sha2::Sha256;
+use lru::LruCache;
 use std::collections::HashMap;
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, Mutex, RwLock};

 use app::App;
 use index::{compute_script_hash, TxInRow, TxOutRow, TxRow};
@@ -155,14 +156,14 @@ fn txids_by_funding_output(
         .collect()
 }

-struct TransactionCache {
-    map: RwLock<HashMap<Sha256dHash, Transaction>>,
+pub struct TransactionCache {
+    map: Mutex<LruCache<Sha256dHash, Transaction>>,
 }

 impl TransactionCache {
-    fn new() -> TransactionCache {
+    pub fn new(capacity: usize) -> TransactionCache {
         TransactionCache {
-            map: RwLock::new(HashMap::new()),
+            map: Mutex::new(LruCache::new(capacity)),
         }
     }
@@ -170,11 +171,11 @@ impl TransactionCache {
     where
         F: FnOnce() -> Result<Transaction>,
     {
-        if let Some(txn) = self.map.read().unwrap().get(txid) {
+        if let Some(txn) = self.map.lock().unwrap().get(txid) {
             return Ok(txn.clone());
         }
         let txn = load_txn_func()?;
-        self.map.write().unwrap().insert(*txid, txn.clone());
+        self.map.lock().unwrap().put(*txid, txn.clone());
         Ok(txn)
     }
 }
@@ -186,11 +187,11 @@ pub struct Query {
 }

 impl Query {
-    pub fn new(app: Arc<App>, metrics: &Metrics) -> Arc<Query> {
+    pub fn new(app: Arc<App>, metrics: &Metrics, tx_cache: TransactionCache) -> Arc<Query> {
         Arc::new(Query {
             app,
             tracker: RwLock::new(Tracker::new(metrics)),
-            tx_cache: TransactionCache::new(),
+            tx_cache,
         })
     }
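
The net effect of the query.rs changes: lookups and insertions now go through a bounded LruCache behind a Mutex instead of an unbounded HashMap behind an RwLock, and the cache is injected into Query rather than built inside it. Below is a minimal, self-contained sketch of the same get-or-load pattern; TxId, Tx, Cache and get_or_else are illustrative stand-ins (not the actual Sha256dHash, Transaction or TransactionCache types), and only the LruCache::new / get / put calls visible in the diff are used:

extern crate lru;

use lru::LruCache;
use std::sync::Mutex;

// Stand-ins so the sketch compiles on its own.
type TxId = u64;
type Tx = String;

struct Cache {
    map: Mutex<LruCache<TxId, Tx>>,
}

impl Cache {
    fn new(capacity: usize) -> Cache {
        Cache {
            map: Mutex::new(LruCache::new(capacity)),
        }
    }

    // Return the cached value for `txid`, or load it, cache it and return it.
    fn get_or_else<F>(&self, txid: TxId, load: F) -> Tx
    where
        F: FnOnce() -> Tx,
    {
        // LruCache::get needs &mut self (a hit refreshes the entry's recency),
        // which is why the cache now sits behind a Mutex rather than an RwLock.
        if let Some(tx) = self.map.lock().unwrap().get(&txid) {
            return tx.clone();
        }
        let tx = load();
        self.map.lock().unwrap().put(txid, tx.clone());
        tx
    }
}

fn main() {
    let cache = Cache::new(2);
    assert_eq!(cache.get_or_else(1, || "loaded".to_string()), "loaded");
    // The second lookup is served from the cache, so the loader is never called.
    assert_eq!(cache.get_or_else(1, || unreachable!()), "loaded");
}

The RwLock-to-Mutex switch follows from the API change: a HashMap read is immutable, but an LruCache hit mutates the internal recency order, so a shared read lock is no longer sufficient.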
