Browse Source

more cleanup, update to ldk 106

patch-1
John Cantrell 3 years ago
parent
commit
819e1c2234
  1. 1
      .dockerignore
  2. 28
      Cargo.lock
  3. 12
      Cargo.toml
  4. 19
      Dockerfile
  5. 2
      src/chain/listener_database.rs
  6. 4
      src/database/admin.rs
  7. 1
      src/database/node.rs
  8. 72
      src/disk.rs
  9. 5
      src/event_handler.rs
  10. 1
      src/grpc/admin.rs
  11. 15
      src/hex_utils.rs
  12. 23
      src/http/admin.rs
  13. 20
      src/lib/network_graph.rs
  14. 3
      src/main.rs
  15. 148
      src/node.rs
  16. 9
      src/services/admin.rs
  17. 2
      src/utils.rs

1
.dockerignore

@@ -0,0 +1 @@
./target

28
Cargo.lock

@@ -1074,7 +1074,9 @@ dependencies = [
[[package]]
name = "lightning"
version = "0.0.104"
version = "0.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "580647f97f8e6d138ad724027c8ca9b890b1001b05374c270bbee4c10309b641"
dependencies = [
"bitcoin",
"secp256k1",
@ -1082,7 +1084,9 @@ dependencies = [
[[package]]
name = "lightning-background-processor"
version = "0.0.104"
version = "0.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a9c8c4c6e4b9652287d8ce2fc3a168861fdb40eb68793567be5ed77ecce6eb"
dependencies = [
"bitcoin",
"lightning",
@ -1091,7 +1095,9 @@ dependencies = [
[[package]]
name = "lightning-block-sync"
version = "0.0.104"
version = "0.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8731c4f20bd4e0d588db6e849ac2114674a112cdfda65354cf9b4cf6018878a"
dependencies = [
"bitcoin",
"chunked_transfer",
@ -1102,7 +1108,9 @@ dependencies = [
[[package]]
name = "lightning-invoice"
version = "0.12.0"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f069b6eb46d7639d07977d14dc7d9a40d9d8bc5ac2b47f1924318fec13edd5c0"
dependencies = [
"bech32",
"bitcoin_hashes",
@ -1113,7 +1121,9 @@ dependencies = [
[[package]]
name = "lightning-net-tokio"
version = "0.0.104"
version = "0.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85119f898ac097d46c17a0ad7dda0f6ef6b923e5bcb4d1a5e39d33c3c68aa7bc"
dependencies = [
"bitcoin",
"lightning",
@ -1122,7 +1132,9 @@ dependencies = [
[[package]]
name = "lightning-persister"
version = "0.0.104"
version = "0.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f8aac01a61b302f3928adf235660c38aa5c246113fc7d19cc4cb60b5f53b7ae"
dependencies = [
"bitcoin",
"libc",
@ -2746,6 +2758,6 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "zeroize"
version = "1.5.3"
version = "1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50344758e2f40e3a1fcfc8f6f91aa57b5f8ebd8d27919fe6451f15aaaf9ee608"
checksum = "7eb5728b8afd3f280a869ce1d4c554ffaed35f45c231fc41bfbd0381bef50317"

12
Cargo.toml

@@ -12,12 +12,12 @@ name = "senseid"
path = "src/main.rs"
[dependencies]
lightning = { version = "0.0.104", features = ["max_level_trace"], path = "/Users/developer/Development/rust-lightning/lightning" }
lightning-block-sync = { version = "0.0.104", features = [ "rpc-client" ], path = "/Users/developer/Development/rust-lightning/lightning-block-sync" }
lightning-invoice = { version = "0.12.0", path = "/Users/developer/Development/rust-lightning/lightning-invoice" }
lightning-net-tokio = { version = "0.0.104", path = "/Users/developer/Development/rust-lightning/lightning-net-tokio" }
lightning-persister = { version = "0.0.104", path = "/Users/developer/Development/rust-lightning/lightning-persister" }
lightning-background-processor = { version = "0.0.104", path = "/Users/developer/Development/rust-lightning/lightning-background-processor" }
lightning = { version = "0.0.106", features = ["max_level_trace"] }
lightning-block-sync = { version = "0.0.106", features = [ "rpc-client" ] }
lightning-invoice = { version = "0.14.0" }
lightning-net-tokio = { version = "0.0.106" }
lightning-persister = { version = "0.0.106" }
lightning-background-processor = { version = "0.0.106" }
base64 = "0.13.0"
bitcoin = "0.27"

19
Dockerfile

@@ -0,0 +1,19 @@
# ---- build stage: compile the senseid binary in release mode ----
FROM rust:1.56 as build
WORKDIR /senseid
# copy your source tree
COPY . .
# NOTE(review): rustfmt is presumably needed by build-time code generation
# (e.g. tonic/prost formatting generated sources) — confirm before removing.
RUN rustup component add rustfmt
RUN cargo build --verbose --release
# our final base
# NOTE(review): rust:1.56 builds on a newer Debian base than buster-slim;
# confirm the release binary's glibc requirements are satisfied at runtime.
FROM debian:buster-slim
# copy the build artifact from the build stage
COPY --from=build /senseid/target/release/senseid .
# set the startup command to run your binary
CMD ["./senseid"]

2
src/chain/listener_database.rs

@@ -74,7 +74,7 @@ impl ListenerDatabase {
let mut outputs_sum: u64 = 0;
// look for our own inputs
for (i, input) in tx.input.iter().enumerate() {
for (_i, input) in tx.input.iter().enumerate() {
if let Some(previous_output) = database
.get_previous_output(&input.previous_output)
.unwrap()

4
src/database/admin.rs

@@ -7,10 +7,8 @@
// You may not use this file except in accordance with one or both of these
// licenses.
use std::time::SystemTime;
use super::Error;
use crate::utils::{self, seconds_since_epoch};
use crate::utils::{self};
use crate::{
hex_utils,
services::{PaginationRequest, PaginationResponse},

1
src/database/node.rs

@ -14,7 +14,6 @@ use crate::{
services::{PaginationRequest, PaginationResponse, PaymentsFilter},
};
use bitcoin::consensus::encode::{deserialize, serialize};
use std::str::FromStr;
use super::Error;
use bitcoin::BlockHash;

72
src/disk.rs

@@ -7,20 +7,28 @@
// You may not use this file except in accordance with one or both of these
// licenses.
use crate::node;
use crate::chain::broadcaster::SenseiBroadcaster;
use crate::chain::fee_estimator::SenseiFeeEstimator;
use crate::node::{self, ChannelManager, ChainMonitor};
use bitcoin::secp256k1::key::PublicKey;
use bitcoin::BlockHash;
use chrono::Utc;
use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
use lightning::routing::network_graph::NetworkGraph;
use lightning::routing::scoring::Scorer;
use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters};
use lightning::util::logger::{Logger, Record};
use lightning::util::ser::{Readable, Writeable, Writer};
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use lightning_background_processor::{Persister};
use lightning_persister::FilesystemPersister;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::{BufRead, BufReader, BufWriter};
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
pub struct FilesystemLogger {
data_dir: String,
@ -86,22 +94,6 @@ pub fn read_channel_peer_data(
Ok(peer_data)
}
/// Atomically persists `network_graph` to `path`.
///
/// The graph is first serialized into a sibling `<path>.tmp` file and then
/// renamed over `path`, so readers never observe a half-written graph.
/// On failure the temporary file is removed (best-effort) and the original
/// error is returned.
pub fn persist_network(path: &Path, network_graph: &NetworkGraph) -> std::io::Result<()> {
    let mut tmp_path = path.to_path_buf().into_os_string();
    tmp_path.push(".tmp");
    let file = fs::OpenOptions::new()
        .write(true)
        .create(true)
        .open(&tmp_path)?;
    let result = network_graph
        .write(&mut BufWriter::new(file))
        .and_then(|_| fs::rename(&tmp_path, path));
    match result {
        Ok(()) => Ok(()),
        Err(e) => {
            // Clean up the partial temp file; the write/rename error wins.
            let _ = fs::remove_file(&tmp_path);
            Err(e)
        }
    }
}
pub fn read_network(path: &Path, genesis_hash: BlockHash) -> NetworkGraph {
if let Ok(file) = File::open(path) {
if let Ok(graph) = NetworkGraph::read(&mut BufReader::new(file)) {
@ -111,7 +103,7 @@ pub fn read_network(path: &Path, genesis_hash: BlockHash) -> NetworkGraph {
NetworkGraph::new(genesis_hash)
}
pub fn persist_scorer(path: &Path, scorer: &Scorer) -> std::io::Result<()> {
pub fn persist_scorer(path: &Path, scorer: &ProbabilisticScorer<Arc<NetworkGraph>>) -> std::io::Result<()> {
let mut tmp_path = path.to_path_buf().into_os_string();
tmp_path.push(".tmp");
let file = fs::OpenOptions::new()
@ -127,11 +119,45 @@ pub fn persist_scorer(path: &Path, scorer: &Scorer) -> std::io::Result<()> {
}
}
pub fn read_scorer(path: &Path) -> Scorer {
pub fn read_scorer(path: &Path, graph: Arc<NetworkGraph>) -> ProbabilisticScorer<Arc<NetworkGraph>> {
let params = ProbabilisticScoringParameters::default();
if let Ok(file) = File::open(path) {
if let Ok(scorer) = Scorer::read(&mut BufReader::new(file)) {
if let Ok(scorer) =
ProbabilisticScorer::read(&mut BufReader::new(file), (params, Arc::clone(&graph))) {
return scorer;
}
}
Scorer::default()
ProbabilisticScorer::new(params, graph)
}
/// State persister handed to the LDK background processor; writes the
/// channel manager and (optionally) the network graph to disk.
pub struct DataPersister {
// Directory passed to `FilesystemPersister` for all on-disk state.
pub data_dir: String,
// When true, routing is delegated externally and the local network
// graph is intentionally not persisted.
pub external_router: bool
}
impl
Persister<
InMemorySigner,
Arc<ChainMonitor>,
Arc<SenseiBroadcaster>,
Arc<KeysManager>,
Arc<SenseiFeeEstimator>,
Arc<FilesystemLogger>,
> for DataPersister
{
// Persist the channel manager via LDK's filesystem persister.
fn persist_manager(&self, channel_manager: &ChannelManager) -> Result<(), std::io::Error> {
FilesystemPersister::persist_manager(self.data_dir.clone(), channel_manager)
}
// Persist the network graph, skipped entirely in external-router mode.
fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), std::io::Error> {
if !self.external_router {
if FilesystemPersister::persist_network_graph(self.data_dir.clone(), network_graph).is_err()
{
// Persistence errors here are non-fatal as we can just fetch the routing graph
// again later, but they may indicate a disk error which could be fatal elsewhere.
eprintln!("Warning: Failed to persist network graph, check your disk and permissions");
}
}
Ok(())
}
}

5
src/event_handler.rs

@ -43,6 +43,9 @@ pub struct LightningNodeEventHandler {
impl EventHandler for LightningNodeEventHandler {
fn handle_event(&self, event: &Event) {
match event {
Event::OpenChannelRequest { .. } => {
// Unreachable, we don't set manually_accept_inbound_channels
},
Event::FundingGenerationReady {
temporary_channel_id,
channel_value_satoshis,
@ -68,7 +71,7 @@ impl EventHandler for LightningNodeEventHandler {
let wallet = self.wallet.lock().unwrap();
let mut tx_builder = wallet.build_tx();
let fee_sats_per_1000_wu = self
let _fee_sats_per_1000_wu = self
.chain_manager
.bitcoind_client
.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);

1
src/grpc/admin.rs

@ -21,7 +21,6 @@ use super::sensei::{
use crate::{
database::admin::AccessToken,
services::admin::{AdminRequest, AdminResponse},
utils,
};
use tonic::{metadata::MetadataMap, Response, Status};

15
src/hex_utils.rs

@ -39,7 +39,22 @@ pub fn hex_str(value: &[u8]) -> String {
res
}
/// Renders `bytes` as a `String`, keeping only printable ASCII
/// (0x20..=0x7e) and silently dropping every other byte.
///
/// Limiting output to printable ASCII avoids any UTF-8 validation; a
/// fuller implementation could admit a sane UTF-8 subset instead.
pub fn sanitize_string(bytes: &[u8]) -> String {
    bytes
        .iter()
        .filter(|b| (0x20u8..=0x7e).contains(*b))
        .map(|b| *b as char)
        .collect()
}
pub fn to_compressed_pubkey(hex: &str) -> Option<PublicKey> {
if hex.len() != 33 * 2 {
return None;
}
let data = match to_vec(&hex[0..33 * 2]) {
Some(bytes) => bytes,
None => return None,

23
src/http/admin.rs

@ -25,7 +25,7 @@ use crate::{
admin::{AdminRequest, AdminResponse},
PaginationRequest,
},
utils, RequestContext,
RequestContext,
};
use super::auth_header::AuthHeader;
@ -168,7 +168,6 @@ pub fn get_token_from_cookies_or_header(
.to_str()
.map(|str| str.to_string())
.map_err(|_| StatusCode::UNAUTHORIZED);
println!("{:?}", res);
res
}
None => match cookies.get("token") {
@ -232,7 +231,7 @@ pub async fn list_tokens(
Extension(request_context): Extension<Arc<RequestContext>>,
cookies: Cookies,
Query(pagination): Query<PaginationRequest>,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let authenticated =
authenticate_request(&request_context, "tokens/list", &cookies, token).await?;
@ -254,7 +253,7 @@ pub async fn create_token(
Extension(request_context): Extension<Arc<RequestContext>>,
Json(payload): Json<Value>,
cookies: Cookies,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let authenticated =
authenticate_request(&request_context, "tokens/create", &cookies, token).await?;
@ -280,7 +279,7 @@ pub async fn delete_token(
Extension(request_context): Extension<Arc<RequestContext>>,
Json(payload): Json<Value>,
cookies: Cookies,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let authenticated =
authenticate_request(&request_context, "tokens/delete", &cookies, token).await?;
@ -306,7 +305,7 @@ pub async fn list_nodes(
Extension(request_context): Extension<Arc<RequestContext>>,
cookies: Cookies,
Query(pagination): Query<PaginationRequest>,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let authenticated =
authenticate_request(&request_context, "nodes/list", &cookies, token).await?;
@ -367,7 +366,7 @@ pub async fn login(
})))
}
AdminResponse::StartAdmin {
pubkey,
pubkey:_,
macaroon,
token,
} => {
@ -452,7 +451,7 @@ pub async fn init_sensei(
pub async fn get_status(
Extension(request_context): Extension<Arc<RequestContext>>,
cookies: Cookies,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let token = match get_token_from_cookies_or_header(&cookies, token) {
Ok(token) => token,
@ -534,7 +533,7 @@ pub async fn create_node(
Extension(request_context): Extension<Arc<RequestContext>>,
Json(payload): Json<Value>,
cookies: Cookies,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let authenticated =
authenticate_request(&request_context, "nodes/create", &cookies, token).await?;
@ -560,7 +559,7 @@ pub async fn start_node(
Extension(request_context): Extension<Arc<RequestContext>>,
Json(payload): Json<Value>,
cookies: Cookies,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let authenticated =
authenticate_request(&request_context, "nodes/start", &cookies, token).await?;
@ -586,7 +585,7 @@ pub async fn stop_node(
Extension(request_context): Extension<Arc<RequestContext>>,
Json(payload): Json<Value>,
cookies: Cookies,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let authenticated =
authenticate_request(&request_context, "nodes/stop", &cookies, token).await?;
@ -612,7 +611,7 @@ pub async fn delete_node(
Extension(request_context): Extension<Arc<RequestContext>>,
Json(payload): Json<Value>,
cookies: Cookies,
AuthHeader { macaroon, token }: AuthHeader,
AuthHeader { macaroon:_, token }: AuthHeader,
) -> Result<Json<AdminResponse>, StatusCode> {
let authenticated =
authenticate_request(&request_context, "nodes/delete", &cookies, token).await?;

20
src/lib/network_graph.rs

@@ -9,7 +9,7 @@
use bitcoin::secp256k1::PublicKey;
use lightning::{
ln::msgs::{self, LightningError, RoutingMessageHandler},
ln::msgs::{self, LightningError, RoutingMessageHandler, Init},
util::events::{MessageSendEvent, MessageSendEventsProvider},
};
use std::{ops::Deref, sync::Arc};
@ -44,6 +44,15 @@ impl RoutingMessageHandler for OptionalNetworkGraphMsgHandler {
}
}
/// Forwards the peer-connected notification to the wrapped routing
/// handler, if one is configured; otherwise this is a no-op.
fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) {
    if let Some(inner) = &self.network_graph_msg_handler {
        inner.peer_connected(their_node_id, init);
    }
}
fn handle_channel_announcement(
&self,
_msg: &msgs::ChannelAnnouncement,
@ -93,15 +102,6 @@ impl RoutingMessageHandler for OptionalNetworkGraphMsgHandler {
}
}
/// Relays the routing-table sync request to the wrapped handler when
/// present; does nothing when no graph handler is configured.
fn sync_routing_table(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {
    if let Some(inner) = &self.network_graph_msg_handler {
        inner.sync_routing_table(_their_node_id, _init);
    }
}
fn handle_reply_channel_range(
&self,
_their_node_id: &PublicKey,

3
src/main.rs

@ -35,7 +35,7 @@ use axum::{
handler::Handler,
http::StatusCode,
response::{Html, IntoResponse, Response},
routing::{get, get_service},
routing::{get},
AddExtensionLayer, Router,
};
use clap::Parser;
@ -43,7 +43,6 @@ use rust_embed::RustEmbed;
use std::net::SocketAddr;
use tower_cookies::CookieManagerLayer;
use tower_http::services::ServeDir;
use grpc::admin::{AdminServer, AdminService as GrpcAdminService};
use grpc::node::{NodeServer, NodeService as GrpcNodeService};

148
src/node.rs

@ -13,7 +13,7 @@ use crate::chain::listener_database::ListenerDatabase;
use crate::chain::manager::SenseiChainManager;
use crate::config::LightningNodeConfig;
use crate::database::node::NodeDatabase;
use crate::disk::FilesystemLogger;
use crate::disk::{FilesystemLogger, DataPersister};
use crate::error::Error;
use crate::event_handler::LightningNodeEventHandler;
use crate::lib::network_graph::OptionalNetworkGraphMsgHandler;
@ -29,7 +29,6 @@ use bitcoin::hashes::Hash;
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::ln::msgs::NetAddress;
use lightning::routing::router::{self, Payee, RouteParameters};
use lightning_invoice::payment::PaymentError;
use tindercrypt::cryptors::RingCryptor;
@ -40,7 +39,7 @@ use bitcoin::secp256k1::PublicKey;
use bitcoin::util::bip32::{ChildNumber, DerivationPath, ExtendedPrivKey};
use bitcoin::BlockHash;
use lightning::chain::chainmonitor;
use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager};
use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient};
use lightning::chain::{self, Filter};
use lightning::chain::{Watch};
use lightning::ln::channelmanager::{self, ChannelDetails, SimpleArcChannelManager};
@ -50,11 +49,12 @@ use lightning::ln::peer_handler::{
};
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph, NodeId};
use lightning::routing::scoring::{Scorer, ScorerUsingTime};
use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScorerUsingTime};
use lightning::util::config::{ChannelConfig, ChannelHandshakeLimits, UserConfig};
use lightning::util::ser::ReadableArgs;
use lightning_background_processor::BackgroundProcessor;
use lightning_invoice::utils::DefaultRouter;
use bitcoin::hashes::sha256::Hash as Sha256;
use lightning_invoice::{payment, utils, Currency, Invoice};
use lightning_net_tokio::SocketDescriptor;
use lightning_persister::FilesystemPersister;
@ -65,10 +65,10 @@ use std::fmt::Display;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::iter;
use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
use std::path::Path;
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant, SystemTime};
use std::{fmt, fs};
@ -174,7 +174,7 @@ pub type Router = DefaultRouter<Arc<NetworkGraph>, Arc<FilesystemLogger>>;
pub type InvoicePayer = payment::InvoicePayer<
Arc<ChannelManager>,
Router,
Arc<Mutex<Scorer>>,
Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph>>>>,
Arc<FilesystemLogger>,
Arc<LightningNodeEventHandler>,
>;
@ -254,7 +254,8 @@ pub struct LightningNode {
pub keys_manager: Arc<KeysManager>,
pub logger: Arc<FilesystemLogger>,
pub invoice_payer: Arc<InvoicePayer>,
pub scorer: Arc<Mutex<ScorerUsingTime<Instant>>>,
pub scorer: Arc<Mutex<ProbabilisticScorerUsingTime<Arc<NetworkGraph>, Instant>>>,
pub stop_listen: Arc<AtomicBool>
}
impl LightningNode {
@ -434,7 +435,7 @@ impl LightningNode {
let best_block = chain_manager.get_best_block().await?;
let (channel_manager_blockhash, mut channel_manager) = {
let (channel_manager_blockhash, channel_manager) = {
if let Ok(mut f) = fs::File::open(channel_manager_path) {
let mut channel_monitor_mut_references = Vec::new();
for (_, channel_monitor) in channelmonitors.iter_mut() {
@ -489,16 +490,14 @@ impl LightningNode {
));
}
let channel_manager_info = (channel_manager_blockhash, &mut channel_manager);
let monitor_info = bundled_channel_monitors
.iter_mut()
.map(|monitor_bundle| (monitor_bundle.0, &mut monitor_bundle.1))
.collect::<Vec<(BlockHash, &mut SyncableMonitor)>>();
.map(|monitor_bundle| (monitor_bundle.0, &monitor_bundle.1))
.collect::<Vec<(BlockHash, &SyncableMonitor)>>();
let mut chain_listeners = vec![(
channel_manager_info.0,
channel_manager_info.1 as &(dyn chain::Listen + Send + Sync),
channel_manager_blockhash,
&channel_manager as &(dyn chain::Listen + Send + Sync),
)];
for (block_hash, monitor) in monitor_info.into_iter() {
@ -568,16 +567,23 @@ impl LightningNode {
let peer_manager = Arc::new(PeerManager::new(
lightning_msg_handler,
keys_manager.get_node_secret(),
keys_manager.get_node_secret(Recipient::Node).unwrap(),
&ephemeral_bytes,
logger.clone(),
Arc::new(IgnoringMessageHandler {}),
));
let scorer_path = config.scorer_path();
let scorer = Arc::new(Mutex::new(disk::read_scorer(Path::new(&scorer_path))));
let scorer = Arc::new(Mutex::new(disk::read_scorer(
Path::new(&scorer_path),
Arc::clone(&network_graph),
)));
let router = DefaultRouter::new(network_graph.clone(), logger.clone());
let router = DefaultRouter::new(
network_graph.clone(),
logger.clone(),
keys_manager.get_secure_random_bytes()
);
let pubkey = channel_manager.get_our_node_id().to_string();
@ -609,6 +615,8 @@ impl LightningNode {
payment::RetryAttempts(5),
));
let stop_listen = Arc::new(AtomicBool::new(false));
Ok(LightningNode {
config,
database,
@ -625,6 +633,7 @@ impl LightningNode {
logger,
scorer,
invoice_payer,
stop_listen
})
}
@ -644,29 +653,10 @@ impl LightningNode {
.await
.unwrap();
if !config.external_router {
let network_graph = self.network_graph.clone();
let network_graph_path = config.network_graph_path();
handles.push(tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(600));
loop {
interval.tick().await;
if disk::persist_network(Path::new(&network_graph_path), &network_graph.clone())
.is_err()
{
// Persistence errors here are non-fatal as we can just fetch the routing graph
// again later, but they may indicate a disk error which could be fatal elsewhere.
eprintln!(
"Warning: Failed to persist network graph, check your disk and permissions"
);
}
}
}));
}
let peer_manager_connection_handler = self.peer_manager.clone();
let stop_listen_ref = Arc::clone(&self.stop_listen);
handles.push(tokio::spawn(async move {
let listener = tokio::net::TcpListener::bind(format!(
"0.0.0.0:{}",
@ -677,6 +667,9 @@ impl LightningNode {
loop {
let peer_mgr = peer_manager_connection_handler.clone();
let tcp_stream = listener.accept().await.unwrap().0;
if stop_listen_ref.load(Ordering::Acquire) {
return;
}
tokio::spawn(async move {
lightning_net_tokio::setup_inbound(
peer_mgr.clone(),
@ -704,14 +697,15 @@ impl LightningNode {
}
}));
let persist_channel_manager_callback = move |node: &ChannelManager| {
FilesystemPersister::persist_manager(self.config.data_dir(), &*node)
let persister = DataPersister {
data_dir: self.config.data_dir(),
external_router: self.config.external_router
};
// TODO: should we allow 'child' nodes to update NetworkGraph based on payment failures?
// feels like probably but depends on exactly what is updated
let background_processor = BackgroundProcessor::start(
persist_channel_manager_callback,
persister,
self.invoice_payer.clone(),
self.chain_monitor.clone(),
self.channel_manager.clone(),
@ -896,39 +890,49 @@ impl LightningNode {
Ok(())
}
pub fn keysend(&self, payee_pubkey: PublicKey, amt_msat: u64) -> Result<(), Error> {
let first_hops = self.channel_manager.list_usable_channels();
let payer_pubkey = self.channel_manager.get_our_node_id();
fn keysend<K: KeysInterface>(
&self, invoice_payer: &InvoicePayer, payee_pubkey: PublicKey, amt_msat: u64, keys: &K,
) -> Result<(), Error> {
let payment_preimage = keys.get_secure_random_bytes();
let payee = Payee::for_keysend(payee_pubkey);
let params = RouteParameters {
payee,
final_value_msat: amt_msat,
final_cltv_expiry_delta: 40,
let status = match invoice_payer.pay_pubkey(
payee_pubkey,
PaymentPreimage(payment_preimage),
amt_msat,
40,
) {
Ok(_payment_id) => {
println!("EVENT: initiated sending {} msats to {}", amt_msat, payee_pubkey);
print!("> ");
HTLCStatus::Pending
}
Err(PaymentError::Invoice(e)) => {
println!("ERROR: invalid payee: {}", e);
print!("> ");
return Ok(());
}
Err(PaymentError::Routing(e)) => {
println!("ERROR: failed to find route: {}", e.err);
print!("> ");
return Ok(());
}
Err(PaymentError::Sending(e)) => {
println!("ERROR: failed to send payment: {:?}", e);
print!("> ");
HTLCStatus::Failed
}
};
let route = router::find_route(
&payer_pubkey,
&params,
&self.network_graph,
Some(&first_hops.iter().collect::<Vec<_>>()),
self.logger.clone(),
&self.scorer.lock().unwrap(),
)?;
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage).into_inner());
let mut database = self.database.lock().unwrap();
let payment_hash = self
.channel_manager
.send_spontaneous_payment(&route, None)
.unwrap()
.0;
database.create_or_update_payment(
PaymentInfo {
hash: payment_hash,
preimage: None,
secret: None,
status: HTLCStatus::Pending,
status,
amt_msat: MillisatAmount(Some(amt_msat)),
origin: PaymentOrigin::SpontaneousOutgoing,
label: None,
@ -1038,7 +1042,7 @@ impl LightningNode {
channel.alias = self
.get_alias_for_channel_counterparty(&chan_info)
.map(|alias_bytes| String::from_utf8(alias_bytes.to_vec()).unwrap());
.map(|alias_bytes| hex_utils::sanitize_string(&alias_bytes));
let match_channel = channel.clone();
let matches_channel_id = match_channel.channel_id.contains(&query);
@ -1156,12 +1160,7 @@ impl LightningNode {
pub fn node_info(&self) -> Result<NodeInfo, Error> {
let chans = self.channel_manager.list_channels();
let local_balance_msat = chans
.iter()
.map(|c| {
c.unspendable_punishment_reserve.unwrap_or(0) * 1000 + c.outbound_capacity_msat
})
.sum::<u64>();
let local_balance_msat = chans.iter().map(|c| c.balance_msat).sum::<u64>();
Ok(NodeInfo {
node_pubkey: self.channel_manager.get_our_node_id().to_string(),
@ -1191,7 +1190,7 @@ impl LightningNode {
pub fn sign_message(&self, message: String) -> Result<String, Error> {
Ok(lightning::util::message_signing::sign(
message.as_bytes(),
&self.keys_manager.get_node_secret(),
&self.keys_manager.get_node_secret(Recipient::Node).unwrap(),
)?)
}
@ -1268,7 +1267,12 @@ impl LightningNode {
amt_msat,
} => match hex_utils::to_compressed_pubkey(&dest_pubkey) {
Some(pubkey) => {
self.keysend(pubkey, amt_msat)?;
self.keysend(
&*self.invoice_payer,
pubkey,
amt_msat,
&*self.keys_manager,
)?;
Ok(NodeResponse::Keysend {})
}
None => Err(NodeRequestError::Sensei("invalid dest_pubkey".into())),

9
src/services/admin.rs

@ -15,7 +15,6 @@ use crate::database::{
admin::{AdminDatabase, Node, Role, Status},
};
use crate::error::Error as SenseiError;
use crate::AdminRequestResponse;
use crate::{
config::{LightningNodeConfig, SenseiConfig},
hex_utils,
@ -23,9 +22,8 @@ use crate::{
NodeDirectory, NodeHandle,
};
use lightning_block_sync::BlockSource;
use serde::Serialize;
use std::sync::mpsc::Receiver;
use std::sync::atomic::Ordering;
use std::{collections::hash_map::Entry, fs, sync::Arc};
use tokio::sync::Mutex;
pub enum AdminRequest {
@ -540,8 +538,11 @@ impl AdminService {
if let Entry::Occupied(entry) = entry {
let node_handle = entry.remove();
// TODO: stop accepting new incoming connections somehow?
// Disconnect our peers and stop accepting new connections. This ensures we don't continue
// updating our channel data after we've stopped the background processor.
node_handle.node.peer_manager.disconnect_all_peers();
node_handle.node.stop_listen.store(true, Ordering::Release);
let _res = node_handle.background_processor.stop();
for handle in node_handle.handles {
handle.abort();

2
src/utils.rs

@@ -16,7 +16,7 @@ use std::{
};
/// Returns the number of whole hours elapsed since the Unix epoch.
///
/// # Errors
/// Returns `SystemTimeError` if the system clock reads earlier than the
/// Unix epoch.
pub fn hours_since_epoch() -> Result<u64, SystemTimeError> {
    // The duration value itself is unused; the call is kept only so a clock
    // set before the epoch is reported as an error from this function too.
    // NOTE(review): likely redundant with the check inside
    // `seconds_since_epoch` — confirm and drop if so.
    let _time_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
    let hours_since_epoch = seconds_since_epoch()? / 3600;
    Ok(hours_since_epoch)
}

Loading…
Cancel
Save