
logging: adapt lightning branch to logging changes

SomberNight, 6 years ago (committed by ThomasV)
commit cc57da704b
10 changed files:

  1. electrum/gui/qt/main_window.py (2)
  2. electrum/gui/qt/util.py (2)
  3. electrum/lnchannel.py (26)
  4. electrum/lnpeer.py (92)
  5. electrum/lnrouter.py (37)
  6. electrum/lnsweep.py (10)
  7. electrum/lnverifier.py (8)
  8. electrum/lnwatcher.py (24)
  9. electrum/lnworker.py (60)
  10. electrum/sql_db.py (7)
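
The change follows one pattern throughout: classes that used to mix in PrintError from electrum.util now mix in Logger from electrum.logging, call Logger.__init__(self) in their constructors, and emit messages through self.logger.info(...) with f-strings instead of self.print_error(...) with positional arguments. A minimal sketch of that pattern is below; the Logger internals shown here are an assumption for illustration, not the actual electrum.logging implementation.

import logging

class Logger:
    # hypothetical stand-in for electrum.logging.Logger (mixin)
    def __init__(self):
        name = f"{self.__class__.__module__}.{self.__class__.__qualname__}"
        diag = self.diagnostic_name()
        if diag:
            name += f".[{diag}]"
        self.logger = logging.getLogger(name)

    def diagnostic_name(self):
        return ''

class Channel(Logger):
    def __init__(self, name=None):
        self.name = name
        Logger.__init__(self)  # called after self.name is set, as in the diff

    def diagnostic_name(self):
        # lets log lines identify which channel instance they belong to
        return str(self.name) if self.name else super().diagnostic_name()

    def add_htlc(self):
        self.logger.info("add_htlc")  # was: self.print_error("add_htlc")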

electrum/gui/qt/main_window.py (2)

@@ -1678,7 +1678,7 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
         LN_NUM_PAYMENT_ATTEMPTS = 1 # TODO increase
         def on_success(result):
-            self.print_error('ln payment success', result)
+            self.logger.info(f'ln payment success. {result}')
             self.do_clear()
         def on_failure(exc_info):
             type_, e, traceback = exc_info

electrum/gui/qt/util.py (2)

@@ -23,7 +23,7 @@ from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
                              QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate)
 from electrum.i18n import _, languages
-from electrum.util import FileImportFailed, FileExportFailed, make_aiohttp_session, PrintError, resource_path
+from electrum.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
 from electrum.util import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN
 if TYPE_CHECKING:

electrum/lnchannel.py (26)

@@ -31,12 +31,13 @@ from typing import Optional, Dict, List, Tuple, NamedTuple, Set, Callable, Itera
 import time
 from . import ecc
-from .util import bfh, PrintError, bh2u
+from .util import bfh, bh2u
 from .bitcoin import TYPE_SCRIPT, TYPE_ADDRESS
 from .bitcoin import redeem_script_to_address
 from .crypto import sha256, sha256d
 from .simple_config import get_config
 from .transaction import Transaction
+from .logging import Logger
 from .lnutil import (Outpoint, LocalConfig, RemoteConfig, Keypair, OnlyPubkeyKeypair, ChannelConstraints,
                      get_per_commitment_secret_from_seed, secret_to_pubkey, derive_privkey, make_closing_tx,
@@ -111,7 +112,7 @@ def str_bytes_dict_from_save(x):
 def str_bytes_dict_to_save(x):
     return {str(k): bh2u(v) for k, v in x.items()}
-class Channel(PrintError):
+class Channel(Logger):
     def diagnostic_name(self):
         if self.name:
             return str(self.name)
@@ -155,6 +156,7 @@ class Channel(PrintError):
         self.hm = HTLCManager(self.config[LOCAL].ctn if log else 0, self.config[REMOTE].ctn if log else 0, log)
         self.name = name
+        Logger.__init__(self)
         self.pending_fee = None
@@ -254,7 +256,7 @@ class Channel(PrintError):
         self._check_can_pay(htlc.amount_msat)
         htlc = htlc._replace(htlc_id=self.config[LOCAL].next_htlc_id)
         self.hm.send_htlc(htlc)
-        self.print_error("add_htlc")
+        self.logger.info("add_htlc")
         self.config[LOCAL]=self.config[LOCAL]._replace(next_htlc_id=htlc.htlc_id + 1)
         return htlc
@@ -275,7 +277,7 @@ class Channel(PrintError):
                              f' Available at remote: {self.available_to_spend(REMOTE)},' +\
                              f' HTLC amount: {htlc.amount_msat}')
         self.hm.recv_htlc(htlc)
-        self.print_error("receive_htlc")
+        self.logger.info("receive_htlc")
         self.config[REMOTE]=self.config[REMOTE]._replace(next_htlc_id=htlc.htlc_id + 1)
         return htlc
@@ -292,7 +294,7 @@ class Channel(PrintError):
         This docstring was adapted from LND.
         """
         next_remote_ctn = self.get_current_ctn(REMOTE) + 1
-        self.print_error("sign_next_commitment", next_remote_ctn)
+        self.logger.info(f"sign_next_commitment {next_remote_ctn}")
         self.hm.send_ctx()
         pending_remote_commitment = self.pending_commitment(REMOTE)
         sig_64 = sign_and_get_sig_string(pending_remote_commitment, self.config[LOCAL], self.config[REMOTE])
@@ -341,7 +343,7 @@ class Channel(PrintError):
         This docstring is from LND.
         """
-        self.print_error("receive_new_commitment")
+        self.logger.info("receive_new_commitment")
         self.hm.recv_ctx()
@@ -405,7 +407,7 @@ class Channel(PrintError):
         return remote_htlc_sig
     def revoke_current_commitment(self):
-        self.print_error("revoke_current_commitment")
+        self.logger.info("revoke_current_commitment")
         new_feerate = self.constraints.feerate
         if self.pending_fee is not None:
@@ -462,7 +464,7 @@ class Channel(PrintError):
             self.lnwatcher.add_sweep_tx(outpoint, prev_txid, str(tx))
     def receive_revocation(self, revocation: RevokeAndAck):
-        self.print_error("receive_revocation")
+        self.logger.info("receive_revocation")
         cur_point = self.config[REMOTE].current_per_commitment_point
         derived_point = ecc.ECPrivkey(revocation.per_commitment_secret).get_public_key_bytes(compressed=True)
@@ -614,7 +616,7 @@ class Channel(PrintError):
         """
         SettleHTLC attempts to settle an existing outstanding received HTLC.
         """
-        self.print_error("settle_htlc")
+        self.logger.info("settle_htlc")
         log = self.hm.log[REMOTE]
         htlc = log['adds'][htlc_id]
         assert htlc.payment_hash == sha256(preimage)
@@ -624,7 +626,7 @@ class Channel(PrintError):
             self.lnworker.set_paid(htlc.payment_hash)
     def receive_htlc_settle(self, preimage, htlc_id):
-        self.print_error("receive_htlc_settle")
+        self.logger.info("receive_htlc_settle")
         log = self.hm.log[LOCAL]
         htlc = log['adds'][htlc_id]
         assert htlc.payment_hash == sha256(preimage)
@@ -635,11 +637,11 @@ class Channel(PrintError):
             self.lnworker.set_paid(htlc.payment_hash)
     def fail_htlc(self, htlc_id):
-        self.print_error("fail_htlc")
+        self.logger.info("fail_htlc")
         self.hm.send_fail(htlc_id)
     def receive_fail_htlc(self, htlc_id):
-        self.print_error("receive_fail_htlc")
+        self.logger.info("receive_fail_htlc")
         self.hm.recv_fail(htlc_id)
     @property

electrum/lnpeer.py (92)

@@ -24,8 +24,9 @@ from . import bitcoin
 from . import ecc
 from .ecc import sig_string_from_r_and_s, get_r_and_s_from_sig_string, der_sig_from_sig_string
 from . import constants
-from .util import PrintError, bh2u, print_error, bfh, log_exceptions, list_enabled_bits, ignore_exceptions
+from .util import bh2u, bfh, log_exceptions, list_enabled_bits, ignore_exceptions
 from .transaction import Transaction, TxOutput
+from .logging import Logger
 from .lnonion import (new_onion_packet, decode_onion_error, OnionFailureCode, calc_hops_data_for_payment,
                       process_onion_packet, OnionPacket, construct_onion_error, OnionRoutingFailureMessage,
                       ProcessedOnionPacket)
@@ -52,7 +53,7 @@ def channel_id_from_funding_tx(funding_txid: str, funding_index: int) -> Tuple[b
     i = int.from_bytes(funding_txid_bytes, 'big') ^ funding_index
     return i.to_bytes(32, 'big'), funding_txid_bytes
-class Peer(PrintError):
+class Peer(Logger):
     def __init__(self, lnworker: 'LNWorker', pubkey:bytes, transport: LNTransportBase):
         self.initialized = asyncio.Event()
@@ -84,10 +85,11 @@ class Peer(PrintError):
         self._local_changed_events = defaultdict(asyncio.Event)
         self._remote_changed_events = defaultdict(asyncio.Event)
         self.receiving_channels = False
+        Logger.__init__(self)
     def send_message(self, message_name: str, **kwargs):
         assert type(message_name) is str
-        self.print_error("Sending '%s'"%message_name.upper())
+        self.logger.info(f"Sending {message_name.upper()}")
         self.transport.send_bytes(encode_msg(message_name, **kwargs))
     async def initialize(self):
@@ -112,7 +114,7 @@ class Peer(PrintError):
         try:
             f = getattr(self, 'on_' + message_type)
         except AttributeError:
-            #self.print_error("Received '%s'" % message_type.upper(), payload)
+            #self.logger.info("Received '%s'" % message_type.upper(), payload)
             return
         # raw message is needed to check signature
         if message_type in ['node_announcement', 'channel_update']:
@@ -123,7 +125,7 @@ class Peer(PrintError):
     def on_error(self, payload):
         # todo: self.channel_reestablished is not a queue
-        self.print_error("error", payload["data"].decode("ascii"))
+        self.logger.info(f"error {payload['data'].decode('ascii')}")
         chan_id = payload.get("channel_id")
         for d in [ self.channel_accepted, self.funding_signed,
                    self.funding_created,
@@ -156,7 +158,7 @@ class Peer(PrintError):
     def on_init(self, payload):
         if self.initialized.is_set():
-            self.print_error("ALREADY INITIALIZED BUT RECEIVED INIT")
+            self.logger.info("ALREADY INITIALIZED BUT RECEIVED INIT")
             return
         # if they required some even flag we don't have, they will close themselves
         # but if we require an even flag they don't have, we close
@@ -196,7 +198,7 @@ class Peer(PrintError):
         try:
             return await func(self, *args, **kwargs)
         except Exception as e:
-            self.print_error("Disconnecting: {}".format(e))
+            self.logger.info("Disconnecting: {}".format(e))
         finally:
             self.close_and_cleanup()
     return wrapper_func
@@ -215,9 +217,9 @@ class Peer(PrintError):
         await self.initialized.wait()
         timestamp = self.channel_db.get_last_timestamp()
         if timestamp == 0:
-            self.print_error('requesting whole channel graph')
+            self.logger.info('requesting whole channel graph')
         else:
-            self.print_error('requesting channel graph since', datetime.fromtimestamp(timestamp).ctime())
+            self.logger.info(f'requesting channel graph since {datetime.fromtimestamp(timestamp).ctime()}')
         self.send_message(
             'gossip_timestamp_filter',
             chain_hash=constants.net.rev_genesis_bytes(),
@@ -237,7 +239,7 @@ class Peer(PrintError):
         # todo: enable when db is fixed
         #need_to_get = sorted(self.channel_db.missing_short_chan_ids())
         #if need_to_get and not self.receiving_channels:
-        #    self.print_error('missing', len(need_to_get), 'channels')
+        #    self.logger.info(f'missing {len(need_to_get)} channels')
         #    zlibencoded = zlib.compress(bfh(''.join(need_to_get[0:100])))
         #    self.send_message(
         #        'query_short_channel_ids',
@@ -250,7 +252,7 @@ class Peer(PrintError):
         try:
             await asyncio.wait_for(self.initialize(), 10)
         except (OSError, asyncio.TimeoutError, HandshakeFailed) as e:
-            self.print_error('initialize failed, disconnecting: {}'.format(repr(e)))
+            self.logger.info('initialize failed, disconnecting: {}'.format(repr(e)))
             return
         async for msg in self.transport.read_messages():
             self.process_message(msg)
@@ -414,7 +416,7 @@ class Peer(PrintError):
                           funding_output_index=funding_index,
                           signature=sig_64)
         payload = await asyncio.wait_for(self.funding_signed[channel_id].get(), 5)
-        self.print_error('received funding_signed')
+        self.logger.info('received funding_signed')
         remote_sig = payload['signature']
         chan.receive_new_commitment(remote_sig, [])
         # broadcast funding tx
@@ -533,12 +535,12 @@ class Peer(PrintError):
         chan_id = payload["channel_id"]
         chan = self.channels.get(chan_id)
         if not chan:
-            self.print_error("Received unknown channel_reestablish", bh2u(chan_id), payload)
+            self.logger.info(f"Received unknown channel_reestablish {bh2u(chan_id)} {payload}")
             raise Exception('Unknown channel_reestablish')
         self.channel_reestablished[chan_id].set_result(payload)
     def try_to_get_remote_to_force_close_with_their_latest(self, chan_id):
-        self.print_error("trying to get remote to force close", bh2u(chan_id))
+        self.logger.info(f"trying to get remote to force close {bh2u(chan_id)}")
         self.send_message(
             "channel_reestablish",
             channel_id=chan_id,
@@ -550,7 +552,7 @@ class Peer(PrintError):
         await self.initialized.wait()
         chan_id = chan.channel_id
         if chan.get_state() != 'DISCONNECTED':
-            self.print_error('reestablish_channel was called but channel {} already in state {}'
+            self.logger.info('reestablish_channel was called but channel {} already in state {}'
                              .format(chan_id, chan.get_state()))
             return
         chan.set_state('REESTABLISHING')
@@ -558,7 +560,7 @@ class Peer(PrintError):
         current_remote_ctn = chan.config[REMOTE].ctn
         # send message
         if self.their_localfeatures & LnLocalFeatures.OPTION_DATA_LOSS_PROTECT_REQ:
-            self.print_error('peer requires data loss protect')
+            self.logger.info('peer requires data loss protect')
             if current_remote_ctn == 0:
                 last_rev_secret = 0
             else:
@@ -586,7 +588,7 @@ class Peer(PrintError):
         their_next_local_ctn = int.from_bytes(channel_reestablish_msg["next_local_commitment_number"], 'big')
         their_next_remote_ctn = int.from_bytes(channel_reestablish_msg["next_remote_revocation_number"], 'big')
         if their_next_local_ctn != chan.config[REMOTE].ctn + 1:
-            self.print_error("expected remote ctn {}, got {}".format(chan.config[REMOTE].ctn + 1, their_next_local_ctn))
+            self.logger.info("expected remote ctn {}, got {}".format(chan.config[REMOTE].ctn + 1, their_next_local_ctn))
             # TODO iff their ctn is lower than ours, we should force close instead
             self.try_to_get_remote_to_force_close_with_their_latest(chan_id)
             return
@@ -607,7 +609,7 @@ class Peer(PrintError):
                 per_commitment_secret=last_secret,
                 next_per_commitment_point=next_point)
         else:
-            self.print_error("expected local ctn {}, got {}".format(chan.config[LOCAL].ctn, their_next_remote_ctn))
+            self.logger.info(f"expected local ctn {chan.config[LOCAL].ctn}, got {their_next_remote_ctn}")
             # TODO iff their ctn is lower than ours, we should force close instead
             self.try_to_get_remote_to_force_close_with_their_latest(chan_id)
             return
@@ -618,7 +620,7 @@ class Peer(PrintError):
         if our_pcp is None:
             our_pcp = chan.config[REMOTE].next_per_commitment_point
         if our_pcp != their_pcp:
-            self.print_error("Remote PCP mismatch: {} {}".format(bh2u(our_pcp), bh2u(their_pcp)))
+            self.logger.info(f"Remote PCP mismatch: {bh2u(our_pcp)} {bh2u(their_pcp)}")
             # FIXME ...what now?
             self.try_to_get_remote_to_force_close_with_their_latest(chan_id)
             return
@@ -770,7 +772,7 @@ class Peer(PrintError):
         if pending_channel_update:
             self.channel_db.on_channel_update(pending_channel_update)
-        self.print_error("CHANNEL OPENING COMPLETED")
+        self.logger.info("CHANNEL OPENING COMPLETED")
     def send_announcement_signatures(self, chan: Channel):
@@ -816,7 +818,7 @@ class Peer(PrintError):
         except KeyError:
             # the remote might try to fail an htlc after we restarted...
             # attempted_route is not persisted, so we will get here then
-            self.print_error("UPDATE_FAIL_HTLC. cannot decode! attempted route is MISSING. {}".format(key))
+            self.logger.info("UPDATE_FAIL_HTLC. cannot decode! attempted route is MISSING. {}".format(key))
         else:
             try:
                 self._handle_error_code_from_failed_htlc(payload["reason"], route, channel_id, htlc_id)
@@ -841,8 +843,8 @@ class Peer(PrintError):
                                           [x.node_id for x in route],
                                           chan.onion_keys[htlc_id])
         code, data = failure_msg.code, failure_msg.data
-        self.print_error("UPDATE_FAIL_HTLC", repr(code), data)
-        self.print_error(f"error reported by {bh2u(route[sender_idx].node_id)}")
+        self.logger.info(f"UPDATE_FAIL_HTLC {repr(code)} {data}")
+        self.logger.info(f"error reported by {bh2u(route[sender_idx].node_id)}")
         # handle some specific error codes
         failure_codes = {
             OnionFailureCode.TEMPORARY_CHANNEL_FAILURE: 2,
@@ -858,12 +860,12 @@ class Peer(PrintError):
             message_type, payload = decode_msg(channel_update)
             payload['raw'] = channel_update
             try:
-                self.print_error("trying to apply channel update on our db", payload)
+                self.logger.info(f"trying to apply channel update on our db {payload}")
                 self.channel_db.on_channel_update(payload)
-                self.print_error("successfully applied channel update on our db")
+                self.logger.info("successfully applied channel update on our db")
             except NotFoundChanAnnouncementForUpdate:
                 # maybe it is a private channel (and data in invoice was outdated)
-                self.print_error("maybe channel update is for private channel?")
+                self.logger.info("maybe channel update is for private channel?")
                 start_node_id = route[sender_idx].node_id
                 self.channel_db.add_channel_update_for_private_channel(payload, start_node_id)
         else:
@@ -873,7 +875,7 @@ class Peer(PrintError):
             try:
                 short_chan_id = route[sender_idx + 1].short_channel_id
             except IndexError:
-                self.print_error("payment destination reported error")
+                self.logger.info("payment destination reported error")
             else:
                 self.network.path_finder.blacklist.add(short_chan_id)
@@ -885,7 +887,7 @@ class Peer(PrintError):
                 and chan.pending_feerate(REMOTE) == chan.constraints.feerate) \
                 or ctn_to_sign == self.sent_commitment_for_ctn_last[chan]:
             return
-        self.print_error(f'send_commitment. old number htlcs: {len(current)}, new number htlcs: {len(pending)}')
+        self.logger.info(f'send_commitment. old number htlcs: {len(current)}, new number htlcs: {len(pending)}')
         sig_64, htlc_sigs = chan.sign_next_commitment()
         self.send_message("commitment_signed", channel_id=chan.channel_id, signature=sig_64, num_htlcs=len(htlc_sigs), htlc_signature=b"".join(htlc_sigs))
         self.sent_commitment_for_ctn_last[chan] = ctn_to_sign
@@ -916,7 +918,7 @@ class Peer(PrintError):
         remote_ctn = chan.get_current_ctn(REMOTE)
         chan.onion_keys[htlc.htlc_id] = secret_key
         self.attempted_route[(chan.channel_id, htlc.htlc_id)] = route
-        self.print_error(f"starting payment. route: {route}")
+        self.logger.info(f"starting payment. route: {route}")
         self.send_message("update_add_htlc",
                           channel_id=chan.channel_id,
                           id=htlc.htlc_id,
@@ -939,7 +941,7 @@ class Peer(PrintError):
         self.maybe_send_commitment(chan)
     def on_commitment_signed(self, payload):
-        self.print_error("on_commitment_signed")
+        self.logger.info("on_commitment_signed")
         channel_id = payload['channel_id']
         chan = self.channels[channel_id]
         ctn_to_recv = chan.get_current_ctn(LOCAL) + 1
@@ -956,7 +958,7 @@ class Peer(PrintError):
         self.send_revoke_and_ack(chan)
     def on_update_fulfill_htlc(self, update_fulfill_htlc_msg):
-        self.print_error("update_fulfill")
+        self.logger.info("update_fulfill")
         chan = self.channels[update_fulfill_htlc_msg["channel_id"]]
         preimage = update_fulfill_htlc_msg["payment_preimage"]
         htlc_id = int.from_bytes(update_fulfill_htlc_msg["id"], "big")
@@ -971,11 +973,11 @@ class Peer(PrintError):
         self.payment_preimages[sha256(preimage)].put_nowait(preimage)
     def on_update_fail_malformed_htlc(self, payload):
-        self.print_error("error", payload["data"].decode("ascii"))
+        self.logger.info(f"error {payload['data'].decode('ascii')}")
     def on_update_add_htlc(self, payload):
         # no onion routing for the moment: we assume we are the end node
-        self.print_error('on_update_add_htlc')
+        self.logger.info('on_update_add_htlc')
         # check if this in our list of requests
         payment_hash = payload["payment_hash"]
         channel_id = payload['channel_id']
@@ -1020,11 +1022,11 @@ class Peer(PrintError):
         next_chan = self.lnworker.get_channel_by_short_id(dph.short_channel_id)
         next_peer = self.lnworker.peers[next_chan.node_id]
         if next_chan is None or next_chan.get_state() != 'OPEN':
-            self.print_error("cannot forward htlc", next_chan.get_state() if next_chan else None)
+            self.logger.info(f"cannot forward htlc {next_chan.get_state() if next_chan else None}")
            reason = OnionRoutingFailureMessage(code=OnionFailureCode.PERMANENT_CHANNEL_FAILURE, data=b'')
            await self.fail_htlc(chan, htlc.htlc_id, onion_packet, reason)
            return
-        self.print_error('forwarding htlc to', next_chan.node_id)
+        self.logger.info(f'forwarding htlc to {next_chan.node_id}')
         next_cltv_expiry = int.from_bytes(dph.outgoing_cltv_value, 'big')
         next_amount_msat_htlc = int.from_bytes(dph.amt_to_forward, 'big')
         next_htlc = UpdateAddHtlc(amount_msat=next_amount_msat_htlc, payment_hash=htlc.payment_hash, cltv_expiry=next_cltv_expiry, timestamp=int(time.time()))
@@ -1044,7 +1046,7 @@ class Peer(PrintError):
         preimage = await next_peer.payment_preimages[next_htlc.payment_hash].get()
         # fulfill the original htlc
         await self._fulfill_htlc(chan, htlc.htlc_id, preimage)
-        self.print_error("htlc forwarded successfully")
+        self.logger.info("htlc forwarded successfully")
     @log_exceptions
     async def _maybe_fulfill_htlc(self, chan: Channel, htlc: UpdateAddHtlc, *, local_ctn: int, remote_ctn: int,
@@ -1098,7 +1100,7 @@ class Peer(PrintError):
     async def fail_htlc(self, chan: Channel, htlc_id: int, onion_packet: OnionPacket,
                         reason: OnionRoutingFailureMessage):
-        self.print_error(f"failing received htlc {(bh2u(chan.channel_id), htlc_id)}. reason: {reason}")
+        self.logger.info(f"failing received htlc {(bh2u(chan.channel_id), htlc_id)}. reason: {reason}")
         chan.fail_htlc(htlc_id)
         remote_ctn = chan.get_current_ctn(REMOTE)
         error_packet = construct_onion_error(reason, onion_packet, our_onion_private_key=self.privkey)
@@ -1110,7 +1112,7 @@ class Peer(PrintError):
         await self.await_remote(chan, remote_ctn)
     def on_revoke_and_ack(self, payload):
-        self.print_error("on_revoke_and_ack")
+        self.logger.info("on_revoke_and_ack")
         channel_id = payload["channel_id"]
         chan = self.channels[channel_id]
         chan.receive_revocation(RevokeAndAck(payload["per_commitment_secret"], payload["next_per_commitment_point"]))
@@ -1133,12 +1135,12 @@ class Peer(PrintError):
             return
         feerate_per_kw = self.lnworker.current_feerate_per_kw()
         chan_fee = chan.pending_feerate(REMOTE)
-        self.print_error("current pending feerate", chan_fee)
-        self.print_error("new feerate", feerate_per_kw)
+        self.logger.info(f"current pending feerate {chan_fee}")
+        self.logger.info(f"new feerate {feerate_per_kw}")
         if feerate_per_kw < chan_fee / 2:
-            self.print_error("FEES HAVE FALLEN")
+            self.logger.info("FEES HAVE FALLEN")
         elif feerate_per_kw > chan_fee * 2:
-            self.print_error("FEES HAVE RISEN")
+            self.logger.info("FEES HAVE RISEN")
         else:
             return
         chan.update_fee(feerate_per_kw, True)
@@ -1160,7 +1162,7 @@ class Peer(PrintError):
         self.send_shutdown(chan)
         payload = await self.shutdown_received[chan_id]
         txid = await self._shutdown(chan, payload, True)
-        self.print_error('Channel closed', txid)
+        self.logger.info(f'Channel closed {txid}')
         return txid
     @log_exceptions
@@ -1175,7 +1177,7 @@ class Peer(PrintError):
         chan = self.channels[chan_id]
         self.send_shutdown(chan)
         txid = await self._shutdown(chan, payload, False)
-        self.print_error('Channel closed by remote peer', txid)
+        self.logger.info(f'Channel closed by remote peer {txid}')
     def send_shutdown(self, chan: Channel):
         scriptpubkey = bfh(bitcoin.address_to_script(chan.sweep_address))
@@ -1186,7 +1188,7 @@ class Peer(PrintError):
         # set state so that we stop accepting HTLCs
         chan.set_state('CLOSING')
         while len(chan.hm.htlcs_by_direction(LOCAL, RECEIVED)) > 0:
-            self.print_error('waiting for htlcs to settle...')
+            self.logger.info('waiting for htlcs to settle...')
             await asyncio.sleep(1)
         our_fee = chan.pending_local_fee()
         scriptpubkey = bfh(bitcoin.address_to_script(chan.sweep_address))
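
Most of the lnpeer.py churn above is mechanical: print_error accepted multiple positional arguments and joined them with spaces, while logger.info takes a single pre-formatted message, so the arguments are folded into one f-string. A standalone illustration of the call-style change (not Electrum code; the logger name is a placeholder):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example.lnpeer")  # placeholder name for the sketch

message_name = "channel_reestablish"
# old style joined the pieces for you:
#     self.print_error("Sending '%s'" % message_name.upper())
# new style builds the whole message explicitly:
logger.info(f"Sending {message_name.upper()}")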

electrum/lnrouter.py (37)

@@ -43,7 +43,8 @@ from sqlalchemy.sql import not_, or_
 from .sql_db import SqlDB, sql
 from . import constants
-from .util import PrintError, bh2u, profiler, get_headers_dir, bfh, is_ip_address, list_enabled_bits
+from .util import bh2u, profiler, get_headers_dir, bfh, is_ip_address, list_enabled_bits, print_msg
+from .logging import Logger
 from .storage import JsonDB
 from .lnverifier import LNChannelVerifier, verify_sig_for_channel_update
 from .crypto import sha256d
@@ -231,7 +232,7 @@ class ChannelDB(SqlDB):
         self.num_channels = self.DBSession.query(ChannelInfo).count()
         self.num_policies = self.DBSession.query(Policy).count()
         self.num_nodes = self.DBSession.query(NodeInfo).count()
-        self.print_error('update counts', self.num_channels, self.num_policies)
+        self.logger.info(f'update counts {self.num_channels} {self.num_policies}')
     @sql
     def add_recent_peer(self, peer: LNPeerAddr):
@@ -301,12 +302,12 @@ class ChannelDB(SqlDB):
             if self.DBSession.query(ChannelInfo).filter_by(short_channel_id=short_channel_id).count():
                 continue
             if constants.net.rev_genesis_bytes() != msg['chain_hash']:
-                self.print_error("ChanAnn has unexpected chain_hash {}".format(bh2u(msg_payload['chain_hash'])))
+                self.logger.info("ChanAnn has unexpected chain_hash {}".format(bh2u(msg['chain_hash'])))
                 continue
             try:
                 channel_info = ChannelInfo.from_msg(msg)
             except UnknownEvenFeatureBits:
-                self.print_error("unknown feature bits")
+                self.logger.info("unknown feature bits")
                 continue
             channel_info.trusted = trusted
             new_channels[short_channel_id] = channel_info
@@ -315,7 +316,7 @@ class ChannelDB(SqlDB):
         for channel_info in new_channels.values():
             self.DBSession.add(channel_info)
         self.DBSession.commit()
-        #self.print_error('on_channel_announcement: %d/%d'%(len(new_channels), len(msg_payloads)))
+        #self.logger.info('on_channel_announcement: %d/%d'%(len(new_channels), len(msg_payloads)))
         self._update_counts()
         self.network.trigger_callback('ln_status')
@@ -351,7 +352,7 @@ class ChannelDB(SqlDB):
                 continue
             short_channel_id = channel_info.short_channel_id
             new_policy = Policy.from_msg(msg_payload, node_id, short_channel_id)
-            #self.print_error('on_channel_update', datetime.fromtimestamp(new_policy.timestamp).ctime())
+            #self.logger.info(f'on_channel_update {datetime.fromtimestamp(new_policy.timestamp).ctime()}')
             old_policy = self.DBSession.query(Policy).filter_by(short_channel_id=short_channel_id, start_node=node_id).one_or_none()
             if old_policy:
                 if old_policy.timestamp >= new_policy.timestamp:
@@ -368,8 +369,8 @@ class ChannelDB(SqlDB):
             self.DBSession.add(new_policy)
         self.DBSession.commit()
         if new_policies:
-            self.print_error('on_channel_update: %d/%d'%(len(new_policies), len(msg_payloads)))
-            self.print_error('last timestamp:', datetime.fromtimestamp(self._get_last_timestamp()).ctime())
+            self.logger.info(f'on_channel_update: {len(new_policies)}/{len(msg_payloads)}')
+            self.logger.info(f'last timestamp: {datetime.fromtimestamp(self._get_last_timestamp()).ctime()}')
         self._update_counts()
     @sql
@@ -390,12 +391,12 @@ class ChannelDB(SqlDB):
                 node_info, node_addresses = NodeInfo.from_msg(msg_payload)
             except UnknownEvenFeatureBits:
                 continue
-            #self.print_error('received node announcement from', datetime.fromtimestamp(node_info.timestamp).ctime())
+            #self.logger.info(f'received node announcement from {datetime.fromtimestamp(node_info.timestamp).ctime()}')
             node_id = node_info.node_id
             # Ignore node if it has no associated channel (DoS protection)
             expr = or_(ChannelInfo.node1_id==node_id, ChannelInfo.node2_id==node_id)
             if self.DBSession.query(ChannelInfo.short_channel_id).filter(expr).count() == 0:
-                #self.print_error('ignoring orphan node_announcement')
+                #self.logger.info('ignoring orphan node_announcement')
                 continue
             node = self.DBSession.query(NodeInfo).filter_by(node_id=node_id).one_or_none()
             if node and node.timestamp >= node_info.timestamp:
@@ -406,7 +407,7 @@ class ChannelDB(SqlDB):
             new_nodes[node_id] = node_info
             for addr in node_addresses:
                 new_addresses[(addr.node_id,addr.host,addr.port)] = addr
-        #self.print_error("on_node_announcement: %d/%d"%(len(new_nodes), len(msg_payloads)))
+        #self.logger.info("on_node_announcement: %d/%d"%(len(new_nodes), len(msg_payloads)))
         for node_info in new_nodes.values():
             self.DBSession.add(node_info)
         for new_addr in new_addresses.values():
@@ -453,12 +454,13 @@ class ChannelDB(SqlDB):
                 other = channel_info.node1_id
             return other if full_ids else other[-4:]
-        self.print_msg('nodes')
+        print_msg('nodes')
         for node in self.DBSession.query(NodeInfo).all():
-            self.print_msg(node)
+            print_msg(node)
-        self.print_msg('channels')
+        print_msg('channels')
         for channel_info in self.DBSession.query(ChannelInfo).all():
+            short_channel_id = channel_info.short_channel_id
             node1 = channel_info.node1_id
             node2 = channel_info.node2_id
             direction1 = self.get_policy_for_node(channel_info, node1) is not None
@@ -471,7 +473,7 @@ class ChannelDB(SqlDB):
                 direction = 'backward'
             else:
                 direction = 'none'
-            self.print_msg('{}: {}, {}, {}'
+            print_msg('{}: {}, {}, {}'
                            .format(bh2u(short_channel_id),
                                    bh2u(node1) if full_ids else bh2u(node1[-4:]),
                                    bh2u(node2) if full_ids else bh2u(node2[-4:]),
@@ -493,7 +495,7 @@ class ChannelDB(SqlDB):
         for channel_info in self._channels.values():
             self._channels_for_node[bfh(channel_info.node1_id)].add(bfh(channel_info.short_channel_id))
             self._channels_for_node[bfh(channel_info.node2_id)].add(bfh(channel_info.short_channel_id))
-        self.print_error('load data', len(self._channels), len(self._policies), len(self._channels_for_node))
+        self.logger.info(f'load data {len(self._channels)} {len(self._policies)} {len(self._channels_for_node)}')
     def get_policy_for_node(self, short_channel_id: bytes, node_id: bytes) -> Optional['Policy']:
         return self._policies.get((node_id, short_channel_id))
@@ -572,9 +574,10 @@ def is_route_sane_to_use(route: List[RouteEdge], invoice_amount_msat: int, min_f
     return True
-class LNPathFinder(PrintError):
+class LNPathFinder(Logger):
     def __init__(self, channel_db: ChannelDB):
+        Logger.__init__(self)
         self.channel_db = channel_db
         self.blacklist = set()

electrum/lnsweep.py (10)

@@ -4,7 +4,7 @@
 from typing import Optional, Dict, List, Tuple, TYPE_CHECKING
-from .util import bfh, bh2u, print_error
+from .util import bfh, bh2u
 from .bitcoin import TYPE_ADDRESS, redeem_script_to_address, dust_threshold
 from . import ecc
 from .lnutil import (make_commitment_output_to_remote_address, make_commitment_output_to_local_witness_script,
@@ -15,11 +15,15 @@ from .lnutil import (make_commitment_output_to_remote_address, make_commitment_o
                      RevocationStore, extract_ctn_from_tx_and_chan, UnableToDeriveSecret, SENT, RECEIVED)
 from .transaction import Transaction, TxOutput, construct_witness
 from .simple_config import SimpleConfig, FEERATE_FALLBACK_STATIC_FEE
+from .logging import get_logger
 if TYPE_CHECKING:
     from .lnchannel import Channel
+_logger = get_logger(__name__)
 def maybe_create_sweeptx_for_their_ctx_to_remote(ctx: Transaction, sweep_address: str,
                                                  our_payment_privkey: ecc.ECPrivkey) -> Optional[Transaction]:
     our_payment_pubkey = our_payment_privkey.get_public_key_bytes(compressed=True)
@@ -159,7 +163,7 @@ def create_sweeptxs_for_our_latest_ctx(chan: 'Channel', ctx: Transaction,
        try:
            preimage = chan.lnworker.get_preimage(htlc.payment_hash)
        except UnknownPaymentHash as e:
-            print_error(f'trying to sweep htlc from our latest ctx but getting {repr(e)}')
+            _logger.info(f'trying to sweep htlc from our latest ctx but getting {repr(e)}')
            return None, None
     else:
        preimage = None
@@ -262,7 +266,7 @@ def create_sweeptxs_for_their_latest_ctx(chan: 'Channel', ctx: Transaction,
        try:
            preimage = chan.lnworker.get_preimage(htlc.payment_hash)
        except UnknownPaymentHash as e:
-            print_error(f'trying to sweep htlc from their latest ctx but getting {repr(e)}')
+            _logger.info(f'trying to sweep htlc from their latest ctx but getting {repr(e)}')
            return None
     else:
        preimage = None
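
lnsweep.py has no class to hang the Logger mixin on, so it switches from the free function print_error to a module-level logger obtained via get_logger(__name__). A rough sketch of that idea, assuming get_logger is essentially a namespaced wrapper around logging.getLogger; the helper lookup_preimage below is invented for the example:

import logging

def get_logger(name: str) -> logging.Logger:
    # assumption: electrum.logging.get_logger roughly namespaces stdlib loggers
    return logging.getLogger("electrum." + name)

_logger = get_logger(__name__)

def sweep_htlc(payment_hash: bytes):
    try:
        preimage = lookup_preimage(payment_hash)  # invented helper for the sketch
    except KeyError as e:
        _logger.info(f"trying to sweep htlc but getting {repr(e)}")
        return None
    return preimage

def lookup_preimage(payment_hash: bytes) -> bytes:
    # stand-in that always fails, just to exercise the logging path
    raise KeyError(payment_hash)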

electrum/lnverifier.py (8)

@@ -106,7 +106,7 @@ class LNChannelVerifier(NetworkJobOnDefaultServer):
                 continue
             self.started_verifying_channel.add(short_channel_id)
             await self.group.spawn(self.verify_channel(block_height, tx_pos, short_channel_id))
-            #self.print_error('requested short_channel_id', bh2u(short_channel_id))
+            #self.logger.info(f'requested short_channel_id {bh2u(short_channel_id)}')
     async def verify_channel(self, block_height: int, tx_pos: int, short_channel_id: bytes):
         # we are verifying channel announcements as they are from untrusted ln peers.
@@ -140,12 +140,12 @@ class LNChannelVerifier(NetworkJobOnDefaultServer):
         except Exception:
             # either bug in client, or electrum server is evil.
             # if we connect to a diff server at some point, let's try again.
-            self.print_msg("cannot deserialize transaction, skipping", tx_hash)
+            self.logger.warning(f"cannot deserialize transaction, skipping {tx_hash}")
             return
         if tx_hash != tx.txid():
             # either bug in client, or electrum server is evil.
             # if we connect to a diff server at some point, let's try again.
-            self.print_error(f"received tx does not match expected txid ({tx_hash} != {tx.txid()})")
+            self.logger.info(f"received tx does not match expected txid ({tx_hash} != {tx.txid()})")
             return
         # check funding output
         msg_payload = self.unverified_channel_info[short_channel_id]
@@ -161,7 +161,7 @@ class LNChannelVerifier(NetworkJobOnDefaultServer):
             return
         if expected_address != actual_output.address:
             # FIXME what now? best would be to ban the originating ln peer.
-            self.print_error(f"funding output script mismatch for {bh2u(short_channel_id)}")
+            self.logger.info(f"funding output script mismatch for {bh2u(short_channel_id)}")
             self._remove_channel_from_unverified_db(short_channel_id)
             return
         # put channel into channel DB

electrum/lnwatcher.py (24)

@@ -19,7 +19,7 @@ from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.sql import not_, or_
 from .sql_db import SqlDB, sql
-from .util import PrintError, bh2u, bfh, log_exceptions, ignore_exceptions
+from .util import bh2u, bfh, log_exceptions, ignore_exceptions
 from . import wallet
 from .storage import WalletStorage
 from .address_synchronizer import AddressSynchronizer, TX_HEIGHT_LOCAL, TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED
@@ -152,7 +152,7 @@ class LNWatcher(AddressSynchronizer):
     @ignore_exceptions
     @log_exceptions
     async def watchtower_task(self):
-        self.print_error('watchtower task started')
+        self.logger.info('watchtower task started')
         # initial check
         for address, outpoint in self.sweepstore.list_channel_info():
             await self.watchtower_queue.put(outpoint)
@@ -167,12 +167,12 @@ class LNWatcher(AddressSynchronizer):
                 if n == 0:
                     address = self.sweepstore.get_address(outpoint)
                     self.watchtower.add_channel(outpoint, address)
-                self.print_error("sending %d transactions to watchtower"%(local_n - n))
+                self.logger.info("sending %d transactions to watchtower"%(local_n - n))
                 for index in range(n, local_n):
                     prev_txid, tx = self.sweepstore.get_tx_by_index(outpoint, index)
                     self.watchtower.add_sweep_tx(outpoint, prev_txid, tx)
             except ConnectionRefusedError:
-                self.print_error('could not reach watchtower, will retry in 5s')
+                self.logger.info('could not reach watchtower, will retry in 5s')
                 await asyncio.sleep(5)
                 await self.watchtower_queue.put(outpoint)
@@ -183,7 +183,7 @@ class LNWatcher(AddressSynchronizer):
         self.sweepstore.add_channel(outpoint, address)
     def unwatch_channel(self, address, funding_outpoint):
-        self.print_error('unwatching', funding_outpoint)
+        self.logger.info(f'unwatching {funding_outpoint}')
         self.sweepstore.remove_sweep_tx(funding_outpoint)
         self.sweepstore.remove_channel(funding_outpoint)
         if funding_outpoint in self.tx_progress:
@@ -195,7 +195,7 @@ class LNWatcher(AddressSynchronizer):
         if args[0] != self:
             return
         if not self.synchronizer:
-            self.print_error("synchronizer not set yet")
+            self.logger.info("synchronizer not set yet")
             return
         if not self.synchronizer.is_up_to_date():
             return
@@ -216,7 +216,7 @@ class LNWatcher(AddressSynchronizer):
         if not keep_watching:
             self.unwatch_channel(address, funding_outpoint)
         else:
-            #self.print_error('we will keep_watching', funding_outpoint)
+            #self.logger.info(f'we will keep_watching {funding_outpoint}')
             pass
     def inspect_tx_candidate(self, outpoint, n):
@@ -227,12 +227,12 @@ class LNWatcher(AddressSynchronizer):
         result = {outpoint:txid}
         if txid is None:
             self.channel_status[outpoint] = 'open'
-            #self.print_error('keep watching because outpoint is unspent')
+            #self.logger.info('keep watching because outpoint is unspent')
             return True, result
         keep_watching = (self.get_tx_mined_depth(txid) != TxMinedDepth.DEEP)
         if keep_watching:
             self.channel_status[outpoint] = 'closed (%d)' % self.get_tx_height(txid).conf
-            #self.print_error('keep watching because spending tx is not deep')
+            #self.logger.info('keep watching because spending tx is not deep')
         else:
             self.channel_status[outpoint] = 'closed (deep)'
@@ -255,7 +255,7 @@ class LNWatcher(AddressSynchronizer):
         sweep_txns = self.sweepstore.get_sweep_tx(funding_outpoint, prev_txid)
         for tx in sweep_txns:
             if not await self.broadcast_or_log(funding_outpoint, tx):
-                self.print_error(tx.name, f'could not publish tx: {str(tx)}, prev_txid: {prev_txid}')
+                self.logger.info(f'{tx.name} could not publish tx: {str(tx)}, prev_txid: {prev_txid}')
     async def broadcast_or_log(self, funding_outpoint, tx):
         height = self.get_tx_height(tx.txid()).height
@@ -264,9 +264,9 @@ class LNWatcher(AddressSynchronizer):
         try:
             txid = await self.network.broadcast_transaction(tx)
         except Exception as e:
-            self.print_error(f'broadcast: {tx.name}: failure: {repr(e)}')
+            self.logger.info(f'broadcast: {tx.name}: failure: {repr(e)}')
         else:
-            self.print_error(f'broadcast: {tx.name}: success. txid: {txid}')
+            self.logger.info(f'broadcast: {tx.name}: success. txid: {txid}')
             if funding_outpoint in self.tx_progress:
                 await self.tx_progress[funding_outpoint].tx_queue.put(tx)
         return txid

60
electrum/lnworker.py

@ -26,8 +26,9 @@ from .bitcoin import COIN
from .transaction import Transaction from .transaction import Transaction
from .crypto import sha256 from .crypto import sha256
from .bip32 import BIP32Node from .bip32 import BIP32Node
from .util import bh2u, bfh, PrintError, InvoiceError, resolve_dns_srv, is_ip_address, log_exceptions from .util import bh2u, bfh, InvoiceError, resolve_dns_srv, is_ip_address, log_exceptions
from .util import timestamp_to_datetime from .util import timestamp_to_datetime
from .logging import Logger
from .lntransport import LNTransport, LNResponderTransport from .lntransport import LNTransport, LNResponderTransport
from .lnpeer import Peer from .lnpeer import Peer
from .lnaddr import lnencode, LnAddr, lndecode from .lnaddr import lnencode, LnAddr, lndecode
@ -72,9 +73,10 @@ FALLBACK_NODE_LIST_MAINNET = [
encoder = ChannelJsonEncoder() encoder = ChannelJsonEncoder()
class LNWorker(PrintError): class LNWorker(Logger):
def __init__(self, xprv): def __init__(self, xprv):
Logger.__init__(self)
self.node_keypair = generate_keypair(keystore.from_xprv(xprv), LnKeyFamily.NODE_KEY, 0) self.node_keypair = generate_keypair(keystore.from_xprv(xprv), LnKeyFamily.NODE_KEY, 0)
self.peers = {} # type: Dict[bytes, Peer] # pubkey -> Peer self.peers = {} # type: Dict[bytes, Peer] # pubkey -> Peer
self.localfeatures = LnLocalFeatures(0) self.localfeatures = LnLocalFeatures(0)
@ -91,7 +93,7 @@ class LNWorker(PrintError):
try: try:
node_id = await transport.handshake() node_id = await transport.handshake()
except: except:
self.print_error('handshake failure from incoming connection') self.logger.info('handshake failure from incoming connection')
return return
peer = Peer(self, node_id, transport) peer = Peer(self, node_id, transport)
self.peers[node_id] = peer self.peers[node_id] = peer
@ -118,7 +120,7 @@ class LNWorker(PrintError):
peer_addr = LNPeerAddr(host, port, node_id) peer_addr = LNPeerAddr(host, port, node_id)
transport = LNTransport(self.node_keypair.privkey, peer_addr) transport = LNTransport(self.node_keypair.privkey, peer_addr)
self._last_tried_peer[peer_addr] = time.time() self._last_tried_peer[peer_addr] = time.time()
self.print_error("adding peer", peer_addr) self.logger.info(f"adding peer {peer_addr}")
peer = Peer(self, node_id, transport) peer = Peer(self, node_id, transport)
await self.network.main_taskgroup.spawn(peer.main_loop()) await self.network.main_taskgroup.spawn(peer.main_loop())
self.peers[node_id] = peer self.peers[node_id] = peer
@ -165,7 +167,7 @@ class LNWorker(PrintError):
host, port = self.choose_preferred_address(addrs) host, port = self.choose_preferred_address(addrs)
peer = LNPeerAddr(host, port, bytes.fromhex(node.node_id)) peer = LNPeerAddr(host, port, bytes.fromhex(node.node_id))
if peer in self._last_tried_peer: continue if peer in self._last_tried_peer: continue
self.print_error('taking random ln peer from our channel db') self.logger.info('taking random ln peer from our channel db')
return [peer] return [peer]
# TODO remove this. For some reason the dns seeds seem to ignore the realm byte # TODO remove this. For some reason the dns seeds seem to ignore the realm byte
@ -182,7 +184,7 @@ class LNWorker(PrintError):
if not constants.net.LN_DNS_SEEDS: if not constants.net.LN_DNS_SEEDS:
return [] return []
dns_seed = random.choice(constants.net.LN_DNS_SEEDS) dns_seed = random.choice(constants.net.LN_DNS_SEEDS)
self.print_error('asking dns seed "{}" for ln peers'.format(dns_seed)) self.logger.info('asking dns seed "{}" for ln peers'.format(dns_seed))
try: try:
# note: this might block for several seconds # note: this might block for several seconds
# this will include bech32-encoded-pubkeys and ports # this will include bech32-encoded-pubkeys and ports
@ -208,9 +210,9 @@ class LNWorker(PrintError):
pubkey = get_compressed_pubkey_from_bech32(bech32_pubkey) pubkey = get_compressed_pubkey_from_bech32(bech32_pubkey)
peers.append(LNPeerAddr(ln_host, port, pubkey)) peers.append(LNPeerAddr(ln_host, port, pubkey))
except Exception as e: except Exception as e:
self.print_error('error with parsing peer from dns seed: {}'.format(e)) self.logger.info('error with parsing peer from dns seed: {}'.format(e))
continue continue
self.print_error('got {} ln peers from dns seed'.format(len(peers))) self.logger.info('got {} ln peers from dns seed'.format(len(peers)))
return peers return peers
@ -228,6 +230,7 @@ class LNGossip(LNWorker):
class LNWallet(LNWorker): class LNWallet(LNWorker):
def __init__(self, wallet: 'Abstract_Wallet'): def __init__(self, wallet: 'Abstract_Wallet'):
Logger.__init__(self)
self.wallet = wallet self.wallet = wallet
self.storage = wallet.storage self.storage = wallet.storage
xprv = self.storage.get('lightning_privkey2') xprv = self.storage.get('lightning_privkey2')
@ -405,7 +408,7 @@ class LNWallet(LNWorker):
self.save_channel(chan) self.save_channel(chan)
self.on_channels_updated() self.on_channels_updated()
else: else:
self.print_error("funding tx is still not at sufficient depth. actual depth: " + str(conf)) self.logger.info(f"funding tx is still not at sufficient depth. actual depth: {conf}")
def channel_by_txo(self, txo): def channel_by_txo(self, txo):
with self.lock: with self.lock:
@ -418,7 +421,7 @@ class LNWallet(LNWorker):
chan = self.channel_by_txo(funding_outpoint) chan = self.channel_by_txo(funding_outpoint)
if not chan: if not chan:
return return
self.print_error('on_channel_open', funding_outpoint) self.logger.info(f'on_channel_open {funding_outpoint}')
self.channel_timestamps[bh2u(chan.channel_id)] = funding_txid, funding_height.height, funding_height.timestamp, None, None, None self.channel_timestamps[bh2u(chan.channel_id)] = funding_txid, funding_height.height, funding_height.timestamp, None, None, None
self.storage.put('lightning_channel_timestamps', self.channel_timestamps) self.storage.put('lightning_channel_timestamps', self.channel_timestamps)
chan.set_funding_txo_spentness(False) chan.set_funding_txo_spentness(False)
@ -430,7 +433,7 @@ class LNWallet(LNWorker):
chan = self.channel_by_txo(funding_outpoint) chan = self.channel_by_txo(funding_outpoint)
if not chan: if not chan:
return return
self.print_error('on_channel_closed', funding_outpoint) self.logger.info(f'on_channel_closed {funding_outpoint}')
self.channel_timestamps[bh2u(chan.channel_id)] = funding_txid, funding_height.height, funding_height.timestamp, closing_txid, closing_height.height, closing_height.timestamp self.channel_timestamps[bh2u(chan.channel_id)] = funding_txid, funding_height.height, funding_height.timestamp, closing_txid, closing_height.height, closing_height.timestamp
self.storage.put('lightning_channel_timestamps', self.channel_timestamps) self.storage.put('lightning_channel_timestamps', self.channel_timestamps)
chan.set_funding_txo_spentness(True) chan.set_funding_txo_spentness(True)
@ -443,13 +446,13 @@ class LNWallet(LNWorker):
self.channel_db.remove_channel(chan.short_channel_id) self.channel_db.remove_channel(chan.short_channel_id)
# detect who closed # detect who closed
if closing_txid == chan.local_commitment.txid(): if closing_txid == chan.local_commitment.txid():
self.print_error('we force closed', funding_outpoint) self.logger.info(f'we force closed {funding_outpoint}')
encumbered_sweeptxs = chan.local_sweeptxs encumbered_sweeptxs = chan.local_sweeptxs
elif closing_txid == chan.remote_commitment.txid(): elif closing_txid == chan.remote_commitment.txid():
self.print_error('they force closed', funding_outpoint) self.logger.info(f'they force closed {funding_outpoint}')
encumbered_sweeptxs = chan.remote_sweeptxs encumbered_sweeptxs = chan.remote_sweeptxs
else: else:
self.print_error('not sure who closed', funding_outpoint, closing_txid) self.logger.info(f'not sure who closed {funding_outpoint} {closing_txid}')
return return
# sweep # sweep
for prevout, spender in spenders.items(): for prevout, spender in spenders.items():
@ -457,7 +460,7 @@ class LNWallet(LNWorker):
if e_tx is None: if e_tx is None:
continue continue
if spender is not None: if spender is not None:
self.print_error('outpoint already spent', prevout) self.logger.info(f'outpoint already spent {prevout}')
continue continue
prev_txid, prev_index = prevout.split(':') prev_txid, prev_index = prevout.split(':')
broadcast = True broadcast = True
@ -465,19 +468,19 @@ class LNWallet(LNWorker):
local_height = self.network.get_local_height() local_height = self.network.get_local_height()
remaining = e_tx.cltv_expiry - local_height remaining = e_tx.cltv_expiry - local_height
if remaining > 0: if remaining > 0:
self.print_error(e_tx.name, 'waiting for {}: CLTV ({} > {}), funding outpoint {} and tx {}' self.logger.info('waiting for {}: CLTV ({} > {}), funding outpoint {} and tx {}'
.format(e_tx.name, local_height, e_tx.cltv_expiry, funding_outpoint[:8], prev_txid[:8])) .format(e_tx.name, local_height, e_tx.cltv_expiry, funding_outpoint[:8], prev_txid[:8]))
broadcast = False broadcast = False
if e_tx.csv_delay: if e_tx.csv_delay:
prev_height = self.network.lnwatcher.get_tx_height(prev_txid) prev_height = self.network.lnwatcher.get_tx_height(prev_txid)
remaining = e_tx.csv_delay - prev_height.conf remaining = e_tx.csv_delay - prev_height.conf
if remaining > 0: if remaining > 0:
self.print_error(e_tx.name, 'waiting for {}: CSV ({} >= {}), funding outpoint {} and tx {}' self.logger.info('waiting for {}: CSV ({} >= {}), funding outpoint {} and tx {}'
.format(e_tx.name, prev_height.conf, e_tx.csv_delay, funding_outpoint[:8], prev_txid[:8])) .format(e_tx.name, prev_height.conf, e_tx.csv_delay, funding_outpoint[:8], prev_txid[:8]))
broadcast = False broadcast = False
if broadcast: if broadcast:
if not await self.network.lnwatcher.broadcast_or_log(funding_outpoint, e_tx): if not await self.network.lnwatcher.broadcast_or_log(funding_outpoint, e_tx):
self.print_error(e_tx.name, f'could not publish encumbered tx: {str(e_tx)}, prevout: {prevout}') self.logger.info(f'{e_tx.name} could not publish encumbered tx: {str(e_tx)}, prevout: {prevout}')
else: else:
self.wallet.add_future_tx(e_tx, remaining) self.wallet.add_future_tx(e_tx, remaining)
@ -486,10 +489,10 @@ class LNWallet(LNWorker):
dust_limit = chan.config[REMOTE].dust_limit_sat * 1000 dust_limit = chan.config[REMOTE].dust_limit_sat * 1000
delay = x.cltv_expiry - self.network.get_local_height() delay = x.cltv_expiry - self.network.get_local_height()
if x.amount_msat > 10 * dust_limit and delay < 3: if x.amount_msat > 10 * dust_limit and delay < 3:
self.print_error('htlc is dangerous') self.logger.info('htlc is dangerous')
return True return True
else: else:
self.print_error('htlc is not dangerous', delay) self.logger.info(f'htlc is not dangerous. delay {delay}')
return False return False
@log_exceptions @log_exceptions
@ -515,7 +518,7 @@ class LNWallet(LNWorker):
elif chan.get_state() == "OPEN": elif chan.get_state() == "OPEN":
peer = self.peers.get(chan.node_id) peer = self.peers.get(chan.node_id)
if peer is None: if peer is None:
self.print_error("peer not found for {}".format(bh2u(chan.node_id))) self.logger.info("peer not found for {}".format(bh2u(chan.node_id)))
return return
if event == 'fee': if event == 'fee':
await peer.bitcoin_fee_update(chan) await peer.bitcoin_fee_update(chan)
@ -524,9 +527,9 @@ class LNWallet(LNWorker):
elif chan.get_state() == 'FORCE_CLOSING': elif chan.get_state() == 'FORCE_CLOSING':
txid = chan.force_close_tx().txid() txid = chan.force_close_tx().txid()
height = lnwatcher.get_tx_height(txid).height height = lnwatcher.get_tx_height(txid).height
self.print_error("force closing tx", txid, "height", height) self.logger.info(f"force closing tx {txid}, height {height}")
if height == TX_HEIGHT_LOCAL: if height == TX_HEIGHT_LOCAL:
self.print_error('REBROADCASTING CLOSING TX') self.logger.info('REBROADCASTING CLOSING TX')
await self.force_close_channel(chan.channel_id) await self.force_close_channel(chan.channel_id)
async def _open_channel_coroutine(self, peer, local_amount_sat, push_sat, password): async def _open_channel_coroutine(self, peer, local_amount_sat, push_sat, password):
@ -671,7 +674,7 @@ class LNWallet(LNWorker):
prev_node_id = node_pubkey prev_node_id = node_pubkey
# test sanity # test sanity
if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()): if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
self.print_error(f"rejecting insane route {route}") self.logger.info(f"rejecting insane route {route}")
route = None route = None
continue continue
break break
@ -682,7 +685,7 @@ class LNWallet(LNWorker):
raise PaymentFailure(_("No path found")) raise PaymentFailure(_("No path found"))
route = self.network.path_finder.create_route_from_path(path, self.node_keypair.pubkey) route = self.network.path_finder.create_route_from_path(path, self.node_keypair.pubkey)
if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()): if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
self.print_error(f"rejecting insane route {route}") self.logger.info(f"rejecting insane route {route}")
raise PaymentFailure(_("No path found")) raise PaymentFailure(_("No path found"))
return route return route
@ -692,7 +695,7 @@ class LNWallet(LNWorker):
amount_btc = amount_sat/Decimal(COIN) if amount_sat else None amount_btc = amount_sat/Decimal(COIN) if amount_sat else None
routing_hints = self._calc_routing_hints_for_invoice(amount_sat) routing_hints = self._calc_routing_hints_for_invoice(amount_sat)
if not routing_hints: if not routing_hints:
self.print_error("Warning. No routing hints added to invoice. " self.logger.info("Warning. No routing hints added to invoice. "
"Other clients will likely not be able to send to us.") "Other clients will likely not be able to send to us.")
invoice = lnencode(LnAddr(payment_hash, amount_btc, invoice = lnencode(LnAddr(payment_hash, amount_btc,
tags=[('d', message), tags=[('d', message),
@ -771,7 +774,7 @@ class LNWallet(LNWorker):
cltv_expiry_delta = policy.cltv_expiry_delta cltv_expiry_delta = policy.cltv_expiry_delta
missing_info = False missing_info = False
if missing_info: if missing_info:
self.print_error(f"Warning. Missing channel update for our channel {bh2u(chan_id)}; " self.logger.info(f"Warning. Missing channel update for our channel {bh2u(chan_id)}; "
f"filling invoice with incorrect data.") f"filling invoice with incorrect data.")
routing_hints.append(('r', [(chan.node_id, routing_hints.append(('r', [(chan.node_id,
chan_id, chan_id,
@ -854,7 +857,8 @@ class LNWallet(LNWorker):
if constants.net is not constants.BitcoinRegtest: if constants.net is not constants.BitcoinRegtest:
ratio = chan.constraints.feerate / self.current_feerate_per_kw() ratio = chan.constraints.feerate / self.current_feerate_per_kw()
if ratio < 0.5: if ratio < 0.5:
self.print_error(f"WARNING: fee level for channel {bh2u(chan.channel_id)} is {chan.constraints.feerate} sat/kiloweight, current recommended feerate is {self.current_feerate_per_kw()} sat/kiloweight, consider force closing!") self.logger.warning(f"fee level for channel {bh2u(chan.channel_id)} is {chan.constraints.feerate} sat/kiloweight, "
f"current recommended feerate is {self.current_feerate_per_kw()} sat/kiloweight, consider force closing!")
if not chan.should_try_to_reestablish_peer(): if not chan.should_try_to_reestablish_peer():
continue continue
peer = self.peers.get(chan.node_id, None) peer = self.peers.get(chan.node_id, None)
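
Most of the lnworker.py hunks above follow one mechanical pattern: PrintError's variadic self.print_error(a, b, ...) calls become a single pre-formatted message passed to self.logger.info(...), because logging.Logger.info() treats extra positional arguments as %-style format arguments rather than joining them. A minimal sketch of the difference, with an illustrative peer address that is not taken from this diff:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("electrum.lnworker")

    peer_addr = "203.0.113.7:9735"  # placeholder value, for illustration only

    # ported style: the message is formatted before it reaches the logger
    logger.info(f"adding peer {peer_addr}")

    # passing the old argument list straight through would produce a logging
    # error at render time ("TypeError: not all arguments converted during
    # string formatting"), since 'adding peer' has no %s placeholder:
    # logger.info('adding peer', peer_addr)

This is also why the two-argument print_error calls in the sweep code are collapsed into a single formatted string on the logger.info side.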

7
electrum/sql_db.py

@ -7,7 +7,7 @@ from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool from sqlalchemy.pool import StaticPool
from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import sessionmaker
from .util import PrintError from .logging import Logger
def sql(func): def sql(func):
@ -19,9 +19,10 @@ def sql(func):
return f.result(timeout=10) return f.result(timeout=10)
return wrapper return wrapper
class SqlDB(PrintError): class SqlDB(Logger):
def __init__(self, network, path, base): def __init__(self, network, path, base):
Logger.__init__(self)
self.base = base self.base = base
self.network = network self.network = network
self.path = path self.path = path
@ -48,4 +49,4 @@ class SqlDB(PrintError):
future.set_result(result) future.set_result(result)
# write # write
self.DBSession.commit() self.DBSession.commit()
self.print_error("SQL thread terminated") self.logger.info("SQL thread terminated")
