
pytest: Change the channel persistence test to add inflight HTLCs

Now that we have HTLC persistence we'd also like to test it. This
kills the second node in the middle of an HTLC; it'll recover and
finish the flow.

Signed-off-by: Christian Decker <decker.christian@gmail.com>

Tag: ppa-0.6.1
Author: Christian Decker (committed by Rusty Russell)
Commit: f5a412d90d

Changed files:
  tests/test_lightningd.py (28 lines changed)
  tests/utils.py (2 lines changed)

tests/test_lightningd.py

@@ -1501,8 +1501,12 @@ class LightningDTests(BaseLightningDTests):
         assert outputs[2] == 10000000
 
     def test_channel_persistence(self):
-        # Start two nodes and open a channel (to remember)
-        l1, l2 = self.connect()
+        # Start two nodes and open a channel (to remember). l2 will
+        # mysteriously die while committing the first HTLC so we can
+        # check that HTLCs reloaded from the DB work.
+        l1 = self.node_factory.get_node()
+        l2 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'])
+        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
 
         # Neither node should have a channel open, they are just connected
         for n in (l1, l2):
@@ -1517,13 +1521,18 @@ class LightningDTests(BaseLightningDTests):
         for n in (l1, l2):
             assert(n.db_query('SELECT COUNT(id) as count FROM channels;')[0]['count'] == 1)
 
-        # Perform a payment so we have something to restore
-        self.pay(l1, l2, 10000)
-        time.sleep(1)
-        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000
-        assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 10000
+        # Fire off a sendpay request, it'll get interrupted by a restart
+        fut = self.executor.submit(self.pay, l1, l2, 10000)
+        # Wait for it to be committed to, i.e., stored in the DB
+        l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
 
         # Stop l2, l1 will reattempt to connect
-        l2.stop()
+        print("Killing l2 in mid HTLC")
+        l2.daemon.proc.terminate()
+
+        # Clear the disconnect and timer stop so we can proceed normally
+        l2.daemon.cmd_line = [e for e in l2.daemon.cmd_line if 'disconnect' not in e]
+        print(" ".join(l2.daemon.cmd_line + ['--dev-debugger=channeld']))
 
         # Wait for l1 to notice
         wait_for(lambda: not l1.rpc.getpeers()['peers'][0]['connected'])
@@ -1532,6 +1541,9 @@ class LightningDTests(BaseLightningDTests):
         l2.daemon.start()
         wait_for(lambda: len(l2.rpc.getpeers()['peers']) == 1)
 
+        # Wait for the restored HTLC to finish
+        wait_for(lambda: l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)
+
         wait_for(lambda: len([p for p in l1.rpc.getpeers()['peers'] if p['connected']]), interval=1)
         wait_for(lambda: len([p for p in l2.rpc.getpeers()['peers'] if p['connected']]), interval=1)
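
Note: the test leans on the wait_for polling helper from tests/utils.py, which this commit does not touch. A minimal sketch of such a helper is shown below; the default timeout and interval values are assumptions for illustration only.

    import time

    def wait_for(success, timeout=30, interval=0.25):
        # Poll the success() predicate until it returns a truthy value,
        # sleeping `interval` seconds between attempts; give up after
        # `timeout` seconds.
        deadline = time.time() + timeout
        while not success():
            if time.time() > deadline:
                raise TimeoutError('condition not met within {}s'.format(timeout))
            time.sleep(interval)

The test uses it to block until l1 notices the disconnect and, after l2 restarts, until the reloaded HTLC settles and l1's balance reaches 99990000 msat.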

tests/utils.py

@@ -235,7 +235,7 @@ class LightningD(TailableProc):
         ]
         self.cmd_line += ["--{}={}".format(k, v) for k, v in LIGHTNINGD_CONFIG.items()]
-        self.prefix = 'lightningd'
+        self.prefix = 'lightningd(%d)' % (port)
 
         if not os.path.exists(lightning_dir):
             os.makedirs(lightning_dir)
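
The tests/utils.py change only affects log readability: each node's tailed daemon output is now prefixed with its port, so interleaved lines from l1 and l2 can be told apart. The TailableProc internals are not part of this diff; a rough sketch of how such a prefix is typically applied when tailing a daemon's stdout, under that assumption:

    import subprocess

    def tail_with_prefix(cmd_line, prefix):
        # Launch the daemon and echo each output line with a per-node prefix,
        # e.g. 'lightningd(27183)' rather than a bare 'lightningd'.
        proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, text=True)
        for line in proc.stdout:
            print('{}: {}'.format(prefix, line.rstrip()))
        return proc.wait()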
