From 2fe2a0bcf92296012df059c15b41d7a8c48f416b Mon Sep 17 00:00:00 2001
From: Rusty Russell <rusty@rustcorp.com.au>
Date: Thu, 28 Sep 2017 13:23:23 +0930
Subject: [PATCH] peer_control: don't double-free on permanent fail of
 non-persistent peer.

peer_fail_permanent() frees peer->owner, but for bad_peer() we're being
called by the sd->badpeercb(), which then goes on to io_close(conn)
which is a child of sd.  We need to detach the two for this case, so
neither tries to free the other.

This leads to a corner case when the subd exits after the peer is gone:
subd->peer is NULL, so we have to handle that too.

Fixes: #282
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
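Note for reviewers (not part of the commit message): here is a
stand-alone sketch of the ownership hazard, using plain malloc/free in
place of tal and hypothetical names, showing why both pointers must be
detached before the permanent-fail path runs:

	/* Minimal sketch, not lightningd code. */
	#include <stdio.h>
	#include <stdlib.h>

	struct subd;

	struct peer {
		struct subd *owner;   /* Daemon currently serving this peer. */
	};

	struct subd {
		struct peer *peer;    /* Peer served; NULL once detached. */
	};

	/* Stands in for peer_fail_permanent(): frees the peer's owner. */
	static void fail_permanent(struct peer *peer)
	{
		free(peer->owner);    /* No-op if already detached (NULL). */
		peer->owner = NULL;
	}

	/* Stands in for bad_peer(): runs *inside* a subd callback, and
	 * the subd's conn is still used (io_close) after we return, so
	 * fail_permanent() must not free the subd under our feet. */
	static void on_bad_peer(struct subd *subd)
	{
		struct peer *peer = subd->peer;

		peer->owner = NULL;   /* fail_permanent() won't free subd... */
		subd->peer = NULL;    /* ...and subd's exit won't touch peer. */
		fail_permanent(peer);
	}

	/* Stands in for peer_owner_finished(): peer may now be NULL. */
	static void on_subd_exit(struct subd *subd)
	{
		if (!subd->peer || subd->peer->owner != subd) {
			printf("subd died, peer moved on\n");
			free(subd);
			return;
		}
		/* ...otherwise the peer is still attached; handle it... */
	}

	int main(void)
	{
		struct peer *peer = calloc(1, sizeof(*peer));
		struct subd *subd = calloc(1, sizeof(*subd));

		peer->owner = subd;
		subd->peer = peer;

		on_bad_peer(subd);   /* Without the detach: frees subd... */
		on_subd_exit(subd);  /* ...then this would use it again. */

		free(peer);
		return 0;
	}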
 lightningd/peer_control.c | 11 ++++++++---
 tests/test_lightningd.py  | 24 +++++++++++++++++++++++-
 2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/lightningd/peer_control.c b/lightningd/peer_control.c
index 1da45f501..2b7875981 100644
--- a/lightningd/peer_control.c
+++ b/lightningd/peer_control.c
@@ -216,7 +216,12 @@ void peer_fail_transient(struct peer *peer, const char *fmt, ...)
 /* When daemon reports a STATUS_FAIL_PEER_BAD, it goes here. */
 static void bad_peer(struct subd *subd, const char *msg)
 {
-	peer_fail_permanent_str(subd->peer, msg);
+	struct peer *peer = subd->peer;
+
+	/* Don't close peer->owner, subd will clean that up. */
+	peer->owner = NULL;
+	subd->peer = NULL;
+	peer_fail_permanent_str(peer, msg);
 }
 
 void peer_set_condition(struct peer *peer, enum peer_state old_state,
@@ -684,8 +689,8 @@ struct peer *peer_by_id(struct lightningd *ld, const struct pubkey *id)
 /* When a per-peer subdaemon exits, see if we need to do anything. */
 static void peer_owner_finished(struct subd *subd, int status)
 {
-	/* If peer has moved on, do nothing. */
-	if (subd->peer->owner != subd) {
+	/* If peer has moved on, do nothing (can be NULL if it errored out) */
+	if (!subd->peer || subd->peer->owner != subd) {
 		log_debug(subd->ld->log, "Subdaemon %s died (%i), peer moved",
 			  subd->name, status);
 		return;
diff --git a/tests/test_lightningd.py b/tests/test_lightningd.py
index 0a1818d18..cc3e5e30f 100644
--- a/tests/test_lightningd.py
+++ b/tests/test_lightningd.py
@@ -87,7 +87,7 @@ class NodeFactory(object):
         self.nodes = []
         self.executor = executor
 
-    def get_node(self, disconnect=None):
+    def get_node(self, disconnect=None, options=None):
         node_id = self.next_id
         self.next_id += 1
 
@@ -103,6 +103,8 @@
                 f.write("\n".join(disconnect))
             daemon.cmd_line.append("--dev-disconnect=dev_disconnect")
             daemon.cmd_line.append("--dev-fail-on-subdaemon-fail")
+        if options:
+            daemon.cmd_line.append(options)
         rpc = LightningRpc(socket_path, self.executor)
 
         node = utils.LightningNode(daemon, rpc, bitcoind, self.executor)
@@ -382,6 +384,26 @@ class LightningDTests(BaseLightningDTests):
         # But this should work.
         self.pay(l2, l1, available - reserve*2)
 
+    def test_bad_opening(self):
+        # l1 asks for a too-long locktime
+        l1 = self.node_factory.get_node(options='--locktime-blocks=100')
+        l2 = self.node_factory.get_node(options='--max-locktime-blocks=99')
+        ret = l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+
+        assert ret['id'] == l2.info['id']
+
+        l1.daemon.wait_for_log('WIRE_GOSSIPCTL_NEW_PEER')
+        l2.daemon.wait_for_log('WIRE_GOSSIPCTL_NEW_PEER')
+
+        addr = l1.rpc.newaddr()['address']
+        txid = l1.bitcoin.rpc.sendtoaddress(addr, 10**6 / 10**8 + 0.01)
+        tx = l1.bitcoin.rpc.getrawtransaction(txid)
+
+        l1.rpc.addfunds(tx)
+        self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 10**6)
+
+        l2.daemon.wait_for_log('to_self_delay 100 larger than 99')
+
     def test_closing(self):
         l1,l2 = self.connect()
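
Postscript: the log line the new test waits for comes from l2's opening
daemon rejecting l1's proposed to_self_delay (100 blocks, set via
--locktime-blocks=100, against l2's --max-locktime-blocks=99).  That
rejection is reported as STATUS_FAIL_PEER_BAD, which is exactly what
drives the bad_peer() path fixed above.  A rough, hypothetical
simplification of that check (not the actual openingd code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical: reject an open whose to_self_delay exceeds our
	 * configured maximum, as the test's l2 does for l1's proposal. */
	static bool locktime_acceptable(unsigned int their_to_self_delay,
					unsigned int our_max_locktime_blocks)
	{
		if (their_to_self_delay > our_max_locktime_blocks) {
			fprintf(stderr, "to_self_delay %u larger than %u\n",
				their_to_self_delay, our_max_locktime_blocks);
			return false;
		}
		return true;
	}

	int main(void)
	{
		/* Mirrors the test: 100 vs 99 fails, so fundchannel raises. */
		return locktime_acceptable(100, 99) ? 0 : 1;
	}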