
peer_control: don't double-free on permanent fail of non-persistent peer.

peer_fail_permanent() frees peer->owner, but bad_peer() is called from
the sd->badpeercb(), which then goes on to io_close(conn), and conn is
a child of sd.

We need to detach the two for this case, so neither tries to free the
other.

This leads to a corner case when the subd exits after the peer is gone:
subd->peer is NULL, so we have to handle that too.

Fixes: #282
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
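
To make the ownership dance concrete, here is a minimal, self-contained sketch of the bug and the fix. It uses plain malloc/free instead of lightningd's tal hierarchy, and fail_peer(), bad_peer_cb() and owner_finished() are simplified stand-ins for peer_fail_permanent(), bad_peer() and peer_owner_finished(), not the real code:

/* Minimal sketch of the double free and the detach fix; not lightningd code. */
#include <stdio.h>
#include <stdlib.h>

struct subd;

struct peer {
        struct subd *owner;     /* per-peer subdaemon; failing the peer frees it */
};

struct subd {
        struct peer *peer;      /* NULL once the peer has been detached */
};

/* Stand-in for peer_fail_permanent(): tears down the peer's owner. */
static void fail_peer(struct peer *peer)
{
        free(peer->owner);      /* free(NULL) is a no-op, so detaching first is safe */
        peer->owner = NULL;
}

/* Stand-in for bad_peer(): runs inside the subdaemon's own error path,
 * which frees the subdaemon itself (io_close on its conn) once it returns. */
static void bad_peer_cb(struct subd *subd)
{
        struct peer *peer = subd->peer;

        /* Detach both directions so neither cleanup frees the other. */
        peer->owner = NULL;
        subd->peer = NULL;
        fail_peer(peer);
}

/* Stand-in for peer_owner_finished(): subd->peer can now be NULL. */
static void owner_finished(struct subd *subd)
{
        if (!subd->peer || subd->peer->owner != subd) {
                printf("subdaemon died, peer moved on: nothing to do\n");
                return;
        }
        /* ...otherwise react to the unexpected exit... */
}

int main(void)
{
        struct peer *peer = calloc(1, sizeof(*peer));
        struct subd *subd = calloc(1, sizeof(*subd));

        peer->owner = subd;
        subd->peer = peer;

        bad_peer_cb(subd);      /* without the detach, this would already free subd */
        owner_finished(subd);   /* subd->peer is NULL here, hence the new check */
        free(subd);             /* the subdaemon's own cleanup: frees it exactly once */
        free(peer);
        return 0;
}

Without the two NULL assignments in bad_peer_cb(), fail_peer() would free subd and the subdaemon's own cleanup would free it a second time; and once the peer pointer can be cleared this way, the exit handler has to tolerate subd->peer being NULL.
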
Tag: ppa-0.6.1
Author: Rusty Russell (7 years ago), committed by Christian Decker
Parent commit: 2fe2a0bcf9
2 files changed:
  1. lightningd/peer_control.c (11 changed lines)
  2. tests/test_lightningd.py (24 changed lines)

lightningd/peer_control.c (11 changed lines)

@@ -216,7 +216,12 @@ void peer_fail_transient(struct peer *peer, const char *fmt, ...)
 /* When daemon reports a STATUS_FAIL_PEER_BAD, it goes here. */
 static void bad_peer(struct subd *subd, const char *msg)
 {
-        peer_fail_permanent_str(subd->peer, msg);
+        struct peer *peer = subd->peer;
+
+        /* Don't close peer->owner, subd will clean that up. */
+        peer->owner = NULL;
+        subd->peer = NULL;
+        peer_fail_permanent_str(peer, msg);
 }
 
 void peer_set_condition(struct peer *peer, enum peer_state old_state,
@@ -684,8 +689,8 @@ struct peer *peer_by_id(struct lightningd *ld, const struct pubkey *id)
 /* When a per-peer subdaemon exits, see if we need to do anything. */
 static void peer_owner_finished(struct subd *subd, int status)
 {
-        /* If peer has moved on, do nothing. */
-        if (subd->peer->owner != subd) {
+        /* If peer has moved on, do nothing (can be NULL if it errored out) */
+        if (!subd->peer || subd->peer->owner != subd) {
                 log_debug(subd->ld->log, "Subdaemon %s died (%i), peer moved",
                           subd->name, status);
                 return;

tests/test_lightningd.py (24 changed lines)

@@ -87,7 +87,7 @@ class NodeFactory(object):
         self.nodes = []
         self.executor = executor
 
-    def get_node(self, disconnect=None):
+    def get_node(self, disconnect=None, options=None):
         node_id = self.next_id
         self.next_id += 1
@@ -103,6 +103,8 @@ class NodeFactory(object):
                 f.write("\n".join(disconnect))
             daemon.cmd_line.append("--dev-disconnect=dev_disconnect")
         daemon.cmd_line.append("--dev-fail-on-subdaemon-fail")
+        if options:
+            daemon.cmd_line.append(options)
         rpc = LightningRpc(socket_path, self.executor)
         node = utils.LightningNode(daemon, rpc, bitcoind, self.executor)
@@ -382,6 +384,26 @@ class LightningDTests(BaseLightningDTests):
         # But this should work.
         self.pay(l2, l1, available - reserve*2)
 
+    def test_bad_opening(self):
+        # l1 asks for a too-long locktime
+        l1 = self.node_factory.get_node(options='--locktime-blocks=100')
+        l2 = self.node_factory.get_node(options='--max-locktime-blocks=99')
+
+        ret = l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        assert ret['id'] == l2.info['id']
+
+        l1.daemon.wait_for_log('WIRE_GOSSIPCTL_NEW_PEER')
+        l2.daemon.wait_for_log('WIRE_GOSSIPCTL_NEW_PEER')
+
+        addr = l1.rpc.newaddr()['address']
+        txid = l1.bitcoin.rpc.sendtoaddress(addr, 10**6 / 10**8 + 0.01)
+        tx = l1.bitcoin.rpc.getrawtransaction(txid)
+        l1.rpc.addfunds(tx)
+
+        self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 10**6)
+        l2.daemon.wait_for_log('to_self_delay 100 larger than 99')
+
     def test_closing(self):
         l1,l2 = self.connect()
