
lightningd: simplify peer destruction.

We currently have to do a dance when we get a reconnect in openingd,
because we don't normally expect to free both the owner and the peer.
That dance is a layering violation: freeing a peer should itself clean
up the owner's pointer to it (avoiding a double free), which lets us
eliminate the dance entirely.
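
For illustration, a minimal standalone sketch of the ownership rule this
enforces. The struct layouts and plain malloc/free here are stand-ins, not
the real lightningd/tal code: the peer's destructor unlinks the owner's
back-pointer, so the owner can be freed afterwards without dereferencing a
dangling peer pointer.

/* Hypothetical, simplified structs; not the actual lightningd definitions. */
#include <assert.h>
#include <stdlib.h>

struct peer;

struct subd {                  /* stand-in for the owning subdaemon */
	struct peer *peer;     /* back-pointer; must never dangle */
};

struct peer {
	struct subd *owner;    /* current owner, may be NULL */
};

static void destroy_peer(struct peer *peer)
{
	/* Don't leave the owner's pointer dangling (mirrors the
	 * destructor pattern in lightningd/peer_control.c). */
	if (peer->owner && peer->owner->peer == peer)
		peer->owner->peer = NULL;
}

int main(void)
{
	struct subd *subd = calloc(1, sizeof(*subd));
	struct peer *peer = calloc(1, sizeof(*peer));

	subd->peer = peer;
	peer->owner = subd;

	/* Destroy the peer first: the destructor unlinks subd->peer... */
	destroy_peer(peer);
	free(peer);
	assert(subd->peer == NULL);

	/* ...so freeing the owner afterwards is safe, no "dance" needed. */
	free(subd);
	return 0;
}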

The free order is now different, and test_reconnect_openingd was overly
precise about it.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Author: Rusty Russell, committed by Christian Decker
commit 871d0b1d74 (tag: ppa-0.6.1)
 lightningd/peer_control.c | 17
 lightningd/subd.c         |  6
 tests/test_lightningd.py  | 11
 3 files changed

diff --git a/lightningd/peer_control.c b/lightningd/peer_control.c
--- a/lightningd/peer_control.c
+++ b/lightningd/peer_control.c

@@ -46,6 +46,9 @@
 static void destroy_peer(struct peer *peer)
 {
+	/* Don't leave owner pointer dangling. */
+	if (peer->owner && peer->owner->peer == peer)
+		peer->owner->peer = NULL;
 	list_del_from(&peer->ld->peers, &peer->list);
 }
@@ -436,7 +439,6 @@ static bool peer_reconnected(struct lightningd *ld,
			     int fd,
			     const struct crypto_state *cs)
 {
-	struct subd *subd;
 	struct peer *peer = peer_by_id(ld, id);
 	if (!peer)
 		return false;
@@ -481,11 +483,8 @@ static bool peer_reconnected(struct lightningd *ld,
 		return false;
 	case OPENINGD:
-		/* Kill off openingd, forget old peer. */
-		subd = peer->owner;
-		peer->owner = NULL; /* We'll free it ourselves */
-		tal_free(subd);
-		tal_free(peer);
+		/* Kill off openingd, which will free peer. */
+		tal_free(peer->owner);
 		/* A fresh start. */
 		return false;
@@ -1072,12 +1071,14 @@ static enum watch_result funding_announce_cb(struct peer *peer,
 static void peer_onchain_finished(struct subd *subd, int status)
 {
 	/* Moved on? Eg. reorg, and it has a new onchaind. */
-	if (subd->peer->owner != subd)
+	if (!subd->peer || subd->peer->owner != subd)
 		return;
+	/* Unlink peer from us, so it doesn't try to free us in destroy_peer */
+	subd->peer->owner = NULL;
 	if (status != 0) {
 		log_broken(subd->peer->log, "onchaind died status %i", status);
-		subd->peer->owner = NULL;
 		return;
 	}

diff --git a/lightningd/subd.c b/lightningd/subd.c
--- a/lightningd/subd.c
+++ b/lightningd/subd.c

@@ -11,6 +11,7 @@
 #include <fcntl.h>
 #include <lightningd/lightningd.h>
 #include <lightningd/log.h>
+#include <lightningd/peer_control.h>
 #include <lightningd/subd.h>
 #include <stdarg.h>
 #include <sys/socket.h>
@@ -596,6 +597,11 @@ void subd_shutdown(struct subd *sd, unsigned int seconds)
 	/* No finished callback any more. */
 	sd->finished = NULL;
+	/* Don't let destroy_peer dereference us */
+	if (sd->peer) {
+		sd->peer->owner = NULL;
+		sd->peer = NULL;
+	}
 	/* Don't free sd when we close connection manually. */
 	tal_steal(sd->ld, sd);
 	/* Close connection: should begin shutdown now. */

diff --git a/tests/test_lightningd.py b/tests/test_lightningd.py
--- a/tests/test_lightningd.py
+++ b/tests/test_lightningd.py

@@ -1250,25 +1250,22 @@ class LightningDTests(BaseLightningDTests):
         tx = l1.bitcoin.rpc.getrawtransaction(txid)
         l1.rpc.addfunds(tx)
-        # It closes on us, we forget about it.
+        # l2 closes on l1, l1 forgets.
         self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
         assert l1.rpc.getpeer(l2.info['id']) == None
         # Reconnect.
         l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
-        # Truncate (hack to release old openingd).
-        with open(os.path.join(l2.daemon.lightning_dir, 'dev_disconnect'), "w"):
-            pass
-        # We should get a message about old one exiting.
-        l2.daemon.wait_for_log('Subdaemon lightning_openingd died')
+        l2.daemon.wait_for_log('Peer has reconnected, state OPENINGD')
+        l2.daemon.wait_for_log('Owning subdaemon lightning_openingd died')
         # Should work fine.
         l1.rpc.fundchannel(l2.info['id'], 20000)
         l1.daemon.wait_for_log('sendrawtx exit 0')
-        # Just to be sure, second openingd should die too.
+        # Just to be sure, second openingd hand over to channeld.
         l2.daemon.wait_for_log('Subdaemon lightning_openingd died \(0\)')

     def test_reconnect_normal(self):
