From c17848a3f36f39bc0626500e34b05e172dc3957a Mon Sep 17 00:00:00 2001 From: Christian Decker <decker.christian@gmail.com> Date: Mon, 28 May 2018 19:10:25 +0200 Subject: [PATCH] gossip: Disable local channels after loading the gossip_store We don't have any connection yet, so how could they be active? Disable both sides to avoid trying to route through them or telling others to use them as `contact_points` in invoices. Signed-off-by: Christian Decker <decker.christian@gmail.com> --- gossipd/gossip.c | 24 ++++++++++++++++++++++++ tests/test_gossip.py | 36 ++++++++++++++++++++++++++++++++++++ tests/test_lightningd.py | 18 +++++++++++++----- 3 files changed, 73 insertions(+), 5 deletions(-) diff --git a/gossipd/gossip.c b/gossipd/gossip.c index 88adb492b..fee7813e5 100644 --- a/gossipd/gossip.c +++ b/gossipd/gossip.c @@ -1900,6 +1900,26 @@ static void gossip_disable_channel(struct routing_state *rstate, struct chan *ch tal_hex(tmpctx, err)); } +static void gossip_disable_local_channels(struct daemon *daemon) +{ + struct node *local_node = + get_node(daemon->rstate, &daemon->rstate->local_id); + struct chan *c; + size_t i; + + /* We don't have a local_node, so we don't have any channels yet + * either */ + if (!local_node) + return; + + for (i=0; i<tal_count(local_node->chans); i++) { + c = local_node->chans[i]; + c->half[0].flags |= ROUTING_FLAGS_DISABLED; + c->half[1].flags |= ROUTING_FLAGS_DISABLED; + gossip_disable_channel(daemon->rstate, c); + } +} + /* Parse an incoming gossip init message and assign config variables * to the daemon. */ @@ -1939,6 +1959,10 @@ static struct io_plan *gossip_init(struct daemon_conn *master, /* Load stored gossip messages */ gossip_store_load(daemon->rstate, daemon->rstate->store); + /* Now disable all local channels, they can't be connected yet. 
*/ + gossip_disable_local_channels(daemon); + + new_reltimer(&daemon->timers, daemon, time_from_sec(daemon->rstate->prune_timeout/4), gossip_refresh_network, daemon); diff --git a/tests/test_gossip.py b/tests/test_gossip.py index d84c32ad3..6a0ebcce8 100644 --- a/tests/test_gossip.py +++ b/tests/test_gossip.py @@ -58,3 +58,39 @@ def test_gossip_pruning(node_factory, bitcoind): assert scid2 not in [c['short_channel_id'] for c in l2.rpc.listchannels()['channels']] assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']] assert l3.info['id'] not in [n['nodeid'] for n in l2.rpc.listnodes()['nodes']] + + +@unittest.skipIf(not DEVELOPER, "needs --dev-broadcast-interval, --dev-no-reconnect") +def test_gossip_disable_channels(node_factory, bitcoind): + """Simple test to check that channels get disabled correctly on disconnect and + reenabled upon reconnecting + + """ + opts = {'dev-no-reconnect': None, 'may_reconnect': True} + l1, l2 = node_factory.get_nodes(2, opts=opts) + + l1.rpc.connect(l2.info['id'], 'localhost', l2.port) + scid = l1.fund_channel(l2, 10**6) + bitcoind.rpc.generate(5) + + def count_active(node): + chans = node.rpc.listchannels()['channels'] + active = [c for c in chans if c['active']] + return len(active) + + l1.wait_channel_active(scid) + l2.wait_channel_active(scid) + + assert(count_active(l1) == 2) + assert(count_active(l2) == 2) + + l2.restart() + + wait_for(lambda: count_active(l1) == 0) + assert(count_active(l2) == 0) + + # Now reconnect, they should re-enable the channels + l1.rpc.connect(l2.info['id'], 'localhost', l2.port) + + wait_for(lambda: count_active(l1) == 2) + wait_for(lambda: count_active(l2) == 2) diff --git a/tests/test_lightningd.py b/tests/test_lightningd.py index fc92f1d4c..e9376be8a 100644 --- a/tests/test_lightningd.py +++ b/tests/test_lightningd.py @@ -2505,10 +2505,10 @@ class LightningDTests(BaseLightningDTests): too. 
""" opts = {'dev-no-reconnect': None} - l1 = self.node_factory.get_node(options=opts) - l2 = self.node_factory.get_node(options=opts) - l3 = self.node_factory.get_node(options=opts) - l4 = self.node_factory.get_node(options=opts) + l1 = self.node_factory.get_node(options=opts, may_reconnect=True) + l2 = self.node_factory.get_node(options=opts, may_reconnect=True) + l3 = self.node_factory.get_node(options=opts, may_reconnect=True) + l4 = self.node_factory.get_node(options=opts, may_reconnect=True) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l2.rpc.connect(l3.info['id'], 'localhost', l3.port) @@ -2532,8 +2532,14 @@ class LightningDTests(BaseLightningDTests): wait_for(lambda: count_active(l2) == 4) wait_for(lambda: count_active(l3) == 6) # 4 public + 2 local - # l1 restarts and doesn't connect, but loads from persisted store + # l1 restarts and doesn't connect, but loads from persisted store, all + # local channels should be disabled, leaving only the two l2 <-> l3 + # directions l1.restart() + wait_for(lambda: count_active(l1) == 2) + + # Now reconnect, they should re-enable the two l1 <-> l2 directions + l1.rpc.connect(l2.info['id'], 'localhost', l2.port) wait_for(lambda: count_active(l1) == 4) # Now spend the funding tx, generate a block and see others deleting the @@ -2563,6 +2569,8 @@ class LightningDTests(BaseLightningDTests): # Finally, it should also remember the deletion after a restart l3.restart() l4.restart() + l2.rpc.connect(l3.info['id'], 'localhost', l3.port) + l3.rpc.connect(l4.info['id'], 'localhost', l4.port) wait_for(lambda: count_active(l3) == 4) # 2 public + 2 local # Both l3 and l4 should remember their local-only channel