Browse Source

channeld: only enable option_data_loss_protect if EXPERIMENTAL_FEATURES.

We have an incompatibility with lnd it seems: I've lost channels on
reconnect with 'sync error'.  Since I never got this code to be reliable,
disable it for next release since I suspect it's our fault :(

And reenable the check which didn't work, for others to untangle.

I couldn't get option_data_loss_protect to be reliable, and I disabled
the check.  This was a mistake; I should have spent even more
time trying to get to the bottom of this (especially by writing test
vectors for the spec and testing against other implementations).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
plugin-6
Rusty Russell 6 years ago
parent
commit
6aa511fa7a
  1. 2
      CHANGELOG.md
  2. 2
      channeld/channeld.c
  3. 2
      common/features.c
  4. 18
      tests/test_connection.py

2
CHANGELOG.md

@@ -30,6 +30,8 @@ changes.
### Removed
- option_data_loss_protect is now only offered if EXPERIMENTAL_FEATURES is enabled, since it seems incompatible with lnd and has known bugs.
### Fixed
- JSON API: uppercase invoices now parsed correctly (broken in 0.6.2).

2
channeld/channeld.c

@@ -1986,7 +1986,6 @@ static void check_current_dataloss_fields(struct peer *peer,
type_to_string(tmpctx, struct secret,
&old_commit_secret));
#if 0 /* FIXME: This isn't reliable! */
/* FIXME: We don't keep really old per_commit numbers, so we can't
* check this 'needs retransmit' case! */
if (next_remote_revocation_number == peer->next_index[REMOTE]) {
@@ -2010,7 +2009,6 @@ static void check_current_dataloss_fields(struct peer *peer,
remote_current_per_commitment_point),
next_remote_revocation_number,
peer->next_index[REMOTE]);
#endif
status_trace("option_data_loss_protect: fields are correct");
}

2
common/features.c

@@ -4,7 +4,9 @@
#include <wire/peer_wire.h>
/* Local feature bits we advertise to peers on connect. */
static const u32 our_localfeatures[] = {
#if EXPERIMENTAL_FEATURES
/* Only offered in experimental builds: suspected incompatibility
 * with lnd and known bugs, so disabled for release builds. */
LOCAL_DATA_LOSS_PROTECT,
#endif
LOCAL_INITIAL_ROUTING_SYNC,
LOCAL_GOSSIP_QUERIES
};

18
tests/test_connection.py

@@ -1,7 +1,7 @@
from collections import namedtuple
from fixtures import * # noqa: F401,F403
from lightning import RpcError
from utils import DEVELOPER, only_one, wait_for, sync_blockheight, VALGRIND
from utils import DEVELOPER, only_one, wait_for, sync_blockheight, VALGRIND, EXPERIMENTAL_FEATURES
import os
@@ -1004,11 +1004,14 @@ def test_forget_channel(node_factory):
def test_peerinfo(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2, fundchannel=False, opts={'may_reconnect': True})
if EXPERIMENTAL_FEATURES:
lfeatures = '8a'
else:
lfeatures = '88'
# Gossiping but no node announcement yet
assert l1.rpc.getpeer(l2.info['id'])['connected']
assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
assert l1.rpc.getpeer(l2.info['id'])['localfeatures'] == '8a'
assert l1.rpc.getpeer(l2.info['id'])['globalfeatures'] == ''
assert l1.rpc.getpeer(l2.info['id'])['localfeatures'] == lfeatures
# Fund a channel to force a node announcement
chan = l1.fund_channel(l2, 10**6)
@@ -1025,15 +1028,15 @@ def test_peerinfo(node_factory, bitcoind):
assert only_one(nodes1)['globalfeatures'] == peer1['globalfeatures']
assert only_one(nodes2)['globalfeatures'] == peer2['globalfeatures']
assert l1.rpc.getpeer(l2.info['id'])['localfeatures'] == '8a'
assert l2.rpc.getpeer(l1.info['id'])['localfeatures'] == '8a'
assert l1.rpc.getpeer(l2.info['id'])['localfeatures'] == lfeatures
assert l2.rpc.getpeer(l1.info['id'])['localfeatures'] == lfeatures
# If it reconnects after db load, it should know features.
l1.restart()
wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'])
wait_for(lambda: l2.rpc.getpeer(l1.info['id'])['connected'])
assert l1.rpc.getpeer(l2.info['id'])['localfeatures'] == '8a'
assert l2.rpc.getpeer(l1.info['id'])['localfeatures'] == '8a'
assert l1.rpc.getpeer(l2.info['id'])['localfeatures'] == lfeatures
assert l2.rpc.getpeer(l1.info['id'])['localfeatures'] == lfeatures
# Close the channel to forget the peer
with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
@@ -1247,6 +1250,7 @@ def test_funder_simple_reconnect(node_factory, bitcoind):
@unittest.skipIf(not DEVELOPER, "needs LIGHTNINGD_DEV_LOG_IO")
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "needs option_dataloss_protect")
def test_dataloss_protection(node_factory, bitcoind):
l1 = node_factory.get_node(may_reconnect=True, log_all_io=True)
l2 = node_factory.get_node(may_reconnect=True, log_all_io=True)

Loading…
Cancel
Save