Browse Source

closingd: retransmit 'funding_locked' if we reconnect without any update

As per BOLT02 #message-retransmission:
if `next_commitment_number` is 1 in both the `channel_reestablish` it sent and received:
    - MUST retransmit `funding_locked`
travis-debug
darosior 5 years ago
committed by Rusty Russell
parent
commit
a7cbe93fb8
  1. 1
      CHANGELOG.md
  2. 2
      channeld/channeld.c
  3. 50
      closingd/closingd.c
  4. 27
      tests/test_connection.py

1
CHANGELOG.md

@@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- bolt11: support for parsing feature bits (field `9`). - bolt11: support for parsing feature bits (field `9`).
- Protocol: we now retransmit `funding_locked` upon reconnection while closing if there was no update
### Changed ### Changed
- JSON API: `txprepare` now uses `outputs` as parameter other than `destination` and `satoshi` - JSON API: `txprepare` now uses `outputs` as parameter other than `destination` and `satoshi`

2
channeld/channeld.c

@@ -2315,6 +2315,8 @@ static void peer_reconnect(struct peer *peer,
&& next_commitment_number == 1) { && next_commitment_number == 1) {
u8 *msg; u8 *msg;
status_debug("Retransmitting funding_locked for channel %s",
type_to_string(tmpctx, struct channel_id, &peer->channel_id));
/* Contains per commit point #1, for first post-opening commit */ /* Contains per commit point #1, for first post-opening commit */
msg = towire_funding_locked(NULL, msg = towire_funding_locked(NULL,
&peer->channel_id, &peer->channel_id,

50
closingd/closingd.c

@@ -108,25 +108,17 @@ static u8 *closing_read_peer_msg(const tal_t *ctx,
} }
} }
static void do_reconnect(struct per_peer_state *pps, static struct pubkey get_per_commitment_point(u64 commitment_number)
const struct channel_id *channel_id,
const u64 next_index[NUM_SIDES],
u64 revocations_received,
const u8 *channel_reestablish,
const u8 *final_scriptpubkey,
const struct secret *last_remote_per_commit_secret)
{ {
u8 *msg; u8 *msg;
struct channel_id their_channel_id; struct pubkey commitment_point;
u64 next_local_commitment_number, next_remote_revocation_number;
struct pubkey my_current_per_commitment_point;
struct secret *s; struct secret *s;
/* Our current per-commitment point is the commitment point in the last /* Our current per-commitment point is the commitment point in the last
* received signed commitment; HSM gives us that and the previous * received signed commitment; HSM gives us that and the previous
* secret (which we don't need). */ * secret (which we don't need). */
msg = towire_hsm_get_per_commitment_point(NULL, msg = towire_hsm_get_per_commitment_point(NULL,
next_index[LOCAL]-1); commitment_number);
if (!wire_sync_write(HSM_FD, take(msg))) if (!wire_sync_write(HSM_FD, take(msg)))
status_failed(STATUS_FAIL_HSM_IO, status_failed(STATUS_FAIL_HSM_IO,
"Writing get_per_commitment_point to HSM: %s", "Writing get_per_commitment_point to HSM: %s",
@@ -138,12 +130,30 @@ static void do_reconnect(struct per_peer_state *pps,
"Reading resp get_per_commitment_point reply: %s", "Reading resp get_per_commitment_point reply: %s",
strerror(errno)); strerror(errno));
if (!fromwire_hsm_get_per_commitment_point_reply(tmpctx, msg, if (!fromwire_hsm_get_per_commitment_point_reply(tmpctx, msg,
&my_current_per_commitment_point, &commitment_point,
&s)) &s))
status_failed(STATUS_FAIL_HSM_IO, status_failed(STATUS_FAIL_HSM_IO,
"Bad per_commitment_point reply %s", "Bad per_commitment_point reply %s",
tal_hex(tmpctx, msg)); tal_hex(tmpctx, msg));
return commitment_point;
}
static void do_reconnect(struct per_peer_state *pps,
const struct channel_id *channel_id,
const u64 next_index[NUM_SIDES],
u64 revocations_received,
const u8 *channel_reestablish,
const u8 *final_scriptpubkey,
const struct secret *last_remote_per_commit_secret)
{
u8 *msg;
struct channel_id their_channel_id;
u64 next_local_commitment_number, next_remote_revocation_number;
struct pubkey my_current_per_commitment_point, next_commitment_point;
my_current_per_commitment_point = get_per_commitment_point(next_index[LOCAL]-1);
/* BOLT #2: /* BOLT #2:
* *
* - upon reconnection: * - upon reconnection:
@@ -207,8 +217,20 @@ static void do_reconnect(struct per_peer_state *pps,
msg = towire_shutdown(NULL, channel_id, final_scriptpubkey); msg = towire_shutdown(NULL, channel_id, final_scriptpubkey);
sync_crypto_write(pps, take(msg)); sync_crypto_write(pps, take(msg));
/* FIXME: Spec says to re-xmit funding_locked here if we haven't /* BOLT #2:
* done any updates. */ *
* A node:
*...
* - if `next_commitment_number` is 1 in both the `channel_reestablish` it sent and received:
* - MUST retransmit `funding_locked`.
*/
if (next_index[REMOTE] == 1 && next_index[LOCAL] == 1) {
status_debug("Retransmitting funding_locked for channel %s",
type_to_string(tmpctx, struct channel_id, channel_id));
next_commitment_point = get_per_commitment_point(next_index[LOCAL]);
msg = towire_funding_locked(NULL, channel_id, &next_commitment_point);
sync_crypto_write(pps, take(msg));
}
} }
static void send_offer(struct per_peer_state *pps, static void send_offer(struct per_peer_state *pps,

27
tests/test_connection.py

@@ -391,6 +391,33 @@ def test_reconnect_gossiping(node_factory):
l2.daemon.wait_for_log('processing now old peer gone') l2.daemon.wait_for_log('processing now old peer gone')
@unittest.skipIf(not DEVELOPER, "needs dev-disconnect")
def test_reconnect_no_update(node_factory, executor):
"""
This tests if the `funding_locked` is sent if we receive a
`channel_reestablish` message with `next_commitment_number` == 1 and
our `next_commitment_number` == 1.
"""
disconnects = ["@WIRE_FUNDING_LOCKED", "@WIRE_SHUTDOWN"]
# Allow bad gossip because it might receive a WIRE_CHANNEL_UPDATE before the
# announcement because of the disconnection
l1 = node_factory.get_node(may_reconnect=True, allow_bad_gossip=True)
l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
# For channeld reconnection
l1.rpc.connect(l2.info["id"], "localhost", l2.port)
fundchannel_exec = executor.submit(l1.fund_channel, l2, 10**6, False)
l1.daemon.wait_for_log(r"channeld.* Retransmitting funding_locked for channel")
l1.stop()
# For closingd reconnection
scid = fundchannel_exec.result()
l1.daemon.start()
executor.submit(l1.rpc.close, scid, 0)
l2.daemon.wait_for_log(r"closingd.* Retransmitting funding_locked for channel")
l1.daemon.wait_for_log(r"CLOSINGD_COMPLETE")
def test_connect_stresstest(node_factory, executor): def test_connect_stresstest(node_factory, executor):
# This test is unreliable, but it's better than nothing. # This test is unreliable, but it's better than nothing.
l1 = node_factory.get_node(may_reconnect=True) l1 = node_factory.get_node(may_reconnect=True)

Loading…
Cancel
Save