Browse Source

channeld: exit after shutdown when no more HTLCs.

Ready for the introduction of closingd.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
ppa-0.6.1
Rusty Russell 7 years ago
parent
commit
6fa90c926a
  1. 15
      lightningd/channel.c
  2. 6
      lightningd/channel.h
  3. 69
      lightningd/channel/channel.c
  4. 5
      lightningd/channel/channel_wire.csv
  5. 36
      lightningd/peer_control.c
  6. 3
      tests/test_lightningd.py

15
lightningd/channel.c

@ -792,6 +792,21 @@ bool channel_awaiting_revoke_and_ack(const struct channel *channel)
return false; return false;
} }
bool channel_has_htlcs(const struct channel *channel)
{
struct htlc_map_iter it;
const struct htlc *htlc;
for (htlc = htlc_map_first(&channel->htlcs, &it);
htlc;
htlc = htlc_map_next(&channel->htlcs, &it)) {
/* FIXME: Clean these out! */
if (!htlc_is_dead(htlc))
return true;
}
return false;
}
static bool adjust_balance(struct channel *channel, struct htlc *htlc) static bool adjust_balance(struct channel *channel, struct htlc *htlc)
{ {
enum side side; enum side side;

6
lightningd/channel.h

@ -371,6 +371,12 @@ bool channel_sending_revoke_and_ack(struct channel *channel);
*/ */
bool channel_awaiting_revoke_and_ack(const struct channel *channel); bool channel_awaiting_revoke_and_ack(const struct channel *channel);
/**
* channel_has_htlcs: are there any HTLCs at all in channel?
* @channel: the channel
*/
bool channel_has_htlcs(const struct channel *channel);
/** /**
* channel_force_htlcs: force these htlcs into the (new) channel * channel_force_htlcs: force these htlcs into the (new) channel
* @channel: the channel * @channel: the channel

69
lightningd/channel/channel.c

@ -125,6 +125,7 @@ struct peer {
/* If master told us to shut down, this contains scriptpubkey until /* If master told us to shut down, this contains scriptpubkey until
* we're ready to send it. */ * we're ready to send it. */
u8 *unsent_shutdown_scriptpubkey; u8 *unsent_shutdown_scriptpubkey;
bool shutdown_sent[NUM_SIDES];
/* Information used for reestablishment. */ /* Information used for reestablishment. */
bool last_was_revoke; bool last_was_revoke;
@ -433,6 +434,13 @@ static u8 *sending_commitsig_msg(const tal_t *ctx,
return msg; return msg;
} }
static bool shutdown_complete(const struct peer *peer)
{
return peer->shutdown_sent[LOCAL]
&& peer->shutdown_sent[REMOTE]
&& !channel_has_htlcs(peer->channel);
}
/* BOLT #2: /* BOLT #2:
* *
* A node MUST NOT send a `shutdown` if there are updates pending on * A node MUST NOT send a `shutdown` if there are updates pending on
@ -451,6 +459,7 @@ static void maybe_send_shutdown(struct peer *peer)
msg_enqueue(&peer->peer_out, take(msg)); msg_enqueue(&peer->peer_out, take(msg));
peer->unsent_shutdown_scriptpubkey peer->unsent_shutdown_scriptpubkey
= tal_free(peer->unsent_shutdown_scriptpubkey); = tal_free(peer->unsent_shutdown_scriptpubkey);
peer->shutdown_sent[LOCAL] = true;
} }
/* Master has acknowledged that we're sending commitment, so send it. */ /* Master has acknowledged that we're sending commitment, so send it. */
@ -472,6 +481,9 @@ static void handle_sending_commitsig_reply(struct peer *peer, const u8 *msg)
/* Timer now considered expired, you can add a new one. */ /* Timer now considered expired, you can add a new one. */
peer->commit_timer = NULL; peer->commit_timer = NULL;
start_commit_timer(peer); start_commit_timer(peer);
if (shutdown_complete(peer))
io_break(peer);
} }
/* This blocks other traffic from the master until we get reply. */ /* This blocks other traffic from the master until we get reply. */
@ -601,6 +613,9 @@ static void send_commit(struct peer *peer)
/* Covers the case where we've just been told to shutdown. */ /* Covers the case where we've just been told to shutdown. */
maybe_send_shutdown(peer); maybe_send_shutdown(peer);
if (shutdown_complete(peer))
io_break(peer);
peer->commit_timer = NULL; peer->commit_timer = NULL;
tal_free(tmpctx); tal_free(tmpctx);
return; return;
@ -951,6 +966,11 @@ static struct io_plan *accepted_revocation(struct io_conn *conn,
struct peer *peer) struct peer *peer)
{ {
start_commit_timer(peer); start_commit_timer(peer);
/* We might now have an empty HTLC. */
if (shutdown_complete(peer))
io_break(peer);
return peer_read_message(conn, &peer->pcs, peer_in); return peer_read_message(conn, &peer->pcs, peer_in);
} }
@ -1239,9 +1259,14 @@ static struct io_plan *handle_peer_shutdown(struct io_conn *conn,
if (!fromwire_shutdown(peer, shutdown, NULL, &channel_id, &scriptpubkey)) if (!fromwire_shutdown(peer, shutdown, NULL, &channel_id, &scriptpubkey))
status_failed(WIRE_CHANNEL_PEER_READ_FAILED, "Bad shutdown"); status_failed(WIRE_CHANNEL_PEER_READ_FAILED, "Bad shutdown");
/* Tell master, it will tell us what to send. */ /* Tell master, it will tell us what to send (if any). */
daemon_conn_send(&peer->master, daemon_conn_send(&peer->master,
take(towire_channel_got_shutdown(peer, scriptpubkey))); take(towire_channel_got_shutdown(peer, scriptpubkey)));
peer->shutdown_sent[REMOTE] = true;
if (shutdown_complete(peer))
io_break(peer);
return peer_read_message(conn, &peer->pcs, peer_in); return peer_read_message(conn, &peer->pcs, peer_in);
} }
@ -1586,6 +1611,10 @@ again:
*/ */
maybe_send_shutdown(peer); maybe_send_shutdown(peer);
/* Corner case: we didn't send shutdown before because update_add_htlc
* pending, but now they're cleared by restart, and we're actually
* complete. In that case, their `shutdown` will trigger us. */
/* Start commit timer: if we sent revoke we might need it. */ /* Start commit timer: if we sent revoke we might need it. */
start_commit_timer(peer); start_commit_timer(peer);
@ -1913,6 +1942,7 @@ static struct io_plan *req_in(struct io_conn *conn, struct daemon_conn *master)
case WIRE_CHANNEL_GOT_REVOKE_REPLY: case WIRE_CHANNEL_GOT_REVOKE_REPLY:
case WIRE_CHANNEL_GOT_FUNDING_LOCKED: case WIRE_CHANNEL_GOT_FUNDING_LOCKED:
case WIRE_CHANNEL_GOT_SHUTDOWN: case WIRE_CHANNEL_GOT_SHUTDOWN:
case WIRE_CHANNEL_SHUTDOWN_COMPLETE:
break; break;
} }
status_failed(WIRE_CHANNEL_BAD_COMMAND, "%u %s", t, status_failed(WIRE_CHANNEL_BAD_COMMAND, "%u %s", t,
@ -2002,6 +2032,7 @@ static void init_channel(struct peer *peer)
&peer->short_channel_ids[LOCAL], &peer->short_channel_ids[LOCAL],
&reconnected, &reconnected,
&peer->unsent_shutdown_scriptpubkey, &peer->unsent_shutdown_scriptpubkey,
&peer->shutdown_sent[REMOTE],
&peer->channel_flags, &peer->channel_flags,
&funding_signed)) &funding_signed))
status_failed(WIRE_CHANNEL_BAD_COMMAND, "Init: %s", status_failed(WIRE_CHANNEL_BAD_COMMAND, "Init: %s",
@ -2075,6 +2106,34 @@ static void gossip_gone(struct io_conn *unused, struct daemon_conn *dc)
"Gossip connection closed"); "Gossip connection closed");
} }
/* Flush everything still queued for the peer, then notify the master that
 * shutdown is complete, handing back the crypto state plus the peer and
 * gossip fds so it can start closing negotiation. */
static void send_shutdown_complete(struct peer *peer)
{
	const u8 *msg;

	/* Push out any outstanding messages to peer. */
	if (!io_flush_sync(peer->peer_conn))
		status_failed(WIRE_CHANNEL_PEER_WRITE_FAILED, "Syncing conn");

	/* Set FD blocking to flush it */
	io_fd_block(PEER_FD, true);

	/* Drain our own outgoing queue synchronously; we are exiting, so the
	 * async io loop will not get another chance to write these. */
	while ((msg = msg_dequeue(&peer->peer_out)) != NULL) {
		if (!sync_crypto_write(&peer->pcs.cs, PEER_FD, take(msg)))
			status_failed(WIRE_CHANNEL_PEER_WRITE_FAILED,
				      "Flushing msgs");
	}

	/* Now we can tell master shutdown is complete. */
	/* NOTE(review): fd order (peer, then gossip) matters — the master's
	 * handler expects exactly 2 fds; see peer_start_closing. */
	daemon_conn_send(&peer->master,
			 take(towire_channel_shutdown_complete(peer,
							       &peer->pcs.cs)));
	daemon_conn_send_fd(&peer->master, PEER_FD);
	daemon_conn_send_fd(&peer->master, GOSSIP_FD);
	if (!daemon_conn_sync_flush(&peer->master))
		status_failed(WIRE_CHANNEL_INTERNAL_ERROR, "Flushing master");
}
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
struct peer *peer = tal(NULL, struct peer); struct peer *peer = tal(NULL, struct peer);
@ -2101,7 +2160,7 @@ int main(int argc, char *argv[])
msg_queue_init(&peer->master_deferred, peer); msg_queue_init(&peer->master_deferred, peer);
msg_queue_init(&peer->peer_out, peer); msg_queue_init(&peer->peer_out, peer);
peer->next_commit_sigs = NULL; peer->next_commit_sigs = NULL;
peer->unsent_shutdown_scriptpubkey = NULL; peer->shutdown_sent[LOCAL] = false;
/* We send these to HSM to get real signatures; don't have valgrind /* We send these to HSM to get real signatures; don't have valgrind
* complain. */ * complain. */
@ -2116,7 +2175,6 @@ int main(int argc, char *argv[])
gossip_client_recv, gossip_gone); gossip_client_recv, gossip_gone);
init_peer_crypto_state(peer, &peer->pcs); init_peer_crypto_state(peer, &peer->pcs);
peer->funding_locked[LOCAL] = peer->funding_locked[REMOTE] = false;
/* Read init_channel message sync. */ /* Read init_channel message sync. */
init_channel(peer); init_channel(peer);
@ -2130,7 +2188,10 @@ int main(int argc, char *argv[])
timer_expired(peer, expired); timer_expired(peer, expired);
} }
tal_free(peer); /* We only exit when shutdown is complete. */
assert(shutdown_complete(peer));
send_shutdown_complete(peer);
return 0; return 0;
} }
#endif /* TESTING */ #endif /* TESTING */

5
lightningd/channel/channel_wire.csv

@ -67,6 +67,7 @@ channel_init,,funding_short_id,struct short_channel_id
channel_init,,reestablish,bool channel_init,,reestablish,bool
channel_init,,shutdown_scriptpubkey_len,u16 channel_init,,shutdown_scriptpubkey_len,u16
channel_init,,shutdown_scriptpubkey,shutdown_scriptpubkey_len*u8 channel_init,,shutdown_scriptpubkey,shutdown_scriptpubkey_len*u8
channel_init,,remote_shutdown_received,bool
channel_init,,flags,u8 channel_init,,flags,u8
channel_init,,init_peer_pkt_len,u16 channel_init,,init_peer_pkt_len,u16
channel_init,,init_peer_pkt,init_peer_pkt_len*u8 channel_init,,init_peer_pkt,init_peer_pkt_len*u8
@ -180,3 +181,7 @@ channel_send_shutdown,,scriptpubkey,scriptpubkey_len*u8
channel_got_shutdown,24 channel_got_shutdown,24
channel_got_shutdown,,scriptpubkey_len,u16 channel_got_shutdown,,scriptpubkey_len,u16
channel_got_shutdown,,scriptpubkey,scriptpubkey_len*u8 channel_got_shutdown,,scriptpubkey,scriptpubkey_len*u8
# Shutdown is complete, ready for closing negotiation. + peer_fd & gossip_fd.
channel_shutdown_complete,25
channel_shutdown_complete,,crypto_state,struct crypto_state

Can't render this file because it has a wrong number of fields in line 2.

36
lightningd/peer_control.c

@ -1058,7 +1058,38 @@ static int peer_got_bad_message(struct peer *peer, const u8 *msg)
return -1; return -1;
} }
static int channel_msg(struct subd *sd, const u8 *msg, const int *unused) static int peer_start_closing(struct peer *peer, const u8 *msg, const int *fds)
{
struct crypto_state cs;
/* We expect 2 fds. */
if (!fds)
return 2;
if (!fromwire_channel_shutdown_complete(msg, NULL, &cs)) {
peer_internal_error(peer, "bad shutdown_complete: %s",
tal_hex(peer, msg));
return -1;
}
if (peer->local_shutdown_idx == -1
|| !peer->remote_shutdown_scriptpubkey) {
peer_internal_error(peer,
"Can't start closing: local %s remote %s",
peer->local_shutdown_idx == -1
? "not shutdown" : "shutdown",
peer->remote_shutdown_scriptpubkey
? "shutdown" : "not shutdown");
return -1;
}
/* FIXME: Start closingd. */
peer->owner = NULL;
peer_set_condition(peer, CHANNELD_SHUTTING_DOWN, CLOSINGD_SIGEXCHANGE);
return -1;
}
static int channel_msg(struct subd *sd, const u8 *msg, const int *fds)
{ {
enum channel_wire_type t = fromwire_peektype(msg); enum channel_wire_type t = fromwire_peektype(msg);
@ -1079,6 +1110,8 @@ static int channel_msg(struct subd *sd, const u8 *msg, const int *unused)
return peer_got_funding_locked(sd->peer, msg); return peer_got_funding_locked(sd->peer, msg);
case WIRE_CHANNEL_GOT_SHUTDOWN: case WIRE_CHANNEL_GOT_SHUTDOWN:
return peer_got_shutdown(sd->peer, msg); return peer_got_shutdown(sd->peer, msg);
case WIRE_CHANNEL_SHUTDOWN_COMPLETE:
return peer_start_closing(sd->peer, msg, fds);
/* We let peer_owner_finished handle these as transient errors. */ /* We let peer_owner_finished handle these as transient errors. */
case WIRE_CHANNEL_BAD_COMMAND: case WIRE_CHANNEL_BAD_COMMAND:
@ -1224,6 +1257,7 @@ static bool peer_start_channeld(struct peer *peer,
&funding_channel_id, &funding_channel_id,
peer->reconnected, peer->reconnected,
shutdown_scriptpubkey, shutdown_scriptpubkey,
peer->remote_shutdown_scriptpubkey != NULL,
peer->channel_flags, peer->channel_flags,
funding_signed); funding_signed);

3
tests/test_lightningd.py

@ -318,6 +318,9 @@ class LightningDTests(BaseLightningDTests):
l1.daemon.wait_for_log('-> CHANNELD_SHUTTING_DOWN') l1.daemon.wait_for_log('-> CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log('-> CHANNELD_SHUTTING_DOWN') l2.daemon.wait_for_log('-> CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log('-> CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log('-> CLOSINGD_SIGEXCHANGE')
def test_gossip_jsonrpc(self): def test_gossip_jsonrpc(self):
l1,l2 = self.connect() l1,l2 = self.connect()

Loading…
Cancel
Save