
gossipd: have master explicitly tell us when peer is disconnected.

Currently we intuit it from the fd being closed, but that may happen out
of order with when the master thinks it's dead.

So now if the gossip fd closes we just ignore it, and we'll get a
notification from the master when the peer is disconnected.

The notification is slightly ugly in that we have to disable it for
a channel when we manually hand the channel back to gossipd.

Note: as it stands, this is racy with reconnects.  See the next patch.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Tag: ppa-0.6.1
Author: Rusty Russell (7 years ago)
Commit: be1f33b265
 gossipd/gossip.c             | 51
 gossipd/gossip_wire.csv      |  4
 lightningd/channel.c         | 19
 lightningd/channel.h         |  6
 lightningd/gossip_control.c  |  1
 lightningd/opening_control.c |  6
 lightningd/peer_control.c    |  3
 wallet/test/run-wallet.c     |  3
 wallet/wallet.c              |  4
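Before the per-file diffs, here is the rule this commit implements, reduced to a standalone sketch: the master sends the new gossipctl_peer_disconnected message only when a channel that was connected loses an owner that talks to the peer, and the flag is cleared first when a live peer is deliberately handed back to gossipd (the "slightly ugly" case above). The structs and should_notify_gossipd() below are hypothetical stand-ins for illustration, not lightningd's real types; the real logic lives in channel_set_owner() and channel_errmsg() in the hunks that follow.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for lightningd's structs, only to show the
 * decision channel_set_owner() makes in the diff below. */
struct owner { bool talks_to_peer; };
struct chan { bool connected; };

static bool connects_to_peer(const struct owner *owner)
{
        return owner && owner->talks_to_peer;
}

/* True iff the master should send gossipctl_peer_disconnected: the
 * channel was connected, but the new owner (possibly none, or a daemon
 * like onchaind) no longer talks to the peer. */
static bool should_notify_gossipd(struct chan *channel,
                                  const struct owner *new_owner)
{
        bool notify = channel->connected && !connects_to_peer(new_owner);
        channel->connected = connects_to_peer(new_owner);
        return notify;
}

int main(void)
{
        struct chan dead = { .connected = true };
        struct chan handed_back = { .connected = true };

        /* Owner gone (e.g. channel handed to onchaind): gossipd is told. */
        printf("owner gone: notify=%d\n",
               should_notify_gossipd(&dead, NULL));

        /* Handing the peer back to gossipd: the caller clears the flag
         * first (as channel_errmsg() does below), so no notification. */
        handed_back.connected = false;
        printf("handed back: notify=%d\n",
               should_notify_gossipd(&handed_back, NULL));
        return 0;
}

Run as-is this prints notify=1 then notify=0, matching the two cases described in the commit message.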

--- a/gossipd/gossip.c
+++ b/gossipd/gossip.c
@@ -398,8 +398,8 @@ static struct io_plan *peer_init_received(struct io_conn *conn,
         peer->broadcast_index
                 = peer->daemon->rstate->broadcasts->next_index;
 
-        /* This is a full peer now; we keep it around until its
-         * gossipfd closed (forget_peer) or reconnect. */
+        /* This is a full peer now; we keep it around until master says
+         * it's dead, or reconnect. */
         peer_finalized(peer);
 
         /* We will not have anything queued, since we're not duplex. */
@@ -845,25 +845,12 @@ static struct io_plan *owner_msg_in(struct io_conn *conn,
                 status_broken("peer %s: send us unknown msg of type %s",
                               type_to_string(tmpctx, struct pubkey, &peer->id),
                               gossip_wire_type_name(type));
-                /* Calls forget_peer */
                 return io_close(conn);
         }
 
         return daemon_conn_read_next(conn, dc);
 }
 
-static void forget_peer(struct io_conn *conn UNUSED, struct daemon_conn *dc)
-{
-        struct peer *peer = dc->ctx;
-
-        status_trace("Forgetting %s peer %s",
-                     peer->local ? "local" : "remote",
-                     type_to_string(tmpctx, struct pubkey, &peer->id));
-
-        /* Free peer. */
-        tal_free(dc->ctx);
-}
-
 /* When a peer is to be owned by another daemon, we create a socket
  * pair to send/receive gossip from it */
 static bool send_peer_with_fds(struct peer *peer, const u8 *msg)
@@ -884,7 +871,7 @@ static bool send_peer_with_fds(struct peer *peer, const u8 *msg)
         peer->local = tal_free(peer->local);
         peer->remote = tal(peer, struct daemon_conn);
         daemon_conn_init(peer, peer->remote, fds[0],
-                         owner_msg_in, forget_peer);
+                         owner_msg_in, NULL);
         peer->remote->msg_queue_cleared_cb = nonlocal_dump_gossip;
 
         /* Peer stays around, even though caller will close conn. */
@@ -1871,6 +1858,35 @@ static struct io_plan *peer_important(struct io_conn *conn,
         return daemon_conn_read_next(conn, &daemon->master);
 }
 
+static struct io_plan *peer_disconnected(struct io_conn *conn,
+                                         struct daemon *daemon, const u8 *msg)
+{
+        struct pubkey id;
+        struct peer *peer;
+
+        if (!fromwire_gossipctl_peer_disconnected(msg, &id))
+                master_badmsg(WIRE_GOSSIPCTL_PEER_DISCONNECTED, msg);
+
+        peer = find_peer(daemon, &id);
+        if (!peer)
+                status_failed(STATUS_FAIL_INTERNAL_ERROR,
+                              "peer_disconnected unknown peer: %s",
+                              type_to_string(tmpctx, struct pubkey, &id));
+
+        /* Possible if there's a reconnect: ignore disconnect. */
+        if (peer->local) {
+                status_trace("peer_disconnected %s: reconnected, ignoring",
+                             type_to_string(tmpctx, struct pubkey, &id));
+                return daemon_conn_read_next(conn, &daemon->master);
+        }
+
+        status_trace("Forgetting remote peer %s",
+                     type_to_string(tmpctx, struct pubkey, &peer->id));
+        tal_free(peer);
+
+        return daemon_conn_read_next(conn, &daemon->master);
+}
+
 static struct io_plan *get_peers(struct io_conn *conn,
                                  struct daemon *daemon, const u8 *msg)
 {
@@ -2122,6 +2138,9 @@ static struct io_plan *recv_req(struct io_conn *conn, struct daemon_conn *master
         case WIRE_GOSSIPCTL_PEER_IMPORTANT:
                 return peer_important(conn, daemon, master->msg_in);
 
+        case WIRE_GOSSIPCTL_PEER_DISCONNECTED:
+                return peer_disconnected(conn, daemon, master->msg_in);
+
         case WIRE_GOSSIP_GETPEERS_REQUEST:
                 return get_peers(conn, daemon, master->msg_in);

--- a/gossipd/gossip_wire.csv
+++ b/gossipd/gossip_wire.csv
@@ -92,6 +92,10 @@ gossipctl_hand_back_peer,,crypto_state,struct crypto_state
 gossipctl_hand_back_peer,,len,u16
 gossipctl_hand_back_peer,,msg,len*u8
 
+# master -> gossipd: peer has disconnected.
+gossipctl_peer_disconnected,3015
+gossipctl_peer_disconnected,,id,struct pubkey
+
 # Pass JSON-RPC getnodes call through
 gossip_getnodes_request,3005
 # Can be 0 or 1 currently


--- a/lightningd/channel.c
+++ b/lightningd/channel.c
@@ -12,13 +12,26 @@
 #include <lightningd/peer_control.h>
 #include <lightningd/subd.h>
 
+static bool connects_to_peer(struct subd *owner)
+{
+        return owner && owner->talks_to_peer;
+}
+
 void channel_set_owner(struct channel *channel, struct subd *owner)
 {
         struct subd *old_owner = channel->owner;
 
         channel->owner = owner;
-        if (old_owner)
+        if (old_owner) {
                 subd_release_channel(old_owner, channel);
+                if (channel->connected && !connects_to_peer(owner)) {
+                        u8 *msg = towire_gossipctl_peer_disconnected(NULL,
+                                                     &channel->peer->id);
+                        subd_send_msg(channel->peer->ld->gossip, take(msg));
+                        channel->connected = false;
+                }
+        }
+        channel->connected = connects_to_peer(owner);
 }
 
 struct htlc_out *channel_has_htlc_out(struct channel *channel)
@@ -155,7 +168,8 @@ struct channel *new_channel(struct peer *peer, u64 dbid,
                             struct changed_htlc *last_sent_commit,
                             u32 first_blocknum,
                             u32 min_possible_feerate,
-                            u32 max_possible_feerate)
+                            u32 max_possible_feerate,
+                            bool connected)
 {
         struct channel *channel = tal(peer->ld, struct channel);
 
@@ -212,6 +226,7 @@ struct channel *new_channel(struct peer *peer, u64 dbid,
         channel->first_blocknum = first_blocknum;
         channel->min_possible_feerate = min_possible_feerate;
         channel->max_possible_feerate = max_possible_feerate;
+        channel->connected = connected;
         derive_channel_seed(peer->ld, &channel->seed, &peer->id, channel->dbid);
 
         list_add_tail(&peer->channels, &channel->list);

--- a/lightningd/channel.h
+++ b/lightningd/channel.h
@@ -96,6 +96,9 @@ struct channel {
         /* Feerate range */
         u32 min_possible_feerate, max_possible_feerate;
+
+        /* Does gossipd need to know if the owner dies? (ie. not onchaind) */
+        bool connected;
 };
 
 struct channel *new_channel(struct peer *peer, u64 dbid,
@@ -136,7 +139,8 @@ struct channel *new_channel(struct peer *peer, u64 dbid,
                             struct changed_htlc *last_sent_commit,
                             u32 first_blocknum,
                             u32 min_possible_feerate,
-                            u32 max_possible_feerate);
+                            u32 max_possible_feerate,
+                            bool connected);
 
 void delete_channel(struct channel *channel);

--- a/lightningd/gossip_control.c
+++ b/lightningd/gossip_control.c
@@ -137,6 +137,7 @@ static unsigned gossip_msg(struct subd *gossip, const u8 *msg, const int *fds)
         case WIRE_GOSSIP_MARK_CHANNEL_UNROUTABLE:
         case WIRE_GOSSIPCTL_PEER_DISCONNECT:
         case WIRE_GOSSIPCTL_PEER_IMPORTANT:
+        case WIRE_GOSSIPCTL_PEER_DISCONNECTED:
         /* This is a reply, so never gets through to here. */
         case WIRE_GOSSIPCTL_INIT_REPLY:
         case WIRE_GOSSIP_GET_UPDATE_REPLY:

--- a/lightningd/opening_control.c
+++ b/lightningd/opening_control.c
@@ -237,7 +237,9 @@ wallet_commit_channel(struct lightningd *ld,
                               final_key_idx, false,
                               NULL, /* No commit sent yet */
                               uc->first_blocknum,
-                              feerate, feerate);
+                              feerate, feerate,
+                              /* We are connected */
+                              true);
 
         /* Now we finally put it in the database. */
         wallet_channel_insert(ld->wallet, channel);
@@ -546,7 +548,9 @@ static void opening_channel_errmsg(struct uncommitted_channel *uc,
                             const u8 *err_for_them)
 {
         if (peer_fd == -1) {
+                u8 *msg = towire_gossipctl_peer_disconnected(tmpctx, &uc->peer->id);
                 log_info(uc->log, "%s", desc);
+                subd_send_msg(uc->peer->ld->gossip, msg);
                 if (uc->fc)
                         command_fail(uc->fc->cmd, "%s", desc);
         } else {

--- a/lightningd/peer_control.c
+++ b/lightningd/peer_control.c
@@ -366,6 +366,9 @@ void channel_errmsg(struct channel *channel,
                                                err_for_them,
                                                tal_len(err_for_them), 0);
 
+        /* Make sure channel_fail_permanent doesn't tell gossipd we died! */
+        channel->connected = false;
+
         /* BOLT #1:
          *
          * A sending node:

--- a/wallet/test/run-wallet.c
+++ b/wallet/test/run-wallet.c
@@ -376,6 +376,9 @@ u8 *towire_gossipctl_peer_addrhint(const tal_t *ctx UNNEEDED, const struct pubke
 /* Generated stub for towire_gossipctl_peer_disconnect */
 u8 *towire_gossipctl_peer_disconnect(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED)
 { fprintf(stderr, "towire_gossipctl_peer_disconnect called!\n"); abort(); }
+/* Generated stub for towire_gossipctl_peer_disconnected */
+u8 *towire_gossipctl_peer_disconnected(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED)
+{ fprintf(stderr, "towire_gossipctl_peer_disconnected called!\n"); abort(); }
 /* Generated stub for towire_gossipctl_peer_important */
 u8 *towire_gossipctl_peer_important(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED, bool important UNNEEDED)
 { fprintf(stderr, "towire_gossipctl_peer_important called!\n"); abort(); }

--- a/wallet/wallet.c
+++ b/wallet/wallet.c
@@ -651,7 +651,9 @@ static struct channel *wallet_stmt2channel(const tal_t *ctx, struct wallet *w, s
                            last_sent_commit,
                            sqlite3_column_int64(stmt, 35),
                            sqlite3_column_int(stmt, 36),
-                           sqlite3_column_int(stmt, 37));
+                           sqlite3_column_int(stmt, 37),
+                           /* Not connected */
+                           false);
 
         return chan;
 }
