
gossipd: prepare for internally-generated short-channel-id queries.

Up until now we only generated these in dev mode, for testing.  Hoist
that into common code, turn the counter into a flag (we're only allowed
one outstanding query at a time!) and note whether the query was
internal or not.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Branch: pull/2938/head
Author: Rusty Russell, 6 years ago
Commit: 5ef7aa70d2
gossipd/gossipd.c: 134 lines changed

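The heart of the change is swapping the per-peer counter for a single outstanding-query flag plus a marker for who asked. A minimal standalone sketch of that state machine (demo_peer, start_scid_query and on_reply_end are illustrative names, not gossipd code) shows why one bool is enough: BOLT #7 forbids sending a second query_short_channel_ids before the previous reply_short_channel_ids_end has arrived, so the outstanding count can only ever be 0 or 1, and the second flag records whether the eventual reply should be passed up to lightningd.

#include <stdbool.h>
#include <stdio.h>

struct demo_peer {
	bool scid_query_outstanding;	/* was: size_t num_scid_queries_outstanding */
	bool scid_query_was_internal;	/* should the reply go to lightningd? */
};

static bool start_scid_query(struct demo_peer *peer, bool internal)
{
	/* BOLT #7: MUST NOT send another query_short_channel_ids until
	 * the previous reply_short_channel_ids_end has been received. */
	if (peer->scid_query_outstanding)
		return false;
	peer->scid_query_outstanding = true;
	peer->scid_query_was_internal = internal;
	return true;
}

static void on_reply_end(struct demo_peer *peer)
{
	peer->scid_query_outstanding = false;
	/* Only externally-requested (dev) queries are reported upward. */
	if (!peer->scid_query_was_internal)
		printf("forward gossip_scids_reply to lightningd\n");
}

int main(void)
{
	struct demo_peer peer = { false, false };

	printf("first query accepted? %d\n", start_scid_query(&peer, true));
	printf("second query accepted? %d\n", start_scid_query(&peer, false));
	on_reply_end(&peer);
	printf("query after reply accepted? %d\n", start_scid_query(&peer, false));
	return 0;
}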
@@ -132,7 +132,7 @@ struct peer {
 	/* The two features gossip cares about (so far) */
 	bool gossip_queries_feature, initial_routing_sync_feature;
 
-	/* Are there outstanding queries on short_channel_ids? */
+	/* Are there outstanding responses for queries on short_channel_ids? */
 	const struct short_channel_id *scid_queries;
 	size_t scid_query_idx;
@@ -140,8 +140,9 @@ struct peer {
 	struct node_id *scid_query_nodes;
 	size_t scid_query_nodes_idx;
 
-	/* How many query responses are we expecting? */
-	size_t num_scid_queries_outstanding;
+	/* Do we have an scid_query outstanding? Was it internal? */
+	bool scid_query_outstanding;
+	bool scid_query_was_internal;
 
 	/* How many pongs are we expecting? */
 	size_t num_pings_outstanding;
@@ -516,6 +517,61 @@ static void maybe_send_own_node_announce(struct daemon *daemon)
 	daemon->rstate->local_channel_announced = false;
 }
 
+/* Query this peer for these short-channel-ids. */
+static bool query_short_channel_ids(struct daemon *daemon,
+				    struct peer *peer,
+				    const struct short_channel_id *scids,
+				    bool internal)
+{
+	u8 *encoded, *msg;
+
+	/* BOLT #7:
+	 *
+	 * 1. type: 261 (`query_short_channel_ids`) (`gossip_queries`)
+	 * 2. data:
+	 *    * [`32`:`chain_hash`]
+	 *    * [`2`:`len`]
+	 *    * [`len`:`encoded_short_ids`]
+	 */
+	const size_t reply_overhead = 32 + 2;
+	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
+
+	/* Can't query if they don't have gossip_queries_feature */
+	if (!peer->gossip_queries_feature)
+		return false;
+
+	/* BOLT #7:
+	 *
+	 * The sender:
+	 *  - MUST NOT send `query_short_channel_ids` if it has sent a previous
+	 *    `query_short_channel_ids` to this peer and not received
+	 *    `reply_short_channel_ids_end`.
+	 */
+	if (peer->scid_query_outstanding)
+		return false;
+
+	encoded = encode_short_channel_ids_start(tmpctx);
+	for (size_t i = 0; i < tal_count(scids); i++)
+		encode_add_short_channel_id(&encoded, &scids[i]);
+
+	if (!encode_short_channel_ids_end(&encoded, max_encoded_bytes)) {
+		status_broken("query_short_channel_ids: %zu is too many",
+			      tal_count(scids));
+		return false;
+	}
+
+	msg = towire_query_short_channel_ids(NULL, &daemon->chain_hash,
+					     encoded);
+	queue_peer_msg(peer, take(msg));
+	peer->scid_query_outstanding = true;
+	peer->scid_query_was_internal = internal;
+
+	status_trace("%s: sending query for %zu scids",
+		     type_to_string(tmpctx, struct node_id, &peer->id),
+		     tal_count(scids));
+	return true;
+}
+
 /*~Routines to handle gossip messages from peer, forwarded by subdaemons.
  *-----------------------------------------------------------------------
  *
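For a sense of scale, the size budget computed in query_short_channel_ids() above still allows thousands of ids in one query. A standalone back-of-the-envelope check, assuming the uncompressed encoding (one encoding-type byte followed by raw 8-byte short_channel_ids):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* Same arithmetic as query_short_channel_ids() above: a
	 * 65535-byte message minus the 2-byte type, 32-byte chain_hash
	 * and 2-byte len leaves this much room for encoded_short_ids. */
	const size_t reply_overhead = 32 + 2;
	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;

	/* Assumption: uncompressed encoding, 1 prefix byte + 8 bytes per scid. */
	const size_t max_uncompressed_scids = (max_encoded_bytes - 1) / 8;

	printf("max encoded_short_ids bytes: %zu\n", max_encoded_bytes);	/* 65499 */
	printf("roughly %zu uncompressed scids fit\n", max_uncompressed_scids);	/* 8187 */
	return 0;
}

So the encode_short_channel_ids_end() failure path only triggers for requests on the order of eight thousand scids, which is why the helper just logs status_broken() and returns false instead of trying to split the query.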
@@ -981,17 +1037,22 @@ static u8 *handle_reply_short_channel_ids_end(struct peer *peer, const u8 *msg)
 			       tal_hex(tmpctx, msg));
 	}
 
-	if (peer->num_scid_queries_outstanding == 0) {
+	if (!peer->scid_query_outstanding) {
 		return towire_errorfmt(peer, NULL,
 				       "unexpected reply_short_channel_ids_end: %s",
 				       tal_hex(tmpctx, msg));
 	}
 
-	peer->num_scid_queries_outstanding--;
-	/* We tell lightningd: this is because we currently only ask for
-	 * query_short_channel_ids when lightningd asks. */
-	msg = towire_gossip_scids_reply(msg, true, complete);
-	daemon_conn_send(peer->daemon->master, take(msg));
+	peer->scid_query_outstanding = false;
+
+	/* If it wasn't generated by us, it's the dev interface from lightningd
+	 */
+	if (!peer->scid_query_was_internal) {
+		msg = towire_gossip_scids_reply(msg, true, complete);
+		daemon_conn_send(peer->daemon->master, take(msg));
+	}
 
 	/* All good, no error. */
 	return NULL;
 }
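With the reply routing above in place, a future internal caller only has to pass internal = true: the query goes out, but the eventual reply_short_channel_ids_end is consumed by gossipd instead of producing a gossip_scids_reply to lightningd. A hypothetical caller might look like this sketch (query_unknown_scids is an invented name; it assumes the gossipd types and helpers that appear elsewhere in this diff):

/* Hypothetical, not part of this commit: ask a peer about scids we
 * could not resolve locally.  internal = true means gossipd keeps the
 * resulting reply to itself rather than forwarding it to lightningd. */
static void query_unknown_scids(struct daemon *daemon,
				struct peer *peer,
				const struct short_channel_id *unknown)
{
	if (!query_short_channel_ids(daemon, peer, unknown, true))
		status_trace("could not query %zu unknown scids",
			     tal_count(unknown));
}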
@@ -1625,7 +1686,7 @@ static struct io_plan *connectd_new_peer(struct io_conn *conn,
 	peer->scid_query_idx = 0;
 	peer->scid_query_nodes = NULL;
 	peer->scid_query_nodes_idx = 0;
-	peer->num_scid_queries_outstanding = 0;
+	peer->scid_query_outstanding = false;
 	peer->query_channel_blocks = NULL;
 	peer->num_pings_outstanding = 0;
@@ -2280,8 +2341,6 @@ static struct io_plan *get_incoming_channels(struct io_conn *conn,
 }
 
 #if DEVELOPER
-/* FIXME: One day this will be called internally; for now it's just for
- * testing with dev_query_scids. */
 static struct io_plan *query_scids_req(struct io_conn *conn,
 				       struct daemon *daemon,
 				       const u8 *msg)
@@ -2289,17 +2348,6 @@ static struct io_plan *query_scids_req(struct io_conn *conn,
 	struct node_id id;
 	struct short_channel_id *scids;
 	struct peer *peer;
-	u8 *encoded;
-	/* BOLT #7:
-	 *
-	 * 1. type: 261 (`query_short_channel_ids`) (`gossip_queries`)
-	 * 2. data:
-	 *    * [`32`:`chain_hash`]
-	 *    * [`2`:`len`]
-	 *    * [`len`:`encoded_short_ids`]
-	 */
-	const size_t reply_overhead = 32 + 2;
-	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
 
 	if (!fromwire_gossip_query_scids(msg, msg, &id, &scids))
 		master_badmsg(WIRE_GOSSIP_QUERY_SCIDS, msg);
@@ -2308,40 +2356,14 @@ static struct io_plan *query_scids_req(struct io_conn *conn,
 	if (!peer) {
 		status_broken("query_scids: unknown peer %s",
 			      type_to_string(tmpctx, struct node_id, &id));
-		goto fail;
-	}
-
-	if (!peer->gossip_queries_feature) {
-		status_broken("query_scids: no gossip_query support in peer %s",
-			      type_to_string(tmpctx, struct node_id, &id));
-		goto fail;
-	}
-
-	encoded = encode_short_channel_ids_start(tmpctx);
-	for (size_t i = 0; i < tal_count(scids); i++)
-		encode_add_short_channel_id(&encoded, &scids[i]);
-
-	/* Because this is a dev command, we simply say this case is
-	 * "too hard". */
-	if (!encode_short_channel_ids_end(&encoded, max_encoded_bytes)) {
-		status_broken("query_short_channel_ids: %zu is too many",
-			      tal_count(scids));
-		goto fail;
-	}
-
-	msg = towire_query_short_channel_ids(NULL, &daemon->chain_hash,
-					     encoded);
-	queue_peer_msg(peer, take(msg));
-	peer->num_scid_queries_outstanding++;
-
-	status_trace("sending query for %zu scids", tal_count(scids));
-
-out:
+		daemon_conn_send(daemon->master,
+				 take(towire_gossip_scids_reply(NULL,
+								false, false)));
+	} else if (!query_short_channel_ids(daemon, peer, scids, false))
+		daemon_conn_send(daemon->master,
+				 take(towire_gossip_scids_reply(NULL,
+								false, false)));
+
 	return daemon_conn_read_next(conn, daemon->master);
-
-fail:
-	daemon_conn_send(daemon->master,
-			 take(towire_gossip_scids_reply(NULL, false, false)));
-	goto out;
 }
/* BOLT #7:
