diff --git a/gossipd/Makefile b/gossipd/Makefile
index 50ce42251..4eb781902 100644
--- a/gossipd/Makefile
+++ b/gossipd/Makefile
@@ -17,6 +17,7 @@ LIGHTNINGD_GOSSIP_HEADERS_WSRC := gossipd/gen_gossip_wire.h \
 	gossipd/gen_gossip_store.h			\
 	gossipd/gossipd.h				\
 	gossipd/gossip_store.h				\
+	gossipd/queries.h				\
 	gossipd/gossip_generation.h			\
 	gossipd/routing.h
 LIGHTNINGD_GOSSIP_HEADERS := $(LIGHTNINGD_GOSSIP_HEADERS_WSRC) gossipd/broadcast.h
diff --git a/gossipd/gossipd.c b/gossipd/gossipd.c
index 730b82335..6bac883e6 100644
--- a/gossipd/gossipd.c
+++ b/gossipd/gossipd.c
@@ -12,12 +12,9 @@
  */
 #include <bitcoin/chainparams.h>
 #include <ccan/array_size/array_size.h>
-#include <ccan/asort/asort.h>
-#include <ccan/bitmap/bitmap.h>
 #include <ccan/build_assert/build_assert.h>
 #include <ccan/cast/cast.h>
 #include <ccan/container_of/container_of.h>
-#include <ccan/crc32c/crc32c.h>
 #include <ccan/crypto/hkdf_sha256/hkdf_sha256.h>
 #include <ccan/crypto/siphash24/siphash24.h>
 #include <ccan/endian/endian.h>
@@ -28,12 +25,10 @@
 #include <ccan/noerr/noerr.h>
 #include <ccan/take/take.h>
 #include <ccan/tal/str/str.h>
-#include <ccan/timer/timer.h>
 #include <common/bech32.h>
 #include <common/bech32_util.h>
 #include <common/cryptomsg.h>
 #include <common/daemon_conn.h>
-#include <common/decode_short_channel_ids.h>
 #include <common/features.h>
 #include <common/memleak.h>
 #include <common/ping.h>
@@ -54,6 +49,7 @@
 #include <gossipd/gen_gossip_wire.h>
 #include <gossipd/gossip_generation.h>
 #include <gossipd/gossipd.h>
+#include <gossipd/queries.h>
 #include <gossipd/routing.h>
 #include <inttypes.h>
 #include <lightningd/gossip_msg.h>
@@ -69,26 +65,12 @@
 #include <wire/gen_peer_wire.h>
 #include <wire/wire_io.h>
 #include <wire/wire_sync.h>
-#include <zlib.h>
 
 /* In developer mode we provide hooks for whitebox testing */
 #if DEVELOPER
-static u32 max_encoding_bytes = -1U;
 static bool suppress_gossip = false;
 #endif
 
-/*~ How gossipy do we ask a peer to be? */
-enum gossip_level {
-	/* Give us everything since epoch */
-	GOSSIP_HIGH,
-	/* Give us everything from 24 hours ago. */
-	GOSSIP_MEDIUM,
-	/* Give us everything from now. */
-	GOSSIP_LOW,
-	/* Give us nothing. */
-	GOSSIP_NONE,
-};
-
 /* What are our targets for each gossip level? (including levels above).
  *
  * If we're missing gossip: 3 high.
@@ -96,50 +78,6 @@ enum gossip_level {
  */
 static const size_t gossip_level_targets[] = { 3, 2, 8, SIZE_MAX };
 
-/* This represents each peer we're gossiping with */
-struct peer {
-	/* daemon->peers */
-	struct list_node list;
-
-	/* parent pointer. */
-	struct daemon *daemon;
-
-	/* The ID of the peer (always unique) */
-	struct node_id id;
-
-	/* The two features gossip cares about (so far) */
-	bool gossip_queries_feature, initial_routing_sync_feature;
-
-	/* Are there outstanding responses for queries on short_channel_ids? */
-	const struct short_channel_id *scid_queries;
-	const bigsize_t *scid_query_flags;
-	size_t scid_query_idx;
-
-	/* Are there outstanding node_announcements from scid_queries? */
-	struct node_id *scid_query_nodes;
-	size_t scid_query_nodes_idx;
-
-	/* Do we have an scid_query outstanding?  Was it internal? */
-	bool scid_query_outstanding;
-	bool scid_query_was_internal;
-
-	/* How many pongs are we expecting? */
-	size_t num_pings_outstanding;
-
-	/* Map of outstanding channel_range requests. */
-	bitmap *query_channel_blocks;
-	/* What we're querying: [range_first_blocknum, range_end_blocknum) */
-	u32 range_first_blocknum, range_end_blocknum;
-	u32 range_blocks_remaining;
-	struct short_channel_id *query_channel_scids;
-
-	/* Are we asking this peer to give us lot of gossip? */
-	enum gossip_level gossip_level;
-
-	/* The daemon_conn used to queue messages to/from the peer. */
-	struct daemon_conn *dc;
-};
-
 /*~ A channel consists of a `struct half_chan` for each direction, each of
  * which has a `flags` word from the `channel_update`; bit 1 is
  * ROUTING_FLAGS_DISABLED in the `channel_update`.  But we also keep a local
@@ -203,134 +141,13 @@ void queue_peer_msg(struct peer *peer, const u8 *msg TAKES)
 }
 
 /*~ We have a helper for messages from the store. */
-static void queue_peer_from_store(struct peer *peer,
-				  const struct broadcastable *bcast)
+void queue_peer_from_store(struct peer *peer,
+			   const struct broadcastable *bcast)
 {
 	struct gossip_store *gs = peer->daemon->rstate->gs;
 	queue_peer_msg(peer, take(gossip_store_get(NULL, gs, bcast->index)));
 }
 
-/* BOLT #7:
- *
- * There are several messages which contain a long array of
- * `short_channel_id`s (called `encoded_short_ids`) so we utilize a
- * simple compression scheme: the first byte indicates the encoding, the
- * rest contains the data.
- */
-static u8 *encoding_start(const tal_t *ctx)
-{
-	return tal_arr(ctx, u8, 0);
-}
-
-/* Marshal a single short_channel_id */
-static void encoding_add_short_channel_id(u8 **encoded,
-					  const struct short_channel_id *scid)
-{
-	towire_short_channel_id(encoded, scid);
-}
-
-/* Marshal a single channel_update_timestamps */
-static void encoding_add_timestamps(u8 **encoded,
-				    const struct channel_update_timestamps *ts)
-{
-	towire_channel_update_timestamps(encoded, ts);
-}
-
-/* Marshal a single query flag (we don't query, so not currently used) */
-static UNNEEDED void encoding_add_query_flag(u8 **encoded, bigsize_t flag)
-{
-	towire_bigsize(encoded, flag);
-}
-
-/* Greg Maxwell asked me privately about using zlib for communicating a set,
- * and suggested that we'd be better off using Golomb-Rice coding a-la BIP
- * 158.  However, naively using Rice encoding isn't a win: we have to get
- * more complex and use separate streams.  The upside is that it's between
- * 2 and 5 times smaller (assuming optimal Rice encoding + gzip).  We can add
- * that later. */
-static u8 *zencode(const tal_t *ctx, const u8 *scids, size_t len)
-{
-	u8 *z;
-	int err;
-	unsigned long compressed_len = len;
-
-#ifdef ZLIB_EVEN_IF_EXPANDS
-	/* Needed for test vectors */
-	compressed_len = 128 * 1024;
-#endif
-	/* Prefer to fail if zlib makes it larger */
-	z = tal_arr(ctx, u8, compressed_len);
-	err = compress2(z, &compressed_len, scids, len, Z_DEFAULT_COMPRESSION);
-	if (err == Z_OK) {
-		status_debug("compressed %zu into %lu",
-			     len, compressed_len);
-		tal_resize(&z, compressed_len);
-		return z;
-	}
-	status_debug("compress %zu returned %i:"
-		     " not compresssing", len, err);
-	return NULL;
-}
-
-/* Try compressing *encoded: fails if result would be longer.
- * @off is offset to place result in *encoded.
- */
-static bool encoding_end_zlib(u8 **encoded, size_t off)
-{
-	u8 *z;
-	size_t len = tal_count(*encoded);
-
-	z = zencode(tmpctx, *encoded, len);
-	if (!z)
-		return false;
-
-	/* Successful: copy over and trim */
-	tal_resize(encoded, off + tal_count(z));
-	memcpy(*encoded + off, z, tal_count(z));
-
-	tal_free(z);
-	return true;
-}
-
-static void encoding_end_no_compress(u8 **encoded, size_t off)
-{
-	size_t len = tal_count(*encoded);
-
-	tal_resize(encoded, off + len);
-	memmove(*encoded + off, *encoded, len);
-}
-
-/* Once we've assembled it, try compressing.
- * Prepends encoding type to @encoding. */
-static bool encoding_end_prepend_type(u8 **encoded, size_t max_bytes)
-{
-	if (encoding_end_zlib(encoded, 1))
-		**encoded = SHORTIDS_ZLIB;
-	else {
-		encoding_end_no_compress(encoded, 1);
-		**encoded = SHORTIDS_UNCOMPRESSED;
-	}
-
-#if DEVELOPER
-	if (tal_count(*encoded) > max_encoding_bytes)
-		return false;
-#endif
-	return tal_count(*encoded) <= max_bytes;
-}
-
-/* Try compressing, leaving type external */
-static UNNEEDED bool encoding_end_external_type(u8 **encoded, u8 *type, size_t max_bytes)
-{
-	if (encoding_end_zlib(encoded, 0))
-		*type = SHORTIDS_ZLIB;
-	else {
-		encoding_end_no_compress(encoded, 0);
-		*type = SHORTIDS_UNCOMPRESSED;
-	}
-
-	return tal_count(*encoded) <= max_bytes;
-}
-
 /*~ We have different levels of gossipiness, depending on our needs. */
 static u32 gossip_start(const struct routing_state *rstate,
 			enum gossip_level gossip_level)
@@ -447,82 +264,6 @@ static bool get_node_announcement_by_id(const tal_t *ctx,
 				     features, wireaddrs);
 }
 
-/* Query this peer for these short-channel-ids. */
-static bool query_short_channel_ids(struct daemon *daemon,
-				    struct peer *peer,
-				    const struct short_channel_id *scids,
-				    bool internal)
-{
-	u8 *encoded, *msg;
-
-	/* BOLT #7:
-	 *
-	 * 1. type: 261 (`query_short_channel_ids`) (`gossip_queries`)
-	 * 2. data:
-	 *     * [`chain_hash`:`chain_hash`]
-	 *     * [`u16`:`len`]
-	 *     * [`len*byte`:`encoded_short_ids`]
-	 */
-	const size_t reply_overhead = 32 + 2;
-	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
-
-	/* Can't query if they don't have gossip_queries_feature */
-	if (!peer->gossip_queries_feature)
-		return false;
-
-	/* BOLT #7:
-	 *
-	 * The sender:
-	 *  - MUST NOT send `query_short_channel_ids` if it has sent a previous
-	 *   `query_short_channel_ids` to this peer and not received
-	 *   `reply_short_channel_ids_end`.
-	 */
-	if (peer->scid_query_outstanding)
-		return false;
-
-	encoded = encoding_start(tmpctx);
-	for (size_t i = 0; i < tal_count(scids); i++)
-		encoding_add_short_channel_id(&encoded, &scids[i]);
-
-	if (!encoding_end_prepend_type(&encoded, max_encoded_bytes)) {
-		status_broken("query_short_channel_ids: %zu is too many",
-			      tal_count(scids));
-		return false;
-	}
-
-	msg = towire_query_short_channel_ids(NULL, &daemon->chain_hash,
-					     encoded, NULL);
-	queue_peer_msg(peer, take(msg));
-	peer->scid_query_outstanding = true;
-	peer->scid_query_was_internal = internal;
-
-	status_debug("%s: sending query for %zu scids",
-		     type_to_string(tmpctx, struct node_id, &peer->id),
-		     tal_count(scids));
-	return true;
-}
-
-/*~ This peer told us about an update to an unknown channel.  Ask it for
- * a channel_announcement. */
-static void query_unknown_channel(struct daemon *daemon,
-				  struct peer *peer,
-				  const struct short_channel_id *id)
-{
-	/* Don't go overboard if we're already asking for a lot. */
-	if (tal_count(daemon->unknown_scids) > 1000)
-		return;
-
-	/* Check we're not already getting this one. */
-	for (size_t i = 0; i < tal_count(daemon->unknown_scids); i++)
-		if (short_channel_id_eq(&daemon->unknown_scids[i], id))
-			return;
-
-	tal_arr_expand(&daemon->unknown_scids, *id);
-
-	/* This is best effort: if peer is busy, we'll try next time. */
-	query_short_channel_ids(daemon, peer, daemon->unknown_scids, true);
-}
-
 /*~Routines to handle gossip messages from peer, forwarded by subdaemons.
  *-----------------------------------------------------------------------
  *
@@ -592,107 +333,6 @@ static u8 *handle_channel_update_msg(struct peer *peer, const u8 *msg)
 	return NULL;
 }
 
-/*~ The peer can ask about an array of short channel ids: we don't assemble the
- * reply immediately but process them one at a time in dump_gossip which is
- * called when there's nothing more important to send. */
-static const u8 *handle_query_short_channel_ids(struct peer *peer, const u8 *msg)
-{
-	struct bitcoin_blkid chain;
-	u8 *encoded;
-	struct short_channel_id *scids;
-	bigsize_t *flags;
-
-	struct tlv_query_short_channel_ids_tlvs *tlvs
-		= tlv_query_short_channel_ids_tlvs_new(tmpctx);
-
-	if (!fromwire_query_short_channel_ids(tmpctx, msg, &chain, &encoded,
-					      tlvs)) {
-		return towire_errorfmt(peer, NULL,
-				       "Bad query_short_channel_ids w/tlvs %s",
-				       tal_hex(tmpctx, msg));
-	}
-	if (tlvs->query_flags) {
-		/* BOLT #7:
-		 *
-		 * The receiver:
-		 *...
-		 *  - if the incoming message includes
-		 *    `query_short_channel_ids_tlvs`:
-		 *    - if `encoding_type` is not a known encoding type:
-		 *      - MAY fail the connection
-		 */
-		flags = decode_scid_query_flags(tmpctx, tlvs->query_flags);
-		if (!flags) {
-			return towire_errorfmt(peer, NULL,
-					       "Bad query_short_channel_ids query_flags %s",
-					       tal_hex(tmpctx, msg));
-		}
-	} else
-		flags = NULL;
-
-	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
-		status_debug("%s sent query_short_channel_ids chainhash %s",
-			     type_to_string(tmpctx, struct node_id, &peer->id),
-			     type_to_string(tmpctx, struct bitcoin_blkid, &chain));
-		return NULL;
-	}
-
-	/* BOLT #7:
-	 *
-	 * - if it has not sent `reply_short_channel_ids_end` to a
-	 *   previously received `query_short_channel_ids` from this
-         *   sender:
-	 *    - MAY fail the connection.
-	 */
-	if (peer->scid_queries || peer->scid_query_nodes) {
-		return towire_errorfmt(peer, NULL,
-				       "Bad concurrent query_short_channel_ids");
-	}
-
-	scids = decode_short_ids(tmpctx, encoded);
-	if (!scids) {
-		return towire_errorfmt(peer, NULL,
-				       "Bad query_short_channel_ids encoding %s",
-				       tal_hex(tmpctx, encoded));
-	}
-
-	/* BOLT #7:
-	 *
-	 * The receiver:
-	 *...
-	 *    - if `encoded_query_flags` does not decode to exactly one flag per
-	 *      `short_channel_id`:
-	 *      - MAY fail the connection.
-	 */
-	if (!flags) {
-		/* Pretend they asked for everything. */
-		flags = tal_arr(tmpctx, bigsize_t, tal_count(scids));
-		memset(flags, 0xFF, tal_bytelen(flags));
-	} else {
-		if (tal_count(flags) != tal_count(scids)) {
-			return towire_errorfmt(peer, NULL,
-					       "Bad query_short_channel_ids flags count %zu scids %zu",
-					       tal_count(flags), tal_count(scids));
-		}
-	}
-
-	/* BOLT #7:
-	 *
-	 * - MUST respond to each known `short_channel_id`:
-	 *...
-	 *    - SHOULD NOT wait for the next outgoing gossip flush to send
-	 *      these.
-	 */
-	peer->scid_queries = tal_steal(peer, scids);
-	peer->scid_query_flags = tal_steal(peer, flags);
-	peer->scid_query_idx = 0;
-	peer->scid_query_nodes = tal_arr(peer, struct node_id, 0);
-
-	/* Notify the daemon_conn-write loop to invoke create_next_scid_reply */
-	daemon_conn_wake(peer->dc);
-	return NULL;
-}
-
 /*~ When we compact the gossip store, all the broadcast indexs move.
  * We simply offset everyone, which means in theory they could retransmit
  * some, but that's a lesser evil than skipping some. */
@@ -719,427 +359,6 @@ void update_peers_broadcast_index(struct list_head *peers, u32 offset)
 	}
 }
 
-/*~ We can send multiple replies when the peer queries for all channels in
- * a given range of blocks; each one indicates the range of blocks it covers. */
-static void reply_channel_range(struct peer *peer,
-				u32 first_blocknum, u32 number_of_blocks,
-				const u8 *encoded_scids,
-				struct tlv_reply_channel_range_tlvs_timestamps_tlv *timestamps,
-				struct tlv_reply_channel_range_tlvs_checksums_tlv *checksums)
-{
-	/* BOLT #7:
-	 *
-	 * - For each `reply_channel_range`:
-	 *   - MUST set with `chain_hash` equal to that of `query_channel_range`,
-	 *   - MUST encode a `short_channel_id` for every open channel it
-	 *     knows in blocks `first_blocknum` to `first_blocknum` plus
-	 *     `number_of_blocks` minus one.
-	 *   - MUST limit `number_of_blocks` to the maximum number of blocks
-         *     whose results could fit in `encoded_short_ids`
-	 *   - if does not maintain up-to-date channel information for
-	 *     `chain_hash`:
-	 *     - MUST set `complete` to 0.
-	 *   - otherwise:
-	 *     - SHOULD set `complete` to 1.
-	 */
- 	struct tlv_reply_channel_range_tlvs *tlvs
- 		= tlv_reply_channel_range_tlvs_new(tmpctx);
-	tlvs->timestamps_tlv = timestamps;
-	tlvs->checksums_tlv = checksums;
-
-	u8 *msg = towire_reply_channel_range(NULL,
-					     &peer->daemon->chain_hash,
-					     first_blocknum,
-					     number_of_blocks,
-					     1, encoded_scids, tlvs);
-	queue_peer_msg(peer, take(msg));
-}
-
-/* BOLT #7:
- *
- * `query_option_flags` is a bitfield represented as a minimally-encoded varint.
- * Bits have the following meaning:
- *
- * | Bit Position  | Meaning                 |
- * | ------------- | ----------------------- |
- * | 0             | Sender wants timestamps |
- * | 1             | Sender wants checksums  |
- */
-enum query_option_flags {
-	QUERY_ADD_TIMESTAMPS = 0x1,
-	QUERY_ADD_CHECKSUMS = 0x2,
-};
-
-/* BOLT #7:
- *
- * The checksum of a `channel_update` is the CRC32C checksum as specified in
- * [RFC3720](https://tools.ietf.org/html/rfc3720#appendix-B.4) of this
- * `channel_update` without its `signature` and `timestamp` fields.
- */
-static u32 crc32_of_update(const u8 *channel_update)
-{
-	u32 sum;
-	const u8 *parts[2];
-	size_t sizes[ARRAY_SIZE(parts)];
-
-	get_cupdate_parts(channel_update, parts, sizes);
-
-	sum = 0;
-	for (size_t i = 0; i < ARRAY_SIZE(parts); i++)
-		sum = crc32c(sum, parts[i], sizes[i]);
-	return sum;
-}
-
-static void get_checksum_and_timestamp(struct routing_state *rstate,
-				       const struct chan *chan,
-				       int direction,
-				       u32 *tstamp, u32 *csum)
-{
-	const struct half_chan *hc = &chan->half[direction];
-
-	if (!is_chan_public(chan) || !is_halfchan_defined(hc)) {
-		*tstamp = *csum = 0;
-	} else {
-		const u8 *update = gossip_store_get(tmpctx, rstate->gs,
-						    hc->bcast.index);
-		*tstamp = hc->bcast.timestamp;
-		*csum = crc32_of_update(update);
-	}
-}
-
-/* FIXME: This assumes that the tlv type encodes into 1 byte! */
-static size_t tlv_len(const tal_t *msg)
-{
-	return 1 + bigsize_len(tal_count(msg)) + tal_count(msg);
-}
-
-/*~ When we need to send an array of channels, it might go over our 64k packet
- * size.  If it doesn't, we recurse, splitting in two, etc.  Each message
- * indicates what blocks it contains, so the recipient knows when we're
- * finished.
- *
- * tail_blocks is the empty blocks at the end, in case they asked for all
- * blocks to 4 billion.
- */
-static bool queue_channel_ranges(struct peer *peer,
-				 u32 first_blocknum, u32 number_of_blocks,
-				 u32 tail_blocks,
-				 enum query_option_flags query_option_flags)
-{
-	struct routing_state *rstate = peer->daemon->rstate;
-	u8 *encoded_scids = encoding_start(tmpctx);
-	struct tlv_reply_channel_range_tlvs_timestamps_tlv *tstamps;
-	struct tlv_reply_channel_range_tlvs_checksums_tlv *csums;
-	struct short_channel_id scid;
-	bool scid_ok;
-
-	/* BOLT #7:
-	 *
-	 * 1. type: 264 (`reply_channel_range`) (`gossip_queries`)
-	 * 2. data:
-	 *   * [`chain_hash`:`chain_hash`]
-	 *   * [`u32`:`first_blocknum`]
-	 *   * [`u32`:`number_of_blocks`]
-	 *   * [`byte`:`complete`]
-	 *   * [`u16`:`len`]
-	 *   * [`len*byte`:`encoded_short_ids`]
-	 */
-	const size_t reply_overhead = 32 + 4 + 4 + 1 + 2;
-	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
-	size_t extension_bytes;
-
-	if (query_option_flags & QUERY_ADD_TIMESTAMPS) {
-		tstamps = tal(tmpctx,
-			      struct tlv_reply_channel_range_tlvs_timestamps_tlv);
-		tstamps->encoded_timestamps = encoding_start(tstamps);
-	} else
-		tstamps = NULL;
-
-	if (query_option_flags & QUERY_ADD_CHECKSUMS) {
-		csums = tal(tmpctx,
-			    struct tlv_reply_channel_range_tlvs_checksums_tlv);
-		csums->checksums
-			= tal_arr(csums, struct channel_update_checksums, 0);
-	} else
-		csums = NULL;
-
-	/* Avoid underflow: we don't use block 0 anyway */
-	if (first_blocknum == 0)
-		scid_ok = mk_short_channel_id(&scid, 1, 0, 0);
-	else
-		scid_ok = mk_short_channel_id(&scid, first_blocknum, 0, 0);
-	scid.u64--;
-	if (!scid_ok)
-		return false;
-
-	/* We keep a `uintmap` of `short_channel_id` to `struct chan *`.
-	 * Unlike a htable, it's efficient to iterate through, but it only
-	 * works because each short_channel_id is basically a 64-bit unsigned
-	 * integer.
-	 *
-	 * First we iterate and gather all the short channel ids. */
-	while (uintmap_after(&rstate->chanmap, &scid.u64)) {
-		struct chan *chan;
-		struct channel_update_timestamps ts;
-		struct channel_update_checksums cs;
-		u32 blocknum = short_channel_id_blocknum(&scid);
-		if (blocknum >= first_blocknum + number_of_blocks)
-			break;
-
-		encoding_add_short_channel_id(&encoded_scids, &scid);
-
-		/* FIXME: Store csum in header. */
-		chan = get_channel(rstate, &scid);
-		get_checksum_and_timestamp(rstate, chan, 0,
-					   &ts.timestamp_node_id_1,
-					   &cs.checksum_node_id_1);
-		get_checksum_and_timestamp(rstate, chan, 1,
-					   &ts.timestamp_node_id_2,
-					   &cs.checksum_node_id_2);
-
-		if (csums)
-			tal_arr_expand(&csums->checksums, cs);
-		if (tstamps)
-			encoding_add_timestamps(&tstamps->encoded_timestamps,
-						&ts);
-	}
-
-	extension_bytes = 0;
-
-	/* If either of these can't fit in max_encoded_bytes by itself,
-	 * it's over. */
-	if (csums) {
-		extension_bytes += tlv_len(csums->checksums);
-	}
-
-	if (tstamps) {
-		if (!encoding_end_external_type(&tstamps->encoded_timestamps,
-						&tstamps->encoding_type,
-						max_encoded_bytes))
-			goto wont_fit;
-		/* 1 byte for encoding_type, too */
-		extension_bytes += 1 + tlv_len(tstamps->encoded_timestamps);
-	}
-
-	/* If we can encode that, fine: send it */
-	if (extension_bytes <= max_encoded_bytes
-	    && encoding_end_prepend_type(&encoded_scids,
-					 max_encoded_bytes - extension_bytes)) {
-		reply_channel_range(peer, first_blocknum,
-				    number_of_blocks + tail_blocks,
-				    encoded_scids,
-				    tstamps, csums);
-		return true;
-	}
-
-wont_fit:
-	/* It wouldn't all fit: divide in half */
-	/* We assume we can always send one block! */
-	if (number_of_blocks <= 1) {
-		/* We always assume we can send 1 blocks worth */
-		status_broken("Could not fit scids for single block %u",
-			      first_blocknum);
-		return false;
-	}
-	status_debug("queue_channel_ranges full: splitting %u+%u and %u+%u(+%u)",
-		     first_blocknum,
-		     number_of_blocks / 2,
-		     first_blocknum + number_of_blocks / 2,
-		     number_of_blocks - number_of_blocks / 2,
-		     tail_blocks);
-	return queue_channel_ranges(peer, first_blocknum, number_of_blocks / 2,
-				    0, query_option_flags)
-		&& queue_channel_ranges(peer, first_blocknum + number_of_blocks / 2,
-					number_of_blocks - number_of_blocks / 2,
-					tail_blocks, query_option_flags);
-}
-
-/*~ The peer can ask for all channels is a series of blocks.  We reply with one
- * or more messages containing the short_channel_ids. */
-static u8 *handle_query_channel_range(struct peer *peer, const u8 *msg)
-{
-	struct routing_state *rstate = peer->daemon->rstate;
-	struct bitcoin_blkid chain_hash;
-	u32 first_blocknum, number_of_blocks, tail_blocks;
-	struct short_channel_id last_scid;
-	enum query_option_flags query_option_flags;
-	struct tlv_query_channel_range_tlvs *tlvs
-		= tlv_query_channel_range_tlvs_new(msg);
-
-	if (!fromwire_query_channel_range(msg, &chain_hash,
-					  &first_blocknum, &number_of_blocks,
-					  tlvs)) {
-		return towire_errorfmt(peer, NULL,
-				       "Bad query_channel_range w/tlvs %s",
-				       tal_hex(tmpctx, msg));
-	}
-	if (tlvs->query_option)
-		query_option_flags = tlvs->query_option->query_option_flags;
-	else
-		query_option_flags = 0;
-
-	/* If they ask for the wrong chain, we give an empty response
-	 * with the `complete` flag unset */
-	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain_hash)) {
-		status_debug("%s sent query_channel_range chainhash %s",
-			     type_to_string(tmpctx, struct node_id, &peer->id),
-			     type_to_string(tmpctx, struct bitcoin_blkid,
-					    &chain_hash));
-		u8 *end = towire_reply_channel_range(NULL, &chain_hash, first_blocknum,
-		                                     number_of_blocks, false, NULL, NULL);
-		queue_peer_msg(peer, take(end));
-		return NULL;
-	}
-
-	/* If they ask for number_of_blocks UINTMAX, and we have to divide
-	 * and conquer, we'll do a lot of unnecessary work.  Cap it at the
-	 * last value we have, then send an empty reply. */
-	if (uintmap_last(&rstate->chanmap, &last_scid.u64)) {
-		u32 last_block = short_channel_id_blocknum(&last_scid);
-
-		/* u64 here avoids overflow on number_of_blocks
-		   UINTMAX for example */
-		if ((u64)first_blocknum + number_of_blocks > last_block) {
-			tail_blocks = first_blocknum + number_of_blocks
-				- last_block - 1;
-			number_of_blocks -= tail_blocks;
-		} else
-			tail_blocks = 0;
-	} else
-		tail_blocks = 0;
-
-	if (!queue_channel_ranges(peer, first_blocknum, number_of_blocks,
-				  tail_blocks, query_option_flags))
-		return towire_errorfmt(peer, NULL,
-				       "Invalid query_channel_range %u+%u",
-				       first_blocknum, number_of_blocks + tail_blocks);
-
-	return NULL;
-}
-
-/*~ This is the reply we get when we send query_channel_range; we keep
- * expecting them until the entire range we asked for is covered. */
-static const u8 *handle_reply_channel_range(struct peer *peer, const u8 *msg)
-{
-	struct bitcoin_blkid chain;
-	u8 complete;
-	u32 first_blocknum, number_of_blocks, start, end;
-	u8 *encoded;
-	struct short_channel_id *scids;
-	size_t n;
-	unsigned long b;
-	struct tlv_reply_channel_range_tlvs *tlvs
-		= tlv_reply_channel_range_tlvs_new(tmpctx);
-
-	if (!fromwire_reply_channel_range(tmpctx, msg, &chain, &first_blocknum,
-					  &number_of_blocks, &complete,
-					  &encoded, tlvs)) {
-		return towire_errorfmt(peer, NULL,
-				       "Bad reply_channel_range w/tlvs %s",
-				       tal_hex(tmpctx, msg));
-	}
-
-	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
-		return towire_errorfmt(peer, NULL,
-				       "reply_channel_range for bad chain: %s",
-				       tal_hex(tmpctx, msg));
-	}
-
-	if (!peer->query_channel_blocks) {
-		return towire_errorfmt(peer, NULL,
-				       "reply_channel_range without query: %s",
-				       tal_hex(tmpctx, msg));
-	}
-
-	/* Beware overflow! */
-	if (first_blocknum + number_of_blocks < first_blocknum) {
-		return towire_errorfmt(peer, NULL,
-				       "reply_channel_range invalid %u+%u",
-				       first_blocknum, number_of_blocks);
-	}
-
-	scids = decode_short_ids(tmpctx, encoded);
-	if (!scids) {
-		return towire_errorfmt(peer, NULL,
-				       "Bad reply_channel_range encoding %s",
-				       tal_hex(tmpctx, encoded));
-	}
-
-	status_debug("peer %s reply_channel_range %u+%u (of %u+%u) %zu scids",
-		     type_to_string(tmpctx, struct node_id, &peer->id),
-		     first_blocknum, number_of_blocks,
-		     peer->range_first_blocknum,
-		     peer->range_end_blocknum - peer->range_first_blocknum,
-		     tal_count(scids));
-
-	/* BOLT #7:
-	 *
-	 * The receiver of `query_channel_range`:
-	 *...
-	 *  - MUST respond with one or more `reply_channel_range` whose
-	 *    combined range cover the requested `first_blocknum` to
-	 *    `first_blocknum` plus `number_of_blocks` minus one.
-	 */
-	/* ie. They can be outside range we asked, but they must overlap! */
-	if (first_blocknum + number_of_blocks <= peer->range_first_blocknum
-	    || first_blocknum >= peer->range_end_blocknum) {
-		return towire_errorfmt(peer, NULL,
-				       "reply_channel_range invalid %u+%u for query %u+%u",
-				       first_blocknum, number_of_blocks,
-				       peer->range_first_blocknum,
-				       peer->range_end_blocknum
-				       - peer->range_first_blocknum);
-	}
-
-	start = first_blocknum;
-	end = first_blocknum + number_of_blocks;
-	/* Trim to make it a subset of what we want. */
-	if (start < peer->range_first_blocknum)
-		start = peer->range_first_blocknum;
-	if (end > peer->range_end_blocknum)
-		end = peer->range_end_blocknum;
-
-	/* We keep a bitmap of what blocks have been covered by replies: bit 0
-	 * represents block peer->range_first_blocknum */
-	b = bitmap_ffs(peer->query_channel_blocks,
-		       start - peer->range_first_blocknum,
-		       end - peer->range_first_blocknum);
-	if (b != end - peer->range_first_blocknum) {
-		return towire_errorfmt(peer, NULL,
-				       "reply_channel_range %u+%u already have block %lu",
-				       first_blocknum, number_of_blocks,
-				       peer->range_first_blocknum + b);
-	}
-
-	/* Mark that short_channel_ids for this block have been received */
-	bitmap_fill_range(peer->query_channel_blocks,
-			  start - peer->range_first_blocknum,
-			  end - peer->range_first_blocknum);
-	peer->range_blocks_remaining -= end - start;
-
-	/* Add scids */
-	n = tal_count(peer->query_channel_scids);
-	tal_resize(&peer->query_channel_scids, n + tal_count(scids));
-	memcpy(peer->query_channel_scids + n, scids, tal_bytelen(scids));
-
-	/* Still more to go? */
-	if (peer->range_blocks_remaining)
-		return NULL;
-
-	/* All done, send reply to lightningd: that's currently the only thing
-	 * which triggers this (for testing).  Eventually we might start probing
-	 * for gossip information on our own. */
-	msg = towire_gossip_query_channel_range_reply(NULL,
-						      first_blocknum,
-						      number_of_blocks,
-						      complete,
-						      peer->query_channel_scids);
-	daemon_conn_send(peer->daemon->master, take(msg));
-	peer->query_channel_scids = tal_free(peer->query_channel_scids);
-	peer->query_channel_blocks = tal_free(peer->query_channel_blocks);
-	return NULL;
-}
-
 /*~ For simplicity, all pings and pongs are forwarded to us here in gossipd. */
 static u8 *handle_ping(struct peer *peer, const u8 *ping)
 {
@@ -1170,232 +389,6 @@ static const u8 *handle_pong(struct peer *peer, const u8 *pong)
 	return NULL;
 }
 
-/*~ When we ask about an array of short_channel_ids, we get all channel &
- * node announcements and channel updates which the peer knows.  There's an
- * explicit end packet; this is needed to differentiate between 'I'm slow'
- * and 'I don't know those channels'. */
-static u8 *handle_reply_short_channel_ids_end(struct peer *peer, const u8 *msg)
-{
-	struct bitcoin_blkid chain;
-	u8 complete;
-
-	if (!fromwire_reply_short_channel_ids_end(msg, &chain, &complete)) {
-		return towire_errorfmt(peer, NULL,
-				       "Bad reply_short_channel_ids_end %s",
-				       tal_hex(tmpctx, msg));
-	}
-
-	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
-		return towire_errorfmt(peer, NULL,
-				       "reply_short_channel_ids_end for bad chain: %s",
-				       tal_hex(tmpctx, msg));
-	}
-
-	if (!peer->scid_query_outstanding) {
-		return towire_errorfmt(peer, NULL,
-				       "unexpected reply_short_channel_ids_end: %s",
-				       tal_hex(tmpctx, msg));
-	}
-
-	peer->scid_query_outstanding = false;
-
-	/* If it wasn't generated by us, it's the dev interface from lightningd
-	 */
-	if (!peer->scid_query_was_internal) {
-		msg = towire_gossip_scids_reply(msg, true, complete);
-		daemon_conn_send(peer->daemon->master, take(msg));
-	}
-
-	/* All good, no error. */
-	return NULL;
-}
-
-/*~ Arbitrary ordering function of pubkeys.
- *
- * Note that we could use memcmp() here: even if they had somehow different
- * bitwise representations for the same key, we copied them all from struct
- * node which should make them unique.  Even if not (say, a node vanished
- * and reappeared) we'd just end up sending two node_announcement for the
- * same node.
- */
-static int pubkey_order(const struct node_id *k1,
-			const struct node_id *k2,
-			void *unused UNUSED)
-{
-	return node_id_cmp(k1, k2);
-}
-
-static void uniquify_node_ids(struct node_id **ids)
-{
-	size_t dst, src;
-
-	/* BOLT #7:
-	 *   - SHOULD avoid sending duplicate `node_announcements` in
-	 *     response to a single `query_short_channel_ids`.
-	 */
-	/* ccan/asort is a typesafe qsort wrapper: like most ccan modules
-	 * it eschews exposing 'void *' pointers and ensures that the
-	 * callback function and its arguments match types correctly. */
-	asort(*ids, tal_count(*ids), pubkey_order, NULL);
-
-	/* Compact the array */
-	for (dst = 0, src = 0; src < tal_count(*ids); src++) {
-		if (dst && node_id_eq(&(*ids)[dst-1], &(*ids)[src]))
-			continue;
-		(*ids)[dst++] = (*ids)[src];
-	}
-
-	/* And trim to length, so tal_count() gives correct answer. */
-	tal_resize(ids, dst);
-}
-
-/*~ We are fairly careful to avoid the peer DoSing us with channel queries:
- * this routine sends information about a single short_channel_id, unless
- * it's finished all of them. */
-static void maybe_create_next_scid_reply(struct peer *peer)
-{
-	struct routing_state *rstate = peer->daemon->rstate;
-	size_t i, num;
-	bool sent = false;
-
-	/* BOLT #7:
-	 *
-	 *   - MUST respond to each known `short_channel_id`:
-	 */
-	/* Search for next short_channel_id we know about. */
-	num = tal_count(peer->scid_queries);
-	for (i = peer->scid_query_idx; !sent && i < num; i++) {
-		struct chan *chan;
-
-		chan = get_channel(rstate, &peer->scid_queries[i]);
-		if (!chan || !is_chan_public(chan))
-			continue;
-
-		/* BOLT #7:
-		 * - if bit 0 of `query_flag` is set:
-		 *   - MUST reply with a `channel_announcement`
-		 */
-		if (peer->scid_query_flags[i] & SCID_QF_ANNOUNCE) {
-			queue_peer_from_store(peer, &chan->bcast);
-			sent = true;
-		}
-
-		/* BOLT #7:
-		 * - if bit 1 of `query_flag` is set and it has received a
-		 *   `channel_update` from `node_id_1`:
-		 *   - MUST reply with the latest `channel_update` for
-		 *   `node_id_1`
-		 * - if bit 2 of `query_flag` is set and it has received a
-		 *   `channel_update` from `node_id_2`:
-		 *   - MUST reply with the latest `channel_update` for
-		 *   `node_id_2` */
-		if ((peer->scid_query_flags[i] & SCID_QF_UPDATE1)
-		    && is_halfchan_defined(&chan->half[0])) {
-			queue_peer_from_store(peer, &chan->half[0].bcast);
-			sent = true;
-		}
-		if ((peer->scid_query_flags[i] & SCID_QF_UPDATE2)
-		    && is_halfchan_defined(&chan->half[1])) {
-			queue_peer_from_store(peer, &chan->half[1].bcast);
-			sent = true;
-		}
-
-		/* BOLT #7:
-		 * - if bit 3 of `query_flag` is set and it has received
-		 *   a `node_announcement` from `node_id_1`:
-		 *   - MUST reply with the latest `node_announcement` for
-		 *   `node_id_1`
-		 * - if bit 4 of `query_flag` is set and it has received a
-		 *    `node_announcement` from `node_id_2`:
-		 *   - MUST reply with the latest `node_announcement` for
-		 *   `node_id_2` */
-		/* Save node ids for later transmission of node_announcement */
-		if (peer->scid_query_flags[i] & SCID_QF_NODE1)
-			tal_arr_expand(&peer->scid_query_nodes,
-				       chan->nodes[0]->id);
-		if (peer->scid_query_flags[i] & SCID_QF_NODE2)
-			tal_arr_expand(&peer->scid_query_nodes,
-				       chan->nodes[1]->id);
-	}
-
-	/* Just finished channels?  Remove duplicate nodes. */
-	if (peer->scid_query_idx != num && i == num)
-		uniquify_node_ids(&peer->scid_query_nodes);
-
-	/* Update index for next time we're called. */
-	peer->scid_query_idx = i;
-
-	/* BOLT #7:
-	 *
-	 *    - if the incoming message does not include `encoded_query_flags`:
-	 *      ...
-	 *      - MUST follow with any `node_announcement`s for each
-	 *      `channel_announcement`
-	 *    - otherwise:
-	 *      ...
-	 *      - if bit 3 of `query_flag` is set and it has received a
-	 *        `node_announcement` from `node_id_1`:
-	 *        - MUST reply with the latest `node_announcement` for
-	 *          `node_id_1`
-	 *      - if bit 4 of `query_flag` is set and it has received a
-	 *        `node_announcement` from `node_id_2`:
-	 *        - MUST reply with the latest `node_announcement` for
-	 *          `node_id_2`
-	 */
-	/* If we haven't sent anything above, we look for the next
-	 * node_announcement to send. */
-	num = tal_count(peer->scid_query_nodes);
-	for (i = peer->scid_query_nodes_idx; !sent && i < num; i++) {
-		const struct node *n;
-
-		/* Not every node announces itself (we know it exists because
-		 * of a channel_announcement, however) */
-		n = get_node(rstate, &peer->scid_query_nodes[i]);
-		if (!n || !n->bcast.index)
-			continue;
-
-		queue_peer_from_store(peer, &n->bcast);
-		sent = true;
-	}
-	peer->scid_query_nodes_idx = i;
-
-	/* All finished? */
-	if (peer->scid_queries
-	    && peer->scid_query_idx == tal_count(peer->scid_queries)
-	    && peer->scid_query_nodes_idx == num) {
-		/* BOLT #7:
-		 *
-		 * - MUST follow these responses with
-		 *   `reply_short_channel_ids_end`.
-		 *   - if does not maintain up-to-date channel information for
-		 *     `chain_hash`:
-		 *      - MUST set `complete` to 0.
-		 *   - otherwise:
-		 *      - SHOULD set `complete` to 1.
-		 */
-		/* FIXME: We consider ourselves to have complete knowledge. */
-		u8 *end = towire_reply_short_channel_ids_end(peer,
-							     &peer->daemon->chain_hash,
-							     true);
-		queue_peer_msg(peer, take(end));
-
-		/* We're done!  Clean up so we simply pass-through next time. */
-		peer->scid_queries = tal_free(peer->scid_queries);
-		peer->scid_query_flags = tal_free(peer->scid_query_flags);
-		peer->scid_query_idx = 0;
-		peer->scid_query_nodes = tal_free(peer->scid_query_nodes);
-		peer->scid_query_nodes_idx = 0;
-	}
-}
-
-/*~ This is called when the outgoing queue is empty; gossip has lower priority
- * than just about anything else. */
-static void dump_gossip(struct peer *peer)
-{
-	/* Do we have scid query replies to send? */
-	maybe_create_next_scid_reply(peer);
-}
-
 /*~ This is when channeld asks us for a channel_update for a local channel.
  * It does that to fill in the error field when lightningd fails an HTLC and
  * sets the UPDATE bit in the error type.  lightningd is too important to
@@ -1646,10 +639,11 @@ static struct io_plan *connectd_new_peer(struct io_conn *conn,
 	list_add_tail(&peer->daemon->peers, &peer->list);
 	tal_add_destructor(peer, destroy_peer);
 
-	/* This is the new connection: calls dump_gossip when nothing else to
-	 * send. */
+	/* This is the new connection: calls maybe_send_query_responses when
+	 * nothing else to send. */
 	peer->dc = daemon_conn_new(daemon, fds[0],
-				   peer_msg_in, dump_gossip, peer);
+				   peer_msg_in,
+				   maybe_send_query_responses, peer);
 	/* Free peer if conn closed (destroy_peer closes conn if peer freed) */
 	tal_steal(peer->dc, peer);
 
@@ -2395,31 +1389,6 @@ static struct io_plan *new_blockheight(struct io_conn *conn,
 }
 
 #if DEVELOPER
-static struct io_plan *query_scids_req(struct io_conn *conn,
-				       struct daemon *daemon,
-				       const u8 *msg)
-{
-	struct node_id id;
-	struct short_channel_id *scids;
-	struct peer *peer;
-
-	if (!fromwire_gossip_query_scids(msg, msg, &id, &scids))
-		master_badmsg(WIRE_GOSSIP_QUERY_SCIDS, msg);
-
-	peer = find_peer(daemon, &id);
-	if (!peer) {
-		status_broken("query_scids: unknown peer %s",
-			      type_to_string(tmpctx, struct node_id, &id));
-		daemon_conn_send(daemon->master,
-				 take(towire_gossip_scids_reply(NULL,
-								false, false)));
-	} else if (!query_short_channel_ids(daemon, peer, scids, false))
-		daemon_conn_send(daemon->master,
-				 take(towire_gossip_scids_reply(NULL,
-								false, false)));
-	return daemon_conn_read_next(conn, daemon->master);
-}
-
 /* BOLT #7:
  *
  * ### The `gossip_timestamp_filter` Message
@@ -2462,86 +1431,6 @@ static struct io_plan *send_timestamp_filter(struct io_conn *conn,
 out:
 	return daemon_conn_read_next(conn, daemon->master);
 }
-
-/* FIXME: One day this will be called internally; for now it's just for
- * testing with dev_query_channel_range. */
-static struct io_plan *query_channel_range(struct io_conn *conn,
-					   struct daemon *daemon,
-					   const u8 *msg)
-{
-	struct node_id id;
-	u32 first_blocknum, number_of_blocks;
-	struct peer *peer;
-
-	if (!fromwire_gossip_query_channel_range(msg, &id, &first_blocknum,
-						 &number_of_blocks))
-		master_badmsg(WIRE_GOSSIP_QUERY_SCIDS, msg);
-
-	peer = find_peer(daemon, &id);
-	if (!peer) {
-		status_broken("query_channel_range: unknown peer %s",
-			      type_to_string(tmpctx, struct node_id, &id));
-		goto fail;
-	}
-
-	if (!peer->gossip_queries_feature) {
-		status_broken("query_channel_range: no gossip_query support in peer %s",
-			      type_to_string(tmpctx, struct node_id, &id));
-		goto fail;
-	}
-
-	if (peer->query_channel_blocks) {
-		status_broken("query_channel_range: previous query active");
-		goto fail;
-	}
-
-	/* Check for overflow on 32-bit machines! */
-	if (BITMAP_NWORDS(number_of_blocks) < number_of_blocks / BITMAP_WORD_BITS) {
-		status_broken("query_channel_range: huge number_of_blocks (%u) not supported",
-			number_of_blocks);
-		goto fail;
-	}
-
-	status_debug("sending query_channel_range for blocks %u+%u",
-		     first_blocknum, number_of_blocks);
-
-	msg = towire_query_channel_range(NULL, &daemon->chain_hash,
-					 first_blocknum, number_of_blocks,
-					 NULL);
-	queue_peer_msg(peer, take(msg));
-	peer->range_first_blocknum = first_blocknum;
-	peer->range_end_blocknum = first_blocknum + number_of_blocks;
-	peer->range_blocks_remaining = number_of_blocks;
-	peer->query_channel_blocks = tal_arrz(peer, bitmap,
-					      BITMAP_NWORDS(number_of_blocks));
-	peer->query_channel_scids = tal_arr(peer, struct short_channel_id, 0);
-
-out:
-	return daemon_conn_read_next(conn, daemon->master);
-
-fail:
-	daemon_conn_send(daemon->master,
-			 take(towire_gossip_query_channel_range_reply(NULL,
-								      0, 0,
-								      false,
-								      NULL)));
-	goto out;
-}
-
-/* This is a testing hack to allow us to artificially lower the maximum bytes
- * of short_channel_ids we'll encode, using dev_set_max_scids_encode_size. */
-static struct io_plan *dev_set_max_scids_encode_size(struct io_conn *conn,
-						     struct daemon *daemon,
-						     const u8 *msg)
-{
-	if (!fromwire_gossip_dev_set_max_scids_encode_size(msg,
-							   &max_encoding_bytes))
-		master_badmsg(WIRE_GOSSIP_DEV_SET_MAX_SCIDS_ENCODE_SIZE, msg);
-
-	status_debug("Set max_scids_encode_bytes to %u", max_encoding_bytes);
-	return daemon_conn_read_next(conn, daemon->master);
-}
-
 /* Another testing hack */
 static struct io_plan *dev_gossip_suppress(struct io_conn *conn,
 					   struct daemon *daemon,
diff --git a/gossipd/gossipd.h b/gossipd/gossipd.h
index fcb3a6a96..d5a23ef8a 100644
--- a/gossipd/gossipd.h
+++ b/gossipd/gossipd.h
@@ -1,8 +1,12 @@
 #ifndef LIGHTNING_GOSSIPD_GOSSIPD_H
 #define LIGHTNING_GOSSIPD_GOSSIPD_H
 #include "config.h"
+#include <bitcoin/block.h>
+#include <ccan/bitmap/bitmap.h>
 #include <ccan/list/list.h>
 #include <ccan/short_types/short_types.h>
+#include <ccan/timer/timer.h>
+#include <common/bigsize.h>
 #include <common/node_id.h>
 
 /* We talk to `hsmd` to sign our gossip messages with the node key */
@@ -10,6 +14,9 @@
 /* connectd asks us for help finding nodes, and gossip fds for new peers */
 #define CONNECTD_FD 4
 
+struct chan;
+struct broadcastable;
+
 /*~ The core daemon structure: */
 struct daemon {
 	/* Who am I?  Helps us find ourself in the routing map. */
@@ -59,10 +66,72 @@ struct daemon {
 	struct short_channel_id *deferred_txouts;
 };
 
+/*~ How gossipy do we ask a peer to be? */
+enum gossip_level {
+	/* Give us everything since epoch */
+	GOSSIP_HIGH,
+	/* Give us everything from 24 hours ago. */
+	GOSSIP_MEDIUM,
+	/* Give us everything from now. */
+	GOSSIP_LOW,
+	/* Give us nothing. */
+	GOSSIP_NONE,
+};
+
+/* This represents each peer we're gossiping with */
+struct peer {
+	/* daemon->peers */
+	struct list_node list;
+
+	/* parent pointer. */
+	struct daemon *daemon;
+
+	/* The ID of the peer (always unique) */
+	struct node_id id;
+
+	/* The two features gossip cares about (so far) */
+	bool gossip_queries_feature, initial_routing_sync_feature;
+
+	/* Are there outstanding responses for queries on short_channel_ids? */
+	const struct short_channel_id *scid_queries;
+	const bigsize_t *scid_query_flags;
+	size_t scid_query_idx;
+
+	/* Are there outstanding node_announcements from scid_queries? */
+	struct node_id *scid_query_nodes;
+	size_t scid_query_nodes_idx;
+
+	/* Do we have an scid_query outstanding?  Was it internal? */
+	bool scid_query_outstanding;
+	bool scid_query_was_internal;
+
+	/* How many pongs are we expecting? */
+	size_t num_pings_outstanding;
+
+	/* Map of outstanding channel_range requests. */
+	bitmap *query_channel_blocks;
+	/* What we're querying: [range_first_blocknum, range_end_blocknum) */
+	u32 range_first_blocknum, range_end_blocknum;
+	u32 range_blocks_remaining;
+	struct short_channel_id *query_channel_scids;
+
+	/* Are we asking this peer to give us lot of gossip? */
+	enum gossip_level gossip_level;
+
+	/* The daemon_conn used to queue messages to/from the peer. */
+	struct daemon_conn *dc;
+};
+
 /* Search for a peer. */
 struct peer *find_peer(struct daemon *daemon, const struct node_id *id);
 
 /* Queue a gossip message for the peer: the subdaemon on the other end simply
  * forwards it to the peer. */
 void queue_peer_msg(struct peer *peer, const u8 *msg TAKES);
+
+/* Queue a gossip_store message for the peer: the subdaemon on the
+ * other end simply forwards it to the peer. */
+void queue_peer_from_store(struct peer *peer,
+			   const struct broadcastable *bcast);
+
 #endif /* LIGHTNING_GOSSIPD_GOSSIPD_H */
diff --git a/gossipd/queries.c b/gossipd/queries.c
new file mode 100644
index 000000000..66f46112c
--- /dev/null
+++ b/gossipd/queries.c
@@ -0,0 +1,1064 @@
+/* Routines to generate and handle gossip query messages */
+#include <ccan/array_size/array_size.h>
+#include <ccan/asort/asort.h>
+#include <ccan/crc32c/crc32c.h>
+#include <ccan/tal/tal.h>
+#include <common/daemon_conn.h>
+#include <common/decode_short_channel_ids.h>
+#include <common/status.h>
+#include <common/type_to_string.h>
+#include <common/wire_error.h>
+#include <gossipd/gen_gossip_wire.h>
+#include <gossipd/gossip_generation.h>
+#include <gossipd/gossipd.h>
+#include <gossipd/queries.h>
+#include <gossipd/routing.h>
+#include <wire/gen_peer_wire.h>
+#include <wire/wire.h>
+#include <zlib.h>
+
+#if DEVELOPER
+static u32 max_encoding_bytes = -1U;
+#endif
+
+/* BOLT #7:
+ *
+ * There are several messages which contain a long array of
+ * `short_channel_id`s (called `encoded_short_ids`) so we utilize a
+ * simple compression scheme: the first byte indicates the encoding, the
+ * rest contains the data.
+ */
+static u8 *encoding_start(const tal_t *ctx)
+{
+	return tal_arr(ctx, u8, 0);
+}
+
+/* Marshal a single short_channel_id */
+static void encoding_add_short_channel_id(u8 **encoded,
+					  const struct short_channel_id *scid)
+{
+	towire_short_channel_id(encoded, scid);
+}
+
+/* Marshal a single channel_update_timestamps */
+static void encoding_add_timestamps(u8 **encoded,
+				    const struct channel_update_timestamps *ts)
+{
+	towire_channel_update_timestamps(encoded, ts);
+}
+
+/* Marshal a single query flag (we don't query, so not currently used) */
+static UNNEEDED void encoding_add_query_flag(u8 **encoded, bigsize_t flag)
+{
+	towire_bigsize(encoded, flag);
+}
+
+/* Greg Maxwell asked me privately about using zlib for communicating a set,
+ * and suggested that we'd be better off using Golomb-Rice coding a-la BIP
+ * 158.  However, naively using Rice encoding isn't a win: we have to get
+ * more complex and use separate streams.  The upside is that it's between
+ * 2 and 5 times smaller (assuming optimal Rice encoding + gzip).  We can add
+ * that later. */
+static u8 *zencode(const tal_t *ctx, const u8 *scids, size_t len)
+{
+	u8 *z;
+	int err;
+	unsigned long compressed_len = len;
+
+#ifdef ZLIB_EVEN_IF_EXPANDS
+	/* Needed for test vectors */
+	compressed_len = 128 * 1024;
+#endif
+	/* Prefer to fail if zlib makes it larger */
+	z = tal_arr(ctx, u8, compressed_len);
+	err = compress2(z, &compressed_len, scids, len, Z_DEFAULT_COMPRESSION);
+	if (err == Z_OK) {
+		status_debug("compressed %zu into %lu",
+			     len, compressed_len);
+		tal_resize(&z, compressed_len);
+		return z;
+	}
+	status_debug("compress %zu returned %i:"
+		     " not compresssing", len, err);
+	return NULL;
+}
+
+/* Try compressing *encoded: fails if result would be longer.
+ * @off is offset to place result in *encoded.
+ */
+static bool encoding_end_zlib(u8 **encoded, size_t off)
+{
+	u8 *z;
+	size_t len = tal_count(*encoded);
+
+	z = zencode(tmpctx, *encoded, len);
+	if (!z)
+		return false;
+
+	/* Successful: copy over and trim */
+	tal_resize(encoded, off + tal_count(z));
+	memcpy(*encoded + off, z, tal_count(z));
+
+	tal_free(z);
+	return true;
+}
+
+static void encoding_end_no_compress(u8 **encoded, size_t off)
+{
+	size_t len = tal_count(*encoded);
+
+	tal_resize(encoded, off + len);
+	memmove(*encoded + off, *encoded, len);
+}
+
+/* Once we've assembled it, try compressing.
+ * Prepends encoding type to @encoding. */
+static bool encoding_end_prepend_type(u8 **encoded, size_t max_bytes)
+{
+	if (encoding_end_zlib(encoded, 1))
+		**encoded = SHORTIDS_ZLIB;
+	else {
+		encoding_end_no_compress(encoded, 1);
+		**encoded = SHORTIDS_UNCOMPRESSED;
+	}
+
+#if DEVELOPER
+	if (tal_count(*encoded) > max_encoding_bytes)
+		return false;
+#endif
+	return tal_count(*encoded) <= max_bytes;
+}
+
+/* Try compressing, leaving type external */
+static UNNEEDED bool encoding_end_external_type(u8 **encoded, u8 *type, size_t max_bytes)
+{
+	if (encoding_end_zlib(encoded, 0))
+		*type = SHORTIDS_ZLIB;
+	else {
+		encoding_end_no_compress(encoded, 0);
+		*type = SHORTIDS_UNCOMPRESSED;
+	}
+
+	return tal_count(*encoded) <= max_bytes;
+}
+
+/* Query this peer for these short-channel-ids. */
+static bool query_short_channel_ids(struct daemon *daemon,
+				    struct peer *peer,
+				    const struct short_channel_id *scids,
+				    bool internal)
+{
+	u8 *encoded, *msg;
+
+	/* BOLT #7:
+	 *
+	 * 1. type: 261 (`query_short_channel_ids`) (`gossip_queries`)
+	 * 2. data:
+	 *     * [`chain_hash`:`chain_hash`]
+	 *     * [`u16`:`len`]
+	 *     * [`len*byte`:`encoded_short_ids`]
+	 */
+	const size_t reply_overhead = 32 + 2;
+	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
+
+	/* Can't query if they don't have gossip_queries_feature */
+	if (!peer->gossip_queries_feature)
+		return false;
+
+	/* BOLT #7:
+	 *
+	 * The sender:
+	 *  - MUST NOT send `query_short_channel_ids` if it has sent a previous
+	 *   `query_short_channel_ids` to this peer and not received
+	 *   `reply_short_channel_ids_end`.
+	 */
+	if (peer->scid_query_outstanding)
+		return false;
+
+	encoded = encoding_start(tmpctx);
+	for (size_t i = 0; i < tal_count(scids); i++)
+		encoding_add_short_channel_id(&encoded, &scids[i]);
+
+	if (!encoding_end_prepend_type(&encoded, max_encoded_bytes)) {
+		status_broken("query_short_channel_ids: %zu is too many",
+			      tal_count(scids));
+		return false;
+	}
+
+	msg = towire_query_short_channel_ids(NULL, &daemon->chain_hash,
+					     encoded, NULL);
+	queue_peer_msg(peer, take(msg));
+	peer->scid_query_outstanding = true;
+	peer->scid_query_was_internal = internal;
+
+	status_debug("%s: sending query for %zu scids",
+		     type_to_string(tmpctx, struct node_id, &peer->id),
+		     tal_count(scids));
+	return true;
+}
+
+/* This peer told us about an update to an unknown channel.  Ask it for a
+ * channel_announcement. */
+void query_unknown_channel(struct daemon *daemon,
+			   struct peer *peer,
+			   const struct short_channel_id *id)
+{
+	/* Don't go overboard if we're already asking for a lot. */
+	if (tal_count(daemon->unknown_scids) > 1000)
+		return;
+
+	/* Check we're not already getting this one. */
+	for (size_t i = 0; i < tal_count(daemon->unknown_scids); i++)
+		if (short_channel_id_eq(&daemon->unknown_scids[i], id))
+			return;
+
+	tal_arr_expand(&daemon->unknown_scids, *id);
+
+	/* This is best effort: if peer is busy, we'll try next time. */
+	query_short_channel_ids(daemon, peer, daemon->unknown_scids, true);
+}
+
+/* The peer can ask about an array of short channel ids: we don't assemble
+ * the reply immediately but process them one at a time in
+ * maybe_send_query_responses, called when nothing more important to send. */
+const u8 *handle_query_short_channel_ids(struct peer *peer, const u8 *msg)
+{
+	struct bitcoin_blkid chain;
+	u8 *encoded;
+	struct short_channel_id *scids;
+	bigsize_t *flags;
+	struct tlv_query_short_channel_ids_tlvs *tlvs
+		= tlv_query_short_channel_ids_tlvs_new(tmpctx);
+
+	if (!fromwire_query_short_channel_ids(tmpctx, msg, &chain, &encoded,
+					      tlvs)) {
+		return towire_errorfmt(peer, NULL,
+				       "Bad query_short_channel_ids w/tlvs %s",
+				       tal_hex(tmpctx, msg));
+	}
+	if (tlvs->query_flags) {
+		/* BOLT #7:
+		 *
+		 * The receiver:
+		 *...
+		 *  - if the incoming message includes
+		 *    `query_short_channel_ids_tlvs`:
+		 *    - if `encoding_type` is not a known encoding type:
+		 *      - MAY fail the connection
+		 */
+		flags = decode_scid_query_flags(tmpctx, tlvs->query_flags);
+		if (!flags) {
+			return towire_errorfmt(peer, NULL,
+					       "Bad query_short_channel_ids query_flags %s",
+					       tal_hex(tmpctx, msg));
+		}
+	} else
+		flags = NULL;
+
+	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
+		status_debug("%s sent query_short_channel_ids chainhash %s",
+			     type_to_string(tmpctx, struct node_id, &peer->id),
+			     type_to_string(tmpctx, struct bitcoin_blkid, &chain));
+		return NULL;
+	}
+
+	/* BOLT #7:
+	 *
+	 * - if it has not sent `reply_short_channel_ids_end` to a
+	 *   previously received `query_short_channel_ids` from this
+         *   sender:
+	 *    - MAY fail the connection.
+	 */
+	if (peer->scid_queries || peer->scid_query_nodes) {
+		return towire_errorfmt(peer, NULL,
+				       "Bad concurrent query_short_channel_ids");
+	}
+
+	scids = decode_short_ids(tmpctx, encoded);
+	if (!scids) {
+		return towire_errorfmt(peer, NULL,
+				       "Bad query_short_channel_ids encoding %s",
+				       tal_hex(tmpctx, encoded));
+	}
+
+	/* BOLT #7:
+	 *
+	 * The receiver:
+	 *...
+	 *    - if `encoded_query_flags` does not decode to exactly one flag per
+	 *      `short_channel_id`:
+	 *      - MAY fail the connection.
+	 */
+	if (!flags) {
+		/* Pretend they asked for everything. */
+		flags = tal_arr(tmpctx, bigsize_t, tal_count(scids));
+		memset(flags, 0xFF, tal_bytelen(flags));
+	} else {
+		if (tal_count(flags) != tal_count(scids)) {
+			return towire_errorfmt(peer, NULL,
+					       "Bad query_short_channel_ids flags count %zu scids %zu",
+					       tal_count(flags), tal_count(scids));
+		}
+	}
+
+	/* BOLT #7:
+	 *
+	 * - MUST respond to each known `short_channel_id`:
+	 *...
+	 *    - SHOULD NOT wait for the next outgoing gossip flush to send
+	 *      these.
+	 */
+	peer->scid_queries = tal_steal(peer, scids);
+	peer->scid_query_flags = tal_steal(peer, flags);
+	peer->scid_query_idx = 0;
+	peer->scid_query_nodes = tal_arr(peer, struct node_id, 0);
+
+	/* Notify the daemon_conn-write loop to invoke maybe_send_query_responses */
+	daemon_conn_wake(peer->dc);
+	return NULL;
+}
+
+/*~ We can send multiple replies when the peer queries for all channels in
+ * a given range of blocks; each one indicates the range of blocks it covers. */
+static void reply_channel_range(struct peer *peer,
+				u32 first_blocknum, u32 number_of_blocks,
+				const u8 *encoded_scids,
+				struct tlv_reply_channel_range_tlvs_timestamps_tlv *timestamps,
+				struct tlv_reply_channel_range_tlvs_checksums_tlv *checksums)
+{
+	/* BOLT #7:
+	 *
+	 * - For each `reply_channel_range`:
+	 *   - MUST set with `chain_hash` equal to that of `query_channel_range`,
+	 *   - MUST encode a `short_channel_id` for every open channel it
+	 *     knows in blocks `first_blocknum` to `first_blocknum` plus
+	 *     `number_of_blocks` minus one.
+	 *   - MUST limit `number_of_blocks` to the maximum number of blocks
+         *     whose results could fit in `encoded_short_ids`
+	 *   - if does not maintain up-to-date channel information for
+	 *     `chain_hash`:
+	 *     - MUST set `complete` to 0.
+	 *   - otherwise:
+	 *     - SHOULD set `complete` to 1.
+	 */
+ 	struct tlv_reply_channel_range_tlvs *tlvs
+ 		= tlv_reply_channel_range_tlvs_new(tmpctx);
+	tlvs->timestamps_tlv = timestamps;
+	tlvs->checksums_tlv = checksums;
+
+	u8 *msg = towire_reply_channel_range(NULL,
+					     &peer->daemon->chain_hash,
+					     first_blocknum,
+					     number_of_blocks,
+					     1, encoded_scids, tlvs);
+	queue_peer_msg(peer, take(msg));
+}
+
+/* BOLT #7:
+ *
+ * `query_option_flags` is a bitfield represented as a minimally-encoded varint.
+ * Bits have the following meaning:
+ *
+ * | Bit Position  | Meaning                 |
+ * | ------------- | ----------------------- |
+ * | 0             | Sender wants timestamps |
+ * | 1             | Sender wants checksums  |
+ */
+enum query_option_flags {
+	QUERY_ADD_TIMESTAMPS = 0x1,
+	QUERY_ADD_CHECKSUMS = 0x2,
+};
+
+/* BOLT #7:
+ *
+ * The checksum of a `channel_update` is the CRC32C checksum as specified in
+ * [RFC3720](https://tools.ietf.org/html/rfc3720#appendix-B.4) of this
+ * `channel_update` without its `signature` and `timestamp` fields.
+ */
+static u32 crc32_of_update(const u8 *channel_update)
+{
+	u32 sum;
+	const u8 *parts[2];
+	size_t sizes[ARRAY_SIZE(parts)];
+
+	get_cupdate_parts(channel_update, parts, sizes);
+
+	sum = 0;
+	for (size_t i = 0; i < ARRAY_SIZE(parts); i++)
+		sum = crc32c(sum, parts[i], sizes[i]);
+	return sum;
+}
+
+static void get_checksum_and_timestamp(struct routing_state *rstate,
+				       const struct chan *chan,
+				       int direction,
+				       u32 *tstamp, u32 *csum)
+{
+	const struct half_chan *hc = &chan->half[direction];
+
+	if (!is_chan_public(chan) || !is_halfchan_defined(hc)) {
+		*tstamp = *csum = 0;
+	} else {
+		const u8 *update = gossip_store_get(tmpctx, rstate->gs,
+						    hc->bcast.index);
+		*tstamp = hc->bcast.timestamp;
+		*csum = crc32_of_update(update);
+	}
+}
+
+/* FIXME: This assumes that the tlv type encodes into 1 byte! */
+static size_t tlv_len(const tal_t *msg)
+{
+	return 1 + bigsize_len(tal_count(msg)) + tal_count(msg);
+}
+
+/*~ When we need to send an array of channels, it might go over our 64k packet
+ * size.  If it doesn't, we recurse, splitting in two, etc.  Each message
+ * indicates what blocks it contains, so the recipient knows when we're
+ * finished.
+ *
+ * tail_blocks is the empty blocks at the end, in case they asked for all
+ * blocks to 4 billion.
+ */
+static bool queue_channel_ranges(struct peer *peer,
+				 u32 first_blocknum, u32 number_of_blocks,
+				 u32 tail_blocks,
+				 enum query_option_flags query_option_flags)
+{
+	struct routing_state *rstate = peer->daemon->rstate;
+	u8 *encoded_scids = encoding_start(tmpctx);
+	struct tlv_reply_channel_range_tlvs_timestamps_tlv *tstamps;
+	struct tlv_reply_channel_range_tlvs_checksums_tlv *csums;
+	struct short_channel_id scid;
+	bool scid_ok;
+
+	/* BOLT #7:
+	 *
+	 * 1. type: 264 (`reply_channel_range`) (`gossip_queries`)
+	 * 2. data:
+	 *   * [`chain_hash`:`chain_hash`]
+	 *   * [`u32`:`first_blocknum`]
+	 *   * [`u32`:`number_of_blocks`]
+	 *   * [`byte`:`complete`]
+	 *   * [`u16`:`len`]
+	 *   * [`len*byte`:`encoded_short_ids`]
+	 */
+	const size_t reply_overhead = 32 + 4 + 4 + 1 + 2;
+	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
+	size_t extension_bytes;
+
+	if (query_option_flags & QUERY_ADD_TIMESTAMPS) {
+		tstamps = tal(tmpctx,
+			      struct tlv_reply_channel_range_tlvs_timestamps_tlv);
+		tstamps->encoded_timestamps = encoding_start(tstamps);
+	} else
+		tstamps = NULL;
+
+	if (query_option_flags & QUERY_ADD_CHECKSUMS) {
+		csums = tal(tmpctx,
+			    struct tlv_reply_channel_range_tlvs_checksums_tlv);
+		csums->checksums
+			= tal_arr(csums, struct channel_update_checksums, 0);
+	} else
+		csums = NULL;
+
+	/* Avoid underflow: we don't use block 0 anyway */
+	if (first_blocknum == 0)
+		scid_ok = mk_short_channel_id(&scid, 1, 0, 0);
+	else
+		scid_ok = mk_short_channel_id(&scid, first_blocknum, 0, 0);
+	scid.u64--;
+	if (!scid_ok)
+		return false;
+
+	/* We keep a `uintmap` of `short_channel_id` to `struct chan *`.
+	 * Unlike a htable, it's efficient to iterate through, but it only
+	 * works because each short_channel_id is basically a 64-bit unsigned
+	 * integer.
+	 *
+	 * First we iterate and gather all the short channel ids. */
+	while (uintmap_after(&rstate->chanmap, &scid.u64)) {
+		struct chan *chan;
+		struct channel_update_timestamps ts;
+		struct channel_update_checksums cs;
+		u32 blocknum = short_channel_id_blocknum(&scid);
+		if (blocknum >= first_blocknum + number_of_blocks)
+			break;
+
+		encoding_add_short_channel_id(&encoded_scids, &scid);
+
+		/* FIXME: Store csum in header. */
+		chan = get_channel(rstate, &scid);
+		get_checksum_and_timestamp(rstate, chan, 0,
+					   &ts.timestamp_node_id_1,
+					   &cs.checksum_node_id_1);
+		get_checksum_and_timestamp(rstate, chan, 1,
+					   &ts.timestamp_node_id_2,
+					   &cs.checksum_node_id_2);
+
+		if (csums)
+			tal_arr_expand(&csums->checksums, cs);
+		if (tstamps)
+			encoding_add_timestamps(&tstamps->encoded_timestamps,
+						&ts);
+	}
+
+	extension_bytes = 0;
+
+	/* If either of these can't fit in max_encoded_bytes by itself,
+	 * it's over. */
+	if (csums) {
+		extension_bytes += tlv_len(csums->checksums);
+	}
+
+	if (tstamps) {
+		if (!encoding_end_external_type(&tstamps->encoded_timestamps,
+						&tstamps->encoding_type,
+						max_encoded_bytes))
+			goto wont_fit;
+		/* 1 byte for encoding_type, too */
+		extension_bytes += 1 + tlv_len(tstamps->encoded_timestamps);
+	}
+
+	/* If we can encode that, fine: send it */
+	if (extension_bytes <= max_encoded_bytes
+	    && encoding_end_prepend_type(&encoded_scids,
+					 max_encoded_bytes - extension_bytes)) {
+		reply_channel_range(peer, first_blocknum,
+				    number_of_blocks + tail_blocks,
+				    encoded_scids,
+				    tstamps, csums);
+		return true;
+	}
+
+wont_fit:
+	/* It wouldn't all fit: divide in half */
+	/* We assume we can always send one block! */
+	if (number_of_blocks <= 1) {
+		/* We always assume we can send 1 block's worth */
+		status_broken("Could not fit scids for single block %u",
+			      first_blocknum);
+		return false;
+	}
+	status_debug("queue_channel_ranges full: splitting %u+%u and %u+%u(+%u)",
+		     first_blocknum,
+		     number_of_blocks / 2,
+		     first_blocknum + number_of_blocks / 2,
+		     number_of_blocks - number_of_blocks / 2,
+		     tail_blocks);
+	return queue_channel_ranges(peer, first_blocknum, number_of_blocks / 2,
+				    0, query_option_flags)
+		&& queue_channel_ranges(peer, first_blocknum + number_of_blocks / 2,
+					number_of_blocks - number_of_blocks / 2,
+					tail_blocks, query_option_flags);
+}
+
+/*~ The peer can ask for all channels in a series of blocks.  We reply with one
+ * or more messages containing the short_channel_ids. */
+const u8 *handle_query_channel_range(struct peer *peer, const u8 *msg)
+{
+	struct routing_state *rstate = peer->daemon->rstate;
+	struct bitcoin_blkid chain_hash;
+	u32 first_blocknum, number_of_blocks, tail_blocks;
+	struct short_channel_id last_scid;
+	enum query_option_flags query_option_flags;
+	struct tlv_query_channel_range_tlvs *tlvs
+		= tlv_query_channel_range_tlvs_new(msg);
+
+	if (!fromwire_query_channel_range(msg, &chain_hash,
+					  &first_blocknum, &number_of_blocks,
+					  tlvs)) {
+		return towire_errorfmt(peer, NULL,
+				       "Bad query_channel_range w/tlvs %s",
+				       tal_hex(tmpctx, msg));
+	}
+	if (tlvs->query_option)
+		query_option_flags = tlvs->query_option->query_option_flags;
+	else
+		query_option_flags = 0;
+
+	/* If they ask for the wrong chain, we give an empty response
+	 * with the `complete` flag unset */
+	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain_hash)) {
+		status_debug("%s sent query_channel_range chainhash %s",
+			     type_to_string(tmpctx, struct node_id, &peer->id),
+			     type_to_string(tmpctx, struct bitcoin_blkid,
+					    &chain_hash));
+		u8 *end = towire_reply_channel_range(NULL, &chain_hash, first_blocknum,
+		                                     number_of_blocks, false, NULL, NULL);
+		queue_peer_msg(peer, take(end));
+		return NULL;
+	}
+
+	/* If they ask for number_of_blocks UINTMAX, and we have to divide
+	 * and conquer, we'll do a lot of unnecessary work.  Cap it at the
+	 * last value we have, then send an empty reply. */
+	if (uintmap_last(&rstate->chanmap, &last_scid.u64)) {
+		u32 last_block = short_channel_id_blocknum(&last_scid);
+
+		/* u64 here avoids overflow on number_of_blocks
+		   UINTMAX for example */
+		if ((u64)first_blocknum + number_of_blocks > last_block) {
+			tail_blocks = first_blocknum + number_of_blocks
+				- last_block - 1;
+			number_of_blocks -= tail_blocks;
+		} else
+			tail_blocks = 0;
+	} else
+		tail_blocks = 0;
+
+	if (!queue_channel_ranges(peer, first_blocknum, number_of_blocks,
+				  tail_blocks, query_option_flags))
+		return towire_errorfmt(peer, NULL,
+				       "Invalid query_channel_range %u+%u",
+				       first_blocknum, number_of_blocks + tail_blocks);
+
+	return NULL;
+}
+
+/*~ This is the reply we get when we send query_channel_range; we keep
+ * expecting them until the entire range we asked for is covered. */
+const u8 *handle_reply_channel_range(struct peer *peer, const u8 *msg)
+{
+	struct bitcoin_blkid chain;
+	u8 complete;
+	u32 first_blocknum, number_of_blocks, start, end;
+	u8 *encoded;
+	struct short_channel_id *scids;
+	size_t n;
+	unsigned long b;
+	struct tlv_reply_channel_range_tlvs *tlvs
+		= tlv_reply_channel_range_tlvs_new(tmpctx);
+
+	if (!fromwire_reply_channel_range(tmpctx, msg, &chain, &first_blocknum,
+					  &number_of_blocks, &complete,
+					  &encoded, tlvs)) {
+		return towire_errorfmt(peer, NULL,
+				       "Bad reply_channel_range w/tlvs %s",
+				       tal_hex(tmpctx, msg));
+	}
+
+	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
+		return towire_errorfmt(peer, NULL,
+				       "reply_channel_range for bad chain: %s",
+				       tal_hex(tmpctx, msg));
+	}
+
+	if (!peer->query_channel_blocks) {
+		return towire_errorfmt(peer, NULL,
+				       "reply_channel_range without query: %s",
+				       tal_hex(tmpctx, msg));
+	}
+
+	/* Beware overflow! */
+	if (first_blocknum + number_of_blocks < first_blocknum) {
+		return towire_errorfmt(peer, NULL,
+				       "reply_channel_range invalid %u+%u",
+				       first_blocknum, number_of_blocks);
+	}
+
+	scids = decode_short_ids(tmpctx, encoded);
+	if (!scids) {
+		return towire_errorfmt(peer, NULL,
+				       "Bad reply_channel_range encoding %s",
+				       tal_hex(tmpctx, encoded));
+	}
+
+	status_debug("peer %s reply_channel_range %u+%u (of %u+%u) %zu scids",
+		     type_to_string(tmpctx, struct node_id, &peer->id),
+		     first_blocknum, number_of_blocks,
+		     peer->range_first_blocknum,
+		     peer->range_end_blocknum - peer->range_first_blocknum,
+		     tal_count(scids));
+
+	/* BOLT #7:
+	 *
+	 * The receiver of `query_channel_range`:
+	 *...
+	 *  - MUST respond with one or more `reply_channel_range` whose
+	 *    combined range cover the requested `first_blocknum` to
+	 *    `first_blocknum` plus `number_of_blocks` minus one.
+	 */
+	/* ie. They can be outside range we asked, but they must overlap! */
+	if (first_blocknum + number_of_blocks <= peer->range_first_blocknum
+	    || first_blocknum >= peer->range_end_blocknum) {
+		return towire_errorfmt(peer, NULL,
+				       "reply_channel_range invalid %u+%u for query %u+%u",
+				       first_blocknum, number_of_blocks,
+				       peer->range_first_blocknum,
+				       peer->range_end_blocknum
+				       - peer->range_first_blocknum);
+	}
+
+	start = first_blocknum;
+	end = first_blocknum + number_of_blocks;
+	/* Trim to make it a subset of what we want. */
+	if (start < peer->range_first_blocknum)
+		start = peer->range_first_blocknum;
+	if (end > peer->range_end_blocknum)
+		end = peer->range_end_blocknum;
+
+	/* We keep a bitmap of what blocks have been covered by replies: bit 0
+	 * represents block peer->range_first_blocknum */
+	b = bitmap_ffs(peer->query_channel_blocks,
+		       start - peer->range_first_blocknum,
+		       end - peer->range_first_blocknum);
+	if (b != end - peer->range_first_blocknum) {
+		return towire_errorfmt(peer, NULL,
+				       "reply_channel_range %u+%u already have block %lu",
+				       first_blocknum, number_of_blocks,
+				       peer->range_first_blocknum + b);
+	}
+
+	/* Mark that short_channel_ids for this block have been received */
+	bitmap_fill_range(peer->query_channel_blocks,
+			  start - peer->range_first_blocknum,
+			  end - peer->range_first_blocknum);
+	peer->range_blocks_remaining -= end - start;
+
+	/* Add scids */
+	n = tal_count(peer->query_channel_scids);
+	tal_resize(&peer->query_channel_scids, n + tal_count(scids));
+	memcpy(peer->query_channel_scids + n, scids, tal_bytelen(scids));
+
+	/* Still more to go? */
+	if (peer->range_blocks_remaining)
+		return NULL;
+
+	/* All done, send reply to lightningd: that's currently the only thing
+	 * which triggers this (for testing).  Eventually we might start probing
+	 * for gossip information on our own. */
+	msg = towire_gossip_query_channel_range_reply(NULL,
+						      first_blocknum,
+						      number_of_blocks,
+						      complete,
+						      peer->query_channel_scids);
+	daemon_conn_send(peer->daemon->master, take(msg));
+	peer->query_channel_scids = tal_free(peer->query_channel_scids);
+	peer->query_channel_blocks = tal_free(peer->query_channel_blocks);
+	return NULL;
+}
+
+/*~ When we ask about an array of short_channel_ids, we get all channel &
+ * node announcements and channel updates which the peer knows.  There's an
+ * explicit end packet; this is needed to differentiate between 'I'm slow'
+ * and 'I don't know those channels'. */
+const u8 *handle_reply_short_channel_ids_end(struct peer *peer, const u8 *msg)
+{
+	struct bitcoin_blkid chain;
+	u8 complete;
+
+	if (!fromwire_reply_short_channel_ids_end(msg, &chain, &complete)) {
+		return towire_errorfmt(peer, NULL,
+				       "Bad reply_short_channel_ids_end %s",
+				       tal_hex(tmpctx, msg));
+	}
+
+	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
+		return towire_errorfmt(peer, NULL,
+				       "reply_short_channel_ids_end for bad chain: %s",
+				       tal_hex(tmpctx, msg));
+	}
+
+	if (!peer->scid_query_outstanding) {
+		return towire_errorfmt(peer, NULL,
+				       "unexpected reply_short_channel_ids_end: %s",
+				       tal_hex(tmpctx, msg));
+	}
+
+	peer->scid_query_outstanding = false;
+
+	/* If it wasn't generated by us, it's the dev interface from lightningd
+	 */
+	if (!peer->scid_query_was_internal) {
+		msg = towire_gossip_scids_reply(msg, true, complete);
+		daemon_conn_send(peer->daemon->master, take(msg));
+	}
+
+	/* All good, no error. */
+	return NULL;
+}
+
+/*~ Arbitrary ordering function of pubkeys.
+ *
+ * Note that we could use memcmp() here: even if they had somehow different
+ * bitwise representations for the same key, we copied them all from struct
+ * node which should make them unique.  Even if not (say, a node vanished
+ * and reappeared) we'd just end up sending two node_announcement for the
+ * same node.
+ */
+static int pubkey_order(const struct node_id *k1,
+			const struct node_id *k2,
+			void *unused UNUSED)
+{
+	return node_id_cmp(k1, k2);
+}
+
+static void uniquify_node_ids(struct node_id **ids)
+{
+	size_t dst, src;
+
+	/* BOLT #7:
+	 *   - SHOULD avoid sending duplicate `node_announcements` in
+	 *     response to a single `query_short_channel_ids`.
+	 */
+	/* ccan/asort is a typesafe qsort wrapper: like most ccan modules
+	 * it eschews exposing 'void *' pointers and ensures that the
+	 * callback function and its arguments match types correctly. */
+	asort(*ids, tal_count(*ids), pubkey_order, NULL);
+
+	/* Compact the array */
+	for (dst = 0, src = 0; src < tal_count(*ids); src++) {
+		if (dst && node_id_eq(&(*ids)[dst-1], &(*ids)[src]))
+			continue;
+		(*ids)[dst++] = (*ids)[src];
+	}
+
+	/* And trim to length, so tal_count() gives correct answer. */
+	tal_resize(ids, dst);
+}
+
+/* We are fairly careful to avoid the peer DoSing us with channel queries:
+ * this routine sends information about a single short_channel_id, unless
+ * it's finished all of them. */
+void maybe_send_query_responses(struct peer *peer)
+{
+	struct routing_state *rstate = peer->daemon->rstate;
+	size_t i, num;
+	bool sent = false;
+
+	/* BOLT #7:
+	 *
+	 *   - MUST respond to each known `short_channel_id`:
+	 */
+	/* Search for next short_channel_id we know about. */
+	num = tal_count(peer->scid_queries);
+	for (i = peer->scid_query_idx; !sent && i < num; i++) {
+		struct chan *chan;
+
+		chan = get_channel(rstate, &peer->scid_queries[i]);
+		if (!chan || !is_chan_public(chan))
+			continue;
+
+		/* BOLT #7:
+		 * - if bit 0 of `query_flag` is set:
+		 *   - MUST reply with a `channel_announcement`
+		 */
+		if (peer->scid_query_flags[i] & SCID_QF_ANNOUNCE) {
+			queue_peer_from_store(peer, &chan->bcast);
+			sent = true;
+		}
+
+		/* BOLT #7:
+		 * - if bit 1 of `query_flag` is set and it has received a
+		 *   `channel_update` from `node_id_1`:
+		 *   - MUST reply with the latest `channel_update` for
+		 *   `node_id_1`
+		 * - if bit 2 of `query_flag` is set and it has received a
+		 *   `channel_update` from `node_id_2`:
+		 *   - MUST reply with the latest `channel_update` for
+		 *   `node_id_2` */
+		if ((peer->scid_query_flags[i] & SCID_QF_UPDATE1)
+		    && is_halfchan_defined(&chan->half[0])) {
+			queue_peer_from_store(peer, &chan->half[0].bcast);
+			sent = true;
+		}
+		if ((peer->scid_query_flags[i] & SCID_QF_UPDATE2)
+		    && is_halfchan_defined(&chan->half[1])) {
+			queue_peer_from_store(peer, &chan->half[1].bcast);
+			sent = true;
+		}
+
+		/* BOLT #7:
+		 * - if bit 3 of `query_flag` is set and it has received
+		 *   a `node_announcement` from `node_id_1`:
+		 *   - MUST reply with the latest `node_announcement` for
+		 *   `node_id_1`
+		 * - if bit 4 of `query_flag` is set and it has received a
+		 *    `node_announcement` from `node_id_2`:
+		 *   - MUST reply with the latest `node_announcement` for
+		 *   `node_id_2` */
+		/* Save node ids for later transmission of node_announcement */
+		if (peer->scid_query_flags[i] & SCID_QF_NODE1)
+			tal_arr_expand(&peer->scid_query_nodes,
+				       chan->nodes[0]->id);
+		if (peer->scid_query_flags[i] & SCID_QF_NODE2)
+			tal_arr_expand(&peer->scid_query_nodes,
+				       chan->nodes[1]->id);
+	}
+
+	/* Just finished channels?  Remove duplicate nodes. */
+	if (peer->scid_query_idx != num && i == num)
+		uniquify_node_ids(&peer->scid_query_nodes);
+
+	/* Update index for next time we're called. */
+	peer->scid_query_idx = i;
+
+	/* BOLT #7:
+	 *
+	 *    - if the incoming message does not include `encoded_query_flags`:
+	 *      ...
+	 *      - MUST follow with any `node_announcement`s for each
+	 *      `channel_announcement`
+	 *    - otherwise:
+	 *      ...
+	 *      - if bit 3 of `query_flag` is set and it has received a
+	 *        `node_announcement` from `node_id_1`:
+	 *        - MUST reply with the latest `node_announcement` for
+	 *          `node_id_1`
+	 *      - if bit 4 of `query_flag` is set and it has received a
+	 *        `node_announcement` from `node_id_2`:
+	 *        - MUST reply with the latest `node_announcement` for
+	 *          `node_id_2`
+	 */
+	/* If we haven't sent anything above, we look for the next
+	 * node_announcement to send. */
+	num = tal_count(peer->scid_query_nodes);
+	for (i = peer->scid_query_nodes_idx; !sent && i < num; i++) {
+		const struct node *n;
+
+		/* Not every node announces itself (we know it exists because
+		 * of a channel_announcement, however) */
+		n = get_node(rstate, &peer->scid_query_nodes[i]);
+		if (!n || !n->bcast.index)
+			continue;
+
+		queue_peer_from_store(peer, &n->bcast);
+		sent = true;
+	}
+	peer->scid_query_nodes_idx = i;
+
+	/* All finished? */
+	if (peer->scid_queries
+	    && peer->scid_query_idx == tal_count(peer->scid_queries)
+	    && peer->scid_query_nodes_idx == num) {
+		/* BOLT #7:
+		 *
+		 * - MUST follow these responses with
+		 *   `reply_short_channel_ids_end`.
+		 *   - if does not maintain up-to-date channel information for
+		 *     `chain_hash`:
+		 *      - MUST set `complete` to 0.
+		 *   - otherwise:
+		 *      - SHOULD set `complete` to 1.
+		 */
+		/* FIXME: We consider ourselves to have complete knowledge. */
+		u8 *end = towire_reply_short_channel_ids_end(peer,
+							     &peer->daemon->chain_hash,
+							     true);
+		queue_peer_msg(peer, take(end));
+
+		/* We're done!  Clean up so we simply pass-through next time. */
+		peer->scid_queries = tal_free(peer->scid_queries);
+		peer->scid_query_flags = tal_free(peer->scid_query_flags);
+		peer->scid_query_idx = 0;
+		peer->scid_query_nodes = tal_free(peer->scid_query_nodes);
+		peer->scid_query_nodes_idx = 0;
+	}
+}
+
+#if DEVELOPER
+struct io_plan *query_scids_req(struct io_conn *conn,
+				struct daemon *daemon,
+				const u8 *msg)
+{
+	struct node_id id;
+	struct short_channel_id *scids;
+	struct peer *peer;
+
+	if (!fromwire_gossip_query_scids(msg, msg, &id, &scids))
+		master_badmsg(WIRE_GOSSIP_QUERY_SCIDS, msg);
+
+	peer = find_peer(daemon, &id);
+	if (!peer) {
+		status_broken("query_scids: unknown peer %s",
+			      type_to_string(tmpctx, struct node_id, &id));
+		daemon_conn_send(daemon->master,
+				 take(towire_gossip_scids_reply(NULL,
+								false, false)));
+	} else if (!query_short_channel_ids(daemon, peer, scids, false))
+		daemon_conn_send(daemon->master,
+				 take(towire_gossip_scids_reply(NULL,
+								false, false)));
+	return daemon_conn_read_next(conn, daemon->master);
+}
+
+/* FIXME: One day this will be called internally; for now it's just for
+ * testing with dev_query_channel_range. */
+struct io_plan *query_channel_range(struct io_conn *conn,
+				    struct daemon *daemon,
+				    const u8 *msg)
+{
+	struct node_id id;
+	u32 first_blocknum, number_of_blocks;
+	struct peer *peer;
+
+	if (!fromwire_gossip_query_channel_range(msg, &id, &first_blocknum,
+						 &number_of_blocks))
+		master_badmsg(WIRE_GOSSIP_QUERY_SCIDS, msg);
+
+	peer = find_peer(daemon, &id);
+	if (!peer) {
+		status_broken("query_channel_range: unknown peer %s",
+			      type_to_string(tmpctx, struct node_id, &id));
+		goto fail;
+	}
+
+	if (!peer->gossip_queries_feature) {
+		status_broken("query_channel_range: no gossip_query support in peer %s",
+			      type_to_string(tmpctx, struct node_id, &id));
+		goto fail;
+	}
+
+	if (peer->query_channel_blocks) {
+		status_broken("query_channel_range: previous query active");
+		goto fail;
+	}
+
+	/* Check for overflow on 32-bit machines! */
+	if (BITMAP_NWORDS(number_of_blocks) < number_of_blocks / BITMAP_WORD_BITS) {
+		status_broken("query_channel_range: huge number_of_blocks (%u) not supported",
+			number_of_blocks);
+		goto fail;
+	}
+
+	status_debug("sending query_channel_range for blocks %u+%u",
+		     first_blocknum, number_of_blocks);
+
+	msg = towire_query_channel_range(NULL, &daemon->chain_hash,
+					 first_blocknum, number_of_blocks,
+					 NULL);
+	queue_peer_msg(peer, take(msg));
+	peer->range_first_blocknum = first_blocknum;
+	peer->range_end_blocknum = first_blocknum + number_of_blocks;
+	peer->range_blocks_remaining = number_of_blocks;
+	peer->query_channel_blocks = tal_arrz(peer, bitmap,
+					      BITMAP_NWORDS(number_of_blocks));
+	peer->query_channel_scids = tal_arr(peer, struct short_channel_id, 0);
+
+out:
+	return daemon_conn_read_next(conn, daemon->master);
+
+fail:
+	daemon_conn_send(daemon->master,
+			 take(towire_gossip_query_channel_range_reply(NULL,
+								      0, 0,
+								      false,
+								      NULL)));
+	goto out;
+}
+
+/* This is a testing hack to allow us to artificially lower the maximum bytes
+ * of short_channel_ids we'll encode, using dev_set_max_scids_encode_size. */
+struct io_plan *dev_set_max_scids_encode_size(struct io_conn *conn,
+					      struct daemon *daemon,
+					      const u8 *msg)
+{
+	if (!fromwire_gossip_dev_set_max_scids_encode_size(msg,
+							   &max_encoding_bytes))
+		master_badmsg(WIRE_GOSSIP_DEV_SET_MAX_SCIDS_ENCODE_SIZE, msg);
+
+	status_debug("Set max_scids_encode_bytes to %u", max_encoding_bytes);
+	return daemon_conn_read_next(conn, daemon->master);
+}
+#endif /* DEVELOPER */
diff --git a/gossipd/queries.h b/gossipd/queries.h
new file mode 100644
index 000000000..19c271109
--- /dev/null
+++ b/gossipd/queries.h
@@ -0,0 +1,41 @@
+#ifndef LIGHTNING_GOSSIPD_QUERIES_H
+#define LIGHTNING_GOSSIPD_QUERIES_H
+#include "config.h"
+#include <ccan/short_types/short_types.h>
+
+struct daemon;
+struct io_conn;
+struct peer;
+struct short_channel_id;
+
+/* Various handlers when peer fwds a gossip query msg: return is NULL or
+ * error packet. */
+const u8 *handle_query_short_channel_ids(struct peer *peer, const u8 *msg);
+const u8 *handle_reply_short_channel_ids_end(struct peer *peer, const u8 *msg);
+const u8 *handle_query_channel_range(struct peer *peer, const u8 *msg);
+const u8 *handle_reply_channel_range(struct peer *peer, const u8 *msg);
+
+void query_unknown_channel(struct daemon *daemon,
+			   struct peer *peer,
+			   const struct short_channel_id *id);
+
+/* This is called when the peer is idle. */
+void maybe_send_query_responses(struct peer *peer);
+
+#if DEVELOPER
+struct io_plan *query_scids_req(struct io_conn *conn,
+				struct daemon *daemon,
+				const u8 *msg);
+
+struct io_plan *query_channel_range(struct io_conn *conn,
+				    struct daemon *daemon,
+				    const u8 *msg);
+
+/* This is a testing hack to allow us to artificially lower the maximum bytes
+ * of short_channel_ids we'll encode, using dev_set_max_scids_encode_size. */
+struct io_plan *dev_set_max_scids_encode_size(struct io_conn *conn,
+					      struct daemon *daemon,
+					      const u8 *msg);
+#endif /* DEVELOPER */
+
+#endif /* LIGHTNING_GOSSIPD_QUERIES_H */
diff --git a/gossipd/test/run-crc32_of_update.c b/gossipd/test/run-crc32_of_update.c
index 157a3e68d..035c2f8a1 100644
--- a/gossipd/test/run-crc32_of_update.c
+++ b/gossipd/test/run-crc32_of_update.c
@@ -1,22 +1,11 @@
 int unused_main(int argc, char *argv[]);
 #define main unused_main
-#include "../gossipd.c"
+#include "../queries.c"
 #include "../gossip_generation.c"
 #undef main
 #include <stdio.h>
 
 /* AUTOGENERATED MOCKS START */
-/* Generated stub for check_ping_make_pong */
-bool check_ping_make_pong(const tal_t *ctx UNNEEDED, const u8 *ping UNNEEDED, u8 **pong UNNEEDED)
-{ fprintf(stderr, "check_ping_make_pong called!\n"); abort(); }
-/* Generated stub for daemon_conn_new_ */
-struct daemon_conn *daemon_conn_new_(const tal_t *ctx UNNEEDED, int fd UNNEEDED,
-				     struct io_plan *(*recv)(struct io_conn * UNNEEDED,
-							     const u8 * UNNEEDED,
-							     void *) UNNEEDED,
-				     void (*outq_empty)(void *) UNNEEDED,
-				     void *arg UNNEEDED)
-{ fprintf(stderr, "daemon_conn_new_ called!\n"); abort(); }
 /* Generated stub for daemon_conn_read_next */
 struct io_plan *daemon_conn_read_next(struct io_conn *conn UNNEEDED,
 				      struct daemon_conn *dc UNNEEDED)
@@ -24,15 +13,9 @@ struct io_plan *daemon_conn_read_next(struct io_conn *conn UNNEEDED,
 /* Generated stub for daemon_conn_send */
 void daemon_conn_send(struct daemon_conn *dc UNNEEDED, const u8 *msg UNNEEDED)
 { fprintf(stderr, "daemon_conn_send called!\n"); abort(); }
-/* Generated stub for daemon_conn_send_fd */
-void daemon_conn_send_fd(struct daemon_conn *dc UNNEEDED, int fd UNNEEDED)
-{ fprintf(stderr, "daemon_conn_send_fd called!\n"); abort(); }
 /* Generated stub for daemon_conn_wake */
 void daemon_conn_wake(struct daemon_conn *dc UNNEEDED)
 { fprintf(stderr, "daemon_conn_wake called!\n"); abort(); }
-/* Generated stub for daemon_shutdown */
-void daemon_shutdown(void)
-{ fprintf(stderr, "daemon_shutdown called!\n"); abort(); }
 /* Generated stub for decode_scid_query_flags */
 bigsize_t *decode_scid_query_flags(const tal_t *ctx UNNEEDED,
 				   const struct tlv_query_short_channel_ids_tlvs_query_flags *qf UNNEEDED)
@@ -40,216 +23,66 @@ bigsize_t *decode_scid_query_flags(const tal_t *ctx UNNEEDED,
 /* Generated stub for decode_short_ids */
 struct short_channel_id *decode_short_ids(const tal_t *ctx UNNEEDED, const u8 *encoded UNNEEDED)
 { fprintf(stderr, "decode_short_ids called!\n"); abort(); }
-/* Generated stub for dump_memleak */
-bool dump_memleak(struct htable *memtable UNNEEDED)
-{ fprintf(stderr, "dump_memleak called!\n"); abort(); }
-/* Generated stub for first_chan */
-struct chan *first_chan(const struct node *node UNNEEDED, struct chan_map_iter *i UNNEEDED)
-{ fprintf(stderr, "first_chan called!\n"); abort(); }
-/* Generated stub for free_chan */
-void free_chan(struct routing_state *rstate UNNEEDED, struct chan *chan UNNEEDED)
-{ fprintf(stderr, "free_chan called!\n"); abort(); }
-/* Generated stub for fromwire_amount_below_minimum */
-bool fromwire_amount_below_minimum(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct amount_msat *htlc_msat UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_amount_below_minimum called!\n"); abort(); }
-/* Generated stub for fromwire_expiry_too_soon */
-bool fromwire_expiry_too_soon(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_expiry_too_soon called!\n"); abort(); }
-/* Generated stub for fromwire_fee_insufficient */
-bool fromwire_fee_insufficient(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct amount_msat *htlc_msat UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_fee_insufficient called!\n"); abort(); }
-/* Generated stub for fromwire_gossipctl_init */
-bool fromwire_gossipctl_init(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct bitcoin_blkid *chain_hash UNNEEDED, struct node_id *id UNNEEDED, u8 **globalfeatures UNNEEDED, u8 rgb[3] UNNEEDED, u8 alias[32] UNNEEDED, struct wireaddr **announcable UNNEEDED, u32 **dev_gossip_time UNNEEDED, bool *dev_fast_gossip UNNEEDED)
-{ fprintf(stderr, "fromwire_gossipctl_init called!\n"); abort(); }
+/* Generated stub for find_peer */
+struct peer *find_peer(struct daemon *daemon UNNEEDED, const struct node_id *id UNNEEDED)
+{ fprintf(stderr, "find_peer called!\n"); abort(); }
 /* Generated stub for fromwire_gossip_dev_set_max_scids_encode_size */
 bool fromwire_gossip_dev_set_max_scids_encode_size(const void *p UNNEEDED, u32 *max UNNEEDED)
 { fprintf(stderr, "fromwire_gossip_dev_set_max_scids_encode_size called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_dev_set_time */
-bool fromwire_gossip_dev_set_time(const void *p UNNEEDED, u32 *dev_gossip_time UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_dev_set_time called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_dev_suppress */
-bool fromwire_gossip_dev_suppress(const void *p UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_dev_suppress called!\n"); abort(); }
-/* Generated stub for fromwire_gossipd_get_update */
-bool fromwire_gossipd_get_update(const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossipd_get_update called!\n"); abort(); }
 /* Generated stub for fromwire_gossipd_local_channel_update */
 bool fromwire_gossipd_local_channel_update(const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED, bool *disable UNNEEDED, u16 *cltv_expiry_delta UNNEEDED, struct amount_msat *htlc_minimum_msat UNNEEDED, u32 *fee_base_msat UNNEEDED, u32 *fee_proportional_millionths UNNEEDED, struct amount_msat *htlc_maximum_msat UNNEEDED)
 { fprintf(stderr, "fromwire_gossipd_local_channel_update called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_get_addrs */
-bool fromwire_gossip_get_addrs(const void *p UNNEEDED, struct node_id *id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_get_addrs called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_get_channel_peer */
-bool fromwire_gossip_get_channel_peer(const void *p UNNEEDED, struct short_channel_id *channel_id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_get_channel_peer called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_getchannels_request */
-bool fromwire_gossip_getchannels_request(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct short_channel_id **short_channel_id UNNEEDED, struct node_id **source UNNEEDED, struct short_channel_id **prev UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_getchannels_request called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_get_incoming_channels */
-bool fromwire_gossip_get_incoming_channels(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, bool **private_too UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_get_incoming_channels called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_getnodes_request */
-bool fromwire_gossip_getnodes_request(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct node_id **id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_getnodes_request called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_getroute_request */
-bool fromwire_gossip_getroute_request(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct node_id **source UNNEEDED, struct node_id *destination UNNEEDED, struct amount_msat *msatoshi UNNEEDED, u64 *riskfactor_by_million UNNEEDED, u32 *final_cltv UNNEEDED, double *fuzz UNNEEDED, struct exclude_entry ***excluded UNNEEDED, u32 *max_hops UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_getroute_request called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_get_txout_reply */
-bool fromwire_gossip_get_txout_reply(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED, struct amount_sat *satoshis UNNEEDED, u8 **outscript UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_get_txout_reply called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_local_channel_close */
-bool fromwire_gossip_local_channel_close(const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_local_channel_close called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_new_blockheight */
-bool fromwire_gossip_new_blockheight(const void *p UNNEEDED, u32 *blockheight UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_new_blockheight called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_new_peer */
-bool fromwire_gossip_new_peer(const void *p UNNEEDED, struct node_id *id UNNEEDED, bool *gossip_queries_feature UNNEEDED, bool *initial_routing_sync UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_new_peer called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_outpoint_spent */
-bool fromwire_gossip_outpoint_spent(const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_outpoint_spent called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_payment_failure */
-bool fromwire_gossip_payment_failure(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct node_id *erring_node UNNEEDED, struct short_channel_id *erring_channel UNNEEDED, u8 *erring_channel_direction UNNEEDED, u8 **error UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_payment_failure called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_ping */
-bool fromwire_gossip_ping(const void *p UNNEEDED, struct node_id *id UNNEEDED, u16 *num_pong_bytes UNNEEDED, u16 *len UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_ping called!\n"); abort(); }
 /* Generated stub for fromwire_gossip_query_channel_range */
 bool fromwire_gossip_query_channel_range(const void *p UNNEEDED, struct node_id *id UNNEEDED, u32 *first_blocknum UNNEEDED, u32 *number_of_blocks UNNEEDED)
 { fprintf(stderr, "fromwire_gossip_query_channel_range called!\n"); abort(); }
 /* Generated stub for fromwire_gossip_query_scids */
 bool fromwire_gossip_query_scids(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct node_id *id UNNEEDED, struct short_channel_id **ids UNNEEDED)
 { fprintf(stderr, "fromwire_gossip_query_scids called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_send_timestamp_filter */
-bool fromwire_gossip_send_timestamp_filter(const void *p UNNEEDED, struct node_id *id UNNEEDED, u32 *first_timestamp UNNEEDED, u32 *timestamp_range UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_send_timestamp_filter called!\n"); abort(); }
 /* Generated stub for fromwire_hsm_cupdate_sig_reply */
 bool fromwire_hsm_cupdate_sig_reply(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, u8 **cu UNNEEDED)
 { fprintf(stderr, "fromwire_hsm_cupdate_sig_reply called!\n"); abort(); }
 /* Generated stub for fromwire_hsm_node_announcement_sig_reply */
 bool fromwire_hsm_node_announcement_sig_reply(const void *p UNNEEDED, secp256k1_ecdsa_signature *signature UNNEEDED)
 { fprintf(stderr, "fromwire_hsm_node_announcement_sig_reply called!\n"); abort(); }
-/* Generated stub for fromwire_incorrect_cltv_expiry */
-bool fromwire_incorrect_cltv_expiry(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, u32 *cltv_expiry UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_incorrect_cltv_expiry called!\n"); abort(); }
-/* Generated stub for fromwire_temporary_channel_failure */
-bool fromwire_temporary_channel_failure(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_temporary_channel_failure called!\n"); abort(); }
 /* Generated stub for get_node */
 struct node *get_node(struct routing_state *rstate UNNEEDED,
 		      const struct node_id *id UNNEEDED)
 { fprintf(stderr, "get_node called!\n"); abort(); }
-/* Generated stub for get_route */
-struct route_hop *get_route(const tal_t *ctx UNNEEDED, struct routing_state *rstate UNNEEDED,
-			    const struct node_id *source UNNEEDED,
-			    const struct node_id *destination UNNEEDED,
-			    const struct amount_msat msat UNNEEDED, double riskfactor UNNEEDED,
-			    u32 final_cltv UNNEEDED,
-			    double fuzz UNNEEDED,
-			    u64 seed UNNEEDED,
-			    struct exclude_entry **excluded UNNEEDED,
-			    u32 max_hops UNNEEDED)
-{ fprintf(stderr, "get_route called!\n"); abort(); }
-/* Generated stub for gossip_peerd_wire_type_name */
-const char *gossip_peerd_wire_type_name(int e UNNEEDED)
-{ fprintf(stderr, "gossip_peerd_wire_type_name called!\n"); abort(); }
-/* Generated stub for gossip_store_compact */
-bool gossip_store_compact(struct gossip_store *gs UNNEEDED)
-{ fprintf(stderr, "gossip_store_compact called!\n"); abort(); }
 /* Generated stub for gossip_store_get */
 const u8 *gossip_store_get(const tal_t *ctx UNNEEDED,
 			   struct gossip_store *gs UNNEEDED,
 			   u64 offset UNNEEDED)
 { fprintf(stderr, "gossip_store_get called!\n"); abort(); }
-/* Generated stub for gossip_store_load */
-bool gossip_store_load(struct routing_state *rstate UNNEEDED, struct gossip_store *gs UNNEEDED)
-{ fprintf(stderr, "gossip_store_load called!\n"); abort(); }
-/* Generated stub for gossip_store_readonly_fd */
-int gossip_store_readonly_fd(struct gossip_store *gs UNNEEDED)
-{ fprintf(stderr, "gossip_store_readonly_fd called!\n"); abort(); }
 /* Generated stub for gossip_time_now */
 struct timeabs gossip_time_now(const struct routing_state *rstate UNNEEDED)
 { fprintf(stderr, "gossip_time_now called!\n"); abort(); }
-/* Generated stub for got_pong */
-const char *got_pong(const u8 *pong UNNEEDED, size_t *num_pings_outstanding UNNEEDED)
-{ fprintf(stderr, "got_pong called!\n"); abort(); }
-/* Generated stub for handle_channel_announcement */
-u8 *handle_channel_announcement(struct routing_state *rstate UNNEEDED,
-				const u8 *announce TAKES UNNEEDED,
-				u32 current_blockheight UNNEEDED,
-				const struct short_channel_id **scid UNNEEDED)
-{ fprintf(stderr, "handle_channel_announcement called!\n"); abort(); }
 /* Generated stub for handle_channel_update */
 u8 *handle_channel_update(struct routing_state *rstate UNNEEDED, const u8 *update TAKES UNNEEDED,
 			  const char *source UNNEEDED,
 			  struct short_channel_id *unknown_scid UNNEEDED)
 { fprintf(stderr, "handle_channel_update called!\n"); abort(); }
-/* Generated stub for handle_local_add_channel */
-bool handle_local_add_channel(struct routing_state *rstate UNNEEDED, const u8 *msg UNNEEDED,
-			      u64 index UNNEEDED)
-{ fprintf(stderr, "handle_local_add_channel called!\n"); abort(); }
 /* Generated stub for handle_node_announcement */
 u8 *handle_node_announcement(struct routing_state *rstate UNNEEDED, const u8 *node UNNEEDED)
 { fprintf(stderr, "handle_node_announcement called!\n"); abort(); }
-/* Generated stub for handle_pending_cannouncement */
-bool handle_pending_cannouncement(struct routing_state *rstate UNNEEDED,
-				  const struct short_channel_id *scid UNNEEDED,
-				  const struct amount_sat sat UNNEEDED,
-				  const u8 *txscript UNNEEDED)
-{ fprintf(stderr, "handle_pending_cannouncement called!\n"); abort(); }
-/* Generated stub for make_ping */
-u8 *make_ping(const tal_t *ctx UNNEEDED, u16 num_pong_bytes UNNEEDED, u16 padlen UNNEEDED)
-{ fprintf(stderr, "make_ping called!\n"); abort(); }
 /* Generated stub for master_badmsg */
 void master_badmsg(u32 type_expected UNNEEDED, const u8 *msg)
 { fprintf(stderr, "master_badmsg called!\n"); abort(); }
-/* Generated stub for memleak_enter_allocations */
-struct htable *memleak_enter_allocations(const tal_t *ctx UNNEEDED,
-					 const void *exclude1 UNNEEDED,
-					 const void *exclude2 UNNEEDED)
-{ fprintf(stderr, "memleak_enter_allocations called!\n"); abort(); }
-/* Generated stub for memleak_remove_referenced */
-void memleak_remove_referenced(struct htable *memtable UNNEEDED, const void *root UNNEEDED)
-{ fprintf(stderr, "memleak_remove_referenced called!\n"); abort(); }
 /* Generated stub for new_reltimer_ */
 struct oneshot *new_reltimer_(struct timers *timers UNNEEDED,
 			      const tal_t *ctx UNNEEDED,
 			      struct timerel expire UNNEEDED,
 			      void (*cb)(void *) UNNEEDED, void *arg UNNEEDED)
 { fprintf(stderr, "new_reltimer_ called!\n"); abort(); }
-/* Generated stub for new_routing_state */
-struct routing_state *new_routing_state(const tal_t *ctx UNNEEDED,
-					const struct chainparams *chainparams UNNEEDED,
-					const struct node_id *local_id UNNEEDED,
-					struct list_head *peers UNNEEDED,
-					const u32 *dev_gossip_time TAKES UNNEEDED,
-					bool dev_fast_gossip UNNEEDED)
-{ fprintf(stderr, "new_routing_state called!\n"); abort(); }
-/* Generated stub for next_chan */
-struct chan *next_chan(const struct node *node UNNEEDED, struct chan_map_iter *i UNNEEDED)
-{ fprintf(stderr, "next_chan called!\n"); abort(); }
 /* Generated stub for notleak_ */
 void *notleak_(const void *ptr UNNEEDED, bool plus_children UNNEEDED)
 { fprintf(stderr, "notleak_ called!\n"); abort(); }
-/* Generated stub for read_addresses */
-struct wireaddr *read_addresses(const tal_t *ctx UNNEEDED, const u8 *ser UNNEEDED)
-{ fprintf(stderr, "read_addresses called!\n"); abort(); }
-/* Generated stub for remove_channel_from_store */
-void remove_channel_from_store(struct routing_state *rstate UNNEEDED,
-			       struct chan *chan UNNEEDED)
-{ fprintf(stderr, "remove_channel_from_store called!\n"); abort(); }
-/* Generated stub for route_prune */
-void route_prune(struct routing_state *rstate UNNEEDED)
-{ fprintf(stderr, "route_prune called!\n"); abort(); }
-/* Generated stub for routing_failure */
-void routing_failure(struct routing_state *rstate UNNEEDED,
-		     const struct node_id *erring_node UNNEEDED,
-		     const struct short_channel_id *erring_channel UNNEEDED,
-		     int erring_direction UNNEEDED,
-		     enum onion_type failcode UNNEEDED,
-		     const u8 *channel_update UNNEEDED)
-{ fprintf(stderr, "routing_failure called!\n"); abort(); }
+/* Generated stub for queue_peer_from_store */
+void queue_peer_from_store(struct peer *peer UNNEEDED,
+			   const struct broadcastable *bcast UNNEEDED)
+{ fprintf(stderr, "queue_peer_from_store called!\n"); abort(); }
+/* Generated stub for queue_peer_msg */
+void queue_peer_msg(struct peer *peer UNNEEDED, const u8 *msg TAKES UNNEEDED)
+{ fprintf(stderr, "queue_peer_msg called!\n"); abort(); }
 /* Generated stub for status_failed */
 void status_failed(enum status_failreason code UNNEEDED,
 		   const char *fmt UNNEEDED, ...)
@@ -258,59 +91,11 @@ void status_failed(enum status_failreason code UNNEEDED,
 void status_fmt(enum log_level level UNNEEDED, const char *fmt UNNEEDED, ...)
 
 { fprintf(stderr, "status_fmt called!\n"); abort(); }
-/* Generated stub for status_setup_async */
-void status_setup_async(struct daemon_conn *master UNNEEDED)
-{ fprintf(stderr, "status_setup_async called!\n"); abort(); }
-/* Generated stub for subdaemon_setup */
-void subdaemon_setup(int argc UNNEEDED, char *argv[])
-{ fprintf(stderr, "subdaemon_setup called!\n"); abort(); }
-/* Generated stub for timer_expired */
-void timer_expired(tal_t *ctx UNNEEDED, struct timer *timer UNNEEDED)
-{ fprintf(stderr, "timer_expired called!\n"); abort(); }
 /* Generated stub for towire_errorfmt */
 u8 *towire_errorfmt(const tal_t *ctx UNNEEDED,
 		    const struct channel_id *channel UNNEEDED,
 		    const char *fmt UNNEEDED, ...)
 { fprintf(stderr, "towire_errorfmt called!\n"); abort(); }
-/* Generated stub for towire_gossip_dev_compact_store_reply */
-u8 *towire_gossip_dev_compact_store_reply(const tal_t *ctx UNNEEDED, bool success UNNEEDED)
-{ fprintf(stderr, "towire_gossip_dev_compact_store_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_dev_memleak_reply */
-u8 *towire_gossip_dev_memleak_reply(const tal_t *ctx UNNEEDED, bool leak UNNEEDED)
-{ fprintf(stderr, "towire_gossip_dev_memleak_reply called!\n"); abort(); }
-/* Generated stub for towire_gossipd_get_update_reply */
-u8 *towire_gossipd_get_update_reply(const tal_t *ctx UNNEEDED, const u8 *update UNNEEDED)
-{ fprintf(stderr, "towire_gossipd_get_update_reply called!\n"); abort(); }
-/* Generated stub for towire_gossipd_new_store_fd */
-u8 *towire_gossipd_new_store_fd(const tal_t *ctx UNNEEDED, u64 offset_shorter UNNEEDED)
-{ fprintf(stderr, "towire_gossipd_new_store_fd called!\n"); abort(); }
-/* Generated stub for towire_gossip_get_addrs_reply */
-u8 *towire_gossip_get_addrs_reply(const tal_t *ctx UNNEEDED, const struct wireaddr *addrs UNNEEDED)
-{ fprintf(stderr, "towire_gossip_get_addrs_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_get_channel_peer_reply */
-u8 *towire_gossip_get_channel_peer_reply(const tal_t *ctx UNNEEDED, const struct node_id *peer_id UNNEEDED)
-{ fprintf(stderr, "towire_gossip_get_channel_peer_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_getchannels_reply */
-u8 *towire_gossip_getchannels_reply(const tal_t *ctx UNNEEDED, bool complete UNNEEDED, const struct gossip_getchannels_entry **nodes UNNEEDED)
-{ fprintf(stderr, "towire_gossip_getchannels_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_get_incoming_channels_reply */
-u8 *towire_gossip_get_incoming_channels_reply(const tal_t *ctx UNNEEDED, const struct route_info *route_info UNNEEDED)
-{ fprintf(stderr, "towire_gossip_get_incoming_channels_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_getnodes_reply */
-u8 *towire_gossip_getnodes_reply(const tal_t *ctx UNNEEDED, const struct gossip_getnodes_entry **nodes UNNEEDED)
-{ fprintf(stderr, "towire_gossip_getnodes_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_getroute_reply */
-u8 *towire_gossip_getroute_reply(const tal_t *ctx UNNEEDED, const struct route_hop *hops UNNEEDED)
-{ fprintf(stderr, "towire_gossip_getroute_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_get_txout */
-u8 *towire_gossip_get_txout(const tal_t *ctx UNNEEDED, const struct short_channel_id *short_channel_id UNNEEDED)
-{ fprintf(stderr, "towire_gossip_get_txout called!\n"); abort(); }
-/* Generated stub for towire_gossip_new_peer_reply */
-u8 *towire_gossip_new_peer_reply(const tal_t *ctx UNNEEDED, bool success UNNEEDED, const struct gossip_state *gs UNNEEDED)
-{ fprintf(stderr, "towire_gossip_new_peer_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_ping_reply */
-u8 *towire_gossip_ping_reply(const tal_t *ctx UNNEEDED, const struct node_id *id UNNEEDED, bool sent UNNEEDED, u16 totlen UNNEEDED)
-{ fprintf(stderr, "towire_gossip_ping_reply called!\n"); abort(); }
 /* Generated stub for towire_gossip_query_channel_range_reply */
 u8 *towire_gossip_query_channel_range_reply(const tal_t *ctx UNNEEDED, u32 final_first_block UNNEEDED, u32 final_num_blocks UNNEEDED, bool final_complete UNNEEDED, const struct short_channel_id *scids UNNEEDED)
 { fprintf(stderr, "towire_gossip_query_channel_range_reply called!\n"); abort(); }
diff --git a/gossipd/test/run-extended-info.c b/gossipd/test/run-extended-info.c
index 0fa27c471..542da7a90 100644
--- a/gossipd/test/run-extended-info.c
+++ b/gossipd/test/run-extended-info.c
@@ -1,12 +1,8 @@
 #include "config.h"
 
-#define main gossipd_main
-int gossipd_main(int argc, char *argv[]);
-
 #define ZLIB_EVEN_IF_EXPANDS 1
 
-#include "../gossipd.c"
-#undef main
+#include "../queries.c"
 #include <ccan/str/hex/hex.h>
 #include <common/json.h>
 #include <common/json_helpers.h>
@@ -17,17 +13,6 @@ int gossipd_main(int argc, char *argv[]);
 #endif
 
 /* AUTOGENERATED MOCKS START */
-/* Generated stub for check_ping_make_pong */
-bool check_ping_make_pong(const tal_t *ctx UNNEEDED, const u8 *ping UNNEEDED, u8 **pong UNNEEDED)
-{ fprintf(stderr, "check_ping_make_pong called!\n"); abort(); }
-/* Generated stub for daemon_conn_new_ */
-struct daemon_conn *daemon_conn_new_(const tal_t *ctx UNNEEDED, int fd UNNEEDED,
-				     struct io_plan *(*recv)(struct io_conn * UNNEEDED,
-							     const u8 * UNNEEDED,
-							     void *) UNNEEDED,
-				     void (*outq_empty)(void *) UNNEEDED,
-				     void *arg UNNEEDED)
-{ fprintf(stderr, "daemon_conn_new_ called!\n"); abort(); }
 /* Generated stub for daemon_conn_read_next */
 struct io_plan *daemon_conn_read_next(struct io_conn *conn UNNEEDED,
 				      struct daemon_conn *dc UNNEEDED)
@@ -35,15 +20,9 @@ struct io_plan *daemon_conn_read_next(struct io_conn *conn UNNEEDED,
 /* Generated stub for daemon_conn_send */
 void daemon_conn_send(struct daemon_conn *dc UNNEEDED, const u8 *msg UNNEEDED)
 { fprintf(stderr, "daemon_conn_send called!\n"); abort(); }
-/* Generated stub for daemon_conn_send_fd */
-void daemon_conn_send_fd(struct daemon_conn *dc UNNEEDED, int fd UNNEEDED)
-{ fprintf(stderr, "daemon_conn_send_fd called!\n"); abort(); }
 /* Generated stub for daemon_conn_wake */
 void daemon_conn_wake(struct daemon_conn *dc UNNEEDED)
 { fprintf(stderr, "daemon_conn_wake called!\n"); abort(); }
-/* Generated stub for daemon_shutdown */
-void daemon_shutdown(void)
-{ fprintf(stderr, "daemon_shutdown called!\n"); abort(); }
 /* Generated stub for decode_scid_query_flags */
 bigsize_t *decode_scid_query_flags(const tal_t *ctx UNNEEDED,
 				   const struct tlv_query_short_channel_ids_tlvs_query_flags *qf UNNEEDED)
@@ -51,93 +30,18 @@ bigsize_t *decode_scid_query_flags(const tal_t *ctx UNNEEDED,
 /* Generated stub for decode_short_ids */
 struct short_channel_id *decode_short_ids(const tal_t *ctx UNNEEDED, const u8 *encoded UNNEEDED)
 { fprintf(stderr, "decode_short_ids called!\n"); abort(); }
-/* Generated stub for dump_memleak */
-bool dump_memleak(struct htable *memtable UNNEEDED)
-{ fprintf(stderr, "dump_memleak called!\n"); abort(); }
-/* Generated stub for first_chan */
-struct chan *first_chan(const struct node *node UNNEEDED, struct chan_map_iter *i UNNEEDED)
-{ fprintf(stderr, "first_chan called!\n"); abort(); }
-/* Generated stub for free_chan */
-void free_chan(struct routing_state *rstate UNNEEDED, struct chan *chan UNNEEDED)
-{ fprintf(stderr, "free_chan called!\n"); abort(); }
-/* Generated stub for fromwire_amount_below_minimum */
-bool fromwire_amount_below_minimum(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct amount_msat *htlc_msat UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_amount_below_minimum called!\n"); abort(); }
-/* Generated stub for fromwire_expiry_too_soon */
-bool fromwire_expiry_too_soon(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_expiry_too_soon called!\n"); abort(); }
-/* Generated stub for fromwire_fee_insufficient */
-bool fromwire_fee_insufficient(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct amount_msat *htlc_msat UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_fee_insufficient called!\n"); abort(); }
-/* Generated stub for fromwire_gossipctl_init */
-bool fromwire_gossipctl_init(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct bitcoin_blkid *chain_hash UNNEEDED, struct node_id *id UNNEEDED, u8 **globalfeatures UNNEEDED, u8 rgb[3] UNNEEDED, u8 alias[32] UNNEEDED, struct wireaddr **announcable UNNEEDED, u32 **dev_gossip_time UNNEEDED, bool *dev_fast_gossip UNNEEDED)
-{ fprintf(stderr, "fromwire_gossipctl_init called!\n"); abort(); }
+/* Generated stub for find_peer */
+struct peer *find_peer(struct daemon *daemon UNNEEDED, const struct node_id *id UNNEEDED)
+{ fprintf(stderr, "find_peer called!\n"); abort(); }
 /* Generated stub for fromwire_gossip_dev_set_max_scids_encode_size */
 bool fromwire_gossip_dev_set_max_scids_encode_size(const void *p UNNEEDED, u32 *max UNNEEDED)
 { fprintf(stderr, "fromwire_gossip_dev_set_max_scids_encode_size called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_dev_set_time */
-bool fromwire_gossip_dev_set_time(const void *p UNNEEDED, u32 *dev_gossip_time UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_dev_set_time called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_dev_suppress */
-bool fromwire_gossip_dev_suppress(const void *p UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_dev_suppress called!\n"); abort(); }
-/* Generated stub for fromwire_gossipd_get_update */
-bool fromwire_gossipd_get_update(const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossipd_get_update called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_get_addrs */
-bool fromwire_gossip_get_addrs(const void *p UNNEEDED, struct node_id *id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_get_addrs called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_get_channel_peer */
-bool fromwire_gossip_get_channel_peer(const void *p UNNEEDED, struct short_channel_id *channel_id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_get_channel_peer called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_getchannels_request */
-bool fromwire_gossip_getchannels_request(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct short_channel_id **short_channel_id UNNEEDED, struct node_id **source UNNEEDED, struct short_channel_id **prev UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_getchannels_request called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_get_incoming_channels */
-bool fromwire_gossip_get_incoming_channels(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, bool **private_too UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_get_incoming_channels called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_getnodes_request */
-bool fromwire_gossip_getnodes_request(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct node_id **id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_getnodes_request called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_getroute_request */
-bool fromwire_gossip_getroute_request(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct node_id **source UNNEEDED, struct node_id *destination UNNEEDED, struct amount_msat *msatoshi UNNEEDED, u64 *riskfactor_by_million UNNEEDED, u32 *final_cltv UNNEEDED, double *fuzz UNNEEDED, struct exclude_entry ***excluded UNNEEDED, u32 *max_hops UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_getroute_request called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_get_txout_reply */
-bool fromwire_gossip_get_txout_reply(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED, struct amount_sat *satoshis UNNEEDED, u8 **outscript UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_get_txout_reply called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_local_channel_close */
-bool fromwire_gossip_local_channel_close(const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_local_channel_close called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_new_blockheight */
-bool fromwire_gossip_new_blockheight(const void *p UNNEEDED, u32 *blockheight UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_new_blockheight called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_new_peer */
-bool fromwire_gossip_new_peer(const void *p UNNEEDED, struct node_id *id UNNEEDED, bool *gossip_queries_feature UNNEEDED, bool *initial_routing_sync UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_new_peer called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_outpoint_spent */
-bool fromwire_gossip_outpoint_spent(const void *p UNNEEDED, struct short_channel_id *short_channel_id UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_outpoint_spent called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_payment_failure */
-bool fromwire_gossip_payment_failure(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct node_id *erring_node UNNEEDED, struct short_channel_id *erring_channel UNNEEDED, u8 *erring_channel_direction UNNEEDED, u8 **error UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_payment_failure called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_ping */
-bool fromwire_gossip_ping(const void *p UNNEEDED, struct node_id *id UNNEEDED, u16 *num_pong_bytes UNNEEDED, u16 *len UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_ping called!\n"); abort(); }
 /* Generated stub for fromwire_gossip_query_channel_range */
 bool fromwire_gossip_query_channel_range(const void *p UNNEEDED, struct node_id *id UNNEEDED, u32 *first_blocknum UNNEEDED, u32 *number_of_blocks UNNEEDED)
 { fprintf(stderr, "fromwire_gossip_query_channel_range called!\n"); abort(); }
 /* Generated stub for fromwire_gossip_query_scids */
 bool fromwire_gossip_query_scids(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct node_id *id UNNEEDED, struct short_channel_id **ids UNNEEDED)
 { fprintf(stderr, "fromwire_gossip_query_scids called!\n"); abort(); }
-/* Generated stub for fromwire_gossip_send_timestamp_filter */
-bool fromwire_gossip_send_timestamp_filter(const void *p UNNEEDED, struct node_id *id UNNEEDED, u32 *first_timestamp UNNEEDED, u32 *timestamp_range UNNEEDED)
-{ fprintf(stderr, "fromwire_gossip_send_timestamp_filter called!\n"); abort(); }
-/* Generated stub for fromwire_incorrect_cltv_expiry */
-bool fromwire_incorrect_cltv_expiry(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, u32 *cltv_expiry UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_incorrect_cltv_expiry called!\n"); abort(); }
-/* Generated stub for fromwire_temporary_channel_failure */
-bool fromwire_temporary_channel_failure(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, u8 **channel_update UNNEEDED)
-{ fprintf(stderr, "fromwire_temporary_channel_failure called!\n"); abort(); }
 /* Generated stub for get_cupdate_parts */
 void get_cupdate_parts(const u8 *channel_update UNNEEDED,
 		       const u8 *parts[2] UNNEEDED,
@@ -147,186 +51,26 @@ void get_cupdate_parts(const u8 *channel_update UNNEEDED,
 struct node *get_node(struct routing_state *rstate UNNEEDED,
 		      const struct node_id *id UNNEEDED)
 { fprintf(stderr, "get_node called!\n"); abort(); }
-/* Generated stub for get_route */
-struct route_hop *get_route(const tal_t *ctx UNNEEDED, struct routing_state *rstate UNNEEDED,
-			    const struct node_id *source UNNEEDED,
-			    const struct node_id *destination UNNEEDED,
-			    const struct amount_msat msat UNNEEDED, double riskfactor UNNEEDED,
-			    u32 final_cltv UNNEEDED,
-			    double fuzz UNNEEDED,
-			    u64 seed UNNEEDED,
-			    struct exclude_entry **excluded UNNEEDED,
-			    u32 max_hops UNNEEDED)
-{ fprintf(stderr, "get_route called!\n"); abort(); }
-/* Generated stub for gossip_peerd_wire_type_name */
-const char *gossip_peerd_wire_type_name(int e UNNEEDED)
-{ fprintf(stderr, "gossip_peerd_wire_type_name called!\n"); abort(); }
-/* Generated stub for gossip_store_compact */
-bool gossip_store_compact(struct gossip_store *gs UNNEEDED)
-{ fprintf(stderr, "gossip_store_compact called!\n"); abort(); }
 /* Generated stub for gossip_store_get */
 const u8 *gossip_store_get(const tal_t *ctx UNNEEDED,
 			   struct gossip_store *gs UNNEEDED,
 			   u64 offset UNNEEDED)
 { fprintf(stderr, "gossip_store_get called!\n"); abort(); }
-/* Generated stub for gossip_store_load */
-bool gossip_store_load(struct routing_state *rstate UNNEEDED, struct gossip_store *gs UNNEEDED)
-{ fprintf(stderr, "gossip_store_load called!\n"); abort(); }
-/* Generated stub for gossip_store_readonly_fd */
-int gossip_store_readonly_fd(struct gossip_store *gs UNNEEDED)
-{ fprintf(stderr, "gossip_store_readonly_fd called!\n"); abort(); }
-/* Generated stub for gossip_time_now */
-struct timeabs gossip_time_now(const struct routing_state *rstate UNNEEDED)
-{ fprintf(stderr, "gossip_time_now called!\n"); abort(); }
-/* Generated stub for got_pong */
-const char *got_pong(const u8 *pong UNNEEDED, size_t *num_pings_outstanding UNNEEDED)
-{ fprintf(stderr, "got_pong called!\n"); abort(); }
-/* Generated stub for handle_channel_announcement */
-u8 *handle_channel_announcement(struct routing_state *rstate UNNEEDED,
-				const u8 *announce TAKES UNNEEDED,
-				u32 current_blockheight UNNEEDED,
-				const struct short_channel_id **scid UNNEEDED)
-{ fprintf(stderr, "handle_channel_announcement called!\n"); abort(); }
-/* Generated stub for handle_channel_update */
-u8 *handle_channel_update(struct routing_state *rstate UNNEEDED, const u8 *update TAKES UNNEEDED,
-			  const char *source UNNEEDED,
-			  struct short_channel_id *unknown_scid UNNEEDED)
-{ fprintf(stderr, "handle_channel_update called!\n"); abort(); }
-/* Generated stub for handle_local_add_channel */
-bool handle_local_add_channel(struct routing_state *rstate UNNEEDED, const u8 *msg UNNEEDED,
-			      u64 index UNNEEDED)
-{ fprintf(stderr, "handle_local_add_channel called!\n"); abort(); }
-/* Generated stub for handle_local_channel_update */
-bool handle_local_channel_update(struct daemon *daemon UNNEEDED,
-				 const struct node_id *src UNNEEDED,
-				 const u8 *msg UNNEEDED)
-{ fprintf(stderr, "handle_local_channel_update called!\n"); abort(); }
-/* Generated stub for handle_node_announcement */
-u8 *handle_node_announcement(struct routing_state *rstate UNNEEDED, const u8 *node UNNEEDED)
-{ fprintf(stderr, "handle_node_announcement called!\n"); abort(); }
-/* Generated stub for handle_pending_cannouncement */
-bool handle_pending_cannouncement(struct routing_state *rstate UNNEEDED,
-				  const struct short_channel_id *scid UNNEEDED,
-				  const struct amount_sat sat UNNEEDED,
-				  const u8 *txscript UNNEEDED)
-{ fprintf(stderr, "handle_pending_cannouncement called!\n"); abort(); }
-/* Generated stub for make_ping */
-u8 *make_ping(const tal_t *ctx UNNEEDED, u16 num_pong_bytes UNNEEDED, u16 padlen UNNEEDED)
-{ fprintf(stderr, "make_ping called!\n"); abort(); }
 /* Generated stub for master_badmsg */
 void master_badmsg(u32 type_expected UNNEEDED, const u8 *msg)
 { fprintf(stderr, "master_badmsg called!\n"); abort(); }
-/* Generated stub for maybe_send_own_node_announce */
-void maybe_send_own_node_announce(struct daemon *daemon UNNEEDED)
-{ fprintf(stderr, "maybe_send_own_node_announce called!\n"); abort(); }
-/* Generated stub for memleak_enter_allocations */
-struct htable *memleak_enter_allocations(const tal_t *ctx UNNEEDED,
-					 const void *exclude1 UNNEEDED,
-					 const void *exclude2 UNNEEDED)
-{ fprintf(stderr, "memleak_enter_allocations called!\n"); abort(); }
-/* Generated stub for memleak_remove_referenced */
-void memleak_remove_referenced(struct htable *memtable UNNEEDED, const void *root UNNEEDED)
-{ fprintf(stderr, "memleak_remove_referenced called!\n"); abort(); }
-/* Generated stub for new_reltimer_ */
-struct oneshot *new_reltimer_(struct timers *timers UNNEEDED,
-			      const tal_t *ctx UNNEEDED,
-			      struct timerel expire UNNEEDED,
-			      void (*cb)(void *) UNNEEDED, void *arg UNNEEDED)
-{ fprintf(stderr, "new_reltimer_ called!\n"); abort(); }
-/* Generated stub for new_routing_state */
-struct routing_state *new_routing_state(const tal_t *ctx UNNEEDED,
-					const struct chainparams *chainparams UNNEEDED,
-					const struct node_id *local_id UNNEEDED,
-					struct list_head *peers UNNEEDED,
-					const u32 *dev_gossip_time TAKES UNNEEDED,
-					bool dev_fast_gossip UNNEEDED)
-{ fprintf(stderr, "new_routing_state called!\n"); abort(); }
-/* Generated stub for next_chan */
-struct chan *next_chan(const struct node *node UNNEEDED, struct chan_map_iter *i UNNEEDED)
-{ fprintf(stderr, "next_chan called!\n"); abort(); }
-/* Generated stub for notleak_ */
-void *notleak_(const void *ptr UNNEEDED, bool plus_children UNNEEDED)
-{ fprintf(stderr, "notleak_ called!\n"); abort(); }
-/* Generated stub for read_addresses */
-struct wireaddr *read_addresses(const tal_t *ctx UNNEEDED, const u8 *ser UNNEEDED)
-{ fprintf(stderr, "read_addresses called!\n"); abort(); }
-/* Generated stub for refresh_local_channel */
-void refresh_local_channel(struct daemon *daemon UNNEEDED,
-			   struct local_chan *local_chan UNNEEDED,
-			   bool even_if_identical UNNEEDED)
-{ fprintf(stderr, "refresh_local_channel called!\n"); abort(); }
-/* Generated stub for remove_channel_from_store */
-void remove_channel_from_store(struct routing_state *rstate UNNEEDED,
-			       struct chan *chan UNNEEDED)
-{ fprintf(stderr, "remove_channel_from_store called!\n"); abort(); }
-/* Generated stub for route_prune */
-void route_prune(struct routing_state *rstate UNNEEDED)
-{ fprintf(stderr, "route_prune called!\n"); abort(); }
-/* Generated stub for routing_failure */
-void routing_failure(struct routing_state *rstate UNNEEDED,
-		     const struct node_id *erring_node UNNEEDED,
-		     const struct short_channel_id *erring_channel UNNEEDED,
-		     int erring_direction UNNEEDED,
-		     enum onion_type failcode UNNEEDED,
-		     const u8 *channel_update UNNEEDED)
-{ fprintf(stderr, "routing_failure called!\n"); abort(); }
-/* Generated stub for status_failed */
-void status_failed(enum status_failreason code UNNEEDED,
-		   const char *fmt UNNEEDED, ...)
-{ fprintf(stderr, "status_failed called!\n"); abort(); }
-/* Generated stub for status_setup_async */
-void status_setup_async(struct daemon_conn *master UNNEEDED)
-{ fprintf(stderr, "status_setup_async called!\n"); abort(); }
-/* Generated stub for subdaemon_setup */
-void subdaemon_setup(int argc UNNEEDED, char *argv[])
-{ fprintf(stderr, "subdaemon_setup called!\n"); abort(); }
-/* Generated stub for timer_expired */
-void timer_expired(tal_t *ctx UNNEEDED, struct timer *timer UNNEEDED)
-{ fprintf(stderr, "timer_expired called!\n"); abort(); }
+/* Generated stub for queue_peer_from_store */
+void queue_peer_from_store(struct peer *peer UNNEEDED,
+			   const struct broadcastable *bcast UNNEEDED)
+{ fprintf(stderr, "queue_peer_from_store called!\n"); abort(); }
+/* Generated stub for queue_peer_msg */
+void queue_peer_msg(struct peer *peer UNNEEDED, const u8 *msg TAKES UNNEEDED)
+{ fprintf(stderr, "queue_peer_msg called!\n"); abort(); }
 /* Generated stub for towire_errorfmt */
 u8 *towire_errorfmt(const tal_t *ctx UNNEEDED,
 		    const struct channel_id *channel UNNEEDED,
 		    const char *fmt UNNEEDED, ...)
 { fprintf(stderr, "towire_errorfmt called!\n"); abort(); }
-/* Generated stub for towire_gossip_dev_compact_store_reply */
-u8 *towire_gossip_dev_compact_store_reply(const tal_t *ctx UNNEEDED, bool success UNNEEDED)
-{ fprintf(stderr, "towire_gossip_dev_compact_store_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_dev_memleak_reply */
-u8 *towire_gossip_dev_memleak_reply(const tal_t *ctx UNNEEDED, bool leak UNNEEDED)
-{ fprintf(stderr, "towire_gossip_dev_memleak_reply called!\n"); abort(); }
-/* Generated stub for towire_gossipd_get_update_reply */
-u8 *towire_gossipd_get_update_reply(const tal_t *ctx UNNEEDED, const u8 *update UNNEEDED)
-{ fprintf(stderr, "towire_gossipd_get_update_reply called!\n"); abort(); }
-/* Generated stub for towire_gossipd_new_store_fd */
-u8 *towire_gossipd_new_store_fd(const tal_t *ctx UNNEEDED, u64 offset_shorter UNNEEDED)
-{ fprintf(stderr, "towire_gossipd_new_store_fd called!\n"); abort(); }
-/* Generated stub for towire_gossip_get_addrs_reply */
-u8 *towire_gossip_get_addrs_reply(const tal_t *ctx UNNEEDED, const struct wireaddr *addrs UNNEEDED)
-{ fprintf(stderr, "towire_gossip_get_addrs_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_get_channel_peer_reply */
-u8 *towire_gossip_get_channel_peer_reply(const tal_t *ctx UNNEEDED, const struct node_id *peer_id UNNEEDED)
-{ fprintf(stderr, "towire_gossip_get_channel_peer_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_getchannels_reply */
-u8 *towire_gossip_getchannels_reply(const tal_t *ctx UNNEEDED, bool complete UNNEEDED, const struct gossip_getchannels_entry **nodes UNNEEDED)
-{ fprintf(stderr, "towire_gossip_getchannels_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_get_incoming_channels_reply */
-u8 *towire_gossip_get_incoming_channels_reply(const tal_t *ctx UNNEEDED, const struct route_info *route_info UNNEEDED)
-{ fprintf(stderr, "towire_gossip_get_incoming_channels_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_getnodes_reply */
-u8 *towire_gossip_getnodes_reply(const tal_t *ctx UNNEEDED, const struct gossip_getnodes_entry **nodes UNNEEDED)
-{ fprintf(stderr, "towire_gossip_getnodes_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_getroute_reply */
-u8 *towire_gossip_getroute_reply(const tal_t *ctx UNNEEDED, const struct route_hop *hops UNNEEDED)
-{ fprintf(stderr, "towire_gossip_getroute_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_get_txout */
-u8 *towire_gossip_get_txout(const tal_t *ctx UNNEEDED, const struct short_channel_id *short_channel_id UNNEEDED)
-{ fprintf(stderr, "towire_gossip_get_txout called!\n"); abort(); }
-/* Generated stub for towire_gossip_new_peer_reply */
-u8 *towire_gossip_new_peer_reply(const tal_t *ctx UNNEEDED, bool success UNNEEDED, const struct gossip_state *gs UNNEEDED)
-{ fprintf(stderr, "towire_gossip_new_peer_reply called!\n"); abort(); }
-/* Generated stub for towire_gossip_ping_reply */
-u8 *towire_gossip_ping_reply(const tal_t *ctx UNNEEDED, const struct node_id *id UNNEEDED, bool sent UNNEEDED, u16 totlen UNNEEDED)
-{ fprintf(stderr, "towire_gossip_ping_reply called!\n"); abort(); }
 /* Generated stub for towire_gossip_query_channel_range_reply */
 u8 *towire_gossip_query_channel_range_reply(const tal_t *ctx UNNEEDED, u32 final_first_block UNNEEDED, u32 final_num_blocks UNNEEDED, bool final_complete UNNEEDED, const struct short_channel_id *scids UNNEEDED)
 { fprintf(stderr, "towire_gossip_query_channel_range_reply called!\n"); abort(); }
@@ -335,15 +79,6 @@ u8 *towire_gossip_scids_reply(const tal_t *ctx UNNEEDED, bool ok UNNEEDED, bool
 { fprintf(stderr, "towire_gossip_scids_reply called!\n"); abort(); }
 /* AUTOGENERATED MOCKS END */
 
-#if DEVELOPER
-/* Generated stub for memleak_remove_htable */
-void memleak_remove_htable(struct htable *memtable UNNEEDED, const struct htable *ht UNNEEDED)
-{ fprintf(stderr, "memleak_remove_htable called!\n"); abort(); }
-/* Generated stub for memleak_remove_intmap_ */
-void memleak_remove_intmap_(struct htable *memtable UNNEEDED, const struct intmap *m UNNEEDED)
-{ fprintf(stderr, "memleak_remove_intmap_ called!\n"); abort(); }
-#endif
-
 /* Generated stub for status_fmt */
 void status_fmt(enum log_level level UNNEEDED, const char *fmt UNNEEDED, ...)
 {