
developer: consolidate gossip timing options into one --dev-fast-gossip.

It's generally clearer to have simple hardcoded numbers with an
#if DEVELOPER around them than apparent variables which aren't
really variable.

Interestingly, our pruning test was always kinda broken: it had to wait
out two prune cycles, since l2 will refresh the channel once to avoid
pruning.

Do the more obvious thing, and cut the network in half and check that
l1 and l3 time out.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Rusty Russell
parent commit 147eaced2e
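
To make the commit message's point concrete, here is a minimal, self-contained sketch (an editorial illustration, not part of the commit) of the pattern introduced here. The real definitions appear in gossipd/gossip_constants.h below; IFDEV() is a local stand-in for the helper that header pulls in from common/utils.h, and the flag is the value lightningd passes through the *_init messages.

/* Sketch of the DEV_FAST_GOSSIP pattern: in a non-DEVELOPER build the
 * macro collapses to the hardcoded normal value; only DEVELOPER builds
 * can select the fast value via --dev-fast-gossip. */
#include <stdbool.h>
#include <stdio.h>

#define DEVELOPER 1			/* assume a developer build for this sketch */

#if DEVELOPER
#define IFDEV(dev, nondev) (dev)	/* stand-in for common/utils.h */
#else
#define IFDEV(dev, nondev) (nondev)
#endif

#define DEV_FAST_GOSSIP(flag, fast, normal) \
	IFDEV((flag) ? (fast) : (normal), (normal))

/* BOLT #7: "SHOULD flush outgoing gossip messages once every 60 seconds" */
#define GOSSIP_FLUSH_INTERVAL(flag) DEV_FAST_GOSSIP(flag, 1, 60)

int main(void)
{
	bool dev_fast_gossip = true;	/* set by --dev-fast-gossip */

	/* Prints 1 here; prints 60 if the flag is false or DEVELOPER is 0. */
	printf("flush interval: %d seconds\n",
	       GOSSIP_FLUSH_INTERVAL(dev_fast_gossip));
	return 0;
}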
33 changed files:

  channeld/channel_wire.csv          |  2
  channeld/channeld.c                |  9
  closingd/closing_wire.csv          |  1
  closingd/closingd.c                |  3
  common/per_peer_state.c            | 14
  common/per_peer_state.h            |  8
  connectd/connectd.c                |  4
  gossipd/gossip_constants.h         | 37
  gossipd/gossip_wire.csv            |  3
  gossipd/gossipd.c                  | 32
  gossipd/gossipd.h                  |  3
  gossipd/make_gossip.c              |  8
  gossipd/routing.c                  | 14
  gossipd/routing.h                  | 10
  gossipd/test/run-crc32_of_update.c |  6
  gossipd/test/run-extended-info.c   |  6
  lightningd/channel_control.c       |  6
  lightningd/closing_control.c       |  8
  lightningd/gossip_control.c        |  7
  lightningd/lightningd.c            |  1
  lightningd/lightningd.h            |  9
  lightningd/opening_control.c       |  3
  lightningd/options.c               | 30
  lightningd/peer_control.c          |  6
  openingd/opening_wire.csv          |  1
  openingd/openingd.c                |  3
  tests/test_closing.py              |  2
  tests/test_connection.py           |  2
  tests/test_gossip.py               | 31
  tests/test_invoices.py             |  4
  tests/test_misc.py                 |  2
  tests/test_pay.py                  | 10
  tests/utils.py                     |  2

channeld/channel_wire.csv (2 changed lines)

@ -68,8 +68,8 @@ msgdata,channel_init,upfront_shutdown_script_len,u16,
msgdata,channel_init,upfront_shutdown_script,u8,upfront_shutdown_script_len
msgdata,channel_init,remote_ann_node_sig,?secp256k1_ecdsa_signature,
msgdata,channel_init,remote_ann_bitcoin_sig,?secp256k1_ecdsa_signature,
msgdata,channel_init,announce_delay,u32,
msgdata,channel_init,option_static_remotekey,bool,
msgdata,channel_init,dev_fast_gossip,bool,
# master->channeld funding hit new depth(funding locked if >= lock depth)
msgtype,channel_funding_depth,1002


channeld/channeld.c (9 changed lines)

@ -110,9 +110,6 @@ struct peer {
u64 commit_timer_attempts;
u32 commit_msec;
/* How long to delay before broadcasting announcement? */
u32 announce_delay;
/* Are we expecting a pong? */
bool expecting_pong;
@ -505,7 +502,7 @@ static void channel_announcement_negotiate(struct peer *peer)
/* Give other nodes time to notice new block. */
notleak(new_reltimer(&peer->timers, peer,
time_from_sec(peer->announce_delay),
time_from_sec(GOSSIP_ANNOUNCE_DELAY(dev_fast_gossip)),
announce_channel, peer));
}
}
@ -3000,8 +2997,8 @@ static void init_channel(struct peer *peer)
&peer->remote_upfront_shutdown_script,
&remote_ann_node_sig,
&remote_ann_bitcoin_sig,
&peer->announce_delay,
&option_static_remotekey)) {
&option_static_remotekey,
&dev_fast_gossip)) {
master_badmsg(WIRE_CHANNEL_INIT, msg);
}
/* stdin == requests, 3 == peer, 4 = gossip, 5 = gossip_store, 6 = HSM */

closingd/closing_wire.csv (1 changed line)

@ -30,6 +30,7 @@ msgdata,closing_init,channel_reestablish,u8,channel_reestablish_len
msgdata,closing_init,final_scriptpubkey_len,u16,
msgdata,closing_init,final_scriptpubkey,u8,final_scriptpubkey_len
msgdata,closing_init,last_remote_secret,secret,
msgdata,closing_init,dev_fast_gossip,bool,
# We received an offer, save signature.
msgtype,closing_received_signature,2002


closingd/closingd.c (3 changed lines)

@ -605,7 +605,8 @@ int main(int argc, char *argv[])
&revocations_received,
&channel_reestablish,
&final_scriptpubkey,
&last_remote_per_commit_secret))
&last_remote_per_commit_secret,
&dev_fast_gossip))
master_badmsg(WIRE_CLOSING_INIT, msg);
/* stdin == requests, 3 == peer, 4 = gossip, 5 = gossip_store, 6 = hsmd */

common/per_peer_state.c (14 changed lines)

@ -2,9 +2,12 @@
#include <ccan/fdpass/fdpass.h>
#include <common/gossip_rcvd_filter.h>
#include <common/per_peer_state.h>
#include <gossipd/gossip_constants.h>
#include <unistd.h>
#include <wire/wire.h>
bool dev_fast_gossip = false;
static void destroy_per_peer_state(struct per_peer_state *pps)
{
if (pps->peer_fd != -1)
@ -66,9 +69,6 @@ void fromwire_gossip_state(const u8 **cursor, size_t *max,
void towire_per_peer_state(u8 **pptr, const struct per_peer_state *pps)
{
towire_crypto_state(pptr, &pps->cs);
#if DEVELOPER
towire_u32(pptr, pps->dev_gossip_broadcast_msec);
#endif
towire_bool(pptr, pps->gs != NULL);
if (pps->gs)
towire_gossip_state(pptr, pps->gs);
@ -93,9 +93,6 @@ struct per_peer_state *fromwire_per_peer_state(const tal_t *ctx,
fromwire_crypto_state(cursor, max, &cs);
pps = new_per_peer_state(ctx, &cs);
#if DEVELOPER
pps->dev_gossip_broadcast_msec = fromwire_u32(cursor, max);
#endif
if (fromwire_bool(cursor, max)) {
pps->gs = tal(pps, struct gossip_state);
fromwire_gossip_state(cursor, max, pps->gs);
@ -135,11 +132,8 @@ bool time_to_next_gossip(const struct per_peer_state *pps,
*/
void per_peer_state_reset_gossip_timer(struct per_peer_state *pps)
{
struct timerel t = time_from_sec(60);
struct timerel t = time_from_sec(GOSSIP_FLUSH_INTERVAL(dev_fast_gossip));
#if DEVELOPER
t = time_from_msec(pps->dev_gossip_broadcast_msec);
#endif
pps->gs->next_gossip = timemono_add(time_mono(), t);
gossip_rcvd_filter_age(pps->grf);
}

common/per_peer_state.h (8 changed lines)

@ -22,10 +22,6 @@ struct per_peer_state {
struct gossip_state *gs;
/* Cache of msgs we have received, to avoid re-xmitting from store */
struct gossip_rcvd_filter *grf;
#if DEVELOPER
/* Normally 60000, but adjustable for dev mode */
u32 dev_gossip_broadcast_msec;
#endif /* DEVELOPER */
/* If not -1, closed on freeing */
int peer_fd, gossip_fd, gossip_store_fd;
};
@ -60,4 +56,8 @@ bool time_to_next_gossip(const struct per_peer_state *pps,
/* Reset pps->next_gossip now we've drained gossip_store */
void per_peer_state_reset_gossip_timer(struct per_peer_state *pps);
/* Used to speed up gossip iff DEVELOPER*/
extern bool dev_fast_gossip;
#endif /* LIGHTNING_COMMON_PER_PEER_STATE_H */

connectd/connectd.c (4 changed lines)

@ -439,10 +439,6 @@ struct io_plan *peer_connected(struct io_conn *conn,
/* This contains the per-peer state info; gossipd fills in pps->gs */
pps = new_per_peer_state(tmpctx, cs);
#if DEVELOPER
/* Overridden by lightningd, but initialize to keep valgrind happy */
pps->dev_gossip_broadcast_msec = 0;
#endif
/* If gossipd can't give us a file descriptor, we give up connecting. */
if (!get_gossipfds(daemon, id, localfeatures, pps))

gossipd/gossip_constants.h (37 changed lines)

@ -1,5 +1,6 @@
#ifndef LIGHTNING_GOSSIPD_GOSSIP_CONSTANTS_H
#define LIGHTNING_GOSSIPD_GOSSIP_CONSTANTS_H
#include <common/utils.h>
/* BOLT #4:
*
@ -40,4 +41,40 @@
*/
#define ANNOUNCE_MIN_DEPTH 6
/* Gossip timing constants. These can be overridden in --enable-developer
* configurations with --dev-fast-gossip, otherwise the argument is ignored */
#define DEV_FAST_GOSSIP(dev_fast_gossip_flag, fast, normal) \
IFDEV((dev_fast_gossip_flag) ? (fast) : (normal), (normal))
/* How close we can generate gossip msgs (5 minutes) */
#define GOSSIP_MIN_INTERVAL(dev_fast_gossip_flag) \
DEV_FAST_GOSSIP(dev_fast_gossip_flag, 5, 300)
/* BOLT #7:
*
* - SHOULD flush outgoing gossip messages once every 60 seconds,
* independently of the arrival times of the messages.
*/
#define GOSSIP_FLUSH_INTERVAL(dev_fast_gossip_flag) \
DEV_FAST_GOSSIP(dev_fast_gossip_flag, 1, 60)
/* BOLT #7:
*
* A node:
* - if a channel's latest `channel_update`s `timestamp` is older than two weeks
* (1209600 seconds):
* - MAY prune the channel.
* - MAY ignore the channel.
*/
#define GOSSIP_PRUNE_INTERVAL(dev_fast_gossip_flag) \
DEV_FAST_GOSSIP(dev_fast_gossip_flag, 90, 1209600)
/* How long after seeing lockin until we announce the channel. */
#define GOSSIP_ANNOUNCE_DELAY(dev_fast_gossip_flag) \
DEV_FAST_GOSSIP(dev_fast_gossip_flag, 1, 60)
/* How long before deadline should we send refresh update? 1 day normally */
#define GOSSIP_BEFORE_DEADLINE(dev_fast_gossip_flag) \
DEV_FAST_GOSSIP(dev_fast_gossip_flag, 30, 24*60*60)
#endif /* LIGHTNING_GOSSIPD_GOSSIP_CONSTANTS_H */
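
A quick sanity check of the developer-mode numbers above (again an editorial sketch, not part of the commit; the values are read directly off the macro definitions, assuming a DEVELOPER build with --dev-fast-gossip set). It also shows why the reworked pruning test further down allows up to 120 seconds:

/* Dev-mode timing values and the refresh/prune cadence they imply. */
#include <stdio.h>

int main(void)
{
	const int prune = 90;		/* GOSSIP_PRUNE_INTERVAL: 90 s (2 weeks normally) */
	const int before_deadline = 30;	/* GOSSIP_BEFORE_DEADLINE: 30 s (1 day normally) */

	/* gossip_refresh_network() re-runs every prune/4 seconds and refreshes
	 * any own-channel update older than prune - before_deadline seconds: */
	printf("refresh pass every %d s, refreshing updates older than %d s\n",
	       prune / 4, prune - before_deadline);

	/* route_prune() drops channels with no update for `prune` seconds, so
	 * once a peer goes silent its channels are pruned after about 90 s;
	 * the updated test_gossip_pruning waits up to 120 s for that. */
	printf("prune channels with no update for %d s\n", prune);
	return 0;
}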

gossipd/gossip_wire.csv (3 changed lines)

@ -10,11 +10,10 @@ msgdata,gossipctl_init,gflen,u16,
msgdata,gossipctl_init,globalfeatures,u8,gflen
msgdata,gossipctl_init,rgb,u8,3
msgdata,gossipctl_init,alias,u8,32
msgdata,gossipctl_init,update_channel_interval,u32,
msgdata,gossipctl_init,gossip_min_interval,u32,
msgdata,gossipctl_init,num_announcable,u16,
msgdata,gossipctl_init,announcable,wireaddr,num_announcable
msgdata,gossipctl_init,dev_gossip_time,?u32,
msgdata,gossipctl_init,dev_fast_gossip,bool,
# In developer mode, we can mess with time.
msgtype,gossip_dev_set_time,3001


gossipd/gossipd.c (32 changed lines)

@ -1832,16 +1832,13 @@ static void gossip_refresh_network(struct daemon *daemon)
s64 highwater;
struct node *n;
/* For DEVELOPER testing, this can be set really short; otherwise, we
* set it to 1 day before deadline. */
if (daemon->rstate->prune_timeout < 24*3600)
highwater = now - daemon->rstate->prune_timeout / 2;
else
highwater = now - (daemon->rstate->prune_timeout - 24*3600);
/* Send out 1 day before deadline */
highwater = now - (GOSSIP_PRUNE_INTERVAL(daemon->rstate->dev_fast_gossip)
- GOSSIP_BEFORE_DEADLINE(daemon->rstate->dev_fast_gossip));
/* Schedule next run now (prune_timeout is 2 weeks) */
/* Schedule next run now */
notleak(new_reltimer(&daemon->timers, daemon,
time_from_sec(daemon->rstate->prune_timeout/4),
time_from_sec(GOSSIP_PRUNE_INTERVAL(daemon->rstate->dev_fast_gossip)/4),
gossip_refresh_network, daemon));
/* Find myself in the network */
@ -1980,32 +1977,25 @@ static struct io_plan *gossip_init(struct io_conn *conn,
struct daemon *daemon,
const u8 *msg)
{
u32 update_channel_interval;
u32 *dev_gossip_time;
bool dev_fast_gossip;
if (!fromwire_gossipctl_init(daemon, msg,
&daemon->chain_hash,
&daemon->id, &daemon->globalfeatures,
daemon->rgb,
daemon->alias,
/* 1 week in seconds
* (unless --dev-channel-update-interval) */
&update_channel_interval,
/* 5 minutes, or
* --dev-broadcast-interval * 5 seconds */
&daemon->gossip_min_interval,
&daemon->announcable,
&dev_gossip_time)) {
&dev_gossip_time, &dev_fast_gossip)) {
master_badmsg(WIRE_GOSSIPCTL_INIT, msg);
}
/* Prune time (usually 2 weeks) is twice update time */
daemon->rstate = new_routing_state(daemon,
chainparams_by_chainhash(&daemon->chain_hash),
&daemon->id,
update_channel_interval * 2,
&daemon->peers,
take(dev_gossip_time));
take(dev_gossip_time),
dev_fast_gossip);
/* Load stored gossip messages */
if (!gossip_store_load(daemon->rstate, daemon->rstate->gs))
@ -2018,9 +2008,9 @@ static struct io_plan *gossip_init(struct io_conn *conn,
* or addresses might have changed!) */
maybe_send_own_node_announce(daemon);
/* Start the weekly refresh timer. */
/* Start the twice-weekly refresh timer. */
notleak(new_reltimer(&daemon->timers, daemon,
time_from_sec(daemon->rstate->prune_timeout/4),
time_from_sec(GOSSIP_PRUNE_INTERVAL(daemon->rstate->dev_fast_gossip) / 4),
gossip_refresh_network, daemon));
return daemon_conn_read_next(conn, daemon->master);

gossipd/gossipd.h (3 changed lines)

@ -33,9 +33,6 @@ struct daemon {
/* Timers: we batch gossip, and also refresh announcements */
struct timers timers;
/* Minimum interval for generating updated gossip */
u32 gossip_min_interval;
/* Global features to list in node_announcement. */
u8 *globalfeatures;

gossipd/make_gossip.c (8 changed lines)

@ -170,7 +170,8 @@ static void update_own_node_announcement(struct daemon *daemon)
* previous `node_announcement` it has previously created.
*/
/* We do better: never send them within more than 5 minutes. */
next = self->bcast.timestamp + daemon->gossip_min_interval;
next = self->bcast.timestamp
+ GOSSIP_MIN_INTERVAL(daemon->rstate->dev_fast_gossip);
if (timestamp < next) {
status_debug("node_announcement: delaying %u secs",
@ -260,7 +261,7 @@ static void update_local_channel(struct local_cupdate *lc /* frees! */)
/* Create an unsigned channel_update: we backdate enables, so
* we can always send a disable in an emergency. */
if (!lc->disable)
timestamp -= daemon->gossip_min_interval;
timestamp -= GOSSIP_MIN_INTERVAL(daemon->rstate->dev_fast_gossip);
/* BOLT #7:
*
@ -321,7 +322,8 @@ static void update_local_channel(struct local_cupdate *lc /* frees! */)
}
/* Is it too soon to send another update? */
next = hc->bcast.timestamp + daemon->gossip_min_interval;
next = hc->bcast.timestamp
+ GOSSIP_MIN_INTERVAL(daemon->rstate->dev_fast_gossip);
if (timestamp < next) {
status_debug("channel_update %s/%u: delaying %u secs",

gossipd/routing.c (14 changed lines)

@ -205,7 +205,7 @@ static bool timestamp_reasonable(struct routing_state *rstate, u32 timestamp)
if (timestamp > now + 24*60*60)
return false;
/* More than 2 weeks behind? */
if (timestamp < now - rstate->prune_timeout)
if (timestamp < now - GOSSIP_PRUNE_INTERVAL(rstate->dev_fast_gossip))
return false;
return true;
}
@ -234,16 +234,15 @@ static void memleak_help_routing_tables(struct htable *memtable,
struct routing_state *new_routing_state(const tal_t *ctx,
const struct chainparams *chainparams,
const struct node_id *local_id,
u32 prune_timeout,
struct list_head *peers,
const u32 *dev_gossip_time TAKES)
const u32 *dev_gossip_time TAKES,
bool dev_fast_gossip)
{
struct routing_state *rstate = tal(ctx, struct routing_state);
rstate->nodes = new_node_map(rstate);
rstate->gs = gossip_store_new(rstate, peers);
rstate->chainparams = chainparams;
rstate->local_id = *local_id;
rstate->prune_timeout = prune_timeout;
rstate->local_channel_announced = false;
pending_cannouncement_map_init(&rstate->pending_cannouncements);
@ -263,6 +262,7 @@ struct routing_state *new_routing_state(const tal_t *ctx,
rstate->gossip_time->ts.tv_nsec = 0;
} else
rstate->gossip_time = NULL;
rstate->dev_fast_gossip = dev_fast_gossip;
#endif
tal_add_destructor(rstate, destroy_routing_state);
memleak_add_helper(rstate, memleak_help_routing_tables);
@ -2000,7 +2000,7 @@ bool routing_add_channel_update(struct routing_state *rstate,
}
/* Allow redundant updates once every 7 days */
if (timestamp < hc->bcast.timestamp + rstate->prune_timeout / 2
if (timestamp < hc->bcast.timestamp + GOSSIP_PRUNE_INTERVAL(rstate->dev_fast_gossip) / 2
&& !cupdate_different(rstate->gs, hc, update)) {
status_debug("Ignoring redundant update for %s/%u"
" (last %u, now %u)",
@ -2351,7 +2351,7 @@ bool routing_add_node_announcement(struct routing_state *rstate,
}
/* Allow redundant updates once every 7 days */
if (timestamp < node->bcast.timestamp + rstate->prune_timeout / 2
if (timestamp < node->bcast.timestamp + GOSSIP_PRUNE_INTERVAL(rstate->dev_fast_gossip) / 2
&& !nannounce_different(rstate->gs, node, msg)) {
status_debug("Ignoring redundant nannounce for %s"
" (last %u, now %u)",
@ -2698,7 +2698,7 @@ void route_prune(struct routing_state *rstate)
{
u64 now = gossip_time_now(rstate).ts.tv_sec;
/* Anything below this highwater mark ought to be pruned */
const s64 highwater = now - rstate->prune_timeout;
const s64 highwater = now - GOSSIP_PRUNE_INTERVAL(rstate->dev_fast_gossip);
struct chan **pruned = tal_arr(tmpctx, struct chan *, 0);
u64 idx;

gossipd/routing.h (10 changed lines)

@ -263,9 +263,6 @@ struct routing_state {
/* Our own ID so we can identify local channels */
struct node_id local_id;
/* How old does a channel have to be before we prune it? */
u32 prune_timeout;
/* A map of channels indexed by short_channel_ids */
UINTMAP(struct chan *) chanmap;
@ -286,6 +283,9 @@ struct routing_state {
#if DEVELOPER
/* Override local time for gossip messages */
struct timeabs *gossip_time;
/* Speed up gossip. */
bool dev_fast_gossip;
#endif
};
@ -320,9 +320,9 @@ struct exclude_entry {
struct routing_state *new_routing_state(const tal_t *ctx,
const struct chainparams *chainparams,
const struct node_id *local_id,
u32 prune_timeout,
struct list_head *peers,
const u32 *dev_gossip_time TAKES);
const u32 *dev_gossip_time TAKES,
bool dev_fast_gossip);
/**
* Add a new bidirectional channel from id1 to id2 with the given

gossipd/test/run-crc32_of_update.c (6 changed lines)

@ -55,7 +55,7 @@ bool fromwire_expiry_too_soon(const tal_t *ctx UNNEEDED, const void *p UNNEEDED,
bool fromwire_fee_insufficient(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct amount_msat *htlc_msat UNNEEDED, u8 **channel_update UNNEEDED)
{ fprintf(stderr, "fromwire_fee_insufficient called!\n"); abort(); }
/* Generated stub for fromwire_gossipctl_init */
bool fromwire_gossipctl_init(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct bitcoin_blkid *chain_hash UNNEEDED, struct node_id *id UNNEEDED, u8 **globalfeatures UNNEEDED, u8 rgb[3] UNNEEDED, u8 alias[32] UNNEEDED, u32 *update_channel_interval UNNEEDED, u32 *gossip_min_interval UNNEEDED, struct wireaddr **announcable UNNEEDED, u32 **dev_gossip_time UNNEEDED)
bool fromwire_gossipctl_init(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct bitcoin_blkid *chain_hash UNNEEDED, struct node_id *id UNNEEDED, u8 **globalfeatures UNNEEDED, u8 rgb[3] UNNEEDED, u8 alias[32] UNNEEDED, struct wireaddr **announcable UNNEEDED, u32 **dev_gossip_time UNNEEDED, bool *dev_fast_gossip UNNEEDED)
{ fprintf(stderr, "fromwire_gossipctl_init called!\n"); abort(); }
/* Generated stub for fromwire_gossip_dev_set_max_scids_encode_size */
bool fromwire_gossip_dev_set_max_scids_encode_size(const void *p UNNEEDED, u32 *max UNNEEDED)
@ -214,9 +214,9 @@ struct oneshot *new_reltimer_(struct timers *timers UNNEEDED,
struct routing_state *new_routing_state(const tal_t *ctx UNNEEDED,
const struct chainparams *chainparams UNNEEDED,
const struct node_id *local_id UNNEEDED,
u32 prune_timeout UNNEEDED,
struct list_head *peers UNNEEDED,
const u32 *dev_gossip_time TAKES UNNEEDED)
const u32 *dev_gossip_time TAKES UNNEEDED,
bool dev_fast_gossip UNNEEDED)
{ fprintf(stderr, "new_routing_state called!\n"); abort(); }
/* Generated stub for next_chan */
struct chan *next_chan(const struct node *node UNNEEDED, struct chan_map_iter *i UNNEEDED)

gossipd/test/run-extended-info.c (6 changed lines)

@ -77,7 +77,7 @@ bool fromwire_expiry_too_soon(const tal_t *ctx UNNEEDED, const void *p UNNEEDED,
bool fromwire_fee_insufficient(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct amount_msat *htlc_msat UNNEEDED, u8 **channel_update UNNEEDED)
{ fprintf(stderr, "fromwire_fee_insufficient called!\n"); abort(); }
/* Generated stub for fromwire_gossipctl_init */
bool fromwire_gossipctl_init(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct bitcoin_blkid *chain_hash UNNEEDED, struct node_id *id UNNEEDED, u8 **globalfeatures UNNEEDED, u8 rgb[3] UNNEEDED, u8 alias[32] UNNEEDED, u32 *update_channel_interval UNNEEDED, u32 *gossip_min_interval UNNEEDED, struct wireaddr **announcable UNNEEDED, u32 **dev_gossip_time UNNEEDED)
bool fromwire_gossipctl_init(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct bitcoin_blkid *chain_hash UNNEEDED, struct node_id *id UNNEEDED, u8 **globalfeatures UNNEEDED, u8 rgb[3] UNNEEDED, u8 alias[32] UNNEEDED, struct wireaddr **announcable UNNEEDED, u32 **dev_gossip_time UNNEEDED, bool *dev_fast_gossip UNNEEDED)
{ fprintf(stderr, "fromwire_gossipctl_init called!\n"); abort(); }
/* Generated stub for fromwire_gossip_dev_set_max_scids_encode_size */
bool fromwire_gossip_dev_set_max_scids_encode_size(const void *p UNNEEDED, u32 *max UNNEEDED)
@ -240,9 +240,9 @@ struct oneshot *new_reltimer_(struct timers *timers UNNEEDED,
struct routing_state *new_routing_state(const tal_t *ctx UNNEEDED,
const struct chainparams *chainparams UNNEEDED,
const struct node_id *local_id UNNEEDED,
u32 prune_timeout UNNEEDED,
struct list_head *peers UNNEEDED,
const u32 *dev_gossip_time TAKES UNNEEDED)
const u32 *dev_gossip_time TAKES UNNEEDED,
bool dev_fast_gossip UNNEEDED)
{ fprintf(stderr, "new_routing_state called!\n"); abort(); }
/* Generated stub for next_chan */
struct chan *next_chan(const struct node *node UNNEEDED, struct chan_map_iter *i UNNEEDED)

lightningd/channel_control.c (6 changed lines)

@ -478,12 +478,10 @@ void peer_start_channeld(struct channel *channel,
channel->remote_upfront_shutdown_script,
remote_ann_node_sig,
remote_ann_bitcoin_sig,
/* Delay announce by 60 seconds after
* seeing block (adjustable if dev) */
ld->topology->poll_seconds * 2,
/* Set at channel open, even if not
* negotiated now! */
channel->option_static_remotekey);
channel->option_static_remotekey,
IFDEV(ld->dev_fast_gossip, false));
/* We don't expect a response: we are triggered by funding_depth_cb. */
subd_send_msg(channel->owner, take(initmsg));

lightningd/closing_control.c (8 changed lines)

@ -288,7 +288,13 @@ void peer_start_closingd(struct channel *channel,
channel_reestablish,
p2wpkh_for_keyidx(tmpctx, ld,
channel->final_key_idx),
&last_remote_per_commit_secret);
&last_remote_per_commit_secret,
#if DEVELOPER
ld->dev_fast_gossip
#else
false
#endif
);
/* We don't expect a response: it will give us feedback on
* signatures sent and received, then closing_complete. */

lightningd/gossip_control.c (7 changed lines)

@ -195,11 +195,10 @@ void gossip_init(struct lightningd *ld, int connectd_fd)
&get_chainparams(ld)->genesis_blockhash, &ld->id,
get_offered_globalfeatures(tmpctx),
ld->rgb,
ld->alias, ld->config.channel_update_interval,
/* gossip_min_interval: 5x the broadcast interval */
ld->config.broadcast_interval_msec / 200,
ld->alias,
ld->announcable,
IFDEV(ld->dev_gossip_time ? &ld->dev_gossip_time: NULL, NULL));
IFDEV(ld->dev_gossip_time ? &ld->dev_gossip_time: NULL, NULL),
IFDEV(ld->dev_fast_gossip, false));
subd_send_msg(ld->gossip, msg);
}

lightningd/lightningd.c (1 changed line)

@ -118,6 +118,7 @@ static struct lightningd *new_lightningd(const tal_t *ctx)
ld->dev_subdaemon_fail = false;
ld->dev_allow_localhost = false;
ld->dev_gossip_time = 0;
ld->dev_fast_gossip = false;
ld->dev_force_privkey = NULL;
ld->dev_force_bip32_seed = NULL;
ld->dev_force_channel_secrets = NULL;

lightningd/lightningd.h (9 changed lines)

@ -48,12 +48,6 @@ struct config {
/* How long between changing commit and sending COMMIT message. */
u32 commit_time_ms;
/* How often to broadcast gossip (msec) */
u32 broadcast_interval_msec;
/* Channel update interval */
u32 channel_update_interval;
/* Do we let the funder set any fee rate they want */
bool ignore_fee_limits;
@ -207,6 +201,9 @@ struct lightningd {
/* Timestamp to use for gossipd, iff non-zero */
u32 dev_gossip_time;
/* Speedup gossip propagation, for testing. */
bool dev_fast_gossip;
/* Things we've marked as not leaking. */
const void **notleaks;

lightningd/opening_control.c (3 changed lines)

@ -949,7 +949,8 @@ void peer_start_openingd(struct peer *peer,
peer->localfeatures,
local_feature_negotiated(peer->localfeatures,
LOCAL_STATIC_REMOTEKEY),
send_msg);
send_msg,
IFDEV(peer->ld->dev_fast_gossip, false));
subd_send_msg(uc->openingd, take(msg));
}

lightningd/options.c (30 changed lines)

@ -398,9 +398,6 @@ static void dev_register_opts(struct lightningd *ld)
"Disable automatic reconnect-attempts by this node, but accept incoming");
opt_register_noarg("--dev-fail-on-subdaemon-fail", opt_set_bool,
&ld->dev_subdaemon_fail, opt_hidden);
opt_register_arg("--dev-broadcast-interval=<ms>", opt_set_uintval,
opt_show_uintval, &ld->config.broadcast_interval_msec,
"Time between gossip broadcasts in milliseconds");
opt_register_arg("--dev-disconnect=<filename>", opt_subd_dev_disconnect,
NULL, ld, "File containing disconnection points");
opt_register_noarg("--dev-allow-localhost", opt_set_bool,
@ -417,10 +414,9 @@ static void dev_register_opts(struct lightningd *ld)
"fee fluctuations, large values may result in large "
"fees.");
opt_register_arg(
"--dev-channel-update-interval=<s>", opt_set_u32, opt_show_u32,
&ld->config.channel_update_interval,
"Time in seconds between channel updates for our own channels.");
opt_register_noarg("--dev-fast-gossip", opt_set_bool,
&ld->dev_fast_gossip,
"Make gossip broadcast 1 second, pruning 14 seconds");
opt_register_arg("--dev-gossip-time", opt_set_u32, opt_show_u32,
&ld->dev_gossip_time,
@ -473,16 +469,6 @@ static const struct config testnet_config = {
/* Take 0.001% */
.fee_per_satoshi = 10,
/* BOLT #7:
*
* - SHOULD flush outgoing gossip messages once every 60
* seconds, independently of the arrival times of the messages.
*/
.broadcast_interval_msec = 60000,
/* Send a keepalive update at least every week, prune every twice that */
.channel_update_interval = 1209600/2,
/* Testnet sucks */
.ignore_fee_limits = true,
@ -542,16 +528,6 @@ static const struct config mainnet_config = {
/* Take 0.001% */
.fee_per_satoshi = 10,
/* BOLT #7:
*
* - SHOULD flush outgoing gossip messages once every 60
* seconds, independently of the arrival times of the messages.
*/
.broadcast_interval_msec = 60000,
/* Send a keepalive update at least every week, prune every twice that */
.channel_update_interval = 1209600/2,
/* Mainnet should have more stable fees */
.ignore_fee_limits = false,

lightningd/peer_control.c (6 changed lines)

@ -937,12 +937,6 @@ void peer_connected(struct lightningd *ld, const u8 *msg,
fatal("Connectd gave bad CONNECT_PEER_CONNECTED message %s",
tal_hex(msg, msg));
#if DEVELOPER
/* Override broaedcast interval from our config */
hook_payload->pps->dev_gossip_broadcast_msec
= ld->config.broadcast_interval_msec;
#endif
per_peer_state_set_fds(hook_payload->pps,
peer_fd, gossip_fd, gossip_store_fd);

openingd/opening_wire.csv (1 changed line)

@ -24,6 +24,7 @@ msgdata,opening_init,option_static_remotekey,bool,
# Optional msg to send.
msgdata,opening_init,len,u16,
msgdata,opening_init,msg,u8,len
msgdata,opening_init,dev_fast_gossip,bool,
# Openingd->master: they offered channel, should we continue?
msgtype,opening_got_offer,6005


openingd/openingd.c (3 changed lines)

@ -1433,7 +1433,8 @@ int main(int argc, char *argv[])
&state->min_feerate, &state->max_feerate,
&state->localfeatures,
&state->option_static_remotekey,
&inner))
&inner,
&dev_fast_gossip))
master_badmsg(WIRE_OPENING_INIT, msg);
/* 3 == peer, 4 == gossipd, 5 = gossip_store, 6 = hsmd */

tests/test_closing.py (2 changed lines)

@ -29,7 +29,7 @@ def test_closing(node_factory, bitcoind):
# Only wait for the channels to activate with DEVELOPER=1,
# otherwise it's going to take too long because of the missing
# --dev-broadcast-interval
# --dev-fast-gossip
if DEVELOPER:
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)

tests/test_connection.py (2 changed lines)

@ -1176,7 +1176,7 @@ def test_private_channel(node_factory):
assert not only_one(only_one(l4.rpc.listpeers(l3.info['id'])['peers'])['channels'])['private']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-fast-gossip")
def test_channel_reenable(node_factory):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True}, fundchannel=True, wait_for_announce=True)

tests/test_gossip.py (31 changed lines)

@ -18,12 +18,11 @@ with open('config.vars') as configfile:
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"
@unittest.skipIf(not DEVELOPER, "needs --dev-broadcast-interval, --dev-channelupdate-interval")
@unittest.skipIf(not DEVELOPER, "needs --dev-fast-gossip for fast pruning")
def test_gossip_pruning(node_factory, bitcoind):
""" Create channel and see it being updated in time before pruning
"""
opts = {'dev-channel-update-interval': 5}
l1, l2, l3 = node_factory.get_nodes(3, opts)
l1, l2, l3 = node_factory.get_nodes(3)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
@ -38,10 +37,10 @@ def test_gossip_pruning(node_factory, bitcoind):
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True] * 4)
wait_for(lambda: [c['active'] for c in l3.rpc.listchannels()['channels']] == [True] * 4)
# All of them should send a keepalive message
# All of them should send a keepalive message (after ~60 seconds)
l1.daemon.wait_for_logs([
'Sending keepalive channel_update for {}'.format(scid1),
])
], timeout=90)
l2.daemon.wait_for_logs([
'Sending keepalive channel_update for {}'.format(scid1),
'Sending keepalive channel_update for {}'.format(scid2),
@ -50,19 +49,21 @@ def test_gossip_pruning(node_factory, bitcoind):
'Sending keepalive channel_update for {}'.format(scid2),
])
# Now kill l3, so that l2 and l1 can prune it from their view after 10 seconds
l3.stop()
# Now kill l2, so that l1 and l3 will prune from their view after 90 seconds
l2.stop()
l1.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))
l2.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))
# We check every 90/4 seconds, and takes 90 seconds since last update.
l1.daemon.wait_for_log("Pruning channel {} from network view".format(scid2),
timeout=120)
l3.daemon.wait_for_log("Pruning channel {} from network view".format(scid1))
assert scid2 not in [c['short_channel_id'] for c in l1.rpc.listchannels()['channels']]
assert scid2 not in [c['short_channel_id'] for c in l2.rpc.listchannels()['channels']]
assert scid1 not in [c['short_channel_id'] for c in l3.rpc.listchannels()['channels']]
assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']]
assert l3.info['id'] not in [n['nodeid'] for n in l2.rpc.listnodes()['nodes']]
assert l1.info['id'] not in [n['nodeid'] for n in l3.rpc.listnodes()['nodes']]
@unittest.skipIf(not DEVELOPER, "needs --dev-broadcast-interval, --dev-no-reconnect")
@unittest.skipIf(not DEVELOPER, "needs --dev-fast-gossip, --dev-no-reconnect")
def test_gossip_disable_channels(node_factory, bitcoind):
"""Simple test to check that channels get disabled correctly on disconnect and
reenabled upon reconnecting
@ -319,7 +320,7 @@ def test_gossip_jsonrpc(node_factory):
assert [c['public'] for c in l2.rpc.listchannels()['channels']] == [True, True]
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-fast-gossip")
def test_gossip_badsig(node_factory):
"""Make sure node announcement signatures are ok.
@ -510,7 +511,7 @@ def test_gossip_no_empty_announcements(node_factory, bitcoind):
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-fast-gossip")
def test_routing_gossip(node_factory, bitcoind):
nodes = node_factory.get_nodes(5)
@ -1295,7 +1296,7 @@ def test_gossip_store_compact_noappend(node_factory, bitcoind):
assert not l2.daemon.is_in_log('gossip_store:.*truncate')
@unittest.skipIf(not DEVELOPER, "updates are delayed without --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "updates are delayed without --dev-fast-gossip")
def test_gossip_store_load_complex(node_factory, bitcoind):
l2 = setup_gossip_store_test(node_factory, bitcoind)

tests/test_invoices.py (4 changed lines)

@ -330,7 +330,7 @@ def test_invoice_expiry(node_factory, executor):
assert expiry >= start + 7 * 24 * 3600 and expiry <= end + 7 * 24 * 3600
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-fast-gossip")
def test_waitinvoice(node_factory, executor):
"""Test waiting for one invoice will not return if another invoice is paid.
"""
@ -366,7 +366,7 @@ def test_waitinvoice(node_factory, executor):
assert not f3.done()
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-fast-gossip")
def test_waitanyinvoice(node_factory, executor):
"""Test various variants of waiting for the next invoice to complete.
"""

tests/test_misc.py (2 changed lines)

@ -250,7 +250,7 @@ def test_ping(node_factory):
.format(l2.info['version']))
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "needs --dev-disconnect")
def test_htlc_sig_persistence(node_factory, bitcoind, executor):
"""Interrupt a payment between two peers, then fail and recover funds using the HTLC sig.
"""

tests/test_pay.py (10 changed lines)

@ -916,7 +916,7 @@ def test_decodepay(node_factory):
l1.rpc.decodepay('1111111')
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-fast-gossip")
def test_forward(node_factory, bitcoind):
# Connect 1 -> 2 -> 3.
l1, l2, l3 = node_factory.line_graph(3, fundchannel=True)
@ -975,7 +975,7 @@ def test_forward(node_factory, bitcoind):
l1.rpc.waitsendpay(rhash)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "needs --dev-fast-gossip")
def test_forward_different_fees_and_cltv(node_factory, bitcoind):
# FIXME: Check BOLT quotes here too
# BOLT #7:
@ -1111,7 +1111,7 @@ def test_forward_different_fees_and_cltv(node_factory, bitcoind):
assert c[1]['source'] == c[0]['destination']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "too slow without --dev-fast-gossip")
def test_forward_pad_fees_and_cltv(node_factory, bitcoind):
"""Test that we are allowed extra locktime delta, and fees"""
@ -1799,7 +1799,7 @@ def test_pay_direct(node_factory, bitcoind):
assert l1l2msat == l1l2msatreference
@unittest.skipIf(not DEVELOPER, "updates are delayed without --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "updates are delayed without --dev-fast-gossip")
def test_setchannelfee_usage(node_factory, bitcoind):
# TEST SETUP
#
@ -2132,7 +2132,7 @@ def test_setchannelfee_restart(node_factory, bitcoind):
assert result['msatoshi_sent'] == 5002020
@unittest.skipIf(not DEVELOPER, "updates are delayed without --dev-broadcast-interval")
@unittest.skipIf(not DEVELOPER, "updates are delayed without --dev-fast-gossip")
def test_setchannelfee_all(node_factory, bitcoind):
# TEST SETUP
#

tests/utils.py (2 changed lines)

@ -420,7 +420,7 @@ class LightningD(TailableProc):
with open(os.path.join(lightning_dir, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-broadcast-interval'] = 1000
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
