Browse Source

connectd: remove unused handback APIs and code.

We now simply maintain a pubkey set for connected peers (we only care
if there's a reconnect), not the entire peer structure.

lightningd no longer queries us for getpeers: it knows more than we do
already.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
ppa-0.6.1
Rusty Russell 7 years ago
committed by Christian Decker
parent
commit
02966a4857
  1. 536
      connectd/connect.c
  2. 56
      connectd/connect_wire.csv
  3. 10
      lightningd/connect_control.c
  4. 419
      lightningd/peer_control.c
  5. 8
      tests/test_closing.py
  6. 11
      tests/test_connection.py
  7. 21
      wallet/test/run-wallet.c

536
connectd/connect.c

@ -112,12 +112,29 @@ struct listen_fd {
bool mayfail;
};
/* Key accessor for the pubkey hash table: the set is keyed by the
 * pubkey structure itself, so this is the identity function. */
static const struct pubkey *pubkey_keyof(const struct pubkey *pk)
{
	return pk;
}
/* Hash function for the pubkey hash table: siphash over the raw bytes
 * of the structure, seeded per-run via siphash_seed(). */
static size_t pubkey_hash(const struct pubkey *id)
{
	return siphash24(siphash_seed(), id, sizeof *id);
}
/* Declare 'struct pubkey_set': a CCAN hash table of pubkeys, keyed by
 * the pubkey itself (pubkey_keyof), hashed with pubkey_hash and
 * compared with pubkey_eq. */
HTABLE_DEFINE_TYPE(struct pubkey,
pubkey_keyof,
pubkey_hash,
pubkey_eq,
pubkey_set);
struct daemon {
/* Who am I? */
struct pubkey id;
/* Peers we have directly or indirectly: id is unique */
struct list_head peers;
/* Peers we know of */
struct pubkey_set peers;
/* Peers reconnecting now (waiting for current peer to die). */
struct list_head reconnecting;
@ -201,15 +218,6 @@ struct local_peer_state {
/* Our connection (and owner) */
struct io_conn *conn;
/* Gossipd connection */
struct daemon_conn gossip_conn;
/* Waiting to send_peer_with_fds to master? */
bool return_to_master;
/* If we're exiting due to non-gossip msg, otherwise release */
u8 *nongossip_msg;
/* Message queue for outgoing. */
struct msg_queue peer_out;
};
@ -217,7 +225,7 @@ struct local_peer_state {
struct peer {
struct daemon *daemon;
/* daemon->peers */
/* For reconnecting peers, this is in daemon->reconnecting. */
struct list_node list;
/* The ID of the peer (not necessarily unique, in transit!) */
@ -229,7 +237,6 @@ struct peer {
/* Feature bitmaps. */
u8 *gfeatures, *lfeatures;
/* Non-NULL if we're talking to peer */
struct local_peer_state *local;
};
@ -246,29 +253,6 @@ struct addrhint {
static void send_peer_with_fds(struct peer *peer, const u8 *msg);
static void retry_important(struct important_peerid *imp);
/* Destructor for a finalized peer: unlink it from the daemon's peer
 * list.  If this peer id is marked "important", reset its reconnect
 * backoff and schedule an immediate reconnection attempt. */
static void destroy_peer(struct peer *peer)
{
struct important_peerid *imp;
list_del_from(&peer->daemon->peers, &peer->list);
imp = important_peerid_map_get(&peer->daemon->important_peerids,
&peer->id);
if (imp) {
/* Restart the reconnect backoff from its initial value. */
imp->wait_seconds = INITIAL_WAIT_SECONDS;
retry_important(imp);
}
}
static struct peer *find_peer(struct daemon *daemon, const struct pubkey *id)
{
struct peer *peer;
list_for_each(&daemon->peers, peer, list)
if (pubkey_eq(&peer->id, id))
return peer;
return NULL;
}
static struct peer *find_reconnecting_peer(struct daemon *daemon,
const struct pubkey *id)
{
@ -321,7 +305,6 @@ new_local_peer_state(struct peer *peer, const struct crypto_state *cs)
init_peer_crypto_state(peer, &lps->pcs);
lps->pcs.cs = *cs;
lps->return_to_master = false;
msg_queue_init(&lps->peer_out, lps);
return lps;
@ -374,16 +357,6 @@ static struct peer *new_peer(const tal_t *ctx,
return peer;
}
/* Peer setup is complete: reparent the peer onto the daemon so it
 * outlives the connection object, add it to daemon->peers, and arrange
 * for destroy_peer() to run when it is freed. */
static void peer_finalized(struct peer *peer)
{
/* No longer tied to peer->conn's lifetime. */
tal_steal(peer->daemon, peer);
/* Now we can put this in the list of peers */
list_add_tail(&peer->daemon->peers, &peer->list);
tal_add_destructor(peer, destroy_peer);
}
static void destroy_reaching(struct reaching *reach)
{
list_del_from(&reach->daemon->reaching, &reach->list);
@ -468,22 +441,6 @@ static int get_gossipfd(struct peer *peer)
return fdpass_recv(GOSSIPCTL_FD);
}
/* Log a formatted error and queue it to be sent to the peer; the
 * connection is closed after the error message is written out. */
static void peer_error(struct peer *peer, const char *fmt, ...)
{
va_list ap;
/* First traversal of the varargs: log locally. */
va_start(ap, fmt);
status_trace("peer %s: %s",
type_to_string(tmpctx, struct pubkey, &peer->id),
tal_vfmt(tmpctx, fmt, ap));
va_end(ap);
/* Send error: we'll close after writing this. */
/* Second va_start: a va_list cannot be reused after va_end, so we
 * restart it to format the wire error message. */
va_start(ap, fmt);
queue_peer_msg(peer, take(towire_errorfmtv(peer, NULL, fmt, ap)));
va_end(ap);
}
static bool is_all_channel_error(const u8 *msg)
{
struct channel_id channel_id;
@ -520,25 +477,18 @@ static struct io_plan *retry_peer_connected(struct io_conn *conn,
static struct io_plan *peer_connected(struct io_conn *conn, struct peer *peer)
{
struct peer *old_peer;
u8 *msg;
/* Now, is this a reconnect? */
old_peer = find_peer(peer->daemon, &peer->id);
if (old_peer) {
status_trace("peer %s: reconnect for %s",
type_to_string(tmpctx, struct pubkey, &peer->id),
old_peer->local ? "local peer" : "active peer");
if (!old_peer->local) {
/* Tell master to kill it: will send peer_disconnect */
msg = towire_connect_reconnected(NULL, &peer->id);
daemon_conn_send(&peer->daemon->master, take(msg));
add_reconnecting_peer(peer->daemon, peer);
return io_wait(conn, peer, retry_peer_connected, peer);
}
/* Local peers can just be discarded when they reconnect:
* closing conn will free peer. */
io_close(old_peer->local->conn);
/* FIXME: We could do this before exchanging init msgs. */
if (pubkey_set_get(&peer->daemon->peers, &peer->id)) {
status_trace("peer %s: reconnect",
type_to_string(tmpctx, struct pubkey, &peer->id));
/* Tell master to kill it: will send peer_disconnect */
msg = towire_connect_reconnected(NULL, &peer->id);
daemon_conn_send(&peer->daemon->master, take(msg));
add_reconnecting_peer(peer->daemon, peer);
return io_wait(conn, peer, retry_peer_connected, peer);
}
reached_peer(peer, conn);
@ -553,10 +503,6 @@ static struct io_plan *peer_connected(struct io_conn *conn, struct peer *peer)
peer->gfeatures, peer->lfeatures);
send_peer_with_fds(peer, msg);
/* This is a full peer now; we keep it around until master says
* it's dead. */
peer_finalized(peer);
return io_close_taken_fd(conn);
}
@ -630,124 +576,6 @@ static struct io_plan *init_new_peer(struct io_conn *conn,
take(initmsg), read_init);
}
/* Destructor attached while master awaits a peer release: if the peer
 * dies before the release completes, tell master the release failed. */
static void fail_release(struct peer *peer)
{
	struct daemon_conn *master = &peer->daemon->master;

	daemon_conn_send(master,
			 take(towire_connectctl_release_peer_replyfail(NULL)));
}
/* Hand this peer over to the master daemon.  If the peer sent a
 * non-gossip message we forward it inside a peer_nongossip message;
 * otherwise this is a reply to a release request. */
static struct io_plan *ready_for_master(struct io_conn *conn, struct peer *peer)
{
u8 *msg;
if (peer->local->nongossip_msg)
msg = towire_connect_peer_nongossip(peer, &peer->id,
&peer->addr,
&peer->local->pcs.cs,
peer->gfeatures,
peer->lfeatures,
peer->local->nongossip_msg);
else
msg = towire_connectctl_release_peer_reply(peer,
&peer->addr,
&peer->local->pcs.cs,
peer->gfeatures,
peer->lfeatures);
/* FIXME: This can block (bad!) and anyway we can still have
 * half-*read* gossip messages! */
daemon_conn_sync_flush(&peer->local->gossip_conn);
io_close_taken_fd(peer->local->gossip_conn.conn);
/* Ships msg plus the peer and gossip fds to master. */
send_peer_with_fds(peer, take(msg));
/* In case we set this earlier. */
tal_del_destructor(peer, fail_release);
return io_close_taken_fd(conn);
}
static struct io_plan *peer_msgin(struct io_conn *conn,
struct peer *peer, u8 *msg);
/* Wrapper around peer_read_message: don't read another if we want to
* pass up to master */
/* Read-side step: if this peer is flagged to go back to master, stop
 * reading and park until woken; otherwise read the next peer message
 * and dispatch it to peer_msgin. */
static struct io_plan *peer_next_in(struct io_conn *conn, struct peer *peer)
{
if (peer->local->return_to_master) {
assert(!peer_in_started(conn, &peer->local->pcs));
/* Wake writer. */
msg_wake(&peer->local->peer_out);
return io_wait(conn, peer, peer_next_in, peer);
}
return peer_read_message(conn, &peer->local->pcs, peer_msgin);
}
/* Dispatch one incoming message from a locally-held peer: errors close
 * the connection, gossip messages are forwarded to gossipd, channel
 * messages are punted back to master, unknown even types get an error. */
static struct io_plan *peer_msgin(struct io_conn *conn,
struct peer *peer, u8 *msg)
{
enum wire_type t = fromwire_peektype(msg);
assert(peer->local);
switch (t) {
case WIRE_ERROR:
status_trace("%s sent ERROR %s",
type_to_string(tmpctx, struct pubkey, &peer->id),
sanitize_error(tmpctx, msg, NULL));
return io_close(conn);
/* Gossip-related messages: forward to gossipd and keep reading. */
case WIRE_PING:
case WIRE_PONG:
case WIRE_CHANNEL_ANNOUNCEMENT:
case WIRE_NODE_ANNOUNCEMENT:
case WIRE_CHANNEL_UPDATE:
case WIRE_QUERY_SHORT_CHANNEL_IDS:
case WIRE_REPLY_SHORT_CHANNEL_IDS_END:
case WIRE_QUERY_CHANNEL_RANGE:
case WIRE_REPLY_CHANNEL_RANGE:
case WIRE_GOSSIP_TIMESTAMP_FILTER:
daemon_conn_send(&peer->local->gossip_conn, msg);
return peer_next_in(conn, peer);
/* Channel-establishment / HTLC messages: not ours to handle. */
case WIRE_INIT:
case WIRE_OPEN_CHANNEL:
case WIRE_ACCEPT_CHANNEL:
case WIRE_FUNDING_CREATED:
case WIRE_FUNDING_SIGNED:
case WIRE_FUNDING_LOCKED:
case WIRE_SHUTDOWN:
case WIRE_CLOSING_SIGNED:
case WIRE_UPDATE_ADD_HTLC:
case WIRE_UPDATE_FULFILL_HTLC:
case WIRE_UPDATE_FAIL_HTLC:
case WIRE_UPDATE_FAIL_MALFORMED_HTLC:
case WIRE_COMMITMENT_SIGNED:
case WIRE_REVOKE_AND_ACK:
case WIRE_UPDATE_FEE:
case WIRE_CHANNEL_REESTABLISH:
case WIRE_ANNOUNCEMENT_SIGNATURES:
/* Not our place to handle this, so we punt */
peer->local->return_to_master = true;
peer->local->nongossip_msg = tal_steal(peer, msg);
/* This will wait. */
return peer_next_in(conn, peer);
}
/* BOLT #1:
*
* The type follows the _it's ok to be odd_ rule, so nodes MAY send
* _odd_-numbered types without ascertaining that the recipient
* understands it. */
if (t & 1) {
status_trace("Peer %s sent packet with unknown message type %u, ignoring",
type_to_string(tmpctx, struct pubkey, &peer->id), t);
} else
peer_error(peer, "Packet with unknown message type %u", t);
return peer_next_in(conn, peer);
}
static struct io_plan *peer_pkt_out(struct io_conn *conn, struct peer *peer)
{
/* First priority is queued packets, if any */
@ -765,222 +593,18 @@ static struct io_plan *peer_pkt_out(struct io_conn *conn, struct peer *peer)
peer_pkt_out);
}
/* Do we want to send this peer to the master daemon? */
if (peer->local->return_to_master) {
if (!peer_in_started(conn, &peer->local->pcs))
return ready_for_master(conn, peer);
}
return msg_queue_wait(conn, &peer->local->peer_out, peer_pkt_out, peer);
}
/* Now we're a fully-fledged peer: run the read side (peer_next_in) and
 * the write side (peer_pkt_out) concurrently on the same connection. */
static struct io_plan *peer_start_duplex(struct io_conn *conn, struct peer *peer)
{
return io_duplex(conn,
peer_next_in(conn, peer),
peer_pkt_out(conn, peer));
}
/* Handle a message from gossipd destined for this peer: unwrap the
 * gossip_send_gossip envelope and queue the payload to the peer.  Only
 * gossip messages or WIRE_ERROR are acceptable payloads. */
static struct io_plan *recv_gossip(struct io_conn *conn,
struct daemon_conn *dc)
{
struct peer *peer = dc->ctx;
u8 *gossip;
if (!fromwire_gossip_send_gossip(tmpctx, dc->msg_in, &gossip)) {
status_broken("Got bad message for %s from gossipd: %s",
type_to_string(tmpctx, struct pubkey, &peer->id),
tal_hex(tmpctx, dc->msg_in));
return io_close(conn);
}
/* Gossipd can send us gossip messages, OR errors */
if (is_msg_for_gossipd(gossip)
|| fromwire_peektype(gossip) == WIRE_ERROR) {
queue_peer_msg(peer, take(gossip));
} else {
status_broken("Gossipd gave %s bad gossip message %s",
type_to_string(tmpctx, struct pubkey, &peer->id),
tal_hex(tmpctx, dc->msg_in));
return io_close(conn);
}
return daemon_conn_read_next(conn, dc);
}
/* When a peer is to be owned by another daemon */
/* When a peer is to be owned by another daemon: will be freed by caller */
/* Transfer a peer to its owner daemon: send msg to master followed by
 * the peer fd and the gossip fd.  Frees the local state; the peer
 * object itself survives under the daemon's ownership. */
static void send_peer_with_fds(struct peer *peer, const u8 *msg)
{
/* Save the fds before tal_free(peer->local) invalidates them. */
int peer_fd = peer->local->fd;
int gossip_fd = peer->local->gossip_fd;
/* Now we talk to socket to get to peer's owner daemon. */
peer->local = tal_free(peer->local);
/* Peer stays around, even though caller will close conn. */
tal_steal(peer->daemon, peer);
status_debug("peer %s now remote",
type_to_string(tmpctx, struct pubkey, &peer->id));
daemon_conn_send(&peer->daemon->master, msg);
daemon_conn_send_fd(&peer->daemon->master, peer_fd);
daemon_conn_send_fd(&peer->daemon->master, gossip_fd);
}
daemon_conn_send_fd(&peer->daemon->master, peer->local->fd);
daemon_conn_send_fd(&peer->daemon->master, peer->local->gossip_fd);
/* We have the peer's fd: wrap it in an io_conn running the duplex
 * read/write loop.  On failure the peer is freed; on success the peer's
 * lifetime is tied to the new connection. */
static struct io_plan *new_peer_got_fd(struct io_conn *conn, struct peer *peer)
{
struct daemon *daemon = peer->daemon;
peer->local->conn = io_new_conn(conn, peer->local->fd,
peer_start_duplex, peer);
if (!peer->local->conn) {
status_trace("Could not create connection for peer: %s",
strerror(errno));
tal_free(peer);
} else {
/* If conn dies, we forget peer. */
tal_steal(peer->local->conn, peer);
}
/* Keep servicing requests from master. */
return daemon_conn_read_next(conn, &daemon->master);
}
/* This lets us read the fds in before handling anything. */
/* State gathered while master hands a peer back to us: the wire fields
 * from the hand_back_peer message plus the two fds received afterward. */
struct returning_peer {
struct daemon *daemon;
/* Peer's node id, from the hand_back_peer message. */
struct pubkey id;
/* Crypto state to resume the encrypted transport with. */
struct crypto_state cs;
/* Optional message master wants us to send to the peer first. */
u8 *inner_msg;
/* fds received over the master socket after the message. */
int peer_fd, gossip_fd;
};
/* Both fds are in: reattach the returned peer as a local peer and
 * restart its duplex IO.  Dies if the peer is unknown. */
static struct io_plan *handle_returning_peer(struct io_conn *conn,
struct returning_peer *rpeer)
{
struct daemon *daemon = rpeer->daemon;
struct peer *peer, *connecting;
peer = find_peer(daemon, &rpeer->id);
if (!peer)
status_failed(STATUS_FAIL_INTERNAL_ERROR,
"hand_back_peer unknown peer: %s",
type_to_string(tmpctx, struct pubkey, &rpeer->id));
assert(!peer->local);
/* Corner case: we got a reconnection while master was handing this
 * back. We would have killed it immediately if it was local previously
 * so do that now */
connecting = find_reconnecting_peer(daemon, &rpeer->id);
if (connecting) {
status_trace("Forgetting handed back peer %s",
type_to_string(tmpctx, struct pubkey, &peer->id));
tal_free(peer);
/* Now connecting peer can go ahead. */
io_wake(connecting);
return daemon_conn_read_next(conn, &daemon->master);
}
status_trace("hand_back_peer %s: now local again",
type_to_string(tmpctx, struct pubkey, &rpeer->id));
/* Rebuild local state from the returned crypto state and fds. */
peer->local = new_local_peer_state(peer, &rpeer->cs);
peer->local->fd = rpeer->peer_fd;
peer->local->gossip_fd = rpeer->gossip_fd;
daemon_conn_init(peer, &peer->local->gossip_conn, peer->local->gossip_fd,
recv_gossip, NULL);
/* If they told us to send a message, queue it now */
if (tal_count(rpeer->inner_msg))
msg_enqueue(&peer->local->peer_out, take(rpeer->inner_msg));
/* FIXME: rpeer destructor should close peer_fd, gossip_fd */
tal_free(rpeer);
return new_peer_got_fd(conn, peer);
}
/* Second fd of the hand-back: receive the gossip fd, then proceed to
 * handle_returning_peer with both fds populated. */
static struct io_plan *read_returning_gossipfd(struct io_conn *conn,
struct returning_peer *rpeer)
{
return io_recv_fd(conn, &rpeer->gossip_fd,
handle_returning_peer, rpeer);
}
/* Master is handing a peer back to us: parse the message, then receive
 * the peer fd and gossip fd (in that order) before reattaching it. */
static struct io_plan *hand_back_peer(struct io_conn *conn,
struct daemon *daemon,
const u8 *msg)
{
struct returning_peer *rpeer = tal(daemon, struct returning_peer);
rpeer->daemon = daemon;
if (!fromwire_connectctl_hand_back_peer(msg, msg,
&rpeer->id, &rpeer->cs,
&rpeer->inner_msg))
master_badmsg(WIRE_CONNECTCTL_HAND_BACK_PEER, msg);
status_debug("Handing back peer %s to master",
type_to_string(msg, struct pubkey, &rpeer->id));
/* peer fd first; gossip fd follows in read_returning_gossipfd. */
return io_recv_fd(conn, &rpeer->peer_fd,
read_returning_gossipfd, rpeer);
}
/* Master asks us to disconnect a peer.  Only works if we hold the peer
 * locally; otherwise reply with a failure indicating whether the peer
 * is known at all. */
static struct io_plan *disconnect_peer(struct io_conn *conn, struct daemon *daemon,
const u8 *msg)
{
struct pubkey id;
struct peer *peer;
if (!fromwire_connectctl_peer_disconnect(msg, &id))
master_badmsg(WIRE_CONNECTCTL_PEER_DISCONNECT, msg);
peer = find_peer(daemon, &id);
if (peer && peer->local) {
/* This peer is local to this (connectd) daemon */
io_close(peer->local->conn);
msg = towire_connectctl_peer_disconnect_reply(NULL);
daemon_conn_send(&daemon->master, take(msg));
} else {
status_trace("disconnect_peer: peer %s %s",
type_to_string(tmpctx, struct pubkey, &id),
!peer ? "not connected" : "not gossiping");
/* isconnected=true means we know the peer but don't hold it. */
msg = towire_connectctl_peer_disconnect_replyfail(NULL, peer ? true : false);
daemon_conn_send(&daemon->master, take(msg));
}
return daemon_conn_read_next(conn, &daemon->master);
}
static struct io_plan *release_peer(struct io_conn *conn, struct daemon *daemon,
const u8 *msg)
{
struct pubkey id;
struct peer *peer;
if (!fromwire_connectctl_release_peer(msg, &id))
master_badmsg(WIRE_CONNECTCTL_RELEASE_PEER, msg);
peer = find_peer(daemon, &id);
if (!peer || !peer->local || peer->local->return_to_master) {
/* This can happen with dying peers, or reconnect */
status_trace("release_peer: peer %s %s",
type_to_string(tmpctx, struct pubkey, &id),
!peer ? "not found"
: peer->local ? "already releasing"
: "not local");
msg = towire_connectctl_release_peer_replyfail(NULL);
daemon_conn_send(&daemon->master, take(msg));
} else {
peer->local->return_to_master = true;
peer->local->nongossip_msg = NULL;
/* Wake output, in case it's idle. */
msg_wake(&peer->local->peer_out);
}
return daemon_conn_read_next(conn, &daemon->master);
pubkey_set_add(&peer->daemon->peers,
tal_dup(peer->daemon, struct pubkey, &peer->id));
}
static int make_listen_fd(int domain, void *addr, socklen_t len, bool mayfail)
@ -1583,9 +1207,8 @@ static void try_reach_peer(struct daemon *daemon, const struct pubkey *id,
struct reaching *reach;
u8 *msg;
bool use_proxy = daemon->use_proxy_always;
struct peer *peer = find_peer(daemon, id);
if (peer) {
if (pubkey_set_get(&daemon->peers, id)) {
status_debug("try_reach_peer: have peer %s",
type_to_string(tmpctx, struct pubkey, id));
if (master_needs_response) {
@ -1809,80 +1432,37 @@ static struct io_plan *peer_important(struct io_conn *conn,
static struct io_plan *peer_disconnected(struct io_conn *conn,
struct daemon *daemon, const u8 *msg)
{
struct pubkey id;
struct pubkey id, *key;
struct peer *peer;
struct important_peerid *imp;
if (!fromwire_connectctl_peer_disconnected(msg, &id))
master_badmsg(WIRE_CONNECTCTL_PEER_DISCONNECTED, msg);
peer = find_peer(daemon, &id);
if (!peer)
key = pubkey_set_get(&daemon->peers, &id);
if (!key)
status_failed(STATUS_FAIL_INTERNAL_ERROR,
"peer_disconnected unknown peer: %s",
type_to_string(tmpctx, struct pubkey, &id));
pubkey_set_del(&daemon->peers, key);
tal_free(key);
assert(!peer->local);
status_trace("Forgetting remote peer %s",
type_to_string(tmpctx, struct pubkey, &peer->id));
tal_free(peer);
status_trace("Forgetting peer %s",
type_to_string(tmpctx, struct pubkey, &id));
/* If there was a connecting peer waiting, wake it now */
peer = find_reconnecting_peer(daemon, &id);
if (peer)
io_wake(peer);
return daemon_conn_read_next(conn, &daemon->master);
}
/* Append a newly-allocated peer_features entry (deep copies of the
 * global and local feature bitmaps) to the tal array *pf, growing it
 * by one. */
static void append_peer_features(const struct peer_features ***pf,
const u8 *gfeatures,
const u8 *lfeatures)
{
struct peer_features *new;
size_t num_nodes = tal_count(*pf);
new = tal(*pf, struct peer_features);
new->global_features = tal_dup_arr(new, u8, gfeatures,
tal_count(gfeatures), 0);
new->local_features = tal_dup_arr(new, u8, lfeatures,
tal_count(lfeatures), 0);
tal_resize(pf, num_nodes + 1);
(*pf)[num_nodes] = new;
}
static struct io_plan *get_peers(struct io_conn *conn,
struct daemon *daemon, const u8 *msg)
{
struct peer *peer;
size_t n = 0;
struct pubkey *id = tal_arr(conn, struct pubkey, n);
struct wireaddr_internal *wireaddr = tal_arr(conn, struct wireaddr_internal, n);
const struct peer_features **pf = tal_arr(conn, const struct peer_features *, n);
struct pubkey *specific_id;
if (!fromwire_connect_getpeers_request(msg, msg, &specific_id))
master_badmsg(WIRE_CONNECTCTL_PEER_ADDRHINT, msg);
list_for_each(&daemon->peers, peer, list) {
if (specific_id && !pubkey_eq(specific_id, &peer->id))
continue;
tal_resize(&id, n+1);
tal_resize(&wireaddr, n+1);
id[n] = peer->id;
wireaddr[n] = peer->addr;
append_peer_features(&pf, peer->gfeatures, peer->lfeatures);
n++;
imp = important_peerid_map_get(&daemon->important_peerids, &id);
if (imp) {
imp->wait_seconds = INITIAL_WAIT_SECONDS;
retry_important(imp);
}
daemon_conn_send(&daemon->master,
take(towire_connect_getpeers_reply(NULL, id, wireaddr, pf)));
return daemon_conn_read_next(conn, &daemon->master);
}
static struct io_plan *recv_req(struct io_conn *conn, struct daemon_conn *master)
{
struct daemon *daemon = container_of(master, struct daemon, master);
@ -1895,12 +1475,6 @@ static struct io_plan *recv_req(struct io_conn *conn, struct daemon_conn *master
case WIRE_CONNECTCTL_ACTIVATE:
return connect_activate(master, daemon, master->msg_in);
case WIRE_CONNECTCTL_RELEASE_PEER:
return release_peer(conn, daemon, master->msg_in);
case WIRE_CONNECTCTL_HAND_BACK_PEER:
return hand_back_peer(conn, daemon, master->msg_in);
case WIRE_CONNECTCTL_CONNECT_TO_PEER:
return connect_to_peer(conn, daemon, master->msg_in);
@ -1913,23 +1487,11 @@ static struct io_plan *recv_req(struct io_conn *conn, struct daemon_conn *master
case WIRE_CONNECTCTL_PEER_DISCONNECTED:
return peer_disconnected(conn, daemon, master->msg_in);
case WIRE_CONNECT_GETPEERS_REQUEST:
return get_peers(conn, daemon, master->msg_in);
case WIRE_CONNECTCTL_PEER_DISCONNECT:
return disconnect_peer(conn, daemon, master->msg_in);
/* We send these, we don't receive them */
case WIRE_CONNECTCTL_INIT_REPLY:
case WIRE_CONNECTCTL_ACTIVATE_REPLY:
case WIRE_CONNECTCTL_RELEASE_PEER_REPLY:
case WIRE_CONNECTCTL_RELEASE_PEER_REPLYFAIL:
case WIRE_CONNECT_GETPEERS_REPLY:
case WIRE_CONNECT_PEER_CONNECTED:
case WIRE_CONNECTCTL_CONNECT_TO_PEER_RESULT:
case WIRE_CONNECT_PEER_NONGOSSIP:
case WIRE_CONNECTCTL_PEER_DISCONNECT_REPLY:
case WIRE_CONNECTCTL_PEER_DISCONNECT_REPLYFAIL:
case WIRE_CONNECT_RECONNECTED:
break;
}
@ -1955,7 +1517,7 @@ int main(int argc, char *argv[])
subdaemon_setup(argc, argv);
daemon = tal(NULL, struct daemon);
list_head_init(&daemon->peers);
pubkey_set_init(&daemon->peers);
list_head_init(&daemon->reconnecting);
list_head_init(&daemon->reaching);
list_head_init(&daemon->addrhints);

56
connectd/connect_wire.csv

@ -70,62 +70,6 @@ connect_peer_connected,,gfeatures,gflen*u8
connect_peer_connected,,lflen,u16
connect_peer_connected,,lfeatures,lflen*u8
# Connectd -> master: peer sent non-gossip packet. Two fds: peer and gossip
connect_peer_nongossip,2003
connect_peer_nongossip,,id,struct pubkey
connect_peer_nongossip,,addr,struct wireaddr_internal
connect_peer_nongossip,,crypto_state,struct crypto_state
connect_peer_nongossip,,gflen,u16
connect_peer_nongossip,,gfeatures,gflen*u8
connect_peer_nongossip,,lflen,u16
connect_peer_nongossip,,lfeatures,lflen*u8
connect_peer_nongossip,,len,u16
connect_peer_nongossip,,msg,len*u8
# Master -> connectd: release a peer (so we can open a channel)
connectctl_release_peer,2004
connectctl_release_peer,,id,struct pubkey
# Connectd -> master: reply to connect_release_peer. Two fds: peer and gossip.
connectctl_release_peer_reply,2104
connectctl_release_peer_reply,,addr,struct wireaddr_internal
connectctl_release_peer_reply,,crypto_state,struct crypto_state
connectctl_release_peer_reply,,gflen,u16
connectctl_release_peer_reply,,gfeatures,gflen*u8
connectctl_release_peer_reply,,lflen,u16
connectctl_release_peer_reply,,lfeatures,lflen*u8
# Connectd -> master: reply to connect_release_peer if we couldn't find the peer.
connectctl_release_peer_replyfail,2204
# master -> connectd: take back peer, with optional msg. (+peer fd, +gossip fd)
connectctl_hand_back_peer,2013
connectctl_hand_back_peer,,id,struct pubkey
connectctl_hand_back_peer,,crypto_state,struct crypto_state
connectctl_hand_back_peer,,len,u16
connectctl_hand_back_peer,,msg,len*u8
# master -> connectd: peer has disconnected.
connectctl_peer_disconnected,2015
connectctl_peer_disconnected,,id,struct pubkey
# The main daemon asks for peers
connect_getpeers_request,2011
connect_getpeers_request,,id,?struct pubkey
connect_getpeers_reply,2111
connect_getpeers_reply,,num,u16
connect_getpeers_reply,,id,num*struct pubkey
connect_getpeers_reply,,addr,num*struct wireaddr_internal
connect_getpeers_reply,,features,num*struct peer_features
# master->connectd: Request to disconnect from a peer.
connectctl_peer_disconnect,2023
connectctl_peer_disconnect,,id,struct pubkey
# Connectd -> master: reply to connect_peer_disconnect with peer id.
connectctl_peer_disconnect_reply,2123
# Connectd -> master: reply to connect_peer_disconnect if we couldn't find the peer.
connectctl_peer_disconnect_replyfail,2223
connectctl_peer_disconnect_replyfail,,isconnected,bool

Can't render this file because it has a wrong number of fields in line 5.

10
lightningd/connect_control.c

@ -224,23 +224,13 @@ static unsigned connectd_msg(struct subd *connectd, const u8 *msg, const int *fd
/* These are messages we send, not them. */
case WIRE_CONNECTCTL_INIT:
case WIRE_CONNECTCTL_ACTIVATE:
case WIRE_CONNECT_GETPEERS_REQUEST:
case WIRE_CONNECTCTL_PEER_ADDRHINT:
case WIRE_CONNECTCTL_CONNECT_TO_PEER:
case WIRE_CONNECTCTL_PEER_IMPORTANT:
case WIRE_CONNECTCTL_RELEASE_PEER:
case WIRE_CONNECTCTL_HAND_BACK_PEER:
case WIRE_CONNECTCTL_PEER_DISCONNECTED:
case WIRE_CONNECTCTL_PEER_DISCONNECT:
/* This is a reply, so never gets through to here. */
case WIRE_CONNECTCTL_INIT_REPLY:
case WIRE_CONNECTCTL_ACTIVATE_REPLY:
case WIRE_CONNECT_GETPEERS_REPLY:
case WIRE_CONNECTCTL_RELEASE_PEER_REPLY:
case WIRE_CONNECTCTL_RELEASE_PEER_REPLYFAIL:
case WIRE_CONNECTCTL_PEER_DISCONNECT_REPLY:
case WIRE_CONNECTCTL_PEER_DISCONNECT_REPLYFAIL:
case WIRE_CONNECT_PEER_NONGOSSIP:
break;
case WIRE_CONNECT_RECONNECTED:

419
lightningd/peer_control.c

@ -384,9 +384,6 @@ void channel_errmsg(struct channel *channel,
const char *desc,
const u8 *err_for_them)
{
struct lightningd *ld = channel->peer->ld;
u8 *msg;
/* No peer fd means a subd crash or disconnection. */
if (peer_fd == -1) {
channel_fail_transient(channel, "%s: %s",
@ -411,7 +408,7 @@ void channel_errmsg(struct channel *channel,
* - MUST fail all channels with the receiving node.
* - MUST close the connection.
*/
/* FIXME: Connectd closes connection, but doesn't fail channels. */
/* FIXME: Close if it's an all-channels error sent or rcvd */
/* BOLT #1:
*
@ -428,12 +425,8 @@ void channel_errmsg(struct channel *channel,
channel->owner->name,
err_for_them ? "sent" : "received", desc);
/* Hand back to connectd, with any error packet. */
msg = towire_connectctl_hand_back_peer(NULL, &channel->peer->id,
cs, err_for_them);
subd_send_msg(ld->connectd, take(msg));
subd_send_fd(ld->connectd, peer_fd);
subd_send_fd(ld->connectd, gossip_fd);
/* Get openingd to chat with them, maybe sending error. */
peer_start_openingd(channel->peer, cs, peer_fd, gossip_fd, err_for_them);
}
/* Connectd tells us a peer has connected: it never hands us duplicates, since
@ -599,237 +592,187 @@ void channel_watch_funding(struct lightningd *ld, struct channel *channel)
funding_spent);
}
struct getpeers_args {
struct command *cmd;
/* If non-NULL, they want logs too */
enum log_level *ll;
/* If set, only report on a specific id. */
struct pubkey *specific_id;
};
static void connectd_getpeers_complete(struct subd *connectd, const u8 *msg,
const int *fds UNUSED,
struct getpeers_args *gpa)
static void json_add_peer(struct lightningd *ld,
struct json_result *response,
struct peer *p,
const enum log_level *ll)
{
/* This is a little sneaky... */
struct pubkey *ids;
struct wireaddr_internal *addrs;
struct peer_features **pf;
struct json_result *response = new_json_result(gpa->cmd);
struct peer *p;
if (!fromwire_connect_getpeers_reply(msg, msg, &ids, &addrs, &pf)) {
command_fail(gpa->cmd, LIGHTNINGD,
"Bad response from connectd");
return;
}
bool connected;
struct channel *channel;
/* First the peers not just gossiping. */
json_object_start(response, NULL);
json_array_start(response, "peers");
list_for_each(&gpa->cmd->ld->peers, p, list) {
bool connected;
struct channel *channel;
struct channel_stats channel_stats;
json_add_pubkey(response, "id", &p->id);
if (gpa->specific_id && !pubkey_eq(gpa->specific_id, &p->id))
continue;
json_object_start(response, NULL);
json_add_pubkey(response, "id", &p->id);
/* Channel is also connected if uncommitted channel */
if (p->uncommitted_channel)
connected = true;
else {
channel = peer_active_channel(p);
connected = channel && channel->connected;
}
json_add_bool(response, "connected", connected);
/* If it's not connected, features are unreliable: we don't
* store them in the database, and they would only reflect
* their features *last* time they connected. */
if (connected) {
json_array_start(response, "netaddr");
if (p->addr.itype != ADDR_INTERNAL_WIREADDR
|| p->addr.u.wireaddr.type != ADDR_TYPE_PADDING)
json_add_string(response, NULL,
type_to_string(response,
struct wireaddr_internal,
&p->addr));
json_array_end(response);
json_add_hex_talarr(response, "global_features",
p->global_features);
json_add_hex_talarr(response, "local_features",
p->local_features);
}
/* Channel is also connected if uncommitted channel */
if (p->uncommitted_channel)
connected = true;
else {
channel = peer_active_channel(p);
connected = channel && channel->connected;
}
json_add_bool(response, "connected", connected);
json_array_start(response, "channels");
json_add_uncommitted_channel(response, p->uncommitted_channel);
list_for_each(&p->channels, channel, list) {
struct channel_id cid;
u64 our_reserve_msat = channel->channel_info.their_config.channel_reserve_satoshis * 1000;
json_object_start(response, NULL);
json_add_string(response, "state",
channel_state_name(channel));
if (channel->owner)
json_add_string(response, "owner",
channel->owner->name);
if (channel->scid)
json_add_short_channel_id(response,
"short_channel_id",
channel->scid);
derive_channel_id(&cid,
&channel->funding_txid,
channel->funding_outnum);
json_add_string(response, "channel_id",
type_to_string(tmpctx,
struct channel_id,
&cid));
json_add_txid(response,
"funding_txid",
&channel->funding_txid);
json_add_u64(response, "msatoshi_to_us",
channel->our_msatoshi);
json_add_u64(response, "msatoshi_to_us_min",
channel->msatoshi_to_us_min);
json_add_u64(response, "msatoshi_to_us_max",
channel->msatoshi_to_us_max);
json_add_u64(response, "msatoshi_total",
channel->funding_satoshi * 1000);
/* channel config */
json_add_u64(response, "dust_limit_satoshis",
channel->our_config.dust_limit_satoshis);
json_add_u64(response, "max_htlc_value_in_flight_msat",
channel->our_config.max_htlc_value_in_flight_msat);
/* The `channel_reserve_satoshis` is imposed on
* the *other* side (see `channel_reserve_msat`
* function in, it uses `!side` to flip sides).
* So our configuration `channel_reserve_satoshis`
* is imposed on their side, while their
* configuration `channel_reserve_satoshis` is
* imposed on ours. */
json_add_u64(response, "their_channel_reserve_satoshis",
channel->our_config.channel_reserve_satoshis);
json_add_u64(response, "our_channel_reserve_satoshis",
channel->channel_info.their_config.channel_reserve_satoshis);
/* Compute how much we can send via this channel. */
if (channel->our_msatoshi <= our_reserve_msat)
json_add_u64(response, "spendable_msatoshi", 0);
else
json_add_u64(response, "spendable_msatoshi",
channel->our_msatoshi - our_reserve_msat);
json_add_u64(response, "htlc_minimum_msat",
channel->our_config.htlc_minimum_msat);
/* The `to_self_delay` is imposed on the *other*
* side, so our configuration `to_self_delay` is
* imposed on their side, while their configuration
* `to_self_delay` is imposed on ours. */
json_add_num(response, "their_to_self_delay",
channel->our_config.to_self_delay);
json_add_num(response, "our_to_self_delay",
channel->channel_info.their_config.to_self_delay);
json_add_num(response, "max_accepted_htlcs",
channel->our_config.max_accepted_htlcs);
json_array_start(response, "status");
for (size_t i = 0;
i < ARRAY_SIZE(channel->billboard.permanent);
i++) {
if (!channel->billboard.permanent[i])
continue;
json_add_string(response, NULL,
channel->billboard.permanent[i]);
}
if (channel->billboard.transient)
json_add_string(response, NULL,
channel->billboard.transient);
json_array_end(response);
/* Provide channel statistics */
wallet_channel_stats_load(gpa->cmd->ld->wallet,
channel->dbid,
&channel_stats);
json_add_u64(response, "in_payments_offered",
channel_stats.in_payments_offered);
json_add_u64(response, "in_msatoshi_offered",
channel_stats.in_msatoshi_offered);
json_add_u64(response, "in_payments_fulfilled",
channel_stats.in_payments_fulfilled);
json_add_u64(response, "in_msatoshi_fulfilled",
channel_stats.in_msatoshi_fulfilled);
json_add_u64(response, "out_payments_offered",
channel_stats.out_payments_offered);
json_add_u64(response, "out_msatoshi_offered",
channel_stats.out_msatoshi_offered);
json_add_u64(response, "out_payments_fulfilled",
channel_stats.out_payments_fulfilled);
json_add_u64(response, "out_msatoshi_fulfilled",
channel_stats.out_msatoshi_fulfilled);
json_object_end(response);
}
/* If it's not connected, features are unreliable: we don't
* store them in the database, and they would only reflect
* their features *last* time they connected. */
if (connected) {
json_array_start(response, "netaddr");
if (p->addr.itype != ADDR_INTERNAL_WIREADDR
|| p->addr.u.wireaddr.type != ADDR_TYPE_PADDING)
json_add_string(response, NULL,
type_to_string(response,
struct wireaddr_internal,
&p->addr));
json_array_end(response);
json_add_hex_talarr(response, "global_features",
p->global_features);
if (gpa->ll)
json_add_log(response, p->log_book, *gpa->ll);
json_object_end(response);
json_add_hex_talarr(response, "local_features",
p->local_features);
}
for (size_t i = 0; i < tal_count(ids); i++) {
/* Don't report peers in both, which can happen if they're
* reconnecting */
if (peer_by_id(gpa->cmd->ld, ids + i))
continue;
json_array_start(response, "channels");
json_add_uncommitted_channel(response, p->uncommitted_channel);
list_for_each(&p->channels, channel, list) {
struct channel_id cid;
struct channel_stats channel_stats;
u64 our_reserve_msat = channel->channel_info.their_config.channel_reserve_satoshis * 1000;
json_object_start(response, NULL);
/* Fake state. */
json_add_string(response, "state", "GOSSIPING");
json_add_pubkey(response, "id", ids+i);
json_add_hex_talarr(response, "global_features",
pf[i]->global_features);
json_add_hex_talarr(response, "local_features",
pf[i]->local_features);
json_array_start(response, "netaddr");
if (addrs[i].itype != ADDR_INTERNAL_WIREADDR
|| addrs[i].u.wireaddr.type != ADDR_TYPE_PADDING)
json_add_string(response, "state",
channel_state_name(channel));
if (channel->owner)
json_add_string(response, "owner",
channel->owner->name);
if (channel->scid)
json_add_short_channel_id(response,
"short_channel_id",
channel->scid);
derive_channel_id(&cid,
&channel->funding_txid,
channel->funding_outnum);
json_add_string(response, "channel_id",
type_to_string(tmpctx, struct channel_id, &cid));
json_add_txid(response,
"funding_txid",
&channel->funding_txid);
json_add_u64(response, "msatoshi_to_us",
channel->our_msatoshi);
json_add_u64(response, "msatoshi_to_us_min",
channel->msatoshi_to_us_min);
json_add_u64(response, "msatoshi_to_us_max",
channel->msatoshi_to_us_max);
json_add_u64(response, "msatoshi_total",
channel->funding_satoshi * 1000);
/* channel config */
json_add_u64(response, "dust_limit_satoshis",
channel->our_config.dust_limit_satoshis);
json_add_u64(response, "max_htlc_value_in_flight_msat",
channel->our_config.max_htlc_value_in_flight_msat);
/* The `channel_reserve_satoshis` is imposed on
* the *other* side (see `channel_reserve_msat`
* function in, it uses `!side` to flip sides).
* So our configuration `channel_reserve_satoshis`
* is imposed on their side, while their
* configuration `channel_reserve_satoshis` is
* imposed on ours. */
json_add_u64(response, "their_channel_reserve_satoshis",
channel->our_config.channel_reserve_satoshis);
json_add_u64(response, "our_channel_reserve_satoshis",
channel->channel_info.their_config.channel_reserve_satoshis);
/* Compute how much we can send via this channel. */
if (channel->our_msatoshi <= our_reserve_msat)
json_add_u64(response, "spendable_msatoshi", 0);
else
json_add_u64(response, "spendable_msatoshi",
channel->our_msatoshi - our_reserve_msat);
json_add_u64(response, "htlc_minimum_msat",
channel->our_config.htlc_minimum_msat);
/* The `to_self_delay` is imposed on the *other*
* side, so our configuration `to_self_delay` is
* imposed on their side, while their configuration
* `to_self_delay` is imposed on ours. */
json_add_num(response, "their_to_self_delay",
channel->our_config.to_self_delay);
json_add_num(response, "our_to_self_delay",
channel->channel_info.their_config.to_self_delay);
json_add_num(response, "max_accepted_htlcs",
channel->our_config.max_accepted_htlcs);
json_array_start(response, "status");
for (size_t i = 0;
i < ARRAY_SIZE(channel->billboard.permanent);
i++) {
if (!channel->billboard.permanent[i])
continue;
json_add_string(response, NULL,
type_to_string(response,
struct wireaddr_internal,
addrs + i));
channel->billboard.permanent[i]);
}
if (channel->billboard.transient)
json_add_string(response, NULL,
channel->billboard.transient);
json_array_end(response);
json_add_bool(response, "connected", true);
json_add_string(response, "owner", connectd->name);
/* Provide channel statistics */
wallet_channel_stats_load(ld->wallet,
channel->dbid,
&channel_stats);
json_add_u64(response, "in_payments_offered",
channel_stats.in_payments_offered);
json_add_u64(response, "in_msatoshi_offered",
channel_stats.in_msatoshi_offered);
json_add_u64(response, "in_payments_fulfilled",
channel_stats.in_payments_fulfilled);
json_add_u64(response, "in_msatoshi_fulfilled",
channel_stats.in_msatoshi_fulfilled);
json_add_u64(response, "out_payments_offered",
channel_stats.out_payments_offered);
json_add_u64(response, "out_msatoshi_offered",
channel_stats.out_msatoshi_offered);
json_add_u64(response, "out_payments_fulfilled",
channel_stats.out_payments_fulfilled);
json_add_u64(response, "out_msatoshi_fulfilled",
channel_stats.out_msatoshi_fulfilled);
json_object_end(response);
}
json_array_end(response);
if (ll)
json_add_log(response, p->log_book, *ll);
json_object_end(response);
command_success(gpa->cmd, response);
}
static void json_listpeers(struct command *cmd,
const char *buffer, const jsmntok_t *params)
{
struct getpeers_args *gpa = tal(cmd, struct getpeers_args);
enum log_level *ll;
struct pubkey *specific_id;
struct peer *peer;
struct json_result *response = new_json_result(cmd);
gpa->cmd = cmd;
if (!param(cmd, buffer, params,
p_opt("id", json_tok_pubkey, &gpa->specific_id),
p_opt("level", json_tok_loglevel, &gpa->ll),
p_opt("id", json_tok_pubkey, &specific_id),
p_opt("level", json_tok_loglevel, &ll),
NULL))
return;
/* Get peers from connectd. */
subd_req(cmd, cmd->ld->connectd,
take(towire_connect_getpeers_request(cmd, gpa->specific_id)),
-1, 0, connectd_getpeers_complete, gpa);
command_still_pending(cmd);
json_object_start(response, NULL);
json_array_start(response, "peers");
if (specific_id) {
peer = peer_by_id(cmd->ld, specific_id);
if (peer)
json_add_peer(cmd->ld, response, peer, ll);
} else {
list_for_each(&cmd->ld->peers, peer, list)
json_add_peer(cmd->ld, response, peer, ll);
}
json_array_end(response);
json_object_end(response);
command_success(cmd, response);
}
static const struct json_command listpeers_command = {
@ -1002,44 +945,36 @@ void activate_peers(struct lightningd *ld)
activate_peer(p);
}
/* Peer has been released from connectd. */
static void connectd_peer_disconnected(struct subd *connectd,
const u8 *resp,
const int *fds,
struct command *cmd)
{
bool isconnected;
if (!fromwire_connectctl_peer_disconnect_reply(resp)) {
if (!fromwire_connectctl_peer_disconnect_replyfail(resp, &isconnected))
fatal("Connect daemon gave invalid reply %s",
tal_hex(tmpctx, resp));
if (isconnected)
command_fail(cmd, LIGHTNINGD,
"Peer is not in gossip mode");
else
command_fail(cmd, LIGHTNINGD, "Peer not connected");
} else {
/* Successfully disconnected */
command_success(cmd, null_response(cmd));
}
return;
}
static void json_disconnect(struct command *cmd,
const char *buffer, const jsmntok_t *params)
{
struct pubkey id;
u8 *msg;
struct peer *peer;
struct channel *channel;
if (!param(cmd, buffer, params,
p_req("id", json_tok_pubkey, &id),
NULL))
return;
msg = towire_connectctl_peer_disconnect(cmd, &id);
subd_req(cmd, cmd->ld->connectd, msg, -1, 0, connectd_peer_disconnected, cmd);
command_still_pending(cmd);
peer = peer_by_id(cmd->ld, &id);
if (!peer) {
command_fail(cmd, LIGHTNINGD, "Peer not connected");
return;
}
channel = peer_active_channel(peer);
if (channel) {
command_fail(cmd, LIGHTNINGD, "Peer is in state %s",
channel_state_name(channel));
return;
}
if (!peer->uncommitted_channel) {
command_fail(cmd, LIGHTNINGD, "Peer not connected");
return;
}
kill_uncommitted_channel(peer->uncommitted_channel,
"disconnect command");
command_success(cmd, null_response(cmd));
}
static const struct json_command disconnect_command = {

8
tests/test_closing.py

@ -125,8 +125,8 @@ def test_closing_id(node_factory):
l1.fund_channel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
l1.daemon.wait_for_log("Forgetting remote peer .*")
l2.daemon.wait_for_log("Forgetting remote peer .*")
l1.daemon.wait_for_log("Forgetting peer .*")
l2.daemon.wait_for_log("Forgetting peer .*")
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
@ -134,8 +134,8 @@ def test_closing_id(node_factory):
l2.fund_channel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
l1.daemon.wait_for_log("Forgetting remote peer .*")
l2.daemon.wait_for_log("Forgetting remote peer .*")
l1.daemon.wait_for_log("Forgetting peer .*")
l2.daemon.wait_for_log("Forgetting peer .*")
@unittest.skipIf(not DEVELOPER, "needs dev-rescan-outputs")

11
tests/test_connection.py

@ -280,9 +280,9 @@ def test_reconnect_openingd(node_factory):
# Reconnect.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# We should get a message about reconnecting, but order unsynced.
l2.daemon.wait_for_logs(['connectd.*reconnect for active peer',
'Killing openingd: Reconnected'])
# We should get a message about reconnecting.
l2.daemon.wait_for_log('Killing openingd: Reconnected')
l2.daemon.wait_for_log('lightning_openingd.*Handed peer, entering loop')
# Should work fine.
l1.rpc.fundchannel(l2.info['id'], 20000)
@ -967,7 +967,7 @@ def test_peerinfo(node_factory, bitcoind):
with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
l1.rpc.close(chan, False, 0)
l1.daemon.wait_for_log('Forgetting remote peer')
l1.daemon.wait_for_log('Forgetting peer')
bitcoind.generate_block(100)
l1.daemon.wait_for_log('WIRE_ONCHAIN_ALL_IRREVOCABLY_RESOLVED')
l2.daemon.wait_for_log('WIRE_ONCHAIN_ALL_IRREVOCABLY_RESOLVED')
@ -977,7 +977,6 @@ def test_peerinfo(node_factory, bitcoind):
assert l2.rpc.listnodes()['nodes'] == []
@unittest.skip("FIXME: Disabled during transition: disconnect not updated")
def test_disconnectpeer(node_factory, bitcoind):
l1, l2, l3 = node_factory.get_nodes(3, opts={'may_reconnect': False})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
@ -1008,7 +1007,7 @@ def test_disconnectpeer(node_factory, bitcoind):
bitcoind.generate_block(5)
# disconnecting a non gossiping peer results in error
with pytest.raises(RpcError, match=r'Peer is not in gossip mode'):
with pytest.raises(RpcError, match=r'Peer is in state CHANNELD_NORMAL'):
l1.rpc.disconnect(l3.info['id'])

21
wallet/test/run-wallet.c

@ -59,15 +59,6 @@ void command_still_pending(struct command *cmd UNNEEDED)
/* Generated stub for command_success */
void command_success(struct command *cmd UNNEEDED, struct json_result *response UNNEEDED)
{ fprintf(stderr, "command_success called!\n"); abort(); }
/* Generated stub for fromwire_connectctl_peer_disconnect_reply */
bool fromwire_connectctl_peer_disconnect_reply(const void *p UNNEEDED)
{ fprintf(stderr, "fromwire_connectctl_peer_disconnect_reply called!\n"); abort(); }
/* Generated stub for fromwire_connectctl_peer_disconnect_replyfail */
bool fromwire_connectctl_peer_disconnect_replyfail(const void *p UNNEEDED, bool *isconnected UNNEEDED)
{ fprintf(stderr, "fromwire_connectctl_peer_disconnect_replyfail called!\n"); abort(); }
/* Generated stub for fromwire_connect_getpeers_reply */
bool fromwire_connect_getpeers_reply(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct pubkey **id UNNEEDED, struct wireaddr_internal **addr UNNEEDED, struct peer_features ***features UNNEEDED)
{ fprintf(stderr, "fromwire_connect_getpeers_reply called!\n"); abort(); }
/* Generated stub for fromwire_connect_peer_connected */
bool fromwire_connect_peer_connected(const tal_t *ctx UNNEEDED, const void *p UNNEEDED, struct pubkey *id UNNEEDED, struct wireaddr_internal *addr UNNEEDED, struct crypto_state *crypto_state UNNEEDED, u8 **gfeatures UNNEEDED, u8 **lfeatures UNNEEDED)
{ fprintf(stderr, "fromwire_connect_peer_connected called!\n"); abort(); }
@ -315,9 +306,6 @@ void subd_req_(const tal_t *ctx UNNEEDED,
void (*replycb)(struct subd * UNNEEDED, const u8 * UNNEEDED, const int * UNNEEDED, void *) UNNEEDED,
void *replycb_data UNNEEDED)
{ fprintf(stderr, "subd_req_ called!\n"); abort(); }
/* Generated stub for subd_send_fd */
void subd_send_fd(struct subd *sd UNNEEDED, int fd UNNEEDED)
{ fprintf(stderr, "subd_send_fd called!\n"); abort(); }
/* Generated stub for subd_send_msg */
void subd_send_msg(struct subd *sd UNNEEDED, const u8 *msg_out UNNEEDED)
{ fprintf(stderr, "subd_send_msg called!\n"); abort(); }
@ -331,24 +319,15 @@ u8 *towire_channel_dev_reenable_commit(const tal_t *ctx UNNEEDED)
/* Generated stub for towire_channel_send_shutdown */
u8 *towire_channel_send_shutdown(const tal_t *ctx UNNEEDED)
{ fprintf(stderr, "towire_channel_send_shutdown called!\n"); abort(); }
/* Generated stub for towire_connectctl_hand_back_peer */
u8 *towire_connectctl_hand_back_peer(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED, const struct crypto_state *crypto_state UNNEEDED, const u8 *msg UNNEEDED)
{ fprintf(stderr, "towire_connectctl_hand_back_peer called!\n"); abort(); }
/* Generated stub for towire_connectctl_peer_addrhint */
u8 *towire_connectctl_peer_addrhint(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED, const struct wireaddr_internal *addr UNNEEDED)
{ fprintf(stderr, "towire_connectctl_peer_addrhint called!\n"); abort(); }
/* Generated stub for towire_connectctl_peer_disconnect */
u8 *towire_connectctl_peer_disconnect(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED)
{ fprintf(stderr, "towire_connectctl_peer_disconnect called!\n"); abort(); }
/* Generated stub for towire_connectctl_peer_disconnected */
u8 *towire_connectctl_peer_disconnected(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED)
{ fprintf(stderr, "towire_connectctl_peer_disconnected called!\n"); abort(); }
/* Generated stub for towire_connectctl_peer_important */
u8 *towire_connectctl_peer_important(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED, bool important UNNEEDED)
{ fprintf(stderr, "towire_connectctl_peer_important called!\n"); abort(); }
/* Generated stub for towire_connect_getpeers_request */
u8 *towire_connect_getpeers_request(const tal_t *ctx UNNEEDED, const struct pubkey *id UNNEEDED)
{ fprintf(stderr, "towire_connect_getpeers_request called!\n"); abort(); }
/* Generated stub for towire_errorfmt */
u8 *towire_errorfmt(const tal_t *ctx UNNEEDED,
const struct channel_id *channel UNNEEDED,

Loading…
Cancel
Save