
routing: Ask gossipd to resolve channel_id and forward HTLCs

Since we now use the short_channel_id to identify the next hop, we need
to resolve the channel_id to the pubkey of the next hop. This is done by
calling out to `gossipd`, stuffing the necessary information into
`htlc_end`, and recovering it from there once we receive the reply.
Christian Decker committed 8 years ago · commit 25f1cba3cf · ppa-0.6.1
7 changed files, with the number of changed lines per file:

  1. lightningd/channel/channel.c (3)
  2. lightningd/channel/channel_wire.csv (2)
  3. lightningd/gossip/gossip.c (32)
  4. lightningd/gossip/gossip_wire.csv (8)
  5. lightningd/gossip_control.c (2)
  6. lightningd/htlc_end.h (11)
  7. lightningd/peer_control.c (78)
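Before the per-file diffs, here is a minimal sketch of the flow the commit message describes, based only on the hunks below. The helper stash_and_resolve() is invented for illustration; the real code spreads this logic across peer_accepted_htlc() and channel_resolve_reply() in lightningd/peer_control.c.

/* Illustrative sketch only: the master daemon stashes the HTLC details in
 * struct htlc_end, asks gossipd to turn the short_channel_id into the two
 * endpoint node_ids, and resumes forwarding once the reply arrives. */
static void stash_and_resolve(struct peer *peer, struct htlc_end *hend)
{
	/* 1. peer_accepted_htlc() has already parsed next_channel, next_onion,
	 *    amt_to_forward, outgoing_cltv_value, cltv_expiry and payment_hash
	 *    straight into hend, so nothing is lost while we wait. */
	u8 *req = towire_gossip_resolve_channel_request(hend, &hend->next_channel);

	/* 2. Ask gossipd; channel_resolve_reply() is invoked with hend once
	 *    the resolver answers. */
	subd_req(hend, peer->ld->gossip, req, -1, 0, channel_resolve_reply, hend);

	/* 3. channel_resolve_reply() then picks whichever endpoint is not our
	 *    own node id and hands everything to forward_htlc(). */
}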

lightningd/channel/channel.c

@@ -636,7 +636,6 @@ static void their_htlc_locked(const struct htlc *htlc, struct peer *peer)
 		goto remove_htlc;
 	}
-	u8 dummy_next_hop[20]; memset(dummy_next_hop, 0, 20);
 	/* Tell master to deal with it. */
 	msg = towire_channel_accepted_htlc(tmpctx, htlc->id, htlc->msatoshi,
 					   abs_locktime_to_blocks(&htlc->expiry),
@@ -646,7 +645,7 @@ static void their_htlc_locked(const struct htlc *htlc, struct peer *peer)
 					   rs->nextcase == ONION_FORWARD,
 					   rs->hop_data.amt_forward,
 					   rs->hop_data.outgoing_cltv,
-					   dummy_next_hop);
+					   &rs->hop_data.channel_id);
 	daemon_conn_send(&peer->master, take(msg));
 	tal_free(tmpctx);
 	return;

lightningd/channel/channel_wire.csv

@@ -85,7 +85,7 @@ channel_accepted_htlc,0,next_onion,1366*u8
 channel_accepted_htlc,0,forward,bool
 channel_accepted_htlc,0,amt_to_forward,u64
 channel_accepted_htlc,0,outgoing_cltv_value,u32
-channel_accepted_htlc,0,nexthop,20*u8
+channel_accepted_htlc,0,next_channel,struct short_channel_id
 # FIXME: Add code to commit current channel state!


lightningd/gossip/gossip.c

@@ -633,6 +633,34 @@ static struct io_plan *gossip_init(struct daemon_conn *master,
 	return daemon_conn_read_next(master->conn, master);
 }
 
+static struct io_plan *resolve_channel_req(struct io_conn *conn,
+					   struct daemon *daemon, const u8 *msg)
+{
+	struct short_channel_id scid;
+	struct node_connection *nc;
+	u8 *reply;
+	struct pubkey *nullkey = talz(msg, struct pubkey);
+
+	if (!fromwire_gossip_resolve_channel_request(msg, NULL, &scid)) {
+		status_trace("Unable to parse resolver request");
+		reply = towire_gossip_resolve_channel_reply(msg, 1, nullkey,
+							    nullkey);
+	} else {
+		status_trace("Attempting to resolve channel %d/%d/%d",
+			     scid.blocknum, scid.txnum, scid.outnum);
+		nc = get_connection_by_scid(daemon->rstate, &scid, 0);
+		if (!nc) {
+			reply = towire_gossip_resolve_channel_reply(
+			    msg, 2, nullkey, nullkey);
+		} else {
+			reply = towire_gossip_resolve_channel_reply(
+			    msg, 0, &nc->src->id, &nc->dst->id);
+		}
+	}
+	daemon_conn_send(&daemon->master, reply);
+	return daemon_conn_read_next(conn, &daemon->master);
+}
+
 static struct io_plan *recv_req(struct io_conn *conn, struct daemon_conn *master)
 {
 	struct daemon *daemon = container_of(master, struct daemon, master);
@@ -662,11 +690,15 @@ static struct io_plan *recv_req(struct io_conn *conn, struct daemon_conn *master
 	case WIRE_GOSSIP_PING:
 		return ping_req(conn, daemon, daemon->master.msg_in);
 
+	case WIRE_GOSSIP_RESOLVE_CHANNEL_REQUEST:
+		return resolve_channel_req(conn, daemon, daemon->master.msg_in);
+
 	case WIRE_GOSSIPCTL_RELEASE_PEER_REPLY:
 	case WIRE_GOSSIP_GETNODES_REPLY:
 	case WIRE_GOSSIP_GETROUTE_REPLY:
 	case WIRE_GOSSIP_GETCHANNELS_REPLY:
 	case WIRE_GOSSIP_PING_REPLY:
+	case WIRE_GOSSIP_RESOLVE_CHANNEL_REPLY:
 	case WIRE_GOSSIPSTATUS_INIT_FAILED:
 	case WIRE_GOSSIPSTATUS_BAD_NEW_PEER_REQUEST:
 	case WIRE_GOSSIPSTATUS_BAD_RELEASE_REQUEST:
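For readability, the reply's error byte takes three values in the hunk above: 0 when the channel is known (the reply then carries both endpoint node_ids), 1 when the request could not be parsed, and 2 when no channel matches the short_channel_id. A hedged sketch of how those codes could be named; the enum and its identifiers are illustrative, the commit itself uses the bare literals:

/* Illustrative names for the literals used by resolve_channel_req() above. */
enum gossip_resolve_error {
	GOSSIP_RESOLVE_OK = 0,              /* reply carries src and dst node_ids */
	GOSSIP_RESOLVE_BAD_REQUEST = 1,     /* request could not be parsed */
	GOSSIP_RESOLVE_UNKNOWN_CHANNEL = 2, /* no channel found for the scid */
};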

lightningd/gossip/gossip_wire.csv

@@ -91,3 +91,11 @@ gossip_ping,0,len,u16
 gossip_ping_reply,108
 gossip_ping_reply,0,totlen,u16
+
+# Given a short_channel_id, return the endpoints
+gossip_resolve_channel_request,9
+gossip_resolve_channel_request,0,channel_id,struct short_channel_id
+gossip_resolve_channel_reply,109
+gossip_resolve_channel_reply,0,error,u8
+gossip_resolve_channel_reply,1,node_id_1,struct pubkey
+gossip_resolve_channel_reply,34,node_id_2,struct pubkey

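The second column of the reply rows appears to be a byte offset into the message body: error at 0, node_id_1 at 1, node_id_2 at 34, which matches a one-byte error code followed by two 33-byte compressed public keys. A sketch of the implied layout, assuming that reading of the CSV; the struct is illustrative, since the real code uses the generated towire/fromwire helpers rather than a packed struct:

/* Illustrative only: wire layout implied by the offsets in the CSV above. */
struct gossip_resolve_channel_reply_body {
	u8 error;          /* offset 0: 0 = resolved, non-zero = failure    */
	u8 node_id_1[33];  /* offset 1: compressed pubkey of one endpoint   */
	u8 node_id_2[33];  /* offset 34: compressed pubkey of the other end */
};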

lightningd/gossip_control.c

@@ -147,12 +147,14 @@ static int gossip_msg(struct subd *gossip, const u8 *msg, const int *fds)
 	case WIRE_GOSSIP_GETROUTE_REQUEST:
 	case WIRE_GOSSIP_GETCHANNELS_REQUEST:
 	case WIRE_GOSSIP_PING:
+	case WIRE_GOSSIP_RESOLVE_CHANNEL_REQUEST:
 	/* This is a reply, so never gets through to here. */
 	case WIRE_GOSSIPCTL_RELEASE_PEER_REPLY:
 	case WIRE_GOSSIP_GETNODES_REPLY:
 	case WIRE_GOSSIP_GETROUTE_REPLY:
 	case WIRE_GOSSIP_GETCHANNELS_REPLY:
 	case WIRE_GOSSIP_PING_REPLY:
+	case WIRE_GOSSIP_RESOLVE_CHANNEL_REPLY:
 		break;
 	case WIRE_GOSSIPSTATUS_PEER_BAD_MSG:
 		peer_bad_message(gossip, msg);

lightningd/htlc_end.h

@@ -3,6 +3,7 @@
 #include "config.h"
 #include <ccan/htable/htable_type.h>
 #include <ccan/short_types/short_types.h>
+#include <lightningd/sphinx.h>
 
 /* A HTLC has a source and destination: if other is NULL, it's this node.
  *
@@ -14,11 +15,19 @@ struct htlc_end {
 	enum htlc_end_type which_end;
 	struct peer *peer;
 	u64 htlc_id;
-	u64 msatoshis;
+	u32 msatoshis;
 	struct htlc_end *other_end;
 
 	/* If this is driven by a command. */
 	struct pay_command *pay_command;
+
+	/* Temporary information, while we resolve the next hop */
+	u8 next_onion[TOTAL_PACKET_SIZE];
+	struct short_channel_id next_channel;
+	u64 amt_to_forward;
+	u32 outgoing_cltv_value;
+	u32 cltv_expiry;
+	struct sha256 payment_hash;
 };
 
 static inline const struct htlc_end *keyof_htlc_end(const struct htlc_end *e)

lightningd/peer_control.c

@@ -875,14 +875,11 @@ fail:
 	tal_free(hend);
 }
 
-static struct peer *peer_by_pkhash(struct lightningd *ld, const u8 pkhash[20])
+static struct peer *peer_by_pubkey(struct lightningd *ld, const struct pubkey *id)
 {
 	struct peer *peer;
-	u8 addr[20];
 
 	list_for_each(&ld->peers, peer, list) {
-		pubkey_hash160(addr, peer->id);
-		if (memcmp(addr, pkhash, sizeof(addr)) == 0)
+		if (pubkey_cmp(id, peer->id) == 0)
 			return peer;
 	}
 	return NULL;
@@ -945,13 +942,13 @@ static void forward_htlc(struct htlc_end *hend,
 			 const struct sha256 *payment_hash,
 			 u64 amt_to_forward,
 			 u32 outgoing_cltv_value,
-			 const u8 next_hop[20],
+			 const struct pubkey *next_hop,
 			 const u8 next_onion[TOTAL_PACKET_SIZE])
 {
 	u8 *err, *msg;
 	u64 fee;
 	struct lightningd *ld = hend->peer->ld;
-	struct peer *next = peer_by_pkhash(ld, next_hop);
+	struct peer *next = peer_by_pubkey(ld, next_hop);
 
 	if (!next) {
 		err = towire_unknown_next_peer(hend);
@@ -1035,44 +1032,63 @@ fail:
 	tal_free(hend);
 }
 
+/* We received a resolver reply, which gives us the node_ids of the
+ * channel we want to forward over */
+static bool channel_resolve_reply(struct subd *gossip, const u8 *msg,
+				  const int *fds, struct htlc_end *hend)
+{
+	struct pubkey node_id_1, node_id_2, peer_id;
+	u8 error;
+
+	fromwire_gossip_resolve_channel_reply(msg, NULL, &error, &node_id_1,
+					      &node_id_2);
+
+	/* Get the other peer matching the id that is not us */
+	if (pubkey_cmp(&node_id_1, &gossip->ld->dstate.id) == 0) {
+		peer_id = node_id_2;
+	} else {
+		peer_id = node_id_1;
+	}
+
+	forward_htlc(hend, hend->cltv_expiry, &hend->payment_hash,
+		     hend->amt_to_forward, hend->outgoing_cltv_value, &peer_id,
+		     hend->next_onion);
+	/* FIXME(cdecker) Cleanup things we stuffed into hend before (maybe?) */
+	return true;
+}
+
 static int peer_accepted_htlc(struct peer *peer, const u8 *msg)
 {
-	u64 id;
-	u32 cltv_expiry, amount_msat;
-	struct sha256 payment_hash;
-	u8 next_onion[TOTAL_PACKET_SIZE];
-	u8 next_hop[20];
 	bool forward;
-	u64 amt_to_forward;
-	u32 outgoing_cltv_value;
 	struct htlc_end *hend;
+	u8 *req;
 
-	if (!fromwire_channel_accepted_htlc(msg, NULL, &id, &amount_msat,
-					    &cltv_expiry, &payment_hash,
-					    next_onion, &forward,
-					    &amt_to_forward,
-					    &outgoing_cltv_value,
-					    next_hop)) {
+	hend = tal(msg, struct htlc_end);
+	if (!fromwire_channel_accepted_htlc(msg, NULL,
+					    &hend->htlc_id, &hend->msatoshis,
+					    &hend->cltv_expiry, &hend->payment_hash,
+					    hend->next_onion, &forward,
+					    &hend->amt_to_forward,
+					    &hend->outgoing_cltv_value,
+					    &hend->next_channel)) {
 		log_broken(peer->log, "bad fromwire_channel_accepted_htlc %s",
 			   tal_hex(peer, msg));
 		return -1;
 	}
 
-	hend = tal(peer, struct htlc_end);
+	tal_steal(peer, hend);
 	hend->which_end = HTLC_SRC;
 	hend->peer = peer;
-	hend->htlc_id = id;
 	hend->other_end = NULL;
 	hend->pay_command = NULL;
-	hend->msatoshis = amount_msat;
 
-	if (forward)
-		forward_htlc(hend, cltv_expiry, &payment_hash,
-			     amt_to_forward, outgoing_cltv_value,
-			     next_hop, next_onion);
-	else
-		handle_localpay(hend, cltv_expiry, &payment_hash,
-				amt_to_forward, outgoing_cltv_value);
+	if (forward) {
+		req = towire_gossip_resolve_channel_request(msg, &hend->next_channel);
+		log_broken(peer->log, "Asking gossip to resolve channel %d/%d/%d", hend->next_channel.blocknum, hend->next_channel.txnum, hend->next_channel.outnum);
+		subd_req(hend, peer->ld->gossip, req, -1, 0, channel_resolve_reply, hend);
+		/* FIXME(cdecker) Stuff all this info into hend */
+	} else
+		handle_localpay(hend, hend->cltv_expiry, &hend->payment_hash,
+				hend->amt_to_forward, hend->outgoing_cltv_value);
 
 	return 0;
 }
