
JSONRPC: listpeers multiple channels, output improvements.

We're going to have to support multiple channels per peer, even if only
when some are onchain.  This would break the current listpeers output, so
change the per-peer channel fields into a `channels` array (a single
element for now).

Other cleanups:

1. Only set `connected` to true if the peer's owning daemon is not onchaind.
2. Only show `netaddr` if connected; don't make it an array, and call it
   `address` by analogy with `addresses` in listnodes.
3. Rename `channel` to `short_channel_id`.
4. Add a `funding_txid` field for voyeurism.  An illustrative sketch of the
   reshaped output follows.
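
For illustration, one entry of the new listpeers output nests like this
(a sketch written as a Python literal with invented values; the field
names and nesting follow the peer_control.c hunk below):

    peer = {
        'id': '02abc...',
        'connected': True,
        'netaddr': ['127.0.0.1:6789'],
        'channels': [{
            'state': 'CHANNELD_NORMAL',
            'owner': 'lightning_channeld',
            'short_channel_id': '103:1:0',
            'funding_txid': 'd0af4...',
            'msatoshi_to_us': 1000000000,
            'msatoshi_total': 1000000000,
        }],
    }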

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Rusty Russell · 7 years ago
Tag: ppa-0.6.1
Commit: c8c68cfb7c
Changed files:
 lightningd/peer_control.c | 36
 tests/test_lightningd.py  | 24

--- a/lightningd/peer_control.c
+++ b/lightningd/peer_control.c

@@ -813,29 +813,47 @@ static void gossipd_getpeers_complete(struct subd *gossip, const u8 *msg,
         json_object_start(response, NULL);
         json_array_start(response, "peers");
         list_for_each(&gpa->cmd->ld->peers, p, list) {
+                bool connected;
                 if (gpa->specific_id && !pubkey_eq(gpa->specific_id, &p->id))
                         continue;
                 json_object_start(response, NULL);
-                json_add_string(response, "state", peer_state_name(p->state));
-                json_array_start(response, "netaddr");
-                if (p->addr.type != ADDR_TYPE_PADDING)
-                        json_add_string(response, NULL,
-                                        type_to_string(response, struct wireaddr,
-                                                       &p->addr));
-                json_array_end(response);
                 json_add_pubkey(response, "id", &p->id);
-                json_add_bool(response, "connected", p->owner != NULL);
+                connected = (p->owner != NULL && !peer_state_on_chain(p->state));
+                json_add_bool(response, "connected", connected);
+                if (connected) {
+                        json_array_start(response, "netaddr");
+                        if (p->addr.type != ADDR_TYPE_PADDING)
+                                json_add_string(response, NULL,
+                                                type_to_string(response,
+                                                               struct wireaddr,
+                                                               &p->addr));
+                        json_array_end(response);
+                }
+                /* FIXME: We only support one channel per peer, but API must
+                 * support multiple already! */
+                json_array_start(response, "channels");
+                json_object_start(response, NULL);
+                json_add_string(response, "state", peer_state_name(p->state));
                 if (p->owner)
                         json_add_string(response, "owner", p->owner->name);
                 if (p->scid)
-                        json_add_short_channel_id(response, "channel", p->scid);
+                        json_add_short_channel_id(response,
+                                                  "short_channel_id", p->scid);
+                if (p->funding_txid)
+                        json_add_txid(response,
+                                      "funding_txid", p->funding_txid);
                 if (p->our_msatoshi) {
                         json_add_u64(response, "msatoshi_to_us",
                                      *p->our_msatoshi);
                         json_add_u64(response, "msatoshi_total",
                                      p->funding_satoshi * 1000);
                 }
+                json_object_end(response);
+                json_array_end(response);
                 if (gpa->ll) {
                         struct log_info info;
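
Because `channels` is an array even though only one channel per peer
exists today (per the FIXME above), callers should iterate rather than
assume a single element.  A minimal, hypothetical client sketch in
Python, assuming `rpc` is a LightningRpc-style handle like the `l1.rpc`
objects used in the tests below:

    # rpc: an assumed LightningRpc-style handle exposing listpeers().
    # 'short_channel_id' is only present once a channel has a scid,
    # so use .get() for it.
    for peer in rpc.listpeers()['peers']:
        for channel in peer.get('channels', []):
            print(peer['id'], channel['state'],
                  channel.get('short_channel_id'))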

--- a/tests/test_lightningd.py
+++ b/tests/test_lightningd.py

@@ -396,8 +396,8 @@ class LightningDTests(BaseLightningDTests):
         self.fund_channel(l1, l2, 10**6)
-        p1 = l1.rpc.getpeer(l2.info['id'], 'info')
-        p2 = l2.rpc.getpeer(l1.info['id'], 'info')
+        p1 = l1.rpc.getpeer(l2.info['id'], 'info')['channels'][0]
+        p2 = l2.rpc.getpeer(l1.info['id'], 'info')['channels'][0]
         assert p1['msatoshi_to_us'] == 10**6 * 1000
         assert p1['msatoshi_total'] == 10**6 * 1000
         assert p2['msatoshi_to_us'] == 0
@@ -1637,10 +1637,10 @@ class LightningDTests(BaseLightningDTests):
         # If they're at different block heights we can get spurious errors.
         sync_blockheight([l1, l2, l3])
-        chanid1 = l1.rpc.getpeer(l2.info['id'])['channel']
-        chanid2 = l2.rpc.getpeer(l3.info['id'])['channel']
-        assert l2.rpc.getpeer(l1.info['id'])['channel'] == chanid1
-        assert l3.rpc.getpeer(l2.info['id'])['channel'] == chanid2
+        chanid1 = l1.rpc.getpeer(l2.info['id'])['channels'][0]['short_channel_id']
+        chanid2 = l2.rpc.getpeer(l3.info['id'])['channels'][0]['short_channel_id']
+        assert l2.rpc.getpeer(l1.info['id'])['channels'][0]['short_channel_id'] == chanid1
+        assert l3.rpc.getpeer(l2.info['id'])['channels'][0]['short_channel_id'] == chanid2
         rhash = l3.rpc.invoice(100000000, 'testpayment1', 'desc')['payment_hash']
         assert l3.rpc.listinvoices('testpayment1')['invoices'][0]['status'] == 'unpaid'
@@ -2505,7 +2505,7 @@ class LightningDTests(BaseLightningDTests):
         self.fund_channel(l1, l2, 100000)
         peers = l1.rpc.listpeers()['peers']
-        assert(len(peers) == 1 and peers[0]['state'] == 'CHANNELD_NORMAL')
+        assert(len(peers) == 1 and peers[0]['channels'][0]['state'] == 'CHANNELD_NORMAL')
         # Both nodes should now have exactly one channel in the database
         for n in (l1, l2):
@@ -2525,14 +2525,14 @@ class LightningDTests(BaseLightningDTests):
         print(" ".join(l2.daemon.cmd_line + ['--dev-debugger=channeld']))
         # Wait for l1 to notice
-        wait_for(lambda: not l1.rpc.listpeers()['peers'][0]['connected'])
+        wait_for(lambda: not 'connected' in l1.rpc.listpeers()['peers'][0]['channels'][0])
         # Now restart l1 and it should reload peers/channels from the DB
         l2.daemon.start()
         wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)
         # Wait for the restored HTLC to finish
-        wait_for(lambda: l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)
+        wait_for(lambda: l1.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 99990000, interval=1)
         wait_for(lambda: len([p for p in l1.rpc.listpeers()['peers'] if p['connected']]), interval=1)
         wait_for(lambda: len([p for p in l2.rpc.listpeers()['peers'] if p['connected']]), interval=1)
@@ -2540,13 +2540,13 @@ class LightningDTests(BaseLightningDTests):
         # Now make sure this is really functional by sending a payment
         self.pay(l1, l2, 10000)
         time.sleep(1)
-        assert l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99980000
-        assert l2.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 20000
+        assert l1.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 99980000
+        assert l2.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 20000
         # Finally restart l1, and make sure it remembers
         l1.stop()
         l1.daemon.start()
-        assert l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99980000
+        assert l1.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 99980000
         # Now make sure l1 is watching for unilateral closes
         l2.rpc.dev_fail(l1.info['id']);
