From 5698a133c2b5d38ea287f4604cf089b9a53ac1eb Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Wed, 17 Jan 2018 10:23:20 +1030
Subject: [PATCH] JSONRPC: rename getpeers to listpeers.

Signed-off-by: Rusty Russell
---
 contrib/pylightning/lightning/lightning.py |  4 +--
 lightningd/peer_control.c                  | 10 +++---
 tests/test_lightningd.py                   | 42 +++++++++++-----------
 3 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/contrib/pylightning/lightning/lightning.py b/contrib/pylightning/lightning/lightning.py
index d7c56f045..6697069e3 100644
--- a/contrib/pylightning/lightning/lightning.py
+++ b/contrib/pylightning/lightning/lightning.py
@@ -79,9 +79,9 @@ class LightningRpc(UnixDomainSocketRpc):
         """Get info about a specific peer, optionally with its log.
         """
         if log_level:
-            peers = self.getpeers(log_level)['peers']
+            peers = self.listpeers(log_level)['peers']
         else:
-            peers = self.getpeers()['peers']
+            peers = self.listpeers()['peers']
         for p in peers:
             if p['peerid'] == peer_id:
                 return p
diff --git a/lightningd/peer_control.c b/lightningd/peer_control.c
index 55dcd28e4..f4d9a4588 100644
--- a/lightningd/peer_control.c
+++ b/lightningd/peer_control.c
@@ -869,7 +869,7 @@ static void gossipd_getpeers_complete(struct subd *gossip, const u8 *msg,
 	command_success(gpa->cmd, response);
 }
 
-static void json_getpeers(struct command *cmd,
+static void json_listpeers(struct command *cmd,
 			  const char *buffer, const jsmntok_t *params)
 {
 	jsmntok_t *leveltok;
@@ -905,13 +905,13 @@ static void json_getpeers(struct command *cmd,
 	command_still_pending(cmd);
 }
 
-static const struct json_command getpeers_command = {
-	"getpeers",
-	json_getpeers,
+static const struct json_command listpeers_command = {
+	"listpeers",
+	json_listpeers,
 	"List the current peers, if {level} is set, include {log}s",
 	"Returns a 'peers' array"
 };
-AUTODATA(json_command, &getpeers_command);
+AUTODATA(json_command, &listpeers_command);
 
 struct peer *peer_from_json(struct lightningd *ld,
 			    const char *buffer,
diff --git a/tests/test_lightningd.py b/tests/test_lightningd.py
index a23affb40..edf428d77 100644
--- a/tests/test_lightningd.py
+++ b/tests/test_lightningd.py
@@ -385,8 +385,8 @@ class LightningDTests(BaseLightningDTests):
         assert ret['id'] == l1.info['id']
 
         # Should still only have one peer!
-        assert len(l1.rpc.getpeers()) == 1
-        assert len(l2.rpc.getpeers()) == 1
+        assert len(l1.rpc.listpeers()) == 1
+        assert len(l2.rpc.listpeers()) == 1
 
     def test_balance(self):
         l1,l2 = self.connect()
@@ -1979,8 +1979,8 @@ class LightningDTests(BaseLightningDTests):
                                .format(l2.info['id']))
 
         # Should still only have one peer!
-        assert len(l1.rpc.getpeers()) == 1
-        assert len(l2.rpc.getpeers()) == 1
+        assert len(l1.rpc.listpeers()) == 1
+        assert len(l2.rpc.listpeers()) == 1
 
     @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
     def test_disconnect_funder(self):
@@ -2008,8 +2008,8 @@ class LightningDTests(BaseLightningDTests):
             l1.rpc.fundchannel(l2.info['id'], 20000)
 
         # Should still only have one peer!
-        assert len(l1.rpc.getpeers()) == 1
-        assert len(l2.rpc.getpeers()) == 1
+        assert len(l1.rpc.listpeers()) == 1
+        assert len(l2.rpc.listpeers()) == 1
 
     @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
     def test_disconnect_fundee(self):
@@ -2035,8 +2035,8 @@ class LightningDTests(BaseLightningDTests):
             l1.rpc.fundchannel(l2.info['id'], 20000)
 
         # Should still only have one peer!
-        assert len(l1.rpc.getpeers()) == 1
-        assert len(l2.rpc.getpeers()) == 1
+        assert len(l1.rpc.listpeers()) == 1
+        assert len(l2.rpc.listpeers()) == 1
 
     @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
     def test_disconnect_half_signed(self):
@@ -2460,8 +2460,8 @@ class LightningDTests(BaseLightningDTests):
         # Fail because l1 dislikes l2's huge locktime.
         self.assertRaisesRegex(ValueError, r'to_self_delay \d+ larger than \d+',
                                l1.rpc.fundchannel, l2.info['id'], int(funds/10))
-        assert l1.rpc.getpeers()['peers'][0]['connected']
-        assert l2.rpc.getpeers()['peers'][0]['connected']
+        assert l1.rpc.listpeers()['peers'][0]['connected']
+        assert l2.rpc.listpeers()['peers'][0]['connected']
 
         # Restart l2 without ridiculous locktime.
         l2.daemon.cmd_line.remove('--locktime-blocks={}'.format(max_locktime + 1))
@@ -2473,8 +2473,8 @@ class LightningDTests(BaseLightningDTests):
                                l1.rpc.fundchannel, l2.info['id'], funds)
 
         # Should still be connected.
-        assert l1.rpc.getpeers()['peers'][0]['connected']
-        assert l2.rpc.getpeers()['peers'][0]['connected']
+        assert l1.rpc.listpeers()['peers'][0]['connected']
+        assert l2.rpc.listpeers()['peers'][0]['connected']
 
         # This works.
         l1.rpc.fundchannel(l2.info['id'], int(funds/10))
@@ -2541,7 +2541,7 @@ class LightningDTests(BaseLightningDTests):
 
         self.fund_channel(l1, l2, 100000)
 
-        peers = l1.rpc.getpeers()['peers']
+        peers = l1.rpc.listpeers()['peers']
         assert(len(peers) == 1 and peers[0]['state'] == 'CHANNELD_NORMAL')
 
         # Both nodes should now have exactly one channel in the database
@@ -2562,28 +2562,28 @@ class LightningDTests(BaseLightningDTests):
         print(" ".join(l2.daemon.cmd_line + ['--dev-debugger=channeld']))
 
         # Wait for l1 to notice
-        wait_for(lambda: not l1.rpc.getpeers()['peers'][0]['connected'])
+        wait_for(lambda: not l1.rpc.listpeers()['peers'][0]['connected'])
 
         # Now restart l1 and it should reload peers/channels from the DB
         l2.daemon.start()
-        wait_for(lambda: len(l2.rpc.getpeers()['peers']) == 1)
+        wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)
 
         # Wait for the restored HTLC to finish
-        wait_for(lambda: l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)
+        wait_for(lambda: l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)
 
-        wait_for(lambda: len([p for p in l1.rpc.getpeers()['peers'] if p['connected']]), interval=1)
-        wait_for(lambda: len([p for p in l2.rpc.getpeers()['peers'] if p['connected']]), interval=1)
+        wait_for(lambda: len([p for p in l1.rpc.listpeers()['peers'] if p['connected']]), interval=1)
+        wait_for(lambda: len([p for p in l2.rpc.listpeers()['peers'] if p['connected']]), interval=1)
 
         # Now make sure this is really functional by sending a payment
         self.pay(l1, l2, 10000)
         time.sleep(1)
-        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
-        assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 20000
+        assert l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99980000
+        assert l2.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 20000
 
         # Finally restart l1, and make sure it remembers
         l1.stop()
         l1.daemon.start()
-        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
+        assert l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99980000
 
         # Now make sure l1 is watching for unilateral closes
         l2.rpc.dev_fail(l1.info['id']);
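For reference, a minimal sketch of calling the renamed command through the pylightning client in contrib/pylightning. The socket path below is an assumption (point it at your node's lightning-dir), and the 'peerid'/'connected' field names are the ones used by the tests above:

    from lightning import LightningRpc  # assumes the pylightning package exports LightningRpc

    # Assumed socket path; use <lightning-dir>/lightning-rpc for your node.
    rpc = LightningRpc("/tmp/lightning/lightning-rpc")

    # After this patch the command is "listpeers"; "getpeers" no longer exists.
    peers = rpc.listpeers()['peers']
    for p in peers:
        # 'peerid' and 'connected' are the field names used in the tests above.
        print(p['peerid'], p['connected'])

As the command description above notes, listpeers still returns a 'peers' array, so existing callers only need the name change.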