Browse Source

json_connect: separate port arg so we can parse IPv6 addresses.

Fixes: #391
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
ppa-0.6.1
Rusty Russell 7 years ago
committed by Christian Decker
parent
commit
af5dbbc9f8
  1. 4
      README.md
  2. 36
      lightningd/peer_control.c
  3. 87
      tests/test_lightningd.py

4
README.md

@@ -76,10 +76,10 @@ Eventually `lightningd` will include its own wallet making this transfer easier,
If you don't have any testcoins you can get a few from a faucet such as [TPs' testnet faucet](http://tpfaucet.appspot.com/) or [Kiwi's testnet faucet](https://testnet.manu.backend.hamburg/faucet).
Once `lightningd` has funds, we can connect to a node and open a channel.
Let's assume the remote node is accepting connections at `<ip>:<port>` and has the node ID `<node_id>`:
Let's assume the remote node is accepting connections at `<ip>` (and optional `<port>`, if not 9735) and has the node ID `<node_id>`:
```
cli/lightning-cli connect <node_id> <ip>:<port>
cli/lightning-cli connect <node_id> <ip> [<port>]
cli/lightning-cli fundchannel <node_id> <amount>
```

36
lightningd/peer_control.c

@@ -738,15 +738,16 @@ struct peer *peer_by_id(struct lightningd *ld, const struct pubkey *id)
static void json_connect(struct command *cmd,
const char *buffer, const jsmntok_t *params)
{
jsmntok_t *hosttok, *idtok;
jsmntok_t *hosttok, *porttok, *idtok;
struct pubkey id;
const char *name, *port, *colon;
const char *name;
struct wireaddr addr;
u8 *msg;
if (!json_get_params(buffer, params,
"id", &idtok,
"?host", &hosttok,
"?port", &porttok,
NULL)) {
command_fail(cmd, "Need id to connect");
return;
@@ -759,22 +760,29 @@ static void json_connect(struct command *cmd,
return;
}
if (porttok && !hosttok) {
command_fail(cmd, "Can't specify port without host");
return;
}
if (hosttok) {
colon = memchr(buffer + hosttok->start, ':',
hosttok->end - hosttok->start);
if (colon) {
name = tal_strndup(cmd, buffer + hosttok->start,
colon - (buffer + hosttok->start));
port = tal_strndup(cmd, colon + 1,
(buffer + hosttok->end) - colon - 1);
name = tal_strndup(cmd, buffer + hosttok->start,
hosttok->end - hosttok->start);
if (porttok) {
u32 port;
if (!json_tok_number(buffer, porttok, &port)) {
command_fail(cmd, "port %.*s not valid",
porttok->end - porttok->start,
buffer + porttok->start);
return;
}
addr.port = port;
} else {
name = tal_strndup(cmd, buffer + hosttok->start,
hosttok->end - hosttok->start);
port = tal_strdup(cmd, stringify(DEFAULT_PORT));
addr.port = DEFAULT_PORT;
}
addr.port = atoi(port);
if (!parse_wireaddr(name, &addr, addr.port) || !addr.port) {
command_fail(cmd, "host %s:%s not valid", name, port);
command_fail(cmd, "host %s:%u not valid",
name, addr.port);
return;
}

87
tests/test_lightningd.py

@@ -217,7 +217,7 @@ class LightningDTests(BaseLightningDTests):
def connect(self):
l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node()
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
assert ret['id'] == l2.info['id']
@ -262,7 +262,8 @@ class LightningDTests(BaseLightningDTests):
for i in range(len(nodes)-1):
nodes[i].rpc.connect(
nodes[i+1].info['id'],
'localhost:{}'.format(nodes[i+1].info['port'])
'localhost',
nodes[i+1].info['port']
)
self.fund_channel(nodes[i], nodes[i+1], 10**6)
@ -701,7 +702,7 @@ class LightningDTests(BaseLightningDTests):
# l1 asks for a too-long locktime
l1 = self.node_factory.get_node(options=['--locktime-blocks=100'])
l2 = self.node_factory.get_node(options=['--max-locktime-blocks=99'])
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
assert ret['id'] == l2.info['id']
@ -792,7 +793,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
# Like fundchannel, but we'll probably fail before CHANNELD_NORMAL.
addr = l1.rpc.newaddr()['address']
@ -834,7 +835,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
# Must be dust!
@ -887,7 +888,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['rhash']
@ -944,8 +945,8 @@ class LightningDTests(BaseLightningDTests):
l3 = self.node_factory.get_node()
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost:{}'.format(l1.info['port']))
l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
l2.rpc.connect(l1.info['id'], 'localhost', l1.info['port'])
l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])
self.fund_channel(l2, l1, 10**6)
self.fund_channel(l2, l3, 10**6)
@ -1017,7 +1018,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True)
l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
# Now, this will get stuck due to l1 commit being disabled..
@ -1076,7 +1077,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True)
l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
# Move some across to l2.
@ -1139,7 +1140,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
# This will fail at l2's end.
@ -1175,7 +1176,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
# This will fail at l2's end.
@ -1217,7 +1218,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l2, l1, 10**6)
# This will fail at l2's end.
@ -1329,11 +1330,11 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
l3 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
l1.openchannel(l2, 20000)
# Now open new channels and everybody should sync
l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])
l2.openchannel(l3, 20000)
# Settle the gossip
@ -1345,8 +1346,8 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node()
l3 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
l1.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])
self.fund_channel(l1, l2, 10**6)
self.fund_channel(l1, l3, 10**6)
@ -1358,7 +1359,7 @@ class LightningDTests(BaseLightningDTests):
for i in range(len(nodes)-1):
src, dst = nodes[i], nodes[i+1]
src.rpc.connect(dst.info['id'], 'localhost:{}'.format(dst.info['port']))
src.rpc.connect(dst.info['id'], 'localhost', dst.info['port'])
src.openchannel(dst, 20000)
# Allow announce messages.
@ -1397,7 +1398,7 @@ class LightningDTests(BaseLightningDTests):
# Connect 1 -> 2 -> 3.
l1,l2 = self.connect()
l3 = self.node_factory.get_node()
ret = l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])
assert ret['id'] == l3.info['id']
@ -1489,13 +1490,13 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node(options=['--cltv-delta=20', '--fee-base=200', '--fee-per-satoshi=2000'])
l3 = self.node_factory.get_node(options=['--cltv-delta=30', '--cltv-final=9', '--fee-base=300', '--fee-per-satoshi=3000'])
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
assert ret['id'] == l2.info['id']
l1.daemon.wait_for_log('WIRE_GOSSIPCTL_HANDLE_PEER')
l2.daemon.wait_for_log('WIRE_GOSSIPCTL_HANDLE_PEER')
ret = l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])
assert ret['id'] == l3.info['id']
l2.daemon.wait_for_log('WIRE_GOSSIPCTL_HANDLE_PEER')
@ -1588,13 +1589,13 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node(options=['--cltv-delta=20', '--fee-base=200', '--fee-per-satoshi=2000'])
l3 = self.node_factory.get_node(options=['--cltv-delta=30', '--cltv-final=9', '--fee-base=300', '--fee-per-satoshi=3000'])
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
assert ret['id'] == l2.info['id']
l1.daemon.wait_for_log('WIRE_GOSSIPCTL_HANDLE_PEER')
l2.daemon.wait_for_log('WIRE_GOSSIPCTL_HANDLE_PEER')
ret = l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])
assert ret['id'] == l3.info['id']
l2.daemon.wait_for_log('WIRE_GOSSIPCTL_HANDLE_PEER')
@ -1642,7 +1643,7 @@ class LightningDTests(BaseLightningDTests):
options=['--no-reconnect'])
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
chanid = self.fund_channel(l1, l2, 10**6)
# Wait for route propagation.
@ -1682,7 +1683,7 @@ class LightningDTests(BaseLightningDTests):
options=['--no-reconnect'])
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
chanid = self.fund_channel(l1, l2, 10**6)
# Wait for route propagation.
@ -1721,7 +1722,7 @@ class LightningDTests(BaseLightningDTests):
'+WIRE_INIT']
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
# Should have 3 connect fails.
for d in disconnects:
@ -1745,7 +1746,7 @@ class LightningDTests(BaseLightningDTests):
l1.rpc.addfunds(tx)
for d in disconnects:
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
assert l1.rpc.getpeer(l2.info['id']) == None
@ -1764,7 +1765,7 @@ class LightningDTests(BaseLightningDTests):
l1.rpc.addfunds(tx)
for d in disconnects:
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
assert l1.rpc.getpeer(l2.info['id']) == None
@ -1781,7 +1782,7 @@ class LightningDTests(BaseLightningDTests):
tx = l1.bitcoin.rpc.getrawtransaction(txid)
l1.rpc.addfunds(tx)
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
# Fundee remembers, funder doesn't.
@ -1800,7 +1801,7 @@ class LightningDTests(BaseLightningDTests):
tx = l1.bitcoin.rpc.getrawtransaction(txid)
l1.rpc.addfunds(tx)
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
l1.rpc.fundchannel(l2.info['id'], 20000)
# They haven't forgotten each other.
@ -1824,7 +1825,7 @@ class LightningDTests(BaseLightningDTests):
disconnects = ['0WIRE_ACCEPT_CHANNEL']
l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
addr = l1.rpc.newaddr()['address']
txid = l1.bitcoin.rpc.sendtoaddress(addr, 20000 / 10**6)
@ -1836,7 +1837,7 @@ class LightningDTests(BaseLightningDTests):
assert l1.rpc.getpeer(l2.info['id']) == None
# Reconnect.
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
# We should get a message about reconnecting.
l2.daemon.wait_for_log('Peer has reconnected, state OPENINGD')
@ -1856,7 +1857,7 @@ class LightningDTests(BaseLightningDTests):
'+WIRE_FUNDING_LOCKED']
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
@ -1869,7 +1870,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
@ -1897,7 +1898,7 @@ class LightningDTests(BaseLightningDTests):
'+WIRE_REVOKE_AND_ACK']
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
@ -1923,7 +1924,7 @@ class LightningDTests(BaseLightningDTests):
'+WIRE_REVOKE_AND_ACK']
l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node(disconnect=disconnects)
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
@ -1953,7 +1954,7 @@ class LightningDTests(BaseLightningDTests):
'+WIRE_REVOKE_AND_ACK']
l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node(disconnect=disconnects)
ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
@ -1974,7 +1975,7 @@ class LightningDTests(BaseLightningDTests):
'+WIRE_SHUTDOWN']
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
self.pay(l1,l2,200000000)
@ -2002,7 +2003,7 @@ class LightningDTests(BaseLightningDTests):
'+WIRE_CLOSING_SIGNED']
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)
self.pay(l1,l2,200000000)
@ -2123,7 +2124,7 @@ class LightningDTests(BaseLightningDTests):
# check that HTLCs reloaded from the DB work.
l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
# Neither node should have a channel open, they are just connected
for n in (l1, l2):
@ -2181,8 +2182,8 @@ class LightningDTests(BaseLightningDTests):
l3 = self.node_factory.get_node()
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost:{}'.format(l1.info['port']))
l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
l2.rpc.connect(l1.info['id'], 'localhost', l1.info['port'])
l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])
self.fund_channel(l2, l1, 10**6)
self.fund_channel(l2, l3, 10**6)
@ -2300,7 +2301,7 @@ class LightningDTests(BaseLightningDTests):
disconnects = ['+WIRE_COMMITMENT_SIGNED']
l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
self.fund_channel(l1, l2, 10**6)

Loading…
Cancel
Save