Browse Source

logging: remove spaces from subsystem names.

Spaces just make life a little harder for everyone.

(Plus, fix documentation: it's 'jsonrpc' not 'json' subsystem).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
travis-debug
Rusty Russell 5 years ago
parent
commit
709c98f539
  1. 2
      contrib/pyln-testing/pyln/testing/utils.py
  2. 14
      doc/lightningd-config.5
  3. 12
      doc/lightningd-config.5.md
  4. 2
      lightningd/channel.c
  5. 2
      lightningd/jsonrpc.c
  6. 2
      lightningd/opening_control.c
  7. 2
      tests/test_closing.py
  8. 2
      tests/test_connection.py
  9. 2
      tests/test_gossip.py
  10. 2
      tests/test_misc.py
  11. 2
      tests/test_plugin.py

2
contrib/pyln-testing/pyln/testing/utils.py

@@ -1023,7 +1023,7 @@ class NodeFactory(object):
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log(r'{}-.*openingd-chan #[0-9]*: Handed peer, entering loop'.format(src.info['id']))
dst.daemon.wait_for_log(r'{}-.*openingd-chan#[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return nodes
# If we got here, we want to fund channels

14
doc/lightningd-config.5

@@ -143,6 +143,8 @@ for any subsystem containing that string\. Subsystems include:
\fIgossipd\fR: The gossip daemon
.IP \[bu]
\fIconnectd\fR: The network connection daemon
.IP \[bu]
\fIjsonrpc#FD\fR: Each JSONRPC connection, FD = file descriptor number
.RE
@@ -151,20 +153,18 @@ internal integer id assigned for the lifetime of the channel:
.RS
.IP \[bu]
\fIopeningd-chan #N\fR: Each opening / idling daemon
.IP \[bu]
\fIchanneld-chan #N\fR: Each channel management daemon
\fIopeningd-chan#N\fR: Each opening / idling daemon
.IP \[bu]
\fIclosingd-chan #N\fR: Each closing negotiation daemon
\fIchanneld-chan#N\fR: Each channel management daemon
.IP \[bu]
\fIonchaind-chan #N\fR: Each onchain close handling daemon
\fIclosingd-chan#N\fR: Each closing negotiation daemon
.IP \[bu]
\fIjson #FD\fR: Each JSONRPC connection, FD = file descriptor number
\fIonchaind-chan#N\fR: Each onchain close handling daemon
.RE
So, \fBlog-level=debug:plugin\fR would set debug level logging on all
plugins and the plugin manager\. \fBlog-level=io:chan #55\fR would set
plugins and the plugin manager\. \fBlog-level=io:chan#55\fR would set
IO logging on channel number 55 (or 550, for that matter)\.

12
doc/lightningd-config.5.md

@@ -117,19 +117,19 @@ for any subsystem containing that string. Subsystems include:
* *hsmd*: The secret-holding daemon
* *gossipd*: The gossip daemon
* *connectd*: The network connection daemon
* *jsonrpc#FD*: Each JSONRPC connection, FD = file descriptor number
The following subsystems exist for each channel, where N is an incrementing
internal integer id assigned for the lifetime of the channel:
* *openingd-chan #N*: Each opening / idling daemon
* *channeld-chan #N*: Each channel management daemon
* *closingd-chan #N*: Each closing negotiation daemon
* *onchaind-chan #N*: Each onchain close handling daemon
* *json #FD*: Each JSONRPC connection, FD = file descriptor number
* *openingd-chan#N*: Each opening / idling daemon
* *channeld-chan#N*: Each channel management daemon
* *closingd-chan#N*: Each closing negotiation daemon
* *onchaind-chan#N*: Each onchain close handling daemon
So, **log-level=debug:plugin** would set debug level logging on all
plugins and the plugin manager. **log-level=io:chan #55** would set
plugins and the plugin manager. **log-level=io:chan#55** would set
IO logging on channel number 55 (or 550, for that matter).
**log-prefix**=*PREFIX*

2
lightningd/channel.c

@@ -211,7 +211,7 @@ struct channel *new_channel(struct peer *peer, u64 dbid,
channel->log = new_log(channel,
peer->ld->log_book,
&channel->peer->id,
"chan #%"PRIu64,
"chan#%"PRIu64,
dbid);
} else
channel->log = tal_steal(channel, log);

2
lightningd/jsonrpc.c

@@ -902,7 +902,7 @@ static struct io_plan *jcon_connected(struct io_conn *conn,
list_head_init(&jcon->commands);
/* We want to log on destruction, so we free this in destructor. */
jcon->log = new_log(ld->log_book, ld->log_book, NULL, "jsonrpc #%i",
jcon->log = new_log(ld->log_book, ld->log_book, NULL, "jsonrpc#%i",
io_conn_fd(conn));
tal_add_destructor(jcon, destroy_jcon);

2
lightningd/opening_control.c

@@ -629,7 +629,7 @@ new_uncommitted_channel(struct peer *peer)
uc->dbid = wallet_get_channel_dbid(ld->wallet);
uc->log = new_log(uc, ld->log_book, &uc->peer->id,
"chan #%"PRIu64, uc->dbid);
"chan#%"PRIu64, uc->dbid);
uc->fc = NULL;
uc->our_config.id = 0;

2
tests/test_closing.py

@@ -1459,7 +1459,7 @@ def test_permfail_htlc_out(node_factory, bitcoind, executor):
l2 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('openingd-chan #1: Handed peer, entering loop'.format(l1.info['id']))
l2.daemon.wait_for_log('openingd-chan#1: Handed peer, entering loop'.format(l1.info['id']))
l2.fund_channel(l1, 10**6)
# This will fail at l2's end.

2
tests/test_connection.py

@@ -373,7 +373,7 @@ def test_reconnect_openingd(node_factory):
l1.bitcoin.generate_block(3)
# Just to be sure, second openingd hand over to channeld. This log line is about channeld being started
l2.daemon.wait_for_log(r'channeld-chan #[0-9]: pid [0-9]+, msgfd [0-9]+')
l2.daemon.wait_for_log(r'channeld-chan#[0-9]: pid [0-9]+, msgfd [0-9]+')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")

2
tests/test_gossip.py

@@ -354,7 +354,7 @@ def test_gossip_weirdalias(node_factory, bitcoind):
.format(normal_name))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('openingd-chan #1: Handed peer, entering loop')
l2.daemon.wait_for_log('openingd-chan#1: Handed peer, entering loop')
l2.fund_channel(l1, 10**6)
bitcoind.generate_block(6)

2
tests/test_misc.py

@@ -1199,7 +1199,7 @@ def test_htlc_send_timeout(node_factory, bitcoind):
timedout = False
while not timedout:
try:
l2.daemon.wait_for_log(r'channeld-chan #[0-9]*: \[IN\] ', timeout=30)
l2.daemon.wait_for_log(r'channeld-chan#[0-9]*: \[IN\] ', timeout=30)
except TimeoutError:
timedout = True

2
tests/test_plugin.py

@@ -263,7 +263,7 @@ def test_plugin_connected_hook(node_factory):
l1.daemon.wait_for_log(r"{} is in reject list".format(l3.info['id']))
# FIXME: this error occurs *after* connection, so we connect then drop.
l3.daemon.wait_for_log(r"openingd-chan #1: peer_in WIRE_ERROR")
l3.daemon.wait_for_log(r"openingd-chan#1: peer_in WIRE_ERROR")
l3.daemon.wait_for_log(r"You are in reject list")
def check_disconnect():

Loading…
Cancel
Save