diff --git a/lightningd/jsonrpc.c b/lightningd/jsonrpc.c
index df70e08c7..fc4b83fd7 100644
--- a/lightningd/jsonrpc.c
+++ b/lightningd/jsonrpc.c
@@ -89,6 +89,10 @@ struct json_connection {
 	/* How much has just been filled. */
 	size_t len_read;
 
+	/* JSON parsing state. */
+	jsmn_parser input_parser;
+	jsmntok_t *input_toks;
+
 	/* Our commands */
 	struct list_head commands;
 
@@ -912,9 +916,7 @@ static struct io_plan *stream_out_complete(struct io_conn *conn,
 static struct io_plan *read_json(struct io_conn *conn,
 				 struct json_connection *jcon)
 {
-	jsmntok_t *toks;
 	bool complete;
-	jsmn_parser parser;
 
 	if (jcon->len_read)
 		log_io(jcon->log, LOG_IO_IN, NULL, "",
@@ -931,9 +933,8 @@ static struct io_plan *read_json(struct io_conn *conn,
 		return io_wait(conn, conn, read_json, jcon);
 	}
 
-	toks = toks_alloc(jcon->buffer);
-	jsmn_init(&parser);
-	if (!json_parse_input(&parser, &toks, jcon->buffer, jcon->used,
+	if (!json_parse_input(&jcon->input_parser, &jcon->input_toks,
+			      jcon->buffer, jcon->used,
 			      &complete)) {
 		json_command_malformed(jcon, "null",
 				       "Invalid token in json input");
@@ -944,30 +945,36 @@ static struct io_plan *read_json(struct io_conn *conn,
 		goto read_more;
 
 	/* Empty buffer? (eg. just whitespace). */
-	if (tal_count(toks) == 1) {
+	if (tal_count(jcon->input_toks) == 1) {
 		jcon->used = 0;
+
+		/* Reset parser. */
+		jsmn_init(&jcon->input_parser);
+		toks_reset(jcon->input_toks);
 		goto read_more;
 	}
 
-	parse_request(jcon, toks);
+	parse_request(jcon, jcon->input_toks);
 
 	/* Remove first {}. */
-	memmove(jcon->buffer, jcon->buffer + toks[0].end,
-		tal_count(jcon->buffer) - toks[0].end);
-	jcon->used -= toks[0].end;
+	memmove(jcon->buffer, jcon->buffer + jcon->input_toks[0].end,
+		tal_count(jcon->buffer) - jcon->input_toks[0].end);
+	jcon->used -= jcon->input_toks[0].end;
+
+	/* Reset parser. */
+	jsmn_init(&jcon->input_parser);
+	toks_reset(jcon->input_toks);
 
 	/* If we have more to process, try again.  FIXME: this still gets
 	 * first priority in io_loop, so can starve others.  Hack would be
 	 * a (non-zero) timer, but better would be to have io_loop avoid
 	 * such livelock */
 	if (jcon->used) {
-		tal_free(toks);
 		jcon->len_read = 0;
 		return io_always(conn, read_json, jcon);
 	}
 
read_more:
-	tal_free(toks);
 	return io_read_partial(conn, jcon->buffer + jcon->used,
 			       tal_count(jcon->buffer) - jcon->used,
 			       &jcon->len_read, read_json, jcon);
@@ -986,6 +993,8 @@ static struct io_plan *jcon_connected(struct io_conn *conn,
 	jcon->buffer = tal_arr(jcon, char, 64);
 	jcon->js_arr = tal_arr(jcon, struct json_stream *, 0);
 	jcon->len_read = 0;
+	jsmn_init(&jcon->input_parser);
+	jcon->input_toks = toks_alloc(jcon);
 	list_head_init(&jcon->commands);
 
 	/* We want to log on destruction, so we free this in destructor. */
diff --git a/lightningd/plugin.c b/lightningd/plugin.c
index 0ea8b92b2..a6db1b75a 100644
--- a/lightningd/plugin.c
+++ b/lightningd/plugin.c
@@ -386,22 +386,16 @@ static const char *plugin_read_json_one(struct plugin *plugin,
 					bool *complete,
 					bool *destroyed)
 {
-	jsmntok_t *toks;
 	const jsmntok_t *jrtok, *idtok;
 	struct plugin_destroyed *pd;
 	const char *err;
-	jsmn_parser parser;
 
 	*destroyed = false;
 	/* Note that in the case of 'plugin stop' this can free request (since
 	 * plugin is parent), so detect that case */
 
-	/* FIXME: This could be done more efficiently by storing the
-	 * toks and doing an incremental parse, like lightning-cli
-	 * does. */
-	toks = toks_alloc(plugin);
-	jsmn_init(&parser);
-	if (!json_parse_input(&parser, &toks, plugin->buffer, plugin->used,
+	if (!json_parse_input(&plugin->parser, &plugin->toks,
+			      plugin->buffer, plugin->used,
 			      complete)) {
 		return tal_fmt(plugin,
 			       "Failed to parse JSON response '%.*s'",
@@ -410,21 +404,21 @@ static const char *plugin_read_json_one(struct plugin *plugin,
 
 	if (!*complete) {
 		/* We need more. */
-		tal_free(toks);
 		return NULL;
 	}
 
 	/* Empty buffer? (eg. just whitespace). */
-	if (tal_count(toks) == 1) {
-		tal_free(toks);
+	if (tal_count(plugin->toks) == 1) {
 		plugin->used = 0;
+		jsmn_init(&plugin->parser);
+		toks_reset(plugin->toks);
 		/* We need more. */
 		*complete = false;
 		return NULL;
 	}
 
-	jrtok = json_get_member(plugin->buffer, toks, "jsonrpc");
-	idtok = json_get_member(plugin->buffer, toks, "id");
+	jrtok = json_get_member(plugin->buffer, plugin->toks, "jsonrpc");
+	idtok = json_get_member(plugin->buffer, plugin->toks, "id");
 
 	if (!jrtok) {
 		return tal_fmt(
@@ -445,7 +439,7 @@ static const char *plugin_read_json_one(struct plugin *plugin,
 		 *
 		 * https://www.jsonrpc.org/specification#notification
 		 */
-		err = plugin_notification_handle(plugin, toks);
+		err = plugin_notification_handle(plugin, plugin->toks);
 
 	} else {
 		/* When a rpc call is made, the Server MUST reply with
@@ -475,7 +469,7 @@ static const char *plugin_read_json_one(struct plugin *plugin,
 		 *
 		 * https://www.jsonrpc.org/specification#response_object
 		 */
-		err = plugin_response_handle(plugin, toks, idtok);
+		err = plugin_response_handle(plugin, plugin->toks, idtok);
 	}
 
 	/* Corner case: rpc_command hook can destroy plugin for 'plugin
@@ -484,10 +478,11 @@ static const char *plugin_read_json_one(struct plugin *plugin,
 		*destroyed = true;
 	} else {
 		/* Move this object out of the buffer */
-		memmove(plugin->buffer, plugin->buffer + toks[0].end,
-			tal_count(plugin->buffer) - toks[0].end);
-		plugin->used -= toks[0].end;
-		tal_free(toks);
+		memmove(plugin->buffer, plugin->buffer + plugin->toks[0].end,
+			tal_count(plugin->buffer) - plugin->toks[0].end);
+		plugin->used -= plugin->toks[0].end;
+		jsmn_init(&plugin->parser);
+		toks_reset(plugin->toks);
 	}
 	return err;
 }
@@ -1306,6 +1301,8 @@ const char *plugin_send_getmanifest(struct plugin *p)
 
 	log_debug(p->plugins->log, "started(%u) %s", p->pid, p->cmd);
 	p->buffer = tal_arr(p, char, 64);
+	jsmn_init(&p->parser);
+	p->toks = toks_alloc(p);
 
 	/* Create two connections, one read-only on top of p->stdout, and one
 	 * write-only on p->stdin */
diff --git a/lightningd/plugin.h b/lightningd/plugin.h
index 60e5fc505..4b1a91f25 100644
--- a/lightningd/plugin.h
+++ b/lightningd/plugin.h
@@ -56,6 +56,8 @@ struct plugin {
 	/* Stuff we read */
 	char *buffer;
 	size_t used, len_read;
+	jsmn_parser parser;
+	jsmntok_t *toks;
 
 	/* Our json_streams. Since multiple streams could start
 	 * returning data at once, we always service these in order,
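
For reference, a minimal sketch of the incremental-parse pattern both call sites now share, assuming the common/json helpers behave as they are used above (jsmn_init, toks_alloc, toks_reset, json_parse_input); struct conn, conn_init() and handle_object() are hypothetical stand-ins for the json_connection/plugin state and their request handlers, not part of the diff:

/* Persistent parse state, kept alongside the read buffer. */
struct conn {
	char *buffer;		/* tal array of raw bytes read so far */
	size_t used;		/* bytes of buffer currently occupied */
	jsmn_parser parser;	/* jsmn state, persists across partial reads */
	jsmntok_t *toks;	/* tal token array, reused between objects */
};

static void conn_init(struct conn *c)
{
	/* Assumes c is itself tal-allocated, as jcon/plugin are. */
	c->buffer = tal_arr(c, char, 64);
	c->used = 0;
	jsmn_init(&c->parser);
	c->toks = toks_alloc(c);
}

/* Call whenever more bytes have been appended to c->buffer. */
static bool process_input(struct conn *c)
{
	bool complete;

	/* Parser and token array carry state between calls, so a
	 * half-received object is not re-tokenized from scratch when
	 * more bytes arrive. */
	if (!json_parse_input(&c->parser, &c->toks, c->buffer, c->used,
			      &complete))
		return false;		/* malformed JSON */
	if (!complete)
		return true;		/* need more bytes; state is kept */

	/* Just whitespace? (a single token means nothing real was seen) */
	if (tal_count(c->toks) == 1) {
		c->used = 0;
	} else {
		handle_object(c->buffer, c->toks);	/* hypothetical */

		/* Drop the consumed object, keep any trailing bytes. */
		memmove(c->buffer, c->buffer + c->toks[0].end,
			tal_count(c->buffer) - c->toks[0].end);
		c->used -= c->toks[0].end;
	}

	/* Reset for the next object: toks_reset() reuses the token
	 * allocation instead of freeing and re-allocating it, which is
	 * the win over the old per-call toks_alloc()/tal_free() pair. */
	jsmn_init(&c->parser);
	toks_reset(c->toks);
	return true;
}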