
lightningd: use a simple array for less memory usage.

The tal overhead is 5 pointers, the linked-list node is 2 more, and the
string was separately tal'd (another 5-pointer header).  That's 96 bytes of overhead per entry.

Use a simple array instead, though it means more work on deletion
since each log_entry is no longer a tal object.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
travis-debug
Rusty Russell, 5 years ago
parent commit b210c9d91f
  1. lightningd/log.c (114)
  2. lightningd/log.h (1)
  3. lightningd/log_status.c (1)
  4. lightningd/test/run-log-pruning.c (128)
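For concreteness, a rough before/after sketch of the layouts the commit message describes, assuming 64-bit pointers and a ccan/tal header of about 5 pointers per allocation; the struct names and fields below are illustrative, not the real struct log_entry:

/* Before: every entry is its own tal object on an intrusive list.
 * Overhead per entry: entry's tal header (~40 bytes) + list node
 * (16 bytes) + the tal'd string's own header (~40 bytes) = ~96 bytes. */
struct sketch_old_entry {
	struct sketch_old_entry *next, *prev;	/* intrusive list node */
	char *log;				/* separately tal'd string */
};

/* After: entries live in one tal_arr (a single tal header for the
 * whole array) and the string is raw-malloc'd, so the per-entry
 * overhead is gone, at the cost of freeing things by hand. */
struct sketch_new_entry {
	char *log;	/* vasprintf()/strdup() string, freed manually */
};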

lightningd/log.c (114)

@@ -54,7 +54,8 @@ struct log_book {
struct timeabs init_time;
FILE *outf;
struct list_head log;
struct log_entry *log;
/* Although log_book will copy log entries to parent log_book
* (the log_book belongs to lightningd), a pointer to lightningd
* is more direct, because the notification needs ld->plugins.
@@ -183,41 +184,61 @@ static u32 delete_threshold(enum log_level level)
abort();
}
/* Delete a log entry: returns how many now deleted */
static size_t delete_entry(struct log_book *log, struct log_entry *i)
{
log->mem_used -= mem_used(i);
log->num_entries--;
if (i->nc && --i->nc->count == 0)
tal_free(i->nc);
free(i->log);
tal_free(i->io);
return 1 + i->skipped;
}
static size_t prune_log(struct log_book *log)
{
struct log_entry *i, *next;
size_t skipped = 0, deleted = 0, count = 0, max;
size_t skipped = 0, deleted = 0, count = 0, dst = 0, max, tail;
/* Never delete the last 10% (and definitely not last one!). */
max = log->num_entries * 9 / 10 - 1;
tail = log->num_entries / 10 + 1;
max = log->num_entries - tail;
list_for_each_safe(&log->log, i, next, list) {
if (count++ == max) {
i->skipped += skipped;
skipped = 0;
break;
}
for (count = 0; count < max; count++) {
struct log_entry *i = &log->log[count];
if (pseudorand(1000) > delete_threshold(i->level)) {
i->skipped += skipped;
skipped = 0;
/* Move down if necessary. */
log->log[dst++] = *i;
continue;
}
list_del_from(&log->log, &i->list);
log->mem_used -= mem_used(i);
log->num_entries--;
skipped += 1 + i->skipped;
if (i->nc && --i->nc->count == 0)
tal_free(i->nc);
tal_free(i);
skipped += delete_entry(log, i);
deleted++;
}
assert(!skipped);
/* Any skipped at tail go on the next entry */
log->log[count].skipped += skipped;
/* Move down the last 10% */
memmove(log->log + dst, log->log + count, tail * sizeof(*log->log));
return deleted;
}
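The rewritten prune_log is an instance of the standard in-place filter-and-compact pattern over an array. A minimal standalone sketch of that pattern, with a hypothetical keep() predicate standing in for the pseudorandom threshold test, and the freeing and skipped-count bookkeeping elided:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct sketch_entry { int level; };	/* placeholder for struct log_entry */

/* Filter the prunable prefix in place, copying survivors down to dst,
 * then memmove the protected tail so the array stays contiguous.
 * Returns the new number of entries. */
static size_t compact(struct sketch_entry *arr, size_t num, size_t tail,
		      bool (*keep)(const struct sketch_entry *))
{
	size_t dst = 0, count;

	for (count = 0; count + tail < num; count++) {
		if (keep(&arr[count]))
			arr[dst++] = arr[count];	/* move down if necessary */
		/* else dropped: the real code frees what the entry owns
		 * and credits 1 + skipped to a later survivor */
	}
	memmove(arr + dst, arr + count, tail * sizeof(*arr));
	return dst + tail;
}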
static void destroy_log_book(struct log_book *log)
{
size_t num = log->num_entries;
for (size_t i = 0; i < num; i++)
delete_entry(log, &log->log[i]);
assert(log->num_entries == 0);
assert(log->mem_used == 0);
}
struct log_book *new_log_book(struct lightningd *ld, size_t max_mem)
{
struct log_book *lr = tal_linkable(tal(NULL, struct log_book));
@@ -234,7 +255,8 @@ struct log_book *new_log_book(struct lightningd *ld, size_t max_mem)
lr->ld = ld;
lr->cache = tal(lr, struct node_id_map);
node_id_map_init(lr->cache);
list_head_init(&lr->log);
lr->log = tal_arr(lr, struct log_entry, 128);
tal_add_destructor(lr, destroy_log_book);
return lr;
}
@@ -294,15 +316,18 @@ enum log_level log_print_level(struct log *log)
return *log->print_level;
}
static void add_entry(struct log *log, struct log_entry *l)
/* This may move entry! */
static void add_entry(struct log *log, struct log_entry **l)
{
log->lr->mem_used += mem_used(l);
log->lr->mem_used += mem_used(*l);
log->lr->num_entries++;
list_add_tail(&log->lr->log, &l->list);
if (log->lr->mem_used > log->lr->max_mem) {
size_t old_mem = log->lr->mem_used, deleted;
deleted = prune_log(log->lr);
/* Will have moved, but will be last entry. */
*l = &log->lr->log[log->lr->num_entries-1];
log_debug(log, "Log pruned %zu entries (mem %zu -> %zu)",
deleted, old_mem, log->lr->mem_used);
}
@@ -316,8 +341,12 @@ static void destroy_node_id_cache(struct node_id_cache *nc, struct log_book *lr)
static struct log_entry *new_log_entry(struct log *log, enum log_level level,
const struct node_id *node_id)
{
struct log_entry *l = tal(log->lr, struct log_entry);
struct log_entry *l;
if (log->lr->num_entries == tal_count(log->lr->log))
tal_resize(&log->lr->log, tal_count(log->lr->log) * 2);
l = &log->lr->log[log->lr->num_entries];
l->time = time_now();
l->level = level;
l->skipped = 0;
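The growth step in new_log_entry is the usual amortized-doubling append on a tal_arr; a minimal sketch of that pattern using the tal_count()/tal_resize() calls already shown above (the helper name is hypothetical):

/* Append a slot with amortized O(1) cost: double the tal_arr when full. */
static struct log_entry *append_slot(struct log_book *lr)
{
	if (lr->num_entries == tal_count(lr->log))
		tal_resize(&lr->log, tal_count(lr->log) * 2);
	return &lr->log[lr->num_entries++];
}

Any previously held struct log_entry pointer can be invalidated by such a resize (or by prune_log's compaction), which is why add_entry above now takes a struct log_entry ** and re-points the caller's entry after pruning.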
@@ -359,7 +388,10 @@ void logv(struct log *log, enum log_level level,
int save_errno = errno;
struct log_entry *l = new_log_entry(log, level, node_id);
l->log = tal_vfmt(l, fmt, ap);
/* This is WARN_UNUSED_RESULT, because everyone should somehow deal
* with OOM, even though nobody does. */
if (vasprintf(&l->log, fmt, ap) == -1)
abort();
size_t log_len = strlen(l->log);
@@ -370,7 +402,7 @@ void logv(struct log *log, enum log_level level,
maybe_print(log, l);
add_entry(log, l);
add_entry(log, &l);
if (call_notifier)
notify_warning(log->lr->ld, l);
@@ -395,16 +427,22 @@ void log_io(struct log *log, enum log_level dir,
&l->time, str,
data, len, log->lr->outf);
l->log = tal_strdup(l, str);
/* Save a tal header, by using raw malloc. */
l->log = strdup(str);
if (taken(str))
tal_free(str);
/* Don't immediately fill buffer with giant IOs */
if (len > log->lr->max_mem / 64) {
l->skipped++;
len = log->lr->max_mem / 64;
}
l->io = tal_dup_arr(l, u8, data, len, 0);
add_entry(log, l);
/* FIXME: We could save 4 pointers by using a raw alloc, but we'd
* have to store the length ourselves. */
l->io = tal_dup_arr(log->lr, u8, data, len, 0);
add_entry(log, &l);
errno = save_errno;
}
@@ -442,12 +480,12 @@ static void log_each_line_(const struct log_book *lr,
void *arg),
void *arg)
{
const struct log_entry *i;
for (size_t i = 0; i < lr->num_entries; i++) {
const struct log_entry *l = &lr->log[i];
list_for_each(&lr->log, i, list) {
func(i->skipped, time_between(i->time, lr->init_time),
i->level, i->nc ? &i->nc->node_id : NULL,
i->prefix, i->log, i->io, arg);
func(l->skipped, time_between(l->time, lr->init_time),
l->level, l->nc ? &l->nc->node_id : NULL,
l->prefix, l->log, l->io, arg);
}
}
@@ -680,8 +718,6 @@ void opt_register_logging(struct lightningd *ld)
void logging_options_parsed(struct log_book *lr)
{
const struct log_entry *l;
/* If they didn't set an explicit level, set to info */
if (!lr->default_print_level) {
lr->default_print_level = tal(lr, enum log_level);
@@ -689,7 +725,9 @@ void logging_options_parsed(struct log_book *lr)
}
/* Catch up, since before we were only printing BROKEN msgs */
list_for_each(&lr->log, l, list) {
for (size_t i = 0; i < lr->num_entries; i++) {
const struct log_entry *l = &lr->log[i];
if (l->level >= filter_level(lr, l->prefix))
log_to_file(l->prefix, l->level,
l->nc ? &l->nc->node_id : NULL,
@@ -712,14 +750,12 @@ void log_backtrace_print(const char *fmt, ...)
static void log_dump_to_file(int fd, const struct log_book *lr)
{
const struct log_entry *i;
char buf[100];
int len;
struct log_data data;
time_t start;
i = list_top(&lr->log, const struct log_entry, list);
if (!i) {
if (lr->num_entries == 0) {
write_all(fd, "0 bytes:\n\n", strlen("0 bytes:\n\n"));
return;
}

lightningd/log.h (1)

@@ -75,7 +75,6 @@ struct command_result *param_loglevel(struct command *cmd,
enum log_level **level);
struct log_entry {
struct list_node list;
struct timeabs time;
enum log_level level;
unsigned int skipped;

lightningd/log_status.c (1)

@@ -22,6 +22,7 @@ bool log_status_msg(struct log *log,
log_(log, level, node_id, call_notifier, "%s", entry);
return true;
}
/* FIXME: It would be far more efficient to copy this into the log in place, rather than doing the additional allocation in fromwire. */
} else if (fromwire_status_io(msg, msg, &level, &suggested_node_id,
&who, &data)) {
if (level == LOG_IO_IN || level == LOG_IO_OUT) {

lightningd/test/run-log-pruning.c (128)

@@ -0,0 +1,128 @@
#include "../log.c"
/* AUTOGENERATED MOCKS START */
/* Generated stub for bigsize_get */
size_t bigsize_get(const u8 *p UNNEEDED, size_t max UNNEEDED, bigsize_t *val UNNEEDED)
{ fprintf(stderr, "bigsize_get called!\n"); abort(); }
/* Generated stub for bigsize_put */
size_t bigsize_put(u8 buf[BIGSIZE_MAX_LEN] UNNEEDED, bigsize_t v UNNEEDED)
{ fprintf(stderr, "bigsize_put called!\n"); abort(); }
/* Generated stub for command_fail */
struct command_result *command_fail(struct command *cmd UNNEEDED, int code UNNEEDED,
const char *fmt UNNEEDED, ...)
{ fprintf(stderr, "command_fail called!\n"); abort(); }
/* Generated stub for command_param_failed */
struct command_result *command_param_failed(void)
{ fprintf(stderr, "command_param_failed called!\n"); abort(); }
/* Generated stub for command_success */
struct command_result *command_success(struct command *cmd UNNEEDED,
struct json_stream *response)
{ fprintf(stderr, "command_success called!\n"); abort(); }
/* Generated stub for json_add_hex_talarr */
void json_add_hex_talarr(struct json_stream *result UNNEEDED,
const char *fieldname UNNEEDED,
const tal_t *data UNNEEDED)
{ fprintf(stderr, "json_add_hex_talarr called!\n"); abort(); }
/* Generated stub for json_add_member */
void json_add_member(struct json_stream *js UNNEEDED,
const char *fieldname UNNEEDED,
bool quote UNNEEDED,
const char *fmt UNNEEDED, ...)
{ fprintf(stderr, "json_add_member called!\n"); abort(); }
/* Generated stub for json_add_node_id */
void json_add_node_id(struct json_stream *response UNNEEDED,
const char *fieldname UNNEEDED,
const struct node_id *id UNNEEDED)
{ fprintf(stderr, "json_add_node_id called!\n"); abort(); }
/* Generated stub for json_add_num */
void json_add_num(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED,
unsigned int value UNNEEDED)
{ fprintf(stderr, "json_add_num called!\n"); abort(); }
/* Generated stub for json_add_string */
void json_add_string(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED, const char *value UNNEEDED)
{ fprintf(stderr, "json_add_string called!\n"); abort(); }
/* Generated stub for json_add_time */
void json_add_time(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED,
struct timespec ts UNNEEDED)
{ fprintf(stderr, "json_add_time called!\n"); abort(); }
/* Generated stub for json_array_end */
void json_array_end(struct json_stream *js UNNEEDED)
{ fprintf(stderr, "json_array_end called!\n"); abort(); }
/* Generated stub for json_array_start */
void json_array_start(struct json_stream *js UNNEEDED, const char *fieldname UNNEEDED)
{ fprintf(stderr, "json_array_start called!\n"); abort(); }
/* Generated stub for json_object_end */
void json_object_end(struct json_stream *js UNNEEDED)
{ fprintf(stderr, "json_object_end called!\n"); abort(); }
/* Generated stub for json_object_start */
void json_object_start(struct json_stream *ks UNNEEDED, const char *fieldname UNNEEDED)
{ fprintf(stderr, "json_object_start called!\n"); abort(); }
/* Generated stub for json_stream_log_suppress_for_cmd */
void json_stream_log_suppress_for_cmd(struct json_stream *js UNNEEDED,
const struct command *cmd UNNEEDED)
{ fprintf(stderr, "json_stream_log_suppress_for_cmd called!\n"); abort(); }
/* Generated stub for json_stream_success */
struct json_stream *json_stream_success(struct command *cmd UNNEEDED)
{ fprintf(stderr, "json_stream_success called!\n"); abort(); }
/* Generated stub for node_id_to_hexstr */
char *node_id_to_hexstr(const tal_t *ctx UNNEEDED, const struct node_id *id UNNEEDED)
{ fprintf(stderr, "node_id_to_hexstr called!\n"); abort(); }
/* Generated stub for notify_warning */
void notify_warning(struct lightningd *ld UNNEEDED, struct log_entry *l UNNEEDED)
{ fprintf(stderr, "notify_warning called!\n"); abort(); }
/* Generated stub for param */
bool param(struct command *cmd UNNEEDED, const char *buffer UNNEEDED,
const jsmntok_t params[] UNNEEDED, ...)
{ fprintf(stderr, "param called!\n"); abort(); }
/* AUTOGENERATED MOCKS END */
int main(void)
{
struct log_book *lb;
struct log *l;
setup_locale();
lb = new_log_book(NULL,
(sizeof(struct log_entry) + sizeof("test XXXXXX"))
*100);
l = new_log(lb, lb, NULL, "test %s", "prefix");
assert(streq(log_prefix(l), "test prefix"));
for (size_t i = 0; i < 100; i++)
log_debug(l, "test %06zi", i);
assert(lb->num_entries == 100);
for (size_t i = 0; i < 100; i++) {
assert(lb->log[i].level == LOG_DBG);
assert(lb->log[i].skipped == 0);
assert(lb->log[i].nc == NULL);
assert(streq(lb->log[i].prefix, "test prefix"));
assert(streq(lb->log[i].log, tal_fmt(lb, "test %06zi", i)));
assert(lb->log[i].io == NULL);
}
log_debug(l, "final test message");
assert(lb->num_entries < 100);
assert(lb->num_entries > 11);
/* last 10% must be preserved exactly (with final and pruning
* msg appended) */
for (size_t i = 91; i < 100; i++) {
size_t pos = lb->num_entries - 2 - (100 - i);
assert(streq(lb->log[pos].log, tal_fmt(lb, "test %06zi", i)));
}
assert(streq(lb->log[lb->num_entries - 2].log, "final test message"));
/* Sum should still reflect 102 total messages */
size_t total = 0;
for (size_t i = 0; i < lb->num_entries; i++)
total += 1 + lb->log[i].skipped;
assert(total == 102);
/* Freeing (last) log frees logbook */
tal_free(l);
}