Browse Source

ccan: update to get ccan/io exclusive helpers.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
pr-2587
Rusty Russell 6 years ago
parent
commit
a319439957
  1. 2
      ccan/README
  2. 6
      ccan/ccan/compiler/compiler.h
  3. 34
      ccan/ccan/htable/htable.c
  4. 7
      ccan/ccan/htable/htable.h
  5. 70
      ccan/ccan/htable/test/run-allocator.c
  6. 223
      ccan/ccan/htable/test/run-debug.c
  7. 51
      ccan/ccan/htable/test/run-extra.c
  8. 13
      ccan/ccan/io/backend.h
  9. 49
      ccan/ccan/io/io.c
  10. 29
      ccan/ccan/io/io.h
  11. 191
      ccan/ccan/io/poll.c
  12. 2
      ccan/ccan/io/test/run-41-io_poll_override.c
  13. 142
      ccan/ccan/io/test/run-46-exclusive.c
  14. 144
      ccan/ccan/io/test/run-47-exclusive-duplex.c
  15. 144
      ccan/ccan/io/test/run-48-exclusive-duplex-write.c
  16. 73
      ccan/ccan/timer/test/run-allocator.c
  17. 28
      ccan/ccan/timer/timer.c
  18. 11
      ccan/ccan/timer/timer.h
  19. 5
      common/memleak.c

2
ccan/README

@ -1,3 +1,3 @@
CCAN imported from http://ccodearchive.net.
CCAN version: init-2458-g8cc0749a
CCAN version: init-2470-g2b3517d4

6
ccan/ccan/compiler/compiler.h

@ -263,12 +263,12 @@
* The compiler will warn if any of the specified pointer args are NULL.
*
* Example:
* char *my_copy2(char *buf, char *maybenull) NON_NULL_ARGS(1, 2);
* char *my_copy2(char *buf, char *maybenull) NON_NULL_ARGS(1);
*/
#define NON_NULL_ARGS(index, ...) __attribute__((__nonnull__(index, __VA_ARGS__)))
#define NON_NULL_ARGS(...) __attribute__((__nonnull__(__VA_ARGS__)))
#else
#define NO_NULL_ARGS
#define NON_NULL_ARGS(index, ...)
#define NON_NULL_ARGS(...)
#endif

34
ccan/ccan/htable/htable.c

@ -11,6 +11,30 @@
/* We use 0x1 as deleted marker. */
#define HTABLE_DELETED (0x1)
/* Default table allocator: zeroed memory via calloc (the table code
 * relies on new buckets being zero). */
static void *htable_default_alloc(struct htable *ht, size_t len)
{
return calloc(len, 1);
}
/* Default deallocator, paired with htable_default_alloc. */
static void htable_default_free(struct htable *ht, void *p)
{
free(p);
}
/* Current allocator hooks for all htables; default to the calloc/free pair. */
static void *(*htable_alloc)(struct htable *, size_t) = htable_default_alloc;
static void (*htable_free)(struct htable *, void *) = htable_default_free;
/* Install custom allocation hooks (process-wide, all htables).
 * Passing NULL for either restores the corresponding default.
 * @alloc must return zeroed memory, like calloc. */
void htable_set_allocator(void *(*alloc)(struct htable *, size_t len),
void (*free)(struct htable *, void *p))
{
if (!alloc)
alloc = htable_default_alloc;
if (!free)
free = htable_default_free;
htable_alloc = alloc;
htable_free = free;
}
/* We clear out the bits which are always the same, and put metadata there. */
static inline uintptr_t get_extra_ptr_bits(const struct htable *ht,
uintptr_t e)
@ -73,7 +97,7 @@ bool htable_init_sized(struct htable *ht,
break;
}
ht->table = calloc(1 << ht->bits, sizeof(size_t));
ht->table = htable_alloc(ht, sizeof(size_t) << ht->bits);
if (!ht->table) {
ht->table = &ht->perfect_bit;
return false;
@ -86,13 +110,13 @@ bool htable_init_sized(struct htable *ht,
void htable_clear(struct htable *ht)
{
if (ht->table != &ht->perfect_bit)
free((void *)ht->table);
htable_free(ht, (void *)ht->table);
htable_init(ht, ht->rehash, ht->priv);
}
bool htable_copy_(struct htable *dst, const struct htable *src)
{
uintptr_t *htable = malloc(sizeof(size_t) << src->bits);
uintptr_t *htable = htable_alloc(dst, sizeof(size_t) << src->bits);
if (!htable)
return false;
@ -189,7 +213,7 @@ static COLD bool double_table(struct htable *ht)
uintptr_t *oldtable, e;
oldtable = ht->table;
ht->table = calloc(1 << (ht->bits+1), sizeof(size_t));
ht->table = htable_alloc(ht, sizeof(size_t) << (ht->bits+1));
if (!ht->table) {
ht->table = oldtable;
return false;
@ -214,7 +238,7 @@ static COLD bool double_table(struct htable *ht)
ht_add(ht, p, ht->rehash(p, ht->priv));
}
}
free(oldtable);
htable_free(ht, oldtable);
}
ht->deleted = 0;

7
ccan/ccan/htable/htable.h

@ -259,4 +259,11 @@ void *htable_prev_(const struct htable *htable, struct htable_iter *i);
htable_delval_(htable_debug(htable, HTABLE_LOC), i)
void htable_delval_(struct htable *ht, struct htable_iter *i);
/**
* htable_set_allocator - set calloc/free functions.
* @alloc: allocator to use, must zero memory!
* @free: unallocator to use (@p is NULL or a return from @alloc)
*/
void htable_set_allocator(void *(*alloc)(struct htable *, size_t len),
void (*free)(struct htable *, void *p));
#endif /* CCAN_HTABLE_H */

70
ccan/ccan/htable/test/run-allocator.c

@ -0,0 +1,70 @@
/* Include the C files directly. */
#include <ccan/htable/htable.h>
#include <ccan/htable/htable.c>
#include <ccan/tap/tap.h>
#include <stdbool.h>
#include <string.h>
/* Embeds struct htable as the first member, so the struct htable *
 * handed to the allocator hooks can be cast back to reach the counters. */
struct htable_with_counters {
struct htable ht;
size_t num_alloc, num_free;
};
/* Counting wrapper: bump num_alloc, then allocate zeroed memory like
 * the default allocator. */
static void *test_alloc(struct htable *ht, size_t len)
{
((struct htable_with_counters *)ht)->num_alloc++;
return calloc(len, 1);
}
/* Counting wrapper around free(); NULL is neither counted nor freed
 * (the header documents @p may be NULL). */
static void test_free(struct htable *ht, void *p)
{
if (p) {
((struct htable_with_counters *)ht)->num_free++;
free(p);
}
}
/* Identity hash: each element is a size_t whose value is its own hash. */
static size_t hash(const void *elem, void *unused)
{
	const size_t *key = elem;
	return *key;
}
int main(void)
{
struct htable_with_counters htc;
size_t val[] = { 0, 1 };
htc.num_alloc = htc.num_free = 0;
plan_tests(12);
htable_set_allocator(test_alloc, test_free);
htable_init(&htc.ht, hash, NULL);
/* First add allocates the initial table. */
htable_add(&htc.ht, hash(&val[0], NULL), &val[0]);
ok1(htc.num_alloc == 1);
ok1(htc.num_free == 0);
/* Adding another increments, then frees old */
htable_add(&htc.ht, hash(&val[1], NULL), &val[1]);
ok1(htc.num_alloc == 2);
ok1(htc.num_free == 1);
htable_clear(&htc.ht);
ok1(htc.num_alloc == 2);
ok1(htc.num_free == 2);
/* Should restore defaults */
htable_set_allocator(NULL, NULL);
ok1(htable_alloc == htable_default_alloc);
ok1(htable_free == htable_default_free);
/* With defaults back, the counters must not move any more. */
htable_init(&htc.ht, hash, NULL);
htable_add(&htc.ht, hash(&val[0], NULL), &val[0]);
ok1(htc.num_alloc == 2);
ok1(htc.num_free == 2);
htable_add(&htc.ht, hash(&val[1], NULL), &val[1]);
ok1(htc.num_alloc == 2);
ok1(htc.num_free == 2);
htable_clear(&htc.ht);
/* This exits depending on whether all tests passed */
return exit_status();
}

223
ccan/ccan/htable/test/run-debug.c

@ -0,0 +1,223 @@
#define CCAN_HTABLE_DEBUG
#include <ccan/htable/htable.h>
#include <ccan/htable/htable.c>
#include <ccan/tap/tap.h>
#include <stdbool.h>
#include <string.h>
#define NUM_BITS 7
#define NUM_VALS (1 << NUM_BITS)
static void *bad_pointer;
/* We use the number divided by two as the hash (for lots of
collisions), plus set all the higher bits so we can detect if they
don't get masked out. */
/* Value divided by two (to force collisions), with all high bits set so
 * we can detect if they fail to get masked out.  bad_pointer is special-
 * cased because the debug wrapper re-hashes every stored element. */
static size_t hash(const void *elem, void *unused UNNEEDED)
{
size_t h;
/* With CCAN_HTABLE_DEBUG enabled, it will try to hash each element,
* including this one... */
if (elem == bad_pointer)
return 0;
h = *(uint64_t *)elem / 2;
h |= -1UL << NUM_BITS;
return h;
}
/* Equality callback for htable_get: both pointers address uint64_t keys. */
static bool objcmp(const void *htelem, void *cmpdata)
{
	const uint64_t *elem = htelem;
	const uint64_t *key = cmpdata;
	return *elem == *key;
}
/* Add val[off..off+num-1]: each must be absent beforehand and findable
 * afterwards (val[i] == i, so the key and the index coincide). */
static void add_vals(struct htable *ht,
const uint64_t val[],
unsigned int off, unsigned int num)
{
uint64_t i;
for (i = off; i < off+num; i++) {
if (htable_get(ht, hash(&i, NULL), objcmp, &i)) {
fail("%llu already in hash", (long long)i);
return;
}
htable_add(ht, hash(&val[i], NULL), &val[i]);
if (htable_get(ht, hash(&i, NULL), objcmp, &i) != &val[i]) {
fail("%llu not added to hash", (long long)i);
return;
}
}
pass("Added %llu numbers to hash", (long long)i);
}
#if 0
static void refill_vals(struct htable *ht,
const uint64_t val[], unsigned int num)
{
uint64_t i;
for (i = 0; i < num; i++) {
if (htable_get(ht, hash(&i, NULL), objcmp, &i))
continue;
htable_add(ht, hash(&val[i], NULL), &val[i]);
}
}
#endif
/* Check every val[0..num-1] is present and maps to its own address. */
static void find_vals(struct htable *ht,
const uint64_t val[], unsigned int num)
{
uint64_t i;
for (i = 0; i < num; i++) {
if (htable_get(ht, hash(&i, NULL), objcmp, &i) != &val[i]) {
fail("%llu not found in hash", (long long)i);
return;
}
}
pass("Found %llu numbers in hash", (long long)i);
}
/* Delete every val[0..num-1]; each deletion must succeed. */
static void del_vals(struct htable *ht,
const uint64_t val[], unsigned int num)
{
uint64_t i;
for (i = 0; i < num; i++) {
if (!htable_del(ht, hash(&val[i], NULL), &val[i])) {
fail("%llu not deleted from hash", (long long)i);
return;
}
}
pass("Deleted %llu numbers in hash", (long long)i);
}
/* Verify every element's address agrees with the table's common
 * pointer-bit mask (the bits htable steals for metadata). */
static bool check_mask(struct htable *ht, uint64_t val[], unsigned num)
{
uint64_t i;
for (i = 0; i < num; i++) {
if (((uintptr_t)&val[i] & ht->common_mask) != ht->common_bits)
return false;
}
return true;
}
int main(void)
{
unsigned int i, weight;
uintptr_t perfect_bit;
struct htable ht;
uint64_t val[NUM_VALS];
uint64_t dne;
void *p;
struct htable_iter iter;
plan_tests(36);
for (i = 0; i < NUM_VALS; i++)
val[i] = i;
/* dne = NUM_VALS: a key deliberately not in the table. */
dne = i;
htable_init(&ht, hash, NULL);
ok1(ht.max == 0);
ok1(ht.bits == 0);
/* We cannot find an entry which doesn't exist. */
ok1(!htable_get(&ht, hash(&dne, NULL), objcmp, &dne));
/* This should increase it once. */
add_vals(&ht, val, 0, 1);
ok1(ht.bits == 1);
ok1(ht.max == 1);
/* Count set bits in the common mask (Hamming weight). */
weight = 0;
for (i = 0; i < sizeof(ht.common_mask) * CHAR_BIT; i++) {
if (ht.common_mask & ((uintptr_t)1 << i)) {
weight++;
}
}
/* Only one bit should be clear. */
ok1(weight == i-1);
/* Mask should be set. */
ok1(check_mask(&ht, val, 1));
/* This should increase it again. */
add_vals(&ht, val, 1, 1);
ok1(ht.bits == 2);
ok1(ht.max == 3);
/* Mask should be set. */
ok1(ht.common_mask != 0);
ok1(ht.common_mask != -1);
ok1(check_mask(&ht, val, 2));
/* Now do the rest. */
add_vals(&ht, val, 2, NUM_VALS - 2);
/* Find all. */
find_vals(&ht, val, NUM_VALS);
ok1(!htable_get(&ht, hash(&dne, NULL), objcmp, &dne));
/* Walk once, should get them all. */
i = 0;
for (p = htable_first(&ht,&iter); p; p = htable_next(&ht, &iter))
i++;
ok1(i == NUM_VALS);
/* Walk backwards too. */
i = 0;
for (p = htable_prev(&ht, &iter); p; p = htable_prev(&ht, &iter))
i++;
ok1(i == NUM_VALS);
/* Delete all. */
del_vals(&ht, val, NUM_VALS);
ok1(!htable_get(&ht, hash(&val[0], NULL), objcmp, &val[0]));
/* Worst case, a "pointer" which doesn't have any matching bits. */
bad_pointer = (void *)~(uintptr_t)&val[NUM_VALS-1];
htable_add(&ht, 0, bad_pointer);
htable_add(&ht, hash(&val[NUM_VALS-1], NULL), &val[NUM_VALS-1]);
ok1(ht.common_mask == 0);
ok1(ht.common_bits == 0);
/* Get rid of bogus pointer before we trip over it! */
htable_del(&ht, 0, bad_pointer);
/* Add the rest. */
add_vals(&ht, val, 0, NUM_VALS-1);
/* Check we can find them all. */
find_vals(&ht, val, NUM_VALS);
ok1(!htable_get(&ht, hash(&dne, NULL), objcmp, &dne));
/* Corner cases: wipe out the perfect bit using bogus pointer. */
htable_clear(&ht);
htable_add(&ht, hash(&val[NUM_VALS-1], NULL), &val[NUM_VALS-1]);
ok1(ht.perfect_bit);
perfect_bit = ht.perfect_bit;
bad_pointer = (void *)((uintptr_t)&val[NUM_VALS-1] | perfect_bit);
htable_add(&ht, 0, bad_pointer);
ok1(ht.perfect_bit == 0);
htable_del(&ht, 0, bad_pointer);
/* Enlarging should restore it... */
add_vals(&ht, val, 0, NUM_VALS-1);
ok1(ht.perfect_bit != 0);
htable_clear(&ht);
/* Sized init: exact power of two, one below, one above. */
ok1(htable_init_sized(&ht, hash, NULL, 1024));
ok1(ht.max >= 1024);
htable_clear(&ht);
ok1(htable_init_sized(&ht, hash, NULL, 1023));
ok1(ht.max >= 1023);
htable_clear(&ht);
ok1(htable_init_sized(&ht, hash, NULL, 1025));
ok1(ht.max >= 1025);
htable_clear(&ht);
return exit_status();
}

51
ccan/ccan/htable/test/run-extra.c

@ -0,0 +1,51 @@
#include "../htable.c"
/* Hash the pointer value itself: fold the pointer-size quotient with
 * its low alignment bits.  We're hashing pointers; no need to get
 * too fancy. */
static size_t hash(const void *ptr, void *priv)
{
	size_t v = (size_t)ptr;
	size_t quotient = v / sizeof(ptr);
	size_t remainder = v % sizeof(ptr);
	return quotient ^ remainder;
}
/* 24042: Waiting on 0x5570a500c3f8 (11742786623615)
24042: Waiting on 0x5570a500c430 (11742786623622)
24042: Searching for 0x5570a500c3f8 (11742786623615) in 2 elems
24042: Searching for 0x5570a500c3f8 (11742786623615) in 2 elems
*/
static struct htable waittable = HTABLE_INITIALIZER(waittable, hash, NULL);
/* Regression test: two specific pointer values (captured from the log
 * above) must both be findable via firstval/nextval iteration. */
int main(void)
{
const void *p1 = (void *)0x5570a500c3f8ULL;
const void *p2 = (void *)0x5570a500c430ULL;
size_t h;
struct htable_iter i;
void *p;
bool found;
printf("hash %p == %zu\n", p1, hash(p1, NULL));
printf("hash %p == %zu\n", p2, hash(p2, NULL));
htable_add(&waittable, hash(p1, NULL), p1);
htable_add(&waittable, hash(p2, NULL), p2);
/* Scan every element with p1's hash; p1 must be among them. */
found = false;
h = hash(p1, NULL);
for (p = htable_firstval(&waittable, &i, h);
p;
p = htable_nextval(&waittable, &i, h)) {
if (p == p1)
found = true;
}
assert(found);
/* Likewise for p2. */
found = false;
h = hash(p2, NULL);
for (p = htable_firstval(&waittable, &i, h);
p;
p = htable_nextval(&waittable, &i, h)) {
if (p == p2)
found = true;
}
assert(found);
return found ? 0 : 1;
}

13
ccan/ccan/io/backend.h

@ -8,6 +8,8 @@
struct fd {
int fd;
bool listener;
/* We could put these in io_plan, but they pack nicely here */
bool exclusive[2];
size_t backend_info;
};
@ -37,6 +39,7 @@ enum io_plan_status {
/**
* struct io_plan - one half of I/O to do
* @status: the status of this plan.
* @dir: are we plan[0] or plan[1] inside io_conn?
* @io: function to call when fd becomes read/writable, returns 0 to be
* called again, 1 if it's finished, and -1 on error (fd will be closed)
* @next: the next function which is called if io returns 1.
@ -45,6 +48,7 @@ enum io_plan_status {
*/
struct io_plan {
enum io_plan_status status;
enum io_direction dir;
int (*io)(int fd, struct io_plan_arg *arg);
@ -58,9 +62,6 @@ struct io_plan {
struct io_conn {
struct fd fd;
/* always list. */
struct list_node always;
void (*finish)(struct io_conn *, void *arg);
void *finish_arg;
@ -74,15 +75,15 @@ bool add_conn(struct io_conn *c);
bool add_duplex(struct io_conn *c);
void del_listener(struct io_listener *l);
void cleanup_conn_without_close(struct io_conn *c);
void backend_new_always(struct io_conn *conn);
bool backend_new_always(struct io_plan *plan);
void backend_new_plan(struct io_conn *conn);
void remove_from_always(struct io_conn *conn);
void backend_plan_done(struct io_conn *conn);
bool backend_set_exclusive(struct io_plan *plan, bool exclusive);
void backend_wake(const void *wait);
void io_ready(struct io_conn *conn, int pollflags);
void io_do_always(struct io_conn *conn);
void io_do_always(struct io_plan *conn);
void io_do_wakeup(struct io_conn *conn, enum io_direction dir);
void *do_io_loop(struct io_conn **ready);
#endif /* CCAN_IO_BACKEND_H */

49
ccan/ccan/io/io.c

@ -61,10 +61,7 @@ static bool next_plan(struct io_conn *conn, struct io_plan *plan)
if (plan == &io_conn_freed)
return false;
/* It should have set a plan inside this conn (or duplex) */
assert(plan == &conn->plan[IO_IN]
|| plan == &conn->plan[IO_OUT]
|| plan == &conn->plan[2]);
assert(plan == &conn->plan[plan->dir]);
assert(conn->plan[IO_IN].status != IO_UNSET
|| conn->plan[IO_OUT].status != IO_UNSET);
@ -100,7 +97,6 @@ struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
conn->fd.fd = fd;
conn->finish = NULL;
conn->finish_arg = NULL;
list_node_init(&conn->always);
if (!add_conn(conn))
return tal_free(conn);
@ -108,6 +104,10 @@ struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
/* Keep our I/O async. */
io_fd_block(fd, false);
/* So we can get back from plan -> conn later */
conn->plan[IO_OUT].dir = IO_OUT;
conn->plan[IO_IN].dir = IO_IN;
/* We start with out doing nothing, and in doing our init. */
conn->plan[IO_OUT].status = IO_UNSET;
@ -119,6 +119,16 @@ struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
return conn;
}
/* Set/unset exclusive servicing of @conn's read side; returns true
 * while any exclusive plan remains (see io.h for full semantics). */
bool io_conn_exclusive(struct io_conn *conn, bool exclusive)
{
return backend_set_exclusive(&conn->plan[IO_IN], exclusive);
}
/* As io_conn_exclusive(), but for the write side of a duplex conn. */
bool io_conn_out_exclusive(struct io_conn *conn, bool exclusive)
{
return backend_set_exclusive(&conn->plan[IO_OUT], exclusive);
}
void io_set_finish_(struct io_conn *conn,
void (*finish)(struct io_conn *, void *),
void *arg)
@ -144,7 +154,9 @@ static struct io_plan *set_always(struct io_conn *conn,
struct io_plan *plan = &conn->plan[dir];
plan->status = IO_ALWAYS;
backend_new_always(conn);
/* Only happens on OOM, and only with non-default tal_backend. */
if (!backend_new_always(plan))
return NULL;
return io_set_plan(conn, dir, NULL, next, arg);
}
@ -413,27 +425,14 @@ void io_ready(struct io_conn *conn, int pollflags)
|| conn->plan[IO_IN].status == IO_POLLING_STARTED);
}
void io_do_always(struct io_conn *conn)
void io_do_always(struct io_plan *plan)
{
/* There's a corner case where the in next_plan wakes up the
* out, placing it in IO_ALWAYS and we end up processing it immediately,
* only to leave it in the always list.
*
* Yet we can't just process one, in case they are both supposed
* to be done, so grab state beforehand.
*/
bool always_out = (conn->plan[IO_OUT].status == IO_ALWAYS);
struct io_conn *conn;
if (conn->plan[IO_IN].status == IO_ALWAYS)
if (!next_plan(conn, &conn->plan[IO_IN]))
return;
assert(plan->status == IO_ALWAYS);
conn = container_of(plan, struct io_conn, plan[plan->dir]);
if (always_out) {
/* You can't *unalways* a conn (except by freeing, in which
* case next_plan() returned false */
assert(conn->plan[IO_OUT].status == IO_ALWAYS);
next_plan(conn, &conn->plan[IO_OUT]);
}
next_plan(conn, plan);
}
void io_do_wakeup(struct io_conn *conn, enum io_direction dir)
@ -488,7 +487,7 @@ struct io_plan *io_duplex(struct io_conn *conn,
assert(conn == container_of(in_plan, struct io_conn, plan[IO_IN]));
/* in_plan must be conn->plan[IO_IN], out_plan must be [IO_OUT] */
assert(out_plan == in_plan + 1);
return out_plan + 1;
return in_plan;
}
struct io_plan *io_halfclose(struct io_conn *conn)

29
ccan/ccan/io/io.h

@ -722,6 +722,35 @@ bool io_plan_out_started(const struct io_conn *conn);
*/
bool io_flush_sync(struct io_conn *conn);
/**
* io_conn_exclusive - set/unset an io_conn to exclusively serviced
* @conn: the connection
* @exclusive: whether to be exclusive or not
*
* If any io_conn is set exclusive, then no non-exclusive io_conn (or
* io_listener) will be serviced by io_loop(). If it's a io_duplex io_conn(),
* then io_conn_exclusive() makes the read-side exclusive; io_conn_out_exclusive()
* makes the write-side exclusive.
*
* This allows you to temporarily service only one (or several) fds.
* For example, you might want to flush out one io_conn and not
* receive any new connections or read any other input.
*
* Returns true if any exclusive io_conn remain, otherwise false.
* (This is useful for checking your own logic: dangling exclusive io_conn
* are dangerous!).
*/
bool io_conn_exclusive(struct io_conn *conn, bool exclusive);
/**
* io_conn_out_exclusive - set/unset exclusive on the write-side of a duplex
* @conn: the connection, post io_duplex
* @exclusive: whether to be exclusive or not
*
* See io_conn_exclusive() above.
*/
bool io_conn_out_exclusive(struct io_conn *conn, bool exclusive);
/**
* io_fd_block - helper to set an fd blocking/nonblocking.
* @fd: the file descriptor

191
ccan/ccan/io/poll.c

@ -11,11 +11,10 @@
#include <ccan/time/time.h>
#include <ccan/timer/timer.h>
static size_t num_fds = 0, max_fds = 0, num_waiting = 0;
static size_t num_fds = 0, max_fds = 0, num_waiting = 0, num_always = 0, max_always = 0, num_exclusive = 0;
static struct pollfd *pollfds = NULL;
static struct fd **fds = NULL;
static LIST_HEAD(closing);
static LIST_HEAD(always);
static struct io_plan **always = NULL;
static struct timemono (*nowfn)(void) = time_mono;
static int (*pollfn)(struct pollfd *fds, nfds_t nfds, int timeout) = poll;
@ -59,12 +58,13 @@ static bool add_fd(struct fd *fd, short events)
pollfds[num_fds].events = events;
/* In case it's idle. */
if (!events)
pollfds[num_fds].fd = -fd->fd;
pollfds[num_fds].fd = -fd->fd - 1;
else
pollfds[num_fds].fd = fd->fd;
pollfds[num_fds].revents = 0; /* In case we're iterating now */
fds[num_fds] = fd;
fd->backend_info = num_fds;
fd->exclusive[0] = fd->exclusive[1] = false;
num_fds++;
if (events)
num_waiting++;
@ -91,9 +91,18 @@ static void del_fd(struct fd *fd)
pollfds = tal_free(pollfds);
fds = NULL;
max_fds = 0;
if (num_always == 0) {
always = tal_free(always);
max_always = 0;
}
}
num_fds--;
fd->backend_info = -1;
if (fd->exclusive[IO_IN])
num_exclusive--;
if (fd->exclusive[IO_OUT])
num_exclusive--;
}
static void destroy_listener(struct io_listener *l)
@ -110,24 +119,63 @@ bool add_listener(struct io_listener *l)
return true;
}
void remove_from_always(struct io_conn *conn)
static int find_always(const struct io_plan *plan)
{
list_del_init(&conn->always);
for (size_t i = 0; i < num_always; i++)
if (always[i] == plan)
return i;
return -1;
}
void backend_new_always(struct io_conn *conn)
static void remove_from_always(const struct io_plan *plan)
{
/* In case it's already in always list. */
list_del(&conn->always);
list_add_tail(&always, &conn->always);
int pos;
if (plan->status != IO_ALWAYS)
return;
pos = find_always(plan);
assert(pos >= 0);
/* Move last one down if we made a hole */
if (pos != num_always-1)
always[pos] = always[num_always-1];
num_always--;
/* Only free if no fds left either. */
if (num_always == 0 && max_fds == 0) {
always = tal_free(always);
max_always = 0;
}
}
void backend_new_plan(struct io_conn *conn)
bool backend_new_always(struct io_plan *plan)
{
struct pollfd *pfd = &pollfds[conn->fd.backend_info];
assert(find_always(plan) == -1);
if (pfd->events)
num_waiting--;
if (!max_always) {
assert(num_always == 0);
always = tal_arr(NULL, struct io_plan *, 8);
if (!always)
return false;
max_always = 8;
}
if (num_always + 1 > max_always) {
size_t num = max_always * 2;
if (!tal_resize(&always, num))
return false;
max_always = num;
}
always[num_always++] = plan;
return true;
}
static void setup_pfd(struct io_conn *conn, struct pollfd *pfd)
{
assert(pfd == &pollfds[conn->fd.backend_info]);
pfd->events = 0;
if (conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED
@ -138,13 +186,25 @@ void backend_new_plan(struct io_conn *conn)
pfd->events |= POLLOUT;
if (pfd->events) {
num_waiting++;
pfd->fd = conn->fd.fd;
} else {
pfd->fd = -conn->fd.fd;
pfd->fd = -conn->fd.fd - 1;
}
}
/* Recompute poll events for @conn after its plan changed, keeping the
 * num_waiting count of event-carrying pollfds in sync. */
void backend_new_plan(struct io_conn *conn)
{
struct pollfd *pfd = &pollfds[conn->fd.backend_info];
if (pfd->events)
num_waiting--;
setup_pfd(conn, pfd);
if (pfd->events)
num_waiting++;
}
void backend_wake(const void *wait)
{
unsigned int i;
@ -174,8 +234,9 @@ static void destroy_conn(struct io_conn *conn, bool close_fd)
if (close_fd)
close(conn->fd.fd);
del_fd(&conn->fd);
/* In case it's on always list, remove it. */
list_del_init(&conn->always);
remove_from_always(&conn->plan[IO_IN]);
remove_from_always(&conn->plan[IO_OUT]);
/* errno saved/restored by tal_free itself. */
if (conn->finish) {
@ -214,21 +275,88 @@ static void accept_conn(struct io_listener *l)
io_new_conn(l->ctx, fd, l->init, l->arg);
}
static bool handle_always(void)
/* Return pointer to exclusive flag for this plan. */
static bool *exclusive(struct io_plan *plan)
{
bool ret = false;
struct io_conn *conn;
while ((conn = list_pop(&always, struct io_conn, always)) != NULL) {
assert(conn->plan[IO_IN].status == IO_ALWAYS
|| conn->plan[IO_OUT].status == IO_ALWAYS);
conn = container_of(plan, struct io_conn, plan[plan->dir]);
return &conn->fd.exclusive[plan->dir];
}
/* Re-initialize, for next time. */
list_node_init(&conn->always);
io_do_always(conn);
ret = true;
/* For simplicity, we do one always at a time */
static bool handle_always(void)
{
/* Iterate backwards: simpler to remove entries as we go. */
for (int i = num_always - 1; i >= 0; i--) {
struct io_plan *plan = always[i];
/* While anything is exclusive, skip non-exclusive plans. */
if (num_exclusive && !*exclusive(plan))
continue;
/* Remove first: it might re-add */
if (i != num_always-1)
always[i] = always[num_always-1];
num_always--;
io_do_always(plan);
return true;
}
return false;
}
/* Flip the exclusive flag for @plan's side of its fd, keeping the
 * global num_exclusive count in step.  Returns true while any plan
 * anywhere is still exclusive. */
bool backend_set_exclusive(struct io_plan *plan, bool excl)
{
bool *excl_ptr = exclusive(plan);
if (excl != *excl_ptr) {
*excl_ptr = excl;
if (!excl)
num_exclusive--;
else
num_exclusive++;
}
return num_exclusive != 0;
}
/* FIXME: We could do this once at set_exclusive time, and catch everywhere
* else that we manipulate events. */
/* Before poll(): strip events from every fd side not marked exclusive,
 * so only exclusive conns are serviced.  Undone by restore_pollfds(). */
static void exclude_pollfds(void)
{
/* Common case: nothing exclusive, nothing to do. */
if (num_exclusive == 0)
return;
for (size_t i = 0; i < num_fds; i++) {
struct pollfd *pfd = &pollfds[fds[i]->backend_info];
if (!fds[i]->exclusive[IO_IN])
pfd->events &= ~POLLIN;
if (!fds[i]->exclusive[IO_OUT])
pfd->events &= ~POLLOUT;
/* If we're not listening, we don't want error events
* either. */
if (!pfd->events)
pfd->fd = -fds[i]->fd - 1;
}
}
static void restore_pollfds(void)
{
if (num_exclusive == 0)
return;
for (size_t i = 0; i < num_fds; i++) {
struct pollfd *pfd = &pollfds[fds[i]->backend_info];
if (fds[i]->listener) {
pfd->events = POLLIN;
pfd->fd = fds[i]->fd;
} else {
struct io_conn *conn = (void *)fds[i];
setup_pfd(conn, pfd);
}
}
return ret;
}
/* This is the main loop. */
@ -279,7 +407,11 @@ void *io_loop(struct timers *timers, struct timer **expired)
}
}
/* We do this temporarily, assuming exclusive is unusual */
exclude_pollfds();
r = pollfn(pollfds, num_fds, ms_timeout);
restore_pollfds();
if (r < 0) {
/* Signals shouldn't break us, unless they set
* io_loop_return. */
@ -292,6 +424,9 @@ void *io_loop(struct timers *timers, struct timer **expired)
struct io_conn *c = (void *)fds[i];
int events = pollfds[i].revents;
/* Clear so we don't get confused if exclusive next time */
pollfds[i].revents = 0;
if (r == 0)
break;

2
ccan/ccan/io/test/run-41-io_poll_override.c

@ -6,7 +6,7 @@
#include <sys/wait.h>
#include <stdio.h>
#define PORT "65020"
#define PORT "65041"
/* Should be looking to read from one fd. */
static int mypoll(struct pollfd *fds, nfds_t nfds, int timeout)

142
ccan/ccan/io/test/run-46-exclusive.c

@ -0,0 +1,142 @@
#include <ccan/io/io.h>
/* Include the C files directly. */
#include <ccan/io/poll.c>
#include <ccan/io/io.c>
#include <ccan/tap/tap.h>
#include <sys/wait.h>
#include <stdio.h>
#define PORT "65046"
struct data {
struct io_listener *l;
int num_clients;
char *pattern;
char buf[30];
size_t buflen;
};
static struct io_plan *read_more(struct io_conn *conn, struct data *d);
/* Append what we just read to the accumulated pattern, then read again. */
static struct io_plan *read_done(struct io_conn *conn, struct data *d)
{
tal_resize(&d->pattern, tal_count(d->pattern) + strlen(d->buf));
strcat(d->pattern, d->buf);
return read_more(conn, d);
}
/* Zero the buffer (so strlen works in read_done) and start a partial read. */
static struct io_plan *read_more(struct io_conn *conn, struct data *d)
{
memset(d->buf, 0, sizeof(d->buf));
return io_read_partial(conn, d->buf, sizeof(d->buf), &d->buflen,
read_done, d);
}
/* Listener callback: once the second client arrives, stop listening and
 * make that connection exclusive so only it is serviced. */
static struct io_plan *init_conn(struct io_conn *conn, struct data *d)
{
d->num_clients++;
if (d->num_clients == 2) {
/* Free listener so when conns close we exit io_loop */
io_close_listener(d->l);
/* Set priority to second connection. */
ok1(io_conn_exclusive(conn, true) == true);
}
return read_more(conn, d);
}
/* Create a TCP socket bound to @port and listening; on success returns
 * the fd and stores the addrinfo (caller frees) in *info, else -1. */
static int make_listen_fd(const char *port, struct addrinfo **info)
{
int fd, on = 1;
struct addrinfo *addrinfo, hints;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE;
hints.ai_protocol = 0;
if (getaddrinfo(NULL, port, &hints, &addrinfo) != 0)
return -1;
fd = socket(addrinfo->ai_family, addrinfo->ai_socktype,
addrinfo->ai_protocol);
if (fd < 0)
return -1;
/* SO_REUSEADDR: allow quick re-bind when the test is re-run. */
setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
if (bind(fd, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0) {
close(fd);
return -1;
}
if (listen(fd, 1) != 0) {
close(fd);
return -1;
}
*info = addrinfo;
return fd;
}
int main(void)
{
struct addrinfo *addrinfo = NULL;
int fd, status;
struct data d;
d.num_clients = 0;
/* This is how many tests you plan to run */
plan_tests(8);
fd = make_listen_fd(PORT, &addrinfo);
ok1(fd >= 0);
d.l = io_new_listener(NULL, fd, init_conn, &d);
ok1(d.l);
fflush(stdout);
if (!fork()) {
/* Child: connect twice; writes to fd2 must be seen before
 * the second write to fd1, since conn 2 becomes exclusive. */
int fd1, fd2;
io_close_listener(d.l);
fd1 = socket(addrinfo->ai_family, addrinfo->ai_socktype,
addrinfo->ai_protocol);
if (fd1 < 0)
exit(1);
if (connect(fd1, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0)
exit(2);
if (write(fd1, "1hellothere", strlen("1hellothere")) != strlen("1hellothere"))
exit(3);
fd2 = socket(addrinfo->ai_family, addrinfo->ai_socktype,
addrinfo->ai_protocol);
if (fd2 < 0)
exit(1);
if (connect(fd2, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0)
exit(2);
signal(SIGPIPE, SIG_IGN);
sleep(1);
if (write(fd1, "1helloagain", strlen("1helloagain")) != strlen("1helloagain"))
exit(4);
sleep(1);
if (write(fd2, "2hellonew", strlen("2hellonew")) != strlen("2hellonew"))
exit(5);
close(fd1);
close(fd2);
freeaddrinfo(addrinfo);
exit(0);
}
freeaddrinfo(addrinfo);
d.pattern = tal_arrz(NULL, char, 1);
ok1(io_loop(NULL, NULL) == NULL);
if (!ok1(strcmp(d.pattern, "1hellothere2hellonew1helloagain") == 0))
printf("d.patterns = %s\n", d.pattern);
tal_free(d.pattern);
ok1(wait(&status));
ok1(WIFEXITED(status));
ok1(WEXITSTATUS(status) == 0);
/* This exits depending on whether all tests passed */
return exit_status();
}

144
ccan/ccan/io/test/run-47-exclusive-duplex.c

@ -0,0 +1,144 @@
#include <ccan/io/io.h>
/* Include the C files directly. */
#include <ccan/io/poll.c>
#include <ccan/io/io.c>
#include <ccan/tap/tap.h>
#include <sys/wait.h>
#include <stdio.h>
#define PORT "65047"
struct data {
struct io_listener *l;
char *pattern;
char buf[30];
size_t buflen;
};
static struct io_plan *read_more(struct io_conn *conn, struct data *d);
static struct io_plan *write_more(struct io_conn *conn, struct data *d);
/* Record a read: append "<" plus the bytes read, then keep reading. */
static struct io_plan *read_done(struct io_conn *conn, struct data *d)
{
tal_resize(&d->pattern, tal_count(d->pattern) + 1 + strlen(d->buf));
strcat(d->pattern, "<");
strcat(d->pattern, d->buf);
return read_more(conn, d);
}
/* Zero the buffer (so strlen works in read_done) and start a partial read. */
static struct io_plan *read_more(struct io_conn *conn, struct data *d)
{
memset(d->buf, 0, sizeof(d->buf));
return io_read_partial(conn, d->buf, sizeof(d->buf), &d->buflen,
read_done, d);
}
/* Record a write with ">" in the pattern, then keep writing. */
static struct io_plan *write_done(struct io_conn *conn, struct data *d)
{
tal_resize(&d->pattern, tal_count(d->pattern) + 1);
strcat(d->pattern, ">");
return write_more(conn, d);
}
/* Write one byte at a time; should never run while read side is exclusive. */
static struct io_plan *write_more(struct io_conn *conn, struct data *d)
{
return io_write_partial(conn, d->buf, 1, &d->buflen,
write_done, d);
}
/* Make the read side exclusive, then start reading. */
static struct io_plan *read_priority_init(struct io_conn *conn, struct data *d)
{
/* This should suppress the write */
ok1(io_conn_exclusive(conn, true));
return read_more(conn, d);
}
/* Accept one conn, set up duplex I/O with an exclusive read side. */
static struct io_plan *init_conn(struct io_conn *conn, struct data *d)
{
/* Free listener so when conns close we exit io_loop */
io_close_listener(d->l);
return io_duplex(conn, read_priority_init(conn, d), write_more(conn, d));
}
/* Create a TCP socket bound to @port and listening; on success returns
 * the fd and stores the addrinfo (caller frees) in *info, else -1. */
static int make_listen_fd(const char *port, struct addrinfo **info)
{
int fd, on = 1;
struct addrinfo *addrinfo, hints;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE;
hints.ai_protocol = 0;
if (getaddrinfo(NULL, port, &hints, &addrinfo) != 0)
return -1;
fd = socket(addrinfo->ai_family, addrinfo->ai_socktype,
addrinfo->ai_protocol);
if (fd < 0)
return -1;
/* SO_REUSEADDR: allow quick re-bind when the test is re-run. */
setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
if (bind(fd, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0) {
close(fd);
return -1;
}
if (listen(fd, 1) != 0) {
close(fd);
return -1;
}
*info = addrinfo;
return fd;
}
int main(void)
{
struct addrinfo *addrinfo = NULL;
int fd, status;
struct data d;
/* This is how many tests you plan to run */
plan_tests(8);
fd = make_listen_fd(PORT, &addrinfo);
ok1(fd >= 0);
d.l = io_new_listener(NULL, fd, init_conn, &d);
ok1(d.l);
fflush(stdout);
if (!fork()) {
/* Child: single connection, two writes a second apart. */
io_close_listener(d.l);
fd = socket(addrinfo->ai_family, addrinfo->ai_socktype,
addrinfo->ai_protocol);
if (fd < 0)
exit(1);
if (connect(fd, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0)
exit(2);
signal(SIGPIPE, SIG_IGN);
if (write(fd, "1hellothere", strlen("1hellothere")) != strlen("1hellothere"))
exit(3);
sleep(1);
if (write(fd, "1helloagain", strlen("1helloagain")) != strlen("1helloagain"))
exit(4);
close(fd);
freeaddrinfo(addrinfo);
exit(0);
}
freeaddrinfo(addrinfo);
d.pattern = tal_arrz(NULL, char, 1);
ok1(io_loop(NULL, NULL) == NULL);
/* No trace of writes */
ok1(strcmp(d.pattern, "<1hellothere<1helloagain") == 0);
tal_free(d.pattern);
ok1(wait(&status));
ok1(WIFEXITED(status));
ok1(WEXITSTATUS(status) == 0);
/* This exits depending on whether all tests passed */
return exit_status();
}

144
ccan/ccan/io/test/run-48-exclusive-duplex-write.c

@ -0,0 +1,144 @@
#include <ccan/io/io.h>
/* Include the C files directly. */
#include <ccan/io/poll.c>
#include <ccan/io/io.c>
#include <ccan/tap/tap.h>
#include <sys/wait.h>
#include <stdio.h>
#define PORT "65048"
struct data {
struct io_listener *l;
char *pattern;
char buf[30];
size_t buflen;
};
static struct io_plan *read_more(struct io_conn *conn, struct data *d);
static struct io_plan *write_more(struct io_conn *conn, struct data *d);
/* A chunk arrived: append "<" plus its contents to the log, keep reading. */
static struct io_plan *read_done(struct io_conn *conn, struct data *d)
{
	size_t oldlen = tal_count(d->pattern);

	tal_resize(&d->pattern, oldlen + strlen(d->buf) + 1);
	strcat(strcat(d->pattern, "<"), d->buf);
	return read_more(conn, d);
}
/* Zero the scratch buffer (keeps it NUL-terminated for strcat) and
 * wait for the next chunk of input. */
static struct io_plan *read_more(struct io_conn *conn, struct data *d)
{
	memset(d->buf, '\0', sizeof(d->buf));
	return io_read_partial(conn, d->buf, sizeof(d->buf),
			       &d->buflen, read_done, d);
}
/* A byte went out: record a ">" marker in the log, then send the next. */
static struct io_plan *write_done(struct io_conn *conn, struct data *d)
{
	size_t newlen = tal_count(d->pattern) + 1;

	tal_resize(&d->pattern, newlen);
	strcat(d->pattern, ">");
	return write_more(conn, d);
}
/* Dribble out the scratch buffer a single byte at a time. */
static struct io_plan *write_more(struct io_conn *conn, struct data *d)
{
	return io_write_partial(conn, d->buf, 1, &d->buflen, write_done, d);
}
/* Write-side initializer: claim output exclusivity before writing, so the
 * read half of the duplex connection is never serviced. */
static struct io_plan *write_priority_init(struct io_conn *conn, struct data *d)
{
	/* This should suppress the read */
	ok1(io_conn_out_exclusive(conn, true));
	return write_more(conn, d);
}
/* Called once for the single incoming connection: set up both halves. */
static struct io_plan *init_conn(struct io_conn *conn, struct data *d)
{
	/* Free listener so when conns close we exit io_loop */
	io_close_listener(d->l);
	/* Write side grabs exclusivity in write_priority_init, so only ">"
	 * markers should ever land in d->pattern (main() checks this). */
	return io_duplex(conn, read_more(conn, d), write_priority_init(conn, d));
}
/*
 * Create a TCP socket listening on @port on the wildcard address.
 *
 * On success, returns the listening fd and hands the getaddrinfo() result
 * to the caller via @info (caller must freeaddrinfo() it).  On failure,
 * returns -1.  BUGFIX: the original leaked the addrinfo list on every
 * error path after getaddrinfo() succeeded; free it before bailing out.
 */
static int make_listen_fd(const char *port, struct addrinfo **info)
{
	int fd, on = 1;
	struct addrinfo *addrinfo, hints;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_PASSIVE;
	hints.ai_protocol = 0;

	if (getaddrinfo(NULL, port, &hints, &addrinfo) != 0)
		return -1;

	fd = socket(addrinfo->ai_family, addrinfo->ai_socktype,
		    addrinfo->ai_protocol);
	if (fd < 0) {
		freeaddrinfo(addrinfo);
		return -1;
	}

	/* Best-effort: failure just means slower rebinds in back-to-back runs. */
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

	if (bind(fd, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0
	    || listen(fd, 1) != 0) {
		close(fd);
		freeaddrinfo(addrinfo);
		return -1;
	}

	*info = addrinfo;
	return fd;
}
int main(void)
{
	struct addrinfo *addrinfo = NULL;
	int fd, status;
	struct data d;

	/* This is how many tests you plan to run */
	plan_tests(8);
	fd = make_listen_fd(PORT, &addrinfo);
	ok1(fd >= 0);
	d.l = io_new_listener(NULL, fd, init_conn, &d);
	ok1(d.l);
	/* Flush before fork so buffered TAP output isn't emitted twice. */
	fflush(stdout);
	if (!fork()) {
		/* Child: connect, send two messages a second apart. */
		io_close_listener(d.l);
		fd = socket(addrinfo->ai_family, addrinfo->ai_socktype,
			    addrinfo->ai_protocol);
		if (fd < 0)
			exit(1);
		if (connect(fd, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0)
			exit(2);
		signal(SIGPIPE, SIG_IGN);
		if (write(fd, "1hellothere", strlen("1hellothere")) != strlen("1hellothere"))
			exit(3);
		sleep(1);
		if (write(fd, "1helloagain", strlen("1helloagain")) != strlen("1helloagain"))
			exit(4);
		close(fd);
		freeaddrinfo(addrinfo);
		exit(0);
	}
	freeaddrinfo(addrinfo);
	d.pattern = tal_arrz(NULL, char, 1);
	ok1(io_loop(NULL, NULL) == NULL);

	/* No trace of reads */
	ok1(strspn(d.pattern, ">") == strlen(d.pattern));
	tal_free(d.pattern);

	/* BUGFIX: wait() returns -1 on error, which is truthy, so the old
	 * ok1(wait(&status)) could never fail.  Test the result explicitly. */
	ok1(wait(&status) != -1);
	ok1(WIFEXITED(status));
	ok1(WEXITSTATUS(status) == 0);

	/* This exits depending on whether all tests passed */
	return exit_status();
}

73
ccan/ccan/timer/test/run-allocator.c

@ -0,0 +1,73 @@
#define CCAN_TIMER_DEBUG
/* Include the C files directly. */
#include <ccan/timer/timer.c>
#include <ccan/tap/tap.h>
struct timers_with_counters {
struct timers timers;
size_t num_alloc, num_free;
};
/* Counting allocator: bump num_alloc, then defer to malloc.  The cast is
 * valid because struct timers is the first member of the counter struct. */
static void *test_alloc(struct timers *timers, size_t len)
{
	struct timers_with_counters *tc = (struct timers_with_counters *)timers;

	tc->num_alloc++;
	return malloc(len);
}
/* Counting deallocator: only non-NULL frees are tallied. */
static void test_free(struct timers *timers, void *p)
{
	if (!p)
		return;
	((struct timers_with_counters *)timers)->num_free++;
	free(p);
}
static struct timemono timemono_from_nsec(unsigned long long nsec)
{
struct timemono epoch = { { 0, 0 } };
return timemono_add(epoch, time_from_nsec(nsec));
}
int main(void)
{
	struct timers_with_counters tc;
	/* Only t[0] is used below; the array mirrors other timer tests. */
	struct timer t[64];
	const struct timemono epoch = { { 0, 0 } };

	tc.num_alloc = tc.num_free = 0;
	plan_tests(12);

	/* Install counting hooks; tc.timers is the first member, so the
	 * hooks can recover the counters from the struct timers pointer. */
	timers_set_allocator(test_alloc, test_free);
	timers_init(&tc.timers, epoch);
	timer_init(&t[0]);
	/* This add/expire sequence results in exactly one level allocation
	 * (verified by the counter checks below). */
	timer_addmono(&tc.timers, &t[0],
		      timemono_from_nsec(TIMER_GRANULARITY << TIMER_LEVEL_BITS));
	timers_expire(&tc.timers, timemono_from_nsec(1));
	ok1(tc.num_alloc == 1);
	ok1(tc.num_free == 0);
	/* Deleting a timer must not free any level array. */
	timer_del(&tc.timers, &t[0]);
	ok1(tc.num_alloc == 1);
	ok1(tc.num_free == 0);
	/* Cleanup frees the one allocated level through our hook. */
	timers_cleanup(&tc.timers);
	ok1(tc.num_alloc == 1);
	ok1(tc.num_free == 1);

	/* Should restore defaults */
	timers_set_allocator(NULL, NULL);
	ok1(timer_alloc == timer_default_alloc);
	ok1(timer_free == timer_default_free);

	/* With defaults restored, the counters must stay untouched even
	 * though the same sequence allocates and frees a level again. */
	timers_init(&tc.timers, epoch);
	timer_addmono(&tc.timers, &t[0],
		      timemono_from_nsec(TIMER_GRANULARITY << TIMER_LEVEL_BITS));
	ok1(tc.num_alloc == 1);
	ok1(tc.num_free == 1);
	timers_cleanup(&tc.timers);
	ok1(tc.num_alloc == 1);
	ok1(tc.num_free == 1);

	/* This exits depending on whether all tests passed */
	return exit_status();
}

28
ccan/ccan/timer/timer.c

@ -11,6 +11,30 @@ struct timer_level {
struct list_head list[PER_LEVEL];
};
/* Default allocator: plain malloc; the @timers argument is unused here
 * but lets replacement allocators carry per-instance state. */
static void *timer_default_alloc(struct timers *timers, size_t len)
{
	return malloc(len);
}
/* Default deallocator, paired with timer_default_alloc. */
static void timer_default_free(struct timers *timers, void *p)
{
	free(p);
}
/* Current allocation hooks; replaced (or reset) via timers_set_allocator(). */
static void *(*timer_alloc)(struct timers *, size_t) = timer_default_alloc;
static void (*timer_free)(struct timers *, void *) = timer_default_free;
/**
 * timers_set_allocator - install replacement malloc/free hooks for
 * timer level arrays.  Passing NULL for either hook restores that
 * hook's default.
 *
 * The second parameter is named free_fn (the header declares it as
 * "free"): naming it free here shadows the stdlib free(), which
 * trips -Wshadow and makes the real free() uncallable in this scope.
 * C parameter names are not part of the ABI, so callers are unaffected.
 */
void timers_set_allocator(void *(*alloc)(struct timers *, size_t len),
			  void (*free_fn)(struct timers *, void *p))
{
	timer_alloc = alloc ? alloc : timer_default_alloc;
	timer_free = free_fn ? free_fn : timer_default_free;
}
static uint64_t time_to_grains(struct timemono t)
{
return t.ts.tv_sec * ((uint64_t)1000000000 / TIMER_GRANULARITY)
@ -139,7 +163,7 @@ static void add_level(struct timers *timers, unsigned int level)
unsigned int i;
struct list_head from_far;
l = malloc(sizeof(*l));
l = timer_alloc(timers, sizeof(*l));
if (!l)
return;
@ -520,5 +544,5 @@ void timers_cleanup(struct timers *timers)
unsigned int l;
for (l = 0; l < ARRAY_SIZE(timers->level); l++)
free(timers->level[l]);
timer_free(timers, timers->level[l]);
}

11
ccan/ccan/timer/timer.h

@ -162,6 +162,17 @@ struct timer *timers_expire(struct timers *timers, struct timemono expire);
*/
struct timers *timers_check(const struct timers *t, const char *abortstr);
/**
* timers_set_allocator - set malloc/free functions.
* @alloc: allocator to use
* @free: deallocator to use (@p is NULL or a return from @alloc)
*
* This replaces the underlying malloc/free with these allocators.
* Setting either one to NULL restores that function's default.
*/
void timers_set_allocator(void *(*alloc)(struct timers *, size_t len),
void (*free)(struct timers *, void *p));
#ifdef CCAN_TIMER_DEBUG
#include <stdio.h>

5
common/memleak.c

@ -87,10 +87,13 @@ static void children_into_htable(const void *exclude1, const void *exclude2,
|| strends(name, "struct linkable"))
continue;
/* ccan/io allocates pollfd array. */
/* ccan/io allocates its pollfd array (and io_plan array) without a tal parent. */
if (strends(name, "struct pollfd[]") && !tal_parent(i))
continue;
if (strends(name, "struct io_plan *[]") && !tal_parent(i))
continue;
/* Don't add tmpctx. */
if (streq(name, "tmpctx"))
continue;

Loading…
Cancel
Save