From 6a366d1c433c48f98ad41b4f8a12da4e879cfe54 Mon Sep 17 00:00:00 2001 From: Upkeep Date: Sat, 15 Oct 2022 17:24:32 +0200 Subject: [PATCH] init: initial commit --- .gitignore | 56 +++ Makefile.am | 5 + configure.ac | 27 + src/Makefile.am | 8 + src/defer.h | 5 + src/dynarr.c | 67 +++ src/dynarr.h | 53 ++ src/htonl.h | 12 + src/linked_list.c | 50 ++ src/linked_list.h | 32 ++ src/server.c | 0 src/server.h | 10 + src/test/main.c | 67 +++ src/transport/tcp.c | 530 ++++++++++++++++++++ src/transport/tcp.h | 157 ++++++ src/uthash.h | 1140 +++++++++++++++++++++++++++++++++++++++++++ 16 files changed, 2219 insertions(+) create mode 100644 .gitignore create mode 100644 Makefile.am create mode 100644 configure.ac create mode 100644 src/Makefile.am create mode 100644 src/defer.h create mode 100644 src/dynarr.c create mode 100644 src/dynarr.h create mode 100644 src/htonl.h create mode 100644 src/linked_list.c create mode 100644 src/linked_list.h create mode 100644 src/server.c create mode 100644 src/server.h create mode 100644 src/test/main.c create mode 100644 src/transport/tcp.c create mode 100644 src/transport/tcp.h create mode 100644 src/uthash.h diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f6a2f38 --- /dev/null +++ b/.gitignore @@ -0,0 +1,56 @@ +.ccls-cache +/build + +# http://www.gnu.org/software/automake + +Makefile.in +/ar-lib +/mdate-sh +/py-compile +/test-driver +/ylwrap +.deps/ +.dirstamp + +# http://www.gnu.org/software/autoconf + +autom4te.cache +/autoscan.log +/autoscan-*.log +/aclocal.m4 +/compile +/config.cache +/config.guess +/config.h.in +/config.log +/config.status +/config.sub +/configure +/configure.scan +/configure~ +/depcomp +/install-sh +/missing +/stamp-h1 + +# https://www.gnu.org/software/libtool/ + +/ltmain.sh + +# http://www.gnu.org/software/texinfo + +/texinfo.tex + +# http://www.gnu.org/software/m4/ + +m4/libtool.m4 +m4/ltoptions.m4 +m4/ltsugar.m4 +m4/ltversion.m4 +m4/lt~obsolete.m4 + +# Generated Makefile +# (meta build 
system like autotools, +# can automatically generate from config.status script +# (which is called by configure script)) +Makefile diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..5b7fd19 --- /dev/null +++ b/Makefile.am @@ -0,0 +1,5 @@ +AC_CFLAGS = -fno-strict-aliasing + +SUBDIRS = src + +ACLOCAL_AMFLAGS = -I m4 diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..ff43722 --- /dev/null +++ b/configure.ac @@ -0,0 +1,27 @@ +AC_INIT([ipsp-rpc], [0.1], [bug-report@address]) +AM_INIT_AUTOMAKE([foreign subdir-objects -Wall -Werror]) + +AC_PROG_C +AM_PROG_AR +LT_INIT +AC_CONFIG_MACRO_DIRS([m4]) + +AC_SEARCH_LIBS([uv_loop_init], [uv], [], + [AC_MSG_ERROR(Missing the libuv library.)]) + +AC_ARG_WITH([mpack], + [AS_HELP_STRING([--without-mpack], + [disable support for mpack])], + [], + [with_mpack=yes]) + +AS_IF([test "x$with_mpack" = xyes], + [dnl AC_CONFIG_SUBDIRS([mpack]) + AC_SUBST([HAVE_MPACK], [1])], + [AC_SUBST([HAVE_MPACK], [0])]) + +AC_CONFIG_HEADERS([config.h]) + +AC_CONFIG_FILES([Makefile src/Makefile]) + +AC_OUTPUT diff --git a/src/Makefile.am b/src/Makefile.am new file mode 100644 index 0000000..125176d --- /dev/null +++ b/src/Makefile.am @@ -0,0 +1,8 @@ +lib_LTLIBRARIES = libipsp.la +libipsp_la_SOURCES = htonl.h uthash.h dynarr.h dynarr.c linked_list.h linked_list.c transport/tcp.h transport/tcp.c server.c server.h +pkginclude_HEADERS = server.h + + +bin_PROGRAMS = ipsptest +ipsptest_SOURCES = test/main.c +ipsptest_LDADD = libipsp.la diff --git a/src/defer.h b/src/defer.h new file mode 100644 index 0000000..19bafce --- /dev/null +++ b/src/defer.h @@ -0,0 +1,5 @@ +#ifndef RPC_DEFER_H +#define RPC_DEFER_H + + +#endif // RPC_DEFER_H diff --git a/src/dynarr.c b/src/dynarr.c new file mode 100644 index 0000000..2352119 --- /dev/null +++ b/src/dynarr.c @@ -0,0 +1,67 @@ +#include "dynarr.h" +#include + +int +dynarr_init(dynarr_t *d, size_t n) +{ + void *data = calloc(n, 1); + if(data == NULL) return -1; + + d->data = 
data; + d->cap = n; + d->sz = 0; + + return 0; +} + +int +dynarr_release(dynarr_t *d) +{ + free(d->data); + return 0; +} + +int +dynarr_reserve(dynarr_t *d, size_t cap) +{ + if(d->cap >= cap) return 0; + + void *ndata = realloc(d->data, cap); + if(ndata == NULL) return -1; + + d->cap = cap; + d->data = ndata; + return 0; +} + +int +dynarr_clear(dynarr_t *d, u_int8_t c) +{ + memset(d->data, c, d->cap); + d->sz = 0; + return 0; +} + +int +dynarr_shrink(dynarr_t *d) +{ + if(d->sz == d->cap) return 0; + + void *ndata = realloc(d->data, d->sz); + if(ndata == NULL) return -1; + + d->cap = d->sz; + d->data = ndata; + return 0; +} + +int +dynarr_append(dynarr_t *d, void *buf, size_t sz) +{ + if(d->sz + sz > d->cap) + dynarr_reserve(d,(d->sz + sz) < (2 * d->sz) + ? (2 * d->sz) : (d->sz + sz)); + memcpy(d->data + d->sz, buf, sz); + d->sz += sz; + return 0; +} diff --git a/src/dynarr.h b/src/dynarr.h new file mode 100644 index 0000000..06d2ad1 --- /dev/null +++ b/src/dynarr.h @@ -0,0 +1,53 @@ +#ifndef RPC_DYNARR_H +#define RPC_DYNARR_H + +#include + +typedef struct dynarr_s { + void *data; + size_t sz, cap; +} dynarr_t; + +/** + * Initializes a dynarr_t with at least #n bytes of capacity. + * Returns -1 if unable to allocate enough memory. + */ +int +dynarr_init(dynarr_t *d, size_t n); + +/** + * Releases allocated memory associated with dynarr_t #d. + */ +int +dynarr_release(dynarr_t *d); + +/** + * Fill the buffer #d with character #c and set its size to 0. + */ +int +dynarr_clear(dynarr_t *d, u_int8_t c); + +/** + * Reserves at least #cap bytes in #d's buffer. + * Returns -1 if unable to allocate enough memory. + */ +int +dynarr_reserve(dynarr_t *d, size_t cap); + +/** + * Shrinks #d's buffer to be as small as the data inside. No-op if capacity + * matches size. + */ +int +dynarr_shrink(dynarr_t *d); + +/** + * Copies over #sz bytes from #buf to the end of #d's data, possibly expanding + * the buffer by at least the amount necessary to store both the data and #buf. 
+ * + * Returns -1 if unable to allocate enough memory. + */ +int +dynarr_append(dynarr_t *d, void *buf, size_t sz); + +#endif // RPC_DYNARR_H diff --git a/src/htonl.h b/src/htonl.h new file mode 100644 index 0000000..a3e7681 --- /dev/null +++ b/src/htonl.h @@ -0,0 +1,12 @@ +#ifndef RPC_HTONL_H +#define RPC_HTONL_H + +#include + +#ifdef HAVE_UNISTD_H +#include +#else +#include +#endif + +#endif //RPC_HTONL_H diff --git a/src/linked_list.c b/src/linked_list.c new file mode 100644 index 0000000..f151c27 --- /dev/null +++ b/src/linked_list.c @@ -0,0 +1,50 @@ +#include "linked_list.h" + +list_node_t * +llist_node(void *data) +{ + list_node_t *node = malloc(sizeof(list_node_t)); + if(node == NULL) return node; + + node->data = data; + node->next = NULL; + + return node; +} + +list_node_t * +llist_append(list_node_t *list, void *data) +{ + for(; list->next != NULL; list = list->next); + list_node_t* new = llist_node(data); + list->next = new; + return new; +} + +list_node_t * +llist_prepend(list_node_t **list, void *data) +{ + list_node_t* new = llist_node(data); + if(new != NULL) { + new->next = *list; + *list = new; + } + + return new; +} + +void * +llist_at(list_node_t *list, size_t n) +{ + for(int i = 0; i < n && list != NULL; list = list->next, i++); + return list; +} + + +size_t +llist_length(const list_node_t *list) +{ + int i = 0; + for(; list != NULL; list = list->next, ++i); + return i; +} diff --git a/src/linked_list.h b/src/linked_list.h new file mode 100644 index 0000000..e1b6b71 --- /dev/null +++ b/src/linked_list.h @@ -0,0 +1,32 @@ +#ifndef RPC_LINKED_LIST_H +#define RPC_LINKED_LIST_H + +#include + +typedef struct list_node_s { + struct list_node_s *next; + void *data; +} list_node_t; + +list_node_t * +llist_node(void *data); + +list_node_t * +llist_append(list_node_t *list, void *data); + +list_node_t * +llist_prepend(list_node_t **list, void *data); + +/** + * Inserts at #n, or appends if shorter than #n. 
+ */ +list_node_t * +llist_insert(list_node_t **list, size_t n, void *data); + +void * +llist_at(list_node_t *list, size_t n); + +size_t +llist_length(const list_node_t *list); + +#endif // RPC_LINKED_LIST_H diff --git a/src/server.c b/src/server.c new file mode 100644 index 0000000..e69de29 diff --git a/src/server.h b/src/server.h new file mode 100644 index 0000000..d191d48 --- /dev/null +++ b/src/server.h @@ -0,0 +1,10 @@ +#ifndef RPC_SERVER_H +#define RPC_SERVER_H + +#include + +typedef struct { + +} server_t; + +#endif //RPC_SERVER_H diff --git a/src/test/main.c b/src/test/main.c new file mode 100644 index 0000000..c1b7ed1 --- /dev/null +++ b/src/test/main.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include + +#include "../server.h" +#include "../transport/tcp.h" + +static uv_loop_t loop; +static uv_tcp_t server_sock; +static rpc_tcp_server_t *server; + +static void +catch_sigint(int signal) +{ + rpc_tcp_close(server, NULL, NULL); + uv_loop_close(&loop); + exit(0); +} + +static void +on_msg(rpc_tcp_stream_t *stream, int status, void *buf, ssize_t n) +{ + if(status < 0) { + fprintf(stderr, "on_msg() recieved status = %d, %s\n", + status, uv_err_name(status)); + if(n == -1) + rpc_tcp_conn_close((rpc_tcp_conn_t*)stream, NULL, NULL); + return; + } + + printf("on_msg() recieved message on stream (%d): ", + stream == NULL ? 
0 : stream->id); + printf("%.*s\n", (int)n, (char *)buf); + + /* rpc_tcp_conn_close(stream->owner, NULL, NULL); */ + free(buf); +} + +static void +on_new_conn(rpc_tcp_conn_t *conn, int status) +{ + rpc_tcp_conn_read_start(conn, on_msg); +} + +int +main() +{ + uv_loop_init(&loop); + + printf("rpc: main() started\n\n"); + + uv_tcp_t server_sock; + uv_tcp_init(&loop, &server_sock); + + struct sockaddr_in s; + uv_ip4_addr("0.0.0.0", 4321, &s); + uv_tcp_bind(&server_sock, (const struct sockaddr*)&s, 0); + + server = rpc_tcp_listen(&server_sock, 10, on_new_conn); + signal(SIGINT, catch_sigint); + + uv_run(&loop, UV_RUN_DEFAULT); + + uv_loop_close(&loop); + return 0; +} diff --git a/src/transport/tcp.c b/src/transport/tcp.c new file mode 100644 index 0000000..1aee0e4 --- /dev/null +++ b/src/transport/tcp.c @@ -0,0 +1,530 @@ +#include "tcp.h" +#include "../htonl.h" +#include "../uthash.h" +#include "../dynarr.h" + +#include + +typedef struct { + rpc_tcp_conn_new_cb new_conn; + rpc_tcp_server_t *server; +} rpc_tcp_state_t; + +static void +init_tcp_server(rpc_tcp_server_t *s, uv_tcp_t *sock) +{ + s->_conns = NULL; + s->_sock = sock; +} + +static void +init_tcp_state(rpc_tcp_state_t *s, rpc_tcp_conn_new_cb cb, + rpc_tcp_server_t *server) +{ + s->new_conn = cb; + s->server = server; + return; +} + +static void +init_tcp_conn(rpc_tcp_conn_t *c, rpc_tcp_server_t *_server, + UT_hash_handle *hh, uv_tcp_t *_client, dynarr_t *buf) +{ + c->_client = _client; + c->_server = _server; + c->_hh = hh; + c->_buf = buf; + c->_streams = NULL; + c->stream_id = 0; +} + +static void +on_new_connection(uv_stream_t *stream, int status) +{ + rpc_tcp_conn_new_cb cb = ((rpc_tcp_state_t *)stream->data)->new_conn; + rpc_tcp_server_t *server = ((rpc_tcp_state_t *)stream->data)->server; + + if(status < 0) { + goto ERR_CB; + } + + uv_tcp_t *client = (uv_tcp_t*) malloc(sizeof(uv_tcp_t)); + if(client == NULL) { + status = -1; + goto ERR_CB; + } + + uv_tcp_init(stream->loop, client); + + status = 
uv_accept(stream, (uv_stream_t*) client); + if(status < 0) { + goto FREE_CLIENT; + } + + rpc_tcp_conn_t *conn = malloc(sizeof(rpc_tcp_conn_t)); + if(conn == NULL) goto FREE_CLIENT; + + dynarr_t *buf = malloc(sizeof(dynarr_t)); + if(buf == NULL) { status = -1; goto FREE_CONNECTION; } + status = dynarr_init(buf, 65536); + if(status < 0) { goto FREE_BUF; } + + UT_hash_handle *hh = malloc(sizeof(UT_hash_handle)); + if(hh == NULL) { status = -1; goto FREE_BUF; } + + init_tcp_conn(conn, server, hh, client, buf); + // Keep a map of all connections, indexed by their client socket ptr + HASH_ADD(_hh, server->_conns, _client, sizeof(void*), conn); + + cb(conn, 0); + + return; +FREE_BUF: + free(buf); +FREE_CONNECTION: + free(conn); +FREE_CLIENT: + free(client); +ERR_CB: + free(stream->data); + cb(NULL, status); +} + +rpc_tcp_server_t * +rpc_tcp_listen(uv_tcp_t *sock, int backlog, rpc_tcp_conn_new_cb cb) +{ + rpc_tcp_server_t *ret = NULL; + + rpc_tcp_server_t *server = malloc(sizeof(rpc_tcp_server_t)); + if(server == NULL) goto RET; else ret = server; + + init_tcp_server(server, sock); + + rpc_tcp_state_t *state = malloc(sizeof(rpc_tcp_state_t)); + if(state == NULL) { ret = NULL; goto FREE_SERVER; } + init_tcp_state(state, cb, server); + + sock->data = state; + + int r = uv_listen((uv_stream_t *)sock, backlog, on_new_connection); + if(r < 0) { ret = NULL; goto FREE_STATE; } + + // `ret' should contain a pointer to the server at this point + goto RET; +FREE_STATE: + free(state); +FREE_SERVER: + free(server); +RET: + return ret; +} + +static void +cleanup_conn(uv_handle_t *h) +{ + rpc_tcp_conn_t *conn = h->data; + dynarr_release(conn->_buf); + free(conn->_buf); + free(conn->_client); + free(conn->_hh); + free(conn); +} + +int +rpc_tcp_conn_close(rpc_tcp_conn_t *conn, rpc_tcp_conn_close_cb conn_cb, + rpc_tcp_stream_close_cb stream_cb) +{ + rpc_tcp_stream_t *stream, *tmp; + HASH_ITER(_hh, conn->_streams, stream, tmp) { + HASH_DELETE(_hh, conn->_streams, stream); + if(stream_cb != 
NULL) stream_cb(stream, 0); + free(stream); + free(stream->_hh); + } + + HASH_DELETE(_hh, conn->_server->_conns, conn); + if(conn_cb != NULL) conn_cb(conn, 0); + conn->_client->data = conn; + uv_close((uv_handle_t*)conn->_client, cleanup_conn); + return 0; +} + +static void +cleanup_server(uv_handle_t *h) +{ + // All connections, streams, and hashtable should have been closed & freed + // at this point. This only leaves the struct itself. + rpc_tcp_server_t *s = h->data; + free(s); +} + +int +rpc_tcp_close(rpc_tcp_server_t *server, rpc_tcp_conn_close_cb conn_cb, + rpc_tcp_stream_close_cb stream_cb) +{ + rpc_tcp_conn_t *conn, *tmp; + HASH_ITER(_hh, server->_conns, conn, tmp) { + HASH_DELETE(_hh, server->_conns, conn); + + // Copy-paste of rpc_tcp_conn_close(), sans removing itself from parent + // server + { + rpc_tcp_stream_t *stream, *tmp; + HASH_ITER(_hh, conn->_streams, stream, tmp) { + HASH_DELETE(_hh, conn->_streams, stream); + if(stream_cb != NULL) stream_cb(stream, 0); + free(stream); + free(stream->_hh); + } + + if(conn_cb != NULL) conn_cb(conn, 0); + conn->_client->data = conn; + uv_close((uv_handle_t*)conn->_client, cleanup_conn); + } + } + + // Allocated in `rpc_tcp_listen()' to pass user-provided callbacks, etc. 
+ free(server->_sock->data); + + server->_sock->data = server; + uv_close((uv_handle_t*)server->_sock, cleanup_server); + return 0; +} + +typedef struct { + rpc_tcp_stream_new_cb cb; + rpc_tcp_stream_t *s; + int r; +} rpc_new_stream_state_t; + +static void +init_tcp_stream(rpc_tcp_stream_t *s, + uv_stream_t *_stream, + rpc_tcp_conn_t *_owner, + UT_hash_handle *_hh, + uint16_t id) +{ + s->_stream = _stream; + s->owner = _owner; + s->id = id; + +} + +static void +defer_new_stream_cb(uv_timer_t *t) +{ + rpc_new_stream_state_t *state = t->data; + rpc_tcp_stream_new_cb cb = state->cb; + rpc_tcp_stream_t *s = state->s; + int r = state->r; + + uv_close((uv_handle_t *)t, (uv_close_cb)free); + free(state); + + cb(s, r); +} + +int +rpc_tcp_stream_open(rpc_tcp_conn_t *conn, rpc_tcp_stream_new_cb cb) +{ + int status = 0; + + rpc_tcp_stream_t *stream = malloc(sizeof(rpc_tcp_stream_t)); + if(stream == NULL) { + status = -1; + goto RET; + } + + // TODO: keep track of and reuse freed stream ids + + UT_hash_handle *hh = malloc(sizeof(UT_hash_handle)); + if(hh == NULL) { status = -1; goto FREE_STREAM; } + + init_tcp_stream(stream, (uv_stream_t *)conn->_client, + conn, hh, conn->stream_id++); + // Keep track of all streams by their id in a hashmap + HASH_ADD(_hh, conn->_streams, id, 2, stream); + + + // TODO: notify peer of new stream instead of just tagging frames + + uv_timer_t *t = malloc(sizeof(uv_timer_t)); + if(t == NULL) { + status = -1; + goto FREE_HH; + } + + // Yield execution with a next-iteration timer + uv_timer_init(conn->_server->_sock->loop, t); + uv_timer_start(t, defer_new_stream_cb, 0, 0); + + goto RET; +FREE_HH: + free(hh); +FREE_STREAM: + free(stream); +RET: + return status; +} + +int +rpc_tcp_stream_close(rpc_tcp_stream_t *stream, rpc_tcp_stream_close_cb cb) +{ + // Delete the stream from the owning connection's hashmap + HASH_DELETE(_hh, stream->owner->_streams, stream); + if(cb != NULL) cb(stream, 0); + free(stream->_hh); + free(stream); + return 0; +} + + 
+typedef struct { + rpc_tcp_stream_msg_cb cb; + rpc_tcp_stream_t *stream; + uv_buf_t *bufs; +} stream_send_state_t; + +static void +on_write_complete(uv_write_t *handle, int status) +{ + stream_send_state_t *data = handle->data; + rpc_tcp_stream_msg_cb cb = data->cb; + rpc_tcp_stream_t *stream = data->stream; + uv_buf_t buf = data->bufs[1]; + + free(data->bufs[0].base); + free(data->bufs); + free(data); + uv_close((uv_handle_t*)handle, (uv_close_cb)free); + + cb(stream, status, buf.base, buf.len); +} + +int +rpc_tcp_stream_send(rpc_tcp_stream_t *stream, void *buf, uint32_t n, + rpc_tcp_stream_msg_cb cb) +{ + int status = 0; + + uv_write_t *w = malloc(sizeof(uv_write_t)); + if(w == NULL) { + status = -1; + goto RET; + } + + uv_buf_t *bufs = malloc(sizeof(uv_buf_t) * 2); + if(bufs == NULL) { + status = -1; + goto FREE_REQ; + } + + rpc_tcp_msg_header *header = malloc(sizeof(rpc_tcp_msg_header)); + if(header == NULL) { + status = -1; + goto FREE_BUFS; + } + + stream_send_state_t *data = malloc(sizeof(stream_send_state_t)); + if(data == NULL) { + status = -1; + goto FREE_HEADER; + } + + data->cb = cb; + data->stream = stream; + data->bufs = bufs; + + header->sz = htonl(n); + header->id = htons(stream->id); + + bufs[0].base = (char *)header; + bufs[0].len = sizeof(rpc_tcp_msg_header); + + bufs[1].base = buf; + bufs[1].len = n; + + uv_write(w, stream->_stream, bufs, 2, on_write_complete); + + return 0; +FREE_HEADER: + free(header); +FREE_BUFS: + free(bufs); +FREE_REQ: + free(w); +RET: + return status; +} + +typedef struct { + rpc_tcp_conn_t *owner; + size_t total_sz, left_sz; + uint16_t stream_id; + unsigned char is_reading_msg; +} read_state; + +static void +read_alloc(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) +{ + buf->base = malloc(suggested_size); + buf->len = suggested_size; +} + +static void +read_init(read_state *s, void *buf, ssize_t nread); + +static void +read_body(read_state *s, void *buf, ssize_t nread) +{ + rpc_tcp_conn_t *c = s->owner; + 
ssize_t leftover = s->left_sz - nread; + +#define HAND_OFF_MSG \ + void *message = malloc(s->total_sz); \ + memcpy(message, c->_buf->data, c->_buf->sz); \ + dynarr_clear(c->_buf, 0); \ + if (s->stream_id == 0) { \ + if(c->_recv != NULL) \ + c->_recv(NULL, 0, message, s->total_sz); \ + else free(message); \ + } \ + else if(s->stream_id != 0) { \ + rpc_tcp_stream_t *stream = NULL; \ + HASH_FIND(_hh, c->_streams, &s->stream_id, sizeof(s->stream_id), \ + stream); \ + if(stream != NULL) { \ + if(stream->_recv != NULL) \ + c->_recv(stream, 0, message, s->total_sz); \ + else free(message); \ + } else free(message); \ + } + + // We've read the entire message, exactly + if (leftover == 0) { + dynarr_append(c->_buf, buf, s->left_sz); + HAND_OFF_MSG; + } + + // We've read a chunk of the message + else if(leftover > 0) { + dynarr_append(c->_buf, buf, s->left_sz); + } + + // We've read more than just the current message + else if (leftover < 0) { + dynarr_append(c->_buf, buf, s->left_sz); + + HAND_OFF_MSG; + + s->is_reading_msg = 0; + read_init(s, buf + (nread + leftover), -leftover); + } +#undef HAND_OFF_MSG +} + +static void +read_init(read_state *s, void *buf, ssize_t nread) +{ + if(nread < sizeof(rpc_tcp_msg_header)) { + // TODO: handle this case + fprintf(stderr, "read_cb(stub): initial message packet smaller " + "than header"); + exit(2); + } + + s->is_reading_msg = 1; + s->total_sz = ntohl(*(uint32_t *)(buf + offsetof(rpc_tcp_msg_header, sz))); + s->stream_id = ntohs(*(uint16_t *)(buf + offsetof(rpc_tcp_msg_header, id))); + s->left_sz = s->total_sz; + + size_t leftover = nread - sizeof(rpc_tcp_msg_header); + if(leftover > 0) { + read_body(s, buf + sizeof(rpc_tcp_msg_header), leftover); + } +} + +static void +read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) +{ + if(nread == 0) goto FREE_BUF; + if(nread < 0) { + read_state *s = stream->data; + s->is_reading_msg = 0; + dynarr_clear(s->owner->_buf, 0); + + s->owner->_recv((rpc_tcp_stream_t*)s->owner, nread, 
NULL, -1); + + // Pipe broken: prepare to destroy the connection + if(nread == UV_EOF) { + free(s); + } + + goto FREE_BUF; + } + + read_state *s = (read_state *)stream->data; + rpc_tcp_conn_t *c = s->owner; + + if(s->is_reading_msg) { + // Already in the middle of reading a message + read_body(s, buf->base, nread); + } else { + // Read the message starting from the header + read_init(s, buf->base, nread); + } + +FREE_BUF: + if(buf != NULL && buf->base != NULL) + free(buf->base); +} + +int +rpc_tcp_conn_read_start(rpc_tcp_conn_t *conn, rpc_tcp_stream_msg_cb cb) +{ + read_state *state = malloc(sizeof(read_state)); + if(state == NULL) return -1; + *state = (read_state){ + .owner = conn, + .total_sz = 0, + .left_sz = 0, + .is_reading_msg = 0 + }; + + conn->_client->data = state; + conn->_recv = cb; + + uv_read_start((uv_stream_t*)conn->_client, read_alloc, read_cb); +} + +int +rpc_tcp_conn_read_stop(rpc_tcp_conn_t *conn) +{ + conn->_recv = NULL; + rpc_tcp_conn_read_stop_all(conn); + free(conn->_client->data); + return 0; +} + +int +rpc_tcp_conn_read_stop_all(rpc_tcp_conn_t *conn) +{ + rpc_tcp_stream_t *stream, *tmp; + HASH_ITER(_hh, conn->_streams, stream, tmp) { + stream->_recv = NULL; + } +} + +int +rpc_tcp_stream_read_start(rpc_tcp_stream_t *stream, rpc_tcp_stream_msg_cb cb) +{ + stream->_recv = cb; + return 0; +} + +int +rpc_tcp_stream_read_stop(rpc_tcp_stream_t *stream) +{ + stream->_recv = NULL; + return 0; +} diff --git a/src/transport/tcp.h b/src/transport/tcp.h new file mode 100644 index 0000000..2f31a4b --- /dev/null +++ b/src/transport/tcp.h @@ -0,0 +1,157 @@ +#ifndef RPC_TRANSPORT_H +#define RPC_TRANSPORT_H + +#include + +#pragma pack(1) +typedef struct { + uint32_t sz; + uint16_t id; +} rpc_tcp_msg_header; + +// Forward decl for opaque hashtable impl +typedef struct UT_hash_handle UT_hash_handle; +typedef struct rpc_tcp_conn_s rpc_tcp_conn_t; +typedef struct rpc_tcp_server_s rpc_tcp_server_t; +typedef struct rpc_tcp_stream_s rpc_tcp_stream_t; + +typedef 
void(*rpc_tcp_stream_msg_cb)(rpc_tcp_stream_t*, int status, + void *buf, ssize_t n); +typedef rpc_tcp_stream_msg_cb rpc_tcp_msg_cb; + +typedef struct rpc_tcp_stream_s { + void *data; + uint16_t id; + rpc_tcp_msg_cb _recv; + uv_stream_t *_stream; + UT_hash_handle *_hh; + rpc_tcp_conn_t *owner; +} rpc_tcp_stream_t; + +typedef struct dynarr_s dynarr_t; + +typedef struct rpc_tcp_conn_s { + void *data; + uint16_t stream_id; + dynarr_t *_buf; + rpc_tcp_msg_cb _recv; + UT_hash_handle *_hh; + rpc_tcp_server_t *_server; + uv_tcp_t *_client; + rpc_tcp_stream_t *_streams; +} rpc_tcp_conn_t; + +typedef struct rpc_tcp_server_s { + rpc_tcp_conn_t *_conns; + uv_tcp_t *_sock; +} rpc_tcp_server_t; + +typedef void(*rpc_tcp_server_close_cb)(rpc_tcp_server_t*, int status); + +typedef void(*rpc_tcp_conn_new_cb)(rpc_tcp_conn_t*, int status); +typedef void(*rpc_tcp_conn_close_cb)(rpc_tcp_conn_t*, int status); + +typedef void(*rpc_tcp_stream_new_cb)(rpc_tcp_stream_t*, int status); +typedef void(*rpc_tcp_stream_close_cb)(rpc_tcp_stream_t*, int status); + +/** + * Upgrade a bound uv_tcp_t socket #sock into an #rpc_tcp_server_t. + * Callback #cb will be called on each new connection. + */ +rpc_tcp_server_t * +rpc_tcp_listen(uv_tcp_t *sock, int backlog, rpc_tcp_conn_new_cb cb); + +/** + * Closes a TCP server #server. + * + * #conn_cb (#stream_cb) will be called when closing each connection (stream), + * to give you a chance to release any user-allocated data. You should free any + * user-allocated resources on the server before calling this function. + * + * All pointers to the server, and any connection or stream belonging to the + * server are invalidated after a call to this function. + */ +int +rpc_tcp_close(rpc_tcp_server_t *server, rpc_tcp_conn_close_cb conn_cb, + rpc_tcp_stream_close_cb stream_cb); + +/** + * Closes the connection #conn and all its streams. + * + * #stream_cb will be called when closing each stream to give you a chance to + * release any user-allocated data. 
#conn_cb will be called when closing the + * connection to give you a chance to release any user-allocated data. + * + * The connection will be removed from its server after this call. All pointers + * to the connection or its streams are invalidated after a call to this + * function. + * + */ +int +rpc_tcp_conn_close(rpc_tcp_conn_t *conn, rpc_tcp_conn_close_cb conn_cb, + rpc_tcp_stream_close_cb stream_cb); + +/** + * Starts reading for messages on the connection's default stream. + * + * Callback stream pointer argument will be NULL. Ownership of the returned + * buffer (if `status == 0') is transferred to the callback. + */ +int +rpc_tcp_conn_read_start(rpc_tcp_conn_t *conn, rpc_tcp_stream_msg_cb cb); + +/** + * Stops reading for messages on the connection altogether. + */ +int +rpc_tcp_conn_read_stop(rpc_tcp_conn_t *conn); + +/** + * Stops reading for messages on all of the connection's streams (but not the + * connection itself). + */ +int +rpc_tcp_conn_read_stop_all(rpc_tcp_conn_t *conn); + +/** + * Opens a stream on this connection. #cb will be called with the opened stream + * if successful, or `status < 0' otherwise. + */ +int +rpc_tcp_stream_open(rpc_tcp_conn_t *conn, rpc_tcp_stream_new_cb cb); + +/** + * Closes the stream #stream. #cb will called when closing the stream to give + * you a change to release any user-allocated data. + * + * The stream will be removed from its connection after this call. All pointers + * to the stream are invalidated after a call to this function. + */ +int +rpc_tcp_stream_close(rpc_tcp_stream_t *stream, rpc_tcp_stream_close_cb cb); + +/** + * Send message #buf of length #n over stream #stream. Calls #cb after + * finishing. + * + * The provided buffer and length will be returned untouched to the callback, + * with `status == 0' if successful. + */ +int +rpc_tcp_stream_send(rpc_tcp_stream_t *stream, void *buf, uint32_t n, + rpc_tcp_stream_msg_cb cb); + +/** + * Start listening to messages on #stream with #cb. 
+ */ +int +rpc_tcp_stream_read_start(rpc_tcp_stream_t *stream, rpc_tcp_stream_msg_cb cb); + +/** + * Stop listening to messages on #stream. + */ +int +rpc_tcp_stream_read_stop(rpc_tcp_stream_t *stream); + + +#endif //RPC_TRANSPORT_H diff --git a/src/uthash.h b/src/uthash.h new file mode 100644 index 0000000..b101e3a --- /dev/null +++ b/src/uthash.h @@ -0,0 +1,1140 @@ +/* +Copyright (c) 2003-2022, Troy D. Hanson https://troydhanson.github.io/uthash/ +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef UTHASH_H +#define UTHASH_H + +#define UTHASH_VERSION 2.3.0 + +#include /* memcmp, memset, strlen */ +#include /* ptrdiff_t */ +#include /* exit */ + +#if defined(HASH_DEFINE_OWN_STDINT) && HASH_DEFINE_OWN_STDINT +/* This codepath is provided for backward compatibility, but I plan to remove it. 
*/ +#warning "HASH_DEFINE_OWN_STDINT is deprecated; please use HASH_NO_STDINT instead" +typedef unsigned int uint32_t; +typedef unsigned char uint8_t; +#elif defined(HASH_NO_STDINT) && HASH_NO_STDINT +#else +#include /* uint8_t, uint32_t */ +#endif + +/* These macros use decltype or the earlier __typeof GNU extension. + As decltype is only available in newer compilers (VS2010 or gcc 4.3+ + when compiling c++ source) this code uses whatever method is needed + or, for VS2008 where neither is available, uses casting workarounds. */ +#if !defined(DECLTYPE) && !defined(NO_DECLTYPE) +#if defined(_MSC_VER) /* MS compiler */ +#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ +#define DECLTYPE(x) (decltype(x)) +#else /* VS2008 or older (or VS2010 in C mode) */ +#define NO_DECLTYPE +#endif +#elif defined(__MCST__) /* Elbrus C Compiler */ +#define DECLTYPE(x) (__typeof(x)) +#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__) +#define NO_DECLTYPE +#else /* GNU, Sun and other compilers */ +#define DECLTYPE(x) (__typeof(x)) +#endif +#endif + +#ifdef NO_DECLTYPE +#define DECLTYPE(x) +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + char **_da_dst = (char**)(&(dst)); \ + *_da_dst = (char*)(src); \ +} while (0) +#else +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + (dst) = DECLTYPE(dst)(src); \ +} while (0) +#endif + +#ifndef uthash_malloc +#define uthash_malloc(sz) malloc(sz) /* malloc fcn */ +#endif +#ifndef uthash_free +#define uthash_free(ptr,sz) free(ptr) /* free fcn */ +#endif +#ifndef uthash_bzero +#define uthash_bzero(a,n) memset(a,'\0',n) +#endif +#ifndef uthash_strlen +#define uthash_strlen(s) strlen(s) +#endif + +#ifndef HASH_FUNCTION +#define HASH_FUNCTION(keyptr,keylen,hashv) HASH_JEN(keyptr, keylen, hashv) +#endif + +#ifndef HASH_KEYCMP +#define HASH_KEYCMP(a,b,n) memcmp(a,b,n) +#endif + +#ifndef uthash_noexpand_fyi +#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ +#endif +#ifndef 
uthash_expand_fyi +#define uthash_expand_fyi(tbl) /* can be defined to log expands */ +#endif + +#ifndef HASH_NONFATAL_OOM +#define HASH_NONFATAL_OOM 0 +#endif + +#if HASH_NONFATAL_OOM +/* malloc failures can be recovered from */ + +#ifndef uthash_nonfatal_oom +#define uthash_nonfatal_oom(obj) do {} while (0) /* non-fatal OOM error */ +#endif + +#define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0) +#define IF_HASH_NONFATAL_OOM(x) x + +#else +/* malloc failures result in lost memory, hash tables are unusable */ + +#ifndef uthash_fatal +#define uthash_fatal(msg) exit(-1) /* fatal OOM error */ +#endif + +#define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory") +#define IF_HASH_NONFATAL_OOM(x) + +#endif + +/* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */ +#define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */ + +/* calculate the element whose hash handle address is hhp */ +#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) +/* calculate the hash handle from element address elp */ +#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle*)(void*)(((char*)(elp)) + ((tbl)->hho))) + +#define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \ +do { \ + struct UT_hash_handle *_hd_hh_item = (itemptrhh); \ + unsigned _hd_bkt; \ + HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh->tbl->num_buckets, _hd_bkt); \ + (head)->hh->tbl->buckets[_hd_bkt].count++; \ + _hd_hh_item->hh_next = NULL; \ + _hd_hh_item->hh_prev = NULL; \ +} while (0) + +#define HASH_VALUE(keyptr,keylen,hashv) \ +do { \ + HASH_FUNCTION(keyptr, keylen, hashv); \ +} while (0) + +#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \ +do { \ + (out) = NULL; \ + if (head) { \ + unsigned _hf_bkt; \ + HASH_TO_BKT(hashval, (head)->hh->tbl->num_buckets, _hf_bkt); \ + if (HASH_BLOOM_TEST((head)->hh->tbl, hashval) != 0) { \ + 
HASH_FIND_IN_BKT((head)->hh->tbl, hh, (head)->hh->tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \ + } \ + } \ +} while (0) + +#define HASH_FIND(hh,head,keyptr,keylen,out) \ +do { \ + (out) = NULL; \ + if (head) { \ + unsigned _hf_hashv; \ + HASH_VALUE(keyptr, keylen, _hf_hashv); \ + HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \ + } \ +} while (0) + +#ifdef HASH_BLOOM +#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM) +#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL) +#define HASH_BLOOM_MAKE(tbl,oomed) \ +do { \ + (tbl)->bloom_nbits = HASH_BLOOM; \ + (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ + if (!(tbl)->bloom_bv) { \ + HASH_RECORD_OOM(oomed); \ + } else { \ + uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ + (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ + } \ +} while (0) + +#define HASH_BLOOM_FREE(tbl) \ +do { \ + uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ +} while (0) + +#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U))) +#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U))) + +#define HASH_BLOOM_ADD(tbl,hashv) \ + HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) + +#define HASH_BLOOM_TEST(tbl,hashv) \ + HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) + +#else +#define HASH_BLOOM_MAKE(tbl,oomed) +#define HASH_BLOOM_FREE(tbl) +#define HASH_BLOOM_ADD(tbl,hashv) +#define HASH_BLOOM_TEST(tbl,hashv) (1) +#define HASH_BLOOM_BYTELEN 0U +#endif + +#define HASH_MAKE_TABLE(hh,head,oomed) \ +do { \ + (head)->hh->tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \ + if (!(head)->hh->tbl) { \ + HASH_RECORD_OOM(oomed); \ + } else { \ + uthash_bzero((head)->hh->tbl, sizeof(UT_hash_table)); \ + (head)->hh->tbl->tail = ((head)->hh); \ + (head)->hh->tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ + (head)->hh->tbl->log2_num_buckets = 
HASH_INITIAL_NUM_BUCKETS_LOG2; \ + (head)->hh->tbl->hho = (char*)((head)->hh) - (char*)(head); \ + (head)->hh->tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ + HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ + (head)->hh->tbl->signature = HASH_SIGNATURE; \ + if (!(head)->hh->tbl->buckets) { \ + HASH_RECORD_OOM(oomed); \ + uthash_free((head)->hh->tbl, sizeof(UT_hash_table)); \ + } else { \ + uthash_bzero((head)->hh->tbl->buckets, \ + HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ + HASH_BLOOM_MAKE((head)->hh->tbl, oomed); \ + IF_HASH_NONFATAL_OOM( \ + if (oomed) { \ + uthash_free((head)->hh->tbl->buckets, \ + HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ + uthash_free((head)->hh->tbl, sizeof(UT_hash_table)); \ + } \ + ) \ + } \ + } \ +} while (0) + +#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \ +do { \ + (replaced) = NULL; \ + HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ + if (replaced) { \ + HASH_DELETE(hh, head, replaced); \ + } \ + HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \ +} while (0) + +#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \ +do { \ + (replaced) = NULL; \ + HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ + if (replaced) { \ + HASH_DELETE(hh, head, replaced); \ + } \ + HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \ +} while (0) + +#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ +do { \ + unsigned _hr_hashv; \ + HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ + HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \ +} while (0) + +#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \ +do { \ + unsigned _hr_hashv; \ + HASH_VALUE(&((add)->fieldname), keylen_in, 
_hr_hashv); \ + HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \ +} while (0) + +#define HASH_APPEND_LIST(hh, head, add) \ +do { \ + (add)->hh->next = NULL; \ + (add)->hh->prev = ELMT_FROM_HH((head)->hh->tbl, (head)->hh->tbl->tail); \ + (head)->hh->tbl->tail->next = (add); \ + (head)->hh->tbl->tail = ((add)->hh); \ +} while (0) + +#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ +do { \ + do { \ + if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \ + break; \ + } \ + } while ((_hs_iter = HH_FROM_ELMT((head)->hh->tbl, _hs_iter)->next)); \ +} while (0) + +#ifdef NO_DECLTYPE +#undef HASH_AKBI_INNER_LOOP +#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ +do { \ + char *_hs_saved_head = (char*)(head); \ + do { \ + DECLTYPE_ASSIGN(head, _hs_iter); \ + if (cmpfcn(head, add) > 0) { \ + DECLTYPE_ASSIGN(head, _hs_saved_head); \ + break; \ + } \ + DECLTYPE_ASSIGN(head, _hs_saved_head); \ + } while ((_hs_iter = HH_FROM_ELMT((head)->hh->tbl, _hs_iter)->next)); \ +} while (0) +#endif + +#if HASH_NONFATAL_OOM + +#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ +do { \ + if (!(oomed)) { \ + unsigned _ha_bkt; \ + (head)->hh->tbl->num_items++; \ + HASH_TO_BKT(hashval, (head)->hh->tbl->num_buckets, _ha_bkt); \ + HASH_ADD_TO_BKT((head)->hh->tbl->buckets[_ha_bkt], hh, (add)->hh, oomed); \ + if (oomed) { \ + HASH_ROLLBACK_BKT(hh, head, (add)->hh); \ + HASH_DELETE_HH(hh, head, (add)->hh); \ + (add)->hh->tbl = NULL; \ + uthash_nonfatal_oom(add); \ + } else { \ + HASH_BLOOM_ADD((head)->hh->tbl, hashval); \ + HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ + } \ + } else { \ + (add)->hh->tbl = NULL; \ + uthash_nonfatal_oom(add); \ + } \ +} while (0) + +#else + +#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ +do { \ + unsigned _ha_bkt; \ + (head)->hh->tbl->num_items++; \ + HASH_TO_BKT(hashval, (head)->hh->tbl->num_buckets, _ha_bkt); \ + HASH_ADD_TO_BKT((head)->hh->tbl->buckets[_ha_bkt], hh, 
(add)->hh, oomed); \ + HASH_BLOOM_ADD((head)->hh->tbl, hashval); \ + HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ +} while (0) + +#endif + + +#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \ +do { \ + IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ + (add)->hh->hashv = (hashval); \ + (add)->hh->key = (char*) (keyptr); \ + (add)->hh->keylen = (unsigned) (keylen_in); \ + if (!(head)) { \ + (add)->hh->next = NULL; \ + (add)->hh->prev = NULL; \ + HASH_MAKE_TABLE(hh, add, _ha_oomed); \ + IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \ + (head) = (add); \ + IF_HASH_NONFATAL_OOM( } ) \ + } else { \ + void *_hs_iter = (head); \ + (add)->hh->tbl = (head)->hh->tbl; \ + HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \ + if (_hs_iter) { \ + (add)->hh->next = _hs_iter; \ + if (((add)->hh->prev = HH_FROM_ELMT((head)->hh->tbl, _hs_iter)->prev)) { \ + HH_FROM_ELMT((head)->hh->tbl, (add)->hh->prev)->next = (add); \ + } else { \ + (head) = (add); \ + } \ + HH_FROM_ELMT((head)->hh->tbl, _hs_iter)->prev = (add); \ + } else { \ + HASH_APPEND_LIST(hh, head, add); \ + } \ + } \ + HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ + HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \ +} while (0) + +#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \ +do { \ + unsigned _hs_hashv; \ + HASH_VALUE(keyptr, keylen_in, _hs_hashv); \ + HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \ +} while (0) + +#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \ + HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn) + +#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \ + HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn) + +#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \ +do { \ + IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ + 
(add)->hh->hashv = (hashval); \ + (add)->hh->key = (const void*) (keyptr); \ + (add)->hh->keylen = (unsigned) (keylen_in); \ + if (!(head)) { \ + (add)->hh->next = NULL; \ + (add)->hh->prev = NULL; \ + HASH_MAKE_TABLE(hh, add, _ha_oomed); \ + IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \ + (head) = (add); \ + IF_HASH_NONFATAL_OOM( } ) \ + } else { \ + (add)->hh->tbl = (head)->hh->tbl; \ + HASH_APPEND_LIST(hh, head, add); \ + } \ + HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ + HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \ +} while (0) + +#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ +do { \ + unsigned _ha_hashv; \ + HASH_VALUE(keyptr, keylen_in, _ha_hashv); \ + HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \ +} while (0) + +#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \ + HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add) + +#define HASH_ADD(hh,head,fieldname,keylen_in,add) \ + HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add) + +#define HASH_TO_BKT(hashv,num_bkts,bkt) \ +do { \ + bkt = ((hashv) & ((num_bkts) - 1U)); \ +} while (0) + +/* delete "delptr" from the hash table. + * "the usual" patch-up process for the app-order doubly-linked-list. + * The use of _hd_hh_del below deserves special explanation. + * These used to be expressed using (delptr) but that led to a bug + * if someone used the same symbol for the head and deletee, like + * HASH_DELETE(hh,users,users); + * We want that to work, but by changing the head (users) below + * we were forfeiting our ability to further refer to the deletee (users) + * in the patch-up process. Solution: use scratch space to + * copy the deletee pointer, then the latter references are via that + * scratch pointer rather than through the repointed (users) symbol. 
+ */ +#define HASH_DELETE(hh,head,delptr) \ + HASH_DELETE_HH(hh, head, (delptr)->hh) + +#define HASH_DELETE_HH(hh,head,delptrhh) \ +do { \ + struct UT_hash_handle *_hd_hh_del = (delptrhh); \ + if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \ + HASH_BLOOM_FREE((head)->hh->tbl); \ + uthash_free((head)->hh->tbl->buckets, \ + (head)->hh->tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ + uthash_free((head)->hh->tbl, sizeof(UT_hash_table)); \ + (head) = NULL; \ + } else { \ + unsigned _hd_bkt; \ + if (_hd_hh_del == (head)->hh->tbl->tail) { \ + (head)->hh->tbl->tail = HH_FROM_ELMT((head)->hh->tbl, _hd_hh_del->prev); \ + } \ + if (_hd_hh_del->prev != NULL) { \ + HH_FROM_ELMT((head)->hh->tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \ + } else { \ + DECLTYPE_ASSIGN(head, _hd_hh_del->next); \ + } \ + if (_hd_hh_del->next != NULL) { \ + HH_FROM_ELMT((head)->hh->tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \ + } \ + HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh->tbl->num_buckets, _hd_bkt); \ + HASH_DEL_IN_BKT((head)->hh->tbl->buckets[_hd_bkt], _hd_hh_del); \ + (head)->hh->tbl->num_items--; \ + } \ + HASH_FSCK(hh, head, "HASH_DELETE_HH"); \ +} while (0) + +/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ +#define HASH_FIND_STR(head,findstr,out) \ +do { \ + unsigned _uthash_hfstr_keylen = (unsigned)uthash_strlen(findstr); \ + HASH_FIND(hh, head, findstr, _uthash_hfstr_keylen, out); \ +} while (0) +#define HASH_ADD_STR(head,strfield,add) \ +do { \ + unsigned _uthash_hastr_keylen = (unsigned)uthash_strlen((add)->strfield); \ + HASH_ADD(hh, head, strfield[0], _uthash_hastr_keylen, add); \ +} while (0) +#define HASH_REPLACE_STR(head,strfield,add,replaced) \ +do { \ + unsigned _uthash_hrstr_keylen = (unsigned)uthash_strlen((add)->strfield); \ + HASH_REPLACE(hh, head, strfield[0], _uthash_hrstr_keylen, add, replaced); \ +} while (0) +#define HASH_FIND_INT(head,findint,out) \ + HASH_FIND(hh,head,findint,sizeof(int),out) +#define 
HASH_ADD_INT(head,intfield,add) \ + HASH_ADD(hh,head,intfield,sizeof(int),add) +#define HASH_REPLACE_INT(head,intfield,add,replaced) \ + HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) +#define HASH_FIND_PTR(head,findptr,out) \ + HASH_FIND(hh,head,findptr,sizeof(void *),out) +#define HASH_ADD_PTR(head,ptrfield,add) \ + HASH_ADD(hh,head,ptrfield,sizeof(void *),add) +#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \ + HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) +#define HASH_DEL(head,delptr) \ + HASH_DELETE(hh,head,delptr) + +/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. + * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. + */ +#ifdef HASH_DEBUG +#include <stdio.h> /* fprintf, stderr */ +#define HASH_OOPS(...) do { fprintf(stderr, __VA_ARGS__); exit(-1); } while (0) +#define HASH_FSCK(hh,head,where) \ +do { \ + struct UT_hash_handle *_thh; \ + if (head) { \ + unsigned _bkt_i; \ + unsigned _count = 0; \ + char *_prev; \ + for (_bkt_i = 0; _bkt_i < (head)->hh->tbl->num_buckets; ++_bkt_i) { \ + unsigned _bkt_count = 0; \ + _thh = (head)->hh->tbl->buckets[_bkt_i].hh_head; \ + _prev = NULL; \ + while (_thh) { \ + if (_prev != (char*)(_thh->hh_prev)) { \ + HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \ + (where), (void*)_thh->hh_prev, (void*)_prev); \ + } \ + _bkt_count++; \ + _prev = (char*)(_thh); \ + _thh = _thh->hh_next; \ + } \ + _count += _bkt_count; \ + if ((head)->hh->tbl->buckets[_bkt_i].count != _bkt_count) { \ + HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \ + (where), (head)->hh->tbl->buckets[_bkt_i].count, _bkt_count); \ + } \ + } \ + if (_count != (head)->hh->tbl->num_items) { \ + HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \ + (where), (head)->hh->tbl->num_items, _count); \ + } \ + _count = 0; \ + _prev = NULL; \ + _thh = (head)->hh; \ + while (_thh) { \ + _count++; \ + if (_prev != (char*)_thh->prev) { \ + HASH_OOPS("%s: invalid prev %p, actual 
%p\n", \ + (where), (void*)_thh->prev, (void*)_prev); \ + } \ + _prev = (char*)ELMT_FROM_HH((head)->hh->tbl, _thh); \ + _thh = (_thh->next ? HH_FROM_ELMT((head)->hh->tbl, _thh->next) : NULL); \ + } \ + if (_count != (head)->hh->tbl->num_items) { \ + HASH_OOPS("%s: invalid app item count %u, actual %u\n", \ + (where), (head)->hh->tbl->num_items, _count); \ + } \ + } \ +} while (0) +#else +#define HASH_FSCK(hh,head,where) +#endif + +/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to + * the descriptor to which this macro is defined for tuning the hash function. + * The app can #include <unistd.h> to get the prototype for write(2). */ +#ifdef HASH_EMIT_KEYS +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ +do { \ + unsigned _klen = fieldlen; \ + write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ + write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \ +} while (0) +#else +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) +#endif + +/* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. 
*/ +#define HASH_BER(key,keylen,hashv) \ +do { \ + unsigned _hb_keylen = (unsigned)keylen; \ + const unsigned char *_hb_key = (const unsigned char*)(key); \ + (hashv) = 0; \ + while (_hb_keylen-- != 0U) { \ + (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \ + } \ +} while (0) + + +/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at + * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx + * (archive link: https://archive.is/Ivcan ) + */ +#define HASH_SAX(key,keylen,hashv) \ +do { \ + unsigned _sx_i; \ + const unsigned char *_hs_key = (const unsigned char*)(key); \ + hashv = 0; \ + for (_sx_i=0; _sx_i < keylen; _sx_i++) { \ + hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ + } \ +} while (0) +/* FNV-1a variation */ +#define HASH_FNV(key,keylen,hashv) \ +do { \ + unsigned _fn_i; \ + const unsigned char *_hf_key = (const unsigned char*)(key); \ + (hashv) = 2166136261U; \ + for (_fn_i=0; _fn_i < keylen; _fn_i++) { \ + hashv = hashv ^ _hf_key[_fn_i]; \ + hashv = hashv * 16777619U; \ + } \ +} while (0) + +#define HASH_OAT(key,keylen,hashv) \ +do { \ + unsigned _ho_i; \ + const unsigned char *_ho_key=(const unsigned char*)(key); \ + hashv = 0; \ + for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ + hashv += _ho_key[_ho_i]; \ + hashv += (hashv << 10); \ + hashv ^= (hashv >> 6); \ + } \ + hashv += (hashv << 3); \ + hashv ^= (hashv >> 11); \ + hashv += (hashv << 15); \ +} while (0) + +#define HASH_JEN_MIX(a,b,c) \ +do { \ + a -= b; a -= c; a ^= ( c >> 13 ); \ + b -= c; b -= a; b ^= ( a << 8 ); \ + c -= a; c -= b; c ^= ( b >> 13 ); \ + a -= b; a -= c; a ^= ( c >> 12 ); \ + b -= c; b -= a; b ^= ( a << 16 ); \ + c -= a; c -= b; c ^= ( b >> 5 ); \ + a -= b; a -= c; a ^= ( c >> 3 ); \ + b -= c; b -= a; b ^= ( a << 10 ); \ + c -= a; c -= b; c ^= ( b >> 15 ); \ +} while (0) + +#define HASH_JEN(key,keylen,hashv) \ +do { \ + unsigned _hj_i,_hj_j,_hj_k; \ + unsigned const char *_hj_key=(unsigned const char*)(key); \ + hashv = 0xfeedbeefu; \ + 
_hj_i = _hj_j = 0x9e3779b9u; \ + _hj_k = (unsigned)(keylen); \ + while (_hj_k >= 12U) { \ + _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ + + ( (unsigned)_hj_key[2] << 16 ) \ + + ( (unsigned)_hj_key[3] << 24 ) ); \ + _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ + + ( (unsigned)_hj_key[6] << 16 ) \ + + ( (unsigned)_hj_key[7] << 24 ) ); \ + hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ + + ( (unsigned)_hj_key[10] << 16 ) \ + + ( (unsigned)_hj_key[11] << 24 ) ); \ + \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ + \ + _hj_key += 12; \ + _hj_k -= 12U; \ + } \ + hashv += (unsigned)(keylen); \ + switch ( _hj_k ) { \ + case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \ + case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \ + case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \ + case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \ + case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \ + case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \ + case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \ + case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \ + case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \ + case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \ + case 1: _hj_i += _hj_key[0]; /* FALLTHROUGH */ \ + default: ; \ + } \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ +} while (0) + +/* The Paul Hsieh hash function */ +#undef get16bits +#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ + || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) +#define get16bits(d) (*((const uint16_t *) (d))) +#endif + +#if !defined (get16bits) +#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ + +(uint32_t)(((const uint8_t *)(d))[0]) ) +#endif +#define HASH_SFH(key,keylen,hashv) \ +do { \ + unsigned const char *_sfh_key=(unsigned const char*)(key); \ + uint32_t _sfh_tmp, 
_sfh_len = (uint32_t)keylen; \ + \ + unsigned _sfh_rem = _sfh_len & 3U; \ + _sfh_len >>= 2; \ + hashv = 0xcafebabeu; \ + \ + /* Main loop */ \ + for (;_sfh_len > 0U; _sfh_len--) { \ + hashv += get16bits (_sfh_key); \ + _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \ + hashv = (hashv << 16) ^ _sfh_tmp; \ + _sfh_key += 2U*sizeof (uint16_t); \ + hashv += hashv >> 11; \ + } \ + \ + /* Handle end cases */ \ + switch (_sfh_rem) { \ + case 3: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 16; \ + hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \ + hashv += hashv >> 11; \ + break; \ + case 2: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 11; \ + hashv += hashv >> 17; \ + break; \ + case 1: hashv += *_sfh_key; \ + hashv ^= hashv << 10; \ + hashv += hashv >> 1; \ + break; \ + default: ; \ + } \ + \ + /* Force "avalanching" of final 127 bits */ \ + hashv ^= hashv << 3; \ + hashv += hashv >> 5; \ + hashv ^= hashv << 4; \ + hashv += hashv >> 17; \ + hashv ^= hashv << 25; \ + hashv += hashv >> 6; \ +} while (0) + +/* iterate over items in a known bucket to find desired item */ +#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \ +do { \ + if ((head).hh_head != NULL) { \ + DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \ + } else { \ + (out) = NULL; \ + } \ + while ((out) != NULL) { \ + if ((out)->hh->hashv == (hashval) && (out)->hh->keylen == (keylen_in)) { \ + if (HASH_KEYCMP((out)->hh->key, keyptr, keylen_in) == 0) { \ + break; \ + } \ + } \ + if ((out)->hh->hh_next != NULL) { \ + DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh->hh_next)); \ + } else { \ + (out) = NULL; \ + } \ + } \ +} while (0) + +/* add an item to a bucket */ +#define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \ +do { \ + UT_hash_bucket *_ha_head = &(head); \ + _ha_head->count++; \ + (addhh)->hh_next = _ha_head->hh_head; \ + (addhh)->hh_prev = NULL; \ + if (_ha_head->hh_head != NULL) { \ + _ha_head->hh_head->hh_prev = (addhh); \ + } \ + 
_ha_head->hh_head = (addhh); \ + if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \ + && !(addhh)->tbl->noexpand) { \ + HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \ + IF_HASH_NONFATAL_OOM( \ + if (oomed) { \ + HASH_DEL_IN_BKT(head,addhh); \ + } \ + ) \ + } \ +} while (0) + +/* remove an item from a given bucket */ +#define HASH_DEL_IN_BKT(head,delhh) \ +do { \ + UT_hash_bucket *_hd_head = &(head); \ + _hd_head->count--; \ + if (_hd_head->hh_head == (delhh)) { \ + _hd_head->hh_head = (delhh)->hh_next; \ + } \ + if ((delhh)->hh_prev) { \ + (delhh)->hh_prev->hh_next = (delhh)->hh_next; \ + } \ + if ((delhh)->hh_next) { \ + (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \ + } \ +} while (0) + +/* Bucket expansion has the effect of doubling the number of buckets + * and redistributing the items into the new buckets. Ideally the + * items will distribute more or less evenly into the new buckets + * (the extent to which this is true is a measure of the quality of + * the hash function as it applies to the key domain). + * + * With the items distributed into more buckets, the chain length + * (item count) in each bucket is reduced. Thus by expanding buckets + * the hash keeps a bound on the chain length. This bounded chain + * length is the essence of how a hash provides constant time lookup. + * + * The calculation of tbl->ideal_chain_maxlen below deserves some + * explanation. First, keep in mind that we're calculating the ideal + * maximum chain length based on the *new* (doubled) bucket count. + * In fractions this is just n/b (n=number of items,b=new num buckets). + * Since the ideal chain length is an integer, we want to calculate + * ceil(n/b). We don't depend on floating point arithmetic in this + * hash, so to calculate ceil(n/b) with integers we could write + * + * ceil(n/b) = (n/b) + ((n%b)?1:0) + * + * and in fact a previous version of this hash did just that. 
+ * But now we have improved things a bit by recognizing that b is + * always a power of two. We keep its base 2 log handy (call it lb), + * so now we can write this with a bit shift and logical AND: + * + * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) + * + */ +#define HASH_EXPAND_BUCKETS(hh,tbl,oomed) \ +do { \ + unsigned _he_bkt; \ + unsigned _he_bkt_i; \ + struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ + UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ + _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ + sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \ + if (!_he_new_buckets) { \ + HASH_RECORD_OOM(oomed); \ + } else { \ + uthash_bzero(_he_new_buckets, \ + sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \ + (tbl)->ideal_chain_maxlen = \ + ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \ + ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \ + (tbl)->nonideal_items = 0; \ + for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \ + _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \ + while (_he_thh != NULL) { \ + _he_hh_nxt = _he_thh->hh_next; \ + HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \ + _he_newbkt = &(_he_new_buckets[_he_bkt]); \ + if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \ + (tbl)->nonideal_items++; \ + if (_he_newbkt->count > _he_newbkt->expand_mult * (tbl)->ideal_chain_maxlen) { \ + _he_newbkt->expand_mult++; \ + } \ + } \ + _he_thh->hh_prev = NULL; \ + _he_thh->hh_next = _he_newbkt->hh_head; \ + if (_he_newbkt->hh_head != NULL) { \ + _he_newbkt->hh_head->hh_prev = _he_thh; \ + } \ + _he_newbkt->hh_head = _he_thh; \ + _he_thh = _he_hh_nxt; \ + } \ + } \ + uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \ + (tbl)->num_buckets *= 2U; \ + (tbl)->log2_num_buckets++; \ + (tbl)->buckets = _he_new_buckets; \ + (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? 
\ + ((tbl)->ineff_expands+1U) : 0U; \ + if ((tbl)->ineff_expands > 1U) { \ + (tbl)->noexpand = 1; \ + uthash_noexpand_fyi(tbl); \ + } \ + uthash_expand_fyi(tbl); \ + } \ +} while (0) + + +/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ +/* Note that HASH_SORT assumes the hash handle name to be hh. + * HASH_SRT was added to allow the hash handle name to be passed in. */ +#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) +#define HASH_SRT(hh,head,cmpfcn) \ +do { \ + unsigned _hs_i; \ + unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ + struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ + if (head != NULL) { \ + _hs_insize = 1; \ + _hs_looping = 1; \ + _hs_list = ((head)->hh); \ + while (_hs_looping != 0U) { \ + _hs_p = _hs_list; \ + _hs_list = NULL; \ + _hs_tail = NULL; \ + _hs_nmerges = 0; \ + while (_hs_p != NULL) { \ + _hs_nmerges++; \ + _hs_q = _hs_p; \ + _hs_psize = 0; \ + for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) { \ + _hs_psize++; \ + _hs_q = ((_hs_q->next != NULL) ? \ + HH_FROM_ELMT((head)->hh->tbl, _hs_q->next) : NULL); \ + if (_hs_q == NULL) { \ + break; \ + } \ + } \ + _hs_qsize = _hs_insize; \ + while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) { \ + if (_hs_psize == 0U) { \ + _hs_e = _hs_q; \ + _hs_q = ((_hs_q->next != NULL) ? \ + HH_FROM_ELMT((head)->hh->tbl, _hs_q->next) : NULL); \ + _hs_qsize--; \ + } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \ + _hs_e = _hs_p; \ + if (_hs_p != NULL) { \ + _hs_p = ((_hs_p->next != NULL) ? \ + HH_FROM_ELMT((head)->hh->tbl, _hs_p->next) : NULL); \ + } \ + _hs_psize--; \ + } else if ((cmpfcn( \ + DECLTYPE(head)(ELMT_FROM_HH((head)->hh->tbl, _hs_p)), \ + DECLTYPE(head)(ELMT_FROM_HH((head)->hh->tbl, _hs_q)) \ + )) <= 0) { \ + _hs_e = _hs_p; \ + if (_hs_p != NULL) { \ + _hs_p = ((_hs_p->next != NULL) ? 
\ + HH_FROM_ELMT((head)->hh->tbl, _hs_p->next) : NULL); \ + } \ + _hs_psize--; \ + } else { \ + _hs_e = _hs_q; \ + _hs_q = ((_hs_q->next != NULL) ? \ + HH_FROM_ELMT((head)->hh->tbl, _hs_q->next) : NULL); \ + _hs_qsize--; \ + } \ + if ( _hs_tail != NULL ) { \ + _hs_tail->next = ((_hs_e != NULL) ? \ + ELMT_FROM_HH((head)->hh->tbl, _hs_e) : NULL); \ + } else { \ + _hs_list = _hs_e; \ + } \ + if (_hs_e != NULL) { \ + _hs_e->prev = ((_hs_tail != NULL) ? \ + ELMT_FROM_HH((head)->hh->tbl, _hs_tail) : NULL); \ + } \ + _hs_tail = _hs_e; \ + } \ + _hs_p = _hs_q; \ + } \ + if (_hs_tail != NULL) { \ + _hs_tail->next = NULL; \ + } \ + if (_hs_nmerges <= 1U) { \ + _hs_looping = 0; \ + (head)->hh->tbl->tail = _hs_tail; \ + DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh->tbl, _hs_list)); \ + } \ + _hs_insize *= 2U; \ + } \ + HASH_FSCK(hh, head, "HASH_SRT"); \ + } \ +} while (0) + +/* This function selects items from one hash into another hash. + * The end result is that the selected items have dual presence + * in both hashes. There is no copy of the items made; rather + * they are added into the new hash through a secondary hash + * hash handle that must be present in the structure. 
*/ +#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ +do { \ + unsigned _src_bkt, _dst_bkt; \ + void *_last_elt = NULL, *_elt; \ + UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ + ptrdiff_t _dst_hho = ((char*)((dst)->hh_dst) - (char*)(dst)); \ + if ((src) != NULL) { \ + for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ + for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ + _src_hh != NULL; \ + _src_hh = _src_hh->hh_next) { \ + _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ + if (cond(_elt)) { \ + IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; ) \ + _dst_hh = (UT_hash_handle*)(void*)(((char*)_elt) + _dst_hho); \ + _dst_hh->key = _src_hh->key; \ + _dst_hh->keylen = _src_hh->keylen; \ + _dst_hh->hashv = _src_hh->hashv; \ + _dst_hh->prev = _last_elt; \ + _dst_hh->next = NULL; \ + if (_last_elt_hh != NULL) { \ + _last_elt_hh->next = _elt; \ + } \ + if ((dst) == NULL) { \ + DECLTYPE_ASSIGN(dst, _elt); \ + HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed); \ + IF_HASH_NONFATAL_OOM( \ + if (_hs_oomed) { \ + uthash_nonfatal_oom(_elt); \ + (dst) = NULL; \ + continue; \ + } \ + ) \ + } else { \ + _dst_hh->tbl = (dst)->hh_dst.tbl; \ + } \ + HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ + HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \ + (dst)->hh_dst.tbl->num_items++; \ + IF_HASH_NONFATAL_OOM( \ + if (_hs_oomed) { \ + HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh); \ + HASH_DELETE_HH(hh_dst, dst, _dst_hh); \ + _dst_hh->tbl = NULL; \ + uthash_nonfatal_oom(_elt); \ + continue; \ + } \ + ) \ + HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv); \ + _last_elt = _elt; \ + _last_elt_hh = _dst_hh; \ + } \ + } \ + } \ + } \ + HASH_FSCK(hh_dst, dst, "HASH_SELECT"); \ +} while (0) + +#define HASH_CLEAR(hh,head) \ +do { \ + if ((head) != NULL) { \ + HASH_BLOOM_FREE((head)->hh->tbl); \ + uthash_free((head)->hh->tbl->buckets, \ + (head)->hh->tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ + 
uthash_free((head)->hh->tbl, sizeof(UT_hash_table)); \ + (head) = NULL; \ + } \ +} while (0) + +#define HASH_OVERHEAD(hh,head) \ + (((head) != NULL) ? ( \ + (size_t)(((head)->hh->tbl->num_items * sizeof(UT_hash_handle)) + \ + ((head)->hh->tbl->num_buckets * sizeof(UT_hash_bucket)) + \ + sizeof(UT_hash_table) + \ + (HASH_BLOOM_BYTELEN))) : 0U) + +#ifdef NO_DECLTYPE +#define HASH_ITER(hh,head,el,tmp) \ +for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh->next:NULL)); \ + (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh->next:NULL))) +#else +#define HASH_ITER(hh,head,el,tmp) \ +for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh->next:NULL)); \ + (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh->next:NULL))) +#endif + +/* obtain a count of items in the hash */ +#define HASH_COUNT(head) HASH_CNT(hh,head) +#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh->tbl->num_items):0U) + +typedef struct UT_hash_bucket { + struct UT_hash_handle *hh_head; + unsigned count; + + /* expand_mult is normally set to 0. In this situation, the max chain length + * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If + * the bucket's chain exceeds this length, bucket expansion is triggered). + * However, setting expand_mult to a non-zero value delays bucket expansion + * (that would be triggered by additions to this particular bucket) + * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. + * (The multiplier is simply expand_mult+1). The whole idea of this + * multiplier is to reduce bucket expansions, since they are expensive, in + * situations where we know that a particular bucket tends to be overused. + * It is better to let its chain length grow to a longer yet-still-bounded + * value, than to do an O(n) bucket expansion too often. 
+ */ + unsigned expand_mult; + +} UT_hash_bucket; + +/* random signature used only to find hash tables in external analysis */ +#define HASH_SIGNATURE 0xa0111fe1u +#define HASH_BLOOM_SIGNATURE 0xb12220f2u + +typedef struct UT_hash_table { + UT_hash_bucket *buckets; + unsigned num_buckets, log2_num_buckets; + unsigned num_items; + struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ + ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ + + /* in an ideal situation (all buckets used equally), no bucket would have + * more than ceil(#items/#buckets) items. that's the ideal chain length. */ + unsigned ideal_chain_maxlen; + + /* nonideal_items is the number of items in the hash whose chain position + * exceeds the ideal chain maxlen. these items pay the penalty for an uneven + * hash distribution; reaching them in a chain traversal takes >ideal steps */ + unsigned nonideal_items; + + /* ineffective expands occur when a bucket doubling was performed, but + * afterward, more than half the items in the hash had nonideal chain + * positions. If this happens on two consecutive expansions we inhibit any + * further expansion, as it's not helping; this happens when the hash + * function isn't a good fit for the key domain. When expansion is inhibited + * the hash will still work, albeit no longer in constant time. 
*/ + unsigned ineff_expands, noexpand; + + uint32_t signature; /* used only to find hash tables in external analysis */ +#ifdef HASH_BLOOM + uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ + uint8_t *bloom_bv; + uint8_t bloom_nbits; +#endif + +} UT_hash_table; + +typedef struct UT_hash_handle { + struct UT_hash_table *tbl; + void *prev; /* prev element in app order */ + void *next; /* next element in app order */ + struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ + struct UT_hash_handle *hh_next; /* next hh in bucket order */ + const void *key; /* ptr to enclosing struct's key */ + unsigned keylen; /* enclosing struct's key len */ + unsigned hashv; /* result of hash-fcn(key) */ +} UT_hash_handle; + +#endif /* UTHASH_H */