raindrops RubyGem user+dev discussion/patches/pulls/bugs/help
* [PATCH 0/2] drop ruby/st.h usage in favor of khashl
@ 2024-03-24  0:19 Eric Wong
  2024-03-24  0:19 ` [PATCH 1/2] linux_inet_diag: remove needless OBJ_FREEZE calls Eric Wong
  2024-03-24  0:19 ` [PATCH 2/2] switch to khashl for hash table outside of GVL Eric Wong
  0 siblings, 2 replies; 4+ messages in thread
From: Eric Wong @ 2024-03-24  0:19 UTC (permalink / raw)
  To: raindrops-public

Eric Wong (2):
  linux_inet_diag: remove needless OBJ_FREEZE calls
  switch to khashl for hash table outside of GVL

 ext/raindrops/khashl.h          | 444 ++++++++++++++++++++++++++++++++
 ext/raindrops/linux_inet_diag.c | 127 ++++-----
 2 files changed, 508 insertions(+), 63 deletions(-)
 create mode 100644 ext/raindrops/khashl.h


* [PATCH 1/2] linux_inet_diag: remove needless OBJ_FREEZE calls
  2024-03-24  0:19 [PATCH 0/2] drop ruby/st.h usage in favor of khashl Eric Wong
@ 2024-03-24  0:19 ` Eric Wong
  2024-03-24  0:19 ` [PATCH 2/2] switch to khashl for hash table outside of GVL Eric Wong
  1 sibling, 0 replies; 4+ messages in thread
From: Eric Wong @ 2024-03-24  0:19 UTC (permalink / raw)
  To: raindrops-public

Calling OBJ_FREEZE on keys before rb_hash_aset has actually
prevented key deduplication since Ruby 2.6: unfrozen String
keys get deduplicated, while pre-frozen ones are stored as-is.
Removing the calls introduces a performance regression for
Ruby <= 2.5 users, but I expect the majority of users are on
newer versions (I'm on 2.7, which is itself already ancient).
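
For illustration, a minimal sketch of the difference (the function
and keys below are made up; behavior as described for MRI >= 2.6):

	#include <ruby.h>

	static void aset_example(VALUE hash)
	{
		VALUE k1 = rb_str_new_cstr("0.0.0.0:80");
		VALUE k2 = rb_str_new_cstr("0.0.0.0:443");

		/* unfrozen String key: rb_hash_aset stores a frozen,
		 * deduplicated copy of k1 */
		rb_hash_aset(hash, k1, Qtrue);

		/* pre-frozen key: stored as-is, never deduplicated */
		OBJ_FREEZE(k2);
		rb_hash_aset(hash, k2, Qtrue);
	}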
---
 ext/raindrops/linux_inet_diag.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ext/raindrops/linux_inet_diag.c b/ext/raindrops/linux_inet_diag.c
index e4050cb..799b4da 100644
--- a/ext/raindrops/linux_inet_diag.c
+++ b/ext/raindrops/linux_inet_diag.c
@@ -159,7 +159,6 @@ static int st_to_hash(st_data_t key, st_data_t value, VALUE hash)
 		VALUE k = remove_scope_id((const char *)key);
 		VALUE v = rb_listen_stats(stats);
 
-		OBJ_FREEZE(k);
 		rb_hash_aset(hash, k, v);
 	}
 	return st_free_data(key, value, 0);
@@ -174,7 +173,6 @@ static int st_AND_hash(st_data_t key, st_data_t value, VALUE hash)
 
 		if (rb_hash_lookup(hash, k) == Qtrue) {
 			VALUE v = rb_listen_stats(stats);
-			OBJ_FREEZE(k);
 			rb_hash_aset(hash, k, v);
 		}
 	}


* [PATCH 2/2] switch to khashl for hash table outside of GVL
  2024-03-24  0:19 [PATCH 0/2] drop ruby/st.h usage in favor of khashl Eric Wong
  2024-03-24  0:19 ` [PATCH 1/2] linux_inet_diag: remove needless OBJ_FREEZE calls Eric Wong
@ 2024-03-24  0:19 ` Eric Wong
  2024-03-24 21:10   ` [PATCH 3/2] khashl: use ruby_xrealloc2 to avoid overflow Eric Wong
  1 sibling, 1 reply; 4+ messages in thread
From: Eric Wong @ 2024-03-24  0:19 UTC (permalink / raw)
  To: raindrops-public

Given the history of Ruby removing public C APIs, get ahead
of potential incompatibilities by switching to an externally
maintained unordered hash table.

khashl is a newer, more memory-efficient evolution of the
khash hash table adopted by the git SCM, and this will be
my first (and likely not the last) time using it in a
public codebase.
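
For anybody unfamiliar with khashl, the usage pattern ends up
looking like this (a condensed sketch of what the diff below does;
lookup_or_create is a made-up name for illustration):

	#include "khashl.h"

	/* declare `addr2stats', a char * -> struct listen_stats * map
	 * with static accessor functions prefixed `a2s_' */
	KHASHL_CMAP_INIT(KH_LOCAL, addr2stats, a2s,
			char *, struct listen_stats *,
			kh_hash_str, kh_eq_str)

	static struct listen_stats *
	lookup_or_create(addr2stats *a2s, char *key)
	{
		int absent;
		khint_t ki = a2s_get(a2s, key);

		if (ki < kh_end(a2s)) /* hit */
			return kh_val(a2s, ki);

		/* miss: insert a new bucket and attach a value to it */
		ki = a2s_put(a2s, key, &absent);
		kh_val(a2s, ki) = xcalloc(1, sizeof(struct listen_stats));
		return kh_val(a2s, ki);
	}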
---
 ext/raindrops/khashl.h          | 444 ++++++++++++++++++++++++++++++++
 ext/raindrops/linux_inet_diag.c | 125 ++++-----
 2 files changed, 508 insertions(+), 61 deletions(-)
 create mode 100644 ext/raindrops/khashl.h

diff --git a/ext/raindrops/khashl.h b/ext/raindrops/khashl.h
new file mode 100644
index 0000000..df97c7f
--- /dev/null
+++ b/ext/raindrops/khashl.h
@@ -0,0 +1,444 @@
+/* The MIT License
+
+   Copyright (c) 2019-2023 by Attractive Chaos <attractor@live.co.uk>
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+*/
+
+#ifndef __AC_KHASHL_H
+#define __AC_KHASHL_H
+
+#define AC_VERSION_KHASHL_H "0.2"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+/************************************
+ * Compiler specific configurations *
+ ************************************/
+
+#if UINT_MAX == 0xffffffffu
+typedef unsigned int khint32_t;
+#elif ULONG_MAX == 0xffffffffu
+typedef unsigned long khint32_t;
+#endif
+
+#if ULONG_MAX == ULLONG_MAX
+typedef unsigned long khint64_t;
+#else
+typedef unsigned long long khint64_t;
+#endif
+
+#ifndef kh_inline
+#ifdef _MSC_VER
+#define kh_inline __inline
+#else
+#define kh_inline inline
+#endif
+#endif /* kh_inline */
+
+#ifndef klib_unused
+#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+#define klib_unused __attribute__ ((__unused__))
+#else
+#define klib_unused
+#endif
+#endif /* klib_unused */
+
+#define KH_LOCAL static kh_inline klib_unused
+
+typedef khint32_t khint_t;
+
+/******************
+ * malloc aliases *
+ ******************/
+
+#ifndef kcalloc
+#define kcalloc(N,Z) calloc(N,Z)
+#endif
+#ifndef kmalloc
+#define kmalloc(Z) malloc(Z)
+#endif
+#ifndef krealloc
+#define krealloc(P,Z) realloc(P,Z)
+#endif
+#ifndef kfree
+#define kfree(P) free(P)
+#endif
+
+/****************************
+ * Simple private functions *
+ ****************************/
+
+#define __kh_used(flag, i)       (flag[i>>5] >> (i&0x1fU) & 1U)
+#define __kh_set_used(flag, i)   (flag[i>>5] |= 1U<<(i&0x1fU))
+#define __kh_set_unused(flag, i) (flag[i>>5] &= ~(1U<<(i&0x1fU)))
+
+#define __kh_fsize(m) ((m) < 32? 1 : (m)>>5)
+
+static kh_inline khint_t __kh_h2b(khint_t hash, khint_t bits) { return hash * 2654435769U >> (32 - bits); }
+
+/*******************
+ * Hash table base *
+ *******************/
+
+#define __KHASHL_TYPE(HType, khkey_t) \
+	typedef struct HType { \
+		khint_t bits, count; \
+		khint32_t *used; \
+		khkey_t *keys; \
+	} HType;
+
+#define __KHASHL_PROTOTYPES(HType, prefix, khkey_t) \
+	extern HType *prefix##_init(void); \
+	extern void prefix##_destroy(HType *h); \
+	extern void prefix##_clear(HType *h); \
+	extern khint_t prefix##_getp(const HType *h, const khkey_t *key); \
+	extern int prefix##_resize(HType *h, khint_t new_n_buckets); \
+	extern khint_t prefix##_putp(HType *h, const khkey_t *key, int *absent); \
+	extern int prefix##_del(HType *h, khint_t k);
+
+#define __KHASHL_IMPL_BASIC(SCOPE, HType, prefix) \
+	SCOPE HType *prefix##_init(void) { \
+		return (HType*)kcalloc(1, sizeof(HType)); \
+	} \
+	SCOPE void prefix##_destroy(HType *h) { \
+		if (!h) return; \
+		kfree((void *)h->keys); kfree(h->used); \
+		kfree(h); \
+	} \
+	SCOPE void prefix##_clear(HType *h) { \
+		if (h && h->used) { \
+			khint_t n_buckets = (khint_t)1U << h->bits; \
+			memset(h->used, 0, __kh_fsize(n_buckets) * sizeof(khint32_t)); \
+			h->count = 0; \
+		} \
+	}
+
+#define __KHASHL_IMPL_GET(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	SCOPE khint_t prefix##_getp_core(const HType *h, const khkey_t *key, khint_t hash) { \
+		khint_t i, last, n_buckets, mask; \
+		if (h->keys == 0) return 0; \
+		n_buckets = (khint_t)1U << h->bits; \
+		mask = n_buckets - 1U; \
+		i = last = __kh_h2b(hash, h->bits); \
+		while (__kh_used(h->used, i) && !__hash_eq(h->keys[i], *key)) { \
+			i = (i + 1U) & mask; \
+			if (i == last) return n_buckets; \
+		} \
+		return !__kh_used(h->used, i)? n_buckets : i; \
+	} \
+	SCOPE khint_t prefix##_getp(const HType *h, const khkey_t *key) { return prefix##_getp_core(h, key, __hash_fn(*key)); } \
+	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { return prefix##_getp_core(h, &key, __hash_fn(key)); }
+
+#define __KHASHL_IMPL_RESIZE(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	SCOPE int prefix##_resize(HType *h, khint_t new_n_buckets) { \
+		khint32_t *new_used = 0; \
+		khint_t j = 0, x = new_n_buckets, n_buckets, new_bits, new_mask; \
+		while ((x >>= 1) != 0) ++j; \
+		if (new_n_buckets & (new_n_buckets - 1)) ++j; \
+		new_bits = j > 2? j : 2; \
+		new_n_buckets = (khint_t)1U << new_bits; \
+		if (h->count > (new_n_buckets>>1) + (new_n_buckets>>2)) return 0; /* requested size is too small */ \
+		new_used = (khint32_t*)kmalloc(__kh_fsize(new_n_buckets) * sizeof(khint32_t)); \
+		if (!new_used) return -1; /* not enough memory */ \
+		memset(new_used, 0, __kh_fsize(new_n_buckets) * sizeof(khint32_t)); \
+		n_buckets = h->keys? (khint_t)1U<<h->bits : 0U; \
+		if (n_buckets < new_n_buckets) { /* expand */ \
+			khkey_t *new_keys = (khkey_t*)krealloc((void*)h->keys, new_n_buckets * sizeof(khkey_t)); \
+			if (!new_keys) { kfree(new_used); return -1; } \
+			h->keys = new_keys; \
+		} /* otherwise shrink */ \
+		new_mask = new_n_buckets - 1; \
+		for (j = 0; j != n_buckets; ++j) { \
+			khkey_t key; \
+			if (!__kh_used(h->used, j)) continue; \
+			key = h->keys[j]; \
+			__kh_set_unused(h->used, j); \
+			while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
+				khint_t i; \
+				i = __kh_h2b(__hash_fn(key), new_bits); \
+				while (__kh_used(new_used, i)) i = (i + 1) & new_mask; \
+				__kh_set_used(new_used, i); \
+				if (i < n_buckets && __kh_used(h->used, i)) { /* kick out the existing element */ \
+					{ khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
+					__kh_set_unused(h->used, i); /* mark it as deleted in the old hash table */ \
+				} else { /* write the element and jump out of the loop */ \
+					h->keys[i] = key; \
+					break; \
+				} \
+			} \
+		} \
+		if (n_buckets > new_n_buckets) /* shrink the hash table */ \
+			h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
+		kfree(h->used); /* free the working space */ \
+		h->used = new_used, h->bits = new_bits; \
+		return 0; \
+	}
+
+#define __KHASHL_IMPL_PUT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	SCOPE khint_t prefix##_putp_core(HType *h, const khkey_t *key, khint_t hash, int *absent) { \
+		khint_t n_buckets, i, last, mask; \
+		n_buckets = h->keys? (khint_t)1U<<h->bits : 0U; \
+		*absent = -1; \
+		if (h->count >= (n_buckets>>1) + (n_buckets>>2)) { /* rehashing */ \
+			if (prefix##_resize(h, n_buckets + 1U) < 0) \
+				return n_buckets; \
+			n_buckets = (khint_t)1U<<h->bits; \
+		} /* TODO: implement automatic shrinking; resize() already supports it */ \
+		mask = n_buckets - 1; \
+		i = last = __kh_h2b(hash, h->bits); \
+		while (__kh_used(h->used, i) && !__hash_eq(h->keys[i], *key)) { \
+			i = (i + 1U) & mask; \
+			if (i == last) break; \
+		} \
+		if (!__kh_used(h->used, i)) { /* not present at all */ \
+			h->keys[i] = *key; \
+			__kh_set_used(h->used, i); \
+			++h->count; \
+			*absent = 1; \
+		} else *absent = 0; /* Don't touch h->keys[i] if present */ \
+		return i; \
+	} \
+	SCOPE khint_t prefix##_putp(HType *h, const khkey_t *key, int *absent) { return prefix##_putp_core(h, key, __hash_fn(*key), absent); } \
+	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { return prefix##_putp_core(h, &key, __hash_fn(key), absent); }
+
+#define __KHASHL_IMPL_DEL(SCOPE, HType, prefix, khkey_t, __hash_fn) \
+	SCOPE int prefix##_del(HType *h, khint_t i) { \
+		khint_t j = i, k, mask, n_buckets; \
+		if (h->keys == 0) return 0; \
+		n_buckets = (khint_t)1U<<h->bits; \
+		mask = n_buckets - 1U; \
+		while (1) { \
+			j = (j + 1U) & mask; \
+			if (j == i || !__kh_used(h->used, j)) break; /* j==i only when the table is completely full */ \
+			k = __kh_h2b(__hash_fn(h->keys[j]), h->bits); \
+			if ((j > i && (k <= i || k > j)) || (j < i && (k <= i && k > j))) \
+				h->keys[i] = h->keys[j], i = j; \
+		} \
+		__kh_set_unused(h->used, i); \
+		--h->count; \
+		return 1; \
+	}
+
+#define KHASHL_DECLARE(HType, prefix, khkey_t) \
+	__KHASHL_TYPE(HType, khkey_t) \
+	__KHASHL_PROTOTYPES(HType, prefix, khkey_t)
+
+#define KHASHL_INIT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	__KHASHL_TYPE(HType, khkey_t) \
+	__KHASHL_IMPL_BASIC(SCOPE, HType, prefix) \
+	__KHASHL_IMPL_GET(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	__KHASHL_IMPL_RESIZE(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	__KHASHL_IMPL_PUT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	__KHASHL_IMPL_DEL(SCOPE, HType, prefix, khkey_t, __hash_fn)
+
+/***************************
+ * Ensemble of hash tables *
+ ***************************/
+
+typedef struct {
+	khint_t sub, pos;
+} kh_ensitr_t;
+
+#define KHASHE_INIT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	KHASHL_INIT(KH_LOCAL, HType##_sub, prefix##_sub, khkey_t, __hash_fn, __hash_eq) \
+	typedef struct HType { \
+		khint64_t count:54, bits:8; \
+		HType##_sub *sub; \
+	} HType; \
+	SCOPE HType *prefix##_init(int bits) { \
+		HType *g; \
+		g = (HType*)kcalloc(1, sizeof(*g)); \
+		g->bits = bits; \
+		g->sub = (HType##_sub*)kcalloc(1U<<bits, sizeof(*g->sub)); \
+		return g; \
+	} \
+	SCOPE void prefix##_destroy(HType *g) { \
+		int t; \
+		if (!g) return; \
+		for (t = 0; t < 1<<g->bits; ++t) { kfree((void*)g->sub[t].keys); kfree(g->sub[t].used); } \
+		kfree(g->sub); kfree(g); \
+	} \
+	SCOPE kh_ensitr_t prefix##_getp(const HType *g, const khkey_t *key) { \
+		khint_t hash, low, ret; \
+		kh_ensitr_t r; \
+		HType##_sub *h; \
+		hash = __hash_fn(*key); \
+		low = hash & ((1U<<g->bits) - 1); \
+		h = &g->sub[low]; \
+		ret = prefix##_sub_getp_core(h, key, hash); \
+		if (ret == 1U<<h->bits) r.sub = low, r.pos = (khint_t)-1; \
+		else r.sub = low, r.pos = ret; \
+		return r; \
+	} \
+	SCOPE kh_ensitr_t prefix##_get(const HType *g, const khkey_t key) { return prefix##_getp(g, &key); } \
+	SCOPE kh_ensitr_t prefix##_putp(HType *g, const khkey_t *key, int *absent) { \
+		khint_t hash, low, ret; \
+		kh_ensitr_t r; \
+		HType##_sub *h; \
+		hash = __hash_fn(*key); \
+		low = hash & ((1U<<g->bits) - 1); \
+		h = &g->sub[low]; \
+		ret = prefix##_sub_putp_core(h, key, hash, absent); \
+		if (*absent) ++g->count; \
+		if (ret == 1U<<h->bits) r.sub = low, r.pos = (khint_t)-1; \
+		else r.sub = low, r.pos = ret; \
+		return r; \
+	} \
+	SCOPE kh_ensitr_t prefix##_put(HType *g, const khkey_t key, int *absent) { return prefix##_putp(g, &key, absent); } \
+	SCOPE int prefix##_del(HType *g, kh_ensitr_t itr) { \
+		HType##_sub *h = &g->sub[itr.sub]; \
+		int ret; \
+		ret = prefix##_sub_del(h, itr.pos); \
+		if (ret) --g->count; \
+		return ret; \
+	}
+
+/*****************************
+ * More convenient interface *
+ *****************************/
+
+#define __kh_packed __attribute__ ((__packed__))
+#define __kh_cached_hash(x) ((x).hash)
+
+#define KHASHL_SET_INIT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	typedef struct { khkey_t key; } __kh_packed HType##_s_bucket_t; \
+	static kh_inline khint_t prefix##_s_hash(HType##_s_bucket_t x) { return __hash_fn(x.key); } \
+	static kh_inline int prefix##_s_eq(HType##_s_bucket_t x, HType##_s_bucket_t y) { return __hash_eq(x.key, y.key); } \
+	KHASHL_INIT(KH_LOCAL, HType, prefix##_s, HType##_s_bucket_t, prefix##_s_hash, prefix##_s_eq) \
+	SCOPE HType *prefix##_init(void) { return prefix##_s_init(); } \
+	SCOPE void prefix##_destroy(HType *h) { prefix##_s_destroy(h); } \
+	SCOPE void prefix##_resize(HType *h, khint_t new_n_buckets) { prefix##_s_resize(h, new_n_buckets); } \
+	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { HType##_s_bucket_t t; t.key = key; return prefix##_s_getp(h, &t); } \
+	SCOPE int prefix##_del(HType *h, khint_t k) { return prefix##_s_del(h, k); } \
+	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_s_bucket_t t; t.key = key; return prefix##_s_putp(h, &t, absent); }
+
+#define KHASHL_MAP_INIT(SCOPE, HType, prefix, khkey_t, kh_val_t, __hash_fn, __hash_eq) \
+	typedef struct { khkey_t key; kh_val_t val; } __kh_packed HType##_m_bucket_t; \
+	static kh_inline khint_t prefix##_m_hash(HType##_m_bucket_t x) { return __hash_fn(x.key); } \
+	static kh_inline int prefix##_m_eq(HType##_m_bucket_t x, HType##_m_bucket_t y) { return __hash_eq(x.key, y.key); } \
+	KHASHL_INIT(KH_LOCAL, HType, prefix##_m, HType##_m_bucket_t, prefix##_m_hash, prefix##_m_eq) \
+	SCOPE HType *prefix##_init(void) { return prefix##_m_init(); } \
+	SCOPE void prefix##_destroy(HType *h) { prefix##_m_destroy(h); } \
+	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { HType##_m_bucket_t t; t.key = key; return prefix##_m_getp(h, &t); } \
+	SCOPE int prefix##_del(HType *h, khint_t k) { return prefix##_m_del(h, k); } \
+	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_m_bucket_t t; t.key = key; return prefix##_m_putp(h, &t, absent); }
+
+#define KHASHL_CSET_INIT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
+	typedef struct { khkey_t key; khint_t hash; } __kh_packed HType##_cs_bucket_t; \
+	static kh_inline int prefix##_cs_eq(HType##_cs_bucket_t x, HType##_cs_bucket_t y) { return x.hash == y.hash && __hash_eq(x.key, y.key); } \
+	KHASHL_INIT(KH_LOCAL, HType, prefix##_cs, HType##_cs_bucket_t, __kh_cached_hash, prefix##_cs_eq) \
+	SCOPE HType *prefix##_init(void) { return prefix##_cs_init(); } \
+	SCOPE void prefix##_destroy(HType *h) { prefix##_cs_destroy(h); } \
+	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { HType##_cs_bucket_t t; t.key = key; t.hash = __hash_fn(key); return prefix##_cs_getp(h, &t); } \
+	SCOPE int prefix##_del(HType *h, khint_t k) { return prefix##_cs_del(h, k); } \
+	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_cs_bucket_t t; t.key = key, t.hash = __hash_fn(key); return prefix##_cs_putp(h, &t, absent); }
+
+#define KHASHL_CMAP_INIT(SCOPE, HType, prefix, khkey_t, kh_val_t, __hash_fn, __hash_eq) \
+	typedef struct { khkey_t key; kh_val_t val; khint_t hash; } __kh_packed HType##_cm_bucket_t; \
+	static kh_inline int prefix##_cm_eq(HType##_cm_bucket_t x, HType##_cm_bucket_t y) { return x.hash == y.hash && __hash_eq(x.key, y.key); } \
+	KHASHL_INIT(KH_LOCAL, HType, prefix##_cm, HType##_cm_bucket_t, __kh_cached_hash, prefix##_cm_eq) \
+	SCOPE HType *prefix##_init(void) { return prefix##_cm_init(); } \
+	SCOPE void prefix##_destroy(HType *h) { prefix##_cm_destroy(h); } \
+	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { HType##_cm_bucket_t t; t.key = key; t.hash = __hash_fn(key); return prefix##_cm_getp(h, &t); } \
+	SCOPE int prefix##_del(HType *h, khint_t k) { return prefix##_cm_del(h, k); } \
+	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_cm_bucket_t t; t.key = key, t.hash = __hash_fn(key); return prefix##_cm_putp(h, &t, absent); }
+
+#define KHASHE_MAP_INIT(SCOPE, HType, prefix, khkey_t, kh_val_t, __hash_fn, __hash_eq) \
+	typedef struct { khkey_t key; kh_val_t val; } __kh_packed HType##_m_bucket_t; \
+	static kh_inline khint_t prefix##_m_hash(HType##_m_bucket_t x) { return __hash_fn(x.key); } \
+	static kh_inline int prefix##_m_eq(HType##_m_bucket_t x, HType##_m_bucket_t y) { return __hash_eq(x.key, y.key); } \
+	KHASHE_INIT(KH_LOCAL, HType, prefix##_m, HType##_m_bucket_t, prefix##_m_hash, prefix##_m_eq) \
+	SCOPE HType *prefix##_init(int bits) { return prefix##_m_init(bits); } \
+	SCOPE void prefix##_destroy(HType *h) { prefix##_m_destroy(h); } \
+	SCOPE kh_ensitr_t prefix##_get(const HType *h, khkey_t key) { HType##_m_bucket_t t; t.key = key; return prefix##_m_getp(h, &t); } \
+	SCOPE int prefix##_del(HType *h, kh_ensitr_t k) { return prefix##_m_del(h, k); } \
+	SCOPE kh_ensitr_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_m_bucket_t t; t.key = key; return prefix##_m_putp(h, &t, absent); }
+
+/**************************
+ * Public macro functions *
+ **************************/
+
+#define kh_bucket(h, x) ((h)->keys[x])
+#define kh_size(h) ((h)->count)
+#define kh_capacity(h) ((h)->keys? 1U<<(h)->bits : 0U)
+#define kh_end(h) kh_capacity(h)
+
+#define kh_key(h, x) ((h)->keys[x].key)
+#define kh_val(h, x) ((h)->keys[x].val)
+#define kh_exist(h, x) __kh_used((h)->used, (x))
+
+#define kh_ens_key(g, x) kh_key(&(g)->sub[(x).sub], (x).pos)
+#define kh_ens_val(g, x) kh_val(&(g)->sub[(x).sub], (x).pos)
+#define kh_ens_exist(g, x) kh_exist(&(g)->sub[(x).sub], (x).pos)
+#define kh_ens_is_end(x) ((x).pos == (khint_t)-1)
+#define kh_ens_size(g) ((g)->count)
+
+/**************************************
+ * Common hash and equality functions *
+ **************************************/
+
+#define kh_eq_generic(a, b) ((a) == (b))
+#define kh_eq_str(a, b) (strcmp((a), (b)) == 0)
+#define kh_hash_dummy(x) ((khint_t)(x))
+
+static kh_inline khint_t kh_hash_uint32(khint_t key) {
+	key += ~(key << 15);
+	key ^=  (key >> 10);
+	key +=  (key << 3);
+	key ^=  (key >> 6);
+	key += ~(key << 11);
+	key ^=  (key >> 16);
+	return key;
+}
+
+static kh_inline khint_t kh_hash_uint64(khint64_t key) {
+	key = ~key + (key << 21);
+	key = key ^ key >> 24;
+	key = (key + (key << 3)) + (key << 8);
+	key = key ^ key >> 14;
+	key = (key + (key << 2)) + (key << 4);
+	key = key ^ key >> 28;
+	key = key + (key << 31);
+	return (khint_t)key;
+}
+
+#define KH_FNV_SEED 11
+
+static kh_inline khint_t kh_hash_str(const char *s) { /* FNV1a */
+	khint_t h = KH_FNV_SEED ^ 2166136261U;
+	const unsigned char *t = (const unsigned char*)s;
+	for (; *t; ++t)
+		h ^= *t, h *= 16777619;
+	return h;
+}
+
+static kh_inline khint_t kh_hash_bytes(int len, const unsigned char *s) {
+	khint_t h = KH_FNV_SEED ^ 2166136261U;
+	int i;
+	for (i = 0; i < len; ++i)
+		h ^= s[i], h *= 16777619;
+	return h;
+}
+
+#endif /* __AC_KHASHL_H */
diff --git a/ext/raindrops/linux_inet_diag.c b/ext/raindrops/linux_inet_diag.c
index 799b4da..d0638d7 100644
--- a/ext/raindrops/linux_inet_diag.c
+++ b/ext/raindrops/linux_inet_diag.c
@@ -1,6 +1,5 @@
 #include <ruby.h>
 #include <stdarg.h>
-#include <ruby/st.h>
 #include "my_fileno.h"
 #ifdef __linux__
 
@@ -54,12 +53,23 @@ struct listen_stats {
 	uint32_t listener_p;
 };
 
+/* override khashl.h defaults */
+#define kcalloc(N,Z) xcalloc(N,Z)
+#define kmalloc(Z) xmalloc(Z)
+#define krealloc(P,Z) xrealloc(P,Z)
+#define kfree(P) xfree(P)
+
+#include "khashl.h"
+KHASHL_CMAP_INIT(KH_LOCAL, addr2stats /* type */, a2s /* pfx */,
+		char * /* key */, struct listen_stats * /* val */,
+		kh_hash_str, kh_eq_str)
+
 #define OPLEN (sizeof(struct inet_diag_bc_op) + \
 	       sizeof(struct inet_diag_hostcond) + \
 	       sizeof(struct sockaddr_storage))
 
 struct nogvl_args {
-	st_table *table;
+	addr2stats *a2s;
 	struct iovec iov[3]; /* last iov holds inet_diag bytecode */
 	struct listen_stats stats;
 	int fd;
@@ -106,14 +116,6 @@ static VALUE rb_listen_stats(struct listen_stats *stats)
 	return rb_struct_new(cListenStats, active, queued);
 }
 
-static int st_free_data(st_data_t key, st_data_t value, st_data_t ignored)
-{
-	xfree((void *)key);
-	xfree((void *)value);
-
-	return ST_DELETE;
-}
-
 /*
  * call-seq:
  *      remove_scope_id(ip_address)
@@ -151,34 +153,6 @@ static VALUE remove_scope_id(const char *addr)
         return rv;
 }
 
-static int st_to_hash(st_data_t key, st_data_t value, VALUE hash)
-{
-	struct listen_stats *stats = (struct listen_stats *)value;
-
-	if (stats->listener_p) {
-		VALUE k = remove_scope_id((const char *)key);
-		VALUE v = rb_listen_stats(stats);
-
-		rb_hash_aset(hash, k, v);
-	}
-	return st_free_data(key, value, 0);
-}
-
-static int st_AND_hash(st_data_t key, st_data_t value, VALUE hash)
-{
-	struct listen_stats *stats = (struct listen_stats *)value;
-
-	if (stats->listener_p) {
-		VALUE k = remove_scope_id((const char *)key);
-
-		if (rb_hash_lookup(hash, k) == Qtrue) {
-			VALUE v = rb_listen_stats(stats);
-			rb_hash_aset(hash, k, v);
-		}
-	}
-	return st_free_data(key, value, 0);
-}
-
 static const char *addr_any(sa_family_t family)
 {
 	static const char ipv4[] = "0.0.0.0";
@@ -207,16 +181,17 @@ static void bug_warn_nogvl(const char *fmt, ...)
 	fflush(stderr);
 }
 
-static struct listen_stats *stats_for(st_table *table, struct inet_diag_msg *r)
+static struct listen_stats *stats_for(addr2stats *a2s, struct inet_diag_msg *r)
 {
 	char *host, *key, *port, *old_key;
 	struct listen_stats *stats;
 	socklen_t hostlen;
 	socklen_t portlen = (socklen_t)sizeof("65535");
-	int n;
+	int n, absent;
 	const void *src = r->id.idiag_src;
 	char buf[INET6_ADDRSTRLEN];
 	size_t buf_len;
+	khint_t ki;
 
 	switch (r->idiag_family) {
 	case AF_INET: {
@@ -262,8 +237,9 @@ static struct listen_stats *stats_for(st_table *table, struct inet_diag_msg *r)
 		*key = '\0';
 	}
 
-	if (st_lookup(table, (st_data_t)key, (st_data_t *)&stats))
-		return stats;
+	ki = a2s_get(a2s, key);
+	if (ki < kh_end(a2s))
+		return kh_val(a2s, ki);
 
 	old_key = key;
 
@@ -275,8 +251,10 @@ static struct listen_stats *stats_for(st_table *table, struct inet_diag_msg *r)
 			bug_warn_nogvl("BUG: snprintf: %d\n", n);
 			*key = '\0';
 		}
-		if (st_lookup(table, (st_data_t)key, (st_data_t *)&stats))
-			return stats;
+
+		ki = a2s_get(a2s, key);
+		if (ki < kh_end(a2s))
+			return kh_val(a2s, ki);
 		if (n <= 0) {
 			key = xmalloc(1);
 			*key = '\0';
@@ -291,19 +269,21 @@ static struct listen_stats *stats_for(st_table *table, struct inet_diag_msg *r)
 		memcpy(key, old_key, old_len);
 	}
 	stats = xcalloc(1, sizeof(struct listen_stats));
-	st_insert(table, (st_data_t)key, (st_data_t)stats);
+	ki = a2s_put(a2s, key, &absent); /* fails on OOM due to xrealloc */
+	assert(absent > 0 && "redundant put");
+	kh_val(a2s, ki) = stats;
 	return stats;
 }
 
-static void table_incr_active(st_table *table, struct inet_diag_msg *r)
+static void table_incr_active(addr2stats *a2s, struct inet_diag_msg *r)
 {
-	struct listen_stats *stats = stats_for(table, r);
+	struct listen_stats *stats = stats_for(a2s, r);
 	++stats->active;
 }
 
-static void table_set_queued(st_table *table, struct inet_diag_msg *r)
+static void table_set_queued(addr2stats *a2s, struct inet_diag_msg *r)
 {
-	struct listen_stats *stats = stats_for(table, r);
+	struct listen_stats *stats = stats_for(a2s, r);
 	stats->listener_p = 1;
 	stats->queued += r->idiag_rqueue;
 }
@@ -319,13 +299,13 @@ static inline void r_acc(struct nogvl_args *args, struct inet_diag_msg *r)
 	if (r->idiag_inode == 0)
 		return;
 	if (r->idiag_state == TCP_ESTABLISHED) {
-		if (args->table)
-			table_incr_active(args->table, r);
+		if (args->a2s)
+			table_incr_active(args->a2s, r);
 		else
 			args->stats.active++;
 	} else { /* if (r->idiag_state == TCP_LISTEN) */
-		if (args->table)
-			table_set_queued(args->table, r);
+		if (args->a2s)
+			table_set_queued(args->a2s, r);
 		else
 			args->stats.queued += r->idiag_rqueue;
 	}
@@ -443,11 +423,18 @@ static VALUE diag(void *ptr)
 	}
 out:
 	/* prepare to raise, free memory before reacquiring GVL */
-	if (err && args->table) {
+	if (err && args->a2s) {
 		int save_errno = errno;
+		khint_t ki;
+
+		/* no kh_foreach* in khashl.h (unlike original khash.h) */
+		for (ki = 0; ki < kh_end(args->a2s); ki++) {
+			if (!kh_exist(args->a2s, ki)) continue;
 
-		st_foreach(args->table, st_free_data, 0);
-		st_free_table(args->table);
+			xfree(kh_key(args->a2s, ki));
+			xfree(kh_val(args->a2s, ki));
+		}
+		a2s_destroy(args->a2s);
 		errno = save_errno;
 	}
 	return (VALUE)err;
@@ -563,7 +550,7 @@ static void gen_bytecode(struct iovec *iov, union any_addr *inet)
 
 /*
  * n.b. we may safely raise here because an error will cause diag()
- * to free args->table
+ * to free args->a2s
  */
 static void nl_errcheck(VALUE r)
 {
@@ -590,6 +577,7 @@ static VALUE tcp_stats(struct nogvl_args *args, VALUE addr)
 	return rb_listen_stats(&args->stats);
 }
 
+/* part of the Ruby rb_hash_* API still relies on st_data_t... */
 static int drop_placeholders(st_data_t k, st_data_t v, st_data_t ign)
 {
 	if ((VALUE)v == Qtrue)
@@ -615,6 +603,9 @@ static VALUE tcp_listener_stats(int argc, VALUE *argv, VALUE self)
 	VALUE rv = rb_hash_new();
 	struct nogvl_args args;
 	VALUE addrs, sock, buf;
+	khint_t ki;
+	struct listen_stats *stats;
+	char *key;
 
 	rb_scan_args(argc, argv, "02", &addrs, &sock);
 
@@ -626,7 +617,7 @@ static VALUE tcp_listener_stats(int argc, VALUE *argv, VALUE self)
 	buf = rb_str_buf_new(page_size);
 	args.iov[2].iov_len = OPLEN;
 	args.iov[2].iov_base = RSTRING_PTR(buf);
-	args.table = NULL;
+	args.a2s = NULL;
 	sock = NIL_P(sock) ? rb_funcall(cIDSock, id_new, 0)
 			: rb_io_get_io(sock);
 	args.fd = my_fileno(sock);
@@ -655,7 +646,7 @@ static VALUE tcp_listener_stats(int argc, VALUE *argv, VALUE self)
 		/* fall through */
 	}
 	case T_NIL:
-		args.table = st_init_strtable();
+		args.a2s = a2s_init();
 		gen_bytecode_all(&args.iov[2]);
 		break;
 	default:
@@ -666,8 +657,20 @@ static VALUE tcp_listener_stats(int argc, VALUE *argv, VALUE self)
 
 	nl_errcheck(rd_fd_region(diag, &args, args.fd));
 
-	st_foreach(args.table, NIL_P(addrs) ? st_to_hash : st_AND_hash, rv);
-	st_free_table(args.table);
+	/* no kh_foreach* in khashl.h (unlike original khash.h) */
+	for (ki = 0; ki < kh_end(args.a2s); ki++) {
+		if (!kh_exist(args.a2s, ki)) continue;
+		key = kh_key(args.a2s, ki);
+		stats = kh_val(args.a2s, ki);
+		if (stats->listener_p) {
+			VALUE k = remove_scope_id(key);
+			if (NIL_P(addrs) || rb_hash_lookup(rv, k) == Qtrue)
+				rb_hash_aset(rv, k, rb_listen_stats(stats));
+		}
+		xfree(key);
+		xfree(stats);
+	}
+	a2s_destroy(args.a2s);
 
 	if (RHASH_SIZE(rv) > 1)
 		rb_hash_foreach(rv, drop_placeholders, Qfalse);


* [PATCH 3/2] khashl: use ruby_xrealloc2 to avoid overflow
  2024-03-24  0:19 ` [PATCH 2/2] switch to khashl for hash table outside of GVL Eric Wong
@ 2024-03-24 21:10   ` Eric Wong
  0 siblings, 0 replies; 4+ messages in thread
From: Eric Wong @ 2024-03-24 21:10 UTC (permalink / raw)
  To: raindrops-public

While no user is likely to have enough listeners to overflow
the allocation size multiplication, just use ruby_xrealloc2 to
be safe since it's already provided by Ruby (and AFAIK
reallocarray(3) isn't standardized).
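
The change is only in how the allocation size is computed; a
before/after sketch of the resize path edited below:

	/* before: the multiplication can wrap around on 32-bit
	 * size_t, silently under-allocating the keys array: */
	h->keys = (khkey_t *)krealloc((void *)h->keys,
				new_n_buckets * sizeof(khkey_t));

	/* after: ruby_xrealloc2(ptr, nmemb, size) does the same
	 * multiplication with an overflow check and raises
	 * instead of wrapping: */
	h->keys = ruby_xrealloc2(h->keys, new_n_buckets,
				sizeof(khkey_t));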
---
 ext/raindrops/khashl.h          | 8 ++++----
 ext/raindrops/linux_inet_diag.c | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/ext/raindrops/khashl.h b/ext/raindrops/khashl.h
index df97c7f..425d95e 100644
--- a/ext/raindrops/khashl.h
+++ b/ext/raindrops/khashl.h
@@ -164,9 +164,8 @@ static kh_inline khint_t __kh_h2b(khint_t hash, khint_t bits) { return hash * 26
 		memset(new_used, 0, __kh_fsize(new_n_buckets) * sizeof(khint32_t)); \
 		n_buckets = h->keys? (khint_t)1U<<h->bits : 0U; \
 		if (n_buckets < new_n_buckets) { /* expand */ \
-			khkey_t *new_keys = (khkey_t*)krealloc((void*)h->keys, new_n_buckets * sizeof(khkey_t)); \
-			if (!new_keys) { kfree(new_used); return -1; } \
-			h->keys = new_keys; \
+			h->keys = ruby_xrealloc2(h->keys, new_n_buckets, \
+						sizeof(khkey_t)); \
 		} /* otherwise shrink */ \
 		new_mask = new_n_buckets - 1; \
 		for (j = 0; j != n_buckets; ++j) { \
@@ -189,7 +188,8 @@ static kh_inline khint_t __kh_h2b(khint_t hash, khint_t bits) { return hash * 26
 			} \
 		} \
 		if (n_buckets > new_n_buckets) /* shrink the hash table */ \
-			h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
+			h->keys = ruby_xrealloc2(h->keys, new_n_buckets, \
+						sizeof(khkey_t)); \
 		kfree(h->used); /* free the working space */ \
 		h->used = new_used, h->bits = new_bits; \
 		return 0; \
diff --git a/ext/raindrops/linux_inet_diag.c b/ext/raindrops/linux_inet_diag.c
index 058936b..79f24bb 100644
--- a/ext/raindrops/linux_inet_diag.c
+++ b/ext/raindrops/linux_inet_diag.c
@@ -53,10 +53,10 @@ struct listen_stats {
 	uint32_t listener_p;
 };
 
-/* override khashl.h defaults */
+/* override khashl.h defaults, these run w/o GVL */
 #define kcalloc(N,Z) xcalloc(N,Z)
 #define kmalloc(Z) xmalloc(Z)
-#define krealloc(P,Z) xrealloc(P,Z)
+#define krealloc(P,Z) abort() /* never called, we use ruby_xrealloc2 */
 #define kfree(P) xfree(P)
 
 #include "khashl.h"


Code repositories for project(s) associated with this public inbox

	https://yhbt.net/raindrops.git/
