about summary refs log tree commit homepage
path: root/alloc.c
diff options
context:
space:
mode:
author: Eric Wong <normalperson@yhbt.net> 2013-06-27 03:54:39 +0000
committer: Eric Wong <normalperson@yhbt.net> 2013-07-10 00:55:55 +0000
commit: 160e768fe8d6043af1e435daeb35d5c92e05de11 (patch)
tree: 13399e0ff1e82be38a531cbce29127b419d2954c /alloc.c
parent: 331e7a1300ae59a052763ffecc77b45a56e2deb3 (diff)
download: cmogstored-160e768fe8d6043af1e435daeb35d5c92e05de11.tar.gz
Reattaching/reusing read buffers allows us to avoid repeated
reallocation/growth/free when clients repeatedly send us large headers.
This may also increase cache-hits by favoring recently-used buffers as
long as fragmentation is kept in check.  The fragmentation should be
no worse than it is currently, due to the existing detached nature of rbufs.
Diffstat (limited to 'alloc.c')
-rw-r--r--  alloc.c | 27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/alloc.c b/alloc.c
index bddad4d..7a14173 100644
--- a/alloc.c
+++ b/alloc.c
@@ -193,3 +193,30 @@ void *mog_fsbuf_get(size_t *size)
 
         return ptr;
 }
+
+/*
+ * attempts to reattach an rbuf belonging to a previously-idle client
+ * if it makes sense to reattach.
+ *
+ * We want to favor rbufs attached to clients if they are bigger than
+ * the thread-local one.
+ */
+void mog_rbuf_reattach_and_null(struct mog_rbuf **ptrptr)
+{
+        struct mog_rbuf *rbuf = *ptrptr;
+
+        if (!rbuf)
+                return;
+        *ptrptr = NULL;
+
+        assert(rbuf != tls_rbuf && "cannot reattach, already attached");
+        if (tls_rbuf) {
+                /* we never want to swap a small buffer for a big buffer */
+                if (rbuf->rcapa < tls_rbuf->rcapa) {
+                        mog_rbuf_free(rbuf);
+                        return;
+                }
+                free(tls_rbuf);
+        }
+        tls_rbuf = rbuf;
+}