author     Eric Wong <normalperson@yhbt.net>  2012-02-25 11:41:00 +0000
committer  Eric Wong <normalperson@yhbt.net>  2012-02-25 11:51:02 +0000
commit     e5991473a91697a4924530b87051c764e538ce15
tree       05981669a42de64cc0a35cacd44fe6c2aeab1eeb  /queue_loop.c
parent     025e09f5be3d1a42a5f585f80b18d5947da5e766
download   cmogstored-e5991473a91697a4924530b87051c764e538ce15.tar.gz
By going into single-threaded mode, we can drastically simplify our
shutdown sequence to avoid race conditions.  This also lets us avoid
additional overhead during normal runtime, as all the shutdown-specific
logic is isolated to a few portions of the code.

Like all graceful shutdown schemes, this one is still vulnerable to
race conditions due to network latency, but it should be no worse
than any other server.  Fortunately, all requests we service are
idempotent.
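
For illustration only, here is a minimal sketch of the shutdown ordering
described above.  Every identifier below except pthread_join() is a
hypothetical stand-in and not taken from cmogstored; the point is simply
that the worker threads are reaped first, so the final drain runs with
no other thread touching the queue and needs no extra locking.

#include <pthread.h>
#include <stddef.h>

struct example_queue { int unused; };   /* stand-in for struct mog_queue */

/* stand-in for mog_queue_quit_loop(): drain the remaining clients */
static void example_quit_loop(struct example_queue *q)
{
        (void)q;        /* real code steps each remaining client to completion */
}

static void example_shutdown(struct example_queue *q,
                             pthread_t *workers, size_t nworkers)
{
        size_t i;

        /* wait for every worker thread to exit; after this point only
         * the calling thread can touch the queue */
        for (i = 0; i < nworkers; i++)
                pthread_join(workers[i], NULL);

        /* single-threaded drain of whatever clients remain active */
        example_quit_loop(q);
}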
Diffstat (limited to 'queue_loop.c')
-rw-r--r--  queue_loop.c  32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/queue_loop.c b/queue_loop.c
index 117c0d6..3e788b9 100644
--- a/queue_loop.c
+++ b/queue_loop.c
@@ -85,3 +85,35 @@ void * mog_queue_loop(void *arg)
 
         return NULL;
 }
+
+static void queue_quit_step(struct mog_fd *mfd)
+{
+        switch (mfd->fd_type) {
+        case MOG_FD_TYPE_MGMT: mog_mgmt_quit_step(mfd); return;
+        case MOG_FD_TYPE_HTTP: mog_http_quit_step(mfd); return;
+        case MOG_FD_TYPE_FILE:
+        case MOG_FD_TYPE_QUEUE:
+        case MOG_FD_TYPE_SVC:
+                assert(0 && "invalid fd_type in queue_quit_step");
+        default:
+                break;
+        }
+}
+
+/* called at shutdown when only one thread is active */
+void mog_queue_quit_loop(struct mog_queue *queue)
+{
+        struct mog_fd *mfd;
+
+        while (mog_nr_active_at_quit) {
+                assert(mog_nr_active_at_quit <= (size_t)INT_MAX
+                       && "mog_nr_active_at_quit underflow");
+
+                if ((mfd = mog_activeq_trytake(queue))) {
+                        queue_quit_step(mfd);
+                } else {
+                        if ((mfd = mog_idleq_wait(queue, -1)))
+                                queue_quit_step(mfd);
+                }
+        }
+}
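
The call site for mog_queue_quit_loop() is outside this diff, so the
snippet below is only an assumed sketch of how it could be invoked once
the other queue threads have been joined; example_graceful_quit() is a
hypothetical name, and the cmogstored declarations for struct mog_queue
and mog_queue_quit_loop() are assumed to be in scope:

        /* hypothetical call site, not from this commit */
        static void example_graceful_quit(struct mog_queue *q)
        {
                /* every other queue thread has already exited, so the
                 * drain below runs single-threaded */
                mog_queue_quit_loop(q); /* returns when no clients remain active */
        }

Blocking in mog_idleq_wait(queue, -1) when no client is immediately
runnable presumably lets the drain loop sleep rather than spin while the
last clients finish their final requests.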