author    Eric Wong <normalperson@yhbt.net>  2012-12-05 00:35:40 +0000
committer Eric Wong <normalperson@yhbt.net>  2012-12-05 00:50:34 +0000
commit    ef7bd6492102c02abe175d2adb81b8566e697bac (patch)
tree      e204ec801b6e9b4f4565924cad0f8bca793d2a14 /queue_loop.c
parent    0de5d9ef69b344138beacb9db5664c36215a4a59 (diff)
download  cmogstored-ef7bd6492102c02abe175d2adb81b8566e697bac.tar.gz
We now assume any non-zero timeout (not just infinite timeout)
is cancellable as well as interruptible.  This means
mog_idleq_wait will no longer retry blindly on EINTR.

Handling of explicit device refreshes and aio_threads
is now easier to understand and follow.
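
To illustrate the change described above, here is a minimal sketch (not the
actual cmogstored code) of an epoll-based idle wait, contrasting the old
blind EINTR retry with the new behavior where an interrupted wait returns to
the caller so cancellation or a device refresh can take effect.  The queue
layout ("epoll_fd") and helper names are assumptions for illustration only.

/*
 * Illustrative sketch only; the queue layout ("epoll_fd") and helper
 * names are hypothetical, not the real cmogstored internals.
 */
#include <sys/epoll.h>
#include <errno.h>
#include <pthread.h>

struct sketch_queue { int epoll_fd; };

/* old behavior: EINTR was retried blindly, so signals and
 * pthread_cancel() requests could be swallowed inside the wait */
static int wait_retry_on_eintr(struct sketch_queue *q, struct epoll_event *ev)
{
        int rc;

        do {
                rc = epoll_wait(q->epoll_fd, ev, 1, -1);
        } while (rc < 0 && errno == EINTR);

        return rc;
}

/* new behavior: EINTR is reported to the caller, giving it a chance
 * to act on cancellation or an explicit device refresh before it
 * decides to sleep again */
static int wait_interruptible(struct sketch_queue *q, struct epoll_event *ev,
                              int timeout_msec)
{
        int rc = epoll_wait(q->epoll_fd, ev, 1, timeout_msec);

        if (rc < 0 && errno == EINTR)
                pthread_testcancel(); /* honor pending pthread_cancel() */

        return rc;
}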
Diffstat (limited to 'queue_loop.c')
 queue_loop.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/queue_loop.c b/queue_loop.c
index df32fe6..a799de4 100644
--- a/queue_loop.c
+++ b/queue_loop.c
@@ -53,7 +53,7 @@ void * mog_queue_loop(void *arg)
 
         for (;;) {
                 while (mfd == NULL)
-                        mfd = mog_idleq_wait(q, MOG_CANCELLABLE);
+                        mfd = mog_idleq_wait(q, -1);
                 switch (mog_queue_step(mfd)) {
                 case MOG_NEXT_ACTIVE:
                         mfd = queue_xchg_maybe(q, mfd);
@@ -67,7 +67,7 @@ void * mog_queue_loop(void *arg)
                 case MOG_NEXT_IGNORE:
                 case MOG_NEXT_CLOSE:
                        /* already handled */
-                        mfd = mog_idleq_wait(q, MOG_CANCELLABLE);
+                        mfd = mog_idleq_wait(q, -1);
                 }
         }
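
The hunks above replace the MOG_CANCELLABLE flag with a timeout of -1, so the
second argument to mog_idleq_wait now expresses how long to wait, with any
non-zero value (including the infinite -1 used here) treated as cancellable
as well as interruptible.  The sketch below shows one way such a wrapper
could map the timeout onto the thread's cancellation state; it is an
assumption for illustration, not the real mog_idleq_wait, and idleq_poll is a
hypothetical stand-in for the underlying epoll/kqueue wait.

/*
 * Hypothetical wrapper, not the real mog_idleq_wait: any non-zero
 * timeout (including the infinite -1 used in mog_queue_loop) enables
 * thread cancellation around the wait.
 */
#include <pthread.h>
#include <stddef.h>

struct mog_fd;
struct mog_queue;

/* stand-in for the underlying epoll/kqueue wait; returns NULL on
 * timeout or interruption in this sketch */
static struct mog_fd *idleq_poll(struct mog_queue *q, int timeout_msec)
{
        (void)q;
        (void)timeout_msec;
        return NULL;
}

static struct mog_fd *idleq_wait_sketch(struct mog_queue *q, int timeout_msec)
{
        struct mog_fd *mfd;
        int cancellable = (timeout_msec != 0);
        int saved;

        if (cancellable)
                pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &saved);

        /* an interrupted wait returns here instead of retrying */
        mfd = idleq_poll(q, timeout_msec);

        if (cancellable)
                pthread_setcancelstate(saved, &saved);

        return mfd;
}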