about summary refs log tree commit homepage
path: root/ioq.c
diff options
context:
space:
mode:
Diffstat (limited to 'ioq.c')
-rw-r--r--  ioq.c  38
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/ioq.c b/ioq.c
index 028b225..829e00d 100644
--- a/ioq.c
+++ b/ioq.c
@@ -19,11 +19,20 @@ void mog_ioq_init(struct mog_ioq *ioq, struct mog_svc *svc, size_t val)
         ioq->cur = val;
         ioq->max = val;
         ioq->svc = svc;
+        ioq->contended = false;
         SIMPLEQ_INIT(&ioq->ioq_head);
         CHECK(int, 0, pthread_mutex_init(&ioq->mtx, NULL));
 }
 
 /*
+ * this is only a hint, so no explicit memory barriers or atomics
+ */
+static inline void ioq_set_contended(struct mog_ioq *ioq)
+{
+        ioq->contended = true;
+}
+
+/*
  * This is like sem_trywait.  Each thread is only allowed to acquire
  * one ioq at once.
  *
@@ -43,6 +52,7 @@ bool mog_ioq_ready(struct mog_ioq *ioq, struct mog_fd *client_mfd)
         } else {
                 client_mfd->ioq_blocked = 1;
                 SIMPLEQ_INSERT_TAIL(&ioq->ioq_head, client_mfd, ioqent);
+                ioq_set_contended(ioq);
         }
 
         CHECK(int, 0, pthread_mutex_unlock(&ioq->mtx));
@@ -70,8 +80,13 @@ void mog_ioq_next(struct mog_ioq *check_ioq)
         if (mog_ioq_current->cur <= mog_ioq_current->max) {
                 /* wake up any waiters */
                 client_mfd = SIMPLEQ_FIRST(&mog_ioq_current->ioq_head);
-                if (client_mfd)
+                if (client_mfd) {
                         SIMPLEQ_REMOVE_HEAD(&mog_ioq_current->ioq_head, ioqent);
+
+                        /* if there's another head, we're still contended */
+                        if (SIMPLEQ_FIRST(&mog_ioq_current->ioq_head))
+                                ioq_set_contended(mog_ioq_current);
+                }
         } else {
                 /* mog_ioq_adjust was called and lowered our capacity */
                 mog_ioq_current->cur--;
@@ -85,6 +100,27 @@ void mog_ioq_next(struct mog_ioq *check_ioq)
         mog_ioq_current = NULL;
 }
 
+/*
+ * Returns true if the currently held ioq is contended.
+ * This releases the contended flag if it is set, so the caller
+ * is expected to yield the current thread shortly afterwards.
+ * This is only a hint.
+ */
+bool mog_ioq_contended(void)
+{
+        struct mog_ioq *cur = mog_ioq_current;
+
+        /* assume contended for non /devXXX* paths */
+        if (!cur)
+                return true;
+
+        /*
+         * we only want to minimize the threads hitting true, so we use
+         * an atomic exchange and hope for the best.  This is only a hint.
+         */
+        return __sync_bool_compare_and_swap(&cur->contended, true, false);
+}
+
 void mog_ioq_destroy(struct mog_ioq *ioq)
 {
         CHECK(int, 0, pthread_mutex_destroy(&ioq->mtx));