about summary refs log tree commit homepage
diff options
context:
space:
mode:
author Eric Wong <normalperson@yhbt.net> 2013-02-11 09:48:20 +0000
committer Eric Wong <normalperson@yhbt.net> 2013-02-11 09:48:20 +0000
commit 4ccf06a600ce31c6dbd61d9c44b491233758c18b (patch)
tree 49f86ddf5346d4df5f9bfbae424dc0565cedc35b
parent f54e27e0ec0a520c0a079d6e8428eeefdcd366ab (diff)
download cmogstored-4ccf06a600ce31c6dbd61d9c44b491233758c18b.tar.gz
Since we now update future copies of by_dev offline and only
need a lock to swap in the new one, contention for by_dev should
be less of a problem than it was before.  This should make
reader-writer locks an unnecessary risk.

Reader-writer locks are riskier since writer starvation can
potentially be an issue with many readers.
-rw-r--r-- mnt.c 28
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/mnt.c b/mnt.c
index 5acb75c..8b34255 100644
--- a/mnt.c
+++ b/mnt.c
@@ -8,7 +8,7 @@
  */
 #include "cmogstored.h"
 
-static pthread_rwlock_t by_dev_lock = PTHREAD_RWLOCK_INITIALIZER;
+static pthread_mutex_t by_dev_lock = PTHREAD_MUTEX_INITIALIZER;
 
 /*
  * by_dev maps (system) device IDs to a mount_entry; mount_entry structs may
@@ -121,11 +121,11 @@ void mog_mnt_refresh(void)
 
         CHECK(int, 0, pthread_mutex_lock(&refresh_lock) ); /* protects old */
 
-        CHECK(int, 0, pthread_rwlock_rdlock(&by_dev_lock) );
+        CHECK(int, 0, pthread_mutex_lock(&by_dev_lock) );
         old = by_dev; /* save early for validation */
         if (old)
                 n = hash_get_n_buckets_used(old);
-        CHECK(int, 0, pthread_rwlock_unlock(&by_dev_lock) );
+        CHECK(int, 0, pthread_mutex_unlock(&by_dev_lock) );
 
         if (old) {
                 mog_iou_cleanup_begin();
@@ -133,11 +133,11 @@ void mog_mnt_refresh(void)
                 mnt_populate(new); /* slow, can stat all devices */
 
                 /* quickly swap in the new mount list */
-                CHECK(int, 0, pthread_rwlock_wrlock(&by_dev_lock) );
+                CHECK(int, 0, pthread_mutex_lock(&by_dev_lock) );
                 assert(old == by_dev &&
                        "by_dev hash modified during update");
                 by_dev = new;
-                CHECK(int, 0, pthread_rwlock_unlock(&by_dev_lock) );
+                CHECK(int, 0, pthread_mutex_unlock(&by_dev_lock) );
 
                 /*
                  * must cleanup _after_ replacing by_dev, since readers
@@ -147,12 +147,12 @@ void mog_mnt_refresh(void)
                 hash_free(old);
         } else {
                 /* once-only initialization */
-                CHECK(int, 0, pthread_rwlock_wrlock(&by_dev_lock) );
+                CHECK(int, 0, pthread_mutex_lock(&by_dev_lock) );
                 assert(by_dev == NULL &&
                        "by_dev exists during initialization");
                 by_dev = mnt_new(7);
                 mnt_populate(by_dev);
-                CHECK(int, 0, pthread_rwlock_unlock(&by_dev_lock) );
+                CHECK(int, 0, pthread_mutex_unlock(&by_dev_lock) );
                 atexit(mnt_atexit);
         }
 
@@ -168,7 +168,7 @@ const struct mount_entry * mog_mnt_acquire(dev_t st_dev)
         struct mount_entry me = { .me_dev = st_dev };
         struct mount_entry *rv;
 
-        CHECK(int, 0, pthread_rwlock_rdlock(&by_dev_lock) );
+        CHECK(int, 0, pthread_mutex_lock(&by_dev_lock) );
         rv = hash_lookup(by_dev, &me);
 
         /* user must release this via mog_mnt_release if non-NULL */
@@ -185,7 +185,7 @@ const struct mount_entry * mog_mnt_acquire(dev_t st_dev)
                 return rv_me ? rv_me : rv;
         }
 
-        CHECK(int, 0, pthread_rwlock_unlock(&by_dev_lock) );
+        CHECK(int, 0, pthread_mutex_unlock(&by_dev_lock) );
         return NULL;
 }
 
@@ -201,7 +201,7 @@ void mog_mnt_release(const struct mount_entry *me)
                 check_me = check_me->me_next;
 
         assert(check_me == me && "did not release acquired mount_entry");
-        CHECK(int, 0, pthread_rwlock_unlock(&by_dev_lock) );
+        CHECK(int, 0, pthread_mutex_unlock(&by_dev_lock) );
 }
 
 struct mnt_update {
@@ -260,13 +260,9 @@ void mog_mnt_update_util(struct mog_iostat *iostat)
         assert(sizeof(update.util) == sizeof(iostat->util));
         memcpy(&update.util, iostat->util, sizeof(update.util));
 
-        /*
-         * rdlock is enough here.  mog_iou_* locks internally
-         * by_dev_lock only needs to protect the structure of the hash table.
-         */
-        CHECK(int, 0, pthread_rwlock_rdlock(&by_dev_lock) );
+        CHECK(int, 0, pthread_mutex_lock(&by_dev_lock) );
         (void)hash_do_for_each(by_dev, update_util_each, &update);
-        CHECK(int, 0, pthread_rwlock_unlock(&by_dev_lock) );
+        CHECK(int, 0, pthread_mutex_unlock(&by_dev_lock) );
 
         mog_free(update.prefix);
 }