From 8f9b7e28eaf74e5fdc72328f0dfb890d92c02ec1 Mon Sep 17 00:00:00 2001
From: Eric Wong
Date: Sun, 14 Jul 2013 00:46:10 +0000
Subject: ioq: reset internal queues during requeue/shutdown

This should avoid concurrency bugs where a client may run in
multiple threads if we switch to multi-threaded graceful shutdown.
---
 cmogstored.h |  3 +++
 dev.c        | 10 ++++++++++
 exit.c       |  6 ++++--
 ioq.c        | 12 ++++++++++++
 svc_dev.c    |  8 ++++++++
 5 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/cmogstored.h b/cmogstored.h
index ffd0668..ffea4b0 100644
--- a/cmogstored.h
+++ b/cmogstored.h
@@ -380,6 +380,7 @@ size_t mog_dev_hash(const void *, size_t tablesize);
 bool mog_dev_cmp(const void *a, const void *b);
 void mog_dev_free(void *devptr);
 bool mog_dev_user_rescale_i(void *devp, void *svcp);
+bool mog_dev_requeue_prepare(void *devp, void *ign);
 
 /* valid_path.rl */
 int mog_valid_path(const char *buf, size_t len);
@@ -404,6 +405,7 @@ void mog_svc_dev_shutdown(void);
 void mog_mkusage_all(void);
 void mog_svc_dev_user_rescale(struct mog_svc *, size_t ndev_new);
 void mog_svc_dev_quit_prepare(struct mog_svc *);
+void mog_svc_dev_requeue_prepare(struct mog_svc *svc);
 
 /* cloexec_detect.c */
 extern bool mog_cloexec_atomic;
@@ -650,3 +652,4 @@ void mog_ioq_next(struct mog_ioq *);
 void mog_ioq_adjust(struct mog_ioq *, unsigned value);
 void mog_ioq_destroy(struct mog_ioq *);
 bool mog_ioq_unblock(struct mog_fd *);
+void mog_ioq_requeue_prepare(struct mog_ioq *);
diff --git a/dev.c b/dev.c
index 64deec4..dd46fe5 100644
--- a/dev.c
+++ b/dev.c
@@ -260,3 +260,13 @@ bool mog_dev_user_rescale_i(void *devp, void *svcp)
 
 	return true; /* continue iteration */
 }
+
+bool mog_dev_requeue_prepare(void *devp, void *ign)
+{
+	struct mog_dev *dev = devp;
+
+	mog_ioq_requeue_prepare(&dev->ioq);
+	mog_ioq_requeue_prepare(&dev->fsckq);
+
+	return true; /* continue iteration */
+}
diff --git a/exit.c b/exit.c
index f5194a3..d27fcfd 100644
--- a/exit.c
+++ b/exit.c
@@ -31,12 +31,14 @@ static bool svc_quit_i(void *svcptr, void *ignored)
 	return true;
 }
 
-static bool svc_queue_set(void *svcptr, void *queue)
+static bool svc_requeue_prepare(void *svcptr, void *queue)
 {
 	struct mog_svc *svc = svcptr;
 
 	svc->queue = queue;
 
+	mog_svc_dev_requeue_prepare(svc);
+
 	return true;
 }
 
@@ -48,7 +50,7 @@ _Noreturn void cmogstored_exit(void)
 {
 	mog_svc_dev_shutdown();
 	mog_queue_stop(mog_notify_queue);
-	mog_svc_each(svc_queue_set, mog_notify_queue);
+	mog_svc_each(svc_requeue_prepare, mog_notify_queue);
 	mog_fdmap_requeue(mog_notify_queue);
 	mog_queue_quit_loop(mog_notify_queue);
 	exit(EXIT_SUCCESS);
diff --git a/ioq.c b/ioq.c
index 772da38..0000bb8 100644
--- a/ioq.c
+++ b/ioq.c
@@ -24,6 +24,18 @@ void mog_ioq_init(struct mog_ioq *ioq, struct mog_svc *svc, size_t val)
 	CHECK(int, 0, pthread_mutex_init(&ioq->mtx, NULL));
 }
 
+/*
+ * we do not need this yet, but it will allow us to have multi-threaded
+ * shutdown in the future (we currently drop into single-threaded mode)
+ */
+void mog_ioq_requeue_prepare(struct mog_ioq *ioq)
+{
+	assert(ioq->cur >= ioq->max &&
+	       "we should only get here when idle before mog_fdmap_requeue");
+
+	SIMPLEQ_INIT(&ioq->ioq_head);
+}
+
 /*
  * this is only a hint, so no explicit memory barriers or atomics
  */
diff --git a/svc_dev.c b/svc_dev.c
index 93e3c43..e57f0b6 100644
--- a/svc_dev.c
+++ b/svc_dev.c
@@ -318,6 +318,14 @@ static void mog_svc_dev_rescale_all(struct mog_svc *svc)
 	CHECK(int, 0, pthread_mutex_unlock(&svc->by_mog_devid_lock));
 }
 
+void mog_svc_dev_requeue_prepare(struct mog_svc *svc)
+{
+	/* iterate through each device of this svc */
+	CHECK(int, 0, pthread_mutex_lock(&svc->by_mog_devid_lock));
+	hash_do_for_each(svc->by_mog_devid, mog_dev_requeue_prepare, svc);
+	CHECK(int, 0, pthread_mutex_unlock(&svc->by_mog_devid_lock));
+}
+
 /* rescaling only happens in the main thread */
 void mog_svc_dev_user_rescale(struct mog_svc *svc, size_t ndev_new)
 {
-- 
cgit v1.2.3-24-ge0c7
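Below is a minimal, self-contained sketch of the reset-when-idle idea
from the ioq.c hunk above. It uses STAILQ from <sys/queue.h> in place
of the SIMPLEQ cmogstored uses, and the names here (struct toy_ioq,
struct waiter, toy_ioq_requeue_prepare) are illustrative stand-ins,
not the project's actual types:

#include <assert.h>
#include <stddef.h>
#include <pthread.h>
#include <sys/queue.h>

struct waiter {				/* hypothetical queued client */
	int fd;
	STAILQ_ENTRY(waiter) q_entry;
};

struct toy_ioq {
	pthread_mutex_t mtx;
	size_t cur;			/* free slots remaining */
	size_t max;			/* configured slot capacity */
	STAILQ_HEAD(, waiter) q_head;	/* clients waiting for a slot */
};

/*
 * like mog_ioq_requeue_prepare above: only valid while the queue is
 * idle (cur is back up to max, so no client currently holds a slot).
 * Waiting clients are not freed here; they are assumed to be reachable
 * through the fd map (cf. mog_fdmap_requeue), so the ioq must forget
 * its own list to ensure no client can be run from two places at once.
 */
static void toy_ioq_requeue_prepare(struct toy_ioq *ioq)
{
	assert(ioq->cur >= ioq->max && "ioq not idle");
	STAILQ_INIT(&ioq->q_head);
}

Resetting with a single STAILQ_INIT rather than popping entries one at
a time is only safe at this exact point in shutdown: once every client
is requeued from the fd map, a stale ioq entry is precisely the kind of
double-run concurrency bug the commit message refers to.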