diff options
author | Eric Wong <normalperson@yhbt.net> | 2013-07-17 01:39:24 +0000 |
---|---|---|
committer | Eric Wong <normalperson@yhbt.net> | 2013-07-17 01:44:00 +0000 |
commit | 7c49988ebf5c176cadd4a9e287e443d49a2cdeec (patch) | |
tree | 0b41e3d84ae2d6b607852003d08273bbb355221e | |
parent | 2869d2bf7a24a0b42bde738589221def0289ce54 (diff) | |
download | cmogstored-7c49988ebf5c176cadd4a9e287e443d49a2cdeec.tar.gz |
I needed to spend time to convince myself this was safe, so leave a note to others (and future self) in case there is cause for concern. Basically, this is highly dependent on our overall one-shot-based concurrency model and safe as long as basic rules are followed.
-rw-r--r-- | http.c | 9 | ||||
-rw-r--r-- | ioq.c | 11 | ||||
-rw-r--r-- | mgmt.c | 2 |
3 files changed, 20 insertions, 2 deletions
@@ -122,7 +122,14 @@ void mog_http_unlink_ftmp(struct mog_http *http) file->tmppath, http->svc->docroot); } -/* called if epoll/kevent is out-of-space */ +/* + * called only if epoll/kevent is out-of-space + * Note: it's impossible for this to be called while this mfd is + * inside an ioq SIMPLEQ, however mfd->ioq_blocked /may/ be true when + * this function is called. We could add validation code to ensure + * this remains true, but that would increase the size of "struct mog_fd", + * so we will rely on this comment instead. + */ void mog_http_drop(struct mog_fd *mfd) { struct mog_http *http = &mfd->as.http; @@ -48,6 +48,11 @@ static inline void ioq_set_contended(struct mog_ioq *ioq) * This is like sem_trywait. Each thread is only allowed to acquire * one ioq at once. * + * If this returns false, the caller _must_ return MOG_NEXT_IGNORE to + * prevent the mfd from being added to an epoll/kqueue watch list. + * Adding the mfd to an epoll/kqueue watch list in the same thread/context + * where this function returns false is a guaranteed bug. + * * client_mfd is the client socket, not the open (regular) file */ bool mog_ioq_ready(struct mog_ioq *ioq, struct mog_fd *client_mfd) @@ -115,6 +120,12 @@ void mog_ioq_next(struct mog_ioq *check_ioq) /* wake up the next sleeper on this queue */ if (client_mfd) mog_activeq_push(mog_ioq_current->svc->queue, client_mfd); + /* + * We may not touch or use client_mfd here anymore. Another + * thread may already have it. In the worst case, it's been + * closed due to epoll/kqueue running out-of-space and another + * system call (open/accept) may have already reused the FD + */ mog_ioq_current = NULL; } @@ -83,7 +83,7 @@ MOG_NOINLINE static void mgmt_close(struct mog_fd *mfd) mog_fd_put(mfd); } -/* called if epoll/kevent is out-of-space */ +/* called only if epoll/kevent is out-of-space (see mog_http_drop) */ void mog_mgmt_drop(struct mog_fd *mfd) { struct mog_mgmt *mgmt = &mfd->as.mgmt; |