cmogstored.git
alternative mogstored implementation for MogileFS
blob 3468e24a5a1a2abb5b81d79893bdcec94ecf2751 8604 bytes (raw)
$ git show wip-1.3:mgmt.c	# shows this blob on the CLI

/*
 * Copyright (C) 2012-2013, Eric Wong <normalperson@yhbt.net>
 * License: GPLv3 or later (see COPYING for details)
 */
#include "cmogstored.h"
#include "trace.h"
#include "mgmt.h"
#include "digest.h"
#include "ioprio.h"

static void mgmt_digest_step(struct mog_fd *mfd)
{
	struct mog_mgmt *mgmt = &mfd->as.mgmt;
	struct mog_fd *fmfd = mgmt->forward;
	enum mog_digest_next next;

	/*
	 * MOG_PRIO_FSCK means we're likely the _only_ thread handling
	 * MD5, so run it as fast as possible.
	 */
	if (mgmt->prio == MOG_PRIO_FSCK) {
		int ioprio = mog_ioprio_drop();

		do {
			next = mog_digest_read(&fmfd->as.file.digest, fmfd->fd);
		} while (next == MOG_DIGEST_CONTINUE);

		if (ioprio != -1)
			mog_ioprio_restore(ioprio);
	} else {
		next = mog_digest_read(&fmfd->as.file.digest, fmfd->fd);
	}

	assert(mgmt->wbuf == NULL && "wbuf should be NULL here");

	switch (next) {
	case MOG_DIGEST_CONTINUE: return;
	case MOG_DIGEST_EOF:
		mog_mgmt_fn_digest_emit(mgmt);
		break;
	case MOG_DIGEST_ERROR:
		mog_mgmt_fn_digest_err(mgmt);
	}

	if (mgmt->prio == MOG_PRIO_FSCK)
		mog_fsck_queue_next(mfd);

	mog_file_close(mgmt->forward);
	mgmt->prio = MOG_PRIO_NONE;
	mgmt->forward = NULL;
}
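
#if 0	/* illustrative sketch, not part of mgmt.c */
/*
 * The drop/restore idiom above, assuming mog_ioprio_drop() lowers
 * the calling thread's I/O priority to the Linux "idle" class and
 * returns the previous priority (or -1 when unsupported), so the
 * tight fsck MD5 loop does not starve regular requests for disk
 * time.  Linux has no libc wrappers for ioprio_get(2)/ioprio_set(2),
 * hence raw syscall(2); constants match linux/ioprio.h.  The
 * example_* names are hypothetical, not cmogstored's.
 */
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_IDLE	3

static int example_ioprio_drop(void)
{
	int old = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0);

	if (old >= 0)
		(void)syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
				IOPRIO_CLASS_IDLE << IOPRIO_CLASS_SHIFT);
	return old;
}

static void example_ioprio_restore(int old)
{
	(void)syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, old);
}
#endif /* illustrative sketch */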

static enum mog_next mgmt_digest_in_progress(struct mog_fd *mfd)
{
	struct mog_mgmt *mgmt = &mfd->as.mgmt;

	assert(mgmt->forward && mgmt->forward != MOG_IOSTAT && "bad forward");

	if (mgmt->prio == MOG_PRIO_FSCK && !mog_fsck_queue_ready(mfd))
		return MOG_NEXT_IGNORE;

	mgmt_digest_step(mfd);

	if (mgmt->wbuf == MOG_WR_ERROR) return MOG_NEXT_CLOSE;
	if (mgmt->wbuf) return MOG_NEXT_WAIT_WR;

	/*
	 * an MD5 error is not fatal to the connection; we only close
	 * if there was a socket error (wbuf == MOG_WR_ERROR, above)
	 */
	return MOG_NEXT_ACTIVE;
}

MOG_NOINLINE static void mgmt_close(struct mog_fd *mfd)
{
	struct mog_mgmt *mgmt = &mfd->as.mgmt;

	mog_rbuf_free(mgmt->rbuf);
	assert((mgmt->wbuf == NULL || mgmt->wbuf == MOG_WR_ERROR) &&
	       "would leak mgmt->wbuf on close");

	mog_fd_put(mfd);
}

/* called when epoll/kevent runs out of space */
void mog_mgmt_drop(struct mog_fd *mfd)
{
	struct mog_mgmt *mgmt = &mfd->as.mgmt;

	if (mgmt->forward && mgmt->forward != MOG_IOSTAT)
		mog_file_close(mgmt->forward);
	mgmt_close(mfd);
}

void mog_mgmt_writev(struct mog_mgmt *mgmt, struct iovec *iov, int iovcnt)
{
	struct mog_fd *mfd = mog_fd_of(mgmt);

	assert(mgmt->wbuf == NULL && "tried to write while busy");
	mgmt->wbuf = mog_trywritev(mfd->fd, iov, iovcnt);
}
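
#if 0	/* illustrative sketch, not part of mgmt.c */
/*
 * The wbuf convention used throughout this file, inferred from the
 * call sites: mog_trywritev() attempts writev(2) on a nonblocking
 * socket and yields NULL once everything is written, the
 * MOG_WR_ERROR sentinel on a hard socket error, or a heap-allocated
 * copy of the unwritten remainder for mog_tryflush() to finish
 * later.  A simplified, hypothetical shape of such a helper
 * (length tracking and malloc error handling omitted):
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

static void *example_trywritev(int fd, struct iovec *iov, int iovcnt)
{
	ssize_t w = writev(fd, iov, iovcnt);
	size_t remain = 0;
	char *wbuf, *dst;
	int i;

	if (w < 0) {
		if (errno != EAGAIN && errno != EWOULDBLOCK)
			return MOG_WR_ERROR; /* ECONNRESET, EPIPE, ... */
		w = 0; /* nothing written; buffer everything */
	}

	/* skip over the iovecs writev(2) consumed completely */
	while (iovcnt && w >= (ssize_t)iov->iov_len) {
		w -= iov->iov_len;
		iov++;
		iovcnt--;
	}
	if (iovcnt == 0)
		return NULL; /* fully written, nothing to flush later */

	/* flatten the unwritten remainder into one buffer */
	for (i = 0; i < iovcnt; i++)
		remain += iov[i].iov_len;
	remain -= (size_t)w;
	dst = wbuf = malloc(remain);
	for (i = 0; i < iovcnt; i++) {
		size_t skip = i == 0 ? (size_t)w : 0;
		size_t len = iov[i].iov_len - skip;

		memcpy(dst, (char *)iov[i].iov_base + skip, len);
		dst += len;
	}
	return wbuf;
}
#endif /* illustrative sketch */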

static enum mog_next mgmt_iostat_forever(struct mog_mgmt *mgmt)
{
	mog_rbuf_free_and_null(&mgmt->rbuf); /* no coming back from this */
	mog_notify(MOG_NOTIFY_DEVICE_REFRESH);
	mog_svc_devstats_subscribe(mgmt);

	return MOG_NEXT_IGNORE;
}

/* attempt to flush mgmt->wbuf; returns the next queue action for this fd */
static enum mog_next mgmt_wbuf_in_progress(struct mog_mgmt *mgmt)
{
	assert(mgmt->wbuf != MOG_WR_ERROR && "still active after write error");
	switch (mog_tryflush(mog_fd_of(mgmt)->fd, &mgmt->wbuf)) {
	case MOG_WRSTATE_ERR: return MOG_NEXT_CLOSE;
	case MOG_WRSTATE_DONE:
		if (mgmt->forward == MOG_IOSTAT)
			return mgmt_iostat_forever(mgmt);
		return MOG_NEXT_ACTIVE;
	case MOG_WRSTATE_BUSY:
		/* unlikely, we never put anything big in wbuf */
		return MOG_NEXT_WAIT_WR;
	}
	assert(0 && "compiler bug?");
	return MOG_NEXT_CLOSE;
}

/* stash any pipelined data for the next round */
static void
mgmt_defer_rbuf(struct mog_mgmt *mgmt, struct mog_rbuf *rbuf, size_t buf_len)
{
	struct mog_rbuf *old = mgmt->rbuf;
	size_t defer_bytes = buf_len - mgmt->buf_off;
	char *src = rbuf->rptr + mgmt->buf_off;

	assert(mgmt->buf_off >= 0 && "mgmt->buf_off negative");
	assert(defer_bytes <= MOG_RBUF_BASE_SIZE && "defer bytes overflow");

	if (defer_bytes == 0) {
		mog_rbuf_free_and_null(&mgmt->rbuf);
	} else if (old) { /* no allocation needed, reuse existing */
		assert(old == rbuf && "mgmt->rbuf not reused properly");
		memmove(old->rptr, src, defer_bytes);
		old->rsize = defer_bytes;
	} else {
		mgmt->rbuf = mog_rbuf_new(MOG_RBUF_BASE_SIZE);
		memcpy(mgmt->rbuf->rptr, src, defer_bytes);
		mgmt->rbuf->rsize = defer_bytes;
	}
	mgmt->buf_off = 0;
}
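
/*
 * Worked example (illustrative, with made-up paths): suppose one
 * read(2) returned two pipelined sidechannel requests:
 *
 *	"size /dev1/foo.fid\r\nsize /dev1/bar.fid\r\n"
 *
 * After the first request is parsed, mgmt->buf_off points just past
 * its trailing '\n', so defer_bytes covers "size /dev1/bar.fid\r\n".
 * mgmt_defer_rbuf() moves that remainder to the front of mgmt->rbuf
 * and resets buf_off to zero, so the next mgmt_queue_step() call
 * takes the "goto parse" shortcut below without another read(2).
 */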

/*
 * this is the main event callback and called whenever mgmt
 * is pulled out of a queue (either idle or active)
 */
static enum mog_next mgmt_queue_step(struct mog_fd *mfd)
{
	struct mog_mgmt *mgmt = &mfd->as.mgmt;
	struct mog_rbuf *rbuf;
	char *buf;
	ssize_t r;
	off_t off;
	size_t buf_len = 0;
	enum mog_parser_state state;

	assert(mfd->fd >= 0 && "mgmt fd is invalid");

	if (mgmt->wbuf) return mgmt_wbuf_in_progress(mgmt);
	if (mgmt->forward) return mgmt_digest_in_progress(mfd);

	/* we may have pipelined data in mgmt->rbuf */
	rbuf = mgmt->rbuf ? mgmt->rbuf : mog_rbuf_get(MOG_RBUF_BASE_SIZE);
	buf = rbuf->rptr;
	off = mgmt->buf_off;
	assert(off >= 0 && "offset is negative");
	assert(off < MOG_RBUF_BASE_SIZE && "offset is too big");
	if (mgmt->rbuf && off == 0) {
		/* request got "pipelined", resuming now */
		buf_len = mgmt->rbuf->rsize;
		goto parse;
	}
reread:
	r = read(mfd->fd, buf + off, MOG_RBUF_BASE_SIZE - off);
	if (r > 0) {
		buf_len = r + off;
parse:
		state = mog_mgmt_parse(mgmt, buf, buf_len);
		if (mgmt->wbuf == MOG_WR_ERROR) return MOG_NEXT_CLOSE;

		switch (state) {
		case MOG_PARSER_ERROR:
			syslog(LOG_ERR, "mgmt parser error");
			return MOG_NEXT_CLOSE;
		case MOG_PARSER_CONTINUE:
			assert(mgmt->wbuf == NULL &&
			       "tried to write (and failed) with partial req");
			if (mgmt->buf_off == MOG_RBUF_BASE_SIZE) {
				assert(buf_len == MOG_RBUF_BASE_SIZE &&
				       "bad rbuf");
				syslog(LOG_ERR, "mgmt request too large");
				return MOG_NEXT_CLOSE;
			}
			off = mgmt->buf_off;
			goto reread;
		case MOG_PARSER_DONE:
			if (mgmt->forward == MOG_IOSTAT)
				return mgmt_iostat_forever(mgmt);

			/* stash any unparsed remainder for the next request */
			mgmt_defer_rbuf(mgmt, rbuf, buf_len);
			mog_mgmt_reset_parser(mgmt);
			assert(mgmt->wbuf != MOG_WR_ERROR);
			return mgmt->wbuf ? MOG_NEXT_WAIT_WR : MOG_NEXT_ACTIVE;
		}
	} else if (r == 0) { /* client shut down */
		TRACE(CMOGSTORED_MGMT_RDCLOSE(mfd, buf_len));
		return MOG_NEXT_CLOSE;
	} else {
		switch (errno) {
		case_EAGAIN:
			if ((buf_len > 0) && (mgmt->rbuf == NULL))
				mgmt->rbuf = mog_rbuf_detach(rbuf);
			return MOG_NEXT_WAIT_RD;
		case EINTR: goto reread;
		case ECONNRESET:
		case ENOTCONN:
			/* these errors are too common to log, normally */
			TRACE(CMOGSTORED_MGMT_RDERR(mfd, buf_len, errno));
			return MOG_NEXT_CLOSE;
		default:
			TRACE(CMOGSTORED_MGMT_RDERR(mfd, buf_len, errno));
			syslog(LOG_NOTICE, "mgmt client died: %m");
			return MOG_NEXT_CLOSE;
		}
	}

	assert(0 && "compiler bug?");
	return MOG_NEXT_CLOSE;
}
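
#if 0	/* illustrative sketch, not part of mgmt.c */
/*
 * The case_EAGAIN label above is presumably the usual idiom for
 * handling EAGAIN vs. EWOULDBLOCK portably: POSIX allows them to be
 * the same value, and duplicate case labels would not compile.
 * Something like:
 */
#include <errno.h>

#if EAGAIN == EWOULDBLOCK
#  define case_EAGAIN case EAGAIN
#else
#  define case_EAGAIN case EAGAIN: case EWOULDBLOCK
#endif
#endif /* illustrative sketch */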

/*
 * this function is called whenever a mgmt client is pulled out of
 * _any_ queue (listen/idle/active).  Our queueing model is designed
 * to prevent this function from running concurrently for any given fd.
 */
enum mog_next mog_mgmt_queue_step(struct mog_fd *mfd)
{
	enum mog_next rv = mgmt_queue_step(mfd);

	if (rv == MOG_NEXT_CLOSE)
		mgmt_close(mfd);
	return rv;
}

/* called during graceful shutdown instead of mog_mgmt_queue_step */
void mog_mgmt_quit_step(struct mog_fd *mfd)
{
	struct mog_mgmt *mgmt = &mfd->as.mgmt;
	struct mog_queue *q = mgmt->svc->queue;

	/* centralize all queue transitions here: */
	switch (mgmt_queue_step(mfd)) {
	case MOG_NEXT_WAIT_RD:
		if (mgmt->forward || mgmt->rbuf) {
			/* something is in progress, do not drop it */
			mog_idleq_push(q, mfd, MOG_QEV_RD);
			return;
		}
		/* fall-through */
	case MOG_NEXT_IGNORE: /* no new iostat watchers during shutdown */
		assert(mgmt->prio == MOG_PRIO_NONE && "bad prio");
		/* fall-through */
	case MOG_NEXT_CLOSE:
		mog_nr_active_at_quit--;
		mgmt_close(mfd);
		return;
	case MOG_NEXT_ACTIVE: mog_activeq_push(q, mfd); return;
	case MOG_NEXT_WAIT_WR: mog_idleq_push(q, mfd, MOG_QEV_WR); return;
	}
}

/* stringify the address for tracers */
static MOG_NOINLINE void
trace_mgmt_accepted(
	struct mog_fd *mfd, union mog_sockaddr *msa, socklen_t salen)
{
#ifdef HAVE_SYSTEMTAP
	struct mog_packaddr mpa;
	struct mog_ni ni;

	mog_nameinfo(&mpa, &ni);
	TRACE(CMOGSTORED_MGMT_ACCEPTED(mfd->fd, ni.ni_host, ni.ni_serv));
#endif /* HAVE_SYSTEMTAP */
}

/* called immediately after accept(), this initializes the mfd (once) */
void mog_mgmt_post_accept(int fd, struct mog_svc *svc,
			union mog_sockaddr *msa, socklen_t salen)
{
	struct mog_fd *mfd = mog_fd_init(fd, MOG_FD_TYPE_MGMT);
	struct mog_mgmt *mgmt = &mfd->as.mgmt;

	if (TRACE_ENABLED(CMOGSTORED_MGMT_ACCEPTED))
		trace_mgmt_accepted(mfd, msa, salen);

	mog_mgmt_init(mgmt, svc);
	mog_idleq_add(svc->queue, mfd, MOG_QEV_RD);
}
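
/*
 * Example session (illustrative; path, port, and size are made up):
 * the mgmt socket speaks the line-based mogstored sidechannel
 * protocol, conventionally on port 7501:
 *
 *	$ printf 'size /dev1/0/000/000/0000000123.fid\r\n' | nc 127.0.0.1 7501
 *	/dev1/0/000/000/0000000123.fid 8192
 *
 * A "watch\r\n" request instead subscribes the client to the iostat
 * broadcast handled by mgmt_iostat_forever() above.
 */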

git clone https://yhbt.net/cmogstored.git