1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/ptlrpcd.c
39 #define DEBUG_SUBSYSTEM S_RPC
42 # include <libcfs/libcfs.h>
43 #else /* __KERNEL__ */
44 # include <liblustre.h>
48 #include <lustre_net.h>
49 # include <lustre_lib.h>
51 #include <lustre_ha.h>
52 #include <obd_class.h> /* for obd_zombie */
53 #include <obd_support.h> /* for OBD_FAIL_CHECK */
54 #include <cl_object.h> /* cl_env_{get,put}() */
55 #include <lprocfs_status.h>
/*
 * One ptlrpcd control block per (scope, thread kind) pair: pscope_thread[]
 * is indexed by the thread kind (PT_NORMAL / PT_RECOVERY, PT_NR entries).
 * NOTE(review): this excerpt appears to be missing lines of the declaration
 * (e.g. a pt_name field and closing braces) — confirm against the full file.
 */
63 struct ptlrpcd_scope_ctl {
64 struct ptlrpcd_thread {
66 struct ptlrpcd_ctl pt_ctl;
67 } pscope_thread[PT_NR];
/*
 * Static table of per-scope thread names, indexed by enum ptlrpcd_scope
 * (PSCOPE_NR scopes); initializers for intermediate entries are not
 * visible in this excerpt.
 */
70 static struct ptlrpcd_scope_ctl ptlrpcd_scopes[PSCOPE_NR] = {
74 .pt_name = "ptlrpcd-brw"
77 .pt_name = "ptlrpcd-brw-rcv"
87 .pt_name = "ptlrpcd-rcv"
/* Serializes ptlrpcd_addref()/ptlrpcd_decref(); protects ptlrpcd_users. */
93 struct semaphore ptlrpcd_sem;
/* Reference count: threads are started on the 0->1 transition and
 * stopped on 1->0 (see ptlrpcd_addref()/ptlrpcd_decref() below). */
94 static int ptlrpcd_users = 0;
/*
 * Wake up the thread waiting on the request set that \a req belongs to,
 * so it re-evaluates the set (e.g. after the request's state changed).
 * \a req must already be attached to a set (rq_set != NULL).
 */
96 void ptlrpcd_wake(struct ptlrpc_request *req)
98 struct ptlrpc_request_set *rq_set = req->rq_set;
100 LASSERT(rq_set != NULL);
102 cfs_waitq_signal(&rq_set->set_waitq);
106 * Move all request from an existing request set to the ptlrpcd queue.
107 * All requests from the set must be in phase RQ_PHASE_NEW.
109 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
111 struct list_head *tmp, *pos;
/* _safe iteration: each request is unlinked from the set inside the loop. */
113 list_for_each_safe(pos, tmp, &set->set_requests) {
114 struct ptlrpc_request *req =
115 list_entry(pos, struct ptlrpc_request, rq_set_chain);
117 LASSERT(req->rq_phase == RQ_PHASE_NEW);
118 list_del_init(&req->rq_set_chain);
/* Hand the request over to the generic-scope ptlrpcd queue. */
120 ptlrpcd_add_req(req, PSCOPE_OTHER);
121 set->set_remaining--;
/* The donor set must now be empty. */
123 LASSERT(set->set_remaining == 0);
125 EXPORT_SYMBOL(ptlrpcd_add_rqset);
128 * Requests that are added to the ptlrpcd queue are sent via
129 * ptlrpcd_check->ptlrpc_check_set().
131 int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope)
133 struct ptlrpcd_ctl *pc;
134 enum pscope_thread pt;
/* NOTE(review): the declaration of 'rc' (used below) is not visible in
 * this excerpt — likely lost in extraction; confirm against the full file. */
137 LASSERT(scope < PSCOPE_NR);
/* Route recovering imports to the dedicated recovery thread so they do
 * not get stuck behind normal traffic (and vice versa). */
138 pt = req->rq_send_state == LUSTRE_IMP_FULL ? PT_NORMAL : PT_RECOVERY;
139 pc = &ptlrpcd_scopes[scope].pscope_thread[pt].pt_ctl;
140 rc = ptlrpc_set_add_new_req(pc, req);
142 * XXX disable this for CLIO: environment is needed for interpreter.
/* 'ptlrpc_interpterer_t' is the project's typedef spelling — presumably
 * intentional (sic); do not "fix" locally. */
145 ptlrpc_interpterer_t interpreter;
147 interpreter = req->rq_interpret_reply;
150 * Thread is probably in stop now so we need to
151 * kill this rpc as it was not added. Let's call
152 * interpret for it to let know we're killing it
153 * so that higher levels might free assosiated
/* Fail the request with -EBADR and drop our reference; the interpreter
 * gets a chance to release resources tied to the request. */
156 ptlrpc_req_interpret(NULL, req, -EBADR);
158 ptlrpc_req_finished(req);
/*
 * One pass of the ptlrpcd work loop: absorb newly queued requests into
 * the thread's set, drive the set, and prune completed requests.
 * Returns non-zero when there is more work pending (used as the
 * l_wait_event() condition in ptlrpcd() below).
 */
164 static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
166 struct list_head *tmp, *pos;
167 struct ptlrpc_request *req;
/* Move requests queued by other threads (set_new_requests) into our
 * private set under the new-request lock. */
171 spin_lock(&pc->pc_set->set_new_req_lock)
172 list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
173 req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
174 list_del_init(&req->rq_set_chain);
175 ptlrpc_set_add_req(pc->pc_set, req);
177 * Need to calculate its timeout.
181 spin_unlock(&pc->pc_set->set_new_req_lock);
183 if (pc->pc_set->set_remaining) {
/* OR, not short-circuit ||: always run ptlrpc_check_set(). */
184 rc = rc | ptlrpc_check_set(env, pc->pc_set);
187 * XXX: our set never completes, so we prune the completed
188 * reqs after each iteration. boy could this be smarter.
190 list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
191 req = list_entry(pos, struct ptlrpc_request,
193 if (req->rq_phase != RQ_PHASE_COMPLETE)
196 list_del_init(&req->rq_set_chain);
198 ptlrpc_req_finished (req);
204 * If new requests have been added, make sure to wake up.
206 spin_lock(&pc->pc_set->set_new_req_lock);
207 rc = !list_empty(&pc->pc_set->set_new_requests);
208 spin_unlock(&pc->pc_set->set_new_req_lock);
216 * ptlrpc's code paths like to execute in process context, so we have this
217 * thread which spins on a set which contains the io rpcs. llite specifies
218 * ptlrpcd's set when it pushes pages down into the oscs.
/*
 * Kernel thread entry point for one ptlrpcd daemon.  Daemonizes, sets up
 * a CLIO lu_env, signals pc_starting, then loops in ptlrpcd_check() until
 * LIOD_STOP is set; finally drains the set and signals pc_finishing.
 */
220 static int ptlrpcd(void *arg)
222 struct ptlrpcd_ctl *pc = arg;
223 struct lu_env env = { .le_ses = NULL };
227 rc = cfs_daemonize_ctxt(pc->pc_name);
230 * XXX So far only "client" ptlrpcd uses an environment. In
231 * the future, ptlrpcd thread (or a thread-set) has to given
232 * an argument, describing its "scope".
234 rc = lu_context_init(&env.le_ctx,
235 LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
/* Unblock ptlrpcd_start(), which waits on pc_starting. */
238 complete(&pc->pc_starting);
/* Debugging cookie so this context is recognizable in dumps. */
242 env.le_ctx.lc_cookie = 0x7;
245 * This mainloop strongly resembles ptlrpc_set_wait() except that our
246 * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
247 * there are requests in the set. New requests come in on the set's
248 * new_req_list and ptlrpcd_check() moves them into the set.
251 struct l_wait_info lwi;
254 rc = lu_env_refill(&env);
257 * XXX This is very awkward situation, because
258 * execution can neither continue (request
259 * interpreters assume that env is set up), nor repeat
260 * the loop (as this potentially results in a tight
261 * loop of -ENOMEM's).
263 * Fortunately, refill only ever does something when
264 * new modules are loaded, i.e., early during boot up.
266 CERROR("Failure to refill session: %d\n", rc);
/* Sleep until the earliest request deadline (at least 1 second) or
 * until ptlrpcd_check() reports pending work. */
270 timeout = ptlrpc_set_next_timeout(pc->pc_set);
271 lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
272 ptlrpc_expired_set, pc->pc_set);
274 lu_context_enter(&env.le_ctx);
275 l_wait_event(pc->pc_set->set_waitq,
276 ptlrpcd_check(&env, pc), &lwi);
277 lu_context_exit(&env.le_ctx);
280 * Abort inflight rpcs for forced stop case.
282 if (test_bit(LIOD_STOP, &pc->pc_flags)) {
283 if (test_bit(LIOD_FORCE, &pc->pc_flags))
284 ptlrpc_abort_set(pc->pc_set);
289 * Let's make one more loop to make sure that ptlrpcd_check()
290 * copied all raced new rpcs into the set so we can kill them.
295 * Wait for inflight requests to drain.
297 if (!list_empty(&pc->pc_set->set_requests))
298 ptlrpc_set_wait(pc->pc_set);
299 lu_context_fini(&env.le_ctx);
/* Unblock ptlrpcd_stop(), which waits on pc_finishing. */
300 complete(&pc->pc_finishing);
/* Reset flags so the control block can be reused by a later start. */
302 clear_bit(LIOD_START, &pc->pc_flags);
303 clear_bit(LIOD_STOP, &pc->pc_flags);
304 clear_bit(LIOD_FORCE, &pc->pc_flags);
308 #else /* !__KERNEL__ */
/*
 * liblustre (userspace) replacement for the kernel ptlrpcd thread:
 * registered as a wait callback and polled from the event loop.
 * Only does real work on the outermost recursion level (pc_recurred == 1)
 * to avoid re-entering the set-processing code.
 */
310 int ptlrpcd_check_async_rpcs(void *arg)
312 struct ptlrpcd_ctl *pc = arg;
320 if (pc->pc_recurred == 1) {
321 rc = lu_env_refill(&pc->pc_env);
323 lu_context_enter(&pc->pc_env.le_ctx);
324 rc = ptlrpcd_check(&pc->pc_env, pc);
325 lu_context_exit(&pc->pc_env.le_ctx);
/* No pending work: expire timed-out requests in the set. */
327 ptlrpc_expired_set(pc->pc_set);
329 * XXX: send replay requests.
/* Recovery-flavoured ptlrpcd gets an extra pass. */
331 if (test_bit(LIOD_RECOVERY, &pc->pc_flags))
332 rc = ptlrpcd_check(&pc->pc_env, pc);
/*
 * liblustre idle callback: non-zero when this ptlrpcd has neither newly
 * queued requests nor requests still in flight in its set.
 */
340 int ptlrpcd_idle(void *arg)
342 struct ptlrpcd_ctl *pc = arg;
344 return (list_empty(&pc->pc_set->set_new_requests) &&
345 pc->pc_set->set_remaining == 0);
/*
 * Start the ptlrpcd daemon for control block \a pc under \a name.
 * Creates the request set and lu context, spawns the kernel thread (or,
 * in liblustre, registers wait/idle callbacks), and waits for the thread
 * to signal pc_starting.  Fails with -EALREADY-style behavior if a
 * thread is already started on this pc (LIOD_START already set).
 */
350 int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
356 * Do not allow start second thread for one pc.
358 if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
359 CERROR("Starting second thread (%s) for same pc %p\n",
364 init_completion(&pc->pc_starting);
365 init_completion(&pc->pc_finishing);
366 spin_lock_init(&pc->pc_lock);
/* NOTE(review): 'name' is passed as the format string; if it ever
 * contains '%' this misbehaves. Should be snprintf(..., "%s", name). */
367 snprintf (pc->pc_name, sizeof (pc->pc_name), name);
368 pc->pc_set = ptlrpc_prep_set();
369 if (pc->pc_set == NULL)
370 GOTO(out, rc = -ENOMEM);
372 * So far only "client" ptlrpcd uses an environment. In the future,
373 * ptlrpcd thread (or a thread-set) has to be given an argument,
374 * describing its "scope".
376 rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
/* Context init failed: tear down the set created above. */
378 ptlrpc_set_destroy(pc->pc_set);
383 rc = cfs_kernel_thread(ptlrpcd, pc, 0);
/* Thread creation failed: unwind context and set. */
385 lu_context_fini(&pc->pc_env.le_ctx);
386 ptlrpc_set_destroy(pc->pc_set);
/* Wait until the new thread has finished its own setup. */
390 wait_for_completion(&pc->pc_starting);
/* liblustre path: no thread — poll via registered callbacks instead. */
392 pc->pc_wait_callback =
393 liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
394 &ptlrpcd_check_async_rpcs, pc);
395 pc->pc_idle_callback =
396 liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
/* Error path: allow a future ptlrpcd_start() on this pc. */
401 clear_bit(LIOD_START, &pc->pc_flags);
/*
 * Stop the ptlrpcd daemon on \a pc.  With \a force set, in-flight RPCs
 * are aborted (see LIOD_FORCE handling in ptlrpcd()); otherwise the
 * thread drains them first.  Blocks until the thread signals
 * pc_finishing, then releases the env context and the request set.
 */
405 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
407 if (!test_bit(LIOD_START, &pc->pc_flags)) {
408 CERROR("Thread for pc %p was not started\n", pc);
412 set_bit(LIOD_STOP, &pc->pc_flags);
414 set_bit(LIOD_FORCE, &pc->pc_flags);
/* Kick the thread out of l_wait_event() so it notices LIOD_STOP. */
415 cfs_waitq_signal(&pc->pc_set->set_waitq);
417 wait_for_completion(&pc->pc_finishing);
/* liblustre path: unhook the polling callbacks instead. */
419 liblustre_deregister_wait_callback(pc->pc_wait_callback);
420 liblustre_deregister_idle_callback(pc->pc_idle_callback);
422 lu_context_fini(&pc->pc_env.le_ctx);
423 ptlrpc_set_destroy(pc->pc_set);
/*
 * Stop every started ptlrpcd thread across all scopes and thread kinds.
 * Only control blocks with LIOD_START set are stopped.
 */
426 void ptlrpcd_fini(void)
433 for (i = 0; i < PSCOPE_NR; ++i) {
434 for (j = 0; j < PT_NR; ++j) {
435 struct ptlrpcd_ctl *pc;
437 pc = &ptlrpcd_scopes[i].pscope_thread[j].pt_ctl;
439 if (test_bit(LIOD_START, &pc->pc_flags))
/*
 * Take a reference on the ptlrpcd subsystem.  The first caller (count
 * 0 -> 1) starts one thread per (scope, kind) pair; subsequent callers
 * just bump the count.  Serialized by ptlrpcd_sem.
 */
446 int ptlrpcd_addref(void)
453 mutex_down(&ptlrpcd_sem);
454 if (++ptlrpcd_users == 1) {
/* Stop at the first failure: 'rc' guards both loops. */
455 for (i = 0; rc == 0 && i < PSCOPE_NR; ++i) {
456 for (j = 0; rc == 0 && j < PT_NR; ++j) {
457 struct ptlrpcd_thread *pt;
458 struct ptlrpcd_ctl *pc;
460 pt = &ptlrpcd_scopes[i].pscope_thread[j];
/* NOTE(review): the assignment of 'pc' (presumably pc = &pt->pt_ctl)
 * is not visible in this excerpt — confirm against the full file. */
462 if (j == PT_RECOVERY)
463 set_bit(LIOD_RECOVERY, &pc->pc_flags);
464 rc = ptlrpcd_start(pt->pt_name, pc);
472 mutex_up(&ptlrpcd_sem);
/*
 * Drop a reference taken by ptlrpcd_addref(); the last caller
 * (count 1 -> 0) shuts the threads down.  Serialized by ptlrpcd_sem.
 */
476 void ptlrpcd_decref(void)
478 mutex_down(&ptlrpcd_sem);
479 if (--ptlrpcd_users == 0)
481 mutex_up(&ptlrpcd_sem);