Whamcloud - gitweb
Branch HEAD
[fs/lustre-release.git] / lustre / ptlrpc / ptlrpcd.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/ptlrpcd.c
37  */
38
39 #define DEBUG_SUBSYSTEM S_RPC
40
41 #ifdef __KERNEL__
42 # include <libcfs/libcfs.h>
43 #else /* __KERNEL__ */
44 # include <liblustre.h>
45 # include <ctype.h>
46 #endif
47
48 #include <lustre_net.h>
49 # include <lustre_lib.h>
50
51 #include <lustre_ha.h>
52 #include <obd_class.h>   /* for obd_zombie */
53 #include <obd_support.h> /* for OBD_FAIL_CHECK */
54 #include <cl_object.h> /* cl_env_{get,put}() */
55 #include <lprocfs_status.h>
56
/*
 * Per-scope thread roles.  Each ptlrpcd scope runs one thread per role:
 * PT_NORMAL services regular RPCs, PT_RECOVERY services RPCs whose
 * import is not LUSTRE_IMP_FULL (see ptlrpcd_add_req()).
 */
enum pscope_thread {
        PT_NORMAL,
        PT_RECOVERY,
        PT_NR
};
62
/*
 * Control state for one scope: a named thread (kthread name plus its
 * ptlrpcd control block) for every role in enum pscope_thread.
 */
struct ptlrpcd_scope_ctl {
        struct ptlrpcd_thread {
                const char        *pt_name;  /* thread name passed to ptlrpcd_start() */
                struct ptlrpcd_ctl pt_ctl;   /* per-thread control block */
        } pscope_thread[PT_NR];
};
69
/*
 * Static table of all ptlrpcd threads, indexed by [scope][role].
 * Threads are started from this table in ptlrpcd_addref() and stopped
 * in ptlrpcd_fini(); ptlrpcd_add_req() routes requests into it.
 */
static struct ptlrpcd_scope_ctl ptlrpcd_scopes[PSCOPE_NR] = {
        [PSCOPE_BRW] = {
                .pscope_thread = {
                        [PT_NORMAL] = {
                                .pt_name = "ptlrpcd-brw"
                        },
                        [PT_RECOVERY] = {
                                .pt_name = "ptlrpcd-brw-rcv"
                        }
                }
        },
        [PSCOPE_OTHER] = {
                .pscope_thread = {
                        [PT_NORMAL] = {
                                .pt_name = "ptlrpcd"
                        },
                        [PT_RECOVERY] = {
                                .pt_name = "ptlrpcd-rcv"
                        }
                }
        }
};
92
/* Serializes ptlrpcd_addref()/ptlrpcd_decref() (mutex_down/mutex_up). */
struct semaphore ptlrpcd_sem;
/* Reference count; first ptlrpcd_addref() starts the threads, last
 * ptlrpcd_decref() stops them via ptlrpcd_fini(). */
static int ptlrpcd_users = 0;
95
96 void ptlrpcd_wake(struct ptlrpc_request *req)
97 {
98         struct ptlrpc_request_set *rq_set = req->rq_set;
99
100         LASSERT(rq_set != NULL);
101
102         cfs_waitq_signal(&rq_set->set_waitq);
103 }
104
105 /*
106  * Move all request from an existing request set to the ptlrpcd queue.
107  * All requests from the set must be in phase RQ_PHASE_NEW.
108  */
109 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
110 {
111         struct list_head *tmp, *pos;
112
113         list_for_each_safe(pos, tmp, &set->set_requests) {
114                 struct ptlrpc_request *req =
115                         list_entry(pos, struct ptlrpc_request, rq_set_chain);
116
117                 LASSERT(req->rq_phase == RQ_PHASE_NEW);
118                 list_del_init(&req->rq_set_chain);
119                 req->rq_set = NULL;
120                 ptlrpcd_add_req(req, PSCOPE_OTHER);
121                 set->set_remaining--;
122         }
123         LASSERT(set->set_remaining == 0);
124 }
125 EXPORT_SYMBOL(ptlrpcd_add_rqset);
126
/*
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 *
 * The request is routed to the PT_NORMAL thread of \a scope when its
 * send state is LUSTRE_IMP_FULL, otherwise to the PT_RECOVERY thread.
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope)
{
        struct ptlrpcd_ctl *pc;
        enum pscope_thread  pt;
        int rc;

        LASSERT(scope < PSCOPE_NR);
        pt = req->rq_send_state == LUSTRE_IMP_FULL ? PT_NORMAL : PT_RECOVERY;
        pc = &ptlrpcd_scopes[scope].pscope_thread[pt].pt_ctl;
        rc = ptlrpc_set_add_new_req(pc, req);
        /*
         * XXX disable this for CLIO: environment is needed for interpreter.
         *
         * NOTE(review): "rc && 0" makes this branch dead code on purpose
         * (see XXX above); the error-path logic is kept for when an
         * environment can be supplied to the interpreter.
         */
        if (rc && 0) {
                ptlrpc_interpterer_t interpreter;

                interpreter = req->rq_interpret_reply;

                /*
                 * Thread is probably in stop now so we need to
                 * kill this rpc as it was not added. Let's call
                 * interpret for it to let know we're killing it
                 * so that higher levels might free assosiated
                 * resources.
                 */
                req->rq_status = -EBADR;
                interpreter(NULL, req, &req->rq_async_args,
                            req->rq_status);
                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        }
}
163
/*
 * Fold freshly queued requests (set_new_requests) into the active set
 * and make progress on it.
 *
 * \retval non-zero  there is (or may be) more work: new requests were
 *                   moved in, ptlrpc_check_set() reported activity, or
 *                   more new requests raced in meanwhile
 * \retval 0         nothing to do; the caller may sleep
 */
static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;
        ENTRY;

        /* Move newly queued requests into the set under the new-req lock. */
        spin_lock(&pc->pc_set->set_new_req_lock);
        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);
                ptlrpc_set_add_req(pc->pc_set, req);
                /*
                 * Need to calculate its timeout.
                 */
                rc = 1;
        }
        spin_unlock(&pc->pc_set->set_new_req_lock);

        if (pc->pc_set->set_remaining) {
                rc = rc | ptlrpc_check_set(env, pc->pc_set);

                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter.
                 */
                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished (req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                spin_lock(&pc->pc_set->set_new_req_lock);
                rc = !list_empty(&pc->pc_set->set_new_requests);
                spin_unlock(&pc->pc_set->set_new_req_lock);
        }

        RETURN(rc);
}
213
214 #ifdef __KERNEL__
/*
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the io rpcs. llite specifies
 * ptlrpcd's set when it pushes pages down into the oscs.
 *
 * Main loop of one ptlrpcd kernel thread; runs until LIOD_STOP is set
 * on pc->pc_flags (see ptlrpcd_stop()).
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct lu_env env = { .le_ses = NULL };
        int rc, exit = 0;
        ENTRY;

        rc = cfs_daemonize_ctxt(pc->pc_name);
        if (rc == 0) {
                /*
                 * XXX So far only "client" ptlrpcd uses an environment. In
                 * the future, ptlrpcd thread (or a thread-set) has to given
                 * an argument, describing its "scope".
                 */
                rc = lu_context_init(&env.le_ctx,
                                     LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
        }

        /* Unblock ptlrpcd_start(), which waits on pc_starting. */
        complete(&pc->pc_starting);

        /*
         * NOTE(review): on init failure the thread exits here without ever
         * completing pc_finishing; a subsequent ptlrpcd_stop() would block
         * forever in wait_for_completion() -- confirm callers handle this.
         */
        if (rc != 0)
                RETURN(rc);
        env.le_ctx.lc_cookie = 0x7;

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                rc = lu_env_refill(&env);
                if (rc != 0) {
                        /*
                         * XXX This is very awkward situation, because
                         * execution can neither continue (request
                         * interpreters assume that env is set up), nor repeat
                         * the loop (as this potentially results in a tight
                         * loop of -ENOMEM's).
                         *
                         * Fortunately, refill only ever does something when
                         * new modules are loaded, i.e., early during boot up.
                         */
                        CERROR("Failure to refill session: %d\n", rc);
                        continue;
                }

                /* Sleep until the next RPC deadline, or 1s if none pending. */
                timeout = ptlrpc_set_next_timeout(pc->pc_set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, pc->pc_set);

                lu_context_enter(&env.le_ctx);
                l_wait_event(pc->pc_set->set_waitq,
                             ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);

                /*
                 * Abort inflight rpcs for forced stop case.
                 */
                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(pc->pc_set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new rpcs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!list_empty(&pc->pc_set->set_requests))
                ptlrpc_set_wait(pc->pc_set);
        lu_context_fini(&env.le_ctx);
        /* Unblock ptlrpcd_stop(), which waits on pc_finishing. */
        complete(&pc->pc_finishing);

        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        clear_bit(LIOD_FORCE, &pc->pc_flags);
        return 0;
}
307
308 #else /* !__KERNEL__ */
309
/*
 * liblustre (single-threaded userspace) wait callback: drives the async
 * RPC set from the caller's loop instead of a dedicated thread.
 * Registered in ptlrpcd_start() via liblustre_register_wait_callback().
 */
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                 rc = 0;

        /*
         * Single threaded!!
         */
        pc->pc_recurred++;

        /* Guard against re-entry from nested event-loop invocations. */
        if (pc->pc_recurred == 1) {
                rc = lu_env_refill(&pc->pc_env);
                if (rc == 0) {
                        lu_context_enter(&pc->pc_env.le_ctx);
                        rc = ptlrpcd_check(&pc->pc_env, pc);
                        lu_context_exit(&pc->pc_env.le_ctx);
                        /* No progress: let timed-out requests expire. */
                        if (!rc)
                                ptlrpc_expired_set(pc->pc_set);
                        /*
                         * XXX: send replay requests.
                         */
                        if (test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                rc = ptlrpcd_check(&pc->pc_env, pc);
                }
        }

        pc->pc_recurred--;
        return rc;
}
339
340 int ptlrpcd_idle(void *arg)
341 {
342         struct ptlrpcd_ctl *pc = arg;
343
344         return (list_empty(&pc->pc_set->set_new_requests) &&
345                 pc->pc_set->set_remaining == 0);
346 }
347
348 #endif
349
350 int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
351 {
352         int rc;
353         ENTRY;
354
355         /*
356          * Do not allow start second thread for one pc.
357          */
358         if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
359                 CERROR("Starting second thread (%s) for same pc %p\n",
360                        name, pc);
361                 RETURN(-EALREADY);
362         }
363
364         init_completion(&pc->pc_starting);
365         init_completion(&pc->pc_finishing);
366         spin_lock_init(&pc->pc_lock);
367         snprintf (pc->pc_name, sizeof (pc->pc_name), name);
368         pc->pc_set = ptlrpc_prep_set();
369         if (pc->pc_set == NULL)
370                 GOTO(out, rc = -ENOMEM);
371         /*
372          * So far only "client" ptlrpcd uses an environment. In the future,
373          * ptlrpcd thread (or a thread-set) has to be given an argument,
374          * describing its "scope".
375          */
376         rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
377         if (rc != 0) {
378                 ptlrpc_set_destroy(pc->pc_set);
379                 GOTO(out, rc);
380         }
381
382 #ifdef __KERNEL__
383         rc = cfs_kernel_thread(ptlrpcd, pc, 0);
384         if (rc < 0)  {
385                 lu_context_fini(&pc->pc_env.le_ctx);
386                 ptlrpc_set_destroy(pc->pc_set);
387                 GOTO(out, rc);
388         }
389         rc = 0;
390         wait_for_completion(&pc->pc_starting);
391 #else
392         pc->pc_wait_callback =
393                 liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
394                                                  &ptlrpcd_check_async_rpcs, pc);
395         pc->pc_idle_callback =
396                 liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
397                                                  &ptlrpcd_idle, pc);
398 #endif
399 out:
400         if (rc)
401                 clear_bit(LIOD_START, &pc->pc_flags);
402         RETURN(rc);
403 }
404
/*
 * Stop the thread (or deregister the liblustre callbacks) behind \a pc
 * and release its set and context.  With \a force set, inflight RPCs
 * are aborted instead of drained (LIOD_FORCE handling in ptlrpcd()).
 */
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CERROR("Thread for pc %p was not started\n", pc);
                return;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
        /* Wake the thread so it notices LIOD_STOP. */
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        /* ptlrpcd() signals pc_finishing when its shutdown is complete. */
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        lu_context_fini(&pc->pc_env.le_ctx);
        ptlrpc_set_destroy(pc->pc_set);
}
425
426 void ptlrpcd_fini(void)
427 {
428         int i;
429         int j;
430
431         ENTRY;
432
433         for (i = 0; i < PSCOPE_NR; ++i) {
434                 for (j = 0; j < PT_NR; ++j) {
435                         struct ptlrpcd_ctl *pc;
436
437                         pc = &ptlrpcd_scopes[i].pscope_thread[j].pt_ctl;
438
439                         if (test_bit(LIOD_START, &pc->pc_flags))
440                                 ptlrpcd_stop(pc, 0);
441                 }
442         }
443         EXIT;
444 }
445
/*
 * Take a reference on the ptlrpcd service.  The first caller starts one
 * thread per (scope, role) pair from ptlrpcd_scopes.
 *
 * \retval 0  on success; otherwise the error of the first failing
 *            ptlrpcd_start(), with all already-started threads stopped
 *            and the reference dropped again.
 */
int ptlrpcd_addref(void)
{
        int rc = 0;
        int i;
        int j;
        ENTRY;

        mutex_down(&ptlrpcd_sem);
        if (++ptlrpcd_users == 1) {
                /* Both loops bail out as soon as one start fails (rc != 0). */
                for (i = 0; rc == 0 && i < PSCOPE_NR; ++i) {
                        for (j = 0; rc == 0 && j < PT_NR; ++j) {
                                struct ptlrpcd_thread *pt;
                                struct ptlrpcd_ctl    *pc;

                                pt = &ptlrpcd_scopes[i].pscope_thread[j];
                                pc = &pt->pt_ctl;
                                /* Mark recovery threads before starting them. */
                                if (j == PT_RECOVERY)
                                        set_bit(LIOD_RECOVERY, &pc->pc_flags);
                                rc = ptlrpcd_start(pt->pt_name, pc);
                        }
                }
                if (rc != 0) {
                        /* Undo the reference and stop whatever did start. */
                        --ptlrpcd_users;
                        ptlrpcd_fini();
                }
        }
        mutex_up(&ptlrpcd_sem);
        RETURN(rc);
}
475
476 void ptlrpcd_decref(void)
477 {
478         mutex_down(&ptlrpcd_sem);
479         if (--ptlrpcd_users == 0)
480                 ptlrpcd_fini();
481         mutex_up(&ptlrpcd_sem);
482 }