/*
 * gitweb capture header (Whamcloud): commit "b=21938 wait and signal
 * correct waitq", fs/lustre-release.git, lustre/ptlrpc/ptlrpcd.c.
 */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/ptlrpcd.c
37  */
38
39 #define DEBUG_SUBSYSTEM S_RPC
40
41 #ifdef __KERNEL__
42 # include <libcfs/libcfs.h>
43 #else /* __KERNEL__ */
44 # include <liblustre.h>
45 # include <ctype.h>
46 #endif
47
48 #include <lustre_net.h>
49 # include <lustre_lib.h>
50
51 #include <lustre_ha.h>
52 #include <obd_class.h>   /* for obd_zombie */
53 #include <obd_support.h> /* for OBD_FAIL_CHECK */
54 #include <cl_object.h> /* cl_env_{get,put}() */
55 #include <lprocfs_status.h>
56
/*
 * Each ptlrpcd scope runs one thread per kind: PT_NORMAL handles
 * requests whose import is LUSTRE_IMP_FULL, PT_RECOVERY handles
 * everything else (see ptlrpcd_add_req()).
 */
enum pscope_thread {
        PT_NORMAL,
        PT_RECOVERY,
        PT_NR           /* number of thread kinds per scope */
};
62
/*
 * Per-scope bundle of thread descriptors: a human-readable thread name
 * plus the control block handed to ptlrpcd_start()/ptlrpcd_stop().
 */
struct ptlrpcd_scope_ctl {
        struct ptlrpcd_thread {
                const char        *pt_name;  /* kernel thread name */
                struct ptlrpcd_ctl pt_ctl;   /* per-thread state */
        } pscope_thread[PT_NR];
};
69
/*
 * Static table of the ptlrpcd threads, indexed by scope and thread
 * kind.  Only the names are filled in here; the pt_ctl members are
 * zero-initialized and set up later by ptlrpcd_start().
 */
static struct ptlrpcd_scope_ctl ptlrpcd_scopes[PSCOPE_NR] = {
        [PSCOPE_BRW] = {
                .pscope_thread = {
                        [PT_NORMAL] = {
                                .pt_name = "ptlrpcd-brw"
                        },
                        [PT_RECOVERY] = {
                                .pt_name = "ptlrpcd-brw-rcv"
                        }
                }
        },
        [PSCOPE_OTHER] = {
                .pscope_thread = {
                        [PT_NORMAL] = {
                                .pt_name = "ptlrpcd"
                        },
                        [PT_RECOVERY] = {
                                .pt_name = "ptlrpcd-rcv"
                        }
                }
        }
};
92
/* Serializes ptlrpcd_addref()/ptlrpcd_decref(); initialized elsewhere
 * (not visible in this file) -- TODO confirm init site. */
cfs_semaphore_t ptlrpcd_sem;
/* Reference count of ptlrpcd users; protected by ptlrpcd_sem. */
static int ptlrpcd_users = 0;
95
96 void ptlrpcd_wake(struct ptlrpc_request *req)
97 {
98         struct ptlrpc_request_set *rq_set = req->rq_set;
99
100         LASSERT(rq_set != NULL);
101
102         cfs_waitq_signal(&rq_set->set_waitq);
103 }
104
105 /*
106  * Move all request from an existing request set to the ptlrpcd queue.
107  * All requests from the set must be in phase RQ_PHASE_NEW.
108  */
109 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
110 {
111         cfs_list_t *tmp, *pos;
112
113         cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
114                 struct ptlrpc_request *req =
115                         cfs_list_entry(pos, struct ptlrpc_request,
116                                        rq_set_chain);
117
118                 LASSERT(req->rq_phase == RQ_PHASE_NEW);
119                 cfs_list_del_init(&req->rq_set_chain);
120                 req->rq_set = NULL;
121                 ptlrpcd_add_req(req, PSCOPE_OTHER);
122                 cfs_atomic_dec(&set->set_remaining);
123         }
124         LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
125 }
126 EXPORT_SYMBOL(ptlrpcd_add_rqset);
127
/*
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 *
 * Queues @req on the ptlrpcd set selected by @scope and the request's
 * import state, then wakes the servicing thread if needed.  Returns
 * the ptlrpc_set_add_new_req() result (asserted to be 0 below).
 */
int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope)
{
        struct ptlrpcd_ctl *pc;
        enum pscope_thread  pt;
        int rc;

        LASSERT(scope < PSCOPE_NR);

        cfs_spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                cfs_duration_t timeout;
                struct l_wait_info lwi;

                /*
                 * The request is still linked to a dying set: wait (up
                 * to 5 seconds) on rq_set_waitq for the old set to drop
                 * it before re-adding.
                 */
                req->rq_invalid_rqset = 0;
                cfs_spin_unlock(&req->rq_lock);

                timeout = cfs_time_seconds(5);
                lwi = LWI_TIMEOUT(timeout, back_to_sleep, NULL);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /*
                 * Replay request already sitting on a set: bump
                 * set_remaining and kick that set's waitq instead of
                 * moving the request.
                 */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                cfs_atomic_inc(&req->rq_set->set_remaining);
                cfs_spin_unlock(&req->rq_lock);

                cfs_waitq_signal(&req->rq_set->set_waitq);
        } else {
                cfs_spin_unlock(&req->rq_lock);
        }

        /* Route to the recovery thread unless the import is fully up. */
        pt = req->rq_send_state == LUSTRE_IMP_FULL ? PT_NORMAL : PT_RECOVERY;
        pc = &ptlrpcd_scopes[scope].pscope_thread[pt].pt_ctl;
        rc = ptlrpc_set_add_new_req(pc, req);
        /*
         * XXX disable this for CLIO: environment is needed for interpreter.
         *     add debug temporary to check rc.
         */
        LASSERTF(rc == 0, "ptlrpcd_add_req failed (rc = %d)\n", rc);
        /* NOTE: the "&& 0" deliberately disables this error path; the
         * LASSERTF above fires instead (see XXX comment). */
        if (rc && 0) {
                /*
                 * Thread is probably in stop now so we need to
                 * kill this rpc as it was not added. Let's call
                 * interpret for it to let know we're killing it
                 * so that higher levels might free associated
                 * resources.
                 */
                ptlrpc_req_interpret(NULL, req, -EBADR);
                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING) {
                /*
                 * The request is for recovery, should be sent ASAP.
                 */
                cfs_waitq_signal(&pc->pc_set->set_waitq);
        }

        return rc;
}
192
/*
 * Splice newly queued requests into pc's set and advance the set.
 *
 * Returns non-zero when there is more work to do (new requests were
 * merged, ptlrpc_check_set() made progress, or more new requests raced
 * in) and zero when the thread may sleep again.
 */
static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;
        ENTRY;

        /* Move requests queued by ptlrpc_set_add_new_req() onto the set. */
        cfs_spin_lock(&pc->pc_set->set_new_req_lock);
        cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
                req = cfs_list_entry(pos, struct ptlrpc_request, rq_set_chain);
                cfs_list_del_init(&req->rq_set_chain);
                ptlrpc_set_add_req(pc->pc_set, req);
                /*
                 * Need to calculate its timeout.
                 */
                rc = 1;
        }
        cfs_spin_unlock(&pc->pc_set->set_new_req_lock);

        if (cfs_atomic_read(&pc->pc_set->set_remaining)) {
                rc = rc | ptlrpc_check_set(env, pc->pc_set);

                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter.
                 */
                cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        /* Done: drop the set's reference on the request. */
                        cfs_list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished (req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                cfs_spin_lock(&pc->pc_set->set_new_req_lock);
                rc = !cfs_list_empty(&pc->pc_set->set_new_requests);
                cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
        }

        RETURN(rc);
}
242
243 #ifdef __KERNEL__
244 /*
245  * ptlrpc's code paths like to execute in process context, so we have this
246  * thread which spins on a set which contains the io rpcs. llite specifies
247  * ptlrpcd's set when it pushes pages down into the oscs.
248  */
/*
 * Main body of one ptlrpcd kernel thread: loop moving new requests
 * into pc's set and driving them until LIOD_STOP is set, then drain
 * (or abort, on LIOD_FORCE) what is left and signal pc_finishing.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct lu_env env = { .le_ses = NULL };
        int rc, exit = 0;
        ENTRY;

        rc = cfs_daemonize_ctxt(pc->pc_name);
        if (rc == 0) {
                /*
                 * XXX So far only "client" ptlrpcd uses an environment. In
                 * the future, ptlrpcd thread (or a thread-set) has to given
                 * an argument, describing its "scope".
                 */
                rc = lu_context_init(&env.le_ctx,
                                     LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
        }

        /* Unblock ptlrpcd_start(), which waits on pc_starting -- even
         * on failure, so the starter is never left hanging. */
        cfs_complete(&pc->pc_starting);

        if (rc != 0)
                RETURN(rc);
        /* 0x7 is presumably a debug marker for lu_context -- TODO confirm. */
        env.le_ctx.lc_cookie = 0x7;

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                rc = lu_env_refill(&env);
                if (rc != 0) {
                        /*
                         * XXX This is very awkward situation, because
                         * execution can neither continue (request
                         * interpreters assume that env is set up), nor repeat
                         * the loop (as this potentially results in a tight
                         * loop of -ENOMEM's).
                         *
                         * Fortunately, refill only ever does something when
                         * new modules are loaded, i.e., early during boot up.
                         */
                        CERROR("Failure to refill session: %d\n", rc);
                        continue;
                }

                timeout = ptlrpc_set_next_timeout(pc->pc_set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, pc->pc_set);

                lu_context_enter(&env.le_ctx);
                l_wait_event(pc->pc_set->set_waitq,
                             ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);

                /*
                 * Abort inflight rpcs for forced stop case.
                 */
                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(pc->pc_set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new rpcs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!cfs_list_empty(&pc->pc_set->set_requests))
                ptlrpc_set_wait(pc->pc_set);
        lu_context_fini(&env.le_ctx);
        /* Unblock ptlrpcd_stop(), which waits on pc_finishing. */
        cfs_complete(&pc->pc_finishing);

        cfs_clear_bit(LIOD_START, &pc->pc_flags);
        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
        return 0;
}
336
337 #else /* !__KERNEL__ */
338
/*
 * liblustre wait callback: poll pc's set once.  pc_recurred guards
 * against re-entry; nested invocations are no-ops returning 0.
 */
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                 rc = 0;

        /*
         * Single threaded!!
         */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1) {
                rc = lu_env_refill(&pc->pc_env);
                if (rc == 0) {
                        lu_context_enter(&pc->pc_env.le_ctx);
                        rc = ptlrpcd_check(&pc->pc_env, pc);
                        lu_context_exit(&pc->pc_env.le_ctx);
                        /* No progress made: expire timed-out requests. */
                        if (!rc)
                                ptlrpc_expired_set(pc->pc_set);
                        /*
                         * XXX: send replay requests.
                         */
                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                rc = ptlrpcd_check(&pc->pc_env, pc);
                }
        }

        pc->pc_recurred--;
        return rc;
}
368
369 int ptlrpcd_idle(void *arg)
370 {
371         struct ptlrpcd_ctl *pc = arg;
372
373         return (cfs_list_empty(&pc->pc_set->set_new_requests) &&
374                 cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
375 }
376
377 #endif
378
379 int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
380 {
381         int rc;
382         ENTRY;
383
384         /*
385          * Do not allow start second thread for one pc.
386          */
387         if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
388                 CERROR("Starting second thread (%s) for same pc %p\n",
389                        name, pc);
390                 RETURN(-EALREADY);
391         }
392
393         cfs_init_completion(&pc->pc_starting);
394         cfs_init_completion(&pc->pc_finishing);
395         cfs_spin_lock_init(&pc->pc_lock);
396         strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
397         pc->pc_set = ptlrpc_prep_set();
398         if (pc->pc_set == NULL)
399                 GOTO(out, rc = -ENOMEM);
400         /*
401          * So far only "client" ptlrpcd uses an environment. In the future,
402          * ptlrpcd thread (or a thread-set) has to be given an argument,
403          * describing its "scope".
404          */
405         rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
406         if (rc != 0) {
407                 ptlrpc_set_destroy(pc->pc_set);
408                 GOTO(out, rc);
409         }
410
411 #ifdef __KERNEL__
412         rc = cfs_kernel_thread(ptlrpcd, pc, 0);
413         if (rc < 0)  {
414                 lu_context_fini(&pc->pc_env.le_ctx);
415                 ptlrpc_set_destroy(pc->pc_set);
416                 GOTO(out, rc);
417         }
418         rc = 0;
419         cfs_wait_for_completion(&pc->pc_starting);
420 #else
421         pc->pc_wait_callback =
422                 liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
423                                                  &ptlrpcd_check_async_rpcs, pc);
424         pc->pc_idle_callback =
425                 liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
426                                                  &ptlrpcd_idle, pc);
427 #endif
428 out:
429         if (rc)
430                 cfs_clear_bit(LIOD_START, &pc->pc_flags);
431         RETURN(rc);
432 }
433
/*
 * Stop the thread (or deregister the liblustre callbacks) attached to
 * @pc and destroy its request set.  With @force non-zero the thread
 * aborts inflight rpcs instead of draining them (see ptlrpcd()).
 */
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
                CERROR("Thread for pc %p was not started\n", pc);
                return;
        }

        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
        /* Wake the thread so it notices LIOD_STOP. */
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        /* ptlrpcd() completes pc_finishing after draining its set. */
        cfs_wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        lu_context_fini(&pc->pc_env.le_ctx);
        ptlrpc_set_destroy(pc->pc_set);
}
454
455 void ptlrpcd_fini(void)
456 {
457         int i;
458         int j;
459
460         ENTRY;
461
462         for (i = 0; i < PSCOPE_NR; ++i) {
463                 for (j = 0; j < PT_NR; ++j) {
464                         struct ptlrpcd_ctl *pc;
465
466                         pc = &ptlrpcd_scopes[i].pscope_thread[j].pt_ctl;
467
468                         if (cfs_test_bit(LIOD_START, &pc->pc_flags))
469                                 ptlrpcd_stop(pc, 0);
470                 }
471         }
472         EXIT;
473 }
474
/*
 * Take a reference on the ptlrpcd service.  The first caller starts
 * every thread (one PT_NORMAL and one PT_RECOVERY per scope); later
 * callers only bump the count.  Serialized by ptlrpcd_sem.
 *
 * Returns 0 on success or the first ptlrpcd_start() error; on error
 * the reference is dropped and already-started threads are stopped
 * again via ptlrpcd_fini().
 */
int ptlrpcd_addref(void)
{
        int rc = 0;
        int i;
        int j;
        ENTRY;

        cfs_mutex_down(&ptlrpcd_sem);
        if (++ptlrpcd_users == 1) {
                for (i = 0; rc == 0 && i < PSCOPE_NR; ++i) {
                        for (j = 0; rc == 0 && j < PT_NR; ++j) {
                                struct ptlrpcd_thread *pt;
                                struct ptlrpcd_ctl    *pc;

                                pt = &ptlrpcd_scopes[i].pscope_thread[j];
                                pc = &pt->pt_ctl;
                                /* Tag the recovery thread before it starts. */
                                if (j == PT_RECOVERY)
                                        cfs_set_bit(LIOD_RECOVERY, &pc->pc_flags);
                                rc = ptlrpcd_start(pt->pt_name, pc);
                        }
                }
                if (rc != 0) {
                        /* Undo the ref and stop whatever already started. */
                        --ptlrpcd_users;
                        ptlrpcd_fini();
                }
        }
        cfs_mutex_up(&ptlrpcd_sem);
        RETURN(rc);
}
504
505 void ptlrpcd_decref(void)
506 {
507         cfs_mutex_down(&ptlrpcd_sem);
508         if (--ptlrpcd_users == 0)
509                 ptlrpcd_fini();
510         cfs_mutex_up(&ptlrpcd_sem);
511 }