/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <ctype.h>
#endif

#include <lustre_net.h>
#include <lustre_lib.h>

#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <lprocfs_status.h>

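/*
 * Two shared ptlrpcd control blocks: one for ordinary asynchronous RPCs
 * and one for recovery traffic (requests whose import is not yet FULL).
 * ptlrpcd_sem and ptlrpcd_users reference-count the start/stop of the
 * corresponding threads, see ptlrpcd_addref()/ptlrpcd_decref().
 */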
static struct ptlrpcd_ctl ptlrpcd_pc;
static struct ptlrpcd_ctl ptlrpcd_recovery_pc;

struct semaphore ptlrpcd_sem;
static int ptlrpcd_users = 0;

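/*
 * Wake up the thread that services the set @req was added to, so that it
 * re-runs ptlrpcd_check() and notices the new state of the request.
 */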
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *rq_set = req->rq_set;

        LASSERT(rq_set != NULL);

        cfs_waitq_signal(&rq_set->set_waitq);
}

/*
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
        struct ptlrpcd_ctl *pc;
        int rc;

        if (req->rq_send_state == LUSTRE_IMP_FULL)
                pc = &ptlrpcd_pc;
        else
                pc = &ptlrpcd_recovery_pc;

        rc = ptlrpc_set_add_new_req(pc, req);
        if (rc) {
                ptlrpc_interpterer_t interpreter;

                interpreter = req->rq_interpret_reply;

                /*
                 * The thread is probably stopping now, so we cannot add this
                 * rpc.  Kill it instead: call the interpreter to let the
                 * higher levels know we are killing it so that they can free
                 * any associated resources.
                 */
                req->rq_status = -EBADR;
                interpreter(NULL, req, &req->rq_async_args,
                            req->rq_status);
                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        }
}

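/*
 * A rough usage sketch (illustrative only: request allocation and packing
 * with the usual ptlrpc helpers are elided, and my_interpret_cb stands for
 * a caller-supplied completion callback):
 *
 *      ptlrpcd_addref();
 *      req->rq_interpret_reply = my_interpret_cb;
 *      ptlrpcd_add_req(req);
 *      ...
 *      ptlrpcd_decref();
 *
 * The request is then sent and completed asynchronously by a ptlrpcd
 * thread, which calls rq_interpret_reply once the reply (or an error)
 * arrives.
 */

/*
 * Move newly queued requests from set_new_requests into the set proper and
 * poll the set for progress.  Returns non-zero when the ptlrpcd main loop
 * has a reason to wake up: the thread is being stopped, new requests have
 * arrived, or the set needs attention.
 */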
static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;
        ENTRY;

        if (test_bit(LIOD_STOP, &pc->pc_flags))
                RETURN(1);

        spin_lock(&pc->pc_set->set_new_req_lock);
        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);
                ptlrpc_set_add_req(pc->pc_set, req);
                /*
                 * Need to calculate its timeout.
                 */
                rc = 1;
        }
        spin_unlock(&pc->pc_set->set_new_req_lock);

        if (pc->pc_set->set_remaining) {
                rc = rc | ptlrpc_check_set(env, pc->pc_set);

                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. Boy, could this be smarter.
                 */
                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                spin_lock(&pc->pc_set->set_new_req_lock);
                rc = !list_empty(&pc->pc_set->set_new_requests);
                spin_unlock(&pc->pc_set->set_new_req_lock);
        }

        RETURN(rc);
}

#ifdef __KERNEL__
/*
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread, which spins on a set containing the I/O RPCs. llite specifies
 * ptlrpcd's set when it pushes pages down into the OSCs.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int rc;
        ENTRY;

        if ((rc = cfs_daemonize_ctxt(pc->pc_name))) {
                complete(&pc->pc_starting);
                goto out;
        }

        complete(&pc->pc_starting);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * set_new_requests list and ptlrpcd_check() moves them into the set.
         */
        while (1) {
                struct l_wait_info lwi;
                cfs_duration_t timeout;

                timeout = cfs_time_seconds(ptlrpc_set_next_timeout(pc->pc_set));
                lwi = LWI_TIMEOUT(timeout, ptlrpc_expired_set, pc->pc_set);

                lu_context_enter(&pc->pc_env.le_ctx);
                l_wait_event(pc->pc_set->set_waitq,
                             ptlrpcd_check(&pc->pc_env, pc), &lwi);
                lu_context_exit(&pc->pc_env.le_ctx);

                /*
                 * Abort inflight rpcs for the forced stop case.
                 */
                if (test_bit(LIOD_STOP_FORCE, &pc->pc_flags))
                        ptlrpc_abort_set(pc->pc_set);

                if (test_bit(LIOD_STOP, &pc->pc_flags))
                        break;
        }

        /*
         * Wait for inflight requests to drain.
         */
        if (!list_empty(&pc->pc_set->set_requests))
                ptlrpc_set_wait(pc->pc_set);

        complete(&pc->pc_finishing);
out:
        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        return 0;
}

#else /* !__KERNEL__ */

int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                  rc = 0;

        /*
         * Single threaded!!
         */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1) {
                lu_context_enter(&pc->pc_env.le_ctx);
                rc = ptlrpcd_check(&pc->pc_env, pc);
                lu_context_exit(&pc->pc_env.le_ctx);
                if (!rc)
                        ptlrpc_expired_set(pc->pc_set);
                /*
                 * XXX: send replay requests.
                 */
                if (pc == &ptlrpcd_recovery_pc)
                        rc = ptlrpcd_check(&pc->pc_env, pc);
        }

        pc->pc_recurred--;
        return rc;
}

int ptlrpcd_idle(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;

        return (list_empty(&pc->pc_set->set_new_requests) &&
                pc->pc_set->set_remaining == 0);
}

#endif

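/*
 * Start one ptlrpcd thread for @pc (or, for liblustre, register the
 * polling and idle callbacks).  @name becomes the daemon thread's name.
 */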
int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
{
        int rc;
        ENTRY;

        /*
         * Do not allow starting a second thread for the same pc.
         */
        if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CERROR("Starting second thread (%s) for same pc %p\n",
                       name, pc);
                RETURN(-EALREADY);
        }

        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);
        /* Use "%s" so a '%' in @name cannot be misread as a format specifier. */
        snprintf(pc->pc_name, sizeof(pc->pc_name), "%s", name);
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);
        /*
         * So far only the "client" ptlrpcd uses an environment. In the
         * future, a ptlrpcd thread (or a thread-set) has to be given an
         * argument, describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0) {
                ptlrpc_set_destroy(pc->pc_set);
                GOTO(out, rc);
        }

#ifdef __KERNEL__
        rc = cfs_kernel_thread(ptlrpcd, pc, 0);
        if (rc < 0) {
                lu_context_fini(&pc->pc_env.le_ctx);
                ptlrpc_set_destroy(pc->pc_set);
                GOTO(out, rc);
        }
        rc = 0;
        wait_for_completion(&pc->pc_starting);
#else
        pc->pc_wait_callback =
                liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
                                                 &ptlrpcd_check_async_rpcs, pc);
        pc->pc_idle_callback =
                liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
                                                 &ptlrpcd_idle, pc);
#endif
out:
        if (rc)
                clear_bit(LIOD_START, &pc->pc_flags);
        RETURN(rc);
}

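/*
 * Ask the thread for @pc to stop and wait for it to exit.  With @force
 * set, inflight RPCs are aborted rather than being left to drain.
 */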
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CERROR("Thread for pc %p was not started\n", pc);
                return;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_STOP_FORCE, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        lu_context_fini(&pc->pc_env.le_ctx);
        ptlrpc_set_destroy(pc->pc_set);
}

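/*
 * Take a reference on the ptlrpcd service.  The first reference starts the
 * regular and recovery threads; ptlrpcd_decref() drops the reference and
 * stops both threads once it reaches zero.
 */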
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        mutex_down(&ptlrpcd_sem);
        if (++ptlrpcd_users != 1)
                GOTO(out, rc);

        rc = ptlrpcd_start("ptlrpcd", &ptlrpcd_pc);
        if (rc) {
                --ptlrpcd_users;
                GOTO(out, rc);
        }

        rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
        if (rc) {
                ptlrpcd_stop(&ptlrpcd_pc, 0);
                --ptlrpcd_users;
                GOTO(out, rc);
        }
out:
        mutex_up(&ptlrpcd_sem);
        RETURN(rc);
}

void ptlrpcd_decref(void)
{
        mutex_down(&ptlrpcd_sem);
        if (--ptlrpcd_users == 0) {
                ptlrpcd_stop(&ptlrpcd_pc, 0);
                ptlrpcd_stop(&ptlrpcd_recovery_pc, 0);
        }
        mutex_up(&ptlrpcd_sem);
}