Whamcloud - gitweb
Land b1_8_gate onto b1_8 (20081218_1708)
[fs/lustre-release.git] / lustre / ptlrpc / ptlrpcd.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/ptlrpcd.c
37  */
38
39 #define DEBUG_SUBSYSTEM S_RPC
40
41 #ifdef __KERNEL__
42 # include <libcfs/libcfs.h>
43 #else /* __KERNEL__ */
44 # include <liblustre.h>
45 # include <ctype.h>
46 #endif
47
48 #include <libcfs/kp30.h>
49 #include <lustre_net.h>
50 # include <lustre_lib.h>
51
52 #include <lustre_ha.h>
53 #include <obd_class.h>   /* for obd_zombie */
54 #include <obd_support.h> /* for OBD_FAIL_CHECK */
55 #include <lprocfs_status.h>
56
/* Control block for the regular ptlrpcd thread (rpcs in LUSTRE_IMP_FULL). */
static struct ptlrpcd_ctl ptlrpcd_pc;
/* Control block for the recovery thread (rpcs in any other import state). */
static struct ptlrpcd_ctl ptlrpcd_recovery_pc;

/* Serializes addref/decref and protects ptlrpcd_users below. */
struct semaphore ptlrpcd_sem;
static int ptlrpcd_users = 0;
62
63 void ptlrpcd_wake(struct ptlrpc_request *req)
64 {
65         struct ptlrpc_request_set *rq_set = req->rq_set;
66
67         LASSERT(rq_set != NULL);
68
69         cfs_waitq_signal(&rq_set->set_waitq);
70 }
71
72 /* 
73  * Requests that are added to the ptlrpcd queue are sent via
74  * ptlrpcd_check->ptlrpc_check_set().
75  */
76 void ptlrpcd_add_req(struct ptlrpc_request *req)
77 {
78         struct ptlrpcd_ctl *pc;
79         int rc;
80
81         if (req->rq_send_state == LUSTRE_IMP_FULL)
82                 pc = &ptlrpcd_pc;
83         else
84                 pc = &ptlrpcd_recovery_pc;
85         rc = ptlrpc_set_add_new_req(pc, req);
86         if (rc) {
87                 int (*interpreter)(struct ptlrpc_request *,
88                                    void *, int);
89                                 
90                 interpreter = req->rq_interpret_reply;
91
92                 /*
93                  * Thread is probably in stop now so we need to
94                  * kill this rpc as it was not added. Let's call
95                  * interpret for it to let know we're killing it
96                  * so that higher levels might free assosiated
97                  * resources.
98                 */
99                 req->rq_status = -EBADR;
100                 interpreter(req, &req->rq_async_args,
101                             req->rq_status);
102                 req->rq_set = NULL;
103                 ptlrpc_req_finished(req);
104         }
105 }
106
/*
 * Move newly queued requests into the working set and advance the rpcs
 * already in flight.  Returns non-zero when there is (or may be) more
 * work pending, so the caller's wait condition stays true; zero lets
 * the thread sleep until the next wakeup or timeout.
 */
static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;
        ENTRY;

        /* Drain the producer-visible new_requests queue into the set
         * under set_new_req_lock. */
        spin_lock(&pc->pc_set->set_new_req_lock);
        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);
                ptlrpc_set_add_req(pc->pc_set, req);
                /* 
                 * Need to calculate its timeout. 
                 */
                rc = 1;
        }
        spin_unlock(&pc->pc_set->set_new_req_lock);

        if (pc->pc_set->set_remaining) {
                rc = rc | ptlrpc_check_set(pc->pc_set);

                /* 
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter. 
                 */
                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        /* Unlink the completed rpc and drop the set's
                         * reference on it. */
                        list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished (req);
                }
        }

        if (rc == 0) {
                /* 
                 * If new requests have been added, make sure to wake up. 
                 */
                spin_lock(&pc->pc_set->set_new_req_lock);
                rc = !list_empty(&pc->pc_set->set_new_requests);
                spin_unlock(&pc->pc_set->set_new_req_lock);
        }

        RETURN(rc);
}
156
157 #ifdef __KERNEL__
158 /* 
159  * ptlrpc's code paths like to execute in process context, so we have this
160  * thread which spins on a set which contains the io rpcs. llite specifies
161  * ptlrpcd's set when it pushes pages down into the oscs.
162  */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int rc, exit = 0;
        ENTRY;

        /* Detach into a daemon context.  On failure still complete
         * pc_starting so ptlrpcd_start() does not block forever. */
        if ((rc = cfs_daemonize_ctxt(pc->pc_name))) {
                complete(&pc->pc_starting);
                goto out;
        }

        complete(&pc->pc_starting);

        /* 
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's 
         * new_req_list and ptlrpcd_check() moves them into the set. 
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                /* timeout == 0 means no rpc deadline is pending; still
                 * wake at least every second to poll. */
                timeout = ptlrpc_set_next_timeout(pc->pc_set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1), 
                                  ptlrpc_expired_set, pc->pc_set);

                l_wait_event(pc->pc_set->set_waitq, ptlrpcd_check(pc), &lwi);

                /*
                 * Abort inflight rpcs for forced stop case.
                 */
                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(pc->pc_set);
                        exit++;
                }

                /* 
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new rpcs into the set so we can kill them.
                 */
        } while (exit < 2);

        /* 
         * Wait for inflight requests to drain. 
         */
        if (!list_empty(&pc->pc_set->set_requests))
                ptlrpc_set_wait(pc->pc_set);

        complete(&pc->pc_finishing);
out:
        /* Reset flags so this pc can be started again later. */
        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        clear_bit(LIOD_FORCE, &pc->pc_flags);
        return 0;
}
220
221 #else
222
223 int ptlrpcd_check_async_rpcs(void *arg)
224 {
225         struct ptlrpcd_ctl *pc = arg;
226         int                  rc = 0;
227
228         /* 
229          * Single threaded!! 
230          */
231         pc->pc_recurred++;
232
233         if (pc->pc_recurred == 1) {
234                 rc = ptlrpcd_check(pc);
235                 if (!rc)
236                         ptlrpc_expired_set(pc->pc_set);
237                 /* 
238                  * XXX: send replay requests. 
239                  */
240                 if (pc == &ptlrpcd_recovery_pc)
241                         rc = ptlrpcd_check(pc);
242         }
243
244         pc->pc_recurred--;
245         return rc;
246 }
247
248 int ptlrpcd_idle(void *arg)
249 {
250         struct ptlrpcd_ctl *pc = arg;
251
252         return (list_empty(&pc->pc_set->set_new_requests) &&
253                 pc->pc_set->set_remaining == 0);
254 }
255
256 #endif
257
258 int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
259 {
260         int rc = 0;
261         ENTRY;
262  
263         /* 
264          * Do not allow start second thread for one pc. 
265          */
266         if (test_bit(LIOD_START, &pc->pc_flags)) {
267                 CERROR("Starting second thread (%s) for same pc %p\n",
268                        name, pc);
269                 RETURN(-EALREADY);
270         }
271
272         set_bit(LIOD_START, &pc->pc_flags);
273         init_completion(&pc->pc_starting);
274         init_completion(&pc->pc_finishing);
275         spin_lock_init(&pc->pc_lock);
276         snprintf (pc->pc_name, sizeof (pc->pc_name), name);
277
278         pc->pc_set = ptlrpc_prep_set();
279         if (pc->pc_set == NULL)
280                 GOTO(out, rc = -ENOMEM);
281
282 #ifdef __KERNEL__
283         rc = cfs_kernel_thread(ptlrpcd, pc, 0);
284         if (rc < 0)  {
285                 ptlrpc_set_destroy(pc->pc_set);
286                 GOTO(out, rc);
287         }
288         rc = 0;
289         wait_for_completion(&pc->pc_starting);
290 #else
291         pc->pc_wait_callback =
292                 liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
293                                                  &ptlrpcd_check_async_rpcs, pc);
294         pc->pc_idle_callback =
295                 liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
296                                                  &ptlrpcd_idle, pc);
297 #endif
298 out:
299         if (rc)
300                 clear_bit(LIOD_START, &pc->pc_flags);
301         RETURN(rc);
302 }
303
/*
 * Stop the thread (or deregister the liblustre callbacks) for @pc and
 * destroy its request set.  With @force set, in-flight rpcs are aborted
 * instead of drained.
 */
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CERROR("Thread for pc %p was not started\n", pc);
                return;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
        /* Wake the thread so it notices LIOD_STOP. */
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        ptlrpc_set_destroy(pc->pc_set);
}
323
/*
 * Take a reference on the ptlrpcd service.  The first caller starts
 * both the regular and the recovery threads; later callers just bump
 * the count.  Returns 0 on success or the ptlrpcd_start() error, in
 * which case no reference is held.
 */
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        mutex_down(&ptlrpcd_sem);
        if (++ptlrpcd_users != 1)
                GOTO(out, rc);

        rc = ptlrpcd_start("ptlrpcd", &ptlrpcd_pc);
        if (rc) {
                --ptlrpcd_users;
                GOTO(out, rc);
        }

        rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
        if (rc) {
                /* Roll back the partial start so no stray thread remains. */
                ptlrpcd_stop(&ptlrpcd_pc, 0);
                --ptlrpcd_users;
                GOTO(out, rc);
        }
out:
        mutex_up(&ptlrpcd_sem);
        RETURN(rc);
}
349
350 void ptlrpcd_decref(void)
351 {
352         mutex_down(&ptlrpcd_sem);
353         if (--ptlrpcd_users == 0) {
354                 ptlrpcd_stop(&ptlrpcd_pc, 0);
355                 ptlrpcd_stop(&ptlrpcd_recovery_pc, 0);
356         }
357         mutex_up(&ptlrpcd_sem);
358 }