/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
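/*
 * ptlrpcd runs shared daemon threads that send and complete asynchronous
 * RPCs on behalf of the rest of the client: one thread for normal
 * requests and one for recovery requests.  Users take a reference with
 * ptlrpcd_addref(), queue requests with ptlrpcd_add_req(), and drop the
 * reference with ptlrpcd_decref() when done.
 */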

#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <ctype.h>
#endif

#include <libcfs/kp30.h>
#include <lustre_net.h>
#include <lustre_lib.h>

#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <lprocfs_status.h>

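/*
 * The two shared ptlrpcd threads: one handles normal I/O RPCs, the
 * other handles recovery (replay) requests.
 */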
static struct ptlrpcd_ctl ptlrpcd_pc;
static struct ptlrpcd_ctl ptlrpcd_recovery_pc;

struct semaphore ptlrpcd_sem;
static int ptlrpcd_users = 0;

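/*
 * Wake up the thread that is processing the set this request belongs to.
 */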
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *rq_set = req->rq_set;

        LASSERT(rq_set != NULL);

        cfs_waitq_signal(&rq_set->set_waitq);
}

/*
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests in the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        struct list_head *tmp, *pos;

        list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(pos, struct ptlrpc_request, rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpcd_add_req(req);
                atomic_dec(&set->set_remaining);
        }
        LASSERT(atomic_read(&set->set_remaining) == 0);
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);

/*
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
int ptlrpcd_add_req(struct ptlrpc_request *req)
{
        struct ptlrpcd_ctl *pc;
        int rc;

        spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                cfs_duration_t timeout;
                struct l_wait_info lwi;

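                /*
                 * The request is still attached to a request set that has
                 * become invalid; wait (up to 5 seconds) for it to be
                 * detached before queueing it here.
                 */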
                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);

                timeout = cfs_time_seconds(5);
                lwi = LWI_TIMEOUT(timeout, back_to_sleep, NULL);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                atomic_inc(&req->rq_set->set_remaining);
                spin_unlock(&req->rq_lock);

                cfs_waitq_signal(&req->rq_set->set_waitq);
                return 0;
        } else {
                spin_unlock(&req->rq_lock);
        }

        if (req->rq_send_state == LUSTRE_IMP_FULL)
                pc = &ptlrpcd_pc;
        else
                pc = &ptlrpcd_recovery_pc;
        rc = ptlrpc_set_add_new_req(pc, req);
        if (rc) {
                /*
                 * The thread is probably stopping, so we must kill this
                 * RPC since it was never added to the set.  Call its
                 * interpret callback so that higher levels know we are
                 * killing it and can free associated resources.
                 */

                ptlrpc_req_interpret(req, -EBADR);
                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING) {
                /*
                 * The request is for recovery, so send it as soon as possible.
                 */
                cfs_waitq_signal(&pc->pc_set->set_waitq);
        }

        return rc;
}

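/*
 * Pull new requests from the set's new_req_list into the set proper,
 * then check the set for progress and prune completed requests.
 * Returns nonzero if there is (or may be) more work to do.
 */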
static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;
        ENTRY;

        spin_lock(&pc->pc_set->set_new_req_lock);
        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);
                ptlrpc_set_add_req(pc->pc_set, req);
                /*
                 * Need to calculate its timeout.
                 */
                rc = 1;
        }
        spin_unlock(&pc->pc_set->set_new_req_lock);

        if (atomic_read(&pc->pc_set->set_remaining)) {
                rc = rc | ptlrpc_check_set(pc->pc_set);

                /*
                 * XXX: Our set never completes, so we prune the completed
                 * requests after each iteration.  This could be smarter.
                 */
                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                spin_lock(&pc->pc_set->set_new_req_lock);
                rc = !list_empty(&pc->pc_set->set_new_requests);
                spin_unlock(&pc->pc_set->set_new_req_lock);
        }

        RETURN(rc);
}

#ifdef __KERNEL__
/*
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread, which spins on a set containing the I/O RPCs.  llite specifies
 * ptlrpcd's set when it pushes pages down into the OSCs.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int rc, exit = 0;
        ENTRY;

        if ((rc = cfs_daemonize_ctxt(pc->pc_name))) {
                complete(&pc->pc_starting);
                goto out;
        }

        complete(&pc->pc_starting);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set.  New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(pc->pc_set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, pc->pc_set);

                l_wait_event(pc->pc_set->set_waitq, ptlrpcd_check(pc), &lwi);

                /*
                 * Abort inflight RPCs for the forced stop case.
                 */
                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(pc->pc_set);
                        exit++;
                }

                /*
                 * Make one more loop to ensure that ptlrpcd_check() copied
                 * all raced new RPCs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!list_empty(&pc->pc_set->set_requests))
                ptlrpc_set_wait(pc->pc_set);

        complete(&pc->pc_finishing);
out:
        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        clear_bit(LIOD_FORCE, &pc->pc_flags);
        return 0;
}

#else

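/*
 * Wait callback for liblustre: poll the ptlrpcd set from the caller's
 * context, since there is no separate daemon thread in userspace.
 */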
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int rc = 0;

        /*
         * Single threaded!
         */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1) {
                rc = ptlrpcd_check(pc);
                if (!rc)
                        ptlrpc_expired_set(pc->pc_set);
                /*
                 * XXX: send replay requests.
                 */
                if (pc == &ptlrpcd_recovery_pc)
                        rc = ptlrpcd_check(pc);
        }

        pc->pc_recurred--;
        return rc;
}

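/*
 * Idle callback for liblustre: true when there are no queued or
 * outstanding requests on this ptlrpcd set.
 */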
int ptlrpcd_idle(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;

        return (list_empty(&pc->pc_set->set_new_requests) &&
                atomic_read(&pc->pc_set->set_remaining) == 0);
}

#endif

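/*
 * Create the request set for a ptlrpcd_ctl and launch its thread (in the
 * kernel) or register its wait/idle callbacks (in liblustre).
 */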
int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
{
        int rc = 0;
        ENTRY;

        /*
         * Do not allow starting a second thread for the same pc.
         */
        if (test_bit(LIOD_START, &pc->pc_flags)) {
                CERROR("Starting second thread (%s) for same pc %p\n",
                       name, pc);
                RETURN(-EALREADY);
        }

        set_bit(LIOD_START, &pc->pc_flags);
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);
        strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);

        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);

#ifdef __KERNEL__
        rc = cfs_kernel_thread(ptlrpcd, pc, 0);
        if (rc < 0) {
                ptlrpc_set_destroy(pc->pc_set);
                GOTO(out, rc);
        }
        rc = 0;
        wait_for_completion(&pc->pc_starting);
#else
        pc->pc_wait_callback =
                liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
                                                 &ptlrpcd_check_async_rpcs, pc);
        pc->pc_idle_callback =
                liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
                                                 &ptlrpcd_idle, pc);
#endif
out:
        if (rc)
                clear_bit(LIOD_START, &pc->pc_flags);
        RETURN(rc);
}

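/*
 * Ask the thread to stop (aborting in-flight RPCs if @force is set),
 * wait for it to exit (or deregister the liblustre callbacks), and
 * destroy the request set.
 */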
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CERROR("Thread for pc %p was not started\n", pc);
                return;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        ptlrpc_set_destroy(pc->pc_set);
}

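/*
 * Take a reference on the shared ptlrpcd threads; the first user starts
 * both the regular and the recovery thread.
 */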
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        mutex_down(&ptlrpcd_sem);
        if (++ptlrpcd_users != 1)
                GOTO(out, rc);

        rc = ptlrpcd_start("ptlrpcd", &ptlrpcd_pc);
        if (rc) {
                --ptlrpcd_users;
                GOTO(out, rc);
        }

        rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
        if (rc) {
                ptlrpcd_stop(&ptlrpcd_pc, 0);
                --ptlrpcd_users;
                GOTO(out, rc);
        }
out:
        mutex_up(&ptlrpcd_sem);
        RETURN(rc);
}

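/*
 * Drop a reference; the last user stops both threads.
 */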
void ptlrpcd_decref(void)
{
        mutex_down(&ptlrpcd_sem);
        if (--ptlrpcd_users == 0) {
                ptlrpcd_stop(&ptlrpcd_pc, 0);
                ptlrpcd_stop(&ptlrpcd_recovery_pc, 0);
        }
        mutex_up(&ptlrpcd_sem);
}