/* Source: lustre/ptlrpc/ptlrpcd.c — fs/lustre-release.git (Whamcloud gitweb, branch HEAD) */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2001-2003 Cluster File Systems, Inc.
5  *   Author Peter Braam <braam@clusterfs.com>
6  *
7  *   This file is part of the Lustre file system, http://www.lustre.org
8  *   Lustre is a trademark of Cluster File Systems, Inc.
9  *
10  *   You may have signed or agreed to another license before downloading
11  *   this software.  If so, you are bound by the terms and conditions
12  *   of that agreement, and the following does not apply to you.  See the
13  *   LICENSE file included with this distribution for more information.
14  *
15  *   If you did not agree to a different license, then this copy of Lustre
16  *   is open source software; you can redistribute it and/or modify it
17  *   under the terms of version 2 of the GNU General Public License as
18  *   published by the Free Software Foundation.
19  *
20  *   In either case, Lustre is distributed in the hope that it will be
21  *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
22  *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
23  *   license text for more details.
24  *
25  */
26
27 #define DEBUG_SUBSYSTEM S_RPC
28
29 #ifdef __KERNEL__
30 # include <libcfs/libcfs.h>
31 #else /* __KERNEL__ */
32 # include <liblustre.h>
33 # include <ctype.h>
34 #endif
35
36 #include <lustre_net.h>
37 # include <lustre_lib.h>
38
39 #include <lustre_ha.h>
40 #include <obd_class.h>   /* for obd_zombie */
41 #include <obd_support.h> /* for OBD_FAIL_CHECK */
42 #include <lprocfs_status.h>
43
#define LIOD_STOP 0     /* bit index in pc_flags: request the daemon to stop */

/*
 * Per-daemon control state.  Two instances exist in this file: one for
 * normal traffic (ptlrpcd_pc) and one for recovery (ptlrpcd_recovery_pc).
 */
struct ptlrpcd_ctl {
        unsigned long             pc_flags;       /* LIOD_* bit flags */
        spinlock_t                pc_lock;        /* NOTE(review): initialized in ptlrpcd_start() but not otherwise used in this file — confirm against callers */
        struct completion         pc_starting;    /* completed once the thread has daemonized (or failed to) */
        struct completion         pc_finishing;   /* completed when the thread exits */
        struct ptlrpc_request_set *pc_set;        /* the request set this daemon services */
        char                      pc_name[16];    /* thread name passed to daemonize */
#ifndef __KERNEL__
        int                       pc_recurred;        /* re-entrancy guard for the single-threaded userspace callback */
        void                     *pc_callback;        /* NOTE(review): never assigned in this file */
        void                     *pc_wait_callback;   /* handle from liblustre_register_wait_callback() */
        void                     *pc_idle_callback;   /* handle from liblustre_register_idle_callback() */
#endif
};
59
/* The two daemon instances: normal traffic vs. recovery (see ptlrpcd_add_req). */
static struct ptlrpcd_ctl ptlrpcd_pc;
static struct ptlrpcd_ctl ptlrpcd_recovery_pc;

/* ptlrpcd_sem protects ptlrpcd_users and serializes daemon start/stop. */
struct semaphore ptlrpcd_sem;
static int ptlrpcd_users = 0;
65
66 void ptlrpcd_wake(struct ptlrpc_request *req)
67 {
68         struct ptlrpc_request_set *rq_set = req->rq_set;
69
70         LASSERT(rq_set != NULL);
71
72         cfs_waitq_signal(&rq_set->set_waitq);
73 }
74
75 /* requests that are added to the ptlrpcd queue are sent via
76  * ptlrpcd_check->ptlrpc_check_set() */
77 void ptlrpcd_add_req(struct ptlrpc_request *req)
78 {
79         struct ptlrpcd_ctl *pc;
80
81         if (req->rq_send_state == LUSTRE_IMP_FULL)
82                 pc = &ptlrpcd_pc;
83         else
84                 pc = &ptlrpcd_recovery_pc;
85
86         ptlrpc_set_add_new_req(pc->pc_set, req);
87         cfs_waitq_signal(&pc->pc_set->set_waitq);
88 }
89
90 static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
91 {
92         struct list_head *tmp, *pos;
93         struct ptlrpc_request *req;
94         int rc = 0;
95         ENTRY;
96
97         if (test_bit(LIOD_STOP, &pc->pc_flags))
98                 RETURN(1);
99
100         spin_lock(&pc->pc_set->set_new_req_lock);
101         list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
102                 req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
103                 list_del_init(&req->rq_set_chain);
104                 ptlrpc_set_add_req(pc->pc_set, req);
105                 rc = 1; /* need to calculate its timeout */
106         }
107         spin_unlock(&pc->pc_set->set_new_req_lock);
108
109         if (pc->pc_set->set_remaining) {
110                 rc = rc | ptlrpc_check_set(pc->pc_set);
111
112                 /* XXX our set never completes, so we prune the completed
113                  * reqs after each iteration. boy could this be smarter. */
114                 list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
115                         req = list_entry(pos, struct ptlrpc_request,
116                                          rq_set_chain);
117                         if (req->rq_phase != RQ_PHASE_COMPLETE)
118                                 continue;
119
120                         list_del_init(&req->rq_set_chain);
121                         req->rq_set = NULL;
122                         ptlrpc_req_finished (req);
123                 }
124         }
125
126         if (rc == 0) {
127                 /* If new requests have been added, make sure to wake up */
128                 spin_lock(&pc->pc_set->set_new_req_lock);
129                 rc = !list_empty(&pc->pc_set->set_new_requests);
130                 spin_unlock(&pc->pc_set->set_new_req_lock);
131         }
132
133         RETURN(rc);
134 }
135
136 #ifdef __KERNEL__
137 /* ptlrpc's code paths like to execute in process context, so we have this
138  * thread which spins on a set which contains the io rpcs.  llite specifies
139  * ptlrpcd's set when it pushes pages down into the oscs */
140 static int ptlrpcd(void *arg)
141 {
142         struct ptlrpcd_ctl *pc = arg;
143         int rc;
144         ENTRY;
145
146         if ((rc = cfs_daemonize_ctxt(pc->pc_name))) {
147                 complete(&pc->pc_starting);
148                 return rc;
149         }
150
151         complete(&pc->pc_starting);
152
153         /* this mainloop strongly resembles ptlrpc_set_wait except
154          * that our set never completes.  ptlrpcd_check calls ptlrpc_check_set
155          * when there are requests in the set.  new requests come in
156          * on the set's new_req_list and ptlrpcd_check moves them into
157          * the set. */
158         while (1) {
159                 struct l_wait_info lwi;
160                 cfs_duration_t timeout;
161
162                 timeout = cfs_time_seconds(ptlrpc_set_next_timeout(pc->pc_set));
163                 lwi = LWI_TIMEOUT(timeout, ptlrpc_expired_set, pc->pc_set);
164
165                 l_wait_event(pc->pc_set->set_waitq, ptlrpcd_check(pc), &lwi);
166
167                 if (test_bit(LIOD_STOP, &pc->pc_flags))
168                         break;
169         }
170         /* wait for inflight requests to drain */
171         if (!list_empty(&pc->pc_set->set_requests))
172                 ptlrpc_set_wait(pc->pc_set);
173         complete(&pc->pc_finishing);
174         return 0;
175 }
176
177 #else
178
179 int ptlrpcd_check_async_rpcs(void *arg)
180 {
181         struct ptlrpcd_ctl *pc = arg;
182         int                  rc = 0;
183
184         /* single threaded!! */
185         pc->pc_recurred++;
186
187         if (pc->pc_recurred == 1) {
188                 rc = ptlrpcd_check(pc);
189                 if (!rc)
190                         ptlrpc_expired_set(pc->pc_set);
191                 /*XXX send replay requests */
192                 if (pc == &ptlrpcd_recovery_pc)
193                         rc = ptlrpcd_check(pc);
194         }
195
196         pc->pc_recurred--;
197         return rc;
198 }
199
200 int ptlrpcd_idle(void *arg)
201 {
202         struct ptlrpcd_ctl *pc = arg;
203
204         return (list_empty(&pc->pc_set->set_new_requests) &&
205                 pc->pc_set->set_remaining == 0);
206 }
207
208 #endif
209
210 static int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
211 {
212         int rc;
213
214         ENTRY;
215         memset(pc, 0, sizeof(*pc));
216         init_completion(&pc->pc_starting);
217         init_completion(&pc->pc_finishing);
218         pc->pc_flags = 0;
219         spin_lock_init(&pc->pc_lock);
220         snprintf (pc->pc_name, sizeof (pc->pc_name), name);
221
222         pc->pc_set = ptlrpc_prep_set();
223         if (pc->pc_set == NULL)
224                 RETURN(-ENOMEM);
225
226 #ifdef __KERNEL__
227         rc = cfs_kernel_thread(ptlrpcd, pc, 0);
228         if (rc < 0)  {
229                 ptlrpc_set_destroy(pc->pc_set);
230                 RETURN(rc);
231         }
232
233         wait_for_completion(&pc->pc_starting);
234 #else
235         pc->pc_wait_callback =
236                 liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
237                                                  &ptlrpcd_check_async_rpcs, pc);
238         pc->pc_idle_callback =
239                 liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
240                                                  &ptlrpcd_idle, pc);
241         (void)rc;
242 #endif
243         RETURN(0);
244 }
245
/*
 * Ask the daemon to stop and tear down its request set.  In the kernel
 * this blocks until the thread has drained in-flight requests and exited;
 * in userspace it deregisters the liblustre callbacks instead.
 */
static void ptlrpcd_stop(struct ptlrpcd_ctl *pc)
{
        /* ptlrpcd_check() tests this bit, so the woken thread will exit. */
        set_bit(LIOD_STOP, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        ptlrpc_set_destroy(pc->pc_set);
}
258
259 int ptlrpcd_addref(void)
260 {
261         int rc = 0;
262         ENTRY;
263
264         mutex_down(&ptlrpcd_sem);
265         if (++ptlrpcd_users != 1)
266                 GOTO(out, rc);
267
268         rc = ptlrpcd_start("ptlrpcd", &ptlrpcd_pc);
269         if (rc) {
270                 --ptlrpcd_users;
271                 GOTO(out, rc);
272         }
273
274         rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
275         if (rc) {
276                 ptlrpcd_stop(&ptlrpcd_pc);
277                 --ptlrpcd_users;
278                 GOTO(out, rc);
279         }
280 out:
281         mutex_up(&ptlrpcd_sem);
282         RETURN(rc);
283 }
284
285 void ptlrpcd_decref(void)
286 {
287         mutex_down(&ptlrpcd_sem);
288         if (--ptlrpcd_users == 0) {
289                 ptlrpcd_stop(&ptlrpcd_pc);
290                 ptlrpcd_stop(&ptlrpcd_recovery_pc);
291         }
292         mutex_up(&ptlrpcd_sem);
293 }