lustre/ptlrpc/ptlrpcd.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2001-2003 Cluster File Systems, Inc.
 *   Author Peter Braam <braam@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <ctype.h>
#endif

#include <libcfs/kp30.h>
#include <lustre_net.h>
#include <lustre_lib.h>

#include <lustre_ha.h>
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <lprocfs_status.h>

#define LIOD_STOP 0
struct ptlrpcd_ctl {
        unsigned long             pc_flags;      /* LIOD_* state bits */
        spinlock_t                pc_lock;
        struct completion         pc_starting;   /* completed once the thread is up */
        struct completion         pc_finishing;  /* completed when the thread exits */
        struct list_head          pc_req_list;
        cfs_waitq_t               pc_waitq;      /* woken on new requests or stop */
        struct ptlrpc_request_set *pc_set;       /* the set this daemon spins on */
        char                      pc_name[16];   /* thread name */
#ifndef __KERNEL__
        int                       pc_recurred;   /* re-entrancy guard for the callback */
        void                     *pc_callback;   /* liblustre wait callback handle */
#endif
};

static struct ptlrpcd_ctl ptlrpcd_pc;
static struct ptlrpcd_ctl ptlrpcd_recovery_pc;

struct semaphore ptlrpcd_sem;
static int ptlrpcd_users = 0;

void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpcd_ctl *pc = req->rq_ptlrpcd_data;

        LASSERT(pc != NULL);

        cfs_waitq_signal(&pc->pc_waitq);
}

/* requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set() */
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
        struct ptlrpcd_ctl *pc;

        if (req->rq_send_state == LUSTRE_IMP_FULL)
                pc = &ptlrpcd_pc;
        else
                pc = &ptlrpcd_recovery_pc;

        req->rq_ptlrpcd_data = pc;
        ptlrpc_set_add_new_req(pc->pc_set, req);
        cfs_waitq_signal(&pc->pc_waitq);
}
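
/*
 * Illustrative sketch only (not part of the original file): a caller that
 * wants an RPC handled asynchronously typically attaches a completion
 * callback and hands the request to ptlrpcd.  The helper and callback
 * names below are hypothetical, and rq_interpret_reply is assumed to be
 * the completion hook of this ptlrpc version.
 */
#if 0
static int my_async_interpret(struct ptlrpc_request *req, void *data, int rc)
{
        /* runs from ptlrpcd context once the reply (or an error) arrives */
        return rc;
}

static void my_send_async(struct ptlrpc_request *req)
{
        req->rq_interpret_reply = my_async_interpret;
        /* LUSTRE_IMP_FULL routes the request to the regular "ptlrpcd"
         * daemon; any other send state selects "ptlrpcd-recov" */
        req->rq_send_state = LUSTRE_IMP_FULL;
        ptlrpcd_add_req(req);
}
#endif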

static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        unsigned long flags;
        int rc = 0;
        ENTRY;

        if (test_bit(LIOD_STOP, &pc->pc_flags))
                RETURN(1);

        spin_lock_irqsave(&pc->pc_set->set_new_req_lock, flags);
        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);
                ptlrpc_set_add_req(pc->pc_set, req);
                rc = 1; /* need to calculate its timeout */
        }
        spin_unlock_irqrestore(&pc->pc_set->set_new_req_lock, flags);

        if (pc->pc_set->set_remaining) {
                rc = rc | ptlrpc_check_set(pc->pc_set);

                /* XXX our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter. */
                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /* If new requests have been added, make sure to wake up */
                spin_lock_irqsave(&pc->pc_set->set_new_req_lock, flags);
                rc = !list_empty(&pc->pc_set->set_new_requests);
                spin_unlock_irqrestore(&pc->pc_set->set_new_req_lock, flags);
        }

        RETURN(rc);
}

#ifdef __KERNEL__
/* ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the io rpcs.  llite specifies
 * ptlrpcd's set when it pushes pages down into the oscs */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        ENTRY;

        cfs_daemonize(pc->pc_name);

        complete(&pc->pc_starting);

        /* this mainloop strongly resembles ptlrpc_set_wait except
         * that our set never completes.  ptlrpcd_check calls ptlrpc_check_set
         * when there are requests in the set.  new requests come in
         * on the set's new_req_list and ptlrpcd_check moves them into
         * the set. */
        while (1) {
                cfs_waitlink_t set_wait;
                struct l_wait_info lwi;
                cfs_duration_t timeout;

                timeout = cfs_time_seconds(ptlrpc_set_next_timeout(pc->pc_set));
                lwi = LWI_TIMEOUT(timeout, ptlrpc_expired_set, pc->pc_set);

                /* a la the pinger, wait on pc's waitqueue and the set's */
                cfs_waitlink_init(&set_wait);
                cfs_waitq_add(&pc->pc_set->set_waitq, &set_wait);
                cfs_waitq_forward(&set_wait, &pc->pc_waitq);
                l_wait_event(pc->pc_waitq, ptlrpcd_check(pc), &lwi);
                cfs_waitq_del(&pc->pc_set->set_waitq, &set_wait);

                if (test_bit(LIOD_STOP, &pc->pc_flags))
                        break;
        }
        /* wait for inflight requests to drain */
        if (!list_empty(&pc->pc_set->set_requests))
                ptlrpc_set_wait(pc->pc_set);
        complete(&pc->pc_finishing);
        return 0;
}
#else

int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                  rc = 0;

        /* single threaded!! */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1) {
                rc = ptlrpcd_check(pc);
                if (!rc)
                        ptlrpc_expired_set(pc->pc_set);
        }

        pc->pc_recurred--;
        return rc;
}
#endif
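
/*
 * Illustrative sketch only (not part of the original file): in the
 * userspace build there is no daemon thread, so RPC progress is made
 * whenever liblustre's wait path invokes the callback registered in
 * ptlrpcd_start() below.  The loop here is a hypothetical caricature of
 * such a wait path, not liblustre's actual code; poll_network_events()
 * is a made-up name.
 */
#if 0
static void hypothetical_wait_loop(struct ptlrpcd_ctl *pc)
{
        while (!test_bit(LIOD_STOP, &pc->pc_flags)) {
                poll_network_events();          /* made-up helper */
                ptlrpcd_check_async_rpcs(pc);   /* drive queued RPCs */
        }
}
#endif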

static int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
{
        int rc;

        ENTRY;
        memset(pc, 0, sizeof(*pc));
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        cfs_waitq_init(&pc->pc_waitq);
        pc->pc_flags = 0;
        spin_lock_init(&pc->pc_lock);
        CFS_INIT_LIST_HEAD(&pc->pc_req_list);
        snprintf(pc->pc_name, sizeof(pc->pc_name), "%s", name);

        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                RETURN(-ENOMEM);

#ifdef __KERNEL__
        rc = cfs_kernel_thread(ptlrpcd, pc, 0);
        if (rc < 0) {
                ptlrpc_set_destroy(pc->pc_set);
                RETURN(rc);
        }

        wait_for_completion(&pc->pc_starting);
#else
        pc->pc_callback =
                liblustre_register_wait_callback(&ptlrpcd_check_async_rpcs, pc);
        (void)rc;
#endif
        RETURN(0);
}

static void ptlrpcd_stop(struct ptlrpcd_ctl *pc)
{
        set_bit(LIOD_STOP, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_waitq);
#ifdef __KERNEL__
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_callback);
#endif
        ptlrpc_set_destroy(pc->pc_set);
}

int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        mutex_down(&ptlrpcd_sem);
        if (++ptlrpcd_users != 1)
                GOTO(out, rc);

        rc = ptlrpcd_start("ptlrpcd", &ptlrpcd_pc);
        if (rc) {
                --ptlrpcd_users;
                GOTO(out, rc);
        }

        rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
        if (rc) {
                ptlrpcd_stop(&ptlrpcd_pc);
                --ptlrpcd_users;
                GOTO(out, rc);
        }
out:
        mutex_up(&ptlrpcd_sem);
        RETURN(rc);
}

void ptlrpcd_decref(void)
{
        mutex_down(&ptlrpcd_sem);
        if (--ptlrpcd_users == 0) {
                ptlrpcd_stop(&ptlrpcd_pc);
                ptlrpcd_stop(&ptlrpcd_recovery_pc);
        }
        mutex_up(&ptlrpcd_sem);
}
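
/*
 * Illustrative sketch only (not part of the original file): users of
 * ptlrpcd bracket their lifetime with the refcounted start/stop calls
 * above.  The first ptlrpcd_addref() starts both the "ptlrpcd" and
 * "ptlrpcd-recov" daemons; the last ptlrpcd_decref() stops them.  The
 * subsystem names below are hypothetical.
 */
#if 0
static int my_subsystem_setup(void)
{
        int rc;

        rc = ptlrpcd_addref();          /* first user starts the daemons */
        if (rc)
                return rc;
        /* ... queue asynchronous requests with ptlrpcd_add_req() ... */
        return 0;
}

static void my_subsystem_cleanup(void)
{
        ptlrpcd_decref();               /* last user stops the daemons */
}
#endif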