lustre/ptlrpc/ptlrpcd.c (fs/lustre-release.git, branch b1_4_mountconf)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2001-2003 Cluster File Systems, Inc.
 *   Author Peter Braam <braam@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <linux/version.h>
# include <linux/module.h>
# include <linux/mm.h>
# include <linux/highmem.h>
# include <linux/lustre_dlm.h>
# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#  include <linux/workqueue.h>
#  include <linux/smp_lock.h>
# else
#  include <linux/locks.h>
# endif
# include <linux/ctype.h>
# include <linux/init.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <ctype.h>
#endif

#include <libcfs/kp30.h>
#include <linux/lustre_net.h>

#include <linux/lustre_ha.h>
#include <linux/obd_support.h> /* for OBD_FAIL_CHECK */
#include <linux/lprocfs_status.h>

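/* per-daemon control block.  two static instances are defined below: one
 * for ordinary rpcs (ptlrpcd_pc) and one for recovery rpcs
 * (ptlrpcd_recovery_pc).  LIOD_STOP is the bit in pc_flags used to ask a
 * daemon to shut down. */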
#define LIOD_STOP 0
struct ptlrpcd_ctl {
        unsigned long             pc_flags;
        spinlock_t                pc_lock;
        struct completion         pc_starting;
        struct completion         pc_finishing;
        struct list_head          pc_req_list;
        wait_queue_head_t         pc_waitq;
        struct ptlrpc_request_set *pc_set;
        char                      pc_name[16];
#ifndef __KERNEL__
        int                       pc_recurred;
        void                     *pc_callback;
#endif
};

static struct ptlrpcd_ctl ptlrpcd_pc;
static struct ptlrpcd_ctl ptlrpcd_recovery_pc;

static DECLARE_MUTEX(ptlrpcd_sem);
static int ptlrpcd_users = 0;

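/* wake the daemon a request was handed to so it notices the new work */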
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpcd_ctl *pc = req->rq_ptlrpcd_data;

        LASSERT(pc != NULL);

        wake_up(&pc->pc_waitq);
}

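/* queue a request on a daemon's set.  requests whose send state is
 * LUSTRE_IMP_FULL go to the regular daemon; everything else goes to the
 * recovery daemon. */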
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
        struct ptlrpcd_ctl *pc;

        if (req->rq_send_state == LUSTRE_IMP_FULL)
                pc = &ptlrpcd_pc;
        else
                pc = &ptlrpcd_recovery_pc;

        ptlrpc_set_add_new_req(pc->pc_set, req);
        req->rq_ptlrpcd_data = pc;

        ptlrpcd_wake(req);
}

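/* move newly queued requests from the set's new_requests list onto the
 * active set, run the set once, and prune completed requests.  used as the
 * wait condition in the daemon's main loop: nonzero means wake up instead
 * of sleeping for the full timeout. */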
static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        unsigned long flags;
        int rc = 0;
        ENTRY;

        if (test_bit(LIOD_STOP, &pc->pc_flags))
                RETURN(1);

        spin_lock_irqsave(&pc->pc_set->set_new_req_lock, flags);
        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);
                ptlrpc_set_add_req(pc->pc_set, req);
                rc = 1; /* need to calculate its timeout */
        }
        spin_unlock_irqrestore(&pc->pc_set->set_new_req_lock, flags);

        if (pc->pc_set->set_remaining) {
                rc = rc | ptlrpc_check_set(pc->pc_set);

                /* XXX our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter. */
                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /* If new requests have been added, make sure to wake up */
                spin_lock_irqsave(&pc->pc_set->set_new_req_lock, flags);
                rc = !list_empty(&pc->pc_set->set_new_requests);
                spin_unlock_irqrestore(&pc->pc_set->set_new_req_lock, flags);
        }

        RETURN(rc);
}

#ifdef __KERNEL__
/* ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the io rpcs.  llite specifies
 * ptlrpcd's set when it pushes pages down into the oscs */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        unsigned long flags;
        ENTRY;

        libcfs_daemonize(pc->pc_name);

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        complete(&pc->pc_starting);

        /* this mainloop strongly resembles ptlrpc_set_wait except
         * that our set never completes.  ptlrpcd_check calls ptlrpc_check_set
         * when there are requests in the set.  new requests come in
         * on the set's new_req_list and ptlrpcd_check moves them into
         * the set. */
        while (1) {
                wait_queue_t set_wait;
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(pc->pc_set) * HZ;
                lwi = LWI_TIMEOUT(timeout, ptlrpc_expired_set, pc->pc_set);

                /* ala the pinger, wait on pc's waitqueue and the set's */
                init_waitqueue_entry(&set_wait, current);
                add_wait_queue(&pc->pc_set->set_waitq, &set_wait);
                l_wait_event(pc->pc_waitq, ptlrpcd_check(pc), &lwi);
                remove_wait_queue(&pc->pc_set->set_waitq, &set_wait);

                if (test_bit(LIOD_STOP, &pc->pc_flags))
                        break;
        }
        /* wait for inflight requests to drain */
        if (!list_empty(&pc->pc_set->set_requests))
                ptlrpc_set_wait(pc->pc_set);
        complete(&pc->pc_finishing);
        return 0;
}
#else

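/* liblustre has no daemon thread; this callback is polled from liblustre's
 * wait loop instead.  pc_recurred keeps the check from being re-entered. */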
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                  rc = 0;

        /* single threaded!! */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1)
                rc = ptlrpcd_check(pc);

        pc->pc_recurred--;
        return rc;
}
#endif

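/* set up a control block, allocate its request set and, in the kernel,
 * spawn the daemon thread and wait for it to signal pc_starting.  in
 * liblustre, register the polling callback instead. */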
static int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
{
        int rc = 0;

        memset(pc, 0, sizeof(*pc));
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        init_waitqueue_head(&pc->pc_waitq);
        pc->pc_flags = 0;
        spin_lock_init(&pc->pc_lock);
        INIT_LIST_HEAD(&pc->pc_req_list);
        /* pass the name through "%s" so it is never treated as a format
         * string */
        snprintf(pc->pc_name, sizeof(pc->pc_name), "%s", name);

        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);

#ifdef __KERNEL__
        if (kernel_thread(ptlrpcd, pc, 0) < 0) {
                ptlrpc_set_destroy(pc->pc_set);
                GOTO(out, rc = -ECHILD);
        }

        wait_for_completion(&pc->pc_starting);
#else
        pc->pc_callback =
                liblustre_register_wait_callback(&ptlrpcd_check_async_rpcs, pc);
#endif
out:
        RETURN(rc);
}

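/* ask a daemon to stop, wait for it to finish (or unregister the liblustre
 * callback), then destroy its request set */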
static void ptlrpcd_stop(struct ptlrpcd_ctl *pc)
{
        set_bit(LIOD_STOP, &pc->pc_flags);
        wake_up(&pc->pc_waitq);
#ifdef __KERNEL__
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_callback);
#endif
        ptlrpc_set_destroy(pc->pc_set);
}

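/* reference-counted startup: the first caller starts both daemons, later
 * callers just take a reference */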
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        down(&ptlrpcd_sem);
        if (++ptlrpcd_users != 1)
                GOTO(out, rc);

        rc = ptlrpcd_start("ptlrpcd", &ptlrpcd_pc);
        if (rc) {
                --ptlrpcd_users;
                GOTO(out, rc);
        }

        rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
        if (rc) {
                ptlrpcd_stop(&ptlrpcd_pc);
                --ptlrpcd_users;
                GOTO(out, rc);
        }
out:
        up(&ptlrpcd_sem);
        RETURN(rc);
}

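/* drop a reference; the last one out stops both daemons */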
void ptlrpcd_decref(void)
{
        down(&ptlrpcd_sem);
        if (--ptlrpcd_users == 0) {
                ptlrpcd_stop(&ptlrpcd_pc);
                ptlrpcd_stop(&ptlrpcd_recovery_pc);
        }
        up(&ptlrpcd_sem);
}