Whamcloud - gitweb
Land b1_2_smallfix onto b1_2 (20040616_1009)
[fs/lustre-release.git] / lustre / ptlrpc / ptlrpcd.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2001-2003 Cluster File Systems, Inc.
5  *   Author Peter Braam <braam@clusterfs.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23
24 #define DEBUG_SUBSYSTEM S_RPC
25
26 #ifdef __KERNEL__
27 # include <linux/version.h>
28 # include <linux/module.h>
29 # include <linux/mm.h>
30 # include <linux/highmem.h>
31 # include <linux/lustre_dlm.h>
32 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
33 #  include <linux/workqueue.h>
34 #  include <linux/smp_lock.h>
35 # else
36 #  include <linux/locks.h>
37 # endif
38 #else /* __KERNEL__ */
39 # include <liblustre.h>
40 #endif
41
42 #include <linux/kp30.h>
43 #include <linux/lustre_net.h>
44
45 #ifdef __KERNEL__
46 # include <linux/ctype.h>
47 # include <linux/init.h>
48 #else
49 # include <ctype.h>
50 #endif
51
52 #include <linux/lustre_ha.h>
53 #include <linux/obd_support.h> /* for OBD_FAIL_CHECK */
54 #include <linux/lprocfs_status.h>
55
/* Bit number in pc_flags used to ask a ptlrpcd thread to shut down. */
#define LIOD_STOP 0
/* Per-daemon control block.  Two instances exist in this file: one
 * drives RPCs for imports in LUSTRE_IMP_FULL state, the other drives
 * recovery-state RPCs (see ptlrpcd_add_req()). */
struct ptlrpcd_ctl {
        unsigned long             pc_flags;     /* LIOD_* bits (only LIOD_STOP here) */
        spinlock_t                pc_lock;      /* initialized in ptlrpcd_start() but
                                                 * not otherwise used in this file */
        struct completion         pc_starting;  /* signalled once the thread is up */
        struct completion         pc_finishing; /* signalled when the thread exits */
        struct list_head          pc_req_list;  /* initialized in ptlrpcd_start() but
                                                 * not otherwise used in this file */
        wait_queue_head_t         pc_waitq;     /* daemon sleeps here; poked by
                                                 * ptlrpcd_wake() */
        struct ptlrpc_request_set *pc_set;      /* the request set the daemon polls */
#ifndef __KERNEL__
        int                       pc_recurred;  /* re-entry guard for the liblustre
                                                 * wait callback */
        void                     *pc_callback;  /* handle returned by
                                                 * liblustre_register_wait_callback() */
#endif
};
70
/* The two daemon instances: normal traffic vs. recovery traffic
 * (selection happens in ptlrpcd_add_req()). */
static struct ptlrpcd_ctl ptlrpcd_pc;
static struct ptlrpcd_ctl ptlrpcd_recovery_pc;

/* ptlrpcd_sem serializes ptlrpcd_addref()/ptlrpcd_decref() and protects
 * the ptlrpcd_users start/stop refcount. */
static DECLARE_MUTEX(ptlrpcd_sem);
static int ptlrpcd_users = 0;
76
77 void ptlrpcd_wake(struct ptlrpc_request *req)
78 {
79         struct ptlrpcd_ctl *pc = req->rq_ptlrpcd_data;
80
81         LASSERT(pc != NULL);
82
83         wake_up(&pc->pc_waitq);
84 }
85
86 void ptlrpcd_add_req(struct ptlrpc_request *req)
87 {
88         struct ptlrpcd_ctl *pc;
89
90         if (req->rq_send_state == LUSTRE_IMP_FULL)
91                 pc = &ptlrpcd_pc;
92         else 
93                 pc = &ptlrpcd_recovery_pc;
94
95         ptlrpc_set_add_new_req(pc->pc_set, req);
96         req->rq_ptlrpcd_data = pc;
97                 
98         ptlrpcd_wake(req);
99 }
100
/* Condition function for the daemon's wait loop: returns non-zero when
 * the daemon should wake and iterate (stop requested, work was moved or
 * progressed, or new requests are pending), zero when it may sleep.
 *
 * Each pass: (1) migrate requests queued on the set's new_requests list
 * into the set proper, (2) advance the set via ptlrpc_check_set(), and
 * (3) reap requests that reached RQ_PHASE_COMPLETE — this set never
 * "finishes" as a whole, so completed requests must be pruned here. */
static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        unsigned long flags;
        int rc = 0;
        ENTRY;

        /* Report "work to do" so the mainloop notices the stop bit. */
        if (test_bit(LIOD_STOP, &pc->pc_flags))
                RETURN(1);

        /* new_req_lock guards the handoff list that ptlrpcd_add_req()
         * (via ptlrpc_set_add_new_req()) feeds from other contexts. */
        spin_lock_irqsave(&pc->pc_set->set_new_req_lock, flags);
        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);
                ptlrpc_set_add_req(pc->pc_set, req);
                rc = 1; /* need to calculate its timeout */
        }
        spin_unlock_irqrestore(&pc->pc_set->set_new_req_lock, flags);

        if (pc->pc_set->set_remaining) {
                rc = rc | ptlrpc_check_set(pc->pc_set);

                /* XXX our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter. */
                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished (req);
                }
        }

        if (rc == 0) {
                /* If new requests have been added, make sure to wake up */
                spin_lock_irqsave(&pc->pc_set->set_new_req_lock, flags);
                rc = !list_empty(&pc->pc_set->set_new_requests);
                spin_unlock_irqrestore(&pc->pc_set->set_new_req_lock, flags);
        }

        RETURN(rc);
}
147
148 #ifdef __KERNEL__
149 /* ptlrpc's code paths like to execute in process context, so we have this
150  * thread which spins on a set which contains the io rpcs.  llite specifies
151  * ptlrpcd's set when it pushes pages down into the oscs */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        unsigned long flags;
        ENTRY;

        kportal_daemonize("ptlrpcd");

        /* Run with every signal blocked: shutdown is requested through
         * the LIOD_STOP bit, not by signalling the thread. */
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        /* Unblock ptlrpcd_start(), which waits on pc_starting. */
        complete(&pc->pc_starting);

        /* this mainloop strongly resembles ptlrpc_set_wait except
         * that our set never completes.  ptlrpcd_check calls ptlrpc_check_set
         * when there are requests in the set.  new requests come in
         * on the set's new_req_list and ptlrpcd_check moves them into
         * the set. */
        while (1) {
                wait_queue_t set_wait;
                struct l_wait_info lwi;
                int timeout;

                /* Sleep at most until the earliest request deadline so
                 * expired requests are noticed without an explicit wake. */
                timeout = ptlrpc_set_next_timeout(pc->pc_set) * HZ;
                lwi = LWI_TIMEOUT(timeout, ptlrpc_expired_set, pc->pc_set);

                /* ala the pinger, wait on pc's waitqueue and the set's */
                init_waitqueue_entry(&set_wait, current);
                add_wait_queue(&pc->pc_set->set_waitq, &set_wait);
                l_wait_event(pc->pc_waitq, ptlrpcd_check(pc), &lwi);
                remove_wait_queue(&pc->pc_set->set_waitq, &set_wait);

                /* ptlrpcd_check() returns non-zero once ptlrpcd_stop()
                 * sets this bit, so we always get here to notice it. */
                if (test_bit(LIOD_STOP, &pc->pc_flags))
                        break;
        }
        /* wait for inflight requests to drain */
        if (!list_empty(&pc->pc_set->set_requests))
                ptlrpc_set_wait(pc->pc_set);
        /* Unblock ptlrpcd_stop(), which waits on pc_finishing. */
        complete(&pc->pc_finishing);
        return 0;
}
195 #else
196
197 int ptlrpcd_check_async_rpcs(void *arg)
198 {
199         struct ptlrpcd_ctl *pc = arg;
200         int                  rc = 0;
201
202         /* single threaded!! */
203         pc->pc_recurred++;
204
205         if (pc->pc_recurred == 1)
206                 rc = ptlrpcd_check(pc);
207
208         pc->pc_recurred--;
209         return rc;
210 }
211 #endif
212
213 static int ptlrpcd_start(struct ptlrpcd_ctl *pc)
214 {
215         int rc = 0;
216
217         memset(pc, 0, sizeof(*pc));
218         init_completion(&pc->pc_starting);
219         init_completion(&pc->pc_finishing);
220         init_waitqueue_head(&pc->pc_waitq);
221         pc->pc_flags = 0;
222         spin_lock_init(&pc->pc_lock);
223         INIT_LIST_HEAD(&pc->pc_req_list);
224
225         pc->pc_set = ptlrpc_prep_set();
226         if (pc->pc_set == NULL)
227                 GOTO(out, rc = -ENOMEM);
228
229 #ifdef __KERNEL__
230         if (kernel_thread(ptlrpcd, pc, 0) < 0)  {
231                 ptlrpc_set_destroy(pc->pc_set);
232                 GOTO(out, rc = -ECHILD);
233         }
234
235         wait_for_completion(&pc->pc_starting);
236 #else
237         pc->pc_callback =
238                 liblustre_register_wait_callback(&ptlrpcd_check_async_rpcs, pc);
239 #endif
240 out:
241         RETURN(rc);
242 }
243
/* Shut down the daemon behind @pc and reclaim its request set.  The
 * order matters: set the stop bit first, then wake the thread so it
 * observes the bit, then (kernel build) wait for it to drain in-flight
 * requests and signal pc_finishing before destroying the set.  The
 * liblustre build just unhooks the wait callback instead. */
static void ptlrpcd_stop(struct ptlrpcd_ctl *pc)
{
        set_bit(LIOD_STOP, &pc->pc_flags);
        wake_up(&pc->pc_waitq);
#ifdef __KERNEL__
        wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_callback);
#endif
        ptlrpc_set_destroy(pc->pc_set);
}
255
/* Take a reference on the ptlrpcd service; the first caller starts both
 * daemons.  Returns 0 on success or the negative errno from
 * ptlrpcd_start().  On failure the user count is rolled back and any
 * daemon already started is stopped, so a failed addref leaves the
 * service exactly as it was.  Serialized by ptlrpcd_sem. */
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        down(&ptlrpcd_sem);
        /* Not the first user: daemons already run, nothing to do. */
        if (++ptlrpcd_users != 1)
                GOTO(out, rc);

        rc = ptlrpcd_start(&ptlrpcd_pc);
        if (rc) {
                --ptlrpcd_users;
                GOTO(out, rc);
        }

        rc = ptlrpcd_start(&ptlrpcd_recovery_pc);
        if (rc) {
                /* Unwind the daemon that did start. */
                ptlrpcd_stop(&ptlrpcd_pc);
                --ptlrpcd_users;
                GOTO(out, rc);
        }
out:
        up(&ptlrpcd_sem);
        RETURN(rc);
}
281
282 void ptlrpcd_decref(void)
283 {
284         down(&ptlrpcd_sem);
285         if (--ptlrpcd_users == 0) {
286                 ptlrpcd_stop(&ptlrpcd_pc);
287                 ptlrpcd_stop(&ptlrpcd_recovery_pc);
288         }
289         up(&ptlrpcd_sem);
290 }