Whamcloud - gitweb
Fixed service request buffer race
[fs/lustre-release.git] / lustre / ptlrpc / service.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2002 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define DEBUG_SUBSYSTEM S_RPC
24
25 #include <linux/obd_support.h>
26 #include <linux/obd_class.h>
27 #include <linux/lustre_net.h>
28
29 extern int request_in_callback(ptl_event_t *ev);
30
31 static int ptlrpc_check_event(struct ptlrpc_service *svc,
32                               struct ptlrpc_thread *thread, ptl_event_t *event)
33 {
34         int rc = 0;
35         ENTRY;
36
37         spin_lock(&svc->srv_lock);
38         if (thread->t_flags & SVC_STOPPING)
39                 GOTO(out, rc = 1);
40
41         LASSERT ((thread->t_flags & SVC_EVENT) == 0);
42
43         if (ptl_is_valid_handle(&svc->srv_eq_h)) {
44                 int err;
45                 err = PtlEQGet(svc->srv_eq_h, event);
46
47                 if (err == PTL_OK) {
48                         thread->t_flags |= SVC_EVENT;
49                         GOTO(out, rc = 1);
50                 }
51
52                 if (err != PTL_EQ_EMPTY) {
53                         CERROR("BUG: PtlEQGet returned %d\n", rc);
54                         LBUG();
55                 }
56
57                 GOTO(out, rc = 0);
58         }
59
60         EXIT;
61  out:
62         spin_unlock(&svc->srv_lock);
63         return rc;
64 }
65
/* Allocate and initialise a ptlrpc service: one Portals event queue plus
 * 'nbuffs' request buffers of 'bufsize' bytes each, all linked for
 * receive on 'req_portal'.  'handler' is invoked by service threads
 * (see ptlrpc_main) for each incoming request; 'name' is kept by
 * reference for diagnostics.
 *
 * Returns the new service, or NULL on any failure.  Partial failures
 * after the EQ is allocated are unwound via ptlrpc_unregister_service(),
 * which tolerates half-initialised state. */
struct ptlrpc_service *
ptlrpc_init_svc(__u32 bufsize, int nbuffs, int req_portal, int rep_portal, char *uuid,
                svc_handler_t handler, char *name)
{
        int err;
        int rc, i;
        struct ptlrpc_service *service;
        ENTRY;

        OBD_ALLOC(service, sizeof(*service));
        if (!service) {
                LBUG();
                RETURN(NULL);
        }

        service->srv_name = name;
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_reqs);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_buf_size = bufsize;
        service->srv_nbuffs = nbuffs;
        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_handler = handler;

        /* Resolve our own peer: its NI is used for the EQ below and as a
         * stand-in incoming NI in handle_incoming_request() */
        err = kportal_uuid_to_peer(uuid, &service->srv_self);
        if (err) {
                CERROR("cannot get peer for uuid '%s'\n", uuid);
                OBD_FREE(service, sizeof(*service));
                RETURN(NULL);
        }

        /* NB We need exactly 1 event for each buffer we queue */
        rc = PtlEQAlloc(service->srv_self.peer_ni, service->srv_nbuffs, 
                        request_in_callback, &(service->srv_eq_h));

        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                LBUG();
                OBD_FREE(service, sizeof(*service));
                RETURN(NULL);
        }

        OBD_ALLOC(service->srv_rqbds, 
                  service->srv_nbuffs * sizeof (struct ptlrpc_request_buffer_desc));
        if (service->srv_rqbds == NULL) {
                CERROR("no memory\n");
                LBUG();
                GOTO(failed, NULL);
        }
        
        for (i = 0; i < service->srv_nbuffs; i++) {
                struct ptlrpc_request_buffer_desc *rqbd = &service->srv_rqbds[i];
                
                rqbd->rqbd_service = service;
                /* Invalid ME handle marks "never linked" for the cleanup
                 * path in ptlrpc_unregister_service() */
                ptl_set_inv_handle (&rqbd->rqbd_me_h);
                OBD_ALLOC(rqbd->rqbd_buffer, service->srv_buf_size);
                if (rqbd->rqbd_buffer == NULL) {
                        CERROR("no memory\n");
                        LBUG();
                        GOTO(failed, NULL);
                }
                ptlrpc_link_svc_me(rqbd);
        }

        CDEBUG(D_NET, "Starting service listening on portal %d\n",
               service->srv_req_portal);

        RETURN(service);
failed:
        ptlrpc_unregister_service(service);
        return NULL;
}
141
142 static int handle_incoming_request(struct obd_device *obddev,
143                                    struct ptlrpc_service *svc,
144                                    ptl_event_t *event)
145 {
146         struct ptlrpc_request_buffer_desc *rqbd = event->mem_desc.user_ptr;
147         struct ptlrpc_request request;
148         int rc;
149
150         /* FIXME: If we move to an event-driven model, we should put the request
151          * on the stack of mds_handle instead. */
152         LASSERT ((event->mem_desc.options & PTL_MD_IOV) == 0);
153         LASSERT (rqbd->rqbd_service == svc);
154         LASSERT (rqbd->rqbd_buffer == event->mem_desc.start);
155         LASSERT (event->offset == 0);
156         
157         memset(&request, 0, sizeof(request));
158         request.rq_svc = svc;
159         request.rq_obd = obddev;
160         request.rq_xid = event->match_bits;
161         request.rq_reqmsg = event->mem_desc.start + event->offset;
162         request.rq_reqlen = event->mem_desc.length;
163
164         if (request.rq_reqlen < sizeof(struct lustre_msg)) {
165                 CERROR("incomplete request (%d): ptl %d from %Lx xid %Ld\n",
166                        request.rq_reqlen, svc->srv_req_portal,
167                        event->initiator.nid, request.rq_xid);
168                 spin_unlock(&svc->srv_lock);
169                 RETURN(-EINVAL);
170         }
171
172         if (NTOH__u32(request.rq_reqmsg->type) != PTL_RPC_MSG_REQUEST) {
173                 CERROR("wrong packet type received (type=%u)\n",
174                        request.rq_reqmsg->type);
175                 LBUG();
176                 spin_unlock(&svc->srv_lock);
177                 RETURN(-EINVAL);
178         }
179
180         if (request.rq_reqmsg->magic != PTLRPC_MSG_MAGIC) {
181                 CERROR("wrong lustre_msg magic %d: ptl %d from %Lx xid %Ld\n",
182                        request.rq_reqmsg->magic, svc->srv_req_portal,
183                        event->initiator.nid, request.rq_xid);
184                 spin_unlock(&svc->srv_lock);
185                 RETURN(-EINVAL);
186         }
187
188         if (request.rq_reqmsg->version != PTLRPC_MSG_VERSION) {
189                 CERROR("wrong lustre_msg version %d: ptl %d from %Lx xid %Ld\n",
190                        request.rq_reqmsg->version, svc->srv_req_portal,
191                        event->initiator.nid, request.rq_xid);
192                 spin_unlock(&svc->srv_lock);
193                 RETURN(-EINVAL);
194         }
195
196         CDEBUG(D_NET, "got req %Ld\n", request.rq_xid);
197
198         request.rq_peer.peer_nid = event->initiator.nid;
199         /* FIXME: this NI should be the incoming NI.
200          * We don't know how to find that from here. */
201         request.rq_peer.peer_ni = svc->srv_self.peer_ni;
202
203         request.rq_export = class_conn2export((struct lustre_handle *) request.rq_reqmsg);
204
205         if (request.rq_export) {
206                 request.rq_connection = request.rq_export->exp_connection;
207                 ptlrpc_connection_addref(request.rq_connection);
208         }
209
210         spin_unlock(&svc->srv_lock);
211
212         rc = svc->srv_handler(&request);
213         ptlrpc_put_connection(request.rq_connection);
214
215         ptlrpc_link_svc_me (rqbd);
216         return rc;
217 }
218
219 static int ptlrpc_main(void *arg)
220 {
221         int rc;
222         struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
223         struct obd_device *obddev = data->dev;
224         struct ptlrpc_service *svc = data->svc;
225         struct ptlrpc_thread *thread = data->thread;
226
227         ENTRY;
228
229         lock_kernel();
230         daemonize();
231         spin_lock_irq(&current->sigmask_lock);
232         sigfillset(&current->blocked);
233         recalc_sigpending(current);
234         spin_unlock_irq(&current->sigmask_lock);
235
236         sprintf(current->comm, data->name);
237         unlock_kernel();
238
239         /* Record that the thread is running */
240         thread->t_flags = SVC_RUNNING;
241         wake_up(&thread->t_ctl_waitq);
242
243         /* XXX maintain a list of all managed devices: insert here */
244
245         /* And now, loop forever on requests */
246         while (1) {
247                 ptl_event_t event;
248
249                 wait_event(svc->srv_waitq,
250                            ptlrpc_check_event(svc, thread, &event));
251
252                 spin_lock(&svc->srv_lock);
253
254                 if (thread->t_flags & SVC_STOPPING) {
255                         thread->t_flags &= ~SVC_STOPPING;
256                         spin_unlock(&svc->srv_lock);
257                         EXIT;
258                         break;
259                 }
260
261                 if (thread->t_flags & SVC_EVENT) {
262                         LASSERT (event.sequence != 0);
263                         rc = handle_incoming_request(obddev, svc, &event);
264                         thread->t_flags &= ~SVC_EVENT;
265                         continue;
266                 }
267
268                 CERROR("unknown break in service");
269                 spin_unlock(&svc->srv_lock);
270                 EXIT;
271                 break;
272         }
273
274         thread->t_flags = SVC_STOPPED;
275         wake_up(&thread->t_ctl_waitq);
276         CDEBUG(D_NET, "service thread exiting, process %d\n", current->pid);
277         return 0;
278 }
279
280 static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
281                                struct ptlrpc_thread *thread)
282 {
283         spin_lock(&svc->srv_lock);
284         thread->t_flags = SVC_STOPPING;
285         spin_unlock(&svc->srv_lock);
286
287         wake_up(&svc->srv_waitq);
288         wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED));
289 }
290
291 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
292 {
293         spin_lock(&svc->srv_lock);
294         while (!list_empty(&svc->srv_threads)) {
295                 struct ptlrpc_thread *thread;
296                 thread = list_entry(svc->srv_threads.next, struct ptlrpc_thread,
297                                     t_link);
298                 spin_unlock(&svc->srv_lock);
299                 ptlrpc_stop_thread(svc, thread);
300                 spin_lock(&svc->srv_lock);
301                 list_del(&thread->t_link);
302                 OBD_FREE(thread, sizeof(*thread));
303         }
304         spin_unlock(&svc->srv_lock);
305 }
306
307 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
308                         char *name)
309 {
310         struct ptlrpc_svc_data d;
311         struct ptlrpc_thread *thread;
312         int rc;
313         ENTRY;
314
315         OBD_ALLOC(thread, sizeof(*thread));
316         if (thread == NULL) {
317                 LBUG();
318                 RETURN(-ENOMEM);
319         }
320         init_waitqueue_head(&thread->t_ctl_waitq);
321
322         d.dev = dev;
323         d.svc = svc;
324         d.name = name;
325         d.thread = thread;
326
327         spin_lock(&svc->srv_lock);
328         list_add(&thread->t_link, &svc->srv_threads);
329         spin_unlock(&svc->srv_lock);
330
331         rc = kernel_thread(ptlrpc_main, (void *) &d,
332                            CLONE_VM | CLONE_FS | CLONE_FILES);
333         if (rc < 0) {
334                 CERROR("cannot start thread\n");
335                 OBD_FREE(thread, sizeof(*thread));
336                 RETURN(-EINVAL);
337         }
338         wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
339
340         RETURN(0);
341 }
342
/* Tear down a service created by ptlrpc_init_svc(): unlink and free
 * each request buffer, free the rqbd array and the event queue, then
 * the service itself.  Tolerates partially-initialised services (NULL
 * srv_rqbds, buffers never allocated/linked), which is how
 * ptlrpc_init_svc() unwinds its own failures.
 *
 * Returns 0, or -EBUSY if requests were still queued — though any
 * non-zero rc triggers LBUG() after the service memory is freed. */
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        int rc, i;

        if (service->srv_rqbds != NULL)
        {
                for (i = 0; i < service->srv_nbuffs; i++) {
                        struct ptlrpc_request_buffer_desc *rqbd = &service->srv_rqbds[i];
                        
                        if (rqbd->rqbd_buffer == NULL)  /* no buffer allocated */
                                continue;               /* => never initialised */

                        /* Buffer allocated => got linked */
                        LASSERT (ptl_is_valid_handle (&rqbd->rqbd_me_h));
                        
                        rc = PtlMEUnlink(rqbd->rqbd_me_h);
                        if (rc)
                                CERROR("PtlMEUnlink failed: %d\n", rc);
                        
                        OBD_FREE(rqbd->rqbd_buffer, service->srv_buf_size);
                }

                OBD_FREE(service->srv_rqbds,
                         service->srv_nbuffs * sizeof (struct ptlrpc_request_buffer_desc));
        }

        /* NOTE(review): any PtlMEUnlink failure above is only logged;
         * rc is overwritten here, so it cannot reach the caller */
        rc = PtlEQFree(service->srv_eq_h);
        if (rc)
                CERROR("PtlEQFree failed: %d\n", rc);

        if (!list_empty(&service->srv_reqs)) {
                // XXX reply with errors and clean up
                CERROR("Request list not empty!\n");
                rc = -EBUSY;
        }

        OBD_FREE(service, sizeof(*service));
        if (rc) 
                LBUG();
        return rc;
}