Whamcloud - gitweb
land 0.5.20.3 b_devel onto HEAD (b_devel will remain)
[fs/lustre-release.git] / lustre / ptlrpc / service.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2002 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define DEBUG_SUBSYSTEM S_RPC
24 #ifndef __KERNEL__
25 #include <liblustre.h>
26 #include <linux/kp30.h>
27 #endif
28 #include <linux/obd_support.h>
29 #include <linux/obd_class.h>
30 #include <linux/lustre_net.h>
31
32 extern int request_in_callback(ptl_event_t *ev);
33
34 static int ptlrpc_check_event(struct ptlrpc_service *svc,
35                               struct ptlrpc_thread *thread, ptl_event_t *event)
36 {
37         struct ptlrpc_srv_ni *srv_ni;
38         int i;
39         int idx;
40         int rc;
41         ENTRY;
42
43         spin_lock(&svc->srv_lock);
44
45         if (thread->t_flags & SVC_STOPPING)
46                 GOTO(out, rc = 1);
47
48         LASSERT ((thread->t_flags & SVC_EVENT) == 0);
49         LASSERT (ptlrpc_ninterfaces > 0);
50
51         for (i = 0; i < ptlrpc_ninterfaces; i++) {
52                 idx = (svc->srv_interface_rover + i) % ptlrpc_ninterfaces;
53                 srv_ni = &svc->srv_interfaces[idx];
54
55                 LASSERT (ptl_is_valid_handle (&srv_ni->sni_eq_h));
56
57                 rc = PtlEQGet(srv_ni->sni_eq_h, event);
58                 switch (rc)
59                 {
60                 case PTL_OK:
61                         /* next time start with the next interface */
62                         svc->srv_interface_rover = (idx+1) % ptlrpc_ninterfaces;
63                         thread->t_flags |= SVC_EVENT;
64                         GOTO(out, rc = 1);
65
66                 case PTL_EQ_EMPTY:
67                         continue;
68
69                 default:
70                         CERROR("BUG: PtlEQGet returned %d\n", rc);
71                         LBUG();
72                 }
73         }
74         rc = 0;
75  out:
76         spin_unlock(&svc->srv_lock);
77         return rc;
78 }
79
/* Allocate and initialise a ptlrpc service that listens on 'req_portal'
 * across every configured network interface (ptlrpc_ninterfaces).
 *
 * nevents      - depth of each interface's Portals event queue
 * nbufs        - number of request buffers posted per interface
 * bufsize      - size of each request buffer
 * max_req_size - largest single request the service accepts
 * req_portal   - portal incoming requests arrive on
 * rep_portal   - portal replies are sent from
 * handler      - callback invoked for each incoming request
 * name         - service name; the pointer is stored, not copied, so it
 *                must outlive the service
 *
 * Returns the new service, or NULL on failure.  On failure any
 * partially-built state is torn down via ptlrpc_unregister_service(),
 * which is why the "early teardown" loop below must run before anything
 * that can fail. */
struct ptlrpc_service *
ptlrpc_init_svc(__u32 nevents, __u32 nbufs,
                __u32 bufsize, __u32 max_req_size,
                int req_portal, int rep_portal,
                svc_handler_t handler, char *name)
{
        int ssize;
        int rc;
        int i;
        int j;
        struct ptlrpc_service *service;
        struct ptlrpc_srv_ni  *srv_ni;
        ENTRY;

        LASSERT (ptlrpc_ninterfaces > 0);

        /* The service struct ends in a flexible per-interface array;
         * size the allocation for ptlrpc_ninterfaces entries. */
        ssize = offsetof (struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]);
        OBD_ALLOC(service, ssize);
        if (service == NULL)
                RETURN(NULL);

        service->srv_name = name;
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_max_req_size = max_req_size;
        service->srv_buf_size = bufsize;

        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_handler = handler;
        service->srv_interface_rover = 0;

        /* First initialise enough for early teardown */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];

                srv_ni->sni_service = service;
                srv_ni->sni_ni = &ptlrpc_interfaces[i];
                /* invalid EQ handle marks "not yet allocated" so teardown
                 * knows whether to call PtlEQFree */
                ptl_set_inv_handle (&srv_ni->sni_eq_h);
                INIT_LIST_HEAD(&srv_ni->sni_rqbds);
                srv_ni->sni_nrqbds = 0;
                atomic_set(&srv_ni->sni_nrqbds_receiving, 0);
        }

        /* Now allocate the event queue and request buffers, assuming all
         * interfaces require the same level of buffering. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: initialising interface %s\n", name,
                        srv_ni->sni_ni->pni_name);

                rc = PtlEQAlloc(srv_ni->sni_ni->pni_ni_h, nevents,
                                request_in_callback, &(srv_ni->sni_eq_h));
                if (rc != PTL_OK) {
                        CERROR("%s.%d: PtlEQAlloc on %s failed: %d\n",
                               name, i, srv_ni->sni_ni->pni_name, rc);
                        GOTO (failed, NULL);
                }

                for (j = 0; j < nbufs; j++) {
                        struct ptlrpc_request_buffer_desc *rqbd;

                        OBD_ALLOC(rqbd, sizeof(*rqbd));
                        if (rqbd == NULL) {
                                CERROR ("%s.%d: Can't allocate request "
                                        "descriptor %d on %s\n",
                                        name, i, srv_ni->sni_nrqbds,
                                        srv_ni->sni_ni->pni_name);
                                GOTO(failed, NULL);
                        }

                        rqbd->rqbd_srv_ni = srv_ni;
                        ptl_set_inv_handle(&rqbd->rqbd_me_h);
                        atomic_set(&rqbd->rqbd_refcount, 0);

                        OBD_ALLOC(rqbd->rqbd_buffer, service->srv_buf_size);
                        if (rqbd->rqbd_buffer == NULL) {
                                CERROR ("%s.%d: Can't allocate request "
                                        "buffer %d on %s\n",
                                        name, i, srv_ni->sni_nrqbds,
                                        srv_ni->sni_ni->pni_name);
                                /* rqbd not yet on sni_rqbds: free it here,
                                 * teardown won't see it */
                                OBD_FREE(rqbd, sizeof(*rqbd));
                                GOTO(failed, NULL);
                        }
                        list_add(&rqbd->rqbd_list, &srv_ni->sni_rqbds);
                        srv_ni->sni_nrqbds++;

                        /* post the buffer to receive requests */
                        ptlrpc_link_svc_me(rqbd);
                }
        }

        CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
               service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);

        RETURN(service);
failed:
        ptlrpc_unregister_service(service);
        return NULL;
}
182
/* Unpack the request described by 'event' into 'request', sanity-check
 * the lustre_msg header, and dispatch it to the service handler.
 *
 * Returns the handler's return code, or -EINVAL if the message fails
 * validation.  On exit this drops the rqbd reference taken by the event
 * callback; the last reference re-posts the buffer for reuse. */
static int handle_incoming_request(struct obd_device *obddev,
                                   struct ptlrpc_service *svc,
                                   ptl_event_t *event,
                                   struct ptlrpc_request *request)
{
        struct ptlrpc_request_buffer_desc *rqbd = event->mem_desc.user_ptr;
        int rc;

        /* FIXME: If we move to an event-driven model, we should put the request
         * on the stack of mds_handle instead. */

        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
        LASSERT ((event->mem_desc.options & PTL_MD_IOV) == 0);
        LASSERT (rqbd->rqbd_srv_ni->sni_service == svc);
        LASSERT (rqbd->rqbd_buffer == event->mem_desc.start);
        LASSERT (event->offset + event->mlength <= svc->srv_buf_size);

        /* 'request' is caller-owned scratch space, reused across calls */
        memset(request, 0, sizeof(*request));
        INIT_LIST_HEAD(&request->rq_list);
        request->rq_svc = svc;
        request->rq_obd = obddev;
        request->rq_xid = event->match_bits;
        /* message body lives inside the posted buffer at the event offset */
        request->rq_reqmsg = event->mem_desc.start + event->offset;
        request->rq_reqlen = event->mlength;

        rc = -EINVAL;

        if (request->rq_reqlen < sizeof(struct lustre_msg)) {
                CERROR("incomplete request (%d): ptl %d from "LPX64" xid "
                       LPU64"\n",
                       request->rq_reqlen, svc->srv_req_portal,
                       event->initiator.nid, request->rq_xid);
                goto out;
        }

        CDEBUG(D_RPCTRACE, "Handling RPC ni:pid:xid:nid:opc %d:%d:"LPU64":"
               LPX64":%d\n", rqbd->rqbd_srv_ni - &svc->srv_interfaces[0],
               NTOH__u32(request->rq_reqmsg->status), request->rq_xid,
               event->initiator.nid, NTOH__u32(request->rq_reqmsg->opc));

        if (NTOH__u32(request->rq_reqmsg->type) != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type received (type=%u)\n",
                       request->rq_reqmsg->type);
                goto out;
        }

        /* NOTE(review): unlike the 'type' check above, magic and version
         * are compared without NTOH__u32.  Presumably the magic value is
         * chosen to be detectable either way or swabbing happens earlier;
         * confirm before relying on cross-endian clients. */
        if (request->rq_reqmsg->magic != PTLRPC_MSG_MAGIC) {
                CERROR("wrong lustre_msg magic %d: ptl %d from "LPX64" xid "
                       LPD64"\n",
                       request->rq_reqmsg->magic, svc->srv_req_portal,
                       event->initiator.nid, request->rq_xid);
                goto out;
        }

        if (request->rq_reqmsg->version != PTLRPC_MSG_VERSION) {
                CERROR("wrong lustre_msg version %d: ptl %d from "LPX64" xid "
                       LPD64"\n",
                       request->rq_reqmsg->version, svc->srv_req_portal,
                       event->initiator.nid, request->rq_xid);
                goto out;
        }

        CDEBUG(D_NET, "got req "LPD64" (md: %p + %d)\n", request->rq_xid,
               event->mem_desc.start, event->offset);

        request->rq_peer.peer_nid = event->initiator.nid;
        request->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;

        /* the message starts with a lustre_handle identifying the export */
        request->rq_export = class_conn2export((struct lustre_handle *)
                                               request->rq_reqmsg);

        if (request->rq_export) {
                request->rq_connection = request->rq_export->exp_connection;
                ptlrpc_connection_addref(request->rq_connection);
        } else {
                /* create a (hopefully temporary) connection that will be used
                 * to send the reply if this call doesn't create an export.
                 * XXX revisit this when we revamp ptlrpc */
                request->rq_connection =
                        ptlrpc_get_connection(&request->rq_peer, NULL);
        }

        rc = svc->srv_handler(request);
        /* balances the addref/get above */
        ptlrpc_put_connection(request->rq_connection);

 out:
        if (atomic_dec_and_test (&rqbd->rqbd_refcount)) /* last reference? */
                ptlrpc_link_svc_me (rqbd);

        return rc;
}
274
/* Don't use daemonize, it removes fs struct from new thread  (bug 418) */
/* Detach the current task from its parent process: drop the mm and open
 * files inherited from the spawner, leave the controlling tty, and make
 * init the parent.  Deliberately keeps fs struct (cwd/root), unlike the
 * kernel's daemonize() — see the bug reference above. */
static void ptlrpc_daemonize(void)
{
        exit_mm(current);

        /* become our own session/process group, with no tty */
        current->session = 1;
        current->pgrp = 1;
        current->tty = NULL;

        exit_files(current);
        reparent_to_init();
}
287
/* Service thread main loop.  'arg' points at a ptlrpc_svc_data that
 * lives on the spawning thread's stack; it stays valid because the
 * spawner waits for SVC_RUNNING before returning (see
 * ptlrpc_start_thread), so all fields must be copied out before that
 * flag is set.  Loops pulling events via ptlrpc_check_event() and
 * dispatching them until SVC_STOPPING is seen, then signals SVC_STOPPED
 * and exits with the last handler return code. */
static int ptlrpc_main(void *arg)
{
        struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
        struct obd_device *obddev = data->dev;
        struct ptlrpc_service *svc = data->svc;
        struct ptlrpc_thread *thread = data->thread;
        struct ptlrpc_request *request;
        ptl_event_t *event;
        int rc = 0;
        unsigned long flags;  /* only used by the pre-2.5 branch below */
        ENTRY;

        lock_kernel();
        ptlrpc_daemonize();

        /* block all signals; this thread is killed via SVC_STOPPING */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
        sigfillset(&current->blocked);
        recalc_sigpending();
#else
        spin_lock_irqsave(&current->sigmask_lock, flags);
        sigfillset(&current->blocked);
        recalc_sigpending(current);
        spin_unlock_irqrestore(&current->sigmask_lock, flags);
#endif

#ifdef __arch_um__
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
        sprintf(current->comm, "%s|%d", data->name,current->thread.extern_pid);
#endif
#else
        strcpy(current->comm, data->name);
#endif
        unlock_kernel();

        /* event and request are heap-allocated scratch reused for every
         * incoming RPC handled by this thread */
        OBD_ALLOC(event, sizeof(*event));
        if (!event)
                GOTO(out, rc = -ENOMEM);
        OBD_ALLOC(request, sizeof(*request));
        if (!request)
                GOTO(out_event, rc = -ENOMEM);

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        /* XXX maintain a list of all managed devices: insert here */

        /* And now, loop forever on requests */
        while (1) {
                wait_event(svc->srv_waitq,
                           ptlrpc_check_event(svc, thread, event));

                if (thread->t_flags & SVC_STOPPING) {
                        spin_lock(&svc->srv_lock);
                        thread->t_flags &= ~SVC_STOPPING;
                        spin_unlock(&svc->srv_lock);

                        EXIT;
                        break;
                }

                if (thread->t_flags & SVC_EVENT) {
                        spin_lock(&svc->srv_lock);
                        thread->t_flags &= ~SVC_EVENT;
                        spin_unlock(&svc->srv_lock);

                        rc = handle_incoming_request(obddev, svc, event,
                                                     request);
                        continue;
                }

                /* check_event returned true but set neither flag: bug */
                CERROR("unknown break in service");
                LBUG();
                EXIT;
                break;
        }

        OBD_FREE(request, sizeof(*request));
out_event:
        OBD_FREE(event, sizeof(*event));
out:
        /* let ptlrpc_stop_thread's wait_event complete */
        thread->t_flags = SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_NET, "service thread exiting, process %d: rc = %d\n",
               current->pid, rc);
        return rc;
}
376
/* Ask one service thread to stop and sleep until it has exited.
 * Overwrites (not ORs) t_flags with SVC_STOPPING, wakes every thread on
 * the service waitqueue (only the targeted one sees the flag), then
 * waits for the thread to post SVC_STOPPED on its control waitqueue. */
static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                               struct ptlrpc_thread *thread)
{
        spin_lock(&svc->srv_lock);
        thread->t_flags = SVC_STOPPING;
        spin_unlock(&svc->srv_lock);

        wake_up(&svc->srv_waitq);
        wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED));
}
387
/* Stop and free every thread on svc->srv_threads.  srv_lock is dropped
 * around ptlrpc_stop_thread() because it sleeps waiting for the thread
 * to exit; the entry is unlinked and freed only after the thread has
 * reported SVC_STOPPED, so the thread never touches freed memory.
 * NOTE(review): assumes no concurrent ptlrpc_start_thread() on this
 * service while we run — verify callers. */
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
        spin_lock(&svc->srv_lock);
        while (!list_empty(&svc->srv_threads)) {
                struct ptlrpc_thread *thread;
                thread = list_entry(svc->srv_threads.next, struct ptlrpc_thread,
                                    t_link);
                spin_unlock(&svc->srv_lock);
                ptlrpc_stop_thread(svc, thread);
                spin_lock(&svc->srv_lock);
                list_del(&thread->t_link);
                OBD_FREE(thread, sizeof(*thread));
        }
        spin_unlock(&svc->srv_lock);
}
403
404 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
405                         char *name)
406 {
407         struct ptlrpc_svc_data d;
408         struct ptlrpc_thread *thread;
409         int rc;
410         ENTRY;
411
412         OBD_ALLOC(thread, sizeof(*thread));
413         if (thread == NULL) {
414                 LBUG();
415                 RETURN(-ENOMEM);
416         }
417         init_waitqueue_head(&thread->t_ctl_waitq);
418
419         d.dev = dev;
420         d.svc = svc;
421         d.name = name;
422         d.thread = thread;
423
424         spin_lock(&svc->srv_lock);
425         list_add(&thread->t_link, &svc->srv_threads);
426         spin_unlock(&svc->srv_lock);
427
428         /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
429          * just drop the VM and FILES in ptlrpc_daemonize() right away.
430          */
431         rc = kernel_thread(ptlrpc_main, (void *) &d, CLONE_VM | CLONE_FILES);
432         if (rc < 0) {
433                 CERROR("cannot start thread\n");
434                 OBD_FREE(thread, sizeof(*thread));
435                 RETURN(rc);
436         }
437         wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
438
439         RETURN(0);
440 }
441
/* Tear down a service created by ptlrpc_init_svc: unlink and free every
 * request buffer descriptor and its buffer, free each interface's event
 * queue (if it was ever allocated — invalid handles are skipped, which
 * makes this safe to call on a partially-constructed service), then
 * free the service struct itself.  All service threads must already
 * have been stopped.  Always returns 0. */
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        int i;
        int rc;
        struct ptlrpc_srv_ni *srv_ni;

        LASSERT (list_empty (&service->srv_threads));

        /* XXX We could reply (with failure) to all buffered requests
         * _after_ unlinking _all_ the request buffers, but _before_
         * freeing them.
         */

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: tearing down interface %s\n",
                        service->srv_name, srv_ni->sni_ni->pni_name);

                while (!list_empty (&srv_ni->sni_rqbds)) {
                        struct ptlrpc_request_buffer_desc *rqbd =
                                list_entry (srv_ni->sni_rqbds.next,
                                            struct ptlrpc_request_buffer_desc,
                                            rqbd_list);

                        list_del (&rqbd->rqbd_list);

                        /* NOTE(review): this LASSERT demands refcount > 0
                         * while the comment below says it "could be
                         * anything" — presumably a posted buffer always
                         * holds at least one reference; confirm against
                         * ptlrpc_link_svc_me(). */
                        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
                        /* refcount could be anything; it's possible for
                         * the buffers to continued to get filled after all
                         * the server threads exited.  But we know they
                         * _have_ exited.
                         */

                        (void) PtlMEUnlink(rqbd->rqbd_me_h);
                        /* The callback handler could have unlinked this ME
                         * already (we're racing with her) but it's safe to
                         * ensure it _has_ been unlinked.
                         */

                        OBD_FREE (rqbd->rqbd_buffer, service->srv_buf_size);
                        OBD_FREE (rqbd, sizeof (*rqbd));
                        srv_ni->sni_nrqbds--;
                }

                LASSERT (srv_ni->sni_nrqbds == 0);

                /* EQ handle is invalid if PtlEQAlloc never ran or failed */
                if (ptl_is_valid_handle (&srv_ni->sni_eq_h)) {
                        rc = PtlEQFree(srv_ni->sni_eq_h);
                        if (rc)
                                CERROR("%s.%d: PtlEQFree failed on %s: %d\n",
                                       service->srv_name, i,
                                       srv_ni->sni_ni->pni_name, rc);
                }
        }

        /* must match the offsetof-sized OBD_ALLOC in ptlrpc_init_svc */
        OBD_FREE(service,
                 offsetof (struct ptlrpc_service,
                           srv_interfaces[ptlrpc_ninterfaces]));
        return 0;
}