/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <liblustre.h>
#include <linux/kp30.h>
#endif
#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_net.h>
#include <portals/types.h>
#include "ptlrpc_internal.h"

extern int request_in_callback(ptl_event_t *ev);

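/* Poll each interface's event queue for an incoming request event, starting
 * at srv_interface_rover so the interfaces are serviced round-robin.  Returns
 * 1 (setting SVC_EVENT) when an event was dequeued, 1 when the thread has
 * been told to stop, and 0 when every queue is empty.  Used as the
 * l_wait_event() condition in ptlrpc_main(). */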
static int ptlrpc_check_event(struct ptlrpc_service *svc,
                              struct ptlrpc_thread *thread, ptl_event_t *event)
{
        struct ptlrpc_srv_ni *srv_ni;
        int i;
        int idx;
        int rc;
        ENTRY;

        spin_lock(&svc->srv_lock);

        if (thread->t_flags & SVC_STOPPING)
                GOTO(out, rc = 1);

        LASSERT ((thread->t_flags & SVC_EVENT) == 0);
        LASSERT (ptlrpc_ninterfaces > 0);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                idx = (svc->srv_interface_rover + i) % ptlrpc_ninterfaces;
                srv_ni = &svc->srv_interfaces[idx];

                LASSERT (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE));

                rc = PtlEQGet(srv_ni->sni_eq_h, event);
                switch (rc) {
                case PTL_OK:
                        /* next time start with the next interface */
                        svc->srv_interface_rover = (idx+1) % ptlrpc_ninterfaces;
                        thread->t_flags |= SVC_EVENT;
                        GOTO(out, rc = 1);

                case PTL_EQ_EMPTY:
                        continue;

                default:
                        CERROR("BUG: PtlEQGet returned %d\n", rc);
                        LBUG();
                }
        }
        rc = 0;
        EXIT;
 out:
        spin_unlock(&svc->srv_lock);
        return rc;
}

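/* Create a service: allocate an event queue of 'nevents' entries on each
 * network interface, then post 'nbufs' request buffers of 'bufsize' bytes per
 * interface via ptlrpc_link_svc_me().  On failure the partially initialised
 * service is torn down with ptlrpc_unregister_service() and NULL is returned.
 * Callers typically follow this with one or more ptlrpc_start_thread() calls,
 * and later tear down with ptlrpc_stop_all_threads() and
 * ptlrpc_unregister_service(). */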
struct ptlrpc_service * ptlrpc_init_svc(__u32 nevents, __u32 nbufs,
                                        __u32 bufsize, __u32 max_req_size,
                                        int req_portal, int rep_portal,
                                        svc_handler_t handler, char *name,
                                        struct obd_device *obddev)
{
        int i, j, ssize, rc;
        struct ptlrpc_service *service;
        struct ptlrpc_srv_ni  *srv_ni;
        ENTRY;

        LASSERT (ptlrpc_ninterfaces > 0);

        ssize = offsetof (struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]);
        OBD_ALLOC(service, ssize);
        if (service == NULL)
                RETURN(NULL);

        service->srv_name = name;
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_max_req_size = max_req_size;
        service->srv_buf_size = bufsize;

        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_handler = handler;
        service->srv_interface_rover = 0;

        /* First initialise enough for early teardown */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];

                srv_ni->sni_service = service;
                srv_ni->sni_ni = &ptlrpc_interfaces[i];
                srv_ni->sni_eq_h = PTL_HANDLE_NONE;
                INIT_LIST_HEAD(&srv_ni->sni_rqbds);
                srv_ni->sni_nrqbds = 0;
                atomic_set(&srv_ni->sni_nrqbds_receiving, 0);
        }

        /* Now allocate the event queue and request buffers, assuming all
         * interfaces require the same level of buffering. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: initialising interface %s\n", name,
                        srv_ni->sni_ni->pni_name);

                rc = PtlEQAlloc(srv_ni->sni_ni->pni_ni_h, nevents,
                                request_in_callback, &(srv_ni->sni_eq_h));
                if (rc != PTL_OK) {
                        CERROR("%s.%d: PtlEQAlloc on %s failed: %d\n",
                               name, i, srv_ni->sni_ni->pni_name, rc);
                        GOTO (failed, NULL);
                }

                for (j = 0; j < nbufs; j++) {
                        struct ptlrpc_request_buffer_desc *rqbd;

                        OBD_ALLOC(rqbd, sizeof(*rqbd));
                        if (rqbd == NULL) {
                                CERROR ("%s.%d: Can't allocate request "
                                        "descriptor %d on %s\n",
                                        name, i, srv_ni->sni_nrqbds,
                                        srv_ni->sni_ni->pni_name);
                                GOTO(failed, NULL);
                        }

                        rqbd->rqbd_srv_ni = srv_ni;
                        rqbd->rqbd_me_h = PTL_HANDLE_NONE;
                        atomic_set(&rqbd->rqbd_refcount, 0);

                        OBD_ALLOC(rqbd->rqbd_buffer, service->srv_buf_size);
                        if (rqbd->rqbd_buffer == NULL) {
                                CERROR ("%s.%d: Can't allocate request "
                                        "buffer %d on %s\n",
                                        name, i, srv_ni->sni_nrqbds,
                                        srv_ni->sni_ni->pni_name);
                                OBD_FREE(rqbd, sizeof(*rqbd));
                                GOTO(failed, NULL);
                        }
                        list_add(&rqbd->rqbd_list, &srv_ni->sni_rqbds);
                        srv_ni->sni_nrqbds++;

                        ptlrpc_link_svc_me(rqbd);
                }
        }

        ptlrpc_lprocfs_register_service(obddev, service);

        CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
               service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);

        RETURN(service);
failed:
        ptlrpc_unregister_service(service);
        return NULL;
}

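/* Unpack the request delivered by 'event' into 'request', look up the export
 * and connection it belongs to, dispatch it to the service handler and then
 * drop the references taken here.  When the last reference on the request
 * buffer descriptor goes away, the buffer is reposted for new requests. */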
static int handle_incoming_request(struct obd_device *obddev,
                                   struct ptlrpc_service *svc,
                                   ptl_event_t *event,
                                   struct ptlrpc_request *request)
{
        struct ptlrpc_request_buffer_desc *rqbd = event->mem_desc.user_ptr;
        int rc;

        /* FIXME: If we move to an event-driven model, we should put the request
         * on the stack of mds_handle instead. */

        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
        LASSERT ((event->mem_desc.options & (PTL_MD_IOV | PTL_MD_KIOV)) == 0);
        LASSERT (rqbd->rqbd_srv_ni->sni_service == svc);
        LASSERT (rqbd->rqbd_buffer == event->mem_desc.start);
        LASSERT (event->offset + event->mlength <= svc->srv_buf_size);

        memset(request, 0, sizeof(*request));
        spin_lock_init (&request->rq_lock);
        INIT_LIST_HEAD(&request->rq_list);
        request->rq_svc = svc;
        request->rq_obd = obddev;
        request->rq_xid = event->match_bits;
        request->rq_reqmsg = event->mem_desc.start + event->offset;
        request->rq_reqlen = event->mlength;

#if SWAB_PARANOIA
        /* Clear request swab mask; this is a new request */
        request->rq_req_swab_mask = 0;
#endif
        rc = lustre_unpack_msg (request->rq_reqmsg, request->rq_reqlen);
        if (rc != 0) {
                CERROR ("error unpacking request: ptl %d from "LPX64
                        " xid "LPU64"\n", svc->srv_req_portal,
                        event->initiator.nid, request->rq_xid);
                goto out;
        }
        rc = -EINVAL;
        if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type received (type=%u)\n",
                       request->rq_reqmsg->type);
                goto out;
        }

        CDEBUG(D_NET, "got req "LPD64" (md: %p + %d)\n", request->rq_xid,
               event->mem_desc.start, event->offset);

        request->rq_peer.peer_nid = event->initiator.nid;
        request->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;

        request->rq_export = class_conn2export(&request->rq_reqmsg->handle);

        if (request->rq_export) {
                request->rq_connection = request->rq_export->exp_connection;
                ptlrpc_connection_addref(request->rq_connection);
                request->rq_export->exp_last_request_time =
                        LTIME_S(CURRENT_TIME);
        } else {
                /* create a (hopefully temporary) connection that will be used
                 * to send the reply if this call doesn't create an export.
                 * XXX revisit this when we revamp ptlrpc */
                request->rq_connection =
                        ptlrpc_get_connection(&request->rq_peer, NULL);
        }

        CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid:pid:xid:ni:nid:opc %s:%s:%d:"
               LPU64":%s:"LPX64":%d\n",
               current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               request->rq_reqmsg->status, request->rq_xid,
               rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
               request->rq_reqmsg->opc);

        rc = svc->srv_handler(request);
        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid:pid:xid:ni:nid:opc %s:%s:%d:"
               LPU64":%s:"LPX64":%d\n",
               current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               request->rq_reqmsg->status, request->rq_xid,
               rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
               request->rq_reqmsg->opc);

        ptlrpc_put_connection(request->rq_connection);
        if (request->rq_export != NULL)
                class_export_put(request->rq_export);

 out:
        if (atomic_dec_and_test (&rqbd->rqbd_refcount)) /* last reference? */
                ptlrpc_link_svc_me (rqbd);

        return rc;
}

/* Don't use daemonize, it removes fs struct from new thread (bug 418) */
void ptlrpc_daemonize(void)
{
        exit_mm(current);

        current->session = 1;
        current->pgrp = 1;
        current->tty = NULL;

        exit_files(current);
        reparent_to_init();
}

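/* Body of a service thread: daemonize, block all signals, then loop waiting
 * for events and passing each one to handle_incoming_request() until the
 * thread is asked to stop.  Per-request lprocfs statistics for request n are
 * accumulated just before request n+1 is handled. */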
static int ptlrpc_main(void *arg)
{
        struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
        struct obd_device *obddev = data->dev;
        struct ptlrpc_service *svc = data->svc;
        struct ptlrpc_thread *thread = data->thread;
        struct ptlrpc_request *request;
        ptl_event_t *event;
        int rc = 0;
        unsigned long flags;
        cycles_t workdone_time = -1;
        cycles_t svc_workcycles = -1;
        ENTRY;

        lock_kernel();
        ptlrpc_daemonize();

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

#if defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20))
        sprintf(current->comm, "%s|%d", data->name,current->thread.extern_pid);
#elif defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
        sprintf(current->comm, "%s|%d", data->name,
                current->thread.mode.tt.extern_pid);
#else
        strcpy(current->comm, data->name);
#endif
        unlock_kernel();

        OBD_ALLOC(event, sizeof(*event));
        if (!event)
                GOTO(out, rc = -ENOMEM);
        OBD_ALLOC(request, sizeof(*request));
        if (!request)
                GOTO(out_event, rc = -ENOMEM);

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        /* XXX maintain a list of all managed devices: insert here */

        /* And now, loop forever on requests */
        while (1) {
                struct l_wait_info lwi = { 0 };
                l_wait_event(svc->srv_waitq,
                             ptlrpc_check_event(svc, thread, event), &lwi);

                if (thread->t_flags & SVC_STOPPING) {
                        spin_lock(&svc->srv_lock);
                        thread->t_flags &= ~SVC_STOPPING;
                        spin_unlock(&svc->srv_lock);

                        EXIT;
                        break;
                }

                if (thread->t_flags & SVC_EVENT) {
                        cycles_t  workstart_time;

                        spin_lock(&svc->srv_lock);
                        thread->t_flags &= ~SVC_EVENT;
                        /* Update Service Statistics */
                        workstart_time = get_cycles();
                        if (workdone_time != -1 && svc->svc_stats != NULL) {
                                /* Stats for req(n) are updated just before
                                 * req(n+1) is executed.  This avoids the need
                                 * to reacquire svc->srv_lock after the call
                                 * to handle_incoming_request().
                                 */
                                int opc;

                                /* req_waittime */
                                lprocfs_counter_add(svc->svc_stats,
                                                    PTLRPC_REQWAIT_CNTR,
                                                    (workstart_time -
                                                     event->arrival_time));
                                /* svc_eqdepth */
                                /* Wait for b_eq branch
                                lprocfs_counter_add(svc->svc_stats,
                                                    PTLRPC_SVCEQDEPTH_CNTR,
                                                    0);
                                */
                                /* svc_idletime */
                                lprocfs_counter_add(svc->svc_stats,
                                                    PTLRPC_SVCIDLETIME_CNTR,
                                                    (workstart_time -
                                                     workdone_time));
                                /* previous request */
                                opc = opcode_offset(request->rq_reqmsg->opc);
                                if (opc > 0) {
                                        LASSERT(opc < LUSTRE_MAX_OPCODES);
                                        /* per-opcode counters live after the
                                         * fixed counters */
                                        lprocfs_counter_add(svc->svc_stats,
                                                            opc +
                                                            PTLRPC_LAST_CNTR,
                                                            svc_workcycles);
                                }
                        }
                        spin_unlock(&svc->srv_lock);

                        rc = handle_incoming_request(obddev, svc, event,
                                                     request);
                        workdone_time = get_cycles();
                        svc_workcycles = workdone_time - workstart_time;
                        continue;
                }

                CERROR("unknown break in service\n");
                LBUG();
                EXIT;
                break;
        }

        /* NB should wait for all SENT callbacks to complete before exiting
         * here.  Unfortunately at this time there is no way to track this
         * state.
         */
        OBD_FREE(request, sizeof(*request));
out_event:
        OBD_FREE(event, sizeof(*event));
out:
        thread->t_flags = SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_NET, "service thread exiting, process %d: rc = %d\n",
               current->pid, rc);
        return rc;
}

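/* Ask a single service thread to stop and wait until it has done so. */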
static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                               struct ptlrpc_thread *thread)
{
        struct l_wait_info lwi = { 0 };

        spin_lock(&svc->srv_lock);
        thread->t_flags = SVC_STOPPING;
        spin_unlock(&svc->srv_lock);

        wake_up(&svc->srv_waitq);
        l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
                     &lwi);
}

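/* Stop every thread attached to the service and free their descriptors. */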
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
        spin_lock(&svc->srv_lock);
        while (!list_empty(&svc->srv_threads)) {
                struct ptlrpc_thread *thread;
                thread = list_entry(svc->srv_threads.next, struct ptlrpc_thread,
                                    t_link);
                spin_unlock(&svc->srv_lock);
                ptlrpc_stop_thread(svc, thread);
                spin_lock(&svc->srv_lock);
                list_del(&thread->t_link);
                OBD_FREE(thread, sizeof(*thread));
        }
        spin_unlock(&svc->srv_lock);
}

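/* Spawn a service thread running ptlrpc_main() and wait for it to signal
 * SVC_RUNNING before returning. */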
int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
                        char *name)
{
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_svc_data d;
        struct ptlrpc_thread *thread;
        int rc;
        ENTRY;

        OBD_ALLOC(thread, sizeof(*thread));
        if (thread == NULL)
                RETURN(-ENOMEM);
        init_waitqueue_head(&thread->t_ctl_waitq);

        d.dev = dev;
        d.svc = svc;
        d.name = name;
        d.thread = thread;

        spin_lock(&svc->srv_lock);
        list_add(&thread->t_link, &svc->srv_threads);
        spin_unlock(&svc->srv_lock);

        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in ptlrpc_daemonize() right away.
         */
        rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread: %d\n", rc);
                /* don't leave a freed thread descriptor on srv_threads */
                spin_lock(&svc->srv_lock);
                list_del(&thread->t_link);
                spin_unlock(&svc->srv_lock);
                OBD_FREE(thread, sizeof(*thread));
                RETURN(rc);
        }
        l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);

        RETURN(0);
}

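/* Tear down a service: unlink and free all request buffers, free each
 * interface's event queue, unregister from lprocfs and finally free the
 * service itself.  All service threads must already have been stopped. */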
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        int i, rc;
        struct ptlrpc_srv_ni *srv_ni;

        LASSERT (list_empty (&service->srv_threads));

        /* XXX We could reply (with failure) to all buffered requests
         * _after_ unlinking _all_ the request buffers, but _before_
         * freeing them.
         */

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: tearing down interface %s\n",
                        service->srv_name, srv_ni->sni_ni->pni_name);

                while (!list_empty (&srv_ni->sni_rqbds)) {
                        struct ptlrpc_request_buffer_desc *rqbd =
                                list_entry (srv_ni->sni_rqbds.next,
                                            struct ptlrpc_request_buffer_desc,
                                            rqbd_list);

                        list_del (&rqbd->rqbd_list);

                        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
                        /* refcount could be anything; it's possible for
                         * the buffers to continue to get filled after all
                         * the server threads exited.  But we know they
                         * _have_ exited.
                         */

                        (void) PtlMEUnlink(rqbd->rqbd_me_h);
                        /* The callback handler could have unlinked this ME
                         * already (we're racing with her) but it's safe to
                         * ensure it _has_ been unlinked.
                         */

                        OBD_FREE (rqbd->rqbd_buffer, service->srv_buf_size);
                        OBD_FREE (rqbd, sizeof (*rqbd));
                        srv_ni->sni_nrqbds--;
                }

                LASSERT (srv_ni->sni_nrqbds == 0);

                if (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE)) {
                        rc = PtlEQFree(srv_ni->sni_eq_h);
                        if (rc)
                                CERROR("%s.%d: PtlEQFree failed on %s: %d\n",
                                       service->srv_name, i,
                                       srv_ni->sni_ni->pni_name, rc);
                }
        }

        ptlrpc_lprocfs_unregister_service(service);

        OBD_FREE(service,
                 offsetof (struct ptlrpc_service,
                           srv_interfaces[ptlrpc_ninterfaces]));
        return 0;
}