1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_RPC
25 #include <liblustre.h>
26 #include <linux/kp30.h>
28 #include <linux/obd_support.h>
29 #include <linux/obd_class.h>
30 #include <linux/lustre_net.h>
31 #include <portals/types.h>
32 #include "ptlrpc_internal.h"
34 extern int request_in_callback(ptl_event_t *ev);
/*
 * ptlrpc_check_event - poll all network interfaces for a pending event.
 *
 * Under svc->srv_lock, scans each interface's Portals event queue once,
 * starting after the interface served last time (round-robin via
 * srv_interface_rover) so one busy interface cannot starve the others.
 * Sets SVC_EVENT on the thread when an event was fetched into *event.
 *
 * NOTE(review): this chunk is missing several original lines (local
 * declarations, return statements, closing braces), so the comments
 * below only describe the statements that are visible.
 */
36 static int ptlrpc_check_event(struct ptlrpc_service *svc,
37 struct ptlrpc_thread *thread, ptl_event_t *event)
39 struct ptlrpc_srv_ni *srv_ni;
45 spin_lock(&svc->srv_lock);
/* A stop request takes priority over fetching another event. */
47 if (thread->t_flags & SVC_STOPPING)
/* Caller must not already hold an unconsumed event. */
50 LASSERT ((thread->t_flags & SVC_EVENT) == 0);
51 LASSERT (ptlrpc_ninterfaces > 0);
/* Visit every interface exactly once, rotated by the rover. */
53 for (i = 0; i < ptlrpc_ninterfaces; i++) {
54 idx = (svc->srv_interface_rover + i) % ptlrpc_ninterfaces;
55 srv_ni = &svc->srv_interfaces[idx];
/* The EQ must have been allocated in ptlrpc_init_svc(). */
57 LASSERT (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE));
/* Non-blocking fetch of the next event on this interface. */
59 rc = PtlEQGet(srv_ni->sni_eq_h, event);
62 /* next time start with the next interface */
63 svc->srv_interface_rover = (idx+1) % ptlrpc_ninterfaces;
64 thread->t_flags |= SVC_EVENT;
/* Any other rc from PtlEQGet is unexpected — log it loudly. */
71 CERROR("BUG: PtlEQGet returned %d\n", rc);
78 spin_unlock(&svc->srv_lock);
/*
 * ptlrpc_init_svc - allocate and initialise a ptlrpc service.
 *
 * Allocates one struct ptlrpc_service with a trailing per-interface
 * array (flexible sizing via offsetof), then for each interface
 * allocates a Portals event queue of @nevents entries and @nbufs
 * request buffers of @bufsize bytes each.  @handler is the callback
 * invoked for every incoming request; @req_portal/@rep_portal are the
 * Portals indices for requests and replies.
 *
 * NOTE(review): the visible chunk is missing original lines (locals,
 * NULL checks after OBD_ALLOC, goto-failure labels, the success return)
 * — error-path comments below are hedged accordingly.
 */
82 struct ptlrpc_service * ptlrpc_init_svc(__u32 nevents, __u32 nbufs,
83 __u32 bufsize, __u32 max_req_size,
84 int req_portal, int rep_portal,
85 svc_handler_t handler, char *name,
86 struct obd_device *obddev)
89 struct ptlrpc_service *service;
90 struct ptlrpc_srv_ni *srv_ni;
93 LASSERT (ptlrpc_ninterfaces > 0);
/* Size includes the variable-length srv_interfaces[] tail. */
95 ssize = offsetof (struct ptlrpc_service,
96 srv_interfaces[ptlrpc_ninterfaces]);
97 OBD_ALLOC(service, ssize);
/* NOTE(review): name is stored by reference — caller must keep the
 * string alive for the service's lifetime; presumably a literal. */
101 service->srv_name = name;
102 spin_lock_init(&service->srv_lock);
103 INIT_LIST_HEAD(&service->srv_threads);
104 init_waitqueue_head(&service->srv_waitq);
106 service->srv_max_req_size = max_req_size;
107 service->srv_buf_size = bufsize;
109 service->srv_rep_portal = rep_portal;
110 service->srv_req_portal = req_portal;
111 service->srv_handler = handler;
112 service->srv_interface_rover = 0;
114 /* First initialise enough for early teardown */
115 for (i = 0; i < ptlrpc_ninterfaces; i++) {
116 srv_ni = &service->srv_interfaces[i];
118 srv_ni->sni_service = service;
119 srv_ni->sni_ni = &ptlrpc_interfaces[i];
/* Mark "no EQ yet" so ptlrpc_unregister_service() can tell
 * whether PtlEQFree is needed on this interface. */
120 srv_ni->sni_eq_h = PTL_HANDLE_NONE;
121 INIT_LIST_HEAD(&srv_ni->sni_rqbds);
122 srv_ni->sni_nrqbds = 0;
123 atomic_set(&srv_ni->sni_nrqbds_receiving, 0);
126 /* Now allocate the event queue and request buffers, assuming all
127 * interfaces require the same level of buffering. */
128 for (i = 0; i < ptlrpc_ninterfaces; i++) {
129 srv_ni = &service->srv_interfaces[i];
130 CDEBUG (D_NET, "%s: initialising interface %s\n", name,
131 srv_ni->sni_ni->pni_name);
/* Events on this EQ are delivered to request_in_callback(). */
133 rc = PtlEQAlloc(srv_ni->sni_ni->pni_ni_h, nevents,
134 request_in_callback, &(srv_ni->sni_eq_h));
136 CERROR("%s.%d: PtlEQAlloc on %s failed: %d\n",
137 name, i, srv_ni->sni_ni->pni_name, rc);
/* Allocate and post nbufs request buffer descriptors. */
141 for (j = 0; j < nbufs; j++) {
142 struct ptlrpc_request_buffer_desc *rqbd;
144 OBD_ALLOC(rqbd, sizeof(*rqbd));
146 CERROR ("%s.%d: Can't allocate request "
147 "descriptor %d on %s\n",
148 name, i, srv_ni->sni_nrqbds,
149 srv_ni->sni_ni->pni_name);
153 rqbd->rqbd_srv_ni = srv_ni;
154 rqbd->rqbd_me_h = PTL_HANDLE_NONE;
155 atomic_set(&rqbd->rqbd_refcount, 0);
157 OBD_ALLOC(rqbd->rqbd_buffer, service->srv_buf_size);
158 if (rqbd->rqbd_buffer == NULL) {
159 CERROR ("%s.%d: Can't allocate request "
161 name, i, srv_ni->sni_nrqbds,
162 srv_ni->sni_ni->pni_name);
/* Descriptor is useless without its buffer. */
163 OBD_FREE(rqbd, sizeof(*rqbd));
/* Track the buffer so unregister can free it. */
166 list_add(&rqbd->rqbd_list, &srv_ni->sni_rqbds);
167 srv_ni->sni_nrqbds++;
/* Attach a match entry so the network can fill the buffer. */
169 ptlrpc_link_svc_me(rqbd);
173 ptlrpc_lprocfs_register_service(obddev, service);
175 CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
176 service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);
/* Failure path: partial init is undone by the full teardown. */
180 ptlrpc_unregister_service(service);
/*
 * handle_incoming_request - unpack one network event into a request
 * and dispatch it to the service handler.
 *
 * Validates the buffer descriptor recovered from the event's user_ptr,
 * builds *request in place (zeroed each time — the struct is reused by
 * the caller's loop), unpacks and sanity-checks the wire message, looks
 * up the export/connection for the sender, runs svc->srv_handler, then
 * drops the connection/export references.  When the last reference on
 * the request buffer descriptor is dropped, the buffer is re-posted to
 * the network via ptlrpc_link_svc_me().
 *
 * NOTE(review): this chunk is missing original lines (the parameter
 * list's event argument, error-path returns, closing braces), so
 * comments cover only the visible statements.
 */
184 static int handle_incoming_request(struct obd_device *obddev,
185 struct ptlrpc_service *svc,
187 struct ptlrpc_request *request)
/* The MD's user_ptr was set to the rqbd when the buffer was posted. */
189 struct ptlrpc_request_buffer_desc *rqbd = event->mem_desc.user_ptr;
192 /* FIXME: If we move to an event-driven model, we should put the request
193 * on the stack of mds_handle instead. */
195 LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
196 LASSERT ((event->mem_desc.options & (PTL_MD_IOV | PTL_MD_KIOV)) == 0);
197 LASSERT (rqbd->rqbd_srv_ni->sni_service == svc);
198 LASSERT (rqbd->rqbd_buffer == event->mem_desc.start);
199 LASSERT (event->offset + event->mlength <= svc->srv_buf_size);
/* request is caller-owned scratch storage; wipe any previous use. */
201 memset(request, 0, sizeof(*request));
202 spin_lock_init (&request->rq_lock);
203 INIT_LIST_HEAD(&request->rq_list);
204 request->rq_svc = svc;
205 request->rq_obd = obddev;
/* The Portals match bits carry the RPC transaction id (xid). */
206 request->rq_xid = event->match_bits;
/* Message lives inside the posted buffer at the event's offset. */
207 request->rq_reqmsg = event->mem_desc.start + event->offset;
208 request->rq_reqlen = event->mlength;
211 /* Clear request swab mask; this is a new request */
212 request->rq_req_swab_mask = 0;
/* Validate magic/version and byte-swap the header if needed. */
214 rc = lustre_unpack_msg (request->rq_reqmsg, request->rq_reqlen);
216 CERROR ("error unpacking request: ptl %d from "LPX64
217 " xid "LPU64"\n", svc->srv_req_portal,
218 event->initiator.nid, request->rq_xid);
/* Only MSG_REQUEST packets belong on a service portal. */
222 if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
223 CERROR("wrong packet type received (type=%u)\n",
224 request->rq_reqmsg->type);
228 CDEBUG(D_NET, "got req "LPD64" (md: %p + %d)\n", request->rq_xid,
229 event->mem_desc.start, event->offset);
231 request->rq_peer.peer_nid = event->initiator.nid;
232 request->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;
/* May be NULL for a client that has not connected yet. */
234 request->rq_export = class_conn2export(&request->rq_reqmsg->handle);
236 if (request->rq_export) {
237 request->rq_connection = request->rq_export->exp_connection;
238 ptlrpc_connection_addref(request->rq_connection);
239 request->rq_export->exp_last_request_time =
240 LTIME_S(CURRENT_TIME);
242 /* create a (hopefully temporary) connection that will be used
243 * to send the reply if this call doesn't create an export.
244 * XXX revisit this when we revamp ptlrpc */
245 request->rq_connection =
246 ptlrpc_get_connection(&request->rq_peer, NULL);
249 CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid:pid:xid:ni:nid:opc %s:%s:%d:"
250 LPU64":%s:"LPX64":%d\n",
252 (request->rq_export ?
253 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
254 request->rq_reqmsg->status, request->rq_xid,
255 rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
256 request->rq_reqmsg->opc);
/* Dispatch to the service-specific handler (e.g. MDS/OST). */
258 rc = svc->srv_handler(request);
259 CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid:pid:xid:ni:nid:opc %s:%s:%d:"
260 LPU64":%s:"LPX64":%d\n",
262 (request->rq_export ?
263 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
264 request->rq_reqmsg->status, request->rq_xid,
265 rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
266 request->rq_reqmsg->opc);
/* Drop the references taken (or inherited) above. */
268 ptlrpc_put_connection(request->rq_connection);
269 if (request->rq_export != NULL)
270 class_export_put(request->rq_export);
273 if (atomic_dec_and_test (&rqbd->rqbd_refcount)) /* last reference? */
274 ptlrpc_link_svc_me (rqbd);
/* Detach the calling kernel thread from its parent session so it can
 * run as a long-lived service daemon.
 * NOTE(review): the body is almost entirely missing from this chunk —
 * only the session assignment is visible; the rest (reparenting,
 * dropping fs/files as the comment implies is NOT done) cannot be
 * documented from here. */
279 /* Don't use daemonize, it removes fs struct from new thread (bug 418) */
280 void ptlrpc_daemonize(void)
284 current->session = 1;
/*
 * ptlrpc_main - body of a ptlrpc service thread.
 *
 * Blocks all signals, names the thread, allocates a scratch event and
 * request, marks itself SVC_RUNNING (waking the spawner), then loops:
 * wait for ptlrpc_check_event() to fetch an event, exit on
 * SVC_STOPPING, otherwise account per-request lprocfs statistics and
 * dispatch the event through handle_incoming_request().  On exit it
 * frees the scratch structures, sets SVC_STOPPED and wakes the stopper.
 *
 * NOTE(review): many original lines are missing from this chunk
 * (locals, loop/brace structure, lprocfs counter arguments, return),
 * so the statistics comments below describe only what is visible.
 */
292 static int ptlrpc_main(void *arg)
294 struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
295 struct obd_device *obddev = data->dev;
296 struct ptlrpc_service *svc = data->svc;
297 struct ptlrpc_thread *thread = data->thread;
298 struct ptlrpc_request *request;
/* -1 == "no previous request yet"; stats start from request #2. */
302 cycles_t workdone_time = -1;
303 cycles_t svc_workcycles = -1;
/* Service threads ignore all signals. */
309 SIGNAL_MASK_LOCK(current, flags);
310 sigfillset(&current->blocked);
312 SIGNAL_MASK_UNLOCK(current, flags);
/* UML kernels embed the host pid in the thread name. */
314 #if defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20))
315 sprintf(current->comm, "%s|%d", data->name,current->thread.extern_pid);
316 #elif defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
317 sprintf(current->comm, "%s|%d", data->name,
318 current->thread.mode.tt.extern_pid);
320 strcpy(current->comm, data->name);
/* Per-thread scratch buffers reused for every request. */
324 OBD_ALLOC(event, sizeof(*event));
326 GOTO(out, rc = -ENOMEM);
327 OBD_ALLOC(request, sizeof(*request));
329 GOTO(out_event, rc = -ENOMEM);
331 /* Record that the thread is running */
332 thread->t_flags = SVC_RUNNING;
/* Unblocks ptlrpc_start_thread() waiting on t_ctl_waitq. */
333 wake_up(&thread->t_ctl_waitq);
335 /* XXX maintain a list of all managed devices: insert here */
337 /* And now, loop forever on requests */
339 struct l_wait_info lwi = { 0 };
340 l_wait_event(svc->srv_waitq,
341 ptlrpc_check_event(svc, thread, event), &lwi);
343 if (thread->t_flags & SVC_STOPPING) {
344 spin_lock(&svc->srv_lock);
/* Acknowledge the stop request, then leave the loop. */
345 thread->t_flags &= ~SVC_STOPPING;
346 spin_unlock(&svc->srv_lock);
352 if (thread->t_flags & SVC_EVENT) {
353 cycles_t workstart_time;
355 spin_lock(&svc->srv_lock);
356 thread->t_flags &= ~SVC_EVENT;
357 /* Update Service Statistics */
358 workstart_time = get_cycles();
359 if (workdone_time != -1 && svc->svc_stats != NULL) {
360 /* Stats for req(n) are updated just before
361 * req(n+1) is executed. This avoids need to
362 * reacquire svc->srv_lock after
363 * call to handling_request().
368 lprocfs_counter_add(svc->svc_stats,
371 event->arrival_time));
373 /* Wait for b_eq branch
374 lprocfs_counter_add(svc->svc_stats,
375 PTLRPC_SVCEQDEPTH_CNTR,
379 lprocfs_counter_add(svc->svc_stats,
380 PTLRPC_SVCIDLETIME_CNTR,
383 /* previous request */
384 opc = opcode_offset(request->rq_reqmsg->opc);
386 LASSERT(opc < LUSTRE_MAX_OPCODES);
387 lprocfs_counter_add(svc->svc_stats, opc,
392 spin_unlock(&svc->srv_lock);
/* Dispatch outside the lock; handler may block. */
394 rc = handle_incoming_request(obddev, svc, event,
396 workdone_time = get_cycles();
397 svc_workcycles = workdone_time - workstart_time;
/* Woke with neither SVC_STOPPING nor SVC_EVENT set. */
401 CERROR("unknown break in service");
407 /* NB should wait for all SENT callbacks to complete before exiting
408 * here. Unfortunately at this time there is no way to track this
411 OBD_FREE(request, sizeof(*request));
413 OBD_FREE(event, sizeof(*event));
415 thread->t_flags = SVC_STOPPED;
/* Unblocks ptlrpc_stop_thread() waiting on t_ctl_waitq. */
416 wake_up(&thread->t_ctl_waitq);
418 CDEBUG(D_NET, "service thread exiting, process %d: rc = %d\n",
/*
 * ptlrpc_stop_thread - ask one service thread to exit and wait for it.
 *
 * Sets SVC_STOPPING under the service lock, wakes the shared service
 * waitqueue (the thread may be idle in l_wait_event), then blocks until
 * the thread reports SVC_STOPPED from ptlrpc_main()'s exit path.
 */
423 static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
424 struct ptlrpc_thread *thread)
426 struct l_wait_info lwi = { 0 };
428 spin_lock(&svc->srv_lock);
429 thread->t_flags = SVC_STOPPING;
430 spin_unlock(&svc->srv_lock);
/* srv_waitq is shared by all threads; only the flagged one exits. */
432 wake_up(&svc->srv_waitq);
433 l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
/*
 * ptlrpc_stop_all_threads - stop and free every thread of a service.
 *
 * Pops threads off svc->srv_threads one at a time; drops srv_lock
 * around ptlrpc_stop_thread() because that call blocks waiting for the
 * thread to exit, then reacquires it to unlink and free the entry.
 */
437 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
439 spin_lock(&svc->srv_lock);
440 while (!list_empty(&svc->srv_threads)) {
441 struct ptlrpc_thread *thread;
442 thread = list_entry(svc->srv_threads.next, struct ptlrpc_thread,
/* Must not hold srv_lock while sleeping in ptlrpc_stop_thread(). */
444 spin_unlock(&svc->srv_lock);
445 ptlrpc_stop_thread(svc, thread);
446 spin_lock(&svc->srv_lock);
447 list_del(&thread->t_link);
448 OBD_FREE(thread, sizeof(*thread));
450 spin_unlock(&svc->srv_lock);
/*
 * ptlrpc_start_thread - spawn one service thread running ptlrpc_main.
 *
 * Allocates the thread bookkeeping struct, links it onto
 * svc->srv_threads, forks the kernel thread, and blocks until the new
 * thread sets SVC_RUNNING — so the stack-local ptlrpc_svc_data (d) is
 * guaranteed to have been consumed before this function returns.
 *
 * NOTE(review): lines filling in `d` and the name parameter are missing
 * from this chunk.
 */
453 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
456 struct l_wait_info lwi = { 0 };
457 struct ptlrpc_svc_data d;
458 struct ptlrpc_thread *thread;
462 OBD_ALLOC(thread, sizeof(*thread));
465 init_waitqueue_head(&thread->t_ctl_waitq);
472 spin_lock(&svc->srv_lock);
473 list_add(&thread->t_link, &svc->srv_threads);
474 spin_unlock(&svc->srv_lock);
476 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
477 * just drop the VM and FILES in ptlrpc_daemonize() right away.
479 rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
481 CERROR("cannot start thread: %d\n", rc);
/* NOTE(review): thread appears to still be on srv_threads here —
 * confirm against the missing lines that it is unlinked before
 * being freed. */
482 OBD_FREE(thread, sizeof(*thread));
/* Wait until ptlrpc_main() has copied d and flagged SVC_RUNNING. */
485 l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);
/*
 * ptlrpc_unregister_service - tear down a service built by
 * ptlrpc_init_svc().
 *
 * All service threads must already be stopped.  For each interface:
 * unlink and free every request buffer descriptor and its buffer, then
 * free the Portals event queue (skipped when the handle is still
 * PTL_HANDLE_NONE, i.e. init failed before allocating it).  Finally
 * unregisters lprocfs stats and frees the service struct itself.
 *
 * NOTE(review): this chunk is missing original lines (locals, some
 * closing braces, the OBD_FREE(service, ...) call is split across the
 * final lines), so comments cover only the visible statements.
 */
490 int ptlrpc_unregister_service(struct ptlrpc_service *service)
493 struct ptlrpc_srv_ni *srv_ni;
495 LASSERT (list_empty (&service->srv_threads));
497 /* XXX We could reply (with failure) to all buffered requests
498 * _after_ unlinking _all_ the request buffers, but _before_
502 for (i = 0; i < ptlrpc_ninterfaces; i++) {
503 srv_ni = &service->srv_interfaces[i];
504 CDEBUG (D_NET, "%s: tearing down interface %s\n",
505 service->srv_name, srv_ni->sni_ni->pni_name);
507 while (!list_empty (&srv_ni->sni_rqbds)) {
508 struct ptlrpc_request_buffer_desc *rqbd =
509 list_entry (srv_ni->sni_rqbds.next,
510 struct ptlrpc_request_buffer_desc,
513 list_del (&rqbd->rqbd_list);
515 LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
516 /* refcount could be anything; it's possible for
517 * the buffers to continued to get filled after all
518 * the server threads exited. But we know they
/* Result deliberately ignored — see the race note below. */
522 (void) PtlMEUnlink(rqbd->rqbd_me_h);
523 /* The callback handler could have unlinked this ME
524 * already (we're racing with her) but it's safe to
525 * ensure it _has_ been unlinked.
528 OBD_FREE (rqbd->rqbd_buffer, service->srv_buf_size);
529 OBD_FREE (rqbd, sizeof (*rqbd));
530 srv_ni->sni_nrqbds--;
/* Every descriptor counted in must have been freed above. */
533 LASSERT (srv_ni->sni_nrqbds == 0);
/* EQ is only allocated once init got that far; skip otherwise. */
535 if (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE)) {
536 rc = PtlEQFree(srv_ni->sni_eq_h);
538 CERROR("%s.%d: PtlEQFree failed on %s: %d\n",
539 service->srv_name, i,
540 srv_ni->sni_ni->pni_name, rc);
544 ptlrpc_lprocfs_unregister_service(service);
/* Free the variable-sized service struct with the same size used
 * at allocation in ptlrpc_init_svc(). */
547 offsetof (struct ptlrpc_service,
548 srv_interfaces[ptlrpc_ninterfaces]));