1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_RPC
25 #include <liblustre.h>
26 #include <linux/kp30.h>
28 #include <linux/obd_support.h>
29 #include <linux/obd_class.h>
30 #include <linux/lustre_net.h>
31 #include <portals/types.h>
32 #include "ptlrpc_internal.h"
34 extern int request_in_callback(ptl_event_t *ev);
/*
 * ptlrpc_check_event: wake-up predicate for a service thread, run under
 * svc->srv_lock.  Returns "ready" when either (a) the thread has been
 * asked to stop (SVC_STOPPING set in thread->t_flags), or (b) one of the
 * service's network interfaces has an event pending on its Portals event
 * queue.  Interfaces are polled round-robin starting at
 * svc->srv_interface_rover so no single interface starves the others;
 * when an event is dequeued into *event, SVC_EVENT is set in
 * thread->t_flags and the rover advances past the interface that fired.
 * NOTE(review): this listing elides some lines (declarations of
 * rc/i/idx, the SVC_STOPPING early exit and the final return) -- confirm
 * the exact return convention against the full source.
 */
36 static int ptlrpc_check_event(struct ptlrpc_service *svc,
37 struct ptlrpc_thread *thread, ptl_event_t *event)
39 struct ptlrpc_srv_ni *srv_ni;
45 spin_lock(&svc->srv_lock);
/* A stop request takes precedence over any pending event */
47 if (thread->t_flags & SVC_STOPPING)
/* Caller must have consumed any previous event before re-checking */
50 LASSERT ((thread->t_flags & SVC_EVENT) == 0);
51 LASSERT (ptlrpc_ninterfaces > 0);
/* Round-robin poll of every interface's event queue */
53 for (i = 0; i < ptlrpc_ninterfaces; i++) {
54 idx = (svc->srv_interface_rover + i) % ptlrpc_ninterfaces;
55 srv_ni = &svc->srv_interfaces[idx];
/* EQ must have been allocated in ptlrpc_init_svc() */
57 LASSERT (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE));
59 rc = PtlEQGet(srv_ni->sni_eq_h, event);
62 /* next time start with the next interface */
63 svc->srv_interface_rover = (idx+1) % ptlrpc_ninterfaces;
64 thread->t_flags |= SVC_EVENT;
/* Any PtlEQGet result other than the expected success/empty codes is a bug */
71 CERROR("BUG: PtlEQGet returned %d\n", rc);
78 spin_unlock(&svc->srv_lock);
/*
 * ptlrpc_init_svc: allocate and initialise a ptlrpc service listening on
 * req_portal across all configured network interfaces.
 *
 *   nevents      - depth of the Portals event queue per interface
 *   nbufs        - number of request buffers posted per interface
 *   bufsize      - size of each request buffer
 *   max_req_size - largest single request accepted
 *   handler      - callback invoked for each incoming request
 *   name         - service name (stored by reference, not copied)
 *   obddev       - device used for lprocfs registration
 *
 * Returns the new service, or (per the elided error paths) presumably
 * NULL on failure after ptlrpc_unregister_service() teardown -- TODO
 * confirm against full source; this listing elides the allocation-failure
 * checks, loop closings and RETURN statements.
 */
82 struct ptlrpc_service *
83 ptlrpc_init_svc(__u32 nevents, __u32 nbufs,
84 __u32 bufsize, __u32 max_req_size,
85 int req_portal, int rep_portal,
86 svc_handler_t handler, char *name,
87 struct obd_device *obddev)
90 struct ptlrpc_service *service;
91 struct ptlrpc_srv_ni *srv_ni;
94 LASSERT (ptlrpc_ninterfaces > 0);
/* The service struct ends in a flexible per-interface array; size it
 * for ptlrpc_ninterfaces entries */
96 ssize = offsetof (struct ptlrpc_service,
97 srv_interfaces[ptlrpc_ninterfaces]);
98 OBD_ALLOC(service, ssize);
102 service->srv_name = name;
103 spin_lock_init(&service->srv_lock);
104 INIT_LIST_HEAD(&service->srv_threads);
105 init_waitqueue_head(&service->srv_waitq);
107 service->srv_max_req_size = max_req_size;
108 service->srv_buf_size = bufsize;
110 service->srv_rep_portal = rep_portal;
111 service->srv_req_portal = req_portal;
112 service->srv_handler = handler;
113 service->srv_interface_rover = 0;
115 /* First initialise enough for early teardown */
116 for (i = 0; i < ptlrpc_ninterfaces; i++) {
117 srv_ni = &service->srv_interfaces[i];
119 srv_ni->sni_service = service;
120 srv_ni->sni_ni = &ptlrpc_interfaces[i];
/* PTL_HANDLE_NONE marks "EQ not yet allocated" for the cleanup path */
121 srv_ni->sni_eq_h = PTL_HANDLE_NONE;
122 INIT_LIST_HEAD(&srv_ni->sni_rqbds);
123 srv_ni->sni_nrqbds = 0;
124 atomic_set(&srv_ni->sni_nrqbds_receiving, 0);
127 /* Now allocate the event queue and request buffers, assuming all
128 * interfaces require the same level of buffering. */
129 for (i = 0; i < ptlrpc_ninterfaces; i++) {
130 srv_ni = &service->srv_interfaces[i];
131 CDEBUG (D_NET, "%s: initialising interface %s\n", name,
132 srv_ni->sni_ni->pni_name);
/* request_in_callback fires as each request arrives in a buffer */
134 rc = PtlEQAlloc(srv_ni->sni_ni->pni_ni_h, nevents,
135 request_in_callback, &(srv_ni->sni_eq_h));
137 CERROR("%s.%d: PtlEQAlloc on %s failed: %d\n",
138 name, i, srv_ni->sni_ni->pni_name, rc);
/* Post nbufs request buffers on this interface */
142 for (j = 0; j < nbufs; j++) {
143 struct ptlrpc_request_buffer_desc *rqbd;
145 OBD_ALLOC(rqbd, sizeof(*rqbd));
147 CERROR ("%s.%d: Can't allocate request "
148 "descriptor %d on %s\n",
149 name, i, srv_ni->sni_nrqbds,
150 srv_ni->sni_ni->pni_name);
154 rqbd->rqbd_srv_ni = srv_ni;
155 rqbd->rqbd_me_h = PTL_HANDLE_NONE;
156 atomic_set(&rqbd->rqbd_refcount, 0);
158 OBD_ALLOC(rqbd->rqbd_buffer, service->srv_buf_size);
159 if (rqbd->rqbd_buffer == NULL) {
160 CERROR ("%s.%d: Can't allocate request "
162 name, i, srv_ni->sni_nrqbds,
163 srv_ni->sni_ni->pni_name);
/* Free the descriptor; its buffer was never allocated */
164 OBD_FREE(rqbd, sizeof(*rqbd));
167 list_add(&rqbd->rqbd_list, &srv_ni->sni_rqbds);
168 srv_ni->sni_nrqbds++;
/* Attach a match entry so the buffer starts receiving requests */
170 ptlrpc_link_svc_me(rqbd);
174 ptlrpc_lprocfs_register_service(obddev, service);
176 CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
177 service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);
/* Error path: tear down whatever was initialised above */
181 ptlrpc_unregister_service(service);
/*
 * handle_incoming_request: unpack one received request from its buffer,
 * resolve the sender's export/connection, dispatch it to the service
 * handler, then release the connection/export references and re-post the
 * request buffer if this was the last reference to it.
 * NOTE(review): the `ptl_event_t *event` parameter line is elided from
 * this listing (the body clearly uses `event`) -- confirm the full
 * signature against the original source.  Early-error paths and the
 * final return are also elided.
 */
185 static int handle_incoming_request(struct obd_device *obddev,
186 struct ptlrpc_service *svc,
188 struct ptlrpc_request *request)
/* The buffer descriptor was stashed in the MD's user pointer when posted */
190 struct ptlrpc_request_buffer_desc *rqbd = event->mem_desc.user_ptr;
193 /* FIXME: If we move to an event-driven model, we should put the request
194 * on the stack of mds_handle instead. */
/* Sanity: the event must refer to a live, contiguous buffer of ours */
196 LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
197 LASSERT ((event->mem_desc.options & (PTL_MD_IOV | PTL_MD_KIOV)) == 0);
198 LASSERT (rqbd->rqbd_srv_ni->sni_service == svc);
199 LASSERT (rqbd->rqbd_buffer == event->mem_desc.start);
200 LASSERT (event->offset + event->mlength <= svc->srv_buf_size);
/* Build the request in the caller-supplied scratch struct */
202 memset(request, 0, sizeof(*request));
203 spin_lock_init (&request->rq_lock);
204 INIT_LIST_HEAD(&request->rq_list);
205 request->rq_svc = svc;
206 request->rq_obd = obddev;
207 request->rq_xid = event->match_bits;
/* Message sits in the posted buffer at the event's offset */
208 request->rq_reqmsg = event->mem_desc.start + event->offset;
209 request->rq_reqlen = event->mlength;
212 /* Clear request swab mask; this is a new request */
213 request->rq_req_swab_mask = 0;
215 rc = lustre_unpack_msg (request->rq_reqmsg, request->rq_reqlen);
217 CERROR ("error unpacking request: ptl %d from "LPX64
218 " xid "LPU64"\n", svc->srv_req_portal,
219 event->initiator.nid, request->rq_xid);
223 if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
224 CERROR("wrong packet type received (type=%u)\n",
225 request->rq_reqmsg->type);
229 CDEBUG(D_NET, "got req "LPD64" (md: %p + %d)\n", request->rq_xid,
230 event->mem_desc.start, event->offset);
232 request->rq_peer.peer_nid = event->initiator.nid;
233 request->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;
/* Look up the client's export from the handle in the request message */
235 request->rq_export = class_conn2export(&request->rq_reqmsg->handle);
237 if (request->rq_export) {
238 request->rq_connection = request->rq_export->exp_connection;
239 ptlrpc_connection_addref(request->rq_connection);
240 request->rq_export->exp_last_request_time =
241 LTIME_S(CURRENT_TIME);
243 /* create a (hopefully temporary) connection that will be used
244 * to send the reply if this call doesn't create an export.
245 * XXX revisit this when we revamp ptlrpc */
246 request->rq_connection =
247 ptlrpc_get_connection(&request->rq_peer, NULL);
250 CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid:pid:xid:ni:nid:opc %s:%s:%d:"
251 LPU64":%s:"LPX64":%d\n",
253 (request->rq_export ?
254 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
255 request->rq_reqmsg->status, request->rq_xid,
256 rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
257 request->rq_reqmsg->opc);
/* Dispatch to the service's handler (e.g. MDS/OST request handler) */
259 rc = svc->srv_handler(request);
260 CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid:pid:xid:ni:nid:opc %s:%s:%d:"
261 LPU64":%s:"LPX64":%d\n",
263 (request->rq_export ?
264 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
265 request->rq_reqmsg->status, request->rq_xid,
266 rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
267 request->rq_reqmsg->opc);
/* Drop the references taken (or the temporary connection created) above */
269 ptlrpc_put_connection(request->rq_connection);
270 if (request->rq_export != NULL)
271 class_export_put(request->rq_export);
274 if (atomic_dec_and_test (&rqbd->rqbd_refcount)) /* last reference? */
275 ptlrpc_link_svc_me (rqbd);
280 /* Don't use daemonize, it removes fs struct from new thread (bug 418) */
/*
 * ptlrpc_daemonize: hand-rolled kernel-thread detach that, unlike the
 * stock daemonize(), keeps the fs struct intact (see bug 418 above).
 * NOTE(review): most of the body is elided in this listing; only the
 * session assignment is visible -- confirm the remaining detach steps
 * against the full source.
 */
281 void ptlrpc_daemonize(void)
/* Detach from the parent's session */
285 current->session = 1;
/*
 * ptlrpc_main: body of a ptlrpc service thread (started via
 * kernel_thread from ptlrpc_start_thread).  Sets up signal blocking and
 * the thread name, allocates a per-thread event and request scratch
 * buffer, announces SVC_RUNNING, then loops: wait for an event or a stop
 * request (ptlrpc_check_event), record lprocfs timing statistics for the
 * previous request, and hand the event to handle_incoming_request().
 * On exit it frees the scratch buffers, sets SVC_STOPPED and wakes the
 * controlling thread.
 * NOTE(review): this listing elides several lines (loop header, some
 * error/exit labels, the final RETURN) -- confirm flow against full
 * source.
 */
293 static int ptlrpc_main(void *arg)
295 struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
296 struct obd_device *obddev = data->dev;
297 struct ptlrpc_service *svc = data->svc;
298 struct ptlrpc_thread *thread = data->thread;
299 struct ptlrpc_request *request;
/* Cycle counters for per-request service-time statistics */
303 cycles_t workdone_time;
304 cycles_t svc_workcycles;
/* Block all signals: service threads are controlled via t_flags only */
310 SIGNAL_MASK_LOCK(current, flags);
311 sigfillset(&current->blocked);
313 SIGNAL_MASK_UNLOCK(current, flags);
/* UML kernels embed the host pid in the thread name for debugging */
315 #if defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20))
316 sprintf(current->comm, "%s|%d", data->name,current->thread.extern_pid);
317 #elif defined(__arch_um__) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
318 sprintf(current->comm, "%s|%d", data->name,
319 current->thread.mode.tt.extern_pid);
321 strcpy(current->comm, data->name);
/* Per-thread scratch storage, freed on the exit path below */
325 OBD_ALLOC(event, sizeof(*event));
327 GOTO(out, rc = -ENOMEM);
328 OBD_ALLOC(request, sizeof(*request));
330 GOTO(out_event, rc = -ENOMEM);
332 /* Record that the thread is running */
333 thread->t_flags = SVC_RUNNING;
334 svc_workcycles = workdone_time = 0;
/* Unblock ptlrpc_start_thread(), which waits for SVC_RUNNING */
335 wake_up(&thread->t_ctl_waitq);
337 /* XXX maintain a list of all managed devices: insert here */
339 /* And now, loop forever on requests */
341 struct l_wait_info lwi = { 0 };
342 l_wait_event(svc->srv_waitq,
343 ptlrpc_check_event(svc, thread, event), &lwi);
345 if (thread->t_flags & SVC_STOPPING) {
346 spin_lock(&svc->srv_lock);
347 thread->t_flags &= ~SVC_STOPPING;
348 spin_unlock(&svc->srv_lock);
354 if (thread->t_flags & SVC_EVENT) {
355 cycles_t workstart_time;
356 spin_lock(&svc->srv_lock);
357 thread->t_flags &= ~SVC_EVENT;
358 /* Update Service Statistics */
359 workstart_time = get_cycles();
360 if (workdone_time && (svc->svc_counters != NULL)) {
361 /* Stats for req(n) are updated just before
362 * req(n+1) is executed. This avoids need to
363 * reacquire svc->srv_lock after
364 * call to handling_request().
368 LPROCFS_COUNTER_INCR(&svc->svc_counters->cntr[PTLRPC_REQWAIT_CNTR],
370 event->arrival_time));
372 LPROCFS_COUNTER_INCR(&svc->svc_counters->cntr[PTLRPC_SVCEQDEPTH_CNTR],
373 0); /* Wait for b_eq branch */
375 LPROCFS_COUNTER_INCR(&svc->svc_counters->cntr[PTLRPC_SVCIDLETIME_CNTR],
378 /* previous request */
380 opcode_offset(request->rq_reqmsg->opc);
381 if (opc_offset >= 0) {
382 LASSERT(opc_offset < LUSTRE_MAX_OPCODES);
383 LPROCFS_COUNTER_INCR(&svc->svc_counters->cntr[PTLRPC_LAST_CNTR+opc_offset], svc_workcycles);
386 spin_unlock(&svc->srv_lock);
388 rc = handle_incoming_request(obddev, svc, event,
/* Remember how long this request took, for the next stats pass */
390 workdone_time = get_cycles();
391 svc_workcycles = workdone_time - workstart_time;
/* Neither SVC_STOPPING nor SVC_EVENT: should not happen */
395 CERROR("unknown break in service");
401 /* NB should wait for all SENT callbacks to complete before exiting
402 * here. Unfortunately at this time there is no way to track this
405 OBD_FREE(request, sizeof(*request));
407 OBD_FREE(event, sizeof(*event));
/* Announce exit; ptlrpc_stop_thread() waits for SVC_STOPPED */
409 thread->t_flags = SVC_STOPPED;
410 wake_up(&thread->t_ctl_waitq);
412 CDEBUG(D_NET, "service thread exiting, process %d: rc = %d\n",
/*
 * ptlrpc_stop_thread: ask one service thread to exit and wait until it
 * has.  Sets SVC_STOPPING under srv_lock, wakes the service waitqueue so
 * the thread re-evaluates ptlrpc_check_event(), then blocks until the
 * thread flags itself SVC_STOPPED in ptlrpc_main().
 */
417 static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
418 struct ptlrpc_thread *thread)
420 struct l_wait_info lwi = { 0 };
422 spin_lock(&svc->srv_lock);
/* Overwrites (not ORs) t_flags; thread will notice on next wakeup */
423 thread->t_flags = SVC_STOPPING;
424 spin_unlock(&svc->srv_lock);
426 wake_up(&svc->srv_waitq);
427 l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
/*
 * ptlrpc_stop_all_threads: stop and free every thread on the service's
 * srv_threads list.  srv_lock is dropped around the blocking
 * ptlrpc_stop_thread() call and retaken to unlink and free each entry.
 */
431 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
433 spin_lock(&svc->srv_lock);
434 while (!list_empty(&svc->srv_threads)) {
435 struct ptlrpc_thread *thread;
436 thread = list_entry(svc->srv_threads.next, struct ptlrpc_thread,
/* Must not hold srv_lock while blocking in ptlrpc_stop_thread() */
438 spin_unlock(&svc->srv_lock);
439 ptlrpc_stop_thread(svc, thread);
440 spin_lock(&svc->srv_lock);
441 list_del(&thread->t_link);
442 OBD_FREE(thread, sizeof(*thread));
444 spin_unlock(&svc->srv_lock);
/*
 * ptlrpc_start_thread: allocate a ptlrpc_thread, link it onto the
 * service's thread list, spawn ptlrpc_main() via kernel_thread(), and
 * wait until the new thread reports SVC_RUNNING before returning.
 * NOTE(review): the lines initialising the ptlrpc_svc_data `d` (dev/svc/
 * thread/name) and the return statements are elided from this listing --
 * confirm against full source.
 */
447 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
450 struct l_wait_info lwi = { 0 };
451 struct ptlrpc_svc_data d;
452 struct ptlrpc_thread *thread;
456 OBD_ALLOC(thread, sizeof(*thread));
459 init_waitqueue_head(&thread->t_ctl_waitq);
/* Publish the thread so ptlrpc_stop_all_threads() can find it */
466 spin_lock(&svc->srv_lock);
467 list_add(&thread->t_link, &svc->srv_threads);
468 spin_unlock(&svc->srv_lock);
470 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
471 * just drop the VM and FILES in ptlrpc_daemonize() right away.
473 rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
475 CERROR("cannot start thread: %d\n", rc);
476 OBD_FREE(thread, sizeof(*thread));
/* `d` lives on this stack: safe only because we wait for SVC_RUNNING,
 * by which time ptlrpc_main() has copied out what it needs */
479 l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);
/*
 * ptlrpc_unregister_service: tear down a service created by
 * ptlrpc_init_svc().  All service threads must already have been stopped
 * (srv_threads empty).  For each interface: unlink and free every posted
 * request buffer descriptor, then free the Portals event queue.  Finally
 * unregister from lprocfs and free the service struct itself.
 * NOTE(review): this listing elides some lines (loop/if closings, the
 * OBD_FREE(service, ...) call head and the return) -- confirm against
 * full source.
 */
484 int ptlrpc_unregister_service(struct ptlrpc_service *service)
487 struct ptlrpc_srv_ni *srv_ni;
489 LASSERT (list_empty (&service->srv_threads));
491 /* XXX We could reply (with failure) to all buffered requests
492 * _after_ unlinking _all_ the request buffers, but _before_
496 for (i = 0; i < ptlrpc_ninterfaces; i++) {
497 srv_ni = &service->srv_interfaces[i];
498 CDEBUG (D_NET, "%s: tearing down interface %s\n",
499 service->srv_name, srv_ni->sni_ni->pni_name);
501 while (!list_empty (&srv_ni->sni_rqbds)) {
502 struct ptlrpc_request_buffer_desc *rqbd =
503 list_entry (srv_ni->sni_rqbds.next,
504 struct ptlrpc_request_buffer_desc,
507 list_del (&rqbd->rqbd_list);
509 LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
510 /* refcount could be anything; it's possible for
511 * the buffers to continued to get filled after all
512 * the server threads exited. But we know they
/* Failure here is tolerated: see the race note below */
516 (void) PtlMEUnlink(rqbd->rqbd_me_h);
517 /* The callback handler could have unlinked this ME
518 * already (we're racing with her) but it's safe to
519 * ensure it _has_ been unlinked.
522 OBD_FREE (rqbd->rqbd_buffer, service->srv_buf_size);
523 OBD_FREE (rqbd, sizeof (*rqbd));
524 srv_ni->sni_nrqbds--;
527 LASSERT (srv_ni->sni_nrqbds == 0);
/* EQ may be PTL_HANDLE_NONE if ptlrpc_init_svc() failed part-way */
529 if (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE)) {
530 rc = PtlEQFree(srv_ni->sni_eq_h);
532 CERROR("%s.%d: PtlEQFree failed on %s: %d\n",
533 service->srv_name, i,
534 srv_ni->sni_ni->pni_name, rc);
538 ptlrpc_lprocfs_unregister_service(service);
/* Free with the same flexible-array size used at allocation */
541 offsetof (struct ptlrpc_service,
542 srv_interfaces[ptlrpc_ninterfaces]));