1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_RPC
25 #include <liblustre.h>
26 #include <linux/kp30.h>
28 #include <linux/obd_support.h>
29 #include <linux/obd_class.h>
30 #include <linux/lustre_net.h>
31 #include <portals/types.h>
32 #include "ptlrpc_internal.h"
34 extern int request_in_callback(ptl_event_t *ev);
/* Wake-up predicate for service threads (used with l_wait_event_exclusive
 * in ptlrpc_main): polls every network interface's Portals event queue,
 * round-robin starting after the last-served interface, and reports
 * whether this thread has work -- either an event was pulled into *event
 * (SVC_EVENT set) or the thread is being asked to stop.
 *
 * NOTE(review): this listing is sampled -- the embedded line numbers are
 * discontiguous (36, 37, 39, ...), so the local declarations (i, idx, rc),
 * the handling of the PtlEQGet() result and the function's return
 * statements are not visible here.  Comments describe visible lines only.
 */
36 static int ptlrpc_check_event(struct ptlrpc_service *svc,
37 struct ptlrpc_thread *thread, ptl_event_t *event)
39 struct ptlrpc_srv_ni *srv_ni;
/* Thread flags and the interface rover are protected by srv_lock. */
43 spin_lock(&svc->srv_lock);
/* A pending stop request wins over event delivery. */
45 if (thread->t_flags & SVC_STOPPING)
/* Caller must have consumed the previously reported event. */
48 LASSERT ((thread->t_flags & SVC_EVENT) == 0);
49 LASSERT (ptlrpc_ninterfaces > 0);
/* Scan all interfaces, starting at the rover so no NI is starved. */
51 for (i = 0; i < ptlrpc_ninterfaces; i++) {
52 idx = (svc->srv_interface_rover + i) % ptlrpc_ninterfaces;
53 srv_ni = &svc->srv_interfaces[idx];
/* The EQ was allocated in ptlrpc_init_svc(); must be valid here. */
55 LASSERT (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE));
/* Non-blocking poll; on success *event holds the incoming request. */
57 rc = PtlEQGet(srv_ni->sni_eq_h, event);
60 /* next time start with the next interface */
61 svc->srv_interface_rover = (idx+1) % ptlrpc_ninterfaces;
/* Tell ptlrpc_main() this wakeup carries a real event. */
62 thread->t_flags |= SVC_EVENT;
/* EQ overflow drops events: affected clients will see timeouts. */
69 CWARN("Event queue overflow (bug 2125): timeouts will "
/* Any other PtlEQGet() return code is unexpected. */
74 CERROR("BUG: PtlEQGet returned %d\n", rc);
81 spin_unlock(&svc->srv_lock);
/* Create and start an RPC service.
 *
 * Allocates the service structure (sized for ptlrpc_ninterfaces
 * per-interface slots), then for every interface allocates a Portals
 * event queue (events delivered to request_in_callback) and 'nbufs'
 * request buffers of 'bufsize' bytes each, linking each buffer into the
 * network via ptlrpc_link_svc_me().  Registers with /proc when
 * 'proc_entry' is given.
 *
 * nevents      - depth of each interface's event queue
 * nbufs        - request buffers posted per interface
 * bufsize      - size of each request buffer
 * max_req_size - largest single request accepted
 * req_portal/rep_portal - portals to listen/reply on
 * handler      - per-request callback run by service threads
 *
 * NOTE(review): sampled listing -- local declarations (i, j, rc, ssize),
 * allocation-failure branches and the return statements fall in the
 * gaps between the embedded line numbers and are not shown.  On the
 * visible failure path the service is torn down via
 * ptlrpc_unregister_service().
 */
85 struct ptlrpc_service * ptlrpc_init_svc(__u32 nevents, __u32 nbufs,
86 __u32 bufsize, __u32 max_req_size,
87 int req_portal, int rep_portal,
88 svc_handler_t handler, char *name,
89 struct proc_dir_entry *proc_entry)
92 struct ptlrpc_service *service;
93 struct ptlrpc_srv_ni *srv_ni;
96 LASSERT (ptlrpc_ninterfaces > 0);
/* Service struct ends in a flexible per-interface array; size it so
 * srv_interfaces[0..ptlrpc_ninterfaces-1] are all allocated. */
98 ssize = offsetof (struct ptlrpc_service,
99 srv_interfaces[ptlrpc_ninterfaces]);
100 OBD_ALLOC(service, ssize);
104 service->srv_name = name;
105 spin_lock_init(&service->srv_lock);
106 INIT_LIST_HEAD(&service->srv_threads);
107 init_waitqueue_head(&service->srv_waitq);
109 service->srv_max_req_size = max_req_size;
110 service->srv_buf_size = bufsize;
112 service->srv_rep_portal = rep_portal;
113 service->srv_req_portal = req_portal;
114 service->srv_handler = handler;
115 service->srv_interface_rover = 0;
117 /* First initialise enough for early teardown */
118 for (i = 0; i < ptlrpc_ninterfaces; i++) {
119 srv_ni = &service->srv_interfaces[i];
121 srv_ni->sni_service = service;
122 srv_ni->sni_ni = &ptlrpc_interfaces[i];
/* EQ handle starts invalid so teardown can tell what was allocated. */
123 srv_ni->sni_eq_h = PTL_HANDLE_NONE;
124 INIT_LIST_HEAD(&srv_ni->sni_rqbds);
125 srv_ni->sni_nrqbds = 0;
126 atomic_set(&srv_ni->sni_nrqbds_receiving, 0);
129 /* Now allocate the event queue and request buffers, assuming all
130 * interfaces require the same level of buffering. */
131 for (i = 0; i < ptlrpc_ninterfaces; i++) {
132 srv_ni = &service->srv_interfaces[i];
133 CDEBUG (D_NET, "%s: initialising interface %s\n", name,
134 srv_ni->sni_ni->pni_name);
/* Incoming-request events on this NI fire request_in_callback. */
136 rc = PtlEQAlloc(srv_ni->sni_ni->pni_ni_h, nevents,
137 request_in_callback, &(srv_ni->sni_eq_h));
139 CERROR("%s.%d: PtlEQAlloc on %s failed: %d\n",
140 name, i, srv_ni->sni_ni->pni_name, rc);
/* Post 'nbufs' request buffer descriptors on this interface. */
144 for (j = 0; j < nbufs; j++) {
145 struct ptlrpc_request_buffer_desc *rqbd;
147 OBD_ALLOC_WAIT(rqbd, sizeof(*rqbd));
149 CERROR ("%s.%d: Can't allocate request "
150 "descriptor %d on %s\n",
151 name, i, srv_ni->sni_nrqbds,
152 srv_ni->sni_ni->pni_name);
156 rqbd->rqbd_srv_ni = srv_ni;
157 rqbd->rqbd_me_h = PTL_HANDLE_NONE;
158 atomic_set(&rqbd->rqbd_refcount, 0);
160 OBD_ALLOC_WAIT(rqbd->rqbd_buffer, service->srv_buf_size);
161 if (rqbd->rqbd_buffer == NULL) {
162 CERROR ("%s.%d: Can't allocate request "
164 name, i, srv_ni->sni_nrqbds,
165 srv_ni->sni_ni->pni_name);
166 OBD_FREE(rqbd, sizeof(*rqbd));
/* Track the buffer on the interface before exposing it to the net,
 * so teardown can always find it. */
169 list_add(&rqbd->rqbd_list, &srv_ni->sni_rqbds);
170 srv_ni->sni_nrqbds++;
/* Attach the buffer's match entry so requests can land in it. */
172 ptlrpc_link_svc_me(rqbd);
176 if (proc_entry != NULL)
177 ptlrpc_lprocfs_register_service(proc_entry, service);
179 CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
180 service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);
/* Failure path: release everything initialised so far. */
184 ptlrpc_unregister_service(service);
/* Unpack one incoming RPC from the Portals event, build a request
 * structure, dispatch it to the service handler, then release the
 * connection/export references and relink the request buffer when this
 * was its last outstanding reference.
 *
 * NOTE(review): sampled listing -- the parameter list is incomplete in
 * this view (the code below reads 'event', presumably a
 * 'ptl_event_t *event' parameter on a dropped line), and the error
 * returns after the CERROR branches are likewise not visible.
 */
188 static int handle_incoming_request(struct obd_device *obddev,
189 struct ptlrpc_service *svc,
191 struct ptlrpc_request *request)
/* The MD's user pointer was set to the buffer descriptor when posted. */
193 struct ptlrpc_request_buffer_desc *rqbd = event->mem_desc.user_ptr;
196 /* FIXME: If we move to an event-driven model, we should put the request
197 * on the stack of mds_handle instead. */
/* Sanity: the event must refer to a live, flat (non-iov) buffer owned
 * by this service, and the payload must fit inside it. */
199 LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
200 LASSERT ((event->mem_desc.options & (PTL_MD_IOV | PTL_MD_KIOV)) == 0);
201 LASSERT (rqbd->rqbd_srv_ni->sni_service == svc);
202 LASSERT (rqbd->rqbd_buffer == event->mem_desc.start);
203 LASSERT (event->offset + event->mlength <= svc->srv_buf_size);
/* Build the request in place; the message stays in the posted buffer. */
205 memset(request, 0, sizeof(*request));
206 spin_lock_init (&request->rq_lock);
207 INIT_LIST_HEAD(&request->rq_list);
208 request->rq_svc = svc;
209 request->rq_obd = obddev;
210 request->rq_xid = event->match_bits;
211 request->rq_reqmsg = event->mem_desc.start + event->offset;
212 request->rq_reqlen = event->mlength;
215 /* Clear request swab mask; this is a new request */
216 request->rq_req_swab_mask = 0;
/* Validate magic/version and byte-swap the wire message if needed. */
218 rc = lustre_unpack_msg (request->rq_reqmsg, request->rq_reqlen);
220 CERROR ("error unpacking request: ptl %d from "LPX64
221 " xid "LPU64"\n", svc->srv_req_portal,
222 event->initiator.nid, request->rq_xid);
226 if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
227 CERROR("wrong packet type received (type=%u)\n",
228 request->rq_reqmsg->type);
232 CDEBUG(D_NET, "got req "LPD64" (md: %p + %d)\n", request->rq_xid,
233 event->mem_desc.start, event->offset);
235 request->rq_peer.peer_nid = event->initiator.nid;
236 request->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;
/* Look up the client's export from the handle in the message (takes a
 * reference, dropped below when non-NULL). */
238 request->rq_export = class_conn2export(&request->rq_reqmsg->handle);
240 if (request->rq_export) {
241 request->rq_connection = request->rq_export->exp_connection;
242 ptlrpc_connection_addref(request->rq_connection);
/* A conn_cnt older than the export's means the client reconnected
 * since sending this request -- drop it rather than act on it. */
243 if (request->rq_reqmsg->conn_cnt <
244 request->rq_export->exp_conn_cnt) {
245 DEBUG_REQ(D_ERROR, request,
246 "DROPPING req from old connection %d < %d",
247 request->rq_reqmsg->conn_cnt,
248 request->rq_export->exp_conn_cnt);
252 request->rq_export->exp_last_request_time =
253 LTIME_S(CURRENT_TIME);
255 /* create a (hopefully temporary) connection that will be used
256 * to send the reply if this call doesn't create an export.
257 * XXX revisit this when we revamp ptlrpc */
258 request->rq_connection =
259 ptlrpc_get_connection(&request->rq_peer, NULL);
262 CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
263 "%s:%s+%d:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
264 (request->rq_export ?
265 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
266 (request->rq_export ?
267 atomic_read(&request->rq_export->exp_refcount) : -99),
268 request->rq_reqmsg->status, request->rq_xid,
269 rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
270 request->rq_reqmsg->opc);
/* Dispatch to the service-specific handler registered at init time. */
272 rc = svc->srv_handler(request);
273 CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
274 "%s:%s+%d:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
275 (request->rq_export ?
276 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
277 (request->rq_export ?
278 atomic_read(&request->rq_export->exp_refcount) : -99),
279 request->rq_reqmsg->status, request->rq_xid,
280 rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
281 request->rq_reqmsg->opc);
/* Drop the references taken above (either branch set rq_connection). */
284 ptlrpc_put_connection(request->rq_connection);
285 if (request->rq_export != NULL)
286 class_export_put(request->rq_export);
/* Repost the request buffer once nothing else references it. */
289 if (atomic_dec_and_test (&rqbd->rqbd_refcount)) /* last reference? */
290 ptlrpc_link_svc_me (rqbd);
295 /* Don't use daemonize, it removes fs struct from new thread (bug 418) */
/* Detach a freshly kernel_thread()ed service thread from its parent.
 * NOTE(review): sampled listing -- body lines between the embedded
 * numbers 296 and 299 (and after 299) are not shown; only the helper
 * call is visible here. */
296 void ptlrpc_daemonize(void)
299 lustre_daemonize_helper();
/*
 * timeval_sub - difference between two timestamps, in microseconds.
 *
 * Returns (*large - *small) as a signed microsecond count; callers pass
 * the later time as 'large'.  Restored from a line-numbered listing
 * whose brace lines had been dropped; the arithmetic is unchanged.
 *
 * NOTE(review): on platforms with 32-bit long the multiplication
 * overflows for intervals over ~35 minutes -- acceptable here since the
 * callers measure request wait/service latencies.
 */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
               (large->tv_usec - small->tv_usec);
}
/* Body of every ptlrpc service thread (started via kernel_thread from
 * ptlrpc_start_thread): blocks all signals, names itself, allocates a
 * private event and request buffer, signals SVC_RUNNING to its starter,
 * then loops -- wait for an event via ptlrpc_check_event(), time it,
 * drop requests already older than obd_timeout, otherwise dispatch via
 * handle_incoming_request() -- until SVC_STOPPING is seen.
 *
 * NOTE(review): sampled listing -- the embedded line numbers jump, so
 * local declarations (event, rc, total, flags), several braces, break
 * statements and the loop construct itself are not visible here.
 */
310 static int ptlrpc_main(void *arg)
312 struct ptlrpc_svc_data *data = arg;
313 struct obd_device *obddev = data->dev;
314 struct ptlrpc_service *svc = data->svc;
315 struct ptlrpc_thread *thread = data->thread;
316 struct ptlrpc_request *request;
319 struct timeval start_time, finish_time;
/* Service threads ignore all signals. */
327 SIGNAL_MASK_LOCK(current, flags);
/* NOTE(review): '¤t' below is mojibake -- almost certainly a
 * mis-decoded '&current' ('&curren;' HTML entity); needs fixing before
 * this listing can compile.  Left byte-identical here. */
328 sigfillset(¤t->blocked);
330 SIGNAL_MASK_UNLOCK(current, flags);
332 THREAD_NAME(current->comm, "%s", data->name);
/* Per-thread scratch buffers for the event and the unpacked request. */
335 OBD_ALLOC(event, sizeof(*event));
337 GOTO(out, rc = -ENOMEM);
338 OBD_ALLOC(request, sizeof(*request));
340 GOTO(out_event, rc = -ENOMEM);
342 /* Record that the thread is running */
343 thread->t_flags = SVC_RUNNING;
/* Unblock ptlrpc_start_thread(), which waits for SVC_RUNNING. */
344 wake_up(&thread->t_ctl_waitq);
346 /* XXX maintain a list of all managed devices: insert here */
/* Seed finish_time so the first idle-time sample has a baseline. */
348 do_gettimeofday(&finish_time);
349 /* And now, loop forever on requests */
351 struct l_wait_info lwi = { 0 };
/* Exclusive wait: only one thread wakes per incoming event. */
352 l_wait_event_exclusive(svc->srv_waitq,
353 ptlrpc_check_event(svc, thread, event),
/* Re-check flags under the lock: stop request beats event handling. */
356 spin_lock(&svc->srv_lock);
357 if (thread->t_flags & SVC_STOPPING) {
358 thread->t_flags &= ~SVC_STOPPING;
359 spin_unlock(&svc->srv_lock);
/* Woken without an event and without a stop request: shouldn't happen. */
365 if (!(thread->t_flags & SVC_EVENT)) {
366 CERROR("unknown flag in service");
367 spin_unlock(&svc->srv_lock);
/* Consume the event flag before processing it. */
373 thread->t_flags &= ~SVC_EVENT;
374 spin_unlock(&svc->srv_lock);
/* 'total' = microseconds the request waited since it arrived. */
376 do_gettimeofday(&start_time);
377 total = timeval_sub(&start_time, &event->arrival_time);
378 if (svc->svc_stats != NULL) {
379 lprocfs_counter_add(svc->svc_stats, PTLRPC_REQWAIT_CNTR,
381 lprocfs_counter_add(svc->svc_stats,
382 PTLRPC_SVCIDLETIME_CNTR,
383 timeval_sub(&start_time,
385 #if 0 /* Wait for b_eq branch */
386 lprocfs_counter_add(svc->svc_stats,
387 PTLRPC_SVCEQDEPTH_CNTR, 0);
/* The client has already timed this request out -- handling it now
 * would only waste work, so drop it instead. */
391 if (total / 1000000 > (long)obd_timeout) {
392 CERROR("Dropping request from NID "LPX64" because it's "
393 "%ld seconds old.\n", event->initiator.nid,
394 total / 1000000); /* bug 1502 */
396 CDEBUG(D_HA, "request from NID "LPX64" noticed after "
397 "%ldus\n", event->initiator.nid, total);
398 rc = handle_incoming_request(obddev, svc, event,
/* Measure the service time and log slow requests at D_ERROR. */
401 do_gettimeofday(&finish_time);
402 total = timeval_sub(&finish_time, &start_time);
404 CDEBUG((total / 1000000 > (long)obd_timeout) ? D_ERROR : D_HA,
405 "request "LPU64" from NID "LPX64" processed in %ldus "
406 "(%ldus total)\n", request->rq_xid, event->initiator.nid,
407 total, timeval_sub(&finish_time, &event->arrival_time));
/* Per-opcode stats counter, offset past the common counters. */
409 if (svc->svc_stats != NULL) {
410 int opc = opcode_offset(request->rq_reqmsg->opc);
412 LASSERT(opc < LUSTRE_MAX_OPCODES);
413 lprocfs_counter_add(svc->svc_stats,
414 opc + PTLRPC_LAST_CNTR,
420 /* NB should wait for all SENT callbacks to complete before exiting
421 * here. Unfortunately at this time there is no way to track this
/* Teardown (reverse of the allocations above). */
423 OBD_FREE(request, sizeof(*request));
425 OBD_FREE(event, sizeof(*event));
/* Let ptlrpc_stop_thread() know we are gone. */
427 thread->t_flags = SVC_STOPPED;
428 wake_up(&thread->t_ctl_waitq);
430 CDEBUG(D_NET, "service thread exiting, process %d: rc = %d\n",
/* Ask one service thread to stop and wait until it has exited.
 * Sets SVC_STOPPING under the service lock, wakes all waiters on the
 * service queue (the target thread re-checks its flags on wakeup), then
 * sleeps until the thread reports SVC_STOPPED from ptlrpc_main().
 * NOTE(review): sampled listing -- braces/closing lines fall in the
 * gaps between the embedded line numbers. */
435 static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
436 struct ptlrpc_thread *thread)
438 struct l_wait_info lwi = { 0 };
440 spin_lock(&svc->srv_lock);
441 thread->t_flags = SVC_STOPPING;
442 spin_unlock(&svc->srv_lock);
/* wake_up_all: we can't target the one thread on a shared waitq. */
444 wake_up_all(&svc->srv_waitq);
445 l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
/* Stop and free every thread of a service.  The lock is dropped around
 * ptlrpc_stop_thread() because that call sleeps; the thread is only
 * unlinked and freed after it has reported SVC_STOPPED. */
449 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
451 spin_lock(&svc->srv_lock);
452 while (!list_empty(&svc->srv_threads)) {
453 struct ptlrpc_thread *thread;
454 thread = list_entry(svc->srv_threads.next, struct ptlrpc_thread,
/* Must not hold srv_lock while sleeping in ptlrpc_stop_thread(). */
456 spin_unlock(&svc->srv_lock);
457 ptlrpc_stop_thread(svc, thread);
458 spin_lock(&svc->srv_lock);
459 list_del(&thread->t_link);
460 OBD_FREE(thread, sizeof(*thread));
462 spin_unlock(&svc->srv_lock);
/* Start 'num_threads' service threads named "<base_name>_00",
 * "<base_name>_01", ...  On the first failure, stop every thread
 * already started so the service is left with none.
 * NOTE(review): sampled listing -- the name buffer declaration, the
 * failure test around the CERROR, and the return statements are not
 * visible between the embedded line numbers. */
465 int ptlrpc_start_n_threads(struct obd_device *dev, struct ptlrpc_service *svc,
466 int num_threads, char *base_name)
471 for (i = 0; i < num_threads; i++) {
473 sprintf(name, "%s_%02d", base_name, i);
474 rc = ptlrpc_start_thread(dev, svc, name);
476 CERROR("cannot start %s thread #%d: rc %d\n", base_name,
/* All-or-nothing: unwind the threads started so far. */
478 ptlrpc_stop_all_threads(svc);
/* Start a single service thread: allocate its control structure, link
 * it on the service's thread list, spawn ptlrpc_main() via
 * kernel_thread(), and wait until the new thread signals SVC_RUNNING
 * (so 'd', which lives on this stack, is safe to reuse afterwards).
 * NOTE(review): sampled listing -- the 'name' parameter, the filling of
 * 'd', the allocation-failure branch and the return statements fall in
 * the gaps between the embedded line numbers. */
484 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
487 struct l_wait_info lwi = { 0 };
488 struct ptlrpc_svc_data d;
489 struct ptlrpc_thread *thread;
493 OBD_ALLOC(thread, sizeof(*thread));
496 init_waitqueue_head(&thread->t_ctl_waitq);
503 spin_lock(&svc->srv_lock);
504 list_add(&thread->t_link, &svc->srv_threads);
505 spin_unlock(&svc->srv_lock);
507 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
508 * just drop the VM and FILES in ptlrpc_daemonize() right away.
510 rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
512 CERROR("cannot start thread: %d\n", rc);
513 OBD_FREE(thread, sizeof(*thread));
/* Don't return (and destroy 'd') until the child has read it. */
516 l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);
/* Tear down a service: all threads must already be stopped.  For every
 * interface, unlink and free each posted request buffer, free the
 * Portals event queue, then unregister from /proc and free the service
 * structure itself.  Also serves as the failure-path cleanup for
 * ptlrpc_init_svc(), hence the PTL_HANDLE_NONE checks on partially
 * initialised interfaces.
 * NOTE(review): sampled listing -- local declarations (i, rc), several
 * braces and the final return fall in the gaps between the embedded
 * line numbers, and the function continues past the end of this chunk. */
521 int ptlrpc_unregister_service(struct ptlrpc_service *service)
524 struct ptlrpc_srv_ni *srv_ni;
526 LASSERT (list_empty (&service->srv_threads));
528 /* XXX We could reply (with failure) to all buffered requests
529 * _after_ unlinking _all_ the request buffers, but _before_
533 for (i = 0; i < ptlrpc_ninterfaces; i++) {
534 srv_ni = &service->srv_interfaces[i];
535 CDEBUG (D_NET, "%s: tearing down interface %s\n",
536 service->srv_name, srv_ni->sni_ni->pni_name);
538 while (!list_empty (&srv_ni->sni_rqbds)) {
539 struct ptlrpc_request_buffer_desc *rqbd =
540 list_entry (srv_ni->sni_rqbds.next,
541 struct ptlrpc_request_buffer_desc,
544 list_del (&rqbd->rqbd_list);
546 LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
547 /* refcount could be anything; it's possible for
548 * the buffers to continued to get filled after all
549 * the server threads exited. But we know they
/* Return ignored: unlinking an already-unlinked ME is harmless here. */
553 (void) PtlMEUnlink(rqbd->rqbd_me_h);
554 /* The callback handler could have unlinked this ME
555 * already (we're racing with her) but it's safe to
556 * ensure it _has_ been unlinked.
559 OBD_FREE (rqbd->rqbd_buffer, service->srv_buf_size);
560 OBD_FREE (rqbd, sizeof (*rqbd));
561 srv_ni->sni_nrqbds--;
564 LASSERT (srv_ni->sni_nrqbds == 0);
/* Only free the EQ if init got far enough to allocate it. */
566 if (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE)) {
567 rc = PtlEQFree(srv_ni->sni_eq_h);
569 CERROR("%s.%d: PtlEQFree failed on %s: %d\n",
570 service->srv_name, i,
571 srv_ni->sni_ni->pni_name, rc);
575 ptlrpc_lprocfs_unregister_service(service);
/* Free with the same flexible-array size used in ptlrpc_init_svc(). */
578 offsetof (struct ptlrpc_service,
579 srv_interfaces[ptlrpc_ninterfaces]));