1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
23 #define DEBUG_SUBSYSTEM S_RPC
25 #include <liblustre.h>
26 #include <linux/kp30.h>
28 #include <linux/obd_support.h>
29 #include <linux/obd_class.h>
30 #include <linux/lustre_net.h>
31 #include <portals/types.h>
32 #include "ptlrpc_internal.h"
34 extern int request_in_callback(ptl_event_t *ev);
/*
 * Poll all network interfaces' event queues for one incoming RPC event.
 *
 * Interfaces are scanned round-robin starting at svc->srv_interface_rover
 * so that no single NI starves the others.  When an event is dequeued the
 * rover is advanced and SVC_EVENT is set on the thread.  Called as the
 * wait condition from ptlrpc_main(), under svc->srv_lock for the duration.
 *
 * NOTE(review): this chunk appears truncated in extraction — the opening
 * brace, the declarations of i/idx/rc, the STOPPING early-exit body, the
 * switch on PtlEQGet()'s return code and the final return are not visible.
 * Comments below describe only the visible logic; verify against the full
 * source.
 */
static int ptlrpc_check_event(struct ptlrpc_service *svc,
                              struct ptlrpc_thread *thread, ptl_event_t *event)
        struct ptlrpc_srv_ni *srv_ni;

        /* srv_lock serialises t_flags updates and the interface rover */
        spin_lock(&svc->srv_lock);

        /* A thread told to stop must not consume any further events */
        if (thread->t_flags & SVC_STOPPING)

        LASSERT ((thread->t_flags & SVC_EVENT) == 0);
        LASSERT (ptlrpc_ninterfaces > 0);

        /* Round-robin scan of all interfaces, starting after the last
         * one that yielded an event */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                idx = (svc->srv_interface_rover + i) % ptlrpc_ninterfaces;
                srv_ni = &svc->srv_interfaces[idx];

                /* EQ must have been allocated in ptlrpc_init_svc() */
                LASSERT (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE));

                /* Non-blocking dequeue of the next event on this NI */
                rc = PtlEQGet(srv_ni->sni_eq_h, event);
                /* next time start with the next interface */
                svc->srv_interface_rover = (idx+1) % ptlrpc_ninterfaces;
                thread->t_flags |= SVC_EVENT;
                /* presumably the PTL_EQ_DROPPED arm of a switch on rc —
                 * confirm against the full source */
                CWARN("Event queue overflow (bug 2125): timeouts will "
                /* presumably the default arm: unexpected PtlEQGet rc */
                CERROR("BUG: PtlEQGet returned %d\n", rc);

        spin_unlock(&svc->srv_lock);
/*
 * Allocate and initialise a ptlrpc service listening on @req_portal.
 *
 * One event queue of @nevents entries and @nbufs request buffers of
 * @bufsize bytes are set up on EVERY configured network interface
 * (ptlrpc_ninterfaces), all sharing the same buffering parameters.
 * @handler is the per-request service callback invoked by service
 * threads; @proc_entry, when non-NULL, hooks the service into lprocfs.
 *
 * On failure the partially-built service is torn down via
 * ptlrpc_unregister_service() — hence the two-pass init: the first loop
 * sets every srv_ni to a state that is safe for early teardown before
 * the second loop allocates real resources.
 *
 * NOTE(review): chunk appears truncated — the opening brace, the
 * declarations of i/j/rc/ssize, allocation-failure checks, the
 * "goto failed" paths and the return statements are not visible.
 */
struct ptlrpc_service * ptlrpc_init_svc(__u32 nevents, __u32 nbufs,
                                        __u32 bufsize, __u32 max_req_size,
                                        int req_portal, int rep_portal,
                                        svc_handler_t handler, char *name,
                                        struct proc_dir_entry *proc_entry)
        struct ptlrpc_service *service;
        struct ptlrpc_srv_ni *srv_ni;

        LASSERT (ptlrpc_ninterfaces > 0);

        /* The service struct ends in a variable-length array of per-NI
         * state; size the allocation for all interfaces */
        ssize = offsetof (struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]);
        OBD_ALLOC(service, ssize);

        service->srv_name = name;
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_max_req_size = max_req_size;
        service->srv_buf_size = bufsize;
        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_handler = handler;
        service->srv_interface_rover = 0;

        /* First initialise enough for early teardown */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];

                srv_ni->sni_service = service;
                srv_ni->sni_ni = &ptlrpc_interfaces[i];
                /* PTL_HANDLE_NONE marks "no EQ allocated yet" so teardown
                 * knows what to free */
                srv_ni->sni_eq_h = PTL_HANDLE_NONE;
                INIT_LIST_HEAD(&srv_ni->sni_rqbds);
                srv_ni->sni_nrqbds = 0;
                atomic_set(&srv_ni->sni_nrqbds_receiving, 0);

        /* Now allocate the event queue and request buffers, assuming all
         * interfaces require the same level of buffering. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: initialising interface %s\n", name,
                        srv_ni->sni_ni->pni_name);

                /* Incoming-request events are delivered to
                 * request_in_callback() */
                rc = PtlEQAlloc(srv_ni->sni_ni->pni_ni_h, nevents,
                                request_in_callback, &(srv_ni->sni_eq_h));
                CERROR("%s.%d: PtlEQAlloc on %s failed: %d\n",
                       name, i, srv_ni->sni_ni->pni_name, rc);

                /* Pre-post @nbufs request buffer descriptors on this NI */
                for (j = 0; j < nbufs; j++) {
                        struct ptlrpc_request_buffer_desc *rqbd;

                        OBD_ALLOC_WAIT(rqbd, sizeof(*rqbd));
                        CERROR ("%s.%d: Can't allocate request "
                                "descriptor %d on %s\n",
                                name, i, srv_ni->sni_nrqbds,
                                srv_ni->sni_ni->pni_name);

                        rqbd->rqbd_srv_ni = srv_ni;
                        rqbd->rqbd_me_h = PTL_HANDLE_NONE;
                        atomic_set(&rqbd->rqbd_refcount, 0);

                        OBD_ALLOC_WAIT(rqbd->rqbd_buffer, service->srv_buf_size);
                        if (rqbd->rqbd_buffer == NULL) {
                                CERROR ("%s.%d: Can't allocate request "
                                        name, i, srv_ni->sni_nrqbds,
                                        srv_ni->sni_ni->pni_name);
                                OBD_FREE(rqbd, sizeof(*rqbd));
                        /* Track the buffer for teardown, then expose it to
                         * the network */
                        list_add(&rqbd->rqbd_list, &srv_ni->sni_rqbds);
                        srv_ni->sni_nrqbds++;

                        ptlrpc_link_svc_me(rqbd);

        if (proc_entry != NULL)
                ptlrpc_lprocfs_register_service(proc_entry, service);

        CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
               service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);

        /* presumably the "failed:" label path — tear down partial state */
        ptlrpc_unregister_service(service);
/*
 * Unpack and dispatch one incoming RPC request to the service handler.
 *
 * The Portals event describes bytes landed in a pre-posted request
 * buffer (rqbd).  The wire message is unpacked and byte-swap-checked,
 * the sending export/connection is looked up (requests from stale
 * connections — conn_cnt older than the export's — are dropped), and the
 * request is handed to svc->srv_handler().  When the rqbd refcount drops
 * to zero afterwards the buffer is re-posted to the network.
 *
 * NOTE(review): chunk appears truncated — the `ptl_event_t *event`
 * parameter line, the opening brace, `rc` declaration, the error `goto`s
 * and the return are not visible.  The body clearly reads an `event`
 * parameter; verify the signature against the full source.
 */
static int handle_incoming_request(struct obd_device *obddev,
                                   struct ptlrpc_service *svc,
                                   struct ptlrpc_request *request)
        struct ptlrpc_request_buffer_desc *rqbd = event->mem_desc.user_ptr;

        /* FIXME: If we move to an event-driven model, we should put the request
         * on the stack of mds_handle instead. */

        /* Sanity: the event must match a live buffer of this service and
         * lie entirely within it */
        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
        LASSERT ((event->mem_desc.options & (PTL_MD_IOV | PTL_MD_KIOV)) == 0);
        LASSERT (rqbd->rqbd_srv_ni->sni_service == svc);
        LASSERT (rqbd->rqbd_buffer == event->mem_desc.start);
        LASSERT (event->offset + event->mlength <= svc->srv_buf_size);

        /* @request is caller-owned scratch, reused per event — reset it */
        memset(request, 0, sizeof(*request));
        spin_lock_init (&request->rq_lock);
        INIT_LIST_HEAD(&request->rq_list);
        request->rq_svc = svc;
        request->rq_xid = event->match_bits;
        /* Message body lives in the rqbd buffer at the event's offset */
        request->rq_reqmsg = event->mem_desc.start + event->offset;
        request->rq_reqlen = event->mlength;

        /* Clear request swab mask; this is a new request */
        request->rq_req_swab_mask = 0;

        /* Validates magic/version and records byte-swap requirements */
        rc = lustre_unpack_msg (request->rq_reqmsg, request->rq_reqlen);
        CERROR ("error unpacking request: ptl %d from "LPX64
                " xid "LPU64"\n", svc->srv_req_portal,
                event->initiator.nid, request->rq_xid);

        if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type received (type=%u)\n",
                       request->rq_reqmsg->type);

        CDEBUG(D_NET, "got req "LPD64" (md: %p + %d)\n", request->rq_xid,
               event->mem_desc.start, event->offset);

        request->rq_peer.peer_nid = event->initiator.nid;
        request->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;

        /* May be NULL for e.g. connect requests with no export yet */
        request->rq_export = class_conn2export(&request->rq_reqmsg->handle);

        if (request->rq_export) {
                request->rq_connection = request->rq_export->exp_connection;
                ptlrpc_connection_addref(request->rq_connection);
                /* Drop requests from a connection generation older than
                 * the export's current one (client reconnected since) */
                if (request->rq_reqmsg->conn_cnt <
                    request->rq_export->exp_conn_cnt) {
                        DEBUG_REQ(D_ERROR, request,
                                  "DROPPING req from old connection %d < %d",
                                  request->rq_reqmsg->conn_cnt,
                                  request->rq_export->exp_conn_cnt);

                request->rq_export->exp_last_request_time =
                        LTIME_S(CURRENT_TIME);

                /* create a (hopefully temporary) connection that will be used
                 * to send the reply if this call doesn't create an export.
                 * XXX revisit this when we revamp ptlrpc */
                request->rq_connection =
                        ptlrpc_get_connection(&request->rq_peer, NULL);

        CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
               request->rq_reqmsg->opc);

        /* Dispatch to the service-specific handler (e.g. mds_handle) */
        rc = svc->srv_handler(request);
        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               rqbd->rqbd_srv_ni->sni_ni->pni_name, event->initiator.nid,
               request->rq_reqmsg->opc);

        /* Drop the references taken above */
        ptlrpc_put_connection(request->rq_connection);
        if (request->rq_export != NULL)
                class_export_put(request->rq_export);

        if (atomic_dec_and_test (&rqbd->rqbd_refcount)) /* last reference? */
                ptlrpc_link_svc_me (rqbd);
/* Don't use daemonize, it removes fs struct from new thread (bug 418) */
/*
 * Detach the calling kernel thread from its parent's resources so it can
 * run as a long-lived service daemon.
 *
 * NOTE(review): chunk appears truncated — the opening brace and (per the
 * interior line-number gap) one or more statements before the helper call
 * are not visible; only the lustre_daemonize_helper() call remains.
 */
void ptlrpc_daemonize(void)
        lustre_daemonize_helper();
/*
 * Return the elapsed time (@large - @small) in microseconds.
 *
 * The result is negative when @small is the later timestamp.  Note the
 * return type is long, so very large intervals could overflow on ILP32
 * platforms; callers here compare against obd_timeout-scale values.
 */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        long sec_delta = (long)(large->tv_sec - small->tv_sec);
        long usec_delta = (long)(large->tv_usec - small->tv_usec);

        return sec_delta * 1000000 + usec_delta;
}
/*
 * Service thread entry point (spawned via kernel_thread from
 * ptlrpc_start_thread).
 *
 * Sets up signal blocking and the thread name, allocates per-thread
 * event/request scratch buffers, marks itself SVC_RUNNING (waking the
 * starter), then loops: wait (exclusively) for ptlrpc_check_event() to
 * dequeue an event, account wait/idle time to lprocfs, drop requests
 * older than obd_timeout, otherwise dispatch via
 * handle_incoming_request(), and account per-opcode stats.  Exits when
 * SVC_STOPPING is set, freeing its buffers and signalling SVC_STOPPED.
 *
 * NOTE(review): chunk appears truncated — the opening brace, the
 * declarations of event/rc/total/flags, NULL checks after OBD_ALLOC, the
 * `while (1) {` header, several closing braces/continues and the final
 * return are not visible.
 */
static int ptlrpc_main(void *arg)
        struct ptlrpc_svc_data *data = arg;
        struct obd_device *obddev = data->dev;
        struct ptlrpc_service *svc = data->svc;
        struct ptlrpc_thread *thread = data->thread;
        struct ptlrpc_request *request;
        struct timeval start_time, finish_time;

        /* Block all signals: service threads are controlled via t_flags,
         * not signals */
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        SIGNAL_MASK_UNLOCK(current, flags);

        THREAD_NAME(current->comm, "%s", data->name);

        /* Per-thread scratch: one event and one request, reused each loop */
        OBD_ALLOC(event, sizeof(*event));
                GOTO(out, rc = -ENOMEM);
        OBD_ALLOC(request, sizeof(*request));
                GOTO(out_event, rc = -ENOMEM);

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        /* XXX maintain a list of all managed devices: insert here */

        /* Seed finish_time so the first idle-time sample is meaningful */
        do_gettimeofday(&finish_time);
        /* And now, loop forever on requests */
                struct l_wait_info lwi = { 0 };

                /* Exclusive wait: only one thread wakes per event */
                l_wait_event_exclusive(svc->srv_waitq,
                                       ptlrpc_check_event(svc, thread, event),

                spin_lock(&svc->srv_lock);
                if (thread->t_flags & SVC_STOPPING) {
                        thread->t_flags &= ~SVC_STOPPING;
                        spin_unlock(&svc->srv_lock);

                /* Woken without an event and without a stop request:
                 * should not happen */
                if (!(thread->t_flags & SVC_EVENT)) {
                        CERROR("unknown flag in service");
                        spin_unlock(&svc->srv_lock);

                thread->t_flags &= ~SVC_EVENT;
                spin_unlock(&svc->srv_lock);

                /* `total` = how long the request sat queued before a
                 * thread picked it up */
                do_gettimeofday(&start_time);
                total = timeval_sub(&start_time, &event->arrival_time);
                if (svc->srv_stats != NULL) {
                        lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
                        lprocfs_counter_add(svc->srv_stats,
                                            PTLRPC_SVCIDLETIME_CNTR,
                                            timeval_sub(&start_time,
#if 0 /* Wait for b_eq branch */
                        lprocfs_counter_add(svc->srv_stats,
                                            PTLRPC_SVCEQDEPTH_CNTR, 0);

                /* The client has long since timed out; don't waste effort
                 * handling it */
                if (total / 1000000 > (long)obd_timeout) {
                        CERROR("Dropping request from NID "LPX64" because it's "
                               "%ld seconds old.\n", event->initiator.nid,
                               total / 1000000); /* bug 1502 */
                        CDEBUG(D_HA, "request from NID "LPX64" noticed after "
                               "%ldus\n", event->initiator.nid, total);
                        rc = handle_incoming_request(obddev, svc, event,

                do_gettimeofday(&finish_time);
                total = timeval_sub(&finish_time, &start_time);

                /* Log at error level if handling exceeded obd_timeout */
                CDEBUG((total / 1000000 > (long)obd_timeout) ? D_ERROR : D_HA,
                       "request "LPU64" from NID "LPX64" processed in %ldus "
                       "(%ldus total)\n", request->rq_xid, event->initiator.nid,
                       total, timeval_sub(&finish_time, &event->arrival_time));

                if (svc->srv_stats != NULL) {
                        int opc = opcode_offset(request->rq_reqmsg->opc);

                        LASSERT(opc < LUSTRE_MAX_OPCODES);
                        lprocfs_counter_add(svc->srv_stats,
                                            opc + PTLRPC_LAST_CNTR,

        /* NB should wait for all SENT callbacks to complete before exiting
         * here.  Unfortunately at this time there is no way to track this */
        OBD_FREE(request, sizeof(*request));
        OBD_FREE(event, sizeof(*event));

        /* Tell ptlrpc_stop_thread() we are gone */
        thread->t_flags = SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_NET, "service thread exiting, process %d: rc = %d\n",
/*
 * Ask one service thread to stop and wait until it has exited.
 *
 * Sets SVC_STOPPING under srv_lock, wakes ALL waiters on the service
 * waitq (the exclusive wait in ptlrpc_main means a plain wake could hit
 * the wrong thread), then blocks until the thread reports SVC_STOPPED.
 *
 * NOTE(review): chunk appears truncated — the opening brace, the
 * l_wait_event() closing argument (&lwi) and the closing brace are not
 * visible.
 */
static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                               struct ptlrpc_thread *thread)
        struct l_wait_info lwi = { 0 };

        spin_lock(&svc->srv_lock);
        thread->t_flags = SVC_STOPPING;
        spin_unlock(&svc->srv_lock);

        /* wake_up_all: threads wait exclusively, so make sure the target
         * thread is among those woken */
        wake_up_all(&svc->srv_waitq);
        l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
/*
 * Stop and free every thread of @svc.
 *
 * srv_lock is dropped around the blocking ptlrpc_stop_thread() call and
 * re-taken to unlink the thread from srv_threads; the loop re-reads the
 * list head each iteration so concurrent list changes are tolerated.
 *
 * NOTE(review): chunk appears truncated — the opening brace, the
 * list_entry() trailing argument (the t_link member) and the closing
 * brace are not visible.
 */
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
        spin_lock(&svc->srv_lock);
        while (!list_empty(&svc->srv_threads)) {
                struct ptlrpc_thread *thread;
                thread = list_entry(svc->srv_threads.next, struct ptlrpc_thread,

                /* Must drop the lock: ptlrpc_stop_thread() blocks */
                spin_unlock(&svc->srv_lock);
                ptlrpc_stop_thread(svc, thread);
                spin_lock(&svc->srv_lock);
                list_del(&thread->t_link);
                OBD_FREE(thread, sizeof(*thread));

        spin_unlock(&svc->srv_lock);
/*
 * Start @num_threads service threads named "<base_name>_NN".
 *
 * On any individual start failure, all threads started so far are
 * stopped so the service is left with either all or none.
 *
 * NOTE(review): chunk appears truncated — the opening brace, the
 * declarations of i/rc and the `name` buffer, the failure `if`/return
 * and the success return are not visible.
 */
int ptlrpc_start_n_threads(struct obd_device *dev, struct ptlrpc_service *svc,
                           int num_threads, char *base_name)
        for (i = 0; i < num_threads; i++) {
                /* Per-thread name, e.g. "ll_mdt_03" */
                sprintf(name, "%s_%02d", base_name, i);
                rc = ptlrpc_start_thread(dev, svc, name);
                CERROR("cannot start %s thread #%d: rc %d\n", base_name,
                /* Roll back: all-or-nothing thread startup */
                ptlrpc_stop_all_threads(svc);
/*
 * Start a single service thread and wait for it to report SVC_RUNNING.
 *
 * Allocates the ptlrpc_thread control block, links it on srv_threads,
 * spawns ptlrpc_main() via kernel_thread(), and blocks until the new
 * thread signals t_ctl_waitq.  The svc_data `d` is stack-local, which is
 * safe only because we wait for the child to finish reading it before
 * returning.
 *
 * NOTE(review): chunk appears truncated — the `char *name` parameter
 * line, the opening brace, the NULL check after OBD_ALLOC, the
 * initialisation of `d` (dev/svc/name/thread) and the returns are not
 * visible.
 */
int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_svc_data d;
        struct ptlrpc_thread *thread;

        OBD_ALLOC(thread, sizeof(*thread));
        init_waitqueue_head(&thread->t_ctl_waitq);

        spin_lock(&svc->srv_lock);
        list_add(&thread->t_link, &svc->srv_threads);
        spin_unlock(&svc->srv_lock);

        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in ptlrpc_daemonize() right away. */
        rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
                CERROR("cannot start thread: %d\n", rc);
                OBD_FREE(thread, sizeof(*thread));
        /* Don't return until the child has copied `d` and is running */
        l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);
/*
 * Tear down a service created by ptlrpc_init_svc().
 *
 * Preconditions: all service threads must already be stopped.  For each
 * interface: unlink and free every request buffer descriptor (racing
 * safely with the event callback, which may have unlinked the ME
 * already), then free the event queue if one was allocated.  Finally
 * unregister from lprocfs and free the service struct itself, sized the
 * same way it was allocated (offsetof over srv_interfaces).
 *
 * NOTE(review): chunk appears truncated — the opening brace, the
 * declarations of i/rc, the list_entry() trailing member argument, the
 * OBD_FREE(service, ...) opening and the return are not visible.
 */
int ptlrpc_unregister_service(struct ptlrpc_service *service)
        struct ptlrpc_srv_ni *srv_ni;

        LASSERT (list_empty (&service->srv_threads));

        /* XXX We could reply (with failure) to all buffered requests
         * _after_ unlinking _all_ the request buffers, but _before_
         */

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: tearing down interface %s\n",
                        service->srv_name, srv_ni->sni_ni->pni_name);

                while (!list_empty (&srv_ni->sni_rqbds)) {
                        struct ptlrpc_request_buffer_desc *rqbd =
                                list_entry (srv_ni->sni_rqbds.next,
                                            struct ptlrpc_request_buffer_desc,

                        list_del (&rqbd->rqbd_list);

                        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
                        /* refcount could be anything; it's possible for
                         * the buffers to continued to get filled after all
                         * the server threads exited.  But we know they
                         */
                        (void) PtlMEUnlink(rqbd->rqbd_me_h);
                        /* The callback handler could have unlinked this ME
                         * already (we're racing with her) but it's safe to
                         * ensure it _has_ been unlinked.
                         */

                        OBD_FREE (rqbd->rqbd_buffer, service->srv_buf_size);
                        OBD_FREE (rqbd, sizeof (*rqbd));
                        srv_ni->sni_nrqbds--;

                LASSERT (srv_ni->sni_nrqbds == 0);

                /* PTL_HANDLE_NONE means ptlrpc_init_svc() never got as far
                 * as allocating this NI's EQ */
                if (!PtlHandleEqual (srv_ni->sni_eq_h, PTL_HANDLE_NONE)) {
                        rc = PtlEQFree(srv_ni->sni_eq_h);
                        CERROR("%s.%d: PtlEQFree failed on %s: %d\n",
                               service->srv_name, i,
                               srv_ni->sni_ni->pni_name, rc);

        ptlrpc_lprocfs_unregister_service(service);

        /* Free with the same size used at allocation time */
               offsetof (struct ptlrpc_service,
                         srv_interfaces[ptlrpc_ninterfaces]));