1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_RPC
25 #include <liblustre.h>
26 #include <linux/kp30.h>
28 #include <linux/obd_support.h>
29 #include <linux/obd_class.h>
30 #include <linux/lustre_net.h>
32 extern int request_in_callback(ptl_event_t *ev);
/* Poll for work for one service thread.
 *
 * Used as the wait_event() condition in ptlrpc_main(): wakes the thread
 * when it has been told to stop (SVC_STOPPING) or when PtlEQGet() pulls
 * an incoming-request event off one of the per-interface event queues.
 * Interfaces are scanned round-robin starting at srv_interface_rover so
 * a busy interface cannot starve the others.  All t_flags manipulation
 * happens under svc->srv_lock.
 *
 * NOTE(review): several structural lines (braces, locals, returns) are
 * elided in this excerpt; the return-value semantics are inferred from
 * the wait_event() caller — confirm against the full source.
 */
static int ptlrpc_check_event(struct ptlrpc_service *svc,
                              struct ptlrpc_thread *thread, ptl_event_t *event)
        struct ptlrpc_srv_ni *srv_ni;

        spin_lock(&svc->srv_lock);

        /* A stop request always counts as "something to do". */
        if (thread->t_flags & SVC_STOPPING)

        /* We must not already hold an unconsumed event. */
        LASSERT ((thread->t_flags & SVC_EVENT) == 0);
        LASSERT (ptlrpc_ninterfaces > 0);

        /* Round-robin scan of the network interfaces, resuming where the
         * previous scan left off. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                idx = (svc->srv_interface_rover + i) % ptlrpc_ninterfaces;
                srv_ni = &svc->srv_interfaces[idx];

                LASSERT (ptl_is_valid_handle (&srv_ni->sni_eq_h));

                rc = PtlEQGet(srv_ni->sni_eq_h, event);
                /* next time start with the next interface */
                svc->srv_interface_rover = (idx+1) % ptlrpc_ninterfaces;
                thread->t_flags |= SVC_EVENT;

        /* Any other PtlEQGet return code is unexpected. */
        CERROR("BUG: PtlEQGet returned %d\n", rc);

        spin_unlock(&svc->srv_lock);
/* Create and initialise a ptlrpc service.
 *
 * Allocates a ptlrpc_service whose tail is a variable-length array of
 * one ptlrpc_srv_ni per configured network interface; for each
 * interface it allocates a Portals event queue (events delivered to
 * request_in_callback()) and posts 'nbufs' request buffers of
 * 'bufsize' bytes via ptlrpc_link_svc_me().
 *
 * nevents      - depth of each per-interface event queue
 * nbufs        - request buffers posted per interface
 * bufsize      - size of each request buffer
 * max_req_size - largest single request accepted
 * req_portal   - Portals portal index for incoming requests
 * rep_portal   - Portals portal index for replies
 * handler      - callback invoked for each received request
 * name         - service name; referenced, not copied, so it must
 *                outlive the service
 *
 * On allocation/EQAlloc failure the error path tears down partial state
 * via ptlrpc_unregister_service().
 *
 * NOTE(review): error branches, braces and returns are elided in this
 * excerpt — confirm control flow against the full source.
 */
struct ptlrpc_service *
ptlrpc_init_svc(__u32 nevents, __u32 nbufs,
                __u32 bufsize, __u32 max_req_size,
                int req_portal, int rep_portal,
                svc_handler_t handler, char *name)
        struct ptlrpc_service *service;
        struct ptlrpc_srv_ni *srv_ni;

        LASSERT (ptlrpc_ninterfaces > 0);

        /* Size the allocation to include the trailing per-interface array. */
        ssize = offsetof (struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]);
        OBD_ALLOC(service, ssize);

        service->srv_name = name;        /* caller's string, not copied */
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_max_req_size = max_req_size;
        service->srv_buf_size = bufsize;

        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_handler = handler;
        service->srv_interface_rover = 0;

        /* First initialise enough for early teardown */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];

                srv_ni->sni_service = service;
                srv_ni->sni_ni = &ptlrpc_interfaces[i];
                /* invalid handle marks "EQ not allocated yet" for teardown */
                ptl_set_inv_handle (&srv_ni->sni_eq_h);
                INIT_LIST_HEAD(&srv_ni->sni_rqbds);
                srv_ni->sni_nrqbds = 0;
                atomic_set(&srv_ni->sni_nrqbds_receiving, 0);

        /* Now allocate the event queue and request buffers, assuming all
         * interfaces require the same level of buffering. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: initialising interface %s\n", name,
                        srv_ni->sni_ni->pni_name);

                /* Incoming-request events land in request_in_callback(). */
                rc = PtlEQAlloc(srv_ni->sni_ni->pni_ni_h, nevents,
                                request_in_callback, &(srv_ni->sni_eq_h));
                CERROR("%s.%d: PtlEQAlloc on %s failed: %d\n",
                       name, i, srv_ni->sni_ni->pni_name, rc);

                for (j = 0; j < nbufs; j++) {
                        struct ptlrpc_request_buffer_desc *rqbd;

                        OBD_ALLOC(rqbd, sizeof(*rqbd));
                        CERROR ("%s.%d: Can't allocate request "
                                "descriptor %d on %s\n",
                                name, i, srv_ni->sni_nrqbds,
                                srv_ni->sni_ni->pni_name);

                        rqbd->rqbd_srv_ni = srv_ni;
                        ptl_set_inv_handle(&rqbd->rqbd_me_h);
                        atomic_set(&rqbd->rqbd_refcount, 0);

                        OBD_ALLOC(rqbd->rqbd_buffer, service->srv_buf_size);
                        if (rqbd->rqbd_buffer == NULL) {
                                CERROR ("%s.%d: Can't allocate request "
                                        name, i, srv_ni->sni_nrqbds,
                                        srv_ni->sni_ni->pni_name);
                                OBD_FREE(rqbd, sizeof(*rqbd));

                        list_add(&rqbd->rqbd_list, &srv_ni->sni_rqbds);
                        srv_ni->sni_nrqbds++;

                        /* Post the buffer so it can start receiving requests. */
                        ptlrpc_link_svc_me(rqbd);

        CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
               service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);

        /* failure path: undo whatever initialisation succeeded */
        ptlrpc_unregister_service(service);
/* Decode one incoming-request event and dispatch it to the service handler.
 *
 * Rebuilds the caller-supplied (reused) ptlrpc_request from the Portals
 * event, validates the wire message (length, type, magic, version),
 * resolves the sender's export/connection, invokes svc->srv_handler()
 * and finally drops this thread's reference on the request buffer,
 * reposting it when the reference count hits zero.
 *
 * NOTE(review): the body uses a 'ptl_event_t *event' parameter whose
 * declaration line is elided from this excerpt; error-return lines and
 * braces are elided too — confirm against the full source.
 */
static int handle_incoming_request(struct obd_device *obddev,
                                   struct ptlrpc_service *svc,
                                   struct ptlrpc_request *request)
        struct ptlrpc_request_buffer_desc *rqbd = event->mem_desc.user_ptr;

        /* FIXME: If we move to an event-driven model, we should put the request
         * on the stack of mds_handle instead. */

        /* Sanity: buffer still referenced, contiguous (non-IOV) MD that
         * belongs to this service, and the message fits in the buffer. */
        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
        LASSERT ((event->mem_desc.options & PTL_MD_IOV) == 0);
        LASSERT (rqbd->rqbd_srv_ni->sni_service == svc);
        LASSERT (rqbd->rqbd_buffer == event->mem_desc.start);
        LASSERT (event->offset + event->mlength <= svc->srv_buf_size);

        /* Reset and repopulate the scratch request from the event. */
        memset(request, 0, sizeof(*request));
        INIT_LIST_HEAD(&request->rq_list);
        request->rq_svc = svc;
        request->rq_obd = obddev;
        request->rq_xid = event->match_bits;   /* xid travels in the match bits */
        request->rq_reqmsg = event->mem_desc.start + event->offset;
        request->rq_reqlen = event->mlength;

        /* Too short to even hold a lustre_msg header? */
        if (request->rq_reqlen < sizeof(struct lustre_msg)) {
                CERROR("incomplete request (%d): ptl %d from "LPX64" xid "
                       request->rq_reqlen, svc->srv_req_portal,
                       event->initiator.nid, request->rq_xid);

        CDEBUG(D_RPCTRACE, "Handling RPC ni:pid:xid:nid:opc %d:%d:"LPU64":"
               LPX64":%d\n", rqbd->rqbd_srv_ni - &svc->srv_interfaces[0],
               NTOH__u32(request->rq_reqmsg->status), request->rq_xid,
               event->initiator.nid, NTOH__u32(request->rq_reqmsg->opc));

        /* Validate type/magic/version before trusting anything else in
         * the message: this is untrusted wire data. */
        if (NTOH__u32(request->rq_reqmsg->type) != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type received (type=%u)\n",
                       request->rq_reqmsg->type);

        if (request->rq_reqmsg->magic != PTLRPC_MSG_MAGIC) {
                CERROR("wrong lustre_msg magic %d: ptl %d from "LPX64" xid "
                       request->rq_reqmsg->magic, svc->srv_req_portal,
                       event->initiator.nid, request->rq_xid);

        if (request->rq_reqmsg->version != PTLRPC_MSG_VERSION) {
                CERROR("wrong lustre_msg version %d: ptl %d from "LPX64" xid "
                       request->rq_reqmsg->version, svc->srv_req_portal,
                       event->initiator.nid, request->rq_xid);

        CDEBUG(D_NET, "got req "LPD64" (md: %p + %d)\n", request->rq_xid,
               event->mem_desc.start, event->offset);

        request->rq_peer.peer_nid = event->initiator.nid;
        request->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;

        /* Look up an existing export for this client (argument
         * continuation elided in this excerpt). */
        request->rq_export = class_conn2export((struct lustre_handle *)

        if (request->rq_export) {
                /* Known client: reuse (and reference) its connection. */
                request->rq_connection = request->rq_export->exp_connection;
                ptlrpc_connection_addref(request->rq_connection);
        /* create a (hopefully temporary) connection that will be used
         * to send the reply if this call doesn't create an export.
         * XXX revisit this when we revamp ptlrpc */
                request->rq_connection =
                        ptlrpc_get_connection(&request->rq_peer, NULL);

        rc = svc->srv_handler(request);
        ptlrpc_put_connection(request->rq_connection);

        /* Drop our buffer reference; repost it if we were the last user. */
        if (atomic_dec_and_test (&rqbd->rqbd_refcount)) /* last reference? */
                ptlrpc_link_svc_me (rqbd);
/* Don't use daemonize, it removes fs struct from new thread (bug 418) */
/* Detach a freshly-cloned service thread from its parent, doing by hand
 * what daemonize() would do but WITHOUT dropping the fs struct (the
 * bug-418 comment above).  Only the session assignment is visible in
 * this excerpt; the rest of the body is elided — presumably pgrp/tty
 * and fd cleanup, TODO confirm against the full source. */
static void ptlrpc_daemonize(void)
        current->session = 1;
288 static int ptlrpc_main(void *arg)
290 struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
291 struct obd_device *obddev = data->dev;
292 struct ptlrpc_service *svc = data->svc;
293 struct ptlrpc_thread *thread = data->thread;
294 struct ptlrpc_request *request;
303 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
304 sigfillset(¤t->blocked);
307 spin_lock_irqsave(¤t->sigmask_lock, flags);
308 sigfillset(¤t->blocked);
309 recalc_sigpending(current);
310 spin_unlock_irqrestore(¤t->sigmask_lock, flags);
314 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
315 sprintf(current->comm, "%s|%d", data->name,current->thread.extern_pid);
318 strcpy(current->comm, data->name);
322 OBD_ALLOC(event, sizeof(*event));
324 GOTO(out, rc = -ENOMEM);
325 OBD_ALLOC(request, sizeof(*request));
327 GOTO(out_event, rc = -ENOMEM);
329 /* Record that the thread is running */
330 thread->t_flags = SVC_RUNNING;
331 wake_up(&thread->t_ctl_waitq);
333 /* XXX maintain a list of all managed devices: insert here */
335 /* And now, loop forever on requests */
337 wait_event(svc->srv_waitq,
338 ptlrpc_check_event(svc, thread, event));
340 if (thread->t_flags & SVC_STOPPING) {
341 spin_lock(&svc->srv_lock);
342 thread->t_flags &= ~SVC_STOPPING;
343 spin_unlock(&svc->srv_lock);
349 if (thread->t_flags & SVC_EVENT) {
350 spin_lock(&svc->srv_lock);
351 thread->t_flags &= ~SVC_EVENT;
352 spin_unlock(&svc->srv_lock);
354 rc = handle_incoming_request(obddev, svc, event,
359 CERROR("unknown break in service");
365 OBD_FREE(request, sizeof(*request));
367 OBD_FREE(event, sizeof(*event));
369 thread->t_flags = SVC_STOPPED;
370 wake_up(&thread->t_ctl_waitq);
372 CDEBUG(D_NET, "service thread exiting, process %d: rc = %d\n",
/* Ask one service thread to exit and block until it has.
 *
 * Overwrites t_flags with SVC_STOPPING under srv_lock, wakes the shared
 * service waitq so ptlrpc_check_event() notices the flag, then sleeps
 * on the thread's private control waitq until ptlrpc_main() sets
 * SVC_STOPPED.  Must not be called with srv_lock held: wait_event()
 * sleeps. */
static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                               struct ptlrpc_thread *thread)
        spin_lock(&svc->srv_lock);
        thread->t_flags = SVC_STOPPING;
        spin_unlock(&svc->srv_lock);

        wake_up(&svc->srv_waitq);
        wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED));
/* Stop and free every thread belonging to a service.
 *
 * Pops threads off svc->srv_threads one at a time.  srv_lock must be
 * dropped around ptlrpc_stop_thread() because it sleeps; it is
 * re-acquired before unlinking and freeing the thread struct.
 *
 * NOTE(review): the list_entry() continuation line and closing braces
 * are elided in this excerpt. */
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
        spin_lock(&svc->srv_lock);
        while (!list_empty(&svc->srv_threads)) {
                struct ptlrpc_thread *thread;
                thread = list_entry(svc->srv_threads.next, struct ptlrpc_thread,

                /* can't hold the lock across the sleeping stop */
                spin_unlock(&svc->srv_lock);
                ptlrpc_stop_thread(svc, thread);
                spin_lock(&svc->srv_lock);
                list_del(&thread->t_link);
                OBD_FREE(thread, sizeof(*thread));
        spin_unlock(&svc->srv_lock);
/* Spawn one service thread for 'svc' on device 'dev'.
 *
 * Allocates the ptlrpc_thread bookkeeping struct, links it onto
 * svc->srv_threads, clones ptlrpc_main() via kernel_thread(), and waits
 * until the new thread reports SVC_RUNNING.  That final wait also keeps
 * the stack-allocated ptlrpc_svc_data 'd' alive until the child has
 * read it — presumably the reason for the wait; confirm in full source.
 *
 * NOTE(review): the remaining parameter(s), the filling-in of 'd', and
 * several braces/returns are elided in this excerpt. */
int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
        struct ptlrpc_svc_data d;
        struct ptlrpc_thread *thread;

        OBD_ALLOC(thread, sizeof(*thread));
        if (thread == NULL) {
        init_waitqueue_head(&thread->t_ctl_waitq);

        spin_lock(&svc->srv_lock);
        list_add(&thread->t_link, &svc->srv_threads);
        spin_unlock(&svc->srv_lock);

        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in ptlrpc_daemonize() right away.
         */
        rc = kernel_thread(ptlrpc_main, (void *) &d, CLONE_VM | CLONE_FILES);
        CERROR("cannot start thread\n");
        OBD_FREE(thread, sizeof(*thread));

        /* don't return until the thread is up (and 'd' has been consumed) */
        wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
/* Tear down a service created by ptlrpc_init_svc().
 *
 * Precondition: all service threads already stopped (asserted below).
 * For each interface: unlink and free every posted request buffer
 * descriptor and its buffer, then free the event queue if it was ever
 * allocated (the handle is set invalid during init, so partial-init
 * teardown from the ptlrpc_init_svc() failure path is supported).
 * Finally frees the service struct itself.
 *
 * NOTE(review): braces, returns and the OBD_FREE(service, ...) call
 * line are elided in this excerpt. */
int ptlrpc_unregister_service(struct ptlrpc_service *service)
        struct ptlrpc_srv_ni *srv_ni;

        LASSERT (list_empty (&service->srv_threads));

        /* XXX We could reply (with failure) to all buffered requests
         * _after_ unlinking _all_ the request buffers, but _before_

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: tearing down interface %s\n",
                        service->srv_name, srv_ni->sni_ni->pni_name);

                /* Drain and free every request buffer on this interface. */
                while (!list_empty (&srv_ni->sni_rqbds)) {
                        struct ptlrpc_request_buffer_desc *rqbd =
                                list_entry (srv_ni->sni_rqbds.next,
                                            struct ptlrpc_request_buffer_desc,

                        list_del (&rqbd->rqbd_list);

                        /* NOTE(review): this assert appears to contradict the
                         * "refcount could be anything" comment below — the
                         * intervening elided lines may resolve this; check
                         * the full source. */
                        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
                        /* refcount could be anything; it's possible for
                         * the buffers to continued to get filled after all
                         * the server threads exited. But we know they

                        /* return deliberately ignored — see comment below */
                        (void) PtlMEUnlink(rqbd->rqbd_me_h);
                        /* The callback handler could have unlinked this ME
                         * already (we're racing with her) but it's safe to
                         * ensure it _has_ been unlinked.

                        OBD_FREE (rqbd->rqbd_buffer, service->srv_buf_size);
                        OBD_FREE (rqbd, sizeof (*rqbd));
                        srv_ni->sni_nrqbds--;

                LASSERT (srv_ni->sni_nrqbds == 0);

                /* EQ exists only if PtlEQAlloc succeeded for this interface. */
                if (ptl_is_valid_handle (&srv_ni->sni_eq_h)) {
                        rc = PtlEQFree(srv_ni->sni_eq_h);
                        CERROR("%s.%d: PtlEQFree failed on %s: %d\n",
                               service->srv_name, i,
                               srv_ni->sni_ni->pni_name, rc);

        /* free the service struct, sized exactly as it was allocated
         * (the OBD_FREE(service, ...) opening line is elided here) */
                 offsetof (struct ptlrpc_service,
                           srv_interfaces[ptlrpc_ninterfaces]));