/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *  This file is part of Lustre, http://www.lustre.org.
 *
 *  Lustre is free software; you can redistribute it and/or
 *  modify it under the terms of version 2 of the GNU General Public
 *  License as published by the Free Software Foundation.
 *
 *  Lustre is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with Lustre; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <liblustre.h>
#include <linux/kp30.h>

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_net.h>
#include <portals/types.h>
#include "ptlrpc_internal.h"
static int ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc);

static LIST_HEAD (ptlrpc_all_services);
static spinlock_t ptlrpc_all_services_lock = SPIN_LOCK_UNLOCKED;
static void
ptlrpc_free_server_req (struct ptlrpc_request *req)
{
        /* The last request to be received into a request buffer uses space
         * in the request buffer descriptor, otherwise requests are
         * allocated dynamically in the incoming request event handler */
        if (req == &req->rq_rqbd->rqbd_req)
                return;

        OBD_FREE(req, sizeof(*req));
}
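
/*
 * Editor's sketch of the scheme above (annotation only, not original
 * code): each rqbd embeds one struct ptlrpc_request, so the final
 * request received into a buffer can use that embedded space while
 * earlier ones are allocated per incoming event:
 *
 *      if (last_request_in_buffer)             // hypothetical condition
 *              req = &rqbd->rqbd_req;          // embedded; never freed
 *      else
 *              OBD_ALLOC(req, sizeof(*req));   // freed by the test above
 */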
static char *
ptlrpc_alloc_request_buffer (int size)
{
        char *ptr;

        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VMALLOC(ptr, size);
        else
                OBD_ALLOC(ptr, size);

        return (ptr);
}

static void
ptlrpc_free_request_buffer (char *ptr, int size)
{
        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VFREE(ptr, size);
        else
                OBD_FREE(ptr, size);
}
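
/*
 * Editor's note on the threshold above: buffers larger than
 * SVC_BUF_VMALLOC_THRESHOLD come from vmalloc() (via OBD_VMALLOC), since
 * large physically-contiguous kmalloc() allocations become unreliable as
 * memory fragments; smaller buffers stay on the kmalloc path, which is
 * cheaper to map.  The free path must apply the same size test, as
 * ptlrpc_free_request_buffer() does.
 */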
struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd (struct ptlrpc_srv_ni *srv_ni)
{
        struct ptlrpc_service             *svc = srv_ni->sni_service;
        unsigned long                      flags;
        struct ptlrpc_request_buffer_desc *rqbd;

        OBD_ALLOC(rqbd, sizeof (*rqbd));
        if (rqbd == NULL)
                return (NULL);

        rqbd->rqbd_srv_ni = srv_ni;
        rqbd->rqbd_refcount = 0;
        rqbd->rqbd_cbid.cbid_fn = request_in_callback;
        rqbd->rqbd_cbid.cbid_arg = rqbd;
        rqbd->rqbd_buffer = ptlrpc_alloc_request_buffer(svc->srv_buf_size);

        if (rqbd->rqbd_buffer == NULL) {
                OBD_FREE(rqbd, sizeof (*rqbd));
                return (NULL);
        }

        spin_lock_irqsave (&svc->srv_lock, flags);
        list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
        spin_unlock_irqrestore (&svc->srv_lock, flags);

        return (rqbd);
}
void
ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_srv_ni  *sni = rqbd->rqbd_srv_ni;
        struct ptlrpc_service *svc = sni->sni_service;
        unsigned long          flags;

        LASSERT (rqbd->rqbd_refcount == 0);

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_del(&rqbd->rqbd_list);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size);
        OBD_FREE (rqbd, sizeof (*rqbd));
}
int
ptlrpc_grow_req_bufs(struct ptlrpc_srv_ni *srv_ni)
{
        struct ptlrpc_service             *svc = srv_ni->sni_service;
        struct ptlrpc_request_buffer_desc *rqbd;
        int                                i;

        for (i = 0; i < svc->srv_nbuf_per_group; i++) {
                rqbd = ptlrpc_alloc_rqbd(srv_ni);

                if (rqbd == NULL) {
                        CERROR ("%s/%s: Can't allocate request buffer\n",
                                svc->srv_name, srv_ni->sni_ni->pni_name);
                        return (-ENOMEM);
                }

                if (ptlrpc_server_post_idle_rqbds(svc) < 0)
                        return (-EAGAIN);
        }

        return (0);
}
void
ptlrpc_save_lock (struct ptlrpc_request *req,
                  struct lustre_handle *lock, int mode)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        int                        idx;

        LASSERT (rs != NULL);
        LASSERT (rs->rs_nlocks < RS_MAX_LOCKS);

        idx = rs->rs_nlocks++;
        rs->rs_locks[idx] = *lock;
        rs->rs_modes[idx] = mode;
        rs->rs_difficult = 1;
}
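
/*
 * Usage sketch (editor's illustration; the lock handle here is
 * hypothetical, real callers live in the MDS code): a request handler
 * parks a granted LDLM lock on the reply state so it is only decref'd
 * once the client has seen the reply, which is what makes the reply
 * 'difficult':
 *
 *      struct lustre_handle lockh;     // filled in by the lock ops
 *      ...
 *      ptlrpc_save_lock(req, &lockh, LCK_EX);
 *
 * The saved locks are released in ptlrpc_server_handle_reply() via
 * ldlm_lock_decref().
 */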
void
ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;

        LASSERT (spin_is_locked (&svc->srv_lock));
        LASSERT (rs->rs_difficult);

        rs->rs_scheduled_ever = 1;              /* flag any notification attempt */

        if (rs->rs_scheduled)                   /* being set up or already notified */
                return;

        rs->rs_scheduled = 1;
        list_del (&rs->rs_list);
        list_add (&rs->rs_list, &svc->srv_reply_queue);
        wake_up (&svc->srv_waitq);
}
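
/*
 * Editor's note: rs_scheduled guards against double-queueing (a reply may
 * be notified both by transaction commit and by network completion),
 * while rs_scheduled_ever records that a notification was ever attempted.
 * The caller must already hold svc->srv_lock, as the LASSERT above checks.
 */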
void
ptlrpc_commit_replies (struct obd_device *obd)
{
        struct list_head *tmp;
        struct list_head *nxt;
        unsigned long     flags;

        /* Find any replies that have been committed and get their service
         * to attend to complete them. */

        /* CAVEAT EMPTOR: spinlock ordering!!! */
        spin_lock_irqsave (&obd->obd_uncommitted_replies_lock, flags);

        list_for_each_safe (tmp, nxt, &obd->obd_uncommitted_replies) {
                struct ptlrpc_reply_state *rs =
                        list_entry (tmp, struct ptlrpc_reply_state, rs_obd_list);

                LASSERT (rs->rs_difficult);

                if (rs->rs_transno <= obd->obd_last_committed) {
                        struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;

                        spin_lock (&svc->srv_lock);
                        list_del_init (&rs->rs_obd_list);
                        ptlrpc_schedule_difficult_reply (rs);
                        spin_unlock (&svc->srv_lock);
                }
        }

        spin_unlock_irqrestore (&obd->obd_uncommitted_replies_lock, flags);
}
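
/*
 * Editor's note on the CAVEAT above: the ordering established here is
 * obd_uncommitted_replies_lock first, svc->srv_lock nested inside it.
 * Every other path needing both (e.g. ptlrpc_server_handle_reply(), which
 * drops srv_lock before touching the obd list) must respect the same
 * order or risk deadlock.
 */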
static long
timeval_sub(struct timeval *large, struct timeval *small)
{
        /* microseconds from *small to *large */
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}
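
/*
 * Worked example (editor's): large = {2, 500000}, small = {1, 750000}
 * gives (2 - 1) * 1000000 + (500000 - 750000) = 750000us; the usec term
 * may be negative, which the seconds term absorbs.  Note the result
 * overflows a 32-bit long once the interval exceeds roughly 35 minutes.
 */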
static int
ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
{
        struct ptlrpc_srv_ni              *srv_ni;
        struct ptlrpc_request_buffer_desc *rqbd;
        unsigned long                      flags;
        int                                rc;

        spin_lock_irqsave(&svc->srv_lock, flags);
        if (list_empty (&svc->srv_idle_rqbds)) {
                spin_unlock_irqrestore(&svc->srv_lock, flags);
                return (0);
        }

        rqbd = list_entry(svc->srv_idle_rqbds.next,
                          struct ptlrpc_request_buffer_desc,
                          rqbd_list);
        list_del (&rqbd->rqbd_list);

        /* assume we will post successfully */
        srv_ni = rqbd->rqbd_srv_ni;
        srv_ni->sni_nrqbd_receiving++;
        list_add (&rqbd->rqbd_list, &srv_ni->sni_active_rqbds);

        spin_unlock_irqrestore(&svc->srv_lock, flags);

        rc = ptlrpc_register_rqbd(rqbd);
        if (rc == 0)
                return (1);

        /* Posting failed: back out the optimistic accounting */
        spin_lock_irqsave(&svc->srv_lock, flags);

        srv_ni->sni_nrqbd_receiving--;
        list_del(&rqbd->rqbd_list);
        list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);

        if (srv_ni->sni_nrqbd_receiving == 0) {
                /* This service is off-air on this interface because all
                 * its request buffers are busy.  Portals will have started
                 * dropping incoming requests until more buffers get
                 * posted */
                CERROR("All %s %s request buffers busy\n",
                       svc->srv_name, srv_ni->sni_ni->pni_name);
        }

        spin_unlock_irqrestore (&svc->srv_lock, flags);

        return (-1);
}
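
/*
 * Editor's note on the return convention (reconstructed from the call
 * sites): 1 means a buffer was posted, 0 means no idle buffers were
 * available, -1 means posting failed.  ptlrpc_grow_req_bufs() treats < 0
 * as failure, liblustre_check_services() treats > 0 as progress, and
 * ptlrpc_main() backs off for HZ/10 on < 0.
 */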
struct ptlrpc_service *
ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
                int req_portal, int rep_portal,
                svc_handler_t handler, char *name,
                struct proc_dir_entry *proc_entry)
{
        int                    i;
        int                    rc;
        int                    ssize;
        struct ptlrpc_service *service;
        struct ptlrpc_srv_ni  *srv_ni;
        ENTRY;

        LASSERT (ptlrpc_ninterfaces > 0);
        LASSERT (nbufs > 0);
        LASSERT (bufsize >= max_req_size);

        ssize = offsetof (struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]);
        OBD_ALLOC(service, ssize);
        if (service == NULL)
                RETURN(NULL);

        service->srv_name = name;
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_nbuf_per_group = nbufs;
        service->srv_max_req_size = max_req_size;
        service->srv_buf_size = bufsize;
        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_handler = handler;

        INIT_LIST_HEAD(&service->srv_request_queue);
        INIT_LIST_HEAD(&service->srv_idle_rqbds);
        INIT_LIST_HEAD(&service->srv_reply_queue);

        /* First initialise enough for early teardown */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];

                srv_ni->sni_service = service;
                srv_ni->sni_ni = &ptlrpc_interfaces[i];
                INIT_LIST_HEAD(&srv_ni->sni_active_rqbds);
                INIT_LIST_HEAD(&srv_ni->sni_active_replies);
        }

        spin_lock (&ptlrpc_all_services_lock);
        list_add (&service->srv_list, &ptlrpc_all_services);
        spin_unlock (&ptlrpc_all_services_lock);

        /* Now allocate the request buffers, assuming all interfaces require
         * the same number. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: initialising interface %s\n", name,
                        srv_ni->sni_ni->pni_name);

                rc = ptlrpc_grow_req_bufs(srv_ni);
                /* We shouldn't be under memory pressure at startup, so
                 * fail if we can't post all our buffers at this time. */
                if (rc != 0)
                        goto failed;
        }

        if (proc_entry != NULL)
                ptlrpc_lprocfs_register_service(proc_entry, service);

        CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
               service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);

        RETURN(service);

failed:
        ptlrpc_unregister_service(service);
        RETURN(NULL);
}
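
/*
 * Illustrative call (editor's sketch; the portal constants and handler
 * are the identifiers used by the LDLM callback service, but the buffer
 * counts and sizes here are made up):
 *
 *      svc = ptlrpc_init_svc(64, 8 * 1024, 2 * 1024,
 *                            LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
 *                            ldlm_callback_handler, "ldlm_cbd", proc_entry);
 *      if (svc == NULL)
 *              CERROR("failed to start service\n");
 *
 * bufsize must cover max_req_size (see the LASSERT above) since several
 * requests may be received back-to-back into a single buffer, each
 * counted by rqbd_refcount.
 */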
static void
ptlrpc_server_free_request(struct ptlrpc_service *svc,
                           struct ptlrpc_request *req)
{
        unsigned long flags;
        int           refcount;

        spin_lock_irqsave(&svc->srv_lock, flags);
        svc->srv_n_active_reqs--;
        refcount = --(req->rq_rqbd->rqbd_refcount);
        if (refcount == 0) {
                /* request buffer is now idle */
                list_del(&req->rq_rqbd->rqbd_list);
                list_add_tail(&req->rq_rqbd->rqbd_list,
                              &svc->srv_idle_rqbds);
        }
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        ptlrpc_free_server_req(req);
}
static int
ptlrpc_server_handle_request (struct ptlrpc_service *svc)
{
        struct ptlrpc_request *request;
        unsigned long          flags;
        struct timeval         work_start;
        struct timeval         work_end;
        long                   timediff;
        int                    rc;
        ENTRY;

        spin_lock_irqsave (&svc->srv_lock, flags);
        if (list_empty (&svc->srv_request_queue) ||
            (svc->srv_n_difficult_replies != 0 &&
             svc->srv_n_active_reqs >= (svc->srv_nthreads - 1))) {
                /* If all the other threads are handling requests, I must
                 * remain free to handle any 'difficult' reply that might
                 * block them */
                spin_unlock_irqrestore (&svc->srv_lock, flags);
                RETURN(0);
        }

        request = list_entry (svc->srv_request_queue.next,
                              struct ptlrpc_request, rq_list);
        list_del_init (&request->rq_list);
        svc->srv_n_queued_reqs--;
        svc->srv_n_active_reqs++;

        spin_unlock_irqrestore (&svc->srv_lock, flags);

        do_gettimeofday(&work_start);
        timediff = timeval_sub(&work_start, &request->rq_arrival_time);
        if (svc->srv_stats != NULL) {
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
                                    svc->srv_n_queued_reqs);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
                                    svc->srv_n_active_reqs);
        }

        /* Clear request swab mask; this is a new request */
        request->rq_req_swab_mask = 0;

        rc = lustre_unpack_msg (request->rq_reqmsg, request->rq_reqlen);
        if (rc != 0) {
                CERROR ("error unpacking request: ptl %d from "LPX64
                        " xid "LPU64"\n", svc->srv_req_portal,
                        request->rq_peer.peer_nid, request->rq_xid);
                goto out;
        }

        if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type received (type=%u) from "
                       LPX64"\n", request->rq_reqmsg->type,
                       request->rq_peer.peer_nid);
                goto out;
        }

        CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);

        /* Discard requests queued for longer than my timeout.  If the
         * client's timeout is similar to mine, she'll be timing out this
         * REQ anyway (bug 1502) */
        if (timediff / 1000000 > (long)obd_timeout) {
                CERROR("Dropping timed-out opc %d request from "LPX64
                       ": %ld seconds old\n", request->rq_reqmsg->opc,
                       request->rq_peer.peer_nid, timediff / 1000000);
                goto out;
        }

        request->rq_export = class_conn2export(&request->rq_reqmsg->handle);

        if (request->rq_export) {
                if (request->rq_reqmsg->conn_cnt <
                    request->rq_export->exp_conn_cnt) {
                        DEBUG_REQ(D_ERROR, request,
                                  "DROPPING req from old connection %d < %d",
                                  request->rq_reqmsg->conn_cnt,
                                  request->rq_export->exp_conn_cnt);
                        goto put_conn;
                }

                request->rq_export->exp_last_request_time =
                        LTIME_S(CURRENT_TIME);
        }

        CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               request->rq_peer.peer_ni->pni_name,
               request->rq_peer.peer_nid,
               request->rq_reqmsg->opc);

        rc = svc->srv_handler(request);
        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               request->rq_peer.peer_ni->pni_name,
               request->rq_peer.peer_nid,
               request->rq_reqmsg->opc);

put_conn:
        if (request->rq_export != NULL)
                class_export_put(request->rq_export);

out:
        do_gettimeofday(&work_end);

        timediff = timeval_sub(&work_end, &work_start);

        CDEBUG((timediff / 1000000 > (long)obd_timeout) ? D_ERROR : D_HA,
               "request "LPU64" opc %u from NID "LPX64" processed in %ldus "
               "(%ldus total)\n", request->rq_xid, request->rq_reqmsg->opc,
               request->rq_peer.peer_nid,
               timediff, timeval_sub(&work_end, &request->rq_arrival_time));

        if (svc->srv_stats != NULL) {
                int opc = opcode_offset(request->rq_reqmsg->opc);

                LASSERT(opc < LUSTRE_MAX_OPCODES);
                lprocfs_counter_add(svc->srv_stats,
                                    opc + PTLRPC_LAST_CNTR,
                                    timediff);
        }

        ptlrpc_server_free_request(svc, request);

        RETURN(1);
}
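
/*
 * Editor's note on the admission check at the top: with N service threads,
 * at most N - 1 may be busy with requests while difficult replies are
 * pending.  A handler can block waiting for its difficult reply to be
 * acked or committed, and that reply is only progressed by a thread
 * running ptlrpc_server_handle_reply(); keeping one thread free therefore
 * prevents the pool deadlocking against its own reply queue.
 */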
static int
ptlrpc_server_handle_reply (struct ptlrpc_service *svc)
{
        struct ptlrpc_reply_state *rs;
        unsigned long              flags;
        struct obd_export         *exp;
        struct obd_device         *obd;
        int                        nlocks;
        int                        been_handled;
        ENTRY;

        spin_lock_irqsave (&svc->srv_lock, flags);
        if (list_empty (&svc->srv_reply_queue)) {
                spin_unlock_irqrestore (&svc->srv_lock, flags);
                RETURN(0);
        }

        rs = list_entry (svc->srv_reply_queue.next,
                         struct ptlrpc_reply_state, rs_list);

        exp = rs->rs_export;
        obd = exp->exp_obd;

        LASSERT (rs->rs_difficult);
        LASSERT (rs->rs_scheduled);

        list_del_init (&rs->rs_list);

        /* Disengage from notifiers carefully (lock ordering!) */
        spin_unlock(&svc->srv_lock);

        spin_lock (&obd->obd_uncommitted_replies_lock);
        /* Noop if removed already */
        list_del_init (&rs->rs_obd_list);
        spin_unlock (&obd->obd_uncommitted_replies_lock);

        spin_lock (&exp->exp_lock);
        /* Noop if removed already */
        list_del_init (&rs->rs_exp_list);
        spin_unlock (&exp->exp_lock);

        spin_lock(&svc->srv_lock);

        been_handled = rs->rs_handled;
        rs->rs_handled = 1;

        nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
        rs->rs_nlocks = 0;                      /* locks still on rs_locks! */

        if (nlocks == 0 && !been_handled) {
                /* If we see this, we should already have seen the warning
                 * in mds_steal_ack_locks() */
                CWARN("All locks stolen from rs %p x"LPD64".t"LPD64
                      " o%d NID"LPX64"\n",
                      rs, rs->rs_xid, rs->rs_transno,
                      rs->rs_msg.opc, exp->exp_connection->c_peer.peer_nid);
        }

        if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                if (!been_handled && rs->rs_on_net) {
                        PtlMDUnlink(rs->rs_md_h);
                        /* Ignore return code; we're racing with
                         * completion */
                }

                while (nlocks-- > 0)
                        ldlm_lock_decref(&rs->rs_locks[nlocks],
                                         rs->rs_modes[nlocks]);

                spin_lock_irqsave(&svc->srv_lock, flags);
        }

        rs->rs_scheduled = 0;

        if (!rs->rs_on_net) {
                /* Off the net */
                svc->srv_n_difficult_replies--;
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                class_export_put (exp);
                rs->rs_export = NULL;
                lustre_free_reply_state (rs);
                atomic_dec (&svc->srv_outstanding_replies);
                RETURN(1);
        }

        /* still on the net; callback will schedule */
        spin_unlock_irqrestore (&svc->srv_lock, flags);
        RETURN(1);
}
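
/*
 * Editor's summary of the reply-state machine above: a difficult reply
 * stays alive until the network is done with it (rs_on_net clear, forced
 * if necessary by PtlMDUnlink()) and its saved locks have been released;
 * commit notification via ptlrpc_commit_replies() is one of the events
 * that reschedules it.  rs_handled makes the unlink/decref work run
 * exactly once even if the reply is scheduled more than once.
 */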
#ifndef __KERNEL__
/* FIXME make use of timeout later */
int
liblustre_check_services (void *arg)
{
        int  did_something = 0;
        int  rc;
        struct list_head *tmp, *nxt;
        ENTRY;

        /* I'm relying on being single threaded, not to have to lock
         * ptlrpc_all_services etc */
        list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
                struct ptlrpc_service *svc =
                        list_entry (tmp, struct ptlrpc_service, srv_list);

                if (svc->srv_nthreads != 0)     /* I've recursed */
                        continue;

                /* service threads can block for bulk, so this limits us
                 * (arbitrarily) to recursing 1 stack frame per service.
                 * Note that the problem with recursion is that we have to
                 * unwind completely before our caller can resume. */
                svc->srv_nthreads++;

                do {
                        rc = ptlrpc_server_handle_reply(svc);
                        rc |= ptlrpc_server_handle_request(svc);
                        rc |= (ptlrpc_server_post_idle_rqbds(svc) > 0);
                        did_something |= rc;
                } while (rc);

                svc->srv_nthreads--;
        }

        RETURN(did_something);
}
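
/*
 * Editor's note: in liblustre there are no service threads, so the
 * single-threaded application drives services by polling.  Illustrative
 * loop only (wait_for_events() is a hypothetical helper, not part of
 * this file):
 *
 *      while (!done)
 *              if (!liblustre_check_services(NULL))
 *                      wait_for_events();
 *
 * srv_nthreads doubles as the recursion guard here, which is why the
 * kernel-only thread code below also maintains it.
 */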
#else /* __KERNEL__ */

/* Don't use daemonize, it removes fs struct from new thread (bug 418) */
void ptlrpc_daemonize(void)
{
        exit_mm(current);
        lustre_daemonize_helper();
        exit_files(current);
        reparent_to_init();
}
static void
ptlrpc_check_rqbd_pools(struct ptlrpc_service *svc)
{
        struct ptlrpc_srv_ni *sni;
        int                   i;
        int                   avail = 0;
        int                   low_water = svc->srv_nbuf_per_group/2;

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                sni = &svc->srv_interfaces[i];

                avail += sni->sni_nrqbd_receiving;
                /* NB I'm not locking; just looking. */
                if (sni->sni_nrqbd_receiving <= low_water)
                        ptlrpc_grow_req_bufs(sni);
        }

        lprocfs_counter_add(svc->srv_stats, PTLRPC_REQBUF_AVAIL_CNTR, avail);
}
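
/*
 * Worked example (editor's): with srv_nbuf_per_group = 64, low_water is
 * 32; once an interface has 32 or fewer rqbds still posted for receive, a
 * whole new group of 64 is allocated.  The unlocked read is safe because
 * this runs on every pass of each service thread, so under-counting
 * merely delays the top-up until the next pass.
 */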
static int
ptlrpc_retry_rqbds(void *arg)
{
        struct ptlrpc_service *svc = (struct ptlrpc_service *)arg;

        /* timeout callback for the l_wait_event() in ptlrpc_main():
         * re-enable rqbd posting and terminate the wait */
        svc->srv_rqbd_timeout = 0;
        return (-ETIMEDOUT);
}
static int ptlrpc_main(void *arg)
{
        struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
        struct ptlrpc_service  *svc = data->svc;
        struct ptlrpc_thread   *thread = data->thread;
        unsigned long           flags;
        ENTRY;

        lock_kernel();
        ptlrpc_daemonize();

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        LASSERTF(strlen(data->name) < sizeof(current->comm),
                 "name %d > len %d\n",
                 (int)strlen(data->name), (int)sizeof(current->comm));
        THREAD_NAME(current->comm, sizeof(current->comm) - 1, "%s", data->name);

        unlock_kernel();

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        spin_lock_irqsave(&svc->srv_lock, flags);
        svc->srv_nthreads++;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        /* XXX maintain a list of all managed devices: insert here */

        while ((thread->t_flags & SVC_STOPPING) == 0 ||
               svc->srv_n_difficult_replies != 0) {
                /* Don't exit while there are replies to be handled */
                struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
                                                     ptlrpc_retry_rqbds, svc);

                l_wait_event_exclusive (svc->srv_waitq,
                              ((thread->t_flags & SVC_STOPPING) != 0 &&
                               svc->srv_n_difficult_replies == 0) ||
                              (!list_empty(&svc->srv_idle_rqbds) &&
                               svc->srv_rqbd_timeout == 0) ||
                              !list_empty (&svc->srv_reply_queue) ||
                              (!list_empty (&svc->srv_request_queue) &&
                               (svc->srv_n_difficult_replies == 0 ||
                                svc->srv_n_active_reqs <
                                (svc->srv_nthreads - 1))),
                              &lwi);

                ptlrpc_check_rqbd_pools(svc);

                if (!list_empty (&svc->srv_reply_queue))
                        ptlrpc_server_handle_reply (svc);

                /* only handle requests if there are no difficult replies
                 * outstanding, or I'm not the last thread handling
                 * requests */
                if (!list_empty (&svc->srv_request_queue) &&
                    (svc->srv_n_difficult_replies == 0 ||
                     svc->srv_n_active_reqs < (svc->srv_nthreads - 1)))
                        ptlrpc_server_handle_request (svc);

                if (!list_empty(&svc->srv_idle_rqbds) &&
                    ptlrpc_server_post_idle_rqbds(svc) < 0) {
                        /* I just failed to repost request buffers.  Wait
                         * for a timeout (unless something else happens)
                         * before I try again */
                        svc->srv_rqbd_timeout = HZ/10;
                }
        }

        spin_lock_irqsave(&svc->srv_lock, flags);

        svc->srv_nthreads--;                    /* must know immediately */
        thread->t_flags = SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        spin_unlock_irqrestore(&svc->srv_lock, flags);

        CDEBUG(D_NET, "service thread exiting, process %d\n", current->pid);
        RETURN(0);
}
static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                               struct ptlrpc_thread *thread)
{
        struct l_wait_info lwi = { 0 };
        unsigned long      flags;

        spin_lock_irqsave(&svc->srv_lock, flags);
        thread->t_flags = SVC_STOPPING;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        wake_up_all(&svc->srv_waitq);
        l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
                     &lwi);

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_del(&thread->t_link);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        OBD_FREE(thread, sizeof(*thread));
}
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
        unsigned long         flags;
        struct ptlrpc_thread *thread;

        spin_lock_irqsave(&svc->srv_lock, flags);
        while (!list_empty(&svc->srv_threads)) {
                thread = list_entry(svc->srv_threads.next,
                                    struct ptlrpc_thread, t_link);

                spin_unlock_irqrestore(&svc->srv_lock, flags);
                ptlrpc_stop_thread(svc, thread);
                spin_lock_irqsave(&svc->srv_lock, flags);
        }
        spin_unlock_irqrestore(&svc->srv_lock, flags);
}
/* @base_name should be 12 characters or less - 3 will be added on */
int ptlrpc_start_n_threads(struct obd_device *dev, struct ptlrpc_service *svc,
                           int num_threads, char *base_name)
{
        int i;
        int rc = 0;
        ENTRY;

        for (i = 0; i < num_threads; i++) {
                char name[32];

                sprintf(name, "%s_%02d", base_name, i);
                rc = ptlrpc_start_thread(dev, svc, name);
                if (rc) {
                        CERROR("cannot start %s thread #%d: rc %d\n",
                               base_name, i, rc);
                        ptlrpc_stop_all_threads(svc);
                        break;
                }
        }
        RETURN(rc);
}
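
/*
 * Editor's example: base_name "ldlm_cn" yields thread names "ldlm_cn_00",
 * "ldlm_cn_01", ...  The 12-character limit plus the 3 characters "_NN"
 * keeps the result within the 15 usable bytes of current->comm, which
 * the LASSERTF in ptlrpc_main() enforces.
 */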
int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
                        char *name)
{
        struct l_wait_info     lwi = { 0 };
        struct ptlrpc_svc_data d;
        struct ptlrpc_thread  *thread;
        unsigned long          flags;
        int                    rc;
        ENTRY;

        OBD_ALLOC(thread, sizeof(*thread));
        if (thread == NULL)
                RETURN(-ENOMEM);
        init_waitqueue_head(&thread->t_ctl_waitq);

        d.dev = dev;
        d.svc = svc;
        d.name = name;
        d.thread = thread;

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_add(&thread->t_link, &svc->srv_threads);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in ptlrpc_daemonize() right away.
         */
        rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread: %d\n", rc);

                spin_lock_irqsave(&svc->srv_lock, flags);
                list_del(&thread->t_link);
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                OBD_FREE(thread, sizeof(*thread));
                RETURN(rc);
        }
        l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);

        RETURN(0);
}
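
/*
 * Editor's note on the design: d lives on this function's stack, so the
 * l_wait_event() above is not just a convenience.  The parent must not
 * return (invalidating d) until ptlrpc_main() has copied what it needs
 * and raised SVC_RUNNING; the wakeup on t_ctl_waitq is that handshake.
 */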
#endif /* __KERNEL__ */

int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        int                   i;
        int                   rc;
        unsigned long         flags;
        struct ptlrpc_srv_ni *srv_ni;
        struct l_wait_info    lwi;
        struct list_head     *tmp;

        LASSERT(list_empty(&service->srv_threads));

        spin_lock (&ptlrpc_all_services_lock);
        list_del_init (&service->srv_list);
        spin_unlock (&ptlrpc_all_services_lock);

        ptlrpc_lprocfs_unregister_service(service);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG(D_NET, "%s: tearing down interface %s\n",
                       service->srv_name, srv_ni->sni_ni->pni_name);

                /* Unlink all the request buffers.  This forces a 'final'
                 * event with its 'unlink' flag set for each posted rqbd */
                list_for_each(tmp, &srv_ni->sni_active_rqbds) {
                        struct ptlrpc_request_buffer_desc *rqbd =
                                list_entry(tmp,
                                           struct ptlrpc_request_buffer_desc,
                                           rqbd_list);

                        rc = PtlMDUnlink(rqbd->rqbd_md_h);
                        LASSERT (rc == PTL_OK || rc == PTL_INV_MD);
                }

                /* Wait for the network to release any buffers it's
                 * currently filling */
                for (;;) {
                        spin_lock_irqsave(&service->srv_lock, flags);
                        rc = srv_ni->sni_nrqbd_receiving;
                        spin_unlock_irqrestore(&service->srv_lock, flags);

                        if (rc == 0)
                                break;

                        /* Network access will complete in finite time but
                         * the HUGE timeout lets us CWARN for visibility of
                         * sluggish NALs */
                        lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
                        rc = l_wait_event(service->srv_waitq,
                                          srv_ni->sni_nrqbd_receiving == 0,
                                          &lwi);
                        if (rc == -ETIMEDOUT)
                                CWARN("Waiting for request buffers on "
                                      "service %s on interface %s\n",
                                      service->srv_name,
                                      srv_ni->sni_ni->pni_name);
                }

                /* schedule all outstanding replies to terminate them */
                spin_lock_irqsave(&service->srv_lock, flags);
                while (!list_empty(&srv_ni->sni_active_replies)) {
                        struct ptlrpc_reply_state *rs =
                                list_entry(srv_ni->sni_active_replies.next,
                                           struct ptlrpc_reply_state,
                                           rs_list);
                        ptlrpc_schedule_difficult_reply(rs);
                }
                spin_unlock_irqrestore(&service->srv_lock, flags);
        }

        /* purge the request queue.  NB No new replies (rqbds all unlinked)
         * and no service threads, so I'm the only thread noodling the
         * request queue now */
        while (!list_empty(&service->srv_request_queue)) {
                struct ptlrpc_request *req =
                        list_entry(service->srv_request_queue.next,
                                   struct ptlrpc_request,
                                   rq_list);

                list_del(&req->rq_list);
                service->srv_n_queued_reqs--;
                service->srv_n_active_reqs++;

                ptlrpc_server_free_request(service, req);
        }
        LASSERT(service->srv_n_queued_reqs == 0);
        LASSERT(service->srv_n_active_reqs == 0);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                LASSERT(list_empty(&srv_ni->sni_active_rqbds));
        }

        /* Now free all the request buffers since nothing references them
         * any more... */
        while (!list_empty(&service->srv_idle_rqbds)) {
                struct ptlrpc_request_buffer_desc *rqbd =
                        list_entry(service->srv_idle_rqbds.next,
                                   struct ptlrpc_request_buffer_desc,
                                   rqbd_list);

                ptlrpc_free_rqbd(rqbd);
        }

        /* wait for all outstanding replies to complete (they were
         * scheduled having been flagged to abort above) */
        while (atomic_read(&service->srv_outstanding_replies) != 0) {
                struct l_wait_info lwi = LWI_TIMEOUT(10 * HZ, NULL, NULL);

                rc = l_wait_event(service->srv_waitq,
                                  !list_empty(&service->srv_reply_queue),
                                  &lwi);
                LASSERT(rc == 0 || rc == -ETIMEDOUT);

                if (rc == 0) {
                        ptlrpc_server_handle_reply(service);
                        continue;
                }
                CWARN("Unexpectedly long timeout %p\n", service);
        }

        OBD_FREE(service,
                 offsetof(struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]));
        return 0;
}