/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *  This file is part of Lustre, http://www.lustre.org.
 *
 *  Lustre is free software; you can redistribute it and/or
 *  modify it under the terms of version 2 of the GNU General Public
 *  License as published by the Free Software Foundation.
 *
 *  Lustre is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with Lustre; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <liblustre.h>
#include <linux/kp30.h>

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_net.h>
#include <linux/lustre_log.h>
#include <portals/types.h>
#include "ptlrpc_internal.h"

static LIST_HEAD (ptlrpc_all_services);
static spinlock_t ptlrpc_all_services_lock = SPIN_LOCK_UNLOCKED;

static void
ptlrpc_free_server_req (struct ptlrpc_request *req)
{
        /* The last request to be received into a request buffer uses space
         * in the request buffer descriptor, otherwise requests are
         * allocated dynamically in the incoming request event handler */
        if (req == &req->rq_rqbd->rqbd_req)
                return;

        OBD_FREE(req, sizeof(*req));
}

static char *
ptlrpc_alloc_request_buffer (int size)
{
        char *ptr;

        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VMALLOC(ptr, size);
        else
                OBD_ALLOC(ptr, size);

        return (ptr);
}

static void
ptlrpc_free_request_buffer (char *ptr, int size)
{
        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VFREE(ptr, size);
        else
                OBD_FREE(ptr, size);
}

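/* Allocate a request buffer descriptor and its receive buffer, and add
 * it to the service's idle list ready for posting to the network.  The
 * request_in_callback() registered in rqbd_cbid fires when a request
 * lands in the buffer. */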
struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd (struct ptlrpc_srv_ni *srv_ni)
{
        struct ptlrpc_service             *svc = srv_ni->sni_service;
        unsigned long                      flags;
        struct ptlrpc_request_buffer_desc *rqbd;

        OBD_ALLOC(rqbd, sizeof (*rqbd));
        if (rqbd == NULL)
                return (NULL);

        rqbd->rqbd_srv_ni = srv_ni;
        rqbd->rqbd_refcount = 0;
        rqbd->rqbd_cbid.cbid_fn = request_in_callback;
        rqbd->rqbd_cbid.cbid_arg = rqbd;
        rqbd->rqbd_buffer = ptlrpc_alloc_request_buffer(svc->srv_buf_size);

        if (rqbd->rqbd_buffer == NULL) {
                OBD_FREE(rqbd, sizeof (*rqbd));
                return (NULL);
        }

        spin_lock_irqsave (&svc->srv_lock, flags);
        list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
        svc->srv_nbufs++;
        spin_unlock_irqrestore (&svc->srv_lock, flags);

        return (rqbd);
}

void
ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_srv_ni  *sni = rqbd->rqbd_srv_ni;
        struct ptlrpc_service *svc = sni->sni_service;
        unsigned long          flags;

        LASSERT (rqbd->rqbd_refcount == 0);

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_del(&rqbd->rqbd_list);
        svc->srv_nbufs--;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size);
        OBD_FREE (rqbd, sizeof (*rqbd));
}

void
ptlrpc_save_llog_lock (struct ptlrpc_request *req,
                       struct llog_create_locks *lcl)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;

        LASSERT (rs != NULL);
        LASSERT (rs->rs_llog_locks == NULL);

        rs->rs_llog_locks = lcl;
}

void
ptlrpc_require_repack(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;

        LASSERT (rs != NULL);
        rs->rs_difficult = 1;
}

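/* Stash a lock in the reply state so it stays held until the client has
 * seen the reply; this marks the reply 'difficult', i.e. it must be
 * tracked until it is acknowledged and its locks can be released. */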
void
ptlrpc_save_lock (struct ptlrpc_request *req,
                  struct lustre_handle *lock, int mode)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        int                        idx;

        LASSERT (rs != NULL);
        LASSERT (rs->rs_nlocks < RS_MAX_LOCKS);

        idx = rs->rs_nlocks++;
        rs->rs_locks[idx] = *lock;
        rs->rs_modes[idx] = mode;
        rs->rs_difficult = 1;
}

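/* Move a difficult reply onto the service's reply queue and wake a
 * service thread to finalise it.  The caller must hold svc->srv_lock. */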
void
ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;

#ifdef CONFIG_SMP
        LASSERT (spin_is_locked (&svc->srv_lock));
#endif
        LASSERT (rs->rs_difficult);
        rs->rs_scheduled_ever = 1;              /* flag any notification attempt */

        if (rs->rs_scheduled)                   /* being set up or already notified */
                return;

        rs->rs_scheduled = 1;
        list_del (&rs->rs_list);
        list_add (&rs->rs_list, &svc->srv_reply_queue);
        wake_up (&svc->srv_waitq);
}

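/* Called on transaction commit: walk the device's uncommitted replies
 * and hand any whose transno has now committed back to their service
 * so a thread can finalise them. */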
void
ptlrpc_commit_replies (struct obd_device *obd)
{
        struct list_head *tmp;
        struct list_head *nxt;
        unsigned long     flags;

        /* Find any replies that have been committed and get their service
         * to attend to complete them. */

        /* CAVEAT EMPTOR: spinlock ordering!!! */
        spin_lock_irqsave (&obd->obd_uncommitted_replies_lock, flags);

        list_for_each_safe (tmp, nxt, &obd->obd_uncommitted_replies) {
                struct ptlrpc_reply_state *rs =
                        list_entry (tmp, struct ptlrpc_reply_state, rs_obd_list);
                struct llog_create_locks *lcl = rs->rs_llog_locks;

                rs->rs_llog_locks = NULL;
                LASSERT (rs->rs_difficult);

                if (rs->rs_transno <= obd->obd_last_committed) {
                        struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;

                        spin_lock (&svc->srv_lock);
                        list_del_init (&rs->rs_obd_list);
                        ptlrpc_schedule_difficult_reply (rs);
                        spin_unlock (&svc->srv_lock);

                        if (lcl != NULL)
                                llog_create_lock_free(lcl);
                }
        }

        spin_unlock_irqrestore (&obd->obd_uncommitted_replies_lock, flags);
}

static long
timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}

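/* Post a single idle request buffer on the network.  Returns 1 if a
 * buffer was posted, 0 if none were idle, and -1 if posting failed and
 * the buffer was put back on the idle list. */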
static int
ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
{
        struct ptlrpc_srv_ni              *srv_ni;
        struct ptlrpc_request_buffer_desc *rqbd;
        unsigned long                      flags;
        int                                rc;

        spin_lock_irqsave(&svc->srv_lock, flags);
        if (list_empty (&svc->srv_idle_rqbds)) {
                spin_unlock_irqrestore(&svc->srv_lock, flags);
                return (0);
        }

        rqbd = list_entry(svc->srv_idle_rqbds.next,
                          struct ptlrpc_request_buffer_desc,
                          rqbd_list);
        list_del (&rqbd->rqbd_list);

        /* assume we will post successfully */
        srv_ni = rqbd->rqbd_srv_ni;
        srv_ni->sni_nrqbd_receiving++;
        list_add (&rqbd->rqbd_list, &srv_ni->sni_active_rqbds);

        spin_unlock_irqrestore(&svc->srv_lock, flags);

        rc = ptlrpc_register_rqbd(rqbd);
        if (rc == 0)
                return (1);

        spin_lock_irqsave(&svc->srv_lock, flags);

        srv_ni->sni_nrqbd_receiving--;
        list_del(&rqbd->rqbd_list);
        list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);

        if (srv_ni->sni_nrqbd_receiving == 0) {
                /* This service is off-air on this interface because all
                 * its request buffers are busy.  Portals will have started
                 * dropping incoming requests until more buffers get
                 * posted */
                CERROR("All %s %s request buffers busy\n",
                       svc->srv_name, srv_ni->sni_ni->pni_name);
        }

        spin_unlock_irqrestore (&svc->srv_lock, flags);

        return (-1);
}

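/* Create a service listening on req_portal, with nbufs request buffers
 * of bufsize bytes posted on every network interface; replies go out on
 * rep_portal and incoming requests are passed to handler.  An
 * illustrative sketch of a caller (the OST constants and ost_handle
 * below are defined elsewhere in the tree, not in this file):
 *
 *      svc = ptlrpc_init_svc(OST_NBUFS, OST_BUFSIZE, OST_MAXREQSIZE,
 *                            OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
 *                            ost_handle, "ost", obd->obd_proc_entry);
 *      if (svc == NULL)
 *              RETURN(-ENOMEM);
 *      rc = ptlrpc_start_n_threads(obd, svc, OST_NUM_THREADS, "ll_ost");
 */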
struct ptlrpc_service *
ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
                int req_portal, int rep_portal,
                svc_handler_t handler, char *name,
                struct proc_dir_entry *proc_entry)
{
        int                                ssize;
        int                                i;
        int                                j;
        struct ptlrpc_service             *service;
        struct ptlrpc_srv_ni              *srv_ni;
        struct ptlrpc_request_buffer_desc *rqbd;
        ENTRY;

        LASSERT (ptlrpc_ninterfaces > 0);
        LASSERT (nbufs > 0);
        LASSERT (bufsize >= max_req_size);

        ssize = offsetof (struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]);
        OBD_ALLOC(service, ssize);
        if (service == NULL)
                RETURN(NULL);

        service->srv_name = name;
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_max_req_size = max_req_size;
        service->srv_buf_size = bufsize;
        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_handler = handler;

        INIT_LIST_HEAD(&service->srv_request_queue);
        INIT_LIST_HEAD(&service->srv_idle_rqbds);
        INIT_LIST_HEAD(&service->srv_reply_queue);

        /* First initialise enough for early teardown */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];

                srv_ni->sni_service = service;
                srv_ni->sni_ni = &ptlrpc_interfaces[i];
                INIT_LIST_HEAD(&srv_ni->sni_active_rqbds);
                INIT_LIST_HEAD(&srv_ni->sni_active_replies);
        }

        spin_lock (&ptlrpc_all_services_lock);
        list_add (&service->srv_list, &ptlrpc_all_services);
        spin_unlock (&ptlrpc_all_services_lock);

        /* Now allocate the request buffers, assuming all interfaces require
         * the same number. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: initialising interface %s\n", name,
                        srv_ni->sni_ni->pni_name);

                for (j = 0; j < nbufs; j++) {
                        rqbd = ptlrpc_alloc_rqbd (srv_ni);

                        if (rqbd == NULL) {
                                CERROR ("%s.%d: Can't allocate request %d "
                                        "on %s\n", name, i, j,
                                        srv_ni->sni_ni->pni_name);
                                GOTO(failed, NULL);
                        }

                        /* We shouldn't be under memory pressure at
                         * startup, so fail if we can't post all our
                         * buffers at this time. */
                        if (ptlrpc_server_post_idle_rqbds(service) <= 0)
                                GOTO(failed, NULL);
                }
        }

        if (proc_entry != NULL)
                ptlrpc_lprocfs_register_service(proc_entry, service);

        CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
               service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);

        RETURN(service);
failed:
        ptlrpc_unregister_service(service);
        RETURN(NULL);
}

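/* Drop the request's reference on its request buffer descriptor; when
 * the last request drains, the buffer goes back on the idle list for
 * reposting. */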
static void
ptlrpc_server_free_request(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        unsigned long flags;
        int           refcount;

        spin_lock_irqsave(&svc->srv_lock, flags);
        svc->srv_n_active_reqs--;
        refcount = --(req->rq_rqbd->rqbd_refcount);
        if (refcount == 0) {
                /* request buffer is now idle */
                list_del(&req->rq_rqbd->rqbd_list);
                list_add_tail(&req->rq_rqbd->rqbd_list,
                              &svc->srv_idle_rqbds);
        }
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        ptlrpc_free_server_req(req);
}

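/* Dequeue and handle a single incoming request: unpack and sanity-check
 * it, look up the export it was sent under, call the service handler,
 * and record timing statistics.  Returns 1 if a request was handled, 0
 * if the queue was empty or this thread must stay free for difficult
 * replies. */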
static int
ptlrpc_server_handle_request (struct ptlrpc_service *svc)
{
        struct obd_export     *export = NULL;
        struct ptlrpc_request *request;
        unsigned long          flags;
        struct timeval         work_start;
        struct timeval         work_end;
        long                   timediff;
        int                    rc;
        char                   str[PTL_NALFMT_SIZE];
        ENTRY;

        spin_lock_irqsave (&svc->srv_lock, flags);
        if (list_empty (&svc->srv_request_queue) ||
            (svc->srv_n_difficult_replies != 0 &&
             svc->srv_n_active_reqs >= (svc->srv_nthreads - 1))) {
                /* If all the other threads are handling requests, I must
                 * remain free to handle any 'difficult' reply that might
                 * block them */
                spin_unlock_irqrestore (&svc->srv_lock, flags);
                RETURN(0);
        }

        request = list_entry (svc->srv_request_queue.next,
                              struct ptlrpc_request, rq_list);
        list_del_init (&request->rq_list);
        svc->srv_n_queued_reqs--;
        svc->srv_n_active_reqs++;

        spin_unlock_irqrestore (&svc->srv_lock, flags);

        do_gettimeofday(&work_start);
        timediff = timeval_sub(&work_start, &request->rq_arrival_time);
        if (svc->srv_stats != NULL) {
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
                                    svc->srv_n_queued_reqs);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
                                    svc->srv_n_active_reqs);
        }

        /* Clear request swab mask; this is a new request */
        request->rq_req_swab_mask = 0;

        rc = lustre_unpack_msg (request->rq_reqmsg, request->rq_reqlen);
        if (rc != 0) {
                CERROR ("error unpacking request: ptl %d from %s"
                        " xid "LPU64"\n", svc->srv_req_portal,
                        ptlrpc_peernid2str(&request->rq_peer, str),
                        request->rq_xid);
                goto out;
        }

        if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type received (type=%u) from %s\n",
                       request->rq_reqmsg->type,
                       ptlrpc_peernid2str(&request->rq_peer, str));
                goto out;
        }

        CDEBUG(D_NET, "got req "LPD64"\n", request->rq_xid);

        /* Discard requests queued for longer than my timeout.  If the
         * client's timeout is similar to mine, she'll be timing out this
         * REQ anyway (bug 1502) */
        if (timediff / 1000000 > (long)obd_timeout) {
                CERROR("Dropping timed-out request from %s: %ld seconds old\n",
                       ptlrpc_peernid2str(&request->rq_peer, str),
                       timediff / 1000000);
                goto out;
        }

        request->rq_export = class_conn2export(&request->rq_reqmsg->handle);

        if (request->rq_export) {
                if (request->rq_reqmsg->conn_cnt <
                    request->rq_export->exp_conn_cnt) {
                        DEBUG_REQ(D_ERROR, request,
                                  "DROPPING req from old connection %d < %d",
                                  request->rq_reqmsg->conn_cnt,
                                  request->rq_export->exp_conn_cnt);
                        goto put_conn;
                }

                export = class_export_rpc_get(request->rq_export);
                request->rq_export->exp_last_request_time =
                        LTIME_S(CURRENT_TIME);
        }

        CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:%s:%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               request->rq_peer.peer_ni->pni_name,
               ptlrpc_peernid2str(&request->rq_peer, str),
               request->rq_reqmsg->opc);

        rc = svc->srv_handler(request);

        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:%s:%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               request->rq_peer.peer_ni->pni_name,
               ptlrpc_peernid2str(&request->rq_peer, str),
               request->rq_reqmsg->opc);

        if (export != NULL)
                class_export_rpc_put(export);

 put_conn:
        if (request->rq_export != NULL)
                class_export_put(request->rq_export);

 out:
        do_gettimeofday(&work_end);

        timediff = timeval_sub(&work_end, &work_start);

        CDEBUG((timediff / 1000000 > (long)obd_timeout) ? D_ERROR : D_HA,
               "request "LPU64" opc %u from NID %s processed in %ldus "
               "(%ldus total)\n", request->rq_xid, request->rq_reqmsg->opc,
               ptlrpc_peernid2str(&request->rq_peer, str),
               timediff, timeval_sub(&work_end, &request->rq_arrival_time));

        if (svc->srv_stats != NULL) {
                int opc = opcode_offset(request->rq_reqmsg->opc);
                if (opc > 0) {
                        LASSERT(opc < LUSTRE_MAX_OPCODES);
                        lprocfs_counter_add(svc->srv_stats,
                                            opc + PTLRPC_LAST_CNTR,
                                            timediff);
                }
        }

        ptlrpc_server_free_request(svc, request);

        RETURN(1);
}

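/* Finalise one difficult reply: detach it from the commit and export
 * notification lists, release any DLM and llog locks it pinned and,
 * once the network is done with it, free it. */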
static int
ptlrpc_server_handle_reply (struct ptlrpc_service *svc)
{
        struct ptlrpc_reply_state *rs;
        unsigned long              flags;
        struct obd_export         *exp;
        struct obd_device         *obd;
        struct llog_create_locks  *lcl;
        int                        nlocks;
        int                        been_handled;
        char                       str[PTL_NALFMT_SIZE];
        ENTRY;

        spin_lock_irqsave (&svc->srv_lock, flags);
        if (list_empty (&svc->srv_reply_queue)) {
                spin_unlock_irqrestore (&svc->srv_lock, flags);
                RETURN(0);
        }

        rs = list_entry (svc->srv_reply_queue.next,
                         struct ptlrpc_reply_state, rs_list);

        exp = rs->rs_export;
        obd = exp->exp_obd;

        LASSERT (rs->rs_difficult);
        LASSERT (rs->rs_scheduled);

        list_del_init (&rs->rs_list);

        /* Disengage from notifiers carefully (lock ordering!) */
        spin_unlock(&svc->srv_lock);

        spin_lock (&obd->obd_uncommitted_replies_lock);
        /* Noop if removed already */
        list_del_init (&rs->rs_obd_list);
        spin_unlock (&obd->obd_uncommitted_replies_lock);

        spin_lock (&exp->exp_lock);
        /* Noop if removed already */
        list_del_init (&rs->rs_exp_list);
        spin_unlock (&exp->exp_lock);

        spin_lock(&svc->srv_lock);

        been_handled = rs->rs_handled;
        rs->rs_handled = 1;

        nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
        rs->rs_nlocks = 0;                      /* locks still on rs_locks! */

        lcl = rs->rs_llog_locks;
        rs->rs_llog_locks = NULL;

        if (nlocks == 0 && !been_handled) {
                /* If we see this, we should already have seen the warning
                 * in mds_steal_ack_locks() */
#if 0
                /* CMD may ask to save request with no DLM locks -bzzz */
                CWARN("All locks stolen from rs %p x"LPD64".t"LPD64
                      " o%d NID %s\n", rs,
                      rs->rs_xid, rs->rs_transno, rs->rs_msg.opc,
                      ptlrpc_peernid2str(&exp->exp_connection->c_peer, str));
#endif
        }

        if ((!been_handled && rs->rs_on_net) ||
            nlocks > 0 || lcl != NULL) {
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                if (!been_handled && rs->rs_on_net) {
                        PtlMDUnlink(rs->rs_md_h);
                        /* Ignore return code; we're racing with
                         * completion... */
                }

                while (nlocks-- > 0)
                        ldlm_lock_decref(&rs->rs_locks[nlocks],
                                         rs->rs_modes[nlocks]);

                if (lcl != NULL)
                        llog_create_lock_free(lcl);

                spin_lock_irqsave(&svc->srv_lock, flags);
        }

        rs->rs_scheduled = 0;

        if (!rs->rs_on_net) {
                /* Off the net */
                svc->srv_n_difficult_replies--;
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                class_export_put (exp);
                rs->rs_export = NULL;
                lustre_free_reply_state (rs);
                atomic_dec (&svc->srv_outstanding_replies);
                RETURN(1);
        }

        /* still on the net; callback will schedule */
        spin_unlock_irqrestore (&svc->srv_lock, flags);
        RETURN(1);
}

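/* liblustre runs without service threads; the single-threaded loop
 * below is polled from the userspace event loop and services every
 * registered service in turn. */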
#ifndef __KERNEL__
/* FIXME make use of timeout later */
int
liblustre_check_services (void *arg)
{
        int did_something = 0;
        int rc;
        struct list_head *tmp, *nxt;
        ENTRY;

        /* I'm relying on being single threaded, not to have to lock
         * ptlrpc_all_services etc */
        list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
                struct ptlrpc_service *svc =
                        list_entry (tmp, struct ptlrpc_service, srv_list);

                if (svc->srv_nthreads != 0)     /* I've recursed */
                        continue;

                /* service threads can block for bulk, so this limits us
                 * (arbitrarily) to recursing 1 stack frame per service.
                 * Note that the problem with recursion is that we have to
                 * unwind completely before our caller can resume. */
                svc->srv_nthreads++;

                do {
                        rc = ptlrpc_server_handle_reply(svc);
                        rc |= ptlrpc_server_handle_request(svc);
                        rc |= (ptlrpc_server_post_idle_rqbds(svc) > 0);
                        did_something |= rc;
                } while (rc);

                svc->srv_nthreads--;
        }

        RETURN(did_something);
}

#else /* __KERNEL__ */

/* Don't use daemonize, it removes fs struct from new thread (bug 418) */
void ptlrpc_daemonize(void)
{
        exit_mm(current);
        lustre_daemonize_helper();
        exit_files(current);
        reparent_to_init();
}

static int
ptlrpc_retry_rqbds(void *arg)
{
        struct ptlrpc_service *svc = (struct ptlrpc_service *)arg;

        svc->srv_rqbd_timeout = 0;
        return (-ETIMEDOUT);
}

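/* Main loop of a service thread: sleep until there is a reply to
 * finalise, a request this thread is allowed to handle, or an idle
 * request buffer to repost; keep going until told to stop and all
 * difficult replies have drained. */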
static int ptlrpc_main(void *arg)
{
        struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
        struct ptlrpc_service  *svc = data->svc;
        struct ptlrpc_thread   *thread = data->thread;
        unsigned long           flags;
        ENTRY;

        lock_kernel();
        ptlrpc_daemonize();

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        LASSERTF(strlen(data->name) < sizeof(current->comm),
                 "name %d > len %d\n",
                 (int)strlen(data->name), (int)sizeof(current->comm));
        THREAD_NAME(current->comm, sizeof(current->comm) - 1, "%s", data->name);

        unlock_kernel();

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        spin_lock_irqsave(&svc->srv_lock, flags);
        svc->srv_nthreads++;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        /* XXX maintain a list of all managed devices: insert here */

        while ((thread->t_flags & SVC_STOPPING) == 0 ||
               svc->srv_n_difficult_replies != 0) {
                /* Don't exit while there are replies to be handled */
                struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
                                                     ptlrpc_retry_rqbds, svc);

                l_wait_event_exclusive (svc->srv_waitq,
                              ((thread->t_flags & SVC_STOPPING) != 0 &&
                               svc->srv_n_difficult_replies == 0) ||
                              (!list_empty(&svc->srv_idle_rqbds) &&
                               svc->srv_rqbd_timeout == 0) ||
                              !list_empty (&svc->srv_reply_queue) ||
                              (!list_empty (&svc->srv_request_queue) &&
                               (svc->srv_n_difficult_replies == 0 ||
                                svc->srv_n_active_reqs <
                                (svc->srv_nthreads - 1))),
                              &lwi);

                if (!list_empty (&svc->srv_reply_queue))
                        ptlrpc_server_handle_reply (svc);

                /* only handle requests if there are no difficult replies
                 * outstanding, or I'm not the last thread handling
                 * requests */
                if (!list_empty (&svc->srv_request_queue) &&
                    (svc->srv_n_difficult_replies == 0 ||
                     svc->srv_n_active_reqs < (svc->srv_nthreads - 1)))
                        ptlrpc_server_handle_request (svc);

                if (!list_empty(&svc->srv_idle_rqbds) &&
                    ptlrpc_server_post_idle_rqbds(svc) < 0) {
                        /* I just failed to repost request buffers.  Wait
                         * for a timeout (unless something else happens)
                         * before I try again */
                        svc->srv_rqbd_timeout = HZ/10;
                }
        }

        spin_lock_irqsave(&svc->srv_lock, flags);

        svc->srv_nthreads--;                    /* must know immediately */
        thread->t_flags = SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        spin_unlock_irqrestore(&svc->srv_lock, flags);

        CDEBUG(D_NET, "service thread exiting, process %d\n", current->pid);
        return 0;
}

static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                               struct ptlrpc_thread *thread)
{
        struct l_wait_info lwi = { 0 };
        unsigned long      flags;

        spin_lock_irqsave(&svc->srv_lock, flags);
        thread->t_flags = SVC_STOPPING;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        wake_up_all(&svc->srv_waitq);
        l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
                     &lwi);

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_del(&thread->t_link);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        OBD_FREE(thread, sizeof(*thread));
}

void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
        unsigned long         flags;
        struct ptlrpc_thread *thread;

        spin_lock_irqsave(&svc->srv_lock, flags);
        while (!list_empty(&svc->srv_threads)) {
                thread = list_entry(svc->srv_threads.next,
                                    struct ptlrpc_thread, t_link);

                spin_unlock_irqrestore(&svc->srv_lock, flags);
                ptlrpc_stop_thread(svc, thread);
                spin_lock_irqsave(&svc->srv_lock, flags);
        }

        spin_unlock_irqrestore(&svc->srv_lock, flags);
}

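/* Start num_threads service threads named "<base_name>_%02d", so
 * base_name "ll_ost" yields threads ll_ost_00, ll_ost_01, and so on.
 * If any thread fails to start, all the service's threads are
 * stopped. */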
/* @base_name should be 12 characters or less - 3 will be added on */
int ptlrpc_start_n_threads(struct obd_device *dev, struct ptlrpc_service *svc,
                           int num_threads, char *base_name)
{
        int i, rc = 0;
        ENTRY;

        for (i = 0; i < num_threads; i++) {
                char name[32];
                sprintf(name, "%s_%02d", base_name, i);
                rc = ptlrpc_start_thread(dev, svc, name);
                if (rc) {
                        CERROR("cannot start %s thread #%d: rc %d\n", base_name,
                               i, rc);
                        ptlrpc_stop_all_threads(svc);
                }
        }
        RETURN(rc);
}

int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
                        char *name)
{
        struct l_wait_info     lwi = { 0 };
        struct ptlrpc_svc_data d;
        struct ptlrpc_thread  *thread;
        unsigned long          flags;
        int                    rc;
        ENTRY;

        OBD_ALLOC(thread, sizeof(*thread));
        if (thread == NULL)
                RETURN(-ENOMEM);
        init_waitqueue_head(&thread->t_ctl_waitq);

        d.dev = dev;
        d.svc = svc;
        d.name = name;
        d.thread = thread;

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_add(&thread->t_link, &svc->srv_threads);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in ptlrpc_daemonize() right away.
         */
        rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread: %d\n", rc);
                OBD_FREE(thread, sizeof(*thread));
                RETURN(rc);
        }
        l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);

        RETURN(0);
}
#endif /* __KERNEL__ */

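/* Tear down a service: unlink its posted request buffers, wait for the
 * network to release them, abort outstanding difficult replies, purge
 * the request queue, and free everything once nothing references it.
 * Callers must have stopped all service threads first. */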
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        int                   i;
        int                   rc;
        unsigned long         flags;
        struct ptlrpc_srv_ni *srv_ni;
        struct l_wait_info    lwi;
        struct list_head     *tmp;

        LASSERT(list_empty(&service->srv_threads));

        spin_lock (&ptlrpc_all_services_lock);
        list_del_init (&service->srv_list);
        spin_unlock (&ptlrpc_all_services_lock);

        ptlrpc_lprocfs_unregister_service(service);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG(D_NET, "%s: tearing down interface %s\n",
                       service->srv_name, srv_ni->sni_ni->pni_name);

                /* Unlink all the request buffers.  This forces a 'final'
                 * event with its 'unlink' flag set for each posted rqbd */
                list_for_each(tmp, &srv_ni->sni_active_rqbds) {
                        struct ptlrpc_request_buffer_desc *rqbd =
                                list_entry(tmp, struct ptlrpc_request_buffer_desc,
                                           rqbd_list);

                        rc = PtlMDUnlink(rqbd->rqbd_md_h);
                        LASSERT (rc == PTL_OK || rc == PTL_MD_INVALID);
                }

                /* Wait for the network to release any buffers it's
                 * currently filling */
                for (;;) {
                        spin_lock_irqsave(&service->srv_lock, flags);
                        rc = srv_ni->sni_nrqbd_receiving;
                        spin_unlock_irqrestore(&service->srv_lock, flags);

                        if (rc == 0)
                                break;

                        /* Network access will complete in finite time but
                         * the HUGE timeout lets us CWARN for visibility of
                         * sluggish NALs */
                        lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
                        rc = l_wait_event(service->srv_waitq,
                                          srv_ni->sni_nrqbd_receiving == 0,
                                          &lwi);
                        if (rc == -ETIMEDOUT)
                                CWARN("Waiting for request buffers on "
                                      "service %s on interface %s ",
                                      service->srv_name, srv_ni->sni_ni->pni_name);
                }

                /* schedule all outstanding replies to terminate them */
                spin_lock_irqsave(&service->srv_lock, flags);
                while (!list_empty(&srv_ni->sni_active_replies)) {
                        struct ptlrpc_reply_state *rs =
                                list_entry(srv_ni->sni_active_replies.next,
                                           struct ptlrpc_reply_state,
                                           rs_list);
                        ptlrpc_schedule_difficult_reply(rs);
                }
                spin_unlock_irqrestore(&service->srv_lock, flags);
        }

        /* purge the request queue.  NB No new replies (rqbds all unlinked)
         * and no service threads, so I'm the only thread noodling the
         * request queue now */
        while (!list_empty(&service->srv_request_queue)) {
                struct ptlrpc_request *req =
                        list_entry(service->srv_request_queue.next,
                                   struct ptlrpc_request,
                                   rq_list);

                list_del(&req->rq_list);
                service->srv_n_queued_reqs--;
                service->srv_n_active_reqs++;

                ptlrpc_server_free_request(service, req);
        }
        LASSERT(service->srv_n_queued_reqs == 0);
        LASSERT(service->srv_n_active_reqs == 0);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                LASSERT(list_empty(&srv_ni->sni_active_rqbds));
        }

        /* Now free all the request buffers since nothing references them
         * any more... */
        while (!list_empty(&service->srv_idle_rqbds)) {
                struct ptlrpc_request_buffer_desc *rqbd =
                        list_entry(service->srv_idle_rqbds.next,
                                   struct ptlrpc_request_buffer_desc,
                                   rqbd_list);

                ptlrpc_free_rqbd(rqbd);
        }

        /* wait for all outstanding replies to complete (they were
         * scheduled having been flagged to abort above) */
        while (atomic_read(&service->srv_outstanding_replies) != 0) {
                struct l_wait_info lwi = LWI_TIMEOUT(10 * HZ, NULL, NULL);

                rc = l_wait_event(service->srv_waitq,
                                  !list_empty(&service->srv_reply_queue), &lwi);
                LASSERT(rc == 0 || rc == -ETIMEDOUT);

                if (rc == 0) {
                        ptlrpc_server_handle_reply(service);
                        continue;
                }
                CWARN("Unexpectedly long timeout %p\n", service);
        }

        OBD_FREE(service,
                 offsetof(struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]));
        return 0;
}