/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <liblustre.h>
#include <libcfs/kp30.h>

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_net.h>
#include <linux/lustre_sec.h>
#include <linux/lustre_log.h>
#include <portals/types.h>
#include "ptlrpc_internal.h"
/* forward ref */
static int ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc);

static LIST_HEAD (ptlrpc_all_services);
static spinlock_t ptlrpc_all_services_lock = SPIN_LOCK_UNLOCKED;
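
/* Free a request received into a posted buffer.  The security context is
 * torn down first; the request struct itself is only freed when it was
 * allocated dynamically rather than embedded in its rqbd. */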
static void
ptlrpc_free_server_req (struct ptlrpc_request *req)
{
        svcsec_cleanup_req(req);
        svcsec_put(req->rq_svcsec);
        req->rq_svcsec = NULL;

        /* The last request to be received into a request buffer uses space
         * in the request buffer descriptor, otherwise requests are
         * allocated dynamically in the incoming request event handler */
        if (req == &req->rq_rqbd->rqbd_req)
                return;

        OBD_FREE(req, sizeof(*req));
}
static char *
ptlrpc_alloc_request_buffer (int size)
{
        char *ptr;

        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VMALLOC(ptr, size);
        else
                OBD_ALLOC(ptr, size);

        return (ptr);
}
static void
ptlrpc_free_request_buffer (char *ptr, int size)
{
        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VFREE(ptr, size);
        else
                OBD_FREE(ptr, size);
}
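
/* Allocate a request buffer descriptor plus its receive buffer and put it
 * on the service's idle list, ready for posting.  Returns NULL if either
 * allocation fails. */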
struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd (struct ptlrpc_srv_ni *srv_ni)
{
        struct ptlrpc_service             *svc = srv_ni->sni_service;
        unsigned long                      flags;
        struct ptlrpc_request_buffer_desc *rqbd;

        OBD_ALLOC(rqbd, sizeof (*rqbd));
        if (rqbd == NULL)
                return (NULL);

        rqbd->rqbd_srv_ni = srv_ni;
        rqbd->rqbd_refcount = 0;
        rqbd->rqbd_cbid.cbid_fn = request_in_callback;
        rqbd->rqbd_cbid.cbid_arg = rqbd;
        rqbd->rqbd_buffer = ptlrpc_alloc_request_buffer(svc->srv_buf_size);

        if (rqbd->rqbd_buffer == NULL) {
                OBD_FREE(rqbd, sizeof (*rqbd));
                return (NULL);
        }

        spin_lock_irqsave (&svc->srv_lock, flags);
        list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
        svc->srv_nbufs++;
        spin_unlock_irqrestore (&svc->srv_lock, flags);

        return (rqbd);
}
void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_srv_ni  *sni = rqbd->rqbd_srv_ni;
        struct ptlrpc_service *svc = sni->sni_service;
        unsigned long          flags;

        LASSERT (rqbd->rqbd_refcount == 0);

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_del(&rqbd->rqbd_list);
        svc->srv_nbufs--;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        ptlrpc_free_request_buffer (rqbd->rqbd_buffer, svc->srv_buf_size);
        OBD_FREE (rqbd, sizeof (*rqbd));
}
int
ptlrpc_grow_req_bufs(struct ptlrpc_srv_ni *srv_ni)
{
        struct ptlrpc_service             *svc = srv_ni->sni_service;
        struct ptlrpc_request_buffer_desc *rqbd;
        int                                i;

        CDEBUG(D_RPCTRACE, "%s: allocate %d new %d-byte reqbufs (%d/%d left)\n",
               svc->srv_name, svc->srv_nbuf_per_group, svc->srv_buf_size,
               srv_ni->sni_nrqbd_receiving, svc->srv_nbufs);
        for (i = 0; i < svc->srv_nbuf_per_group; i++) {
                rqbd = ptlrpc_alloc_rqbd(srv_ni);

                if (rqbd == NULL) {
                        CERROR ("%s/%s: Can't allocate request buffer\n",
                                svc->srv_name, srv_ni->sni_ni->pni_name);
                        return (-ENOMEM);
                }

                if (ptlrpc_server_post_idle_rqbds(svc) < 0)
                        return (-EAGAIN);
        }

        return (0);
}
void
ptlrpc_save_llog_lock(struct ptlrpc_request *req, struct llog_create_locks *lcl)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        LASSERT (rs != NULL);
        LASSERT (rs->rs_llog_locks == NULL);

        rs->rs_llog_locks = lcl;
}
void
ptlrpc_require_repack(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        LASSERT (rs != NULL);

        rs->rs_difficult = 1;
}
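
/* Stash a lock reference in the reply state so the lock is held until the
 * client has ACKed the reply; the references are dropped in
 * ptlrpc_server_handle_reply().  Saving a lock makes the reply 'difficult':
 * it must persist beyond the send. */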
void
ptlrpc_save_lock (struct ptlrpc_request *req,
                  struct lustre_handle *lock, int mode)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        int                        idx;

        LASSERT (rs != NULL);
        LASSERT (rs->rs_nlocks < RS_MAX_LOCKS);

        idx = rs->rs_nlocks++;
        rs->rs_locks[idx] = *lock;
        rs->rs_modes[idx] = mode;
        rs->rs_difficult = 1;
}
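
/* Move a difficult reply onto the service's reply queue and wake a service
 * thread to attend to it.  The caller must hold svc->srv_lock; a reply
 * that is already scheduled is left alone. */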
void
ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;

#ifdef CONFIG_SMP
        LASSERT (spin_is_locked (&svc->srv_lock));
#endif
        LASSERT (rs->rs_difficult);
        rs->rs_scheduled_ever = 1;              /* flag any notification attempt */

        if (rs->rs_scheduled)                   /* being set up or already notified */
                return;

        rs->rs_scheduled = 1;
        list_del (&rs->rs_list);
        list_add (&rs->rs_list, &svc->srv_reply_queue);
        wake_up (&svc->srv_waitq);
}
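
/* Called when obd_last_committed advances: hand every difficult reply
 * whose transno has now committed back to its service for completion. */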
void
ptlrpc_commit_replies (struct obd_device *obd)
{
        struct list_head *tmp;
        struct list_head *nxt;
        unsigned long     flags;

        /* Find any replies that have been committed and get their service
         * to attend to complete them. */

        /* CAVEAT EMPTOR: spinlock ordering!!! */
        spin_lock_irqsave (&obd->obd_uncommitted_replies_lock, flags);

        list_for_each_safe (tmp, nxt, &obd->obd_uncommitted_replies) {
                struct ptlrpc_reply_state *rs =
                        list_entry (tmp, struct ptlrpc_reply_state, rs_obd_list);
                struct llog_create_locks *lcl = rs->rs_llog_locks;

                rs->rs_llog_locks = NULL;
                LASSERT (rs->rs_difficult);

                if (rs->rs_transno <= obd->obd_last_committed) {
                        struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;

                        spin_lock (&svc->srv_lock);
                        list_del_init (&rs->rs_obd_list);
                        ptlrpc_schedule_difficult_reply (rs);
                        spin_unlock (&svc->srv_lock);

                        if (lcl != NULL)
                                llog_create_lock_free(lcl);
                }
        }

        spin_unlock_irqrestore (&obd->obd_uncommitted_replies_lock, flags);
}
static long
timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}
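
/* Post idle request buffer descriptors until none remain or a post fails.
 * Returns 1 if at least one buffer was posted, 0 if the idle list was
 * empty, and -1 if a post failed; on failure the rqbd goes back on the
 * idle list and callers back off via srv_rqbd_timeout. */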
static int
ptlrpc_server_post_idle_rqbds (struct ptlrpc_service *svc)
{
        struct ptlrpc_srv_ni              *srv_ni;
        struct ptlrpc_request_buffer_desc *rqbd;
        unsigned long                      flags;
        int                                rc;
        int                                posted = 0;

        for (;;) {
                spin_lock_irqsave(&svc->srv_lock, flags);

                if (list_empty (&svc->srv_idle_rqbds)) {
                        spin_unlock_irqrestore(&svc->srv_lock, flags);
                        return (posted);
                }

                rqbd = list_entry(svc->srv_idle_rqbds.next,
                                  struct ptlrpc_request_buffer_desc,
                                  rqbd_list);
                list_del (&rqbd->rqbd_list);

                /* assume we will post successfully */
                srv_ni = rqbd->rqbd_srv_ni;
                srv_ni->sni_nrqbd_receiving++;
                list_add (&rqbd->rqbd_list, &srv_ni->sni_active_rqbds);

                spin_unlock_irqrestore(&svc->srv_lock, flags);

                rc = ptlrpc_register_rqbd(rqbd);
                if (rc != 0)
                        break;

                posted = 1;
        }

        /* the post failed; undo the optimistic accounting above */
        spin_lock_irqsave(&svc->srv_lock, flags);

        srv_ni->sni_nrqbd_receiving--;
        list_del(&rqbd->rqbd_list);
        list_add_tail(&rqbd->rqbd_list, &svc->srv_idle_rqbds);

        if (srv_ni->sni_nrqbd_receiving == 0) {
                /* This service is off-air on this interface because all
                 * its request buffers are busy.  Portals will have started
                 * dropping incoming requests until more buffers get
                 * posted */
                CERROR("All %s %s request buffers busy\n",
                       svc->srv_name, srv_ni->sni_ni->pni_name);
        }

        spin_unlock_irqrestore (&svc->srv_lock, flags);

        return (-1);
}
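
/* Create a service: allocate the per-interface state, link the service
 * onto ptlrpc_all_services and post the initial group of request buffers
 * on every interface.  On failure the partially constructed service is
 * torn down via ptlrpc_unregister_service(). */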
struct ptlrpc_service *
ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
                int req_portal, int rep_portal, int watchdog_timeout,
                svc_handler_t handler, char *name,
                struct proc_dir_entry *proc_entry)
{
        int                    i;
        int                    rc;
        int                    ssize;
        struct ptlrpc_service *service;
        struct ptlrpc_srv_ni  *srv_ni;
        ENTRY;

        LASSERT (ptlrpc_ninterfaces > 0);
        LASSERT (nbufs > 0);
        LASSERT (bufsize >= max_req_size);

        ssize = offsetof (struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]);
        OBD_ALLOC(service, ssize);
        if (service == NULL)
                RETURN(NULL);

        service->srv_name = name;
        spin_lock_init(&service->srv_lock);
        INIT_LIST_HEAD(&service->srv_threads);
        init_waitqueue_head(&service->srv_waitq);

        service->srv_nbuf_per_group = nbufs;
        service->srv_max_req_size = max_req_size;
        service->srv_buf_size = bufsize;
        service->srv_rep_portal = rep_portal;
        service->srv_req_portal = req_portal;
        service->srv_watchdog_timeout = watchdog_timeout;
        service->srv_handler = handler;

        INIT_LIST_HEAD(&service->srv_request_queue);
        INIT_LIST_HEAD(&service->srv_idle_rqbds);
        INIT_LIST_HEAD(&service->srv_reply_queue);

        /* First initialise enough for early teardown */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];

                srv_ni->sni_service = service;
                srv_ni->sni_ni = &ptlrpc_interfaces[i];
                INIT_LIST_HEAD(&srv_ni->sni_active_rqbds);
                INIT_LIST_HEAD(&srv_ni->sni_active_replies);
        }

        spin_lock (&ptlrpc_all_services_lock);
        list_add (&service->srv_list, &ptlrpc_all_services);
        spin_unlock (&ptlrpc_all_services_lock);

        /* Now allocate the request buffers, assuming all interfaces require
         * the same number. */
        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG (D_NET, "%s: initialising interface %s\n", name,
                        srv_ni->sni_ni->pni_name);

                rc = ptlrpc_grow_req_bufs(srv_ni);
                /* We shouldn't be under memory pressure at startup, so
                 * fail if we can't post all our buffers at this time. */
                if (rc != 0)
                        goto failed;
        }

        if (proc_entry != NULL)
                ptlrpc_lprocfs_register_service(proc_entry, service);

        CDEBUG(D_NET, "%s: Started on %d interfaces, listening on portal %d\n",
               service->srv_name, ptlrpc_ninterfaces, service->srv_req_portal);

        RETURN(service);
failed:
        ptlrpc_unregister_service(service);
        RETURN(NULL);
}
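
/* A minimal usage sketch (the MDS constants, mds_handle and proc_entry
 * below are illustrative, not defined in this file):
 *
 *      svc = ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
 *                            MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
 *                            MDS_SERVICE_WATCHDOG_TIMEOUT,
 *                            mds_handle, "mds", proc_entry);
 *      if (svc == NULL)
 *              return -ENOMEM;
 *      rc = ptlrpc_start_n_threads(obd, svc, MDS_NUM_THREADS, "ll_mdt");
 */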
static void
ptlrpc_server_free_request(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        unsigned long flags;
        int           refcount;

        spin_lock_irqsave(&svc->srv_lock, flags);
        svc->srv_n_active_reqs--;
        refcount = --(req->rq_rqbd->rqbd_refcount);
        if (refcount == 0) {
                /* request buffer is now idle */
                list_del(&req->rq_rqbd->rqbd_list);
                list_add_tail(&req->rq_rqbd->rqbd_list,
                              &svc->srv_idle_rqbds);
        }
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        ptlrpc_free_server_req(req);
}
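
/* Dequeue one request and run it to completion: pass it through the
 * security layer, unpack and sanity-check it, look up the export and
 * dispatch to the service handler.  Returns 1 if a request was handled,
 * 0 if the queue was empty or this thread must stay free for difficult
 * replies. */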
static int
ptlrpc_server_handle_request (struct ptlrpc_service *svc)
{
        struct obd_export     *export = NULL;
        struct ptlrpc_request *request;
        unsigned long          flags;
        struct timeval         work_start;
        struct timeval         work_end;
        long                   timediff;
        enum ptlrpcs_error     sec_err;
        int                    secrc;
        int                    rc;
        ENTRY;

        spin_lock_irqsave (&svc->srv_lock, flags);
        if (list_empty (&svc->srv_request_queue) ||
            (svc->srv_n_difficult_replies != 0 &&
             svc->srv_n_active_reqs >= (svc->srv_nthreads - 1))) {
                /* If all the other threads are handling requests, I must
                 * remain free to handle any 'difficult' reply that might
                 * block them */
                spin_unlock_irqrestore (&svc->srv_lock, flags);
                RETURN(0);
        }

        request = list_entry (svc->srv_request_queue.next,
                              struct ptlrpc_request, rq_list);
        list_del_init (&request->rq_list);
        svc->srv_n_queued_reqs--;
        svc->srv_n_active_reqs++;

        spin_unlock_irqrestore (&svc->srv_lock, flags);

        do_gettimeofday(&work_start);
        timediff = timeval_sub(&work_start, &request->rq_arrival_time);
        if (svc->srv_stats != NULL) {
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
                                    svc->srv_n_queued_reqs);
                lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
                                    svc->srv_n_active_reqs);
        }

        /* Clear request swab mask; this is a new request */
        request->rq_req_swab_mask = 0;

        /* go through security check/transform */
        request->rq_auth_uid = -1;
        request->rq_mapped_uid = -1;
        request->rq_remote_realm = 0;
        request->rq_auth_usr_mds = 0;
        request->rq_auth_usr_oss = 0;

        secrc = svcsec_accept(request, &sec_err);
        switch (secrc) {
        case SVC_OK:
                CDEBUG(D_SEC, "request accepted ok\n");
                break;
        case SVC_COMPLETE:
                /* the security layer has already composed the reply */
                target_send_reply(request, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
                goto put_conn;
        case SVC_DROP:
                goto out;
        default:
                LBUG();
        }

        rc = lustre_unpack_msg(request->rq_reqmsg, request->rq_reqlen);
        if (rc != 0) {
                CERROR ("error unpacking request: ptl %d from %s"
                        " xid "LPU64"\n", svc->srv_req_portal,
                        request->rq_peerstr, request->rq_xid);
                goto out;
        }

        if (request->rq_reqmsg->type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type received (type=%u) from %s\n",
                       request->rq_reqmsg->type, request->rq_peerstr);
                goto out;
        }

        CDEBUG(D_NET, "got req "LPD64"\n", request->rq_xid);

        /* Discard requests queued for longer than my timeout.  If the
         * client's timeout is similar to mine, she'll be timing out this
         * REQ anyway (bug 1502) */
        if (timediff / 1000000 > (long)obd_timeout) {
                CERROR("Dropping timed-out opc %d request from %s"
                       ": %ld seconds old\n", request->rq_reqmsg->opc,
                       request->rq_peerstr, timediff / 1000000);
                goto out;
        }

        request->rq_export = class_conn2export(&request->rq_reqmsg->handle);

        if (request->rq_export) {
                if (request->rq_reqmsg->conn_cnt <
                    request->rq_export->exp_conn_cnt) {
                        DEBUG_REQ(D_ERROR, request,
                                  "DROPPING req from old connection %d < %d",
                                  request->rq_reqmsg->conn_cnt,
                                  request->rq_export->exp_conn_cnt);
                        goto put_conn;
                }

                export = class_export_rpc_get(request->rq_export);
                request->rq_export->exp_last_request_time =
                        LTIME_S(CURRENT_TIME);
        }

        CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:%s:%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               request->rq_peer.peer_ni->pni_name,
               request->rq_peerstr,
               request->rq_reqmsg->opc);

        do_gettimeofday(&request->rq_rpcd_start);
        request->rq_svc = svc;
        rc = svc->srv_handler(request);
        request->rq_svc = NULL;

        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:ni:nid:opc "
               "%s:%s+%d:%d:"LPU64":%s:%s:%d\n", current->comm,
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
                atomic_read(&request->rq_export->exp_refcount) : -99),
               request->rq_reqmsg->status, request->rq_xid,
               request->rq_peer.peer_ni->pni_name,
               request->rq_peerstr,
               request->rq_reqmsg->opc);

        if (export != NULL)
                class_export_rpc_put(export);

 put_conn:
        if (request->rq_export != NULL)
                class_export_put(request->rq_export);

 out:
        do_gettimeofday(&work_end);

        timediff = timeval_sub(&work_end, &work_start);

        CDEBUG((timediff / 1000000 > (long)obd_timeout) ? D_ERROR : D_HA,
               "request "LPU64" opc %u from NID %s processed in %ldus "
               "(%ldus total)\n", request->rq_xid,
               request->rq_reqmsg ? request->rq_reqmsg->opc : 0,
               request->rq_peerstr,
               timediff, timeval_sub(&work_end, &request->rq_arrival_time));

        if (svc->srv_stats != NULL && request->rq_reqmsg != NULL) {
                int opc = opcode_offset(request->rq_reqmsg->opc);
                if (opc > 0) {
                        LASSERT(opc < LUSTRE_MAX_OPCODES);
                        lprocfs_counter_add(svc->srv_stats,
                                            opc + PTLRPC_LAST_CNTR,
                                            timediff);
                }
        }

        ptlrpc_server_free_request(svc, request);

        RETURN(1);
}
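
/* Take one reply state off the reply queue and advance its state machine:
 * detach it from the commit and export notification lists, release any
 * saved DLM and llog locks, and free it once it is no longer on the net.
 * Returns 1 if a reply was completed, 0 otherwise. */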
static int
ptlrpc_server_handle_reply (struct ptlrpc_service *svc)
{
        struct ptlrpc_reply_state *rs;
        unsigned long              flags;
        struct obd_export         *exp;
        struct obd_device         *obd;
        struct llog_create_locks  *lcl;
        int                        nlocks;
        int                        been_handled;
        ENTRY;

        spin_lock_irqsave (&svc->srv_lock, flags);
        if (list_empty (&svc->srv_reply_queue)) {
                spin_unlock_irqrestore (&svc->srv_lock, flags);
                RETURN(0);
        }

        rs = list_entry (svc->srv_reply_queue.next,
                         struct ptlrpc_reply_state, rs_list);

        exp = rs->rs_export;
        obd = exp->exp_obd;

        LASSERT (rs->rs_difficult);
        LASSERT (rs->rs_scheduled);

        list_del_init (&rs->rs_list);

        /* Disengage from notifiers carefully (lock ordering!) */
        spin_unlock(&svc->srv_lock);

        spin_lock (&obd->obd_uncommitted_replies_lock);
        /* Noop if removed already */
        list_del_init (&rs->rs_obd_list);
        spin_unlock (&obd->obd_uncommitted_replies_lock);

        spin_lock (&exp->exp_lock);
        /* Noop if removed already */
        list_del_init (&rs->rs_exp_list);
        spin_unlock (&exp->exp_lock);

        spin_lock(&svc->srv_lock);

        been_handled = rs->rs_handled;
        rs->rs_handled = 1;

        nlocks = rs->rs_nlocks;                 /* atomic "steal", but */
        rs->rs_nlocks = 0;                      /* locks still on rs_locks! */

        lcl = rs->rs_llog_locks;
        rs->rs_llog_locks = NULL;

        if (nlocks == 0 && !been_handled) {
                /* If we see this, we should already have seen the warning
                 * in mds_steal_ack_locks() */
                char str[PTL_NALFMT_SIZE];
                /* CMD may ask to save request with no DLM locks -bzzz */
                CWARN("All locks stolen from rs %p x"LPD64".t"LPD64
                      " o%d NID %s\n",
                      rs,
                      rs->rs_xid, rs->rs_transno,
                      rs->rs_msg.opc,
                      ptlrpc_peernid2str(&exp->exp_connection->c_peer, str));
        }

        if ((!been_handled && rs->rs_on_net) ||
            nlocks > 0 || lcl != NULL) {
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                if (!been_handled && rs->rs_on_net) {
                        PtlMDUnlink(rs->rs_md_h);
                        /* Ignore return code; we're racing with
                         * completion... */
                }

                while (nlocks-- > 0)
                        ldlm_lock_decref(&rs->rs_locks[nlocks],
                                         rs->rs_modes[nlocks]);

                if (lcl != NULL)
                        llog_create_lock_free(lcl);

                spin_lock_irqsave(&svc->srv_lock, flags);
        }

        rs->rs_scheduled = 0;

        if (!rs->rs_on_net) {
                /* Off the net */
                svc->srv_n_difficult_replies--;
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                class_export_put (exp);
                rs->rs_export = NULL;
                lustre_free_reply_state (rs);
                atomic_dec (&svc->srv_outstanding_replies);
                RETURN(1);
        }

        /* still on the net; callback will schedule */
        spin_unlock_irqrestore (&svc->srv_lock, flags);
        RETURN(0);
}
#ifndef __KERNEL__

/* FIXME make use of timeout later */
int
liblustre_check_services (void *arg)
{
        int did_something = 0;
        int rc;
        struct list_head *tmp, *nxt;
        ENTRY;

        /* I'm relying on being single threaded, not to have to lock
         * ptlrpc_all_services etc */
        list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
                struct ptlrpc_service *svc =
                        list_entry (tmp, struct ptlrpc_service, srv_list);

                if (svc->srv_nthreads != 0)     /* I've recursed */
                        continue;

                /* service threads can block for bulk, so this limits us
                 * (arbitrarily) to recursing 1 stack frame per service.
                 * Note that the problem with recursion is that we have to
                 * unwind completely before our caller can resume. */

                svc->srv_nthreads++;

                do {
                        rc = ptlrpc_server_handle_reply(svc);
                        rc |= ptlrpc_server_handle_request(svc);
                        rc |= (ptlrpc_server_post_idle_rqbds(svc) > 0);
                        did_something |= rc;
                } while (rc);

                svc->srv_nthreads--;
        }

        RETURN(did_something);
}
#else /* __KERNEL__ */

/* Don't use daemonize, it removes fs struct from new thread (bug 418) */
void ptlrpc_daemonize(void)
{
        exit_mm(current);
        lustre_daemonize_helper();
        exit_files(current);
        reparent_to_init();
}
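
/* Top up the request buffer pools: any interface that has dropped to half
 * a buffer group still receiving gets another group allocated and posted.
 * The counters are read unlocked; this is only a heuristic. */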
static void
ptlrpc_check_rqbd_pools(struct ptlrpc_service *svc)
{
        struct ptlrpc_srv_ni *sni;
        int                   i;
        int                   avail = 0;
        int                   low_water = svc->srv_nbuf_per_group/2;

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                sni = &svc->srv_interfaces[i];

                avail += sni->sni_nrqbd_receiving;
                /* NB I'm not locking; just looking. */
                if (sni->sni_nrqbd_receiving <= low_water)
                        ptlrpc_grow_req_bufs(sni);
        }

        lprocfs_counter_add(svc->srv_stats, PTLRPC_REQBUF_AVAIL_CNTR, avail);
}
static int
ptlrpc_retry_rqbds(void *arg)
{
        struct ptlrpc_service *svc = (struct ptlrpc_service *)arg;

        svc->srv_rqbd_timeout = 0;
        return (-ETIMEDOUT);
}
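
/* Main loop of a service thread: sleep until there is work (a queued
 * request, a scheduled reply, or idle rqbds to repost), handle one item
 * of each kind, and only exit once SVC_STOPPING is set and all difficult
 * replies have drained. */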
static int ptlrpc_main(void *arg)
{
        struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
        struct ptlrpc_service  *svc = data->svc;
        struct ptlrpc_thread   *thread = data->thread;
        struct lc_watchdog     *watchdog;
        unsigned long           flags;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,4)
        struct group_info *ginfo = NULL;
#endif
        ENTRY;

        lock_kernel();
        ptlrpc_daemonize();

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        LASSERTF(strlen(data->name) < sizeof(current->comm),
                 "name %d > len %d\n",
                 (int)strlen(data->name), (int)sizeof(current->comm));
        THREAD_NAME(current->comm, sizeof(current->comm) - 1, "%s", data->name);

        unlock_kernel();

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,4)
        ginfo = groups_alloc(0);
        if (!ginfo) {
                thread->t_flags = SVC_RUNNING;
                wake_up(&thread->t_ctl_waitq);
                return (-ENOMEM);
        }
        set_current_groups(ginfo);
        put_group_info(ginfo);
#endif

        /* Record that the thread is running */
        thread->t_flags = SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        watchdog = lc_watchdog_add(svc->srv_watchdog_timeout,
                                   LC_WATCHDOG_DEFAULT_CB, NULL);

        spin_lock_irqsave(&svc->srv_lock, flags);
        svc->srv_nthreads++;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        /* XXX maintain a list of all managed devices: insert here */

        while ((thread->t_flags & SVC_STOPPING) == 0 ||
               svc->srv_n_difficult_replies != 0) {
                /* Don't exit while there are replies to be handled */
                struct l_wait_info lwi = LWI_TIMEOUT(svc->srv_rqbd_timeout,
                                                     ptlrpc_retry_rqbds, svc);

                lc_watchdog_disable(watchdog);

                l_wait_event_exclusive (svc->srv_waitq,
                              ((thread->t_flags & SVC_STOPPING) != 0 &&
                               svc->srv_n_difficult_replies == 0) ||
                              (!list_empty(&svc->srv_idle_rqbds) &&
                               svc->srv_rqbd_timeout == 0) ||
                              !list_empty (&svc->srv_reply_queue) ||
                              (!list_empty (&svc->srv_request_queue) &&
                               (svc->srv_n_difficult_replies == 0 ||
                                svc->srv_n_active_reqs <
                                (svc->srv_nthreads - 1))),
                              &lwi);

                /* the watchdog is disabled across the wait: with CMD the
                 * server can issue requests to another server to satisfy
                 * this request -bzzz */
                lc_watchdog_touch(watchdog);

                ptlrpc_check_rqbd_pools(svc);

                if (!list_empty (&svc->srv_reply_queue))
                        ptlrpc_server_handle_reply (svc);

                /* only handle requests if there are no difficult replies
                 * outstanding, or I'm not the last thread handling
                 * requests */
                if (!list_empty (&svc->srv_request_queue) &&
                    (svc->srv_n_difficult_replies == 0 ||
                     svc->srv_n_active_reqs < (svc->srv_nthreads - 1)))
                        ptlrpc_server_handle_request (svc);

                if (!list_empty(&svc->srv_idle_rqbds) &&
                    ptlrpc_server_post_idle_rqbds(svc) < 0) {
                        /* I just failed to repost request buffers.  Wait
                         * for a timeout (unless something else happens)
                         * before I try again */
                        svc->srv_rqbd_timeout = HZ/10;
                }
        }

        spin_lock_irqsave(&svc->srv_lock, flags);

        svc->srv_nthreads--;                    /* must know immediately */
        thread->t_flags = SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        spin_unlock_irqrestore(&svc->srv_lock, flags);

        lc_watchdog_delete(watchdog);

        CDEBUG(D_NET, "service thread exiting, process %d\n", current->pid);
        return 0;
}
static void ptlrpc_stop_thread(struct ptlrpc_service *svc,
                               struct ptlrpc_thread *thread)
{
        struct l_wait_info lwi = { 0 };
        unsigned long      flags;

        spin_lock_irqsave(&svc->srv_lock, flags);
        thread->t_flags = SVC_STOPPING;
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        wake_up_all(&svc->srv_waitq);
        l_wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED),
                     &lwi);

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_del(&thread->t_link);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        OBD_FREE(thread, sizeof(*thread));
}
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
        unsigned long flags;
        struct ptlrpc_thread *thread;

        spin_lock_irqsave(&svc->srv_lock, flags);
        while (!list_empty(&svc->srv_threads)) {
                thread = list_entry(svc->srv_threads.next,
                                    struct ptlrpc_thread, t_link);

                spin_unlock_irqrestore(&svc->srv_lock, flags);
                ptlrpc_stop_thread(svc, thread);
                spin_lock_irqsave(&svc->srv_lock, flags);
        }

        spin_unlock_irqrestore(&svc->srv_lock, flags);
}
/* @base_name should be 12 characters or less - 3 will be added on */
int ptlrpc_start_n_threads(struct obd_device *dev, struct ptlrpc_service *svc,
                           int num_threads, char *base_name)
{
        int i, rc = 0;
        ENTRY;

        for (i = 0; i < num_threads; i++) {
                char name[32];

                sprintf(name, "%s_%02d", base_name, i);
                rc = ptlrpc_start_thread(dev, svc, name);
                if (rc) {
                        CERROR("cannot start %s thread #%d: rc %d\n", base_name,
                               i, rc);
                        ptlrpc_stop_all_threads(svc);
                }
        }

        RETURN(rc);
}
int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
                        char *name)
{
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_svc_data d;
        struct ptlrpc_thread *thread;
        unsigned long flags;
        int rc;
        ENTRY;

        OBD_ALLOC(thread, sizeof(*thread));
        if (thread == NULL)
                RETURN(-ENOMEM);
        init_waitqueue_head(&thread->t_ctl_waitq);

        d.dev = dev;
        d.svc = svc;
        d.name = name;
        d.thread = thread;

        spin_lock_irqsave(&svc->srv_lock, flags);
        list_add(&thread->t_link, &svc->srv_threads);
        spin_unlock_irqrestore(&svc->srv_lock, flags);

        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in ptlrpc_daemonize() right away.
         */
        rc = kernel_thread(ptlrpc_main, &d, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread: %d\n", rc);

                /* take the thread back off the service's list before
                 * freeing it */
                spin_lock_irqsave(&svc->srv_lock, flags);
                list_del(&thread->t_link);
                spin_unlock_irqrestore(&svc->srv_lock, flags);

                OBD_FREE(thread, sizeof(*thread));
                RETURN(rc);
        }
        l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING, &lwi);

        RETURN(0);
}
#endif /* __KERNEL__ */
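
/* Tear down a service.  All threads must already have been stopped.  The
 * order matters: unlink the posted buffers, wait for the network to
 * release them, abort outstanding replies, purge the request queue, free
 * the idle rqbds, and finally wait for the aborted replies to complete
 * before freeing the service itself. */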
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        int                   i;
        int                   rc;
        unsigned long         flags;
        struct ptlrpc_srv_ni *srv_ni;
        struct l_wait_info    lwi;
        struct list_head     *tmp;

        LASSERT(list_empty(&service->srv_threads));

        spin_lock (&ptlrpc_all_services_lock);
        list_del_init (&service->srv_list);
        spin_unlock (&ptlrpc_all_services_lock);

        ptlrpc_lprocfs_unregister_service(service);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                CDEBUG(D_NET, "%s: tearing down interface %s\n",
                       service->srv_name, srv_ni->sni_ni->pni_name);

                /* Unlink all the request buffers.  This forces a 'final'
                 * event with its 'unlink' flag set for each posted rqbd */
                list_for_each(tmp, &srv_ni->sni_active_rqbds) {
                        struct ptlrpc_request_buffer_desc *rqbd =
                                list_entry(tmp, struct ptlrpc_request_buffer_desc,
                                           rqbd_list);

                        rc = PtlMDUnlink(rqbd->rqbd_md_h);
                        LASSERT (rc == PTL_OK || rc == PTL_MD_INVALID);
                }

                /* Wait for the network to release any buffers it's
                 * currently filling */
                for (;;) {
                        spin_lock_irqsave(&service->srv_lock, flags);
                        rc = srv_ni->sni_nrqbd_receiving;
                        spin_unlock_irqrestore(&service->srv_lock, flags);

                        if (rc == 0)
                                break;

                        /* Network access will complete in finite time but
                         * the HUGE timeout lets us CWARN for visibility of
                         * sluggish NALs */
                        lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
                        rc = l_wait_event(service->srv_waitq,
                                          srv_ni->sni_nrqbd_receiving == 0,
                                          &lwi);
                        if (rc == -ETIMEDOUT)
                                CWARN("Waiting for request buffers on "
                                      "service %s on interface %s\n",
                                      service->srv_name, srv_ni->sni_ni->pni_name);
                }

                /* schedule all outstanding replies to terminate them */
                spin_lock_irqsave(&service->srv_lock, flags);
                while (!list_empty(&srv_ni->sni_active_replies)) {
                        struct ptlrpc_reply_state *rs =
                                list_entry(srv_ni->sni_active_replies.next,
                                           struct ptlrpc_reply_state,
                                           rs_list);
                        ptlrpc_schedule_difficult_reply(rs);
                }
                spin_unlock_irqrestore(&service->srv_lock, flags);
        }

        /* purge the request queue.  NB No new replies (rqbds all unlinked)
         * and no service threads, so I'm the only thread noodling the
         * request queue now */
        while (!list_empty(&service->srv_request_queue)) {
                struct ptlrpc_request *req =
                        list_entry(service->srv_request_queue.next,
                                   struct ptlrpc_request,
                                   rq_list);

                list_del(&req->rq_list);
                service->srv_n_queued_reqs--;
                service->srv_n_active_reqs++;

                ptlrpc_server_free_request(service, req);
        }
        LASSERT(service->srv_n_queued_reqs == 0);
        LASSERT(service->srv_n_active_reqs == 0);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                srv_ni = &service->srv_interfaces[i];
                LASSERT(list_empty(&srv_ni->sni_active_rqbds));
        }

        /* Now free all the request buffers since nothing references them
         * any more... */
        while (!list_empty(&service->srv_idle_rqbds)) {
                struct ptlrpc_request_buffer_desc *rqbd =
                        list_entry(service->srv_idle_rqbds.next,
                                   struct ptlrpc_request_buffer_desc,
                                   rqbd_list);

                ptlrpc_free_rqbd(rqbd);
        }

        /* wait for all outstanding replies to complete (they were
         * scheduled having been flagged to abort above) */
        while (atomic_read(&service->srv_outstanding_replies) != 0) {
                struct l_wait_info lwi = LWI_TIMEOUT(10 * HZ, NULL, NULL);

                rc = l_wait_event(service->srv_waitq,
                                  !list_empty(&service->srv_reply_queue), &lwi);
                LASSERT(rc == 0 || rc == -ETIMEDOUT);

                if (rc == 0) {
                        ptlrpc_server_handle_reply(service);
                        continue;
                }

                CWARN("Unexpectedly long timeout %p\n", service);
        }

        OBD_FREE(service,
                 offsetof(struct ptlrpc_service,
                          srv_interfaces[ptlrpc_ninterfaces]));
        return 0;
}