1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/pinger.c
38 * Portal-RPC reconnection and replay operations, for use in recovery.
42 #include <liblustre.h>
44 #define DEBUG_SUBSYSTEM S_RPC
47 #include <obd_support.h>
48 #include <obd_class.h>
49 #include <lustre_net.h>
50 #include "ptlrpc_internal.h"
53 /* What time pinger is supposed to wake up next.
54 * pd_next_ping is the equivalent for liblustre. */
55 cfs_time_t pinger_next_wake;
/* Serializes access to the pinger state (the import and timeout lists
 * below); taken via mutex_down()/mutex_up() throughout this file. */
58 struct semaphore pinger_sem;
/* All imports registered for periodic pinging, linked on imp_pinger_chain. */
59 static struct list_head pinger_imports = CFS_LIST_HEAD_INIT(pinger_imports);
/* Registered timeout callbacks (struct timeout_item); kept sorted by
 * increasing ti_timeout -- see ptlrpc_pinger_register_timeout(). */
60 static struct list_head timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
/* Allocate and initialize an OBD_PING request for import @imp.
 * The request is marked no-resend/no-delay so a lost ping fails fast
 * rather than lingering behind other traffic.
 * NOTE(review): the NULL check on the ptlrpc_prep_req() result and the
 * return statement are not visible in this excerpt -- confirm upstream. */
62 struct ptlrpc_request *
63 ptlrpc_prep_ping(struct obd_import *imp)
65 struct ptlrpc_request *req;
67 req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION,
68 OBD_PING, 1, NULL, NULL);
/* A ping reply carries no body; reserve a single minimal reply buffer. */
70 ptlrpc_req_set_repsize(req, 1, NULL);
71 req->rq_no_resend = req->rq_no_delay = 1;
/* Synchronously ping the server behind @obd's client import.
 * Blocks in ptlrpc_queue_wait() until a reply or error, then releases
 * the request; returns the RPC result code. */
76 int ptlrpc_obd_ping(struct obd_device *obd)
79 struct ptlrpc_request *req;
82 req = ptlrpc_prep_ping(obd->u.cli.cl_import);
/* Only send once the import is fully connected. */
86 req->rq_send_state = LUSTRE_IMP_FULL;
88 rc = ptlrpc_queue_wait(req);
90 ptlrpc_req_finished(req);
94 EXPORT_SYMBOL(ptlrpc_obd_ping);
/* Asynchronously ping @imp by handing the request to ptlrpcd.
 * Overrides the adaptive-timeout estimate with a small fixed ping
 * timeout (see the comment below); logs an error on allocation failure. */
96 int ptlrpc_ping(struct obd_import *imp)
98 struct ptlrpc_request *req;
102 req = ptlrpc_prep_ping(imp);
104 DEBUG_REQ(D_INFO, req, "pinging %s->%s",
105 imp->imp_obd->obd_uuid.uuid,
106 obd2cli_tgt(imp->imp_obd));
108 /* To quickly detect server failure ping timeouts must be
109 * kept small. Therefore we must override/ignore the server
110 * rpc completion estimate which may be very large since
111 * it includes non-ping service times. The right long term
112 * fix will be to add a per-server (not per-service) thread
113 * in order to reduce the number of pings in the system in
114 * general (see bug 12471). */
116 req->rq_timeout = PING_SVC_TIMEOUT +
117 at_get(&imp->imp_at.iat_net_latency);
118 lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
121 ptlrpcd_add_req(req);
123 CERROR("OOM trying to ping %s->%s\n",
124 imp->imp_obd->obd_uuid.uuid,
125 obd2cli_tgt(imp->imp_obd));
131 EXPORT_SYMBOL(ptlrpc_ping);
/* Recompute imp->imp_next_ping for @imp, and wake the pinger thread if
 * the new deadline falls before its currently scheduled wakeup.
 * @soon: use the short ping interval (but never push back a ping that
 * is already due sooner than the new deadline). */
133 static void ptlrpc_update_next_ping(struct obd_import *imp, int soon)
136 cfs_time_t delay, dtime, ctime = cfs_time_current();
138 if (imp->imp_state == LUSTRE_IMP_DISCON ||
139 imp->imp_state == LUSTRE_IMP_CONNECTING) {
140 /* In the disconnected case aggressively reconnect, for
141 * this request the AT service timeout will be set to
142 * INITIAL_CONNECT_TIMEOUT. To ensure the request times
143 * out before we send another we add one extra second. */
144 dtime = cfs_time_seconds(max_t(int, CONNECTION_SWITCH_MIN,
145 AT_OFF ? 0 : INITIAL_CONNECT_TIMEOUT + 1 +
146 at_get(&imp->imp_at.iat_net_latency)));
148 /* In the common case we want to cluster the pings at
149 * regular intervals to minimize system noise. */
150 delay = cfs_time_seconds(soon ? PING_INTERVAL_SHORT :
152 dtime = delay - (ctime % delay);
155 dtime = cfs_time_add(ctime, dtime);
157 if (soon && cfs_time_after(imp->imp_next_ping, ctime) &&
158 cfs_time_after(dtime, imp->imp_next_ping)) {
159 /* if the next ping is due to be sent before the
160 * new deadline, don't delay it */
164 /* May harmlessly race with ptlrpc_update_next_ping() */
165 imp->imp_next_ping = dtime;
168 if (pinger_next_wake != 0 && cfs_time_after(pinger_next_wake, dtime))
169 /* pinger is supposed to sleep until after the new ping
170 * deadline, wake it up to take into account our update.
171 * not needed for liblustre which updates pd_next_ping. */
172 ptlrpc_pinger_wake_up();
175 CDEBUG(D_INFO, "Setting %s next ping to "CFS_TIME_T" ("CFS_TIME_T")\n",
176 obd2cli_tgt(imp->imp_obd), imp->imp_next_ping, dtime);
178 #endif /* ENABLE_PINGER */
/* True if @imp has been administratively deactivated (or the matching
 * OBD_FAIL test point is armed, which simulates deactivation). */
181 static inline int imp_is_deactive(struct obd_import *imp)
183 return (imp->imp_deactive ||
184 OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_IMP_DEACTIVE));
/* Compute the pinger's next wakeup relative to @time: PING_INTERVAL,
 * shortened to the smallest registered timeout-item interval. */
187 cfs_duration_t pinger_check_timeout(cfs_time_t time)
189 struct timeout_item *item;
190 cfs_time_t timeout = PING_INTERVAL;
192 /* The timeout list is an increasing-order sorted list */
193 mutex_down(&pinger_sem);
194 list_for_each_entry(item, &timeout_list, ti_chain) {
195 int ti_timeout = item->ti_timeout;
196 if (timeout > ti_timeout)
197 timeout = ti_timeout;
200 mutex_up(&pinger_sem);
202 return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
/* Body of the kernel pinger thread.  Each iteration: run all registered
 * timeout callbacks, walk the registered imports and ping every one
 * whose deadline is due (or initiate recovery for disconnected ones),
 * then sleep until the earliest next deadline or until woken by
 * SVC_STOPPING / SVC_EVENT.  Exits when stopped, marking SVC_STOPPED. */
207 static int ptlrpc_pinger_main(void *arg)
209 struct ptlrpc_svc_data *data = (struct ptlrpc_svc_data *)arg;
210 struct ptlrpc_thread *thread = data->thread;
213 cfs_daemonize(data->name);
215 /* Record that the thread is running */
216 thread->t_flags = SVC_RUNNING;
217 cfs_waitq_signal(&thread->t_ctl_waitq);
219 /* And now, loop forever, pinging as needed. */
221 cfs_time_t this_ping = cfs_time_current();
222 struct l_wait_info lwi;
223 cfs_duration_t time_to_next_wake;
224 cfs_time_t time_of_next_wake;
225 struct timeout_item *item;
226 struct list_head *iter;
228 time_of_next_wake = cfs_time_shift(PING_INTERVAL);
230 mutex_down(&pinger_sem);
/* Fire every registered timeout callback first ... */
231 list_for_each_entry(item, &timeout_list, ti_chain) {
232 item->ti_cb(item, item->ti_cb_data);
/* ... then examine each import registered for pinging. */
234 list_for_each(iter, &pinger_imports) {
235 struct obd_import *imp =
236 list_entry(iter, struct obd_import,
/* Snapshot state and consume the one-shot force-verify flag
 * under imp_lock. */
240 spin_lock(&imp->imp_lock);
241 level = imp->imp_state;
242 force = imp->imp_force_verify;
243 imp->imp_force_verify = 0;
244 spin_unlock(&imp->imp_lock);
246 CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_RPCTRACE,
247 "level %s/%u force %u deactive %u pingable %u\n",
248 ptlrpc_import_state_name(level), level,
249 force, imp->imp_deactive, imp->imp_pingable);
251 /* Include any ping which misses the deadline by up to
252 * 1/10 of a second. The pings are designed to clump
253 * and this helps ensure the entire batch gets sent
254 * promptly, which minimizes system noise from pings */
257 cfs_time_aftereq(this_ping, imp->imp_next_ping -
258 (cfs_time_seconds(1) + 9) / 10)) {
259 if (level == LUSTRE_IMP_DISCON &&
260 !imp_is_deactive(imp)) {
/* Disconnected but still active: try to reconnect. */
261 ptlrpc_update_next_ping(imp, 0);
262 ptlrpc_initiate_recovery(imp);
263 } else if (level != LUSTRE_IMP_FULL ||
264 imp->imp_obd->obd_no_recov ||
265 imp_is_deactive(imp)) {
266 CDEBUG(D_HA, "not pinging %s "
267 "(in recovery: %s or recovery "
268 "disabled: %u/%u)\n",
269 obd2cli_tgt(imp->imp_obd),
270 ptlrpc_import_state_name(level),
272 imp->imp_obd->obd_no_recov);
273 } else if (imp->imp_pingable || force) {
275 /* ptlrpc_pinger_sending_on_import()
276 * will asynch update imp_next_ping
277 * so it must not be used below to
278 * calculate minimum wait time. */
282 if (!imp->imp_pingable)
285 "don't need to ping %s ("CFS_TIME_T
286 " > "CFS_TIME_T")\n",
287 obd2cli_tgt(imp->imp_obd),
288 imp->imp_next_ping, this_ping);
291 /* Wait time until next ping, or until we stopped. */
292 if (cfs_time_before(imp->imp_next_ping,
294 time_of_next_wake = imp->imp_next_ping;
/* Publish the planned wakeup so ptlrpc_update_next_ping() can
 * tell whether it needs to wake us early. */
296 pinger_next_wake = time_of_next_wake;
297 mutex_up(&pinger_sem);
298 obd_update_maxusage();
300 time_to_next_wake = max_t(cfs_duration_t,
302 cfs_time_sub(time_of_next_wake,
303 cfs_time_current()));
304 CDEBUG(D_INFO, "next ping in "CFS_DURATION_T" ("CFS_TIME_T")\n",
305 time_to_next_wake, time_of_next_wake);
307 if (time_to_next_wake > 0) {
308 lwi = LWI_TIMEOUT(time_to_next_wake, NULL, NULL);
309 l_wait_event(thread->t_ctl_waitq,
310 thread->t_flags & (SVC_STOPPING|SVC_EVENT),
312 if (thread->t_flags & SVC_STOPPING) {
313 thread->t_flags &= ~SVC_STOPPING;
316 } else if (thread->t_flags & SVC_EVENT) {
317 /* woken after adding import to reset timer */
318 thread->t_flags &= ~SVC_EVENT;
/* Tell ptlrpc_stop_pinger() we are gone. */
323 thread->t_flags = SVC_STOPPED;
324 cfs_waitq_signal(&thread->t_ctl_waitq);
326 CDEBUG(D_NET, "pinger thread exiting, process %d\n", cfs_curproc_pid());
/* Singleton control block for the kernel pinger thread; non-NULL while
 * the pinger is (being) started. */
330 static struct ptlrpc_thread *pinger_thread = NULL;
/* Start the kernel pinger thread (no-op without ENABLE_PINGER).
 * Allocates the thread control block, spawns ptlrpc_pinger_main() and
 * waits until it signals SVC_RUNNING before returning. */
332 int ptlrpc_start_pinger(void)
334 struct l_wait_info lwi = { 0 };
335 struct ptlrpc_svc_data d;
337 #ifndef ENABLE_PINGER
/* Already started: nothing to do. */
342 if (pinger_thread != NULL)
345 OBD_ALLOC(pinger_thread, sizeof(*pinger_thread));
346 if (pinger_thread == NULL)
348 cfs_waitq_init(&pinger_thread->t_ctl_waitq);
351 d.thread = pinger_thread;
353 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
354 * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
355 rc = cfs_kernel_thread(ptlrpc_pinger_main, &d, CLONE_VM | CLONE_FILES);
357 CERROR("cannot start thread: %d\n", rc);
358 OBD_FREE(pinger_thread, sizeof(*pinger_thread));
359 pinger_thread = NULL;
/* d is on our stack; wait for the new thread to copy what it needs
 * and announce itself before we return. */
362 l_wait_event(pinger_thread->t_ctl_waitq,
363 pinger_thread->t_flags & SVC_RUNNING, &lwi);
368 int ptlrpc_pinger_remove_timeouts(void);
/* Stop the kernel pinger thread (no-op without ENABLE_PINGER): clear
 * the timeout list, request SVC_STOPPING, wait for SVC_STOPPED, then
 * free the thread control block. */
370 int ptlrpc_stop_pinger(void)
372 struct l_wait_info lwi = { 0 };
374 #ifndef ENABLE_PINGER
379 if (pinger_thread == NULL)
382 ptlrpc_pinger_remove_timeouts();
383 mutex_down(&pinger_sem);
384 pinger_thread->t_flags = SVC_STOPPING;
385 cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
386 mutex_up(&pinger_sem);
388 l_wait_event(pinger_thread->t_ctl_waitq,
389 (pinger_thread->t_flags & SVC_STOPPED), &lwi);
391 OBD_FREE(pinger_thread, sizeof(*pinger_thread));
392 pinger_thread = NULL;
/* A (ping-equivalent) RPC was just sent on @imp: push its next ping out
 * by a full interval. */
396 void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
398 ptlrpc_update_next_ping(imp, 0);
/* A commit is expected soon on @imp: schedule the next ping using the
 * short interval so the commit is confirmed promptly. */
401 void ptlrpc_pinger_commit_expected(struct obd_import *imp)
403 ptlrpc_update_next_ping(imp, 1);
/* Register @imp with the pinger.  Takes an import reference, enables
 * recovery on the obd, schedules the first ping and wakes the pinger
 * thread.  Idempotent: a no-op if the import is already registered. */
406 int ptlrpc_pinger_add_import(struct obd_import *imp)
409 if (!list_empty(&imp->imp_pinger_chain))
412 mutex_down(&pinger_sem);
413 CDEBUG(D_HA, "adding pingable import %s->%s\n",
414 imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
415 /* if we add to pinger we want recovery on this import */
416 imp->imp_obd->obd_no_recov = 0;
418 ptlrpc_update_next_ping(imp, 0);
419 /* XXX sort, blah blah */
420 list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
/* The pinger list holds its own reference; dropped in del_import. */
421 class_import_get(imp);
423 ptlrpc_pinger_wake_up();
424 mutex_up(&pinger_sem);
/* Unregister @imp from the pinger: unlink it, disable recovery on the
 * obd, and drop the reference taken by ptlrpc_pinger_add_import().
 * A no-op if the import is not currently registered. */
429 int ptlrpc_pinger_del_import(struct obd_import *imp)
432 if (list_empty(&imp->imp_pinger_chain))
435 mutex_down(&pinger_sem);
436 list_del_init(&imp->imp_pinger_chain);
437 CDEBUG(D_HA, "removing pingable import %s->%s\n",
438 imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
439 /* if we remove from pinger we don't want recovery on this import */
440 imp->imp_obd->obd_no_recov = 1;
441 class_import_put(imp);
442 mutex_up(&pinger_sem);
447 * Allocate and initialize a timeout item; its callback @cb(@data) will
448 * be invoked by the pinger when the timeout fires.
/* NOTE(review): the allocation and NULL-check of @ti are not visible in
 * this excerpt -- confirm upstream. */
450 struct timeout_item* ptlrpc_new_timeout(int time, enum timeout_event event,
451 timeout_cb_t cb, void *data)
453 struct timeout_item *ti;
459 CFS_INIT_LIST_HEAD(&ti->ti_obd_list);
460 CFS_INIT_LIST_HEAD(&ti->ti_chain);
461 ti->ti_timeout = time;
462 ti->ti_event = event;
464 ti->ti_cb_data = data;
470 * Register a timeout event on the pinger thread.
471 * Note: the timeout list is a sorted list with increasing timeout values.
/* Finds an existing item for @event, or allocates one and inserts it at
 * the position that keeps the list sorted.  Caller holds pinger_sem. */
473 static struct timeout_item*
474 ptlrpc_pinger_register_timeout(int time, enum timeout_event event,
475 timeout_cb_t cb, void *data)
477 struct timeout_item *item, *tmp;
479 LASSERT_SEM_LOCKED(&pinger_sem);
/* Reuse an existing item registered for the same event. */
481 list_for_each_entry(item, &timeout_list, ti_chain)
482 if (item->ti_event == event)
485 item = ptlrpc_new_timeout(time, event, cb, data);
/* Walk backwards to find the last entry with a smaller timeout and
 * insert after it, preserving the sort order. */
487 list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
488 if (tmp->ti_timeout < time) {
489 list_add(&item->ti_chain, &tmp->ti_chain);
/* Smallest timeout so far: goes at the head of the list. */
493 list_add(&item->ti_chain, &timeout_list);
499 /* Add a client_obd to the timeout event list, when timeout(@time)
500 * happens, the callback(@cb) will be called.
/* @obd_list is the client's link node, chained onto the (possibly newly
 * registered) timeout item's ti_obd_list under pinger_sem. */
502 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
503 timeout_cb_t cb, void *data,
504 struct list_head *obd_list)
506 struct timeout_item *ti;
508 mutex_down(&pinger_sem);
509 ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
/* Registration failed (presumably allocation): back out. */
511 mutex_up(&pinger_sem);
514 list_add(obd_list, &ti->ti_obd_list);
515 mutex_up(&pinger_sem);
/* Remove a client (@obd_list link) from its timeout event; if that
 * leaves the @event item with no clients, unregister the item itself. */
519 int ptlrpc_del_timeout_client(struct list_head *obd_list,
520 enum timeout_event event)
522 struct timeout_item *ti = NULL, *item;
524 if (list_empty(obd_list))
526 mutex_down(&pinger_sem);
527 list_del_init(obd_list);
529 * If there are no obd attached to the timeout event
530 * list, remove this timeout event from the pinger
532 list_for_each_entry(item, &timeout_list, ti_chain) {
533 if (item->ti_event == event) {
/* The caller was registered under @event, so the item must exist. */
538 LASSERTF(ti != NULL, "ti is NULL ! \n");
539 if (list_empty(&ti->ti_obd_list)) {
540 list_del(&ti->ti_chain);
543 mutex_up(&pinger_sem);
/* Tear down the whole timeout list (used on pinger shutdown).  Every
 * item must already have an empty client list. */
547 int ptlrpc_pinger_remove_timeouts(void)
549 struct timeout_item *item, *tmp;
551 mutex_down(&pinger_sem);
552 list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
553 LASSERT(list_empty(&item->ti_obd_list));
554 list_del(&item->ti_chain);
557 mutex_up(&pinger_sem);
/* Wake the pinger thread early by flagging SVC_EVENT, e.g. after a new
 * import is added or a ping deadline moves forward. */
561 void ptlrpc_pinger_wake_up()
564 pinger_thread->t_flags |= SVC_EVENT;
565 cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
569 /* Ping evictor thread */
571 #define PET_TERMINATE 2
/* Evictor lifecycle: refcount of start/stop calls, current state, the
 * wait queue the evictor sleeps on, the queue of obds with stale
 * exports, and the lock (pet_lock) protecting that queue/state. */
573 static int pet_refcount = 0;
574 static int pet_state;
575 static wait_queue_head_t pet_waitq;
576 CFS_LIST_HEAD(pet_list);
577 static spinlock_t pet_lock = SPIN_LOCK_UNLOCKED;
/* Queue @exp's obd for the ping evictor and wake it.  Safe to call
 * before the evictor is ready; duplicate queuing is avoided by checking
 * list_empty() on obd_evict_list. */
579 int ping_evictor_wake(struct obd_export *exp)
581 struct obd_device *obd;
583 spin_lock(&pet_lock);
584 if (pet_state != PET_READY) {
585 /* eventually the new obd will call here again. */
586 spin_unlock(&pet_lock);
590 obd = class_exp2obd(exp);
591 if (list_empty(&obd->obd_evict_list)) {
593 list_add(&obd->obd_evict_list, &pet_list);
595 spin_unlock(&pet_lock);
/* Body of the ping evictor thread.  Sleeps until an obd is queued (or
 * termination is requested), then evicts every export of that obd whose
 * last request is older than PING_EVICT_TIMEOUT.  Loops until told to
 * terminate and the queue drains. */
601 static int ping_evictor_main(void *arg)
603 struct obd_device *obd;
604 struct obd_export *exp;
605 struct l_wait_info lwi = { 0 };
609 cfs_daemonize_ctxt("ll_evictor");
611 CDEBUG(D_HA, "Starting Ping Evictor\n");
612 pet_state = PET_READY;
614 l_wait_event(pet_waitq, (!list_empty(&pet_list)) ||
615 (pet_state == PET_TERMINATE), &lwi);
617 /* loop until all obd's will be removed */
618 if ((pet_state == PET_TERMINATE) && list_empty(&pet_list))
621 /* we only get here if pet_exp != NULL, and the end of this
622 * loop is the only place which sets it NULL again, so lock
623 * is not strictly necessary. */
624 spin_lock(&pet_lock);
625 obd = list_entry(pet_list.next, struct obd_device,
627 spin_unlock(&pet_lock);
629 /* bug 18948: ensure recovery is aborted in a timely fashion */
630 if (target_recovery_check_and_stop(obd) ||
631 obd->obd_recovering /* no evictor during recovery */)
/* Any export quiet for longer than this is considered dead. */
634 expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
636 CDEBUG(D_HA, "evicting all exports of obd %s older than %ld\n",
637 obd->obd_name, expire_time);
639 /* Exports can't be deleted out of the list while we hold
640 * the obd lock (class_unlink_export), which means we can't
641 * lose the last ref on the export. If they've already been
642 * removed from the list, we won't find them here. */
643 spin_lock(&obd->obd_dev_lock);
644 while (!list_empty(&obd->obd_exports_timed)) {
645 exp = list_entry(obd->obd_exports_timed.next,
646 struct obd_export,exp_obd_chain_timed);
647 if (expire_time > exp->exp_last_request_time) {
/* Hold a ref across the unlocked eviction so the export
 * can't disappear under us. */
648 class_export_get(exp);
649 spin_unlock(&obd->obd_dev_lock);
650 LCONSOLE_WARN("%s: haven't heard from client %s"
651 " (at %s) in %ld seconds. I think"
652 " it's dead, and I am evicting"
653 " it.\n", obd->obd_name,
654 obd_uuid2str(&exp->exp_client_uuid),
655 obd_export_nid2str(exp),
656 (long)(cfs_time_current_sec() -
657 exp->exp_last_request_time));
658 CDEBUG(D_HA, "Last request was at %ld\n",
659 exp->exp_last_request_time);
660 class_fail_export(exp);
661 class_export_put(exp);
662 spin_lock(&obd->obd_dev_lock);
664 /* List is sorted, so everyone below is ok */
668 spin_unlock(&obd->obd_dev_lock);
/* Done with this obd: take it off the evictor queue. */
670 spin_lock(&pet_lock);
671 list_del_init(&obd->obd_evict_list);
672 spin_unlock(&pet_lock);
677 CDEBUG(D_HA, "Exiting Ping Evictor\n");
/* Start the ping evictor thread; reference-counted, so only the first
 * caller actually spawns it.  Logs an error if the spawn fails. */
682 void ping_evictor_start(void)
686 if (++pet_refcount > 1)
689 init_waitqueue_head(&pet_waitq);
691 rc = cfs_kernel_thread(ping_evictor_main, NULL, CLONE_VM | CLONE_FILES);
694 CERROR("Cannot start ping evictor thread: %d\n", rc);
697 EXPORT_SYMBOL(ping_evictor_start);
/* Ask the evictor thread to terminate once the last user stops it. */
699 void ping_evictor_stop(void)
701 if (--pet_refcount > 0)
704 pet_state = PET_TERMINATE;
707 EXPORT_SYMBOL(ping_evictor_stop);
708 #else /* !__KERNEL__ */
711 * the current implementation of pinger in liblustre is not optimized
/* Liblustre pinger state: when the current ping round started, when the
 * next one is due, and the request set of in-flight pings (non-NULL
 * while a round is being processed). */
715 static struct pinger_data {
717 cfs_time_t pd_this_ping; /* jiffies */
718 cfs_time_t pd_next_ping; /* jiffies */
719 struct ptlrpc_request_set *pd_set;
/* Liblustre wait-callback that drives pinging without a thread: when
 * the ping interval elapses, build a request set with one OBD_PING per
 * FULL import and send them; on later invocations poll the set, expire
 * stragglers after a full interval, then schedule the next round. */
722 static int pinger_check_rpcs(void *arg)
724 cfs_time_t curtime = cfs_time_current();
725 struct ptlrpc_request *req;
726 struct ptlrpc_request_set *set;
727 struct list_head *iter;
728 struct obd_import *imp;
729 struct pinger_data *pd = &pinger_args;
732 /* prevent recursion */
733 if (pd->pd_recursion++) {
734 CDEBUG(D_HA, "pinger: recursion! quit\n");
740 /* have we reached ping point? */
741 if (!pd->pd_set && time_before(curtime, pd->pd_next_ping)) {
746 /* if we have rpc_set already, continue processing it */
748 LASSERT(pd->pd_this_ping);
/* Start a new round: record its start time and build the set. */
753 pd->pd_this_ping = curtime;
754 pd->pd_set = ptlrpc_prep_set();
755 if (pd->pd_set == NULL)
759 /* add rpcs into set */
760 mutex_down(&pinger_sem);
761 list_for_each(iter, &pinger_imports) {
762 struct obd_import *imp =
763 list_entry(iter, struct obd_import, imp_pinger_chain);
764 int generation, level;
766 /* Include any ping within 1/10 of a second of the deadline */
767 if (cfs_time_aftereq(pd->pd_this_ping, imp->imp_next_ping -
768 (cfs_time_seconds(1) + 9) / 10)) {
770 spin_lock(&imp->imp_lock);
771 generation = imp->imp_generation;
772 level = imp->imp_state;
773 spin_unlock(&imp->imp_lock);
775 if (level != LUSTRE_IMP_FULL) {
777 "not pinging %s (in recovery)\n",
778 obd2cli_tgt(imp->imp_obd));
782 req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, OBD_PING,
785 CERROR("out of memory\n");
788 req->rq_no_resend = 1;
789 ptlrpc_req_set_repsize(req, 1, NULL);
790 req->rq_send_state = LUSTRE_IMP_FULL;
791 ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
792 req->rq_import_generation = generation;
793 ptlrpc_set_add_req(set, req);
795 CDEBUG(D_INFO, "don't need to ping %s ("CFS_TIME_T
796 " > "CFS_TIME_T")\n", obd2cli_tgt(imp->imp_obd),
797 imp->imp_next_ping, pd->pd_this_ping);
800 pd->pd_this_ping = curtime;
801 mutex_up(&pinger_sem);
803 /* Might be empty, that's OK. */
804 if (atomic_read(&set->set_remaining) == 0)
805 CDEBUG(D_RPCTRACE, "nothing to ping\n");
/* Fire every queued ping. */
807 list_for_each(iter, &set->set_requests) {
808 struct ptlrpc_request *req =
809 list_entry(iter, struct ptlrpc_request,
811 DEBUG_REQ(D_RPCTRACE, req, "pinging %s->%s",
812 req->rq_import->imp_obd->obd_uuid.uuid,
813 obd2cli_tgt(req->rq_import->imp_obd));
814 (void)ptl_send_rpc(req, 0);
/* Poll the in-flight set for completions. */
818 rc = ptlrpc_check_set(set);
820 /* not finished, and we are not expired, simply return */
821 if (!rc && cfs_time_before(curtime, cfs_time_add(pd->pd_this_ping,
822 cfs_time_seconds(PING_INTERVAL)))) {
823 CDEBUG(D_RPCTRACE, "not finished, but also not expired\n");
828 /* Expire all the requests that didn't come back. */
829 mutex_down(&pinger_sem);
830 list_for_each(iter, &set->set_requests) {
831 req = list_entry(iter, struct ptlrpc_request,
834 if (req->rq_phase == RQ_PHASE_COMPLETE)
837 CDEBUG(D_RPCTRACE, "Pinger initiate expire request(%p)\n",
840 /* This will also unregister reply. */
841 ptlrpc_expire_one_request(req, 0);
843 /* We're done with this req, let's finally move it to complete
844 * phase and take care of inflights. */
845 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
846 imp = req->rq_import;
847 spin_lock(&imp->imp_lock);
848 if (!list_empty(&req->rq_list)) {
849 list_del_init(&req->rq_list);
850 atomic_dec(&imp->imp_inflight);
852 spin_unlock(&imp->imp_lock);
853 atomic_dec(&set->set_remaining);
855 mutex_up(&pinger_sem);
857 ptlrpc_set_destroy(set);
/* Schedule the next round relative to this round's start. */
861 pd->pd_next_ping = cfs_time_add(pd->pd_this_ping,
862 cfs_time_seconds(PING_INTERVAL));
863 pd->pd_this_ping = 0; /* XXX for debug */
865 CDEBUG(D_INFO, "finished a round ping\n");
/* Opaque handle returned by liblustre_register_wait_callback(), needed
 * to deregister the pinger callback on shutdown. */
870 static void *pinger_callback = NULL;
871 #endif /* ENABLE_PINGER */
/* Liblustre variant: hook pinger_check_rpcs() into the wait-callback
 * machinery instead of spawning a thread. */
873 int ptlrpc_start_pinger(void)
876 memset(&pinger_args, 0, sizeof(pinger_args));
877 pinger_callback = liblustre_register_wait_callback("pinger_check_rpcs",
/* Liblustre variant: remove the wait callback registered above. */
884 int ptlrpc_stop_pinger(void)
888 liblustre_deregister_wait_callback(pinger_callback);
/* Liblustre variant: after sending on @imp, recompute its next ping
 * and, if no round is in flight, pull pd_next_ping forward to match. */
893 void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
896 mutex_down(&pinger_sem);
897 ptlrpc_update_next_ping(imp, 0);
898 if (pinger_args.pd_set == NULL &&
899 time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
900 CDEBUG(D_HA, "set next ping to "CFS_TIME_T"(cur "CFS_TIME_T")\n",
901 imp->imp_next_ping, cfs_time_current());
902 pinger_args.pd_next_ping = imp->imp_next_ping;
904 mutex_up(&pinger_sem);
/* Liblustre variant: same as above but with the short interval, used
 * when a commit is expected soon on @imp. */
908 void ptlrpc_pinger_commit_expected(struct obd_import *imp)
911 mutex_down(&pinger_sem);
912 ptlrpc_update_next_ping(imp, 1);
913 if (pinger_args.pd_set == NULL &&
914 time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
915 CDEBUG(D_HA, "set next ping to "CFS_TIME_T"(cur "CFS_TIME_T")\n",
916 imp->imp_next_ping, cfs_time_current());
917 pinger_args.pd_next_ping = imp->imp_next_ping;
919 mutex_up(&pinger_sem);
/* Liblustre stub: timeout-client registration is a no-op here. */
923 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
924 timeout_cb_t cb, void *data,
925 struct list_head *obd_list)
/* Liblustre stub: timeout-client removal is a no-op here. */
930 int ptlrpc_del_timeout_client(struct list_head *obd_list,
931 enum timeout_event event)
/* Liblustre variant of import registration: schedule the first ping,
 * then link the import and take a reference.  Idempotent. */
936 int ptlrpc_pinger_add_import(struct obd_import *imp)
939 if (!list_empty(&imp->imp_pinger_chain))
942 CDEBUG(D_HA, "adding pingable import %s->%s\n",
943 imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd))
944 ptlrpc_pinger_sending_on_import(imp);
946 mutex_down(&pinger_sem);
947 list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
948 class_import_get(imp);
949 mutex_up(&pinger_sem);
/* Liblustre variant of import deregistration: unlink and drop the
 * pinger's reference.  A no-op if not registered. */
954 int ptlrpc_pinger_del_import(struct obd_import *imp)
957 if (list_empty(&imp->imp_pinger_chain))
960 mutex_down(&pinger_sem);
961 list_del_init(&imp->imp_pinger_chain);
962 CDEBUG(D_HA, "removing pingable import %s->%s\n",
963 imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
964 class_import_put(imp);
965 mutex_up(&pinger_sem);
/* Liblustre variant: there is no pinger thread to wake, so instead walk
 * the registered imports and kick off recovery for any disconnected,
 * still-active import (scope depends on ENABLE_LIBLUSTRE_RECOVERY). */
969 void ptlrpc_pinger_wake_up()
973 /* XXX force pinger to run, if needed */
974 struct obd_import *imp;
975 list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) {
976 CDEBUG(D_RPCTRACE, "checking import %s->%s\n",
977 imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
978 #ifdef ENABLE_LIBLUSTRE_RECOVERY
979 if (imp->imp_state == LUSTRE_IMP_DISCON &&
980 !imp_is_deactive(imp))
982 /*XXX only recover for the initial connection */
983 if (!lustre_handle_is_used(&imp->imp_remote_handle) &&
984 imp->imp_state == LUSTRE_IMP_DISCON &&
985 !imp_is_deactive(imp))
987 ptlrpc_initiate_recovery(imp);
988 else if (imp->imp_state != LUSTRE_IMP_FULL)
989 CDEBUG(D_HA, "Refused to recover import %s->%s "
990 "state %d, deactive %d\n",
991 imp->imp_obd->obd_uuid.uuid,
992 obd2cli_tgt(imp->imp_obd), imp->imp_state,
993 imp_is_deactive(imp));
998 #endif /* !__KERNEL__ */