X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Frecover.c;h=02f79d08e306d6f3d1a32e400527c63e76840942;hp=0045ddd141decea2883f56a167d467ed581816e6;hb=51f1b46dab6c2cc16f71d3f3be483f530b5e4001;hpb=cefa8cda2ba2d288ccaa4ec077a6c627592503ea

diff --git a/lustre/ptlrpc/recover.c b/lustre/ptlrpc/recover.c
index 0045ddd..02f79d0 100644
--- a/lustre/ptlrpc/recover.c
+++ b/lustre/ptlrpc/recover.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -17,17 +15,15 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -39,41 +35,40 @@
  */
 
 #define DEBUG_SUBSYSTEM S_RPC
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-#else
-# include <liblustre.h>
-#endif
-
+#include <linux/list.h>
+#include <libcfs/libcfs.h>
 #include <obd_support.h>
 #include <lustre_ha.h>
 #include <lustre_net.h>
 #include <lustre_import.h>
 #include <lustre_export.h>
 #include <obd.h>
-#include <obd_ost.h>
 #include <obd_class.h>
-#include <obd_lov.h> /* for IOC_LOV_SET_OSC_ACTIVE */
-#include <libcfs/list.h>
 #include "ptlrpc_internal.h"
 
-static int ptlrpc_recover_import_no_retry(struct obd_import *, char *);
-
+/**
+ * Start recovery on a disconnected import.
+ * This is done by simply attempting a connect.
+ */
 void ptlrpc_initiate_recovery(struct obd_import *imp)
 {
	ENTRY;
 
	CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
-	ptlrpc_connect_import(imp, NULL);
+	ptlrpc_connect_import(imp);
 
	EXIT;
 }
 
+/**
+ * Identify which request from the replay list needs to be replayed next
+ * (based on what we have already replayed) and send it to the server.
+ */
 int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
 {
	int rc = 0;
-        struct list_head *tmp, *pos;
+	struct list_head *tmp, *pos;
	struct ptlrpc_request *req = NULL;
	__u64 last_transno;
	ENTRY;
@@ -83,13 +78,13 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
 
	/* It might have committed some after we last spoke, so make sure we
	 * get rid of them now.
	 */
-        spin_lock(&imp->imp_lock);
-        imp->imp_last_transno_checked = 0;
-        ptlrpc_free_committed(imp);
-        last_transno = imp->imp_last_replay_transno;
-        spin_unlock(&imp->imp_lock);
+	spin_lock(&imp->imp_lock);
+	imp->imp_last_transno_checked = 0;
+	ptlrpc_free_committed(imp);
+	last_transno = imp->imp_last_replay_transno;
+	spin_unlock(&imp->imp_lock);
 
-        CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
+	CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
	       imp, obd2cli_tgt(imp->imp_obd),
	       imp->imp_peer_committed_transno, last_transno);
 
@@ -108,44 +103,94 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
	 * imp_lock is being held by ptlrpc_replay, but it's not. it's
	 * just a little race...
	 */
-        list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
-
-                /* If need to resend the last sent transno (because a
-                   reconnect has occurred), then stop on the matching
-                   req and send it again.  If, however, the last sent
-                   transno has been committed then we continue replay
-                   from the next request. */
-                if (imp->imp_resend_replay &&
-                    req->rq_transno == last_transno) {
-                        lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
-                        break;
-                }
-
-                if (req->rq_transno > last_transno) {
-                        imp->imp_last_replay_transno = req->rq_transno;
-                        break;
-                }
-
-                req = NULL;
-        }
-
-        spin_lock(&imp->imp_lock);
-        imp->imp_resend_replay = 0;
-        spin_unlock(&imp->imp_lock);
-
-        if (req != NULL) {
-                rc = ptlrpc_replay_req(req);
-                if (rc) {
-                        CERROR("recovery replay error %d for req "
-                               LPD64"\n", rc, req->rq_xid);
-                        RETURN(rc);
-                }
-                *inflight = 1;
-        }
-        RETURN(rc);
+	/* Replay all the committed open requests on committed_list first */
+	if (!list_empty(&imp->imp_committed_list)) {
+		tmp = imp->imp_committed_list.prev;
+		req = list_entry(tmp, struct ptlrpc_request,
+				 rq_replay_list);
+
+		/* The last request on committed_list hasn't been replayed */
+		if (req->rq_transno > last_transno) {
+			/* Since the imp_committed_list is immutable until
+			 * all of its requests have been replayed, it's safe
+			 * to use a cursor to accelerate the search */
+			if (!imp->imp_resend_replay ||
+			    imp->imp_replay_cursor == &imp->imp_committed_list)
+				imp->imp_replay_cursor =
+					imp->imp_replay_cursor->next;
+
+			while (imp->imp_replay_cursor !=
+			       &imp->imp_committed_list) {
+				req = list_entry(imp->imp_replay_cursor,
+						 struct ptlrpc_request,
+						 rq_replay_list);
+				if (req->rq_transno > last_transno)
+					break;
+
+				req = NULL;
+				imp->imp_replay_cursor =
+					imp->imp_replay_cursor->next;
+			}
+		} else {
+			/* All requests on committed_list have been replayed */
+			imp->imp_replay_cursor = &imp->imp_committed_list;
+			req = NULL;
+		}
+	}
+
+	/* All the requests in the committed list have been replayed; now
+	 * replay the imp_replay_list */
+	if (req == NULL) {
+		list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+			req = list_entry(tmp, struct ptlrpc_request,
+					 rq_replay_list);
+
+			if (req->rq_transno > last_transno)
+				break;
+			req = NULL;
+		}
+	}
+
+	/* If we need to resend the last sent transno (because a reconnect
+	 * has occurred), then stop on the matching req and send it again.
+	 * If, however, the last sent transno has been committed then we
+	 * continue replay from the next request. */
+	if (req != NULL && imp->imp_resend_replay)
+		lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
+
+	spin_lock(&imp->imp_lock);
+	/* The resend replay request may have been removed from the
+	 * unreplied list. */
+	if (req != NULL && imp->imp_resend_replay &&
+	    list_empty(&req->rq_unreplied_list)) {
+		ptlrpc_add_unreplied(req);
+		imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+	}
+
+	imp->imp_resend_replay = 0;
+	spin_unlock(&imp->imp_lock);
+
+	if (req != NULL) {
+		/* The request should have been added back to the unreplied
+		 * list by ptlrpc_prepare_replay(). */
+		LASSERT(!list_empty(&req->rq_unreplied_list));
+
+		rc = ptlrpc_replay_req(req);
+		if (rc) {
+			CERROR("recovery replay error %d for req "
+			       "%llu\n", rc, req->rq_xid);
+			RETURN(rc);
+		}
+		*inflight = 1;
+	}
+	RETURN(rc);
 }
 
+/**
+ * Schedule resending of requests on the sending_list. This is done
+ * after we have completed replaying requests and locks.
+ */
 int ptlrpc_resend(struct obd_import *imp)
 {
	struct ptlrpc_request *req, *next;
@@ -158,37 +203,45 @@ int ptlrpc_resend(struct obd_import *imp)
	 */
 
	/* Well... what if lctl recover is called twice at the same time? */
-        spin_lock(&imp->imp_lock);
-        if (imp->imp_state != LUSTRE_IMP_RECOVER) {
-                spin_unlock(&imp->imp_lock);
+	spin_lock(&imp->imp_lock);
+	if (imp->imp_state != LUSTRE_IMP_RECOVER) {
+		spin_unlock(&imp->imp_lock);
		RETURN(-1);
	}
 
-        list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
-                LASSERTF((long)req > CFS_PAGE_SIZE && req != LP_POISON,
-                         "req %p bad\n", req);
-                LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
-                if (!req->rq_no_resend)
-                        ptlrpc_resend_req(req);
-        }
-        spin_unlock(&imp->imp_lock);
+	list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
+		LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
+			 "req %p bad\n", req);
+		LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
+
+		/* If the request is allowed to be sent during replay and it
+		 * has not timed out yet, then it does not need to be
+		 * resent. */
+		if (!ptlrpc_no_resend(req) &&
+		    (req->rq_timedout || !req->rq_allow_replay))
+			ptlrpc_resend_req(req);
+	}
+	spin_unlock(&imp->imp_lock);
 
-        RETURN(0);
+	RETURN(0);
 }
 
+/**
+ * Go through all requests in the delayed list and wake their threads
+ * for resending.
+ */
 void ptlrpc_wake_delayed(struct obd_import *imp)
 {
-        struct list_head *tmp, *pos;
-        struct ptlrpc_request *req;
+	struct list_head *tmp, *pos;
+	struct ptlrpc_request *req;
 
-        spin_lock(&imp->imp_lock);
-        list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_list);
+	spin_lock(&imp->imp_lock);
+	list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
+		req = list_entry(tmp, struct ptlrpc_request, rq_list);
 
-                DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
-                ptlrpc_client_wake_req(req);
-        }
-        spin_unlock(&imp->imp_lock);
+		DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
+		ptlrpc_client_wake_req(req);
+	}
+	spin_unlock(&imp->imp_lock);
 }
 
 void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
@@ -212,20 +265,20 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
		}
		/* to control recovery via lctl {disable|enable}_recovery */
		if (imp->imp_deactive == 0)
-                        ptlrpc_connect_import(imp, NULL);
+			ptlrpc_connect_import(imp);
	}
 
	/* Wait for recovery to complete and resend. If evicted, then
	   this request will be errored out later. */
-        spin_lock(&failed_req->rq_lock);
-        if (!failed_req->rq_no_resend)
-                failed_req->rq_resend = 1;
-        spin_unlock(&failed_req->rq_lock);
+	spin_lock(&failed_req->rq_lock);
+	if (!failed_req->rq_no_resend)
+		failed_req->rq_resend = 1;
+	spin_unlock(&failed_req->rq_lock);
 
-        EXIT;
+	EXIT;
 }
 
-/*
+/**
 * Administratively activate or deactivate a client.
 * This should only be called by the ioctl interface, currently
 * - the lctl deactivate and activate commands
@@ -245,88 +298,107 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
 
	if (!active) {
		LCONSOLE_WARN("setting import %s INACTIVE by administrator "
			      "request\n", obd2cli_tgt(imp->imp_obd));
-                ptlrpc_invalidate_import(imp);
-                spin_lock(&imp->imp_lock);
-                imp->imp_deactive = 1;
-                spin_unlock(&imp->imp_lock);
+		/* set before invalidate to avoid messages about imp_inval
+		 * set without imp_deactive in ptlrpc_import_delay_req */
+		spin_lock(&imp->imp_lock);
+		imp->imp_deactive = 1;
+		spin_unlock(&imp->imp_lock);
+
+		obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
+
+		ptlrpc_invalidate_import(imp);
	}
 
	/* When activating, mark import valid, and attempt recovery */
	if (active) {
-                spin_lock(&imp->imp_lock);
-                imp->imp_deactive = 0;
-                spin_unlock(&imp->imp_lock);
-
		CDEBUG(D_HA, "setting import %s VALID\n",
		       obd2cli_tgt(imp->imp_obd));
-                rc = ptlrpc_recover_import(imp, NULL);
+
+		spin_lock(&imp->imp_lock);
+		imp->imp_deactive = 0;
+		spin_unlock(&imp->imp_lock);
+		obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
+
+		rc = ptlrpc_recover_import(imp, NULL, 0);
	}
 
	RETURN(rc);
 }
+EXPORT_SYMBOL(ptlrpc_set_import_active);
 
 /* Attempt to reconnect an import */
-int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid)
+int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
 {
-        int rc;
-        ENTRY;
+	int rc = 0;
+	ENTRY;
+
+	spin_lock(&imp->imp_lock);
+	if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
+	    atomic_read(&imp->imp_inval_count))
+		rc = -EINVAL;
+	spin_unlock(&imp->imp_lock);
+	if (rc)
+		GOTO(out, rc);
 
	/* force import to be disconnected. */
	ptlrpc_set_import_discon(imp, 0);
 
-        spin_lock(&imp->imp_lock);
-        imp->imp_deactive = 0;
-        spin_unlock(&imp->imp_lock);
+	if (new_uuid) {
+		struct obd_uuid uuid;
 
-        rc = ptlrpc_recover_import_no_retry(imp, new_uuid);
+		/* instruct import to use new uuid */
+		obd_str2uuid(&uuid, new_uuid);
+		rc = import_set_conn_priority(imp, &uuid);
+		if (rc)
+			GOTO(out, rc);
+	}
 
-        RETURN(rc);
-}
+	/* Check if reconnect is already in progress */
+	spin_lock(&imp->imp_lock);
+	if (imp->imp_state != LUSTRE_IMP_DISCON) {
+		imp->imp_force_verify = 1;
+		rc = -EALREADY;
+	}
+	spin_unlock(&imp->imp_lock);
+	if (rc)
+		GOTO(out, rc);
 
-int ptlrpc_import_in_recovery(struct obd_import *imp)
-{
-        int in_recovery = 1;
-        spin_lock(&imp->imp_lock);
-        if (imp->imp_state == LUSTRE_IMP_FULL ||
-            imp->imp_state == LUSTRE_IMP_CLOSED ||
-            imp->imp_state == LUSTRE_IMP_DISCON)
-                in_recovery = 0;
-        spin_unlock(&imp->imp_lock);
-        return in_recovery;
-}
+	rc = ptlrpc_connect_import(imp);
+	if (rc)
+		GOTO(out, rc);
 
-static int ptlrpc_recover_import_no_retry(struct obd_import *imp,
-                                          char *new_uuid)
-{
-        int rc;
-        int in_recovery = 0;
-        struct l_wait_info lwi;
-        ENTRY;
+	if (!async) {
+		struct l_wait_info lwi;
+		int secs = cfs_time_seconds(obd_timeout);
 
-        /* Check if reconnect is already in progress */
-        spin_lock(&imp->imp_lock);
-        if (imp->imp_state != LUSTRE_IMP_DISCON) {
-                in_recovery = 1;
-        }
-        spin_unlock(&imp->imp_lock);
+		CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
+		       obd2cli_tgt(imp->imp_obd), secs);
 
-        if (in_recovery == 1)
-                RETURN(-EALREADY);
+		lwi = LWI_TIMEOUT(secs, NULL, NULL);
+		rc = l_wait_event(imp->imp_recovery_waitq,
+				  !ptlrpc_import_in_recovery(imp), &lwi);
+		CDEBUG(D_HA, "%s: recovery finished\n",
+		       obd2cli_tgt(imp->imp_obd));
+	}
+	EXIT;
 
-        rc = ptlrpc_connect_import(imp, new_uuid);
-        if (rc)
-                RETURN(rc);
+out:
+	return rc;
+}
+EXPORT_SYMBOL(ptlrpc_recover_import);
 
-        CDEBUG(D_HA, "%s: recovery started, waiting\n",
-               obd2cli_tgt(imp->imp_obd));
+int ptlrpc_import_in_recovery(struct obd_import *imp)
+{
+	int in_recovery = 1;
 
-        lwi = LWI_TIMEOUT(cfs_timeout_cap(cfs_time_seconds(obd_timeout)),
-                          NULL, NULL);
-        rc = l_wait_event(imp->imp_recovery_waitq,
-                          !ptlrpc_import_in_recovery(imp), &lwi);
-        CDEBUG(D_HA, "%s: recovery finished\n",
-               obd2cli_tgt(imp->imp_obd));
+	spin_lock(&imp->imp_lock);
+	if (imp->imp_state == LUSTRE_IMP_FULL ||
+	    imp->imp_state == LUSTRE_IMP_CLOSED ||
+	    imp->imp_state == LUSTRE_IMP_DISCON ||
+	    imp->imp_obd->obd_no_recov)
+		in_recovery = 0;
+	spin_unlock(&imp->imp_lock);
 
-        RETURN(rc);
+	return in_recovery;
 }
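
Note on the replay change above: the patch replaces a full rescan of imp_replay_list on every call with a persistent cursor (imp_replay_cursor) over imp_committed_list. Because that list is immutable until all of its requests have been replayed, the cursor can resume where the previous call stopped, so a whole replay pass touches each committed request once instead of rescanning from the head each time. The following standalone sketch models the same cursor pattern; all names in it (list_node, replay_item, next_to_replay, node_to_item) are invented for illustration and are not the Lustre structures, which link struct ptlrpc_request through rq_replay_list.

/*
 * Minimal standalone model of the cursor-accelerated replay scan.
 * Hypothetical types only; not the Lustre code.
 */
#include <stdio.h>
#include <stddef.h>

struct list_node {
	struct list_node *next, *prev;
};

struct replay_item {
	struct list_node	link;
	unsigned long long	transno;	/* transaction number */
};

#define node_to_item(ptr) \
	((struct replay_item *)((char *)(ptr) - \
				offsetof(struct replay_item, link)))

/* Return the first item with transno > last_transno, resuming from
 * *cursor instead of rescanning from the head.  This is only valid
 * while the list is not modified, which is the property the patch
 * relies on for imp_committed_list during replay. */
static struct replay_item *
next_to_replay(struct list_node *head, struct list_node **cursor,
	       unsigned long long last_transno)
{
	if (*cursor == head)		/* first call: start at the head */
		*cursor = head->next;

	for (; *cursor != head; *cursor = (*cursor)->next) {
		struct replay_item *item = node_to_item(*cursor);

		if (item->transno > last_transno)
			return item;
	}
	return NULL;			/* everything has been replayed */
}

int main(void)
{
	struct list_node head = { &head, &head };
	struct replay_item items[3] = {
		{ { NULL, NULL }, 7 },
		{ { NULL, NULL }, 9 },
		{ { NULL, NULL }, 12 },
	};
	struct list_node *cursor = &head;
	unsigned long long last_transno = 0;
	struct replay_item *item;
	int i;

	/* insert at the tail, keeping transno order as the import does */
	for (i = 0; i < 3; i++) {
		items[i].link.prev = head.prev;
		items[i].link.next = &head;
		head.prev->next = &items[i].link;
		head.prev = &items[i].link;
	}

	/* one item per pass, the way ptlrpc_replay_next() replays one
	 * request per call and advances the last-replayed transno */
	while ((item = next_to_replay(&head, &cursor, last_transno)) != NULL) {
		printf("replaying transno %llu\n", item->transno);
		last_transno = item->transno;
	}
	return 0;
}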