X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Frecover.c;h=17085d62f489b8197f6473d4172a774abc264daf;hb=09fe7811cb076a00a905b747d0048294a0ef2e4d;hp=fbfebe7e08c8e3550e42909e1b3bbd9c2f489d77;hpb=61038f56be879087d957e0831441f1573385f5be;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/recover.c b/lustre/ptlrpc/recover.c
index fbfebe7..17085d6 100644
--- a/lustre/ptlrpc/recover.c
+++ b/lustre/ptlrpc/recover.c
@@ -1,276 +1,351 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Portal-RPC reconnection and replay operations, for use in recovery.
+ * GPL HEADER START
  *
- * This code is issued under the GNU General Public License.
- * See the file COPYING in this distribution
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * Copyright (C) 1996 Peter J. Braam
- * Copyright (C) 1999 Stelias Computing Inc.
- * Copyright (C) 1999 Seagate Technology Inc.
- * Copyright (C) 2001 Mountain View Data, Inc.
- * Copyright (C) 2002 Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/recover.c
+ *
+ * Author: Mike Shaver
  */
-
-#include 
-#include 
-#include 
 #define DEBUG_SUBSYSTEM S_RPC
+
+#ifdef __KERNEL__
+# include <libcfs/libcfs.h>
+#else
+# include <liblustre.h>
+#endif
+
+#include <obd_support.h>
+#include <lustre_ha.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_export.h>
+#include <obd.h>
+#include <obd_ost.h>
+#include <obd_class.h>
+#include <obd_lov.h> /* for IOC_LOV_SET_OSC_ACTIVE */
+#include <libcfs/list.h>
+
+#include "ptlrpc_internal.h"
+
+static int ptlrpc_recover_import_no_retry(struct obd_import *, char *);
+
+/**
+ * Start recovery on disconnected import.
+ * This is done by just attempting a connect
+ */
+void ptlrpc_initiate_recovery(struct obd_import *imp)
+{
+        ENTRY;
+
+        CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
+        ptlrpc_connect_import(imp, NULL);
 
-#include 
-#include 
-#include 
+        EXIT;
+}
 
-int ptlrpc_reconnect_import(struct obd_import *imp, int rq_opc)
+/**
+ * Identify what request from replay list needs to be replayed next
+ * (based on what we have already replayed) and send it to server.
+ */
+int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
 {
-        struct obd_device *obd = imp->imp_obd;
-        struct client_obd *cli = &obd->u.cli;
-        int size[] = { sizeof(cli->cl_target_uuid), sizeof(obd->obd_uuid) };
-        char *tmp[] = {cli->cl_target_uuid, obd->obd_uuid };
-        struct ptlrpc_connection *conn = imp->imp_connection;
-        struct lustre_handle old_hdl;
-        struct ptlrpc_request *request;
-        struct obd_export *ldlmexp;
-        int rc;
+        int rc = 0;
+        cfs_list_t *tmp, *pos;
+        struct ptlrpc_request *req = NULL;
+        __u64 last_transno;
+        ENTRY;
 
-        request = ptlrpc_prep_req(imp, rq_opc, 2, size, tmp);
-        request->rq_level = LUSTRE_CONN_NEW;
-        request->rq_replen = lustre_msg_size(0, NULL);
-        /*
-         * This address is the export that represents our client-side LDLM
-         * service (for ASTs). We should only have one on this list, so we
-         * just grab the first one.
+        *inflight = 0;
+
+        /* It might have committed some after we last spoke, so make sure we
+         * get rid of them now.
+         */
+        cfs_spin_lock(&imp->imp_lock);
+        imp->imp_last_transno_checked = 0;
+        ptlrpc_free_committed(imp);
+        last_transno = imp->imp_last_replay_transno;
+        cfs_spin_unlock(&imp->imp_lock);
+
+        CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
+               imp, obd2cli_tgt(imp->imp_obd),
+               imp->imp_peer_committed_transno, last_transno);
+
+        /* Do I need to hold a lock across this iteration? We shouldn't be
+         * racing with any additions to the list, because we're in recovery
+         * and are therefore not processing additional requests to add. Calls
+         * to ptlrpc_free_committed might commit requests, but nothing "newer"
+         * than the one we're replaying (it can't be committed until it's
+         * replayed, and we're doing that here). l_f_e_safe protects against
+         * problems with the current request being committed, in the unlikely
+         * event of that race. So, in conclusion, I think that it's safe to
+         * perform this list-walk without the imp_lock held.
          *
-         * XXX tear down export, call class_obd_connect?
+         * But, the {mdc,osc}_replay_open callbacks both iterate
+         * request lists, and have comments saying they assume the
+         * imp_lock is being held by ptlrpc_replay, but it's not. it's
+         * just a little race...
          */
-        ldlmexp = list_entry(obd->obd_exports.next, struct obd_export,
-                             exp_obd_chain);
-        request->rq_reqmsg->addr = (__u64)(unsigned long)ldlmexp;
-        request->rq_reqmsg->cookie = ldlmexp->exp_cookie;
-        rc = ptlrpc_queue_wait(request);
-        switch (rc) {
-        case EALREADY:
-        case -EALREADY:
-                /* already connected! */
-                memset(&old_hdl, 0, sizeof(old_hdl));
-                if (!memcmp(&old_hdl.addr, &request->rq_repmsg->addr,
-                            sizeof (old_hdl.addr)) &&
-                    !memcmp(&old_hdl.cookie, &request->rq_repmsg->cookie,
-                            sizeof (old_hdl.cookie))) {
-                        CERROR("%s@%s didn't like our handle %Lx/%Lx, failed\n",
-                               cli->cl_target_uuid, conn->c_remote_uuid,
-                               (__u64)(unsigned long)ldlmexp,
-                               ldlmexp->exp_cookie);
-                        GOTO(out_disc, rc = -ENOTCONN);
+        cfs_list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request,
+                                     rq_replay_list);
+
+                /* If need to resend the last sent transno (because a
+                   reconnect has occurred), then stop on the matching
+                   req and send it again. If, however, the last sent
+                   transno has been committed then we continue replay
+                   from the next request. */
+                if (req->rq_transno > last_transno) {
+                        if (imp->imp_resend_replay)
+                                lustre_msg_add_flags(req->rq_reqmsg,
+                                                     MSG_RESENT);
+                        break;
                 }
+                req = NULL;
+        }
+
+        cfs_spin_lock(&imp->imp_lock);
+        imp->imp_resend_replay = 0;
+        cfs_spin_unlock(&imp->imp_lock);
 
-                old_hdl.addr = request->rq_repmsg->addr;
-                old_hdl.cookie = request->rq_repmsg->cookie;
-                if (memcmp(&imp->imp_handle, &old_hdl, sizeof(old_hdl))) {
-                        CERROR("%s@%s changed handle from %Lx/%Lx to %Lx/%Lx; "
-                               "copying, but this may foreshadow disaster\n",
-                               cli->cl_target_uuid, conn->c_remote_uuid,
-                               old_hdl.addr, old_hdl.cookie,
-                               imp->imp_handle.addr, imp->imp_handle.cookie);
-                        imp->imp_handle.addr = request->rq_repmsg->addr;
-                        imp->imp_handle.cookie = request->rq_repmsg->cookie;
-                        GOTO(out_disc, rc = EALREADY);
+        if (req != NULL) {
+                rc = ptlrpc_replay_req(req);
+                if (rc) {
+                        CERROR("recovery replay error %d for req "
+                               LPU64"\n", rc, req->rq_xid);
+                        RETURN(rc);
                 }
-
-                CERROR("reconnected to %s@%s after partition\n",
-                       cli->cl_target_uuid, conn->c_remote_uuid);
-                GOTO(out_disc, rc = EALREADY);
-        case 0:
-                old_hdl = imp->imp_handle;
-                imp->imp_handle.addr = request->rq_repmsg->addr;
-                imp->imp_handle.cookie = request->rq_repmsg->cookie;
-                CERROR("now connected to %s@%s (%Lx/%Lx, was %Lx/%Lx)!\n",
-                       cli->cl_target_uuid, conn->c_remote_uuid,
-                       imp->imp_handle.addr, imp->imp_handle.cookie,
-                       old_hdl.addr, old_hdl.cookie);
-                GOTO(out_disc, rc = 0);
-        default:
-                CERROR("cannot connect to %s@%s: rc = %d\n",
-                       cli->cl_target_uuid, conn->c_remote_uuid, rc);
-                GOTO(out_disc, rc = -ENOTCONN); /* XXX preserve rc? */
+                *inflight = 1;
         }
-
- out_disc:
-        ptlrpc_req_finished(request);
-        return rc;
+        RETURN(rc);
 }
 
-int ptlrpc_run_recovery_upcall(struct ptlrpc_connection *conn)
+/**
+ * Schedule resending of request on sending_list. This is done after
+ * we completed replaying of requests and locks.
+ */
+int ptlrpc_resend(struct obd_import *imp)
 {
-        char *argv[3];
-        char *envp[3];
-        int rc;
+        struct ptlrpc_request *req, *next;
         ENTRY;
 
-        argv[0] = obd_recovery_upcall;
-        argv[1] = conn->c_remote_uuid;
-        argv[2] = NULL;
-
-        envp[0] = "HOME=/";
-        envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
-        envp[2] = NULL;
-
-        rc = call_usermodehelper(argv[0], argv, envp);
-        if (rc < 0) {
-                CERROR("Error invoking recovery upcall %s for %s: %d\n",
-                       argv[0], argv[1], rc);
-                CERROR("Check /proc/sys/lustre/recovery_upcall?\n");
-        } else {
-                CERROR("Invoked upcall %s for connection %s\n",
-                       argv[0], argv[1]);
-        }
-        /*
-         * We don't want to make this a "failed" recovery, because the system
-         * administrator -- or, perhaps, tester -- may well be able to rescue
-         * things by running the correct upcall.
+        /* As long as we're in recovery, nothing should be added to the sending
+         * list, so we don't need to hold the lock during this iteration and
+         * resend process.
+         */
+        /* Well... what if lctl recover is called twice at the same time?
         */
+        cfs_spin_lock(&imp->imp_lock);
+        if (imp->imp_state != LUSTRE_IMP_RECOVER) {
+                cfs_spin_unlock(&imp->imp_lock);
+                RETURN(-1);
+        }
+
+        cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
+                                     rq_list) {
+                LASSERTF((long)req > CFS_PAGE_SIZE && req != LP_POISON,
+                         "req %p bad\n", req);
+                LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
+                if (!ptlrpc_no_resend(req))
+                        ptlrpc_resend_req(req);
+        }
+        cfs_spin_unlock(&imp->imp_lock);
+
         RETURN(0);
 }
 
-int ptlrpc_replay(struct obd_import *imp, int send_last_flag)
+/**
+ * Go through all requests in delayed list and wake their threads
+ * for resending
+ */
+void ptlrpc_wake_delayed(struct obd_import *imp)
 {
-        int rc = 0;
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         struct ptlrpc_request *req;
-        __u64 committed = imp->imp_peer_committed_transno;
-        ENTRY;
 
-        /* It might have committed some after we last spoke, so make sure we
-         * get rid of them now.
-         */
-        ptlrpc_free_committed(imp);
+        cfs_spin_lock(&imp->imp_lock);
+        cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
+                req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
 
-        spin_lock(&imp->imp_lock);
+                DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
+                ptlrpc_client_wake_req(req);
+        }
+        cfs_spin_unlock(&imp->imp_lock);
+}
 
-        CDEBUG(D_HA, "import %p from %s has committed "LPD64"\n",
-               imp, imp->imp_obd->u.cli.cl_target_uuid, committed);
+void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
+{
+        struct obd_import *imp = failed_req->rq_import;
+        ENTRY;
 
-        list_for_each(tmp, &imp->imp_replay_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_list);
-                DEBUG_REQ(D_HA, req, "RETAINED: ");
+        CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
+               imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
+               imp->imp_connection->c_remote_uuid.uuid);
+
+        if (ptlrpc_set_import_discon(imp,
+                              lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
+                if (!imp->imp_replayable) {
+                        CDEBUG(D_HA, "import %s@%s for %s not replayable, "
+                               "auto-deactivating\n",
+                               obd2cli_tgt(imp->imp_obd),
+                               imp->imp_connection->c_remote_uuid.uuid,
+                               imp->imp_obd->obd_name);
+                        ptlrpc_deactivate_import(imp);
+                }
+                /* to control recovery via lctl {disable|enable}_recovery */
+                if (imp->imp_deactive == 0)
+                        ptlrpc_connect_import(imp, NULL);
         }
 
-        list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_list);
+        /* Wait for recovery to complete and resend. If evicted, then
+           this request will be errored out later.*/
+        cfs_spin_lock(&failed_req->rq_lock);
+        if (!failed_req->rq_no_resend)
+                failed_req->rq_resend = 1;
+        cfs_spin_unlock(&failed_req->rq_lock);
 
-                if (req->rq_transno == imp->imp_max_transno &&
-                    send_last_flag) {
-                        req->rq_reqmsg->flags |= MSG_LAST_REPLAY;
-                        DEBUG_REQ(D_HA, req, "LAST_REPLAY:");
-                } else {
-                        DEBUG_REQ(D_HA, req, "REPLAY:");
-                }
+        EXIT;
+}
 
-                rc = ptlrpc_replay_req(req);
-                req->rq_reqmsg->flags &= ~MSG_LAST_REPLAY;
+/**
+ * Administratively activate/deactivate a client.
+ * This should only be called by the ioctl interface, currently
+ * - the lctl deactivate and activate commands
+ * - echo 0/1 >> /proc/osc/XXX/active
+ * - client umount -f (ll_umount_begin)
+ */
+int ptlrpc_set_import_active(struct obd_import *imp, int active)
+{
+        struct obd_device *obd = imp->imp_obd;
+        int rc = 0;
 
-                if (rc) {
-                        CERROR("recovery replay error %d for req %Ld\n",
-                               rc, req->rq_xid);
-                        GOTO(out, rc);
-                }
-        }
+        ENTRY;
+        LASSERT(obd);
 
- out:
-        spin_unlock(&imp->imp_lock);
-        return rc;
-}
-
-#define NO_RESEND 0 /* No action required. */
-#define RESEND 1 /* Resend required. */
-#define RESEND_IGNORE 2 /* Resend, ignore the reply (already saw it). */
-#define RESTART 3 /* Have to restart the call, sorry! */
+        /* When deactivating, mark import invalid, and abort in-flight
+         * requests. */
+        if (!active) {
+                LCONSOLE_WARN("setting import %s INACTIVE by administrator "
+                              "request\n", obd2cli_tgt(imp->imp_obd));
+
+                /* set before invalidate to avoid messages about imp_inval
+                 * set without imp_deactive in ptlrpc_import_delay_req */
+                cfs_spin_lock(&imp->imp_lock);
+                imp->imp_deactive = 1;
+                cfs_spin_unlock(&imp->imp_lock);
 
-static int resend_type(struct ptlrpc_request *req, __u64 committed)
-{
-        if (req->rq_transno < committed) {
-                if (req->rq_flags & PTL_RPC_FL_REPLIED) {
-                        /* Saw the reply and it was committed, no biggie. */
-                        DEBUG_REQ(D_HA, req, "NO_RESEND");
-                        return NO_RESEND;
-                }
-                /* Request committed, but no reply: have to restart. */
-                return RESTART;
+                ptlrpc_invalidate_import(imp);
         }
 
-        if (req->rq_flags & PTL_RPC_FL_REPLIED) {
-                /* Saw reply, so resend and ignore new reply. */
-                return RESEND_IGNORE;
+        /* When activating, mark import valid, and attempt recovery */
+        if (active) {
+                CDEBUG(D_HA, "setting import %s VALID\n",
+                       obd2cli_tgt(imp->imp_obd));
+                rc = ptlrpc_recover_import(imp, NULL);
         }
 
-        /* Didn't see reply either, so resend. */
-        return RESEND;
-
+        RETURN(rc);
 }
 
-int ptlrpc_resend(struct obd_import *imp)
+/* Attempt to reconnect an import */
+int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid)
 {
-        int rc = 0;
-        struct list_head *tmp, *pos;
-        struct ptlrpc_request *req;
-        __u64 committed = imp->imp_peer_committed_transno;
-
+        int rc;
         ENTRY;
 
-        spin_lock(&imp->imp_lock);
-        list_for_each(tmp, &imp->imp_sending_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_list);
-                DEBUG_REQ(D_HA, req, "SENDING: ");
+        cfs_spin_lock(&imp->imp_lock);
+        if (cfs_atomic_read(&imp->imp_inval_count)) {
+                cfs_spin_unlock(&imp->imp_lock);
+                RETURN(-EINVAL);
         }
+        cfs_spin_unlock(&imp->imp_lock);
 
-        list_for_each_safe(tmp, pos, &imp->imp_sending_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_list);
-
-                switch(resend_type(req, committed)) {
-                case NO_RESEND:
-                        break;
-
-                case RESTART:
-                        DEBUG_REQ(D_HA, req, "RESTART:");
-                        ptlrpc_restart_req(req);
-                        break;
+        /* force import to be disconnected.
+         */
+        ptlrpc_set_import_discon(imp, 0);
 
-                case RESEND_IGNORE:
-                        DEBUG_REQ(D_HA, req, "RESEND_IGNORE:");
-                        rc = ptlrpc_replay_req(req);
-                        if (rc) {
-                                DEBUG_REQ(D_ERROR, req, "error %d resending:",
-                                          rc);
-                                ptlrpc_restart_req(req); /* might as well */
-                        }
-                        break;
+        cfs_spin_lock(&imp->imp_lock);
+        imp->imp_deactive = 0;
+        cfs_spin_unlock(&imp->imp_lock);
 
-                case RESEND:
-                        DEBUG_REQ(D_HA, req, "RESEND:");
-                        ptlrpc_resend_req(req);
-                        break;
+        rc = ptlrpc_recover_import_no_retry(imp, new_uuid);
 
-                default:
-                        LBUG();
-                }
-        }
         RETURN(rc);
 }
 
-void ptlrpc_wake_delayed(struct obd_import *imp)
+int ptlrpc_import_in_recovery(struct obd_import *imp)
 {
-        struct list_head *tmp, *pos;
-        struct ptlrpc_request *req;
+        int in_recovery = 1;
+        cfs_spin_lock(&imp->imp_lock);
+        if (imp->imp_state == LUSTRE_IMP_FULL ||
+            imp->imp_state == LUSTRE_IMP_CLOSED ||
+            imp->imp_state == LUSTRE_IMP_DISCON)
+                in_recovery = 0;
+        cfs_spin_unlock(&imp->imp_lock);
+        return in_recovery;
+}
+
+static int ptlrpc_recover_import_no_retry(struct obd_import *imp,
+                                          char *new_uuid)
+{
+        int rc;
+        int in_recovery = 0;
+        struct l_wait_info lwi;
+        ENTRY;
 
-        spin_lock(&imp->imp_lock);
-        list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
-                req = list_entry(tmp, struct ptlrpc_request, rq_list);
-                DEBUG_REQ(D_HA, req, "waking:");
-                wake_up(&req->rq_wait_for_rep);
+        /* Check if reconnect is already in progress */
+        cfs_spin_lock(&imp->imp_lock);
+        if (imp->imp_state != LUSTRE_IMP_DISCON) {
+                in_recovery = 1;
         }
-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);
+
+        if (in_recovery == 1)
+                RETURN(-EALREADY);
+
+        rc = ptlrpc_connect_import(imp, new_uuid);
+        if (rc)
+                RETURN(rc);
+
+        CDEBUG(D_HA, "%s: recovery started, waiting\n",
+               obd2cli_tgt(imp->imp_obd));
+
+        lwi = LWI_TIMEOUT(cfs_timeout_cap(cfs_time_seconds(obd_timeout)),
+                          NULL, NULL);
+        rc = l_wait_event(imp->imp_recovery_waitq,
+                          !ptlrpc_import_in_recovery(imp), &lwi);
+        CDEBUG(D_HA, "%s: recovery finished\n",
+               obd2cli_tgt(imp->imp_obd));
+
+        RETURN(rc);
 }
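
The patch above replaces the old resend_type()/switch-based logic with a simpler rule: during replay, ptlrpc_replay_next() walks the import's replay list in transaction-number order and sends the first request whose transno is greater than the last transno already replayed (everything at or below the peer's committed transno has already been dropped by ptlrpc_free_committed()). The standalone sketch below models only that selection rule in plain C so it can be compiled and run outside the kernel; struct fake_req, pick_next_replay(), and the sample transnos are illustrative stand-ins, not the real ptlrpc_request or any Lustre API.

/* Illustrative model of the replay-selection rule; not Lustre code. */
#include <stdio.h>
#include <stdint.h>

struct fake_req {                       /* stand-in for ptlrpc_request */
        uint64_t transno;               /* transaction number, ascending */
        const char *name;
};

/* Return the first request with transno > last_replayed, or NULL. */
static const struct fake_req *pick_next_replay(const struct fake_req *list,
                                               int n, uint64_t last_replayed)
{
        int i;

        for (i = 0; i < n; i++)
                if (list[i].transno > last_replayed)
                        return &list[i];
        return NULL;                    /* nothing left to replay */
}

int main(void)
{
        /* replay list is kept sorted by transno, oldest first */
        const struct fake_req replay_list[] = {
                { 101, "setattr" }, { 102, "create" }, { 103, "write" },
        };
        uint64_t last_replayed = 101;   /* e.g. imp_last_replay_transno */
        const struct fake_req *next;

        next = pick_next_replay(replay_list, 3, last_replayed);
        if (next != NULL)
                printf("replay next: transno %llu (%s)\n",
                       (unsigned long long)next->transno, next->name);
        else
                printf("replay complete\n");
        return 0;
}

After a reconnect, the real code additionally consults imp_resend_replay and tags the request at the last replayed transno with MSG_RESENT before sending it again; the model above leaves that refinement out.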