* Lustre is a trademark of Sun Microsystems, Inc.
*/
+/**
+ * This file deals with various client/target-related logic, including recovery.
+ *
+ * TODO: This code more logically belongs in the ptlrpc module than in ldlm and
+ * should be moved.
+ */
+
#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
#include <lustre_sec.h>
#include "ldlm_internal.h"
-/* @priority: if non-zero, move the selected to the list head
- * @create: if zero, only search in existed connections
+/* @priority: If non-zero, move the selected connection to the list head.
+ * @create: If zero, only search in existing connections.
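+ *
+ * For example, client_import_add_conn() calls this helper with
+ * \a create == 1 to add a new connection, while a caller that only
+ * wants to reprioritize an existing one passes \a create == 0.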
*/
static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
int priority, int create)
}
}
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
cfs_list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
if (obd_uuid_equals(uuid, &item->oic_uuid)) {
if (priority) {
CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
imp, imp->imp_obd->obd_name, uuid->uuid,
(priority ? ", moved to head" : ""));
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
GOTO(out_free, rc = 0);
}
}
- /* not found */
+ /* No existing import connection found for \a uuid. */
if (create) {
imp_conn->oic_conn = ptlrpc_conn;
imp_conn->oic_uuid = *uuid;
imp, imp->imp_obd->obd_name, uuid->uuid,
(priority ? "head" : "tail"));
} else {
- cfs_spin_unlock(&imp->imp_lock);
- GOTO(out_free, rc = -ENOENT);
- }
+ spin_unlock(&imp->imp_lock);
+ GOTO(out_free, rc = -ENOENT);
+ }
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
RETURN(0);
out_free:
if (imp_conn)
int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
{
- struct obd_import_conn *imp_conn;
- struct obd_export *dlmexp;
- int rc = -ENOENT;
- ENTRY;
+ struct obd_import_conn *imp_conn;
+ struct obd_export *dlmexp;
+ int rc = -ENOENT;
+ ENTRY;
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
if (cfs_list_empty(&imp->imp_conn_list)) {
LASSERT(!imp->imp_connection);
GOTO(out, rc);
continue;
LASSERT(imp_conn->oic_conn);
- /* is current conn? */
if (imp_conn == imp->imp_conn_current) {
LASSERT(imp_conn->oic_conn == imp->imp_connection);
break;
}
out:
- cfs_spin_unlock(&imp->imp_lock);
- if (rc == -ENOENT)
- CERROR("connection %s not found\n", uuid->uuid);
- RETURN(rc);
+ spin_unlock(&imp->imp_lock);
+ if (rc == -ENOENT)
+ CERROR("connection %s not found\n", uuid->uuid);
+ RETURN(rc);
}
EXPORT_SYMBOL(client_import_del_conn);
/**
- * Find conn uuid by peer nid. @peer is a server nid. This function is used
- * to find a conn uuid of @imp which can reach @peer.
+ * Find conn UUID by peer NID. \a peer is a server NID. This function is used
+ * to find a connection UUID of \a imp which can reach \a peer.
*/
int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
- struct obd_uuid *uuid)
+ struct obd_uuid *uuid)
{
- struct obd_import_conn *conn;
- int rc = -ENOENT;
- ENTRY;
+ struct obd_import_conn *conn;
+ int rc = -ENOENT;
+ ENTRY;
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
- /* check if conn uuid does have this peer nid */
+ /* Check if conn UUID does have this peer NID. */
if (class_check_uuid(&conn->oic_uuid, peer)) {
*uuid = conn->oic_uuid;
rc = 0;
break;
}
}
- cfs_spin_unlock(&imp->imp_lock);
- RETURN(rc);
+ spin_unlock(&imp->imp_lock);
+ RETURN(rc);
}
EXPORT_SYMBOL(client_import_find_conn);
void client_destroy_import(struct obd_import *imp)
{
- /* drop security policy instance after all rpc finished/aborted
- * to let all busy contexts be released. */
+ /* Drop security policy instance after all RPCs have finished/aborted
+ * to let all busy contexts be released. */
class_import_get(imp);
class_destroy_import(imp);
sptlrpc_import_sec_put(imp);
EXPORT_SYMBOL(client_destroy_import);
/**
- * check whether the osc is on MDT or not
+ * Check whether or not the OSC is on MDT.
* In the config log,
* osc on MDT
* setup 0:{fsname}-OSTxxxx-osc[-MDTxxxx] 1:lustre-OST0000_UUID 2:NID
return 0;
}
-/* configure an RPC client OBD device
+/* Configure an RPC client OBD device.
*
* lcfg parameters:
* 1 - client UUID
RETURN(-EINVAL);
}
- cfs_init_rwsem(&cli->cl_sem);
- cfs_sema_init(&cli->cl_mgc_sem, 1);
+ init_rwsem(&cli->cl_sem);
+ sema_init(&cli->cl_mgc_sem, 1);
cli->cl_conn_count = 0;
memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
cli->cl_dirty = 0;
cli->cl_avail_grant = 0;
- /* FIXME: should limit this for the sum of all cl_dirty_max */
+ /* FIXME: Should limit this for the sum of all cl_dirty_max. */
cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
if (cli->cl_dirty_max >> CFS_PAGE_SHIFT > cfs_num_physpages / 8)
cli->cl_dirty_max = cfs_num_physpages << (CFS_PAGE_SHIFT - 3);
client_obd_list_lock_init(&cli->cl_loi_list_lock);
cfs_atomic_set(&cli->cl_pending_w_pages, 0);
cfs_atomic_set(&cli->cl_pending_r_pages, 0);
- cli->cl_r_in_flight = 0;
- cli->cl_w_in_flight = 0;
+ cli->cl_r_in_flight = 0;
+ cli->cl_w_in_flight = 0;
- cfs_spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_read_page_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_write_page_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
+ spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
+ spin_lock_init(&cli->cl_read_page_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_page_hist.oh_lock);
+ spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
/* lru for osc. */
CFS_INIT_LIST_HEAD(&cli->cl_lru_osc);
CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
name, obddev->obd_name,
cli->cl_target_uuid.uuid);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 1;
+ spin_unlock(&imp->imp_lock);
}
}
ENTRY;
*exp = NULL;
- cfs_down_write(&cli->cl_sem);
+ down_write(&cli->cl_sem);
if (cli->cl_conn_count > 0 )
GOTO(out_sem, rc = -EALREADY);
*exp = NULL;
}
out_sem:
- cfs_up_write(&cli->cl_sem);
+ up_write(&cli->cl_sem);
return rc;
}
cli = &obd->u.cli;
imp = cli->cl_import;
- cfs_down_write(&cli->cl_sem);
+ down_write(&cli->cl_sem);
CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
cli->cl_conn_count);
if (cli->cl_conn_count)
GOTO(out_disconnect, rc = 0);
- /* Mark import deactivated now, so we don't try to reconnect if any
- * of the cleanup RPCs fails (e.g. ldlm cancel, etc). We don't
- * fully deactivate the import, or that would drop all requests. */
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ /* Mark import deactivated now, so we don't try to reconnect if any
+ * of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't
+ * fully deactivate the import, or that would drop all requests. */
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 1;
+ spin_unlock(&imp->imp_lock);
/* Some non-replayable imports (MDS's OSCs) are pinged, so just
* delete it regardless. (It's safe to delete an import that was
ldlm_namespace_free_prior(obd->obd_namespace, imp, obd->obd_force);
}
- /*
- * there's no need to hold sem during disconnecting an import,
- * and actually it may cause deadlock in gss.
- */
- cfs_up_write(&cli->cl_sem);
- rc = ptlrpc_disconnect_import(imp, 0);
- cfs_down_write(&cli->cl_sem);
+ /* There's no need to hold sem while disconnecting an import,
+ * and it may actually cause deadlock in GSS. */
+ up_write(&cli->cl_sem);
+ rc = ptlrpc_disconnect_import(imp, 0);
+ down_write(&cli->cl_sem);
ptlrpc_invalidate_import(imp);
EXIT;
- out_disconnect:
- /* use server style - class_disconnect should be always called for
- * o_disconnect */
+out_disconnect:
+ /* Use server style - class_disconnect should be always called for
+ * o_disconnect. */
err = class_disconnect(exp);
if (!rc && err)
rc = err;
- cfs_up_write(&cli->cl_sem);
+ up_write(&cli->cl_sem);
RETURN(rc);
}
int rc;
ENTRY;
- /* Disconnect early so that clients can't keep using export */
- rc = class_disconnect(exp);
- /* close import for avoid sending any requests */
- if (exp->exp_imp_reverse)
- ptlrpc_cleanup_imp(exp->exp_imp_reverse);
+ /* Disconnect early so that clients can't keep using export. */
+ rc = class_disconnect(exp);
+ /* Close import to avoid sending any requests. */
+ if (exp->exp_imp_reverse)
+ ptlrpc_cleanup_imp(exp->exp_imp_reverse);
- if (exp->exp_obd->obd_namespace != NULL)
- ldlm_cancel_locks_for_export(exp);
+ if (exp->exp_obd->obd_namespace != NULL)
+ ldlm_cancel_locks_for_export(exp);
/* complete all outstanding replies */
- cfs_spin_lock(&exp->exp_lock);
- while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
- struct ptlrpc_reply_state *rs =
- cfs_list_entry(exp->exp_outstanding_replies.next,
- struct ptlrpc_reply_state, rs_exp_list);
+ spin_lock(&exp->exp_lock);
+ while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
+ struct ptlrpc_reply_state *rs =
+ cfs_list_entry(exp->exp_outstanding_replies.next,
+ struct ptlrpc_reply_state, rs_exp_list);
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
cfs_list_del_init(&rs->rs_exp_list);
- cfs_spin_lock(&rs->rs_lock);
+ spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
- cfs_spin_unlock(&rs->rs_lock);
+ spin_unlock(&rs->rs_lock);
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
RETURN(rc);
}
CDEBUG(D_RPCTRACE, "%s: committing for initial connect of %s\n",
obd->obd_name, exp->exp_client_uuid.uuid);
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_need_sync = 0;
- cfs_spin_unlock(&exp->exp_lock);
- class_export_cb_put(exp);
+ spin_lock(&exp->exp_lock);
+ exp->exp_need_sync = 0;
+ spin_unlock(&exp->exp_lock);
+ class_export_cb_put(exp);
}
EXPORT_SYMBOL(target_client_add_cb);
GOTO(out, rc = -ENODEV);
}
- cfs_spin_lock(&target->obd_dev_lock);
+ spin_lock(&target->obd_dev_lock);
if (target->obd_stopping || !target->obd_set_up) {
- cfs_spin_unlock(&target->obd_dev_lock);
+ spin_unlock(&target->obd_dev_lock);
deuuidify(str, NULL, &target_start, &target_len);
LCONSOLE_ERROR_MSG(0x137, "%.*s: Not available for connect "
}
if (target->obd_no_conn) {
- cfs_spin_unlock(&target->obd_dev_lock);
+ spin_unlock(&target->obd_dev_lock);
LCONSOLE_WARN("%s: Temporarily refusing client connection "
"from %s\n", target->obd_name,
GOTO(out, rc = -EAGAIN);
}
- /* Make sure the target isn't cleaned up while we're here. Yes,
- there's still a race between the above check and our incref here.
- Really, class_uuid2obd should take the ref. */
+ /* Make sure the target isn't cleaned up while we're here. Yes,
+ * there's still a race between the above check and our incref here.
+ * Really, class_uuid2obd should take the ref. */
targref = class_incref(target, __FUNCTION__, cfs_current());
target->obd_conn_inprogress++;
- cfs_spin_unlock(&target->obd_dev_lock);
+ spin_unlock(&target->obd_dev_lock);
str = req_capsule_client_get(&req->rq_pill, &RMF_CLUUID);
if (str == NULL) {
obd_str2uuid(&cluuid, str);
- /* XXX extract a nettype and format accordingly */
- switch (sizeof(lnet_nid_t)) {
- /* NB the casts only avoid compiler warnings */
+ /* XXX Extract a nettype and format accordingly. */
+ switch (sizeof(lnet_nid_t)) {
+ /* NB the casts only avoid compiler warnings. */
case 8:
snprintf(remote_uuid.uuid, sizeof remote_uuid,
"NET_"LPX64"_UUID", (__u64)req->rq_peer.nid);
if (!export)
goto no_export;
- /* we've found an export in the hash */
+ /* We've found an export in the hash. */
- cfs_spin_lock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
- if (export->exp_connecting) { /* bug 9635, et. al. */
- cfs_spin_unlock(&export->exp_lock);
- LCONSOLE_WARN("%s: Export %p already connecting from %s\n",
- export->exp_obd->obd_name, export,
- libcfs_nid2str(req->rq_peer.nid));
- class_export_put(export);
- export = NULL;
- rc = -EALREADY;
- } else if (mds_conn && export->exp_connection) {
- cfs_spin_unlock(&export->exp_lock);
+ if (export->exp_connecting) { /* bug 9635, et. al. */
+ spin_unlock(&export->exp_lock);
+ LCONSOLE_WARN("%s: Export %p already connecting from %s\n",
+ export->exp_obd->obd_name, export,
+ libcfs_nid2str(req->rq_peer.nid));
+ class_export_put(export);
+ export = NULL;
+ rc = -EALREADY;
+ } else if (mds_conn && export->exp_connection) {
+ spin_unlock(&export->exp_lock);
if (req->rq_peer.nid != export->exp_connection->c_peer.nid)
- /* mds reconnected after failover */
- LCONSOLE_WARN("%s: Received MDS connection from "
- "%s, removing former export from %s\n",
- target->obd_name, libcfs_nid2str(req->rq_peer.nid),
- libcfs_nid2str(export->exp_connection->c_peer.nid));
- else
- /* new mds connection from the same nid */
+ /* MDS reconnected after failover. */
+ LCONSOLE_WARN("%s: Received MDS connection from "
+ "%s, removing former export from %s\n",
+ target->obd_name, libcfs_nid2str(req->rq_peer.nid),
+ libcfs_nid2str(export->exp_connection->c_peer.nid));
+ else
+ /* New MDS connection from the same NID. */
LCONSOLE_WARN("%s: Received new MDS connection from "
"%s, removing former export from same NID\n",
target->obd_name, libcfs_nid2str(req->rq_peer.nid));
req->rq_peer.nid != export->exp_connection->c_peer.nid &&
(lustre_msg_get_op_flags(req->rq_reqmsg) &
MSG_CONNECT_INITIAL)) {
- cfs_spin_unlock(&export->exp_lock);
- /* in mds failover we have static uuid but nid can be
- * changed*/
+ spin_unlock(&export->exp_lock);
+ /* In MDS failover we have static UUID but NID can change. */
LCONSOLE_WARN("%s: Client %s seen on new nid %s when "
"existing nid %s is already connected\n",
target->obd_name, cluuid.uuid,
export = NULL;
} else {
export->exp_connecting = 1;
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
LASSERT(export->exp_obd == target);
rc = target_handle_reconnect(&conn, export, &cluuid);
GOTO(out, rc = -EBUSY);
} else if (req->rq_export != NULL &&
(cfs_atomic_read(&export->exp_rpc_count) > 1)) {
- /* the current connect rpc has increased exp_rpc_count */
+ /* The current connect RPC has increased exp_rpc_count. */
LCONSOLE_WARN("%s: Client %s (at %s) refused reconnection, "
"still busy with %d active RPCs\n",
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid),
cfs_atomic_read(&export->exp_rpc_count) - 1);
- cfs_spin_lock(&export->exp_lock);
- if (req->rq_export->exp_conn_cnt <
- lustre_msg_get_conn_cnt(req->rq_reqmsg))
- /* try to abort active requests */
- req->rq_export->exp_abort_active_req = 1;
- cfs_spin_unlock(&export->exp_lock);
- GOTO(out, rc = -EBUSY);
+ spin_lock(&export->exp_lock);
+ if (req->rq_export->exp_conn_cnt <
+ lustre_msg_get_conn_cnt(req->rq_reqmsg))
+ /* try to abort active requests */
+ req->rq_export->exp_abort_active_req = 1;
+ spin_unlock(&export->exp_lock);
+ GOTO(out, rc = -EBUSY);
} else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
if (!strstr(cluuid.uuid, "mdt"))
LCONSOLE_WARN("%s: Rejecting reconnect from the "
export ? (long)export->exp_last_request_time : 0);
/* If this is the first time a client connects, reset the recovery
- * timer. Discard lightweight connections which might be local */
+ * timer. Discard lightweight connections which might be local. */
if (!lw_client && rc == 0 && target->obd_recovering)
- check_and_start_recovery_timer(target, req, export == NULL);
+ check_and_start_recovery_timer(target, req, export == NULL);
- /* We want to handle EALREADY but *not* -EALREADY from
- * target_handle_reconnect(), return reconnection state in a flag */
+ /* We want to handle EALREADY but *not* -EALREADY from
+ * target_handle_reconnect(), return reconnection state in a flag. */
if (rc == EALREADY) {
lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECONNECT);
rc = 0;
LASSERT(rc == 0);
}
- /* Tell the client if we support replayable requests */
+ /* Tell the client if we support replayable requests. */
if (target->obd_replayable)
lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_REPLAYABLE);
client_nid = &req->rq_peer.nid;
/* allow lightweight connections during recovery */
if (target->obd_recovering && !lw_client) {
cfs_time_t t;
- int c; /* connected */
- int i; /* in progress */
- int k; /* known */
+ int c; /* connected */
+ int i; /* in progress */
+ int k; /* known */
+ int s; /* stale/evicted */
c = cfs_atomic_read(&target->obd_connected_clients);
i = cfs_atomic_read(&target->obd_lock_replay_clients);
k = target->obd_max_recoverable_clients;
+ s = target->obd_stale_clients;
t = cfs_timer_deadline(&target->obd_recovery_timer);
t = cfs_time_sub(t, cfs_time_current());
t = cfs_duration_sec(t);
LCONSOLE_WARN("%s: Denying connection for new client "
"%s (at %s), waiting for all %d known "
"clients (%d recovered, %d in progress, "
- "and %d unseen) to recover in %d:%.02d\n",
+ "and %d evicted) to recover in %d:%.02d\n",
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid), k,
- c - i, i, k - c, (int)t / 60,
+ c - i, i, s, (int)t / 60,
(int)t % 60);
rc = -EBUSY;
} else {
if (req->rq_export != NULL)
class_export_put(req->rq_export);
- /* request takes one export refcount */
+ /* Request takes one export reference. */
req->rq_export = class_export_get(export);
- cfs_spin_lock(&export->exp_lock);
- if (export->exp_conn_cnt >= lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
- cfs_spin_unlock(&export->exp_lock);
- CDEBUG(D_RPCTRACE, "%s: %s already connected at higher "
- "conn_cnt: %d > %d\n",
+ spin_lock(&export->exp_lock);
+ if (export->exp_conn_cnt >= lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
+ spin_unlock(&export->exp_lock);
+ CDEBUG(D_RPCTRACE, "%s: %s already connected at greater "
+ "or equal conn_cnt: %d >= %d\n",
cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
export->exp_conn_cnt,
lustre_msg_get_conn_cnt(req->rq_reqmsg));
export->exp_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
export->exp_abort_active_req = 0;
- /* request from liblustre? Don't evict it for not pinging. */
+ /* Don't evict liblustre clients for not pinging. */
if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
export->exp_libclient = 1;
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
- cfs_spin_lock(&target->obd_dev_lock);
- cfs_list_del_init(&export->exp_obd_chain_timed);
- cfs_spin_unlock(&target->obd_dev_lock);
- } else {
- cfs_spin_unlock(&export->exp_lock);
- }
+ spin_lock(&target->obd_dev_lock);
+ cfs_list_del_init(&export->exp_obd_chain_timed);
+ spin_unlock(&target->obd_dev_lock);
+ } else {
+ spin_unlock(&export->exp_lock);
+ }
if (export->exp_connection != NULL) {
- /* Check to see if connection came from another NID */
+ /* Check to see if connection came from another NID. */
if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
!cfs_hlist_unhashed(&export->exp_nid_hash))
cfs_hash_del(export->exp_obd->obd_nid_hash,
int has_transno;
__u64 transno = data->ocd_transno;
- cfs_spin_lock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
/* possible race with class_disconnect_stale_exports,
* export may be already in the eviction process */
if (export->exp_failed) {
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
GOTO(out, rc = -ENODEV);
}
- export->exp_in_recovery = 1;
- export->exp_req_replay_needed = 1;
- export->exp_lock_replay_needed = 1;
- cfs_spin_unlock(&export->exp_lock);
+ export->exp_in_recovery = 1;
+ export->exp_req_replay_needed = 1;
+ export->exp_lock_replay_needed = 1;
+ spin_unlock(&export->exp_lock);
has_transno = !!(lustre_msg_get_op_flags(req->rq_reqmsg) &
MSG_CONNECT_TRANSNO);
if (has_transno && transno > 0 &&
transno < target->obd_next_recovery_transno &&
transno > target->obd_last_committed) {
- /* another way is to use cmpxchg() so it will be
- * lock free */
- cfs_spin_lock(&target->obd_recovery_task_lock);
- if (transno < target->obd_next_recovery_transno)
- target->obd_next_recovery_transno = transno;
- cfs_spin_unlock(&target->obd_recovery_task_lock);
+ /* Another way is to use cmpxchg() to be lock-free. */
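+ /* An illustrative lock-free sketch (not what is done below):
+ * __u64 old;
+ * do {
+ * old = target->obd_next_recovery_transno;
+ * } while (transno < old &&
+ * cmpxchg(&target->obd_next_recovery_transno,
+ * old, transno) != old);
+ */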
+ spin_lock(&target->obd_recovery_task_lock);
+ if (transno < target->obd_next_recovery_transno)
+ target->obd_next_recovery_transno = transno;
+ spin_unlock(&target->obd_recovery_task_lock);
}
cfs_atomic_inc(&target->obd_req_replay_clients);
tmp = req_capsule_client_get(&req->rq_pill, &RMF_CONN);
conn = *tmp;
- /* for the rest part, we return -ENOTCONN in case of errors
- * in order to let client initialize connection again.
- */
+ /* Return -ENOTCONN in case of errors to let client reconnect. */
revimp = class_new_import(target);
if (revimp == NULL) {
CERROR("fail to alloc new reverse import.\n");
GOTO(out, rc = -ENOTCONN);
}
- cfs_spin_lock(&export->exp_lock);
- if (export->exp_imp_reverse != NULL) {
+ spin_lock(&export->exp_lock);
+ if (export->exp_imp_reverse != NULL)
/* destroyed import can be still referenced in ctxt */
- obd_set_info_async(req->rq_svc_thread->t_env, export,
- sizeof(KEY_REVIMP_UPD), KEY_REVIMP_UPD,
- 0, NULL, NULL);
tmp_imp = export->exp_imp_reverse;
- }
export->exp_imp_reverse = revimp;
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
revimp->imp_connection = ptlrpc_connection_addref(export->exp_connection);
revimp->imp_client = &export->exp_obd->obd_ldlm_client;
revimp->imp_dlm_fake = 1;
revimp->imp_state = LUSTRE_IMP_FULL;
- /* unknown versions will be caught in
- * ptlrpc_handle_server_req_in->lustre_unpack_msg() */
+ /* Unknown versions will be caught in
+ * ptlrpc_handle_server_req_in->lustre_unpack_msg(). */
revimp->imp_msg_magic = req->rq_reqmsg->lm_magic;
- if ((export->exp_connect_flags & OBD_CONNECT_AT) &&
- (revimp->imp_msg_magic != LUSTRE_MSG_MAGIC_V1))
- revimp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
- else
- revimp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
+ if ((data->ocd_connect_flags & OBD_CONNECT_AT) &&
+ (revimp->imp_msg_magic != LUSTRE_MSG_MAGIC_V1))
+ revimp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
+ else
+ revimp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
- if ((export->exp_connect_flags & OBD_CONNECT_FULL20) &&
+ if ((data->ocd_connect_flags & OBD_CONNECT_FULL20) &&
(revimp->imp_msg_magic != LUSTRE_MSG_MAGIC_V1))
revimp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
else
rc = sptlrpc_import_sec_adapt(revimp, req->rq_svc_ctx, &req->rq_flvr);
if (rc) {
CERROR("Failed to get sec for reverse import: %d\n", rc);
- cfs_spin_lock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
export->exp_imp_reverse = NULL;
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
class_destroy_import(revimp);
}
out:
if (tmp_imp != NULL)
client_destroy_import(tmp_imp);
- if (export) {
- cfs_spin_lock(&export->exp_lock);
- export->exp_connecting = 0;
- cfs_spin_unlock(&export->exp_lock);
+ if (export) {
+ spin_lock(&export->exp_lock);
+ export->exp_connecting = 0;
+ spin_unlock(&export->exp_lock);
- class_export_put(export);
- }
- if (targref) {
- cfs_spin_lock(&target->obd_dev_lock);
+ class_export_put(export);
+ }
+ if (targref) {
+ spin_lock(&target->obd_dev_lock);
target->obd_conn_inprogress--;
- cfs_spin_unlock(&target->obd_dev_lock);
+ spin_unlock(&target->obd_dev_lock);
- class_decref(targref, __FUNCTION__, cfs_current());
+ class_decref(targref, __func__, cfs_current());
}
- if (rc)
- req->rq_status = rc;
- RETURN(rc);
+ if (rc)
+ req->rq_status = rc;
+ RETURN(rc);
}
EXPORT_SYMBOL(target_handle_connect);
if (rc)
RETURN(rc);
- /* keep the rq_export around so we can send the reply */
+ /* Keep the rq_export around so we can send the reply. */
req->rq_status = obd_disconnect(class_export_get(req->rq_export));
RETURN(0);
struct obd_import *imp = NULL;
/* exports created from last_rcvd data, and "fake"
exports created by lctl don't have an import */
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
if (exp->exp_imp_reverse != NULL) {
imp = exp->exp_imp_reverse;
exp->exp_imp_reverse = NULL;
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
if (imp != NULL)
client_destroy_import(imp);
LASSERT(cfs_list_empty(&req->rq_list));
CFS_INIT_LIST_HEAD(&req->rq_replay_list);
- /* increase refcount to keep request in queue */
- cfs_atomic_inc(&req->rq_refcount);
- /** let export know it has replays to be handled */
+ /* Increase refcount to keep request in queue. */
+ cfs_atomic_inc(&req->rq_refcount);
+ /* Let export know it has replays to be handled. */
cfs_atomic_inc(&req->rq_export->exp_replay_count);
}
LASSERT(exp);
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
cfs_list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
rq_replay_list) {
if (lustre_msg_get_transno(reqiter->rq_reqmsg) == transno) {
}
if (dup) {
- /* we expect it with RESENT and REPLAY flags */
+ /* We expect it with RESENT and REPLAY flags. */
if ((lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_RESENT | MSG_REPLAY)) != (MSG_RESENT | MSG_REPLAY))
CERROR("invalid flags %x of resent replay\n",
&exp->exp_req_replay_queue);
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
return dup;
}
static void target_exp_dequeue_req_replay(struct ptlrpc_request *req)
{
- LASSERT(!cfs_list_empty(&req->rq_replay_list));
- LASSERT(req->rq_export);
+ LASSERT(!cfs_list_empty(&req->rq_replay_list));
+ LASSERT(req->rq_export);
- cfs_spin_lock(&req->rq_export->exp_lock);
- cfs_list_del_init(&req->rq_replay_list);
- cfs_spin_unlock(&req->rq_export->exp_lock);
+ spin_lock(&req->rq_export->exp_lock);
+ cfs_list_del_init(&req->rq_replay_list);
+ spin_unlock(&req->rq_export->exp_lock);
}
#ifdef __KERNEL__
{
ENTRY;
- /* only log a recovery message when recovery has occurred */
+ /* Only log a recovery message when recovery has occurred. */
if (obd->obd_recovery_start) {
time_t elapsed_time = max_t(time_t, 1, cfs_time_current_sec() -
obd->obd_recovery_start);
}
ldlm_reprocess_all_ns(obd->obd_namespace);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
if (!cfs_list_empty(&obd->obd_req_replay_queue) ||
!cfs_list_empty(&obd->obd_lock_replay_queue) ||
!cfs_list_empty(&obd->obd_final_req_queue)) {
"" : "lock ",
cfs_list_empty(&obd->obd_final_req_queue) ? \
"" : "final ");
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- LBUG();
- }
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ LBUG();
+ }
+ spin_unlock(&obd->obd_recovery_task_lock);
obd->obd_recovery_end = cfs_time_current_sec();
- /* when recovery finished, cleanup orphans on mds and ost */
+ /* When recovery finished, cleanup orphans on MDS and OST. */
if (OBT(obd) && OBP(obd, postrecov)) {
int rc = OBP(obd, postrecov)(obd);
if (rc < 0)
static void abort_req_replay_queue(struct obd_device *obd)
{
- struct ptlrpc_request *req, *n;
- cfs_list_t abort_list;
+ struct ptlrpc_request *req, *n;
+ cfs_list_t abort_list;
- CFS_INIT_LIST_HEAD(&abort_list);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ CFS_INIT_LIST_HEAD(&abort_list);
+ spin_lock(&obd->obd_recovery_task_lock);
+ cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list) {
DEBUG_REQ(D_WARNING, req, "aborted:");
req->rq_status = -ENOTCONN;
static void abort_lock_replay_queue(struct obd_device *obd)
{
- struct ptlrpc_request *req, *n;
- cfs_list_t abort_list;
+ struct ptlrpc_request *req, *n;
+ cfs_list_t abort_list;
- CFS_INIT_LIST_HEAD(&abort_list);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ CFS_INIT_LIST_HEAD(&abort_list);
+ spin_lock(&obd->obd_recovery_task_lock);
+ cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list){
DEBUG_REQ(D_ERROR, req, "aborted:");
req->rq_status = -ENOTCONN;
ENTRY;
CFS_INIT_LIST_HEAD(&clean_list);
- cfs_spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering) {
- cfs_spin_unlock(&obd->obd_dev_lock);
- EXIT;
- return;
- }
- obd->obd_recovering = obd->obd_abort_recovery = 0;
- cfs_spin_unlock(&obd->obd_dev_lock);
-
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- target_cancel_recovery_timer(obd);
- cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
-
- cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
- LASSERT(req->rq_reply_state == 0);
- target_exp_dequeue_req_replay(req);
- target_request_copy_put(req);
- }
+ spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_recovering) {
+ spin_unlock(&obd->obd_dev_lock);
+ EXIT;
+ return;
+ }
+ obd->obd_recovering = obd->obd_abort_recovery = 0;
+ spin_unlock(&obd->obd_dev_lock);
+
+ spin_lock(&obd->obd_recovery_task_lock);
+ target_cancel_recovery_timer(obd);
+ cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
+
+ cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
+ LASSERT(req->rq_reply_state == 0);
+ target_exp_dequeue_req_replay(req);
+ target_request_copy_put(req);
+ }
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
- cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
+ cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list){
LASSERT(req->rq_reply_state == 0);
static void target_start_recovery_timer(struct obd_device *obd)
{
- if (obd->obd_recovery_start != 0)
- return;
+ if (obd->obd_recovery_start != 0)
+ return;
- cfs_spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering || obd->obd_abort_recovery) {
- cfs_spin_unlock(&obd->obd_dev_lock);
- return;
- }
+ spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
+ spin_unlock(&obd->obd_dev_lock);
+ return;
+ }
- LASSERT(obd->obd_recovery_timeout != 0);
+ LASSERT(obd->obd_recovery_timeout != 0);
- if (obd->obd_recovery_start != 0) {
- cfs_spin_unlock(&obd->obd_dev_lock);
- return;
- }
+ if (obd->obd_recovery_start != 0) {
+ spin_unlock(&obd->obd_dev_lock);
+ return;
+ }
- cfs_timer_arm(&obd->obd_recovery_timer,
- cfs_time_shift(obd->obd_recovery_timeout));
- obd->obd_recovery_start = cfs_time_current_sec();
- cfs_spin_unlock(&obd->obd_dev_lock);
+ cfs_timer_arm(&obd->obd_recovery_timer,
+ cfs_time_shift(obd->obd_recovery_timeout));
+ obd->obd_recovery_start = cfs_time_current_sec();
+ spin_unlock(&obd->obd_dev_lock);
LCONSOLE_WARN("%s: Will be in recovery for at least %d:%.02d, "
"or until %d client%s reconnect%s\n",
*/
static void extend_recovery_timer(struct obd_device *obd, int drt, bool extend)
{
- cfs_time_t now;
- cfs_time_t end;
- cfs_duration_t left;
- int to;
-
- cfs_spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering || obd->obd_abort_recovery) {
- cfs_spin_unlock(&obd->obd_dev_lock);
+ cfs_time_t now;
+ cfs_time_t end;
+ cfs_duration_t left;
+ int to;
+
+ spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
+ spin_unlock(&obd->obd_dev_lock);
return;
}
LASSERT(obd->obd_recovery_start != 0);
if (to > obd->obd_recovery_time_hard)
to = obd->obd_recovery_time_hard;
- if (obd->obd_recovery_timeout < to) {
+ if (obd->obd_recovery_timeout < to ||
+ obd->obd_recovery_timeout == obd->obd_recovery_time_hard) {
obd->obd_recovery_timeout = to;
cfs_timer_arm(&obd->obd_recovery_timer,
cfs_time_shift(drt));
}
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
- CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
- obd->obd_name, (unsigned)drt);
+ CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
+ obd->obd_name, (unsigned)drt);
}
/* Reset the timer with each new client connection */
target_start_recovery_timer(obd);
- /* convert the service time to rpc timeout,
- * reuse service_time to limit stack usage */
- service_time = at_est2timeout(service_time);
+ /* Convert the service time to RPC timeout,
+ * and reuse service_time to limit stack usage. */
+ service_time = at_est2timeout(service_time);
- /* We expect other clients to timeout within service_time, then try
- * to reconnect, then try the failover server. The max delay between
- * connect attempts is SWITCH_MAX + SWITCH_INC + INITIAL */
+ /* We expect other clients to timeout within service_time, then try
+ * to reconnect, then try the failover server. The max delay between
+ * connect attempts is SWITCH_MAX + SWITCH_INC + INITIAL. */
service_time += 2 * INITIAL_CONNECT_TIMEOUT;
LASSERT(obt->obt_magic == OBT_MAGIC);
if (obd->obd_abort_recovery || obd->obd_recovery_expired)
return 1;
LASSERT(clnts <= obd->obd_max_recoverable_clients);
- if (obd->obd_no_conn == 0 &&
- clnts + obd->obd_stale_clients == obd->obd_max_recoverable_clients)
- return 1;
- return 0;
+ return (clnts + obd->obd_stale_clients ==
+ obd->obd_max_recoverable_clients);
}
static int check_for_next_transno(struct obd_device *obd)
{
- struct ptlrpc_request *req = NULL;
- int wake_up = 0, connected, completed, queue_len;
- __u64 next_transno, req_transno;
- ENTRY;
+ struct ptlrpc_request *req = NULL;
+ int wake_up = 0, connected, completed, queue_len;
+ __u64 next_transno, req_transno;
+ ENTRY;
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
req = cfs_list_entry(obd->obd_req_replay_queue.next,
struct ptlrpc_request, rq_list);
obd->obd_next_recovery_transno = req_transno;
wake_up = 1;
}
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- return wake_up;
+ spin_unlock(&obd->obd_recovery_task_lock);
+ return wake_up;
}
static int check_for_next_lock(struct obd_device *obd)
{
- int wake_up = 0;
+ int wake_up = 0;
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
CDEBUG(D_HA, "waking for next lock\n");
wake_up = 1;
CDEBUG(D_HA, "waking for expired recovery\n");
wake_up = 1;
}
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_unlock(&obd->obd_recovery_task_lock);
- return wake_up;
+ return wake_up;
}
/**
/** evict cexports with no replay in queue, they are stalled */
class_disconnect_stale_exports(obd, health_check);
/** continue with VBR */
- cfs_spin_lock(&obd->obd_dev_lock);
- obd->obd_version_recov = 1;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ obd->obd_version_recov = 1;
+ spin_unlock(&obd->obd_dev_lock);
/**
* reset timer, recovery will proceed with versions now,
* timeout is set just to handle reconnection delays
abort_lock_replay_queue(obd);
}
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
- req = cfs_list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
- obd->obd_requests_queued_for_recovery--;
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- } else {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
- LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
- /** evict exports failed VBR */
- class_disconnect_stale_exports(obd, exp_vbr_healthy);
- }
- RETURN(req);
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
+ req = cfs_list_entry(obd->obd_req_replay_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
+ obd->obd_requests_queued_for_recovery--;
+ spin_unlock(&obd->obd_recovery_task_lock);
+ } else {
+ spin_unlock(&obd->obd_recovery_task_lock);
+ LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
+ LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
+ /** Evict exports which failed VBR. */
+ class_disconnect_stale_exports(obd, exp_vbr_healthy);
+ }
+ RETURN(req);
}
static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
exp_lock_replay_healthy))
abort_lock_replay_queue(obd);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
- req = cfs_list_entry(obd->obd_lock_replay_queue.next,
- struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- } else {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
+ req = cfs_list_entry(obd->obd_lock_replay_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ } else {
+ spin_unlock(&obd->obd_recovery_task_lock);
LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients) == 0);
/** evict exports failed VBR */
static struct ptlrpc_request *target_next_final_ping(struct obd_device *obd)
{
- struct ptlrpc_request *req = NULL;
-
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_final_req_queue)) {
- req = cfs_list_entry(obd->obd_final_req_queue.next,
- struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- if (req->rq_export->exp_in_recovery) {
- cfs_spin_lock(&req->rq_export->exp_lock);
- req->rq_export->exp_in_recovery = 0;
- cfs_spin_unlock(&req->rq_export->exp_lock);
- }
- } else {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- }
- return req;
+ struct ptlrpc_request *req = NULL;
+
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (!cfs_list_empty(&obd->obd_final_req_queue)) {
+ req = cfs_list_entry(obd->obd_final_req_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ if (req->rq_export->exp_in_recovery) {
+ spin_lock(&req->rq_export->exp_lock);
+ req->rq_export->exp_in_recovery = 0;
+ spin_unlock(&req->rq_export->exp_lock);
+ }
+ } else {
+ spin_unlock(&obd->obd_recovery_task_lock);
+ }
+ return req;
}
static int handle_recovery_req(struct ptlrpc_thread *thread,
to = max((int)at_est2timeout(
at_get(&svcpt->scp_at_estimate)),
(int)lustre_msg_get_timeout(req->rq_reqmsg));
- /* Add net_latency (see ptlrpc_replay_req) */
- to += lustre_msg_get_service_time(req->rq_reqmsg);
+ /* Add 2 * net_latency: one to balance rq_deadline
+ * (see ptl_send_rpc), and one for resending the request to the
+ * server. Note: the client packs net_latency into the replay
+ * request (see ptlrpc_replay_req). */
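+ /* E.g. with a 10 second timeout estimate and a 3 second packed
+ * net_latency, the timer is extended by 10 + 2 * 3 = 16 seconds
+ * (illustrative values only). */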
+ to += 2 * lustre_msg_get_service_time(req->rq_reqmsg);
}
extend_recovery_timer(class_exp2obd(req->rq_export), to, true);
}
cfs_curproc_pid());
trd->trd_processing_task = cfs_curproc_pid();
- cfs_spin_lock(&obd->obd_dev_lock);
- obd->obd_recovering = 1;
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_complete(&trd->trd_starting);
+ spin_lock(&obd->obd_dev_lock);
+ obd->obd_recovering = 1;
+ spin_unlock(&obd->obd_dev_lock);
+ complete(&trd->trd_starting);
/* first of all, we have to know the first transno to replay */
if (target_recovery_overseer(obd, check_for_clients,
* bz18031: increase next_recovery_transno before
* target_request_copy_put() will drop exp_rpc reference
*/
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- obd->obd_next_recovery_transno++;
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ obd->obd_next_recovery_transno++;
+ spin_unlock(&obd->obd_recovery_task_lock);
target_exp_dequeue_req_replay(req);
target_request_copy_put(req);
obd->obd_replayed_requests++;
tgt_boot_epoch_update(lut);
/* We drop recoverying flag to forward all new requests
* to regular mds_handle() since now */
- cfs_spin_lock(&obd->obd_dev_lock);
- obd->obd_recovering = obd->obd_abort_recovery = 0;
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- target_cancel_recovery_timer(obd);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_dev_lock);
+ obd->obd_recovering = obd->obd_abort_recovery = 0;
+ spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ target_cancel_recovery_timer(obd);
+ spin_unlock(&obd->obd_recovery_task_lock);
while ((req = target_next_final_ping(obd))) {
LASSERT(trd->trd_processing_task == cfs_curproc_pid());
DEBUG_REQ(D_HA, req, "processing final ping from %s: ",
lu_context_fini(&env->le_ctx);
trd->trd_processing_task = 0;
- cfs_complete(&trd->trd_finishing);
+ complete(&trd->trd_finishing);
OBD_FREE_PTR(thread);
OBD_FREE_PTR(env);
struct target_recovery_data *trd = &obd->obd_recovery_data;
memset(trd, 0, sizeof(*trd));
- cfs_init_completion(&trd->trd_starting);
- cfs_init_completion(&trd->trd_finishing);
+ init_completion(&trd->trd_starting);
+ init_completion(&trd->trd_finishing);
trd->trd_recovery_handler = handler;
if (cfs_create_thread(target_recovery_thread, lut, 0) > 0) {
- cfs_wait_for_completion(&trd->trd_starting);
+ wait_for_completion(&trd->trd_starting);
LASSERT(obd->obd_recovering != 0);
} else
rc = -ECHILD;
void target_stop_recovery_thread(struct obd_device *obd)
{
- if (obd->obd_recovery_data.trd_processing_task > 0) {
- struct target_recovery_data *trd = &obd->obd_recovery_data;
- /** recovery can be done but postrecovery is not yet */
- cfs_spin_lock(&obd->obd_dev_lock);
- if (obd->obd_recovering) {
- CERROR("%s: Aborting recovery\n", obd->obd_name);
- obd->obd_abort_recovery = 1;
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
- }
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_wait_for_completion(&trd->trd_finishing);
- }
+ if (obd->obd_recovery_data.trd_processing_task > 0) {
+ struct target_recovery_data *trd = &obd->obd_recovery_data;
+ /** Recovery can be done, but post-recovery is not yet complete. */
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_recovering) {
+ CERROR("%s: Aborting recovery\n", obd->obd_name);
+ obd->obd_abort_recovery = 1;
+ cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ }
+ spin_unlock(&obd->obd_dev_lock);
+ wait_for_completion(&trd->trd_finishing);
+ }
}
EXPORT_SYMBOL(target_stop_recovery_thread);
static int target_process_req_flags(struct obd_device *obd,
struct ptlrpc_request *req)
{
- struct obd_export *exp = req->rq_export;
- LASSERT(exp != NULL);
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
- /* client declares he's ready to replay locks */
- cfs_spin_lock(&exp->exp_lock);
- if (exp->exp_req_replay_needed) {
- exp->exp_req_replay_needed = 0;
- cfs_spin_unlock(&exp->exp_lock);
-
- LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
- cfs_atomic_dec(&obd->obd_req_replay_clients);
- } else {
- cfs_spin_unlock(&exp->exp_lock);
- }
- }
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
- /* client declares he's ready to complete recovery
- * so, we put the request on th final queue */
- cfs_spin_lock(&exp->exp_lock);
- if (exp->exp_lock_replay_needed) {
- exp->exp_lock_replay_needed = 0;
- cfs_spin_unlock(&exp->exp_lock);
-
- LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
- cfs_atomic_dec(&obd->obd_lock_replay_clients);
- } else {
- cfs_spin_unlock(&exp->exp_lock);
- }
- }
- return 0;
+ struct obd_export *exp = req->rq_export;
+ LASSERT(exp != NULL);
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
+ /* client declares he's ready to replay locks */
+ spin_lock(&exp->exp_lock);
+ if (exp->exp_req_replay_needed) {
+ exp->exp_req_replay_needed = 0;
+ spin_unlock(&exp->exp_lock);
+
+ LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
+ cfs_atomic_dec(&obd->obd_req_replay_clients);
+ } else {
+ spin_unlock(&exp->exp_lock);
+ }
+ }
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
+ /* Client declares he's ready to complete recovery,
+ * so we put the request on the final queue. */
+ spin_lock(&exp->exp_lock);
+ if (exp->exp_lock_replay_needed) {
+ exp->exp_lock_replay_needed = 0;
+ spin_unlock(&exp->exp_lock);
+
+ LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
+ cfs_atomic_dec(&obd->obd_lock_replay_clients);
+ } else {
+ spin_unlock(&exp->exp_lock);
+ }
+ }
+ return 0;
}
int target_queue_recovery_request(struct ptlrpc_request *req,
target_request_copy_get(req);
DEBUG_REQ(D_HA, req, "queue final req");
cfs_waitq_signal(&obd->obd_next_transno_waitq);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (obd->obd_recovering) {
- cfs_list_add_tail(&req->rq_list,
- &obd->obd_final_req_queue);
- } else {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- target_request_copy_put(req);
- RETURN(obd->obd_stopping ? -ENOTCONN : 1);
- }
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- RETURN(0);
- }
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
- /* client declares he's ready to replay locks */
- target_request_copy_get(req);
- DEBUG_REQ(D_HA, req, "queue lock replay req");
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- LASSERT(obd->obd_recovering);
- /* usually due to recovery abort */
- if (!req->rq_export->exp_in_recovery) {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- target_request_copy_put(req);
- RETURN(-ENOTCONN);
- }
- LASSERT(req->rq_export->exp_lock_replay_needed);
- cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- RETURN(0);
- }
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (obd->obd_recovering) {
+ cfs_list_add_tail(&req->rq_list,
+ &obd->obd_final_req_queue);
+ } else {
+ spin_unlock(&obd->obd_recovery_task_lock);
+ target_request_copy_put(req);
+ RETURN(obd->obd_stopping ? -ENOTCONN : 1);
+ }
+ spin_unlock(&obd->obd_recovery_task_lock);
+ RETURN(0);
+ }
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
+ /* client declares he's ready to replay locks */
+ target_request_copy_get(req);
+ DEBUG_REQ(D_HA, req, "queue lock replay req");
+ cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ spin_lock(&obd->obd_recovery_task_lock);
+ LASSERT(obd->obd_recovering);
+ /* usually due to recovery abort */
+ if (!req->rq_export->exp_in_recovery) {
+ spin_unlock(&obd->obd_recovery_task_lock);
+ target_request_copy_put(req);
+ RETURN(-ENOTCONN);
+ }
+ LASSERT(req->rq_export->exp_lock_replay_needed);
+ cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ RETURN(0);
+ }
/* CAVEAT EMPTOR: The incoming request message has been swabbed
* (i.e. buflens etc are in my own byte order), but type-dependent
CDEBUG(D_HA, "Next recovery transno: "LPU64
", current: "LPU64", replaying\n",
obd->obd_next_recovery_transno, transno);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (transno < obd->obd_next_recovery_transno) {
- /* Processing the queue right now, don't re-add. */
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- RETURN(1);
- }
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (transno < obd->obd_next_recovery_transno) {
+ /* Processing the queue right now, don't re-add. */
+ LASSERT(cfs_list_empty(&req->rq_list));
+ spin_unlock(&obd->obd_recovery_task_lock);
+ RETURN(1);
+ }
+ spin_unlock(&obd->obd_recovery_task_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_TGT_REPLAY_DROP))
RETURN(0);
}
/* XXX O(n^2) */
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
LASSERT(obd->obd_recovering);
cfs_list_for_each(tmp, &obd->obd_req_replay_queue) {
struct ptlrpc_request *reqiter =
transno)) {
DEBUG_REQ(D_ERROR, req, "dropping replay: transno "
"has been claimed by another client");
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_unlock(&obd->obd_recovery_task_lock);
target_exp_dequeue_req_replay(req);
target_request_copy_put(req);
RETURN(0);
cfs_list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
obd->obd_requests_queued_for_recovery++;
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
- RETURN(0);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ RETURN(0);
}
EXPORT_SYMBOL(target_queue_recovery_request);
struct obd_device *obd;
ENTRY;
- /*
- * Check that we still have all structures alive as this may
- * be some late rpc in shutdown time.
- */
+ /* Check that we still have all structures alive as this may
+ * be some late RPC at shutdown time. */
if (unlikely(!req->rq_export || !req->rq_export->exp_obd ||
!exp_connect_lru_resize(req->rq_export))) {
lustre_msg_set_slv(req->rq_repmsg, 0);
RETURN(0);
}
- /*
- * OBD is alive here as export is alive, which we checked above.
- */
+ /* OBD is alive here as export is alive, which we checked above. */
obd = req->rq_export->exp_obd;
- cfs_read_lock(&obd->obd_pool_lock);
+ read_lock(&obd->obd_pool_lock);
lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv);
lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
- cfs_read_unlock(&obd->obd_pool_lock);
+ read_unlock(&obd->obd_pool_lock);
RETURN(0);
}
rs->rs_export = exp;
rs->rs_opc = lustre_msg_get_opc(req->rq_reqmsg);
- cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
- CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
- rs->rs_transno, exp->exp_last_committed);
- if (rs->rs_transno > exp->exp_last_committed) {
- /* not committed already */
- cfs_list_add_tail(&rs->rs_obd_list,
- &exp->exp_uncommitted_replies);
- }
- cfs_spin_unlock (&exp->exp_uncommitted_replies_lock);
+ spin_lock(&exp->exp_uncommitted_replies_lock);
+ CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
+ rs->rs_transno, exp->exp_last_committed);
+ if (rs->rs_transno > exp->exp_last_committed) {
+ /* Not yet committed. */
+ cfs_list_add_tail(&rs->rs_obd_list,
+ &exp->exp_uncommitted_replies);
+ }
+ spin_unlock(&exp->exp_uncommitted_replies_lock);
- cfs_spin_lock(&exp->exp_lock);
- cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
- cfs_spin_unlock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
+ cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
+ spin_unlock(&exp->exp_lock);
- netrc = target_send_reply_msg (req, rc, fail_id);
+ netrc = target_send_reply_msg(req, rc, fail_id);
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
cfs_atomic_inc(&svcpt->scp_nreps_difficult);
ptlrpc_rs_addref(rs);
}
- cfs_spin_lock(&rs->rs_lock);
- if (rs->rs_transno <= exp->exp_last_committed ||
- (!rs->rs_on_net && !rs->rs_no_ack) ||
- cfs_list_empty(&rs->rs_exp_list) || /* completed already */
- cfs_list_empty(&rs->rs_obd_list)) {
- CDEBUG(D_HA, "Schedule reply immediately\n");
- ptlrpc_dispatch_difficult_reply(rs);
- } else {
+ spin_lock(&rs->rs_lock);
+ if (rs->rs_transno <= exp->exp_last_committed ||
+ (!rs->rs_on_net && !rs->rs_no_ack) ||
+ cfs_list_empty(&rs->rs_exp_list) || /* completed already */
+ cfs_list_empty(&rs->rs_obd_list)) {
+ CDEBUG(D_HA, "Schedule reply immediately\n");
+ ptlrpc_dispatch_difficult_reply(rs);
+ } else {
cfs_list_add(&rs->rs_list, &svcpt->scp_rep_active);
rs->rs_scheduled = 0; /* allow notifier to schedule */
}
- cfs_spin_unlock(&rs->rs_lock);
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&rs->rs_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
EXIT;
}
EXPORT_SYMBOL(target_send_reply);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_dump_export_locks(struct obd_export *exp)
{
- cfs_spin_lock(&exp->exp_locks_list_guard);
- if (!cfs_list_empty(&exp->exp_locks_list)) {
- struct ldlm_lock *lock;
-
- CERROR("dumping locks for export %p,"
- "ignore if the unmount doesn't hang\n", exp);
- cfs_list_for_each_entry(lock, &exp->exp_locks_list, l_exp_refs_link)
- LDLM_ERROR(lock, "lock:");
- }
- cfs_spin_unlock(&exp->exp_locks_list_guard);
+ spin_lock(&exp->exp_locks_list_guard);
+ if (!cfs_list_empty(&exp->exp_locks_list)) {
+ struct ldlm_lock *lock;
+
+ CERROR("dumping locks for export %p, "
+ "ignore if the unmount doesn't hang\n", exp);
+ cfs_list_for_each_entry(lock, &exp->exp_locks_list,
+ l_exp_refs_link)
+ LDLM_ERROR(lock, "lock:");
+ }
+ spin_unlock(&exp->exp_locks_list_guard);
}
#endif
int rc = 0;
ENTRY;
- /* Check if there is eviction in progress, and if so, wait for
- * it to finish */
+ /* If there is eviction in progress, wait for it to finish. */
if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
*lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
lwi);
}
- /* Check if client was evicted or tried to reconnect already */
+ /* Check if client was evicted or tried to reconnect already. */
if (exp->exp_failed || exp->exp_abort_active_req) {
rc = -ENOTCONN;
} else {
exp->exp_abort_active_req,
lwi);
LASSERT(rc == 0 || rc == -ETIMEDOUT);
- /* Wait again if we changed deadline */
+ /* Wait again if we changed deadline. */
} while ((rc == -ETIMEDOUT) &&
(req->rq_deadline > cfs_time_current_sec()));
} else if (exp->exp_abort_active_req) {
DEBUG_REQ(D_ERROR, req, "Reconnect on bulk %s",
bulk2type(desc));
- /* we don't reply anyway */
+ /* We don't reply anyway. */
rc = -ETIMEDOUT;
ptlrpc_abort_bulk(desc);
} else if (!desc->bd_success ||
bulk2type(desc),
desc->bd_nob_transferred,
desc->bd_nob);
- /* XXX should this be a different errno? */
+ /* XXX Should this be a different errno? */
rc = -ETIMEDOUT;
} else if (desc->bd_type == BULK_GET_SINK) {
rc = sptlrpc_svc_unwrap_bulk(req, desc);