X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fldlm%2Fldlm_lib.c;h=b0eea4758c492244cd799324d4f64fe6e8e6662c;hp=30d2fd5f196e2847fcd463976375a3c8ea67eb0c;hb=14a1102268941d851ef5ef793923e39081b81ff4;hpb=fa9c4d0fee01d30d538a819f370f281431a43f68

diff --git a/lustre/ldlm/ldlm_lib.c b/lustre/ldlm/ldlm_lib.c
index 30d2fd5..b0eea47 100644
--- a/lustre/ldlm/ldlm_lib.c
+++ b/lustre/ldlm/ldlm_lib.c
@@ -141,6 +141,51 @@ int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
 }
 EXPORT_SYMBOL(client_import_add_conn);
 
+int client_import_dyn_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
+			       lnet_nid_t prim_nid, int priority)
+{
+	struct ptlrpc_connection *ptlrpc_conn;
+	int rc;
+
+	ptlrpc_conn = ptlrpc_uuid_to_connection(uuid, prim_nid);
+	if (!ptlrpc_conn) {
+		const char *str_uuid = obd_uuid2str(uuid);
+
+		rc = class_add_uuid(str_uuid, prim_nid);
+		if (rc) {
+			CERROR("%s: failed to add UUID '%s': rc = %d\n",
+			       imp->imp_obd->obd_name, str_uuid, rc);
+			return rc;
+		}
+	}
+	return import_set_conn(imp, uuid, priority, 1);
+}
+EXPORT_SYMBOL(client_import_dyn_add_conn);
+
+int client_import_add_nids_to_conn(struct obd_import *imp, lnet_nid_t *nids,
+				   int nid_count, struct obd_uuid *uuid)
+{
+	struct obd_import_conn *conn;
+	int rc = -ENOENT;
+
+	ENTRY;
+	if (nid_count <= 0 || !nids)
+		return rc;
+
+	spin_lock(&imp->imp_lock);
+	list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+		if (class_check_uuid(&conn->oic_uuid, nids[0])) {
+			*uuid = conn->oic_uuid;
+			rc = class_add_nids_to_uuid(&conn->oic_uuid, nids,
+						    nid_count);
+			break;
+		}
+	}
+	spin_unlock(&imp->imp_lock);
+	RETURN(rc);
+}
+EXPORT_SYMBOL(client_import_add_nids_to_conn);
+
 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
 {
 	struct obd_import_conn *imp_conn;
@@ -271,13 +316,13 @@ static int osc_on_mdt(char *obdname)
  * 3 - inactive-on-startup
  * 4 - restrictive net
  */
-int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
+int client_obd_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 {
-	struct client_obd *cli = &obddev->u.cli;
+	struct client_obd *cli = &obd->u.cli;
 	struct obd_import *imp;
 	struct obd_uuid server_uuid;
 	int rq_portal, rp_portal, connect_op;
-	const char *name = obddev->obd_type->typ_name;
+	const char *name = obd->obd_type->typ_name;
 	enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN;
 	char *cli_name = lustre_cfg_buf(lcfg, 0);
 	struct ptlrpc_connection fake_conn = { .c_self = 0,
@@ -378,7 +423,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	 * ptlrpc_connect_interpret().
 	 */
 	client_adjust_max_dirty(cli);
-	INIT_LIST_HEAD(&cli->cl_cache_waiters);
+	init_waitqueue_head(&cli->cl_cache_waiters);
 	INIT_LIST_HEAD(&cli->cl_loi_ready_list);
 	INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
 	INIT_LIST_HEAD(&cli->cl_loi_write_list);
@@ -434,7 +479,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	 */
 	cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES;
 
-	cli->cl_max_short_io_bytes = OBD_MAX_SHORT_IO_BYTES;
+	cli->cl_max_short_io_bytes = OBD_DEF_SHORT_IO_BYTES;
 
 	/*
 	 * set cl_chunkbits default value to PAGE_SHIFT,
@@ -451,7 +496,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	} else if (cfs_totalram_pages() >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 4;
 	} else {
-		if (osc_on_mdt(obddev->obd_name))
+		if (osc_on_mdt(obd->obd_name))
 			cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_MAX;
 		else
 			cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
@@ -482,12 +527,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	}
 
 	ptlrpc_init_client(rq_portal, rp_portal, name,
-			   &obddev->obd_ldlm_client);
+			   &obd->obd_ldlm_client);
 
-	imp = class_new_import(obddev);
+	imp = class_new_import(obd);
 	if (imp == NULL)
 		GOTO(err_ldlm, rc = -ENOENT);
-	imp->imp_client = &obddev->obd_ldlm_client;
+	imp->imp_client = &obd->obd_ldlm_client;
 	imp->imp_connect_op = connect_op;
 	memcpy(cli->cl_target_uuid.uuid, lustre_cfg_buf(lcfg, 1),
 	       LUSTRE_CFG_BUFLEN(lcfg, 1));
@@ -496,10 +541,10 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	if (lustre_cfg_buf(lcfg, 4)) {
 		__u32 refnet = libcfs_str2net(lustre_cfg_string(lcfg, 4));
 
-		if (refnet == LNET_NIDNET(LNET_NID_ANY)) {
+		if (refnet == LNET_NET_ANY) {
 			rc = -EINVAL;
 			CERROR("%s: bad mount option 'network=%s': rc = %d\n",
-			       obddev->obd_name, lustre_cfg_string(lcfg, 4),
+			       obd->obd_name, lustre_cfg_string(lcfg, 4),
 			       rc);
 			GOTO(err_import, rc);
 		}
@@ -521,7 +566,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
 		if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
 			CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
-			       name, obddev->obd_name,
+			       name, obd->obd_name,
 			       cli->cl_target_uuid.uuid);
 			spin_lock(&imp->imp_lock);
 			imp->imp_deactive = 1;
@@ -529,14 +574,16 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 		}
 	}
 
-	obddev->obd_namespace = ldlm_namespace_new(obddev, obddev->obd_name,
-						   LDLM_NAMESPACE_CLIENT,
-						   LDLM_NAMESPACE_GREEDY,
-						   ns_type);
-	if (obddev->obd_namespace == NULL) {
-		CERROR("Unable to create client namespace - %s\n",
-		       obddev->obd_name);
-		GOTO(err_import, rc = -ENOMEM);
+	obd->obd_namespace = ldlm_namespace_new(obd, obd->obd_name,
+						LDLM_NAMESPACE_CLIENT,
+						LDLM_NAMESPACE_GREEDY,
+						ns_type);
+	if (IS_ERR(obd->obd_namespace)) {
+		rc = PTR_ERR(obd->obd_namespace);
+		CERROR("%s: unable to create client namespace: rc = %d\n",
+		       obd->obd_name, rc);
+		obd->obd_namespace = NULL;
+		GOTO(err_import, rc);
 	}
 
 	RETURN(rc);
@@ -550,22 +597,22 @@ err:
 	OBD_FREE(cli->cl_mod_tag_bitmap,
 		 BITS_TO_LONGS(OBD_MAX_RIF_MAX) * sizeof(long));
 	cli->cl_mod_tag_bitmap = NULL;
-        RETURN(rc);
+	RETURN(rc);
 }
 EXPORT_SYMBOL(client_obd_setup);
 
-int client_obd_cleanup(struct obd_device *obddev)
+int client_obd_cleanup(struct obd_device *obd)
 {
-	struct client_obd *cli = &obddev->u.cli;
+	struct client_obd *cli = &obd->u.cli;
 
 	ENTRY;
-	ldlm_namespace_free_post(obddev->obd_namespace);
-	obddev->obd_namespace = NULL;
+	ldlm_namespace_free_post(obd->obd_namespace);
+	obd->obd_namespace = NULL;
 
-	obd_cleanup_client_import(obddev);
-	LASSERT(obddev->u.cli.cl_import == NULL);
+	obd_cleanup_client_import(obd);
+	LASSERT(obd->u.cli.cl_import == NULL);
 
 	ldlm_put_ref();
@@ -787,12 +834,37 @@ int server_disconnect_export(struct obd_export *exp)
 }
 EXPORT_SYMBOL(server_disconnect_export);
 
+static inline int target_check_recovery_timer(struct obd_device *target)
+{
+	ktime_t remaining;
+	s64 timeout;
+
+	if (!target->obd_recovering || target->obd_recovery_start == 0)
+		return 0;
+
+	remaining = hrtimer_get_remaining(&target->obd_recovery_timer);
+	timeout = ktime_divns(remaining, NSEC_PER_SEC);
+	if (timeout > -30)
+		return 0;
+
+	/* the recovery timer should expire, but it isn't triggered,
+	 * it's better to abort the recovery of this target to speed up
+	 * the recovery of the whole cluster. */
+	spin_lock(&target->obd_dev_lock);
+	if (target->obd_recovering) {
+		CERROR("%s: Aborting recovery\n", target->obd_name);
+		target->obd_abort_recovery = 1;
+		wake_up(&target->obd_next_transno_waitq);
+	}
+	spin_unlock(&target->obd_dev_lock);
+	return 0;
+}
+
 /*
  * --------------------------------------------------------------------------
  * from old lib/target.c
  * --------------------------------------------------------------------------
  */
-
 static int target_handle_reconnect(struct lustre_handle *conn,
 				   struct obd_export *exp,
 				   struct obd_uuid *cluuid)
@@ -838,7 +910,7 @@ static int target_handle_reconnect(struct lustre_handle *conn,
 		GOTO(out_already, rc);
 	}
 
-	remaining = hrtimer_expires_remaining(&target->obd_recovery_timer);
+	remaining = hrtimer_get_remaining(&target->obd_recovery_timer);
 	timeout = ktime_divns(remaining, NSEC_PER_SEC);
 	if (timeout > 0) {
 		LCONSOLE_WARN("%s: Client %s (at %s) reconnected, waiting for %d clients in recovery for %lld:%.02lld\n",
@@ -853,6 +925,8 @@ static int target_handle_reconnect(struct lustre_handle *conn,
 		int count = 0;
 		char *buf = NULL;
 
+		target_check_recovery_timer(target);
+
 		tdtd = class_exp2tgt(exp)->lut_tdtd;
 		if (tdtd && tdtd->tdtd_show_update_logs_retrievers)
 			buf = tdtd->tdtd_show_update_logs_retrievers(
@@ -970,8 +1044,7 @@ static int rev_import_reconnect(struct obd_export *exp,
 	/* avoid sending a request until import flags are changed */
 	ptlrpc_import_enter_resend(revimp);
 
-	if (revimp->imp_connection != NULL)
-		ptlrpc_connection_put(revimp->imp_connection);
+	ptlrpc_connection_put(revimp->imp_connection);
 
 	/*
 	 * client from recovery don't have a handle so we need to take from
@@ -1138,8 +1211,8 @@ int target_handle_connect(struct ptlrpc_request *req)
 					 OBD_CONNECT_MDS_MDS) != 0;
 
 	/*
-	 * OBD_CONNECT_MNE_SWAB is defined as OBD_CONNECT_MDS_MDS
-	 * for Imperative Recovery connection from MGC to MGS.
+	 * OBD_CONNECT_MNE_SWAB is removed at 2.14
+	 * Checking OBD_CONNECT_FID can be removed in the future.
 	 *
 	 * Via check OBD_CONNECT_FID, we can distinguish whether
 	 * the OBD_CONNECT_MDS_MDS/OBD_CONNECT_MNE_SWAB is from
@@ -1176,7 +1249,7 @@ int target_handle_connect(struct ptlrpc_request *req)
 	if (obd_uuid_equals(&cluuid, &target->obd_uuid))
 		goto dont_check_exports;
 
-	export = cfs_hash_lookup(target->obd_uuid_hash, &cluuid);
+	export = obd_uuid_lookup(target, &cluuid);
 	if (!export)
 		goto no_export;
 
@@ -1244,6 +1317,10 @@ int target_handle_connect(struct ptlrpc_request *req)
 		rc = -EALREADY;
 		class_export_put(export);
 		export = NULL;
+	} else if (OBD_FAIL_PRECHECK(OBD_FAIL_TGT_RECOVERY_CONNECT) &&
+		   !lw_client) {
+		spin_unlock(&export->exp_lock);
+		rc = -EAGAIN;
 	} else {
 		export->exp_connecting = 1;
 		spin_unlock(&export->exp_lock);
@@ -1261,12 +1338,12 @@ no_export:
 		LCONSOLE_WARN("%s: Client %s (at %s) refused connection, still busy with %d references\n",
 			      target->obd_name, cluuid.uuid,
 			      libcfs_nid2str(req->rq_peer.nid),
-			      atomic_read(&export->exp_refcount));
+			      refcount_read(&export->exp_handle.h_ref));
 		GOTO(out, rc = -EBUSY);
 	} else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1 &&
 		   rc != EALREADY) {
 		if (!strstr(cluuid.uuid, "mdt"))
-			LCONSOLE_WARN("%s: Rejecting reconnect from the known client %s (at %s) because it is indicating it is a new client",
+			LCONSOLE_WARN("%s: Rejecting reconnect from the known client %s (at %s) because it is indicating it is a new client\n",
 				      target->obd_name, cluuid.uuid,
 				      libcfs_nid2str(req->rq_peer.nid));
 		GOTO(out, rc = -EALREADY);
@@ -1327,7 +1404,7 @@ no_export:
 		known = atomic_read(&target->obd_max_recoverable_clients);
 		stale = target->obd_stale_clients;
 
-		remaining = hrtimer_expires_remaining(timer);
+		remaining = hrtimer_get_remaining(timer);
 		left = ktime_divns(remaining, NSEC_PER_SEC);
 
 		if (ktime_to_ns(remaining) > 0) {
@@ -1336,6 +1413,8 @@ no_export:
 		} else {
 			msg = "already passed deadline";
 			timeout = -left;
+
+			target_check_recovery_timer(target);
 		}
 
 		LCONSOLE_WARN("%s: Denying connection for new client %s (at %s), waiting for %d known clients (%d recovered, %d in progress, and %d evicted) %s %lld:%.02lld\n",
@@ -1414,11 +1493,8 @@ dont_check_exports:
 
 	if (export->exp_connection != NULL) {
 		/* Check to see if connection came from another NID. */
-		if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
-		    !hlist_unhashed(&export->exp_nid_hash))
-			cfs_hash_del(export->exp_obd->obd_nid_hash,
-				     &export->exp_connection->c_peer.nid,
-				     &export->exp_nid_hash);
+		if (export->exp_connection->c_peer.nid != req->rq_peer.nid)
+			obd_nid_del(export->exp_obd, export);
 
 		ptlrpc_connection_put(export->exp_connection);
 	}
@@ -1426,10 +1502,10 @@ dont_check_exports:
 	export->exp_connection = ptlrpc_connection_get(req->rq_peer,
 						       req->rq_self,
 						       &cluuid);
-	if (hlist_unhashed(&export->exp_nid_hash))
-		cfs_hash_add(export->exp_obd->obd_nid_hash,
-			     &export->exp_connection->c_peer.nid,
-			     &export->exp_nid_hash);
+	if (!export->exp_connection)
+		GOTO(out, rc = -ENOTCONN);
+
+	obd_nid_add(export->exp_obd, export);
 
 	lustre_msg_set_handle(req->rq_repmsg, &conn);
 
@@ -1520,6 +1596,18 @@ int target_handle_disconnect(struct ptlrpc_request *req)
 	if (rc)
 		RETURN(rc);
 
+	/* In case of target disconnect, updating sec ctx immediately is
+	 * required in order to record latest sequence number used.
+	 * Sequence is normally updated on export destroy, but this event
+	 * can occur too late, ie after a new target connect request has
+	 * been processed.
+	 * Maintaining correct sequence when client connection becomes idle
+	 * ensures that GSS does not erroneously consider requests as replays.
+	 */
+	rc = sptlrpc_export_update_ctx(req->rq_export);
+	if (rc)
+		RETURN(rc);
+
 	/* Keep the rq_export around so we can send the reply. */
 	req->rq_status = obd_disconnect(class_export_get(req->rq_export));
 
@@ -1643,8 +1731,8 @@ static void target_finish_recovery(struct lu_target *lut)
 
 	elapsed_time = max_t(time64_t, now - obd->obd_recovery_start, 1);
 	LCONSOLE_INFO("%s: Recovery over after %lld:%.02lld, of %d clients %d recovered and %d %s evicted.\n",
-		      obd->obd_name, (s64)elapsed_time / 60,
-		      (s64)elapsed_time % 60,
+		      obd->obd_name, elapsed_time / 60,
+		      elapsed_time % 60,
 		      atomic_read(&obd->obd_max_recoverable_clients),
 		      atomic_read(&obd->obd_connected_clients),
 		      obd->obd_stale_clients,
@@ -1684,9 +1772,8 @@ static void target_finish_recovery(struct lu_target *lut)
 static void abort_req_replay_queue(struct obd_device *obd)
 {
 	struct ptlrpc_request *req, *n;
-	struct list_head abort_list;
+	LIST_HEAD(abort_list);
 
-	INIT_LIST_HEAD(&abort_list);
 	spin_lock(&obd->obd_recovery_task_lock);
 	list_splice_init(&obd->obd_req_replay_queue, &abort_list);
 	spin_unlock(&obd->obd_recovery_task_lock);
@@ -1705,9 +1792,8 @@ static void abort_req_replay_queue(struct obd_device *obd)
 static void abort_lock_replay_queue(struct obd_device *obd)
 {
 	struct ptlrpc_request *req, *n;
-	struct list_head abort_list;
+	LIST_HEAD(abort_list);
 
-	INIT_LIST_HEAD(&abort_list);
 	spin_lock(&obd->obd_recovery_task_lock);
 	list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
 	spin_unlock(&obd->obd_recovery_task_lock);
@@ -1734,9 +1820,8 @@ static void abort_lock_replay_queue(struct obd_device *obd)
 void target_cleanup_recovery(struct obd_device *obd)
 {
 	struct ptlrpc_request *req, *n;
-	struct list_head clean_list;
+	LIST_HEAD(clean_list);
 
-	INIT_LIST_HEAD(&clean_list);
 	spin_lock(&obd->obd_dev_lock);
 	if (!obd->obd_recovering) {
 		spin_unlock(&obd->obd_dev_lock);
@@ -1804,7 +1889,7 @@ static void target_start_recovery_timer(struct obd_device *obd)
 	hrtimer_start(&obd->obd_recovery_timer, delay, HRTIMER_MODE_ABS);
 	spin_unlock(&obd->obd_dev_lock);
 
-	LCONSOLE_WARN("%s: Will be in recovery for at least %lu:%02lu, or until %d client%s reconnect%s\n",
+	LCONSOLE_WARN("%s: Will be in recovery for at least %u:%02u, or until %d client%s reconnect%s\n",
 		      obd->obd_name,
 		      obd->obd_recovery_timeout / 60,
 		      obd->obd_recovery_timeout % 60,
@@ -1822,21 +1907,22 @@ static void target_start_recovery_timer(struct obd_device *obd)
  * at least; otherwise, make sure the recovery timeout value is not less
  * than @dr_timeout.
  */
-static void extend_recovery_timer(struct obd_device *obd, time_t dr_timeout,
+static void extend_recovery_timer(struct obd_device *obd, timeout_t dr_timeout,
 				  bool extend)
 {
 	ktime_t left_ns;
-	time_t timeout;
-	time_t left;
+	timeout_t timeout;
+	timeout_t left;
 
 	spin_lock(&obd->obd_dev_lock);
-	if (!obd->obd_recovering || obd->obd_abort_recovery) {
+	if (!obd->obd_recovering || obd->obd_abort_recovery ||
+	    obd->obd_stopping) {
 		spin_unlock(&obd->obd_dev_lock);
 		return;
 	}
 
 	LASSERT(obd->obd_recovery_start != 0);
-	left_ns = hrtimer_expires_remaining(&obd->obd_recovery_timer);
+	left_ns = hrtimer_get_remaining(&obd->obd_recovery_timer);
 	left = ktime_divns(left_ns, NSEC_PER_SEC);
 
 	if (extend) {
@@ -1847,16 +1933,17 @@ static void extend_recovery_timer(struct obd_device *obd, time_t dr_timeout,
 		 */
 		if (dr_timeout > left) {
 			timeout += dr_timeout - left;
-			timeout = min_t(time_t, obd->obd_recovery_time_hard,
+			timeout = min_t(timeout_t, obd->obd_recovery_time_hard,
 					timeout);
 		}
 	} else {
-		timeout = clamp_t(time_t, dr_timeout, obd->obd_recovery_timeout,
+		timeout = clamp_t(timeout_t, dr_timeout,
+				  obd->obd_recovery_timeout,
 				  obd->obd_recovery_time_hard);
 	}
 
 	if (timeout == obd->obd_recovery_time_hard)
-		CWARN("%s: extended recovery timer reached hard limit: %ld, extend: %d\n",
+		CWARN("%s: extended recovery timer reached hard limit: %d, extend: %d\n",
 		      obd->obd_name, timeout, extend);
 
 	if (obd->obd_recovery_timeout < timeout) {
@@ -1871,7 +1958,7 @@ static void extend_recovery_timer(struct obd_device *obd, time_t dr_timeout,
 	}
 	spin_unlock(&obd->obd_dev_lock);
 
-	CDEBUG(D_HA, "%s: recovery timer will expire in %ld seconds\n",
+	CDEBUG(D_HA, "%s: recovery timer will expire in %d seconds\n",
 	       obd->obd_name, left);
 }
 
@@ -1890,40 +1977,40 @@ check_and_start_recovery_timer(struct obd_device *obd,
 			       struct ptlrpc_request *req,
 			       int new_client)
 {
-	time_t service_time = lustre_msg_get_service_time(req->rq_reqmsg);
+	timeout_t service_timeout = lustre_msg_get_service_timeout(req->rq_reqmsg);
 	struct obd_device_target *obt = &obd->u.obt;
 
-	if (!new_client && service_time)
+	if (!new_client && service_timeout)
 		/*
 		 * Teach server about old server's estimates, as first guess
 		 * at how long new requests will take.
 		 */
 		at_measured(&req->rq_rqbd->rqbd_svcpt->scp_at_estimate,
-			    service_time);
+			    service_timeout);
 
 	target_start_recovery_timer(obd);
 
 	/*
 	 * Convert the service time to RPC timeout,
-	 * and reuse service_time to limit stack usage.
+	 * and reuse service_timeout to limit stack usage.
 	 */
-	service_time = at_est2timeout(service_time);
+	service_timeout = at_est2timeout(service_timeout);
 
 	if (OBD_FAIL_CHECK(OBD_FAIL_TGT_SLUGGISH_NET) &&
-	    service_time < at_extra)
-		service_time = at_extra;
+	    service_timeout < at_extra)
+		service_timeout = at_extra;
 
 	/*
-	 * We expect other clients to timeout within service_time, then try
+	 * We expect other clients to timeout within service_timeout, then try
 	 * to reconnect, then try the failover server. The max delay between
 	 * connect attempts is SWITCH_MAX + SWITCH_INC + INITIAL.
 	 */
-	service_time += 2 * INITIAL_CONNECT_TIMEOUT;
+	service_timeout += 2 * INITIAL_CONNECT_TIMEOUT;
 
 	LASSERT(obt->obt_magic == OBT_MAGIC);
-	service_time += 2 * (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC);
-	if (service_time > obd->obd_recovery_timeout && !new_client)
-		extend_recovery_timer(obd, service_time, false);
+	service_timeout += 2 * (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC);
+	if (service_timeout > obd->obd_recovery_timeout && !new_client)
+		extend_recovery_timer(obd, service_timeout, false);
 }
 
 /** Health checking routines */
@@ -1988,7 +2075,7 @@ static int check_for_next_transno(struct lu_target *lut)
 		req_transno = lustre_msg_get_transno(req->rq_reqmsg);
 	}
 
-	if (tdtd != NULL)
+	if (!obd->obd_abort_recov_mdt && tdtd)
 		update_transno = distribute_txn_get_next_transno(tdtd);
 
 	connected = atomic_read(&obd->obd_connected_clients);
@@ -2008,7 +2095,7 @@ static int check_for_next_transno(struct lu_target *lut)
 	} else if (obd->obd_recovery_expired) {
 		CDEBUG(D_HA, "waking for expired recovery\n");
 		wake_up = 1;
-	} else if (tdtd != NULL && req != NULL &&
+	} else if (!obd->obd_abort_recov_mdt && tdtd && req &&
 		   is_req_replayed_by_update(req)) {
 		LASSERTF(req_transno < next_transno,
 			 "req_transno %llu next_transno%llu\n", req_transno,
@@ -2071,6 +2158,24 @@ static int check_for_next_lock(struct lu_target *lut)
 	return wake_up;
 }
 
+static int check_update_llog(struct lu_target *lut)
+{
+	struct obd_device *obd = lut->lut_obd;
+	struct target_distribute_txn_data *tdtd = lut->lut_tdtd;
+
+	if (obd->obd_abort_recovery) {
+		CDEBUG(D_HA, "waking for aborted recovery\n");
+		return 1;
+	}
+
+	if (atomic_read(&tdtd->tdtd_recovery_threads_count) == 0) {
+		CDEBUG(D_HA, "waking for completion of reading update log\n");
+		return 1;
+	}
+
+	return 0;
+}
+
 /**
  * wait for recovery events,
  * check its status with help of check_routine
@@ -2102,7 +2207,7 @@ repeat:
 		 * left in the queue
 		 */
 		spin_lock(&obd->obd_recovery_task_lock);
-		if (lut->lut_tdtd != NULL) {
+		if (!obd->obd_abort_recov_mdt && lut->lut_tdtd) {
 			next_update_transno =
 				distribute_txn_get_next_transno(lut->lut_tdtd);
 
@@ -2113,14 +2218,16 @@ repeat:
 			 * yet, let's wait those threads stopped
 			 */
 			if (next_update_transno == 0) {
-				wait_event_idle(
+				spin_unlock(&obd->obd_recovery_task_lock);
+
+				while (wait_event_timeout(
 					tdtd->tdtd_recovery_threads_waitq,
-					atomic_read(&tdtd->tdtd_recovery_threads_count)
-					== 0);
+					check_update_llog(lut),
+					cfs_time_seconds(60)) == 0);
+
+				spin_lock(&obd->obd_recovery_task_lock);
 				next_update_transno =
-					distribute_txn_get_next_transno(
-						lut->lut_tdtd);
+					distribute_txn_get_next_transno(tdtd);
 			}
 		}
@@ -2268,16 +2375,19 @@ static void handle_recovery_req(struct ptlrpc_thread *thread,
 	(void)handler(req);
 	lu_context_exit(&thread->t_env->le_ctx);
 
+	req->rq_svc_thread->t_env->le_ses = NULL;
+
 	/* don't reset timer for final stage */
 	if (!exp_finished(req->rq_export)) {
-		time_t to = obd_timeout;
+		timeout_t timeout = obd_timeout;
 
 		/**
-		 * Add request timeout to the recovery time so next request from
+		 * Add request @timeout to the recovery time so next request from
 		 * this client may come in recovery time
 		 */
 		if (!AT_OFF) {
 			struct ptlrpc_service_part *svcpt;
+			timeout_t est_timeout;
 
 			svcpt = req->rq_rqbd->rqbd_svcpt;
 			/*
@@ -2287,18 +2397,19 @@ static void handle_recovery_req(struct ptlrpc_thread *thread,
 			 * use the maxium timeout here for waiting the client
 			 * sending the next req
 			 */
-			to = max_t(time_t,
-				   at_est2timeout(at_get(&svcpt->scp_at_estimate)),
-				   lustre_msg_get_timeout(req->rq_reqmsg));
+			est_timeout = at_get(&svcpt->scp_at_estimate);
+			timeout = max_t(timeout_t, at_est2timeout(est_timeout),
+					lustre_msg_get_timeout(req->rq_reqmsg));
 			/*
 			 * Add 2 net_latency, one for balance rq_deadline
 			 * (see ptl_send_rpc), one for resend the req to server,
 			 * Note: client will pack net_latency in replay req
 			 * (see ptlrpc_replay_req)
 			 */
-			to += 2 * lustre_msg_get_service_time(req->rq_reqmsg);
+			timeout += 2 * lustre_msg_get_service_timeout(req->rq_reqmsg);
 		}
-		extend_recovery_timer(class_exp2obd(req->rq_export), to, true);
+		extend_recovery_timer(class_exp2obd(req->rq_export), timeout,
+				      true);
 	}
 	EXIT;
 }
@@ -2323,9 +2434,9 @@ static int check_for_recovery_ready(struct lu_target *lut)
 		return 0;
 	}
 
-	if (lut->lut_tdtd != NULL) {
+	if (!obd->obd_abort_recov_mdt && lut->lut_tdtd != NULL) {
 		if (!lut->lut_tdtd->tdtd_replay_ready &&
-		    !obd->obd_abort_recovery) {
+		    !obd->obd_abort_recovery && !obd->obd_stopping) {
 			/*
 			 * Let's extend recovery timer, in case the recovery
 			 * timer expired, and some clients got evicted
@@ -2333,7 +2444,7 @@ static int check_for_recovery_ready(struct lu_target *lut)
 			extend_recovery_timer(obd, obd->obd_recovery_timeout,
 					      true);
 			CDEBUG(D_HA,
-			       "%s update recovery is not ready, extend recovery %lu\n",
+			       "%s update recovery is not ready, extend recovery %d\n",
 			       obd->obd_name, obd->obd_recovery_timeout);
 			return 0;
 		}
@@ -2375,7 +2486,7 @@ static __u64 get_next_transno(struct lu_target *lut, int *type)
 	if (type != NULL)
 		*type = REQUEST_RECOVERY;
 
-	if (tdtd == NULL)
+	if (!tdtd || obd->obd_abort_recov_mdt)
 		RETURN(transno);
 
 	update_transno = distribute_txn_get_next_transno(tdtd);
@@ -2429,6 +2540,8 @@ static void drop_duplicate_replay_req(struct lu_env *env,
 	obd->obd_replayed_requests++;
 }
 
+#define WATCHDOG_TIMEOUT (obd_timeout * 10)
+
 static void replay_request_or_update(struct lu_env *env,
 				     struct lu_target *lut,
 				     struct target_recovery_data *trd,
@@ -2500,14 +2613,18 @@ static void replay_request_or_update(struct lu_env *env,
 			continue;
 		}
 
-		LASSERT(trd->trd_processing_task == current_pid());
+		LASSERT(trd->trd_processing_task == current->pid);
 		DEBUG_REQ(D_HA, req, "processing x%llu t%lld from %s",
 			  req->rq_xid,
 			  lustre_msg_get_transno(req->rq_reqmsg),
 			  libcfs_nid2str(req->rq_peer.nid));
 
+		ptlrpc_watchdog_init(&thread->t_watchdog,
+				     WATCHDOG_TIMEOUT);
 		handle_recovery_req(thread, req,
 				    trd->trd_recovery_handler);
+		ptlrpc_watchdog_disable(&thread->t_watchdog);
+
 		/**
 		 * bz18031: increase next_recovery_transno before
 		 * target_request_copy_put() will drop exp_rpc reference
@@ -2527,7 +2644,10 @@ static void replay_request_or_update(struct lu_env *env,
 		LASSERT(tdtd != NULL);
 		dtrq = distribute_txn_get_next_req(tdtd);
 		lu_context_enter(&thread->t_env->le_ctx);
+		ptlrpc_watchdog_init(&thread->t_watchdog,
+				     WATCHDOG_TIMEOUT);
 		rc = tdtd->tdtd_replay_handler(env, tdtd, dtrq);
+		ptlrpc_watchdog_disable(&thread->t_watchdog);
 		lu_context_exit(&thread->t_env->le_ctx);
 		extend_recovery_timer(obd, obd_timeout, true);
 
@@ -2578,8 +2698,6 @@ static int target_recovery_thread(void *arg)
 	int rc = 0;
 	ENTRY;
 
-
-	unshare_fs_struct();
 	OBD_ALLOC_PTR(thread);
 	if (thread == NULL)
 		RETURN(-ENOMEM);
@@ -2597,12 +2715,13 @@ static int target_recovery_thread(void *arg)
 	thread->t_env = env;
 	thread->t_id = -1; /* force filter_iobuf_get/put to use local buffers */
+	thread->t_task = current;
 	env->le_ctx.lc_thread = thread;
 	tgt_io_thread_init(thread); /* init thread_big_cache for IO requests */
 
 	CDEBUG(D_HA, "%s: started recovery thread pid %d\n", obd->obd_name,
-	       current_pid());
-	trd->trd_processing_task = current_pid();
+	       current->pid);
+	trd->trd_processing_task = current->pid;
 
 	spin_lock(&obd->obd_dev_lock);
 	obd->obd_recovering = 1;
@@ -2631,9 +2750,14 @@ static int target_recovery_thread(void *arg)
 	CDEBUG(D_INFO, "2: lock replay stage - %d clients\n",
 	       atomic_read(&obd->obd_lock_replay_clients));
 	while ((req = target_next_replay_lock(lut))) {
-		LASSERT(trd->trd_processing_task == current_pid());
+		LASSERT(trd->trd_processing_task == current->pid);
 		DEBUG_REQ(D_HA, req, "processing lock from %s:",
 			  libcfs_nid2str(req->rq_peer.nid));
+		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_LOCK_REPLAY)) {
+			req->rq_status = -ENODEV;
+			target_request_copy_put(req);
+			continue;
+		}
 		handle_recovery_req(thread, req,
 				    trd->trd_recovery_handler);
 		target_request_copy_put(req);
@@ -2659,7 +2783,7 @@ static int target_recovery_thread(void *arg)
 	target_cancel_recovery_timer(obd);
 	spin_unlock(&obd->obd_recovery_task_lock);
 	while ((req = target_next_final_ping(obd))) {
-		LASSERT(trd->trd_processing_task == current_pid());
+		LASSERT(trd->trd_processing_task == current->pid);
 		DEBUG_REQ(D_HA, req, "processing final ping from %s:",
 			  libcfs_nid2str(req->rq_peer.nid));
 		handle_recovery_req(thread, req,
@@ -2756,7 +2880,7 @@ static enum hrtimer_restart target_recovery_expired(struct hrtimer *timer)
 	CDEBUG(D_HA,
 	       "%s: recovery timed out; %d clients are still in recovery after %llu seconds (%d clients connected)\n",
 	       obd->obd_name, atomic_read(&obd->obd_lock_replay_clients),
-	       ktime_get_real_seconds() - obd->obd_recovery_start,
+	       ktime_get_seconds() - obd->obd_recovery_start,
 	       atomic_read(&obd->obd_connected_clients));
 
 	obd->obd_recovery_expired = 1;
@@ -2840,7 +2964,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
 
 	ENTRY;
 
-	if (obd->obd_recovery_data.trd_processing_task == current_pid()) {
+	if (obd->obd_recovery_data.trd_processing_task == current->pid) {
 		/* Processing the queue right now, don't re-add. */
 		RETURN(1);
 	}
@@ -2854,8 +2978,8 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
 			cfs_fail_val = 0;
 			wake_up(&cfs_race_waitq);
 
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout_interruptible(
+				cfs_time_seconds(1));
 		}
 	}
@@ -3296,16 +3420,6 @@ void ldlm_dump_export_locks(struct obd_export *exp)
 #endif
 
 #ifdef HAVE_SERVER_SUPPORT
-static int target_bulk_timeout(void *data)
-{
-	ENTRY;
-	/*
-	 * We don't fail the connection here, because having the export
-	 * killed makes the (vital) call to commitrw very sad.
-	 */
-	RETURN(1);
-}
-
 static inline const char *bulk2type(struct ptlrpc_request *req)
 {
 	if (req->rq_bulk_read)
@@ -3320,7 +3434,6 @@ int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc)
 	struct ptlrpc_request *req = desc->bd_req;
 	time64_t start = ktime_get_seconds();
 	time64_t deadline;
-	struct l_wait_info lwi;
 	int rc = 0;
 
 	ENTRY;
@@ -3365,20 +3478,19 @@ int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc)
 	do {
 		time64_t timeoutl = deadline - ktime_get_seconds();
-		long timeout_jiffies = timeoutl <= 0 ?
-				       1 : cfs_time_seconds(timeoutl);
 		time64_t rq_deadline;
 
-		lwi = LWI_TIMEOUT_INTERVAL(timeout_jiffies,
-					   cfs_time_seconds(1),
-					   target_bulk_timeout, desc);
-		rc = l_wait_event(desc->bd_waitq,
-				  !ptlrpc_server_bulk_active(desc) ||
-				  exp->exp_failed ||
-				  exp->exp_conn_cnt >
-				  lustre_msg_get_conn_cnt(req->rq_reqmsg),
-				  &lwi);
-		LASSERT(rc == 0 || rc == -ETIMEDOUT);
+		while (timeoutl >= 0 &&
+		       wait_event_idle_timeout(
+			       desc->bd_waitq,
+			       !ptlrpc_server_bulk_active(desc) ||
+			       exp->exp_failed ||
+			       exp->exp_conn_cnt >
+			       lustre_msg_get_conn_cnt(req->rq_reqmsg),
+			       timeoutl ? cfs_time_seconds(1) : 1) == 0)
+			timeoutl -= 1;
+		rc = timeoutl < 0 ? -ETIMEDOUT : 0;
+
 		/* Wait again if we changed rq_deadline. */
 		rq_deadline = READ_ONCE(req->rq_deadline);
 		deadline = start + bulk_timeout;