X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ffld%2Ffld_request.c;h=a59ab95693acd2502058d34bd0bde1849c3cd9e2;hp=cde98e2a6704705b6437c827ce555a8afe4bb0de;hb=b046468f58a1f40e85cb59ed9abf75fd2fd5ea5a;hpb=9fb46705ae86aa2c0ac29427f0ff24f923560eb7 diff --git a/lustre/fld/fld_request.c b/lustre/fld/fld_request.c index cde98e2..a59ab95 100644 --- a/lustre/fld/fld_request.c +++ b/lustre/fld/fld_request.c @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, Whamcloud, Inc. + * Copyright (c) 2011, 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -42,106 +42,61 @@ #define DEBUG_SUBSYSTEM S_FLD -#ifdef __KERNEL__ -# include -# include -# include -# include -#else /* __KERNEL__ */ -# include -# include -#endif +#include +#include +#include #include #include -#include #include #include - -#include -#include #include #include #include #include "fld_internal.h" -/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c - * It should be common thing. The same about mdc RPC lock */ -static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw) -{ - int rc; - ENTRY; - client_obd_list_lock(&cli->cl_loi_list_lock); - rc = cfs_list_empty(&mcw->mcw_entry); - client_obd_list_unlock(&cli->cl_loi_list_lock); - RETURN(rc); -}; - -static void fld_enter_request(struct client_obd *cli) -{ - struct mdc_cache_waiter mcw; - struct l_wait_info lwi = { 0 }; - - client_obd_list_lock(&cli->cl_loi_list_lock); - if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) { - cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters); - cfs_waitq_init(&mcw.mcw_waitq); - client_obd_list_unlock(&cli->cl_loi_list_lock); - l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi); - } else { - cli->cl_r_in_flight++; - client_obd_list_unlock(&cli->cl_loi_list_lock); - } -} - -static void fld_exit_request(struct client_obd *cli) -{ - cfs_list_t *l, *tmp; - struct mdc_cache_waiter *mcw; - - client_obd_list_lock(&cli->cl_loi_list_lock); - cli->cl_r_in_flight--; - cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) { - - if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) { - /* No free request slots anymore */ - break; - } - - mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry); - cfs_list_del_init(&mcw->mcw_entry); - cli->cl_r_in_flight++; - cfs_waitq_signal(&mcw->mcw_waitq); - } - client_obd_list_unlock(&cli->cl_loi_list_lock); -} - -static int fld_rrb_hash(struct lu_client_fld *fld, - seqno_t seq) +static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq) { - LASSERT(fld->lcf_count > 0); - return do_div(seq, fld->lcf_count); + LASSERT(fld->lcf_count > 0); + return do_div(seq, fld->lcf_count); } static struct lu_fld_target * -fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq) +fld_rrb_scan(struct lu_client_fld *fld, u64 seq) { struct lu_fld_target *target; int hash; ENTRY; - hash = fld_rrb_hash(fld, seq); - - cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) { + /* Because almost all of special sequence located in MDT0, + * it should go to index 0 directly, instead of calculating + * hash again, and also if other MDTs is not being connected, + * the fld lookup requests(for seq on MDT0) should not be + * blocked because of other MDTs */ + if (fid_seq_is_norm(seq)) + hash = fld_rrb_hash(fld, seq); + else + hash = 0; + +again: + list_for_each_entry(target, &fld->lcf_targets, ft_chain) { if 
(target->ft_idx == hash) RETURN(target); } + if (hash != 0) { + /* It is possible the remote target(MDT) are not connected to + * with client yet, so we will refer this to MDT0, which should + * be connected during mount */ + hash = 0; + goto again; + } + CERROR("%s: Can't find target by hash %d (seq "LPX64"). " "Targets (%d):\n", fld->lcf_name, hash, seq, fld->lcf_count); - cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) { + list_for_each_entry(target, &fld->lcf_targets, ft_chain) { const char *srv_name = target->ft_srv != NULL ? target->ft_srv->lsf_name : ""; const char *exp_name = target->ft_exp != NULL ? @@ -168,12 +123,12 @@ struct lu_fld_hash fld_hash[] = { .fh_scan_func = fld_rrb_scan }, { - 0, + NULL, } }; static struct lu_fld_target * -fld_client_get_target(struct lu_client_fld *fld, seqno_t seq) +fld_client_get_target(struct lu_client_fld *fld, u64 seq) { struct lu_fld_target *target; ENTRY; @@ -200,30 +155,24 @@ fld_client_get_target(struct lu_client_fld *fld, seqno_t seq) int fld_client_add_target(struct lu_client_fld *fld, struct lu_fld_target *tar) { - const char *name = fld_target_name(tar); + const char *name; struct lu_fld_target *target, *tmp; ENTRY; LASSERT(tar != NULL); + name = fld_target_name(tar); LASSERT(name != NULL); LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL); - if (fld->lcf_flags != LUSTRE_FLD_INIT) { - CERROR("%s: Attempt to add target %s (idx "LPU64") " - "on fly - skip it\n", fld->lcf_name, name, - tar->ft_idx); - RETURN(0); - } else { - CDEBUG(D_INFO, "%s: Adding target %s (idx " - LPU64")\n", fld->lcf_name, name, tar->ft_idx); - } + CDEBUG(D_INFO, "%s: Adding target %s (idx "LPU64")\n", fld->lcf_name, + name, tar->ft_idx); OBD_ALLOC_PTR(target); if (target == NULL) RETURN(-ENOMEM); spin_lock(&fld->lcf_lock); - cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) { + list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) { if (tmp->ft_idx == tar->ft_idx) { spin_unlock(&fld->lcf_lock); OBD_FREE_PTR(target); @@ -239,8 +188,7 @@ int fld_client_add_target(struct lu_client_fld *fld, target->ft_srv = tar->ft_srv; target->ft_idx = tar->ft_idx; - cfs_list_add_tail(&target->ft_chain, - &fld->lcf_targets); + list_add_tail(&target->ft_chain, &fld->lcf_targets); fld->lcf_count++; spin_unlock(&fld->lcf_lock); @@ -256,11 +204,10 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx) ENTRY; spin_lock(&fld->lcf_lock); - cfs_list_for_each_entry_safe(target, tmp, - &fld->lcf_targets, ft_chain) { + list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) { if (target->ft_idx == idx) { fld->lcf_count--; - cfs_list_del(&target->ft_chain); + list_del(&target->ft_chain); spin_unlock(&fld->lcf_lock); if (target->ft_exp != NULL) @@ -273,38 +220,34 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx) spin_unlock(&fld->lcf_lock); RETURN(-ENOENT); } -EXPORT_SYMBOL(fld_client_del_target); -#ifdef LPROCFS +#ifdef CONFIG_PROC_FS static int fld_client_proc_init(struct lu_client_fld *fld) { - int rc; - ENTRY; - - fld->lcf_proc_dir = lprocfs_register(fld->lcf_name, - fld_type_proc_dir, - NULL, NULL); - - if (IS_ERR(fld->lcf_proc_dir)) { - CERROR("%s: LProcFS failed in fld-init\n", - fld->lcf_name); - rc = PTR_ERR(fld->lcf_proc_dir); - RETURN(rc); - } + int rc; + ENTRY; - rc = lprocfs_add_vars(fld->lcf_proc_dir, - fld_client_proc_list, fld); - if (rc) { - CERROR("%s: Can't init FLD proc, rc %d\n", - fld->lcf_name, rc); - GOTO(out_cleanup, rc); - } + fld->lcf_proc_dir = lprocfs_register(fld->lcf_name, fld_type_proc_dir, + NULL, 
NULL); + if (IS_ERR(fld->lcf_proc_dir)) { + CERROR("%s: LProcFS failed in fld-init\n", + fld->lcf_name); + rc = PTR_ERR(fld->lcf_proc_dir); + RETURN(rc); + } + + rc = lprocfs_add_vars(fld->lcf_proc_dir, fld_client_proc_list, fld); + if (rc) { + CERROR("%s: Can't init FLD proc, rc %d\n", + fld->lcf_name, rc); + GOTO(out_cleanup, rc); + } - RETURN(0); + RETURN(0); out_cleanup: - fld_client_proc_fini(fld); - return rc; + fld_client_proc_fini(fld); + return rc; } void fld_client_proc_fini(struct lu_client_fld *fld) @@ -317,7 +260,7 @@ void fld_client_proc_fini(struct lu_client_fld *fld) } EXIT; } -#else +#else /* !CONFIG_PROC_FS */ static int fld_client_proc_init(struct lu_client_fld *fld) { return 0; @@ -327,7 +270,7 @@ void fld_client_proc_fini(struct lu_client_fld *fld) { return; } -#endif +#endif /* CONFIG_PROC_FS */ EXPORT_SYMBOL(fld_client_proc_fini); @@ -354,11 +297,10 @@ int fld_client_init(struct lu_client_fld *fld, RETURN(-EINVAL); } - fld->lcf_count = 0; + fld->lcf_count = 0; spin_lock_init(&fld->lcf_lock); - fld->lcf_hash = &fld_hash[hash]; - fld->lcf_flags = LUSTRE_FLD_INIT; - CFS_INIT_LIST_HEAD(&fld->lcf_targets); + fld->lcf_hash = &fld_hash[hash]; + INIT_LIST_HEAD(&fld->lcf_targets); cache_size = FLD_CLIENT_CACHE_SIZE / sizeof(struct fld_cache_entry); @@ -394,10 +336,9 @@ void fld_client_fini(struct lu_client_fld *fld) ENTRY; spin_lock(&fld->lcf_lock); - cfs_list_for_each_entry_safe(target, tmp, - &fld->lcf_targets, ft_chain) { + list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) { fld->lcf_count--; - cfs_list_del(&target->ft_chain); + list_del(&target->ft_chain); if (target->ft_exp != NULL) class_export_put(target->ft_exp); OBD_FREE_PTR(target); @@ -415,96 +356,166 @@ void fld_client_fini(struct lu_client_fld *fld) EXPORT_SYMBOL(fld_client_fini); int fld_client_rpc(struct obd_export *exp, - struct lu_seq_range *range, __u32 fld_op) + struct lu_seq_range *range, __u32 fld_op, + struct ptlrpc_request **reqp) { - struct ptlrpc_request *req; - struct lu_seq_range *prange; - __u32 *op; - int rc; - ENTRY; - - LASSERT(exp != NULL); - - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_FLD_QUERY, - LUSTRE_MDS_VERSION, FLD_QUERY); - if (req == NULL) - RETURN(-ENOMEM); - - op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC); - *op = fld_op; - - prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD); - *prange = *range; + struct ptlrpc_request *req = NULL; + struct lu_seq_range *prange; + __u32 *op; + int rc = 0; + struct obd_import *imp; + ENTRY; - ptlrpc_request_set_replen(req); + LASSERT(exp != NULL); + +again: + imp = class_exp2cliimp(exp); + switch (fld_op) { + case FLD_QUERY: + req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, + LUSTRE_MDS_VERSION, FLD_QUERY); + if (req == NULL) + RETURN(-ENOMEM); + + /* XXX: only needed when talking to old server(< 2.6), it should + * be removed when < 2.6 server is not supported */ + op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC); + *op = FLD_LOOKUP; + + /* For MDS_MDS seq lookup, it will always use LWP connection, + * but LWP will be evicted after restart, so cause the error. + * so we will set no_delay for seq lookup request, once the + * request fails because of the eviction. 
always retry here */ + if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) { + req->rq_allow_replay = 1; + req->rq_no_delay = 1; + } + break; + case FLD_READ: + req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_READ, + LUSTRE_MDS_VERSION, FLD_READ); + if (req == NULL) + RETURN(-ENOMEM); + + req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, + RCL_SERVER, PAGE_CACHE_SIZE); + break; + default: + rc = -EINVAL; + break; + } + + if (rc != 0) + RETURN(rc); + + prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD); + *prange = *range; + ptlrpc_request_set_replen(req); req->rq_request_portal = FLD_REQUEST_PORTAL; + req->rq_reply_portal = MDC_REPLY_PORTAL; ptlrpc_at_set_req_timeout(req); - if (fld_op != FLD_LOOKUP) - mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); - fld_enter_request(&exp->exp_obd->u.cli); - rc = ptlrpc_queue_wait(req); - fld_exit_request(&exp->exp_obd->u.cli); - if (fld_op != FLD_LOOKUP) - mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL); - if (rc) - GOTO(out_req, rc); - - prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD); - if (prange == NULL) - GOTO(out_req, rc = -EFAULT); - *range = *prange; - EXIT; + obd_get_request_slot(&exp->exp_obd->u.cli); + rc = ptlrpc_queue_wait(req); + obd_put_request_slot(&exp->exp_obd->u.cli); + + if (rc == -ENOENT) { + /* Don't loop forever on non-existing FID sequences. */ + GOTO(out_req, rc); + } + + if (rc != 0) { + if (imp->imp_state != LUSTRE_IMP_CLOSED && !imp->imp_deactive) { + /* Since LWP is not replayable, so it will keep + * trying unless umount happens, otherwise it would + * cause unecessary failure of the application. */ + ptlrpc_req_finished(req); + rc = 0; + goto again; + } + GOTO(out_req, rc); + } + + if (fld_op == FLD_QUERY) { + prange = req_capsule_server_get(&req->rq_pill, + &RMF_FLD_MDFLD); + if (prange == NULL) + GOTO(out_req, rc = -EFAULT); + *range = *prange; + } + + EXIT; out_req: - ptlrpc_req_finished(req); - return rc; + if (rc != 0 || reqp == NULL) { + ptlrpc_req_finished(req); + req = NULL; + } + + if (reqp != NULL) + *reqp = req; + + return rc; } -int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds, - __u32 flags, const struct lu_env *env) +int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds, + __u32 flags, const struct lu_env *env) { - struct lu_seq_range res; - struct lu_fld_target *target; - int rc; - ENTRY; - - fld->lcf_flags |= LUSTRE_FLD_RUN; + struct lu_seq_range res = { 0 }; + struct lu_fld_target *target; + struct lu_fld_target *origin; + int rc; + ENTRY; - rc = fld_cache_lookup(fld->lcf_cache, seq, &res); - if (rc == 0) { - *mds = res.lsr_index; - RETURN(0); - } + rc = fld_cache_lookup(fld->lcf_cache, seq, &res); + if (rc == 0) { + *mds = res.lsr_index; + RETURN(0); + } /* Can not find it in the cache */ target = fld_client_get_target(fld, seq); LASSERT(target != NULL); - + origin = target; +again: CDEBUG(D_INFO, "%s: Lookup fld entry (seq: "LPX64") on " "target %s (idx "LPU64")\n", fld->lcf_name, seq, fld_target_name(target), target->ft_idx); - res.lsr_start = seq; - res.lsr_flags = flags; -#ifdef __KERNEL__ - if (target->ft_srv != NULL) { - LASSERT(env != NULL); - rc = fld_server_lookup(target->ft_srv, - env, seq, &res); - } else { -#endif - rc = fld_client_rpc(target->ft_exp, - &res, FLD_LOOKUP); -#ifdef __KERNEL__ - } -#endif - - if (rc == 0) { - *mds = res.lsr_index; - - fld_cache_insert(fld->lcf_cache, &res); - } - RETURN(rc); + res.lsr_start = seq; + fld_range_set_type(&res, flags); + +#ifdef HAVE_SERVER_SUPPORT + if 
(target->ft_srv != NULL) { + LASSERT(env != NULL); + rc = fld_server_lookup(env, target->ft_srv, seq, &res); + } else +#endif /* HAVE_SERVER_SUPPORT */ + { + rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL); + } + + if (rc == -ESHUTDOWN) { + /* If fld lookup failed because the target has been shutdown, + * then try next target in the list, until trying all targets + * or fld lookup succeeds */ + spin_lock(&fld->lcf_lock); + if (target->ft_chain.next == fld->lcf_targets.prev) + target = list_entry(fld->lcf_targets.next, + struct lu_fld_target, ft_chain); + else + target = list_entry(target->ft_chain.next, + struct lu_fld_target, + ft_chain); + spin_unlock(&fld->lcf_lock); + if (target != origin) + goto again; + } + if (rc == 0) { + *mds = res.lsr_index; + fld_cache_insert(fld->lcf_cache, &res); + } + + RETURN(rc); } EXPORT_SYMBOL(fld_client_lookup); @@ -512,4 +523,41 @@ void fld_client_flush(struct lu_client_fld *fld) { fld_cache_flush(fld->lcf_cache); } -EXPORT_SYMBOL(fld_client_flush); + + +struct proc_dir_entry *fld_type_proc_dir; + +static int __init fld_mod_init(void) +{ + fld_type_proc_dir = lprocfs_register(LUSTRE_FLD_NAME, + proc_lustre_root, + NULL, NULL); + if (IS_ERR(fld_type_proc_dir)) + return PTR_ERR(fld_type_proc_dir); + +#ifdef HAVE_SERVER_SUPPORT + fld_server_mod_init(); +#endif /* HAVE_SERVER_SUPPORT */ + + return 0; +} + +static void __exit fld_mod_exit(void) +{ +#ifdef HAVE_SERVER_SUPPORT + fld_server_mod_exit(); +#endif /* HAVE_SERVER_SUPPORT */ + + if (fld_type_proc_dir != NULL && !IS_ERR(fld_type_proc_dir)) { + lprocfs_remove(&fld_type_proc_dir); + fld_type_proc_dir = NULL; + } +} + +MODULE_AUTHOR("Sun Microsystems, Inc. "); +MODULE_DESCRIPTION("Lustre FLD"); +MODULE_VERSION(LUSTRE_VERSION_STRING); +MODULE_LICENSE("GPL"); + +module_init(fld_mod_init); +module_exit(fld_mod_exit);
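
The fld_rrb_scan() hunk above changes client-side target selection in two ways: sequences that are not normal FID sequences go straight to index 0 instead of being hashed, and a hash miss (the hashed MDT not yet connected) falls back to MDT0, which is connected during mount. The following stand-alone user-space sketch illustrates that round-robin-with-fallback idea only; demo_pick_target(), demo_seq_is_norm() and struct demo_target are hypothetical simplifications, not the Lustre API, and the FID_SEQ_NORMAL base value is an assumption.

/*
 * Hypothetical sketch of round-robin target selection with MDT0
 * fallback, modelled on fld_rrb_scan() above.  Names and the
 * FID_SEQ_NORMAL base are assumptions for illustration only.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct demo_target {
	uint64_t ft_idx;		/* MDT index this target serves */
};

/* Assumed base of normal FID sequences. */
#define DEMO_SEQ_NORMAL_BASE 0x200000400ULL

static bool demo_seq_is_norm(uint64_t seq)
{
	return seq >= DEMO_SEQ_NORMAL_BASE;
}

/*
 * Pick a target for @seq: normal sequences hash round-robin over the
 * number of known targets; special sequences go to index 0.  If the
 * hashed MDT is not in the list yet (not connected), fall back to
 * MDT0, which is connected during mount.
 */
static struct demo_target *demo_pick_target(struct demo_target *tgts,
					    int count, uint64_t seq)
{
	uint64_t hash = demo_seq_is_norm(seq) ? seq % count : 0;
	int i;

again:
	for (i = 0; i < count; i++)
		if (tgts[i].ft_idx == hash)
			return &tgts[i];

	if (hash != 0) {
		hash = 0;	/* hashed MDT missing: retry against MDT0 */
		goto again;
	}
	return NULL;
}

int main(void)
{
	/* Only MDT0 and MDT2 have been added; MDT1 is not connected yet. */
	struct demo_target tgts[] = { { .ft_idx = 0 }, { .ft_idx = 2 } };
	uint64_t seq = DEMO_SEQ_NORMAL_BASE + 5;	/* hashes to 1 */
	struct demo_target *t = demo_pick_target(tgts, 2, seq);

	printf("seq %#llx -> MDT%llu\n", (unsigned long long)seq,
	       (unsigned long long)(t != NULL ? t->ft_idx : ~0ULL));
	return 0;
}

This is the same fallback that, per the patch comment, keeps lookups for sequences on MDT0 from blocking behind MDTs that have not finished connecting.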
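
Similarly, the fld_client_lookup() hunk retries a failed lookup on the next target in the list when the current one returns -ESHUTDOWN, and gives up only after wrapping back around to the target it started from. A minimal sketch of that failover walk, again with hypothetical names (demo_lookup_on() stands in for the fld_client_rpc()/fld_server_lookup() call, and a plain modular walk replaces the kernel list), is:

#include <stdio.h>
#include <errno.h>

#define DEMO_NR_TGTS 3

/*
 * Hypothetical per-target lookup: returns 0 and the MDT index on
 * success, -ESHUTDOWN if that target has been stopped.
 */
static int demo_lookup_on(int tgt, unsigned long long seq, int *mds)
{
	static const int stopped[DEMO_NR_TGTS] = { 1, 1, 0 };

	if (stopped[tgt])
		return -ESHUTDOWN;
	*mds = tgt;		/* pretend the sequence lives here */
	return 0;
}

/*
 * Start from the hashed target and walk the targets round-robin on
 * -ESHUTDOWN, stopping once every target has been tried (back at the
 * origin), mirroring the "again:" loop in fld_client_lookup().
 */
static int demo_lookup(unsigned long long seq, int *mds)
{
	int origin = (int)(seq % DEMO_NR_TGTS);
	int tgt = origin;
	int rc;

	do {
		rc = demo_lookup_on(tgt, seq, mds);
		if (rc != -ESHUTDOWN)
			return rc;
		tgt = (tgt + 1) % DEMO_NR_TGTS;
	} while (tgt != origin);

	return rc;
}

int main(void)
{
	int mds = -1;
	int rc = demo_lookup(7ULL, &mds);

	printf("rc=%d mds=%d\n", rc, mds);
	return 0;
}

The sketch succeeds on the third target after the first two report -ESHUTDOWN; any other error (for example -ENOENT) is returned immediately, matching the patch, which only fails over on -ESHUTDOWN.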