X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Fldlm%2Fldlm_request.c;h=dd561dce9c2ccedbf49fc871b3d681f6d8593bda;hb=2ad695f40cf1920033d77723311ed5ed7a3b07cc;hp=5bffd30820aaf11c68b3f1a0c60a62266ce90164;hpb=f2a9374170e4522b9d2ac3b7096cf2912339d480;p=fs%2Flustre-release.git

diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 5bffd30..dd561dc 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -29,8 +27,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2010, 2011, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -111,6 +108,7 @@ int ldlm_expired_completion_wait(void *data)
 
         RETURN(0);
 }
+EXPORT_SYMBOL(ldlm_expired_completion_wait);
 
 /* We use the same basis for both server side and client side functions
    from a single node. */
@@ -175,10 +173,10 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data)
 
         LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                    "going forward");
-        ldlm_lock_dump(D_OTHER, lock, 0);
         ldlm_reprocess_all(lock->l_resource);
         RETURN(0);
 }
+EXPORT_SYMBOL(ldlm_completion_ast_async);
 
 /**
  * Client side LDLM "completion" AST. This is called in several cases:
@@ -225,7 +223,6 @@ int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
 
         LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                    "sleeping");
-        ldlm_lock_dump(D_OTHER, lock, 0);
 
 noreproc:
 
@@ -277,6 +274,7 @@ noreproc:
 
         RETURN(ldlm_completion_tail(lock));
 }
+EXPORT_SYMBOL(ldlm_completion_ast);
 
 /**
  * A helper to build a blocking ast function
@@ -313,6 +311,7 @@ int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
         }
         RETURN(0);
 }
+EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
 
 /**
  * Server blocking AST
@@ -349,6 +348,7 @@ int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
         }
         RETURN(ldlm_blocking_ast_nocheck(lock));
 }
+EXPORT_SYMBOL(ldlm_blocking_ast);
 
 /*
  * ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
@@ -376,6 +376,7 @@ int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp)
          */
         return -ELDLM_NO_LOCK_DATA;
 }
+EXPORT_SYMBOL(ldlm_glimpse_ast);
 
 int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                            const struct ldlm_res_id *res_id,
@@ -439,6 +440,7 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
  out_nolock:
         return err;
 }
+EXPORT_SYMBOL(ldlm_cli_enqueue_local);
 
 static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                 struct ldlm_lock *lock, int mode)
@@ -538,6 +540,9 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
         lock_res_and_lock(lock);
         /* Key change rehash lock in per-export hash with new key */
         if (exp->exp_lock_hash) {
+                /* In the function below, .hs_keycmp resolves to
+                 * ldlm_export_lock_keycmp() */
+                /* coverity[overrun-buffer-val] */
                 cfs_hash_rehash_key(exp->exp_lock_hash,
                                     &lock->l_remote_handle,
                                     &reply->lock_handle,
@@ -591,7 +596,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                 if (!(type == LDLM_IBITS &&
                      !(exp->exp_connect_flags & OBD_CONNECT_IBITS)))
                         /* We assume lock type cannot change on server*/
-                        ldlm_convert_policy_to_local(
+                        ldlm_convert_policy_to_local(exp,
                                                 lock->l_resource->lr_type,
                                                 &reply->lock_desc.l_policy_data,
                                                 &lock->l_policy_data);
@@ -660,6 +665,7 @@ cleanup:
         LDLM_LOCK_RELEASE(lock);
         return rc;
 }
+EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
 
 /* PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
  * a single page on the send/receive side. XXX: 512 should be changed
@@ -703,7 +709,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
         struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
         struct req_capsule *pill = &req->rq_pill;
         struct ldlm_request *dlm = NULL;
-        int flags, avail, to_free, bufcount, pack = 0;
+        int flags, avail, to_free, pack = 0;
         CFS_LIST_HEAD(head);
         int rc;
         ENTRY;
@@ -712,7 +718,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
                 cancels = &head;
         if (exp_connect_cancelset(exp)) {
                 /* Estimate the amount of available space in the request. */
-                bufcount = req_capsule_filled_sizes(pill, RCL_CLIENT);
+                req_capsule_filled_sizes(pill, RCL_CLIENT);
                 avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
 
                 flags = ns_connect_lru_resize(ns) ?
@@ -759,6 +765,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
         }
         RETURN(0);
 }
+EXPORT_SYMBOL(ldlm_prep_elc_req);
 
 int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
                           cfs_list_t *cancels, int count)
@@ -766,6 +773,7 @@ int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
         return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                                  LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
 }
+EXPORT_SYMBOL(ldlm_prep_enqueue_req);
 
 /* If a request has some specific initialisation it is passed in @reqp,
  * otherwise it is created in ldlm_cli_enqueue.
@@ -833,6 +841,11 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                 LDLM_DEBUG(lock, "client-side enqueue START");
         }
 
+        lock->l_conn_export = exp;
+        lock->l_export = NULL;
+        lock->l_blocking_ast = einfo->ei_cb_bl;
+        lock->l_flags |= (*flags & LDLM_FL_NO_LRU);
+
         /* lock not sent to server yet */
         if (reqp == NULL || *reqp == NULL) {
@@ -858,10 +871,6 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                                      DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
         }
 
-        lock->l_conn_export = exp;
-        lock->l_export = NULL;
-        lock->l_blocking_ast = einfo->ei_cb_bl;
-
         /* Dump lock data into the request buffer */
         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
         ldlm_lock2desc(lock, &body->lock_desc);
@@ -915,6 +924,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
 
         RETURN(rc);
 }
+EXPORT_SYMBOL(ldlm_cli_enqueue);
 
 static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                   __u32 *flags)
@@ -1013,6 +1023,7 @@ int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, __u32 *flags)
         ptlrpc_req_finished(req);
         return rc;
 }
+EXPORT_SYMBOL(ldlm_cli_convert);
 
 /* Cancel locks locally.
  * Returns:
@@ -1118,8 +1129,6 @@ int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
                         count = free;
 
         while (1) {
-                int bufcount;
-
                 imp = class_exp2cliimp(exp);
                 if (imp == NULL || imp->imp_invalid) {
                         CDEBUG(D_DLMTRACE,
@@ -1131,7 +1140,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
                 if (req == NULL)
                         GOTO(out, rc = -ENOMEM);
 
-                bufcount = req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
+                req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
                                      ldlm_request_bufsize(count, LDLM_CANCEL));
 
@@ -1151,7 +1160,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
                 ptlrpc_request_set_replen(req);
 
                 if (flags & LCF_ASYNC) {
-                        ptlrpcd_add_req(req, PSCOPE_OTHER);
+                        ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
                         sent = count;
                         GOTO(out, 0);
                 } else {
@@ -1181,6 +1190,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
 out:
         return sent ? sent : rc;
 }
+EXPORT_SYMBOL(ldlm_cli_cancel_req);
 
 static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
 {
@@ -1194,7 +1204,7 @@ static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
 int ldlm_cli_update_pool(struct ptlrpc_request *req)
 {
         struct obd_device *obd;
-        __u64 old_slv, new_slv;
+        __u64 new_slv;
         __u32 new_limit;
         ENTRY;
         if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
@@ -1234,7 +1244,6 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
          * oops in that time. */
         cfs_write_lock(&obd->obd_pool_lock);
-        old_slv = obd->obd_pool_slv;
         obd->obd_pool_slv = new_slv;
         obd->obd_pool_limit = new_limit;
         cfs_write_unlock(&obd->obd_pool_lock);
@@ -1286,6 +1295,7 @@ int ldlm_cli_cancel(struct lustre_handle *lockh)
         ldlm_cli_cancel_list(&cancels, count, NULL, 0);
         RETURN(0);
 }
+EXPORT_SYMBOL(ldlm_cli_cancel);
 
 /* XXX until we will have compound requests and can cut cancels from generic rpc
  * we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
@@ -1329,6 +1339,7 @@ int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
 
         RETURN(count);
 }
+EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
 
 /**
  * Cancel as many locks as possible w/o sending any rpcs (e.g. to write back
@@ -1754,6 +1765,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
 
         RETURN(ldlm_cli_cancel_list_local(cancels, count, cancel_flags));
 }
+EXPORT_SYMBOL(ldlm_cancel_resource_local);
 
 /* If @req is NULL, send CANCEL request to server with handles of locks
  * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
@@ -1806,6 +1818,7 @@ int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
         LASSERT(count == 0);
         RETURN(0);
 }
+EXPORT_SYMBOL(ldlm_cli_cancel_list);
 
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                     const struct ldlm_res_id *res_id,
@@ -1838,6 +1851,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
         ldlm_resource_putref(res);
         RETURN(0);
 }
+EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
 
 struct ldlm_cli_cancel_arg {
         int     lc_flags;
@@ -1891,6 +1905,7 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
                 RETURN(ELDLM_OK);
         }
 }
+EXPORT_SYMBOL(ldlm_cli_cancel_unused);
 
 /* Lock iterators. */
 
@@ -1931,6 +1946,7 @@ int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
         unlock_res(res);
         RETURN(rc);
 }
+EXPORT_SYMBOL(ldlm_resource_foreach);
 
 struct iter_helper_data {
         ldlm_iterator_t iter;
@@ -1963,6 +1979,7 @@ void ldlm_namespace_foreach(struct ldlm_namespace *ns,
                                  ldlm_res_iter_helper, &helper);
 
 }
+EXPORT_SYMBOL(ldlm_namespace_foreach);
 
 /* non-blocking function to manipulate a lock whose cb_data is being put away.
  * return 0:  find no resource
@@ -1992,6 +2009,7 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns,
         ldlm_resource_putref(res);
         RETURN(rc);
 }
+EXPORT_SYMBOL(ldlm_resource_iterate);
 
 /* Lock replay */
 
@@ -2046,6 +2064,9 @@ static int replay_lock_interpret(const struct lu_env *env,
         /* Key change rehash lock in per-export hash with new key */
         exp = req->rq_export;
         if (exp && exp->exp_lock_hash) {
+                /* In the function below, .hs_keycmp resolves to
+                 * ldlm_export_lock_keycmp() */
+                /* coverity[overrun-buffer-val] */
                 cfs_hash_rehash_key(exp->exp_lock_hash,
                                     &lock->l_remote_handle,
                                     &reply->lock_handle,
@@ -2059,7 +2080,7 @@ static int replay_lock_interpret(const struct lu_env *env,
         LDLM_LOCK_PUT(lock);
 out:
         if (rc != ELDLM_OK)
-                ptlrpc_connect_import(req->rq_import, NULL);
+                ptlrpc_connect_import(req->rq_import);
 
         RETURN(rc);
 }
@@ -2142,7 +2163,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
         aa = ptlrpc_req_async_args(req);
         aa->lock_handle = body->lock_handle[0];
         req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
-        ptlrpcd_add_req(req, PSCOPE_OTHER);
+        ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
 
         RETURN(0);
 }
@@ -2213,3 +2234,4 @@ int ldlm_replay_locks(struct obd_import *imp)
 
         RETURN(rc);
 }
+EXPORT_SYMBOL(ldlm_replay_locks);