X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fmdc%2Fmdc_locks.c;h=fecf49aa1ebba20ffbf9c6d51a991ebeddcda603;hp=b70e97457391db9f25f7b542077547f4cb92ace4;hb=8d161d44214f907a9f2d9cf5a79cd2c83de995c3;hpb=f95393b0d0a59cf3dc2f29cffc35dcc4cc9d7728 diff --git a/lustre/mdc/mdc_locks.c b/lustre/mdc/mdc_locks.c index b70e974..fecf49a 100644 --- a/lustre/mdc/mdc_locks.c +++ b/lustre/mdc/mdc_locks.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -28,54 +26,46 @@ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. */ -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB -#endif #define DEBUG_SUBSYSTEM S_MDC #ifdef __KERNEL__ # include -# include -# include -# include +# include #else # include #endif -#include +#include #include #include -/* fid_res_name_eq() */ -#include -#include +#include /* fid_res_name_eq() */ +#include +#include +#include #include "mdc_internal.h" -int it_disposition(struct lookup_intent *it, int flag) -{ - return it->d.lustre.it_disposition & flag; -} -EXPORT_SYMBOL(it_disposition); - -void it_set_disposition(struct lookup_intent *it, int flag) -{ - it->d.lustre.it_disposition |= flag; -} -EXPORT_SYMBOL(it_set_disposition); - -void it_clear_disposition(struct lookup_intent *it, int flag) -{ - it->d.lustre.it_disposition &= ~flag; -} -EXPORT_SYMBOL(it_clear_disposition); +struct mdc_getattr_args { + struct obd_export *ga_exp; + struct md_enqueue_info *ga_minfo; + struct ldlm_enqueue_info *ga_einfo; +}; int it_open_error(int phase, struct lookup_intent *it) { + if (it_disposition(it, DISP_OPEN_LEASE)) { + if (phase >= DISP_OPEN_LEASE) + return it->d.lustre.it_status; + else + return 0; + } if (it_disposition(it, DISP_OPEN_OPEN)) { if (phase >= DISP_OPEN_OPEN) return it->d.lustre.it_status; @@ -112,36 +102,35 @@ EXPORT_SYMBOL(it_open_error); /* this must be called on a lockh that is known to have a referenced lock */ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, - __u32 *bits) + __u64 *bits) { - struct ldlm_lock *lock; + struct ldlm_lock *lock; + struct inode *new_inode = data; ENTRY; if(bits) *bits = 0; - if (!*lockh) { - EXIT; + if (!*lockh) RETURN(0); - } lock = ldlm_handle2lock((struct lustre_handle *)lockh); LASSERT(lock != NULL); lock_res_and_lock(lock); #ifdef __KERNEL__ - if (lock->l_ast_data && lock->l_ast_data != data) { - struct inode *new_inode = data; - struct inode *old_inode = lock->l_ast_data; - LASSERTF(old_inode->i_state & I_FREEING, - "Found existing inode %p/%lu/%u state %lu in lock: " - "setting data to %p/%lu/%u\n", old_inode, - old_inode->i_ino, old_inode->i_generation, - old_inode->i_state, - new_inode, new_inode->i_ino, new_inode->i_generation); - } + if (lock->l_resource->lr_lvb_inode && + lock->l_resource->lr_lvb_inode != data) { + struct inode *old_inode = lock->l_resource->lr_lvb_inode; + LASSERTF(old_inode->i_state & I_FREEING, + "Found existing inode %p/%lu/%u state %lu in lock: " + "setting data to %p/%lu/%u\n", old_inode, + old_inode->i_ino, old_inode->i_generation, + old_inode->i_state, + new_inode, new_inode->i_ino, new_inode->i_generation); + } #endif - lock->l_ast_data = data; + 
lock->l_resource->lr_lvb_inode = new_inode; if (bits) *bits = lock->l_policy_data.l_inodebits.bits; @@ -151,19 +140,21 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, RETURN(0); } -ldlm_mode_t mdc_lock_match(struct obd_export *exp, int flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh) +ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, ldlm_type_t type, + ldlm_policy_data_t *policy, ldlm_mode_t mode, + struct lustre_handle *lockh) { - struct ldlm_res_id res_id; - ldlm_mode_t rc; - ENTRY; - - fid_build_reg_res_name(fid, &res_id); - rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags, - &res_id, type, policy, mode, lockh, 0); - RETURN(rc); + struct ldlm_res_id res_id; + ldlm_mode_t rc; + ENTRY; + + fid_build_reg_res_name(fid, &res_id); + /* LU-4405: Clear bits not supported by server */ + policy->l_inodebits.bits &= exp_connect_ibits(exp); + rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags, + &res_id, type, policy, mode, lockh, 0); + RETURN(rc); } int mdc_cancel_unused(struct obd_export *exp, @@ -185,19 +176,28 @@ int mdc_cancel_unused(struct obd_export *exp, RETURN(rc); } -int mdc_change_cbdata(struct obd_export *exp, - const struct lu_fid *fid, - ldlm_iterator_t it, void *data) +int mdc_null_inode(struct obd_export *exp, + const struct lu_fid *fid) { - struct ldlm_res_id res_id; - ENTRY; + struct ldlm_res_id res_id; + struct ldlm_resource *res; + struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace; + ENTRY; - fid_build_reg_res_name(fid, &res_id); - ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace, - &res_id, it, data); + LASSERTF(ns != NULL, "no namespace passed\n"); - EXIT; - return 0; + fid_build_reg_res_name(fid, &res_id); + + res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); + if(res == NULL) + RETURN(0); + + lock_res(res); + res->lr_lvb_inode = NULL; + unlock_res(res); + + ldlm_resource_putref(res); + RETURN(0); } /* find any ldlm lock of the inode in mdc @@ -224,11 +224,11 @@ int mdc_find_cbdata(struct obd_export *exp, static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc) { - /* Don't hold error requests for replay. */ - if (req->rq_replay) { - cfs_spin_lock(&req->rq_lock); - req->rq_replay = 0; - cfs_spin_unlock(&req->rq_lock); + /* Don't hold error requests for replay. 
*/ + if (req->rq_replay) { + spin_lock(&req->rq_lock); + req->rq_replay = 0; + spin_unlock(&req->rq_lock); } if (rc && req->rq_transno != 0) { DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc); @@ -263,38 +263,45 @@ static void mdc_realloc_openmsg(struct ptlrpc_request *req, } } -static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp, - struct lookup_intent *it, - struct md_op_data *op_data, - void *lmm, int lmmsize, - void *cb_data) +static struct ptlrpc_request * +mdc_intent_open_pack(struct obd_export *exp, struct lookup_intent *it, + struct md_op_data *op_data) { - struct ptlrpc_request *req; - struct obd_device *obddev = class_exp2obd(exp); - struct ldlm_intent *lit; - CFS_LIST_HEAD(cancels); - int count = 0; - int mode; - int rc; + struct ptlrpc_request *req; + struct obd_device *obddev = class_exp2obd(exp); + struct ldlm_intent *lit; + const void *lmm = op_data->op_data; + int lmmsize = op_data->op_data_size; + struct list_head cancels = LIST_HEAD_INIT(cancels); + int count = 0; + int mode; + int rc; ENTRY; it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG; /* XXX: openlock is not cancelled for cross-refs. */ /* If inode is known, cancel conflicting OPEN locks. */ - if (fid_is_sane(&op_data->op_fid2)) { - if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC)) - mode = LCK_CW; + if (fid_is_sane(&op_data->op_fid2)) { + if (it->it_flags & MDS_OPEN_LEASE) { /* try to get lease */ + if (it->it_flags & FMODE_WRITE) + mode = LCK_EX; + else + mode = LCK_PR; + } else { + if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC)) + mode = LCK_CW; #ifdef FMODE_EXEC - else if (it->it_flags & FMODE_EXEC) - mode = LCK_PR; + else if (it->it_flags & FMODE_EXEC) + mode = LCK_PR; #endif - else - mode = LCK_CR; - count = mdc_resource_get_unused(exp, &op_data->op_fid2, - &cancels, mode, - MDS_INODELOCK_OPEN); - } + else + mode = LCK_CR; + } + count = mdc_resource_get_unused(exp, &op_data->op_fid2, + &cancels, mode, + MDS_INODELOCK_OPEN); + } /* If CREATE, cancel parent's UPDATE lock. 
*/ if (it->it_op & IT_CREAT) @@ -320,18 +327,18 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp, req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, op_data->op_namelen + 1); - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, - max(lmmsize, obddev->u.cli.cl_default_mds_easize)); + req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, + max(lmmsize, obddev->u.cli.cl_default_mds_easize)); - rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return NULL; - } + rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); + if (rc < 0) { + ptlrpc_request_free(req); + RETURN(ERR_PTR(rc)); + } - cfs_spin_lock(&req->rq_lock); - req->rq_replay = req->rq_import->imp_replayable; - cfs_spin_unlock(&req->rq_lock); + spin_lock(&req->rq_lock); + req->rq_replay = req->rq_import->imp_replayable; + spin_unlock(&req->rq_lock); /* pack the intent */ lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); @@ -341,6 +348,9 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp, mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm, lmmsize); + req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, + obddev->u.cli.cl_max_mds_easize); + /* for remote client, fetch remote perm for current user */ if (client_is_remote(exp)) req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, @@ -349,6 +359,55 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp, return req; } +static struct ptlrpc_request * +mdc_intent_getxattr_pack(struct obd_export *exp, + struct lookup_intent *it, + struct md_op_data *op_data) +{ + struct ptlrpc_request *req; + struct ldlm_intent *lit; + int rc, count = 0, maxdata; + struct list_head cancels = LIST_HEAD_INIT(cancels); + + ENTRY; + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), + &RQF_LDLM_INTENT_GETXATTR); + if (req == NULL) + RETURN(ERR_PTR(-ENOMEM)); + + mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); + + rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); + if (rc) { + ptlrpc_request_free(req); + RETURN(ERR_PTR(rc)); + } + + /* pack the intent */ + lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); + lit->opc = IT_GETXATTR; + + maxdata = class_exp2cliimp(exp)->imp_connect_data.ocd_max_easize; + + /* pack the intended request */ + mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1, + op_data->op_valid, maxdata, -1, 0); + + req_capsule_set_size(&req->rq_pill, &RMF_EADATA, + RCL_SERVER, maxdata); + + req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, + RCL_SERVER, maxdata); + + req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS, + RCL_SERVER, maxdata); + + ptlrpc_request_set_replen(req); + + RETURN(req); +} + static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, struct lookup_intent *it, struct md_op_data *op_data) @@ -381,28 +440,29 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, /* pack the intended request */ mdc_unlink_pack(req, op_data); - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - obddev->u.cli.cl_max_mds_easize); - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, - obddev->u.cli.cl_max_mds_cookiesize); - ptlrpc_request_set_replen(req); - RETURN(req); + req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, + obddev->u.cli.cl_default_mds_easize); + req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, + obddev->u.cli.cl_default_mds_cookiesize); + ptlrpc_request_set_replen(req); + RETURN(req); } 
static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, struct lookup_intent *it, struct md_op_data *op_data) { - struct ptlrpc_request *req; - struct obd_device *obddev = class_exp2obd(exp); - obd_valid valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE | - OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA | - OBD_MD_FLMDSCAPA | OBD_MD_MEA | - (client_is_remote(exp) ? - OBD_MD_FLRMTPERM : OBD_MD_FLACL); - struct ldlm_intent *lit; - int rc; - ENTRY; + struct ptlrpc_request *req; + struct obd_device *obddev = class_exp2obd(exp); + obd_valid valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE | + OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA | + OBD_MD_FLMDSCAPA | OBD_MD_MEA | + (client_is_remote(exp) ? + OBD_MD_FLRMTPERM : OBD_MD_FLACL); + struct ldlm_intent *lit; + int rc; + int easize; + ENTRY; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_GETATTR); @@ -423,19 +483,63 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); lit->opc = (__u64)it->it_op; - /* pack the intended request */ - mdc_getattr_pack(req, valid, it->it_flags, op_data); + if (obddev->u.cli.cl_default_mds_easize > 0) + easize = obddev->u.cli.cl_default_mds_easize; + else + easize = obddev->u.cli.cl_max_mds_easize; - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - obddev->u.cli.cl_max_mds_easize); - if (client_is_remote(exp)) - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, - sizeof(struct mdt_remote_perm)); - ptlrpc_request_set_replen(req); - RETURN(req); + /* pack the intended request */ + mdc_getattr_pack(req, valid, it->it_flags, op_data, easize); + + req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize); + if (client_is_remote(exp)) + req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, + sizeof(struct mdt_remote_perm)); + ptlrpc_request_set_replen(req); + RETURN(req); } -static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp) +static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp, + struct lookup_intent *it, + struct md_op_data *unused) +{ + struct obd_device *obd = class_exp2obd(exp); + struct ptlrpc_request *req; + struct ldlm_intent *lit; + struct layout_intent *layout; + int rc; + ENTRY; + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), + &RQF_LDLM_INTENT_LAYOUT); + if (req == NULL) + RETURN(ERR_PTR(-ENOMEM)); + + req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0); + rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); + if (rc) { + ptlrpc_request_free(req); + RETURN(ERR_PTR(rc)); + } + + /* pack the intent */ + lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); + lit->opc = (__u64)it->it_op; + + /* pack the layout intent request */ + layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT); + /* LAYOUT_INTENT_ACCESS is generic, specific operation will be + * set for replication */ + layout->li_opc = LAYOUT_INTENT_ACCESS; + + req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, + obd->u.cli.cl_default_mds_easize); + ptlrpc_request_set_replen(req); + RETURN(req); +} + +static struct ptlrpc_request * +mdc_enqueue_pack(struct obd_export *exp, int lvb_len) { struct ptlrpc_request *req; int rc; @@ -451,6 +555,7 @@ static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp) RETURN(ERR_PTR(rc)); } + req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len); ptlrpc_request_set_replen(req); RETURN(req); } @@ -462,9 +567,13 @@ static int mdc_finish_enqueue(struct obd_export *exp, 
struct lustre_handle *lockh, int rc) { - struct req_capsule *pill = &req->rq_pill; - struct ldlm_request *lockreq; - struct ldlm_reply *lockrep; + struct req_capsule *pill = &req->rq_pill; + struct ldlm_request *lockreq; + struct ldlm_reply *lockrep; + struct lustre_intent_data *intent = &it->d.lustre; + struct ldlm_lock *lock; + void *lvb_data = NULL; + int lvb_len = 0; ENTRY; LASSERT(rc >= 0); @@ -472,7 +581,7 @@ static int mdc_finish_enqueue(struct obd_export *exp, * actually get a lock, just perform the intent. */ if (req->rq_transno || req->rq_replay) { lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ); - lockreq->lock_flags |= LDLM_FL_INTENT_ONLY; + lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY); } if (rc == ELDLM_LOCK_ABORTED) { @@ -480,8 +589,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, memset(lockh, 0, sizeof(*lockh)); rc = 0; } else { /* rc = 0 */ - struct ldlm_lock *lock = ldlm_handle2lock(lockh); - LASSERT(lock); + lock = ldlm_handle2lock(lockh); + LASSERT(lock != NULL); /* If the server gave us back a different lock mode, we should * fix up our variables. */ @@ -490,20 +599,22 @@ static int mdc_finish_enqueue(struct obd_export *exp, ldlm_lock_decref(lockh, einfo->ei_mode); einfo->ei_mode = lock->l_req_mode; } - LDLM_LOCK_PUT(lock); - } + LDLM_LOCK_PUT(lock); + } - lockrep = req_capsule_server_get(pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */ + lockrep = req_capsule_server_get(pill, &RMF_DLM_REP); + LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */ - it->d.lustre.it_disposition = (int)lockrep->lock_policy_res1; - it->d.lustre.it_status = (int)lockrep->lock_policy_res2; - it->d.lustre.it_lock_mode = einfo->ei_mode; - it->d.lustre.it_lock_handle = lockh->cookie; - it->d.lustre.it_data = req; + intent->it_disposition = (int)lockrep->lock_policy_res1; + intent->it_status = (int)lockrep->lock_policy_res2; + intent->it_lock_mode = einfo->ei_mode; + intent->it_lock_handle = lockh->cookie; + intent->it_data = req; - if (it->d.lustre.it_status < 0 && req->rq_replay) - mdc_clear_replay_flag(req, it->d.lustre.it_status); + /* Technically speaking rq_transno must already be zero if + * it_status is in error, so the check is a bit redundant */ + if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay) + mdc_clear_replay_flag(req, intent->it_status); /* If we're doing an IT_OPEN which did not result in an actual * successful open, then we need to remove the bit which saves @@ -513,11 +624,11 @@ static int mdc_finish_enqueue(struct obd_export *exp, * function without doing so, and try to replay a failed create * (bug 3440) */ if (it->it_op & IT_OPEN && req->rq_replay && - (!it_disposition(it, DISP_OPEN_OPEN) ||it->d.lustre.it_status != 0)) - mdc_clear_replay_flag(req, it->d.lustre.it_status); + (!it_disposition(it, DISP_OPEN_OPEN) ||intent->it_status != 0)) + mdc_clear_replay_flag(req, intent->it_status); - DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d", - it->it_op,it->d.lustre.it_disposition,it->d.lustre.it_status); + DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d", + it->it_op, intent->it_disposition, intent->it_status); /* We know what to expect, so we do any byte flipping required here */ if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) { @@ -537,13 +648,13 @@ static int mdc_finish_enqueue(struct obd_export *exp, * happens immediately after swabbing below, new reply * is swabbed by that handler correctly. 
*/ - mdc_set_open_replay_data(NULL, NULL, req); - } + mdc_set_open_replay_data(NULL, NULL, it); + } if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) { void *eadata; - mdc_update_max_ea_from_body(exp, body); + mdc_update_max_ea_from_body(exp, body); /* * The eadata is opaque; just check that it is there. @@ -554,6 +665,11 @@ static int mdc_finish_enqueue(struct obd_export *exp, if (eadata == NULL) RETURN(-EPROTO); + /* save lvb data and length in case this is for layout + * lock */ + lvb_data = eadata; + lvb_len = body->eadatasize; + /* * We save the reply LOV EA in case we have to replay a * create for recovery. If we didn't allocate a large @@ -614,107 +730,217 @@ static int mdc_finish_enqueue(struct obd_export *exp, if (capa == NULL) RETURN(-EPROTO); } - } - - RETURN(rc); + } else if (it->it_op & IT_LAYOUT) { + /* maybe the lock was granted right away and layout + * is packed into RMF_DLM_LVB of req */ + lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER); + if (lvb_len > 0) { + lvb_data = req_capsule_server_sized_get(pill, + &RMF_DLM_LVB, lvb_len); + if (lvb_data == NULL) + RETURN(-EPROTO); + } + } + + /* fill in stripe data for layout lock */ + lock = ldlm_handle2lock(lockh); + if (lock != NULL && ldlm_has_layout(lock) && lvb_data != NULL) { + void *lmm; + + LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n", + ldlm_it2str(it->it_op), lvb_len); + + OBD_ALLOC_LARGE(lmm, lvb_len); + if (lmm == NULL) { + LDLM_LOCK_PUT(lock); + RETURN(-ENOMEM); + } + memcpy(lmm, lvb_data, lvb_len); + + /* install lvb_data */ + lock_res_and_lock(lock); + if (lock->l_lvb_data == NULL) { + lock->l_lvb_type = LVB_T_LAYOUT; + lock->l_lvb_data = lmm; + lock->l_lvb_len = lvb_len; + lmm = NULL; + } + unlock_res_and_lock(lock); + if (lmm != NULL) + OBD_FREE_LARGE(lmm, lvb_len); + } + if (lock != NULL) + LDLM_LOCK_PUT(lock); + + RETURN(rc); } /* We always reserve enough space in the reply packet for a stripe MD, because * we don't know in advance the file type. 
*/ -int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - struct lookup_intent *it, struct md_op_data *op_data, - struct lustre_handle *lockh, void *lmm, int lmmsize, - struct ptlrpc_request **reqp, int extra_lock_flags) +int mdc_enqueue(struct obd_export *exp, + struct ldlm_enqueue_info *einfo, + const union ldlm_policy_data *policy, + struct lookup_intent *it, struct md_op_data *op_data, + struct lustre_handle *lockh, __u64 extra_lock_flags) { struct obd_device *obddev = class_exp2obd(exp); struct ptlrpc_request *req = NULL; - struct req_capsule *pill; - int flags = extra_lock_flags; + __u64 flags, saved_flags = extra_lock_flags; int rc; struct ldlm_res_id res_id; static const ldlm_policy_data_t lookup_policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } }; static const ldlm_policy_data_t update_policy = { .l_inodebits = { MDS_INODELOCK_UPDATE } }; - ldlm_policy_data_t const *policy = &lookup_policy; + static const ldlm_policy_data_t layout_policy = + { .l_inodebits = { MDS_INODELOCK_LAYOUT } }; + static const ldlm_policy_data_t getxattr_policy = { + .l_inodebits = { MDS_INODELOCK_XATTR } }; + int generation, resends = 0; + struct ldlm_reply *lockrep; + enum lvb_type lvb_type = 0; ENTRY; LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n", einfo->ei_type); - fid_build_reg_res_name(&op_data->op_fid1, &res_id); - if (it) - flags |= LDLM_FL_HAS_INTENT; - if (it && it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR)) - policy = &update_policy; - - if (reqp) - req = *reqp; - - if (!it) { - /* The only way right now is FLOCK, in this case we hide flock - policy as lmm, but lmmsize is 0 */ - LASSERT(lmm && lmmsize == 0); - LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n", - einfo->ei_type); - policy = (ldlm_policy_data_t *)lmm; - res_id.name[3] = LDLM_FLOCK; - } else if (it->it_op & IT_OPEN) { - req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize, - einfo->ei_cbdata); - policy = &update_policy; - einfo->ei_cbdata = NULL; - lmm = NULL; - } else if (it->it_op & IT_UNLINK) - req = mdc_intent_unlink_pack(exp, it, op_data); - else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) - req = mdc_intent_getattr_pack(exp, it, op_data); - else if (it->it_op == IT_READDIR) - req = ldlm_enqueue_pack(exp); - else { + if (it != NULL) { + LASSERT(policy == NULL); + + saved_flags |= LDLM_FL_HAS_INTENT; + if (it->it_op & (IT_OPEN | IT_UNLINK | IT_GETATTR | IT_READDIR)) + policy = &update_policy; + else if (it->it_op & IT_LAYOUT) + policy = &layout_policy; + else if (it->it_op & (IT_GETXATTR | IT_SETXATTR)) + policy = &getxattr_policy; + else + policy = &lookup_policy; + } + + generation = obddev->u.cli.cl_import->imp_generation; +resend: + flags = saved_flags; + if (it == NULL) { + /* The only way right now is FLOCK. 
*/ + LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n", + einfo->ei_type); + res_id.name[3] = LDLM_FLOCK; + } else if (it->it_op & IT_OPEN) { + req = mdc_intent_open_pack(exp, it, op_data); + } else if (it->it_op & IT_UNLINK) { + req = mdc_intent_unlink_pack(exp, it, op_data); + } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) { + req = mdc_intent_getattr_pack(exp, it, op_data); + } else if (it->it_op & IT_READDIR) { + req = mdc_enqueue_pack(exp, 0); + } else if (it->it_op & IT_LAYOUT) { + if (!imp_connect_lvb_type(class_exp2cliimp(exp))) + RETURN(-EOPNOTSUPP); + req = mdc_intent_layout_pack(exp, it, op_data); + lvb_type = LVB_T_LAYOUT; + } else if (it->it_op & IT_GETXATTR) { + req = mdc_intent_getxattr_pack(exp, it, op_data); + } else { LBUG(); RETURN(-EINVAL); } if (IS_ERR(req)) RETURN(PTR_ERR(req)); - pill = &req->rq_pill; + + if (req != NULL && it && it->it_op & IT_CREAT) + /* ask ptlrpc not to resend on EINPROGRESS since we have our own + * retry logic */ + req->rq_no_retry_einprogress = 1; + + if (resends) { + req->rq_generation_set = 1; + req->rq_import_generation = generation; + req->rq_sent = cfs_time_current_sec() + resends; + } /* It is important to obtain rpc_lock first (if applicable), so that * threads that are serialised with rpc_lock are not polluting our * rpcs in flight counter. We do not do flock request limiting, though*/ if (it) { mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it); - mdc_enter_request(&obddev->u.cli); + rc = obd_get_request_slot(&obddev->u.cli); + if (rc != 0) { + mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); + mdc_clear_replay_flag(req, 0); + ptlrpc_req_finished(req); + RETURN(rc); + } } rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL, - 0, lockh, 0); - if (reqp) - *reqp = req; - - if (it) { - mdc_exit_request(&obddev->u.cli); - mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); - } + 0, lvb_type, lockh, 0); if (!it) { /* For flock requests we immediatelly return without further delay and let caller deal with the rest, since rest of this function metadata processing makes no sense for flock - requests anyway */ - RETURN(rc); - } - - if (rc < 0) { - CERROR("ldlm_cli_enqueue: %d\n", rc); + requests anyway. But in case of problem during comms with + Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we + can not rely on caller and this mainly for F_UNLCKs + (explicits or automatically generated by Kernel to clean + current FLocks upon exit) that can't be trashed */ + if (((rc == -EINTR) || (rc == -ETIMEDOUT)) && + (einfo->ei_type == LDLM_FLOCK) && + (einfo->ei_mode == LCK_NL)) + goto resend; + RETURN(rc); + } + + obd_put_request_slot(&obddev->u.cli); + mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); + + if (rc < 0) { + CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR, + "%s: ldlm_cli_enqueue failed: rc = %d\n", + obddev->obd_name, rc); + + mdc_clear_replay_flag(req, rc); + ptlrpc_req_finished(req); + RETURN(rc); + } + + lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); + LASSERT(lockrep != NULL); + + lockrep->lock_policy_res2 = + ptlrpc_status_ntoh(lockrep->lock_policy_res2); + + /* Retry the create infinitely when we get -EINPROGRESS from + * server. This is required by the new quota design. 
*/ + if (it && it->it_op & IT_CREAT && + (int)lockrep->lock_policy_res2 == -EINPROGRESS) { mdc_clear_replay_flag(req, rc); ptlrpc_req_finished(req); - RETURN(rc); + resends++; + + CDEBUG(D_HA, "%s: resend:%d op:%d "DFID"/"DFID"\n", + obddev->obd_name, resends, it->it_op, + PFID(&op_data->op_fid1), PFID(&op_data->op_fid2)); + + if (generation == obddev->u.cli.cl_import->imp_generation) { + goto resend; + } else { + CDEBUG(D_HA, "resend cross eviction\n"); + RETURN(-EIO); + } } - rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); - RETURN(rc); + rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); + if (rc < 0) { + if (lustre_handle_is_used(lockh)) { + ldlm_lock_decref(lockh, einfo->ei_mode); + memset(lockh, 0, sizeof(*lockh)); + } + ptlrpc_req_finished(req); + } + RETURN(rc); } static int mdc_finish_intent_lock(struct obd_export *exp, @@ -727,12 +953,15 @@ static int mdc_finish_intent_lock(struct obd_export *exp, struct mdt_body *mdt_body; struct ldlm_lock *lock; int rc; - + ENTRY; LASSERT(request != NULL); LASSERT(request != LP_POISON); LASSERT(request->rq_repmsg != LP_POISON); + if (it->it_op & IT_READDIR) + RETURN(0); + if (!it_disposition(it, DISP_IT_EXECD)) { /* The server failed before it even started executing the * intent, i.e. because it couldn't unpack the request. */ @@ -751,8 +980,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp, if (fid_is_sane(&op_data->op_fid2) && it->it_create_mode & M_CHECK_STALE && it->it_op != IT_GETATTR) { - it_set_disposition(it, DISP_ENQ_COMPLETE); - /* Also: did we find the same inode? */ /* sever can return one of two fids: * op_fid2 - new allocated fid - if file is created. @@ -794,7 +1021,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp, } else if (it->it_op == IT_OPEN) { LASSERT(!it_disposition(it, DISP_OPEN_CREATE)); } else { - LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP)); + LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP | IT_LAYOUT)); } /* If we already have a matching lock, then cancel the new @@ -807,16 +1034,11 @@ static int mdc_finish_intent_lock(struct obd_export *exp, ldlm_policy_data_t policy = lock->l_policy_data; LDLM_DEBUG(lock, "matching against this"); - LASSERTF(fid_res_name_eq(&mdt_body->fid1, - &lock->l_resource->lr_name), - "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n", - (unsigned long)lock->l_resource->lr_name.name[0], - (unsigned long)lock->l_resource->lr_name.name[1], - (unsigned long)lock->l_resource->lr_name.name[2], - (unsigned long)fid_seq(&mdt_body->fid1), - (unsigned long)fid_oid(&mdt_body->fid1), - (unsigned long)fid_ver(&mdt_body->fid1)); - LDLM_LOCK_PUT(lock); + LASSERTF(fid_res_name_eq(&mdt_body->fid1, + &lock->l_resource->lr_name), + "Lock res_id: "DLDLMRES", fid: "DFID"\n", + PLDLMRES(lock->l_resource), PFID(&mdt_body->fid1)); + LDLM_LOCK_PUT(lock); memcpy(&old_lock, lockh, sizeof(*lockh)); if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL, @@ -834,7 +1056,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp, } int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, - struct lu_fid *fid, __u32 *bits) + struct lu_fid *fid, __u64 *bits) { /* We could just return 1 immediately, but since we should only * be called in revalidate_it if we already have a lock, let's @@ -845,27 +1067,54 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, ldlm_mode_t mode; ENTRY; - fid_build_reg_res_name(fid, &res_id); - /* Firstly consider the bits */ - if (bits && *bits) - policy.l_inodebits.bits = *bits; - else - policy.l_inodebits.bits = 
(it->it_op == IT_GETATTR) ? - MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP; + if (it->d.lustre.it_lock_handle) { + lockh.cookie = it->d.lustre.it_lock_handle; + mode = ldlm_revalidate_lock_handle(&lockh, bits); + } else { + fid_build_reg_res_name(fid, &res_id); + switch (it->it_op) { + case IT_GETATTR: + /* File attributes are held under multiple bits: + * nlink is under lookup lock, size and times are + * under UPDATE lock and recently we've also got + * a separate permissions lock for owner/group/acl that + * were protected by lookup lock before. + * Getattr must provide all of that information, + * so we need to ensure we have all of those locks. + * Unfortunately, if the bits are split across multiple + * locks, there's no easy way to match all of them here, + * so an extra RPC would be performed to fetch all + * of those bits at once for now. */ + /* For new MDTs(> 2.4), UPDATE|PERM should be enough, + * but for old MDTs (< 2.4), permission is covered + * by LOOKUP lock, so it needs to match all bits here.*/ + policy.l_inodebits.bits = MDS_INODELOCK_UPDATE | + MDS_INODELOCK_LOOKUP | + MDS_INODELOCK_PERM; + break; + case IT_READDIR: + policy.l_inodebits.bits = MDS_INODELOCK_UPDATE; + break; + case IT_LAYOUT: + policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT; + break; + default: + policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP; + break; + } + + mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid, + LDLM_IBITS, &policy, + LCK_CR | LCK_CW | LCK_PR | LCK_PW, + &lockh); + } - mode = ldlm_lock_match(exp->exp_obd->obd_namespace, - LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_IBITS, - &policy, LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh, 0); if (mode) { it->d.lustre.it_lock_handle = lockh.cookie; it->d.lustre.it_lock_mode = mode; - if (bits) { - struct ldlm_lock *lock = ldlm_handle2lock(&lockh); - - LASSERT(lock != NULL); - *bits = lock->l_policy_data.l_inodebits.bits; - LDLM_LOCK_PUT(lock); - } + } else { + it->d.lustre.it_lock_handle = 0; + it->d.lustre.it_lock_mode = 0; } RETURN(!!mode); @@ -899,84 +1148,72 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, * child lookup. */ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data, - void *lmm, int lmmsize, struct lookup_intent *it, - int lookup_flags, struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, - int extra_lock_flags) + struct lookup_intent *it, struct ptlrpc_request **reqp, + ldlm_blocking_callback cb_blocking, __u64 extra_lock_flags) { - struct lustre_handle lockh; - int rc = 0; - ENTRY; - LASSERT(it); - - CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID - ", intent: %s flags %#o\n", op_data->op_namelen, - op_data->op_name, PFID(&op_data->op_fid2), - PFID(&op_data->op_fid1), ldlm_it2str(it->it_op), - it->it_flags); - - lockh.cookie = 0; - if (fid_is_sane(&op_data->op_fid2) && - (it->it_op & (IT_LOOKUP | IT_GETATTR))) { - /* We could just return 1 immediately, but since we should only - * be called in revalidate_it if we already have a lock, let's - * verify that. */ - rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL); - /* Only return failure if it was not GETATTR by cfid - (from inode_revalidate) */ - if (rc || op_data->op_namelen != 0) - RETURN(rc); - } - - /* lookup_it may be called only after revalidate_it has run, because - * revalidate_it cannot return errors, only zero. Returning zero causes - * this call to lookup, which *can* return an error. - * - * We only want to execute the request associated with the intent one - * time, however, so don't send the request again. 
Instead, skip past - * this and use the request from revalidate. In this case, revalidate - * never dropped its reference, so the refcounts are all OK */ - if (!it_disposition(it, DISP_ENQ_COMPLETE)) { - struct ldlm_enqueue_info einfo = - { LDLM_IBITS, it_to_lock_mode(it), cb_blocking, - ldlm_completion_ast, NULL, NULL, NULL }; - - /* For case if upper layer did not alloc fid, do it now. */ - if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) { - rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data); - if (rc < 0) { - CERROR("Can't alloc new fid, rc %d\n", rc); - RETURN(rc); - } - } - rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh, - lmm, lmmsize, NULL, extra_lock_flags); - if (rc < 0) - RETURN(rc); - } else if (!fid_is_sane(&op_data->op_fid2) || - !(it->it_create_mode & M_CHECK_STALE)) { - /* DISP_ENQ_COMPLETE set means there is extra reference on - * request referenced from this intent, saved for subsequent - * lookup. This path is executed when we proceed to this - * lookup, so we clear DISP_ENQ_COMPLETE */ - it_clear_disposition(it, DISP_ENQ_COMPLETE); - } - *reqp = it->d.lustre.it_data; + struct ldlm_enqueue_info einfo = { + .ei_type = LDLM_IBITS, + .ei_mode = it_to_lock_mode(it), + .ei_cb_bl = cb_blocking, + .ei_cb_cp = ldlm_completion_ast, + }; + struct lustre_handle lockh; + int rc = 0; + ENTRY; + LASSERT(it); + + CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID + ", intent: %s flags %#"LPF64"o\n", op_data->op_namelen, + op_data->op_name, PFID(&op_data->op_fid2), + PFID(&op_data->op_fid1), ldlm_it2str(it->it_op), + it->it_flags); + + lockh.cookie = 0; + if (fid_is_sane(&op_data->op_fid2) && + (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_READDIR))) { + /* We could just return 1 immediately, but since we should only + * be called in revalidate_it if we already have a lock, let's + * verify that. */ + it->d.lustre.it_lock_handle = 0; + rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL); + /* Only return failure if it was not GETATTR by cfid + (from inode_revalidate) */ + if (rc || op_data->op_namelen != 0) + RETURN(rc); + } + + /* For case if upper layer did not alloc fid, do it now. 
*/ + if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) { + rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); + if (rc < 0) { + CERROR("Can't alloc new fid, rc %d\n", rc); + RETURN(rc); + } + } + + rc = mdc_enqueue(exp, &einfo, NULL, it, op_data, &lockh, + extra_lock_flags); + if (rc < 0) + RETURN(rc); + + *reqp = it->d.lustre.it_data; rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh); RETURN(rc); } static int mdc_intent_getattr_async_interpret(const struct lu_env *env, struct ptlrpc_request *req, - void *unused, int rc) + void *args, int rc) { - struct obd_export *exp = req->rq_async_args.pointer_arg[0]; - struct md_enqueue_info *minfo = req->rq_async_args.pointer_arg[1]; - struct ldlm_enqueue_info *einfo = req->rq_async_args.pointer_arg[2]; + struct mdc_getattr_args *ga = args; + struct obd_export *exp = ga->ga_exp; + struct md_enqueue_info *minfo = ga->ga_minfo; + struct ldlm_enqueue_info *einfo = ga->ga_einfo; struct lookup_intent *it; struct lustre_handle *lockh; struct obd_device *obddev; - int flags = LDLM_FL_HAS_INTENT; + struct ldlm_reply *lockrep; + __u64 flags = LDLM_FL_HAS_INTENT; ENTRY; it = &minfo->mi_it; @@ -984,7 +1221,7 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env, obddev = class_exp2obd(exp); - mdc_exit_request(&obddev->u.cli); + obd_put_request_slot(&obddev->u.cli); if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GETATTR_ENQUEUE)) rc = -ETIMEDOUT; @@ -996,6 +1233,12 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env, GOTO(out, rc); } + lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); + LASSERT(lockrep != NULL); + + lockrep->lock_policy_res2 = + ptlrpc_status_ntoh(lockrep->lock_policy_res2); + rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); if (rc) GOTO(out, rc); @@ -1016,6 +1259,7 @@ int mdc_intent_getattr_async(struct obd_export *exp, struct md_op_data *op_data = &minfo->mi_data; struct lookup_intent *it = &minfo->mi_it; struct ptlrpc_request *req; + struct mdc_getattr_args *ga; struct obd_device *obddev = class_exp2obd(exp); struct ldlm_res_id res_id; /*XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed @@ -1025,32 +1269,42 @@ int mdc_intent_getattr_async(struct obd_export *exp, .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE } }; - int rc; - int flags = LDLM_FL_HAS_INTENT; - ENTRY; - - CDEBUG(D_DLMTRACE,"name: %.*s in inode "DFID", intent: %s flags %#o\n", - op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), - ldlm_it2str(it->it_op), it->it_flags); - - fid_build_reg_res_name(&op_data->op_fid1, &res_id); - req = mdc_intent_getattr_pack(exp, it, op_data); - if (!req) - RETURN(-ENOMEM); + int rc = 0; + __u64 flags = LDLM_FL_HAS_INTENT; + ENTRY; + + CDEBUG(D_DLMTRACE, "name: %.*s in inode "DFID", intent: %s flags %#" + LPF64"o\n", + op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), + ldlm_it2str(it->it_op), it->it_flags); + + fid_build_reg_res_name(&op_data->op_fid1, &res_id); + req = mdc_intent_getattr_pack(exp, it, op_data); + if (IS_ERR(req)) + RETURN(PTR_ERR(req)); + + rc = obd_get_request_slot(&obddev->u.cli); + if (rc != 0) { + ptlrpc_req_finished(req); + RETURN(rc); + } - mdc_enter_request(&obddev->u.cli); rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL, - 0, &minfo->mi_lockh, 1); + 0, LVB_T_NONE, &minfo->mi_lockh, 1); if (rc < 0) { - mdc_exit_request(&obddev->u.cli); + obd_put_request_slot(&obddev->u.cli); + ptlrpc_req_finished(req); RETURN(rc); } - req->rq_async_args.pointer_arg[0] = exp; - 
req->rq_async_args.pointer_arg[1] = minfo; - req->rq_async_args.pointer_arg[2] = einfo; + CLASSERT(sizeof(*ga) <= sizeof(req->rq_async_args)); + ga = ptlrpc_req_async_args(req); + ga->ga_exp = exp; + ga->ga_minfo = minfo; + ga->ga_einfo = einfo; + req->rq_interpret_reply = mdc_intent_getattr_async_interpret; - ptlrpcd_add_req(req, PSCOPE_OTHER); + ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1); RETURN(0); }
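
One change worth calling out from the hunks above: mdc_intent_getattr_async() no longer stuffs its three interpreter arguments into the untyped req->rq_async_args.pointer_arg[] slots; it now declares struct mdc_getattr_args, checks at compile time that it fits into the request's scratch space (the CLASSERT), and obtains a typed pointer from ptlrpc_req_async_args(). The standalone sketch below shows the same pattern outside of Lustre; every name in it (demo_request, demo_getattr_args, demo_req_async_args) is invented for illustration and is not part of the Lustre or ptlrpc API.

#include <stdio.h>

/*
 * Hypothetical request type with an opaque, fixed-size scratch area,
 * standing in for ptlrpc_request::rq_async_args.  The reply interpreter
 * reads a caller-defined struct out of this area instead of the old
 * numbered pointer_arg[] slots.
 */
struct demo_request {
	unsigned long	scratch[4];	/* opaque storage, fixed size */
};

/* Typed argument block, analogous to struct mdc_getattr_args above. */
struct demo_getattr_args {
	void	*ga_exp;
	void	*ga_minfo;
	void	*ga_einfo;
};

/* Compile-time check standing in for
 * CLASSERT(sizeof(*ga) <= sizeof(req->rq_async_args)). */
_Static_assert(sizeof(struct demo_getattr_args) <=
	       sizeof(((struct demo_request *)0)->scratch),
	       "args struct must fit inside the request scratch area");

/* Equivalent of ptlrpc_req_async_args(req): expose the scratch area
 * as the caller's argument type. */
static struct demo_getattr_args *demo_req_async_args(struct demo_request *req)
{
	return (struct demo_getattr_args *)req->scratch;
}

int main(void)
{
	struct demo_request req = { { 0 } };
	struct demo_getattr_args *ga = demo_req_async_args(&req);

	ga->ga_exp   = (void *)0x1;	/* stand-ins for exp, minfo, einfo */
	ga->ga_minfo = (void *)0x2;
	ga->ga_einfo = (void *)0x3;

	printf("typed args live at %p inside request %p\n",
	       (void *)ga, (void *)&req);
	return 0;
}

The benefit is that the compiler now checks both the overall size and the individual field types, where pointer_arg[0..2] gave neither.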
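
A second mechanism introduced above is the resend loop in mdc_enqueue(): when a create intent comes back with lock_policy_res2 == -EINPROGRESS (for example, the server is still reclaiming quota space), the request is rebuilt and resent with a growing delay, indefinitely, unless the import generation has changed in the meantime, in which case the caller gets -EIO. The sketch below is a simplified, self-contained illustration of just that control flow under invented names (demo_import, demo_send, demo_enqueue_create); the real function also re-packs the request, holds the rpc lock and a request slot, and sets rq_no_retry_einprogress so ptlrpc itself does not retry on top of this loop.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical import state: the generation is bumped each time the
 * client reconnects after an eviction, like imp_generation in the patch.
 */
struct demo_import {
	int	generation;
};

/*
 * Stand-in for sending the create intent and reading the server verdict
 * back from lock_policy_res2: pretend the server is short on resources
 * and answers -EINPROGRESS a couple of times before succeeding.
 */
static int demo_send(struct demo_import *imp)
{
	static int busy = 2;

	(void)imp;
	return busy-- > 0 ? -EINPROGRESS : 0;
}

static int demo_enqueue_create(struct demo_import *imp)
{
	int generation = imp->generation;	/* snapshot, as in mdc_enqueue() */
	int resends = 0;
	int rc;

resend:
	rc = demo_send(imp);
	if (rc == -EINPROGRESS) {
		resends++;
		/* The patch delays rq_sent by "resends" seconds rather than
		 * sleeping; a sleep keeps this demo self-contained. */
		sleep((unsigned int)resends);
		if (generation == imp->generation)
			goto resend;		/* same connection: keep trying */
		fprintf(stderr, "resend crossed an eviction, giving up\n");
		return -EIO;
	}
	return rc;
}

int main(void)
{
	struct demo_import imp = { .generation = 1 };

	printf("enqueue returned %d\n", demo_enqueue_create(&imp));
	return 0;
}

The generation snapshot is the design point: retrying forever is only safe against the same server instance, so any reconnect in the middle turns the "retry later" answer into a hard error for the caller to handle.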