X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fmdc%2Fmdc_locks.c;h=8ab2f4bef5bf1d3aa7e1295b315ab8edbc69b92c;hp=ae07c91f9774a094aabcda67ab5bccf864590eb7;hb=32becabb43b6e8f21c030c23c01c3f02e351eaea;hpb=0463bbc6ebdcb1316ece4077f67573679635a973 diff --git a/lustre/mdc/mdc_locks.c b/lustre/mdc/mdc_locks.c index ae07c91..8ab2f4b 100644 --- a/lustre/mdc/mdc_locks.c +++ b/lustre/mdc/mdc_locks.c @@ -1,22 +1,37 @@ /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- * vim:expandtab:shiftwidth=8:tabstop=8: * - * Copyright (C) 2001-2003 Cluster File Systems, Inc. + * GPL HEADER START * - * This file is part of Lustre, http://www.sf.net/projects/lustre/ + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * Lustre is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. * - * Lustre is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Use is subject to license terms. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. 
*/ #ifndef EXPORT_SYMTAB @@ -33,11 +48,12 @@ # include #endif -#include -#include -#include -#include -#include +#include +#include +#include +/* fid_res_name_eq() */ +#include +#include #include "mdc_internal.h" int it_disposition(struct lookup_intent *it, int flag) @@ -52,64 +68,37 @@ void it_set_disposition(struct lookup_intent *it, int flag) } EXPORT_SYMBOL(it_set_disposition); -static void mdc_fid2mdc_op_data(struct mdc_op_data *data, struct ll_uctxt *ctxt, - struct ll_fid *f1, struct ll_fid *f2, - const char *name, int namelen, int mode) +void it_clear_disposition(struct lookup_intent *it, int flag) { - LASSERT(data); - LASSERT(ctxt); - LASSERT(f1); - - data->ctxt = *ctxt; - data->fid1 = *f1; - if (f2) - data->fid2 = *f2; - else - memset(&data->fid2, 0, sizeof(data->fid2)); - data->name = name; - data->namelen = namelen; - data->create_mode = mode; - data->mod_time = LTIME_S(CURRENT_TIME); -} - -static int it_to_lock_mode(struct lookup_intent *it) -{ - /* CREAT needs to be tested before open (both could be set) */ - if (it->it_op & IT_CREAT) - return LCK_PW; - else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP | - IT_CHDIR)) - return LCK_PR; - - LBUG(); - RETURN(-EINVAL); + it->d.lustre.it_disposition &= ~flag; } +EXPORT_SYMBOL(it_clear_disposition); int it_open_error(int phase, struct lookup_intent *it) { if (it_disposition(it, DISP_OPEN_OPEN)) { - if (phase == DISP_OPEN_OPEN) + if (phase >= DISP_OPEN_OPEN) return it->d.lustre.it_status; else return 0; } if (it_disposition(it, DISP_OPEN_CREATE)) { - if (phase == DISP_OPEN_CREATE) + if (phase >= DISP_OPEN_CREATE) return it->d.lustre.it_status; else return 0; } if (it_disposition(it, DISP_LOOKUP_EXECD)) { - if (phase == DISP_LOOKUP_EXECD) + if (phase >= DISP_LOOKUP_EXECD) return it->d.lustre.it_status; else return 0; } if (it_disposition(it, DISP_IT_EXECD)) { - if (phase == DISP_IT_EXECD) + if (phase >= DISP_IT_EXECD) return it->d.lustre.it_status; else return 0; @@ -122,21 +111,24 @@ int it_open_error(int phase, struct lookup_intent *it) EXPORT_SYMBOL(it_open_error); /* this must be called on a lockh that is known to have a referenced lock */ -int mdc_set_lock_data(struct obd_export *exp, __u64 *l, void *data) +int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, + __u32 *bits) { struct ldlm_lock *lock; - struct lustre_handle *lockh = (struct lustre_handle *)l; ENTRY; - if (!*l) { + if(bits) + *bits = 0; + + if (!*lockh) { EXIT; - return 0; + RETURN(0); } - lock = ldlm_handle2lock(lockh); + lock = ldlm_handle2lock((struct lustre_handle *)lockh); LASSERT(lock != NULL); - l_lock(&lock->l_resource->lr_namespace->ns_lock); + lock_res_and_lock(lock); #ifdef __KERNEL__ if (lock->l_ast_data && lock->l_ast_data != data) { struct inode *new_inode = data; @@ -150,275 +142,679 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *l, void *data) } #endif lock->l_ast_data = data; - l_unlock(&lock->l_resource->lr_namespace->ns_lock); + if (bits) + *bits = lock->l_policy_data.l_inodebits.bits; + + unlock_res_and_lock(lock); LDLM_LOCK_PUT(lock); - EXIT; - return 0; + RETURN(0); } -EXPORT_SYMBOL(mdc_set_lock_data); -int mdc_change_cbdata(struct obd_export *exp, struct ll_fid *fid, - ldlm_iterator_t it, void *data) +ldlm_mode_t mdc_lock_match(struct obd_export *exp, int flags, + const struct lu_fid *fid, ldlm_type_t type, + ldlm_policy_data_t *policy, ldlm_mode_t mode, + struct lustre_handle *lockh) +{ + struct ldlm_res_id res_id; + ldlm_mode_t rc; + ENTRY; + + fid_build_reg_res_name(fid, &res_id); + rc = 
ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags, + &res_id, type, policy, mode, lockh, 0); + RETURN(rc); +} + +int mdc_cancel_unused(struct obd_export *exp, + const struct lu_fid *fid, + ldlm_policy_data_t *policy, + ldlm_mode_t mode, int flags, void *opaque) { - struct ldlm_res_id res_id = { .name = {0} }; + struct ldlm_res_id res_id; + struct obd_device *obd = class_exp2obd(exp); + int rc; + ENTRY; - res_id.name[0] = fid->id; - res_id.name[1] = fid->generation; + fid_build_reg_res_name(fid, &res_id); + rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id, + policy, mode, flags, opaque); + RETURN(rc); +} + +int mdc_change_cbdata(struct obd_export *exp, + const struct lu_fid *fid, + ldlm_iterator_t it, void *data) +{ + struct ldlm_res_id res_id; + ENTRY; - ldlm_change_cbdata(class_exp2obd(exp)->obd_namespace, &res_id, it, - data); + fid_build_reg_res_name(fid, &res_id); + ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace, + &res_id, it, data); EXIT; return 0; } -#if CONFIG_SNAPFS -int mdc_set_clone_info(struct obd_export *exp, struct lustre_msg *msg, - int offset) +static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc) +{ + /* Don't hold error requests for replay. */ + if (req->rq_replay) { + spin_lock(&req->rq_lock); + req->rq_replay = 0; + spin_unlock(&req->rq_lock); + } + if (rc && req->rq_transno != 0) { + DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc); + LBUG(); + } +} + +/* Save a large LOV EA into the request buffer so that it is available + * for replay. We don't do this in the initial request because the + * original request doesn't need this buffer (at most it sends just the + * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty + * buffer and may also be difficult to allocate and save a very large + * request buffer for each open. (bug 5707) + * + * OOM here may cause recovery failure if lmm is needed (only for the + * original open if the MDS crashed just when this client also OOM'd) + * but this is incredibly unlikely, and questionable whether the client + * could do MDS recovery under OOM anyways... */ +static void mdc_realloc_openmsg(struct ptlrpc_request *req, + struct mdt_body *body) { - struct client_obd *cli_obd = &exp->exp_obd->u.cli; - struct clonefs_info *cl_info; + int rc; + + /* FIXME: remove this explicit offset. */ + rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4, + body->eadatasize); + if (rc) { + CERROR("Can't enlarge segment %d size to %d\n", + DLM_INTENT_REC_OFF + 4, body->eadatasize); + body->valid &= ~OBD_MD_FLEASIZE; + body->eadatasize = 0; + } +} + +static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp, + struct lookup_intent *it, + struct md_op_data *op_data, + void *lmm, int lmmsize, + void *cb_data) +{ + struct ptlrpc_request *req; + struct obd_device *obddev = class_exp2obd(exp); + struct ldlm_intent *lit; + int joinfile = !!((it->it_create_mode & M_JOIN_FILE) && + op_data->op_data); + CFS_LIST_HEAD(cancels); + int count = 0; + int mode; + int rc; ENTRY; - - if (cli_obd->cl_clone_info) { - cl_info = (struct clonefs_info *)lustre_msg_buf(msg, offset, - sizeof (*cl_info)); - memcpy(cl_info, cli_obd->cl_clone_info, sizeof(*cl_info)); - } - RETURN(0); -} + + it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG; + + /* XXX: openlock is not cancelled for cross-refs. */ + /* If inode is known, cancel conflicting OPEN locks. 
*/ + if (fid_is_sane(&op_data->op_fid2)) { + if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC)) + mode = LCK_CW; +#ifdef FMODE_EXEC + else if (it->it_flags & FMODE_EXEC) + mode = LCK_PR; #endif + else + mode = LCK_CR; + count = mdc_resource_get_unused(exp, &op_data->op_fid2, + &cancels, mode, + MDS_INODELOCK_OPEN); + } -/* We always reserve enough space in the reply packet for a stripe MD, because - * we don't know in advance the file type. */ -int mdc_enqueue(struct obd_export *exp, - int lock_type, - struct lookup_intent *it, - int lock_mode, - struct mdc_op_data *data, - struct lustre_handle *lockh, - void *lmm, - int lmmsize, - ldlm_completion_callback cb_completion, - ldlm_blocking_callback cb_blocking, - void *cb_data) + /* If CREATE or JOIN_FILE, cancel parent's UPDATE lock. */ + if (it->it_op & IT_CREAT || joinfile) + mode = LCK_EX; + else + mode = LCK_CR; + count += mdc_resource_get_unused(exp, &op_data->op_fid1, + &cancels, mode, + MDS_INODELOCK_UPDATE); + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), + &RQF_LDLM_INTENT_OPEN); + if (req == NULL) { + ldlm_lock_list_put(&cancels, l_bl_ast, count); + RETURN(ERR_PTR(-ENOMEM)); + } + + /* parent capability */ + mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); + /* child capability, reserve the size according to parent capa, it will + * be filled after we get the reply */ + mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa1); + + req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, + op_data->op_namelen + 1); + req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, + max(lmmsize, obddev->u.cli.cl_default_mds_easize)); + if (!joinfile) { + req_capsule_set_size(&req->rq_pill, &RMF_REC_JOINFILE, + RCL_CLIENT, 0); + } + + rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); + if (rc) { + ptlrpc_request_free(req); + return NULL; + } + + if (joinfile) { + __u64 head_size = *(__u64 *)op_data->op_data; + mdc_join_pack(req, op_data, head_size); + } + + spin_lock(&req->rq_lock); + req->rq_replay = req->rq_import->imp_replayable; + spin_unlock(&req->rq_lock); + + /* pack the intent */ + lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); + lit->opc = (__u64)it->it_op; + + /* pack the intended request */ + mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm, + lmmsize); + + /* for remote client, fetch remote perm for current user */ + if (client_is_remote(exp)) + req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, + sizeof(struct mdt_remote_perm)); + ptlrpc_request_set_replen(req); + return req; +} + +static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, + struct lookup_intent *it, + struct md_op_data *op_data) { struct ptlrpc_request *req; - struct obd_device *obddev = class_exp2obd(exp); - struct ldlm_res_id res_id = - { .name = {data->fid1.id, data->fid1.generation} }; - ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } }; - int size[6] = {sizeof(struct ldlm_request), sizeof(struct ldlm_intent)}; - int rc, flags = LDLM_FL_HAS_INTENT; - int repsize[4] = {sizeof(struct ldlm_reply), - sizeof(struct mds_body), - obddev->u.cli.cl_max_mds_easize, - obddev->u.cli.cl_max_mds_cookiesize}; - struct ldlm_reply *dlm_rep; - struct ldlm_intent *lit; - struct ldlm_request *lockreq; - void *eadata; - unsigned long irqflags; - int reply_buffers = 0; + struct obd_device *obddev = class_exp2obd(exp); + struct ldlm_intent *lit; + int rc; ENTRY; -// LDLM_DEBUG_NOLOCK("mdsintent=%s,name=%s,dir=%lu", -// ldlm_it2str(it->it_op), it_name, it_inode->i_ino); - - if 
(it->it_op & IT_OPEN) { - it->it_create_mode |= S_IFREG; - it->it_create_mode &= ~current->fs->umask; - - size[2] = sizeof(struct mds_rec_create); - size[3] = data->namelen + 1; - size[4] = obddev->u.cli.cl_max_mds_easize; - req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, - 5, size, NULL); - if (!req) - RETURN(-ENOMEM); - - spin_lock_irqsave (&req->rq_lock, irqflags); - req->rq_replay = 1; - spin_unlock_irqrestore (&req->rq_lock, irqflags); - - /* pack the intent */ - lit = lustre_msg_buf(req->rq_reqmsg, 1, sizeof (*lit)); - lit->opc = (__u64)it->it_op; - - /* pack the intended request */ - mdc_open_pack(req->rq_reqmsg, 2, data, it->it_create_mode, 0, - it->it_flags, lmm, lmmsize); - /* get ready for the reply */ - reply_buffers = 3; - req->rq_replen = lustre_msg_size(3, repsize); - } else if (it->it_op & (IT_GETATTR | IT_LOOKUP | IT_CHDIR)) { - int valid = OBD_MD_FLNOTOBD | OBD_MD_FLEASIZE; - size[2] = sizeof(struct mds_body); - size[3] = data->namelen + 1; - - if (it->it_op & IT_GETATTR) - policy.l_inodebits.bits = MDS_INODELOCK_UPDATE; -#if CONFIG_SNAPFS - size[4] = sizeof(struct clonefs_info); - req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 5, - size, NULL); -#else - req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 4, - size, NULL); + req = ptlrpc_request_alloc(class_exp2cliimp(exp), + &RQF_LDLM_INTENT_UNLINK); + if (req == NULL) + RETURN(ERR_PTR(-ENOMEM)); -#endif - if (!req) - RETURN(-ENOMEM); - - /* pack the intent */ - lit = lustre_msg_buf(req->rq_reqmsg, 1, sizeof (*lit)); - lit->opc = (__u64)it->it_op; - - /* pack the intended request */ - mdc_getattr_pack(req->rq_reqmsg, valid, 2, it->it_flags, data); -#if CONFIG_SNAPFS - mdc_set_clone_info(exp, req->rq_reqmsg, 4); -#endif - /* get ready for the reply */ - reply_buffers = 3; - req->rq_replen = lustre_msg_size(3, repsize); - } else if (it->it_op == IT_READDIR) { - policy.l_inodebits.bits = MDS_INODELOCK_UPDATE; - req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 1, - size, NULL); - - if (!req) - RETURN(-ENOMEM); - /* get ready for the reply */ - reply_buffers = 1; - req->rq_replen = lustre_msg_size(1, repsize); - } else if (it->it_op == IT_UNLINK) { - size[2] = sizeof(struct mds_body); - policy.l_inodebits.bits = MDS_INODELOCK_UPDATE; - req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 3, - size, NULL); - if (!req) - RETURN(-ENOMEM); + mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); + req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, + op_data->op_namelen + 1); - /* pack the intended request */ - mdc_getattr_pack(req->rq_reqmsg, 0, 2, 0, data); + rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); + if (rc) { + ptlrpc_request_free(req); + RETURN(ERR_PTR(rc)); + } - /* pack the intent */ - lit = lustre_msg_buf(req->rq_reqmsg, 1, sizeof (*lit)); - lit->opc = (__u64)it->it_op; + /* pack the intent */ + lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); + lit->opc = (__u64)it->it_op; - /* get ready for the reply */ - reply_buffers = 3; - req->rq_replen = lustre_msg_size(3, repsize); - } else { - LBUG(); - RETURN(-EINVAL); + /* pack the intended request */ + mdc_unlink_pack(req, op_data); + + req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, + obddev->u.cli.cl_max_mds_easize); + req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, + obddev->u.cli.cl_max_mds_cookiesize); + ptlrpc_request_set_replen(req); + RETURN(req); +} + +static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, + struct lookup_intent *it, + struct 
md_op_data *op_data) +{ + struct ptlrpc_request *req; + struct obd_device *obddev = class_exp2obd(exp); + obd_valid valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE | + OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA | + OBD_MD_FLMDSCAPA | OBD_MD_MEA | + (client_is_remote(exp) ? + OBD_MD_FLRMTPERM : OBD_MD_FLACL); + struct ldlm_intent *lit; + int rc; + ENTRY; + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), + &RQF_LDLM_INTENT_GETATTR); + if (req == NULL) + RETURN(ERR_PTR(-ENOMEM)); + + mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); + req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, + op_data->op_namelen + 1); + + rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); + if (rc) { + ptlrpc_request_free(req); + RETURN(ERR_PTR(rc)); + } + + /* pack the intent */ + lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); + lit->opc = (__u64)it->it_op; + + /* pack the intended request */ + mdc_getattr_pack(req, valid, it->it_flags, op_data); + + req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, + obddev->u.cli.cl_max_mds_easize); + if (client_is_remote(exp)) + req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, + sizeof(struct mdt_remote_perm)); + ptlrpc_request_set_replen(req); + RETURN(req); +} + +static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp) +{ + struct ptlrpc_request *req; + int rc; + ENTRY; + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE); + if (req == NULL) + RETURN(ERR_PTR(-ENOMEM)); + + rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); + if (rc) { + ptlrpc_request_free(req); + RETURN(ERR_PTR(rc)); } - mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it); - rc = ldlm_cli_enqueue(exp, req, obddev->obd_namespace, res_id, - lock_type, &policy, lock_mode, &flags,cb_blocking, - cb_completion, NULL, cb_data, NULL, 0, NULL, - lockh); - mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); + ptlrpc_request_set_replen(req); + RETURN(req); +} + +static int mdc_finish_enqueue(struct obd_export *exp, + struct ptlrpc_request *req, + struct ldlm_enqueue_info *einfo, + struct lookup_intent *it, + struct lustre_handle *lockh, + int rc) +{ + struct req_capsule *pill = &req->rq_pill; + struct ldlm_request *lockreq; + struct ldlm_reply *lockrep; + ENTRY; + + LASSERT(rc >= 0); /* Similarly, if we're going to replay this request, we don't want to * actually get a lock, just perform the intent. */ if (req->rq_transno || req->rq_replay) { - lockreq = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*lockreq)); + lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ); lockreq->lock_flags |= LDLM_FL_INTENT_ONLY; } - /* This can go when we're sure that this can never happen */ - LASSERT(rc != -ENOENT); if (rc == ELDLM_LOCK_ABORTED) { - lock_mode = 0; + einfo->ei_mode = 0; memset(lockh, 0, sizeof(*lockh)); rc = 0; - } else if (rc != 0) { - CERROR("ldlm_cli_enqueue: %d\n", rc); - LASSERT (rc < 0); - ptlrpc_req_finished(req); - RETURN(rc); } else { /* rc = 0 */ struct ldlm_lock *lock = ldlm_handle2lock(lockh); LASSERT(lock); /* If the server gave us back a different lock mode, we should * fix up our variables. 
*/ - if (lock->l_req_mode != lock_mode) { + if (lock->l_req_mode != einfo->ei_mode) { ldlm_lock_addref(lockh, lock->l_req_mode); - ldlm_lock_decref(lockh, lock_mode); - lock_mode = lock->l_req_mode; + ldlm_lock_decref(lockh, einfo->ei_mode); + einfo->ei_mode = lock->l_req_mode; } - - ldlm_lock_allow_match(lock); LDLM_LOCK_PUT(lock); } - dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep)); - LASSERT(dlm_rep != NULL); /* checked by ldlm_cli_enqueue() */ - LASSERT_REPSWABBED(req, 0); /* swabbed by ldlm_cli_enqueue() */ + lockrep = req_capsule_server_get(pill, &RMF_DLM_REP); + LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */ - it->d.lustre.it_disposition = (int) dlm_rep->lock_policy_res1; - it->d.lustre.it_status = (int) dlm_rep->lock_policy_res2; - it->d.lustre.it_lock_mode = lock_mode; + it->d.lustre.it_disposition = (int)lockrep->lock_policy_res1; + it->d.lustre.it_status = (int)lockrep->lock_policy_res2; + it->d.lustre.it_lock_mode = einfo->ei_mode; + it->d.lustre.it_lock_handle = lockh->cookie; it->d.lustre.it_data = req; - if (it->d.lustre.it_status < 0 && req->rq_replay) { - LASSERT(req->rq_transno == 0); - /* Don't hold error requests for replay. */ - spin_lock(&req->rq_lock); - req->rq_replay = 0; - spin_unlock(&req->rq_lock); - } + if (it->d.lustre.it_status < 0 && req->rq_replay) + mdc_clear_replay_flag(req, it->d.lustre.it_status); + + /* If we're doing an IT_OPEN which did not result in an actual + * successful open, then we need to remove the bit which saves + * this request for unconditional replay. + * + * It's important that we do this first! Otherwise we might exit the + * function without doing so, and try to replay a failed create + * (bug 3440) */ + if (it->it_op & IT_OPEN && req->rq_replay && + (!it_disposition(it, DISP_OPEN_OPEN) ||it->d.lustre.it_status != 0)) + mdc_clear_replay_flag(req, it->d.lustre.it_status); - DEBUG_REQ(D_RPCTRACE, req, "disposition: %x, status: %d", - it->d.lustre.it_disposition, it->d.lustre.it_status); + DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d", + it->it_op,it->d.lustre.it_disposition,it->d.lustre.it_status); /* We know what to expect, so we do any byte flipping required here */ - LASSERT(reply_buffers == 4 || reply_buffers == 3 || reply_buffers == 1); - if (reply_buffers >= 3) { - struct mds_body *body; + if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) { + struct mdt_body *body; - body = lustre_swab_repbuf(req, 1, sizeof (*body), - lustre_swab_mds_body); + body = req_capsule_server_get(pill, &RMF_MDT_BODY); if (body == NULL) { - CERROR ("Can't swab mds_body\n"); + CERROR ("Can't swab mdt_body\n"); RETURN (-EPROTO); } - if ((body->valid & OBD_MD_FLEASIZE) != 0) { - /* The eadata is opaque; just check that it is there. - * Eventually, obd_unpackmd() will check the contents */ - eadata = lustre_swab_repbuf(req, 2, body->eadatasize, - NULL); - if (eadata == NULL) { - CERROR ("Missing/short eadata\n"); - RETURN (-EPROTO); + if (it_disposition(it, DISP_OPEN_OPEN) && + !it_open_error(DISP_OPEN_OPEN, it)) { + /* + * If this is a successful OPEN request, we need to set + * replay handler and data early, so that if replay + * happens immediately after swabbing below, new reply + * is swabbed by that handler correctly. + */ + mdc_set_open_replay_data(NULL, NULL, req); + } + + if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) { + void *eadata; + + /* + * The eadata is opaque; just check that it is there. + * Eventually, obd_unpackmd() will check the contents. 
+ */ + eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, + body->eadatasize); + if (eadata == NULL) + RETURN(-EPROTO); + + /* + * We save the reply LOV EA in case we have to replay a + * create for recovery. If we didn't allocate a large + * enough request buffer above we need to reallocate it + * here to hold the actual LOV EA. + * + * To not save LOV EA if request is not going to replay + * (for example error one). + */ + if ((it->it_op & IT_OPEN) && req->rq_replay) { + void *lmm; + if (req_capsule_get_size(pill, &RMF_EADATA, + RCL_CLIENT) < + body->eadatasize) { + mdc_realloc_openmsg(req, body); + req_capsule_set_size(pill, &RMF_EADATA, + RCL_CLIENT, + body->eadatasize); + } + lmm = req_capsule_client_get(pill, &RMF_EADATA); + if (lmm) + memcpy(lmm, eadata, body->eadatasize); } - if (it->it_op & IT_OPEN) { - void *replayea; + } - replayea = lustre_msg_buf(req->rq_reqmsg, 4, - body->eadatasize); - LASSERT(replayea); - memcpy(replayea, eadata, body->eadatasize); + if (body->valid & OBD_MD_FLRMTPERM) { + struct mdt_remote_perm *perm; - LASSERT(req->rq_reqmsg->bufcount == 5); - req->rq_reqmsg->buflens[4] = body->eadatasize; - /* If this isn't the last buffer, we might - * have to shift other data around. */ + LASSERT(client_is_remote(exp)); + perm = req_capsule_server_swab_get(pill, &RMF_ACL, + lustre_swab_mdt_remote_perm); + if (perm == NULL) + RETURN(-EPROTO); + } + if (body->valid & OBD_MD_FLMDSCAPA) { + struct lustre_capa *capa, *p; + + capa = req_capsule_server_get(pill, &RMF_CAPA1); + if (capa == NULL) + RETURN(-EPROTO); + + if (it->it_op & IT_OPEN) { + /* client fid capa will be checked in replay */ + p = req_capsule_client_get(pill, &RMF_CAPA2); + LASSERT(p); + *p = *capa; } } + if (body->valid & OBD_MD_FLOSSCAPA) { + struct lustre_capa *capa; + + capa = req_capsule_server_get(pill, &RMF_CAPA2); + if (capa == NULL) + RETURN(-EPROTO); + } } RETURN(rc); } -EXPORT_SYMBOL(mdc_enqueue); -/* +/* We always reserve enough space in the reply packet for a stripe MD, because + * we don't know in advance the file type. 
*/ +int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, + struct lookup_intent *it, struct md_op_data *op_data, + struct lustre_handle *lockh, void *lmm, int lmmsize, + struct ptlrpc_request **reqp, int extra_lock_flags) +{ + struct obd_device *obddev = class_exp2obd(exp); + struct ptlrpc_request *req = NULL; + struct req_capsule *pill; + int flags = extra_lock_flags; + int rc; + struct ldlm_res_id res_id; + ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } }; + ENTRY; + + LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n", + einfo->ei_type); + + fid_build_reg_res_name(&op_data->op_fid1, &res_id); + + if (it) + flags |= LDLM_FL_HAS_INTENT; + if (it && it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR)) + policy.l_inodebits.bits = MDS_INODELOCK_UPDATE; + + if (reqp) + req = *reqp; + + if (!it) { + /* The only way right now is FLOCK, in this case we hide flock + policy as lmm, but lmmsize is 0 */ + LASSERT(lmm && lmmsize == 0); + LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n", + einfo->ei_type); + policy = *(ldlm_policy_data_t *)lmm; + res_id.name[3] = LDLM_FLOCK; + } else if (it->it_op & IT_OPEN) { + int joinfile = !!((it->it_create_mode & M_JOIN_FILE) && + op_data->op_data); + + req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize, + einfo->ei_cbdata); + if (!joinfile) { + policy.l_inodebits.bits = MDS_INODELOCK_UPDATE; + einfo->ei_cbdata = NULL; + lmm = NULL; + } else + it->it_create_mode &= ~M_JOIN_FILE; + } else if (it->it_op & IT_UNLINK) + req = mdc_intent_unlink_pack(exp, it, op_data); + else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) + req = mdc_intent_getattr_pack(exp, it, op_data); + else if (it->it_op == IT_READDIR) + req = ldlm_enqueue_pack(exp); + else { + LBUG(); + RETURN(-EINVAL); + } + + if (IS_ERR(req)) + RETURN(PTR_ERR(req)); + pill = &req->rq_pill; + + /* It is important to obtain rpc_lock first (if applicable), so that + * threads that are serialised with rpc_lock are not polluting our + * rpcs in flight counter. We do not do flock request limiting, though*/ + if (it) { + mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it); + mdc_enter_request(&obddev->u.cli); + } + rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL, + 0, NULL, lockh, 0); + if (reqp) + *reqp = req; + + if (it) { + mdc_exit_request(&obddev->u.cli); + mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it); + } + if (!it) { + /* For flock requests we immediatelly return without further + delay and let caller deal with the rest, since rest of + this function metadata processing makes no sense for flock + requests anyway */ + RETURN(rc); + } + + if (rc < 0) { + CERROR("ldlm_cli_enqueue: %d\n", rc); + mdc_clear_replay_flag(req, rc); + ptlrpc_req_finished(req); + RETURN(rc); + } + rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); + + RETURN(rc); +} + +static int mdc_finish_intent_lock(struct obd_export *exp, + struct ptlrpc_request *request, + struct md_op_data *op_data, + struct lookup_intent *it, + struct lustre_handle *lockh) +{ + struct lustre_handle old_lock; + struct mdt_body *mdt_body; + struct ldlm_lock *lock; + int rc; + + + LASSERT(request != NULL); + LASSERT(request != LP_POISON); + LASSERT(request->rq_repmsg != LP_POISON); + + if (!it_disposition(it, DISP_IT_EXECD)) { + /* The server failed before it even started executing the + * intent, i.e. because it couldn't unpack the request. 
*/ + LASSERT(it->d.lustre.it_status != 0); + RETURN(it->d.lustre.it_status); + } + rc = it_open_error(DISP_IT_EXECD, it); + if (rc) + RETURN(rc); + + mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); + LASSERT(mdt_body != NULL); /* mdc_enqueue checked */ + + /* If we were revalidating a fid/name pair, mark the intent in + * case we fail and get called again from lookup */ + if (fid_is_sane(&op_data->op_fid2) && + it->it_create_mode & M_CHECK_STALE && + it->it_op != IT_GETATTR) { + it_set_disposition(it, DISP_ENQ_COMPLETE); + + /* Also: did we find the same inode? */ + /* sever can return one of two fids: + * op_fid2 - new allocated fid - if file is created. + * op_fid3 - existent fid - if file only open. + * op_fid3 is saved in lmv_intent_open */ + if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) && + (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) { + CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID + "\n", PFID(&op_data->op_fid2), + PFID(&op_data->op_fid2), PFID(&mdt_body->fid1)); + RETURN(-ESTALE); + } + } + + rc = it_open_error(DISP_LOOKUP_EXECD, it); + if (rc) + RETURN(rc); + + /* keep requests around for the multiple phases of the call + * this shows the DISP_XX must guarantee we make it into the call + */ + if (!it_disposition(it, DISP_ENQ_CREATE_REF) && + it_disposition(it, DISP_OPEN_CREATE) && + !it_open_error(DISP_OPEN_CREATE, it)) { + it_set_disposition(it, DISP_ENQ_CREATE_REF); + ptlrpc_request_addref(request); /* balanced in ll_create_node */ + } + if (!it_disposition(it, DISP_ENQ_OPEN_REF) && + it_disposition(it, DISP_OPEN_OPEN) && + !it_open_error(DISP_OPEN_OPEN, it)) { + it_set_disposition(it, DISP_ENQ_OPEN_REF); + ptlrpc_request_addref(request); /* balanced in ll_file_open */ + /* BUG 11546 - eviction in the middle of open rpc processing */ + OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout); + } + + if (it->it_op & IT_CREAT) { + /* XXX this belongs in ll_create_it */ + } else if (it->it_op == IT_OPEN) { + LASSERT(!it_disposition(it, DISP_OPEN_CREATE)); + } else { + LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP)); + } + + /* If we already have a matching lock, then cancel the new + * one. We have to set the data here instead of in + * mdc_enqueue, because we need to use the child's inode as + * the l_ast_data to match, and that's not available until + * intent_finish has performed the iget().) 
*/ + lock = ldlm_handle2lock(lockh); + if (lock) { + ldlm_policy_data_t policy = lock->l_policy_data; + LDLM_DEBUG(lock, "matching against this"); + + LASSERTF(fid_res_name_eq(&mdt_body->fid1, + &lock->l_resource->lr_name), + "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n", + (unsigned long)lock->l_resource->lr_name.name[0], + (unsigned long)lock->l_resource->lr_name.name[1], + (unsigned long)lock->l_resource->lr_name.name[2], + (unsigned long)fid_seq(&mdt_body->fid1), + (unsigned long)fid_oid(&mdt_body->fid1), + (unsigned long)fid_ver(&mdt_body->fid1)); + LDLM_LOCK_PUT(lock); + + memcpy(&old_lock, lockh, sizeof(*lockh)); + if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL, + LDLM_IBITS, &policy, LCK_NL, &old_lock, 0)) { + ldlm_lock_decref_and_cancel(lockh, + it->d.lustre.it_lock_mode); + memcpy(lockh, &old_lock, sizeof(old_lock)); + it->d.lustre.it_lock_handle = lockh->cookie; + } + } + CDEBUG(D_DENTRY,"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n", + op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op), + it->d.lustre.it_status, it->d.lustre.it_disposition, rc); + RETURN(rc); +} + +/* * This long block is all about fixing up the lock and request state * so that it is correct as of the moment _before_ the operation was * applied; that way, the VFS will think that everything is normal and @@ -445,60 +841,55 @@ EXPORT_SYMBOL(mdc_enqueue); * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the * child lookup. */ -int mdc_intent_lock(struct obd_export *exp, struct ll_uctxt *uctxt, - struct ll_fid *pfid, const char *name, int len, - void *lmm, int lmmsize, struct ll_fid *cfid, - struct lookup_intent *it, int lookup_flags, - struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking) +int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data, + void *lmm, int lmmsize, struct lookup_intent *it, + int lookup_flags, struct ptlrpc_request **reqp, + ldlm_blocking_callback cb_blocking, + int extra_lock_flags) { struct lustre_handle lockh; - struct ptlrpc_request *request; int rc = 0; - struct mds_body *mds_body; - struct lustre_handle old_lock; - struct ldlm_lock *lock; ENTRY; LASSERT(it); - CDEBUG(D_DLMTRACE, "name: %*s in %ld, intent: %s\n", len, name, - pfid ? (unsigned long) pfid->id : 0 , ldlm_it2str(it->it_op)); + CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID + ", intent: %s flags %#o\n", op_data->op_namelen, + op_data->op_name, PFID(&op_data->op_fid2), + PFID(&op_data->op_fid1), ldlm_it2str(it->it_op), + it->it_flags); - if (cfid && (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR || - it->it_op == IT_CHDIR)) { + lockh.cookie = 0; + if (fid_is_sane(&op_data->op_fid2) && + (it->it_op & (IT_LOOKUP | IT_GETATTR))) { /* We could just return 1 immediately, but since we should only * be called in revalidate_it if we already have a lock, let's * verify that. 
*/ - struct ldlm_res_id res_id ={.name = {cfid->id, - cfid->generation}}; - struct lustre_handle lockh; ldlm_policy_data_t policy; - int mode = LCK_PR; - - /* For the GETATTR case, ll_revalidate_it issues two separate - queries - for LOOKUP and for UPDATE lock because if cannot - check them together - we might have those two bits to be - present in two separate granted locks */ - policy.l_inodebits.bits = - (it->it_op == IT_GETATTR)?MDS_INODELOCK_UPDATE: - MDS_INODELOCK_LOOKUP; - mode = LCK_PR; - rc = ldlm_lock_match(exp->exp_obd->obd_namespace, - LDLM_FL_BLOCK_GRANTED, &res_id, - LDLM_IBITS, &policy, LCK_PR, &lockh); - if (!rc) { - mode = LCK_PW; - rc = ldlm_lock_match(exp->exp_obd->obd_namespace, - LDLM_FL_BLOCK_GRANTED, &res_id, - LDLM_IBITS, &policy, LCK_PW, - &lockh); - } - if (rc) { - memcpy(&it->d.lustre.it_lock_handle, &lockh, - sizeof(lockh)); + ldlm_mode_t mode; + + /* As not all attributes are kept under update lock, e.g. + owner/group/acls are under lookup lock, we need both + ibits for GETATTR. */ + + /* For CMD, UPDATE lock and LOOKUP lock can not be got + * at the same for cross-object, so we can not match + * the 2 lock at the same time FIXME: but how to handle + * the above situation */ + policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ? + MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP; + + mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, + &op_data->op_fid2, LDLM_IBITS, &policy, + LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh); + if (mode) { + it->d.lustre.it_lock_handle = lockh.cookie; it->d.lustre.it_lock_mode = mode; } - RETURN(rc); + + /* Only return failure if it was not GETATTR by cfid + (from inode_revalidate) */ + if (mode || op_data->op_namelen != 0) + RETURN(!!mode); } /* lookup_it may be called only after revalidate_it has run, because @@ -510,107 +901,148 @@ int mdc_intent_lock(struct obd_export *exp, struct ll_uctxt *uctxt, * this and use the request from revalidate. In this case, revalidate * never dropped its reference, so the refcounts are all OK */ if (!it_disposition(it, DISP_ENQ_COMPLETE)) { - struct mdc_op_data op_data; - mdc_fid2mdc_op_data(&op_data, uctxt, pfid, cfid, name, len, 0); - - rc = mdc_enqueue(exp, LDLM_IBITS, it, it_to_lock_mode(it), - &op_data, &lockh, lmm, lmmsize, - ldlm_completion_ast, cb_blocking, NULL); + struct ldlm_enqueue_info einfo = + { LDLM_IBITS, it_to_lock_mode(it), cb_blocking, + ldlm_completion_ast, NULL, NULL, NULL }; + + /* For case if upper layer did not alloc fid, do it now. */ + if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) { + rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data); + if (rc < 0) { + CERROR("Can't alloc new fid, rc %d\n", rc); + RETURN(rc); + } + } + rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh, + lmm, lmmsize, NULL, extra_lock_flags); if (rc < 0) RETURN(rc); - memcpy(&it->d.lustre.it_lock_handle, &lockh, sizeof(lockh)); + } else if (!fid_is_sane(&op_data->op_fid2) || + !(it->it_create_mode & M_CHECK_STALE)) { + /* DISP_ENQ_COMPLETE set means there is extra reference on + * request referenced from this intent, saved for subsequent + * lookup. This path is executed when we proceed to this + * lookup, so we clear DISP_ENQ_COMPLETE */ + it_clear_disposition(it, DISP_ENQ_COMPLETE); } - request = *reqp = it->d.lustre.it_data; - LASSERT(request != NULL); + *reqp = it->d.lustre.it_data; + rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh); + RETURN(rc); +} - if (!it_disposition(it, DISP_IT_EXECD)) { - /* The server failed before it even started executing the - * intent, i.e. 
because it couldn't unpack the request. */ - LASSERT(it->d.lustre.it_status != 0); - RETURN(it->d.lustre.it_status); - } - rc = it_open_error(DISP_IT_EXECD, it); - if (rc) - RETURN(rc); +static int mdc_intent_getattr_async_interpret(const struct lu_env *env, + struct ptlrpc_request *req, + void *unused, int rc) +{ + struct obd_export *exp = req->rq_async_args.pointer_arg[0]; + struct md_enqueue_info *minfo = req->rq_async_args.pointer_arg[1]; + struct ldlm_enqueue_info *einfo = req->rq_async_args.pointer_arg[2]; + struct lookup_intent *it; + struct lustre_handle *lockh; + struct obd_device *obddev; + int flags = LDLM_FL_HAS_INTENT; + ENTRY; - mds_body = lustre_msg_buf(request->rq_repmsg, 1, sizeof(*mds_body)); - LASSERT(mds_body != NULL); /* mdc_enqueue checked */ - LASSERT_REPSWABBED(request, 1); /* mdc_enqueue swabbed */ + it = &minfo->mi_it; + lockh = &minfo->mi_lockh; - /* If we were revalidating a fid/name pair, mark the intent in - * case we fail and get called again from lookup */ - if (cfid != NULL) { - it_set_disposition(it, DISP_ENQ_COMPLETE); - /* Also: did we find the same inode? */ - /* we have to compare all the fields but type, because - * MDS can return mds/ino/generation triple if inode - * lives on another MDS -bzzz */ - if (cfid->generation != mds_body->fid1.generation || - cfid->id != mds_body->fid1.id || - cfid->mds != mds_body->fid1.mds) - RETURN(-ESTALE); - } + obddev = class_exp2obd(exp); - /* If we're doing an IT_OPEN which did not result in an actual - * successful open, then we need to remove the bit which saves - * this request for unconditional replay. */ - if (it->it_op & IT_OPEN) { - if (!it_disposition(it, DISP_OPEN_OPEN) || - it->d.lustre.it_status != 0) { - unsigned long irqflags; - - spin_lock_irqsave(&request->rq_lock, irqflags); - request->rq_replay = 0; - spin_unlock_irqrestore(&request->rq_lock, irqflags); - } + mdc_exit_request(&obddev->u.cli); + if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GETATTR_ENQUEUE)) + rc = -ETIMEDOUT; + + rc = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, 1, einfo->ei_mode, + &flags, NULL, 0, NULL, lockh, rc); + if (rc < 0) { + CERROR("ldlm_cli_enqueue_fini: %d\n", rc); + mdc_clear_replay_flag(req, rc); + GOTO(out, rc); } - rc = it_open_error(DISP_LOOKUP_EXECD, it); + rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); if (rc) - RETURN(rc); + GOTO(out, rc); - /* keep requests around for the multiple phases of the call - * this shows the DISP_XX must guarantee we make it into the call - */ - if (it_disposition(it, DISP_OPEN_CREATE) && - !it_open_error(DISP_OPEN_CREATE, it)) - ptlrpc_request_addref(request); - if (it_disposition(it, DISP_OPEN_OPEN) && - !it_open_error(DISP_OPEN_OPEN, it)) - ptlrpc_request_addref(request); + rc = mdc_finish_intent_lock(exp, req, &minfo->mi_data, it, lockh); + EXIT; - if (it->it_op & IT_CREAT) { - /* XXX this belongs in ll_create_iit */ - } else if (it->it_op == IT_OPEN) { - LASSERT(!it_disposition(it, DISP_OPEN_CREATE)); - } else { - LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP | IT_CHDIR)); +out: + OBD_FREE_PTR(einfo); + minfo->mi_cb(req, minfo, rc); + return 0; +} + +int mdc_intent_getattr_async(struct obd_export *exp, + struct md_enqueue_info *minfo, + struct ldlm_enqueue_info *einfo) +{ + struct md_op_data *op_data = &minfo->mi_data; + struct lookup_intent *it = &minfo->mi_it; + struct ptlrpc_request *req; + struct obd_device *obddev = class_exp2obd(exp); + struct ldlm_res_id res_id; + ldlm_policy_data_t policy = { + .l_inodebits = { MDS_INODELOCK_LOOKUP } + }; + int rc; + int flags = 
LDLM_FL_HAS_INTENT; + ENTRY; + + CDEBUG(D_DLMTRACE,"name: %.*s in inode "DFID", intent: %s flags %#o\n", + op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), + ldlm_it2str(it->it_op), it->it_flags); + + fid_build_reg_res_name(&op_data->op_fid1, &res_id); + req = mdc_intent_getattr_pack(exp, it, op_data); + if (!req) + RETURN(-ENOMEM); + + mdc_enter_request(&obddev->u.cli); + rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL, + 0, NULL, &minfo->mi_lockh, 1); + if (rc < 0) { + mdc_exit_request(&obddev->u.cli); + RETURN(rc); } - /* If we already have a matching lock, then cancel the new - * one. We have to set the data here instead of in - * mdc_enqueue, because we need to use the child's inode as - * the l_ast_data to match, and that's not available until - * intent_finish has performed the iget().) */ - lock = ldlm_handle2lock(&lockh); - if (lock) { - ldlm_policy_data_t policy = lock->l_policy_data; - LDLM_DEBUG(lock, "matching against this"); - LDLM_LOCK_PUT(lock); - memcpy(&old_lock, &lockh, sizeof(lockh)); - if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL, - LDLM_IBITS, &policy, LCK_NL, &old_lock)) { - ldlm_lock_decref_and_cancel(&lockh, - it->d.lustre.it_lock_mode); - memcpy(&lockh, &old_lock, sizeof(old_lock)); - memcpy(&it->d.lustre.it_lock_handle, &lockh, - sizeof(lockh)); - } + req->rq_async_args.pointer_arg[0] = exp; + req->rq_async_args.pointer_arg[1] = minfo; + req->rq_async_args.pointer_arg[2] = einfo; + req->rq_interpret_reply = mdc_intent_getattr_async_interpret; + ptlrpcd_add_req(req, PSCOPE_OTHER); + + RETURN(0); +} + +int mdc_revalidate_lock(struct obd_export *exp, + struct lookup_intent *it, + struct lu_fid *fid) +{ + /* We could just return 1 immediately, but since we should only + * be called in revalidate_it if we already have a lock, let's + * verify that. */ + struct ldlm_res_id res_id; + struct lustre_handle lockh; + ldlm_policy_data_t policy; + ldlm_mode_t mode; + ENTRY; + + fid_build_reg_res_name(fid, &res_id); + /* As not all attributes are kept under update lock, e.g. + owner/group/acls are under lookup lock, we need both + ibits for GETATTR. */ + policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ? + MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP : + MDS_INODELOCK_LOOKUP; + + mode = ldlm_lock_match(exp->exp_obd->obd_namespace, + LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_IBITS, + &policy, LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh, 0); + if (mode) { + it->d.lustre.it_lock_handle = lockh.cookie; + it->d.lustre.it_lock_mode = mode; } - CDEBUG(D_DENTRY, "D_IT dentry %*s intent: %s status %d disp %x rc %d\n", - len, name, ldlm_it2str(it->it_op), it->d.lustre.it_status, - it->d.lustre.it_disposition, rc); - RETURN(rc); + RETURN(!!mode); } -EXPORT_SYMBOL(mdc_intent_lock);
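
---

Editorial note (not part of the patch above): the hunks replace the old `ll_fid` id/generation resource naming with `lu_fid`-based naming via `fid_build_reg_res_name()`, and collapse the previous two-pass LCK_PR-then-LCK_PW matching into a single `ldlm_lock_match()` call that tries every compatible mode at once, as in the new `mdc_lock_match()` and `mdc_revalidate_lock()`. The sketch below illustrates that pattern for a caller probing for an already-granted inodebits lock. It is a minimal illustration only: the wrapper name `probe_getattr_lock` and the include paths are assumptions, while every other identifier, constant, and call signature is taken from the hunks above.

```c
/*
 * Illustrative sketch only -- not part of the patch.
 * Include paths are assumed for a Lustre client tree of this era.
 */
#include <obd_class.h>    /* class_exp2obd() (assumed location) */
#include <lustre_dlm.h>   /* ldlm_lock_match(), ldlm_policy_data_t (assumed) */
#include <lustre_fid.h>   /* fid_build_reg_res_name() (assumed) */

/* Probe for a granted lock covering a GETATTR on @fid, mirroring the
 * pattern used by mdc_revalidate_lock() in the patch above. */
static ldlm_mode_t probe_getattr_lock(struct obd_export *exp,
                                      const struct lu_fid *fid,
                                      struct lustre_handle *lockh)
{
        struct ldlm_res_id res_id;
        ldlm_policy_data_t policy;

        /* The FID alone now names the DLM resource; no id/generation pair. */
        fid_build_reg_res_name(fid, &res_id);

        /* GETATTR needs both bits: owner/group/ACLs live under LOOKUP,
         * size/times under UPDATE, as noted in the patch comments. */
        policy.l_inodebits.bits = MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP;

        /* One match call covers all useful modes instead of matching
         * LCK_PR and then LCK_PW separately; returns the granted mode
         * (and fills *lockh) or 0 if nothing matches. */
        return ldlm_lock_match(class_exp2obd(exp)->obd_namespace,
                               LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_IBITS,
                               &policy, LCK_CR | LCK_CW | LCK_PR | LCK_PW,
                               lockh, 0);
}
```

A caller in the style of the revalidate path would store the result in `it->d.lustre.it_lock_mode` and the handle cookie in `it->d.lustre.it_lock_handle` when the returned mode is non-zero, exactly as `mdc_intent_lock()` and `mdc_revalidate_lock()` do in the hunks above.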