1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
6 * This file is part of the Lustre file system, http://www.lustre.org
7 * Lustre is a trademark of Cluster File Systems, Inc.
9 * You may have signed or agreed to another license before downloading
10 * this software. If so, you are bound by the terms and conditions
11 * of that agreement, and the following does not apply to you. See the
12 * LICENSE file included with this distribution for more information.
14 * If you did not agree to a different license, then this copy of Lustre
15 * is open source software; you can redistribute it and/or modify it
16 * under the terms of version 2 of the GNU General Public License as
17 * published by the Free Software Foundation.
19 * In either case, Lustre is distributed in the hope that it will be
20 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
21 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * license text for more details.
26 # define EXPORT_SYMTAB
28 #define DEBUG_SUBSYSTEM S_MDC
31 # include <linux/module.h>
32 # include <linux/pagemap.h>
33 # include <linux/miscdevice.h>
34 # include <linux/init.h>
36 # include <liblustre.h>
39 #include <linux/lustre_acl.h>
40 #include <obd_class.h>
41 #include <lustre_dlm.h>
42 /* fid_res_name_eq() */
43 #include <lustre_fid.h>
44 #include <lprocfs_status.h>
45 #include "mdc_internal.h"
47 int it_disposition(struct lookup_intent *it, int flag)
49 return it->d.lustre.it_disposition & flag;
51 EXPORT_SYMBOL(it_disposition);
53 void it_set_disposition(struct lookup_intent *it, int flag)
55 it->d.lustre.it_disposition |= flag;
57 EXPORT_SYMBOL(it_set_disposition);
59 void it_clear_disposition(struct lookup_intent *it, int flag)
61 it->d.lustre.it_disposition &= ~flag;
63 EXPORT_SYMBOL(it_clear_disposition);
/*
 * Map an intent operation to the DLM lock mode that should be requested
 * for it.
 *
 * NOTE(review): the per-branch return statements are elided in this
 * extract of the file; presumably CREAT requests a stronger (write-ish)
 * mode and the read-type intents a weaker one -- confirm against the
 * full source before relying on this.
 */
static int it_to_lock_mode(struct lookup_intent *it)
        /* CREAT needs to be tested before open (both could be set) */
        if (it->it_op & IT_CREAT)
        else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP))
79 int it_open_error(int phase, struct lookup_intent *it)
81 if (it_disposition(it, DISP_OPEN_OPEN)) {
82 if (phase >= DISP_OPEN_OPEN)
83 return it->d.lustre.it_status;
88 if (it_disposition(it, DISP_OPEN_CREATE)) {
89 if (phase >= DISP_OPEN_CREATE)
90 return it->d.lustre.it_status;
95 if (it_disposition(it, DISP_LOOKUP_EXECD)) {
96 if (phase >= DISP_LOOKUP_EXECD)
97 return it->d.lustre.it_status;
102 if (it_disposition(it, DISP_IT_EXECD)) {
103 if (phase >= DISP_IT_EXECD)
104 return it->d.lustre.it_status;
108 CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
109 it->d.lustre.it_status);
113 EXPORT_SYMBOL(it_open_error);
/* this must be called on a lockh that is known to have a referenced lock */
/*
 * Attach @data (the child inode) as the lock's l_ast_data, so that
 * blocking/completion callbacks can locate the inode later.  If the lock
 * already carries a *different* inode, that is only legal when the old
 * inode is being torn down (I_FREEING) -- anything else trips the
 * LASSERTF.
 *
 * NOTE(review): several lines are elided in this extract -- the function
 * prologue/early-exit, the i_state argument of the LASSERTF (its format
 * string has a fourth "%lu" with no visible matching argument), the lock
 * reference release and the return statement.  Confirm against the full
 * source.
 */
int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data)
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock((struct lustre_handle *)lockh);
        LASSERT(lock != NULL);
        lock_res_and_lock(lock);
        if (lock->l_ast_data && lock->l_ast_data != data) {
                struct inode *new_inode = data;
                struct inode *old_inode = lock->l_ast_data;
                /* Replacing live ast_data is only legal while the old
                 * inode is on its way out. */
                LASSERTF(old_inode->i_state & I_FREEING,
                         "Found existing inode %p/%lu/%u state %lu in lock: "
                         "setting data to %p/%lu/%u\n", old_inode,
                         old_inode->i_ino, old_inode->i_generation,
                         new_inode, new_inode->i_ino, new_inode->i_generation);
        lock->l_ast_data = data;
        unlock_res_and_lock(lock);
149 int mdc_lock_match(struct obd_export *exp, int flags,
150 const struct lu_fid *fid, ldlm_type_t type,
151 ldlm_policy_data_t *policy, ldlm_mode_t mode,
152 struct lustre_handle *lockh)
154 struct ldlm_res_id res_id =
155 { .name = {fid_seq(fid),
158 struct obd_device *obd = class_exp2obd(exp);
162 rc = ldlm_lock_match(obd->obd_namespace, flags,
163 &res_id, type, policy, mode, lockh);
168 int mdc_cancel_unused(struct obd_export *exp,
169 const struct lu_fid *fid,
170 ldlm_policy_data_t *policy,
171 ldlm_mode_t mode, int flags, void *opaque)
173 struct ldlm_res_id res_id =
174 { .name = {fid_seq(fid),
177 struct obd_device *obd = class_exp2obd(exp);
182 rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
183 policy, mode, flags, opaque);
187 int mdc_change_cbdata(struct obd_export *exp,
188 const struct lu_fid *fid,
189 ldlm_iterator_t it, void *data)
191 struct ldlm_res_id res_id = { .name = {0} };
194 res_id.name[0] = fid_seq(fid);
195 res_id.name[1] = fid_oid(fid);
196 res_id.name[2] = fid_ver(fid);
198 ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace,
/*
 * Stop @req from being held for replay, and sanity-check that an error
 * reply (@rc != 0) does not also carry a transaction number -- a server
 * must not assign a transno to a failed operation.
 *
 * NOTE(review): the statement that actually clears req->rq_replay
 * (inside the rq_lock critical section) and the handling after the
 * DEBUG_REQ are elided in this extract -- confirm against the full
 * source.
 */
static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
        /* Don't hold error requests for replay. */
        if (req->rq_replay) {
                spin_lock(&req->rq_lock);
                spin_unlock(&req->rq_lock);
        if (rc && req->rq_transno != 0) {
                DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
219 /* Save a large LOV EA into the request buffer so that it is available
220 * for replay. We don't do this in the initial request because the
221 * original request doesn't need this buffer (at most it sends just the
222 * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty
223 * buffer and may also be difficult to allocate and save a very large
224 * request buffer for each open. (bug 5707)
226 * OOM here may cause recovery failure if lmm is needed (only for the
227 * original open if the MDS crashed just when this client also OOM'd)
228 * but this is incredibly unlikely, and questionable whether the client
229 * could do MDS recovery under OOM anyways... */
230 static void mdc_realloc_openmsg(struct ptlrpc_request *req,
231 struct mdt_body *body, int size[9])
236 rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4,
239 CERROR("Can't enlarge segment %d size to %d\n",
240 DLM_INTENT_REC_OFF + 4, body->eadatasize);
241 body->valid &= ~OBD_MD_FLEASIZE;
242 body->eadatasize = 0;
/* We always reserve enough space in the reply packet for a stripe MD, because
 * we don't know in advance the file type. */
/*
 * Build and send an intent-lock enqueue RPC to the MDS.  Depending on
 * it->it_op the intent carries an open/create, unlink, getattr/lookup or
 * readdir sub-request.  On success the granted lock handle is stored in
 * *lockh and the intent's disposition/status/lock_mode/data fields are
 * filled in from the server reply; the reply is then swabbed and its
 * EA/remote-perm/capability segments validated.
 *
 * NOTE(review): this extract elides many lines of the original function
 * (closing braces, continuation arguments of several calls, error-path
 * GOTO/RETURN statements).  The annotations below only describe what is
 * visible here -- consult the full source for the elided parts.
 */
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
                struct lookup_intent *it, struct md_op_data *op_data,
                struct lustre_handle *lockh, void *lmm, int lmmsize,
                int extra_lock_flags)
        struct ptlrpc_request *req;
        struct obd_device *obddev = class_exp2obd(exp);
        /* Resource name is derived from the parent fid (seq/oid/ver). */
        struct ldlm_res_id res_id =
                { .name = {fid_seq(&op_data->op_fid1),
                           fid_oid(&op_data->op_fid1),
                           fid_ver(&op_data->op_fid1)} };
        ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } };
        struct ldlm_request *lockreq;
        struct ldlm_intent *lit;
        struct ldlm_reply *lockrep;
        /* Request/reply buffer layouts; intent-specific segments are
         * filled in per-opcode below. */
        int size[9] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
                        [DLM_LOCKREQ_OFF] = sizeof(*lockreq),
                        [DLM_INTENT_IT_OFF] = sizeof(*lit),
        int repsize[7] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
                           [DLM_LOCKREPLY_OFF] = sizeof(*lockrep),
                           [DLM_REPLY_REC_OFF] = sizeof(struct mdt_body),
                           [DLM_REPLY_REC_OFF+1] = obddev->u.cli.
        int flags = extra_lock_flags | LDLM_FL_HAS_INTENT;
        int repbufcnt = 4, rc;

        /* Metadata locks are always inodebits locks. */
        LASSERTF(einfo->ei_type == LDLM_IBITS,"lock type %d\n", einfo->ei_type);

        if (it->it_op & IT_OPEN) {
                int do_join = !!(it->it_flags & O_JOIN_FILE);
                CFS_LIST_HEAD(cancels);

                it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;

                size[DLM_INTENT_REC_OFF] = sizeof(struct mdt_rec_create);
                /* parent capability */
                size[DLM_INTENT_REC_OFF + 1] = op_data->op_capa1 ?
                        sizeof(struct lustre_capa) : 0;
                /* child capability, used for replay only */
                size[DLM_INTENT_REC_OFF + 2] = sizeof(struct lustre_capa);
                size[DLM_INTENT_REC_OFF + 3] = op_data->op_namelen + 1;
                /* As an optimization, we allocate an RPC request buffer for
                 * at least a default-sized LOV EA even if we aren't sending
                 * one. */
                size[DLM_INTENT_REC_OFF + 4] = max(lmmsize,
                                   obddev->u.cli.cl_default_mds_easize);

                /* XXX: openlock is not cancelled for cross-refs. */
                /* If inode is known, cancel conflicting OPEN locks. */
                if (fid_is_sane(&op_data->op_fid2)) {
                        /* NOTE(review): the lock mode chosen in each branch
                         * is elided in this extract. */
                        if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
                        else if (it->it_flags & FMODE_EXEC)
                        count = mdc_resource_get_unused(exp, &op_data->op_fid2,

                /* If CREATE or JOIN_FILE, cancel parent's UPDATE lock. */
                if (it->it_op & IT_CREAT || it->it_flags & O_JOIN_FILE)
                count += mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                 MDS_INODELOCK_UPDATE);
                /* extra segment only used when joining files */
                size[DLM_INTENT_REC_OFF + 5] =
                        sizeof(struct mdt_rec_join);
                req = ldlm_prep_enqueue_req(exp, 8 + do_join, size, &cancels,
                        /* join is like an unlink of the tail */
                        policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
                        mdc_join_pack(req, DLM_INTENT_REC_OFF + 5, op_data,
                                      (*(__u64 *)op_data->op_data));

                spin_lock(&req->rq_lock);
                spin_unlock(&req->rq_lock);

                /* pack the intent */
                lit = lustre_msg_buf(req->rq_reqmsg, DLM_INTENT_IT_OFF,
                lit->opc = (__u64)it->it_op;

                /* pack the intended request */
                mdc_open_pack(req, DLM_INTENT_REC_OFF, op_data,
                              it->it_create_mode, 0, it->it_flags,

                /* for remote client, fetch remote perm for current user */
                repsize[repbufcnt++] = client_is_remote(exp) ?
                                       sizeof(struct mdt_remote_perm) :
                                       LUSTRE_POSIX_ACL_MAX_SIZE;
                repsize[repbufcnt++] = sizeof(struct lustre_capa);
                repsize[repbufcnt++] = sizeof(struct lustre_capa);
        } else if (it->it_op & IT_UNLINK) {
                size[DLM_INTENT_REC_OFF] = sizeof(struct mdt_rec_unlink);
                size[DLM_INTENT_REC_OFF + 1] = op_data->op_capa1 ?
                        sizeof(struct lustre_capa) : 0;
                size[DLM_INTENT_REC_OFF + 2] = op_data->op_namelen + 1;
                policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
                req = ldlm_prep_enqueue_req(exp, 6, size, NULL, 0);

                /* pack the intent */
                lit = lustre_msg_buf(req->rq_reqmsg, DLM_INTENT_IT_OFF,
                lit->opc = (__u64)it->it_op;

                /* pack the intended request */
                mdc_unlink_pack(req, DLM_INTENT_REC_OFF, op_data);

                /* reply carries unlink llog cancel cookies */
                repsize[repbufcnt++] = obddev->u.cli.cl_max_mds_cookiesize;
        } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) {
                obd_valid valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
                                  OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
                                  OBD_MD_FLMDSCAPA | OBD_MD_MEA;
                valid |= client_is_remote(exp) ? OBD_MD_FLRMTPERM :

                size[DLM_INTENT_REC_OFF] = sizeof(struct mdt_body);
                size[DLM_INTENT_REC_OFF + 1] = op_data->op_capa1 ?
                        sizeof(struct lustre_capa) : 0;
                size[DLM_INTENT_REC_OFF + 2] = op_data->op_namelen + 1;

                /* GETATTR needs the UPDATE bits; plain LOOKUP keeps the
                 * LOOKUP bit set in the initializer above. */
                if (it->it_op & IT_GETATTR)
                        policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;

                req = ldlm_prep_enqueue_req(exp, 6, size, NULL, 0);

                /* pack the intent */
                lit = lustre_msg_buf(req->rq_reqmsg, DLM_INTENT_IT_OFF,
                lit->opc = (__u64)it->it_op;

                /* pack the intended request */
                mdc_getattr_pack(req, DLM_INTENT_REC_OFF, valid,
                                 it->it_flags, op_data);

                repsize[repbufcnt++] = client_is_remote(exp) ?
                                       sizeof(struct mdt_remote_perm) :
                                       LUSTRE_POSIX_ACL_MAX_SIZE;
                repsize[repbufcnt++] = sizeof(struct lustre_capa);
        } else if (it->it_op == IT_READDIR) {
                policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
                req = ldlm_prep_enqueue_req(exp, 2, size, NULL, 0);

        /* get ready for the reply */
        ptlrpc_req_set_repsize(req, repbufcnt, repsize);

        /* It is important to obtain rpc_lock first (if applicable), so that
         * threads that are serialised with rpc_lock are not polluting our
         * rpcs in flight counter */
        mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
        mdc_enter_request(&obddev->u.cli);
        rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
        mdc_exit_request(&obddev->u.cli);
        mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);

        /* Similarly, if we're going to replay this request, we don't want to
         * actually get a lock, just perform the intent. */
        if (req->rq_transno || req->rq_replay) {
                lockreq = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF,
                lockreq->lock_flags |= LDLM_FL_INTENT_ONLY;

        if (rc == ELDLM_LOCK_ABORTED) {
                /* Intent executed, but no lock was granted: clear the
                 * handle so callers don't try to use it. */
                memset(lockh, 0, sizeof(*lockh));
        } else if (rc != 0) {
                CERROR("ldlm_cli_enqueue: %d\n", rc);
                LASSERTF(rc < 0, "rc %d\n", rc);
                mdc_clear_replay_flag(req, rc);
                ptlrpc_req_finished(req);
        } else { /* rc = 0 */
                struct ldlm_lock *lock = ldlm_handle2lock(lockh);

                /* If the server gave us back a different lock mode, we should
                 * fix up our variables. */
                if (lock->l_req_mode != einfo->ei_mode) {
                        ldlm_lock_addref(lockh, lock->l_req_mode);
                        ldlm_lock_decref(lockh, einfo->ei_mode);
                        einfo->ei_mode = lock->l_req_mode;

        lockrep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
        LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */
        LASSERT_REPSWABBED(req, DLM_LOCKREPLY_OFF); /* swabbed by ldlm_cli_enqueue() */

        /* Copy the server's verdict into the intent for the callers. */
        it->d.lustre.it_disposition = (int)lockrep->lock_policy_res1;
        it->d.lustre.it_status = (int)lockrep->lock_policy_res2;
        it->d.lustre.it_lock_mode = einfo->ei_mode;
        it->d.lustre.it_data = req;

        if (it->d.lustre.it_status < 0 && req->rq_replay)
                mdc_clear_replay_flag(req, it->d.lustre.it_status);

        /* If we're doing an IT_OPEN which did not result in an actual
         * successful open, then we need to remove the bit which saves
         * this request for unconditional replay.
         *
         * It's important that we do this first! Otherwise we might exit the
         * function without doing so, and try to replay a failed create. */
        if (it->it_op & IT_OPEN && req->rq_replay &&
            (!it_disposition(it, DISP_OPEN_OPEN) ||it->d.lustre.it_status != 0))
                mdc_clear_replay_flag(req, it->d.lustre.it_status);

        DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
                  it->it_op,it->d.lustre.it_disposition,it->d.lustre.it_status);

        /* We know what to expect, so we do any byte flipping required here */
        LASSERT(repbufcnt == 7 || repbufcnt == 6 || repbufcnt == 2);
        if (repbufcnt >= 6) {
                int reply_off = DLM_REPLY_REC_OFF;
                struct mdt_body *body;

                body = lustre_swab_repbuf(req, reply_off++, sizeof(*body),
                                          lustre_swab_mdt_body);
                        CERROR ("Can't swab mdt_body\n");

                if (req->rq_replay && it_disposition(it, DISP_OPEN_OPEN) &&
                    !it_open_error(DISP_OPEN_OPEN, it)) {
                        /*
                         * If this is a successful OPEN request, we need to set
                         * replay handler and data early, so that if replay
                         * happens immediately after swabbing below, new reply
                         * is swabbed by that handler correctly. */
                        mdc_set_open_replay_data(NULL, NULL, req);

                if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
                        /*
                         * The eadata is opaque; just check that it is there.
                         * Eventually, obd_unpackmd() will check the contents. */
                        eadata = lustre_swab_repbuf(req, reply_off++,
                                                    body->eadatasize, NULL);
                        if (eadata == NULL) {
                                CERROR("Missing/short eadata\n");

                        /* Track the largest EA/cookie sizes the MDS reports so
                         * later requests reserve enough reply space. */
                        if (body->valid & OBD_MD_FLMODEASIZE) {
                                if (obddev->u.cli.cl_max_mds_easize <
                                        obddev->u.cli.cl_max_mds_easize =
                                        CDEBUG(D_INFO, "maxeasize become %d\n",
                                if (obddev->u.cli.cl_max_mds_cookiesize <
                                    body->max_cookiesize) {
                                        obddev->u.cli.cl_max_mds_cookiesize =
                                                body->max_cookiesize;
                                        CDEBUG(D_INFO, "cookiesize become %d\n",
                                               body->max_cookiesize);

                        /*
                         * We save the reply LOV EA in case we have to replay a
                         * create for recovery. If we didn't allocate a large
                         * enough request buffer above we need to reallocate it
                         * here to hold the actual LOV EA.
                         *
                         * Do not save the LOV EA if the request is not going
                         * to be replayed (for example an error one). */
                        if ((it->it_op & IT_OPEN) && req->rq_replay) {
                                if (lustre_msg_buflen(req->rq_reqmsg,
                                                      DLM_INTENT_REC_OFF + 4) <
                                        mdc_realloc_openmsg(req, body, size);

                                lmm = lustre_msg_buf(req->rq_reqmsg,
                                                     DLM_INTENT_REC_OFF + 4,
                                        memcpy(lmm, eadata, body->eadatasize);

                if (body->valid & OBD_MD_FLRMTPERM) {
                        struct mdt_remote_perm *perm;

                        LASSERT(client_is_remote(exp));
                        perm = lustre_swab_repbuf(req, reply_off++,
                                                  lustre_swab_mdt_remote_perm);
                                CERROR("missing remote permission!\n");
                } else if ((body->valid & OBD_MD_FLACL) && body->aclsize) {

                if (body->valid & OBD_MD_FLMDSCAPA) {
                        struct lustre_capa *capa, *p;

                        capa = lustre_unpack_capa(req->rq_repmsg, reply_off++);
                                CERROR("Missing/short MDS capability\n");

                        if (it->it_op & IT_OPEN) {
                                /* client fid capa will be checked in replay */
                                p = lustre_msg_buf(req->rq_reqmsg,
                                                   DLM_INTENT_REC_OFF + 2,

                if (body->valid & OBD_MD_FLOSSCAPA) {
                        struct lustre_capa *capa;

                        capa = lustre_unpack_capa(req->rq_repmsg, reply_off++);
                                CERROR("Missing/short OSS capability\n");
619 * This long block is all about fixing up the lock and request state
620 * so that it is correct as of the moment _before_ the operation was
621 * applied; that way, the VFS will think that everything is normal and
622 * call Lustre's regular VFS methods.
624 * If we're performing a creation, that means that unless the creation
625 * failed with EEXIST, we should fake up a negative dentry.
627 * For everything else, we want to lookup to succeed.
629 * One additional note: if CREATE or OPEN succeeded, we add an extra
630 * reference to the request because we need to keep it around until
631 * ll_create/ll_open gets called.
633 * The server will return to us, in it_disposition, an indication of
634 * exactly what d.lustre.it_status refers to.
636 * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
637 * otherwise if DISP_OPEN_CREATE is set, then it status is the
638 * creation failure mode. In either case, one of DISP_LOOKUP_NEG or
639 * DISP_LOOKUP_POS will be set, indicating whether the child lookup
642 * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
645 int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
646 void *lmm, int lmmsize, struct lookup_intent *it,
647 int lookup_flags, struct ptlrpc_request **reqp,
648 ldlm_blocking_callback cb_blocking,
649 int extra_lock_flags)
651 struct ptlrpc_request *request;
652 struct lustre_handle old_lock;
653 struct lustre_handle lockh;
654 struct mdt_body *mdt_body;
655 struct ldlm_lock *lock;
660 CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
661 ", intent: %s flags %#o\n", op_data->op_namelen,
662 op_data->op_name, PFID(&op_data->op_fid2),
663 PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
666 if (fid_is_sane(&op_data->op_fid2) &&
667 (it->it_op & (IT_LOOKUP | IT_GETATTR))) {
668 /* We could just return 1 immediately, but since we should only
669 * be called in revalidate_it if we already have a lock, let's
671 struct ldlm_res_id res_id = { .name = { fid_seq(&op_data->op_fid2),
672 fid_oid(&op_data->op_fid2),
673 fid_ver(&op_data->op_fid2) } };
674 ldlm_policy_data_t policy;
675 ldlm_mode_t mode = LCK_CR;
677 /* As not all attributes are kept under update lock, e.g.
678 owner/group/acls are under lookup lock, we need both
679 ibits for GETATTR. */
681 /* For CMD, UPDATE lock and LOOKUP lock can not be got
682 * at the same for cross-object, so we can not match
683 * the 2 lock at the same time FIXME: but how to handle
684 * the above situation */
685 policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
686 MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
688 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
689 LDLM_FL_BLOCK_GRANTED, &res_id,
690 LDLM_IBITS, &policy, mode, &lockh);
693 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
694 LDLM_FL_BLOCK_GRANTED, &res_id,
695 LDLM_IBITS, &policy, mode, &lockh);
699 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
700 LDLM_FL_BLOCK_GRANTED, &res_id,
701 LDLM_IBITS, &policy, mode, &lockh);
706 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
707 LDLM_FL_BLOCK_GRANTED, &res_id,
708 LDLM_IBITS, &policy, mode, &lockh);
712 memcpy(&it->d.lustre.it_lock_handle, &lockh,
714 it->d.lustre.it_lock_mode = mode;
717 /* Only return failure if it was not GETATTR by cfid
718 (from inode_revalidate) */
719 if (rc || op_data->op_namelen != 0)
723 /* lookup_it may be called only after revalidate_it has run, because
724 * revalidate_it cannot return errors, only zero. Returning zero causes
725 * this call to lookup, which *can* return an error.
727 * We only want to execute the request associated with the intent one
728 * time, however, so don't send the request again. Instead, skip past
729 * this and use the request from revalidate. In this case, revalidate
730 * never dropped its reference, so the refcounts are all OK */
731 if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
732 struct ldlm_enqueue_info einfo =
733 { LDLM_IBITS, it_to_lock_mode(it), cb_blocking,
734 ldlm_completion_ast, NULL, NULL };
736 /* For case if upper layer did not alloc fid, do it now. */
737 if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
738 rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
740 CERROR("Can't alloc new fid, rc %d\n", rc);
744 rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh,
745 lmm, lmmsize, extra_lock_flags);
748 memcpy(&it->d.lustre.it_lock_handle, &lockh, sizeof(lockh));
749 } else if (!fid_is_sane(&op_data->op_fid2) ||
750 !(it->it_flags & O_CHECK_STALE)) {
751 /* DISP_ENQ_COMPLETE set means there is extra reference on
752 * request referenced from this intent, saved for subsequent
753 * lookup. This path is executed when we proceed to this
754 * lookup, so we clear DISP_ENQ_COMPLETE */
755 it_clear_disposition(it, DISP_ENQ_COMPLETE);
757 request = *reqp = it->d.lustre.it_data;
758 LASSERT(request != NULL);
759 LASSERT(request != LP_POISON);
760 LASSERT(request->rq_repmsg != LP_POISON);
762 if (!it_disposition(it, DISP_IT_EXECD)) {
763 /* The server failed before it even started executing the
764 * intent, i.e. because it couldn't unpack the request. */
765 LASSERT(it->d.lustre.it_status != 0);
766 RETURN(it->d.lustre.it_status);
768 rc = it_open_error(DISP_IT_EXECD, it);
772 mdt_body = lustre_msg_buf(request->rq_repmsg, DLM_REPLY_REC_OFF,
774 LASSERT(mdt_body != NULL); /* mdc_enqueue checked */
775 LASSERT_REPSWABBED(request, 1); /* mdc_enqueue swabbed */
777 /* If we were revalidating a fid/name pair, mark the intent in
778 * case we fail and get called again from lookup */
779 if (fid_is_sane(&op_data->op_fid2) && (it->it_flags & O_CHECK_STALE) &&
780 (it->it_op != IT_GETATTR)) {
781 it_set_disposition(it, DISP_ENQ_COMPLETE);
783 /* Also: did we find the same inode? */
784 if (!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1))
788 rc = it_open_error(DISP_LOOKUP_EXECD, it);
792 /* keep requests around for the multiple phases of the call
793 * this shows the DISP_XX must guarantee we make it into the call
795 if (!it_disposition(it, DISP_ENQ_CREATE_REF) &&
796 it_disposition(it, DISP_OPEN_CREATE) &&
797 !it_open_error(DISP_OPEN_CREATE, it)) {
798 it_set_disposition(it, DISP_ENQ_CREATE_REF);
799 ptlrpc_request_addref(request); /* balanced in ll_create_node */
801 if (!it_disposition(it, DISP_ENQ_OPEN_REF) &&
802 it_disposition(it, DISP_OPEN_OPEN) &&
803 !it_open_error(DISP_OPEN_OPEN, it)) {
804 it_set_disposition(it, DISP_ENQ_OPEN_REF);
805 ptlrpc_request_addref(request); /* balanced in ll_file_open */
806 /* BUG 11546 - eviction in the middle of open rpc processing */
807 OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout);
810 if (it->it_op & IT_CREAT) {
811 /* XXX this belongs in ll_create_it */
812 } else if (it->it_op == IT_OPEN) {
813 LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
815 LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP));
818 /* If we already have a matching lock, then cancel the new
819 * one. We have to set the data here instead of in
820 * mdc_enqueue, because we need to use the child's inode as
821 * the l_ast_data to match, and that's not available until
822 * intent_finish has performed the iget().) */
823 lock = ldlm_handle2lock(&lockh);
825 ldlm_policy_data_t policy = lock->l_policy_data;
826 LDLM_DEBUG(lock, "matching against this");
828 LASSERTF(fid_res_name_eq(&mdt_body->fid1,
829 &lock->l_resource->lr_name),
830 "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n",
831 (unsigned long)lock->l_resource->lr_name.name[0],
832 (unsigned long)lock->l_resource->lr_name.name[1],
833 (unsigned long)lock->l_resource->lr_name.name[2],
834 (unsigned long)fid_seq(&mdt_body->fid1),
835 (unsigned long)fid_oid(&mdt_body->fid1),
836 (unsigned long)fid_ver(&mdt_body->fid1));
839 memcpy(&old_lock, &lockh, sizeof(lockh));
840 if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
841 LDLM_IBITS, &policy, LCK_NL, &old_lock)) {
842 ldlm_lock_decref_and_cancel(&lockh,
843 it->d.lustre.it_lock_mode);
844 memcpy(&lockh, &old_lock, sizeof(old_lock));
845 memcpy(&it->d.lustre.it_lock_handle, &lockh,
849 CDEBUG(D_DENTRY,"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
850 op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
851 it->d.lustre.it_status, it->d.lustre.it_disposition, rc);