1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
6 * This file is part of the Lustre file system, http://www.lustre.org
7 * Lustre is a trademark of Cluster File Systems, Inc.
9 * You may have signed or agreed to another license before downloading
10 * this software. If so, you are bound by the terms and conditions
11 * of that agreement, and the following does not apply to you. See the
12 * LICENSE file included with this distribution for more information.
14 * If you did not agree to a different license, then this copy of Lustre
15 * is open source software; you can redistribute it and/or modify it
16 * under the terms of version 2 of the GNU General Public License as
17 * published by the Free Software Foundation.
19 * In either case, Lustre is distributed in the hope that it will be
20 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
21 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * license text for more details.
26 # define EXPORT_SYMTAB
28 #define DEBUG_SUBSYSTEM S_MDC
31 # include <linux/module.h>
32 # include <linux/pagemap.h>
33 # include <linux/miscdevice.h>
34 # include <linux/init.h>
36 # include <liblustre.h>
39 #include <linux/lustre_acl.h>
40 #include <obd_class.h>
41 #include <lustre_dlm.h>
42 /* fid_res_name_eq() */
43 #include <lustre_fid.h>
44 #include <lprocfs_status.h>
45 #include "mdc_internal.h"
47 int it_disposition(struct lookup_intent *it, int flag)
49 return it->d.lustre.it_disposition & flag;
51 EXPORT_SYMBOL(it_disposition);
53 void it_set_disposition(struct lookup_intent *it, int flag)
55 it->d.lustre.it_disposition |= flag;
57 EXPORT_SYMBOL(it_set_disposition);
59 void it_clear_disposition(struct lookup_intent *it, int flag)
61 it->d.lustre.it_disposition &= ~flag;
63 EXPORT_SYMBOL(it_clear_disposition);
65 static int it_to_lock_mode(struct lookup_intent *it)
69 /* CREAT needs to be tested before open (both could be set) */
70 if (it->it_op & IT_CREAT)
72 else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP))
79 int it_open_error(int phase, struct lookup_intent *it)
81 if (it_disposition(it, DISP_OPEN_OPEN)) {
82 if (phase >= DISP_OPEN_OPEN)
83 return it->d.lustre.it_status;
88 if (it_disposition(it, DISP_OPEN_CREATE)) {
89 if (phase >= DISP_OPEN_CREATE)
90 return it->d.lustre.it_status;
95 if (it_disposition(it, DISP_LOOKUP_EXECD)) {
96 if (phase >= DISP_LOOKUP_EXECD)
97 return it->d.lustre.it_status;
102 if (it_disposition(it, DISP_IT_EXECD)) {
103 if (phase >= DISP_IT_EXECD)
104 return it->d.lustre.it_status;
108 CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
109 it->d.lustre.it_status);
113 EXPORT_SYMBOL(it_open_error);
115 /* this must be called on a lockh that is known to have a referenced lock */
116 int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data)
118 struct ldlm_lock *lock;
126 lock = ldlm_handle2lock((struct lustre_handle *)lockh);
128 LASSERT(lock != NULL);
129 lock_res_and_lock(lock);
131 if (lock->l_ast_data && lock->l_ast_data != data) {
132 struct inode *new_inode = data;
133 struct inode *old_inode = lock->l_ast_data;
134 LASSERTF(old_inode->i_state & I_FREEING,
135 "Found existing inode %p/%lu/%u state %lu in lock: "
136 "setting data to %p/%lu/%u\n", old_inode,
137 old_inode->i_ino, old_inode->i_generation,
139 new_inode, new_inode->i_ino, new_inode->i_generation);
142 lock->l_ast_data = data;
143 unlock_res_and_lock(lock);
149 ldlm_mode_t mdc_lock_match(struct obd_export *exp, int flags,
150 const struct lu_fid *fid, ldlm_type_t type,
151 ldlm_policy_data_t *policy, ldlm_mode_t mode,
152 struct lustre_handle *lockh)
154 struct ldlm_res_id res_id =
155 { .name = {fid_seq(fid),
161 rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
162 &res_id, type, policy, mode, lockh);
166 int mdc_cancel_unused(struct obd_export *exp,
167 const struct lu_fid *fid,
168 ldlm_policy_data_t *policy,
169 ldlm_mode_t mode, int flags, void *opaque)
171 struct ldlm_res_id res_id =
172 { .name = {fid_seq(fid),
175 struct obd_device *obd = class_exp2obd(exp);
180 rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
181 policy, mode, flags, opaque);
185 int mdc_change_cbdata(struct obd_export *exp,
186 const struct lu_fid *fid,
187 ldlm_iterator_t it, void *data)
189 struct ldlm_res_id res_id = { .name = {0} };
192 res_id.name[0] = fid_seq(fid);
193 res_id.name[1] = fid_oid(fid);
194 res_id.name[2] = fid_ver(fid);
196 ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace,
203 static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
205 /* Don't hold error requests for replay. */
206 if (req->rq_replay) {
207 spin_lock(&req->rq_lock);
209 spin_unlock(&req->rq_lock);
211 if (rc && req->rq_transno != 0) {
212 DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
217 /* Save a large LOV EA into the request buffer so that it is available
218 * for replay. We don't do this in the initial request because the
219 * original request doesn't need this buffer (at most it sends just the
220 * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty
221 * buffer and may also be difficult to allocate and save a very large
222 * request buffer for each open. (bug 5707)
224 * OOM here may cause recovery failure if lmm is needed (only for the
225 * original open if the MDS crashed just when this client also OOM'd)
226 * but this is incredibly unlikely, and questionable whether the client
227 * could do MDS recovery under OOM anyways... */
228 static void mdc_realloc_openmsg(struct ptlrpc_request *req,
229 struct mdt_body *body, int size[9])
234 rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4,
237 CERROR("Can't enlarge segment %d size to %d\n",
238 DLM_INTENT_REC_OFF + 4, body->eadatasize);
239 body->valid &= ~OBD_MD_FLEASIZE;
240 body->eadatasize = 0;
245 /* We always reserve enough space in the reply packet for a stripe MD, because
246 * we don't know in advance the file type. */
/*
 * NOTE(review): this function is a line-numbered listing extraction with
 * many interior source lines missing (the embedded numbers skip); code is
 * kept byte-identical below.  Do NOT compile as-is — recover the missing
 * lines (error branches, closing braces, GOTOs) from the canonical tree.
 *
 * mdc_enqueue(): builds an LDLM enqueue RPC carrying an intent
 * (LDLM_FL_HAS_INTENT) for OPEN / UNLINK / GETATTR|LOOKUP / READDIR,
 * sends it via ldlm_cli_enqueue(), then copies the server's disposition
 * and status into *it and swabs/validates the reply buffers (mdt_body,
 * EA data, remote perms/ACL, capabilities).
 */
247 int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
248 struct lookup_intent *it, struct md_op_data *op_data,
249 struct lustre_handle *lockh, void *lmm, int lmmsize,
250 int extra_lock_flags)
252 struct ptlrpc_request *req;
253 struct obd_device *obddev = class_exp2obd(exp);
254 struct ldlm_res_id res_id =
255 { .name = {fid_seq(&op_data->op_fid1),
256 fid_oid(&op_data->op_fid1),
257 fid_ver(&op_data->op_fid1)} };
258 ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } };
259 struct ldlm_request *lockreq;
260 struct ldlm_intent *lit;
261 struct ldlm_reply *lockrep;
/* Request/reply buffer size tables, indexed by message segment offset. */
262 int size[9] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
263 [DLM_LOCKREQ_OFF] = sizeof(*lockreq),
264 [DLM_INTENT_IT_OFF] = sizeof(*lit),
266 int repsize[7] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
267 [DLM_LOCKREPLY_OFF] = sizeof(*lockrep),
268 [DLM_REPLY_REC_OFF] = sizeof(struct mdt_body),
269 [DLM_REPLY_REC_OFF+1] = obddev->u.cli.
272 int flags = extra_lock_flags | LDLM_FL_HAS_INTENT;
273 int repbufcnt = 4, rc;
276 LASSERTF(einfo->ei_type == LDLM_IBITS,"lock type %d\n", einfo->ei_type);
/* --- IT_OPEN: pack an open/create intent, cancelling conflicting locks. */
278 if (it->it_op & IT_OPEN) {
279 int do_join = (!!(it->it_flags & O_JOIN_FILE) &&
281 CFS_LIST_HEAD(cancels);
285 it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
287 size[DLM_INTENT_REC_OFF] = sizeof(struct mdt_rec_create);
288 /* parent capability */
289 size[DLM_INTENT_REC_OFF + 1] = op_data->op_capa1 ?
290 sizeof(struct lustre_capa) : 0;
291 /* child capability, used for replay only */
292 size[DLM_INTENT_REC_OFF + 2] = sizeof(struct lustre_capa);
293 size[DLM_INTENT_REC_OFF + 3] = op_data->op_namelen + 1;
294 /* As an optimization, we allocate an RPC request buffer for
295 * at least a default-sized LOV EA even if we aren't sending
298 size[DLM_INTENT_REC_OFF + 4] = max(lmmsize,
299 obddev->u.cli.cl_default_mds_easize);
301 /* XXX: openlock is not cancelled for cross-refs. */
302 /* If inode is known, cancel conflicting OPEN locks. */
303 if (fid_is_sane(&op_data->op_fid2)) {
304 if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
307 else if (it->it_flags & FMODE_EXEC)
312 count = mdc_resource_get_unused(exp, &op_data->op_fid2,
317 /* If CREATE or JOIN_FILE, cancel parent's UPDATE lock. */
318 if (it->it_op & IT_CREAT || do_join)
322 count += mdc_resource_get_unused(exp, &op_data->op_fid1,
324 MDS_INODELOCK_UPDATE);
327 size[DLM_INTENT_REC_OFF + 5] =
328 sizeof(struct mdt_rec_join);
330 it->it_flags &= ~O_JOIN_FILE;
332 req = ldlm_prep_enqueue_req(exp, 8 + do_join, size, &cancels,
338 /* join is like an unlink of the tail */
339 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
340 mdc_join_pack(req, DLM_INTENT_REC_OFF + 5, op_data,
341 (*(__u64 *)op_data->op_data));
344 spin_lock(&req->rq_lock);
346 spin_unlock(&req->rq_lock);
348 /* pack the intent */
349 lit = lustre_msg_buf(req->rq_reqmsg, DLM_INTENT_IT_OFF,
351 lit->opc = (__u64)it->it_op;
353 /* pack the intended request */
354 mdc_open_pack(req, DLM_INTENT_REC_OFF, op_data,
355 it->it_create_mode, 0, it->it_flags,
358 /* for remote client, fetch remote perm for current user */
359 repsize[repbufcnt++] = client_is_remote(exp) ?
360 sizeof(struct mdt_remote_perm) :
361 LUSTRE_POSIX_ACL_MAX_SIZE;
362 repsize[repbufcnt++] = sizeof(struct lustre_capa);
363 repsize[repbufcnt++] = sizeof(struct lustre_capa);
/* --- IT_UNLINK: intent carries an unlink record; reply reserves cookies. */
364 } else if (it->it_op & IT_UNLINK) {
365 size[DLM_INTENT_REC_OFF] = sizeof(struct mdt_rec_unlink);
366 size[DLM_INTENT_REC_OFF + 1] = op_data->op_capa1 ?
367 sizeof(struct lustre_capa) : 0;
368 size[DLM_INTENT_REC_OFF + 2] = op_data->op_namelen + 1;
369 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
370 req = ldlm_prep_enqueue_req(exp, 6, size, NULL, 0);
374 /* pack the intent */
375 lit = lustre_msg_buf(req->rq_reqmsg, DLM_INTENT_IT_OFF,
377 lit->opc = (__u64)it->it_op;
379 /* pack the intended request */
380 mdc_unlink_pack(req, DLM_INTENT_REC_OFF, op_data);
382 repsize[repbufcnt++] = obddev->u.cli.cl_max_mds_cookiesize;
/* --- IT_GETATTR / IT_LOOKUP: getattr intent; GETATTR also wants UPDATE bit. */
383 } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) {
384 obd_valid valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
385 OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
386 OBD_MD_FLMDSCAPA | OBD_MD_MEA;
387 valid |= client_is_remote(exp) ? OBD_MD_FLRMTPERM :
389 size[DLM_INTENT_REC_OFF] = sizeof(struct mdt_body);
390 size[DLM_INTENT_REC_OFF + 1] = op_data->op_capa1 ?
391 sizeof(struct lustre_capa) : 0;
392 size[DLM_INTENT_REC_OFF + 2] = op_data->op_namelen + 1;
394 if (it->it_op & IT_GETATTR)
395 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
397 req = ldlm_prep_enqueue_req(exp, 6, size, NULL, 0);
401 /* pack the intent */
402 lit = lustre_msg_buf(req->rq_reqmsg, DLM_INTENT_IT_OFF,
404 lit->opc = (__u64)it->it_op;
406 /* pack the intended request */
407 mdc_getattr_pack(req, DLM_INTENT_REC_OFF, valid,
408 it->it_flags, op_data);
410 repsize[repbufcnt++] = client_is_remote(exp) ?
411 sizeof(struct mdt_remote_perm) :
412 LUSTRE_POSIX_ACL_MAX_SIZE;
413 repsize[repbufcnt++] = sizeof(struct lustre_capa);
/* --- IT_READDIR: plain UPDATE-bit enqueue, minimal buffers. */
414 } else if (it->it_op == IT_READDIR) {
415 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
416 req = ldlm_prep_enqueue_req(exp, 2, size, NULL, 0);
426 /* get ready for the reply */
427 ptlrpc_req_set_repsize(req, repbufcnt, repsize);
429 /* It is important to obtain rpc_lock first (if applicable), so that
430 * threads that are serialised with rpc_lock are not polluting our
431 * rpcs in flight counter */
432 mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
433 mdc_enter_request(&obddev->u.cli);
434 rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
436 mdc_exit_request(&obddev->u.cli);
437 mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
439 /* Similarly, if we're going to replay this request, we don't want to
440 * actually get a lock, just perform the intent. */
441 if (req->rq_transno || req->rq_replay) {
442 lockreq = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF,
444 lockreq->lock_flags |= LDLM_FL_INTENT_ONLY;
/* --- Post-enqueue: aborted, failed, or granted handling. */
447 if (rc == ELDLM_LOCK_ABORTED) {
449 memset(lockh, 0, sizeof(*lockh));
451 } else if (rc != 0) {
452 CERROR("ldlm_cli_enqueue: %d\n", rc);
453 LASSERTF(rc < 0, "rc %d\n", rc);
454 mdc_clear_replay_flag(req, rc);
455 ptlrpc_req_finished(req);
457 } else { /* rc = 0 */
458 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
461 /* If the server gave us back a different lock mode, we should
462 * fix up our variables. */
463 if (lock->l_req_mode != einfo->ei_mode) {
464 ldlm_lock_addref(lockh, lock->l_req_mode);
465 ldlm_lock_decref(lockh, einfo->ei_mode);
466 einfo->ei_mode = lock->l_req_mode;
/* Copy the server's disposition/status out of the lock reply into *it. */
471 lockrep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
473 LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */
474 /* swabbed by ldlm_cli_enqueue() */
475 LASSERT(lustre_rep_swabbed(req, DLM_LOCKREPLY_OFF));
477 it->d.lustre.it_disposition = (int)lockrep->lock_policy_res1;
478 it->d.lustre.it_status = (int)lockrep->lock_policy_res2;
479 it->d.lustre.it_lock_mode = einfo->ei_mode;
480 it->d.lustre.it_data = req;
482 if (it->d.lustre.it_status < 0 && req->rq_replay)
483 mdc_clear_replay_flag(req, it->d.lustre.it_status);
485 /* If we're doing an IT_OPEN which did not result in an actual
486 * successful open, then we need to remove the bit which saves
487 * this request for unconditional replay.
489 * It's important that we do this first! Otherwise we might exit the
490 * function without doing so, and try to replay a failed create
492 if (it->it_op & IT_OPEN && req->rq_replay &&
493 (!it_disposition(it, DISP_OPEN_OPEN) ||it->d.lustre.it_status != 0))
494 mdc_clear_replay_flag(req, it->d.lustre.it_status);
496 DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
497 it->it_op,it->d.lustre.it_disposition,it->d.lustre.it_status);
499 /* We know what to expect, so we do any byte flipping required here */
500 LASSERT(repbufcnt == 7 || repbufcnt == 6 || repbufcnt == 2);
501 if (repbufcnt >= 6) {
502 int reply_off = DLM_REPLY_REC_OFF;
503 struct mdt_body *body;
505 body = lustre_swab_repbuf(req, reply_off++, sizeof(*body),
506 lustre_swab_mdt_body);
508 CERROR ("Can't swab mdt_body\n");
512 if (req->rq_replay && it_disposition(it, DISP_OPEN_OPEN) &&
513 !it_open_error(DISP_OPEN_OPEN, it)) {
515 * If this is a successful OPEN request, we need to set
516 * replay handler and data early, so that if replay
517 * happens immediately after swabbing below, new reply
518 * is swabbed by that handler correctly.
520 mdc_set_open_replay_data(NULL, NULL, req);
/* EA data: verify presence, grow cached max sizes, save EA for replay. */
523 if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
527 * The eadata is opaque; just check that it is there.
528 * Eventually, obd_unpackmd() will check the contents.
530 eadata = lustre_swab_repbuf(req, reply_off++,
531 body->eadatasize, NULL);
532 if (eadata == NULL) {
533 CERROR("Missing/short eadata\n");
536 if (body->valid & OBD_MD_FLMODEASIZE) {
537 if (obddev->u.cli.cl_max_mds_easize <
539 obddev->u.cli.cl_max_mds_easize =
541 CDEBUG(D_INFO, "maxeasize become %d\n",
544 if (obddev->u.cli.cl_max_mds_cookiesize <
545 body->max_cookiesize) {
546 obddev->u.cli.cl_max_mds_cookiesize =
547 body->max_cookiesize;
548 CDEBUG(D_INFO, "cookiesize become %d\n",
549 body->max_cookiesize);
554 * We save the reply LOV EA in case we have to replay a
555 * create for recovery. If we didn't allocate a large
556 * enough request buffer above we need to reallocate it
557 * here to hold the actual LOV EA.
559 * To not save LOV EA if request is not going to replay
560 * (for example error one).
562 if ((it->it_op & IT_OPEN) && req->rq_replay) {
563 if (lustre_msg_buflen(req->rq_reqmsg,
564 DLM_INTENT_REC_OFF + 4) <
566 mdc_realloc_openmsg(req, body, size);
568 lmm = lustre_msg_buf(req->rq_reqmsg,
569 DLM_INTENT_REC_OFF + 4,
572 memcpy(lmm, eadata, body->eadatasize);
/* Remote permissions (remote client) or ACL from the same reply slot. */
575 if (body->valid & OBD_MD_FLRMTPERM) {
576 struct mdt_remote_perm *perm;
578 LASSERT(client_is_remote(exp));
579 perm = lustre_swab_repbuf(req, reply_off++,
581 lustre_swab_mdt_remote_perm);
583 CERROR("missing remote permission!\n");
586 } else if ((body->valid & OBD_MD_FLACL) && body->aclsize) {
/* MDS / OSS capabilities, if granted by the server. */
589 if (body->valid & OBD_MD_FLMDSCAPA) {
590 struct lustre_capa *capa, *p;
592 capa = lustre_unpack_capa(req->rq_repmsg, reply_off++);
594 CERROR("Missing/short MDS capability\n");
598 if (it->it_op & IT_OPEN) {
599 /* client fid capa will be checked in replay */
600 p = lustre_msg_buf(req->rq_reqmsg,
601 DLM_INTENT_REC_OFF + 2,
607 if (body->valid & OBD_MD_FLOSSCAPA) {
608 struct lustre_capa *capa;
610 capa = lustre_unpack_capa(req->rq_repmsg, reply_off++);
612 CERROR("Missing/short OSS capability\n");
621 * This long block is all about fixing up the lock and request state
622 * so that it is correct as of the moment _before_ the operation was
623 * applied; that way, the VFS will think that everything is normal and
624 * call Lustre's regular VFS methods.
626 * If we're performing a creation, that means that unless the creation
627 * failed with EEXIST, we should fake up a negative dentry.
629 * For everything else, we want to lookup to succeed.
631 * One additional note: if CREATE or OPEN succeeded, we add an extra
632 * reference to the request because we need to keep it around until
633 * ll_create/ll_open gets called.
635 * The server will return to us, in it_disposition, an indication of
636 * exactly what d.lustre.it_status refers to.
638 * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
639 * otherwise if DISP_OPEN_CREATE is set, then it status is the
640 * creation failure mode. In either case, one of DISP_LOOKUP_NEG or
641 * DISP_LOOKUP_POS will be set, indicating whether the child lookup
644 * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
647 int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
648 void *lmm, int lmmsize, struct lookup_intent *it,
649 int lookup_flags, struct ptlrpc_request **reqp,
650 ldlm_blocking_callback cb_blocking,
651 int extra_lock_flags)
653 struct ptlrpc_request *request;
654 struct lustre_handle old_lock;
655 struct lustre_handle lockh;
656 struct mdt_body *mdt_body;
657 struct ldlm_lock *lock;
662 CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
663 ", intent: %s flags %#o\n", op_data->op_namelen,
664 op_data->op_name, PFID(&op_data->op_fid2),
665 PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
668 if (fid_is_sane(&op_data->op_fid2) &&
669 (it->it_op & (IT_LOOKUP | IT_GETATTR))) {
670 /* We could just return 1 immediately, but since we should only
671 * be called in revalidate_it if we already have a lock, let's
673 ldlm_policy_data_t policy;
676 /* As not all attributes are kept under update lock, e.g.
677 owner/group/acls are under lookup lock, we need both
678 ibits for GETATTR. */
680 /* For CMD, UPDATE lock and LOOKUP lock can not be got
681 * at the same for cross-object, so we can not match
682 * the 2 lock at the same time FIXME: but how to handle
683 * the above situation */
684 policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
685 MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
687 mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED,
688 &op_data->op_fid2, LDLM_IBITS, &policy,
689 LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh);
691 memcpy(&it->d.lustre.it_lock_handle, &lockh,
693 it->d.lustre.it_lock_mode = mode;
696 /* Only return failure if it was not GETATTR by cfid
697 (from inode_revalidate) */
698 if (mode || op_data->op_namelen != 0)
702 /* lookup_it may be called only after revalidate_it has run, because
703 * revalidate_it cannot return errors, only zero. Returning zero causes
704 * this call to lookup, which *can* return an error.
706 * We only want to execute the request associated with the intent one
707 * time, however, so don't send the request again. Instead, skip past
708 * this and use the request from revalidate. In this case, revalidate
709 * never dropped its reference, so the refcounts are all OK */
710 if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
711 struct ldlm_enqueue_info einfo =
712 { LDLM_IBITS, it_to_lock_mode(it), cb_blocking,
713 ldlm_completion_ast, NULL, NULL };
715 /* For case if upper layer did not alloc fid, do it now. */
716 if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
717 rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
719 CERROR("Can't alloc new fid, rc %d\n", rc);
723 rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh,
724 lmm, lmmsize, extra_lock_flags);
727 memcpy(&it->d.lustre.it_lock_handle, &lockh, sizeof(lockh));
728 } else if (!fid_is_sane(&op_data->op_fid2) ||
729 !(it->it_flags & O_CHECK_STALE)) {
730 /* DISP_ENQ_COMPLETE set means there is extra reference on
731 * request referenced from this intent, saved for subsequent
732 * lookup. This path is executed when we proceed to this
733 * lookup, so we clear DISP_ENQ_COMPLETE */
734 it_clear_disposition(it, DISP_ENQ_COMPLETE);
736 request = *reqp = it->d.lustre.it_data;
737 LASSERT(request != NULL);
738 LASSERT(request != LP_POISON);
739 LASSERT(request->rq_repmsg != LP_POISON);
741 if (!it_disposition(it, DISP_IT_EXECD)) {
742 /* The server failed before it even started executing the
743 * intent, i.e. because it couldn't unpack the request. */
744 LASSERT(it->d.lustre.it_status != 0);
745 RETURN(it->d.lustre.it_status);
747 rc = it_open_error(DISP_IT_EXECD, it);
751 mdt_body = lustre_msg_buf(request->rq_repmsg, DLM_REPLY_REC_OFF,
753 /* mdc_enqueue checked */
754 LASSERT(mdt_body != NULL);
755 /* mdc_enqueue swabbed */
756 LASSERT(lustre_rep_swabbed(request, 1));
758 /* If we were revalidating a fid/name pair, mark the intent in
759 * case we fail and get called again from lookup */
760 if (fid_is_sane(&op_data->op_fid2) && (it->it_flags & O_CHECK_STALE) &&
761 (it->it_op != IT_GETATTR)) {
762 it_set_disposition(it, DISP_ENQ_COMPLETE);
764 /* Also: did we find the same inode? */
765 if (!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1))
769 rc = it_open_error(DISP_LOOKUP_EXECD, it);
773 /* keep requests around for the multiple phases of the call
774 * this shows the DISP_XX must guarantee we make it into the call
776 if (!it_disposition(it, DISP_ENQ_CREATE_REF) &&
777 it_disposition(it, DISP_OPEN_CREATE) &&
778 !it_open_error(DISP_OPEN_CREATE, it)) {
779 it_set_disposition(it, DISP_ENQ_CREATE_REF);
780 ptlrpc_request_addref(request); /* balanced in ll_create_node */
782 if (!it_disposition(it, DISP_ENQ_OPEN_REF) &&
783 it_disposition(it, DISP_OPEN_OPEN) &&
784 !it_open_error(DISP_OPEN_OPEN, it)) {
785 it_set_disposition(it, DISP_ENQ_OPEN_REF);
786 ptlrpc_request_addref(request); /* balanced in ll_file_open */
787 /* BUG 11546 - eviction in the middle of open rpc processing */
788 OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout);
791 if (it->it_op & IT_CREAT) {
792 /* XXX this belongs in ll_create_it */
793 } else if (it->it_op == IT_OPEN) {
794 LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
796 LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP));
799 /* If we already have a matching lock, then cancel the new
800 * one. We have to set the data here instead of in
801 * mdc_enqueue, because we need to use the child's inode as
802 * the l_ast_data to match, and that's not available until
803 * intent_finish has performed the iget().) */
804 lock = ldlm_handle2lock(&lockh);
806 ldlm_policy_data_t policy = lock->l_policy_data;
807 LDLM_DEBUG(lock, "matching against this");
809 LASSERTF(fid_res_name_eq(&mdt_body->fid1,
810 &lock->l_resource->lr_name),
811 "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n",
812 (unsigned long)lock->l_resource->lr_name.name[0],
813 (unsigned long)lock->l_resource->lr_name.name[1],
814 (unsigned long)lock->l_resource->lr_name.name[2],
815 (unsigned long)fid_seq(&mdt_body->fid1),
816 (unsigned long)fid_oid(&mdt_body->fid1),
817 (unsigned long)fid_ver(&mdt_body->fid1));
820 memcpy(&old_lock, &lockh, sizeof(lockh));
821 if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
822 LDLM_IBITS, &policy, LCK_NL, &old_lock)) {
823 ldlm_lock_decref_and_cancel(&lockh,
824 it->d.lustre.it_lock_mode);
825 memcpy(&lockh, &old_lock, sizeof(old_lock));
826 memcpy(&it->d.lustre.it_lock_handle, &lockh,
830 CDEBUG(D_DENTRY,"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
831 op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
832 it->d.lustre.it_status, it->d.lustre.it_disposition, rc);