1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
6 * This file is part of the Lustre file system, http://www.lustre.org
7 * Lustre is a trademark of Cluster File Systems, Inc.
9 * You may have signed or agreed to another license before downloading
10 * this software. If so, you are bound by the terms and conditions
11 * of that agreement, and the following does not apply to you. See the
12 * LICENSE file included with this distribution for more information.
14 * If you did not agree to a different license, then this copy of Lustre
15 * is open source software; you can redistribute it and/or modify it
16 * under the terms of version 2 of the GNU General Public License as
17 * published by the Free Software Foundation.
19 * In either case, Lustre is distributed in the hope that it will be
20 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
21 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * license text for more details.
26 # define EXPORT_SYMTAB
28 #define DEBUG_SUBSYSTEM S_MDC
31 # include <linux/module.h>
32 # include <linux/pagemap.h>
33 # include <linux/miscdevice.h>
34 # include <linux/init.h>
36 # include <liblustre.h>
39 #include <linux/lustre_acl.h>
40 #include <obd_class.h>
41 #include <lustre_dlm.h>
42 /* fid_res_name_eq() */
43 #include <lustre_fid.h>
44 #include <lprocfs_status.h>
45 #include "mdc_internal.h"
47 int it_disposition(struct lookup_intent *it, int flag)
49 return it->d.lustre.it_disposition & flag;
51 EXPORT_SYMBOL(it_disposition);
53 void it_set_disposition(struct lookup_intent *it, int flag)
55 it->d.lustre.it_disposition |= flag;
57 EXPORT_SYMBOL(it_set_disposition);
59 void it_clear_disposition(struct lookup_intent *it, int flag)
61 it->d.lustre.it_disposition &= ~flag;
63 EXPORT_SYMBOL(it_clear_disposition);
/*
 * Map an intent operation to the LDLM lock mode that should be
 * requested for it.
 *
 * NOTE(review): this dump dropped lines here (embedded numbering jumps
 * 65->69, 70->72, 72->79): the function braces and the per-branch
 * return statements are missing — presumably a write-ish mode for
 * IT_CREAT and a read-ish mode for the lookup-style ops, plus an
 * error/LBUG tail, but that must be confirmed against pristine source
 * before building.
 */
static int it_to_lock_mode(struct lookup_intent *it)
        /* CREAT needs to be tested before open (both could be set) */
        if (it->it_op & IT_CREAT)
        else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP))
/*
 * it_open_error(): return the server-reported status for @phase of an
 * intent operation.  Each DISP_* check returns it_status only when the
 * caller asks about that phase or a later one (phase >= DISP_*);
 * earlier phases fall through to the next check.
 *
 * NOTE(review): the dump dropped lines (numbering jumps 79->81, 83->88,
 * 90->95, 97->102, 104->108, 109->113): function braces, the alternate
 * arm of each check, and the final return are missing — restore from
 * pristine source before building.
 */
int it_open_error(int phase, struct lookup_intent *it)
        if (it_disposition(it, DISP_OPEN_OPEN)) {
                if (phase >= DISP_OPEN_OPEN)
                        return it->d.lustre.it_status;
        if (it_disposition(it, DISP_OPEN_CREATE)) {
                if (phase >= DISP_OPEN_CREATE)
                        return it->d.lustre.it_status;
        if (it_disposition(it, DISP_LOOKUP_EXECD)) {
                if (phase >= DISP_LOOKUP_EXECD)
                        return it->d.lustre.it_status;
        if (it_disposition(it, DISP_IT_EXECD)) {
                if (phase >= DISP_IT_EXECD)
                        return it->d.lustre.it_status;
        /* No phase matched at all: log the raw disposition for debugging. */
        CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
               it->d.lustre.it_status);
EXPORT_SYMBOL(it_open_error);
/* this must be called on a lockh that is known to have a referenced lock */
/*
 * Attach VFS-level data (an inode pointer) as l_ast_data on the ldlm
 * lock behind @lockh, so blocking callbacks can later find the inode.
 *
 * NOTE(review): the dump dropped lines (numbering jumps 118->126,
 * 129->131, 137->139, 139->142, 143->149): function braces, one
 * LASSERTF argument (probably old_inode->i_state) and the lock
 * put/RETURN tail are missing — restore before building.
 */
int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data)
        struct ldlm_lock *lock;
        lock = ldlm_handle2lock((struct lustre_handle *)lockh);
        /* ldlm_handle2lock() takes a reference; the caller guarantees the
         * handle refers to a live lock, so NULL here is a logic error. */
        LASSERT(lock != NULL);
        lock_res_and_lock(lock);
        if (lock->l_ast_data && lock->l_ast_data != data) {
                struct inode *new_inode = data;
                struct inode *old_inode = lock->l_ast_data;
                /* Replacing existing ast_data is only legal when the old
                 * inode is already being torn down (I_FREEING set). */
                LASSERTF(old_inode->i_state & I_FREEING,
                         "Found existing inode %p/%lu/%u state %lu in lock: "
                         "setting data to %p/%lu/%u\n", old_inode,
                         old_inode->i_ino, old_inode->i_generation,
                         new_inode, new_inode->i_ino, new_inode->i_generation);
        lock->l_ast_data = data;
        unlock_res_and_lock(lock);
/*
 * Look for an already-granted local lock matching @fid/@type/@policy/
 * @mode; on a match, @lockh references the lock.  Thin wrapper around
 * ldlm_lock_match() that builds the fid-based resource name.
 *
 * NOTE(review): lines dropped (numbering jumps 155->161, 162->166):
 * the remaining res_id.name initializers (fid_oid/fid_ver), the rc
 * declaration and the RETURN are missing.
 */
ldlm_mode_t mdc_lock_match(struct obd_export *exp, int flags,
                           const struct lu_fid *fid, ldlm_type_t type,
                           ldlm_policy_data_t *policy, ldlm_mode_t mode,
                           struct lustre_handle *lockh)
        struct ldlm_res_id res_id =
                { .name = {fid_seq(fid),
        rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
                             &res_id, type, policy, mode, lockh);
/*
 * Cancel all unused local locks on the resource named by @fid that
 * match @policy/@mode; delegates to the generic
 * ldlm_cli_cancel_unused_resource().
 *
 * NOTE(review): lines dropped (numbering jumps 172->175, 175->180,
 * 181->185): remaining res_id initializers, rc declaration and the
 * RETURN are missing.
 */
int mdc_cancel_unused(struct obd_export *exp,
                      const struct lu_fid *fid,
                      ldlm_policy_data_t *policy,
                      ldlm_mode_t mode, int flags, void *opaque)
        struct ldlm_res_id res_id =
                { .name = {fid_seq(fid),
        struct obd_device *obd = class_exp2obd(exp);
        rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
                                             policy, mode, flags, opaque);
/*
 * Iterate over all local locks on @fid's resource, calling @it with
 * @data for each one (used e.g. to update l_ast_data when an inode is
 * being freed).
 *
 * NOTE(review): the tail was dropped (numbering jumps 196->203): the
 * ldlm_resource_iterate() trailing arguments and the function's
 * return/braces are missing.
 */
int mdc_change_cbdata(struct obd_export *exp,
                      const struct lu_fid *fid,
                      ldlm_iterator_t it, void *data)
        struct ldlm_res_id res_id = { .name = {0} };
        /* Resource name is the fid triple: sequence / object id / version. */
        res_id.name[0] = fid_seq(fid);
        res_id.name[1] = fid_oid(fid);
        res_id.name[2] = fid_ver(fid);
        ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace,
/*
 * Stop a failed intent request from being replayed after recovery.
 *
 * NOTE(review): lines dropped (numbering jumps 207->209, 209->211):
 * the statement clearing rq_replay inside the spinlock and the
 * closing braces are missing.
 */
static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
        /* Don't hold error requests for replay. */
        if (req->rq_replay) {
                spin_lock(&req->rq_lock);
                spin_unlock(&req->rq_lock);
        /* A transno on a failed request is suspicious: the server committed
         * something for an operation the client saw fail — log loudly. */
        if (rc && req->rq_transno != 0) {
                DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
217 /* Save a large LOV EA into the request buffer so that it is available
218 * for replay. We don't do this in the initial request because the
219 * original request doesn't need this buffer (at most it sends just the
220 * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty
221 * buffer and may also be difficult to allocate and save a very large
222 * request buffer for each open. (bug 5707)
224 * OOM here may cause recovery failure if lmm is needed (only for the
225 * original open if the MDS crashed just when this client also OOM'd)
226 * but this is incredibly unlikely, and questionable whether the client
227 * could do MDS recovery under OOM anyways... */
/*
 * Enlarge an open request's message buffer so the reply's LOV EA can be
 * stashed in it for replay (see the comment block above).  On enlarge
 * failure the EA is simply disabled on the body so replay proceeds
 * without it rather than failing outright.
 *
 * NOTE(review): lines dropped (numbering jumps 229->233, 234->237,
 * 240->244): rc declaration, the enlarge call's size argument, the
 * error-check branch and the braces are missing.
 */
static void mdc_realloc_openmsg(struct ptlrpc_request *req,
                                struct mdt_body *body)
        /* FIXME: remove this explicit offset. */
        rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4,
        CERROR("Can't enlarge segment %d size to %d\n",
               DLM_INTENT_REC_OFF + 4, body->eadatasize);
        /* Couldn't grow the buffer: drop the EA rather than fail replay. */
        body->valid &= ~OBD_MD_FLEASIZE;
        body->eadatasize = 0;
/*
 * Build and pack an LDLM_INTENT_OPEN request: cancel conflicting local
 * locks first (child OPEN locks and, for create/join, the parent's
 * UPDATE lock), allocate the request, reserve buffer space for name /
 * EA / capabilities / the early-cancel list, then pack the ldlm intent
 * and the embedded open record.
 *
 * NOTE(review): this dump dropped many lines throughout (numbering
 * jumps e.g. 247->250, 253->255, 255->261, 266->269, 274->279,
 * 280->284, 289->291, 304->306, 312->315, 318->321, 322->325,
 * 326->329, 338->341, 345->349): closing braces, several call
 * arguments, error checks and the RETURN(req) tail are missing —
 * restore from pristine source before building.
 */
static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
                                                   struct lookup_intent *it,
                                                   struct md_op_data *op_data,
                                                   void *lmm, int lmmsize,
        struct ptlrpc_request *req;
        struct obd_device *obddev = class_exp2obd(exp);
        struct ldlm_intent *lit;
        /* join-file mode: flag plus (truncated here) an extra condition */
        int joinfile = !!((it->it_flags & O_JOIN_FILE) &&
        CFS_LIST_HEAD(cancels);
        /* Intent open always creates a regular file. */
        it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
        /* XXX: openlock is not cancelled for cross-refs. */
        /* If inode is known, cancel conflicting OPEN locks. */
        if (fid_is_sane(&op_data->op_fid2)) {
                if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
                else if (it->it_flags & FMODE_EXEC)
                count = mdc_resource_get_unused(exp, &op_data->op_fid2,
        /* If CREATE or JOIN_FILE, cancel parent's UPDATE lock. */
        if (it->it_op & IT_CREAT || joinfile)
                count += mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                 MDS_INODELOCK_UPDATE);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_LDLM_INTENT_OPEN);
        /* Allocation failed: give the collected cancel locks back. */
        ldlm_lock_list_put(&cancels, l_bl_ast, count);
        RETURN(ERR_PTR(-ENOMEM));
        /* parent capability */
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        /* child capability, reserve the size according to parent capa, it will
         * be filled after we get the reply */
        mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        /* Reserve enough EA space for either the caller's lmm or the
         * default MDS EA size, whichever is larger. */
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
                             max(lmmsize, obddev->u.cli.cl_default_mds_easize));
        req_capsule_set_size(&req->rq_pill, &RMF_REC_JOINFILE,
        /* Piggy-back the early-cancel lock list if the server supports it. */
        if (exp_connect_cancelset(exp) && count) {
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
                                     ldlm_request_bufsize(count, LDLM_ENQUEUE));
        rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
        ptlrpc_request_free(req);
        ldlm_lock_list_put(&cancels, l_bl_ast, count);
        if (exp_connect_cancelset(exp) && req)
                ldlm_cli_cancel_list(&cancels, count, req, 0);
        /* join-file: head size travels in op_data->op_data */
        __u64 head_size = *(__u64 *)op_data->op_data;
        mdc_join_pack(req, op_data, head_size);
        spin_lock(&req->rq_lock);
        spin_unlock(&req->rq_lock);
        /* pack the intent */
        lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
        lit->opc = (__u64)it->it_op;
        /* pack the intended request */
        mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
        /* for remote client, fetch remote perm for current user */
        if (client_is_remote(exp))
                req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
                                     sizeof(struct mdt_remote_perm));
        ptlrpc_request_set_replen(req);
/*
 * Build and pack an LDLM_INTENT_UNLINK request: allocate, reserve the
 * name/capability buffers, pack the ldlm intent and the embedded
 * unlink record, then size the reply for the largest possible EA and
 * unlink cookies.
 *
 * NOTE(review): lines dropped (numbering jumps 355->359, 360->362,
 * 362->364, 368->370, 370->374, 385->389): error-check branches,
 * braces and the RETURN(req) tail are missing.
 */
static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
                                                     struct lookup_intent *it,
                                                     struct md_op_data *op_data)
        struct ptlrpc_request *req;
        struct obd_device *obddev = class_exp2obd(exp);
        struct ldlm_intent *lit;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_LDLM_INTENT_UNLINK);
        RETURN(ERR_PTR(-ENOMEM));
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
        ptlrpc_request_free(req);
        /* pack the intent */
        lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
        lit->opc = (__u64)it->it_op;
        /* pack the intended request */
        mdc_unlink_pack(req, op_data);
        /* Reply may carry the victim's full EA and unlink log cookies. */
        req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
                             obddev->u.cli.cl_max_mds_easize);
        req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
                             obddev->u.cli.cl_max_mds_cookiesize);
        ptlrpc_request_set_replen(req);
/*
 * Build and pack an LDLM_INTENT_GETATTR request: ask for the full set
 * of attributes (plus remote perms for remote clients, ACLs otherwise),
 * pack the ldlm intent and the embedded getattr record, and size the
 * reply for the largest possible EA.
 *
 * NOTE(review): lines dropped (numbering jumps 400->404, 405->407,
 * 407->409, 413->415, 415->419, 431->435): error-check branches,
 * braces and the RETURN(req) tail are missing.
 */
static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
                                                      struct lookup_intent *it,
                                                      struct md_op_data *op_data)
        struct ptlrpc_request *req;
        struct obd_device *obddev = class_exp2obd(exp);
        /* Remote clients get their permissions from the MDS; local ones
         * fetch the ACL instead. */
        obd_valid valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
                          OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
                          OBD_MD_FLMDSCAPA | OBD_MD_MEA |
                          (client_is_remote(exp) ?
                           OBD_MD_FLRMTPERM : OBD_MD_FLACL);
        struct ldlm_intent *lit;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_LDLM_INTENT_GETATTR);
        RETURN(ERR_PTR(-ENOMEM));
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
        ptlrpc_request_free(req);
        /* pack the intent */
        lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
        lit->opc = (__u64)it->it_op;
        /* pack the intended request */
        mdc_getattr_pack(req, valid, it->it_flags, op_data);
        req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
                             obddev->u.cli.cl_max_mds_easize);
        if (client_is_remote(exp))
                req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
                                     sizeof(struct mdt_remote_perm));
        ptlrpc_request_set_replen(req);
/*
 * Allocate and pack a plain LDLM_ENQUEUE request (no intent record) —
 * used for the IT_READDIR path which needs only the lock.
 *
 * NOTE(review): lines dropped (numbering jumps 437->440, 441->444,
 * 446->450): the opcode argument of the alloc_pack call, the NULL
 * check and the RETURN(req) tail are missing.
 */
static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp)
        struct ptlrpc_request *req;
        req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
                                        &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
        RETURN(ERR_PTR(-ENOMEM));
        ptlrpc_request_set_replen(req);
450 /* We always reserve enough space in the reply packet for a stripe MD, because
451 * we don't know in advance the file type. */
/*
 * mdc_enqueue(): send an intent lock request to the MDS and decode the
 * reply.  Picks the right packer for the intent op (open / unlink /
 * getattr-lookup / readdir), performs the ldlm enqueue under the MDC
 * rpc lock and flight-control, then pulls the disposition/status out
 * of the ldlm reply into @it and fixes up replay state and swabbed
 * reply buffers (mdt_body, EA, remote perms, capabilities).
 *
 * NOTE(review): this dump is missing many lines throughout the
 * function (embedded numbering jumps e.g. 468->471, 477->480, 480->483,
 * 484->487, 493->500, 508->510, 517->520, 520->522, 522->524, 528->530,
 * 531->534, 539->544, 573->575, 585->587, 597->602, 603->605, 615->620,
 * 628->632, 637->639, 647->651, 656->660, 662->667, 670->674): braces,
 * RETURNs, NULL/error checks and several call arguments are gone —
 * restore from pristine source before building; comments below describe
 * only what the visible lines establish.
 */
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
                struct lookup_intent *it, struct md_op_data *op_data,
                struct lustre_handle *lockh, void *lmm, int lmmsize,
                int extra_lock_flags)
        struct obd_device *obddev = class_exp2obd(exp);
        struct ptlrpc_request *req;
        struct req_capsule *pill;
        struct ldlm_request *lockreq;
        struct ldlm_reply *lockrep;
        int flags = extra_lock_flags | LDLM_FL_HAS_INTENT;
        /* Resource name is the fid triple of the primary object. */
        struct ldlm_res_id res_id =
                { .name = {fid_seq(&op_data->op_fid1),
                           fid_oid(&op_data->op_fid1),
                           fid_ver(&op_data->op_fid1)} };
        ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } };
        LASSERTF(einfo->ei_type == LDLM_IBITS,"lock type %d\n", einfo->ei_type);
        /* These ops modify or read object state, not the name: ask for the
         * UPDATE bit rather than the default LOOKUP bit. */
        if (it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR))
                policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
        if (it->it_op & IT_OPEN) {
                int joinfile = !!((it->it_flags & O_JOIN_FILE) &&
                req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize,
                policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
                einfo->ei_cbdata = NULL;
                /* join flag is consumed here; callers never see it again */
                it->it_flags &= ~O_JOIN_FILE;
        } else if (it->it_op & IT_UNLINK)
                req = mdc_intent_unlink_pack(exp, it, op_data);
        else if (it->it_op & (IT_GETATTR | IT_LOOKUP))
                req = mdc_intent_getattr_pack(exp, it, op_data);
        else if (it->it_op == IT_READDIR)
                req = ldlm_enqueue_pack(exp);
        RETURN(PTR_ERR(req));
        pill = &req->rq_pill;
        /* It is important to obtain rpc_lock first (if applicable), so that
         * threads that are serialised with rpc_lock are not polluting our
         * rpcs in flight counter */
        mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
        mdc_enter_request(&obddev->u.cli);
        rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
        mdc_exit_request(&obddev->u.cli);
        mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
        /* Similarly, if we're going to replay this request, we don't want to
         * actually get a lock, just perform the intent. */
        if (req->rq_transno || req->rq_replay) {
                lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ);
                lockreq->lock_flags |= LDLM_FL_INTENT_ONLY;
        if (rc == ELDLM_LOCK_ABORTED) {
                /* Intent executed but no lock granted: hand back a zeroed
                 * handle so the caller knows there is nothing to release. */
                memset(lockh, 0, sizeof(*lockh));
        } else if (rc != 0) {
                CERROR("ldlm_cli_enqueue: %d\n", rc);
                LASSERTF(rc < 0, "rc %d\n", rc);
                mdc_clear_replay_flag(req, rc);
                ptlrpc_req_finished(req);
        } else { /* rc = 0 */
                struct ldlm_lock *lock = ldlm_handle2lock(lockh);
                /* If the server gave us back a different lock mode, we should
                 * fix up our variables. */
                if (lock->l_req_mode != einfo->ei_mode) {
                        ldlm_lock_addref(lockh, lock->l_req_mode);
                        ldlm_lock_decref(lockh, einfo->ei_mode);
                        einfo->ei_mode = lock->l_req_mode;
        lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
        LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */
        /* Copy the server's verdict into the intent for the upper layers. */
        it->d.lustre.it_disposition = (int)lockrep->lock_policy_res1;
        it->d.lustre.it_status = (int)lockrep->lock_policy_res2;
        it->d.lustre.it_lock_mode = einfo->ei_mode;
        it->d.lustre.it_data = req;
        if (it->d.lustre.it_status < 0 && req->rq_replay)
                mdc_clear_replay_flag(req, it->d.lustre.it_status);
        /* If we're doing an IT_OPEN which did not result in an actual
         * successful open, then we need to remove the bit which saves
         * this request for unconditional replay.
         *
         * It's important that we do this first!  Otherwise we might exit the
         * function without doing so, and try to replay a failed create. */
        if (it->it_op & IT_OPEN && req->rq_replay &&
            (!it_disposition(it, DISP_OPEN_OPEN) ||it->d.lustre.it_status != 0))
                mdc_clear_replay_flag(req, it->d.lustre.it_status);
        DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
                  it->it_op,it->d.lustre.it_disposition,it->d.lustre.it_status);
        /* We know what to expect, so we do any byte flipping required here */
        if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) {
                struct mdt_body *body;
                body = req_capsule_server_get(pill, &RMF_MDT_BODY);
                CERROR ("Can't swab mdt_body\n");
                if (req->rq_replay && it_disposition(it, DISP_OPEN_OPEN) &&
                    !it_open_error(DISP_OPEN_OPEN, it)) {
                        /* If this is a successful OPEN request, we need to set
                         * replay handler and data early, so that if replay
                         * happens immediately after swabbing below, new reply
                         * is swabbed by that handler correctly. */
                        mdc_set_open_replay_data(NULL, NULL, req);
                if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
                        /* The eadata is opaque; just check that it is there.
                         * Eventually, obd_unpackmd() will check the contents. */
                        eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
                        /* Server advertised larger maxima: grow our cached
                         * per-device EA/cookie size limits to match. */
                        if (body->valid & OBD_MD_FLMODEASIZE) {
                                if (obddev->u.cli.cl_max_mds_easize <
                                        obddev->u.cli.cl_max_mds_easize =
                                        CDEBUG(D_INFO, "maxeasize become %d\n",
                                if (obddev->u.cli.cl_max_mds_cookiesize <
                                    body->max_cookiesize) {
                                        obddev->u.cli.cl_max_mds_cookiesize =
                                                body->max_cookiesize;
                                        CDEBUG(D_INFO, "cookiesize become %d\n",
                                               body->max_cookiesize);
                        /* We save the reply LOV EA in case we have to replay a
                         * create for recovery.  If we didn't allocate a large
                         * enough request buffer above we need to reallocate it
                         * here to hold the actual LOV EA.
                         *
                         * To not save LOV EA if request is not going to replay
                         * (for example error one). */
                        if ((it->it_op & IT_OPEN) && req->rq_replay) {
                                if (req_capsule_get_size(pill, &RMF_EADATA,
                                        mdc_realloc_openmsg(req, body);
                                        req_capsule_set_size(pill, &RMF_EADATA,
                                lmm = req_capsule_client_get(pill, &RMF_EADATA);
                                        memcpy(lmm, eadata, body->eadatasize);
                if (body->valid & OBD_MD_FLRMTPERM) {
                        struct mdt_remote_perm *perm;
                        LASSERT(client_is_remote(exp));
                        perm = req_capsule_server_get(pill, &RMF_ACL);
                        lustre_swab_mdt_remote_perm(perm);
                if (body->valid & OBD_MD_FLMDSCAPA) {
                        struct lustre_capa *capa, *p;
                        capa = req_capsule_server_get(pill, &RMF_CAPA1);
                        if (it->it_op & IT_OPEN) {
                                /* client fid capa will be checked in replay */
                                p = req_capsule_client_get(pill, &RMF_CAPA2);
                if (body->valid & OBD_MD_FLOSSCAPA) {
                        struct lustre_capa *capa;
                        capa = req_capsule_server_get(pill, &RMF_CAPA2);
679 * This long block is all about fixing up the lock and request state
680 * so that it is correct as of the moment _before_ the operation was
681 * applied; that way, the VFS will think that everything is normal and
682 * call Lustre's regular VFS methods.
684 * If we're performing a creation, that means that unless the creation
685 * failed with EEXIST, we should fake up a negative dentry.
687 * For everything else, we want to lookup to succeed.
689 * One additional note: if CREATE or OPEN succeeded, we add an extra
690 * reference to the request because we need to keep it around until
691 * ll_create/ll_open gets called.
693 * The server will return to us, in it_disposition, an indication of
694 * exactly what d.lustre.it_status refers to.
696 * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
697 * otherwise if DISP_OPEN_CREATE is set, then it status is the
698 * creation failure mode. In either case, one of DISP_LOOKUP_NEG or
699 * DISP_LOOKUP_POS will be set, indicating whether the child lookup
702 * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
705 int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
706 void *lmm, int lmmsize, struct lookup_intent *it,
707 int lookup_flags, struct ptlrpc_request **reqp,
708 ldlm_blocking_callback cb_blocking,
709 int extra_lock_flags)
711 struct ptlrpc_request *request;
712 struct lustre_handle old_lock;
713 struct lustre_handle lockh;
714 struct mdt_body *mdt_body;
715 struct ldlm_lock *lock;
720 CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
721 ", intent: %s flags %#o\n", op_data->op_namelen,
722 op_data->op_name, PFID(&op_data->op_fid2),
723 PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
726 if (fid_is_sane(&op_data->op_fid2) &&
727 (it->it_op & (IT_LOOKUP | IT_GETATTR))) {
728 /* We could just return 1 immediately, but since we should only
729 * be called in revalidate_it if we already have a lock, let's
731 ldlm_policy_data_t policy;
734 /* As not all attributes are kept under update lock, e.g.
735 owner/group/acls are under lookup lock, we need both
736 ibits for GETATTR. */
738 /* For CMD, UPDATE lock and LOOKUP lock can not be got
739 * at the same for cross-object, so we can not match
740 * the 2 lock at the same time FIXME: but how to handle
741 * the above situation */
742 policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
743 MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
745 mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED,
746 &op_data->op_fid2, LDLM_IBITS, &policy,
747 LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh);
749 memcpy(&it->d.lustre.it_lock_handle, &lockh,
751 it->d.lustre.it_lock_mode = mode;
754 /* Only return failure if it was not GETATTR by cfid
755 (from inode_revalidate) */
756 if (mode || op_data->op_namelen != 0)
760 /* lookup_it may be called only after revalidate_it has run, because
761 * revalidate_it cannot return errors, only zero. Returning zero causes
762 * this call to lookup, which *can* return an error.
764 * We only want to execute the request associated with the intent one
765 * time, however, so don't send the request again. Instead, skip past
766 * this and use the request from revalidate. In this case, revalidate
767 * never dropped its reference, so the refcounts are all OK */
768 if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
769 struct ldlm_enqueue_info einfo =
770 { LDLM_IBITS, it_to_lock_mode(it), cb_blocking,
771 ldlm_completion_ast, NULL, NULL };
773 /* For case if upper layer did not alloc fid, do it now. */
774 if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
775 rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
777 CERROR("Can't alloc new fid, rc %d\n", rc);
781 rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh,
782 lmm, lmmsize, extra_lock_flags);
785 memcpy(&it->d.lustre.it_lock_handle, &lockh, sizeof(lockh));
786 } else if (!fid_is_sane(&op_data->op_fid2) ||
787 !(it->it_flags & O_CHECK_STALE)) {
788 /* DISP_ENQ_COMPLETE set means there is extra reference on
789 * request referenced from this intent, saved for subsequent
790 * lookup. This path is executed when we proceed to this
791 * lookup, so we clear DISP_ENQ_COMPLETE */
792 it_clear_disposition(it, DISP_ENQ_COMPLETE);
794 request = *reqp = it->d.lustre.it_data;
795 LASSERT(request != NULL);
796 LASSERT(request != LP_POISON);
797 LASSERT(request->rq_repmsg != LP_POISON);
799 if (!it_disposition(it, DISP_IT_EXECD)) {
800 /* The server failed before it even started executing the
801 * intent, i.e. because it couldn't unpack the request. */
802 LASSERT(it->d.lustre.it_status != 0);
803 RETURN(it->d.lustre.it_status);
805 rc = it_open_error(DISP_IT_EXECD, it);
809 mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
810 LASSERT(mdt_body != NULL); /* mdc_enqueue checked */
812 /* If we were revalidating a fid/name pair, mark the intent in
813 * case we fail and get called again from lookup */
814 if (fid_is_sane(&op_data->op_fid2) &&
815 (it->it_flags & O_CHECK_STALE) &&
816 it->it_op != IT_GETATTR) {
817 it_set_disposition(it, DISP_ENQ_COMPLETE);
819 /* Also: did we find the same inode? */
820 if (!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1))
824 rc = it_open_error(DISP_LOOKUP_EXECD, it);
828 /* keep requests around for the multiple phases of the call
829 * this shows the DISP_XX must guarantee we make it into the call
831 if (!it_disposition(it, DISP_ENQ_CREATE_REF) &&
832 it_disposition(it, DISP_OPEN_CREATE) &&
833 !it_open_error(DISP_OPEN_CREATE, it)) {
834 it_set_disposition(it, DISP_ENQ_CREATE_REF);
835 ptlrpc_request_addref(request); /* balanced in ll_create_node */
837 if (!it_disposition(it, DISP_ENQ_OPEN_REF) &&
838 it_disposition(it, DISP_OPEN_OPEN) &&
839 !it_open_error(DISP_OPEN_OPEN, it)) {
840 it_set_disposition(it, DISP_ENQ_OPEN_REF);
841 ptlrpc_request_addref(request); /* balanced in ll_file_open */
842 /* BUG 11546 - eviction in the middle of open rpc processing */
843 OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout);
846 if (it->it_op & IT_CREAT) {
847 /* XXX this belongs in ll_create_it */
848 } else if (it->it_op == IT_OPEN) {
849 LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
851 LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP));
854 /* If we already have a matching lock, then cancel the new
855 * one. We have to set the data here instead of in
856 * mdc_enqueue, because we need to use the child's inode as
857 * the l_ast_data to match, and that's not available until
858 * intent_finish has performed the iget().) */
859 lock = ldlm_handle2lock(&lockh);
861 ldlm_policy_data_t policy = lock->l_policy_data;
862 LDLM_DEBUG(lock, "matching against this");
864 LASSERTF(fid_res_name_eq(&mdt_body->fid1,
865 &lock->l_resource->lr_name),
866 "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n",
867 (unsigned long)lock->l_resource->lr_name.name[0],
868 (unsigned long)lock->l_resource->lr_name.name[1],
869 (unsigned long)lock->l_resource->lr_name.name[2],
870 (unsigned long)fid_seq(&mdt_body->fid1),
871 (unsigned long)fid_oid(&mdt_body->fid1),
872 (unsigned long)fid_ver(&mdt_body->fid1));
875 memcpy(&old_lock, &lockh, sizeof(lockh));
876 if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
877 LDLM_IBITS, &policy, LCK_NL, &old_lock)) {
878 ldlm_lock_decref_and_cancel(&lockh,
879 it->d.lustre.it_lock_mode);
880 memcpy(&lockh, &old_lock, sizeof(old_lock));
881 memcpy(&it->d.lustre.it_lock_handle, &lockh,
885 CDEBUG(D_DENTRY,"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
886 op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
887 it->d.lustre.it_status, it->d.lustre.it_disposition, rc);