1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
6 * This file is part of the Lustre file system, http://www.lustre.org
7 * Lustre is a trademark of Cluster File Systems, Inc.
9 * You may have signed or agreed to another license before downloading
10 * this software. If so, you are bound by the terms and conditions
11 * of that agreement, and the following does not apply to you. See the
12 * LICENSE file included with this distribution for more information.
14 * If you did not agree to a different license, then this copy of Lustre
15 * is open source software; you can redistribute it and/or modify it
16 * under the terms of version 2 of the GNU General Public License as
17 * published by the Free Software Foundation.
19 * In either case, Lustre is distributed in the hope that it will be
20 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
21 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * license text for more details.
26 # define EXPORT_SYMTAB
28 #define DEBUG_SUBSYSTEM S_MDC
31 # include <linux/module.h>
32 # include <linux/pagemap.h>
33 # include <linux/miscdevice.h>
34 # include <linux/init.h>
36 # include <liblustre.h>
39 #include <linux/obd_class.h>
40 #include <linux/lustre_mds.h>
41 #include <linux/lustre_dlm.h>
42 #include <linux/lprocfs_status.h>
43 #include "mdc_internal.h"
45 int it_disposition(struct lookup_intent *it, int flag)
47 return it->d.lustre.it_disposition & flag;
49 EXPORT_SYMBOL(it_disposition);
51 void it_set_disposition(struct lookup_intent *it, int flag)
53 it->d.lustre.it_disposition |= flag;
55 EXPORT_SYMBOL(it_set_disposition);
57 void it_clear_disposition(struct lookup_intent *it, int flag)
59 it->d.lustre.it_disposition &= ~flag;
/* Map an intent opcode to the LDLM lock mode to request for it.
 * NOTE(review): this listing's numbering is non-contiguous — the
 * return statements for each branch (and the function braces) were
 * elided by extraction; confirm the actual modes against full source. */
62 static int it_to_lock_mode(struct lookup_intent *it)
64 /* CREAT needs to be tested before open (both could be set) */
65 if (it->it_op & IT_CREAT)
67 else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP))
/* Return the server-reported status for the given intent @phase.
 * The checks run from the latest phase (OPEN) back to the earliest
 * (IT_EXECD): if the server reached a phase at or past the one the
 * caller asks about, it_status applies to that phase and is returned.
 * NOTE(review): closing braces and the fall-through/final return are
 * elided in this listing (non-contiguous numbering). */
74 int it_open_error(int phase, struct lookup_intent *it)
76 if (it_disposition(it, DISP_OPEN_OPEN)) {
77 if (phase >= DISP_OPEN_OPEN)
78 return it->d.lustre.it_status;
83 if (it_disposition(it, DISP_OPEN_CREATE)) {
84 if (phase >= DISP_OPEN_CREATE)
85 return it->d.lustre.it_status;
90 if (it_disposition(it, DISP_LOOKUP_EXECD)) {
91 if (phase >= DISP_LOOKUP_EXECD)
92 return it->d.lustre.it_status;
97 if (it_disposition(it, DISP_IT_EXECD)) {
98 if (phase >= DISP_IT_EXECD)
99 return it->d.lustre.it_status;
/* None of the expected disposition bits were set — log the raw state. */
103 CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
104 it->d.lustre.it_status);
108 EXPORT_SYMBOL(it_open_error);
110 /* this must be called on a lockh that is known to have a referenced lock */
/* Attach @data (an inode pointer) as the AST data of the lock behind
 * handle @l, under the namespace lock.  If the lock already carries a
 * different inode, it must be one that is being freed (I_FREEING) —
 * anything else indicates a stale-data bug, hence the LASSERTF. */
111 void mdc_set_lock_data(__u64 *l, void *data)
113 struct ldlm_lock *lock;
114 struct lustre_handle *lockh = (struct lustre_handle *)l;
/* Caller guarantees the handle references a live lock, so this cannot
 * return NULL here. */
122 lock = ldlm_handle2lock(lockh);
124 LASSERT(lock != NULL);
125 l_lock(&lock->l_resource->lr_namespace->ns_lock);
127 if (lock->l_ast_data && lock->l_ast_data != data) {
128 struct inode *new_inode = data;
129 struct inode *old_inode = lock->l_ast_data;
130 LASSERTF(old_inode->i_state & I_FREEING,
131 "Found existing inode %p/%lu/%u state %lu in lock: "
132 "setting data to %p/%lu/%u\n", old_inode,
133 old_inode->i_ino, old_inode->i_generation,
135 new_inode, new_inode->i_ino, new_inode->i_generation);
138 lock->l_ast_data = data;
139 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* NOTE(review): the lock reference taken by ldlm_handle2lock() is
 * presumably dropped between lines 139 and 144 of the full source —
 * that line is elided in this listing; confirm LDLM_LOCK_PUT is there. */
144 EXPORT_SYMBOL(mdc_set_lock_data);
/* Walk all locks on the resource named by @fid and apply iterator @it
 * with @data to each (used to swap/clear the cached inode in cbdata).
 * NOTE(review): the tail of the ldlm_change_cbdata() call and the
 * return statement are elided in this listing. */
146 int mdc_change_cbdata(struct obd_export *exp, struct ll_fid *fid,
147 ldlm_iterator_t it, void *data)
/* Resource id is built from the fid's object id + generation. */
149 struct ldlm_res_id res_id = { .name = {0} };
152 res_id.name[0] = fid->id;
153 res_id.name[1] = fid->generation;
155 ldlm_change_cbdata(class_exp2obd(exp)->obd_namespace, &res_id, it,
/* Drop the replay flag from a request that completed with error @rc,
 * so failed operations are not replayed during recovery.  The flag is
 * cleared under rq_lock; the actual "rq_replay = 0" assignment falls on
 * a line elided by this listing (between 167 and 169). */
162 static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
164 /* Don't hold error requests for replay. */
165 if (req->rq_replay) {
166 unsigned long irqflags;
167 spin_lock_irqsave(&req->rq_lock, irqflags);
169 spin_unlock_irqrestore(&req->rq_lock, irqflags);
/* A transno on a failed request is unexpected — shout about it. */
171 if (rc && req->rq_transno != 0) {
172 DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
/* Round @val up — body entirely elided by this listing (original lines
 * 178-185 missing).  Presumably rounds to the next power of two, per
 * the "grow the request to the next power-of-two size" comment in
 * mdc_enqueue below — TODO confirm against full source. */
177 static int round_up(int val)
187 /* Save a large LOV EA into the request buffer so that it is available
188 * for replay. We don't do this in the initial request because the
189 * original request doesn't need this buffer (at most it sends just the
190 * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty
191 * buffer and may also be difficult to allocate and save a very large
192 * request buffer for each open. (bug 5707)
194 * OOM here may cause recovery failure if lmm is needed (only for the
195 * original open if the MDS crashed just when this client also OOM'd)
196 * but this is incredibly unlikely, and questionable whether the client
197 * could do MDS recovery under OOM anyways... */
198 static void mdc_realloc_openmsg(struct ptlrpc_request *req,
199 struct mds_body *body, int size[5])
201 int new_size, old_size;
202 struct lustre_msg *new_msg;
/* Compute old vs. new message size: only buffer 4 (the EA) grows. */
205 old_size = lustre_msg_size(5, size);
207 size[4] = body->eadatasize;
208 new_size = lustre_msg_size(5, size);
209 OBD_ALLOC(new_msg, new_size);
210 if (new_msg != NULL) {
211 struct lustre_msg *old_msg = req->rq_reqmsg;
212 unsigned long irqflags;
214 DEBUG_REQ(D_INFO, req, "replace reqmsg for larger EA %u\n",
/* Copy the old message into the larger buffer, then swap it into the
 * request under rq_lock so a concurrent sender sees a consistent pair
 * (rq_reqmsg, rq_reqlen). */
216 memcpy(new_msg, old_msg, old_size);
217 new_msg->buflens[4] = body->eadatasize;
219 spin_lock_irqsave(&req->rq_lock, irqflags);
220 req->rq_reqmsg = new_msg;
221 req->rq_reqlen = new_size;
222 spin_unlock_irqrestore(&req->rq_lock, irqflags);
224 OBD_FREE(old_msg, old_size);
/* NOTE(review): these two lines presumably sit in the allocation-failure
 * branch (the `} else {` is elided by this listing): on OOM the EA is
 * dropped from the reply so replay proceeds without it — confirm. */
226 body->valid &= ~OBD_MD_FLEASIZE;
227 body->eadatasize = 0;
231 /* We always reserve enough space in the reply packet for a stripe MD, because
232 * we don't know in advance the file type. */
/* Build and send an LDLM_ENQUEUE RPC carrying an intent (open, unlink,
 * getattr/lookup, readdir) to the MDS, then unpack the intent result
 * into @it.  NOTE(review): this listing is missing many lines (the
 * numbering is non-contiguous) — several parameters (lock_type,
 * lock_mode, lmm, lmmsize), error-path RETURNs, braces and goto labels
 * are elided; do not treat the shown control flow as complete. */
233 int mdc_enqueue(struct obd_export *exp,
235 struct lookup_intent *it,
237 struct mdc_op_data *data,
238 struct lustre_handle *lockh,
241 ldlm_completion_callback cb_completion,
242 ldlm_blocking_callback cb_blocking,
243 void *cb_data, int extra_lock_flags)
245 struct ptlrpc_request *req;
246 struct obd_device *obddev = class_exp2obd(exp);
247 struct ldlm_res_id res_id =
248 { .name = {data->fid1.id, data->fid1.generation} };
/* Default to a LOOKUP inodebits lock; overridden per-intent below. */
249 ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } };
250 struct ldlm_request *lockreq;
251 struct ldlm_intent *lit;
252 int size[6] = {[MDS_REQ_INTENT_LOCKREQ_OFF] = sizeof(*lockreq),
253 [MDS_REQ_INTENT_IT_OFF] = sizeof(*lit) };
254 struct ldlm_reply *dlm_rep;
255 int repsize[4] = {sizeof(*dlm_rep),
256 sizeof(struct mds_body),
257 obddev->u.cli.cl_max_mds_easize};
259 unsigned long irqflags;
260 int repbufcnt = 3, req_buffers = 2;
261 int rc, flags = extra_lock_flags | LDLM_FL_HAS_INTENT;
264 LASSERTF(lock_type == LDLM_IBITS, "lock type %d\n", lock_type);
265 // LDLM_DEBUG_NOLOCK("mdsintent=%s,name=%s,dir=%lu",
266 // ldlm_it2str(it->it_op), it_name, it_inode->i_ino);
/* --- Per-intent request assembly: each branch sizes the request
 * buffers, picks the inodebits, preps the RPC and packs the record. */
268 if (it->it_op & IT_OPEN) {
269 it->it_create_mode |= S_IFREG;
271 size[req_buffers++] = sizeof(struct mds_rec_create);
272 size[req_buffers++] = data->namelen + 1;
273 /* As an optimization, we allocate an RPC request buffer for
274 * at least a default-sized LOV EA even if we aren't sending
275 * one. We grow the whole request to the next power-of-two
276 * size since we get that much from a slab allocation anyways.
277 * This avoids an allocation below in the common case where
278 * we need to save a default-sized LOV EA for open replay. */
279 size[req_buffers++] = max(lmmsize,
280 obddev->u.cli.cl_default_mds_easize);
281 rc = lustre_msg_size(req_buffers, size);
283 size[req_buffers - 1] = min(size[req_buffers - 1] +
285 obddev->u.cli.cl_max_mds_easize);
287 if (it->it_flags & O_JOIN_FILE) {
288 __u64 head_size = *(__u32*)cb_data;
289 __u32 tsize = *(__u32*)lmm;
291 /* join is like an unlink of the tail */
292 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
293 size[req_buffers++] = sizeof(struct mds_rec_join);
294 req = ptlrpc_prep_req(class_exp2cliimp(exp),
295 LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
296 req_buffers, size, NULL);
297 /* when joining file, cb_data and lmm args together
298 * indicate the head file size*/
299 mdc_join_pack(req, req_buffers - 1, data,
300 (head_size << 32) | tsize);
304 req = ptlrpc_prep_req(class_exp2cliimp(exp),
305 LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
306 req_buffers, size, NULL);
310 spin_lock_irqsave (&req->rq_lock, irqflags);
312 spin_unlock_irqrestore (&req->rq_lock, irqflags);
314 /* pack the intent */
315 lit = lustre_msg_buf(req->rq_reqmsg, MDS_REQ_INTENT_IT_OFF,
317 lit->opc = (__u64)it->it_op;
319 /* pack the intended request */
320 mdc_open_pack(req, MDS_REQ_INTENT_REC_OFF, data,
321 it->it_create_mode, 0,
322 it->it_flags, lmm, lmmsize);
324 repsize[repbufcnt++] = LUSTRE_POSIX_ACL_MAX_SIZE;
325 } else if (it->it_op & IT_UNLINK) {
326 size[req_buffers++] = sizeof(struct mds_rec_unlink);
327 size[req_buffers++] = data->namelen + 1;
328 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
329 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
330 LDLM_ENQUEUE, req_buffers, size, NULL);
334 /* pack the intent */
335 lit = lustre_msg_buf(req->rq_reqmsg, MDS_REQ_INTENT_IT_OFF,
337 lit->opc = (__u64)it->it_op;
339 /* pack the intended request */
340 mdc_unlink_pack(req, MDS_REQ_INTENT_REC_OFF, data);
341 /* get ready for the reply */
342 repsize[repbufcnt++] = obddev->u.cli.cl_max_mds_cookiesize;
343 } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) {
344 obd_valid valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
345 OBD_MD_FLACL | OBD_MD_FLMODEASIZE;
346 size[req_buffers++] = sizeof(struct mds_body);
347 size[req_buffers++] = data->namelen + 1;
349 if (it->it_op & IT_GETATTR)
350 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
352 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
353 LDLM_ENQUEUE, req_buffers, size, NULL);
357 /* pack the intent */
358 lit = lustre_msg_buf(req->rq_reqmsg, MDS_REQ_INTENT_IT_OFF,
360 lit->opc = (__u64)it->it_op;
362 /* pack the intended request */
363 mdc_getattr_pack(req, MDS_REQ_INTENT_REC_OFF, valid,
365 /* get ready for the reply */
366 repsize[repbufcnt++] = LUSTRE_POSIX_ACL_MAX_SIZE;
367 } else if (it->it_op == IT_READDIR) {
368 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
369 req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
370 LDLM_ENQUEUE, 1, size, NULL);
374 /* get ready for the reply */
381 /* get ready for the reply */
382 req->rq_replen = lustre_msg_size(repbufcnt, repsize);
/* --- Send the enqueue, serialized through the client RPC lock. */
384 mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
385 rc = ldlm_cli_enqueue(exp, req, obddev->obd_namespace, res_id,
386 lock_type, &policy,lock_mode, &flags, cb_blocking,
387 cb_completion, NULL, cb_data, NULL, 0, NULL,
389 mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
391 /* Similarly, if we're going to replay this request, we don't want to
392 * actually get a lock, just perform the intent. */
393 if (req->rq_transno || req->rq_replay) {
394 lockreq = lustre_msg_buf(req->rq_reqmsg,
395 MDS_REQ_INTENT_LOCKREQ_OFF,
397 lockreq->lock_flags |= LDLM_FL_INTENT_ONLY;
400 /* This can go when we're sure that this can never happen */
401 LASSERT(rc != -ENOENT);
/* ABORTED means the intent ran but no lock was granted: clear the
 * handle so callers don't treat it as a valid lock. */
402 if (rc == ELDLM_LOCK_ABORTED) {
404 memset(lockh, 0, sizeof(*lockh));
406 } else if (rc != 0) {
407 CERROR("ldlm_cli_enqueue: %d\n", rc);
408 LASSERTF(rc < 0, "rc %d\n", rc);
409 mdc_clear_replay_flag(req, rc);
410 ptlrpc_req_finished(req);
412 } else { /* rc = 0 */
413 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
416 /* If the server gave us back a different lock mode, we should
417 * fix up our variables. */
418 if (lock->l_req_mode != lock_mode) {
419 ldlm_lock_addref(lockh, lock->l_req_mode);
420 ldlm_lock_decref(lockh, lock_mode);
421 lock_mode = lock->l_req_mode;
424 ldlm_lock_allow_match(lock);
/* --- Unpack the intent result from the reply into @it. */
428 dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
429 LASSERT(dlm_rep != NULL); /* checked by ldlm_cli_enqueue() */
430 LASSERT_REPSWABBED(req, 0); /* swabbed by ldlm_cli_enqueue() */
432 it->d.lustre.it_disposition = (int) dlm_rep->lock_policy_res1;
433 it->d.lustre.it_status = (int) dlm_rep->lock_policy_res2;
434 it->d.lustre.it_lock_mode = lock_mode;
435 it->d.lustre.it_data = req;
437 if (it->d.lustre.it_status < 0 && req->rq_replay)
438 mdc_clear_replay_flag(req, it->d.lustre.it_status);
440 DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
441 it->it_op,it->d.lustre.it_disposition,it->d.lustre.it_status);
443 /* We know what to expect, so we do any byte flipping required here */
444 LASSERT(repbufcnt == 4 || repbufcnt == 1);
445 if (repbufcnt == 4) {
446 struct mds_body *body;
448 body = lustre_swab_repbuf(req, 1, sizeof (*body),
449 lustre_swab_mds_body);
451 CERROR ("Can't swab mds_body\n");
455 if ((body->valid & OBD_MD_FLEASIZE) != 0) {
456 /* The eadata is opaque; just check that it is there.
457 * Eventually, obd_unpackmd() will check the contents */
458 eadata = lustre_swab_repbuf(req, 2, body->eadatasize,
460 if (eadata == NULL) {
461 CERROR ("Missing/short eadata\n");
/* Server told us its real max EA/cookie sizes; grow our cached
 * maxima so later requests reserve enough reply space. */
464 if (body->valid & OBD_MD_FLMODEASIZE) {
465 if (obddev->u.cli.cl_max_mds_easize <
467 obddev->u.cli.cl_max_mds_easize =
469 CDEBUG(D_INFO, "maxeasize become %d\n",
472 if (obddev->u.cli.cl_max_mds_cookiesize <
473 body->max_cookiesize) {
474 obddev->u.cli.cl_max_mds_cookiesize =
475 body->max_cookiesize;
476 CDEBUG(D_INFO, "cookiesize become %d\n",
477 body->max_cookiesize);
480 /* We save the reply LOV EA in case we have to replay
481 * a create for recovery. If we didn't allocate a
482 * large enough request buffer above we need to
483 * reallocate it here to hold the actual LOV EA. */
484 if (it->it_op & IT_OPEN) {
485 int pos = MDS_REQ_INTENT_REC_OFF + 2;
487 if (req->rq_reqmsg->buflens[pos] <
489 mdc_realloc_openmsg(req, body, size);
491 lmm = lustre_msg_buf(req->rq_reqmsg, pos,
494 memcpy(lmm, eadata, body->eadatasize);
501 EXPORT_SYMBOL(mdc_enqueue);
/* NOTE(review): the `/\*` opening this comment block (original line 503)
 * was elided by the listing extraction, as were many code lines below
 * (braces, RETURNs, else-branches) — the numbering is non-contiguous. */
504 * This long block is all about fixing up the lock and request state
505 * so that it is correct as of the moment _before_ the operation was
506 * applied; that way, the VFS will think that everything is normal and
507 * call Lustre's regular VFS methods.
509 * If we're performing a creation, that means that unless the creation
510 * failed with EEXIST, we should fake up a negative dentry.
512 * For everything else, we want to lookup to succeed.
514 * One additional note: if CREATE or OPEN succeeded, we add an extra
515 * reference to the request because we need to keep it around until
516 * ll_create/ll_open gets called.
518 * The server will return to us, in it_disposition, an indication of
519 * exactly what d.lustre.it_status refers to.
521 * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
522 * otherwise if DISP_OPEN_CREATE is set, then it status is the
523 * creation failure mode. In either case, one of DISP_LOOKUP_NEG or
524 * DISP_LOOKUP_POS will be set, indicating whether the child lookup
527 * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
530 int mdc_intent_lock(struct obd_export *exp, struct mdc_op_data *op_data,
531 void *lmm, int lmmsize, struct lookup_intent *it,
532 int lookup_flags, struct ptlrpc_request **reqp,
533 ldlm_blocking_callback cb_blocking, int extra_lock_flags)
535 struct lustre_handle lockh;
536 struct ptlrpc_request *request;
538 struct mds_body *mds_body;
539 struct lustre_handle old_lock;
540 struct ldlm_lock *lock;
544 CDEBUG(D_DLMTRACE,"name: %.*s in inode "LPU64", intent: %s flags %#o\n",
545 op_data->namelen, op_data->name, op_data->fid1.id,
546 ldlm_it2str(it->it_op), it->it_flags);
/* --- Revalidate fast path: if we already know the child fid, try to
 * match an existing local lock (CR, then CW, then PR) instead of
 * sending an enqueue at all. */
548 if (op_data->fid2.id &&
549 (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR)) {
550 /* We could just return 1 immediately, but since we should only
551 * be called in revalidate_it if we already have a lock, let's
553 struct ldlm_res_id res_id = {.name ={op_data->fid2.id,
554 op_data->fid2.generation}};
555 struct lustre_handle lockh;
556 ldlm_policy_data_t policy;
559 policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
560 MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
561 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
562 LDLM_FL_BLOCK_GRANTED, &res_id,
563 LDLM_IBITS, &policy, LCK_CR, &lockh);
566 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
567 LDLM_FL_BLOCK_GRANTED, &res_id,
568 LDLM_IBITS, &policy, LCK_CW, &lockh);
572 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
573 LDLM_FL_BLOCK_GRANTED, &res_id,
574 LDLM_IBITS, &policy, LCK_PR, &lockh);
577 memcpy(&it->d.lustre.it_lock_handle, &lockh,
579 it->d.lustre.it_lock_mode = mode;
584 /* lookup_it may be called only after revalidate_it has run, because
585 * revalidate_it cannot return errors, only zero. Returning zero causes
586 * this call to lookup, which *can* return an error.
588 * We only want to execute the request associated with the intent one
589 * time, however, so don't send the request again. Instead, skip past
590 * this and use the request from revalidate. In this case, revalidate
591 * never dropped its reference, so the refcounts are all OK */
592 if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
594 rc = mdc_enqueue(exp, LDLM_IBITS, it, it_to_lock_mode(it),
595 op_data, &lockh, lmm, lmmsize,
596 ldlm_completion_ast, cb_blocking, NULL,
600 memcpy(&it->d.lustre.it_lock_handle, &lockh, sizeof(lockh));
601 } else if (!op_data->fid2.id) {
602 /* DISP_ENQ_COMPLETE set means there is extra reference on
603 * request referenced from this intent, saved for subsequent
604 * lookup. This path is executed when we proceed to this
605 * lookup, so we clear DISP_ENQ_COMPLETE */
606 it_clear_disposition(it, DISP_ENQ_COMPLETE);
608 request = *reqp = it->d.lustre.it_data;
609 LASSERT(request != NULL);
610 LASSERT(request != LP_POISON);
611 LASSERT(request->rq_repmsg != LP_POISON);
613 /* If we're doing an IT_OPEN which did not result in an actual
614 * successful open, then we need to remove the bit which saves
615 * this request for unconditional replay.
617 * It's important that we do this first! Otherwise we might exit the
618 * function without doing so, and try to replay a failed create
620 if (it->it_op & IT_OPEN && request->rq_replay &&
621 (!it_disposition(it, DISP_OPEN_OPEN) ||it->d.lustre.it_status != 0))
622 mdc_clear_replay_flag(request, it->d.lustre.it_status);
624 if (!it_disposition(it, DISP_IT_EXECD)) {
625 /* The server failed before it even started executing the
626 * intent, i.e. because it couldn't unpack the request. */
627 LASSERT(it->d.lustre.it_status != 0);
628 RETURN(it->d.lustre.it_status);
630 rc = it_open_error(DISP_IT_EXECD, it);
634 mds_body = lustre_msg_buf(request->rq_repmsg, 1, sizeof(*mds_body));
635 LASSERT(mds_body != NULL); /* mdc_enqueue checked */
636 LASSERT_REPSWABBED(request, 1); /* mdc_enqueue swabbed */
638 /* If we were revalidating a fid/name pair, mark the intent in
639 * case we fail and get called again from lookup */
640 if (op_data->fid2.id) {
641 it_set_disposition(it, DISP_ENQ_COMPLETE);
642 /* Also: did we find the same inode? */
643 if (memcmp(&op_data->fid2, &mds_body->fid1, sizeof(op_data->fid2)))
647 rc = it_open_error(DISP_LOOKUP_EXECD, it);
651 /* keep requests around for the multiple phases of the call
652 * this shows the DISP_XX must guarantee we make it into the call
654 if (it_disposition(it, DISP_OPEN_CREATE) &&
655 !it_open_error(DISP_OPEN_CREATE, it))
656 ptlrpc_request_addref(request); /* balanced in ll_create_node */
657 if (it_disposition(it, DISP_OPEN_OPEN) &&
658 !it_open_error(DISP_OPEN_OPEN, it))
659 ptlrpc_request_addref(request); /* balanced in ll_file_open */
661 if (it->it_op & IT_CREAT) {
662 /* XXX this belongs in ll_create_it */
663 } else if (it->it_op == IT_OPEN) {
664 LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
666 LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP));
669 /* If we already have a matching lock, then cancel the new
670 * one. We have to set the data here instead of in
671 * mdc_enqueue, because we need to use the child's inode as
672 * the l_ast_data to match, and that's not available until
673 * intent_finish has performed the iget().) */
674 lock = ldlm_handle2lock(&lockh);
676 ldlm_policy_data_t policy = lock->l_policy_data;
677 LDLM_DEBUG(lock, "matching against this");
/* Match in LCK_NL mode against our own handle: if a pre-existing lock
 * covers this resource, cancel the one we just got and keep the old. */
679 memcpy(&old_lock, &lockh, sizeof(lockh));
680 if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
681 LDLM_IBITS, &policy, LCK_NL, &old_lock)) {
682 ldlm_lock_decref_and_cancel(&lockh,
683 it->d.lustre.it_lock_mode);
684 memcpy(&lockh, &old_lock, sizeof(old_lock));
685 memcpy(&it->d.lustre.it_lock_handle, &lockh,
689 CDEBUG(D_DENTRY,"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
690 op_data->namelen, op_data->name, ldlm_it2str(it->it_op),
691 it->d.lustre.it_status, it->d.lustre.it_disposition, rc);
695 EXPORT_SYMBOL(mdc_intent_lock);