4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 #include <linux/sched.h>
39 #include <linux/quotaops.h>
41 #define DEBUG_SUBSYSTEM S_LLITE
43 #include <obd_support.h>
44 #include <lustre_lite.h>
45 #include <lustre/lustre_idl.h>
46 #include <lustre_dlm.h>
48 #include "llite_internal.h"
/* RCU callback: release a dentry's ll_dentry_data once all RCU readers
 * have finished with it.  Queued from ll_release() via call_rcu() on
 * lld_rcu_head.  NOTE(review): the actual free of 'lld' and the closing
 * brace are in lines missing from this chunk — confirm against upstream. */
50 static void free_dentry_data(struct rcu_head *head)
52 struct ll_dentry_data *lld;
/* recover the containing ll_dentry_data from the embedded rcu_head */
54 lld = container_of(head, struct ll_dentry_data, lld_rcu_head);
/* ->d_release: tear down the Lustre-private data attached to a dentry.
 * Releases any saved lookup intent, asserts the cwd/mnt counts have
 * dropped to zero, and defers freeing of the ll_dentry_data to RCU so
 * concurrent lockless d_compare callers stay safe. */
58 /* should NOT be called with the dcache lock, see fs/dcache.c */
59 static void ll_release(struct dentry *de)
61 struct ll_dentry_data *lld;
/* NFS may install our d_op methods on dentries that never got lld */
65 if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
/* a lookup intent saved in lld (see ll_revalidate_nd) must be released
 * and freed here, otherwise its request reference would leak */
69 ll_intent_release(lld->lld_it);
70 OBD_FREE(lld->lld_it, sizeof(*lld->lld_it));
72 LASSERT(lld->lld_cwd_count == 0);
73 LASSERT(lld->lld_mnt_count == 0);
/* free under RCU grace period — see free_dentry_data() */
75 call_rcu(&lld->lld_rcu_head, free_dentry_data);
80 /* Compare if two dentries are the same. Don't match if the existing dentry
81 * is marked invalid. Returns 1 if different, 0 if the same.
83 * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
84 * an AST before calling d_revalidate_it(). The dentry still exists (marked
85 * INVALID) so d_lookup() matches it, but we have no lock on it (so
86 * lock_match() fails) and we spin around real_lookup(). */
/* Two signatures for ->d_compare depending on kernel API generation:
 * kernels with HAVE_D_COMPARE_7ARGS pass name/len directly, older ones
 * pass two qstrs and we must dig the dentry out of the qstr. */
87 #ifdef HAVE_D_COMPARE_7ARGS
88 int ll_dcompare(const struct dentry *parent, const struct inode *pinode,
89 const struct dentry *dentry, const struct inode *inode,
90 unsigned int len, const char *str, const struct qstr *name)
92 int ll_dcompare(struct dentry *parent, struct qstr *d_name, struct qstr *name)
95 #ifdef HAVE_D_COMPARE_7ARGS
/* name content mismatch -> "different" (length check is in lines not
 * shown for this variant — NOTE(review): confirm against upstream) */
101 if (memcmp(str, name->name, len))
104 struct dentry *dentry;
/* old API: compare lengths first, then the bytes */
107 if (d_name->len != name->len)
110 if (memcmp(d_name->name, name->name, name->len))
113 /* XXX: d_name must be in-dentry structure */
114 dentry = container_of(d_name, struct dentry, d_name); /* ugh */
117 CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n",
118 name->len, name->name, dentry, dentry->d_flags,
121 /* mountpoint is always valid */
122 if (d_mountpoint((struct dentry *)dentry))
/* invalid dentries must not be matched, so a fresh lookup happens */
125 if (d_lustre_invalid(dentry))
/* ldlm lock iterator callback: stop on the first lock found, except
 * locks that are being cancelled with data discard — those do not count
 * as "inode still has locks".  NOTE(review): the opening 'if (' of the
 * flag test is in a line missing from this chunk. */
131 static inline int return_if_equal(struct ldlm_lock *lock, void *data)
134 (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
135 (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
136 return LDLM_ITER_CONTINUE;
/* any other lock: report "found" by stopping the iteration */
137 return LDLM_ITER_STOP;
140 /* find any ldlm lock of the inode in mdc and lov
/* Returns nonzero iff some ldlm lock still covers this inode, checking
 * first the metadata export (mdc) and then the data export (lov) via
 * the inode's stripe metadata.  Used by ll_ddelete/ll_d_iput to decide
 * whether the inode can be recycled early. */
144 static int find_cbdata(struct inode *inode)
146 struct ll_sb_info *sbi = ll_i2sbi(inode);
147 struct lov_stripe_md *lsm;
/* MD locks: ask mdc; return_if_equal stops on the first live lock */
152 rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
153 return_if_equal, NULL);
/* data locks: need the stripe md; ref taken here, dropped below */
157 lsm = ccc_inode_lsm_get(inode);
161 rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
162 ccc_inode_lsm_put(inode, lsm);
168 * Called when last reference to a dentry is dropped and dcache wants to know
169 * whether or not it should cache it:
170 * - return 1 to delete the dentry immediately
171 * - return 0 to cache the dentry
172 * Should NOT be called with the dcache lock, see fs/dcache.c
174 static int ll_ddelete(HAVE_D_DELETE_CONST struct dentry *de)
179 CDEBUG(D_DENTRY, "%s dentry %.*s (%p, parent %p, inode %p) %s%s\n",
180 d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping",
181 de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
182 d_unhashed((struct dentry *)de) ? "" : "hashed,",
183 list_empty(&de->d_subdirs) ? "" : "subdirs");
/* refcount convention differs across kernels: pre-2.6.38 (with the
 * global dcache_lock) the count is already 0 here, newer kernels drop
 * the last reference only after ->d_delete returns */
185 #ifdef HAVE_DCACHE_LOCK
186 LASSERT(d_refcount(de) == 0);
188 /* kernel >= 2.6.38 last refcount is decreased after this function. */
189 LASSERT(d_refcount(de) == 1);
192 /* Disable this piece of code temproarily because this is called
193 * inside dcache_lock so it's not appropriate to do lots of work
196 /* if not ldlm lock for this inode, set i_nlink to 0 so that
197 * this inode can be recycled later b=20433 */
198 if (de->d_inode && !find_cbdata(de->d_inode))
199 clear_nlink(de->d_inode);
/* invalid dentries are not worth caching — delete immediately */
202 if (d_lustre_invalid((struct dentry *)de))
/* Lazily attach ll_dentry_data to de->d_fsdata.  The allocation happens
 * outside d_lock; the pointer is only installed under d_lock and only
 * if d_fsdata is still NULL, so a concurrent winner is respected.
 * NOTE(review): the install and the losing-race free path are in lines
 * missing from this chunk — confirm against upstream. */
207 static int ll_set_dd(struct dentry *de)
212 CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
213 de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
216 if (de->d_fsdata == NULL) {
217 struct ll_dentry_data *lld;
220 if (likely(lld != NULL)) {
/* re-check under d_lock: another thread may have set d_fsdata */
221 spin_lock(&de->d_lock);
222 if (likely(de->d_fsdata == NULL))
226 spin_unlock(&de->d_lock);
/* Initialize Lustre dentry state: optionally (block != 0) make sure
 * ll_dentry_data exists, optionally (init_sa != 0) reset the statahead
 * generation, and install ll_d_ops on kernels that do not set d_op at
 * d_alloc() time. */
235 int ll_dops_init(struct dentry *de, int block, int init_sa)
237 struct ll_dentry_data *lld = ll_d2d(de);
/* caller asked for guaranteed lld — allocate it (path in missing lines,
 * presumably via ll_set_dd — NOTE(review): confirm) */
240 if (lld == NULL && block != 0) {
/* restart statahead accounting for this dentry */
248 if (lld != NULL && init_sa != 0)
249 lld->lld_sa_generation = 0;
251 #ifdef HAVE_DCACHE_LOCK
252 de->d_op = &ll_d_ops;
254 /* kernel >= 2.6.38 d_op is set in d_alloc() */
255 LASSERT(de->d_op == &ll_d_ops);
/* Drop the ldlm lock reference held by a lookup intent, if any.
 * Also allows LAYOUT-bit locks to be matched from now on (the layout
 * must have been applied to the inode before this is called).
 * Safe to call more than once: it_lock_mode is zeroed after decref. */
260 void ll_intent_drop_lock(struct lookup_intent *it)
262 struct lustre_handle *handle;
264 if (it->it_op && it->d.lustre.it_lock_mode) {
265 struct ldlm_lock *lock;
267 handle = (struct lustre_handle *)&it->d.lustre.it_lock_handle;
268 lock = ldlm_handle2lock(handle);
270 /* it can only be allowed to match after layout is
271 * applied to inode otherwise false layout would be
272 * seen. Applying layout shoud happen before dropping
273 * the intent lock. */
274 if (it->d.lustre.it_lock_bits & MDS_INODELOCK_LAYOUT)
275 ldlm_lock_allow_match(lock);
279 CDEBUG(D_DLMTRACE, "releasing lock with cookie "LPX64
280 " from it %p\n", handle->cookie, it);
281 ldlm_lock_decref(handle, it->d.lustre.it_lock_mode);
283 /* bug 494: intent_release may be called multiple times, from
284 * this thread and we don't want to double-decref this lock */
285 it->d.lustre.it_lock_mode = 0;
/* Fully release a lookup intent: drop its ldlm lock reference and
 * release any ptlrpc request reference recorded by the open/create/
 * revalidate dispositions, then clear the disposition state so a
 * repeated release is harmless. */
289 void ll_intent_release(struct lookup_intent *it)
293 CDEBUG(D_INFO, "intent %p released\n", it);
294 ll_intent_drop_lock(it);
295 /* We are still holding extra reference on a request, need to free it */
296 if (it_disposition(it, DISP_ENQ_OPEN_REF))
297 ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
298 if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
299 ptlrpc_req_finished(it->d.lustre.it_data);
300 if (it_disposition(it, DISP_ENQ_COMPLETE)) /* saved req from revalidate
302 ptlrpc_req_finished(it->d.lustre.it_data);
/* reset so the same intent can be released again safely */
304 it->d.lustre.it_disposition = 0;
305 it->d.lustre.it_data = NULL;
/* Mark every dentry alias of 'inode' Lustre-invalid, under the dcache
 * lock for this inode.  Invalid dentries stop matching in ll_dcompare
 * and are deleted on last dput (see ll_ddelete).  Invalidating the root
 * dentry would be a bug, hence the loud diagnostic for "/". */
309 void ll_invalidate_aliases(struct inode *inode)
311 struct dentry *dentry;
314 LASSERT(inode != NULL);
316 CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
317 inode->i_ino, inode->i_generation, inode);
319 ll_lock_dcache(inode);
320 cfs_list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
321 CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
322 "inode %p flags %d\n", dentry->d_name.len,
323 dentry->d_name.name, dentry, dentry->d_parent,
324 dentry->d_inode, dentry->d_flags,
326 if (dentry->d_name.len == 1 && dentry->d_name.name[0] == '/') {
327 CERROR("called on root (?) dentry=%p, inode=%p "
328 "ino=%lu\n", dentry, inode, inode->i_ino);
329 lustre_dump_dentry(dentry, 1);
330 libcfs_debug_dumpstack(NULL);
333 d_lustre_invalidate(dentry);
335 ll_unlock_dcache(inode);
/* Finish intent-based revalidation: for a positive result, refresh the
 * dentry's inode from the MD reply in 'request'.  A negative-lookup
 * disposition short-circuits (return path in lines missing from this
 * chunk — NOTE(review): confirm against upstream). */
340 int ll_revalidate_it_finish(struct ptlrpc_request *request,
341 struct lookup_intent *it,
350 if (it_disposition(it, DISP_LOOKUP_NEG))
/* update/attach the inode with attributes from the server reply */
353 rc = ll_prep_inode(&de->d_inode, request, NULL);
/* Post-lookup lock bookkeeping: bind the intent's MD lock to the
 * resolved inode so blocking ASTs can find it, then immediately drop
 * plain LOOKUP/GETATTR locks (they are not kept across the call). */
358 void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
361 LASSERT(dentry != NULL);
363 if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
364 struct inode *inode = dentry->d_inode;
365 struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
367 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
368 inode, inode->i_ino, inode->i_generation);
/* attach inode to the lock's l_data so ASTs can invalidate it */
369 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
372 /* drop lookup or getattr locks immediately */
373 if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) {
374 /* on 2.6 there are situation when several lookups and
375 * revalidations may be requested during single operation.
376 * therefore, we don't release intent here -bzzz */
377 ll_intent_drop_lock(it);
/* Substitute a default IT_LOOKUP intent when the caller supplied none
 * (or an IT_GETXATTR one).  NOTE(review): the assignment of 'deft' to
 * *itp is in lines missing from this chunk — confirm against upstream. */
381 void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
383 struct lookup_intent *it = *itp;
385 if (!it || it->it_op == IT_GETXATTR)
/* Core dentry revalidation against the MDS.  Decides whether the cached
 * dentry/inode can be trusted (via held MD locks or an existing open
 * handle) or whether an intent RPC (md_intent_lock) is needed; on
 * success re-binds lock data, revalidates the dentry and handles the
 * real_lookup() race by re-doing the lookup under a fresh intent.
 * Returns nonzero when the dentry is valid.  This chunk is missing many
 * interior lines (labels, braces, RETURN paths) — comments below only
 * describe what the visible lines establish. */
390 int ll_revalidate_it(struct dentry *de, int lookup_flags,
391 struct lookup_intent *it)
393 struct md_op_data *op_data;
394 struct ptlrpc_request *req = NULL;
395 struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
396 struct obd_export *exp;
397 struct inode *parent = de->d_parent->d_inode;
401 CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,
/* --- negative dentry: usable only for stat/lookup-style intents --- */
404 if (de->d_inode == NULL) {
407 /* We can only use negative dentries if this is stat or lookup,
408 for opens and stuff we do need to query server. */
409 /* If there is IT_CREAT in intent op set, then we must throw
410 away this negative dentry and actually do the request to
411 kernel to create whatever needs to be created (if possible)*/
412 if (it && (it->it_op & IT_CREAT))
415 if (d_lustre_invalid(de))
/* negative entry is trustworthy only while we hold UPDATE on parent */
418 ibits = MDS_INODELOCK_UPDATE;
419 rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
423 /* Never execute intents for mount points.
424 * Attributes will be fixed up in ll_inode_revalidate_it */
425 if (d_mountpoint(de))
426 GOTO(out_sa, rc = 1);
428 /* need to get attributes in case root got changed from other client */
429 if (de == de->d_sb->s_root) {
430 rc = __ll_inode_revalidate_it(de, it, MDS_INODELOCK_LOOKUP);
436 exp = ll_i2mdexp(de->d_inode);
438 OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
/* make sure we have a usable intent (default IT_LOOKUP if none) */
439 ll_frob_intent(&it, &lookup_it);
442 if (it->it_op == IT_LOOKUP && !d_lustre_invalid(de))
/* --- IT_OPEN fast path: reuse an existing MDS open handle when a
 *     LOOKUP lock proves the inode is still the same --- */
445 if (it->it_op == IT_OPEN) {
446 struct inode *inode = de->d_inode;
447 struct ll_inode_info *lli = ll_i2info(inode);
448 struct obd_client_handle **och_p;
453 * We used to check for MDS_INODELOCK_OPEN here, but in fact
454 * just having LOOKUP lock is enough to justify inode is the
455 * same. And if inode is the same and we have suitable
456 * openhandle, then there is no point in doing another OPEN RPC
457 * just to throw away newly received openhandle. There are no
458 * security implications too, if file owner or access mode is
459 * change, LOOKUP lock is revoked.
/* pick the open-handle slot matching the requested access mode */
463 if (it->it_flags & FMODE_WRITE) {
464 och_p = &lli->lli_mds_write_och;
465 och_usecount = &lli->lli_open_fd_write_count;
466 } else if (it->it_flags & FMODE_EXEC) {
467 och_p = &lli->lli_mds_exec_och;
468 och_usecount = &lli->lli_open_fd_exec_count;
470 och_p = &lli->lli_mds_read_och;
471 och_usecount = &lli->lli_open_fd_read_count;
473 /* Check for the proper lock. */
474 ibits = MDS_INODELOCK_LOOKUP;
475 if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
477 mutex_lock(&lli->lli_och_mutex);
478 if (*och_p) { /* Everything is open already, do nothing */
479 /*(*och_usecount)++; Do not let them steal our open
480 handle from under us */
481 SET_BUT_UNUSED(och_usecount);
482 /* XXX The code above was my original idea, but in case
483 we have the handle, but we cannot use it due to later
484 checks (e.g. O_CREAT|O_EXCL flags set), nobody
485 would decrement counter increased here. So we just
486 hope the lock won't be invalidated in between. But
487 if it would be, we'll reopen the open request to
488 MDS later during file open path */
489 mutex_unlock(&lli->lli_och_mutex);
492 mutex_unlock(&lli->lli_och_mutex);
/* GETATTR may be satisfied by the statahead thread */
496 if (it->it_op == IT_GETATTR) {
497 rc = ll_statahead_enter(parent, &de, 0);
500 else if (rc != -EAGAIN && rc != 0)
/* --- slow path: issue the intent lock RPC to the MDS --- */
505 op_data = ll_prep_md_op_data(NULL, parent, de->d_inode,
506 de->d_name.name, de->d_name.len,
507 0, LUSTRE_OPC_ANY, NULL);
509 RETURN(PTR_ERR(op_data));
/* apply the process umask locally unless the server does it for us */
511 if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
512 it->it_create_mode &= ~cfs_curproc_umask();
513 it->it_create_mode |= M_CHECK_STALE;
514 rc = md_intent_lock(exp, op_data, NULL, 0, it,
516 &req, ll_md_blocking_ast, 0);
517 it->it_create_mode &= ~M_CHECK_STALE;
518 ll_finish_md_op_data(op_data);
520 /* If req is NULL, then md_intent_lock only tried to do a lock match;
521 * if all was well, it will return 1 if it found locks, 0 otherwise. */
522 if (req == NULL && rc >= 0) {
530 CDEBUG(D_INFO, "ll_intent_lock: rc %d : it->it_status "
531 "%d\n", rc, it->d.lustre.it_status);
/* apply the reply: refresh/attach the inode for this dentry */
537 rc = ll_revalidate_it_finish(req, it, de);
539 if (rc != -ESTALE && rc != -ENOENT)
540 ll_intent_release(it);
/* special files (non-reg, non-dir) carry no usable open handle */
544 if ((it->it_op & IT_OPEN) && de->d_inode &&
545 !S_ISREG(de->d_inode->i_mode) &&
546 !S_ISDIR(de->d_inode->i_mode)) {
547 ll_release_openhandle(de, it);
552 /* We do not free request as it may be reused during following lookup
553 * (see comment in mdc/mdc_locks.c::mdc_intent_lock()), request will
554 * be freed in ll_lookup_it or in ll_intent_release. But if
555 * request was not completed, we need to free it. (bug 5154, 9903) */
556 if (req != NULL && !it_disposition(it, DISP_ENQ_COMPLETE))
557 ptlrpc_req_finished(req);
559 /* mdt may grant layout lock for the newly created file, so
560 * release the lock to avoid leaking */
561 ll_intent_drop_lock(it);
562 ll_invalidate_aliases(de->d_inode);
/* --- success: re-bind lock data and mark the dentry valid --- */
566 CDEBUG(D_DENTRY, "revalidated dentry %.*s (%p) parent %p "
567 "inode %p refc %d\n", de->d_name.len,
568 de->d_name.name, de, de->d_parent, de->d_inode,
570 ll_set_lock_data(exp, de->d_inode, it, &bits);
571 if ((bits & MDS_INODELOCK_LOOKUP) && d_lustre_invalid(de))
572 d_lustre_revalidate(de);
573 ll_lookup_finish_locks(it, de);
577 if (it != NULL && it->it_op == IT_GETATTR && rc > 0)
578 ll_statahead_mark(parent, de);
582 * This part is here to combat evil-evil race in real_lookup on 2.6
583 * kernels. The race details are: We enter do_lookup() looking for some
584 * name, there is nothing in dcache for this name yet and d_lookup()
585 * returns NULL. We proceed to real_lookup(), and while we do this,
586 * another process does open on the same file we looking up (most simple
587 * reproducer), open succeeds and the dentry is added. Now back to
588 * us. In real_lookup() we do d_lookup() again and suddenly find the
589 * dentry, so we call d_revalidate on it, but there is no lock, so
590 * without this code we would return 0, but unpatched real_lookup just
591 * returns -ENOENT in such a case instead of retrying the lookup. Once
592 * this is dealt with in real_lookup(), all of this ugly mess can go and
593 * we can just check locks in ->d_revalidate without doing any RPCs
597 if (it != &lookup_it) {
598 /* MDS_INODELOCK_UPDATE needed for IT_GETATTR case. */
599 if (it->it_op == IT_GETATTR)
600 lookup_it.it_op = IT_GETATTR;
601 ll_lookup_finish_locks(it, de);
605 /* Do real lookup here. */
606 op_data = ll_prep_md_op_data(NULL, parent, NULL, de->d_name.name,
607 de->d_name.len, 0, (it->it_op & IT_CREAT ?
609 LUSTRE_OPC_ANY), NULL);
611 RETURN(PTR_ERR(op_data));
613 rc = md_intent_lock(exp, op_data, NULL, 0, it, 0, &req,
614 ll_md_blocking_ast, 0);
616 struct mdt_body *mdt_body;
617 struct lu_fid fid = {.f_seq = 0, .f_oid = 0, .f_ver = 0};
618 mdt_body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
621 fid = *ll_inode2fid(de->d_inode);
623 /* see if we got same inode, if not - return error */
624 if (lu_fid_eq(&fid, &mdt_body->fid1)) {
625 ll_finish_md_op_data(op_data);
627 goto revalidate_finish;
629 ll_intent_release(it);
631 ll_finish_md_op_data(op_data);
636 * For rc == 1 case, should not return directly to prevent losing
637 * statahead windows; for rc == 0 case, the "lookup" will be done later.
639 if (it != NULL && it->it_op == IT_GETATTR && rc == 1)
640 ll_statahead_enter(parent, &de, 1);
/* ->d_revalidate entry point: translate the VFS nameidata into a Lustre
 * lookup intent and delegate to ll_revalidate_it(); on a successful
 * open-intent revalidation, instantiate the file here (mirroring
 * ll_lookup_nd), and for create-intents stash the intent in the dentry
 * for the subsequent lookup to consume. */
644 int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
649 #ifndef HAVE_DCACHE_LOCK
650 /* kernel >= 2.6.38 supports rcu-walk, but lustre doesn't. */
651 if (nd->flags & LOOKUP_RCU)
/* only the final path component with a real intent goes this way */
655 if (nd && !(nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT))) {
656 struct lookup_intent *it;
658 it = ll_convert_intent(&nd->intent.open, nd->flags);
/* O_EXCL create must not reuse a cached dentry: force full lookup */
662 if (it->it_op == (IT_OPEN|IT_CREAT) &&
663 nd->intent.open.flags & O_EXCL) {
664 CDEBUG(D_VFSTRACE, "create O_EXCL, returning 0\n");
669 rc = ll_revalidate_it(dentry, nd->flags, it);
671 if (rc && (nd->flags & LOOKUP_OPEN) &&
672 it_disposition(it, DISP_OPEN_OPEN)) {/*Open*/
673 // XXX Code duplication with ll_lookup_nd
674 if (S_ISFIFO(dentry->d_inode->i_mode)) {
675 // We cannot call open here as it would
678 (struct ptlrpc_request *)
679 it->d.lustre.it_data);
/* hand the intent to the file being instantiated */
683 nd->intent.open.file->private_data = it;
684 filp = lookup_instantiate_filp(nd, dentry,NULL);
689 if (!rc && (nd->flags & LOOKUP_CREATE) &&
690 it_disposition(it, DISP_OPEN_CREATE)) {
691 /* We created something but we may only return
692 * negative dentry here, so save request in dentry,
693 * if lookup will be called later on, it will
694 * pick the request, otherwise it would be freed
696 ll_d2d(dentry)->lld_it = it;
697 it = NULL; /* avoid freeing */
/* intent not handed off above: release and free it here */
702 ll_intent_release(it);
703 OBD_FREE(it, sizeof(*it));
/* no intent available: plain revalidation */
706 rc = ll_revalidate_it(dentry, 0, NULL);
/* ->d_iput: when the dentry lets go of its inode and no ldlm lock still
 * covers it, the inode can be recycled early.  NOTE(review): the body
 * acting on the find_cbdata() result (and the iput) is in lines missing
 * from this chunk — confirm against upstream. */
712 void ll_d_iput(struct dentry *de, struct inode *inode)
715 if (!find_cbdata(inode))
/* Lustre dentry operations wired into the VFS dcache.  Visible members:
 * revalidate, release, delete and compare handlers defined above.
 * NOTE(review): the initializer's tail (e.g. a d_iput entry and closing
 * brace) is in lines missing from this chunk. */
720 struct dentry_operations ll_d_ops = {
721 .d_revalidate = ll_revalidate_nd,
722 .d_release = ll_release,
723 .d_delete = ll_ddelete,
725 .d_compare = ll_dcompare,