4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 #include <linux/sched.h>
39 #include <linux/quotaops.h>
41 #define DEBUG_SUBSYSTEM S_LLITE
43 #include <obd_support.h>
44 #include <lustre_lite.h>
45 #include <lustre/lustre_idl.h>
46 #include <lustre_dlm.h>
48 #include "llite_internal.h"
/* RCU callback that releases a dentry's private ll_dentry_data once the
 * grace period has expired; scheduled from ll_release() via call_rcu().
 * NOTE(review): the statement that actually frees `lld` is not visible in
 * this extract — confirm against the full source. */
50 static void free_dentry_data(struct rcu_head *head)
52 struct ll_dentry_data *lld;
54 lld = container_of(head, struct ll_dentry_data, lld_rcu_head);
58 /* should NOT be called with the dcache lock, see fs/dcache.c */
/* d_release handler: tears down the per-dentry ll_dentry_data when the
 * dentry is destroyed.  Releases any saved lookup intent, asserts the
 * cwd/mnt reference counts have already dropped to zero, and defers the
 * actual free to an RCU grace period (free_dentry_data). */
59 static void ll_release(struct dentry *de)
61 struct ll_dentry_data *lld;
/* lld may be NULL because NFS duplicates d_op methods onto dentries that
 * never went through our allocation path (bug 4655). */
65 if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
/* Drop the lock held by a saved intent before freeing it. */
69 ll_intent_release(lld->lld_it);
70 OBD_FREE(lld->lld_it, sizeof(*lld->lld_it));
72 LASSERT(lld->lld_cwd_count == 0);
73 LASSERT(lld->lld_mnt_count == 0);
/* RCU-deferred free: other CPUs may still be reading d_fsdata. */
75 call_rcu(&lld->lld_rcu_head, free_dentry_data);
80 /* Compare if two dentries are the same. Don't match if the existing dentry
81 * is marked invalid. Returns 1 if different, 0 if the same.
83 * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
84 * an AST before calling d_revalidate_it(). The dentry still exists (marked
85 * INVALID) so d_lookup() matches it, but we have no lock on it (so
86 * lock_match() fails) and we spin around real_lookup(). */
/* Two signatures are supported: the 7-argument form for newer kernels
 * (HAVE_D_COMPARE_7ARGS) and the older 3-argument qstr-based form. */
87 #ifdef HAVE_D_COMPARE_7ARGS
88 int ll_dcompare(const struct dentry *parent, const struct inode *pinode,
89 const struct dentry *dentry, const struct inode *inode,
90 unsigned int len, const char *str, const struct qstr *name)
92 int ll_dcompare(struct dentry *parent, struct qstr *d_name, struct qstr *name)
95 #ifdef HAVE_D_COMPARE_7ARGS
/* 7-arg variant: the kernel has already matched the length; compare bytes. */
101 if (memcmp(str, name->name, len))
104 struct dentry *dentry;
/* Old variant: compare length first, then the name bytes. */
107 if (d_name->len != name->len)
110 if (memcmp(d_name->name, name->name, name->len))
113 /* XXX: d_name must be in-dentry structure */
114 dentry = container_of(d_name, struct dentry, d_name); /* ugh */
117 CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n",
118 name->len, name->name, dentry, dentry->d_flags,
121 /* mountpoint is always valid */
122 if (d_mountpoint((struct dentry *)dentry))
/* An invalidated Lustre dentry must not match, forcing a fresh lookup
 * (see the race described in the header comment above). */
125 if (d_lustre_invalid(dentry))
/* ldlm lock iterator callback: skip (continue) locks that are already being
 * cancelled with data discard, otherwise stop — meaning a usable cached
 * lock was found for the inode.  Used by find_cbdata(). */
131 static inline int return_if_equal(struct ldlm_lock *lock, void *data)
134 (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
135 (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
136 return LDLM_ITER_CONTINUE;
137 return LDLM_ITER_STOP;
140 /* find any ldlm lock of the inode in mdc and lov
/* Queries first the MD export, then (if the inode has a stripe layout) the
 * data export, for any cached ldlm lock on this inode.
 * NOTE(review): the return-value handling between the two queries is not
 * visible in this extract — presumably a hit on the MD side short-circuits
 * the LOV check; confirm against the full source. */
144 static int find_cbdata(struct inode *inode)
146 struct ll_sb_info *sbi = ll_i2sbi(inode);
147 struct lov_stripe_md *lsm;
/* Check metadata locks on the MDC export. */
152 rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
153 return_if_equal, NULL);
/* Then check data locks against the inode's stripe layout, if any. */
157 lsm = ccc_inode_lsm_get(inode);
161 rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
162 ccc_inode_lsm_put(inode, lsm);
168 * Called when last reference to a dentry is dropped and dcache wants to know
169 * whether or not it should cache it:
170 * - return 1 to delete the dentry immediately
171 * - return 0 to cache the dentry
172 * Should NOT be called with the dcache lock, see fs/dcache.c
174 static int ll_ddelete(HAVE_D_DELETE_CONST struct dentry *de)
179 CDEBUG(D_DENTRY, "%s dentry %.*s (%p, parent %p, inode %p) %s%s\n",
180 d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping",
181 de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
182 d_unhashed((struct dentry *)de) ? "" : "hashed,",
183 list_empty(&de->d_subdirs) ? "" : "subdirs");
/* Refcount expectations differ by kernel: before 2.6.38 the count is
 * already 0 here; from 2.6.38 on, the final decrement happens after
 * d_delete returns, so it is still 1. */
185 #ifdef HAVE_DCACHE_LOCK
186 LASSERT(d_refcount(de) == 0);
188 /* kernel >= 2.6.38 last refcount is decreased after this function. */
189 LASSERT(d_refcount(de) == 1);
192 /* Disable this piece of code temporarily because this is called
193 * inside dcache_lock so it's not appropriate to do lots of work
196 /* if not ldlm lock for this inode, set i_nlink to 0 so that
197 * this inode can be recycled later b=20433 */
198 if (de->d_inode && !find_cbdata(de->d_inode))
199 clear_nlink(de->d_inode);
/* Invalidated dentries are deleted immediately rather than cached. */
202 if (d_lustre_invalid((struct dentry *)de))
/* Allocate and attach an ll_dentry_data to de->d_fsdata if not already set.
 * Uses d_lock to close the race between concurrent initializers: the
 * allocation is only installed if d_fsdata is still NULL under the lock.
 * NOTE(review): the allocation call and the losing-race free path are not
 * visible in this extract. */
207 static int ll_set_dd(struct dentry *de)
212 CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
213 de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
216 if (de->d_fsdata == NULL) {
217 struct ll_dentry_data *lld;
220 if (likely(lld != NULL)) {
221 spin_lock(&de->d_lock);
/* Re-check under d_lock: another thread may have won the race. */
222 if (likely(de->d_fsdata == NULL))
226 spin_unlock(&de->d_lock);
/* Initialize Lustre per-dentry state and dentry operations.
 * @block: if non-zero, (presumably) allocate ll_dentry_data when missing —
 *         the allocation path is not visible in this extract; confirm.
 * @init_sa: if non-zero, reset the statahead generation counter. */
235 int ll_dops_init(struct dentry *de, int block, int init_sa)
237 struct ll_dentry_data *lld = ll_d2d(de);
240 if (lld == NULL && block != 0) {
248 if (lld != NULL && init_sa != 0)
249 lld->lld_sa_generation = 0;
/* Pre-2.6.38 kernels require us to set d_op by hand; newer kernels
 * install it in d_alloc(), so just assert it is ours. */
251 #ifdef HAVE_DCACHE_LOCK
252 de->d_op = &ll_d_ops;
254 /* kernel >= 2.6.38 d_op is set in d_alloc() */
255 LASSERT(de->d_op == &ll_d_ops);
/* Drop the dlm lock reference held by a lookup intent, if any.  Before
 * dropping, a layout-bitted lock is made matchable (ldlm_lock_allow_match)
 * so other threads can only match it after the layout has been applied to
 * the inode.  Clears it_lock_mode so a repeated release is a no-op. */
260 void ll_intent_drop_lock(struct lookup_intent *it)
262 struct lustre_handle *handle;
264 if (it->it_op && it->d.lustre.it_lock_mode) {
265 struct ldlm_lock *lock;
267 handle = (struct lustre_handle *)&it->d.lustre.it_lock_handle;
268 lock = ldlm_handle2lock(handle);
270 /* it can only be allowed to match after layout is
271 * applied to inode otherwise false layout would be
272 * seen. Applying layout should happen before dropping
273 * the intent lock. */
274 if (it->d.lustre.it_lock_bits & MDS_INODELOCK_LAYOUT)
275 ldlm_lock_allow_match(lock);
279 CDEBUG(D_DLMTRACE, "releasing lock with cookie "LPX64
280 " from it %p\n", handle->cookie, it);
281 ldlm_lock_decref(handle, it->d.lustre.it_lock_mode);
283 /* bug 494: intent_release may be called multiple times, from
284 * this thread and we don't want to double-decref this lock */
285 it->d.lustre.it_lock_mode = 0;
/* Fully release a lookup intent: drop its dlm lock, then release any
 * ptlrpc request reference recorded by the open/create/revalidate
 * dispositions, and clear the disposition/data fields so the intent can
 * be safely released again (see bug 494 in ll_intent_drop_lock). */
289 void ll_intent_release(struct lookup_intent *it)
293 CDEBUG(D_INFO, "intent %p released\n", it);
294 ll_intent_drop_lock(it);
295 /* We are still holding extra reference on a request, need to free it */
296 if (it_disposition(it, DISP_ENQ_OPEN_REF))
297 ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
298 if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
299 ptlrpc_req_finished(it->d.lustre.it_data);
300 if (it_disposition(it, DISP_ENQ_COMPLETE)) /* saved req from revalidate
302 ptlrpc_req_finished(it->d.lustre.it_data);
304 it->d.lustre.it_disposition = 0;
305 it->d.lustre.it_data = NULL;
/* Mark every dentry alias of @inode as Lustre-invalid (d_lustre_invalidate)
 * under the dcache/inode dentry-list lock, typically after a lock
 * cancellation AST means our cached name is no longer trustworthy.
 * Loudly reports the pathological case of being called on the root. */
309 void ll_invalidate_aliases(struct inode *inode)
311 struct dentry *dentry;
312 struct ll_d_hlist_node *p;
315 LASSERT(inode != NULL);
317 CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
318 inode->i_ino, inode->i_generation, inode);
320 ll_lock_dcache(inode);
/* Walk all aliases on i_dentry while holding the dcache lock. */
321 ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
322 CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
323 "inode %p flags %d\n", dentry->d_name.len,
324 dentry->d_name.name, dentry, dentry->d_parent,
325 dentry->d_inode, dentry->d_flags);
/* Invalidating "/" should never happen; dump state for debugging. */
327 if (dentry->d_name.len == 1 && dentry->d_name.name[0] == '/') {
328 CERROR("called on root (?) dentry=%p, inode=%p "
329 "ino=%lu\n", dentry, inode, inode->i_ino);
330 lustre_dump_dentry(dentry, 1);
331 libcfs_debug_dumpstack(NULL);
334 d_lustre_invalidate(dentry);
336 ll_unlock_dcache(inode);
/* Finish a successful revalidation reply: unless the lookup came back
 * negative, (re)build the in-core inode from the server reply via
 * ll_prep_inode().  Body is only partially visible in this extract. */
341 int ll_revalidate_it_finish(struct ptlrpc_request *request,
342 struct lookup_intent *it,
351 if (it_disposition(it, DISP_LOOKUP_NEG))
354 rc = ll_prep_inode(&de->d_inode, request, NULL);
/* Post-lookup lock bookkeeping: bind the intent's dlm lock to the dentry's
 * inode (ll_set_lock_data), then immediately drop LOOKUP/GETATTR mode
 * locks, which are not worth caching across the operation. */
359 void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
362 LASSERT(dentry != NULL);
364 if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
365 struct inode *inode = dentry->d_inode;
366 struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
368 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
369 inode, inode->i_ino, inode->i_generation);
370 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
373 /* drop lookup or getattr locks immediately */
374 if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) {
375 /* on 2.6 there are situation when several lookups and
376 * revalidations may be requested during single operation.
377 * therefore, we don't release intent here -bzzz */
378 ll_intent_drop_lock(it);
/* Substitute a default intent for callers that supplied none (or an
 * IT_GETXATTR intent, which is handled like a plain lookup here).
 * NOTE(review): the assignment *itp = deft is not visible in this extract. */
382 void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
384 struct lookup_intent *it = *itp;
386 if (!it || it->it_op == IT_GETXATTR)
/* Core dentry revalidation with intent, called from ->d_revalidate.
 * Returns 1 when the dentry is still valid, 0 to force a fresh lookup.
 * Handles: negative dentries, mountpoints, the root dentry, cached open
 * handle reuse for IT_OPEN, statahead for IT_GETATTR, and a full
 * md_intent_lock() RPC when local state is insufficient.  Many lines of
 * the original function are missing from this extract; the comments below
 * only describe what is visible. */
391 int ll_revalidate_it(struct dentry *de, int lookup_flags,
392 struct lookup_intent *it)
394 struct md_op_data *op_data;
395 struct ptlrpc_request *req = NULL;
396 struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
397 struct obd_export *exp;
398 struct inode *parent = de->d_parent->d_inode;
402 CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,
/* --- Negative dentry: usable only for stat/lookup-style intents. --- */
405 if (de->d_inode == NULL) {
408 /* We can only use negative dentries if this is stat or lookup,
409 for opens and stuff we do need to query server. */
410 /* If there is IT_CREAT in intent op set, then we must throw
411 away this negative dentry and actually do the request to
412 kernel to create whatever needs to be created (if possible)*/
413 if (it && (it->it_op & IT_CREAT))
416 if (d_lustre_invalid(de))
/* A valid negative dentry requires an UPDATE lock on the parent. */
419 ibits = MDS_INODELOCK_UPDATE;
420 rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
424 /* Never execute intents for mount points.
425 * Attributes will be fixed up in ll_inode_revalidate_it */
426 if (d_mountpoint(de))
427 GOTO(out_sa, rc = 1);
429 /* need to get attributes in case root got changed from other client */
430 if (de == de->d_sb->s_root) {
431 rc = __ll_inode_revalidate_it(de, it, MDS_INODELOCK_LOOKUP);
437 exp = ll_i2mdexp(de->d_inode);
439 OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
440 ll_frob_intent(&it, &lookup_it);
443 if (it->it_op == IT_LOOKUP && !d_lustre_invalid(de))
/* --- IT_OPEN: try to reuse an existing cached MDS open handle. --- */
446 if (it->it_op == IT_OPEN) {
447 struct inode *inode = de->d_inode;
448 struct ll_inode_info *lli = ll_i2info(inode);
449 struct obd_client_handle **och_p;
454 * We used to check for MDS_INODELOCK_OPEN here, but in fact
455 * just having LOOKUP lock is enough to justify inode is the
456 * same. And if inode is the same and we have suitable
457 * openhandle, then there is no point in doing another OPEN RPC
458 * just to throw away newly received openhandle. There are no
459 * security implications too, if file owner or access mode is
460 * change, LOOKUP lock is revoked.
/* Select the cached handle matching the requested open mode. */
464 if (it->it_flags & FMODE_WRITE) {
465 och_p = &lli->lli_mds_write_och;
466 och_usecount = &lli->lli_open_fd_write_count;
467 } else if (it->it_flags & FMODE_EXEC) {
468 och_p = &lli->lli_mds_exec_och;
469 och_usecount = &lli->lli_open_fd_exec_count;
471 och_p = &lli->lli_mds_read_och;
472 och_usecount = &lli->lli_open_fd_read_count;
474 /* Check for the proper lock. */
475 ibits = MDS_INODELOCK_LOOKUP;
476 if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
478 mutex_lock(&lli->lli_och_mutex);
479 if (*och_p) { /* Everything is open already, do nothing */
480 /*(*och_usecount)++; Do not let them steal our open
481 handle from under us */
482 SET_BUT_UNUSED(och_usecount);
483 /* XXX The code above was my original idea, but in case
484 we have the handle, but we cannot use it due to later
485 checks (e.g. O_CREAT|O_EXCL flags set), nobody
486 would decrement counter increased here. So we just
487 hope the lock won't be invalidated in between. But
488 if it would be, we'll reopen the open request to
489 MDS later during file open path */
490 mutex_unlock(&lli->lli_och_mutex);
493 mutex_unlock(&lli->lli_och_mutex);
/* --- IT_GETATTR: give statahead a chance to satisfy this first. --- */
497 if (it->it_op == IT_GETATTR) {
498 rc = ll_statahead_enter(parent, &de, 0);
501 else if (rc != -EAGAIN && rc != 0)
/* --- Slow path: issue the intent lock RPC to the MDS. --- */
506 op_data = ll_prep_md_op_data(NULL, parent, de->d_inode,
507 de->d_name.name, de->d_name.len,
508 0, LUSTRE_OPC_ANY, NULL);
510 RETURN(PTR_ERR(op_data));
/* Apply the process umask client-side unless the server does it
 * (POSIX ACL + umask-aware export). */
512 if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
513 it->it_create_mode &= ~cfs_curproc_umask();
514 it->it_create_mode |= M_CHECK_STALE;
515 rc = md_intent_lock(exp, op_data, NULL, 0, it,
517 &req, ll_md_blocking_ast, 0);
518 it->it_create_mode &= ~M_CHECK_STALE;
519 ll_finish_md_op_data(op_data);
521 /* If req is NULL, then md_intent_lock only tried to do a lock match;
522 * if all was well, it will return 1 if it found locks, 0 otherwise. */
523 if (req == NULL && rc >= 0) {
531 CDEBUG(D_INFO, "ll_intent_lock: rc %d : it->it_status "
532 "%d\n", rc, it->d.lustre.it_status);
538 rc = ll_revalidate_it_finish(req, it, de);
/* ESTALE/ENOENT keep the intent alive for the retry path below. */
540 if (rc != -ESTALE && rc != -ENOENT)
541 ll_intent_release(it);
/* Special files (non-regular, non-directory) get their open handle
 * released immediately — open is handled in the VFS for them. */
545 if ((it->it_op & IT_OPEN) && de->d_inode &&
546 !S_ISREG(de->d_inode->i_mode) &&
547 !S_ISDIR(de->d_inode->i_mode)) {
548 ll_release_openhandle(de, it);
553 /* We do not free request as it may be reused during following lookup
554 * (see comment in mdc/mdc_locks.c::mdc_intent_lock()), request will
555 * be freed in ll_lookup_it or in ll_intent_release. But if
556 * request was not completed, we need to free it. (bug 5154, 9903) */
557 if (req != NULL && !it_disposition(it, DISP_ENQ_COMPLETE))
558 ptlrpc_req_finished(req);
560 /* mdt may grant layout lock for the newly created file, so
561 * release the lock to avoid leaking */
562 ll_intent_drop_lock(it);
563 ll_invalidate_aliases(de->d_inode);
/* --- Success: bind lock data and re-validate the dentry. --- */
567 CDEBUG(D_DENTRY, "revalidated dentry %.*s (%p) parent %p "
568 "inode %p refc %d\n", de->d_name.len,
569 de->d_name.name, de, de->d_parent, de->d_inode,
571 ll_set_lock_data(exp, de->d_inode, it, &bits);
572 if ((bits & MDS_INODELOCK_LOOKUP) && d_lustre_invalid(de))
573 d_lustre_revalidate(de);
574 ll_lookup_finish_locks(it, de);
578 if (it != NULL && it->it_op == IT_GETATTR && rc > 0)
579 ll_statahead_mark(parent, de);
583 * This part is here to combat evil-evil race in real_lookup on 2.6
584 * kernels. The race details are: We enter do_lookup() looking for some
585 * name, there is nothing in dcache for this name yet and d_lookup()
586 * returns NULL. We proceed to real_lookup(), and while we do this,
587 * another process does open on the same file we looking up (most simple
588 * reproducer), open succeeds and the dentry is added. Now back to
589 * us. In real_lookup() we do d_lookup() again and suddenly find the
590 * dentry, so we call d_revalidate on it, but there is no lock, so
591 * without this code we would return 0, but unpatched real_lookup just
592 * returns -ENOENT in such a case instead of retrying the lookup. Once
593 * this is dealt with in real_lookup(), all of this ugly mess can go and
594 * we can just check locks in ->d_revalidate without doing any RPCs
598 if (it != &lookup_it) {
599 /* MDS_INODELOCK_UPDATE needed for IT_GETATTR case. */
600 if (it->it_op == IT_GETATTR)
601 lookup_it.it_op = IT_GETATTR;
602 ll_lookup_finish_locks(it, de);
606 /* Do real lookup here. */
607 op_data = ll_prep_md_op_data(NULL, parent, NULL, de->d_name.name,
608 de->d_name.len, 0, (it->it_op & IT_CREAT ?
610 LUSTRE_OPC_ANY), NULL);
612 RETURN(PTR_ERR(op_data));
614 rc = md_intent_lock(exp, op_data, NULL, 0, it, 0, &req,
615 ll_md_blocking_ast, 0);
617 struct mdt_body *mdt_body;
618 struct lu_fid fid = {.f_seq = 0, .f_oid = 0, .f_ver = 0};
619 mdt_body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
622 fid = *ll_inode2fid(de->d_inode);
624 /* see if we got same inode, if not - return error */
625 if (lu_fid_eq(&fid, &mdt_body->fid1)) {
626 ll_finish_md_op_data(op_data);
628 goto revalidate_finish;
630 ll_intent_release(it);
632 ll_finish_md_op_data(op_data);
637 * For rc == 1 case, should not return directly to prevent losing
638 * statahead windows; for rc == 0 case, the "lookup" will be done later.
640 if (it != NULL && it->it_op == IT_GETATTR && rc == 1)
641 ll_statahead_enter(parent, &de, 1);
/* VFS ->d_revalidate entry point.  Converts the nameidata's open intent
 * into a Lustre lookup_intent, delegates to ll_revalidate_it(), and for
 * successful opens instantiates the file via lookup_instantiate_filp().
 * Refuses rcu-walk mode (lustre does not support it).  Several lines of
 * the original are missing from this extract. */
645 int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
650 #ifndef HAVE_DCACHE_LOCK
651 /* kernel >= 2.6.38 supports rcu-walk, but lustre doesn't. */
652 if (nd->flags & LOOKUP_RCU)
/* Only build an intent for final-component lookups (not CONTINUE/PARENT). */
656 if (nd && !(nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT))) {
657 struct lookup_intent *it;
659 it = ll_convert_intent(&nd->intent.open, nd->flags);
/* O_CREAT|O_EXCL must always go to the server — never revalidate. */
663 if (it->it_op == (IT_OPEN|IT_CREAT) &&
664 nd->intent.open.flags & O_EXCL) {
665 CDEBUG(D_VFSTRACE, "create O_EXCL, returning 0\n");
670 rc = ll_revalidate_it(dentry, nd->flags, it);
672 if (rc && (nd->flags & LOOKUP_OPEN) &&
673 it_disposition(it, DISP_OPEN_OPEN)) {/*Open*/
674 // XXX Code duplication with ll_lookup_nd
675 if (S_ISFIFO(dentry->d_inode->i_mode)) {
676 // We cannot call open here as it would
679 (struct ptlrpc_request *)
680 it->d.lustre.it_data);
684 nd->intent.open.file->private_data = it;
685 filp = lookup_instantiate_filp(nd, dentry,NULL);
690 if (!rc && (nd->flags & LOOKUP_CREATE) &&
691 it_disposition(it, DISP_OPEN_CREATE)) {
692 /* We created something but we may only return
693 * negative dentry here, so save request in dentry,
694 * if lookup will be called later on, it will
695 * pick the request, otherwise it would be freed
697 ll_d2d(dentry)->lld_it = it;
698 it = NULL; /* avoid freeing */
/* No intent path (or cleanup): release and free the intent, then fall
 * back to a plain revalidate without an intent. */
703 ll_intent_release(it);
704 OBD_FREE(it, sizeof(*it));
707 rc = ll_revalidate_it(dentry, 0, NULL);
/* ->d_iput handler: when the inode being dropped has no cached ldlm locks,
 * (presumably) zero its nlink so it is recycled rather than cached — the
 * statement following the check is not visible in this extract; confirm. */
713 void ll_d_iput(struct dentry *de, struct inode *inode)
716 if (!find_cbdata(inode))
/* Lustre dentry operations table, installed on every llite dentry
 * (by hand pre-2.6.38, via d_alloc() on newer kernels — see ll_dops_init). */
721 struct dentry_operations ll_d_ops = {
722 .d_revalidate = ll_revalidate_nd,
723 .d_release = ll_release,
724 .d_delete = ll_ddelete,
726 .d_compare = ll_dcompare,