-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/quotaops.h>
#define DEBUG_SUBSYSTEM S_LLITE
#include <lustre_lite.h>
#include <lustre/lustre_idl.h>
#include <lustre_dlm.h>
-#include <lustre_mdc.h>
-//#include <lustre_ver.h>
-//#include <lustre_version.h>
#include "llite_internal.h"
-cfs_spinlock_t ll_lookup_lock = CFS_SPIN_LOCK_UNLOCKED;
+static void free_dentry_data(struct rcu_head *head)
+{
+ struct ll_dentry_data *lld;
+
+ lld = container_of(head, struct ll_dentry_data, lld_rcu_head);
+ OBD_FREE_PTR(lld);
+}
/* should NOT be called with the dcache lock, see fs/dcache.c */
static void ll_release(struct dentry *de)
ENTRY;
LASSERT(de != NULL);
lld = ll_d2d(de);
- if (lld == NULL) { /* NFS copies the de->d_op methods (bug 4655) */
- EXIT;
- return;
- }
-#ifndef HAVE_VFS_INTENT_PATCHES
+ if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
+ RETURN_EXIT;
+
if (lld->lld_it) {
ll_intent_release(lld->lld_it);
OBD_FREE(lld->lld_it, sizeof(*lld->lld_it));
}
-#endif
- LASSERT(lld->lld_cwd_count == 0);
- LASSERT(lld->lld_mnt_count == 0);
- OBD_FREE(de->d_fsdata, sizeof(*lld));
+ LASSERT(lld->lld_cwd_count == 0);
+ LASSERT(lld->lld_mnt_count == 0);
+ de->d_fsdata = NULL;
+ call_rcu(&lld->lld_rcu_head, free_dentry_data);
- EXIT;
+ EXIT;
}
/* Compare if two dentries are the same. Don't match if the existing dentry
- * is marked DCACHE_LUSTRE_INVALID. Returns 1 if different, 0 if the same.
+ * is marked invalid. Returns 1 if different, 0 if the same.
*
* This avoids a race where ll_lookup_it() instantiates a dentry, but we get
* an AST before calling d_revalidate_it(). The dentry still exists (marked
* INVALID) so d_lookup() matches it, but we have no lock on it (so
* lock_match() fails) and we spin around real_lookup(). */
+#ifdef HAVE_D_COMPARE_7ARGS
+int ll_dcompare(const struct dentry *parent, const struct inode *pinode,
+ const struct dentry *dentry, const struct inode *inode,
+ unsigned int len, const char *str, const struct qstr *name)
+#else
int ll_dcompare(struct dentry *parent, struct qstr *d_name, struct qstr *name)
+#endif
{
- struct dentry *dchild;
- ENTRY;
+#ifdef HAVE_D_COMPARE_7ARGS
+ ENTRY;
- if (d_name->len != name->len)
- RETURN(1);
+ if (len != name->len)
+ RETURN(1);
- if (memcmp(d_name->name, name->name, name->len))
- RETURN(1);
+ if (memcmp(str, name->name, len))
+ RETURN(1);
+#else
+ struct dentry *dentry;
+ ENTRY;
- /* XXX: d_name must be in-dentry structure */
- dchild = container_of(d_name, struct dentry, d_name); /* ugh */
+ if (d_name->len != name->len)
+ RETURN(1);
- CDEBUG(D_DENTRY,"found name %.*s(%p) - flags %d/%x - refc %d\n",
- name->len, name->name, dchild,
- d_mountpoint(dchild), dchild->d_flags & DCACHE_LUSTRE_INVALID,
- atomic_read(&dchild->d_count));
+ if (memcmp(d_name->name, name->name, name->len))
+ RETURN(1);
- /* mountpoint is always valid */
- if (d_mountpoint(dchild))
- RETURN(0);
+ /* XXX: d_name must be in-dentry structure */
+ dentry = container_of(d_name, struct dentry, d_name); /* ugh */
+#endif
- if (dchild->d_flags & DCACHE_LUSTRE_INVALID)
- RETURN(1);
+ CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n",
+ name->len, name->name, dentry, dentry->d_flags,
+ d_refcount(dentry));
+ /* mountpoint is always valid */
+ if (d_mountpoint((struct dentry *)dentry))
+ RETURN(0);
+
+ if (d_lustre_invalid(dentry))
+ RETURN(1);
RETURN(0);
}
static inline int return_if_equal(struct ldlm_lock *lock, void *data)
{
+ if ((lock->l_flags &
+ (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
+ (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
+ return LDLM_ITER_CONTINUE;
return LDLM_ITER_STOP;
}
* < 0 error */
static int find_cbdata(struct inode *inode)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct lov_stripe_md *lsm;
int rc = 0;
ENTRY;
if (rc != 0)
RETURN(rc);
- if (lli->lli_smd)
- rc = obd_find_cbdata(sbi->ll_dt_exp, lli->lli_smd,
- return_if_equal, NULL);
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm == NULL)
+ RETURN(rc);
- RETURN(rc);
+ rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
+ ccc_inode_lsm_put(inode, lsm);
+
+ RETURN(rc);
}
-/* should NOT be called with the dcache lock, see fs/dcache.c */
-static int ll_ddelete(struct dentry *de)
+/**
+ * Called when last reference to a dentry is dropped and dcache wants to know
+ * whether or not it should cache it:
+ * - return 1 to delete the dentry immediately
+ * - return 0 to cache the dentry
+ * Should NOT be called with the dcache lock, see fs/dcache.c
+ */
+static int ll_ddelete(HAVE_D_DELETE_CONST struct dentry *de)
{
- ENTRY;
- LASSERT(de);
+ ENTRY;
+ LASSERT(de);
- CDEBUG(D_DENTRY, "%s dentry %.*s (%p, parent %p, inode %p) %s%s\n",
- (de->d_flags & DCACHE_LUSTRE_INVALID ? "deleting" : "keeping"),
- de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
- d_unhashed(de) ? "" : "hashed,",
- list_empty(&de->d_subdirs) ? "" : "subdirs");
+ CDEBUG(D_DENTRY, "%s dentry %.*s (%p, parent %p, inode %p) %s%s\n",
+ d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping",
+ de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
+ d_unhashed((struct dentry *)de) ? "" : "hashed,",
+ list_empty(&de->d_subdirs) ? "" : "subdirs");
- /* if not ldlm lock for this inode, set i_nlink to 0 so that
- * this inode can be recycled later b=20433 */
- LASSERT(atomic_read(&de->d_count) == 0);
- if (de->d_inode && !find_cbdata(de->d_inode))
- de->d_inode->i_nlink = 0;
+#ifdef HAVE_DCACHE_LOCK
+ LASSERT(d_refcount(de) == 0);
+#else
+ /* kernel >= 2.6.38 last refcount is decreased after this function. */
+ LASSERT(d_refcount(de) == 1);
+#endif
- if (de->d_flags & DCACHE_LUSTRE_INVALID)
- RETURN(1);
+ /* Disable this piece of code temporarily because this is called
+ * inside dcache_lock so it's not appropriate to do lots of work
+ * here. */
+#if 0
+ /* if not ldlm lock for this inode, set i_nlink to 0 so that
+ * this inode can be recycled later b=20433 */
+ if (de->d_inode && !find_cbdata(de->d_inode))
+ clear_nlink(de->d_inode);
+#endif
- RETURN(0);
+ if (d_lustre_invalid((struct dentry *)de))
+ RETURN(1);
+ RETURN(0);
}
-void ll_set_dd(struct dentry *de)
+static int ll_set_dd(struct dentry *de)
{
- ENTRY;
- LASSERT(de != NULL);
+ ENTRY;
+ LASSERT(de != NULL);
+
+ CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
+ de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
+ d_refcount(de));
+
+ if (de->d_fsdata == NULL) {
+ struct ll_dentry_data *lld;
+
+ OBD_ALLOC_PTR(lld);
+ if (likely(lld != NULL)) {
+ spin_lock(&de->d_lock);
+ if (likely(de->d_fsdata == NULL))
+ de->d_fsdata = lld;
+ else
+ OBD_FREE_PTR(lld);
+ spin_unlock(&de->d_lock);
+ } else {
+ RETURN(-ENOMEM);
+ }
+ }
+
+ RETURN(0);
+}
- CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
- de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
- atomic_read(&de->d_count));
-
- if (de->d_fsdata == NULL) {
- struct ll_dentry_data *lld;
-
- OBD_ALLOC_PTR(lld);
- if (likely(lld != NULL)) {
- lock_dentry(de);
- if (likely(de->d_fsdata == NULL))
- de->d_fsdata = lld;
- else
- OBD_FREE_PTR(lld);
- unlock_dentry(de);
- }
+int ll_dops_init(struct dentry *de, int block, int init_sa)
+{
+ struct ll_dentry_data *lld = ll_d2d(de);
+ int rc = 0;
+
+ if (lld == NULL && block != 0) {
+ rc = ll_set_dd(de);
+ if (rc)
+ return rc;
+
+ lld = ll_d2d(de);
}
- EXIT;
+ if (lld != NULL && init_sa != 0)
+ lld->lld_sa_generation = 0;
+
+#ifdef HAVE_DCACHE_LOCK
+ de->d_op = &ll_d_ops;
+#else
+ /* kernel >= 2.6.38 d_op is set in d_alloc() */
+ LASSERT(de->d_op == &ll_d_ops);
+#endif
+ return rc;
}
void ll_intent_drop_lock(struct lookup_intent *it)
struct lustre_handle *handle;
if (it->it_op && it->d.lustre.it_lock_mode) {
- handle = (struct lustre_handle *)&it->d.lustre.it_lock_handle;
+ struct ldlm_lock *lock;
+
+ handle = (struct lustre_handle *)&it->d.lustre.it_lock_handle;
+ lock = ldlm_handle2lock(handle);
+ if (lock != NULL) {
+ /* it can only be allowed to match after layout is
+ * applied to inode otherwise false layout would be
+ * seen. Applying layout shoud happen before dropping
+ * the intent lock. */
+ if (it->d.lustre.it_lock_bits & MDS_INODELOCK_LAYOUT)
+ ldlm_lock_allow_match(lock);
+ LDLM_LOCK_PUT(lock);
+ }
+
CDEBUG(D_DLMTRACE, "releasing lock with cookie "LPX64
" from it %p\n", handle->cookie, it);
ldlm_lock_decref(handle, it->d.lustre.it_lock_mode);
CDEBUG(D_INFO, "intent %p released\n", it);
ll_intent_drop_lock(it);
-#ifdef HAVE_VFS_INTENT_PATCHES
- it->it_magic = 0;
- it->it_op_release = 0;
-#endif
/* We are still holding extra reference on a request, need to free it */
if (it_disposition(it, DISP_ENQ_OPEN_REF))
ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
EXIT;
}
-/* Drop dentry if it is not used already, unhash otherwise.
- Should be called with dcache lock held!
- Returns: 1 if dentry was dropped, 0 if unhashed. */
-int ll_drop_dentry(struct dentry *dentry)
+void ll_invalidate_aliases(struct inode *inode)
{
- lock_dentry(dentry);
- if (atomic_read(&dentry->d_count) == 0) {
- CDEBUG(D_DENTRY, "deleting dentry %.*s (%p) parent %p "
- "inode %p\n", dentry->d_name.len,
- dentry->d_name.name, dentry, dentry->d_parent,
- dentry->d_inode);
- dget_locked(dentry);
- __d_drop(dentry);
- unlock_dentry(dentry);
- spin_unlock(&dcache_lock);
- cfs_spin_unlock(&ll_lookup_lock);
- dput(dentry);
- cfs_spin_lock(&ll_lookup_lock);
- spin_lock(&dcache_lock);
- return 1;
- }
- /* disconected dentry can not be find without lookup, because we
- * not need his to unhash or mark invalid. */
- if (dentry->d_flags & DCACHE_DISCONNECTED) {
- unlock_dentry(dentry);
- RETURN (0);
- }
+ struct dentry *dentry;
+ struct ll_d_hlist_node *p;
+ ENTRY;
- if (!(dentry->d_flags & DCACHE_LUSTRE_INVALID)) {
- CDEBUG(D_DENTRY, "unhashing dentry %.*s (%p) parent %p "
- "inode %p refc %d\n", dentry->d_name.len,
- dentry->d_name.name, dentry, dentry->d_parent,
- dentry->d_inode, atomic_read(&dentry->d_count));
- /* actually we don't unhash the dentry, rather just
- * mark it inaccessible for to __d_lookup(). otherwise
- * sys_getcwd() could return -ENOENT -bzzz */
- dentry->d_flags |= DCACHE_LUSTRE_INVALID;
- if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
- __d_drop(dentry);
- }
- unlock_dentry(dentry);
- return 0;
-}
-
-void ll_unhash_aliases(struct inode *inode)
-{
- struct list_head *tmp, *head;
- ENTRY;
+ LASSERT(inode != NULL);
- if (inode == NULL) {
- CERROR("unexpected NULL inode, tell phil\n");
- return;
- }
-
- CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
+ inode->i_ino, inode->i_generation, inode);
- head = &inode->i_dentry;
- cfs_spin_lock(&ll_lookup_lock);
- spin_lock(&dcache_lock);
-restart:
- tmp = head;
- while ((tmp = tmp->next) != head) {
- struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
-
- CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
- "inode %p flags %d\n", dentry->d_name.len,
- dentry->d_name.name, dentry, dentry->d_parent,
- dentry->d_inode, dentry->d_flags);
+ ll_lock_dcache(inode);
+ ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+ CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
+ "inode %p flags %d\n", dentry->d_name.len,
+ dentry->d_name.name, dentry, dentry->d_parent,
+ dentry->d_inode, dentry->d_flags);
if (dentry->d_name.len == 1 && dentry->d_name.name[0] == '/') {
CERROR("called on root (?) dentry=%p, inode=%p "
libcfs_debug_dumpstack(NULL);
}
- if (ll_drop_dentry(dentry))
- goto restart;
- }
- spin_unlock(&dcache_lock);
- cfs_spin_unlock(&ll_lookup_lock);
+ d_lustre_invalidate(dentry);
+ }
+ ll_unlock_dcache(inode);
EXIT;
}
CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
inode, inode->i_ino, inode->i_generation);
- md_set_lock_data(sbi->ll_md_exp, &it->d.lustre.it_lock_handle,
- inode, NULL);
+ ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
}
/* drop lookup or getattr locks immediately */
void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
{
struct lookup_intent *it = *itp;
-#ifdef HAVE_VFS_INTENT_PATCHES
- if (it) {
- LASSERTF(it->it_magic == INTENT_MAGIC,
- "%p has bad intent magic: %x\n",
- it, it->it_magic);
- }
-#endif
if (!it || it->it_op == IT_GETXATTR)
it = *itp = deft;
-#ifdef HAVE_VFS_INTENT_PATCHES
- it->it_op_release = ll_intent_release;
-#endif
}
int ll_revalidate_it(struct dentry *de, int lookup_flags,
struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
struct obd_export *exp;
struct inode *parent = de->d_parent->d_inode;
- int rc, first = 0;
+ int rc;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,
LL_IT2STR(it));
if (de->d_inode == NULL) {
+ __u64 ibits;
+
/* We can only use negative dentries if this is stat or lookup,
for opens and stuff we do need to query server. */
/* If there is IT_CREAT in intent op set, then we must throw
if (it && (it->it_op & IT_CREAT))
RETURN(0);
- if (de->d_flags & DCACHE_LUSTRE_INVALID)
+ if (d_lustre_invalid(de))
RETURN(0);
- rc = ll_have_md_lock(parent, MDS_INODELOCK_UPDATE);
+ ibits = MDS_INODELOCK_UPDATE;
+ rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
GOTO(out_sa, rc);
}
ll_frob_intent(&it, &lookup_it);
LASSERT(it);
- if (it->it_op == IT_LOOKUP && !(de->d_flags & DCACHE_LUSTRE_INVALID))
- GOTO(out_sa, rc = 1);
-
- op_data = ll_prep_md_op_data(NULL, parent, de->d_inode,
- de->d_name.name, de->d_name.len,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ if (it->it_op == IT_LOOKUP && !d_lustre_invalid(de))
+ RETURN(1);
- if ((it->it_op == IT_OPEN) && de->d_inode) {
- struct inode *inode = de->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_client_handle **och_p;
- __u64 *och_usecount;
+ if (it->it_op == IT_OPEN) {
+ struct inode *inode = de->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_client_handle **och_p;
+ __u64 *och_usecount;
+ __u64 ibits;
/*
* We used to check for MDS_INODELOCK_OPEN here, but in fact
och_usecount = &lli->lli_open_fd_read_count;
}
/* Check for the proper lock. */
- if (!ll_have_md_lock(inode, MDS_INODELOCK_LOOKUP))
+ ibits = MDS_INODELOCK_LOOKUP;
+ if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
goto do_lock;
- cfs_down(&lli->lli_och_sem);
+ mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Everything is open already, do nothing */
/*(*och_usecount)++; Do not let them steal our open
handle from under us */
+ SET_BUT_UNUSED(och_usecount);
/* XXX The code above was my original idea, but in case
we have the handle, but we cannot use it due to later
checks (e.g. O_CREAT|O_EXCL flags set), nobody
hope the lock won't be invalidated in between. But
if it would be, we'll reopen the open request to
MDS later during file open path */
- cfs_up(&lli->lli_och_sem);
- ll_finish_md_op_data(op_data);
+ mutex_unlock(&lli->lli_och_mutex);
RETURN(1);
} else {
- cfs_up(&lli->lli_och_sem);
+ mutex_unlock(&lli->lli_och_mutex);
}
}
if (it->it_op == IT_GETATTR) {
- first = ll_statahead_enter(parent, &de, 0);
- if (first == 1) {
- ll_statahead_exit(parent, de, 1);
- ll_finish_md_op_data(op_data);
- GOTO(out, rc = 1);
- }
+ rc = ll_statahead_enter(parent, &de, 0);
+ if (rc == 1)
+ goto mark;
+ else if (rc != -EAGAIN && rc != 0)
+ GOTO(out, rc = 0);
}
do_lock:
- it->it_create_mode &= ~current->fs->umask;
+ op_data = ll_prep_md_op_data(NULL, parent, de->d_inode,
+ de->d_name.name, de->d_name.len,
+ 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
+
+ if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
+ it->it_create_mode &= ~cfs_curproc_umask();
it->it_create_mode |= M_CHECK_STALE;
rc = md_intent_lock(exp, op_data, NULL, 0, it,
lookup_flags,
&req, ll_md_blocking_ast, 0);
it->it_create_mode &= ~M_CHECK_STALE;
ll_finish_md_op_data(op_data);
- if (it->it_op == IT_GETATTR && !first)
- /* If there are too many locks on client-side, then some
- * locks taken by statahead maybe dropped automatically
- * before the real "revalidate" using them. */
- ll_statahead_exit(parent, de, req == NULL ? rc : 0);
- else if (first == -EEXIST)
- ll_statahead_mark(parent, de);
/* If req is NULL, then md_intent_lock only tried to do a lock match;
* if all was well, it will return 1 if it found locks, 0 otherwise. */
}
rc = 1;
- /* unfortunately ll_intent_lock may cause a callback and revoke our
- * dentry */
- cfs_spin_lock(&ll_lookup_lock);
- spin_lock(&dcache_lock);
- lock_dentry(de);
- __d_drop(de);
- unlock_dentry(de);
- d_rehash_cond(de, 0);
- spin_unlock(&dcache_lock);
- cfs_spin_unlock(&ll_lookup_lock);
-
out:
/* We do not free request as it may be reused during following lookup
* (see comment in mdc/mdc_locks.c::mdc_intent_lock()), request will
if (req != NULL && !it_disposition(it, DISP_ENQ_COMPLETE))
ptlrpc_req_finished(req);
if (rc == 0) {
- ll_unhash_aliases(de->d_inode);
- /* done in ll_unhash_aliases()
- dentry->d_flags |= DCACHE_LUSTRE_INVALID; */
- } else {
- CDEBUG(D_DENTRY, "revalidated dentry %.*s (%p) parent %p "
- "inode %p refc %d\n", de->d_name.len,
- de->d_name.name, de, de->d_parent, de->d_inode,
- atomic_read(&de->d_count));
- if (first != 1) {
- if (de->d_flags & DCACHE_LUSTRE_INVALID) {
- lock_dentry(de);
- de->d_flags &= ~DCACHE_LUSTRE_INVALID;
- unlock_dentry(de);
- }
- ll_lookup_finish_locks(it, de);
- }
+ /* mdt may grant layout lock for the newly created file, so
+ * release the lock to avoid leaking */
+ ll_intent_drop_lock(it);
+ ll_invalidate_aliases(de->d_inode);
+ } else {
+ __u64 bits = 0;
+
+ CDEBUG(D_DENTRY, "revalidated dentry %.*s (%p) parent %p "
+ "inode %p refc %d\n", de->d_name.len,
+ de->d_name.name, de, de->d_parent, de->d_inode,
+ d_refcount(de));
+ ll_set_lock_data(exp, de->d_inode, it, &bits);
+ if ((bits & MDS_INODELOCK_LOOKUP) && d_lustre_invalid(de))
+ d_lustre_revalidate(de);
+ ll_lookup_finish_locks(it, de);
}
+
+mark:
+ if (it != NULL && it->it_op == IT_GETATTR && rc > 0)
+ ll_statahead_mark(parent, de);
RETURN(rc);
/*
* For rc == 1 case, should not return directly to prevent losing
* statahead windows; for rc == 0 case, the "lookup" will be done later.
*/
- if (it && it->it_op == IT_GETATTR && rc == 1) {
- first = ll_statahead_enter(parent, &de, 0);
- if (first >= 0)
- ll_statahead_exit(parent, de, 1);
- else if (first == -EEXIST)
- ll_statahead_mark(parent, de);
- }
-
- return rc;
+ if (it != NULL && it->it_op == IT_GETATTR && rc == 1)
+ ll_statahead_enter(parent, &de, 1);
+ goto mark;
}
-#if 0
-static void ll_pin(struct dentry *de, struct vfsmount *mnt, int flag)
+#ifdef HAVE_IOP_ATOMIC_OPEN
+/*
+ * Always trust cached dentries. Update statahead window if necessary.
+ */
+int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
{
- struct inode *inode= de->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_dentry_data *ldd = ll_d2d(de);
- struct obd_client_handle *handle;
- struct obd_capa *oc;
- int rc = 0;
- ENTRY;
- LASSERT(ldd);
-
- cfs_lock_kernel();
- /* Strictly speaking this introduces an additional race: the
- * increments should wait until the rpc has returned.
- * However, given that at present the function is void, this
- * issue is moot. */
- if (flag == 1 && (++ldd->lld_mnt_count) > 1) {
- cfs_unlock_kernel();
- EXIT;
- return;
- }
+ struct inode *parent = dentry->d_parent->d_inode;
+ int unplug = 0;
- if (flag == 0 && (++ldd->lld_cwd_count) > 1) {
- cfs_unlock_kernel();
- EXIT;
- return;
- }
- cfs_unlock_kernel();
-
- handle = (flag) ? &ldd->lld_mnt_och : &ldd->lld_cwd_och;
- oc = ll_mdscapa_get(inode);
- rc = obd_pin(sbi->ll_md_exp, ll_inode2fid(inode), oc, handle, flag);
- capa_put(oc);
- if (rc) {
- cfs_lock_kernel();
- memset(handle, 0, sizeof(*handle));
- if (flag == 0)
- ldd->lld_cwd_count--;
- else
- ldd->lld_mnt_count--;
- cfs_unlock_kernel();
- }
+ ENTRY;
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%s,flags=%u\n",
+ dentry->d_name.name, flags);
- EXIT;
- return;
-}
-
-static void ll_unpin(struct dentry *de, struct vfsmount *mnt, int flag)
-{
- struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
- struct ll_dentry_data *ldd = ll_d2d(de);
- struct obd_client_handle handle;
- int count, rc = 0;
- ENTRY;
- LASSERT(ldd);
-
- cfs_lock_kernel();
- /* Strictly speaking this introduces an additional race: the
- * increments should wait until the rpc has returned.
- * However, given that at present the function is void, this
- * issue is moot. */
- handle = (flag) ? ldd->lld_mnt_och : ldd->lld_cwd_och;
- if (handle.och_magic != OBD_CLIENT_HANDLE_MAGIC) {
- /* the "pin" failed */
- cfs_unlock_kernel();
- EXIT;
- return;
- }
+ if (!(flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE)) &&
+ ll_need_statahead(parent, dentry) > 0) {
+ if (flags & LOOKUP_RCU)
+ RETURN(-ECHILD);
- if (flag)
- count = --ldd->lld_mnt_count;
- else
- count = --ldd->lld_cwd_count;
- cfs_unlock_kernel();
-
- if (count != 0) {
- EXIT;
- return;
- }
+ if (dentry->d_inode == NULL)
+ unplug = 1;
+ do_statahead_enter(parent, &dentry, unplug);
+ ll_statahead_mark(parent, dentry);
+ }
- rc = obd_unpin(sbi->ll_md_exp, &handle, flag);
- EXIT;
- return;
+ RETURN(1);
}
-#endif
-#ifdef HAVE_VFS_INTENT_PATCHES
+#else /* !HAVE_IOP_ATOMIC_OPEN */
int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
{
int rc;
ENTRY;
- if (nd && nd->flags & LOOKUP_LAST && !(nd->flags & LOOKUP_LINK_NOTLAST))
- rc = ll_revalidate_it(dentry, nd->flags, &nd->intent);
- else
- rc = ll_revalidate_it(dentry, 0, NULL);
-
- RETURN(rc);
-}
-#else
-int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
-{
- int rc;
- ENTRY;
+#ifndef HAVE_DCACHE_LOCK
+ /* kernel >= 2.6.38 supports rcu-walk, but lustre doesn't. */
+ if (nd->flags & LOOKUP_RCU)
+ return -ECHILD;
+#endif
if (nd && !(nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT))) {
struct lookup_intent *it;
+
it = ll_convert_intent(&nd->intent.open, nd->flags);
if (IS_ERR(it))
RETURN(0);
- if (it->it_op == (IT_OPEN|IT_CREAT))
- if (nd->intent.open.flags & O_EXCL) {
- CDEBUG(D_VFSTRACE, "create O_EXCL, returning 0\n");
- rc = 0;
- goto out_it;
- }
+
+ if (it->it_op == (IT_OPEN|IT_CREAT) &&
+ nd->intent.open.flags & O_EXCL) {
+ CDEBUG(D_VFSTRACE, "create O_EXCL, returning 0\n");
+ rc = 0;
+ goto out_it;
+ }
rc = ll_revalidate_it(dentry, nd->flags, it);
if (rc && (nd->flags & LOOKUP_OPEN) &&
it_disposition(it, DISP_OPEN_OPEN)) {/*Open*/
-#ifdef HAVE_FILE_IN_STRUCT_INTENT
// XXX Code duplication with ll_lookup_nd
if (S_ISFIFO(dentry->d_inode->i_mode)) {
// We cannot call open here as it would
(struct ptlrpc_request *)
it->d.lustre.it_data);
} else {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
-/* 2.6.1[456] have a bug in open_namei() that forgets to check
- * nd->intent.open.file for error, so we need to return it as lookup's result
- * instead */
- struct file *filp;
-
- nd->intent.open.file->private_data = it;
- filp = lookup_instantiate_filp(nd, dentry,NULL);
- if (IS_ERR(filp)) {
- rc = PTR_ERR(filp);
- }
-#else
- nd->intent.open.file->private_data = it;
- (void)lookup_instantiate_filp(nd, dentry,NULL);
-#endif
+ struct file *filp;
+
+ nd->intent.open.file->private_data = it;
+ filp = lookup_instantiate_filp(nd, dentry,NULL);
+ if (IS_ERR(filp))
+ rc = PTR_ERR(filp);
}
-#else
- ll_release_openhandle(dentry, it);
-#endif /* HAVE_FILE_IN_STRUCT_INTENT */
}
if (!rc && (nd->flags & LOOKUP_CREATE) &&
it_disposition(it, DISP_OPEN_CREATE)) {
RETURN(rc);
}
-#endif
+#endif /* HAVE_IOP_ATOMIC_OPEN */
void ll_d_iput(struct dentry *de, struct inode *inode)
{
- LASSERT(inode);
- if (!find_cbdata(inode))
- inode->i_nlink = 0;
- iput(inode);
+ LASSERT(inode);
+ if (!find_cbdata(inode))
+ clear_nlink(inode);
+ iput(inode);
}
struct dentry_operations ll_d_ops = {
.d_delete = ll_ddelete,
.d_iput = ll_d_iput,
.d_compare = ll_dcompare,
-#if 0
- .d_pin = ll_pin,
- .d_unpin = ll_unpin,
-#endif
};