/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
23 #include <linux/sched.h>
24 #include <linux/smp_lock.h>
25 #include <linux/quotaops.h>
27 #define DEBUG_SUBSYSTEM S_LLITE
29 #include <linux/obd_support.h>
30 #include <linux/lustre_lite.h>
31 #include <linux/lustre_idl.h>
32 #include <linux/lustre_dlm.h>
34 #include "llite_internal.h"
/* should NOT be called with the dcache lock, see fs/dcache.c */
static void ll_release(struct dentry *de)
/* d_release callback: frees the per-dentry ll_dentry_data attached in
 * ll_set_dd().  NOTE(review): this dump elides lines (opening brace and
 * the assignment of "lld", presumably from ll_d2d(de) -- confirm). */
        struct ll_dentry_data *lld;
        /* A dentry being destroyed must no longer be pinned as a cwd or a
         * mount point; those counts are driven by ll_pin()/ll_unpin(). */
        LASSERT(lld->lld_cwd_count == 0);
        LASSERT(lld->lld_mnt_count == 0);
        OBD_FREE(de->d_fsdata, sizeof(struct ll_dentry_data));
void ll_set_dd(struct dentry *de)
/* Attach our private ll_dentry_data to the dentry (d_fsdata), allocating
 * it only once; the matching free is in ll_release().
 * NOTE(review): elided lines (locking around the NULL check, allocation-
 * failure handling) are not visible in this dump -- confirm upstream. */
        if (de->d_fsdata == NULL) {
                OBD_ALLOC(de->d_fsdata, sizeof(struct ll_dentry_data));
void ll_intent_drop_lock(struct lookup_intent *it)
/* Release the DLM lock reference held by a lookup intent, if any. */
        struct lustre_handle *handle;

        /* Only decref when the intent actually holds a lock (non-zero mode). */
        if (it->it_op && it->d.lustre.it_lock_mode) {
                handle = (struct lustre_handle *)&it->d.lustre.it_lock_handle;
                CDEBUG(D_DLMTRACE, "releasing lock with cookie "LPX64
                       " from it %p\n", handle->cookie, it);
                ldlm_lock_decref(handle, it->d.lustre.it_lock_mode);

                /* bug 494: intent_release may be called multiple times, from
                 * this thread and we don't want to double-decref this lock */
                it->d.lustre.it_lock_mode = 0;
void ll_intent_release(struct lookup_intent *it)
/* Tear down a lookup intent: drop its DLM lock (idempotent, see bug 494
 * note in ll_intent_drop_lock) and clear the per-intent state so a
 * second release is harmless. */
        ll_intent_drop_lock(it);
        /* Clearing it_op_release prevents the VFS from calling us again. */
        it->it_op_release = 0;
        it->d.lustre.it_disposition = 0;
        it->d.lustre.it_data = NULL;
void ll_unhash_aliases(struct inode *inode)
/* Walk every dentry aliasing "inode" and mark it invalid: unused aliases
 * are unhashed and moved onto the superblock's orphan list so they cannot
 * be found by lookup, and are flagged DCACHE_LUSTRE_INVALID.
 * Called when a lock revocation tells us our cached name is stale. */
        struct list_head *tmp, *head;
        struct ll_sb_info *sbi;

        sbi = ll_i2sbi(inode);

        CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
               inode->i_ino, inode->i_generation, inode);

        /* Defensive: should never happen (caller passes a live inode). */
        CERROR("unexpected NULL inode, tell phil\n");

        head = &inode->i_dentry;
        /* i_dentry chains all aliases via d_alias; protected by dcache_lock. */
        spin_lock(&dcache_lock);
        while ((tmp = tmp->next) != head) {
                struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
                /* Busy dentries (d_count != 0) are handled elsewhere;
                 * NOTE(review): lines elided here -- the d_count==0 branch's
                 * body is not fully visible in this dump. */
                if (!atomic_read(&dentry->d_count)) {

                spin_unlock(&dcache_lock);

                /* Re-hash onto the orphan list so the dentry stays findable
                 * for cleanup but not for normal lookups. */
                hlist_del_init(&dentry->d_hash);
                dentry->d_flags |= DCACHE_LUSTRE_INVALID;
                hlist_add_head(&dentry->d_hash,
                               &sbi->ll_orphan_dentry_list);

        spin_unlock(&dcache_lock);
131 extern struct dentry *ll_find_alias(struct inode *, struct dentry *);
static int revalidate_it_finish(struct ptlrpc_request *request, int offset,
                                struct lookup_intent *it,
/* Complete an intent-based revalidation: refresh the dentry's inode from
 * the MDS reply carried in "request".
 * NOTE(review): the trailing dentry parameter ("de") and several body
 * lines are elided in this dump -- confirm against upstream. */
        struct ll_sb_info *sbi;

        /* A negative lookup result means there is no inode to refresh. */
        if (it_disposition(it, DISP_LOOKUP_NEG))

        sbi = ll_i2sbi(de->d_inode);
        /* Parse the reply at "offset" and update/replace de->d_inode. */
        rc = ll_prep_inode(sbi->ll_osc_exp, &de->d_inode, request, offset,NULL);
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
/* Post-lookup lock bookkeeping: bind the intent's DLM lock to the inode
 * (so lock cancellation can find and invalidate it), then release
 * plain lookup/getattr locks right away -- only open/create intents
 * need to keep their lock past the lookup. */
        LASSERT(dentry != NULL);

        if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
                struct inode *inode = dentry->d_inode;
                CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
                       inode, inode->i_ino, inode->i_generation);
                mdc_set_lock_data(&it->d.lustre.it_lock_handle, inode);

        /* drop lookup or getattr locks immediately */
        if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR)
                ll_intent_release(it);
void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
/* Normalize the intent pointer before use: warn on uninitialized intents
 * (2.6 kernels), map GETATTR/no-op intents to IT_LOOKUP, substitute the
 * caller-supplied default ("deft") when there is no usable intent, and
 * install our release callback.
 * NOTE(review): elided lines -- the "*itp = deft" substitution implied by
 * the !it branch is not visible in this dump; confirm upstream. */
        struct lookup_intent *it = *itp;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
        if (it && it->it_magic != INTENT_MAGIC) {
                CERROR("WARNING: uninitialized intent\n");

        if (it && (it->it_op == IT_GETATTR || it->it_op == 0))
                it->it_op = IT_LOOKUP;

        /* No intent, or one we don't handle (getxattr): fall back to deft. */
        if (!it || it->it_op == IT_GETXATTR)

        it->it_op_release = ll_intent_release;
int ll_revalidate_it(struct dentry *de, int flags, struct lookup_intent *it)
/* Core d_revalidate implementation: decide whether a cached dentry is
 * still valid by taking (or matching) an intent lock on the MDS via
 * mdc_intent_lock().  Returns non-zero when the dentry may be reused.
 * NOTE(review): many lines are elided in this dump (return paths, goto
 * labels, RETURN/ENTRY macros); comments below only describe what is
 * visible. */
        struct ll_fid pfid, cfid;
        struct it_cb_data icbd;
        struct ll_uctxt ctxt;
        struct ptlrpc_request *req = NULL;
        /* Fallback intent used when the caller supplied none (see
         * ll_frob_intent below). */
        struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
        struct obd_export *exp;

        CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,

        /* Cached negative dentries are unsafe for now - look them up again */
        if (de->d_inode == NULL)

        exp = ll_i2mdcexp(de->d_inode);
        /* Build parent/child fids for the MDS request. */
        ll_inode2fid(&pfid, de->d_parent->d_inode);
        ll_inode2fid(&cfid, de->d_inode);
        icbd.icbd_parent = de->d_parent->d_inode;
        icbd.icbd_childp = &de;

        /*
         * never execute intents for mount points
         * - attrs will be fixed up in ll_revalidate_inode
         */
        if (d_mountpoint(de))

        ll_frob_intent(&it, &lookup_it);

        ll_i2uctxt(&ctxt, de->d_parent->d_inode, de->d_inode);

        rc = mdc_intent_lock(exp, &ctxt, &pfid, de->d_name.name, de->d_name.len,
                             &cfid, it, flags, &req, ll_mdc_blocking_ast);
        /* If req is NULL, then mdc_intent_lock only tried to do a lock match;
         * if all was well, it will return 1 if it found locks, 0 otherwise. */
        if (req == NULL && rc >= 0)

        CDEBUG(D_INFO, "ll_intent_lock: rc %d : it->it_status "
               "%d\n", rc, it->d.lustre.it_status);

        /* Refresh the inode from the MDS reply (offset 1 in the reply). */
        rc = revalidate_it_finish(req, 1, it, de);

        ll_intent_release(it);

        /* unfortunately ll_intent_lock may cause a callback and revoke our
         * NOTE(review): rest of this comment elided -- continues upstream. */
        spin_lock(&dcache_lock);
        hlist_del_init(&de->d_hash);
        spin_unlock(&dcache_lock);

        if (req != NULL && rc == 1)
                ptlrpc_req_finished(req);

        /* Failure path: invalidate every alias of this inode, flag this
         * dentry so lookup won't trust it. */
        ll_unhash_aliases(de->d_inode);
        de->d_flags |= DCACHE_LUSTRE_INVALID;

        /* Success path: bind/drop locks as needed, clear the invalid flag. */
        ll_lookup_finish_locks(it, de);
        de->d_flags &= ~DCACHE_LUSTRE_INVALID;
/*static*/ void ll_pin(struct dentry *de, struct vfsmount *mnt, int flag)
/* Pin a dentry's inode on the MDS so it survives while it is in use as a
 * cwd (flag == 0) or a mount point (flag == 1).  Counterpart of
 * ll_unpin().  NOTE(review): elided lines include the early-return bodies
 * and the obd_pin error-handling branches -- confirm upstream. */
        struct inode *inode= de->d_inode;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_dentry_data *ldd = ll_d2d(de);
        struct obd_client_handle *handle;

        /* Strictly speaking this introduces an additional race: the
         * increments should wait until the rpc has returned.
         * However, given that at present the function is void, this
         * NOTE(review): comment truncated in this dump. */
        /* Already pinned for this purpose -- just bump the count, no RPC. */
        if (flag == 1 && (++ldd->lld_mnt_count) > 1) {

        if (flag == 0 && (++ldd->lld_cwd_count) > 1) {

        /* First pin of this kind: take the MDS pin and record the handle. */
        handle = (flag) ? &ldd->lld_mnt_och : &ldd->lld_cwd_och;
        rc = obd_pin(sbi->ll_mdc_exp, inode->i_ino, inode->i_generation,
                     inode->i_mode & S_IFMT, handle, flag);

        /* Error path: scrub the handle and undo the count we took above
         * (ll_unpin checks och_magic to detect a failed pin). */
        memset(handle, 0, sizeof(*handle));
        ldd->lld_cwd_count--;
        ldd->lld_mnt_count--;
/*static*/ void ll_unpin(struct dentry *de, struct vfsmount *mnt, int flag)
/* Drop one cwd (flag == 0) or mount-point (flag == 1) pin taken by
 * ll_pin(); the MDS unpin RPC is only issued when the count reaches
 * zero.  NOTE(review): elided lines include the count==0 check before
 * obd_unpin -- confirm upstream. */
        struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
        struct ll_dentry_data *ldd = ll_d2d(de);
        /* Copied by value so we can release ldd state before the RPC. */
        struct obd_client_handle handle;

        /* Strictly speaking this introduces an additional race: the
         * increments should wait until the rpc has returned.
         * However, given that at present the function is void, this
         * NOTE(review): comment truncated in this dump. */
        handle = (flag) ? ldd->lld_mnt_och : ldd->lld_cwd_och;
        if (handle.och_magic != OBD_CLIENT_HANDLE_MAGIC) {
                /* the "pin" failed */

        count = --ldd->lld_mnt_count;

        count = --ldd->lld_cwd_count;

        rc = obd_unpin(sbi->ll_mdc_exp, &handle, flag);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
/* 2.6 d_revalidate entry point: adapt the nameidata-based VFS API onto
 * ll_revalidate_it().  Only the last path component of a non-symlink
 * traversal carries a usable intent; everything else revalidates with a
 * NULL intent (ll_frob_intent substitutes a plain lookup). */
static int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
        if (nd->flags & LOOKUP_LAST && !(nd->flags & LOOKUP_LINK_NOTLAST))
                rc = ll_revalidate_it(dentry, nd->flags, &nd->intent);
                rc = ll_revalidate_it(dentry, 0, NULL);
366 struct dentry_operations ll_d_ops = {
367 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
368 .d_revalidate = ll_revalidate_nd,
370 .d_revalidate_it = ll_revalidate_it,
372 .d_release = ll_release,