1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include <linux/sched.h>
24 #include <linux/smp_lock.h>
25 #include <linux/quotaops.h>
27 #define DEBUG_SUBSYSTEM S_LLITE
29 #include <linux/obd_support.h>
30 #include <linux/lustre_lite.h>
31 #include <linux/lustre_idl.h>
32 #include <linux/lustre_dlm.h>
33 #include <linux/lustre_version.h>
35 #include "llite_internal.h"
37 /* should NOT be called with the dcache lock, see fs/dcache.c */
/*
 * ll_release - d_release hook: free the per-dentry Lustre private data.
 * Frees de->d_fsdata (a struct ll_dentry_data) when present, asserting
 * that no cwd/mnt pins (see ll_pin/ll_unpin) are still outstanding.
 * NOTE(review): several interior lines are elided from this excerpt
 * (braces, the assignment of lld); comments describe only visible code.
 */
38 static void ll_release(struct dentry *de)
40 struct ll_dentry_data *lld;
44 CDEBUG(D_DENTRY, "releasing dentry %p\n", de);
47 if (lld) { /* Root dentry does not have ll_dentry_data */
/* Sanity: a dentry being destroyed must not still be pinned. */
48 LASSERT(lld->lld_cwd_count == 0);
49 LASSERT(lld->lld_mnt_count == 0);
50 OBD_FREE(de->d_fsdata, sizeof(struct ll_dentry_data));
56 /* Compare if two dentries are the same. Don't match if the existing dentry
57 * is marked DCACHE_LUSTRE_INVALID. Returns 1 if different, 0 if the same.
59 * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
60 * an AST before calling d_revalidate_it(). The dentry still exists (marked
61 * INVALID) so d_lookup() matches it, but we have no lock on it (so
62 * lock_match() fails) and we spin around real_lookup(). */
63 static int ll_dcompare(struct dentry *parent, struct qstr *d_name,
65 struct dentry *dchild;
/* Fast rejects: differing length or differing bytes => not the same.
 * (Return statements are elided from this excerpt.) */
68 if (d_name->len != name->len)
71 if (memcmp(d_name->name, name->name, name->len))
/* Recover the owning dentry from its embedded qstr so d_flags can be
 * inspected; d_name is the first qstr member of struct dentry here. */
74 dchild = container_of(d_name, struct dentry, d_name); /* ugh */
75 if (dchild->d_flags & DCACHE_LUSTRE_INVALID) {
76 CDEBUG(D_DENTRY,"INVALID dentry %p not matched, was bug 3784\n",
84 /* should NOT be called with the dcache lock, see fs/dcache.c */
/*
 * ll_ddelete - d_delete hook. The visible body only logs whether the
 * dentry is marked DCACHE_LUSTRE_INVALID ("deleting") or not
 * ("keeping"); the return value is elided from this excerpt.
 */
85 static int ll_ddelete(struct dentry *de)
89 CDEBUG(D_DENTRY, "%s dentry %*s (%p, parent %p, inode %p) %s%s\n",
90 (de->d_flags & DCACHE_LUSTRE_INVALID ? "deleting" : "keeping"),
91 de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
92 d_unhashed(de) ? "" : "hashed,",
93 list_empty(&de->d_subdirs) ? "" : "subdirs");
/*
 * ll_set_dd - attach Lustre per-dentry data (struct ll_dentry_data) to
 * de->d_fsdata if it is not already present. Allocation failure handling
 * and any locking around the check are elided from this excerpt.
 */
97 void ll_set_dd(struct dentry *de)
102 CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
103 de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
104 atomic_read(&de->d_count));
/* Allocate only once; some dentries (e.g. root) start without d_fsdata. */
106 if (de->d_fsdata == NULL) {
107 OBD_ALLOC(de->d_fsdata, sizeof(struct ll_dentry_data));
/*
 * ll_intent_drop_lock - release the DLM lock referenced by a lookup
 * intent, if one is held. Clears it_lock_mode afterwards so repeated
 * calls do not decref the same lock twice (see bug 494 note below).
 */
114 void ll_intent_drop_lock(struct lookup_intent *it)
116 struct lustre_handle *handle;
117 struct lustre_intent_data *itdata = LUSTRE_IT(it);
/* Only drop when the intent is live, has private data and a lock mode. */
119 if (it->it_op && itdata && itdata->it_lock_mode) {
120 handle = (struct lustre_handle *)&itdata->it_lock_handle;
121 CDEBUG(D_DLMTRACE, "releasing lock with cookie "LPX64
122 " from it %p\n", handle->cookie, it);
123 ldlm_lock_decref(handle, itdata->it_lock_mode);
125 /* bug 494: intent_release may be called multiple times, from
126 * this thread and we don't want to double-decref this lock */
127 itdata->it_lock_mode = 0;
/*
 * ll_intent_release - drop the intent's DLM lock (idempotent, see
 * ll_intent_drop_lock) and clear the release callback so the intent
 * is not released a second time.
 */
131 void ll_intent_release(struct lookup_intent *it)
135 ll_intent_drop_lock(it);
137 it->it_op_release = 0;
/*
 * ll_intent_free - return the intent's lustre_intent_data to the
 * ll_intent_slab cache and NULL the pointer to prevent use-after-free.
 */
142 void ll_intent_free(struct lookup_intent *it)
145 OBD_SLAB_FREE(it->d.fs_data, ll_intent_slab,
146 sizeof(struct lustre_intent_data));
147 it->d.fs_data = NULL;
/*
 * ll_unhash_aliases - walk every dentry alias of @inode under
 * dcache_lock: unused aliases (d_count == 0) are deleted; in-use ones
 * not yet marked invalid are unhashed, flagged DCACHE_LUSTRE_INVALID,
 * and parked on the superblock's orphan-dentry list. Called when a
 * lock cancellation invalidates our cached name->inode bindings.
 * NOTE(review): loop re-entry after the unlock around deletion, and the
 * deletion call itself, are elided from this excerpt.
 */
151 void ll_unhash_aliases(struct inode *inode)
153 struct list_head *tmp, *head;
154 struct ll_sb_info *sbi;
/* Defensive: a NULL inode here indicates a caller bug. */
158 CERROR("unexpected NULL inode, tell phil\n");
163 CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
164 inode->i_ino, inode->i_generation, inode);
166 sbi = ll_i2sbi(inode);
167 head = &inode->i_dentry;
/* dcache_lock protects the i_dentry alias list and d_hash chains. */
169 spin_lock(&dcache_lock);
171 while ((tmp = tmp->next) != head) {
172 struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
/* Unreferenced alias: safe to delete outright (done unlocked). */
173 if (atomic_read(&dentry->d_count) == 0) {
174 CDEBUG(D_DENTRY, "deleting dentry %.*s (%p) parent %p "
175 "inode %p\n", dentry->d_name.len,
176 dentry->d_name.name, dentry, dentry->d_parent,
180 spin_unlock(&dcache_lock);
/* In-use alias not yet invalidated: unhash it and move it to the
 * orphan list so lookups no longer find it. */
183 } else if (!(dentry->d_flags & DCACHE_LUSTRE_INVALID)) {
184 CDEBUG(D_DENTRY, "unhashing dentry %.*s (%p) parent %p "
185 "inode %p refc %d\n", dentry->d_name.len,
186 dentry->d_name.name, dentry, dentry->d_parent,
187 dentry->d_inode, atomic_read(&dentry->d_count));
188 hlist_del_init(&dentry->d_hash);
189 dentry->d_flags |= DCACHE_LUSTRE_INVALID;
190 hlist_add_head(&dentry->d_hash,
191 &sbi->ll_orphan_dentry_list);
194 spin_unlock(&dcache_lock);
198 extern struct dentry *ll_find_alias(struct inode *, struct dentry *);
/*
 * revalidate_it_finish - finish dentry revalidation after the MD intent
 * RPC completed: a DISP_LOOKUP_NEG disposition short-circuits (negative
 * lookup); otherwise the dentry's inode is refreshed from the reply at
 * @offset via ll_prep_inode(). Returns rc (error paths elided here).
 */
200 int revalidate_it_finish(struct ptlrpc_request *request, int offset,
201 struct lookup_intent *it, struct dentry *de)
203 struct ll_sb_info *sbi;
210 if (it_disposition(it, DISP_LOOKUP_NEG))
213 sbi = ll_i2sbi(de->d_inode);
214 rc = ll_prep_inode(sbi->ll_dt_exp, sbi->ll_md_exp,
215 &de->d_inode, request, offset, NULL);
/*
 * ll_lookup_finish_locks - post-lookup lock bookkeeping: if the intent
 * holds a DLM lock and the dentry has an inode, attach the inode to the
 * lock (so blocking ASTs can find and invalidate it); then drop locks
 * that are only needed for LOOKUP/GETATTR/CHDIR-style intents.
 */
220 void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
223 LASSERT(dentry != NULL);
225 if (LUSTRE_IT(it)->it_lock_mode && dentry->d_inode != NULL) {
226 struct inode *inode = dentry->d_inode;
227 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
228 inode, inode->i_ino, inode->i_generation);
229 mdc_set_lock_data(NULL, &LUSTRE_IT(it)->it_lock_handle, inode);
232 /* drop lookup or getattr locks immediately */
233 if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR ||
234 it->it_op == IT_CHDIR) {
235 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
237 * on 2.6 there are situations when several lookups and
238 * revalidations may be requested during single operation.
239 * Therefore, we don't release intent here -bzzz
241 ll_intent_drop_lock(it);
/* 2.4 path: the intent will not be reused, release it fully. */
243 ll_intent_release(it);
/*
 * ll_frob_intent - normalize *itp before use: substitute the caller's
 * default intent @deft when the passed intent is NULL (or IT_GETXATTR),
 * and allocate the lustre-private intent data if not yet allocated.
 */
248 void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
250 struct lookup_intent *it = *itp;
252 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
/* 2.6 intents carry a magic; catch callers passing garbage early. */
254 LASSERTF(it->it_magic == INTENT_MAGIC, "bad intent magic: %x\n",
259 if (!it || it->it_op == IT_GETXATTR)
265 if (ll_intent_alloc(it)) {
266 CERROR("Failed to allocate memory for lustre specific intent "
268 /* XXX: we cannot return status just yet */
/*
 * ll_intent_alloc - allocate per-intent lustre_intent_data from the
 * ll_intent_slab cache into it->d.fs_data and install the release
 * callback. Errors on double allocation or allocation failure
 * (return statements elided from this excerpt).
 */
273 int ll_intent_alloc(struct lookup_intent *it)
276 CERROR("Intent alloc on already allocated intent\n");
279 OBD_SLAB_ALLOC(it->d.fs_data, ll_intent_slab, SLAB_KERNEL,
280 sizeof(struct lustre_intent_data));
281 if (!it->d.fs_data) {
282 CERROR("Failed to allocate memory for lustre specific intent "
/* Pair with ll_intent_release() so locks get dropped on release. */
287 it->it_op_release = ll_intent_release;
/*
 * ll_revalidate_it - decide whether a cached dentry is still valid with
 * respect to the MDS, using the caller's lookup intent (or a local
 * IT_LOOKUP intent as a fallback). Returns nonzero if valid, 0 to force
 * a fresh lookup. Broad flow (many lines elided in this excerpt):
 *   1. GNS (automount) handling for pending mount-objects;
 *   2. fast paths: negative dentry -> invalid, fs root -> valid;
 *   3. IT_GETATTR: probe for an existing LOOKUP lock via a separate
 *      lookup intent;
 *   4. IT_OPEN with cached inode: try to reuse an existing open handle
 *      protected by an MDS_INODELOCK_OPEN DLM lock;
 *   5. otherwise issue md_intent_lock() and finish via
 *      revalidate_it_finish()/ll_lookup_finish_locks().
 */
292 int ll_revalidate_it(struct dentry *de, int flags, struct nameidata *nd,
293 struct lookup_intent *it)
295 struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
296 struct ptlrpc_request *req = NULL;
297 struct obd_export *exp;
298 struct it_cb_data icbd;
299 struct lustre_id pid;
300 struct lustre_id cid;
/* GNS: if a mount is pending on this dentry and nothing is mounting
 * it yet, attempt the mount now (outside d_lock). */
304 spin_lock(&de->d_lock);
306 if ((de->d_flags & DCACHE_GNS_PENDING) &&
307 !(de->d_flags & DCACHE_GNS_MOUNTING))
309 spin_unlock(&de->d_lock);
312 int err = ll_gns_mount_object(de, nd->mnt);
314 CERROR("can't mount %s, err = %d\n",
315 de->d_name.name, err);
319 spin_unlock(&de->d_lock);
321 CDEBUG(D_VFSTRACE, "VFS Op:name=%s (%p), intent=%s\n", de->d_name.name,
324 /* Cached negative dentries are unsafe for now - look them up again */
325 if (de->d_inode == NULL)
328 /* Root of the tree is always valid, attributes would be fixed in
329 ll_inode_revalidate_it */
330 if (de->d_sb->s_root == de)
333 CDEBUG(D_INODE, "revalidate 0x%p: %*s -> %lu/%lu\n",
334 de, de->d_name.len, de->d_name.name,
335 (unsigned long) de->d_inode->i_ino,
336 (unsigned long) de->d_inode->i_generation);
/* Build parent/child lustre ids for the intent RPC. */
338 exp = ll_i2mdexp(de->d_inode);
339 ll_inode2id(&pid, de->d_parent->d_inode);
340 ll_inode2id(&cid, de->d_inode);
341 LASSERT(id_fid(&cid) != 0);
343 icbd.icbd_parent = de->d_parent->d_inode;
344 icbd.icbd_childp = &de;
347 * never execute intents for mount points. Attributes will be fixed up
348 * in ll_inode_revalidate_it().
350 if (d_mountpoint(de))
/* Refresh GNS mount LRU timestamp on traversal. */
354 nd->mnt->mnt_last_used = jiffies;
356 OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
/* Remember the caller's original op before ll_frob_intent() may
 * substitute the local lookup_it for a NULL intent. */
357 orig_it = it ? it->it_op : IT_OPEN;
358 ll_frob_intent(&it, &lookup_it);
361 if (it->it_op == IT_GETATTR) { /* We need to check for LOOKUP lock as
363 rc = ll_intent_alloc(&lookup_it);
365 LBUG(); /* Can't think of better idea just yet */
367 rc = md_intent_lock(exp, &pid, de->d_name.name,
368 de->d_name.len, NULL, 0, &cid, &lookup_it,
369 flags, &req, ll_mdc_blocking_ast);
370 /* If there was no lookup lock, no point in even checking for
380 if (it_disposition(&lookup_it, DISP_LOOKUP_NEG)) {
387 ptlrpc_req_finished(req);
389 ll_lookup_finish_locks(&lookup_it, de);
390 /* XXX: on 2.6 ll_lookup_finish_locks does not call ll_intent_release */
391 ll_intent_release(&lookup_it);
/* IT_OPEN on a cached inode: try to satisfy the open from an existing
 * MDS open handle, but only if a matching OPEN ibits lock is held. */
395 if ((it->it_op == IT_OPEN) && de->d_inode) {
396 struct inode *inode = de->d_inode;
397 struct ll_inode_info *lli = ll_i2info(inode);
398 struct obd_client_handle **och_p;
400 struct obd_device *obddev;
401 struct lustre_handle lockh;
402 int flags = LDLM_FL_BLOCK_GRANTED;
403 ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN}};
404 struct ldlm_res_id file_res_id = {.name = {id_fid(&lli->lli_id),
405 id_group(&lli->lli_id)}};
/* Pick the open-handle slot matching the requested access mode. */
408 if (it->it_flags & FMODE_WRITE) {
409 och_p = &lli->lli_mds_write_och;
410 och_usecount = &lli->lli_open_fd_write_count;
412 } else if (it->it_flags & FMODE_EXEC) {
413 och_p = &lli->lli_mds_exec_och;
414 och_usecount = &lli->lli_open_fd_exec_count;
417 och_p = &lli->lli_mds_read_och;
418 och_usecount = &lli->lli_open_fd_read_count;
422 /* Check for the proper lock */
423 obddev = md_get_real_obd(exp, &lli->lli_id);
424 if (!ldlm_lock_match(obddev->obd_namespace, flags, &file_res_id,
425 LDLM_IBITS, &policy, lockmode, &lockh))
427 down(&lli->lli_och_sem);
428 if (*och_p) { /* Everything is open already, do nothing */
429 /*(*och_usecount)++; Do not let them steal our open
430 handle from under us */
431 /* XXX The code above was my original idea, but in case
432 we have the handle, but we cannot use it due to later
433 checks (e.g. O_CREAT|O_EXCL flags set), nobody
434 would decrement counter increased here. So we just
435 hope the lock won't be invalidated in between. But
436 if it would be, we'll reopen the open request to
437 MDS later during file open path */
438 up(&lli->lli_och_sem);
/* Hand the matched lock to the intent; ll_intent_release()
 * will decref it later. */
439 memcpy(&LUSTRE_IT(it)->it_lock_handle, &lockh,
441 LUSTRE_IT(it)->it_lock_mode = lockmode;
444 /* Hm, interesting. Lock is present, but no open
446 up(&lli->lli_och_sem);
447 ldlm_lock_decref(&lockh, lockmode);
/* General path: full intent lock RPC against the MDS. */
453 rc = md_intent_lock(exp, &pid, de->d_name.name, de->d_name.len,
454 NULL, 0, &cid, it, flags, &req, ll_mdc_blocking_ast);
455 /* If req is NULL, then md_intent_lock() only tried to do a lock match;
456 * if all was well, it will return 1 if it found locks, 0 otherwise. */
457 if (req == NULL && rc >= 0) {
465 CDEBUG(D_INFO, "ll_intent_lock(): rc %d : it->it_status "
466 "%d\n", rc, LUSTRE_IT(it)->it_status);
471 rc = revalidate_it_finish(req, 1, it, de);
473 ll_intent_release(it);
478 /* unfortunately ll_intent_lock may cause a callback and revoke our
480 spin_lock(&dcache_lock);
481 hlist_del_init(&de->d_hash);
483 spin_unlock(&dcache_lock);
487 if (req != NULL && rc == 1)
488 ptlrpc_req_finished(req);
/* Failure cleanup: release locally-substituted intent and its request,
 * then invalidate all aliases of the now-untrusted inode. */
491 if (it == &lookup_it) {
492 ll_intent_release(it);
493 if (req) /* Special case: We did lookup and it failed,
494 need to free request */
495 ptlrpc_req_finished(req);
497 ll_unhash_aliases(de->d_inode);
501 CDEBUG(D_DENTRY, "revalidated dentry %*s (%p) parent %p "
502 "inode %p refc %d\n", de->d_name.len,
503 de->d_name.name, de, de->d_parent, de->d_inode,
504 atomic_read(&de->d_count));
506 ll_lookup_finish_locks(it, de);
/* Dentry proved valid again; clear the invalidation flag. */
507 de->d_flags &= ~DCACHE_LUSTRE_INVALID;
508 if (it == &lookup_it)
509 ll_intent_release(it);
/* GNS mount-object trigger: setuid directory traversed with a
 * continuing lookup or a CHDIR/OPEN intent. */
511 if (!((de->d_inode->i_mode & S_ISUID) && S_ISDIR(de->d_inode->i_mode)) ||
512 !(flags & LOOKUP_CONTINUE || (orig_it & (IT_CHDIR | IT_OPEN))))
515 if (nd && !(de->d_flags & DCACHE_GNS_MOUNTING)) {
516 int err = ll_gns_mount_object(de, nd->mnt);
518 CERROR("can't mount %s, err = %d\n",
519 de->d_name.name, err);
/* Retry path: re-allocate the intent and redo the intent lock by name,
 * then verify the server answered with the same inode id. */
524 if (ll_intent_alloc(it))
526 // We did that already, right? ll_inode2id(&pid, de->d_parent->d_inode);
527 rc = md_intent_lock(exp, &pid, de->d_name.name,
528 de->d_name.len, NULL, 0, NULL,
529 it, 0, &req, ll_mdc_blocking_ast);
531 struct mds_body *mds_body = lustre_msg_buf(req->rq_repmsg, 1, sizeof(*mds_body));
533 /* See if we got same inode, if not - return error */
534 if (id_equal_stc(&cid, &mds_body->id1))
535 goto revalidate_finish;
/*
 * ll_pin - pin a dentry's inode on the MDS so it survives lock
 * cancellation while in use as a cwd (flag == 0) or mountpoint
 * (flag == 1). First pin of each kind issues an obd_pin() RPC; on RPC
 * failure the corresponding count is rolled back and the handle zeroed.
 * NOTE(review): locking around the counters and the early-return paths
 * are elided from this excerpt.
 */
541 /*static*/ void ll_pin(struct dentry *de, struct vfsmount *mnt, int flag)
543 struct inode *inode= de->d_inode;
544 struct ll_sb_info *sbi = ll_i2sbi(inode);
545 struct ll_dentry_data *ldd = ll_d2d(de);
546 struct obd_client_handle *handle;
552 /* Strictly speaking this introduces an additional race: the
553 * increments should wait until the rpc has returned.
554 * However, given that at present the function is void, this
/* Already pinned for this use: just bump the count, no RPC needed. */
556 if (flag == 1 && (++ldd->lld_mnt_count) > 1) {
562 if (flag == 0 && (++ldd->lld_cwd_count) > 1) {
/* First pin of this kind: obtain a server-side handle. */
569 handle = (flag) ? &ldd->lld_mnt_och : &ldd->lld_cwd_och;
570 rc = obd_pin(sbi->ll_md_exp, inode->i_ino, inode->i_generation,
571 inode->i_mode & S_IFMT, handle, flag);
/* RPC failed: invalidate the handle and undo the count bump. */
575 memset(handle, 0, sizeof(*handle));
577 ldd->lld_cwd_count--;
579 ldd->lld_mnt_count--;
/*
 * ll_unpin - counterpart of ll_pin(): drop a cwd (flag == 0) or
 * mountpoint (flag == 1) pin, issuing obd_unpin() when the last
 * reference of that kind goes away. A handle whose magic is not
 * OBD_CLIENT_HANDLE_MAGIC means the original pin RPC failed, so there
 * is nothing to unpin on the server.
 */
587 /*static*/ void ll_unpin(struct dentry *de, struct vfsmount *mnt, int flag)
589 struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
590 struct ll_dentry_data *ldd = ll_d2d(de);
591 struct obd_client_handle handle;
597 /* Strictly speaking this introduces an additional race: the
598 * increments should wait until the rpc has returned.
599 * However, given that at present the function is void, this
/* Copy the handle by value; the ldd slot may be reused meanwhile. */
601 handle = (flag) ? ldd->lld_mnt_och : ldd->lld_cwd_och;
602 if (handle.och_magic != OBD_CLIENT_HANDLE_MAGIC) {
603 /* the "pin" failed */
610 count = --ldd->lld_mnt_count;
612 count = --ldd->lld_cwd_count;
/* Last reference of this kind gone: release the server-side pin. */
620 rc = obd_unpin(sbi->ll_md_exp, &handle, flag);
624 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
/*
 * ll_revalidate_nd - 2.6 d_revalidate adapter: forwards to
 * ll_revalidate_it(), passing the nameidata's open intent only when
 * this is the final (non-link-intermediate) component of the lookup.
 */
625 static int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
630 if (nd && nd->flags & LOOKUP_LAST && !(nd->flags & LOOKUP_LINK_NOTLAST))
631 rc = ll_revalidate_it(dentry, nd->flags, nd, &nd->intent.open);
633 rc = ll_revalidate_it(dentry, 0, nd, NULL);
639 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/*
 * ll_dentry_iput (2.4 variant) - d_iput hook: on the last name->inode
 * detach, clear the MD lock callback data for this (parent, name,
 * child) binding so stale ASTs no longer reference the inode.
 * NOTE(review): the final iput() is elided from this excerpt.
 */
640 static void ll_dentry_iput(struct dentry *dentry, struct inode *inode)
642 struct ll_sb_info *sbi = ll_i2sbi(inode);
643 struct lustre_id parent, child;
645 LASSERT(dentry->d_parent && dentry->d_parent->d_inode);
646 ll_inode2id(&parent, dentry->d_parent->d_inode);
647 ll_inode2id(&child, inode);
648 md_change_cbdata_name(sbi->ll_md_exp, &parent,
649 (char *)dentry->d_name.name,
650 dentry->d_name.len, &child,
651 null_if_equal, inode);
/*
 * ll_dentry_iput (2.6 variant) - same as the 2.4 version but skips the
 * callback-data cleanup for the filesystem root (d_parent == dentry),
 * which has no parent binding to clear.
 * NOTE(review): the final iput() is elided from this excerpt.
 */
655 static void ll_dentry_iput(struct dentry *dentry, struct inode *inode)
657 struct ll_sb_info *sbi = ll_i2sbi(inode);
658 struct lustre_id parent, child;
660 if (dentry->d_parent != dentry) {
661 /* Do not do this for root of the tree */
662 LASSERT(dentry->d_parent && dentry->d_parent->d_inode);
663 ll_inode2id(&parent, dentry->d_parent->d_inode);
664 ll_inode2id(&child, inode);
665 md_change_cbdata_name(sbi->ll_md_exp, &parent,
666 (char *)dentry->d_name.name,
667 dentry->d_name.len, &child,
668 null_if_equal, inode);
/*
 * ll_d_ops - Lustre dentry operations table. On 2.6 kernels the VFS
 * calls d_revalidate (adapter ll_revalidate_nd); on intent-patched 2.4
 * kernels it calls d_revalidate_it directly. The remaining hooks are
 * shared between both kernel generations.
 */
675 struct dentry_operations ll_d_ops = {
676 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
677 .d_revalidate = ll_revalidate_nd,
679 .d_revalidate_it = ll_revalidate_it,
681 .d_release = ll_release,
682 .d_iput = ll_dentry_iput,
683 .d_delete = ll_ddelete,
684 .d_compare = ll_dcompare,