lustre/llite/dcache.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/quotaops.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/obd_support.h>
#include <linux/lustre_lite.h>
#include <linux/lustre_idl.h>
#include <linux/lustre_dlm.h>
#include <linux/lustre_version.h>

#include "llite_internal.h"

/* should NOT be called with the dcache lock, see fs/dcache.c */
static void ll_release(struct dentry *de)
{
        struct ll_dentry_data *lld;
        ENTRY;
        LASSERT(de != NULL);

        CDEBUG(D_DENTRY, "releasing dentry %p\n", de);

        lld = ll_d2d(de);
        if (lld) { /* Root dentry does not have ll_dentry_data */
                LASSERT(lld->lld_cwd_count == 0);
                LASSERT(lld->lld_mnt_count == 0);
                OBD_FREE(de->d_fsdata, sizeof(struct ll_dentry_data));
        }

        EXIT;
}

/* Compare if two dentries are the same.  Don't match if the existing dentry
 * is marked DCACHE_LUSTRE_INVALID.  Returns 1 if different, 0 if the same.
 *
 * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
 * an AST before calling d_revalidate_it().  The dentry still exists (marked
 * INVALID) so d_lookup() matches it, but we have no lock on it (so
 * lock_match() fails) and we spin around real_lookup(). */
static int ll_dcompare(struct dentry *parent, struct qstr *d_name,
                       struct qstr *name)
{
        struct dentry *dchild;
        ENTRY;

        if (d_name->len != name->len)
                RETURN(1);

        if (memcmp(d_name->name, name->name, name->len))
                RETURN(1);

        dchild = container_of(d_name, struct dentry, d_name); /* ugh */
        if (dchild->d_flags & DCACHE_LUSTRE_INVALID) {
                CDEBUG(D_DENTRY, "INVALID dentry %p not matched, was bug 3784\n",
                       dchild);
                RETURN(1);
        }

        RETURN(0);
}

/* should NOT be called with the dcache lock, see fs/dcache.c */
static int ll_ddelete(struct dentry *de)
{
        ENTRY;
        LASSERT(de);
        CDEBUG(D_DENTRY, "%s dentry %*s (%p, parent %p, inode %p) %s%s\n",
               (de->d_flags & DCACHE_LUSTRE_INVALID ? "deleting" : "keeping"),
               de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
               d_unhashed(de) ? "" : "hashed,",
               list_empty(&de->d_subdirs) ? "" : "subdirs");
        RETURN(0);
}

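/* Lazily attach an ll_dentry_data to de->d_fsdata; the allocation is done
 * under the BKL so that concurrent callers do not both allocate. */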
void ll_set_dd(struct dentry *de)
{
        ENTRY;
        LASSERT(de != NULL);

        CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
               de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
               atomic_read(&de->d_count));
        lock_kernel();
        if (de->d_fsdata == NULL) {
                OBD_ALLOC(de->d_fsdata, sizeof(struct ll_dentry_data));
        }
        unlock_kernel();

        EXIT;
}

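/* Drop the DLM lock referenced by the intent, if it holds one.  Zeroing
 * it_lock_mode afterwards makes repeated calls harmless (see the bug 494
 * note below). */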
void ll_intent_drop_lock(struct lookup_intent *it)
{
        struct lustre_handle *handle;
        struct lustre_intent_data *itdata = LUSTRE_IT(it);

        if (it->it_op && itdata && itdata->it_lock_mode) {
                handle = (struct lustre_handle *)&itdata->it_lock_handle;
                CDEBUG(D_DLMTRACE, "releasing lock with cookie "LPX64
                       " from it %p\n", handle->cookie, it);
                ldlm_lock_decref(handle, itdata->it_lock_mode);

                /* bug 494: intent_release may be called multiple times, from
                 * this thread and we don't want to double-decref this lock */
                itdata->it_lock_mode = 0;
        }
}

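/* Tear down an intent completely: drop its lock (if any) and free the
 * Lustre-private intent data. */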
void ll_intent_release(struct lookup_intent *it)
{
        ENTRY;

        ll_intent_drop_lock(it);
        it->it_magic = 0;
        it->it_op_release = 0;
        ll_intent_free(it);
        EXIT;
}

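/* Free the lustre_intent_data slab object attached by ll_intent_alloc(). */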
void ll_intent_free(struct lookup_intent *it)
{
        if (it->d.fs_data) {
                OBD_SLAB_FREE(it->d.fs_data, ll_intent_slab,
                              sizeof(struct lustre_intent_data));
                it->d.fs_data = NULL;
        }
}

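/* Walk every dentry alias of @inode: unused aliases are dropped outright,
 * busy ones are marked DCACHE_LUSTRE_INVALID and moved to the per-sb
 * orphan list so that d_lookup() no longer matches them. */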
void ll_unhash_aliases(struct inode *inode)
{
        struct list_head *tmp, *head;
        struct ll_sb_info *sbi;
        ENTRY;

        if (inode == NULL) {
                CERROR("unexpected NULL inode, tell phil\n");
                EXIT;
                return;
        }

        CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
               inode->i_ino, inode->i_generation, inode);

        sbi = ll_i2sbi(inode);
        head = &inode->i_dentry;
restart:
        spin_lock(&dcache_lock);
        tmp = head;
        while ((tmp = tmp->next) != head) {
                struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
                if (atomic_read(&dentry->d_count) == 0) {
                        CDEBUG(D_DENTRY, "deleting dentry %.*s (%p) parent %p "
                               "inode %p\n", dentry->d_name.len,
                               dentry->d_name.name, dentry, dentry->d_parent,
                               dentry->d_inode);
                        dget_locked(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dcache_lock);
                        dput(dentry);
                        goto restart;
                } else if (!(dentry->d_flags & DCACHE_LUSTRE_INVALID)) {
                        CDEBUG(D_DENTRY, "unhashing dentry %.*s (%p) parent %p "
                               "inode %p refc %d\n", dentry->d_name.len,
                               dentry->d_name.name, dentry, dentry->d_parent,
                               dentry->d_inode, atomic_read(&dentry->d_count));
                        hlist_del_init(&dentry->d_hash);
                        dentry->d_flags |= DCACHE_LUSTRE_INVALID;
                        hlist_add_head(&dentry->d_hash,
                                       &sbi->ll_orphan_dentry_list);
                }
        }
        spin_unlock(&dcache_lock);
        EXIT;
}

extern struct dentry *ll_find_alias(struct inode *, struct dentry *);

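/* Complete revalidation after the intent RPC: a negative disposition means
 * the name no longer exists; otherwise refresh the inode from the reply. */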
int revalidate_it_finish(struct ptlrpc_request *request, int offset,
                         struct lookup_intent *it, struct dentry *de)
{
        struct ll_sb_info *sbi;
        int rc = 0;
        ENTRY;

        if (!request)
                RETURN(0);

        if (it_disposition(it, DISP_LOOKUP_NEG))
                RETURN(-ENOENT);

        sbi = ll_i2sbi(de->d_inode);
        rc = ll_prep_inode(sbi->ll_dt_exp, sbi->ll_md_exp,
                           &de->d_inode, request, offset, NULL);

        RETURN(rc);
}

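/* Attach the inode to the DLM lock taken by the intent (so blocking ASTs
 * can find it) and drop locks that are not needed past the lookup. */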
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
{
        LASSERT(it != NULL);
        LASSERT(dentry != NULL);

        if (LUSTRE_IT(it)->it_lock_mode && dentry->d_inode != NULL) {
                struct inode *inode = dentry->d_inode;
                CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
                       inode, inode->i_ino, inode->i_generation);
                mdc_set_lock_data(NULL, &LUSTRE_IT(it)->it_lock_handle, inode);
        }

        /* drop lookup or getattr locks immediately */
        if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR ||
            it->it_op == IT_CHDIR) {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
                /*
                 * on 2.6 there are situations when several lookups and
                 * revalidations may be requested during single operation.
                 * Therefore, we don't release intent here -bzzz
                 */
                ll_intent_drop_lock(it);
#else
                ll_intent_release(it);
#endif
        }
}

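/* Make sure there is a usable intent to work with: fall back to the
 * caller-supplied default for a missing (or IT_GETXATTR) intent and
 * allocate the Lustre-private intent data if not yet present. */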
void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
{
        struct lookup_intent *it = *itp;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
        if (it) {
                LASSERTF(it->it_magic == INTENT_MAGIC, "bad intent magic: %x\n",
                         it->it_magic);
        }
#endif

        if (!it || it->it_op == IT_GETXATTR)
                it = *itp = deft;

        if (it->d.fs_data)
                return;

        if (ll_intent_alloc(it)) {
                CERROR("Failed to allocate memory for lustre specific intent "
                       "data\n");
                /* XXX: we cannot return status just yet */
                LBUG();
        }
}

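/* Allocate the Lustre-private part of a lookup intent from ll_intent_slab
 * and hook up the release callback. */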
int ll_intent_alloc(struct lookup_intent *it)
{
        if (it->d.fs_data) {
                CERROR("Intent alloc on already allocated intent\n");
                return 0;
        }
        OBD_SLAB_ALLOC(it->d.fs_data, ll_intent_slab, SLAB_KERNEL,
                       sizeof(struct lustre_intent_data));
        if (!it->d.fs_data) {
                CERROR("Failed to allocate memory for lustre specific intent "
                       "data\n");
                return -ENOMEM;
        }

        it->it_op_release = ll_intent_release;

        return 0;
}

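/* Decide whether a cached dentry is still usable.  Returns 1 if the dentry
 * remains valid, 0 if the caller must do a fresh lookup.  GNS mount points,
 * negative dentries, the root dentry and mounted-on dentries are handled
 * as special cases before any RPC is sent. */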
int ll_revalidate_it(struct dentry *de, int flags, struct nameidata *nd,
                     struct lookup_intent *it)
{
        struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
        struct ptlrpc_request *req = NULL;
        struct obd_export *exp;
        struct it_cb_data icbd;
        struct lustre_id pid;
        struct lustre_id cid;
        int orig_it, rc = 0;
        ENTRY;

        spin_lock(&de->d_lock);

        if ((de->d_flags & DCACHE_GNS_PENDING) &&
            !(de->d_flags & DCACHE_GNS_MOUNTING)) {
                spin_unlock(&de->d_lock);

                if (nd) {
                        int err = ll_gns_mount_object(de, nd->mnt);
                        if (err)
                                CERROR("can't mount %s, err = %d\n",
                                       de->d_name.name, err);
                }
                RETURN(1);
        }
        spin_unlock(&de->d_lock);

        CDEBUG(D_VFSTRACE, "VFS Op:name=%s (%p), intent=%s\n", de->d_name.name,
               de, LL_IT2STR(it));

        /* Cached negative dentries are unsafe for now - look them up again */
        if (de->d_inode == NULL)
                RETURN(0);

        /* Root of the tree is always valid, attributes would be fixed in
           ll_inode_revalidate_it */
        if (de->d_sb->s_root == de)
                RETURN(1);

        CDEBUG(D_INODE, "revalidate 0x%p: %*s -> %lu/%lu\n",
               de, de->d_name.len, de->d_name.name,
               (unsigned long) de->d_inode->i_ino,
               (unsigned long) de->d_inode->i_generation);

        exp = ll_i2mdexp(de->d_inode);
        ll_inode2id(&pid, de->d_parent->d_inode);
        ll_inode2id(&cid, de->d_inode);
        LASSERT(id_fid(&cid) != 0);

        icbd.icbd_parent = de->d_parent->d_inode;
        icbd.icbd_childp = &de;

        /*
         * never execute intents for mount points. Attributes will be fixed up
         * in ll_inode_revalidate_it().
         */
        if (d_mountpoint(de))
                RETURN(1);

        if (nd != NULL)
                nd->mnt->mnt_last_used = jiffies;

        OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
        orig_it = it ? it->it_op : IT_OPEN;
        ll_frob_intent(&it, &lookup_it);
        LASSERT(it != NULL);

        if (it->it_op == IT_GETATTR) { /* We need to check for LOOKUP lock as
                                          well */
                rc = ll_intent_alloc(&lookup_it);
                if (rc)
                        LBUG(); /* Can't think of better idea just yet */

                rc = md_intent_lock(exp, &pid, de->d_name.name,
                                    de->d_name.len, NULL, 0, &cid, &lookup_it,
                                    flags, &req, ll_mdc_blocking_ast);
                /* If there was no lookup lock, no point in even checking for
                   UPDATE lock */
                if (!rc) {
                        it = &lookup_it;
                        if (!req) {
                                ll_intent_free(it);
                                goto do_lookup;
                        }
                        GOTO(out, rc);
                }
                if (it_disposition(&lookup_it, DISP_LOOKUP_NEG)) {
                        it = &lookup_it;
                        ll_intent_free(it);
                        GOTO(out, rc = 0);
                }

                if (req)
                        ptlrpc_req_finished(req);
                req = NULL;
                ll_lookup_finish_locks(&lookup_it, de);
                /* XXX: on 2.6 ll_lookup_finish_locks() does not call
                 * ll_intent_release() */
                ll_intent_release(&lookup_it);
        }

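        /* For open intents, try to reuse an already granted open lock and an
         * existing MDS open handle instead of sending another intent RPC. */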
#if 1
        if ((it->it_op == IT_OPEN) && de->d_inode) {
                struct inode *inode = de->d_inode;
                struct ll_inode_info *lli = ll_i2info(inode);
                struct obd_client_handle **och_p;
                __u64 *och_usecount;
                struct obd_device *obddev;
                struct lustre_handle lockh;
                int flags = LDLM_FL_BLOCK_GRANTED;
                ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN}};
                struct ldlm_res_id file_res_id = {.name = {id_fid(&lli->lli_id),
                                                           id_group(&lli->lli_id)}};
                int lockmode;

                if (it->it_flags & FMODE_WRITE) {
                        och_p = &lli->lli_mds_write_och;
                        och_usecount = &lli->lli_open_fd_write_count;
                        lockmode = LCK_CW;
                } else if (it->it_flags & FMODE_EXEC) {
                        och_p = &lli->lli_mds_exec_och;
                        och_usecount = &lli->lli_open_fd_exec_count;
                        lockmode = LCK_PR;
                } else {
                        och_p = &lli->lli_mds_read_och;
                        och_usecount = &lli->lli_open_fd_read_count;
                        lockmode = LCK_CR;
                }

                /* Check for the proper lock */
                obddev = md_get_real_obd(exp, &lli->lli_id);
                if (!ldlm_lock_match(obddev->obd_namespace, flags, &file_res_id,
                                     LDLM_IBITS, &policy, lockmode, &lockh))
                        goto do_lock;
                down(&lli->lli_och_sem);
                if (*och_p) { /* Everything is open already, do nothing */
                        /*(*och_usecount)++;  Do not let them steal our open
                                              handle from under us */
                        /* XXX The code above was my original idea, but in case
                           we have the handle, but we cannot use it due to later
                           checks (e.g. O_CREAT|O_EXCL flags set), nobody
                           would decrement counter increased here. So we just
                           hope the lock won't be invalidated in between. But
                           if it would be, we'll reopen the open request to
                           MDS later during file open path */
                        up(&lli->lli_och_sem);
                        memcpy(&LUSTRE_IT(it)->it_lock_handle, &lockh,
                               sizeof(lockh));
                        LUSTRE_IT(it)->it_lock_mode = lockmode;
                        RETURN(1);
                } else {
                        /* Hm, interesting. Lock is present, but no open
                           handle? */
                        up(&lli->lli_och_sem);
                        ldlm_lock_decref(&lockh, lockmode);
                }
        }
#endif

do_lock:
        rc = md_intent_lock(exp, &pid, de->d_name.name, de->d_name.len,
                            NULL, 0, &cid, it, flags, &req, ll_mdc_blocking_ast);
        /* If req is NULL, then md_intent_lock() only tried to do a lock match;
         * if all was well, it will return 1 if it found locks, 0 otherwise. */
        if (req == NULL && rc >= 0) {
                if (!rc)
                        goto do_lookup;
                GOTO(out, rc);
        }

        if (rc < 0) {
                if (rc != -ESTALE) {
                        CDEBUG(D_INFO, "ll_intent_lock(): rc %d : it->it_status "
                               "%d\n", rc, LUSTRE_IT(it)->it_status);
                }
                GOTO(out, rc = 0);
        }
revalidate_finish:
        rc = revalidate_it_finish(req, 1, it, de);
        if (rc != 0) {
                ll_intent_release(it);
                GOTO(out, rc = 0);
        }
        rc = 1;

        /* unfortunately ll_intent_lock may cause a callback and revoke our
           dentry */
        spin_lock(&dcache_lock);
        hlist_del_init(&de->d_hash);
        __d_rehash(de);
        spin_unlock(&dcache_lock);

        GOTO(out, rc);
out:
        if (req != NULL && rc == 1)
                ptlrpc_req_finished(req);

        if (rc == 0) {
                if (it == &lookup_it) {
                        ll_intent_release(it);
                        if (req) /* Special case: We did lookup and it failed,
                                    need to free request */
                                ptlrpc_req_finished(req);
                }
                ll_unhash_aliases(de->d_inode);
                return 0;
        }

        CDEBUG(D_DENTRY, "revalidated dentry %*s (%p) parent %p "
               "inode %p refc %d\n", de->d_name.len,
               de->d_name.name, de, de->d_parent, de->d_inode,
               atomic_read(&de->d_count));

        ll_lookup_finish_locks(it, de);
        de->d_flags &= ~DCACHE_LUSTRE_INVALID;
        if (it == &lookup_it)
                ll_intent_release(it);

        if (!((de->d_inode->i_mode & S_ISUID) && S_ISDIR(de->d_inode->i_mode)) ||
            !(flags & LOOKUP_CONTINUE || (orig_it & (IT_CHDIR | IT_OPEN))))
                return rc;

        if (nd && !(de->d_flags & DCACHE_GNS_MOUNTING)) {
                int err = ll_gns_mount_object(de, nd->mnt);
                if (err)
                        CERROR("can't mount %s, err = %d\n",
                               de->d_name.name, err);
        }
        return rc;
do_lookup:
        it = &lookup_it;
        if (ll_intent_alloc(it))
                LBUG();
// We did that already, right?  ll_inode2id(&pid, de->d_parent->d_inode);
        rc = md_intent_lock(exp, &pid, de->d_name.name,
                            de->d_name.len, NULL, 0, NULL,
                            it, 0, &req, ll_mdc_blocking_ast);
        if (rc >= 0) {
                struct mds_body *mds_body = lustre_msg_buf(req->rq_repmsg, 1,
                                                           sizeof(*mds_body));

                /* See if we got same inode, if not - return error */
                if (id_equal_stc(&cid, &mds_body->id1))
                        goto revalidate_finish;
        }

        GOTO(out, rc = 0);
}

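/* Pin the dentry's inode on the MDS when it becomes a cwd (flag == 0) or a
 * mount root (flag == 1); only the first user of each kind sends the pin
 * RPC, later ones just bump the local count. */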
/*static*/ void ll_pin(struct dentry *de, struct vfsmount *mnt, int flag)
{
        struct inode *inode = de->d_inode;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_dentry_data *ldd = ll_d2d(de);
        struct obd_client_handle *handle;
        int rc = 0;
        ENTRY;
        LASSERT(ldd);

        lock_kernel();
        /* Strictly speaking this introduces an additional race: the
         * increments should wait until the rpc has returned.
         * However, given that at present the function is void, this
         * issue is moot. */
        if (flag == 1 && (++ldd->lld_mnt_count) > 1) {
                unlock_kernel();
                EXIT;
                return;
        }

        if (flag == 0 && (++ldd->lld_cwd_count) > 1) {
                unlock_kernel();
                EXIT;
                return;
        }
        unlock_kernel();

        handle = (flag) ? &ldd->lld_mnt_och : &ldd->lld_cwd_och;
        rc = obd_pin(sbi->ll_md_exp, inode->i_ino, inode->i_generation,
                     inode->i_mode & S_IFMT, handle, flag);

        if (rc) {
                lock_kernel();
                memset(handle, 0, sizeof(*handle));
                if (flag == 0)
                        ldd->lld_cwd_count--;
                else
                        ldd->lld_mnt_count--;
                unlock_kernel();
        }

        EXIT;
        return;
}

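/* Undo ll_pin(): only the last cwd/mnt user sends the unpin RPC, and a
 * handle left over from a failed pin (bad magic) is silently ignored. */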
/*static*/ void ll_unpin(struct dentry *de, struct vfsmount *mnt, int flag)
{
        struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
        struct ll_dentry_data *ldd = ll_d2d(de);
        struct obd_client_handle handle;
        int count, rc = 0;
        ENTRY;
        LASSERT(ldd);

        lock_kernel();
        /* Strictly speaking this introduces an additional race: the
         * increments should wait until the rpc has returned.
         * However, given that at present the function is void, this
         * issue is moot. */
        handle = (flag) ? ldd->lld_mnt_och : ldd->lld_cwd_och;
        if (handle.och_magic != OBD_CLIENT_HANDLE_MAGIC) {
                /* the "pin" failed */
                unlock_kernel();
                EXIT;
                return;
        }

        if (flag)
                count = --ldd->lld_mnt_count;
        else
                count = --ldd->lld_cwd_count;
        unlock_kernel();

        if (count != 0) {
                EXIT;
                return;
        }

        rc = obd_unpin(sbi->ll_md_exp, &handle, flag);
        EXIT;
}

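/* 2.6 entry point: translate the ->d_revalidate() call into
 * ll_revalidate_it(), passing along the open intent only for the last
 * component of the path. */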
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
static int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
{
        int rc;
        ENTRY;

        if (nd && nd->flags & LOOKUP_LAST && !(nd->flags & LOOKUP_LINK_NOTLAST))
                rc = ll_revalidate_it(dentry, nd->flags, nd, &nd->intent.open);
        else
                rc = ll_revalidate_it(dentry, 0, nd, NULL);

        RETURN(rc);
}
#endif

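/* ->d_iput(): clear the per-name callback data the MD client keeps for this
 * inode (via null_if_equal) before dropping the inode reference; the 2.6
 * variant skips this for the root dentry. */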
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
static void ll_dentry_iput(struct dentry *dentry, struct inode *inode)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct lustre_id parent, child;

        LASSERT(dentry->d_parent && dentry->d_parent->d_inode);
        ll_inode2id(&parent, dentry->d_parent->d_inode);
        ll_inode2id(&child, inode);
        md_change_cbdata_name(sbi->ll_md_exp, &parent,
                              (char *)dentry->d_name.name,
                              dentry->d_name.len, &child,
                              null_if_equal, inode);
        iput(inode);
}
#else
static void ll_dentry_iput(struct dentry *dentry, struct inode *inode)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct lustre_id parent, child;

        if (dentry->d_parent != dentry) {
                /* Do not do this for root of the tree */
                LASSERT(dentry->d_parent && dentry->d_parent->d_inode);
                ll_inode2id(&parent, dentry->d_parent->d_inode);
                ll_inode2id(&child, inode);
                md_change_cbdata_name(sbi->ll_md_exp, &parent,
                                      (char *)dentry->d_name.name,
                                      dentry->d_name.len, &child,
                                      null_if_equal, inode);
        }
        iput(inode);
}
#endif

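/* Dentry operations installed on Lustre dentries. */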
struct dentry_operations ll_d_ops = {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
        .d_revalidate = ll_revalidate_nd,
#else
        .d_revalidate_it = ll_revalidate_it,
#endif
        .d_release = ll_release,
        .d_iput = ll_dentry_iput,
        .d_delete = ll_ddelete,
        .d_compare = ll_dcompare,
#if 0
        .d_pin = ll_pin,
        .d_unpin = ll_unpin,
#endif
};