[fs/lustre-release.git] / lustre/llite/dcache.c
- many fixes in GNS code after Nikita's code review.
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/quotaops.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/obd_support.h>
#include <linux/lustre_lite.h>
#include <linux/lustre_idl.h>
#include <linux/lustre_dlm.h>
#include <linux/lustre_version.h>

#include "llite_internal.h"

/* should NOT be called with the dcache lock, see fs/dcache.c */
static void ll_release(struct dentry *de)
{
        struct ll_dentry_data *lld;
        ENTRY;
        LASSERT(de != NULL);

        CDEBUG(D_DENTRY, "releasing dentry %p\n", de);

        lld = ll_d2d(de);
        if (lld) { /* Root dentry does not have ll_dentry_data */
                LASSERT(lld->lld_cwd_count == 0);
                LASSERT(lld->lld_mnt_count == 0);
                OBD_FREE(de->d_fsdata, sizeof(struct ll_dentry_data));
        }

        EXIT;
}

/* Compare if two dentries are the same.  Don't match if the existing dentry
 * is marked DCACHE_LUSTRE_INVALID.  Returns 1 if different, 0 if the same.
 *
 * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
 * an AST before calling d_revalidate_it().  The dentry still exists (marked
 * INVALID) so d_lookup() matches it, but we have no lock on it (so
 * lock_match() fails) and we spin around real_lookup(). */
static int ll_dcompare(struct dentry *parent, struct qstr *d_name,
                       struct qstr *name)
{
        struct dentry *dchild;
        ENTRY;

        if (d_name->len != name->len)
                RETURN(1);

        if (memcmp(d_name->name, name->name, name->len))
                RETURN(1);

        dchild = container_of(d_name, struct dentry, d_name); /* ugh */
        if (dchild->d_flags & DCACHE_LUSTRE_INVALID) {
                CDEBUG(D_DENTRY,"INVALID dentry %p not matched, was bug 3784\n",
                       dchild);
                RETURN(1);
        }

        RETURN(0);
}

/* should NOT be called with the dcache lock, see fs/dcache.c */
static int ll_ddelete(struct dentry *de)
{
        ENTRY;
        LASSERT(de);
        CDEBUG(D_DENTRY, "%s dentry %*s (%p, parent %p, inode %p) %s%s\n",
               (de->d_flags & DCACHE_LUSTRE_INVALID ? "deleting" : "keeping"),
               de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
               d_unhashed(de) ? "" : "hashed,",
               list_empty(&de->d_subdirs) ? "" : "subdirs");
        RETURN(0);
}

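/* attach a struct ll_dentry_data to the dentry if it does not have one
 * yet; done under the BKL so racing callers do not both allocate
 * d_fsdata */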
void ll_set_dd(struct dentry *de)
{
        ENTRY;
        LASSERT(de != NULL);

        CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
               de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
               atomic_read(&de->d_count));
        lock_kernel();
        if (de->d_fsdata == NULL) {
                OBD_ALLOC(de->d_fsdata, sizeof(struct ll_dentry_data));
        }
        unlock_kernel();

        EXIT;
}

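/* drop the DLM lock reference held by the intent; it_lock_mode is
 * cleared so that a repeated call (see bug 494) does not decref the
 * lock twice */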
void ll_intent_drop_lock(struct lookup_intent *it)
{
        struct lustre_handle *handle;
        struct lustre_intent_data *itdata = LUSTRE_IT(it);

        if (it->it_op && itdata && itdata->it_lock_mode) {
                handle = (struct lustre_handle *)&itdata->it_lock_handle;
                CDEBUG(D_DLMTRACE, "releasing lock with cookie "LPX64
                       " from it %p\n", handle->cookie, it);
                ldlm_lock_decref(handle, itdata->it_lock_mode);

                /* bug 494: intent_release may be called multiple times from
                 * this thread, and we don't want to double-decref this lock */
                itdata->it_lock_mode = 0;
        }
}

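/* fully release the intent: drop its lock reference, clear the magic
 * and release callback, and free the Lustre-private intent data */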
void ll_intent_release(struct lookup_intent *it)
{
        ENTRY;

        ll_intent_drop_lock(it);
        it->it_magic = 0;
        it->it_op_release = 0;
        ll_intent_free(it);
        EXIT;
}

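/* return the lustre_intent_data slab object (if any) and clear the
 * it->d.fs_data pointer */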
void ll_intent_free(struct lookup_intent *it)
{
        if (it->d.fs_data) {
                OBD_SLAB_FREE(it->d.fs_data, ll_intent_slab,
                              sizeof(struct lustre_intent_data));
                it->d.fs_data = NULL;
        }
}

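/* walk all dentries aliasing @inode: unused ones (d_count == 0) are
 * dropped outright, busy ones are marked DCACHE_LUSTRE_INVALID and
 * parked on the per-superblock orphan list so that ll_dcompare() will
 * no longer match them */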
void ll_unhash_aliases(struct inode *inode)
{
        struct list_head *tmp, *head;
        struct ll_sb_info *sbi;
        ENTRY;

        if (inode == NULL) {
                CERROR("unexpected NULL inode, tell phil\n");
                EXIT;
                return;
        }

        CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
               inode->i_ino, inode->i_generation, inode);

        sbi = ll_i2sbi(inode);
        head = &inode->i_dentry;
restart:
        spin_lock(&dcache_lock);
        tmp = head;
        while ((tmp = tmp->next) != head) {
                struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
                if (atomic_read(&dentry->d_count) == 0) {
                        CDEBUG(D_DENTRY, "deleting dentry %.*s (%p) parent %p "
                               "inode %p\n", dentry->d_name.len,
                               dentry->d_name.name, dentry, dentry->d_parent,
                               dentry->d_inode);
                        dget_locked(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dcache_lock);
                        dput(dentry);
                        goto restart;
                } else if (!(dentry->d_flags & DCACHE_LUSTRE_INVALID)) {
                        CDEBUG(D_DENTRY, "unhashing dentry %.*s (%p) parent %p "
                               "inode %p refc %d\n", dentry->d_name.len,
                               dentry->d_name.name, dentry, dentry->d_parent,
                               dentry->d_inode, atomic_read(&dentry->d_count));
                        hlist_del_init(&dentry->d_hash);
                        dentry->d_flags |= DCACHE_LUSTRE_INVALID;
                        hlist_add_head(&dentry->d_hash,
                                       &sbi->ll_orphan_dentry_list);
                }
        }
        spin_unlock(&dcache_lock);
        EXIT;
}

extern struct dentry *ll_find_alias(struct inode *, struct dentry *);

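/* finish revalidation once the MDS reply is available: negative
 * lookups fail with -ENOENT, otherwise the inode is refreshed from the
 * reply body via ll_prep_inode() */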
int revalidate_it_finish(struct ptlrpc_request *request, int offset,
                         struct lookup_intent *it, struct dentry *de)
{
        struct ll_sb_info *sbi;
        int rc = 0;
        ENTRY;

        if (!request)
                RETURN(0);

        if (it_disposition(it, DISP_LOOKUP_NEG))
                RETURN(-ENOENT);

        sbi = ll_i2sbi(de->d_inode);
        rc = ll_prep_inode(sbi->ll_dt_exp, sbi->ll_md_exp,
                           &de->d_inode, request, offset, NULL);

        RETURN(rc);
}

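/* record the inode in the intent's DLM lock via mdc_set_lock_data(),
 * then drop locks taken for plain lookup/getattr/chdir intents, which
 * are not needed once the dentry is set up */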
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
{
        LASSERT(it != NULL);
        LASSERT(dentry != NULL);

        if (LUSTRE_IT(it)->it_lock_mode && dentry->d_inode != NULL) {
                struct inode *inode = dentry->d_inode;
                CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
                       inode, inode->i_ino, inode->i_generation);
                mdc_set_lock_data(NULL, &LUSTRE_IT(it)->it_lock_handle, inode);
        }

        /* drop lookup or getattr locks immediately */
        if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR ||
            it->it_op == IT_CHDIR) {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
                /*
                 * on 2.6 there are situations when several lookups and
                 * revalidations may be requested during a single operation.
                 * Therefore, we don't release the intent here -bzzz
                 */
                ll_intent_drop_lock(it);
#else
                ll_intent_release(it);
#endif
        }
}

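/* make sure *itp points at a usable intent: fall back to the default
 * intent when none was passed (or for IT_GETXATTR), and allocate the
 * Lustre-private intent data if it is still missing */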
void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
{
        struct lookup_intent *it = *itp;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
        if (it) {
                LASSERTF(it->it_magic == INTENT_MAGIC, "bad intent magic: %x\n",
                         it->it_magic);
        }
#endif

        if (!it || it->it_op == IT_GETXATTR)
                it = *itp = deft;

        if (it->d.fs_data)
                return;

        if (ll_intent_alloc(it)) {
                CERROR("Failed to allocate memory for lustre specific intent "
                       "data\n");
                /* XXX: we cannot return status just yet */
                LBUG();
        }
}

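/* allocate the Lustre-private intent data from ll_intent_slab and hook
 * up ll_intent_release() as the release callback */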
int ll_intent_alloc(struct lookup_intent *it)
{
        if (it->d.fs_data) {
                CERROR("Intent alloc on already allocated intent\n");
                return 0;
        }
        OBD_SLAB_ALLOC(it->d.fs_data, ll_intent_slab, SLAB_KERNEL,
                       sizeof(struct lustre_intent_data));
        if (!it->d.fs_data) {
                CERROR("Failed to allocate memory for lustre specific intent "
                       "data\n");
                return -ENOMEM;
        }

        it->it_op_release = ll_intent_release;

        return 0;
}

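/* main dentry revalidation path: look for already-cached LOOKUP/UPDATE
 * or open locks first, otherwise (re)enqueue the intent on the MDS; on
 * success the dentry is re-hashed, and for setuid directories the GNS
 * mount path (ll_gns_mount_object()) is triggered */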
int ll_revalidate_it(struct dentry *de, int flags, struct nameidata *nd,
                     struct lookup_intent *it)
{
        struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
        struct ptlrpc_request *req = NULL;
        int orig_it, err, rc = 0;
        struct obd_export *exp;
        struct it_cb_data icbd;
        struct lustre_id pid;
        struct lustre_id cid;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:name=%s (%p), intent=%s\n", de->d_name.name,
               de, LL_IT2STR(it));

        /* Cached negative dentries are unsafe for now - look them up again */
        if (de->d_inode == NULL)
                RETURN(0);

        /* Root of the tree is always valid, attributes will be fixed up in
         * ll_inode_revalidate_it() */
        if (de->d_sb->s_root == de)
                RETURN(1);

        CDEBUG(D_INODE, "revalidate 0x%p: %*s -> %lu/%lu\n",
               de, de->d_name.len, de->d_name.name,
               (unsigned long) de->d_inode->i_ino,
               (unsigned long) de->d_inode->i_generation);

        exp = ll_i2mdexp(de->d_inode);
        ll_inode2id(&pid, de->d_parent->d_inode);
        ll_inode2id(&cid, de->d_inode);
        LASSERT(id_fid(&cid) != 0);

        icbd.icbd_parent = de->d_parent->d_inode;
        icbd.icbd_childp = &de;

        /*
         * never execute intents for mount points. Attributes will be fixed up
         * in ll_inode_revalidate_it().
         */
        if (d_mountpoint(de))
                RETURN(1);

        if (nd != NULL)
                nd->mnt->mnt_last_used = jiffies;

        OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
        orig_it = it ? it->it_op : IT_OPEN;
        ll_frob_intent(&it, &lookup_it);
        LASSERT(it != NULL);

        if (it->it_op == IT_GETATTR) { /* We need to check for the LOOKUP lock
                                          as well */
                rc = ll_intent_alloc(&lookup_it);
                if (rc)
                        LBUG(); /* Can't think of a better idea just yet */

                rc = md_intent_lock(exp, &pid, de->d_name.name,
                                    de->d_name.len, NULL, 0, &cid, &lookup_it,
                                    flags, &req, ll_mdc_blocking_ast);
                /* If there was no LOOKUP lock, no point in even checking for
                   an UPDATE lock */
                if (!rc) {
                        it = &lookup_it;
                        if (!req) {
                                ll_intent_free(it);
                                goto do_lookup;
                        }
                        GOTO(out, rc);
                }
                if (it_disposition(&lookup_it, DISP_LOOKUP_NEG)) {
                        it = &lookup_it;
                        ll_intent_free(it);
                        GOTO(out, rc = 0);
                }

                if (req)
                        ptlrpc_req_finished(req);
                req = NULL;
                ll_lookup_finish_locks(&lookup_it, de);
                /* XXX: on 2.6 ll_lookup_finish_locks() does not call
                 * ll_intent_release() */
                ll_intent_release(&lookup_it);
        }

#if 1
        if ((it->it_op == IT_OPEN) && de->d_inode) {
                struct inode *inode = de->d_inode;
                struct ll_inode_info *lli = ll_i2info(inode);
                struct obd_client_handle **och_p;
                __u64 *och_usecount;
                struct obd_device *obddev;
                struct lustre_handle lockh;
                int flags = LDLM_FL_BLOCK_GRANTED;
                ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN}};
                struct ldlm_res_id file_res_id = {.name = {id_fid(&lli->lli_id),
                                                           id_group(&lli->lli_id)}};
                int lockmode;

                if (it->it_flags & FMODE_WRITE) {
                        och_p = &lli->lli_mds_write_och;
                        och_usecount = &lli->lli_open_fd_write_count;
                        lockmode = LCK_CW;
                } else if (it->it_flags & FMODE_EXEC) {
                        och_p = &lli->lli_mds_exec_och;
                        och_usecount = &lli->lli_open_fd_exec_count;
                        lockmode = LCK_PR;
                } else {
                        och_p = &lli->lli_mds_read_och;
                        och_usecount = &lli->lli_open_fd_read_count;
                        lockmode = LCK_CR;
                }

                /* Check for the proper lock */
                obddev = md_get_real_obd(exp, &lli->lli_id);
                if (!ldlm_lock_match(obddev->obd_namespace, flags, &file_res_id,
                                     LDLM_IBITS, &policy, lockmode, &lockh))
                        goto do_lock;
                down(&lli->lli_och_sem);
                if (*och_p) { /* Everything is open already, do nothing */
                        /*(*och_usecount)++;  Do not let them steal our open
                                              handle from under us */
                        /* XXX The code above was my original idea, but in case
                           we have the handle, but we cannot use it due to
                           later checks (e.g. O_CREAT|O_EXCL flags set), nobody
                           would decrement the counter increased here. So we
                           just hope the lock won't be invalidated in between.
                           But if it is, we'll resend the open request to the
                           MDS later during the file open path. */
                        up(&lli->lli_och_sem);
                        memcpy(&LUSTRE_IT(it)->it_lock_handle, &lockh,
                               sizeof(lockh));
                        LUSTRE_IT(it)->it_lock_mode = lockmode;
                        RETURN(1);
                } else {
                        /* Hm, interesting. Lock is present, but no open
                           handle? */
                        up(&lli->lli_och_sem);
                        ldlm_lock_decref(&lockh, lockmode);
                }
        }
#endif

do_lock:
        rc = md_intent_lock(exp, &pid, de->d_name.name, de->d_name.len,
                            NULL, 0, &cid, it, flags, &req, ll_mdc_blocking_ast);
        /* If req is NULL, then md_intent_lock() only tried to do a lock match;
         * if all was well, it will return 1 if it found locks, 0 otherwise. */
        if (req == NULL && rc >= 0) {
                if (!rc)
                        goto do_lookup;
                GOTO(out, rc);
        }

        if (rc < 0) {
                if (rc != -ESTALE) {
                        CDEBUG(D_INFO, "ll_intent_lock(): rc %d : it->it_status "
                               "%d\n", rc, LUSTRE_IT(it)->it_status);
                }
                GOTO(out, rc = 0);
        }
revalidate_finish:
        rc = revalidate_it_finish(req, 1, it, de);
        if (rc != 0) {
                ll_intent_release(it);
                GOTO(out, rc = 0);
        }
        rc = 1;

        /* unfortunately ll_intent_lock may cause a callback and revoke our
           dentry */
        spin_lock(&dcache_lock);
        hlist_del_init(&de->d_hash);
        __d_rehash(de);
        spin_unlock(&dcache_lock);

        GOTO(out, rc);
out:
        if (req != NULL && rc == 1)
                ptlrpc_req_finished(req);

        if (rc == 0) {
                if (it == &lookup_it) {
                        ll_intent_release(it);
                        if (req) /* Special case: we did a lookup and it
                                    failed, need to free the request */
                                ptlrpc_req_finished(req);
                }
                ll_unhash_aliases(de->d_inode);
                return 0;
        }

        CDEBUG(D_DENTRY, "revalidated dentry %*s (%p) parent %p "
               "inode %p refc %d\n", de->d_name.len,
               de->d_name.name, de, de->d_parent, de->d_inode,
               atomic_read(&de->d_count));

        ll_lookup_finish_locks(it, de);
        de->d_flags &= ~DCACHE_LUSTRE_INVALID;
        if (it == &lookup_it)
                ll_intent_release(it);

        if (!((de->d_inode->i_mode & S_ISUID) && S_ISDIR(de->d_inode->i_mode)) ||
            !(flags & LOOKUP_CONTINUE || (orig_it & (IT_CHDIR | IT_OPEN))))
                return rc;

        if (nd != NULL) {
                err = ll_gns_mount_object(de, nd->mnt);
                if (err == -ERESTARTSYS) {
                        /*
                         * make the system restart the syscall, since a GNS
                         * mount is currently in progress.
                         */
                        return err;
                }
        }
        return rc;
do_lookup:
        it = &lookup_it;
        if (ll_intent_alloc(it))
                LBUG();

        /* We did that already, right?  ll_inode2id(&pid, de->d_parent->d_inode); */
        rc = md_intent_lock(exp, &pid, de->d_name.name,
                            de->d_name.len, NULL, 0, NULL,
                            it, 0, &req, ll_mdc_blocking_ast);
        if (rc >= 0) {
                struct mds_body *mds_body = lustre_msg_buf(req->rq_repmsg, 1,
                                                           sizeof(*mds_body));

                /* See if we got the same inode; if not, return an error */
                if (id_equal_stc(&cid, &mds_body->id1))
                        goto revalidate_finish;
        }

        GOTO(out, rc = 0);
}

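/* pin the dentry on the MDS when it is used as a cwd (flag == 0) or a
 * mountpoint (flag == 1); the per-dentry counters, updated under the
 * BKL, ensure only the first user issues the obd_pin() RPC */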
/*static*/ void ll_pin(struct dentry *de, struct vfsmount *mnt, int flag)
{
        struct inode *inode = de->d_inode;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_dentry_data *ldd = ll_d2d(de);
        struct obd_client_handle *handle;
        int rc = 0;
        ENTRY;
        LASSERT(ldd);

        lock_kernel();
        /* Strictly speaking this introduces an additional race: the
         * increments should wait until the rpc has returned.
         * However, given that at present the function is void, this
         * issue is moot. */
        if (flag == 1 && (++ldd->lld_mnt_count) > 1) {
                unlock_kernel();
                EXIT;
                return;
        }

        if (flag == 0 && (++ldd->lld_cwd_count) > 1) {
                unlock_kernel();
                EXIT;
                return;
        }
        unlock_kernel();

        handle = (flag) ? &ldd->lld_mnt_och : &ldd->lld_cwd_och;
        rc = obd_pin(sbi->ll_md_exp, inode->i_ino, inode->i_generation,
                     inode->i_mode & S_IFMT, handle, flag);

        if (rc) {
                lock_kernel();
                memset(handle, 0, sizeof(*handle));
                if (flag == 0)
                        ldd->lld_cwd_count--;
                else
                        ldd->lld_mnt_count--;
                unlock_kernel();
        }

        EXIT;
        return;
}

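/* undo ll_pin(): drop the cwd/mountpoint count and send obd_unpin()
 * once the last user is gone; a handle without the magic value means
 * the original pin failed and there is nothing to undo */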
/*static*/ void ll_unpin(struct dentry *de, struct vfsmount *mnt, int flag)
{
        struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
        struct ll_dentry_data *ldd = ll_d2d(de);
        struct obd_client_handle handle;
        int count, rc = 0;
        ENTRY;
        LASSERT(ldd);

        lock_kernel();
        /* Strictly speaking this introduces an additional race: the
         * decrements should wait until the rpc has returned.
         * However, given that at present the function is void, this
         * issue is moot. */
        handle = (flag) ? ldd->lld_mnt_och : ldd->lld_cwd_och;
        if (handle.och_magic != OBD_CLIENT_HANDLE_MAGIC) {
                /* the "pin" failed */
                unlock_kernel();
                EXIT;
                return;
        }

        if (flag)
                count = --ldd->lld_mnt_count;
        else
                count = --ldd->lld_cwd_count;
        unlock_kernel();

        if (count != 0) {
                EXIT;
                return;
        }

        rc = obd_unpin(sbi->ll_md_exp, &handle, flag);
        EXIT;
}

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
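/* 2.6 ->d_revalidate() entry point: pass the open intent from the
 * nameidata only when revalidating the last component of the path */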
static int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
{
        int rc;
        ENTRY;

        if (nd && nd->flags & LOOKUP_LAST && !(nd->flags & LOOKUP_LINK_NOTLAST))
                rc = ll_revalidate_it(dentry, nd->flags, nd, &nd->intent.open);
        else
                rc = ll_revalidate_it(dentry, 0, nd, NULL);

        RETURN(rc);
}
#endif

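/* final iput for a dentry: call md_change_cbdata_name() with
 * null_if_equal (presumably clearing any cached callback data that
 * still points at this inode) before dropping the inode reference; the
 * 2.6 variant skips the RPC for the root dentry */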
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
static void ll_dentry_iput(struct dentry *dentry, struct inode *inode)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct lustre_id parent, child;

        LASSERT(dentry->d_parent && dentry->d_parent->d_inode);
        ll_inode2id(&parent, dentry->d_parent->d_inode);
        ll_inode2id(&child, inode);
        md_change_cbdata_name(sbi->ll_md_exp, &parent,
                              (char *)dentry->d_name.name,
                              dentry->d_name.len, &child,
                              null_if_equal, inode);
        iput(inode);
}
#else
static void ll_dentry_iput(struct dentry *dentry, struct inode *inode)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct lustre_id parent, child;

        if (dentry->d_parent != dentry) {
                /* Do not do this for root of the tree */
                LASSERT(dentry->d_parent && dentry->d_parent->d_inode);
                ll_inode2id(&parent, dentry->d_parent->d_inode);
                ll_inode2id(&child, inode);
                md_change_cbdata_name(sbi->ll_md_exp, &parent,
                                      (char *)dentry->d_name.name,
                                      dentry->d_name.len, &child,
                                      null_if_equal, inode);
        }
        iput(inode);
}
#endif

struct dentry_operations ll_d_ops = {
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
        .d_revalidate = ll_revalidate_nd,
#else
        .d_revalidate_it = ll_revalidate_it,
#endif
        .d_release = ll_release,
        .d_iput = ll_dentry_iput,
        .d_delete = ll_ddelete,
        .d_compare = ll_dcompare,
#if 0
        .d_pin = ll_pin,
        .d_unpin = ll_unpin,
#endif
};