Whamcloud - gitweb
1) Reorganize the SMFS hook ops so that SMFS walks a list of hook ops in the hook macro
[fs/lustre-release.git] / lustre / smfs / cache_space.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  lustre/smfs/cache_space.c
5  *  A library of functions to manage cache space based on ARC
6  *  (modified LRU) replacement algorithm.
7  *
8  *  Copyright (c) 2004 Cluster File Systems, Inc.
9  *
10  *   This file is part of Lustre, http://www.lustre.org.
11  *
12  *   Lustre is free software; you can redistribute it and/or
13  *   modify it under the terms of version 2 of the GNU General Public
14  *   License as published by the Free Software Foundation.
15  *
16  *   Lustre is distributed in the hope that it will be useful,
17  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *   GNU General Public License for more details.
20  *
21  *   You should have received a copy of the GNU General Public License
22  *   along with Lustre; if not, write to the Free Software
23  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25 #define DEBUG_SUBSYSTEM S_SM
26
27 #include <linux/lustre_log.h>
28 #include <linux/lustre_fsfilt.h>
29 #include <linux/lustre_smfs.h>
30
31 #include "smfs_internal.h"
32
/* Tunables that control when and how aggressively the cache purge
 * daemon runs; instantiated once below as cf_prm. */
struct cache_purge_param {
        int nfract;     /* Percentage of cache dirty to activate cpurge */
        int ndirty;     /* Maximum number of objects to write out per
                           wake-cycle */
        int interval;   /* jiffies delay between cache purge */
        int nfract_sync;/* Percentage of cache dirty to activate
                           cpurge synchronously */
        int nfract_stop_cpurge; /* Percentage of cache dirty to stop cpurge */
} cf_prm = {30, 512, 600 * HZ, 60, 20};

/* Single module-wide purge queue: one purge daemon serves the whole
 * module, so only one superblock can be set up at a time. */
static struct cache_purge_queue smfs_cpq;
static struct cache_purge_queue *cpq = &smfs_cpq;
45
46 #define CACHE_HOOK "cache_hook"
/* Pre-operation hook: make sure a transaction handle exists, then check
 * cache usage (possibly triggering a purge) before the operation runs.
 *
 * NOTE(review): when this function opens its own transaction, the new
 * handle is stored only in the local 'handle' parameter and never
 * reaches the caller, who therefore cannot commit it -- and
 * cache_space_post_hook() asserts a non-NULL handle from the caller.
 * Looks like an orphaned-transaction path; confirm against the smfs
 * hook dispatch code. */
int cache_space_pre_hook(struct inode *inode, struct dentry *dentry,
                         void *data1, void *data2, int op, void *handle)
{
        int rc = 0;
        ENTRY;

        if (smfs_cache_hook(inode)) {
               if (!handle) {
                        /* no transaction from the caller: open one so
                         * cache_space_pre() runs under a handle */
                        handle = smfs_trans_start(inode, KML_CACHE_NOOP, NULL);
                        if (IS_ERR(handle)) {
                                RETURN(PTR_ERR(handle));
                        }
                }
                cache_space_pre(inode, op);
        }
        RETURN(rc);
}
64
65 int cache_space_post_hook(struct inode *inode, struct dentry *dentry,
66                          void *data1, void *data2, int op, void *handle)
67 {
68         int rc = 0;
69         ENTRY;
70         if (smfs_cache_hook(inode)) {      
71                 struct inode *new_inode = (struct inode*)data1;
72                 struct dentry *new_dentry = (struct dentry*)data2;                    
73                 LASSERT(handle != NULL);                                
74                 rc = cache_space_post(op, handle, inode, dentry, new_inode, 
75                                       new_dentry);
76         }
77         RETURN(rc);                                                               
78 }
79
80 int cache_space_hook_init(struct super_block *sb)
81 {
82         struct smfs_super_info  *smfs_info = S2SMI(sb);
83         struct smfs_hook_ops    *cache_hops;
84         int    rc = 0;
85         ENTRY;
86
87         cache_hops = smfs_alloc_hook_ops(CACHE_HOOK, cache_space_pre_hook, 
88                                          cache_space_post_hook);
89         if (!cache_hops) {
90                 RETURN(-ENOMEM);
91         }
92         rc = smfs_register_hook_ops(sb, cache_hops);      
93         if (rc) {
94                 smfs_free_hook_ops(cache_hops);
95                 RETURN(rc);
96         }
97         SMFS_SET_CACHE_HOOK(smfs_info);
98
99         RETURN(0);
100 }
101
102 int cache_space_hook_exit(struct super_block *sb)
103 {
104         struct smfs_super_info  *smfs_info = S2SMI(sb);
105         struct smfs_hook_ops *cache_hops; 
106
107         cache_hops = smfs_unregister_hook_ops(sb, CACHE_HOOK);
108         smfs_free_hook_ops(cache_hops);
109
110         SMFS_CLEAN_CACHE_HOOK(smfs_info);
111         return 0;
112 }
113
114 static int cache_leaf_node(struct dentry *dentry, __u64 *active_entry)
115 {
116         struct inode *inode = dentry->d_inode;
117
118         if (S_ISDIR(inode->i_mode)) {
119                 if (inode->i_nlink != 2)
120                         return 0;
121                 if (!strncmp(dentry->d_name.name, "lost+found", dentry->d_name.len))
122                         return 0;
123                 LASSERT(active_entry != NULL);
124                 get_active_entry(inode, active_entry);
125                 return(*active_entry > 0 ? 0 : 1);
126         } else {
127                 if (inode->i_nlink != 1)
128                         return 0;
129                 if (!strncmp(dentry->d_name.name, KML_LOG_NAME, dentry->d_name.len) ||
130                     !strncmp(dentry->d_name.name, CACHE_LRU_LOG, dentry->d_name.len))
131                         return 0;
132                 return 1;
133         }
134 }
135 static int cache_pre_leaf_node(struct dentry *dentry, __u64 *active_entry, int op)
136 {
137         if (((op == 0 && dentry->d_inode->i_nlink == 0) ||
138             (op == 1 && dentry->d_inode->i_nlink == 2)) &&
139             strncmp(dentry->d_name.name, KML_LOG_NAME, dentry->d_name.len) &&
140             strncmp(dentry->d_name.name, CACHE_LRU_LOG, dentry->d_name.len))
141                 return 1;
142         else if ((op == 2 && dentry->d_inode->i_nlink == 0) ||
143                  (op == 3 && dentry->d_inode->i_nlink == 3)) {
144                 LASSERT(active_entry != NULL);
145                 get_active_entry(dentry->d_inode, active_entry);
146                 return(*active_entry > 0 ? 0 : 1);
147         }
148         return 0;
149 }
150
151 static int set_lru_logcookie(struct inode *inode, void *handle,
152                              struct llog_cookie *logcookie)
153 {
154         struct fsfilt_operations *fsops = I2CSB(inode)->sm_fsfilt;
155         int rc;
156         rc = fsops->fs_set_xattr(inode, handle, XATTR_SMFS_CACHE_LOGCOOKIE,
157                                  logcookie, sizeof(*logcookie));
158         RETURN(rc);
159 }
160 static int get_lru_logcookie(struct inode *inode, struct llog_cookie *logcookie)
161 {
162         struct fsfilt_operations *fsops = I2CSB(inode)->sm_fsfilt;
163         int rc;
164         rc = fsops->fs_get_xattr(inode, XATTR_SMFS_CACHE_LOGCOOKIE,
165                                  logcookie, sizeof(*logcookie));
166         RETURN(rc);
167 }
168
/*
 * Examine one child/parent fid pair from the LRU log and decide whether
 * the child object can be purged from the cache.
 *
 * NOTE(review): the caller (cache_lru_get_rec_cb) only counts a record
 * as purged when this returns 1, but no path here ever returns 1 -- the
 * actual purge step appears unfinished (see the commented-out logcookie
 * below).  As written this only probes for hoarded/busy inodes.
 */
static int try2purge_from_cache(struct ll_fid cfid, struct ll_fid pfid)
{
        struct inode *inode, *parent;
        struct super_block *sb = cpq->cpq_sb;
        //struct llog_cookie logcookie;
        __u32 hoard_priority = 0;
        int rc = 0;
        ENTRY;

        /* NOTE(review): on 2.4 kernels iget() returns NULL rather than
         * an ERR_PTR on failure -- confirm IS_ERR is the right check. */
        inode = iget(sb, cfid.id);
        if (IS_ERR(inode)) {
                CERROR("not existent inode: "LPX64"/%u\n",
                       cfid.id, cfid.generation);
                RETURN(-ENOENT);
        }
        parent = iget(sb, pfid.id);
        if (IS_ERR(parent)) {
                CERROR("not existent inode: "LPX64"/%u\n",
                       pfid.id, pfid.generation);
                iput(inode);
                RETURN(-ENOENT);
        }

        CWARN("inode/parent %lu:%lu on the lru list\n",
              inode->i_ino, parent->i_ino);

        /* NOTE(review): rc from get_hoard_priority() is not checked
         * before hoard_priority is consulted; hoard_priority is at
         * least zero-initialized above. */
        rc = get_hoard_priority(inode, &hoard_priority);
        if (hoard_priority) {
                /* pinned ("hoarded") objects are never purged */
                CWARN("inode %lu set hoard\n", inode->i_ino);
                GOTO(out, rc);
        }
        if (atomic_read(&inode->i_count) > 1 || (inode->i_state & I_DIRTY)) {
                /* in use or dirty: skip it */
                CWARN("inode %lu is busy\n", inode->i_ino);
                GOTO(out, rc = 0);
        }

out:
        iput(inode);
        iput(parent);
        RETURN(rc);
}
210
211 static int cache_lru_get_rec_cb(struct llog_handle *llh,
212                                 struct llog_rec_hdr *rec, void *data)
213 {
214         struct llog_lru_rec *llr;
215         int count = *(int *)data, rc = 0;
216         ENTRY;
217
218         if (!(le32_to_cpu(llh->lgh_hdr->llh_flags) & LLOG_F_IS_PLAIN)) {
219                 CERROR("log is not plain\n");
220                 RETURN(-EINVAL);
221         }
222         if (rec->lrh_type != CACHE_LRU_REC) {
223                 CERROR("log record type error\n");
224                 RETURN(-EINVAL);
225         }
226
227         llr = (struct llog_lru_rec *)rec;
228
229         if (try2purge_from_cache(llr->llr_cfid, llr->llr_pfid)==1){
230                 CDEBUG(D_INODE, "purge ino/gen "LPX64"/%u from cache\n",
231                        llr->llr_cfid.id, llr->llr_cfid.generation);
232                 count --;
233                 if (count == 0)
234                         rc = LLOG_PROC_BREAK;
235                 *(int *)data = count;
236         }
237
238         RETURN(rc);
239 }
240
241 static int cpurge_stop(void)
242 {
243         struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
244         struct obd_statfs osfs;
245         int rc, free;
246
247         rc = fsops->fs_statfs(cpq->cpq_sb, &osfs);
248         LASSERT(rc == 0);
249
250         free = osfs.os_bfree * 100;
251         if (free < cf_prm.nfract_stop_cpurge * osfs.os_blocks)
252                 return 1;
253         return 0;
254 }
255
256 static int cache_balance_state(void)
257 {
258         struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
259         struct obd_statfs osfs;
260         int rc, free;
261
262         rc = fsops->fs_statfs(cpq->cpq_sb, &osfs);
263         LASSERT(rc == 0);
264
265         free = (osfs.os_blocks - osfs.os_bfree) * 100;
266         if (free > cf_prm.nfract * osfs.os_blocks) {
267                 if (free < cf_prm.nfract_sync)
268                         return 1;
269                 return 0;
270         }
271         return -1;
272 }
273
/* Wake the purge daemon sleeping in cache_purge_thread(). */
void wakeup_cpurge(void)
{
        wake_up(&cpq->cpq_waitq);
}
278
279 /* walk the lru llog to purge count number of objects */
280 static int purge_some_cache(int *count)
281 {
282         int rc;
283         ENTRY;
284
285         rc = llog_cat_process(cpq->cpq_loghandle,
286                               (llog_cb_t)cache_lru_get_rec_cb,
287                               count);
288         if (!rc)
289                 CDEBUG(D_INODE, "no enough objects available\n");
290
291         RETURN(rc);
292 }
293
294 #define CFLUSH_NR 512
295 static void check_cache_space(void)
296 {
297         int state = cache_balance_state();
298         ENTRY;
299
300         if (state < 0)
301                 return;
302
303         wakeup_cpurge();
304
305         if (state > 0) {
306                 int count = CFLUSH_NR;
307                 purge_some_cache(&count);
308         }
309 }
310
311 void cache_space_pre(struct inode *inode, int op)
312 {
313         ENTRY;
314
315         /* FIXME have not used op */
316         check_cache_space();
317 }
318
/*
 * Core LRU maintenance for one inode.  @op is a bitmask:
 *   CACHE_SPACE_DELETE - cancel the inode's existing LRU log record
 *                        (cookie stored in an xattr) and zero the
 *                        cookie; when @flags is set, records already in
 *                        the newer (bottom) half of the catalog are
 *                        left in place,
 *   CACHE_SPACE_INSERT - append a new LRU record for @inode (child of
 *                        @parent) and store its cookie in the xattr,
 *   CACHE_SPACE_COMMIT - commit @handle via the underlying fsfilt.
 *
 * Returns 0 on success or a negative errno.  Note: a DELETE on an
 * inode with no recorded cookie jumps to the insert step with
 * rc = -ENOENT, so if INSERT was not also requested the caller sees
 * -ENOENT.
 */
static int cache_space_hook_lru(struct inode *inode, struct inode *parent,
                     void *handle, int op, int flags)
{
        struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
        struct llog_ctxt *ctxt = cpq->cpq_loghandle->lgh_ctxt;
        struct llog_lru_rec *llr = NULL;
        struct llog_cookie *logcookie = NULL;
        int cookie_size = sizeof(struct llog_cookie);
        int rc = 0, err;
        ENTRY;

        LASSERT(ctxt != NULL);

        /* reject any bits outside the three supported operations */
        if (op & ~(CACHE_SPACE_DELETE | CACHE_SPACE_INSERT |CACHE_SPACE_COMMIT))
                RETURN(-EINVAL);

        OBD_ALLOC(logcookie, cookie_size);
        if (!logcookie)
                GOTO(out, rc = -ENOMEM);

        if (op & CACHE_SPACE_DELETE) {
                rc = get_lru_logcookie(inode, logcookie);
                if (rc < 0)
                        GOTO(out, rc);

                if (logcookie->lgc_lgl.lgl_oid == 0) {
                        /* zero oid == no record: nothing to cancel,
                         * fall through to a possible (re)insert */
                        CWARN("inode %lu/%u is not in lru list\n",
                              inode->i_ino, inode->i_generation);
                        GOTO(insert, rc = -ENOENT);
                }
                /* record already recent enough: leave it alone */
                if (flags && llog_cat_half_bottom(logcookie, ctxt->loc_handle))
                        GOTO(out, rc = 0);

                rc = llog_cancel(ctxt, 1, logcookie, 0, NULL);
                if (!rc) {
                        /* clear the stored cookie so the inode is no
                         * longer considered to be on the LRU */
                        memset(logcookie, 0, cookie_size);
                        rc = set_lru_logcookie(inode, handle, logcookie);
                        if (rc)
                                GOTO(out, rc);
                } else {
                        CERROR("failed at llog_cancel: %d\n", rc);
                        GOTO(out, rc);
                }
        }

insert:
        if (op & CACHE_SPACE_INSERT) {
                LASSERT(parent != NULL);
                OBD_ALLOC(llr, sizeof(*llr));
                if (llr == NULL)
                        GOTO(out, rc = -ENOMEM);

                /* record both the child's and the parent's fid so the
                 * purge scan can locate the pair later */
                llr->llr_hdr.lrh_len = llr->llr_tail.lrt_len = sizeof(*llr);
                llr->llr_hdr.lrh_type = CACHE_LRU_REC;
                llr->llr_cfid.id = inode->i_ino;
                llr->llr_cfid.generation = inode->i_generation;
                llr->llr_cfid.f_type = inode->i_mode & S_IFMT;
                llr->llr_pfid.id = parent->i_ino;
                llr->llr_pfid.generation = parent->i_generation;
                llr->llr_pfid.f_type = parent->i_mode & S_IFMT;

                /* llog_add returns 1 on success for a single record */
                rc = llog_add(ctxt, &llr->llr_hdr, NULL, logcookie, 1,
                              NULL, NULL, NULL);
                if (rc != 1) {
                        CERROR("failed at llog_add: %d\n", rc);
                        GOTO(out, rc);
                }
                rc = set_lru_logcookie(inode, handle, logcookie);
        }

        if (op & CACHE_SPACE_COMMIT) {
                if (handle) {
                        err = fsops->fs_commit(inode->i_sb, inode, handle, 0);
                        if (err) {
                                CERROR("error committing transaction: %d\n", err);
                                /* keep the first failure code */
                                if (!rc)
                                        rc = err;
                        }
                }
        }
out:
        if (logcookie)
                OBD_FREE(logcookie, cookie_size);
        if (llr)
                OBD_FREE(llr, sizeof(*llr));
        RETURN(rc);
}
406
407 static int cache_purge_thread(void *args)
408 {
409         unsigned long flags;
410         struct l_wait_info lwi = LWI_TIMEOUT(cf_prm.interval * HZ, NULL, NULL);
411         ENTRY;
412
413         lock_kernel();
414         kportal_daemonize("wb_cache_purge");
415
416         SIGNAL_MASK_LOCK(current, flags);
417         sigfillset(&current->blocked);
418         RECALC_SIGPENDING;
419         SIGNAL_MASK_UNLOCK(current, flags);
420
421         unlock_kernel();
422         complete(&cpq->cpq_comp);
423
424         while (1) {
425                 int ndirty = cf_prm.ndirty;
426
427                 purge_some_cache(&ndirty);
428                 if (ndirty > 0 || cpurge_stop())
429                         l_wait_event(cpq->cpq_waitq,
430                                      cpq->cpq_flags & SVC_STOPPING,
431                                      &lwi);
432                 if (cpq->cpq_flags & SVC_STOPPING) {
433                         cpq->cpq_flags &= ~SVC_STOPPING;
434                         EXIT;
435                         break;
436                 }
437         }
438         cpq->cpq_flags = SVC_STOPPED;
439         complete(&cpq->cpq_comp);
440         return 0;
441 }
442
/*
 * Set up the cache-space machinery for @sb: open the LRU catalog on the
 * local filesystem, record it in the global purge queue, and start the
 * purge daemon.  Note cpq is a file-scope singleton, so only one
 * superblock at a time is supported.
 */
int cache_space_hook_setup(struct super_block *sb)
{
        struct llog_ctxt *ctxt;
        int rc;
        ENTRY;

        /* first to initialize the cache lru catalog on local fs */
        rc = llog_catalog_setup(&ctxt, CACHE_LRU_LOG,
                                S2SMI(sb)->smsi_exp,
                                S2SMI(sb)->smsi_ctxt,
                                S2SMI(sb)->sm_fsfilt,
                                S2SMI(sb)->smsi_logs_dir,
                                S2SMI(sb)->smsi_objects_dir);
        if (rc) {
                CERROR("failed to initialize cache lru list catalog %d\n", rc);
                RETURN(rc);
        }
        cpq->cpq_sb = sb;
        cpq->cpq_loghandle = ctxt->loc_handle;

        /* start cache purge daemon, only one daemon now */
        init_waitqueue_head(&cpq->cpq_waitq);
        init_completion(&cpq->cpq_comp);
        cpq->cpq_flags = 0;

        /* kernel_thread returns the new pid, or a negative errno */
        rc = kernel_thread(cache_purge_thread, NULL, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread: %d\n", rc);
                GOTO(err_out, rc);
        }
        /* wait until the daemon has finished initializing */
        wait_for_completion(&cpq->cpq_comp);

        RETURN(0);
err_out:
        llog_catalog_cleanup(ctxt);
        OBD_FREE(ctxt, sizeof(*ctxt));
        RETURN(rc);
}
481
482 int cache_space_hook_cleanup(void)
483 {
484         struct llog_ctxt *ctxt;
485         int rc;
486         ENTRY;
487
488         init_completion(&cpq->cpq_comp);
489         cpq->cpq_flags = SVC_STOPPING;
490         wake_up(&cpq->cpq_waitq);
491         wait_for_completion(&cpq->cpq_comp);
492         
493         ctxt = cpq->cpq_loghandle->lgh_ctxt;
494         rc = llog_catalog_cleanup(ctxt);
495         OBD_FREE(ctxt, sizeof(*ctxt));
496         if (rc)
497                 CERROR("failed to clean up cache lru list catalog %d\n", rc);
498
499         RETURN(rc);
500 }
501
/*
 * Post-create hook (also dispatched for symlink and mknod): insert the
 * new leaf into the LRU, drop its parent from the LRU (the parent now
 * has an active child and is no longer a leaf), and bump the parent's
 * active-entry count.
 */
static int cache_space_hook_create(void *handle, struct inode *dir,
                                   struct dentry *dentry, struct inode *new_dir,
                                   struct dentry *new_dentry)
{
        __u64 active_entry = 0;
        int rc;

        /* NOTE(review): LASSERT argument calls a function; if asserts
         * are ever compiled out this check vanishes -- confirm that is
         * acceptable here. */
        LASSERT(cache_leaf_node(dentry, NULL));
        rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
                                  CACHE_SPACE_INSERT, 0);
        if (rc)
                RETURN(rc);
        /* if the parent was a leaf, remove it from the LRU; as a side
         * effect this loads its active-entry count into active_entry */
        if (cache_leaf_node(dentry->d_parent, &active_entry)) {
                rc = cache_space_hook_lru(dir,NULL,handle,CACHE_SPACE_DELETE,0);
                if (rc)
                        RETURN(rc);
        }
        /* only re-read the count if cache_leaf_node did not load it */
        if (!active_entry)
                rc = get_active_entry(dir, &active_entry);
        active_entry ++;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        RETURN(rc);
}
526 static int cache_space_hook_lookup(void *handle, struct inode *dir,
527                                    struct dentry *dentry, struct inode *new_dir,
528                                    struct dentry *new_dentry)
529 {
530         __u64 active_entry;
531         int rc = 0;
532
533         if (cache_leaf_node(dentry, &active_entry))
534                 rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
535                                 CACHE_SPACE_DELETE | CACHE_SPACE_INSERT,1);
536         RETURN(rc);
537 }
/*
 * Post-link hook: a file gaining a second link stops being a leaf, so
 * drop it from the LRU; likewise drop the target directory if it was a
 * leaf, then bump the directory's active-entry count.
 */
static int cache_space_hook_link(void *handle, struct inode *dir,
                                 struct dentry *dentry, struct inode *new_dir,
                                 struct dentry *new_dentry)
{
        __u64 active_entry = 0;
        int rc = 0;

        /* op 1: file just went from 1 to 2 links -> no longer a leaf */
        if (cache_pre_leaf_node(dentry, NULL, 1)) {
                rc = cache_space_hook_lru(dentry->d_inode, NULL,
                                          handle, CACHE_SPACE_DELETE, 0);
                if (rc)
                        RETURN(rc);
        }

        /* if the directory was a leaf, remove it from the LRU; as a
         * side effect this loads its active-entry count */
        if (cache_leaf_node(dentry->d_parent, &active_entry)) {
                rc = cache_space_hook_lru(dir,NULL,handle,CACHE_SPACE_DELETE,0);
                if (rc)
                        RETURN(rc);
        }

        /* only re-read the count if cache_leaf_node did not load it */
        if (!active_entry)
                rc = get_active_entry(dir, &active_entry);
        active_entry ++;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        RETURN(rc);
}
/*
 * Post-unlink hook: remove the victim from the LRU if its last link is
 * gone (op 0), or reinsert it if it dropped back to a single link and
 * is a leaf again; then decrement the directory's active-entry count,
 * and if the directory thereby became a leaf, insert it into the LRU
 * under its own parent.
 */
static int cache_space_hook_unlink(void *handle, struct inode *dir,
                                   struct dentry *dentry, struct inode *new_dir,
                                   struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc = 0;

        if (cache_pre_leaf_node(dentry, NULL, 0))
                rc = cache_space_hook_lru(dentry->d_inode, NULL,
                                          handle, CACHE_SPACE_DELETE, 0);
        else if (cache_leaf_node(dentry, NULL))
                        rc = cache_space_hook_lru(dentry->d_inode, dir,
                                                  handle, CACHE_SPACE_INSERT,0);
        if (rc)
                RETURN(rc);

        rc = get_active_entry(dir, &active_entry);
        active_entry --;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        /* directory lost its last active child: it is a leaf now */
        if (!rc && cache_leaf_node(dentry->d_parent, &active_entry))
                rc = cache_space_hook_lru(dir,
                                          dentry->d_parent->d_parent->d_inode,
                                          handle, CACHE_SPACE_INSERT, 0);
        RETURN(rc);
}
/*
 * Post-mkdir hook: insert the new (leaf) directory into the LRU, and
 * remove the parent from the LRU if it just gained its first
 * subdirectory (op 3: parent nlink became 3) and has no active entries.
 */
static int cache_space_hook_mkdir(void *handle, struct inode *dir,
                                  struct dentry *dentry, struct inode *new_dir,
                                  struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc;

        /* NOTE(review): LASSERT argument has the side effect of loading
         * active_entry; the value is rewritten before use below, but if
         * asserts are compiled out the check vanishes -- confirm. */
        LASSERT(cache_leaf_node(dentry, &active_entry));
        rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
                                  CACHE_SPACE_INSERT,0);

        if (!rc && cache_pre_leaf_node(dentry->d_parent, &active_entry, 3))
                rc = cache_space_hook_lru(dir,NULL,handle,CACHE_SPACE_DELETE,0);
        RETURN(rc);
}
/*
 * Post-rmdir hook: drop the removed directory from the LRU (op 2), and
 * if its parent thereby became a leaf, insert the parent into the LRU
 * under the grandparent.
 */
static int cache_space_hook_rmdir(void *handle, struct inode *dir,
                                  struct dentry *dentry, struct inode *new_dir,
                                  struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc;

        /* NOTE(review): LASSERT argument has side effects (loads
         * active_entry via get_active_entry); if asserts are compiled
         * out the check vanishes -- confirm that is acceptable. */
        LASSERT(cache_pre_leaf_node(dentry, &active_entry, 2));
        rc = cache_space_hook_lru(dentry->d_inode, NULL, handle,
                                  CACHE_SPACE_DELETE, 0);

        if (!rc && cache_leaf_node(dentry->d_parent, &active_entry))
                rc = cache_space_hook_lru(dir,
                                          dentry->d_parent->d_parent->d_inode,
                                          handle, CACHE_SPACE_INSERT, 0);
        RETURN(rc);
}
623 static int cache_space_hook_rename(void *handle, struct inode *old_dir,
624                         struct dentry *old_dentry, struct inode *new_dir,
625                         struct dentry *new_dentry)
626 {
627         __u64 active_entry;
628         int rc = 0;
629
630         if (new_dentry->d_inode) {
631                 if (cache_pre_leaf_node(new_dentry, NULL, 0))
632                         rc = cache_space_hook_lru(new_dentry->d_inode, NULL,
633                                                   handle, CACHE_SPACE_DELETE,0);
634                 else if (cache_leaf_node(new_dentry, NULL))
635                         rc = cache_space_hook_lru(new_dentry->d_inode,
636                                                   new_dir, handle,
637                                                   CACHE_SPACE_INSERT,0);
638         }
639
640         if (rc || old_dir == new_dir)
641                 RETURN(rc);
642
643         if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
644                 if (cache_leaf_node(new_dentry->d_parent, &active_entry)) {
645                         rc = cache_space_hook_lru(new_dir, NULL, handle,
646                                                   CACHE_SPACE_DELETE, 0);
647                         if (rc)
648                                 RETURN(rc);
649                 }
650                 if (!active_entry)
651                         rc = get_active_entry(new_dir, &active_entry);
652                 active_entry ++;
653                 if (!rc)
654                         rc = set_active_entry(new_dir, &active_entry, handle);
655                 if (rc)
656                         RETURN(rc);
657                 rc = get_active_entry(old_dir, &active_entry);
658                 active_entry --;
659                 if (!rc)
660                         rc = set_active_entry(old_dir, &active_entry, handle);
661         } else if (cache_pre_leaf_node(new_dentry->d_parent, &active_entry, 3))
662                 rc = cache_space_hook_lru(new_dir, NULL, handle,
663                                           CACHE_SPACE_DELETE, 0);
664
665         if (!rc && cache_leaf_node(old_dentry->d_parent, &active_entry))
666                 rc = cache_space_hook_lru(old_dir,
667                                         old_dentry->d_parent->d_parent->d_inode,
668                                         handle, CACHE_SPACE_INSERT, 0);
669         RETURN(rc);
670 }
671
/* Common signature for all post-operation cache hooks; the last two
 * arguments are only meaningful for rename. */
typedef int (*cache_hook_op)(void *handle, struct inode *old_dir,
                             struct dentry *old_dentry, struct inode *new_dir,
                             struct dentry *new_dentry);

/* Dispatch table indexed by hook opcode (0..HOOK_MAX); NULL entries
 * are no-ops.  create/symlink/mknod share one handler. */
static  cache_hook_op cache_space_hook_ops[HOOK_MAX + 1] = {
        [HOOK_CREATE]     cache_space_hook_create,
        [HOOK_LOOKUP]     cache_space_hook_lookup,
        [HOOK_LINK]       cache_space_hook_link,
        [HOOK_UNLINK]     cache_space_hook_unlink,
        [HOOK_SYMLINK]    cache_space_hook_create,
        [HOOK_MKDIR]      cache_space_hook_mkdir,
        [HOOK_RMDIR]      cache_space_hook_rmdir,
        [HOOK_MKNOD]      cache_space_hook_create,
        [HOOK_RENAME]     cache_space_hook_rename,
        [HOOK_SETATTR]    NULL,
        [HOOK_WRITE]      NULL,
};
689
690 int cache_space_post(int op, void *handle, struct inode *old_dir,
691                struct dentry *old_dentry, struct inode *new_dir,
692                struct dentry *new_dentry)
693 {
694         int rc = 0;
695         ENTRY;
696
697         LASSERT(op <= HOOK_MAX + 1);
698
699         if (cache_space_hook_ops[op]) 
700                 rc = cache_space_hook_ops[op](handle, old_dir, old_dentry,
701                                               new_dir, new_dentry);
702         RETURN(rc);
703 }