Whamcloud - gitweb
- unland b_fid to HEAD
[fs/lustre-release.git] / lustre / smfs / cache_space.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  lustre/smfs/cache_space.c
5  *  A library of functions to manage cache space based on ARC
6  *  (modified LRU) replacement algorithm.
7  *
8  *  Copyright (c) 2004 Cluster File Systems, Inc.
9  *
10  *   This file is part of Lustre, http://www.lustre.org.
11  *
12  *   Lustre is free software; you can redistribute it and/or
13  *   modify it under the terms of version 2 of the GNU General Public
14  *   License as published by the Free Software Foundation.
15  *
16  *   Lustre is distributed in the hope that it will be useful,
17  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *   GNU General Public License for more details.
20  *
21  *   You should have received a copy of the GNU General Public License
22  *   along with Lustre; if not, write to the Free Software
23  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25 #define DEBUG_SUBSYSTEM S_SM
26
27 #include <linux/lustre_log.h>
28 #include <linux/lustre_fsfilt.h>
29 #include <linux/lustre_smfs.h>
30
31 #include "smfs_internal.h"
32
/* Tunables for the cache purge daemon; initialized below to
 * {nfract=30, ndirty=512, interval=600*HZ, nfract_sync=60,
 *  nfract_stop_cpurge=20}. */
struct cache_purge_param {
        int nfract;     /* Percentage of cache dirty to activate cpurge */
        int ndirty;     /* Maximum number of objects to write out per
                           wake-cycle */
        int interval;   /* jiffies delay between cache purge */
        int nfract_sync;/* Percentage of cache dirty to activate
                           cpurge synchronously */
        int nfract_stop_cpurge; /* Percentage of cache dirty to stop cpurge */
} cf_prm = {30, 512, 600 * HZ, 60, 20};
42
/* Single global purge queue; every helper below operates on it through
 * the 'cpq' pointer. */
static struct cache_purge_queue smfs_cpq;
static struct cache_purge_queue *cpq = &smfs_cpq;

/* Name under which the cache hooks are registered with SMFS. */
#define CACHE_HOOK "cache_hook"
/* Pre-operation hook: before an SMFS operation runs, reclaim cache
 * space if usage crossed the purge thresholds, starting a no-op
 * transaction when the caller did not pass one.
 * Returns 0, or a negative error from transaction start. */
int cache_space_pre_hook(struct inode *inode, void *dentry,
                         void *data1, void *data2, int op, void *handle)
{
        int rc = 0;
        ENTRY;

        if (smfs_cache_hook(inode)) {
               if (!handle) {
                        /* NOTE(review): 'handle' is a by-value parameter, so a
                         * transaction started here is invisible to the caller
                         * and is not explicitly committed in this function —
                         * confirm the smfs_trans_start/commit pairing against
                         * the callers and the post hook. */
                        handle = smfs_trans_start(inode, KML_CACHE_NOOP, NULL);
                        if (IS_ERR(handle)) {
                                RETURN(PTR_ERR(handle));
                        }
                }
                cache_space_pre(inode, op);
        }
        RETURN(rc);
}
64
65 int cache_space_post_hook(struct inode *inode, void *de, void *data1, 
66                           void *data2, int op, void *handle)
67 {
68         int rc = 0;
69         ENTRY;
70         if (smfs_cache_hook(inode)) {      
71                 struct inode *new_inode = (struct inode*)data1;
72                 struct dentry *new_dentry = (struct dentry*)data2;        
73                 struct dentry *dentry = (struct dentry *)de;
74             
75                 LASSERT(handle != NULL);                                
76                 rc = cache_space_post(op, handle, inode, dentry, new_inode, 
77                                       new_dentry);
78         }
79         RETURN(rc);                                                               
80 }
81
82 int cache_space_hook_init(struct super_block *sb)
83 {
84         struct smfs_super_info *smfs_info = S2SMI(sb);
85         struct smfs_hook_ops    *cache_hops;
86         int    rc = 0;
87         ENTRY;
88
89         cache_hops = smfs_alloc_hook_ops(CACHE_HOOK, cache_space_pre_hook, 
90                                          cache_space_post_hook);
91         if (!cache_hops) {
92                 RETURN(-ENOMEM);
93         }
94         rc = smfs_register_hook_ops(smfs_info, cache_hops); 
95         if (rc) {
96                 smfs_free_hook_ops(cache_hops);
97                 RETURN(rc);
98         }
99         SMFS_SET_CACHE_HOOK(smfs_info);
100
101         RETURN(0);
102 }
103
104 int cache_space_hook_exit(struct smfs_super_info *smfs_info)
105 {
106         struct smfs_hook_ops *cache_hops; 
107
108         cache_hops = smfs_unregister_hook_ops(smfs_info, CACHE_HOOK);
109         smfs_free_hook_ops(cache_hops);
110
111         SMFS_CLEAN_CACHE_HOOK(smfs_info);
112         return 0;
113 }
114
115 static int cache_leaf_node(struct dentry *dentry, __u64 *active_entry)
116 {
117         struct inode *inode = dentry->d_inode;
118
119         if (!dentry->d_inode)
120                 return 0;
121         if (S_ISDIR(inode->i_mode)) {
122                 if (inode->i_nlink != 2)
123                         return 0;
124                 if (!strncmp(dentry->d_name.name, "lost+found", dentry->d_name.len))
125                         return 0;
126                 LASSERT(active_entry != NULL);
127                 get_active_entry(inode, active_entry);
128                 return(*active_entry > 0 ? 0 : 1);
129         } else {
130                 if (inode->i_nlink != 1)
131                         return 0;
132                 if (!strncmp(dentry->d_name.name, KML_LOG_NAME, dentry->d_name.len) ||
133                     !strncmp(dentry->d_name.name, CACHE_LRU_LOG, dentry->d_name.len))
134                         return 0;
135                 return 1;
136         }
137 }
138 static int cache_pre_leaf_node(struct dentry *dentry, __u64 *active_entry, int op)
139 {
140         if (((op == 0 && dentry->d_inode->i_nlink == 0) ||
141             (op == 1 && dentry->d_inode->i_nlink == 2)) &&
142             strncmp(dentry->d_name.name, KML_LOG_NAME, dentry->d_name.len) &&
143             strncmp(dentry->d_name.name, CACHE_LRU_LOG, dentry->d_name.len))
144                 return 1;
145         else if ((op == 2 && dentry->d_inode->i_nlink == 0) ||
146                  (op == 3 && dentry->d_inode->i_nlink == 3)) {
147                 LASSERT(active_entry != NULL);
148                 get_active_entry(dentry->d_inode, active_entry);
149                 return(*active_entry > 0 ? 0 : 1);
150         }
151         return 0;
152 }
153
154 static int set_lru_logcookie(struct inode *inode, void *handle,
155                              struct llog_cookie *logcookie)
156 {
157         struct fsfilt_operations *fsops = I2CSB(inode)->sm_fsfilt;
158         int rc;
159         rc = fsops->fs_set_xattr(inode, handle, XATTR_SMFS_CACHE_LOGCOOKIE,
160                                  logcookie, sizeof(*logcookie));
161         RETURN(rc);
162 }
163 static int get_lru_logcookie(struct inode *inode, struct llog_cookie *logcookie)
164 {
165         struct fsfilt_operations *fsops = I2CSB(inode)->sm_fsfilt;
166         int rc;
167         rc = fsops->fs_get_xattr(inode, XATTR_SMFS_CACHE_LOGCOOKIE,
168                                  logcookie, sizeof(*logcookie));
169         RETURN(rc);
170 }
171
/* Check whether the object @cfid (with parent @pfid) could be purged
 * from the cache: hoarded or busy inodes are skipped.
 * NOTE(review): this only performs the checks — nothing is actually
 * freed (see the commented-out logcookie), and the function never
 * returns 1, so the caller's "== 1" purge branch in
 * cache_lru_get_rec_cb() is currently dead.  Returns 0 or -ENOENT. */
static int try2purge_from_cache(struct ll_fid cfid, struct ll_fid pfid)
{
        struct inode *inode, *parent;
        struct super_block *sb = cpq->cpq_sb;
        //struct llog_cookie logcookie;
        __u32 hoard_priority = 0;
        int rc = 0;
        ENTRY;

        /* NOTE(review): assumes iget() reports failure via ERR_PTR();
         * some kernels return NULL instead — confirm for the target
         * kernel version. */
        inode = iget(sb, cfid.id);
        if (IS_ERR(inode)) {
                CERROR("not existent inode: "LPX64"/%u\n",
                       cfid.id, cfid.generation);
                RETURN(-ENOENT);
        }
        parent = iget(sb, pfid.id);
        if (IS_ERR(parent)) {
                CERROR("not existent inode: "LPX64"/%u\n",
                       pfid.id, pfid.generation);
                iput(inode);
                RETURN(-ENOENT);
        }

        CWARN("inode/parent %lu:%lu on the lru list\n",
              inode->i_ino, parent->i_ino);

        /* Hoarded inodes are pinned in cache and never purged. */
        rc = get_hoard_priority(inode, &hoard_priority);
        if (hoard_priority) {
                CWARN("inode %lu set hoard\n", inode->i_ino);
                GOTO(out, rc);
        }
        /* In use elsewhere or dirty: leave it alone. */
        if (atomic_read(&inode->i_count) > 1 || (inode->i_state & I_DIRTY)) {
                CWARN("inode %lu is busy\n", inode->i_ino);
                GOTO(out, rc = 0);
        }

out:
        iput(inode);
        iput(parent);
        RETURN(rc);
}
213
/* llog_cat_process() callback, invoked once per LRU record.  *data is
 * the number of objects still to purge; it is decremented per purged
 * object and LLOG_PROC_BREAK is returned once it reaches zero.
 * Returns -EINVAL on malformed logs/records.
 * NOTE(review): try2purge_from_cache() currently never returns 1, so
 * the count never decreases and the walk only ends at log end —
 * revisit once the purge path is implemented. */
static int cache_lru_get_rec_cb(struct llog_handle *llh,
                                struct llog_rec_hdr *rec, void *data)
{
        struct llog_lru_rec *llr;
        int count = *(int *)data, rc = 0;
        ENTRY;

        if (!(le32_to_cpu(llh->lgh_hdr->llh_flags) & LLOG_F_IS_PLAIN)) {
                CERROR("log is not plain\n");
                RETURN(-EINVAL);
        }
        if (rec->lrh_type != CACHE_LRU_REC) {
                CERROR("log record type error\n");
                RETURN(-EINVAL);
        }

        llr = (struct llog_lru_rec *)rec;

        if (try2purge_from_cache(llr->llr_cfid, llr->llr_pfid)==1){
                CDEBUG(D_INODE, "purge ino/gen "LPX64"/%u from cache\n",
                       llr->llr_cfid.id, llr->llr_cfid.generation);
                count --;
                if (count == 0)
                        rc = LLOG_PROC_BREAK;
                *(int *)data = count;
        }

        RETURN(rc);
}
243
244 static int cpurge_stop(void)
245 {
246         struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
247         struct obd_statfs osfs;
248         int rc, free;
249
250         rc = fsops->fs_statfs(cpq->cpq_sb, &osfs);
251         LASSERT(rc == 0);
252
253         free = osfs.os_bfree * 100;
254         if (free < cf_prm.nfract_stop_cpurge * osfs.os_blocks)
255                 return 1;
256         return 0;
257 }
258
259 static int cache_balance_state(void)
260 {
261         struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
262         struct obd_statfs osfs;
263         int rc, free;
264
265         rc = fsops->fs_statfs(cpq->cpq_sb, &osfs);
266         LASSERT(rc == 0);
267
268         free = (osfs.os_blocks - osfs.os_bfree) * 100;
269         if (free > cf_prm.nfract * osfs.os_blocks) {
270                 if (free < cf_prm.nfract_sync)
271                         return 1;
272                 return 0;
273         }
274         return -1;
275 }
276
/* Wake the cache purge daemon (cache_purge_thread). */
void wakeup_cpurge(void)
{
        wake_up(&cpq->cpq_waitq);
}
281
282 /* walk the lru llog to purge count number of objects */
283 static int purge_some_cache(int *count)
284 {
285         int rc;
286         ENTRY;
287
288         rc = llog_cat_process(cpq->cpq_loghandle,
289                               (llog_cb_t)cache_lru_get_rec_cb,
290                               count);
291         if (!rc)
292                 CDEBUG(D_INODE, "no enough objects available\n");
293
294         RETURN(rc);
295 }
296
297 #define CFLUSH_NR 512
298 static void check_cache_space(void)
299 {
300         int state = cache_balance_state();
301         ENTRY;
302
303         if (state < 0)
304                 return;
305
306         wakeup_cpurge();
307
308         if (state > 0) {
309                 int count = CFLUSH_NR;
310                 purge_some_cache(&count);
311         }
312 }
313
/* Called before each hooked operation to reclaim cache space if usage
 * crossed the thresholds; @inode and @op are currently unused. */
void cache_space_pre(struct inode *inode, int op)
{
        ENTRY;

        /* FIXME have not used op */
        check_cache_space();
}
321
/* Maintain @inode's record in the cache LRU llog.
 *
 * @op is a bitmask of CACHE_SPACE_DELETE / CACHE_SPACE_INSERT /
 * CACHE_SPACE_COMMIT:
 *  - DELETE cancels the inode's existing llog record (located via the
 *    logcookie stored in an xattr) and zeroes the stored cookie;
 *  - INSERT appends a new LRU record for (inode, parent) and stores
 *    the returned cookie in the xattr;
 *  - COMMIT commits @handle after the llog work.
 * A DELETE on an inode with no stored cookie jumps to the insert step
 * with rc = -ENOENT, so DELETE|INSERT re-inserts cleanly while a bare
 * DELETE reports -ENOENT.  With @flags set, a DELETE whose record is
 * already in the bottom half of the catalog is skipped. */
static int cache_space_hook_lru(struct inode *inode, struct inode *parent,
                     void *handle, int op, int flags)
{
        struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
        struct llog_ctxt *ctxt = cpq->cpq_loghandle->lgh_ctxt;
        struct llog_lru_rec *llr = NULL;
        struct llog_cookie *logcookie = NULL;
        int cookie_size = sizeof(struct llog_cookie);
        int rc = 0, err;
        ENTRY;

        LASSERT(ctxt != NULL);

        if (op & ~(CACHE_SPACE_DELETE | CACHE_SPACE_INSERT |CACHE_SPACE_COMMIT))
                RETURN(-EINVAL);

        OBD_ALLOC(logcookie, cookie_size);
        if (!logcookie)
                GOTO(out, rc = -ENOMEM);

        if (op & CACHE_SPACE_DELETE) {
                rc = get_lru_logcookie(inode, logcookie);
                if (rc < 0)
                        GOTO(out, rc);

                /* Zeroed cookie: the inode was never inserted. */
                if (logcookie->lgc_lgl.lgl_oid == 0) {
                        CWARN("inode %lu/%u is not in lru list\n",
                              inode->i_ino, inode->i_generation);
                        GOTO(insert, rc = -ENOENT);
                }
                if (flags && llog_cat_half_bottom(logcookie, ctxt->loc_handle))
                        GOTO(out, rc = 0);

                rc = llog_cancel(ctxt, 1, logcookie, 0, NULL);
                if (!rc) {
                        /* Clear the stored cookie so the inode reads as
                         * "not in the LRU" from now on. */
                        memset(logcookie, 0, cookie_size);
                        rc = set_lru_logcookie(inode, handle, logcookie);
                        if (rc)
                                GOTO(out, rc);
                } else {
                        CERROR("failed at llog_cancel: %d\n", rc);
                        GOTO(out, rc);
                }
        }

insert:
        if (op & CACHE_SPACE_INSERT) {
                LASSERT(parent != NULL);
                OBD_ALLOC(llr, sizeof(*llr));
                if (llr == NULL)
                        GOTO(out, rc = -ENOMEM);

                llr->llr_hdr.lrh_len = llr->llr_tail.lrt_len = sizeof(*llr);
                llr->llr_hdr.lrh_type = CACHE_LRU_REC;
                llr->llr_cfid.id = inode->i_ino;
                llr->llr_cfid.generation = inode->i_generation;
                llr->llr_cfid.f_type = inode->i_mode & S_IFMT;
                llr->llr_pfid.id = parent->i_ino;
                llr->llr_pfid.generation = parent->i_generation;
                llr->llr_pfid.f_type = parent->i_mode & S_IFMT;

                /* llog_add returns the number of records written (1 on
                 * success here). */
                rc = llog_add(ctxt, &llr->llr_hdr, NULL, logcookie, 1,
                              NULL, NULL, NULL);
                if (rc != 1) {
                        CERROR("failed at llog_add: %d\n", rc);
                        GOTO(out, rc);
                }
                rc = set_lru_logcookie(inode, handle, logcookie);
        }

        if (op & CACHE_SPACE_COMMIT) {
                if (handle) {
                        err = fsops->fs_commit(inode->i_sb, inode, handle, 0);
                        if (err) {
                                CERROR("error committing transaction: %d\n", err);
                                if (!rc)
                                        rc = err;
                        }
                }
        }
out:
        if (logcookie)
                OBD_FREE(logcookie, cookie_size);
        if (llr)
                OBD_FREE(llr, sizeof(*llr));
        RETURN(rc);
}
409
/* Body of the "wb_cache_purge" daemon started by
 * cache_space_hook_setup(): periodically (or when woken via
 * wakeup_cpurge()) purge up to cf_prm.ndirty objects, until
 * cache_space_hook_cleanup() sets SVC_STOPPING. */
static int cache_purge_thread(void *args)
{
        unsigned long flags;
        /* NOTE(review): cf_prm.interval is initialized as 600 * HZ
         * (already jiffies) yet multiplied by HZ again here, giving an
         * HZ^2 timeout — one of the two looks wrong; confirm the
         * intended wake period. */
        struct l_wait_info lwi = LWI_TIMEOUT(cf_prm.interval * HZ, NULL, NULL);
        ENTRY;

        lock_kernel();
        kportal_daemonize("wb_cache_purge");

        /* Block all signals for this daemon. */
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        unlock_kernel();
        /* Tell cache_space_hook_setup() we are up. */
        complete(&cpq->cpq_comp);

        while (1) {
                int ndirty = cf_prm.ndirty;

                purge_some_cache(&ndirty);
                /* Sleep when nothing was left to purge or purging should
                 * pause; a wakeup or the timeout resumes us. */
                if (ndirty > 0 || cpurge_stop())
                        l_wait_event(cpq->cpq_waitq,
                                     cpq->cpq_flags & SVC_STOPPING,
                                     &lwi);
                if (cpq->cpq_flags & SVC_STOPPING) {
                        cpq->cpq_flags &= ~SVC_STOPPING;
                        EXIT;
                        break;
                }
        }
        cpq->cpq_flags = SVC_STOPPED;
        /* Tell cache_space_hook_cleanup() we are gone. */
        complete(&cpq->cpq_comp);
        return 0;
}
445
/* Set up the cache-space machinery for @sb: open the LRU llog catalog
 * on the local fs and start the purge daemon.
 * Returns 0 or a negative error; on thread-start failure the catalog
 * is cleaned up and its context freed. */
int cache_space_hook_setup(struct super_block *sb)
{
        struct llog_ctxt *ctxt;
        int rc;
        ENTRY;

        /* first to initialize the cache lru catalog on local fs */
        rc = llog_catalog_setup(&ctxt, CACHE_LRU_LOG,
                                S2SMI(sb)->smsi_exp,
                                S2SMI(sb)->smsi_ctxt,
                                S2SMI(sb)->sm_fsfilt,
                                S2SMI(sb)->smsi_logs_dir,
                                S2SMI(sb)->smsi_objects_dir);
        if (rc) {
                CERROR("failed to initialize cache lru list catalog %d\n", rc);
                RETURN(rc);
        }
        cpq->cpq_sb = sb;
        cpq->cpq_loghandle = ctxt->loc_handle;

        /* start cache purge daemon, only one daemon now */
        init_waitqueue_head(&cpq->cpq_waitq);
        init_completion(&cpq->cpq_comp);
        cpq->cpq_flags = 0;

        rc = kernel_thread(cache_purge_thread, NULL, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread: %d\n", rc);
                GOTO(err_out, rc);
        }
        /* Wait until the daemon signals it finished initializing. */
        wait_for_completion(&cpq->cpq_comp);

        RETURN(0);
err_out:
        llog_catalog_cleanup(ctxt);
        OBD_FREE(ctxt, sizeof(*ctxt));
        RETURN(rc);
}
484
/* Counterpart of cache_space_hook_setup(): stop the purge daemon and
 * tear down the LRU catalog.  Returns the catalog cleanup result. */
int cache_space_hook_cleanup(void)
{
        struct llog_ctxt *ctxt;
        int rc;
        ENTRY;

        /* Ask the daemon to exit and wait for its final completion. */
        init_completion(&cpq->cpq_comp);
        cpq->cpq_flags = SVC_STOPPING;
        wake_up(&cpq->cpq_waitq);
        wait_for_completion(&cpq->cpq_comp);

        ctxt = cpq->cpq_loghandle->lgh_ctxt;
        rc = llog_catalog_cleanup(ctxt);
        OBD_FREE(ctxt, sizeof(*ctxt));
        if (rc)
                CERROR("failed to clean up cache lru list catalog %d\n", rc);

        RETURN(rc);
}
504
/* Post-hook for create/symlink/mknod: the new object is a leaf, so
 * insert it into the LRU; the parent gained an active entry and thus
 * stops being a leaf (drop it from the LRU if it was one), then bump
 * the parent's active-entry count. */
static int cache_space_hook_create(void *handle, struct inode *dir,
                                   struct dentry *dentry, struct inode *new_dir,
                                   struct dentry *new_dentry)
{
        __u64 active_entry = 0;
        int rc;

        LASSERT(cache_leaf_node(dentry, NULL));
        rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
                                  CACHE_SPACE_INSERT, 0);
        if (rc)
                RETURN(rc);
        if (cache_leaf_node(dentry->d_parent, &active_entry)) {
                rc = cache_space_hook_lru(dir,NULL,handle,CACHE_SPACE_DELETE,0);
                if (rc)
                        RETURN(rc);
        }
        /* cache_leaf_node() may have fetched active_entry already;
         * otherwise read it now before incrementing. */
        if (!active_entry)
                rc = get_active_entry(dir, &active_entry);
        active_entry ++;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        RETURN(rc);
}
529 static int cache_space_hook_lookup(void *handle, struct inode *dir,
530                                    struct dentry *dentry, struct inode *new_dir,
531                                    struct dentry *new_dentry)
532 {
533         __u64 active_entry;
534         int rc = 0;
535
536         if (cache_leaf_node(dentry, &active_entry))
537                 rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
538                                 CACHE_SPACE_DELETE | CACHE_SPACE_INSERT,1);
539         RETURN(rc);
540 }
/* Post-hook for link: the target inode now has a second link, so it is
 * no longer a leaf (drop it from the LRU); the directory gained an
 * active entry — drop it from the LRU if it was a leaf and bump its
 * active-entry count. */
static int cache_space_hook_link(void *handle, struct inode *dir,
                                 struct dentry *dentry, struct inode *new_dir,
                                 struct dentry *new_dentry)
{
        __u64 active_entry = 0;
        int rc = 0;

        if (cache_pre_leaf_node(dentry, NULL, 1)) {
                rc = cache_space_hook_lru(dentry->d_inode, NULL,
                                          handle, CACHE_SPACE_DELETE, 0);
                if (rc)
                        RETURN(rc);
        }

        if (cache_leaf_node(dentry->d_parent, &active_entry)) {
                rc = cache_space_hook_lru(dir,NULL,handle,CACHE_SPACE_DELETE,0);
                if (rc)
                        RETURN(rc);
        }

        /* cache_leaf_node() may have fetched active_entry already;
         * otherwise read it now before incrementing. */
        if (!active_entry)
                rc = get_active_entry(dir, &active_entry);
        active_entry ++;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        RETURN(rc);
}
/* Post-hook for unlink: if the last link is gone drop the inode from
 * the LRU; if one link remains it becomes a leaf again and is
 * re-inserted.  Then decrement the directory's active-entry count and
 * insert the directory into the LRU if it just became a leaf. */
static int cache_space_hook_unlink(void *handle, struct inode *dir,
                                   struct dentry *dentry, struct inode *new_dir,
                                   struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc = 0;

        if (cache_pre_leaf_node(dentry, NULL, 0))
                rc = cache_space_hook_lru(dentry->d_inode, NULL,
                                          handle, CACHE_SPACE_DELETE, 0);
        else if (cache_leaf_node(dentry, NULL))
                        rc = cache_space_hook_lru(dentry->d_inode, dir,
                                                  handle, CACHE_SPACE_INSERT,0);
        if (rc)
                RETURN(rc);

        rc = get_active_entry(dir, &active_entry);
        active_entry --;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        if (!rc && cache_leaf_node(dentry->d_parent, &active_entry))
                rc = cache_space_hook_lru(dir,
                                          dentry->d_parent->d_parent->d_inode,
                                          handle, CACHE_SPACE_INSERT, 0);
        RETURN(rc);
}
/* Post-hook for mkdir: insert the new (empty) directory into the LRU;
 * if the parent just gained its third link (pre-leaf op 3) it stops
 * being a leaf and is removed from the LRU.
 * NOTE(review): the leaf check lives inside LASSERT() and disappears
 * when asserts are compiled out; active_entry is still set by
 * cache_pre_leaf_node(..., 3) before use, but a side effect inside an
 * assert is fragile. */
static int cache_space_hook_mkdir(void *handle, struct inode *dir,
                                  struct dentry *dentry, struct inode *new_dir,
                                  struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc;

        LASSERT(cache_leaf_node(dentry, &active_entry));
        rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
                                  CACHE_SPACE_INSERT,0);

        if (!rc && cache_pre_leaf_node(dentry->d_parent, &active_entry, 3))
                rc = cache_space_hook_lru(dir,NULL,handle,CACHE_SPACE_DELETE,0);
        RETURN(rc);
}
/* Post-hook for rmdir: drop the removed directory from the LRU; if the
 * parent thereby became a leaf, insert it (with the grandparent as its
 * LRU parent).
 * NOTE(review): as in mkdir, the pre-leaf check sits inside LASSERT()
 * and vanishes when asserts are compiled out — fragile. */
static int cache_space_hook_rmdir(void *handle, struct inode *dir,
                                  struct dentry *dentry, struct inode *new_dir,
                                  struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc;

        LASSERT(cache_pre_leaf_node(dentry, &active_entry, 2));
        rc = cache_space_hook_lru(dentry->d_inode, NULL, handle,
                                  CACHE_SPACE_DELETE, 0);

        if (!rc && cache_leaf_node(dentry->d_parent, &active_entry))
                rc = cache_space_hook_lru(dir,
                                          dentry->d_parent->d_parent->d_inode,
                                          handle, CACHE_SPACE_INSERT, 0);
        RETURN(rc);
}
626 static int cache_space_hook_rename(void *handle, struct inode *old_dir,
627                         struct dentry *old_dentry, struct inode *new_dir,
628                         struct dentry *new_dentry)
629 {
630         __u64 active_entry;
631         int rc = 0;
632
633         if (new_dentry->d_inode) {
634                 if (cache_pre_leaf_node(new_dentry, NULL, 0))
635                         rc = cache_space_hook_lru(new_dentry->d_inode, NULL,
636                                                   handle, CACHE_SPACE_DELETE,0);
637                 else if (cache_leaf_node(new_dentry, NULL))
638                         rc = cache_space_hook_lru(new_dentry->d_inode,
639                                                   new_dir, handle,
640                                                   CACHE_SPACE_INSERT,0);
641         }
642
643         if (rc || old_dir == new_dir)
644                 RETURN(rc);
645
646         if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
647                 if (cache_leaf_node(new_dentry->d_parent, &active_entry)) {
648                         rc = cache_space_hook_lru(new_dir, NULL, handle,
649                                                   CACHE_SPACE_DELETE, 0);
650                         if (rc)
651                                 RETURN(rc);
652                 }
653                 if (!active_entry)
654                         rc = get_active_entry(new_dir, &active_entry);
655                 active_entry ++;
656                 if (!rc)
657                         rc = set_active_entry(new_dir, &active_entry, handle);
658                 if (rc)
659                         RETURN(rc);
660                 rc = get_active_entry(old_dir, &active_entry);
661                 active_entry --;
662                 if (!rc)
663                         rc = set_active_entry(old_dir, &active_entry, handle);
664         } else if (cache_pre_leaf_node(new_dentry->d_parent, &active_entry, 3))
665                 rc = cache_space_hook_lru(new_dir, NULL, handle,
666                                           CACHE_SPACE_DELETE, 0);
667
668         if (!rc && cache_leaf_node(old_dentry->d_parent, &active_entry))
669                 rc = cache_space_hook_lru(old_dir,
670                                         old_dentry->d_parent->d_parent->d_inode,
671                                         handle, CACHE_SPACE_INSERT, 0);
672         RETURN(rc);
673 }
674
/* Signature shared by all cache-space post-hooks; for non-rename ops
 * only (handle, old_dir, old_dentry) are meaningful. */
typedef int (*cache_hook_op)(void *handle, struct inode *old_dir,
                             struct dentry *old_dentry, struct inode *new_dir,
                             struct dentry *new_dentry);

/* Dispatch table indexed by HOOK_* opcode (GNU pre-C99 designated
 * initializer syntax: "[IDX] value" with no '=').  NULL entries are
 * operations requiring no cache-space work. */
static  cache_hook_op cache_space_hook_ops[HOOK_MAX + 1] = {
        [HOOK_CREATE]     cache_space_hook_create,
        [HOOK_LOOKUP]     cache_space_hook_lookup,
        [HOOK_LINK]       cache_space_hook_link,
        [HOOK_UNLINK]     cache_space_hook_unlink,
        [HOOK_SYMLINK]    cache_space_hook_create,
        [HOOK_MKDIR]      cache_space_hook_mkdir,
        [HOOK_RMDIR]      cache_space_hook_rmdir,
        [HOOK_MKNOD]      cache_space_hook_create,
        [HOOK_RENAME]     cache_space_hook_rename,
        [HOOK_SETATTR]    NULL,
        [HOOK_WRITE]      NULL,
        [HOOK_READDIR]    NULL,
};
693
694 int cache_space_post(int op, void *handle, struct inode *old_dir,
695                struct dentry *old_dentry, struct inode *new_dir,
696                struct dentry *new_dentry)
697 {
698         int rc = 0;
699         ENTRY;
700
701         LASSERT(op <= HOOK_MAX + 1);
702
703         if (cache_space_hook_ops[op]) 
704                 rc = cache_space_hook_ops[op](handle, old_dir, old_dentry,
705                                               new_dir, new_dentry);
706         RETURN(rc);
707 }