- landing of b_fid after merge with b_hd_cleanup_merge.
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/smfs/cache_space.c
 *  A library of functions to manage cache space based on ARC
 *  (modified LRU) replacement algorithm.
 *
 *  Copyright (c) 2004 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_SM

#include <linux/lustre_log.h>
#include <linux/lustre_fsfilt.h>
#include <linux/lustre_smfs.h>

#include "smfs_internal.h"

struct cache_purge_param {
        int nfract;             /* percentage of cache dirty to activate
                                 * cpurge */
        int ndirty;             /* maximum number of objects to write out per
                                   wake-cycle */
        int interval;           /* jiffies delay between cache purge */
        int nfract_sync;        /* percentage of cache dirty to activate cpurge
                                   synchronously */
        int nfract_stop_cpurge; /* percentage of cache dirty to stop cpurge */
} cf_prm = {30, 512, 600 * HZ, 60, 20};

static struct cache_purge_queue smfs_cpq;
static struct cache_purge_queue *cpq = &smfs_cpq;

#define CACHE_HOOK "cache_hook"
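/* Pre- and post-operation callbacks registered with SMFS under the
 * CACHE_HOOK name.  The pre hook makes sure a transaction handle exists
 * and checks whether cache space needs to be reclaimed before the
 * operation runs; the post hook updates the LRU state afterwards. */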
int cache_space_pre_hook(struct inode *inode, void *dentry,
                         void *data1, void *data2, int op, void *handle)
{
        int rc = 0;
        ENTRY;

        if (smfs_cache_hook(inode)) {
                if (!handle) {
                        handle = smfs_trans_start(inode, KML_CACHE_NOOP, NULL);
                        if (IS_ERR(handle)) {
                                RETURN(PTR_ERR(handle));
                        }
                }
                cache_space_pre(inode, op);
        }
        RETURN(rc);
}

int cache_space_post_hook(struct inode *inode, void *de, void *data1,
                          void *data2, int op, void *handle)
{
        int rc = 0;
        ENTRY;
        if (smfs_cache_hook(inode)) {
                struct inode *new_inode = (struct inode*)data1;
                struct dentry *new_dentry = (struct dentry*)data2;
                struct dentry *dentry = (struct dentry *)de;

                LASSERT(handle != NULL);
                rc = cache_space_post(op, handle, inode, dentry, new_inode,
                                      new_dentry);
        }
        RETURN(rc);
}

int cache_space_hook_init(struct super_block *sb)
{
        struct smfs_super_info *smfs_info = S2SMI(sb);
        struct smfs_hook_ops    *cache_hops;
        int    rc = 0;
        ENTRY;

        cache_hops = smfs_alloc_hook_ops(CACHE_HOOK, cache_space_pre_hook,
                                         cache_space_post_hook);
        if (!cache_hops) {
                RETURN(-ENOMEM);
        }
        rc = smfs_register_hook_ops(smfs_info, cache_hops);
        if (rc) {
                smfs_free_hook_ops(cache_hops);
                RETURN(rc);
        }
        SMFS_SET_CACHE_HOOK(smfs_info);

        RETURN(0);
}

int cache_space_hook_exit(struct smfs_super_info *smfs_info)
{
        struct smfs_hook_ops *cache_hops;

        cache_hops = smfs_unregister_hook_ops(smfs_info, CACHE_HOOK);
        smfs_free_hook_ops(cache_hops);

        SMFS_CLEAN_CACHE_HOOK(smfs_info);
        return 0;
}

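/* A "leaf" is an object eligible for the cache LRU: a regular file with
 * a single link, or a directory with no active entries below it.  The
 * KML and LRU log files themselves are never treated as leaves. */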
static int cache_leaf_node(struct dentry *dentry, __u64 *active_entry)
{
        struct inode *inode = dentry->d_inode;

        if (!dentry->d_inode)
                return 0;
        if (S_ISDIR(inode->i_mode)) {
                if (inode->i_nlink != 2)
                        return 0;
                if (!strncmp(dentry->d_name.name, "lost+found",
                             dentry->d_name.len))
                        return 0;
                LASSERT(active_entry != NULL);
                get_active_entry(inode, active_entry);
                return(*active_entry > 0 ? 0 : 1);
        } else {
                if (inode->i_nlink != 1)
                        return 0;
                if (!strncmp(dentry->d_name.name, KML_LOG_NAME, dentry->d_name.len) ||
                    !strncmp(dentry->d_name.name, CACHE_LRU_LOG, dentry->d_name.len))
                        return 0;
                return 1;
        }
}

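/* Decide whether @dentry was a leaf before the operation that has just
 * completed.  The op values follow the callers below: 0 - unlink,
 * 1 - link, 2 - rmdir, 3 - a child directory was added (mkdir/rename). */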
static int cache_pre_leaf_node(struct dentry *dentry, __u64 *active_entry, int op)
{
        if (((op == 0 && dentry->d_inode->i_nlink == 0) ||
            (op == 1 && dentry->d_inode->i_nlink == 2)) &&
            strncmp(dentry->d_name.name, KML_LOG_NAME, dentry->d_name.len) &&
            strncmp(dentry->d_name.name, CACHE_LRU_LOG, dentry->d_name.len))
                return 1;
        else if ((op == 2 && dentry->d_inode->i_nlink == 0) ||
                 (op == 3 && dentry->d_inode->i_nlink == 3)) {
                LASSERT(active_entry != NULL);
                get_active_entry(dentry->d_inode, active_entry);
                return(*active_entry > 0 ? 0 : 1);
        }
        return 0;
}

static int set_lru_logcookie(struct inode *inode, void *handle,
                             struct llog_cookie *logcookie)
{
        struct fsfilt_operations *fsops = I2CSB(inode)->sm_fsfilt;
        int rc;
        ENTRY;

        rc = fsops->fs_set_xattr(inode, handle, XATTR_SMFS_CACHE_LOGCOOKIE,
                                 logcookie, sizeof(*logcookie));
        RETURN(rc);
}

static int get_lru_logcookie(struct inode *inode, struct llog_cookie *logcookie)
{
        struct fsfilt_operations *fsops = I2CSB(inode)->sm_fsfilt;
        int rc;

        ENTRY;
        rc = fsops->fs_get_xattr(inode, XATTR_SMFS_CACHE_LOGCOOKIE,
                                 logcookie, sizeof(*logcookie));
        RETURN(rc);
}

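/* Check whether the object named by an LRU record may be dropped from
 * the cache: it is skipped if it carries a hoard priority or is still
 * busy (extra references or dirty state). */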
static int try2purge_from_cache(struct lustre_id cid,
                                struct lustre_id pid)
{
        struct inode *inode, *parent;
        struct super_block *sb = cpq->cpq_sb;
        __u32 hoard_priority = 0;
        int rc = 0;
        ENTRY;

        inode = iget(sb, cid.li_stc.u.e3s.l3s_ino);
        if (IS_ERR(inode)) {
                CERROR("nonexistent inode: "LPX64"/%u\n",
                       cid.li_stc.u.e3s.l3s_ino,
                       cid.li_stc.u.e3s.l3s_gen);
                RETURN(-ENOENT);
        }
        parent = iget(sb, pid.li_stc.u.e3s.l3s_ino);
        if (IS_ERR(parent)) {
                CERROR("nonexistent inode: "LPX64"/%u\n",
                       pid.li_stc.u.e3s.l3s_ino,
                       pid.li_stc.u.e3s.l3s_gen);
                iput(inode);
                RETURN(-ENOENT);
        }

        CWARN("inode/parent %lu:%lu on the lru list\n",
              inode->i_ino, parent->i_ino);

        rc = get_hoard_priority(inode, &hoard_priority);
        if (hoard_priority) {
                CWARN("inode %lu has hoard priority set\n", inode->i_ino);
                GOTO(out, rc);
        }
        if (atomic_read(&inode->i_count) > 1 || (inode->i_state & I_DIRTY)) {
                CWARN("inode %lu is busy\n", inode->i_ino);
                GOTO(out, rc = 0);
        }

out:
        iput(inode);
        iput(parent);
        RETURN(rc);
}

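/* llog_cat_process() callback: walks the LRU catalog trying to purge the
 * object each record describes, decrementing *data (the number of
 * objects still wanted) and stopping once it reaches zero. */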
static int cache_lru_get_rec_cb(struct llog_handle *llh,
                                struct llog_rec_hdr *rec, void *data)
{
        struct llog_lru_rec *llr;
        int count = *(int *)data, rc = 0;
        ENTRY;

        if (!(le32_to_cpu(llh->lgh_hdr->llh_flags) & LLOG_F_IS_PLAIN)) {
                CERROR("log is not plain\n");
                RETURN(-EINVAL);
        }
        if (rec->lrh_type != CACHE_LRU_REC) {
                CERROR("log record type error\n");
                RETURN(-EINVAL);
        }

        llr = (struct llog_lru_rec *)rec;

        if (try2purge_from_cache(llr->llr_cid, llr->llr_pid) == 1) {
                CDEBUG(D_INODE, "purge ino/gen "LPX64"/%u from cache\n",
                       llr->llr_cid.li_stc.u.e3s.l3s_ino,
                       llr->llr_cid.li_stc.u.e3s.l3s_gen);
                count--;
                if (count == 0)
                        rc = LLOG_PROC_BREAK;
                *(int *)data = count;
        }

        RETURN(rc);
}

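/* Stop background purging once the free-space ratio of the cache
 * filesystem drops below nfract_stop_cpurge percent. */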
static int cpurge_stop(void)
{
        struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
        struct obd_statfs osfs;
        int rc, free;

        rc = fsops->fs_statfs(cpq->cpq_sb, &osfs);
        LASSERT(rc == 0);

        free = osfs.os_bfree * 100;
        if (free < cf_prm.nfract_stop_cpurge * osfs.os_blocks)
                return 1;
        return 0;
}

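/* Report how full the cache is: -1 - below nfract, nothing to do;
 * 0 - above nfract, wake the purge daemon; 1 - above nfract_sync,
 * purge synchronously as well. */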
static int cache_balance_state(void)
{
        struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
        struct obd_statfs osfs;
        int rc, used;

        rc = fsops->fs_statfs(cpq->cpq_sb, &osfs);
        LASSERT(rc == 0);

        used = (osfs.os_blocks - osfs.os_bfree) * 100;
        if (used > cf_prm.nfract * osfs.os_blocks) {
                if (used > cf_prm.nfract_sync * osfs.os_blocks)
                        return 1;
                return 0;
        }
        return -1;
}

void wakeup_cpurge(void)
{
        wake_up(&cpq->cpq_waitq);
}

/* walk the lru llog trying to purge up to *count objects */
static int purge_some_cache(int *count)
{
        int rc;
        ENTRY;

        rc = llog_cat_process(cpq->cpq_loghandle,
                              (llog_cb_t)cache_lru_get_rec_cb,
                              count);
        if (!rc)
                CDEBUG(D_INODE, "not enough objects available\n");

        RETURN(rc);
}

#define CFLUSH_NR 512

static void check_cache_space(void)
{
        int state = cache_balance_state();
        ENTRY;

        if (state < 0) {
                EXIT;
                return;
        }

        wakeup_cpurge();

        if (state > 0) {
                int count = CFLUSH_NR;
                purge_some_cache(&count);
        }
        EXIT;
}

void cache_space_pre(struct inode *inode, int op)
{
        ENTRY;

        /* FIXME: op is not used yet */
        check_cache_space();

        EXIT;
}

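/* Maintain the LRU catalog record for @inode.  op is a bitmask:
 * CACHE_SPACE_DELETE cancels the existing record (its cookie lives in an
 * xattr on the inode), CACHE_SPACE_INSERT adds a new record naming
 * @inode and @parent, and CACHE_SPACE_COMMIT commits the transaction
 * handle. */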
static int cache_space_hook_lru(struct inode *inode, struct inode *parent,
                                void *handle, int op, int flags)
{
        struct fsfilt_operations *fsops = S2SMI(cpq->cpq_sb)->sm_fsfilt;
        struct llog_ctxt *ctxt = cpq->cpq_loghandle->lgh_ctxt;
        struct llog_lru_rec *llr = NULL;
        struct llog_cookie *logcookie = NULL;
        int cookie_size = sizeof(struct llog_cookie);
        int rc = 0, err;
        ENTRY;

        LASSERT(ctxt != NULL);

        if (op & ~(CACHE_SPACE_DELETE | CACHE_SPACE_INSERT |CACHE_SPACE_COMMIT))
                RETURN(-EINVAL);

        OBD_ALLOC(logcookie, cookie_size);
        if (!logcookie)
                GOTO(out, rc = -ENOMEM);

        if (op & CACHE_SPACE_DELETE) {
                rc = get_lru_logcookie(inode, logcookie);
                if (rc < 0)
                        GOTO(out, rc);

                if (logcookie->lgc_lgl.lgl_oid == 0) {
                        CWARN("inode %lu/%u is not in lru list\n",
                              inode->i_ino, inode->i_generation);
                        GOTO(insert, rc = -ENOENT);
                }
                if (flags && llog_cat_half_bottom(logcookie, ctxt->loc_handle))
                        GOTO(out, rc = 0);

                rc = llog_cancel(ctxt, 1, logcookie, 0, NULL);
                if (!rc) {
                        memset(logcookie, 0, cookie_size);
                        rc = set_lru_logcookie(inode, handle, logcookie);
                        if (rc)
                                GOTO(out, rc);
                } else {
                        CERROR("failed at llog_cancel: %d\n", rc);
                        GOTO(out, rc);
                }
        }

insert:
        if (op & CACHE_SPACE_INSERT) {
                LASSERT(parent != NULL);
                OBD_ALLOC(llr, sizeof(*llr));
                if (llr == NULL)
                        GOTO(out, rc = -ENOMEM);

                llr->llr_hdr.lrh_len = llr->llr_tail.lrt_len = sizeof(*llr);
                llr->llr_hdr.lrh_type = CACHE_LRU_REC;

                /* FIXME-UMKA: should we setup fid components here? */
                llr->llr_cid.li_stc.u.e3s.l3s_ino = inode->i_ino;
                llr->llr_cid.li_stc.u.e3s.l3s_gen = inode->i_generation;
                llr->llr_cid.li_stc.u.e3s.l3s_type = inode->i_mode & S_IFMT;

                llr->llr_pid.li_stc.u.e3s.l3s_ino = parent->i_ino;
                llr->llr_pid.li_stc.u.e3s.l3s_gen = parent->i_generation;
                llr->llr_pid.li_stc.u.e3s.l3s_type = parent->i_mode & S_IFMT;

                rc = llog_add(ctxt, &llr->llr_hdr, NULL, logcookie, 1,
                              NULL, NULL, NULL);
                if (rc != 1) {
                        CERROR("failed at llog_add: %d\n", rc);
                        GOTO(out, rc);
                }
                rc = set_lru_logcookie(inode, handle, logcookie);
        }

        if (op & CACHE_SPACE_COMMIT) {
                if (handle) {
                        err = fsops->fs_commit(inode->i_sb, inode, handle, 0);
                        if (err) {
                                CERROR("error committing transaction: %d\n", err);
                                if (!rc)
                                        rc = err;
                        }
                }
        }
out:
        if (logcookie)
                OBD_FREE(logcookie, cookie_size);
        if (llr)
                OBD_FREE(llr, sizeof(*llr));
        RETURN(rc);
}

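/* Background purge daemon: wakes up periodically, or when prodded by
 * wakeup_cpurge(), and tries to reclaim up to ndirty objects from the
 * LRU catalog until it is asked to stop. */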
static int cache_purge_thread(void *args)
{
        unsigned long flags;
        /* cf_prm.interval is already expressed in jiffies */
        struct l_wait_info lwi = LWI_TIMEOUT(cf_prm.interval, NULL, NULL);
        ENTRY;

        lock_kernel();
        kportal_daemonize("wb_cache_purge");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        unlock_kernel();
        complete(&cpq->cpq_comp);

        while (1) {
                int ndirty = cf_prm.ndirty;

                purge_some_cache(&ndirty);
                if (ndirty > 0 || cpurge_stop())
                        l_wait_event(cpq->cpq_waitq,
                                     cpq->cpq_flags & SVC_STOPPING,
                                     &lwi);
                if (cpq->cpq_flags & SVC_STOPPING) {
                        cpq->cpq_flags &= ~SVC_STOPPING;
                        EXIT;
                        break;
                }
        }
        cpq->cpq_flags = SVC_STOPPED;
        complete(&cpq->cpq_comp);
        RETURN(0);
}

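/* Create the cache LRU catalog on the local filesystem and start the
 * purge daemon. */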
int cache_space_hook_setup(struct super_block *sb)
{
        struct llog_ctxt *ctxt;
        int rc;
        ENTRY;

        /* first initialize the cache lru catalog on the local fs */
        rc = llog_catalog_setup(&ctxt, CACHE_LRU_LOG,
                                S2SMI(sb)->smsi_exp,
                                S2SMI(sb)->smsi_ctxt,
                                S2SMI(sb)->sm_fsfilt,
                                S2SMI(sb)->smsi_logs_dir,
                                S2SMI(sb)->smsi_objects_dir);
        if (rc) {
                CERROR("failed to initialize cache lru list catalog: %d\n", rc);
                RETURN(rc);
        }
        cpq->cpq_sb = sb;
        cpq->cpq_loghandle = ctxt->loc_handle;

        /* start cache purge daemon, only one daemon now */
        init_waitqueue_head(&cpq->cpq_waitq);
        init_completion(&cpq->cpq_comp);
        cpq->cpq_flags = 0;

        rc = kernel_thread(cache_purge_thread, NULL, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread: %d\n", rc);
                GOTO(err_out, rc);
        }
        wait_for_completion(&cpq->cpq_comp);

        RETURN(0);
err_out:
        llog_catalog_cleanup(ctxt);
        OBD_FREE(ctxt, sizeof(*ctxt));
        RETURN(rc);
}

int cache_space_hook_cleanup(void)
{
        struct llog_ctxt *ctxt;
        int rc;
        ENTRY;

        init_completion(&cpq->cpq_comp);
        cpq->cpq_flags = SVC_STOPPING;
        wake_up(&cpq->cpq_waitq);
        wait_for_completion(&cpq->cpq_comp);

        ctxt = cpq->cpq_loghandle->lgh_ctxt;
        rc = llog_catalog_cleanup(ctxt);
        OBD_FREE(ctxt, sizeof(*ctxt));

        if (rc)
                CERROR("failed to clean up cache lru list catalog: %d\n", rc);

        RETURN(rc);
}

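/* Per-operation post hooks.  They insert new leaves into the LRU,
 * remove inodes that are no longer leaves, and keep each directory's
 * active-entry count (get/set_active_entry) in step. */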
static int cache_space_hook_create(void *handle, struct inode *dir,
                                   struct dentry *dentry, struct inode *new_dir,
                                   struct dentry *new_dentry)
{
        __u64 active_entry = 0;
        int rc;
        ENTRY;

        LASSERT(cache_leaf_node(dentry, NULL));
        rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
                                  CACHE_SPACE_INSERT, 0);
        if (rc)
                RETURN(rc);
        if (cache_leaf_node(dentry->d_parent, &active_entry)) {
                rc = cache_space_hook_lru(dir, NULL, handle,
                                          CACHE_SPACE_DELETE, 0);
                if (rc)
                        RETURN(rc);
        }
        if (!active_entry)
                rc = get_active_entry(dir, &active_entry);
        active_entry++;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        RETURN(rc);
}

static int cache_space_hook_lookup(void *handle, struct inode *dir,
                                   struct dentry *dentry, struct inode *new_dir,
                                   struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc = 0;
        ENTRY;

        if (cache_leaf_node(dentry, &active_entry))
                rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
                                CACHE_SPACE_DELETE | CACHE_SPACE_INSERT, 1);
        RETURN(rc);
}

static int cache_space_hook_link(void *handle, struct inode *dir,
                                 struct dentry *dentry, struct inode *new_dir,
                                 struct dentry *new_dentry)
{
        __u64 active_entry = 0;
        int rc = 0;
        ENTRY;

        if (cache_pre_leaf_node(dentry, NULL, 1)) {
                rc = cache_space_hook_lru(dentry->d_inode, NULL,
                                          handle, CACHE_SPACE_DELETE, 0);
                if (rc)
                        RETURN(rc);
        }

        if (cache_leaf_node(dentry->d_parent, &active_entry)) {
                rc = cache_space_hook_lru(dir, NULL, handle,
                                          CACHE_SPACE_DELETE, 0);
                if (rc)
                        RETURN(rc);
        }

        if (!active_entry)
                rc = get_active_entry(dir, &active_entry);
        active_entry++;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        RETURN(rc);
}

static int cache_space_hook_unlink(void *handle, struct inode *dir,
                                   struct dentry *dentry, struct inode *new_dir,
                                   struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc = 0;
        ENTRY;

        if (cache_pre_leaf_node(dentry, NULL, 0))
                rc = cache_space_hook_lru(dentry->d_inode, NULL,
                                          handle, CACHE_SPACE_DELETE, 0);
        else if (cache_leaf_node(dentry, NULL))
                rc = cache_space_hook_lru(dentry->d_inode, dir,
                                          handle, CACHE_SPACE_INSERT, 0);
        if (rc)
                RETURN(rc);

        rc = get_active_entry(dir, &active_entry);
        active_entry--;
        if (!rc)
                rc = set_active_entry(dir, &active_entry, handle);
        if (!rc && cache_leaf_node(dentry->d_parent, &active_entry))
                rc = cache_space_hook_lru(dir,
                                          dentry->d_parent->d_parent->d_inode,
                                          handle, CACHE_SPACE_INSERT, 0);
        RETURN(rc);
}

static int cache_space_hook_mkdir(void *handle, struct inode *dir,
                                  struct dentry *dentry, struct inode *new_dir,
                                  struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc;
        ENTRY;

        LASSERT(cache_leaf_node(dentry, &active_entry));
        rc = cache_space_hook_lru(dentry->d_inode, dir, handle,
                                  CACHE_SPACE_INSERT, 0);

        if (!rc && cache_pre_leaf_node(dentry->d_parent, &active_entry, 3))
                rc = cache_space_hook_lru(dir, NULL, handle,
                                          CACHE_SPACE_DELETE, 0);
        RETURN(rc);
}

static int cache_space_hook_rmdir(void *handle, struct inode *dir,
                                  struct dentry *dentry, struct inode *new_dir,
                                  struct dentry *new_dentry)
{
        __u64 active_entry;
        int rc;
        ENTRY;

        LASSERT(cache_pre_leaf_node(dentry, &active_entry, 2));
        rc = cache_space_hook_lru(dentry->d_inode, NULL, handle,
                                  CACHE_SPACE_DELETE, 0);

        if (!rc && cache_leaf_node(dentry->d_parent, &active_entry))
                rc = cache_space_hook_lru(dir,
                                          dentry->d_parent->d_parent->d_inode,
                                          handle, CACHE_SPACE_INSERT, 0);
        RETURN(rc);
}

static int cache_space_hook_rename(void *handle, struct inode *old_dir,
                                   struct dentry *old_dentry, struct inode *new_dir,
                                   struct dentry *new_dentry)
{
        __u64 active_entry = 0;
        int rc = 0;
        ENTRY;

        if (new_dentry->d_inode) {
                if (cache_pre_leaf_node(new_dentry, NULL, 0))
                        rc = cache_space_hook_lru(new_dentry->d_inode, NULL,
                                                  handle, CACHE_SPACE_DELETE, 0);
                else if (cache_leaf_node(new_dentry, NULL))
                        rc = cache_space_hook_lru(new_dentry->d_inode,
                                                  new_dir, handle,
                                                  CACHE_SPACE_INSERT, 0);
        }

        if (rc || old_dir == new_dir)
                RETURN(rc);

        if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
                if (cache_leaf_node(new_dentry->d_parent, &active_entry)) {
                        rc = cache_space_hook_lru(new_dir, NULL, handle,
                                                  CACHE_SPACE_DELETE, 0);
                        if (rc)
                                RETURN(rc);
                }
                if (!active_entry)
                        rc = get_active_entry(new_dir, &active_entry);
                active_entry++;
                if (!rc)
                        rc = set_active_entry(new_dir, &active_entry, handle);
                if (rc)
                        RETURN(rc);
                rc = get_active_entry(old_dir, &active_entry);
                active_entry--;
                if (!rc)
                        rc = set_active_entry(old_dir, &active_entry, handle);
        } else if (cache_pre_leaf_node(new_dentry->d_parent, &active_entry, 3)) {
                rc = cache_space_hook_lru(new_dir, NULL, handle,
                                          CACHE_SPACE_DELETE, 0);
        }

        if (!rc && cache_leaf_node(old_dentry->d_parent, &active_entry)) {
                rc = cache_space_hook_lru(old_dir,
                                          old_dentry->d_parent->d_parent->d_inode,
                                          handle, CACHE_SPACE_INSERT, 0);
        }

        RETURN(rc);
}

typedef int (*cache_hook_op)(void *handle, struct inode *old_dir,
                             struct dentry *old_dentry, struct inode *new_dir,
                             struct dentry *new_dentry);

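/* Dispatch table indexed by SMFS hook opcode; NULL entries mean the
 * operation does not affect cache space accounting. */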
static cache_hook_op cache_space_hook_ops[HOOK_MAX + 1] = {
        [HOOK_CREATE]     cache_space_hook_create,
        [HOOK_LOOKUP]     cache_space_hook_lookup,
        [HOOK_LINK]       cache_space_hook_link,
        [HOOK_UNLINK]     cache_space_hook_unlink,
        [HOOK_SYMLINK]    cache_space_hook_create,
        [HOOK_MKDIR]      cache_space_hook_mkdir,
        [HOOK_RMDIR]      cache_space_hook_rmdir,
        [HOOK_MKNOD]      cache_space_hook_create,
        [HOOK_RENAME]     cache_space_hook_rename,
        [HOOK_SETATTR]    NULL,
        [HOOK_WRITE]      NULL,
        [HOOK_READDIR]    NULL,
};

int cache_space_post(int op, void *handle, struct inode *old_dir,
                     struct dentry *old_dentry, struct inode *new_dir,
                     struct dentry *new_dentry)
{
        int rc = 0;
        ENTRY;

        LASSERT(op <= HOOK_MAX);

        if (cache_space_hook_ops[op])
                rc = cache_space_hook_ops[op](handle, old_dir, old_dentry,
                                              new_dir, new_dentry);
        RETURN(rc);
}