1 /* -*- MODE: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * lustre/fld/fld_cache.c
5 * FLD (Fids Location Database)
7 * Copyright (C) 2006 Cluster File Systems, Inc.
8 * Author: Yury Umanets <umka@clusterfs.com>
10 * This file is part of the Lustre file system, http://www.lustre.org
11 * Lustre is a trademark of Cluster File Systems, Inc.
13 * You may have signed or agreed to another license before downloading
14 * this software. If so, you are bound by the terms and conditions
15 * of that agreement, and the following does not apply to you. See the
16 * LICENSE file included with this distribution for more information.
18 * If you did not agree to a different license, then this copy of Lustre
19 * is open source software; you can redistribute it and/or modify it
20 * under the terms of version 2 of the GNU General Public License as
21 * published by the Free Software Foundation.
23 * In either case, Lustre is distributed in the hope that it will be
24 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
25 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * license text for more details.
29 # define EXPORT_SYMTAB
31 #define DEBUG_SUBSYSTEM S_FLD
34 # include <libcfs/libcfs.h>
35 # include <linux/module.h>
36 # include <linux/jbd.h>
37 # include <asm/div64.h>
38 #else /* __KERNEL__ */
39 # include <liblustre.h>
40 # include <libcfs/list.h>
44 #include <obd_class.h>
45 #include <lustre_ver.h>
46 #include <obd_support.h>
47 #include <lprocfs_status.h>
49 #include <dt_object.h>
50 #include <md_object.h>
51 #include <lustre_req_layout.h>
52 #include <lustre_fld.h>
53 #include "fld_internal.h"
/*
 * Hash a sequence number to a value used for bucket selection in
 * fld_cache_bucket().
 * NOTE(review): the function body is missing from this listing
 * (the following line is dropped) -- confirm against the full source.
 */
56 static inline __u32 fld_cache_hash(seqno_t seq)
/*
 * Allocate and initialise an FLD cache.
 *
 * \param hash_size       number of hash buckets; must be a power of two
 *                        (asserted via IS_PO2), since bucket selection
 *                        uses fci_hash_mask = hash_size - 1
 * \param cache_size      maximum number of cached entries before
 *                        fld_cache_shrink() starts evicting
 * \param cache_threshold number of entries to keep free when shrinking;
 *                        must be strictly less than cache_size (asserted)
 *
 * \retval pointer to the new cache on success
 * \retval ERR_PTR(-ENOMEM) if allocation of the cache or its hash table
 *         fails
 *
 * NOTE(review): this listing has dropped several short lines (the
 * continuation of the parameter list naming cache_threshold, '{',
 * 'int i;', 'ENTRY;', the OBD_ALLOC_PTR of cache itself, the cleanup
 * path freeing cache when the hash table allocation fails, the final
 * RETURN(cache), and closing braces) -- confirm control flow against
 * the complete source.
 */
61 struct fld_cache_info *fld_cache_init(int hash_size, int cache_size,
64 struct fld_cache_info *cache;
/* hash_size must be a power of two so the mask-based bucket lookup works */
68 /* check if size is power of two */
69 LASSERT(IS_PO2(hash_size));
70 LASSERT(cache_threshold < cache_size);
/* allocation of cache itself happens here in the full source; on failure: */
75 RETURN(ERR_PTR(-ENOMEM));
77 INIT_LIST_HEAD(&cache->fci_lru);
79 cache->fci_cache_count = 0;
80 spin_lock_init(&cache->fci_lock);
81 cache->fci_hash_size = hash_size;
82 cache->fci_cache_size = cache_size;
83 cache->fci_threshold = cache_threshold;
85 /* init fld cache info */
86 cache->fci_hash_mask = hash_size - 1;
87 OBD_ALLOC(cache->fci_hash_table,
88 hash_size * sizeof(*cache->fci_hash_table));
89 if (cache->fci_hash_table == NULL) {
/* presumably the cache struct is freed here before returning -- confirm */
91 RETURN(ERR_PTR(-ENOMEM));
94 for (i = 0; i < hash_size; i++)
95 INIT_HLIST_HEAD(&cache->fci_hash_table[i]);
97 CDEBUG(D_INFO|D_WARNING, "FLD cache - htable size: %d, "
98 "cache size: %d, cache threshold: %d\n",
99 hash_size, cache_size, cache_threshold);
103 EXPORT_SYMBOL(fld_cache_init);
/*
 * Tear down an FLD cache created by fld_cache_init(): unlink every
 * entry from its hash bucket and from the LRU list under fci_lock,
 * then free the hash table and the cache structure itself.
 *
 * \param cache  the cache to destroy; must be non-NULL (asserted)
 *
 * NOTE(review): no OBD_FREE_PTR(flde) for the individual entries and
 * no final OBD_FREE of the cache struct are visible here -- those
 * lines appear to be dropped from this listing; confirm against the
 * complete source that entries are actually freed, not just unlinked.
 */
105 void fld_cache_fini(struct fld_cache_info *cache)
107 struct fld_cache_entry *flde;
108 struct hlist_head *bucket;
109 struct hlist_node *scan;
110 struct hlist_node *next;
114 LASSERT(cache != NULL);
116 /* free all cache entries */
117 spin_lock(&cache->fci_lock);
118 for (i = 0; i < cache->fci_hash_size; i++) {
119 bucket = cache->fci_hash_table + i;
/* _safe variant: flde is unlinked (and freed in the full source) mid-walk */
120 hlist_for_each_entry_safe(flde, scan, next, bucket, fce_list) {
121 hlist_del_init(&flde->fce_list);
122 list_del_init(&flde->fce_lru);
123 cache->fci_cache_count--;
127 spin_unlock(&cache->fci_lock);
129 /* free cache hash table and cache itself */
130 OBD_FREE(cache->fci_hash_table, cache->fci_hash_size *
131 sizeof(*cache->fci_hash_table));
136 EXPORT_SYMBOL(fld_cache_fini);
/*
 * Map a sequence number to its hash bucket. fci_hash_mask is
 * fci_hash_size - 1, so this relies on the power-of-two table size
 * asserted in fld_cache_init().
 */
138 static inline struct hlist_head *
139 fld_cache_bucket(struct fld_cache_info *cache, seqno_t seq)
141 return cache->fci_hash_table + (fld_cache_hash(seq) &
142 cache->fci_hash_mask);
146 * check if cache needs to be shrunk. If so - do it. Tries to keep all
147 * collision lists well balanced. That is, checks all of them and removes one
148 * entry in list and so on.
/*
 * Evict least-recently-used entries (walking fci_lru from the tail)
 * until cache_count + threshold fits within cache_size, or the LRU
 * list is exhausted. Caller must hold fci_lock.
 *
 * NOTE(review): dropped lines in this listing include '{', the early
 * RETURN when the cache is below capacity, the 'curr = curr->prev;'
 * advance inside the loop, the OBD_FREE_PTR(flde) of the evicted
 * entry, and the final RETURN -- confirm against the complete source.
 */
150 static int fld_cache_shrink(struct fld_cache_info *cache)
152 struct fld_cache_entry *flde;
153 struct list_head *curr;
156 LASSERT(cache != NULL);
/* nothing to do while below capacity (early return dropped from listing) */
158 if (cache->fci_cache_count < cache->fci_cache_size)
/* start from the LRU tail: least recently used entry */
161 curr = cache->fci_lru.prev;
163 while (cache->fci_cache_count + cache->fci_threshold >
164 cache->fci_cache_size && curr != &cache->fci_lru)
166 flde = list_entry(curr, struct fld_cache_entry, fce_lru);
169 hlist_del_init(&flde->fce_list);
170 list_del_init(&flde->fce_lru);
171 cache->fci_cache_count--;
/*
 * Insert a seq -> mds mapping into the cache.
 *
 * Shrinks the cache first if needed, checks for a duplicate seq, then
 * drops fci_lock to allocate the new entry and re-checks for a
 * duplicate added by a racing thread before linking the entry into
 * its hash bucket and the head of the LRU list.
 *
 * \param cache  the FLD cache
 * \param seq    sequence number to map
 * \param mds    MDS index the sequence lives on
 *
 * \retval 0        on success (presumed -- final RETURN dropped from
 *                  this listing)
 * \retval -EEXIST  if an entry for seq already exists
 * \retval -ENOMEM  presumably, if entry allocation fails -- confirm
 *
 * NOTE(review): this listing drops the braces around the duplicate-
 * check loops, the OBD_ALLOC_PTR of flde and its error path, the
 * freeing of flde when the racing duplicate is found, and the lines
 * assigning fce_seq/fce_mds -- confirm against the complete source.
 */
178 int fld_cache_insert(struct fld_cache_info *cache,
179 seqno_t seq, mdsno_t mds)
181 struct fld_cache_entry *flde, *fldt;
182 struct hlist_head *bucket;
183 struct hlist_node *scan;
187 spin_lock(&cache->fci_lock);
189 /* check if need to shrink cache */
190 rc = fld_cache_shrink(cache);
/* shrink failure path: unlock and bail (RETURN dropped from listing) */
192 spin_unlock(&cache->fci_lock);
196 /* check if cache already has the entry with such a seq */
197 bucket = fld_cache_bucket(cache, seq);
198 hlist_for_each_entry(fldt, scan, bucket, fce_list) {
199 if (fldt->fce_seq == seq)
200 spin_unlock(&cache->fci_lock);
201 RETURN(rc = -EEXIST);
/* lock dropped across allocation so it may sleep safely */
203 spin_unlock(&cache->fci_lock);
205 /* allocate new entry */
211 * check if cache has the entry with such a seq again. It could be added
212 * while we were allocating new entry.
214 spin_lock(&cache->fci_lock);
215 hlist_for_each_entry(fldt, scan, bucket, fce_list) {
216 if (fldt->fce_seq == seq) {
/* racing insert won: free our entry (free dropped from listing) and exit */
217 spin_unlock(&cache->fci_lock);
223 /* add new entry to cache and lru list */
224 INIT_HLIST_NODE(&flde->fce_list);
/* fce_seq/fce_mds assignments (lines 225-227) missing from this listing */
228 hlist_add_head(&flde->fce_list, bucket);
/* newest entry goes to the LRU head; eviction takes from the tail */
229 list_add(&flde->fce_lru, &cache->fci_lru);
230 cache->fci_cache_count++;
232 spin_unlock(&cache->fci_lock);
236 EXPORT_SYMBOL(fld_cache_insert);
/*
 * Remove the cache entry for @seq, if present: unlink it from its
 * hash bucket and the LRU list and drop the count, all under
 * fci_lock. No-op if seq is not cached.
 *
 * NOTE(review): the OBD_FREE_PTR(flde) and the 'break' after a match
 * (original lines ~253-255) are dropped from this listing -- confirm
 * the entry is actually freed in the complete source.
 */
238 void fld_cache_delete(struct fld_cache_info *cache, seqno_t seq)
240 struct fld_cache_entry *flde;
241 struct hlist_node *scan, *n;
242 struct hlist_head *bucket;
245 bucket = fld_cache_bucket(cache, seq);
247 spin_lock(&cache->fci_lock);
248 hlist_for_each_entry_safe(flde, scan, n, bucket, fce_list) {
249 if (flde->fce_seq == seq) {
250 hlist_del_init(&flde->fce_list);
251 list_del_init(&flde->fce_lru);
252 cache->fci_cache_count--;
260 spin_unlock(&cache->fci_lock);
262 EXPORT_SYMBOL(fld_cache_delete);
/*
 * Look up the MDS index for @seq.
 *
 * On a hit, stores the mapping in *mds and moves the entry to the
 * head of the LRU list (most recently used).
 *
 * \retval 0        on a cache hit (presumed -- the RETURN after the
 *                  match and the final RETURN(-ENOENT) are dropped
 *                  from this listing; confirm against the full source)
 * \retval -ENOENT  presumably, when seq is not cached
 */
264 int fld_cache_lookup(struct fld_cache_info *cache,
265 seqno_t seq, mdsno_t *mds)
267 struct fld_cache_entry *flde;
268 struct hlist_node *scan, *n;
269 struct hlist_head *bucket;
272 bucket = fld_cache_bucket(cache, seq);
274 spin_lock(&cache->fci_lock);
275 hlist_for_each_entry_safe(flde, scan, n, bucket, fce_list) {
276 if (flde->fce_seq == seq) {
277 *mds = flde->fce_mds;
279 /* move found entry to the head of lru list */
280 list_del(&flde->fce_lru);
281 list_add(&flde->fce_lru, &cache->fci_lru);
283 spin_unlock(&cache->fci_lock);
287 spin_unlock(&cache->fci_lock);
290 EXPORT_SYMBOL(fld_cache_lookup);
/*
 * Userspace (!__KERNEL__ / liblustre) stub of fld_cache_insert -- the
 * body is missing from this listing; presumably a no-op returning
 * -ENOTSUPP or 0. Confirm against the full source.
 */
292 int fld_cache_insert(struct fld_cache_info *cache,
293 seqno_t seq, mdsno_t mds)
297 EXPORT_SYMBOL(fld_cache_insert);
/*
 * Userspace (!__KERNEL__ / liblustre) stub of fld_cache_delete -- the
 * rest of the signature and the body are missing from this listing;
 * presumably a no-op. Confirm against the full source.
 */
299 void fld_cache_delete(struct fld_cache_info *cache,
304 EXPORT_SYMBOL(fld_cache_delete);
/*
 * Userspace (!__KERNEL__ / liblustre) stub of fld_cache_lookup -- the
 * body is missing from this listing; presumably returns -ENOTSUPP
 * without touching *mds. Confirm against the full source.
 */
306 int fld_cache_lookup(struct fld_cache_info *cache,
307 seqno_t seq, mdsno_t *mds)
311 EXPORT_SYMBOL(fld_cache_lookup);