1 /* -*- MODE: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * lustre/fld/fld_cache.c
5 * FLD (Fids Location Database)
7 * Copyright (C) 2006 Cluster File Systems, Inc.
8 * Author: Yury Umanets <umka@clusterfs.com>
10 * This file is part of the Lustre file system, http://www.lustre.org
11 * Lustre is a trademark of Cluster File Systems, Inc.
13 * You may have signed or agreed to another license before downloading
14 * this software. If so, you are bound by the terms and conditions
15 * of that agreement, and the following does not apply to you. See the
16 * LICENSE file included with this distribution for more information.
18 * If you did not agree to a different license, then this copy of Lustre
19 * is open source software; you can redistribute it and/or modify it
20 * under the terms of version 2 of the GNU General Public License as
21 * published by the Free Software Foundation.
23 * In either case, Lustre is distributed in the hope that it will be
24 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
25 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * license text for more details.
29 # define EXPORT_SYMTAB
31 #define DEBUG_SUBSYSTEM S_FLD
34 # include <libcfs/libcfs.h>
35 # include <linux/module.h>
36 # include <linux/jbd.h>
37 # include <asm/div64.h>
38 #else /* __KERNEL__ */
39 # include <liblustre.h>
40 # include <libcfs/list.h>
44 #include <obd_class.h>
45 #include <lustre_ver.h>
46 #include <obd_support.h>
47 #include <lprocfs_status.h>
49 #include <dt_object.h>
50 #include <md_object.h>
51 #include <lustre_req_layout.h>
52 #include <lustre_fld.h>
53 #include "fld_internal.h"
/*
 * Map a sequence number to a hash value used for bucket selection
 * (the caller masks the result with fci_hash_mask).
 * NOTE(review): the function body is missing from this extraction --
 * presumably a trivial fold/cast of @seq; verify against the full file.
 */
56 static inline __u32 fld_cache_hash(seqno_t seq)
/*
 * Empty the FLD cache: remove every cached entry.
 *
 * Walks all hash buckets under fci_lock; each entry is unlinked from
 * both its hash chain (fce_list) and the global LRU list (fce_lru),
 * and the live-entry counter fci_cache_count is decremented.
 *
 * NOTE(review): lines are missing from this extraction (declaration of
 * the loop index 'i', the per-entry free, loop/function closing braces,
 * ENTRY/EXIT) -- consult the complete file before editing.
 */
61 void fld_cache_flush(struct fld_cache *cache)
63 struct fld_cache_entry *flde;
64 struct hlist_head *bucket;
65 struct hlist_node *scan;
66 struct hlist_node *next;
70 /* Free all cache entries. */
71 spin_lock(&cache->fci_lock);
72 for (i = 0; i < cache->fci_hash_size; i++) {
73 bucket = cache->fci_hash_table + i;
74 hlist_for_each_entry_safe(flde, scan, next, bucket, fce_list) {
75 hlist_del_init(&flde->fce_list);
76 list_del_init(&flde->fce_lru);
77 cache->fci_cache_count--;
81 spin_unlock(&cache->fci_lock);
/*
 * Allocate and initialize an FLD cache.
 *
 * \param name            human-readable cache name, copied into fci_name
 * \param hash_size       number of hash buckets; must be a power of two
 *                        (asserted) so (hash - 1) works as a mask
 * \param cache_size      maximum number of cached entries
 * \param cache_threshold shrink headroom; must be < cache_size (asserted)
 *
 * \retval pointer to the new cache on success
 * \retval ERR_PTR(-ENOMEM) if either allocation fails
 *
 * NOTE(review): the allocation of 'cache' itself, ENTRY/RETURN, the 'i'
 * declaration and several closing braces are missing from this extraction.
 */
85 struct fld_cache *fld_cache_init(const char *name, int hash_size,
86 int cache_size, int cache_threshold)
88 struct fld_cache *cache;
92 LASSERT(name != NULL);
93 LASSERT(IS_PO2(hash_size));
94 LASSERT(cache_threshold < cache_size);
98 RETURN(ERR_PTR(-ENOMEM));
100 INIT_LIST_HEAD(&cache->fci_lru);
102 cache->fci_cache_count = 0;
103 spin_lock_init(&cache->fci_lock);
/* NOTE(review): strncpy() does not guarantee NUL termination when
 * strlen(name) >= sizeof(fci_name); confirm fci_name is large enough
 * or that callers pass short names. */
105 strncpy(cache->fci_name, name,
106 sizeof(cache->fci_name));
108 cache->fci_hash_size = hash_size;
109 cache->fci_cache_size = cache_size;
110 cache->fci_threshold = cache_threshold;
112 /* Init fld cache info. */
113 cache->fci_hash_mask = hash_size - 1;
114 OBD_ALLOC(cache->fci_hash_table,
115 hash_size * sizeof(*cache->fci_hash_table));
116 if (cache->fci_hash_table == NULL) {
/* Hash-table allocation failed; the cache struct itself is presumably
 * freed in the elided line just above this RETURN. */
118 RETURN(ERR_PTR(-ENOMEM));
/* Start with every bucket empty and statistics zeroed. */
121 for (i = 0; i < hash_size; i++)
122 INIT_HLIST_HEAD(&cache->fci_hash_table[i]);
123 memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));
125 CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
126 cache->fci_name, cache_size, cache_threshold);
130 EXPORT_SYMBOL(fld_cache_init);
/*
 * Tear down an FLD cache created by fld_cache_init().
 *
 * Flushes all entries, prints hit-rate statistics (cache hits as a
 * percentage of total lookups, computed with do_div for 64-bit safety),
 * and frees the hash table.
 *
 * NOTE(review): the 'pct' declaration, the else-branch of the stats
 * check, the freeing of the cache struct itself, and closing braces are
 * missing from this extraction.
 */
132 void fld_cache_fini(struct fld_cache *cache)
137 LASSERT(cache != NULL);
138 fld_cache_flush(cache);
140 if (cache->fci_stat.fst_count > 0) {
141 pct = cache->fci_stat.fst_cache * 100;
142 do_div(pct, cache->fci_stat.fst_count);
147 printk("FLD cache statistics (%s):\n", cache->fci_name);
148 printk(" Total reqs: "LPU64"\n", cache->fci_stat.fst_count);
149 printk(" Cache reqs: "LPU64"\n", cache->fci_stat.fst_cache);
150 printk(" Cache hits: "LPU64"%%\n", pct);
152 OBD_FREE(cache->fci_hash_table, cache->fci_hash_size *
153 sizeof(*cache->fci_hash_table));
158 EXPORT_SYMBOL(fld_cache_fini);
/*
 * Return the hash bucket that sequence @seq maps to.
 * Relies on fci_hash_mask == fci_hash_size - 1 (hash size is a power
 * of two, asserted at init time).
 */
160 static inline struct hlist_head *
161 fld_cache_bucket(struct fld_cache *cache, seqno_t seq)
163 return cache->fci_hash_table + (fld_cache_hash(seq) &
164 cache->fci_hash_mask);
168 * Check if cache needs to be shrinked. If so - do it. Tries to keep all
169 * collision lists well balanced. That is, checks all of them and removes one
170 * entry in list and so on.
/*
 * Shrink the cache by evicting entries from the cold end of the LRU
 * until (count + threshold) <= cache_size, so an insert afterwards has
 * headroom. Caller must hold fci_lock. Returns 0 (no-op) when the
 * cache is below its size limit.
 *
 * NOTE(review): the 'num' declaration, advancing of 'curr', the
 * per-entry free, return statements and closing braces are missing
 * from this extraction.
 */
172 static int fld_cache_shrink(struct fld_cache *cache)
174 struct fld_cache_entry *flde;
175 struct list_head *curr;
179 LASSERT(cache != NULL);
181 if (cache->fci_cache_count < cache->fci_cache_size)
/* Start from the LRU tail: least-recently-used entries go first. */
184 curr = cache->fci_lru.prev;
186 while (cache->fci_cache_count + cache->fci_threshold >
187 cache->fci_cache_size && curr != &cache->fci_lru)
189 flde = list_entry(curr, struct fld_cache_entry, fce_lru);
192 hlist_del_init(&flde->fce_list);
193 list_del_init(&flde->fce_lru);
194 cache->fci_cache_count--;
199 CDEBUG(D_INFO, "%s: FLD cache - Shrinked by "
200 "%d entries\n", cache->fci_name, num);
/*
 * Insert a (seq -> mds) mapping into the cache.
 *
 * Flow: shrink if needed, check for a duplicate seq under fci_lock,
 * drop the lock to allocate the new entry, then re-take the lock and
 * re-check for a duplicate (another thread may have inserted the same
 * seq while the lock was dropped) before linking the entry into its
 * hash bucket and the head of the LRU.
 *
 * \retval 0        on success (presumed; final RETURN is elided)
 * \retval -EEXIST  if an entry for @seq is already cached
 *
 * NOTE(review): the 'rc' declaration, the OBD_ALLOC of 'flde', the
 * duplicate-found cleanup in the second check, assignment of
 * fce_seq/fce_mds, and closing braces are missing from this extraction.
 */
205 int fld_cache_insert(struct fld_cache *cache,
206 seqno_t seq, mdsno_t mds)
208 struct fld_cache_entry *flde, *fldt;
209 struct hlist_head *bucket;
210 struct hlist_node *scan;
214 spin_lock(&cache->fci_lock);
216 /* Check if need to shrink cache. */
217 rc = fld_cache_shrink(cache);
219 spin_unlock(&cache->fci_lock);
223 /* Check if cache already has the entry with such a seq. */
224 bucket = fld_cache_bucket(cache, seq);
225 hlist_for_each_entry(fldt, scan, bucket, fce_list) {
226 if (fldt->fce_seq == seq) {
227 spin_unlock(&cache->fci_lock);
228 RETURN(rc = -EEXIST);
231 spin_unlock(&cache->fci_lock);
233 /* Allocate new entry. */
239 * Check if cache has the entry with such a seq again. It could be added
240 * while we were allocating new entry.
242 spin_lock(&cache->fci_lock);
243 hlist_for_each_entry(fldt, scan, bucket, fce_list) {
244 if (fldt->fce_seq == seq) {
245 spin_unlock(&cache->fci_lock);
251 /* Add new entry to cache and lru list. */
252 INIT_HLIST_NODE(&flde->fce_list);
256 hlist_add_head(&flde->fce_list, bucket);
257 list_add(&flde->fce_lru, &cache->fci_lru);
258 cache->fci_cache_count++;
260 spin_unlock(&cache->fci_lock);
264 EXPORT_SYMBOL(fld_cache_insert);
/*
 * Remove the cache entry for sequence @seq, if present.
 *
 * Scans only the bucket @seq hashes to, under fci_lock; on a match the
 * entry is unlinked from its hash chain and the LRU and the counter is
 * decremented. A miss is silently ignored.
 *
 * NOTE(review): the per-entry free/break and closing braces are missing
 * from this extraction.
 */
266 void fld_cache_delete(struct fld_cache *cache, seqno_t seq)
268 struct fld_cache_entry *flde;
269 struct hlist_node *scan, *n;
270 struct hlist_head *bucket;
273 bucket = fld_cache_bucket(cache, seq);
275 spin_lock(&cache->fci_lock);
276 hlist_for_each_entry_safe(flde, scan, n, bucket, fce_list) {
277 if (flde->fce_seq == seq) {
278 hlist_del_init(&flde->fce_list);
279 list_del_init(&flde->fce_lru);
280 cache->fci_cache_count--;
288 spin_unlock(&cache->fci_lock);
290 EXPORT_SYMBOL(fld_cache_delete);
/*
 * Look up the MDS number for sequence @seq.
 *
 * On a hit: stores the MDS number through @mds, moves the entry to the
 * head of the LRU (most recently used), bumps the hit counter and
 * returns (return value elided here -- presumably 0). Every call bumps
 * the total-request counter fst_count.
 *
 * \param[out] mds  receives the MDS number on a cache hit
 *
 * NOTE(review): the hit/miss RETURN statements and closing braces are
 * missing from this extraction; the miss path presumably returns -ENOENT.
 */
292 int fld_cache_lookup(struct fld_cache *cache,
293 seqno_t seq, mdsno_t *mds)
295 struct fld_cache_entry *flde;
296 struct hlist_node *scan, *n;
297 struct hlist_head *bucket;
300 bucket = fld_cache_bucket(cache, seq);
302 spin_lock(&cache->fci_lock);
303 cache->fci_stat.fst_count++;
304 hlist_for_each_entry_safe(flde, scan, n, bucket, fce_list) {
305 if (flde->fce_seq == seq) {
306 *mds = flde->fce_mds;
307 list_del(&flde->fce_lru);
308 list_add(&flde->fce_lru, &cache->fci_lru);
309 cache->fci_stat.fst_cache++;
310 spin_unlock(&cache->fci_lock);
314 spin_unlock(&cache->fci_lock);
317 EXPORT_SYMBOL(fld_cache_lookup);
/*
 * Stub variants of the cache API, apparently from an #else branch that
 * compiles the FLD cache out (the #else/#endif directives themselves
 * are missing from this extraction). Bodies are elided; presumably
 * insert/lookup return an error or no-op status and delete does
 * nothing -- verify against the full file.
 */
319 int fld_cache_insert(struct fld_cache *cache,
320 seqno_t seq, mdsno_t mds)
324 EXPORT_SYMBOL(fld_cache_insert);
326 void fld_cache_delete(struct fld_cache *cache,
331 EXPORT_SYMBOL(fld_cache_delete);
333 int fld_cache_lookup(struct fld_cache *cache,
334 seqno_t seq, mdsno_t *mds)
338 EXPORT_SYMBOL(fld_cache_lookup);