/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/fld/fld_cache.c
 *
 * FLD (Fids Location Database)
 *
 * Author: Pravin Shelar <pravin.shelar@sun.com>
 * Author: Yury Umanets <umka@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_FLD

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# include <linux/module.h>
# include <linux/jbd.h>
# include <asm/div64.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <libcfs/list.h>
#endif

#include <obd_class.h>
#include <lustre_ver.h>
#include <obd_support.h>
#include <lprocfs_status.h>

#include <dt_object.h>
#include <md_object.h>
#include <lustre_req_layout.h>
#include <lustre_fld.h>
#include "fld_internal.h"
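
/**
 * Create a new FLD cache.
 *
 * \a cache_threshold must be smaller than \a cache_size: fld_cache_shrink()
 * trims the LRU list until fci_cache_count + fci_threshold fits within
 * fci_cache_size.
 */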
struct fld_cache *fld_cache_init(const char *name,
                                 int cache_size, int cache_threshold)
{
        struct fld_cache *cache;
        ENTRY;

        LASSERT(name != NULL);
        LASSERT(cache_threshold < cache_size);

        OBD_ALLOC_PTR(cache);
        if (cache == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        CFS_INIT_LIST_HEAD(&cache->fci_entries_head);
        CFS_INIT_LIST_HEAD(&cache->fci_lru);

        cache->fci_cache_count = 0;
        spin_lock_init(&cache->fci_lock);
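        /* fci_lock protects both lists and the statistics below; it is
         * taken by fld_cache_insert(), fld_cache_lookup() and
         * fld_cache_flush(). */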

        strncpy(cache->fci_name, name,
                sizeof(cache->fci_name));

        cache->fci_cache_size = cache_size;
        cache->fci_threshold = cache_threshold;

        /* Init fld cache info. */
        memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));

        CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
               cache->fci_name, cache_size, cache_threshold);

        RETURN(cache);
}
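
/**
 * Destroy the FLD cache: flush all cached entries and report the hit
 * ratio accumulated in fci_stat.
 */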
void fld_cache_fini(struct fld_cache *cache)
{
        __u64 pct;
        ENTRY;

        LASSERT(cache != NULL);
        fld_cache_flush(cache);

        if (cache->fci_stat.fst_count > 0) {
                pct = cache->fci_stat.fst_cache * 100;
                do_div(pct, cache->fci_stat.fst_count);
        } else {
                pct = 0;
        }

        CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
        CDEBUG(D_INFO, "  Total reqs: "LPU64"\n", cache->fci_stat.fst_count);
        CDEBUG(D_INFO, "  Cache reqs: "LPU64"\n", cache->fci_stat.fst_cache);
        CDEBUG(D_INFO, "  Cache hits: "LPU64"%%\n", pct);

        OBD_FREE_PTR(cache);

        EXIT;
}

/**
 * delete given node from list.
 */
static inline void fld_cache_entry_delete(struct fld_cache *cache,
                                          struct fld_cache_entry *node)
{
        cfs_list_del(&node->fce_list);
        cfs_list_del(&node->fce_lru);
        cache->fci_cache_count--;
        OBD_FREE_PTR(node);
}

/**
 * fix list by checking new entry with NEXT entry in order.
 */
static void fld_fix_new_list(struct fld_cache *cache)
{
        struct fld_cache_entry *f_curr;
        struct fld_cache_entry *f_next;
        struct lu_seq_range *c_range;
        struct lu_seq_range *n_range;
        cfs_list_t *head = &cache->fci_entries_head;
        ENTRY;

restart_fixup:

        cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
                c_range = &f_curr->fce_range;
                n_range = &f_next->fce_range;

                LASSERT(range_is_sane(c_range));
                if (&f_next->fce_list == head)
                        break;

                if (c_range->lsr_flags != n_range->lsr_flags)
                        continue;

                LASSERTF(c_range->lsr_start <= n_range->lsr_start,
                         "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
                         PRANGE(c_range), PRANGE(n_range));

                /* check merge possibility with next range */
                if (c_range->lsr_end == n_range->lsr_start) {
                        if (c_range->lsr_index != n_range->lsr_index)
                                continue;
                        n_range->lsr_start = c_range->lsr_start;
                        fld_cache_entry_delete(cache, f_curr);
                        continue;
                }

                /* check if current range overlaps with next range. */
                if (n_range->lsr_start < c_range->lsr_end) {
                        if (c_range->lsr_index == n_range->lsr_index) {
                                n_range->lsr_start = c_range->lsr_start;
                                n_range->lsr_end = max(c_range->lsr_end,
                                                       n_range->lsr_end);
                                fld_cache_entry_delete(cache, f_curr);
                        } else {
                                if (n_range->lsr_end <= c_range->lsr_end) {
                                        *n_range = *c_range;
                                        fld_cache_entry_delete(cache, f_curr);
                                } else {
                                        n_range->lsr_start = c_range->lsr_end;
                                }
                        }

                        /* we could have overlap over next
                         * range too. better restart. */
                        goto restart_fixup;
                }

                /* kill duplicates */
                if (c_range->lsr_start == n_range->lsr_start &&
                    c_range->lsr_end == n_range->lsr_end)
                        fld_cache_entry_delete(cache, f_curr);
        }

        EXIT;
}

/**
 * add node to fld cache
 */
static inline void fld_cache_entry_add(struct fld_cache *cache,
                                       struct fld_cache_entry *f_new,
                                       cfs_list_t *pos)
{
        cfs_list_add(&f_new->fce_list, pos);
        cfs_list_add(&f_new->fce_lru, &cache->fci_lru);

        cache->fci_cache_count++;
        fld_fix_new_list(cache);
}

/**
 * Check if cache needs to be shrunk. If so - do it.
 * Remove one entry in list and so on until cache is shrunk enough.
 */
static int fld_cache_shrink(struct fld_cache *cache)
{
        struct fld_cache_entry *flde;
        cfs_list_t *curr;
        int num = 0;
        ENTRY;

        LASSERT(cache != NULL);

        if (cache->fci_cache_count < cache->fci_cache_size)
                RETURN(0);

        curr = cache->fci_lru.prev;

        while (cache->fci_cache_count + cache->fci_threshold >
               cache->fci_cache_size && curr != &cache->fci_lru) {

                flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru);
                curr = curr->prev;
                fld_cache_entry_delete(cache, flde);
                num++;
        }

        CDEBUG(D_INFO, "%s: FLD cache - Shrunk by "
               "%d entries\n", cache->fci_name, num);

        RETURN(0);
}

/**
 * kill all fld cache entries.
 */
void fld_cache_flush(struct fld_cache *cache)
{
        ENTRY;

        spin_lock(&cache->fci_lock);
        cache->fci_cache_size = 0;
        fld_cache_shrink(cache);
        spin_unlock(&cache->fci_lock);

        EXIT;
}

/**
 * punch hole in existing range. divide this range and add new
 * entry accordingly in fld cache.
 */
void fld_cache_punch_hole(struct fld_cache *cache,
                          struct fld_cache_entry *f_curr,
                          struct fld_cache_entry *f_new)
{
        const struct lu_seq_range *range = &f_new->fce_range;
        const seqno_t new_start = range->lsr_start;
        const seqno_t new_end = range->lsr_end;
        struct fld_cache_entry *fldt;
        ENTRY;

        OBD_ALLOC_GFP(fldt, sizeof *fldt, CFS_ALLOC_ATOMIC);
        if (!fldt) {
                OBD_FREE_PTR(f_new);
                EXIT;
                /* overlap is not allowed, so don't mess up list. */
                return;
        }

        /* break f_curr RANGE into three RANGES:
         * f_curr, f_new, fldt
         */

        /* fldt keeps the tail of the original range. */
        fldt->fce_range.lsr_start = new_end;
        fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
        fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;

        /* f_curr keeps the head of the original range. */
        f_curr->fce_range.lsr_end = new_start;

        /* add these two entries to list */
        fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
        fld_cache_entry_add(cache, fldt, &f_new->fce_list);

        /* no need to fixup */
        EXIT;
}

/**
 * handle range overlap in fld cache.
 */
void fld_cache_overlap_handle(struct fld_cache *cache,
                              struct fld_cache_entry *f_curr,
                              struct fld_cache_entry *f_new)
{
        const struct lu_seq_range *range = &f_new->fce_range;
        const seqno_t new_start = range->lsr_start;
        const seqno_t new_end = range->lsr_end;
        const mdsno_t mdt = range->lsr_index;

        /* This is the overlap case; only overlap with the previous range is
         * checked here. fld_fix_new_list() handles overlap with the next
         * range. */
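
        /* Same target index: widen the cached range to cover both and drop
         * the now-redundant new entry. */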
        if (f_curr->fce_range.lsr_index == mdt) {
                f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
                                                  new_start);

                f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end,
                                                new_end);

                OBD_FREE_PTR(f_new);
                fld_fix_new_list(cache);

        } else if (new_start <= f_curr->fce_range.lsr_start &&
                   f_curr->fce_range.lsr_end <= new_end) {
                /* case 1: new range completely overshadowed existing range.
                 *         e.g. whole range migrated. update fld cache entry */

                f_curr->fce_range = *range;
                OBD_FREE_PTR(f_new);
                fld_fix_new_list(cache);

        } else if (f_curr->fce_range.lsr_start < new_start &&
                   new_end < f_curr->fce_range.lsr_end) {
                /* case 2: new range fits within existing range. */

                fld_cache_punch_hole(cache, f_curr, f_new);

        } else if (new_end <= f_curr->fce_range.lsr_end) {
                /* case 3: overlap on the left:
                 *         [new_start [c_start new_end) c_end)
                 */

                LASSERT(new_start <= f_curr->fce_range.lsr_start);

                f_curr->fce_range.lsr_start = new_end;
                fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev);

        } else if (f_curr->fce_range.lsr_start <= new_start) {
                /* case 4: overlap on the right:
                 *         [c_start [new_start c_end) new_end)
                 */

                LASSERT(f_curr->fce_range.lsr_end <= new_end);

                f_curr->fce_range.lsr_end = new_start;
                fld_cache_entry_add(cache, f_new, &f_curr->fce_list);

        } else {
                CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
                       PRANGE(range), PRANGE(&f_curr->fce_range));
        }
}

/**
 * Insert FLD entry in FLD cache.
 *
 * This function handles all cases of merging and breaking up of
 * ranges.
 */
void fld_cache_insert(struct fld_cache *cache,
                      const struct lu_seq_range *range)
{
        struct fld_cache_entry *f_new;
        struct fld_cache_entry *f_curr;
        struct fld_cache_entry *n;
        cfs_list_t *head;
        cfs_list_t *prev = NULL;
        const seqno_t new_start = range->lsr_start;
        const seqno_t new_end = range->lsr_end;
        __u32 new_flags = range->lsr_flags;
        ENTRY;

        LASSERT(range_is_sane(range));

        /* Allocate new entry. */
        OBD_ALLOC_PTR(f_new);
        if (f_new == NULL) {
                EXIT;
                return;
        }

        f_new->fce_range = *range;

        /*
         * Duplicate entries are eliminated in the insert op, so there is no
         * need to search for the new entry before starting the insertion loop.
         */

        spin_lock(&cache->fci_lock);
        fld_cache_shrink(cache);

        head = &cache->fci_entries_head;

        cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
                /* stop once the current entry starts past the end of the
                 * new range; the new entry is inserted before it. */
                if (new_end < f_curr->fce_range.lsr_start ||
                    (new_end == f_curr->fce_range.lsr_start &&
                     new_flags != f_curr->fce_range.lsr_flags))
                        break;

                prev = &f_curr->fce_list;
                /* check if this range overlaps the new range. */
                if (new_start < f_curr->fce_range.lsr_end &&
                    new_flags == f_curr->fce_range.lsr_flags) {
                        fld_cache_overlap_handle(cache, f_curr, f_new);
                        goto out;
                }
        }

        if (prev == NULL)
                prev = head;

        CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
        /* Add new entry to cache and lru list. */
        fld_cache_entry_add(cache, f_new, prev);
out:
        spin_unlock(&cache->fci_lock);
        EXIT;
}

/**
 * lookup \a seq sequence for range in fld cache.
 */
int fld_cache_lookup(struct fld_cache *cache,
                     const seqno_t seq, struct lu_seq_range *range)
{
        struct fld_cache_entry *flde;
        cfs_list_t *head;
        ENTRY;

        spin_lock(&cache->fci_lock);
        head = &cache->fci_entries_head;

        cache->fci_stat.fst_count++;

        cfs_list_for_each_entry(flde, head, fce_list) {
                if (flde->fce_range.lsr_start > seq)
                        break;

                if (range_within(&flde->fce_range, seq)) {
                        *range = flde->fce_range;

                        /* update position of this entry in lru list. */
                        cfs_list_move(&flde->fce_lru, &cache->fci_lru);
                        cache->fci_stat.fst_cache++;
                        spin_unlock(&cache->fci_lock);
                        RETURN(0);
                }
        }
        spin_unlock(&cache->fci_lock);
        RETURN(-ENOENT);
}
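
/*
 * Illustrative usage sketch for the cache API above. The cache name, sizes
 * and range values below are arbitrary assumptions; real callers obtain the
 * lu_seq_range from the FLD service rather than hard-coding it.
 *
 *      struct fld_cache *cache;
 *      struct lu_seq_range range = { .lsr_start = 0x400,
 *                                    .lsr_end   = 0x800,
 *                                    .lsr_index = 0,
 *                                    .lsr_flags = 0 };
 *      struct lu_seq_range found;
 *
 *      cache = fld_cache_init("fld_example", 64, 8);
 *      if (IS_ERR(cache))
 *              return PTR_ERR(cache);
 *
 *      fld_cache_insert(cache, &range);
 *      if (fld_cache_lookup(cache, 0x500, &found) == 0)
 *              CDEBUG(D_INFO, "seq 0x500 is served by index %u\n",
 *                     (unsigned)found.lsr_index);
 *
 *      fld_cache_fini(cache);
 */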