* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/module.h>
#include <asm/unaligned.h>
-#include <linux/dynlocks.h>
+
+#include "osd_dynlocks.h"
/*
- * linux/include/linux/osd_iam.h
+ * osd_iam.h
*/
#ifndef CLASSERT
#define CLASSERT(cond) do {switch(42) {case (cond): case 0: break;}} while (0)
iam_ptr_t curidx; /* (logical) offset of this node. Used to
* per-node locking to detect concurrent
* splits. */
+ unsigned int at_shifted:1; /* The "at" entry moved to the next
+ * one because the index node was
+ * shrunk to recycle an empty leaf
+ * node. */
};
/*
int (*key_cmp)(const struct iam_leaf *l, const struct iam_key *k);
int (*key_eq)(const struct iam_leaf *l, const struct iam_key *k);
+ int (*rec_eq)(const struct iam_leaf *l, const struct iam_rec *r);
+
int (*key_size)(const struct iam_leaf *l);
/*
* Search leaf @l for a record with key @k or for a place
*/
void (*split)(struct iam_leaf *l, struct buffer_head **bh,
iam_ptr_t newblknr);
+ /*
+ * Check whether the leaf @l is empty.
+ */
+ int (*leaf_empty)(struct iam_leaf *l);
};
/*
struct iam_leaf_operations *id_leaf_ops;
};
+enum {
+ IAM_IDLE_HEADER_MAGIC = 0x7903,
+};
+
+/*
+ * Header structure to record idle blocks.
+ */
+struct iam_idle_head {
+ __le16 iih_magic;
+ __le16 iih_count; /* how many idle blocks in this head */
+ __le32 iih_next; /* next head for idle blocks */
+ __le32 iih_blks[0];
+};
+
/*
* An instance of iam container.
*/
/*
* read-write lock protecting index consistency.
*/
- cfs_rw_semaphore_t ic_sem;
+ struct rw_semaphore ic_sem;
+ struct dynlock ic_tree_lock;
+ /*
+ * Protect ic_idle_bh
+ */
+ struct semaphore ic_idle_sem;
+ /*
+ * BH for idle blocks
+ */
+ struct buffer_head *ic_idle_bh;
+ unsigned int ic_idle_failed:1; /* Idle block mechanism failed */
};
/*
};
struct dx_countlimit {
- __le16 limit;
- __le16 count;
+ __le16 limit;
+ __le16 count;
};
/*
static inline struct iam_ikey *iam_path_ikey(const struct iam_path *path,
int nr)
{
- assert(0 <= nr && nr < ARRAY_SIZE(path->ip_data->ipd_key_scratch));
+ LASSERT(0 <= nr && nr < ARRAY_SIZE(path->ip_data->ipd_key_scratch));
return path->ip_data->ipd_key_scratch[nr];
}
-
-static inline struct dynlock *path_dynlock(struct iam_path *path)
-{
- return &LDISKFS_I(iam_path_obj(path))->i_htree_lock;
-}
-
static inline int iam_leaf_is_locked(const struct iam_leaf *leaf)
{
int result;
- result = dynlock_is_locked(path_dynlock(leaf->il_path),
- leaf->il_curidx);
+ result = dynlock_is_locked(&iam_leaf_container(leaf)->ic_tree_lock,
+ leaf->il_curidx);
if (!result)
dump_stack();
return result;
{
int result;
- result = dynlock_is_locked(path_dynlock(path), frame->curidx);
+ result = dynlock_is_locked(&path->ip_container->ic_tree_lock,
+ frame->curidx);
if (!result)
dump_stack();
return result;
{
DX_DEVAL(iam_lock_stats.dls_bh_lock++);
#ifdef CONFIG_SMP
- while (cfs_test_and_set_bit(BH_DXLock, &bh->b_state)) {
- DX_DEVAL(iam_lock_stats.dls_bh_busy++);
- while (cfs_test_bit(BH_DXLock, &bh->b_state))
+ while (test_and_set_bit(BH_DXLock, &bh->b_state)) {
+ DX_DEVAL(iam_lock_stats.dls_bh_busy++);
+ while (test_bit(BH_DXLock, &bh->b_state))
cpu_relax();
}
#endif