* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*
* No locking. Callers synchronize.
*/
-static CFS_LIST_HEAD(iam_formats);
+static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
void iam_format_register(struct iam_format *fmt)
{
- cfs_list_add(&fmt->if_linkage, &iam_formats);
+ list_add(&fmt->if_linkage, &iam_formats);
}
EXPORT_SYMBOL(iam_format_register);
struct buffer_head *bh;
int err;
- LASSERT_SEM_LOCKED(&c->ic_idle_sem);
+ LASSERT(mutex_is_locked(&c->ic_idle_mutex));
if (blk == 0)
return NULL;
}
result = -ENOENT;
- cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
+ list_for_each_entry(fmt, &iam_formats, if_linkage) {
result = fmt->if_guess(c);
if (result == 0)
break;
idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
c->ic_descr->id_root_gap +
sizeof(struct dx_countlimit));
- cfs_down(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
if (bh != NULL && IS_ERR(bh))
result = PTR_ERR(bh);
else
c->ic_idle_bh = bh;
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
}
return result;
memset(c, 0, sizeof *c);
c->ic_descr = descr;
c->ic_object = inode;
- cfs_init_rwsem(&c->ic_sem);
+ init_rwsem(&c->ic_sem);
dynlock_init(&c->ic_tree_lock);
- cfs_sema_init(&c->ic_idle_sem, 1);
+ mutex_init(&c->ic_idle_mutex);
return 0;
}
EXPORT_SYMBOL(iam_container_init);
for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
if (path->ip_frames[i].bh != NULL) {
+ path->ip_frames[i].at_shifted = 0;
brelse(path->ip_frames[i].bh);
path->ip_frames[i].bh = NULL;
}
static int iam_leaf_load(struct iam_path *path)
{
- iam_ptr_t block;
- int err;
- struct iam_container *c;
- struct buffer_head *bh;
- struct iam_leaf *leaf;
- struct iam_descr *descr;
-
- c = path->ip_container;
- leaf = &path->ip_leaf;
- descr = iam_path_descr(path);
- block = path->ip_frame->leaf;
- if (block == 0) {
- /* XXX bug 11027 */
- printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
- (long unsigned)path->ip_frame->leaf,
- dx_get_count(dx_node_get_entries(path, path->ip_frame)),
- path->ip_frames[0].bh, path->ip_frames[1].bh,
- path->ip_frames[2].bh);
- }
- err = descr->id_ops->id_node_read(c, block, NULL, &bh);
- if (err == 0) {
- leaf->il_bh = bh;
- leaf->il_curidx = block;
- err = iam_leaf_ops(leaf)->init(leaf);
- assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
- }
- return err;
+ iam_ptr_t block;
+ int err;
+ struct iam_container *c;
+ struct buffer_head *bh;
+ struct iam_leaf *leaf;
+ struct iam_descr *descr;
+
+ c = path->ip_container;
+ leaf = &path->ip_leaf;
+ descr = iam_path_descr(path);
+ block = path->ip_frame->leaf;
+ if (block == 0) {
+ /* XXX bug 11027 */
+ printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
+ (long unsigned)path->ip_frame->leaf,
+ dx_get_count(dx_node_get_entries(path, path->ip_frame)),
+ path->ip_frames[0].bh, path->ip_frames[1].bh,
+ path->ip_frames[2].bh);
+ }
+ err = descr->id_ops->id_node_read(c, block, NULL, &bh);
+ if (err == 0) {
+ leaf->il_bh = bh;
+ leaf->il_curidx = block;
+ err = iam_leaf_ops(leaf)->init(leaf);
+ assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
+ }
+ return err;
}
static void iam_unlock_htree(struct iam_container *ic,
void iam_container_write_lock(struct iam_container *ic)
{
- cfs_down_write(&ic->ic_sem);
+ down_write(&ic->ic_sem);
}
void iam_container_write_unlock(struct iam_container *ic)
{
- cfs_up_write(&ic->ic_sem);
+ up_write(&ic->ic_sem);
}
void iam_container_read_lock(struct iam_container *ic)
{
- cfs_down_read(&ic->ic_sem);
+ down_read(&ic->ic_sem);
}
void iam_container_read_unlock(struct iam_container *ic)
{
- cfs_up_read(&ic->ic_sem);
+ up_read(&ic->ic_sem);
}
/*
struct dynlock_handle **dl, enum dynlock_type lt)
{
int result;
- struct inode *dir;
- dir = iam_path_obj(path);
while ((result = __iam_path_lookup(path)) == 0) {
do_corr(schedule());
*dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
static int iam_path_lookup(struct iam_path *path, int index)
{
struct iam_container *c;
- struct iam_descr *descr;
struct iam_leaf *leaf;
int result;
c = path->ip_container;
leaf = &path->ip_leaf;
- descr = iam_path_descr(path);
result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
assert_inv(iam_path_check(path));
do_corr(schedule());
int result;
struct iam_path *path;
struct iam_leaf *leaf;
- struct inode *obj;
do_corr(struct iam_ikey *ik_orig);
/* assert_corr(it->ii_flags&IAM_IT_MOVE); */
path = &it->ii_path;
leaf = &path->ip_leaf;
- obj = iam_path_obj(path);
assert_corr(iam_leaf_is_locked(leaf));
if (c->ic_idle_bh == NULL)
goto newblock;
- cfs_down(&c->ic_idle_sem);
- if (unlikely(c->ic_idle_failed || c->ic_idle_bh == NULL)) {
- cfs_up(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
+ if (unlikely(c->ic_idle_bh == NULL)) {
+ mutex_unlock(&c->ic_idle_mutex);
goto newblock;
}
if (*e != 0)
goto fail;
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
bh = ldiskfs_bread(NULL, inode, *b, 0, e);
if (bh == NULL)
return NULL;
}
c->ic_idle_bh = idle;
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
got:
/* get write access for the found buffer head */
brelse(bh);
bh = NULL;
ldiskfs_std_error(inode->i_sb, *e);
+ } else {
+ /* Clear the reused node, just as a newly allocated node would be. */
+ memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+ set_buffer_uptodate(bh);
}
return bh;
newblock:
- bh = ldiskfs_append(h, inode, b, e);
+ bh = osd_ldiskfs_append(h, inode, b, e);
return bh;
fail:
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
ldiskfs_std_error(inode->i_sb, *e);
return NULL;
}
++ frame;
assert_inv(dx_node_check(path, frame));
bh_new[0] = NULL; /* buffer head is "consumed" */
- err = ldiskfs_journal_get_write_access(handle, bh2);
+ err = ldiskfs_journal_dirty_metadata(handle, bh2);
if (err)
goto journal_error;
do_corr(schedule());
struct inode *inode = c->ic_object;
struct iam_frame *frame = p->ip_frame;
struct iam_entry *entries;
+ struct iam_entry *pos;
struct dynlock_handle *lh;
int count;
int rc;
iam_lock_bh(frame->bh);
entries = frame->entries;
count = dx_get_count(entries);
+ /* Do NOT shrink away the last entry in the index node; it can be
+ * reused directly by the next new node. */
+ if (count == 2) {
+ iam_unlock_bh(frame->bh);
+ iam_unlock_htree(c, lh);
+ return 0;
+ }
+
+ pos = iam_find_position(p, frame);
+ /* New leaf nodes may have been added, or empty leaf nodes may have
+ * been shrunk away, during the delete operation.
+ *
+ * If the empty leaf is no longer under the current index node because
+ * the index node has been split, just skip the empty leaf; this case
+ * is rare. */
+ if (unlikely(frame->leaf != dx_get_block(p, pos))) {
+ iam_unlock_bh(frame->bh);
+ iam_unlock_htree(c, lh);
+ return 0;
+ }
+
+ frame->at = pos;
if (frame->at < iam_entry_shift(p, entries, count - 1)) {
struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
frame->at_shifted = 1;
}
dx_set_count(entries, count - 1);
- rc = iam_txn_dirty(h, p, frame->bh);
iam_unlock_bh(frame->bh);
+ rc = iam_txn_dirty(h, p, frame->bh);
iam_unlock_htree(c, lh);
if (rc != 0)
return 0;
struct iam_idle_head *head;
int rc;
- rc = iam_txn_add(h, p, bh);
- if (rc != 0)
- return rc;
-
head = (struct iam_idle_head *)(bh->b_data);
head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
head->iih_count = 0;
head->iih_next = *idle_blocks;
+ /* Write access to this bh has already been obtained. */
rc = iam_txn_dirty(h, p, bh);
if (rc != 0)
return rc;
int count;
int rc;
- cfs_down(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
if (unlikely(c->ic_idle_failed)) {
rc = -EFAULT;
goto unlock;
rc = iam_txn_dirty(h, p, c->ic_idle_bh);
unlock:
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
if (rc != 0)
CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);