* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
c->ic_descr->id_root_gap +
sizeof(struct dx_countlimit));
- cfs_down(&c->ic_idle_sem);
+ down(&c->ic_idle_sem);
bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
if (bh != NULL && IS_ERR(bh))
result = PTR_ERR(bh);
else
c->ic_idle_bh = bh;
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
}
return result;
memset(c, 0, sizeof *c);
c->ic_descr = descr;
c->ic_object = inode;
- cfs_init_rwsem(&c->ic_sem);
+ init_rwsem(&c->ic_sem);
dynlock_init(&c->ic_tree_lock);
- cfs_sema_init(&c->ic_idle_sem, 1);
+ sema_init(&c->ic_idle_sem, 1);
return 0;
}
EXPORT_SYMBOL(iam_container_init);
void iam_container_write_lock(struct iam_container *ic)
{
- cfs_down_write(&ic->ic_sem);
+ down_write(&ic->ic_sem);
}
void iam_container_write_unlock(struct iam_container *ic)
{
- cfs_up_write(&ic->ic_sem);
+ up_write(&ic->ic_sem);
}
void iam_container_read_lock(struct iam_container *ic)
{
- cfs_down_read(&ic->ic_sem);
+ down_read(&ic->ic_sem);
}
void iam_container_read_unlock(struct iam_container *ic)
{
- cfs_up_read(&ic->ic_sem);
+ up_read(&ic->ic_sem);
}
/*
if (c->ic_idle_bh == NULL)
goto newblock;
- cfs_down(&c->ic_idle_sem);
+ down(&c->ic_idle_sem);
if (unlikely(c->ic_idle_bh == NULL)) {
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
goto newblock;
}
if (*e != 0)
goto fail;
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
bh = ldiskfs_bread(NULL, inode, *b, 0, e);
if (bh == NULL)
return NULL;
}
c->ic_idle_bh = idle;
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
got:
/* get write access for the found buffer head */
brelse(bh);
bh = NULL;
ldiskfs_std_error(inode->i_sb, *e);
+ } else {
+ /* Clear the reused node so it looks like a newly-allocated one. */
+ memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+ set_buffer_uptodate(bh);
}
return bh;
return bh;
fail:
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
ldiskfs_std_error(inode->i_sb, *e);
return NULL;
}
++ frame;
assert_inv(dx_node_check(path, frame));
bh_new[0] = NULL; /* buffer head is "consumed" */
- err = ldiskfs_journal_get_write_access(handle, bh2);
+ err = ldiskfs_journal_dirty_metadata(handle, bh2);
if (err)
goto journal_error;
do_corr(schedule());
struct inode *inode = c->ic_object;
struct iam_frame *frame = p->ip_frame;
struct iam_entry *entries;
+ struct iam_entry *pos;
struct dynlock_handle *lh;
int count;
int rc;
return 0;
}
+ rc = iam_txn_add(h, p, frame->bh);
+ if (rc != 0) {
+ iam_unlock_htree(c, lh);
+ return 0;
+ }
+
+ iam_lock_bh(frame->bh);
entries = frame->entries;
count = dx_get_count(entries);
/* NOT shrink the last entry in the index node, which can be reused
* directly by next new node. */
if (count == 2) {
+ iam_unlock_bh(frame->bh);
iam_unlock_htree(c, lh);
return 0;
}
- rc = iam_txn_add(h, p, frame->bh);
- if (rc != 0) {
+ pos = iam_find_position(p, frame);
+ /* Some new leaf nodes may have been added, or empty leaf nodes
+ * may have been shrunk, during my delete operation.
+ *
+ * If the empty leaf is no longer under the current index node because
+ * the index node has been split, then just skip it, which is rare. */
+ if (unlikely(frame->leaf != dx_get_block(p, pos))) {
+ iam_unlock_bh(frame->bh);
iam_unlock_htree(c, lh);
return 0;
}
- iam_lock_bh(frame->bh);
+ frame->at = pos;
if (frame->at < iam_entry_shift(p, entries, count - 1)) {
struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
struct iam_idle_head *head;
int rc;
- rc = iam_txn_add(h, p, bh);
- if (rc != 0)
- return rc;
-
head = (struct iam_idle_head *)(bh->b_data);
head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
head->iih_count = 0;
head->iih_next = *idle_blocks;
+ /* Write access for this bh has already been obtained. */
rc = iam_txn_dirty(h, p, bh);
if (rc != 0)
return rc;
int count;
int rc;
- cfs_down(&c->ic_idle_sem);
+ down(&c->ic_idle_sem);
if (unlikely(c->ic_idle_failed)) {
rc = -EFAULT;
goto unlock;
rc = iam_txn_dirty(h, p, c->ic_idle_bh);
unlock:
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
if (rc != 0)
CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);