brelse(bh);
bh = NULL;
ldiskfs_std_error(inode->i_sb, *e);
+ } else {
+	/* Clear the reused node, just as a freshly allocated node would be. */
+ memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+ set_buffer_uptodate(bh);
}
return bh;
++ frame;
assert_inv(dx_node_check(path, frame));
bh_new[0] = NULL; /* buffer head is "consumed" */
- err = ldiskfs_journal_get_write_access(handle, bh2);
+ err = ldiskfs_journal_dirty_metadata(handle, bh2);
if (err)
goto journal_error;
do_corr(schedule());
struct inode *inode = c->ic_object;
struct iam_frame *frame = p->ip_frame;
struct iam_entry *entries;
+ struct iam_entry *pos;
struct dynlock_handle *lh;
int count;
int rc;
return 0;
}
+ rc = iam_txn_add(h, p, frame->bh);
+ if (rc != 0) {
+ iam_unlock_htree(c, lh);
+ return 0;
+ }
+
+ iam_lock_bh(frame->bh);
entries = frame->entries;
count = dx_get_count(entries);
/* NOT shrink the last entry in the index node, which can be reused
* directly by next new node. */
if (count == 2) {
+ iam_unlock_bh(frame->bh);
iam_unlock_htree(c, lh);
return 0;
}
- rc = iam_txn_add(h, p, frame->bh);
- if (rc != 0) {
+ pos = iam_find_position(p, frame);
+	/* Some new leaf nodes may have been added, or some empty leaf nodes
+	 * may have been shrunk, during my delete operation.
+	 *
+	 * If the empty leaf is no longer under the current index node because
+	 * the index node has been split, just skip the empty leaf; this is rare. */
+ if (unlikely(frame->leaf != dx_get_block(p, pos))) {
+ iam_unlock_bh(frame->bh);
iam_unlock_htree(c, lh);
return 0;
}
- iam_lock_bh(frame->bh);
+ frame->at = pos;
if (frame->at < iam_entry_shift(p, entries, count - 1)) {
struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
struct iam_idle_head *head;
int rc;
- rc = iam_txn_add(h, p, bh);
- if (rc != 0)
- return rc;
-
head = (struct iam_idle_head *)(bh->b_data);
head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
head->iih_count = 0;
head->iih_next = *idle_blocks;
+	/* Write access to this bh has already been obtained earlier. */
rc = iam_txn_dirty(h, p, bh);
if (rc != 0)
return rc;