+
+/* How many idle-block slots (4-byte block numbers) fit in one block
+ * after the struct iam_idle_head header. */
+static inline int iam_idle_blocks_limit(struct inode *inode)
+{
+ return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) / 4;
+}
+
+/*
+ * Shrink the index node by removing the entry that points at the empty
+ * leaf @l, and hand the leaf's buffer back to the caller so the block
+ * can be recycled onto the container's idle-blocks list.
+ *
+ * If the leaf cannot be recycled, we will lose one block for reusing.
+ * It is not a serious issue because it is almost the same as not
+ * recycling at all.
+ *
+ * Returns the block number of the detached leaf on success (with *bh
+ * set to the leaf's buffer_head, one extra reference held for the
+ * caller via get_bh), or 0 when nothing was detached -- recycling is
+ * best-effort, so 0 is not treated as an error.
+ */
+static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
+ struct iam_leaf *l, struct buffer_head **bh)
+{
+ struct iam_container *c = p->ip_container;
+ struct inode *inode = c->ic_object;
+ struct iam_frame *frame = p->ip_frame;
+ struct iam_entry *entries;
+ struct iam_entry *pos;
+ struct dynlock_handle *lh;
+ int count;
+ int rc;
+
+ /* Idle-blocks handling already failed once for this container;
+ * do not try again. */
+ if (c->ic_idle_failed)
+ return 0;
+
+ if (unlikely(frame == NULL))
+ return 0;
+
+ /* Only an empty leaf may be detached from the index. */
+ if (!iam_leaf_empty(l))
+ return 0;
+
+ /* Serialize against concurrent modification of this index node. */
+ lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
+ if (lh == NULL) {
+ CWARN("%.16s: No memory to recycle idle blocks\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
+ return 0;
+ }
+
+ /* NOTE(review): presumably journal get_write_access on the index
+ * block -- confirm against the iam_txn_add definition. */
+ rc = iam_txn_add(h, p, frame->bh);
+ if (rc != 0) {
+ iam_unlock_htree(c, lh);
+ return 0;
+ }
+
+ iam_lock_bh(frame->bh);
+ entries = frame->entries;
+ count = dx_get_count(entries);
+ /* NOT shrink the last entry in the index node, which can be reused
+ * directly by next new node. */
+ if (count == 2) {
+ iam_unlock_bh(frame->bh);
+ iam_unlock_htree(c, lh);
+ return 0;
+ }
+
+ pos = iam_find_position(p, frame);
+ /* There may be some new leaf nodes have been added or empty leaf nodes
+ * have been shrunk during my delete operation.
+ *
+ * If the empty leaf is not under current index node because the index
+ * node has been split, then just skip the empty leaf, which is rare. */
+ if (unlikely(frame->leaf != dx_get_block(p, pos))) {
+ iam_unlock_bh(frame->bh);
+ iam_unlock_htree(c, lh);
+ return 0;
+ }
+
+ frame->at = pos;
+ /* Close the gap: shift the following entries down over the one
+ * being removed (unless it is already the last entry). */
+ if (frame->at < iam_entry_shift(p, entries, count - 1)) {
+ struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
+
+ memmove(frame->at, n,
+ (char *)iam_entry_shift(p, entries, count) - (char *)n);
+ frame->at_shifted = 1;
+ }
+ dx_set_count(entries, count - 1);
+ iam_unlock_bh(frame->bh);
+ rc = iam_txn_dirty(h, p, frame->bh);
+ iam_unlock_htree(c, lh);
+ if (rc != 0)
+ return 0;
+
+ /* Hand the leaf buffer to the caller with its own reference. */
+ get_bh(l->il_bh);
+ *bh = l->il_bh;
+ return frame->leaf;
+}
+
+/*
+ * Make @bh (block number @blk) the new head of the container's
+ * idle-blocks list.
+ *
+ * An iam_idle_head is written into @bh: it starts empty (iih_count = 0)
+ * and links to the previous list head via iih_next; then the root
+ * block's on-disk idle-blocks pointer (@idle_blocks) is switched to
+ * @blk.  If dirtying the root block fails, the pointer is rolled back
+ * so the on-disk state stays consistent.
+ *
+ * Returns 0 on success, or the error code from the journal helpers.
+ */
+static int
+iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
+ __u32 *idle_blocks, iam_ptr_t blk)
+{
+ struct iam_container *c = p->ip_container;
+ struct buffer_head *old = c->ic_idle_bh;
+ struct iam_idle_head *head;
+ int rc;
+
+ head = (struct iam_idle_head *)(bh->b_data);
+ head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
+ head->iih_count = 0;
+ /* *idle_blocks is already little-endian on disk, so no conversion. */
+ head->iih_next = *idle_blocks;
+ /* The bh already get_write_accessed. */
+ rc = iam_txn_dirty(h, p, bh);
+ if (rc != 0)
+ return rc;
+
+ rc = iam_txn_add(h, p, c->ic_root_bh);
+ if (rc != 0)
+ return rc;
+
+ iam_lock_bh(c->ic_root_bh);
+ *idle_blocks = cpu_to_le32(blk);
+ iam_unlock_bh(c->ic_root_bh);
+ rc = iam_txn_dirty(h, p, c->ic_root_bh);
+ if (rc == 0) {
+ /* NOT release old before new assigned. */
+ get_bh(bh);
+ c->ic_idle_bh = bh;
+ brelse(old);
+ } else {
+ /* Roll the root pointer back to the previous list head. */
+ iam_lock_bh(c->ic_root_bh);
+ *idle_blocks = head->iih_next;
+ iam_unlock_bh(c->ic_root_bh);
+ }
+ return rc;
+}
+
+/*
+ * Add the detached leaf block @blk (buffer @bh) to the container's
+ * idle-blocks list so it can be reused by a later allocation.
+ *
+ * If the leaf cannot be recycled, we will lose one block for reusing.
+ * It is not a serious issue because it is almost the same as not
+ * recycling at all: on failure we only warn, the tree stays consistent.
+ */
+static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
+ struct buffer_head *bh, iam_ptr_t blk)
+{
+ struct iam_container *c = p->ip_container;
+ struct inode *inode = c->ic_object;
+ struct iam_idle_head *head;
+ __u32 *idle_blocks;
+ int count;
+ int rc;
+
+ mutex_lock(&c->ic_idle_mutex);
+ if (unlikely(c->ic_idle_failed)) {
+ rc = -EFAULT;
+ goto unlock;
+ }
+
+ /* On-disk location of the idle-blocks list head pointer: right
+ * after the dx_countlimit in the root block. */
+ idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
+ c->ic_descr->id_root_gap +
+ sizeof(struct dx_countlimit));
+ /* It is the first idle block. */
+ if (c->ic_idle_bh == NULL) {
+ rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
+ goto unlock;
+ }
+
+ head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
+ count = le16_to_cpu(head->iih_count);
+ /* Current ic_idle_bh is full, to be replaced by the leaf. */
+ if (count == iam_idle_blocks_limit(inode)) {
+ rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
+ goto unlock;
+ }
+
+ /* Just add to ic_idle_bh. */
+ rc = iam_txn_add(h, p, c->ic_idle_bh);
+ if (rc != 0)
+ goto unlock;
+
+ head->iih_blks[count] = cpu_to_le32(blk);
+ head->iih_count = cpu_to_le16(count + 1);
+ rc = iam_txn_dirty(h, p, c->ic_idle_bh);
+
+unlock:
+ mutex_unlock(&c->ic_idle_mutex);
+ if (rc != 0)
+ CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
+}