+/*
+ * Borrow @ext4_chunk_trans_blocks: worst-case journal credits needed
+ * to modify one chunk of an extent-mapped file — two blocks per level
+ * of the extent tree, plus the block bitmaps and group descriptor
+ * blocks those updates may touch, plus the fixed metadata overhead
+ * (superblock, inode, quota, xattr blocks).
+ *
+ * NOTE(review): @nrblocks is currently unused; it is kept to mirror
+ * the ext4_chunk_trans_blocks() prototype this was borrowed from.
+ */
+static int osd_chunk_trans_blocks(struct inode *inode, int nrblocks)
+{
+	int idxblocks = ext_depth(inode) * 2;
+	ldiskfs_group_t groups = idxblocks + 1;
+	int gdpblocks = idxblocks + 1;
+
+	/* cannot touch more groups/descriptor blocks than actually exist */
+	if (groups > LDISKFS_SB(inode->i_sb)->s_groups_count)
+		groups = LDISKFS_SB(inode->i_sb)->s_groups_count;
+	if (gdpblocks > LDISKFS_SB(inode->i_sb)->s_gdb_count)
+		gdpblocks = LDISKFS_SB(inode->i_sb)->s_gdb_count;
+
+	/* index blocks + bitmaps + group descriptors + fixed metadata */
+	return idxblocks + groups + gdpblocks +
+	       LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
+}
+
+/*
+ * Make sure @handle carries at least @needed journal credits.
+ *
+ * Returns 0 when the handle already has enough credits (or was
+ * successfully extended); otherwise returns the result of
+ * ldiskfs_journal_extend() — callers treat a positive value as
+ * "cannot extend, restart the transaction instead".
+ */
+static int osd_extend_trans(handle_t *handle, int needed)
+{
+	int shortfall = needed - handle->h_buffer_credits;
+
+	if (!ldiskfs_handle_has_enough_credits(handle, needed))
+		return ldiskfs_journal_extend(handle, shortfall);
+
+	return 0;
+}
+
+/*
+ * Extend @handle to @needed credits; when extending is not possible
+ * (osd_extend_trans() returned a positive value), restart the
+ * transaction with @needed credits instead.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int osd_extend_restart_trans(handle_t *handle, int needed)
+{
+	int rc;
+
+	rc = osd_extend_trans(handle, needed);
+	if (rc > 0)
+		rc = ldiskfs_journal_restart(handle, needed);
+
+	return rc;
+}
+
+/*
+ * Finish a mapped write: update the in-core and on-disk file size if
+ * the write grew the file, then submit the bio for the mapped blocks.
+ *
+ * @disk_size is in/out: on entry the size implied by the mapped blocks;
+ * when @user_size is non-zero it is clamped down to @user_size before
+ * being compared against i_size.
+ *
+ * Returns the result of osd_do_bio().
+ */
+static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
+ struct osd_device *osd, sector_t start_blocks,
+ sector_t count, loff_t *disk_size,
+ __u64 user_size)
+{
+ /* if file has grown, take user_size into account */
+ if (user_size && *disk_size > user_size)
+ *disk_size = user_size;
+
+ spin_lock(&inode->i_lock);
+ if (*disk_size > i_size_read(inode)) {
+ i_size_write(inode, *disk_size);
+ LDISKFS_I(inode)->i_disksize = *disk_size;
+ /*
+ * NOTE(review): i_lock is dropped before osd_dirty_inode() —
+ * presumably it must not be called under the spinlock; confirm.
+ */
+ spin_unlock(&inode->i_lock);
+ osd_dirty_inode(inode, I_DIRTY_DATASYNC);
+ } else {
+ spin_unlock(&inode->i_lock);
+ }
+
+ /*
+ * We don't do stats here as in read path because
+ * write is async: we'll do this in osd_put_bufs()
+ */
+ return osd_do_bio(osd, inode, iobuf, start_blocks, count);
+}