1 Index: linux-2.4.29/fs/ext3/extents.c
2 ===================================================================
3 --- linux-2.4.29.orig/fs/ext3/extents.c 2005-05-03 16:52:08.723069952 +0300
4 +++ linux-2.4.29/fs/ext3/extents.c 2005-05-03 16:52:08.802057944 +0300
7 + * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
8 + * Written by Alex Tomas <alex@clusterfs.com>
10 + * This program is free software; you can redistribute it and/or modify
11 + * it under the terms of the GNU General Public License version 2 as
12 + * published by the Free Software Foundation.
14 + * This program is distributed in the hope that it will be useful,
15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 + * GNU General Public License for more details.
19 + * You should have received a copy of the GNU General Public License
20 + * along with this program; if not, write to the Free Software
21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
25 + * Extents support for EXT3
28 + * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
29 + * - ext3_ext_calc_credits() could take 'mergable' into account
30 + * - ext3*_error() should be used in some situations
31 + * - find_goal() [to be tested and improved]
32 + * - smart tree reduction
33 + * - arch-independence
34 + * common on-disk format for big/little-endian arch
37 +#include <linux/module.h>
38 +#include <linux/fs.h>
39 +#include <linux/time.h>
40 +#include <linux/ext3_jbd.h>
41 +#include <linux/jbd.h>
42 +#include <linux/locks.h>
43 +#include <linux/smp_lock.h>
44 +#include <linux/highuid.h>
45 +#include <linux/pagemap.h>
46 +#include <linux/quotaops.h>
47 +#include <linux/string.h>
48 +#include <linux/slab.h>
49 +#include <linux/ext3_extents.h>
50 +#include <asm/uaccess.h>
53 +static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
55 + if (eh->eh_magic != EXT3_EXT_MAGIC) {
56 + printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
57 + (unsigned)eh->eh_magic);
60 + if (eh->eh_max == 0) {
61 + printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
62 + (unsigned)eh->eh_max);
65 + if (eh->eh_entries > eh->eh_max) {
66 + printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
67 + (unsigned)eh->eh_entries);
73 +static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
77 + if (handle->h_buffer_credits > needed)
79 + if (!ext3_journal_extend(handle, needed))
81 + err = ext3_journal_restart(handle, needed);
87 +ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
89 + if (tree->ops->get_write_access)
90 + return tree->ops->get_write_access(h,tree->buffer);
96 +ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
98 + if (tree->ops->mark_buffer_dirty)
99 + return tree->ops->mark_buffer_dirty(h,tree->buffer);
109 +static int ext3_ext_get_access(handle_t *handle,
110 + struct ext3_extents_tree *tree,
111 + struct ext3_ext_path *path)
116 + /* path points to block */
117 + err = ext3_journal_get_write_access(handle, path->p_bh);
119 + /* path points to leaf/index in inode body */
120 + err = ext3_ext_get_access_for_root(handle, tree);
131 +static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
132 + struct ext3_ext_path *path)
136 + /* path points to block */
137 + err =ext3_journal_dirty_metadata(handle, path->p_bh);
139 + /* path points to leaf/index in inode body */
140 + err = ext3_ext_mark_root_dirty(handle, tree);
146 +ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
147 + struct ext3_ext_path *path, struct ext3_extent *ex,
150 + int goal, depth, newblock;
151 + struct inode *inode;
154 + if (tree->ops->new_block)
155 + return tree->ops->new_block(handle, tree, path, ex, err);
157 + inode = tree->inode;
158 + depth = EXT_DEPTH(tree);
159 + if (path && depth > 0) {
160 + goal = path[depth-1].p_block;
162 + struct ext3_inode_info *ei = EXT3_I(inode);
163 + unsigned long bg_start;
164 + unsigned long colour;
166 + bg_start = (ei->i_block_group *
167 + EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
168 + le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
169 + colour = (current->pid % 16) *
170 + (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
171 + goal = bg_start + colour;
175 + newblock = ext3_new_block(handle, inode, goal, 0, 0, err);
180 +static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
182 + struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
183 + neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << 24) |
184 + (EXT_HDR_GEN(neh) + 1);
187 +static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
191 + size = (tree->inode->i_sb->s_blocksize -
192 + sizeof(struct ext3_extent_header)) /
193 + sizeof(struct ext3_extent);
194 +#ifdef AGRESSIVE_TEST
200 +static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
204 + size = (tree->inode->i_sb->s_blocksize -
205 + sizeof(struct ext3_extent_header)) /
206 + sizeof(struct ext3_extent_idx);
207 +#ifdef AGRESSIVE_TEST
213 +static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
217 + size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
218 + sizeof(struct ext3_extent);
219 +#ifdef AGRESSIVE_TEST
225 +static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
229 + size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
230 + sizeof(struct ext3_extent_idx);
231 +#ifdef AGRESSIVE_TEST
237 +static void ext3_ext_show_path(struct ext3_extents_tree *tree,
238 + struct ext3_ext_path *path)
241 + int k, l = path->p_depth;
243 + ext_debug(tree, "path:");
244 + for (k = 0; k <= l; k++, path++) {
246 + ext_debug(tree, " %d->%d", path->p_idx->ei_block,
247 + path->p_idx->ei_leaf);
248 + } else if (path->p_ext) {
249 + ext_debug(tree, " %d:%d:%d",
250 + path->p_ext->ee_block,
251 + path->p_ext->ee_len,
252 + path->p_ext->ee_start);
254 + ext_debug(tree, " []");
256 + ext_debug(tree, "\n");
260 +static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
261 + struct ext3_ext_path *path)
264 + int depth = EXT_DEPTH(tree);
265 + struct ext3_extent_header *eh;
266 + struct ext3_extent *ex;
272 + eh = path[depth].p_hdr;
273 + ex = EXT_FIRST_EXTENT(eh);
275 + for (i = 0; i < eh->eh_entries; i++, ex++) {
276 + ext_debug(tree, "%d:%d:%d ",
277 + ex->ee_block, ex->ee_len, ex->ee_start);
279 + ext_debug(tree, "\n");
283 +static void ext3_ext_drop_refs(struct ext3_ext_path *path)
285 + int depth = path->p_depth;
288 + for (i = 0; i <= depth; i++, path++) {
290 + brelse(path->p_bh);
297 + * binary search for closest index by given block
300 +ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
301 + struct ext3_ext_path *path, int block)
303 + struct ext3_extent_header *eh = path->p_hdr;
304 + struct ext3_extent_idx *ix;
307 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
308 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
309 + EXT_ASSERT(eh->eh_entries > 0);
311 + ext_debug(tree, "binsearch for %d(idx): ", block);
313 + path->p_idx = ix = EXT_FIRST_INDEX(eh);
315 + r = k = eh->eh_entries;
318 + if (block < ix[l + k].ei_block)
322 + ext_debug(tree, "%d:%d:%d ", k, l, r);
327 + ext_debug(tree," -> %d->%d ",path->p_idx->ei_block,path->p_idx->ei_leaf);
330 + if (block < ix->ei_block)
332 + path->p_idx = ix++;
334 + ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
335 + path->p_idx->ei_leaf);
337 +#ifdef CHECK_BINSEARCH
339 + struct ext3_extent_idx *chix;
341 + chix = ix = EXT_FIRST_INDEX(eh);
342 + for (k = 0; k < eh->eh_entries; k++, ix++) {
343 + if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
344 + printk("k=%d, ix=0x%p, first=0x%p\n", k,
345 + ix, EXT_FIRST_INDEX(eh));
346 + printk("%u <= %u\n",
347 + ix->ei_block,ix[-1].ei_block);
349 + EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
350 + if (block < ix->ei_block)
354 + EXT_ASSERT(chix == path->p_idx);
360 + * binary search for closest extent by given block
363 +ext3_ext_binsearch(struct ext3_extents_tree *tree,
364 + struct ext3_ext_path *path, int block)
366 + struct ext3_extent_header *eh = path->p_hdr;
367 + struct ext3_extent *ex;
370 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
371 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
373 + if (eh->eh_entries == 0) {
375 + * this leaf is empty yet:
376 + * we get such a leaf in split/add case
381 + ext_debug(tree, "binsearch for %d: ", block);
383 + path->p_ext = ex = EXT_FIRST_EXTENT(eh);
385 + r = k = eh->eh_entries;
388 + if (block < ex[l + k].ee_block)
392 + ext_debug(tree, "%d:%d:%d ", k, l, r);
397 + ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
398 + path->p_ext->ee_start, path->p_ext->ee_len);
401 + if (block < ex->ee_block)
403 + path->p_ext = ex++;
405 + ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
406 + path->p_ext->ee_start, path->p_ext->ee_len);
408 +#ifdef CHECK_BINSEARCH
410 + struct ext3_extent *chex;
412 + chex = ex = EXT_FIRST_EXTENT(eh);
413 + for (k = 0; k < eh->eh_entries; k++, ex++) {
414 + EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
415 + if (block < ex->ee_block)
419 + EXT_ASSERT(chex == path->p_ext);
424 +int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
426 + struct ext3_extent_header *eh;
428 + BUG_ON(tree->buffer_len == 0);
429 + ext3_ext_get_access_for_root(handle, tree);
430 + eh = EXT_ROOT_HDR(tree);
432 + eh->eh_entries = 0;
433 + eh->eh_magic = EXT3_EXT_MAGIC;
434 + eh->eh_max = ext3_ext_space_root(tree);
435 + ext3_ext_mark_root_dirty(handle, tree);
436 + ext3_ext_invalidate_cache(tree);
440 +struct ext3_ext_path *
441 +ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
442 + struct ext3_ext_path *path)
444 + struct ext3_extent_header *eh;
445 + struct buffer_head *bh;
446 + int depth, i, ppos = 0;
449 + EXT_ASSERT(tree->inode);
450 + EXT_ASSERT(tree->root);
452 + eh = EXT_ROOT_HDR(tree);
454 + if (ext3_ext_check_header(eh))
457 + i = depth = EXT_DEPTH(tree);
458 + EXT_ASSERT(eh->eh_max);
459 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
461 + /* account possible depth increase */
463 + path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
466 + return ERR_PTR(-ENOMEM);
468 + memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
469 + path[0].p_hdr = eh;
471 + /* walk through the tree */
473 + ext_debug(tree, "depth %d: num %d, max %d\n",
474 + ppos, eh->eh_entries, eh->eh_max);
475 + ext3_ext_binsearch_idx(tree, path + ppos, block);
476 + path[ppos].p_block = path[ppos].p_idx->ei_leaf;
477 + path[ppos].p_depth = i;
478 + path[ppos].p_ext = NULL;
480 + bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
483 + eh = EXT_BLOCK_HDR(bh);
485 + EXT_ASSERT(ppos <= depth);
486 + path[ppos].p_bh = bh;
487 + path[ppos].p_hdr = eh;
490 + if (ext3_ext_check_header(eh))
494 + path[ppos].p_depth = i;
495 + path[ppos].p_hdr = eh;
496 + path[ppos].p_ext = NULL;
497 + path[ppos].p_idx = NULL;
499 + if (ext3_ext_check_header(eh))
503 + ext3_ext_binsearch(tree, path + ppos, block);
505 + ext3_ext_show_path(tree, path);
510 + printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
511 + ext3_ext_drop_refs(path);
513 + return ERR_PTR(-EIO);
517 + * insert new index [logical;ptr] into the block at curp
518 + * it checks where to insert: before curp or after curp
520 +static int ext3_ext_insert_index(handle_t *handle,
521 + struct ext3_extents_tree *tree,
522 + struct ext3_ext_path *curp,
523 + int logical, int ptr)
525 + struct ext3_extent_idx *ix;
528 + if ((err = ext3_ext_get_access(handle, tree, curp)))
531 + EXT_ASSERT(logical != curp->p_idx->ei_block);
532 + len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
533 + if (logical > curp->p_idx->ei_block) {
535 + if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
536 + len = (len - 1) * sizeof(struct ext3_extent_idx);
537 + len = len < 0 ? 0 : len;
538 + ext_debug(tree, "insert new index %d after: %d. "
539 + "move %d from 0x%p to 0x%p\n",
541 + (curp->p_idx + 1), (curp->p_idx + 2));
542 + memmove(curp->p_idx + 2, curp->p_idx + 1, len);
544 + ix = curp->p_idx + 1;
546 + /* insert before */
547 + len = len * sizeof(struct ext3_extent_idx);
548 + len = len < 0 ? 0 : len;
549 + ext_debug(tree, "insert new index %d before: %d. "
550 + "move %d from 0x%p to 0x%p\n",
552 + curp->p_idx, (curp->p_idx + 1));
553 + memmove(curp->p_idx + 1, curp->p_idx, len);
557 + ix->ei_block = logical;
559 + curp->p_hdr->eh_entries++;
561 + EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
562 + EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
564 + err = ext3_ext_dirty(handle, tree, curp);
565 + ext3_std_error(tree->inode->i_sb, err);
571 + * routine inserts new subtree into the path, using free index entry
573 + * - allocates all needed blocks (new leaf and all intermediate index blocks)
574 + * - makes decision where to split
575 + * - moves remaining extents and index entries (right to the split point)
576 + * into the newly allocated blocks
577 + * - initialize subtree
579 +static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
580 + struct ext3_ext_path *path,
581 + struct ext3_extent *newext, int at)
583 + struct buffer_head *bh = NULL;
584 + int depth = EXT_DEPTH(tree);
585 + struct ext3_extent_header *neh;
586 + struct ext3_extent_idx *fidx;
587 + struct ext3_extent *ex;
588 + int i = at, k, m, a;
589 + unsigned long newblock, oldblock, border;
590 + int *ablocks = NULL; /* array of allocated blocks */
593 + /* make decision: where to split? */
594 + /* FIXME: now decision is simplest: at current extent */
596 + /* if current leaf will be split, then we should use
597 + * border from split point */
598 + EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
599 + if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
600 + border = path[depth].p_ext[1].ee_block;
601 + ext_debug(tree, "leaf will be splitted."
602 + " next leaf starts at %d\n",
605 + border = newext->ee_block;
606 + ext_debug(tree, "leaf will be added."
607 + " next leaf starts at %d\n",
612 + * if error occurs, then we break processing
613 + * and turn filesystem read-only. so, index won't
614 + * be inserted and tree will be in consistent
615 + * state. next mount will repair buffers too
619 + * get array to track all allocated blocks
620 + * we need this to handle errors and free blocks
623 + ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
626 + memset(ablocks, 0, sizeof(unsigned long) * depth);
628 + /* allocate all needed blocks */
629 + ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
630 + for (a = 0; a < depth - at; a++) {
631 + newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
634 + ablocks[a] = newblock;
637 + /* initialize new leaf */
638 + newblock = ablocks[--a];
639 + EXT_ASSERT(newblock);
640 + bh = sb_getblk(tree->inode->i_sb, newblock);
647 + if ((err = ext3_journal_get_create_access(handle, bh)))
650 + neh = EXT_BLOCK_HDR(bh);
651 + neh->eh_entries = 0;
652 + neh->eh_max = ext3_ext_space_block(tree);
653 + neh->eh_magic = EXT3_EXT_MAGIC;
655 + ex = EXT_FIRST_EXTENT(neh);
657 + /* move remain of path[depth] to the new leaf */
658 + EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
659 + /* start copy from next extent */
660 + /* TODO: we could do it by single memmove */
662 + path[depth].p_ext++;
663 + while (path[depth].p_ext <=
664 + EXT_MAX_EXTENT(path[depth].p_hdr)) {
665 + ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
666 + path[depth].p_ext->ee_block,
667 + path[depth].p_ext->ee_start,
668 + path[depth].p_ext->ee_len,
670 + memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
674 + mark_buffer_uptodate(bh, 1);
677 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
682 + /* correct old leaf */
684 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
686 + path[depth].p_hdr->eh_entries -= m;
687 + if ((err = ext3_ext_dirty(handle, tree, path + depth)))
692 + /* create intermediate indexes */
693 + k = depth - at - 1;
694 + EXT_ASSERT(k >= 0);
696 + ext_debug(tree, "create %d intermediate indices\n", k);
697 + /* insert new index into current index block */
698 + /* current depth stored in i var */
701 + oldblock = newblock;
702 + newblock = ablocks[--a];
703 + bh = sb_getblk(tree->inode->i_sb, newblock);
710 + if ((err = ext3_journal_get_create_access(handle, bh)))
713 + neh = EXT_BLOCK_HDR(bh);
714 + neh->eh_entries = 1;
715 + neh->eh_magic = EXT3_EXT_MAGIC;
716 + neh->eh_max = ext3_ext_space_block_idx(tree);
717 + neh->eh_depth = depth - i;
718 + fidx = EXT_FIRST_INDEX(neh);
719 + fidx->ei_block = border;
720 + fidx->ei_leaf = oldblock;
722 + ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
723 + i, newblock, border, oldblock);
728 + ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
729 + EXT_MAX_INDEX(path[i].p_hdr));
730 + EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
731 + EXT_LAST_INDEX(path[i].p_hdr));
732 + while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
733 + ext_debug(tree, "%d: move %d:%d in new index %lu\n",
734 + i, path[i].p_idx->ei_block,
735 + path[i].p_idx->ei_leaf, newblock);
736 + memmove(++fidx, path[i].p_idx++,
737 + sizeof(struct ext3_extent_idx));
739 + EXT_ASSERT(neh->eh_entries <= neh->eh_max);
742 + mark_buffer_uptodate(bh, 1);
745 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
750 + /* correct old index */
752 + err = ext3_ext_get_access(handle, tree, path + i);
755 + path[i].p_hdr->eh_entries -= m;
756 + err = ext3_ext_dirty(handle, tree, path + i);
764 + /* insert new index */
766 + err = ext3_ext_insert_index(handle, tree, path + at,
771 + if (buffer_locked(bh))
777 + /* free all allocated blocks in error case */
778 + for (i = 0; i < depth; i++) {
781 + ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
790 + * routine implements tree growing procedure:
791 + * - allocates new block
792 + * - moves top-level data (index block or leaf) into the new block
793 + * - initialize new top-level, creating index that points to the
794 + * just created block
796 +static int ext3_ext_grow_indepth(handle_t *handle,
797 + struct ext3_extents_tree *tree,
798 + struct ext3_ext_path *path,
799 + struct ext3_extent *newext)
801 + struct ext3_ext_path *curp = path;
802 + struct ext3_extent_header *neh;
803 + struct ext3_extent_idx *fidx;
804 + struct buffer_head *bh;
805 + unsigned long newblock;
808 + newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
812 + bh = sb_getblk(tree->inode->i_sb, newblock);
815 + ext3_std_error(tree->inode->i_sb, err);
820 + if ((err = ext3_journal_get_create_access(handle, bh))) {
825 + /* move top-level index/leaf into new block */
826 + memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
828 + /* set size of new block */
829 + neh = EXT_BLOCK_HDR(bh);
830 + /* old root could have indexes or leaves
831 + * so calculate eh_max right way */
832 + if (EXT_DEPTH(tree))
833 + neh->eh_max = ext3_ext_space_block_idx(tree);
835 + neh->eh_max = ext3_ext_space_block(tree);
836 + neh->eh_magic = EXT3_EXT_MAGIC;
837 + mark_buffer_uptodate(bh, 1);
840 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
843 + /* create index in new top-level index: num,max,pointer */
844 + if ((err = ext3_ext_get_access(handle, tree, curp)))
847 + curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
848 + curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
849 + curp->p_hdr->eh_entries = 1;
850 + curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
851 + /* FIXME: it works, but actually path[0] can be index */
852 + curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
853 + curp->p_idx->ei_leaf = newblock;
855 + neh = EXT_ROOT_HDR(tree);
856 + fidx = EXT_FIRST_INDEX(neh);
857 + ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
858 + neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
860 + neh->eh_depth = path->p_depth + 1;
861 + err = ext3_ext_dirty(handle, tree, curp);
869 + * routine finds empty index and adds new leaf. if no free index found
870 + * then it requests in-depth growing
872 +static int ext3_ext_create_new_leaf(handle_t *handle,
873 + struct ext3_extents_tree *tree,
874 + struct ext3_ext_path *path,
875 + struct ext3_extent *newext)
877 + struct ext3_ext_path *curp;
878 + int depth, i, err = 0;
881 + i = depth = EXT_DEPTH(tree);
883 + /* walk up to the tree and look for free index entry */
884 + curp = path + depth;
885 + while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
890 + /* we use already allocated block for index block
891 + * so, subsequent data blocks should be contiguous */
892 + if (EXT_HAS_FREE_INDEX(curp)) {
893 + /* if we found index with free entry, then use that
894 + * entry: create all needed subtree and add new leaf */
895 + err = ext3_ext_split(handle, tree, path, newext, i);
898 + ext3_ext_drop_refs(path);
899 + path = ext3_ext_find_extent(tree, newext->ee_block, path);
901 + err = PTR_ERR(path);
903 + /* tree is full, time to grow in depth */
904 + err = ext3_ext_grow_indepth(handle, tree, path, newext);
907 + ext3_ext_drop_refs(path);
908 + path = ext3_ext_find_extent(tree, newext->ee_block, path);
910 + err = PTR_ERR(path);
913 + * only first (depth 0 -> 1) produces free space
914 + * in all other cases we have to split the grown tree
916 + depth = EXT_DEPTH(tree);
917 + if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
918 + /* now we need split */
930 + * returns allocated block in subsequent extent or EXT_MAX_BLOCK
931 + * NOTE: it considers the block number from an index entry as an
932 + * allocated block. thus, index entries have to be consistent
935 +static unsigned long
936 +ext3_ext_next_allocated_block(struct ext3_ext_path *path)
940 + EXT_ASSERT(path != NULL);
941 + depth = path->p_depth;
943 + if (depth == 0 && path->p_ext == NULL)
944 + return EXT_MAX_BLOCK;
946 + /* FIXME: what if index isn't full ?! */
947 + while (depth >= 0) {
948 + if (depth == path->p_depth) {
950 + if (path[depth].p_ext !=
951 + EXT_LAST_EXTENT(path[depth].p_hdr))
952 + return path[depth].p_ext[1].ee_block;
955 + if (path[depth].p_idx !=
956 + EXT_LAST_INDEX(path[depth].p_hdr))
957 + return path[depth].p_idx[1].ei_block;
962 + return EXT_MAX_BLOCK;
966 + * returns first allocated block from next leaf or EXT_MAX_BLOCK
968 +static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
969 + struct ext3_ext_path *path)
973 + EXT_ASSERT(path != NULL);
974 + depth = path->p_depth;
976 + /* zero-tree has no leaf blocks at all */
978 + return EXT_MAX_BLOCK;
980 + /* go to index block */
983 + while (depth >= 0) {
984 + if (path[depth].p_idx !=
985 + EXT_LAST_INDEX(path[depth].p_hdr))
986 + return path[depth].p_idx[1].ei_block;
990 + return EXT_MAX_BLOCK;
994 + * if leaf gets modified and modified extent is first in the leaf
995 + * then we have to correct all indexes above
996 + * TODO: do we need to correct tree in all cases?
998 +int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
999 + struct ext3_ext_path *path)
1001 + struct ext3_extent_header *eh;
1002 + int depth = EXT_DEPTH(tree);
1003 + struct ext3_extent *ex;
1004 + unsigned long border;
1007 + eh = path[depth].p_hdr;
1008 + ex = path[depth].p_ext;
1013 + /* there is no tree at all */
1017 + if (ex != EXT_FIRST_EXTENT(eh)) {
1018 + /* we correct tree if first leaf got modified only */
1023 + * TODO: we need correction if border is smaller than the current one
1026 + border = path[depth].p_ext->ee_block;
1027 + if ((err = ext3_ext_get_access(handle, tree, path + k)))
1029 + path[k].p_idx->ei_block = border;
1030 + if ((err = ext3_ext_dirty(handle, tree, path + k)))
1034 + /* change all left-side indexes */
1035 + if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1037 + if ((err = ext3_ext_get_access(handle, tree, path + k)))
1039 + path[k].p_idx->ei_block = border;
1040 + if ((err = ext3_ext_dirty(handle, tree, path + k)))
1048 +ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
1049 + struct ext3_extent *ex1,
1050 + struct ext3_extent *ex2)
1052 + if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
1055 +#ifdef AGRESSIVE_TEST
1056 + if (ex1->ee_len >= 4)
1060 + if (!tree->ops->mergable)
1063 + return tree->ops->mergable(ex1, ex2);
1067 + * this routine tries to merge the requested extent into the existing
1068 + * extent or inserts requested extent as new one into the tree,
1069 + * creating new leaf in no-space case
1071 +int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
1072 + struct ext3_ext_path *path,
1073 + struct ext3_extent *newext)
1075 + struct ext3_extent_header * eh;
1076 + struct ext3_extent *ex, *fex;
1077 + struct ext3_extent *nearex; /* nearest extent */
1078 + struct ext3_ext_path *npath = NULL;
1079 + int depth, len, err, next;
1081 + EXT_ASSERT(newext->ee_len > 0);
1082 + depth = EXT_DEPTH(tree);
1083 + ex = path[depth].p_ext;
1084 + EXT_ASSERT(path[depth].p_hdr);
1086 + /* try to insert block into found extent and return */
1087 + if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
1088 + ext_debug(tree, "append %d block to %d:%d (from %d)\n",
1089 + newext->ee_len, ex->ee_block, ex->ee_len,
1091 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
1093 + ex->ee_len += newext->ee_len;
1094 + eh = path[depth].p_hdr;
1100 + depth = EXT_DEPTH(tree);
1101 + eh = path[depth].p_hdr;
1102 + if (eh->eh_entries < eh->eh_max)
1105 + /* probably next leaf has space for us? */
1106 + fex = EXT_LAST_EXTENT(eh);
1107 + next = ext3_ext_next_leaf_block(tree, path);
1108 + if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
1109 + ext_debug(tree, "next leaf block - %d\n", next);
1110 + EXT_ASSERT(!npath);
1111 + npath = ext3_ext_find_extent(tree, next, NULL);
1112 + if (IS_ERR(npath))
1113 + return PTR_ERR(npath);
1114 + EXT_ASSERT(npath->p_depth == path->p_depth);
1115 + eh = npath[depth].p_hdr;
1116 + if (eh->eh_entries < eh->eh_max) {
1117 + ext_debug(tree, "next leaf isnt full(%d)\n",
1122 + ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
1123 + eh->eh_entries, eh->eh_max);
1127 + * there is no free space in found leaf
1128 + * we're gonna add new leaf in the tree
1130 + err = ext3_ext_create_new_leaf(handle, tree, path, newext);
1133 + depth = EXT_DEPTH(tree);
1134 + eh = path[depth].p_hdr;
1137 + nearex = path[depth].p_ext;
1139 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
1143 + /* there is no extent in this leaf, create first one */
1144 + ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
1145 + newext->ee_block, newext->ee_start,
1147 + path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1148 + } else if (newext->ee_block > nearex->ee_block) {
1149 + EXT_ASSERT(newext->ee_block != nearex->ee_block);
1150 + if (nearex != EXT_LAST_EXTENT(eh)) {
1151 + len = EXT_MAX_EXTENT(eh) - nearex;
1152 + len = (len - 1) * sizeof(struct ext3_extent);
1153 + len = len < 0 ? 0 : len;
1154 + ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
1155 + "move %d from 0x%p to 0x%p\n",
1156 + newext->ee_block, newext->ee_start,
1158 + nearex, len, nearex + 1, nearex + 2);
1159 + memmove(nearex + 2, nearex + 1, len);
1161 + path[depth].p_ext = nearex + 1;
1163 + EXT_ASSERT(newext->ee_block != nearex->ee_block);
1164 + len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
1165 + len = len < 0 ? 0 : len;
1166 + ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
1167 + "move %d from 0x%p to 0x%p\n",
1168 + newext->ee_block, newext->ee_start, newext->ee_len,
1169 + nearex, len, nearex + 1, nearex + 2);
1170 + memmove(nearex + 1, nearex, len);
1171 + path[depth].p_ext = nearex;
1175 + nearex = path[depth].p_ext;
1176 + nearex->ee_block = newext->ee_block;
1177 + nearex->ee_start = newext->ee_start;
1178 + nearex->ee_len = newext->ee_len;
1179 + /* FIXME: support for large fs */
1180 + nearex->ee_start_hi = 0;
1183 + /* try to merge extents to the right */
1184 + while (nearex < EXT_LAST_EXTENT(eh)) {
1185 + if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
1187 + /* merge with next extent! */
1188 + nearex->ee_len += nearex[1].ee_len;
1189 + if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1190 + len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1191 + sizeof(struct ext3_extent);
1192 + memmove(nearex + 1, nearex + 2, len);
1195 + EXT_ASSERT(eh->eh_entries > 0);
1198 + /* try to merge extents to the left */
1200 + /* time to correct all indexes above */
1201 + err = ext3_ext_correct_indexes(handle, tree, path);
1205 + err = ext3_ext_dirty(handle, tree, path + depth);
1209 + ext3_ext_drop_refs(npath);
1212 + ext3_ext_tree_changed(tree);
1213 + ext3_ext_invalidate_cache(tree);
1217 +int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
1218 + unsigned long num, ext_prepare_callback func)
1220 + struct ext3_ext_path *path = NULL;
1221 + struct ext3_ext_cache cbex;
1222 + struct ext3_extent *ex;
1223 + unsigned long next, start = 0, end = 0;
1224 + unsigned long last = block + num;
1225 + int depth, exists, err = 0;
1229 + EXT_ASSERT(tree->inode);
1230 + EXT_ASSERT(tree->root);
1232 + while (block < last && block != EXT_MAX_BLOCK) {
1233 + num = last - block;
1234 + /* find extent for this block */
1235 + path = ext3_ext_find_extent(tree, block, path);
1236 + if (IS_ERR(path)) {
1237 + err = PTR_ERR(path);
1242 + depth = EXT_DEPTH(tree);
1243 + EXT_ASSERT(path[depth].p_hdr);
1244 + ex = path[depth].p_ext;
1245 + next = ext3_ext_next_allocated_block(path);
1249 + /* there is no extent yet, so try to allocate
1250 + * all requested space */
1252 + end = block + num;
1253 + } else if (ex->ee_block > block) {
1254 + /* need to allocate space before found extent */
1256 + end = ex->ee_block;
1257 + if (block + num < end)
1258 + end = block + num;
1259 + } else if (block >= ex->ee_block + ex->ee_len) {
1260 + /* need to allocate space after found extent */
1262 + end = block + num;
1265 + } else if (block >= ex->ee_block) {
1267 + * some part of requested space is covered
1271 + end = ex->ee_block + ex->ee_len;
1272 + if (block + num < end)
1273 + end = block + num;
1278 + EXT_ASSERT(end > start);
1281 + cbex.ec_block = start;
1282 + cbex.ec_len = end - start;
1283 + cbex.ec_start = 0;
1284 + cbex.ec_type = EXT3_EXT_CACHE_GAP;
1286 + cbex.ec_block = ex->ee_block;
1287 + cbex.ec_len = ex->ee_len;
1288 + cbex.ec_start = ex->ee_start;
1289 + cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
1292 + EXT_ASSERT(cbex.ec_len > 0);
1293 + EXT_ASSERT(path[depth].p_hdr);
1294 + err = func(tree, path, &cbex);
1295 + ext3_ext_drop_refs(path);
1299 + if (err == EXT_REPEAT)
1301 + else if (err == EXT_BREAK) {
1306 + if (EXT_DEPTH(tree) != depth) {
1307 + /* depth was changed. we have to realloc path */
1312 + block = cbex.ec_block + cbex.ec_len;
1316 + ext3_ext_drop_refs(path);
1324 +ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
1325 + __u32 len, __u32 start, int type)
1327 + EXT_ASSERT(len > 0);
1329 + tree->cex->ec_type = type;
1330 + tree->cex->ec_block = block;
1331 + tree->cex->ec_len = len;
1332 + tree->cex->ec_start = start;
1337 + * this routine calculates the boundaries of the gap the requested block fits into
1338 + * and cache this gap
1341 +ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
1342 + struct ext3_ext_path *path,
1343 + unsigned long block)
1345 + int depth = EXT_DEPTH(tree);
1346 + unsigned long lblock, len;
1347 + struct ext3_extent *ex;
1352 + ex = path[depth].p_ext;
1354 + /* there is no extent yet, so gap is [0;-] */
1356 + len = EXT_MAX_BLOCK;
1357 + ext_debug(tree, "cache gap(whole file):");
1358 + } else if (block < ex->ee_block) {
1360 + len = ex->ee_block - block;
1361 + ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
1362 + (unsigned long) block,
1363 + (unsigned long) ex->ee_block,
1364 + (unsigned long) ex->ee_len);
1365 + } else if (block >= ex->ee_block + ex->ee_len) {
1366 + lblock = ex->ee_block + ex->ee_len;
1367 + len = ext3_ext_next_allocated_block(path);
1368 + ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
1369 + (unsigned long) ex->ee_block,
1370 + (unsigned long) ex->ee_len,
1371 + (unsigned long) block);
1372 + EXT_ASSERT(len > lblock);
1373 + len = len - lblock;
1379 + ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
1380 + ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
1384 +ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
1385 + struct ext3_extent *ex)
1387 + struct ext3_ext_cache *cex = tree->cex;
1389 + /* is there cache storage at all? */
1391 + return EXT3_EXT_CACHE_NO;
1393 + /* has cache valid data? */
1394 + if (cex->ec_type == EXT3_EXT_CACHE_NO)
1395 + return EXT3_EXT_CACHE_NO;
1397 + EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
1398 + cex->ec_type == EXT3_EXT_CACHE_EXTENT);
1399 + if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1400 + ex->ee_block = cex->ec_block;
1401 + ex->ee_start = cex->ec_start;
1402 + ex->ee_len = cex->ec_len;
1403 + ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
1404 + (unsigned long) block,
1405 + (unsigned long) ex->ee_block,
1406 + (unsigned long) ex->ee_len,
1407 + (unsigned long) ex->ee_start);
1408 + return cex->ec_type;
1411 + /* not in cache */
1412 + return EXT3_EXT_CACHE_NO;
1416 + * this routine removes an index from the index block;
1417 + * it's used in the truncate case only, thus all requests are for
1418 + * the last index in the block only
1420 +int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
1421 + struct ext3_ext_path *path)
1423 + struct buffer_head *bh;
1426 + /* free index block */
1428 + EXT_ASSERT(path->p_hdr->eh_entries);
1429 + if ((err = ext3_ext_get_access(handle, tree, path)))
1431 + path->p_hdr->eh_entries--;
1432 + if ((err = ext3_ext_dirty(handle, tree, path)))
1434 + ext_debug(tree, "index is empty, remove it, free block %d\n",
1435 + path->p_idx->ei_leaf);
1436 + bh = sb_get_hash_table(tree->inode->i_sb, path->p_idx->ei_leaf);
1437 + ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
1438 + ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
1442 +int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
1443 + struct ext3_ext_path *path)
1445 + int depth = EXT_DEPTH(tree);
1449 + /* probably there is space in leaf? */
1450 + if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
1455 + * the worst case we're expecting is creation of the
1456 + * new root (growing in depth) with index splitting
1457 + * for splitting we have to consider depth + 1 because
1458 + * previous growing could increase it
1460 + depth = depth + 1;
1463 + * growing in depth:
1464 + * block allocation + new root + old root
1466 + needed = EXT3_ALLOC_NEEDED + 2;
1468 + /* index split. we may need:
1469 + * allocate intermediate indexes and new leaf
1470 + * change two blocks at each level, but root
1471 + * modify root block (inode)
1473 + needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
1479 +ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
1480 + struct ext3_ext_path *path, unsigned long start,
1481 + unsigned long end)
1483 + struct ext3_extent *ex, tex;
1484 + struct ext3_ext_path *npath;
1485 + int depth, creds, err;
1487 + depth = EXT_DEPTH(tree);
1488 + ex = path[depth].p_ext;
1490 + EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
1491 + EXT_ASSERT(ex->ee_block < start);
1493 + /* calculate tail extent */
1494 + tex.ee_block = end + 1;
1495 + EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
1496 + tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
1498 + creds = ext3_ext_calc_credits_for_insert(tree, path);
1499 + handle = ext3_ext_journal_restart(handle, creds);
1500 + if (IS_ERR(handle))
1501 + return PTR_ERR(handle);
1503 + /* calculate head extent. use primary extent */
1504 + err = ext3_ext_get_access(handle, tree, path + depth);
1507 + ex->ee_len = start - ex->ee_block;
1508 + err = ext3_ext_dirty(handle, tree, path + depth);
1512 + /* FIXME: some callback to free underlying resource
1513 + * and correct ee_start? */
1514 + ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
1515 + ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
1517 + npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
1518 + if (IS_ERR(npath))
1519 + return PTR_ERR(npath);
1520 + depth = EXT_DEPTH(tree);
1521 + EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
1522 + EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
1524 + err = ext3_ext_insert_extent(handle, tree, npath, &tex);
1525 + ext3_ext_drop_refs(npath);
1532 +ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
1533 + struct ext3_ext_path *path, unsigned long start,
1534 + unsigned long end)
1536 + struct ext3_extent *ex, *fu = NULL, *lu, *le;
1537 + int err = 0, correct_index = 0;
1538 + int depth = EXT_DEPTH(tree), credits;
1539 + struct ext3_extent_header *eh;
1540 + unsigned a, b, block, num;
1542 + ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
1543 + if (!path[depth].p_hdr)
1544 + path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
1545 + eh = path[depth].p_hdr;
1547 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
1548 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
1550 + /* find where to start removing */
1551 + le = ex = EXT_LAST_EXTENT(eh);
1552 + while (ex != EXT_FIRST_EXTENT(eh)) {
1553 + if (ex->ee_block <= end)
1558 + if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
1559 + /* removal of an internal part of the extent was requested;
1560 + * the tail and head must be placed in different extents,
1561 + * so we have to insert one more extent */
1562 + path[depth].p_ext = ex;
1563 + return ext3_ext_split_for_rm(handle, tree, path, start, end);
1567 + while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
1568 + ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
1569 + path[depth].p_ext = ex;
1571 + a = ex->ee_block > start ? ex->ee_block : start;
1572 + b = ex->ee_block + ex->ee_len - 1 < end ?
1573 + ex->ee_block + ex->ee_len - 1 : end;
1575 + ext_debug(tree, " border %u:%u\n", a, b);
1577 + if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
1581 + } else if (a != ex->ee_block) {
1582 + /* remove tail of the extent */
1583 + block = ex->ee_block;
1585 + } else if (b != ex->ee_block + ex->ee_len - 1) {
1586 + /* remove head of the extent */
1590 + /* remove the whole extent: excellent! */
1591 + block = ex->ee_block;
1593 + EXT_ASSERT(a == ex->ee_block &&
1594 + b == ex->ee_block + ex->ee_len - 1);
1597 + if (ex == EXT_FIRST_EXTENT(eh))
1598 + correct_index = 1;
1601 + if (correct_index)
1602 + credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
1603 + if (tree->ops->remove_extent_credits)
1604 + credits+=tree->ops->remove_extent_credits(tree,ex,a,b);
1606 + handle = ext3_ext_journal_restart(handle, credits);
1607 + if (IS_ERR(handle)) {
1608 + err = PTR_ERR(handle);
1612 + err = ext3_ext_get_access(handle, tree, path + depth);
1616 + if (tree->ops->remove_extent)
1617 + err = tree->ops->remove_extent(tree, ex, a, b);
1622 + /* this extent is removed entirely; mark the slot unused */
1628 + ex->ee_block = block;
1631 + err = ext3_ext_dirty(handle, tree, path + depth);
1635 + ext_debug(tree, "new extent: %u:%u:%u\n",
1636 + ex->ee_block, ex->ee_len, ex->ee_start);
1641 + /* reuse unused slots */
1643 + if (lu->ee_start) {
1652 + if (correct_index && eh->eh_entries)
1653 + err = ext3_ext_correct_indexes(handle, tree, path);
1655 + /* if this leaf is free, then we should
1656 + * remove it from index block above */
1657 + if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
1658 + err = ext3_ext_rm_idx(handle, tree, path + depth);
1665 +static struct ext3_extent_idx *
1666 +ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
1668 + struct ext3_extent_idx *ix;
1670 + ix = EXT_LAST_INDEX(hdr);
1671 + while (ix != EXT_FIRST_INDEX(hdr)) {
1672 + if (ix->ei_block <= block)
1680 + * returns 1 if current index have to be freed (even partial)
1683 +ext3_ext_more_to_rm(struct ext3_ext_path *path)
1685 + EXT_ASSERT(path->p_idx);
1687 + if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1691 + * if a truncate at a deeper level happened, it wasn't partial,
1692 + * so we have to consider the current index for truncation
1694 + if (path->p_hdr->eh_entries == path->p_block)
1699 +int ext3_ext_remove_space(struct ext3_extents_tree *tree,
1700 + unsigned long start, unsigned long end)
1702 + struct inode *inode = tree->inode;
1703 + struct super_block *sb = inode->i_sb;
1704 + int depth = EXT_DEPTH(tree);
1705 + struct ext3_ext_path *path;
1707 + int i = 0, err = 0;
1709 + ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
1711 + /* probably the first extent we're going to free will be the last in the block */
1712 + handle = ext3_journal_start(inode, depth + 1);
1713 + if (IS_ERR(handle))
1714 + return PTR_ERR(handle);
1716 + ext3_ext_invalidate_cache(tree);
1719 + * we start scanning from right side freeing all the blocks
1720 + * after i_size and walking into the deep
1722 + path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
1723 + if (IS_ERR(path)) {
1724 + ext3_error(sb, __FUNCTION__, "Can't allocate path array");
1725 + ext3_journal_stop(handle, inode);
1728 + memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
1729 + path[i].p_hdr = EXT_ROOT_HDR(tree);
1731 + while (i >= 0 && err == 0) {
1733 + /* this is leaf block */
1734 + err = ext3_ext_rm_leaf(handle, tree, path, start, end);
1735 + /* the root level has p_bh == NULL; brelse() handles this */
1736 + brelse(path[i].p_bh);
1741 + /* this is index block */
1742 + if (!path[i].p_hdr) {
1743 + ext_debug(tree, "initialize header\n");
1744 + path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
1747 + EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
1748 + EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
1750 + if (!path[i].p_idx) {
1751 + /* this level hasn't been touched yet */
1753 + ext3_ext_last_covered(path[i].p_hdr, end);
1754 + path[i].p_block = path[i].p_hdr->eh_entries + 1;
1755 + ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
1756 + path[i].p_hdr, path[i].p_hdr->eh_entries);
1758 + /* we've already been here; look at the next index */
1762 + ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
1763 + i, EXT_FIRST_INDEX(path[i].p_hdr),
1765 + if (ext3_ext_more_to_rm(path + i)) {
1766 + /* go to the next level */
1767 + ext_debug(tree, "move to level %d (block %d)\n",
1768 + i + 1, path[i].p_idx->ei_leaf);
1769 + memset(path + i + 1, 0, sizeof(*path));
1770 + path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
1771 + if (!path[i+1].p_bh) {
1772 + /* should we reset i_size? */
1776 + /* record the actual number of indexes to know whether this
1777 + * number gets changed at the next iteration */
1778 + path[i].p_block = path[i].p_hdr->eh_entries;
1781 + /* we finish processing this index, go up */
1782 + if (path[i].p_hdr->eh_entries == 0 && i > 0) {
1783 + /* index is empty, remove it
1784 + * handle must be already prepared by the
1785 + * truncatei_leaf() */
1786 + err = ext3_ext_rm_idx(handle, tree, path + i);
1788 + /* the root level has p_bh == NULL; brelse() handles this */
1789 + brelse(path[i].p_bh);
1791 + ext_debug(tree, "return to level %d\n", i);
1795 + /* TODO: flexible tree reduction should be here */
1796 + if (path->p_hdr->eh_entries == 0) {
1798 + * truncate to zero freed all the tree
1799 + * so, we need to correct eh_depth
1801 + err = ext3_ext_get_access(handle, tree, path);
1803 + EXT_ROOT_HDR(tree)->eh_depth = 0;
1804 + EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
1805 + err = ext3_ext_dirty(handle, tree, path);
1808 + ext3_ext_tree_changed(tree);
1811 + ext3_journal_stop(handle, inode);
1817 + * called at mount time
1819 +void ext3_ext_init(struct super_block *sb)
1822 + * possible initialization would be here
1825 + if (test_opt(sb, EXTENTS)) {
1826 + printk("EXT3-fs: file extents enabled");
1827 +#ifdef AGRESSIVE_TEST
1828 + printk(", agressive tests");
1830 +#ifdef CHECK_BINSEARCH
1831 + printk(", check binsearch");
1838 + * called at umount time
1840 +void ext3_ext_release(struct super_block *sb)
1844 +/************************************************************************
1845 + * VFS related routines
1846 + ************************************************************************/
1848 +static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
1850 + /* we use in-core data, not bh */
1854 +static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
1856 + struct inode *inode = buffer;
1857 + return ext3_mark_inode_dirty(handle, inode);
1860 +static int ext3_ext_mergable(struct ext3_extent *ex1,
1861 + struct ext3_extent *ex2)
1863 + /* FIXME: support for large fs */
1864 + if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
1870 +ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
1871 + struct ext3_extent *ex,
1872 + unsigned long from, unsigned long to)
1876 + /* at present, extent can't cross block group */;
1877 + needed = 4; /* bitmap + group desc + sb + inode */
1879 +#ifdef CONFIG_QUOTA
1880 + needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
1886 +ext3_remove_blocks(struct ext3_extents_tree *tree,
1887 + struct ext3_extent *ex,
1888 + unsigned long from, unsigned long to)
1890 + int needed = ext3_remove_blocks_credits(tree, ex, from, to);
1891 + handle_t *handle = ext3_journal_start(tree->inode, needed);
1892 + struct buffer_head *bh;
1895 + if (IS_ERR(handle))
1896 + return PTR_ERR(handle);
1897 + if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
1898 + /* tail removal */
1899 + unsigned long num, start;
1900 + num = ex->ee_block + ex->ee_len - from;
1901 + start = ex->ee_start + ex->ee_len - num;
1902 + ext_debug(tree, "free last %lu blocks starting %lu\n",
1904 + for (i = 0; i < num; i++) {
1905 + bh = sb_get_hash_table(tree->inode->i_sb, start + i);
1906 + ext3_forget(handle, 0, tree->inode, bh, start + i);
1908 + ext3_free_blocks(handle, tree->inode, start, num);
1909 + } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
1910 + printk("strange request: removal %lu-%lu from %u:%u\n",
1911 + from, to, ex->ee_block, ex->ee_len);
1913 + printk("strange request: removal(2) %lu-%lu from %u:%u\n",
1914 + from, to, ex->ee_block, ex->ee_len);
1916 + ext3_journal_stop(handle, tree->inode);
1920 +int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
1921 + unsigned long block)
1923 + struct ext3_inode_info *ei = EXT3_I(inode);
1924 + unsigned long bg_start;
1925 + unsigned long colour;
1929 + struct ext3_extent *ex;
1930 + depth = path->p_depth;
1932 + /* try to predict block placement */
1933 + if ((ex = path[depth].p_ext))
1934 + return ex->ee_start + (block - ex->ee_block);
1936 + /* it looks like the index is empty;
1937 + * try to find a goal starting from the index block itself */
1938 + if (path[depth].p_bh)
1939 + return path[depth].p_bh->b_blocknr;
1942 + /* OK. use inode's group */
1943 + bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
1944 + le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
1945 + colour = (current->pid % 16) *
1946 + (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
1947 + return bg_start + colour + block;
1950 +static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
1951 + struct ext3_ext_path *path,
1952 + struct ext3_extent *ex, int *err)
1954 + struct inode *inode = tree->inode;
1955 + int newblock, goal;
1959 + EXT_ASSERT(ex->ee_start);
1960 + EXT_ASSERT(ex->ee_len);
1962 + /* reuse block from the extent to order data/metadata */
1963 + newblock = ex->ee_start++;
1965 + if (ex->ee_len == 0) {
1967 + /* allocate new block for the extent */
1968 + goal = ext3_ext_find_goal(inode, path, ex->ee_block);
1970 + ex->ee_start = ext3_new_block(handle, inode, goal, 0, 0, err);
1972 + if (ex->ee_start == 0) {
1973 + /* an error occurred: restore the old extent */
1974 + ex->ee_start = newblock;
1981 +static struct ext3_extents_helpers ext3_blockmap_helpers = {
1982 + .get_write_access = ext3_get_inode_write_access,
1983 + .mark_buffer_dirty = ext3_mark_buffer_dirty,
1984 + .mergable = ext3_ext_mergable,
1985 + .new_block = ext3_new_block_cb,
1986 + .remove_extent = ext3_remove_blocks,
1987 + .remove_extent_credits = ext3_remove_blocks_credits,
1990 +void ext3_init_tree_desc(struct ext3_extents_tree *tree,
1991 + struct inode *inode)
1993 + tree->inode = inode;
1994 + tree->root = (void *) EXT3_I(inode)->i_data;
1995 + tree->buffer = (void *) inode;
1996 + tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
1997 + tree->cex = (struct ext3_ext_cache *) &EXT3_I(inode)->i_cached_extent;
1998 + tree->ops = &ext3_blockmap_helpers;
2001 +int ext3_ext_get_block(handle_t *handle, struct inode *inode,
2002 + long iblock, struct buffer_head *bh_result, int create)
2004 + struct ext3_ext_path *path = NULL;
2005 + struct ext3_extent newex;
2006 + struct ext3_extent *ex;
2007 + int goal, newblock, err = 0, depth;
2008 + struct ext3_extents_tree tree;
2010 + clear_bit(BH_New, &bh_result->b_state);
2011 + ext3_init_tree_desc(&tree, inode);
2012 + ext_debug(&tree, "block %d requested for inode %u\n",
2013 + (int) iblock, (unsigned) inode->i_ino);
2014 + down_write(&EXT3_I(inode)->truncate_sem);
2016 + /* check in cache */
2017 + if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
2018 + if (goal == EXT3_EXT_CACHE_GAP) {
2020 + /* the block isn't allocated yet and the
2021 + * user doesn't want to allocate it */
2024 + /* we should allocate requested block */
2025 + } else if (goal == EXT3_EXT_CACHE_EXTENT) {
2026 + /* block is already allocated */
2027 + newblock = iblock - newex.ee_block + newex.ee_start;
2034 + /* find extent for this block */
2035 + path = ext3_ext_find_extent(&tree, iblock, NULL);
2036 + if (IS_ERR(path)) {
2037 + err = PTR_ERR(path);
2042 + depth = EXT_DEPTH(&tree);
2045 + * a consistent leaf must not be empty;
2046 + * this situation is possible, though, _during_ tree modification;
2047 + * this is why the assert can't be put in ext3_ext_find_extent()
2049 + EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
2051 + if ((ex = path[depth].p_ext)) {
2052 + /* if the found extent covers the block, simply return it */
2053 + if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
2054 + newblock = iblock - ex->ee_block + ex->ee_start;
2055 + ext_debug(&tree, "%d fit into %d:%d -> %d\n",
2056 + (int) iblock, ex->ee_block, ex->ee_len,
2058 + ext3_ext_put_in_cache(&tree, ex->ee_block,
2059 + ex->ee_len, ex->ee_start,
2060 + EXT3_EXT_CACHE_EXTENT);
2066 + * the requested block isn't allocated yet;
2067 + * we can't try to create the block if the create flag is zero
2070 + /* put the just-found gap into the cache to speed up subsequent requests */
2071 + ext3_ext_put_gap_in_cache(&tree, path, iblock);
2075 + /* allocate new block */
2076 + goal = ext3_ext_find_goal(inode, path, iblock);
2078 + newblock = ext3_new_block(handle, inode, goal, 0, 0, &err);
2082 + ext_debug(&tree, "allocate new block: goal %d, found %d\n",
2085 + /* try to insert new extent into found leaf and return */
2086 + newex.ee_block = iblock;
2087 + newex.ee_start = newblock;
2089 + err = ext3_ext_insert_extent(handle, &tree, path, &newex);
2093 + if (inode->i_size > EXT3_I(inode)->i_disksize)
2094 + EXT3_I(inode)->i_disksize = inode->i_size;
2096 + /* previous routine could use block we allocated */
2097 + newblock = newex.ee_start;
2098 + set_bit(BH_New, &bh_result->b_state);
2100 + ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
2101 + newex.ee_start, EXT3_EXT_CACHE_EXTENT);
2103 + ext3_ext_show_leaf(&tree, path);
2104 + set_bit(BH_Mapped, &bh_result->b_state);
2105 + bh_result->b_dev = inode->i_sb->s_dev;
2106 + bh_result->b_blocknr = newblock;
2109 + ext3_ext_drop_refs(path);
2112 + up_write(&EXT3_I(inode)->truncate_sem);
2117 +void ext3_ext_truncate(struct inode * inode)
2119 + struct address_space *mapping = inode->i_mapping;
2120 + struct super_block *sb = inode->i_sb;
2121 + struct ext3_extents_tree tree;
2122 + unsigned long last_block;
2126 + ext3_init_tree_desc(&tree, inode);
2129 + * probably the first extent we're going to free will be the last in the block
2131 + err = ext3_writepage_trans_blocks(inode) + 3;
2132 + handle = ext3_journal_start(inode, err);
2133 + if (IS_ERR(handle))
2136 + ext3_block_truncate_page(handle, mapping, inode->i_size);
2138 + down_write(&EXT3_I(inode)->truncate_sem);
2139 + ext3_ext_invalidate_cache(&tree);
2142 + * TODO: optimization is possible here
2143 + * probably we don't need scanning at all,
2144 + * because page truncation is enough
2146 + if (ext3_orphan_add(handle, inode))
2149 + /* we have to know where to truncate from in crash case */
2150 + EXT3_I(inode)->i_disksize = inode->i_size;
2151 + ext3_mark_inode_dirty(handle, inode);
2153 + last_block = (inode->i_size + sb->s_blocksize - 1) >>
2154 + EXT3_BLOCK_SIZE_BITS(sb);
2155 + err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
2157 + /* In a multi-transaction truncate, we only make the final
2158 + * transaction synchronous */
2159 + if (IS_SYNC(inode))
2160 + handle->h_sync = 1;
2164 + * If this was a simple ftruncate(), and the file will remain alive
2165 + * then we need to clear up the orphan record which we created above.
2166 + * However, if this was a real unlink then we were called by
2167 + * ext3_delete_inode(), and we allow that function to clean up the
2168 + * orphan info for us.
2170 + if (inode->i_nlink)
2171 + ext3_orphan_del(handle, inode);
2173 + up_write(&EXT3_I(inode)->truncate_sem);
2174 + ext3_journal_stop(handle, inode);
2178 + * this routine calculates the max number of blocks we could modify
2179 + * in order to allocate new block for an inode
2181 +int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
2183 + struct ext3_extents_tree tree;
2186 + ext3_init_tree_desc(&tree, inode);
2188 + needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
2190 + /* caller want to allocate num blocks */
2193 +#ifdef CONFIG_QUOTA
2195 + * FIXME: the real calculation should be here;
2196 + * it depends on the blockmap format of the quota file
2198 + needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
2204 +void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
2206 + struct ext3_extents_tree tree;
2208 + ext3_init_tree_desc(&tree, inode);
2209 + ext3_extent_tree_init(handle, &tree);
2213 +ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
2214 + struct ext3_ext_path *path,
2215 + struct ext3_ext_cache *newex)
2217 + struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
2219 + if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
2220 + return EXT_CONTINUE;
2224 + if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
2227 + if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
2229 + buf->cur += sizeof(*newex);
2231 + buf->err = -EFAULT;
2234 + return EXT_CONTINUE;
2238 +ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
2239 + struct ext3_ext_path *path,
2240 + struct ext3_ext_cache *ex)
2242 + struct ext3_extent_tree_stats *buf =
2243 + (struct ext3_extent_tree_stats *) tree->private;
2246 + if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
2247 + return EXT_CONTINUE;
2249 + depth = EXT_DEPTH(tree);
2250 + buf->extents_num++;
2251 + if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
2253 + return EXT_CONTINUE;
2256 +int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
2257 + unsigned long arg)
2261 + if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
2264 + if (cmd == EXT3_IOC_GET_EXTENTS) {
2265 + struct ext3_extent_buf buf;
2266 + struct ext3_extents_tree tree;
2268 + if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
2271 + ext3_init_tree_desc(&tree, inode);
2272 + buf.cur = buf.buffer;
2274 + tree.private = &buf;
2275 + down_write(&EXT3_I(inode)->truncate_sem);
2276 + err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
2277 + ext3_ext_store_extent_cb);
2278 + up_write(&EXT3_I(inode)->truncate_sem);
2281 + } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
2282 + struct ext3_extent_tree_stats buf;
2283 + struct ext3_extents_tree tree;
2285 + ext3_init_tree_desc(&tree, inode);
2286 + down_write(&EXT3_I(inode)->truncate_sem);
2287 + buf.depth = EXT_DEPTH(&tree);
2288 + buf.extents_num = 0;
2290 + tree.private = &buf;
2291 + err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
2292 + ext3_ext_collect_stats_cb);
2293 + up_write(&EXT3_I(inode)->truncate_sem);
2295 + err = copy_to_user((void *) arg, &buf, sizeof(buf));
2296 + } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
2297 + struct ext3_extents_tree tree;
2298 + ext3_init_tree_desc(&tree, inode);
2299 + down_write(&EXT3_I(inode)->truncate_sem);
2300 + err = EXT_DEPTH(&tree);
2301 + up_write(&EXT3_I(inode)->truncate_sem);
2307 +EXPORT_SYMBOL(ext3_init_tree_desc);
2308 +EXPORT_SYMBOL(ext3_mark_inode_dirty);
2309 +EXPORT_SYMBOL(ext3_ext_invalidate_cache);
2310 +EXPORT_SYMBOL(ext3_ext_insert_extent);
2311 +EXPORT_SYMBOL(ext3_ext_walk_space);
2312 +EXPORT_SYMBOL(ext3_ext_find_goal);
2313 +EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
2314 Index: linux-2.4.29/fs/ext3/ialloc.c
2315 ===================================================================
2316 --- linux-2.4.29.orig/fs/ext3/ialloc.c 2005-05-03 16:50:30.216045296 +0300
2317 +++ linux-2.4.29/fs/ext3/ialloc.c 2005-05-03 16:52:08.804057640 +0300
2319 inode->i_blksize = PAGE_SIZE;
2320 inode->i_blocks = 0;
2321 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
2322 - inode->u.ext3_i.i_flags = dir->u.ext3_i.i_flags & ~EXT3_INDEX_FL;
2323 + inode->u.ext3_i.i_flags = dir->u.ext3_i.i_flags &
2324 + ~(EXT3_INDEX_FL | EXT3_EXTENTS_FL);
2326 inode->u.ext3_i.i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
2327 #ifdef EXT3_FRAGMENTS
2328 @@ -592,6 +593,19 @@
2332 + if (test_opt(sb, EXTENTS) && S_ISREG(inode->i_mode)) {
2333 + EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
2334 + memset(&EXT3_I(inode)->i_cached_extent, 0, sizeof(__u32) * 4);
2335 + ext3_extents_initialize_blockmap(handle, inode);
2336 + if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
2337 + err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
2338 + if (err) goto fail;
2339 + EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
2340 + BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
2341 + err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
2345 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
2348 Index: linux-2.4.29/fs/ext3/inode.c
2349 ===================================================================
2350 --- linux-2.4.29.orig/fs/ext3/inode.c 2005-05-03 16:51:50.331865840 +0300
2351 +++ linux-2.4.29/fs/ext3/inode.c 2005-05-03 16:52:08.808057032 +0300
2352 @@ -861,6 +861,15 @@
2357 +ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
2358 + struct buffer_head *bh, int create)
2360 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2361 + return ext3_ext_get_block(handle, inode, block, bh, create);
2362 + return ext3_get_block_handle(handle, inode, block, bh, create);
2366 * The BKL is not held on entry here.
2369 handle = ext3_journal_current_handle();
2370 J_ASSERT(handle != 0);
2372 - ret = ext3_get_block_handle(handle, inode, iblock, bh_result, create);
2373 + ret = ext3_get_block_wrap(handle, inode, iblock, bh_result, create);
2379 dummy.b_blocknr = -1000;
2380 buffer_trace_init(&dummy.b_history);
2381 - *errp = ext3_get_block_handle(handle, inode, block, &dummy, create);
2382 + *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create);
2383 if (!*errp && buffer_mapped(&dummy)) {
2384 struct buffer_head *bh;
2385 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
2386 @@ -1416,7 +1425,7 @@
2387 * This required during truncate. We need to physically zero the tail end
2388 * of that block so it doesn't yield old data if the file is later grown.
2390 -static int ext3_block_truncate_page(handle_t *handle,
2391 +int ext3_block_truncate_page(handle_t *handle,
2392 struct address_space *mapping, loff_t from)
2394 unsigned long index = from >> PAGE_CACHE_SHIFT;
2395 @@ -1904,6 +1913,9 @@
2397 ext3_discard_prealloc(inode);
2399 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2400 + return ext3_ext_truncate(inode);
2402 handle = start_transaction(inode);
2404 return; /* AKPM: return what? */
2405 @@ -2240,6 +2252,7 @@
2406 for (block = 0; block < EXT3_N_BLOCKS; block++)
2407 inode->u.ext3_i.i_data[block] = iloc.raw_inode->i_block[block];
2408 INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
2409 + memset(&EXT3_I(inode)->i_cached_extent, 0, sizeof(__u32) * 4);
2411 if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
2412 inode->u.ext3_i.i_extra_isize =
2413 @@ -2546,6 +2559,9 @@
2414 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
2417 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2418 + return ext3_ext_writepage_trans_blocks(inode, bpp);
2420 if (ext3_should_journal_data(inode))
2421 ret = 3 * (bpp + indirects) + 2;
2423 @@ -2982,7 +2998,7 @@
2425 /* alloc blocks one by one */
2426 for (i = 0; i < nblocks; i++) {
2427 - ret = ext3_get_block_handle(handle, inode, blocks[i],
2428 + ret = ext3_get_block_wrap(handle, inode, blocks[i],
2432 @@ -3058,7 +3074,7 @@
2436 - rc = ext3_get_block_handle(handle, inode, iblock, &bh, 1);
2437 + rc = ext3_get_block_wrap(handle, inode, iblock, &bh, 1);
2439 printk(KERN_INFO "ext3_map_inode_page: error %d "
2440 "allocating block %ld\n", rc, iblock);
2441 Index: linux-2.4.29/fs/ext3/Makefile
2442 ===================================================================
2443 --- linux-2.4.29.orig/fs/ext3/Makefile 2005-05-03 16:51:32.127633304 +0300
2444 +++ linux-2.4.29/fs/ext3/Makefile 2005-05-03 16:53:38.634401352 +0300
2447 obj-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
2448 ioctl.o namei.o super.o symlink.o hash.o ext3-exports.o \
2450 + xattr_trusted.o extents.o
2451 +export-objs += extents.o
2453 obj-m := $(O_TARGET)
2455 export-objs += xattr.o
2456 Index: linux-2.4.29/fs/ext3/super.c
2457 ===================================================================
2458 --- linux-2.4.29.orig/fs/ext3/super.c 2005-05-03 16:50:14.750396432 +0300
2459 +++ linux-2.4.29/fs/ext3/super.c 2005-05-03 16:52:08.813056272 +0300
2461 #ifdef EXT3_DELETE_THREAD
2462 J_ASSERT(sbi->s_delete_inodes == 0);
2464 + ext3_ext_release(sb);
2465 ext3_xattr_put_super(sb);
2466 journal_destroy(sbi->s_journal);
2467 if (!(sb->s_flags & MS_RDONLY)) {
2468 @@ -702,6 +703,12 @@
2472 + else if (!strcmp (this_char, "extents"))
2473 + set_opt (*mount_options, EXTENTS);
2474 + else if (!strcmp (this_char, "noextents"))
2475 + clear_opt (*mount_options, EXTENTS);
2476 + else if (!strcmp (this_char, "extdebug"))
2477 + set_opt (*mount_options, EXTDEBUG);
2478 else if (!strcmp (this_char, "grpid") ||
2479 !strcmp (this_char, "bsdgroups"))
2480 set_opt (*mount_options, GRPID);
2481 @@ -1405,6 +1410,8 @@
2482 test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
2485 + ext3_ext_init(sb);
2490 Index: linux-2.4.29/fs/ext3/ioctl.c
2491 ===================================================================
2492 --- linux-2.4.29.orig/fs/ext3/ioctl.c 2005-05-03 16:49:36.825161944 +0300
2493 +++ linux-2.4.29/fs/ext3/ioctl.c 2005-05-03 16:52:08.814056120 +0300
2494 @@ -174,6 +174,10 @@
2498 + case EXT3_IOC_GET_EXTENTS:
2499 + case EXT3_IOC_GET_TREE_STATS:
2500 + case EXT3_IOC_GET_TREE_DEPTH:
2501 + return ext3_ext_ioctl(inode, filp, cmd, arg);
2505 Index: linux-2.4.29/include/linux/ext3_fs.h
2506 ===================================================================
2507 --- linux-2.4.29.orig/include/linux/ext3_fs.h 2005-05-03 16:50:30.228043472 +0300
2508 +++ linux-2.4.29/include/linux/ext3_fs.h 2005-05-03 16:52:08.817055664 +0300
2510 #define EXT3_IMAGIC_FL 0x00002000 /* AFS directory */
2511 #define EXT3_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */
2512 #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
2513 +#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
2515 -#define EXT3_FL_USER_VISIBLE 0x00005FFF /* User visible flags */
2516 +#define EXT3_FL_USER_VISIBLE 0x00085FFF /* User visible flags */
2517 #define EXT3_FL_USER_MODIFIABLE 0x000000FF /* User modifiable flags */
2521 #ifdef CONFIG_JBD_DEBUG
2522 #define EXT3_IOC_WAIT_FOR_READONLY _IOR('f', 99, long)
2524 +#define EXT3_IOC_GET_EXTENTS _IOR('f', 7, long)
2525 +#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
2526 +#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)
2529 * Structure of an inode on the disk
2531 #define EXT3_MOUNT_ASYNCDEL 0x20000 /* Delayed deletion */
2532 #define EXT3_MOUNT_IOPEN 0x80000 /* Allow access via iopen */
2533 #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
2534 +#define EXT3_MOUNT_EXTENTS 0x200000/* Extents support */
2535 +#define EXT3_MOUNT_EXTDEBUG 0x400000/* Extents debug */
2537 /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
2538 #ifndef _LINUX_EXT2_FS_H
2539 @@ -506,11 +512,13 @@
2540 #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
2541 #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
2542 #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
2543 +#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
2545 #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
2546 #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
2547 EXT3_FEATURE_INCOMPAT_RECOVER| \
2548 - EXT3_FEATURE_INCOMPAT_META_BG)
2549 + EXT3_FEATURE_INCOMPAT_META_BG| \
2550 + EXT3_FEATURE_INCOMPAT_EXTENTS)
2551 #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
2552 EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
2553 EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
2555 extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
2558 +extern int ext3_block_truncate_page(handle_t *, struct address_space *, loff_t);
2559 +extern int ext3_writepage_trans_blocks(struct inode *inode);
2560 extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
2561 extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
2562 extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
2563 @@ -783,6 +793,16 @@
2564 extern struct inode_operations ext3_symlink_inode_operations;
2565 extern struct inode_operations ext3_fast_symlink_inode_operations;
2568 +extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
2569 +extern int ext3_ext_get_block(handle_t *, struct inode *, long,
2570 + struct buffer_head *, int);
2571 +extern void ext3_ext_truncate(struct inode *);
2572 +extern void ext3_ext_init(struct super_block *);
2573 +extern void ext3_ext_release(struct super_block *);
2574 +extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
2575 +extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
2576 + unsigned int cmd, unsigned long arg);
2578 #endif /* __KERNEL__ */
2580 Index: linux-2.4.29/include/linux/ext3_extents.h
2581 ===================================================================
2582 --- linux-2.4.29.orig/include/linux/ext3_extents.h 2005-05-03 16:52:08.724069800 +0300
2583 +++ linux-2.4.29/include/linux/ext3_extents.h 2005-05-03 16:52:08.819055360 +0300
2586 + * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
2587 + * Written by Alex Tomas <alex@clusterfs.com>
2589 + * This program is free software; you can redistribute it and/or modify
2590 + * it under the terms of the GNU General Public License version 2 as
2591 + * published by the Free Software Foundation.
2593 + * This program is distributed in the hope that it will be useful,
2594 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2595 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2596 + * GNU General Public License for more details.
2598 + * You should have received a copy of the GNU General Public License
2599 + * along with this program; if not, write to the Free Software
2600 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
2603 +#ifndef _LINUX_EXT3_EXTENTS
2604 +#define _LINUX_EXT3_EXTENTS
2607 + * with AGRESSIVE_TEST defined the capacity of index/leaf blocks
2608 + * becomes very small, so index splits, in-depth growing and
2609 + * other hard changes happen much more often
2610 + * this is for debug purposes only
2612 +#define AGRESSIVE_TEST_
2615 + * if CHECK_BINSEARCH defined, then results of binary search
2616 + * will be checked by linear search
2618 +#define CHECK_BINSEARCH_
2621 + * if EXT_DEBUG is defined you can use 'extdebug' mount option
2622 + * to get lots of info what's going on
2626 +#define ext_debug(tree,fmt,a...) \
2628 + if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
2629 + printk(fmt, ##a); \
2632 +#define ext_debug(tree,fmt,a...)
2636 + * if EXT_STATS is defined then stats numbers are collected
2637 + * these numbers will be displayed at umount time
2642 +#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
2645 + * ext3_inode has i_block array (total 60 bytes)
2646 + * first 4 bytes are used to store:
2647 + * - tree depth (0 means there is no tree yet; all extents are in the inode)
2648 + * - number of alive extents in the inode
2652 + * this is extent on-disk structure
2653 + * it's used at the bottom of the tree
2655 +struct ext3_extent {
2656 + __u32 ee_block; /* first logical block extent covers */
2657 + __u16 ee_len; /* number of blocks covered by extent */
2658 + __u16 ee_start_hi; /* high 16 bits of physical block */
2659 + __u32 ee_start; /* low 32 bits of physical block */
2663 + * this is index on-disk structure
2664 + * it's used at all levels except the bottom
2666 +struct ext3_extent_idx {
2667 + __u32 ei_block; /* index covers logical blocks from 'block' */
2668 + __u32 ei_leaf; /* pointer to the physical block of the next *
2669 + * level. leaf or next index could be here */
2670 + __u16 ei_leaf_hi; /* high 16 bits of physical block */
2675 + * each block (leaves and indexes), even inode-stored has header
2677 +struct ext3_extent_header {
2678 + __u16 eh_magic; /* probably will support different formats */
2679 + __u16 eh_entries; /* number of valid entries */
2680 + __u16 eh_max; /* capacity of store in entries */
2681 + __u16 eh_depth; /* has tree real underlying blocks? */
2682 + __u32 eh_generation; /* flags(8 bits) | generation of the tree */
2685 +#define EXT3_EXT_MAGIC 0xf30a
2688 + * array of ext3_ext_path contains path to some extent
2689 + * creation/lookup routines use it for traversal/splitting/etc
2690 + * truncate uses it to simulate recursive walking
2692 +struct ext3_ext_path {
2695 + struct ext3_extent *p_ext;
2696 + struct ext3_extent_idx *p_idx;
2697 + struct ext3_extent_header *p_hdr;
2698 + struct buffer_head *p_bh;
2702 + * structure for external API
2706 + * storage for cached extent
2708 +struct ext3_ext_cache {
2715 +#define EXT3_EXT_CACHE_NO 0
2716 +#define EXT3_EXT_CACHE_GAP 1
2717 +#define EXT3_EXT_CACHE_EXTENT 2
2720 + * ext3_extents_tree is used to pass initial information
2721 + * to top-level extents API
2723 +struct ext3_extents_helpers;
2724 +struct ext3_extents_tree {
2725 + struct inode *inode; /* inode which tree belongs to */
2726 + void *root; /* ptr to data top of tree resides at */
2727 + void *buffer; /* will be passed as arg to ^^ routines */
2730 + struct ext3_ext_cache *cex;/* last found extent */
2731 + struct ext3_extents_helpers *ops;
2734 +struct ext3_extents_helpers {
2735 + int (*get_write_access)(handle_t *h, void *buffer);
2736 + int (*mark_buffer_dirty)(handle_t *h, void *buffer);
2737 + int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
2738 + int (*remove_extent_credits)(struct ext3_extents_tree *,
2739 + struct ext3_extent *, unsigned long,
2741 + int (*remove_extent)(struct ext3_extents_tree *,
2742 + struct ext3_extent *, unsigned long,
2744 + int (*new_block)(handle_t *, struct ext3_extents_tree *,
2745 + struct ext3_ext_path *, struct ext3_extent *,
2750 + * to be called by ext3_ext_walk_space()
2751 + * negative retcode - error
2752 + * positive retcode - signal for ext3_ext_walk_space(), see below
2753 + * callback must return valid extent (passed or newly created)
2755 +typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
2756 + struct ext3_ext_path *,
2757 + struct ext3_ext_cache *);
2759 +#define EXT_CONTINUE 0
2760 +#define EXT_BREAK 1
2761 +#define EXT_REPEAT 2
2764 +#define EXT_MAX_BLOCK 0xffffffff
2767 +#define EXT_FIRST_EXTENT(__hdr__) \
2768 + ((struct ext3_extent *) (((char *) (__hdr__)) + \
2769 + sizeof(struct ext3_extent_header)))
2770 +#define EXT_FIRST_INDEX(__hdr__) \
2771 + ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
2772 + sizeof(struct ext3_extent_header)))
2773 +#define EXT_HAS_FREE_INDEX(__path__) \
2774 + ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
2775 +#define EXT_LAST_EXTENT(__hdr__) \
2776 + (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
2777 +#define EXT_LAST_INDEX(__hdr__) \
2778 + (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
2779 +#define EXT_MAX_EXTENT(__hdr__) \
2780 + (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
2781 +#define EXT_MAX_INDEX(__hdr__) \
2782 + (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
2783 +#define EXT_HDR_GEN(__hdr__) ((__hdr__)->eh_generation & 0x00ffffff)
2784 +#define EXT_FLAGS(__hdr__) ((__hdr__)->eh_generation >> 24)
2785 +#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
2787 +#define EXT_BLOCK_HDR(__bh__) ((struct ext3_extent_header *)(__bh__)->b_data)
2788 +#define EXT_ROOT_HDR(__tree__) ((struct ext3_extent_header *)(__tree__)->root)
2789 +#define EXT_DEPTH(__tree__) (EXT_ROOT_HDR(__tree__)->eh_depth)
2790 +#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
2792 +#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
2794 +#define EXT_CHECK_PATH(tree,path) \
2796 + int depth = EXT_DEPTH(tree); \
2797 + BUG_ON((unsigned long) (path) < __PAGE_OFFSET); \
2798 + BUG_ON((unsigned long) (path)[depth].p_idx < \
2799 + __PAGE_OFFSET && (path)[depth].p_idx != NULL); \
2800 + BUG_ON((unsigned long) (path)[depth].p_ext < \
2801 + __PAGE_OFFSET && (path)[depth].p_ext != NULL); \
2802 + BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET); \
2803 + BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET \
2805 + BUG_ON((path)[0].p_depth != depth); \
2810 + * this structure is used to gather extents from the tree via ioctl
2812 +struct ext3_extent_buf {
2813 + unsigned long start;
2821 + * this structure is used to collect stats info about the tree
2823 +struct ext3_extent_tree_stats {
2829 +extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
2830 +extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
2831 +extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
2832 +extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
2833 +extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
2834 +extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
2835 +extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
2838 +ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
2841 + tree->cex->ec_type = EXT3_EXT_CACHE_NO;
2845 +#endif /* _LINUX_EXT3_EXTENTS */
2846 Index: linux-2.4.29/include/linux/ext3_fs_i.h
2847 ===================================================================
2848 --- linux-2.4.29.orig/include/linux/ext3_fs_i.h 2005-05-03 16:50:30.229043320 +0300
2849 +++ linux-2.4.29/include/linux/ext3_fs_i.h 2005-05-03 16:52:08.823054752 +0300
2851 * by other means, so we have truncate_sem.
2853 struct rw_semaphore truncate_sem;
2855 + __u32 i_cached_extent[4];
2858 #endif /* _LINUX_EXT3_FS_I */