1 Index: linux-stage/fs/ext3/extents.c
2 ===================================================================
3 --- linux-stage.orig/fs/ext3/extents.c 2005-02-25 15:33:48.890198160 +0200
4 +++ linux-stage/fs/ext3/extents.c 2005-02-25 15:33:48.917194056 +0200
7 + * Copyright(c) 2003, 2004, 2005, Cluster File Systems, Inc, info@clusterfs.com
8 + * Written by Alex Tomas <alex@clusterfs.com>
10 + * This program is free software; you can redistribute it and/or modify
11 + * it under the terms of the GNU General Public License version 2 as
12 + * published by the Free Software Foundation.
14 + * This program is distributed in the hope that it will be useful,
15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 + * GNU General Public License for more details.
19 + * You should have received a copy of the GNU General Public License
20 + * along with this program; if not, write to the Free Software
21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
25 + * Extents support for EXT3
28 + * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
29 + * - ext3_ext_calc_credits() could take 'mergable' into account
30 + * - ext3*_error() should be used in some situations
31 + * - find_goal() [to be tested and improved]
32 + * - smart tree reduction
33 + * - arch-independence
34 + * common on-disk format for big/little-endian arch
37 +#include <linux/module.h>
38 +#include <linux/fs.h>
39 +#include <linux/time.h>
40 +#include <linux/ext3_jbd.h>
41 +#include <linux/jbd.h>
42 +#include <linux/smp_lock.h>
43 +#include <linux/highuid.h>
44 +#include <linux/pagemap.h>
45 +#include <linux/quotaops.h>
46 +#include <linux/string.h>
47 +#include <linux/slab.h>
48 +#include <linux/ext3_extents.h>
49 +#include <asm/uaccess.h>
52 +static inline int ext3_ext_check_header(struct ext3_extent_header *eh)
54 + if (eh->eh_magic != EXT3_EXT_MAGIC) {
55 + printk(KERN_ERR "EXT3-fs: invalid magic = 0x%x\n",
56 + (unsigned)eh->eh_magic);
59 + if (eh->eh_max == 0) {
60 + printk(KERN_ERR "EXT3-fs: invalid eh_max = %u\n",
61 + (unsigned)eh->eh_max);
64 + if (eh->eh_entries > eh->eh_max) {
65 + printk(KERN_ERR "EXT3-fs: invalid eh_entries = %u\n",
66 + (unsigned)eh->eh_entries);
72 +static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
76 + if (handle->h_buffer_credits > needed)
78 + if (!ext3_journal_extend(handle, needed))
80 + err = ext3_journal_restart(handle, needed);
86 +ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
88 + if (tree->ops->get_write_access)
89 + return tree->ops->get_write_access(h,tree->buffer);
95 +ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
97 + if (tree->ops->mark_buffer_dirty)
98 + return tree->ops->mark_buffer_dirty(h,tree->buffer);
108 +static int ext3_ext_get_access(handle_t *handle,
109 + struct ext3_extents_tree *tree,
110 + struct ext3_ext_path *path)
115 + /* path points to block */
116 + err = ext3_journal_get_write_access(handle, path->p_bh);
118 + /* path points to leaf/index in inode body */
119 + err = ext3_ext_get_access_for_root(handle, tree);
130 +static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
131 + struct ext3_ext_path *path)
135 + /* path points to block */
136 + err =ext3_journal_dirty_metadata(handle, path->p_bh);
138 + /* path points to leaf/index in inode body */
139 + err = ext3_ext_mark_root_dirty(handle, tree);
145 +ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
146 + struct ext3_ext_path *path, struct ext3_extent *ex,
149 + int goal, depth, newblock;
150 + struct inode *inode;
153 + if (tree->ops->new_block)
154 + return tree->ops->new_block(handle, tree, path, ex, err);
156 + inode = tree->inode;
157 + depth = EXT_DEPTH(tree);
158 + if (path && depth > 0) {
159 + goal = path[depth-1].p_block;
161 + struct ext3_inode_info *ei = EXT3_I(inode);
162 + unsigned long bg_start;
163 + unsigned long colour;
165 + bg_start = (ei->i_block_group *
166 + EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
167 + le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
168 + colour = (current->pid % 16) *
169 + (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
170 + goal = bg_start + colour;
173 + newblock = ext3_new_block(handle, inode, goal, err);
177 +static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
179 + struct ext3_extent_header *neh = EXT_ROOT_HDR(tree);
180 + neh->eh_generation = ((EXT_FLAGS(neh) & ~EXT_FLAGS_CLR_UNKNOWN) << 24) |
181 + (EXT_HDR_GEN(neh) + 1);
184 +static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
188 + size = (tree->inode->i_sb->s_blocksize -
189 + sizeof(struct ext3_extent_header)) /
190 + sizeof(struct ext3_extent);
191 +#ifdef AGRESSIVE_TEST
197 +static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
201 + size = (tree->inode->i_sb->s_blocksize -
202 + sizeof(struct ext3_extent_header)) /
203 + sizeof(struct ext3_extent_idx);
204 +#ifdef AGRESSIVE_TEST
210 +static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
214 + size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
215 + sizeof(struct ext3_extent);
216 +#ifdef AGRESSIVE_TEST
222 +static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
226 + size = (tree->buffer_len - sizeof(struct ext3_extent_header)) /
227 + sizeof(struct ext3_extent_idx);
228 +#ifdef AGRESSIVE_TEST
234 +static void ext3_ext_show_path(struct ext3_extents_tree *tree,
235 + struct ext3_ext_path *path)
238 + int k, l = path->p_depth;
240 + ext_debug(tree, "path:");
241 + for (k = 0; k <= l; k++, path++) {
243 + ext_debug(tree, " %d->%d", path->p_idx->ei_block,
244 + path->p_idx->ei_leaf);
245 + } else if (path->p_ext) {
246 + ext_debug(tree, " %d:%d:%d",
247 + path->p_ext->ee_block,
248 + path->p_ext->ee_len,
249 + path->p_ext->ee_start);
251 + ext_debug(tree, " []");
253 + ext_debug(tree, "\n");
257 +static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
258 + struct ext3_ext_path *path)
261 + int depth = EXT_DEPTH(tree);
262 + struct ext3_extent_header *eh;
263 + struct ext3_extent *ex;
269 + eh = path[depth].p_hdr;
270 + ex = EXT_FIRST_EXTENT(eh);
272 + for (i = 0; i < eh->eh_entries; i++, ex++) {
273 + ext_debug(tree, "%d:%d:%d ",
274 + ex->ee_block, ex->ee_len, ex->ee_start);
276 + ext_debug(tree, "\n");
280 +static void ext3_ext_drop_refs(struct ext3_ext_path *path)
282 + int depth = path->p_depth;
285 + for (i = 0; i <= depth; i++, path++) {
287 + brelse(path->p_bh);
294 + * binary search for closest index by given block
297 +ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
298 + struct ext3_ext_path *path, int block)
300 + struct ext3_extent_header *eh = path->p_hdr;
301 + struct ext3_extent_idx *ix;
304 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
305 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
306 + EXT_ASSERT(eh->eh_entries > 0);
308 + ext_debug(tree, "binsearch for %d(idx): ", block);
310 + path->p_idx = ix = EXT_FIRST_INDEX(eh);
312 + r = k = eh->eh_entries;
315 + if (block < ix[l + k].ei_block)
319 + ext_debug(tree, "%d:%d:%d ", k, l, r);
324 + ext_debug(tree," -> %d->%d ",path->p_idx->ei_block,path->p_idx->ei_leaf);
327 + if (block < ix->ei_block)
329 + path->p_idx = ix++;
331 + ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
332 + path->p_idx->ei_leaf);
334 +#ifdef CHECK_BINSEARCH
336 + struct ext3_extent_idx *chix;
338 + chix = ix = EXT_FIRST_INDEX(eh);
339 + for (k = 0; k < eh->eh_entries; k++, ix++) {
340 + if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
341 + printk("k=%d, ix=0x%p, first=0x%p\n", k,
342 + ix, EXT_FIRST_INDEX(eh));
343 + printk("%u <= %u\n",
344 + ix->ei_block,ix[-1].ei_block);
346 + EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
347 + if (block < ix->ei_block)
351 + EXT_ASSERT(chix == path->p_idx);
357 + * binary search for closest extent by given block
360 +ext3_ext_binsearch(struct ext3_extents_tree *tree,
361 + struct ext3_ext_path *path, int block)
363 + struct ext3_extent_header *eh = path->p_hdr;
364 + struct ext3_extent *ex;
367 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
368 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
370 + if (eh->eh_entries == 0) {
372 + * this leaf is empty yet:
373 + * we get such a leaf in split/add case
378 + ext_debug(tree, "binsearch for %d: ", block);
380 + path->p_ext = ex = EXT_FIRST_EXTENT(eh);
382 + r = k = eh->eh_entries;
385 + if (block < ex[l + k].ee_block)
389 + ext_debug(tree, "%d:%d:%d ", k, l, r);
394 + ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
395 + path->p_ext->ee_start, path->p_ext->ee_len);
398 + if (block < ex->ee_block)
400 + path->p_ext = ex++;
402 + ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
403 + path->p_ext->ee_start, path->p_ext->ee_len);
405 +#ifdef CHECK_BINSEARCH
407 + struct ext3_extent *chex;
409 + chex = ex = EXT_FIRST_EXTENT(eh);
410 + for (k = 0; k < eh->eh_entries; k++, ex++) {
411 + EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
412 + if (block < ex->ee_block)
416 + EXT_ASSERT(chex == path->p_ext);
421 +int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
423 + struct ext3_extent_header *eh;
425 + BUG_ON(tree->buffer_len == 0);
426 + ext3_ext_get_access_for_root(handle, tree);
427 + eh = EXT_ROOT_HDR(tree);
429 + eh->eh_entries = 0;
430 + eh->eh_magic = EXT3_EXT_MAGIC;
431 + eh->eh_max = ext3_ext_space_root(tree);
432 + ext3_ext_mark_root_dirty(handle, tree);
433 + ext3_ext_invalidate_cache(tree);
437 +struct ext3_ext_path *
438 +ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
439 + struct ext3_ext_path *path)
441 + struct ext3_extent_header *eh;
442 + struct buffer_head *bh;
443 + int depth, i, ppos = 0;
446 + EXT_ASSERT(tree->inode);
447 + EXT_ASSERT(tree->root);
449 + eh = EXT_ROOT_HDR(tree);
451 + if (ext3_ext_check_header(eh)) {
452 + /* don't free previously allocated path
453 + * -- caller should take care */
458 + i = depth = EXT_DEPTH(tree);
459 + EXT_ASSERT(eh->eh_max);
460 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
462 + /* account possible depth increase */
464 + path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
467 + return ERR_PTR(-ENOMEM);
469 + memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
470 + path[0].p_hdr = eh;
472 + /* walk through the tree */
474 + ext_debug(tree, "depth %d: num %d, max %d\n",
475 + ppos, eh->eh_entries, eh->eh_max);
476 + ext3_ext_binsearch_idx(tree, path + ppos, block);
477 + path[ppos].p_block = path[ppos].p_idx->ei_leaf;
478 + path[ppos].p_depth = i;
479 + path[ppos].p_ext = NULL;
481 + bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
485 + eh = EXT_BLOCK_HDR(bh);
487 + EXT_ASSERT(ppos <= depth);
488 + path[ppos].p_bh = bh;
489 + path[ppos].p_hdr = eh;
492 + if (ext3_ext_check_header(eh))
496 + path[ppos].p_depth = i;
497 + path[ppos].p_hdr = eh;
498 + path[ppos].p_ext = NULL;
499 + path[ppos].p_idx = NULL;
501 + if (ext3_ext_check_header(eh))
505 + ext3_ext_binsearch(tree, path + ppos, block);
507 + ext3_ext_show_path(tree, path);
512 + printk(KERN_ERR "EXT3-fs: header is corrupted!\n");
514 + ext3_ext_drop_refs(path);
517 + return ERR_PTR(-EIO);
521 + * insert new index [logical;ptr] into the block at curp
522 + * it checks where to insert: before curp or after curp
524 +static int ext3_ext_insert_index(handle_t *handle,
525 + struct ext3_extents_tree *tree,
526 + struct ext3_ext_path *curp,
527 + int logical, int ptr)
529 + struct ext3_extent_idx *ix;
532 + if ((err = ext3_ext_get_access(handle, tree, curp)))
535 + EXT_ASSERT(logical != curp->p_idx->ei_block);
536 + len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
537 + if (logical > curp->p_idx->ei_block) {
539 + if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
540 + len = (len - 1) * sizeof(struct ext3_extent_idx);
541 + len = len < 0 ? 0 : len;
542 + ext_debug(tree, "insert new index %d after: %d. "
543 + "move %d from 0x%p to 0x%p\n",
545 + (curp->p_idx + 1), (curp->p_idx + 2));
546 + memmove(curp->p_idx + 2, curp->p_idx + 1, len);
548 + ix = curp->p_idx + 1;
550 + /* insert before */
551 + len = len * sizeof(struct ext3_extent_idx);
552 + len = len < 0 ? 0 : len;
553 + ext_debug(tree, "insert new index %d before: %d. "
554 + "move %d from 0x%p to 0x%p\n",
556 + curp->p_idx, (curp->p_idx + 1));
557 + memmove(curp->p_idx + 1, curp->p_idx, len);
561 + ix->ei_block = logical;
563 + ix->ei_leaf_hi = ix->ei_unused = 0;
564 + curp->p_hdr->eh_entries++;
566 + EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
567 + EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
569 + err = ext3_ext_dirty(handle, tree, curp);
570 + ext3_std_error(tree->inode->i_sb, err);
576 + * routine inserts new subtree into the path, using free index entry
578 + * - allocates all needed blocks (new leaf and all intermediate index blocks)
579 + * - makes decision where to split
580 + * - moves remaining extents and index entries (right to the split point)
581 + * into the newly allocated blocks
582 + * - initialize subtree
584 +static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
585 + struct ext3_ext_path *path,
586 + struct ext3_extent *newext, int at)
588 + struct buffer_head *bh = NULL;
589 + int depth = EXT_DEPTH(tree);
590 + struct ext3_extent_header *neh;
591 + struct ext3_extent_idx *fidx;
592 + struct ext3_extent *ex;
593 + int i = at, k, m, a;
594 + unsigned long newblock, oldblock, border;
595 + int *ablocks = NULL; /* array of allocated blocks */
598 + /* make decision: where to split? */
599 + /* FIXME: now decision is simplest: at current extent */
601 + /* if current leaf will be split, then we should use
602 + * border from split point */
603 + EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
604 + if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
605 + border = path[depth].p_ext[1].ee_block;
606 + ext_debug(tree, "leaf will be splitted."
607 + " next leaf starts at %d\n",
610 + border = newext->ee_block;
611 + ext_debug(tree, "leaf will be added."
612 + " next leaf starts at %d\n",
617 + * if error occurs, then we break processing
618 + * and turn filesystem read-only. so, index won't
619 + * be inserted and tree will be in consistent
620 + * state. next mount will repair buffers too
624 + * get array to track all allocated blocks
625 + * we need this to handle errors and free blocks
628 + ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
631 + memset(ablocks, 0, sizeof(unsigned long) * depth);
633 + /* allocate all needed blocks */
634 + ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
635 + for (a = 0; a < depth - at; a++) {
636 + newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
639 + ablocks[a] = newblock;
642 + /* initialize new leaf */
643 + newblock = ablocks[--a];
644 + EXT_ASSERT(newblock);
645 + bh = sb_getblk(tree->inode->i_sb, newblock);
652 + if ((err = ext3_journal_get_create_access(handle, bh)))
655 + neh = EXT_BLOCK_HDR(bh);
656 + neh->eh_entries = 0;
657 + neh->eh_max = ext3_ext_space_block(tree);
658 + neh->eh_magic = EXT3_EXT_MAGIC;
660 + ex = EXT_FIRST_EXTENT(neh);
662 + /* move remain of path[depth] to the new leaf */
663 + EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
664 + /* start copy from next extent */
665 + /* TODO: we could do it by single memmove */
667 + path[depth].p_ext++;
668 + while (path[depth].p_ext <=
669 + EXT_MAX_EXTENT(path[depth].p_hdr)) {
670 + ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
671 + path[depth].p_ext->ee_block,
672 + path[depth].p_ext->ee_start,
673 + path[depth].p_ext->ee_len,
675 + memmove(ex++, path[depth].p_ext++, sizeof(struct ext3_extent));
679 + set_buffer_uptodate(bh);
682 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
687 + /* correct old leaf */
689 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
691 + path[depth].p_hdr->eh_entries -= m;
692 + if ((err = ext3_ext_dirty(handle, tree, path + depth)))
697 + /* create intermediate indexes */
698 + k = depth - at - 1;
699 + EXT_ASSERT(k >= 0);
701 + ext_debug(tree, "create %d intermediate indices\n", k);
702 + /* insert new index into current index block */
703 + /* current depth stored in i var */
706 + oldblock = newblock;
707 + newblock = ablocks[--a];
708 + bh = sb_getblk(tree->inode->i_sb, newblock);
715 + if ((err = ext3_journal_get_create_access(handle, bh)))
718 + neh = EXT_BLOCK_HDR(bh);
719 + neh->eh_entries = 1;
720 + neh->eh_magic = EXT3_EXT_MAGIC;
721 + neh->eh_max = ext3_ext_space_block_idx(tree);
722 + neh->eh_depth = depth - i;
723 + fidx = EXT_FIRST_INDEX(neh);
724 + fidx->ei_block = border;
725 + fidx->ei_leaf = oldblock;
726 + fidx->ei_leaf_hi = fidx->ei_unused = 0;
728 + ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
729 + i, newblock, border, oldblock);
734 + ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
735 + EXT_MAX_INDEX(path[i].p_hdr));
736 + EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
737 + EXT_LAST_INDEX(path[i].p_hdr));
738 + while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
739 + ext_debug(tree, "%d: move %d:%d in new index %lu\n",
740 + i, path[i].p_idx->ei_block,
741 + path[i].p_idx->ei_leaf, newblock);
742 + memmove(++fidx, path[i].p_idx++,
743 + sizeof(struct ext3_extent_idx));
745 + EXT_ASSERT(neh->eh_entries <= neh->eh_max);
748 + set_buffer_uptodate(bh);
751 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
756 + /* correct old index */
758 + err = ext3_ext_get_access(handle, tree, path + i);
761 + path[i].p_hdr->eh_entries -= m;
762 + err = ext3_ext_dirty(handle, tree, path + i);
770 + /* insert new index */
772 + err = ext3_ext_insert_index(handle, tree, path + at,
777 + if (buffer_locked(bh))
783 + /* free all allocated blocks in error case */
784 + for (i = 0; i < depth; i++) {
787 + ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
796 + * routine implements tree growing procedure:
797 + * - allocates new block
798 + * - moves top-level data (index block or leaf) into the new block
799 + * - initialize new top-level, creating index that points to the
800 + * just created block
802 +static int ext3_ext_grow_indepth(handle_t *handle,
803 + struct ext3_extents_tree *tree,
804 + struct ext3_ext_path *path,
805 + struct ext3_extent *newext)
807 + struct ext3_ext_path *curp = path;
808 + struct ext3_extent_header *neh;
809 + struct ext3_extent_idx *fidx;
810 + struct buffer_head *bh;
811 + unsigned long newblock;
814 + newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
818 + bh = sb_getblk(tree->inode->i_sb, newblock);
821 + ext3_std_error(tree->inode->i_sb, err);
826 + if ((err = ext3_journal_get_create_access(handle, bh))) {
831 + /* move top-level index/leaf into new block */
832 + memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
834 + /* set size of new block */
835 + neh = EXT_BLOCK_HDR(bh);
836 + /* old root could have indexes or leaves
837 + * so calculate eh_max right way */
838 + if (EXT_DEPTH(tree))
839 + neh->eh_max = ext3_ext_space_block_idx(tree);
841 + neh->eh_max = ext3_ext_space_block(tree);
842 + neh->eh_magic = EXT3_EXT_MAGIC;
843 + set_buffer_uptodate(bh);
846 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
849 + /* create index in new top-level index: num,max,pointer */
850 + if ((err = ext3_ext_get_access(handle, tree, curp)))
853 + curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
854 + curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
855 + curp->p_hdr->eh_entries = 1;
856 + curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
857 + /* FIXME: it works, but actually path[0] can be index */
858 + curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
859 + curp->p_idx->ei_leaf = newblock;
860 + curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
862 + neh = EXT_ROOT_HDR(tree);
863 + fidx = EXT_FIRST_INDEX(neh);
864 + ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
865 + neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
867 + neh->eh_depth = path->p_depth + 1;
868 + err = ext3_ext_dirty(handle, tree, curp);
876 + * routine finds empty index and adds new leaf. if no free index found
877 + * then it requests in-depth growing
879 +static int ext3_ext_create_new_leaf(handle_t *handle,
880 + struct ext3_extents_tree *tree,
881 + struct ext3_ext_path *path,
882 + struct ext3_extent *newext)
884 + struct ext3_ext_path *curp;
885 + int depth, i, err = 0;
888 + i = depth = EXT_DEPTH(tree);
890 + /* walk up to the tree and look for free index entry */
891 + curp = path + depth;
892 + while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
897 + /* we use already allocated block for index block
898 + * so, subsequent data blocks should be contiguous */
899 + if (EXT_HAS_FREE_INDEX(curp)) {
900 + /* if we found index with free entry, then use that
901 + * entry: create all needed subtree and add new leaf */
902 + err = ext3_ext_split(handle, tree, path, newext, i);
905 + ext3_ext_drop_refs(path);
906 + path = ext3_ext_find_extent(tree, newext->ee_block, path);
908 + err = PTR_ERR(path);
910 + /* tree is full, time to grow in depth */
911 + err = ext3_ext_grow_indepth(handle, tree, path, newext);
914 + ext3_ext_drop_refs(path);
915 + path = ext3_ext_find_extent(tree, newext->ee_block, path);
917 + err = PTR_ERR(path);
920 + * only first (depth 0 -> 1) produces free space
921 + * in all other cases we have to split the grown tree
923 + depth = EXT_DEPTH(tree);
924 + if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
925 + /* now we need split */
937 + * returns allocated block in subsequent extent or EXT_MAX_BLOCK
938 + * NOTE: it considers the block number from the index entry as an
939 + * allocated block. thus, index entries have to be consistent
942 +static unsigned long
943 +ext3_ext_next_allocated_block(struct ext3_ext_path *path)
947 + EXT_ASSERT(path != NULL);
948 + depth = path->p_depth;
950 + if (depth == 0 && path->p_ext == NULL)
951 + return EXT_MAX_BLOCK;
953 + /* FIXME: what if index isn't full ?! */
954 + while (depth >= 0) {
955 + if (depth == path->p_depth) {
957 + if (path[depth].p_ext !=
958 + EXT_LAST_EXTENT(path[depth].p_hdr))
959 + return path[depth].p_ext[1].ee_block;
962 + if (path[depth].p_idx !=
963 + EXT_LAST_INDEX(path[depth].p_hdr))
964 + return path[depth].p_idx[1].ei_block;
969 + return EXT_MAX_BLOCK;
973 + * returns first allocated block from next leaf or EXT_MAX_BLOCK
975 +static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
976 + struct ext3_ext_path *path)
980 + EXT_ASSERT(path != NULL);
981 + depth = path->p_depth;
983 + /* zero-tree has no leaf blocks at all */
985 + return EXT_MAX_BLOCK;
987 + /* go to index block */
990 + while (depth >= 0) {
991 + if (path[depth].p_idx !=
992 + EXT_LAST_INDEX(path[depth].p_hdr))
993 + return path[depth].p_idx[1].ei_block;
997 + return EXT_MAX_BLOCK;
1001 + * if leaf gets modified and modified extent is first in the leaf
1002 + * then we have to correct all indexes above
1003 + * TODO: do we need to correct tree in all cases?
1005 +int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
1006 + struct ext3_ext_path *path)
1008 + struct ext3_extent_header *eh;
1009 + int depth = EXT_DEPTH(tree);
1010 + struct ext3_extent *ex;
1011 + unsigned long border;
1014 + eh = path[depth].p_hdr;
1015 + ex = path[depth].p_ext;
1020 + /* there is no tree at all */
1024 + if (ex != EXT_FIRST_EXTENT(eh)) {
1025 + /* we correct tree if first leaf got modified only */
1030 + * TODO: we need correction if border is smaller than current one
1033 + border = path[depth].p_ext->ee_block;
1034 + if ((err = ext3_ext_get_access(handle, tree, path + k)))
1036 + path[k].p_idx->ei_block = border;
1037 + if ((err = ext3_ext_dirty(handle, tree, path + k)))
1041 + /* change all left-side indexes */
1042 + if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1044 + if ((err = ext3_ext_get_access(handle, tree, path + k)))
1046 + path[k].p_idx->ei_block = border;
1047 + if ((err = ext3_ext_dirty(handle, tree, path + k)))
1055 +ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
1056 + struct ext3_extent *ex1,
1057 + struct ext3_extent *ex2)
1059 + if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
1062 +#ifdef AGRESSIVE_TEST
1063 + if (ex1->ee_len >= 4)
1067 + if (!tree->ops->mergable)
1070 + return tree->ops->mergable(ex1, ex2);
1074 + * this routine tries to merge requested extent into the existing
1075 + * extent or inserts requested extent as new one into the tree,
1076 + * creating new leaf in no-space case
1078 +int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
1079 + struct ext3_ext_path *path,
1080 + struct ext3_extent *newext)
1082 + struct ext3_extent_header * eh;
1083 + struct ext3_extent *ex, *fex;
1084 + struct ext3_extent *nearex; /* nearest extent */
1085 + struct ext3_ext_path *npath = NULL;
1086 + int depth, len, err, next;
1088 + EXT_ASSERT(newext->ee_len > 0);
1089 + depth = EXT_DEPTH(tree);
1090 + ex = path[depth].p_ext;
1091 + EXT_ASSERT(path[depth].p_hdr);
1093 + /* try to insert block into found extent and return */
1094 + if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
1095 + ext_debug(tree, "append %d block to %d:%d (from %d)\n",
1096 + newext->ee_len, ex->ee_block, ex->ee_len,
1098 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
1100 + ex->ee_len += newext->ee_len;
1101 + eh = path[depth].p_hdr;
1107 + depth = EXT_DEPTH(tree);
1108 + eh = path[depth].p_hdr;
1109 + if (eh->eh_entries < eh->eh_max)
1112 + /* probably next leaf has space for us? */
1113 + fex = EXT_LAST_EXTENT(eh);
1114 + next = ext3_ext_next_leaf_block(tree, path);
1115 + if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
1116 + ext_debug(tree, "next leaf block - %d\n", next);
1117 + EXT_ASSERT(!npath);
1118 + npath = ext3_ext_find_extent(tree, next, NULL);
1119 + if (IS_ERR(npath))
1120 + return PTR_ERR(npath);
1121 + EXT_ASSERT(npath->p_depth == path->p_depth);
1122 + eh = npath[depth].p_hdr;
1123 + if (eh->eh_entries < eh->eh_max) {
1124 + ext_debug(tree, "next leaf isnt full(%d)\n",
1129 + ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
1130 + eh->eh_entries, eh->eh_max);
1134 + * there is no free space in found leaf
1135 + * we're gonna add new leaf in the tree
1137 + err = ext3_ext_create_new_leaf(handle, tree, path, newext);
1140 + depth = EXT_DEPTH(tree);
1141 + eh = path[depth].p_hdr;
1144 + nearex = path[depth].p_ext;
1146 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
1150 + /* there is no extent in this leaf, create first one */
1151 + ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
1152 + newext->ee_block, newext->ee_start,
1154 + path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1155 + } else if (newext->ee_block > nearex->ee_block) {
1156 + EXT_ASSERT(newext->ee_block != nearex->ee_block);
1157 + if (nearex != EXT_LAST_EXTENT(eh)) {
1158 + len = EXT_MAX_EXTENT(eh) - nearex;
1159 + len = (len - 1) * sizeof(struct ext3_extent);
1160 + len = len < 0 ? 0 : len;
1161 + ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
1162 + "move %d from 0x%p to 0x%p\n",
1163 + newext->ee_block, newext->ee_start,
1165 + nearex, len, nearex + 1, nearex + 2);
1166 + memmove(nearex + 2, nearex + 1, len);
1168 + path[depth].p_ext = nearex + 1;
1170 + EXT_ASSERT(newext->ee_block != nearex->ee_block);
1171 + len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
1172 + len = len < 0 ? 0 : len;
1173 + ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
1174 + "move %d from 0x%p to 0x%p\n",
1175 + newext->ee_block, newext->ee_start, newext->ee_len,
1176 + nearex, len, nearex + 1, nearex + 2);
1177 + memmove(nearex + 1, nearex, len);
1178 + path[depth].p_ext = nearex;
1182 + nearex = path[depth].p_ext;
1183 + nearex->ee_block = newext->ee_block;
1184 + nearex->ee_start = newext->ee_start;
1185 + nearex->ee_len = newext->ee_len;
1186 + /* FIXME: support for large fs */
1187 + nearex->ee_start_hi = 0;
1190 + /* try to merge extents to the right */
1191 + while (nearex < EXT_LAST_EXTENT(eh)) {
1192 + if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
1194 + /* merge with next extent! */
1195 + nearex->ee_len += nearex[1].ee_len;
1196 + if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1197 + len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1198 + sizeof(struct ext3_extent);
1199 + memmove(nearex + 1, nearex + 2, len);
1202 + EXT_ASSERT(eh->eh_entries > 0);
1205 + /* try to merge extents to the left */
1207 + /* time to correct all indexes above */
1208 + err = ext3_ext_correct_indexes(handle, tree, path);
1212 + err = ext3_ext_dirty(handle, tree, path + depth);
1216 + ext3_ext_drop_refs(npath);
1219 + ext3_ext_tree_changed(tree);
1220 + ext3_ext_invalidate_cache(tree);
1224 +int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
1225 + unsigned long num, ext_prepare_callback func)
1227 + struct ext3_ext_path *path = NULL;
1228 + struct ext3_ext_cache cbex;
1229 + struct ext3_extent *ex;
1230 + unsigned long next, start = 0, end = 0;
1231 + unsigned long last = block + num;
1232 + int depth, exists, err = 0;
1236 + EXT_ASSERT(tree->inode);
1237 + EXT_ASSERT(tree->root);
1239 + while (block < last && block != EXT_MAX_BLOCK) {
1240 + num = last - block;
1241 + /* find extent for this block */
1242 + path = ext3_ext_find_extent(tree, block, path);
1243 + if (IS_ERR(path)) {
1244 + err = PTR_ERR(path);
1249 + depth = EXT_DEPTH(tree);
1250 + EXT_ASSERT(path[depth].p_hdr);
1251 + ex = path[depth].p_ext;
1252 + next = ext3_ext_next_allocated_block(path);
1256 + /* there is no extent yet, so try to allocate
1257 + * all requested space */
1259 + end = block + num;
1260 + } else if (ex->ee_block > block) {
1261 + /* need to allocate space before found extent */
1263 + end = ex->ee_block;
1264 + if (block + num < end)
1265 + end = block + num;
1266 + } else if (block >= ex->ee_block + ex->ee_len) {
1267 + /* need to allocate space after found extent */
1269 + end = block + num;
1272 + } else if (block >= ex->ee_block) {
1274 + * some part of requested space is covered
1278 + end = ex->ee_block + ex->ee_len;
1279 + if (block + num < end)
1280 + end = block + num;
1285 + EXT_ASSERT(end > start);
1288 + cbex.ec_block = start;
1289 + cbex.ec_len = end - start;
1290 + cbex.ec_start = 0;
1291 + cbex.ec_type = EXT3_EXT_CACHE_GAP;
1293 + cbex.ec_block = ex->ee_block;
1294 + cbex.ec_len = ex->ee_len;
1295 + cbex.ec_start = ex->ee_start;
1296 + cbex.ec_type = EXT3_EXT_CACHE_EXTENT;
1299 + EXT_ASSERT(cbex.ec_len > 0);
1300 + EXT_ASSERT(path[depth].p_hdr);
1301 + err = func(tree, path, &cbex);
1302 + ext3_ext_drop_refs(path);
1306 + if (err == EXT_REPEAT)
1308 + else if (err == EXT_BREAK) {
1313 + if (EXT_DEPTH(tree) != depth) {
1314 + /* depth was changed. we have to realloc path */
1319 + block = cbex.ec_block + cbex.ec_len;
1323 + ext3_ext_drop_refs(path);
1331 +ext3_ext_put_in_cache(struct ext3_extents_tree *tree, __u32 block,
1332 + __u32 len, __u32 start, int type)
1334 + EXT_ASSERT(len > 0);
1336 + tree->cex->ec_type = type;
1337 + tree->cex->ec_block = block;
1338 + tree->cex->ec_len = len;
1339 + tree->cex->ec_start = start;
1344 + * this routine calculates boundaries of the gap requested block fits into
1345 + * and cache this gap
1348 +ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
1349 + struct ext3_ext_path *path,
1350 + unsigned long block)
1352 + int depth = EXT_DEPTH(tree);
1353 + unsigned long lblock, len;
1354 + struct ext3_extent *ex;
1359 + ex = path[depth].p_ext;
1361 + /* there is no extent yet, so gap is [0;-] */
1363 + len = EXT_MAX_BLOCK;
1364 + ext_debug(tree, "cache gap(whole file):");
1365 + } else if (block < ex->ee_block) {
1367 + len = ex->ee_block - block;
1368 + ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
1369 + (unsigned long) block,
1370 + (unsigned long) ex->ee_block,
1371 + (unsigned long) ex->ee_len);
1372 + } else if (block >= ex->ee_block + ex->ee_len) {
1373 + lblock = ex->ee_block + ex->ee_len;
1374 + len = ext3_ext_next_allocated_block(path);
1375 + ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
1376 + (unsigned long) ex->ee_block,
1377 + (unsigned long) ex->ee_len,
1378 + (unsigned long) block);
1379 + EXT_ASSERT(len > lblock);
1380 + len = len - lblock;
1386 + ext_debug(tree, " -> %lu:%lu\n", (unsigned long) lblock, len);
1387 + ext3_ext_put_in_cache(tree, lblock, len, 0, EXT3_EXT_CACHE_GAP);
1391 +ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
1392 + struct ext3_extent *ex)
1394 + struct ext3_ext_cache *cex = tree->cex;
1396 + /* is there cache storage at all? */
1398 + return EXT3_EXT_CACHE_NO;
1400 + /* has cache valid data? */
1401 + if (cex->ec_type == EXT3_EXT_CACHE_NO)
1402 + return EXT3_EXT_CACHE_NO;
1404 + EXT_ASSERT(cex->ec_type == EXT3_EXT_CACHE_GAP ||
1405 + cex->ec_type == EXT3_EXT_CACHE_EXTENT);
1406 + if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1407 + ex->ee_block = cex->ec_block;
1408 + ex->ee_start = cex->ec_start;
1409 + ex->ee_start_hi = 0;
1410 + ex->ee_len = cex->ec_len;
1411 + ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
1412 + (unsigned long) block,
1413 + (unsigned long) ex->ee_block,
1414 + (unsigned long) ex->ee_len,
1415 + (unsigned long) ex->ee_start);
1416 + return cex->ec_type;
1419 + /* not in cache */
1420 + return EXT3_EXT_CACHE_NO;
1424 + * routine removes index from the index block
1425 + * it's used in truncate case only. thus all requests are for
1426 + * last index in the block only
1428 +int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
1429 + struct ext3_ext_path *path)
1431 + struct buffer_head *bh;
1434 + /* free index block */
1436 + EXT_ASSERT(path->p_hdr->eh_entries);
1437 + if ((err = ext3_ext_get_access(handle, tree, path)))
1439 + path->p_hdr->eh_entries--;
1440 + if ((err = ext3_ext_dirty(handle, tree, path)))
1442 + ext_debug(tree, "index is empty, remove it, free block %d\n",
1443 + path->p_idx->ei_leaf);
1444 + bh = sb_find_get_block(tree->inode->i_sb, path->p_idx->ei_leaf);
1445 + ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
1446 + ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
1450 +int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
1451 + struct ext3_ext_path *path)
1453 + int depth = EXT_DEPTH(tree);
1457 + /* probably there is space in leaf? */
1458 + if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
1463 + * the worst case we're expecting is creation of the
1464 + * new root (growing in depth) with index splitting
1465 + * for splitting we have to consider depth + 1 because
1466 + * previous growing could increase it
1468 + depth = depth + 1;
1471 + * growing in depth:
1472 + * block allocation + new root + old root
1474 + needed = EXT3_ALLOC_NEEDED + 2;
1476 + /* index split. we may need:
1477 + * allocate intermediate indexes and new leaf
1478 + * change two blocks at each level, but root
1479 + * modify root block (inode)
1481 + needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
1487 +ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
1488 + struct ext3_ext_path *path, unsigned long start,
1489 + unsigned long end)
1491 + struct ext3_extent *ex, tex;
1492 + struct ext3_ext_path *npath;
1493 + int depth, creds, err;
1495 + depth = EXT_DEPTH(tree);
1496 + ex = path[depth].p_ext;
1498 + EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
1499 + EXT_ASSERT(ex->ee_block < start);
1501 + /* calculate tail extent */
1502 + tex.ee_block = end + 1;
1503 + EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
1504 + tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
1506 + creds = ext3_ext_calc_credits_for_insert(tree, path);
1507 + handle = ext3_ext_journal_restart(handle, creds);
1508 + if (IS_ERR(handle))
1509 + return PTR_ERR(handle);
1511 + /* calculate head extent. use primary extent */
1512 + err = ext3_ext_get_access(handle, tree, path + depth);
1515 + ex->ee_len = start - ex->ee_block;
1516 + err = ext3_ext_dirty(handle, tree, path + depth);
1520 + /* FIXME: some callback to free underlying resource
1521 + * and correct ee_start? */
1522 + ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
1523 + ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
1525 + npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
1526 + if (IS_ERR(npath))
1527 + return PTR_ERR(npath);
1528 + depth = EXT_DEPTH(tree);
1529 + EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
1530 + EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
1532 + err = ext3_ext_insert_extent(handle, tree, npath, &tex);
1533 + ext3_ext_drop_refs(npath);
1540 +ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
1541 + struct ext3_ext_path *path, unsigned long start,
1542 + unsigned long end)
1544 + struct ext3_extent *ex, *fu = NULL, *lu, *le;
1545 + int err = 0, correct_index = 0;
1546 + int depth = EXT_DEPTH(tree), credits;
1547 + struct ext3_extent_header *eh;
1548 + unsigned a, b, block, num;
1550 + ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
1551 + if (!path[depth].p_hdr)
1552 + path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
1553 + eh = path[depth].p_hdr;
1555 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
1556 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
1558 + /* find where to start removing */
1559 + le = ex = EXT_LAST_EXTENT(eh);
1560 + while (ex != EXT_FIRST_EXTENT(eh)) {
1561 + if (ex->ee_block <= end)
1566 + if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
1567 + /* removal of internal part of the extent requested
1568 + * tail and head must be placed in different extent
1569 + * so, we have to insert one more extent */
1570 + path[depth].p_ext = ex;
1571 + return ext3_ext_split_for_rm(handle, tree, path, start, end);
1575 + while (ex >= EXT_FIRST_EXTENT(eh) && ex->ee_block + ex->ee_len > start) {
1576 + ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
1577 + path[depth].p_ext = ex;
1579 + a = ex->ee_block > start ? ex->ee_block : start;
1580 + b = ex->ee_block + ex->ee_len - 1 < end ?
1581 + ex->ee_block + ex->ee_len - 1 : end;
1583 + ext_debug(tree, " border %u:%u\n", a, b);
1585 + if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
1589 + } else if (a != ex->ee_block) {
1590 + /* remove tail of the extent */
1591 + block = ex->ee_block;
1593 + } else if (b != ex->ee_block + ex->ee_len - 1) {
1594 + /* remove head of the extent */
1598 + /* remove whole extent: excellent! */
1599 + block = ex->ee_block;
1601 + EXT_ASSERT(a == ex->ee_block &&
1602 + b == ex->ee_block + ex->ee_len - 1);
1605 + if (ex == EXT_FIRST_EXTENT(eh))
1606 + correct_index = 1;
1609 + if (correct_index)
1610 + credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
1611 + if (tree->ops->remove_extent_credits)
1612 + credits+=tree->ops->remove_extent_credits(tree,ex,a,b);
1614 + handle = ext3_ext_journal_restart(handle, credits);
1615 + if (IS_ERR(handle)) {
1616 + err = PTR_ERR(handle);
1620 + err = ext3_ext_get_access(handle, tree, path + depth);
1624 + if (tree->ops->remove_extent)
1625 + err = tree->ops->remove_extent(tree, ex, a, b);
1630 + /* this extent is removed entirely mark slot unused */
1631 + ex->ee_start = ex->ee_start_hi = 0;
1636 + ex->ee_block = block;
1639 + err = ext3_ext_dirty(handle, tree, path + depth);
1643 + ext_debug(tree, "new extent: %u:%u:%u\n",
1644 + ex->ee_block, ex->ee_len, ex->ee_start);
1649 + /* reuse unused slots */
1651 + if (lu->ee_start) {
1653 + lu->ee_start = lu->ee_start_hi = 0;
1660 + if (correct_index && eh->eh_entries)
1661 + err = ext3_ext_correct_indexes(handle, tree, path);
1663 + /* if this leaf is free, then we should
1664 + * remove it from index block above */
1665 + if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
1666 + err = ext3_ext_rm_idx(handle, tree, path + depth);
1673 +static struct ext3_extent_idx *
1674 +ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
1676 + struct ext3_extent_idx *ix;
1678 + ix = EXT_LAST_INDEX(hdr);
1679 + while (ix != EXT_FIRST_INDEX(hdr)) {
1680 + if (ix->ei_block <= block)
1688 + * returns 1 if current index has to be freed (even partial)
1691 +ext3_ext_more_to_rm(struct ext3_ext_path *path)
1693 + EXT_ASSERT(path->p_idx);
1695 + if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1699 + * if truncate on deeper level happened it wasn't partial
1700 + * so we have to consider current index for truncation
1702 + if (path->p_hdr->eh_entries == path->p_block)
1707 +int ext3_ext_remove_space(struct ext3_extents_tree *tree,
1708 + unsigned long start, unsigned long end)
1710 + struct inode *inode = tree->inode;
1711 + struct super_block *sb = inode->i_sb;
1712 + int depth = EXT_DEPTH(tree);
1713 + struct ext3_ext_path *path;
1715 + int i = 0, err = 0;
1717 + ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
1719 + /* probably first extent we're gonna free will be last in block */
1720 + handle = ext3_journal_start(inode, depth + 1);
1721 + if (IS_ERR(handle))
1722 + return PTR_ERR(handle);
1724 + ext3_ext_invalidate_cache(tree);
1727 + * we start scanning from right side freeing all the blocks
1728 + * after i_size and walking into the deep
1730 + path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
1731 + if (IS_ERR(path)) {
1732 + ext3_error(sb, __FUNCTION__, "Can't allocate path array");
1733 + ext3_journal_stop(handle);
1736 + memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
1737 + path[i].p_hdr = EXT_ROOT_HDR(tree);
1739 + while (i >= 0 && err == 0) {
1741 + /* this is leaf block */
1742 + err = ext3_ext_rm_leaf(handle, tree, path, start, end);
1743 + /* root level has p_bh == NULL, brelse() eats this */
1744 + brelse(path[i].p_bh);
1749 + /* this is index block */
1750 + if (!path[i].p_hdr) {
1751 + ext_debug(tree, "initialize header\n");
1752 + path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
1755 + EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
1756 + EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
1758 + if (!path[i].p_idx) {
1759 + /* this level hasn't been touched yet */
1761 + ext3_ext_last_covered(path[i].p_hdr, end);
1762 + path[i].p_block = path[i].p_hdr->eh_entries + 1;
1763 + ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
1764 + path[i].p_hdr, path[i].p_hdr->eh_entries);
1766 + /* we've already been here, see the next index */
1770 + ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
1771 + i, EXT_FIRST_INDEX(path[i].p_hdr),
1773 + if (ext3_ext_more_to_rm(path + i)) {
1774 + /* go to the next level */
1775 + ext_debug(tree, "move to level %d (block %d)\n",
1776 + i + 1, path[i].p_idx->ei_leaf);
1777 + memset(path + i + 1, 0, sizeof(*path));
1778 + path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
1779 + if (!path[i+1].p_bh) {
1780 + /* should we reset i_size? */
1784 + /* put actual number of indexes to know whether this
1785 + * number got changed at the next iteration */
1786 + path[i].p_block = path[i].p_hdr->eh_entries;
1789 + /* we finish processing this index, go up */
1790 + if (path[i].p_hdr->eh_entries == 0 && i > 0) {
1791 + /* index is empty, remove it
1792 + * handle must be already prepared by the
1793 + * truncatei_leaf() */
1794 + err = ext3_ext_rm_idx(handle, tree, path + i);
1796 + /* root level has p_bh == NULL, brelse() eats this */
1797 + brelse(path[i].p_bh);
1799 + ext_debug(tree, "return to level %d\n", i);
1803 + /* TODO: flexible tree reduction should be here */
1804 + if (path->p_hdr->eh_entries == 0) {
1806 + * truncate to zero freed all the tree
1807 + * so, we need to correct eh_depth
1809 + err = ext3_ext_get_access(handle, tree, path);
1811 + EXT_ROOT_HDR(tree)->eh_depth = 0;
1812 + EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
1813 + err = ext3_ext_dirty(handle, tree, path);
1816 + ext3_ext_tree_changed(tree);
1819 + ext3_journal_stop(handle);
1824 +int ext3_ext_calc_metadata_amount(struct ext3_extents_tree *tree, int blocks)
1826 + int lcap, icap, rcap, leafs, idxs, num;
1828 + rcap = ext3_ext_space_root(tree);
1829 + if (blocks <= rcap) {
1830 + /* all extents fit to the root */
1834 + rcap = ext3_ext_space_root_idx(tree);
1835 + lcap = ext3_ext_space_block(tree);
1836 + icap = ext3_ext_space_block_idx(tree);
1838 + num = leafs = (blocks + lcap - 1) / lcap;
1839 + if (leafs <= rcap) {
1840 + /* all pointers to leafs fit to the root */
1844 + /* ok. we need separate index block(s) to link all leaf blocks */
1845 + idxs = (leafs + icap - 1) / icap;
1848 + idxs = (idxs + icap - 1) / icap;
1849 + } while (idxs > rcap);
1855 + * called at mount time
1857 +void ext3_ext_init(struct super_block *sb)
1860 + * possible initialization would be here
1863 + if (test_opt(sb, EXTENTS)) {
1864 + printk("EXT3-fs: file extents enabled");
1865 +#ifdef AGRESSIVE_TEST
1866 + printk(", agressive tests");
1868 +#ifdef CHECK_BINSEARCH
1869 + printk(", check binsearch");
1876 + * called at umount time
1878 +void ext3_ext_release(struct super_block *sb)
1882 +/************************************************************************
1883 + * VFS related routines
1884 + ************************************************************************/
1886 +static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
1888 + /* we use in-core data, not bh */
1892 +static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
1894 + struct inode *inode = buffer;
1895 + return ext3_mark_inode_dirty(handle, inode);
1898 +static int ext3_ext_mergable(struct ext3_extent *ex1,
1899 + struct ext3_extent *ex2)
1901 + /* FIXME: support for large fs */
1902 + if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
1908 +ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
1909 + struct ext3_extent *ex,
1910 + unsigned long from, unsigned long to)
1914 + /* at present, extent can't cross block group */;
1915 + needed = 4; /* bitmap + group desc + sb + inode */
1917 +#ifdef CONFIG_QUOTA
1918 + needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
1924 +ext3_remove_blocks(struct ext3_extents_tree *tree,
1925 + struct ext3_extent *ex,
1926 + unsigned long from, unsigned long to)
1928 + int needed = ext3_remove_blocks_credits(tree, ex, from, to);
1929 + handle_t *handle = ext3_journal_start(tree->inode, needed);
1930 + struct buffer_head *bh;
1933 + if (IS_ERR(handle))
1934 + return PTR_ERR(handle);
1935 + if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
1936 + /* tail removal */
1937 + unsigned long num, start;
1938 + num = ex->ee_block + ex->ee_len - from;
1939 + start = ex->ee_start + ex->ee_len - num;
1940 + ext_debug(tree, "free last %lu blocks starting %lu\n",
1942 + for (i = 0; i < num; i++) {
1943 + bh = sb_find_get_block(tree->inode->i_sb, start + i);
1944 + ext3_forget(handle, 0, tree->inode, bh, start + i);
1946 + ext3_free_blocks(handle, tree->inode, start, num);
1947 + } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
1948 + printk("strange request: removal %lu-%lu from %u:%u\n",
1949 + from, to, ex->ee_block, ex->ee_len);
1951 + printk("strange request: removal(2) %lu-%lu from %u:%u\n",
1952 + from, to, ex->ee_block, ex->ee_len);
1954 + ext3_journal_stop(handle);
1958 +static int ext3_ext_find_goal(struct inode *inode,
1959 + struct ext3_ext_path *path, unsigned long block)
1961 + struct ext3_inode_info *ei = EXT3_I(inode);
1962 + unsigned long bg_start;
1963 + unsigned long colour;
1967 + struct ext3_extent *ex;
1968 + depth = path->p_depth;
1970 + /* try to predict block placement */
1971 + if ((ex = path[depth].p_ext))
1972 + return ex->ee_start + (block - ex->ee_block);
1974 + /* it looks like the index is empty
1975 + * try to find starting from index itself */
1976 + if (path[depth].p_bh)
1977 + return path[depth].p_bh->b_blocknr;
1980 + /* OK. use inode's group */
1981 + bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
1982 + le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
1983 + colour = (current->pid % 16) *
1984 + (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
1985 + return bg_start + colour + block;
1988 +static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
1989 + struct ext3_ext_path *path,
1990 + struct ext3_extent *ex, int *err)
1992 + struct inode *inode = tree->inode;
1993 + int newblock, goal;
1997 + EXT_ASSERT(ex->ee_start);
1998 + EXT_ASSERT(ex->ee_len);
2000 + /* reuse block from the extent to order data/metadata */
2001 + newblock = ex->ee_start++;
2003 + if (ex->ee_len == 0) {
2005 + /* allocate new block for the extent */
2006 + goal = ext3_ext_find_goal(inode, path, ex->ee_block);
2007 + ex->ee_start = ext3_new_block(handle, inode, goal, err);
2008 + ex->ee_start_hi = 0;
2009 + if (ex->ee_start == 0) {
2010 + /* error occurred: restore old extent */
2011 + ex->ee_start = newblock;
2018 +static struct ext3_extents_helpers ext3_blockmap_helpers = {
2019 + .get_write_access = ext3_get_inode_write_access,
2020 + .mark_buffer_dirty = ext3_mark_buffer_dirty,
2021 + .mergable = ext3_ext_mergable,
2022 + .new_block = ext3_new_block_cb,
2023 + .remove_extent = ext3_remove_blocks,
2024 + .remove_extent_credits = ext3_remove_blocks_credits,
2027 +void ext3_init_tree_desc(struct ext3_extents_tree *tree,
2028 + struct inode *inode)
2030 + tree->inode = inode;
2031 + tree->root = (void *) EXT3_I(inode)->i_data;
2032 + tree->buffer = (void *) inode;
2033 + tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
2034 + tree->cex = (struct ext3_ext_cache *) &EXT3_I(inode)->i_cached_extent;
2035 + tree->ops = &ext3_blockmap_helpers;
2038 +int ext3_ext_get_block(handle_t *handle, struct inode *inode,
2039 + long iblock, struct buffer_head *bh_result,
2040 + int create, int extend_disksize)
2042 + struct ext3_ext_path *path = NULL;
2043 + struct ext3_extent newex;
2044 + struct ext3_extent *ex;
2045 + int goal, newblock, err = 0, depth;
2046 + struct ext3_extents_tree tree;
2048 + clear_buffer_new(bh_result);
2049 + ext3_init_tree_desc(&tree, inode);
2050 + ext_debug(&tree, "block %d requested for inode %u\n",
2051 + (int) iblock, (unsigned) inode->i_ino);
2052 + down(&EXT3_I(inode)->truncate_sem);
2054 + /* check in cache */
2055 + if ((goal = ext3_ext_in_cache(&tree, iblock, &newex))) {
2056 + if (goal == EXT3_EXT_CACHE_GAP) {
2058 + /* block isn't allocated yet and
2059 + * user don't want to allocate it */
2062 + /* we should allocate requested block */
2063 + } else if (goal == EXT3_EXT_CACHE_EXTENT) {
2064 + /* block is already allocated */
2065 + newblock = iblock - newex.ee_block + newex.ee_start;
2072 + /* find extent for this block */
2073 + path = ext3_ext_find_extent(&tree, iblock, NULL);
2074 + if (IS_ERR(path)) {
2075 + err = PTR_ERR(path);
2080 + depth = EXT_DEPTH(&tree);
2083 + * consistent leaf must not be empty
2084 + * this situation is possible, though, _during_ tree modification
2085 + * this is why assert can't be put in ext3_ext_find_extent()
2087 + EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
2089 + if ((ex = path[depth].p_ext)) {
2090 + /* if found extent covers block, simply return it */
2091 + if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
2092 + newblock = iblock - ex->ee_block + ex->ee_start;
2093 + ext_debug(&tree, "%d fit into %d:%d -> %d\n",
2094 + (int) iblock, ex->ee_block, ex->ee_len,
2096 + ext3_ext_put_in_cache(&tree, ex->ee_block,
2097 + ex->ee_len, ex->ee_start,
2098 + EXT3_EXT_CACHE_EXTENT);
2104 + * requested block isn't allocated yet
2105 + * we couldn't try to create block if create flag is zero
2108 + /* put just found gap into cache to speed up subsequent requests */
2109 + ext3_ext_put_gap_in_cache(&tree, path, iblock);
2113 + /* allocate new block */
2114 + goal = ext3_ext_find_goal(inode, path, iblock);
2115 + newblock = ext3_new_block(handle, inode, goal, &err);
2118 + ext_debug(&tree, "allocate new block: goal %d, found %d\n",
2121 + /* try to insert new extent into found leaf and return */
2122 + newex.ee_block = iblock;
2123 + newex.ee_start = newblock;
2124 + newex.ee_start_hi = 0;
2126 + err = ext3_ext_insert_extent(handle, &tree, path, &newex);
2130 + if (extend_disksize && inode->i_size > EXT3_I(inode)->i_disksize)
2131 + EXT3_I(inode)->i_disksize = inode->i_size;
2133 + /* previous routine could use block we allocated */
2134 + newblock = newex.ee_start;
2135 + set_buffer_new(bh_result);
2137 + ext3_ext_put_in_cache(&tree, newex.ee_block, newex.ee_len,
2138 + newex.ee_start, EXT3_EXT_CACHE_EXTENT);
2140 + ext3_ext_show_leaf(&tree, path);
2141 + map_bh(bh_result, inode->i_sb, newblock);
2144 + ext3_ext_drop_refs(path);
2147 + up(&EXT3_I(inode)->truncate_sem);
2152 +void ext3_ext_truncate(struct inode * inode, struct page *page)
2154 + struct address_space *mapping = inode->i_mapping;
2155 + struct super_block *sb = inode->i_sb;
2156 + struct ext3_extents_tree tree;
2157 + unsigned long last_block;
2161 + ext3_init_tree_desc(&tree, inode);
2164 + * probably first extent we're gonna free will be last in block
2166 + err = ext3_writepage_trans_blocks(inode) + 3;
2167 + handle = ext3_journal_start(inode, err);
2168 + if (IS_ERR(handle)) {
2170 + clear_highpage(page);
2171 + flush_dcache_page(page);
2172 + unlock_page(page);
2173 + page_cache_release(page);
2179 + ext3_block_truncate_page(handle, page, mapping, inode->i_size);
2181 + down(&EXT3_I(inode)->truncate_sem);
2182 + ext3_ext_invalidate_cache(&tree);
2185 + * TODO: optimization is possible here
2186 + * probably we don't need scanning at all,
2187 + * because page truncation is enough
2189 + if (ext3_orphan_add(handle, inode))
2192 + /* we have to know where to truncate from in crash case */
2193 + EXT3_I(inode)->i_disksize = inode->i_size;
2194 + ext3_mark_inode_dirty(handle, inode);
2196 + last_block = (inode->i_size + sb->s_blocksize - 1) >>
2197 + EXT3_BLOCK_SIZE_BITS(sb);
2198 + err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
2200 + /* In a multi-transaction truncate, we only make the final
2201 + * transaction synchronous */
2202 + if (IS_SYNC(inode))
2203 + handle->h_sync = 1;
2207 + * If this was a simple ftruncate(), and the file will remain alive
2208 + * then we need to clear up the orphan record which we created above.
2209 + * However, if this was a real unlink then we were called by
2210 + * ext3_delete_inode(), and we allow that function to clean up the
2211 + * orphan info for us.
2213 + if (inode->i_nlink)
2214 + ext3_orphan_del(handle, inode);
2216 + up(&EXT3_I(inode)->truncate_sem);
2217 + ext3_journal_stop(handle);
2221 + * this routine calculates the max number of blocks we could modify
2222 + * in order to allocate new block for an inode
2224 +int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
2226 + struct ext3_extents_tree tree;
2229 + ext3_init_tree_desc(&tree, inode);
2231 + needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
2233 + /* caller want to allocate num blocks */
2236 +#ifdef CONFIG_QUOTA
2238 + * FIXME: real calculation should be here
2239 + * it depends on blockmap format of quota file
2241 + needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
2247 +void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
2249 + struct ext3_extents_tree tree;
2251 + ext3_init_tree_desc(&tree, inode);
2252 + ext3_extent_tree_init(handle, &tree);
2255 +int ext3_ext_calc_blockmap_metadata(struct inode *inode, int blocks)
2257 + struct ext3_extents_tree tree;
2259 + ext3_init_tree_desc(&tree, inode);
2260 + return ext3_ext_calc_metadata_amount(&tree, blocks);
2264 +ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
2265 + struct ext3_ext_path *path,
2266 + struct ext3_ext_cache *newex)
2268 + struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
2270 + if (newex->ec_type != EXT3_EXT_CACHE_EXTENT)
2271 + return EXT_CONTINUE;
2275 + if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
2278 + if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
2280 + buf->cur += sizeof(*newex);
2282 + buf->err = -EFAULT;
2285 + return EXT_CONTINUE;
2289 +ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
2290 + struct ext3_ext_path *path,
2291 + struct ext3_ext_cache *ex)
2293 + struct ext3_extent_tree_stats *buf =
2294 + (struct ext3_extent_tree_stats *) tree->private;
2297 + if (ex->ec_type != EXT3_EXT_CACHE_EXTENT)
2298 + return EXT_CONTINUE;
2300 + depth = EXT_DEPTH(tree);
2301 + buf->extents_num++;
2302 + if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
2304 + return EXT_CONTINUE;
2307 +int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
2308 + unsigned long arg)
2312 + if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
2315 + if (cmd == EXT3_IOC_GET_EXTENTS) {
2316 + struct ext3_extent_buf buf;
2317 + struct ext3_extents_tree tree;
2319 + if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
2322 + ext3_init_tree_desc(&tree, inode);
2323 + buf.cur = buf.buffer;
2325 + tree.private = &buf;
2326 + down(&EXT3_I(inode)->truncate_sem);
2327 + err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
2328 + ext3_ext_store_extent_cb);
2329 + up(&EXT3_I(inode)->truncate_sem);
2332 + } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
2333 + struct ext3_extent_tree_stats buf;
2334 + struct ext3_extents_tree tree;
2336 + ext3_init_tree_desc(&tree, inode);
2337 + down(&EXT3_I(inode)->truncate_sem);
2338 + buf.depth = EXT_DEPTH(&tree);
2339 + buf.extents_num = 0;
2341 + tree.private = &buf;
2342 + err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
2343 + ext3_ext_collect_stats_cb);
2344 + up(&EXT3_I(inode)->truncate_sem);
2346 + err = copy_to_user((void *) arg, &buf, sizeof(buf));
2347 + } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
2348 + struct ext3_extents_tree tree;
2349 + ext3_init_tree_desc(&tree, inode);
2350 + down(&EXT3_I(inode)->truncate_sem);
2351 + err = EXT_DEPTH(&tree);
2352 + up(&EXT3_I(inode)->truncate_sem);
2358 +EXPORT_SYMBOL(ext3_init_tree_desc);
2359 +EXPORT_SYMBOL(ext3_mark_inode_dirty);
2360 +EXPORT_SYMBOL(ext3_ext_invalidate_cache);
2361 +EXPORT_SYMBOL(ext3_ext_insert_extent);
2362 +EXPORT_SYMBOL(ext3_ext_walk_space);
2363 +EXPORT_SYMBOL(ext3_ext_find_goal);
2364 +EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
2365 Index: linux-stage/fs/ext3/ialloc.c
2366 ===================================================================
2367 --- linux-stage.orig/fs/ext3/ialloc.c 2005-02-25 14:50:50.304202816 +0200
2368 +++ linux-stage/fs/ext3/ialloc.c 2005-02-25 15:33:48.920193600 +0200
2369 @@ -566,7 +566,7 @@ repeat:
2370 ei->i_dir_start_lookup = 0;
2373 - ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
2374 + ei->i_flags = EXT3_I(dir)->i_flags & ~(EXT3_INDEX_FL|EXT3_EXTENTS_FL);
2376 ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
2377 /* dirsync only applies to directories */
2378 @@ -646,6 +646,18 @@
2379 DQUOT_FREE_INODE(inode);
2382 + if (test_opt(sb, EXTENTS) && (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
2383 + EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
2384 + ext3_extents_initialize_blockmap(handle, inode);
2385 + if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
2386 + err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
2387 + if (err) goto fail;
2388 + EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
2389 + BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
2390 + err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
2394 err = ext3_mark_inode_dirty(handle, inode);
2396 ext3_std_error(sb, err);
2397 Index: linux-stage/fs/ext3/inode.c
2398 ===================================================================
2399 --- linux-stage.orig/fs/ext3/inode.c 2005-02-25 14:50:50.309202056 +0200
2400 +++ linux-stage/fs/ext3/inode.c 2005-02-25 15:36:51.846384592 +0200
2401 @@ -796,6 +796,17 @@
2406 +ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
2407 + struct buffer_head *bh, int create, int extend_disksize)
2409 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2410 + return ext3_ext_get_block(handle, inode, block, bh, create,
2412 + return ext3_get_block_handle(handle, inode, block, bh, create,
2416 static int ext3_get_block(struct inode *inode, sector_t iblock,
2417 struct buffer_head *bh_result, int create)
2420 handle = ext3_journal_current_handle();
2421 J_ASSERT(handle != 0);
2423 - ret = ext3_get_block_handle(handle, inode, iblock,
2424 - bh_result, create, 1);
2425 + ret = ext3_get_block_wrap(handle, inode, iblock,
2426 + bh_result, create, 1);
2434 - ret = ext3_get_block_handle(handle, inode, iblock,
2435 + ret = ext3_get_block_wrap(handle, inode, iblock,
2436 bh_result, create, 0);
2437 bh_result->b_size = (1 << inode->i_blkbits);
2441 dummy.b_blocknr = -1000;
2442 buffer_trace_init(&dummy.b_history);
2443 - *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
2444 + *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
2445 if (!*errp && buffer_mapped(&dummy)) {
2446 struct buffer_head *bh;
2447 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
2448 @@ -1589,7 +1600,7 @@
2449 * This required during truncate. We need to physically zero the tail end
2450 * of that block so it doesn't yield old data if the file is later grown.
2452 -static int ext3_block_truncate_page(handle_t *handle, struct page *page,
2453 +int ext3_block_truncate_page(handle_t *handle, struct page *page,
2454 struct address_space *mapping, loff_t from)
2456 unsigned long index = from >> PAGE_CACHE_SHIFT;
2457 @@ -2087,6 +2098,9 @@
2461 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2462 + return ext3_ext_truncate(inode, page);
2464 handle = start_transaction(inode);
2465 if (IS_ERR(handle)) {
2467 @@ -2814,6 +2828,9 @@
2468 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
2471 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2472 + return ext3_ext_writepage_trans_blocks(inode, bpp);
2474 if (ext3_should_journal_data(inode))
2475 ret = 3 * (bpp + indirects) + 2;
2477 Index: linux-stage/fs/ext3/Makefile
2478 ===================================================================
2479 --- linux-stage.orig/fs/ext3/Makefile 2005-02-25 14:49:42.168561008 +0200
2480 +++ linux-stage/fs/ext3/Makefile 2005-02-25 15:39:28.384587168 +0200
2482 obj-$(CONFIG_EXT3_FS) += ext3.o
2484 ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
2485 - ioctl.o namei.o super.o symlink.o hash.o resize.o
2486 + ioctl.o namei.o super.o symlink.o hash.o resize.o \
2489 ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
2490 ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
2491 Index: linux-stage/fs/ext3/super.c
2492 ===================================================================
2493 --- linux-stage.orig/fs/ext3/super.c 2005-02-25 14:52:33.550506992 +0200
2494 +++ linux-stage/fs/ext3/super.c 2005-02-25 15:38:10.474431312 +0200
2496 struct ext3_super_block *es = sbi->s_es;
2499 + ext3_ext_release(sb);
2500 ext3_xattr_put_super(sb);
2501 journal_destroy(sbi->s_journal);
2502 if (!(sb->s_flags & MS_RDONLY)) {
2505 ei->i_rsv_window.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
2506 ei->vfs_inode.i_version = 1;
2508 + memset(&ei->i_cached_extent, 0, sizeof(ei->i_cached_extent));
2509 return &ei->vfs_inode;
2513 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
2514 Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
2515 Opt_iopen, Opt_noiopen, Opt_iopen_nopriv,
2516 + Opt_extents, Opt_noextents, Opt_extdebug,
2519 static match_table_t tokens = {
2521 {Opt_iopen, "iopen"},
2522 {Opt_noiopen, "noiopen"},
2523 {Opt_iopen_nopriv, "iopen_nopriv"},
2524 + {Opt_extents, "extents"},
2525 + {Opt_noextents, "noextents"},
2526 + {Opt_extdebug, "extdebug"},
2527 {Opt_barrier, "barrier=%u"},
2529 {Opt_resize, "resize"},
2530 @@ -943,6 +950,15 @@
2531 match_int(&args[0], &option);
2532 *n_blocks_count = option;
2535 + set_opt (sbi->s_mount_opt, EXTENTS);
2537 + case Opt_noextents:
2538 + clear_opt (sbi->s_mount_opt, EXTENTS);
2540 + case Opt_extdebug:
2541 + set_opt (sbi->s_mount_opt, EXTDEBUG);
2545 "EXT3-fs: Unrecognized mount option \"%s\" "
2546 @@ -1625,6 +1638,8 @@
2547 percpu_counter_mod(&sbi->s_dirs_counter,
2548 ext3_count_dirs(sb));
2550 + ext3_ext_init(sb);
2555 Index: linux-stage/fs/ext3/ioctl.c
2556 ===================================================================
2557 --- linux-stage.orig/fs/ext3/ioctl.c 2005-02-25 14:37:28.971023976 +0200
2558 +++ linux-stage/fs/ext3/ioctl.c 2005-02-25 15:33:48.938190864 +0200
2559 @@ -124,6 +124,10 @@
2560 err = ext3_change_inode_journal_flag(inode, jflag);
2563 + case EXT3_IOC_GET_EXTENTS:
2564 + case EXT3_IOC_GET_TREE_STATS:
2565 + case EXT3_IOC_GET_TREE_DEPTH:
2566 + return ext3_ext_ioctl(inode, filp, cmd, arg);
2567 case EXT3_IOC_GETVERSION:
2568 case EXT3_IOC_GETVERSION_OLD:
2569 return put_user(inode->i_generation, (int __user *) arg);
2570 Index: linux-stage/include/linux/ext3_fs.h
2571 ===================================================================
2572 --- linux-stage.orig/include/linux/ext3_fs.h 2005-02-25 14:53:56.424908168 +0200
2573 +++ linux-stage/include/linux/ext3_fs.h 2005-02-25 15:39:12.841950008 +0200
2575 #define EXT3_NOTAIL_FL 0x00008000 /* don't merge file tail */
2576 #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
2577 #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
2578 +#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
2579 #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
2581 -#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
2582 +#define EXT3_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
2583 #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
2587 #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
2588 #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
2589 +#define EXT3_IOC_GET_EXTENTS _IOR('f', 7, long)
2590 +#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 8, long)
2591 +#define EXT3_IOC_GET_TREE_STATS _IOR('f', 9, long)
2594 * Structure of an inode on the disk
2596 #define EXT3_MOUNT_RESERVATION 0x20000 /* Preallocation */
2597 #define EXT3_MOUNT_IOPEN 0x80000 /* Allow access via iopen */
2598 #define EXT3_MOUNT_IOPEN_NOPRIV 0x100000/* Make iopen world-readable */
2599 +#define EXT3_MOUNT_EXTENTS 0x200000/* Extents support */
2600 +#define EXT3_MOUNT_EXTDEBUG 0x400000/* Extents debug */
2602 /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
2603 #ifndef _LINUX_EXT2_FS_H
2604 @@ -503,11 +509,13 @@
2605 #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
2606 #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
2607 #define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
2608 +#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
2610 #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
2611 #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
2612 EXT3_FEATURE_INCOMPAT_RECOVER| \
2613 - EXT3_FEATURE_INCOMPAT_META_BG)
2614 + EXT3_FEATURE_INCOMPAT_META_BG| \
2615 + EXT3_FEATURE_INCOMPAT_EXTENTS)
2616 #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
2617 EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
2618 EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
2623 +extern int ext3_block_truncate_page(handle_t *, struct page *,
2624 + struct address_space *, loff_t);
2625 +extern int ext3_writepage_trans_blocks(struct inode *inode);
2626 extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
2627 extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
2628 extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
2629 @@ -836,6 +844,16 @@
2630 extern struct inode_operations ext3_symlink_inode_operations;
2631 extern struct inode_operations ext3_fast_symlink_inode_operations;
2634 +extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
2635 +extern int ext3_ext_get_block(handle_t *, struct inode *, long,
2636 + struct buffer_head *, int, int);
2637 +extern void ext3_ext_truncate(struct inode *, struct page *);
2638 +extern void ext3_ext_init(struct super_block *);
2639 +extern void ext3_ext_release(struct super_block *);
2640 +extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
2641 +extern int ext3_ext_ioctl(struct inode *inode, struct file *filp,
2642 + unsigned int cmd, unsigned long arg);
2644 #endif /* __KERNEL__ */
2646 Index: linux-stage/include/linux/ext3_extents.h
2647 ===================================================================
2648 --- linux-stage.orig/include/linux/ext3_extents.h 2005-02-25 15:33:48.891198008 +0200
2649 +++ linux-stage/include/linux/ext3_extents.h 2005-02-25 15:33:48.944189952 +0200
2652 + * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
2653 + * Written by Alex Tomas <alex@clusterfs.com>
2655 + * This program is free software; you can redistribute it and/or modify
2656 + * it under the terms of the GNU General Public License version 2 as
2657 + * published by the Free Software Foundation.
2659 + * This program is distributed in the hope that it will be useful,
2660 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2661 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2662 + * GNU General Public License for more details.
2664 + * You should have received a copy of the GNU General Public License
2665 + * along with this program; if not, write to the Free Software
2666 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
2669 +#ifndef _LINUX_EXT3_EXTENTS
2670 +#define _LINUX_EXT3_EXTENTS
2673 + * with AGRESSIVE_TEST defined capacity of index/leaf blocks
2674 + * become very little, so index split, in-depth growing and
2675 + * other hard changes happen much more often
2676 + * this is for debug purposes only
2678 +#define AGRESSIVE_TEST_
2681 + * if CHECK_BINSEARCH defined, then results of binary search
2682 + * will be checked by linear search
2684 +#define CHECK_BINSEARCH_
2687 + * if EXT_DEBUG is defined you can use 'extdebug' mount option
2688 + * to get lots of info what's going on
2692 +#define ext_debug(tree,fmt,a...) \
2694 + if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
2695 + printk(fmt, ##a); \
2698 +#define ext_debug(tree,fmt,a...)
2702 + * if EXT_STATS is defined then stats numbers are collected
2703 + * these numbers will be displayed at umount time
2708 +#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
2711 + * ext3_inode has i_block array (total 60 bytes)
2712 + * first 4 bytes are used to store:
2713 + * - tree depth (0 means there is no tree yet; all extents are in the inode)
2714 + * - number of alive extents in the inode
2718 + * this is extent on-disk structure
2719 + * it's used at the bottom of the tree
2721 +struct ext3_extent {
2722 + __u32 ee_block; /* first logical block extent covers */
2723 + __u16 ee_len; /* number of blocks covered by extent */
2724 + __u16 ee_start_hi; /* high 16 bits of physical block */
2725 + __u32 ee_start; /* low 32 bits of physical block */
2729 + * this is index on-disk structure
2730 + * it's used at all the levels, but the bottom
2732 +struct ext3_extent_idx {
2733 + __u32 ei_block; /* index covers logical blocks from 'block' */
2734 + __u32 ei_leaf; /* pointer to the physical block of the next *
2735 + * level. leaf or next index could be here */
2736 + __u16 ei_leaf_hi; /* high 16 bits of physical block */
2741 + * each block (leaves and indexes), even inode-stored has header
2743 +struct ext3_extent_header {
2744 + __u16 eh_magic; /* probably will support different formats */
2745 + __u16 eh_entries; /* number of valid entries */
2746 + __u16 eh_max; /* capacity of store in entries */
2747 + __u16 eh_depth; /* has tree real underlying blocks? */
2748 + __u32 eh_generation; /* flags(8 bits) | generation of the tree */
2751 +#define EXT3_EXT_MAGIC 0xf30a
2754 + * array of ext3_ext_path contains path to some extent
2755 + * creation/lookup routines use it for traversal/splitting/etc
2756 + * truncate uses it to simulate recursive walking
2758 +struct ext3_ext_path {
2761 + struct ext3_extent *p_ext;
2762 + struct ext3_extent_idx *p_idx;
2763 + struct ext3_extent_header *p_hdr;
2764 + struct buffer_head *p_bh;
2768 + * structure for external API
2772 + * storage for cached extent
2774 +struct ext3_ext_cache {
2781 +#define EXT3_EXT_CACHE_NO 0
2782 +#define EXT3_EXT_CACHE_GAP 1
2783 +#define EXT3_EXT_CACHE_EXTENT 2
2786 + * ext3_extents_tree is used to pass initial information
2787 + * to top-level extents API
2789 +struct ext3_extents_helpers;
2790 +struct ext3_extents_tree {
2791 + struct inode *inode; /* inode which tree belongs to */
2792 + void *root; /* ptr to data top of tree resides at */
2793 + void *buffer; /* will be passed as arg to ^^ routines */
2796 + struct ext3_ext_cache *cex;/* last found extent */
2797 + struct ext3_extents_helpers *ops;
2800 +struct ext3_extents_helpers {
2801 + int (*get_write_access)(handle_t *h, void *buffer);
2802 + int (*mark_buffer_dirty)(handle_t *h, void *buffer);
2803 + int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
2804 + int (*remove_extent_credits)(struct ext3_extents_tree *,
2805 + struct ext3_extent *, unsigned long,
2807 + int (*remove_extent)(struct ext3_extents_tree *,
2808 + struct ext3_extent *, unsigned long,
2810 + int (*new_block)(handle_t *, struct ext3_extents_tree *,
2811 + struct ext3_ext_path *, struct ext3_extent *,
2816 + * to be called by ext3_ext_walk_space()
2817 + * negative retcode - error
2818 + * positive retcode - signal for ext3_ext_walk_space(), see below
2819 + * callback must return valid extent (passed or newly created)
2821 +typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
2822 + struct ext3_ext_path *,
2823 + struct ext3_ext_cache *);
2825 +#define EXT_CONTINUE 0
2826 +#define EXT_BREAK 1
2827 +#define EXT_REPEAT 2
2830 +#define EXT_MAX_BLOCK 0xffffffff
2833 +#define EXT_FIRST_EXTENT(__hdr__) \
2834 + ((struct ext3_extent *) (((char *) (__hdr__)) + \
2835 + sizeof(struct ext3_extent_header)))
2836 +#define EXT_FIRST_INDEX(__hdr__) \
2837 + ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
2838 + sizeof(struct ext3_extent_header)))
2839 +#define EXT_HAS_FREE_INDEX(__path__) \
2840 + ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
2841 +#define EXT_LAST_EXTENT(__hdr__) \
2842 + (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
2843 +#define EXT_LAST_INDEX(__hdr__) \
2844 + (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
2845 +#define EXT_MAX_EXTENT(__hdr__) \
2846 + (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
2847 +#define EXT_MAX_INDEX(__hdr__) \
2848 + (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
2849 +#define EXT_HDR_GEN(__hdr__) ((__hdr__)->eh_generation & 0x00ffffff)
2850 +#define EXT_FLAGS(__hdr__) ((__hdr__)->eh_generation >> 24)
2851 +#define EXT_FLAGS_CLR_UNKNOWN 0x7 /* Flags cleared on modification */
2853 +#define EXT_BLOCK_HDR(__bh__) ((struct ext3_extent_header *)(__bh__)->b_data)
2854 +#define EXT_ROOT_HDR(__tree__) ((struct ext3_extent_header *)(__tree__)->root)
2855 +#define EXT_DEPTH(__tree__) (EXT_ROOT_HDR(__tree__)->eh_depth)
2856 +#define EXT_GENERATION(__tree__) EXT_HDR_GEN(EXT_ROOT_HDR(__tree__))
2858 +#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
2860 +#define EXT_CHECK_PATH(tree,path) \
2862 + int depth = EXT_DEPTH(tree); \
2863 + BUG_ON((unsigned long) (path) < __PAGE_OFFSET); \
2864 + BUG_ON((unsigned long) (path)[depth].p_idx < \
2865 + __PAGE_OFFSET && (path)[depth].p_idx != NULL); \
2866 + BUG_ON((unsigned long) (path)[depth].p_ext < \
2867 + __PAGE_OFFSET && (path)[depth].p_ext != NULL); \
2868 + BUG_ON((unsigned long) (path)[depth].p_hdr < __PAGE_OFFSET); \
2869 + BUG_ON((unsigned long) (path)[depth].p_bh < __PAGE_OFFSET \
2871 + BUG_ON((path)[0].p_depth != depth); \
2876 + * this structure is used to gather extents from the tree via ioctl
2878 +struct ext3_extent_buf {
2879 + unsigned long start;
2887 + * this structure is used to collect stats info about the tree
2889 +struct ext3_extent_tree_stats {
2895 +extern void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
2896 +extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
2897 +extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
2898 +extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
2899 +extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
2900 +extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
2901 +extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
2902 +extern int ext3_ext_calc_blockmap_metadata(struct inode *, int);
2905 +ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
2908 + tree->cex->ec_type = EXT3_EXT_CACHE_NO;
2912 +#endif /* _LINUX_EXT3_EXTENTS */
2913 Index: linux-stage/include/linux/ext3_fs_i.h
2914 ===================================================================
2915 --- linux-stage.orig/include/linux/ext3_fs_i.h 2005-02-25 14:50:50.320200384 +0200
2916 +++ linux-stage/include/linux/ext3_fs_i.h 2005-02-25 15:33:48.945189800 +0200
2919 struct semaphore truncate_sem;
2920 struct inode vfs_inode;
2922 + __u32 i_cached_extent[4];
2925 #endif /* _LINUX_EXT3_FS_I */