1 Index: linux-2.4.21-suse2/fs/ext3/extents.c
2 ===================================================================
3 --- linux-2.4.21-suse2.orig/fs/ext3/extents.c 2003-01-30 13:24:37.000000000 +0300
4 +++ linux-2.4.21-suse2/fs/ext3/extents.c 2004-09-12 17:56:01.000000000 +0400
7 + * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
8 + * Written by Alex Tomas <alex@clusterfs.com>
10 + * This program is free software; you can redistribute it and/or modify
11 + * it under the terms of the GNU General Public License version 2 as
12 + * published by the Free Software Foundation.
14 + * This program is distributed in the hope that it will be useful,
15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 + * GNU General Public License for more details.
19 + * You should have received a copy of the GNU General Public License
20 + * along with this program; if not, write to the Free Software
21 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
25 + * Extents support for EXT3
28 + * - ext3_ext_walk_space() should not use ext3_ext_find_extent()
29 + * - ext3_ext_calc_credits() could take 'mergable' into account
30 + * - ext3*_error() should be used in some situations
31 + * - find_goal() [to be tested and improved]
32 + * - smart tree reduction
33 + * - arch-independence
34 + * common on-disk format for big/little-endian arch
37 +#include <linux/module.h>
38 +#include <linux/fs.h>
39 +#include <linux/time.h>
40 +#include <linux/ext3_jbd.h>
41 +#include <linux/jbd.h>
42 +#include <linux/locks.h>
43 +#include <linux/smp_lock.h>
44 +#include <linux/highuid.h>
45 +#include <linux/pagemap.h>
46 +#include <linux/quotaops.h>
47 +#include <linux/string.h>
48 +#include <linux/slab.h>
49 +#include <linux/ext3_extents.h>
50 +#include <asm/uaccess.h>
52 +static handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
56 + if (handle->h_buffer_credits > needed)
58 + if (!ext3_journal_extend(handle, needed))
60 + err = ext3_journal_restart(handle, needed);
66 +ext3_ext_get_access_for_root(handle_t *h, struct ext3_extents_tree *tree)
68 + if (tree->ops->get_write_access)
69 + return tree->ops->get_write_access(h,tree->buffer);
75 +ext3_ext_mark_root_dirty(handle_t *h, struct ext3_extents_tree *tree)
77 + if (tree->ops->mark_buffer_dirty)
78 + return tree->ops->mark_buffer_dirty(h,tree->buffer);
88 +static int ext3_ext_get_access(handle_t *handle,
89 + struct ext3_extents_tree *tree,
90 + struct ext3_ext_path *path)
95 + /* path points to block */
96 + err = ext3_journal_get_write_access(handle, path->p_bh);
98 + /* path points to leaf/index in inode body */
99 + err = ext3_ext_get_access_for_root(handle, tree);
110 +static int ext3_ext_dirty(handle_t *handle, struct ext3_extents_tree *tree,
111 + struct ext3_ext_path *path)
115 + /* path points to block */
116 + err =ext3_journal_dirty_metadata(handle, path->p_bh);
118 + /* path points to leaf/index in inode body */
119 + err = ext3_ext_mark_root_dirty(handle, tree);
125 +ext3_ext_new_block(handle_t *handle, struct ext3_extents_tree *tree,
126 + struct ext3_ext_path *path, struct ext3_extent *ex,
129 + int goal, depth, newblock;
130 + struct inode *inode;
133 + if (tree->ops->new_block)
134 + return tree->ops->new_block(handle, tree, path, ex, err);
136 + inode = tree->inode;
137 + depth = EXT_DEPTH(tree);
138 + if (path && depth > 0) {
139 + goal = path[depth-1].p_block;
141 + struct ext3_inode_info *ei = EXT3_I(inode);
142 + unsigned long bg_start;
143 + unsigned long colour;
145 + bg_start = (ei->i_block_group *
146 + EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
147 + le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
148 + colour = (current->pid % 16) *
149 + (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
150 + goal = bg_start + colour;
153 + newblock = ext3_new_block(handle, inode, goal, 0, 0, err);
157 +static inline void ext3_ext_tree_changed(struct ext3_extents_tree *tree)
159 + struct ext3_extent_header *neh;
160 + neh = EXT_ROOT_HDR(tree);
161 + neh->eh_generation++;
164 +static inline int ext3_ext_space_block(struct ext3_extents_tree *tree)
168 + size = (tree->inode->i_sb->s_blocksize -
169 + sizeof(struct ext3_extent_header))
170 + / sizeof(struct ext3_extent);
171 +#ifdef AGRESSIVE_TEST
177 +static inline int ext3_ext_space_block_idx(struct ext3_extents_tree *tree)
181 + size = (tree->inode->i_sb->s_blocksize -
182 + sizeof(struct ext3_extent_header))
183 + / sizeof(struct ext3_extent_idx);
184 +#ifdef AGRESSIVE_TEST
190 +static inline int ext3_ext_space_root(struct ext3_extents_tree *tree)
194 + size = (tree->buffer_len - sizeof(struct ext3_extent_header))
195 + / sizeof(struct ext3_extent);
196 +#ifdef AGRESSIVE_TEST
202 +static inline int ext3_ext_space_root_idx(struct ext3_extents_tree *tree)
206 + size = (tree->buffer_len -
207 + sizeof(struct ext3_extent_header))
208 + / sizeof(struct ext3_extent_idx);
209 +#ifdef AGRESSIVE_TEST
215 +static void ext3_ext_show_path(struct ext3_extents_tree *tree,
216 + struct ext3_ext_path *path)
219 + int k, l = path->p_depth;
221 + ext_debug(tree, "path:");
222 + for (k = 0; k <= l; k++, path++) {
224 + ext_debug(tree, " %d->%d", path->p_idx->ei_block,
225 + path->p_idx->ei_leaf);
226 + } else if (path->p_ext) {
227 + ext_debug(tree, " %d:%d:%d",
228 + path->p_ext->ee_block,
229 + path->p_ext->ee_len,
230 + path->p_ext->ee_start);
232 + ext_debug(tree, " []");
234 + ext_debug(tree, "\n");
238 +static void ext3_ext_show_leaf(struct ext3_extents_tree *tree,
239 + struct ext3_ext_path *path)
242 + int depth = EXT_DEPTH(tree);
243 + struct ext3_extent_header *eh;
244 + struct ext3_extent *ex;
250 + eh = path[depth].p_hdr;
251 + ex = EXT_FIRST_EXTENT(eh);
253 + for (i = 0; i < eh->eh_entries; i++, ex++) {
254 + ext_debug(tree, "%d:%d:%d ",
255 + ex->ee_block, ex->ee_len, ex->ee_start);
257 + ext_debug(tree, "\n");
261 +static void ext3_ext_drop_refs(struct ext3_ext_path *path)
263 + int depth = path->p_depth;
266 + for (i = 0; i <= depth; i++, path++)
268 + brelse(path->p_bh);
274 + * binary search for closest index by given block
277 +ext3_ext_binsearch_idx(struct ext3_extents_tree *tree,
278 + struct ext3_ext_path *path, int block)
280 + struct ext3_extent_header *eh = path->p_hdr;
281 + struct ext3_extent_idx *ix;
284 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
285 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
286 + EXT_ASSERT(eh->eh_entries > 0);
288 + ext_debug(tree, "binsearch for %d(idx): ", block);
290 + path->p_idx = ix = EXT_FIRST_INDEX(eh);
292 + r = k = eh->eh_entries;
295 + if (block < ix[l + k].ei_block)
299 + ext_debug(tree, "%d:%d:%d ", k, l, r);
304 + ext_debug(tree, " -> %d->%d ", path->p_idx->ei_block, path->p_idx->ei_leaf);
307 + if (block < ix->ei_block)
309 + path->p_idx = ix++;
311 + ext_debug(tree, " -> %d->%d\n", path->p_idx->ei_block,
312 + path->p_idx->ei_leaf);
314 +#ifdef CHECK_BINSEARCH
316 + struct ext3_extent_idx *chix;
318 + chix = ix = EXT_FIRST_INDEX(eh);
319 + for (k = 0; k < eh->eh_entries; k++, ix++) {
320 + if (k != 0 && ix->ei_block <= ix[-1].ei_block) {
321 + printk("k=%d, ix=0x%p, first=0x%p\n", k,
322 + ix, EXT_FIRST_INDEX(eh));
323 + printk("%u <= %u\n",
324 + ix->ei_block,ix[-1].ei_block);
326 + EXT_ASSERT(k == 0 || ix->ei_block > ix[-1].ei_block);
327 + if (block < ix->ei_block)
331 + EXT_ASSERT(chix == path->p_idx);
338 + * binary search for closest extent by given block
341 +ext3_ext_binsearch(struct ext3_extents_tree *tree,
342 + struct ext3_ext_path *path, int block)
344 + struct ext3_extent_header *eh = path->p_hdr;
345 + struct ext3_extent *ex;
348 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
349 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
351 + if (eh->eh_entries == 0) {
353 + * this leaf is empty yet:
354 + * we get such a leaf in split/add case
359 + ext_debug(tree, "binsearch for %d: ", block);
361 + path->p_ext = ex = EXT_FIRST_EXTENT(eh);
363 + r = k = eh->eh_entries;
366 + if (block < ex[l + k].ee_block)
370 + ext_debug(tree, "%d:%d:%d ", k, l, r);
375 + ext_debug(tree, " -> %d:%d:%d ", path->p_ext->ee_block,
376 + path->p_ext->ee_start, path->p_ext->ee_len);
379 + if (block < ex->ee_block)
381 + path->p_ext = ex++;
383 + ext_debug(tree, " -> %d:%d:%d\n", path->p_ext->ee_block,
384 + path->p_ext->ee_start, path->p_ext->ee_len);
386 +#ifdef CHECK_BINSEARCH
388 + struct ext3_extent *chex;
390 + chex = ex = EXT_FIRST_EXTENT(eh);
391 + for (k = 0; k < eh->eh_entries; k++, ex++) {
392 + EXT_ASSERT(k == 0 || ex->ee_block > ex[-1].ee_block);
393 + if (block < ex->ee_block)
397 + EXT_ASSERT(chex == path->p_ext);
403 +int ext3_extent_tree_init(handle_t *handle, struct ext3_extents_tree *tree)
405 + struct ext3_extent_header *eh;
407 + BUG_ON(tree->buffer_len == 0);
408 + ext3_ext_get_access_for_root(handle, tree);
409 + eh = EXT_ROOT_HDR(tree);
411 + eh->eh_entries = 0;
412 + eh->eh_magic = EXT3_EXT_MAGIC;
413 + eh->eh_max = ext3_ext_space_root(tree);
414 + ext3_ext_mark_root_dirty(handle, tree);
415 + ext3_ext_invalidate_cache(tree);
419 +struct ext3_ext_path *
420 +ext3_ext_find_extent(struct ext3_extents_tree *tree, int block,
421 + struct ext3_ext_path *path)
423 + struct ext3_extent_header *eh;
424 + struct buffer_head *bh;
425 + int depth, i, ppos = 0;
428 + EXT_ASSERT(tree->inode);
429 + EXT_ASSERT(tree->root);
431 + eh = EXT_ROOT_HDR(tree);
433 + i = depth = EXT_DEPTH(tree);
434 + EXT_ASSERT(eh->eh_max);
435 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
436 + EXT_ASSERT(i == 0 || eh->eh_entries > 0);
438 + /* account possible depth increase */
440 + path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
443 + return ERR_PTR(-ENOMEM);
445 + memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
446 + path[0].p_hdr = eh;
448 + /* walk through the tree */
450 + ext_debug(tree, "depth %d: num %d, max %d\n",
451 + ppos, eh->eh_entries, eh->eh_max);
452 + ext3_ext_binsearch_idx(tree, path + ppos, block);
453 + path[ppos].p_block = path[ppos].p_idx->ei_leaf;
454 + path[ppos].p_depth = i;
455 + path[ppos].p_ext = NULL;
457 + bh = sb_bread(tree->inode->i_sb, path[ppos].p_block);
459 + ext3_ext_drop_refs(path);
461 + return ERR_PTR(-EIO);
463 + eh = EXT_BLOCK_HDR(bh);
465 + EXT_ASSERT(ppos <= depth);
466 + path[ppos].p_bh = bh;
467 + path[ppos].p_hdr = eh;
471 + path[ppos].p_depth = i;
472 + path[ppos].p_hdr = eh;
473 + path[ppos].p_ext = NULL;
476 + ext3_ext_binsearch(tree, path + ppos, block);
478 + ext3_ext_show_path(tree, path);
484 + * insert new index [logical;ptr] into the block at curp
485 + * it checks where to insert: before curp or after curp
487 +static int ext3_ext_insert_index(handle_t *handle,
488 + struct ext3_extents_tree *tree,
489 + struct ext3_ext_path *curp,
490 + int logical, int ptr)
492 + struct ext3_extent_idx *ix;
495 + if ((err = ext3_ext_get_access(handle, tree, curp)))
498 + EXT_ASSERT(logical != curp->p_idx->ei_block);
499 + len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
500 + if (logical > curp->p_idx->ei_block) {
502 + if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
503 + len = (len - 1) * sizeof(struct ext3_extent_idx);
504 + len = len < 0 ? 0 : len;
505 + ext_debug(tree, "insert new index %d after: %d. "
506 + "move %d from 0x%p to 0x%p\n",
508 + (curp->p_idx + 1), (curp->p_idx + 2));
509 + memmove(curp->p_idx + 2, curp->p_idx + 1, len);
511 + ix = curp->p_idx + 1;
513 + /* insert before */
514 + len = len * sizeof(struct ext3_extent_idx);
515 + len = len < 0 ? 0 : len;
516 + ext_debug(tree, "insert new index %d before: %d. "
517 + "move %d from 0x%p to 0x%p\n",
519 + curp->p_idx, (curp->p_idx + 1));
520 + memmove(curp->p_idx + 1, curp->p_idx, len);
524 + ix->ei_block = logical;
526 + curp->p_hdr->eh_entries++;
528 + EXT_ASSERT(curp->p_hdr->eh_entries <= curp->p_hdr->eh_max);
529 + EXT_ASSERT(ix <= EXT_LAST_INDEX(curp->p_hdr));
531 + err = ext3_ext_dirty(handle, tree, curp);
532 + ext3_std_error(tree->inode->i_sb, err);
538 + * routine inserts new subtree into the path, using free index entry
540 + * - allocates all needed blocks (new leaf and all intermediate index blocks)
541 + * - makes decision where to split
542 + * - moves remaining extents and index entries (right to the split point)
543 + * into the newly allocated blocks
544 + * - initialize subtree
546 +static int ext3_ext_split(handle_t *handle, struct ext3_extents_tree *tree,
547 + struct ext3_ext_path *path,
548 + struct ext3_extent *newext, int at)
550 + struct buffer_head *bh = NULL;
551 + int depth = EXT_DEPTH(tree);
552 + struct ext3_extent_header *neh;
553 + struct ext3_extent_idx *fidx;
554 + struct ext3_extent *ex;
555 + int i = at, k, m, a;
556 + unsigned long newblock, oldblock, border;
557 + int *ablocks = NULL; /* array of allocated blocks */
560 + /* make decision: where to split? */
561 + /* FIXME: now decision is simplest: at current extent */
563 + /* if current leaf will be split, then we should use
564 + * border from split point */
565 + EXT_ASSERT(path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr));
566 + if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
567 + border = path[depth].p_ext[1].ee_block;
568 + ext_debug(tree, "leaf will be splitted."
569 + " next leaf starts at %d\n",
572 + border = newext->ee_block;
573 + ext_debug(tree, "leaf will be added."
574 + " next leaf starts at %d\n",
579 + * if error occurs, then we break processing
580 + * and turn filesystem read-only. so, index won't
581 + * be inserted and tree will be in consistent
582 + * state. next mount will repair buffers too
586 + * get array to track all allocated blocks
587 + * we need this to handle errors and free blocks
590 + ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
593 + memset(ablocks, 0, sizeof(unsigned long) * depth);
595 + /* allocate all needed blocks */
596 + ext_debug(tree, "allocate %d blocks for indexes/leaf\n", depth - at);
597 + for (a = 0; a < depth - at; a++) {
598 + newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
601 + ablocks[a] = newblock;
604 + /* initialize new leaf */
605 + newblock = ablocks[--a];
606 + EXT_ASSERT(newblock);
607 + bh = sb_getblk(tree->inode->i_sb, newblock);
614 + if ((err = ext3_journal_get_create_access(handle, bh)))
617 + neh = EXT_BLOCK_HDR(bh);
618 + neh->eh_entries = 0;
619 + neh->eh_max = ext3_ext_space_block(tree);
620 + neh->eh_magic = EXT3_EXT_MAGIC;
622 + ex = EXT_FIRST_EXTENT(neh);
624 + /* move remain of path[depth] to the new leaf */
625 + EXT_ASSERT(path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max);
626 + /* start copy from next extent */
627 + /* TODO: we could do it by single memmove */
629 + path[depth].p_ext++;
630 + while (path[depth].p_ext <=
631 + EXT_MAX_EXTENT(path[depth].p_hdr)) {
632 + ext_debug(tree, "move %d:%d:%d in new leaf %lu\n",
633 + path[depth].p_ext->ee_block,
634 + path[depth].p_ext->ee_start,
635 + path[depth].p_ext->ee_len,
637 + memmove(ex++, path[depth].p_ext++,
638 + sizeof(struct ext3_extent));
642 + mark_buffer_uptodate(bh, 1);
645 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
650 + /* correct old leaf */
652 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
654 + path[depth].p_hdr->eh_entries -= m;
655 + if ((err = ext3_ext_dirty(handle, tree, path + depth)))
660 + /* create intermediate indexes */
661 + k = depth - at - 1;
662 + EXT_ASSERT(k >= 0);
664 + ext_debug(tree, "create %d intermediate indices\n", k);
665 + /* insert new index into current index block */
666 + /* current depth stored in i var */
669 + oldblock = newblock;
670 + newblock = ablocks[--a];
671 + bh = sb_getblk(tree->inode->i_sb, newblock);
678 + if ((err = ext3_journal_get_create_access(handle, bh)))
681 + neh = EXT_BLOCK_HDR(bh);
682 + neh->eh_entries = 1;
683 + neh->eh_magic = EXT3_EXT_MAGIC;
684 + neh->eh_max = ext3_ext_space_block_idx(tree);
685 + neh->eh_depth = depth - i;
686 + fidx = EXT_FIRST_INDEX(neh);
687 + fidx->ei_block = border;
688 + fidx->ei_leaf = oldblock;
690 + ext_debug(tree, "int.index at %d (block %lu): %lu -> %lu\n",
691 + i, newblock, border, oldblock);
696 + ext_debug(tree, "cur 0x%p, last 0x%p\n", path[i].p_idx,
697 + EXT_MAX_INDEX(path[i].p_hdr));
698 + EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
699 + EXT_LAST_INDEX(path[i].p_hdr));
700 + while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
701 + ext_debug(tree, "%d: move %d:%d in new index %lu\n",
702 + i, path[i].p_idx->ei_block,
703 + path[i].p_idx->ei_leaf, newblock);
704 + memmove(++fidx, path[i].p_idx++,
705 + sizeof(struct ext3_extent_idx));
707 + EXT_ASSERT(neh->eh_entries <= neh->eh_max);
710 + mark_buffer_uptodate(bh, 1);
713 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
718 + /* correct old index */
720 + err = ext3_ext_get_access(handle, tree, path + i);
723 + path[i].p_hdr->eh_entries -= m;
724 + err = ext3_ext_dirty(handle, tree, path + i);
732 + /* insert new index */
734 + err = ext3_ext_insert_index(handle, tree, path + at,
739 + if (buffer_locked(bh))
745 + /* free all allocated blocks in error case */
746 + for (i = 0; i < depth; i++) {
749 + ext3_free_blocks(handle, tree->inode, ablocks[i], 1);
758 + * routine implements tree growing procedure:
759 + * - allocates new block
760 + * - moves top-level data (index block or leaf) into the new block
761 + * - initialize new top-level, creating index that points to the
762 + * just created block
764 +static int ext3_ext_grow_indepth(handle_t *handle,
765 + struct ext3_extents_tree *tree,
766 + struct ext3_ext_path *path,
767 + struct ext3_extent *newext)
769 + struct ext3_ext_path *curp = path;
770 + struct ext3_extent_header *neh;
771 + struct ext3_extent_idx *fidx;
772 + struct buffer_head *bh;
773 + unsigned long newblock;
776 + newblock = ext3_ext_new_block(handle, tree, path, newext, &err);
780 + bh = sb_getblk(tree->inode->i_sb, newblock);
783 + ext3_std_error(tree->inode->i_sb, err);
788 + if ((err = ext3_journal_get_create_access(handle, bh))) {
793 + /* move top-level index/leaf into new block */
794 + memmove(bh->b_data, curp->p_hdr, tree->buffer_len);
796 + /* set size of new block */
797 + neh = EXT_BLOCK_HDR(bh);
798 + /* old root could have indexes or leaves
799 + * so calculate eh_max right way */
800 + if (EXT_DEPTH(tree))
801 + neh->eh_max = ext3_ext_space_block_idx(tree);
803 + neh->eh_max = ext3_ext_space_block(tree);
804 + neh->eh_magic = EXT3_EXT_MAGIC;
805 + mark_buffer_uptodate(bh, 1);
808 + if ((err = ext3_journal_dirty_metadata(handle, bh)))
811 + /* create index in new top-level index: num,max,pointer */
812 + if ((err = ext3_ext_get_access(handle, tree, curp)))
815 + curp->p_hdr->eh_magic = EXT3_EXT_MAGIC;
816 + curp->p_hdr->eh_max = ext3_ext_space_root_idx(tree);
817 + curp->p_hdr->eh_entries = 1;
818 + curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
819 + /* FIXME: it works, but actually path[0] can be index */
820 + curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
821 + curp->p_idx->ei_leaf = newblock;
823 + neh = EXT_ROOT_HDR(tree);
824 + fidx = EXT_FIRST_INDEX(neh);
825 + ext_debug(tree, "new root: num %d(%d), lblock %d, ptr %d\n",
826 + neh->eh_entries, neh->eh_max, fidx->ei_block, fidx->ei_leaf);
828 + neh->eh_depth = path->p_depth + 1;
829 + err = ext3_ext_dirty(handle, tree, curp);
837 + * routine finds empty index and adds new leaf. if no free index found
838 + * then it requests in-depth growing
840 +static int ext3_ext_create_new_leaf(handle_t *handle,
841 + struct ext3_extents_tree *tree,
842 + struct ext3_ext_path *path,
843 + struct ext3_extent *newext)
845 + struct ext3_ext_path *curp;
846 + int depth, i, err = 0;
849 + i = depth = EXT_DEPTH(tree);
851 + /* walk up to the tree and look for free index entry */
852 + curp = path + depth;
853 + while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
858 + /* we use already allocated block for index block
859 + * so, subsequent data blocks should be contiguous */
860 + if (EXT_HAS_FREE_INDEX(curp)) {
861 + /* if we found index with free entry, then use that
862 + * entry: create all needed subtree and add new leaf */
863 + err = ext3_ext_split(handle, tree, path, newext, i);
866 + ext3_ext_drop_refs(path);
867 + path = ext3_ext_find_extent(tree, newext->ee_block, path);
869 + err = PTR_ERR(path);
871 + /* tree is full, time to grow in depth */
872 + err = ext3_ext_grow_indepth(handle, tree, path, newext);
875 + ext3_ext_drop_refs(path);
876 + path = ext3_ext_find_extent(tree, newext->ee_block, path);
878 + err = PTR_ERR(path);
881 + * only first (depth 0 -> 1) produces free space
882 + * in all other cases we have to split the grown tree
884 + depth = EXT_DEPTH(tree);
885 + if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
886 + /* now we need split */
898 + * returns allocated block in subsequent extent or EXT_MAX_BLOCK
899 + * NOTE: it considers the block number from an index entry as an
900 + * allocated block. thus, index entries have to be consistent
903 +static unsigned long
904 +ext3_ext_next_allocated_block(struct ext3_ext_path *path)
908 + EXT_ASSERT(path != NULL);
909 + depth = path->p_depth;
911 + if (depth == 0 && path->p_ext == NULL)
912 + return EXT_MAX_BLOCK;
914 + /* FIXME: what if index isn't full ?! */
915 + while (depth >= 0) {
916 + if (depth == path->p_depth) {
918 + if (path[depth].p_ext !=
919 + EXT_LAST_EXTENT(path[depth].p_hdr))
920 + return path[depth].p_ext[1].ee_block;
923 + if (path[depth].p_idx !=
924 + EXT_LAST_INDEX(path[depth].p_hdr))
925 + return path[depth].p_idx[1].ei_block;
930 + return EXT_MAX_BLOCK;
934 + * returns first allocated block from next leaf or EXT_MAX_BLOCK
936 +static unsigned ext3_ext_next_leaf_block(struct ext3_extents_tree *tree,
937 + struct ext3_ext_path *path)
941 + EXT_ASSERT(path != NULL);
942 + depth = path->p_depth;
944 + /* zero-tree has no leaf blocks at all */
946 + return EXT_MAX_BLOCK;
948 + /* go to index block */
951 + while (depth >= 0) {
952 + if (path[depth].p_idx !=
953 + EXT_LAST_INDEX(path[depth].p_hdr))
954 + return path[depth].p_idx[1].ei_block;
958 + return EXT_MAX_BLOCK;
962 + * if leaf gets modified and modified extent is first in the leaf
963 + * then we have to correct all indexes above
964 + * TODO: do we need to correct tree in all cases?
966 +int ext3_ext_correct_indexes(handle_t *handle, struct ext3_extents_tree *tree,
967 + struct ext3_ext_path *path)
969 + struct ext3_extent_header *eh;
970 + int depth = EXT_DEPTH(tree);
971 + struct ext3_extent *ex;
972 + unsigned long border;
975 + eh = path[depth].p_hdr;
976 + ex = path[depth].p_ext;
981 + /* there is no tree at all */
985 + if (ex != EXT_FIRST_EXTENT(eh)) {
986 + /* we correct tree if first leaf got modified only */
991 + * TODO: we need correction if border is smaller than current one
994 + border = path[depth].p_ext->ee_block;
995 + if ((err = ext3_ext_get_access(handle, tree, path + k)))
997 + path[k].p_idx->ei_block = border;
998 + if ((err = ext3_ext_dirty(handle, tree, path + k)))
1002 + /* change all left-side indexes */
1003 + if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1005 + if ((err = ext3_ext_get_access(handle, tree, path + k)))
1007 + path[k].p_idx->ei_block = border;
1008 + if ((err = ext3_ext_dirty(handle, tree, path + k)))
1016 +ext3_can_extents_be_merged(struct ext3_extents_tree *tree,
1017 + struct ext3_extent *ex1,
1018 + struct ext3_extent *ex2)
1020 + if (ex1->ee_block + ex1->ee_len != ex2->ee_block)
1023 +#ifdef AGRESSIVE_TEST
1024 + if (ex1->ee_len >= 4)
1028 + if (!tree->ops->mergable)
1031 + return tree->ops->mergable(ex1, ex2);
1035 + * this routine tries to merge the requested extent into the existing
1036 + * extent or inserts requested extent as new one into the tree,
1037 + * creating new leaf in no-space case
1039 +int ext3_ext_insert_extent(handle_t *handle, struct ext3_extents_tree *tree,
1040 + struct ext3_ext_path *path,
1041 + struct ext3_extent *newext)
1043 + struct ext3_extent_header * eh;
1044 + struct ext3_extent *ex, *fex;
1045 + struct ext3_extent *nearex; /* nearest extent */
1046 + struct ext3_ext_path *npath = NULL;
1047 + int depth, len, err, next;
1049 + EXT_ASSERT(newext->ee_len > 0);
1050 + EXT_ASSERT(newext->ee_len < EXT_CACHE_MARK);
1051 + depth = EXT_DEPTH(tree);
1052 + ex = path[depth].p_ext;
1053 + EXT_ASSERT(path[depth].p_hdr);
1055 + /* try to insert block into found extent and return */
1056 + if (ex && ext3_can_extents_be_merged(tree, ex, newext)) {
1057 + ext_debug(tree, "append %d block to %d:%d (from %d)\n",
1058 + newext->ee_len, ex->ee_block, ex->ee_len,
1060 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
1062 + ex->ee_len += newext->ee_len;
1063 + eh = path[depth].p_hdr;
1069 + depth = EXT_DEPTH(tree);
1070 + eh = path[depth].p_hdr;
1071 + if (eh->eh_entries < eh->eh_max)
1074 + /* probably next leaf has space for us? */
1075 + fex = EXT_LAST_EXTENT(eh);
1076 + next = ext3_ext_next_leaf_block(tree, path);
1077 + if (newext->ee_block > fex->ee_block && next != EXT_MAX_BLOCK) {
1078 + ext_debug(tree, "next leaf block - %d\n", next);
1079 + EXT_ASSERT(!npath);
1080 + npath = ext3_ext_find_extent(tree, next, NULL);
1081 + if (IS_ERR(npath))
1082 + return PTR_ERR(npath);
1083 + EXT_ASSERT(npath->p_depth == path->p_depth);
1084 + eh = npath[depth].p_hdr;
1085 + if (eh->eh_entries < eh->eh_max) {
1086 + ext_debug(tree, "next leaf isnt full(%d)\n",
1091 + ext_debug(tree, "next leaf hasno free space(%d,%d)\n",
1092 + eh->eh_entries, eh->eh_max);
1096 + * there is no free space in found leaf
1097 + * we're gonna add new leaf in the tree
1099 + err = ext3_ext_create_new_leaf(handle, tree, path, newext);
1102 + depth = EXT_DEPTH(tree);
1103 + eh = path[depth].p_hdr;
1106 + nearex = path[depth].p_ext;
1108 + if ((err = ext3_ext_get_access(handle, tree, path + depth)))
1112 + /* there is no extent in this leaf, create first one */
1113 + ext_debug(tree, "first extent in the leaf: %d:%d:%d\n",
1114 + newext->ee_block, newext->ee_start,
1116 + path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1117 + } else if (newext->ee_block > nearex->ee_block) {
1118 + EXT_ASSERT(newext->ee_block != nearex->ee_block);
1119 + if (nearex != EXT_LAST_EXTENT(eh)) {
1120 + len = EXT_MAX_EXTENT(eh) - nearex;
1121 + len = (len - 1) * sizeof(struct ext3_extent);
1122 + len = len < 0 ? 0 : len;
1123 + ext_debug(tree, "insert %d:%d:%d after: nearest 0x%p, "
1124 + "move %d from 0x%p to 0x%p\n",
1125 + newext->ee_block, newext->ee_start,
1127 + nearex, len, nearex + 1, nearex + 2);
1128 + memmove(nearex + 2, nearex + 1, len);
1130 + path[depth].p_ext = nearex + 1;
1132 + EXT_ASSERT(newext->ee_block != nearex->ee_block);
1133 + len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
1134 + len = len < 0 ? 0 : len;
1135 + ext_debug(tree, "insert %d:%d:%d before: nearest 0x%p, "
1136 + "move %d from 0x%p to 0x%p\n",
1137 + newext->ee_block, newext->ee_start, newext->ee_len,
1138 + nearex, len, nearex + 1, nearex + 2);
1139 + memmove(nearex + 1, nearex, len);
1140 + path[depth].p_ext = nearex;
1144 + nearex = path[depth].p_ext;
1145 + nearex->ee_block = newext->ee_block;
1146 + nearex->ee_start = newext->ee_start;
1147 + nearex->ee_len = newext->ee_len;
1148 + /* FIXME: support for large fs */
1149 + nearex->ee_start_hi = 0;
1152 + /* try to merge extents to the right */
1153 + while (nearex < EXT_LAST_EXTENT(eh)) {
1154 + if (!ext3_can_extents_be_merged(tree, nearex, nearex + 1))
1156 + /* merge with next extent! */
1157 + nearex->ee_len += nearex[1].ee_len;
1158 + if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1159 + len = (EXT_LAST_EXTENT(eh) - nearex - 1)
1160 + * sizeof(struct ext3_extent);
1161 + memmove(nearex + 1, nearex + 2, len);
1164 + EXT_ASSERT(eh->eh_entries > 0);
1167 + /* try to merge extents to the left */
1169 + /* time to correct all indexes above */
1170 + err = ext3_ext_correct_indexes(handle, tree, path);
1174 + err = ext3_ext_dirty(handle, tree, path + depth);
1178 + ext3_ext_drop_refs(npath);
1181 + ext3_ext_tree_changed(tree);
1182 + ext3_ext_invalidate_cache(tree);
1186 +int ext3_ext_walk_space(struct ext3_extents_tree *tree, unsigned long block,
1187 + unsigned long num, ext_prepare_callback func)
1189 + struct ext3_ext_path *path = NULL;
1190 + struct ext3_extent *ex, cbex;
1191 + unsigned long next, start = 0, end = 0;
1192 + unsigned long last = block + num;
1193 + int depth, exists, err = 0;
1197 + EXT_ASSERT(tree->inode);
1198 + EXT_ASSERT(tree->root);
1200 + while (block < last && block != EXT_MAX_BLOCK) {
1201 + num = last - block;
1202 + /* find extent for this block */
1203 + path = ext3_ext_find_extent(tree, block, path);
1204 + if (IS_ERR(path)) {
1205 + err = PTR_ERR(path);
1210 + depth = EXT_DEPTH(tree);
1211 + EXT_ASSERT(path[depth].p_hdr);
1212 + ex = path[depth].p_ext;
1213 + next = ext3_ext_next_allocated_block(path);
1217 + /* there is no extent yet, so try to allocate
1218 + * all requested space */
1220 + end = block + num;
1221 + } else if (ex->ee_block > block) {
1222 + /* need to allocate space before found extent */
1224 + end = ex->ee_block;
1225 + if (block + num < end)
1226 + end = block + num;
1227 + } else if (block >= ex->ee_block + ex->ee_len) {
1228 + /* need to allocate space after found extent */
1230 + end = block + num;
1233 + } else if (block >= ex->ee_block) {
1235 + * some part of requested space is covered
1239 + end = ex->ee_block + ex->ee_len;
1240 + if (block + num < end)
1241 + end = block + num;
1246 + EXT_ASSERT(end > start);
1249 + cbex.ee_block = start;
1250 + cbex.ee_len = end - start;
1251 + cbex.ee_start = 0;
1255 + EXT_ASSERT(path[depth].p_hdr);
1256 + err = func(tree, path, &cbex, exists);
1257 + ext3_ext_drop_refs(path);
1261 + if (err == EXT_REPEAT)
1263 + else if (err == EXT_BREAK) {
1268 + if (EXT_DEPTH(tree) != depth) {
1269 + /* depth was changed. we have to realloc path */
1274 + block = cbex.ee_block + cbex.ee_len;
1278 + ext3_ext_drop_refs(path);
1286 +ext3_ext_put_in_cache(struct ext3_extents_tree *tree, struct ext3_extent *ex)
1290 + EXT_ASSERT(ex->ee_len);
1291 + tree->cex->ee_block = ex->ee_block;
1292 + tree->cex->ee_start = ex->ee_start;
1293 + tree->cex->ee_len = ex->ee_len;
1298 + * this routine calculates boundaries of the gap the requested block fits into
1299 + * and cache this gap
1302 +ext3_ext_put_gap_in_cache(struct ext3_extents_tree *tree,
1303 + struct ext3_ext_path *path,
1304 + unsigned long block)
1306 + int depth = EXT_DEPTH(tree);
1307 + struct ext3_extent *ex, gex;
1312 + ex = path[depth].p_ext;
1314 + /* there is no extent yet, so gap is [0;-] */
1316 + gex.ee_len = EXT_CACHE_MARK;
1317 + ext_debug(tree, "cache gap(whole file):");
1318 + } else if (block < ex->ee_block) {
1319 + gex.ee_block = block;
1320 + gex.ee_len = ex->ee_block - block;
1321 + ext_debug(tree, "cache gap(before): %lu [%lu:%lu]",
1322 + (unsigned long) block,
1323 + (unsigned long) ex->ee_block,
1324 + (unsigned long) ex->ee_len);
1325 + } else if (block >= ex->ee_block + ex->ee_len) {
1326 + gex.ee_block = ex->ee_block + ex->ee_len;
1327 + gex.ee_len = ext3_ext_next_allocated_block(path);
1328 + ext_debug(tree, "cache gap(after): [%lu:%lu] %lu",
1329 + (unsigned long) ex->ee_block,
1330 + (unsigned long) ex->ee_len,
1331 + (unsigned long) block);
1332 + EXT_ASSERT(gex.ee_len > gex.ee_block);
1333 + gex.ee_len = gex.ee_len - gex.ee_block;
1338 + ext_debug(tree, " -> %lu:%lu\n", (unsigned long) gex.ee_block,
1339 + (unsigned long) gex.ee_len);
1340 + gex.ee_start = EXT_CACHE_MARK;
1341 + ext3_ext_put_in_cache(tree, &gex);
1345 +ext3_ext_in_cache(struct ext3_extents_tree *tree, unsigned long block,
1346 + struct ext3_extent *ex)
1348 + struct ext3_extent *cex = tree->cex;
1350 + /* is there cache storage at all? */
1354 + /* has cache valid data? */
1355 + if (cex->ee_len == 0)
1358 + if (block >= cex->ee_block && block < cex->ee_block + cex->ee_len) {
1359 + ex->ee_block = cex->ee_block;
1360 + ex->ee_start = cex->ee_start;
1361 + ex->ee_len = cex->ee_len;
1362 + ext_debug(tree, "%lu cached by %lu:%lu:%lu\n",
1363 + (unsigned long) block,
1364 + (unsigned long) ex->ee_block,
1365 + (unsigned long) ex->ee_len,
1366 + (unsigned long) ex->ee_start);
1370 + /* not in cache */
1375 + * routine removes index from the index block
1376 + * it's used in truncate case only. thus all requests are for
1377 + * last index in the block only
1379 +int ext3_ext_rm_idx(handle_t *handle, struct ext3_extents_tree *tree,
1380 + struct ext3_ext_path *path)
1382 + struct buffer_head *bh;
1385 + /* free index block */
1387 + EXT_ASSERT(path->p_hdr->eh_entries);
1388 + if ((err = ext3_ext_get_access(handle, tree, path)))
1390 + path->p_hdr->eh_entries--;
1391 + if ((err = ext3_ext_dirty(handle, tree, path)))
1393 + ext_debug(tree, "index is empty, remove it, free block %d\n",
1394 + path->p_idx->ei_leaf);
1395 + bh = sb_get_hash_table(tree->inode->i_sb, path->p_idx->ei_leaf);
1396 + ext3_forget(handle, 1, tree->inode, bh, path->p_idx->ei_leaf);
1397 + ext3_free_blocks(handle, tree->inode, path->p_idx->ei_leaf, 1);
1401 +int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *tree,
1402 + struct ext3_ext_path *path)
1404 + int depth = EXT_DEPTH(tree);
1408 + /* probably there is space in leaf? */
1409 + if (path[depth].p_hdr->eh_entries < path[depth].p_hdr->eh_max)
1414 + * the worst case we're expecting is creation of the
1415 + * new root (growing in depth) with index splitting
1416 + * for splitting we have to consider depth + 1 because
1417 + * previous growing could increase it
1419 + depth = depth + 1;
1422 + * growing in depth:
1423 + * block allocation + new root + old root
1425 + needed = EXT3_ALLOC_NEEDED + 2;
1427 + /* index split. we may need:
1428 + * allocate intermediate indexes and new leaf
1429 + * change two blocks at each level, but root
1430 + * modify root block (inode)
1432 + needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
1438 +ext3_ext_split_for_rm(handle_t *handle, struct ext3_extents_tree *tree,
1439 + struct ext3_ext_path *path, unsigned long start,
1440 + unsigned long end)
1442 + struct ext3_extent *ex, tex;
1443 + struct ext3_ext_path *npath;
1444 + int depth, creds, err;
1446 + depth = EXT_DEPTH(tree);
1447 + ex = path[depth].p_ext;
1449 + EXT_ASSERT(end < ex->ee_block + ex->ee_len - 1);
1450 + EXT_ASSERT(ex->ee_block < start);
1452 + /* calculate tail extent */
1453 + tex.ee_block = end + 1;
1454 + EXT_ASSERT(tex.ee_block < ex->ee_block + ex->ee_len);
1455 + tex.ee_len = ex->ee_block + ex->ee_len - tex.ee_block;
1457 + creds = ext3_ext_calc_credits_for_insert(tree, path);
1458 + handle = ext3_ext_journal_restart(handle, creds);
1459 + if (IS_ERR(handle))
1460 + return PTR_ERR(handle);
1462 + /* calculate head extent. use primary extent */
1463 + err = ext3_ext_get_access(handle, tree, path + depth);
1466 + ex->ee_len = start - ex->ee_block;
1467 + err = ext3_ext_dirty(handle, tree, path + depth);
1471 + /* FIXME: some callback to free underlying resource
1472 + * and correct ee_start? */
1473 + ext_debug(tree, "split extent: head %u:%u, tail %u:%u\n",
1474 + ex->ee_block, ex->ee_len, tex.ee_block, tex.ee_len);
1476 + npath = ext3_ext_find_extent(tree, ex->ee_block, NULL);
1477 + if (IS_ERR(npath))
1478 + return PTR_ERR(npath);
1479 + depth = EXT_DEPTH(tree);
1480 + EXT_ASSERT(npath[depth].p_ext->ee_block == ex->ee_block);
1481 + EXT_ASSERT(npath[depth].p_ext->ee_len == ex->ee_len);
1483 + err = ext3_ext_insert_extent(handle, tree, npath, &tex);
1484 + ext3_ext_drop_refs(npath);
1492 +ext3_ext_rm_leaf(handle_t *handle, struct ext3_extents_tree *tree,
1493 + struct ext3_ext_path *path, unsigned long start,
1494 + unsigned long end)
1496 + struct ext3_extent *ex, *fu = NULL, *lu, *le;
1497 + int err = 0, correct_index = 0;
1498 + int depth = EXT_DEPTH(tree), credits;
1499 + struct ext3_extent_header *eh;
1500 + unsigned a, b, block, num;
1502 + ext_debug(tree, "remove [%lu:%lu] in leaf\n", start, end);
1503 + if (!path[depth].p_hdr)
1504 + path[depth].p_hdr = EXT_BLOCK_HDR(path[depth].p_bh);
1505 + eh = path[depth].p_hdr;
1507 + EXT_ASSERT(eh->eh_entries <= eh->eh_max);
1508 + EXT_ASSERT(eh->eh_magic == EXT3_EXT_MAGIC);
1510 + /* find where to start removing */
1511 + le = ex = EXT_LAST_EXTENT(eh);
1512 + while (ex != EXT_FIRST_EXTENT(eh)) {
1513 + if (ex->ee_block <= end)
1518 + if (start > ex->ee_block && end < ex->ee_block + ex->ee_len - 1) {
1519 + /* removal of internal part of the extent requested
1520 + * tail and head must be placed in different extent
1521 + * so, we have to insert one more extent */
1522 + path[depth].p_ext = ex;
1523 + return ext3_ext_split_for_rm(handle, tree, path, start, end);
1527 + while (ex >= EXT_FIRST_EXTENT(eh) &&
1528 + ex->ee_block + ex->ee_len > start) {
1529 + ext_debug(tree, "remove ext %u:%u\n", ex->ee_block, ex->ee_len);
1530 + path[depth].p_ext = ex;
1532 + a = ex->ee_block > start ? ex->ee_block : start;
1533 + b = ex->ee_block + ex->ee_len - 1 < end ?
1534 + ex->ee_block + ex->ee_len - 1 : end;
1536 + ext_debug(tree, " border %u:%u\n", a, b);
1538 + if (a != ex->ee_block && b != ex->ee_block + ex->ee_len - 1) {
1542 + } else if (a != ex->ee_block) {
1543 + /* remove tail of the extent */
1544 + block = ex->ee_block;
1546 + } else if (b != ex->ee_block + ex->ee_len - 1) {
1547 + /* remove head of the extent */
1551 + /* remove whole extent: excellent! */
1552 + block = ex->ee_block;
1554 + EXT_ASSERT(a == ex->ee_block &&
1555 + b == ex->ee_block + ex->ee_len - 1);
1558 + if (ex == EXT_FIRST_EXTENT(eh))
1559 + correct_index = 1;
1562 + if (correct_index)
1563 + credits += (EXT_DEPTH(tree) * EXT3_ALLOC_NEEDED) + 1;
1564 + if (tree->ops->remove_extent_credits)
1565 + credits+=tree->ops->remove_extent_credits(tree,ex,a,b);
1567 + handle = ext3_ext_journal_restart(handle, credits);
1568 + if (IS_ERR(handle)) {
1569 + err = PTR_ERR(handle);
1573 + err = ext3_ext_get_access(handle, tree, path + depth);
1577 + if (tree->ops->remove_extent)
1578 + err = tree->ops->remove_extent(tree, ex, a, b);
1583 + /* this extent is removed entirely; mark slot unused */
1589 + ex->ee_block = block;
1592 + err = ext3_ext_dirty(handle, tree, path + depth);
1596 + ext_debug(tree, "new extent: %u:%u:%u\n",
1597 + ex->ee_block, ex->ee_len, ex->ee_start);
1602 + /* reuse unused slots */
1604 + if (lu->ee_start) {
1613 + if (correct_index && eh->eh_entries)
1614 + err = ext3_ext_correct_indexes(handle, tree, path);
1616 + /* if this leaf is free, then we should
1617 + * remove it from index block above */
1618 + if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
1619 + err = ext3_ext_rm_idx(handle, tree, path + depth);
1626 +static struct ext3_extent_idx *
1627 +ext3_ext_last_covered(struct ext3_extent_header *hdr, unsigned long block)
1629 + struct ext3_extent_idx *ix;
1631 + ix = EXT_LAST_INDEX(hdr);
1632 + while (ix != EXT_FIRST_INDEX(hdr)) {
1633 + if (ix->ei_block <= block)
1641 + * returns 1 if current index has to be freed (even partially)
1644 +ext3_ext_more_to_rm(struct ext3_ext_path *path)
1646 + EXT_ASSERT(path->p_idx);
1648 + if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1652 + * if truncate on a deeper level happened, it wasn't partial
1653 + * so we have to consider current index for truncation
1655 + if (path->p_hdr->eh_entries == path->p_block)
1660 +int ext3_ext_remove_space(struct ext3_extents_tree *tree,
1661 + unsigned long start, unsigned long end)
1663 + struct inode *inode = tree->inode;
1664 + struct super_block *sb = inode->i_sb;
1665 + int depth = EXT_DEPTH(tree);
1666 + struct ext3_ext_path *path;
1668 + int i = 0, err = 0;
1670 + ext_debug(tree, "space to be removed: %lu:%lu\n", start, end);
1672 + /* probably first extent we're gonna free will be last in block */
1673 + handle = ext3_journal_start(inode, depth + 1);
1674 + if (IS_ERR(handle))
1675 + return PTR_ERR(handle);
1677 + ext3_ext_invalidate_cache(tree);
1680 + * we start scanning from right side freeing all the blocks
1681 + * after i_size and walking into the deep
1683 + path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
1684 + if (IS_ERR(path)) {
1685 + ext3_error(sb, "ext3_ext_remove_space",
1686 + "Can't allocate path array");
1687 + ext3_journal_stop(handle, inode);
1690 + memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
1691 + path[i].p_hdr = EXT_ROOT_HDR(tree);
1693 + while (i >= 0 && err == 0) {
1695 + /* this is leaf block */
1696 + err = ext3_ext_rm_leaf(handle, tree, path, start, end);
1697 + /* root level have p_bh == NULL, brelse() eats this */
1698 + brelse(path[i].p_bh);
1703 + /* this is index block */
1704 + if (!path[i].p_hdr) {
1705 + ext_debug(tree, "initialize header\n");
1706 + path[i].p_hdr = EXT_BLOCK_HDR(path[i].p_bh);
1709 + EXT_ASSERT(path[i].p_hdr->eh_entries <= path[i].p_hdr->eh_max);
1710 + EXT_ASSERT(path[i].p_hdr->eh_magic == EXT3_EXT_MAGIC);
1712 + if (!path[i].p_idx) {
1713 + /* this level hasn't been touched yet */
1715 + ext3_ext_last_covered(path[i].p_hdr, end);
1716 + path[i].p_block = path[i].p_hdr->eh_entries + 1;
1717 + ext_debug(tree, "init index ptr: hdr 0x%p, num %d\n",
1718 + path[i].p_hdr, path[i].p_hdr->eh_entries);
1720 + /* we've already been here; look at the next index */
1724 + ext_debug(tree, "level %d - index, first 0x%p, cur 0x%p\n",
1725 + i, EXT_FIRST_INDEX(path[i].p_hdr),
1727 + if (ext3_ext_more_to_rm(path + i)) {
1728 + /* go to the next level */
1729 + ext_debug(tree, "move to level %d (block %d)\n",
1730 + i + 1, path[i].p_idx->ei_leaf);
1731 + memset(path + i + 1, 0, sizeof(*path));
1732 + path[i+1].p_bh = sb_bread(sb, path[i].p_idx->ei_leaf);
1733 + if (!path[i+1].p_bh) {
1734 + /* should we reset i_size? */
1738 + /* store the actual number of indexes so we can tell whether
1739 + * this number changed at the next iteration */
1740 + path[i].p_block = path[i].p_hdr->eh_entries;
1743 + /* we finish processing this index, go up */
1744 + if (path[i].p_hdr->eh_entries == 0 && i > 0) {
1745 + /* index is empty, remove it
1746 + * handle must be already prepared by the
1747 + * truncatei_leaf() */
1748 + err = ext3_ext_rm_idx(handle, tree, path + i);
1750 + /* root level have p_bh == NULL, brelse() eats this */
1751 + brelse(path[i].p_bh);
1753 + ext_debug(tree, "return to level %d\n", i);
1757 + /* TODO: flexible tree reduction should be here */
1758 + if (path->p_hdr->eh_entries == 0) {
1760 + * truncate to zero freed all the tree
1761 + * so, we need to correct eh_depth
1763 + err = ext3_ext_get_access(handle, tree, path);
1765 + EXT_ROOT_HDR(tree)->eh_depth = 0;
1766 + EXT_ROOT_HDR(tree)->eh_max = ext3_ext_space_root(tree);
1767 + err = ext3_ext_dirty(handle, tree, path);
1770 + ext3_ext_tree_changed(tree);
1773 + ext3_journal_stop(handle, inode);
1779 + * called at mount time
1781 +void ext3_ext_init(struct super_block *sb)
1784 + * possible initialization would be here
1787 + if (test_opt(sb, EXTENTS)) {
1788 + printk("EXT3-fs: file extents enabled");
1789 +#ifdef AGRESSIVE_TEST
1790 + printk(", agressive tests");
1792 +#ifdef CHECK_BINSEARCH
1793 + printk(", check binsearch");
1800 + * called at umount time
1802 +void ext3_ext_release(struct super_block *sb)
1806 +/************************************************************************
1807 + * VFS related routines
1808 + ************************************************************************/
1810 +static int ext3_get_inode_write_access(handle_t *handle, void *buffer)
1812 + /* we use in-core data, not bh */
1816 +static int ext3_mark_buffer_dirty(handle_t *handle, void *buffer)
1818 + struct inode *inode = buffer;
1819 + return ext3_mark_inode_dirty(handle, inode);
1822 +static int ext3_ext_mergable(struct ext3_extent *ex1,
1823 + struct ext3_extent *ex2)
1825 + /* FIXME: support for large fs */
1826 + if (ex1->ee_start + ex1->ee_len == ex2->ee_start)
1832 +ext3_remove_blocks_credits(struct ext3_extents_tree *tree,
1833 + struct ext3_extent *ex,
1834 + unsigned long from, unsigned long to)
1838 + /* at present, extent can't cross block group */;
1839 + needed = 4; /* bitmap + group desc + sb + inode */
1841 +#ifdef CONFIG_QUOTA
1842 + needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
1848 +ext3_remove_blocks(struct ext3_extents_tree *tree,
1849 + struct ext3_extent *ex,
1850 + unsigned long from, unsigned long to)
1852 + int needed = ext3_remove_blocks_credits(tree, ex, from, to);
1853 + handle_t *handle = ext3_journal_start(tree->inode, needed);
1854 + struct buffer_head *bh;
1857 + if (IS_ERR(handle))
1858 + return PTR_ERR(handle);
1859 + if (from >= ex->ee_block && to == ex->ee_block + ex->ee_len - 1) {
1860 + /* tail removal */
1861 + unsigned long num, start;
1862 + num = ex->ee_block + ex->ee_len - from;
1863 + start = ex->ee_start + ex->ee_len - num;
1864 + ext_debug(tree, "free last %lu blocks starting %lu\n",
1866 + for (i = 0; i < num; i++) {
1867 + bh = sb_get_hash_table(tree->inode->i_sb, start + i);
1868 + ext3_forget(handle, 0, tree->inode, bh, start + i);
1870 + ext3_free_blocks(handle, tree->inode, start, num);
1871 + } else if (from == ex->ee_block && to <= ex->ee_block + ex->ee_len - 1) {
1872 + printk("strange request: removal %lu-%lu from %u:%u\n",
1873 + from, to, ex->ee_block, ex->ee_len);
1875 + printk("strange request: removal(2) %lu-%lu from %u:%u\n",
1876 + from, to, ex->ee_block, ex->ee_len);
1878 + ext3_journal_stop(handle, tree->inode);
1882 +int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
1883 + unsigned long block)
1885 + struct ext3_inode_info *ei = EXT3_I(inode);
1886 + unsigned long bg_start;
1887 + unsigned long colour;
1891 + struct ext3_extent *ex;
1892 + depth = path->p_depth;
1894 + /* try to predict block placement */
1895 + if ((ex = path[depth].p_ext))
1896 + return ex->ee_start + (block - ex->ee_block);
1898 + /* it looks like the index is empty,
1899 + * try to find starting from index itself */
1900 + if (path[depth].p_bh)
1901 + return path[depth].p_bh->b_blocknr;
1904 + /* OK. use inode's group */
1905 + bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
1906 + le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
1907 + colour = (current->pid % 16) *
1908 + (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
1909 + return bg_start + colour + block;
1912 +static int ext3_new_block_cb(handle_t *handle, struct ext3_extents_tree *tree,
1913 + struct ext3_ext_path *path,
1914 + struct ext3_extent *ex, int *err)
1916 + struct inode *inode = tree->inode;
1917 + int newblock, goal;
1921 + EXT_ASSERT(ex->ee_start);
1922 + EXT_ASSERT(ex->ee_len);
1924 + /* reuse block from the extent to order data/metadata */
1925 + newblock = ex->ee_start++;
1927 + if (ex->ee_len == 0) {
1929 + /* allocate new block for the extent */
1930 + goal = ext3_ext_find_goal(inode, path, ex->ee_block);
1931 + ex->ee_start = ext3_new_block(handle, inode, goal, 0, 0, err);
1932 + if (ex->ee_start == 0) {
1933 + /* error occurred: restore old extent */
1934 + ex->ee_start = newblock;
1941 +static struct ext3_extents_helpers ext3_blockmap_helpers = {
1942 + .get_write_access = ext3_get_inode_write_access,
1943 + .mark_buffer_dirty = ext3_mark_buffer_dirty,
1944 + .mergable = ext3_ext_mergable,
1945 + .new_block = ext3_new_block_cb,
1946 + .remove_extent = ext3_remove_blocks,
1947 + .remove_extent_credits = ext3_remove_blocks_credits,
1950 +void ext3_init_tree_desc(struct ext3_extents_tree *tree,
1951 + struct inode *inode)
1953 + tree->inode = inode;
1954 + tree->root = (void *) EXT3_I(inode)->i_data;
1955 + tree->buffer = (void *) inode;
1956 + tree->buffer_len = sizeof(EXT3_I(inode)->i_data);
1957 + tree->cex = (struct ext3_extent *) &EXT3_I(inode)->i_cached_extent;
1958 + tree->ops = &ext3_blockmap_helpers;
1961 +int ext3_ext_get_block(handle_t *handle, struct inode *inode,
1962 + long iblock, struct buffer_head *bh_result, int create)
1964 + struct ext3_ext_path *path = NULL;
1965 + struct ext3_extent newex;
1966 + struct ext3_extent *ex;
1967 + int goal, newblock, err = 0, depth;
1968 + struct ext3_extents_tree tree;
1970 + clear_bit(BH_New, &bh_result->b_state);
1971 + ext3_init_tree_desc(&tree, inode);
1972 + ext_debug(&tree, "block %d requested for inode %u\n",
1973 + (int) iblock, (unsigned) inode->i_ino);
1974 + down_write(&EXT3_I(inode)->truncate_sem);
1976 + /* check in cache */
1977 + if (ext3_ext_in_cache(&tree, iblock, &newex)) {
1978 + if (newex.ee_start == EXT_CACHE_MARK) {
1979 + /* this is cached gap */
1981 + /* block isn't allocated yet and
1982 + * user doesn't want to allocate it */
1985 + /* we should allocate requested block */
1986 + } else if (newex.ee_start) {
1987 + /* block is already allocated */
1988 + newblock = iblock - newex.ee_block + newex.ee_start;
1993 + /* find extent for this block */
1994 + path = ext3_ext_find_extent(&tree, iblock, NULL);
1995 + if (IS_ERR(path)) {
1996 + err = PTR_ERR(path);
2001 + depth = EXT_DEPTH(&tree);
2004 + * consistent leaf must not be empty
2005 + * this situation is possible, though, _during_ tree modification
2006 + * this is why assert can't be put in ext3_ext_find_extent()
2008 + EXT_ASSERT(path[depth].p_ext != NULL || depth == 0);
2010 + if ((ex = path[depth].p_ext)) {
2011 + /* if found extent covers block, simply return it */
2012 + if (iblock >= ex->ee_block && iblock < ex->ee_block + ex->ee_len) {
2013 + newblock = iblock - ex->ee_block + ex->ee_start;
2014 + ext_debug(&tree, "%d fit into %d:%d -> %d\n",
2015 + (int) iblock, ex->ee_block, ex->ee_len,
2017 + ext3_ext_put_in_cache(&tree, ex);
2023 + * requested block isn't allocated yet
2024 + * we couldn't try to create block if create flag is zero
2027 + /* put just found gap into cache to speed up subsequent reqs */
2028 + ext3_ext_put_gap_in_cache(&tree, path, iblock);
2032 + /* allocate new block */
2033 + goal = ext3_ext_find_goal(inode, path, iblock);
2034 + newblock = ext3_new_block(handle, inode, goal, 0, 0, &err);
2037 + ext_debug(&tree, "allocate new block: goal %d, found %d\n",
2040 + /* try to insert new extent into found leaf and return */
2041 + newex.ee_block = iblock;
2042 + newex.ee_start = newblock;
2044 + err = ext3_ext_insert_extent(handle, &tree, path, &newex);
2048 + if (inode->i_size > EXT3_I(inode)->i_disksize)
2049 + EXT3_I(inode)->i_disksize = inode->i_size;
2051 + /* previous routine could use block we allocated */
2052 + newblock = newex.ee_start;
2053 + set_bit(BH_New, &bh_result->b_state);
2055 + ext3_ext_put_in_cache(&tree, &newex);
2057 + ext3_ext_show_leaf(&tree, path);
2058 + set_bit(BH_Mapped, &bh_result->b_state);
2059 + bh_result->b_dev = inode->i_sb->s_dev;
2060 + bh_result->b_blocknr = newblock;
2063 + ext3_ext_drop_refs(path);
2066 + up_write(&EXT3_I(inode)->truncate_sem);
2071 +void ext3_ext_truncate(struct inode * inode)
2073 + struct address_space *mapping = inode->i_mapping;
2074 + struct super_block *sb = inode->i_sb;
2075 + struct ext3_extents_tree tree;
2076 + unsigned long last_block;
2080 + ext3_init_tree_desc(&tree, inode);
2083 + * probably first extent we're gonna free will be last in block
2085 + err = ext3_writepage_trans_blocks(inode) + 3;
2086 + handle = ext3_journal_start(inode, err);
2087 + if (IS_ERR(handle))
2090 + ext3_block_truncate_page(handle, mapping, inode->i_size);
2092 + down_write(&EXT3_I(inode)->truncate_sem);
2093 + ext3_ext_invalidate_cache(&tree);
2096 + * TODO: optimization is possible here
2097 + * probably we need no scanning at all,
2098 + * because page truncation is enough
2100 + if (ext3_orphan_add(handle, inode))
2103 + /* we have to know where to truncate from in crash case */
2104 + EXT3_I(inode)->i_disksize = inode->i_size;
2105 + ext3_mark_inode_dirty(handle, inode);
2107 + last_block = (inode->i_size + sb->s_blocksize - 1)
2108 + >> EXT3_BLOCK_SIZE_BITS(sb);
2109 + err = ext3_ext_remove_space(&tree, last_block, EXT_MAX_BLOCK);
2111 + /* In a multi-transaction truncate, we only make the final
2112 + * transaction synchronous */
2113 + if (IS_SYNC(inode))
2114 + handle->h_sync = 1;
2118 + * If this was a simple ftruncate(), and the file will remain alive
2119 + * then we need to clear up the orphan record which we created above.
2120 + * However, if this was a real unlink then we were called by
2121 + * ext3_delete_inode(), and we allow that function to clean up the
2122 + * orphan info for us.
2124 + if (inode->i_nlink)
2125 + ext3_orphan_del(handle, inode);
2127 + up_write(&EXT3_I(inode)->truncate_sem);
2128 + ext3_journal_stop(handle, inode);
2132 + * this routine calculates the max number of blocks we could modify
2133 + * in order to allocate new block for an inode
2135 +int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
2137 + struct ext3_extents_tree tree;
2140 + ext3_init_tree_desc(&tree, inode);
2142 + needed = ext3_ext_calc_credits_for_insert(&tree, NULL);
2144 + /* caller want to allocate num blocks */
2147 +#ifdef CONFIG_QUOTA
2149 + * FIXME: real calculation should be here
2150 + * it depends on the blockmap format of the quota file
2152 + needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
2158 +void ext3_extents_initialize_blockmap(handle_t *handle, struct inode *inode)
2160 + struct ext3_extents_tree tree;
2162 + ext3_init_tree_desc(&tree, inode);
2163 + ext3_extent_tree_init(handle, &tree);
2167 +ext3_ext_store_extent_cb(struct ext3_extents_tree *tree,
2168 + struct ext3_ext_path *path,
2169 + struct ext3_extent *newex, int exist)
2171 + struct ext3_extent_buf *buf = (struct ext3_extent_buf *) tree->private;
2174 + return EXT_CONTINUE;
2177 + if (buf->cur - buf->buffer + sizeof(*newex) > buf->buflen)
2180 + if (!copy_to_user(buf->cur, newex, sizeof(*newex))) {
2182 + buf->cur += sizeof(*newex);
2184 + buf->err = -EFAULT;
2187 + return EXT_CONTINUE;
2191 +ext3_ext_collect_stats_cb(struct ext3_extents_tree *tree,
2192 + struct ext3_ext_path *path,
2193 + struct ext3_extent *ex, int exist)
2195 + struct ext3_extent_tree_stats *buf =
2196 + (struct ext3_extent_tree_stats *) tree->private;
2200 + return EXT_CONTINUE;
2202 + depth = EXT_DEPTH(tree);
2203 + buf->extents_num++;
2204 + if (path[depth].p_ext == EXT_FIRST_EXTENT(path[depth].p_hdr))
2206 + return EXT_CONTINUE;
2209 +int ext3_ext_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
2210 + unsigned long arg)
2214 + if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
2217 + if (cmd == EXT3_IOC_GET_EXTENTS) {
2218 + struct ext3_extent_buf buf;
2219 + struct ext3_extents_tree tree;
2221 + if (copy_from_user(&buf, (void *) arg, sizeof(buf)))
2224 + ext3_init_tree_desc(&tree, inode);
2225 + buf.cur = buf.buffer;
2227 + tree.private = &buf;
2228 + down_write(&EXT3_I(inode)->truncate_sem);
2229 + err = ext3_ext_walk_space(&tree, buf.start, EXT_MAX_BLOCK,
2230 + ext3_ext_store_extent_cb);
2231 + up_write(&EXT3_I(inode)->truncate_sem);
2234 + } else if (cmd == EXT3_IOC_GET_TREE_STATS) {
2235 + struct ext3_extent_tree_stats buf;
2236 + struct ext3_extents_tree tree;
2238 + ext3_init_tree_desc(&tree, inode);
2239 + down_write(&EXT3_I(inode)->truncate_sem);
2240 + buf.depth = EXT_DEPTH(&tree);
2241 + buf.extents_num = 0;
2243 + tree.private = &buf;
2244 + err = ext3_ext_walk_space(&tree, 0, EXT_MAX_BLOCK,
2245 + ext3_ext_collect_stats_cb);
2246 + up_write(&EXT3_I(inode)->truncate_sem);
2248 + err = copy_to_user((void *) arg, &buf, sizeof(buf));
2249 + } else if (cmd == EXT3_IOC_GET_TREE_DEPTH) {
2250 + struct ext3_extents_tree tree;
2251 + ext3_init_tree_desc(&tree, inode);
2252 + down_write(&EXT3_I(inode)->truncate_sem);
2253 + err = EXT_DEPTH(&tree);
2254 + up_write(&EXT3_I(inode)->truncate_sem);
2260 +EXPORT_SYMBOL(ext3_init_tree_desc);
2261 +EXPORT_SYMBOL(ext3_mark_inode_dirty);
2262 +EXPORT_SYMBOL(ext3_ext_invalidate_cache);
2263 +EXPORT_SYMBOL(ext3_ext_insert_extent);
2264 +EXPORT_SYMBOL(ext3_ext_walk_space);
2265 +EXPORT_SYMBOL(ext3_ext_find_goal);
2266 +EXPORT_SYMBOL(ext3_ext_calc_credits_for_insert);
2268 Index: linux-2.4.21-suse2/fs/ext3/ialloc.c
2269 ===================================================================
2270 --- linux-2.4.21-suse2.orig/fs/ext3/ialloc.c 2004-08-19 13:51:48.000000000 +0400
2271 +++ linux-2.4.21-suse2/fs/ext3/ialloc.c 2004-09-12 18:26:45.000000000 +0400
2272 @@ -592,10 +592,21 @@
2276 - err = ext3_mark_iloc_dirty(handle, inode, &iloc);
2277 - if (err) goto fail;
2280 + if (test_opt(sb, EXTENTS)) {
2281 + EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
2282 + ext3_extents_initialize_blockmap(handle, inode);
2283 + if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS)) {
2284 + err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
2285 + if (err) goto fail;
2286 + EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_EXTENTS);
2287 + BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
2288 + err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
2292 + err = ext3_mark_iloc_dirty(handle, inode, &iloc);
2293 + if (err) goto fail;
2295 #ifdef CONFIG_EXT3_FS_XATTR
2296 init_rwsem(&inode->u.ext3_i.xattr_sem);
2297 Index: linux-2.4.21-suse2/fs/ext3/inode.c
2298 ===================================================================
2299 --- linux-2.4.21-suse2.orig/fs/ext3/inode.c 2004-08-19 13:51:48.000000000 +0400
2300 +++ linux-2.4.21-suse2/fs/ext3/inode.c 2004-09-12 17:56:01.000000000 +0400
2301 @@ -853,6 +853,16 @@
2306 +ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
2307 + struct buffer_head *bh, int create, int extend_disksize)
2309 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2310 + return ext3_ext_get_block(handle, inode, block, bh, create);
2311 + return ext3_get_block_handle(handle, inode, block, bh, create,
2316 * The BKL is not held on entry here.
2319 handle = ext3_journal_current_handle();
2320 J_ASSERT(handle != 0);
2322 - ret = ext3_get_block_handle(handle, inode, iblock,
2323 + ret = ext3_get_block_wrap(handle, inode, iblock,
2324 bh_result, create, 1);
2331 - ret = ext3_get_block_handle(handle, inode, iblock,
2332 + ret = ext3_get_block_wrap(handle, inode, iblock,
2333 bh_result, create, 0);
2335 bh_result->b_size = (1 << inode->i_blkbits);
2338 dummy.b_blocknr = -1000;
2339 buffer_trace_init(&dummy.b_history);
2340 - *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
2341 + *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
2342 if (!*errp && buffer_mapped(&dummy)) {
2343 struct buffer_head *bh;
2344 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
2345 @@ -1502,7 +1512,7 @@
2346 * This required during truncate. We need to physically zero the tail end
2347 * of that block so it doesn't yield old data if the file is later grown.
2349 -static int ext3_block_truncate_page(handle_t *handle,
2350 +int ext3_block_truncate_page(handle_t *handle,
2351 struct address_space *mapping, loff_t from)
2353 unsigned long index = from >> PAGE_CACHE_SHIFT;
2354 @@ -1988,6 +1998,9 @@
2356 ext3_discard_prealloc(inode);
2358 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2359 + return ext3_ext_truncate(inode);
2361 handle = start_transaction(inode);
2363 return; /* AKPM: return what? */
2364 @@ -2664,6 +2677,9 @@
2365 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
2368 + if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
2369 + return ext3_ext_writepage_trans_blocks(inode, bpp);
2371 if (ext3_should_journal_data(inode))
2372 ret = 3 * (bpp + indirects) + 2;
2374 @@ -3100,7 +3116,7 @@
2376 /* alloc blocks one by one */
2377 for (i = 0; i < nblocks; i++) {
2378 - ret = ext3_get_block_handle(handle, inode, blocks[i],
2379 + ret = ext3_get_block_wrap(handle, inode, blocks[i],
2383 @@ -3176,7 +3192,7 @@
2387 - rc = ext3_get_block_handle(handle, inode, iblock, &bh, 1, 1);
2388 + rc = ext3_get_block_wrap(handle, inode, iblock, &bh, 1, 1);
2390 printk(KERN_INFO "ext3_map_inode_page: error %d "
2391 "allocating block %ld\n", rc, iblock);
2392 Index: linux-2.4.21-suse2/fs/ext3/Makefile
2393 ===================================================================
2394 --- linux-2.4.21-suse2.orig/fs/ext3/Makefile 2004-08-19 13:45:03.000000000 +0400
2395 +++ linux-2.4.21-suse2/fs/ext3/Makefile 2004-09-12 17:56:01.000000000 +0400
2397 export-objs := ext3-exports.o
2399 obj-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
2400 - ioctl.o namei.o super.o symlink.o hash.o ext3-exports.o
2401 + ioctl.o namei.o super.o symlink.o hash.o ext3-exports.o \
2403 +export-objs += extents.o
2405 obj-m := $(O_TARGET)
2407 export-objs += xattr.o
2408 Index: linux-2.4.21-suse2/fs/ext3/super.c
2409 ===================================================================
2410 --- linux-2.4.21-suse2.orig/fs/ext3/super.c 2004-08-19 13:51:48.000000000 +0400
2411 +++ linux-2.4.21-suse2/fs/ext3/super.c 2004-09-12 17:56:01.000000000 +0400
2415 J_ASSERT(sbi->s_delete_inodes == 0);
2416 + ext3_ext_release(sb);
2417 ext3_xattr_put_super(sb);
2418 journal_destroy(sbi->s_journal);
2419 if (!(sb->s_flags & MS_RDONLY)) {
2420 @@ -829,6 +830,10 @@
2424 + else if (!strcmp (this_char, "extents"))
2425 + set_opt (*mount_options, EXTENTS);
2426 + else if (!strcmp (this_char, "extdebug"))
2427 + set_opt (*mount_options, EXTDEBUG);
2428 else if (!strcmp (this_char, "grpid") ||
2429 !strcmp (this_char, "bsdgroups"))
2430 set_opt (*mount_options, GRPID);
2431 @@ -1524,6 +1529,8 @@
2432 test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
2435 + ext3_ext_init(sb);
2440 Index: linux-2.4.21-suse2/fs/ext3/ioctl.c
2441 ===================================================================
2442 --- linux-2.4.21-suse2.orig/fs/ext3/ioctl.c 2004-08-19 13:45:03.000000000 +0400
2443 +++ linux-2.4.21-suse2/fs/ext3/ioctl.c 2004-09-12 17:56:01.000000000 +0400
2444 @@ -174,6 +174,10 @@
2448 + case EXT3_IOC_GET_EXTENTS:
2449 + case EXT3_IOC_GET_TREE_STATS:
2450 + case EXT3_IOC_GET_TREE_DEPTH:
2451 + return ext3_ext_ioctl(inode, filp, cmd, arg);
2455 Index: linux-2.4.21-suse2/include/linux/ext3_fs.h
2456 ===================================================================
2457 --- linux-2.4.21-suse2.orig/include/linux/ext3_fs.h 2004-08-19 13:51:48.000000000 +0400
2458 +++ linux-2.4.21-suse2/include/linux/ext3_fs.h 2004-09-12 18:26:06.000000000 +0400
2460 #define EXT3_IMAGIC_FL 0x00002000 /* AFS directory */
2461 #define EXT3_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */
2462 #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
2463 +#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
2465 #define EXT3_FL_USER_VISIBLE 0x00005FFF /* User visible flags */
2466 #define EXT3_FL_USER_MODIFIABLE 0x000000FF /* User modifiable flags */
2468 #ifdef CONFIG_JBD_DEBUG
2469 #define EXT3_IOC_WAIT_FOR_READONLY _IOR('f', 99, long)
2471 +#define EXT3_IOC_GET_EXTENTS _IOR('f', 5, long)
2472 +#define EXT3_IOC_GET_TREE_DEPTH _IOR('f', 6, long)
2473 +#define EXT3_IOC_GET_TREE_STATS _IOR('f', 7, long)
2476 * Structure of an inode on the disk
2478 #define EXT3_MOUNT_ASYNCDEL 0x20000 /* Delayed deletion */
2479 #define EXT3_MOUNT_IOPEN 0x40000 /* Allow access via iopen */
2480 #define EXT3_MOUNT_IOPEN_NOPRIV 0x80000 /* Make iopen world-readable */
2481 +#define EXT3_MOUNT_EXTENTS 0x100000/* Extents support */
2482 +#define EXT3_MOUNT_EXTDEBUG 0x200000/* Extents debug */
2484 /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
2485 #ifndef _LINUX_EXT2_FS_H
2486 @@ -504,10 +510,12 @@
2487 #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
2488 #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
2489 #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
2490 +#define EXT3_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
2492 #define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
2493 #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
2494 - EXT3_FEATURE_INCOMPAT_RECOVER)
2495 + EXT3_FEATURE_INCOMPAT_RECOVER| \
2496 + EXT3_FEATURE_INCOMPAT_EXTENTS)
2497 #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
2498 EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
2499 EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
2501 extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
2504 +extern int ext3_block_truncate_page(handle_t *, struct address_space *, loff_t);
2505 extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
2506 extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
2507 extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
2508 @@ -770,6 +779,14 @@
2509 extern struct inode_operations ext3_symlink_inode_operations;
2510 extern struct inode_operations ext3_fast_symlink_inode_operations;
2513 +extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
2514 +extern int ext3_ext_get_block(handle_t *, struct inode *, long,
2515 + struct buffer_head *, int);
2516 +extern void ext3_ext_truncate(struct inode *);
2517 +extern void ext3_ext_init(struct super_block *);
2518 +extern void ext3_ext_release(struct super_block *);
2519 +extern void ext3_extents_initialize_blockmap(handle_t *, struct inode *);
2521 #endif /* __KERNEL__ */
2523 Index: linux-2.4.21-suse2/include/linux/ext3_extents.h
2524 ===================================================================
2525 --- linux-2.4.21-suse2.orig/include/linux/ext3_extents.h 2003-01-30 13:24:37.000000000 +0300
2526 +++ linux-2.4.21-suse2/include/linux/ext3_extents.h 2004-09-12 17:56:01.000000000 +0400
2529 + * Copyright (c) 2003, Cluster File Systems, Inc, info@clusterfs.com
2530 + * Written by Alex Tomas <alex@clusterfs.com>
2532 + * This program is free software; you can redistribute it and/or modify
2533 + * it under the terms of the GNU General Public License version 2 as
2534 + * published by the Free Software Foundation.
2536 + * This program is distributed in the hope that it will be useful,
2537 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2538 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2539 + * GNU General Public License for more details.
2541 + * You should have received a copy of the GNU General Public License
2542 + * along with this program; if not, write to the Free Software
2543 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
2546 +#ifndef _LINUX_EXT3_EXTENTS
2547 +#define _LINUX_EXT3_EXTENTS
2550 + * with AGRESSIVE_TEST defined capacity of index/leaf blocks
2551 + * become very little, so index split, in-depth growing and
2552 + * other hard changes happen much more often
2553 + * this is for debug purposes only
2555 +#define AGRESSIVE_TEST_
2558 + * if CHECK_BINSEARCH defined, then results of binary search
2559 + * will be checked by linear search
2561 +#define CHECK_BINSEARCH_
2564 + * if EXT_DEBUG is defined you can use 'extdebug' mount option
2565 + * to get lots of info what's going on
2569 +#define ext_debug(tree,fmt,a...) \
2571 + if (test_opt((tree)->inode->i_sb, EXTDEBUG)) \
2572 + printk(fmt, ##a); \
2575 +#define ext_debug(tree,fmt,a...)
2579 + * if EXT_STATS is defined then stats numbers are collected
2580 + * these numbers will be displayed at umount time
2585 +#define EXT3_ALLOC_NEEDED 3 /* block bitmap + group desc. + sb */
2588 + * ext3_inode has i_block array (total 60 bytes)
2589 + * first 4 bytes are used to store:
2590 + * - tree depth (0 means there is no tree yet. all extents in the inode)
2591 + * - number of alive extents in the inode
2595 + * this is extent on-disk structure
2596 + * it's used at the bottom of the tree
2598 +struct ext3_extent {
2599 + __u32 ee_block; /* first logical block extent covers */
2600 + __u16 ee_len; /* number of blocks covered by extent */
2601 + __u16 ee_start_hi; /* high 16 bits of physical block */
2602 + __u32 ee_start; /* low 32 bits of physical block */
2606 + * this is index on-disk structure
2607 + * it's used at all the levels, but the bottom
2609 +struct ext3_extent_idx {
2610 + __u32 ei_block; /* index covers logical blocks from 'block' */
2611 + __u32 ei_leaf; /* pointer to the physical block of the next *
2612 + * level. leaf or next index could be here */
2613 + __u16 ei_leaf_hi; /* high 16 bits of physical block */
2618 + * each block (leaves and indexes), even inode-stored has header
2620 +struct ext3_extent_header {
2621 + __u16 eh_magic; /* probably will support different formats */
2622 + __u16 eh_entries; /* number of valid entries */
2623 + __u16 eh_max; /* capacity of store in entries */
2624 + __u16 eh_depth; /* has tree real underlying blocks? */
2625 + __u32 eh_generation; /* generation of the tree */
2628 +#define EXT3_EXT_MAGIC 0xf30a
2631 + * array of ext3_ext_path contains path to some extent
2632 + * creation/lookup routines use it for traversal/splitting/etc
2633 + * truncate uses it to simulate recursive walking
2635 +struct ext3_ext_path {
2638 + struct ext3_extent *p_ext;
2639 + struct ext3_extent_idx *p_idx;
2640 + struct ext3_extent_header *p_hdr;
2641 + struct buffer_head *p_bh;
2645 + * structure for external API
2649 + * ext3_extents_tree is used to pass initial information
2650 + * to top-level extents API
2652 +struct ext3_extents_helpers;
2653 +struct ext3_extents_tree {
2654 + struct inode *inode; /* inode which tree belongs to */
2655 + void *root; /* ptr to data top of tree resides at */
2656 + void *buffer; /* will be passed as arg to ^^ routines */
2659 + struct ext3_extent *cex;/* last found extent */
2660 + struct ext3_extents_helpers *ops;
2663 +struct ext3_extents_helpers {
2664 + int (*get_write_access)(handle_t *h, void *buffer);
2665 + int (*mark_buffer_dirty)(handle_t *h, void *buffer);
2666 + int (*mergable)(struct ext3_extent *ex1, struct ext3_extent *ex2);
2667 + int (*remove_extent_credits)(struct ext3_extents_tree *,
2668 + struct ext3_extent *, unsigned long,
2670 + int (*remove_extent)(struct ext3_extents_tree *,
2671 + struct ext3_extent *, unsigned long,
2673 + int (*new_block)(handle_t *, struct ext3_extents_tree *,
2674 + struct ext3_ext_path *, struct ext3_extent *,
2679 + * to be called by ext3_ext_walk_space()
2680 + * negative retcode - error
2681 + * positive retcode - signal for ext3_ext_walk_space(), see below
2682 + * callback must return valid extent (passed or newly created)
2684 +typedef int (*ext_prepare_callback)(struct ext3_extents_tree *,
2685 + struct ext3_ext_path *,
2686 + struct ext3_extent *, int);
2688 +#define EXT_CONTINUE 0
2689 +#define EXT_BREAK 1
2690 +#define EXT_REPEAT 2
2693 +#define EXT_MAX_BLOCK 0xffffffff
2694 +#define EXT_CACHE_MARK 0xffff
2697 +#define EXT_FIRST_EXTENT(__hdr__) \
2698 + ((struct ext3_extent *) (((char *) (__hdr__)) + \
2699 + sizeof(struct ext3_extent_header)))
2700 +#define EXT_FIRST_INDEX(__hdr__) \
2701 + ((struct ext3_extent_idx *) (((char *) (__hdr__)) + \
2702 + sizeof(struct ext3_extent_header)))
2703 +#define EXT_HAS_FREE_INDEX(__path__) \
2704 + ((__path__)->p_hdr->eh_entries < (__path__)->p_hdr->eh_max)
2705 +#define EXT_LAST_EXTENT(__hdr__) \
2706 + (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_entries - 1)
2707 +#define EXT_LAST_INDEX(__hdr__) \
2708 + (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_entries - 1)
2709 +#define EXT_MAX_EXTENT(__hdr__) \
2710 + (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->eh_max - 1)
2711 +#define EXT_MAX_INDEX(__hdr__) \
2712 + (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->eh_max - 1)
2714 +#define EXT_ROOT_HDR(tree) \
2715 + ((struct ext3_extent_header *) (tree)->root)
2716 +#define EXT_BLOCK_HDR(bh) \
2717 + ((struct ext3_extent_header *) (bh)->b_data)
2718 +#define EXT_DEPTH(_t_) \
2719 + (((struct ext3_extent_header *)((_t_)->root))->eh_depth)
2720 +#define EXT_GENERATION(_t_) \
2721 + (((struct ext3_extent_header *)((_t_)->root))->eh_generation)
2724 +#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
2728 + * this structure is used to gather extents from the tree via ioctl
2730 +struct ext3_extent_buf {
2731 + unsigned long start;
2739 + * this structure is used to collect stats info about the tree
2741 +struct ext3_extent_tree_stats {
2747 +void ext3_init_tree_desc(struct ext3_extents_tree *, struct inode *);
2748 +extern int ext3_extent_tree_init(handle_t *, struct ext3_extents_tree *);
2749 +extern int ext3_ext_calc_credits_for_insert(struct ext3_extents_tree *, struct ext3_ext_path *);
2750 +extern int ext3_ext_insert_extent(handle_t *, struct ext3_extents_tree *, struct ext3_ext_path *, struct ext3_extent *);
2751 +extern int ext3_ext_walk_space(struct ext3_extents_tree *, unsigned long, unsigned long, ext_prepare_callback);
2752 +extern int ext3_ext_remove_space(struct ext3_extents_tree *, unsigned long, unsigned long);
2753 +extern struct ext3_ext_path * ext3_ext_find_extent(struct ext3_extents_tree *, int, struct ext3_ext_path *);
2756 +ext3_ext_invalidate_cache(struct ext3_extents_tree *tree)
2759 + tree->cex->ee_len = 0;
2763 +#endif /* _LINUX_EXT3_EXTENTS */
2765 Index: linux-2.4.21-suse2/include/linux/ext3_fs_i.h
2766 ===================================================================
2767 --- linux-2.4.21-suse2.orig/include/linux/ext3_fs_i.h 2004-08-19 13:51:48.000000000 +0400
2768 +++ linux-2.4.21-suse2/include/linux/ext3_fs_i.h 2004-09-12 17:56:01.000000000 +0400
2770 * by other means, so we have truncate_sem.
2772 struct rw_semaphore truncate_sem;
2774 + __u32 i_cached_extent[3];
2777 #endif /* _LINUX_EXT3_FS_I */