- list_for_each_entry() added
fs/lustre-release.git: lustre/kernel_patches/patches/ext3-extents-2.4.20.patch
1  fs/ext3/Makefile           |    3 
2  fs/ext3/extents.c          | 1570 +++++++++++++++++++++++++++++++++++++++++++++
3  fs/ext3/ialloc.c           |    4 
4  fs/ext3/inode.c            |   28 
5  fs/ext3/super.c            |    6 
6  include/linux/ext3_fs.h    |   16 
7  include/linux/ext3_fs_i.h  |    4 
8  include/linux/ext3_fs_sb.h |   10 
9  8 files changed, 1634 insertions(+), 7 deletions(-)
10
11 --- /dev/null   2003-01-30 13:24:37.000000000 +0300
12 +++ linux-2.4.20-vanilla-alexey/fs/ext3/extents.c       2003-09-15 19:57:29.000000000 +0400
13 @@ -0,0 +1,1570 @@
14 +/*
15 + *
16 + * linux/fs/ext3/extents.c
17 + *
18 + * Extents support for EXT3
19 + *
20 + * 07/08/2003    Alex Tomas <bzzz@tmi.comex.ru>
21 + * 
22 + * TODO:
23 + *   - ext3*_error() should be used in some situations
24 + *   - find_goal() [to be tested and improved]
25 + *   - error handling
26 + *   - we could leak allocated block in some error cases
27 + *   - quick search for index/leaf in ext3_ext_find_extent()
28 + *   - tree reduction
29 + *   - cache last found extent
30 + *   - arch-independent
31 + */
32 +
33 +#include <linux/module.h>
34 +#include <linux/fs.h>
35 +#include <linux/time.h>
36 +#include <linux/ext3_jbd.h>
37 +#include <linux/jbd.h>
38 +#include <linux/smp_lock.h>
39 +#include <linux/highuid.h>
40 +#include <linux/pagemap.h>
41 +#include <linux/quotaops.h>
42 +#include <linux/string.h>
43 +#include <linux/slab.h>
44 +#include <linux/locks.h>
45 +
46 +/*
47 + * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
48 + * becomes very small, so index splits, in-depth growing and
49 + * other hard changes happen much more often
50 + * this is for debug purposes only
51 + */
52 +#define AGRESSIVE_TEST_
53 +
54 +/*
55 + * if EXT_DEBUG is defined, you can use the 'extdebug' mount option
56 + * to get lots of info about what's going on
57 + */
58 +#define EXT_DEBUG
59 +#ifdef EXT_DEBUG
60 +#define ext_debug(inode,fmt,a...)              \
61 +do {                                           \
62 +       if (test_opt((inode)->i_sb, EXTDEBUG))  \
63 +               printk(fmt, ##a);               \
64 +} while (0)
65 +#else
66 +#define ext_debug(inode,fmt,a...)
67 +#endif
68 +
69 +#define EXT3_ALLOC_NEEDED      2       /* block bitmap + group descriptor */
70 +
71 +/*
72 + * ext3_inode has i_block array (total 60 bytes)
73 + * first 4 bytes are used to store:
74 + *  - tree depth (0 means there is no tree yet; all extents are in the inode)
75 + *  - number of alive extents in the inode
76 + */
77 +
78 +/*
79 + * this is extent on-disk structure
80 + * it's used at the bottom of the tree
81 + */
82 +struct ext3_extent {
83 +       __u32   e_block;        /* first logical block extent covers */
84 +       __u32   e_start;        /* first physical block the extent lives at */
85 +       __u32   e_num;          /* number of blocks covered by extent */
86 +};
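+
+/*
+ * a small illustrative example (hypothetical numbers): the extent
+ * { e_block = 100, e_start = 5000, e_num = 3 } maps logical blocks
+ * 100..102 of the file onto physical blocks 5000..5002, exactly as
+ * ext3_ext_get_block() computes below:
+ *     newblock = iblock - ex->e_block + ex->e_start
+ */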
87 +
88 +/*
89 + * this is index on-disk structure
90 + * it's used at all levels except the bottom
91 + */
92 +struct ext3_extent_idx {
93 +       __u32   e_block;        /* index covers logical blocks from 'block' */
94 +       __u32   e_leaf;         /* pointer to the physical block of the next *
95 +                                * level. a leaf or the next index could be here */
96 +};
97 +
98 +/*
99 + * each block (leaves and indexes), even the inode-stored one, has a header
100 + */
101 +struct ext3_extent_header {    
102 +       __u16   e_num;          /* number of valid entries */
103 +       __u16   e_max;          /* capacity of the store, in entries */
104 +};
105 +
106 +/*
107 + * array of ext3_ext_path contains path to some extent
108 + * creation/lookup routines use it for traversal/splitting/etc
109 + * truncate uses it to simulate recursive walking
110 + */
111 +struct ext3_ext_path {
112 +       __u32                           p_block;
113 +       __u16                           p_depth;
114 +       struct ext3_extent              *p_ext;
115 +       struct ext3_extent_idx          *p_idx;
116 +       struct ext3_extent_header       *p_hdr;
117 +       struct buffer_head              *p_bh;
118 +};
119 +
120 +#define EXT_FIRST_EXTENT(__hdr__) \
121 +       ((struct ext3_extent *) (((char *) (__hdr__)) +         \
122 +                                sizeof(struct ext3_extent_header)))
123 +#define EXT_FIRST_INDEX(__hdr__) \
124 +       ((struct ext3_extent_idx *) (((char *) (__hdr__)) +     \
125 +                                    sizeof(struct ext3_extent_header)))
126 +#define EXT_HAS_FREE_INDEX(__path__) \
127 +       ((__path__)->p_hdr->e_num < (__path__)->p_hdr->e_max)
128 +#define EXT_LAST_EXTENT(__hdr__) \
129 +       (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->e_num - 1)
130 +#define EXT_LAST_INDEX(__hdr__) \
131 +       (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->e_num - 1)
132 +#define EXT_MAX_EXTENT(__hdr__) \
133 +       (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->e_max - 1)
134 +#define EXT_MAX_INDEX(__hdr__) \
135 +       (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->e_max - 1)
136 +
137 +
138 +#define EXT_ASSERT(__x__) do { if (!(__x__)) BUG(); } while (0)
139 +
140 +/*
141 + * could return:
142 + *  - EROFS
143 + *  - ENOMEM
144 + */
145 +static int ext3_ext_get_access(handle_t *handle, struct inode *inode,
146 +                               struct ext3_ext_path *path)
147 +{
148 +       if (path->p_bh) {
149 +               /* path points to block */
150 +               return ext3_journal_get_write_access(handle, path->p_bh);
151 +       }
152 +
153 +       /* path points to leaf/index in inode body */
154 +       return 0;
155 +}
156 +
157 +/*
158 + * could return:
159 + *  - EROFS
160 + *  - ENOMEM
161 + *  - EIO
162 + */
163 +static int ext3_ext_dirty(handle_t *handle, struct inode *inode,
164 +                               struct ext3_ext_path *path)
165 +{
166 +       if (path->p_bh) {
167 +               /* path points to block */
168 +               return ext3_journal_dirty_metadata(handle, path->p_bh);
169 +       }
170 +
171 +       /* path points to leaf/index in inode body */
172 +       return ext3_mark_inode_dirty(handle, inode);
173 +}
174 +
175 +static inline int ext3_ext_space_block(struct inode *inode)
176 +{
177 +       int size;
178 +
179 +       size = (inode->i_sb->s_blocksize - sizeof(struct ext3_extent_header))
180 +               / sizeof(struct ext3_extent);
181 +#ifdef AGRESSIVE_TEST
182 +       size = 6; /* FIXME: for debug, remove this line */
183 +#endif
184 +       return size;
185 +}
186 +
187 +static inline int ext3_ext_space_inode(struct inode *inode)
188 +{
189 +       int size;
190 +
191 +       size = (sizeof(EXT3_I(inode)->i_data) -
192 +                       sizeof(struct ext3_extent_header))
193 +                       / sizeof(struct ext3_extent);
194 +#ifdef AGRESSIVE_TEST
195 +       size = 3; /* FIXME: for debug, remove this line */
196 +#endif
197 +       return size;
198 +}
199 +
200 +static inline int ext3_ext_space_inode_idx(struct inode *inode)
201 +{
202 +       int size;
203 +
204 +       size = (sizeof(EXT3_I(inode)->i_data) -
205 +                       sizeof(struct ext3_extent_header))
206 +                       / sizeof(struct ext3_extent_idx);
207 +#ifdef AGRESSIVE_TEST
208 +       size = 4; /* FIXME: for debug, remove this line */
209 +#endif
210 +       return size;
211 +}
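+
+/*
+ * for reference (ignoring AGRESSIVE_TEST): with the 4-byte header,
+ * the 60-byte i_data holds (60 - 4) / 12 = 4 extents or
+ * (60 - 4) / 8 = 7 indexes, and a 4096-byte block holds
+ * (4096 - 4) / 12 = 341 extents
+ */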
212 +
213 +static void ext3_ext_show_path(struct inode *inode, struct ext3_ext_path *path)
214 +{
215 +       int k, l = path->p_depth;
216 +
217 +       ext_debug(inode, "path:");
218 +       for (k = 0; k <= l; k++, path++) {
219 +               if (path->p_idx) {
220 +                       ext_debug(inode, "  %d->%d", path->p_idx->e_block,
221 +                                       path->p_idx->e_leaf);
222 +               } else if (path->p_ext) {
223 +                       ext_debug(inode, "  %d:%d:%d",
224 +                                       path->p_ext->e_block,
225 +                                       path->p_ext->e_start,
226 +                                       path->p_ext->e_num);
227 +               } else
228 +                       ext_debug(inode, "  []");
229 +       }
230 +       ext_debug(inode, "\n");
231 +}
232 +
233 +static void ext3_ext_show_leaf(struct inode *inode, struct ext3_ext_path *path)
234 +{
235 +       int depth = EXT3_I(inode)->i_depth;
236 +       struct ext3_extent_header *eh = path[depth].p_hdr;
237 +       struct ext3_extent *ex = EXT_FIRST_EXTENT(eh);
238 +       int i;
239 +
240 +       for (i = 0; i < eh->e_num; i++, ex++) {
241 +               ext_debug(inode, "%d:%d:%d ",
242 +                               ex->e_block, ex->e_start, ex->e_num);
243 +       }
244 +       ext_debug(inode, "\n");
245 +}
246 +
247 +static void ext3_ext_drop_refs(struct inode *inode, struct ext3_ext_path *path)
248 +{
249 +       int depth = path->p_depth;
250 +       int i;
251 +
252 +       for (i = 0; i <= depth; i++, path++)
253 +               if (path->p_bh) {
254 +                       brelse(path->p_bh);
255 +                       path->p_bh = NULL;
256 +               }
257 +}
258 +
259 +static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path)
260 +{
261 +       struct ext3_inode_info *ei = EXT3_I(inode);
262 +       unsigned long bg_start;
263 +       unsigned long colour;
264 +       int depth;
265 +       
266 +       if (path) {
267 +               depth = path->p_depth;
268 +               /* try to find previous block */
269 +               if (path[depth].p_ext)
270 +                       return path[depth].p_ext->e_start +
271 +                               path[depth].p_ext->e_num - 1;
272 +               
273 +               /* it looks like the index is empty;
274 +                * try to find a goal starting from the index block itself */
275 +               if (path[depth].p_bh)
276 +                       return path[depth].p_bh->b_blocknr;
277 +       }
278 +
279 +       /* OK. use inode's group */
280 +       bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
281 +               le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
282 +       colour = (current->pid % 16) *
283 +                       (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
284 +       return bg_start + colour;
285 +}
286 +
287 +static struct ext3_ext_path *
288 +ext3_ext_find_extent(struct inode *inode, int block, struct ext3_ext_path *path)
289 +{
290 +       struct ext3_inode_info *ei = EXT3_I(inode);
291 +       struct ext3_extent_header *eh = (void *) ei->i_data;
292 +       struct ext3_extent_idx *ix;
293 +       struct buffer_head *bh;
294 +       struct ext3_extent *ex;
295 +       int depth, i, k, ppos = 0;
296 +       
297 +       eh = (struct ext3_extent_header *) ei->i_data;
298 +
299 +       /* initialize capacity of leaf in inode for first time */
300 +       if (eh->e_max == 0)
301 +               eh->e_max = ext3_ext_space_inode(inode);
302 +       i = depth = ei->i_depth;
303 +       EXT_ASSERT(i == 0 || eh->e_num > 0);
304 +       
305 +       /* account possible depth increase */
306 +       if (!path) {
307 +               path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
308 +                               GFP_NOFS);
309 +               if (!path)
310 +                       return ERR_PTR(-ENOMEM);
311 +       }
312 +       memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
313 +
314 +       /* walk through the tree */
315 +       while (i) {
316 +               ext_debug(inode, "depth %d: num %d, max %d\n",
317 +                               ppos, eh->e_num, eh->e_max);
318 +               ix = EXT_FIRST_INDEX(eh);
319 +               if (eh->e_num)
320 +                       path[ppos].p_idx = ix;
321 +               EXT_ASSERT(eh->e_num <= eh->e_max);
322 +               for (k = 0; k < eh->e_num; k++, ix++) {
323 +                       ext_debug(inode, "index: %d -> %d\n",
324 +                                       ix->e_block, ix->e_leaf);
325 +                       if (block < ix->e_block)
326 +                               break;
327 +                       path[ppos].p_idx = ix;
328 +               }
329 +               path[ppos].p_block = path[ppos].p_idx->e_leaf;
330 +               path[ppos].p_depth = i;
331 +               path[ppos].p_hdr = eh;
332 +               path[ppos].p_ext = NULL;
333 +
334 +               bh = sb_bread(inode->i_sb, path[ppos].p_block);
335 +               if (!bh) {
336 +                       ext3_ext_drop_refs(inode, path);
337 +                       kfree(path);
338 +                       return ERR_PTR(-EIO);
339 +               }
340 +               eh = (struct ext3_extent_header *) bh->b_data;
341 +               ppos++;
342 +               EXT_ASSERT(ppos <= depth);
343 +               path[ppos].p_bh = bh;
344 +               i--;
345 +       }
346 +
347 +       path[ppos].p_depth = i;
348 +       path[ppos].p_hdr = eh;
349 +       path[ppos].p_ext = NULL;
350 +       
351 +       /* find extent */
352 +       ex = EXT_FIRST_EXTENT(eh);
353 +       if (eh->e_num)
354 +               path[ppos].p_ext = ex;
355 +       EXT_ASSERT(eh->e_num <= eh->e_max);
356 +       for (k = 0; k < eh->e_num; k++, ex++) {
357 +               if (block < ex->e_block) 
358 +                       break;
359 +               path[ppos].p_ext = ex;
360 +       }
361 +
362 +       ext3_ext_show_path(inode, path);
363 +
364 +       return path;
365 +}
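+
+/*
+ * note on the path returned above: path[0] describes the root header
+ * stored in the inode body (its p_bh stays NULL), path[depth] describes
+ * the leaf, and each deeper entry carries the buffer_head of its block
+ * plus the index/extent entry that was followed within it
+ */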
366 +
367 +static void ext3_ext_check_boundary(struct inode *inode,
368 +                                       struct ext3_ext_path *curp,
369 +                                       void *addr, int len)
370 +{
371 +       void *end;
372 +
373 +       if (!len)
374 +               return;
375 +       if (curp->p_bh)
376 +               end = (void *) curp->p_hdr + inode->i_sb->s_blocksize;
377 +       else
378 +               end = (void *) curp->p_hdr + sizeof(EXT3_I(inode)->i_data);
379 +       if (((unsigned long) addr) + len > (unsigned long) end) {
380 +               printk("overflow! 0x%p > 0x%p\n", addr + len, end);
381 +               BUG();
382 +       }
383 +       if ((unsigned long) addr < (unsigned long) curp->p_hdr) {
384 +               printk("underflow! 0x%p < 0x%p\n", addr, curp->p_hdr);
385 +               BUG();
386 +       }
387 +}
388 +
389 +/*
390 + * insert new index [logical;ptr] into the block at curp
391 + * it checks where to insert: before curp or after curp
392 + */
393 +static int ext3_ext_insert_index(handle_t *handle, struct inode *inode,
394 +                               struct ext3_ext_path *curp, int logical,
395 +                               int ptr)
396 +{
397 +       struct ext3_extent_idx *ix;
398 +       int len, err;
399 +
400 +       if ((err = ext3_ext_get_access(handle, inode, curp)))
401 +               return err;
402 +
403 +       EXT_ASSERT(logical != curp->p_idx->e_block);
404 +       len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
405 +       if (logical > curp->p_idx->e_block) {
406 +               /* insert after */
407 +               len = (len - 1) * sizeof(struct ext3_extent_idx);
408 +               len = len < 0 ? 0 : len;
409 +               ext_debug(inode, "insert new index %d after: %d. "
410 +                               "move %d from 0x%p to 0x%p\n",
411 +                               logical, ptr, len,
412 +                               (curp->p_idx + 1), (curp->p_idx + 2));
413 +
414 +               ext3_ext_check_boundary(inode, curp, curp->p_idx + 2, len);
415 +               memmove(curp->p_idx + 2, curp->p_idx + 1, len);
416 +               ix = curp->p_idx + 1;
417 +       } else {
418 +               /* insert before */
419 +               len = len * sizeof(struct ext3_extent_idx);
420 +               len = len < 0 ? 0 : len;
421 +               ext_debug(inode, "insert new index %d before: %d. "
422 +                               "move %d from 0x%p to 0x%p\n",
423 +                               logical, ptr, len,
424 +                               curp->p_idx, (curp->p_idx + 1));
425 +
426 +               ext3_ext_check_boundary(inode, curp, curp->p_idx + 1, len);
427 +               memmove(curp->p_idx + 1, curp->p_idx, len);
428 +               ix = curp->p_idx;
429 +       }
430 +
431 +       ix->e_block = logical;
432 +       ix->e_leaf = ptr;
433 +       curp->p_hdr->e_num++;
434 +
435 +       err = ext3_ext_dirty(handle, inode, curp);
436 +       ext3_std_error(inode->i_sb, err);
437 +
438 +       return err;
439 +}
440 +
441 +/*
442 + * routine inserts new subtree into the path, using the free index entry
443 + * at depth 'at':
444 + *  - allocates all needed blocks (new leaf and all intermediate index blocks)
445 + *  - decides where to split
446 + *  - moves remaining extents and index entries (right of the split point)
447 + *    into the newly allocated blocks
448 + *  - initializes the subtree
449 + */
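+/*
+ * note: the first of those blocks is taken from newext itself
+ * ("ablocks[0] = newext->e_start++" below), so the caller's extent
+ * shrinks by one block; ext3_ext_create_new_leaf() allocates a
+ * replacement block afterwards when newext->e_num drops to zero
+ */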
450 +static int ext3_ext_split(handle_t *handle, struct inode *inode,
451 +                               struct ext3_ext_path *path,
452 +                               struct ext3_extent *newext, int at)
453 +{
454 +       struct buffer_head *bh = NULL;
455 +       int depth = EXT3_I(inode)->i_depth;
456 +       struct ext3_extent_header *neh;
457 +       struct ext3_extent_idx *fidx;
458 +       struct ext3_extent *ex;
459 +       int i = at, k, m, a;
460 +       long newblock, oldblock, border;
461 +       int *ablocks = NULL; /* array of allocated blocks */
462 +       int err = 0;
463 +
464 +       /* make decision: where to split? */
465 +       /* FIXME: for now the decision is the simplest one: split at the current extent */
466 +
467 +       /* if the current leaf will be split, then we should use
468 +        * the border from the split point */
469 +       if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
470 +               border = path[depth].p_ext[1].e_block;
471 +               ext_debug(inode, "leaf will be split."
472 +                               " next leaf starts at %d\n",
473 +                               (int)border);
474 +       } else {
475 +               border = newext->e_block;
476 +               ext_debug(inode, "leaf will be added."
477 +                               " next leaf starts at %d\n",
478 +                               (int)border);
479 +       }
480 +
481 +       /*
482 +        * if an error occurs, we break processing
483 +        * and turn the filesystem read-only, so the index won't
484 +        * be inserted and the tree will stay in a consistent
485 +        * state. the next mount will repair the buffers too
486 +        */
487 +
488 +       /*
489 +        * get an array to track all allocated blocks;
490 +        * we need this to handle errors and to free the
491 +        * blocks on failure
492 +        */
493 +       ablocks = kmalloc(sizeof(long) * depth, GFP_NOFS);
494 +       if (!ablocks)
495 +               return -ENOMEM;
496 +       memset(ablocks, 0, sizeof(long) * depth);
497 +
498 +       /* allocate all needed blocks */
499 +       ext_debug(inode, "allocate %d blocks for indexes and leaf\n",
500 +                       depth - at);
501 +       ablocks[0] = newext->e_start++;
502 +       newext->e_num--;
503 +       for (a = 1; a < depth - at; a++) {
504 +               newblock = ext3_new_block(handle, inode, newext->e_start,
505 +                                               0, 0, &err);
506 +               if (newblock == 0)
507 +                       goto cleanup;
508 +               ablocks[a] = newblock;
509 +       }
510 +
511 +       /* initialize new leaf */
512 +       newblock = ablocks[--a];
513 +       EXT_ASSERT(newblock);
514 +       bh = sb_getblk(inode->i_sb, newblock);
515 +       if (!bh) {
516 +               err = -EIO;
517 +               goto cleanup;
518 +       }
519 +       lock_buffer(bh);
520 +
521 +       if ((err = ext3_journal_get_create_access(handle, bh)))
522 +               goto cleanup;
523 +
524 +       neh = (struct ext3_extent_header *) bh->b_data;
525 +       neh->e_num = 0;
526 +       neh->e_max = ext3_ext_space_block(inode);
527 +       ex = EXT_FIRST_EXTENT(neh);
528 +
529 +       /* move the remainder of path[depth] to the new leaf */
530 +       EXT_ASSERT(path[depth].p_hdr->e_num ==
531 +                       path[depth].p_hdr->e_max);
532 +       /* start copy from next extent */
533 +       /* TODO: we could do it by single memmove */
534 +       m = 0;
535 +       path[depth].p_ext++;
536 +       while (path[depth].p_ext <=
537 +                       EXT_MAX_EXTENT(path[depth].p_hdr)) {
538 +               ext_debug(inode, "move %d:%d:%d in new leaf\n",
539 +                               path[depth].p_ext->e_block,
540 +                               path[depth].p_ext->e_start,
541 +                               path[depth].p_ext->e_num);
542 +               memmove(ex++, path[depth].p_ext++,
543 +                               sizeof(struct ext3_extent));
544 +               neh->e_num++;
545 +               m++;
546 +       }
547 +       mark_buffer_uptodate(bh, 1);
548 +       unlock_buffer(bh);
549 +
550 +       if ((err = ext3_journal_dirty_metadata(handle, bh)))
551 +               goto cleanup;   
552 +       brelse(bh);
553 +       bh = NULL;
554 +
555 +       /* correct old leaf */
556 +       if (m) {
557 +               if ((err = ext3_ext_get_access(handle, inode, path)))
558 +                       goto cleanup;
559 +               path[depth].p_hdr->e_num -= m;
560 +               if ((err = ext3_ext_dirty(handle, inode, path)))
561 +                       goto cleanup;
562 +               
563 +       }
564 +
565 +       /* create intermediate indexes */
566 +       k = depth - at - 1;
567 +       EXT_ASSERT(k >= 0);
568 +       if (k)
569 +               ext_debug(inode,
570 +                               "create %d intermediate indices\n", k);
571 +       /* insert new index into current index block */
572 +       /* current depth stored in i var */
573 +       i = depth - 1;
574 +       while (k--) {
575 +               oldblock = newblock;
576 +               newblock = ablocks[--a];
577 +               bh = sb_getblk(inode->i_sb, newblock);
578 +               if (!bh) {
579 +                       err = -EIO;
580 +                       goto cleanup;
581 +               }
582 +               lock_buffer(bh);
583 +
584 +               if ((err = ext3_journal_get_create_access(handle, bh)))
585 +                       goto cleanup;
586 +
587 +               neh = (struct ext3_extent_header *) bh->b_data;
588 +               neh->e_num = 1;
589 +               neh->e_max = ext3_ext_space_block(inode);
590 +               fidx = EXT_FIRST_INDEX(neh);
591 +               fidx->e_block = border;
592 +               fidx->e_leaf = oldblock;
593 +
594 +               ext_debug(inode,
595 +                               "int.index at %d (block %u): %d -> %d\n",
596 +                               i, (unsigned) newblock,
597 +                               (int) border,
598 +                               (int) oldblock);
599 +               /* copy indexes */
600 +               m = 0;
601 +               path[i].p_idx++;
602 +               EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
603 +                               EXT_LAST_INDEX(path[i].p_hdr));
604 +               ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
605 +                               EXT_MAX_INDEX(path[i].p_hdr));
606 +               while (path[i].p_idx <=
607 +                               EXT_MAX_INDEX(path[i].p_hdr)) {
608 +                       ext_debug(inode, "%d: move %d:%d in new index\n",
609 +                                       i, path[i].p_idx->e_block,
610 +                                       path[i].p_idx->e_leaf);
611 +                       memmove(++fidx, path[i].p_idx++,
612 +                                       sizeof(struct ext3_extent_idx));
613 +                       neh->e_num++;
614 +                       m++;
615 +               }
616 +
617 +               mark_buffer_uptodate(bh, 1);
618 +               unlock_buffer(bh);
619 +
620 +               if ((err = ext3_journal_dirty_metadata(handle, bh)))
621 +                       goto cleanup;
622 +               brelse(bh);
623 +               bh = NULL;
624 +
625 +               /* correct old index */
626 +               if (m) {
627 +                       err = ext3_ext_get_access(handle,inode,path+i);
628 +                       if (err)
629 +                               goto cleanup;
630 +                       path[i].p_hdr->e_num -= m;
631 +                       err = ext3_ext_dirty(handle, inode, path + i);
632 +                       if (err)
633 +                               goto cleanup;
634 +               }
635 +
636 +               i--;
637 +       }
638 +
639 +       /* insert new index */
640 +       if (!err) 
641 +               err = ext3_ext_insert_index(handle, inode, path + at,
642 +                                               border, newblock);
643 +
644 +cleanup:
645 +       if (bh) {
646 +               if (buffer_locked(bh))
647 +                       unlock_buffer(bh);
648 +               brelse(bh);
649 +       }
650 +
651 +       if (err) {
652 +               /* free all allocated blocks in error case */
653 +               for (i = 0; i < depth; i++)
654 +                       if (ablocks[i])
655 +                               ext3_free_blocks(handle, inode,
656 +                                                ablocks[i], 1);
657 +       }
658 +       kfree(ablocks);
659 +
660 +       return err;
661 +}
662 +
663 +/*
664 + * routine implements tree growing procedure:
665 + *  - allocates new block
666 + *  - moves top-level data (index block or leaf) into the new block
667 + *  - initializes the new top-level, creating an index that points to the
668 + *    just created block
669 + */
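+/*
+ * rough before/after sketch, assuming the tree was at depth 0:
+ *
+ *   before:  i_data = [ hdr | extents... ]                i_depth = 0
+ *   after:   i_data = [ hdr | one index -> newblock ]     i_depth = 1
+ *            newblock = [ hdr | the old extents... ]
+ */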
670 +static int ext3_ext_grow_indepth(handle_t *handle, struct inode *inode,
671 +                                       struct ext3_ext_path *path,
672 +                                       struct ext3_extent *newext)
673 +{
674 +       struct buffer_head *bh;
675 +       struct ext3_ext_path *curp = path;
676 +       struct ext3_extent_header *neh;
677 +       struct ext3_extent_idx *fidx;
678 +       int len, err = 0;
679 +       long newblock;
680 +
681 +       /*
682 +        * use the block already allocated by the caller for the new root block
683 +        */
684 +       newblock = newext->e_start++;
685 +       newext->e_num--;
686 +       
687 +       bh = sb_getblk(inode->i_sb, newblock);
688 +       if (!bh) {
689 +               err = -EIO;
690 +               ext3_std_error(inode->i_sb, err);
691 +               return err;
692 +       }
693 +       lock_buffer(bh);
694 +
695 +       if ((err = ext3_journal_get_create_access(handle, bh))) {
696 +               unlock_buffer(bh);
697 +               goto out;       
698 +       }
699 +
700 +       /* move top-level index/leaf into new block */
701 +       len = sizeof(struct ext3_extent_header) +
702 +               sizeof(struct ext3_extent) * curp->p_hdr->e_max;
703 +       EXT_ASSERT(len >= 0 && len < 4096);
704 +       memmove(bh->b_data, curp->p_hdr, len);
705 +
706 +       /* set size of new block */
707 +       neh = (struct ext3_extent_header *) bh->b_data;
708 +       neh->e_max = ext3_ext_space_block(inode);
709 +       mark_buffer_uptodate(bh, 1);
710 +       unlock_buffer(bh);
711 +
712 +       if ((err = ext3_journal_dirty_metadata(handle, bh)))
713 +               goto out;
714 +
715 +       /* create index in new top-level index: num,max,pointer */
716 +       if ((err = ext3_ext_get_access(handle, inode, curp)))
717 +               goto out;
718 +
719 +       curp->p_hdr->e_max = ext3_ext_space_inode_idx(inode);
720 +       curp->p_hdr->e_num = 1;
721 +       curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
722 +       curp->p_idx->e_block = EXT_FIRST_EXTENT(path[0].p_hdr)->e_block;
723 +       curp->p_idx->e_leaf = newblock;
724 +
725 +       neh = (struct ext3_extent_header *) EXT3_I(inode)->i_data;
726 +       fidx = EXT_FIRST_INDEX(neh);
727 +       ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %d\n",
728 +                       neh->e_num, neh->e_max, fidx->e_block, fidx->e_leaf); 
729 +
730 +       EXT3_I(inode)->i_depth++;
731 +       err = ext3_ext_dirty(handle, inode, curp);
732 +out:
733 +       brelse(bh);
734 +
735 +       return err;
736 +}
737 +
738 +/*
739 + * routine finds an index with a free entry and adds a new leaf. if no
740 + * free index entry is found, it requests in-depth growing
741 + */
742 +static int ext3_ext_create_new_leaf(handle_t *handle, struct inode *inode,
743 +                                       struct ext3_ext_path *path,
744 +                                       struct ext3_extent *newext)
745 +{
746 +       int depth = EXT3_I(inode)->i_depth;
747 +       struct ext3_ext_path *curp;
748 +       int i = depth, err = 0;
749 +       long newblock = newext->e_start;
750 +
751 +       /* walk up to the tree and look for free index entry */
752 +       curp = path + depth;
753 +       while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
754 +               i--;
755 +               curp--;
756 +       }
757 +
758 +       /* we use the already allocated block for the index block,
759 +        * so subsequent data blocks should be contiguous */
760 +       if (EXT_HAS_FREE_INDEX(curp)) {
761 +               /* if we found index with free entry, then use that
762 +                * entry: create all needed subtree and add new leaf */
763 +               err = ext3_ext_split(handle, inode, path, newext, i);
764 +       } else {
765 +               /* tree is full, time to grow in depth */
766 +               err = ext3_ext_grow_indepth(handle, inode, path, newext);
767 +       }
768 +
769 +       if (!err) {
770 +               /* refill path */
771 +               ext3_ext_drop_refs(inode, path);
772 +               path = ext3_ext_find_extent(inode, newext->e_block, path);
773 +               if (IS_ERR(path))
774 +                       err = PTR_ERR(path);
775 +
776 +               /*
777 +                * probably we've used some blocks from extent
778 +                * let's allocate new block for it
779 +                */
780 +               if (newext->e_num == 0 && !err) {
781 +                       newext->e_start =
782 +                               ext3_new_block(handle, inode, newblock,
783 +                                               0, 0, &err);
784 +                       newext->e_num = 1;
785 +               }
786 +       }
787 +
788 +       return err;
789 +}
790 +
791 +/*
792 + * returns next allocated block or 0xffffffff
793 + * NOTE: it considers the block number from an index entry as an
794 + * allocated block. thus, index entries have to be consistent
795 + * with the leaves
796 + */
797 +static inline unsigned ext3_ext_next_allocated_block(struct inode *inode,
798 +                                               struct ext3_ext_path *path)
799 +{
800 +       int depth;
801 +
802 +       EXT_ASSERT(path != NULL);
803 +       depth = path->p_depth;
804 +
805 +       if (depth == 0 && path->p_ext == NULL)
806 +               return 0xffffffff;
807 +
808 +       /* FIXME: what if index isn't full ?! */
809 +       while (depth >= 0) {
810 +               if (depth == path->p_depth) {
811 +                       /* leaf */
812 +                       if (path[depth].p_ext !=
813 +                                       EXT_LAST_EXTENT(path[depth].p_hdr))
814 +                               return path[depth].p_ext[1].e_block;
815 +               } else {
816 +                       /* index */
817 +                       if (path[depth].p_idx !=
818 +                                       EXT_LAST_INDEX(path[depth].p_hdr))
819 +                               return path[depth].p_idx[1].e_block;
820 +               }
821 +               depth--;        
822 +       }
823 +
824 +       return 0xffffffff;
825 +}
826 +
827 +/*
828 + * returns first allocated block from next leaf or 0xffffffff
829 + */
830 +static unsigned ext3_ext_next_leaf_block(struct inode *inode,
831 +                                               struct ext3_ext_path *path)
832 +{
833 +       int depth;
834 +
835 +       EXT_ASSERT(path != NULL);
836 +       depth = path->p_depth;
837 +
838 +       /* zero-tree has no leaf blocks at all */
839 +       if (depth == 0)
840 +               return 0xffffffff;
841 +
842 +       /* go to index block */
843 +       depth--;
844 +       
845 +       while (depth >= 0) {
846 +               if (path[depth].p_idx !=
847 +                               EXT_LAST_INDEX(path[depth].p_hdr))
848 +                       return path[depth].p_idx[1].e_block;
849 +               depth--;        
850 +       }
851 +
852 +       return 0xffffffff;
853 +}
854 +
855 +/*
856 + * if leaf gets modified and modified extent is first in the leaf
857 + * then we have to correct all indexes above
858 + * TODO: do we need to correct tree in all cases?
859 + */
860 +int ext3_ext_correct_indexes(handle_t *handle, struct inode *inode,
861 +                               struct ext3_ext_path *path)
862 +{
863 +       int depth = EXT3_I(inode)->i_depth;     
864 +       struct ext3_extent_header *eh;
865 +       struct ext3_extent *ex;
866 +       long border;
867 +       int k, err = 0;
868 +       
869 +       eh = path[depth].p_hdr;
870 +       ex = path[depth].p_ext;
871 +
872 +       EXT_ASSERT(ex);
873 +       EXT_ASSERT(eh);
874 +       
875 +       if (depth == 0) {
876 +               /* there is no tree at all */
877 +               return 0;
878 +       }
879 +       
880 +       if (ex != EXT_FIRST_EXTENT(eh)) {
881 +               /* we correct tree if first leaf got modified only */
882 +               return 0;
883 +       }
884 +       
885 +       k = depth - 1;
886 +       border = path[depth].p_ext->e_block;
887 +       if ((err = ext3_ext_get_access(handle, inode, path + k)))
888 +               return err;
889 +       path[k].p_idx->e_block = border;
890 +       if ((err = ext3_ext_dirty(handle, inode, path + k)))
891 +               return err;
892 +
893 +       while (k--) {
894 +               /* change all left-side indexes */
895 +               if (path[k].p_idx != EXT_FIRST_INDEX(path[k].p_hdr)
896 +                               && k != 0)
897 +                       break;
898 +               if ((err = ext3_ext_get_access(handle, inode, path + k)))
899 +                       break;
900 +               path[k].p_idx->e_block = border;
901 +               if ((err = ext3_ext_dirty(handle, inode, path + k)))
902 +                       break;
903 +       }
904 +
905 +       return err;
906 +}
907 +
908 +/*
909 + * this routine tries to merge the requested extent into the existing
910 + * extent or inserts requested extent as new one into the tree,
911 + * creating new leaf in no-space case
912 + */
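+/*
+ * for example (hypothetical numbers): inserting {103, 5003, 1} into a
+ * leaf whose found extent is {100, 5000, 3} simply extends that extent
+ * to {100, 5000, 4}, since both the logical and the physical ranges
+ * are adjacent (the condition checked first thing below)
+ */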
913 +int ext3_ext_insert_extent(handle_t *handle, struct inode *inode,
914 +                               struct ext3_ext_path *path,
915 +                               struct ext3_extent *newext)
916 +{
917 +       int depth, len;
918 +       struct ext3_extent_header * eh;
919 +       struct ext3_extent *ex;
920 +       struct ext3_extent *nearex; /* nearest extent */
921 +       struct ext3_ext_path *npath = NULL;
922 +       int err;
923 +
924 +       depth = EXT3_I(inode)->i_depth; 
925 +       if ((ex = path[depth].p_ext)) {
926 +               /* try to insert block into found extent and return */
927 +               if (ex->e_block + ex->e_num == newext->e_block &&
928 +                               ex->e_start + ex->e_num == newext->e_start) {
929 +#ifdef AGRESSIVE_TEST
930 +                       if (ex->e_num >= 2)
931 +                               goto repeat;
932 +#endif
933 +                       if ((err = ext3_ext_get_access(handle, inode,
934 +                                                       path + depth)))
935 +                               return err;
936 +                       ext_debug(inode, "append %d block to %d:%d (from %d)\n",
937 +                                       newext->e_num, ex->e_block, ex->e_num,
938 +                                       ex->e_start);
939 +                       ex->e_num += newext->e_num;
940 +                       err = ext3_ext_dirty(handle, inode, path + depth);
941 +                       return err;
942 +               }
943 +       }
944 +
945 +repeat:
946 +       depth = EXT3_I(inode)->i_depth; 
947 +       eh = path[depth].p_hdr;
948 +       if (eh->e_num == eh->e_max) {
949 +               /* probably next leaf has space for us? */
950 +               int next = ext3_ext_next_leaf_block(inode, path);
951 +               if (next != 0xffffffff) {
952 +                       ext_debug(inode, "next leaf block - %d\n", next);
953 +                       EXT_ASSERT(!npath);
954 +                       npath = ext3_ext_find_extent(inode, next, NULL);
955 +                       if (IS_ERR(npath))
956 +                               return PTR_ERR(npath);
957 +                       EXT_ASSERT(npath->p_depth == path->p_depth);
958 +                       eh = npath[depth].p_hdr;
959 +                       if (eh->e_num < eh->e_max) {
960 +                               ext_debug(inode,
961 +                                               "next leaf has free ext(%d)\n",
962 +                                               eh->e_num);
963 +                               path = npath;
964 +                               goto repeat;
965 +                       }
966 +                       ext_debug(inode, "next leaf has no free space(%d,%d)\n",
967 +                                       eh->e_num, eh->e_max);
968 +               }
969 +               /*
970 +                * there is no free space in found leaf
971 +                * we're gonna add new leaf in the tree
972 +                */
973 +               err = ext3_ext_create_new_leaf(handle, inode, path, newext);
974 +               if (err)
975 +                       goto cleanup;
976 +               goto repeat;
977 +       }
978 +
979 +       nearex = path[depth].p_ext;
980 +
981 +       if ((err = ext3_ext_get_access(handle, inode, path + depth)))
982 +               goto cleanup;
983 +
984 +       if (!nearex) {
985 +               /* there is no extent in this leaf, create first one */
986 +               ext_debug(inode, "first extent in the leaf: %d:%d:%d\n",
987 +                               newext->e_block, newext->e_start,
988 +                               newext->e_num);
989 +               eh->e_num++;
990 +               path[depth].p_ext = EXT_FIRST_EXTENT(eh);
991 +
992 +       } else if (newext->e_block > nearex->e_block) {
993 +               EXT_ASSERT(newext->e_block != nearex->e_block);
994 +               len = EXT_MAX_EXTENT(eh) - nearex;
995 +               len = (len - 1) * sizeof(struct ext3_extent);
996 +               len = len < 0 ? 0 : len;
997 +               ext_debug(inode, "insert %d:%d:%d after: nearest 0x%p, "
998 +                               "move %d from 0x%p to 0x%p\n",
999 +                               newext->e_block, newext->e_start, newext->e_num,
1000 +                               nearex, len, nearex + 1, nearex + 2);
1001 +               ext3_ext_check_boundary(inode, path + depth, nearex + 2, len);
1002 +               memmove(nearex + 2, nearex + 1, len);
1003 +               path[depth].p_ext = nearex + 1;
1004 +               eh->e_num++;
1005 +       } else {
1006 +               EXT_ASSERT(newext->e_block != nearex->e_block);
1007 +               len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
1008 +               len = len < 0 ? 0 : len;
1009 +               ext_debug(inode, "insert %d:%d:%d before: nearest 0x%p, "
1010 +                               "move %d from 0x%p to 0x%p\n",
1011 +                               newext->e_block, newext->e_start, newext->e_num,
1012 +                               nearex, len, nearex + 1, nearex + 2);
1013 +               memmove(nearex + 1, nearex, len);
1014 +               path[depth].p_ext = nearex;
1015 +               eh->e_num++;
1016 +
1017 +               /* time to correct all indexes above */
1018 +               err = ext3_ext_correct_indexes(handle, inode, path);
1019 +       }
1020 +
1021 +       if (!err) {
1022 +               nearex = path[depth].p_ext;
1023 +               nearex->e_block = newext->e_block;
1024 +               nearex->e_start = newext->e_start;
1025 +               nearex->e_num = newext->e_num;
1026 +       }
1027 +
1028 +       err = ext3_ext_dirty(handle, inode, path + depth);
1029 +
1030 +cleanup:
1031 +       if (npath) {
1032 +               ext3_ext_drop_refs(inode, npath);
1033 +               kfree(npath);
1034 +       }
1035 +               
1036 +       return err;
1037 +}
1038 +
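+/*
+ * ext3_ext_get_block() is the extent-tree counterpart of the usual
+ * get_block callback: it looks the logical block up in the tree and,
+ * if 'create' is set and no extent covers it, allocates one new block
+ * and inserts a fresh single-block extent for it
+ */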
1039 +int ext3_ext_get_block(handle_t *handle, struct inode *inode, long iblock,
1040 +                       struct buffer_head *bh_result, int create)
1041 +{
1042 +       struct ext3_ext_path *path;
1043 +       int depth = EXT3_I(inode)->i_depth;
1044 +       struct ext3_extent newex;
1045 +       struct ext3_extent *ex;
1046 +       int goal, newblock, err = 0;
1047 +
1048 +       ext_debug(inode, "block %d requested for inode %u, bh_result 0x%p\n",
1049 +                       (int) iblock, (unsigned) inode->i_ino, bh_result);
1050 +       bh_result->b_state &= ~(1UL << BH_New);
1051 +
1052 +       down(&EXT3_I(inode)->i_ext_sem);
1053 +
1054 +       /* find extent for this block */
1055 +       path = ext3_ext_find_extent(inode, iblock, NULL);
1056 +       if (IS_ERR(path)) {
1057 +               err = PTR_ERR(path);
1058 +               goto out2;
1059 +       }
1060 +
1061 +       if ((ex = path[depth].p_ext)) {
1062 +               /* if the found extent covers the block, simply return it */
1063 +               if (iblock >= ex->e_block && iblock < ex->e_block + ex->e_num) {
1064 +                       newblock = iblock - ex->e_block + ex->e_start;
1065 +                       ext_debug(inode, "%d fit into %d:%d -> %d\n",
1066 +                                       (int) iblock, ex->e_block, ex->e_num,
1067 +                                       newblock);
1068 +                       goto out;
1069 +               }
1070 +       }
1071 +
1072 +       /*
1073 +        * we can't try to create a block if the create flag is zero
1074 +        */
1075 +       if (!create) 
1076 +               goto out2;
1077 +
1078 +       /* allocate new block */
1079 +       goal = ext3_ext_find_goal(inode, path);
1080 +       newblock = ext3_new_block(handle, inode, goal, 0, 0, &err);
1081 +       if (!newblock)
1082 +               goto out2;
1083 +       ext_debug(inode, "allocate new block: goal %d, found %d\n",
1084 +                       goal, newblock);
1085 +
1086 +       /* try to insert new extent into found leaf and return */
1087 +       newex.e_block = iblock;
1088 +       newex.e_start = newblock;
1089 +       newex.e_num = 1;
1090 +       err = ext3_ext_insert_extent(handle, inode, path, &newex);
1091 +       if (err)
1092 +               goto out2;
1093 +       
1094 +       /* previous routine could use block we allocated */
1095 +       newblock = newex.e_start;
1096 +       bh_result->b_state |= (1UL << BH_New);
1097 +
1098 +out:
1099 +       ext3_ext_show_leaf(inode, path);
1100 +       bh_result->b_dev = inode->i_dev;
1101 +       bh_result->b_blocknr = newblock;
1102 +       bh_result->b_state |= (1UL << BH_Mapped);
1103 +out2:
1104 +       ext3_ext_drop_refs(inode, path);
1105 +       kfree(path);
1106 +       up(&EXT3_I(inode)->i_ext_sem);
1107 +
1108 +       return err;     
1109 +}
1110 +
1111 +/*
1112 + * returns 1 if the current index has to be freed (even partially)
1113 + */
1114 +static int ext3_ext_more_to_truncate(struct inode *inode,
1115 +                               struct ext3_ext_path *path)
1116 +{
1117 +       EXT_ASSERT(path->p_idx);
1118 +
1119 +       if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1120 +               return 0;
1121 +
1122 +       /*
1123 +        * if truncation on a deeper level happened, it wasn't partial,
1124 +        * so we have to consider the current index for truncation
1125 +        */
1126 +       if (path->p_hdr->e_num == path->p_block)
1127 +               return 0;
1128 +
1129 +       /*
1130 +        * store the actual number of indexes so we can tell whether it
1131 +        * changed by the next iteration
1132 +        */
1133 +       path->p_block = path->p_hdr->e_num;
1134 +       
1135 +       return 1;
1136 +}
1137 +
1138 +/*
1139 + * routine removes index from the index block
1140 + * it's used in truncate case only. thus all requests are for
1141 + * last index in the block only
1142 + */
1143 +int ext3_ext_remove_index(handle_t *handle, struct inode *inode,
1144 +                                       struct ext3_ext_path *path)
1145 +{
1146 +       struct buffer_head *bh;
1147 +       int err;
1148 +       
1149 +       /* free index block */
1150 +       path--;
1151 +       EXT_ASSERT(path->p_hdr->e_num);
1152 +       if ((err = ext3_ext_get_access(handle, inode, path)))
1153 +               return err;
1154 +       path->p_hdr->e_num--;
1155 +       if ((err = ext3_ext_dirty(handle, inode, path)))
1156 +               return err;
1157 +       bh = sb_get_hash_table(inode->i_sb, path->p_idx->e_leaf);
1158 +       ext3_forget(handle, 0, inode, bh, path->p_idx->e_leaf);
1159 +       ext3_free_blocks(handle, inode, path->p_idx->e_leaf, 1);
1160 +
1161 +       ext_debug(inode, "index is empty, remove it, free block %d\n",
1162 +                       path->p_idx->e_leaf);
1163 +       return err;
1164 +}
1165 +
1166 +/*
1167 + * returns 1 if the current extent needs to be freed (even partially),
1168 + * otherwise returns 0
1169 + */
1170 +int ext3_ext_more_leaves_to_truncate(struct inode *inode,
1171 +                                       struct ext3_ext_path *path)
1172 +{
1173 +       unsigned blocksize = inode->i_sb->s_blocksize;
1174 +       struct ext3_extent *ex = path->p_ext;
1175 +       int last_block; 
1176 +
1177 +       EXT_ASSERT(ex);
1178 +
1179 +       /* is there anything left in the current leaf? */
1180 +       if (ex < EXT_FIRST_EXTENT(path->p_hdr))
1181 +               return 0;
1182 +       
1183 +       last_block = (inode->i_size + blocksize-1)
1184 +                       >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
1185 +
1186 +       if (last_block >= ex->e_block + ex->e_num)
1187 +               return 0;
1188 +
1189 +       /* seems this extent has to be freed */
1190 +       return 1;
1191 +}
1192 +
1193 +handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
1194 +{
1195 +       int err;
1196 +
1197 +       if (handle->h_buffer_credits > needed)
1198 +               return handle;
1199 +       if (!ext3_journal_extend(handle, needed))
1200 +               return handle;
1201 +       err = ext3_journal_restart(handle, needed);
1202 +
1203 +       return err ? ERR_PTR(err) : handle;
1204 +}
1205 +
1206 +/*
1207 + * this routine calculates the max number of blocks to be modified
1208 + * while freeing an extent and is intended to be used in the truncate path
1209 + */
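+/*
+ * e.g. for path == NULL (worst case assumed) with i_depth == 2 this
+ * returns 2 + (2 * EXT3_ALLOC_NEEDED) + 1 == 7 modified blocks
+ */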
1210 +static int ext3_ext_calc_credits(struct inode *inode,
1211 +                                       struct ext3_ext_path *path,
1212 +                                       int num)
1213 +{
1214 +       int depth = EXT3_I(inode)->i_depth;
1215 +       int needed;
1216 +       
1217 +       /*
1218 +        * an extent can't cross a group boundary, so we will modify a
1219 +        * single bitmap block and a single group descriptor
1220 +        */
1221 +       needed = 2;
1222 +
1223 +       /*
1224 +        * if this is last extent in a leaf, then we have to
1225 +        * free leaf block and remove pointer from index above.
1226 +        * that pointer could be last in index block, so we'll
1227 +        * have to remove it too. this way we could modify/free
1228 +        * the whole path + root index (inode stored) will be
1229 +        * modified
1230 +        */
1231 +       if (!path || (num == path->p_ext->e_num &&
1232 +                               path->p_ext == EXT_FIRST_EXTENT(path->p_hdr)))
1233 +               needed += (depth * EXT3_ALLOC_NEEDED) + 1;
1234 +
1235 +       return needed;
1236 +}
1237 +
1238 +/*
1239 + * core of the truncate procedure:
1240 + * - calculates what part of each extent in the requested leaf
1241 + *   needs to be freed
1242 + * - frees and forgets these blocks
1243 + *
1244 + * TODO: we could optimize and free several extents during
1245 + *       single journal_restart()-journal_restart() cycle
1246 + */
1247 +static int ext3_ext_truncate_leaf(handle_t *handle,
1248 +                                       struct inode *inode,
1249 +                                       struct ext3_ext_path *path,
1250 +                                       int depth)
1251 +{
1252 +       unsigned blocksize = inode->i_sb->s_blocksize;
1253 +       int last_block; 
1254 +       int i, err = 0, sf, num;
1255 +
1256 +       ext_debug(inode, "level %d - leaf\n", depth);
1257 +       if (!path->p_hdr)
1258 +               path->p_hdr =
1259 +                       (struct ext3_extent_header *) path->p_bh->b_data;
1260 +
1261 +       EXT_ASSERT(path->p_hdr->e_num <= path->p_hdr->e_max);
1262 +       
1263 +       last_block = (inode->i_size + blocksize-1)
1264 +                                       >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
1265 +       path->p_ext = EXT_LAST_EXTENT(path->p_hdr);
1266 +       while (ext3_ext_more_leaves_to_truncate(inode, path)) {
1267 +
1268 +               /* what part of extent have to be freed? */
1269 +               sf = last_block > path->p_ext->e_block ?
1270 +                       last_block : path->p_ext->e_block;
1271 +
1272 +               /* number of blocks from extent to be freed */
1273 +               num = path->p_ext->e_block + path->p_ext->e_num - sf;
1274 +
1275 +               /* calc the first physical block to be freed */
1276 +               sf = path->p_ext->e_start + (sf - path->p_ext->e_block);
1277 +
1278 +               i = ext3_ext_calc_credits(inode, path, num);
1279 +               handle = ext3_ext_journal_restart(handle, i);
1280 +               if (IS_ERR(handle))
1281 +                       return PTR_ERR(handle);
1282 +               
1283 +               ext_debug(inode, "free extent %d:%d:%d -> free %d:%d\n",
1284 +                               path->p_ext->e_block, path->p_ext->e_start,
1285 +                               path->p_ext->e_num, sf, num);
1286 +               for (i = 0; i < num; i++) {
1287 +                       struct buffer_head *bh =
1288 +                               sb_get_hash_table(inode->i_sb, sf + i);
1289 +                       ext3_forget(handle, 0, inode, bh, sf + i);
1290 +               }
1291 +               ext3_free_blocks(handle, inode, sf, num);
1292 +
1293 +               /* collect extents usage stats */
1294 +               spin_lock(&EXT3_SB(inode->i_sb)->s_ext_lock);
1295 +               EXT3_SB(inode->i_sb)->s_ext_extents++;
1296 +               EXT3_SB(inode->i_sb)->s_ext_blocks += num;
1297 +               spin_unlock(&EXT3_SB(inode->i_sb)->s_ext_lock);
1298 +
1299 +               /* reduce extent */
1300 +               if ((err = ext3_ext_get_access(handle, inode, path)))
1301 +                       return err;
1302 +               path->p_ext->e_num -= num;
1303 +               if (path->p_ext->e_num == 0)
1304 +                       path->p_hdr->e_num--;
1305 +               if ((err = ext3_ext_dirty(handle, inode, path)))
1306 +                       return err;
1307 +
1308 +               path->p_ext--;
1309 +       }
1310 +       
1311 +       /* if this leaf is free, then we should
1312 +        * remove it from index block above */
1313 +       if (path->p_hdr->e_num == 0 && depth > 0) 
1314 +               err = ext3_ext_remove_index(handle, inode, path);
1315 +
1316 +       return err;
1317 +}
1318 +
1319 +static void ext3_ext_collect_stats(struct inode *inode)
1320 +{
1321 +       int depth;
1322 +       
1323 +       /* skip inodes with the good old block map */
1324 +       if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
1325 +               return;
1326 +       
1327 +       /* collect on full truncate only */
1328 +       if (inode->i_size)
1329 +               return;
1330 +
1331 +       depth = EXT3_I(inode)->i_depth;
1332 +       if (depth < EXT3_SB(inode->i_sb)->s_ext_mindepth)
1333 +                EXT3_SB(inode->i_sb)->s_ext_mindepth = depth;
1334 +       if (depth > EXT3_SB(inode->i_sb)->s_ext_maxdepth)
1335 +                EXT3_SB(inode->i_sb)->s_ext_maxdepth = depth;
1336 +       EXT3_SB(inode->i_sb)->s_ext_sum += depth;
1337 +       EXT3_SB(inode->i_sb)->s_ext_count++;
1338 +       
1339 +}
1340 +
1341 +void ext3_ext_truncate(struct inode * inode)
1342 +{
1343 +       struct address_space *mapping = inode->i_mapping;
1344 +       struct ext3_ext_path *path;
1345 +       struct page * page;
1346 +       handle_t *handle;
1347 +       int i, depth, err = 0;
1348 +
1349 +       ext3_ext_collect_stats(inode);
1350 +
1351 +       /*
1352 +        * We have to lock the EOF page here, because lock_page() nests
1353 +        * outside journal_start().
1354 +        */
1355 +       if ((inode->i_size & (inode->i_sb->s_blocksize - 1)) == 0) {
1356 +               /* Block boundary? Nothing to do */
1357 +               page = NULL;
1358 +       } else {
1359 +               page = grab_cache_page(mapping,
1360 +                               inode->i_size >> PAGE_CACHE_SHIFT);
1361 +               if (!page)
1362 +                       return;
1363 +       }
1364 +
1365 +       /*
1366 +        * probably first extent we're gonna free will be last in block
1367 +        */
1368 +       i = ext3_ext_calc_credits(inode, NULL, 0);
1369 +       handle = ext3_journal_start(inode, i);
1370 +       if (IS_ERR(handle)) {
1371 +               if (page) {
1372 +                       clear_highpage(page);
1373 +                       flush_dcache_page(page);
1374 +                       unlock_page(page);
1375 +                       page_cache_release(page);
1376 +               }
1377 +               return;
1378 +       }
1379 +
1380 +       if (page)
1381 +               ext3_block_truncate_page(handle, mapping, inode->i_size);
1382 +
1383 +       down(&EXT3_I(inode)->i_ext_sem);
1384 +
1385 +       /* 
1386 +        * TODO: optimization is possible here
1387 +        * probably we don't need scanning at all,
1388 +        * because page truncation is enough
1389 +        */
1390 +       if (ext3_orphan_add(handle, inode))
1391 +               goto out_stop;
1392 +
1393 +       /* we have to know where to truncate from in crash case */
1394 +       EXT3_I(inode)->i_disksize = inode->i_size;
1395 +       ext3_mark_inode_dirty(handle, inode);
1396 +
1397 +       /*
1398 +        * we start scanning from the right side, freeing all the blocks
1399 +        * after i_size and walking down into the tree
1400 +        */
1401 +       i = 0;
1402 +       depth = EXT3_I(inode)->i_depth;
1403 +       path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
1404 +       if (path == NULL) {
1405 +               ext3_error(inode->i_sb, "ext3_ext_truncate",
1406 +                               "Can't allocate path array");
1407 +               goto out_stop;
1408 +       }
1409 +       memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
1410 +
1411 +       path[i].p_hdr = (struct ext3_extent_header *) EXT3_I(inode)->i_data;
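+       /* iterative, depth-first walk from the rightmost entries:
+        * 'i' is the current level; i == depth means we are at a leaf */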
1412 +       while (i >= 0 && err == 0) {
1413 +               if (i == depth) {
1414 +                       /* this is leaf block */
1415 +                       err = ext3_ext_truncate_leaf(handle, inode,
1416 +                                                       path + i, i);
1417 +                       /* root level has p_bh == NULL, brelse() eats this */
1418 +                       brelse(path[i].p_bh);
1419 +                       i--;
1420 +                       continue;
1421 +               }
1422 +               
1423 +               /* this is index block */
1424 +               if (!path[i].p_hdr) {
1425 +                       path[i].p_hdr =
1426 +                               (struct ext3_extent_header *) path[i].p_bh->b_data;
1427 +                       ext_debug(inode, "initialize header\n");
1428 +               }
1429 +
1430 +               EXT_ASSERT(path[i].p_hdr->e_num <= path[i].p_hdr->e_max);
1431 +               
1432 +               if (!path[i].p_idx) {
1433 +                       /* this level hasn't been touched yet */
1434 +                       path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
1435 +                       path[i].p_block = path[i].p_hdr->e_num + 1;
1436 +                       ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
1437 +                                       path[i].p_hdr, path[i].p_hdr->e_num);
1438 +               } else {
1439 +                       /* we've already been here; move to the next index */
1440 +                       path[i].p_idx--;
1441 +               }
1442 +
1443 +               ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
1444 +                               i, EXT_FIRST_INDEX(path[i].p_hdr),
1445 +                               path[i].p_idx);
1446 +               if (ext3_ext_more_to_truncate(inode, path + i)) {
1447 +                       /* go to the next level */
1448 +                       ext_debug(inode, "move to level %d (block %d)\n", i+1,
1449 +                                       path[i].p_idx->e_leaf);
1450 +                       memset(path + i + 1, 0, sizeof(*path));
1451 +                       path[i+1].p_bh = sb_bread(inode->i_sb,
1452 +                                                       path[i].p_idx->e_leaf);
1453 +                       if (!path[i+1].p_bh) {
1454 +                               /* should we reset i_size? */
1455 +                               err = -EIO;
1456 +                               break;
1457 +                       }
1458 +                       i++;
1459 +               } else {
1460 +                       /* we've finished processing this index, go up */
1461 +                       if (path[i].p_hdr->e_num == 0 && i > 0) {
1462 +                               /* the index is empty, remove it;
1463 +                                * the handle must already be prepared by
1464 +                                * truncate_leaf()
1465 +                                */
1466 +                               err = ext3_ext_remove_index(handle, inode,
1467 +                                                               path + i);
1468 +                       }
1469 +                       /* the root level has p_bh == NULL; brelse() handles this */
1470 +                       brelse(path[i].p_bh);
1471 +                       i--;
1472 +                       ext_debug(inode, "return to level %d\n", i);
1473 +               }
1474 +       }
1475 +
1476 +       /* TODO: flexible tree reduction should be here */
1477 +       if (path->p_hdr->e_num == 0) {
1478 +               /*
1479 +                * truncating to zero freed the whole tree,
1480 +                * so we need to reset i_depth
1481 +                */
1482 +               EXT3_I(inode)->i_depth = 0;
1483 +               path->p_hdr->e_max = 0;
1484 +               ext3_mark_inode_dirty(handle, inode);
1485 +       }
1486 +
1487 +       kfree(path);
1488 +
1489 +       /* In a multi-transaction truncate, we only make the final
1490 +        * transaction synchronous */
1491 +       if (IS_SYNC(inode))
1492 +               handle->h_sync = 1;
1493 +
1494 +out_stop:
1495 +       /*
1496 +        * If this was a simple ftruncate(), and the file will remain alive
1497 +        * then we need to clear up the orphan record which we created above.
1498 +        * However, if this was a real unlink then we were called by
1499 +        * ext3_delete_inode(), and we allow that function to clean up the
1500 +        * orphan info for us.
1501 +        */
1502 +       if (inode->i_nlink)
1503 +               ext3_orphan_del(handle, inode);
1504 +
1505 +       up(&EXT3_I(inode)->i_ext_sem);
1506 +       ext3_journal_stop(handle, inode);
1507 +}
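
For intuition, here is a small stand-alone sketch (plain user-space C, not part of the patch) of the control flow ext3_ext_truncate() uses above: an explicit path[] array, index levels descended rightmost-first, the leaf level processed when i == depth, and a step back up when a level runs out of entries. The node layout, fanout, and the walk() name are invented for the illustration, and it simply "frees" every leaf entry rather than only the blocks past i_size.

    #include <stdio.h>

    #define FANOUT 3

    struct node {
            int nchild;
            struct node *child[FANOUT];     /* used at index levels */
            int leaf[FANOUT];               /* used at the leaf level */
    };

    struct walk_state {
            struct node *node;
            int next;       /* children still to visit at this level */
    };

    static void walk(struct node *root, int depth)
    {
            struct walk_state path[8] = { { root, root->nchild } };
            int i = 0;

            while (i >= 0) {
                    struct walk_state *ws = &path[i];

                    if (i == depth) {
                            /* leaf level: free entries right to left, go up */
                            int k;

                            for (k = ws->node->nchild - 1; k >= 0; k--)
                                    printf("free leaf %d\n", ws->node->leaf[k]);
                            i--;
                            continue;
                    }
                    if (ws->next > 0) {
                            /* descend into the rightmost child not yet visited */
                            ws->next--;
                            path[i + 1].node = ws->node->child[ws->next];
                            path[i + 1].next = path[i + 1].node->nchild;
                            i++;
                    } else {
                            /* this index level is finished, go up */
                            i--;
                    }
            }
    }

    int main(void)
    {
            struct node l1 = { .nchild = 2, .leaf = { 11, 12 } };
            struct node l2 = { .nchild = 3, .leaf = { 21, 22, 23 } };
            struct node root = { .nchild = 2, .child = { &l1, &l2 } };

            walk(&root, 1);         /* frees 23 22 21, then 12 11 */
            return 0;
    }
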
1508 +
1509 +/*
1510 + * this routine calculates the max number of blocks we could modify
1511 + * in order to allocate a new block for an inode
1512 + */
1513 +int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
1514 +{
1515 +       struct ext3_inode_info *ei = EXT3_I(inode);
1516 +       int depth = ei->i_depth + 1;
1517 +       int needed;
1518 +       
1519 +       /*
1520 +        * the worst case we expect is creation of a
1521 +        * new root (growing in depth) with an index split;
1522 +        * for the split we have to consider depth + 1, because
1523 +        * the preceding growth in depth may have increased it
1524 +        */
1525 +
1526 +       /* 
1527 +        * growing in depth:
1528 +        * block allocation + new root + old root
1529 +        */
1530 +       needed = EXT3_ALLOC_NEEDED + 2;
1531 +
1532 +       /* index split. we may need to:
1533 +        *   allocate intermediate indexes and a new leaf,
1534 +        *   change two blocks at each level except the root,
1535 +        *   modify the root block (inode)
1536 +        */
1537 +       needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
1538 +
1539 +       /* the caller wants to allocate 'num' blocks */
1540 +       needed *= num;
1541 +       
1542 +#ifdef CONFIG_QUOTA
1543 +       /* 
1544 +        * FIXME: the real calculation should go here;
1545 +        * it depends on the block map format of the quota file
1546 +        */
1547 +       needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
1548 +#endif
1549 +
1550 +       return needed;
1551 +}
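
A stand-alone sketch of the arithmetic above, useful for checking the numbers: EXT3_ALLOC_NEEDED is the patch's own constant (block bitmap + group descriptor); EXT3_SINGLEDATA_TRANS_BLOCKS = 8 is assumed from ext3_jbd.h; ext_credits() is a made-up name that simply mirrors ext3_ext_writepage_trans_blocks().

    #include <stdio.h>

    #define EXT3_ALLOC_NEEDED              2
    #define EXT3_SINGLEDATA_TRANS_BLOCKS   8       /* assumed, see ext3_jbd.h */

    static int ext_credits(int i_depth, int num, int quota)
    {
            int depth = i_depth + 1;
            int needed;

            /* growing in depth: block allocation + new root + old root */
            needed = EXT3_ALLOC_NEEDED + 2;

            /* index split: intermediate indexes and a new leaf, two blocks
             * at each level except the root, plus the root block (inode) */
            needed += depth * EXT3_ALLOC_NEEDED + 2 * depth + 1;

            /* the caller wants to allocate 'num' blocks */
            needed *= num;

            if (quota)
                    needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;

            return needed;
    }

    int main(void)
    {
            /* e.g. a depth-1 tree, one block to allocate, no quota */
            printf("credits: %d\n", ext_credits(1, 1, 0));  /* prints 13 */
            return 0;
    }
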
1552 +
1553 +/*
1554 + * called at mount time
1555 + */
1556 +void ext3_ext_init(struct super_block *sb)
1557 +{
1558 +       /*
1559 +        * possible initialization would be here
1560 +        */
1561 +
1562 +       if (test_opt(sb, EXTENTS))
1563 +               printk("EXT3-fs: file extents enabled\n");
1564 +       spin_lock_init(&EXT3_SB(sb)->s_ext_lock);
1565 +}
1566 +
1567 +/*
1568 + * called at umount time
1569 + */
1570 +void ext3_ext_release(struct super_block *sb)
1571 +{
1572 +       struct ext3_sb_info *sbi = EXT3_SB(sb);
1573 +
1574 +       /* show collected stats */
1575 +       if (sbi->s_ext_count && sbi->s_ext_extents)
1576 +               printk("EXT3-fs: min depth - %d, max depth - %d, "
1577 +                               "ave. depth - %d, ave. blocks/extent - %d\n",
1578 +                               sbi->s_ext_mindepth,
1579 +                               sbi->s_ext_maxdepth,
1580 +                               sbi->s_ext_sum / sbi->s_ext_count,
1581 +                               sbi->s_ext_blocks / sbi->s_ext_extents);
1582 +}
1583 +
1584 --- linux-2.4.20-vanilla/fs/ext3/ialloc.c~ext3-extents-2.4.20   2003-09-15 18:54:58.000000000 +0400
1585 +++ linux-2.4.20-vanilla-alexey/fs/ext3/ialloc.c        2003-09-15 19:31:40.000000000 +0400
1586 @@ -569,6 +569,10 @@ repeat:
1587         inode->u.ext3_i.i_prealloc_count = 0;
1588  #endif
1589         inode->u.ext3_i.i_block_group = i;
1590 +       if (test_opt(sb, EXTENTS))
1591 +               inode->u.ext3_i.i_flags |= EXT3_EXTENTS_FL;
1592 +       inode->u.ext3_i.i_depth = 0;
1593 +       sema_init(&inode->u.ext3_i.i_ext_sem, 1);
1594         
1595         if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL)
1596                 inode->i_flags |= S_SYNC;
1597 --- linux-2.4.20-vanilla/fs/ext3/inode.c~ext3-extents-2.4.20    2003-09-15 18:54:58.000000000 +0400
1598 +++ linux-2.4.20-vanilla-alexey/fs/ext3/inode.c 2003-09-15 19:53:10.000000000 +0400
1599 @@ -848,6 +848,15 @@ changed:
1600         goto reread;
1601  }
1602  
1603 +static inline int
1604 +ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
1605 +               struct buffer_head *bh, int create)
1606 +{
1607 +       if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
1608 +               return ext3_ext_get_block(handle, inode, block, bh, create);
1609 +       return ext3_get_block_handle(handle, inode, block, bh, create);
1610 +}
1611 +
1612  /*
1613   * The BKL is not held on entry here.
1614   */
1615 @@ -861,7 +870,7 @@ static int ext3_get_block(struct inode *
1616                 handle = ext3_journal_current_handle();
1617                 J_ASSERT(handle != 0);
1618         }
1619 -       ret = ext3_get_block_handle(handle, inode, iblock, bh_result, create);
1620 +       ret = ext3_get_block_wrap(handle, inode, iblock, bh_result, create);
1621         return ret;
1622  }
1623  
1624 @@ -879,7 +888,7 @@ struct buffer_head *ext3_getblk(handle_t
1625         dummy.b_state = 0;
1626         dummy.b_blocknr = -1000;
1627         buffer_trace_init(&dummy.b_history);
1628 -       *errp = ext3_get_block_handle(handle, inode, block, &dummy, create);
1629 +       *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create);
1630         if (!*errp && buffer_mapped(&dummy)) {
1631                 struct buffer_head *bh;
1632                 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1633 @@ -1403,7 +1412,7 @@ struct address_space_operations ext3_aop
1634   * This required during truncate. We need to physically zero the tail end
1635   * of that block so it doesn't yield old data if the file is later grown.
1636   */
1637 -static int ext3_block_truncate_page(handle_t *handle,
1638 +int ext3_block_truncate_page(handle_t *handle,
1639                 struct address_space *mapping, loff_t from)
1640  {
1641         unsigned long index = from >> PAGE_CACHE_SHIFT;
1642 @@ -1888,6 +1897,9 @@ void ext3_truncate(struct inode * inode)
1643  
1644         ext3_discard_prealloc(inode);
1645  
1646 +       if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
1647 +               return ext3_ext_truncate(inode);
1648 +
1649         handle = start_transaction(inode);
1650         if (IS_ERR(handle))
1651                 return;         /* AKPM: return what? */
1652 @@ -2200,6 +2212,8 @@ void ext3_read_inode(struct inode * inod
1653         inode->u.ext3_i.i_prealloc_count = 0;
1654  #endif
1655         inode->u.ext3_i.i_block_group = iloc.block_group;
1656 +       inode->u.ext3_i.i_depth = raw_inode->osd2.linux2.l_i_depth;
1657 +       sema_init(&inode->u.ext3_i.i_ext_sem, 1);
1658  
1659         /*
1660          * NOTE! The in-memory inode i_data array is in little-endian order
1661 @@ -2321,6 +2335,7 @@ static int ext3_do_update_inode(handle_t
1662                 raw_inode->i_fsize = 0;
1663         }
1664  #endif
1665 +       raw_inode->osd2.linux2.l_i_depth = inode->u.ext3_i.i_depth;
1666         raw_inode->i_file_acl = cpu_to_le32(inode->u.ext3_i.i_file_acl);
1667         if (!S_ISREG(inode->i_mode)) {
1668                 raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext3_i.i_dir_acl);
1669 @@ -2525,6 +2540,9 @@ int ext3_writepage_trans_blocks(struct i
1670         int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
1671         int ret;
1672         
1673 +       if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
1674 +               return ext3_ext_writepage_trans_blocks(inode, bpp);
1675 +
1676         if (ext3_should_journal_data(inode))
1677                 ret = 3 * (bpp + indirects) + 2;
1678         else
1679 @@ -2961,7 +2979,7 @@ int ext3_prep_san_write(struct inode *in
1680  
1681         /* alloc blocks one by one */
1682         for (i = 0; i < nblocks; i++) {
1683 -               ret = ext3_get_block_handle(handle, inode, blocks[i],
1684 +               ret = ext3_get_block_wrap(handle, inode, blocks[i],
1685                                                 &bh_tmp, 1);
1686                 if (ret)
1687                         break;
1688 @@ -3022,7 +3040,7 @@ int ext3_map_inode_page(struct inode *in
1689                  if (blocks[i] != 0)
1690                          continue;
1691  
1692 -                rc = ext3_get_block_handle(handle, inode, iblock, &bh, 1);
1693 +                rc = ext3_get_block_wrap(handle, inode, iblock, &bh, 1);
1694                  if (rc) {
1695                          printk(KERN_INFO "ext3_map_inode_page: error %d "
1696                                 "allocating block %ld\n", rc, iblock);
1697 --- linux-2.4.20-vanilla/fs/ext3/Makefile~ext3-extents-2.4.20   2003-09-15 18:54:58.000000000 +0400
1698 +++ linux-2.4.20-vanilla-alexey/fs/ext3/Makefile        2003-09-15 19:41:08.000000000 +0400
1699 @@ -12,7 +12,8 @@ O_TARGET := ext3.o
1700  export-objs := ext3-exports.o
1701  
1702  obj-y    := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
1703 -               ioctl.o namei.o super.o symlink.o hash.o ext3-exports.o
1704 +               ioctl.o namei.o super.o symlink.o hash.o ext3-exports.o \
1705 +               extents.o
1706  obj-m    := $(O_TARGET)
1707  
1708  export-objs += xattr.o
1709 --- linux-2.4.20-vanilla/fs/ext3/super.c~ext3-extents-2.4.20    2003-09-15 18:54:59.000000000 +0400
1710 +++ linux-2.4.20-vanilla-alexey/fs/ext3/super.c 2003-09-15 19:42:57.000000000 +0400
1711 @@ -619,6 +619,7 @@ void ext3_put_super (struct super_block 
1712         kdev_t j_dev = sbi->s_journal->j_dev;
1713         int i;
1714  
1715 +       ext3_ext_release(sb);
1716         ext3_stop_delete_thread(sbi);
1717         ext3_xattr_put_super(sb);
1718         journal_destroy(sbi->s_journal);
1719 @@ -765,6 +766,10 @@ static int parse_options (char * options
1720                                        "EXT3 Check option not supported\n");
1721  #endif
1722                 }
1723 +               else if (!strcmp (this_char, "extents"))
1724 +                       set_opt (sbi->s_mount_opt, EXTENTS);
1725 +               else if (!strcmp (this_char, "extdebug"))
1726 +                       set_opt (sbi->s_mount_opt, EXTDEBUG);
1727                 else if (!strcmp (this_char, "debug"))
1728                         set_opt (*mount_options, DEBUG);
1729                 else if (!strcmp (this_char, "errors")) {
1730 @@ -1478,6 +1483,7 @@ struct super_block * ext3_read_super (st
1731                 test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
1732                 test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
1733                 "writeback");
1734 +       ext3_ext_init(sb);
1735  
1736         return sb;
1737  
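
With the parse_options() and ext3_read_super() changes above, extent support is opt-in per mount: something like `mount -t ext3 -o extents /dev/hda1 /mnt` (device and mount point are placeholders) makes newly created inodes extent-mapped via the EXT3_EXTENTS_FL that ialloc.c now sets, and adding `extdebug` turns on the extents debugging output.
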
1738 --- linux-2.4.20-vanilla/include/linux/ext3_fs.h~ext3-extents-2.4.20    2003-09-15 18:54:58.000000000 +0400
1739 +++ linux-2.4.20-vanilla-alexey/include/linux/ext3_fs.h 2003-09-15 20:15:52.000000000 +0400
1740 @@ -184,6 +184,7 @@ struct ext3_group_desc
1741  #define EXT3_IMAGIC_FL                 0x00002000 /* AFS directory */
1742  #define EXT3_JOURNAL_DATA_FL           0x00004000 /* file data should be journaled */
1743  #define EXT3_RESERVED_FL               0x80000000 /* reserved for ext3 lib */
1744 +#define EXT3_EXTENTS_FL                        0x00080000 /* Inode uses extents */
1745  
1746  #define EXT3_FL_USER_VISIBLE           0x00005FFF /* User visible flags */
1747  #define EXT3_FL_USER_MODIFIABLE                0x000000FF /* User modifiable flags */
1748 @@ -244,7 +245,7 @@ struct ext3_inode {
1749                 struct {
1750                         __u8    l_i_frag;       /* Fragment number */
1751                         __u8    l_i_fsize;      /* Fragment size */
1752 -                       __u16   i_pad1;
1753 +                       __u16   l_i_depth;
1754                         __u16   l_i_uid_high;   /* these 2 fields    */
1755                         __u16   l_i_gid_high;   /* were reserved2[0] */
1756                         __u32   l_i_reserved2;
1757 @@ -325,6 +326,8 @@ struct ext3_inode {
1758  #define EXT3_MOUNT_IOPEN               0x8000  /* Allow access via iopen */
1759  #define EXT3_MOUNT_IOPEN_NOPRIV                0x10000 /* Make iopen world-readable */
1760  #define EXT3_MOUNT_ASYNCDEL            0x20000 /* Delayed deletion */
1761 +#define EXT3_MOUNT_EXTENTS             0x40000 /* Extents support */
1762 +#define EXT3_MOUNT_EXTDEBUG            0x80000 /* Extents debug */
1763  
1764  /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
1765  #ifndef _LINUX_EXT2_FS_H
1766 @@ -702,6 +705,10 @@ extern void ext3_discard_prealloc (struc
1767  extern void ext3_dirty_inode(struct inode *);
1768  extern int ext3_change_inode_journal_flag(struct inode *, int);
1769  extern void ext3_truncate (struct inode *);
1770 +extern int ext3_block_truncate_page(handle_t *, struct address_space *, loff_t);
1771 +extern int ext3_forget(handle_t *handle, int is_metadata,
1772 +                      struct inode *inode, struct buffer_head *bh,
1773 +                      int blocknr);
1774  #ifdef EXT3_DELETE_THREAD
1775  extern void ext3_truncate_thread(struct inode *inode);
1776  #endif
1777 @@ -765,6 +772,13 @@ extern struct inode_operations ext3_spec
1778  extern struct inode_operations ext3_symlink_inode_operations;
1779  extern struct inode_operations ext3_fast_symlink_inode_operations;
1780  
1781 +/* extents.c */
1782 +extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
1783 +extern int ext3_ext_get_block(handle_t *, struct inode *, long,
1784 +                               struct buffer_head *, int);
1785 +extern void ext3_ext_truncate(struct inode *);
1786 +extern void ext3_ext_init(struct super_block *);
1787 +extern void ext3_ext_release(struct super_block *);
1788  
1789  #endif /* __KERNEL__ */
1790  
1791 --- linux-2.4.20-vanilla/include/linux/ext3_fs_i.h~ext3-extents-2.4.20  2003-09-15 10:16:38.000000000 +0400
1792 +++ linux-2.4.20-vanilla-alexey/include/linux/ext3_fs_i.h       2003-09-15 20:14:40.000000000 +0400
1793 @@ -73,6 +73,10 @@ struct ext3_inode_info {
1794          * by other means, so we have truncate_sem.
1795          */
1796         struct rw_semaphore truncate_sem;
1797 +
1798 +       /* extents-related data */
1799 +       struct semaphore i_ext_sem;
1800 +       __u16 i_depth;
1801  };
1802  
1803  #endif /* _LINUX_EXT3_FS_I */
1804 --- linux-2.4.20-vanilla/include/linux/ext3_fs_sb.h~ext3-extents-2.4.20 2003-09-15 18:54:57.000000000 +0400
1805 +++ linux-2.4.20-vanilla-alexey/include/linux/ext3_fs_sb.h      2003-09-15 20:14:40.000000000 +0400
1806 @@ -86,6 +86,16 @@ struct ext3_sb_info {
1807         wait_queue_head_t s_delete_thread_queue;
1808         wait_queue_head_t s_delete_waiter_queue;
1809  #endif
1810 +
1811 +       /* extents */
1812 +       int s_ext_debug;
1813 +       int s_ext_mindepth;
1814 +       int s_ext_maxdepth;
1815 +       int s_ext_sum;
1816 +       int s_ext_count;
1817 +       spinlock_t s_ext_lock;
1818 +       int s_ext_extents;
1819 +       int s_ext_blocks;
1820  };
1821  
1822  #endif /* _LINUX_EXT3_FS_SB */
1823
1824 _