landing b_cmobd_merge on HEAD
[fs/lustre-release.git] / lustre / kernel_patches / patches / ext3-extents-2.4.18-chaos-pdirops.patch
1  fs/ext3/Makefile           |    3 
2  fs/ext3/extents.c          | 1624 +++++++++++++++++++++++++++++++++++++++++++++
3  fs/ext3/ialloc.c           |    4 
4  fs/ext3/inode.c            |   30 
5  fs/ext3/super.c            |    8 
6  include/linux/ext3_fs.h    |   18 
7  include/linux/ext3_fs_i.h  |    4 
8  include/linux/ext3_fs_sb.h |   10 
9  8 files changed, 1693 insertions(+), 8 deletions(-)
10
11 --- /dev/null   2003-01-30 13:24:37.000000000 +0300
12 +++ linux-2.4.18-chaos-pdirops-alexey/fs/ext3/extents.c 2003-09-23 18:09:30.000000000 +0400
13 @@ -0,0 +1,1624 @@
14 +/*
15 + *
16 + * linux/fs/ext3/extents.c
17 + *
18 + * Extents support for EXT3
19 + *
20 + * 07/08/2003    Alex Tomas <bzzz@tmi.comex.ru>
21 + * 
22 + * TODO:
23 + *   - ext3*_error() should be used in some situations
24 + *   - find_goal() [to be tested and improved]
25 + *   - error handling
26 + *   - we could leak allocated block in some error cases
27 + *   - quick search for index/leaf in ext3_ext_find_extent()
28 + *   - tree reduction
29 + *   - cache last found extent
30 + *   - arch-independent
31 + */
32 +
33 +#include <linux/module.h>
34 +#include <linux/fs.h>
35 +#include <linux/time.h>
36 +#include <linux/ext3_jbd.h>
37 +#include <linux/jbd.h>
38 +#include <linux/smp_lock.h>
39 +#include <linux/highuid.h>
40 +#include <linux/pagemap.h>
41 +#include <linux/quotaops.h>
42 +#include <linux/string.h>
43 +#include <linux/slab.h>
44 +#include <linux/locks.h>
45 +
46 +/*
47 + * with AGRESSIVE_TEST defined, the capacity of index/leaf blocks
48 + * becomes very small, so index splits, in-depth growing and
49 + * other hard changes happen much more often.
50 + * this is for debug purposes only
51 + */
52 +#define AGRESSIVE_TEST_
53 +
54 +/*
55 + * if EXT_DEBUG is defined you can use the 'extdebug' mount option
56 + * to get lots of info about what's going on
57 + */
58 +#define EXT_DEBUG
59 +#ifdef EXT_DEBUG
60 +#define ext_debug(inode,fmt,a...)              \
61 +do {                                           \
62 +       if (test_opt((inode)->i_sb, EXTDEBUG))  \
63 +               printk(fmt, ##a);               \
64 +} while (0)
65 +#else
66 +#define ext_debug(inode,fmt,a...)
67 +#endif
68 +
69 +#define EXT3_ALLOC_NEEDED      2       /* block bitmap + group descriptor */
70 +
71 +/*
72 + * ext3_inode has i_block array (total 60 bytes)
73 + * first 4 bytes are used to store:
74 + *  - tree depth (0 means there is no tree yet; all extents are in the inode)
75 + *  - number of live extents in the inode
76 + */
77 +
78 +/*
79 + * this is extent on-disk structure
80 + * it's used at the bottom of the tree
81 + */
82 +struct ext3_extent {
83 +       __u32   e_block;        /* first logical block extent covers */
84 +       __u32   e_start;        /* first physical block the extent starts at */
85 +       __u32   e_num;          /* number of blocks covered by extent */
86 +};
87 +
88 +/*
89 + * this is index on-disk structure
90 + * it's used at all levels but the bottom
91 + */
92 +struct ext3_extent_idx {
93 +       __u32   e_block;        /* index covers logical blocks from 'block' */
94 +       __u32   e_leaf;         /* pointer to the physical block of the next *
95 +                                * level. leaf or next index could be there */
96 +};
97 +
98 +/*
99 + * every block (leaf or index), even the one stored in the inode, has a header
100 + */
101 +struct ext3_extent_header {    
102 +       __u16   e_num;          /* number of valid entries */
103 +       __u16   e_max;          /* capacity of the store, in entries */
104 +};
105 +
106 +/*
107 + * array of ext3_ext_path contains path to some extent
108 + * creation/lookup routines use it for traversal/splitting/etc
109 + * truncate uses it to simulate recursive walking
110 + */
111 +struct ext3_ext_path {
112 +       __u32                           p_block;
113 +       __u16                           p_depth;
114 +       struct ext3_extent              *p_ext;
115 +       struct ext3_extent_idx          *p_idx;
116 +       struct ext3_extent_header       *p_hdr;
117 +       struct buffer_head              *p_bh;
118 +};
119 +
120 +#define EXT_FIRST_EXTENT(__hdr__) \
121 +       ((struct ext3_extent *) (((char *) (__hdr__)) +         \
122 +                                sizeof(struct ext3_extent_header)))
123 +#define EXT_FIRST_INDEX(__hdr__) \
124 +       ((struct ext3_extent_idx *) (((char *) (__hdr__)) +     \
125 +                                    sizeof(struct ext3_extent_header)))
126 +#define EXT_HAS_FREE_INDEX(__path__) \
127 +       ((__path__)->p_hdr->e_num < (__path__)->p_hdr->e_max)
128 +#define EXT_LAST_EXTENT(__hdr__) \
129 +       (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->e_num - 1)
130 +#define EXT_LAST_INDEX(__hdr__) \
131 +       (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->e_num - 1)
132 +#define EXT_MAX_EXTENT(__hdr__) \
133 +       (EXT_FIRST_EXTENT((__hdr__)) + (__hdr__)->e_max - 1)
134 +#define EXT_MAX_INDEX(__hdr__) \
135 +       (EXT_FIRST_INDEX((__hdr__)) + (__hdr__)->e_max - 1)
136 +
137 +
138 +#define EXT_ASSERT(__x__) if (!(__x__)) BUG();
139 +
140 +/*
141 + * could return:
142 + *  - EROFS
143 + *  - ENOMEM
144 + */
145 +static int ext3_ext_get_access(handle_t *handle, struct inode *inode,
146 +                               struct ext3_ext_path *path)
147 +{
148 +       if (path->p_bh) {
149 +               /* path points to block */
150 +               return ext3_journal_get_write_access(handle, path->p_bh);
151 +       }
152 +
153 +       /* path points to leaf/index in inode body */
154 +       return 0;
155 +}
156 +
157 +/*
158 + * could return:
159 + *  - EROFS
160 + *  - ENOMEM
161 + *  - EIO
162 + */
163 +static int ext3_ext_dirty(handle_t *handle, struct inode *inode,
164 +                               struct ext3_ext_path *path)
165 +{
166 +       if (path->p_bh) {
167 +               /* path points to block */
168 +               return ext3_journal_dirty_metadata(handle, path->p_bh);
169 +       }
170 +
171 +       /* path points to leaf/index in inode body */
172 +       return ext3_mark_inode_dirty(handle, inode);
173 +}
174 +
175 +static inline int ext3_ext_space_block(struct inode *inode)
176 +{
177 +       int size;
178 +
179 +       size = (inode->i_sb->s_blocksize - sizeof(struct ext3_extent_header))
180 +               / sizeof(struct ext3_extent);
181 +#ifdef AGRESSIVE_TEST
182 +       size = 6; /* FIXME: for debug, remove this line */
183 +#endif
184 +       return size;
185 +}
186 +
187 +static inline int ext3_ext_space_inode(struct inode *inode)
188 +{
189 +       int size;
190 +
191 +       size = (sizeof(EXT3_I(inode)->i_data) -
192 +                       sizeof(struct ext3_extent_header))
193 +                       / sizeof(struct ext3_extent);
194 +#ifdef AGRESSIVE_TEST
195 +       size = 3; /* FIXME: for debug, remove this line */
196 +#endif
197 +       return size;
198 +}
199 +
200 +static inline int ext3_ext_space_inode_idx(struct inode *inode)
201 +{
202 +       int size;
203 +
204 +       size = (sizeof(EXT3_I(inode)->i_data) -
205 +                       sizeof(struct ext3_extent_header))
206 +                       / sizeof(struct ext3_extent_idx);
207 +#ifdef AGRESSIVE_TEST
208 +       size = 4; /* FIXME: for debug, remove this line */
209 +#endif
210 +       return size;
211 +}
212 +
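A quick capacity check against the three ext3_ext_space_*() helpers above (not part of the patch; it assumes a 4 KB blocksize, the structure sizes as declared (4-byte header, 12-byte extent, 8-byte index, 60-byte i_data) and ignores the AGRESSIVE_TEST overrides):

    /* ext3_ext_space_block():     (4096 - 4) / 12 = 341 extents per block
     * ext3_ext_space_inode():     (  60 - 4) / 12 =   4 extents in the inode body
     * ext3_ext_space_inode_idx(): (  60 - 4) /  8 =   7 indices in the inode body
     */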
213 +static void ext3_ext_show_path(struct inode *inode, struct ext3_ext_path *path)
214 +{
215 +       int k, l = path->p_depth;
216 +
217 +       ext_debug(inode, "path:");
218 +       for (k = 0; k <= l; k++, path++) {
219 +               if (path->p_idx) {
220 +                       ext_debug(inode, "  %d->%d", path->p_idx->e_block,
221 +                                       path->p_idx->e_leaf);
222 +               } else if (path->p_ext) {
223 +                       ext_debug(inode, "  %d:%d:%d",
224 +                                       path->p_ext->e_block,
225 +                                       path->p_ext->e_start,
226 +                                       path->p_ext->e_num);
227 +               } else
228 +                       ext_debug(inode, "  []");
229 +       }
230 +       ext_debug(inode, "\n");
231 +}
232 +
233 +static void ext3_ext_show_leaf(struct inode *inode, struct ext3_ext_path *path)
234 +{
235 +       int depth = EXT3_I(inode)->i_depth;
236 +       struct ext3_extent_header *eh = path[depth].p_hdr;
237 +       struct ext3_extent *ex = EXT_FIRST_EXTENT(eh);
238 +       int i;
239 +
240 +       for (i = 0; i < eh->e_num; i++, ex++) {
241 +               ext_debug(inode, "%d:%d:%d ",
242 +                               ex->e_block, ex->e_start, ex->e_num);
243 +       }
244 +       ext_debug(inode, "\n");
245 +}
246 +
247 +static void ext3_ext_drop_refs(struct inode *inode, struct ext3_ext_path *path)
248 +{
249 +       int depth = path->p_depth;
250 +       int i;
251 +
252 +       for (i = 0; i <= depth; i++, path++)
253 +               if (path->p_bh) {
254 +                       brelse(path->p_bh);
255 +                       path->p_bh = NULL;
256 +               }
257 +}
258 +
259 +static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path)
260 +{
261 +       struct ext3_inode_info *ei = EXT3_I(inode);
262 +       unsigned long bg_start;
263 +       unsigned long colour;
264 +       int depth;
265 +       
266 +       if (path) {
267 +               depth = path->p_depth;
268 +               /* try to find previous block */
269 +               if (path[depth].p_ext)
270 +                       return path[depth].p_ext->e_start +
271 +                               path[depth].p_ext->e_num - 1;
272 +               
273 +               /* it looks like the index is empty;
274 +                * try to find a goal starting from the index block itself */
275 +               if (path[depth].p_bh)
276 +                       return path[depth].p_bh->b_blocknr;
277 +       }
278 +
279 +       /* OK. use inode's group */
280 +       bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
281 +               le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
282 +       colour = (current->pid % 16) *
283 +                       (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
284 +       return bg_start + colour;
285 +}
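To make the fallback above concrete (arithmetic only, with assumed values): on a 4 KB-blocksize filesystem EXT3_BLOCKS_PER_GROUP() is 32768, so for an inode in block group 3 with s_first_data_block = 0 and a caller whose pid is 4242, bg_start = 3 * 32768 = 98304, colour = (4242 % 16) * (32768 / 16) = 2 * 2048 = 4096, and the returned goal is block 102400.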
286 +
287 +static struct ext3_ext_path *
288 +ext3_ext_find_extent(struct inode *inode, int block, struct ext3_ext_path *path)
289 +{
290 +       struct ext3_inode_info *ei = EXT3_I(inode);
291 +       struct ext3_extent_header *eh = (void *) ei->i_data;
292 +       struct ext3_extent_idx *ix;
293 +       struct buffer_head *bh;
294 +       struct ext3_extent *ex;
295 +       int depth, i, k, ppos = 0, prev = 0;
296 +       
297 +       eh = (struct ext3_extent_header *) ei->i_data;
298 +
299 +       /* initialize capacity of leaf in inode for first time */
300 +       if (eh->e_max == 0)
301 +               eh->e_max = ext3_ext_space_inode(inode);
302 +       i = depth = ei->i_depth;
303 +       EXT_ASSERT(i == 0 || eh->e_num > 0);
304 +       
305 +       /* account possible depth increase */
306 +       if (!path) {
307 +               path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
308 +                               GFP_NOFS);
309 +               if (!path)
310 +                       return ERR_PTR(-ENOMEM);
311 +       }
312 +       memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
313 +
314 +       /* walk through the tree */
315 +       while (i) {
316 +               ext_debug(inode, "depth %d: num %d, max %d\n",
317 +                               ppos, eh->e_num, eh->e_max);
318 +               ix = EXT_FIRST_INDEX(eh);
319 +               if (eh->e_num) {
320 +                       EXT_ASSERT(prev == 0 || ix->e_block == prev);
321 +                       path[ppos].p_idx = ix;
322 +               }
323 +               EXT_ASSERT(eh->e_num <= eh->e_max);
324 +               for (k = 0; k < eh->e_num; k++, ix++) {
325 +                       ext_debug(inode, "index: %d -> %d\n",
326 +                                       ix->e_block, ix->e_leaf);
327 +                       EXT_ASSERT((k == 0 && prev <= (int)ix->e_block) ||
328 +                                       (k > 0 && prev < (int)ix->e_block));
329 +                       if (block < ix->e_block)
330 +                               break;
331 +                       prev = ix->e_block;
332 +                       path[ppos].p_idx = ix;
333 +               }
334 +               path[ppos].p_block = path[ppos].p_idx->e_leaf;
335 +               path[ppos].p_depth = i;
336 +               path[ppos].p_hdr = eh;
337 +               path[ppos].p_ext = NULL;
338 +
339 +               bh = sb_bread(inode->i_sb, path[ppos].p_block);
340 +               if (!bh) {
341 +                       ext3_ext_drop_refs(inode, path);
342 +                       kfree(path);
343 +                       return ERR_PTR(-EIO);
344 +               }
345 +               eh = (struct ext3_extent_header *) bh->b_data;
346 +               ppos++;
347 +               EXT_ASSERT(ppos <= depth);
348 +               path[ppos].p_bh = bh;
349 +               i--;
350 +       }
351 +
352 +       path[ppos].p_depth = i;
353 +       path[ppos].p_hdr = eh;
354 +       path[ppos].p_ext = NULL;
355 +       
356 +       /* find extent */
357 +       ex = EXT_FIRST_EXTENT(eh);
358 +       if (eh->e_num)
359 +               path[ppos].p_ext = ex;
360 +       EXT_ASSERT(eh->e_num <= eh->e_max);
361 +       for (k = 0; k < eh->e_num; k++, ex++) {
362 +               EXT_ASSERT(ex->e_num < EXT3_BLOCKS_PER_GROUP(inode->i_sb));
363 +               EXT_ASSERT((k == 0 && prev <= (int)ex->e_block) ||
364 +                               (k > 0 && prev < (int)ex->e_block));
365 +               if (block < ex->e_block) 
366 +                       break;
367 +               prev = ex->e_block;
368 +               path[ppos].p_ext = ex;
369 +       }
370 +
371 +       ext3_ext_show_path(inode, path);
372 +
373 +       return path;
374 +}
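A minimal caller-side sketch of how this lookup is meant to be used, modelled on the ext3_ext_get_block() path later in this file (the helper name lookup_block and its argument types are illustrative, not part of the patch):

    static int lookup_block(struct inode *inode, int block, unsigned long *phys)
    {
            int depth = EXT3_I(inode)->i_depth;
            struct ext3_ext_path *path;
            struct ext3_extent *ex;

            path = ext3_ext_find_extent(inode, block, NULL);
            if (IS_ERR(path))
                    return PTR_ERR(path);

            /* p_ext is the nearest extent at leaf level; it may be NULL or
             * may not actually cover the requested block */
            ex = path[depth].p_ext;
            if (ex && block >= ex->e_block && block < ex->e_block + ex->e_num)
                    *phys = block - ex->e_block + ex->e_start;
            else
                    *phys = 0;      /* hole */

            ext3_ext_drop_refs(inode, path);        /* drop bh references */
            kfree(path);
            return 0;
    }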
375 +
376 +static void ext3_ext_check_boundary(struct inode *inode,
377 +                                       struct ext3_ext_path *curp,
378 +                                       void *addr, int len)
379 +{
380 +       void *end;
381 +
382 +       if (!len)
383 +               return;
384 +       if (curp->p_bh)
385 +               end = (void *) curp->p_hdr + inode->i_sb->s_blocksize;
386 +       else
387 +               end = (void *) curp->p_hdr + sizeof(EXT3_I(inode)->i_data);
388 +       if (((unsigned long) addr) + len > (unsigned long) end) {
389 +               printk("overflow! 0x%p > 0x%p\n", addr + len, end);
390 +               BUG();
391 +       }
392 +       if ((unsigned long) addr < (unsigned long) curp->p_hdr) {
393 +               printk("underflow! 0x%p < 0x%p\n", addr, curp->p_hdr);
394 +               BUG();
395 +       }
396 +}
397 +
398 +/*
399 + * insert new index [logical;ptr] into the block at curp;
400 + * it checks where to insert: before curp or after curp
401 + */
402 +static int ext3_ext_insert_index(handle_t *handle, struct inode *inode,
403 +                               struct ext3_ext_path *curp, int logical,
404 +                               int ptr)
405 +{
406 +       struct ext3_extent_idx *ix;
407 +       int len, err;
408 +
409 +       if ((err = ext3_ext_get_access(handle, inode, curp)))
410 +               return err;
411 +
412 +       EXT_ASSERT(logical != curp->p_idx->e_block);
413 +       len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
414 +       if (logical > curp->p_idx->e_block) {
415 +               /* insert after */
416 +               len = (len - 1) * sizeof(struct ext3_extent_idx);
417 +               len = len < 0 ? 0 : len;
418 +               ext_debug(inode, "insert new index %d after: %d. "
419 +                               "move %d from 0x%p to 0x%p\n",
420 +                               logical, ptr, len,
421 +                               (curp->p_idx + 1), (curp->p_idx + 2));
422 +
423 +               ext3_ext_check_boundary(inode, curp, curp->p_idx + 2, len);
424 +               memmove(curp->p_idx + 2, curp->p_idx + 1, len);
425 +               ix = curp->p_idx + 1;
426 +       } else {
427 +               /* insert before */
428 +               len = len * sizeof(struct ext3_extent_idx);
429 +               len = len < 0 ? 0 : len;
430 +               ext_debug(inode, "insert new index %d before: %d. "
431 +                               "move %d from 0x%p to 0x%p\n",
432 +                               logical, ptr, len,
433 +                               curp->p_idx, (curp->p_idx + 1));
434 +
435 +               ext3_ext_check_boundary(inode, curp, curp->p_idx + 1, len);
436 +               memmove(curp->p_idx + 1, curp->p_idx, len);
437 +               ix = curp->p_idx;
438 +       }
439 +
440 +       ix->e_block = logical;
441 +       ix->e_leaf = ptr;
442 +       curp->p_hdr->e_num++;
443 +
444 +       err = ext3_ext_dirty(handle, inode, curp);
445 +       ext3_std_error(inode->i_sb, err);
446 +
447 +       return err;
448 +}
449 +
450 +/*
451 + * routine inserts new subtree into the path, using the free index entry
452 + * at depth 'at':
453 + *  - allocates all needed blocks (new leaf and all intermediate index blocks)
454 + *  - makes a decision where to split
455 + *  - moves remaining extents and index entries (to the right of the split point)
456 + *    into the newly allocated blocks
457 + *  - initializes the subtree
458 + */
459 +static int ext3_ext_split(handle_t *handle, struct inode *inode,
460 +                               struct ext3_ext_path *path,
461 +                               struct ext3_extent *newext, int at)
462 +{
463 +       struct buffer_head *bh = NULL;
464 +       int depth = EXT3_I(inode)->i_depth;
465 +       struct ext3_extent_header *neh;
466 +       struct ext3_extent_idx *fidx;
467 +       struct ext3_extent *ex;
468 +       int i = at, k, m, a;
469 +       long newblock, oldblock, border;
470 +       int *ablocks = NULL; /* array of allocated blocks */
471 +       int err = 0;
472 +
473 +       /* make decision: where to split? */
474 +       /* FIXME: for now the decision is the simplest: at the current extent */
475 +
476 +       /* if the current leaf will be split, then we should use the
477 +        * border from the split point */
478 +       
479 +       if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
480 +               border = path[depth].p_ext[1].e_block;
481 +               ext_debug(inode, "leaf will be split."
482 +                               " next leaf starts at %d\n",
483 +                               (int)border);
484 +       } else {
485 +               border = newext->e_block;
486 +               ext_debug(inode, "leaf will be added."
487 +                               " next leaf starts at %d\n",
488 +                               (int)border);
489 +       }
490 +
491 +       /* 
492 +        * if an error occurs, we stop processing
493 +        * and turn the filesystem read-only, so the index won't
494 +        * be inserted and the tree will stay in a consistent
495 +        * state. the next mount will repair the buffers too
496 +        */
497 +
498 +       /*
499 +        * get an array to track all allocated blocks;
500 +        * we need this to handle errors and free the blocks
501 +        * on failure
502 +        */
503 +       ablocks = kmalloc(sizeof(long) * depth, GFP_NOFS);
504 +       if (!ablocks)
505 +               return -ENOMEM;
506 +       memset(ablocks, 0, sizeof(long) * depth);
507 +
508 +       /* allocate all needed blocks */
509 +       ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
510 +       newblock = 0; /* FIXME: something more sophisticated needed here */ 
511 +       for (a = 0; newext->e_num > 0 && a < depth - at; a++) {
512 +               newblock = ablocks[a] = newext->e_start++;
513 +               newext->e_num--;
514 +       }
515 +       for (; a < depth - at; a++) {
516 +               newblock = ext3_new_block(handle, inode,
517 +                                               newblock + 1, 0, 0, &err);
518 +               if (newblock == 0)
519 +                       goto cleanup;
520 +               ablocks[a] = newblock;
521 +       }
522 +
523 +       /* initialize new leaf */
524 +       newblock = ablocks[--a];
525 +       EXT_ASSERT(newblock);
526 +       bh = sb_getblk(inode->i_sb, newblock);
527 +       if (!bh) {
528 +               err = -EIO;
529 +               goto cleanup;
530 +       }
531 +       lock_buffer(bh);
532 +
533 +       if ((err = ext3_journal_get_create_access(handle, bh)))
534 +               goto cleanup;
535 +
536 +       neh = (struct ext3_extent_header *) bh->b_data;
537 +       neh->e_num = 0;
538 +       neh->e_max = ext3_ext_space_block(inode);
539 +       ex = EXT_FIRST_EXTENT(neh);
540 +
541 +       /* move the remainder of path[depth] to the new leaf */
542 +       EXT_ASSERT(path[depth].p_hdr->e_num ==
543 +                       path[depth].p_hdr->e_max);
544 +       /* start copy from next extent */
545 +       /* TODO: we could do it by single memmove */
546 +       m = 0;
547 +       path[depth].p_ext++;
548 +       while (path[depth].p_ext <=
549 +                       EXT_MAX_EXTENT(path[depth].p_hdr)) {
550 +               ext_debug(inode, "move %d:%d:%d in new leaf\n",
551 +                               path[depth].p_ext->e_block,
552 +                               path[depth].p_ext->e_start,
553 +                               path[depth].p_ext->e_num);
554 +               memmove(ex++, path[depth].p_ext++,
555 +                               sizeof(struct ext3_extent));
556 +               neh->e_num++;
557 +               m++;
558 +       }
559 +       mark_buffer_uptodate(bh, 1);
560 +       unlock_buffer(bh);
561 +
562 +       if ((err = ext3_journal_dirty_metadata(handle, bh)))
563 +               goto cleanup;   
564 +       brelse(bh);
565 +       bh = NULL;
566 +
567 +       /* correct old leaf */
568 +       if (m) {
569 +               if ((err = ext3_ext_get_access(handle, inode, path)))
570 +                       goto cleanup;
571 +               path[depth].p_hdr->e_num -= m;
572 +               if ((err = ext3_ext_dirty(handle, inode, path)))
573 +                       goto cleanup;
574 +               
575 +       }
576 +
577 +       /* create intermediate indexes */
578 +       k = depth - at - 1;
579 +       EXT_ASSERT(k >= 0);
580 +       if (k)
581 +               ext_debug(inode,
582 +                               "create %d intermediate indices\n", k);
583 +       /* insert new index into current index block */
584 +       /* current depth stored in i var */
585 +       i = depth - 1;
586 +       while (k--) {
587 +               oldblock = newblock;
588 +               newblock = ablocks[--a];
589 +               bh = sb_getblk(inode->i_sb, newblock);
590 +               if (!bh) {
591 +                       err = -EIO;
592 +                       goto cleanup;
593 +               }
594 +               lock_buffer(bh);
595 +
596 +               if ((err = ext3_journal_get_create_access(handle, bh)))
597 +                       goto cleanup;
598 +
599 +               neh = (struct ext3_extent_header *) bh->b_data;
600 +               neh->e_num = 1;
601 +               neh->e_max = ext3_ext_space_block(inode);
602 +               fidx = EXT_FIRST_INDEX(neh);
603 +               fidx->e_block = border;
604 +               fidx->e_leaf = oldblock;
605 +
606 +               ext_debug(inode,
607 +                               "int.index at %d (block %u): %d -> %d\n",
608 +                               i, (unsigned) newblock,
609 +                               (int) border,
610 +                               (int) oldblock);
611 +               /* copy indexes */
612 +               m = 0;
613 +               path[i].p_idx++;
614 +               ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
615 +                               EXT_MAX_INDEX(path[i].p_hdr));
616 +               EXT_ASSERT(EXT_MAX_INDEX(path[i].p_hdr) ==
617 +                               EXT_LAST_INDEX(path[i].p_hdr));
618 +               while (path[i].p_idx <=
619 +                               EXT_MAX_INDEX(path[i].p_hdr)) {
620 +                       ext_debug(inode, "%d: move %d:%d in new index\n",
621 +                                       i, path[i].p_idx->e_block,
622 +                                       path[i].p_idx->e_leaf);
623 +                       memmove(++fidx, path[i].p_idx++,
624 +                                       sizeof(struct ext3_extent_idx));
625 +                       neh->e_num++;
626 +                       m++;
627 +               }
628 +
629 +               mark_buffer_uptodate(bh, 1);
630 +               unlock_buffer(bh);
631 +
632 +               if ((err = ext3_journal_dirty_metadata(handle, bh)))
633 +                       goto cleanup;
634 +               brelse(bh);
635 +               bh = NULL;
636 +
637 +               /* correct old index */
638 +               if (m) {
639 +                       err = ext3_ext_get_access(handle,inode,path+i);
640 +                       if (err)
641 +                               goto cleanup;
642 +                       path[i].p_hdr->e_num -= m;
643 +                       err = ext3_ext_dirty(handle, inode, path + i);
644 +                       if (err)
645 +                               goto cleanup;
646 +               }
647 +
648 +               i--;
649 +       }
650 +
651 +       /* insert new index */
652 +       if (!err)
653 +               err = ext3_ext_insert_index(handle, inode, path + at,
654 +                                               border, newblock);
655 +
656 +cleanup:
657 +       if (bh) {
658 +               if (buffer_locked(bh))
659 +                       unlock_buffer(bh);
660 +               brelse(bh);
661 +       }
662 +
663 +       if (err) {
664 +               /* free all allocated blocks in error case */
665 +               for (i = 0; i < depth; i++)
666 +                       if (ablocks[i])
667 +                               ext3_free_blocks(handle, inode,
668 +                                               ablocks[i], 1);
669 +       }
670 +       kfree(ablocks);
671 +
672 +       return err;
673 +}
674 +
675 +/*
676 + * routine implements tree growing procedure:
677 + *  - allocates new block
678 + *  - moves top-level data (index block or leaf) into the new block
679 + *  - initialize new top-level, creating index that points to the
680 + *    just created block
681 + */
682 +static int ext3_ext_grow_indepth(handle_t *handle, struct inode *inode,
683 +                                       struct ext3_ext_path *path,
684 +                                       struct ext3_extent *newext)
685 +{
686 +       struct buffer_head *bh;
687 +       struct ext3_ext_path *curp = path;
688 +       struct ext3_extent_header *neh;
689 +       struct ext3_extent_idx *fidx;
690 +       int len, err = 0;
691 +       long newblock;
692 +
693 +       /*
694 +        * use the block already allocated by the caller for the new root block
695 +        */
696 +       newblock = newext->e_start++;
697 +       if (newext->e_num == 0) {
698 +               /* 
699 +                * FIXME: if this may happen, then we have to handle
700 +                * possible error and free allocated block
701 +                */
702 +               printk("grow_indepth with zero blocks\n");
703 +               newblock = ext3_new_block(handle, inode,
704 +                                               newblock, 0, 0, &err);
705 +       } else
706 +               newext->e_num--;
707 +       
708 +       bh = sb_getblk(inode->i_sb, newblock);
709 +       if (!bh) {
710 +               err = -EIO;
711 +               ext3_std_error(inode->i_sb, err);
712 +               return err;
713 +       }
714 +       lock_buffer(bh);
715 +
716 +       if ((err = ext3_journal_get_create_access(handle, bh))) {
717 +               unlock_buffer(bh);
718 +               goto out;       
719 +       }
720 +
721 +       /* move top-level index/leaf into new block */
722 +       len = sizeof(struct ext3_extent_header) +
723 +               sizeof(struct ext3_extent) * curp->p_hdr->e_max;
724 +       EXT_ASSERT(len >= 0 && len < 4096);
725 +       memmove(bh->b_data, curp->p_hdr, len);
726 +
727 +       /* set size of new block */
728 +       neh = (struct ext3_extent_header *) bh->b_data;
729 +       neh->e_max = ext3_ext_space_block(inode);
730 +       mark_buffer_uptodate(bh, 1);
731 +       unlock_buffer(bh);
732 +
733 +       if ((err = ext3_journal_dirty_metadata(handle, bh)))
734 +               goto out;
735 +
736 +       /* create index in new top-level index: num,max,pointer */
737 +       if ((err = ext3_ext_get_access(handle, inode, curp)))
738 +               goto out;
739 +
740 +       curp->p_hdr->e_max = ext3_ext_space_inode_idx(inode);
741 +       curp->p_hdr->e_num = 1;
742 +       curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
743 +       curp->p_idx->e_block = EXT_FIRST_EXTENT(path[0].p_hdr)->e_block;
744 +       curp->p_idx->e_leaf = newblock;
745 +
746 +       neh = (struct ext3_extent_header *) EXT3_I(inode)->i_data;
747 +       fidx = EXT_FIRST_INDEX(neh);
748 +       ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %d\n",
749 +                       neh->e_num, neh->e_max, fidx->e_block, fidx->e_leaf); 
750 +
751 +       EXT3_I(inode)->i_depth++;
752 +       err = ext3_ext_dirty(handle, inode, curp);
753 +out:
754 +       brelse(bh);
755 +
756 +       return err;
757 +}
758 +
759 +/*
760 + * routine finds a free index entry and adds a new leaf. if no free index is found,
761 + * then it requests in-depth growing
762 + */
763 +static int ext3_ext_create_new_leaf(handle_t *handle, struct inode *inode,
764 +                                       struct ext3_ext_path *path,
765 +                                       struct ext3_extent *newext)
766 +{
767 +       long newblock = newext->e_start;
768 +       struct ext3_ext_path *curp;
769 +       int depth, i, err = 0;
770 +
771 +repeat:
772 +       i = depth = EXT3_I(inode)->i_depth;
773 +       
774 +       /* walk up to the tree and look for free index entry */
775 +       curp = path + depth;
776 +       while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
777 +               i--;
778 +               curp--;
779 +       }
780 +
781 +       /* we use an already allocated block for the index block,
782 +        * so subsequent data blocks should be contiguous */
783 +       if (EXT_HAS_FREE_INDEX(curp)) {
784 +               /* if we found index with free entry, then use that
785 +                * entry: create all needed subtree and add new leaf */
786 +               err = ext3_ext_split(handle, inode, path, newext, i);
787 +
788 +               /* refill path */
789 +               ext3_ext_drop_refs(inode, path);
790 +               path = ext3_ext_find_extent(inode, newext->e_block, path);
791 +               if (IS_ERR(path))
792 +                       err = PTR_ERR(path);
793 +       } else {
794 +               /* tree is full, time to grow in depth */
795 +               err = ext3_ext_grow_indepth(handle, inode, path, newext);
796 +
797 +               /* refill path */
798 +               ext3_ext_drop_refs(inode, path);
799 +               path = ext3_ext_find_extent(inode, newext->e_block, path);
800 +               if (IS_ERR(path))
801 +                       err = PTR_ERR(path);
802 +       
803 +               /*
804 +                * only the first grow (depth 0 -> 1) produces free space;
805 +                * in all other cases we have to split the grown tree
806 +                */
807 +               depth = EXT3_I(inode)->i_depth;
808 +               if (path[depth].p_hdr->e_num == path[depth].p_hdr->e_max) {
809 +                       /* now we need split */
810 +                       goto repeat;
811 +               }
812 +       }
813 +
814 +       if (err)
815 +               return err;
816 +
817 +       /*
818 +        * probably we've used some blocks from extent
819 +        * let's allocate new block for it
820 +        */
821 +       if (newext->e_num == 0 && !err) {
822 +               newext->e_start =
823 +                       ext3_new_block(handle, inode, newblock,
824 +                                       0, 0, &err);
825 +               if (newext->e_start != 0)
826 +                       newext->e_num = 1;
827 +       }
828 +
829 +       return 0;
830 +}
831 +
832 +/*
833 + * returns next allocated block or 0xffffffff
834 + * NOTE: it considers the block number from an index entry as an
835 + * allocated block. thus, index entries have to be consistent
836 + * with the leaves
837 + */
838 +static inline unsigned ext3_ext_next_allocated_block(struct inode *inode,
839 +                                               struct ext3_ext_path *path)
840 +{
841 +       int depth;
842 +
843 +       EXT_ASSERT(path != NULL);
844 +       depth = path->p_depth;
845 +
846 +       if (depth == 0 && path->p_ext == NULL)
847 +               return 0xffffffff;
848 +
849 +       /* FIXME: what if index isn't full ?! */
850 +       while (depth >= 0) {
851 +               if (depth == path->p_depth) {
852 +                       /* leaf */
853 +                       if (path[depth].p_ext !=
854 +                                       EXT_LAST_EXTENT(path[depth].p_hdr))
855 +                               return path[depth].p_ext[1].e_block;
856 +               } else {
857 +                       /* index */
858 +                       if (path[depth].p_idx !=
859 +                                       EXT_LAST_INDEX(path[depth].p_hdr))
860 +                               return path[depth].p_idx[1].e_block;
861 +               }
862 +               depth--;        
863 +       }
864 +
865 +       return 0xffffffff;
866 +}
867 +
868 +/*
869 + * returns first allocated block from next leaf or 0xffffffff
870 + */
871 +static unsigned ext3_ext_next_leaf_block(struct inode *inode,
872 +                                               struct ext3_ext_path *path)
873 +{
874 +       int depth;
875 +
876 +       EXT_ASSERT(path != NULL);
877 +       depth = path->p_depth;
878 +
879 +       /* zero-tree has no leaf blocks at all */
880 +       if (depth == 0)
881 +               return 0xffffffff;
882 +
883 +       /* go to index block */
884 +       depth--;
885 +       
886 +       while (depth >= 0) {
887 +               if (path[depth].p_idx !=
888 +                               EXT_LAST_INDEX(path[depth].p_hdr))
889 +                       return path[depth].p_idx[1].e_block;
890 +               depth--;        
891 +       }
892 +
893 +       return 0xffffffff;
894 +}
895 +
896 +/*
897 + * if leaf gets modified and modified extent is first in the leaf
898 + * then we have to correct all indexes above
899 + * TODO: do we need to correct tree in all cases?
900 + */
901 +int ext3_ext_correct_indexes(handle_t *handle, struct inode *inode,
902 +                               struct ext3_ext_path *path)
903 +{
904 +       int depth = EXT3_I(inode)->i_depth;     
905 +       struct ext3_extent_header *eh;
906 +       struct ext3_extent *ex;
907 +       long border;
908 +       int k, err = 0;
909 +       
910 +       eh = path[depth].p_hdr;
911 +       ex = path[depth].p_ext;
912 +
913 +       EXT_ASSERT(ex);
914 +       EXT_ASSERT(eh);
915 +       
916 +       if (depth == 0) {
917 +               /* there is no tree at all */
918 +               return 0;
919 +       }
920 +       
921 +       if (ex != EXT_FIRST_EXTENT(eh)) {
922 +               /* we correct the tree only if the first extent in the leaf got modified */
923 +               return 0;
924 +       }
925 +       
926 +       /*
927 +        * TODO: we need correction if border is smaller than the current one
928 +        */
929 +       k = depth - 1;
930 +       border = path[depth].p_ext->e_block;
931 +       if ((err = ext3_ext_get_access(handle, inode, path + k)))
932 +               return err;
933 +       path[k].p_idx->e_block = border;
934 +       if ((err = ext3_ext_dirty(handle, inode, path + k)))
935 +               return err;
936 +
937 +       while (k--) {
938 +               /* change all left-side indexes */
939 +               if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
940 +                       break;
941 +               if ((err = ext3_ext_get_access(handle, inode, path + k)))
942 +                       break;
943 +               path[k].p_idx->e_block = border;
944 +               if ((err = ext3_ext_dirty(handle, inode, path + k)))
945 +                       break;
946 +       }
947 +
948 +       return err;
949 +}
950 +
951 +/*
952 + * this routine tries to merge the requested extent into the existing
953 + * extent or inserts requested extent as new one into the tree,
954 + * creating new leaf in no-space case
955 + */
956 +int ext3_ext_insert_extent(handle_t *handle, struct inode *inode,
957 +                               struct ext3_ext_path *path,
958 +                               struct ext3_extent *newext)
959 +{
960 +       int depth, len;
961 +       struct ext3_extent_header * eh;
962 +       struct ext3_extent *ex;
963 +       struct ext3_extent *nearex; /* nearest extent */
964 +       struct ext3_ext_path *npath = NULL;
965 +       int err;
966 +
967 +       depth = EXT3_I(inode)->i_depth; 
968 +       if ((ex = path[depth].p_ext)) {
969 +               /* try to insert block into found extent and return */
970 +               if (ex->e_block + ex->e_num == newext->e_block &&
971 +                               ex->e_start + ex->e_num == newext->e_start) {
972 +#ifdef AGRESSIVE_TEST
973 +                       if (ex->e_num >= 2)
974 +                               goto repeat;
975 +#endif
976 +                       if ((err = ext3_ext_get_access(handle, inode,
977 +                                                       path + depth)))
978 +                               return err;
979 +                       ext_debug(inode, "append %d block to %d:%d (from %d)\n",
980 +                                       newext->e_num, ex->e_block, ex->e_num,
981 +                                       ex->e_start);
982 +                       ex->e_num += newext->e_num;
983 +                       err = ext3_ext_dirty(handle, inode, path + depth);
984 +                       return err;
985 +               }
986 +       }
987 +
988 +repeat:
989 +       depth = EXT3_I(inode)->i_depth; 
990 +       eh = path[depth].p_hdr;
991 +       if (eh->e_num == eh->e_max) {
992 +               /* probably next leaf has space for us? */
993 +               int next = ext3_ext_next_leaf_block(inode, path);
994 +               if (next != 0xffffffff) {
995 +                       ext_debug(inode, "next leaf block - %d\n", next);
996 +                       EXT_ASSERT(!npath);
997 +                       npath = ext3_ext_find_extent(inode, next, NULL);
998 +                       if (IS_ERR(npath))
999 +                               return PTR_ERR(npath);
1000 +                       EXT_ASSERT(npath->p_depth == path->p_depth);
1001 +                       eh = npath[depth].p_hdr;
1002 +                       if (eh->e_num < eh->e_max) {
1003 +                               ext_debug(inode,
1004 +                                               "next leaf has free ext(%d)\n",
1005 +                                               eh->e_num);
1006 +                               path = npath;
1007 +                               goto repeat;
1008 +                       }
1009 +                       ext_debug(inode, "next leaf has no free space(%d,%d)\n",
1010 +                                       eh->e_num, eh->e_max);
1011 +               }
1012 +               /*
1013 +                * there is no free space in found leaf
1014 +                * we're gonna add new leaf in the tree
1015 +                */
1016 +               err = ext3_ext_create_new_leaf(handle, inode, path, newext);
1017 +               if (err)
1018 +                       goto cleanup;
1019 +               goto repeat;
1020 +       }
1021 +
1022 +       nearex = path[depth].p_ext;
1023 +
1024 +       if ((err = ext3_ext_get_access(handle, inode, path + depth)))
1025 +               goto cleanup;
1026 +
1027 +       if (!nearex) {
1028 +               /* there is no extent in this leaf, create first one */
1029 +               ext_debug(inode, "first extent in the leaf: %d:%d:%d\n",
1030 +                               newext->e_block, newext->e_start,
1031 +                               newext->e_num);
1032 +               eh->e_num++;
1033 +               path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1034 +
1035 +       } else if (newext->e_block > nearex->e_block) {
1036 +               EXT_ASSERT(newext->e_block != nearex->e_block);
1037 +               len = EXT_MAX_EXTENT(eh) - nearex;
1038 +               len = (len - 1) * sizeof(struct ext3_extent);
1039 +               len = len < 0 ? 0 : len;
1040 +               ext_debug(inode, "insert %d:%d:%d after: nearest 0x%p, "
1041 +                               "move %d from 0x%p to 0x%p\n",
1042 +                               newext->e_block, newext->e_start, newext->e_num,
1043 +                               nearex, len, nearex + 1, nearex + 2);
1044 +               ext3_ext_check_boundary(inode, path + depth, nearex + 2, len);
1045 +               memmove(nearex + 2, nearex + 1, len);
1046 +               path[depth].p_ext = nearex + 1;
1047 +               eh->e_num++;
1048 +       } else {
1049 +               EXT_ASSERT(newext->e_block != nearex->e_block);
1050 +               len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext3_extent);
1051 +               len = len < 0 ? 0 : len;
1052 +               ext_debug(inode, "insert %d:%d:%d before: nearest 0x%p, "
1053 +                               "move %d from 0x%p to 0x%p\n",
1054 +                               newext->e_block, newext->e_start, newext->e_num,
1055 +                               nearex, len, nearex + 1, nearex + 2);
1056 +               
1057 +               memmove(nearex + 1, nearex, len);
1058 +               path[depth].p_ext = nearex;
1059 +       }
1060 +
1061 +       if (!err) {
1062 +               eh->e_num++;
1063 +               nearex = path[depth].p_ext;
1064 +               nearex->e_block = newext->e_block;
1065 +               nearex->e_start = newext->e_start;
1066 +               nearex->e_num = newext->e_num;
1067 +               EXT_ASSERT(nearex->e_num < EXT3_BLOCKS_PER_GROUP(inode->i_sb) &&
1068 +                               nearex->e_num > 0);
1069 +
1070 +               /* time to correct all indexes above */
1071 +               err = ext3_ext_correct_indexes(handle, inode, path);
1072 +       }
1073 +
1074 +       err = ext3_ext_dirty(handle, inode, path + depth);
1075 +
1076 +cleanup:
1077 +       if (npath) {
1078 +               ext3_ext_drop_refs(inode, npath);
1079 +               kfree(npath);
1080 +       }
1081 +               
1082 +       return err;
1083 +}
1084 +
1085 +int ext3_ext_get_block(handle_t *handle, struct inode *inode, long iblock,
1086 +                       struct buffer_head *bh_result, int create,
1087 +                       int extend_disksize)
1088 +{
1089 +       struct ext3_ext_path *path;
1090 +       int depth = EXT3_I(inode)->i_depth;
1091 +       struct ext3_extent newex;
1092 +       struct ext3_extent *ex;
1093 +       int goal, newblock, err = 0;
1094 +
1095 +       ext_debug(inode, "block %d requested for inode %u, bh_result 0x%p\n",
1096 +                       (int) iblock, (unsigned) inode->i_ino, bh_result);
1097 +       bh_result->b_state &= ~(1UL << BH_New);
1098 +
1099 +       down(&EXT3_I(inode)->i_ext_sem);
1100 +
1101 +       /* find extent for this block */
1102 +       path = ext3_ext_find_extent(inode, iblock, NULL);
1103 +       if (IS_ERR(path)) {
1104 +               err = PTR_ERR(path);
1105 +               goto out2;
1106 +       }
1107 +
1108 +       if ((ex = path[depth].p_ext)) {
1109 +               /* if the found extent covers the block, simply return it */
1110 +               if (iblock >= ex->e_block && iblock < ex->e_block + ex->e_num) {
1111 +                       newblock = iblock - ex->e_block + ex->e_start;
1112 +                       ext_debug(inode, "%d fit into %d:%d -> %d\n",
1113 +                                       (int) iblock, ex->e_block, ex->e_num,
1114 +                                       newblock);
1115 +                       goto out;
1116 +               }
1117 +       }
1118 +
1119 +       /*
1120 +        * we must not try to create a block if the create flag is zero
1121 +        */
1122 +       if (!create) 
1123 +               goto out2;
1124 +
1125 +       /* allocate new block */
1126 +       goal = ext3_ext_find_goal(inode, path);
1127 +       newblock = ext3_new_block(handle, inode, goal, 0, 0, &err);
1128 +       if (!newblock)
1129 +               goto out2;
1130 +       ext_debug(inode, "allocate new block: goal %d, found %d\n",
1131 +                       goal, newblock);
1132 +
1133 +       /* try to insert new extent into found leaf and return */
1134 +       newex.e_block = iblock;
1135 +       newex.e_start = newblock;
1136 +       newex.e_num = 1;
1137 +       err = ext3_ext_insert_extent(handle, inode, path, &newex);
1138 +       if (err)
1139 +               goto out2;
1140 +       
1141 +       /* the previous routine could have used the block we allocated */
1142 +       newblock = newex.e_start;
1143 +       bh_result->b_state |= (1UL << BH_New);
1144 +
1145 +out:
1146 +       ext3_ext_show_leaf(inode, path);
1147 +       bh_result->b_dev = inode->i_dev;
1148 +       bh_result->b_blocknr = newblock;
1149 +       bh_result->b_state |= (1UL << BH_Mapped);
1150 +out2:
1151 +       ext3_ext_drop_refs(inode, path);
1152 +       kfree(path);
1153 +       up(&EXT3_I(inode)->i_ext_sem);
1154 +
1155 +       return err;     
1156 +}
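The diffstat above lists about 30 changed lines in fs/ext3/inode.c; that hunk is not included in this excerpt, but it presumably dispatches on EXT3_EXTENTS_FL (the inode flag tested elsewhere in this patch). A hedged sketch of such a dispatch, with the wrapper name and the fallback purely illustrative:

    /* illustrative only -- not the actual inode.c hunk from this patch */
    static int ext3_get_block_wrap(handle_t *handle, struct inode *inode,
                                   long iblock, struct buffer_head *bh_result,
                                   int create, int extend_disksize)
    {
            if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
                    /* extent-mapped inode: use the new extents code */
                    return ext3_ext_get_block(handle, inode, iblock, bh_result,
                                              create, extend_disksize);
            /* a non-extent inode would go through the classic indirect-block
             * mapping path here; its exact name is not shown in this patch */
            return -ENOSYS;
    }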
1157 +
1158 +/*
1159 + * returns 1 if the current index has to be freed (even partially)
1160 + */
1161 +static int ext3_ext_more_to_truncate(struct inode *inode,
1162 +                               struct ext3_ext_path *path)
1163 +{
1164 +       EXT_ASSERT(path->p_idx);
1165 +
1166 +       if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1167 +               return 0;
1168 +
1169 +       /*
1170 +        * if truncate on a deeper level happened, it wasn't partial,
1171 +        * so we have to consider the current index for truncation
1172 +        */
1173 +       if (path->p_hdr->e_num == path->p_block)
1174 +               return 0;
1175 +
1176 +       /*
1177 +        * store the actual number of indexes so we can tell whether it
1178 +        * got changed at the next iteration
1179 +        */
1180 +       path->p_block = path->p_hdr->e_num;
1181 +       
1182 +       return 1;
1183 +}
1184 +
1185 +/*
1186 + * routine removes index from the index block
1187 + * it's used in truncate case only. thus all requests are for
1188 + * last index in the block only
1189 + */
1190 +int ext3_ext_remove_index(handle_t *handle, struct inode *inode,
1191 +                                       struct ext3_ext_path *path)
1192 +{
1193 +       struct buffer_head *bh;
1194 +       int err;
1195 +       
1196 +       /* free index block */
1197 +       path--;
1198 +       EXT_ASSERT(path->p_hdr->e_num);
1199 +       if ((err = ext3_ext_get_access(handle, inode, path)))
1200 +               return err;
1201 +       path->p_hdr->e_num--;
1202 +       if ((err = ext3_ext_dirty(handle, inode, path)))
1203 +               return err;
1204 +       bh = sb_get_hash_table(inode->i_sb, path->p_idx->e_leaf);
1205 +       ext3_forget(handle, 0, inode, bh, path->p_idx->e_leaf);
1206 +       ext3_free_blocks(handle, inode, path->p_idx->e_leaf, 1);
1207 +
1208 +       ext_debug(inode, "index is empty, remove it, free block %d\n",
1209 +                       path->p_idx->e_leaf);
1210 +       return err;
1211 +}
1212 +
1213 +/*
1214 + * returns 1 if the current extent needs to be freed (even partially),
1215 + * otherwise returns 0
1216 + */
1217 +int ext3_ext_more_leaves_to_truncate(struct inode *inode,
1218 +                                       struct ext3_ext_path *path)
1219 +{
1220 +       unsigned blocksize = inode->i_sb->s_blocksize;
1221 +       struct ext3_extent *ex = path->p_ext;
1222 +       int last_block; 
1223 +
1224 +       EXT_ASSERT(ex);
1225 +
1226 +       /* is there anything left in the current leaf? */
1227 +       if (ex < EXT_FIRST_EXTENT(path->p_hdr))
1228 +               return 0;
1229 +       
1230 +       last_block = (inode->i_size + blocksize-1)
1231 +                       >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
1232 +
1233 +       if (last_block >= ex->e_block + ex->e_num)
1234 +               return 0;
1235 +
1236 +       /* seems this extent has to be freed */
1237 +       return 1;
1238 +}
1239 +
1240 +handle_t *ext3_ext_journal_restart(handle_t *handle, int needed)
1241 +{
1242 +       int err;
1243 +
1244 +       if (handle->h_buffer_credits > needed)
1245 +               return handle;
1246 +       if (!ext3_journal_extend(handle, needed))
1247 +               return handle;
1248 +       err = ext3_journal_restart(handle, needed);
1249 +       
1250 +       return handle;
1251 +}
1252 +
1253 +/*
1254 + * this routine calculates the max number of blocks to be modified
1255 + * while freeing an extent; it is intended to be used in the truncate path
1256 + */
1257 +static int ext3_ext_calc_credits(struct inode *inode,
1258 +                                       struct ext3_ext_path *path,
1259 +                                       int num)
1260 +{
1261 +       int depth = EXT3_I(inode)->i_depth;
1262 +       int needed;
1263 +       
1264 +       /*
1265 +        * an extent can't cross a group boundary, so we will modify a
1266 +        * single bitmap block and a single group descriptor
1267 +        */
1268 +       needed = 2;
1269 +
1270 +       /*
1271 +        * if this is last extent in a leaf, then we have to
1272 +        * free leaf block and remove pointer from index above.
1273 +        * that pointer could be last in index block, so we'll
1274 +        * have to remove it too. this way we could modify/free
1275 +        * the whole path + root index (inode stored) will be
1276 +        * modified
1277 +        */
1278 +       if (!path || (num == path->p_ext->e_num &&
1279 +                               path->p_ext == EXT_FIRST_EXTENT(path->p_hdr)))
1280 +               needed += (depth * EXT3_ALLOC_NEEDED) + 1;
1281 +
1282 +       /*
1283 +        * it seems current calculation has bug
1284 +        * this is workaround -bzzz
1285 +        */
1286 +       needed += 10;
1287 +
1288 +       return needed;
1289 +}
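A worked instance of the estimate above (plain arithmetic, not from the patch): with i_depth = 2 and a request to free an entire extent that sits first in its leaf, the second term applies, so needed = 2 + (2 * EXT3_ALLOC_NEEDED + 1) + 10 = 2 + 5 + 10 = 17 credits are requested from ext3_ext_journal_restart().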
1290 +
1291 +/*
1292 + * core of the truncate procedure:
1293 + * - calculates what part of each extent in the requested leaf
1294 + *   needs to be freed
1295 + * - frees and forgets these blocks
1296 + *
1297 + * TODO: we could optimize and free several extents during
1298 + *       single journal_restart()-journal_restart() cycle
1299 + */
1300 +static int ext3_ext_truncate_leaf(handle_t *handle,
1301 +                                       struct inode *inode,
1302 +                                       struct ext3_ext_path *path,
1303 +                                       int depth)
1304 +{
1305 +       unsigned blocksize = inode->i_sb->s_blocksize;
1306 +       int last_block; 
1307 +       int i, err = 0, sf, num;
1308 +
1309 +       ext_debug(inode, "level %d - leaf\n", depth);
1310 +       if (!path->p_hdr)
1311 +               path->p_hdr =
1312 +                       (struct ext3_extent_header *) path->p_bh->b_data;
1313 +
1314 +       EXT_ASSERT(path->p_hdr->e_num <= path->p_hdr->e_max);
1315 +       
1316 +       last_block = (inode->i_size + blocksize-1)
1317 +                                       >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
1318 +       path->p_ext = EXT_LAST_EXTENT(path->p_hdr);
1319 +       while (ext3_ext_more_leaves_to_truncate(inode, path)) {
1320 +
1321 +               /* what part of the extent has to be freed? */
1322 +               sf = last_block > path->p_ext->e_block ?
1323 +                       last_block : path->p_ext->e_block;
1324 +
1325 +               /* number of blocks from extent to be freed */
1326 +               num = path->p_ext->e_block + path->p_ext->e_num - sf;
1327 +
1328 +               /* calc first physical block to be freed */
1329 +               sf = path->p_ext->e_start + (sf - path->p_ext->e_block);
1330 +
1331 +               i = ext3_ext_calc_credits(inode, path, num);
1332 +               handle = ext3_ext_journal_restart(handle, i);
1333 +               if (IS_ERR(handle))
1334 +                       return PTR_ERR(handle);
1335 +               
1336 +               ext_debug(inode, "free extent %d:%d:%d -> free %d:%d\n",
1337 +                               path->p_ext->e_block, path->p_ext->e_start,
1338 +                               path->p_ext->e_num, sf, num);
1339 +               for (i = 0; i < num; i++) {
1340 +                       struct buffer_head *bh =
1341 +                               sb_get_hash_table(inode->i_sb, sf + i);
1342 +                       ext3_forget(handle, 0, inode, bh, sf + i);
1343 +               }
1344 +               ext3_free_blocks(handle, inode, sf, num);
1345 +
1346 +               /* collect extents usage stats */
1347 +               spin_lock(&EXT3_SB(inode->i_sb)->s_ext_lock);
1348 +               EXT3_SB(inode->i_sb)->s_ext_extents++;
1349 +               EXT3_SB(inode->i_sb)->s_ext_blocks += num;
1350 +               spin_unlock(&EXT3_SB(inode->i_sb)->s_ext_lock);
1351 +
1352 +               /* reduce extent */
1353 +               if ((err = ext3_ext_get_access(handle, inode, path)))
1354 +                       return err;
1355 +               path->p_ext->e_num -= num;
1356 +               if (path->p_ext->e_num == 0)
1357 +                       path->p_hdr->e_num--;
1358 +               if ((err = ext3_ext_dirty(handle, inode, path)))
1359 +                       return err;
1360 +
1361 +               path->p_ext--;
1362 +       }
1363 +       
1364 +       /* if this leaf is free, then we should
1365 +        * remove it from index block above */
1366 +       if (path->p_hdr->e_num == 0 && depth > 0) 
1367 +               err = ext3_ext_remove_index(handle, inode, path);
1368 +
1369 +       return err;
1370 +}
1371 +
1372 +static void ext3_ext_collect_stats(struct inode *inode)
1373 +{
1374 +       int depth;
1375 +       
1376 +       /* skip inodes that still use the good old block map */
1377 +       if (!(EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL))
1378 +               return;
1379 +       
1380 +       /* collect on full truncate only */
1381 +       if (inode->i_size)
1382 +               return;
1383 +
1384 +       depth = EXT3_I(inode)->i_depth;
1385 +       if (depth < EXT3_SB(inode->i_sb)->s_ext_mindepth)
1386 +                EXT3_SB(inode->i_sb)->s_ext_mindepth = depth;
1387 +       if (depth > EXT3_SB(inode->i_sb)->s_ext_maxdepth)
1388 +                EXT3_SB(inode->i_sb)->s_ext_maxdepth = depth;
1389 +       EXT3_SB(inode->i_sb)->s_ext_sum += depth;
1390 +       EXT3_SB(inode->i_sb)->s_ext_count++;
1391 +       
1392 +}
1393 +
1394 +void ext3_ext_truncate(struct inode * inode)
1395 +{
1396 +       struct address_space *mapping = inode->i_mapping;
1397 +       struct ext3_ext_path *path;
1398 +       struct page * page;
1399 +       handle_t *handle;
1400 +       int i, depth, err = 0;
1401 +
1402 +       ext3_ext_collect_stats(inode);
1403 +
1404 +       /*
1405 +        * We have to lock the EOF page here, because lock_page() nests
1406 +        * outside journal_start().
1407 +        */
1408 +       if ((inode->i_size & (inode->i_sb->s_blocksize - 1)) == 0) {
1409 +               /* Block boundary? Nothing to do */
1410 +               page = NULL;
1411 +       } else {
1412 +               page = grab_cache_page(mapping,
1413 +                               inode->i_size >> PAGE_CACHE_SHIFT);
1414 +               if (!page)
1415 +                       return;
1416 +       }
1417 +
1418 +       /*
1419 +        * probably first extent we're gonna free will be last in block
1420 +        */
1421 +       i = ext3_ext_calc_credits(inode, NULL, 0);
1422 +       handle = ext3_journal_start(inode, i);
1423 +       if (IS_ERR(handle)) {
1424 +               if (page) {
1425 +                       clear_highpage(page);
1426 +                       flush_dcache_page(page);
1427 +                       unlock_page(page);
1428 +                       page_cache_release(page);
1429 +               }
1430 +               return;
1431 +       }
1432 +
1433 +       if (page)
1434 +               ext3_block_truncate_page(handle, mapping, inode->i_size, page,
1435 +                                               inode->i_sb->s_blocksize);
1436 +
1437 +       down(&EXT3_I(inode)->i_ext_sem);
1438 +
1439 +       /* 
1440 +        * TODO: an optimization is possible here;
1441 +        * we probably do not need to scan at all,
1442 +        * because page truncation is enough
1443 +        */
1444 +       if (ext3_orphan_add(handle, inode))
1445 +               goto out_stop;
1446 +
1447 +       /* we have to know where to truncate from in the crash case */
1448 +       EXT3_I(inode)->i_disksize = inode->i_size;
1449 +       ext3_mark_inode_dirty(handle, inode);
1450 +
1451 +       /*
1452 +        * we start scanning from the right side, freeing all the blocks
1453 +        * after i_size and descending into the tree
1454 +        */
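+       /*
+        * in outline: path[] acts as an explicit stack with one entry per
+        * tree level; at each index level p_idx walks right-to-left, and
+        * reaching i == depth means a leaf, which is truncated and freed
+        */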
1455 +       i = 0;
1456 +       depth = EXT3_I(inode)->i_depth;
1457 +       path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 1), GFP_KERNEL);
1458 +       if (path == NULL) {
1459 +               ext3_error(inode->i_sb, "ext3_ext_truncate",
1460 +                               "Can't allocate path array");
1461 +               goto out_stop;
1462 +       }
1463 +       memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
1464 +
1465 +       path[i].p_hdr = (struct ext3_extent_header *) EXT3_I(inode)->i_data;
1466 +       while (i >= 0 && err == 0) {
1467 +               if (i == depth) {
1468 +                       /* this is leaf block */
1469 +                       err = ext3_ext_truncate_leaf(handle, inode,
1470 +                                                       path + i, i);
1471 +                       /* root level has p_bh == NULL, brelse() handles NULL fine */
1472 +                       brelse(path[i].p_bh);
1473 +                       i--;
1474 +                       continue;
1475 +               }
1476 +               
1477 +               /* this is index block */
1478 +               if (!path[i].p_hdr) {
1479 +                       path[i].p_hdr =
1480 +                               (struct ext3_extent_header *) path[i].p_bh->b_data;
1481 +                       ext_debug(inode, "initialize header\n");
1482 +               }
1483 +
1484 +               EXT_ASSERT(path[i].p_hdr->e_num <= path[i].p_hdr->e_max);
1485 +               
1486 +               if (!path[i].p_idx) {
1487 +                       /* this level hasn't been touched yet */
1488 +                       path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
1489 +                       path[i].p_block = path[i].p_hdr->e_num + 1;
1490 +                       ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
1491 +                                       path[i].p_hdr, path[i].p_hdr->e_num);
1492 +               } else {
1493 +                       /* we've already been here, look at the next index */
1494 +                       path[i].p_idx--;
1495 +               }
1496 +
1497 +               ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
1498 +                               i, EXT_FIRST_INDEX(path[i].p_hdr),
1499 +                               path[i].p_idx);
1500 +               if (ext3_ext_more_to_truncate(inode, path + i)) {
1501 +                       /* go to the next level */
1502 +                       ext_debug(inode, "move to level %d (block %d)\n", i+1,
1503 +                                       path[i].p_idx->e_leaf);
1504 +                       memset(path + i + 1, 0, sizeof(*path));
1505 +                       path[i+1].p_bh = sb_bread(inode->i_sb,
1506 +                                                       path[i].p_idx->e_leaf);
1507 +                       if (!path[i+1].p_bh) {
1508 +                               /* should we reset i_size? */
1509 +                               err = -EIO;
1510 +                               break;
1511 +                       }
1512 +                       i++;
1513 +               } else {
1514 +                       /* we've finished processing this index, go up */
1515 +                       if (path[i].p_hdr->e_num == 0 && i > 0) {
1516 +                               /* index is empty, remove it
1517 +                                * the handle must already be prepared by
1518 +                                * truncate_leaf()
1519 +                                */
1520 +                               err = ext3_ext_remove_index(handle, inode,
1521 +                                                               path + i);
1522 +                       }
1523 +                       /* root level has p_bh == NULL, brelse() handles NULL fine */
1524 +                       brelse(path[i].p_bh);
1525 +                       i--;
1526 +                       ext_debug(inode, "return to level %d\n", i);
1527 +               }
1528 +       }
1529 +
1530 +       /* TODO: flexible tree reduction should be here */
1531 +       if (path->p_hdr->e_num == 0) {
1532 +               /*
1533 +                * truncating to zero freed the whole tree,
1534 +                * so we need to reset i_depth
1535 +                */
1536 +               EXT3_I(inode)->i_depth = 0;
1537 +               path->p_hdr->e_max = 0;
1538 +               ext3_mark_inode_dirty(handle, inode);
1539 +       }
1540 +
1541 +       kfree(path);
1542 +
1543 +       /* In a multi-transaction truncate, we only make the final
1544 +        * transaction synchronous */
1545 +       if (IS_SYNC(inode))
1546 +               handle->h_sync = 1;
1547 +
1548 +out_stop:
1549 +       /*
1550 +        * If this was a simple ftruncate(), and the file will remain alive
1551 +        * then we need to clear up the orphan record which we created above.
1552 +        * However, if this was a real unlink then we were called by
1553 +        * ext3_delete_inode(), and we allow that function to clean up the
1554 +        * orphan info for us.
1555 +        */
1556 +       if (inode->i_nlink)
1557 +               ext3_orphan_del(handle, inode);
1558 +
1559 +       up(&EXT3_I(inode)->i_ext_sem);
1560 +       ext3_journal_stop(handle, inode);
1561 +}
1562 +
1563 +/*
1564 + * this routine calculates the max number of blocks we could modify
1565 + * in order to allocate a new block for an inode
1566 + */
1567 +int ext3_ext_writepage_trans_blocks(struct inode *inode, int num)
1568 +{
1569 +       struct ext3_inode_info *ei = EXT3_I(inode);
1570 +       int depth = ei->i_depth + 1;
1571 +       int needed;
1572 +       
1573 +       /*
1574 +        * the worst case we expect is creation of a
1575 +        * new root (growing in depth) with index splitting;
1576 +        * for splitting we have to consider depth + 1 because
1577 +        * the previous growth could have increased it
1578 +        */
1579 +
1580 +       /* 
1581 +        * growing in depth:
1582 +        * block allocation + new root + old root
1583 +        */
1584 +       needed = EXT3_ALLOC_NEEDED + 2;
1585 +
1586 +       /* index split. we may need:
1587 +        *   allocate intermediate indexes and a new leaf
1588 +        *   change two blocks at each level, except the root
1589 +        *   modify the root block (inode)
1590 +        */
1591 +       needed += (depth * EXT3_ALLOC_NEEDED) + (2 * depth) + 1;
1592 +
1593 +       /* the caller wants to allocate num blocks */
1594 +       needed *= num;
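+       /*
+        * for example, with EXT3_ALLOC_NEEDED == 2 and an existing tree of
+        * depth 1 (so depth == 2 here), this gives (2 + 2) + (2*2 + 2*2 + 1)
+        * = 13 credits per requested block, before the quota blocks below
+        */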
1595 +       
1596 +#ifdef CONFIG_QUOTA
1597 +       /* 
1598 +        * FIXME: the real calculation should be here;
1599 +        * it depends on the blockmap format of the quota file
1600 +        */
1601 +       needed += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
1602 +#endif
1603 +
1604 +       return needed;
1605 +}
1606 +
1607 +/*
1608 + * called at mount time
1609 + */
1610 +void ext3_ext_init(struct super_block *sb)
1611 +{
1612 +       /*
1613 +        * possible initialization would be here
1614 +        */
1615 +
1616 +       if (test_opt(sb, EXTENTS))
1617 +               printk("EXT3-fs: file extents enabled\n");
1618 +       spin_lock_init(&EXT3_SB(sb)->s_ext_lock);
1619 +}
1620 +
1621 +/*
1622 + * called at umount time
1623 + */
1624 +void ext3_ext_release(struct super_block *sb)
1625 +{
1626 +       struct ext3_sb_info *sbi = EXT3_SB(sb);
1627 +
1628 +       /* show collected stats */
1629 +       if (sbi->s_ext_count && sbi->s_ext_extents)
1630 +               printk("EXT3-fs: min depth - %d, max depth - %d, "
1631 +                               "ave. depth - %d, ave. blocks/extent - %d\n",
1632 +                               sbi->s_ext_mindepth,
1633 +                               sbi->s_ext_maxdepth,
1634 +                               sbi->s_ext_sum / sbi->s_ext_count,
1635 +                               sbi->s_ext_blocks / sbi->s_ext_extents);
1636 +}
1637 +
1638 --- linux-2.4.18-chaos-pdirops/fs/ext3/ialloc.c~ext3-extents-2.4.18-chaos-pdirops       2003-09-23 13:44:53.000000000 +0400
1639 +++ linux-2.4.18-chaos-pdirops-alexey/fs/ext3/ialloc.c  2003-09-23 14:29:32.000000000 +0400
1640 @@ -573,6 +573,10 @@ repeat:
1641         ei->i_prealloc_count = 0;
1642  #endif
1643         ei->i_block_group = i;
1644 +       if (test_opt(sb, EXTENTS))
1645 +               EXT3_I(inode)->i_flags |= EXT3_EXTENTS_FL;
1646 +       ei->i_depth = 0;
1647 +       sema_init(&ei->i_ext_sem, 1);
1648  
1649         if (ei->i_flags & EXT3_SYNC_FL)
1650                 inode->i_flags |= S_SYNC;
1651 --- linux-2.4.18-chaos-pdirops/fs/ext3/inode.c~ext3-extents-2.4.18-chaos-pdirops        2003-09-23 13:44:53.000000000 +0400
1652 +++ linux-2.4.18-chaos-pdirops-alexey/fs/ext3/inode.c   2003-09-23 14:29:32.000000000 +0400
1653 @@ -842,6 +842,15 @@ changed:
1654         goto reread;
1655  }
1656  
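+/*
+ * dispatch between the extent-based mapping and the classic indirect
+ * block map, depending on whether the inode has EXT3_EXTENTS_FL set
+ */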
1657 +static inline int
1658 +ext3_get_block_wrap(handle_t *handle, struct inode *inode, long block,
1659 +               struct buffer_head *bh, int create, int extend_disksize)
1660 +{
1661 +       if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
1662 +               return ext3_ext_get_block(handle, inode, block, bh, create, extend_disksize);
1663 +       return ext3_get_block_handle(handle, inode, block, bh, create, extend_disksize);
1664 +}
1665 +
1666  /*
1667   * The BKL is not held on entry here.
1668   */
1669 @@ -855,7 +864,7 @@ static int ext3_get_block(struct inode *
1670                 handle = ext3_journal_current_handle();
1671                 J_ASSERT(handle != 0);
1672         }
1673 -       ret = ext3_get_block_handle(handle, inode, iblock,
1674 +       ret = ext3_get_block_wrap(handle, inode, iblock,
1675                                 bh_result, create, 1);
1676         return ret;
1677  }
1678 @@ -882,7 +891,7 @@ ext3_direct_io_get_block(struct inode *i
1679                 }
1680         }
1681         if (ret == 0)
1682 -               ret = ext3_get_block_handle(handle, inode, iblock,
1683 +               ret = ext3_get_block_wrap(handle, inode, iblock,
1684                                         bh_result, create, 0);
1685         if (ret == 0)
1686                 bh_result->b_size = (1 << inode->i_blkbits);
1687 @@ -904,7 +913,7 @@ struct buffer_head *ext3_getblk(handle_t
1688         dummy.b_state = 0;
1689         dummy.b_blocknr = -1000;
1690         buffer_trace_init(&dummy.b_history);
1691 -       *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
1692 +       *errp = ext3_get_block_wrap(handle, inode, block, &dummy, create, 1);
1693         if (!*errp && buffer_mapped(&dummy)) {
1694                 struct buffer_head *bh;
1695                 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1696 @@ -1520,7 +1529,7 @@ ext3_block_truncate_page_prepare(struct 
1697   * This required during truncate. We need to physically zero the tail end
1698   * of that block so it doesn't yield old data if the file is later grown.
1699   */
1700 -static int ext3_block_truncate_page(handle_t *handle,
1701 +int ext3_block_truncate_page(handle_t *handle,
1702                                     struct address_space *mapping, loff_t from,
1703                                     struct page *page, unsigned blocksize)
1704  {
1705 @@ -1998,6 +2007,9 @@ void ext3_truncate(struct inode * inode)
1706  
1707         ext3_discard_prealloc(inode);
1708  
1709 +       if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
1710 +               return ext3_ext_truncate(inode);
1711 +
1712         blocksize = inode->i_sb->s_blocksize;
1713         last_block = (inode->i_size + blocksize-1)
1714                                         >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
1715 @@ -2436,6 +2448,8 @@ void ext3_read_inode(struct inode * inod
1716         ei->i_prealloc_count = 0;
1717  #endif
1718         ei->i_block_group = iloc.block_group;
1719 +       ei->i_depth = raw_inode->osd2.linux2.l_i_depth;
1720 +       sema_init(&ei->i_ext_sem, 1);
1721  
1722         /*
1723          * NOTE! The in-memory inode i_data array is in little-endian order
1724 @@ -2559,6 +2573,7 @@ static int ext3_do_update_inode(handle_t
1725                 raw_inode->i_fsize = 0;
1726         }
1727  #endif
1728 +       raw_inode->osd2.linux2.l_i_depth = ei->i_depth;
1729         raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
1730         if (!S_ISREG(inode->i_mode)) {
1731                 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
1732 @@ -2762,6 +2777,9 @@ int ext3_writepage_trans_blocks(struct i
1733         int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
1734         int ret;
1735         
1736 +       if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL)
1737 +               return ext3_ext_writepage_trans_blocks(inode, bpp);
1738 +
1739         if (ext3_should_journal_data(inode))
1740                 ret = 3 * (bpp + indirects) + 2;
1741         else
1742 @@ -3085,7 +3103,7 @@ int ext3_prep_san_write(struct inode *in
1743  
1744         /* alloc blocks one by one */
1745         for (i = 0; i < nblocks; i++) {
1746 -               ret = ext3_get_block_handle(handle, inode, blocks[i],
1747 +               ret = ext3_get_block_wrap(handle, inode, blocks[i],
1748                                                 &bh_tmp, 1, 1);
1749                 if (ret)
1750                         break;
1751 @@ -3146,7 +3164,7 @@ int ext3_map_inode_page(struct inode *in
1752                  if (blocks[i] != 0)
1753                          continue;
1754  
1755 -                rc = ext3_get_block_handle(handle, inode, iblock, &dummy, 1, 1);
1756 +                rc = ext3_get_block_wrap(handle, inode, iblock, &dummy, 1, 1);
1757                  if (rc) {
1758                          printk(KERN_INFO "ext3_map_inode_page: error reading "
1759                                 "block %ld\n", iblock);
1760 --- linux-2.4.18-chaos-pdirops/fs/ext3/Makefile~ext3-extents-2.4.18-chaos-pdirops       2003-09-23 13:44:48.000000000 +0400
1761 +++ linux-2.4.18-chaos-pdirops-alexey/fs/ext3/Makefile  2003-09-23 14:29:32.000000000 +0400
1762 @@ -12,7 +12,8 @@ O_TARGET := ext3.o
1763  export-objs := ext3-exports.o
1764  
1765  obj-y    := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
1766 -               ioctl.o namei.o super.o symlink.o xattr.o hash.o ext3-exports.o
1767 +               ioctl.o namei.o super.o symlink.o xattr.o hash.o ext3-exports.o \
1768 +               extents.o
1769  obj-m    := $(O_TARGET)
1770  
1771  include $(TOPDIR)/Rules.make
1772 --- linux-2.4.18-chaos-pdirops/fs/ext3/super.c~ext3-extents-2.4.18-chaos-pdirops        2003-09-23 13:44:53.000000000 +0400
1773 +++ linux-2.4.18-chaos-pdirops-alexey/fs/ext3/super.c   2003-09-23 14:29:33.000000000 +0400
1774 @@ -619,6 +619,7 @@ void ext3_put_super (struct super_block 
1775         kdev_t j_dev = sbi->s_journal->j_dev;
1776         int i;
1777  
1778 +       ext3_ext_release(sb);
1779         ext3_stop_delete_thread(sbi);
1780         ext3_xattr_put_super(sb);
1781         journal_destroy(sbi->s_journal);
1782 @@ -741,6 +742,12 @@ static int parse_options (char * options
1783                 else
1784  #endif
1785  
1786 +               if (!strcmp (this_char, "extents"))
1787 +                       set_opt (sbi->s_mount_opt, EXTENTS);
1788 +               else
1789 +               if (!strcmp (this_char, "extdebug"))
1790 +                       set_opt (sbi->s_mount_opt, EXTDEBUG);
1791 +               else
1792                 if (!strcmp (this_char, "bsddf"))
1793                         clear_opt (*mount_options, MINIX_DF);
1794                 else if (!strcmp (this_char, "nouid32")) {
1795 @@ -1471,6 +1478,7 @@ struct super_block * ext3_read_super (st
1796                 test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
1797                 test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
1798                 "writeback");
1799 +       ext3_ext_init(sb);
1800  
1801         if (test_opt(sb, PDIROPS)) {
1802                 printk (KERN_INFO "EXT3-fs: mounted filesystem with parallel dirops\n");
1803 --- linux-2.4.18-chaos-pdirops/include/linux/ext3_fs.h~ext3-extents-2.4.18-chaos-pdirops        2003-09-23 13:44:53.000000000 +0400
1804 +++ linux-2.4.18-chaos-pdirops-alexey/include/linux/ext3_fs.h   2003-09-23 14:29:33.000000000 +0400
1805 @@ -188,6 +188,7 @@ struct ext3_group_desc
1806  #define EXT3_IMAGIC_FL                 0x00002000 /* AFS directory */
1807  #define EXT3_JOURNAL_DATA_FL           0x00004000 /* file data should be journaled */
1808  #define EXT3_RESERVED_FL               0x80000000 /* reserved for ext3 lib */
1809 +#define EXT3_EXTENTS_FL                        0x00080000 /* Inode uses extents */
1810  
1811  #define EXT3_FL_USER_VISIBLE           0x00005FFF /* User visible flags */
1812  #define EXT3_FL_USER_MODIFIABLE                0x000000FF /* User modifiable flags */
1813 @@ -248,7 +249,7 @@ struct ext3_inode {
1814                 struct {
1815                         __u8    l_i_frag;       /* Fragment number */
1816                         __u8    l_i_fsize;      /* Fragment size */
1817 -                       __u16   i_pad1;
1818 +                       __u16   l_i_depth;
1819                         __u16   l_i_uid_high;   /* these 2 fields    */
1820                         __u16   l_i_gid_high;   /* were reserved2[0] */
1821                         __u32   l_i_reserved2;
1822 @@ -329,6 +330,8 @@ struct ext3_inode {
1823  #define EXT3_MOUNT_IOPEN               0x8000  /* Allow access via iopen */
1824  #define EXT3_MOUNT_IOPEN_NOPRIV                0x10000 /* Make iopen world-readable */
1825  #define EXT3_MOUNT_ASYNCDEL            0x20000 /* Delayed deletion */
1826 +#define EXT3_MOUNT_EXTENTS             0x40000 /* Extents support */
1827 +#define EXT3_MOUNT_EXTDEBUG            0x80000 /* Extents debug */
1828  
1829  /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
1830  #ifndef _LINUX_EXT2_FS_H
1831 @@ -720,6 +723,12 @@ extern void ext3_discard_prealloc (struc
1832  extern void ext3_dirty_inode(struct inode *);
1833  extern int ext3_change_inode_journal_flag(struct inode *, int);
1834  extern void ext3_truncate (struct inode *);
1835 +extern int ext3_block_truncate_page(handle_t *handle,
1836 +                                   struct address_space *mapping, loff_t from,
1837 +                                   struct page *page, unsigned blocksize);
1838 +extern int ext3_forget(handle_t *handle, int is_metadata,
1839 +                      struct inode *inode, struct buffer_head *bh,
1840 +                      int blocknr);
1841  #ifdef EXT3_DELETE_THREAD
1842  extern void ext3_truncate_thread(struct inode *inode);
1843  #endif
1844 @@ -781,6 +790,13 @@ extern struct inode_operations ext3_dir_
1845  /* symlink.c */
1846  extern struct inode_operations ext3_fast_symlink_inode_operations;
1847  
1848 +/* extents.c */
1849 +extern int ext3_ext_writepage_trans_blocks(struct inode *, int);
1850 +extern int ext3_ext_get_block(handle_t *, struct inode *, long,
1851 +                               struct buffer_head *, int, int);
1852 +extern void ext3_ext_truncate(struct inode *);
1853 +extern void ext3_ext_init(struct super_block *);
1854 +extern void ext3_ext_release(struct super_block *);
1855  
1856  #endif /* __KERNEL__ */
1857  
1858 --- linux-2.4.18-chaos-pdirops/include/linux/ext3_fs_i.h~ext3-extents-2.4.18-chaos-pdirops      2003-09-23 13:44:53.000000000 +0400
1859 +++ linux-2.4.18-chaos-pdirops-alexey/include/linux/ext3_fs_i.h 2003-09-23 14:29:33.000000000 +0400
1860 @@ -79,6 +79,10 @@ struct ext3_inode_info {
1861         struct dynlock i_htree_lock;
1862         struct semaphore i_append_sem;
1863         struct semaphore i_rename_sem;
1864 +
1865 +       /* extents-related data */
1866 +       struct semaphore i_ext_sem;     /* serializes extent tree changes */
1867 +       __u16 i_depth;                  /* current depth of the extent tree */
1868  };
1869  
1870  #endif /* _LINUX_EXT3_FS_I */
1871 --- linux-2.4.18-chaos-pdirops/include/linux/ext3_fs_sb.h~ext3-extents-2.4.18-chaos-pdirops     2003-09-23 13:34:51.000000000 +0400
1872 +++ linux-2.4.18-chaos-pdirops-alexey/include/linux/ext3_fs_sb.h        2003-09-23 14:29:33.000000000 +0400
1873 @@ -86,6 +86,16 @@ struct ext3_sb_info {
1874         wait_queue_head_t s_delete_thread_queue;
1875         wait_queue_head_t s_delete_waiter_queue;
1876  #endif
1877 +
1878 +       /* extents */
1879 +       int s_ext_debug;
1880 +       int s_ext_mindepth;             /* min extent tree depth seen */
1881 +       int s_ext_maxdepth;             /* max extent tree depth seen */
1882 +       int s_ext_sum;                  /* sum of depths (for average) */
1883 +       int s_ext_count;                /* number of sampled truncates */
1884 +       spinlock_t s_ext_lock;          /* protects the counters below */
1885 +       int s_ext_extents;              /* extents freed by truncate */
1886 +       int s_ext_blocks;               /* blocks freed via extents */
1887  };
1888  
1889  #endif /* _LINUX_EXT3_FS_SB */
1890
1891 _