2 * block.c --- iterate over all blocks in an inode
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
7 * This file may be redistributed under the terms of the GNU Library
8 * General Public License, version 2.
/*
 * Shared iteration state threaded through the block_iterate_* helpers
 * below.  NOTE(review): most field declarations (fs, flags, bcount,
 * ind/dind/tind buffers, errcode, priv_data — all referenced later in
 * this file) are elided in this view; only the callback is visible.
 */
22 struct block_context {
	/* Per-block callback; invoked for every block number iterated. */
24 int (*func)(ext2_filsys fs,
/*
 * If the iteration was opened read-only (BLOCK_FLAG_READ_ONLY) but the
 * callback reported a modification (BLOCK_CHANGED), record
 * EXT2_ET_RO_BLOCK_ITERATE in ctx->errcode and turn the result into a
 * fatal stop (BLOCK_ABORT | BLOCK_ERROR).  "return" variant: the
 * elided tail presumably returns ret — TODO confirm against full file.
 */
40 #define check_for_ro_violation_return(ctx, ret) \
42 if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \
43 ((ret) & BLOCK_CHANGED)) { \
44 (ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \
45 ret |= BLOCK_ABORT | BLOCK_ERROR; \
/*
 * Same read-only violation check as above, but the "goto" variant:
 * takes a label argument, so the elided tail presumably jumps to
 * `label` instead of returning — TODO confirm against full file.
 */
50 #define check_for_ro_violation_goto(ctx, ret, label) \
52 if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \
53 ((ret) & BLOCK_CHANGED)) { \
54 (ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \
55 ret |= BLOCK_ABORT | BLOCK_ERROR; \
/*
 * Iterate ctx->func over a single indirect block: visit the indirect
 * block itself (pre-order unless BLOCK_FLAG_DEPTH_TRAVERSE, skipped
 * entirely under BLOCK_FLAG_DATA_ONLY), then each 32-bit block number
 * stored in it; if any callback reported BLOCK_CHANGED, the block is
 * rewritten to disk.  Returns accumulated BLOCK_* flags.
 *
 * NOTE(review): this view elides many intermediate lines (blk64/
 * block_nr declarations, braces, early-return paths); comments below
 * describe only what the visible lines establish.
 */
60 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
61 int ref_offset, struct block_context *ctx)
63 int ret = 0, changed = 0;
64 int i, flags, limit, offset;
	/* Number of 32-bit block numbers per filesystem block. */
68 limit = ctx->fs->blocksize >> 2;
	/* Pre-order visit of the indirect block itself. */
69 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
70 !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) {
72 ret = (*ctx->func)(ctx->fs, &blk64,
73 BLOCK_COUNT_IND, ref_block,
74 ref_offset, ctx->priv_data);
77 check_for_ro_violation_return(ctx, ret);
	/* Nothing to walk if the block is unallocated or caller aborted. */
78 if (!*ind_block || (ret & BLOCK_ABORT)) {
	/* Range-check the block number before reading it. */
82 if (*ind_block >= ext2fs_blocks_count(ctx->fs->super) ||
83 *ind_block < ctx->fs->super->s_first_data_block) {
84 ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
88 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
95 block_nr = (blk_t *) ctx->ind_buf;
	/* APPEND mode visits every slot, including zero (unallocated) ones. */
97 if (ctx->flags & BLOCK_FLAG_APPEND) {
98 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
100 flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
105 if (flags & BLOCK_ABORT) {
109 offset += sizeof(blk_t);
	/* Non-APPEND path; the skip-zero-entry check is elided in this view. */
112 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
116 flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
121 if (flags & BLOCK_ABORT) {
126 offset += sizeof(blk_t);
	/* Write back the indirect block if any entry was modified. */
129 check_for_ro_violation_return(ctx, changed);
130 if (changed & BLOCK_CHANGED) {
131 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
134 ret |= BLOCK_ERROR | BLOCK_ABORT;
	/* Post-order visit of the indirect block under DEPTH_TRAVERSE. */
136 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
137 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
138 !(ret & BLOCK_ABORT)) {
140 ret |= (*ctx->func)(ctx->fs, &blk64,
141 BLOCK_COUNT_IND, ref_block,
142 ref_offset, ctx->priv_data);
145 check_for_ro_violation_return(ctx, ret);
/*
 * Iterate over a double-indirect block: visit the dind block itself
 * (pre/post order per BLOCK_FLAG_DEPTH_TRAVERSE), then recurse into
 * block_iterate_ind() for each of its `limit` entries.  Mirrors
 * block_iterate_ind() structurally, one level deeper.
 *
 * NOTE(review): intermediate lines are elided in this view; comments
 * describe only what the visible lines establish.
 */
149 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
150 int ref_offset, struct block_context *ctx)
152 int ret = 0, changed = 0;
153 int i, flags, limit, offset;
153 check_for_ro_violation_return-style layout as above; limit is
157 limit = ctx->fs->blocksize >> 2;
158 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
159 BLOCK_FLAG_DATA_ONLY))) {
161 ret = (*ctx->func)(ctx->fs, &blk64,
162 BLOCK_COUNT_DIND, ref_block,
163 ref_offset, ctx->priv_data);
166 check_for_ro_violation_return(ctx, ret);
	/* Skip an unallocated dind block, but keep the logical block count
	 * consistent: one dind block covers limit*limit data blocks. */
167 if (!*dind_block || (ret & BLOCK_ABORT)) {
168 ctx->bcount += limit*limit;
171 if (*dind_block >= ext2fs_blocks_count(ctx->fs->super) ||
172 *dind_block < ctx->fs->super->s_first_data_block) {
173 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
177 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
184 block_nr = (blk_t *) ctx->dind_buf;
186 if (ctx->flags & BLOCK_FLAG_APPEND) {
187 for (i = 0; i < limit; i++, block_nr++) {
188 flags = block_iterate_ind(block_nr,
192 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
193 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
196 offset += sizeof(blk_t);
199 for (i = 0; i < limit; i++, block_nr++) {
	/* Zero entry: skip the whole missing indirect block but still
	 * advance bcount by the `limit` data blocks it would cover. */
200 if (*block_nr == 0) {
201 ctx->bcount += limit;
204 flags = block_iterate_ind(block_nr,
208 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
209 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
212 offset += sizeof(blk_t);
	/* Write back the dind block if any entry changed. */
215 check_for_ro_violation_return(ctx, changed);
216 if (changed & BLOCK_CHANGED) {
217 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
220 ret |= BLOCK_ERROR | BLOCK_ABORT;
	/* Post-order visit under DEPTH_TRAVERSE. */
222 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
223 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
224 !(ret & BLOCK_ABORT)) {
226 ret |= (*ctx->func)(ctx->fs, &blk64,
227 BLOCK_COUNT_DIND, ref_block,
228 ref_offset, ctx->priv_data);
231 check_for_ro_violation_return(ctx, ret);
/*
 * Iterate over a triple-indirect block: visit the tind block itself,
 * then recurse into block_iterate_dind() for each entry.  Same shape
 * as block_iterate_dind(), one level deeper; an unallocated tind
 * block accounts for limit^3 logical blocks.
 *
 * NOTE(review): intermediate lines are elided in this view; comments
 * describe only what the visible lines establish.
 */
235 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
236 int ref_offset, struct block_context *ctx)
238 int ret = 0, changed = 0;
239 int i, flags, limit, offset;
243 limit = ctx->fs->blocksize >> 2;
244 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
245 BLOCK_FLAG_DATA_ONLY))) {
247 ret = (*ctx->func)(ctx->fs, &blk64,
248 BLOCK_COUNT_TIND, ref_block,
249 ref_offset, ctx->priv_data);
252 check_for_ro_violation_return(ctx, ret);
	/* Unallocated tind block covers limit^3 logical data blocks. */
253 if (!*tind_block || (ret & BLOCK_ABORT)) {
254 ctx->bcount += limit*limit*limit;
257 if (*tind_block >= ext2fs_blocks_count(ctx->fs->super) ||
258 *tind_block < ctx->fs->super->s_first_data_block) {
259 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
263 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
270 block_nr = (blk_t *) ctx->tind_buf;
272 if (ctx->flags & BLOCK_FLAG_APPEND) {
273 for (i = 0; i < limit; i++, block_nr++) {
274 flags = block_iterate_dind(block_nr,
278 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
279 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
282 offset += sizeof(blk_t);
285 for (i = 0; i < limit; i++, block_nr++) {
	/* Zero entry: skip the missing dind subtree, advance bcount by
	 * the limit^2 data blocks it would cover. */
286 if (*block_nr == 0) {
287 ctx->bcount += limit*limit;
290 flags = block_iterate_dind(block_nr,
294 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
295 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
298 offset += sizeof(blk_t);
	/* Write back the tind block if any entry changed. */
301 check_for_ro_violation_return(ctx, changed);
302 if (changed & BLOCK_CHANGED) {
303 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
306 ret |= BLOCK_ERROR | BLOCK_ABORT;
	/* Post-order visit under DEPTH_TRAVERSE. */
308 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
309 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
310 !(ret & BLOCK_ABORT)) {
312 ret |= (*ctx->func)(ctx->fs, &blk64,
313 BLOCK_COUNT_TIND, ref_block,
314 ref_offset, ctx->priv_data);
317 check_for_ro_violation_return(ctx, ret);
/*
 * Main entry point: iterate `func` over every block of inode `ino`.
 * Handles, in order: the HURD translator block, extent-mapped inodes
 * (EXT4_EXTENTS_FL), and classic direct + ind/dind/tind mapped blocks.
 * If any callback returned BLOCK_CHANGED, the (possibly updated) inode
 * is written back.  Returns 0, or ctx.errcode when BLOCK_ERROR is set.
 *
 * NOTE(review): this view elides many lines (parameter list tail,
 * declarations of ret/retval/blk64/i/j/uninit/limit, error checks,
 * braces, goto labels such as abort_exit/errout/extent_done); comments
 * below describe only what the visible lines establish.
 */
321 errcode_t ext2fs_block_iterate3(ext2_filsys fs,
325 int (*func)(ext2_filsys fs,
327 e2_blkcnt_t blockcnt,
335 struct ext2_inode inode;
337 struct block_context ctx;
341 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
343 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
348 * Check to see if we need to limit large files
	/* BLOCK_FLAG_NO_LARGE: reject non-directories with a nonzero high
	 * size word (file > 32-bit size range). */
350 if (flags & BLOCK_FLAG_NO_LARGE) {
351 if (!LINUX_S_ISDIR(inode.i_mode) &&
352 (inode.i_size_high != 0))
353 return EXT2_ET_FILE_TOO_BIG;
	/* Block numbers per fs block (4-byte entries). */
356 limit = fs->blocksize >> 2;
360 ctx.priv_data = priv_data;
	/* Use the caller-supplied scratch buffer, or allocate three
	 * contiguous blocks for the ind/dind/tind levels. */
364 ctx.ind_buf = block_buf;
366 retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
370 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
371 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
374 * Iterate over the HURD translator block (if present)
376 if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
377 !(flags & BLOCK_FLAG_DATA_ONLY)) {
378 if (inode.osd1.hurd1.h_i_translator) {
379 blk64 = inode.osd1.hurd1.h_i_translator;
380 ret |= (*ctx.func)(fs, &blk64,
381 BLOCK_COUNT_TRANSLATOR,
	/* Callback may have remapped the translator block. */
383 inode.osd1.hurd1.h_i_translator = (blk_t) blk64;
384 if (ret & BLOCK_ABORT)
386 check_for_ro_violation_goto(&ctx, ret, abort_exit);
	/* Extent-mapped inode: walk the extent tree instead of the
	 * indirect-block scheme. */
390 if (inode.i_flags & EXT4_EXTENTS_FL) {
391 ext2_extent_handle_t handle;
392 struct ext2fs_extent extent, next;
393 e2_blkcnt_t blockcnt = 0;
394 blk64_t blk, new_blk;
395 int op = EXT2_EXTENT_ROOT;
399 ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
403 if (op == EXT2_EXTENT_CURRENT)
407 ctx.errcode = ext2fs_extent_get(handle, op,
	/* EXT2_ET_EXTENT_NO_NEXT ends the walk; anything else is elided
	 * error handling in this view. */
410 if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
	/* Holes past the last extent: only visited in APPEND mode. */
413 if (!(flags & BLOCK_FLAG_APPEND))
417 r = (*ctx.func)(fs, &blk, blockcnt,
420 check_for_ro_violation_goto(&ctx, ret,
422 if (r & BLOCK_CHANGED) {
424 ext2fs_extent_set_bmap(handle,
425 (blk64_t) blockcnt++,
427 if (ctx.errcode || (ret & BLOCK_ABORT))
435 op = EXT2_EXTENT_NEXT;
	/* Interior (non-leaf) extent node: report it as a metadata block
	 * on first visit (breadth-first) or second visit (depth-first). */
437 if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
438 if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
440 if ((!(extent.e_flags &
441 EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
442 !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
444 EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
445 (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
446 ret |= (*ctx.func)(fs, &blk,
447 -1, 0, 0, priv_data);
448 if (ret & BLOCK_CHANGED) {
451 ext2fs_extent_replace(handle, 0, &extent);
455 if (ret & BLOCK_ABORT)
	/* Preserve the uninitialized-extent flag on remapped blocks. */
461 if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
462 uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
465 * Get the next extent before we start messing
466 * with the current extent
468 retval = ext2fs_extent_get(handle, op, &next);
471 printf("lblk %llu pblk %llu len %d blockcnt %llu\n",
472 extent.e_lblk, extent.e_pblk,
473 extent.e_len, blockcnt);
	/* Skip extents entirely behind the current logical position;
	 * otherwise clamp blockcnt to the extent's start. */
475 if (extent.e_lblk + extent.e_len <= blockcnt)
477 if (extent.e_lblk > blockcnt)
478 blockcnt = extent.e_lblk;
479 j = blockcnt - extent.e_lblk;
	/* Visit each block covered by this extent. */
481 for (blockcnt = extent.e_lblk, j = 0;
483 blk++, blockcnt++, j++) {
485 r = (*ctx.func)(fs, &new_blk, blockcnt,
488 check_for_ro_violation_goto(&ctx, ret,
490 if (r & BLOCK_CHANGED) {
492 ext2fs_extent_set_bmap(handle,
498 if (ret & BLOCK_ABORT)
503 op = EXT2_EXTENT_CURRENT;
508 ext2fs_extent_free(handle);
509 ret |= BLOCK_ERROR; /* ctx.errcode is always valid here */
514 * Iterate over normal data blocks
	/* The 12 direct blocks; blockcnt passed to func is ctx.bcount. */
516 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
517 if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
518 blk64 = inode.i_block[i];
519 ret |= (*ctx.func)(fs, &blk64, ctx.bcount, 0, i,
521 inode.i_block[i] = (blk_t) blk64;
522 if (ret & BLOCK_ABORT)
526 check_for_ro_violation_goto(&ctx, ret, abort_exit);
	/* Then the single, double, and triple indirect trees. */
527 if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
528 ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
529 0, EXT2_IND_BLOCK, &ctx);
530 if (ret & BLOCK_ABORT)
534 if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
535 ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
536 0, EXT2_DIND_BLOCK, &ctx);
537 if (ret & BLOCK_ABORT)
	/* Skipped dind tree still advances bcount by limit^2 blocks. */
540 ctx.bcount += limit * limit;
541 if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
542 ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
543 0, EXT2_TIND_BLOCK, &ctx);
544 if (ret & BLOCK_ABORT)
	/* Persist the inode if any callback changed a block mapping. */
549 if (ret & BLOCK_CHANGED) {
550 retval = ext2fs_write_inode(fs, ino, &inode);
553 ctx.errcode = retval;
	/* Free the scratch buffers (no-op if caller supplied block_buf —
	 * presumably guarded by an elided check; TODO confirm). */
558 ext2fs_free_mem(&ctx.ind_buf);
560 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
564 * Emulate the old ext2fs_block_iterate function!
/*
 * Adapter state for ext2fs_block_iterate2(): carries the caller's
 * 32-bit callback (and, presumably, its private data in an elided
 * field) through ext2fs_block_iterate3()'s 64-bit interface.
 */
568 int (*func)(ext2_filsys fs,
570 e2_blkcnt_t blockcnt,
/*
 * 64-bit -> 32-bit callback shim: narrows the block number to blk_t,
 * invokes the wrapped 32-bit callback, then (in lines elided here)
 * presumably copies any modified block32 back into *blocknr.
 */
577 static int xlate64_func(ext2_filsys fs, blk64_t *blocknr,
578 e2_blkcnt_t blockcnt, blk64_t ref_blk,
579 int ref_offset, void *priv_data)
581 struct xlate64 *xl = (struct xlate64 *) priv_data;
583 blk_t block32 = *blocknr;
585 ret = (*xl->func)(fs, &block32, blockcnt, (blk_t) ref_blk, ref_offset,
/*
 * Compatibility wrapper: exposes the 32-bit-block-number callback API
 * by packing the caller's func/priv_data into a struct xlate64 and
 * delegating to ext2fs_block_iterate3() via xlate64_func.
 */
591 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
595 int (*func)(ext2_filsys fs,
597 e2_blkcnt_t blockcnt,
605 xl.real_private = priv_data;
608 return ext2fs_block_iterate3(fs, ino, flags, block_buf,
614 int (*func)(ext2_filsys fs,
/*
 * Callback shim for the legacy API: drops ref_block/ref_offset
 * (marked unused) and narrows blockcnt to int for the old callback.
 */
624 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
625 blk_t ref_block EXT2FS_ATTR((unused)),
626 int ref_offset EXT2FS_ATTR((unused)),
629 struct xlate *xl = (struct xlate *) priv_data;
631 return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
/*
 * Oldest compatibility wrapper: forwards to ext2fs_block_iterate2()
 * through xlate_func, forcing BLOCK_FLAG_NO_LARGE since the legacy
 * int-blockcnt callback cannot address large files.
 */
634 errcode_t ext2fs_block_iterate(ext2_filsys fs,
638 int (*func)(ext2_filsys fs,
646 xl.real_private = priv_data;
649 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
650 block_buf, xlate_func, &xl);