2 * block.c --- iterate over all blocks in an inode
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
7 * This file may be redistributed under the terms of the GNU Library
8 * General Public License, version 2.
/*
 * Private iteration state threaded through the block_iterate_* helpers:
 * carries the callback (func), flags, error code, block count and the
 * ind/dind/tind scratch buffers referenced below.
 * NOTE(review): most members of this struct are elided in this view of
 * the file — see the original source for the full declaration.
 */
22 struct block_context {
24 	int (*func)(ext2_filsys fs,
/*
 * If the iteration was opened read-only (BLOCK_FLAG_READ_ONLY) but a
 * callback reported BLOCK_CHANGED, flag the contradiction: record
 * EXT2_ET_RO_BLOCK_ITERATE in the context and force the caller to stop
 * with BLOCK_ABORT | BLOCK_ERROR.  Return-style variant (the sibling
 * macro below jumps to a label instead).
 * NOTE(review): the do/while wrapper lines are elided in this view.
 */
40 #define check_for_ro_violation_return(ctx, ret) \
42 	if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \
43 	    ((ret) & BLOCK_CHANGED)) { \
44 		(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \
45 		ret |= BLOCK_ABORT | BLOCK_ERROR; \
/*
 * Same read-only-violation check as check_for_ro_violation_return(),
 * but for call sites that unwind via goto: on violation it sets
 * EXT2_ET_RO_BLOCK_ITERATE plus BLOCK_ABORT | BLOCK_ERROR and (in the
 * elided tail of this macro) jumps to `label`.
 */
50 #define check_for_ro_violation_goto(ctx, ret, label) \
52 	if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \
53 	    ((ret) & BLOCK_CHANGED)) { \
54 		(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \
55 		ret |= BLOCK_ABORT | BLOCK_ERROR; \
/*
 * Iterate over one singly-indirect block.  Reports the indirect block
 * itself to the callback with blockcnt == BLOCK_COUNT_IND — before the
 * entries unless BLOCK_FLAG_DEPTH_TRAVERSE is set (then after), and not
 * at all under BLOCK_FLAG_DATA_ONLY.  Then walks the blocksize/4 block
 * pointers it contains, invoking the callback on each; if any callback
 * returned BLOCK_CHANGED the indirect block is written back to disk.
 * Returns an or-ed mask of BLOCK_* result flags; errors are recorded in
 * ctx->errcode.
 * NOTE(review): many lines of this function are elided in this view
 * (declarations of blk64/block_nr, early returns, loop interiors).
 */
60 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
61 int ref_offset, struct block_context *ctx)
63 int ret = 0, changed = 0;
64 int i, flags, limit, offset;
/* Number of 32-bit block pointers held by one filesystem block. */
68 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the indirect block itself (metadata callback). */
69 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
70 !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) {
72 ret = (*ctx->func)(ctx->fs, &blk64,
73 BLOCK_COUNT_IND, ref_block,
74 ref_offset, ctx->priv_data);
77 check_for_ro_violation_return(ctx, ret);
/* A zero indirect block (sparse) or abort request skips the walk. */
78 if (!*ind_block || (ret & BLOCK_ABORT)) {
/* Sanity-check the indirect block number against fs boundaries. */
82 if (*ind_block >= ext2fs_blocks_count(ctx->fs->super) ||
83 *ind_block < ctx->fs->super->s_first_data_block) {
84 ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
88 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
95 block_nr = (blk_t *) ctx->ind_buf;
/* APPEND mode visits every slot, including zero (unallocated) ones. */
97 if (ctx->flags & BLOCK_FLAG_APPEND) {
98 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
100 flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
105 if (flags & BLOCK_ABORT) {
109 offset += sizeof(blk_t);
/* Non-APPEND path: zero entries are skipped in the elided lines. */
112 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
116 flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
121 if (flags & BLOCK_ABORT) {
126 offset += sizeof(blk_t);
129 check_for_ro_violation_return(ctx, changed);
/* Flush the modified indirect block back to disk. */
130 if (changed & BLOCK_CHANGED) {
131 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
134 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit when depth-first traversal was requested. */
136 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
137 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
138 !(ret & BLOCK_ABORT)) {
140 ret |= (*ctx->func)(ctx->fs, &blk64,
141 BLOCK_COUNT_IND, ref_block,
142 ref_offset, ctx->priv_data);
145 check_for_ro_violation_return(ctx, ret);
/*
 * Iterate over one doubly-indirect block: same pre/post metadata
 * callback protocol as block_iterate_ind() (blockcnt ==
 * BLOCK_COUNT_DIND), but each of the blocksize/4 entries is itself a
 * singly-indirect block handed to block_iterate_ind().  A sparse or
 * aborted dind still advances ctx->bcount by limit*limit so logical
 * block numbering stays correct.
 * NOTE(review): many lines are elided in this view (blk64/block_nr
 * declarations, early returns, argument lists of the recursive calls).
 */
149 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
150 int ref_offset, struct block_context *ctx)
152 int ret = 0, changed = 0;
153 int i, flags, limit, offset;
154 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the dind block itself, unless suppressed. */
158 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
159 BLOCK_FLAG_DATA_ONLY))) {
161 ret = (*ctx->func)(ctx->fs, &blk64,
162 BLOCK_COUNT_DIND, ref_block,
163 ref_offset, ctx->priv_data);
166 check_for_ro_violation_return(ctx, ret);
167 if (!*dind_block || (ret & BLOCK_ABORT)) {
/* Skipped dind covers limit*limit logical blocks. */
168 ctx->bcount += limit*limit;
/* Validate the dind block number against filesystem boundaries. */
171 if (*dind_block >= ext2fs_blocks_count(ctx->fs->super) ||
172 *dind_block < ctx->fs->super->s_first_data_block) {
173 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
177 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
184 block_nr = (blk_t *) ctx->dind_buf;
/* APPEND mode recurses even into zero (unallocated) entries. */
186 if (ctx->flags & BLOCK_FLAG_APPEND) {
187 for (i = 0; i < limit; i++, block_nr++) {
188 flags = block_iterate_ind(block_nr,
192 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
193 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
196 offset += sizeof(blk_t);
199 for (i = 0; i < limit; i++, block_nr++) {
/* Sparse entry: account for its limit logical blocks and skip. */
200 if (*block_nr == 0) {
201 ctx->bcount += limit;
204 flags = block_iterate_ind(block_nr,
208 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
209 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
212 offset += sizeof(blk_t);
215 check_for_ro_violation_return(ctx, changed);
/* Write back the dind block if any child iteration changed it. */
216 if (changed & BLOCK_CHANGED) {
217 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
220 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal. */
222 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
223 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
224 !(ret & BLOCK_ABORT)) {
226 ret |= (*ctx->func)(ctx->fs, &blk64,
227 BLOCK_COUNT_DIND, ref_block,
228 ref_offset, ctx->priv_data);
231 check_for_ro_violation_return(ctx, ret);
/*
 * Iterate over the triply-indirect block: identical structure to
 * block_iterate_dind() one level up (blockcnt == BLOCK_COUNT_TIND),
 * delegating each entry to block_iterate_dind().  A skipped tind
 * advances ctx->bcount by limit^3 — computed in unsigned long long to
 * avoid 32-bit overflow for large block sizes.
 * NOTE(review): many lines are elided in this view.
 */
235 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
236 int ref_offset, struct block_context *ctx)
238 int ret = 0, changed = 0;
239 int i, flags, limit, offset;
243 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the tind block itself, unless suppressed. */
244 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
245 BLOCK_FLAG_DATA_ONLY))) {
247 ret = (*ctx->func)(ctx->fs, &blk64,
248 BLOCK_COUNT_TIND, ref_block,
249 ref_offset, ctx->priv_data);
252 check_for_ro_violation_return(ctx, ret);
253 if (!*tind_block || (ret & BLOCK_ABORT)) {
/* widen before multiplying: limit^3 can exceed 32 bits */
254 ctx->bcount += ((unsigned long long) limit)*limit*limit;
/* Validate the tind block number against filesystem boundaries. */
257 if (*tind_block >= ext2fs_blocks_count(ctx->fs->super) ||
258 *tind_block < ctx->fs->super->s_first_data_block) {
259 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
263 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
270 block_nr = (blk_t *) ctx->tind_buf;
/* APPEND mode recurses even into zero (unallocated) entries. */
272 if (ctx->flags & BLOCK_FLAG_APPEND) {
273 for (i = 0; i < limit; i++, block_nr++) {
274 flags = block_iterate_dind(block_nr,
278 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
279 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
282 offset += sizeof(blk_t);
285 for (i = 0; i < limit; i++, block_nr++) {
/* Sparse entry: account for its limit*limit blocks and skip. */
286 if (*block_nr == 0) {
287 ctx->bcount += limit*limit;
290 flags = block_iterate_dind(block_nr,
294 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
295 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
298 offset += sizeof(blk_t);
301 check_for_ro_violation_return(ctx, changed);
/* Write back the tind block if any child iteration changed it. */
302 if (changed & BLOCK_CHANGED) {
303 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
306 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal. */
308 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
309 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
310 !(ret & BLOCK_ABORT)) {
312 ret |= (*ctx->func)(ctx->fs, &blk64,
313 BLOCK_COUNT_TIND, ref_block,
314 ref_offset, ctx->priv_data);
317 check_for_ro_violation_return(ctx, ret);
/*
 * Main entry point: invoke `func` on every block of inode `ino`.
 * Handles four layouts visible below: inline data (refused with
 * EXT2_ET_INLINE_DATA_CANT_ITERATE), the HURD translator block,
 * extent-mapped files (EXT4_EXTENTS_FL), and classic direct/ind/dind/
 * tind block maps.  If `block_buf` is NULL a 3-block scratch array is
 * allocated (and freed on exit).  Returns 0 or ctx.errcode when the
 * iteration ended with BLOCK_ERROR.
 * NOTE(review): many lines are elided in this view — early-return
 * checks, goto labels (abort_exit, errout, extent_done...), and parts
 * of the extent loop.  Comments below describe only the visible code.
 */
321 errcode_t ext2fs_block_iterate3(ext2_filsys fs,
325 int (*func)(ext2_filsys fs,
327 e2_blkcnt_t blockcnt,
335 struct ext2_inode inode;
337 struct block_context ctx;
341 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
343 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
348 * An inode with inline data has no blocks over which to
349 * iterate, so return an error code indicating this fact.
351 if (inode.i_flags & EXT4_INLINE_DATA_FL)
352 return EXT2_ET_INLINE_DATA_CANT_ITERATE;
355 * Check to see if we need to limit large files
357 if (flags & BLOCK_FLAG_NO_LARGE) {
/* Non-directories with a nonzero high size word are "too big". */
358 if (!LINUX_S_ISDIR(inode.i_mode) &&
359 (inode.i_size_high != 0))
360 return EXT2_ET_FILE_TOO_BIG;
/* Block pointers per filesystem block (used for bcount bookkeeping). */
363 limit = fs->blocksize >> 2;
367 ctx.priv_data = priv_data;
/* Use the caller's buffer if given; otherwise allocate 3 blocks
 * (ind/dind/tind scratch, carved up just below). */
371 ctx.ind_buf = block_buf;
373 retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
377 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
378 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
381 * Iterate over the HURD translator block (if present)
383 if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
384 !(flags & BLOCK_FLAG_DATA_ONLY)) {
385 if (inode.osd1.hurd1.h_i_translator) {
386 blk64 = inode.osd1.hurd1.h_i_translator;
387 ret |= (*ctx.func)(fs, &blk64,
388 BLOCK_COUNT_TRANSLATOR,
/* Callback may have remapped the translator block. */
390 inode.osd1.hurd1.h_i_translator = (blk_t) blk64;
391 if (ret & BLOCK_ABORT)
393 check_for_ro_violation_goto(&ctx, ret, abort_exit);
/* Extent-mapped inode: walk the extent tree instead of block maps. */
397 if (inode.i_flags & EXT4_EXTENTS_FL) {
398 ext2_extent_handle_t handle;
399 struct ext2fs_extent extent, next;
400 e2_blkcnt_t blockcnt = 0;
401 blk64_t blk, new_blk;
402 int op = EXT2_EXTENT_ROOT;
406 ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
411 if (op == EXT2_EXTENT_CURRENT)
414 ctx.errcode = ext2fs_extent_get(handle, op,
417 if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
/* Past the last extent: only APPEND mode keeps visiting holes. */
420 if (!(flags & BLOCK_FLAG_APPEND))
424 r = (*ctx.func)(fs, &blk, blockcnt,
427 check_for_ro_violation_goto(&ctx, ret,
/* Callback allocated a block for the hole: map it in. */
429 if (r & BLOCK_CHANGED) {
431 ext2fs_extent_set_bmap(handle,
432 (blk64_t) blockcnt++,
434 if (ctx.errcode || (ret & BLOCK_ABORT))
442 op = EXT2_EXTENT_NEXT;
/* Interior (non-leaf) extent node: report it as metadata, honoring
 * the first/second-visit convention for depth-first traversal. */
444 if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
445 if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
447 if ((!(extent.e_flags &
448 EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
449 !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
451 EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
452 (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
453 ret |= (*ctx.func)(fs, &blk,
454 -1, 0, 0, priv_data);
455 if (ret & BLOCK_CHANGED) {
458 ext2fs_extent_replace(handle, 0, &extent);
462 if (ret & BLOCK_ABORT)
468 if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
469 uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
472 * Get the next extent before we start messing
473 * with the current extent
475 retval = ext2fs_extent_get(handle, op, &next);
478 printf("lblk %llu pblk %llu len %d blockcnt %llu\n",
479 extent.e_lblk, extent.e_pblk,
480 extent.e_len, blockcnt);
/* Skip extents entirely behind the current logical position;
 * otherwise clamp blockcnt to the extent start. */
482 if (extent.e_lblk + extent.e_len <= (blk64_t) blockcnt)
484 if (extent.e_lblk > (blk64_t) blockcnt)
485 blockcnt = extent.e_lblk;
486 j = blockcnt - extent.e_lblk;
/* Visit each block covered by this extent. */
488 for (blockcnt = extent.e_lblk, j = 0;
490 blk++, blockcnt++, j++) {
492 r = (*ctx.func)(fs, &new_blk, blockcnt,
495 check_for_ro_violation_goto(&ctx, ret,
497 if (r & BLOCK_CHANGED) {
499 ext2fs_extent_set_bmap(handle,
505 if (ret & BLOCK_ABORT)
510 op = EXT2_EXTENT_CURRENT;
515 ext2fs_extent_free(handle);
516 ret |= BLOCK_ERROR; /* ctx.errcode is always valid here */
521 * Iterate over normal data blocks
523 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
524 if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
525 blk64 = inode.i_block[i];
526 ret |= (*ctx.func)(fs, &blk64, ctx.bcount, 0, i,
/* Callback may have remapped the direct block. */
528 inode.i_block[i] = (blk_t) blk64;
529 if (ret & BLOCK_ABORT)
533 check_for_ro_violation_goto(&ctx, ret, abort_exit);
534 if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
535 ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
536 0, EXT2_IND_BLOCK, &ctx);
537 if (ret & BLOCK_ABORT)
541 if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
542 ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
543 0, EXT2_DIND_BLOCK, &ctx);
544 if (ret & BLOCK_ABORT)
/* Account for a skipped dind region in the logical block count. */
547 ctx.bcount += limit * limit;
548 if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
549 ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
550 0, EXT2_TIND_BLOCK, &ctx);
551 if (ret & BLOCK_ABORT)
/* A callback changed a block pointer stored in the inode itself. */
556 if (ret & BLOCK_CHANGED) {
557 retval = ext2fs_write_inode(fs, ino, &inode);
560 ctx.errcode = retval;
/* Free the scratch buffers (no-op if the caller supplied block_buf —
 * NOTE(review): the guard for that case is elided in this view). */
565 ext2fs_free_mem(&ctx.ind_buf);
567 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
571 * Emulate the old ext2fs_block_iterate function!
/*
 * NOTE(review): fragment of struct xlate64 — the adapter context that
 * lets the 32-bit ext2fs_block_iterate2() callback be driven from the
 * 64-bit ext2fs_block_iterate3() engine.  Holds the caller's 32-bit
 * callback and (in elided lines) its private data pointer.
 */
575 int (*func)(ext2_filsys fs,
577 e2_blkcnt_t blockcnt,
/*
 * 64-bit-to-32-bit callback shim: narrows the blk64_t block number and
 * reference block to blk_t, forwards the call to the wrapped 32-bit
 * callback, and (in elided lines) copies any changed block number back
 * out and returns the callback's result flags.
 */
584 static int xlate64_func(ext2_filsys fs, blk64_t *blocknr,
585 e2_blkcnt_t blockcnt, blk64_t ref_blk,
586 int ref_offset, void *priv_data)
588 struct xlate64 *xl = (struct xlate64 *) priv_data;
/* Narrowing copy — block numbers here are assumed to fit in 32 bits. */
590 blk_t block32 = *blocknr;
592 ret = (*xl->func)(fs, &block32, blockcnt, (blk_t) ref_blk, ref_offset,
/*
 * Backward-compatible 32-bit entry point: wraps the caller's callback
 * in a struct xlate64 and delegates to ext2fs_block_iterate3() with
 * xlate64_func as the adapter.
 */
598 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
602 int (*func)(ext2_filsys fs,
604 e2_blkcnt_t blockcnt,
612 xl.real_private = priv_data;
615 return ext2fs_block_iterate3(fs, ino, flags, block_buf,
/*
 * NOTE(review): fragment of struct xlate — adapter context for the
 * original ext2fs_block_iterate() callback signature (int blockcnt,
 * no ref block/offset).  Remaining members are elided in this view.
 */
621 int (*func)(ext2_filsys fs,
/*
 * Callback shim for the oldest iterate API: drops the (unused)
 * reference block/offset arguments and narrows blockcnt to int before
 * forwarding to the wrapped legacy callback.
 */
631 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
632 blk_t ref_block EXT2FS_ATTR((unused)),
633 int ref_offset EXT2FS_ATTR((unused)),
636 struct xlate *xl = (struct xlate *) priv_data;
638 return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
/*
 * Oldest-compatibility entry point: wraps the legacy callback in a
 * struct xlate and delegates to ext2fs_block_iterate2(), forcing
 * BLOCK_FLAG_NO_LARGE since the legacy int blockcnt cannot represent
 * huge files.
 */
641 errcode_t ext2fs_block_iterate(ext2_filsys fs,
645 int (*func)(ext2_filsys fs,
653 xl.real_private = priv_data;
656 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
657 block_buf, xlate_func, &xl);