2 * block.c --- iterate over all blocks in an inode
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
7 * This file may be redistributed under the terms of the GNU Library
8 * General Public License, version 2.
/*
 * Private iteration state threaded through the block_iterate_* helpers.
 * NOTE(review): listing is fragmented -- only the first field (the user
 * callback, invoked once per block number) is visible here; the scratch
 * buffers (ind_buf/dind_buf/tind_buf), flags, bcount and errcode fields
 * referenced below are declared in the missing lines.
 */
21 struct block_context {
23 int (*func)(ext2_filsys fs,
/*
 * Read-only guard (return form): if iteration was started with
 * BLOCK_FLAG_READ_ONLY yet the callback reported BLOCK_CHANGED, record
 * EXT2_ET_RO_BLOCK_ITERATE in ctx->errcode and force the walk to stop
 * with an error (BLOCK_ABORT | BLOCK_ERROR).  No comments can be placed
 * inside the macro body because of the backslash continuations.
 */
39 #define check_for_ro_violation_return(ctx, ret) \
41 if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \
42 ((ret) & BLOCK_CHANGED)) { \
43 (ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \
44 ret |= BLOCK_ABORT | BLOCK_ERROR; \
/*
 * Read-only guard (goto form): same violation check as
 * check_for_ro_violation_return, but on failure control transfers to
 * the caller-supplied cleanup label (the goto itself is in a line not
 * visible in this fragment).
 */
49 #define check_for_ro_violation_goto(ctx, ret, label) \
51 if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \
52 ((ret) & BLOCK_CHANGED)) { \
53 (ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \
54 ret |= BLOCK_ABORT | BLOCK_ERROR; \
/*
 * Walk one indirect block: report the indirect (metadata) block itself
 * to the callback, then each block number stored inside it, and rewrite
 * the indirect block on disk if any callback returned BLOCK_CHANGED.
 * Returns the accumulated BLOCK_* flags.
 * NOTE(review): this listing is fragmented; declarations (blk64, the
 * block_nr pointer, offset init) and several braces are not visible.
 */
59 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
60 int ref_offset, struct block_context *ctx)
62 int ret = 0, changed = 0;
63 int i, flags, limit, offset;
/* Number of 32-bit block numbers that fit in one fs block. */
67 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the indirect block itself (skipped when the
 * caller asked for depth-first metadata or data blocks only). */
68 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
69 !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) {
71 ret = (*ctx->func)(ctx->fs, &blk64,
72 BLOCK_COUNT_IND, ref_block,
73 ref_offset, ctx->priv_data);
76 check_for_ro_violation_return(ctx, ret);
/* No indirect block allocated, or callback requested abort. */
77 if (!*ind_block || (ret & BLOCK_ABORT)) {
/* Range-check the indirect block number before reading it. */
81 if (*ind_block >= ext2fs_blocks_count(ctx->fs->super) ||
82 *ind_block < ctx->fs->super->s_first_data_block) {
83 ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
87 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
94 block_nr = (blk_t *) ctx->ind_buf;
/* APPEND mode: visit every slot, including zero (unallocated) ones,
 * so the callback may fill in new blocks. */
96 if (ctx->flags & BLOCK_FLAG_APPEND) {
97 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
99 flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
104 if (flags & BLOCK_ABORT) {
108 offset += sizeof(blk_t);
/* Normal mode: presumably zero entries are skipped before the call
 * (the skip branch is not visible in this fragment) -- TODO confirm. */
111 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
115 flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
120 if (flags & BLOCK_ABORT) {
125 offset += sizeof(blk_t);
128 check_for_ro_violation_return(ctx, changed);
/* Any callback changed an entry: write the indirect block back. */
129 if (changed & BLOCK_CHANGED) {
130 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
133 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit of the indirect block for depth-first traversal. */
135 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
136 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
137 !(ret & BLOCK_ABORT)) {
139 ret |= (*ctx->func)(ctx->fs, &blk64,
140 BLOCK_COUNT_IND, ref_block,
141 ref_offset, ctx->priv_data);
144 check_for_ro_violation_return(ctx, ret);
/*
 * Walk one doubly-indirect block: report it to the callback, then
 * recurse into block_iterate_ind() for each entry, rewriting the block
 * if anything changed.  Mirrors block_iterate_ind one level up.
 * NOTE(review): fragmented listing -- some declarations/braces missing.
 */
148 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
149 int ref_offset, struct block_context *ctx)
151 int ret = 0, changed = 0;
152 int i, flags, limit, offset;
153 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the doubly-indirect block itself. */
157 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
158 BLOCK_FLAG_DATA_ONLY))) {
160 ret = (*ctx->func)(ctx->fs, &blk64,
161 BLOCK_COUNT_DIND, ref_block,
162 ref_offset, ctx->priv_data);
165 check_for_ro_violation_return(ctx, ret);
/* Absent or aborted: account for the limit*limit logical blocks
 * this subtree would have covered, so bcount stays in step. */
166 if (!*dind_block || (ret & BLOCK_ABORT)) {
167 ctx->bcount += limit*limit;
/* Range-check before reading. */
170 if (*dind_block >= ext2fs_blocks_count(ctx->fs->super) ||
171 *dind_block < ctx->fs->super->s_first_data_block) {
172 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
176 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
183 block_nr = (blk_t *) ctx->dind_buf;
/* APPEND mode: recurse into every slot, even unallocated ones. */
185 if (ctx->flags & BLOCK_FLAG_APPEND) {
186 for (i = 0; i < limit; i++, block_nr++) {
187 flags = block_iterate_ind(block_nr,
191 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
192 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
195 offset += sizeof(blk_t);
/* Normal mode: skip empty slots, but keep bcount consistent. */
198 for (i = 0; i < limit; i++, block_nr++) {
199 if (*block_nr == 0) {
200 ctx->bcount += limit;
203 flags = block_iterate_ind(block_nr,
207 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
208 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
211 offset += sizeof(blk_t);
214 check_for_ro_violation_return(ctx, changed);
/* Write the doubly-indirect block back if any entry changed. */
215 if (changed & BLOCK_CHANGED) {
216 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
219 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal. */
221 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
222 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
223 !(ret & BLOCK_ABORT)) {
225 ret |= (*ctx->func)(ctx->fs, &blk64,
226 BLOCK_COUNT_DIND, ref_block,
227 ref_offset, ctx->priv_data);
230 check_for_ro_violation_return(ctx, ret);
/*
 * Walk one triply-indirect block: report it to the callback, then
 * recurse into block_iterate_dind() for each entry, rewriting the block
 * if anything changed.  Mirrors block_iterate_dind one level up.
 * NOTE(review): fragmented listing -- some declarations/braces missing.
 */
234 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
235 int ref_offset, struct block_context *ctx)
237 int ret = 0, changed = 0;
238 int i, flags, limit, offset;
242 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the triply-indirect block itself. */
243 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
244 BLOCK_FLAG_DATA_ONLY))) {
246 ret = (*ctx->func)(ctx->fs, &blk64,
247 BLOCK_COUNT_TIND, ref_block,
248 ref_offset, ctx->priv_data);
251 check_for_ro_violation_return(ctx, ret);
/* Absent or aborted: skip past the limit^3 logical blocks covered. */
252 if (!*tind_block || (ret & BLOCK_ABORT)) {
253 ctx->bcount += limit*limit*limit;
/* Range-check before reading. */
256 if (*tind_block >= ext2fs_blocks_count(ctx->fs->super) ||
257 *tind_block < ctx->fs->super->s_first_data_block) {
258 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
262 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
269 block_nr = (blk_t *) ctx->tind_buf;
/* APPEND mode: recurse into every slot, even unallocated ones. */
271 if (ctx->flags & BLOCK_FLAG_APPEND) {
272 for (i = 0; i < limit; i++, block_nr++) {
273 flags = block_iterate_dind(block_nr,
277 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
278 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
281 offset += sizeof(blk_t);
/* Normal mode: skip empty slots, advancing bcount by limit^2 each. */
284 for (i = 0; i < limit; i++, block_nr++) {
285 if (*block_nr == 0) {
286 ctx->bcount += limit*limit;
289 flags = block_iterate_dind(block_nr,
293 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
294 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
297 offset += sizeof(blk_t);
300 check_for_ro_violation_return(ctx, changed);
/* Write the triply-indirect block back if any entry changed. */
301 if (changed & BLOCK_CHANGED) {
302 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
305 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal. */
307 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
308 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
309 !(ret & BLOCK_ABORT)) {
311 ret |= (*ctx->func)(ctx->fs, &blk64,
312 BLOCK_COUNT_TIND, ref_block,
313 ref_offset, ctx->priv_data);
316 check_for_ro_violation_return(ctx, ret);
/*
 * Public entry point: invoke func() on every block of inode ino,
 * handling both extent-mapped inodes (EXT4_EXTENTS_FL) and the classic
 * direct/indirect/doubly/triply-indirect map.  Returns 0, or ctx.errcode
 * when the walk ended with BLOCK_ERROR.
 * NOTE(review): fragmented listing -- the parameter list (ino, flags,
 * block_buf, priv_data), several declarations, error-path branches and
 * the abort_exit/errout labels fall in lines not visible here.
 */
320 errcode_t ext2fs_block_iterate3(ext2_filsys fs,
324 int (*func)(ext2_filsys fs,
326 e2_blkcnt_t blockcnt,
334 struct ext2_inode inode;
336 struct block_context ctx;
340 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
342 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
347 * Check to see if we need to limit large files
/* BLOCK_FLAG_NO_LARGE: refuse non-directories with a nonzero high
 * size word (files > 2GB), for callers using 32-bit block counts. */
349 if (flags & BLOCK_FLAG_NO_LARGE) {
350 if (!LINUX_S_ISDIR(inode.i_mode) &&
351 (inode.i_size_high != 0))
352 return EXT2_ET_FILE_TOO_BIG;
355 limit = fs->blocksize >> 2;
359 ctx.priv_data = priv_data;
/* Use the caller-supplied scratch buffer if given; otherwise allocate
 * three fs blocks (ind/dind/tind) -- the selecting conditional is in a
 * line not visible here. */
363 ctx.ind_buf = block_buf;
365 retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
369 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
370 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
373 * Iterate over the HURD translator block (if present)
375 if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
376 !(flags & BLOCK_FLAG_DATA_ONLY)) {
377 if (inode.osd1.hurd1.h_i_translator) {
378 blk64 = inode.osd1.hurd1.h_i_translator;
379 ret |= (*ctx.func)(fs, &blk64,
380 BLOCK_COUNT_TRANSLATOR,
/* Callback may relocate the translator block; store it back. */
382 inode.osd1.hurd1.h_i_translator = (blk_t) blk64;
383 if (ret & BLOCK_ABORT)
385 check_for_ro_violation_goto(&ctx, ret, abort_exit);
/* --- Extent-mapped inode: walk the extent tree instead of the
 * indirect map.  op starts at ROOT and is advanced to NEXT below. --- */
389 if (inode.i_flags & EXT4_EXTENTS_FL) {
390 ext2_extent_handle_t handle;
391 struct ext2fs_extent extent;
392 e2_blkcnt_t blockcnt = 0;
393 blk64_t blk, new_blk;
394 int op = EXT2_EXTENT_ROOT;
398 ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
403 ctx.errcode = ext2fs_extent_get(handle, op, &extent);
/* EXTENT_NO_NEXT ends the walk normally; anything else is an error. */
405 if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
/* In APPEND mode, report the hole past the last extent so the
 * callback can allocate into it. */
408 if (!(flags & BLOCK_FLAG_APPEND))
412 r = (*ctx.func)(fs, &blk, blockcnt,
415 check_for_ro_violation_goto(&ctx, ret,
417 if (r & BLOCK_CHANGED) {
419 ext2fs_extent_set_bmap(handle,
420 (blk64_t) blockcnt++,
422 if (ctx.errcode || (ret & BLOCK_ABORT))
430 op = EXT2_EXTENT_NEXT;
/* Interior (non-leaf) extent node: report it as metadata, honoring
 * first/second visit vs. BLOCK_FLAG_DEPTH_TRAVERSE ordering. */
432 if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
433 if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
435 if ((!(extent.e_flags &
436 EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
437 !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
439 EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
440 (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
441 ret |= (*ctx.func)(fs, &blk,
442 -1, 0, 0, priv_data);
443 if (ret & BLOCK_CHANGED) {
446 ext2fs_extent_replace(handle, 0, &extent);
/* Leaf extent: visit each mapped block, preserving the uninit
 * (unwritten) state when remapping. */
454 if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
455 uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
456 for (blockcnt = extent.e_lblk, j = 0;
458 blk++, blockcnt++, j++) {
460 r = (*ctx.func)(fs, &new_blk, blockcnt,
463 check_for_ro_violation_goto(&ctx, ret,
465 if (r & BLOCK_CHANGED) {
467 ext2fs_extent_set_bmap(handle,
473 if (ret & BLOCK_ABORT)
479 ext2fs_extent_free(handle);
480 ret |= BLOCK_ERROR | BLOCK_ABORT;
485 * Iterate over normal data blocks
/* --- Classic block map: 12 direct blocks, then ind/dind/tind. --- */
487 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
488 if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
489 blk64 = inode.i_block[i];
490 ret |= (*ctx.func)(fs, &blk64, ctx.bcount, 0, i,
492 inode.i_block[i] = (blk_t) blk64;
493 if (ret & BLOCK_ABORT)
497 check_for_ro_violation_goto(&ctx, ret, abort_exit);
498 if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
499 ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
500 0, EXT2_IND_BLOCK, &ctx);
501 if (ret & BLOCK_ABORT)
505 if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
506 ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
507 0, EXT2_DIND_BLOCK, &ctx);
508 if (ret & BLOCK_ABORT)
/* Skip past the dind subtree's logical span when it is absent. */
511 ctx.bcount += limit * limit;
512 if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
513 ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
514 0, EXT2_TIND_BLOCK, &ctx);
515 if (ret & BLOCK_ABORT)
/* Callbacks changed block pointers: persist the updated inode. */
520 if (ret & BLOCK_CHANGED) {
521 retval = ext2fs_write_inode(fs, ino, &inode);
524 ctx.errcode = retval;
/* Free the scratch buffers (presumably only when not caller-supplied;
 * the guarding conditional is not visible here -- TODO confirm). */
529 ext2fs_free_mem(&ctx.ind_buf);
531 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
535 * Emulate the old ext2fs_block_iterate function!
/*
 * Translation shim record used by ext2fs_block_iterate2(): wraps a
 * 32-bit-block-number callback so it can be driven by the 64-bit
 * ext2fs_block_iterate3().  NOTE(review): fragmented listing -- the
 * struct tag (presumably xlate64) and real_private field are in lines
 * not visible here.
 */
539 int (*func)(ext2_filsys fs,
541 e2_blkcnt_t blockcnt,
/*
 * iterate3 callback that forwards to a 32-bit (blk_t) callback stored in
 * struct xlate64, narrowing *blocknr and ref_blk to 32 bits for the
 * call.  (The copy-back of block32 into *blocknr and the return are in
 * lines not visible in this fragment.)
 */
548 static int xlate64_func(ext2_filsys fs, blk64_t *blocknr,
549 e2_blkcnt_t blockcnt, blk64_t ref_blk,
550 int ref_offset, void *priv_data)
552 struct xlate64 *xl = (struct xlate64 *) priv_data;
554 blk_t block32 = *blocknr;
556 ret = (*xl->func)(fs, &block32, blockcnt, (blk_t) ref_blk, ref_offset,
/*
 * Backwards-compatible 32-bit-block-number iterator: packages the
 * caller's callback in a struct xlate64 and delegates to
 * ext2fs_block_iterate3() with xlate64_func as the adapter.
 */
562 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
566 int (*func)(ext2_filsys fs,
568 e2_blkcnt_t blockcnt,
576 xl.real_private = priv_data;
579 return ext2fs_block_iterate3(fs, ino, flags, block_buf,
/* Shim record for the oldest API, ext2fs_block_iterate(): its callback
 * takes an int blockcnt rather than e2_blkcnt_t.  NOTE(review): only
 * this one field line of the struct (presumably xlate) is visible. */
585 int (*func)(ext2_filsys fs,
/*
 * iterate2 callback that forwards to the original-style callback stored
 * in struct xlate, dropping ref_block/ref_offset (unused by the old
 * interface) and narrowing blockcnt to int.
 */
595 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
596 blk_t ref_block EXT2FS_ATTR((unused)),
597 int ref_offset EXT2FS_ATTR((unused)),
600 struct xlate *xl = (struct xlate *) priv_data;
602 return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
/*
 * Oldest compatibility entry point: wraps the caller's callback in a
 * struct xlate and delegates to ext2fs_block_iterate2(), forcing
 * BLOCK_FLAG_NO_LARGE since the old int blockcnt cannot represent
 * blocks of files larger than 2GB.
 */
605 errcode_t ext2fs_block_iterate(ext2_filsys fs,
609 int (*func)(ext2_filsys fs,
617 xl.real_private = priv_data;
620 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
621 block_buf, xlate_func, &xl);