2 * block.c --- iterate over all blocks in an inode
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
7 * This file may be redistributed under the terms of the GNU Public
/*
 * Per-walk iteration state shared by the block_iterate_* helpers below:
 * carries the filesystem, the user callback and its private data, the
 * iteration flags, the running block count, scratch buffers for the
 * ind/dind/tind blocks, and the first error encountered.
 * NOTE(review): only a fragment of the struct is visible here (interior
 * members elided); the callback pointer is the first visible member.
 */
21 struct block_context {
23 	int (*func)(ext2_filsys fs,
/*
 * If the iteration was started read-only (BLOCK_FLAG_READ_ONLY) but the
 * callback claims it changed a block (BLOCK_CHANGED), that is a contract
 * violation: record EXT2_ET_RO_BLOCK_ITERATE and force the walk to stop
 * by setting BLOCK_ABORT | BLOCK_ERROR in ret.  This variant returns;
 * see check_for_ro_violation_goto for the goto-cleanup variant.
 * NOTE(review): macro body is truncated in this view (closing lines elided).
 */
39 #define check_for_ro_violation_return(ctx, ret) \
41 	if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&			\
42 	    ((ret) & BLOCK_CHANGED)) {					\
43 		(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;		\
44 		ret |= BLOCK_ABORT | BLOCK_ERROR;			\
/*
 * Same read-only-violation check as check_for_ro_violation_return, but
 * for callers using goto-based cleanup: on violation it records the
 * error, sets BLOCK_ABORT | BLOCK_ERROR, and (in the elided tail) jumps
 * to the supplied label.
 * NOTE(review): macro body is truncated in this view (goto line elided).
 */
49 #define check_for_ro_violation_goto(ctx, ret, label)	\
51 	if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&			\
52 	    ((ret) & BLOCK_CHANGED)) {					\
53 		(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;		\
54 		ret |= BLOCK_ABORT | BLOCK_ERROR;			\
/*
 * Walk one singly-indirect block.  Unless DEPTH_TRAVERSE or DATA_ONLY is
 * set, the callback is first invoked on the indirect block itself
 * (pre-order); the block's entries are then iterated, the block is
 * written back if any callback reported BLOCK_CHANGED, and in
 * DEPTH_TRAVERSE mode the callback on the indirect block runs last
 * (post-order) instead.
 * NOTE(review): interior lines are elided in this view (declarations of
 * blk64/block_nr, loop bodies, early returns); comments below cover only
 * what is visible.
 */
59 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
60 			     int ref_offset, struct block_context *ctx)
62 	int ret = 0, changed = 0;
63 	int i, flags, limit, offset;
/* Number of 32-bit block numbers that fit in one fs block. */
67 	limit = ctx->fs->blocksize >> 2;
/* Pre-order callback on the indirect (metadata) block itself. */
68 	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
69 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) {
71 		ret = (*ctx->func)(ctx->fs, &blk64,
72 				   BLOCK_COUNT_IND, ref_block,
73 				   ref_offset, ctx->priv_data);
76 	check_for_ro_violation_return(ctx, ret);
/* Nothing to descend into: hole, or the callback asked to stop. */
77 	if (!*ind_block || (ret & BLOCK_ABORT)) {
/* Sanity-check the indirect block number against the fs bounds. */
81 	if (*ind_block >= ctx->fs->super->s_blocks_count ||
82 	    *ind_block < ctx->fs->super->s_first_data_block) {
83 		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
87 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
94 	block_nr = (blk_t *) ctx->ind_buf;
/*
 * APPEND mode visits every slot (including zeroes); the else branch
 * (elided header) presumably skips empty entries -- TODO confirm.
 */
96 	if (ctx->flags & BLOCK_FLAG_APPEND) {
97 		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
99 			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
104 			if (flags & BLOCK_ABORT) {
108 			offset += sizeof(blk_t);
111 		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
115 			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
120 			if (flags & BLOCK_ABORT) {
124 			offset += sizeof(blk_t);
/* Flush the indirect block if any entry was modified by the callback. */
127 	check_for_ro_violation_return(ctx, changed);
128 	if (changed & BLOCK_CHANGED) {
129 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
132 			ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order callback on the indirect block in depth-traverse mode. */
134 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
135 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
136 	    !(ret & BLOCK_ABORT)) {
138 		ret |= (*ctx->func)(ctx->fs, &blk64,
139 				    BLOCK_COUNT_IND, ref_block,
140 				    ref_offset, ctx->priv_data);
143 	check_for_ro_violation_return(ctx, ret);
/*
 * Walk one doubly-indirect block: same pre/post-order callback scheme as
 * block_iterate_ind, but each entry is itself a singly-indirect block
 * handed to block_iterate_ind.  When the dind block is absent (or the
 * callback aborts) ctx->bcount is advanced by limit*limit so logical
 * block numbering stays correct for later blocks.
 * NOTE(review): interior lines are elided in this view; comments cover
 * only what is visible.
 */
147 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
148 			      int ref_offset, struct block_context *ctx)
150 	int ret = 0, changed = 0;
151 	int i, flags, limit, offset;
155 	limit = ctx->fs->blocksize >> 2;
/* Pre-order callback on the dind (metadata) block itself. */
156 	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
157 			    BLOCK_FLAG_DATA_ONLY))) {
159 		ret = (*ctx->func)(ctx->fs, &blk64,
160 				   BLOCK_COUNT_DIND, ref_block,
161 				   ref_offset, ctx->priv_data);
164 	check_for_ro_violation_return(ctx, ret);
/* Hole or abort: skip the whole subtree but keep bcount consistent. */
165 	if (!*dind_block || (ret & BLOCK_ABORT)) {
166 		ctx->bcount += limit*limit;
169 	if (*dind_block >= ctx->fs->super->s_blocks_count ||
170 	    *dind_block < ctx->fs->super->s_first_data_block) {
171 		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
175 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
182 	block_nr = (blk_t *) ctx->dind_buf;
/* Recurse into each singly-indirect entry; APPEND visits empty slots too. */
184 	if (ctx->flags & BLOCK_FLAG_APPEND) {
185 		for (i = 0; i < limit; i++, block_nr++) {
186 			flags = block_iterate_ind(block_nr,
190 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
191 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
194 			offset += sizeof(blk_t);
197 		for (i = 0; i < limit; i++, block_nr++) {
/* Empty entry: skip the child but account for its limit logical blocks. */
198 			if (*block_nr == 0) {
199 				ctx->bcount += limit;
202 			flags = block_iterate_ind(block_nr,
206 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
207 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
210 			offset += sizeof(blk_t);
/* Flush the dind block if any child updated its entry. */
213 	check_for_ro_violation_return(ctx, changed);
214 	if (changed & BLOCK_CHANGED) {
215 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
218 			ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order callback on the dind block in depth-traverse mode. */
220 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
221 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
222 	    !(ret & BLOCK_ABORT)) {
224 		ret |= (*ctx->func)(ctx->fs, &blk64,
225 				    BLOCK_COUNT_DIND, ref_block,
226 				    ref_offset, ctx->priv_data);
229 	check_for_ro_violation_return(ctx, ret);
/*
 * Walk one triply-indirect block: mirrors block_iterate_dind one level
 * up -- each entry is a doubly-indirect block handed to
 * block_iterate_dind.  A missing tind block (or an abort) advances
 * ctx->bcount by limit^3; a missing child entry advances it by limit^2.
 * NOTE(review): interior lines are elided in this view; comments cover
 * only what is visible.
 */
233 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
234 			      int ref_offset, struct block_context *ctx)
236 	int ret = 0, changed = 0;
237 	int i, flags, limit, offset;
241 	limit = ctx->fs->blocksize >> 2;
/* Pre-order callback on the tind (metadata) block itself. */
242 	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
243 			    BLOCK_FLAG_DATA_ONLY))) {
245 		ret = (*ctx->func)(ctx->fs, &blk64,
246 				   BLOCK_COUNT_TIND, ref_block,
247 				   ref_offset, ctx->priv_data);
250 	check_for_ro_violation_return(ctx, ret);
/* Hole or abort: skip the whole subtree but keep bcount consistent. */
251 	if (!*tind_block || (ret & BLOCK_ABORT)) {
252 		ctx->bcount += limit*limit*limit;
255 	if (*tind_block >= ctx->fs->super->s_blocks_count ||
256 	    *tind_block < ctx->fs->super->s_first_data_block) {
257 		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
261 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
268 	block_nr = (blk_t *) ctx->tind_buf;
/* Recurse into each doubly-indirect entry; APPEND visits empty slots too. */
270 	if (ctx->flags & BLOCK_FLAG_APPEND) {
271 		for (i = 0; i < limit; i++, block_nr++) {
272 			flags = block_iterate_dind(block_nr,
276 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
277 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
280 			offset += sizeof(blk_t);
283 		for (i = 0; i < limit; i++, block_nr++) {
/* Empty entry: skip the child but account for its limit^2 logical blocks. */
284 			if (*block_nr == 0) {
285 				ctx->bcount += limit*limit;
288 			flags = block_iterate_dind(block_nr,
292 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
293 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
296 			offset += sizeof(blk_t);
/* Flush the tind block if any child updated its entry. */
299 	check_for_ro_violation_return(ctx, changed);
300 	if (changed & BLOCK_CHANGED) {
301 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
304 			ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order callback on the tind block in depth-traverse mode. */
306 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
307 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
308 	    !(ret & BLOCK_ABORT)) {
310 		ret |= (*ctx->func)(ctx->fs, &blk64,
311 				    BLOCK_COUNT_TIND, ref_block,
312 				    ref_offset, ctx->priv_data);
315 	check_for_ro_violation_return(ctx, ret);
/*
 * Top-level iterator: invoke func on every block of inode ino.  Reads the
 * inode, allocates (or borrows) three blocksize scratch buffers, handles
 * the HURD translator block, then either walks the extent tree (for
 * EXT4_EXTENTS_FL inodes) or the classic direct/ind/dind/tind block map,
 * writing the inode back if any callback reported BLOCK_CHANGED.
 * Returns 0, or ctx.errcode when the walk ended with BLOCK_ERROR set.
 * NOTE(review): many interior lines are elided in this view (error
 * checks, gotos, several declarations); comments cover only what is
 * visible.
 */
319 errcode_t ext2fs_block_iterate3(ext2_filsys fs,
323 				int (*func)(ext2_filsys fs,
325 					    e2_blkcnt_t blockcnt,
333 	struct ext2_inode inode;
335 	struct block_context ctx;
339 	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
341 	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
346 	 * Check to see if we need to limit large files
/*
 * BLOCK_FLAG_NO_LARGE: refuse non-directory inodes whose size needs the
 * high 32 bits (i_size_high != 0) -- used by the 32-bit compat wrapper.
 */
348 	if (flags & BLOCK_FLAG_NO_LARGE) {
349 		if (!LINUX_S_ISDIR(inode.i_mode) &&
350 		    (inode.i_size_high != 0))
351 			return EXT2_ET_FILE_TOO_BIG;
354 	limit = fs->blocksize >> 2;
358 	ctx.priv_data = priv_data;
/*
 * Use the caller-supplied buffer when given; otherwise allocate three
 * contiguous blocksize buffers and carve out the dind/tind slices.
 */
362 		ctx.ind_buf = block_buf;
364 		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
368 	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
369 	ctx.tind_buf = ctx.dind_buf + fs->blocksize;
372 	 * Iterate over the HURD translator block (if present)
374 	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
375 	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
376 		if (inode.osd1.hurd1.h_i_translator) {
377 			blk64 = inode.osd1.hurd1.h_i_translator;
378 			ret |= (*ctx.func)(fs, &blk64,
379 					   BLOCK_COUNT_TRANSLATOR,
381 			inode.osd1.hurd1.h_i_translator = (blk_t) blk64;
382 			if (ret & BLOCK_ABORT)
384 			check_for_ro_violation_goto(&ctx, ret, abort_exit);
/* Extent-mapped inode: walk the extent tree instead of the block map. */
388 	if (inode.i_flags & EXT4_EXTENTS_FL) {
389 		ext2_extent_handle_t handle;
390 		struct ext2fs_extent extent;
391 		e2_blkcnt_t blockcnt = 0;
392 		blk64_t blk, new_blk;
393 		int op = EXT2_EXTENT_ROOT;
397 		ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
402 			ctx.errcode = ext2fs_extent_get(handle, op, &extent);
/* EXTENT_NO_NEXT is the normal end-of-tree condition, not an error. */
404 				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
407 				if (!(flags & BLOCK_FLAG_APPEND))
410 					r = (*ctx.func)(fs, &blk, blockcnt,
413 					check_for_ro_violation_goto(&ctx, ret,
415 					if (r & BLOCK_CHANGED) {
417 						    ext2fs_extent_set_bmap(handle,
418 							(blk64_t) blockcnt++,
420 						if (ctx.errcode || (ret & BLOCK_ABORT))
427 			op = EXT2_EXTENT_NEXT;
/*
 * Interior (non-leaf) extent node: report it as metadata unless
 * DATA_ONLY; SECOND_VISIT vs DEPTH_TRAVERSE selects pre- vs post-order
 * so the node is reported exactly once per walk.
 */
429 			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
430 				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
432 				if ((!(extent.e_flags &
433 				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
434 				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
436 				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
437 				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
438 					ret |= (*ctx.func)(fs, &blk,
439 							   -1, 0, 0, priv_data);
440 					if (ret & BLOCK_CHANGED) {
443 						    ext2fs_extent_replace(handle, 0, &extent);
/* Leaf extent: visit each mapped block; preserve the uninit flag on remap. */
451 			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
452 				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
453 			for (blockcnt = extent.e_lblk, j = 0;
455 			     blk++, blockcnt++, j++) {
457 				r = (*ctx.func)(fs, &new_blk, blockcnt,
460 				check_for_ro_violation_goto(&ctx, ret,
462 				if (r & BLOCK_CHANGED) {
464 					    ext2fs_extent_set_bmap(handle,
470 				if (ret & BLOCK_ABORT)
476 		ext2fs_extent_free(handle);
477 			ret |= BLOCK_ERROR | BLOCK_ABORT;
482 	 * Iterate over normal data blocks
/* Direct blocks: APPEND mode also offers empty (zero) slots to func. */
484 	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
485 		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
486 			blk64 = inode.i_block[i];
487 			ret |= (*ctx.func)(fs, &blk64, ctx.bcount, 0, i,
489 			inode.i_block[i] = (blk_t) blk64;
490 			if (ret & BLOCK_ABORT)
494 	check_for_ro_violation_goto(&ctx, ret, abort_exit);
/* Then the three indirection levels, in order. */
495 	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
496 		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
497 					 0, EXT2_IND_BLOCK, &ctx);
498 		if (ret & BLOCK_ABORT)
502 	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
503 		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
504 					  0, EXT2_DIND_BLOCK, &ctx);
505 		if (ret & BLOCK_ABORT)
508 		ctx.bcount += limit * limit;
509 	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
510 		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
511 					  0, EXT2_TIND_BLOCK, &ctx);
512 		if (ret & BLOCK_ABORT)
/* Persist any i_block[] changes the callbacks made. */
517 	if (ret & BLOCK_CHANGED) {
518 		retval = ext2fs_write_inode(fs, ino, &inode);
521 			ctx.errcode = retval;
/* Free the scratch buffers (no-op when the caller supplied block_buf
 * -- TODO confirm; the guarding condition is elided in this view). */
526 		ext2fs_free_mem(&ctx.ind_buf);
528 	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
532 * Emulate the old ext2fs_block_iterate function!
/*
 * Adapter record carried through ext2fs_block_iterate3 by the 32-bit
 * compat path: holds the caller's 32-bit-block callback wrapped by
 * xlate64_func below.  (Struct header/closing brace elided in this view.)
 */
536 	int (*func)(ext2_filsys fs,
538 		    e2_blkcnt_t blockcnt,
/*
 * 64-bit -> 32-bit shim: narrows the blk64_t block number and reference
 * block to blk_t, calls the wrapped 32-bit callback, and (in the elided
 * tail) presumably copies any changed block number back -- TODO confirm.
 */
545 static int xlate64_func(ext2_filsys fs, blk64_t *blocknr,
546 			e2_blkcnt_t blockcnt, blk64_t ref_blk,
547 			int ref_offset, void *priv_data)
549 	struct xlate64 *xl = (struct xlate64 *) priv_data;
551 	blk_t block32 = *blocknr;
553 	ret = (*xl->func)(fs, &block32, blockcnt, (blk_t) ref_blk, ref_offset,
/*
 * 32-bit compatibility entry point: wraps the caller's blk_t-based
 * callback in a struct xlate64 and forwards to ext2fs_block_iterate3
 * with xlate64_func as the translating callback.
 */
559 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
563 				int (*func)(ext2_filsys fs,
565 					    e2_blkcnt_t blockcnt,
573 	xl.real_private = priv_data;
576 	return ext2fs_block_iterate3(fs, ino, flags, block_buf,
/*
 * Adapter record for the oldest API: holds the original int-blockcnt
 * callback wrapped by xlate_func below.  (Struct header/closing brace
 * elided in this view.)
 */
582 	int (*func)(ext2_filsys fs,
/*
 * Shim from the ext2fs_block_iterate2 callback signature to the original
 * ext2fs_block_iterate one: drops ref_block/ref_offset and narrows
 * blockcnt to int before calling the wrapped callback.
 */
592 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
593 		      blk_t ref_block EXT2FS_ATTR((unused)),
594 		      int ref_offset EXT2FS_ATTR((unused)),
597 	struct xlate *xl = (struct xlate *) priv_data;
599 	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
/*
 * Original (oldest) entry point: wraps the caller's callback in a
 * struct xlate and forwards to ext2fs_block_iterate2, forcing
 * BLOCK_FLAG_NO_LARGE since the int blockcnt cannot represent blocks of
 * files needing 64-bit sizes.
 */
602 errcode_t ext2fs_block_iterate(ext2_filsys fs,
606 			       int (*func)(ext2_filsys fs,
614 	xl.real_private = priv_data;
617 	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
618 				     block_buf, xlate_func, &xl);