2 * block.c --- iterate over all blocks in an inode
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
7 * This file may be redistributed under the terms of the GNU Public
/*
 * Per-iteration state threaded through every helper below.
 * NOTE(review): this chunk is elided -- most members (fs, flags, bcount,
 * ind_buf/dind_buf/tind_buf, errcode, priv_data, referenced later in the
 * file) and the closing brace are missing from this view, and the
 * original file's line numbers are fused into each line.
 */
21 struct block_context {
/* User-supplied callback, invoked once per visited block number. */
23 	int (*func)(ext2_filsys fs,
/*
 * If the iteration was opened read-only (BLOCK_FLAG_READ_ONLY) but a
 * callback reported BLOCK_CHANGED, record EXT2_ET_RO_BLOCK_ITERATE in
 * the context and force BLOCK_ABORT | BLOCK_ERROR into `ret` so the
 * walk stops.  The name suggests this variant then returns from the
 * enclosing function; the return statement is elided from this view --
 * TODO confirm against the full source.  (No comments may be inserted
 * below: the macro body is line-continued with backslashes.)
 */
39 #define check_for_ro_violation_return(ctx, ret) \
41 	if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \
42 	    ((ret) & BLOCK_CHANGED)) { \
43 		(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \
44 		ret |= BLOCK_ABORT | BLOCK_ERROR; \
/*
 * Same read-only violation check as check_for_ro_violation_return, but
 * parameterized with a cleanup `label`; the name implies it ends with
 * `goto label` rather than a return (the goto itself is elided from
 * this view -- TODO confirm).  Used where buffers/handles must be
 * released before leaving the enclosing function.
 */
49 #define check_for_ro_violation_goto(ctx, ret, label)	\
51 	if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) && \
52 	    ((ret) & BLOCK_CHANGED)) { \
53 		(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE; \
54 		ret |= BLOCK_ABORT | BLOCK_ERROR; \
/*
 * Walk a single indirect block: report the indirect block itself to
 * ctx->func (before its contents, unless BLOCK_FLAG_DEPTH_TRAVERSE or
 * BLOCK_FLAG_DATA_ONLY suppresses that), range-check its block number,
 * read it, run the callback over each of its blocksize/4 entries, and
 * write it back if any callback reported BLOCK_CHANGED.  Returns the
 * accumulated BLOCK_* flags for the caller.
 *
 * NOTE(review): this chunk is elided -- several statements, local
 * declarations (e.g. block_nr) and closing braces are missing, and the
 * original file's line numbers are fused into each line.
 */
59 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
60 			     int ref_offset, struct block_context *ctx)
62 	int	ret = 0, changed = 0;
63 	int	i, flags, limit, offset;
/* Number of 4-byte block pointers that fit in one block. */
66 	limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the indirect block itself (metadata callback). */
67 	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
68 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
69 		ret = (*ctx->func)(ctx->fs, ind_block,
70 				   BLOCK_COUNT_IND, ref_block,
71 				   ref_offset, ctx->priv_data);
72 	check_for_ro_violation_return(ctx, ret);
/* Nothing to walk (hole) or the callback asked us to stop.
 * (The body of this if -- presumably bcount += limit and a return --
 * is elided.) */
73 	if (!*ind_block || (ret & BLOCK_ABORT)) {
/* Sanity-check the indirect block number against fs bounds. */
77 	if (*ind_block >= ctx->fs->super->s_blocks_count ||
78 	    *ind_block < ctx->fs->super->s_first_data_block) {
79 		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
/* Read the indirect block into ctx->ind_buf (trailing args elided). */
83 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
90 	block_nr = (blk_t *) ctx->ind_buf;
/* APPEND mode: call back for every slot, including zero (unallocated)
 * entries, so the callback can fill them in. */
92 	if (ctx->flags & BLOCK_FLAG_APPEND) {
93 		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
94 			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
98 			if (flags & BLOCK_ABORT) {
102 			offset += sizeof(blk_t);
/* Normal mode: same loop; the elided lines presumably skip zero
 * entries before invoking the callback -- TODO confirm. */
105 		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
108 			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
112 			if (flags & BLOCK_ABORT) {
116 			offset += sizeof(blk_t);
/* `changed` accumulates callback flags; re-check the RO contract on it,
 * then flush the modified indirect block back to disk. */
119 	check_for_ro_violation_return(ctx, changed);
120 	if (changed & BLOCK_CHANGED) {
121 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
/* Write failure path (condition elided): abort with error. */
124 			ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit of the indirect block when depth-first traversal
 * was requested. */
126 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
127 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
128 	    !(ret & BLOCK_ABORT))
129 		ret |= (*ctx->func)(ctx->fs, ind_block,
130 				    BLOCK_COUNT_IND, ref_block,
131 				    ref_offset, ctx->priv_data);
132 	check_for_ro_violation_return(ctx, ret);
/*
 * Walk a double-indirect block: same shape as block_iterate_ind, one
 * level up -- each entry is itself an indirect block handed to
 * block_iterate_ind.  A missing/aborted dind block advances bcount by
 * limit*limit (the logical blocks it would have covered).
 *
 * NOTE(review): elided chunk -- declarations, closing braces and some
 * call arguments are missing; original line numbers fused into lines.
 */
136 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
137 			      int ref_offset, struct block_context *ctx)
139 	int	ret = 0, changed = 0;
140 	int	i, flags, limit, offset;
/* Block pointers per block (4-byte entries). */
143 	limit = ctx->fs->blocksize >> 2;
/* Pre-order metadata callback for the dind block itself. */
144 	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
145 			    BLOCK_FLAG_DATA_ONLY)))
146 		ret = (*ctx->func)(ctx->fs, dind_block,
147 				   BLOCK_COUNT_DIND, ref_block,
148 				   ref_offset, ctx->priv_data);
149 	check_for_ro_violation_return(ctx, ret);
/* Hole or abort: account for the whole subtree's logical blocks. */
150 	if (!*dind_block || (ret & BLOCK_ABORT)) {
151 		ctx->bcount += limit*limit;
/* Bounds check on the dind block number. */
154 	if (*dind_block >= ctx->fs->super->s_blocks_count ||
155 	    *dind_block < ctx->fs->super->s_first_data_block) {
156 		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
/* Read the dind block into ctx->dind_buf (trailing args elided). */
160 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
167 	block_nr = (blk_t *) ctx->dind_buf;
/* APPEND mode: recurse into every slot, even zero entries. */
169 	if (ctx->flags & BLOCK_FLAG_APPEND) {
170 		for (i = 0; i < limit; i++, block_nr++) {
171 			flags = block_iterate_ind(block_nr,
175 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
176 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
179 			offset += sizeof(blk_t);
/* Normal mode: zero entries are skipped, advancing bcount by the
 * `limit` logical blocks the missing indirect block would cover. */
182 		for (i = 0; i < limit; i++, block_nr++) {
183 			if (*block_nr == 0) {
184 				ctx->bcount += limit;
187 			flags = block_iterate_ind(block_nr,
191 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
192 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
195 			offset += sizeof(blk_t);
/* Flush the dind block if any child callback changed an entry. */
198 	check_for_ro_violation_return(ctx, changed);
199 	if (changed & BLOCK_CHANGED) {
200 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
203 			ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order metadata callback for depth-first traversal. */
205 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
206 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
207 	    !(ret & BLOCK_ABORT))
208 		ret |= (*ctx->func)(ctx->fs, dind_block,
209 				    BLOCK_COUNT_DIND, ref_block,
210 				    ref_offset, ctx->priv_data);
211 	check_for_ro_violation_return(ctx, ret);
/*
 * Walk a triple-indirect block: identical structure to
 * block_iterate_dind, one more level up -- each entry is a dind block
 * handed to block_iterate_dind, and a hole/abort accounts for
 * limit*limit*limit logical blocks.
 *
 * NOTE(review): elided chunk -- declarations, closing braces and some
 * call arguments are missing; original line numbers fused into lines.
 */
215 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
216 			      int ref_offset, struct block_context *ctx)
218 	int	ret = 0, changed = 0;
219 	int	i, flags, limit, offset;
/* Block pointers per block (4-byte entries). */
222 	limit = ctx->fs->blocksize >> 2;
/* Pre-order metadata callback for the tind block itself. */
223 	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
224 			    BLOCK_FLAG_DATA_ONLY)))
225 		ret = (*ctx->func)(ctx->fs, tind_block,
226 				   BLOCK_COUNT_TIND, ref_block,
227 				   ref_offset, ctx->priv_data);
228 	check_for_ro_violation_return(ctx, ret);
/* Hole or abort: skip the entire cubic subtree in bcount. */
229 	if (!*tind_block || (ret & BLOCK_ABORT)) {
230 		ctx->bcount += limit*limit*limit;
/* Bounds check on the tind block number. */
233 	if (*tind_block >= ctx->fs->super->s_blocks_count ||
234 	    *tind_block < ctx->fs->super->s_first_data_block) {
235 		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
/* Read the tind block into ctx->tind_buf (trailing args elided). */
239 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
246 	block_nr = (blk_t *) ctx->tind_buf;
/* APPEND mode: recurse into every slot, even zero entries. */
248 	if (ctx->flags & BLOCK_FLAG_APPEND) {
249 		for (i = 0; i < limit; i++, block_nr++) {
250 			flags = block_iterate_dind(block_nr,
254 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
255 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
258 			offset += sizeof(blk_t);
/* Normal mode: skip zero entries, accounting limit*limit logical
 * blocks per skipped dind subtree. */
261 		for (i = 0; i < limit; i++, block_nr++) {
262 			if (*block_nr == 0) {
263 				ctx->bcount += limit*limit;
266 			flags = block_iterate_dind(block_nr,
270 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
271 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
274 			offset += sizeof(blk_t);
/* Flush the tind block if any child callback changed an entry. */
277 	check_for_ro_violation_return(ctx, changed);
278 	if (changed & BLOCK_CHANGED) {
279 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
282 			ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order metadata callback for depth-first traversal. */
284 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
285 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
286 	    !(ret & BLOCK_ABORT))
287 		ret |= (*ctx->func)(ctx->fs, tind_block,
288 				    BLOCK_COUNT_TIND, ref_block,
289 				    ref_offset, ctx->priv_data);
290 	check_for_ro_violation_return(ctx, ret);
/*
 * ext2fs_block_iterate2 --- invoke `func` for every block of inode
 * `ino`: the HURD translator block (if present and metadata is wanted),
 * then either the extent tree (inodes with EXT4_EXTENTS_FL) or the
 * classic direct + indirect/dind/tind block maps.  The inode itself is
 * written back if any callback returned BLOCK_CHANGED.  Returns 0, or
 * ctx.errcode when the walk ended with BLOCK_ERROR.
 *
 * NOTE(review): this chunk is heavily elided -- parameter list, many
 * declarations, gotos, labels and closing braces are missing, and the
 * original file's line numbers are fused into each line.  Comments
 * below describe only what the visible lines establish.
 */
294 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
/* Per-block callback; see struct block_context for the stored copy. */
298 			 int (*func)(ext2_filsys fs,
300 				     e2_blkcnt_t blockcnt,
308 	struct ext2_inode inode;
310 	struct block_context ctx;
313 	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
/* Load the inode whose blocks we will walk. */
315 	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
320 	 * Check to see if we need to limit large files
/* BLOCK_FLAG_NO_LARGE: refuse non-directory inodes whose size needs
 * i_size_high (i.e. files >= 4GB), for callers using the old 32-bit
 * blockcnt interface. */
322 	if (flags & BLOCK_FLAG_NO_LARGE) {
323 		if (!LINUX_S_ISDIR(inode.i_mode) &&
324 		    (inode.i_size_high != 0))
325 			return EXT2_ET_FILE_TOO_BIG;
/* Block pointers per block; used below for bcount accounting. */
328 	limit = fs->blocksize >> 2;
332 	ctx.priv_data = priv_data;
/* Caller-supplied scratch buffer, or allocate 3 contiguous blocks
 * (ind/dind/tind scratch) and carve it up. */
336 	ctx.ind_buf = block_buf;
338 		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
342 	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
343 	ctx.tind_buf = ctx.dind_buf + fs->blocksize;
346 	 * Iterate over the HURD translator block (if present)
348 	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
349 	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
350 		if (inode.osd1.hurd1.h_i_translator) {
351 			ret |= (*ctx.func)(fs,
352 					   &inode.osd1.hurd1.h_i_translator,
353 					   BLOCK_COUNT_TRANSLATOR,
355 			if (ret & BLOCK_ABORT)
357 			check_for_ro_violation_goto(&ctx, ret, abort_exit);
/* Extent-mapped inode: walk the extent tree instead of block maps.
 * This whole branch is heavily elided below -- the loop structure,
 * uninit handling and several gotos are only partially visible. */
361 	if (inode.i_flags & EXT4_EXTENTS_FL) {
362 		ext2_extent_handle_t	handle;
363 		struct ext2fs_extent	extent;
364 		e2_blkcnt_t		blockcnt = 0;
366 		int			op = EXT2_EXTENT_ROOT;
370 		ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
/* Main extent walk: get the next node/leaf; NO_NEXT ends the loop. */
375 			ctx.errcode = ext2fs_extent_get(handle, op, &extent);
377 				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
/* Non-APPEND walks do not visit unallocated tail blocks here --
 * surrounding context elided, TODO confirm. */
380 				if (!(flags & BLOCK_FLAG_APPEND))
383 					r = (*ctx.func)(fs, &blk, blockcnt,
386 					check_for_ro_violation_goto(&ctx, ret,
/* Callback changed the mapping: push it into the extent tree. */
388 					if (r & BLOCK_CHANGED) {
390 						ext2fs_extent_set_bmap(handle,
391 							(blk64_t) blockcnt++,
393 					if (ctx.errcode || (ret & BLOCK_ABORT))
400 			op = EXT2_EXTENT_NEXT;
/* Interior (non-leaf) extent node: visit it as metadata, once per
 * node -- pre-order normally, post-order (SECOND_VISIT) when
 * BLOCK_FLAG_DEPTH_TRAVERSE is set. */
402 			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
403 				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
405 				if ((!(extent.e_flags &
406 				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
407 				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
409 				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
410 				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
411 					ret |= (*ctx.func)(fs, &blk,
412 							   -1, 0, 0, priv_data);
413 					if (ret & BLOCK_CHANGED) {
416 						ext2fs_extent_replace(handle, 0, &extent);
/* Leaf extent: preserve the uninitialized flag when remapping. */
424 			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
425 				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
/* Visit each block covered by this leaf extent. */
426 			for (blockcnt = extent.e_lblk, j = 0;
428 			     blk++, blockcnt++, j++) {
430 				r = (*ctx.func)(fs, &new_blk, blockcnt,
433 				check_for_ro_violation_goto(&ctx, ret,
435 				if (r & BLOCK_CHANGED) {
437 					ext2fs_extent_set_bmap(handle,
444 				if (ret & BLOCK_ABORT)
/* Extent-branch exit: release the handle; error path (condition
 * elided) converts an errcode into BLOCK_ERROR | BLOCK_ABORT. */
450 		ext2fs_extent_free(handle);
451 			ret |= BLOCK_ERROR | BLOCK_ABORT;
456 	 * Iterate over normal data blocks
/* Classic block map: the 12 direct blocks first.  In APPEND mode even
 * zero (unallocated) slots are passed to the callback. */
458 	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
459 		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
460 			ret |= (*ctx.func)(fs, &inode.i_block[i],
461 					   ctx.bcount, 0, i, priv_data);
462 			if (ret & BLOCK_ABORT)
466 	check_for_ro_violation_goto(&ctx, ret, abort_exit);
/* Then the single, double, and triple indirect trees. */
467 	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
468 		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
469 					 0, EXT2_IND_BLOCK, &ctx);
470 		if (ret & BLOCK_ABORT)
474 	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
475 		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
476 					  0, EXT2_DIND_BLOCK, &ctx);
477 		if (ret & BLOCK_ABORT)
/* Skipped dind subtree still advances the logical block count. */
480 		ctx.bcount += limit * limit;
481 	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
482 		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
483 					  0, EXT2_TIND_BLOCK, &ctx);
484 		if (ret & BLOCK_ABORT)
/* A callback modified inode.i_block[]: persist the inode. */
489 	if (ret & BLOCK_CHANGED) {
490 		retval = ext2fs_write_inode(fs, ino, &inode);
493 			ctx.errcode = retval;
/* Cleanup: free the scratch buffer (only when we allocated it --
 * guard elided, TODO confirm) and report errcode only on BLOCK_ERROR. */
498 		ext2fs_free_mem(&ctx.ind_buf);
500 	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
504 * Emulate the old ext2fs_block_iterate function!
508 int (*func)(ext2_filsys fs,
/*
 * Adapter callback for the legacy ext2fs_block_iterate interface:
 * drops the ref_block/ref_offset arguments and narrows the 64-bit
 * blockcnt to int before calling the user's old-style function stored
 * in struct xlate.  (Signature tail and closing brace elided here.)
 */
518 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
519 		      blk_t ref_block EXT2FS_ATTR((unused)),
520 		      int ref_offset EXT2FS_ATTR((unused)),
523 	struct xlate *xl = (struct xlate *) priv_data;
/* Forward to the original callback with the legacy int blockcnt. */
525 	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
/*
 * Legacy entry point: wrap the caller's old-style callback in a struct
 * xlate and delegate to ext2fs_block_iterate2.  BLOCK_FLAG_NO_LARGE is
 * forced because the old callback only takes an int block count.
 * (Parameter list and the xl.func assignment are elided here.)
 */
528 errcode_t ext2fs_block_iterate(ext2_filsys fs,
532 		       int (*func)(ext2_filsys fs,
540 	xl.real_private = priv_data;
543 	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
544 				     block_buf, xlate_func, &xl);