2 * block.c --- iterate over all blocks in an inode
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
7 * This file may be redistributed under the terms of the GNU Public
/*
 * Shared state threaded through ext2fs_block_iterate2() and the
 * recursive block_iterate_{ind,dind,tind}() helpers below.  Only the
 * callback member is visible in this view; the helpers also reference
 * fs, flags, bcount, the ind/dind/tind buffers, errcode and priv_data
 * fields of this struct (struct body elided here).
 */
21 struct block_context {
23 	int (*func)(ext2_filsys fs,
/*
 * Walk a single indirect block: optionally report the indirect block
 * itself to the callback (before its contents by default, after them
 * when BLOCK_FLAG_DEPTH_TRAVERSE is set, never when
 * BLOCK_FLAG_DATA_ONLY is set), then invoke the callback on every
 * block-number slot stored inside it.  If any callback reported
 * BLOCK_CHANGED, the (possibly modified) indirect block is written
 * back to disk.  Returns a mask of BLOCK_* result flags.
 * NOTE(review): several interior lines (declarations, else-branches,
 * error exits) are elided in this view of the file.
 */
39 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
40 int ref_offset, struct block_context *ctx)
42 int ret = 0, changed = 0;
43 int i, flags, limit, offset;
/* Number of 4-byte block pointers that fit in one fs block. */
46 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the indirect block itself, unless suppressed. */
47 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
48 !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
49 ret = (*ctx->func)(ctx->fs, ind_block,
50 BLOCK_COUNT_IND, ref_block,
51 ref_offset, ctx->priv_data);
/* No indirect block allocated, or the callback asked us to stop. */
52 if (!*ind_block || (ret & BLOCK_ABORT)) {
/* Sanity-check the indirect block number against fs bounds. */
56 if (*ind_block >= ctx->fs->super->s_blocks_count ||
57 *ind_block < ctx->fs->super->s_first_data_block) {
58 ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
/*
 * Image-file special case: the metadata buffer is zeroed first
 * (the image-specific read path is elided in this view — confirm
 * against the full source).
 */
62 if ((ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
63 (ctx->fs->io != ctx->fs->image_io)) {
65 memset(ctx->ind_buf, 0, ctx->fs->blocksize);
67 ctx->errcode = io_channel_read_blk(ctx->fs->io, *ind_block,
/* Swap the just-read block pointers to host byte order if needed. */
73 #ifdef EXT2FS_ENABLE_SWAPFS
74 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
75 EXT2_FLAG_SWAP_BYTES_READ)) {
76 block_nr = (blk_t *) ctx->ind_buf;
77 for (i = 0; i < limit; i++, block_nr++)
78 *block_nr = ext2fs_swab32(*block_nr);
81 block_nr = (blk_t *) ctx->ind_buf;
/*
 * APPEND mode visits every slot including zero (unallocated) ones;
 * the non-append loop below presumably skips zero slots — the
 * skip test is elided in this view.
 */
83 if (ctx->flags & BLOCK_FLAG_APPEND) {
84 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
85 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
89 if (flags & BLOCK_ABORT) {
93 offset += sizeof(blk_t);
96 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
99 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
103 if (flags & BLOCK_ABORT) {
107 offset += sizeof(blk_t);
/*
 * Write the indirect block back if any callback changed an entry
 * (never for image files), swapping back to disk byte order first.
 */
110 if (!(ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
111 (changed & BLOCK_CHANGED)) {
112 #ifdef EXT2FS_ENABLE_SWAPFS
113 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
114 EXT2_FLAG_SWAP_BYTES_WRITE)) {
115 block_nr = (blk_t *) ctx->ind_buf;
116 for (i = 0; i < limit; i++, block_nr++)
117 *block_nr = ext2fs_swab32(*block_nr);
120 ctx->errcode = io_channel_write_blk(ctx->fs->io, *ind_block,
123 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit when depth-first traversal was requested. */
125 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
126 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
127 !(ret & BLOCK_ABORT))
128 ret |= (*ctx->func)(ctx->fs, ind_block,
129 BLOCK_COUNT_IND, ref_block,
130 ref_offset, ctx->priv_data);
/*
 * Walk a doubly-indirect block: same visiting order and flag handling
 * as block_iterate_ind(), but each non-zero slot is recursed into via
 * block_iterate_ind() rather than reported directly.  bcount is kept
 * in step by adding limit (one indirect block's worth of data blocks)
 * for every slot that is skipped.  Returns a mask of BLOCK_* flags.
 * NOTE(review): several interior lines are elided in this view.
 */
134 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
135 int ref_offset, struct block_context *ctx)
137 int ret = 0, changed = 0;
138 int i, flags, limit, offset;
/* Number of 4-byte block pointers per fs block. */
141 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the doubly-indirect block itself. */
142 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
143 BLOCK_FLAG_DATA_ONLY)))
144 ret = (*ctx->func)(ctx->fs, dind_block,
145 BLOCK_COUNT_DIND, ref_block,
146 ref_offset, ctx->priv_data);
/* Whole subtree absent/aborted: account for limit^2 data blocks. */
147 if (!*dind_block || (ret & BLOCK_ABORT)) {
148 ctx->bcount += limit*limit;
/* Bounds-check the doubly-indirect block number. */
151 if (*dind_block >= ctx->fs->super->s_blocks_count ||
152 *dind_block < ctx->fs->super->s_first_data_block) {
153 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
/* Image-file metadata buffer handling (read path partly elided). */
157 if ((ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
158 (ctx->fs->io != ctx->fs->image_io)) {
160 memset(ctx->dind_buf, 0, ctx->fs->blocksize);
162 ctx->errcode = io_channel_read_blk(ctx->fs->io, *dind_block,
/* Swap pointers to host byte order after reading, if configured. */
168 #ifdef EXT2FS_ENABLE_SWAPFS
169 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
170 EXT2_FLAG_SWAP_BYTES_READ)) {
171 block_nr = (blk_t *) ctx->dind_buf;
172 for (i = 0; i < limit; i++, block_nr++)
173 *block_nr = ext2fs_swab32(*block_nr);
176 block_nr = (blk_t *) ctx->dind_buf;
/* APPEND mode recurses into every slot, even unallocated ones. */
178 if (ctx->flags & BLOCK_FLAG_APPEND) {
179 for (i = 0; i < limit; i++, block_nr++) {
180 flags = block_iterate_ind(block_nr,
184 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
185 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
188 offset += sizeof(blk_t);
/* Non-append: skip zero slots, charging bcount for the gap. */
191 for (i = 0; i < limit; i++, block_nr++) {
192 if (*block_nr == 0) {
193 ctx->bcount += limit;
196 flags = block_iterate_ind(block_nr,
200 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
201 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
204 offset += sizeof(blk_t);
/* Flush the block back to disk if a recursion changed a pointer. */
207 if (!(ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
208 (changed & BLOCK_CHANGED)) {
209 #ifdef EXT2FS_ENABLE_SWAPFS
210 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
211 EXT2_FLAG_SWAP_BYTES_WRITE)) {
212 block_nr = (blk_t *) ctx->dind_buf;
213 for (i = 0; i < limit; i++, block_nr++)
214 *block_nr = ext2fs_swab32(*block_nr);
217 ctx->errcode = io_channel_write_blk(ctx->fs->io, *dind_block,
220 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal. */
222 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
223 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
224 !(ret & BLOCK_ABORT))
225 ret |= (*ctx->func)(ctx->fs, dind_block,
226 BLOCK_COUNT_DIND, ref_block,
227 ref_offset, ctx->priv_data);
/*
 * Walk a triply-indirect block: identical structure to
 * block_iterate_dind(), one level up — each non-zero slot is recursed
 * into via block_iterate_dind(), and skipped subtrees charge bcount
 * with limit^2 (one doubly-indirect block's worth of data blocks),
 * or limit^3 when the whole triply-indirect tree is absent.
 * NOTE(review): several interior lines are elided in this view.
 */
231 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
232 int ref_offset, struct block_context *ctx)
234 int ret = 0, changed = 0;
235 int i, flags, limit, offset;
/* Number of 4-byte block pointers per fs block. */
238 limit = ctx->fs->blocksize >> 2;
/* Pre-order visit of the triply-indirect block itself. */
239 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
240 BLOCK_FLAG_DATA_ONLY)))
241 ret = (*ctx->func)(ctx->fs, tind_block,
242 BLOCK_COUNT_TIND, ref_block,
243 ref_offset, ctx->priv_data);
/* Whole subtree absent/aborted: account for limit^3 data blocks. */
244 if (!*tind_block || (ret & BLOCK_ABORT)) {
245 ctx->bcount += limit*limit*limit;
/* Bounds-check the triply-indirect block number. */
248 if (*tind_block >= ctx->fs->super->s_blocks_count ||
249 *tind_block < ctx->fs->super->s_first_data_block) {
250 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
/* Image-file metadata buffer handling (read path partly elided). */
254 if ((ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
255 (ctx->fs->io != ctx->fs->image_io)) {
257 memset(ctx->tind_buf, 0, ctx->fs->blocksize);
259 ctx->errcode = io_channel_read_blk(ctx->fs->io, *tind_block,
/* Swap pointers to host byte order after reading, if configured. */
265 #ifdef EXT2FS_ENABLE_SWAPFS
266 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
267 EXT2_FLAG_SWAP_BYTES_READ)) {
268 block_nr = (blk_t *) ctx->tind_buf;
269 for (i = 0; i < limit; i++, block_nr++)
270 *block_nr = ext2fs_swab32(*block_nr);
273 block_nr = (blk_t *) ctx->tind_buf;
/* APPEND mode recurses into every slot, even unallocated ones. */
275 if (ctx->flags & BLOCK_FLAG_APPEND) {
276 for (i = 0; i < limit; i++, block_nr++) {
277 flags = block_iterate_dind(block_nr,
281 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
282 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
285 offset += sizeof(blk_t);
/* Non-append: skip zero slots, charging bcount limit^2 per gap. */
288 for (i = 0; i < limit; i++, block_nr++) {
289 if (*block_nr == 0) {
290 ctx->bcount += limit*limit;
293 flags = block_iterate_dind(block_nr,
297 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
298 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
301 offset += sizeof(blk_t);
/* Flush the block back to disk if a recursion changed a pointer. */
304 if (!(ctx->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
305 (changed & BLOCK_CHANGED)) {
306 #ifdef EXT2FS_ENABLE_SWAPFS
307 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
308 EXT2_FLAG_SWAP_BYTES_WRITE)) {
309 block_nr = (blk_t *) ctx->tind_buf;
310 for (i = 0; i < limit; i++, block_nr++)
311 *block_nr = ext2fs_swab32(*block_nr);
314 ctx->errcode = io_channel_write_blk(ctx->fs->io, *tind_block,
317 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order visit for depth-first traversal. */
319 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
320 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
321 !(ret & BLOCK_ABORT))
322 ret |= (*ctx->func)(ctx->fs, tind_block,
323 BLOCK_COUNT_TIND, ref_block,
324 ref_offset, ctx->priv_data);
/*
 * Public entry point: invoke func on every block of inode ino —
 * optional HURD translator block, the twelve direct blocks, then the
 * indirect, doubly-indirect and triply-indirect trees via the helpers
 * above.  If any callback returned BLOCK_CHANGED, the updated block
 * map is written back into the inode.  Returns 0 on success or the
 * errcode recorded by the failing operation when BLOCK_ERROR was set.
 * NOTE(review): several interior lines (error exits, the
 * !block_buf test guarding the allocation, goto labels) are elided
 * in this view of the file.
 */
329 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
333 int (*func)(ext2_filsys fs,
335 e2_blkcnt_t blockcnt,
344 blk_t blocks[EXT2_N_BLOCKS]; /* directory data blocks */
345 struct ext2_inode inode;
347 struct block_context ctx;
350 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
/*
 * BLOCK_FLAG_NO_LARGE: refuse regular files with a non-zero high
 * size word, since the old 32-bit callback cannot address them.
 */
353 * Check to see if we need to limit large files
355 if (flags & BLOCK_FLAG_NO_LARGE) {
356 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
360 if (!LINUX_S_ISDIR(inode.i_mode) &&
361 (inode.i_size_high != 0))
362 return EXT2_ET_FILE_TOO_BIG;
/* Snapshot the inode's block map into a local array. */
365 retval = ext2fs_get_blocks(fs, ino, blocks);
369 limit = fs->blocksize >> 2;
373 ctx.priv_data = priv_data;
/*
 * Use the caller-supplied scratch buffer if given; otherwise
 * allocate three blocks' worth for the ind/dind/tind levels
 * (the guarding test on block_buf is elided in this view).
 */
377 ctx.ind_buf = block_buf;
379 retval = ext2fs_get_mem(fs->blocksize * 3, &ctx.ind_buf);
383 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
384 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
387 * Iterate over the HURD translator block (if present)
389 if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
390 !(flags & BLOCK_FLAG_DATA_ONLY)) {
391 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
395 if (inode.osd1.hurd1.h_i_translator) {
396 ret |= (*ctx.func)(fs,
397 &inode.osd1.hurd1.h_i_translator,
398 BLOCK_COUNT_TRANSLATOR,
400 if (ret & BLOCK_ABORT)
406 * Iterate over normal data blocks
408 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
409 if (blocks[i] || (flags & BLOCK_FLAG_APPEND)) {
410 ret |= (*ctx.func)(fs, &blocks[i],
411 ctx.bcount, 0, i, priv_data);
412 if (ret & BLOCK_ABORT)
/* Indirect tree (blockcnt accounting continues inside the helper). */
416 if (*(blocks + EXT2_IND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
417 ret |= block_iterate_ind(blocks + EXT2_IND_BLOCK,
418 0, EXT2_IND_BLOCK, &ctx);
419 if (ret & BLOCK_ABORT)
/* Doubly-indirect tree. */
423 if (*(blocks + EXT2_DIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
424 ret |= block_iterate_dind(blocks + EXT2_DIND_BLOCK,
425 0, EXT2_DIND_BLOCK, &ctx);
426 if (ret & BLOCK_ABORT)
/* Skipped dind tree still advances bcount by limit^2 blocks. */
429 ctx.bcount += limit * limit;
/* Triply-indirect tree. */
430 if (*(blocks + EXT2_TIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
431 ret |= block_iterate_tind(blocks + EXT2_TIND_BLOCK,
432 0, EXT2_TIND_BLOCK, &ctx);
433 if (ret & BLOCK_ABORT)
/* Persist any callback modifications to the inode's block map. */
438 if (ret & BLOCK_CHANGED) {
440 retval = ext2fs_read_inode(fs, ino, &inode);
444 for (i=0; i < EXT2_N_BLOCKS; i++)
445 inode.i_block[i] = blocks[i];
446 retval = ext2fs_write_inode(fs, ino, &inode);
/* Release the scratch buffer (no-op if caller supplied its own —
 * the guarding test is elided in this view; confirm). */
452 ext2fs_free_mem(&ctx.ind_buf);
454 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
458 * Emulate the old ext2fs_block_iterate function!
462 int (*func)(ext2_filsys fs,
/*
 * Adapter callback for ext2fs_block_iterate(): converts the new
 * five-argument callback signature used by ext2fs_block_iterate2()
 * into the old three-argument one, dropping ref_block/ref_offset and
 * narrowing blockcnt to int before calling the user's function.
 */
472 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
473 blk_t ref_block EXT2FS_ATTR((unused)),
474 int ref_offset EXT2FS_ATTR((unused)),
477 struct xlate *xl = (struct xlate *) priv_data;
479 return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
/*
 * Backwards-compatible wrapper around ext2fs_block_iterate2(): wraps
 * the caller's old-style callback in a struct xlate and forwards with
 * BLOCK_FLAG_NO_LARGE added, since the old int blockcnt cannot
 * represent blocks of files larger than 2GB.
 */
482 errcode_t ext2fs_block_iterate(ext2_filsys fs,
486 int (*func)(ext2_filsys fs,
494 xl.real_private = priv_data;
497 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
498 block_buf, xlate_func, &xl);