2 * block.c --- iterate over all blocks in an inode
4 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
7 * This file may be redistributed under the terms of the GNU Public
21 struct block_context {
23 int (*func)(ext2_filsys fs,
/*
 * block_iterate_ind --- walk one indirect block, invoking ctx->func on
 * each block-number slot it contains.
 *
 * ind_block  -- pointer to the slot holding the indirect block's number
 * ref_block / ref_offset -- location of that slot, forwarded to the callback
 * ctx        -- shared iteration state (fs, flags, bcount, buffers, errcode)
 *
 * Returns a bitmask of BLOCK_* flags accumulated from the callbacks.
 *
 * NOTE(review): this excerpt is elided -- closing braces, return
 * statements and several lines between the original line numbers are
 * not shown; comments below are limited to what the visible code shows.
 */
39 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
40 int ref_offset, struct block_context *ctx)
42 int ret = 0, changed = 0;
43 int i, flags, limit, offset;
/* Number of 32-bit block numbers that fit in one filesystem block. */
46 limit = ctx->fs->blocksize >> 2;
/*
 * Pre-order callback on the indirect block itself, skipped when the
 * caller asked for depth-first traversal or data blocks only.
 */
47 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
48 !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
49 ret = (*ctx->func)(ctx->fs, ind_block,
50 BLOCK_COUNT_IND, ref_block,
51 ref_offset, ctx->priv_data);
/*
 * No indirect block allocated, or the callback requested an abort:
 * the body of this branch (presumably an early exit) is elided.
 */
52 if (!*ind_block || (ret & BLOCK_ABORT)) {
/* Sanity-check the block number against the filesystem's valid range. */
56 if (*ind_block >= ctx->fs->super->s_blocks_count ||
57 *ind_block < ctx->fs->super->s_first_data_block) {
58 ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
/* Read the indirect block's contents (into ctx->ind_buf -- see below). */
62 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
69 block_nr = (blk_t *) ctx->ind_buf;
/*
 * In append mode every slot is passed to the callback, including
 * zero (unallocated) entries, so new blocks can be appended.
 */
71 if (ctx->flags & BLOCK_FLAG_APPEND) {
72 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
73 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
77 if (flags & BLOCK_ABORT) {
81 offset += sizeof(blk_t);
/*
 * Non-append scan over the same slots; the elided lines presumably
 * skip zero entries -- TODO confirm against the full source.
 */
84 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
87 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
91 if (flags & BLOCK_ABORT) {
95 offset += sizeof(blk_t);
/* If any callback reported BLOCK_CHANGED, write the block back. */
98 if (changed & BLOCK_CHANGED) {
99 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
/* A failed write-back aborts the whole iteration with an error. */
102 ret |= BLOCK_ERROR | BLOCK_ABORT;
/*
 * Post-order callback on the indirect block for depth-first
 * traversal, unless an abort was already requested.
 */
104 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
105 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
106 !(ret & BLOCK_ABORT))
107 ret |= (*ctx->func)(ctx->fs, ind_block,
108 BLOCK_COUNT_IND, ref_block,
109 ref_offset, ctx->priv_data);
/*
 * block_iterate_dind --- walk one doubly-indirect block, recursing into
 * block_iterate_ind() for each indirect block it references.
 *
 * Mirrors block_iterate_ind(): pre/post-order callbacks on the dind
 * block itself, range check, read, per-slot scan, conditional write-back.
 *
 * NOTE(review): this excerpt is elided -- closing braces, return
 * statements and several lines between the original line numbers are
 * not shown.
 */
113 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
114 int ref_offset, struct block_context *ctx)
116 int ret = 0, changed = 0;
117 int i, flags, limit, offset;
/* Slots per filesystem block (blocksize / sizeof(blk_t)). */
120 limit = ctx->fs->blocksize >> 2;
/* Pre-order callback unless depth-first or data-only was requested. */
121 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
122 BLOCK_FLAG_DATA_ONLY)))
123 ret = (*ctx->func)(ctx->fs, dind_block,
124 BLOCK_COUNT_DIND, ref_block,
125 ref_offset, ctx->priv_data);
/*
 * Missing dind block or requested abort: account for the limit^2 data
 * blocks this subtree would have covered so bcount stays consistent.
 */
126 if (!*dind_block || (ret & BLOCK_ABORT)) {
127 ctx->bcount += limit*limit;
/* Reject block numbers outside the filesystem's valid range. */
130 if (*dind_block >= ctx->fs->super->s_blocks_count ||
131 *dind_block < ctx->fs->super->s_first_data_block) {
132 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
/* Read the dind block (into ctx->dind_buf -- see below). */
136 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
143 block_nr = (blk_t *) ctx->dind_buf;
/* Append mode: recurse into every slot, even unallocated ones. */
145 if (ctx->flags & BLOCK_FLAG_APPEND) {
146 for (i = 0; i < limit; i++, block_nr++) {
147 flags = block_iterate_ind(block_nr,
151 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
152 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
155 offset += sizeof(blk_t);
/* Non-append mode: skip empty slots, crediting bcount for each. */
158 for (i = 0; i < limit; i++, block_nr++) {
159 if (*block_nr == 0) {
160 ctx->bcount += limit;
163 flags = block_iterate_ind(block_nr,
167 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
168 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
171 offset += sizeof(blk_t);
/* Write the dind block back if anything inside it changed. */
174 if (changed & BLOCK_CHANGED) {
175 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
178 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order callback for depth-first traversal, unless aborting. */
180 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
181 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
182 !(ret & BLOCK_ABORT))
183 ret |= (*ctx->func)(ctx->fs, dind_block,
184 BLOCK_COUNT_DIND, ref_block,
185 ref_offset, ctx->priv_data);
/*
 * block_iterate_tind --- walk one triply-indirect block, recursing into
 * block_iterate_dind() for each doubly-indirect block it references.
 *
 * Same structure as the ind/dind walkers, one level deeper; skipped
 * subtrees credit ctx->bcount with limit^3 (whole tind) or limit^2
 * (one empty dind slot) data blocks.
 *
 * NOTE(review): this excerpt is elided -- closing braces, return
 * statements and several lines between the original line numbers are
 * not shown.
 */
189 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
190 int ref_offset, struct block_context *ctx)
192 int ret = 0, changed = 0;
193 int i, flags, limit, offset;
/* Slots per filesystem block. */
196 limit = ctx->fs->blocksize >> 2;
/* Pre-order callback unless depth-first or data-only was requested. */
197 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
198 BLOCK_FLAG_DATA_ONLY)))
199 ret = (*ctx->func)(ctx->fs, tind_block,
200 BLOCK_COUNT_TIND, ref_block,
201 ref_offset, ctx->priv_data);
/* Missing tind block or abort: account for the whole limit^3 subtree. */
202 if (!*tind_block || (ret & BLOCK_ABORT)) {
203 ctx->bcount += limit*limit*limit;
/* Reject block numbers outside the filesystem's valid range. */
206 if (*tind_block >= ctx->fs->super->s_blocks_count ||
207 *tind_block < ctx->fs->super->s_first_data_block) {
208 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
/* Read the tind block (into ctx->tind_buf -- see below). */
212 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
219 block_nr = (blk_t *) ctx->tind_buf;
/* Append mode: recurse into every slot, even unallocated ones. */
221 if (ctx->flags & BLOCK_FLAG_APPEND) {
222 for (i = 0; i < limit; i++, block_nr++) {
223 flags = block_iterate_dind(block_nr,
227 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
228 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
231 offset += sizeof(blk_t);
/* Non-append mode: skip empty slots, crediting limit^2 blocks each. */
234 for (i = 0; i < limit; i++, block_nr++) {
235 if (*block_nr == 0) {
236 ctx->bcount += limit*limit;
239 flags = block_iterate_dind(block_nr,
243 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
244 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
247 offset += sizeof(blk_t);
/* Write the tind block back if anything inside it changed. */
250 if (changed & BLOCK_CHANGED) {
251 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
254 ret |= BLOCK_ERROR | BLOCK_ABORT;
/* Post-order callback for depth-first traversal, unless aborting. */
256 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
257 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
258 !(ret & BLOCK_ABORT))
259 ret |= (*ctx->func)(ctx->fs, tind_block,
260 BLOCK_COUNT_TIND, ref_block,
261 ref_offset, ctx->priv_data);
/*
 * ext2fs_block_iterate2 --- iterate over all blocks of an inode,
 * calling func on each: the direct blocks, then the indirect, doubly-
 * and triply-indirect trees (and the HURD translator block if present).
 *
 * If any callback returns BLOCK_CHANGED, the possibly-modified block
 * list is written back to the inode.  Returns 0 on success, or
 * ctx.errcode when a callback reported BLOCK_ERROR.
 *
 * NOTE(review): this excerpt is elided -- declarations, braces, error
 * exits and `goto`s between the original line numbers are not shown.
 */
266 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
270 int (*func)(ext2_filsys fs,
272 e2_blkcnt_t blockcnt,
281 blk_t blocks[EXT2_N_BLOCKS]; /* directory data blocks */
282 struct ext2_inode inode;
284 struct block_context ctx;
287 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
/*
 * Check to see if we need to limit large files
 */
292 if (flags & BLOCK_FLAG_NO_LARGE) {
293 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
/* Non-directories with a non-zero high size word are "large files". */
297 if (!LINUX_S_ISDIR(inode.i_mode) &&
298 (inode.i_size_high != 0))
299 return EXT2_ET_FILE_TOO_BIG;
/* Copy the inode's i_block[] array into a local working copy. */
302 retval = ext2fs_get_blocks(fs, ino, blocks);
306 limit = fs->blocksize >> 2;
310 ctx.priv_data = priv_data;
/*
 * Use the caller-supplied scratch buffer if given; otherwise allocate
 * one 3*blocksize region and carve it into ind/dind/tind buffers.
 */
314 ctx.ind_buf = block_buf;
316 retval = ext2fs_get_mem(fs->blocksize * 3, &ctx.ind_buf);
320 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
321 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
/*
 * Iterate over the HURD translator block (if present)
 */
326 if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
327 !(flags & BLOCK_FLAG_DATA_ONLY)) {
328 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
332 if (inode.osd1.hurd1.h_i_translator) {
333 ret |= (*ctx.func)(fs,
334 &inode.osd1.hurd1.h_i_translator,
335 BLOCK_COUNT_TRANSLATOR,
337 if (ret & BLOCK_ABORT)
/*
 * Iterate over normal data blocks
 */
345 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
/* In append mode even zero (unallocated) slots are handed to func. */
346 if (blocks[i] || (flags & BLOCK_FLAG_APPEND)) {
347 ret |= (*ctx.func)(fs, &blocks[i],
348 ctx.bcount, 0, i, priv_data);
349 if (ret & BLOCK_ABORT)
/* Singly-indirect tree. */
353 if (*(blocks + EXT2_IND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
354 ret |= block_iterate_ind(blocks + EXT2_IND_BLOCK,
355 0, EXT2_IND_BLOCK, &ctx);
356 if (ret & BLOCK_ABORT)
/* Doubly-indirect tree. */
360 if (*(blocks + EXT2_DIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
361 ret |= block_iterate_dind(blocks + EXT2_DIND_BLOCK,
362 0, EXT2_DIND_BLOCK, &ctx);
363 if (ret & BLOCK_ABORT)
/*
 * bcount credit for a skipped dind subtree; the enclosing branch is
 * elided here -- confirm placement against the full source.
 */
366 ctx.bcount += limit * limit;
/* Triply-indirect tree. */
367 if (*(blocks + EXT2_TIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
368 ret |= block_iterate_tind(blocks + EXT2_TIND_BLOCK,
369 0, EXT2_TIND_BLOCK, &ctx);
370 if (ret & BLOCK_ABORT)
/*
 * A callback changed one of the top-level block numbers: copy the
 * working array back into the inode and write it out.
 */
375 if (ret & BLOCK_CHANGED) {
377 retval = ext2fs_read_inode(fs, ino, &inode);
381 for (i=0; i < EXT2_N_BLOCKS; i++)
382 inode.i_block[i] = blocks[i];
383 retval = ext2fs_write_inode(fs, ino, &inode);
/* Release the scratch buffers (no-op if the caller supplied them --
 * the guarding condition is elided in this excerpt). */
389 ext2fs_free_mem(&ctx.ind_buf);
/* Report the saved error code only if a callback flagged an error. */
391 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
395 * Emulate the old ext2fs_block_iterate function!
399 int (*func)(ext2_filsys fs,
/*
 * xlate_func --- adapter callback handed to ext2fs_block_iterate2():
 * converts the new-style signature back to the old ext2fs_block_iterate
 * one by dropping ref_block/ref_offset and narrowing blockcnt to int,
 * then forwards to the caller's original function and private data.
 */
409 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
410 blk_t ref_block EXT2FS_ATTR((unused)),
411 int ref_offset EXT2FS_ATTR((unused)),
414 struct xlate *xl = (struct xlate *) priv_data;
416 return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
/*
 * ext2fs_block_iterate --- old-style block iterator, kept for backward
 * compatibility.  Wraps the caller's function and private data in a
 * struct xlate and delegates to ext2fs_block_iterate2() via xlate_func,
 * forcing BLOCK_FLAG_NO_LARGE on (the old interface's int blockcnt
 * cannot represent large files).
 */
419 errcode_t ext2fs_block_iterate(ext2_filsys fs,
423 int (*func)(ext2_filsys fs,
431 xl.real_private = priv_data;
434 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
435 block_buf, xlate_func, &xl);