/*
 * block.c --- iterate over all blocks in an inode
 *
 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Public
 * License.
 * %End-Header%
 */
18 #if EXT2_FLAT_INCLUDES
21 #include <linux/ext2_fs.h>
26 struct block_context {
28 int (*func)(ext2_filsys fs,
44 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
45 int ref_offset, struct block_context *ctx)
47 int ret = 0, changed = 0;
48 int i, flags, limit, offset;
51 limit = ctx->fs->blocksize >> 2;
52 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
53 !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
54 ret = (*ctx->func)(ctx->fs, ind_block,
55 BLOCK_COUNT_IND, ref_block,
56 ref_offset, ctx->priv_data);
57 if (!*ind_block || (ret & BLOCK_ABORT)) {
61 if (*ind_block >= ctx->fs->super->s_blocks_count ||
62 *ind_block < ctx->fs->super->s_first_data_block) {
63 ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
67 ctx->errcode = io_channel_read_blk(ctx->fs->io, *ind_block,
73 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
74 EXT2_FLAG_SWAP_BYTES_READ)) {
75 block_nr = (blk_t *) ctx->ind_buf;
76 for (i = 0; i < limit; i++, block_nr++)
77 *block_nr = ext2fs_swab32(*block_nr);
79 block_nr = (blk_t *) ctx->ind_buf;
81 if (ctx->flags & BLOCK_FLAG_APPEND) {
82 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
83 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
87 if (flags & BLOCK_ABORT) {
91 offset += sizeof(blk_t);
94 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
97 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
101 if (flags & BLOCK_ABORT) {
105 offset += sizeof(blk_t);
108 if (changed & BLOCK_CHANGED) {
109 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
110 EXT2_FLAG_SWAP_BYTES_WRITE)) {
111 block_nr = (blk_t *) ctx->ind_buf;
112 for (i = 0; i < limit; i++, block_nr++)
113 *block_nr = ext2fs_swab32(*block_nr);
115 ctx->errcode = io_channel_write_blk(ctx->fs->io, *ind_block,
118 ret |= BLOCK_ERROR | BLOCK_ABORT;
120 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
121 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
122 !(ret & BLOCK_ABORT))
123 ret |= (*ctx->func)(ctx->fs, ind_block,
124 BLOCK_COUNT_IND, ref_block,
125 ref_offset, ctx->priv_data);
129 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
130 int ref_offset, struct block_context *ctx)
132 int ret = 0, changed = 0;
133 int i, flags, limit, offset;
136 limit = ctx->fs->blocksize >> 2;
137 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
138 BLOCK_FLAG_DATA_ONLY)))
139 ret = (*ctx->func)(ctx->fs, dind_block,
140 BLOCK_COUNT_DIND, ref_block,
141 ref_offset, ctx->priv_data);
142 if (!*dind_block || (ret & BLOCK_ABORT)) {
143 ctx->bcount += limit*limit;
146 if (*dind_block >= ctx->fs->super->s_blocks_count ||
147 *dind_block < ctx->fs->super->s_first_data_block) {
148 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
152 ctx->errcode = io_channel_read_blk(ctx->fs->io, *dind_block,
158 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
159 EXT2_FLAG_SWAP_BYTES_READ)) {
160 block_nr = (blk_t *) ctx->dind_buf;
161 for (i = 0; i < limit; i++, block_nr++)
162 *block_nr = ext2fs_swab32(*block_nr);
164 block_nr = (blk_t *) ctx->dind_buf;
166 if (ctx->flags & BLOCK_FLAG_APPEND) {
167 for (i = 0; i < limit; i++, block_nr++) {
168 flags = block_iterate_ind(block_nr,
172 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
173 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
176 offset += sizeof(blk_t);
179 for (i = 0; i < limit; i++, block_nr++) {
180 if (*block_nr == 0) {
181 ctx->bcount += limit;
184 flags = block_iterate_ind(block_nr,
188 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
189 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
192 offset += sizeof(blk_t);
195 if (changed & BLOCK_CHANGED) {
196 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
197 EXT2_FLAG_SWAP_BYTES_WRITE)) {
198 block_nr = (blk_t *) ctx->dind_buf;
199 for (i = 0; i < limit; i++, block_nr++)
200 *block_nr = ext2fs_swab32(*block_nr);
202 ctx->errcode = io_channel_write_blk(ctx->fs->io, *dind_block,
205 ret |= BLOCK_ERROR | BLOCK_ABORT;
207 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
208 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
209 !(ret & BLOCK_ABORT))
210 ret |= (*ctx->func)(ctx->fs, dind_block,
211 BLOCK_COUNT_DIND, ref_block,
212 ref_offset, ctx->priv_data);
216 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
217 int ref_offset, struct block_context *ctx)
219 int ret = 0, changed = 0;
220 int i, flags, limit, offset;
223 limit = ctx->fs->blocksize >> 2;
224 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
225 BLOCK_FLAG_DATA_ONLY)))
226 ret = (*ctx->func)(ctx->fs, tind_block,
227 BLOCK_COUNT_TIND, ref_block,
228 ref_offset, ctx->priv_data);
229 if (!*tind_block || (ret & BLOCK_ABORT)) {
230 ctx->bcount += limit*limit*limit;
233 if (*tind_block >= ctx->fs->super->s_blocks_count ||
234 *tind_block < ctx->fs->super->s_first_data_block) {
235 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
239 ctx->errcode = io_channel_read_blk(ctx->fs->io, *tind_block,
245 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
246 EXT2_FLAG_SWAP_BYTES_READ)) {
247 block_nr = (blk_t *) ctx->tind_buf;
248 for (i = 0; i < limit; i++, block_nr++)
249 *block_nr = ext2fs_swab32(*block_nr);
251 block_nr = (blk_t *) ctx->tind_buf;
253 if (ctx->flags & BLOCK_FLAG_APPEND) {
254 for (i = 0; i < limit; i++, block_nr++) {
255 flags = block_iterate_dind(block_nr,
259 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
260 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
263 offset += sizeof(blk_t);
266 for (i = 0; i < limit; i++, block_nr++) {
267 if (*block_nr == 0) {
268 ctx->bcount += limit*limit;
271 flags = block_iterate_dind(block_nr,
275 if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
276 ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
279 offset += sizeof(blk_t);
282 if (changed & BLOCK_CHANGED) {
283 if (ctx->fs->flags & (EXT2_FLAG_SWAP_BYTES |
284 EXT2_FLAG_SWAP_BYTES_WRITE)) {
285 block_nr = (blk_t *) ctx->tind_buf;
286 for (i = 0; i < limit; i++, block_nr++)
287 *block_nr = ext2fs_swab32(*block_nr);
289 ctx->errcode = io_channel_write_blk(ctx->fs->io, *tind_block,
292 ret |= BLOCK_ERROR | BLOCK_ABORT;
294 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
295 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
296 !(ret & BLOCK_ABORT))
297 ret |= (*ctx->func)(ctx->fs, tind_block,
298 BLOCK_COUNT_TIND, ref_block,
299 ref_offset, ctx->priv_data);
304 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
308 int (*func)(ext2_filsys fs,
310 e2_blkcnt_t blockcnt,
319 blk_t blocks[EXT2_N_BLOCKS]; /* directory data blocks */
320 struct ext2_inode inode;
322 struct block_context ctx;
325 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
328 * Check to see if we need to limit large files
330 if (flags & BLOCK_FLAG_NO_LARGE) {
331 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
335 if (!LINUX_S_ISDIR(inode.i_mode) &&
336 (inode.i_size_high != 0))
337 return EXT2_ET_FILE_TOO_BIG;
340 retval = ext2fs_get_blocks(fs, ino, blocks);
344 limit = fs->blocksize >> 2;
348 ctx.priv_data = priv_data;
352 ctx.ind_buf = block_buf;
354 retval = ext2fs_get_mem(fs->blocksize * 3,
355 (void **) &ctx.ind_buf);
359 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
360 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
363 * Iterate over the HURD translator block (if present)
365 if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
366 !(flags & BLOCK_FLAG_DATA_ONLY)) {
367 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
371 if (inode.osd1.hurd1.h_i_translator) {
372 ret |= (*ctx.func)(fs,
373 &inode.osd1.hurd1.h_i_translator,
374 BLOCK_COUNT_TRANSLATOR,
376 if (ret & BLOCK_ABORT)
382 * Iterate over normal data blocks
384 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
385 if (blocks[i] || (flags & BLOCK_FLAG_APPEND)) {
386 ret |= (*ctx.func)(fs, &blocks[i],
387 ctx.bcount, 0, i, priv_data);
388 if (ret & BLOCK_ABORT)
392 if (*(blocks + EXT2_IND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
393 ret |= block_iterate_ind(blocks + EXT2_IND_BLOCK,
394 0, EXT2_IND_BLOCK, &ctx);
395 if (ret & BLOCK_ABORT)
399 if (*(blocks + EXT2_DIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
400 ret |= block_iterate_dind(blocks + EXT2_DIND_BLOCK,
401 0, EXT2_DIND_BLOCK, &ctx);
402 if (ret & BLOCK_ABORT)
405 ctx.bcount += limit * limit;
406 if (*(blocks + EXT2_TIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
407 ret |= block_iterate_tind(blocks + EXT2_TIND_BLOCK,
408 0, EXT2_TIND_BLOCK, &ctx);
409 if (ret & BLOCK_ABORT)
414 if (ret & BLOCK_CHANGED) {
416 retval = ext2fs_read_inode(fs, ino, &inode);
420 for (i=0; i < EXT2_N_BLOCKS; i++)
421 inode.i_block[i] = blocks[i];
422 retval = ext2fs_write_inode(fs, ino, &inode);
428 ext2fs_free_mem((void **) &ctx.ind_buf);
430 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
434 * Emulate the old ext2fs_block_iterate function!
438 int (*func)(ext2_filsys fs,
448 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
449 blk_t ref_block, int ref_offset, void *priv_data)
451 struct xlate *xl = (struct xlate *) priv_data;
453 return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
456 errcode_t ext2fs_block_iterate(ext2_filsys fs,
460 int (*func)(ext2_filsys fs,
468 xl.real_private = priv_data;
471 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
472 block_buf, xlate_func, &xl);