1 /* vi: set sw=4 ts=4: */
3 * block.c --- iterate over all blocks in an inode
5 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
8 * This file may be redistributed under the terms of the GNU Public
/*
 * Shared state threaded through the recursive block iterators below.
 * Only part of the definition is visible here; the iterators also
 * reference members fs, flags, bcount, ind_buf/dind_buf/tind_buf,
 * errcode and priv_data.
 */
22 struct block_context {
	/* Per-block callback invoked for every block number visited. */
24 	int (*func)(ext2_filsys fs,
/*
 * Walk one singly-indirect block: optionally report the indirect block
 * itself to the callback (before the children, unless
 * BLOCK_FLAG_DEPTH_TRAVERSE is set, in which case afterwards), then
 * visit every 32-bit block number stored in it.  If any callback set
 * BLOCK_CHANGED, the modified indirect block is written back to disk.
 *
 * *ind_block is the indirect block's own number; ref_block/ref_offset
 * say where that number is stored (passed through to the callback).
 * Returns the OR of BLOCK_* flags accumulated from the callbacks;
 * I/O and validity failures are reported via ctx->errcode.
 */
40 static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
41 				 int ref_offset, struct block_context *ctx)
43 	int ret = 0, changed = 0;
44 	int i, flags, limit, offset;
	/* Number of 32-bit block numbers that fit in one fs block. */
47 	limit = ctx->fs->blocksize >> 2;
	/* Pre-order callback on the indirect (metadata) block itself. */
48 	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
49 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
50 		ret = (*ctx->func)(ctx->fs, ind_block,
51 				   BLOCK_COUNT_IND, ref_block,
52 				   ref_offset, ctx->priv_data);
	/* Hole (no indirect block allocated) or callback asked to stop. */
53 	if (!*ind_block || (ret & BLOCK_ABORT)) {
	/* Sanity-check the indirect block number against fs bounds. */
57 	if (*ind_block >= ctx->fs->super->s_blocks_count ||
58 	    *ind_block < ctx->fs->super->s_first_data_block) {
59 		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
	/* Read the indirect block into the scratch buffer (ctx->ind_buf). */
63 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
70 	block_nr = (blk_t *) ctx->ind_buf;
	/* APPEND mode: every slot is passed to the callback, so it can
	 * fill in currently-zero (sparse) entries. */
72 	if (ctx->flags & BLOCK_FLAG_APPEND) {
73 		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
74 			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
78 			if (flags & BLOCK_ABORT) {
82 			offset += sizeof(blk_t);
	/* Normal mode: visit the slots; zero entries are presumably
	 * skipped, but the skip test is not visible in this view. */
85 		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
88 			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
92 			if (flags & BLOCK_ABORT) {
96 			offset += sizeof(blk_t);
	/* A callback rewrote at least one entry: flush the indirect
	 * block; a write failure aborts the whole iteration. */
99 	if (changed & BLOCK_CHANGED) {
100 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
103 			ret |= BLOCK_ERROR | BLOCK_ABORT;
	/* Post-order callback when depth-first traversal was requested. */
105 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
106 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
107 	    !(ret & BLOCK_ABORT))
108 		ret |= (*ctx->func)(ctx->fs, ind_block,
109 				    BLOCK_COUNT_IND, ref_block,
110 				    ref_offset, ctx->priv_data);
/*
 * Walk one doubly-indirect block: report the dind block itself to the
 * callback (pre- or post-order depending on BLOCK_FLAG_DEPTH_TRAVERSE),
 * then recurse into each singly-indirect block it references via
 * block_iterate_ind().  A hole advances ctx->bcount by limit*limit so
 * logical block numbering stays correct for sparse files.
 */
114 static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
115 				  int ref_offset, struct block_context *ctx)
117 	int ret = 0, changed = 0;
118 	int i, flags, limit, offset;
	/* Entries (block numbers) per block. */
121 	limit = ctx->fs->blocksize >> 2;
	/* Pre-order callback on the dind (metadata) block itself. */
122 	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
123 			    BLOCK_FLAG_DATA_ONLY)))
124 		ret = (*ctx->func)(ctx->fs, dind_block,
125 				   BLOCK_COUNT_DIND, ref_block,
126 				   ref_offset, ctx->priv_data);
127 	if (!*dind_block || (ret & BLOCK_ABORT)) {
		/* Whole dind tree is a hole: account for limit^2 blocks. */
128 		ctx->bcount += limit*limit;
	/* Validate the dind block number against fs bounds. */
131 	if (*dind_block >= ctx->fs->super->s_blocks_count ||
132 	    *dind_block < ctx->fs->super->s_first_data_block) {
133 		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
	/* Read the dind block into its dedicated scratch buffer. */
137 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
144 	block_nr = (blk_t *) ctx->dind_buf;
	/* APPEND mode: recurse into every slot, even zero ones. */
146 	if (ctx->flags & BLOCK_FLAG_APPEND) {
147 		for (i = 0; i < limit; i++, block_nr++) {
148 			flags = block_iterate_ind(block_nr,
152 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
153 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
156 			offset += sizeof(blk_t);
	/* Normal mode: skip holes, just advancing the logical count. */
159 		for (i = 0; i < limit; i++, block_nr++) {
160 			if (*block_nr == 0) {
161 				ctx->bcount += limit;
164 			flags = block_iterate_ind(block_nr,
168 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
169 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
172 			offset += sizeof(blk_t);
	/* Flush the dind block if a nested callback changed an entry. */
175 	if (changed & BLOCK_CHANGED) {
176 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
179 			ret |= BLOCK_ERROR | BLOCK_ABORT;
	/* Post-order callback for depth-first traversal. */
181 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
182 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
183 	    !(ret & BLOCK_ABORT))
184 		ret |= (*ctx->func)(ctx->fs, dind_block,
185 				    BLOCK_COUNT_DIND, ref_block,
186 				    ref_offset, ctx->priv_data);
/*
 * Walk one triply-indirect block: mirror image of block_iterate_dind(),
 * one level up -- recurses into each doubly-indirect block via
 * block_iterate_dind().  A hole at this level accounts for
 * limit*limit*limit logical blocks; a hole one level down (a zero entry
 * in this block) accounts for limit*limit.
 */
190 static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
191 				  int ref_offset, struct block_context *ctx)
193 	int ret = 0, changed = 0;
194 	int i, flags, limit, offset;
	/* Entries (block numbers) per block. */
197 	limit = ctx->fs->blocksize >> 2;
	/* Pre-order callback on the tind (metadata) block itself. */
198 	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
199 			    BLOCK_FLAG_DATA_ONLY)))
200 		ret = (*ctx->func)(ctx->fs, tind_block,
201 				   BLOCK_COUNT_TIND, ref_block,
202 				   ref_offset, ctx->priv_data);
203 	if (!*tind_block || (ret & BLOCK_ABORT)) {
		/* Whole tind tree is a hole: account for limit^3 blocks. */
204 		ctx->bcount += limit*limit*limit;
	/* Validate the tind block number against fs bounds. */
207 	if (*tind_block >= ctx->fs->super->s_blocks_count ||
208 	    *tind_block < ctx->fs->super->s_first_data_block) {
209 		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
	/* Read the tind block into its dedicated scratch buffer. */
213 	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
220 	block_nr = (blk_t *) ctx->tind_buf;
	/* APPEND mode: recurse into every slot, even zero ones. */
222 	if (ctx->flags & BLOCK_FLAG_APPEND) {
223 		for (i = 0; i < limit; i++, block_nr++) {
224 			flags = block_iterate_dind(block_nr,
228 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
229 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
232 			offset += sizeof(blk_t);
	/* Normal mode: skip holes, advancing the logical count by the
	 * limit^2 blocks each missing dind subtree would have covered. */
235 		for (i = 0; i < limit; i++, block_nr++) {
236 			if (*block_nr == 0) {
237 				ctx->bcount += limit*limit;
240 			flags = block_iterate_dind(block_nr,
244 			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
245 				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
248 			offset += sizeof(blk_t);
	/* Flush the tind block if a nested callback changed an entry. */
251 	if (changed & BLOCK_CHANGED) {
252 		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
255 			ret |= BLOCK_ERROR | BLOCK_ABORT;
	/* Post-order callback for depth-first traversal. */
257 	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
258 	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
259 	    !(ret & BLOCK_ABORT))
260 		ret |= (*ctx->func)(ctx->fs, tind_block,
261 				    BLOCK_COUNT_TIND, ref_block,
262 				    ref_offset, ctx->priv_data);
267 errcode_t ext2fs_block_iterate2(ext2_filsys fs,
271 int (*func)(ext2_filsys fs,
273 e2_blkcnt_t blockcnt,
282 blk_t blocks[EXT2_N_BLOCKS]; /* directory data blocks */
283 struct ext2_inode inode;
285 struct block_context ctx;
288 EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);
291 * Check to see if we need to limit large files
293 if (flags & BLOCK_FLAG_NO_LARGE) {
294 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
298 if (!LINUX_S_ISDIR(inode.i_mode) &&
299 (inode.i_size_high != 0))
300 return EXT2_ET_FILE_TOO_BIG;
303 retval = ext2fs_get_blocks(fs, ino, blocks);
307 limit = fs->blocksize >> 2;
311 ctx.priv_data = priv_data;
315 ctx.ind_buf = block_buf;
317 retval = ext2fs_get_mem(fs->blocksize * 3, &ctx.ind_buf);
321 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
322 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
325 * Iterate over the HURD translator block (if present)
327 if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
328 !(flags & BLOCK_FLAG_DATA_ONLY)) {
329 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
333 if (inode.osd1.hurd1.h_i_translator) {
334 ret |= (*ctx.func)(fs,
335 &inode.osd1.hurd1.h_i_translator,
336 BLOCK_COUNT_TRANSLATOR,
338 if (ret & BLOCK_ABORT)
344 * Iterate over normal data blocks
346 for (i = 0; i < EXT2_NDIR_BLOCKS; i++, ctx.bcount++) {
347 if (blocks[i] || (flags & BLOCK_FLAG_APPEND)) {
348 ret |= (*ctx.func)(fs, &blocks[i],
349 ctx.bcount, 0, i, priv_data);
350 if (ret & BLOCK_ABORT)
354 if (*(blocks + EXT2_IND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
355 ret |= block_iterate_ind(blocks + EXT2_IND_BLOCK,
356 0, EXT2_IND_BLOCK, &ctx);
357 if (ret & BLOCK_ABORT)
361 if (*(blocks + EXT2_DIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
362 ret |= block_iterate_dind(blocks + EXT2_DIND_BLOCK,
363 0, EXT2_DIND_BLOCK, &ctx);
364 if (ret & BLOCK_ABORT)
367 ctx.bcount += limit * limit;
368 if (*(blocks + EXT2_TIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
369 ret |= block_iterate_tind(blocks + EXT2_TIND_BLOCK,
370 0, EXT2_TIND_BLOCK, &ctx);
371 if (ret & BLOCK_ABORT)
376 if (ret & BLOCK_CHANGED) {
378 retval = ext2fs_read_inode(fs, ino, &inode);
382 for (i=0; i < EXT2_N_BLOCKS; i++)
383 inode.i_block[i] = blocks[i];
384 retval = ext2fs_write_inode(fs, ino, &inode);
390 ext2fs_free_mem(&ctx.ind_buf);
392 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
396  * Emulate the old ext2fs_block_iterate function!
/* Holds the old-style (3-argument) callback and its private data while
 * ext2fs_block_iterate2 runs. */
400 	int (*func)(ext2_filsys fs,
/*
 * Adapter callback: unwraps the struct xlate passed as priv_data and
 * forwards the call to the old-style callback, discarding ref_block /
 * ref_offset, which the old interface never exposed.
 */
410 static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
411 		      blk_t ref_block EXT2FS_ATTR((unused)),
412 		      int ref_offset EXT2FS_ATTR((unused)),
415 	struct xlate *xl = (struct xlate *) priv_data;
	/* Old-style callbacks take blockcnt as a plain int (the
	 * BLOCK_FLAG_NO_LARGE check keeps this narrowing safe). */
417 	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
420 errcode_t ext2fs_block_iterate(ext2_filsys fs,
424 int (*func)(ext2_filsys fs,
432 xl.real_private = priv_data;
435 return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
436 block_buf, xlate_func, &xl);