1 Index: iam-src/fs/ext3/namei.c
2 ===================================================================
3 --- iam-src.orig/fs/ext3/namei.c 2006-02-12 16:43:57.000000000 +0300
4 +++ iam-src/fs/ext3/namei.c 2006-02-12 23:22:12.000000000 +0300
5 @@ -83,22 +83,21 @@ static struct buffer_head *ext3_append(h
6 #define dxtrace(command)
20 +struct dx_countlimit {
27 +struct dx_entry; /* incomplete type */
29 +struct dx_entry_compat {
33 @@ -109,8 +108,7 @@ struct dx_entry
34 * hash version mod 4 should never be 0. Sincerely, the paranoia department.
40 struct fake_dirent dot;
42 struct fake_dirent dotdot;
43 @@ -124,13 +122,13 @@ struct dx_root
47 - struct dx_entry entries[0];
48 + struct {} entries[0];
53 struct fake_dirent fake;
54 - struct dx_entry entries[0];
55 + struct {} entries[0];
59 @@ -147,38 +145,76 @@ struct dx_map_entry
65 + size_t dpo_key_size;
66 + size_t dpo_ptr_size;
67 + size_t dpo_node_gap;
68 + size_t dpo_root_gap;
70 + u32 (*dpo_root_ptr)(struct dx_path *path);
71 + int (*dpo_node_check)(struct dx_path *path,
72 + struct dx_frame *frame, void *cookie);
73 + int (*dpo_node_init)(struct dx_path *path,
74 + struct buffer_head *bh, int root);
78 * Structure to keep track of a path drilled through htree.
81 - struct inode *dp_object;
82 - struct dx_frame dp_frames[DX_MAX_TREE_HEIGHT];
83 - struct dx_frame *dp_frame;
84 + struct inode *dp_object;
85 + struct dx_param *dp_param;
87 + struct dx_frame dp_frames[DX_MAX_TREE_HEIGHT];
88 + struct dx_frame *dp_frame;
89 + void *dp_key_target;
93 +static u32 htree_root_ptr(struct dx_path *p);
94 +static int htree_node_check(struct dx_path *path,
95 + struct dx_frame *frame, void *cookie);
96 +static int htree_node_init(struct dx_path *path,
97 + struct buffer_head *bh, int root);
99 +static struct dx_param htree_compat_param = {
100 + .dpo_key_size = sizeof ((struct dx_map_entry *)NULL)->hash,
101 + .dpo_ptr_size = sizeof ((struct dx_map_entry *)NULL)->offs,
102 + .dpo_node_gap = offsetof(struct dx_node, entries),
103 + .dpo_root_gap = offsetof(struct dx_root, entries),
105 + .dpo_root_ptr = htree_root_ptr,
106 + .dpo_node_check = htree_node_check,
107 + .dpo_node_init = htree_node_init
111 #ifdef CONFIG_EXT3_INDEX
112 -static inline unsigned dx_get_block (struct dx_entry *entry);
113 -static void dx_set_block (struct dx_entry *entry, unsigned value);
114 -static inline unsigned dx_get_hash (struct dx_entry *entry);
115 -static void dx_set_hash (struct dx_entry *entry, unsigned value);
116 -static unsigned dx_get_count (struct dx_entry *entries);
117 -static unsigned dx_get_limit (struct dx_entry *entries);
118 -static void dx_set_count (struct dx_entry *entries, unsigned value);
119 -static void dx_set_limit (struct dx_entry *entries, unsigned value);
120 -static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
121 -static unsigned dx_node_limit (struct inode *dir);
122 -static struct dx_frame *dx_probe(struct dentry *dentry,
124 - struct dx_hash_info *hinfo,
125 - struct dx_path *path,
127 +static inline unsigned dx_get_block(struct dx_path *p, struct dx_entry *entry);
128 +static void dx_set_block(struct dx_path *p,
129 + struct dx_entry *entry, unsigned value);
130 +static inline void *dx_get_key(struct dx_path *p,
131 + struct dx_entry *entry, void *key);
132 +static void dx_set_key(struct dx_path *p, struct dx_entry *entry, void *key);
133 +static unsigned dx_get_count(struct dx_entry *entries);
134 +static unsigned dx_get_limit(struct dx_entry *entries);
135 +static void dx_set_count(struct dx_entry *entries, unsigned value);
136 +static void dx_set_limit(struct dx_entry *entries, unsigned value);
137 +static unsigned dx_root_limit(struct dx_path *p);
138 +static unsigned dx_node_limit(struct dx_path *p);
139 +static int dx_probe(struct dentry *dentry,
141 + struct dx_hash_info *hinfo,
142 + struct dx_path *path);
143 static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
144 struct dx_hash_info *hinfo, struct dx_map_entry map[]);
145 static void dx_sort_map(struct dx_map_entry *map, unsigned count);
146 static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
147 struct dx_map_entry *offsets, int count);
148 static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
149 -static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
150 +static void dx_insert_block (struct dx_path *path,
151 + struct dx_frame *frame, u32 hash, u32 block);
152 static int ext3_htree_next_block(struct inode *dir, __u32 hash,
153 struct dx_path *path, __u32 *start_hash);
154 static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
155 @@ -186,29 +222,65 @@ static struct buffer_head * ext3_dx_find
156 static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
157 struct inode *inode);
159 +static inline void dx_path_init(struct dx_path *path, struct inode *inode);
160 +static inline void dx_path_fini(struct dx_path *path);
164 * Future: use high four bits of block for coalesce-on-delete flags
165 * Mask them off for now.
168 -static inline unsigned dx_get_block (struct dx_entry *entry)
169 +static inline void *entry_off(struct dx_entry *entry, ptrdiff_t off)
171 + return (void *)((char *)entry + off);
174 +static inline size_t dx_entry_size(struct dx_path *p)
176 - return le32_to_cpu(entry->block) & 0x00ffffff;
177 + return p->dp_param->dpo_key_size + p->dp_param->dpo_ptr_size;
180 -static inline void dx_set_block (struct dx_entry *entry, unsigned value)
181 +static inline struct dx_entry *dx_entry_shift(struct dx_path *p,
182 + struct dx_entry *entry, int shift)
184 - entry->block = cpu_to_le32(value);
186 + return e + shift * dx_entry_size(p);
189 -static inline unsigned dx_get_hash (struct dx_entry *entry)
190 +static inline ptrdiff_t dx_entry_diff(struct dx_path *p,
191 + struct dx_entry *e1, struct dx_entry *e2)
193 - return le32_to_cpu(entry->hash);
196 + diff = (void *)e1 - (void *)e2;
197 + assert(diff / dx_entry_size(p) * dx_entry_size(p) == diff);
198 + return diff / dx_entry_size(p);
201 +static inline unsigned dx_get_block(struct dx_path *p, struct dx_entry *entry)
203 + return le32_to_cpu(*(u32 *)entry_off(entry, p->dp_param->dpo_key_size))
207 -static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
208 +static inline void dx_set_block(struct dx_path *p,
209 + struct dx_entry *entry, unsigned value)
211 - entry->hash = cpu_to_le32(value);
212 + *(u32*)entry_off(entry, p->dp_param->dpo_key_size) = cpu_to_le32(value);
215 +static inline void *dx_get_key(struct dx_path *p,
216 + struct dx_entry *entry, void *key)
218 + memcpy(key, entry, p->dp_param->dpo_key_size);
222 +static inline void dx_set_key(struct dx_path *p,
223 + struct dx_entry *entry, void *key)
225 + memcpy(entry, key, p->dp_param->dpo_key_size);
228 static inline unsigned dx_get_count (struct dx_entry *entries)
229 @@ -231,17 +303,123 @@ static inline void dx_set_limit (struct
230 ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
233 -static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
234 +static inline unsigned dx_root_limit(struct dx_path *p)
236 - unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
237 - EXT3_DIR_REC_LEN(2) - infosize;
238 - return 0? 20: entry_space / sizeof(struct dx_entry);
239 + struct dx_param *param = p->dp_param;
240 + unsigned entry_space = p->dp_object->i_sb->s_blocksize -
241 + param->dpo_root_gap;
242 + return entry_space / (param->dpo_key_size + param->dpo_ptr_size);
245 +static inline unsigned dx_node_limit(struct dx_path *p)
247 + struct dx_param *param = p->dp_param;
248 + unsigned entry_space = p->dp_object->i_sb->s_blocksize -
249 + param->dpo_node_gap;
250 + return entry_space / (param->dpo_key_size + param->dpo_ptr_size);
253 +static inline int dx_index_is_compat(struct dx_path *path)
255 + return path->dp_param == &htree_compat_param;
258 +static struct dx_entry *dx_get_entries(struct dx_path *path, void *data,
263 + path->dp_param->dpo_root_gap : path->dp_param->dpo_node_gap);
266 +static struct dx_entry *dx_node_get_entries(struct dx_path *path,
267 + struct dx_frame *frame)
269 + return dx_get_entries(path,
270 + frame->bh->b_data, frame == path->dp_frames);
273 +static u32 htree_root_ptr(struct dx_path *path)
278 +struct htree_cookie {
279 + struct dx_hash_info *hinfo;
280 + struct dentry *dentry;
283 +static int htree_node_check(struct dx_path *path, struct dx_frame *frame,
287 + struct dx_entry *entries;
288 + struct super_block *sb;
290 + data = frame->bh->b_data;
291 + entries = dx_node_get_entries(path, frame);
292 + sb = path->dp_object->i_sb;
293 + if (frame == path->dp_frames) {
295 + struct dx_root *root;
296 + struct htree_cookie *hc = cookie;
299 + if (root->info.hash_version != DX_HASH_TEA &&
300 + root->info.hash_version != DX_HASH_HALF_MD4 &&
301 + root->info.hash_version != DX_HASH_R5 &&
302 + root->info.hash_version != DX_HASH_LEGACY) {
303 + ext3_warning(sb, __FUNCTION__,
304 + "Unrecognised inode hash code %d",
305 + root->info.hash_version);
306 + return ERR_BAD_DX_DIR;
309 + if (root->info.unused_flags & 1) {
310 + ext3_warning(sb, __FUNCTION__,
311 + "Unimplemented inode hash flags: %#06x",
312 + root->info.unused_flags);
313 + return ERR_BAD_DX_DIR;
316 + path->dp_indirect = root->info.indirect_levels;
317 + if (path->dp_indirect > DX_MAX_TREE_HEIGHT - 1) {
318 + ext3_warning(sb, __FUNCTION__,
319 + "Unimplemented inode hash depth: %#06x",
320 + root->info.indirect_levels);
321 + return ERR_BAD_DX_DIR;
324 + assert((char *)entries == (((char *)&root->info) +
325 + root->info.info_length));
326 + assert(dx_get_limit(entries) == dx_root_limit(path));
328 + hc->hinfo->hash_version = root->info.hash_version;
329 + hc->hinfo->seed = EXT3_SB(sb)->s_hash_seed;
331 + ext3fs_dirhash(hc->dentry->d_name.name,
332 + hc->dentry->d_name.len, hc->hinfo);
333 + path->dp_key_target = &hc->hinfo->hash;
335 + /* non-root index */
336 + assert(entries == data + path->dp_param->dpo_node_gap);
337 + assert(dx_get_limit(entries) == dx_node_limit(path));
339 + frame->entries = frame->at = entries;
343 -static inline unsigned dx_node_limit (struct inode *dir)
344 +static int htree_node_init(struct dx_path *path,
345 + struct buffer_head *bh, int root)
347 - unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
348 - return 0? 22: entry_space / sizeof(struct dx_entry);
349 + struct dx_node *node;
353 + node = (void *)bh->b_data;
354 + node->fake.rec_len = cpu_to_le16(path->dp_object->i_sb->s_blocksize);
355 + node->fake.inode = 0;
360 @@ -327,123 +505,101 @@ struct stats dx_show_entries(struct dx_h
362 #endif /* DX_DEBUG */
365 - * Probe for a directory leaf block to search.
367 - * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
368 - * error in the directory index, and the caller should fall back to
369 - * searching the directory normally. The callers of dx_probe **MUST**
370 - * check for this error code, and make sure it never gets reflected
371 - * back to userspace.
373 -static struct dx_frame *
374 -dx_probe(struct dentry *dentry, struct inode *dir,
375 - struct dx_hash_info *hinfo, struct dx_path *path, int *err)
377 - unsigned count, indirect;
378 - struct dx_entry *at, *entries, *p, *q, *m;
379 - struct dx_root *root;
380 - struct buffer_head *bh;
381 - struct dx_frame *frame = path->dp_frames;
383 +static int dx_lookup(struct dx_path *path, void *cookie)
391 - dir = dentry->d_parent->d_inode;
392 - if (!(bh = ext3_bread (NULL,dir, 0, 0, err)))
394 - root = (struct dx_root *) bh->b_data;
395 - if (root->info.hash_version != DX_HASH_TEA &&
396 - root->info.hash_version != DX_HASH_HALF_MD4 &&
397 - root->info.hash_version != DX_HASH_R5 &&
398 - root->info.hash_version != DX_HASH_LEGACY) {
399 - ext3_warning(dir->i_sb, __FUNCTION__,
400 - "Unrecognised inode hash code %d", root->info.hash_version);
402 - *err = ERR_BAD_DX_DIR;
405 - hinfo->hash_version = root->info.hash_version;
406 - hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
408 - ext3fs_dirhash(dentry->d_name.name, dentry->d_name.len, hinfo);
409 - hash = hinfo->hash;
411 - if (root->info.unused_flags & 1) {
412 - ext3_warning(dir->i_sb, __FUNCTION__,
413 - "Unimplemented inode hash flags: %#06x",
414 - root->info.unused_flags);
416 - *err = ERR_BAD_DX_DIR;
419 + struct dx_param *param;
420 + struct dx_frame *frame;
422 - if ((indirect = root->info.indirect_levels) > DX_MAX_TREE_HEIGHT - 1) {
423 - ext3_warning(dir->i_sb, __FUNCTION__,
424 - "Unimplemented inode hash depth: %#06x",
425 - root->info.indirect_levels);
427 - *err = ERR_BAD_DX_DIR;
430 + param = path->dp_param;
432 - entries = (struct dx_entry *) (((char *)&root->info) +
433 - root->info.info_length);
434 - assert(dx_get_limit(entries) == dx_root_limit(dir,
435 - root->info.info_length));
436 - dxtrace (printk("Look up %x", hash));
439 + for (frame = path->dp_frames, i = 0,
440 + ptr = param->dpo_root_ptr(path); i <= path->dp_indirect;
441 + ptr = dx_get_block(path, frame->at), ++frame, ++i) {
442 + struct dx_entry *entries;
443 + struct dx_entry *p;
444 + struct dx_entry *q;
445 + struct dx_entry *m;
448 + frame->bh = ext3_bread(NULL, path->dp_object, ptr, 0, &err);
449 + if (frame->bh == NULL) {
453 + err = param->dpo_node_check(path, frame, cookie);
457 + entries = frame->entries;
458 count = dx_get_count(entries);
459 - assert (count && count <= dx_get_limit(entries));
461 - q = entries + count - 1;
465 + assert(count && count <= dx_get_limit(entries));
466 + p = dx_entry_shift(path, entries, 1);
467 + q = dx_entry_shift(path, entries, count - 1);
469 + m = dx_entry_shift(path,
470 + p, dx_entry_diff(path, q, p) / 2);
471 dxtrace(printk("."));
472 - if (dx_get_hash(m) > hash)
474 + if (memcmp(dx_get_key(path, m, path->dp_key),
475 + path->dp_key_target,
476 + param->dpo_key_size) > 0)
477 + q = dx_entry_shift(path, m, -1);
480 + p = dx_entry_shift(path, m, +1);
483 - if (0) // linear search cross check
485 + frame->at = dx_entry_shift(path, p, -1);
486 + if (1) { // linear search cross check
487 unsigned n = count - 1;
488 + struct dx_entry *at;
494 dxtrace(printk(","));
495 - if (dx_get_hash(++at) > hash)
498 + at = dx_entry_shift(path, at, +1);
499 + if (memcmp(dx_get_key(path, at, path->dp_key),
500 + path->dp_key_target,
501 + param->dpo_key_size) > 0) {
502 + at = dx_entry_shift(path, at, -1);
506 - assert (at == p - 1);
507 + assert(at == frame->at);
511 - dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
513 - frame->entries = entries;
516 - return path->dp_frame = frame;
517 - if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
519 - at = entries = ((struct dx_node *) bh->b_data)->entries;
520 - assert (dx_get_limit(entries) == dx_node_limit (dir));
524 - while (frame >= path->dp_frames) {
531 + dx_path_fini(path);
532 + path->dp_frame = --frame;
537 + * Probe for a directory leaf block to search.
539 + * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
540 + * error in the directory index, and the caller should fall back to
541 + * searching the directory normally. The callers of dx_probe **MUST**
542 + * check for this error code, and make sure it never gets reflected
543 + * back to userspace.
545 +static int dx_probe(struct dentry *dentry, struct inode *dir,
546 + struct dx_hash_info *hinfo, struct dx_path *path)
549 + __u32 hash_storage;
550 + struct htree_cookie hc = {
555 + assert(dx_index_is_compat(path));
556 + path->dp_key = &hash_storage;
557 + err = dx_lookup(path, &hc);
558 + assert(err != 0 || path->dp_frames[path->dp_indirect].bh != NULL);
562 static inline void dx_path_init(struct dx_path *path, struct inode *inode)
563 @@ -458,8 +614,10 @@ static inline void dx_path_fini(struct d
566 for (i = 0; i < ARRAY_SIZE(path->dp_frames); i--) {
567 - if (path->dp_frames[i].bh != NULL)
568 + if (path->dp_frames[i].bh != NULL) {
569 brelse(path->dp_frames[i].bh);
570 + path->dp_frames[i].bh = NULL;
575 @@ -488,6 +646,8 @@ static int ext3_htree_next_block(struct
576 int err, num_frames = 0;
579 + assert(dx_index_is_compat(path));
583 * Find the next leaf page by incrementing the frame pointer.
584 @@ -497,7 +657,9 @@ static int ext3_htree_next_block(struct
585 * nodes need to be read.
588 - if (++(p->at) < p->entries + dx_get_count(p->entries))
589 + p->at = dx_entry_shift(path, p->at, +1);
590 + if (p->at < dx_entry_shift(path, p->entries,
591 + dx_get_count(p->entries)))
593 if (p == path->dp_frames)
595 @@ -512,7 +674,7 @@ static int ext3_htree_next_block(struct
596 * desired continuation hash. If it doesn't, return since
597 * there's no point to read in the successive index pages.
599 - bhash = dx_get_hash(p->at);
600 + dx_get_key(path, p->at, &bhash);
603 if ((hash & 1) == 0) {
604 @@ -524,12 +686,13 @@ static int ext3_htree_next_block(struct
605 * block so no check is necessary
607 while (num_frames--) {
608 - if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at), 0, &err)))
609 + if (!(bh = ext3_bread(NULL, dir,
610 + dx_get_block(path, p->at), 0, &err)))
611 return err; /* Failure */
615 - p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
616 + p->at = p->entries = dx_node_get_entries(path, p);
620 @@ -609,6 +772,7 @@ int ext3_htree_fill_tree(struct file *di
622 dir = dir_file->f_dentry->d_inode;
623 dx_path_init(&path, dir);
624 + path.dp_param = &htree_compat_param;
625 if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
626 hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
627 hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
628 @@ -619,7 +783,8 @@ int ext3_htree_fill_tree(struct file *di
630 hinfo.hash = start_hash;
631 hinfo.minor_hash = 0;
632 - if (!dx_probe(NULL, dir_file->f_dentry->d_inode, &hinfo, &path, &err))
633 + err = dx_probe(NULL, dir_file->f_dentry->d_inode, &hinfo, &path);
637 /* Add '.' and '..' from the htree header */
638 @@ -634,7 +799,7 @@ int ext3_htree_fill_tree(struct file *di
642 - block = dx_get_block(path.dp_frame->at);
643 + block = dx_get_block(&path, path.dp_frame->at);
644 ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
645 start_hash, start_minor_hash);
647 @@ -722,17 +887,19 @@ static void dx_sort_map (struct dx_map_e
651 -static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
652 +static void dx_insert_block(struct dx_path *path,
653 + struct dx_frame *frame, u32 hash, u32 block)
655 struct dx_entry *entries = frame->entries;
656 - struct dx_entry *old = frame->at, *new = old + 1;
657 + struct dx_entry *old = frame->at, *new = dx_entry_shift(path, old, +1);
658 int count = dx_get_count(entries);
660 assert(count < dx_get_limit(entries));
661 - assert(old < entries + count);
662 - memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
663 - dx_set_hash(new, hash);
664 - dx_set_block(new, block);
665 + assert(old < dx_entry_shift(path, entries, count));
666 + memmove(dx_entry_shift(path, new, 1), new,
667 + (char *)dx_entry_shift(path, entries, count) - (char *)new);
668 + dx_set_key(path, new, &hash);
669 + dx_set_block(path, new, block);
670 dx_set_count(entries, count + 1);
673 @@ -934,7 +1101,9 @@ static struct buffer_head * ext3_dx_find
674 struct dx_hash_info hinfo;
677 - struct dx_entry dummy_dot;
678 + struct dx_entry_compat dummy_dot = {
681 struct ext3_dir_entry_2 *de, *top;
682 struct buffer_head *bh;
684 @@ -944,19 +1113,21 @@ static struct buffer_head * ext3_dx_find
685 struct inode *dir = dentry->d_parent->d_inode;
687 dx_path_init(&path, dir);
688 + path.dp_param = &htree_compat_param;
691 /* NFS may look up ".." - look at dx_root directory block */
692 if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
693 - if (!(dx_probe(dentry, NULL, &hinfo, &path, err)))
694 + *err = dx_probe(dentry, NULL, &hinfo, &path);
698 - path.dp_frame->bh = NULL; /* for dx_path_fini() */
699 - path.dp_frame->at = &dummy_dot; /* hack for zero entry*/
700 - dx_set_block(path.dp_frame->at, 0); /* dx_root block is 0 */
701 + path.dp_frame->bh = NULL; /* for dx_path_fini() */
702 + path.dp_frame->at = (void *)&dummy_dot; /* hack for zero entry*/
706 - block = dx_get_block(path.dp_frame->at);
707 + block = dx_get_block(&path, path.dp_frame->at);
708 if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
710 de = (struct ext3_dir_entry_2 *) bh->b_data;
711 @@ -1115,10 +1286,11 @@ static struct ext3_dir_entry_2* dx_pack_
713 /* Allocate new node, and split leaf node @bh into it, inserting new pointer
714 * into parent node identified by @frame */
715 -static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
716 +static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct dx_path *path,
717 struct buffer_head **bh,struct dx_frame *frame,
718 struct dx_hash_info *hinfo, int *error)
720 + struct inode *dir = path->dp_object;
721 unsigned blocksize = dir->i_sb->s_blocksize;
722 unsigned count, continued;
723 struct buffer_head *bh2;
724 @@ -1180,7 +1352,7 @@ static struct ext3_dir_entry_2 *do_split
728 - dx_insert_block (frame, hash2 + continued, newblock);
729 + dx_insert_block(path, frame, hash2 + continued, newblock);
730 err = ext3_journal_dirty_metadata (handle, bh2);
733 @@ -1315,6 +1487,7 @@ static int make_indexed_dir(handle_t *ha
734 struct fake_dirent *fde;
736 dx_path_init(&path, dir);
737 + path.dp_param = &htree_compat_param;
738 blocksize = dir->i_sb->s_blocksize;
739 dxtrace(printk("Creating index\n"));
740 retval = ext3_journal_get_write_access(handle, bh);
741 @@ -1350,10 +1523,10 @@ static int make_indexed_dir(handle_t *ha
742 root->info.info_length = sizeof(root->info);
743 root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
744 root->info.hash_version = DX_HASH_R5;
745 - entries = root->entries;
746 - dx_set_block (entries, 1);
747 + entries = (void *)root->entries;
748 + dx_set_block (&path, entries, 1);
749 dx_set_count (entries, 1);
750 - dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
751 + dx_set_limit (entries, dx_root_limit(&path));
753 /* Initialize as for dx_probe */
754 hinfo.hash_version = root->info.hash_version;
755 @@ -1363,7 +1536,7 @@ static int make_indexed_dir(handle_t *ha
756 path.dp_frame->at = entries;
757 path.dp_frame->bh = bh;
759 - de = do_split(handle,dir, &bh, path.dp_frame, &hinfo, &retval);
760 + de = do_split(handle, &path, &bh, path.dp_frame, &hinfo, &retval);
764 @@ -1446,8 +1619,8 @@ static int ext3_dx_add_entry(handle_t *h
768 + struct dx_param *param;
769 struct dx_frame *frame, *safe;
770 - struct dx_node *node2;
771 struct dx_entry *entries; /* old block contents */
772 struct dx_entry *entries2; /* new block contents */
773 struct dx_hash_info hinfo;
774 @@ -1463,7 +1636,10 @@ static int ext3_dx_add_entry(handle_t *h
777 dx_path_init(&path, dir);
778 - if (!dx_probe(dentry, NULL, &hinfo, &path, &err))
779 + param = path.dp_param = &htree_compat_param;
781 + err = dx_probe(dentry, NULL, &hinfo, &path);
784 frame = path.dp_frame;
785 entries = frame->entries;
786 @@ -1471,7 +1647,8 @@ static int ext3_dx_add_entry(handle_t *h
787 /* XXX nikita: global serialization! */
790 - if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
791 + if (!(bh = ext3_bread(handle, dir,
792 + dx_get_block(&path, frame->at), 0, &err)))
795 BUFFER_TRACE(bh, "get_write_access");
796 @@ -1519,12 +1696,9 @@ static int ext3_dx_add_entry(handle_t *h
798 for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
799 bh_new[i] = ext3_append (handle, dir, &newblock[i], &err);
802 + param->dpo_node_init(&path, bh_new[i], 0) != 0)
804 - node2 = (struct dx_node *)(bh_new[i]->b_data);
805 - entries2 = node2->entries;
806 - node2->fake.rec_len = cpu_to_le16(sb->s_blocksize);
807 - node2->fake.inode = 0;
808 BUFFER_TRACE(frame->bh, "get_write_access");
809 err = ext3_journal_get_write_access(handle, frame->bh);
811 @@ -1545,11 +1719,10 @@ static int ext3_dx_add_entry(handle_t *h
813 entries = frame->entries;
814 count = dx_get_count(entries);
815 - idx = frame->at - entries;
816 + idx = dx_entry_diff(&path, frame->at, entries);
819 - node2 = (struct dx_node *)(bh2->b_data);
820 - entries2 = node2->entries;
821 + entries2 = dx_get_entries(&path, bh2->b_data, 0);
823 if (frame == path.dp_frames) {
824 /* splitting root node. Tricky point:
825 @@ -1571,19 +1744,19 @@ static int ext3_dx_add_entry(handle_t *h
826 indirects = root->info.indirect_levels;
827 dxtrace(printk("Creating new root %d\n", indirects));
828 memcpy((char *) entries2, (char *) entries,
829 - count * sizeof(struct dx_entry));
830 - dx_set_limit(entries2, dx_node_limit(dir));
831 + count * dx_entry_size(&path));
832 + dx_set_limit(entries2, dx_node_limit(&path));
835 dx_set_count(entries, 1);
836 - dx_set_block(entries + 0, newblock[i]);
837 + dx_set_block(&path, entries, newblock[i]);
838 root->info.indirect_levels = indirects + 1;
840 /* Shift frames in the path */
841 memmove(frames + 2, frames + 1,
842 (sizeof path.dp_frames) - 2 * sizeof frames[0]);
843 /* Add new access path frame */
844 - frames[1].at = entries2 + idx;
845 + frames[1].at = dx_entry_shift(&path, entries2, idx);
846 frames[1].entries = entries = entries2;
849 @@ -1594,23 +1767,30 @@ static int ext3_dx_add_entry(handle_t *h
851 /* splitting non-root index node. */
852 unsigned count1 = count/2, count2 = count - count1;
853 - unsigned hash2 = dx_get_hash(entries + count1);
857 + dx_entry_shift(&path, entries, count1),
860 dxtrace(printk("Split index %i/%i\n", count1, count2));
862 - memcpy ((char *) entries2, (char *) (entries + count1),
863 - count2 * sizeof(struct dx_entry));
864 + memcpy ((char *) entries2,
865 + (char *) dx_entry_shift(&path, entries, count1),
866 + count2 * dx_entry_size(&path));
867 dx_set_count (entries, count1);
868 dx_set_count (entries2, count2);
869 - dx_set_limit (entries2, dx_node_limit(dir));
870 + dx_set_limit (entries2, dx_node_limit(&path));
872 /* Which index block gets the new entry? */
874 - frame->at = entries2 + idx - count1;
875 + frame->at = dx_entry_shift(&path, entries2,
877 frame->entries = entries = entries2;
878 swap(frame->bh, bh2);
881 - dx_insert_block (frame - 1, hash2, newblock[i]);
882 + dx_insert_block(&path, frame - 1, hash2, newblock[i]);
883 dxtrace(dx_show_index ("node", frame->entries));
884 dxtrace(dx_show_index ("node",
885 ((struct dx_node *) bh2->b_data)->entries));
886 @@ -1619,7 +1799,7 @@ static int ext3_dx_add_entry(handle_t *h
890 - de = do_split(handle, dir, &bh, --frame, &hinfo, &err);
891 + de = do_split(handle, &path, &bh, --frame, &hinfo, &err);
894 err = add_dirent_to_buf(handle, dentry, inode, de, bh);