int iam_container_init(struct iam_container *c,
struct iam_descr *descr, struct inode *inode)
{
- memset(c, 0, sizeof *c);
+ memset(c, 0, sizeof(*c));
c->ic_descr = descr;
c->ic_object = inode;
dynlock_init(&c->ic_tree_lock);
}
void iam_path_init(struct iam_path *path, struct iam_container *c,
- struct iam_path_descr *pd)
+ struct iam_path_descr *pd)
{
- memset(path, 0, sizeof *path);
+ memset(path, 0, sizeof(*path));
path->ip_container = c;
path->ip_frame = path->ip_frames;
path->ip_data = pd;
}
int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
- handle_t *h, struct buffer_head **bh)
+ handle_t *h, struct buffer_head **bh)
{
/*
* NB: it can be called by iam_lfix_guess() which is still at
}
static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
- struct iam_ikey *key)
+ struct iam_ikey *key)
{
return iam_leaf_ops(leaf)->ikey(leaf, key);
}
static int iam_leaf_keycmp(const struct iam_leaf *leaf,
- const struct iam_key *key)
+ const struct iam_key *key)
{
return iam_leaf_ops(leaf)->key_cmp(leaf, key);
}
static int iam_leaf_keyeq(const struct iam_leaf *leaf,
- const struct iam_key *key)
+ const struct iam_key *key)
{
return iam_leaf_ops(leaf)->key_eq(leaf, key);
}
block = path->ip_frame->leaf;
if (block == 0) {
/* XXX bug 11027 */
- printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
- (long unsigned)path->ip_frame->leaf,
+ pr_emerg("wrong leaf: %lu %d [%p %p %p]\n",
+ (unsigned long)path->ip_frame->leaf,
dx_get_count(dx_node_get_entries(path, path->ip_frame)),
path->ip_frames[0].bh, path->ip_frames[1].bh,
path->ip_frames[2].bh);
}
static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
- const struct iam_rec *rec)
+ const struct iam_rec *rec)
{
iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
}
}
int iam_leaf_can_add(const struct iam_leaf *l,
- const struct iam_key *k, const struct iam_rec *r)
+ const struct iam_key *k, const struct iam_rec *r)
{
return iam_leaf_ops(l)->can_add(l, k, r);
}
static int iam_txn_dirty(handle_t *handle,
- struct iam_path *path, struct buffer_head *bh)
+ struct iam_path *path, struct buffer_head *bh)
{
int result;
}
static int iam_txn_add(handle_t *handle,
- struct iam_path *path, struct buffer_head *bh)
+ struct iam_path *path, struct buffer_head *bh)
{
int result;
struct super_block *sb = iam_path_obj(path)->i_sb;
return result;
}
-/***********************************************************************/
-/* iterator interface */
-/***********************************************************************/
-
+/* iterator interface */
static enum iam_it_state it_state(const struct iam_iterator *it)
{
return it->ii_state;
}
static inline int it_keycmp(const struct iam_iterator *it,
- const struct iam_key *k)
+ const struct iam_key *k)
{
return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
}
static inline int it_keyeq(const struct iam_iterator *it,
- const struct iam_key *k)
+ const struct iam_key *k)
{
return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
}
* postcondition: it_state(it) == IAM_IT_DETACHED
*/
int iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
- struct iam_path_descr *pd)
+ struct iam_path_descr *pd)
{
- memset(it, 0, sizeof *it);
+ memset(it, 0, sizeof(*it));
it->ii_flags = flags;
it->ii_state = IAM_IT_DETACHED;
iam_path_init(&it->ii_path, c, pd);
}
void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
- const struct iam_ikey *key, iam_ptr_t ptr)
+ const struct iam_ikey *key, iam_ptr_t ptr)
{
struct iam_entry *entries = frame->entries;
struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
}
void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
- const struct iam_ikey *key, iam_ptr_t ptr)
+ const struct iam_ikey *key, iam_ptr_t ptr)
{
iam_lock_bh(frame->bh);
iam_insert_key(path, frame, key, ptr);
int err;
int i;
- for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
+ for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i)
assert(path->ip_frames[i].bh == NULL);
do {
; /* find last filled in frame */
}
- /*
- * Lock frames, bottom to top.
- */
+ /* Lock frames, bottom to top. */
for (scan = bottom - 1; scan >= path->ip_frames; --scan)
iam_lock_bh(scan->bh);
- /*
- * Check them top to bottom.
- */
+ /* Check them top to bottom. */
result = 0;
for (scan = path->ip_frames; scan < bottom; ++scan) {
struct iam_entry *pos;
}
}
- /*
- * Unlock top to bottom.
- */
+ /* Unlock top to bottom. */
for (scan = path->ip_frames; scan < bottom; ++scan)
- iam_unlock_bh(scan->bh);
+ iam_unlock_bh(scan->bh);
DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
do_corr(schedule());
collision = result & IAM_LOOKUP_LAST;
switch (result & ~IAM_LOOKUP_LAST) {
case IAM_LOOKUP_EXACT:
- result = +1;
+ result = 1;
it->ii_state = IAM_IT_ATTACHED;
break;
case IAM_LOOKUP_OK:
}
result |= collision;
}
- /*
- * See iam_it_get_exact() for explanation.
- */
+ /* See iam_it_get_exact() for explanation. */
assert_corr(result != -ENOENT);
return result;
}
iam_it_fini(it);
result = __iam_it_get(it, 0);
} else
- result = +1;
+ result = 1;
}
if (result > 0)
result &= ~IAM_LOOKUP_LAST;
}
static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
- struct iam_ikey *ikey);
+ struct iam_ikey *ikey);
/*
* hash of the next page.
*/
static int iam_htree_advance(struct inode *dir, __u32 hash,
- struct iam_path *path, __u32 *start_hash,
- int compat)
+ struct iam_path *path, __u32 *start_hash,
+ int compat)
{
struct iam_frame *p;
struct buffer_head *bh;
}
if (compat) {
- /*
- * Htree hash magic.
- */
+ /* Htree hash magic. */
/*
* If the hash is 1, then continue only if the next page has a
struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
int result;
- /*
- * Locking for iam_index_next()... is to be described.
- */
+ /* Locking for iam_index_next()... is to be described. */
cursor = path->ip_frame->leaf;
if (!iam_leaf_at_end(leaf))
/* advance within leaf node */
iam_leaf_next(leaf);
- /*
- * multiple iterations may be necessary due to empty leaves.
- */
+ /* multiple iterations may be necessary due to empty leaves. */
while (result == 0 && iam_leaf_at_end(leaf)) {
do_corr(schedule());
/* advance index portion of the path */
assert_corr(iam_leaf_is_locked(leaf));
if (result == 1) {
struct dynlock_handle *lh;
+
lh = iam_lock_htree(iam_it_container(it),
path->ip_frame->leaf,
DLT_WRITE);
result = -ENOMEM;
} else if (result == 0)
/* end of container reached */
- result = +1;
+ result = 1;
if (result != 0)
iam_it_put(it);
}
* ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
*/
int iam_it_rec_set(handle_t *h,
- struct iam_iterator *it, const struct iam_rec *r)
+ struct iam_iterator *it, const struct iam_rec *r)
{
int result;
struct iam_path *path;
* it_state(it) == IAM_IT_SKEWED
*/
static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
- struct iam_ikey *ikey)
+ struct iam_ikey *ikey)
{
assert_corr(it_state(it) == IAM_IT_ATTACHED ||
it_state(it) == IAM_IT_SKEWED);
}
/* The block itself which contains the iam_idle_head is
- * also an idle block, and can be used as the new node. */
+ * also an idle block, and can be used as the new node.
+ */
idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
c->ic_descr->id_root_gap +
sizeof(struct dx_countlimit));
old_leaf = leaf->il_bh;
iam_leaf_split(leaf, &new_leaf, blknr);
if (old_leaf != leaf->il_bh) {
- /*
- * Switched to the new leaf.
- */
+ /* Switched to the new leaf. */
iam_leaf_unlock(leaf);
leaf->il_lock = lh;
path->ip_frame->leaf = blknr;
return err;
}
-static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
+static inline void dx_set_limit(struct iam_entry *entries, unsigned int value)
{
((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}
static int iam_shift_entries(struct iam_path *path,
- struct iam_frame *frame, unsigned count,
- struct iam_entry *entries, struct iam_entry *entries2,
- u32 newblock)
+ struct iam_frame *frame, unsigned int count,
+ struct iam_entry *entries, struct iam_entry *entries2,
+ u32 newblock)
{
- unsigned count1;
- unsigned count2;
+ unsigned int count1;
+ unsigned int count2;
int delta;
struct iam_frame *parent = frame - 1;
count2 = count - count1;
dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
- dxtrace(printk("Split index %d/%d\n", count1, count2));
+ dxtrace(pr_info("Split index %d/%d\n", count1, count2));
memcpy((char *) iam_entry_shift(path, entries2, delta),
(char *) iam_entry_shift(path, entries, count1),
int split_index_node(handle_t *handle, struct iam_path *path,
- struct dynlock_handle **lh)
+ struct dynlock_handle **lh)
{
struct iam_entry *entries; /* old block contents */
struct iam_entry *entries2; /* new block contents */
safe = frame;
- /*
- * Lock all nodes, bottom to top.
- */
+ /* Lock all nodes, bottom to top. */
for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
do_corr(schedule());
lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
err = iam_check_full_path(path, 1);
if (err)
goto cleanup;
- /*
- * And check that the same number of nodes is to be split.
- */
+ /* And check that the same number of nodes is to be split. */
for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
dx_get_count(frame->entries) == dx_get_limit(frame->entries);
--frame, ++i) {
goto cleanup;
}
- /*
- * Go back down, allocating blocks, locking them, and adding into
- * transaction...
- */
+ /* Go back down, allocate blocks, lock them, and add to transaction */
for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
bh_new[i] = iam_new_node(handle, path->ip_container,
&newblock[i], &err);
/* Go through nodes once more, inserting pointers */
for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
- unsigned count;
+ unsigned int count;
int idx;
struct buffer_head *bh2;
struct buffer_head *bh;
do_corr(schedule());
/* Shift frames in the path */
memmove(frames + 2, frames + 1,
- (sizeof path->ip_frames) - 2 * sizeof frames[0]);
+ sizeof(path->ip_frames) -
+ 2 * sizeof(frames[0]));
/* Add new access path frame */
frames[1].at = iam_entry_shift(path, entries2, idx);
frames[1].entries = entries = entries2;
frames[1].bh = bh2;
assert_inv(dx_node_check(path, frame));
- ++ path->ip_frame;
- ++ frame;
+ ++path->ip_frame;
+ ++frame;
assert_inv(dx_node_check(path, frame));
bh_new[0] = NULL; /* buffer head is "consumed" */
err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
*lh = lock[nr_splet];
lock[nr_splet] = NULL;
if (nr_splet > 0) {
- /*
- * Log ->i_size modification.
- */
+ /* Log ->i_size modification. */
err = ldiskfs_mark_inode_dirty(handle, dir);
if (err)
goto journal_error;
}
static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
- struct iam_path *path,
- const struct iam_key *k, const struct iam_rec *r)
+ struct iam_path *path,
+ const struct iam_key *k, const struct iam_rec *r)
{
int err;
struct iam_leaf *leaf;
do_corr(schedule());
err = iam_it_get_exact(it, k);
if (err == -ENOENT)
- err = +1; /* repeat split */
+ err = 1; /* repeat split */
else if (err == 0)
err = -EEXIST;
}
* !memcmp(iam_it_rec_get(it), r, ...))
*/
int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
- const struct iam_key *k, const struct iam_rec *r)
+ const struct iam_key *k, const struct iam_rec *r)
{
int result;
struct iam_path *path;
assert_inv(iam_path_check(path));
result = iam_txn_add(h, path, leaf->il_bh);
- /*
- * no compaction for now.
- */
+ /* no compaction for now. */
if (result == 0) {
iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
result = iam_txn_dirty(h, path, leaf->il_bh);
assert_corr(it_state(it) == IAM_IT_ATTACHED);
assert_corr(it_at_rec(it));
assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
- sizeof result);
+ sizeof(result));
result = 0;
return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
{
assert_corr(it_state(it) == IAM_IT_DETACHED &&
it->ii_flags&IAM_IT_MOVE);
- assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
+ assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
+ sizeof(pos));
return iam_it_iget(it, (struct iam_ikey *)&pos);
}
/***********************************************************************/
/* invariants */
/***********************************************************************/
-
static inline int ptr_inside(void *base, size_t size, void *ptr)
{
return (base <= ptr) && (ptr < base + size);
* Return values: 0: found, -ENOENT: not-found, -ve: error
*/
int iam_lookup(struct iam_container *c, const struct iam_key *k,
- struct iam_rec *r, struct iam_path_descr *pd)
+ struct iam_rec *r, struct iam_path_descr *pd)
{
struct iam_iterator it;
int result;
result = iam_it_get_exact(&it, k);
if (result == 0)
- /*
- * record with required key found, copy it into user buffer
- */
+ /* record with required key found, copy it into user buffer */
iam_reccpy(&it.ii_path.ip_leaf, r);
iam_it_put(&it);
iam_it_fini(&it);
* iam_lookup(c, k, r2) > 0;
*/
int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
- const struct iam_rec *r, struct iam_path_descr *pd)
+ const struct iam_rec *r, struct iam_path_descr *pd)
{
struct iam_iterator it;
int result;
* -ve: error, including -ENOENT if no record with the given key found.
*/
int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
- const struct iam_rec *r, struct iam_path_descr *pd)
+ const struct iam_rec *r, struct iam_path_descr *pd)
{
struct iam_iterator it;
struct iam_leaf *folio;
* !iam_lookup(c, k, *));
*/
int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
- struct iam_path_descr *pd)
+ struct iam_path_descr *pd)
{
struct iam_iterator it;
int result;