* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*
* No locking. Callers synchronize.
*/
-static CFS_LIST_HEAD(iam_formats);
+/*
+ * Registry of known iam formats; entries are probed via fmt->if_guess()
+ * when a container's format is being determined.  No locking — callers
+ * synchronize.
+ */
+static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
+
+/* Register @fmt so it is consulted when guessing a container's format. */
void iam_format_register(struct iam_format *fmt)
{
- cfs_list_add(&fmt->if_linkage, &iam_formats);
+ list_add(&fmt->if_linkage, &iam_formats);
}
EXPORT_SYMBOL(iam_format_register);
struct buffer_head *bh;
int err;
- LASSERT_SEM_LOCKED(&c->ic_idle_sem);
+ LASSERT(mutex_is_locked(&c->ic_idle_mutex));
if (blk == 0)
return NULL;
}
result = -ENOENT;
- cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
+ list_for_each_entry(fmt, &iam_formats, if_linkage) {
result = fmt->if_guess(c);
if (result == 0)
break;
idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
c->ic_descr->id_root_gap +
sizeof(struct dx_countlimit));
- cfs_down(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
if (bh != NULL && IS_ERR(bh))
result = PTR_ERR(bh);
else
c->ic_idle_bh = bh;
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
}
return result;
memset(c, 0, sizeof *c);
c->ic_descr = descr;
c->ic_object = inode;
- cfs_init_rwsem(&c->ic_sem);
+ init_rwsem(&c->ic_sem);
dynlock_init(&c->ic_tree_lock);
- cfs_sema_init(&c->ic_idle_sem, 1);
+ mutex_init(&c->ic_idle_mutex);
return 0;
}
EXPORT_SYMBOL(iam_container_init);
+/*
+ * Load the leaf node referenced by the current frame into @path->ip_leaf.
+ *
+ * Reads block ->ip_frame->leaf through descr->id_ops->id_node_read() and,
+ * on success, attaches the buffer head to the leaf and invokes the leaf
+ * operations' init() hook.  A leaf block number of 0 is bogus and only
+ * triggers the diagnostic printk (see bug 11027).  Returns 0 on success
+ * or the error from id_node_read()/init().
+ */
static int iam_leaf_load(struct iam_path *path)
{
- iam_ptr_t block;
- int err;
- struct iam_container *c;
- struct buffer_head *bh;
- struct iam_leaf *leaf;
- struct iam_descr *descr;
-
- c = path->ip_container;
- leaf = &path->ip_leaf;
- descr = iam_path_descr(path);
- block = path->ip_frame->leaf;
- if (block == 0) {
- /* XXX bug 11027 */
- printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
- (long unsigned)path->ip_frame->leaf,
- dx_get_count(dx_node_get_entries(path, path->ip_frame)),
- path->ip_frames[0].bh, path->ip_frames[1].bh,
- path->ip_frames[2].bh);
- }
- err = descr->id_ops->id_node_read(c, block, NULL, &bh);
- if (err == 0) {
- leaf->il_bh = bh;
- leaf->il_curidx = block;
- err = iam_leaf_ops(leaf)->init(leaf);
- assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
- }
- return err;
+ iam_ptr_t block;
+ int err;
+ struct iam_container *c;
+ struct buffer_head *bh;
+ struct iam_leaf *leaf;
+ struct iam_descr *descr;
+
+ c = path->ip_container;
+ leaf = &path->ip_leaf;
+ descr = iam_path_descr(path);
+ block = path->ip_frame->leaf;
+ if (block == 0) {
+ /* XXX bug 11027 */
+ printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
+ (long unsigned)path->ip_frame->leaf,
+ dx_get_count(dx_node_get_entries(path, path->ip_frame)),
+ path->ip_frames[0].bh, path->ip_frames[1].bh,
+ path->ip_frames[2].bh);
+ }
+ err = descr->id_ops->id_node_read(c, block, NULL, &bh);
+ if (err == 0) {
+ leaf->il_bh = bh;
+ leaf->il_curidx = block;
+ err = iam_leaf_ops(leaf)->init(leaf);
+ assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
+ }
+ return err;
}
static void iam_unlock_htree(struct iam_container *ic,
+/* Acquire the container's rw-semaphore for exclusive (write) access. */
void iam_container_write_lock(struct iam_container *ic)
{
- cfs_down_write(&ic->ic_sem);
+ down_write(&ic->ic_sem);
}
+/* Release the write lock taken by iam_container_write_lock(). */
void iam_container_write_unlock(struct iam_container *ic)
{
- cfs_up_write(&ic->ic_sem);
+ up_write(&ic->ic_sem);
}
+/* Acquire the container's rw-semaphore for shared (read) access. */
void iam_container_read_lock(struct iam_container *ic)
{
- cfs_down_read(&ic->ic_sem);
+ down_read(&ic->ic_sem);
}
+/* Release the read lock taken by iam_container_read_lock(). */
void iam_container_read_unlock(struct iam_container *ic)
{
- cfs_up_read(&ic->ic_sem);
+ up_read(&ic->ic_sem);
}
/*
struct dynlock_handle **dl, enum dynlock_type lt)
{
int result;
- struct inode *dir;
- dir = iam_path_obj(path);
while ((result = __iam_path_lookup(path)) == 0) {
do_corr(schedule());
*dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
static int iam_path_lookup(struct iam_path *path, int index)
{
struct iam_container *c;
- struct iam_descr *descr;
struct iam_leaf *leaf;
int result;
c = path->ip_container;
leaf = &path->ip_leaf;
- descr = iam_path_descr(path);
result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
assert_inv(iam_path_check(path));
do_corr(schedule());
int result;
struct iam_path *path;
struct iam_leaf *leaf;
- struct inode *obj;
do_corr(struct iam_ikey *ik_orig);
/* assert_corr(it->ii_flags&IAM_IT_MOVE); */
path = &it->ii_path;
leaf = &path->ip_leaf;
- obj = iam_path_obj(path);
assert_corr(iam_leaf_is_locked(leaf));
if (c->ic_idle_bh == NULL)
goto newblock;
- cfs_down(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
if (unlikely(c->ic_idle_bh == NULL)) {
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
goto newblock;
}
if (*e != 0)
goto fail;
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
bh = ldiskfs_bread(NULL, inode, *b, 0, e);
if (bh == NULL)
return NULL;
}
c->ic_idle_bh = idle;
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
got:
/* get write access for the found buffer head */
return bh;
newblock:
- bh = ldiskfs_append(h, inode, b, e);
+ bh = osd_ldiskfs_append(h, inode, b, e);
return bh;
fail:
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
ldiskfs_std_error(inode->i_sb, *e);
return NULL;
}
int count;
int rc;
- cfs_down(&c->ic_idle_sem);
+ mutex_lock(&c->ic_idle_mutex);
if (unlikely(c->ic_idle_failed)) {
rc = -EFAULT;
goto unlock;
rc = iam_txn_dirty(h, p, c->ic_idle_bh);
unlock:
- cfs_up(&c->ic_idle_sem);
+ mutex_unlock(&c->ic_idle_mutex);
if (rc != 0)
CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);