*/
void lod_getref(struct lod_device *lod)
{
- cfs_down_read(&lod->lod_rw_sem);
- cfs_mutex_lock(&lod->lod_mutex);
+ down_read(&lod->lod_rw_sem); /* shared hold: blocks OST-array relocation */
+ mutex_lock(&lod->lod_mutex); /* lod_refcount is mutex-protected, not atomic */
lod->lod_refcount++;
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex); /* rw_sem stays held until lod_putref() */
}
/*
*/
void lod_putref(struct lod_device *lod)
{
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex); /* protects lod_refcount and lod_death_row */
lod->lod_refcount--;
if (lod->lod_refcount == 0 && lod->lod_death_row) {
struct lod_ost_desc *ost_desc, *tmp;
lod->lod_desc.ld_active_tgt_count--;
lod->lod_death_row--;
}
- cfs_mutex_unlock(&lod->lod_mutex);
- cfs_up_read(&lod->lod_rw_sem);
+ mutex_unlock(&lod->lod_mutex); /* drop locks before freeing descriptors */
+ up_read(&lod->lod_rw_sem); /* releases the hold taken in lod_getref() */
cfs_list_for_each_entry_safe(ost_desc, tmp, &kill, ltd_kill) {
int rc;
OBD_FREE_PTR(ost_desc);
}
} else {
- cfs_mutex_unlock(&lod->lod_mutex);
- cfs_up_read(&lod->lod_rw_sem);
+ mutex_unlock(&lod->lod_mutex);
+ up_read(&lod->lod_rw_sem);
}
}
/* grab write reference on the lod. Relocating the array requires
 * exclusive access */
- cfs_down_write(&lod->lod_rw_sem);
+ down_write(&lod->lod_rw_sem); /* exclusive: no readers during array move */
if (newsize <= lod->lod_osts_size)
/* someone else has already resize the array */
EXIT;
out:
- cfs_up_write(&lod->lod_rw_sem);
+ up_write(&lod->lod_rw_sem); /* single unlock on both success and error */
return rc;
}
lod_getref(lod);
}
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex); /* guards bitmap, OST_TGT slots, lod_ostnr */
if (cfs_bitmap_check(lod->lod_ost_bitmap, index)) {
CERROR("%s: device %d is registered already\n", obd->obd_name,
index);
OST_TGT(lod, index) = ost_desc;
cfs_bitmap_set(lod->lod_ost_bitmap, index);
lod->lod_ostnr++;
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex); /* success path: target published */
lod_putref(lod);
if (lod->lod_recovery_completed)
out_pool:
lod_ost_pool_remove(&lod->lod_pool_info, index);
out_mutex:
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex); /* error path: undo in reverse order */
lod_putref(lod);
out_desc:
OBD_FREE_PTR(ost_desc);
obd_str2uuid(&uuid, osp);
lod_getref(lod);
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex); /* hold both ref and mutex for removal */
/* check that the index is allocated in the bitmap */
if (!cfs_bitmap_check(lod->lod_ost_bitmap, idx) || !OST_TGT(lod,idx)) {
CERROR("%s: device %d is not set up\n", obd->obd_name, idx);
__lod_del_device(lod, idx);
EXIT;
out:
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex); /* common exit for hit and miss cases */
lod_putref(lod);
return(rc);
}
RETURN(0);
}
+/*
+ * Generate and write LOV EA for given striped object.
+ *
+ * Builds an on-disk lov_mds_md layout (V3 when the object belongs to a
+ * pool, V1 otherwise) from the in-core striping state of \a lo and
+ * stores it in the XATTR_NAME_LOV extended attribute of the underlying
+ * object as part of transaction \a th.
+ *
+ * \param env  execution environment (supplies per-thread scratch buffers)
+ * \param lo   striped object; ldo_stripenr must be > 0
+ * \param th   transaction handle covering the xattr update
+ *
+ * \retval 0 on success, negative errno from resize or dt_xattr_set()
+ */
+int lod_generate_and_set_lovea(const struct lu_env *env,
+ struct lod_object *lo, struct thandle *th)
+{
+ struct lod_thread_info *info = lod_env_info(env);
+ struct dt_object *next = dt_object_child(&lo->ldo_obj);
+ const struct lu_fid *fid = lu_object_fid(&lo->ldo_obj.do_lu);
+ struct lov_mds_md_v1 *lmm;
+ struct lov_ost_data_v1 *objs;
+ __u32 magic;
+ int i, rc, lmm_size;
+ ENTRY;
+
+ LASSERT(lo);
+ LASSERT(lo->ldo_stripenr > 0);
+
+ /* only the V3 format can carry a pool name */
+ magic = lo->ldo_pool ? LOV_MAGIC_V3 : LOV_MAGIC_V1;
+ lmm_size = lov_mds_md_size(lo->ldo_stripenr, magic);
+ if (info->lti_ea_store_size < lmm_size) {
+ rc = lod_ea_store_resize(info, lmm_size);
+ if (rc)
+ RETURN(rc);
+ }
+
+ lmm = info->lti_ea_store;
+
+ /* the on-disk EA is stored little-endian */
+ lmm->lmm_magic = cpu_to_le32(magic);
+ lmm->lmm_pattern = cpu_to_le32(LOV_PATTERN_RAID0);
+ lmm->lmm_object_id = cpu_to_le64(fid_ver_oid(fid));
+ lmm->lmm_object_seq = cpu_to_le64(fid_seq(fid));
+ lmm->lmm_stripe_size = cpu_to_le32(lo->ldo_stripe_size);
+ lmm->lmm_stripe_count = cpu_to_le16(lo->ldo_stripenr);
+ lmm->lmm_layout_gen = 0;
+ if (magic == LOV_MAGIC_V1) {
+ objs = &lmm->lmm_objects[0];
+ } else {
+ struct lov_mds_md_v3 *v3 = (struct lov_mds_md_v3 *) lmm;
+ /* NOTE(review): strncpy() leaves lmm_pool_name without a NUL
+ * terminator when ldo_pool is exactly LOV_MAXPOOLNAME bytes --
+ * confirm all consumers use bounded accesses on this field */
+ strncpy(v3->lmm_pool_name, lo->ldo_pool, LOV_MAXPOOLNAME);
+ objs = &v3->lmm_objects[0];
+ }
+
+ for (i = 0; i < lo->ldo_stripenr; i++) {
+ /* distinct name: do not shadow the object fid used above
+ * (keeps the function -Wshadow clean) */
+ const struct lu_fid *sfid;
+
+ LASSERT(lo->ldo_stripe[i]);
+ sfid = lu_object_fid(&lo->ldo_stripe[i]->do_lu);
+
+ rc = fid_ostid_pack(sfid, &info->lti_ostid);
+ LASSERT(rc == 0);
+ LASSERT(info->lti_ostid.oi_seq == FID_SEQ_OST_MDT0);
+
+ objs[i].l_object_id = cpu_to_le64(info->lti_ostid.oi_id);
+ objs[i].l_object_seq = cpu_to_le64(info->lti_ostid.oi_seq);
+ objs[i].l_ost_gen = cpu_to_le32(0);
+ objs[i].l_ost_idx = cpu_to_le32(fid_idif_ost_idx(sfid));
+ }
+
+ info->lti_buf.lb_buf = lmm;
+ info->lti_buf.lb_len = lmm_size;
+ rc = dt_xattr_set(env, next, &info->lti_buf, XATTR_NAME_LOV, 0,
+ th, BYPASS_CAPA);
+
+ RETURN(rc);
+}
+
int lod_get_lov_ea(const struct lu_env *env, struct lod_object *lo)
{
struct lod_thread_info *info = lod_env_info(env);
/* Set up allocation policy (QoS and RR) */
CFS_INIT_LIST_HEAD(&lod->lod_qos.lq_oss_list);
- cfs_init_rwsem(&lod->lod_qos.lq_rw_sem);
+ init_rwsem(&lod->lod_qos.lq_rw_sem); /* native kernel rwsem API */
lod->lod_qos.lq_dirty = 1;
lod->lod_qos.lq_rr.lqr_dirty = 1;
lod->lod_qos.lq_reset = 1;
if (lod->lod_osts_size > 0) {
int idx;
lod_getref(lod);
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex); /* teardown walks the whole bitmap */
cfs_foreach_bit(lod->lod_ost_bitmap, idx)
__lod_del_device(lod, idx);
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex); /* putref performs the deferred frees */
lod_putref(lod);
CFS_FREE_BITMAP(lod->lod_ost_bitmap);
for (idx = 0; idx < OST_PTRS; idx++) {