struct cl_object *obj, struct cl_io *io);
int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr);
+ int (*llo_flush)(const struct lu_env *env, struct cl_object *obj,
+ struct ldlm_lock *lock);
};
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
+static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
static void lov_lsm_put(struct lov_stripe_md *lsm)
{
old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
LASSERT(old_obj != NULL);
old_lov = cl2lov(lu2cl(old_obj));
- if (old_lov->lo_layout_invalid) {
+ if (test_bit(LO_LAYOUT_INVALID, &old_lov->lo_obj_flags)) {
/* the object's layout has already changed but isn't
* refreshed */
lu_object_unhash(env, &subobj->co_lu);
spin_lock_init(&r0->lo_sub_lock);
r0->lo_nr = lse->lsme_stripe_count;
- LASSERT(r0->lo_nr <= lov_targets_nr(dev));
+ r0->lo_trunc_stripeno = -1;
- OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
+ OBD_ALLOC_PTR_ARRAY_LARGE(r0->lo_sub, r0->lo_nr);
if (r0->lo_sub == NULL)
GOTO(out, result = -ENOMEM);
struct cl_object *sub;
struct lu_site *site;
wait_queue_head_t *wq;
- wait_queue_entry_t *waiter;
LASSERT(r0->lo_sub[idx] == los);
/* ... wait until it is actually destroyed---sub-object clears its
* ->lo_sub[] slot in lovsub_object_free() */
- if (r0->lo_sub[idx] == los) {
- waiter = &lov_env_info(env)->lti_waiter;
- init_waitqueue_entry(waiter, current);
- add_wait_queue(wq, waiter);
- set_current_state(TASK_UNINTERRUPTIBLE);
- while (1) {
- /* this wait-queue is signaled at the end of
- * lu_object_free(). */
- set_current_state(TASK_UNINTERRUPTIBLE);
- spin_lock(&r0->lo_sub_lock);
- if (r0->lo_sub[idx] == los) {
- spin_unlock(&r0->lo_sub_lock);
- schedule();
- } else {
- spin_unlock(&r0->lo_sub_lock);
- set_current_state(TASK_RUNNING);
- break;
- }
- }
- remove_wait_queue(wq, waiter);
- }
+ wait_event(*wq, r0->lo_sub[idx] != los);
LASSERT(r0->lo_sub[idx] == NULL);
}
struct lov_layout_raid0 *r0 = &lle->lle_raid0;
if (r0->lo_sub != NULL) {
- OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
+ OBD_FREE_PTR_ARRAY_LARGE(r0->lo_sub, r0->lo_nr);
r0->lo_sub = NULL;
}
}
struct cl_device *mdcdev;
struct lov_oinfo *loi = NULL;
struct cl_object_conf *sconf = <i->lti_stripe_conf;
-
int rc;
__u32 idx = 0;
ENTRY;
- LASSERT(index == 0);
+ /* The DOM entry may not be at index zero due to FLR, but it must start at file offset 0 */
+ if (unlikely(lle->lle_extent->e_start != 0)) {
+ CERROR("%s: DOM entry must be the first stripe in a mirror\n",
+ lov2obd(dev->ld_lov)->obd_name);
+ dump_lsm(D_ERROR, lov->lo_lsm);
+ RETURN(-EINVAL);
+ }
/* find proper MDS device */
rc = lov_fld_lookup(dev, fid, &idx);
int result = 0;
unsigned int seq;
int i, j;
+ u64 dom_size = 0; /* DoM component size; must be u64 — it holds lsme_extent.e_end, and a bool would truncate it to 0/1, breaking the equal-DoM-size check across mirrors */
ENTRY;
LASSERT(lsm->lsm_entry_count > 0);
LASSERT(lov->lo_lsm == NULL);
lov->lo_lsm = lsm_addref(lsm);
- lov->lo_layout_invalid = true;
+ set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
dump_lsm(D_INODE, lsm);
if (equi(flr_state == LCM_FL_NONE, comp->lo_mirror_count > 1))
RETURN(-EINVAL);
- OBD_ALLOC(comp->lo_mirrors,
- comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
+ OBD_ALLOC_PTR_ARRAY(comp->lo_mirrors, comp->lo_mirror_count);
if (comp->lo_mirrors == NULL)
RETURN(-ENOMEM);
- OBD_ALLOC(comp->lo_entries, entry_count * sizeof(*comp->lo_entries));
+ OBD_ALLOC_PTR_ARRAY(comp->lo_entries, entry_count);
if (comp->lo_entries == NULL)
RETURN(-ENOMEM);
lle->lle_comp_ops = &raid0_ops;
break;
case LOV_PATTERN_MDT:
+ /* Allowed to have several DOM stripes in different
+ * mirrors with the same DoM size.
+ */
+ if (!dom_size) {
+ dom_size = lle->lle_lsme->lsme_extent.e_end;
+ } else if (dom_size !=
+ lle->lle_lsme->lsme_extent.e_end) {
+ CERROR("%s: DOM entries with different sizes\n",
+ lov2obd(dev->ld_lov)->obd_name);
+ dump_lsm(D_ERROR, lsm);
+ RETURN(-EINVAL);
+ }
lle->lle_comp_ops = &dom_ops;
break;
+ case LOV_PATTERN_FOREIGN:
+ lle->lle_comp_ops = NULL;
+ break;
default:
CERROR("%s: unknown composite layout entry type %i\n",
lov2obd(dev->ld_lov)->obd_name,
if (mirror_id == lre->lre_mirror_id) {
lre->lre_valid |= lle->lle_valid;
lre->lre_stale |= !lle->lle_valid;
+ lre->lre_foreign |=
+ lsme_is_foreign(lle->lle_lsme);
lre->lre_end = i;
continue;
}
LCME_FL_PREF_RD);
lre->lre_valid = lle->lle_valid;
lre->lre_stale = !lle->lle_valid;
+ lre->lre_foreign = lsme_is_foreign(lle->lle_lsme);
}
/* sanity check for FLR */
if (!lsme_inited(lle->lle_lsme))
continue;
+ if (lsme_is_foreign(lle->lle_lsme))
+ continue;
+
result = lle->lle_comp_ops->lco_init(env, dev, lov, index,
conf, lle);
if (result < 0)
if (lre->lre_stale)
continue;
+ if (lre->lre_foreign)
+ continue;
+
mirror_count++; /* valid mirror */
if (lre->lre_preferred || comp->lo_preferred_mirror < 0)
return 0;
}
+/**
+ * Layout-init handler for LLT_FOREIGN objects.
+ *
+ * A foreign layout carries opaque EA data and has no stripes to
+ * instantiate, so initialization only takes a reference on @lsm.
+ * Always returns 0.
+ */
+static int lov_init_foreign(const struct lu_env *env,
+ struct lov_device *dev, struct lov_object *lov,
+ struct lov_stripe_md *lsm,
+ const struct cl_object_conf *conf,
+ union lov_layout_state *state)
+{
+ LASSERT(lsm != NULL);
+ LASSERT(lov->lo_type == LLT_FOREIGN);
+ LASSERT(lov->lo_lsm == NULL);
+
+ lov->lo_lsm = lsm_addref(lsm);
+ return 0;
+}
+
static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
union lov_layout_state *state)
{
- LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
+ LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED ||
+ lov->lo_type == LLT_FOREIGN);
lov_layout_wait(env, lov);
return 0;
lov_layout_wait(env, lov);
if (comp->lo_entries)
- lov_foreach_layout_entry(lov, entry)
+ lov_foreach_layout_entry(lov, entry) {
+ if (entry->lle_lsme && lsme_is_foreign(entry->lle_lsme))
+ continue;
+
lov_delete_raid0(env, lov, entry);
+ }
RETURN(0);
}
struct lov_layout_entry *entry;
lov_foreach_layout_entry(lov, entry)
- entry->lle_comp_ops->lco_fini(env, entry);
+ if (entry->lle_comp_ops)
+ entry->lle_comp_ops->lco_fini(env, entry);
- OBD_FREE(comp->lo_entries,
- comp->lo_entry_count * sizeof(*comp->lo_entries));
+ OBD_FREE_PTR_ARRAY(comp->lo_entries, comp->lo_entry_count);
comp->lo_entries = NULL;
}
if (comp->lo_mirrors != NULL) {
- OBD_FREE(comp->lo_mirrors,
- comp->lo_mirror_count * sizeof(*comp->lo_mirrors));
+ OBD_FREE_PTR_ARRAY(comp->lo_mirrors, comp->lo_mirror_count);
comp->lo_mirrors = NULL;
}
static int lov_print_empty(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
+ (*p)(env, cookie, "empty %d\n",
+ test_bit(LO_LAYOUT_INVALID, &lu2lov(o)->lo_obj_flags));
return 0;
}
(*p)(env, cookie, "entries: %d, %s, lsm{%p 0x%08X %d %u}:\n",
lsm->lsm_entry_count,
- lov->lo_layout_invalid ? "invalid" : "valid", lsm,
- lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
+ test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ? "invalid" :
+ "valid", lsm, lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
lsm->lsm_layout_gen);
for (i = 0; i < lsm->lsm_entry_count; i++) {
lse->lsme_id, lse->lsme_pattern, lse->lsme_layout_gen,
lse->lsme_flags, lse->lsme_stripe_count,
lse->lsme_stripe_size);
- lov_print_raid0(env, cookie, p, lle);
+
+ if (!lsme_is_foreign(lse))
+ lov_print_raid0(env, cookie, p, lle);
}
return 0;
(*p)(env, cookie,
"released: %s, lsm{%p 0x%08X %d %u}:\n",
- lov->lo_layout_invalid ? "invalid" : "valid", lsm,
+ test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ? "invalid" :
+ "valid", lsm, lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
+ lsm->lsm_layout_gen);
+ return 0;
+}
+
+/**
+ * Debug printer for LLT_FOREIGN objects: dumps lsm state plus the raw
+ * foreign EA content (printed as text; assumes it is printable —
+ * NOTE(review): confirm lsm_foreign() content is always text).
+ */
+static int lov_print_foreign(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o)
+{
+ struct lov_object *lov = lu2lov(o);
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+
+ (*p)(env, cookie,
+ "foreign: %s, lsm{%p 0x%08X %d %u}:\n",
+ test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) ?
+ "invalid" : "valid", lsm,
lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
lsm->lsm_layout_gen);
+ (*p)(env, cookie,
+ "raw_ea_content '%.*s'\n",
+ (int)lsm->lsm_foreign_size, (char *)lsm_foreign(lsm));
return 0;
}
RETURN(0);
}
+/**
+ * llo_flush handler for composite layouts.
+ *
+ * Walks the layout entries looking for a DoM (MDT) component and flushes
+ * its DOM sub-object under @lock; stops at the first DoM entry found.
+ * Returns -ENODATA when the layout has no DoM component.
+ */
+static int lov_flush_composite(const struct lu_env *env,
+ struct cl_object *obj,
+ struct ldlm_lock *lock)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_layout_entry *lle;
+ int rc = -ENODATA;
+
+ ENTRY;
+
+ lov_foreach_layout_entry(lov, lle) {
+ if (!lsme_is_dom(lle->lle_lsme))
+ continue;
+ /* flush the first DoM entry found and stop */
+ rc = cl_object_flush(env, lovsub2cl(lle->lle_dom.lo_dom), lock);
+ break;
+ }
+
+ RETURN(rc);
+}
+
+/* llo_flush handler for layouts with nothing to flush (empty/released/foreign) */
+static int lov_flush_empty(const struct lu_env *env, struct cl_object *obj,
+ struct ldlm_lock *lock)
+{
+ return 0;
+}
+
const static struct lov_layout_operations lov_dispatch[] = {
[LLT_EMPTY] = {
.llo_init = lov_init_empty,
.llo_lock_init = lov_lock_init_empty,
.llo_io_init = lov_io_init_empty,
.llo_getattr = lov_attr_get_empty,
+ .llo_flush = lov_flush_empty,
},
[LLT_RELEASED] = {
.llo_init = lov_init_released,
.llo_lock_init = lov_lock_init_empty,
.llo_io_init = lov_io_init_released,
.llo_getattr = lov_attr_get_empty,
+ .llo_flush = lov_flush_empty,
},
[LLT_COMP] = {
.llo_init = lov_init_composite,
.llo_lock_init = lov_lock_init_composite,
.llo_io_init = lov_io_init_composite,
.llo_getattr = lov_attr_get_composite,
+ .llo_flush = lov_flush_composite,
+ },
+ [LLT_FOREIGN] = {
+ .llo_init = lov_init_foreign,
+ .llo_delete = lov_delete_empty,
+ .llo_fini = lov_fini_released,
+ .llo_print = lov_print_foreign,
+ .llo_page_init = lov_page_init_foreign,
+ .llo_lock_init = lov_lock_init_empty,
+ .llo_io_init = lov_io_init_empty,
+ .llo_getattr = lov_attr_get_empty,
+ .llo_flush = lov_flush_empty,
},
};
lsm->lsm_magic == LOV_MAGIC_COMP_V1)
return LLT_COMP;
+ if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
+ return LLT_FOREIGN;
+
return LLT_EMPTY;
}
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
{
- struct l_wait_info lwi = { 0 };
ENTRY;
while (atomic_read(&lov->lo_active_ios) > 0) {
PFID(lu_object_fid(lov2lu(lov))),
atomic_read(&lov->lo_active_ios));
- l_wait_event(lov->lo_waitq,
- atomic_read(&lov->lo_active_ios) == 0, &lwi);
+ wait_event_idle(lov->lo_waitq,
+ atomic_read(&lov->lo_active_ios) == 0);
}
RETURN(0);
}
* Lov object operations.
*
*/
-int lov_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
+static int lov_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
{
struct lov_object *lov = lu2lov(obj);
struct lov_device *dev = lov_object_dev(lov);
dump_lsm(D_INODE, lsm);
}
- lov_conf_lock(lov);
if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
- lov->lo_layout_invalid = true;
- GOTO(out, result = 0);
+ set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
+ GOTO(out_lsm, result = 0);
}
+ lov_conf_lock(lov);
if (conf->coc_opc == OBJECT_CONF_WAIT) {
- if (lov->lo_layout_invalid &&
+ if (test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags) &&
atomic_read(&lov->lo_active_ios) > 0) {
lov_conf_unlock(lov);
result = lov_layout_wait(env, lov);
if ((lsm == NULL && lov->lo_lsm == NULL) ||
((lsm != NULL && lov->lo_lsm != NULL) &&
(lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
+ (lov->lo_lsm->lsm_flags == lsm->lsm_flags) &&
(lov->lo_lsm->lsm_entries[0]->lsme_pattern ==
lsm->lsm_entries[0]->lsme_pattern))) {
/* same version of layout */
- lov->lo_layout_invalid = false;
+ clear_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
GOTO(out, result = 0);
}
/* will change layout - check if there still exists active IO. */
if (atomic_read(&lov->lo_active_ios) > 0) {
- lov->lo_layout_invalid = true;
+ set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
GOTO(out, result = -EBUSY);
}
result = lov_layout_change(env, lov, lsm, conf);
- lov->lo_layout_invalid = result != 0;
+ if (result)
+ set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
+ else
+ clear_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);
EXIT;
out:
lov_conf_unlock(lov);
+out_lsm:
lov_lsm_put(lsm);
- CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
- PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
+ CDEBUG(D_INODE, DFID" lo_layout_invalid=%u\n",
+ PFID(lu_object_fid(lov2lu(lov))),
+ test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags));
RETURN(result);
}
return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
}
-int lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
+static int lov_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, pgoff_t index)
{
return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
index);
* Implements cl_object_operations::clo_io_init() method for lov
* layer. Dispatches to the appropriate layout io initialization method.
*/
-int lov_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
+static int lov_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
{
CL_IO_SLICE_CLEAN(lov_env_io(env), lis_preserved);
return 0;
}
-int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
+static int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io)
{
/* No need to lock because we've taken one refcount of layout. */
int start_stripe, int *stripe_count)
{
struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
+ int init_stripe;
int last_stripe;
- u64 obd_start;
- u64 obd_end;
int i, j;
+ init_stripe = lov_stripe_number(lsm, index, ext->e_start);
+
if (ext->e_end - ext->e_start >
lsme->lsme_stripe_size * lsme->lsme_stripe_count) {
- last_stripe = (start_stripe < 1 ? lsme->lsme_stripe_count - 1 :
- start_stripe - 1);
- *stripe_count = lsme->lsme_stripe_count;
+ if (init_stripe == start_stripe) {
+ last_stripe = (start_stripe < 1) ?
+ lsme->lsme_stripe_count - 1 : start_stripe - 1;
+ *stripe_count = lsme->lsme_stripe_count;
+ } else if (init_stripe < start_stripe) {
+ last_stripe = (init_stripe < 1) ?
+ lsme->lsme_stripe_count - 1 : init_stripe - 1;
+ *stripe_count = lsme->lsme_stripe_count -
+ (start_stripe - init_stripe);
+ } else {
+ last_stripe = init_stripe - 1;
+ *stripe_count = init_stripe - start_stripe;
+ }
} else {
for (j = 0, i = start_stripe; j < lsme->lsme_stripe_count;
i = (i + 1) % lsme->lsme_stripe_count, j++) {
- if ((lov_stripe_intersects(lsm, index, i, ext,
- &obd_start, &obd_end)) == 0)
+ if (!lov_stripe_intersects(lsm, index, i, ext, NULL,
+ NULL))
+ break;
+ if ((start_stripe != init_stripe) && (i == init_stripe))
break;
}
*stripe_count = j;
static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
struct fiemap_extent *lcl_fm_ext,
int ost_index, unsigned int ext_count,
- int current_extent)
+ int current_extent, int abs_stripeno)
{
char *to;
unsigned int ext;
for (ext = 0; ext < ext_count; ext++) {
- lcl_fm_ext[ext].fe_device = ost_index;
+ set_fe_device_stripenr(&lcl_fm_ext[ext], ost_index,
+ abs_stripeno);
lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
}
{
struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
u64 local_end = fiemap->fm_extents[0].fe_logical;
- u64 lun_start;
u64 lun_end;
u64 fm_end_offset;
int stripe_no = -1;
- int i;
if (fiemap->fm_extent_count == 0 ||
fiemap->fm_extents[0].fe_logical == 0)
return 0;
- /* Find out stripe_no from ost_index saved in the fe_device */
- for (i = 0; i < lsme->lsme_stripe_count; i++) {
- struct lov_oinfo *oinfo = lsme->lsme_oinfo[i];
-
- if (lov_oinfo_is_dummy(oinfo))
- continue;
-
- if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
- stripe_no = i;
- break;
- }
- }
+ stripe_no = *start_stripe;
if (stripe_no == -1)
return -EINVAL;
/* If we have finished mapping on previous device, shift logical
* offset to start of next device */
- if (lov_stripe_intersects(lsm, index, stripe_no, ext,
- &lun_start, &lun_end) != 0 &&
+ if (lov_stripe_intersects(lsm, index, stripe_no, ext, NULL, &lun_end) &&
local_end < lun_end) {
fm_end_offset = local_end;
- *start_stripe = stripe_no;
} else {
/* This is a special value to indicate that caller should
* calculate offset in next stripe. */
struct fiemap_state {
struct fiemap *fs_fm;
- struct lu_extent fs_ext;
+ struct lu_extent fs_ext; /* current entry extent */
u64 fs_length;
- u64 fs_end_offset;
- int fs_cur_extent;
- int fs_cnt_need;
+ u64 fs_end_offset; /* last iteration offset */
+ int fs_cur_extent; /* collected exts so far */
+ int fs_cnt_need; /* # of extents buf can hold */
int fs_start_stripe;
int fs_last_stripe;
- bool fs_device_done;
- bool fs_finish_stripe;
- bool fs_enough;
+ bool fs_device_done; /* enough for this OST */
+ bool fs_finish_stripe; /* reached fs_last_stripe */
+ bool fs_enough; /* enough for this call */
};
static struct cl_object *lov_find_subobj(const struct lu_env *env,
return result;
}
-int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
- struct lov_stripe_md *lsm, struct fiemap *fiemap,
- size_t *buflen, struct ll_fiemap_info_key *fmkey,
- int index, int stripeno, struct fiemap_state *fs)
+static int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj,
+ struct lov_stripe_md *lsm, struct fiemap *fiemap,
+ size_t *buflen, struct ll_fiemap_info_key *fmkey,
+ int index, int stripe_last, int stripeno,
+ struct fiemap_state *fs)
{
struct lov_stripe_md_entry *lsme = lsm->lsm_entries[index];
struct cl_object *subobj;
struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0];
- u64 req_fm_len; /* Stores length of required mapping */
+ u64 req_fm_len; /* max requested extent coverage */
u64 len_mapped_single_call;
- u64 lun_start;
- u64 lun_end;
- u64 obd_object_end;
+ u64 obd_start;
+ u64 obd_end;
unsigned int ext_count;
/* EOF for object */
bool ost_eof = false;
fs->fs_device_done = false;
/* Find out range of mapping on this stripe */
if ((lov_stripe_intersects(lsm, index, stripeno, &fs->fs_ext,
- &lun_start, &obd_object_end)) == 0)
+ &obd_start, &obd_end)) == 0)
return 0;
if (lov_oinfo_is_dummy(lsme->lsme_oinfo[stripeno]))
return -EIO;
/* If this is a continuation FIEMAP call and we are on
- * starting stripe then lun_start needs to be set to
+ * starting stripe then obd_start needs to be set to
* end_offset */
if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe)
- lun_start = fs->fs_end_offset;
- lun_end = lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno);
- if (lun_start == lun_end)
+ obd_start = fs->fs_end_offset;
+
+ if (lov_size_to_stripe(lsm, index, fs->fs_ext.e_end, stripeno) ==
+ obd_start)
return 0;
- req_fm_len = obd_object_end - lun_start;
+ req_fm_len = obd_end - obd_start + 1;
fs->fs_fm->fm_length = 0;
len_mapped_single_call = 0;
fs->fs_cur_extent;
}
- lun_start += len_mapped_single_call;
+ obd_start += len_mapped_single_call;
fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call;
req_fm_len = fs->fs_fm->fm_length;
/**
fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST;
fs->fs_fm->fm_mapped_extents = 1;
- fm_ext[0].fe_logical = lun_start;
- fm_ext[0].fe_length = obd_object_end - lun_start;
+ fm_ext[0].fe_logical = obd_start;
+ fm_ext[0].fe_length = obd_end - obd_start + 1;
fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
goto inactive_tgt;
}
- fs->fs_fm->fm_start = lun_start;
+ fs->fs_fm->fm_start = obd_start;
fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm));
*buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count);
/* prepare to copy retrived map extents */
len_mapped_single_call = fm_ext[ext_count - 1].fe_logical +
fm_ext[ext_count - 1].fe_length -
- lun_start;
+ obd_start;
/* Have we finished mapping on this device? */
if (req_fm_len <= len_mapped_single_call) {
}
fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index,
- ext_count, fs->fs_cur_extent);
+ ext_count, fs->fs_cur_extent,
+ stripe_last + stripeno);
fs->fs_cur_extent += ext_count;
/* Ran out of available extents? */
loff_t whole_start;
loff_t whole_end;
int entry;
- int start_entry;
+ int start_entry = -1;
int end_entry;
int cur_stripe = 0;
int stripe_count;
unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
int rc = 0;
struct fiemap_state fs = { 0 };
+ struct lu_extent range;
+ int cur_ext;
+ int stripe_last;
+ int start_stripe = 0;
+ bool resume = false;
ENTRY;
lsm = lov_lsm_addref(cl2lov(obj));
*/
if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
- if (fiemap->fm_extent_count == 0)
- fs.fs_cnt_need = 0;
fs.fs_enough = false;
fs.fs_cur_extent = 0;
if (whole_start > fmkey->lfik_oa.o_size)
GOTO(out_fm_local, rc = -EINVAL);
whole_end = (fiemap->fm_length == OBD_OBJECT_EOF) ?
- fmkey->lfik_oa.o_size :
- whole_start + fiemap->fm_length - 1;
+ fmkey->lfik_oa.o_size + 1 :
+ whole_start + fiemap->fm_length;
/**
* If fiemap->fm_length != OBD_OBJECT_EOF but whole_end exceeds file
* size
*/
- if (whole_end > fmkey->lfik_oa.o_size)
- whole_end = fmkey->lfik_oa.o_size;
+ if (whole_end > fmkey->lfik_oa.o_size + 1)
+ whole_end = fmkey->lfik_oa.o_size + 1;
- start_entry = lov_lsm_entry(lsm, whole_start);
- end_entry = lov_lsm_entry(lsm, whole_end);
- if (end_entry == -1)
- end_entry = lsm->lsm_entry_count - 1;
+ /**
+ * the high 16 bits of fe_device remember which stripe the last
+ * call arrived at; we continue from there in this call.
+ */
+ if (fiemap->fm_extent_count && fiemap->fm_extents[0].fe_logical)
+ resume = true;
+ stripe_last = get_fe_stripenr(&fiemap->fm_extents[0]);
+ /**
+ * stripe_last records the stripe number we processed in the last
+ * call
+ */
+ end_entry = lsm->lsm_entry_count - 1;
+ cur_stripe = 0;
+ for (entry = 0; entry <= end_entry; entry++) {
+ lsme = lsm->lsm_entries[entry];
+ if (cur_stripe + lsme->lsme_stripe_count >= stripe_last) {
+ start_entry = entry;
+ start_stripe = stripe_last - cur_stripe;
+ break;
+ }
- if (start_entry == -1 || end_entry == -1)
+ cur_stripe += lsme->lsme_stripe_count;
+ }
+ if (start_entry == -1) {
+ CERROR(DFID": FIEMAP does not init start entry, cur_stripe=%d, "
+ "stripe_last=%d\n", PFID(lu_object_fid(&obj->co_lu)),
+ cur_stripe, stripe_last);
GOTO(out_fm_local, rc = -EINVAL);
+ }
+ /**
+ * @start_entry & @start_stripe record the position of fiemap
+ * resumption; @stripe_last keeps recording the absolute position
+ * we are processing. @resume indicates we should honor @start_stripe.
+ */
+
+ range.e_start = whole_start;
+ range.e_end = whole_end;
- /* TODO: rewrite it with lov_foreach_io_layout() */
for (entry = start_entry; entry <= end_entry; entry++) {
+ /* remember to update stripe_last accordingly */
lsme = lsm->lsm_entries[entry];
- if (!lsme_inited(lsme))
- break;
+ /* FLR could contain component holes between entries */
+ if (!lsme_inited(lsme)) {
+ stripe_last += lsme->lsme_stripe_count;
+ resume = false;
+ continue;
+ }
- if (entry == start_entry)
- fs.fs_ext.e_start = whole_start;
- else
+ if (!lu_extent_is_overlapped(&range, &lsme->lsme_extent)) {
+ stripe_last += lsme->lsme_stripe_count;
+ resume = false;
+ continue;
+ }
+
+ /* prepare for a component entry iteration */
+ if (lsme->lsme_extent.e_start > whole_start)
fs.fs_ext.e_start = lsme->lsme_extent.e_start;
- if (entry == end_entry)
+ else
+ fs.fs_ext.e_start = whole_start;
+ if (lsme->lsme_extent.e_end > whole_end)
fs.fs_ext.e_end = whole_end;
else
- fs.fs_ext.e_end = lsme->lsme_extent.e_end - 1;
- fs.fs_length = fs.fs_ext.e_end - fs.fs_ext.e_start + 1;
+ fs.fs_ext.e_end = lsme->lsme_extent.e_end;
/* Calculate start stripe, last stripe and length of mapping */
- fs.fs_start_stripe = lov_stripe_number(lsm, entry,
- fs.fs_ext.e_start);
+ if (resume) {
+ fs.fs_start_stripe = start_stripe;
+ /* put stripe_last to the first stripe of the comp */
+ stripe_last -= start_stripe;
+ resume = false;
+ } else {
+ fs.fs_start_stripe = lov_stripe_number(lsm, entry,
+ fs.fs_ext.e_start);
+ }
fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, entry,
&fs.fs_ext, fs.fs_start_stripe,
&stripe_count);
- fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, entry,
- &fs.fs_ext, &fs.fs_start_stripe);
+ /**
+ * A new mirror component is under process, reset
+ * fs.fs_end_offset and then fiemap_for_stripe() starts from
+ * the overlapping extent, otherwise starts from
+ * fs.fs_end_offset.
+ */
+ if (entry > start_entry && lsme->lsme_extent.e_start == 0) {
+ /* new mirror */
+ fs.fs_end_offset = 0;
+ } else {
+ fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap,
+ lsm, entry, &fs.fs_ext,
+ &fs.fs_start_stripe);
+ }
+
/* Check each stripe */
for (cur_stripe = fs.fs_start_stripe; stripe_count > 0;
--stripe_count,
cur_stripe = (cur_stripe + 1) % lsme->lsme_stripe_count) {
+ /* reset fs_finish_stripe */
+ fs.fs_finish_stripe = false;
rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen,
- fmkey, entry, cur_stripe, &fs);
+ fmkey, entry, stripe_last,
+ cur_stripe, &fs);
if (rc < 0)
GOTO(out_fm_local, rc);
- if (fs.fs_enough)
+ if (fs.fs_enough) {
+ stripe_last += cur_stripe;
GOTO(finish, rc);
+ }
if (fs.fs_finish_stripe)
break;
} /* for each stripe */
- } /* for covering layout component */
- /*
- * We've traversed all components, set @entry to the last component
- * entry, it's for the last stripe check.
- */
- entry--;
+ stripe_last += lsme->lsme_stripe_count;
+ } /* for covering layout component entry */
+
finish:
+ if (fs.fs_cur_extent > 0)
+ cur_ext = fs.fs_cur_extent - 1;
+ else
+ cur_ext = 0;
+
+ /* done all the processing */
+ if (entry > end_entry)
+ fiemap->fm_extents[cur_ext].fe_flags |= FIEMAP_EXTENT_LAST;
+
/* Indicate that we are returning device offsets unless file just has
* single stripe */
if (lsm->lsm_entry_count > 1 ||
if (fiemap->fm_extent_count == 0)
goto skip_last_device_calc;
- /* Check if we have reached the last stripe and whether mapping for that
- * stripe is done. */
- if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done)
- fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |=
- FIEMAP_EXTENT_LAST;
skip_last_device_calc:
fiemap->fm_mapped_extents = fs.fs_cur_extent;
out_fm_local:
cl->cl_size = lov_comp_md_size(lsm);
cl->cl_layout_gen = lsm->lsm_layout_gen;
- cl->cl_dom_comp_size = 0;
- if (lsm_is_composite(lsm->lsm_magic)) {
- struct lov_stripe_md_entry *lsme = lsm->lsm_entries[0];
-
- cl->cl_is_composite = true;
-
- if (lsme_is_dom(lsme))
- cl->cl_dom_comp_size = lsme->lsme_extent.e_end;
- } else {
- cl->cl_is_composite = false;
- }
+ cl->cl_is_released = lsm->lsm_is_released;
+ cl->cl_is_composite = lsm_is_composite(lsm->lsm_magic);
rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
lov_lsm_put(lsm);
- RETURN(rc < 0 ? rc : 0);
+ /* return error or number of bytes */
+ RETURN(rc);
}
static loff_t lov_object_maxbytes(struct cl_object *obj)
return maxbytes;
}
+/*
+ * cl_object_operations::coo_object_flush for the LOV layer:
+ * dispatch to the current layout's llo_flush method (with locking).
+ */
+static int lov_object_flush(const struct lu_env *env, struct cl_object *obj,
+ struct ldlm_lock *lock)
+{
+ return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_flush, true, env, obj,
+ lock);
+}
+
static const struct cl_object_operations lov_ops = {
.coo_page_init = lov_page_init,
.coo_lock_init = lov_lock_init,
.coo_layout_get = lov_object_layout_get,
.coo_maxbytes = lov_object_maxbytes,
.coo_fiemap = lov_object_fiemap,
+ .coo_object_flush = lov_object_flush
};
static const struct lu_object_operations lov_lu_obj_ops = {
- .loo_object_init = lov_object_init,
- .loo_object_delete = lov_object_delete,
- .loo_object_release = NULL,
- .loo_object_free = lov_object_free,
- .loo_object_print = lov_object_print,
- .loo_object_invariant = NULL
+ .loo_object_init = lov_object_init,
+ .loo_object_delete = lov_object_delete,
+ .loo_object_release = NULL,
+ .loo_object_free = lov_object_free,
+ .loo_object_print = lov_object_print,
+ .loo_object_invariant = NULL,
};
struct lu_object *lov_object_alloc(const struct lu_env *env,
RETURN(obj);
}
-struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
+static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
{
struct lov_stripe_md *lsm = NULL;
lsm = lsm_addref(lov->lo_lsm);
CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
lsm, atomic_read(&lsm->lsm_refc),
- lov->lo_layout_invalid, current);
+ test_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags),
+ current);
}
lov_conf_thaw(lov);
return lsm;
}
case LLT_RELEASED:
case LLT_EMPTY:
+ /* fall through */
+ case LLT_FOREIGN:
break;
default:
LBUG();