* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
union lov_layout_state *state);
int (*llo_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
- struct cl_page *(*llo_page_init)(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- cfs_page_t *vmpage);
+ int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
int (*llo_lock_init)(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
int result;
if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
+ /* For sanity:test_206.
+ * Do not leave the object in cache, to avoid accessing
+ * freed memory: osc_object refers to the lov_oinfo in
+ * lsm_stripe_data, which will be freed because of this
+ * injected failure. */
+ cl_object_kill(env, stripe);
cl_object_put(env, stripe);
return -EIO;
}
if (r0->lo_sub != NULL) {
result = 0;
subconf->coc_inode = conf->coc_inode;
- cfs_spin_lock_init(&r0->lo_sub_lock);
+ spin_lock_init(&r0->lo_sub_lock);
/*
* Create stripe cl_objects.
*/
/* this wait-queue is signaled at the end of
* lu_object_free(). */
cfs_set_current_state(CFS_TASK_UNINT);
- cfs_spin_lock(&r0->lo_sub_lock);
- if (r0->lo_sub[idx] == los) {
- cfs_spin_unlock(&r0->lo_sub_lock);
- cfs_waitq_wait(waiter, CFS_TASK_UNINT);
- } else {
- cfs_spin_unlock(&r0->lo_sub_lock);
+ spin_lock(&r0->lo_sub_lock);
+ if (r0->lo_sub[idx] == los) {
+ spin_unlock(&r0->lo_sub_lock);
+ cfs_waitq_wait(waiter, CFS_TASK_UNINT);
+ } else {
+ spin_unlock(&r0->lo_sub_lock);
cfs_set_current_state(CFS_TASK_RUNNING);
break;
}
static inline void lov_conf_freeze(struct lov_object *lov)
{
if (lov->lo_owner != cfs_current())
- cfs_down_read(&lov->lo_type_guard);
+ down_read(&lov->lo_type_guard);
}
static inline void lov_conf_thaw(struct lov_object *lov)
{
if (lov->lo_owner != cfs_current())
- cfs_up_read(&lov->lo_type_guard);
+ up_read(&lov->lo_type_guard);
}
#define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
static void lov_conf_lock(struct lov_object *lov)
{
LASSERT(lov->lo_owner != cfs_current());
- cfs_down_write(&lov->lo_type_guard);
+ down_write(&lov->lo_type_guard);
LASSERT(lov->lo_owner == NULL);
lov->lo_owner = cfs_current();
}
static void lov_conf_unlock(struct lov_object *lov)
{
lov->lo_owner = NULL;
- cfs_up_write(&lov->lo_type_guard);
+ up_write(&lov->lo_type_guard);
}
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
RETURN(0);
LASSERT(cfs_atomic_read(&lsm->lsm_refc) > 0);
- while (cfs_atomic_read(&lsm->lsm_refc) > 1) {
+ while (cfs_atomic_read(&lsm->lsm_refc) > 1 && lov->lo_lsm_invalid) {
lov_conf_unlock(lov);
+
+ CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
+ PFID(lu_object_fid(lov2lu(lov))),
+ cfs_atomic_read(&lsm->lsm_refc));
+
l_wait_event(lov->lo_waitq,
cfs_atomic_read(&lsm->lsm_refc) == 1, &lwi);
lov_conf_lock(lov);
int result;
ENTRY;
- cfs_init_rwsem(&lov->lo_type_guard);
+ init_rwsem(&lov->lo_type_guard);
cfs_waitq_init(&lov->lo_waitq);
+ cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
+
/* no locking is necessary, as object is being created */
lov->lo_type = cconf->u.coc_md->lsm != NULL ? LLT_RAID0 : LLT_EMPTY;
ops = &lov_dispatch[lov->lo_type];
ENTRY;
lov_conf_lock(lov);
- if (conf->coc_invalidate) {
+ if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
lov->lo_lsm_invalid = 1;
GOTO(out, result = 0);
}
+ if (conf->coc_opc == OBJECT_CONF_WAIT) {
+ result = lov_layout_wait(env, lov);
+ GOTO(out, result);
+ }
+
+ LASSERT(conf->coc_opc == OBJECT_CONF_SET);
+
if (conf->u.coc_md != NULL)
lsm = conf->u.coc_md->lsm;
-
if ((lsm == NULL && lov->lo_lsm == NULL) ||
(lsm != NULL && lov->lo_lsm != NULL &&
lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen)) {
+ /* Same layout generation (or no layout on either side) - nothing to do. */
lov->lo_lsm_invalid = 0;
GOTO(out, result = 0);
}
- /* will change layout */
- lov_layout_wait(env, lov);
+ /* The layout is about to change - fail with -EBUSY if active I/O still holds the old one. */
+ if (lov->lo_lsm != NULL &&
+ cfs_atomic_read(&lov->lo_lsm->lsm_refc) > 1) {
+ lov->lo_lsm_invalid = 1;
+ GOTO(out, result = -EBUSY);
+ }
/*
* Only LLT_EMPTY <-> LLT_RAID0 transitions are supported.
return LOV_2DISPATCH(lu2lov(o), llo_print, env, cookie, p, o);
}
-struct cl_page *lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+int lov_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
llo_page_init, env, obj, page, vmpage);