/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Internal interfaces of LOV layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#ifndef LOV_CL_INTERNAL_H
* Serializes access to lov_device::ld_emrg in low-memory
* conditions.
*/
- cfs_mutex_t ld_mutex;
+ struct mutex ld_mutex;
};
/**
*
* \see lov_object::lo_type
*/
- cfs_rw_semaphore_t lo_type_guard;
- /**
- * Type of an object. Protected by lov_object::lo_type_guard.
- */
- enum lov_layout_type lo_type;
+ struct rw_semaphore lo_type_guard;
+ /**
+ * Type of an object. Protected by lov_object::lo_type_guard.
+ */
+ enum lov_layout_type lo_type;
+ /**
+ * True if layout is invalid. This bit is cleared when layout lock
+ * is lost.
+ */
+ bool lo_layout_invalid;
+ /**
+ * How many IOs are on going on this object. Layout can be changed
+ * only if there is no active IO.
+ */
+ cfs_atomic_t lo_active_ios;
/**
* Waitq - wait for no one else is using lo_lsm
*/
- cfs_waitq_t lo_waitq;
+ cfs_waitq_t lo_waitq;
+ /**
+ * Layout metadata. NULL if empty layout.
+ */
+ struct lov_stripe_md *lo_lsm;
union lov_layout_state {
struct lov_layout_raid0 {
unsigned lo_nr;
- struct lov_stripe_md *lo_lsm;
+ /**
+ * When this is true, lov_object::lo_attr contains
+ * valid up to date attributes for a top-level
+ * object. This field is reset to 0 when attributes of
+ * any sub-object change.
+ */
+ int lo_attr_valid;
/**
* Array of sub-objects. Allocated when top-object is
* created (lov_init_raid0()).
/**
* protect lo_sub
*/
- cfs_spinlock_t lo_sub_lock;
- /**
- * When this is true, lov_object::lo_attr contains
- * valid up to date attributes for a top-level
- * object. This field is reset to 0 when attributes of
- * any sub-object change.
- */
- int lo_attr_valid;
+ spinlock_t lo_sub_lock;
/**
* Cached object attribute, built from sub-object
* attributes.
* Set when sub-lock was canceled, while top-lock was being
* used, or unused.
*/
- int lls_cancel_race:1;
+ unsigned int lls_cancel_race:1;
/**
* An array of sub-locks
*
struct cl_lock_descr lti_ldescr;
struct ost_lvb lti_lvb;
struct cl_2queue lti_cl2q;
- union lov_layout_state lti_state;
struct cl_lock_closure lti_closure;
cfs_waitlink_t lti_waiter;
};
* lov_io::lis_cl::cis_object.
*/
struct lov_object *lis_object;
- /**
- * Lov stripe - this determines how this io fans out.
- * Hold a refcount to the lsm so it can't go away during IO.
- */
- struct lov_stripe_md *lis_lsm;
/**
* Original end-of-io position for this IO, set by the upper layer as
* cl_io::u::ci_rw::pos + cl_io::u::ci_rw::count. lov remembers this,
extern struct lu_context_key lov_key;
extern struct lu_context_key lov_session_key;

extern cfs_mem_cache_t *lov_lock_kmem;
extern cfs_mem_cache_t *lov_object_kmem;
extern cfs_mem_cache_t *lov_thread_kmem;
extern cfs_mem_cache_t *lov_session_kmem;
extern cfs_mem_cache_t *lov_req_kmem;
extern cfs_mem_cache_t *lovsub_lock_kmem;
extern cfs_mem_cache_t *lovsub_object_kmem;
extern cfs_mem_cache_t *lovsub_req_kmem;
int lov_lock_init_raid0 (const struct lu_env *env, struct cl_object *obj,
                         struct cl_lock *lock, const struct cl_io *io);
int lov_lock_init_empty (const struct lu_env *env, struct cl_object *obj,
                         struct cl_lock *lock, const struct cl_io *io);
int lov_io_init_raid0   (const struct lu_env *env, struct cl_object *obj,
                         struct cl_io *io);
int lov_io_init_empty (const struct lu_env *env, struct cl_object *obj,
const struct cl_lock_descr *d, int idx);
-struct cl_page *lov_page_init (const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, cfs_page_t *vmpage);
-struct cl_page *lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, cfs_page_t *vmpage);
+int lov_page_init (const struct lu_env *env, struct cl_object *ob,
+ struct cl_page *page, cfs_page_t *vmpage);
+int lovsub_page_init (const struct lu_env *env, struct cl_object *ob,
+ struct cl_page *page, cfs_page_t *vmpage);
-struct cl_page *lov_page_init_empty(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
-struct cl_page *lov_page_init_raid0(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+int lov_page_init_empty (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
+int lov_page_init_raid0 (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
struct lu_object *lov_object_alloc (const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
/**
 * Returns the RAID0 layout state of \a lov.
 *
 * Asserts that the object is currently of RAID0 layout type and that its
 * layout metadata carries a valid LOV magic; callers are expected to have
 * the layout type stable (see lov_object::lo_type_guard).
 */
static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
{
	LASSERT(lov->lo_type == LLT_RAID0);
	LASSERT(lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC ||
		lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC_V3);
	return &lov->u.raid0;
}
/** @} lov */