* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct lu_object co_lu;
/** per-object-layer operations */
const struct cl_object_operations *co_ops;
+ /** offset of page slice in cl_page buffer */
+ int co_slice_off;
};
/**
*/
struct inode *coc_inode;
/**
- * Invalidate the current stripe configuration due to losing
- * layout lock.
+ * Layout lock handle.
*/
- bool coc_invalidate;
+ struct ldlm_lock *coc_lock;
+ /**
+ * Operation to handle layout, OBJECT_CONF_XYZ.
+ */
+ int coc_opc;
+};
+
+/** Layout operations for cl_object_conf::coc_opc. */
+enum {
+ /** configure layout, set up a new stripe, must be called while
+ * holding layout lock. */
+ OBJECT_CONF_SET = 0,
+ /** invalidate the current stripe configuration due to losing
+ * layout lock. */
+ OBJECT_CONF_INVALIDATE = 1,
+ /** wait for old layout to go away so that new layout can be
+ * set up. */
+ OBJECT_CONF_WAIT = 2
};
/**
* \retval valid-pointer pointer to already existing referenced page
* to be used instead of newly created.
*/
- struct cl_page *(*coo_page_init)(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- cfs_page_t *vmpage);
+ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
*/
/** @{ */
/** Lock protecting page tree. */
- cfs_spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- cfs_spinlock_t coh_lock_guard;
+ spinlock_t coh_page_guard;
+ /** Lock protecting lock list. */
+ spinlock_t coh_lock_guard;
/** @} locks */
/** Radix tree of cl_page's, cached for this object. */
struct radix_tree_root coh_tree;
*
* \todo XXX this can be read/write lock if needed.
*/
- cfs_spinlock_t coh_attr_guard;
- /**
- * Number of objects above this one: 0 for a top-object, 1 for its
- * sub-object, etc.
- */
- unsigned coh_nesting;
+ spinlock_t coh_attr_guard;
+ /**
+ * Size of cl_page + page slices
+ */
+ unsigned short coh_page_bufsize;
+ /**
+ * Number of objects above this one: 0 for a top-object, 1 for its
+ * sub-object, etc.
+ */
+ unsigned char coh_nesting;
};
/**
* modified only internally within cl_page.c. Protected by a VM lock.
*/
const enum cl_page_state cp_state;
- /** Protect to get and put page, see cl_page_put and cl_vmpage_page */
- cfs_spinlock_t cp_lock;
- /**
- * Linkage of pages within some group. Protected by
- * cl_page::cp_mutex. */
- cfs_list_t cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- cfs_mutex_t cp_mutex;
+ /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
+ cfs_list_t cp_batch;
+ /** Mutex serializing membership of a page in a batch. */
+ struct mutex cp_mutex;
/** Linkage of pages within cl_req. */
cfs_list_t cp_flight;
/** Transfer error. */
} \
} while (0)
+/**
+ * Helper for cl_page_in_use()/cl_page_in_use_noref(): true iff
+ * cl_page::cp_ref exceeds \a refc expected references, i.e. the page is
+ * held by someone beyond the caller. CPT_CACHEABLE pages count one extra
+ * expected reference (presumably held by the object's page cache /
+ * radix tree — confirm against cl_page lifecycle).
+ */
+static inline int __page_in_use(const struct cl_page *page, int refc)
+{
+ if (page->cp_type == CPT_CACHEABLE)
+ ++refc;
+ LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
+ return (cfs_atomic_read(&page->cp_ref) > refc);
+}
+/** in use while the caller itself holds one reference */
+#define cl_page_in_use(pg) __page_in_use(pg, 1)
+/** in use, caller holds no reference of its own */
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
*
* \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
*/
- cfs_mutex_t cll_guard;
+ struct mutex cll_guard;
cfs_task_t *cll_guarder;
int cll_depth;
/** Layers are free to decide between local and global locking. */
CILR_MAYBE,
/** Never lock: there is no cache (e.g., liblustre). */
- CILR_NEVER,
- /** Peek lock: use existing locks, don't queue new ones */
- CILR_PEEK
+ CILR_NEVER
};
enum cl_fsync_mode {
/* @} cl_req */
+/**
+ * Indices into cache_stats::cs_stats[]; CS_NR is the number of counters
+ * and sizes that array.
+ */
+enum cache_stats_item {
+ /** how many cache lookups were performed */
+ CS_lookup = 0,
+ /** how many times cache lookup resulted in a hit */
+ CS_hit,
+ /** how many entities are in the cache right now */
+ CS_total,
+ /** how many entities in the cache are actively used (and cannot be
+ * evicted) right now */
+ CS_busy,
+ /** how many entities were created at all */
+ CS_create,
+ CS_NR
+};
+
+/** Printable counter names, in cache_stats_item order (lookup..create). */
+#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
+
/**
* Stats for a generic cache (similar to inode, lu_object, etc. caches).
*/
struct cache_stats {
const char *cs_name;
- /** how many entities were created at all */
- cfs_atomic_t cs_created;
- /** how many cache lookups were performed */
- cfs_atomic_t cs_lookup;
- /** how many times cache lookup resulted in a hit */
- cfs_atomic_t cs_hit;
- /** how many entities are in the cache right now */
- cfs_atomic_t cs_total;
- /** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
- cfs_atomic_t cs_busy;
+ cfs_atomic_t cs_stats[CS_NR];
};
/** These are not exported so far */
return cl_object_header(o0) == cl_object_header(o1);
}
+/**
+ * Reserves \a size bytes (rounded up to 8-byte alignment) for this
+ * layer's page slice in the cl_page buffer: records the layer's slice
+ * offset in cl_object::co_slice_off and grows
+ * cl_object_header::coh_page_bufsize accordingly.
+ */
+static inline void cl_object_page_init(struct cl_object *clob, int size)
+{
+ clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
+ cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+}
+
+/**
+ * Returns a pointer to this layer's slice inside \a page, at the offset
+ * previously recorded by cl_object_page_init() in cl_object::co_slice_off.
+ */
+static inline void *cl_object_page_slice(struct cl_object *clob,
+ struct cl_page *page)
+{
+ return (void *)((char *)page + clob->co_slice_off);
+}
+
/** @} cl_object */
/** \defgroup cl_page cl_page
* anchor and wakes up waiting thread when transfer is complete.
*/
struct cl_sync_io {
- /** number of pages yet to be transferred. */
- cfs_atomic_t csi_sync_nr;
- /** completion to be signaled when transfer is complete. */
- cfs_waitq_t csi_waitq;
- /** error code. */
- int csi_sync_rc;
+ /** number of pages yet to be transferred. */
+ cfs_atomic_t csi_sync_nr;
+ /** error code. */
+ int csi_sync_rc;
+ /** barrier of destroy this structure */
+ cfs_atomic_t csi_barrier;
+ /** completion to be signaled when transfer is complete. */
+ cfs_waitq_t csi_waitq;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
void cl_env_reexit (void *cookie);
void cl_env_implant (struct lu_env *env, int *refcheck);
void cl_env_unplant (struct lu_env *env, int *refcheck);
-unsigned cl_env_cache_purge(unsigned nr);
/** @} cl_env */
struct lu_device *next);
/** @} clio */
+int cl_global_init(void);
+void cl_global_fini(void);
+
#endif /* _LINUX_CL_OBJECT_H */