* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct lu_object co_lu;
/** per-object-layer operations */
const struct cl_object_operations *co_ops;
+ /** offset of page slice in cl_page buffer */
+ int co_slice_off;
};
/**
*/
struct inode *coc_inode;
/**
- * Invalidate the current stripe configuration due to losing
- * layout lock.
+ * Layout lock handle.
*/
- bool coc_invalidate;
+ struct ldlm_lock *coc_lock;
+ /**
+ * Operation to handle layout, OBJECT_CONF_XYZ.
+ */
+ int coc_opc;
+};
+
+enum {
+ /** configure layout, set up a new stripe, must be called while
+ * holding layout lock. */
+ OBJECT_CONF_SET = 0,
+ /** invalidate the current stripe configuration due to losing
+ * layout lock. */
+ OBJECT_CONF_INVALIDATE = 1,
+ /** wait for old layout to go away so that new layout can be
+ * set up. */
+ OBJECT_CONF_WAIT = 2
};
/**
* \retval valid-pointer pointer to already existing referenced page
* to be used instead of newly created.
*/
- struct cl_page *(*coo_page_init)(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- cfs_page_t *vmpage);
+ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
*/
/** @{ */
/** Lock protecting page tree. */
- cfs_spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- cfs_spinlock_t coh_lock_guard;
+ spinlock_t coh_page_guard;
+ /** Lock protecting lock list. */
+ spinlock_t coh_lock_guard;
/** @} locks */
/** Radix tree of cl_page's, cached for this object. */
struct radix_tree_root coh_tree;
*
* \todo XXX this can be read/write lock if needed.
*/
- cfs_spinlock_t coh_attr_guard;
- /**
- * Number of objects above this one: 0 for a top-object, 1 for its
- * sub-object, etc.
- */
- unsigned coh_nesting;
+ spinlock_t coh_attr_guard;
+ /**
+ * Size of cl_page + page slices
+ */
+ unsigned short coh_page_bufsize;
+ /**
+ * Number of objects above this one: 0 for a top-object, 1 for its
+ * sub-object, etc.
+ */
+ unsigned char coh_nesting;
};
/**
* corresponding radix tree at the corresponding logical offset.
*
* cl_page is associated with VM page of the hosting environment (struct
- * page in Linux kernel, for example), cfs_page_t. It is assumed, that this
+ * page in Linux kernel, for example), struct page. It is assumed that this
* association is implemented by one of cl_page layers (top layer in the
* current design) that
*
* - translates state (page flag bits) and locking between lustre and
* environment.
*
- * The association between cl_page and cfs_page_t is immutable and
+ * The association between cl_page and struct page is immutable and
* established when cl_page is created.
*
* cl_page can be "owned" by a particular cl_io (see below), guaranteeing
* eviction of the page from the memory). Note, that in general cl_io
* cannot be identified with a particular thread, and page ownership is not
* exactly equal to the current thread holding a lock on the page. Layer
- * implementing association between cl_page and cfs_page_t has to implement
+ * implementing the association between cl_page and struct page has to implement
* ownership on top of available synchronization mechanisms.
*
* While lustre client maintains the notion of an page ownership by io,
* - by doing a lookup in the cl_object radix tree, protected by the
* spin-lock;
*
- * - by starting from VM-locked cfs_page_t and following some
+ * - by starting from VM-locked struct page and following some
* hosting environment method (e.g., following ->private pointer in
* the case of Linux kernel), see cl_vmpage_page();
*
*
* Linux Kernel implementation.
*
- * Binding between cl_page and cfs_page_t (which is a typedef for
+ * Binding between cl_page and the VM page (Linux kernel's
* struct page) is implemented in the vvp layer. cl_page is attached to the
* ->private pointer of the struct page, together with the setting of
* PG_private bit in page->flags, and acquiring additional reference on the
};
/**
- * Fields are protected by the lock on cfs_page_t, except for atomics and
+ * Fields are protected by the lock on struct page, except for atomics and
* immutables.
*
* \invariant Data type invariants are in cl_page_invariant(). Basically:
* modified only internally within cl_page.c. Protected by a VM lock.
*/
const enum cl_page_state cp_state;
- /** Protect to get and put page, see cl_page_put and cl_vmpage_page */
- cfs_spinlock_t cp_lock;
- /**
- * Linkage of pages within some group. Protected by
- * cl_page::cp_mutex. */
- cfs_list_t cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- cfs_mutex_t cp_mutex;
+ /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
+ cfs_list_t cp_batch;
+ /** Mutex serializing membership of a page in a batch. */
+ struct mutex cp_mutex;
/** Linkage of pages within cl_req. */
cfs_list_t cp_flight;
/** Transfer error. */
struct cl_req *cp_req;
/** List of references to this page, for debugging. */
struct lu_ref cp_reference;
- /** Link to an object, for debugging. */
- struct lu_ref_link *cp_obj_ref;
- /** Link to a queue, for debugging. */
- struct lu_ref_link *cp_queue_ref;
- /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
- unsigned cp_flags;
- /** Assigned if doing a sync_io */
- struct cl_sync_io *cp_sync_io;
+ /** Link to an object, for debugging. */
+ struct lu_ref_link cp_obj_ref;
+ /** Link to a queue, for debugging. */
+ struct lu_ref_link cp_queue_ref;
+ /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
+ unsigned cp_flags;
+ /** Assigned if doing a sync_io */
+ struct cl_sync_io *cp_sync_io;
};
/**
*/
struct cl_page_operations {
/**
- * cl_page<->cfs_page_t methods. Only one layer in the stack has to
+ * cl_page<->struct page methods. Only one layer in the stack has to
* implement these. Current code assumes that this functionality is
* provided by the topmost layer, see cl_page_disown0() as an example.
*/
/**
* \return the underlying VM page. Optional.
*/
- cfs_page_t *(*cpo_vmpage)(const struct lu_env *env,
+ struct page *(*cpo_vmpage)(const struct lu_env *env,
const struct cl_page_slice *slice);
/**
* Called when \a io acquires this page into the exclusive
} \
} while (0)
+static inline int __page_in_use(const struct cl_page *page, int refc)
+{
+ if (page->cp_type == CPT_CACHEABLE)
+ ++refc;
+ LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
+ return (cfs_atomic_read(&page->cp_ref) > refc);
+}
+#define cl_page_in_use(pg) __page_in_use(pg, 1)
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
*
* \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
*/
- cfs_mutex_t cll_guard;
+ struct mutex cll_guard;
cfs_task_t *cll_guarder;
int cll_depth;
* A list of holds on this lock, for debugging.
*/
struct lu_ref cll_holders;
- /**
- * A reference for cl_lock::cll_descr::cld_obj. For debugging.
- */
- struct lu_ref_link *cll_obj_ref;
+ /**
+ * A reference for cl_lock::cll_descr::cld_obj. For debugging.
+ */
+ struct lu_ref_link cll_obj_ref;
#ifdef CONFIG_LOCKDEP
- /* "dep_map" name is assumed by lockdep.h macros. */
- struct lockdep_map dep_map;
+ /* "dep_map" name is assumed by lockdep.h macros. */
+ struct lockdep_map dep_map;
#endif
};
/** Layers are free to decide between local and global locking. */
CILR_MAYBE,
/** Never lock: there is no cache (e.g., liblustre). */
- CILR_NEVER,
- /** Peek lock: use existing locks, don't queue new ones */
- CILR_PEEK
+ CILR_NEVER
};
enum cl_fsync_mode {
* A per-object state that (potentially multi-object) transfer request keeps.
*/
struct cl_req_obj {
- /** object itself */
- struct cl_object *ro_obj;
- /** reference to cl_req_obj::ro_obj. For debugging. */
- struct lu_ref_link *ro_obj_ref;
- /* something else? Number of pages for a given object? */
+ /** object itself */
+ struct cl_object *ro_obj;
+ /** reference to cl_req_obj::ro_obj. For debugging. */
+ struct lu_ref_link ro_obj_ref;
+ /* something else? Number of pages for a given object? */
};
/**
/* @} cl_req */
+enum cache_stats_item {
+ /** how many cache lookups were performed */
+ CS_lookup = 0,
+ /** how many times cache lookup resulted in a hit */
+ CS_hit,
+ /** how many entities are in the cache right now */
+ CS_total,
+ /** how many entities in the cache are actively used (and cannot be
+ * evicted) right now */
+ CS_busy,
+ /** how many entities were created at all */
+ CS_create,
+ CS_NR
+};
+
+#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
+
/**
* Stats for a generic cache (similar to inode, lu_object, etc. caches).
*/
struct cache_stats {
const char *cs_name;
- /** how many entities were created at all */
- cfs_atomic_t cs_created;
- /** how many cache lookups were performed */
- cfs_atomic_t cs_lookup;
- /** how many times cache lookup resulted in a hit */
- cfs_atomic_t cs_hit;
- /** how many entities are in the cache right now */
- cfs_atomic_t cs_total;
- /** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
- cfs_atomic_t cs_busy;
+ cfs_atomic_t cs_stats[CS_NR];
};
/** These are not exported so far */
return cl_object_header(o0) == cl_object_header(o1);
}
+static inline void cl_object_page_init(struct cl_object *clob, int size)
+{
+ clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
+ cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+}
+
+static inline void *cl_object_page_slice(struct cl_object *clob,
+ struct cl_page *page)
+{
+ return (void *)((char *)page + clob->co_slice_off);
+}
+
/** @} cl_object */
/** \defgroup cl_page cl_page
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
-cfs_page_t *cl_page_vmpage (const struct lu_env *env,
+struct page *cl_page_vmpage (const struct lu_env *env,
struct cl_page *page);
-struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj);
+struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
struct cl_page *cl_page_top (struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
struct cl_lock *except,
int pending, int canceld)
{
+ LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
return cl_lock_at_pgoff(env, obj, page->cp_index, except,
pending, canceld);
}
* anchor and wakes up waiting thread when transfer is complete.
*/
struct cl_sync_io {
- /** number of pages yet to be transferred. */
- cfs_atomic_t csi_sync_nr;
- /** completion to be signaled when transfer is complete. */
- cfs_waitq_t csi_waitq;
- /** error code. */
- int csi_sync_rc;
+ /** number of pages yet to be transferred. */
+ cfs_atomic_t csi_sync_nr;
+ /** error code. */
+ int csi_sync_rc;
+ /** barrier before destroying this structure */
+ cfs_atomic_t csi_barrier;
+ /** completion to be signaled when transfer is complete. */
+ cfs_waitq_t csi_waitq;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
void cl_env_reexit (void *cookie);
void cl_env_implant (struct lu_env *env, int *refcheck);
void cl_env_unplant (struct lu_env *env, int *refcheck);
-unsigned cl_env_cache_purge(unsigned nr);
/** @} cl_env */
struct lu_device *next);
/** @} clio */
+int cl_global_init(void);
+void cl_global_fini(void);
+
#endif /* _LINUX_CL_OBJECT_H */