*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* Client Lustre Object.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
/*
*
* i_mutex
* PG_locked
- * ->coh_page_guard
- * ->coh_lock_guard
* ->coh_attr_guard
* ->ls_guard
*/
#define DEBUG_SUBSYSTEM S_CLASS
+#include <linux/list.h>
#include <libcfs/libcfs.h>
-/* class_put_type() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
-#include <libcfs/list.h>
-#include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
-/* lu_time_global_{init,fini}() */
-#include <lu_time.h>
-
#include <cl_object.h>
+#include <lu_object.h>
#include "cl_internal.h"
-static cfs_mem_cache_t *cl_env_kmem;
+static struct kmem_cache *cl_env_kmem;
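+/* Slab caches for DIO state, plus size-indexed caches for cl_page
+ * allocations; the page caches are created on demand (see cl_page.c). */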
+struct kmem_cache *cl_dio_aio_kmem;
+struct kmem_cache *cl_sub_dio_kmem;
+struct kmem_cache *cl_page_kmem_array[16];
+unsigned short cl_page_kmem_size_array[16];
-/** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
-/** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;
-extern __u32 lu_context_tags_default;
-extern __u32 lu_session_tags_default;
/**
* Initialize cl_object_header.
*/
int cl_object_header_init(struct cl_object_header *h)
{
- int result;
+ int result;
- ENTRY;
- result = lu_object_header_init(&h->coh_lu);
- if (result == 0) {
- spin_lock_init(&h->coh_page_guard);
- spin_lock_init(&h->coh_lock_guard);
+ ENTRY;
+ result = lu_object_header_init(&h->coh_lu);
+ if (result == 0) {
spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
- lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- h->coh_pages = 0;
- /* XXX hard coded GFP_* mask. */
- INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
- CFS_INIT_LIST_HEAD(&h->coh_locks);
- }
- RETURN(result);
+ h->coh_page_bufsize = 0;
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_object_header_init);
/**
 * Finalize cl_object_header.
*/
void cl_object_header_fini(struct cl_object_header *h)
{
- LASSERT(cfs_list_empty(&h->coh_locks));
lu_object_header_fini(&h->coh_lu);
}
-EXPORT_SYMBOL(cl_object_header_fini);
/**
* Returns a cl_object with a given \a fid.
 */
struct cl_object *cl_object_find(const struct lu_env *env,
struct cl_device *cd, const struct lu_fid *fid,
const struct cl_object_conf *c)
{
- cfs_might_sleep();
+ might_sleep();
return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
/**
* Returns the top-object for a given \a o.
*
- * \see cl_page_top(), cl_io_top()
+ * \see cl_io_top()
*/
struct cl_object *cl_object_top(struct cl_object *o)
{
*
* Prevents data-attributes from changing, until lock is released by
* cl_object_attr_unlock(). This has to be called before calls to
- * cl_object_attr_get(), cl_object_attr_set().
+ * cl_object_attr_get(), cl_object_attr_update().
*/
void cl_object_attr_lock(struct cl_object *o)
+__acquires(cl_object_attr_guard(o))
{
spin_lock(cl_object_attr_guard(o));
}
* Releases data-attributes lock, acquired by cl_object_attr_lock().
*/
void cl_object_attr_unlock(struct cl_object *o)
+__releases(cl_object_attr_guard(o))
{
spin_unlock(cl_object_attr_guard(o));
}
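+
+/*
+ * Usage sketch (illustrative only): attribute reads and updates must be
+ * bracketed by the guard; "new_size" below is a hypothetical value computed
+ * by the caller:
+ *
+ *	cl_object_attr_lock(obj);
+ *	rc = cl_object_attr_get(env, obj, attr);
+ *	if (rc == 0 && attr->cat_size < new_size) {
+ *		attr->cat_size = new_size;
+ *		rc = cl_object_attr_update(env, obj, attr, CAT_SIZE);
+ *	}
+ *	cl_object_attr_unlock(obj);
+ */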
* top-to-bottom to fill in parts of \a attr that this layer is responsible
* for.
*/
-int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
+int cl_object_attr_get(const struct lu_env *env, struct cl_object *top,
+ struct cl_attr *attr)
{
- struct lu_object_header *top;
- int result;
+ struct cl_object *obj;
+ int result = 0;
- LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
- ENTRY;
-
- top = obj->co_lu.lo_header;
- result = 0;
- cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_get != NULL) {
- result = obj->co_ops->coo_attr_get(env, obj, attr);
- if (result != 0) {
- if (result > 0)
- result = 0;
- break;
- }
- }
- }
- RETURN(result);
+ assert_spin_locked(cl_object_attr_guard(top));
+ ENTRY;
+
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_attr_get != NULL) {
+ result = obj->co_ops->coo_attr_get(env, obj, attr);
+ if (result != 0) {
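+				/* a positive result means this layer fully
+				 * handled the attributes; report success,
+				 * but stop the iteration in either case */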
+ if (result > 0)
+ result = 0;
+ break;
+ }
+ }
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_object_attr_get);
* Updates data-attributes of an object \a obj.
*
* Only attributes, mentioned in a validness bit-mask \a v are
- * updated. Calls cl_object_operations::coo_attr_set() on every layer, bottom
+ * updated. Calls cl_object_operations::coo_upd_attr() on every layer, bottom
* to top.
*/
-int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned v)
+int cl_object_attr_update(const struct lu_env *env, struct cl_object *top,
+ const struct cl_attr *attr, unsigned v)
{
- struct lu_object_header *top;
- int result;
+ struct cl_object *obj;
+ int result = 0;
- LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
- ENTRY;
-
- top = obj->co_lu.lo_header;
- result = 0;
- cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
- co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_set != NULL) {
- result = obj->co_ops->coo_attr_set(env, obj, attr, v);
- if (result != 0) {
- if (result > 0)
- result = 0;
- break;
- }
- }
- }
- RETURN(result);
+ assert_spin_locked(cl_object_attr_guard(top));
+ ENTRY;
+
+ cl_object_for_each_reverse(obj, top) {
+ if (obj->co_ops->coo_attr_update != NULL) {
+ result = obj->co_ops->coo_attr_update(env, obj, attr,
+ v);
+ if (result != 0) {
+ if (result > 0)
+ result = 0;
+ break;
+ }
+ }
+ }
+ RETURN(result);
}
-EXPORT_SYMBOL(cl_object_attr_set);
+EXPORT_SYMBOL(cl_object_attr_update);
+
+/**
+ * Mark the inode dirty when it has uncommitted (unstable) pages. Then,
+ * when the system is under memory pressure, background writeback will be
+ * triggered to commit and unpin those pages.
+ */
+void cl_object_dirty_for_sync(const struct lu_env *env, struct cl_object *top)
+{
+ struct cl_object *obj;
+
+ ENTRY;
+
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_dirty_for_sync != NULL)
+ obj->co_ops->coo_dirty_for_sync(env, obj);
+ }
+ EXIT;
+}
+EXPORT_SYMBOL(cl_object_dirty_for_sync);
/**
* Notifies layers (bottom-to-top) that glimpse AST was received.
*
* \see cl_lock_operations::clo_glimpse()
*/
-int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
- struct ost_lvb *lvb)
+int cl_object_glimpse(const struct lu_env *env, struct cl_object *top,
+ struct ost_lvb *lvb)
{
- struct lu_object_header *top;
- int result;
+ struct cl_object *obj;
+ int result = 0;
- ENTRY;
- top = obj->co_lu.lo_header;
- result = 0;
- cfs_list_for_each_entry_reverse(obj, &top->loh_layers,
- co_lu.lo_linkage) {
- if (obj->co_ops->coo_glimpse != NULL) {
- result = obj->co_ops->coo_glimpse(env, obj, lvb);
- if (result != 0)
- break;
- }
- }
- LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
- "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
- "ctime: "LPU64" blocks: "LPU64"\n",
- lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
- lvb->lvb_ctime, lvb->lvb_blocks);
- RETURN(result);
+ ENTRY;
+ cl_object_for_each_reverse(obj, top) {
+ if (obj->co_ops->coo_glimpse != NULL) {
+ result = obj->co_ops->coo_glimpse(env, obj, lvb);
+ if (result != 0)
+ break;
+ }
+ }
+ LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top->co_lu.lo_header),
+ "size: %llu mtime: %llu atime: %llu "
+ "ctime: %llu blocks: %llu\n",
+ lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
+ lvb->lvb_ctime, lvb->lvb_blocks);
+ RETURN(result);
}
EXPORT_SYMBOL(cl_object_glimpse);
/**
* Updates a configuration of an object \a obj.
*/
-int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
+int cl_conf_set(const struct lu_env *env, struct cl_object *top,
+ const struct cl_object_conf *conf)
{
- struct lu_object_header *top;
- int result;
+ struct cl_object *obj;
+ int result = 0;
- ENTRY;
- top = obj->co_lu.lo_header;
- result = 0;
- cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_conf_set != NULL) {
- result = obj->co_ops->coo_conf_set(env, obj, conf);
- if (result != 0)
- break;
- }
- }
- RETURN(result);
+ ENTRY;
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_conf_set != NULL) {
+ result = obj->co_ops->coo_conf_set(env, obj, conf);
+ if (result)
+ break;
+ }
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_conf_set);
/**
- * Helper function removing all object locks, and marking object for
- * deletion. All object pages must have been deleted at this point.
- *
- * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
- * and sub- objects respectively.
+ * Prunes caches of pages and locks for this object.
*/
-void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
+int cl_object_prune(const struct lu_env *env, struct cl_object *top)
{
- struct cl_object_header *hdr;
+ struct cl_object *obj;
+ int result = 0;
+ ENTRY;
- hdr = cl_object_header(obj);
- LASSERT(hdr->coh_tree.rnode == NULL);
- LASSERT(hdr->coh_pages == 0);
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_prune != NULL) {
+ result = obj->co_ops->coo_prune(env, obj);
+ if (result)
+ break;
+ }
+ }
- set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
- /*
- * Destroy all locks. Object destruction (including cl_inode_fini())
- * cannot cancel the locks, because in the case of a local client,
- * where client and server share the same thread running
- * prune_icache(), this can dead-lock with ldlm_cancel_handler()
- * waiting on __wait_on_freeing_inode().
- */
- cl_locks_prune(env, obj, 0);
+ RETURN(result);
}
-EXPORT_SYMBOL(cl_object_kill);
+EXPORT_SYMBOL(cl_object_prune);
/**
- * Prunes caches of pages and locks for this object.
+ * Get stripe information of this object.
*/
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
+int cl_object_getstripe(const struct lu_env *env, struct cl_object *top,
+ struct lov_user_md __user *uarg, size_t size)
{
- ENTRY;
- cl_pages_prune(env, obj);
- cl_locks_prune(env, obj, 1);
- EXIT;
+ struct cl_object *obj;
+ int result = 0;
+ ENTRY;
+
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_getstripe) {
+ result = obj->co_ops->coo_getstripe(env, obj, uarg,
+ size);
+ if (result)
+ break;
+ }
+ }
+ RETURN(result);
}
-EXPORT_SYMBOL(cl_object_prune);
+EXPORT_SYMBOL(cl_object_getstripe);
/**
- * Check if the object has locks.
+ * Get fiemap extents from a file object.
+ *
+ * \param env [in] Lustre environment
+ * \param top [in] file object
+ * \param key [in] fiemap request argument
+ * \param fiemap [out] fiemap extents mapping retrieved
+ * \param buflen [in] max buffer length of \a fiemap
+ *
+ * \retval 0 success
+ * \retval < 0 error
*/
-int cl_object_has_locks(struct cl_object *obj)
+int cl_object_fiemap(const struct lu_env *env, struct cl_object *top,
+ struct ll_fiemap_info_key *key,
+ struct fiemap *fiemap, size_t *buflen)
+{
+ struct cl_object *obj;
+ int result = 0;
+ ENTRY;
+
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_fiemap) {
+ result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
+ buflen);
+ if (result)
+ break;
+ }
+ }
+ RETURN(result);
+}
+EXPORT_SYMBOL(cl_object_fiemap);
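+
+/*
+ * Caller sketch (illustrative only; local variable names are hypothetical,
+ * and fiemap_count_to_size() is the size helper from lustre_fiemap.h):
+ *
+ *	struct ll_fiemap_info_key key = { ... };
+ *	size_t buflen = fiemap_count_to_size(fiemap->fm_extent_count);
+ *	int rc;
+ *
+ *	rc = cl_object_fiemap(env, cl_object_top(obj), &key, fiemap, &buflen);
+ */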
+
+int cl_object_layout_get(const struct lu_env *env, struct cl_object *top,
+ struct cl_layout *cl)
+{
+ struct cl_object *obj;
+ ENTRY;
+
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_layout_get)
+ return obj->co_ops->coo_layout_get(env, obj, cl);
+ }
+
+ RETURN(-EOPNOTSUPP);
+}
+EXPORT_SYMBOL(cl_object_layout_get);
+
+loff_t cl_object_maxbytes(struct cl_object *top)
+{
+ struct cl_object *obj;
+ loff_t maxbytes = LLONG_MAX;
+ ENTRY;
+
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_maxbytes)
+ maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
+ maxbytes);
+ }
+
+ RETURN(maxbytes);
+}
+EXPORT_SYMBOL(cl_object_maxbytes);
+
+int cl_object_flush(const struct lu_env *env, struct cl_object *top,
+ struct ldlm_lock *lock)
+{
+ struct cl_object *obj;
+ int rc = 0;
+ ENTRY;
+
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_object_flush) {
+ rc = obj->co_ops->coo_object_flush(env, obj, lock);
+ if (rc)
+ break;
+ }
+ }
+ RETURN(rc);
+}
+EXPORT_SYMBOL(cl_object_flush);
+
+int cl_object_inode_ops(const struct lu_env *env, struct cl_object *top,
+ enum coo_inode_opc opc, void *data)
{
- struct cl_object_header *head = cl_object_header(obj);
- int has;
+ struct cl_object *obj;
+ int rc = 0;
- spin_lock(&head->coh_lock_guard);
- has = cfs_list_empty(&head->coh_locks);
- spin_unlock(&head->coh_lock_guard);
+ ENTRY;
- return (has == 0);
+ cl_object_for_each(obj, top) {
+ if (obj->co_ops->coo_inode_ops) {
+ rc = obj->co_ops->coo_inode_ops(env, obj, opc, data);
+ if (rc)
+ break;
+ }
+ }
+ RETURN(rc);
}
-EXPORT_SYMBOL(cl_object_has_locks);
+EXPORT_SYMBOL(cl_object_inode_ops);
+
+/**
+ * Helper function marking the object for deletion. All object pages and
+ * locks must have been deleted at this point.
+ *
+ * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
+ * and sub- objects respectively.
+ */
+void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
+{
+ struct cl_object_header *hdr = cl_object_header(obj);
+
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
+}
+EXPORT_SYMBOL(cl_object_kill);
void cache_stats_init(struct cache_stats *cs, const char *name)
{
	int i;

cs->cs_name = name;
for (i = 0; i < CS_NR; i++)
- cfs_atomic_set(&cs->cs_stats[i], 0);
+ atomic_set(&cs->cs_stats[i], 0);
}
-int cache_stats_print(const struct cache_stats *cs,
- char *page, int count, int h)
+static int cache_stats_print(const struct cache_stats *cs,
+ struct seq_file *m, int h)
{
- int nob = 0;
int i;
+
/*
* lookup hit total cached create
* env: ...... ...... ...... ...... ......
	 */
if (h) {
const char *names[CS_NR] = CS_NAMES;
- nob += snprintf(page + nob, count - nob, "%6s", " ");
+ seq_printf(m, "%6s", " ");
for (i = 0; i < CS_NR; i++)
- nob += snprintf(page + nob, count - nob,
- "%8s", names[i]);
- nob += snprintf(page + nob, count - nob, "\n");
+ seq_printf(m, "%8s", names[i]);
+ seq_printf(m, "\n");
}
- nob += snprintf(page + nob, count - nob, "%5.5s:", cs->cs_name);
+ seq_printf(m, "%5.5s:", cs->cs_name);
for (i = 0; i < CS_NR; i++)
- nob += snprintf(page + nob, count - nob, "%8u",
- cfs_atomic_read(&cs->cs_stats[i]));
- return nob;
+ seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
+ return 0;
}
+static void cl_env_percpu_refill(void);
+
/**
* Initialize client site.
*
*/
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
- int i;
+ size_t i;
int result;
result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
if (result == 0) {
cache_stats_init(&s->cs_pages, "pages");
- cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
- cfs_atomic_set(&s->cs_pages_state[0], 0);
- for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- cfs_atomic_set(&s->cs_locks_state[i], 0);
- }
- return result;
+			atomic_set(&s->cs_pages_state[i], 0);
+ cl_env_percpu_refill();
+ }
+ return result;
}
EXPORT_SYMBOL(cl_site_init);
static struct cache_stats cl_env_stats = {
.cs_name = "envs",
- .cs_stats = { CFS_ATOMIC_INIT(0), }
+ .cs_stats = { ATOMIC_INIT(0), }
};
/**
* Outputs client site statistical counters into a buffer. Suitable for
* ll_rd_*()-style functions.
*/
-int cl_site_stats_print(const struct cl_site *site, char *page, int count)
+int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
- int nob;
- int i;
- static const char *pstate[] = {
- [CPS_CACHED] = "c",
- [CPS_OWNED] = "o",
- [CPS_PAGEOUT] = "w",
- [CPS_PAGEIN] = "r",
- [CPS_FREEING] = "f"
- };
- static const char *lstate[] = {
- [CLS_NEW] = "n",
- [CLS_QUEUING] = "q",
- [CLS_ENQUEUED] = "e",
- [CLS_HELD] = "h",
- [CLS_INTRANSIT] = "t",
- [CLS_CACHED] = "c",
- [CLS_FREEING] = "f"
- };
+ static const char *const pstate[] = {
+ [CPS_CACHED] = "c",
+ [CPS_OWNED] = "o",
+ [CPS_PAGEOUT] = "w",
+ [CPS_PAGEIN] = "r",
+ [CPS_FREEING] = "f"
+ };
+ size_t i;
+
/*
lookup hit total busy create
pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
-	 locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
env: ...... ...... ...... ...... ......
*/
- nob = lu_site_stats_print(&site->cs_lu, page, count);
- nob += cache_stats_print(&site->cs_pages, page + nob, count - nob, 1);
- nob += snprintf(page + nob, count - nob, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
- nob += snprintf(page + nob, count - nob, "%s: %u ",
- pstate[i],
- cfs_atomic_read(&site->cs_pages_state[i]));
- nob += snprintf(page + nob, count - nob, "]\n");
- nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
- nob += snprintf(page + nob, count - nob, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
- nob += snprintf(page + nob, count - nob, "%s: %u ",
- lstate[i],
- cfs_atomic_read(&site->cs_locks_state[i]));
- nob += snprintf(page + nob, count - nob, "]\n");
- nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
- nob += snprintf(page + nob, count - nob, "\n");
- return nob;
+ lu_site_stats_seq_print(&site->cs_lu, m);
+ cache_stats_print(&site->cs_pages, m, 1);
+ seq_printf(m, " [");
+ for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
+ seq_printf(m, "%s: %u ", pstate[i],
+ atomic_read(&site->cs_pages_state[i]));
+ seq_printf(m, "]\n");
+ cache_stats_print(&cl_env_stats, m, 0);
+ seq_printf(m, "\n");
+ return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);
*
*/
-/**
- * The most efficient way is to store cl_env pointer in task specific
- * structures. On Linux, it wont' be easy to use task_struct->journal_info
- * because Lustre code may call into other fs which has certain assumptions
- * about journal_info. Currently following fields in task_struct are identified
- * can be used for this purpose:
- * - cl_env: for liblustre.
- * - tux_info: ony on RedHat kernel.
- * - ...
- * \note As long as we use task_struct to store cl_env, we assume that once
- * called into Lustre, we'll never call into the other part of the kernel
- * which will use those fields in task_struct without explicitly exiting
- * Lustre.
- *
- * If there's no space in task_struct is available, hash will be used.
- * bz20044, bz22683.
- */
-
-static CFS_LIST_HEAD(cl_envs);
-static unsigned cl_envs_cached_nr = 0;
-static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
- * for now. */
-static DEFINE_SPINLOCK(cl_envs_guard);
+static unsigned cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit
+ * for now. */
+static struct cl_env_cache {
+ rwlock_t cec_guard;
+ unsigned cec_count;
+ struct list_head cec_envs;
+} *cl_envs = NULL;
struct cl_env {
void *ce_magic;
struct lu_env ce_lu;
struct lu_context ce_ses;
-#ifdef LL_TASK_CL_ENV
- void *ce_prev;
-#else
- /**
- * This allows cl_env to be entered into cl_env_hash which implements
- * the current thread -> client environment lookup.
- */
- cfs_hlist_node_t ce_node;
-#endif
- /**
- * Owner for the current cl_env.
- *
- * If LL_TASK_CL_ENV is defined, this point to the owning cfs_current(),
- * only for debugging purpose ;
- * Otherwise hash is used, and this is the key for cfs_hash.
- * Now current thread pid is stored. Note using thread pointer would
- * lead to unbalanced hash because of its specific allocation locality
- * and could be varied for different platforms and OSes, even different
- * OS versions.
- */
- void *ce_owner;
-
/*
* Linkage into global list of all client environments. Used for
* garbage collection.
*/
- cfs_list_t ce_linkage;
+ struct list_head ce_linkage;
/*
	 * Reference count of this environment.
*/
	int ce_ref;
	/*
	 * Debugging field: address of the caller who made original
	 * allocation.
	 */
void *ce_debug;
};
+static void cl_env_inc(enum cache_stats_item item)
+{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
-#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
-
-#define CL_ENV_DEC(counter) do { \
- LASSERT(cfs_atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
- cfs_atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
-} while (0)
-#else
-#define CL_ENV_INC(counter)
-#define CL_ENV_DEC(counter)
+ atomic_inc(&cl_env_stats.cs_stats[item]);
#endif
-
-static void cl_env_init0(struct cl_env *cle, void *debug)
-{
- LASSERT(cle->ce_ref == 0);
- LASSERT(cle->ce_magic == &cl_env_init0);
- LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
-
- cle->ce_ref = 1;
- cle->ce_debug = debug;
- CL_ENV_INC(busy);
}
-
-#ifndef LL_TASK_CL_ENV
-/*
- * The implementation of using hash table to connect cl_env and thread
- */
-
-static cfs_hash_t *cl_env_hash;
-
-static unsigned cl_env_hops_hash(cfs_hash_t *lh,
- const void *key, unsigned mask)
+static void cl_env_dec(enum cache_stats_item item)
{
-#if BITS_PER_LONG == 64
- return cfs_hash_u64_hash((__u64)key, mask);
-#else
- return cfs_hash_u32_hash((__u32)key, mask);
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+ LASSERT(atomic_read(&cl_env_stats.cs_stats[item]) > 0);
+ atomic_dec(&cl_env_stats.cs_stats[item]);
#endif
}
-static void *cl_env_hops_obj(cfs_hlist_node_t *hn)
-{
- struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
- LASSERT(cle->ce_magic == &cl_env_init0);
- return (void *)cle;
-}
-
-static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn)
-{
- struct cl_env *cle = cl_env_hops_obj(hn);
-
- LASSERT(cle->ce_owner != NULL);
- return (key == cle->ce_owner);
-}
-
-static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn)
-{
- struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
- LASSERT(cle->ce_magic == &cl_env_init0);
-}
-
-static cfs_hash_ops_t cl_env_hops = {
- .hs_hash = cl_env_hops_hash,
- .hs_key = cl_env_hops_obj,
- .hs_keycmp = cl_env_hops_keycmp,
- .hs_object = cl_env_hops_obj,
- .hs_get = cl_env_hops_noop,
- .hs_put_locked = cl_env_hops_noop,
-};
-
-static inline struct cl_env *cl_env_fetch(void)
-{
- struct cl_env *cle;
-
- cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
- LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
- return cle;
-}
-
-static inline void cl_env_attach(struct cl_env *cle)
-{
- if (cle) {
- int rc;
-
- LASSERT(cle->ce_owner == NULL);
- cle->ce_owner = (void *) (long) cfs_current()->pid;
- rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(rc == 0);
- }
-}
-
-static inline void cl_env_do_detach(struct cl_env *cle)
-{
- void *cookie;
-
- LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
- cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(cookie == cle);
- cle->ce_owner = NULL;
-}
-
-static int cl_env_store_init(void) {
- cl_env_hash = cfs_hash_create("cl_env",
- HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
- HASH_CL_ENV_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &cl_env_hops,
- CFS_HASH_RW_BKTLOCK);
- return cl_env_hash != NULL ? 0 :-ENOMEM;
-}
-
-static void cl_env_store_fini(void) {
- cfs_hash_putref(cl_env_hash);
-}
-
-#else /* LL_TASK_CL_ENV */
-/*
- * The implementation of store cl_env directly in thread structure.
- */
-
-static inline struct cl_env *cl_env_fetch(void)
-{
- struct cl_env *cle;
-
- cle = cfs_current()->LL_TASK_CL_ENV;
- if (cle && cle->ce_magic != &cl_env_init0)
- cle = NULL;
- return cle;
-}
-
-static inline void cl_env_attach(struct cl_env *cle)
-{
- if (cle) {
- LASSERT(cle->ce_owner == NULL);
- cle->ce_owner = cfs_current();
- cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
- cfs_current()->LL_TASK_CL_ENV = cle;
- }
-}
-
-static inline void cl_env_do_detach(struct cl_env *cle)
-{
- LASSERT(cle->ce_owner == cfs_current());
- LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
- cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
- cle->ce_owner = NULL;
-}
-
-static int cl_env_store_init(void) { return 0; }
-static void cl_env_store_fini(void) { }
-
-#endif /* LL_TASK_CL_ENV */
-
-static inline struct cl_env *cl_env_detach(struct cl_env *cle)
+static void cl_env_init0(struct cl_env *cle, void *debug)
{
- if (cle == NULL)
- cle = cl_env_fetch();
-
- if (cle && cle->ce_owner)
- cl_env_do_detach(cle);
+ LASSERT(cle->ce_ref == 0);
+ LASSERT(cle->ce_magic == &cl_env_init0);
+ LASSERT(cle->ce_debug == NULL);
- return cle;
+ cle->ce_ref = 1;
+ cle->ce_debug = debug;
+ cl_env_inc(CS_busy);
}
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
- struct lu_env *env;
- struct cl_env *cle;
+ struct lu_env *env;
+ struct cl_env *cle;
- OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO);
- if (cle != NULL) {
- int rc;
+ OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS);
+ if (cle != NULL) {
+ int rc;
- CFS_INIT_LIST_HEAD(&cle->ce_linkage);
- cle->ce_magic = &cl_env_init0;
- env = &cle->ce_lu;
- rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
- if (rc == 0) {
- rc = lu_context_init(&cle->ce_ses,
- LCT_SESSION | ses_tags);
- if (rc == 0) {
- lu_context_enter(&cle->ce_ses);
- env->le_ses = &cle->ce_ses;
- cl_env_init0(cle, debug);
- } else
- lu_env_fini(env);
- }
- if (rc != 0) {
- OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
- env = ERR_PTR(rc);
- } else {
- CL_ENV_INC(create);
- CL_ENV_INC(total);
- }
- } else
- env = ERR_PTR(-ENOMEM);
- return env;
+ INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ env = &cle->ce_lu;
+ rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses,
+ LCT_SESSION | ses_tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ cl_env_init0(cle, debug);
+ } else
+ lu_env_fini(env);
+ }
+ if (rc != 0) {
+ OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
+ env = ERR_PTR(rc);
+ } else {
+ cl_env_inc(CS_create);
+ cl_env_inc(CS_total);
+ }
+ } else
+ env = ERR_PTR(-ENOMEM);
+ return env;
}
static void cl_env_fini(struct cl_env *cle)
{
- CL_ENV_DEC(total);
- lu_context_fini(&cle->ce_lu.le_ctx);
- lu_context_fini(&cle->ce_ses);
- OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
+ cl_env_dec(CS_total);
+ lu_context_fini(&cle->ce_lu.le_ctx);
+ lu_context_fini(&cle->ce_ses);
+ OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
}
+/* Get a cl_env, either from the per-CPU cache for the current CPU, or by
+ * allocating a new one.
+ */
static struct lu_env *cl_env_obtain(void *debug)
{
struct cl_env *cle;
struct lu_env *env;
+ int cpu = get_cpu();
ENTRY;
- spin_lock(&cl_envs_guard);
- LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
- if (cl_envs_cached_nr > 0) {
+
+ read_lock(&cl_envs[cpu].cec_guard);
+ LASSERT(equi(cl_envs[cpu].cec_count == 0,
+ list_empty(&cl_envs[cpu].cec_envs)));
+ if (cl_envs[cpu].cec_count > 0) {
int rc;
- cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- cfs_list_del_init(&cle->ce_linkage);
- cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
+ cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
+ ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ cl_envs[cpu].cec_count--;
+ read_unlock(&cl_envs[cpu].cec_guard);
+ put_cpu();
env = &cle->ce_lu;
rc = lu_env_refill(env);
		if (rc == 0) {
			cl_env_init0(cle, debug);
			lu_context_enter(&env->le_ctx);
			lu_context_enter(&cle->ce_ses);
		} else {
			cl_env_fini(cle);
env = ERR_PTR(rc);
}
} else {
- spin_unlock(&cl_envs_guard);
+ read_unlock(&cl_envs[cpu].cec_guard);
+ put_cpu();
env = cl_env_new(lu_context_tags_default,
lu_session_tags_default, debug);
}
return container_of(env, struct cl_env, ce_lu);
}
-struct lu_env *cl_env_peek(int *refcheck)
-{
- struct lu_env *env;
- struct cl_env *cle;
-
- CL_ENV_INC(lookup);
-
- /* check that we don't go far from untrusted pointer */
- CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
-
- env = NULL;
- cle = cl_env_fetch();
- if (cle != NULL) {
- CL_ENV_INC(hit);
- env = &cle->ce_lu;
- *refcheck = ++cle->ce_ref;
- }
- CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
- return env;
-}
-EXPORT_SYMBOL(cl_env_peek);
-
/**
- * Returns lu_env: if there already is an environment associated with the
- * current thread, it is returned, otherwise, new environment is allocated.
+ * Returns an lu_env.
*
- * Allocations are amortized through the global cache of environments.
+ * There is no link to the current thread: this simply returns an env from
+ * the per-CPU cache or allocates a new one.
+ *
+ * If you need to get the specific environment you created for this thread,
+ * you must either pass the pointer directly or store it in the file/inode
+ * private data and retrieve it from there using ll_cl_add/ll_cl_find.
*
* \param refcheck pointer to a counter used to detect environment leaks. In
* the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope, and pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
*
* \see cl_env_put()
*/
-struct lu_env *cl_env_get(int *refcheck)
+struct lu_env *cl_env_get(__u16 *refcheck)
{
struct lu_env *env;
- env = cl_env_peek(refcheck);
- if (env == NULL) {
- env = cl_env_obtain(__builtin_return_address(0));
- if (!IS_ERR(env)) {
- struct cl_env *cle;
+ env = cl_env_obtain(__builtin_return_address(0));
+ if (!IS_ERR(env)) {
+ struct cl_env *cle;
- cle = cl_env_container(env);
- cl_env_attach(cle);
- *refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- }
+ cle = cl_env_container(env);
+ *refcheck = cle->ce_ref;
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
return env;
}
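+
+/*
+ * Typical usage (sketch): acquire and release in the same lexical scope so
+ * that \a refcheck can catch leaked references:
+ *
+ *	__u16 refcheck;
+ *	struct lu_env *env;
+ *
+ *	env = cl_env_get(&refcheck);
+ *	if (IS_ERR(env))
+ *		return PTR_ERR(env);
+ *	... use env ...
+ *	cl_env_put(env, &refcheck);
+ */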
*
* \see cl_env_get()
*/
-struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
+struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags)
{
struct lu_env *env;
- LASSERT(cl_env_peek(refcheck) == NULL);
env = cl_env_new(tags, tags, __builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
static void cl_env_exit(struct cl_env *cle)
{
- LASSERT(cle->ce_owner == NULL);
lu_context_exit(&cle->ce_lu.le_ctx);
lu_context_exit(&cle->ce_ses);
}
unsigned cl_env_cache_purge(unsigned nr)
{
struct cl_env *cle;
+ unsigned i;
ENTRY;
- spin_lock(&cl_envs_guard);
- for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
- cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- cfs_list_del_init(&cle->ce_linkage);
- LASSERT(cl_envs_cached_nr > 0);
- cl_envs_cached_nr--;
- spin_unlock(&cl_envs_guard);
-
- cl_env_fini(cle);
- spin_lock(&cl_envs_guard);
+ for_each_possible_cpu(i) {
+ write_lock(&cl_envs[i].cec_guard);
+ for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
+ cle = container_of(cl_envs[i].cec_envs.next,
+ struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ LASSERT(cl_envs[i].cec_count > 0);
+ cl_envs[i].cec_count--;
+ write_unlock(&cl_envs[i].cec_guard);
+
+ cl_env_fini(cle);
+ write_lock(&cl_envs[i].cec_guard);
+ }
+ LASSERT(equi(cl_envs[i].cec_count == 0,
+ list_empty(&cl_envs[i].cec_envs)));
+ write_unlock(&cl_envs[i].cec_guard);
}
- LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
- spin_unlock(&cl_envs_guard);
RETURN(nr);
}
EXPORT_SYMBOL(cl_env_cache_purge);
* Release an environment.
*
* Decrement \a env reference counter. When counter drops to 0, nothing in
- * this thread is using environment and it is returned to the allocation
- * cache, or freed straight away, if cache is large enough.
+ * this thread is using environment and it is returned to the per-CPU cache or
+ * freed immediately if the cache is full.
*/
-void cl_env_put(struct lu_env *env, int *refcheck)
+void cl_env_put(struct lu_env *env, __u16 *refcheck)
{
struct cl_env *cle;

	cle = cl_env_container(env);
CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
if (--cle->ce_ref == 0) {
- CL_ENV_DEC(busy);
- cl_env_detach(cle);
- cle->ce_debug = NULL;
- cl_env_exit(cle);
- /*
- * Don't bother to take a lock here.
- *
- * Return environment to the cache only when it was allocated
- * with the standard tags.
- */
- if (cl_envs_cached_nr < cl_envs_cached_max &&
- (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
- (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
- spin_lock(&cl_envs_guard);
- cfs_list_add(&cle->ce_linkage, &cl_envs);
- cl_envs_cached_nr++;
- spin_unlock(&cl_envs_guard);
+ int cpu = get_cpu();
+
+ cl_env_dec(CS_busy);
+ cle->ce_debug = NULL;
+ cl_env_exit(cle);
+ /*
+ * Don't bother to take a lock here.
+ *
+ * Return environment to the cache only when it was allocated
+ * with the standard tags.
+ */
+ if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
+ (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == lu_context_tags_default &&
+ (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == lu_session_tags_default) {
+ read_lock(&cl_envs[cpu].cec_guard);
+ list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
+ cl_envs[cpu].cec_count++;
+ read_unlock(&cl_envs[cpu].cec_guard);
} else
cl_env_fini(cle);
+ put_cpu();
}
}
EXPORT_SYMBOL(cl_env_put);
/**
- * Declares a point of re-entrancy.
+ * Converts struct cl_attr to struct ost_lvb.
*
- * \see cl_env_reexit()
+ * \see cl_lvb2attr
*/
-void *cl_env_reenter(void)
+void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
{
- return cl_env_detach(NULL);
+ lvb->lvb_size = attr->cat_size;
+ lvb->lvb_mtime = attr->cat_mtime;
+ lvb->lvb_atime = attr->cat_atime;
+ lvb->lvb_ctime = attr->cat_ctime;
+ lvb->lvb_blocks = attr->cat_blocks;
}
-EXPORT_SYMBOL(cl_env_reenter);
/**
- * Exits re-entrancy.
+ * Converts struct ost_lvb to struct cl_attr.
+ *
+ * \see cl_attr2lvb
*/
-void cl_env_reexit(void *cookie)
+void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
- cl_env_detach(NULL);
- cl_env_attach(cookie);
+ attr->cat_size = lvb->lvb_size;
+ attr->cat_mtime = lvb->lvb_mtime;
+ attr->cat_atime = lvb->lvb_atime;
+ attr->cat_ctime = lvb->lvb_ctime;
+ attr->cat_blocks = lvb->lvb_blocks;
}
-EXPORT_SYMBOL(cl_env_reexit);
+EXPORT_SYMBOL(cl_lvb2attr);
-/**
- * Setup user-supplied \a env as a current environment. This is to be used to
- * guaranteed that environment exists even when cl_env_get() fails. It is up
- * to user to ensure proper concurrency control.
- *
- * \see cl_env_unplant()
- */
-void cl_env_implant(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle = cl_env_container(env);
+static struct cl_env cl_env_percpu[NR_CPUS];
+static DEFINE_MUTEX(cl_env_percpu_mutex);
- LASSERT(cle->ce_ref > 0);
+static int cl_env_percpu_init(void)
+{
+ struct cl_env *cle;
+ int tags = LCT_REMEMBER | LCT_NOREF;
+ int i, j;
+ int rc = 0;
- cl_env_attach(cle);
- cl_env_get(refcheck);
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-}
-EXPORT_SYMBOL(cl_env_implant);
+ for_each_possible_cpu(i) {
+ struct lu_env *env;
-/**
- * Detach environment installed earlier by cl_env_implant().
- */
-void cl_env_unplant(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle = cl_env_container(env);
+ rwlock_init(&cl_envs[i].cec_guard);
+ INIT_LIST_HEAD(&cl_envs[i].cec_envs);
+ cl_envs[i].cec_count = 0;
- LASSERT(cle->ce_ref > 1);
+ cle = &cl_env_percpu[i];
+ env = &cle->ce_lu;
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
+ INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ rc = lu_env_init(env, LCT_CL_THREAD | tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ } else {
+ lu_env_fini(env);
+ }
+ }
+ if (rc != 0)
+ break;
+ }
+ if (rc != 0) {
+		/* Entries 0 .. i-1 were fully initialized and must be torn
+		 * down; only those are cleaned up, entry i and above were
+		 * never set up. */
+ for (j = 0; j < i; j++) {
+ cle = &cl_env_percpu[j];
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+ }
- cl_env_detach(cle);
- cl_env_put(env, refcheck);
+ return rc;
}
-EXPORT_SYMBOL(cl_env_unplant);
-struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
+static void cl_env_percpu_fini(void)
{
- struct lu_env *env;
+ int i;
- nest->cen_cookie = NULL;
- env = cl_env_peek(&nest->cen_refcheck);
- if (env != NULL) {
- if (!cl_io_is_going(env))
- return env;
- else {
- cl_env_put(env, &nest->cen_refcheck);
- nest->cen_cookie = cl_env_reenter();
- }
- }
- env = cl_env_get(&nest->cen_refcheck);
- if (IS_ERR(env)) {
- cl_env_reexit(nest->cen_cookie);
- return env;
- }
+ for_each_possible_cpu(i) {
+ struct cl_env *cle = &cl_env_percpu[i];
- LASSERT(!cl_io_is_going(env));
- return env;
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
}
-EXPORT_SYMBOL(cl_env_nested_get);
-void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
+static void cl_env_percpu_refill(void)
{
- cl_env_put(env, &nest->cen_refcheck);
- cl_env_reexit(nest->cen_cookie);
+ int i;
+
+ mutex_lock(&cl_env_percpu_mutex);
+ for_each_possible_cpu(i)
+ lu_env_refill(&cl_env_percpu[i].ce_lu);
+ mutex_unlock(&cl_env_percpu_mutex);
}
-EXPORT_SYMBOL(cl_env_nested_put);
-/**
- * Converts struct cl_attr to struct ost_lvb.
- *
- * \see cl_lvb2attr
- */
-void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
+void cl_env_percpu_put(struct lu_env *env)
{
- ENTRY;
- lvb->lvb_size = attr->cat_size;
- lvb->lvb_mtime = attr->cat_mtime;
- lvb->lvb_atime = attr->cat_atime;
- lvb->lvb_ctime = attr->cat_ctime;
- lvb->lvb_blocks = attr->cat_blocks;
- EXIT;
+ struct cl_env *cle;
+ int cpu;
+
+ cpu = smp_processor_id();
+ cle = cl_env_container(env);
+ LASSERT(cle == &cl_env_percpu[cpu]);
+
+ cle->ce_ref--;
+ LASSERT(cle->ce_ref == 0);
+
+ cl_env_dec(CS_busy);
+ cle->ce_debug = NULL;
+
+ put_cpu();
}
-EXPORT_SYMBOL(cl_attr2lvb);
+EXPORT_SYMBOL(cl_env_percpu_put);
-/**
- * Converts struct ost_lvb to struct cl_attr.
- *
- * \see cl_attr2lvb
- */
-void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
+struct lu_env *cl_env_percpu_get(void)
{
- ENTRY;
- attr->cat_size = lvb->lvb_size;
- attr->cat_mtime = lvb->lvb_mtime;
- attr->cat_atime = lvb->lvb_atime;
- attr->cat_ctime = lvb->lvb_ctime;
- attr->cat_blocks = lvb->lvb_blocks;
- EXIT;
+ struct cl_env *cle;
+
+ cle = &cl_env_percpu[get_cpu()];
+ cl_env_init0(cle, __builtin_return_address(0));
+
+ return &cle->ce_lu;
}
-EXPORT_SYMBOL(cl_lvb2attr);
+EXPORT_SYMBOL(cl_env_percpu_get);
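+
+/*
+ * Note (usage sketch): cl_env_percpu_get() disables preemption via
+ * get_cpu(), so the environment must only be used for short, non-sleeping
+ * work and released on the same CPU:
+ *
+ *	struct lu_env *env = cl_env_percpu_get();
+ *
+ *	... short, atomic work with env ...
+ *	cl_env_percpu_put(env);
+ */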
/*****************************************************************************
*
}
EXPORT_SYMBOL(cl_stack_fini);
-int cl_lock_init(void);
-void cl_lock_fini(void);
-
-int cl_page_init(void);
-void cl_page_fini(void);
-
static struct lu_context_key cl_key;
struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
return lu_context_key_get(&env->le_ctx, &cl_key);
}
-/* defines cl0_key_{init,fini}() */
-LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
-
-static void *cl_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct cl_thread_info *info;
-
- info = cl0_key_init(ctx, key);
- if (!IS_ERR(info)) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
- }
- return info;
-}
-
-static void cl_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct cl_thread_info *info;
- int i;
-
- info = data;
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
- cl0_key_fini(ctx, key, data);
-}
-
-static void cl_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct cl_thread_info *info = data;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
- LASSERT(info->clt_counters[i].ctc_nr_held == 0);
- LASSERT(info->clt_counters[i].ctc_nr_used == 0);
- LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
- lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
- }
-}
+/* defines cl_key_{init,fini}() */
+LU_KEY_INIT_FINI(cl, struct cl_thread_info);
static struct lu_context_key cl_key = {
.lct_tags = LCT_CL_THREAD,
.lct_init = cl_key_init,
.lct_fini = cl_key_fini,
- .lct_exit = cl_key_exit
};
static struct lu_kmem_descr cl_object_caches[] = {
- {
- .ckd_cache = &cl_env_kmem,
- .ckd_name = "cl_env_kmem",
- .ckd_size = sizeof (struct cl_env)
- },
- {
- .ckd_cache = NULL
- }
+ {
+ .ckd_cache = &cl_env_kmem,
+ .ckd_name = "cl_env_kmem",
+ .ckd_size = sizeof(struct cl_env)
+ },
+ {
+ .ckd_cache = &cl_dio_aio_kmem,
+ .ckd_name = "cl_dio_aio_kmem",
+ .ckd_size = sizeof(struct cl_dio_aio)
+ },
+ {
+ .ckd_cache = &cl_sub_dio_kmem,
+ .ckd_name = "cl_sub_dio_kmem",
+ .ckd_size = sizeof(struct cl_sub_dio)
+ },
+ {
+ .ckd_cache = NULL
+ }
};
/**
 * Global initialization of cl-data: allocate the per-CPU env cache, set up
 * kmem caches and register the cl context key.
*/
int cl_global_init(void)
{
- int result;
+ int result;
- result = cl_env_store_init();
- if (result)
- return result;
+ OBD_ALLOC_PTR_ARRAY(cl_envs, num_possible_cpus());
+ if (cl_envs == NULL)
+ GOTO(out, result = -ENOMEM);
- result = lu_kmem_init(cl_object_caches);
- if (result)
- goto out_store;
+ result = lu_kmem_init(cl_object_caches);
+ if (result)
+ GOTO(out_envs, result);
- LU_CONTEXT_KEY_INIT(&cl_key);
- result = lu_context_key_register(&cl_key);
- if (result)
- goto out_kmem;
+ LU_CONTEXT_KEY_INIT(&cl_key);
+ result = lu_context_key_register(&cl_key);
+ if (result)
+ GOTO(out_kmem, result);
- result = cl_lock_init();
- if (result)
- goto out_context;
+ result = cl_env_percpu_init();
+ if (result) /* no cl_env_percpu_fini on error */
+ GOTO(out_keys, result);
- result = cl_page_init();
- if (result)
- goto out_lock;
+ return 0;
- return 0;
-out_lock:
- cl_lock_fini();
-out_context:
- lu_context_key_degister(&cl_key);
+out_keys:
+ lu_context_key_degister(&cl_key);
out_kmem:
- lu_kmem_fini(cl_object_caches);
-out_store:
- cl_env_store_fini();
- return result;
+ lu_kmem_fini(cl_object_caches);
+out_envs:
+ OBD_FREE_PTR_ARRAY(cl_envs, num_possible_cpus());
+out:
+ return result;
}
/**
 * Finalization counterpart of cl_global_init().
*/
void cl_global_fini(void)
{
- cl_lock_fini();
- cl_page_fini();
- lu_context_key_degister(&cl_key);
- lu_kmem_fini(cl_object_caches);
- cl_env_store_fini();
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cl_page_kmem_array); i++) {
+ if (cl_page_kmem_array[i]) {
+ kmem_cache_destroy(cl_page_kmem_array[i]);
+ cl_page_kmem_array[i] = NULL;
+ }
+ }
+ cl_env_percpu_fini();
+ lu_context_key_degister(&cl_key);
+ lu_kmem_fini(cl_object_caches);
+ OBD_FREE_PTR_ARRAY(cl_envs, num_possible_cpus());
}