* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*
* Locking order (outermost first):
*
*  i_mutex
*      PG_locked
- *          ->coh_lock_guard
*          ->coh_attr_guard
*          ->ls_guard
*/
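/*
 * Illustrative sketch of the ordering above (function and variable names
 * are hypothetical, not from this file): a path that needs both the VFS
 * inode mutex and the attribute guard must take i_mutex first.
 */
static void example_lock_order(struct inode *inode, struct cl_object *obj)
{
	mutex_lock(&inode->i_mutex);	/* outermost: i_mutex */
	cl_object_attr_lock(obj);	/* inner: ->coh_attr_guard */
	/* ... read or update cached attributes here ... */
	cl_object_attr_unlock(obj);
	mutex_unlock(&inode->i_mutex);
}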
#include <libcfs/list.h>
#include <libcfs/libcfs_hash.h> /* for cfs_hash stuff */
#include <cl_object.h>
+#include <lu_object.h>
#include "cl_internal.h"
static struct kmem_cache *cl_env_kmem;
-/** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;
-extern __u32 lu_context_tags_default;
-extern __u32 lu_session_tags_default;
/**
* Initialize cl_object_header.
*/
ENTRY;
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
- spin_lock_init(&h->coh_lock_guard);
spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- INIT_LIST_HEAD(&h->coh_locks);
h->coh_page_bufsize = 0;
}
RETURN(result);
*/
void cl_object_header_fini(struct cl_object_header *h)
{
- LASSERT(list_empty(&h->coh_locks));
lu_object_header_fini(&h->coh_lu);
}
EXPORT_SYMBOL(cl_object_header_fini);
*
* Prevents data-attributes from changing until the lock is released by
* cl_object_attr_unlock(). This has to be called before calls to
- * cl_object_attr_get(), cl_object_attr_set().
+ * cl_object_attr_get() and cl_object_attr_update().
*/
void cl_object_attr_lock(struct cl_object *o)
__acquires(cl_object_attr_guard(o))
* Updates data-attributes of an object \a obj.
*
* Only attributes mentioned in a validness bit-mask \a v are
- * updated. Calls cl_object_operations::coo_attr_set() on every layer, bottom
+ * updated. Calls cl_object_operations::coo_attr_update() on every layer, bottom
* to top.
*/
-int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned v)
+int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned v)
{
struct lu_object_header *top;
int result;
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_set != NULL) {
- result = obj->co_ops->coo_attr_set(env, obj, attr, v);
+ if (obj->co_ops->coo_attr_update != NULL) {
+ result = obj->co_ops->coo_attr_update(env, obj, attr,
+ v);
if (result != 0) {
if (result > 0)
result = 0;
break;
}
}
}
RETURN(result);
}
-EXPORT_SYMBOL(cl_object_attr_set);
+EXPORT_SYMBOL(cl_object_attr_update);
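/*
 * Illustrative usage sketch (hypothetical helper, not from this patch):
 * the attribute guard must be held across cl_object_attr_update(), and
 * only the fields selected by the validness mask are pushed down the
 * layer stack.
 */
static int example_set_size(const struct lu_env *env, struct cl_object *obj,
			    loff_t size)
{
	struct cl_attr attr = { .cat_size = size };
	int rc;

	cl_object_attr_lock(obj);
	rc = cl_object_attr_update(env, obj, &attr, CAT_SIZE);
	cl_object_attr_unlock(obj);
	return rc;
}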
/**
* Notifies layers (bottom-to-top) that a glimpse AST was received.
/**
* Prunes caches of pages and locks for this object.
*/
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
+int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
struct lu_object_header *top;
struct cl_object *o;
int result;
}
}
- /* TODO: pruning locks will be moved into layers after cl_lock
- * simplification is done */
- cl_locks_prune(env, obj, 1);
- EXIT;
+ RETURN(result);
}
EXPORT_SYMBOL(cl_object_prune);
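/*
 * Illustrative layer sketch (hypothetical names): with lock pruning moved
 * into the layers, each layer provides a coo_prune() method that
 * cl_object_prune() calls top to bottom, stopping at the first non-zero
 * return.
 */
static int my_layer_prune(const struct lu_env *env, struct cl_object *obj)
{
	/* drop this layer's cached pages and locks here */
	return 0;
}

static const struct cl_object_operations my_layer_ops = {
	.coo_prune = my_layer_prune,
};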
/**
+ * Get stripe information of this object.
+ */
+int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
+ struct lov_user_md __user *uarg)
+{
+ struct lu_object_header *top;
+ int result = 0;
+ ENTRY;
+
+ top = obj->co_lu.lo_header;
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_getstripe != NULL) {
+ result = obj->co_ops->coo_getstripe(env, obj, uarg);
+ if (result != 0)
+ break;
+ }
+ }
+ RETURN(result);
+}
+EXPORT_SYMBOL(cl_object_getstripe);
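/*
 * Illustrative layer sketch (hypothetical stub): in practice only the
 * striping-aware layer implements coo_getstripe(), copying the layout
 * descriptor into the user-space buffer.
 */
static int my_layer_getstripe(const struct lu_env *env, struct cl_object *obj,
			      struct lov_user_md __user *uarg)
{
	struct lov_user_md lum = { .lmm_magic = LOV_USER_MAGIC_V1 };

	if (copy_to_user(uarg, &lum, sizeof(lum)))
		return -EFAULT;
	return 0;
}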
+
+/**
- * Helper function removing all object locks, and marking object for
- * deletion. All object pages must have been deleted at this point.
+ * Helper function marking the object for deletion. All object pages must
+ * have been deleted at this point; lock cleanup is now done by each
+ * layer's cl_object_operations::coo_prune() method.
*
*/
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(obj);
+ struct cl_object_header *hdr = cl_object_header(obj);
set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
- /*
- * Destroy all locks. Object destruction (including cl_inode_fini())
- * cannot cancel the locks, because in the case of a local client,
- * where client and server share the same thread running
- * prune_icache(), this can dead-lock with ldlm_cancel_handler()
- * waiting on __wait_on_freeing_inode().
- */
- cl_locks_prune(env, obj, 0);
}
EXPORT_SYMBOL(cl_object_kill);
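/*
 * Illustrative check (hypothetical helper): LU_OBJECT_HEARD_BANSHEE is
 * the flag tested by lu_object_is_dying(), so callers can detect a
 * killed object through the lu_object_header embedded in the cl header.
 */
static bool example_is_killed(struct cl_object *obj)
{
	return lu_object_is_dying(&cl_object_header(obj)->coh_lu);
}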
-/**
- * Check if the object has locks.
- */
-int cl_object_has_locks(struct cl_object *obj)
-{
- struct cl_object_header *head = cl_object_header(obj);
- int has;
-
- spin_lock(&head->coh_lock_guard);
- has = list_empty(&head->coh_locks);
- spin_unlock(&head->coh_lock_guard);
-
- return (has == 0);
-}
-EXPORT_SYMBOL(cl_object_has_locks);
-
void cache_stats_init(struct cache_stats *cs, const char *name)
{
int i;
atomic_set(&cs->cs_stats[i], 0);
}
-int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h)
+static int cache_stats_print(const struct cache_stats *cs,
+ struct seq_file *m, int h)
{
int i;
*/
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
- int i;
+ size_t i;
int result;
result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
if (result == 0) {
cache_stats_init(&s->cs_pages, "pages");
- cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
- atomic_set(&s->cs_pages_state[0], 0);
+ atomic_set(&s->cs_pages_state[i], 0);
- for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- atomic_set(&s->cs_locks_state[i], 0);
cl_env_percpu_refill();
}
return result;
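/*
 * Illustrative usage sketch (hypothetical wrapper): cl_site_init() is
 * paired with cl_site_fini() on the teardown path, after the lu_site is
 * fully set up.
 */
static int example_site_setup(struct cl_site *site, struct cl_device *dev)
{
	int rc;

	rc = cl_site_init(site, dev);
	if (rc != 0)
		return rc;
	rc = lu_site_init_finish(&site->cs_lu);
	if (rc != 0)
		cl_site_fini(site);
	return rc;
}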
[CPS_PAGEIN] = "r",
[CPS_FREEING] = "f"
};
- static const char *lstate[] = {
- [CLS_NEW] = "n",
- [CLS_QUEUING] = "q",
- [CLS_ENQUEUED] = "e",
- [CLS_HELD] = "h",
- [CLS_INTRANSIT] = "t",
- [CLS_CACHED] = "c",
- [CLS_FREEING] = "f"
- };
- int i;
+ size_t i;
/*
lookup hit total busy create
seq_printf(m, "%s: %u ", pstate[i],
atomic_read(&site->cs_pages_state[i]));
seq_printf(m, "]\n");
- cache_stats_print(&site->cs_locks, m, 0);
- seq_printf(m, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
- seq_printf(m, "%s: %u ", lstate[i],
- atomic_read(&site->cs_locks_state[i]));
- seq_printf(m, "]\n");
cache_stats_print(&cl_env_stats, m, 0);
seq_printf(m, "\n");
return 0;
}
EXPORT_SYMBOL(cl_stack_fini);
-int cl_lock_init(void);
-void cl_lock_fini(void);
-
-int cl_page_init(void);
-void cl_page_fini(void);
-
static struct lu_context_key cl_key;
struct cl_thread_info *cl_env_info(const struct lu_env *env)
info = cl0_key_init(ctx, key);
if (!IS_ERR(info)) {
- int i;
+ size_t i;
for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
static void cl_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
{
- struct cl_thread_info *info;
- int i;
+ struct cl_thread_info *info;
+ size_t i;
info = data;
for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
static void cl_key_exit(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
{
- struct cl_thread_info *info = data;
- int i;
+ struct cl_thread_info *info = data;
+ size_t i;
for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
LASSERT(info->clt_counters[i].ctc_nr_held == 0);
if (result)
goto out_kmem;
- result = cl_lock_init();
- if (result)
- goto out_context;
-
- result = cl_page_init();
- if (result)
- goto out_lock;
-
result = cl_env_percpu_init();
if (result)
/* no cl_env_percpu_fini on error */
- goto out_lock;
+ goto out_context;
return 0;
-out_lock:
- cl_lock_fini();
+
out_context:
lu_context_key_degister(&cl_key);
out_kmem:
void cl_global_fini(void)
{
cl_env_percpu_fini();
- cl_lock_fini();
- cl_page_fini();
lu_context_key_degister(&cl_key);
lu_kmem_fini(cl_object_caches);
cl_env_store_fini();