#include <lu_ref.h>
#include <libcfs/list.h>
-extern spinlock_t obd_types_lock;
-
enum {
LU_CACHE_PERCENT_MAX = 50,
LU_CACHE_PERCENT_DEFAULT = 20
if (fid_is_zero(fid)) {
LASSERT(top->loh_hash.next == NULL
&& top->loh_hash.pprev == NULL);
- LASSERT(cfs_list_empty(&top->loh_lru));
+ LASSERT(list_empty(&top->loh_lru));
if (!atomic_dec_and_test(&top->loh_ref))
return;
- cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
o->lo_ops->loo_object_release(env, o);
}
 * When the last reference is released, iterate over object
 * layers and notify them that the object is no longer busy.
*/
- cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
o->lo_ops->loo_object_release(env, o);
}
if (!lu_object_is_dying(top)) {
- LASSERT(cfs_list_empty(&top->loh_lru));
- cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
+ LASSERT(list_empty(&top->loh_lru));
+ list_add_tail(&top->loh_lru, &bkt->lsb_lru);
cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
return;
}
cfs_hash_bd_t bd;
cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
- cfs_list_del_init(&top->loh_lru);
+ list_del_init(&top->loh_lru);
cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
cfs_hash_bd_unlock(obj_hash, &bd, 1);
}
{
struct lu_object *scan;
struct lu_object *top;
- cfs_list_t *layers;
+ struct list_head *layers;
unsigned int init_mask = 0;
unsigned int init_flag;
int clean;
*/
clean = 1;
init_flag = 1;
- cfs_list_for_each_entry(scan, layers, lo_linkage) {
+ list_for_each_entry(scan, layers, lo_linkage) {
if (init_mask & init_flag)
goto next;
clean = 0;
}
} while (!clean);
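+	/*
+	 * All slices are allocated; let each of them finish initialization
+	 * via ->loo_object_start(), iterating bottom-to-top.
+	 */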
- cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_start != NULL) {
result = scan->lo_ops->loo_object_start(env, scan);
if (result != 0) {
*/
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
- struct lu_site_bkt_data *bkt;
- struct lu_site *site;
- struct lu_object *scan;
- cfs_list_t *layers;
- cfs_list_t splice;
+ struct lu_site_bkt_data *bkt;
+ struct lu_site *site;
+ struct lu_object *scan;
+ struct list_head *layers;
+ struct list_head splice;
site = o->lo_dev->ld_site;
layers = &o->lo_header->loh_layers;
/*
* First call ->loo_object_delete() method to release all resources.
*/
- cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_delete != NULL)
scan->lo_ops->loo_object_delete(env, scan);
}
* necessary, because lu_object_header is freed together with the
* top-level slice.
*/
- CFS_INIT_LIST_HEAD(&splice);
- cfs_list_splice_init(layers, &splice);
- while (!cfs_list_empty(&splice)) {
+ INIT_LIST_HEAD(&splice);
+ list_splice_init(layers, &splice);
+ while (!list_empty(&splice)) {
/*
* Free layers in bottom-to-top order, so that object header
* lives as long as possible and ->loo_object_free() methods
* can look at its contents.
*/
o = container_of0(splice.prev, struct lu_object, lo_linkage);
- cfs_list_del_init(&o->lo_linkage);
+ list_del_init(&o->lo_linkage);
LASSERT(o->lo_ops->loo_object_free != NULL);
o->lo_ops->loo_object_free(env, o);
}
struct lu_site_bkt_data *bkt;
cfs_hash_bd_t bd;
cfs_hash_bd_t bd2;
- cfs_list_t dispose;
+ struct list_head dispose;
int did_sth;
int start;
int count;
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
RETURN(0);
- CFS_INIT_LIST_HEAD(&dispose);
+ INIT_LIST_HEAD(&dispose);
/*
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
start = s->ls_purge_start;
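+	/*
+	 * Spread the purge budget evenly across the hash buckets; nr == ~0
+	 * requests an unbounded purge of the whole site.
+	 */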
bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
+ /*
+ * It makes no sense to run purge threads in parallel; that can only
+ * bring trouble. See LU-5331.
+ */
+ mutex_lock(&s->ls_purge_mutex);
did_sth = 0;
cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
if (i < start)
cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
- cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
+ list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
LASSERT(atomic_read(&h->loh_ref) == 0);
cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
cfs_hash_bd_del_locked(s->ls_obj_hash,
&bd2, &h->loh_hash);
- cfs_list_move(&h->loh_lru, &dispose);
+ list_move(&h->loh_lru, &dispose);
if (did_sth == 0)
did_sth = 1;
* Free everything on the dispose list. This is safe against
* races due to the reasons described in lu_object_put().
*/
- while (!cfs_list_empty(&dispose)) {
- h = container_of0(dispose.next,
- struct lu_object_header, loh_lru);
- cfs_list_del_init(&h->loh_lru);
- lu_object_free(env, lu_object_top(h));
- lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
- }
+ while (!list_empty(&dispose)) {
+ h = container_of0(dispose.next,
+ struct lu_object_header, loh_lru);
+ list_del_init(&h->loh_lru);
+ lu_object_free(env, lu_object_top(h));
+ lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
+ }
if (nr == 0)
break;
}
+ mutex_unlock(&s->ls_purge_mutex);
if (nr != 0 && did_sth && start != 0) {
start = 0; /* restart from the first bucket */
vsnprintf(key->lck_area + used,
ARRAY_SIZE(key->lck_area) - used, format, args);
if (complete) {
- if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
- libcfs_debug_msg(msgdata, "%s", key->lck_area);
+ if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
+ libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
key->lck_area[0] = 0;
}
va_end(args);
lu_printer_t printer,
const struct lu_object_header *hdr)
{
- (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
+ (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
- PFID(&hdr->loh_fid),
- cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
- cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \
- "" : " lru",
- hdr->loh_attr & LOHA_EXISTS ? " exist":"");
+ PFID(&hdr->loh_fid),
+ hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
+ list_empty((struct list_head *)&hdr->loh_lru) ? "" : " lru",
+ hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
lu_object_header_print(env, cookie, printer, top);
(*printer)(env, cookie, "{\n");
- cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ list_for_each_entry(o, &top->loh_layers, lo_linkage) {
/*
* print `.' \a depth times followed by type name and address
*/
struct lu_object_header *top;
top = o->lo_header;
- cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ list_for_each_entry(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_invariant != NULL &&
!o->lo_ops->loo_object_invariant(o))
return 0;
wait_queue_t *waiter,
__u64 *version)
{
- struct lu_site_bkt_data *bkt;
- struct lu_object_header *h;
- cfs_hlist_node_t *hnode;
- __u64 ver = cfs_hash_bd_version_get(bd);
+ struct lu_site_bkt_data *bkt;
+ struct lu_object_header *h;
+ struct hlist_node *hnode;
+ __u64 ver = cfs_hash_bd_version_get(bd);
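+	/*
+	 * If the bucket version is unchanged since the caller's last scan,
+	 * rescanning cannot find anything new.
+	 */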
if (*version == ver)
return ERR_PTR(-ENOENT);
if (likely(!lu_object_is_dying(h))) {
cfs_hash_get(s->ls_obj_hash, hnode);
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
- cfs_list_del_init(&h->loh_lru);
+ list_del_init(&h->loh_lru);
return lu_object_top(h);
}
cfs_hash_bd_t *bd,
const struct lu_fid *f)
{
- cfs_hlist_node_t *hnode;
+ struct hlist_node *hnode;
struct lu_object_header *h;
/* cfs_hash_bd_peek_locked is a somewhat "internal" function
cfs_hash_get(s->ls_obj_hash, hnode);
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
- cfs_list_del_init(&h->loh_lru);
+ list_del_init(&h->loh_lru);
return lu_object_top(h);
}
/**
* Global list of all device types.
*/
-static CFS_LIST_HEAD(lu_device_types);
+static LIST_HEAD(lu_device_types);
int lu_device_type_init(struct lu_device_type *ldt)
{
/**
* Global list of all sites on this node
*/
-static CFS_LIST_HEAD(lu_sites);
+static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);
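+/* lu_sites_guard protects the lu_sites list and access to the shared
+ * lu_shrink_env. */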
/**
static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
- struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
- struct lu_object_header *h;
+ struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
+ struct lu_object_header *h;
- h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
- if (!cfs_list_empty(&h->loh_layers)) {
- const struct lu_object *o;
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ if (!list_empty(&h->loh_layers)) {
+ const struct lu_object *o;
- o = lu_object_top(h);
- lu_object_print(arg->lsp_env, arg->lsp_cookie,
- arg->lsp_printer, o);
- } else {
- lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
- arg->lsp_printer, h);
- }
- return 0;
+ o = lu_object_top(h);
+ lu_object_print(arg->lsp_env, arg->lsp_cookie,
+ arg->lsp_printer, o);
+ } else {
+ lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
+ arg->lsp_printer, h);
+ }
+ return 0;
}
/**
return hash & mask;
}
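+/*
+ * cfs_hash callbacks for the FID -> lu_object_header hash table: map an
+ * hlist node back to its enclosing header, expose the FID as the hash
+ * key, and compare keys with lu_fid_eq().
+ */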
-static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
+static void *lu_obj_hop_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
+ return hlist_entry(hnode, struct lu_object_header, loh_hash);
}
-static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
+static void *lu_obj_hop_key(struct hlist_node *hnode)
{
- struct lu_object_header *h;
+ struct lu_object_header *h;
- h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
- return &h->loh_fid;
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ return &h->loh_fid;
}
-static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
- struct lu_object_header *h;
+ struct lu_object_header *h;
- h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
- return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}
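+/*
+ * Called by cfs_hash to take a reference on a cached object; the first
+ * reference also counts the object as busy in its hash bucket.
+ */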
-static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- struct lu_object_header *h;
+ struct lu_object_header *h;
- h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
if (atomic_add_return(1, &h->loh_ref) == 1) {
- struct lu_site_bkt_data *bkt;
- cfs_hash_bd_t bd;
+ struct lu_site_bkt_data *bkt;
+ cfs_hash_bd_t bd;
- cfs_hash_bd_get(hs, &h->loh_fid, &bd);
- bkt = cfs_hash_bd_extra_get(hs, &bd);
- bkt->lsb_busy++;
- }
+ cfs_hash_bd_get(hs, &h->loh_fid, &bd);
+ bkt = cfs_hash_bd_extra_get(hs, &bd);
+ bkt->lsb_busy++;
+ }
}
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
LBUG(); /* we should never call it */
}
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
spin_lock(&s->ls_ld_lock);
- if (cfs_list_empty(&d->ld_linkage))
- cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage);
+ if (list_empty(&d->ld_linkage))
+ list_add(&d->ld_linkage, &s->ls_ld_linkage);
spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);
void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
spin_lock(&s->ls_ld_lock);
- cfs_list_del_init(&d->ld_linkage);
+ list_del_init(&d->ld_linkage);
spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);
int i;
ENTRY;
memset(s, 0, sizeof *s);
+ mutex_init(&s->ls_purge_mutex);
bits = lu_htable_order(top);
snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
- CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
+ INIT_LIST_HEAD(&bkt->lsb_lru);
init_waitqueue_head(&bkt->lsb_marche_funebre);
}
lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
0, "lru_purged", "lru_purged");
- CFS_INIT_LIST_HEAD(&s->ls_linkage);
+ INIT_LIST_HEAD(&s->ls_linkage);
s->ls_top_dev = top;
top->ld_site = s;
lu_device_get(top);
lu_ref_add(&top->ld_reference, "site-top", s);
- CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
+ INIT_LIST_HEAD(&s->ls_ld_linkage);
spin_lock_init(&s->ls_ld_lock);
lu_dev_add_linkage(s, top);
void lu_site_fini(struct lu_site *s)
{
mutex_lock(&lu_sites_guard);
- cfs_list_del_init(&s->ls_linkage);
+ list_del_init(&s->ls_linkage);
mutex_unlock(&lu_sites_guard);
if (s->ls_obj_hash != NULL) {
mutex_lock(&lu_sites_guard);
result = lu_context_refill(&lu_shrink_env.le_ctx);
if (result == 0)
- cfs_list_add(&s->ls_linkage, &lu_sites);
+ list_add(&s->ls_linkage, &lu_sites);
mutex_unlock(&lu_sites_guard);
return result;
}
o->lo_dev = d;
lu_device_get(d);
lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
- CFS_INIT_LIST_HEAD(&o->lo_linkage);
+ INIT_LIST_HEAD(&o->lo_linkage);
return 0;
}
{
struct lu_device *dev = o->lo_dev;
- LASSERT(cfs_list_empty(&o->lo_linkage));
+ LASSERT(list_empty(&o->lo_linkage));
if (dev != NULL) {
lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
*/
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
- cfs_list_move(&o->lo_linkage, &h->loh_layers);
+ list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);
*/
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
- cfs_list_move(&o->lo_linkage, &before->lo_linkage);
+ list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
{
memset(h, 0, sizeof *h);
atomic_set(&h->loh_ref, 1);
- CFS_INIT_HLIST_NODE(&h->loh_hash);
- CFS_INIT_LIST_HEAD(&h->loh_lru);
- CFS_INIT_LIST_HEAD(&h->loh_layers);
+ INIT_HLIST_NODE(&h->loh_hash);
+ INIT_LIST_HEAD(&h->loh_lru);
+ INIT_LIST_HEAD(&h->loh_layers);
lu_ref_init(&h->loh_reference);
return 0;
}
*/
void lu_object_header_fini(struct lu_object_header *h)
{
- LASSERT(cfs_list_empty(&h->loh_layers));
- LASSERT(cfs_list_empty(&h->loh_lru));
- LASSERT(cfs_hlist_unhashed(&h->loh_hash));
+ LASSERT(list_empty(&h->loh_layers));
+ LASSERT(list_empty(&h->loh_lru));
+ LASSERT(hlist_unhashed(&h->loh_hash));
lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
struct lu_object *lu_object_locate(struct lu_object_header *h,
const struct lu_device_type *dtype)
{
- struct lu_object *o;
+ struct lu_object *o;
- cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
- if (o->lo_dev->ld_type == dtype)
- return o;
- }
- return NULL;
+ list_for_each_entry(o, &h->loh_layers, lo_linkage) {
+ if (o->lo_dev->ld_type == dtype)
+ return o;
+ }
+ return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
-
-
/**
* Finalize and free devices in the device stack.
*
/**
* List of remembered contexts. XXX document me.
*/
-static CFS_LIST_HEAD(lu_context_remembered);
+static LIST_HEAD(lu_context_remembered);
/**
* Destroy \a key in all remembered contexts. This is used to destroy key
* XXX memory barrier has to go here.
*/
spin_lock(&lu_keys_guard);
- cfs_list_for_each_entry(ctx, &lu_context_remembered,
- lc_remember)
+ list_for_each_entry(ctx, &lu_context_remembered,
+ lc_remember)
key_fini(ctx, key->lct_index);
spin_unlock(&lu_keys_guard);
++key_set_version;
ctx->lc_tags = tags;
if (tags & LCT_REMEMBER) {
spin_lock(&lu_keys_guard);
- cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+ list_add(&ctx->lc_remember, &lu_context_remembered);
spin_unlock(&lu_keys_guard);
} else {
- CFS_INIT_LIST_HEAD(&ctx->lc_remember);
+ INIT_LIST_HEAD(&ctx->lc_remember);
}
rc = keys_init(ctx);
ctx->lc_state = LCS_FINALIZED;
if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
- LASSERT(cfs_list_empty(&ctx->lc_remember));
+ LASSERT(list_empty(&ctx->lc_remember));
keys_fini(ctx);
} else { /* could race with key deregister */
spin_lock(&lu_keys_guard);
keys_fini(ctx);
- cfs_list_del_init(&ctx->lc_remember);
+ list_del_init(&ctx->lc_remember);
spin_unlock(&lu_keys_guard);
}
}
cfs_hash_for_each_bucket(hs, &bd, i) {
struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
- cfs_hlist_head_t *hhead;
+ struct hlist_head *hhead;
cfs_hash_bd_lock(hs, &bd, 1);
stats->lss_busy += bkt->lsb_busy;
}
cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- if (!cfs_hlist_empty(hhead))
+ if (!hlist_empty(hhead))
stats->lss_populated++;
}
cfs_hash_bd_unlock(hs, &bd, 1);
CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
result = lu_ref_global_init();
if (result != 0)
return result;