cfs_hlist_node_t *cfs_hash_bd_lookup_locked(cfs_hash_t *hs,
cfs_hash_bd_t *bd, const void *key);
+cfs_hlist_node_t *cfs_hash_bd_peek_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, const void *key);
cfs_hlist_node_t *cfs_hash_bd_findadd_locked(cfs_hash_t *hs,
cfs_hash_bd_t *bd, const void *key,
cfs_hlist_node_t *hnode,
CFS_EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
cfs_hlist_node_t *
+/*
+ * Peek-style lookup: find @key in bucket @bd by delegating to
+ * cfs_hash_bd_lookup_intent() with CFS_HS_LOOKUP_IT_PEEK, which does
+ * NOT take a reference on the found object (unlike
+ * cfs_hash_bd_lookup_locked()).  Returns the matching hlist node, or
+ * NULL if @key is not present.
+ *
+ * NOTE(review): the "_locked" suffix suggests the caller must already
+ * hold the bucket lock -- confirm against the cfs_hash locking rules.
+ */
+cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+{
+ return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
+ CFS_HS_LOOKUP_IT_PEEK);
+}
+CFS_EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
+
+cfs_hlist_node_t *
cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
const void *key, cfs_hlist_node_t *hnode,
int noref)
*version = ver;
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
- /* cfs_hash_bd_lookup_intent is a somehow "internal" function
- * of cfs_hash, but we don't want refcount on object right now */
- hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f);
+ /* cfs_hash_bd_peek_locked is a somewhat "internal" function
+ * of cfs_hash; it does not add a refcount on the object. */
+ hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
if (hnode == NULL) {
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
return NULL;
h = container_of0(hnode, struct lu_object_header, loh_hash);
if (likely(!lu_object_is_dying(h))) {
+ cfs_hash_get(s->ls_obj_hash, hnode);
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
cfs_list_del_init(&h->loh_lru);
return lu_object_top(h);
* returned (to assure that references to dying objects are eventually
* drained), and moreover, lookup has to wait until object is freed.
*/
- cfs_atomic_dec(&h->loh_ref);
cfs_waitlink_init(waiter);
cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);