while (cfs_hash_bd_count_get(&bd) > hw) {
if (unlikely(list_empty(&bkt->ntb_lru)))
break;
- cli = list_entry(bkt->ntb_lru.next,
- struct nrs_tbf_client,
- tc_lru);
+ cli = list_first_entry(&bkt->ntb_lru,
+ struct nrs_tbf_client,
+ tc_lru);
LASSERT(atomic_read(&cli->tc_ref) == 0);
cfs_hash_bd_del_locked(hs, &bd, &cli->tc_hnode);
list_move(&cli->tc_lru, &zombies);
while (cfs_hash_bd_count_get(&bd) > hw) {
if (unlikely(list_empty(&bkt->ntb_lru)))
break;
- cli = list_entry(bkt->ntb_lru.next,
- struct nrs_tbf_client,
- tc_lru);
+ cli = list_first_entry(&bkt->ntb_lru,
+ struct nrs_tbf_client,
+ tc_lru);
LASSERT(atomic_read(&cli->tc_ref) == 0);
cfs_hash_bd_del_locked(hs, &bd, &cli->tc_hnode);
list_move(&cli->tc_lru, &zombies);
cli = container_of(node, struct nrs_tbf_client, tc_node);
LASSERT(cli->tc_in_heap);
if (unlikely(peek)) {
- nrq = list_entry(cli->tc_list.next,
- struct ptlrpc_nrs_request,
- nr_u.tbf.tr_list);
+ nrq = list_first_entry(&cli->tc_list,
+ struct ptlrpc_nrs_request,
+ nr_u.tbf.tr_list);
} else {
struct nrs_tbf_rule *rule = cli->tc_rule;
__u64 now = ktime_to_ns(ktime_get());
ntoken = 1;
if (ntoken > 0) {
-			nrq = list_entry(cli->tc_list.next,
-					 struct ptlrpc_nrs_request,
-					 nr_u.tbf.tr_list);
+			nrq = list_first_entry(&cli->tc_list,
+					       struct ptlrpc_nrs_request,
+					       nr_u.tbf.tr_list);
ntoken--;