There are several places in lustre where "list_del" (or occasionally
"list_del_init") is followed by "list_add" or "list_add_tail" which
moves the object to a different list.
These can be combined into "list_move" or "list_move_tail".
Test-Parameters: trivial testlist=sanity-lnet
Change-Id: I481de128ea40928186f78a0a0cc26e89b43f1645
Signed-off-by: NeilBrown <neilb@suse.com>
Reviewed-on: https://review.whamcloud.com/36339
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Reviewed-by: Shaun Tancheff <stancheff@cray.com>
Reviewed-by: Petros Koutoupis <pkoutoupis@cray.com>
Reviewed-by: Amir Shehata <ashehata@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
fpo_list);
fpo->fpo_failed = 1;
fpo_list);
fpo->fpo_failed = 1;
- list_del(&fpo->fpo_list);
if (fpo->fpo_map_count == 0)
if (fpo->fpo_map_count == 0)
- list_add(&fpo->fpo_list, zombies);
+ list_move(&fpo->fpo_list, zombies);
- list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
+ list_move(&fpo->fpo_list, &fps->fps_failed_pool_list);
}
spin_unlock(&fps->fps_lock);
}
spin_unlock(&fps->fps_lock);
struct kib_pool, po_list);
po->po_failed = 1;
struct kib_pool, po_list);
po->po_failed = 1;
- list_del(&po->po_list);
if (po->po_allocated == 0)
if (po->po_allocated == 0)
- list_add(&po->po_list, zombies);
+ list_move(&po->po_list, zombies);
- list_add(&po->po_list, &ps->ps_failed_pool_list);
+ list_move(&po->po_list, &ps->ps_failed_pool_list);
}
spin_unlock(&ps->ps_lock);
}
}
spin_unlock(&ps->ps_lock);
}
!list_empty(&conn->ibc_tx_queue_rsrvd)) {
tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
struct kib_tx, tx_list);
!list_empty(&conn->ibc_tx_queue_rsrvd)) {
tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
struct kib_tx, tx_list);
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+ list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
conn->ibc_reserved_credits--;
}
*/
if (tx->tx_sending == 0) {
tx->tx_queued = 0;
*/
if (tx->tx_sending == 0) {
tx->tx_queued = 0;
- list_del(&tx->tx_list);
- list_add(&tx->tx_list, &zombies);
+ list_move(&tx->tx_list, &zombies);
tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_zc_aborted = 1; /* mark it as not-acked */
tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_zc_aborted = 1; /* mark it as not-acked */
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
+ list_move(&tx->tx_zc_list, &zlist);
}
spin_unlock(&peer_ni->ksnp_lock);
}
spin_unlock(&peer_ni->ksnp_lock);
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &stale_txs);
+ list_move_tail(&tx->tx_list, &stale_txs);
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
tx->tx_msg.ksm_zc_cookies[0] = 0;
if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
+ list_move(&tx->tx_zc_list, &zlist);
list_for_each_safe(t, t2, &current_nets) {
tb = list_entry(t, struct lnet_text_buf, ltb_list);
list_for_each_safe(t, t2, &current_nets) {
tb = list_entry(t, struct lnet_text_buf, ltb_list);
- list_del(&tb->ltb_list);
- list_add_tail(&tb->ltb_list, &matched_nets);
+ list_move_tail(&tb->ltb_list, &matched_nets);
len += snprintf(networks + len, sizeof(networks) - len,
"%s%s", (len == 0) ? "" : ",",
len += snprintf(networks + len, sizeof(networks) - len,
"%s%s", (len == 0) ? "" : ",",
if (tp->tp_threshold == 0 || /* needs culling anyway */
nid == LNET_NID_ANY || /* removing all entries */
tp->tp_nid == nid) { /* matched this one */
if (tp->tp_threshold == 0 || /* needs culling anyway */
nid == LNET_NID_ANY || /* removing all entries */
tp->tp_nid == nid) { /* matched this one */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
+ list_move(&tp->tp_list, &cull);
/* only cull zombies on outgoing tests,
* since we may be at interrupt priority on
* incoming messages. */
/* only cull zombies on outgoing tests,
* since we may be at interrupt priority on
* incoming messages. */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
+ list_move(&tp->tp_list, &cull);
if (outgoing &&
tp->tp_threshold == 0) {
/* see above */
if (outgoing &&
tp->tp_threshold == 0) {
/* see above */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
+ list_move(&tp->tp_list, &cull);
unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
LST_NODE_HASHSIZE;
unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
LST_NODE_HASHSIZE;
- list_del(&ndl->ndl_hlink);
- list_del(&ndl->ndl_link);
- list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
- list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
+ list_move_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
+ list_move_tail(&ndl->ndl_link, &new->grp_ndl_list);