struct lnet_test_peer *tp;
struct list_head *el;
struct list_head *next;
- struct list_head cull;
+ LIST_HEAD(cull);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
return 0;
}
- /* removing entries */
- INIT_LIST_HEAD(&cull);
-
lnet_net_lock(0);
list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
struct lnet_test_peer *tp;
struct list_head *el;
struct list_head *next;
- struct list_head cull;
- int fail = 0;
-
- INIT_LIST_HEAD(&cull);
+ LIST_HEAD(cull);
+ int fail = 0;
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
lnet_net_lock(0);
unsigned int nob)
{
/* NB diov, siov are READ-ONLY */
- unsigned int this_nob;
+ unsigned int this_nob;
if (nob == 0)
return;
do {
LASSERT(ndiov > 0);
LASSERT(nsiov > 0);
- this_nob = MIN(diov->iov_len - doffset,
- siov->iov_len - soffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min3((unsigned int)diov->iov_len - doffset,
+ (unsigned int)siov->iov_len - soffset,
+ nob);
memcpy((char *)diov->iov_base + doffset,
(char *)siov->iov_base + soffset, this_nob);
do {
LASSERT(ndiov > 0);
LASSERT(nsiov > 0);
- this_nob = MIN(diov->kiov_len - doffset,
- siov->kiov_len - soffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min3(diov->kiov_len - doffset,
+ siov->kiov_len - soffset,
+ nob);
if (daddr == NULL)
daddr = ((char *)kmap(diov->kiov_page)) +
do {
LASSERT(niov > 0);
LASSERT(nkiov > 0);
- this_nob = MIN(iov->iov_len - iovoffset,
- kiov->kiov_len - kiovoffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min3((unsigned int)iov->iov_len - iovoffset,
+ (unsigned int)kiov->kiov_len - kiovoffset,
+ nob);
if (addr == NULL)
addr = ((char *)kmap(kiov->kiov_page)) +
do {
LASSERT(nkiov > 0);
LASSERT(niov > 0);
- this_nob = MIN(kiov->kiov_len - kiovoffset,
- iov->iov_len - iovoffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min3((unsigned int)kiov->kiov_len - kiovoffset,
+ (unsigned int)iov->iov_len - iovoffset,
+ nob);
if (addr == NULL)
addr = ((char *)kmap(kiov->kiov_page)) +
/* drop all messages which are queued to be routed on that
* peer. */
if (!the_lnet.ln_routing) {
- struct list_head drop;
- INIT_LIST_HEAD(&drop);
+ LIST_HEAD(drop);
list_splice_init(&lp->lp_rtrq, &drop);
spin_unlock(&lp->lp_lock);
spin_unlock(&rxpeerni->lpni_lock);
}
/*
 * Record the local NI as the preferred source NID on a non-multi-rail
 * (NMR) peer NI, so subsequent sends to this peer keep using the same
 * local interface.
 *
 * NOTE(review): this diff hunk changes the signature from taking the
 * whole lnet_send_data to taking the peer NI, local NI and message
 * explicitly; the NMR_DST check was dropped here and is presumably
 * performed by the callers now — confirm against the call sites.
 *
 * The preferred NID is only set when:
 *  - the message is not a REPLY or ACK (those must go back the way the
 *    originating request came, so they must not pin a source NID), and
 *  - no preferred NIDs have been recorded yet (lpni_pref_nnids == 0).
 */
static inline void
-lnet_set_non_mr_pref_nid(struct lnet_send_data *sd)
+lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
+ struct lnet_msg *msg)
{
-	if (sd->sd_send_case & NMR_DST &&
-	sd->sd_msg->msg_type != LNET_MSG_REPLY &&
-	sd->sd_msg->msg_type != LNET_MSG_ACK &&
-	sd->sd_best_lpni->lpni_pref_nnids == 0) {
+	if (msg->msg_type != LNET_MSG_REPLY && msg->msg_type != LNET_MSG_ACK &&
+	lpni->lpni_pref_nnids == 0) {
CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
-	libcfs_nid2str(sd->sd_best_ni->ni_nid),
-	libcfs_nid2str(sd->sd_best_lpni->lpni_nid));
-	lnet_peer_ni_set_non_mr_pref_nid(sd->sd_best_lpni,
-	sd->sd_best_ni->ni_nid);
+	libcfs_nid2str(lni->ni_nid),
+	libcfs_nid2str(lpni->lpni_nid));
+	lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid);
}
}
return -EINVAL;
}
- /*
- * the preferred NID will only be set for NMR peers
- */
- lnet_set_non_mr_pref_nid(sd);
+ lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
return lnet_handle_send(sd);
}
if (sd->sd_send_case & NMR_DST)
/*
- * since the final destination is non-MR let's set its preferred
- * NID before we send
- */
- lnet_set_non_mr_pref_nid(sd);
+ * since the final destination is non-MR let's set its preferred
+ * NID before we send
+ */
+ lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
+ sd->sd_msg);
/*
* We're going to send to the gw found so let's set its
sd->sd_best_ni = best_ni;
/* Set preferred NI if necessary. */
- lnet_set_non_mr_pref_nid(sd);
+ lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
return 0;
}
* set the best_ni we've chosen as the preferred one for
* this peer
*/
- lnet_set_non_mr_pref_nid(sd);
+ lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
/* we'll be sending to the gw */
sd->sd_best_lpni = gw_lpni;
lnet_finalize_expired_responses(void)
{
struct lnet_libmd *md;
- struct list_head local_queue;
struct lnet_rsp_tracker *rspt, *tmp;
ktime_t now;
int i;
return;
cfs_cpt_for_each(i, lnet_cpt_table()) {
- INIT_LIST_HEAD(&local_queue);
+ LIST_HEAD(local_queue);
lnet_net_lock(i);
if (!the_lnet.ln_mt_rstq[i]) {
lnet_recover_local_nis(void)
{
struct lnet_mt_event_info *ev_info;
- struct list_head processed_list;
- struct list_head local_queue;
+ LIST_HEAD(processed_list);
+ LIST_HEAD(local_queue);
struct lnet_handle_md mdh;
struct lnet_ni *tmp;
struct lnet_ni *ni;
int healthv;
int rc;
- INIT_LIST_HEAD(&local_queue);
- INIT_LIST_HEAD(&processed_list);
-
/*
* splice the recovery queue on a local queue. We will iterate
* through the local queue and update it as needed. Once we're
lnet_clean_resendqs(void)
{
struct lnet_msg *msg, *tmp;
- struct list_head msgs;
+ LIST_HEAD(msgs);
int i;
- INIT_LIST_HEAD(&msgs);
-
cfs_cpt_for_each(i, lnet_cpt_table()) {
lnet_net_lock(i);
list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
lnet_recover_peer_nis(void)
{
struct lnet_mt_event_info *ev_info;
- struct list_head processed_list;
- struct list_head local_queue;
+ LIST_HEAD(processed_list);
+ LIST_HEAD(local_queue);
struct lnet_handle_md mdh;
struct lnet_peer_ni *lpni;
struct lnet_peer_ni *tmp;
int healthv;
int rc;
- INIT_LIST_HEAD(&local_queue);
- INIT_LIST_HEAD(&processed_list);
-
/*
* Always use cpt 0 for locking across all interactions with
* ln_mt_peerNIRecovq
struct lnet_hdr *hdr = &msg->msg_hdr;
struct lnet_process_id src = {0};
struct lnet_libmd *md;
- int rlength;
- int mlength;
+ unsigned int rlength;
+ unsigned int mlength;
int cpt;
cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
LASSERT(md->md_offset == 0);
rlength = hdr->payload_length;
- mlength = MIN(rlength, (int)md->md_length);
+ mlength = min(rlength, md->md_length);
if (mlength < rlength &&
(md->md_options & LNET_MD_TRUNCATE) == 0) {