/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/lib-ptl.c
 *
 * portal & match routines
 *
 * Author: liang@whamcloud.com
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

/* NB: add /proc interfaces in upcoming patches */
int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
module_param(portal_rotor, int, 0644);
MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
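
/*
 * Usage note (a sketch, not authoritative): the rotor mode can be set at
 * module load time, e.g. "options lnet portal_rotor=<mode>" in modprobe
 * configuration, and - because the parameter permissions are 0644 - changed
 * at runtime through /sys/module/lnet/parameters/portal_rotor. The mode
 * values (LNET_PTL_ROTOR_OFF/ON/RR_RT/HASH_RT) are defined in the LNet
 * headers; lnet_mt_of_match() below shows how each mode steers an incoming
 * PUT to a match-table.
 */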
static int
lnet_ptl_match_type(unsigned int index, struct lnet_processid *match_id,
		    __u64 mbits, __u64 ignore_bits)
{
	struct lnet_portal *ptl = the_lnet.ln_portals[index];
	int unique;

	unique = (ignore_bits == 0 &&
		  !LNET_NID_IS_ANY(&match_id->nid) &&
		  match_id->pid != LNET_PID_ANY);

	LASSERT(!lnet_ptl_is_unique(ptl) || !lnet_ptl_is_wildcard(ptl));

	/* prefer to check w/o any lock */
	if (likely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl)))
		goto match;

	/* unset, new portal */
	lnet_ptl_lock(ptl);
	/* check again with lock */
	if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) {
		lnet_ptl_unlock(ptl);
		goto match;
	}

	/* still not set */
	if (unique)
		lnet_ptl_setopt(ptl, LNET_PTL_MATCH_UNIQUE);
	else
		lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD);

	lnet_ptl_unlock(ptl);

	return 1;

 match:
	if ((lnet_ptl_is_unique(ptl) && !unique) ||
	    (lnet_ptl_is_wildcard(ptl) && unique))
		return 0;

	return 1;
}
static void
lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
{
	struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
	int i;

	/* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
	LASSERT(lnet_ptl_is_wildcard(ptl));

	mtable->mt_enabled = 1;

	ptl->ptl_mt_maps[ptl->ptl_mt_nmaps] = cpt;
	for (i = ptl->ptl_mt_nmaps - 1; i >= 0; i--) {
		LASSERT(ptl->ptl_mt_maps[i] != cpt);
		if (ptl->ptl_mt_maps[i] < cpt)
			break;

		/* swap to keep the array ordered */
		ptl->ptl_mt_maps[i + 1] = ptl->ptl_mt_maps[i];
		ptl->ptl_mt_maps[i] = cpt;
	}

	ptl->ptl_mt_nmaps++;
}
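
/*
 * Worked example (illustrative numbers): if ptl_mt_maps[] is {0, 2, 5} with
 * ptl_mt_nmaps == 3 and CPT 3 is enabled, the loop above appends 3 and
 * bubbles it left past 5, ending with {0, 2, 3, 5} and ptl_mt_nmaps == 4.
 * Keeping the array sorted lets lnet_ptl_disable_mt() shift entries out in
 * a single pass.
 */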
static void
lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
{
	struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
	int i;

	/* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
	LASSERT(lnet_ptl_is_wildcard(ptl));

	if (LNET_CPT_NUMBER == 1)
		return; /* never disable the only match-table */

	mtable->mt_enabled = 0;

	LASSERT(ptl->ptl_mt_nmaps > 0 &&
		ptl->ptl_mt_nmaps <= LNET_CPT_NUMBER);

	/* remove it from mt_maps */
	ptl->ptl_mt_nmaps--;
	for (i = 0; i < ptl->ptl_mt_nmaps; i++) {
		if (ptl->ptl_mt_maps[i] >= cpt) /* overwrite it */
			ptl->ptl_mt_maps[i] = ptl->ptl_mt_maps[i + 1];
	}
}
static int
lnet_try_match_md(struct lnet_libmd *md,
		  struct lnet_match_info *info, struct lnet_msg *msg)
{
	/* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
	 * lnet_match_blocked_msg() relies on this to avoid races */
	unsigned int offset;
	unsigned int mlength;
	struct lnet_me *me = md->md_me;

	/* MD exhausted */
	if (lnet_md_exhausted(md))
		return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;

	/* mismatched MD op */
	if ((md->md_options & info->mi_opc) == 0)
		return LNET_MATCHMD_NONE;

	/* mismatched ME matchbits? */
	if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0)
		return LNET_MATCHMD_NONE;
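
	/*
	 * Worked example (illustrative values): with me_match_bits = 0x1234
	 * and me_ignore_bits = 0xff, an incoming mi_mbits = 0x12ab passes
	 * the check above because (0x1234 ^ 0x12ab) & ~0xff == 0x9f & ~0xff
	 * == 0; only bits outside the ignore mask must agree.
	 */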
	/* mismatched ME nid/pid? */
	if (me->me_match_bits & ~me->me_ignore_bits) {
		/* try to accept match based on bits only */
		if ((!LNET_NID_IS_ANY(&me->me_match_id.nid) &&
		     !nid_same(&me->me_match_id.nid, &info->mi_id.nid)) ||
		    CFS_FAIL_CHECK(CFS_FAIL_MATCH_MD_NID)) {
			struct lnet_peer *lp_me, *lp_peer;

			/* check if ME NID matches another NID of same peer */
			lp_me = lnet_find_peer(&me->me_match_id.nid);
			lp_peer = lnet_find_peer(&info->mi_id.nid);

			if (lp_me && lp_peer && (lp_me == lp_peer)) {
				/* Shouldn't happen, but better than dropping
				 * message entirely. Print warning so we know
				 * it happens, and something needs to be fixed.
				 */
				CWARN("message from %s matched %llu with NID mismatch %s accepted (same peer %pK)\n",
				      libcfs_idstr(&info->mi_id),
				      info->mi_mbits,
				      libcfs_nidstr(&me->me_match_id.nid),
				      lp_me);
				lnet_peer_decref_locked(lp_me);
				lnet_peer_decref_locked(lp_peer);
			} else {
				CNETERR("message from %s matched %llu with NID mismatch %s rejected (different peer %pK != %pK)\n",
					libcfs_idstr(&info->mi_id),
					info->mi_mbits,
					libcfs_nidstr(&me->me_match_id.nid),
					lp_me, lp_peer);
				if (lp_me)
					lnet_peer_decref_locked(lp_me);
				if (lp_peer)
					lnet_peer_decref_locked(lp_peer);

				return LNET_MATCHMD_NONE;
			}
		}

		if (me->me_match_id.pid != LNET_PID_ANY &&
		    me->me_match_id.pid != info->mi_id.pid) {
			CNETERR("message from %s matched %llu with PID mismatch %s rejected\n",
				libcfs_idstr(&info->mi_id), info->mi_mbits,
				libcfs_idstr(&me->me_match_id));
			return LNET_MATCHMD_NONE;
		}
	} else {
		/* there were no bits to match, reject on nid/pid mismatch */
		if (!LNET_NID_IS_ANY(&me->me_match_id.nid) &&
		    !nid_same(&me->me_match_id.nid, &info->mi_id.nid))
			return LNET_MATCHMD_NONE;

		if (me->me_match_id.pid != LNET_PID_ANY &&
		    me->me_match_id.pid != info->mi_id.pid)
			return LNET_MATCHMD_NONE;
	}

	/* Hurrah! This _is_ a match; check it out... */

	if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
		offset = md->md_offset;
	else
		offset = info->mi_roffset;

	if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
		mlength = md->md_max_size;
		LASSERT(md->md_offset + mlength <= md->md_length);
	} else {
		mlength = md->md_length - offset;
	}

	if (info->mi_rlength <= mlength) { /* fits in allowed space */
		mlength = info->mi_rlength;
	} else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
		/* this packet _really_ is too big */
		CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
		       libcfs_idstr(&info->mi_id), info->mi_mbits,
		       info->mi_rlength, md->md_length - offset, mlength);

		return LNET_MATCHMD_DROP;
	}

	/* Commit to this ME/MD */
	CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
	       (info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
	       info->mi_portal, libcfs_idstr(&info->mi_id), mlength,
	       info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);

	lnet_msg_attach_md(msg, md, offset, mlength);
	md->md_offset = offset + mlength;

	if (!lnet_md_exhausted(md))
		return LNET_MATCHMD_OK;

	/* Auto-unlink NOW, so the ME gets unlinked if required.
	 * We bumped md->md_refcount above so the MD just gets flagged
	 * for unlink when it is finalized. */
	if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
		lnet_md_unlink(md);

	return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
}
static struct lnet_match_table *
lnet_match2mt(struct lnet_portal *ptl, struct lnet_processid *id, __u64 mbits)
{
	if (LNET_CPT_NUMBER == 1)
		return ptl->ptl_mtables[0]; /* the only one */

	/* if it's a unique portal, return match-table hashed by NID */
	return lnet_ptl_is_unique(ptl) ?
	       ptl->ptl_mtables[lnet_nid2cpt(&id->nid, NULL)] : NULL;
}
struct lnet_match_table *
lnet_mt_of_attach(unsigned int index, struct lnet_processid *id,
		  __u64 mbits, __u64 ignore_bits, enum lnet_ins_pos pos)
{
	struct lnet_portal *ptl;
	struct lnet_match_table *mtable;

	/* NB: called w/o lock */
	LASSERT(index < the_lnet.ln_nportals);

	if (!lnet_ptl_match_type(index, id, mbits, ignore_bits))
		return NULL;

	ptl = the_lnet.ln_portals[index];

	mtable = lnet_match2mt(ptl, id, mbits);
	if (mtable != NULL) /* unique portal or only one match-table */
		return mtable;

	/* it's a wildcard portal */
	switch (pos) {
	default:
		return NULL;
	case LNET_INS_BEFORE:
	case LNET_INS_AFTER:
		/* posted by no affinity thread, always hash to specific
		 * match-table to avoid buffer stealing which is heavy */
		return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
	case LNET_INS_LOCAL:
		/* posted by cpu-affinity thread */
		return ptl->ptl_mtables[lnet_cpt_current()];
	}
}
static struct lnet_match_table *
lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
{
	struct lnet_match_table *mtable;
	struct lnet_portal *ptl;
	unsigned int nmaps;
	unsigned int rotor;
	unsigned int cpt;
	bool routed;

	/* NB: called w/o lock */
	LASSERT(info->mi_portal < the_lnet.ln_nportals);
	ptl = the_lnet.ln_portals[info->mi_portal];

	LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));

	mtable = lnet_match2mt(ptl, &info->mi_id, info->mi_mbits);
	if (mtable != NULL) /* unique portal or only one match-table */
		return mtable;

	/* it's a wildcard portal */
	routed = LNET_NID_NET(&msg->msg_hdr.src_nid) !=
		 LNET_NID_NET(&msg->msg_hdr.dest_nid);

	if (portal_rotor == LNET_PTL_ROTOR_OFF ||
	    (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) {
		cpt = lnet_cpt_current();
		if (ptl->ptl_mtables[cpt]->mt_enabled)
			return ptl->ptl_mtables[cpt];
	}

	rotor = ptl->ptl_rotor++; /* get round-robin factor */
	if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed)
		cpt = lnet_nid2cpt(&msg->msg_hdr.src_nid, NULL);
	else
		cpt = rotor % LNET_CPT_NUMBER;

	if (!ptl->ptl_mtables[cpt]->mt_enabled) {
		/* is there any active entry for this portal? */
		nmaps = ptl->ptl_mt_nmaps;
		/* map to an active mtable to avoid heavy "stealing" */
		if (nmaps != 0) {
			/* NB: there is possibility that ptl_mt_maps is being
			 * changed because we are not under protection of
			 * lnet_ptl_lock, but it shouldn't hurt anything */
			cpt = ptl->ptl_mt_maps[rotor % nmaps];
		}
	}

	return ptl->ptl_mtables[cpt];
}
static int
lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
{
	__u64 *bmap;
	int i;

	if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
		return 0;

	if (pos < 0) { /* check all bits */
		for (i = 0; i < LNET_MT_EXHAUSTED_BMAP; i++) {
			if (mtable->mt_exhausted[i] != (__u64)(-1))
				return 0;
		}
		return 1;
	}

	LASSERT(pos <= LNET_MT_HASH_IGNORE);
	/* mtable::mt_mhash[pos] is marked as exhausted or not */
	bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
	pos &= (1 << LNET_MT_BITS_U64) - 1;

	return ((*bmap) & (1ULL << pos)) != 0;
}
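
/*
 * Indexing example (illustrative, assuming LNET_MT_BITS_U64 selects 64-bit
 * words): pos = 70 resolves to word 70 >> 6 == 1 and bit 70 & 63 == 6, i.e.
 * bit 6 of mt_exhausted[1]. lnet_mt_set_exhausted() below uses the same
 * word/bit split when it flips a hash list's exhausted flag.
 */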
static void
lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
{
	__u64 *bmap;

	LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
	LASSERT(pos <= LNET_MT_HASH_IGNORE);

	/* set mtable::mt_mhash[pos] as exhausted/non-exhausted */
	bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
	pos &= (1 << LNET_MT_BITS_U64) - 1;

	if (!exhausted)
		*bmap &= ~(1ULL << pos);
	else
		*bmap |= 1ULL << pos;
}
static struct list_head *
lnet_mt_match_head(struct lnet_match_table *mtable,
		   struct lnet_processid *id, __u64 mbits)
{
	struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];

	if (lnet_ptl_is_wildcard(ptl)) {
		return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK];
	} else {
		unsigned long hash = mbits + nidhash(&id->nid) + id->pid;

		LASSERT(lnet_ptl_is_unique(ptl));
		hash = cfs_hash_long(hash, LNET_MT_HASH_BITS);
		return &mtable->mt_mhash[hash & LNET_MT_HASH_MASK];
	}
}
static int
lnet_mt_match_md(struct lnet_match_table *mtable,
		 struct lnet_match_info *info, struct lnet_msg *msg)
{
	struct list_head *head;
	struct lnet_me *me;
	struct lnet_me *tmp;
	int exhausted = 0;
	int rc;

 again:
	/* any ME with ignore bits? */
	if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
		head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
	else
		head = lnet_mt_match_head(mtable, &info->mi_id,
					  info->mi_mbits);
	/* NB: only wildcard portal needs to return LNET_MATCHMD_EXHAUSTED */
	if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
		exhausted = LNET_MATCHMD_EXHAUSTED;

	list_for_each_entry_safe(me, tmp, head, me_list) {
		/* ME attached but MD not attached yet */
		if (me->me_md == NULL)
			continue;

		LASSERT(me == me->me_md->md_me);

		rc = lnet_try_match_md(me->me_md, info, msg);
		if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
			exhausted = 0; /* mlist is not empty */

		if ((rc & LNET_MATCHMD_FINISH) != 0) {
			/* don't return EXHAUSTED bit because we don't know
			 * whether the mlist is empty or not */
			return rc & ~LNET_MATCHMD_EXHAUSTED;
		}
	}

	if (exhausted == LNET_MATCHMD_EXHAUSTED) { /* @head is exhausted */
		lnet_mt_set_exhausted(mtable, head - mtable->mt_mhash, 1);
		if (!lnet_mt_test_exhausted(mtable, -1))
			exhausted = 0; /* mtable is not exhausted */
	}

	if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
		head = lnet_mt_match_head(mtable, &info->mi_id,
					  info->mi_mbits);
		goto again; /* re-check MEs w/o ignore-bits */
	}

	if (info->mi_opc == LNET_MD_OP_GET ||
	    !lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal]))
		return LNET_MATCHMD_DROP | exhausted;

	return LNET_MATCHMD_NONE | exhausted;
}
static int
lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
{
	int rc;

	/* message arrived before any buffer posting on this portal,
	 * simply delay or drop this message */
	if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
		return 0;

	lnet_ptl_lock(ptl);
	/* check it again with hold of lock */
	if (lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)) {
		lnet_ptl_unlock(ptl);
		return 0;
	}

	if (lnet_ptl_is_lazy(ptl)) {
		if (msg->msg_rx_ready_delay) {
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list,
				      &ptl->ptl_msg_delayed);
		}
		rc = LNET_MATCHMD_NONE;
	} else {
		rc = LNET_MATCHMD_DROP;
	}

	lnet_ptl_unlock(ptl);
	return rc;
}
static int
lnet_ptl_match_delay(struct lnet_portal *ptl,
		     struct lnet_match_info *info, struct lnet_msg *msg)
{
	int first = ptl->ptl_mt_maps[0]; /* read w/o lock */
	int rc = 0;
	int i;

	/*
	 * Steal buffer from other CPTs, and delay msg if nothing to
	 * steal. This function is more expensive than a regular
	 * match, but we don't expect it can happen a lot. The return
	 * code contains one of LNET_MATCHMD_OK, LNET_MATCHMD_DROP, or
	 * LNET_MATCHMD_NONE.
	 */
	LASSERT(lnet_ptl_is_wildcard(ptl));

	for (i = 0; i < LNET_CPT_NUMBER; i++) {
		struct lnet_match_table *mtable;
		int cpt;

		cpt = (first + i) % LNET_CPT_NUMBER;
		mtable = ptl->ptl_mtables[cpt];
		if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
			continue;

		lnet_res_lock(cpt);
		lnet_ptl_lock(ptl);

		if (i == 0) {
			/* The first try, add to stealing list. */
			list_add_tail(&msg->msg_list,
				      &ptl->ptl_msg_stealing);
		}

		if (!list_empty(&msg->msg_list)) {
			/* On stealing list. */
			rc = lnet_mt_match_md(mtable, info, msg);

			if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
			    mtable->mt_enabled)
				lnet_ptl_disable_mt(ptl, cpt);

			if ((rc & LNET_MATCHMD_FINISH) != 0) {
				/* Match found, remove from stealing list. */
				list_del_init(&msg->msg_list);
			} else if (i == LNET_CPT_NUMBER - 1 ||	/* (1) */
				   ptl->ptl_mt_nmaps == 0 ||	/* (2) */
				   (ptl->ptl_mt_nmaps == 1 &&	/* (3) */
				    ptl->ptl_mt_maps[0] == cpt)) {
				/*
				 * No match found, and this is either
				 * (1) the last cpt to check, or
				 * (2) there is no active cpt, or
				 * (3) this is the only active cpt.
				 * There is nothing to steal: delay or
				 * drop the message.
				 */
				list_del_init(&msg->msg_list);

				if (lnet_ptl_is_lazy(ptl)) {
					msg->msg_rx_delayed = 1;
					list_add_tail(&msg->msg_list,
						      &ptl->ptl_msg_delayed);
					rc = LNET_MATCHMD_NONE;
				} else {
					rc = LNET_MATCHMD_DROP;
				}
			} else {
				/* Do another iteration. */
				rc = 0;
			}
		} else {
			/*
			 * No longer on stealing list: another thread
			 * matched the message in lnet_ptl_attach_md().
			 * We are now expected to handle the message.
			 */
			rc = msg->msg_md == NULL ?
			     LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
		}

		lnet_ptl_unlock(ptl);
		lnet_res_unlock(cpt);

		/*
		 * Note that test (1) above ensures that we always
		 * exit the loop through this break statement.
		 *
		 * LNET_MATCHMD_NONE means msg was added to the
		 * delayed queue, and we may no longer reference it
		 * after lnet_ptl_unlock() and lnet_res_unlock().
		 */
		if (rc & (LNET_MATCHMD_FINISH | LNET_MATCHMD_NONE))
			break;
	}

	return rc;
}
static int
lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
{
	struct lnet_match_table *mtable;
	struct lnet_portal *ptl;
	int rc;

	CDEBUG(D_NET,
	       "Request from %s of length %d into portal %d MB=%#llx\n",
	       libcfs_idstr(&info->mi_id),
	       info->mi_rlength, info->mi_portal, info->mi_mbits);

	if (info->mi_portal >= the_lnet.ln_nportals) {
		CERROR("Invalid portal %d not in [0-%d]\n",
		       info->mi_portal, the_lnet.ln_nportals);
		return LNET_MATCHMD_DROP;
	}

	ptl = the_lnet.ln_portals[info->mi_portal];
	rc = lnet_ptl_match_early(ptl, msg);
	if (rc != 0) /* matched or delayed early message */
		return rc;

	mtable = lnet_mt_of_match(info, msg);
	lnet_res_lock(mtable->mt_cpt);

	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		rc = LNET_MATCHMD_DROP;
		goto out;
	}

	rc = lnet_mt_match_md(mtable, info, msg);
	if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
		lnet_ptl_lock(ptl);
		lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
		lnet_ptl_unlock(ptl);
	}

	if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */
		goto out;

	if (!msg->msg_rx_ready_delay)
		goto out;

	LASSERT(lnet_ptl_is_lazy(ptl));
	LASSERT(!msg->msg_rx_delayed);

	/* NB: we don't expect "delay" can happen a lot */
	if (lnet_ptl_is_unique(ptl) || LNET_CPT_NUMBER == 1) {
		lnet_ptl_lock(ptl);
		msg->msg_rx_delayed = 1;
		list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
		lnet_ptl_unlock(ptl);
		lnet_res_unlock(mtable->mt_cpt);
		rc = LNET_MATCHMD_NONE;
	} else {
		lnet_res_unlock(mtable->mt_cpt);
		rc = lnet_ptl_match_delay(ptl, info, msg);
	}

	/* LNET_MATCHMD_NONE means msg was added to the delay queue */
	if (rc & LNET_MATCHMD_NONE) {
		CDEBUG(D_NET,
		       "Delaying %s from %s ptl %d MB %#llx off %d len %d\n",
		       info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
		       libcfs_idstr(&info->mi_id), info->mi_portal,
		       info->mi_mbits, info->mi_roffset, info->mi_rlength);
	}

	return rc;

 out:
	lnet_res_unlock(mtable->mt_cpt);

	/* EXHAUSTED bit is only meaningful for internal functions */
	return rc & ~LNET_MATCHMD_EXHAUSTED;
}
void
lnet_ptl_detach_md(struct lnet_me *me, struct lnet_libmd *md)
{
	LASSERT(me->me_md == md && md->md_me == me);

	me->me_md = NULL;
	md->md_me = NULL;
}
/* called with lnet_res_lock held */
void
lnet_ptl_attach_md(struct lnet_me *me, struct lnet_libmd *md,
		   struct list_head *matches, struct list_head *drops)
{
	struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
	struct lnet_match_table *mtable;
	struct list_head *head;
	struct lnet_msg *tmp;
	struct lnet_msg *msg;
	int exhausted = 0;
	int cpt;

	LASSERT(md->md_refcount == 0); /* a brand new MD */

	me->me_md = md;
	md->md_me = me;

	cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
	mtable = ptl->ptl_mtables[cpt];

	if (list_empty(&ptl->ptl_msg_stealing) &&
	    list_empty(&ptl->ptl_msg_delayed) &&
	    !lnet_mt_test_exhausted(mtable, me->me_pos))
		return;

	lnet_ptl_lock(ptl);
	head = &ptl->ptl_msg_stealing;
 again:
	list_for_each_entry_safe(msg, tmp, head, msg_list) {
		struct lnet_match_info info;
		struct lnet_hdr *hdr;
		int rc;

		LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);

		hdr = &msg->msg_hdr;
		/* Multi-Rail: Primary peer NID */
		info.mi_id.nid = msg->msg_initiator;
		info.mi_id.pid = hdr->src_pid;
		info.mi_opc = LNET_MD_OP_PUT;
		info.mi_portal = hdr->msg.put.ptl_index;
		info.mi_rlength = hdr->payload_length;
		info.mi_roffset = hdr->msg.put.offset;
		info.mi_mbits = hdr->msg.put.match_bits;

		rc = lnet_try_match_md(md, &info, msg);

		exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
		if ((rc & LNET_MATCHMD_NONE) != 0) {
			if (exhausted)
				break;
			continue;
		}

		/* Hurrah! This _is_ a match */
		LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
		list_del_init(&msg->msg_list);

		if (head == &ptl->ptl_msg_stealing) {
			if (exhausted)
				break;
			/* stealing thread will handle the message */
			continue;
		}

		if ((rc & LNET_MATCHMD_OK) != 0) {
			list_add_tail(&msg->msg_list, matches);

			CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
			       libcfs_idstr(&info.mi_id),
			       info.mi_portal, info.mi_mbits,
			       info.mi_roffset, info.mi_rlength);
		} else {
			list_add_tail(&msg->msg_list, drops);
		}

		if (exhausted)
			break;
	}

	if (!exhausted && head == &ptl->ptl_msg_stealing) {
		head = &ptl->ptl_msg_delayed;
		goto again;
	}

	if (lnet_ptl_is_wildcard(ptl) && !exhausted) {
		lnet_mt_set_exhausted(mtable, me->me_pos, 0);
		if (!mtable->mt_enabled)
			lnet_ptl_enable_mt(ptl, cpt);
	}

	lnet_ptl_unlock(ptl);
}
static void
lnet_ptl_cleanup(struct lnet_portal *ptl)
{
	struct lnet_match_table *mtable;
	int i;

	if (ptl->ptl_mtables == NULL) /* uninitialized portal */
		return;

	LASSERT(list_empty(&ptl->ptl_msg_delayed));
	LASSERT(list_empty(&ptl->ptl_msg_stealing));
	cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
		struct list_head *mhash;
		struct lnet_me *me;
		int j;

		if (mtable->mt_mhash == NULL) /* uninitialized match-table */
			continue;

		mhash = mtable->mt_mhash;
		/* cleanup MEs */
		for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
			while ((me = list_first_entry_or_null(&mhash[j],
							      struct lnet_me,
							      me_list)) != NULL) {
				CERROR("Active ME %p on exit\n", me);
				list_del(&me->me_list);
				LIBCFS_FREE_PRE(me, sizeof(*me), "slab-freed");
				kmem_cache_free(lnet_mes_cachep, me);
			}
		}
		/* the extra entry is for MEs with ignore bits */
		CFS_FREE_PTR_ARRAY(mhash, LNET_MT_HASH_SIZE + 1);
	}

	cfs_percpt_free(ptl->ptl_mtables);
	ptl->ptl_mtables = NULL;
}
static int
lnet_ptl_setup(struct lnet_portal *ptl, int index)
{
	struct lnet_match_table *mtable;
	struct list_head *mhash;
	int i;
	int j;

	ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
					    sizeof(struct lnet_match_table));
	if (ptl->ptl_mtables == NULL) {
		CERROR("Failed to create match table for portal %d\n", index);
		return -ENOMEM;
	}

	ptl->ptl_index = index;
	INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
	INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
	spin_lock_init(&ptl->ptl_lock);
	cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
		/* the extra entry is for MEs with ignore bits */
		LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
				 sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
		if (mhash == NULL) {
			CERROR("Failed to create match hash for portal %d\n",
			       index);
			goto failed;
		}

		memset(&mtable->mt_exhausted[0], -1,
		       sizeof(mtable->mt_exhausted[0]) *
		       LNET_MT_EXHAUSTED_BMAP);
		mtable->mt_mhash = mhash;
		for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++)
			INIT_LIST_HEAD(&mhash[j]);

		mtable->mt_portal = index;
		mtable->mt_cpt = i;
	}

	return 0;

 failed:
	lnet_ptl_cleanup(ptl);
	return -ENOMEM;
}
#define PORTAL_SIZE (offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]))

void
lnet_portals_destroy(void)
{
	int i;

	if (the_lnet.ln_portals == NULL)
		return;

	for (i = 0; i < the_lnet.ln_nportals; i++)
		if (the_lnet.ln_portals[i]) {
			lnet_ptl_cleanup(the_lnet.ln_portals[i]);
			LIBCFS_FREE(the_lnet.ln_portals[i], PORTAL_SIZE);
		}

	CFS_FREE_PTR_ARRAY(the_lnet.ln_portals, the_lnet.ln_nportals);
	the_lnet.ln_portals = NULL;
}
int
lnet_portals_create(void)
{
	int i;

	the_lnet.ln_nportals = MAX_PORTALS;
	CFS_ALLOC_PTR_ARRAY(the_lnet.ln_portals, the_lnet.ln_nportals);
	if (the_lnet.ln_portals == NULL) {
		CERROR("Failed to allocate portals table\n");
		return -ENOMEM;
	}

	for (i = 0; i < the_lnet.ln_nportals; i++) {
		LIBCFS_ALLOC(the_lnet.ln_portals[i], PORTAL_SIZE);
		if (!the_lnet.ln_portals[i] ||
		    lnet_ptl_setup(the_lnet.ln_portals[i], i)) {
			lnet_portals_destroy();
			return -ENOMEM;
		}
	}

	return 0;
}
/**
 * Turn on the lazy portal attribute. Use with caution!
 *
 * This portal attribute only affects incoming PUT requests to the portal,
 * and is off by default. By default, if there's no matching MD for an
 * incoming PUT request, it is simply dropped. With the lazy attribute on,
 * such requests are queued indefinitely until either a matching MD is
 * posted to the portal or the lazy attribute is turned off.
 *
 * While this prevents dropped requests, it should be regarded as the last
 * line of defense - i.e. users must keep a close watch on the active
 * buffers of a lazy portal, and post more buffers as soon as they run low.
 * This is because delayed requests usually have detrimental effects on the
 * underlying network connections: a few delayed requests often suffice to
 * bring a connection to a complete halt, due to flow control mechanisms.
 *
 * There's also a DOS attack risk. If users don't post match-all MDs on a
 * lazy portal, a malicious peer can easily stop a service by sending some
 * PUT requests with match bits that won't match any MD. A routed server is
 * especially vulnerable since the connections to its neighbor routers are
 * shared among all clients.
 *
 * \param portal Index of the portal to enable the lazy attribute on.
 *
 * \retval 0 On success.
 * \retval -EINVAL If \a portal is not a valid index.
 */
int
LNetSetLazyPortal(int portal)
{
	struct lnet_portal *ptl;

	if (portal < 0 || portal >= the_lnet.ln_nportals)
		return -EINVAL;

	CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
	ptl = the_lnet.ln_portals[portal];

	lnet_res_lock(LNET_LOCK_EX);
	lnet_ptl_lock(ptl);

	lnet_ptl_setopt(ptl, LNET_PTL_LAZY);

	lnet_ptl_unlock(ptl);
	lnet_res_unlock(LNET_LOCK_EX);

	return 0;
}
EXPORT_SYMBOL(LNetSetLazyPortal);
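
/*
 * Usage sketch (illustrative only, not part of LNet): a service would
 * typically mark its request portal lazy before posting request buffers, so
 * early PUTs are queued rather than dropped. The portal index
 * MY_REQUEST_PORTAL below is a made-up placeholder.
 */
#if 0
static int my_service_setup(void)
{
	int rc = LNetSetLazyPortal(MY_REQUEST_PORTAL);

	if (rc != 0) /* -EINVAL: bad portal index */
		return rc;

	/* ... post match-all MDs on MY_REQUEST_PORTAL here ... */
	return 0;
}
#endif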
int
lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason)
{
	struct lnet_portal *ptl;
	LIST_HEAD(zombies);

	if (portal < 0 || portal >= the_lnet.ln_nportals)
		return -EINVAL;

	ptl = the_lnet.ln_portals[portal];

	lnet_res_lock(LNET_LOCK_EX);
	lnet_ptl_lock(ptl);

	if (!lnet_ptl_is_lazy(ptl)) {
		lnet_ptl_unlock(ptl);
		lnet_res_unlock(LNET_LOCK_EX);
		return 0;
	}

	if (ni != NULL) {
		struct lnet_msg *msg, *tmp;

		/* grab all messages which are on the NI passed in */
		list_for_each_entry_safe(msg, tmp, &ptl->ptl_msg_delayed,
					 msg_list) {
			if (msg->msg_txni == ni || msg->msg_rxni == ni)
				list_move(&msg->msg_list, &zombies);
		}
	} else {
		if (the_lnet.ln_state != LNET_STATE_RUNNING)
			CWARN("Active lazy portal %d on exit\n", portal);
		else
			CDEBUG(D_NET, "clearing portal %d lazy\n", portal);

		/* grab all the blocked messages atomically */
		list_splice_init(&ptl->ptl_msg_delayed, &zombies);

		lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
	}

	lnet_ptl_unlock(ptl);
	lnet_res_unlock(LNET_LOCK_EX);

	lnet_drop_delayed_msg_list(&zombies, reason);

	return 0;
}
/**
 * Turn off the lazy portal attribute. Delayed requests on the portal,
 * if any, will all be dropped when this function returns.
 *
 * \param portal Index of the portal to disable the lazy attribute on.
 *
 * \retval 0 On success.
 * \retval -EINVAL If \a portal is not a valid index.
 */
int
LNetClearLazyPortal(int portal)
{
	return lnet_clear_lazy_portal(NULL, portal,
				      "Clearing lazy portal attr");
}
EXPORT_SYMBOL(LNetClearLazyPortal);
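
/*
 * Companion sketch to the one after LNetSetLazyPortal() (illustrative only):
 * on shutdown a service clears the lazy attribute, which atomically drains
 * the delayed queue and drops whatever is still waiting on the portal.
 */
#if 0
static void my_service_teardown(void)
{
	/* drops any still-delayed requests on MY_REQUEST_PORTAL */
	LNetClearLazyPortal(MY_REQUEST_PORTAL);
}
#endif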