// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012, 2017, Intel Corporation. */

/* This file is part of Lustre, http://www.lustre.org/
 *
 * portal & match routines
 *
 * Author: liang@whamcloud.com
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

/* NB: add /proc interfaces in upcoming patches */
17 int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
18 module_param(portal_rotor, int, 0644);
19 MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
22 lnet_ptl_match_type(unsigned int index, struct lnet_processid *match_id,
23 __u64 mbits, __u64 ignore_bits)
25 struct lnet_portal *ptl = the_lnet.ln_portals[index];
28 unique = (ignore_bits == 0 &&
29 !LNET_NID_IS_ANY(&match_id->nid) &&
30 match_id->pid != LNET_PID_ANY);
32 LASSERT(!lnet_ptl_is_unique(ptl) || !lnet_ptl_is_wildcard(ptl));
34 /* prefer to check w/o any lock */
35 if (likely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl)))
38 /* unset, new portal */
40 /* check again with lock */
41 if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) {
48 lnet_ptl_setopt(ptl, LNET_PTL_MATCH_UNIQUE);
50 lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD);
57 if ((lnet_ptl_is_unique(ptl) && !unique) ||
58 (lnet_ptl_is_wildcard(ptl) && unique))
64 lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
66 struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
69 /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
70 LASSERT(lnet_ptl_is_wildcard(ptl));
72 mtable->mt_enabled = 1;
74 ptl->ptl_mt_maps[ptl->ptl_mt_nmaps] = cpt;
75 for (i = ptl->ptl_mt_nmaps - 1; i >= 0; i--) {
76 LASSERT(ptl->ptl_mt_maps[i] != cpt);
77 if (ptl->ptl_mt_maps[i] < cpt)
81 ptl->ptl_mt_maps[i + 1] = ptl->ptl_mt_maps[i];
82 ptl->ptl_mt_maps[i] = cpt;
89 lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
91 struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
94 /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
95 LASSERT(lnet_ptl_is_wildcard(ptl));
97 if (LNET_CPT_NUMBER == 1)
98 return; /* never disable the only match-table */
100 mtable->mt_enabled = 0;
102 LASSERT(ptl->ptl_mt_nmaps > 0 &&
103 ptl->ptl_mt_nmaps <= LNET_CPT_NUMBER);
105 /* remove it from mt_maps */
107 for (i = 0; i < ptl->ptl_mt_nmaps; i++) {
108 if (ptl->ptl_mt_maps[i] >= cpt) /* overwrite it */
109 ptl->ptl_mt_maps[i] = ptl->ptl_mt_maps[i + 1];
114 lnet_try_match_md(struct lnet_libmd *md,
115 struct lnet_match_info *info, struct lnet_msg *msg)
117 /* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
118 * lnet_match_blocked_msg() relies on this to avoid races */
120 unsigned int mlength;
121 struct lnet_me *me = md->md_me;
124 if (lnet_md_exhausted(md))
125 return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
127 /* mismatched MD op */
128 if ((md->md_options & info->mi_opc) == 0)
129 return LNET_MATCHMD_NONE;
131 /* mismatched ME matchbits? */
132 if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0)
133 return LNET_MATCHMD_NONE;
135 /* mismatched PID? */
136 if (me->me_match_id.pid != LNET_PID_ANY &&
137 me->me_match_id.pid != info->mi_id.pid)
138 return LNET_MATCHMD_NONE;
140 /* try to accept match based on bits only */
141 if ((!LNET_NID_IS_ANY(&me->me_match_id.nid) &&
142 !nid_same(&me->me_match_id.nid, &info->mi_id.nid)) ||
143 (!LNET_NID_IS_ANY(&me->me_match_id.nid) &&
144 CFS_FAIL_CHECK(CFS_FAIL_MATCH_MD_NID))) {
145 struct lnet_peer *lp_me, *lp_peer;
147 /* check if ME NID matches another NID of same peer */
148 lp_me = lnet_find_peer(&me->me_match_id.nid);
149 lp_peer = lnet_find_peer(&info->mi_id.nid);
151 if (lp_me && lp_peer && (lp_me == lp_peer)) {
152 /* Shouldn't happen, but better than dropping
153 * message entirely. Print warning so we know
154 * it happens, and something needs to be fixed.
156 CWARN("message from %s matched %llu with NID mismatch %s accepted (same peer %pK)\n",
157 libcfs_idstr(&info->mi_id),
159 libcfs_nidstr(&me->me_match_id.nid),
161 lnet_peer_decref_locked(lp_me);
162 lnet_peer_decref_locked(lp_peer);
164 CWARN("message from %s matched %llu with NID mismatch %s rejected (different peer %pK != %pK)\n",
165 libcfs_idstr(&info->mi_id),
167 libcfs_nidstr(&me->me_match_id.nid),
170 lnet_peer_decref_locked(lp_me);
172 lnet_peer_decref_locked(lp_peer);
174 return LNET_MATCHMD_NONE;
178 /* Hurrah! This _is_ a match; check it out... */
180 if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
181 offset = md->md_offset;
183 offset = info->mi_roffset;
185 if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
186 mlength = md->md_max_size;
187 LASSERT(md->md_offset + mlength <= md->md_length);
189 mlength = md->md_length - offset;
192 if (info->mi_rlength <= mlength) { /* fits in allowed space */
193 mlength = info->mi_rlength;
194 } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
195 /* this packet _really_ is too big */
196 CERROR("Matching packet from %s, match %llu"
197 " length %d too big: %d left, %d allowed\n",
198 libcfs_idstr(&info->mi_id), info->mi_mbits,
199 info->mi_rlength, md->md_length - offset, mlength);
201 return LNET_MATCHMD_DROP;
204 /* Commit to this ME/MD */
205 CDEBUG(D_NET, "Incoming %s index %x from %s of "
206 "length %d/%d into md %#llx [%d] + %d\n",
207 (info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
208 info->mi_portal, libcfs_idstr(&info->mi_id), mlength,
209 info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);
211 lnet_msg_attach_md(msg, md, offset, mlength);
212 md->md_offset = offset + mlength;
214 if (!lnet_md_exhausted(md))
215 return LNET_MATCHMD_OK;
217 /* Auto-unlink NOW, so the ME gets unlinked if required.
218 * We bumped md->md_refcount above so the MD just gets flagged
219 * for unlink when it is finalized. */
220 if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
223 return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
226 static struct lnet_match_table *
227 lnet_match2mt(struct lnet_portal *ptl, struct lnet_processid *id, __u64 mbits)
229 if (LNET_CPT_NUMBER == 1)
230 return ptl->ptl_mtables[0]; /* the only one */
232 /* if it's a unique portal, return match-table hashed by NID */
233 return lnet_ptl_is_unique(ptl) ?
234 ptl->ptl_mtables[lnet_nid2cpt(&id->nid, NULL)] : NULL;
237 struct lnet_match_table *
238 lnet_mt_of_attach(unsigned int index, struct lnet_processid *id,
239 __u64 mbits, __u64 ignore_bits, enum lnet_ins_pos pos)
241 struct lnet_portal *ptl;
242 struct lnet_match_table *mtable;
244 /* NB: called w/o lock */
245 LASSERT(index < the_lnet.ln_nportals);
247 if (!lnet_ptl_match_type(index, id, mbits, ignore_bits))
250 ptl = the_lnet.ln_portals[index];
252 mtable = lnet_match2mt(ptl, id, mbits);
253 if (mtable != NULL) /* unique portal or only one match-table */
256 /* it's a wildcard portal */
260 case LNET_INS_BEFORE:
262 /* posted by no affinity thread, always hash to specific
263 * match-table to avoid buffer stealing which is heavy */
264 return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
266 /* posted by cpu-affinity thread */
267 return ptl->ptl_mtables[lnet_cpt_current()];
271 static struct lnet_match_table *
272 lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
274 struct lnet_match_table *mtable;
275 struct lnet_portal *ptl;
281 /* NB: called w/o lock */
282 LASSERT(info->mi_portal < the_lnet.ln_nportals);
283 ptl = the_lnet.ln_portals[info->mi_portal];
285 LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
287 mtable = lnet_match2mt(ptl, &info->mi_id, info->mi_mbits);
291 /* it's a wildcard portal */
292 routed = LNET_NID_NET(&msg->msg_hdr.src_nid) !=
293 LNET_NID_NET(&msg->msg_hdr.dest_nid);
295 if (portal_rotor == LNET_PTL_ROTOR_OFF ||
296 (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) {
297 cpt = lnet_cpt_current();
298 if (ptl->ptl_mtables[cpt]->mt_enabled)
299 return ptl->ptl_mtables[cpt];
302 rotor = ptl->ptl_rotor++; /* get round-robin factor */
303 if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed)
306 cpt = rotor % LNET_CPT_NUMBER;
308 if (!ptl->ptl_mtables[cpt]->mt_enabled) {
309 /* is there any active entry for this portal? */
310 nmaps = ptl->ptl_mt_nmaps;
311 /* map to an active mtable to avoid heavy "stealing" */
313 /* NB: there is possibility that ptl_mt_maps is being
314 * changed because we are not under protection of
315 * lnet_ptl_lock, but it shouldn't hurt anything */
316 cpt = ptl->ptl_mt_maps[rotor % nmaps];
320 return ptl->ptl_mtables[cpt];
324 lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
329 if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
332 if (pos < 0) { /* check all bits */
333 for (i = 0; i < LNET_MT_EXHAUSTED_BMAP; i++) {
334 if (mtable->mt_exhausted[i] != (__u64)(-1))
340 LASSERT(pos <= LNET_MT_HASH_IGNORE);
341 /* mtable::mt_mhash[pos] is marked as exhausted or not */
342 bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
343 pos &= (1 << LNET_MT_BITS_U64) - 1;
345 return ((*bmap) & (1ULL << pos)) != 0;
349 lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
353 LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
354 LASSERT(pos <= LNET_MT_HASH_IGNORE);
356 /* set mtable::mt_mhash[pos] as exhausted/non-exhausted */
357 bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
358 pos &= (1 << LNET_MT_BITS_U64) - 1;
361 *bmap &= ~(1ULL << pos);
363 *bmap |= 1ULL << pos;
367 lnet_mt_match_head(struct lnet_match_table *mtable,
368 struct lnet_processid *id, __u64 mbits)
370 struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
372 if (lnet_ptl_is_wildcard(ptl)) {
373 return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK];
375 unsigned long hash = mbits + nidhash(&id->nid) + id->pid;
377 LASSERT(lnet_ptl_is_unique(ptl));
378 hash = cfs_hash_long(hash, LNET_MT_HASH_BITS);
379 return &mtable->mt_mhash[hash & LNET_MT_HASH_MASK];
384 lnet_mt_match_md(struct lnet_match_table *mtable,
385 struct lnet_match_info *info, struct lnet_msg *msg)
387 struct list_head *head;
393 /* any ME with ignore bits? */
394 if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
395 head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
397 head = lnet_mt_match_head(mtable, &info->mi_id,
400 /* NB: only wildcard portal needs to return LNET_MATCHMD_EXHAUSTED */
401 if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
402 exhausted = LNET_MATCHMD_EXHAUSTED;
404 list_for_each_entry_safe(me, tmp, head, me_list) {
405 /* ME attached but MD not attached yet */
406 if (me->me_md == NULL)
409 LASSERT(me == me->me_md->md_me);
411 rc = lnet_try_match_md(me->me_md, info, msg);
412 if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
413 exhausted = 0; /* mlist is not empty */
415 if ((rc & LNET_MATCHMD_FINISH) != 0) {
416 /* don't return EXHAUSTED bit because we don't know
417 * whether the mlist is empty or not */
418 return rc & ~LNET_MATCHMD_EXHAUSTED;
422 if (exhausted == LNET_MATCHMD_EXHAUSTED) { /* @head is exhausted */
423 lnet_mt_set_exhausted(mtable, head - mtable->mt_mhash, 1);
424 if (!lnet_mt_test_exhausted(mtable, -1))
428 if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
429 head = lnet_mt_match_head(mtable, &info->mi_id,
431 goto again; /* re-check MEs w/o ignore-bits */
434 if (info->mi_opc == LNET_MD_OP_GET ||
435 !lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal]))
436 return LNET_MATCHMD_DROP | exhausted;
438 return LNET_MATCHMD_NONE | exhausted;
442 lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
446 /* message arrived before any buffer posting on this portal,
447 * simply delay or drop this message */
448 if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
452 /* check it again with hold of lock */
453 if (lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)) {
454 lnet_ptl_unlock(ptl);
458 if (lnet_ptl_is_lazy(ptl)) {
459 if (msg->msg_rx_ready_delay) {
460 msg->msg_rx_delayed = 1;
461 list_add_tail(&msg->msg_list,
462 &ptl->ptl_msg_delayed);
464 rc = LNET_MATCHMD_NONE;
466 rc = LNET_MATCHMD_DROP;
469 lnet_ptl_unlock(ptl);
474 lnet_ptl_match_delay(struct lnet_portal *ptl,
475 struct lnet_match_info *info, struct lnet_msg *msg)
477 int first = ptl->ptl_mt_maps[0]; /* read w/o lock */
482 * Steal buffer from other CPTs, and delay msg if nothing to
483 * steal. This function is more expensive than a regular
484 * match, but we don't expect it can happen a lot. The return
485 * code contains one of LNET_MATCHMD_OK, LNET_MATCHMD_DROP, or
488 LASSERT(lnet_ptl_is_wildcard(ptl));
490 for (i = 0; i < LNET_CPT_NUMBER; i++) {
491 struct lnet_match_table *mtable;
494 cpt = (first + i) % LNET_CPT_NUMBER;
495 mtable = ptl->ptl_mtables[cpt];
496 if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
503 /* The first try, add to stealing list. */
504 list_add_tail(&msg->msg_list,
505 &ptl->ptl_msg_stealing);
508 if (!list_empty(&msg->msg_list)) {
509 /* On stealing list. */
510 rc = lnet_mt_match_md(mtable, info, msg);
512 if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
514 lnet_ptl_disable_mt(ptl, cpt);
516 if ((rc & LNET_MATCHMD_FINISH) != 0) {
517 /* Match found, remove from stealing list. */
518 list_del_init(&msg->msg_list);
519 } else if (i == LNET_CPT_NUMBER - 1 || /* (1) */
520 ptl->ptl_mt_nmaps == 0 || /* (2) */
521 (ptl->ptl_mt_nmaps == 1 && /* (3) */
522 ptl->ptl_mt_maps[0] == cpt)) {
524 * No match found, and this is either
525 * (1) the last cpt to check, or
526 * (2) there is no active cpt, or
527 * (3) this is the only active cpt.
528 * There is nothing to steal: delay or
531 list_del_init(&msg->msg_list);
533 if (lnet_ptl_is_lazy(ptl)) {
534 msg->msg_rx_delayed = 1;
535 list_add_tail(&msg->msg_list,
536 &ptl->ptl_msg_delayed);
537 rc = LNET_MATCHMD_NONE;
539 rc = LNET_MATCHMD_DROP;
542 /* Do another iteration. */
547 * No longer on stealing list: another thread
548 * matched the message in lnet_ptl_attach_md().
549 * We are now expected to handle the message.
551 rc = msg->msg_md == NULL ?
552 LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
555 lnet_ptl_unlock(ptl);
556 lnet_res_unlock(cpt);
559 * Note that test (1) above ensures that we always
560 * exit the loop through this break statement.
562 * LNET_MATCHMD_NONE means msg was added to the
563 * delayed queue, and we may no longer reference it
564 * after lnet_ptl_unlock() and lnet_res_unlock().
566 if (rc & (LNET_MATCHMD_FINISH | LNET_MATCHMD_NONE))
574 lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
576 struct lnet_match_table *mtable;
577 struct lnet_portal *ptl;
581 "Request from %s of length %d into portal %d MB=%#llx\n",
582 libcfs_idstr(&info->mi_id),
583 info->mi_rlength, info->mi_portal, info->mi_mbits);
585 if (info->mi_portal >= the_lnet.ln_nportals) {
586 CERROR("Invalid portal %d not in [0-%d]\n",
587 info->mi_portal, the_lnet.ln_nportals);
588 return LNET_MATCHMD_DROP;
591 ptl = the_lnet.ln_portals[info->mi_portal];
592 rc = lnet_ptl_match_early(ptl, msg);
593 if (rc != 0) /* matched or delayed early message */
596 mtable = lnet_mt_of_match(info, msg);
597 lnet_res_lock(mtable->mt_cpt);
599 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
600 rc = LNET_MATCHMD_DROP;
604 rc = lnet_mt_match_md(mtable, info, msg);
605 if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
607 lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
608 lnet_ptl_unlock(ptl);
611 if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */
614 if (!msg->msg_rx_ready_delay)
617 LASSERT(lnet_ptl_is_lazy(ptl));
618 LASSERT(!msg->msg_rx_delayed);
620 /* NB: we don't expect "delay" can happen a lot */
621 if (lnet_ptl_is_unique(ptl) || LNET_CPT_NUMBER == 1) {
624 msg->msg_rx_delayed = 1;
625 list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
627 lnet_ptl_unlock(ptl);
628 lnet_res_unlock(mtable->mt_cpt);
629 rc = LNET_MATCHMD_NONE;
631 lnet_res_unlock(mtable->mt_cpt);
632 rc = lnet_ptl_match_delay(ptl, info, msg);
635 /* LNET_MATCHMD_NONE means msg was added to the delay queue */
636 if (rc & LNET_MATCHMD_NONE) {
638 "Delaying %s from %s ptl %d MB %#llx off %d len %d\n",
639 info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
640 libcfs_idstr(&info->mi_id), info->mi_portal,
641 info->mi_mbits, info->mi_roffset, info->mi_rlength);
645 lnet_res_unlock(mtable->mt_cpt);
647 /* EXHAUSTED bit is only meaningful for internal functions */
648 return rc & ~LNET_MATCHMD_EXHAUSTED;
652 lnet_ptl_detach_md(struct lnet_me *me, struct lnet_libmd *md)
654 LASSERT(me->me_md == md && md->md_me == me);
660 /* called with lnet_res_lock held */
662 lnet_ptl_attach_md(struct lnet_me *me, struct lnet_libmd *md,
663 struct list_head *matches, struct list_head *drops)
665 struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
666 struct lnet_match_table *mtable;
667 struct list_head *head;
668 struct lnet_msg *tmp;
669 struct lnet_msg *msg;
673 LASSERT(md->md_refcount == 0); /* a brand new MD */
678 cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
679 mtable = ptl->ptl_mtables[cpt];
681 if (list_empty(&ptl->ptl_msg_stealing) &&
682 list_empty(&ptl->ptl_msg_delayed) &&
683 !lnet_mt_test_exhausted(mtable, me->me_pos))
687 head = &ptl->ptl_msg_stealing;
689 list_for_each_entry_safe(msg, tmp, head, msg_list) {
690 struct lnet_match_info info;
691 struct lnet_hdr *hdr;
694 LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);
697 /* Multi-Rail: Primary peer NID */
698 info.mi_id.nid = msg->msg_initiator;
699 info.mi_id.pid = hdr->src_pid;
700 info.mi_opc = LNET_MD_OP_PUT;
701 info.mi_portal = hdr->msg.put.ptl_index;
702 info.mi_rlength = hdr->payload_length;
703 info.mi_roffset = hdr->msg.put.offset;
704 info.mi_mbits = hdr->msg.put.match_bits;
706 rc = lnet_try_match_md(md, &info, msg);
708 exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
709 if ((rc & LNET_MATCHMD_NONE) != 0) {
715 /* Hurrah! This _is_ a match */
716 LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
717 list_del_init(&msg->msg_list);
719 if (head == &ptl->ptl_msg_stealing) {
722 /* stealing thread will handle the message */
726 if ((rc & LNET_MATCHMD_OK) != 0) {
727 list_add_tail(&msg->msg_list, matches);
729 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
730 "match %llu offset %d length %d.\n",
731 libcfs_idstr(&info.mi_id),
732 info.mi_portal, info.mi_mbits,
733 info.mi_roffset, info.mi_rlength);
735 list_add_tail(&msg->msg_list, drops);
742 if (!exhausted && head == &ptl->ptl_msg_stealing) {
743 head = &ptl->ptl_msg_delayed;
747 if (lnet_ptl_is_wildcard(ptl) && !exhausted) {
748 lnet_mt_set_exhausted(mtable, me->me_pos, 0);
749 if (!mtable->mt_enabled)
750 lnet_ptl_enable_mt(ptl, cpt);
753 lnet_ptl_unlock(ptl);
757 lnet_ptl_cleanup(struct lnet_portal *ptl)
759 struct lnet_match_table *mtable;
762 if (ptl->ptl_mtables == NULL) /* uninitialized portal */
765 LASSERT(list_empty(&ptl->ptl_msg_delayed));
766 LASSERT(list_empty(&ptl->ptl_msg_stealing));
767 cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
768 struct list_head *mhash;
772 if (mtable->mt_mhash == NULL) /* uninitialized match-table */
775 mhash = mtable->mt_mhash;
777 for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
778 while ((me = list_first_entry_or_null(&mhash[j],
781 CERROR("Active ME %p on exit\n", me);
782 list_del(&me->me_list);
783 LIBCFS_FREE_PRE(me, sizeof(*me), "slab-freed");
784 kmem_cache_free(lnet_mes_cachep, me);
787 /* the extra entry is for MEs with ignore bits */
788 CFS_FREE_PTR_ARRAY(mhash, LNET_MT_HASH_SIZE + 1);
791 cfs_percpt_free(ptl->ptl_mtables);
792 ptl->ptl_mtables = NULL;
796 lnet_ptl_setup(struct lnet_portal *ptl, int index)
798 struct lnet_match_table *mtable;
799 struct list_head *mhash;
803 ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
804 sizeof(struct lnet_match_table));
805 if (ptl->ptl_mtables == NULL) {
806 CERROR("Failed to create match table for portal %d\n", index);
810 ptl->ptl_index = index;
811 INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
812 INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
813 spin_lock_init(&ptl->ptl_lock);
814 cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
815 /* the extra entry is for MEs with ignore bits */
816 LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
817 sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
819 CERROR("Failed to create match hash for portal %d\n",
824 memset(&mtable->mt_exhausted[0], -1,
825 sizeof(mtable->mt_exhausted[0]) *
826 LNET_MT_EXHAUSTED_BMAP);
827 mtable->mt_mhash = mhash;
828 for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++)
829 INIT_LIST_HEAD(&mhash[j]);
831 mtable->mt_portal = index;
837 lnet_ptl_cleanup(ptl);
841 #define PORTAL_SIZE (offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]))
843 lnet_portals_destroy(void)
847 if (the_lnet.ln_portals == NULL)
850 for (i = 0; i < the_lnet.ln_nportals; i++)
851 if (the_lnet.ln_portals[i]) {
852 lnet_ptl_cleanup(the_lnet.ln_portals[i]);
853 LIBCFS_FREE(the_lnet.ln_portals[i], PORTAL_SIZE);
856 CFS_FREE_PTR_ARRAY(the_lnet.ln_portals, the_lnet.ln_nportals);
857 the_lnet.ln_portals = NULL;
861 lnet_portals_create(void)
865 the_lnet.ln_nportals = MAX_PORTALS;
866 CFS_ALLOC_PTR_ARRAY(the_lnet.ln_portals, the_lnet.ln_nportals);
867 if (the_lnet.ln_portals == NULL) {
868 CERROR("Failed to allocate portals table\n");
872 for (i = 0; i < the_lnet.ln_nportals; i++) {
873 LIBCFS_ALLOC(the_lnet.ln_portals[i], PORTAL_SIZE);
874 if (!the_lnet.ln_portals[i] ||
875 lnet_ptl_setup(the_lnet.ln_portals[i], i)) {
876 lnet_portals_destroy();
885 * Turn on the lazy portal attribute. Use with caution!
887 * This portal attribute only affects incoming PUT requests to the portal,
888 * and is off by default. By default, if there's no matching MD for an
889 * incoming PUT request, it is simply dropped. With the lazy attribute on,
890 * such requests are queued indefinitely until either a matching MD is
891 * posted to the portal or the lazy attribute is turned off.
893 * It would prevent dropped requests, however it should be regarded as the
894 * last line of defense - i.e. users must keep a close watch on active
895 * buffers on a lazy portal and once it becomes too low post more buffers as
896 * soon as possible. This is because delayed requests usually have detrimental
897 * effects on underlying network connections. A few delayed requests often
898 * suffice to bring an underlying connection to a complete halt, due to flow
899 * control mechanisms.
901 * There's also a DOS attack risk. If users don't post match-all MDs on a
902 * lazy portal, a malicious peer can easily stop a service by sending some
903 * PUT requests with match bits that won't match any MD. A routed server is
904 * especially vulnerable since the connections to its neighbor routers are
905 * shared among all clients.
907 * \param portal Index of the portal to enable the lazy attribute on.
909 * \retval 0 On success.
910 * \retval -EINVAL If \a portal is not a valid index.
913 LNetSetLazyPortal(int portal)
915 struct lnet_portal *ptl;
917 if (portal < 0 || portal >= the_lnet.ln_nportals)
920 CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
921 ptl = the_lnet.ln_portals[portal];
923 lnet_res_lock(LNET_LOCK_EX);
926 lnet_ptl_setopt(ptl, LNET_PTL_LAZY);
928 lnet_ptl_unlock(ptl);
929 lnet_res_unlock(LNET_LOCK_EX);
933 EXPORT_SYMBOL(LNetSetLazyPortal);
936 lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason)
938 struct lnet_portal *ptl;
941 if (portal < 0 || portal >= the_lnet.ln_nportals)
944 ptl = the_lnet.ln_portals[portal];
946 lnet_res_lock(LNET_LOCK_EX);
949 if (!lnet_ptl_is_lazy(ptl)) {
950 lnet_ptl_unlock(ptl);
951 lnet_res_unlock(LNET_LOCK_EX);
956 struct lnet_msg *msg, *tmp;
958 /* grab all messages which are on the NI passed in */
959 list_for_each_entry_safe(msg, tmp, &ptl->ptl_msg_delayed,
961 if (msg->msg_txni == ni || msg->msg_rxni == ni)
962 list_move(&msg->msg_list, &zombies);
965 if (the_lnet.ln_state != LNET_STATE_RUNNING)
966 CWARN("Active lazy portal %d on exit\n", portal);
968 CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
970 /* grab all the blocked messages atomically */
971 list_splice_init(&ptl->ptl_msg_delayed, &zombies);
973 lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
976 lnet_ptl_unlock(ptl);
977 lnet_res_unlock(LNET_LOCK_EX);
979 lnet_drop_delayed_msg_list(&zombies, reason);
985 * Turn off the lazy portal attribute. Delayed requests on the portal,
986 * if any, will be all dropped when this function returns.
988 * \param portal Index of the portal to disable the lazy attribute on.
990 * \retval 0 On success.
991 * \retval -EINVAL If \a portal is not a valid index.
994 LNetClearLazyPortal(int portal)
996 return lnet_clear_lazy_portal(NULL, portal,
997 "Clearing lazy portal attr");
999 EXPORT_SYMBOL(LNetClearLazyPortal);