1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lnet/ulnds/ptllnd/ptllnd_cb.c
38 * Author: Eric Barton <eeb@bartonsoftware.com>
/* Stamp a tx with its completion deadline: now + the NI's configured
 * plni_timeout (seconds).  NOTE(review): extract is truncated — return
 * type and braces elided. */
44 ptllnd_set_tx_deadline(ptllnd_tx_t *tx)
46 ptllnd_peer_t *peer = tx->tx_peer;
47 lnet_ni_t *ni = peer->plp_ni;
48 ptllnd_ni_t *plni = ni->ni_data;
50 tx->tx_deadline = cfs_time_current_sec() + plni->plni_timeout;
/* Queue a (non-NOOP) tx on the peer's send queue and kick the send
 * state machine.  NOOPs never come through here (see LASSERT below);
 * they are generated inside ptllnd_check_sends(). */
54 ptllnd_post_tx(ptllnd_tx_t *tx)
56 ptllnd_peer_t *peer = tx->tx_peer;
58 LASSERT (tx->tx_type != PTLLND_MSG_TYPE_NOOP);
/* Deadline is set at post time, not at tx allocation time. */
60 ptllnd_set_tx_deadline(tx);
61 list_add_tail(&tx->tx_list, &peer->plp_txq);
62 ptllnd_check_sends(peer);
/* Format a Portals process id into a human-readable string.  Uses a
 * ring of 8 static buffers so up to 8 results can coexist in a single
 * printf call; not thread-safe, and the 9th call reuses the 1st buffer.
 * NOTE(review): declaration of 'idx' and the return are elided here —
 * presumably a static index wrapped to 0 by the bounds check below. */
66 ptllnd_ptlid2str(ptl_process_id_t id)
68 static char strs[8][32];
71 char *str = strs[idx++];
73 if (idx >= sizeof(strs)/sizeof(strs[0]))
76 snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
/* Final peer teardown, called when the last reference is dropped.
 * Returns the receive-buffer headroom this peer accounted for
 * (per-peer credits + any lazy credits) via ptllnd_size_buffers(-nmsg),
 * then frees the peer.  All tx queues must already be empty and the
 * peer must have been closed first. */
81 ptllnd_destroy_peer(ptllnd_peer_t *peer)
83 lnet_ni_t *ni = peer->plp_ni;
84 ptllnd_ni_t *plni = ni->ni_data;
85 int nmsg = peer->plp_lazy_credits +
86 plni->plni_peer_credits;
/* Give back the buffer space reserved when the peer was created. */
88 ptllnd_size_buffers(ni, -nmsg);
90 LASSERT (peer->plp_closing);
91 LASSERT (plni->plni_npeers > 0);
92 LASSERT (list_empty(&peer->plp_txq));
93 LASSERT (list_empty(&peer->plp_noopq));
94 LASSERT (list_empty(&peer->plp_activeq));
96 LIBCFS_FREE(peer, sizeof(*peer));
/* Fail every tx on queue 'q' with -ESHUTDOWN and move it to the NI's
 * zombie list, where it will be reaped/completed later. */
100 ptllnd_abort_txs(ptllnd_ni_t *plni, struct list_head *q)
102 while (!list_empty(q)) {
103 ptllnd_tx_t *tx = list_entry(q->next, ptllnd_tx_t, tx_list);
105 tx->tx_status = -ESHUTDOWN;
106 list_del(&tx->tx_list);
107 list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
/* Close a peer connection: idempotent via the plp_closing flag.  Aborts
 * everything on the tx/noop/active queues, unhashes the peer and drops
 * the hash-table reference.  If work was still pending, warn (and dump
 * debug state when plni_debug is set).  NOTE(review): the condition at
 * original line 126 is truncated — the visible '||' suggests at least
 * one more clause (probably 'error != 0') is elided. */
112 ptllnd_close_peer(ptllnd_peer_t *peer, int error)
114 lnet_ni_t *ni = peer->plp_ni;
115 ptllnd_ni_t *plni = ni->ni_data;
/* Already closing: nothing to do (early return elided in extract). */
117 if (peer->plp_closing)
120 peer->plp_closing = 1;
122 if (!list_empty(&peer->plp_txq) ||
123 !list_empty(&peer->plp_noopq) ||
124 !list_empty(&peer->plp_activeq) ||
126 CWARN("Closing %s\n", libcfs_id2str(peer->plp_id));
127 if (plni->plni_debug)
128 ptllnd_dump_debug(ni, peer->plp_id);
131 ptllnd_abort_txs(plni, &peer->plp_txq);
132 ptllnd_abort_txs(plni, &peer->plp_noopq);
133 ptllnd_abort_txs(plni, &peer->plp_activeq);
/* Remove from the peer hash and drop its reference. */
135 list_del(&peer->plp_list);
136 ptllnd_peer_decref(peer);
/* Look up (and optionally create) the peer for LNet id 'id'.
 * Returns a referenced peer, or NULL (elided paths) on lookup miss with
 * !create or on allocation/setup failure.  Creating a peer reserves
 * receive-buffer headroom for its credits and immediately queues a
 * HELLO to it.  NOTE(review): several control-flow lines (returns,
 * 'if (create)' checks) are elided in this extract. */
140 ptllnd_find_peer(lnet_ni_t *ni, lnet_process_id_t id, int create)
142 ptllnd_ni_t *plni = ni->ni_data;
/* Hash on the host part of the NID. */
143 unsigned int hash = LNET_NIDADDR(id.nid) % plni->plni_peer_hash_size;
148 LASSERT (LNET_NIDNET(id.nid) == LNET_NIDNET(ni->ni_nid));
/* Fast path: existing peer in the hash chain. */
150 list_for_each_entry (plp, &plni->plni_peer_hash[hash], plp_list) {
151 if (plp->plp_id.nid == id.nid &&
152 plp->plp_id.pid == id.pid) {
153 ptllnd_peer_addref(plp);
161 /* New peer: check first for enough posted buffers */
163 rc = ptllnd_size_buffers(ni, plni->plni_peer_credits);
169 LIBCFS_ALLOC(plp, sizeof(*plp));
171 CERROR("Can't allocate new peer %s\n", libcfs_id2str(id));
/* Undo the buffer reservation made above on allocation failure. */
173 ptllnd_size_buffers(ni, -plni->plni_peer_credits);
179 plp->plp_ptlid.nid = LNET_NIDADDR(id.nid);
180 plp->plp_ptlid.pid = plni->plni_ptllnd_pid;
181 plp->plp_credits = 1; /* add more later when she gives me credits */
182 plp->plp_max_msg_size = plni->plni_max_msg_size; /* until I hear from her */
183 plp->plp_sent_credits = 1; /* Implicit credit for HELLO */
184 plp->plp_outstanding_credits = plni->plni_peer_credits - 1;
185 plp->plp_lazy_credits = 0;
186 plp->plp_extra_lazy_credits = 0;
189 plp->plp_sent_hello = 0;
190 plp->plp_recvd_hello = 0;
191 plp->plp_closing = 0;
192 plp->plp_refcount = 1;
193 CFS_INIT_LIST_HEAD(&plp->plp_list);
194 CFS_INIT_LIST_HEAD(&plp->plp_txq);
195 CFS_INIT_LIST_HEAD(&plp->plp_noopq);
196 CFS_INIT_LIST_HEAD(&plp->plp_activeq);
/* One ref for the hash table, plus the caller's ref (refcount = 1 above). */
198 ptllnd_peer_addref(plp);
199 list_add_tail(&plp->plp_list, &plni->plni_peer_hash[hash]);
/* First message to a new peer is always HELLO. */
201 tx = ptllnd_new_tx(plp, PTLLND_MSG_TYPE_HELLO, 0);
203 CERROR("Can't send HELLO to %s\n", libcfs_id2str(id));
204 ptllnd_close_peer(plp, -ENOMEM);
205 ptllnd_peer_decref(plp);
209 tx->tx_msg.ptlm_u.hello.kptlhm_matchbits = PTL_RESERVED_MATCHBITS;
210 tx->tx_msg.ptlm_u.hello.kptlhm_max_msg_size = plni->plni_max_msg_size;
212 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post hello %p", libcfs_id2str(id),
213 tx->tx_peer->plp_credits,
214 tx->tx_peer->plp_outstanding_credits,
215 tx->tx_peer->plp_sent_credits,
216 plni->plni_peer_credits +
217 tx->tx_peer->plp_lazy_credits, tx);
/* Count the entries on list 'q' (counter increment and return are
 * elided in this extract). */
224 ptllnd_count_q(struct list_head *q)
229 list_for_each(e, q) {
/* Map a tx type code to a printable name for debug output (the return
 * statements for each case are elided in this extract). */
237 ptllnd_tx_typestr(int type)
240 case PTLLND_RDMA_WRITE:
243 case PTLLND_RDMA_READ:
246 case PTLLND_MSG_TYPE_PUT:
249 case PTLLND_MSG_TYPE_GET:
252 case PTLLND_MSG_TYPE_IMMEDIATE:
255 case PTLLND_MSG_TYPE_NOOP:
258 case PTLLND_MSG_TYPE_HELLO:
/* Debug-dump one tx: type, peer, the bulk/request posted/done
 * timestamps (sec.usec pairs) and its status. */
267 ptllnd_debug_tx(ptllnd_tx_t *tx)
269 CDEBUG(D_WARNING, "%s %s b %ld.%06ld/%ld.%06ld"
270 " r %ld.%06ld/%ld.%06ld status %d\n",
271 ptllnd_tx_typestr(tx->tx_type),
272 libcfs_id2str(tx->tx_peer->plp_id),
273 tx->tx_bulk_posted.tv_sec, tx->tx_bulk_posted.tv_usec,
274 tx->tx_bulk_done.tv_sec, tx->tx_bulk_done.tv_usec,
275 tx->tx_req_posted.tv_sec, tx->tx_req_posted.tv_usec,
276 tx->tx_req_done.tv_sec, tx->tx_req_done.tv_usec,
/* Debug-dump a peer's state: flags, credit counters, queue depths, then
 * every tx on its txq/noopq/activeq, plus any of its txs still on the
 * NI-wide zombie and history lists.  Takes (and drops) a peer ref via
 * ptllnd_find_peer(..., 0). */
281 ptllnd_debug_peer(lnet_ni_t *ni, lnet_process_id_t id)
283 ptllnd_peer_t *plp = ptllnd_find_peer(ni, id, 0);
284 ptllnd_ni_t *plni = ni->ni_data;
288 CDEBUG(D_WARNING, "No peer %s\n", libcfs_id2str(id));
292 CWARN("%s %s%s [%d] "LPU64".%06d m "LPU64" q %d/%d/%d c %d/%d+%d(%d)\n",
294 plp->plp_recvd_hello ? "H" : "_",
295 plp->plp_closing ? "C" : "_",
297 plp->plp_stamp / 1000000, (int)(plp->plp_stamp % 1000000),
299 ptllnd_count_q(&plp->plp_txq),
300 ptllnd_count_q(&plp->plp_noopq),
301 ptllnd_count_q(&plp->plp_activeq),
302 plp->plp_credits, plp->plp_outstanding_credits, plp->plp_sent_credits,
303 plni->plni_peer_credits + plp->plp_lazy_credits);
305 CDEBUG(D_WARNING, "txq:\n");
306 list_for_each_entry (tx, &plp->plp_txq, tx_list) {
310 CDEBUG(D_WARNING, "noopq:\n");
311 list_for_each_entry (tx, &plp->plp_noopq, tx_list) {
315 CDEBUG(D_WARNING, "activeq:\n");
316 list_for_each_entry (tx, &plp->plp_activeq, tx_list) {
/* Zombie and history lists are NI-wide: filter to this peer's id. */
320 CDEBUG(D_WARNING, "zombies:\n");
321 list_for_each_entry (tx, &plni->plni_zombie_txs, tx_list) {
322 if (tx->tx_peer->plp_id.nid == id.nid &&
323 tx->tx_peer->plp_id.pid == id.pid)
327 CDEBUG(D_WARNING, "history:\n");
328 list_for_each_entry (tx, &plni->plni_tx_history, tx_list) {
329 if (tx->tx_peer->plp_id.nid == id.nid &&
330 tx->tx_peer->plp_id.pid == id.pid)
334 ptllnd_peer_decref(plp);
/* Dump both the per-peer debug state and the global message history. */
338 ptllnd_dump_debug(lnet_ni_t *ni, lnet_process_id_t id)
340 ptllnd_debug_peer(ni, id);
341 ptllnd_dump_history();
/* lnd_notify() handler: establish a connection to 'nid'.  Per the
 * comment below this is only used to connect to routers at startup.
 * Creates/finds the peer, then synchronously waits for its HELLO reply,
 * warning if the wait exceeds plni_long_wait (ms — note the w/1000
 * conversion at the comparison). */
345 ptllnd_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive)
347 lnet_process_id_t id;
349 time_t start = cfs_time_current_sec();
350 ptllnd_ni_t *plni = ni->ni_data;
351 int w = plni->plni_long_wait;
353 /* This is only actually used to connect to routers at startup! */
/* Routers run as the well-known server pid. */
357 id.pid = LUSTRE_SRV_LNET_PID;
359 peer = ptllnd_find_peer(ni, id, 1);
363 /* wait for the peer to reply */
364 while (!peer->plp_recvd_hello) {
365 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
366 CWARN("Waited %ds to connect to %s\n",
367 (int)(cfs_time_current_sec() - start),
375 ptllnd_peer_decref(peer);
/* lnd_setasync() handler: adjust the receive-buffer headroom ("lazy
 * credits") reserved for peer 'id' by 'nasync' messages.  Decreases are
 * only accounted (extra_lazy_credits) rather than applied immediately,
 * because the peer may already hold credits against those buffers. */
379 ptllnd_setasync(lnet_ni_t *ni, lnet_process_id_t id, int nasync)
/* Only create the peer when we're adding headroom. */
381 ptllnd_peer_t *peer = ptllnd_find_peer(ni, id, nasync > 0);
387 LASSERT (peer->plp_lazy_credits >= 0);
388 LASSERT (peer->plp_extra_lazy_credits >= 0);
390 /* If nasync < 0, we're being told we can reduce the total message
391 * headroom. We can't do this right now because our peer might already
392 * have credits for the extra buffers, so we just account the extra
393 * headroom in case we need it later and only destroy buffers when the
396 * Note that the following condition handles this case, where it
397 * actually increases the extra lazy credit counter. */
399 if (nasync <= peer->plp_extra_lazy_credits) {
400 peer->plp_extra_lazy_credits -= nasync;
404 LASSERT (nasync > 0);
/* Consume banked headroom first; grow buffers for the remainder. */
406 nasync -= peer->plp_extra_lazy_credits;
407 peer->plp_extra_lazy_credits = 0;
409 rc = ptllnd_size_buffers(ni, nasync);
411 peer->plp_lazy_credits += nasync;
412 peer->plp_outstanding_credits += nasync;
/* Simple rotate-left-and-add checksum over 'nob' bytes at 'ptr'.
 * Returns 1 instead of 0 so that 0 can mean "no checksum" on the wire. */
419 ptllnd_cksum (void *ptr, int nob)
425 sum = ((sum << 1) | (sum >> 31)) + *c++;
427 /* ensure I don't return 0 (== no checksum) */
428 return (sum == 0) ? 1 : sum;
/* Allocate and initialise a tx of the given message type, sized to hold
 * 'payload_nob' bytes of immediate payload (only IMMEDIATE carries
 * payload).  Takes a peer reference on success; returns NULL (elided)
 * on allocation failure.  The wire header is fully initialised except
 * ptlm_dststamp, which is re-set just before send (see check_sends). */
432 ptllnd_new_tx(ptllnd_peer_t *peer, int type, int payload_nob)
434 lnet_ni_t *ni = peer->plp_ni;
435 ptllnd_ni_t *plni = ni->ni_data;
439 CDEBUG(D_NET, "peer=%p type=%d payload=%d\n", peer, type, payload_nob);
/* Compute the on-wire message size per type. */
445 case PTLLND_RDMA_WRITE:
446 case PTLLND_RDMA_READ:
447 LASSERT (payload_nob == 0);
451 case PTLLND_MSG_TYPE_PUT:
452 case PTLLND_MSG_TYPE_GET:
453 LASSERT (payload_nob == 0);
454 msgsize = offsetof(kptl_msg_t, ptlm_u) +
455 sizeof(kptl_rdma_msg_t);
458 case PTLLND_MSG_TYPE_IMMEDIATE:
459 msgsize = offsetof(kptl_msg_t,
460 ptlm_u.immediate.kptlim_payload[payload_nob]);
463 case PTLLND_MSG_TYPE_NOOP:
464 LASSERT (payload_nob == 0);
465 msgsize = offsetof(kptl_msg_t, ptlm_u);
468 case PTLLND_MSG_TYPE_HELLO:
469 LASSERT (payload_nob == 0);
470 msgsize = offsetof(kptl_msg_t, ptlm_u) +
471 sizeof(kptl_hello_msg_t);
/* Round up to 8-byte alignment; must fit the peer's advertised max. */
475 msgsize = (msgsize + 7) & ~7;
476 LASSERT (msgsize <= peer->plp_max_msg_size);
/* tx struct and message are allocated together (tx_msg is the tail). */
478 LIBCFS_ALLOC(tx, offsetof(ptllnd_tx_t, tx_msg) + msgsize);
481 CERROR("Can't allocate msg type %d for %s\n",
482 type, libcfs_id2str(peer->plp_id));
486 CFS_INIT_LIST_HEAD(&tx->tx_list);
489 tx->tx_lnetmsg = tx->tx_lnetreplymsg = NULL;
492 tx->tx_reqmdh = PTL_INVALID_HANDLE;
493 tx->tx_bulkmdh = PTL_INVALID_HANDLE;
494 tx->tx_msgsize = msgsize;
495 tx->tx_completing = 0;
498 memset(&tx->tx_bulk_posted, 0, sizeof(tx->tx_bulk_posted));
499 memset(&tx->tx_bulk_done, 0, sizeof(tx->tx_bulk_done));
500 memset(&tx->tx_req_posted, 0, sizeof(tx->tx_req_posted));
501 memset(&tx->tx_req_done, 0, sizeof(tx->tx_req_done));
/* Fill in the common wire header. */
504 tx->tx_msg.ptlm_magic = PTLLND_MSG_MAGIC;
505 tx->tx_msg.ptlm_version = PTLLND_MSG_VERSION;
506 tx->tx_msg.ptlm_type = type;
507 tx->tx_msg.ptlm_credits = 0;
508 tx->tx_msg.ptlm_nob = msgsize;
509 tx->tx_msg.ptlm_cksum = 0;
510 tx->tx_msg.ptlm_srcnid = ni->ni_nid;
511 tx->tx_msg.ptlm_srcstamp = plni->plni_stamp;
512 tx->tx_msg.ptlm_dstnid = peer->plp_id.nid;
513 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
514 tx->tx_msg.ptlm_srcpid = the_lnet.ln_pid;
515 tx->tx_msg.ptlm_dstpid = peer->plp_id.pid;
/* tx holds a ref on its peer until ptllnd_cull_tx_history() drops it. */
518 ptllnd_peer_addref(peer);
521 CDEBUG(D_NET, "tx=%p\n",tx);
/* Abort a tx's MD ('mdh' points at tx_reqmdh or tx_bulkmdh): keep
 * unlinking until the handle is invalidated.  Without Cray-style unlink
 * semantics PTL_OK means no UNLINKED event will follow; PTL_MD_IN_USE
 * means we must spin until ptllnd_tx_event() invalidates the handle,
 * warning after plni_long_wait ms. */
527 ptllnd_abort_tx(ptllnd_tx_t *tx, ptl_handle_md_t *mdh)
529 ptllnd_peer_t *peer = tx->tx_peer;
530 lnet_ni_t *ni = peer->plp_ni;
532 time_t start = cfs_time_current_sec();
533 ptllnd_ni_t *plni = ni->ni_data;
534 int w = plni->plni_long_wait;
536 while (!PtlHandleIsEqual(*mdh, PTL_INVALID_HANDLE)) {
537 rc = PtlMDUnlink(*mdh);
538 #ifndef LUSTRE_PORTALS_UNLINK_SEMANTICS
539 if (rc == PTL_OK) /* unlink successful => no unlinked event */
541 LASSERT (rc == PTL_MD_IN_USE);
543 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
544 CWARN("Waited %ds to abort tx to %s\n",
545 (int)(cfs_time_current_sec() - start),
546 libcfs_id2str(peer->plp_id));
549 /* Wait for ptllnd_tx_event() to invalidate */
/* Trim the completed-tx history list down to plni_max_tx_history
 * entries, oldest first; each reaped tx drops its peer ref and is
 * freed. */
555 ptllnd_cull_tx_history(ptllnd_ni_t *plni)
557 int max = plni->plni_max_tx_history;
559 while (plni->plni_ntx_history > max) {
560 ptllnd_tx_t *tx = list_entry(plni->plni_tx_history.next,
561 ptllnd_tx_t, tx_list);
562 list_del(&tx->tx_list);
564 ptllnd_peer_decref(tx->tx_peer);
566 LIBCFS_FREE(tx, offsetof(ptllnd_tx_t, tx_msg) + tx->tx_msgsize);
568 LASSERT (plni->plni_ntxs > 0);
570 plni->plni_ntx_history--;
/* Complete a tx: guard against re-entry (tx_completing), close the peer
 * on error, abort any live MDs, free the iov, finalize the associated
 * LNet message(s), then park the tx on the history list.  For GETs,
 * the originating GET is always finalized with success and the REPLY
 * carries the real status (see the "Simulate GET success" comment). */
575 ptllnd_tx_done(ptllnd_tx_t *tx)
577 ptllnd_peer_t *peer = tx->tx_peer;
578 lnet_ni_t *ni = peer->plp_ni;
579 ptllnd_ni_t *plni = ni->ni_data;
581 /* CAVEAT EMPTOR: If this tx is being aborted, I'll continue to get
582 * events for this tx until it's unlinked. So I set tx_completing to
583 * flag the tx is getting handled */
585 if (tx->tx_completing)
588 tx->tx_completing = 1;
590 if (!list_empty(&tx->tx_list))
591 list_del_init(&tx->tx_list);
593 if (tx->tx_status != 0) {
594 if (plni->plni_debug) {
595 CERROR("Completing tx for %s with error %d\n",
596 libcfs_id2str(peer->plp_id), tx->tx_status);
/* Any tx error is fatal for the connection. */
599 ptllnd_close_peer(peer, tx->tx_status);
602 ptllnd_abort_tx(tx, &tx->tx_reqmdh);
603 ptllnd_abort_tx(tx, &tx->tx_bulkmdh);
605 if (tx->tx_niov > 0) {
606 LIBCFS_FREE(tx->tx_iov, tx->tx_niov * sizeof(*tx->tx_iov));
610 if (tx->tx_lnetreplymsg != NULL) {
611 LASSERT (tx->tx_type == PTLLND_MSG_TYPE_GET);
612 LASSERT (tx->tx_lnetmsg != NULL);
613 /* Simulate GET success always */
614 lnet_finalize(ni, tx->tx_lnetmsg, 0);
615 CDEBUG(D_NET, "lnet_finalize(tx_lnetreplymsg=%p)\n",tx->tx_lnetreplymsg);
616 lnet_finalize(ni, tx->tx_lnetreplymsg, tx->tx_status);
617 } else if (tx->tx_lnetmsg != NULL) {
618 lnet_finalize(ni, tx->tx_lnetmsg, tx->tx_status);
/* Retire to history; culled later by ptllnd_cull_tx_history(). */
621 plni->plni_ntx_history++;
622 list_add_tail(&tx->tx_list, &plni->plni_tx_history);
624 ptllnd_cull_tx_history(plni);
/* Build tx->tx_iov (a ptl_md_iovec_t array) describing bytes
 * [offset, offset+len) of the caller's iovec.  Leading fully-skipped
 * iovec entries are consumed first; the residual offset applies only to
 * the first copied entry.  If the allocated array turns out larger than
 * needed it is freed and rebuilt at the exact size (see comment below).
 * NOTE(review): the allocation-failure and return paths are elided in
 * this extract. */
628 ptllnd_set_txiov(ptllnd_tx_t *tx,
629 unsigned int niov, struct iovec *iov,
630 unsigned int offset, unsigned int len)
632 ptl_md_iovec_t *piov;
641 * Remove iovec's at the beginning that
642 * are skipped because of the offset.
643 * Adjust the offset accordingly
647 if (offset < iov->iov_len)
649 offset -= iov->iov_len;
655 int temp_offset = offset;
657 LIBCFS_ALLOC(piov, niov * sizeof(*piov));
661 for (npiov = 0;; npiov++) {
662 LASSERT (npiov < niov);
663 LASSERT (iov->iov_len >= temp_offset);
/* temp_offset is non-zero only for the first fragment. */
665 piov[npiov].iov_base = iov[npiov].iov_base + temp_offset;
666 piov[npiov].iov_len = iov[npiov].iov_len - temp_offset;
668 if (piov[npiov].iov_len >= resid) {
669 piov[npiov].iov_len = resid;
673 resid -= piov[npiov].iov_len;
683 /* Dang! The piov I allocated was too big and it's a drag to
684 * have to maintain separate 'allocated' and 'used' sizes, so
685 * I'll just do it again; NB this doesn't happen normally... */
686 LIBCFS_FREE(piov, niov * sizeof(*piov));
/* Point a Portals MD at the tx's buffer: no iov (elided zero-length
 * case), a single contiguous segment, or a gather list with
 * PTL_MD_IOVEC set. */
692 ptllnd_set_md_buffer(ptl_md_t *md, ptllnd_tx_t *tx)
694 unsigned int niov = tx->tx_niov;
695 ptl_md_iovec_t *iov = tx->tx_iov;
697 LASSERT ((md->options & PTL_MD_IOVEC) == 0);
702 } else if (niov == 1) {
703 md->start = iov[0].iov_base;
704 md->length = iov[0].iov_len;
708 md->options |= PTL_MD_IOVEC;
/* Post a receive buffer: attach a wildcard ME (any peer) on the LND
 * portal matching LNET_MSG_MATCHBITS, then attach the buffer MD with an
 * infinite threshold and max_size = max message size so many messages
 * land in one buffer.  On MD-attach failure the ME is unlinked and the
 * posted count rolled back. */
713 ptllnd_post_buffer(ptllnd_buffer_t *buf)
715 lnet_ni_t *ni = buf->plb_ni;
716 ptllnd_ni_t *plni = ni->ni_data;
717 ptl_process_id_t anyid = {
721 .start = buf->plb_buffer,
722 .length = plni->plni_buffer_size,
723 .threshold = PTL_MD_THRESH_INF,
724 .max_size = plni->plni_max_msg_size,
725 .options = (PTLLND_MD_OPTIONS |
726 PTL_MD_OP_PUT | PTL_MD_MAX_SIZE |
727 PTL_MD_LOCAL_ALIGN8),
728 .user_ptr = ptllnd_obj2eventarg(buf, PTLLND_EVENTARG_TYPE_BUF),
729 .eq_handle = plni->plni_eqh};
733 LASSERT (!buf->plb_posted);
735 rc = PtlMEAttach(plni->plni_nih, plni->plni_portal,
736 anyid, LNET_MSG_MATCHBITS, 0,
737 PTL_UNLINK, PTL_INS_AFTER, &meh);
739 CERROR("PtlMEAttach failed: %s(%d)\n",
740 ptllnd_errtype2str(rc), rc);
745 plni->plni_nposted_buffers++;
747 rc = PtlMDAttach(meh, md, LNET_UNLINK, &buf->plb_md);
751 CERROR("PtlMDAttach failed: %s(%d)\n",
752 ptllnd_errtype2str(rc), rc);
/* Roll back: the ME must not outlive a failed MD attach. */
755 plni->plni_nposted_buffers--;
757 rc = PtlMEUnlink(meh);
758 LASSERT (rc == PTL_OK);
/* Decide whether an explicit NOOP must be sent to return credits to the
 * peer.  Not needed (early return, elided) unless HELLO has been sent,
 * we hold a send credit, no NOOP is already queued, and the number of
 * credits owed has reached the high-water mark.  Even then, a NOOP is
 * only needed if there's no tx to piggyback on or only the reserved
 * last credit remains. */
764 ptllnd_peer_send_noop (ptllnd_peer_t *peer)
766 ptllnd_ni_t *plni = peer->plp_ni->ni_data;
768 if (!peer->plp_sent_hello ||
769 peer->plp_credits == 0 ||
770 !list_empty(&peer->plp_noopq) ||
771 peer->plp_outstanding_credits < PTLLND_CREDIT_HIGHWATER(plni))
774 /* No tx to piggyback NOOP onto or no credit to send a tx */
775 return (list_empty(&peer->plp_txq) || peer->plp_credits == 1);
/* The send state machine for one peer.  Generates a NOOP if credits
 * must be returned, then drains the noopq (priority) and txq subject to
 * the credit rules: nothing without a credit, and the last credit is
 * reserved for NOOP/HELLO so the peer can always return credits to us.
 * Each message piggybacks all outstanding credits, is (optionally)
 * checksummed, bound to an MD and PtlPut to the peer's LND portal.
 * NOTE(review): this extract is heavily truncated — the enclosing send
 * loop, several returns and error paths are elided. */
779 ptllnd_check_sends(ptllnd_peer_t *peer)
781 ptllnd_ni_t *plni = peer->plp_ni->ni_data;
787 CDEBUG(D_NET, "%s: [%d/%d+%d(%d)\n",
788 libcfs_id2str(peer->plp_id), peer->plp_credits,
789 peer->plp_outstanding_credits, peer->plp_sent_credits,
790 plni->plni_peer_credits + peer->plp_lazy_credits);
/* Queue an explicit credit-return NOOP if one is needed. */
792 if (ptllnd_peer_send_noop(peer)) {
793 tx = ptllnd_new_tx(peer, PTLLND_MSG_TYPE_NOOP, 0);
794 CDEBUG(D_NET, "NOOP tx=%p\n",tx);
796 CERROR("Can't return credits to %s\n",
797 libcfs_id2str(peer->plp_id));
799 ptllnd_set_tx_deadline(tx);
800 list_add_tail(&tx->tx_list, &peer->plp_noopq);
/* NOOPs take priority over the regular tx queue. */
805 if (!list_empty(&peer->plp_noopq)) {
806 LASSERT (peer->plp_sent_hello);
807 tx = list_entry(peer->plp_noopq.next,
808 ptllnd_tx_t, tx_list);
809 } else if (!list_empty(&peer->plp_txq)) {
810 tx = list_entry(peer->plp_txq.next,
811 ptllnd_tx_t, tx_list);
813 /* nothing to send right now */
817 LASSERT (tx->tx_msgsize > 0);
/* Credit-accounting invariants. */
819 LASSERT (peer->plp_outstanding_credits >= 0);
820 LASSERT (peer->plp_sent_credits >= 0);
821 LASSERT (peer->plp_outstanding_credits + peer->plp_sent_credits
822 <= plni->plni_peer_credits + peer->plp_lazy_credits);
823 LASSERT (peer->plp_credits >= 0);
825 /* say HELLO first */
826 if (!peer->plp_sent_hello) {
827 LASSERT (list_empty(&peer->plp_noopq));
828 LASSERT (tx->tx_type == PTLLND_MSG_TYPE_HELLO);
830 peer->plp_sent_hello = 1;
833 if (peer->plp_credits == 0) { /* no credits */
834 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: no creds for %p",
835 libcfs_id2str(peer->plp_id),
837 peer->plp_outstanding_credits,
838 peer->plp_sent_credits,
839 plni->plni_peer_credits +
840 peer->plp_lazy_credits, tx);
844 /* Last/Initial credit reserved for NOOP/HELLO */
845 if (peer->plp_credits == 1 &&
846 tx->tx_type != PTLLND_MSG_TYPE_NOOP &&
847 tx->tx_type != PTLLND_MSG_TYPE_HELLO) {
848 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: too few creds for %p",
849 libcfs_id2str(peer->plp_id),
851 peer->plp_outstanding_credits,
852 peer->plp_sent_credits,
853 plni->plni_peer_credits +
854 peer->plp_lazy_credits, tx);
/* Commit to sending: move tx from its queue to the active queue. */
858 list_del(&tx->tx_list);
859 list_add_tail(&tx->tx_list, &peer->plp_activeq);
861 CDEBUG(D_NET, "Sending at TX=%p type=%s (%d)\n",tx,
862 ptllnd_msgtype2str(tx->tx_type),tx->tx_type);
/* A queued NOOP may have become unnecessary in the meantime. */
864 if (tx->tx_type == PTLLND_MSG_TYPE_NOOP &&
865 !ptllnd_peer_send_noop(peer)) {
871 /* Set stamp at the last minute; on a new peer, I don't know it
872 * until I receive the HELLO back */
873 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
876 * Return all the credits we have
878 tx->tx_msg.ptlm_credits = peer->plp_outstanding_credits;
879 peer->plp_sent_credits += peer->plp_outstanding_credits;
880 peer->plp_outstanding_credits = 0;
/* Checksum covers the header only (up to ptlm_u). */
887 if (plni->plni_checksum)
888 tx->tx_msg.ptlm_cksum =
889 ptllnd_cksum(&tx->tx_msg,
890 offsetof(kptl_msg_t, ptlm_u));
892 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
893 md.eq_handle = plni->plni_eqh;
895 md.options = PTLLND_MD_OPTIONS;
896 md.start = &tx->tx_msg;
897 md.length = tx->tx_msgsize;
899 rc = PtlMDBind(plni->plni_nih, md, LNET_UNLINK, &mdh);
901 CERROR("PtlMDBind for %s failed: %s(%d)\n",
902 libcfs_id2str(peer->plp_id),
903 ptllnd_errtype2str(rc), rc);
904 tx->tx_status = -EIO;
909 LASSERT (tx->tx_type != PTLLND_RDMA_WRITE &&
910 tx->tx_type != PTLLND_RDMA_READ);
913 gettimeofday(&tx->tx_req_posted, NULL);
915 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: %s %p c %d",
916 libcfs_id2str(peer->plp_id),
918 peer->plp_outstanding_credits,
919 peer->plp_sent_credits,
920 plni->plni_peer_credits +
921 peer->plp_lazy_credits,
922 ptllnd_msgtype2str(tx->tx_type), tx,
923 tx->tx_msg.ptlm_credits);
/* Fire the message at the peer's LND portal with the fixed matchbits. */
925 rc = PtlPut(mdh, PTL_NOACK_REQ, peer->plp_ptlid,
926 plni->plni_portal, 0, LNET_MSG_MATCHBITS, 0, 0);
928 CERROR("PtlPut for %s failed: %s(%d)\n",
929 libcfs_id2str(peer->plp_id),
930 ptllnd_errtype2str(rc), rc);
931 tx->tx_status = -EIO;
/* Passive RDMA setup for an outgoing PUT or GET request: allocate a tx,
 * describe the local buffer, wait for the peer's HELLO (to validate
 * plp_match), pick fresh matchbits, post ME+MD for the peer to
 * PtlPut/PtlGet into/from, then fill the rdma request message
 * (eventually posted via ptllnd_post_tx — tail of function elided).
 * For GET the MD accepts the peer's PUT of the reply data; for PUT it
 * allows the peer's GET of our data. */
939 ptllnd_passive_rdma(ptllnd_peer_t *peer, int type, lnet_msg_t *msg,
940 unsigned int niov, struct iovec *iov,
941 unsigned int offset, unsigned int len)
943 lnet_ni_t *ni = peer->plp_ni;
944 ptllnd_ni_t *plni = ni->ni_data;
945 ptllnd_tx_t *tx = ptllnd_new_tx(peer, type, 0);
955 CDEBUG(D_NET, "niov=%d offset=%d len=%d\n",niov,offset,len);
957 LASSERT (type == PTLLND_MSG_TYPE_GET ||
958 type == PTLLND_MSG_TYPE_PUT);
961 CERROR("Can't allocate %s tx for %s\n",
962 type == PTLLND_MSG_TYPE_GET ? "GET" : "PUT/REPLY",
963 libcfs_id2str(peer->plp_id));
967 rc = ptllnd_set_txiov(tx, niov, iov, offset, len);
969 CERROR ("Can't allocate iov %d for %s\n",
970 niov, libcfs_id2str(peer->plp_id));
975 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
976 md.eq_handle = plni->plni_eqh;
979 md.options = PTLLND_MD_OPTIONS;
/* GET: peer PUTs reply data to us; PUT: peer GETs our data. */
980 if(type == PTLLND_MSG_TYPE_GET)
981 md.options |= PTL_MD_OP_PUT | PTL_MD_ACK_DISABLE;
983 md.options |= PTL_MD_OP_GET;
984 ptllnd_set_md_buffer(&md, tx);
986 start = cfs_time_current_sec();
987 w = plni->plni_long_wait;
989 while (!peer->plp_recvd_hello) { /* wait to validate plp_match */
990 if (peer->plp_closing) {
994 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
995 CWARN("Waited %ds to connect to %s\n",
996 (int)(cfs_time_current_sec() - start),
997 libcfs_id2str(peer->plp_id));
/* Matchbits below PTL_RESERVED_MATCHBITS are reserved; skip them. */
1003 if (peer->plp_match < PTL_RESERVED_MATCHBITS)
1004 peer->plp_match = PTL_RESERVED_MATCHBITS;
1005 matchbits = peer->plp_match++;
1007 rc = PtlMEAttach(plni->plni_nih, plni->plni_portal, peer->plp_ptlid,
1008 matchbits, 0, PTL_UNLINK, PTL_INS_BEFORE, &meh);
1010 CERROR("PtlMEAttach for %s failed: %s(%d)\n",
1011 libcfs_id2str(peer->plp_id),
1012 ptllnd_errtype2str(rc), rc);
1017 gettimeofday(&tx->tx_bulk_posted, NULL);
1019 rc = PtlMDAttach(meh, md, LNET_UNLINK, &mdh);
1021 CERROR("PtlMDAttach for %s failed: %s(%d)\n",
1022 libcfs_id2str(peer->plp_id),
1023 ptllnd_errtype2str(rc), rc);
/* Roll back the ME on MD-attach failure. */
1024 rc2 = PtlMEUnlink(meh);
1025 LASSERT (rc2 == PTL_OK);
1029 tx->tx_bulkmdh = mdh;
1032 * We need to set the stamp here because it
1033 * we could have received a HELLO above that set
1036 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
1038 tx->tx_msg.ptlm_u.rdma.kptlrm_hdr = msg->msg_hdr;
1039 tx->tx_msg.ptlm_u.rdma.kptlrm_matchbits = matchbits;
/* GETs need a REPLY message created up front for lnet_parse. */
1041 if (type == PTLLND_MSG_TYPE_GET) {
1042 tx->tx_lnetreplymsg = lnet_create_reply_msg(ni, msg);
1043 if (tx->tx_lnetreplymsg == NULL) {
1044 CERROR("Can't create reply for GET to %s\n",
1045 libcfs_id2str(msg->msg_target));
1051 tx->tx_lnetmsg = msg;
1052 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post passive %s p %d %p",
1053 libcfs_id2str(msg->msg_target),
1054 peer->plp_credits, peer->plp_outstanding_credits,
1055 peer->plp_sent_credits,
1056 plni->plni_peer_credits + peer->plp_lazy_credits,
1057 lnet_msgtyp2str(msg->msg_type),
1058 (le32_to_cpu(msg->msg_type) == LNET_MSG_PUT) ?
1059 le32_to_cpu(msg->msg_hdr.msg.put.ptl_index) :
1060 (le32_to_cpu(msg->msg_type) == LNET_MSG_GET) ?
1061 le32_to_cpu(msg->msg_hdr.msg.get.ptl_index) : -1,
/* Active RDMA: we are the initiator of the bulk transfer against the
 * passive buffer the peer posted under 'matchbits'.  READ = PtlGet the
 * peer's data; WRITE = PtlPut our data (the trailing hdr_data encodes
 * RDMA_OK/RDMA_FAIL so the peer learns the outcome).  On failure the
 * tx is completed with -EIO (error path partly elided), which closes
 * the peer via ptllnd_tx_done(). */
1072 ptllnd_active_rdma(ptllnd_peer_t *peer, int type,
1073 lnet_msg_t *msg, __u64 matchbits,
1074 unsigned int niov, struct iovec *iov,
1075 unsigned int offset, unsigned int len)
1077 lnet_ni_t *ni = peer->plp_ni;
1078 ptllnd_ni_t *plni = ni->ni_data;
1079 ptllnd_tx_t *tx = ptllnd_new_tx(peer, type, 0);
1081 ptl_handle_md_t mdh;
1084 LASSERT (type == PTLLND_RDMA_READ ||
1085 type == PTLLND_RDMA_WRITE);
1088 CERROR("Can't allocate tx for RDMA %s with %s\n",
1089 (type == PTLLND_RDMA_WRITE) ? "write" : "read",
1090 libcfs_id2str(peer->plp_id));
1091 ptllnd_close_peer(peer, -ENOMEM);
1095 rc = ptllnd_set_txiov(tx, niov, iov, offset, len);
1097 CERROR ("Can't allocate iov %d for %s\n",
1098 niov, libcfs_id2str(peer->plp_id));
1103 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
1104 md.eq_handle = plni->plni_eqh;
1106 md.options = PTLLND_MD_OPTIONS;
/* READ needs 2 events on the MD (send of the GET + reply); WRITE 1. */
1107 md.threshold = (type == PTLLND_RDMA_READ) ? 2 : 1;
1109 ptllnd_set_md_buffer(&md, tx);
1111 rc = PtlMDBind(plni->plni_nih, md, LNET_UNLINK, &mdh);
1113 CERROR("PtlMDBind for %s failed: %s(%d)\n",
1114 libcfs_id2str(peer->plp_id),
1115 ptllnd_errtype2str(rc), rc);
1120 tx->tx_bulkmdh = mdh;
1121 tx->tx_lnetmsg = msg;
1123 ptllnd_set_tx_deadline(tx);
1124 list_add_tail(&tx->tx_list, &peer->plp_activeq);
1125 gettimeofday(&tx->tx_bulk_posted, NULL);
1127 if (type == PTLLND_RDMA_READ)
1128 rc = PtlGet(mdh, peer->plp_ptlid,
1129 plni->plni_portal, 0, matchbits, 0);
1131 rc = PtlPut(mdh, PTL_NOACK_REQ, peer->plp_ptlid,
1132 plni->plni_portal, 0, matchbits, 0,
/* hdr_data tells the peer whether the sink accepted the PUT. */
1133 (msg == NULL) ? PTLLND_RDMA_FAIL : PTLLND_RDMA_OK);
1138 CERROR("Can't initiate RDMA with %s: %s(%d)\n",
1139 libcfs_id2str(peer->plp_id),
1140 ptllnd_errtype2str(rc), rc);
/* Detach the lnet msg so tx_done doesn't finalize it here. */
1142 tx->tx_lnetmsg = NULL;
1145 ptllnd_tx_done(tx); /* this will close peer */
/* lnd_send() entry point.  Routes each LNet message type: small
 * payloads go as a single IMMEDIATE message (payload copied inline to
 * avoid a fragmented send); large GETs/PUTs become passive-RDMA
 * requests.  Userspace ptllnd only talks to kernel peers (pid check
 * below) and only handles iovecs, never kiovs.
 * NOTE(review): switch cases for ACK/GET labels and several
 * returns/breaks are elided in this extract. */
1150 ptllnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *msg)
1152 ptllnd_ni_t *plni = ni->ni_data;
1158 LASSERT (!msg->msg_routing);
1159 LASSERT (msg->msg_kiov == NULL);
1161 LASSERT (msg->msg_niov <= PTL_MD_MAX_IOV); /* !!! */
1163 CDEBUG(D_NET, "%s [%d]+%d,%d -> %s%s\n",
1164 lnet_msgtyp2str(msg->msg_type),
1165 msg->msg_niov, msg->msg_offset, msg->msg_len,
1166 libcfs_nid2str(msg->msg_target.nid),
1167 msg->msg_target_is_router ? "(rtr)" : "");
/* Refuse userspace peers: this LND's wire peers run in the kernel. */
1169 if ((msg->msg_target.pid & LNET_PID_USERFLAG) != 0) {
1170 CERROR("Can't send to non-kernel peer %s\n",
1171 libcfs_id2str(msg->msg_target));
1172 return -EHOSTUNREACH;
1175 plp = ptllnd_find_peer(ni, msg->msg_target, 1);
1179 switch (msg->msg_type) {
/* (case label elided) zero-length message: send as IMMEDIATE. */
1184 LASSERT (msg->msg_len == 0);
1185 break; /* send IMMEDIATE */
/* (GET case) via a router the sink buffer isn't ours: go IMMEDIATE. */
1188 if (msg->msg_target_is_router)
1189 break; /* send IMMEDIATE */
1191 nob = msg->msg_md->md_length;
1192 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[nob]);
1193 if (nob <= plni->plni_max_msg_size)
1196 LASSERT ((msg->msg_md->md_options & LNET_MD_KIOV) == 0);
1197 rc = ptllnd_passive_rdma(plp, PTLLND_MSG_TYPE_GET, msg,
1198 msg->msg_md->md_niov,
1199 msg->msg_md->md_iov.iov,
1200 0, msg->msg_md->md_length);
1201 ptllnd_peer_decref(plp);
1204 case LNET_MSG_REPLY:
1207 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[nob]);
1208 if (nob <= plp->plp_max_msg_size)
1209 break; /* send IMMEDIATE */
1211 rc = ptllnd_passive_rdma(plp, PTLLND_MSG_TYPE_PUT, msg,
1212 msg->msg_niov, msg->msg_iov,
1213 msg->msg_offset, msg->msg_len);
1214 ptllnd_peer_decref(plp);
1219 * NB copy the payload so we don't have to do a fragmented send */
1221 tx = ptllnd_new_tx(plp, PTLLND_MSG_TYPE_IMMEDIATE, msg->msg_len);
1223 CERROR("Can't allocate tx for lnet type %d to %s\n",
1224 msg->msg_type, libcfs_id2str(msg->msg_target));
1225 ptllnd_peer_decref(plp);
1229 lnet_copy_iov2flat(tx->tx_msgsize, &tx->tx_msg,
1230 offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload),
1231 msg->msg_niov, msg->msg_iov, msg->msg_offset,
1233 tx->tx_msg.ptlm_u.immediate.kptlim_hdr = msg->msg_hdr;
1235 tx->tx_lnetmsg = msg;
1236 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post immediate %s p %d %p",
1237 libcfs_id2str(msg->msg_target),
1238 plp->plp_credits, plp->plp_outstanding_credits,
1239 plp->plp_sent_credits,
1240 plni->plni_peer_credits + plp->plp_lazy_credits,
1241 lnet_msgtyp2str(msg->msg_type),
1242 (le32_to_cpu(msg->msg_type) == LNET_MSG_PUT) ?
1243 le32_to_cpu(msg->msg_hdr.msg.put.ptl_index) :
1244 (le32_to_cpu(msg->msg_type) == LNET_MSG_GET) ?
1245 le32_to_cpu(msg->msg_hdr.msg.get.ptl_index) : -1,
1248 ptllnd_peer_decref(plp);
/* Finish an rx: the buffer slot becomes returnable, so bump the
 * peer's outstanding (owed) credits and kick the send machine, which
 * may piggyback or NOOP the credit back. */
1253 ptllnd_rx_done(ptllnd_rx_t *rx)
1255 ptllnd_peer_t *plp = rx->rx_peer;
1256 ptllnd_ni_t *plni = plp->plp_ni->ni_data;
1258 plp->plp_outstanding_credits++;
1260 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: rx=%p done\n",
1261 libcfs_id2str(plp->plp_id),
1262 plp->plp_credits, plp->plp_outstanding_credits,
1263 plp->plp_sent_credits,
1264 plni->plni_peer_credits + plp->plp_lazy_credits, rx);
1266 ptllnd_check_sends(plp);
1268 LASSERT (plni->plni_nrxs > 0);
/* lnd_eager_recv() handler: unused stub — see the comment; receives
 * only block for router buffers, which don't apply here. */
1273 ptllnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
1274 void **new_privatep)
1276 /* Shouldn't get here; recvs only block for router buffers */
/* lnd_recv() entry point, dispatching on the incoming wire message
 * type.  IMMEDIATE: copy the inline payload straight into the caller's
 * iov and finalize.  PUT: actively GET the sender's bulk data.  GET:
 * actively PUT the reply data (or signal failure with a NULL-msg PUT).
 * NOTE(review): the rx_done/return tail and some case labels are
 * elided in this extract. */
1282 ptllnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
1283 int delayed, unsigned int niov,
1284 struct iovec *iov, lnet_kiov_t *kiov,
1285 unsigned int offset, unsigned int mlen, unsigned int rlen)
1287 ptllnd_rx_t *rx = private;
1291 LASSERT (kiov == NULL);
1292 LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
1294 switch (rx->rx_msg->ptlm_type) {
1298 case PTLLND_MSG_TYPE_IMMEDIATE:
1299 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[mlen]);
1300 if (nob > rx->rx_nob) {
1301 CERROR("Immediate message from %s too big: %d(%d)\n",
1302 libcfs_id2str(rx->rx_peer->plp_id),
1307 lnet_copy_flat2iov(niov, iov, offset,
1308 rx->rx_nob, rx->rx_msg,
1309 offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload),
1311 lnet_finalize(ni, msg, 0);
1314 case PTLLND_MSG_TYPE_PUT:
/* Sender's PUT data: pull it with an active RDMA READ. */
1315 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_READ, msg,
1316 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
1317 niov, iov, offset, mlen);
1320 case PTLLND_MSG_TYPE_GET:
/* GET request: push the reply with an active RDMA WRITE... */
1322 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_WRITE, msg,
1323 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
1324 msg->msg_niov, msg->msg_iov,
1325 msg->msg_offset, msg->msg_len);
/* ...or, when there is no matching MD, a NULL-msg WRITE = failure. */
1327 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_WRITE, NULL,
1328 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
1338 ptllnd_parse_request(lnet_ni_t *ni, ptl_process_id_t initiator,
1339 kptl_msg_t *msg, unsigned int nob)
1341 ptllnd_ni_t *plni = ni->ni_data;
1342 const int basenob = offsetof(kptl_msg_t, ptlm_u);
1343 lnet_process_id_t srcid;
1352 CERROR("Very short receive from %s\n",
1353 ptllnd_ptlid2str(initiator));
1357 /* I can at least read MAGIC/VERSION */
1359 flip = msg->ptlm_magic == __swab32(PTLLND_MSG_MAGIC);
1360 if (!flip && msg->ptlm_magic != PTLLND_MSG_MAGIC) {
1361 CERROR("Bad protocol magic %08x from %s\n",
1362 msg->ptlm_magic, ptllnd_ptlid2str(initiator));
1366 msg_version = flip ? __swab16(msg->ptlm_version) : msg->ptlm_version;
1368 if (msg_version != PTLLND_MSG_VERSION) {
1369 CERROR("Bad protocol version %04x from %s: %04x expected\n",
1370 (__u32)msg_version, ptllnd_ptlid2str(initiator), PTLLND_MSG_VERSION);
1372 if (plni->plni_abort_on_protocol_mismatch)
1378 if (nob < basenob) {
1379 CERROR("Short receive from %s: got %d, wanted at least %d\n",
1380 ptllnd_ptlid2str(initiator), nob, basenob);
1384 /* checksum must be computed with
1385 * 1) ptlm_cksum zero and
1386 * 2) BEFORE anything gets modified/flipped
1388 msg_cksum = flip ? __swab32(msg->ptlm_cksum) : msg->ptlm_cksum;
1389 msg->ptlm_cksum = 0;
1390 if (msg_cksum != 0 &&
1391 msg_cksum != ptllnd_cksum(msg, offsetof(kptl_msg_t, ptlm_u))) {
1392 CERROR("Bad checksum from %s\n", ptllnd_ptlid2str(initiator));
1396 msg->ptlm_version = msg_version;
1397 msg->ptlm_cksum = msg_cksum;
1400 /* NB stamps are opaque cookies */
1401 __swab32s(&msg->ptlm_nob);
1402 __swab64s(&msg->ptlm_srcnid);
1403 __swab64s(&msg->ptlm_dstnid);
1404 __swab32s(&msg->ptlm_srcpid);
1405 __swab32s(&msg->ptlm_dstpid);
1408 srcid.nid = msg->ptlm_srcnid;
1409 srcid.pid = msg->ptlm_srcpid;
1411 if (LNET_NIDNET(msg->ptlm_srcnid) != LNET_NIDNET(ni->ni_nid)) {
1412 CERROR("Bad source id %s from %s\n",
1413 libcfs_id2str(srcid),
1414 ptllnd_ptlid2str(initiator));
1418 if (msg->ptlm_type == PTLLND_MSG_TYPE_NAK) {
1419 CERROR("NAK from %s (%s)\n",
1420 libcfs_id2str(srcid),
1421 ptllnd_ptlid2str(initiator));
1423 if (plni->plni_dump_on_nak)
1424 ptllnd_dump_debug(ni, srcid);
1426 if (plni->plni_abort_on_nak)
1432 if (msg->ptlm_dstnid != ni->ni_nid ||
1433 msg->ptlm_dstpid != the_lnet.ln_pid) {
1434 CERROR("Bad dstid %s (%s expected) from %s\n",
1435 libcfs_id2str((lnet_process_id_t) {
1436 .nid = msg->ptlm_dstnid,
1437 .pid = msg->ptlm_dstpid}),
1438 libcfs_id2str((lnet_process_id_t) {
1440 .pid = the_lnet.ln_pid}),
1441 libcfs_id2str(srcid));
1445 if (msg->ptlm_dststamp != plni->plni_stamp) {
1446 CERROR("Bad dststamp "LPX64"("LPX64" expected) from %s\n",
1447 msg->ptlm_dststamp, plni->plni_stamp,
1448 libcfs_id2str(srcid));
1452 PTLLND_HISTORY("RX %s: %s %d %p", libcfs_id2str(srcid),
1453 ptllnd_msgtype2str(msg->ptlm_type),
1454 msg->ptlm_credits, &rx);
1456 switch (msg->ptlm_type) {
1457 case PTLLND_MSG_TYPE_PUT:
1458 case PTLLND_MSG_TYPE_GET:
1459 if (nob < basenob + sizeof(kptl_rdma_msg_t)) {
1460 CERROR("Short rdma request from %s(%s)\n",
1461 libcfs_id2str(srcid),
1462 ptllnd_ptlid2str(initiator));
1466 __swab64s(&msg->ptlm_u.rdma.kptlrm_matchbits);
1469 case PTLLND_MSG_TYPE_IMMEDIATE:
1470 if (nob < offsetof(kptl_msg_t,
1471 ptlm_u.immediate.kptlim_payload)) {
1472 CERROR("Short immediate from %s(%s)\n",
1473 libcfs_id2str(srcid),
1474 ptllnd_ptlid2str(initiator));
1479 case PTLLND_MSG_TYPE_HELLO:
1480 if (nob < basenob + sizeof(kptl_hello_msg_t)) {
1481 CERROR("Short hello from %s(%s)\n",
1482 libcfs_id2str(srcid),
1483 ptllnd_ptlid2str(initiator));
1487 __swab64s(&msg->ptlm_u.hello.kptlhm_matchbits);
1488 __swab32s(&msg->ptlm_u.hello.kptlhm_max_msg_size);
1492 case PTLLND_MSG_TYPE_NOOP:
1496 CERROR("Bad message type %d from %s(%s)\n", msg->ptlm_type,
1497 libcfs_id2str(srcid),
1498 ptllnd_ptlid2str(initiator));
1502 plp = ptllnd_find_peer(ni, srcid, 0);
1504 CERROR("Can't find peer %s\n", libcfs_id2str(srcid));
1508 if (msg->ptlm_type == PTLLND_MSG_TYPE_HELLO) {
1509 if (plp->plp_recvd_hello) {
1510 CERROR("Unexpected HELLO from %s\n",
1511 libcfs_id2str(srcid));
1512 ptllnd_peer_decref(plp);
1516 plp->plp_max_msg_size = msg->ptlm_u.hello.kptlhm_max_msg_size;
1517 plp->plp_match = msg->ptlm_u.hello.kptlhm_matchbits;
1518 plp->plp_stamp = msg->ptlm_srcstamp;
1519 plp->plp_recvd_hello = 1;
1521 } else if (!plp->plp_recvd_hello) {
1523 CERROR("Bad message type %d (HELLO expected) from %s\n",
1524 msg->ptlm_type, libcfs_id2str(srcid));
1525 ptllnd_peer_decref(plp);
1528 } else if (msg->ptlm_srcstamp != plp->plp_stamp) {
1530 CERROR("Bad srcstamp "LPX64"("LPX64" expected) from %s\n",
1531 msg->ptlm_srcstamp, plp->plp_stamp,
1532 libcfs_id2str(srcid));
1533 ptllnd_peer_decref(plp);
1537 /* Check peer only sends when I've sent her credits */
1538 if (plp->plp_sent_credits == 0) {
1539 CERROR("%s[%d/%d+%d(%d)]: unexpected message\n",
1540 libcfs_id2str(plp->plp_id),
1541 plp->plp_credits, plp->plp_outstanding_credits,
1542 plp->plp_sent_credits,
1543 plni->plni_peer_credits + plp->plp_lazy_credits);
1546 plp->plp_sent_credits--;
1548 /* No check for credit overflow - the peer may post new buffers after
1549 * the startup handshake. */
1550 plp->plp_credits += msg->ptlm_credits;
1552 /* All OK so far; assume the message is good... */
1559 switch (msg->ptlm_type) {
1560 default: /* message types have been checked already */
1561 ptllnd_rx_done(&rx);
1564 case PTLLND_MSG_TYPE_PUT:
1565 case PTLLND_MSG_TYPE_GET:
1566 rc = lnet_parse(ni, &msg->ptlm_u.rdma.kptlrm_hdr,
1567 msg->ptlm_srcnid, &rx, 1);
1569 ptllnd_rx_done(&rx);
1572 case PTLLND_MSG_TYPE_IMMEDIATE:
1573 rc = lnet_parse(ni, &msg->ptlm_u.immediate.kptlim_hdr,
1574 msg->ptlm_srcnid, &rx, 0);
1576 ptllnd_rx_done(&rx);
1580 if (msg->ptlm_credits > 0)
1581 ptllnd_check_sends(plp);
1583 ptllnd_peer_decref(plp);
/* Receive-buffer event callback.
 * Handles PTL_EVENT_PUT_END (an incoming message has landed in 'buf')
 * and PTL_EVENT_UNLINK on a posted receive buffer, then decides whether
 * the buffer must be reposted.
 * NOTE(review): the embedded line numbers in this excerpt are
 * non-contiguous, so some statements (braces, returns, #else/#endif)
 * are missing; comments below describe only what is visible. */
1587 ptllnd_buf_event (lnet_ni_t *ni, ptl_event_t *event)
1589 ptllnd_buffer_t *buf = ptllnd_eventarg2obj(event->md.user_ptr);
1590 ptllnd_ni_t *plni = ni->ni_data;
/* Start of the incoming message within the buffer, at the event offset */
1591 char *msg = &buf->plb_buffer[event->offset];
1593 int unlinked = event->type == PTL_EVENT_UNLINK;
1595 LASSERT (buf->plb_ni == ni);
1596 LASSERT (event->type == PTL_EVENT_PUT_END ||
1597 event->type == PTL_EVENT_UNLINK);
/* NI-level failure: log it; the payload (if any) is not parsed */
1599 if (event->ni_fail_type != PTL_NI_OK) {
1601 CERROR("event type %s(%d), status %s(%d) from %s\n",
1602 ptllnd_evtype2str(event->type), event->type,
1603 ptllnd_errtype2str(event->ni_fail_type),
1604 event->ni_fail_type,
1605 ptllnd_ptlid2str(event->initiator));
1607 } else if (event->type == PTL_EVENT_PUT_END) {
1608 #if (PTL_MD_LOCAL_ALIGN8 == 0)
1609 /* Portals can't force message alignment - someone sending an
1610 * odd-length message could misalign subsequent messages */
1611 if ((event->mlength & 7) != 0) {
1612 CERROR("Message from %s has odd length %u: "
1613 "probable version incompatibility\n",
1614 ptllnd_ptlid2str(event->initiator),
/* Messages are required to land 8-byte aligned within the buffer */
1619 LASSERT ((event->offset & 7) == 0);
/* Hand the raw wire message to the protocol parser */
1621 ptllnd_parse_request(ni, event->initiator,
1622 (kptl_msg_t *)msg, event->mlength);
/* Repost policy differs between Lustre-patched and stock Portals:
 * with LUSTRE_PORTALS_UNLINK_SEMANTICS an UNLINK event is delivered
 * only on explicit unlink, otherwise only on implicit unlink. */
1625 #ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
1626 /* UNLINK event only on explicit unlink */
1627 repost = (event->unlinked && event->type != PTL_EVENT_UNLINK);
1628 if (event->unlinked)
1631 /* UNLINK event only on implicit unlink */
1632 repost = (event->type == PTL_EVENT_UNLINK);
/* Buffer has come off the wire: clear the posted flag and the count... */
1636 LASSERT(buf->plb_posted);
1637 buf->plb_posted = 0;
1638 plni->plni_nposted_buffers--;
/* ...then repost it so receives can continue (result deliberately ignored) */
1642 (void) ptllnd_post_buffer(buf);
/* Transmit event callback.
 * A tx may have two MDs outstanding: the small request message MD
 * (tx_reqmdh) and an optional bulk MD (tx_bulkmdh).  This handler works
 * out which MD the event is for, records completion timestamps,
 * sanity-checks the event type against the message type, and moves the
 * tx onto the zombie list for final completion when both MDs are
 * invalid or an error occurred.
 * NOTE(review): the embedded line numbers in this excerpt are
 * non-contiguous, so some statements (braces, breaks, #else/#endif)
 * are missing; comments below describe only what is visible. */
1646 ptllnd_tx_event (lnet_ni_t *ni, ptl_event_t *event)
1648 ptllnd_ni_t *plni = ni->ni_data;
1649 ptllnd_tx_t *tx = ptllnd_eventarg2obj(event->md.user_ptr);
1650 int error = (event->ni_fail_type != PTL_NI_OK);
/* With Lustre unlink semantics the event carries an explicit 'unlinked'
 * flag; stock Portals signals unlink via a dedicated event type. */
1653 #ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
1654 int unlinked = event->unlinked;
1656 int unlinked = (event->type == PTL_EVENT_UNLINK);
1660 CERROR("Error %s(%d) event %s(%d) unlinked %d, %s(%d) for %s\n",
1661 ptllnd_errtype2str(event->ni_fail_type),
1662 event->ni_fail_type,
1663 ptllnd_evtype2str(event->type), event->type,
1664 unlinked, ptllnd_msgtype2str(tx->tx_type), tx->tx_type,
1665 libcfs_id2str(tx->tx_peer->plp_id));
1667 LASSERT (!PtlHandleIsEqual(event->md_handle, PTL_INVALID_HANDLE));
/* Did this event come in on the request (small message) MD? */
1669 isreq = PtlHandleIsEqual(event->md_handle, tx->tx_reqmdh);
1671 LASSERT (event->md.start == (void *)&tx->tx_msg);
/* Request MD unlinked: invalidate the handle and timestamp completion */
1673 tx->tx_reqmdh = PTL_INVALID_HANDLE;
1674 gettimeofday(&tx->tx_req_done, NULL);
/* Or did it come in on the bulk MD? */
1678 isbulk = PtlHandleIsEqual(event->md_handle, tx->tx_bulkmdh);
1679 if ( isbulk && unlinked ) {
1680 tx->tx_bulkmdh = PTL_INVALID_HANDLE;
1681 gettimeofday(&tx->tx_bulk_done, NULL);
1684 LASSERT (!isreq != !isbulk); /* always one and only 1 match */
1686 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: TX done %p %s%s",
1687 libcfs_id2str(tx->tx_peer->plp_id),
1688 tx->tx_peer->plp_credits,
1689 tx->tx_peer->plp_outstanding_credits,
1690 tx->tx_peer->plp_sent_credits,
1691 plni->plni_peer_credits + tx->tx_peer->plp_lazy_credits,
1692 tx, isreq ? "REQ" : "BULK", unlinked ? "(unlinked)" : "");
1694 LASSERT (!isreq != !isbulk); /* always one and only 1 match */
/* Sanity-check the event type against what this tx type may produce */
1695 switch (tx->tx_type) {
1699 case PTLLND_MSG_TYPE_NOOP:
1700 case PTLLND_MSG_TYPE_HELLO:
1701 case PTLLND_MSG_TYPE_IMMEDIATE:
1702 LASSERT (event->type == PTL_EVENT_UNLINK ||
1703 event->type == PTL_EVENT_SEND_END);
1707 case PTLLND_MSG_TYPE_GET:
1708 LASSERT (event->type == PTL_EVENT_UNLINK ||
1709 (isreq && event->type == PTL_EVENT_SEND_END) ||
1710 (isbulk && event->type == PTL_EVENT_PUT_END));
/* Bulk completion of a GET: the peer's hdr_data tells us whether the
 * GET matched; on mismatch fail the tx with -EIO */
1712 if (isbulk && !error && event->type == PTL_EVENT_PUT_END) {
1713 /* Check GET matched */
1714 if (event->hdr_data == PTLLND_RDMA_OK) {
1715 lnet_set_reply_msg_len(ni,
1716 tx->tx_lnetreplymsg,
1719 CERROR ("Unmatched GET with %s\n",
1720 libcfs_id2str(tx->tx_peer->plp_id));
1721 tx->tx_status = -EIO;
1726 case PTLLND_MSG_TYPE_PUT:
1727 LASSERT (event->type == PTL_EVENT_UNLINK ||
1728 (isreq && event->type == PTL_EVENT_SEND_END) ||
1729 (isbulk && event->type == PTL_EVENT_GET_END));
1732 case PTLLND_RDMA_READ:
1733 LASSERT (event->type == PTL_EVENT_UNLINK ||
1734 event->type == PTL_EVENT_SEND_END ||
1735 event->type == PTL_EVENT_REPLY_END);
1739 case PTLLND_RDMA_WRITE:
1740 LASSERT (event->type == PTL_EVENT_UNLINK ||
1741 event->type == PTL_EVENT_SEND_END);
1745 /* Schedule ptllnd_tx_done() on error or last completion event */
1747 (PtlHandleIsEqual(tx->tx_bulkmdh, PTL_INVALID_HANDLE) &&
1748 PtlHandleIsEqual(tx->tx_reqmdh, PTL_INVALID_HANDLE))) {
1750 tx->tx_status = -EIO;
/* Move the tx onto the zombie list; presumably it is reaped later by
 * the event loop (see zombie handling in ptllnd_wait) — TODO confirm */
1751 list_del(&tx->tx_list);
1752 list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
/* Scan all three of a peer's tx queues (queued, noop, active) for a tx
 * whose deadline has passed.
 * NOTE(review): the return statements are elided from this excerpt;
 * presumably the first expired tx is returned, NULL otherwise — confirm
 * against the full source. */
1757 ptllnd_find_timed_out_tx(ptllnd_peer_t *peer)
1759 time_t now = cfs_time_current_sec();
1762 list_for_each_entry (tx, &peer->plp_txq, tx_list) {
1763 if (tx->tx_deadline < now)
1767 list_for_each_entry (tx, &peer->plp_noopq, tx_list) {
1768 if (tx->tx_deadline < now)
1772 list_for_each_entry (tx, &peer->plp_activeq, tx_list) {
1773 if (tx->tx_deadline < now)
/* Check a single peer for communication timeout: if any of its txs has
 * passed its deadline, log the timeout and close the peer with
 * -ETIMEDOUT.  (The guard testing 'tx' is elided from this excerpt.) */
1781 ptllnd_check_peer(ptllnd_peer_t *peer)
1783 ptllnd_tx_t *tx = ptllnd_find_timed_out_tx(peer);
1788 CERROR("%s: timed out\n", libcfs_id2str(peer->plp_id));
1789 ptllnd_close_peer(peer, -ETIMEDOUT);
/* Periodic timeout scan over (a chunk of) the peer hash table.
 * Called when 'now' has reached plni_watchdog_nextt; checks a
 * proportion of the table each tick so every peer is examined several
 * times per timeout interval (see the comment below), then advances
 * plni_watchdog_nextt by the watchdog interval. */
1793 ptllnd_watchdog (lnet_ni_t *ni, time_t now)
1795 ptllnd_ni_t *plni = ni->ni_data;
1797 int p = plni->plni_watchdog_interval;
1798 int chunk = plni->plni_peer_hash_size;
/* Seconds since the previous scheduled check (nextt - p was the last tick) */
1799 int interval = now - (plni->plni_watchdog_nextt - p);
1801 struct list_head *hashlist;
1802 struct list_head *tmp;
1803 struct list_head *nxt;
1805 /* Time to check for RDMA timeouts on a few more peers:
1806 * I try to do checks every 'p' seconds on a proportion of the peer
1807 * table and I need to check every connection 'n' times within a
1808 * timeout interval, to ensure I detect a timeout on any connection
1809 * within (n+1)/n times the timeout interval. */
1811 LASSERT (now >= plni->plni_watchdog_nextt);
1813 if (plni->plni_timeout > n * interval) { /* Scan less than the whole table? */
1814 chunk = (chunk * n * interval) / plni->plni_timeout;
/* Walk 'chunk' hash buckets, resuming from where the last scan stopped;
 * the _safe iterator is used because ptllnd_check_peer may close
 * (and so unlink) the peer being checked */
1819 for (i = 0; i < chunk; i++) {
1820 hashlist = &plni->plni_peer_hash[plni->plni_watchdog_peeridx];
1822 list_for_each_safe(tmp, nxt, hashlist) {
1823 ptllnd_check_peer(list_entry(tmp, ptllnd_peer_t, plp_list));
/* Advance the resume index, wrapping around the table */
1826 plni->plni_watchdog_peeridx = (plni->plni_watchdog_peeridx + 1) %
1827 plni->plni_peer_hash_size;
/* Schedule the next watchdog tick */
1830 plni->plni_watchdog_nextt = now + p;
1834 ptllnd_wait (lnet_ni_t *ni, int milliseconds)
1836 static struct timeval prevt;
1837 static int prevt_count;
1838 static int call_count;
1840 struct timeval start;
1841 struct timeval then;
1843 struct timeval deadline;
1845 ptllnd_ni_t *plni = ni->ni_data;
1853 /* Handle any currently queued events, returning immediately if any.
1854 * Otherwise block for the timeout and handle all events queued
1857 gettimeofday(&start, NULL);
1860 if (milliseconds <= 0) {
1863 deadline.tv_sec = start.tv_sec + milliseconds/1000;
1864 deadline.tv_usec = start.tv_usec + (milliseconds % 1000)*1000;
1866 if (deadline.tv_usec >= 1000000) {
1867 start.tv_usec -= 1000000;
1873 gettimeofday(&then, NULL);
1875 rc = PtlEQPoll(&plni->plni_eqh, 1, timeout, &event, &which);
1877 gettimeofday(&now, NULL);
1879 if ((now.tv_sec*1000 + now.tv_usec/1000) -
1880 (then.tv_sec*1000 + then.tv_usec/1000) > timeout + 1000) {
1881 /* 1000 mS grace...........................^ */
1882 CERROR("SLOW PtlEQPoll(%d): %dmS elapsed\n", timeout,
1883 (int)(now.tv_sec*1000 + now.tv_usec/1000) -
1884 (int)(then.tv_sec*1000 + then.tv_usec/1000));
1887 if (rc == PTL_EQ_EMPTY) {
1888 if (found) /* handled some events */
1891 if (now.tv_sec >= plni->plni_watchdog_nextt) { /* check timeouts? */
1892 ptllnd_watchdog(ni, now.tv_sec);
1893 LASSERT (now.tv_sec < plni->plni_watchdog_nextt);
1896 if (now.tv_sec > deadline.tv_sec || /* timeout expired */
1897 (now.tv_sec == deadline.tv_sec &&
1898 now.tv_usec >= deadline.tv_usec))
1901 if (milliseconds < 0 ||
1902 plni->plni_watchdog_nextt <= deadline.tv_sec) {
1903 timeout = (plni->plni_watchdog_nextt - now.tv_sec)*1000;
1905 timeout = (deadline.tv_sec - now.tv_sec)*1000 +
1906 (deadline.tv_usec - now.tv_usec)/1000;
1912 LASSERT (rc == PTL_OK || rc == PTL_EQ_DROPPED);
1914 if (rc == PTL_EQ_DROPPED)
1915 CERROR("Event queue: size %d is too small\n",
1916 plni->plni_eq_size);
1921 switch (ptllnd_eventarg2type(event.md.user_ptr)) {
1925 case PTLLND_EVENTARG_TYPE_TX:
1926 ptllnd_tx_event(ni, &event);
1929 case PTLLND_EVENTARG_TYPE_BUF:
1930 ptllnd_buf_event(ni, &event);
1935 while (!list_empty(&plni->plni_zombie_txs)) {
1936 tx = list_entry(plni->plni_zombie_txs.next,
1937 ptllnd_tx_t, tx_list);
1938 list_del_init(&tx->tx_list);
1942 if (prevt.tv_sec == 0 ||
1943 prevt.tv_sec != now.tv_sec) {
1944 PTLLND_HISTORY("%d wait entered at %d.%06d - prev %d %d.%06d",
1945 call_count, (int)start.tv_sec, (int)start.tv_usec,
1946 prevt_count, (int)prevt.tv_sec, (int)prevt.tv_usec);