1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2005 Cluster File Systems, Inc. All rights reserved.
5 * Author: Eric Barton <eeb@bartonsoftware.com>
7 * This file is part of the Lustre file system, http://www.lustre.org
8 * Lustre is a trademark of Cluster File Systems, Inc.
10 * This file is confidential source code owned by Cluster File Systems.
11 * No viewing, modification, compilation, redistribution, or any other
12 * form of use is permitted except through a signed license agreement.
14 * If you have not signed such an agreement, then you have no rights to
15 * this file. Please destroy it immediately and contact CFS.
/* Stamp 'tx' with its completion deadline: current time plus the
 * NI-wide timeout (plni_timeout, seconds).  Used by timeout checking
 * elsewhere to detect stuck sends. */
22 ptllnd_set_tx_deadline(ptllnd_tx_t *tx)
24 ptllnd_peer_t *peer = tx->tx_peer;
25 lnet_ni_t *ni = peer->plp_ni;
26 ptllnd_ni_t *plni = ni->ni_data;
28 tx->tx_deadline = cfs_time_current_sec() + plni->plni_timeout;
/* Queue 'tx' at the tail of its peer's send queue and kick the sender.
 * The deadline is (re)set here so timeout accounting starts when the
 * tx is posted, not when it was allocated. */
32 ptllnd_post_tx(ptllnd_tx_t *tx)
34 ptllnd_peer_t *peer = tx->tx_peer;
36 ptllnd_set_tx_deadline(tx);
37 list_add_tail(&tx->tx_list, &peer->plp_txq);
38 ptllnd_check_sends(peer);
/* Render a Portals process id as "pid.nid" text (FMT_PTLID).
 * Uses a ring of 8 static buffers so several results can coexist in a
 * single printf call.  NOTE(review): the static index/buffers make this
 * non-reentrant; fine for single-threaded debug output only. */
42 ptllnd_ptlid2str(ptl_process_id_t id)
44 static char strs[8][32];
47 char *str = strs[idx++];
/* wrap the ring index; idx reset presumably happens on the elided line */
49 if (idx >= sizeof(strs)/sizeof(strs[0]))
52 snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
/* Final teardown of a peer (refcount has dropped to zero): give back
 * the receive-buffer space reserved for its credits and free the
 * structure.  The peer must already be closed with empty queues. */
57 ptllnd_destroy_peer(ptllnd_peer_t *peer)
59 lnet_ni_t *ni = peer->plp_ni;
60 ptllnd_ni_t *plni = ni->ni_data;
/* total message headroom this peer accounted for: its lazy credits
 * plus the per-peer credit allotment */
61 int nmsg = peer->plp_lazy_credits +
62 plni->plni_peer_credits;
/* negative delta shrinks the posted buffer pool accordingly */
64 ptllnd_size_buffers(ni, -nmsg);
66 LASSERT (peer->plp_closing);
67 LASSERT (plni->plni_npeers > 0);
68 LASSERT (list_empty(&peer->plp_txq));
69 LASSERT (list_empty(&peer->plp_activeq));
71 LIBCFS_FREE(peer, sizeof(*peer));
/* Fail every tx on queue 'q' with -ESHUTDOWN and move it to the NI's
 * zombie list, where it will be completed/reaped later. */
75 ptllnd_abort_txs(ptllnd_ni_t *plni, struct list_head *q)
77 while (!list_empty(q)) {
78 ptllnd_tx_t *tx = list_entry(q->next, ptllnd_tx_t, tx_list);
80 tx->tx_status = -ESHUTDOWN;
81 list_del(&tx->tx_list);
82 list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
/* Close a peer: mark it closing (idempotent — early return if already
 * closing), abort all queued and active txs onto the zombie list,
 * unhash it and drop the hash-table reference. */
87 ptllnd_close_peer(ptllnd_peer_t *peer, int error)
89 lnet_ni_t *ni = peer->plp_ni;
90 ptllnd_ni_t *plni = ni->ni_data;
92 if (peer->plp_closing)
95 peer->plp_closing = 1;
/* only warn (and optionally dump state) if the close is "interesting":
 * work was still pending, or (per the elided condition) an error */
97 if (!list_empty(&peer->plp_txq) ||
98 !list_empty(&peer->plp_activeq) ||
100 CWARN("Closing %s\n", libcfs_id2str(peer->plp_id));
101 if (plni->plni_debug)
102 ptllnd_dump_debug(ni, peer->plp_id);
105 ptllnd_abort_txs(plni, &peer->plp_txq);
106 ptllnd_abort_txs(plni, &peer->plp_activeq);
/* remove from the peer hash and drop the reference the hash held */
108 list_del(&peer->plp_list);
109 ptllnd_peer_decref(peer);
/* Look up the peer for 'id' in the NID-keyed hash; return it with a
 * reference held.  If absent and 'create' is set, allocate a new peer,
 * reserve buffer space for its credits, install it in the hash and
 * post the initial HELLO handshake message to it. */
113 ptllnd_find_peer(lnet_ni_t *ni, lnet_process_id_t id, int create)
115 ptllnd_ni_t *plni = ni->ni_data;
116 unsigned int hash = LNET_NIDADDR(id.nid) % plni->plni_peer_hash_size;
117 struct list_head *tmp;
122 LASSERT (LNET_NIDNET(id.nid) == LNET_NIDNET(ni->ni_nid));
/* fast path: existing peer — take a ref and return it */
124 list_for_each(tmp, &plni->plni_peer_hash[hash]) {
125 plp = list_entry(tmp, ptllnd_peer_t, plp_list);
127 if (plp->plp_id.nid == id.nid &&
128 plp->plp_id.pid == id.pid) {
129 ptllnd_peer_addref(plp);
137 /* New peer: check first for enough posted buffers */
139 rc = ptllnd_size_buffers(ni, plni->plni_peer_credits);
145 LIBCFS_ALLOC(plp, sizeof(*plp));
147 CERROR("Can't allocate new peer %s\n", libcfs_id2str(id));
/* undo the buffer reservation made above on allocation failure */
149 ptllnd_size_buffers(ni, -plni->plni_peer_credits);
155 plp->plp_ptlid.nid = LNET_NIDADDR(id.nid);
156 plp->plp_ptlid.pid = plni->plni_ptllnd_pid;
157 plp->plp_credits = 1; /* add more later when she gives me credits */
158 plp->plp_max_msg_size = plni->plni_max_msg_size; /* until I hear from her */
159 plp->plp_sent_credits = 1; /* Implicit credit for HELLO */
160 plp->plp_outstanding_credits = plni->plni_peer_credits - 1;
161 plp->plp_lazy_credits = 0;
162 plp->plp_extra_lazy_credits = 0;
165 plp->plp_recvd_hello = 0;
166 plp->plp_closing = 0;
167 plp->plp_refcount = 1;
168 CFS_INIT_LIST_HEAD(&plp->plp_list);
169 CFS_INIT_LIST_HEAD(&plp->plp_txq);
170 CFS_INIT_LIST_HEAD(&plp->plp_activeq);
/* one ref for the hash table, on top of the caller's ref above */
172 ptllnd_peer_addref(plp);
173 list_add_tail(&plp->plp_list, &plni->plni_peer_hash[hash]);
/* kick off the handshake: HELLO carries our matchbits base and the
 * largest message we can accept */
175 tx = ptllnd_new_tx(plp, PTLLND_MSG_TYPE_HELLO, 0);
177 CERROR("Can't send HELLO to %s\n", libcfs_id2str(id));
178 ptllnd_close_peer(plp, -ENOMEM);
179 ptllnd_peer_decref(plp);
183 tx->tx_msg.ptlm_u.hello.kptlhm_matchbits = PTL_RESERVED_MATCHBITS;
184 tx->tx_msg.ptlm_u.hello.kptlhm_max_msg_size = plni->plni_max_msg_size;
186 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post hello %p", libcfs_id2str(id),
187 tx->tx_peer->plp_credits,
188 tx->tx_peer->plp_outstanding_credits,
189 tx->tx_peer->plp_sent_credits,
190 plni->plni_peer_credits +
191 tx->tx_peer->plp_lazy_credits, tx);
/* Debug helper: count (by walking) the entries on list 'q'. */
198 ptllnd_count_q(struct list_head *q)
203 list_for_each(e, q) {
/* Map a tx type code to a human-readable name for debug output
 * (return statements for each case are on the elided lines). */
211 ptllnd_tx_typestr(int type)
214 case PTLLND_RDMA_WRITE:
217 case PTLLND_RDMA_READ:
220 case PTLLND_MSG_TYPE_PUT:
223 case PTLLND_MSG_TYPE_GET:
226 case PTLLND_MSG_TYPE_IMMEDIATE:
229 case PTLLND_MSG_TYPE_NOOP:
232 case PTLLND_MSG_TYPE_HELLO:
/* Dump one tx: type, peer id, the bulk and request post/done
 * timestamps (each as sec.usec), and the final status. */
241 ptllnd_debug_tx(ptllnd_tx_t *tx)
243 CDEBUG(D_WARNING, "%s %s b %ld.%06ld/%ld.%06ld"
244 " r %ld.%06ld/%ld.%06ld status %d\n",
245 ptllnd_tx_typestr(tx->tx_type),
246 libcfs_id2str(tx->tx_peer->plp_id),
247 tx->tx_bulk_posted.tv_sec, tx->tx_bulk_posted.tv_usec,
248 tx->tx_bulk_done.tv_sec, tx->tx_bulk_done.tv_usec,
249 tx->tx_req_posted.tv_sec, tx->tx_req_posted.tv_usec,
250 tx->tx_req_done.tv_sec, tx->tx_req_done.tv_usec,
/* Dump full debug state for the peer matching 'id': flags, stamp,
 * queue depths, credit counters, then every tx on its txq/activeq and
 * any of its txs on the NI-wide zombie and history lists. */
255 ptllnd_debug_peer(lnet_ni_t *ni, lnet_process_id_t id)
257 ptllnd_peer_t *plp = ptllnd_find_peer(ni, id, 0);
258 struct list_head *tmp;
259 ptllnd_ni_t *plni = ni->ni_data;
263 CDEBUG(D_WARNING, "No peer %s\n", libcfs_id2str(id));
/* H = HELLO received, C = closing; credits shown as
 * held/outstanding+sent(total) */
267 CDEBUG(D_WARNING, "%s %s%s [%d] "LPU64".%06d m "LPU64" q %d/%d c %d/%d+%d(%d)\n",
269 plp->plp_recvd_hello ? "H" : "_",
270 plp->plp_closing ? "C" : "_",
272 plp->plp_stamp / 1000000, (int)(plp->plp_stamp % 1000000),
274 ptllnd_count_q(&plp->plp_txq),
275 ptllnd_count_q(&plp->plp_activeq),
276 plp->plp_credits, plp->plp_outstanding_credits, plp->plp_sent_credits,
277 plni->plni_peer_credits + plp->plp_lazy_credits);
279 CDEBUG(D_WARNING, "txq:\n");
280 list_for_each (tmp, &plp->plp_txq) {
281 tx = list_entry(tmp, ptllnd_tx_t, tx_list);
286 CDEBUG(D_WARNING, "activeq:\n");
287 list_for_each (tmp, &plp->plp_activeq) {
288 tx = list_entry(tmp, ptllnd_tx_t, tx_list);
/* zombie and history lists are NI-wide: filter to this peer's txs */
293 CDEBUG(D_WARNING, "zombies:\n");
294 list_for_each (tmp, &plni->plni_zombie_txs) {
295 tx = list_entry(tmp, ptllnd_tx_t, tx_list);
297 if (tx->tx_peer->plp_id.nid == id.nid &&
298 tx->tx_peer->plp_id.pid == id.pid)
302 CDEBUG(D_WARNING, "history:\n");
303 list_for_each (tmp, &plni->plni_tx_history) {
304 tx = list_entry(tmp, ptllnd_tx_t, tx_list);
306 if (tx->tx_peer->plp_id.nid == id.nid &&
307 tx->tx_peer->plp_id.pid == id.pid)
/* drop the ref taken by ptllnd_find_peer() above */
311 ptllnd_peer_decref(plp);
/* Dump debug state for one peer plus the global message history. */
315 ptllnd_dump_debug(lnet_ni_t *ni, lnet_process_id_t id)
317 ptllnd_debug_peer(ni, id);
318 ptllnd_dump_history();
/* LNet notify() method.  Per the comment below it is only used to
 * connect to routers at startup: find/create the peer and busy-wait
 * (warning every plni_long_wait ms) until its HELLO reply arrives. */
322 ptllnd_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive)
324 lnet_process_id_t id;
326 time_t start = cfs_time_current_sec();
327 ptllnd_ni_t *plni = ni->ni_data;
328 int w = plni->plni_long_wait;
330 /* This is only actually used to connect to routers at startup! */
334 id.pid = LUSTRE_SRV_LNET_PID;
336 peer = ptllnd_find_peer(ni, id, 1);
340 /* wait for the peer to reply */
341 while (!peer->plp_recvd_hello) {
/* w is in milliseconds — warn once per elapsed w/1000 seconds */
342 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
343 CWARN("Waited %ds to connect to %s\n",
344 (int)(cfs_time_current_sec() - start),
352 ptllnd_peer_decref(peer);
/* LNet setasync() method: adjust the extra message headroom (lazy
 * credits) reserved for 'id'.  Positive nasync grows the posted buffer
 * pool and the peer's lazy/outstanding credits; negative nasync is
 * only *accounted* (extra_lazy_credits) because the peer may already
 * hold credits against those buffers — see comment below. */
356 ptllnd_setasync(lnet_ni_t *ni, lnet_process_id_t id, int nasync)
358 ptllnd_peer_t *peer = ptllnd_find_peer(ni, id, nasync > 0);
364 LASSERT (peer->plp_lazy_credits >= 0);
365 LASSERT (peer->plp_extra_lazy_credits >= 0);
367 /* If nasync < 0, we're being told we can reduce the total message
368 * headroom. We can't do this right now because our peer might already
369 * have credits for the extra buffers, so we just account the extra
370 * headroom in case we need it later and only destroy buffers when the
373 * Note that the following condition handles this case, where it
374 * actually increases the extra lazy credit counter. */
376 if (nasync <= peer->plp_extra_lazy_credits) {
377 peer->plp_extra_lazy_credits -= nasync;
381 LASSERT (nasync > 0);
/* consume banked extra credits first; only the remainder needs new
 * buffers */
383 nasync -= peer->plp_extra_lazy_credits;
384 peer->plp_extra_lazy_credits = 0;
386 rc = ptllnd_size_buffers(ni, nasync);
388 peer->plp_lazy_credits += nasync;
389 peer->plp_outstanding_credits += nasync;
/* Simple 32-bit rotate-and-add checksum over 'nob' bytes at 'ptr'.
 * Never returns 0, so 0 can be used on the wire to mean "no checksum". */
396 ptllnd_cksum (void *ptr, int nob)
/* rotate accumulator left by 1 then add the next byte */
402 sum = ((sum << 1) | (sum >> 31)) + *c++;
404 /* ensure I don't return 0 (== no checksum) */
405 return (sum == 0) ? 1 : sum;
/* Allocate and initialise a tx of 'type' for 'peer'.  Computes the
 * wire message size from the type (IMMEDIATE adds payload_nob bytes of
 * inline payload; RDMA txs carry no wire message beyond the header),
 * rounds it to 8 bytes, allocates tx + message in one chunk, fills in
 * the common message header, and takes a peer reference that the tx
 * holds until completion. */
409 ptllnd_new_tx(ptllnd_peer_t *peer, int type, int payload_nob)
411 lnet_ni_t *ni = peer->plp_ni;
412 ptllnd_ni_t *plni = ni->ni_data;
416 CDEBUG(D_NET, "peer=%p type=%d payload=%d\n", peer, type, payload_nob);
422 case PTLLND_RDMA_WRITE:
423 case PTLLND_RDMA_READ:
424 LASSERT (payload_nob == 0);
428 case PTLLND_MSG_TYPE_PUT:
429 case PTLLND_MSG_TYPE_GET:
430 LASSERT (payload_nob == 0);
431 msgsize = offsetof(kptl_msg_t, ptlm_u) +
432 sizeof(kptl_rdma_msg_t);
435 case PTLLND_MSG_TYPE_IMMEDIATE:
436 msgsize = offsetof(kptl_msg_t,
437 ptlm_u.immediate.kptlim_payload[payload_nob]);
440 case PTLLND_MSG_TYPE_NOOP:
441 LASSERT (payload_nob == 0);
442 msgsize = offsetof(kptl_msg_t, ptlm_u);
445 case PTLLND_MSG_TYPE_HELLO:
446 LASSERT (payload_nob == 0);
447 msgsize = offsetof(kptl_msg_t, ptlm_u) +
448 sizeof(kptl_hello_msg_t);
/* round message size up to an 8-byte boundary */
452 msgsize = (msgsize + 7) & ~7;
453 LASSERT (msgsize <= peer->plp_max_msg_size);
/* tx struct and wire message are a single allocation; tx_msg is the
 * trailing member */
455 LIBCFS_ALLOC(tx, offsetof(ptllnd_tx_t, tx_msg) + msgsize);
458 CERROR("Can't allocate msg type %d for %s\n",
459 type, libcfs_id2str(peer->plp_id));
463 CFS_INIT_LIST_HEAD(&tx->tx_list);
466 tx->tx_lnetmsg = tx->tx_lnetreplymsg = NULL;
469 tx->tx_reqmdh = PTL_INVALID_HANDLE;
470 tx->tx_bulkmdh = PTL_INVALID_HANDLE;
471 tx->tx_msgsize = msgsize;
472 tx->tx_completing = 0;
475 memset(&tx->tx_bulk_posted, 0, sizeof(tx->tx_bulk_posted));
476 memset(&tx->tx_bulk_done, 0, sizeof(tx->tx_bulk_done));
477 memset(&tx->tx_req_posted, 0, sizeof(tx->tx_req_posted));
478 memset(&tx->tx_req_done, 0, sizeof(tx->tx_req_done));
/* common wire header; dststamp may be 0 here for a brand-new peer and
 * is re-set just before sending (see ptllnd_check_sends) */
481 tx->tx_msg.ptlm_magic = PTLLND_MSG_MAGIC;
482 tx->tx_msg.ptlm_version = PTLLND_MSG_VERSION;
483 tx->tx_msg.ptlm_type = type;
484 tx->tx_msg.ptlm_credits = 0;
485 tx->tx_msg.ptlm_nob = msgsize;
486 tx->tx_msg.ptlm_cksum = 0;
487 tx->tx_msg.ptlm_srcnid = ni->ni_nid;
488 tx->tx_msg.ptlm_srcstamp = plni->plni_stamp;
489 tx->tx_msg.ptlm_dstnid = peer->plp_id.nid;
490 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
491 tx->tx_msg.ptlm_srcpid = the_lnet.ln_pid;
492 tx->tx_msg.ptlm_dstpid = peer->plp_id.pid;
/* the tx holds a peer ref until it is done */
495 ptllnd_peer_addref(peer);
498 CDEBUG(D_NET, "tx=%p\n",tx);
/* Abort one MD of 'tx' by unlinking '*mdh', then spin until the handle
 * is invalidated.  Without Lustre unlink semantics, PTL_OK means no
 * unlinked event will be delivered; PTL_MD_IN_USE means we must wait
 * for ptllnd_tx_event() to invalidate the handle.  Warns every
 * plni_long_wait ms while waiting. */
504 ptllnd_abort_tx(ptllnd_tx_t *tx, ptl_handle_md_t *mdh)
506 ptllnd_peer_t *peer = tx->tx_peer;
507 lnet_ni_t *ni = peer->plp_ni;
509 time_t start = cfs_time_current_sec();
510 ptllnd_ni_t *plni = ni->ni_data;
511 int w = plni->plni_long_wait;
513 while (!PtlHandleIsEqual(*mdh, PTL_INVALID_HANDLE)) {
514 rc = PtlMDUnlink(*mdh);
515 #ifndef LUSTRE_PORTALS_UNLINK_SEMANTICS
516 if (rc == PTL_OK) /* unlink successful => no unlinked event */
518 LASSERT (rc == PTL_MD_IN_USE);
/* w is milliseconds; warn about long waits roughly once per w/1000 s */
520 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
521 CWARN("Waited %ds to abort tx to %s\n",
522 (int)(cfs_time_current_sec() - start),
523 libcfs_id2str(peer->plp_id));
526 /* Wait for ptllnd_tx_event() to invalidate */
/* Trim the completed-tx history list down to plni_max_tx_history
 * entries, oldest first, dropping each tx's peer ref and freeing the
 * combined tx+message allocation. */
532 ptllnd_cull_tx_history(ptllnd_ni_t *plni)
534 int max = plni->plni_max_tx_history;
536 while (plni->plni_ntx_history > max) {
537 ptllnd_tx_t *tx = list_entry(plni->plni_tx_history.next,
538 ptllnd_tx_t, tx_list);
539 list_del(&tx->tx_list);
541 ptllnd_peer_decref(tx->tx_peer);
/* free size must match the allocation in ptllnd_new_tx() */
543 LIBCFS_FREE(tx, offsetof(ptllnd_tx_t, tx_msg) + tx->tx_msgsize);
545 LASSERT (plni->plni_ntxs > 0);
547 plni->plni_ntx_history--;
/* Complete a tx: guard against re-entry via tx_completing, unlink any
 * live MDs, free the iov, finalize the LNet message(s), and park the
 * tx on the history list.  On error the peer is closed. */
552 ptllnd_tx_done(ptllnd_tx_t *tx)
554 ptllnd_peer_t *peer = tx->tx_peer;
555 lnet_ni_t *ni = peer->plp_ni;
556 ptllnd_ni_t *plni = ni->ni_data;
558 /* CAVEAT EMPTOR: If this tx is being aborted, I'll continue to get
559 * events for this tx until it's unlinked. So I set tx_completing to
560 * flag the tx is getting handled */
562 if (tx->tx_completing)
565 tx->tx_completing = 1;
567 if (!list_empty(&tx->tx_list))
568 list_del_init(&tx->tx_list);
570 if (tx->tx_status != 0) {
571 if (plni->plni_debug) {
572 CERROR("Completing tx for %s with error %d\n",
573 libcfs_id2str(peer->plp_id), tx->tx_status);
/* any tx error is fatal for the connection */
576 ptllnd_close_peer(peer, tx->tx_status);
/* ensure both MDs are gone before touching the buffers they map */
579 ptllnd_abort_tx(tx, &tx->tx_reqmdh);
580 ptllnd_abort_tx(tx, &tx->tx_bulkmdh);
582 if (tx->tx_niov > 0) {
583 LIBCFS_FREE(tx->tx_iov, tx->tx_niov * sizeof(*tx->tx_iov));
587 if (tx->tx_lnetreplymsg != NULL) {
588 LASSERT (tx->tx_type == PTLLND_MSG_TYPE_GET);
589 LASSERT (tx->tx_lnetmsg != NULL);
590 /* Simulate GET success always */
591 lnet_finalize(ni, tx->tx_lnetmsg, 0);
592 CDEBUG(D_NET, "lnet_finalize(tx_lnetreplymsg=%p)\n",tx->tx_lnetreplymsg);
593 lnet_finalize(ni, tx->tx_lnetreplymsg, tx->tx_status);
594 } else if (tx->tx_lnetmsg != NULL) {
595 lnet_finalize(ni, tx->tx_lnetmsg, tx->tx_status);
/* keep the tx on the history list for debugging; cull if over limit */
598 plni->plni_ntx_history++;
599 list_add_tail(&tx->tx_list, &plni->plni_tx_history);
601 ptllnd_cull_tx_history(plni);
/* Build tx->tx_iov as a ptl_md_iovec_t array describing the byte range
 * [offset, offset+len) of the caller's iovec list: skip whole leading
 * iovecs consumed by 'offset', then copy/trim entries until 'len'
 * (resid) is covered.  Over-allocates then retries with the exact
 * count if the first pass used fewer entries (see comment below). */
605 ptllnd_set_txiov(ptllnd_tx_t *tx,
606 unsigned int niov, struct iovec *iov,
607 unsigned int offset, unsigned int len)
609 ptl_md_iovec_t *piov;
618 * Remove iovec's at the beginning that
619 * are skipped because of the offset.
620 * Adjust the offset accordingly
624 if (offset < iov->iov_len)
626 offset -= iov->iov_len;
632 int temp_offset = offset;
634 LIBCFS_ALLOC(piov, niov * sizeof(*piov));
638 for (npiov = 0;; npiov++) {
639 LASSERT (npiov < niov);
640 LASSERT (iov->iov_len >= temp_offset);
/* temp_offset is only non-zero for the first fragment; presumably
 * reset to 0 on an elided line after the first iteration — the
 * visible lines alone would mis-slice later fragments (TODO confirm) */
642 piov[npiov].iov_base = iov[npiov].iov_base + temp_offset;
643 piov[npiov].iov_len = iov[npiov].iov_len - temp_offset;
645 if (piov[npiov].iov_len >= resid) {
646 piov[npiov].iov_len = resid;
650 resid -= piov[npiov].iov_len;
660 /* Dang! The piov I allocated was too big and it's a drag to
661 * have to maintain separate 'allocated' and 'used' sizes, so
662 * I'll just do it again; NB this doesn't happen normally... */
663 LIBCFS_FREE(piov, niov * sizeof(*piov));
/* Point an MD at the tx's buffer: for a single-fragment iov use the
 * fragment directly; otherwise hand the MD the whole iovec array with
 * PTL_MD_IOVEC set.  (The niov == 0 branch is on elided lines.) */
669 ptllnd_set_md_buffer(ptl_md_t *md, ptllnd_tx_t *tx)
671 unsigned int niov = tx->tx_niov;
672 ptl_md_iovec_t *iov = tx->tx_iov;
674 LASSERT ((md->options & PTL_MD_IOVEC) == 0);
679 } else if (niov == 1) {
680 md->start = iov[0].iov_base;
681 md->length = iov[0].iov_len;
685 md->options |= PTL_MD_IOVEC;
/* Post one receive buffer: attach an ME matching any sender on
 * LNET_MSG_MATCHBITS, then attach an MD over the buffer accepting
 * PUTs of up to plni_max_msg_size each.  On MD attach failure the ME
 * is unlinked and the posted-buffer count rolled back. */
690 ptllnd_post_buffer(ptllnd_buffer_t *buf)
692 lnet_ni_t *ni = buf->plb_ni;
693 ptllnd_ni_t *plni = ni->ni_data;
694 ptl_process_id_t anyid = {
698 .start = buf->plb_buffer,
699 .length = plni->plni_buffer_size,
700 .threshold = PTL_MD_THRESH_INF,
701 .max_size = plni->plni_max_msg_size,
702 .options = (PTLLND_MD_OPTIONS |
703 PTL_MD_OP_PUT | PTL_MD_MAX_SIZE |
704 PTL_MD_LOCAL_ALIGN8),
705 .user_ptr = ptllnd_obj2eventarg(buf, PTLLND_EVENTARG_TYPE_BUF),
706 .eq_handle = plni->plni_eqh};
710 LASSERT (!buf->plb_posted);
712 rc = PtlMEAttach(plni->plni_nih, plni->plni_portal,
713 anyid, LNET_MSG_MATCHBITS, 0,
714 PTL_UNLINK, PTL_INS_AFTER, &meh);
716 CERROR("PtlMEAttach failed: %d\n", rc);
721 plni->plni_nposted_buffers++;
723 rc = PtlMDAttach(meh, md, LNET_UNLINK, &buf->plb_md);
727 CERROR("PtlMDAttach failed: %d\n", rc);
/* roll back: this buffer is not posted after all */
730 plni->plni_nposted_buffers--;
732 rc = PtlMEUnlink(meh);
733 LASSERT (rc == PTL_OK);
/* Drain the peer's send queue as far as credits allow.  Generates a
 * NOOP when the queue is idle but enough receive credits have piled up
 * (credit high-water) to be worth returning.  Each message sent
 * consumes one of our credits and piggybacks all outstanding credits
 * back to the peer.  The last credit is reserved for credit returns:
 * a message that would use it is deferred unless it returns credits. */
739 ptllnd_check_sends(ptllnd_peer_t *peer)
741 lnet_ni_t *ni = peer->plp_ni;
742 ptllnd_ni_t *plni = ni->ni_data;
748 CDEBUG(D_NET, "%s: [%d/%d+%d(%d)\n",
749 libcfs_id2str(peer->plp_id), peer->plp_credits,
750 peer->plp_outstanding_credits, peer->plp_sent_credits,
751 plni->plni_peer_credits + peer->plp_lazy_credits);
/* idle but holding many receive credits: queue a NOOP to return them */
753 if (list_empty(&peer->plp_txq) &&
754 peer->plp_outstanding_credits >= PTLLND_CREDIT_HIGHWATER(plni) &&
755 peer->plp_credits != 0) {
757 tx = ptllnd_new_tx(peer, PTLLND_MSG_TYPE_NOOP, 0);
758 CDEBUG(D_NET, "NOOP tx=%p\n",tx);
760 CERROR("Can't return credits to %s\n",
761 libcfs_id2str(peer->plp_id));
763 list_add_tail(&tx->tx_list, &peer->plp_txq);
767 while (!list_empty(&peer->plp_txq)) {
768 tx = list_entry(peer->plp_txq.next, ptllnd_tx_t, tx_list);
770 LASSERT (tx->tx_msgsize > 0);
772 LASSERT (peer->plp_outstanding_credits >= 0);
773 LASSERT (peer->plp_sent_credits >= 0);
774 LASSERT (peer->plp_outstanding_credits + peer->plp_sent_credits
775 <= plni->plni_peer_credits + peer->plp_lazy_credits);
776 LASSERT (peer->plp_credits >= 0);
778 if (peer->plp_credits == 0) { /* no credits */
779 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: no creds for %p",
780 libcfs_id2str(peer->plp_id),
782 peer->plp_outstanding_credits,
783 peer->plp_sent_credits,
784 plni->plni_peer_credits +
785 peer->plp_lazy_credits, tx);
789 if (peer->plp_credits == 1 && /* last credit reserved for */
790 peer->plp_outstanding_credits == 0) { /* returning credits */
791 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: too few creds for %p",
792 libcfs_id2str(peer->plp_id),
794 peer->plp_outstanding_credits,
795 peer->plp_sent_credits,
796 plni->plni_peer_credits +
797 peer->plp_lazy_credits, tx);
/* committed: move the tx to the active queue */
801 list_del(&tx->tx_list);
802 list_add_tail(&tx->tx_list, &peer->plp_activeq);
804 CDEBUG(D_NET, "Sending at TX=%p type=%s (%d)\n",tx,
805 ptllnd_msgtype2str(tx->tx_type),tx->tx_type);
/* drop a NOOP that has become redundant: real traffic is queued or
 * the credit high-water is no longer reached */
807 if (tx->tx_type == PTLLND_MSG_TYPE_NOOP &&
808 (!list_empty(&peer->plp_txq) ||
809 peer->plp_outstanding_credits <
810 PTLLND_CREDIT_HIGHWATER(plni))) {
816 /* Set stamp at the last minute; on a new peer, I don't know it
817 * until I receive the HELLO back */
818 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
821 * Return all the credits we have
823 tx->tx_msg.ptlm_credits = peer->plp_outstanding_credits;
824 peer->plp_sent_credits += peer->plp_outstanding_credits;
825 peer->plp_outstanding_credits = 0;
/* checksum covers the header only, computed after all fields are set */
832 if (plni->plni_checksum)
833 tx->tx_msg.ptlm_cksum =
834 ptllnd_cksum(&tx->tx_msg,
835 offsetof(kptl_msg_t, ptlm_u));
837 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
838 md.eq_handle = plni->plni_eqh;
840 md.options = PTLLND_MD_OPTIONS;
841 md.start = &tx->tx_msg;
842 md.length = tx->tx_msgsize;
844 rc = PtlMDBind(plni->plni_nih, md, LNET_UNLINK, &mdh);
846 CERROR("PtlMDBind for %s failed: %d\n",
847 libcfs_id2str(peer->plp_id), rc);
848 tx->tx_status = -EIO;
853 LASSERT (tx->tx_type != PTLLND_RDMA_WRITE &&
854 tx->tx_type != PTLLND_RDMA_READ);
857 gettimeofday(&tx->tx_req_posted, NULL);
859 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: %s %p c %d",
860 libcfs_id2str(peer->plp_id),
862 peer->plp_outstanding_credits,
863 peer->plp_sent_credits,
864 plni->plni_peer_credits +
865 peer->plp_lazy_credits,
866 ptllnd_msgtype2str(tx->tx_type), tx,
867 tx->tx_msg.ptlm_credits);
869 rc = PtlPut(mdh, PTL_NOACK_REQ, peer->plp_ptlid,
870 plni->plni_portal, 0, LNET_MSG_MATCHBITS, 0, 0);
872 CERROR("PtlPut for %s failed: %d\n",
873 libcfs_id2str(peer->plp_id), rc);
874 tx->tx_status = -EIO;
/* Set up passive RDMA for an outgoing PUT or GET: describe the bulk
 * buffer, wait for the HELLO exchange to validate plp_match, attach an
 * ME/MD on fresh matchbits for the peer to PUT into (GET) or GET from
 * (PUT), then send the rdma request message carrying those matchbits.
 * For GET, an LNet reply message is created up front. */
882 ptllnd_passive_rdma(ptllnd_peer_t *peer, int type, lnet_msg_t *msg,
883 unsigned int niov, struct iovec *iov,
884 unsigned int offset, unsigned int len)
886 lnet_ni_t *ni = peer->plp_ni;
887 ptllnd_ni_t *plni = ni->ni_data;
888 ptllnd_tx_t *tx = ptllnd_new_tx(peer, type, 0);
898 CDEBUG(D_NET, "niov=%d offset=%d len=%d\n",niov,offset,len);
900 LASSERT (type == PTLLND_MSG_TYPE_GET ||
901 type == PTLLND_MSG_TYPE_PUT);
904 CERROR("Can't allocate %s tx for %s\n",
905 type == PTLLND_MSG_TYPE_GET ? "GET" : "PUT/REPLY",
906 libcfs_id2str(peer->plp_id));
910 rc = ptllnd_set_txiov(tx, niov, iov, offset, len);
912 CERROR ("Can't allocate iov %d for %s\n",
913 niov, libcfs_id2str(peer->plp_id));
918 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
919 md.eq_handle = plni->plni_eqh;
922 md.options = PTLLND_MD_OPTIONS;
/* GET: peer will PUT the data to us; PUT: peer will GET it from us */
923 if(type == PTLLND_MSG_TYPE_GET)
924 md.options |= PTL_MD_OP_PUT | PTL_MD_ACK_DISABLE;
926 md.options |= PTL_MD_OP_GET;
927 ptllnd_set_md_buffer(&md, tx);
929 start = cfs_time_current_sec();
930 w = plni->plni_long_wait;
932 while (!peer->plp_recvd_hello) { /* wait to validate plp_match */
933 if (peer->plp_closing) {
/* w is milliseconds; warn on long connection waits */
937 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
938 CWARN("Waited %ds to connect to %s\n",
939 (int)(cfs_time_current_sec() - start),
940 libcfs_id2str(peer->plp_id));
/* allocate unique matchbits, skipping the reserved range */
946 if (peer->plp_match < PTL_RESERVED_MATCHBITS)
947 peer->plp_match = PTL_RESERVED_MATCHBITS;
948 matchbits = peer->plp_match++;
950 rc = PtlMEAttach(plni->plni_nih, plni->plni_portal, peer->plp_ptlid,
951 matchbits, 0, PTL_UNLINK, PTL_INS_BEFORE, &meh);
953 CERROR("PtlMEAttach for %s failed: %d\n",
954 libcfs_id2str(peer->plp_id), rc);
959 gettimeofday(&tx->tx_bulk_posted, NULL);
961 rc = PtlMDAttach(meh, md, LNET_UNLINK, &mdh);
963 CERROR("PtlMDAttach for %s failed: %d\n",
964 libcfs_id2str(peer->plp_id), rc);
965 rc2 = PtlMEUnlink(meh);
966 LASSERT (rc2 == PTL_OK);
970 tx->tx_bulkmdh = mdh;
973 * We need to set the stamp here because it
974 * we could have received a HELLO above that set
977 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
979 tx->tx_msg.ptlm_u.rdma.kptlrm_hdr = msg->msg_hdr;
980 tx->tx_msg.ptlm_u.rdma.kptlrm_matchbits = matchbits;
982 if (type == PTLLND_MSG_TYPE_GET) {
983 tx->tx_lnetreplymsg = lnet_create_reply_msg(ni, msg);
984 if (tx->tx_lnetreplymsg == NULL) {
985 CERROR("Can't create reply for GET to %s\n",
986 libcfs_id2str(msg->msg_target));
992 tx->tx_lnetmsg = msg;
993 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post passive %s p %d %p",
994 libcfs_id2str(msg->msg_target),
995 peer->plp_credits, peer->plp_outstanding_credits,
996 peer->plp_sent_credits,
997 plni->plni_peer_credits + peer->plp_lazy_credits,
998 lnet_msgtyp2str(msg->msg_type),
999 (le32_to_cpu(msg->msg_type) == LNET_MSG_PUT) ?
1000 le32_to_cpu(msg->msg_hdr.msg.put.ptl_index) :
1001 (le32_to_cpu(msg->msg_type) == LNET_MSG_GET) ?
1002 le32_to_cpu(msg->msg_hdr.msg.get.ptl_index) : -1,
/* Perform the active side of an RDMA the peer requested: bind an MD
 * over the local buffer and issue PtlGet (READ) or PtlPut (WRITE) on
 * the matchbits from the peer's rdma request.  READ uses threshold 2
 * (send + reply events) vs 1 for WRITE.  On failure the tx is failed
 * with -EIO, which closes the peer via ptllnd_tx_done(). */
1013 ptllnd_active_rdma(ptllnd_peer_t *peer, int type,
1014 lnet_msg_t *msg, __u64 matchbits,
1015 unsigned int niov, struct iovec *iov,
1016 unsigned int offset, unsigned int len)
1018 lnet_ni_t *ni = peer->plp_ni;
1019 ptllnd_ni_t *plni = ni->ni_data;
1020 ptllnd_tx_t *tx = ptllnd_new_tx(peer, type, 0);
1022 ptl_handle_md_t mdh;
1025 LASSERT (type == PTLLND_RDMA_READ ||
1026 type == PTLLND_RDMA_WRITE);
1029 CERROR("Can't allocate tx for RDMA %s with %s\n",
1030 (type == PTLLND_RDMA_WRITE) ? "write" : "read",
1031 libcfs_id2str(peer->plp_id));
1032 ptllnd_close_peer(peer, -ENOMEM);
1036 rc = ptllnd_set_txiov(tx, niov, iov, offset, len);
1038 CERROR ("Can't allocate iov %d for %s\n",
1039 niov, libcfs_id2str(peer->plp_id));
1044 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
1045 md.eq_handle = plni->plni_eqh;
1047 md.options = PTLLND_MD_OPTIONS;
/* READ generates two events on this MD (send + reply), WRITE one */
1048 md.threshold = (type == PTLLND_RDMA_READ) ? 2 : 1;
1050 ptllnd_set_md_buffer(&md, tx);
1052 rc = PtlMDBind(plni->plni_nih, md, LNET_UNLINK, &mdh);
1054 CERROR("PtlMDBind for %s failed: %d\n",
1055 libcfs_id2str(peer->plp_id), rc);
1060 tx->tx_bulkmdh = mdh;
1061 tx->tx_lnetmsg = msg;
1063 ptllnd_set_tx_deadline(tx);
1064 list_add_tail(&tx->tx_list, &peer->plp_activeq);
1065 gettimeofday(&tx->tx_bulk_posted, NULL);
1067 if (type == PTLLND_RDMA_READ)
1068 rc = PtlGet(mdh, peer->plp_ptlid,
1069 plni->plni_portal, 0, matchbits, 0);
/* the PUT's hdr_data tells the peer whether the RDMA succeeded */
1071 rc = PtlPut(mdh, PTL_NOACK_REQ, peer->plp_ptlid,
1072 plni->plni_portal, 0, matchbits, 0,
1073 (msg == NULL) ? PTLLND_RDMA_FAIL : PTLLND_RDMA_OK);
1078 CERROR("Can't initiate RDMA with %s: %d\n",
1079 libcfs_id2str(peer->plp_id), rc);
/* don't finalize the lnet msg on this error path */
1081 tx->tx_lnetmsg = NULL;
1084 ptllnd_tx_done(tx); /* this will close peer */
/* LNet send() method.  Chooses the send strategy per message type:
 * small payloads go inline as an IMMEDIATE message (payload copied so
 * no fragmented send); larger GETs/PUTs/REPLYs use passive RDMA via
 * ptllnd_passive_rdma().  Userspace (LNET_PID_USERFLAG) peers are
 * rejected. */
1089 ptllnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *msg)
1091 ptllnd_ni_t *plni = ni->ni_data;
1097 LASSERT (!msg->msg_routing);
1098 LASSERT (msg->msg_kiov == NULL);
1100 LASSERT (msg->msg_niov <= PTL_MD_MAX_IOV); /* !!! */
1102 CDEBUG(D_NET, "%s [%d]+%d,%d -> %s%s\n",
1103 lnet_msgtyp2str(msg->msg_type),
1104 msg->msg_niov, msg->msg_offset, msg->msg_len,
1105 libcfs_nid2str(msg->msg_target.nid),
1106 msg->msg_target_is_router ? "(rtr)" : "");
1108 if ((msg->msg_target.pid & LNET_PID_USERFLAG) != 0) {
1109 CERROR("Can't send to non-kernel peer %s\n",
1110 libcfs_id2str(msg->msg_target));
1111 return -EHOSTUNREACH;
1114 plp = ptllnd_find_peer(ni, msg->msg_target, 1);
1118 switch (msg->msg_type) {
1123 LASSERT (msg->msg_len == 0);
1124 break; /* send IMMEDIATE */
/* GET (presumably this case, given md_* use below): if the reply
 * would fit inline, fall through to IMMEDIATE; else passive RDMA */
1127 if (msg->msg_target_is_router)
1128 break; /* send IMMEDIATE */
1130 nob = msg->msg_md->md_length;
1131 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[nob]);
1132 if (nob <= plni->plni_max_msg_size)
1135 LASSERT ((msg->msg_md->md_options & LNET_MD_KIOV) == 0);
1136 rc = ptllnd_passive_rdma(plp, PTLLND_MSG_TYPE_GET, msg,
1137 msg->msg_md->md_niov,
1138 msg->msg_md->md_iov.iov,
1139 0, msg->msg_md->md_length);
1140 ptllnd_peer_decref(plp);
1143 case LNET_MSG_REPLY:
/* PUT/REPLY: inline if it fits the peer's advertised max, else RDMA */
1146 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[nob]);
1147 if (nob <= plp->plp_max_msg_size)
1148 break; /* send IMMEDIATE */
1150 rc = ptllnd_passive_rdma(plp, PTLLND_MSG_TYPE_PUT, msg,
1151 msg->msg_niov, msg->msg_iov,
1152 msg->msg_offset, msg->msg_len);
1153 ptllnd_peer_decref(plp);
1158 * NB copy the payload so we don't have to do a fragmented send */
1160 tx = ptllnd_new_tx(plp, PTLLND_MSG_TYPE_IMMEDIATE, msg->msg_len);
1162 CERROR("Can't allocate tx for lnet type %d to %s\n",
1163 msg->msg_type, libcfs_id2str(msg->msg_target));
1164 ptllnd_peer_decref(plp);
1168 lnet_copy_iov2flat(tx->tx_msgsize, &tx->tx_msg,
1169 offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload),
1170 msg->msg_niov, msg->msg_iov, msg->msg_offset,
1172 tx->tx_msg.ptlm_u.immediate.kptlim_hdr = msg->msg_hdr;
1174 tx->tx_lnetmsg = msg;
1175 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post immediate %s p %d %p",
1176 libcfs_id2str(msg->msg_target),
1177 plp->plp_credits, plp->plp_outstanding_credits,
1178 plp->plp_sent_credits,
1179 plni->plni_peer_credits + plp->plp_lazy_credits,
1180 lnet_msgtyp2str(msg->msg_type),
1181 (le32_to_cpu(msg->msg_type) == LNET_MSG_PUT) ?
1182 le32_to_cpu(msg->msg_hdr.msg.put.ptl_index) :
1183 (le32_to_cpu(msg->msg_type) == LNET_MSG_GET) ?
1184 le32_to_cpu(msg->msg_hdr.msg.get.ptl_index) : -1,
1187 ptllnd_peer_decref(plp);
/* Finish handling a received message: the buffer credit it consumed
 * becomes outstanding (owed back to the peer), and the sender is
 * kicked in case a queued tx can now piggyback the credit return. */
1192 ptllnd_rx_done(ptllnd_rx_t *rx)
1194 ptllnd_peer_t *plp = rx->rx_peer;
1195 lnet_ni_t *ni = plp->plp_ni;
1196 ptllnd_ni_t *plni = ni->ni_data;
1198 plp->plp_outstanding_credits++;
1200 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: rx=%p done\n",
1201 libcfs_id2str(plp->plp_id),
1202 plp->plp_credits, plp->plp_outstanding_credits,
1203 plp->plp_sent_credits,
1204 plni->plni_peer_credits + plp->plp_lazy_credits, rx);
1206 ptllnd_check_sends(rx->rx_peer);
1208 LASSERT (plni->plni_nrxs > 0);
/* LNet eager_recv() method — never expected to be called here, since
 * receives only block for router buffers (see comment below). */
1213 ptllnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
1214 void **new_privatep)
1216 /* Shouldn't get here; recvs only block for router buffers */
/* LNet recv() method.  Dispatch on the wire message type of the rx:
 * IMMEDIATE data is copied straight out of the receive buffer into the
 * caller's iovecs; PUT triggers an active RDMA READ of the sender's
 * buffer; GET triggers an active RDMA WRITE of the reply data (or,
 * when there is no reply msg, a WRITE signalling failure). */
1222 ptllnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
1223 int delayed, unsigned int niov,
1224 struct iovec *iov, lnet_kiov_t *kiov,
1225 unsigned int offset, unsigned int mlen, unsigned int rlen)
1227 ptllnd_rx_t *rx = private;
1231 LASSERT (kiov == NULL);
1232 LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
1234 switch (rx->rx_msg->ptlm_type) {
1238 case PTLLND_MSG_TYPE_IMMEDIATE:
/* received bytes must cover header + mlen of inline payload */
1239 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[mlen]);
1240 if (nob > rx->rx_nob) {
1241 CERROR("Immediate message from %s too big: %d(%d)\n",
1242 libcfs_id2str(rx->rx_peer->plp_id),
1247 lnet_copy_flat2iov(niov, iov, offset,
1248 rx->rx_nob, rx->rx_msg,
1249 offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload),
1251 lnet_finalize(ni, msg, 0);
1254 case PTLLND_MSG_TYPE_PUT:
/* sender exposed its buffer; pull the data with an RDMA READ */
1255 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_READ, msg,
1256 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
1257 niov, iov, offset, mlen);
1260 case PTLLND_MSG_TYPE_GET:
/* push the reply data with an RDMA WRITE; the msg == NULL variant
 * (elided else-branch) signals failure to the requester */
1262 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_WRITE, msg,
1263 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
1264 msg->msg_niov, msg->msg_iov,
1265 msg->msg_offset, msg->msg_len);
1267 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_WRITE, NULL,
1268 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
/* Validate and dispatch an incoming wire message of 'nob' bytes from
 * 'initiator'.  Stages: magic/version check (with byte-swap detection),
 * checksum verification, header byte-swapping, source/destination id
 * and stamp validation, per-type payload size checks, HELLO state
 * update, credit accounting, and finally handing PUT/GET/IMMEDIATE
 * headers to lnet_parse(). */
1278 ptllnd_parse_request(lnet_ni_t *ni, ptl_process_id_t initiator,
1279 kptl_msg_t *msg, unsigned int nob)
1281 ptllnd_ni_t *plni = ni->ni_data;
1282 const int basenob = offsetof(kptl_msg_t, ptlm_u);
1283 lnet_process_id_t srcid;
1292 CERROR("Very short receive from %s\n",
1293 ptllnd_ptlid2str(initiator));
1297 /* I can at least read MAGIC/VERSION */
/* a swabbed magic means the sender has opposite endianness: set 'flip'
 * and byte-swap all multi-byte header fields below */
1299 flip = msg->ptlm_magic == __swab32(PTLLND_MSG_MAGIC);
1300 if (!flip && msg->ptlm_magic != PTLLND_MSG_MAGIC) {
1301 CERROR("Bad protocol magic %08x from %s\n",
1302 msg->ptlm_magic, ptllnd_ptlid2str(initiator));
1306 msg_version = flip ? __swab16(msg->ptlm_version) : msg->ptlm_version;
1308 if (msg_version != PTLLND_MSG_VERSION) {
1309 CERROR("Bad protocol version %04x from %s: %04x expected\n",
1310 (__u32)msg_version, ptllnd_ptlid2str(initiator), PTLLND_MSG_VERSION);
1312 if (plni->plni_abort_on_protocol_mismatch)
1318 if (nob < basenob) {
1319 CERROR("Short receive from %s: got %d, wanted at least %d\n",
1320 ptllnd_ptlid2str(initiator), nob, basenob);
1324 /* checksum must be computed with
1325 * 1) ptlm_cksum zero and
1326 * 2) BEFORE anything gets modified/flipped
1328 msg_cksum = flip ? __swab32(msg->ptlm_cksum) : msg->ptlm_cksum;
1329 msg->ptlm_cksum = 0;
1330 if (msg_cksum != 0 &&
1331 msg_cksum != ptllnd_cksum(msg, offsetof(kptl_msg_t, ptlm_u))) {
1332 CERROR("Bad checksum from %s\n", ptllnd_ptlid2str(initiator));
/* restore the (host-order) version and checksum into the header */
1336 msg->ptlm_version = msg_version;
1337 msg->ptlm_cksum = msg_cksum;
1340 /* NB stamps are opaque cookies */
1341 __swab32s(&msg->ptlm_nob);
1342 __swab64s(&msg->ptlm_srcnid);
1343 __swab64s(&msg->ptlm_dstnid);
1344 __swab32s(&msg->ptlm_srcpid);
1345 __swab32s(&msg->ptlm_dstpid);
1348 srcid.nid = msg->ptlm_srcnid;
1349 srcid.pid = msg->ptlm_srcpid;
1351 if (LNET_NIDNET(msg->ptlm_srcnid) != LNET_NIDNET(ni->ni_nid)) {
1352 CERROR("Bad source id %s from %s\n",
1353 libcfs_id2str(srcid),
1354 ptllnd_ptlid2str(initiator));
1358 if (msg->ptlm_type == PTLLND_MSG_TYPE_NAK) {
1359 CERROR("NAK from %s (%s)\n",
1360 libcfs_id2str(srcid),
1361 ptllnd_ptlid2str(initiator));
1363 if (plni->plni_dump_on_nak)
1364 ptllnd_dump_debug(ni, srcid);
1366 if (plni->plni_abort_on_nak)
/* message must be addressed to this NI instance exactly */
1372 if (msg->ptlm_dstnid != ni->ni_nid ||
1373 msg->ptlm_dstpid != the_lnet.ln_pid) {
1374 CERROR("Bad dstid %s (%s expected) from %s\n",
1375 libcfs_id2str((lnet_process_id_t) {
1376 .nid = msg->ptlm_dstnid,
1377 .pid = msg->ptlm_dstpid}),
1378 libcfs_id2str((lnet_process_id_t) {
1380 .pid = the_lnet.ln_pid}),
1381 libcfs_id2str(srcid));
/* dststamp guards against messages meant for a previous incarnation */
1385 if (msg->ptlm_dststamp != plni->plni_stamp) {
1386 CERROR("Bad dststamp "LPX64"("LPX64" expected) from %s\n",
1387 msg->ptlm_dststamp, plni->plni_stamp,
1388 libcfs_id2str(srcid));
1392 PTLLND_HISTORY("RX %s: %s %d %p", libcfs_id2str(srcid),
1393 ptllnd_msgtype2str(msg->ptlm_type),
1394 msg->ptlm_credits, &rx);
/* per-type payload size checks and payload byte-swapping */
1396 switch (msg->ptlm_type) {
1397 case PTLLND_MSG_TYPE_PUT:
1398 case PTLLND_MSG_TYPE_GET:
1399 if (nob < basenob + sizeof(kptl_rdma_msg_t)) {
1400 CERROR("Short rdma request from %s(%s)\n",
1401 libcfs_id2str(srcid),
1402 ptllnd_ptlid2str(initiator));
1406 __swab64s(&msg->ptlm_u.rdma.kptlrm_matchbits);
1409 case PTLLND_MSG_TYPE_IMMEDIATE:
1410 if (nob < offsetof(kptl_msg_t,
1411 ptlm_u.immediate.kptlim_payload)) {
1412 CERROR("Short immediate from %s(%s)\n",
1413 libcfs_id2str(srcid),
1414 ptllnd_ptlid2str(initiator));
1419 case PTLLND_MSG_TYPE_HELLO:
1420 if (nob < basenob + sizeof(kptl_hello_msg_t)) {
1421 CERROR("Short hello from %s(%s)\n",
1422 libcfs_id2str(srcid),
1423 ptllnd_ptlid2str(initiator));
1427 __swab64s(&msg->ptlm_u.hello.kptlhm_matchbits);
1428 __swab32s(&msg->ptlm_u.hello.kptlhm_max_msg_size);
1432 case PTLLND_MSG_TYPE_NOOP:
1436 CERROR("Bad message type %d from %s(%s)\n", msg->ptlm_type,
1437 libcfs_id2str(srcid),
1438 ptllnd_ptlid2str(initiator));
/* only messages from peers we initiated contact with are accepted */
1442 plp = ptllnd_find_peer(ni, srcid, 0);
1444 CERROR("Can't find peer %s\n", libcfs_id2str(srcid));
1448 if (msg->ptlm_type == PTLLND_MSG_TYPE_HELLO) {
1449 if (plp->plp_recvd_hello) {
1450 CERROR("Unexpected HELLO from %s\n",
1451 libcfs_id2str(srcid));
1452 ptllnd_peer_decref(plp);
/* HELLO completes the handshake: record the peer's limits/stamp */
1456 plp->plp_max_msg_size = msg->ptlm_u.hello.kptlhm_max_msg_size;
1457 plp->plp_match = msg->ptlm_u.hello.kptlhm_matchbits;
1458 plp->plp_stamp = msg->ptlm_srcstamp;
1459 plp->plp_recvd_hello = 1;
1461 } else if (!plp->plp_recvd_hello) {
1463 CERROR("Bad message type %d (HELLO expected) from %s\n",
1464 msg->ptlm_type, libcfs_id2str(srcid));
1465 ptllnd_peer_decref(plp);
1468 } else if (msg->ptlm_srcstamp != plp->plp_stamp) {
1470 CERROR("Bad srcstamp "LPX64"("LPX64" expected) from %s\n",
1471 msg->ptlm_srcstamp, plp->plp_stamp,
1472 libcfs_id2str(srcid));
1473 ptllnd_peer_decref(plp);
1477 /* Check peer only sends when I've sent her credits */
1478 if (plp->plp_sent_credits == 0) {
1479 CERROR("%s[%d/%d+%d(%d)]: unexpected message\n",
1480 libcfs_id2str(plp->plp_id),
1481 plp->plp_credits, plp->plp_outstanding_credits,
1482 plp->plp_sent_credits,
1483 plni->plni_peer_credits + plp->plp_lazy_credits);
1486 plp->plp_sent_credits--;
1488 /* No check for credit overflow - the peer may post new buffers after
1489 * the startup handshake. */
1490 if (msg->ptlm_credits > 0) {
1491 plp->plp_credits += msg->ptlm_credits;
1492 ptllnd_check_sends(plp);
1495 /* All OK so far; assume the message is good... */
1502 switch (msg->ptlm_type) {
1503 default: /* message types have been checked already */
1504 ptllnd_rx_done(&rx);
1507 case PTLLND_MSG_TYPE_PUT:
1508 case PTLLND_MSG_TYPE_GET:
1509 rc = lnet_parse(ni, &msg->ptlm_u.rdma.kptlrm_hdr,
1510 msg->ptlm_srcnid, &rx, 1);
1512 ptllnd_rx_done(&rx);
1515 case PTLLND_MSG_TYPE_IMMEDIATE:
1516 rc = lnet_parse(ni, &msg->ptlm_u.immediate.kptlim_hdr,
1517 msg->ptlm_srcnid, &rx, 0);
1519 ptllnd_rx_done(&rx);
1523 ptllnd_peer_decref(plp);
/*
 * Portals event callback for a posted receive buffer.
 *
 * Handles PUT_END (an incoming message landed in the buffer) and UNLINK
 * events: on a clean PUT_END the message at the event offset is handed to
 * ptllnd_parse_request(); once the buffer is unlinked its accounting is
 * updated and it is reposted.
 *
 * NOTE(review): this listing is elided (the embedded source line numbers
 * jump), so braces, #else/#endif lines and some statements are not visible
 * here; comments below describe only what the visible lines establish.
 */
1527 ptllnd_buf_event (lnet_ni_t *ni, ptl_event_t *event)
1529 ptllnd_buffer_t *buf = ptllnd_eventarg2obj(event->md.user_ptr);
1530 ptllnd_ni_t *plni = ni->ni_data;
/* Incoming message starts at the event's offset within the buffer. */
1531 char *msg = &buf->plb_buffer[event->offset];
1533 int unlinked = event->type == PTL_EVENT_UNLINK;
/* Buffer must belong to this NI, and only PUT_END/UNLINK are expected. */
1535 LASSERT (buf->plb_ni == ni);
1536 LASSERT (event->type == PTL_EVENT_PUT_END ||
1537 event->type == PTL_EVENT_UNLINK);
/* Transport-level failure: log it; the message (if any) is not parsed. */
1539 if (event->ni_fail_type != PTL_NI_OK) {
1541 CERROR("event type %s(%d), status %s(%d) from %s\n",
1542 ptllnd_evtype2str(event->type), event->type,
1543 ptllnd_errtype2str(event->ni_fail_type),
1544 event->ni_fail_type,
1545 ptllnd_ptlid2str(event->initiator));
1547 } else if (event->type == PTL_EVENT_PUT_END) {
1548 #if (PTL_MD_LOCAL_ALIGN8 == 0)
1549 /* Portals can't force message alignment - someone sending an
1550 * odd-length message could misalign subsequent messages */
/* Reject odd-length messages: probable protocol version mismatch. */
1551 if ((event->mlength & 7) != 0) {
1552 CERROR("Message from %s has odd length %llu: "
1553 "probable version incompatibility\n",
1554 ptllnd_ptlid2str(event->initiator),
/* With 8-byte MD alignment guaranteed, the offset must be aligned too. */
1559 LASSERT ((event->offset & 7) == 0);
/* Hand the raw message to the protocol parser. */
1561 ptllnd_parse_request(ni, event->initiator,
1562 (kptl_msg_t *)msg, event->mlength);
1565 #ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
1566 /* UNLINK event only on explicit unlink */
1567 repost = (event->unlinked && event->type != PTL_EVENT_UNLINK);
1568 if (event->unlinked)
1571 /* UNLINK event only on implicit unlink */
1572 repost = (event->type == PTL_EVENT_UNLINK);
/* Buffer has come off the match list: update accounting before repost. */
1576 LASSERT(buf->plb_posted);
1577 buf->plb_posted = 0;
1578 plni->plni_nposted_buffers--;
/* Best-effort repost; return value deliberately ignored here. */
1582 (void) ptllnd_post_buffer(buf);
/*
 * Portals event callback for an active transmit descriptor (tx).
 *
 * Determines whether the event refers to the tx's request-message MD
 * (tx_reqmdh) or its bulk MD (tx_bulkmdh) — exactly one must match —
 * records completion timestamps when an MD is unlinked, validates the
 * event type against the message type, and finally moves the tx onto
 * the zombie list (for ptllnd_tx_done) on error or once both MDs have
 * been invalidated.
 *
 * NOTE(review): this listing is elided (the embedded source line numbers
 * jump); closing braces, 'break's, some conditions and #else/#endif lines
 * are not visible here.
 */
1586 ptllnd_tx_event (lnet_ni_t *ni, ptl_event_t *event)
1588 ptllnd_ni_t *plni = ni->ni_data;
1589 ptllnd_tx_t *tx = ptllnd_eventarg2obj(event->md.user_ptr);
1590 int error = (event->ni_fail_type != PTL_NI_OK);
/* 'unlinked' detection depends on which unlink semantics Portals has:
 * Lustre semantics flag it on the event; otherwise it is a distinct
 * UNLINK event type. */
1593 #ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
1594 int unlinked = event->unlinked;
1596 int unlinked = (event->type == PTL_EVENT_UNLINK);
1600 CERROR("Error %s(%d) event %s(%d) unlinked %d, %s(%d) for %s\n",
1601 ptllnd_errtype2str(event->ni_fail_type),
1602 event->ni_fail_type,
1603 ptllnd_evtype2str(event->type), event->type,
1604 unlinked, ptllnd_msgtype2str(tx->tx_type), tx->tx_type,
1605 libcfs_id2str(tx->tx_peer->plp_id));
1607 LASSERT (!PtlHandleIsEqual(event->md_handle, PTL_INVALID_HANDLE));
/* Which of the tx's two MDs does this event belong to? */
1609 isreq = PtlHandleIsEqual(event->md_handle, tx->tx_reqmdh);
1611 LASSERT (event->md.start == (void *)&tx->tx_msg);
/* Request MD gone: invalidate the handle and timestamp completion. */
1613 tx->tx_reqmdh = PTL_INVALID_HANDLE;
1614 gettimeofday(&tx->tx_req_done, NULL);
1618 isbulk = PtlHandleIsEqual(event->md_handle, tx->tx_bulkmdh);
/* Bulk MD gone: same treatment as the request MD above. */
1619 if ( isbulk && unlinked ) {
1620 tx->tx_bulkmdh = PTL_INVALID_HANDLE;
1621 gettimeofday(&tx->tx_bulk_done, NULL);
1624 LASSERT (!isreq != !isbulk); /* always one and only 1 match */
1626 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: TX done %p %s%s",
1627 libcfs_id2str(tx->tx_peer->plp_id),
1628 tx->tx_peer->plp_credits,
1629 tx->tx_peer->plp_outstanding_credits,
1630 tx->tx_peer->plp_sent_credits,
1631 plni->plni_peer_credits + tx->tx_peer->plp_lazy_credits,
1632 tx, isreq ? "REQ" : "BULK", unlinked ? "(unlinked)" : "");
1634 LASSERT (!isreq != !isbulk); /* always one and only 1 match */
/* Sanity-check the event type against the kind of tx in flight. */
1635 switch (tx->tx_type) {
/* Simple message sends: only SEND_END (or UNLINK) is legal. */
1639 case PTLLND_MSG_TYPE_NOOP:
1640 case PTLLND_MSG_TYPE_HELLO:
1641 case PTLLND_MSG_TYPE_IMMEDIATE:
1642 LASSERT (event->type == PTL_EVENT_UNLINK ||
1643 event->type == PTL_EVENT_SEND_END);
1647 case PTLLND_MSG_TYPE_GET:
1648 LASSERT (event->type == PTL_EVENT_UNLINK ||
1649 (isreq && event->type == PTL_EVENT_SEND_END) ||
1650 (isbulk && event->type == PTL_EVENT_PUT_END));
/* GET completion: the peer PUTs the reply into our bulk MD and flags
 * success/failure in hdr_data. */
1652 if (isbulk && !error && event->type == PTL_EVENT_PUT_END) {
1653 /* Check GET matched */
1654 if (event->hdr_data == PTLLND_RDMA_OK) {
1655 lnet_set_reply_msg_len(ni,
1656 tx->tx_lnetreplymsg,
1659 CERROR ("Unmatched GET with %s\n",
1660 libcfs_id2str(tx->tx_peer->plp_id));
1661 tx->tx_status = -EIO;
1666 case PTLLND_MSG_TYPE_PUT:
1667 LASSERT (event->type == PTL_EVENT_UNLINK ||
1668 (isreq && event->type == PTL_EVENT_SEND_END) ||
1669 (isbulk && event->type == PTL_EVENT_GET_END));
1672 case PTLLND_RDMA_READ:
1673 LASSERT (event->type == PTL_EVENT_UNLINK ||
1674 event->type == PTL_EVENT_SEND_END ||
1675 event->type == PTL_EVENT_REPLY_END);
1679 case PTLLND_RDMA_WRITE:
1680 LASSERT (event->type == PTL_EVENT_UNLINK ||
1681 event->type == PTL_EVENT_SEND_END);
1685 /* Schedule ptllnd_tx_done() on error or last completion event */
1687 (PtlHandleIsEqual(tx->tx_bulkmdh, PTL_INVALID_HANDLE) &&
1688 PtlHandleIsEqual(tx->tx_reqmdh, PTL_INVALID_HANDLE))) {
/* Tx is finished (or failed): queue it for final cleanup. */
1690 tx->tx_status = -EIO;
1691 list_del(&tx->tx_list);
1692 list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
/*
 * Scan a peer's queued (plp_txq) and active (plp_activeq) transmits for
 * one whose tx_deadline has already passed.
 *
 * NOTE(review): the listing is elided here — the 'return' statements
 * (presumably returning the timed-out tx, or NULL when none is found;
 * see the caller ptllnd_check_peer) are not visible.
 */
1697 ptllnd_find_timed_out_tx(ptllnd_peer_t *peer)
/* Deadlines are absolute wall-clock seconds; compare against "now". */
1699 time_t now = cfs_time_current_sec();
1700 struct list_head *tmp;
1702 list_for_each(tmp, &peer->plp_txq) {
1703 ptllnd_tx_t *tx = list_entry(tmp, ptllnd_tx_t, tx_list);
1705 if (tx->tx_deadline < now)
1709 list_for_each(tmp, &peer->plp_activeq) {
1710 ptllnd_tx_t *tx = list_entry(tmp, ptllnd_tx_t, tx_list);
1712 if (tx->tx_deadline < now)
1720 ptllnd_check_peer(ptllnd_peer_t *peer)
1722 ptllnd_tx_t *tx = ptllnd_find_timed_out_tx(peer);
1727 CERROR("%s: timed out\n", libcfs_id2str(peer->plp_id));
1728 ptllnd_close_peer(peer, -ETIMEDOUT);
/*
 * Periodic watchdog: scan a proportional chunk of the peer hash table
 * for timed-out transmits, so that every peer is checked several times
 * per timeout interval without walking the whole table on each call.
 * Advances plni_watchdog_peeridx round-robin and re-arms
 * plni_watchdog_nextt for the next interval.
 *
 * NOTE(review): elided listing — the declarations of 'n' and 'i', some
 * braces and any clamping of 'chunk' are not visible here.
 */
1732 ptllnd_watchdog (lnet_ni_t *ni, time_t now)
1734 ptllnd_ni_t *plni = ni->ni_data;
/* p = nominal seconds between watchdog runs; interval = actual elapsed
 * time since the previous scheduled run (>= p if we ran late). */
1736 int p = plni->plni_watchdog_interval;
1737 int chunk = plni->plni_peer_hash_size;
1738 int interval = now - (plni->plni_watchdog_nextt - p);
1740 struct list_head *hashlist;
1741 struct list_head *tmp;
1742 struct list_head *nxt;
1744 /* Time to check for RDMA timeouts on a few more peers:
1745 * I try to do checks every 'p' seconds on a proportion of the peer
1746 * table and I need to check every connection 'n' times within a
1747 * timeout interval, to ensure I detect a timeout on any connection
1748 * within (n+1)/n times the timeout interval. */
1750 LASSERT (now >= plni->plni_watchdog_nextt);
1752 if (plni->plni_timeout > n * interval) { /* Scan less than the whole table? */
1753 chunk = (chunk * n * interval) / plni->plni_timeout;
/* Walk 'chunk' hash buckets, resuming from the saved round-robin index;
 * _safe iteration because ptllnd_check_peer() may close (unlink) peers. */
1758 for (i = 0; i < chunk; i++) {
1759 hashlist = &plni->plni_peer_hash[plni->plni_watchdog_peeridx];
1761 list_for_each_safe(tmp, nxt, hashlist) {
1762 ptllnd_check_peer(list_entry(tmp, ptllnd_peer_t, plp_list));
1765 plni->plni_watchdog_peeridx = (plni->plni_watchdog_peeridx + 1) %
1766 plni->plni_peer_hash_size;
/* Schedule the next run one nominal interval from now. */
1769 plni->plni_watchdog_nextt = now + p;
1773 ptllnd_wait (lnet_ni_t *ni, int milliseconds)
1775 static struct timeval prevt;
1776 static int prevt_count;
1777 static int call_count;
1779 struct timeval start;
1780 struct timeval then;
1782 struct timeval deadline;
1784 ptllnd_ni_t *plni = ni->ni_data;
1792 /* Handle any currently queued events, returning immediately if any.
1793 * Otherwise block for the timeout and handle all events queued
1796 gettimeofday(&start, NULL);
1799 if (milliseconds <= 0) {
1802 deadline.tv_sec = start.tv_sec + milliseconds/1000;
1803 deadline.tv_usec = start.tv_usec + (milliseconds % 1000)*1000;
1805 if (deadline.tv_usec >= 1000000) {
1806 start.tv_usec -= 1000000;
1812 gettimeofday(&then, NULL);
1814 rc = PtlEQPoll(&plni->plni_eqh, 1, timeout, &event, &which);
1816 gettimeofday(&now, NULL);
1818 if ((now.tv_sec*1000 + now.tv_usec/1000) -
1819 (then.tv_sec*1000 + then.tv_usec/1000) > timeout + 1000) {
1820 /* 1000 mS grace...........................^ */
1821 CERROR("SLOW PtlEQPoll(%d): %dmS elapsed\n", timeout,
1822 (int)(now.tv_sec*1000 + now.tv_usec/1000) -
1823 (int)(then.tv_sec*1000 + then.tv_usec/1000));
1826 if (rc == PTL_EQ_EMPTY) {
1827 if (found) /* handled some events */
1830 if (now.tv_sec >= plni->plni_watchdog_nextt) { /* check timeouts? */
1831 ptllnd_watchdog(ni, now.tv_sec);
1832 LASSERT (now.tv_sec < plni->plni_watchdog_nextt);
1835 if (now.tv_sec > deadline.tv_sec || /* timeout expired */
1836 (now.tv_sec == deadline.tv_sec &&
1837 now.tv_usec >= deadline.tv_usec))
1840 if (milliseconds < 0 ||
1841 plni->plni_watchdog_nextt <= deadline.tv_sec) {
1842 timeout = (plni->plni_watchdog_nextt - now.tv_sec)*1000;
1844 timeout = (deadline.tv_sec - now.tv_sec)*1000 +
1845 (deadline.tv_usec - now.tv_usec)/1000;
1851 LASSERT (rc == PTL_OK || rc == PTL_EQ_DROPPED);
1853 if (rc == PTL_EQ_DROPPED)
1854 CERROR("Event queue: size %d is too small\n",
1855 plni->plni_eq_size);
1860 switch (ptllnd_eventarg2type(event.md.user_ptr)) {
1864 case PTLLND_EVENTARG_TYPE_TX:
1865 ptllnd_tx_event(ni, &event);
1868 case PTLLND_EVENTARG_TYPE_BUF:
1869 ptllnd_buf_event(ni, &event);
1874 while (!list_empty(&plni->plni_zombie_txs)) {
1875 tx = list_entry(plni->plni_zombie_txs.next,
1876 ptllnd_tx_t, tx_list);
1877 list_del_init(&tx->tx_list);
1881 if (prevt.tv_sec == 0 ||
1882 prevt.tv_sec != now.tv_sec) {
1883 PTLLND_HISTORY("%d wait entered at %d.%06d - prev %d %d.%06d",
1884 call_count, (int)start.tv_sec, (int)start.tv_usec,
1885 prevt_count, (int)prevt.tv_sec, (int)prevt.tv_usec);