1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2005 Cluster File Systems, Inc. All rights reserved.
5 * Author: Eric Barton <eeb@bartonsoftware.com>
7 * This file is part of the Lustre file system, http://www.lustre.org
8 * Lustre is a trademark of Cluster File Systems, Inc.
10 * This file is confidential source code owned by Cluster File Systems.
11 * No viewing, modification, compilation, redistribution, or any other
12 * form of use is permitted except through a signed license agreement.
14 * If you have not signed such an agreement, then you have no rights to
15 * this file. Please destroy it immediately and contact CFS.
/* Stamp 'tx' with its completion deadline: current time plus the NI's
 * configured timeout (plni_timeout, seconds). */
22 ptllnd_set_tx_deadline(ptllnd_tx_t *tx)
24 ptllnd_peer_t *peer = tx->tx_peer;
25 lnet_ni_t *ni = peer->plp_ni;
26 ptllnd_ni_t *plni = ni->ni_data;
28 tx->tx_deadline = cfs_time_current_sec() + plni->plni_timeout;
/* Queue 'tx' for transmission: set its deadline, append it to the peer's
 * send queue and kick the sender (ptllnd_check_sends). */
32 ptllnd_post_tx(ptllnd_tx_t *tx)
34 ptllnd_peer_t *peer = tx->tx_peer;
36 ptllnd_set_tx_deadline(tx);
37 list_add_tail(&tx->tx_list, &peer->plp_txq);
38 ptllnd_check_sends(peer);
/* Format a Portals process id as a string.  Uses a rotating set of 8 static
 * buffers so up to 8 results may be live at once (e.g. several in one
 * printf); NOTE(review): not thread-safe — static state, no locking. */
42 ptllnd_ptlid2str(ptl_process_id_t id)
44 static char strs[8][32];
/* 'idx' (declaration elided here) selects the next slot, wrapping below */
47 char *str = strs[idx++];
49 if (idx >= sizeof(strs)/sizeof(strs[0]))
52 snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
/* Final teardown of a peer: give back the receive buffers reserved for it
 * and free the structure.  The asserts document the required state: the
 * peer must already be closing and both tx queues must be empty. */
57 ptllnd_destroy_peer(ptllnd_peer_t *peer)
59 lnet_ni_t *ni = peer->plp_ni;
60 ptllnd_ni_t *plni = ni->ni_data;
/* buffers were sized for per-peer credits plus any lazy credits granted */
61 int nmsg = peer->plp_lazy_credits +
62 plni->plni_peer_credits;
64 ptllnd_size_buffers(ni, -nmsg);
66 LASSERT (peer->plp_closing);
67 LASSERT (plni->plni_npeers > 0);
68 LASSERT (list_empty(&peer->plp_txq));
69 LASSERT (list_empty(&peer->plp_activeq));
71 LIBCFS_FREE(peer, sizeof(*peer));
/* Fail every tx on queue 'q' with -ESHUTDOWN and move it onto the NI's
 * zombie list, where completion is finished later. */
75 ptllnd_abort_txs(ptllnd_ni_t *plni, struct list_head *q)
77 while (!list_empty(q)) {
78 ptllnd_tx_t *tx = list_entry(q->next, ptllnd_tx_t, tx_list);
80 tx->tx_status = -ESHUTDOWN;
81 list_del(&tx->tx_list);
82 list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
/* Begin closing 'peer': abort everything on its queued and active tx lists,
 * remove it from the peer hash and drop the hash-table reference.
 * Idempotent — a second call returns early via plp_closing. */
87 ptllnd_close_peer(ptllnd_peer_t *peer, int error)
89 lnet_ni_t *ni = peer->plp_ni;
90 ptllnd_ni_t *plni = ni->ni_data;
92 if (peer->plp_closing)
95 peer->plp_closing = 1;
/* warn (and optionally dump debug state) if work was still outstanding;
 * NOTE(review): third condition of this if is elided in this view */
97 if (!list_empty(&peer->plp_txq) ||
98 !list_empty(&peer->plp_activeq) ||
100 CWARN("Closing %s\n", libcfs_id2str(peer->plp_id));
101 if (plni->plni_debug)
102 ptllnd_dump_debug(ni, peer->plp_id);
105 ptllnd_abort_txs(plni, &peer->plp_txq);
106 ptllnd_abort_txs(plni, &peer->plp_activeq);
108 list_del(&peer->plp_list);
109 ptllnd_peer_decref(peer);
/* Look up the peer for 'id' in the NI's peer hash, taking a reference on it.
 * If not found and 'create' is set, allocate a new peer, reserve receive
 * buffers for it, hash it, and post the initial HELLO handshake message.
 * Returns a referenced peer (caller must decref) — error paths elided. */
113 ptllnd_find_peer(lnet_ni_t *ni, lnet_process_id_t id, int create)
115 ptllnd_ni_t *plni = ni->ni_data;
116 unsigned int hash = LNET_NIDADDR(id.nid) % plni->plni_peer_hash_size;
117 struct list_head *tmp;
122 LASSERT (LNET_NIDNET(id.nid) == LNET_NIDNET(ni->ni_nid));
/* fast path: existing peer in the hash chain */
124 list_for_each(tmp, &plni->plni_peer_hash[hash]) {
125 plp = list_entry(tmp, ptllnd_peer_t, plp_list);
127 if (plp->plp_id.nid == id.nid &&
128 plp->plp_id.pid == id.pid) {
129 ptllnd_peer_addref(plp);
137 /* New peer: check first for enough posted buffers */
139 rc = ptllnd_size_buffers(ni, plni->plni_peer_credits);
145 LIBCFS_ALLOC(plp, sizeof(*plp));
147 CERROR("Can't allocate new peer %s\n", libcfs_id2str(id));
/* allocation failed: undo the buffer reservation made above */
149 ptllnd_size_buffers(ni, -plni->plni_peer_credits);
155 plp->plp_ptlid.nid = LNET_NIDADDR(id.nid);
156 plp->plp_ptlid.pid = plni->plni_ptllnd_pid;
157 plp->plp_credits = 1; /* add more later when she gives me credits */
158 plp->plp_max_msg_size = plni->plni_max_msg_size; /* until I hear from her */
159 plp->plp_sent_credits = 1; /* Implicit credit for HELLO */
160 plp->plp_outstanding_credits = plni->plni_peer_credits - 1;
161 plp->plp_lazy_credits = 0;
162 plp->plp_extra_lazy_credits = 0;
165 plp->plp_recvd_hello = 0;
166 plp->plp_closing = 0;
167 plp->plp_refcount = 1;
168 CFS_INIT_LIST_HEAD(&plp->plp_list);
169 CFS_INIT_LIST_HEAD(&plp->plp_txq);
170 CFS_INIT_LIST_HEAD(&plp->plp_activeq);
/* extra ref for the hash table entry, over and above the one returned */
172 ptllnd_peer_addref(plp);
173 list_add_tail(&plp->plp_list, &plni->plni_peer_hash[hash]);
/* start the handshake: tell the peer our matchbits base and max msg size */
175 tx = ptllnd_new_tx(plp, PTLLND_MSG_TYPE_HELLO, 0);
177 CERROR("Can't send HELLO to %s\n", libcfs_id2str(id));
178 ptllnd_close_peer(plp, -ENOMEM);
179 ptllnd_peer_decref(plp);
183 tx->tx_msg.ptlm_u.hello.kptlhm_matchbits = PTL_RESERVED_MATCHBITS;
184 tx->tx_msg.ptlm_u.hello.kptlhm_max_msg_size = plni->plni_max_msg_size;
186 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post hello %p", libcfs_id2str(id),
187 tx->tx_peer->plp_credits,
188 tx->tx_peer->plp_outstanding_credits,
189 tx->tx_peer->plp_sent_credits,
190 plni->plni_peer_credits +
191 tx->tx_peer->plp_lazy_credits, tx);
/* Count the entries on list 'q' by walking it (counter/return elided). */
198 ptllnd_count_q(struct list_head *q)
203 list_for_each(e, q) {
/* Map a tx type code to a human-readable name for debug output
 * (the returned string literals are elided in this view). */
211 ptllnd_tx_typestr(int type)
214 case PTLLND_RDMA_WRITE:
217 case PTLLND_RDMA_READ:
220 case PTLLND_MSG_TYPE_PUT:
223 case PTLLND_MSG_TYPE_GET:
226 case PTLLND_MSG_TYPE_IMMEDIATE:
229 case PTLLND_MSG_TYPE_NOOP:
232 case PTLLND_MSG_TYPE_HELLO:
/* Debug-print one tx: type, peer, bulk/request posted+done timestamps
 * ('b' = bulk, 'r' = request) and its completion status. */
241 ptllnd_debug_tx(ptllnd_tx_t *tx)
243 CDEBUG(D_WARNING, "%s %s b %ld.%06ld/%ld.%06ld"
244 " r %ld.%06ld/%ld.%06ld status %d\n",
245 ptllnd_tx_typestr(tx->tx_type),
246 libcfs_id2str(tx->tx_peer->plp_id),
247 tx->tx_bulk_posted.tv_sec, tx->tx_bulk_posted.tv_usec,
248 tx->tx_bulk_done.tv_sec, tx->tx_bulk_done.tv_usec,
249 tx->tx_req_posted.tv_sec, tx->tx_req_posted.tv_usec,
250 tx->tx_req_done.tv_sec, tx->tx_req_done.tv_usec,
/* Dump debug state for the peer matching 'id': its flags, credit counters,
 * queue depths, then every tx on its txq/activeq, plus any of its txs on
 * the NI-wide zombie and history lists. */
255 ptllnd_debug_peer(lnet_ni_t *ni, lnet_process_id_t id)
257 ptllnd_peer_t *plp = ptllnd_find_peer(ni, id, 0);
258 struct list_head *tmp;
259 ptllnd_ni_t *plni = ni->ni_data;
263 CDEBUG(D_WARNING, "No peer %s\n", libcfs_id2str(id));
267 CDEBUG(D_WARNING, "%s %s%s [%d] "LPU64".%06d m "LPU64" q %d/%d c %d/%d+%d(%d)\n",
269 plp->plp_recvd_hello ? "H" : "_",
270 plp->plp_closing ? "C" : "_",
272 plp->plp_stamp / 1000000, (int)(plp->plp_stamp % 1000000),
274 ptllnd_count_q(&plp->plp_txq),
275 ptllnd_count_q(&plp->plp_activeq),
276 plp->plp_credits, plp->plp_outstanding_credits, plp->plp_sent_credits,
277 plni->plni_peer_credits + plp->plp_lazy_credits);
279 CDEBUG(D_WARNING, "txq:\n");
280 list_for_each (tmp, &plp->plp_txq) {
281 tx = list_entry(tmp, ptllnd_tx_t, tx_list);
286 CDEBUG(D_WARNING, "activeq:\n");
287 list_for_each (tmp, &plp->plp_activeq) {
288 tx = list_entry(tmp, ptllnd_tx_t, tx_list);
/* zombie/history lists are NI-wide: filter by this peer's id */
293 CDEBUG(D_WARNING, "zombies:\n");
294 list_for_each (tmp, &plni->plni_zombie_txs) {
295 tx = list_entry(tmp, ptllnd_tx_t, tx_list);
297 if (tx->tx_peer->plp_id.nid == id.nid &&
298 tx->tx_peer->plp_id.pid == id.pid)
302 CDEBUG(D_WARNING, "history:\n");
303 list_for_each (tmp, &plni->plni_tx_history) {
304 tx = list_entry(tmp, ptllnd_tx_t, tx_list);
306 if (tx->tx_peer->plp_id.nid == id.nid &&
307 tx->tx_peer->plp_id.pid == id.pid)
/* drop the ref taken by ptllnd_find_peer() above */
311 ptllnd_peer_decref(plp);
/* Dump both the per-peer debug state and the global tx history. */
315 ptllnd_dump_debug(lnet_ni_t *ni, lnet_process_id_t id)
317 ptllnd_debug_peer(ni, id);
318 ptllnd_dump_history();
/* LND notify handler: create/find the peer for 'nid' and busy-wait until
 * its HELLO reply arrives, warning every plni_long_wait ms while waiting.
 * Only used to connect to routers at startup (see comment below). */
322 ptllnd_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive)
324 lnet_process_id_t id;
326 time_t start = cfs_time_current_sec();
327 ptllnd_ni_t *plni = ni->ni_data;
328 int w = plni->plni_long_wait;
330 /* This is only actually used to connect to routers at startup! */
334 id.pid = LUSTRE_SRV_LNET_PID;
336 peer = ptllnd_find_peer(ni, id, 1);
340 /* wait for the peer to reply */
341 while (!peer->plp_recvd_hello) {
/* 'w' is in milliseconds — warn once the wait exceeds w/1000 seconds */
342 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
343 CWARN("Waited %ds to connect to %s\n",
344 (int)(cfs_time_current_sec() - start),
352 ptllnd_peer_decref(peer);
/* Adjust message headroom ("lazy credits") for 'id' by 'nasync'.  Negative
 * requests are only accounted (extra_lazy_credits) because the peer may
 * already hold credits for those buffers; positive requests first consume
 * any accumulated extra headroom, then grow the buffer pool. */
356 ptllnd_setasync(lnet_ni_t *ni, lnet_process_id_t id, int nasync)
358 ptllnd_peer_t *peer = ptllnd_find_peer(ni, id, nasync > 0);
364 LASSERT (peer->plp_lazy_credits >= 0);
365 LASSERT (peer->plp_extra_lazy_credits >= 0);
367 /* If nasync < 0, we're being told we can reduce the total message
368 * headroom. We can't do this right now because our peer might already
369 * have credits for the extra buffers, so we just account the extra
370 * headroom in case we need it later and only destroy buffers when the
373 * Note that the following condition handles this case, where it
374 * actually increases the extra lazy credit counter. */
376 if (nasync <= peer->plp_extra_lazy_credits) {
377 peer->plp_extra_lazy_credits -= nasync;
381 LASSERT (nasync > 0);
/* consume the banked extra headroom before sizing new buffers */
383 nasync -= peer->plp_extra_lazy_credits;
384 peer->plp_extra_lazy_credits = 0;
386 rc = ptllnd_size_buffers(ni, nasync);
388 peer->plp_lazy_credits += nasync;
389 peer->plp_outstanding_credits += nasync;
/* Rotate-and-add checksum over 'nob' bytes at 'ptr'.  Never returns 0
 * because 0 means "no checksum" on the wire (see below). */
396 ptllnd_cksum (void *ptr, int nob)
402 sum = ((sum << 1) | (sum >> 31)) + *c++;
404 /* ensure I don't return 0 (== no checksum) */
405 return (sum == 0) ? 1 : sum;
/* Allocate and initialise a tx descriptor for 'peer'.  The message size is
 * computed per message type (payload only for IMMEDIATE), rounded up to 8
 * bytes, and the embedded wire message header is fully initialised.
 * Takes a peer reference; returns the tx (NULL path elided). */
409 ptllnd_new_tx(ptllnd_peer_t *peer, int type, int payload_nob)
411 lnet_ni_t *ni = peer->plp_ni;
412 ptllnd_ni_t *plni = ni->ni_data;
416 CDEBUG(D_NET, "peer=%p type=%d payload=%d\n", peer, type, payload_nob);
/* RDMA txs carry no wire message payload of their own */
422 case PTLLND_RDMA_WRITE:
423 case PTLLND_RDMA_READ:
424 LASSERT (payload_nob == 0);
428 case PTLLND_MSG_TYPE_PUT:
429 case PTLLND_MSG_TYPE_GET:
430 LASSERT (payload_nob == 0);
431 msgsize = offsetof(kptl_msg_t, ptlm_u) +
432 sizeof(kptl_rdma_msg_t);
435 case PTLLND_MSG_TYPE_IMMEDIATE:
436 msgsize = offsetof(kptl_msg_t,
437 ptlm_u.immediate.kptlim_payload[payload_nob]);
440 case PTLLND_MSG_TYPE_NOOP:
441 LASSERT (payload_nob == 0);
442 msgsize = offsetof(kptl_msg_t, ptlm_u);
445 case PTLLND_MSG_TYPE_HELLO:
446 LASSERT (payload_nob == 0);
447 msgsize = offsetof(kptl_msg_t, ptlm_u) +
448 sizeof(kptl_hello_msg_t);
/* round up to 8-byte alignment */
452 msgsize = (msgsize + 7) & ~7;
453 LASSERT (msgsize <= peer->plp_max_msg_size);
/* tx struct and wire message are allocated as one block */
455 LIBCFS_ALLOC(tx, offsetof(ptllnd_tx_t, tx_msg) + msgsize);
458 CERROR("Can't allocate msg type %d for %s\n",
459 type, libcfs_id2str(peer->plp_id));
463 CFS_INIT_LIST_HEAD(&tx->tx_list);
466 tx->tx_lnetmsg = tx->tx_lnetreplymsg = NULL;
469 tx->tx_reqmdh = PTL_INVALID_HANDLE;
470 tx->tx_bulkmdh = PTL_INVALID_HANDLE;
471 tx->tx_msgsize = msgsize;
472 tx->tx_completing = 0;
475 memset(&tx->tx_bulk_posted, 0, sizeof(tx->tx_bulk_posted));
476 memset(&tx->tx_bulk_done, 0, sizeof(tx->tx_bulk_done));
477 memset(&tx->tx_req_posted, 0, sizeof(tx->tx_req_posted));
478 memset(&tx->tx_req_done, 0, sizeof(tx->tx_req_done));
/* initialise the wire header; checksum stays 0 unless enabled at send */
481 tx->tx_msg.ptlm_magic = PTLLND_MSG_MAGIC;
482 tx->tx_msg.ptlm_version = PTLLND_MSG_VERSION;
483 tx->tx_msg.ptlm_type = type;
484 tx->tx_msg.ptlm_credits = 0;
485 tx->tx_msg.ptlm_nob = msgsize;
486 tx->tx_msg.ptlm_cksum = 0;
487 tx->tx_msg.ptlm_srcnid = ni->ni_nid;
488 tx->tx_msg.ptlm_srcstamp = plni->plni_stamp;
489 tx->tx_msg.ptlm_dstnid = peer->plp_id.nid;
490 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
491 tx->tx_msg.ptlm_srcpid = the_lnet.ln_pid;
492 tx->tx_msg.ptlm_dstpid = peer->plp_id.pid;
/* tx holds a reference on its peer until completion */
495 ptllnd_peer_addref(peer);
498 CDEBUG(D_NET, "tx=%p\n",tx);
/* Unlink the MD referenced by '*mdh' and spin until it is invalidated by
 * ptllnd_tx_event().  With Lustre unlink semantics disabled, a successful
 * PtlMDUnlink produces no event, so we break out immediately; otherwise we
 * keep retrying while the MD is in use, warning on long waits. */
504 ptllnd_abort_tx(ptllnd_tx_t *tx, ptl_handle_md_t *mdh)
506 ptllnd_peer_t *peer = tx->tx_peer;
507 lnet_ni_t *ni = peer->plp_ni;
509 time_t start = cfs_time_current_sec();
510 ptllnd_ni_t *plni = ni->ni_data;
511 int w = plni->plni_long_wait;
513 while (!PtlHandleIsEqual(*mdh, PTL_INVALID_HANDLE)) {
514 rc = PtlMDUnlink(*mdh);
515 #ifndef LUSTRE_PORTALS_UNLINK_SEMANTICS
516 if (rc == PTL_OK) /* unlink successful => no unlinked event */
518 LASSERT (rc == PTL_MD_IN_USE);
/* 'w' is in milliseconds */
520 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
521 CWARN("Waited %ds to abort tx to %s\n",
522 (int)(cfs_time_current_sec() - start),
523 libcfs_id2str(peer->plp_id));
526 /* Wait for ptllnd_tx_event() to invalidate */
/* Trim the tx history list down to plni_max_tx_history entries, freeing
 * the oldest txs (and dropping their peer references) first. */
532 ptllnd_cull_tx_history(ptllnd_ni_t *plni)
534 int max = plni->plni_max_tx_history;
536 while (plni->plni_ntx_history > max) {
537 ptllnd_tx_t *tx = list_entry(plni->plni_tx_history.next,
538 ptllnd_tx_t, tx_list);
539 list_del(&tx->tx_list);
541 ptllnd_peer_decref(tx->tx_peer);
543 LIBCFS_FREE(tx, offsetof(ptllnd_tx_t, tx_msg) + tx->tx_msgsize);
545 LASSERT (plni->plni_ntxs > 0);
547 plni->plni_ntx_history--;
/* Complete 'tx': close the peer on error, abort any outstanding MDs, free
 * the iov, finalize the associated lnet message(s), then park the tx on
 * the history list.  Re-entry-safe via tx_completing (see comment). */
552 ptllnd_tx_done(ptllnd_tx_t *tx)
554 ptllnd_peer_t *peer = tx->tx_peer;
555 lnet_ni_t *ni = peer->plp_ni;
556 ptllnd_ni_t *plni = ni->ni_data;
558 /* CAVEAT EMPTOR: If this tx is being aborted, I'll continue to get
559 * events for this tx until it's unlinked. So I set tx_completing to
560 * flag the tx is getting handled */
562 if (tx->tx_completing)
565 tx->tx_completing = 1;
567 if (!list_empty(&tx->tx_list))
568 list_del_init(&tx->tx_list);
570 if (tx->tx_status != 0) {
571 if (plni->plni_debug) {
572 CERROR("Completing tx for %s with error %d\n",
573 libcfs_id2str(peer->plp_id), tx->tx_status);
/* any tx error is fatal for the connection */
576 ptllnd_close_peer(peer, tx->tx_status);
579 ptllnd_abort_tx(tx, &tx->tx_reqmdh);
580 ptllnd_abort_tx(tx, &tx->tx_bulkmdh);
582 if (tx->tx_niov > 0) {
583 LIBCFS_FREE(tx->tx_iov, tx->tx_niov * sizeof(*tx->tx_iov));
587 if (tx->tx_lnetreplymsg != NULL) {
588 LASSERT (tx->tx_type == PTLLND_MSG_TYPE_GET);
589 LASSERT (tx->tx_lnetmsg != NULL);
590 /* Simulate GET success always */
591 lnet_finalize(ni, tx->tx_lnetmsg, 0);
592 CDEBUG(D_NET, "lnet_finalize(tx_lnetreplymsg=%p)\n",tx->tx_lnetreplymsg);
593 lnet_finalize(ni, tx->tx_lnetreplymsg, tx->tx_status);
594 } else if (tx->tx_lnetmsg != NULL) {
595 lnet_finalize(ni, tx->tx_lnetmsg, tx->tx_status);
/* keep the tx on the history list for debugging, bounded by cull below */
598 plni->plni_ntx_history++;
599 list_add_tail(&tx->tx_list, &plni->plni_tx_history);
601 ptllnd_cull_tx_history(plni);
/* Build tx->tx_iov: a ptl_md_iovec_t array describing bytes
 * [offset, offset+len) of the caller's iovec.  Leading iovecs wholly
 * consumed by 'offset' are skipped, the first used iovec is trimmed, and
 * the last is clipped to 'resid'.  Allocation failure path elided. */
605 ptllnd_set_txiov(ptllnd_tx_t *tx,
606 unsigned int niov, struct iovec *iov,
607 unsigned int offset, unsigned int len)
609 ptl_md_iovec_t *piov;
618 * Remove iovec's at the beginning that
619 * are skipped because of the offset.
620 * Adjust the offset accordingly
624 if (offset < iov->iov_len)
626 offset -= iov->iov_len;
632 int temp_offset = offset;
634 LIBCFS_ALLOC(piov, niov * sizeof(*piov));
638 for (npiov = 0;; npiov++) {
639 LASSERT (npiov < niov);
640 LASSERT (iov->iov_len >= temp_offset);
642 piov[npiov].iov_base = iov[npiov].iov_base + temp_offset;
643 piov[npiov].iov_len = iov[npiov].iov_len - temp_offset;
/* last fragment: clip to the residual length and stop */
645 if (piov[npiov].iov_len >= resid) {
646 piov[npiov].iov_len = resid;
650 resid -= piov[npiov].iov_len;
660 /* Dang! The piov I allocated was too big and it's a drag to
661 * have to maintain separate 'allocated' and 'used' sizes, so
662 * I'll just do it again; NB this doesn't happen normally... */
663 LIBCFS_FREE(piov, niov * sizeof(*piov));
/* Point 'md' at the tx's buffer: a single iovec is described directly
 * (start/length); multiple iovecs use PTL_MD_IOVEC.  Zero-iov case elided. */
669 ptllnd_set_md_buffer(ptl_md_t *md, ptllnd_tx_t *tx)
671 unsigned int niov = tx->tx_niov;
672 ptl_md_iovec_t *iov = tx->tx_iov;
674 LASSERT ((md->options & PTL_MD_IOVEC) == 0);
679 } else if (niov == 1) {
680 md->start = iov[0].iov_base;
681 md->length = iov[0].iov_len;
685 md->options |= PTL_MD_IOVEC;
/* Post a receive buffer: attach a wildcard ME on the LND portal, then
 * attach an infinite-threshold, max-size MD so many incoming messages can
 * land in the one buffer.  On MD attach failure the ME is unlinked and the
 * posted-buffer count is rolled back. */
690 ptllnd_post_buffer(ptllnd_buffer_t *buf)
692 lnet_ni_t *ni = buf->plb_ni;
693 ptllnd_ni_t *plni = ni->ni_data;
694 ptl_process_id_t anyid = {
698 .start = buf->plb_buffer,
699 .length = plni->plni_buffer_size,
700 .threshold = PTL_MD_THRESH_INF,
701 .max_size = plni->plni_max_msg_size,
702 .options = (PTLLND_MD_OPTIONS |
703 PTL_MD_OP_PUT | PTL_MD_MAX_SIZE |
704 PTL_MD_LOCAL_ALIGN8),
705 .user_ptr = ptllnd_obj2eventarg(buf, PTLLND_EVENTARG_TYPE_BUF),
706 .eq_handle = plni->plni_eqh};
710 LASSERT (!buf->plb_posted);
712 rc = PtlMEAttach(plni->plni_nih, plni->plni_portal,
713 anyid, LNET_MSG_MATCHBITS, 0,
714 PTL_UNLINK, PTL_INS_AFTER, &meh);
716 CERROR("PtlMEAttach failed: %s(%d)\n",
717 ptllnd_errtype2str(rc), rc);
722 plni->plni_nposted_buffers++;
724 rc = PtlMDAttach(meh, md, LNET_UNLINK, &buf->plb_md);
728 CERROR("PtlMDAttach failed: %s(%d)\n",
729 ptllnd_errtype2str(rc), rc);
/* undo: the buffer never became live */
732 plni->plni_nposted_buffers--;
734 rc = PtlMEUnlink(meh);
735 LASSERT (rc == PTL_OK);
/* Drain the peer's send queue, enforcing the credit protocol:
 *  - queue a NOOP if we owe the peer many credits and have nothing to send;
 *  - stop when out of credits, or when only the last credit remains and we
 *    have no credits to return (it is reserved for credit returns);
 *  - piggy-back all outstanding credits on each message, optionally
 *    checksum the header, bind an MD and PtlPut it to the peer. */
741 ptllnd_check_sends(ptllnd_peer_t *peer)
743 lnet_ni_t *ni = peer->plp_ni;
744 ptllnd_ni_t *plni = ni->ni_data;
750 CDEBUG(D_NET, "%s: [%d/%d+%d(%d)\n",
751 libcfs_id2str(peer->plp_id), peer->plp_credits,
752 peer->plp_outstanding_credits, peer->plp_sent_credits,
753 plni->plni_peer_credits + peer->plp_lazy_credits);
/* nothing queued but lots of credits owed: send a NOOP to return them */
755 if (list_empty(&peer->plp_txq) &&
756 peer->plp_outstanding_credits >= PTLLND_CREDIT_HIGHWATER(plni) &&
757 peer->plp_credits != 0) {
759 tx = ptllnd_new_tx(peer, PTLLND_MSG_TYPE_NOOP, 0);
760 CDEBUG(D_NET, "NOOP tx=%p\n",tx);
762 CERROR("Can't return credits to %s\n",
763 libcfs_id2str(peer->plp_id));
765 list_add_tail(&tx->tx_list, &peer->plp_txq);
769 while (!list_empty(&peer->plp_txq)) {
770 tx = list_entry(peer->plp_txq.next, ptllnd_tx_t, tx_list);
772 LASSERT (tx->tx_msgsize > 0);
774 LASSERT (peer->plp_outstanding_credits >= 0);
775 LASSERT (peer->plp_sent_credits >= 0);
776 LASSERT (peer->plp_outstanding_credits + peer->plp_sent_credits
777 <= plni->plni_peer_credits + peer->plp_lazy_credits);
778 LASSERT (peer->plp_credits >= 0);
780 if (peer->plp_credits == 0) { /* no credits */
781 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: no creds for %p",
782 libcfs_id2str(peer->plp_id),
784 peer->plp_outstanding_credits,
785 peer->plp_sent_credits,
786 plni->plni_peer_credits +
787 peer->plp_lazy_credits, tx);
791 if (peer->plp_credits == 1 && /* last credit reserved for */
792 peer->plp_outstanding_credits == 0) { /* returning credits */
793 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: too few creds for %p",
794 libcfs_id2str(peer->plp_id),
796 peer->plp_outstanding_credits,
797 peer->plp_sent_credits,
798 plni->plni_peer_credits +
799 peer->plp_lazy_credits, tx);
/* move tx to the active queue: it is now being transmitted */
803 list_del(&tx->tx_list);
804 list_add_tail(&tx->tx_list, &peer->plp_activeq);
806 CDEBUG(D_NET, "Sending at TX=%p type=%s (%d)\n",tx,
807 ptllnd_msgtype2str(tx->tx_type),tx->tx_type);
/* a NOOP is redundant if real traffic or a credit backlog will follow */
809 if (tx->tx_type == PTLLND_MSG_TYPE_NOOP &&
810 (!list_empty(&peer->plp_txq) ||
811 peer->plp_outstanding_credits <
812 PTLLND_CREDIT_HIGHWATER(plni))) {
818 /* Set stamp at the last minute; on a new peer, I don't know it
819 * until I receive the HELLO back */
820 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
823 * Return all the credits we have
825 tx->tx_msg.ptlm_credits = peer->plp_outstanding_credits;
826 peer->plp_sent_credits += peer->plp_outstanding_credits;
827 peer->plp_outstanding_credits = 0;
/* checksum covers the fixed header only (up to ptlm_u) */
834 if (plni->plni_checksum)
835 tx->tx_msg.ptlm_cksum =
836 ptllnd_cksum(&tx->tx_msg,
837 offsetof(kptl_msg_t, ptlm_u));
839 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
840 md.eq_handle = plni->plni_eqh;
842 md.options = PTLLND_MD_OPTIONS;
843 md.start = &tx->tx_msg;
844 md.length = tx->tx_msgsize;
846 rc = PtlMDBind(plni->plni_nih, md, LNET_UNLINK, &mdh);
848 CERROR("PtlMDBind for %s failed: %s(%d)\n",
849 libcfs_id2str(peer->plp_id),
850 ptllnd_errtype2str(rc), rc);
851 tx->tx_status = -EIO;
856 LASSERT (tx->tx_type != PTLLND_RDMA_WRITE &&
857 tx->tx_type != PTLLND_RDMA_READ);
860 gettimeofday(&tx->tx_req_posted, NULL);
862 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: %s %p c %d",
863 libcfs_id2str(peer->plp_id),
865 peer->plp_outstanding_credits,
866 peer->plp_sent_credits,
867 plni->plni_peer_credits +
868 peer->plp_lazy_credits,
869 ptllnd_msgtype2str(tx->tx_type), tx,
870 tx->tx_msg.ptlm_credits);
872 rc = PtlPut(mdh, PTL_NOACK_REQ, peer->plp_ptlid,
873 plni->plni_portal, 0, LNET_MSG_MATCHBITS, 0, 0);
875 CERROR("PtlPut for %s failed: %s(%d)\n",
876 libcfs_id2str(peer->plp_id),
877 ptllnd_errtype2str(rc), rc);
878 tx->tx_status = -EIO;
/* Set up a passive-side RDMA (peer will PtlPut for our GET, PtlGet for our
 * PUT): build the tx iov, wait for the HELLO handshake to validate the
 * matchbits space, attach an ME/MD at a fresh matchbits value, then queue
 * the PUT/GET request message carrying those matchbits to the peer. */
886 ptllnd_passive_rdma(ptllnd_peer_t *peer, int type, lnet_msg_t *msg,
887 unsigned int niov, struct iovec *iov,
888 unsigned int offset, unsigned int len)
890 lnet_ni_t *ni = peer->plp_ni;
891 ptllnd_ni_t *plni = ni->ni_data;
892 ptllnd_tx_t *tx = ptllnd_new_tx(peer, type, 0);
902 CDEBUG(D_NET, "niov=%d offset=%d len=%d\n",niov,offset,len);
904 LASSERT (type == PTLLND_MSG_TYPE_GET ||
905 type == PTLLND_MSG_TYPE_PUT);
908 CERROR("Can't allocate %s tx for %s\n",
909 type == PTLLND_MSG_TYPE_GET ? "GET" : "PUT/REPLY",
910 libcfs_id2str(peer->plp_id));
914 rc = ptllnd_set_txiov(tx, niov, iov, offset, len);
916 CERROR ("Can't allocate iov %d for %s\n",
917 niov, libcfs_id2str(peer->plp_id));
922 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
923 md.eq_handle = plni->plni_eqh;
926 md.options = PTLLND_MD_OPTIONS;
/* for our GET the peer PUTs into this MD; for our PUT the peer GETs it */
927 if(type == PTLLND_MSG_TYPE_GET)
928 md.options |= PTL_MD_OP_PUT | PTL_MD_ACK_DISABLE;
930 md.options |= PTL_MD_OP_GET;
931 ptllnd_set_md_buffer(&md, tx);
933 start = cfs_time_current_sec();
934 w = plni->plni_long_wait;
936 while (!peer->plp_recvd_hello) { /* wait to validate plp_match */
937 if (peer->plp_closing) {
941 if (w > 0 && cfs_time_current_sec() > start + w/1000) {
942 CWARN("Waited %ds to connect to %s\n",
943 (int)(cfs_time_current_sec() - start),
944 libcfs_id2str(peer->plp_id));
/* matchbits below PTL_RESERVED_MATCHBITS are reserved by the protocol */
950 if (peer->plp_match < PTL_RESERVED_MATCHBITS)
951 peer->plp_match = PTL_RESERVED_MATCHBITS;
952 matchbits = peer->plp_match++;
954 rc = PtlMEAttach(plni->plni_nih, plni->plni_portal, peer->plp_ptlid,
955 matchbits, 0, PTL_UNLINK, PTL_INS_BEFORE, &meh);
957 CERROR("PtlMEAttach for %s failed: %s(%d)\n",
958 libcfs_id2str(peer->plp_id),
959 ptllnd_errtype2str(rc), rc);
964 gettimeofday(&tx->tx_bulk_posted, NULL);
966 rc = PtlMDAttach(meh, md, LNET_UNLINK, &mdh);
968 CERROR("PtlMDAttach for %s failed: %s(%d)\n",
969 libcfs_id2str(peer->plp_id),
970 ptllnd_errtype2str(rc), rc);
971 rc2 = PtlMEUnlink(meh);
972 LASSERT (rc2 == PTL_OK);
976 tx->tx_bulkmdh = mdh;
979 * We need to set the stamp here because it
980 * we could have received a HELLO above that set
983 tx->tx_msg.ptlm_dststamp = peer->plp_stamp;
985 tx->tx_msg.ptlm_u.rdma.kptlrm_hdr = msg->msg_hdr;
986 tx->tx_msg.ptlm_u.rdma.kptlrm_matchbits = matchbits;
988 if (type == PTLLND_MSG_TYPE_GET) {
989 tx->tx_lnetreplymsg = lnet_create_reply_msg(ni, msg);
990 if (tx->tx_lnetreplymsg == NULL) {
991 CERROR("Can't create reply for GET to %s\n",
992 libcfs_id2str(msg->msg_target));
998 tx->tx_lnetmsg = msg;
999 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post passive %s p %d %p",
1000 libcfs_id2str(msg->msg_target),
1001 peer->plp_credits, peer->plp_outstanding_credits,
1002 peer->plp_sent_credits,
1003 plni->plni_peer_credits + peer->plp_lazy_credits,
1004 lnet_msgtyp2str(msg->msg_type),
1005 (le32_to_cpu(msg->msg_type) == LNET_MSG_PUT) ?
1006 le32_to_cpu(msg->msg_hdr.msg.put.ptl_index) :
1007 (le32_to_cpu(msg->msg_type) == LNET_MSG_GET) ?
1008 le32_to_cpu(msg->msg_hdr.msg.get.ptl_index) : -1,
/* Perform the active side of an RDMA the peer requested: bind an MD over
 * the local iov and issue PtlGet (READ) or PtlPut (WRITE) against the
 * peer's matchbits.  Failure closes the peer via ptllnd_tx_done. */
1019 ptllnd_active_rdma(ptllnd_peer_t *peer, int type,
1020 lnet_msg_t *msg, __u64 matchbits,
1021 unsigned int niov, struct iovec *iov,
1022 unsigned int offset, unsigned int len)
1024 lnet_ni_t *ni = peer->plp_ni;
1025 ptllnd_ni_t *plni = ni->ni_data;
1026 ptllnd_tx_t *tx = ptllnd_new_tx(peer, type, 0);
1028 ptl_handle_md_t mdh;
1031 LASSERT (type == PTLLND_RDMA_READ ||
1032 type == PTLLND_RDMA_WRITE);
1035 CERROR("Can't allocate tx for RDMA %s with %s\n",
1036 (type == PTLLND_RDMA_WRITE) ? "write" : "read",
1037 libcfs_id2str(peer->plp_id));
1038 ptllnd_close_peer(peer, -ENOMEM);
1042 rc = ptllnd_set_txiov(tx, niov, iov, offset, len);
1044 CERROR ("Can't allocate iov %d for %s\n",
1045 niov, libcfs_id2str(peer->plp_id));
1050 md.user_ptr = ptllnd_obj2eventarg(tx, PTLLND_EVENTARG_TYPE_TX);
1051 md.eq_handle = plni->plni_eqh;
1053 md.options = PTLLND_MD_OPTIONS;
/* a READ generates both SEND and REPLY events on this MD, hence 2 */
1054 md.threshold = (type == PTLLND_RDMA_READ) ? 2 : 1;
1056 ptllnd_set_md_buffer(&md, tx);
1058 rc = PtlMDBind(plni->plni_nih, md, LNET_UNLINK, &mdh);
1060 CERROR("PtlMDBind for %s failed: %s(%d)\n",
1061 libcfs_id2str(peer->plp_id),
1062 ptllnd_errtype2str(rc), rc);
1067 tx->tx_bulkmdh = mdh;
1068 tx->tx_lnetmsg = msg;
1070 ptllnd_set_tx_deadline(tx);
1071 list_add_tail(&tx->tx_list, &peer->plp_activeq);
1072 gettimeofday(&tx->tx_bulk_posted, NULL);
1074 if (type == PTLLND_RDMA_READ)
1075 rc = PtlGet(mdh, peer->plp_ptlid,
1076 plni->plni_portal, 0, matchbits, 0);
1078 rc = PtlPut(mdh, PTL_NOACK_REQ, peer->plp_ptlid,
1079 plni->plni_portal, 0, matchbits, 0,
/* hdr_data tells the peer whether the RDMA succeeded */
1080 (msg == NULL) ? PTLLND_RDMA_FAIL : PTLLND_RDMA_OK);
1085 CERROR("Can't initiate RDMA with %s: %s(%d)\n",
1086 libcfs_id2str(peer->plp_id),
1087 ptllnd_errtype2str(rc), rc);
1089 tx->tx_lnetmsg = NULL;
1092 ptllnd_tx_done(tx); /* this will close peer */
/* lnd_send entry point: route an outgoing lnet message.  Small messages
 * (payload fits plni_max_msg_size) are copied into an IMMEDIATE tx; large
 * GETs and PUT/REPLYs use the passive-RDMA path.  User-space peers are
 * rejected — this LND only talks to kernel peers. */
1097 ptllnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *msg)
1099 ptllnd_ni_t *plni = ni->ni_data;
1105 LASSERT (!msg->msg_routing);
1106 LASSERT (msg->msg_kiov == NULL);
1108 LASSERT (msg->msg_niov <= PTL_MD_MAX_IOV); /* !!! */
1110 CDEBUG(D_NET, "%s [%d]+%d,%d -> %s%s\n",
1111 lnet_msgtyp2str(msg->msg_type),
1112 msg->msg_niov, msg->msg_offset, msg->msg_len,
1113 libcfs_nid2str(msg->msg_target.nid),
1114 msg->msg_target_is_router ? "(rtr)" : "");
1116 if ((msg->msg_target.pid & LNET_PID_USERFLAG) != 0) {
1117 CERROR("Can't send to non-kernel peer %s\n",
1118 libcfs_id2str(msg->msg_target));
1119 return -EHOSTUNREACH;
1122 plp = ptllnd_find_peer(ni, msg->msg_target, 1);
1126 switch (msg->msg_type) {
1131 LASSERT (msg->msg_len == 0);
1132 break; /* send IMMEDIATE */
/* GET: reply routed via a router still goes IMMEDIATE */
1135 if (msg->msg_target_is_router)
1136 break; /* send IMMEDIATE */
1138 nob = msg->msg_md->md_length;
1139 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[nob]);
1140 if (nob <= plni->plni_max_msg_size)
1143 LASSERT ((msg->msg_md->md_options & LNET_MD_KIOV) == 0);
1144 rc = ptllnd_passive_rdma(plp, PTLLND_MSG_TYPE_GET, msg,
1145 msg->msg_md->md_niov,
1146 msg->msg_md->md_iov.iov,
1147 0, msg->msg_md->md_length);
1148 ptllnd_peer_decref(plp);
1151 case LNET_MSG_REPLY:
1154 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[nob]);
1155 if (nob <= plp->plp_max_msg_size)
1156 break; /* send IMMEDIATE */
1158 rc = ptllnd_passive_rdma(plp, PTLLND_MSG_TYPE_PUT, msg,
1159 msg->msg_niov, msg->msg_iov,
1160 msg->msg_offset, msg->msg_len);
1161 ptllnd_peer_decref(plp);
1166 * NB copy the payload so we don't have to do a fragmented send */
1168 tx = ptllnd_new_tx(plp, PTLLND_MSG_TYPE_IMMEDIATE, msg->msg_len);
1170 CERROR("Can't allocate tx for lnet type %d to %s\n",
1171 msg->msg_type, libcfs_id2str(msg->msg_target));
1172 ptllnd_peer_decref(plp);
1176 lnet_copy_iov2flat(tx->tx_msgsize, &tx->tx_msg,
1177 offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload),
1178 msg->msg_niov, msg->msg_iov, msg->msg_offset,
1180 tx->tx_msg.ptlm_u.immediate.kptlim_hdr = msg->msg_hdr;
1182 tx->tx_lnetmsg = msg;
1183 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: post immediate %s p %d %p",
1184 libcfs_id2str(msg->msg_target),
1185 plp->plp_credits, plp->plp_outstanding_credits,
1186 plp->plp_sent_credits,
1187 plni->plni_peer_credits + plp->plp_lazy_credits,
1188 lnet_msgtyp2str(msg->msg_type),
1189 (le32_to_cpu(msg->msg_type) == LNET_MSG_PUT) ?
1190 le32_to_cpu(msg->msg_hdr.msg.put.ptl_index) :
1191 (le32_to_cpu(msg->msg_type) == LNET_MSG_GET) ?
1192 le32_to_cpu(msg->msg_hdr.msg.get.ptl_index) : -1,
1195 ptllnd_peer_decref(plp);
/* Finish an rx: the buffer slot it consumed becomes a credit owed back to
 * the peer (plp_outstanding_credits++), so kick the sender which may
 * piggy-back or NOOP-return it. */
1200 ptllnd_rx_done(ptllnd_rx_t *rx)
1202 ptllnd_peer_t *plp = rx->rx_peer;
1203 lnet_ni_t *ni = plp->plp_ni;
1204 ptllnd_ni_t *plni = ni->ni_data;
1206 plp->plp_outstanding_credits++;
1208 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: rx=%p done\n",
1209 libcfs_id2str(plp->plp_id),
1210 plp->plp_credits, plp->plp_outstanding_credits,
1211 plp->plp_sent_credits,
1212 plni->plni_peer_credits + plp->plp_lazy_credits, rx);
1214 ptllnd_check_sends(rx->rx_peer);
1216 LASSERT (plni->plni_nrxs > 0);
/* lnd_eager_recv entry point — unused in this LND (see comment below). */
1221 ptllnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
1222 void **new_privatep)
1224 /* Shouldn't get here; recvs only block for router buffers */
/* lnd_recv entry point: deliver an incoming message to lnet.  IMMEDIATE
 * payloads are copied straight out of the rx buffer; PUT requests trigger
 * an active RDMA READ of the peer's data, GET requests an active RDMA
 * WRITE of ours (or a FAIL indication when msg is NULL — branch partially
 * elided here). */
1230 ptllnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
1231 int delayed, unsigned int niov,
1232 struct iovec *iov, lnet_kiov_t *kiov,
1233 unsigned int offset, unsigned int mlen, unsigned int rlen)
1235 ptllnd_rx_t *rx = private;
1239 LASSERT (kiov == NULL);
1240 LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
1242 switch (rx->rx_msg->ptlm_type) {
1246 case PTLLND_MSG_TYPE_IMMEDIATE:
1247 nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[mlen]);
1248 if (nob > rx->rx_nob) {
1249 CERROR("Immediate message from %s too big: %d(%d)\n",
1250 libcfs_id2str(rx->rx_peer->plp_id),
1255 lnet_copy_flat2iov(niov, iov, offset,
1256 rx->rx_nob, rx->rx_msg,
1257 offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload),
1259 lnet_finalize(ni, msg, 0);
1262 case PTLLND_MSG_TYPE_PUT:
1263 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_READ, msg,
1264 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
1265 niov, iov, offset, mlen);
1268 case PTLLND_MSG_TYPE_GET:
1270 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_WRITE, msg,
1271 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
1272 msg->msg_niov, msg->msg_iov,
1273 msg->msg_offset, msg->msg_len);
1275 rc = ptllnd_active_rdma(rx->rx_peer, PTLLND_RDMA_WRITE, NULL,
1276 rx->rx_msg->ptlm_u.rdma.kptlrm_matchbits,
1286 ptllnd_parse_request(lnet_ni_t *ni, ptl_process_id_t initiator,
1287 kptl_msg_t *msg, unsigned int nob)
1289 ptllnd_ni_t *plni = ni->ni_data;
1290 const int basenob = offsetof(kptl_msg_t, ptlm_u);
1291 lnet_process_id_t srcid;
1300 CERROR("Very short receive from %s\n",
1301 ptllnd_ptlid2str(initiator));
1305 /* I can at least read MAGIC/VERSION */
1307 flip = msg->ptlm_magic == __swab32(PTLLND_MSG_MAGIC);
1308 if (!flip && msg->ptlm_magic != PTLLND_MSG_MAGIC) {
1309 CERROR("Bad protocol magic %08x from %s\n",
1310 msg->ptlm_magic, ptllnd_ptlid2str(initiator));
1314 msg_version = flip ? __swab16(msg->ptlm_version) : msg->ptlm_version;
1316 if (msg_version != PTLLND_MSG_VERSION) {
1317 CERROR("Bad protocol version %04x from %s: %04x expected\n",
1318 (__u32)msg_version, ptllnd_ptlid2str(initiator), PTLLND_MSG_VERSION);
1320 if (plni->plni_abort_on_protocol_mismatch)
1326 if (nob < basenob) {
1327 CERROR("Short receive from %s: got %d, wanted at least %d\n",
1328 ptllnd_ptlid2str(initiator), nob, basenob);
1332 /* checksum must be computed with
1333 * 1) ptlm_cksum zero and
1334 * 2) BEFORE anything gets modified/flipped
1336 msg_cksum = flip ? __swab32(msg->ptlm_cksum) : msg->ptlm_cksum;
1337 msg->ptlm_cksum = 0;
1338 if (msg_cksum != 0 &&
1339 msg_cksum != ptllnd_cksum(msg, offsetof(kptl_msg_t, ptlm_u))) {
1340 CERROR("Bad checksum from %s\n", ptllnd_ptlid2str(initiator));
1344 msg->ptlm_version = msg_version;
1345 msg->ptlm_cksum = msg_cksum;
1348 /* NB stamps are opaque cookies */
1349 __swab32s(&msg->ptlm_nob);
1350 __swab64s(&msg->ptlm_srcnid);
1351 __swab64s(&msg->ptlm_dstnid);
1352 __swab32s(&msg->ptlm_srcpid);
1353 __swab32s(&msg->ptlm_dstpid);
1356 srcid.nid = msg->ptlm_srcnid;
1357 srcid.pid = msg->ptlm_srcpid;
1359 if (LNET_NIDNET(msg->ptlm_srcnid) != LNET_NIDNET(ni->ni_nid)) {
1360 CERROR("Bad source id %s from %s\n",
1361 libcfs_id2str(srcid),
1362 ptllnd_ptlid2str(initiator));
1366 if (msg->ptlm_type == PTLLND_MSG_TYPE_NAK) {
1367 CERROR("NAK from %s (%s)\n",
1368 libcfs_id2str(srcid),
1369 ptllnd_ptlid2str(initiator));
1371 if (plni->plni_dump_on_nak)
1372 ptllnd_dump_debug(ni, srcid);
1374 if (plni->plni_abort_on_nak)
1380 if (msg->ptlm_dstnid != ni->ni_nid ||
1381 msg->ptlm_dstpid != the_lnet.ln_pid) {
1382 CERROR("Bad dstid %s (%s expected) from %s\n",
1383 libcfs_id2str((lnet_process_id_t) {
1384 .nid = msg->ptlm_dstnid,
1385 .pid = msg->ptlm_dstpid}),
1386 libcfs_id2str((lnet_process_id_t) {
1388 .pid = the_lnet.ln_pid}),
1389 libcfs_id2str(srcid));
1393 if (msg->ptlm_dststamp != plni->plni_stamp) {
1394 CERROR("Bad dststamp "LPX64"("LPX64" expected) from %s\n",
1395 msg->ptlm_dststamp, plni->plni_stamp,
1396 libcfs_id2str(srcid));
1400 PTLLND_HISTORY("RX %s: %s %d %p", libcfs_id2str(srcid),
1401 ptllnd_msgtype2str(msg->ptlm_type),
1402 msg->ptlm_credits, &rx);
1404 switch (msg->ptlm_type) {
1405 case PTLLND_MSG_TYPE_PUT:
1406 case PTLLND_MSG_TYPE_GET:
1407 if (nob < basenob + sizeof(kptl_rdma_msg_t)) {
1408 CERROR("Short rdma request from %s(%s)\n",
1409 libcfs_id2str(srcid),
1410 ptllnd_ptlid2str(initiator));
1414 __swab64s(&msg->ptlm_u.rdma.kptlrm_matchbits);
1417 case PTLLND_MSG_TYPE_IMMEDIATE:
1418 if (nob < offsetof(kptl_msg_t,
1419 ptlm_u.immediate.kptlim_payload)) {
1420 CERROR("Short immediate from %s(%s)\n",
1421 libcfs_id2str(srcid),
1422 ptllnd_ptlid2str(initiator));
1427 case PTLLND_MSG_TYPE_HELLO:
1428 if (nob < basenob + sizeof(kptl_hello_msg_t)) {
1429 CERROR("Short hello from %s(%s)\n",
1430 libcfs_id2str(srcid),
1431 ptllnd_ptlid2str(initiator));
1435 __swab64s(&msg->ptlm_u.hello.kptlhm_matchbits);
1436 __swab32s(&msg->ptlm_u.hello.kptlhm_max_msg_size);
1440 case PTLLND_MSG_TYPE_NOOP:
1444 CERROR("Bad message type %d from %s(%s)\n", msg->ptlm_type,
1445 libcfs_id2str(srcid),
1446 ptllnd_ptlid2str(initiator));
1450 plp = ptllnd_find_peer(ni, srcid, 0);
1452 CERROR("Can't find peer %s\n", libcfs_id2str(srcid));
1456 if (msg->ptlm_type == PTLLND_MSG_TYPE_HELLO) {
1457 if (plp->plp_recvd_hello) {
1458 CERROR("Unexpected HELLO from %s\n",
1459 libcfs_id2str(srcid));
1460 ptllnd_peer_decref(plp);
1464 plp->plp_max_msg_size = msg->ptlm_u.hello.kptlhm_max_msg_size;
1465 plp->plp_match = msg->ptlm_u.hello.kptlhm_matchbits;
1466 plp->plp_stamp = msg->ptlm_srcstamp;
1467 plp->plp_recvd_hello = 1;
1469 } else if (!plp->plp_recvd_hello) {
1471 CERROR("Bad message type %d (HELLO expected) from %s\n",
1472 msg->ptlm_type, libcfs_id2str(srcid));
1473 ptllnd_peer_decref(plp);
1476 } else if (msg->ptlm_srcstamp != plp->plp_stamp) {
1478 CERROR("Bad srcstamp "LPX64"("LPX64" expected) from %s\n",
1479 msg->ptlm_srcstamp, plp->plp_stamp,
1480 libcfs_id2str(srcid));
1481 ptllnd_peer_decref(plp);
1485 /* Check peer only sends when I've sent her credits */
1486 if (plp->plp_sent_credits == 0) {
1487 CERROR("%s[%d/%d+%d(%d)]: unexpected message\n",
1488 libcfs_id2str(plp->plp_id),
1489 plp->plp_credits, plp->plp_outstanding_credits,
1490 plp->plp_sent_credits,
1491 plni->plni_peer_credits + plp->plp_lazy_credits);
1494 plp->plp_sent_credits--;
1496 /* No check for credit overflow - the peer may post new buffers after
1497 * the startup handshake. */
1498 if (msg->ptlm_credits > 0) {
1499 plp->plp_credits += msg->ptlm_credits;
1500 ptllnd_check_sends(plp);
1503 /* All OK so far; assume the message is good... */
1510 switch (msg->ptlm_type) {
1511 default: /* message types have been checked already */
1512 ptllnd_rx_done(&rx);
1515 case PTLLND_MSG_TYPE_PUT:
1516 case PTLLND_MSG_TYPE_GET:
1517 rc = lnet_parse(ni, &msg->ptlm_u.rdma.kptlrm_hdr,
1518 msg->ptlm_srcnid, &rx, 1);
1520 ptllnd_rx_done(&rx);
1523 case PTLLND_MSG_TYPE_IMMEDIATE:
1524 rc = lnet_parse(ni, &msg->ptlm_u.immediate.kptlim_hdr,
1525 msg->ptlm_srcnid, &rx, 0);
1527 ptllnd_rx_done(&rx);
1531 ptllnd_peer_decref(plp);
/* Handle a Portals event on a posted receive buffer: log NI-level
 * failures, dispatch a completed, well-aligned PUT to
 * ptllnd_parse_request(), and repost the buffer once it has been
 * unlinked from Portals.
 * NOTE(review): this listing has dropped physical lines (the embedded
 * line numbers skip, e.g. 1535->1537); opening/closing braces, the
 * "return" paths and some #else/#endif branches are not visible here.
 * Do not treat this text as compilable — recover the full file before
 * making logic changes. */
1535 ptllnd_buf_event (lnet_ni_t *ni, ptl_event_t *event)
/* Recover the buffer descriptor stashed in the MD's user pointer */
1537 ptllnd_buffer_t *buf = ptllnd_eventarg2obj(event->md.user_ptr);
1538 ptllnd_ni_t *plni = ni->ni_data;
/* Start of the just-arrived message within the receive buffer */
1539 char *msg = &buf->plb_buffer[event->offset];
1541 int unlinked = event->type == PTL_EVENT_UNLINK;
1543 LASSERT (buf->plb_ni == ni);
/* Only PUT completion and unlink events are expected on rx buffers */
1544 LASSERT (event->type == PTL_EVENT_PUT_END ||
1545 event->type == PTL_EVENT_UNLINK);
/* Network-level failure: log it and skip parsing */
1547 if (event->ni_fail_type != PTL_NI_OK) {
1549 CERROR("event type %s(%d), status %s(%d) from %s\n",
1550 ptllnd_evtype2str(event->type), event->type,
1551 ptllnd_errtype2str(event->ni_fail_type),
1552 event->ni_fail_type,
1553 ptllnd_ptlid2str(event->initiator));
1555 } else if (event->type == PTL_EVENT_PUT_END) {
1556 #if (PTL_MD_LOCAL_ALIGN8 == 0)
1557 /* Portals can't force message alignment - someone sending an
1558 * odd-length message could misalign subsequent messages */
1559 if ((event->mlength & 7) != 0) {
1560 CERROR("Message from %s has odd length %llu: "
1561 "probable version incompatibility\n",
1562 ptllnd_ptlid2str(event->initiator),
/* (listing gap: rest of the CERROR argument list and the surrounding
 * control flow, original lines 1563-1566, are missing) */
1567 LASSERT ((event->offset & 7) == 0);
/* Well-aligned message received: hand it to the request parser */
1569 ptllnd_parse_request(ni, event->initiator,
1570 (kptl_msg_t *)msg, event->mlength);
/* Decide whether to repost: UNLINK event semantics differ between
 * Lustre-patched and vanilla Portals */
1573 #ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
1574 /* UNLINK event only on explicit unlink */
1575 repost = (event->unlinked && event->type != PTL_EVENT_UNLINK);
1576 if (event->unlinked)
/* (listing gap: presumably the #else branch begins here) */
1579 /* UNLINK event only on implicit unlink */
1580 repost = (event->type == PTL_EVENT_UNLINK);
/* Buffer is no longer posted with Portals; update accounting */
1584 LASSERT(buf->plb_posted);
1585 buf->plb_posted = 0;
1586 plni->plni_nposted_buffers--;
/* Best-effort repost; failure handling lives in ptllnd_post_buffer() */
1590 (void) ptllnd_post_buffer(buf);
/* Handle a Portals event on a tx's request MD or bulk MD: record which
 * MD completed, sanity-check the event type against the tx type, and
 * queue the tx on the zombie list for ptllnd_tx_done() once both MDs
 * are invalid (or on error).
 * NOTE(review): listing has dropped lines (embedded numbers skip);
 * braces, "break"s and some branches are not visible — recover the full
 * file before changing logic. */
1594 ptllnd_tx_event (lnet_ni_t *ni, ptl_event_t *event)
1596 ptllnd_ni_t *plni = ni->ni_data;
/* Recover the tx descriptor stashed in the MD's user pointer */
1597 ptllnd_tx_t *tx = ptllnd_eventarg2obj(event->md.user_ptr);
1598 int error = (event->ni_fail_type != PTL_NI_OK);
/* Lustre-patched Portals reports unlink as a flag on any event;
 * vanilla Portals delivers a distinct UNLINK event type */
1601 #ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
1602 int unlinked = event->unlinked;
1604 int unlinked = (event->type == PTL_EVENT_UNLINK);
1608 CERROR("Error %s(%d) event %s(%d) unlinked %d, %s(%d) for %s\n",
1609 ptllnd_errtype2str(event->ni_fail_type),
1610 event->ni_fail_type,
1611 ptllnd_evtype2str(event->type), event->type,
1612 unlinked, ptllnd_msgtype2str(tx->tx_type), tx->tx_type,
1613 libcfs_id2str(tx->tx_peer->plp_id));
1615 LASSERT (!PtlHandleIsEqual(event->md_handle, PTL_INVALID_HANDLE));
/* Which of the tx's two MDs does this event belong to? */
1617 isreq = PtlHandleIsEqual(event->md_handle, tx->tx_reqmdh);
1619 LASSERT (event->md.start == (void *)&tx->tx_msg);
/* Request MD unlinked: invalidate the handle and timestamp completion */
1621 tx->tx_reqmdh = PTL_INVALID_HANDLE;
1622 gettimeofday(&tx->tx_req_done, NULL);
1626 isbulk = PtlHandleIsEqual(event->md_handle, tx->tx_bulkmdh);
/* Bulk MD unlinked: same treatment for the bulk handle */
1627 if ( isbulk && unlinked ) {
1628 tx->tx_bulkmdh = PTL_INVALID_HANDLE;
1629 gettimeofday(&tx->tx_bulk_done, NULL);
1632 LASSERT (!isreq != !isbulk); /* always one and only 1 match */
1634 PTLLND_HISTORY("%s[%d/%d+%d(%d)]: TX done %p %s%s",
1635 libcfs_id2str(tx->tx_peer->plp_id),
1636 tx->tx_peer->plp_credits,
1637 tx->tx_peer->plp_outstanding_credits,
1638 tx->tx_peer->plp_sent_credits,
1639 plni->plni_peer_credits + tx->tx_peer->plp_lazy_credits,
1640 tx, isreq ? "REQ" : "BULK", unlinked ? "(unlinked)" : "");
1642 LASSERT (!isreq != !isbulk); /* always one and only 1 match */
/* Validate the event type for each tx type */
1643 switch (tx->tx_type) {
/* Pure message sends: only SEND_END (or unlink) is legal */
1647 case PTLLND_MSG_TYPE_NOOP:
1648 case PTLLND_MSG_TYPE_HELLO:
1649 case PTLLND_MSG_TYPE_IMMEDIATE:
1650 LASSERT (event->type == PTL_EVENT_UNLINK ||
1651 event->type == PTL_EVENT_SEND_END);
/* GET: the request MD sees SEND_END; the bulk MD sees the peer's PUT */
1655 case PTLLND_MSG_TYPE_GET:
1656 LASSERT (event->type == PTL_EVENT_UNLINK ||
1657 (isreq && event->type == PTL_EVENT_SEND_END) ||
1658 (isbulk && event->type == PTL_EVENT_PUT_END));
1660 if (isbulk && !error && event->type == PTL_EVENT_PUT_END) {
1661 /* Check GET matched */
1662 if (event->hdr_data == PTLLND_RDMA_OK) {
1663 lnet_set_reply_msg_len(ni,
1664 tx->tx_lnetreplymsg,
/* (listing gap: mlength argument and the else begin here) */
1667 CERROR ("Unmatched GET with %s\n",
1668 libcfs_id2str(tx->tx_peer->plp_id));
1669 tx->tx_status = -EIO;
/* PUT: the bulk MD is consumed by the peer's GET */
1674 case PTLLND_MSG_TYPE_PUT:
1675 LASSERT (event->type == PTL_EVENT_UNLINK ||
1676 (isreq && event->type == PTL_EVENT_SEND_END) ||
1677 (isbulk && event->type == PTL_EVENT_GET_END));
1680 case PTLLND_RDMA_READ:
1681 LASSERT (event->type == PTL_EVENT_UNLINK ||
1682 event->type == PTL_EVENT_SEND_END ||
1683 event->type == PTL_EVENT_REPLY_END);
1687 case PTLLND_RDMA_WRITE:
1688 LASSERT (event->type == PTL_EVENT_UNLINK ||
1689 event->type == PTL_EVENT_SEND_END);
1693 /* Schedule ptllnd_tx_done() on error or last completion event */
1695 (PtlHandleIsEqual(tx->tx_bulkmdh, PTL_INVALID_HANDLE) &&
1696 PtlHandleIsEqual(tx->tx_reqmdh, PTL_INVALID_HANDLE))) {
1698 tx->tx_status = -EIO;
/* Move the tx to the zombie list; it is reaped in ptllnd_wait() */
1699 list_del(&tx->tx_list);
1700 list_add_tail(&tx->tx_list, &plni->plni_zombie_txs);
/* Scan the peer's queued and active tx lists for the first tx whose
 * deadline has already passed; used by ptllnd_check_peer() to detect a
 * stuck peer.
 * NOTE(review): the "return tx" / "return NULL" statements and braces
 * were dropped by this listing (line numbers skip 1713->1717,
 * 1720->...); the visible comparisons imply them — confirm against the
 * full file. */
1705 ptllnd_find_timed_out_tx(ptllnd_peer_t *peer)
1707 time_t now = cfs_time_current_sec();
1708 struct list_head *tmp;
/* First check txs still waiting to be sent */
1710 list_for_each(tmp, &peer->plp_txq) {
1711 ptllnd_tx_t *tx = list_entry(tmp, ptllnd_tx_t, tx_list);
1713 if (tx->tx_deadline < now)
/* Then check txs with outstanding network activity */
1717 list_for_each(tmp, &peer->plp_activeq) {
1718 ptllnd_tx_t *tx = list_entry(tmp, ptllnd_tx_t, tx_list);
1720 if (tx->tx_deadline < now)
/* If any of the peer's txs has exceeded its deadline, log the timeout
 * and close the connection with -ETIMEDOUT.
 * NOTE(review): the early-return when no tx timed out (original lines
 * 1731-1734) is missing from this listing. */
1728 ptllnd_check_peer(ptllnd_peer_t *peer)
1730 ptllnd_tx_t *tx = ptllnd_find_timed_out_tx(peer);
1735 CERROR("%s: timed out\n", libcfs_id2str(peer->plp_id));
1736 ptllnd_close_peer(peer, -ETIMEDOUT);
/* Periodic timeout sweep: check a proportional slice of the peer hash
 * table for timed-out txs, so that over one timeout interval every
 * peer is examined roughly 'n' times, then schedule the next sweep.
 * NOTE(review): this listing drops lines (e.g. the declaration of 'n'
 * and 'i', and some braces); recover the full file before editing. */
1740 ptllnd_watchdog (lnet_ni_t *ni, time_t now)
1742 ptllnd_ni_t *plni = ni->ni_data;
/* p: seconds between sweeps; chunk: hash buckets to scan this sweep */
1744 int p = plni->plni_watchdog_interval;
1745 int chunk = plni->plni_peer_hash_size;
/* Seconds elapsed since the previous sweep was scheduled */
1746 int interval = now - (plni->plni_watchdog_nextt - p);
1748 struct list_head *hashlist;
1749 struct list_head *tmp;
1750 struct list_head *nxt;
1752 /* Time to check for RDMA timeouts on a few more peers:
1753 * I try to do checks every 'p' seconds on a proportion of the peer
1754 * table and I need to check every connection 'n' times within a
1755 * timeout interval, to ensure I detect a timeout on any connection
1756 * within (n+1)/n times the timeout interval. */
1758 LASSERT (now >= plni->plni_watchdog_nextt);
1760 if (plni->plni_timeout > n * interval) { /* Scan less than the whole table? */
1761 chunk = (chunk * n * interval) / plni->plni_timeout;
/* Walk 'chunk' consecutive hash buckets, resuming where the previous
 * sweep left off (plni_watchdog_peeridx persists across calls) */
1766 for (i = 0; i < chunk; i++) {
1767 hashlist = &plni->plni_peer_hash[plni->plni_watchdog_peeridx];
/* _safe: ptllnd_check_peer() may close (and unlink) the peer */
1769 list_for_each_safe(tmp, nxt, hashlist) {
1770 ptllnd_check_peer(list_entry(tmp, ptllnd_peer_t, plp_list));
1773 plni->plni_watchdog_peeridx = (plni->plni_watchdog_peeridx + 1) %
1774 plni->plni_peer_hash_size;
/* Schedule the next sweep 'p' seconds from now */
1777 plni->plni_watchdog_nextt = now + p;
1781 ptllnd_wait (lnet_ni_t *ni, int milliseconds)
1783 static struct timeval prevt;
1784 static int prevt_count;
1785 static int call_count;
1787 struct timeval start;
1788 struct timeval then;
1790 struct timeval deadline;
1792 ptllnd_ni_t *plni = ni->ni_data;
1800 /* Handle any currently queued events, returning immediately if any.
1801 * Otherwise block for the timeout and handle all events queued
1804 gettimeofday(&start, NULL);
1807 if (milliseconds <= 0) {
1810 deadline.tv_sec = start.tv_sec + milliseconds/1000;
1811 deadline.tv_usec = start.tv_usec + (milliseconds % 1000)*1000;
1813 if (deadline.tv_usec >= 1000000) {
1814 start.tv_usec -= 1000000;
1820 gettimeofday(&then, NULL);
1822 rc = PtlEQPoll(&plni->plni_eqh, 1, timeout, &event, &which);
1824 gettimeofday(&now, NULL);
1826 if ((now.tv_sec*1000 + now.tv_usec/1000) -
1827 (then.tv_sec*1000 + then.tv_usec/1000) > timeout + 1000) {
1828 /* 1000 mS grace...........................^ */
1829 CERROR("SLOW PtlEQPoll(%d): %dmS elapsed\n", timeout,
1830 (int)(now.tv_sec*1000 + now.tv_usec/1000) -
1831 (int)(then.tv_sec*1000 + then.tv_usec/1000));
1834 if (rc == PTL_EQ_EMPTY) {
1835 if (found) /* handled some events */
1838 if (now.tv_sec >= plni->plni_watchdog_nextt) { /* check timeouts? */
1839 ptllnd_watchdog(ni, now.tv_sec);
1840 LASSERT (now.tv_sec < plni->plni_watchdog_nextt);
1843 if (now.tv_sec > deadline.tv_sec || /* timeout expired */
1844 (now.tv_sec == deadline.tv_sec &&
1845 now.tv_usec >= deadline.tv_usec))
1848 if (milliseconds < 0 ||
1849 plni->plni_watchdog_nextt <= deadline.tv_sec) {
1850 timeout = (plni->plni_watchdog_nextt - now.tv_sec)*1000;
1852 timeout = (deadline.tv_sec - now.tv_sec)*1000 +
1853 (deadline.tv_usec - now.tv_usec)/1000;
1859 LASSERT (rc == PTL_OK || rc == PTL_EQ_DROPPED);
1861 if (rc == PTL_EQ_DROPPED)
1862 CERROR("Event queue: size %d is too small\n",
1863 plni->plni_eq_size);
1868 switch (ptllnd_eventarg2type(event.md.user_ptr)) {
1872 case PTLLND_EVENTARG_TYPE_TX:
1873 ptllnd_tx_event(ni, &event);
1876 case PTLLND_EVENTARG_TYPE_BUF:
1877 ptllnd_buf_event(ni, &event);
1882 while (!list_empty(&plni->plni_zombie_txs)) {
1883 tx = list_entry(plni->plni_zombie_txs.next,
1884 ptllnd_tx_t, tx_list);
1885 list_del_init(&tx->tx_list);
1889 if (prevt.tv_sec == 0 ||
1890 prevt.tv_sec != now.tv_sec) {
1891 PTLLND_HISTORY("%d wait entered at %d.%06d - prev %d %d.%06d",
1892 call_count, (int)start.tv_sec, (int)start.tv_usec,
1893 prevt_count, (int)prevt.tv_sec, (int)prevt.tv_usec);