1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
6 * Author: Eric Barton <eric@bartonsoftware.com>
8 * This file is part of Portals, http://www.lustre.org
10 * Portals is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Portals is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Portals; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 kqswnal_notify_peer_down(kqswnal_tx_t *ktx)
31 then = cfs_time_current_sec() -
32 cfs_duration_sec(cfs_time_current() -
35 lnet_notify(kqswnal_data.kqn_ni, ktx->ktx_nid, 0, then);
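/* A minimal sketch (not part of the build) of the arithmetic above: the
 * last-alive time handed to lnet_notify() is "now" in seconds minus how
 * long this tx has been outstanding since it was launched.  The names
 * below (now_sec, now_jiffies, launch_jiffies) are hypothetical. */
#if 0
static time_t
sketch_last_alive (time_t now_sec, unsigned long now_jiffies,
                   unsigned long launch_jiffies)
{
        /* jiffies the tx has spent in flight, converted to seconds */
        unsigned long in_flight_sec = (now_jiffies - launch_jiffies) / HZ;

        return now_sec - in_flight_sec;
}
#endif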
39 kqswnal_unmap_tx (kqswnal_tx_t *ktx)
43 ktx->ktx_rail = -1; /* unset rail */
45 if (ktx->ktx_nmappedpages == 0)
48 CDEBUG(D_NET, "%p unloading %d frags starting at %d\n",
49 ktx, ktx->ktx_nfrag, ktx->ktx_firsttmpfrag);
51 for (i = ktx->ktx_firsttmpfrag; i < ktx->ktx_nfrag; i++)
52 ep_dvma_unload(kqswnal_data.kqn_ep,
53 kqswnal_data.kqn_ep_tx_nmh,
56 ktx->ktx_nmappedpages = 0;
60 kqswnal_map_tx_kiov (kqswnal_tx_t *ktx, int offset, int nob,
61 unsigned int niov, lnet_kiov_t *kiov)
63 int nfrags = ktx->ktx_nfrag;
64 int nmapped = ktx->ktx_nmappedpages;
65 int maxmapped = ktx->ktx_npages;
66 __u32 basepage = ktx->ktx_basepage + nmapped;
72 if (ktx->ktx_rail < 0)
73 ktx->ktx_rail = ep_xmtr_prefrail(kqswnal_data.kqn_eptx,
75 kqswnal_nid2elanid(ktx->ktx_nid));
78 CERROR("No rails available for %s\n", libcfs_nid2str(ktx->ktx_nid));
83 LASSERT (nmapped <= maxmapped);
84 LASSERT (nfrags >= ktx->ktx_firsttmpfrag);
85 LASSERT (nfrags <= EP_MAXFRAG);
89 /* skip complete frags before 'offset' */
90 while (offset >= kiov->kiov_len) {
91 offset -= kiov->kiov_len;
98 int fraglen = kiov->kiov_len - offset;
100 /* each page frag must lie entirely within one page */
101 LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);
107 if (nmapped > maxmapped) {
108 CERROR("Can't map message in %d pages (max %d)\n",
113 if (nfrags == EP_MAXFRAG) {
114 CERROR("Message too fragmented in Elan VM (max %d frags)\n",
119 /* XXX this is really crap, but we'll have to kmap until
120 * EKC has a page (rather than vaddr) mapping interface */
122 ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;
125 "%p[%d] loading %p for %d, page %d, %d total\n",
126 ktx, nfrags, ptr, fraglen, basepage, nmapped);
128 ep_dvma_load(kqswnal_data.kqn_ep, NULL,
130 kqswnal_data.kqn_ep_tx_nmh, basepage,
131 &railmask, &ktx->ktx_frags[nfrags]);
133 if (nfrags == ktx->ktx_firsttmpfrag ||
134 !ep_nmd_merge(&ktx->ktx_frags[nfrags - 1],
135 &ktx->ktx_frags[nfrags - 1],
136 &ktx->ktx_frags[nfrags])) {
137 /* new frag if this is the first or can't merge */
141 kunmap (kiov->kiov_page);
143 /* keep in loop for failure case */
144 ktx->ktx_nmappedpages = nmapped;
152 /* iov must not run out before end of data */
153 LASSERT (nob == 0 || niov > 0);
157 ktx->ktx_nfrag = nfrags;
158 CDEBUG (D_NET, "%p got %d frags over %d pages\n",
159 ktx, ktx->ktx_nfrag, ktx->ktx_nmappedpages);
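/* A minimal sketch (not part of the build) of the bookkeeping that
 * ep_nmd_merge() is used for above: a newly mapped piece either extends
 * the previous temporary fragment when the two are contiguous, or starts
 * a new one.  sketch_frag_t and the contiguity test are hypothetical
 * stand-ins for EP_NMD and ep_nmd_merge(). */
#if 0
typedef struct {
        unsigned long sf_addr;
        unsigned int  sf_len;
} sketch_frag_t;

static int
sketch_append_frag (sketch_frag_t *frag, int nfrag, int firsttmpfrag,
                    unsigned long addr, unsigned int len)
{
        if (nfrag > firsttmpfrag &&
            frag[nfrag - 1].sf_addr + frag[nfrag - 1].sf_len == addr) {
                /* contiguous with the previous temporary frag: merge */
                frag[nfrag - 1].sf_len += len;
                return nfrag;
        }

        /* first temporary frag, or can't merge: start a new one */
        frag[nfrag].sf_addr = addr;
        frag[nfrag].sf_len  = len;
        return nfrag + 1;
}
#endif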
166 kqswnal_csum_kiov (__u32 csum, int offset, int nob,
167 unsigned int niov, lnet_kiov_t *kiov)
177 /* skip complete frags before 'offset' */
178 while (offset >= kiov->kiov_len) {
179 offset -= kiov->kiov_len;
186 int fraglen = kiov->kiov_len - offset;
189 /* each page frag must lie entirely within one page */
189 LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);
194 ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;
196 csum = kqswnal_csum(csum, ptr, fraglen);
198 kunmap (kiov->kiov_page);
205 /* iov must not run out before end of data */
206 LASSERT (nob == 0 || niov > 0);
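/* The same skip-then-walk pattern as above, sketched (not compiled) over
 * plain buffers instead of kmap'd pages.  sketch_buf_t is a hypothetical
 * stand-in for lnet_kiov_t; kqswnal_csum() is used as elsewhere in this
 * file. */
#if 0
typedef struct {
        char         *sb_base;
        unsigned int  sb_len;
} sketch_buf_t;

static __u32
sketch_csum_bufs (__u32 csum, int offset, int nob,
                  unsigned int nbuf, sketch_buf_t *buf)
{
        /* skip complete buffers before 'offset' */
        while (offset >= buf->sb_len) {
                offset -= buf->sb_len;
                buf++;
                nbuf--;
        }

        while (nob > 0) {
                int fraglen = buf->sb_len - offset;

                if (fraglen > nob)
                        fraglen = nob;

                csum = kqswnal_csum(csum, buf->sb_base + offset, fraglen);

                offset = 0;
                nob -= fraglen;
                buf++;
                nbuf--;

                /* buffers must not run out before end of data */
                LASSERT (nob == 0 || nbuf > 0);
        }

        return csum;
}
#endif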
215 kqswnal_map_tx_iov (kqswnal_tx_t *ktx, int offset, int nob,
216 unsigned int niov, struct iovec *iov)
218 int nfrags = ktx->ktx_nfrag;
219 int nmapped = ktx->ktx_nmappedpages;
220 int maxmapped = ktx->ktx_npages;
221 __u32 basepage = ktx->ktx_basepage + nmapped;
223 EP_RAILMASK railmask;
226 if (ktx->ktx_rail < 0)
227 ktx->ktx_rail = ep_xmtr_prefrail(kqswnal_data.kqn_eptx,
229 kqswnal_nid2elanid(ktx->ktx_nid));
230 rail = ktx->ktx_rail;
232 CERROR("No rails available for %s\n", libcfs_nid2str(ktx->ktx_nid));
235 railmask = 1 << rail;
237 LASSERT (nmapped <= maxmapped);
238 LASSERT (nfrags >= ktx->ktx_firsttmpfrag);
239 LASSERT (nfrags <= EP_MAXFRAG);
243 /* skip complete frags before offset */
244 while (offset >= iov->iov_len) {
245 offset -= iov->iov_len;
252 int fraglen = iov->iov_len - offset;
257 npages = kqswnal_pages_spanned (iov->iov_base, fraglen);
260 if (nmapped > maxmapped) {
261 CERROR("Can't map message in %d pages (max %d)\n",
266 if (nfrags == EP_MAXFRAG) {
267 CERROR("Message too fragmented in Elan VM (max %d frags)\n",
273 "%p[%d] loading %p for %d, pages %d for %ld, %d total\n",
274 ktx, nfrags, iov->iov_base + offset, fraglen,
275 basepage, npages, nmapped);
277 ep_dvma_load(kqswnal_data.kqn_ep, NULL,
278 iov->iov_base + offset, fraglen,
279 kqswnal_data.kqn_ep_tx_nmh, basepage,
280 &railmask, &ktx->ktx_frags[nfrags]);
282 if (nfrags == ktx->ktx_firsttmpfrag ||
283 !ep_nmd_merge(&ktx->ktx_frags[nfrags - 1],
284 &ktx->ktx_frags[nfrags - 1],
285 &ktx->ktx_frags[nfrags])) {
286 /* new frag if this is the first or can't merge */
290 /* keep in loop for failure case */
291 ktx->ktx_nmappedpages = nmapped;
299 /* iov must not run out before end of data */
300 LASSERT (nob == 0 || niov > 0);
304 ktx->ktx_nfrag = nfrags;
305 CDEBUG (D_NET, "%p got %d frags over %d pages\n",
306 ktx, ktx->ktx_nfrag, ktx->ktx_nmappedpages);
313 kqswnal_csum_iov (__u32 csum, int offset, int nob,
314 unsigned int niov, struct iovec *iov)
322 /* skip complete frags before offset */
323 while (offset >= iov->iov_len) {
324 offset -= iov->iov_len;
331 int fraglen = iov->iov_len - offset;
336 csum = kqswnal_csum(csum, iov->iov_base + offset, fraglen);
343 /* iov must not run out before end of data */
344 LASSERT (nob == 0 || niov > 0);
353 kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
357 kqswnal_unmap_tx (ktx); /* release temporary mappings */
358 ktx->ktx_state = KTX_IDLE;
360 spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
362 list_del (&ktx->ktx_list); /* take off active list */
363 list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
365 spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
369 kqswnal_get_idle_tx (void)
374 spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
376 if (kqswnal_data.kqn_shuttingdown ||
377 list_empty (&kqswnal_data.kqn_idletxds)) {
378 spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
383 ktx = list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t, ktx_list);
384 list_del (&ktx->ktx_list);
386 list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
387 ktx->ktx_launcher = current->pid;
388 atomic_inc(&kqswnal_data.kqn_pending_txs);
390 spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
392 /* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
393 LASSERT (ktx->ktx_nmappedpages == 0);
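/* A minimal sketch (not part of the build) of the descriptor pool
 * discipline used above: idle and active descriptors live on two lists
 * covered by one irq-safe spinlock, so both process and interrupt
 * context can take descriptors.  struct sketch_pool is hypothetical;
 * the real lists and lock live in kqswnal_data. */
#if 0
struct sketch_pool {
        spinlock_t       sp_lock;
        struct list_head sp_idle;       /* free descriptors */
        struct list_head sp_active;     /* descriptors in flight */
};

static kqswnal_tx_t *
sketch_pool_get (struct sketch_pool *pool)
{
        kqswnal_tx_t  *ktx = NULL;
        unsigned long  flags;

        spin_lock_irqsave(&pool->sp_lock, flags);

        if (!list_empty(&pool->sp_idle)) {
                ktx = list_entry(pool->sp_idle.next, kqswnal_tx_t, ktx_list);
                list_del(&ktx->ktx_list);
                list_add(&ktx->ktx_list, &pool->sp_active);
        }

        spin_unlock_irqrestore(&pool->sp_lock, flags);
        return ktx;
}
#endif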
398 kqswnal_tx_done_in_thread_context (kqswnal_tx_t *ktx)
400 lnet_msg_t *lnetmsg0 = NULL;
401 lnet_msg_t *lnetmsg1 = NULL;
406 LASSERT (!in_interrupt());
408 if (ktx->ktx_status == -EHOSTDOWN)
409 kqswnal_notify_peer_down(ktx);
411 switch (ktx->ktx_state) {
412 case KTX_RDMA_FETCH: /* optimized PUT/REPLY handled */
413 krx = (kqswnal_rx_t *)ktx->ktx_args[0];
414 lnetmsg0 = (lnet_msg_t *)ktx->ktx_args[1];
415 status0 = ktx->ktx_status;
417 if (status0 == 0) { /* RDMA succeeded */
421 msg = (kqswnal_msg_t *)
422 page_address(krx->krx_kiov[0].kiov_page);
424 csum = (lnetmsg0->msg_kiov != NULL) ?
425 kqswnal_csum_kiov(krx->krx_cksum,
426 lnetmsg0->msg_offset,
427 lnetmsg0->msg_wanted,
429 lnetmsg0->msg_kiov) :
430 kqswnal_csum_iov(krx->krx_cksum,
431 lnetmsg0->msg_offset,
432 lnetmsg0->msg_wanted,
436 /* Can only check csum if I got it all */
437 if (lnetmsg0->msg_wanted == lnetmsg0->msg_len &&
438 csum != msg->kqm_cksum) {
439 ktx->ktx_status = -EIO;
440 krx->krx_rpc_reply.msg.status = -EIO;
441 CERROR("RDMA checksum failed %u(%u) from %s\n",
442 csum, msg->kqm_cksum,
443 libcfs_nid2str(kqswnal_rx_nid(krx)));
447 LASSERT (krx->krx_state == KRX_COMPLETING);
448 kqswnal_rx_decref (krx);
451 case KTX_RDMA_STORE: /* optimized GET handled */
452 case KTX_PUTTING: /* optimized PUT sent */
453 case KTX_SENDING: /* normal send */
454 lnetmsg0 = (lnet_msg_t *)ktx->ktx_args[1];
455 status0 = ktx->ktx_status;
458 case KTX_GETTING: /* optimized GET sent & payload received */
459 /* Complete the GET with success since we can't avoid
460 * delivering a REPLY event; we committed to it when we
461 * launched the GET */
462 lnetmsg0 = (lnet_msg_t *)ktx->ktx_args[1];
464 lnetmsg1 = (lnet_msg_t *)ktx->ktx_args[2];
465 status1 = ktx->ktx_status;
467 if (status1 == 0) { /* RDMA succeeded */
468 lnet_msg_t *lnetmsg0 = (lnet_msg_t *)ktx->ktx_args[1];
469 lnet_libmd_t *md = lnetmsg0->msg_md;
472 csum = ((md->md_options & LNET_MD_KIOV) != 0) ?
473 kqswnal_csum_kiov(~0, 0,
477 kqswnal_csum_iov(~0, 0,
482 if (csum != ktx->ktx_cksum) {
483 CERROR("RDMA checksum failed %u(%u) from %s\n",
484 csum, ktx->ktx_cksum,
485 libcfs_nid2str(ktx->ktx_nid));
496 kqswnal_put_idle_tx (ktx);
498 lnet_finalize (kqswnal_data.kqn_ni, lnetmsg0, status0);
499 if (lnetmsg1 != NULL)
500 lnet_finalize (kqswnal_data.kqn_ni, lnetmsg1, status1);
504 kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
508 ktx->ktx_status = status;
510 if (!in_interrupt()) {
511 kqswnal_tx_done_in_thread_context(ktx);
515 /* Complete the send in thread context */
516 spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
518 list_add_tail(&ktx->ktx_schedlist,
519 &kqswnal_data.kqn_donetxds);
520 wake_up(&kqswnal_data.kqn_sched_waitq);
522 spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
526 kqswnal_txhandler(EP_TXD *txd, void *arg, int status)
528 kqswnal_tx_t *ktx = (kqswnal_tx_t *)arg;
529 kqswnal_rpc_reply_t *reply;
531 LASSERT (txd != NULL);
532 LASSERT (ktx != NULL);
534 CDEBUG(D_NET, "txd %p, arg %p status %d\n", txd, arg, status);
536 if (status != EP_SUCCESS) {
538 CDEBUG (D_NETERROR, "Tx completion to %s failed: %d\n",
539 libcfs_nid2str(ktx->ktx_nid), status);
543 } else switch (ktx->ktx_state) {
548 reply = (kqswnal_rpc_reply_t *)ep_txd_statusblk(txd);
549 if (reply->msg.magic == 0) { /* "old" peer */
550 status = reply->msg.status;
554 if (reply->msg.magic != LNET_PROTO_QSW_MAGIC) {
555 if (reply->msg.magic != swab32(LNET_PROTO_QSW_MAGIC)) {
556 CERROR("%s unexpected rpc reply magic %08x\n",
557 libcfs_nid2str(ktx->ktx_nid),
563 __swab32s(&reply->msg.status);
564 __swab32s(&reply->msg.version);
566 if (ktx->ktx_state == KTX_GETTING) {
567 __swab32s(&reply->msg.u.get.len);
568 __swab32s(&reply->msg.u.get.cksum);
572 status = reply->msg.status;
574 CERROR("%s RPC status %08x\n",
575 libcfs_nid2str(ktx->ktx_nid), status);
579 if (ktx->ktx_state == KTX_GETTING) {
580 lnet_set_reply_msg_len(kqswnal_data.kqn_ni,
581 (lnet_msg_t *)ktx->ktx_args[2],
582 reply->msg.u.get.len);
584 ktx->ktx_cksum = reply->msg.u.get.cksum;
598 kqswnal_tx_done(ktx, status);
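/* A minimal sketch (not part of the build) of the wire-format check made
 * on the RPC status block above: accept the magic either way round, and
 * if it arrived byte-swapped, swab every multi-byte field before use.
 * sketch_reply_t is a hypothetical stand-in for kqswnal_rpc_reply_t. */
#if 0
typedef struct {
        __u32 sr_magic;
        __u32 sr_status;
} sketch_reply_t;

static int
sketch_check_reply (sketch_reply_t *reply, __u32 magic)
{
        if (reply->sr_magic == magic)
                return 0;                        /* same byte order as me */

        if (reply->sr_magic == __swab32(magic)) {
                __swab32s(&reply->sr_status);    /* peer is byte-swapped */
                return 0;
        }

        return -EPROTO;                          /* not my protocol at all */
}
#endif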
602 kqswnal_launch (kqswnal_tx_t *ktx)
604 /* Don't block for transmit descriptor if we're in interrupt context */
605 int attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
606 int dest = kqswnal_nid2elanid (ktx->ktx_nid);
610 ktx->ktx_launchtime = cfs_time_current();
612 if (kqswnal_data.kqn_shuttingdown)
615 LASSERT (dest >= 0); /* must be a peer */
617 if (ktx->ktx_nmappedpages != 0)
618 attr = EP_SET_PREFRAIL(attr, ktx->ktx_rail);
620 switch (ktx->ktx_state) {
623 if (the_lnet.ln_testprotocompat != 0) {
624 kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer;
626 /* single-shot proto test:
627 * Future version queries will use an RPC, so I'll
628 * co-opt one of the existing ones */
630 if ((the_lnet.ln_testprotocompat & 1) != 0) {
632 the_lnet.ln_testprotocompat &= ~1;
634 if ((the_lnet.ln_testprotocompat & 2) != 0) {
635 msg->kqm_magic = LNET_PROTO_MAGIC;
636 the_lnet.ln_testprotocompat &= ~2;
641 /* NB ktx_frag[0] is the GET/PUT hdr + kqswnal_remotemd_t.
642 * The other frags are the payload, awaiting RDMA */
643 rc = ep_transmit_rpc(kqswnal_data.kqn_eptx, dest,
645 kqswnal_txhandler, ktx,
646 NULL, ktx->ktx_frags, 1);
650 rc = ep_transmit_message(kqswnal_data.kqn_eptx, dest,
652 kqswnal_txhandler, ktx,
653 NULL, ktx->ktx_frags, ktx->ktx_nfrag);
658 rc = -EINVAL; /* no compiler warning please */
663 case EP_SUCCESS: /* success */
666 case EP_ENOMEM: /* can't allocate ep txd => queue for later */
667 spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
669 list_add_tail (&ktx->ktx_schedlist, &kqswnal_data.kqn_delayedtxds);
670 wake_up (&kqswnal_data.kqn_sched_waitq);
672 spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
675 default: /* fatal error */
676 CDEBUG (D_NETERROR, "Tx to %s failed: %d\n", libcfs_nid2str(ktx->ktx_nid), rc);
677 kqswnal_notify_peer_down(ktx);
678 return (-EHOSTUNREACH);
684 hdr_type_string (lnet_hdr_t *hdr)
696 return ("<UNKNOWN>");
701 kqswnal_cerror_hdr(lnet_hdr_t * hdr)
703 char *type_str = hdr_type_string (hdr);
705 CERROR("P3 Header at %p of type %s length %d\n", hdr, type_str,
706 le32_to_cpu(hdr->payload_length));
707 CERROR(" From nid/pid "LPU64"/%u\n", le64_to_cpu(hdr->src_nid),
708 le32_to_cpu(hdr->src_pid));
709 CERROR(" To nid/pid "LPU64"/%u\n", le64_to_cpu(hdr->dest_nid),
710 le32_to_cpu(hdr->dest_pid));
712 switch (le32_to_cpu(hdr->type)) {
714 CERROR(" Ptl index %d, ack md "LPX64"."LPX64", "
715 "match bits "LPX64"\n",
716 le32_to_cpu(hdr->msg.put.ptl_index),
717 hdr->msg.put.ack_wmd.wh_interface_cookie,
718 hdr->msg.put.ack_wmd.wh_object_cookie,
719 le64_to_cpu(hdr->msg.put.match_bits));
720 CERROR(" offset %d, hdr data "LPX64"\n",
721 le32_to_cpu(hdr->msg.put.offset),
722 hdr->msg.put.hdr_data);
726 CERROR(" Ptl index %d, return md "LPX64"."LPX64", "
727 "match bits "LPX64"\n",
728 le32_to_cpu(hdr->msg.get.ptl_index),
729 hdr->msg.get.return_wmd.wh_interface_cookie,
730 hdr->msg.get.return_wmd.wh_object_cookie,
731 hdr->msg.get.match_bits);
732 CERROR(" Length %d, src offset %d\n",
733 le32_to_cpu(hdr->msg.get.sink_length),
734 le32_to_cpu(hdr->msg.get.src_offset));
738 CERROR(" dst md "LPX64"."LPX64", manipulated length %d\n",
739 hdr->msg.ack.dst_wmd.wh_interface_cookie,
740 hdr->msg.ack.dst_wmd.wh_object_cookie,
741 le32_to_cpu(hdr->msg.ack.mlength));
745 CERROR(" dst md "LPX64"."LPX64"\n",
746 hdr->msg.reply.dst_wmd.wh_interface_cookie,
747 hdr->msg.reply.dst_wmd.wh_object_cookie);
750 } /* end of kqswnal_cerror_hdr() */
754 kqswnal_check_rdma (int nlfrag, EP_NMD *lfrag,
755 int nrfrag, EP_NMD *rfrag)
759 if (nlfrag != nrfrag) {
760 CERROR("Can't cope with unequal # frags: %d local %d remote\n",
765 for (i = 0; i < nlfrag; i++)
766 if (lfrag[i].nmd_len != rfrag[i].nmd_len) {
767 CERROR("Can't cope with unequal frags %d(%d):"
768 " %d local %d remote\n",
769 i, nlfrag, lfrag[i].nmd_len, rfrag[i].nmd_len);
777 kqswnal_get_portalscompat_rmd (kqswnal_rx_t *krx)
779 /* Check that the RMD sent after the "raw" LNET header in a
780 * portals-compatible QSWLND message is OK */
781 char *buffer = (char *)page_address(krx->krx_kiov[0].kiov_page);
782 kqswnal_remotemd_t *rmd = (kqswnal_remotemd_t *)(buffer + sizeof(lnet_hdr_t));
784 /* Note RDMA addresses are sent in native endian-ness in the "old"
785 * portals protocol so no swabbing... */
787 if (buffer + krx->krx_nob < (char *)(rmd + 1)) {
788 /* msg too small to discover rmd size */
789 CERROR ("Incoming message [%d] too small for RMD (%d needed)\n",
790 krx->krx_nob, (int)(((char *)(rmd + 1)) - buffer));
794 if (buffer + krx->krx_nob < (char *)&rmd->kqrmd_frag[rmd->kqrmd_nfrag]) {
795 /* rmd doesn't fit in the incoming message */
796 CERROR ("Incoming message [%d] too small for RMD[%d] (%d needed)\n",
797 krx->krx_nob, rmd->kqrmd_nfrag,
798 (int)(((char *)&rmd->kqrmd_frag[rmd->kqrmd_nfrag]) - buffer));
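/* A minimal sketch (not part of the build) of the two-stage bounds check
 * above for a variable-length descriptor: first ensure the fixed part
 * fits within the received bytes so the frag count can be read, then
 * ensure the frag array it declares also fits.  sketch_rmd_t is a
 * hypothetical stand-in for kqswnal_remotemd_t. */
#if 0
typedef struct {
        __u32 sr_nfrag;
        __u32 sr_frag[0];               /* sr_nfrag entries follow */
} sketch_rmd_t;

static int
sketch_check_rmd (char *buffer, int nob, int hdr_nob)
{
        sketch_rmd_t *rmd = (sketch_rmd_t *)(buffer + hdr_nob);

        /* fixed part must fit before sr_nfrag can be trusted */
        if (buffer + nob < (char *)(rmd + 1))
                return -EPROTO;

        /* ...and so must the frag array it declares */
        if (buffer + nob < (char *)&rmd->sr_frag[rmd->sr_nfrag])
                return -EPROTO;

        return 0;
}
#endif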
806 kqswnal_rdma_store_complete (EP_RXD *rxd)
808 int status = ep_rxd_status(rxd);
809 kqswnal_tx_t *ktx = (kqswnal_tx_t *)ep_rxd_arg(rxd);
810 kqswnal_rx_t *krx = (kqswnal_rx_t *)ktx->ktx_args[0];
812 CDEBUG((status == EP_SUCCESS) ? D_NET : D_ERROR,
813 "rxd %p, ktx %p, status %d\n", rxd, ktx, status);
815 LASSERT (ktx->ktx_state == KTX_RDMA_STORE);
816 LASSERT (krx->krx_rxd == rxd);
817 LASSERT (krx->krx_rpc_reply_needed);
819 krx->krx_rpc_reply_needed = 0;
820 kqswnal_rx_decref (krx);
822 /* free ktx & finalize() its lnet_msg_t */
823 kqswnal_tx_done(ktx, (status == EP_SUCCESS) ? 0 : -ECONNABORTED);
827 kqswnal_rdma_fetch_complete (EP_RXD *rxd)
829 /* Completed fetching the PUT/REPLY data */
830 int status = ep_rxd_status(rxd);
831 kqswnal_tx_t *ktx = (kqswnal_tx_t *)ep_rxd_arg(rxd);
832 kqswnal_rx_t *krx = (kqswnal_rx_t *)ktx->ktx_args[0];
834 CDEBUG((status == EP_SUCCESS) ? D_NET : D_ERROR,
835 "rxd %p, ktx %p, status %d\n", rxd, ktx, status);
837 LASSERT (ktx->ktx_state == KTX_RDMA_FETCH);
838 LASSERT (krx->krx_rxd == rxd);
839 /* RPC completes with failure by default */
840 LASSERT (krx->krx_rpc_reply_needed);
841 LASSERT (krx->krx_rpc_reply.msg.status != 0);
843 if (status == EP_SUCCESS) {
844 krx->krx_rpc_reply.msg.status = 0;
847 /* Abandon RPC since get failed */
848 krx->krx_rpc_reply_needed = 0;
849 status = -ECONNABORTED;
852 /* krx gets decref'd in kqswnal_tx_done_in_thread_context() */
853 LASSERT (krx->krx_state == KRX_PARSE);
854 krx->krx_state = KRX_COMPLETING;
856 /* free ktx & finalize() its lnet_msg_t */
857 kqswnal_tx_done(ktx, status);
861 kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg,
862 int type, kqswnal_remotemd_t *rmd,
863 unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
864 unsigned int offset, unsigned int len)
870 /* Not both mapped and paged payload */
871 LASSERT (iov == NULL || kiov == NULL);
872 /* RPC completes with failure by default */
873 LASSERT (krx->krx_rpc_reply_needed);
874 LASSERT (krx->krx_rpc_reply.msg.status != 0);
877 /* data got truncated to nothing. */
878 lnet_finalize(kqswnal_data.kqn_ni, lntmsg, 0);
879 /* Let kqswnal_rx_done() complete the RPC with success */
880 krx->krx_rpc_reply.msg.status = 0;
884 /* NB I'm using 'ktx' just to map the local RDMA buffers; I'm not
885 actually sending a portals message with it */
886 ktx = kqswnal_get_idle_tx();
888 CERROR ("Can't get txd for RDMA with %s\n",
889 libcfs_nid2str(kqswnal_rx_nid(krx)));
893 ktx->ktx_state = type;
894 ktx->ktx_nid = kqswnal_rx_nid(krx);
895 ktx->ktx_args[0] = krx;
896 ktx->ktx_args[1] = lntmsg;
898 LASSERT (atomic_read(&krx->krx_refcount) > 0);
899 /* Take an extra ref for the completion callback */
900 atomic_inc(&krx->krx_refcount);
902 /* Map on the rail the RPC prefers */
903 ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx,
904 ep_rxd_railmask(krx->krx_rxd));
906 /* Start mapping at offset 0 (we're not mapping any headers) */
907 ktx->ktx_nfrag = ktx->ktx_firsttmpfrag = 0;
910 rc = kqswnal_map_tx_kiov(ktx, offset, len, niov, kiov);
912 rc = kqswnal_map_tx_iov(ktx, offset, len, niov, iov);
915 CERROR ("Can't map local RDMA data: %d\n", rc);
919 rc = kqswnal_check_rdma (ktx->ktx_nfrag, ktx->ktx_frags,
920 rmd->kqrmd_nfrag, rmd->kqrmd_frag);
922 CERROR ("Incompatible RDMA descriptors\n");
931 krx->krx_rpc_reply.msg.status = 0;
932 krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
933 krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
934 krx->krx_rpc_reply.msg.u.get.len = len;
936 krx->krx_rpc_reply.msg.u.get.cksum = (kiov != NULL) ?
937 kqswnal_csum_kiov(~0, offset, len, niov, kiov) :
938 kqswnal_csum_iov(~0, offset, len, niov, iov);
939 if (*kqswnal_tunables.kqn_inject_csum_error == 4) {
940 krx->krx_rpc_reply.msg.u.get.cksum++;
941 *kqswnal_tunables.kqn_inject_csum_error = 0;
944 eprc = ep_complete_rpc(krx->krx_rxd,
945 kqswnal_rdma_store_complete, ktx,
946 &krx->krx_rpc_reply.ep_statusblk,
947 ktx->ktx_frags, rmd->kqrmd_frag,
949 if (eprc != EP_SUCCESS) {
950 CERROR("can't complete RPC: %d\n", eprc);
951 /* don't re-attempt RPC completion */
952 krx->krx_rpc_reply_needed = 0;
958 eprc = ep_rpc_get (krx->krx_rxd,
959 kqswnal_rdma_fetch_complete, ktx,
960 rmd->kqrmd_frag, ktx->ktx_frags, ktx->ktx_nfrag);
961 if (eprc != EP_SUCCESS) {
962 CERROR("ep_rpc_get failed: %d\n", eprc);
963 /* Don't attempt RPC completion:
964 * EKC nuked it when the get failed */
965 krx->krx_rpc_reply_needed = 0;
973 kqswnal_rx_decref(krx); /* drop callback's ref */
974 kqswnal_put_idle_tx (ktx);
977 atomic_dec(&kqswnal_data.kqn_pending_txs);
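/* A minimal sketch (not part of the build) of the reference discipline
 * used above: a ref is taken for the completion callback before it is
 * armed; if arming fails the callback will never run, so that ref is
 * dropped by hand on the failure path.  sketch_arm() is a hypothetical
 * stand-in for ep_complete_rpc()/ep_rpc_get(). */
#if 0
static int
sketch_start_rdma (kqswnal_rx_t *krx, int (*sketch_arm)(kqswnal_rx_t *))
{
        int rc;

        atomic_inc(&krx->krx_refcount);         /* ref for the callback */

        rc = sketch_arm(krx);
        if (rc != 0)
                kqswnal_rx_decref(krx);         /* callback will never run */

        return rc;
}
#endif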
982 kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
984 lnet_hdr_t *hdr = &lntmsg->msg_hdr;
985 int type = lntmsg->msg_type;
986 lnet_process_id_t target = lntmsg->msg_target;
987 int target_is_router = lntmsg->msg_target_is_router;
988 int routing = lntmsg->msg_routing;
989 unsigned int payload_niov = lntmsg->msg_niov;
990 struct iovec *payload_iov = lntmsg->msg_iov;
991 lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
992 unsigned int payload_offset = lntmsg->msg_offset;
993 unsigned int payload_nob = lntmsg->msg_len;
998 /* NB 1. hdr is in network byte order */
999 /* 2. 'private' depends on the message type */
1001 CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
1002 payload_nob, payload_niov, libcfs_id2str(target));
1004 LASSERT (payload_nob == 0 || payload_niov > 0);
1005 LASSERT (payload_niov <= LNET_MAX_IOV);
1007 /* It must be OK to kmap() if required */
1008 LASSERT (payload_kiov == NULL || !in_interrupt ());
1009 /* payload is either all vaddrs or all pages */
1010 LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
1012 if (kqswnal_nid2elanid (target.nid) < 0) {
1013 CERROR("%s not in my cluster\n", libcfs_nid2str(target.nid));
1017 /* I may not block for a transmit descriptor if I might block the
1018 * router, receiver, or an interrupt handler. */
1019 ktx = kqswnal_get_idle_tx();
1021 CERROR ("Can't get txd for msg type %d for %s\n",
1022 type, libcfs_nid2str(target.nid));
1026 ktx->ktx_state = KTX_SENDING;
1027 ktx->ktx_nid = target.nid;
1028 ktx->ktx_args[0] = private;
1029 ktx->ktx_args[1] = lntmsg;
1030 ktx->ktx_args[2] = NULL; /* set when a GET commits to REPLY */
1032 /* The first frag will be the pre-mapped buffer. */
1033 ktx->ktx_nfrag = ktx->ktx_firsttmpfrag = 1;
1035 if ((!target_is_router && /* target.nid is final dest */
1036 !routing && /* I'm the source */
1037 type == LNET_MSG_GET && /* optimize GET? */
1038 *kqswnal_tunables.kqn_optimized_gets != 0 &&
1039 lntmsg->msg_md->md_length >=
1040 *kqswnal_tunables.kqn_optimized_gets) ||
1041 ((type == LNET_MSG_PUT || /* optimize PUT? */
1042 type == LNET_MSG_REPLY) && /* optimize REPLY? */
1043 *kqswnal_tunables.kqn_optimized_puts != 0 &&
1044 payload_nob >= *kqswnal_tunables.kqn_optimized_puts)) {
1045 lnet_libmd_t *md = lntmsg->msg_md;
1046 kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer;
1048 kqswnal_remotemd_t *rmd;
1050 /* Optimised path: I send over the Elan vaddrs of the local
1051 * buffers, and my peer DMAs directly to/from them.
1053 * First I set up ktx as if it was going to send this
1054 * payload (it needs to map it anyway). This fills
1055 * ktx_frags[1] and onward with the network addresses
1056 * of the buffer frags. */
1058 /* Send an RDMA message */
1059 msg->kqm_magic = LNET_PROTO_QSW_MAGIC;
1060 msg->kqm_version = QSWLND_PROTO_VERSION;
1061 msg->kqm_type = QSWLND_MSG_RDMA;
1063 mhdr = &msg->kqm_u.rdma.kqrm_hdr;
1064 rmd = &msg->kqm_u.rdma.kqrm_rmd;
1067 nob = (((char *)rmd) - ktx->ktx_buffer);
1069 if (type == LNET_MSG_GET) {
1070 if ((md->md_options & LNET_MD_KIOV) != 0)
1071 rc = kqswnal_map_tx_kiov (ktx, 0, md->md_length,
1072 md->md_niov, md->md_iov.kiov);
1074 rc = kqswnal_map_tx_iov (ktx, 0, md->md_length,
1075 md->md_niov, md->md_iov.iov);
1076 ktx->ktx_state = KTX_GETTING;
1078 if (payload_kiov != NULL)
1079 rc = kqswnal_map_tx_kiov(ktx, 0, payload_nob,
1080 payload_niov, payload_kiov);
1082 rc = kqswnal_map_tx_iov(ktx, 0, payload_nob,
1083 payload_niov, payload_iov);
1084 ktx->ktx_state = KTX_PUTTING;
1090 rmd->kqrmd_nfrag = ktx->ktx_nfrag - 1;
1091 nob += offsetof(kqswnal_remotemd_t,
1092 kqrmd_frag[rmd->kqrmd_nfrag]);
1093 LASSERT (nob <= KQSW_TX_BUFFER_SIZE);
1095 memcpy(&rmd->kqrmd_frag[0], &ktx->ktx_frags[1],
1096 rmd->kqrmd_nfrag * sizeof(EP_NMD));
1098 ep_nmd_subset(&ktx->ktx_frags[0], &ktx->ktx_ebuffer, 0, nob);
1100 msg->kqm_nob = nob + payload_nob;
1102 msg->kqm_cksum = kqswnal_csum(~0, (char *)msg, nob);
1104 if (type == LNET_MSG_GET) {
1105 /* Allocate reply message now while I'm in thread context */
1106 ktx->ktx_args[2] = lnet_create_reply_msg (
1107 kqswnal_data.kqn_ni, lntmsg);
1108 if (ktx->ktx_args[2] == NULL)
1111 /* NB finalizing the REPLY message is my
1112 * responsibility now, whatever happens. */
1114 if (*kqswnal_tunables.kqn_inject_csum_error == 3) {
1116 *kqswnal_tunables.kqn_inject_csum_error = 0;
1119 } else if (payload_kiov != NULL) {
1120 /* must checksum payload after header so receiver can
1121 * compute partial header cksum before swab. Sadly
1122 * this causes 2 rounds of kmap */
1124 kqswnal_csum_kiov(msg->kqm_cksum, 0, payload_nob,
1125 payload_niov, payload_kiov);
1126 if (*kqswnal_tunables.kqn_inject_csum_error == 2) {
1128 *kqswnal_tunables.kqn_inject_csum_error = 0;
1132 kqswnal_csum_iov(msg->kqm_cksum, 0, payload_nob,
1133 payload_niov, payload_iov);
1134 if (*kqswnal_tunables.kqn_inject_csum_error == 2) {
1136 *kqswnal_tunables.kqn_inject_csum_error = 0;
1141 } else if (payload_nob <= *kqswnal_tunables.kqn_tx_maxcontig) {
1144 kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer;
1146 /* single frag copied into the pre-mapped buffer */
1147 msg->kqm_magic = LNET_PROTO_QSW_MAGIC;
1148 msg->kqm_version = QSWLND_PROTO_VERSION;
1149 msg->kqm_type = QSWLND_MSG_IMMEDIATE;
1151 mhdr = &msg->kqm_u.immediate.kqim_hdr;
1152 payload = msg->kqm_u.immediate.kqim_payload;
1155 nob = (payload - ktx->ktx_buffer) + payload_nob;
1157 ep_nmd_subset(&ktx->ktx_frags[0], &ktx->ktx_ebuffer, 0, nob);
1159 if (payload_kiov != NULL)
1160 lnet_copy_kiov2flat(KQSW_TX_BUFFER_SIZE, payload, 0,
1161 payload_niov, payload_kiov,
1162 payload_offset, payload_nob);
1164 lnet_copy_iov2flat(KQSW_TX_BUFFER_SIZE, payload, 0,
1165 payload_niov, payload_iov,
1166 payload_offset, payload_nob);
1170 msg->kqm_cksum = kqswnal_csum(~0, (char *)msg, nob);
1171 if (*kqswnal_tunables.kqn_inject_csum_error == 1) {
1173 *kqswnal_tunables.kqn_inject_csum_error = 0;
1178 kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer;
1180 /* multiple frags: first is hdr in pre-mapped buffer */
1181 msg->kqm_magic = LNET_PROTO_QSW_MAGIC;
1182 msg->kqm_version = QSWLND_PROTO_VERSION;
1183 msg->kqm_type = QSWLND_MSG_IMMEDIATE;
1185 mhdr = &msg->kqm_u.immediate.kqim_hdr;
1186 nob = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload);
1190 ep_nmd_subset(&ktx->ktx_frags[0], &ktx->ktx_ebuffer, 0, nob);
1192 if (payload_kiov != NULL)
1193 rc = kqswnal_map_tx_kiov (ktx, payload_offset, payload_nob,
1194 payload_niov, payload_kiov);
1196 rc = kqswnal_map_tx_iov (ktx, payload_offset, payload_nob,
1197 payload_niov, payload_iov);
1202 msg->kqm_nob = nob + payload_nob;
1204 msg->kqm_cksum = kqswnal_csum(~0, (char *)msg, nob);
1206 msg->kqm_cksum = (payload_kiov != NULL) ?
1207 kqswnal_csum_kiov(msg->kqm_cksum,
1208 payload_offset, payload_nob,
1209 payload_niov, payload_kiov) :
1210 kqswnal_csum_iov(msg->kqm_cksum,
1211 payload_offset, payload_nob,
1212 payload_niov, payload_iov);
1214 if (*kqswnal_tunables.kqn_inject_csum_error == 1) {
1216 *kqswnal_tunables.kqn_inject_csum_error = 0;
1222 ktx->ktx_port = (nob <= KQSW_SMALLMSG) ?
1223 EP_MSG_SVC_PORTALS_SMALL : EP_MSG_SVC_PORTALS_LARGE;
1225 rc = kqswnal_launch (ktx);
1228 CDEBUG(rc == 0 ? D_NET : D_NETERROR, "%s %d bytes to %s%s: rc %d\n",
1229 routing ? (rc == 0 ? "Routed" : "Failed to route") :
1230 (rc == 0 ? "Sent" : "Failed to send"),
1231 nob, libcfs_nid2str(target.nid),
1232 target_is_router ? "(router)" : "", rc);
1235 lnet_msg_t *repmsg = (lnet_msg_t *)ktx->ktx_args[2];
1236 int state = ktx->ktx_state;
1238 kqswnal_put_idle_tx (ktx);
1240 if (state == KTX_GETTING && repmsg != NULL) {
1241 /* We committed to reply, but there was a problem
1242 * launching the GET. We can't avoid delivering a
1243 * REPLY event since we committed above, so we
1244 * pretend the GET succeeded but the REPLY
1247 lnet_finalize (kqswnal_data.kqn_ni, lntmsg, 0);
1248 lnet_finalize (kqswnal_data.kqn_ni, repmsg, -EIO);
1253 atomic_dec(&kqswnal_data.kqn_pending_txs);
1254 return (rc == 0 ? 0 : -EIO);
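/* A minimal sketch (not part of the build) of the three transmit
 * strategies chosen above, reduced to the size/tunable tests.  The enum
 * and helper are hypothetical; the thresholds correspond to
 * kqn_optimized_gets/kqn_optimized_puts and kqn_tx_maxcontig. */
#if 0
typedef enum {
        SKETCH_TX_RDMA,         /* send an RMD; peer DMAs payload directly */
        SKETCH_TX_IMMEDIATE,    /* copy payload into the pre-mapped buffer */
        SKETCH_TX_FRAGGED       /* header pre-mapped, payload mapped frags */
} sketch_tx_path_t;

static sketch_tx_path_t
sketch_choose_path (int payload_nob, int optimized_thresh, int maxcontig)
{
        if (optimized_thresh != 0 && payload_nob >= optimized_thresh)
                return SKETCH_TX_RDMA;

        if (payload_nob <= maxcontig)
                return SKETCH_TX_IMMEDIATE;

        return SKETCH_TX_FRAGGED;
}
#endif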
1258 kqswnal_requeue_rx (kqswnal_rx_t *krx)
1260 LASSERT (atomic_read(&krx->krx_refcount) == 0);
1261 LASSERT (!krx->krx_rpc_reply_needed);
1263 krx->krx_state = KRX_POSTED;
1265 if (kqswnal_data.kqn_shuttingdown) {
1266 /* free EKC rxd on shutdown */
1267 ep_complete_receive(krx->krx_rxd);
1269 /* repost receive */
1270 ep_requeue_receive(krx->krx_rxd,
1271 kqswnal_rxhandler, krx,
1272 &krx->krx_elanbuffer, 0);
1277 kqswnal_rpc_complete (EP_RXD *rxd)
1279 int status = ep_rxd_status(rxd);
1280 kqswnal_rx_t *krx = (kqswnal_rx_t *)ep_rxd_arg(rxd);
1282 CDEBUG((status == EP_SUCCESS) ? D_NET : D_ERROR,
1283 "rxd %p, krx %p, status %d\n", rxd, krx, status);
1285 LASSERT (krx->krx_rxd == rxd);
1286 LASSERT (krx->krx_rpc_reply_needed);
1288 krx->krx_rpc_reply_needed = 0;
1289 kqswnal_requeue_rx (krx);
1293 kqswnal_rx_done (kqswnal_rx_t *krx)
1297 LASSERT (atomic_read(&krx->krx_refcount) == 0);
1299 if (krx->krx_rpc_reply_needed) {
1300 /* We've not completed the peer's RPC yet... */
1301 krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
1302 krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
1304 LASSERT (!in_interrupt());
1306 rc = ep_complete_rpc(krx->krx_rxd,
1307 kqswnal_rpc_complete, krx,
1308 &krx->krx_rpc_reply.ep_statusblk,
1310 if (rc == EP_SUCCESS)
1313 CERROR("can't complete RPC: %d\n", rc);
1314 krx->krx_rpc_reply_needed = 0;
1317 kqswnal_requeue_rx(krx);
1321 kqswnal_parse (kqswnal_rx_t *krx)
1323 lnet_ni_t *ni = kqswnal_data.kqn_ni;
1324 kqswnal_msg_t *msg = (kqswnal_msg_t *)page_address(krx->krx_kiov[0].kiov_page);
1325 lnet_nid_t fromnid = kqswnal_rx_nid(krx);
1332 LASSERT (atomic_read(&krx->krx_refcount) == 1);
1334 if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) {
1335 CERROR("Short message %d received from %s\n",
1336 krx->krx_nob, libcfs_nid2str(fromnid));
1340 swab = msg->kqm_magic == __swab32(LNET_PROTO_QSW_MAGIC);
1342 if (swab || msg->kqm_magic == LNET_PROTO_QSW_MAGIC) {
1347 /* csum byte array before swab */
1348 csum1 = msg->kqm_cksum;
1350 csum0 = kqswnal_csum_kiov(~0, 0, krx->krx_nob,
1351 krx->krx_npages, krx->krx_kiov);
1352 msg->kqm_cksum = csum1;
1356 __swab16s(&msg->kqm_version);
1357 __swab16s(&msg->kqm_type);
1359 __swab32s(&msg->kqm_cksum);
1360 __swab32s(&msg->kqm_nob);
1364 if (msg->kqm_version != QSWLND_PROTO_VERSION) {
1365 /* Future protocol version compatibility support!
1366 * The next qswlnd-specific protocol rev will first
1367 * send an RPC to check version.
1368 * 1.4.6 and 1.4.7.early reply with a status
1369 * block containing its current version.
1370 * Later versions send a failure (-ve) status +
1373 if (!krx->krx_rpc_reply_needed) {
1374 CERROR("Unexpected version %d from %s\n",
1375 msg->kqm_version, libcfs_nid2str(fromnid));
1379 LASSERT (krx->krx_rpc_reply.msg.status == -EPROTO);
1383 switch (msg->kqm_type) {
1385 CERROR("Bad request type %x from %s\n",
1386 msg->kqm_type, libcfs_nid2str(fromnid));
1389 case QSWLND_MSG_IMMEDIATE:
1390 if (krx->krx_rpc_reply_needed) {
1391 /* Should have been a simple message */
1392 CERROR("IMMEDIATE sent as RPC from %s\n",
1393 libcfs_nid2str(fromnid));
1397 nob = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload);
1398 if (krx->krx_nob < nob) {
1399 CERROR("Short IMMEDIATE %d(%d) from %s\n",
1400 krx->krx_nob, nob, libcfs_nid2str(fromnid));
1405 if (csum0 != msg->kqm_cksum) {
1406 CERROR("Bad IMMEDIATE checksum %08x(%08x) from %s\n",
1407 csum0, msg->kqm_cksum, libcfs_nid2str(fromnid));
1408 CERROR("nob %d (%d)\n", krx->krx_nob, msg->kqm_nob);
1412 rc = lnet_parse(ni, &msg->kqm_u.immediate.kqim_hdr,
1418 case QSWLND_MSG_RDMA:
1419 if (!krx->krx_rpc_reply_needed) {
1420 /* Should have been sent as an RPC */
1421 CERROR("RDMA sent as simple message from %s\n",
1422 libcfs_nid2str(fromnid));
1426 nob = offsetof(kqswnal_msg_t,
1427 kqm_u.rdma.kqrm_rmd.kqrmd_frag[0]);
1428 if (krx->krx_nob < nob) {
1429 CERROR("Short RDMA message %d(%d) from %s\n",
1430 krx->krx_nob, nob, libcfs_nid2str(fromnid));
1435 __swab32s(&msg->kqm_u.rdma.kqrm_rmd.kqrmd_nfrag);
1437 n = msg->kqm_u.rdma.kqrm_rmd.kqrmd_nfrag;
1438 nob = offsetof(kqswnal_msg_t,
1439 kqm_u.rdma.kqrm_rmd.kqrmd_frag[n]);
1441 if (krx->krx_nob < nob) {
1442 CERROR("short RDMA message %d(%d) from %s\n",
1443 krx->krx_nob, nob, libcfs_nid2str(fromnid));
1448 for (i = 0; i < n; i++) {
1449 EP_NMD *nmd = &msg->kqm_u.rdma.kqrm_rmd.kqrmd_frag[i];
1451 __swab32s(&nmd->nmd_addr);
1452 __swab32s(&nmd->nmd_len);
1453 __swab32s(&nmd->nmd_attr);
1458 krx->krx_cksum = csum0; /* stash checksum so far */
1460 rc = lnet_parse(ni, &msg->kqm_u.rdma.kqrm_hdr,
1469 if (msg->kqm_magic == LNET_PROTO_MAGIC ||
1470 msg->kqm_magic == __swab32(LNET_PROTO_MAGIC)) {
1471 /* Future protocol version compatibility support!
1472 * When LNET unifies protocols over all LNDs, the first thing a
1473 * peer will send will be a version query RPC.
1474 * 1.4.6 and 1.4.7.early reply with a status block containing
1475 * LNET_PROTO_QSW_MAGIC.
1476 * Later versions send a failure (-ve) status +
1479 if (!krx->krx_rpc_reply_needed) {
1480 CERROR("Unexpected magic %08x from %s\n",
1481 msg->kqm_magic, libcfs_nid2str(fromnid));
1485 LASSERT (krx->krx_rpc_reply.msg.status == -EPROTO);
1489 CERROR("Unrecognised magic %08x from %s\n",
1490 msg->kqm_magic, libcfs_nid2str(fromnid));
1492 kqswnal_rx_decref(krx);
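/* A minimal sketch (not part of the build) of the receive-side checksum
 * handling above: the wire value of kqm_cksum is saved, excluded from
 * the sum (the save/restore around the kqswnal_csum_kiov() call implies
 * the field is zeroed while summing), and put back before any
 * byte-swapping.  Hypothetical flat-buffer version: */
#if 0
static __u32
sketch_local_cksum (kqswnal_msg_t *msg, int nob)
{
        __u32 wire = msg->kqm_cksum;            /* value from the peer */
        __u32 sum;

        msg->kqm_cksum = 0;                     /* exclude the field itself */
        sum = kqswnal_csum(~0, (char *)msg, nob);
        msg->kqm_cksum = wire;                  /* put the wire value back */

        return sum;             /* caller compares with the wire value */
}
#endif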
1495 /* Receive Interrupt Handler: posts to schedulers */
1497 kqswnal_rxhandler(EP_RXD *rxd)
1499 unsigned long flags;
1500 int nob = ep_rxd_len (rxd);
1501 int status = ep_rxd_status (rxd);
1502 kqswnal_rx_t *krx = (kqswnal_rx_t *)ep_rxd_arg (rxd);
1503 CDEBUG(D_NET, "kqswnal_rxhandler: rxd %p, krx %p, nob %d, status %d\n",
1504 rxd, krx, nob, status);
1506 LASSERT (krx != NULL);
1507 LASSERT (krx->krx_state == KRX_POSTED);
1509 krx->krx_state = KRX_PARSE;
1513 /* RPC reply iff rpc request received without error */
1514 krx->krx_rpc_reply_needed = ep_rxd_isrpc(rxd) &&
1515 (status == EP_SUCCESS ||
1516 status == EP_MSG_TOO_BIG);
1518 /* Default to failure if an RPC reply is requested but not handled */
1519 krx->krx_rpc_reply.msg.status = -EPROTO;
1520 atomic_set (&krx->krx_refcount, 1);
1522 if (status != EP_SUCCESS) {
1523 /* receives complete with failure when receiver is removed */
1524 if (status == EP_SHUTDOWN)
1525 LASSERT (kqswnal_data.kqn_shuttingdown);
1527 CERROR("receive failed with status %d, nob %d\n",
1528 ep_rxd_status(rxd), nob);
1529 kqswnal_rx_decref(krx);
1533 if (!in_interrupt()) {
1538 spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
1540 list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
1541 wake_up (&kqswnal_data.kqn_sched_waitq);
1543 spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
1547 kqswnal_recv (lnet_ni_t *ni,
1554 unsigned int offset,
1558 kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
1562 kqswnal_remotemd_t *rmd;
1566 LASSERT (!in_interrupt ()); /* OK to map */
1567 /* Either all pages or all vaddrs */
1568 LASSERT (!(kiov != NULL && iov != NULL));
1570 fromnid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ep_rxd_node(krx->krx_rxd));
1571 msg = (kqswnal_msg_t *)page_address(krx->krx_kiov[0].kiov_page);
1573 if (krx->krx_rpc_reply_needed) {
1574 /* optimized (rdma) request sent as RPC */
1576 LASSERT (msg->kqm_type == QSWLND_MSG_RDMA);
1577 hdr = &msg->kqm_u.rdma.kqrm_hdr;
1578 rmd = &msg->kqm_u.rdma.kqrm_rmd;
1580 /* NB header is still in wire byte order */
1582 switch (le32_to_cpu(hdr->type)) {
1584 case LNET_MSG_REPLY:
1585 /* This is an optimized PUT/REPLY */
1586 rc = kqswnal_rdma(krx, lntmsg,
1587 KTX_RDMA_FETCH, rmd,
1588 niov, iov, kiov, offset, mlen);
1593 if (krx->krx_cksum != msg->kqm_cksum) {
1594 CERROR("Bad GET checksum %08x(%08x) from %s\n",
1595 krx->krx_cksum, msg->kqm_cksum,
1596 libcfs_nid2str(fromnid));
1601 if (lntmsg == NULL) {
1602 /* No buffer match: my decref will
1603 * complete the RPC with failure */
1606 /* Matched something! */
1607 rc = kqswnal_rdma(krx, lntmsg,
1608 KTX_RDMA_STORE, rmd,
1618 CERROR("Bad RPC type %d\n",
1619 le32_to_cpu(hdr->type));
1624 kqswnal_rx_decref(krx);
1628 LASSERT (msg->kqm_type == QSWLND_MSG_IMMEDIATE);
1629 msg_offset = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload);
1631 if (krx->krx_nob < msg_offset + rlen) {
1632 CERROR("Bad message size from %s: have %d, need %d + %d\n",
1633 libcfs_nid2str(fromnid), krx->krx_nob,
1635 kqswnal_rx_decref(krx);
1640 lnet_copy_kiov2kiov(niov, kiov, offset,
1641 krx->krx_npages, krx->krx_kiov,
1644 lnet_copy_kiov2iov(niov, iov, offset,
1645 krx->krx_npages, krx->krx_kiov,
1648 lnet_finalize(ni, lntmsg, 0);
1649 kqswnal_rx_decref(krx);
1654 kqswnal_thread_start (int (*fn)(void *arg), void *arg)
1656 long pid = kernel_thread (fn, arg, 0);
1661 atomic_inc (&kqswnal_data.kqn_nthreads);
1666 kqswnal_thread_fini (void)
1668 atomic_dec (&kqswnal_data.kqn_nthreads);
1672 kqswnal_scheduler (void *arg)
1676 unsigned long flags;
1681 cfs_daemonize ("kqswnal_sched");
1682 cfs_block_allsigs ();
1684 spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
1690 if (!list_empty (&kqswnal_data.kqn_readyrxds))
1692 krx = list_entry(kqswnal_data.kqn_readyrxds.next,
1693 kqswnal_rx_t, krx_list);
1694 list_del (&krx->krx_list);
1695 spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
1698 LASSERT (krx->krx_state == KRX_PARSE);
1699 kqswnal_parse (krx);
1702 spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
1705 if (!list_empty (&kqswnal_data.kqn_donetxds))
1707 ktx = list_entry(kqswnal_data.kqn_donetxds.next,
1708 kqswnal_tx_t, ktx_schedlist);
1709 list_del_init (&ktx->ktx_schedlist);
1710 spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
1713 kqswnal_tx_done_in_thread_context(ktx);
1716 spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
1719 if (!list_empty (&kqswnal_data.kqn_delayedtxds))
1721 ktx = list_entry(kqswnal_data.kqn_delayedtxds.next,
1722 kqswnal_tx_t, ktx_schedlist);
1723 list_del_init (&ktx->ktx_schedlist);
1724 spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
1727 rc = kqswnal_launch (ktx);
1729 CERROR("Failed delayed transmit to %s: %d\n",
1730 libcfs_nid2str(ktx->ktx_nid), rc);
1731 kqswnal_tx_done (ktx, rc);
1733 atomic_dec (&kqswnal_data.kqn_pending_txs);
1736 spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
1739 /* nothing to do or hogging CPU */
1740 if (!did_something || counter++ == KQSW_RESCHED) {
1741 spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
1746 if (!did_something) {
1747 if (kqswnal_data.kqn_shuttingdown == 2) {
1748 /* We only exit in stage 2 of shutdown when
1749 * there's nothing left to do */
1752 rc = wait_event_interruptible_exclusive (
1753 kqswnal_data.kqn_sched_waitq,
1754 kqswnal_data.kqn_shuttingdown == 2 ||
1755 !list_empty(&kqswnal_data.kqn_readyrxds) ||
1756 !list_empty(&kqswnal_data.kqn_donetxds) ||
1757 !list_empty(&kqswnal_data.kqn_delayedtxds));
1759 } else if (need_resched())
1762 spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
1766 kqswnal_thread_fini ();
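/* A minimal sketch (not part of the build) of the loop discipline above:
 * the scheduler holds kqn_sched_lock only while inspecting and dequeueing,
 * drops it to do the actual work, and voluntarily reschedules after a
 * bounded number of busy iterations (KQSW_RESCHED) so it cannot hog the
 * CPU.  The helper and its arguments are hypothetical. */
#if 0
static void
sketch_scheduler_pass (spinlock_t *lock, struct list_head *queue,
                       void (*work)(struct list_head *item))
{
        struct list_head *item;
        unsigned long     flags;

        spin_lock_irqsave(lock, flags);

        while (!list_empty(queue)) {
                item = queue->next;
                list_del_init(item);
                spin_unlock_irqrestore(lock, flags);

                work(item);                     /* run without the lock */

                spin_lock_irqsave(lock, flags);
        }

        spin_unlock_irqrestore(lock, flags);
}
#endif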