/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Portals, http://www.lustre.org
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
void
kqswnal_notify_peer_down(kqswnal_tx_t *ktx)
{
        time_t then;

        then = cfs_time_current_sec() -
               cfs_duration_sec(cfs_time_current() -
                                ktx->ktx_launchtime);

        lnet_notify(kqswnal_data.kqn_ni, ktx->ktx_nid, 0, then);
}
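
/* NB: cfs_time_current() is in jiffies while lnet_notify() takes wall-clock
 * seconds, so the age of the tx (now minus launch time, converted to a
 * duration in seconds) is subtracted from the current wall-clock time;
 * this timestamps the failure at the moment the tx was launched, the last
 * point at which we knew anything about the peer. */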
void
kqswnal_unmap_tx (kqswnal_tx_t *ktx)
{
        int i;

        ktx->ktx_rail = -1;                     /* unset rail */

        if (ktx->ktx_nmappedpages == 0)
                return;

        CDEBUG(D_NET, "%p unloading %d frags starting at %d\n",
               ktx, ktx->ktx_nfrag, ktx->ktx_firsttmpfrag);

        for (i = ktx->ktx_firsttmpfrag; i < ktx->ktx_nfrag; i++)
                ep_dvma_unload(kqswnal_data.kqn_ep,
                               kqswnal_data.kqn_ep_tx_nmh,
                               &ktx->ktx_frags[i]);

        ktx->ktx_nmappedpages = 0;
}
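
/* NB: only the temporary payload mappings (frags from ktx_firsttmpfrag
 * onward) are unloaded; earlier frags describe the permanently pre-mapped
 * tx buffer and stay loaded for the life of the descriptor. */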
int
kqswnal_map_tx_kiov (kqswnal_tx_t *ktx, int offset, int nob,
                     unsigned int niov, lnet_kiov_t *kiov)
{
        int          nfrags    = ktx->ktx_nfrag;
        int          nmapped   = ktx->ktx_nmappedpages;
        int          maxmapped = ktx->ktx_npages;
        __u32        basepage  = ktx->ktx_basepage + nmapped;
        char        *ptr;
        EP_RAILMASK  railmask;
        int          rail;

        if (ktx->ktx_rail < 0)
                ktx->ktx_rail = ep_xmtr_prefrail(kqswnal_data.kqn_eptx,
                                                 EP_RAILMASK_ALL,
                                                 kqswnal_nid2elanid(ktx->ktx_nid));
        rail = ktx->ktx_rail;
        if (rail < 0) {
                CERROR("No rails available for %s\n",
                       libcfs_nid2str(ktx->ktx_nid));
                return (-ENETDOWN);
        }
        railmask = 1 << rail;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags >= ktx->ktx_firsttmpfrag);
        LASSERT (nfrags <= EP_MAXFRAG);

        /* skip complete frags before 'offset' */
        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                kiov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                int fraglen = kiov->kiov_len - offset;

                /* each page frag is contained in one page */
                LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);

                if (fraglen > nob)
                        fraglen = nob;

                nmapped++;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == EP_MAXFRAG) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               EP_MAXFRAG);
                        return (-EMSGSIZE);
                }

                /* XXX this is really crap, but we'll have to kmap until
                 * EKC has a page (rather than vaddr) mapping interface */
                ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, page %d, %d total\n",
                       ktx, nfrags, ptr, fraglen, basepage, nmapped);

                ep_dvma_load(kqswnal_data.kqn_ep, NULL, ptr, fraglen,
                             kqswnal_data.kqn_ep_tx_nmh, basepage,
                             &railmask, &ktx->ktx_frags[nfrags]);

                if (nfrags == ktx->ktx_firsttmpfrag ||
                    !ep_nmd_merge(&ktx->ktx_frags[nfrags - 1],
                                  &ktx->ktx_frags[nfrags - 1],
                                  &ktx->ktx_frags[nfrags]))
                        /* new frag if this is the first or can't merge */
                        nfrags++;

                kunmap (kiov->kiov_page);

                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                basepage++;
                kiov++;
                niov--;
                nob -= fraglen;
                offset = 0;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);
        } while (nob > 0);

        ktx->ktx_nfrag = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_nfrag, ktx->ktx_nmappedpages);
        return (0);
}
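
/* NB: ep_nmd_merge() lets the loop coalesce the newly-loaded page with the
 * previous fragment whenever the two are contiguous in Elan network
 * virtual address space, so a page-scattered payload usually needs far
 * fewer than one frag per page and stays under EP_MAXFRAG.  The skip loop
 * at the top is the usual scatter/gather cursor: with kiov lengths
 * {4096, 4096} and offset 5000, it consumes the first frag (offset
 * becomes 904) and mapping starts 904 bytes into the second page. */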
static __u32
kqswnal_csum_kiov (__u32 csum, int offset, int nob,
                   unsigned int niov, lnet_kiov_t *kiov)
{
        char *ptr;

        /* skip complete frags before 'offset' */
        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                kiov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                int fraglen = kiov->kiov_len - offset;

                /* each page frag is contained in one page */
                LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);

                if (fraglen > nob)
                        fraglen = nob;

                ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset + offset;

                csum = kqswnal_csum(csum, ptr, fraglen);

                kunmap (kiov->kiov_page);

                kiov++;
                niov--;
                nob -= fraglen;
                offset = 0;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);
        } while (nob > 0);

        return csum;
}
int
kqswnal_map_tx_iov (kqswnal_tx_t *ktx, int offset, int nob,
                    unsigned int niov, struct iovec *iov)
{
        int          nfrags    = ktx->ktx_nfrag;
        int          nmapped   = ktx->ktx_nmappedpages;
        int          maxmapped = ktx->ktx_npages;
        __u32        basepage  = ktx->ktx_basepage + nmapped;
        EP_RAILMASK  railmask;
        int          rail;

        if (ktx->ktx_rail < 0)
                ktx->ktx_rail = ep_xmtr_prefrail(kqswnal_data.kqn_eptx,
                                                 EP_RAILMASK_ALL,
                                                 kqswnal_nid2elanid(ktx->ktx_nid));
        rail = ktx->ktx_rail;
        if (rail < 0) {
                CERROR("No rails available for %s\n",
                       libcfs_nid2str(ktx->ktx_nid));
                return (-ENETDOWN);
        }
        railmask = 1 << rail;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags >= ktx->ktx_firsttmpfrag);
        LASSERT (nfrags <= EP_MAXFRAG);

        /* skip complete frags before offset */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                int  fraglen = iov->iov_len - offset;
                long npages;

                if (fraglen > nob)
                        fraglen = nob;

                npages = kqswnal_pages_spanned (iov->iov_base, fraglen);

                nmapped += npages;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == EP_MAXFRAG) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               EP_MAXFRAG);
                        return (-EMSGSIZE);
                }

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, pages %d for %ld, %d total\n",
                       ktx, nfrags, iov->iov_base + offset, fraglen,
                       basepage, npages, nmapped);

                ep_dvma_load(kqswnal_data.kqn_ep, NULL,
                             iov->iov_base + offset, fraglen,
                             kqswnal_data.kqn_ep_tx_nmh, basepage,
                             &railmask, &ktx->ktx_frags[nfrags]);

                if (nfrags == ktx->ktx_firsttmpfrag ||
                    !ep_nmd_merge(&ktx->ktx_frags[nfrags - 1],
                                  &ktx->ktx_frags[nfrags - 1],
                                  &ktx->ktx_frags[nfrags]))
                        /* new frag if this is the first or can't merge */
                        nfrags++;

                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                basepage += npages;
                iov++;
                niov--;
                nob -= fraglen;
                offset = 0;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);
        } while (nob > 0);

        ktx->ktx_nfrag = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_nfrag, ktx->ktx_nmappedpages);
        return (0);
}
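
/* NB: unlike the kiov case, a vaddr iovec fragment may straddle page
 * boundaries (there is no one-page LASSERT here), so
 * kqswnal_pages_spanned() charges one mapping slot per page spanned
 * rather than one per fragment. */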
static __u32
kqswnal_csum_iov (__u32 csum, int offset, int nob,
                  unsigned int niov, struct iovec *iov)
{
        /* skip complete frags before offset */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                iov++;
                niov--;
                LASSERT (niov > 0);
        }

        do {
                int fraglen = iov->iov_len - offset;

                if (fraglen > nob)
                        fraglen = nob;

                csum = kqswnal_csum(csum, iov->iov_base + offset, fraglen);

                iov++;
                niov--;
                nob -= fraglen;
                offset = 0;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);
        } while (nob > 0);

        return csum;
}
void
kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
{
        unsigned long flags;

        kqswnal_unmap_tx(ktx);                  /* release temporary mappings */
        ktx->ktx_state = KTX_IDLE;

        spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);

        cfs_list_del(&ktx->ktx_list);           /* take off active list */
        cfs_list_add(&ktx->ktx_list, &kqswnal_data.kqn_idletxds);

        spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
}
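
/* NB: a tx descriptor is always on exactly one of kqn_activetxds or
 * kqn_idletxds, and moves between them only under kqn_idletxd_lock. */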
kqswnal_tx_t *
kqswnal_get_idle_tx (void)
{
        unsigned long  flags;
        kqswnal_tx_t  *ktx;

        spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);

        if (kqswnal_data.kqn_shuttingdown ||
            cfs_list_empty (&kqswnal_data.kqn_idletxds)) {
                spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock,
                                        flags);
                return NULL;
        }

        ktx = cfs_list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t,
                              ktx_list);
        cfs_list_del (&ktx->ktx_list);

        cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
        ktx->ktx_launcher = current->pid;
        cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);

        spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);

        /* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
        LASSERT (ktx->ktx_nmappedpages == 0);
        return ktx;
}
void
kqswnal_tx_done_in_thread_context (kqswnal_tx_t *ktx)
{
        lnet_msg_t   *lnetmsg0 = NULL;
        lnet_msg_t   *lnetmsg1 = NULL;
        int           status0  = 0;
        int           status1  = 0;
        kqswnal_rx_t *krx;

        LASSERT (!cfs_in_interrupt());

        if (ktx->ktx_status == -EHOSTDOWN)
                kqswnal_notify_peer_down(ktx);

        switch (ktx->ktx_state) {
        case KTX_RDMA_FETCH:            /* optimized PUT/REPLY handled */
                krx      = (kqswnal_rx_t *)ktx->ktx_args[0];
                lnetmsg0 = (lnet_msg_t *)ktx->ktx_args[1];
                status0  = ktx->ktx_status;

                if (status0 == 0) {             /* RDMA succeeded */
                        kqswnal_msg_t *msg;
                        __u32          csum;

                        msg = (kqswnal_msg_t *)
                              page_address(krx->krx_kiov[0].kiov_page);

                        csum = (lnetmsg0->msg_kiov != NULL) ?
                               kqswnal_csum_kiov(krx->krx_cksum,
                                                 lnetmsg0->msg_offset,
                                                 lnetmsg0->msg_wanted,
                                                 lnetmsg0->msg_niov,
                                                 lnetmsg0->msg_kiov) :
                               kqswnal_csum_iov(krx->krx_cksum,
                                                lnetmsg0->msg_offset,
                                                lnetmsg0->msg_wanted,
                                                lnetmsg0->msg_niov,
                                                lnetmsg0->msg_iov);

                        /* Can only check csum if I got it all */
                        if (lnetmsg0->msg_wanted == lnetmsg0->msg_len &&
                            csum != msg->kqm_cksum) {
                                ktx->ktx_status = -EIO;
                                krx->krx_rpc_reply.msg.status = -EIO;
                                CERROR("RDMA checksum failed %u(%u) from %s\n",
                                       csum, msg->kqm_cksum,
                                       libcfs_nid2str(kqswnal_rx_nid(krx)));
                        }
                }

                LASSERT (krx->krx_state == KRX_COMPLETING);
                kqswnal_rx_decref (krx);
                break;

        case KTX_RDMA_STORE:            /* optimized GET handled */
        case KTX_PUTTING:               /* optimized PUT sent */
        case KTX_SENDING:               /* normal send */
                lnetmsg0 = (lnet_msg_t *)ktx->ktx_args[1];
                status0  = ktx->ktx_status;
                break;

        case KTX_GETTING:       /* optimized GET sent & payload received */
                /* Complete the GET with success since we can't avoid
                 * delivering a REPLY event; we committed to it when we
                 * launched the GET */
                lnetmsg0 = (lnet_msg_t *)ktx->ktx_args[1];
                status0  = 0;
                lnetmsg1 = (lnet_msg_t *)ktx->ktx_args[2];
                status1  = ktx->ktx_status;

                if (status1 == 0) {             /* RDMA succeeded */
                        lnet_libmd_t *md = lnetmsg0->msg_md;
                        __u32         csum;

                        csum = ((md->md_options & LNET_MD_KIOV) != 0) ?
                               kqswnal_csum_kiov(~0, 0,
                                                 md->md_length,
                                                 md->md_niov,
                                                 md->md_iov.kiov) :
                               kqswnal_csum_iov(~0, 0,
                                                md->md_length,
                                                md->md_niov,
                                                md->md_iov.iov);

                        if (csum != ktx->ktx_cksum) {
                                CERROR("RDMA checksum failed %u(%u) from %s\n",
                                       csum, ktx->ktx_cksum,
                                       libcfs_nid2str(ktx->ktx_nid));
                                status1 = -EIO;
                        }
                }
                break;
        }

        kqswnal_put_idle_tx (ktx);

        lnet_finalize (kqswnal_data.kqn_ni, lnetmsg0, status0);
        if (lnetmsg1 != NULL)
                lnet_finalize (kqswnal_data.kqn_ni, lnetmsg1, status1);
}
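
/* NB: the descriptor is returned to the idle pool *before* lnet_finalize()
 * runs, presumably so that anything finalization triggers (e.g. another
 * send) can claim it immediately. */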
void
kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
{
        unsigned long flags;

        ktx->ktx_status = status;

        if (!cfs_in_interrupt()) {
                kqswnal_tx_done_in_thread_context(ktx);
                return;
        }

        /* Complete the send in thread context */
        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);

        cfs_list_add_tail(&ktx->ktx_schedlist,
                          &kqswnal_data.kqn_donetxds);
        cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
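
/* NB: completion work may kmap() pages and call lnet_finalize(), neither
 * of which is safe in interrupt context, so from interrupts the tx is
 * queued on kqn_donetxds and kqswnal_scheduler() finishes it. */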
void
kqswnal_txhandler(EP_TXD *txd, void *arg, int status)
{
        kqswnal_tx_t        *ktx = (kqswnal_tx_t *)arg;
        kqswnal_rpc_reply_t *reply;

        LASSERT (txd != NULL);
        LASSERT (ktx != NULL);

        CDEBUG(D_NET, "txd %p, arg %p status %d\n", txd, arg, status);

        if (status != EP_SUCCESS) {
                CNETERR("Tx completion to %s failed: %d\n",
                        libcfs_nid2str(ktx->ktx_nid), status);
                status = -EHOSTDOWN;

        } else switch (ktx->ktx_state) {
        case KTX_GETTING:
        case KTX_PUTTING:
                /* RPC complete! */
                reply = (kqswnal_rpc_reply_t *)ep_txd_statusblk(txd);
                if (reply->msg.magic == 0) {    /* "old" peer */
                        status = reply->msg.status;
                        break;
                }

                if (reply->msg.magic != LNET_PROTO_QSW_MAGIC) {
                        if (reply->msg.magic != __swab32(LNET_PROTO_QSW_MAGIC)) {
                                CERROR("%s unexpected rpc reply magic %08x\n",
                                       libcfs_nid2str(ktx->ktx_nid),
                                       reply->msg.magic);
                                status = -EPROTO;
                                break;
                        }

                        __swab32s(&reply->msg.status);
                        __swab32s(&reply->msg.version);

                        if (ktx->ktx_state == KTX_GETTING) {
                                __swab32s(&reply->msg.u.get.len);
                                __swab32s(&reply->msg.u.get.cksum);
                        }
                }

                status = reply->msg.status;
                if (status != 0) {
                        CERROR("%s RPC status %08x\n",
                               libcfs_nid2str(ktx->ktx_nid), status);
                        break;
                }

                if (ktx->ktx_state == KTX_GETTING) {
                        lnet_set_reply_msg_len(kqswnal_data.kqn_ni,
                                               (lnet_msg_t *)ktx->ktx_args[2],
                                               reply->msg.u.get.len);
                        ktx->ktx_cksum = reply->msg.u.get.cksum;
                }
                break;

        case KTX_SENDING:
                status = 0;
                break;
        }

        kqswnal_tx_done(ktx, status);
}
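
/* NB: the checksum stashed in ktx_cksum above is compared against a
 * recomputed checksum of the GET payload in
 * kqswnal_tx_done_in_thread_context(). */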
int
kqswnal_launch (kqswnal_tx_t *ktx)
{
        /* Don't block for transmit descriptor if we're in interrupt context */
        int   attr = cfs_in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
        int   dest = kqswnal_nid2elanid (ktx->ktx_nid);
        unsigned long flags;
        int   rc;

        ktx->ktx_launchtime = cfs_time_current();

        if (kqswnal_data.kqn_shuttingdown)
                return (-ESHUTDOWN);

        LASSERT (dest >= 0);                    /* must be a peer */

        if (ktx->ktx_nmappedpages != 0)
                attr = EP_SET_PREFRAIL(attr, ktx->ktx_rail);

        switch (ktx->ktx_state) {
        case KTX_GETTING:
        case KTX_PUTTING:
                if (the_lnet.ln_testprotocompat != 0) {
                        kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer;

                        /* single-shot proto test:
                         * Future version queries will use an RPC, so I'll
                         * co-opt one of the existing ones */
                        if ((the_lnet.ln_testprotocompat & 1) != 0) {
                                msg->kqm_version++;
                                the_lnet.ln_testprotocompat &= ~1;
                        }
                        if ((the_lnet.ln_testprotocompat & 2) != 0) {
                                msg->kqm_magic = LNET_PROTO_MAGIC;
                                the_lnet.ln_testprotocompat &= ~2;
                        }
                }

                /* NB ktx_frag[0] is the GET/PUT hdr + kqswnal_remotemd_t.
                 * The other frags are the payload, awaiting RDMA */
                rc = ep_transmit_rpc(kqswnal_data.kqn_eptx, dest,
                                     ktx->ktx_port, attr,
                                     kqswnal_txhandler, ktx,
                                     NULL, ktx->ktx_frags, 1);
                break;

        case KTX_SENDING:
                rc = ep_transmit_message(kqswnal_data.kqn_eptx, dest,
                                         ktx->ktx_port, attr,
                                         kqswnal_txhandler, ktx,
                                         NULL, ktx->ktx_frags, ktx->ktx_nfrag);
                break;

        default:
                LBUG();
                rc = -EINVAL;                   /* no compiler warning please */
                break;
        }

        switch (rc) {
        case EP_SUCCESS:        /* success */
                return (0);

        case EP_ENOMEM:         /* can't allocate ep txd => queue for later */
                spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);

                cfs_list_add_tail(&ktx->ktx_schedlist,
                                  &kqswnal_data.kqn_delayedtxds);
                cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);

                spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                       flags);
                return (0);

        default:                /* fatal error */
                CNETERR ("Tx to %s failed: %d\n",
                         libcfs_nid2str(ktx->ktx_nid), rc);
                kqswnal_notify_peer_down(ktx);
                return (-EHOSTUNREACH);
        }
}
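
/* NB: EP_ENOMEM is not a failure: the tx is parked on kqn_delayedtxds and
 * kqswnal_scheduler() re-runs kqswnal_launch() once EKC transmit
 * descriptors free up. */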
static char *
hdr_type_string (lnet_hdr_t *hdr)
{
        switch (le32_to_cpu(hdr->type)) {
        case LNET_MSG_ACK:
                return ("ACK");
        case LNET_MSG_PUT:
                return ("PUT");
        case LNET_MSG_GET:
                return ("GET");
        case LNET_MSG_REPLY:
                return ("REPLY");
        default:
                return ("<UNKNOWN>");
        }
}
void
kqswnal_cerror_hdr(lnet_hdr_t *hdr)
{
        char *type_str = hdr_type_string (hdr);

        CERROR("P3 Header at %p of type %s length %d\n", hdr, type_str,
               le32_to_cpu(hdr->payload_length));
        CERROR("    From nid/pid "LPU64"/%u\n", le64_to_cpu(hdr->src_nid),
               le32_to_cpu(hdr->src_pid));
        CERROR("    To nid/pid "LPU64"/%u\n", le64_to_cpu(hdr->dest_nid),
               le32_to_cpu(hdr->dest_pid));

        switch (le32_to_cpu(hdr->type)) {
        case LNET_MSG_PUT:
                CERROR("    Ptl index %d, ack md "LPX64"."LPX64", "
                       "match bits "LPX64"\n",
                       le32_to_cpu(hdr->msg.put.ptl_index),
                       hdr->msg.put.ack_wmd.wh_interface_cookie,
                       hdr->msg.put.ack_wmd.wh_object_cookie,
                       le64_to_cpu(hdr->msg.put.match_bits));
                CERROR("    offset %d, hdr data "LPX64"\n",
                       le32_to_cpu(hdr->msg.put.offset),
                       hdr->msg.put.hdr_data);
                break;

        case LNET_MSG_GET:
                CERROR("    Ptl index %d, return md "LPX64"."LPX64", "
                       "match bits "LPX64"\n",
                       le32_to_cpu(hdr->msg.get.ptl_index),
                       hdr->msg.get.return_wmd.wh_interface_cookie,
                       hdr->msg.get.return_wmd.wh_object_cookie,
                       le64_to_cpu(hdr->msg.get.match_bits));
                CERROR("    Length %d, src offset %d\n",
                       le32_to_cpu(hdr->msg.get.sink_length),
                       le32_to_cpu(hdr->msg.get.src_offset));
                break;

        case LNET_MSG_ACK:
                CERROR("    dst md "LPX64"."LPX64", manipulated length %d\n",
                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
                       hdr->msg.ack.dst_wmd.wh_object_cookie,
                       le32_to_cpu(hdr->msg.ack.mlength));
                break;

        case LNET_MSG_REPLY:
                CERROR("    dst md "LPX64"."LPX64"\n",
                       hdr->msg.reply.dst_wmd.wh_interface_cookie,
                       hdr->msg.reply.dst_wmd.wh_object_cookie);
                break;
        }
}                               /* end of kqswnal_cerror_hdr() */
int
kqswnal_check_rdma (int nlfrag, EP_NMD *lfrag,
                    int nrfrag, EP_NMD *rfrag)
{
        int i;

        if (nlfrag != nrfrag) {
                CERROR("Can't cope with unequal # frags: %d local %d remote\n",
                       nlfrag, nrfrag);
                return (-EINVAL);
        }

        for (i = 0; i < nlfrag; i++)
                if (lfrag[i].nmd_len != rfrag[i].nmd_len) {
                        CERROR("Can't cope with unequal frags %d(%d):"
                               " %d local %d remote\n",
                               i, nlfrag, lfrag[i].nmd_len, rfrag[i].nmd_len);
                        return (-EINVAL);
                }

        return (0);
}
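
/* NB: local and remote fragment lists must match pairwise in count and
 * length, presumably because EKC pairs them positionally when it programs
 * the RDMA; there is no scatter/gather re-blocking between the two
 * sides. */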
kqswnal_remotemd_t *
kqswnal_get_portalscompat_rmd (kqswnal_rx_t *krx)
{
        /* Check that the RMD sent after the "raw" LNET header in a
         * portals-compatible QSWLND message is OK */
        char               *buffer = (char *)page_address(krx->krx_kiov[0].kiov_page);
        kqswnal_remotemd_t *rmd = (kqswnal_remotemd_t *)(buffer + sizeof(lnet_hdr_t));

        /* Note RDMA addresses are sent in native endian-ness in the "old"
         * portals protocol so no swabbing... */

        if (buffer + krx->krx_nob < (char *)(rmd + 1)) {
                /* msg too small to discover rmd size */
                CERROR ("Incoming message [%d] too small for RMD (%d needed)\n",
                        krx->krx_nob, (int)(((char *)(rmd + 1)) - buffer));
                return (NULL);
        }

        if (buffer + krx->krx_nob < (char *)&rmd->kqrmd_frag[rmd->kqrmd_nfrag]) {
                /* rmd doesn't fit in the incoming message */
                CERROR ("Incoming message [%d] too small for RMD[%d] (%d needed)\n",
                        krx->krx_nob, rmd->kqrmd_nfrag,
                        (int)(((char *)&rmd->kqrmd_frag[rmd->kqrmd_nfrag]) - buffer));
                return (NULL);
        }

        return (rmd);
}
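
/* NB: the bounds checks run in two steps because kqrmd_nfrag itself lives
 * in the message: first ensure the fixed-size RMD header fits (so
 * kqrmd_nfrag can be read safely), then validate the full variable-length
 * fragment array against the received byte count. */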
static void
kqswnal_rdma_store_complete (EP_RXD *rxd)
{
        int           status = ep_rxd_status(rxd);
        kqswnal_tx_t *ktx    = (kqswnal_tx_t *)ep_rxd_arg(rxd);
        kqswnal_rx_t *krx    = (kqswnal_rx_t *)ktx->ktx_args[0];

        CDEBUG((status == EP_SUCCESS) ? D_NET : D_ERROR,
               "rxd %p, ktx %p, status %d\n", rxd, ktx, status);

        LASSERT (ktx->ktx_state == KTX_RDMA_STORE);
        LASSERT (krx->krx_rxd == rxd);
        LASSERT (krx->krx_rpc_reply_needed);

        krx->krx_rpc_reply_needed = 0;
        kqswnal_rx_decref (krx);

        /* free ktx & finalize() its lnet_msg_t */
        kqswnal_tx_done(ktx, (status == EP_SUCCESS) ? 0 : -ECONNABORTED);
}
static void
kqswnal_rdma_fetch_complete (EP_RXD *rxd)
{
        /* Completed fetching the PUT/REPLY data */
        int           status = ep_rxd_status(rxd);
        kqswnal_tx_t *ktx    = (kqswnal_tx_t *)ep_rxd_arg(rxd);
        kqswnal_rx_t *krx    = (kqswnal_rx_t *)ktx->ktx_args[0];

        CDEBUG((status == EP_SUCCESS) ? D_NET : D_ERROR,
               "rxd %p, ktx %p, status %d\n", rxd, ktx, status);

        LASSERT (ktx->ktx_state == KTX_RDMA_FETCH);
        LASSERT (krx->krx_rxd == rxd);
        /* RPC completes with failure by default */
        LASSERT (krx->krx_rpc_reply_needed);
        LASSERT (krx->krx_rpc_reply.msg.status != 0);

        if (status == EP_SUCCESS) {
                krx->krx_rpc_reply.msg.status = 0;
                status = 0;
        } else {
                /* Abandon RPC since get failed */
                krx->krx_rpc_reply_needed = 0;
                status = -ECONNABORTED;
        }

        /* krx gets decref'd in kqswnal_tx_done_in_thread_context() */
        LASSERT (krx->krx_state == KRX_PARSE);
        krx->krx_state = KRX_COMPLETING;

        /* free ktx & finalize() its lnet_msg_t */
        kqswnal_tx_done(ktx, status);
}
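
/* NB: krx_rpc_reply.msg.status is initialised to -EPROTO when the RPC
 * arrives, so an RPC abandoned anywhere along the way completes with
 * failure by default; only an explicit success path zeroes it. */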
int
kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg,
              int type, kqswnal_remotemd_t *rmd,
              unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
              unsigned int offset, unsigned int len)
{
        kqswnal_tx_t *ktx;
        int           eprc;
        int           rc;

        /* Not both mapped and paged payload */
        LASSERT (iov == NULL || kiov == NULL);
        /* RPC completes with failure by default */
        LASSERT (krx->krx_rpc_reply_needed);
        LASSERT (krx->krx_rpc_reply.msg.status != 0);

        if (len == 0) {
                /* data got truncated to nothing. */
                lnet_finalize(kqswnal_data.kqn_ni, lntmsg, 0);
                /* Let kqswnal_rx_done() complete the RPC with success */
                krx->krx_rpc_reply.msg.status = 0;
                return (0);
        }

        /* NB I'm using 'ktx' just to map the local RDMA buffers; I'm not
         * actually sending a portals message with it */
        ktx = kqswnal_get_idle_tx();
        if (ktx == NULL) {
                CERROR ("Can't get txd for RDMA with %s\n",
                        libcfs_nid2str(kqswnal_rx_nid(krx)));
                return (-ENOMEM);
        }

        ktx->ktx_state   = type;
        ktx->ktx_nid     = kqswnal_rx_nid(krx);
        ktx->ktx_args[0] = krx;
        ktx->ktx_args[1] = lntmsg;

        LASSERT (cfs_atomic_read(&krx->krx_refcount) > 0);
        /* Take an extra ref for the completion callback */
        cfs_atomic_inc(&krx->krx_refcount);

        /* Map on the rail the RPC prefers */
        ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx,
                                         ep_rxd_railmask(krx->krx_rxd));

        /* Start mapping at offset 0 (we're not mapping any headers) */
        ktx->ktx_nfrag = ktx->ktx_firsttmpfrag = 0;

        if (kiov != NULL)
                rc = kqswnal_map_tx_kiov(ktx, offset, len, niov, kiov);
        else
                rc = kqswnal_map_tx_iov(ktx, offset, len, niov, iov);
        if (rc != 0) {
                CERROR ("Can't map local RDMA data: %d\n", rc);
                goto out;
        }

        rc = kqswnal_check_rdma (ktx->ktx_nfrag, ktx->ktx_frags,
                                 rmd->kqrmd_nfrag, rmd->kqrmd_frag);
        if (rc != 0) {
                CERROR ("Incompatible RDMA descriptors\n");
                goto out;
        }

        switch (type) {
        case KTX_RDMA_STORE:
                krx->krx_rpc_reply.msg.status    = 0;
                krx->krx_rpc_reply.msg.magic     = LNET_PROTO_QSW_MAGIC;
                krx->krx_rpc_reply.msg.version   = QSWLND_PROTO_VERSION;
                krx->krx_rpc_reply.msg.u.get.len = len;

                krx->krx_rpc_reply.msg.u.get.cksum = (kiov != NULL) ?
                        kqswnal_csum_kiov(~0, offset, len, niov, kiov) :
                        kqswnal_csum_iov(~0, offset, len, niov, iov);
                if (*kqswnal_tunables.kqn_inject_csum_error == 4) {
                        krx->krx_rpc_reply.msg.u.get.cksum++;
                        *kqswnal_tunables.kqn_inject_csum_error = 0;
                }

                eprc = ep_complete_rpc(krx->krx_rxd,
                                       kqswnal_rdma_store_complete, ktx,
                                       &krx->krx_rpc_reply.ep_statusblk,
                                       ktx->ktx_frags, rmd->kqrmd_frag,
                                       rmd->kqrmd_nfrag);
                if (eprc != EP_SUCCESS) {
                        CERROR("can't complete RPC: %d\n", eprc);
                        /* don't re-attempt RPC completion */
                        krx->krx_rpc_reply_needed = 0;
                        rc = -ECONNABORTED;
                }
                break;

        case KTX_RDMA_FETCH:
                eprc = ep_rpc_get (krx->krx_rxd,
                                   kqswnal_rdma_fetch_complete, ktx,
                                   rmd->kqrmd_frag, ktx->ktx_frags, ktx->ktx_nfrag);
                if (eprc != EP_SUCCESS) {
                        CERROR("ep_rpc_get failed: %d\n", eprc);
                        /* Don't attempt RPC completion:
                         * EKC nuked it when the get failed */
                        krx->krx_rpc_reply_needed = 0;
                        rc = -ECONNABORTED;
                }
                break;
        }

 out:
        if (rc != 0) {
                kqswnal_rx_decref(krx);         /* drop callback's ref */
                kqswnal_put_idle_tx (ktx);
        }

        cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
        return (rc);
}
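
/* NB: kqn_inject_csum_error is a single-shot fault injector for testing:
 * each value used in this file (1..4) corrupts one checksum at a
 * different point in the path and then resets the tunable to zero. */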
int
kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
        lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
        int               type = lntmsg->msg_type;
        lnet_process_id_t target = lntmsg->msg_target;
        int               target_is_router = lntmsg->msg_target_is_router;
        int               routing = lntmsg->msg_routing;
        unsigned int      payload_niov = lntmsg->msg_niov;
        struct iovec     *payload_iov = lntmsg->msg_iov;
        lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
        unsigned int      payload_offset = lntmsg->msg_offset;
        unsigned int      payload_nob = lntmsg->msg_len;
        int               nob;
        kqswnal_tx_t     *ktx;
        int               rc;

        /* NB 1. hdr is in network byte order */
        /*    2. 'private' depends on the message type */

        CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= LNET_MAX_IOV);

        /* It must be OK to kmap() if required */
        LASSERT (payload_kiov == NULL || !cfs_in_interrupt ());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        if (kqswnal_nid2elanid (target.nid) < 0) {
                CERROR("%s not in my cluster\n", libcfs_nid2str(target.nid));
                return -EIO;
        }

        /* I may not block for a transmit descriptor if I might block the
         * router, receiver, or an interrupt handler. */
        ktx = kqswnal_get_idle_tx();
        if (ktx == NULL) {
                CERROR ("Can't get txd for msg type %d for %s\n",
                        type, libcfs_nid2str(target.nid));
                return (-ENOMEM);
        }

        ktx->ktx_state   = KTX_SENDING;
        ktx->ktx_nid     = target.nid;
        ktx->ktx_args[0] = private;
        ktx->ktx_args[1] = lntmsg;
        ktx->ktx_args[2] = NULL;        /* set when a GET commits to REPLY */

        /* The first frag will be the pre-mapped buffer. */
        ktx->ktx_nfrag = ktx->ktx_firsttmpfrag = 1;

        if ((!target_is_router &&               /* target.nid is final dest */
             !routing &&                        /* I'm the source */
             type == LNET_MSG_GET &&            /* optimize GET? */
             *kqswnal_tunables.kqn_optimized_gets != 0 &&
             lntmsg->msg_md->md_length >=
             *kqswnal_tunables.kqn_optimized_gets) ||
            ((type == LNET_MSG_PUT ||           /* optimize PUT? */
              type == LNET_MSG_REPLY) &&        /* optimize REPLY? */
             *kqswnal_tunables.kqn_optimized_puts != 0 &&
             payload_nob >= *kqswnal_tunables.kqn_optimized_puts)) {
                lnet_libmd_t       *md = lntmsg->msg_md;
                kqswnal_msg_t      *msg = (kqswnal_msg_t *)ktx->ktx_buffer;
                lnet_hdr_t         *mhdr;
                kqswnal_remotemd_t *rmd;

                /* Optimised path: I send over the Elan vaddrs of the local
                 * buffers, and my peer DMAs directly to/from them.
                 *
                 * First I set up ktx as if it was going to send this
                 * payload, (it needs to map it anyway).  This fills
                 * ktx_frags[1] and onward with the network addresses
                 * of the buffer frags. */

                /* Send an RDMA message */
                msg->kqm_magic   = LNET_PROTO_QSW_MAGIC;
                msg->kqm_version = QSWLND_PROTO_VERSION;
                msg->kqm_type    = QSWLND_MSG_RDMA;

                mhdr = &msg->kqm_u.rdma.kqrm_hdr;
                rmd  = &msg->kqm_u.rdma.kqrm_rmd;

                *mhdr = *hdr;
                nob = (((char *)rmd) - ktx->ktx_buffer);

                if (type == LNET_MSG_GET) {
                        if ((md->md_options & LNET_MD_KIOV) != 0)
                                rc = kqswnal_map_tx_kiov (ktx, 0, md->md_length,
                                                          md->md_niov, md->md_iov.kiov);
                        else
                                rc = kqswnal_map_tx_iov (ktx, 0, md->md_length,
                                                         md->md_niov, md->md_iov.iov);
                        ktx->ktx_state = KTX_GETTING;
                } else {
                        if (payload_kiov != NULL)
                                rc = kqswnal_map_tx_kiov(ktx, 0, payload_nob,
                                                         payload_niov, payload_kiov);
                        else
                                rc = kqswnal_map_tx_iov(ktx, 0, payload_nob,
                                                        payload_niov, payload_iov);
                        ktx->ktx_state = KTX_PUTTING;
                }

                if (rc != 0)
                        goto out;

                rmd->kqrmd_nfrag = ktx->ktx_nfrag - 1;
                nob += offsetof(kqswnal_remotemd_t,
                                kqrmd_frag[rmd->kqrmd_nfrag]);
                LASSERT (nob <= KQSW_TX_BUFFER_SIZE);

                memcpy(&rmd->kqrmd_frag[0], &ktx->ktx_frags[1],
                       rmd->kqrmd_nfrag * sizeof(EP_NMD));

                ep_nmd_subset(&ktx->ktx_frags[0], &ktx->ktx_ebuffer, 0, nob);

                msg->kqm_nob   = nob + payload_nob;
                msg->kqm_cksum = 0;
                msg->kqm_cksum = kqswnal_csum(~0, (char *)msg, nob);

                if (type == LNET_MSG_GET) {
                        /* Allocate reply message now while I'm in thread context */
                        ktx->ktx_args[2] = lnet_create_reply_msg (
                                kqswnal_data.kqn_ni, lntmsg);
                        if (ktx->ktx_args[2] == NULL) {
                                rc = -ENOMEM;
                                goto out;
                        }

                        /* NB finalizing the REPLY message is my
                         * responsibility now, whatever happens. */

                        if (*kqswnal_tunables.kqn_inject_csum_error == 3) {
                                msg->kqm_cksum++;
                                *kqswnal_tunables.kqn_inject_csum_error = 0;
                        }

                } else if (payload_kiov != NULL) {
                        /* must checksum payload after header so receiver can
                         * compute partial header cksum before swab.  Sadly
                         * this causes 2 rounds of kmap */
                        msg->kqm_cksum =
                                kqswnal_csum_kiov(msg->kqm_cksum, 0, payload_nob,
                                                  payload_niov, payload_kiov);
                        if (*kqswnal_tunables.kqn_inject_csum_error == 2) {
                                msg->kqm_cksum++;
                                *kqswnal_tunables.kqn_inject_csum_error = 0;
                        }
                } else {
                        msg->kqm_cksum =
                                kqswnal_csum_iov(msg->kqm_cksum, 0, payload_nob,
                                                 payload_niov, payload_iov);
                        if (*kqswnal_tunables.kqn_inject_csum_error == 2) {
                                msg->kqm_cksum++;
                                *kqswnal_tunables.kqn_inject_csum_error = 0;
                        }
                }

        } else if (payload_nob <= *kqswnal_tunables.kqn_tx_maxcontig) {
                lnet_hdr_t    *mhdr;
                char          *payload;
                kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer;

                /* single frag copied into the pre-mapped buffer */
                msg->kqm_magic   = LNET_PROTO_QSW_MAGIC;
                msg->kqm_version = QSWLND_PROTO_VERSION;
                msg->kqm_type    = QSWLND_MSG_IMMEDIATE;

                mhdr    = &msg->kqm_u.immediate.kqim_hdr;
                payload = msg->kqm_u.immediate.kqim_payload;

                *mhdr = *hdr;
                nob = (payload - ktx->ktx_buffer) + payload_nob;

                ep_nmd_subset(&ktx->ktx_frags[0], &ktx->ktx_ebuffer, 0, nob);

                if (payload_kiov != NULL)
                        lnet_copy_kiov2flat(KQSW_TX_BUFFER_SIZE, payload, 0,
                                            payload_niov, payload_kiov,
                                            payload_offset, payload_nob);
                else
                        lnet_copy_iov2flat(KQSW_TX_BUFFER_SIZE, payload, 0,
                                           payload_niov, payload_iov,
                                           payload_offset, payload_nob);

                msg->kqm_nob   = nob;
                msg->kqm_cksum = 0;
                msg->kqm_cksum = kqswnal_csum(~0, (char *)msg, nob);
                if (*kqswnal_tunables.kqn_inject_csum_error == 1) {
                        msg->kqm_cksum++;
                        *kqswnal_tunables.kqn_inject_csum_error = 0;
                }

        } else {
                lnet_hdr_t    *mhdr;
                kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer;

                /* multiple frags: first is hdr in pre-mapped buffer */
                msg->kqm_magic   = LNET_PROTO_QSW_MAGIC;
                msg->kqm_version = QSWLND_PROTO_VERSION;
                msg->kqm_type    = QSWLND_MSG_IMMEDIATE;

                mhdr = &msg->kqm_u.immediate.kqim_hdr;
                nob  = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload);

                *mhdr = *hdr;

                ep_nmd_subset(&ktx->ktx_frags[0], &ktx->ktx_ebuffer, 0, nob);

                if (payload_kiov != NULL)
                        rc = kqswnal_map_tx_kiov (ktx, payload_offset, payload_nob,
                                                  payload_niov, payload_kiov);
                else
                        rc = kqswnal_map_tx_iov (ktx, payload_offset, payload_nob,
                                                 payload_niov, payload_iov);
                if (rc != 0)
                        goto out;

                msg->kqm_nob   = nob + payload_nob;
                msg->kqm_cksum = 0;
                msg->kqm_cksum = kqswnal_csum(~0, (char *)msg, nob);

                msg->kqm_cksum = (payload_kiov != NULL) ?
                                 kqswnal_csum_kiov(msg->kqm_cksum,
                                                   payload_offset, payload_nob,
                                                   payload_niov, payload_kiov) :
                                 kqswnal_csum_iov(msg->kqm_cksum,
                                                  payload_offset, payload_nob,
                                                  payload_niov, payload_iov);

                if (*kqswnal_tunables.kqn_inject_csum_error == 1) {
                        msg->kqm_cksum++;
                        *kqswnal_tunables.kqn_inject_csum_error = 0;
                }
        }

        ktx->ktx_port = (nob <= KQSW_SMALLMSG) ?
                        EP_MSG_SVC_PORTALS_SMALL : EP_MSG_SVC_PORTALS_LARGE;

        rc = kqswnal_launch (ktx);

 out:
        CDEBUG_LIMIT(rc == 0 ? D_NET : D_NETERROR, "%s %d bytes to %s%s: rc %d\n",
                     routing ? (rc == 0 ? "Routed" : "Failed to route") :
                               (rc == 0 ? "Sent" : "Failed to send"),
                     nob, libcfs_nid2str(target.nid),
                     target_is_router ? "(router)" : "", rc);

        if (rc != 0) {
                lnet_msg_t *repmsg = (lnet_msg_t *)ktx->ktx_args[2];
                int         state  = ktx->ktx_state;

                kqswnal_put_idle_tx (ktx);

                if (state == KTX_GETTING && repmsg != NULL) {
                        /* We committed to reply, but there was a problem
                         * launching the GET.  We can't avoid delivering a
                         * REPLY event since we committed above, so we
                         * pretend the GET succeeded but the REPLY
                         * failed. */
                        rc = 0;
                        lnet_finalize (kqswnal_data.kqn_ni, lntmsg, 0);
                        lnet_finalize (kqswnal_data.kqn_ni, repmsg, -EIO);
                }
        }

        cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
        return (rc == 0 ? 0 : -EIO);
}
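
/* NB: kqswnal_send() picks one of three strategies: an RDMA handshake for
 * big GETs/PUTs/REPLYs when the optimized_{gets,puts} tunables allow it; a
 * single pre-mapped frag with the payload copied in, when it fits within
 * kqn_tx_maxcontig; otherwise the header in the pre-mapped buffer plus the
 * payload mapped in place as extra frags.  The EP service port is then
 * chosen purely by final message size (KQSW_SMALLMSG). */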
void
kqswnal_requeue_rx (kqswnal_rx_t *krx)
{
        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
        LASSERT (!krx->krx_rpc_reply_needed);

        krx->krx_state = KRX_POSTED;

        if (kqswnal_data.kqn_shuttingdown) {
                /* free EKC rxd on shutdown */
                ep_complete_receive(krx->krx_rxd);
        } else {
                /* repost receive */
                ep_requeue_receive(krx->krx_rxd,
                                   kqswnal_rxhandler, krx,
                                   &krx->krx_elanbuffer, 0);
        }
}
static void
kqswnal_rpc_complete (EP_RXD *rxd)
{
        int           status = ep_rxd_status(rxd);
        kqswnal_rx_t *krx    = (kqswnal_rx_t *)ep_rxd_arg(rxd);

        CDEBUG((status == EP_SUCCESS) ? D_NET : D_ERROR,
               "rxd %p, krx %p, status %d\n", rxd, krx, status);

        LASSERT (krx->krx_rxd == rxd);
        LASSERT (krx->krx_rpc_reply_needed);

        krx->krx_rpc_reply_needed = 0;
        kqswnal_requeue_rx (krx);
}
void
kqswnal_rx_done (kqswnal_rx_t *krx)
{
        int rc;

        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);

        if (krx->krx_rpc_reply_needed) {
                /* We've not completed the peer's RPC yet... */
                krx->krx_rpc_reply.msg.magic   = LNET_PROTO_QSW_MAGIC;
                krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;

                LASSERT (!cfs_in_interrupt());

                rc = ep_complete_rpc(krx->krx_rxd,
                                     kqswnal_rpc_complete, krx,
                                     &krx->krx_rpc_reply.ep_statusblk,
                                     NULL, NULL, 0);
                if (rc == EP_SUCCESS)
                        return;

                CERROR("can't complete RPC: %d\n", rc);
                krx->krx_rpc_reply_needed = 0;
        }

        kqswnal_requeue_rx(krx);
}
void
kqswnal_parse (kqswnal_rx_t *krx)
{
        lnet_ni_t      *ni = kqswnal_data.kqn_ni;
        kqswnal_msg_t  *msg = (kqswnal_msg_t *)page_address(krx->krx_kiov[0].kiov_page);
        lnet_nid_t      fromnid = kqswnal_rx_nid(krx);
        int             swab;
        int             n;
        int             i;
        int             nob;
        int             rc;

        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 1);

        if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) {
                CERROR("Short message %d received from %s\n",
                       krx->krx_nob, libcfs_nid2str(fromnid));
                goto done;
        }

        swab = msg->kqm_magic == __swab32(LNET_PROTO_QSW_MAGIC);

        if (swab || msg->kqm_magic == LNET_PROTO_QSW_MAGIC) {
                __u32 csum0;
                __u32 csum1;

                /* csum byte array before swab */
                csum1 = msg->kqm_cksum;
                msg->kqm_cksum = 0;
                csum0 = kqswnal_csum_kiov(~0, 0, krx->krx_nob,
                                          krx->krx_npages, krx->krx_kiov);
                msg->kqm_cksum = csum1;

                if (swab) {
                        __swab16s(&msg->kqm_version);
                        __swab16s(&msg->kqm_type);
                        __swab32s(&msg->kqm_cksum);
                        __swab32s(&msg->kqm_nob);
                }

                if (msg->kqm_version != QSWLND_PROTO_VERSION) {
                        /* Future protocol version compatibility support!
                         * The next qswlnd-specific protocol rev will first
                         * send an RPC to check version.
                         * 1.4.6 and 1.4.7.early reply with a status
                         * block containing its current version.
                         * Later versions send a failure (-ve) status +
                         * magic/version */

                        if (!krx->krx_rpc_reply_needed) {
                                CERROR("Unexpected version %d from %s\n",
                                       msg->kqm_version, libcfs_nid2str(fromnid));
                                goto done;
                        }

                        LASSERT (krx->krx_rpc_reply.msg.status == -EPROTO);
                        goto done;
                }

                switch (msg->kqm_type) {
                default:
                        CERROR("Bad request type %x from %s\n",
                               msg->kqm_type, libcfs_nid2str(fromnid));
                        goto done;

                case QSWLND_MSG_IMMEDIATE:
                        if (krx->krx_rpc_reply_needed) {
                                /* Should have been a simple message */
                                CERROR("IMMEDIATE sent as RPC from %s\n",
                                       libcfs_nid2str(fromnid));
                                goto done;
                        }

                        nob = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload);
                        if (krx->krx_nob < nob) {
                                CERROR("Short IMMEDIATE %d(%d) from %s\n",
                                       krx->krx_nob, nob, libcfs_nid2str(fromnid));
                                goto done;
                        }

                        if (csum0 != msg->kqm_cksum) {
                                CERROR("Bad IMMEDIATE checksum %08x(%08x) from %s\n",
                                       csum0, msg->kqm_cksum, libcfs_nid2str(fromnid));
                                CERROR("nob %d (%d)\n", krx->krx_nob, msg->kqm_nob);
                                goto done;
                        }

                        rc = lnet_parse(ni, &msg->kqm_u.immediate.kqim_hdr,
                                        fromnid, krx, 0);
                        if (rc < 0)
                                goto done;
                        return;

                case QSWLND_MSG_RDMA:
                        if (!krx->krx_rpc_reply_needed) {
                                /* Should have been a simple message */
                                CERROR("RDMA sent as simple message from %s\n",
                                       libcfs_nid2str(fromnid));
                                goto done;
                        }

                        nob = offsetof(kqswnal_msg_t,
                                       kqm_u.rdma.kqrm_rmd.kqrmd_frag[0]);
                        if (krx->krx_nob < nob) {
                                CERROR("Short RDMA message %d(%d) from %s\n",
                                       krx->krx_nob, nob, libcfs_nid2str(fromnid));
                                goto done;
                        }

                        if (swab)
                                __swab32s(&msg->kqm_u.rdma.kqrm_rmd.kqrmd_nfrag);

                        n = msg->kqm_u.rdma.kqrm_rmd.kqrmd_nfrag;
                        nob = offsetof(kqswnal_msg_t,
                                       kqm_u.rdma.kqrm_rmd.kqrmd_frag[n]);

                        if (krx->krx_nob < nob) {
                                CERROR("short RDMA message %d(%d) from %s\n",
                                       krx->krx_nob, nob, libcfs_nid2str(fromnid));
                                goto done;
                        }

                        if (swab) {
                                for (i = 0; i < n; i++) {
                                        EP_NMD *nmd = &msg->kqm_u.rdma.kqrm_rmd.kqrmd_frag[i];

                                        __swab32s(&nmd->nmd_addr);
                                        __swab32s(&nmd->nmd_len);
                                        __swab32s(&nmd->nmd_attr);
                                }
                        }

                        krx->krx_cksum = csum0;         /* stash checksum so far */

                        rc = lnet_parse(ni, &msg->kqm_u.rdma.kqrm_hdr,
                                        fromnid, krx, 1);
                        if (rc < 0)
                                goto done;
                        return;
                }
                /* not reached */
        }

        if (msg->kqm_magic == LNET_PROTO_MAGIC ||
            msg->kqm_magic == __swab32(LNET_PROTO_MAGIC)) {
                /* Future protocol version compatibility support!
                 * When LNET unifies protocols over all LNDs, the first thing a
                 * peer will send will be a version query RPC.
                 * 1.4.6 and 1.4.7.early reply with a status block containing
                 * LNET_PROTO_QSW_MAGIC.
                 * Later versions send a failure (-ve) status +
                 * magic/version */

                if (!krx->krx_rpc_reply_needed) {
                        CERROR("Unexpected magic %08x from %s\n",
                               msg->kqm_magic, libcfs_nid2str(fromnid));
                        goto done;
                }

                LASSERT (krx->krx_rpc_reply.msg.status == -EPROTO);
                goto done;
        }

        CERROR("Unrecognised magic %08x from %s\n",
               msg->kqm_magic, libcfs_nid2str(fromnid));

 done:
        kqswnal_rx_decref(krx);
}
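
/* NB: checksums are computed over the message as a raw byte array, with
 * kqm_cksum zeroed, *before* any byte-swapping, so sender and receiver
 * agree on the value regardless of endianness. */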
/* Receive Interrupt Handler: posts to schedulers */
void
kqswnal_rxhandler(EP_RXD *rxd)
{
        unsigned long flags;
        int           nob    = ep_rxd_len (rxd);
        int           status = ep_rxd_status (rxd);
        kqswnal_rx_t *krx    = (kqswnal_rx_t *)ep_rxd_arg (rxd);

        CDEBUG(D_NET, "kqswnal_rxhandler: rxd %p, krx %p, nob %d, status %d\n",
               rxd, krx, nob, status);

        LASSERT (krx != NULL);
        LASSERT (krx->krx_state == KRX_POSTED);

        krx->krx_state = KRX_PARSE;
        krx->krx_rxd   = rxd;
        krx->krx_nob   = nob;

        /* RPC reply iff rpc request received without error */
        krx->krx_rpc_reply_needed = ep_rxd_isrpc(rxd) &&
                                    (status == EP_SUCCESS ||
                                     status == EP_MSG_TOO_BIG);

        /* Default to failure if an RPC reply is requested but not handled */
        krx->krx_rpc_reply.msg.status = -EPROTO;
        cfs_atomic_set (&krx->krx_refcount, 1);

        if (status != EP_SUCCESS) {
                /* receives complete with failure when receiver is removed */
                if (status == EP_SHUTDOWN)
                        LASSERT (kqswnal_data.kqn_shuttingdown);
                else
                        CERROR("receive failed with status %d nob %d\n",
                               ep_rxd_status(rxd), nob);
                kqswnal_rx_decref(krx);
                return;
        }

        if (!cfs_in_interrupt()) {
                kqswnal_parse(krx);
                return;
        }

        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);

        cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
        cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
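
/* NB: each krx carries a refcount, set to 1 here; the RDMA completion
 * paths take extra refs, and dropping the last ref is what completes or
 * requeues the receive (see kqswnal_rx_done() above). */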
int
kqswnal_recv (lnet_ni_t     *ni,
              void          *private,
              lnet_msg_t    *lntmsg,
              int            delayed,
              unsigned int   niov,
              struct iovec  *iov,
              lnet_kiov_t   *kiov,
              unsigned int   offset,
              unsigned int   mlen,
              unsigned int   rlen)
{
        kqswnal_rx_t       *krx = (kqswnal_rx_t *)private;
        lnet_nid_t          fromnid;
        kqswnal_msg_t      *msg;
        lnet_hdr_t         *hdr;
        kqswnal_remotemd_t *rmd;
        int                 msg_offset;
        int                 rc;

        LASSERT (!cfs_in_interrupt ());         /* OK to map */
        /* Either all pages or all vaddrs */
        LASSERT (!(kiov != NULL && iov != NULL));

        fromnid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ep_rxd_node(krx->krx_rxd));
        msg = (kqswnal_msg_t *)page_address(krx->krx_kiov[0].kiov_page);

        if (krx->krx_rpc_reply_needed) {
                /* optimized (rdma) request sent as RPC */

                LASSERT (msg->kqm_type == QSWLND_MSG_RDMA);
                hdr = &msg->kqm_u.rdma.kqrm_hdr;
                rmd = &msg->kqm_u.rdma.kqrm_rmd;

                /* NB header is still in wire byte order */

                switch (le32_to_cpu(hdr->type)) {
                case LNET_MSG_PUT:
                case LNET_MSG_REPLY:
                        /* This is an optimized PUT/REPLY */
                        rc = kqswnal_rdma(krx, lntmsg,
                                          KTX_RDMA_FETCH, rmd,
                                          niov, iov, kiov, offset, mlen);
                        break;

                case LNET_MSG_GET:
                        if (krx->krx_cksum != msg->kqm_cksum) {
                                CERROR("Bad GET checksum %08x(%08x) from %s\n",
                                       krx->krx_cksum, msg->kqm_cksum,
                                       libcfs_nid2str(fromnid));
                                rc = -EIO;
                                break;
                        }

                        if (lntmsg == NULL) {
                                /* No buffer match: my decref will
                                 * complete the RPC with failure */
                                rc = 0;
                        } else {
                                /* Matched something! */
                                rc = kqswnal_rdma(krx, lntmsg,
                                                  KTX_RDMA_STORE, rmd,
                                                  lntmsg->msg_niov,
                                                  lntmsg->msg_iov,
                                                  lntmsg->msg_kiov,
                                                  lntmsg->msg_offset,
                                                  lntmsg->msg_len);
                        }
                        break;

                default:
                        CERROR("Bad RPC type %d\n",
                               le32_to_cpu(hdr->type));
                        rc = -EPROTO;
                        break;
                }

                kqswnal_rx_decref(krx);
                return rc;
        }

        LASSERT (msg->kqm_type == QSWLND_MSG_IMMEDIATE);
        msg_offset = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload);

        if (krx->krx_nob < msg_offset + rlen) {
                CERROR("Bad message size from %s: have %d, need %d + %d\n",
                       libcfs_nid2str(fromnid), krx->krx_nob,
                       msg_offset, rlen);
                kqswnal_rx_decref(krx);
                return -EPROTO;
        }

        if (kiov != NULL)
                lnet_copy_kiov2kiov(niov, kiov, offset,
                                    krx->krx_npages, krx->krx_kiov,
                                    msg_offset, mlen);
        else
                lnet_copy_kiov2iov(niov, iov, offset,
                                   krx->krx_npages, krx->krx_kiov,
                                   msg_offset, mlen);

        lnet_finalize(ni, lntmsg, 0);
        kqswnal_rx_decref(krx);
        return 0;
}
int
kqswnal_thread_start (int (*fn)(void *arg), void *arg)
{
        long pid = cfs_create_thread (fn, arg, 0);

        if (pid < 0)
                return ((int)pid);

        cfs_atomic_inc (&kqswnal_data.kqn_nthreads);
        return (0);
}

void
kqswnal_thread_fini (void)
{
        cfs_atomic_dec (&kqswnal_data.kqn_nthreads);
}
int
kqswnal_scheduler (void *arg)
{
        kqswnal_rx_t  *krx;
        kqswnal_tx_t  *ktx;
        unsigned long  flags;
        int            rc;
        int            counter = 0;
        int            did_something;

        cfs_daemonize ("kqswnal_sched");
        cfs_block_allsigs ();

        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);

        for (;;) {
                did_something = 0;

                if (!cfs_list_empty (&kqswnal_data.kqn_readyrxds))
                {
                        krx = cfs_list_entry(kqswnal_data.kqn_readyrxds.next,
                                             kqswnal_rx_t, krx_list);
                        cfs_list_del (&krx->krx_list);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        LASSERT (krx->krx_state == KRX_PARSE);
                        kqswnal_parse (krx);

                        did_something = 1;
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
                                          flags);
                }

                if (!cfs_list_empty (&kqswnal_data.kqn_donetxds))
                {
                        ktx = cfs_list_entry(kqswnal_data.kqn_donetxds.next,
                                             kqswnal_tx_t, ktx_schedlist);
                        cfs_list_del_init (&ktx->ktx_schedlist);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        kqswnal_tx_done_in_thread_context(ktx);

                        did_something = 1;
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
                                          flags);
                }

                if (!cfs_list_empty (&kqswnal_data.kqn_delayedtxds))
                {
                        ktx = cfs_list_entry(kqswnal_data.kqn_delayedtxds.next,
                                             kqswnal_tx_t, ktx_schedlist);
                        cfs_list_del_init (&ktx->ktx_schedlist);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        rc = kqswnal_launch (ktx);
                        if (rc != 0) {
                                CERROR("Failed delayed transmit to %s: %d\n",
                                       libcfs_nid2str(ktx->ktx_nid), rc);
                                kqswnal_tx_done (ktx, rc);
                        }
                        cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);

                        did_something = 1;
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
                                          flags);
                }

                /* nothing to do or hogging CPU */
                if (!did_something || counter++ == KQSW_RESCHED) {
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        counter = 0;

                        if (!did_something) {
                                if (kqswnal_data.kqn_shuttingdown == 2) {
                                        /* We only exit in stage 2 of shutdown
                                         * when there's nothing left to do */
                                        break;
                                }
                                cfs_wait_event_interruptible_exclusive (
                                        kqswnal_data.kqn_sched_waitq,
                                        kqswnal_data.kqn_shuttingdown == 2 ||
                                        !cfs_list_empty(&kqswnal_data.kqn_readyrxds) ||
                                        !cfs_list_empty(&kqswnal_data.kqn_donetxds) ||
                                        !cfs_list_empty(&kqswnal_data.kqn_delayedtxds),
                                        rc);
                        } else if (need_resched())
                                cfs_schedule ();

                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
                                          flags);
                }
        }

        kqswnal_thread_fini ();
        return (0);
}
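
/* NB: the scheduler drains the three work queues under kqn_sched_lock,
 * dropping the lock around each work item; after KQSW_RESCHED consecutive
 * busy iterations it also drops the lock to yield the CPU, and it only
 * sleeps on kqn_sched_waitq (or exits, in shutdown stage 2) when a whole
 * pass finds nothing to do. */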