/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 * Copyright (C) 2002, Lawrence Livermore National Labs (LLNL)
 *   W. Marcus Miller - Based on ksocknal
 *
 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "qswnal.h"

atomic_t kqswnal_packets_launched;
atomic_t kqswnal_packets_transmitted;
atomic_t kqswnal_packets_received;

/*
 *  LIB functions follow
 */

static int
kqswnal_read(nal_cb_t *nal, void *private, void *dst_addr, user_ptr src_addr,
             size_t len)
{
        CDEBUG (D_NET, LPX64": reading "LPSZ" bytes from %p -> %p\n",
                nal->ni.nid, len, src_addr, dst_addr);
        memcpy (dst_addr, src_addr, len);
        return (0);
}

static int
kqswnal_write(nal_cb_t *nal, void *private, user_ptr dst_addr, void *src_addr,
              size_t len)
{
        CDEBUG (D_NET, LPX64": writing "LPSZ" bytes from %p -> %p\n",
                nal->ni.nid, len, src_addr, dst_addr);
        memcpy (dst_addr, src_addr, len);
        return (0);
}

static void *
kqswnal_malloc(nal_cb_t *nal, size_t len)
{
        void *buf;

        PORTAL_ALLOC(buf, len);
        return (buf);
}

static void
kqswnal_free(nal_cb_t *nal, void *buf, size_t len)
{
        PORTAL_FREE(buf, len);
}

static void
kqswnal_printf (nal_cb_t *nal, const char *fmt, ...)
{
        va_list ap;
        char    msg[256];

        va_start (ap, fmt);
        vsnprintf (msg, sizeof (msg), fmt, ap);  /* sprint safely */
        va_end (ap);

        msg[sizeof (msg) - 1] = 0;               /* ensure terminated */

        CDEBUG (D_NET, "%s", msg);
}

static void
kqswnal_cli(nal_cb_t *nal, unsigned long *flags)
{
        kqswnal_data_t *data = nal->nal_data;

        spin_lock_irqsave(&data->kqn_statelock, *flags);
}

static void
kqswnal_sti(nal_cb_t *nal, unsigned long *flags)
{
        kqswnal_data_t *data = nal->nal_data;

        spin_unlock_irqrestore(&data->kqn_statelock, *flags);
}

static int
kqswnal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist)
{
        if (nid == nal->ni.nid)
                *dist = 0;                      /* it's me */
        else if (kqswnal_nid2elanid (nid) >= 0)
                *dist = 1;                      /* it's my peer */
        else
                *dist = 2;                      /* via router */
        return (0);
}

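/* NB the distances returned above (0 = this node, 1 = a directly-reachable
 * Elan peer, 2 = reachable only via a gateway) are what the generic portals
 * routing layer compares when deciding how to reach a given NID. */
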
void
kqswnal_unmap_tx (kqswnal_tx_t *ktx)
{
        if (ktx->ktx_nmappedpages == 0)
                return;

        CDEBUG (D_NET, "%p[%d] unloading pages %d for %d\n",
                ktx, ktx->ktx_niov, ktx->ktx_basepage, ktx->ktx_nmappedpages);

        LASSERT (ktx->ktx_nmappedpages <= ktx->ktx_npages);
        LASSERT (ktx->ktx_basepage + ktx->ktx_nmappedpages <=
                 kqswnal_data.kqn_eptxdmahandle->NumDvmaPages);

        elan3_dvma_unload(kqswnal_data.kqn_epdev->DmaState,
                          kqswnal_data.kqn_eptxdmahandle,
                          ktx->ktx_basepage, ktx->ktx_nmappedpages);
        ktx->ktx_nmappedpages = 0;
}

int
kqswnal_map_tx_kiov (kqswnal_tx_t *ktx, int nob, int niov, ptl_kiov_t *kiov)
{
        int       nfrags    = ktx->ktx_niov;
        const int maxfrags  = sizeof (ktx->ktx_iov)/sizeof (ktx->ktx_iov[0]);
        int       nmapped   = ktx->ktx_nmappedpages;
        int       maxmapped = ktx->ktx_npages;
        uint32_t  basepage  = ktx->ktx_basepage + nmapped;
        char     *ptr;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags <= maxfrags);
        LASSERT (nob > 0);
        LASSERT (niov > 0);

        do {
                int fraglen = kiov->kiov_len;

                /* nob exactly spans the iovs */
                LASSERT (fraglen <= nob);
                /* each frag fits in a page */
                LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);

                nmapped++;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == maxfrags) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               maxfrags);
                        return (-EMSGSIZE);
                }

                /* XXX this is really crap, but we'll have to kmap until
                 * EKC has a page (rather than vaddr) mapping interface */
                ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, page %d, %d total\n",
                       ktx, nfrags, ptr, fraglen, basepage, nmapped);

                elan3_dvma_kaddr_load (kqswnal_data.kqn_epdev->DmaState,
                                       kqswnal_data.kqn_eptxdmahandle,
                                       ptr, fraglen,
                                       basepage, &ktx->ktx_iov[nfrags].Base);

                kunmap (kiov->kiov_page);

                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                if (nfrags > 0 &&                /* previous frag mapped */
                    ktx->ktx_iov[nfrags].Base == /* contiguous with this one */
                    (ktx->ktx_iov[nfrags-1].Base + ktx->ktx_iov[nfrags-1].Len))
                        /* just extend previous */
                        ktx->ktx_iov[nfrags - 1].Len += fraglen;
                else {
                        ktx->ktx_iov[nfrags].Len = fraglen;
                        nfrags++;                /* new frag */
                }

                basepage++;
                kiov++;
                niov--;
                nob -= fraglen;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        ktx->ktx_niov = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_niov, ktx->ktx_nmappedpages);

        return (0);
}

int
kqswnal_map_tx_iov (kqswnal_tx_t *ktx, int nob, int niov, struct iovec *iov)
{
        int       nfrags    = ktx->ktx_niov;
        const int maxfrags  = sizeof (ktx->ktx_iov)/sizeof (ktx->ktx_iov[0]);
        int       nmapped   = ktx->ktx_nmappedpages;
        int       maxmapped = ktx->ktx_npages;
        uint32_t  basepage  = ktx->ktx_basepage + nmapped;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags <= maxfrags);
        LASSERT (nob > 0);
        LASSERT (niov > 0);

        do {
                int  fraglen = iov->iov_len;
                long npages  = kqswnal_pages_spanned (iov->iov_base, fraglen);

                /* nob exactly spans the iovs */
                LASSERT (fraglen <= nob);

                nmapped += npages;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == maxfrags) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               maxfrags);
                        return (-EMSGSIZE);
                }

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, pages %d for %ld, %d total\n",
                       ktx, nfrags, iov->iov_base, fraglen, basepage, npages,
                       nmapped);

                elan3_dvma_kaddr_load (kqswnal_data.kqn_epdev->DmaState,
                                       kqswnal_data.kqn_eptxdmahandle,
                                       iov->iov_base, fraglen,
                                       basepage, &ktx->ktx_iov[nfrags].Base);
                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                if (nfrags > 0 &&                /* previous frag mapped */
                    ktx->ktx_iov[nfrags].Base == /* contiguous with this one */
                    (ktx->ktx_iov[nfrags-1].Base + ktx->ktx_iov[nfrags-1].Len))
                        /* just extend previous */
                        ktx->ktx_iov[nfrags - 1].Len += fraglen;
                else {
                        ktx->ktx_iov[nfrags].Len = fraglen;
                        nfrags++;                /* new frag */
                }

                basepage += npages;
                iov++;
                niov--;
                nob -= fraglen;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        ktx->ktx_niov = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_niov, ktx->ktx_nmappedpages);

        return (0);
}

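/* kqswnal_pages_spanned() is defined elsewhere in this module; the helper
 * below is a hypothetical sketch (not the real implementation) illustrating
 * the page-count arithmetic the mapping code above depends on. */
static inline long
kqswnal_pages_spanned_example (void *base, int nob)
{
        unsigned long first = ((unsigned long)base) >> PAGE_SHIFT;
        unsigned long last  = ((unsigned long)base + nob - 1) >> PAGE_SHIFT;

        /* number of whole pages the range [base, base + nob) touches */
        return (last - first + 1);
}
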
void
kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
{
        kpr_fwd_desc_t *fwd = NULL;
        unsigned long   flags;

        kqswnal_unmap_tx (ktx);                 /* release temporary mappings */

        spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);

        list_del (&ktx->ktx_list);              /* take off active list */

        if (ktx->ktx_isnblk) {
                /* reserved for non-blocking tx */
                list_add (&ktx->ktx_list, &kqswnal_data.kqn_nblk_idletxds);
                spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
                return;
        }

        list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);

        /* anything blocking for a tx descriptor? */
        if (!list_empty(&kqswnal_data.kqn_idletxd_fwdq)) /* forwarded packet? */
        {
                CDEBUG(D_NET,"wakeup fwd\n");

                fwd = list_entry (kqswnal_data.kqn_idletxd_fwdq.next,
                                  kpr_fwd_desc_t, kprfd_list);
                list_del (&fwd->kprfd_list);
        }

        if (waitqueue_active (&kqswnal_data.kqn_idletxd_waitq))  /* process? */
        {
                /* local sender waiting for tx desc */
                CDEBUG(D_NET,"wakeup process\n");
                wake_up (&kqswnal_data.kqn_idletxd_waitq);
        }

        spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

        if (fwd == NULL)
                return;

        /* schedule packet for forwarding again */
        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        list_add_tail (&fwd->kprfd_list, &kqswnal_data.kqn_delayedfwds);
        if (waitqueue_active (&kqswnal_data.kqn_sched_waitq))
                wake_up (&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
}

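/* NB a tx descriptor is always on exactly one of three lists: kqn_idletxds
 * (the general pool, which callers may sleep on), kqn_nblk_idletxds (a
 * reserve for callers that must not block) or kqn_activetxds (launched).
 * Freeing a descriptor here is also the point where blocked forwards and
 * blocked local senders get kicked. */
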
kqswnal_tx_t *
kqswnal_get_idle_tx (kpr_fwd_desc_t *fwd, int may_block)
{
        unsigned long  flags;
        kqswnal_tx_t  *ktx = NULL;

        for (;;) {
                spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);

                /* "normal" descriptor is free */
                if (!list_empty (&kqswnal_data.kqn_idletxds)) {
                        ktx = list_entry (kqswnal_data.kqn_idletxds.next,
                                          kqswnal_tx_t, ktx_list);
                        break;
                }

                /* "normal" descriptor pool is empty */

                if (fwd != NULL) { /* forwarded packet => queue for idle txd */
                        CDEBUG (D_NET, "blocked fwd [%p]\n", fwd);
                        list_add_tail (&fwd->kprfd_list,
                                       &kqswnal_data.kqn_idletxd_fwdq);
                        break;
                }

                /* doing a local transmit */
                if (!may_block) {
                        if (list_empty (&kqswnal_data.kqn_nblk_idletxds)) {
                                CERROR ("intr tx desc pool exhausted\n");
                                break;
                        }

                        ktx = list_entry (kqswnal_data.kqn_nblk_idletxds.next,
                                          kqswnal_tx_t, ktx_list);
                        break;
                }

                /* block for idle tx */

                spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

                CDEBUG (D_NET, "blocking for tx desc\n");
                wait_event (kqswnal_data.kqn_idletxd_waitq,
                            !list_empty (&kqswnal_data.kqn_idletxds));
        }

        if (ktx != NULL) {
                list_del (&ktx->ktx_list);
                list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
                ktx->ktx_launcher = current->pid;
        }

        spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

        /* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
        LASSERT (ktx == NULL || ktx->ktx_nmappedpages == 0);

        return (ktx);
}

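/* Typical usage (sketch): callers that might be in interrupt context, or
 * that might block the receiver, pass may_block == 0 and must cope with a
 * NULL return, e.g.
 *
 *      ktx = kqswnal_get_idle_tx (NULL, !in_interrupt ());
 *      if (ktx == NULL)
 *              return (PTL_NOSPACE);
 *
 * which is essentially the pattern kqswnal_sendmsg() uses below. */
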
void
kqswnal_tx_done (kqswnal_tx_t *ktx, int error)
{
        if (ktx->ktx_forwarding)    /* router asked me to forward this packet */
                kpr_fwd_done (&kqswnal_data.kqn_router,
                              (kpr_fwd_desc_t *)ktx->ktx_args[0], error);
        else                        /* packet sourced locally */
                lib_finalize (&kqswnal_lib, ktx->ktx_args[0],
                              (lib_msg_t *)ktx->ktx_args[1]);

        kqswnal_put_idle_tx (ktx);
}

static void
kqswnal_txhandler(EP_TXD *txd, void *arg, int status)
{
        kqswnal_tx_t *ktx = (kqswnal_tx_t *)arg;

        LASSERT (txd != NULL);
        LASSERT (ktx != NULL);

        CDEBUG(D_NET, "txd %p, arg %p status %d\n", txd, arg, status);

        if (status == EP_SUCCESS)
                atomic_inc (&kqswnal_packets_transmitted);

        if (status != EP_SUCCESS)
        {
                CERROR ("kqswnal: Transmit failed with %d\n", status);
                status = -EIO;
        }

        kqswnal_tx_done (ktx, status);
}

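/* NB this completion handler is presumably called back by the EKC from
 * interrupt context (cf. the in_interrupt() test in kqswnal_launch() below),
 * so everything it reaches, including kqswnal_tx_done() and
 * kqswnal_put_idle_tx(), must be safe to run there. */
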
int
kqswnal_launch (kqswnal_tx_t *ktx)
{
        /* Don't block for transmit descriptor if we're in interrupt context */
        int            attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
        int            dest = kqswnal_nid2elanid (ktx->ktx_nid);
        unsigned long  flags;
        int            rc;

        LASSERT (dest >= 0);                    /* must be a peer */
        rc = ep_transmit_large(kqswnal_data.kqn_eptx, dest,
                               ktx->ktx_port, attr, kqswnal_txhandler,
                               ktx, ktx->ktx_iov, ktx->ktx_niov);
        if (rc == 0)
                atomic_inc (&kqswnal_packets_launched);

        if (rc != ENOMEM)
                return (rc);

        /* can't allocate ep txd => queue for later */

        LASSERT (in_interrupt());      /* not called by thread (not looping) */

        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        list_add_tail (&ktx->ktx_delayed_list, &kqswnal_data.kqn_delayedtxds);
        if (waitqueue_active (&kqswnal_data.kqn_sched_waitq))
                wake_up (&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);

        return (0);
}

static char *
hdr_type_string (ptl_hdr_t *hdr)
{
        switch (NTOH__u32 (hdr->type)) {
        case PTL_MSG_ACK:
                return ("ACK");
        case PTL_MSG_PUT:
                return ("PUT");
        case PTL_MSG_GET:
                return ("GET");
        case PTL_MSG_REPLY:
                return ("REPLY");
        default:
                return ("<UNKNOWN>");
        }
}

static void
kqswnal_cerror_hdr(ptl_hdr_t * hdr)
{
        char *type_str = hdr_type_string (hdr);

        CERROR("P3 Header at %p of type %s\n", hdr, type_str);
        CERROR("    From nid/pid "LPU64"/%u", NTOH__u64(hdr->src_nid),
               NTOH__u32(hdr->src_pid));
        CERROR("    To nid/pid "LPU64"/%u\n", NTOH__u64(hdr->dest_nid),
               NTOH__u32(hdr->dest_pid));

        switch (NTOH__u32(hdr->type)) {
        case PTL_MSG_PUT:
                CERROR("    Ptl index %d, ack md "LPX64"."LPX64", "
                       "match bits "LPX64"\n",
                       NTOH__u32 (hdr->msg.put.ptl_index),
                       hdr->msg.put.ack_wmd.wh_interface_cookie,
                       hdr->msg.put.ack_wmd.wh_object_cookie,
                       NTOH__u64 (hdr->msg.put.match_bits));
                CERROR("    Length %d, offset %d, hdr data "LPX64"\n",
                       NTOH__u32(PTL_HDR_LENGTH(hdr)),
                       NTOH__u32(hdr->msg.put.offset),
                       hdr->msg.put.hdr_data);
                break;

        case PTL_MSG_GET:
                CERROR("    Ptl index %d, return md "LPX64"."LPX64", "
                       "match bits "LPX64"\n",
                       NTOH__u32 (hdr->msg.get.ptl_index),
                       hdr->msg.get.return_wmd.wh_interface_cookie,
                       hdr->msg.get.return_wmd.wh_object_cookie,
                       hdr->msg.get.match_bits);
                CERROR("    Length %d, src offset %d\n",
                       NTOH__u32 (hdr->msg.get.sink_length),
                       NTOH__u32 (hdr->msg.get.src_offset));
                break;

        case PTL_MSG_ACK:
                CERROR("    dst md "LPX64"."LPX64", manipulated length %d\n",
                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
                       hdr->msg.ack.dst_wmd.wh_object_cookie,
                       NTOH__u32 (hdr->msg.ack.mlength));
                break;

        case PTL_MSG_REPLY:
                CERROR("    dst md "LPX64"."LPX64", length %d\n",
                       hdr->msg.reply.dst_wmd.wh_interface_cookie,
                       hdr->msg.reply.dst_wmd.wh_object_cookie,
                       NTOH__u32 (PTL_HDR_LENGTH(hdr)));
                break;
        }
}                               /* end of print_hdr() */

static int
kqswnal_sendmsg (nal_cb_t     *nal,
                 void         *private,
                 lib_msg_t    *cookie,
                 ptl_hdr_t    *hdr,
                 int           type,
                 ptl_nid_t     nid,
                 ptl_pid_t     pid,
                 unsigned int  payload_niov,
                 struct iovec *payload_iov,
                 ptl_kiov_t   *payload_kiov,
                 size_t        payload_nob)
{
        kqswnal_tx_t *ktx;
        int           rc;
        ptl_nid_t     gatewaynid;
#if KQSW_CHECKSUM
        int           i;
        kqsw_csum_t   csum;
        int           sumnob;
#endif

        CDEBUG(D_NET, "sending "LPSZ" bytes in %d frags to nid: "LPX64
               " pid %u\n", payload_nob, payload_niov, nid, pid);

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= PTL_MD_MAX_IOV);

        /* It must be OK to kmap() if required */
        LASSERT (payload_kiov == NULL || !in_interrupt ());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        if (payload_nob > KQSW_MAXPAYLOAD) {
                CERROR ("request exceeds MTU size "LPSZ" (max %u).\n",
                        payload_nob, KQSW_MAXPAYLOAD);
                return (PTL_FAIL);
        }

        if (kqswnal_nid2elanid (nid) < 0) { /* Can't send direct: find gateway? */
                rc = kpr_lookup (&kqswnal_data.kqn_router, nid, &gatewaynid);
                if (rc != 0) {
                        CERROR("Can't route to "LPX64": router error %d\n",
                               nid, rc);
                        return (PTL_FAIL);
                }
                if (kqswnal_nid2elanid (gatewaynid) < 0) {
                        CERROR("Bad gateway "LPX64" for "LPX64"\n",
                               gatewaynid, nid);
                        return (PTL_FAIL);
                }
                nid = gatewaynid;
        }

        /* I may not block for a transmit descriptor if I might block the
         * receiver, or an interrupt handler. */
        ktx = kqswnal_get_idle_tx(NULL, !(type == PTL_MSG_ACK ||
                                          type == PTL_MSG_REPLY ||
                                          in_interrupt()));
        if (ktx == NULL) {
                kqswnal_cerror_hdr (hdr);
                return (PTL_NOSPACE);
        }

        memcpy (ktx->ktx_buffer, hdr, sizeof (*hdr)); /* copy hdr from caller's stack */
        ktx->ktx_wire_hdr = (ptl_hdr_t *)ktx->ktx_buffer;

#if KQSW_CHECKSUM
        csum = kqsw_csum (0, (char *)hdr, sizeof (*hdr));
        memcpy (ktx->ktx_buffer + sizeof (*hdr), &csum, sizeof (csum));
        for (csum = 0, i = 0, sumnob = payload_nob; sumnob > 0; i++) {
                if (payload_kiov != NULL) {
                        ptl_kiov_t *kiov = &payload_kiov[i];
                        char       *addr = ((char *)kmap (kiov->kiov_page)) +
                                           kiov->kiov_offset;

                        csum = kqsw_csum (csum, addr, MIN (sumnob, kiov->kiov_len));
                        sumnob -= kiov->kiov_len;
                        kunmap (kiov->kiov_page);       /* match kmap() above */
                } else {
                        struct iovec *iov = &payload_iov[i];

                        csum = kqsw_csum (csum, iov->iov_base, MIN (sumnob, iov->iov_len));
                        sumnob -= iov->iov_len;
                }
        }
        memcpy (ktx->ktx_buffer + sizeof (*hdr) + sizeof (csum), &csum, sizeof (csum));
#endif

        /* Set up first frag from pre-mapped buffer (it's at least the
         * portals header) */
        ktx->ktx_iov[0].Base = ktx->ktx_ebuffer;
        ktx->ktx_iov[0].Len = KQSW_HDR_SIZE;
        ktx->ktx_niov = 1;

        if (payload_nob > 0) { /* got some payload (something more to do) */
                /* make a single contiguous message? */
                if (payload_nob <= KQSW_TX_MAXCONTIG) {
                        /* copy payload to ktx_buffer, immediately after hdr */
                        if (payload_kiov != NULL)
                                lib_copy_kiov2buf (ktx->ktx_buffer + KQSW_HDR_SIZE,
                                                   payload_niov, payload_kiov, payload_nob);
                        else
                                lib_copy_iov2buf (ktx->ktx_buffer + KQSW_HDR_SIZE,
                                                  payload_niov, payload_iov, payload_nob);
                        /* first frag includes payload */
                        ktx->ktx_iov[0].Len += payload_nob;
                } else {
                        if (payload_kiov != NULL)
                                rc = kqswnal_map_tx_kiov (ktx, payload_nob,
                                                          payload_niov, payload_kiov);
                        else
                                rc = kqswnal_map_tx_iov (ktx, payload_nob,
                                                         payload_niov, payload_iov);
                        if (rc != 0) {
                                kqswnal_put_idle_tx (ktx);
                                return (PTL_FAIL);
                        }
                }
        }

        ktx->ktx_nid  = nid;
        ktx->ktx_port = (payload_nob <= KQSW_SMALLPAYLOAD) ?
                        EP_SVC_LARGE_PORTALS_SMALL : EP_SVC_LARGE_PORTALS_LARGE;

        ktx->ktx_forwarding = 0;        /* => lib_finalize() on completion */
        ktx->ktx_args[0] = private;
        ktx->ktx_args[1] = cookie;

        rc = kqswnal_launch (ktx);
        if (rc != 0) {                  /* failed? */
                CERROR ("Failed to send packet to "LPX64": %d\n", nid, rc);
                kqswnal_put_idle_tx (ktx);
                return (PTL_FAIL);
        }

        CDEBUG(D_NET, "sent "LPSZ" bytes to "LPX64"\n", payload_nob, nid);
        return (PTL_OK);
}

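#if KQSW_CHECKSUM
/* Illustrative sketch only: the real kqsw_csum() is defined elsewhere in
 * this module's headers and may well differ.  This just shows the
 * (sum, base, nob) accumulation contract the checksum calls above assume. */
static inline kqsw_csum_t
kqsw_csum_example (kqsw_csum_t sum, void *base, int nob)
{
        unsigned char *ptr = (unsigned char *)base;

        while (nob-- > 0)               /* accumulate over every byte */
                sum += *ptr++;

        return (sum);
}
#endif
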
static int
kqswnal_send (nal_cb_t *nal, void *private, lib_msg_t *cookie,
              ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
              unsigned int payload_niov, struct iovec *payload_iov,
              size_t payload_nob)
{
        return (kqswnal_sendmsg (nal, private, cookie, hdr, type, nid, pid,
                                 payload_niov, payload_iov, NULL, payload_nob));
}

static int
kqswnal_send_pages (nal_cb_t *nal, void *private, lib_msg_t *cookie,
                    ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
                    unsigned int payload_niov, ptl_kiov_t *payload_kiov,
                    size_t payload_nob)
{
        return (kqswnal_sendmsg (nal, private, cookie, hdr, type, nid, pid,
                                 payload_niov, NULL, payload_kiov, payload_nob));
}

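/* NB kqswnal_send() and kqswnal_send_pages() are deliberately thin: the lib
 * layer hands a payload over either as kernel-vaddr iovecs or as page+offset
 * kiovs, never both at once, and kqswnal_sendmsg() asserts and handles the
 * two cases itself. */
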
int kqswnal_fwd_copy_contig = 0;

void
kqswnal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
{
        int             rc;
        kqswnal_tx_t   *ktx;
        struct iovec   *iov = fwd->kprfd_iov;
        int             niov = fwd->kprfd_niov;
        int             nob = fwd->kprfd_nob;
        ptl_nid_t       nid = fwd->kprfd_gateway_nid;

#if KQSW_CHECKSUM
        CERROR ("checksums for forwarded packets not implemented\n");
        LBUG ();
#endif
        /* The router wants this NAL to forward a packet */
        CDEBUG (D_NET, "forwarding [%p] to "LPX64", %d frags %d bytes\n",
                fwd, nid, niov, nob);

        LASSERT (niov > 0);

        ktx = kqswnal_get_idle_tx (fwd, FALSE);
        if (ktx == NULL)        /* can't get txd right now */
                return;         /* fwd will be scheduled when tx desc freed */

        if (nid == kqswnal_lib.ni.nid)          /* gateway is me */
                nid = fwd->kprfd_target_nid;    /* target is final dest */

        if (kqswnal_nid2elanid (nid) < 0) {
                CERROR("Can't forward [%p] to "LPX64": not a peer\n", fwd, nid);
                rc = -EHOSTUNREACH;
                goto failed;
        }

        if (nob > KQSW_NRXMSGBYTES_LARGE) {
                CERROR ("Can't forward [%p] to "LPX64
                        ": size %d bigger than max packet size %ld\n",
                        fwd, nid, nob, (long)KQSW_NRXMSGBYTES_LARGE);
                rc = -EMSGSIZE;
                goto failed;
        }

        if ((kqswnal_fwd_copy_contig || niov > 1) &&
            nob <= KQSW_TX_BUFFER_SIZE)
        {
                /* send from ktx's pre-allocated/mapped contiguous buffer? */
                lib_copy_iov2buf (ktx->ktx_buffer, niov, iov, nob);
                ktx->ktx_iov[0].Base = ktx->ktx_ebuffer; /* already mapped */
                ktx->ktx_iov[0].Len = nob;
                ktx->ktx_niov = 1;

                ktx->ktx_wire_hdr = (ptl_hdr_t *)ktx->ktx_buffer;
        }
        else
        {
                /* zero copy */
                ktx->ktx_niov = 0;              /* no frags mapped yet */
                rc = kqswnal_map_tx_iov (ktx, nob, niov, iov);
                if (rc != 0)
                        goto failed;

                ktx->ktx_wire_hdr = (ptl_hdr_t *)iov[0].iov_base;
        }

        ktx->ktx_nid  = nid;
        ktx->ktx_port = (nob <= (sizeof (ptl_hdr_t) + KQSW_SMALLPAYLOAD)) ?
                        EP_SVC_LARGE_PORTALS_SMALL : EP_SVC_LARGE_PORTALS_LARGE;

        ktx->ktx_forwarding = 1;
        ktx->ktx_args[0] = fwd;

        rc = kqswnal_launch (ktx);
        if (rc == 0)
                return;

 failed:
        LASSERT (rc != 0);
        CERROR ("Failed to forward [%p] to "LPX64": %d\n", fwd, nid, rc);

        kqswnal_put_idle_tx (ktx);
        /* complete now (with failure) */
        kpr_fwd_done (&kqswnal_data.kqn_router, fwd, rc);
}

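/* NB kqswnal_fwd_copy_contig (above) forces even single-fragment forwards
 * through the pre-mapped contiguous buffer; with the default of 0, a single
 * fragment is forwarded zero-copy via kqswnal_map_tx_iov(). */
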
void
kqswnal_fwd_callback (void *arg, int error)
{
        kqswnal_rx_t *krx = (kqswnal_rx_t *)arg;

        /* The router has finished forwarding this packet */

        if (error != 0)
        {
                ptl_hdr_t *hdr = (ptl_hdr_t *)page_address (krx->krx_pages[0]);

                CERROR("Failed to route packet from "LPX64" to "LPX64": %d\n",
                       NTOH__u64(hdr->src_nid), NTOH__u64(hdr->dest_nid), error);
        }

        kqswnal_requeue_rx (krx);
}

void
kqswnal_rx (kqswnal_rx_t *krx)
{
        ptl_hdr_t *hdr = (ptl_hdr_t *) page_address (krx->krx_pages[0]);
        ptl_nid_t  dest_nid = NTOH__u64 (hdr->dest_nid);
        int        nob;
        int        niov;

        if (dest_nid == kqswnal_lib.ni.nid) { /* It's for me :) */
                /* NB krx requeued when lib_parse() calls back kqswnal_recv */
                lib_parse (&kqswnal_lib, hdr, krx);
                return;
        }

#if KQSW_CHECKSUM
        CERROR ("checksums for forwarded packets not implemented\n");
        LBUG ();
#endif
        if (kqswnal_nid2elanid (dest_nid) >= 0)  /* should have gone direct to peer */
        {
                CERROR("dropping packet from "LPX64" for "LPX64
                       ": target is peer\n", NTOH__u64(hdr->src_nid), dest_nid);
                kqswnal_requeue_rx (krx);
                return;
        }

        /* NB forwarding may destroy iov; rebuild every time */
        for (nob = krx->krx_nob, niov = 0; nob > 0; nob -= PAGE_SIZE, niov++)
        {
                LASSERT (niov < krx->krx_npages);
                krx->krx_iov[niov].iov_base = page_address(krx->krx_pages[niov]);
                krx->krx_iov[niov].iov_len  = MIN(PAGE_SIZE, nob);
        }

        kpr_fwd_init (&krx->krx_fwd, dest_nid,
                      krx->krx_nob, niov, krx->krx_iov,
                      kqswnal_fwd_callback, krx);

        kpr_fwd_start (&kqswnal_data.kqn_router, &krx->krx_fwd);
}

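/* NB the code above assumes the portals header always fits within the first
 * receive page (i.e. KQSW_HDR_SIZE <= PAGE_SIZE), which is why it can be
 * read directly via page_address(krx->krx_pages[0]). */
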
/* Receive Interrupt Handler: posts to schedulers */
void
kqswnal_rxhandler(EP_RXD *rxd)
{
        unsigned long flags;
        int           nob    = ep_rxd_len (rxd);
        int           status = ep_rxd_status (rxd);
        kqswnal_rx_t *krx    = (kqswnal_rx_t *)ep_rxd_arg (rxd);

        CDEBUG(D_NET, "kqswnal_rxhandler: rxd %p, krx %p, nob %d, status %d\n",
               rxd, krx, nob, status);

        LASSERT (krx != NULL);

        krx->krx_rxd = rxd;
        krx->krx_nob = nob;

        /* must receive a whole header to be able to parse */
        if (status != EP_SUCCESS || nob < sizeof (ptl_hdr_t))
        {
                /* receives complete with failure when receiver is removed */
                if (kqswnal_data.kqn_shuttingdown)
                        return;

                CERROR("receive failed with status %d, nob %d\n",
                       ep_rxd_status(rxd), nob);
                kqswnal_requeue_rx (krx);
                return;
        }

        atomic_inc (&kqswnal_packets_received);

        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
        if (waitqueue_active (&kqswnal_data.kqn_sched_waitq))
                wake_up (&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
}

#if KQSW_CHECKSUM
void
kqswnal_csum_error (kqswnal_rx_t *krx, int ishdr)
{
        ptl_hdr_t *hdr = (ptl_hdr_t *)page_address (krx->krx_pages[0]);

        CERROR ("%s checksum mismatch %p: dnid "LPX64", snid "LPX64
                ", dpid %d, spid %d, type %d\n",
                ishdr ? "Header" : "Payload", krx,
                NTOH__u64(hdr->dest_nid), NTOH__u64(hdr->src_nid),
                NTOH__u32(hdr->dest_pid), NTOH__u32(hdr->src_pid),
                NTOH__u32(hdr->type));

        switch (NTOH__u32 (hdr->type))
        {
        case PTL_MSG_ACK:
                CERROR("ACK: mlen %d dmd "LPX64"."LPX64" match "LPX64
                       " len %u\n",
                       NTOH__u32(hdr->msg.ack.mlength),
                       hdr->msg.ack.dst_wmd.handle_cookie,
                       hdr->msg.ack.dst_wmd.handle_idx,
                       NTOH__u64(hdr->msg.ack.match_bits),
                       NTOH__u32(hdr->msg.ack.length));
                break;

        case PTL_MSG_PUT:
                CERROR("PUT: ptl %d amd "LPX64"."LPX64" match "LPX64
                       " len %u off %u data "LPX64"\n",
                       NTOH__u32(hdr->msg.put.ptl_index),
                       hdr->msg.put.ack_wmd.handle_cookie,
                       hdr->msg.put.ack_wmd.handle_idx,
                       NTOH__u64(hdr->msg.put.match_bits),
                       NTOH__u32(hdr->msg.put.length),
                       NTOH__u32(hdr->msg.put.offset),
                       hdr->msg.put.hdr_data);
                break;

        case PTL_MSG_GET:
                CERROR ("GET: <>\n");
                break;

        case PTL_MSG_REPLY:
                CERROR ("REPLY: <>\n");
                break;

        default:
                CERROR ("TYPE?: <>\n");
        }
}
#endif

static int
kqswnal_recvmsg (nal_cb_t     *nal,
                 void         *private,
                 lib_msg_t    *cookie,
                 unsigned int  niov,
                 struct iovec *iov,
                 ptl_kiov_t   *kiov,
                 size_t        mlen,
                 size_t        rlen)
{
        kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
        int           page;
        char         *page_ptr;
        int           page_nob;
        char         *iov_ptr;
        int           iov_nob;
        int           frag;
#if KQSW_CHECKSUM
        kqsw_csum_t   senders_csum;
        kqsw_csum_t   payload_csum = 0;
        kqsw_csum_t   hdr_csum = kqsw_csum(0, page_address(krx->krx_pages[0]),
                                           sizeof(ptl_hdr_t));
        size_t        csum_len = mlen;
        int           csum_frags = 0;
        int           csum_nob = 0;
        static atomic_t csum_counter;
        int           csum_verbose = (atomic_read(&csum_counter)%1000001) == 0;

        atomic_inc (&csum_counter);

        memcpy (&senders_csum, ((char *)page_address (krx->krx_pages[0])) +
                sizeof (ptl_hdr_t), sizeof (kqsw_csum_t));
        if (senders_csum != hdr_csum)
                kqswnal_csum_error (krx, 1);
#endif
        CDEBUG(D_NET,"kqswnal_recv, mlen="LPSZ", rlen="LPSZ"\n", mlen, rlen);

        /* What was actually received must be >= payload.  This is an
         * LASSERT, as lib_finalize() doesn't have a completion status. */
        LASSERT (krx->krx_nob >= KQSW_HDR_SIZE + mlen);
        LASSERT (mlen <= rlen);

        /* It must be OK to kmap() if required */
        LASSERT (kiov == NULL || !in_interrupt ());
        /* Either all pages or all vaddrs */
        LASSERT (!(kiov != NULL && iov != NULL));

        if (mlen != 0)
        {
                page     = 0;
                page_ptr = ((char *) page_address(krx->krx_pages[0])) +
                           KQSW_HDR_SIZE;
                page_nob = PAGE_SIZE - KQSW_HDR_SIZE;

                LASSERT (niov > 0);
                if (kiov != NULL) {
                        iov_ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;
                        iov_nob = kiov->kiov_len;
                } else {
                        iov_ptr = iov->iov_base;
                        iov_nob = iov->iov_len;
                }

                for (;;)
                {
                        /* We expect the iov to exactly match mlen */
                        LASSERT (iov_nob <= mlen);

                        frag = MIN (page_nob, iov_nob);
                        memcpy (iov_ptr, page_ptr, frag);
#if KQSW_CHECKSUM
                        payload_csum = kqsw_csum (payload_csum, iov_ptr, frag);
                        csum_nob += frag;
                        csum_frags++;
#endif
                        mlen -= frag;
                        if (mlen == 0)
                                break;

                        page_nob -= frag;
                        if (page_nob != 0)
                                page_ptr += frag;
                        else
                        {
                                page++;
                                LASSERT (page < krx->krx_npages);
                                page_ptr = page_address(krx->krx_pages[page]);
                                page_nob = PAGE_SIZE;
                        }

                        iov_nob -= frag;
                        if (iov_nob != 0)
                                iov_ptr += frag;
                        else if (kiov != NULL) {
                                kunmap (kiov->kiov_page);
                                kiov++;
                                niov--;
                                LASSERT (niov > 0);
                                iov_ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;
                                iov_nob = kiov->kiov_len;
                        } else {
                                iov++;
                                niov--;
                                LASSERT (niov > 0);
                                iov_ptr = iov->iov_base;
                                iov_nob = iov->iov_len;
                        }
                }

                if (kiov != NULL)
                        kunmap (kiov->kiov_page);
        }

#if KQSW_CHECKSUM
        memcpy (&senders_csum, ((char *)page_address (krx->krx_pages[0])) +
                sizeof(ptl_hdr_t) + sizeof(kqsw_csum_t), sizeof(kqsw_csum_t));

        if (csum_len != rlen)
                CERROR("Unable to checksum data in user's buffer\n");
        else if (senders_csum != payload_csum)
                kqswnal_csum_error (krx, 0);

        if (csum_verbose)
                CERROR("hdr csum %lx, payload_csum %lx, csum_frags %d, "
                       "csum_nob %d\n",
                       hdr_csum, payload_csum, csum_frags, csum_nob);
#endif
        lib_finalize(nal, private, cookie);

        kqswnal_requeue_rx (krx);

        return (rlen);
}

static int
kqswnal_recv(nal_cb_t *nal, void *private, lib_msg_t *cookie,
             unsigned int niov, struct iovec *iov,
             size_t mlen, size_t rlen)
{
        return (kqswnal_recvmsg (nal, private, cookie, niov, iov, NULL, mlen, rlen));
}

static int
kqswnal_recv_pages (nal_cb_t *nal, void *private, lib_msg_t *cookie,
                    unsigned int niov, ptl_kiov_t *kiov,
                    size_t mlen, size_t rlen)
{
        return (kqswnal_recvmsg (nal, private, cookie, niov, NULL, kiov, mlen, rlen));
}

int
kqswnal_thread_start (int (*fn)(void *arg), void *arg)
{
        long pid = kernel_thread (fn, arg, 0);

        if (pid < 0)
                return ((int)pid);

        atomic_inc (&kqswnal_data.kqn_nthreads);
        return (0);
}

void
kqswnal_thread_fini (void)
{
        atomic_dec (&kqswnal_data.kqn_nthreads);
}

int
kqswnal_scheduler (void *arg)
{
        kqswnal_rx_t    *krx;
        kqswnal_tx_t    *ktx;
        kpr_fwd_desc_t  *fwd;
        unsigned long    flags;
        int              rc;
        int              counter = 0;
        int              did_something;

        kportal_daemonize ("kqswnal_sched");
        kportal_blockallsigs ();

        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        while (!kqswnal_data.kqn_shuttingdown)
        {
                did_something = FALSE;

                if (!list_empty (&kqswnal_data.kqn_readyrxds))
                {
                        krx = list_entry(kqswnal_data.kqn_readyrxds.next,
                                         kqswnal_rx_t, krx_list);
                        list_del (&krx->krx_list);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        kqswnal_rx (krx);

                        did_something = TRUE;
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
                }

                if (!list_empty (&kqswnal_data.kqn_delayedtxds))
                {
                        ktx = list_entry(kqswnal_data.kqn_delayedtxds.next,
                                         kqswnal_tx_t, ktx_list);
                        list_del_init (&ktx->ktx_delayed_list);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        rc = kqswnal_launch (ktx);
                        if (rc != 0)            /* failed: ktx_nid down? */
                        {
                                CERROR("Failed delayed transmit to "LPX64
                                       ": %d\n", ktx->ktx_nid, rc);
                                kqswnal_tx_done (ktx, rc);
                        }

                        did_something = TRUE;
                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }

                if (!list_empty (&kqswnal_data.kqn_delayedfwds))
                {
                        fwd = list_entry (kqswnal_data.kqn_delayedfwds.next,
                                          kpr_fwd_desc_t, kprfd_list);
                        list_del (&fwd->kprfd_list);
                        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);

                        kqswnal_fwd_packet (NULL, fwd);

                        did_something = TRUE;
                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }

                /* nothing to do or hogging CPU */
                if (!did_something || counter++ == KQSW_RESCHED) {
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);
                        counter = 0;

                        if (!did_something) {
                                rc = wait_event_interruptible (kqswnal_data.kqn_sched_waitq,
                                                               kqswnal_data.kqn_shuttingdown ||
                                                               !list_empty(&kqswnal_data.kqn_readyrxds) ||
                                                               !list_empty(&kqswnal_data.kqn_delayedtxds) ||
                                                               !list_empty(&kqswnal_data.kqn_delayedfwds));
                        } else if (current->need_resched)
                                schedule ();

                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }
        }

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);

        kqswnal_thread_fini ();
        return (0);
}

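/* NB the scheduler drops the lock and yields every KQSW_RESCHED iterations
 * even when work remains queued, so a busy NAL cannot monopolise the CPU;
 * when genuinely idle it sleeps interruptibly on kqn_sched_waitq. */
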
nal_cb_t kqswnal_lib =
{
        nal_data:       &kqswnal_data,          /* NAL private data */
        cb_send:        kqswnal_send,
        cb_send_pages:  kqswnal_send_pages,
        cb_recv:        kqswnal_recv,
        cb_recv_pages:  kqswnal_recv_pages,
        cb_read:        kqswnal_read,
        cb_write:       kqswnal_write,
        cb_malloc:      kqswnal_malloc,
        cb_free:        kqswnal_free,
        cb_printf:      kqswnal_printf,
        cb_cli:         kqswnal_cli,
        cb_sti:         kqswnal_sti,
        cb_dist:        kqswnal_dist
};