/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 * Copyright (C) 2002, Lawrence Livermore National Labs (LLNL)
 *   W. Marcus Miller - Based on ksocknal
 *
 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "qswnal.h"

atomic_t kqswnal_packets_launched;
atomic_t kqswnal_packets_transmitted;
atomic_t kqswnal_packets_received;

/*
 *  LIB functions follow
 */
static int
kqswnal_read(nal_cb_t *nal, void *private, void *dst_addr, user_ptr src_addr,
             size_t len)
{
        CDEBUG (D_NET, LPX64": reading "LPSZ" bytes from %p -> %p\n",
                nal->ni.nid, len, src_addr, dst_addr );
        memcpy( dst_addr, src_addr, len );

        return (0);
}
static int
kqswnal_write(nal_cb_t *nal, void *private, user_ptr dst_addr, void *src_addr,
              size_t len)
{
        CDEBUG (D_NET, LPX64": writing "LPSZ" bytes from %p -> %p\n",
                nal->ni.nid, len, src_addr, dst_addr );
        memcpy( dst_addr, src_addr, len );

        return (0);
}
static void *
kqswnal_malloc(nal_cb_t *nal, size_t len)
{
        void *buf;

        PORTAL_ALLOC(buf, len);
        return (buf);
}
static void
kqswnal_free(nal_cb_t *nal, void *buf, size_t len)
{
        PORTAL_FREE(buf, len);
}
static void
kqswnal_printf (nal_cb_t * nal, const char *fmt, ...)
{
        va_list  ap;
        char     msg[256];

        va_start (ap, fmt);
        vsnprintf (msg, sizeof (msg), fmt, ap);         /* sprint safely */
        va_end (ap);

        msg[sizeof (msg) - 1] = 0;                      /* ensure terminated */

        CDEBUG (D_NET, "%s", msg);
}
static void
kqswnal_cli(nal_cb_t *nal, unsigned long *flags)
{
        kqswnal_data_t *data = nal->nal_data;

        spin_lock_irqsave(&data->kqn_statelock, *flags);
}
static void
kqswnal_sti(nal_cb_t *nal, unsigned long *flags)
{
        kqswnal_data_t *data = nal->nal_data;

        spin_unlock_irqrestore(&data->kqn_statelock, *flags);
}
static int
kqswnal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist)
{
        if (nid == nal->ni.nid)
                *dist = 0;                      /* it's me */
        else if (kqswnal_nid2elanid (nid) >= 0)
                *dist = 1;                      /* it's my peer */
        else
                *dist = 2;                      /* via router */

        return (0);
}
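/* Tell the portals router that a peer has gone down, timestamping the
 * failure with when the offending tx was launched (wall-clock seconds,
 * reconstructed from the elapsed jiffies). */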
void
kqswnal_notify_peer_down(kqswnal_tx_t *ktx)
{
        struct timeval now;
        time_t         then;

        do_gettimeofday (&now);

        then = now.tv_sec - (jiffies - ktx->ktx_launchtime)/HZ;

        kpr_notify(&kqswnal_data.kqn_router, ktx->ktx_nid, 0, then);
}
void
kqswnal_unmap_tx (kqswnal_tx_t *ktx)
{
        if (ktx->ktx_nmappedpages == 0)
                return;

        CDEBUG (D_NET, "%p[%d] unloading pages %d for %d\n",
                ktx, ktx->ktx_niov, ktx->ktx_basepage, ktx->ktx_nmappedpages);

        LASSERT (ktx->ktx_nmappedpages <= ktx->ktx_npages);
        LASSERT (ktx->ktx_basepage + ktx->ktx_nmappedpages <=
                 kqswnal_data.kqn_eptxdmahandle->NumDvmaPages);

        elan3_dvma_unload(kqswnal_data.kqn_epdev->DmaState,
                          kqswnal_data.kqn_eptxdmahandle,
                          ktx->ktx_basepage, ktx->ktx_nmappedpages);
        ktx->ktx_nmappedpages = 0;
}
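/* Map payload fragments into Elan DVMA space so the NIC can DMA them
 * directly.  kqswnal_map_tx_kiov() handles page fragments (kmapping
 * each one, since the EKC interface wants kernel vaddrs) and
 * kqswnal_map_tx_iov() handles vaddr fragments; both merge fragments
 * that land contiguously in Elan VM into a single tx frag. */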
int
kqswnal_map_tx_kiov (kqswnal_tx_t *ktx, int nob, int niov, ptl_kiov_t *kiov)
{
        int       nfrags    = ktx->ktx_niov;
        const int maxfrags  = sizeof (ktx->ktx_iov)/sizeof (ktx->ktx_iov[0]);
        int       nmapped   = ktx->ktx_nmappedpages;
        int       maxmapped = ktx->ktx_npages;
        uint32_t  basepage  = ktx->ktx_basepage + nmapped;
        char     *ptr;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags <= maxfrags);
        LASSERT (niov > 0);
        LASSERT (nob > 0);

        do {
                int  fraglen = kiov->kiov_len;

                /* nob exactly spans the iovs */
                LASSERT (fraglen <= nob);
                /* each frag fits in a page */
                LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);

                nmapped++;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == maxfrags) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               maxfrags);
                        return (-EMSGSIZE);
                }

                /* XXX this is really crap, but we'll have to kmap until
                 * EKC has a page (rather than vaddr) mapping interface */

                ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, page %d, %d total\n",
                       ktx, nfrags, ptr, fraglen, basepage, nmapped);

                elan3_dvma_kaddr_load (kqswnal_data.kqn_epdev->DmaState,
                                       kqswnal_data.kqn_eptxdmahandle,
                                       ptr, fraglen,
                                       basepage, &ktx->ktx_iov[nfrags].Base);

                kunmap (kiov->kiov_page);

                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                if (nfrags > 0 &&                /* previous frag mapped */
                    ktx->ktx_iov[nfrags].Base == /* contiguous with this one */
                    (ktx->ktx_iov[nfrags-1].Base + ktx->ktx_iov[nfrags-1].Len))
                        /* just extend previous */
                        ktx->ktx_iov[nfrags - 1].Len += fraglen;
                else {
                        ktx->ktx_iov[nfrags].Len = fraglen;
                        nfrags++;                /* new frag */
                }

                basepage++;
                kiov++;
                niov--;
                nob -= fraglen;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        ktx->ktx_niov = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_niov, ktx->ktx_nmappedpages);

        return (0);
}
int
kqswnal_map_tx_iov (kqswnal_tx_t *ktx, int nob, int niov, struct iovec *iov)
{
        int       nfrags    = ktx->ktx_niov;
        const int maxfrags  = sizeof (ktx->ktx_iov)/sizeof (ktx->ktx_iov[0]);
        int       nmapped   = ktx->ktx_nmappedpages;
        int       maxmapped = ktx->ktx_npages;
        uint32_t  basepage  = ktx->ktx_basepage + nmapped;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags <= maxfrags);
        LASSERT (niov > 0);
        LASSERT (nob > 0);

        do {
                int  fraglen = iov->iov_len;
                long npages  = kqswnal_pages_spanned (iov->iov_base, fraglen);

                /* nob exactly spans the iovs */
                LASSERT (fraglen <= nob);

                nmapped += npages;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == maxfrags) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               maxfrags);
                        return (-EMSGSIZE);
                }

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, pages %d for %ld, %d total\n",
                       ktx, nfrags, iov->iov_base, fraglen, basepage, npages,
                       nmapped);

                elan3_dvma_kaddr_load (kqswnal_data.kqn_epdev->DmaState,
                                       kqswnal_data.kqn_eptxdmahandle,
                                       iov->iov_base, fraglen,
                                       basepage, &ktx->ktx_iov[nfrags].Base);
                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                if (nfrags > 0 &&                /* previous frag mapped */
                    ktx->ktx_iov[nfrags].Base == /* contiguous with this one */
                    (ktx->ktx_iov[nfrags-1].Base + ktx->ktx_iov[nfrags-1].Len))
                        /* just extend previous */
                        ktx->ktx_iov[nfrags - 1].Len += fraglen;
                else {
                        ktx->ktx_iov[nfrags].Len = fraglen;
                        nfrags++;                /* new frag */
                }

                basepage += npages;
                iov++;
                niov--;
                nob -= fraglen;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        ktx->ktx_niov = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_niov, ktx->ktx_nmappedpages);

        return (0);
}
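/* Return a tx descriptor to its idle pool.  Descriptors reserved for
 * non-blocking sends go back on kqn_nblk_idletxds; the rest return to
 * kqn_idletxds, where any forwarded packet blocked waiting for a
 * descriptor is dequeued and rescheduled, and waiters are woken. */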
void
kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
{
        kpr_fwd_desc_t *fwd = NULL;
        unsigned long   flags;

        kqswnal_unmap_tx (ktx);                 /* release temporary mappings */

        spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);

        list_del (&ktx->ktx_list);              /* take off active list */

        if (ktx->ktx_isnblk) {
                /* reserved for non-blocking tx */
                list_add (&ktx->ktx_list, &kqswnal_data.kqn_nblk_idletxds);
                spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
                return;
        }

        list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);

        /* anything blocking for a tx descriptor? */
        if (!list_empty(&kqswnal_data.kqn_idletxd_fwdq)) /* forwarded packet? */
        {
                CDEBUG(D_NET,"wakeup fwd\n");

                fwd = list_entry (kqswnal_data.kqn_idletxd_fwdq.next,
                                  kpr_fwd_desc_t, kprfd_list);
                list_del (&fwd->kprfd_list);
        }

        wake_up (&kqswnal_data.kqn_idletxd_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

        if (fwd == NULL)
                return;

        /* schedule packet for forwarding again */
        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        list_add_tail (&fwd->kprfd_list, &kqswnal_data.kqn_delayedfwds);
        wake_up (&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
}
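/* Grab an idle tx descriptor.  A forwarded packet (fwd != NULL) never
 * blocks; it queues on kqn_idletxd_fwdq until kqswnal_put_idle_tx()
 * frees a descriptor.  Local sends either block on the waitq or dip
 * into the reserved non-blocking pool, depending on 'may_block'. */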
kqswnal_tx_t *
kqswnal_get_idle_tx (kpr_fwd_desc_t *fwd, int may_block)
{
        unsigned long  flags;
        kqswnal_tx_t  *ktx = NULL;

        for (;;) {
                spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);

                /* "normal" descriptor is free */
                if (!list_empty (&kqswnal_data.kqn_idletxds)) {
                        ktx = list_entry (kqswnal_data.kqn_idletxds.next,
                                          kqswnal_tx_t, ktx_list);
                        break;
                }

                /* "normal" descriptor pool is empty */

                if (fwd != NULL) { /* forwarded packet => queue for idle txd */
                        CDEBUG (D_NET, "blocked fwd [%p]\n", fwd);
                        list_add_tail (&fwd->kprfd_list,
                                       &kqswnal_data.kqn_idletxd_fwdq);
                        break;
                }

                /* doing a local transmit */
                if (!may_block) {
                        if (list_empty (&kqswnal_data.kqn_nblk_idletxds)) {
                                CERROR ("intr tx desc pool exhausted\n");
                                break;
                        }

                        ktx = list_entry (kqswnal_data.kqn_nblk_idletxds.next,
                                          kqswnal_tx_t, ktx_list);
                        break;
                }

                /* block for idle tx */

                spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

                CDEBUG (D_NET, "blocking for tx desc\n");
                wait_event (kqswnal_data.kqn_idletxd_waitq,
                            !list_empty (&kqswnal_data.kqn_idletxds));
        }

        if (ktx != NULL) {
                list_del (&ktx->ktx_list);
                list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
                ktx->ktx_launcher = current->pid;
        }

        spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

        /* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
        LASSERT (ktx == NULL || ktx->ktx_nmappedpages == 0);

        return (ktx);
}
void
kqswnal_tx_done (kqswnal_tx_t *ktx, int error)
{
        if (ktx->ktx_forwarding)            /* router asked me to forward this packet */
                kpr_fwd_done (&kqswnal_data.kqn_router,
                              (kpr_fwd_desc_t *)ktx->ktx_args[0], error);
        else                                /* packet sourced locally */
                lib_finalize (&kqswnal_lib, ktx->ktx_args[0],
                              (lib_msg_t *)ktx->ktx_args[1]);

        kqswnal_put_idle_tx (ktx);
}
static void
kqswnal_txhandler(EP_TXD *txd, void *arg, int status)
{
        kqswnal_tx_t *ktx = (kqswnal_tx_t *)arg;

        LASSERT (txd != NULL);
        LASSERT (ktx != NULL);

        CDEBUG(D_NET, "txd %p, arg %p status %d\n", txd, arg, status);

        if (status == EP_SUCCESS)
                atomic_inc (&kqswnal_packets_transmitted);

        if (status != EP_SUCCESS)
        {
                CERROR ("Tx completion to "LPX64" failed: %d\n",
                        ktx->ktx_nid, status);

                kqswnal_notify_peer_down(ktx);
        }

        kqswnal_tx_done (ktx, status);
}
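/* Hand a fully set-up tx to the Elan EP layer.  ENOMEM means no EP txd
 * was available in interrupt context, so the tx is queued on
 * kqn_delayedtxds for the scheduler thread to retry from process
 * context. */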
int
kqswnal_launch (kqswnal_tx_t *ktx)
{
        /* Don't block for transmit descriptor if we're in interrupt context */
        int            attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
        int            dest = kqswnal_nid2elanid (ktx->ktx_nid);
        unsigned long  flags;
        int            rc;

        ktx->ktx_launchtime = jiffies;

        LASSERT (dest >= 0);                    /* must be a peer */
        rc = ep_transmit_large(kqswnal_data.kqn_eptx, dest,
                               ktx->ktx_port, attr, kqswnal_txhandler,
                               ktx, ktx->ktx_iov, ktx->ktx_niov);
        switch (rc) {
        case 0: /* success */
                atomic_inc (&kqswnal_packets_launched);
                return (0);

        case ENOMEM: /* can't allocate ep txd => queue for later */
                LASSERT (in_interrupt());

                spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

                list_add_tail (&ktx->ktx_delayed_list, &kqswnal_data.kqn_delayedtxds);
                wake_up (&kqswnal_data.kqn_sched_waitq);

                spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
                return (0);

        default: /* fatal error */
                CERROR ("Tx to "LPX64" failed: %d\n", ktx->ktx_nid, rc);
                kqswnal_notify_peer_down(ktx);
                return (rc);
        }
}
static char *
hdr_type_string (ptl_hdr_t *hdr)
{
        switch (hdr->type) {
        case PTL_MSG_ACK:   return ("ACK");
        case PTL_MSG_PUT:   return ("PUT");
        case PTL_MSG_GET:   return ("GET");
        case PTL_MSG_REPLY: return ("REPLY");
        default:            return ("<UNKNOWN>");
        }
}
static void
kqswnal_cerror_hdr(ptl_hdr_t * hdr)
{
        char *type_str = hdr_type_string (hdr);

        CERROR("P3 Header at %p of type %s\n", hdr, type_str);
        CERROR("    From nid/pid "LPU64"/%u\n", NTOH__u64(hdr->src_nid),
               NTOH__u32(hdr->src_pid));
        CERROR("    To nid/pid "LPU64"/%u\n", NTOH__u64(hdr->dest_nid),
               NTOH__u32(hdr->dest_pid));

        switch (NTOH__u32(hdr->type)) {
        case PTL_MSG_PUT:
                CERROR("    Ptl index %d, ack md "LPX64"."LPX64", "
                       "match bits "LPX64"\n",
                       NTOH__u32 (hdr->msg.put.ptl_index),
                       hdr->msg.put.ack_wmd.wh_interface_cookie,
                       hdr->msg.put.ack_wmd.wh_object_cookie,
                       NTOH__u64 (hdr->msg.put.match_bits));
                CERROR("    Length %d, offset %d, hdr data "LPX64"\n",
                       NTOH__u32(PTL_HDR_LENGTH(hdr)),
                       NTOH__u32(hdr->msg.put.offset),
                       hdr->msg.put.hdr_data);
                break;

        case PTL_MSG_GET:
                CERROR("    Ptl index %d, return md "LPX64"."LPX64", "
                       "match bits "LPX64"\n",
                       NTOH__u32 (hdr->msg.get.ptl_index),
                       hdr->msg.get.return_wmd.wh_interface_cookie,
                       hdr->msg.get.return_wmd.wh_object_cookie,
                       hdr->msg.get.match_bits);
                CERROR("    Length %d, src offset %d\n",
                       NTOH__u32 (hdr->msg.get.sink_length),
                       NTOH__u32 (hdr->msg.get.src_offset));
                break;

        case PTL_MSG_ACK:
                CERROR("    dst md "LPX64"."LPX64", manipulated length %d\n",
                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
                       hdr->msg.ack.dst_wmd.wh_object_cookie,
                       NTOH__u32 (hdr->msg.ack.mlength));
                break;

        case PTL_MSG_REPLY:
                CERROR("    dst md "LPX64"."LPX64", length %d\n",
                       hdr->msg.reply.dst_wmd.wh_interface_cookie,
                       hdr->msg.reply.dst_wmd.wh_object_cookie,
                       NTOH__u32 (PTL_HDR_LENGTH(hdr)));
                break;
        }
}                               /* end of print_hdr() */
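/* Common send path.  The portals header is copied into the tx's
 * pre-mapped buffer (frag 0); payloads up to KQSW_TX_MAXCONTIG bytes
 * are copied in right after it to make one contiguous message, while
 * anything bigger is mapped in place with kqswnal_map_tx_(k)iov(). */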
static int
kqswnal_sendmsg (nal_cb_t     *nal,
                 void         *private,
                 lib_msg_t    *cookie,
                 ptl_hdr_t    *hdr,
                 int           type,
                 ptl_nid_t     nid,
                 ptl_pid_t     pid,
                 unsigned int  payload_niov,
                 struct iovec *payload_iov,
                 ptl_kiov_t   *payload_kiov,
                 size_t        payload_nob)
{
        kqswnal_tx_t *ktx;
        int           rc;
        ptl_nid_t     gatewaynid;
#if KQSW_CHECKSUM
        int           i;
        kqsw_csum_t   csum;
        int           sumnob;
#endif

        CDEBUG(D_NET, "sending "LPSZ" bytes in %d frags to nid: "LPX64
               " pid %u\n", payload_nob, payload_niov, nid, pid);

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= PTL_MD_MAX_IOV);

        /* It must be OK to kmap() if required */
        LASSERT (payload_kiov == NULL || !in_interrupt ());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        if (payload_nob > KQSW_MAXPAYLOAD) {
                CERROR ("request exceeds MTU size "LPSZ" (max %u).\n",
                        payload_nob, KQSW_MAXPAYLOAD);
                return (PTL_FAIL);
        }

        if (kqswnal_nid2elanid (nid) < 0) {     /* Can't send direct: find gateway? */
                rc = kpr_lookup (&kqswnal_data.kqn_router, nid,
                                 sizeof (ptl_hdr_t) + payload_nob, &gatewaynid);
                if (rc != 0) {
                        CERROR("Can't route to "LPX64": router error %d\n",
                               nid, rc);
                        return (PTL_FAIL);
                }
                if (kqswnal_nid2elanid (gatewaynid) < 0) {
                        CERROR("Bad gateway "LPX64" for "LPX64"\n",
                               gatewaynid, nid);
                        return (PTL_FAIL);
                }
                nid = gatewaynid;
        }

        /* I may not block for a transmit descriptor if I might block the
         * receiver, or an interrupt handler. */
        ktx = kqswnal_get_idle_tx(NULL, !(type == PTL_MSG_ACK ||
                                          type == PTL_MSG_REPLY ||
                                          in_interrupt()));
        if (ktx == NULL) {
                kqswnal_cerror_hdr (hdr);
                return (PTL_NOSPACE);
        }

        memcpy (ktx->ktx_buffer, hdr, sizeof (*hdr)); /* copy hdr from caller's stack */
        ktx->ktx_wire_hdr = (ptl_hdr_t *)ktx->ktx_buffer;

#if KQSW_CHECKSUM
        csum = kqsw_csum (0, (char *)hdr, sizeof (*hdr));
        memcpy (ktx->ktx_buffer + sizeof (*hdr), &csum, sizeof (csum));
        for (csum = 0, i = 0, sumnob = payload_nob; sumnob > 0; i++) {
                if (payload_kiov != NULL) {
                        ptl_kiov_t *kiov = &payload_kiov[i];
                        char       *addr = ((char *)kmap (kiov->kiov_page)) +
                                           kiov->kiov_offset;

                        csum = kqsw_csum (csum, addr, MIN (sumnob, kiov->kiov_len));
                        sumnob -= kiov->kiov_len;
                        kunmap (kiov->kiov_page);
                } else {
                        struct iovec *iov = &payload_iov[i];

                        csum = kqsw_csum (csum, iov->iov_base, MIN (sumnob, iov->iov_len));
                        sumnob -= iov->iov_len;
                }
        }
        memcpy (ktx->ktx_buffer + sizeof (*hdr) + sizeof (csum), &csum, sizeof (csum));
#endif

        /* Set up first frag from pre-mapped buffer (it's at least the
         * portals header) */
        ktx->ktx_iov[0].Base = ktx->ktx_ebuffer;
        ktx->ktx_iov[0].Len = KQSW_HDR_SIZE;
        ktx->ktx_niov = 1;

        if (payload_nob > 0) { /* got some payload (something more to do) */
                /* make a single contiguous message? */
                if (payload_nob <= KQSW_TX_MAXCONTIG) {
                        /* copy payload to ktx_buffer, immediately after hdr */
                        if (payload_kiov != NULL)
                                lib_copy_kiov2buf (ktx->ktx_buffer + KQSW_HDR_SIZE,
                                                   payload_niov, payload_kiov, payload_nob);
                        else
                                lib_copy_iov2buf (ktx->ktx_buffer + KQSW_HDR_SIZE,
                                                  payload_niov, payload_iov, payload_nob);
                        /* first frag includes payload */
                        ktx->ktx_iov[0].Len += payload_nob;
                } else {
                        if (payload_kiov != NULL)
                                rc = kqswnal_map_tx_kiov (ktx, payload_nob,
                                                          payload_niov, payload_kiov);
                        else
                                rc = kqswnal_map_tx_iov (ktx, payload_nob,
                                                         payload_niov, payload_iov);
                        if (rc != 0) {
                                kqswnal_put_idle_tx (ktx);
                                return (PTL_FAIL);
                        }
                }
        }

        ktx->ktx_nid     = nid;
        ktx->ktx_port    = (payload_nob <= KQSW_SMALLPAYLOAD) ?
                           EP_SVC_LARGE_PORTALS_SMALL : EP_SVC_LARGE_PORTALS_LARGE;
        ktx->ktx_forwarding = 0;                /* => lib_finalize() on completion */
        ktx->ktx_args[0] = private;
        ktx->ktx_args[1] = cookie;

        rc = kqswnal_launch (ktx);
        if (rc != 0) {                          /* failed? */
                CERROR ("Failed to send packet to "LPX64": %d\n", nid, rc);
                kqswnal_put_idle_tx (ktx);
                return (PTL_FAIL);
        }

        CDEBUG(D_NET, "sent "LPSZ" bytes to "LPX64"\n", payload_nob, nid);
        return (PTL_OK);
}
static int
kqswnal_send (nal_cb_t     *nal,
              void         *private,
              lib_msg_t    *cookie,
              ptl_hdr_t    *hdr,
              int           type,
              ptl_nid_t     nid,
              ptl_pid_t     pid,
              unsigned int  payload_niov,
              struct iovec *payload_iov,
              size_t        payload_nob)
{
        return (kqswnal_sendmsg (nal, private, cookie, hdr, type, nid, pid,
                                 payload_niov, payload_iov, NULL, payload_nob));
}
static int
kqswnal_send_pages (nal_cb_t     *nal,
                    void         *private,
                    lib_msg_t    *cookie,
                    ptl_hdr_t    *hdr,
                    int           type,
                    ptl_nid_t     nid,
                    ptl_pid_t     pid,
                    unsigned int  payload_niov,
                    ptl_kiov_t   *payload_kiov,
                    size_t        payload_nob)
{
        return (kqswnal_sendmsg (nal, private, cookie, hdr, type, nid, pid,
                                 payload_niov, NULL, payload_kiov, payload_nob));
}
int kqswnal_fwd_copy_contig = 0;
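/* Router callback: forward a packet received for a non-local nid out
 * over Elan.  Small or multi-frag packets are copied into the tx's
 * pre-mapped contiguous buffer; otherwise the fragments are mapped for
 * zero-copy transmission. */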
void
kqswnal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
{
        int             rc;
        kqswnal_tx_t   *ktx;
        struct iovec   *iov  = fwd->kprfd_iov;
        int             niov = fwd->kprfd_niov;
        int             nob  = fwd->kprfd_nob;
        ptl_nid_t       nid  = fwd->kprfd_gateway_nid;

#if KQSW_CHECKSUM
        CERROR ("checksums for forwarded packets not implemented\n");
        LBUG ();
#endif
        /* The router wants this NAL to forward a packet */
        CDEBUG (D_NET, "forwarding [%p] to "LPX64", %d frags %d bytes\n",
                fwd, nid, niov, nob);

        LASSERT (niov > 0);

        ktx = kqswnal_get_idle_tx (fwd, FALSE);
        if (ktx == NULL)        /* can't get txd right now */
                return;         /* fwd will be scheduled when tx desc freed */

        if (nid == kqswnal_lib.ni.nid)          /* gateway is me */
                nid = fwd->kprfd_target_nid;    /* target is final dest */

        if (kqswnal_nid2elanid (nid) < 0) {
                CERROR("Can't forward [%p] to "LPX64": not a peer\n", fwd, nid);
                rc = -EHOSTUNREACH;
                goto failed;
        }

        if (nob > KQSW_NRXMSGBYTES_LARGE) {
                CERROR ("Can't forward [%p] to "LPX64
                        ": size %d bigger than max packet size %ld\n",
                        fwd, nid, nob, (long)KQSW_NRXMSGBYTES_LARGE);
                rc = -EMSGSIZE;
                goto failed;
        }

        if ((kqswnal_fwd_copy_contig || niov > 1) &&
            nob <= KQSW_TX_BUFFER_SIZE)
        {
                /* send from ktx's pre-allocated/mapped contiguous buffer? */
                lib_copy_iov2buf (ktx->ktx_buffer, niov, iov, nob);
                ktx->ktx_iov[0].Base = ktx->ktx_ebuffer; /* already mapped */
                ktx->ktx_iov[0].Len = nob;
                ktx->ktx_niov = 1;

                ktx->ktx_wire_hdr = (ptl_hdr_t *)ktx->ktx_buffer;
        }
        else
        {
                /* zero copy */
                ktx->ktx_niov = 0;              /* no frags mapped yet */
                rc = kqswnal_map_tx_iov (ktx, nob, niov, iov);
                if (rc != 0)
                        goto failed;

                ktx->ktx_wire_hdr = (ptl_hdr_t *)iov[0].iov_base;
        }

        ktx->ktx_nid  = nid;
        ktx->ktx_port = (nob <= (sizeof (ptl_hdr_t) + KQSW_SMALLPAYLOAD)) ?
                        EP_SVC_LARGE_PORTALS_SMALL : EP_SVC_LARGE_PORTALS_LARGE;
        ktx->ktx_forwarding = 1;
        ktx->ktx_args[0] = fwd;

        rc = kqswnal_launch (ktx);
        if (rc == 0)
                return;

 failed:
        LASSERT (rc != 0);
        CERROR ("Failed to forward [%p] to "LPX64": %d\n", fwd, nid, rc);

        kqswnal_put_idle_tx (ktx);
        /* complete now (with failure) */
        kpr_fwd_done (&kqswnal_data.kqn_router, fwd, rc);
}
void
kqswnal_fwd_callback (void *arg, int error)
{
        kqswnal_rx_t *krx = (kqswnal_rx_t *)arg;

        /* The router has finished forwarding this packet */

        if (error != 0)
        {
                ptl_hdr_t *hdr = (ptl_hdr_t *)page_address (krx->krx_pages[0]);

                CERROR("Failed to route packet from "LPX64" to "LPX64": %d\n",
                       NTOH__u64(hdr->src_nid), NTOH__u64(hdr->dest_nid), error);
        }

        kqswnal_requeue_rx (krx);
}
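/* Dispatch a completed receive: parse it locally if it's addressed to
 * this nid, otherwise rebuild the page iov and hand the packet to the
 * router for forwarding. */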
void
kqswnal_rx (kqswnal_rx_t *krx)
{
        ptl_hdr_t *hdr = (ptl_hdr_t *) page_address (krx->krx_pages[0]);
        ptl_nid_t  dest_nid = NTOH__u64 (hdr->dest_nid);
        int        nob;
        int        niov;

        if (dest_nid == kqswnal_lib.ni.nid) { /* It's for me :) */
                /* NB krx requeued when lib_parse() calls back kqswnal_recv */
                lib_parse (&kqswnal_lib, hdr, krx);
                return;
        }

#if KQSW_CHECKSUM
        CERROR ("checksums for forwarded packets not implemented\n");
        LBUG ();
#endif
        if (kqswnal_nid2elanid (dest_nid) >= 0)  /* should have gone direct to peer */
        {
                CERROR("dropping packet from "LPX64" for "LPX64
                       ": target is peer\n", NTOH__u64(hdr->src_nid), dest_nid);
                kqswnal_requeue_rx (krx);
                return;
        }

        /* NB forwarding may destroy iov; rebuild every time */
        for (nob = krx->krx_nob, niov = 0; nob > 0; nob -= PAGE_SIZE, niov++)
        {
                LASSERT (niov < krx->krx_npages);
                krx->krx_iov[niov].iov_base = page_address(krx->krx_pages[niov]);
                krx->krx_iov[niov].iov_len  = MIN(PAGE_SIZE, nob);
        }

        kpr_fwd_init (&krx->krx_fwd, dest_nid,
                      krx->krx_nob, niov, krx->krx_iov,
                      kqswnal_fwd_callback, krx);

        kpr_fwd_start (&kqswnal_data.kqn_router, &krx->krx_fwd);
}
/* Receive Interrupt Handler: posts to schedulers */
void
kqswnal_rxhandler(EP_RXD *rxd)
{
        unsigned long flags;
        int           nob    = ep_rxd_len (rxd);
        int           status = ep_rxd_status (rxd);
        kqswnal_rx_t *krx    = (kqswnal_rx_t *)ep_rxd_arg (rxd);

        CDEBUG(D_NET, "kqswnal_rxhandler: rxd %p, krx %p, nob %d, status %d\n",
               rxd, krx, nob, status);

        LASSERT (krx != NULL);

        /* stash the rxd and received size for the recv/forward paths */
        krx->krx_rxd = rxd;
        krx->krx_nob = nob;

        /* must receive a whole header to be able to parse */
        if (status != EP_SUCCESS || nob < sizeof (ptl_hdr_t))
        {
                /* receives complete with failure when receiver is removed */
                if (kqswnal_data.kqn_shuttingdown)
                        return;

                CERROR("receive status failed with status %d nob %d\n",
                       ep_rxd_status(rxd), nob);
                kqswnal_requeue_rx (krx);
                return;
        }

        atomic_inc (&kqswnal_packets_received);

        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
        wake_up (&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
}
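/* Diagnostic dump for a checksum mismatch (header or payload): print
 * enough of the offending portals header to identify the message. */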
#if KQSW_CHECKSUM
void
kqswnal_csum_error (kqswnal_rx_t *krx, int ishdr)
{
        ptl_hdr_t *hdr = (ptl_hdr_t *)page_address (krx->krx_pages[0]);

        CERROR ("%s checksum mismatch %p: dnid "LPX64", snid "LPX64
                ", dpid %d, spid %d, type %d\n",
                ishdr ? "Header" : "Payload", krx,
                NTOH__u64(hdr->dest_nid), NTOH__u64(hdr->src_nid),
                NTOH__u32(hdr->dest_pid), NTOH__u32(hdr->src_pid),
                NTOH__u32(hdr->type));

        switch (NTOH__u32 (hdr->type))
        {
        case PTL_MSG_ACK:
                CERROR("ACK: mlen %d dmd "LPX64"."LPX64" match "LPX64
                       " len %u\n",
                       NTOH__u32(hdr->msg.ack.mlength),
                       hdr->msg.ack.dst_wmd.handle_cookie,
                       hdr->msg.ack.dst_wmd.handle_idx,
                       NTOH__u64(hdr->msg.ack.match_bits),
                       NTOH__u32(hdr->msg.ack.length));
                break;

        case PTL_MSG_PUT:
                CERROR("PUT: ptl %d amd "LPX64"."LPX64" match "LPX64
                       " len %u off %u data "LPX64"\n",
                       NTOH__u32(hdr->msg.put.ptl_index),
                       hdr->msg.put.ack_wmd.handle_cookie,
                       hdr->msg.put.ack_wmd.handle_idx,
                       NTOH__u64(hdr->msg.put.match_bits),
                       NTOH__u32(hdr->msg.put.length),
                       NTOH__u32(hdr->msg.put.offset),
                       hdr->msg.put.hdr_data);
                break;

        case PTL_MSG_GET:
                CERROR ("GET: <>\n");
                break;

        case PTL_MSG_REPLY:
                CERROR ("REPLY: <>\n");
                break;

        default:
                CERROR ("TYPE?: <>\n");
        }
}
#endif
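/* Common receive completion path, called back via lib_parse().  The
 * payload is copied from the rx buffer pages into the caller's iov or
 * kiov, walking both fragment lists in step since their boundaries
 * need not coincide. */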
static int
kqswnal_recvmsg (nal_cb_t     *nal,
                 void         *private,
                 lib_msg_t    *cookie,
                 unsigned int  niov,
                 struct iovec *iov,
                 ptl_kiov_t   *kiov,
                 size_t        mlen,
                 size_t        rlen)
{
        kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
        int           page;
        char         *page_ptr;
        int           page_nob;
        char         *iov_ptr;
        int           iov_nob;
        int           frag;
#if KQSW_CHECKSUM
        kqsw_csum_t   senders_csum;
        kqsw_csum_t   payload_csum = 0;
        kqsw_csum_t   hdr_csum = kqsw_csum(0, page_address(krx->krx_pages[0]),
                                           sizeof(ptl_hdr_t));
        size_t        csum_len = mlen;
        int           csum_frags = 0;
        int           csum_nob = 0;
        static atomic_t csum_counter;
        int           csum_verbose = (atomic_read(&csum_counter)%1000001) == 0;

        atomic_inc (&csum_counter);

        memcpy (&senders_csum, ((char *)page_address (krx->krx_pages[0])) +
                               sizeof (ptl_hdr_t), sizeof (kqsw_csum_t));
        if (senders_csum != hdr_csum)
                kqswnal_csum_error (krx, 1);
#endif
        CDEBUG(D_NET,"kqswnal_recv, mlen="LPSZ", rlen="LPSZ"\n", mlen, rlen);

        /* What was actually received must be >= payload.
         * This is an LASSERT, as lib_finalize() doesn't have a completion status. */
        LASSERT (krx->krx_nob >= KQSW_HDR_SIZE + mlen);
        LASSERT (mlen <= rlen);

        /* It must be OK to kmap() if required */
        LASSERT (kiov == NULL || !in_interrupt ());
        /* Either all pages or all vaddrs */
        LASSERT (!(kiov != NULL && iov != NULL));

        if (mlen != 0)
        {
                page     = 0;
                page_ptr = ((char *) page_address(krx->krx_pages[0])) +
                           KQSW_HDR_SIZE;
                page_nob = PAGE_SIZE - KQSW_HDR_SIZE;

                LASSERT (niov > 0);
                if (kiov != NULL) {
                        iov_ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;
                        iov_nob = kiov->kiov_len;
                } else {
                        iov_ptr = iov->iov_base;
                        iov_nob = iov->iov_len;
                }

                for (;;)
                {
                        /* We expect the iov to exactly match mlen */
                        LASSERT (iov_nob <= mlen);

                        frag = MIN (page_nob, iov_nob);
                        memcpy (iov_ptr, page_ptr, frag);
#if KQSW_CHECKSUM
                        payload_csum = kqsw_csum (payload_csum, iov_ptr, frag);
                        csum_nob += frag;
                        csum_frags++;
#endif
                        mlen -= frag;
                        if (mlen == 0)
                                break;

                        page_nob -= frag;
                        if (page_nob != 0)
                                page_ptr += frag;
                        else
                        {
                                page++;
                                LASSERT (page < krx->krx_npages);
                                page_ptr = page_address(krx->krx_pages[page]);
                                page_nob = PAGE_SIZE;
                        }

                        iov_nob -= frag;
                        if (iov_nob != 0)
                                iov_ptr += frag;
                        else if (kiov != NULL) {
                                kunmap (kiov->kiov_page);
                                kiov++;
                                niov--;
                                LASSERT (niov > 0);
                                iov_ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;
                                iov_nob = kiov->kiov_len;
                        } else {
                                iov++;
                                niov--;
                                LASSERT (niov > 0);
                                iov_ptr = iov->iov_base;
                                iov_nob = iov->iov_len;
                        }
                }

                if (kiov != NULL)
                        kunmap (kiov->kiov_page);
        }

#if KQSW_CHECKSUM
        memcpy (&senders_csum, ((char *)page_address (krx->krx_pages[0])) +
                sizeof(ptl_hdr_t) + sizeof(kqsw_csum_t), sizeof(kqsw_csum_t));

        if (csum_len != rlen)
                CERROR("Unable to checksum data in user's buffer\n");
        else if (senders_csum != payload_csum)
                kqswnal_csum_error (krx, 0);

        if (csum_verbose)
                CERROR("hdr csum %lx, payload_csum %lx, csum_frags %d, "
                       "csum_nob %d\n",
                       hdr_csum, payload_csum, csum_frags, csum_nob);
#endif
        lib_finalize(nal, private, cookie);

        kqswnal_requeue_rx (krx);

        return (rlen);
}
static int
kqswnal_recv(nal_cb_t     *nal,
             void         *private,
             lib_msg_t    *cookie,
             unsigned int  niov,
             struct iovec *iov,
             size_t        mlen,
             size_t        rlen)
{
        return (kqswnal_recvmsg (nal, private, cookie, niov, iov, NULL, mlen, rlen));
}
static int
kqswnal_recv_pages (nal_cb_t     *nal,
                    void         *private,
                    lib_msg_t    *cookie,
                    unsigned int  niov,
                    ptl_kiov_t   *kiov,
                    size_t        mlen,
                    size_t        rlen)
{
        return (kqswnal_recvmsg (nal, private, cookie, niov, NULL, kiov, mlen, rlen));
}
int
kqswnal_thread_start (int (*fn)(void *arg), void *arg)
{
        long pid = kernel_thread (fn, arg, 0);

        if (pid < 0)
                return ((int)pid);

        atomic_inc (&kqswnal_data.kqn_nthreads);
        return (0);
}
void
kqswnal_thread_fini (void)
{
        atomic_dec (&kqswnal_data.kqn_nthreads);
}
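/* Scheduler thread: drains work queued from interrupt context -
 * ready receives, transmits delayed by EP txd exhaustion and forwarded
 * packets that blocked waiting for a tx descriptor - sleeping when
 * idle and yielding the CPU every KQSW_RESCHED loops. */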
int
kqswnal_scheduler (void *arg)
{
        kqswnal_rx_t    *krx;
        kqswnal_tx_t    *ktx;
        kpr_fwd_desc_t  *fwd;
        unsigned long    flags;
        int              rc;
        int              counter = 0;
        int              did_something;

        kportal_daemonize ("kqswnal_sched");
        kportal_blockallsigs ();

        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        while (!kqswnal_data.kqn_shuttingdown)
        {
                did_something = FALSE;

                if (!list_empty (&kqswnal_data.kqn_readyrxds))
                {
                        krx = list_entry(kqswnal_data.kqn_readyrxds.next,
                                         kqswnal_rx_t, krx_list);
                        list_del (&krx->krx_list);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        kqswnal_rx (krx);

                        did_something = TRUE;
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
                }

                if (!list_empty (&kqswnal_data.kqn_delayedtxds))
                {
                        ktx = list_entry(kqswnal_data.kqn_delayedtxds.next,
                                         kqswnal_tx_t, ktx_delayed_list);
                        list_del_init (&ktx->ktx_delayed_list);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        rc = kqswnal_launch (ktx);
                        if (rc != 0)            /* failed: ktx_nid down? */
                        {
                                CERROR("Failed delayed transmit to "LPX64
                                       ": %d\n", ktx->ktx_nid, rc);
                                kqswnal_tx_done (ktx, rc);
                        }

                        did_something = TRUE;
                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }

                if (!list_empty (&kqswnal_data.kqn_delayedfwds))
                {
                        fwd = list_entry (kqswnal_data.kqn_delayedfwds.next,
                                          kpr_fwd_desc_t, kprfd_list);
                        list_del (&fwd->kprfd_list);
                        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);

                        kqswnal_fwd_packet (NULL, fwd);

                        did_something = TRUE;
                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }

                /* nothing to do or hogging CPU */
                if (!did_something || counter++ == KQSW_RESCHED) {
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);
                        counter = 0;

                        if (!did_something) {
                                rc = wait_event_interruptible (kqswnal_data.kqn_sched_waitq,
                                                               kqswnal_data.kqn_shuttingdown ||
                                                               !list_empty(&kqswnal_data.kqn_readyrxds) ||
                                                               !list_empty(&kqswnal_data.kqn_delayedtxds) ||
                                                               !list_empty(&kqswnal_data.kqn_delayedfwds));
                                LASSERT (rc == 0);
                        } else if (current->need_resched)
                                schedule ();

                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }
        }

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);

        kqswnal_thread_fini ();
        return (0);
}
nal_cb_t kqswnal_lib =
{
        nal_data:       &kqswnal_data,          /* NAL private data */
        cb_send:        kqswnal_send,
        cb_send_pages:  kqswnal_send_pages,
        cb_recv:        kqswnal_recv,
        cb_recv_pages:  kqswnal_recv_pages,
        cb_read:        kqswnal_read,
        cb_write:       kqswnal_write,
        cb_malloc:      kqswnal_malloc,
        cb_free:        kqswnal_free,
        cb_printf:      kqswnal_printf,
        cb_cli:         kqswnal_cli,
        cb_sti:         kqswnal_sti,
        cb_dist:        kqswnal_dist
};