/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 * Copyright (C) 2002, Lawrence Livermore National Labs (LLNL)
 *   W. Marcus Miller - Based on ksocknal
 *
 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "qswnal.h"

/*
 *  LIB functions follow
 */
static int
kqswnal_read(nal_cb_t *nal, void *private, void *dst_addr, user_ptr src_addr,
             size_t len)
{
        CDEBUG (D_NET, LPX64": reading "LPSZ" bytes from %p -> %p\n",
                nal->ni.nid, len, src_addr, dst_addr );
        memcpy( dst_addr, src_addr, len );

        return (0);
}
static int
kqswnal_write(nal_cb_t *nal, void *private, user_ptr dst_addr, void *src_addr,
              size_t len)
{
        CDEBUG (D_NET, LPX64": writing "LPSZ" bytes from %p -> %p\n",
                nal->ni.nid, len, src_addr, dst_addr );
        memcpy( dst_addr, src_addr, len );

        return (0);
}
static void *
kqswnal_malloc(nal_cb_t *nal, size_t len)
{
        void *buf;

        PORTAL_ALLOC(buf, len);
        return (buf);
}
static void
kqswnal_free(nal_cb_t *nal, void *buf, size_t len)
{
        PORTAL_FREE(buf, len);
}
static void
kqswnal_printf (nal_cb_t * nal, const char *fmt, ...)
{
        va_list  ap;
        char     msg[256];

        va_start (ap, fmt);
        vsnprintf (msg, sizeof (msg), fmt, ap); /* sprint safely */
        va_end (ap);

        msg[sizeof (msg) - 1] = 0;              /* ensure terminated */

        CDEBUG (D_NET, "%s", msg);
}
static void
kqswnal_cli(nal_cb_t *nal, unsigned long *flags)
{
        kqswnal_data_t *data = nal->nal_data;

        spin_lock_irqsave(&data->kqn_statelock, *flags);
}
static void
kqswnal_sti(nal_cb_t *nal, unsigned long *flags)
{
        kqswnal_data_t *data = nal->nal_data;

        spin_unlock_irqrestore(&data->kqn_statelock, *flags);
}
static int
kqswnal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist)
{
        if (nid == nal->ni.nid)
                *dist = 0;                      /* it's me */
        else if (kqswnal_nid2elanid (nid) >= 0)
                *dist = 1;                      /* it's my peer */
        else
                *dist = 2;                      /* via router */
        return (0);
}
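
/* Illustrative only (hypothetical NIDs): for a node whose own NID is 4
 * with Elan peers 0..63,
 *      kqswnal_dist(nal, 4,   &d) sets d = 0  (local delivery)
 *      kqswnal_dist(nal, 9,   &d) sets d = 1  (direct Elan peer)
 *      kqswnal_dist(nal, 999, &d) sets d = 2  (reached via a router)
 * presumably letting the library rank interfaces by hop count. */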
void
kqswnal_notify_peer_down(kqswnal_tx_t *ktx)
{
        struct timeval     now;
        time_t             then;

        do_gettimeofday (&now);
        then = now.tv_sec - (jiffies - ktx->ktx_launchtime)/HZ;

        kpr_notify(&kqswnal_data.kqn_router, ktx->ktx_nid, 0, then);
}
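
/* Worked example of the arithmetic above (invented numbers): if the tx
 * was launched 3000 jiffies ago with HZ == 100, "then" is now.tv_sec - 30,
 * i.e. the wall-clock second of the launch; kpr_notify() records that as
 * the last time the peer was known to be healthy. */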
void
kqswnal_unmap_tx (kqswnal_tx_t *ktx)
{
        if (ktx->ktx_nmappedpages == 0)
                return;

        CDEBUG (D_NET, "%p[%d] unloading pages %d for %d\n",
                ktx, ktx->ktx_nfrag, ktx->ktx_basepage, ktx->ktx_nmappedpages);

        LASSERT (ktx->ktx_nmappedpages <= ktx->ktx_npages);
        LASSERT (ktx->ktx_basepage + ktx->ktx_nmappedpages <=
                 kqswnal_data.kqn_eptxdmahandle->NumDvmaPages);

        elan3_dvma_unload(kqswnal_data.kqn_epdev->DmaState,
                          kqswnal_data.kqn_eptxdmahandle,
                          ktx->ktx_basepage, ktx->ktx_nmappedpages);
        ktx->ktx_nmappedpages = 0;
}
int
kqswnal_map_tx_kiov (kqswnal_tx_t *ktx, int nob, int niov, ptl_kiov_t *kiov)
{
        int       nfrags    = ktx->ktx_nfrag;
        int       nmapped   = ktx->ktx_nmappedpages;
        int       maxmapped = ktx->ktx_npages;
        uint32_t  basepage  = ktx->ktx_basepage + nmapped;
        char     *ptr;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags <= EP_MAXFRAG);
        LASSERT (niov > 0);
        LASSERT (nob > 0);

        do {
                int  fraglen = kiov->kiov_len;

                /* nob exactly spans the iovs */
                LASSERT (fraglen <= nob);
                /* each frag fits in a page */
                LASSERT (kiov->kiov_offset + kiov->kiov_len <= PAGE_SIZE);

                nmapped++;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == EP_MAXFRAG) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               EP_MAXFRAG);
                        return (-EMSGSIZE);
                }

                /* XXX this is really crap, but we'll have to kmap until
                 * EKC has a page (rather than vaddr) mapping interface */
                ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, page %d, %d total\n",
                       ktx, nfrags, ptr, fraglen, basepage, nmapped);

                elan3_dvma_kaddr_load (kqswnal_data.kqn_epdev->DmaState,
                                       kqswnal_data.kqn_eptxdmahandle,
                                       ptr, fraglen,
                                       basepage, &ktx->ktx_frags.iov[nfrags].Base);

                kunmap (kiov->kiov_page);

                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                if (nfrags > 0 &&                /* previous frag mapped */
                    ktx->ktx_frags.iov[nfrags].Base == /* contiguous with this one */
                    (ktx->ktx_frags.iov[nfrags-1].Base + ktx->ktx_frags.iov[nfrags-1].Len))
                        /* just extend previous */
                        ktx->ktx_frags.iov[nfrags - 1].Len += fraglen;
                else {
                        ktx->ktx_frags.iov[nfrags].Len = fraglen;
                        nfrags++;                /* new frag */
                }

                basepage++;
                kiov++;
                niov--;
                nob -= fraglen;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        ktx->ktx_nfrag = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_nfrag, ktx->ktx_nmappedpages);

        return (0);
}
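
/* Worked example of the frag merging above (addresses invented): three
 * full-page kiovs loaded at consecutive DVMA slots come back with Elan
 * bases 0x1000, 0x2000, 0x3000; each starts exactly at the previous
 * Base+Len, so all three collapse into the single frag {0x1000, 0x3000}.
 * A short or offset kiov breaks the chain and starts a new entry in
 * ktx_frags.iov[]. */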
int
kqswnal_map_tx_iov (kqswnal_tx_t *ktx, int nob, int niov, struct iovec *iov)
{
        int       nfrags    = ktx->ktx_nfrag;
        int       nmapped   = ktx->ktx_nmappedpages;
        int       maxmapped = ktx->ktx_npages;
        uint32_t  basepage  = ktx->ktx_basepage + nmapped;

        LASSERT (nmapped <= maxmapped);
        LASSERT (nfrags <= EP_MAXFRAG);
        LASSERT (niov > 0);
        LASSERT (nob > 0);

        do {
                int  fraglen = iov->iov_len;
                long npages  = kqswnal_pages_spanned (iov->iov_base, fraglen);

                /* nob exactly spans the iovs */
                LASSERT (fraglen <= nob);

                nmapped += npages;
                if (nmapped > maxmapped) {
                        CERROR("Can't map message in %d pages (max %d)\n",
                               nmapped, maxmapped);
                        return (-EMSGSIZE);
                }

                if (nfrags == EP_MAXFRAG) {
                        CERROR("Message too fragmented in Elan VM (max %d frags)\n",
                               EP_MAXFRAG);
                        return (-EMSGSIZE);
                }

                CDEBUG(D_NET,
                       "%p[%d] loading %p for %d, pages %d for %ld, %d total\n",
                       ktx, nfrags, iov->iov_base, fraglen, basepage, npages,
                       nmapped);

                elan3_dvma_kaddr_load (kqswnal_data.kqn_epdev->DmaState,
                                       kqswnal_data.kqn_eptxdmahandle,
                                       iov->iov_base, fraglen,
                                       basepage, &ktx->ktx_frags.iov[nfrags].Base);
                /* keep in loop for failure case */
                ktx->ktx_nmappedpages = nmapped;

                if (nfrags > 0 &&                /* previous frag mapped */
                    ktx->ktx_frags.iov[nfrags].Base == /* contiguous with this one */
                    (ktx->ktx_frags.iov[nfrags-1].Base + ktx->ktx_frags.iov[nfrags-1].Len))
                        /* just extend previous */
                        ktx->ktx_frags.iov[nfrags - 1].Len += fraglen;
                else {
                        ktx->ktx_frags.iov[nfrags].Len = fraglen;
                        nfrags++;                /* new frag */
                }

                basepage += npages;
                iov++;
                niov--;
                nob -= fraglen;

                /* iov must not run out before end of data */
                LASSERT (nob == 0 || niov > 0);

        } while (nob > 0);

        ktx->ktx_nfrag = nfrags;
        CDEBUG (D_NET, "%p got %d frags over %d pages\n",
                ktx, ktx->ktx_nfrag, ktx->ktx_nmappedpages);

        return (0);
}
void
kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
{
        kpr_fwd_desc_t   *fwd = NULL;
        unsigned long     flags;

        kqswnal_unmap_tx (ktx);                 /* release temporary mappings */
        ktx->ktx_state = KTX_IDLE;

        spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);

        list_del (&ktx->ktx_list);              /* take off active list */

        if (ktx->ktx_isnblk) {
                /* reserved for non-blocking tx */
                list_add (&ktx->ktx_list, &kqswnal_data.kqn_nblk_idletxds);
                spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
                return;
        }

        list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);

        /* anything blocking for a tx descriptor? */
        if (!list_empty(&kqswnal_data.kqn_idletxd_fwdq)) /* forwarded packet? */
        {
                CDEBUG(D_NET,"wakeup fwd\n");

                fwd = list_entry (kqswnal_data.kqn_idletxd_fwdq.next,
                                  kpr_fwd_desc_t, kprfd_list);
                list_del (&fwd->kprfd_list);
        }

        wake_up (&kqswnal_data.kqn_idletxd_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

        if (fwd == NULL)
                return;

        /* schedule packet for forwarding again */
        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        list_add_tail (&fwd->kprfd_list, &kqswnal_data.kqn_delayedfwds);
        wake_up (&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
}
kqswnal_tx_t *
kqswnal_get_idle_tx (kpr_fwd_desc_t *fwd, int may_block)
{
        unsigned long  flags;
        kqswnal_tx_t  *ktx = NULL;

        for (;;) {
                spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);

                /* "normal" descriptor is free */
                if (!list_empty (&kqswnal_data.kqn_idletxds)) {
                        ktx = list_entry (kqswnal_data.kqn_idletxds.next,
                                          kqswnal_tx_t, ktx_list);
                        break;
                }

                /* "normal" descriptor pool is empty */

                if (fwd != NULL) {              /* forwarded packet => queue for idle txd */
                        CDEBUG (D_NET, "blocked fwd [%p]\n", fwd);
                        list_add_tail (&fwd->kprfd_list,
                                       &kqswnal_data.kqn_idletxd_fwdq);
                        break;
                }

                /* doing a local transmit */
                if (!may_block) {
                        if (list_empty (&kqswnal_data.kqn_nblk_idletxds)) {
                                CERROR ("intr tx desc pool exhausted\n");
                                break;
                        }

                        ktx = list_entry (kqswnal_data.kqn_nblk_idletxds.next,
                                          kqswnal_tx_t, ktx_list);
                        break;
                }

                /* block for idle tx */

                spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

                CDEBUG (D_NET, "blocking for tx desc\n");
                wait_event (kqswnal_data.kqn_idletxd_waitq,
                            !list_empty (&kqswnal_data.kqn_idletxds));
        }

        if (ktx != NULL) {
                list_del (&ktx->ktx_list);
                list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
                ktx->ktx_launcher = current->pid;
        }

        spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);

        /* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
        LASSERT (ktx == NULL || ktx->ktx_nmappedpages == 0);

        return (ktx);
}
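
/* NB the two idle pools above (descriptive note): kqn_idletxds is the
 * general pool that callers may sleep on, while kqn_nblk_idletxds is
 * reserved for senders that must not block (ACKs, REPLYs, and anything
 * in interrupt context, as the may_block computation in
 * kqswnal_sendmsg() shows).  Exhausting the non-blocking pool is
 * therefore a hard failure rather than a wait. */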
void
kqswnal_tx_done (kqswnal_tx_t *ktx, int error)
{
        lib_msg_t     *msg;
        lib_msg_t     *repmsg = NULL;

        switch (ktx->ktx_state) {
        case KTX_FORWARDING:       /* router asked me to forward this packet */
                kpr_fwd_done (&kqswnal_data.kqn_router,
                              (kpr_fwd_desc_t *)ktx->ktx_args[0], error);
                break;

        case KTX_SENDING:          /* packet sourced locally */
                lib_finalize (&kqswnal_lib, ktx->ktx_args[0],
                              (lib_msg_t *)ktx->ktx_args[1]);
                break;

        case KTX_GETTING:          /* Peer has DMA-ed direct? */
                LASSERT (KQSW_OPTIMIZE_GETS);
                msg = (lib_msg_t *)ktx->ktx_args[1];

                if (error == 0)
                        repmsg = lib_fake_reply_msg (&kqswnal_lib,
                                                     ktx->ktx_nid, msg->md);

                lib_finalize (&kqswnal_lib, ktx->ktx_args[0], msg);

                if (repmsg != NULL)
                        lib_finalize (&kqswnal_lib, NULL, repmsg);
                break;

        default:
                LASSERT (0);
        }

        kqswnal_put_idle_tx (ktx);
}
static void
kqswnal_txhandler(EP_TXD *txd, void *arg, int status)
{
        kqswnal_tx_t      *ktx = (kqswnal_tx_t *)arg;

        LASSERT (txd != NULL);
        LASSERT (ktx != NULL);

        CDEBUG(D_NET, "txd %p, arg %p status %d\n", txd, arg, status);

        if (status != EP_SUCCESS) {
                CERROR ("Tx completion to "LPX64" failed: %d\n",
                        ktx->ktx_nid, status);

                kqswnal_notify_peer_down(ktx);
                status = -EHOSTDOWN;

        } else if (ktx->ktx_state == KTX_GETTING) {
                /* RPC completed OK; what did our peer put in the status
                 * block? */
                LASSERT (KQSW_OPTIMIZE_GETS);
                status = ep_txd_statusblk(txd)->Status;
        } else {
                status = 0;
        }

        kqswnal_tx_done (ktx, status);
}
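
/* Descriptive note on the KTX_GETTING case above: for an optimized GET
 * the peer DMAs the sink data and then fills in the RPC status block,
 * so ep_txd_statusblk(txd)->Status carries the peer's verdict and
 * replaces the local EP completion status passed to kqswnal_tx_done(). */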
int
kqswnal_launch (kqswnal_tx_t *ktx)
{
        /* Don't block for transmit descriptor if we're in interrupt context */
        int   attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
        int   dest = kqswnal_nid2elanid (ktx->ktx_nid);
        unsigned long flags;
        int   rc;

        ktx->ktx_launchtime = jiffies;

        LASSERT (dest >= 0);                    /* must be a peer */
        if (ktx->ktx_state == KTX_GETTING) {
                LASSERT (KQSW_OPTIMIZE_GETS);
                rc = ep_transmit_rpc(kqswnal_data.kqn_eptx, dest,
                                     ktx->ktx_port, attr, kqswnal_txhandler,
                                     ktx, NULL, ktx->ktx_frags.iov, ktx->ktx_nfrag);
        } else {
                rc = ep_transmit_large(kqswnal_data.kqn_eptx, dest,
                                       ktx->ktx_port, attr, kqswnal_txhandler,
                                       ktx, ktx->ktx_frags.iov, ktx->ktx_nfrag);
        }

        switch (rc) {
        case ESUCCESS: /* success */
                return (0);

        case ENOMEM: /* can't allocate ep txd => queue for later */
                LASSERT (in_interrupt());

                spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

                list_add_tail (&ktx->ktx_delayed_list, &kqswnal_data.kqn_delayedtxds);
                wake_up (&kqswnal_data.kqn_sched_waitq);

                spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
                return (0);

        default: /* fatal error */
                CERROR ("Tx to "LPX64" failed: %d\n", ktx->ktx_nid, rc);
                kqswnal_notify_peer_down(ktx);
                return (-EHOSTUNREACH);
        }
}
static char *
hdr_type_string (ptl_hdr_t *hdr)
{
        switch (hdr->type) {
        case PTL_MSG_ACK:
                return ("ACK");
        case PTL_MSG_PUT:
                return ("PUT");
        case PTL_MSG_GET:
                return ("GET");
        case PTL_MSG_REPLY:
                return ("REPLY");
        default:
                return ("<UNKNOWN>");
        }
}
static void
kqswnal_cerror_hdr(ptl_hdr_t * hdr)
{
        char *type_str = hdr_type_string (hdr);

        CERROR("P3 Header at %p of type %s length %d\n", hdr, type_str,
               NTOH__u32(hdr->payload_length));
        CERROR("    From nid/pid "LPU64"/%u\n", NTOH__u64(hdr->src_nid),
               NTOH__u32(hdr->src_pid));
        CERROR("    To nid/pid "LPU64"/%u\n", NTOH__u64(hdr->dest_nid),
               NTOH__u32(hdr->dest_pid));

        switch (NTOH__u32(hdr->type)) {
        case PTL_MSG_PUT:
                CERROR("    Ptl index %d, ack md "LPX64"."LPX64", "
                       "match bits "LPX64"\n",
                       NTOH__u32 (hdr->msg.put.ptl_index),
                       hdr->msg.put.ack_wmd.wh_interface_cookie,
                       hdr->msg.put.ack_wmd.wh_object_cookie,
                       NTOH__u64 (hdr->msg.put.match_bits));
                CERROR("    offset %d, hdr data "LPX64"\n",
                       NTOH__u32(hdr->msg.put.offset),
                       hdr->msg.put.hdr_data);
                break;

        case PTL_MSG_GET:
                CERROR("    Ptl index %d, return md "LPX64"."LPX64", "
                       "match bits "LPX64"\n",
                       NTOH__u32 (hdr->msg.get.ptl_index),
                       hdr->msg.get.return_wmd.wh_interface_cookie,
                       hdr->msg.get.return_wmd.wh_object_cookie,
                       hdr->msg.get.match_bits);
                CERROR("    Length %d, src offset %d\n",
                       NTOH__u32 (hdr->msg.get.sink_length),
                       NTOH__u32 (hdr->msg.get.src_offset));
                break;

        case PTL_MSG_ACK:
                CERROR("    dst md "LPX64"."LPX64", manipulated length %d\n",
                       hdr->msg.ack.dst_wmd.wh_interface_cookie,
                       hdr->msg.ack.dst_wmd.wh_object_cookie,
                       NTOH__u32 (hdr->msg.ack.mlength));
                break;

        case PTL_MSG_REPLY:
                CERROR("    dst md "LPX64"."LPX64"\n",
                       hdr->msg.reply.dst_wmd.wh_interface_cookie,
                       hdr->msg.reply.dst_wmd.wh_object_cookie);
        }
}                               /* end of print_hdr() */
void
kqswnal_print_eiov (int how, char *str, int n, EP_IOVEC *iov)
{
        int          i;

        CDEBUG (how, "%s: %d\n", str, n);
        for (i = 0; i < n; i++) {
                CDEBUG (how, "   %08x for %d\n", iov[i].Base, iov[i].Len);
        }
}
int
kqswnal_eiovs2datav (int ndv, EP_DATAVEC *dv,
                     int nsrc, EP_IOVEC *src,
                     int ndst, EP_IOVEC *dst)
{
        int   count;
        int   nob;

        for (count = 0; count < ndv; count++, dv++) {
                if (nsrc == 0 || ndst == 0) {
                        if (nsrc != ndst) {
                                /* For now I'll barf on any left over entries */
                                CERROR ("mismatched src and dst iovs\n");
                                return (-EINVAL);
                        }
                        return (count);
                }

                nob = (src->Len < dst->Len) ? src->Len : dst->Len;
                dv->Len    = nob;
                dv->Source = src->Base;
                dv->Dest   = dst->Base;

                if (nob >= src->Len) {
                        src++;
                        nsrc--;
                } else {
                        src->Len  -= nob;
                        src->Base += nob;
                }

                if (nob >= dst->Len) {
                        dst++;
                        ndst--;
                } else {
                        dst->Len  -= nob;
                        dst->Base += nob;
                }
        }

        CERROR ("DATAVEC too small\n");
        return (-E2BIG);
}
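
/* Worked example (invented values): src = {0x1000,1024},{0x3000,1024},
 * dst = {0x8000,2048}.  Pass 1 emits {Source 0x1000, Dest 0x8000, Len
 * 1024} and trims dst to {0x8400,1024}; pass 2 emits {Source 0x3000,
 * Dest 0x8400, Len 1024} and both vectors drain, so the function
 * returns 2 datav entries. */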
int
kqswnal_dma_reply (kqswnal_tx_t *ktx, int nfrag,
                   struct iovec *iov, ptl_kiov_t *kiov, int nob)
{
        kqswnal_rx_t       *krx = (kqswnal_rx_t *)ktx->ktx_args[0];
        char               *buffer = (char *)page_address(krx->krx_pages[0]);
        kqswnal_remotemd_t *rmd = (kqswnal_remotemd_t *)(buffer + KQSW_HDR_SIZE);
        EP_IOVEC            eiov[EP_MAXFRAG];
        EP_STATUSBLK        blk;
        int                 rc;

        LASSERT (ep_rxd_isrpc(krx->krx_rxd) && !krx->krx_rpc_completed);
        LASSERT ((iov == NULL) != (kiov == NULL));

        /* see .*_pack_k?iov comment regarding endian-ness */
        if (buffer + krx->krx_nob < (char *)(rmd + 1)) {
                /* msg too small to discover rmd size */
                CERROR ("Incoming message [%d] too small for RMD (%d needed)\n",
                        krx->krx_nob, (int)(((char *)(rmd + 1)) - buffer));
                return (-EINVAL);
        }

        if (buffer + krx->krx_nob < (char *)&rmd->kqrmd_eiov[rmd->kqrmd_neiov]) {
                /* rmd doesn't fit in the incoming message */
                CERROR ("Incoming message [%d] too small for RMD[%d] (%d needed)\n",
                        krx->krx_nob, rmd->kqrmd_neiov,
                        (int)(((char *)&rmd->kqrmd_eiov[rmd->kqrmd_neiov]) - buffer));
                return (-EINVAL);
        }

        /* Ghastly hack part 1, uses the existing procedures to map the source data... */
        if (kiov != NULL)
                rc = kqswnal_map_tx_kiov (ktx, nob, nfrag, kiov);
        else
                rc = kqswnal_map_tx_iov (ktx, nob, nfrag, iov);

        if (rc != 0) {
                CERROR ("Can't map source data: %d\n", rc);
                return (rc);
        }

        /* Ghastly hack part 2, copy out eiov so we can create the datav; Ugghh... */
        memcpy (eiov, ktx->ktx_frags.iov, ktx->ktx_nfrag * sizeof (eiov[0]));

        rc = kqswnal_eiovs2datav (EP_MAXFRAG, ktx->ktx_frags.datav,
                                  ktx->ktx_nfrag, eiov,
                                  rmd->kqrmd_neiov, rmd->kqrmd_eiov);
        if (rc < 0) {
                CERROR ("Can't create datavec: %d\n", rc);
                return (rc);
        }
        ktx->ktx_nfrag = rc;

        memset (&blk, 0, sizeof (blk));         /* zero blk.Status */

        /* Our caller will start to race with kqswnal_rpc_complete... */
        LASSERT (atomic_read (&krx->krx_refcount) == 1);
        atomic_set (&krx->krx_refcount, 2);

        rc = ep_complete_rpc (krx->krx_rxd, kqswnal_reply_complete, ktx,
                              &blk, ktx->ktx_frags.datav, ktx->ktx_nfrag);
        if (rc == ESUCCESS)
                return (0);

        /* reset refcount back to 1: we're not going to be racing with
         * kqswnal_reply_complete. */
        atomic_set (&krx->krx_refcount, 1);
        return (-ECONNABORTED);
}
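
/* The remote MD the peer sent ahead of its optimized GET is a counted
 * vector of Elan iovecs.  A minimal sketch of the layout the bounds
 * checks above assume (see qswnal.h for the authoritative definition):
 *
 *      typedef struct {
 *              __u32    kqrmd_neiov;   // entries in kqrmd_eiov[]
 *              EP_IOVEC kqrmd_eiov[0]; // Elan addresses of the GET sink
 *      } kqswnal_remotemd_t;
 *
 * hence "(char *)(rmd + 1)" is the smallest valid RMD and
 * "&rmd->kqrmd_eiov[rmd->kqrmd_neiov]" is its true end. */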
static int
kqswnal_sendmsg (nal_cb_t     *nal,
                 void         *private,
                 lib_msg_t    *libmsg,
                 ptl_hdr_t    *hdr,
                 int           type,
                 ptl_nid_t     nid,
                 ptl_pid_t     pid,
                 unsigned int  payload_niov,
                 struct iovec *payload_iov,
                 ptl_kiov_t   *payload_kiov,
                 size_t        payload_nob)
{
        kqswnal_tx_t *ktx;
        int           rc;
        ptl_nid_t     targetnid;
#if KQSW_CHECKSUM
        int           i;
        kqsw_csum_t   csum;
        int           sumnob;
#endif

        CDEBUG(D_NET, "sending "LPSZ" bytes in %d frags to nid: "LPX64
               " pid %u\n", payload_nob, payload_niov, nid, pid);

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= PTL_MD_MAX_IOV);

        /* It must be OK to kmap() if required */
        LASSERT (payload_kiov == NULL || !in_interrupt ());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        if (payload_nob > KQSW_MAXPAYLOAD) {
                CERROR ("request exceeds MTU size "LPSZ" (max %u).\n",
                        payload_nob, KQSW_MAXPAYLOAD);
                return (PTL_FAIL);
        }

        targetnid = nid;
        if (kqswnal_nid2elanid (nid) < 0) {     /* Can't send direct: find gateway? */
                rc = kpr_lookup (&kqswnal_data.kqn_router, nid,
                                 sizeof (ptl_hdr_t) + payload_nob, &targetnid);
                if (rc != 0) {
                        CERROR("Can't route to "LPX64": router error %d\n",
                               nid, rc);
                        return (PTL_FAIL);
                }
                if (kqswnal_nid2elanid (targetnid) < 0) {
                        CERROR("Bad gateway "LPX64" for "LPX64"\n",
                               targetnid, nid);
                        return (PTL_FAIL);
                }
        }

        /* I may not block for a transmit descriptor if I might block the
         * receiver, or an interrupt handler. */
        ktx = kqswnal_get_idle_tx(NULL, !(type == PTL_MSG_ACK ||
                                          type == PTL_MSG_REPLY ||
                                          in_interrupt()));
        if (ktx == NULL) {
                kqswnal_cerror_hdr (hdr);
                return (PTL_NOSPACE);
        }

        ktx->ktx_args[0] = private;
        ktx->ktx_args[1] = libmsg;

#if KQSW_OPTIMIZE_GETS
        if (type == PTL_MSG_REPLY &&
            ep_rxd_isrpc(((kqswnal_rx_t *)private)->krx_rxd)) {
                if (nid != targetnid ||
                    kqswnal_nid2elanid(nid) !=
                    ep_rxd_node(((kqswnal_rx_t *)private)->krx_rxd)) {
                        CERROR("Optimized reply nid conflict: "
                               "nid "LPX64" via "LPX64" elanID %d\n",
                               nid, targetnid,
                               ep_rxd_node(((kqswnal_rx_t *)private)->krx_rxd));
                        return (PTL_FAIL);
                }

                /* peer expects RPC completion with GET data */
                rc = kqswnal_dma_reply (ktx,
                                        payload_niov, payload_iov,
                                        payload_kiov, payload_nob);
                if (rc == 0)
                        return (PTL_OK);

                CERROR ("Can't DMA reply to "LPX64": %d\n", nid, rc);
                kqswnal_put_idle_tx (ktx);
                return (PTL_FAIL);
        }
#endif

        memcpy (ktx->ktx_buffer, hdr, sizeof (*hdr)); /* copy hdr from caller's stack */
        ktx->ktx_wire_hdr = (ptl_hdr_t *)ktx->ktx_buffer;

#if KQSW_CHECKSUM
        csum = kqsw_csum (0, (char *)hdr, sizeof (*hdr));
        memcpy (ktx->ktx_buffer + sizeof (*hdr), &csum, sizeof (csum));
        for (csum = 0, i = 0, sumnob = payload_nob; sumnob > 0; i++) {
                if (payload_kiov != NULL) {
                        ptl_kiov_t *kiov = &payload_kiov[i];
                        char       *addr = ((char *)kmap (kiov->kiov_page)) +
                                           kiov->kiov_offset;

                        csum = kqsw_csum (csum, addr, MIN (sumnob, kiov->kiov_len));
                        sumnob -= kiov->kiov_len;
                        kunmap (kiov->kiov_page);
                } else {
                        struct iovec *iov = &payload_iov[i];

                        csum = kqsw_csum (csum, iov->iov_base, MIN (sumnob, iov->iov_len));
                        sumnob -= iov->iov_len;
                }
        }
        memcpy(ktx->ktx_buffer + sizeof(*hdr) + sizeof(csum), &csum, sizeof(csum));
#endif

        /* Set up first frag from pre-mapped buffer (it's at least the
         * portals header) */
        ktx->ktx_frags.iov[0].Base = ktx->ktx_ebuffer;
        ktx->ktx_frags.iov[0].Len = KQSW_HDR_SIZE;
        ktx->ktx_nfrag = 1;
        ktx->ktx_state = KTX_SENDING;   /* => lib_finalize() on completion */

#if KQSW_OPTIMIZE_GETS
        if (type == PTL_MSG_GET &&              /* doing a GET */
            nid == targetnid) {                 /* not forwarding */
                lib_md_t           *md = libmsg->md;
                kqswnal_remotemd_t *rmd = (kqswnal_remotemd_t *)(ktx->ktx_buffer + KQSW_HDR_SIZE);

                /* Optimised path: I send over the Elan vaddrs of the get
                 * sink buffers, and my peer DMAs directly into them.
                 *
                 * First I set up ktx as if it was going to send this
                 * payload, (it needs to map it anyway).  This fills
                 * ktx_frags.iov[1] and onward with the network addresses
                 * of the get sink frags.  I copy these into ktx_buffer,
                 * immediately after the header, and send that as my GET
                 * message.
                 *
                 * Note that the addresses are sent in native endian-ness.
                 * When EKC copes with different endian nodes, I'll fix
                 * this (and eat my hat :) */

                if ((libmsg->md->options & PTL_MD_KIOV) != 0)
                        rc = kqswnal_map_tx_kiov (ktx, md->length,
                                                  md->md_niov, md->md_iov.kiov);
                else
                        rc = kqswnal_map_tx_iov (ktx, md->length,
                                                 md->md_niov, md->md_iov.iov);

                if (rc < 0) {
                        kqswnal_put_idle_tx (ktx);
                        return (PTL_FAIL);
                }

                rmd->kqrmd_neiov = ktx->ktx_nfrag - 1;
                memcpy (&rmd->kqrmd_eiov[0], &ktx->ktx_frags.iov[1],
                        rmd->kqrmd_neiov * sizeof (EP_IOVEC));

                ktx->ktx_nfrag = 1;
                ktx->ktx_frags.iov[0].Len += offsetof (kqswnal_remotemd_t,
                                                       kqrmd_eiov[rmd->kqrmd_neiov]);
                payload_nob = ktx->ktx_frags.iov[0].Len;
                ktx->ktx_state = KTX_GETTING;
        } else
#endif
        if (payload_nob > 0) { /* got some payload (something more to do) */
                /* make a single contiguous message? */
                if (payload_nob <= KQSW_TX_MAXCONTIG) {
                        /* copy payload to ktx_buffer, immediately after hdr */
                        if (payload_kiov != NULL)
                                lib_copy_kiov2buf (ktx->ktx_buffer + KQSW_HDR_SIZE,
                                                   payload_niov, payload_kiov, payload_nob);
                        else
                                lib_copy_iov2buf (ktx->ktx_buffer + KQSW_HDR_SIZE,
                                                  payload_niov, payload_iov, payload_nob);
                        /* first frag includes payload */
                        ktx->ktx_frags.iov[0].Len += payload_nob;
                } else {
                        if (payload_kiov != NULL)
                                rc = kqswnal_map_tx_kiov (ktx, payload_nob,
                                                          payload_niov, payload_kiov);
                        else
                                rc = kqswnal_map_tx_iov (ktx, payload_nob,
                                                         payload_niov, payload_iov);
                        if (rc != 0) {
                                kqswnal_put_idle_tx (ktx);
                                return (PTL_FAIL);
                        }
                }
        }

        ktx->ktx_nid  = targetnid;
        ktx->ktx_port = (payload_nob <= KQSW_SMALLPAYLOAD) ?
                        EP_SVC_LARGE_PORTALS_SMALL : EP_SVC_LARGE_PORTALS_LARGE;

        rc = kqswnal_launch (ktx);
        if (rc != 0) {                          /* failed? */
                CERROR ("Failed to send packet to "LPX64": %d\n", targetnid, rc);
                kqswnal_put_idle_tx (ktx);
                return (PTL_FAIL);
        }

        CDEBUG(D_NET, "sent "LPSZ" bytes to "LPX64" via "LPX64"\n",
               payload_nob, nid, targetnid);
        return (PTL_OK);
}
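
/* NB the port choice above (descriptive): messages whose payload fits
 * KQSW_SMALLPAYLOAD go to EP_SVC_LARGE_PORTALS_SMALL so receivers can
 * service them from small pre-posted buffers; everything else uses the
 * _LARGE service.  The same split is applied when forwarding (see
 * kqswnal_fwd_packet below). */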
static int
kqswnal_send (nal_cb_t     *nal,
              void         *private,
              lib_msg_t    *libmsg,
              ptl_hdr_t    *hdr,
              int           type,
              ptl_nid_t     nid,
              ptl_pid_t     pid,
              unsigned int  payload_niov,
              struct iovec *payload_iov,
              size_t        payload_nob)
{
        return (kqswnal_sendmsg (nal, private, libmsg, hdr, type, nid, pid,
                                 payload_niov, payload_iov, NULL, payload_nob));
}
static int
kqswnal_send_pages (nal_cb_t     *nal,
                    void         *private,
                    lib_msg_t    *libmsg,
                    ptl_hdr_t    *hdr,
                    int           type,
                    ptl_nid_t     nid,
                    ptl_pid_t     pid,
                    unsigned int  payload_niov,
                    ptl_kiov_t   *payload_kiov,
                    size_t        payload_nob)
{
        return (kqswnal_sendmsg (nal, private, libmsg, hdr, type, nid, pid,
                                 payload_niov, NULL, payload_kiov, payload_nob));
}
int kqswnal_fwd_copy_contig = 0;
void
kqswnal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
{
        int             rc;
        kqswnal_tx_t   *ktx;
        struct iovec   *iov = fwd->kprfd_iov;
        int             niov = fwd->kprfd_niov;
        int             nob = fwd->kprfd_nob;
        ptl_nid_t       nid = fwd->kprfd_gateway_nid;

#if KQSW_CHECKSUM
        CERROR ("checksums for forwarded packets not implemented\n");
        LBUG ();
#endif
        /* The router wants this NAL to forward a packet */
        CDEBUG (D_NET, "forwarding [%p] to "LPX64", %d frags %d bytes\n",
                fwd, nid, niov, nob);

        LASSERT (niov > 0);

        ktx = kqswnal_get_idle_tx (fwd, FALSE);
        if (ktx == NULL)        /* can't get txd right now */
                return;         /* fwd will be scheduled when tx desc freed */

        if (nid == kqswnal_lib.ni.nid)          /* gateway is me */
                nid = fwd->kprfd_target_nid;    /* target is final dest */

        if (kqswnal_nid2elanid (nid) < 0) {
                CERROR("Can't forward [%p] to "LPX64": not a peer\n", fwd, nid);
                rc = -EHOSTUNREACH;
                goto failed;
        }

        if (nob > KQSW_NRXMSGBYTES_LARGE) {
                CERROR ("Can't forward [%p] to "LPX64
                        ": size %d bigger than max packet size %ld\n",
                        fwd, nid, nob, (long)KQSW_NRXMSGBYTES_LARGE);
                rc = -EMSGSIZE;
                goto failed;
        }

        if ((kqswnal_fwd_copy_contig || niov > 1) &&
            nob <= KQSW_TX_BUFFER_SIZE)
        {
                /* send from ktx's pre-allocated/mapped contiguous buffer? */
                lib_copy_iov2buf (ktx->ktx_buffer, niov, iov, nob);
                ktx->ktx_frags.iov[0].Base = ktx->ktx_ebuffer; /* already mapped */
                ktx->ktx_frags.iov[0].Len = nob;
                ktx->ktx_nfrag = 1;
                ktx->ktx_wire_hdr = (ptl_hdr_t *)ktx->ktx_buffer;
        }
        else
        {
                /* zero copy */
                ktx->ktx_nfrag = 0;     /* no frags mapped yet */
                rc = kqswnal_map_tx_iov (ktx, nob, niov, iov);
                if (rc != 0)
                        goto failed;

                ktx->ktx_wire_hdr = (ptl_hdr_t *)iov[0].iov_base;
        }

        ktx->ktx_port = (nob <= (sizeof (ptl_hdr_t) + KQSW_SMALLPAYLOAD)) ?
                        EP_SVC_LARGE_PORTALS_SMALL : EP_SVC_LARGE_PORTALS_LARGE;
        ktx->ktx_nid  = nid;
        ktx->ktx_state = KTX_FORWARDING; /* kpr_put_packet() on completion */
        ktx->ktx_args[0] = fwd;

        rc = kqswnal_launch (ktx);
        if (rc == 0)
                return;

 failed:
        LASSERT (rc != 0);
        CERROR ("Failed to forward [%p] to "LPX64": %d\n", fwd, nid, rc);

        kqswnal_put_idle_tx (ktx);
        /* complete now (with failure) */
        kpr_fwd_done (&kqswnal_data.kqn_router, fwd, rc);
}
void
kqswnal_fwd_callback (void *arg, int error)
{
        kqswnal_rx_t *krx = (kqswnal_rx_t *)arg;

        /* The router has finished forwarding this packet */

        if (error != 0)
        {
                ptl_hdr_t *hdr = (ptl_hdr_t *)page_address (krx->krx_pages[0]);

                CERROR("Failed to route packet from "LPX64" to "LPX64": %d\n",
                       NTOH__u64(hdr->src_nid), NTOH__u64(hdr->dest_nid), error);
        }

        kqswnal_requeue_rx (krx);
}
void
kqswnal_reply_complete (EP_RXD *rxd)
{
        int           status = ep_rxd_status(rxd);
        kqswnal_tx_t *ktx = (kqswnal_tx_t *)ep_rxd_arg(rxd);
        kqswnal_rx_t *krx = (kqswnal_rx_t *)ktx->ktx_args[0];
        lib_msg_t    *msg = (lib_msg_t *)ktx->ktx_args[1];

        CDEBUG((status == EP_SUCCESS) ? D_NET : D_ERROR,
               "rxd %p, ktx %p, status %d\n", rxd, ktx, status);

        LASSERT (krx->krx_rxd == rxd);

        krx->krx_rpc_completed = 1;
        kqswnal_requeue_rx (krx);

        lib_finalize (&kqswnal_lib, NULL, msg);
        kqswnal_put_idle_tx (ktx);
}
void
kqswnal_rpc_complete (EP_RXD *rxd)
{
        int           status = ep_rxd_status(rxd);
        kqswnal_rx_t *krx    = (kqswnal_rx_t *)ep_rxd_arg(rxd);

        CDEBUG((status == EP_SUCCESS) ? D_NET : D_ERROR,
               "rxd %p, krx %p, status %d\n", rxd, krx, status);

        LASSERT (krx->krx_rxd == rxd);

        krx->krx_rpc_completed = 1;
        kqswnal_requeue_rx (krx);
}
void
kqswnal_requeue_rx (kqswnal_rx_t *krx)
{
        EP_STATUSBLK  blk;
        int           rc;

        LASSERT (atomic_read (&krx->krx_refcount) > 0);
        if (!atomic_dec_and_test (&krx->krx_refcount))
                return;

        if (!ep_rxd_isrpc(krx->krx_rxd) ||
            krx->krx_rpc_completed) {

                /* don't actually requeue on shutdown */
                if (kqswnal_data.kqn_shuttingdown)
                        return;

                ep_requeue_receive (krx->krx_rxd, kqswnal_rxhandler, krx,
                                    krx->krx_elanaddr, krx->krx_npages * PAGE_SIZE);
                return;
        }

        /* Sender wanted an RPC, but we didn't complete it (we must have
         * dropped the sender's message).  We complete it now with
         * failure... */
        memset (&blk, 0, sizeof (blk));
        blk.Status = -ECONNREFUSED;

        atomic_set (&krx->krx_refcount, 1);

        rc = ep_complete_rpc (krx->krx_rxd,
                              kqswnal_rpc_complete, krx,
                              &blk, NULL, 0);
        if (rc == ESUCCESS) {
                /* callback will call me again to requeue, having set
                 * krx_rpc_completed... */
                return;
        }

        CERROR("can't complete RPC: %d\n", rc);

        /* we don't actually requeue on shutdown */
        if (kqswnal_data.kqn_shuttingdown)
                return;

        /* NB ep_complete_rpc() frees rxd on failure, so we have to requeue
         * from scratch here... */
        rc = ep_queue_receive(krx->krx_eprx, kqswnal_rxhandler, krx,
                              krx->krx_elanaddr,
                              krx->krx_npages * PAGE_SIZE, 0);

        LASSERT (rc == ESUCCESS);
        /* This needs to be fixed by ep_complete_rpc NOT freeing
         * krx->krx_rxd on failure so we can just ep_requeue_receive() */
}
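
/* Refcount protocol recap (descriptive): krx_refcount is 1 while a
 * receive is being processed and is bumped to 2 when an optimized-GET
 * reply RPC is launched (see kqswnal_dma_reply), so the buffer is
 * recycled only after both the local consumer and the RPC completion
 * callback have called kqswnal_requeue_rx() and dropped their
 * reference. */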
void
kqswnal_rx (kqswnal_rx_t *krx)
{
        ptl_hdr_t      *hdr = (ptl_hdr_t *) page_address (krx->krx_pages[0]);
        ptl_nid_t       dest_nid = NTOH__u64 (hdr->dest_nid);
        int             nob;
        int             niov;

        if (dest_nid == kqswnal_lib.ni.nid) { /* It's for me :) */
                /* NB krx requeued when lib_parse() calls back kqswnal_recv */
                lib_parse (&kqswnal_lib, hdr, krx);
                return;
        }

#if KQSW_CHECKSUM
        CERROR ("checksums for forwarded packets not implemented\n");
        LBUG ();
#endif
        if (kqswnal_nid2elanid (dest_nid) >= 0)  /* should have gone direct to peer */
        {
                CERROR("dropping packet from "LPX64" for "LPX64
                       ": target is peer\n", NTOH__u64(hdr->src_nid), dest_nid);
                kqswnal_requeue_rx (krx);
                return;
        }

        /* NB forwarding may destroy iov; rebuild every time */
        for (nob = krx->krx_nob, niov = 0; nob > 0; nob -= PAGE_SIZE, niov++)
        {
                LASSERT (niov < krx->krx_npages);
                krx->krx_iov[niov].iov_base= page_address(krx->krx_pages[niov]);
                krx->krx_iov[niov].iov_len = MIN(PAGE_SIZE, nob);
        }

        kpr_fwd_init (&krx->krx_fwd, dest_nid,
                      krx->krx_nob, niov, krx->krx_iov,
                      kqswnal_fwd_callback, krx);

        kpr_fwd_start (&kqswnal_data.kqn_router, &krx->krx_fwd);
}
/* Receive Interrupt Handler: posts to schedulers */
void
kqswnal_rxhandler(EP_RXD *rxd)
{
        unsigned long flags;
        int           nob    = ep_rxd_len (rxd);
        int           status = ep_rxd_status (rxd);
        kqswnal_rx_t *krx    = (kqswnal_rx_t *)ep_rxd_arg (rxd);

        CDEBUG(D_NET, "kqswnal_rxhandler: rxd %p, krx %p, nob %d, status %d\n",
               rxd, krx, nob, status);

        LASSERT (krx != NULL);

        krx->krx_rxd = rxd;
        krx->krx_nob = nob;

        LASSERT (atomic_read (&krx->krx_refcount) == 0);
        atomic_set (&krx->krx_refcount, 1);
        krx->krx_rpc_completed = 0;

        /* must receive a whole header to be able to parse */
        if (status != EP_SUCCESS || nob < sizeof (ptl_hdr_t))
        {
                /* receives complete with failure when receiver is removed */
                if (!kqswnal_data.kqn_shuttingdown)
                        CERROR("receive status failed with status %d nob %d\n",
                               ep_rxd_status(rxd), nob);

                kqswnal_requeue_rx (krx);
                return;
        }

        if (!in_interrupt()) {
                kqswnal_rx (krx);
                return;
        }

        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
        wake_up (&kqswnal_data.kqn_sched_waitq);

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
}
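
/* NB kqswnal_rx() may kmap() and may block while forwarding a packet,
 * which is why the handler above calls it directly only outside
 * interrupt context; in interrupt context the krx is queued for the
 * scheduler thread instead. */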
#if KQSW_CHECKSUM
void
kqswnal_csum_error (kqswnal_rx_t *krx, int ishdr)
{
        ptl_hdr_t *hdr = (ptl_hdr_t *)page_address (krx->krx_pages[0]);

        CERROR ("%s checksum mismatch %p: dnid "LPX64", snid "LPX64
                ", dpid %d, spid %d, type %d\n",
                ishdr ? "Header" : "Payload", krx,
                NTOH__u64(hdr->dest_nid), NTOH__u64(hdr->src_nid),
                NTOH__u32(hdr->dest_pid), NTOH__u32(hdr->src_pid),
                NTOH__u32(hdr->type));

        switch (NTOH__u32 (hdr->type))
        {
        case PTL_MSG_ACK:
                CERROR("ACK: mlen %d dmd "LPX64"."LPX64" match "LPX64
                       " len %u\n",
                       NTOH__u32(hdr->msg.ack.mlength),
                       hdr->msg.ack.dst_wmd.handle_cookie,
                       hdr->msg.ack.dst_wmd.handle_idx,
                       NTOH__u64(hdr->msg.ack.match_bits),
                       NTOH__u32(hdr->msg.ack.length));
                break;
        case PTL_MSG_PUT:
                CERROR("PUT: ptl %d amd "LPX64"."LPX64" match "LPX64
                       " len %u off %u data "LPX64"\n",
                       NTOH__u32(hdr->msg.put.ptl_index),
                       hdr->msg.put.ack_wmd.handle_cookie,
                       hdr->msg.put.ack_wmd.handle_idx,
                       NTOH__u64(hdr->msg.put.match_bits),
                       NTOH__u32(hdr->msg.put.length),
                       NTOH__u32(hdr->msg.put.offset),
                       hdr->msg.put.hdr_data);
                break;
        case PTL_MSG_GET:
                CERROR ("GET: <>\n");
                break;
        case PTL_MSG_REPLY:
                CERROR ("REPLY: <>\n");
                break;
        default:
                CERROR ("TYPE?: <>\n");
        }
}
#endif
static int
kqswnal_recvmsg (nal_cb_t     *nal,
                 void         *private,
                 lib_msg_t    *libmsg,
                 unsigned int  niov,
                 struct iovec *iov,
                 ptl_kiov_t   *kiov,
                 size_t        mlen,
                 size_t        rlen)
{
        kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
        int           page;
        char         *page_ptr;
        int           page_nob;
        char         *iov_ptr;
        int           iov_nob;
        int           frag;
#if KQSW_CHECKSUM
        kqsw_csum_t   senders_csum;
        kqsw_csum_t   payload_csum = 0;
        kqsw_csum_t   hdr_csum = kqsw_csum(0, page_address(krx->krx_pages[0]),
                                           sizeof(ptl_hdr_t));
        size_t        csum_len = mlen;
        int           csum_frags = 0;
        int           csum_nob = 0;
        static atomic_t csum_counter;
        int           csum_verbose = (atomic_read(&csum_counter)%1000001) == 0;

        atomic_inc (&csum_counter);

        memcpy (&senders_csum, ((char *)page_address (krx->krx_pages[0])) +
                sizeof (ptl_hdr_t), sizeof (kqsw_csum_t));
        if (senders_csum != hdr_csum)
                kqswnal_csum_error (krx, 1);
#endif
        CDEBUG(D_NET,"kqswnal_recv, mlen="LPSZ", rlen="LPSZ"\n", mlen, rlen);

        /* What was actually received must be >= payload.
         * This is an LASSERT, as lib_finalize() doesn't have a completion status. */
        LASSERT (krx->krx_nob >= KQSW_HDR_SIZE + mlen);
        LASSERT (mlen <= rlen);

        /* It must be OK to kmap() if required */
        LASSERT (kiov == NULL || !in_interrupt ());
        /* Either all pages or all vaddrs */
        LASSERT (!(kiov != NULL && iov != NULL));

        if (mlen != 0) {
                page     = 0;
                page_ptr = ((char *) page_address(krx->krx_pages[0])) +
                           KQSW_HDR_SIZE;
                page_nob = PAGE_SIZE - KQSW_HDR_SIZE;

                LASSERT (niov > 0);
                if (kiov != NULL) {
                        iov_ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;
                        iov_nob = kiov->kiov_len;
                } else {
                        iov_ptr = iov->iov_base;
                        iov_nob = iov->iov_len;
                }

                for (;;)
                {
                        /* We expect the iov to exactly match mlen */
                        LASSERT (iov_nob <= mlen);

                        frag = MIN (page_nob, iov_nob);
                        memcpy (iov_ptr, page_ptr, frag);
#if KQSW_CHECKSUM
                        payload_csum = kqsw_csum (payload_csum, iov_ptr, frag);
                        csum_nob += frag;
                        csum_frags++;
#endif
                        mlen -= frag;
                        if (mlen == 0)
                                break;

                        page_nob -= frag;
                        if (page_nob != 0)
                                page_ptr += frag;
                        else
                        {
                                page++;
                                LASSERT (page < krx->krx_npages);
                                page_ptr = page_address(krx->krx_pages[page]);
                                page_nob = PAGE_SIZE;
                        }

                        iov_nob -= frag;
                        if (iov_nob != 0)
                                iov_ptr += frag;
                        else if (kiov != NULL) {
                                kunmap (kiov->kiov_page);
                                kiov++;
                                niov--;
                                LASSERT (niov > 0);
                                iov_ptr = ((char *)kmap (kiov->kiov_page)) + kiov->kiov_offset;
                                iov_nob = kiov->kiov_len;
                        } else {
                                iov++;
                                niov--;
                                LASSERT (niov > 0);
                                iov_ptr = iov->iov_base;
                                iov_nob = iov->iov_len;
                        }
                }

                if (kiov != NULL)
                        kunmap (kiov->kiov_page);
        }

#if KQSW_CHECKSUM
        memcpy (&senders_csum, ((char *)page_address (krx->krx_pages[0])) +
                sizeof(ptl_hdr_t) + sizeof(kqsw_csum_t), sizeof(kqsw_csum_t));

        if (csum_len != rlen)
                CERROR("Unable to checksum data in user's buffer\n");
        else if (senders_csum != payload_csum)
                kqswnal_csum_error (krx, 0);

        if (csum_verbose)
                CERROR("hdr csum %lx, payload_csum %lx, csum_frags %d, "
                       "csum_nob %d\n",
                       hdr_csum, payload_csum, csum_frags, csum_nob);
#endif
        lib_finalize(nal, private, libmsg);

        kqswnal_requeue_rx (krx);

        return (rlen);
}
static int
kqswnal_recv(nal_cb_t     *nal,
             void         *private,
             lib_msg_t    *libmsg,
             unsigned int  niov,
             struct iovec *iov,
             size_t        mlen,
             size_t        rlen)
{
        return (kqswnal_recvmsg (nal, private, libmsg, niov, iov, NULL, mlen, rlen));
}
static int
kqswnal_recv_pages (nal_cb_t     *nal,
                    void         *private,
                    lib_msg_t    *libmsg,
                    unsigned int  niov,
                    ptl_kiov_t   *kiov,
                    size_t        mlen,
                    size_t        rlen)
{
        return (kqswnal_recvmsg (nal, private, libmsg, niov, NULL, kiov, mlen, rlen));
}
int
kqswnal_thread_start (int (*fn)(void *arg), void *arg)
{
        long    pid = kernel_thread (fn, arg, 0);

        if (pid < 0)
                return ((int)pid);

        atomic_inc (&kqswnal_data.kqn_nthreads);
        return (0);
}
void
kqswnal_thread_fini (void)
{
        atomic_dec (&kqswnal_data.kqn_nthreads);
}
int
kqswnal_scheduler (void *arg)
{
        kqswnal_rx_t    *krx;
        kqswnal_tx_t    *ktx;
        kpr_fwd_desc_t  *fwd;
        unsigned long    flags;
        int              rc;
        int              counter = 0;
        int              did_something;

        kportal_daemonize ("kqswnal_sched");
        kportal_blockallsigs ();

        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);

        while (!kqswnal_data.kqn_shuttingdown)
        {
                did_something = FALSE;

                if (!list_empty (&kqswnal_data.kqn_readyrxds))
                {
                        krx = list_entry(kqswnal_data.kqn_readyrxds.next,
                                         kqswnal_rx_t, krx_list);
                        list_del (&krx->krx_list);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        kqswnal_rx (krx);

                        did_something = TRUE;
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
                }

                if (!list_empty (&kqswnal_data.kqn_delayedtxds))
                {
                        ktx = list_entry(kqswnal_data.kqn_delayedtxds.next,
                                         kqswnal_tx_t, ktx_list);
                        list_del_init (&ktx->ktx_delayed_list);
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);

                        rc = kqswnal_launch (ktx);
                        if (rc != 0)          /* failed: ktx_nid down? */
                        {
                                CERROR("Failed delayed transmit to "LPX64
                                       ": %d\n", ktx->ktx_nid, rc);
                                kqswnal_tx_done (ktx, rc);
                        }

                        did_something = TRUE;
                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }

                if (!list_empty (&kqswnal_data.kqn_delayedfwds))
                {
                        fwd = list_entry (kqswnal_data.kqn_delayedfwds.next,
                                          kpr_fwd_desc_t, kprfd_list);
                        list_del (&fwd->kprfd_list);
                        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);

                        kqswnal_fwd_packet (NULL, fwd);

                        did_something = TRUE;
                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }

                /* nothing to do or hogging CPU */
                if (!did_something || counter++ == KQSW_RESCHED) {
                        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                               flags);
                        counter = 0;

                        if (!did_something) {
                                rc = wait_event_interruptible (kqswnal_data.kqn_sched_waitq,
                                                               kqswnal_data.kqn_shuttingdown ||
                                                               !list_empty(&kqswnal_data.kqn_readyrxds) ||
                                                               !list_empty(&kqswnal_data.kqn_delayedtxds) ||
                                                               !list_empty(&kqswnal_data.kqn_delayedfwds));
                                LASSERT (rc == 0);
                        } else if (current->need_resched)
                                schedule ();

                        spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
                }
        }

        spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);

        kqswnal_thread_fini ();
        return (0);
}
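
/* Scheduler design note (descriptive): each pass drains at most one item
 * from each of the three work queues, and KQSW_RESCHED bounds how many
 * consecutive passes may run before the thread drops the lock and offers
 * to reschedule, so rx, delayed tx and forwarding work cannot starve the
 * rest of the system. */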
nal_cb_t kqswnal_lib =
{
        nal_data:       &kqswnal_data,          /* NAL private data */
        cb_send:        kqswnal_send,
        cb_send_pages:  kqswnal_send_pages,
        cb_recv:        kqswnal_recv,
        cb_recv_pages:  kqswnal_recv_pages,
        cb_read:        kqswnal_read,
        cb_write:       kqswnal_write,
        cb_malloc:      kqswnal_malloc,
        cb_free:        kqswnal_free,
        cb_printf:      kqswnal_printf,
        cb_cli:         kqswnal_cli,
        cb_sti:         kqswnal_sti,
        cb_dist:        kqswnal_dist
};