}
int
-kqswnal_get_tx_desc (struct portal_ioctl_data *data)
+kqswnal_get_tx_desc (struct portals_cfg *pcfg)
{
unsigned long flags;
struct list_head *tmp;
kqswnal_tx_t *ktx;
- int index = data->ioc_count;
+ int index = pcfg->pcfg_count;
int rc = -ENOENT;
spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
list_for_each (tmp, &kqswnal_data.kqn_activetxds) {
if (index-- != 0)
continue;
-
- ktx = list_entry (tmp, kqswnal_tx_t, ktx_list);
- data->ioc_pbuf1 = (char *)ktx;
- data->ioc_count = NTOH__u32(ktx->ktx_wire_hdr->type);
- data->ioc_size = NTOH__u32(PTL_HDR_LENGTH(ktx->ktx_wire_hdr));
- data->ioc_nid = NTOH__u64(ktx->ktx_wire_hdr->dest_nid);
- data->ioc_nid2 = ktx->ktx_nid;
- data->ioc_misc = ktx->ktx_launcher;
- data->ioc_flags = (list_empty (&ktx->ktx_delayed_list) ? 0 : 1) |
- ((!ktx->ktx_forwarding) ? 0 : 2) |
- ((!ktx->ktx_isnblk) ? 0 : 4);
+ ktx = list_entry (tmp, kqswnal_tx_t, ktx_list);
+ pcfg->pcfg_pbuf1 = (char *)ktx;
+ pcfg->pcfg_count = NTOH__u32(ktx->ktx_wire_hdr->type);
+ pcfg->pcfg_size = NTOH__u32(ktx->ktx_wire_hdr->payload_length);
+ pcfg->pcfg_nid = NTOH__u64(ktx->ktx_wire_hdr->dest_nid);
+ pcfg->pcfg_nid2 = ktx->ktx_nid;
+ pcfg->pcfg_misc = ktx->ktx_launcher;
+ pcfg->pcfg_flags = (list_empty (&ktx->ktx_delayed_list) ? 0 : 1) |
+ (!ktx->ktx_isnblk ? 0 : 2) |
+ (ktx->ktx_state << 2);
rc = 0;
break;
}
}
int
-kqswnal_cmd (struct portal_ioctl_data *data, void *private)
+kqswnal_cmd (struct portals_cfg *pcfg, void *private)
{
- LASSERT (data != NULL);
+ LASSERT (pcfg != NULL);
- switch (data->ioc_nal_cmd) {
+ switch (pcfg->pcfg_command) {
case NAL_CMD_GET_TXDESC:
- return (kqswnal_get_tx_desc (data));
+ return (kqswnal_get_tx_desc (pcfg));
case NAL_CMD_REGISTER_MYNID:
CDEBUG (D_IOCTL, "setting NID offset to "LPX64" (was "LPX64")\n",
- data->ioc_nid - kqswnal_data.kqn_elanid,
+ pcfg->pcfg_nid - kqswnal_data.kqn_elanid,
kqswnal_data.kqn_nid_offset);
kqswnal_data.kqn_nid_offset =
- data->ioc_nid - kqswnal_data.kqn_elanid;
- kqswnal_lib.ni.nid = data->ioc_nid;
+ pcfg->pcfg_nid - kqswnal_data.kqn_elanid;
+ kqswnal_lib.ni.nid = pcfg->pcfg_nid;
return (0);
default:
case KQN_INIT_ALL:
PORTAL_SYMBOL_UNREGISTER (kqswnal_ni);
+ kportal_nal_unregister(QSWNAL);
/* fall through */
case KQN_INIT_PTL:
/* fall through */
case KQN_INIT_DATA:
+ LASSERT(list_empty(&kqswnal_data.kqn_activetxds));
break;
case KQN_INIT_NOTHING:
CDEBUG (D_MALLOC, "done kmem %d\n", atomic_read(&portal_kmemory));
- printk (KERN_INFO "Routing QSW NAL unloaded (final mem %d)\n",
+ printk (KERN_INFO "Lustre: Routing QSW NAL unloaded (final mem %d)\n",
atomic_read(&portal_kmemory));
}
}
/**********************************************************************/
- /* Reserve Elan address space for transmit buffers */
+ /* Reserve Elan address space for transmit descriptors.  NB: we may
+ * either send the contents of the associated buffers immediately, or
+ * map them for the peer to suck/blow... */
dmareq.Waitfn = DDI_DMA_SLEEP;
dmareq.ElanAddr = (E3_Addr) 0;
dmareq.Attr = PTE_LOAD_LITTLE_ENDIAN;
- dmareq.Perm = ELAN_PERM_REMOTEREAD;
+ dmareq.Perm = ELAN_PERM_REMOTEWRITE;
rc = elan3_dma_reserve(kqswnal_data.kqn_epdev->DmaState,
KQSW_NTXMSGPAGES*(KQSW_NTXMSGS+KQSW_NNBLK_TXMSGS),
INIT_LIST_HEAD (&ktx->ktx_delayed_list);
+ ktx->ktx_state = KTX_IDLE;
ktx->ktx_isnblk = (i >= KQSW_NTXMSGS);
list_add_tail (&ktx->ktx_list,
ktx->ktx_isnblk ? &kqswnal_data.kqn_nblk_idletxds :
rc = ep_queue_receive(krx->krx_eprx, kqswnal_rxhandler, krx,
krx->krx_elanaddr,
krx->krx_npages * PAGE_SIZE, 0);
- if (rc != 0)
+ if (rc != ESUCCESS)
{
CERROR ("failed ep_queue_receive %d\n", rc);
kqswnal_finalise ();
PORTAL_SYMBOL_REGISTER(kqswnal_ni);
kqswnal_data.kqn_init = KQN_INIT_ALL;
- printk(KERN_INFO "Routing QSW NAL loaded on node %d of %d "
+ printk(KERN_INFO "Lustre: Routing QSW NAL loaded on node %d of %d "
"(Routing %s, initial mem %d)\n",
kqswnal_data.kqn_elanid, kqswnal_data.kqn_nnodes,
kpr_routing (&kqswnal_data.kqn_router) ? "enabled" : "disabled",