/* NOTE(review): incomplete hunk — interior fields of what appears to be the
 * gmnal network-interface descriptor struct; the struct's opening and closing
 * braces are outside this view.  The bare '-'/'+' lines below are leftover
 * unified-diff markers from a mangled patch, not code — strip them when the
 * file is reconstructed. */
gmnal_tx_t *gmni_txs; /* all txs */
gmnal_rx_t *gmni_rxs; /* all rx descs */
gmnal_txbuf_t *gmni_ltxbs; /* all large tx bufs */
-
+
/* presumably gmni_nthreads counts live worker threads for shutdown
 * accounting — TODO confirm against the thread start/stop paths */
atomic_t gmni_nthreads; /* total # threads */
gm_alarm_t gmni_alarm; /* alarm to wake caretaker */
int gmni_shutdown; /* tell all threads to exit */
/* NOTE(review): incomplete hunk from an immediate-message receive path.
 * The enclosing function header and the body of the short-message branch
 * are not visible here; the '-'/'+' lines are diff residue. */
LASSERT (msg->gmm_type == GMNAL_MSG_IMMEDIATE);
/* Exactly one of iov/kiov may describe the destination (mapped vs. page
 * I/O vectors are mutually exclusive). */
LASSERT (iov == NULL || kiov == NULL);
-
+
/* Reject truncated wire messages: fewer bytes arrived than the header
 * claims the payload needs.  (Branch body truncated in this view.) */
if (rx->rx_recv_nob < nob) {
CERROR("Short message from nid %s: got %d, need %d\n",
libcfs_nid2str(msg->gmm_srcnid), rx->rx_recv_nob, nob);
/* NOTE(review): incomplete hunks from a transmit path.  The matching `if`
 * for the leading `else`, the enclosing function header, and the tail after
 * the tx-queue insertion are all outside this view; '-'/'+' lines are diff
 * residue. */
else
/* Gather the payload from page kiovs into the flat tx message buffer. */
lnet_copy_kiov2flat(len, buffer, 0,
niov, kiov, offset, len);
-
+
/* Payload was copied inline, so grow the message size and mark no
 * separate large-buffer transfer. */
tx->tx_msgnob += len;
tx->tx_large_nob = 0;
} else {
/* Large-message branch: stash the lnet msg on the tx so it can be
 * finalized after send completion — TODO confirm lifetime against
 * the completion handler. */
LASSERT(tx->tx_lntmsg == NULL);
tx->tx_lntmsg = lntmsg;
-
+
/* Queue under the tx lock; unlock presumably follows outside this
 * view. */
spin_lock(&gmni->gmni_tx_lock);
list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
/* NOTE(review): incomplete hunk — interior of a netbuf page-allocation
 * routine (function header not visible).  As shown, the NULL check after
 * alloc_page() has been lost in the diff mangling: the cleanup/-ENOMEM
 * lines at the bottom of the loop body are unreachable without an
 * `if (nb->nb_kiov[i].kiov_page == NULL)` guard.  Restore that guard when
 * reconstructing — do NOT ship this loop as-is.  '-'/'+' lines are diff
 * residue. */
LASSERT (npages > 0);
for (i = 0; i < npages; i++) {
-
/* One full page per kiov entry, offset 0. */
nb->nb_kiov[i].kiov_page = alloc_page(GFP_KERNEL);
nb->nb_kiov[i].kiov_offset = 0;
nb->nb_kiov[i].kiov_len = PAGE_SIZE;
/* On allocation failure: free pages 0..i (the failed slot holds NULL,
 * which the freeing helper presumably tolerates — TODO confirm) and
 * report out-of-memory. */
gmnal_free_netbuf_pages(nb, i+1);
return -ENOMEM;
}
-
+
/* Record the buffer's base GM network address on the first page, then
 * advance the allocator cursor one page per iteration — TODO confirm
 * this matches how gmni_netaddr_base is consumed elsewhere. */
if (i == 0)
nb->nb_netaddr = gmni->gmni_netaddr_base;
-
+
gmni->gmni_netaddr_base += PAGE_SIZE;
}
-
+
return 0;
}
/* NOTE(review): incomplete hunk — locals and allocation from a large
 * tx-buffer allocator (header and remainder not visible).  Sized via
 * offsetof over the flexible kiov array so the struct and its npages
 * kiov entries come from one allocation.  '-'/'+' lines are diff
 * residue. */
int sz = offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]);
gmnal_txbuf_t *txb;
int rc;
-
+
LIBCFS_ALLOC(txb, sz);
if (txb == NULL) {
CERROR("Can't allocate large txbuffer\n");
/* NOTE(review): incomplete hunk — caller loop that allocates
 * nlarge_tx_bufs large tx buffers, bailing out on the first failure.
 * Presumably the caller unwinds buffers already allocated on error —
 * TODO confirm.  '-'/'+' lines are diff residue. */
for (i = 0; i < nlarge_tx_bufs; i++) {
rc = gmnal_alloc_ltxbuf(gmni);
-
+
if (rc != 0)
return rc;
}