int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
- kgn_conn_t *conn;
- struct list_head *ctmp, *cnxt;
+ kgn_conn_t *conn, *cnxt;
int loopback;
int count = 0;
- loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
-
- list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
- conn = list_entry(ctmp, kgn_conn_t, gnc_list);
+ loopback = (peer->gnp_nid ==
+ lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));
+ list_for_each_entry_safe(conn, cnxt, &peer->gnp_conns, gnc_list) {
if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
continue;
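
ni->ni_nid is now a struct lnet_nid (large-NID capable) rather than the packed __u64 lnet_nid_t, while gnilnd still keys peers by the 4-byte form, so the loopback test converts the NI's NID down with lnet_nid_to_nid4() before comparing. A minimal sketch of the pattern, with an illustrative helper name not taken from the patch:

/* Compare a legacy 4-byte NID against a large-NID NI address by
 * packing the latter back into the old __u64 encoding. */
static bool kgn_nid_is_ni(lnet_nid_t nid4, struct lnet_ni *ni)
{
	return nid4 == lnet_nid_to_nid4(&ni->ni_nid);
}
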
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
kgn_conn_t *conn;
- struct list_head *tmp;
int loopback;
ENTRY;
- loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
+ loopback = (peer->gnp_nid ==
+ lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));
- list_for_each(tmp, &peer->gnp_conns) {
- conn = list_entry(tmp, kgn_conn_t, gnc_list);
+ list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
CDEBUG(D_NET, "checking conn 0x%p for peer %s"
" lo %d new %llu existing %llu"
" new peer %llu existing peer %llu"
*/
for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
- list_for_each_entry(net , &kgnilnd_data.kgn_nets[i], gnn_list) {
+ list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
/* if gnn_shutdown set for any net shutdown is in progress just return */
if (net->gnn_shutdown) {
up_read(&kgnilnd_data.kgn_net_rw_sem);
up_read(&kgnilnd_data.kgn_net_rw_sem);
for (i = 0; i < nnets; i++) {
- lnet_nid_t peer_nid;
+ struct lnet_nid peer_nid;
net = nets[i];
- peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
- peer->gnp_nid);
+ lnet_nid4_to_nid(kgnilnd_lnd2lnetnid(
+ lnet_nid_to_nid4(&net->gnn_ni->ni_nid),
+ peer->gnp_nid),
+ &peer_nid);
CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
- peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
+ peer, libcfs_nidstr(&peer_nid), peer->gnp_last_alive,
ktime_get_seconds() - peer->gnp_last_alive);
- lnet_notify(net->gnn_ni, peer_nid, alive, true,
+ lnet_notify(net->gnn_ni, &peer_nid, alive, true,
peer->gnp_last_alive);
kgnilnd_net_decref(net);
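
lnet_notify() now takes struct lnet_nid *, so the 4-byte NID produced by kgnilnd_lnd2lnetnid() must be widened first with lnet_nid4_to_nid(). A standalone sketch of the round trip, using only the conversion helpers from lnet-types.h:

/* lnet_nid4_to_nid() splits the __u64 into type/num and puts the
 * 32-bit host address, big-endian, into nid_addr[0]; converting back
 * with lnet_nid_to_nid4() is lossless for 4-byte NIDs. */
lnet_nid_t nid4 = LNET_MKNID(LNET_MKNET(GNILND, 0), 42);
struct lnet_nid nid;

lnet_nid4_to_nid(nid4, &nid);
LASSERT(lnet_nid_to_nid4(&nid) == nid4);
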
lnet_nid_t *id, __u32 *nic_addr,
int *refcount, int *connecting)
{
- struct list_head *ptmp;
kgn_peer_t *peer;
int i;
int rc = -ENOENT;
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
-
- list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
- peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
+ list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
if (index-- > 0)
continue;
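
The open-coded list_for_each()/list_entry() pairs throughout these hunks collapse into list_for_each_entry(), which folds the container_of() step into the iterator; the _safe variants additionally cache the next entry so the current one can be unlinked mid-walk. A minimal sketch of both forms over the peer hash:

kgn_peer_t *peer, *pnxt;

/* read-only walk: the cursor is the entry itself */
list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list)
	CDEBUG(D_NET, "peer %p\n", peer);

/* deletion-safe walk: pnxt is fetched before the body runs */
list_for_each_entry_safe(peer, pnxt, &kgnilnd_data.kgn_peers[i], gnp_list)
	list_del_init(&peer->gnp_list);
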
{
LIST_HEAD (souls);
LIST_HEAD (zombies);
- struct list_head *ptmp, *pnxt;
- kgn_peer_t *peer;
+ kgn_peer_t *peer, *pnxt;
int lo;
int hi;
int i;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
- peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
+ list_for_each_entry_safe(peer, pnxt, &kgnilnd_data.kgn_peers[i],
+ gnp_list) {
LASSERTF(peer->gnp_net != NULL,
"peer %p (%s) with NULL net\n",
peer, libcfs_nid2str(peer->gnp_nid));
kgnilnd_get_conn_by_idx(int index)
{
kgn_peer_t *peer;
- struct list_head *ptmp;
kgn_conn_t *conn;
- struct list_head *ctmp;
int i;
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
- list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
-
- peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
- list_for_each(ctmp, &peer->gnp_conns) {
- conn = list_entry(ctmp, kgn_conn_t, gnc_list);
-
+ list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
+ list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
continue;
/* The nid passed in does not yet contain the net portion.
* Let's build it up now
*/
- nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
+ nid = LNET_MKNID(LNET_NID_NET(&net->gnn_ni->ni_nid), nid);
rc = kgnilnd_add_peer(net, nid, &new_peer);
if (rc) {
* LNET assumes a conn and peer per net, the LNET_MKNID/LNET_NIDADDR allows us to let Lnet see what it
* wants to see instead of the underlying network that is being used to send the data
*/
- data->ioc_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
+ data->ioc_nid = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
+ LNET_NIDADDR(nid));
data->ioc_flags = peer_connecting;
data->ioc_count = peer_refcount;
/* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
* the generic connection that is used to send the data
*/
- data->ioc_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
+ data->ioc_nid = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
+ LNET_NIDADDR(conn->gnc_peer->gnp_nid));
data->ioc_u32[0] = conn->gnc_device->gnd_id;
kgnilnd_conn_decref(conn);
}
}
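
Both ioctl paths rebuild the NID LNet expects to see: the net half now comes from the large NID via LNET_NID_NET(), the address half from the gnilnd peer via LNET_NIDADDR(). Roughly, with the standard packing macros:

/* A 4-byte NID is (net << 32) | addr, where net is itself
 * (lnd_type << 16) | net_num. LNET_NID_NET() reads the same __u32
 * net field out of a struct lnet_nid. */
__u32 net  = LNET_NID_NET(&ni->ni_nid);	/* e.g. LNET_MKNET(GNILND, 0) */
__u32 addr = LNET_NIDADDR(nid);		/* low 32 bits: host address */
lnet_nid_t rebuilt = LNET_MKNID(net, addr);
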
case IOC_LIBCFS_REGISTER_MYNID: {
/* Ignore if this is a noop */
- if (data->ioc_nid == ni->ni_nid) {
+ if (data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid)) {
rc = 0;
} else {
CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
rc = -EINVAL;
}
break;
LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
atomic_read(&dev->gnd_n_mdd_held) == 0 &&
atomic64_read(&dev->gnd_nbytes_map) == 0,
- "%d SMSG mappings of %ld bytes still mapped or held %d\n",
+ "%d SMSG mappings of %lld bytes still mapped or held %d\n",
atomic_read(&dev->gnd_n_mdd),
- atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
+ (u64)atomic64_read(&dev->gnd_nbytes_map),
+ atomic_read(&dev->gnd_n_mdd_held));
LASSERT(list_empty(&dev->gnd_map_list));
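
The cast matters because atomic64_read() has returned plain long on older 64-bit kernels and s64 (long long) on current ones; pinning the argument to a fixed 64-bit type keeps the %lld conversion matched either way. Illustrative only:

/* Same width either way, but -Wformat flags long vs long long;
 * the explicit cast makes the promoted type unambiguous. */
CDEBUG(D_NET, "mapped bytes: %lld\n",
       (u64)atomic64_read(&dev->gnd_nbytes_map));
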
int kgnilnd_base_startup(void)
{
- struct timeval tv;
- int pkmem = atomic_read(&libcfs_kmemory);
+ long long pkmem = libcfs_kmem_read();
int rc;
int i;
kgn_device_t *dev;
* initialised with seconds + microseconds at startup time. So we
* rely on NOT creating connections more frequently on average than
* 1MHz to ensure we don't use old connstamps when we reboot. */
- do_gettimeofday(&tv);
kgnilnd_data.kgn_connstamp =
kgnilnd_data.kgn_peerstamp =
- (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ ktime_get_seconds();
init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
atomic_set(&dev->gnd_ndgrams, 0);
atomic_set(&dev->gnd_nwcdgrams, 0);
/* setup timer for RDMAQ processing */
- setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
- (unsigned long)dev);
+ cfs_timer_setup(&dev->gnd_rdmaq_timer,
+ kgnilnd_schedule_device_timer,
+ (unsigned long)dev, 0);
/* setup timer for mapping processing */
- setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
- (unsigned long)dev);
+ cfs_timer_setup(&dev->gnd_map_timer,
+ kgnilnd_schedule_device_timer,
+ (unsigned long)dev, 0);
}
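
cfs_timer_setup() maps onto the kernel's timer_setup(), which ignores the legacy data argument (kept here only for the old-kernel fallback); the callback must instead recover its context with cfs_from_timer(), i.e. container_of() on the timer field that fired. A sketch of the callback side this implies, with an illustrative name; note each timer member needs a from_timer() against that same member:

static void kgn_map_timer_cb(cfs_timer_cb_arg_t t)
{
	/* container_of(t, kgn_device_t, gnd_map_timer) */
	kgn_device_t *dev = cfs_from_timer(dev, t, gnd_map_timer);

	kgnilnd_schedule_device(dev);
}
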
kgnilnd_data.kgn_tx_phys_cache =
kmem_cache_create("kgn_tx_phys",
- LNET_MAX_IOV * sizeof(gni_mem_segment_t),
+ GNILND_MAX_IOV * sizeof(gni_mem_segment_t),
0, 0, NULL);
if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
CERROR("Can't create slab for kgn_tx_phys\n");
kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
- kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
+ kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(GNILND_MAX_IOV * sizeof (struct page *),
GFP_KERNEL);
if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
kgnilnd_data.kgn_init = GNILND_INIT_ALL;
/*****************************************************/
- CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
+ CDEBUG(D_MALLOC, "initial kmem %lld\n", pkmem);
RETURN(0);
failed:
kgnilnd_unmap_fma_blocks(dev);
kgnilnd_schedule_device(dev);
- wake_up_all(&dev->gnd_dgram_waitq);
- wake_up_all(&dev->gnd_dgping_waitq);
+ wake_up(&dev->gnd_dgram_waitq);
+ wake_up(&dev->gnd_dgping_waitq);
LASSERT(list_empty(&dev->gnd_connd_peers));
}
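
wake_up() and wake_up_all() differ only for exclusive waiters (queued via prepare_to_wait_exclusive() or the wait_event_*_exclusive() forms); plain wake_up() already wakes every non-exclusive sleeper, so for queues used only through wait_event() the two are equivalent and the _all variants were redundant. Compactly:

/* wake_up():     all non-exclusive waiters + at most one exclusive
 * wake_up_all(): every waiter, exclusive included
 * wait_event() sleeps non-exclusively, so both calls wake it. */
wake_up(&dev->gnd_dgram_waitq);
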
spin_lock(&kgnilnd_data.kgn_reaper_lock);
- wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
+ wake_up(&kgnilnd_data.kgn_reaper_waitq);
spin_unlock(&kgnilnd_data.kgn_reaper_lock);
if (atomic_read(&kgnilnd_data.kgn_nthreads))
kfree(kgnilnd_data.kgn_cksum_map_pages);
}
- CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
+ libcfs_kmem_read());
kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
module_put(THIS_MODULE);
INIT_LIST_HEAD(&net->gnn_list);
ni->ni_data = net;
net->gnn_ni = ni;
- if (!ni->ni_net->net_tunables_set) {
- ni->ni_net->net_tunables.lct_max_tx_credits =
- *kgnilnd_tunables.kgn_credits;
- ni->ni_net->net_tunables.lct_peer_tx_credits =
- *kgnilnd_tunables.kgn_peer_credits;
+
+ kgnilnd_tunables_setup(ni);
+
+ if (!ni->ni_interface) {
+ rc = lnet_ni_add_interface(ni, "ipogif0");
+ if (rc < 0)
+ CWARN("gnilnd failed to allocate ni_interface\n");
}
if (*kgnilnd_tunables.kgn_peer_health) {
atomic_set(&net->gnn_refcount, 1);
/* if we have multiple devices, spread the nets around */
- net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
+ net->gnn_netnum = LNET_NETNUM(LNET_NID_NET(&ni->ni_nid));
- devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
+ devno = LNET_NID_NET(&ni->ni_nid) % GNILND_MAXDEVS;
net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
/* allocate a 'dummy' cdm for datagram use. We can only have a single
/* the instance id for the cdm is the NETNUM offset by MAXDEVS -
* ensuring we'll have a unique id */
-
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
+ ni->ni_nid.nid_addr[0] =
+ cpu_to_be32(LNET_NIDADDR(net->gnn_dev->gnd_nid));
CDEBUG(D_NET, "adding net %p nid=%s on dev %d \n",
- net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
+ net, libcfs_nidstr(&ni->ni_nid), net->gnn_dev->gnd_id);
/* until the gnn_list is set, we need to cleanup ourselves as
* kgnilnd_shutdown is just gonna get confused */
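
The one-field assignment works because, for a 4-byte gni address, only nid_addr[0] of the large NID carries payload (stored big-endian), and LNet already filled nid_type/nid_num from the net configuration. The layout being written into is, roughly, the lnet-types.h definition:

struct lnet_nid {
	__u8	nid_size;	/* total address bytes - 8 */
	__u8	nid_type;	/* LND type, e.g. GNILND */
	__be16	nid_num;	/* network number */
	__be32	nid_addr[4];	/* address; word 0 for 4-byte NIDs */
} __attribute__((packed));
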
/* Serialize with startup. */
mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
- CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
+ libcfs_kmem_read());
if (net == NULL) {
CERROR("got NULL net for ni %p\n", ni);
kgnilnd_base_shutdown();
}
}
- CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
+ libcfs_kmem_read());
mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
EXIT;