* check context */
conn->gnc_device = dev;
- conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
- GNILND_MIN_TIMEOUT);
+ conn->gnc_timeout = max(*kgnilnd_tunables.kgn_timeout,
+ GNILND_MIN_TIMEOUT);
kgnilnd_update_reaper_timeout(conn->gnc_timeout);
/* this is the ep_handle for doing SMSG & BTE */
return;
}
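
The MAX()/MIN() conversions in this patch are not purely cosmetic: the kernel's min()/max() from <linux/minmax.h> evaluate each argument exactly once and complain at build time when the operands' types differ, while the old ?:-style macros evaluate one argument twice. A minimal userspace sketch of the difference (CLASSIC_MAX and SAFE_MAX are illustrative names, and SAFE_MAX is a simplification of what <linux/minmax.h> really does):

	#include <stdio.h>

	/* the classic pattern the old MAX() follows: one argument is
	 * evaluated twice */
	#define CLASSIC_MAX(a, b) (((a) > (b)) ? (a) : (b))

	/* single-evaluation sketch in the spirit of <linux/minmax.h>;
	 * the pointer comparison only warns on mismatched types, where
	 * the real kernel macro fails the build */
	#define SAFE_MAX(a, b) ({			\
		typeof(a) __a = (a);			\
		typeof(b) __b = (b);			\
		(void)(&__a == &__b);			\
		__a > __b ? __a : __b; })

	int main(void)
	{
		int x = 3;

		/* x++ runs twice: prints 4 (not 3) and leaves x at 5 */
		printf("%d\n", CLASSIC_MAX(x++, 2));
		x = 3;
		/* single evaluation: prints 3 and leaves x at 4 */
		printf("%d\n", SAFE_MAX(x++, 2));
		return 0;
	}
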
- LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
+ CFS_ALLOC_PTR_ARRAY(nets, nnets);
if (nets == NULL) {
up_read(&kgnilnd_data.kgn_net_rw_sem);
kgnilnd_net_decref(net);
}
- LIBCFS_FREE(nets, nnets * sizeof(*nets));
+ CFS_FREE_PTR_ARRAY(nets, nnets);
}
}
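
The CFS_*_PTR_ARRAY() helpers take an element count and derive the element size from the pointer they are handed, so the sizeof() expression can no longer drift out of sync with the pointer's type. As I read libcfs, they are thin wrappers over the old interface, roughly:

	/* approximate definitions, for illustration; check
	 * libcfs_private.h for the authoritative versions */
	#define CFS_ALLOC_PTR_ARRAY(ptr, count) \
		LIBCFS_ALLOC(ptr, (count) * sizeof(*(ptr)))
	#define CFS_FREE_PTR_ARRAY(ptr, count) \
		LIBCFS_FREE(ptr, (count) * sizeof(*(ptr)))

The payoff shows at call sites like the hash-table allocations below: spelling out sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size repeats the element type by hand, and a mismatch between that sizeof() and the pointer being assigned silently mis-sizes the buffer; the array form computes the size from *(ptr) itself.
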
/* set timeout vals in conn early so we can use them for the NAK */
/* use max of the requested and our timeout, peer will do the same */
- conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);
+ conn->gnc_timeout = max(conn->gnc_timeout, connreq->gncr_timeout);
/* only ep_bind really mucks around with the CQ */
/* only ep bind if we are not connecting to ourself and the dstnid is not a wildcard. this check
current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
}
- current_to = MIN(current_to,
- *kgnilnd_tunables.kgn_max_reconnect_interval);
+ current_to = min(current_to,
+ *kgnilnd_tunables.kgn_max_reconnect_interval);
peer->gnp_reconnect_interval = current_to;
CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
return rc;
}
- i = 4;
- while (atomic_read(&kgnilnd_data.kgn_npending_conns) ||
- atomic_read(&kgnilnd_data.kgn_npending_detach) ||
- atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- i++;
-
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
+ wait_var_event_warning(&kgnilnd_data,
+ !atomic_read(&kgnilnd_data.kgn_npending_conns) &&
+ !atomic_read(&kgnilnd_data.kgn_npending_detach) &&
+ !atomic_read(&kgnilnd_data.kgn_npending_unlink),
+ "Waiting on %d peers %d closes %d detaches\n",
atomic_read(&kgnilnd_data.kgn_npending_unlink),
atomic_read(&kgnilnd_data.kgn_npending_conns),
atomic_read(&kgnilnd_data.kgn_npending_detach));
- }
return rc;
}
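
The removed block open-coded a poll loop: sleep a second, re-check three pending counters, and log, promoting the message to D_WARNING whenever the pass counter hit a power of two. wait_var_event_warning() is the libcfs helper that packages that pattern; as I understand it, it is built on the wait_var_event() machinery, so a wake_up_var(&kgnilnd_data) from the teardown paths can end the wait immediately instead of waiting out the one-second poll. The open-coded equivalent, using the same sleep helper adopted elsewhere in this patch:

	int i = 4;

	while (atomic_read(&kgnilnd_data.kgn_npending_conns) ||
	       atomic_read(&kgnilnd_data.kgn_npending_detach) ||
	       atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
		schedule_timeout_uninterruptible(cfs_time_seconds(1));
		i++;
		/* (i & -i) == i only when i is a power of two, so
		 * D_WARNING fires on passes where i is 8, 16, 32, ...
		 * while every pass still logs quietly at D_NET */
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting on %d peers %d closes %d detaches\n",
		       atomic_read(&kgnilnd_data.kgn_npending_unlink),
		       atomic_read(&kgnilnd_data.kgn_npending_conns),
		       atomic_read(&kgnilnd_data.kgn_npending_detach));
	}
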
init_rwsem(&dev->gnd_conn_sem);
/* alloc & setup nid based dgram table */
- LIBCFS_ALLOC(dev->gnd_dgrams,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(dev->gnd_dgrams,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (dev->gnd_dgrams == NULL)
GOTO(failed, rc = -ENOMEM);
rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
- LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_peers,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (kgnilnd_data.kgn_peers == NULL)
GOTO(failed, rc = -ENOMEM);
INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
}
- LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_conns,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (kgnilnd_data.kgn_conns == NULL)
GOTO(failed, rc = -ENOMEM);
INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
}
- LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_nets,
+ *kgnilnd_tunables.kgn_net_hash_size);
if (kgnilnd_data.kgn_nets == NULL)
GOTO(failed, rc = -ENOMEM);
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
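
schedule_timeout_uninterruptible() is the stock kernel replacement for the two-line pattern removed here and in the hunks below; it sets the task state itself before sleeping (cfs_time_seconds(1) is just one second converted to jiffies). In effect:

	/* what the kernel helper does (kernel/time/timer.c) */
	signed long schedule_timeout_uninterruptible(signed long timeout)
	{
		__set_current_state(TASK_UNINTERRUPTIBLE);
		return schedule_timeout(timeout);
	}
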
/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
* have to worry about shutdown races. NB connections may be created
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for ruhroh thread to terminate\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* Flag threads to terminate */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
atomic_read(&kgnilnd_data.kgn_nthreads));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_peers,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_peers,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
down_write(&kgnilnd_data.kgn_net_rw_sem);
for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_nets,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_net_hash_size);
+ CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_nets,
+ *kgnilnd_tunables.kgn_net_hash_size);
}
up_write(&kgnilnd_data.kgn_net_rw_sem);
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_conns,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_conns,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
"dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
if (dev->gnd_dgrams != NULL) {
- for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
+ for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size;
+ i++)
LASSERT(list_empty(&dev->gnd_dgrams[i]));
- LIBCFS_FREE(dev->gnd_dgrams,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_FREE_PTR_ARRAY(dev->gnd_dgrams,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
kgnilnd_free_phys_fmablk(dev);
"Waiting for %d references to clear on net %d\n",
atomic_read(&net->gnn_refcount),
net->gnn_netnum);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* release ref from kgnilnd_startup */