return;
}
- LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
+ CFS_ALLOC_PTR_ARRAY(nets, nnets);
if (nets == NULL) {
up_read(&kgnilnd_data.kgn_net_rw_sem);
kgnilnd_net_decref(net);
}
- LIBCFS_FREE(nets, nnets * sizeof(*nets));
+ CFS_FREE_PTR_ARRAY(nets, nnets);
}
}
return rc;
}
- i = 4;
- while (atomic_read(&kgnilnd_data.kgn_npending_conns) ||
- atomic_read(&kgnilnd_data.kgn_npending_detach) ||
- atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- i++;
-
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
+ wait_var_event_warning(&kgnilnd_data,
+ !atomic_read(&kgnilnd_data.kgn_npending_conns) &&
+ !atomic_read(&kgnilnd_data.kgn_npending_detach) &&
+ !atomic_read(&kgnilnd_data.kgn_npending_unlink),
+ "Waiting on %d peers %d closes %d detaches\n",
atomic_read(&kgnilnd_data.kgn_npending_unlink),
atomic_read(&kgnilnd_data.kgn_npending_conns),
atomic_read(&kgnilnd_data.kgn_npending_detach));
- }
return rc;
}
init_rwsem(&dev->gnd_conn_sem);
/* alloc & setup nid based dgram table */
- LIBCFS_ALLOC(dev->gnd_dgrams,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(dev->gnd_dgrams,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (dev->gnd_dgrams == NULL)
GOTO(failed, rc = -ENOMEM);
rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
- LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_peers,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (kgnilnd_data.kgn_peers == NULL)
GOTO(failed, rc = -ENOMEM);
INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
}
- LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_conns,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (kgnilnd_data.kgn_conns == NULL)
GOTO(failed, rc = -ENOMEM);
INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
}
- LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_nets,
+ *kgnilnd_tunables.kgn_net_hash_size);
if (kgnilnd_data.kgn_nets == NULL)
GOTO(failed, rc = -ENOMEM);
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
* have to worry about shutdown races. NB connections may be created
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for ruhroh thread to terminate\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* Flag threads to terminate */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
atomic_read(&kgnilnd_data.kgn_nthreads));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_peers,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_peers,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
down_write(&kgnilnd_data.kgn_net_rw_sem);
for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_nets,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_net_hash_size);
+ CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_nets,
+ *kgnilnd_tunables.kgn_net_hash_size);
}
up_write(&kgnilnd_data.kgn_net_rw_sem);
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_conns,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_conns,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
"dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
if (dev->gnd_dgrams != NULL) {
- for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
+ for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size;
+ i++)
LASSERT(list_empty(&dev->gnd_dgrams[i]));
- LIBCFS_FREE(dev->gnd_dgrams,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_FREE_PTR_ARRAY(dev->gnd_dgrams,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
kgnilnd_free_phys_fmablk(dev);
"Waiting for %d references to clear on net %d\n",
atomic_read(&net->gnn_refcount),
net->gnn_netnum);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* release ref from kgnilnd_startup */