#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
-#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof (*(ptr)));
-#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof (*(ptr)));
+#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
+#define CFS_ALLOC_PTR_ARRAY(ptr, count) \
+	LIBCFS_ALLOC(ptr, (count) * sizeof(*(ptr)))
+
+#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
+#define CFS_FREE_PTR_ARRAY(ptr, count) \
+ LIBCFS_FREE(ptr, (count) * sizeof(*(ptr)))
/* implication */
#define ergo(a, b) (!(a) || (b))
if (!cptab->ctb_nodemask)
goto failed_alloc_nodemask;
- LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
- nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));
+ CFS_ALLOC_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);
if (!cptab->ctb_cpu2cpt)
goto failed_alloc_cpu2cpt;
memset(cptab->ctb_cpu2cpt, -1,
nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));
- LIBCFS_ALLOC(cptab->ctb_node2cpt,
- nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));
+ CFS_ALLOC_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);
if (!cptab->ctb_node2cpt)
goto failed_alloc_node2cpt;
memset(cptab->ctb_node2cpt, -1,
nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));
- LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
+ CFS_ALLOC_PTR_ARRAY(cptab->ctb_parts, ncpt);
if (!cptab->ctb_parts)
goto failed_alloc_ctb_parts;
if (!part->cpt_nodemask)
goto failed_setting_ctb_parts;
- LIBCFS_ALLOC(part->cpt_distance,
- cptab->ctb_nparts * sizeof(part->cpt_distance[0]));
+ CFS_ALLOC_PTR_ARRAY(part->cpt_distance, cptab->ctb_nparts);
if (!part->cpt_distance)
goto failed_setting_ctb_parts;
free_cpumask_var(part->cpt_cpumask);
if (part->cpt_distance) {
- LIBCFS_FREE(part->cpt_distance,
- cptab->ctb_nparts *
- sizeof(part->cpt_distance[0]));
+ CFS_FREE_PTR_ARRAY(part->cpt_distance,
+ cptab->ctb_nparts);
}
}
- if (cptab->ctb_parts) {
- LIBCFS_FREE(cptab->ctb_parts,
- cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
- }
+ if (cptab->ctb_parts)
+ CFS_FREE_PTR_ARRAY(cptab->ctb_parts, cptab->ctb_nparts);
+
failed_alloc_ctb_parts:
- if (cptab->ctb_node2cpt) {
- LIBCFS_FREE(cptab->ctb_node2cpt,
- nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));
- }
+ if (cptab->ctb_node2cpt)
+ CFS_FREE_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);
+
failed_alloc_node2cpt:
- if (cptab->ctb_cpu2cpt) {
- LIBCFS_FREE(cptab->ctb_cpu2cpt,
- nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));
- }
+ if (cptab->ctb_cpu2cpt)
+ CFS_FREE_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);
+
failed_alloc_cpu2cpt:
if (cptab->ctb_nodemask)
LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
{
int i;
- if (cptab->ctb_cpu2cpt) {
- LIBCFS_FREE(cptab->ctb_cpu2cpt,
- nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));
- }
+ if (cptab->ctb_cpu2cpt)
+ CFS_FREE_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);
- if (cptab->ctb_node2cpt) {
- LIBCFS_FREE(cptab->ctb_node2cpt,
- nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));
- }
+ if (cptab->ctb_node2cpt)
+ CFS_FREE_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);
for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
free_cpumask_var(part->cpt_cpumask);
- if (part->cpt_distance) {
- LIBCFS_FREE(part->cpt_distance,
- cptab->ctb_nparts *
- sizeof(part->cpt_distance[0]));
- }
+ if (part->cpt_distance)
+ CFS_FREE_PTR_ARRAY(part->cpt_distance,
+ cptab->ctb_nparts);
}
- if (cptab->ctb_parts) {
- LIBCFS_FREE(cptab->ctb_parts,
- cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
- }
+ if (cptab->ctb_parts)
+ CFS_FREE_PTR_ARRAY(cptab->ctb_parts, cptab->ctb_nparts);
if (cptab->ctb_nodemask)
LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
return -EINVAL;
}
- LIBCFS_ALLOC(val, sizeof(val[0]) * count);
+ CFS_ALLOC_PTR_ARRAY(val, count);
if (val == NULL)
return -ENOMEM;
/* This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
* by OBD_FREE() if it's called by module other than libcfs & LNet,
* otherwise we will see fake memory leak */
- LIBCFS_FREE(values, num * sizeof(values[0]));
+ CFS_FREE_PTR_ARRAY(values, num);
}
EXPORT_SYMBOL(cfs_expr_list_values_free);
return;
}
- LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
+ CFS_ALLOC_PTR_ARRAY(nets, nnets);
if (nets == NULL) {
up_read(&kgnilnd_data.kgn_net_rw_sem);
kgnilnd_net_decref(net);
}
- LIBCFS_FREE(nets, nnets * sizeof(*nets));
+ CFS_FREE_PTR_ARRAY(nets, nnets);
}
}
init_rwsem(&dev->gnd_conn_sem);
/* alloc & setup nid based dgram table */
- LIBCFS_ALLOC(dev->gnd_dgrams,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(dev->gnd_dgrams,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (dev->gnd_dgrams == NULL)
GOTO(failed, rc = -ENOMEM);
rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
- LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_peers,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (kgnilnd_data.kgn_peers == NULL)
GOTO(failed, rc = -ENOMEM);
INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
}
- LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_conns,
+ *kgnilnd_tunables.kgn_peer_hash_size);
if (kgnilnd_data.kgn_conns == NULL)
GOTO(failed, rc = -ENOMEM);
INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
}
- LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
- sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_nets,
+ *kgnilnd_tunables.kgn_net_hash_size);
if (kgnilnd_data.kgn_nets == NULL)
GOTO(failed, rc = -ENOMEM);
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_peers,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_peers,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
down_write(&kgnilnd_data.kgn_net_rw_sem);
for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_nets,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_net_hash_size);
+		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_nets,
+ *kgnilnd_tunables.kgn_net_hash_size);
}
up_write(&kgnilnd_data.kgn_net_rw_sem);
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
- LIBCFS_FREE(kgnilnd_data.kgn_conns,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_conns,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
"dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
if (dev->gnd_dgrams != NULL) {
- for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
+ for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size;
+ i++)
LASSERT(list_empty(&dev->gnd_dgrams[i]));
- LIBCFS_FREE(dev->gnd_dgrams,
- sizeof (struct list_head) *
- *kgnilnd_tunables.kgn_peer_hash_size);
+ CFS_FREE_PTR_ARRAY(dev->gnd_dgrams,
+ *kgnilnd_tunables.kgn_peer_hash_size);
}
kgnilnd_free_phys_fmablk(dev);
}
/* allocate just enough space for the bits to track the mailboxes */
- LIBCFS_ALLOC(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox) * sizeof(unsigned long));
+ CFS_ALLOC_PTR_ARRAY(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox));
if (fma_blk->gnm_bit_array == NULL) {
CNETERR("could not allocate mailbox bitmask, %lu bytes for %d mbox\n",
sizeof(unsigned long) * BITS_TO_LONGS(num_mbox), num_mbox);
}
bitmap_zero(fma_blk->gnm_bit_array, num_mbox);
- /* now that the num_mbox is set based on allocation type, get debug info setup */
- LIBCFS_ALLOC(fma_blk->gnm_mbox_info, sizeof(kgn_mbox_info_t) * num_mbox);
+	/* now that the num_mbox is set based on allocation type, get debug
+	 * info setup
+	 */
+ CFS_ALLOC_PTR_ARRAY(fma_blk->gnm_mbox_info, num_mbox);
if (fma_blk->gnm_mbox_info == NULL) {
CNETERR("could not allocate mailbox debug, %lu bytes for %d mbox\n",
sizeof(kgn_mbox_info_t) * num_mbox, num_mbox);
return 0;
free_info:
- LIBCFS_FREE(fma_blk->gnm_mbox_info, sizeof(kgn_mbox_info_t)*num_mbox);
+ CFS_FREE_PTR_ARRAY(fma_blk->gnm_mbox_info, num_mbox);
free_bit:
- LIBCFS_FREE(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox) * sizeof (unsigned long));
+ CFS_FREE_PTR_ARRAY(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox));
free_blk:
if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
kgnilnd_vfree(fma_blk->gnm_block, fma_blk->gnm_blk_size);
list_del(&fma_blk->gnm_bufflist);
- LIBCFS_FREE(fma_blk->gnm_mbox_info, sizeof(kgn_mbox_info_t)*fma_blk->gnm_num_mboxs);
- LIBCFS_FREE(fma_blk->gnm_bit_array, BITS_TO_LONGS(fma_blk->gnm_num_mboxs) * sizeof (unsigned long));
+ CFS_FREE_PTR_ARRAY(fma_blk->gnm_mbox_info, fma_blk->gnm_num_mboxs);
+ CFS_FREE_PTR_ARRAY(fma_blk->gnm_bit_array,
+ BITS_TO_LONGS(fma_blk->gnm_num_mboxs));
LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
}
__u16 cksum, cksum2;
__u64 mbytes;
- LIBCFS_ALLOC(src, LNET_MAX_IOV * sizeof(lnet_kiov_t));
- LIBCFS_ALLOC(dest, LNET_MAX_IOV * sizeof(lnet_kiov_t));
+ CFS_ALLOC_PTR_ARRAY(src, LNET_MAX_IOV);
+ CFS_ALLOC_PTR_ARRAY(dest, LNET_MAX_IOV);
if (src == NULL || dest == NULL) {
CERROR("couldn't allocate iovs\n");
}
if (src != NULL)
- LIBCFS_FREE(src, LNET_MAX_IOV * sizeof(lnet_kiov_t));
+ CFS_FREE_PTR_ARRAY(src, LNET_MAX_IOV);
if (dest != NULL)
- LIBCFS_FREE(dest, LNET_MAX_IOV * sizeof(lnet_kiov_t));
+ CFS_FREE_PTR_ARRAY(dest, LNET_MAX_IOV);
return rc;
}
if (conn->ibc_rx_pages != NULL)
kiblnd_unmap_rx_descs(conn);
- if (conn->ibc_rxs != NULL) {
- LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
- }
+ if (conn->ibc_rxs != NULL)
+ CFS_FREE_PTR_ARRAY(conn->ibc_rxs, IBLND_RX_MSGS(conn));
if (conn->ibc_connvars != NULL)
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
if (tpo->tpo_tx_descs == NULL)
goto out;
- for (i = 0; i < pool->po_size; i++) {
+ for (i = 0; i < pool->po_size; i++) {
struct kib_tx *tx = &tpo->tpo_tx_descs[i];
int wrq_sge = *kiblnd_tunables.kib_wrq_sge;
list_del(&tx->tx_list);
- if (tx->tx_pages != NULL)
- LIBCFS_FREE(tx->tx_pages,
- LNET_MAX_IOV *
- sizeof(*tx->tx_pages));
- if (tx->tx_frags != NULL)
- LIBCFS_FREE(tx->tx_frags,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_frags));
- if (tx->tx_wrq != NULL)
- LIBCFS_FREE(tx->tx_wrq,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_wrq));
+ if (tx->tx_pages != NULL)
+ CFS_FREE_PTR_ARRAY(tx->tx_pages, LNET_MAX_IOV);
+ if (tx->tx_frags != NULL)
+ CFS_FREE_PTR_ARRAY(tx->tx_frags,
+ (1 + IBLND_MAX_RDMA_FRAGS));
+ if (tx->tx_wrq != NULL)
+ CFS_FREE_PTR_ARRAY(tx->tx_wrq,
+ (1 + IBLND_MAX_RDMA_FRAGS));
if (tx->tx_sge != NULL)
- LIBCFS_FREE(tx->tx_sge,
- (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
- sizeof(*tx->tx_sge));
- if (tx->tx_rd != NULL)
- LIBCFS_FREE(tx->tx_rd,
+ CFS_FREE_PTR_ARRAY(tx->tx_sge,
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ wrq_sge);
+ if (tx->tx_rd != NULL)
+ LIBCFS_FREE(tx->tx_rd,
offsetof(struct kib_rdma_desc,
- rd_frags[IBLND_MAX_RDMA_FRAGS]));
- }
+ rd_frags[IBLND_MAX_RDMA_FRAGS]));
+ }
- LIBCFS_FREE(tpo->tpo_tx_descs,
- pool->po_size * sizeof(struct kib_tx));
+ CFS_FREE_PTR_ARRAY(tpo->tpo_tx_descs, pool->po_size);
out:
- kiblnd_fini_pool(pool);
+ kiblnd_fini_pool(pool);
CFS_FREE_PTR(tpo);
}
break;
}
- if (kiblnd_data.kib_peers != NULL) {
- LIBCFS_FREE(kiblnd_data.kib_peers,
- sizeof(struct list_head) *
- kiblnd_data.kib_peer_hash_size);
- }
+ if (kiblnd_data.kib_peers)
+ CFS_FREE_PTR_ARRAY(kiblnd_data.kib_peers,
+ kiblnd_data.kib_peer_hash_size);
if (kiblnd_data.kib_scheds != NULL)
cfs_percpt_free(kiblnd_data.kib_scheds);
INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
- LIBCFS_ALLOC(kiblnd_data.kib_peers,
- sizeof(struct list_head) *
- kiblnd_data.kib_peer_hash_size);
+ CFS_ALLOC_PTR_ARRAY(kiblnd_data.kib_peers,
+ kiblnd_data.kib_peer_hash_size);
if (kiblnd_data.kib_peers == NULL)
goto failed;
}
spin_unlock_bh(&sched->kss_lock);
- LIBCFS_FREE(rx_scratch_pgs, sizeof(*rx_scratch_pgs) *
- LNET_MAX_IOV);
- LIBCFS_FREE(scratch_iov, sizeof(*scratch_iov) *
- LNET_MAX_IOV);
+ CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
+ CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
ksocknal_thread_fini();
return 0;
}
LASSERT(the_lnet.ln_remote_nets_hash == NULL);
LASSERT(the_lnet.ln_remote_nets_hbits > 0);
- LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
+ CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
if (hash == NULL) {
CERROR("Failed to create remote nets hash table\n");
return -ENOMEM;
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
- LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
- LNET_REMOTE_NETS_HASH_SIZE *
- sizeof(the_lnet.ln_remote_nets_hash[0]));
+ CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
+ LNET_REMOTE_NETS_HASH_SIZE);
the_lnet.ln_remote_nets_hash = NULL;
}
}
if (rec->rec_lh_hash != NULL) {
- LIBCFS_FREE(rec->rec_lh_hash,
- LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
+ CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
rec->rec_lh_hash = NULL;
}
int i;
int rc;
int max_intf = lnet_interfaces_max;
- size_t buf_size;
if (n_ids <= 0 ||
id.nid == LNET_NID_ANY)
if (n_ids > max_intf)
n_ids = max_intf;
- buf_size = n_ids * sizeof(*buf);
-
- LIBCFS_ALLOC(buf, buf_size);
+ CFS_ALLOC_PTR_ARRAY(buf, n_ids);
if (!buf)
return -ENOMEM;
out:
lnet_net_unlock(cpt);
- LIBCFS_FREE(buf, buf_size);
+ CFS_FREE_PTR_ARRAY(buf, n_ids);
return rc;
}
if (cpts == NULL) {
/* there is an NI which will exist on all CPTs */
if (net->net_cpts != NULL)
- LIBCFS_FREE(net->net_cpts, sizeof(*net->net_cpts) *
- net->net_ncpts);
+ CFS_FREE_PTR_ARRAY(net->net_cpts, net->net_ncpts);
net->net_cpts = NULL;
net->net_ncpts = LNET_CPT_NUMBER;
return 0;
}
if (net->net_cpts == NULL) {
- LIBCFS_ALLOC(net->net_cpts, sizeof(*net->net_cpts) * ncpts);
+ CFS_ALLOC_PTR_ARRAY(net->net_cpts, ncpts);
if (net->net_cpts == NULL)
return -ENOMEM;
memcpy(net->net_cpts, cpts, ncpts * sizeof(*net->net_cpts));
return 0;
}
- LIBCFS_ALLOC(added_cpts, sizeof(*added_cpts) * LNET_CPT_NUMBER);
+ CFS_ALLOC_PTR_ARRAY(added_cpts, LNET_CPT_NUMBER);
if (added_cpts == NULL)
return -ENOMEM;
__u32 *array = NULL, *loc;
__u32 total_entries = j + net->net_ncpts;
- LIBCFS_ALLOC(array, sizeof(*net->net_cpts) * total_entries);
+ CFS_ALLOC_PTR_ARRAY(array, total_entries);
if (array == NULL) {
rc = -ENOMEM;
goto failed;
loc = array + net->net_ncpts;
memcpy(loc, added_cpts, j * sizeof(*net->net_cpts));
- LIBCFS_FREE(net->net_cpts, sizeof(*net->net_cpts) *
- net->net_ncpts);
+ CFS_FREE_PTR_ARRAY(net->net_cpts, net->net_ncpts);
net->net_ncpts = total_entries;
net->net_cpts = array;
}
failed:
- LIBCFS_FREE(added_cpts, sizeof(*added_cpts) * LNET_CPT_NUMBER);
+ CFS_FREE_PTR_ARRAY(added_cpts, LNET_CPT_NUMBER);
return rc;
}
* CPTs which the remaining NIs are associated with.
*/
if (net->net_cpts != NULL) {
- LIBCFS_FREE(net->net_cpts,
- sizeof(*net->net_cpts) * net->net_ncpts);
+ CFS_FREE_PTR_ARRAY(net->net_cpts, net->net_ncpts);
net->net_cpts = NULL;
}
* accross CPT lines.
*/
if (net->net_cpts != NULL) {
- LIBCFS_FREE(net->net_cpts,
- sizeof(*net->net_cpts) *
- net->net_ncpts);
+ CFS_FREE_PTR_ARRAY(net->net_cpts,
+ net->net_ncpts);
net->net_cpts = NULL;
net->net_ncpts = LNET_CPT_NUMBER;
}
}
if (net->net_cpts != NULL)
- LIBCFS_FREE(net->net_cpts,
- sizeof(*net->net_cpts) * net->net_ncpts);
+ CFS_FREE_PTR_ARRAY(net->net_cpts, net->net_ncpts);
LIBCFS_FREE(net, sizeof(*net));
}
LASSERT(rc <= LNET_CPT_NUMBER);
if (rc == LNET_CPT_NUMBER) {
- LIBCFS_FREE(ni->ni_cpts, rc * sizeof(ni->ni_cpts[0]));
+ CFS_FREE_PTR_ARRAY(ni->ni_cpts, rc);
ni->ni_cpts = NULL;
}
ni->ni_ncpts = LNET_CPT_NUMBER;
} else {
size_t array_size = ncpts * sizeof(ni->ni_cpts[0]);
- LIBCFS_ALLOC(ni->ni_cpts, array_size);
+
+ CFS_ALLOC_PTR_ARRAY(ni->ni_cpts, ncpts);
if (ni->ni_cpts == NULL)
goto failed;
memcpy(ni->ni_cpts, cpts, array_size);
return nip;
}
- LIBCFS_ALLOC(ipaddrs, nip * sizeof(*ipaddrs));
+ CFS_ALLOC_PTR_ARRAY(ipaddrs, nip);
if (!ipaddrs) {
rc = -ENOMEM;
CERROR("lnet: Can't allocate ipaddrs[%d], rc = %d\n",
"any local IP interfaces\n");
rc = -ENOENT;
}
- LIBCFS_FREE(ipaddrs, nip * sizeof(*ipaddrs));
+ CFS_FREE_PTR_ARRAY(ipaddrs, nip);
out_free_addrs:
kfree(ifaces);
return rc > 0 ? 0 : rc;
CERROR("%d active msg on exit\n", count);
if (container->msc_finalizers != NULL) {
- LIBCFS_FREE(container->msc_finalizers,
- container->msc_nfinalizers *
- sizeof(*container->msc_finalizers));
+ CFS_FREE_PTR_ARRAY(container->msc_finalizers,
+ container->msc_nfinalizers);
container->msc_finalizers = NULL;
}
if (container->msc_resenders != NULL) {
- LIBCFS_FREE(container->msc_resenders,
- container->msc_nfinalizers *
- sizeof(*container->msc_resenders));
+ CFS_FREE_PTR_ARRAY(container->msc_resenders,
+ container->msc_nfinalizers);
container->msc_resenders = NULL;
}
container->msc_init = 0;
}
}
/* the extra entry is for MEs with ignore bits */
- LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+ CFS_FREE_PTR_ARRAY(mhash, LNET_MT_HASH_SIZE + 1);
}
cfs_percpt_free(ptl->ptl_mtables);
for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
LASSERT(list_empty(&hash[j]));
- LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+ CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
}
cfs_percpt_free(the_lnet.ln_peer_tables);
if (oldnids) {
size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
- LIBCFS_FREE(oldnids, sizeof(*oldnids) * size);
+		/* size above is in bytes; pass the element count here */
+		CFS_FREE_PTR_ARRAY(oldnids, lpni->lpni_pref_nnids - 1);
}
out:
if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
if (oldnids) {
size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
- LIBCFS_FREE(oldnids, sizeof(*oldnids) * size);
+		/* size above is in bytes; pass the element count here */
+		CFS_FREE_PTR_ARRAY(oldnids, lpni->lpni_pref_nnids + 1);
}
out:
CDEBUG(D_NET, "peer %s nid %s: %d\n",
ptable->pt_zombies--;
spin_unlock(&ptable->pt_zombie_lock);
- if (lpni->lpni_pref_nnids > 1) {
- LIBCFS_FREE(lpni->lpni_pref.nids,
- sizeof(*lpni->lpni_pref.nids) * lpni->lpni_pref_nnids);
- }
+ if (lpni->lpni_pref_nnids > 1)
+ CFS_FREE_PTR_ARRAY(lpni->lpni_pref.nids, lpni->lpni_pref_nnids);
+
LIBCFS_FREE(lpni, sizeof(*lpni));
lnet_peer_net_decref_locked(lpn);
spin_unlock(&lp->lp_lock);
nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
- LIBCFS_ALLOC(curnis, nnis * sizeof(*curnis));
- LIBCFS_ALLOC(addnis, nnis * sizeof(*addnis));
- LIBCFS_ALLOC(delnis, nnis * sizeof(*delnis));
+ CFS_ALLOC_PTR_ARRAY(curnis, nnis);
+ CFS_ALLOC_PTR_ARRAY(addnis, nnis);
+ CFS_ALLOC_PTR_ARRAY(delnis, nnis);
if (!curnis || !addnis || !delnis) {
rc = -ENOMEM;
goto out;
*/
rc = 0;
out:
- LIBCFS_FREE(curnis, nnis * sizeof(*curnis));
- LIBCFS_FREE(addnis, nnis * sizeof(*addnis));
- LIBCFS_FREE(delnis, nnis * sizeof(*delnis));
+ CFS_FREE_PTR_ARRAY(curnis, nnis);
+ CFS_FREE_PTR_ARRAY(addnis, nnis);
+ CFS_FREE_PTR_ARRAY(delnis, nnis);
lnet_ping_buffer_decref(pbuf);
CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
return -ENOMEM;
}
- LIBCFS_ALLOC(bat->bat_cli_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ CFS_ALLOC_PTR_ARRAY(bat->bat_cli_hash, LST_NODE_HASHSIZE);
if (bat->bat_cli_hash == NULL) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat, sizeof(*bat));
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
- LIBCFS_ALLOC(bat->bat_srv_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
- if (bat->bat_srv_hash == NULL) {
- CERROR("Can't allocate hash for batch %s\n", name);
- LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
+ CFS_ALLOC_PTR_ARRAY(bat->bat_srv_hash, LST_NODE_HASHSIZE);
+ if (bat->bat_srv_hash == NULL) {
+ CERROR("Can't allocate hash for batch %s\n", name);
+		CFS_FREE_PTR_ARRAY(bat->bat_cli_hash, LST_NODE_HASHSIZE);
LIBCFS_FREE(bat, sizeof(*bat));
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
if (strlen(name) > sizeof(bat->bat_name)-1) {
LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
INIT_LIST_HEAD(&console_session.ses_bat_list);
INIT_LIST_HEAD(&console_session.ses_trans_list);
- LIBCFS_ALLOC(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ CFS_ALLOC_PTR_ARRAY(console_session.ses_ndl_hash,
+ LST_GLOBAL_HASHSIZE);
if (console_session.ses_ndl_hash == NULL)
return -ENOMEM;
rc = srpc_add_service(&lstcon_acceptor_service);
LASSERT(rc != -EBUSY);
if (rc != 0) {
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ CFS_FREE_PTR_ARRAY(console_session.ses_ndl_hash,
+ LST_GLOBAL_HASHSIZE);
return rc;
}
srpc_shutdown_service(&lstcon_acceptor_service);
srpc_remove_service(&lstcon_acceptor_service);
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ CFS_FREE_PTR_ARRAY(console_session.ses_ndl_hash, LST_GLOBAL_HASHSIZE);
srpc_wait_service_shutdown(&lstcon_acceptor_service);
for (i = 0; i < LST_NODE_HASHSIZE; i++)
LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ CFS_FREE_PTR_ARRAY(console_session.ses_ndl_hash,
+ LST_GLOBAL_HASHSIZE);
srpc_wait_service_shutdown(&lstcon_acceptor_service);
continue;
cfs_wi_sched_destroy(lst_sched_test[i]);
}
- LIBCFS_FREE(lst_sched_test,
- sizeof(lst_sched_test[0]) *
- cfs_cpt_number(lnet_cpt_table()));
+ CFS_FREE_PTR_ARRAY(lst_sched_test,
+ cfs_cpt_number(lnet_cpt_table()));
lst_sched_test = NULL;
/* fallthrough */
case LST_INIT_WI_SERIAL:
lst_init_step = LST_INIT_WI_SERIAL;
nscheds = cfs_cpt_number(lnet_cpt_table());
- LIBCFS_ALLOC(lst_sched_test, sizeof(lst_sched_test[0]) * nscheds);
+ CFS_ALLOC_PTR_ARRAY(lst_sched_test, nscheds);
if (lst_sched_test == NULL)
goto error;
spin_lock_init(&stats->ls_lock);
/* alloc num of counter headers */
- LIBCFS_ALLOC(stats->ls_cnt_header,
- stats->ls_num * sizeof(struct lprocfs_counter_header));
+ CFS_ALLOC_PTR_ARRAY(stats->ls_cnt_header, stats->ls_num);
if (!stats->ls_cnt_header)
goto fail;
if (stats->ls_percpu[i])
LIBCFS_FREE(stats->ls_percpu[i], percpusize);
if (stats->ls_cnt_header)
- LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num *
- sizeof(struct lprocfs_counter_header));
+ CFS_FREE_PTR_ARRAY(stats->ls_cnt_header, stats->ls_num);
LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
}
EXPORT_SYMBOL(lprocfs_free_stats);