conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;
/* if this fails, we have conflicts and MAX_TX is too large */
- CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);
+ BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);
/* get a new unique CQ id for this conn */
write_lock(&kgnilnd_data.kgn_peer_conn_lock);
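A note on the conversion pattern used throughout this series: CLASSERT(cond) asserted that cond holds at compile time, whereas the kernel's BUILD_BUG_ON(cond) breaks the build when cond holds, so every condition below is negated during conversion. A minimal sketch of the equivalence, with invented constants:

#include <linux/bug.h>

#define DEMO_MAX_MSG_ID		0xfff0
#define DEMO_MSGID_CLOSE	0xffff

static inline void demo_check(void)
{
	/* old form asserted the property: CLASSERT(MAX < CLOSE); */
	/* new form fails the build when the property is violated: */
	BUILD_BUG_ON(DEMO_MAX_MSG_ID >= DEMO_MSGID_CLOSE);
}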
int hold_timeout = 0;
/* code below relies on +1 relationship ... */
- CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1));
- CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1));
+ BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
+ (GNILND_BUF_PHYS_UNMAPPED + 1));
+ BUILD_BUG_ON(GNILND_BUF_VIRT_MAPPED !=
+ (GNILND_BUF_VIRT_UNMAPPED + 1));
switch (tx->tx_buftype) {
default:
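The +1 assertions above exist so that, presumably, the gnilnd mapping paths can move a buffer from its UNMAPPED to MAPPED state with a bare increment instead of a per-type switch. A hypothetical sketch (enum, struct, and function names invented here):

#include <linux/bug.h>

enum demo_buftype {
	DEMO_BUF_PHYS_UNMAPPED,
	DEMO_BUF_PHYS_MAPPED,		/* must stay UNMAPPED + 1 */
};

struct demo_tx { enum demo_buftype tx_buftype; };

static void demo_mark_mapped(struct demo_tx *tx)
{
	BUILD_BUG_ON(DEMO_BUF_PHYS_MAPPED != DEMO_BUF_PHYS_UNMAPPED + 1);
	tx->tx_buftype++;	/* UNMAPPED -> MAPPED without a switch */
}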
int err = 0;
/* ensure we haven't violated max datagram size */
- CLASSERT(sizeof(kgn_connreq_t) <= GNI_DATAGRAM_MAXSIZE);
+ BUILD_BUG_ON(sizeof(kgn_connreq_t) > GNI_DATAGRAM_MAXSIZE);
/* no need to zero out, we do that when allocating dgram */
connreq->gncr_magic = GNILND_MSG_MAGIC;
if (flip) {
/* leave magic unflipped as a clue to peer_ni endianness */
msg->ibm_version = version;
- CLASSERT (sizeof(msg->ibm_type) == 1);
- CLASSERT (sizeof(msg->ibm_credits) == 1);
+ BUILD_BUG_ON(sizeof(msg->ibm_type) != 1);
+ BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1);
msg->ibm_nob = msg_nob;
__swab64s(&msg->ibm_srcnid);
__swab64s(&msg->ibm_srcstamp);
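The sizeof == 1 assertions are what make it safe for the flip path to skip those fields: a one-byte field reads the same in either byte order, so only multi-byte fields need swabbing. A standalone illustration (struct invented):

#include <linux/swab.h>
#include <linux/bug.h>

struct demo_msg {
	__u8	type;		/* 1 byte: identical in both byte orders */
	__u64	srcnid;		/* 8 bytes: must be swabbed on a flip */
};

static void demo_flip(struct demo_msg *msg)
{
	BUILD_BUG_ON(sizeof(msg->type) != 1);	/* proves skipping is safe */
	__swab64s(&msg->srcnid);		/* type is left untouched */
}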
dev = net->ibn_dev;
- /* pre-mapped messages are not bigger than 1 page */
- CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);
+ /* pre-mapped messages are not bigger than 1 page */
+ BUILD_BUG_ON(IBLND_MSG_SIZE > PAGE_SIZE);
- /* No fancy arithmetic when we do the buffer calculations */
- CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);
+ /* No fancy arithmetic when we do the buffer calculations */
+ BUILD_BUG_ON(PAGE_SIZE % IBLND_MSG_SIZE != 0);
tpo->tpo_hdev = kiblnd_current_hdev(dev);
{
int rc;
- CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(struct kib_msg,
- ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <=
- IBLND_MSG_SIZE);
- CLASSERT(offsetof(struct kib_msg,
- ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
+ BUILD_BUG_ON(sizeof(struct kib_msg) > IBLND_MSG_SIZE);
+ BUILD_BUG_ON(offsetof(struct kib_msg,
+ ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) >
+ IBLND_MSG_SIZE);
+ BUILD_BUG_ON(offsetof(struct kib_msg,
+ ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) >
+ IBLND_MSG_SIZE);
rc = kiblnd_tunables_init();
if (rc != 0)
int rc;
/* check that the ksnr_connected/connecting field is large enough */
- CLASSERT(SOCKLND_CONN_NTYPES <= 4);
- CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
+ BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
+ BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);
/* initialize the_ksocklnd */
the_ksocklnd.lnd_type = SOCKLND;
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
struct lnet_magicversion *hmv;
- CLASSERT(sizeof(struct lnet_magicversion) ==
+ BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
offsetof(struct ksock_hello_msg, kshm_src_nid));
hmv = (struct lnet_magicversion *)hello;
int rc;
int i;
- CLASSERT(sizeof(struct lnet_magicversion) ==
- offsetof(struct lnet_hdr, src_nid));
+ BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
+ offsetof(struct lnet_hdr, src_nid));
LIBCFS_ALLOC(hdr, sizeof(*hdr));
if (hdr == NULL) {
int port;
int fatal;
- CLASSERT(sizeof(cr) <= 16); /* not too big to be on the stack */
+ BUILD_BUG_ON(sizeof(cr) > 16); /* not too big to be on the stack */
for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
continue;
}
- CLASSERT(LNET_PROTO_ACCEPTOR_VERSION == 1);
+ BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
int
lnet_fault_init(void)
{
- CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT);
- CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK);
- CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET);
- CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY);
+ BUILD_BUG_ON(LNET_PUT_BIT != 1 << LNET_MSG_PUT);
+ BUILD_BUG_ON(LNET_ACK_BIT != 1 << LNET_MSG_ACK);
+ BUILD_BUG_ON(LNET_GET_BIT != 1 << LNET_MSG_GET);
+ BUILD_BUG_ON(LNET_REPLY_BIT != 1 << LNET_MSG_REPLY);
mutex_init(&delay_dd.dd_mutex);
spin_lock_init(&delay_dd.dd_lock);
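Pinning LNET_*_BIT to 1 << LNET_MSG_* lets fault-injection rules test a message type against a configured bitmask with a single AND. A hedged sketch of that kind of filter (function and parameter names invented):

#include <linux/types.h>

static bool demo_msg_match(unsigned int msg_type, __u32 msg_mask)
{
	/* sound only because LNET_x_BIT == 1 << LNET_MSG_x is asserted */
	return (msg_mask & (1U << msg_type)) != 0;
}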
int ver;
int off;
- CLASSERT(sizeof(loff_t) >= 4);
+ BUILD_BUG_ON(sizeof(loff_t) < 4);
off = LNET_PROC_HOFF_GET(*ppos);
ver = LNET_PROC_VER_GET(*ppos);
if (*lenp == 0)
return 0;
- CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
+ BUILD_BUG_ON(LNET_PROC_HASH_BITS < LNET_PEER_HASH_BITS);
if (cpt >= LNET_CPT_NUMBER) {
*lenp = 0;
void
lnet_selftest_structure_assertion(void)
{
- CLASSERT(sizeof(struct srpc_msg) == 160);
- CLASSERT(sizeof(struct srpc_test_reqst) == 70);
- CLASSERT(offsetof(struct srpc_msg, msg_body.tes_reqst.tsr_concur) == 72);
- CLASSERT(offsetof(struct srpc_msg, msg_body.tes_reqst.tsr_ndest) == 78);
- CLASSERT(sizeof(struct srpc_stat_reply) == 136);
- CLASSERT(sizeof(struct srpc_stat_reqst) == 28);
+ BUILD_BUG_ON(sizeof(struct srpc_msg) != 160);
+ BUILD_BUG_ON(sizeof(struct srpc_test_reqst) != 70);
+ BUILD_BUG_ON(offsetof(struct srpc_msg, msg_body.tes_reqst.tsr_concur) !=
+ 72);
+ BUILD_BUG_ON(offsetof(struct srpc_msg, msg_body.tes_reqst.tsr_ndest) !=
+ 78);
+ BUILD_BUG_ON(sizeof(struct srpc_stat_reply) != 136);
+ BUILD_BUG_ON(sizeof(struct srpc_stat_reqst) != 28);
}
static int __init
LASSERT(ss != NULL);
LASSERT(ss->ss_lu != NULL);
- /* A compile-time check for FIDs that used to be in lustre_idl.h
- * but is moved here to remove CLASSERT/LASSERT in that header.
+ /*
* Check all lu_fid fields are converted in fid_cpu_to_le() and friends
- * and that there is no padding added by compiler to the struct. */
+ * and that there is no padding added by the compiler to the struct.
+ */
{
struct lu_fid tst;
- CLASSERT(sizeof(tst) == sizeof(tst.f_seq) +
- sizeof(tst.f_oid) + sizeof(tst.f_ver));
+ BUILD_BUG_ON(sizeof(tst) != sizeof(tst.f_seq) +
+ sizeof(tst.f_oid) + sizeof(tst.f_ver));
}
seq->lss_cli = NULL;
int set_default = 0;
- CLASSERT(sizeof(struct lov_user_md_v3) >
- sizeof(struct lov_comp_md_v1));
- CLASSERT(sizeof(*lumv3) == sizeof(*lumv3p));
+ BUILD_BUG_ON(sizeof(struct lov_user_md_v3) <=
+ sizeof(struct lov_comp_md_v1));
+ BUILD_BUG_ON(sizeof(*lumv3) != sizeof(*lumv3p));
/* first try with v1 which is smaller than v3 */
if (copy_from_user(&lumv1, lumv1p, sizeof(lumv1)))
RETURN(-EFAULT);
int i, rc;
unsigned long lustre_inode_cache_flags;
- CLASSERT(sizeof(LUSTRE_VOLATILE_HDR) == LUSTRE_VOLATILE_HDR_LEN + 1);
+ BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) !=
+ LUSTRE_VOLATILE_HDR_LEN + 1);
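The + 1 is the NUL terminator: sizeof on a string literal counts it, so a header macro with N visible characters has sizeof N + 1. A tiny standalone check with an invented header:

#include <linux/bug.h>

#define DEMO_HDR	".demo:"	/* 6 visible characters */
#define DEMO_HDR_LEN	6

static inline void demo_hdr_check(void)
{
	BUILD_BUG_ON(sizeof(DEMO_HDR) != DEMO_HDR_LEN + 1);	/* 6 + NUL */
}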
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
RETURN(rc = rc > 0 ? -EINVAL : rc);
if (buf->lb_buf == NULL || buf->lb_len == 0) {
- CLASSERT(sizeof(*lmv1) <= sizeof(info->lti_key));
+ BUILD_BUG_ON(sizeof(*lmv1) > sizeof(info->lti_key));
/* lti_buf is large enough for *lmv1 or a short
* (<= sizeof(struct lmv_mds_md_v1)) foreign LMV
struct ldlm_res_id resid;
char name[sizeof(fsdb->fsdb_name) + 16];
- CLASSERT(sizeof(name) < 40); /* name is too large to be on stack */
+ BUILD_BUG_ON(sizeof(name) >= 40); /* name must fit on the stack */
snprintf(name, sizeof(name) - 1, "mgs_%s_notify", fsdb->fsdb_name);
complete(&fsdb->fsdb_notify_comp);
[CLM_WRITE] = "W",
[CLM_GROUP] = "G"
};
- CLASSERT(CLM_MAX == ARRAY_SIZE(names));
+ BUILD_BUG_ON(CLM_MAX != ARRAY_SIZE(names));
return names[mode];
}
EXPORT_SYMBOL(cl_lock_mode_name);
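Tying CLM_MAX to ARRAY_SIZE(names) turns a forgotten table entry into a build failure instead of an out-of-bounds read when a new lock mode is added. A minimal sketch of the same pattern with invented names:

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/bug.h>

enum demo_mode { DEMO_READ, DEMO_WRITE, DEMO_MAX };

static const char *demo_names[] = {
	[DEMO_READ]	= "R",
	[DEMO_WRITE]	= "W",
};

static const char *demo_mode_name(enum demo_mode mode)
{
	BUILD_BUG_ON(DEMO_MAX != ARRAY_SIZE(demo_names));
	return demo_names[mode];
}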
/* If a field is added in struct lustre_mdt_attrs, zero it explicitly
* and change the test below. */
- CLASSERT(sizeof(*lma) ==
- (offsetof(struct lustre_mdt_attrs, lma_self_fid) +
- sizeof(lma->lma_self_fid)));
+ BUILD_BUG_ON(sizeof(*lma) !=
+ (offsetof(struct lustre_mdt_attrs, lma_self_fid) +
+ sizeof(lma->lma_self_fid)));
}
EXPORT_SYMBOL(lustre_lma_init);
void lustre_loa_init(struct lustre_ost_attrs *loa, const struct lu_fid *fid,
__u32 compat, __u32 incompat)
{
- CLASSERT(sizeof(*loa) == LMA_OLD_SIZE);
+ BUILD_BUG_ON(sizeof(*loa) != LMA_OLD_SIZE);
memset(&loa->loa_parent_fid, 0,
sizeof(*loa) - offsetof(typeof(*loa), loa_parent_fid));
OBD_FAIL_TIMEOUT(OBD_FAIL_OST_PAUSE_PUNCH, cfs_fail_val);
/* check that we do support OBD_CONNECT_TRUNCLOCK. */
- CLASSERT(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK);
+ BUILD_BUG_ON(!(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK));
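This is the one conversion in the series that is not a simple comparator flip: the old assertion tested a bitmask for truth, so the replacement must wrap the whole expression in a logical NOT. Schematically, with invented masks:

#include <linux/bug.h>

#define DEMO_SUPPORTED	0x0003ULL	/* capability mask, invented */
#define DEMO_FLAG	0x0002ULL

static inline void demo_cap_check(void)
{
	/* old: CLASSERT(DEMO_SUPPORTED & DEMO_FLAG);  the bit must be set */
	BUILD_BUG_ON(!(DEMO_SUPPORTED & DEMO_FLAG));	/* fail if clear */
}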
if ((oa->o_valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) !=
(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS))
*
* Reserve 0.78% of total space, at least 8MB for small filesystems.
*/
- CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
+ BUILD_BUG_ON(OSD_STATFS_RESERVED <= LDISKFS_MAX_BLOCK_SIZE);
reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
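The 0.78% in the comment is 1/128, which suggests OSD_STATFS_RESERVED_SHIFT is 7 (an assumption here; only the arithmetic is the point): reserving blocks >> 7 is reserving blocks / 128, about 0.78% of them. A standalone worked check:

#define DEMO_RESERVED_SHIFT	7	/* assumed: 1/2^7 = 1/128 ~ 0.78% */

static unsigned long long demo_reserved(unsigned long long blocks)
{
	/* e.g. 1000000 >> 7 == 7812, about 0.78% of 1000000 */
	return blocks >> DEMO_RESERVED_SHIFT;
}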
* debugging if we need to determine where this symlink came from.
*/
if (S_ISLNK(type)) {
- CLASSERT(LDISKFS_N_BLOCKS * 4 >= FID_LEN + 1);
+ BUILD_BUG_ON(LDISKFS_N_BLOCKS * 4 < FID_LEN + 1);
rc = snprintf((char *)LDISKFS_I(local)->i_data,
LDISKFS_N_BLOCKS * 4, DFID, PFID(fid));
struct kobject *kobj;
int rc;
- CLASSERT(BH_DXLock < sizeof(((struct buffer_head *)0)->b_state) * 8);
+ BUILD_BUG_ON(BH_DXLock >=
+ sizeof(((struct buffer_head *)0)->b_state) * 8);
#if !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_DEBUG_SPINLOCK)
/* please, try to keep osd_thread_info smaller than a page */
- CLASSERT(sizeof(struct osd_thread_info) <= PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(struct osd_thread_info) > PAGE_SIZE);
#endif
osd_oi_mod_init();
#define STORE_UNALIGNED(val, dst) \
({ \
typeof(val) __val = (val); \
- CLASSERT(sizeof(val) == sizeof(*(dst))); \
+ BUILD_BUG_ON(sizeof(val) != sizeof(*(dst))); \
memcpy(dst, &__val, sizeof(*(dst))); \
})
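Usage sketch for the macro above: the size check rejects mismatched operands at compile time, and the memcpy makes the store safe even when the destination is not naturally aligned (buffer layout invented):

static void demo_store(void)
{
	char buf[16];
	__u64 val = 42;
	__u64 *dst = (__u64 *)(buf + 3);	/* deliberately misaligned */

	STORE_UNALIGNED(val, dst);	/* compiles: operand sizes match */
	/* STORE_UNALIGNED((__u32)val, dst) would trip the BUILD_BUG_ON */
}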
static inline int getsize(const struct iam_leaf *leaf, int namelen, int recsize)
{
- CLASSERT(!(LVAR_PAD & (LVAR_PAD - 1)));
+ BUILD_BUG_ON(LVAR_PAD & (LVAR_PAD - 1));
return (offsetof(struct lvar_leaf_entry, vle_key) +
namelen + recsize + LVAR_ROUND) & ~LVAR_ROUND;
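The power-of-two assertion is what makes the & ~LVAR_ROUND above a correct round-up, assuming (as the lvar code appears to) that LVAR_ROUND == LVAR_PAD - 1: then (n + ROUND) & ~ROUND rounds n up to the next multiple of PAD, which only holds when PAD is a power of two. A worked instance with invented values:

#include <linux/bug.h>

#define DEMO_PAD	8
#define DEMO_ROUND	(DEMO_PAD - 1)

static inline int demo_roundup(int n)
{
	BUILD_BUG_ON(DEMO_PAD & (DEMO_PAD - 1));	/* power of two */
	/* n = 5: (5 + 7) & ~7 == 12 & ~7 == 8 */
	return (n + DEMO_ROUND) & ~DEMO_ROUND;
}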
* gradually disappears as the number of real dnodes grows. It also
* avoids the need to check for divide-by-zero computing dn_per_block.
*/
- CLASSERT(OSD_DNODE_MIN_BLKSHIFT > 0);
- CLASSERT(OSD_DNODE_EST_BLKSHIFT > 0);
+ BUILD_BUG_ON(OSD_DNODE_MIN_BLKSHIFT <= 0);
+ BUILD_BUG_ON(OSD_DNODE_EST_BLKSHIFT <= 0);
est_usedblocks = ((OSD_DNODE_EST_COUNT << OSD_DNODE_EST_BLKSHIFT) +
usedbytes) >> est_maxblockshift;
* Reserve 0.78% of total space, at least 16MB for small filesystems,
* for internal files to be created/unlinked when space is tight.
*/
- CLASSERT(OSD_STATFS_RESERVED_SIZE > 0);
+ BUILD_BUG_ON(OSD_STATFS_RESERVED_SIZE <= 0);
reserved = OSD_STATFS_RESERVED_SIZE >> bshift;
if (likely(osfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
reserved = osfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
/* nr_blkptrshift is the log2 of the number of block pointers that can
* be stored in an indirect block */
- CLASSERT(DN_MAX_INDBLKSHIFT > SPA_BLKPTRSHIFT);
+ BUILD_BUG_ON(DN_MAX_INDBLKSHIFT <= SPA_BLKPTRSHIFT);
nr_blkptrshift = DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT;
/* max_blockshift / nr_blkptrshift is thus the maximum depth of the
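The subtraction is a log2 identity: log2(indirect block size / block pointer size) is the log2 of the number of pointers per indirect block. With, say, 128-byte block pointers (shift 7) and 128KiB indirect blocks (shift 17), both values assumed here purely for illustration, an indirect block holds 2^(17 - 7) = 1024 pointers:

#define DEMO_INDBLKSHIFT	17	/* 128KiB indirect block, assumed */
#define DEMO_BLKPTRSHIFT	7	/* 128-byte block pointer, assumed */

/* pointers per indirect block: 1 << (17 - 7) == 1024 */
static const int demo_nr_blkptrs = 1 << (DEMO_INDBLKSHIFT - DEMO_BLKPTRSHIFT);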
const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
ENTRY;
- CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
+ BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
lma = (struct lustre_mdt_attrs *)info->oti_buf;
buf.lb_buf = lma;
buf.lb_len = sizeof(info->oti_buf);
if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
LASSERT(!obj->oo_pfid_in_lma);
- CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
+ BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
lma = (struct lustre_mdt_attrs *)&info->oti_buf;
buf.lb_buf = lma;
buf.lb_len = sizeof(info->oti_buf);
int rc;
ENTRY;
- CLASSERT(sizeof(info->oti_buf) >= sizeof(*loa));
+ BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*loa));
rc = osd_xattr_get_internal(env, obj, &tbuf,
XATTR_NAME_LMA, sizep);
if (rc)
int rc;
ENTRY;
- CLASSERT(sizeof(info->oti_buf) >= sizeof(*loa));
+ BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*loa));
rc = osd_xattr_get_internal(env, obj, &buf, XATTR_NAME_LMA, &size);
if (rc)
RETURN(rc);
}
/* Need to always be aligned to a power-of-two for multi-bulk BRW */
- CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
+ BUILD_BUG_ON(PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1));
xid &= PTLRPC_BULK_OPS_MASK;
atomic64_set(&ptlrpc_last_xid, xid);
}
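The power-of-two requirement is what makes the mask on xid work: assuming PTLRPC_BULK_OPS_MASK is ~((__u64)PTLRPC_BULK_OPS_COUNT - 1), the AND clears the low bits and aligns xid down to a multiple of the count. A worked example with invented values:

#define DEMO_OPS_COUNT	16ULL			/* must be a power of two */
#define DEMO_OPS_MASK	(~(DEMO_OPS_COUNT - 1))	/* == ~0xfULL */

static inline unsigned long long demo_align_xid(unsigned long long xid)
{
	/* 0x1234 & ~0xf == 0x1230: aligned down to a multiple of 16 */
	return xid & DEMO_OPS_MASK;
}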
LASSERT(info != NULL);
assert_spin_locked(&policy->pol_nrs->nrs_lock);
- CLASSERT(sizeof(info->pi_arg) == sizeof(policy->pol_arg));
+ BUILD_BUG_ON(sizeof(info->pi_arg) != sizeof(policy->pol_arg));
memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
memcpy(info->pi_arg, policy->pol_arg, sizeof(policy->pol_arg));
unsigned char mode;
unsigned short ver;
- CLASSERT(sizeof(buf) == sizeof(((struct lu_nodemap *)0)->nm_sepol));
+ BUILD_BUG_ON(sizeof(buf) !=
+ sizeof(((struct lu_nodemap *)0)->nm_sepol));
if (sepol == NULL)
return -EINVAL;
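The ((struct lu_nodemap *)0)->nm_sepol expression in these assertions never dereferences a null pointer: sizeof is an unevaluated context, so it is just a way to name a member's size without having an instance. A standalone demonstration:

struct demo_nodemap {
	char nm_sepol[4096];	/* invented size */
};

/* legal: the operand of sizeof is not evaluated, only its type is used */
static char demo_buf[sizeof(((struct demo_nodemap *)0)->nm_sepol)];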
char sepol[LUSTRE_NODEMAP_SEPOL_LENGTH + 1];
int rc = 0;
- CLASSERT(sizeof(sepol) == sizeof(((struct lu_nodemap *)0)->nm_sepol));
+ BUILD_BUG_ON(sizeof(sepol) !=
+ sizeof(((struct lu_nodemap *)0)->nm_sepol));
if (count > 0) {
if (count >= sizeof(sepol))
static void nodemap_cluster_rec_init(union nodemap_rec *nr,
const struct lu_nodemap *nodemap)
{
- CLASSERT(sizeof(nr->ncr.ncr_name) == sizeof(nodemap->nm_name));
+ BUILD_BUG_ON(sizeof(nr->ncr.ncr_name) != sizeof(nodemap->nm_name));
strncpy(nr->ncr.ncr_name, nodemap->nm_name, sizeof(nr->ncr.ncr_name));
nr->ncr.ncr_squash_uid = cpu_to_le32(nodemap->nm_squash_uid);
ENTRY;
- CLASSERT(sizeof(union nodemap_rec) == 32);
+ BUILD_BUG_ON(sizeof(union nodemap_rec) != 32);
nodemap_id = le32_to_cpu(key->nk_nodemap_id);
type = nodemap_get_key_type(key);
void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
int mdidx)
{
- CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);
+ BUILD_BUG_ON(PTLRPC_MAX_BRW_PAGES >= LI_POISON);
LASSERT(mdidx < desc->bd_md_max_brw);
LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
({ \
typeof(val) __val = (val); \
\
- CLASSERT(sizeof(val) == sizeof(*(dst))); \
+ BUILD_BUG_ON(sizeof(val) != sizeof(*(dst))); \
memcpy(dst, &__val, sizeof(*(dst))); \
})