};
#define LU_KEY_INIT(mod, type) \
- static void* mod##_key_init(const struct lu_context *ctx, \
- struct lu_context_key *key) \
- { \
- type *value; \
+ static void *mod##_key_init(const struct lu_context *ctx, \
+ struct lu_context_key *key) \
+ { \
+ type *value; \
\
CLASSERT(PAGE_SIZE >= sizeof(*value)); \
\
- OBD_ALLOC_PTR(value); \
- if (value == NULL) \
- value = ERR_PTR(-ENOMEM); \
- \
- return value; \
- } \
- struct __##mod##__dummy_init {;} /* semicolon catcher */
+ OBD_ALLOC_PTR(value); \
+ if (value == NULL) \
+ value = ERR_PTR(-ENOMEM); \
+ \
+ return value; \
+ } \
+ struct __##mod##__dummy_init { ; } /* semicolon catcher */
#define LU_KEY_FINI(mod, type) \
static void mod##_key_fini(const struct lu_context *ctx, \
ldlm_lock_reorder_req(lock);
- req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
- &RQF_LDLM_BL_CALLBACK,
- LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
- if (req == NULL)
- RETURN(-ENOMEM);
+ req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
+ &RQF_LDLM_BL_CALLBACK,
+ LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
+ if (req == NULL)
+ RETURN(-ENOMEM);
- CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
- ca = ptlrpc_req_async_args(req);
- ca->ca_set_arg = arg;
- ca->ca_lock = lock;
+ CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
+ ca = ptlrpc_req_async_args(req);
+ ca->ca_set_arg = arg;
+ ca->ca_lock = lock;
- req->rq_interpret_reply = ldlm_cb_interpret;
+ req->rq_interpret_reply = ldlm_cb_interpret;
lock_res_and_lock(lock);
if (ldlm_is_destroyed(lock)) {
lvb_len = 0;
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT, lvb_len);
- rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
- if (rc) {
- ptlrpc_request_free(req);
- RETURN(rc);
- }
+ rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
- CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
- ca = ptlrpc_req_async_args(req);
- ca->ca_set_arg = arg;
- ca->ca_lock = lock;
+ CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
+ ca = ptlrpc_req_async_args(req);
+ ca->ca_set_arg = arg;
+ ca->ca_lock = lock;
- req->rq_interpret_reply = ldlm_cb_interpret;
- body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ req->rq_interpret_reply = ldlm_cb_interpret;
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- body->lock_handle[0] = lock->l_remote_handle;
+ body->lock_handle[0] = lock->l_remote_handle;
body->lock_flags = ldlm_flags_to_wire(flags);
ldlm_lock2desc(lock, &body->lock_desc);
if (lvb_len > 0) {
*desc = *arg->gl_desc;
}
- body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- body->lock_handle[0] = lock->l_remote_handle;
- ldlm_lock2desc(lock, &body->lock_desc);
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ body->lock_handle[0] = lock->l_remote_handle;
+ ldlm_lock2desc(lock, &body->lock_desc);
CLASSERT(sizeof(*ca) <= sizeof(req->rq_async_args));
ca = ptlrpc_req_async_args(req);
CLASSERT(sizeof(struct lov_user_md_v3) >
sizeof(struct lov_comp_md_v1));
- LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
- LASSERT(sizeof(lumv3.lmm_objects[0]) ==
- sizeof(lumv3p->lmm_objects[0]));
+ CLASSERT(sizeof(lumv3) == sizeof(*lumv3p));
+ CLASSERT(sizeof(lumv3.lmm_objects[0]) ==
+ sizeof(lumv3p->lmm_objects[0]));
/* first try with v1 which is smaller than v3 */
if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
- RETURN(-EFAULT);
+ RETURN(-EFAULT);
if (lumv1->lmm_magic == LOV_USER_MAGIC_V3)
if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
{
- CLASSERT(sizeof(pos) == sizeof(__u64));
+ CLASSERT(sizeof(pos) == sizeof(__u64));
- id->vpi_index = pos & 0xffffffff;
- id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
- id->vpi_bucket = ((unsigned long long)pos >> PGC_OBJ_SHIFT);
+ id->vpi_index = pos & 0xffffffff;
+ id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
+ id->vpi_bucket = ((unsigned long long)pos >> PGC_OBJ_SHIFT);
}
static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
const void *data, size_t datalen, umode_t mode,
uid_t uid, gid_t gid, cfs_cap_t cap_effective, __u64 rdev)
{
- struct mdt_rec_create *rec;
- char *tmp;
- __u64 flags;
+ struct mdt_rec_create *rec;
+ char *tmp;
+ __u64 flags;
CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
const char *old, size_t oldlen,
const char *new, size_t newlen)
{
- struct mdt_rec_rename *rec;
+ struct mdt_rec_rename *rec;
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_rename));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+ CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_rename));
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- /* XXX do something about time, uid, gid */
+ /* XXX do something about time, uid, gid */
rec->rn_opcode = op_data->op_cli_flags & CLI_MIGRATE ?
REINT_MIGRATE : REINT_RENAME;
rec->rn_fsuid = op_data->op_fsuid;
}
}
- if (opcode == MDS_REINT) {
- struct mdt_rec_setxattr *rec;
+ if (opcode == MDS_REINT) {
+ struct mdt_rec_setxattr *rec;
- CLASSERT(sizeof(struct mdt_rec_setxattr) ==
- sizeof(struct mdt_rec_reint));
+ CLASSERT(sizeof(struct mdt_rec_setxattr) ==
+ sizeof(struct mdt_rec_reint));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
rec->sx_opcode = REINT_SETXATTR;
rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
struct md_object *obj, struct thandle *handle,
__u64 valid)
{
- struct mdd_device *mdd = mdo2mdd(obj);
- int bits, type = 0;
+ struct mdd_device *mdd = mdo2mdd(obj);
+ int bits, type = 0;
bits = (valid & LA_SIZE) ? 1 << CL_TRUNC : 0;
bits |= (valid & ~(LA_CTIME|LA_MTIME|LA_ATIME)) ? 1 << CL_SETATTR : 0;
- bits |= (valid & LA_MTIME) ? 1 << CL_MTIME : 0;
- bits |= (valid & LA_CTIME) ? 1 << CL_CTIME : 0;
- bits |= (valid & LA_ATIME) ? 1 << CL_ATIME : 0;
- bits = bits & mdd->mdd_cl.mc_mask;
+ bits |= (valid & LA_MTIME) ? 1 << CL_MTIME : 0;
+ bits |= (valid & LA_CTIME) ? 1 << CL_CTIME : 0;
+ bits |= (valid & LA_ATIME) ? 1 << CL_ATIME : 0;
+ bits = bits & mdd->mdd_cl.mc_mask;
/* This is an implementation limit rather than a protocol limit */
CLASSERT(CL_LAST <= sizeof(int) * 8);
- if (bits == 0)
- return 0;
+ if (bits == 0)
+ return 0;
- /* The record type is the lowest non-masked set bit */
+ /* The record type is the lowest non-masked set bit */
type = __ffs(bits);
- /* FYI we only store the first CLF_FLAGMASK bits of la_valid */
- return mdd_changelog_data_store(env, mdd, type, (int)valid,
- md2mdd_obj(obj), handle);
+ /* FYI we only store the first CLF_FLAGMASK bits of la_valid */
+ return mdd_changelog_data_store(env, mdd, type, (int)valid,
+ md2mdd_obj(obj), handle);
}
static int mdd_declare_attr_set(const struct lu_env *env,
int mdt_hsm_attr_set(struct mdt_thread_info *info, struct mdt_object *obj,
const struct md_hsm *mh)
{
- struct md_object *next = mdt_object_child(obj);
- struct lu_buf *buf = &info->mti_buf;
- struct hsm_attrs *attrs;
- int rc;
+ struct md_object *next = mdt_object_child(obj);
+ struct lu_buf *buf = &info->mti_buf;
+ struct hsm_attrs *attrs;
+ int rc;
ENTRY;
attrs = (struct hsm_attrs *)info->mti_xattr_buf;
static int mdt_setattr_unpack_rec(struct mdt_thread_info *info)
{
- struct lu_ucred *uc = mdt_ucred(info);
- struct md_attr *ma = &info->mti_attr;
- struct lu_attr *la = &ma->ma_attr;
- struct req_capsule *pill = info->mti_pill;
- struct mdt_reint_record *rr = &info->mti_rr;
- struct mdt_rec_setattr *rec;
- struct lu_nodemap *nodemap;
- ENTRY;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct md_attr *ma = &info->mti_attr;
+ struct lu_attr *la = &ma->ma_attr;
+ struct req_capsule *pill = info->mti_pill;
+ struct mdt_reint_record *rr = &info->mti_rr;
+ struct mdt_rec_setattr *rec;
+ struct lu_nodemap *nodemap;
- CLASSERT(sizeof(struct mdt_rec_setattr)== sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(pill, &RMF_REC_REINT);
- if (rec == NULL)
- RETURN(-EFAULT);
+ ENTRY;
+
+ CLASSERT(sizeof(*rec) == sizeof(struct mdt_rec_reint));
+ rec = req_capsule_client_get(pill, &RMF_REC_REINT);
+ if (rec == NULL)
+ RETURN(-EFAULT);
/* This prior initialization is needed for old_init_ucred_reint() */
uc->uc_fsuid = rec->sa_fsuid;
static int mdt_create_unpack(struct mdt_thread_info *info)
{
- struct lu_ucred *uc = mdt_ucred(info);
- struct mdt_rec_create *rec;
- struct lu_attr *attr = &info->mti_attr.ma_attr;
- struct mdt_reint_record *rr = &info->mti_rr;
- struct req_capsule *pill = info->mti_pill;
- struct md_op_spec *sp = &info->mti_spec;
- int rc;
- ENTRY;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct mdt_rec_create *rec;
+ struct lu_attr *attr = &info->mti_attr.ma_attr;
+ struct mdt_reint_record *rr = &info->mti_rr;
+ struct req_capsule *pill = info->mti_pill;
+ struct md_op_spec *sp = &info->mti_spec;
+ int rc;
- CLASSERT(sizeof(struct mdt_rec_create) == sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(pill, &RMF_REC_REINT);
- if (rec == NULL)
- RETURN(-EFAULT);
+ ENTRY;
+
+ CLASSERT(sizeof(*rec) == sizeof(struct mdt_rec_reint));
+ rec = req_capsule_client_get(pill, &RMF_REC_REINT);
+ if (rec == NULL)
+ RETURN(-EFAULT);
/* This prior initialization is needed for old_init_ucred_reint() */
uc->uc_fsuid = rec->cr_fsuid;
static int mdt_link_unpack(struct mdt_thread_info *info)
{
- struct lu_ucred *uc = mdt_ucred(info);
- struct mdt_rec_link *rec;
- struct lu_attr *attr = &info->mti_attr.ma_attr;
- struct mdt_reint_record *rr = &info->mti_rr;
- struct req_capsule *pill = info->mti_pill;
- int rc;
- ENTRY;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct mdt_rec_link *rec;
+ struct lu_attr *attr = &info->mti_attr.ma_attr;
+ struct mdt_reint_record *rr = &info->mti_rr;
+ struct req_capsule *pill = info->mti_pill;
+ int rc;
- CLASSERT(sizeof(struct mdt_rec_link) == sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(pill, &RMF_REC_REINT);
- if (rec == NULL)
- RETURN(-EFAULT);
+ ENTRY;
+
+ CLASSERT(sizeof(*rec) == sizeof(struct mdt_rec_reint));
+ rec = req_capsule_client_get(pill, &RMF_REC_REINT);
+ if (rec == NULL)
+ RETURN(-EFAULT);
/* This prior initialization is needed for old_init_ucred_reint() */
uc->uc_fsuid = rec->lk_fsuid;
static int mdt_unlink_unpack(struct mdt_thread_info *info)
{
- struct lu_ucred *uc = mdt_ucred(info);
- struct mdt_rec_unlink *rec;
- struct md_attr *ma = &info->mti_attr;
- struct lu_attr *attr = &info->mti_attr.ma_attr;
- struct mdt_reint_record *rr = &info->mti_rr;
- struct req_capsule *pill = info->mti_pill;
- int rc;
- ENTRY;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct mdt_rec_unlink *rec;
+ struct md_attr *ma = &info->mti_attr;
+ struct lu_attr *attr = &info->mti_attr.ma_attr;
+ struct mdt_reint_record *rr = &info->mti_rr;
+ struct req_capsule *pill = info->mti_pill;
+ int rc;
- CLASSERT(sizeof(struct mdt_rec_unlink) == sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(pill, &RMF_REC_REINT);
- if (rec == NULL)
- RETURN(-EFAULT);
+ ENTRY;
+
+ CLASSERT(sizeof(*rec) == sizeof(struct mdt_rec_reint));
+ rec = req_capsule_client_get(pill, &RMF_REC_REINT);
+ if (rec == NULL)
+ RETURN(-EFAULT);
/* This prior initialization is needed for old_init_ucred_reint() */
uc->uc_fsuid = rec->ul_fsuid;
static int mdt_rename_unpack(struct mdt_thread_info *info)
{
- struct lu_ucred *uc = mdt_ucred(info);
- struct mdt_rec_rename *rec;
- struct md_attr *ma = &info->mti_attr;
- struct lu_attr *attr = &info->mti_attr.ma_attr;
- struct mdt_reint_record *rr = &info->mti_rr;
- struct req_capsule *pill = info->mti_pill;
- int rc;
- ENTRY;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct mdt_rec_rename *rec;
+ struct md_attr *ma = &info->mti_attr;
+ struct lu_attr *attr = &info->mti_attr.ma_attr;
+ struct mdt_reint_record *rr = &info->mti_rr;
+ struct req_capsule *pill = info->mti_pill;
+ int rc;
- CLASSERT(sizeof(struct mdt_rec_rename) == sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(pill, &RMF_REC_REINT);
- if (rec == NULL)
- RETURN(-EFAULT);
+ ENTRY;
+
+ CLASSERT(sizeof(*rec) == sizeof(struct mdt_rec_reint));
+ rec = req_capsule_client_get(pill, &RMF_REC_REINT);
+ if (rec == NULL)
+ RETURN(-EFAULT);
/* This prior initialization is needed for old_init_ucred_reint() */
uc->uc_fsuid = rec->rn_fsuid;
static int mdt_open_unpack(struct mdt_thread_info *info)
{
- struct lu_ucred *uc = mdt_ucred(info);
- struct mdt_rec_create *rec;
- struct lu_attr *attr = &info->mti_attr.ma_attr;
- struct req_capsule *pill = info->mti_pill;
- struct mdt_reint_record *rr = &info->mti_rr;
- struct ptlrpc_request *req = mdt_info_req(info);
- struct md_op_spec *sp = &info->mti_spec;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct mdt_rec_create *rec;
+ struct lu_attr *attr = &info->mti_attr.ma_attr;
+ struct req_capsule *pill = info->mti_pill;
+ struct mdt_reint_record *rr = &info->mti_rr;
+ struct ptlrpc_request *req = mdt_info_req(info);
+ struct md_op_spec *sp = &info->mti_spec;
int rc;
- ENTRY;
+ ENTRY;
- CLASSERT(sizeof(struct mdt_rec_create) == sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(pill, &RMF_REC_REINT);
- if (rec == NULL)
- RETURN(-EFAULT);
+ CLASSERT(sizeof(struct mdt_rec_create) == sizeof(struct mdt_rec_reint));
+ rec = req_capsule_client_get(pill, &RMF_REC_REINT);
+ if (rec == NULL)
+ RETURN(-EFAULT);
/* This prior initialization is needed for old_init_ucred_reint() */
uc->uc_fsuid = rec->cr_fsuid;
static int mdt_setxattr_unpack(struct mdt_thread_info *info)
{
- struct mdt_reint_record *rr = &info->mti_rr;
- struct lu_ucred *uc = mdt_ucred(info);
- struct lu_attr *attr = &info->mti_attr.ma_attr;
- struct req_capsule *pill = info->mti_pill;
- struct mdt_rec_setxattr *rec;
- int rc;
+ struct mdt_reint_record *rr = &info->mti_rr;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct lu_attr *attr = &info->mti_attr.ma_attr;
+ struct req_capsule *pill = info->mti_pill;
+ struct mdt_rec_setxattr *rec;
+ int rc;
ENTRY;
- CLASSERT(sizeof(struct mdt_rec_setxattr) ==
- sizeof(struct mdt_rec_reint));
+ CLASSERT(sizeof(struct mdt_rec_setxattr) ==
+ sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(pill, &RMF_REC_REINT);
- if (rec == NULL)
- RETURN(-EFAULT);
+ rec = req_capsule_client_get(pill, &RMF_REC_REINT);
+ if (rec == NULL)
+ RETURN(-EFAULT);
/* This prior initialization is needed for old_init_ucred_reint() */
uc->uc_fsuid = rec->sx_fsuid;
struct ldlm_res_id resid;
char name[sizeof(fsdb->fsdb_name) + 16];
- LASSERTF(sizeof(name) < 40, "name is too large to be in stack.\n");
+	CLASSERT(sizeof(name) < 40); /* keep name small enough to live on the stack */
snprintf(name, sizeof(name) - 1, "mgs_%s_notify", fsdb->fsdb_name);
complete(&fsdb->fsdb_notify_comp);
/* If a field is added in struct lustre_mdt_attrs, zero it explicitly
* and change the test below. */
- LASSERT(sizeof(*lma) ==
- (offsetof(struct lustre_mdt_attrs, lma_self_fid) +
- sizeof(lma->lma_self_fid)));
+ CLASSERT(sizeof(*lma) ==
+ (offsetof(struct lustre_mdt_attrs, lma_self_fid) +
+ sizeof(lma->lma_self_fid)));
}
EXPORT_SYMBOL(lustre_lma_init);
* resent due to cksum error, this will allow Server to
* check+dump pages on its side */
}
- ptlrpc_request_set_replen(req);
+ ptlrpc_request_set_replen(req);
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oa = oa;
- aa->aa_requested_nob = requested_nob;
- aa->aa_nio_count = niocount;
- aa->aa_page_count = page_count;
- aa->aa_resends = 0;
- aa->aa_ppga = pga;
- aa->aa_cli = cli;
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->aa_oa = oa;
+ aa->aa_requested_nob = requested_nob;
+ aa->aa_nio_count = niocount;
+ aa->aa_page_count = page_count;
+ aa->aa_resends = 0;
+ aa->aa_ppga = pga;
+ aa->aa_cli = cli;
INIT_LIST_HEAD(&aa->aa_oaps);
*reqp = req;
req->rq_no_delay = 1;
}
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oi = oinfo;
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->aa_oi = oinfo;
- ptlrpc_set_add_req(rqset, req);
- RETURN(0);
+ ptlrpc_set_add_req(rqset, req);
+ RETURN(0);
}
static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
&RMF_OST_BODY :
&RMF_SETINFO_VAL);
- memcpy(tmp, val, vallen);
+ memcpy(tmp, val, vallen);
if (KEY_IS(KEY_GRANT_SHRINK)) {
- struct osc_grant_args *aa;
- struct obdo *oa;
-
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- OBDO_ALLOC(oa);
- if (!oa) {
- ptlrpc_req_finished(req);
- RETURN(-ENOMEM);
- }
- *oa = ((struct ost_body *)val)->oa;
- aa->aa_oa = oa;
- req->rq_interpret_reply = osc_shrink_grant_interpret;
- }
+ struct osc_grant_args *aa;
+ struct obdo *oa;
+
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ OBDO_ALLOC(oa);
+ if (!oa) {
+ ptlrpc_req_finished(req);
+ RETURN(-ENOMEM);
+ }
+ *oa = ((struct ost_body *)val)->oa;
+ aa->aa_oa = oa;
+ req->rq_interpret_reply = osc_shrink_grant_interpret;
+ }
ptlrpc_request_set_replen(req);
if (!KEY_IS(KEY_GRANT_SHRINK)) {
{
int rc;
- LASSERT(BH_DXLock < sizeof(((struct buffer_head *)0)->b_state) * 8);
+ CLASSERT(BH_DXLock < sizeof(((struct buffer_head *)0)->b_state) * 8);
#if !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_DEBUG_SPINLOCK)
/* please, try to keep osd_thread_info smaller than a page */
CLASSERT(sizeof(struct osd_thread_info) <= PAGE_SIZE);
} ll_entry[LFIX_LEAF_RECNO];
};
-#define STORE_UNALIGNED(val, dst) \
-({ \
- typeof(val) __val = (val); \
- CLASSERT(sizeof(val) == sizeof(*(dst))); \
- memcpy(dst, &__val, sizeof(*(dst))); \
+#define STORE_UNALIGNED(val, dst) \
+({ \
+ typeof(val) __val = (val); \
+ CLASSERT(sizeof(val) == sizeof(*(dst))); \
+ memcpy(dst, &__val, sizeof(*(dst))); \
})
static void lfix_root(void *buf,
*/
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
- struct ptlrpc_replay_async_args *aa;
- ENTRY;
+ struct ptlrpc_replay_async_args *aa;
+
+ ENTRY;
- LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
+ LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
- LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- memset(aa, 0, sizeof *aa);
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ memset(aa, 0, sizeof(*aa));
/* Prepare request to be resent with ptlrpcd */
aa->praa_old_state = req->rq_send_state;
req->rq_no_delay = req->rq_no_resend = 1;
req->rq_pill.rc_fmt = (void *)&worker_format;
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
args->cb = cb;
args->cbdata = cbdata;
LASSERT(info != NULL);
assert_spin_locked(&policy->pol_nrs->nrs_lock);
- LASSERT(sizeof(info->pi_arg) == sizeof(policy->pol_arg));
+ CLASSERT(sizeof(info->pi_arg) == sizeof(policy->pol_arg));
memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
memcpy(info->pi_arg, policy->pol_arg, sizeof(policy->pol_arg));
const struct nodemap_key *nk,
const union nodemap_rec *nr)
{
- struct thandle *th;
- struct dt_device *dev = lu2dt_dev(idx->do_lu.lo_dev);
- int rc;
+ struct thandle *th;
+ struct dt_device *dev = lu2dt_dev(idx->do_lu.lo_dev);
+ int rc;
CLASSERT(sizeof(union nodemap_rec) == 32);
const union nodemap_rec *rec,
struct lu_nodemap **recent_nodemap)
{
- struct lu_nodemap *nodemap = NULL;
- enum nodemap_idx_type type;
- enum nodemap_id_type id_type;
- u8 flags;
- u32 nodemap_id;
- lnet_nid_t nid[2];
- u32 map[2];
- int rc;
+ struct lu_nodemap *nodemap = NULL;
+ enum nodemap_idx_type type;
+ enum nodemap_id_type id_type;
+ u8 flags;
+ u32 nodemap_id;
+ lnet_nid_t nid[2];
+ u32 map[2];
+ int rc;
ENTRY;
void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
{
- int i;
- __swab32s(&mti->mti_lustre_ver);
- __swab32s(&mti->mti_stripe_index);
- __swab32s(&mti->mti_config_ver);
- __swab32s(&mti->mti_flags);
- __swab32s(&mti->mti_instance);
- __swab32s(&mti->mti_nid_count);
- CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
- for (i = 0; i < MTI_NIDS_MAX; i++)
- __swab64s(&mti->mti_nids[i]);
+ int i;
+
+ __swab32s(&mti->mti_lustre_ver);
+ __swab32s(&mti->mti_stripe_index);
+ __swab32s(&mti->mti_config_ver);
+ __swab32s(&mti->mti_flags);
+ __swab32s(&mti->mti_instance);
+ __swab32s(&mti->mti_nid_count);
+ CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
+ for (i = 0; i < MTI_NIDS_MAX; i++)
+ __swab64s(&mti->mti_nids[i]);
}
void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
{
__u8 i;
- __swab64s(&entry->mne_version);
- __swab32s(&entry->mne_instance);
- __swab32s(&entry->mne_index);
- __swab32s(&entry->mne_length);
-
- /* mne_nid_(count|type) must be one byte size because we're gonna
- * access it w/o swapping. */
- CLASSERT(sizeof(entry->mne_nid_count) == sizeof(__u8));
- CLASSERT(sizeof(entry->mne_nid_type) == sizeof(__u8));
-
- /* remove this assertion if ipv6 is supported. */
- LASSERT(entry->mne_nid_type == 0);
- for (i = 0; i < entry->mne_nid_count; i++) {
- CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
- __swab64s(&entry->u.nids[i]);
- }
+ __swab64s(&entry->mne_version);
+ __swab32s(&entry->mne_instance);
+ __swab32s(&entry->mne_index);
+ __swab32s(&entry->mne_length);
+
+	/* mne_nid_(count|type) must be one byte in size because they are
+	 * accessed without byte swapping. */
+ CLASSERT(sizeof(entry->mne_nid_count) == sizeof(__u8));
+ CLASSERT(sizeof(entry->mne_nid_type) == sizeof(__u8));
+
+ /* remove this assertion if ipv6 is supported. */
+ LASSERT(entry->mne_nid_type == 0);
+ for (i = 0; i < entry->mne_nid_count; i++) {
+ CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
+ __swab64s(&entry->u.nids[i]);
+ }
}
EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
* memcpy(). This macro is needed to avoid dependency of user level tools on
* the kernel headers.
*/
-#define STORE_UNALIGNED(val, dst) \
-({ \
- typeof(val) __val = (val); \
- \
- CLASSERT(sizeof(val) == sizeof(*(dst))); \
- memcpy(dst, &__val, sizeof(*(dst))); \
+#define STORE_UNALIGNED(val, dst) \
+({ \
+ typeof(val) __val = (val); \
+ \
+ CLASSERT(sizeof(val) == sizeof(*(dst))); \
+ memcpy(dst, &__val, sizeof(*(dst))); \
})
static void lfix_root(void *buf,