/*
 * Wire/on-disk header for one page of directory entries.
 *
 * Review note: this hunk widens ldp_flags from __u16 (plus a __u16 pad)
 * to __u32. The header size is unchanged (the old __u16+__u16 pair and
 * the new __u32 both occupy 4 bytes ahead of ldp_pad0), but every
 * accessor must move from the le16 to the le32 byte-order helpers —
 * see the matching le32_to_cpu()/cpu_to_le32() hunks below.
 */
struct lu_dirpage {
__u64 ldp_hash_start;
__u64 ldp_hash_end;
- __u16 ldp_flags;
- __u16 ldp_pad;
+ __u32 ldp_flags;
__u32 ldp_pad0;
/* trailing variable-length entry array; [0] is the pre-C99 flexible-array idiom */
struct lu_dirent ldp_entries[0];
};
/*
 * Return the first entry of dir-page @dp, or NULL when the page carries
 * the LDF_EMPTY flag. Updated to le32_to_cpu() because ldp_flags is now
 * 32 bits wide (see the struct lu_dirpage hunk above, applied together
 * as one patch).
 */
static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
- if (le16_to_cpu(dp->ldp_flags) & LDF_EMPTY)
+ if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
return NULL;
else
return dp->ldp_entries;
/*
 * NOTE(review): the closing brace of lu_dirent_start and the signature
 * of the next function are outside this hunk; the body below appears to
 * compute an entry's record size — confirm against the full file.
 */
{
if (le16_to_cpu(ent->lde_reclen) == 0) {
/* lde_reclen == 0 means "derive from name length": round the
 * size up to 8 bytes now instead of the previous 4-byte round,
 * presumably to keep 64-bit fields aligned — TODO confirm. */
return (sizeof(*ent) +
- le16_to_cpu(ent->lde_namelen) + 3) & ~3;
+ le16_to_cpu(ent->lde_namelen) + 7) & ~7;
}
return le16_to_cpu(ent->lde_reclen);
}
[EXT2_FT_SYMLINK] DT_LNK,
};
+
+/*
+ * Indirect handle on memmove(). Calling the copy through a mutable
+ * global function pointer keeps the compiler from expanding it into
+ * field-wise (alignment-assuming) stores; used by filldir() below.
+ * NOTE(review): non-static mutable global — consider static and/or
+ * volatile qualification so it cannot be clobbered from other units.
+ */
+void (*memmover)(void *, const void *, size_t) = memmove;
+
/* byte offset of the name field inside a dirent */
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
/* round x up to the next multiple of 8 (sizeof(__u64)) */
#define ROUND_UP64(x) (((x)+sizeof(__u64)-1) & ~(sizeof(__u64)-1))
/*
 * Append one directory entry to @buf at offset *filled.
 *
 * NOTE(review): the parameter list is truncated in this hunk — @name,
 * @namelen and @offset are used below but declared outside the visible
 * lines; confirm against the full signature. Returns 1 when the entry
 * would overflow @buflen; the function tail is also outside this hunk.
 */
static int filldir(char *buf, int buflen,
ino_t ino, unsigned int d_type, int *filled)
{
cfs_dirent_t *dirent = (cfs_dirent_t *) (buf + *filled);
+ cfs_dirent_t holder;
int reclen = ROUND_UP64(NAME_OFFSET(dirent) + namelen + 1);
+ /*
+ * @buf is not guaranteed to be properly aligned. To work around,
+ * first fill stack-allocated @holder, then copy @holder into @buf by
+ * memmove().
+ */
+
/* check overflow */
if ((*filled + reclen) > buflen)
return 1;
- dirent->d_ino = ino;
+ holder.d_ino = ino;
#ifdef _DIRENT_HAVE_D_OFF
- dirent->d_off = offset;
+ holder.d_off = offset;
#endif
- dirent->d_reclen = reclen;
+ holder.d_reclen = reclen;
#ifdef _DIRENT_HAVE_D_TYPE
- dirent->d_type = (unsigned short) d_type;
+ holder.d_type = (unsigned short) d_type;
#endif
+ /* gcc unrolls memcpy() of structs into field-wise assignments,
+ * assuming proper alignment. Humor it. */
+ (*memmover)(dirent, &holder, NAME_OFFSET(dirent));
/* name bytes need no alignment; plain memcpy straight into place */
memcpy(dirent->d_name, name, namelen);
dirent->d_name[namelen] = 0;
if (result != 0)
break;
/* records padded to 8 bytes (was 4), matching the lu_dirent size
 * rounding change elsewhere in this patch */
- recsize = (sizeof(*ent) + len + 3) & ~3;
+ recsize = (sizeof(*ent) + len + 7) & ~7;
hash = iops->store(env, it);
*end = hash;
* No pages were processed, mark this.
*/
dp->ldp_flags |= LDF_EMPTY;
/* ldp_flags is now 32 bits: store in little-endian wire order */
- dp->ldp_flags = cpu_to_le16(dp->ldp_flags);
+ dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
cfs_kunmap(rdpg->rp_pages[0]);
}
iops->put(env, it);
/* NOTE(review): separate hunk — looks like the empty-directory
 * short-cut path of a different function; verify in the full file. */
dp->ldp_hash_start = rdpg->rp_hash;
dp->ldp_hash_end = DIR_END_OFF;
dp->ldp_flags |= LDF_EMPTY;
- dp->ldp_flags = cpu_to_le16(dp->ldp_flags);
+ dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
cfs_kunmap(pg);
GOTO(out_unlock, rc = 0);
}
struct lu_fid_pack oti_pack;
+ /* union to guarantee that ->oti_ipd[] has proper alignment. */
+ union {
char oti_ipd[DX_IPD_MAX_SIZE];
+ /* never read or written; its only purpose is to force the
+ * union (and thus oti_ipd) onto a long long boundary */
+ long long oti_alignment_lieutenant;
+ };
#if OSD_COUNTERS
int oti_r_locks;
int oti_w_locks;
LVAR_ROUND = LVAR_PAD - 1
};
+/**
+ * Stores \a val at \a dst, where the latter is possibly unaligned. Uses
+ * memcpy(). This macro is needed to avoid dependency of user level tools on
+ * the kernel headers.
+ */
+#define STORE_UNALIGNED(val, dst) \
+({ \
+ typeof(val) __val = (val); \
+ \
+ CLASSERT(sizeof(val) == sizeof(*(dst))); \
+ memcpy(dst, &__val, sizeof(*(dst))); \
+})
+
/* NOTE(review): the direct stores below were misaligned-pointer
 * dereferences; they are replaced by the memcpy-based macro above,
 * with CLASSERT guaranteeing value and destination sizes match. */
static void lfix_root(void *buf,
int blocksize, int keysize, int ptrsize, int recsize)
{
entry += keysize;
/* now @entry points to <ptr> */
if (ptrsize == 4)
- *(u_int32_t *)entry = cpu_to_le32(1);
+ STORE_UNALIGNED(cpu_to_le32(1), (u_int32_t *)entry);
else
- *(u_int64_t *)entry = cpu_to_le64(1);
+ STORE_UNALIGNED(cpu_to_le64(1), (u_int64_t *)entry);
}
static void lfix_leaf(void *buf,
entry += sizeof(lvar_hash_t);
/* now @entry points to <ptr> */
if (ptrsize == 4)
- *(u_int32_t *)entry = cpu_to_le32(1);
+ STORE_UNALIGNED(cpu_to_le32(1), (u_int32_t *)entry);
else
- *(u_int64_t *)entry = cpu_to_le64(1);
+ STORE_UNALIGNED(cpu_to_le64(1), (u_int64_t *)entry);
}
static int lvar_esize(int namelen, int recsize)
LVAR_ROUND = LVAR_PAD - 1
};
+/**
+ * Stores \a val at \a dst, where the latter is possibly unaligned. Uses
+ * memcpy(). This macro is needed to avoid dependency of user level tools on
+ * the kernel headers.
+ */
+#define STORE_UNALIGNED(val, dst) \
+({ \
+ typeof(*(dst)) __val = (val); \
+ \
+ memcpy(dst, &__val, sizeof *(dst)); \
+})
+
/* NOTE(review): this copy of STORE_UNALIGNED diverges from the kernel
 * variant above — it uses typeof(*(dst)) instead of typeof(val) and
 * drops the CLASSERT size check (presumably because CLASSERT comes from
 * kernel headers the user-level tool must not include). Confirm the
 * divergence is intentional. */
static int root_limit(int rootgap, int blocksize, int size)
{
int limit;
entry += keysize;
/* now @entry points to <ptr> */
if (ptrsize == 4)
- *(u_int32_t *)entry = cpu_to_le32(1);
+ STORE_UNALIGNED(cpu_to_le32(1), (u_int32_t *)entry);
else
- *(u_int64_t *)entry = cpu_to_le64(1);
+ STORE_UNALIGNED(cpu_to_le64(1), (u_int64_t *)entry);
}
static void lfix_leaf(void *buf,
entry += sizeof(lvar_hash_t);
/* now @entry points to <ptr> */
if (ptrsize == 4)
- *(u_int32_t *)entry = cpu_to_le32(1);
+ STORE_UNALIGNED(cpu_to_le32(1), (u_int32_t *)entry);
else
- *(u_int64_t *)entry = cpu_to_le64(1);
+ STORE_UNALIGNED(cpu_to_le64(1), (u_int64_t *)entry);
}
static int lvar_esize(int namelen, int recsize)