* @{
*/
+#include <linux/kernel.h>
#include <linux/types.h>
#ifdef __KERNEL__
# include <linux/lustre/lustre_fiemap.h>
#endif /* __KERNEL__ */
+/* Handle older distros */
+#ifndef __ALIGN_KERNEL
+# define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+# define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+
#if defined(__cplusplus)
extern "C" {
#endif
};
#define PRJQUOTA 2
-#if defined(__x86_64__) || defined(__ia64__) || defined(__ppc64__) || \
- defined(__craynv) || defined(__mips64__) || defined(__powerpc64__) || \
- defined(__aarch64__)
-typedef struct stat lstat_t;
-# define lstat_f lstat
-# define fstat_f fstat
-# define fstatat_f fstatat
-# define HAVE_LOV_USER_MDS_DATA
-#elif defined(__USE_LARGEFILE64) || defined(__KERNEL__)
-typedef struct stat64 lstat_t;
-# define lstat_f lstat64
-# define fstat_f fstat64
-# define fstatat_f fstatat64
-# define HAVE_LOV_USER_MDS_DATA
+/*
+ * Always use the 64-bit version of the structure because it is
+ * shared across the entire cluster, where 32-bit and 64-bit
+ * machines coexist.
+ */
+#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
+typedef struct stat64 lstat_t;
+#define lstat_f lstat64
+#define fstat_f fstat64
+#define fstatat_f fstatat64
+#else
+typedef struct stat lstat_t;
+#define lstat_f lstat
+#define fstat_f fstat
+#define fstatat_f fstatat
#endif
+#define HAVE_LOV_USER_MDS_DATA
+
#define LUSTRE_EOF 0xffffffffffffffffULL
/* for statfs() */
static inline bool lov_pattern_supported(__u32 pattern)
{
- return pattern == LOV_PATTERN_RAID0 ||
- pattern == LOV_PATTERN_MDT ||
- pattern == (LOV_PATTERN_RAID0 | LOV_PATTERN_F_RELEASED);
+ return (pattern & ~LOV_PATTERN_F_RELEASED) == LOV_PATTERN_RAID0 ||
+ (pattern & ~LOV_PATTERN_F_RELEASED) == LOV_PATTERN_MDT;
}
#define LOV_MAXPOOLNAME 15
#define LUSTRE_VOLATILE_HDR ".\x0c\x13\x14\x12:VOLATILE"
#define LUSTRE_VOLATILE_HDR_LEN 14
-typedef enum lustre_quota_version {
+enum lustre_quota_version {
LUSTRE_QUOTA_V2 = 1
-} lustre_quota_version_t;
+};
/* XXX: same as if_dqinfo struct in kernel */
struct obd_dqinfo {
CHANGELOG_EXTRA_FLAG_XATTR = 0x08,
};
-#define CR_MAXSIZE cfs_size_round(2 * NAME_MAX + 2 + \
+#define CR_MAXSIZE __ALIGN_KERNEL(2 * NAME_MAX + 2 + \
changelog_rec_offset(CLF_SUPPORTED, \
- CLFE_SUPPORTED))
+ CLFE_SUPPORTED), 8)
/* 31 usable bytes string + null terminator. */
#define LUSTRE_JOBID_SIZE 32
boundaries. See hai_zero */
} __attribute__((packed));
-#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round (int val)
-{
- return (val + 7) & (~0x7);
-}
-#define HAVE_CFS_SIZE_ROUND
-#endif
-
/* Return pointer to first hai in action list */
static inline struct hsm_action_item *hai_first(struct hsm_action_list *hal)
{
- return (struct hsm_action_item *)(hal->hal_fsname +
- cfs_size_round(strlen(hal-> \
- hal_fsname)
- + 1));
+ size_t offset = __ALIGN_KERNEL(strlen(hal->hal_fsname) + 1, 8);
+
+ return (struct hsm_action_item *)(hal->hal_fsname + offset);
}
+
/* Return pointer to next hai */
static inline struct hsm_action_item * hai_next(struct hsm_action_item *hai)
{
- return (struct hsm_action_item *)((char *)hai +
- cfs_size_round(hai->hai_len));
+ size_t offset = __ALIGN_KERNEL(hai->hai_len, 8);
+
+ return (struct hsm_action_item *)((char *)hai + offset);
}
/* Return size of an hsm_action_list */
size_t sz;
struct hsm_action_item *hai;
- sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1);
+ sz = sizeof(*hal) + __ALIGN_KERNEL(strlen(hal->hal_fsname) + 1, 8);
hai = hai_first(hal);
for (i = 0; i < hal->hal_count ; i++, hai = hai_next(hai))
- sz += cfs_size_round(hai->hai_len);
+ sz += __ALIGN_KERNEL(hai->hai_len, 8);
return sz;
}