return 0;
}
+/* lov_do_div64(a, b): evaluates to a % b and updates a = a / b in place.
+ * On 32-bit, a divisor with bits set above 32 must be a multiple of
+ * LOV_MIN_STRIPE_SIZE (LASSERTF-checked) so both operands can be shifted
+ * down until the divisor fits the 32-bit do_div(); else do_div directly. */
+#if BITS_PER_LONG > 32
+# define lov_do_div64(n,base) ({ \
+ uint64_t __base = (base); \
+ uint64_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+#else
+# define lov_do_div64(n,base) ({ \
+ uint64_t __rem; \
+ if ((sizeof(base) > 4) && (((base) & 0xffffffff00000000ULL) != 0)) { \
+ int __remainder; \
+ LASSERTF(!((base) & (LOV_MIN_STRIPE_SIZE - 1)), "64 bit lov " \
+ "division %llu / %llu\n", (n), (uint64_t)(base)); \
+ __remainder = (n) & (LOV_MIN_STRIPE_SIZE - 1); \
+ (n) >>= LOV_MIN_STRIPE_BITS; \
+ __rem = do_div(n, (base) >> LOV_MIN_STRIPE_BITS); \
+ __rem <<= LOV_MIN_STRIPE_BITS; \
+ __rem += __remainder; \
+ } else { \
+ __rem = do_div(n, base); \
+ } \
+ __rem; \
+ })
+#endif
+
#define IOC_LOV_TYPE 'g'
#define IOC_LOV_MIN_NR 50
#define IOC_LOV_SET_OSC_ACTIVE _IOWR('g', 50, long)
#define LOV_INTERNAL_H
#include <obd_class.h>
+#include <obd_lov.h>
#include <lustre/lustre_user.h>
struct lov_lock_handles {
int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
void lov_pool_putref(struct pool_desc *pool);
-#if BITS_PER_LONG == 64
-# define ll_do_div64(n,base) ({ \
- uint64_t __base = (base); \
- uint64_t __rem; \
- __rem = ((uint64_t)(n)) % __base; \
- (n) = ((uint64_t)(n)) / __base; \
- __rem; \
- })
-#elif BITS_PER_LONG == 32
-# define ll_do_div64(n,base) ({ \
- uint64_t __rem; \
- if ((sizeof(base) > 4) && (((base)&0xffffffff00000000ULL) != 0)) { \
- int __remainder; \
- LASSERTF(!((base) & (LOV_MIN_STRIPE_SIZE - 1)), "64 bit lov "\
- "division %llu / %llu\n", (n), (base)); \
- __remainder = (n) & (LOV_MIN_STRIPE_SIZE - 1); \
- (n) >>= LOV_MIN_STRIPE_BITS; \
- (base) >>= LOV_MIN_STRIPE_BITS; \
- __rem = do_div(n, base); \
- __rem <<= LOV_MIN_STRIPE_BITS; \
- __rem += __remainder; \
- } else { \
- __rem = do_div(n, base); \
- } \
- __rem; \
- })
-#else
-#error Unsupported architecture.
-#endif
-
#endif
/* fast path for common case. */
if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
- do_div(start, ssize);
- next = (start + 1) * ssize;
- if (next <= start * ssize)
- next = ~0ull;
+ lov_do_div64(start, ssize);
+ next = (start + 1) * ssize;
+ if (next <= start * ssize)
+ next = ~0ull;
io->ci_continue = next < lio->lis_io_endpos;
io->u.ci_rw.crw_count = min_t(loff_t, lio->lis_io_endpos,
next) - io->u.ci_rw.crw_pos;
lio->lis_pos = io->u.ci_rw.crw_pos;
lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
- CDEBUG(D_VFSTRACE, "stripe: "LPU64" chunk: ["LPU64", "LPU64") "LPU64"\n",
- (__u64)start, lio->lis_pos, lio->lis_endpos,
- (__u64)lio->lis_io_endpos);
+ CDEBUG(D_VFSTRACE, "stripe: "LPU64" chunk: ["LPU64", "LPU64") "
+ LPU64"\n", (__u64)start, lio->lis_pos, lio->lis_endpos,
+ (__u64)lio->lis_io_endpos);
}
/*
* XXX The following call should be optimized: we know, that
__u32 ssize = lsm->lsm_stripe_size;
__u64 start;
- start = *offset;
- do_div(start, ssize);
- start = start * ssize;
+ start = *offset;
+ lov_do_div64(start, ssize);
+ start = start * ssize;
CDEBUG(D_DLMTRACE, "offset "LPU64", stripe %u, start "LPU64
", end "LPU64"\n", *offset, ssize, start,
LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth);
-
- /* do_div(a, b) returns a % b, and a = a / b */
- stripe_size = do_div(ost_size, ssize);
- if (stripe_size)
- lov_size = ost_size * swidth + stripeno * ssize + stripe_size;
- else
- lov_size = (ost_size - 1) * swidth + (stripeno + 1) * ssize;
+
+ /* lov_do_div64(a, b) returns a % b, and a = a / b */
+ stripe_size = lov_do_div64(ost_size, ssize);
+ if (stripe_size)
+ lov_size = ost_size * swidth + stripeno * ssize + stripe_size;
+ else
+ lov_size = (ost_size - 1) * swidth + (stripeno + 1) * ssize;
RETURN(lov_size);
}
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off,
&swidth);
-
- /* ll_do_div64(a, b) returns a % b, and a = a / b */
- stripe_off = ll_do_div64(lov_off, swidth);
+
+ /* lov_do_div64(a, b) returns a % b, and a = a / b */
+ stripe_off = lov_do_div64(lov_off, swidth);
this_stripe = (obd_off)stripeno * ssize;
if (stripe_off < this_stripe) {
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size,
&swidth);
- /* ll_do_div64(a, b) returns a % b, and a = a / b */
- stripe_off = ll_do_div64(file_size, swidth);
+ /* lov_do_div64(a, b) returns a % b, and a = a / b */
+ stripe_off = lov_do_div64(file_size, swidth);
this_stripe = (obd_off)stripeno * ssize;
if (stripe_off < this_stripe) {
/* compute which stripe number "lov_off" will be written into */
int lov_stripe_number(struct lov_stripe_md *lsm, obd_off lov_off)
{
-        unsigned long ssize = lsm->lsm_stripe_size;
-        obd_off stripe_off, swidth;
-        int magic = lsm->lsm_magic;
+        unsigned long ssize = lsm->lsm_stripe_size;
+        obd_off stripe_off, swidth;
+        int magic = lsm->lsm_magic;
-        LASSERT(lsm_op_find(magic) != NULL);
-        lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth);
+        LASSERT(lsm_op_find(magic) != NULL);
+        lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth);
-        stripe_off = ll_do_div64(lov_off, swidth);
+        stripe_off = lov_do_div64(lov_off, swidth);
-        /* Puts stripe_off/ssize result into stripe_off */
-        do_div(stripe_off, ssize);
+        /* stripe_off /= ssize: offset within the stripe width -> stripe index */
+        lov_do_div64(stripe_off, ssize);
-        return stripe_off;
+        return stripe_off;
}
lov->lov_qos.lq_active_oss_count++;
lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_bavail += temp;
- /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
- temp >>= 1;
- do_div(temp, num_active);
- lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
- (temp * prio_wide) >> 8;
+ /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */
+ temp >>= 1;
+ lov_do_div64(temp, num_active);
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
+ (temp * prio_wide) >> 8;
age = (now - lov->lov_tgts[i]->ltd_qos.ltq_used) >> 3;
if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
/* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
- temp = oss->lqo_bavail >> 1;
- do_div(temp, oss->lqo_ost_count * num_active);
- oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
+ temp = oss->lqo_bavail >> 1;
+ lov_do_div64(temp, oss->lqo_ost_count * num_active);
+ oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
age = (now - oss->lqo_used) >> 3;
if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
if (success) {
__u32 expected_stripes = lov_get_stripecnt(&obd->u.lov,
LOV_MAGIC, 0);
- if (osfs->os_files != LOV_U64_MAX)
- do_div(osfs->os_files, expected_stripes);
- if (osfs->os_ffree != LOV_U64_MAX)
- do_div(osfs->os_ffree, expected_stripes);
+ if (osfs->os_files != LOV_U64_MAX)
+ lov_do_div64(osfs->os_files, expected_stripes);
+ if (osfs->os_ffree != LOV_U64_MAX)
+ lov_do_div64(osfs->os_ffree, expected_stripes);
cfs_spin_lock(&obd->obd_osfs_lock);
memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));