i=umka
i=h.huang
Patch to add an lr_padding field to the lu_range struct. This field will be
used by the compact FLD work.
seq->lss_cli = NULL;
seq->lss_type = type;
seq->lss_cli = NULL;
seq->lss_type = type;
- range_zero(&seq->lss_space);
+ range_init(&seq->lss_space);
sema_init(&seq->lss_sem, 1);
seq->lss_width = is_srv ?
sema_init(&seq->lss_sem, 1);
seq->lss_width = is_srv ?
/**
 * Convert range \a src from CPU to little-endian byte order, storing the
 * result in \a dst (\a dst and \a src may alias).
 *
 * The CLASSERT is a compile-time guard: if a field is added to struct
 * lu_range without updating this function, the size sum no longer matches
 * and the build breaks.
 */
void range_cpu_to_le(struct lu_range *dst, const struct lu_range *src)
{
        /* check that all fields are converted */
        CLASSERT(sizeof(*src) ==
                 sizeof(src->lr_start) +
                 sizeof(src->lr_end) +
                 sizeof(src->lr_padding));
        dst->lr_start = cpu_to_le64(src->lr_start);
        dst->lr_end = cpu_to_le64(src->lr_end);
        /* convert padding too; otherwise dst->lr_padding is left
         * uninitialized and stack garbage may leak onto the wire. */
        dst->lr_padding = cpu_to_le64(src->lr_padding);
}
/**
 * Convert range \a src from little-endian to CPU byte order, storing the
 * result in \a dst (\a dst and \a src may alias).
 *
 * The CLASSERT is a compile-time guard: if a field is added to struct
 * lu_range without updating this function, the size sum no longer matches
 * and the build breaks.
 */
void range_le_to_cpu(struct lu_range *dst, const struct lu_range *src)
{
        /* check that all fields are converted */
        CLASSERT(sizeof(*src) ==
                 sizeof(src->lr_start) +
                 sizeof(src->lr_end) +
                 sizeof(src->lr_padding));
        dst->lr_start = le64_to_cpu(src->lr_start);
        dst->lr_end = le64_to_cpu(src->lr_end);
        /* convert padding too; otherwise dst->lr_padding is left
         * uninitialized. */
        dst->lr_padding = le64_to_cpu(src->lr_padding);
}
/**
 * Convert range \a src from CPU to big-endian byte order, storing the
 * result in \a dst (\a dst and \a src may alias).
 *
 * The CLASSERT is a compile-time guard: if a field is added to struct
 * lu_range without updating this function, the size sum no longer matches
 * and the build breaks.
 */
void range_cpu_to_be(struct lu_range *dst, const struct lu_range *src)
{
        /* check that all fields are converted */
        CLASSERT(sizeof(*src) ==
                 sizeof(src->lr_start) +
                 sizeof(src->lr_end) +
                 sizeof(src->lr_padding));
        dst->lr_start = cpu_to_be64(src->lr_start);
        dst->lr_end = cpu_to_be64(src->lr_end);
        /* convert padding too; otherwise dst->lr_padding is left
         * uninitialized and stack garbage may leak onto the wire. */
        dst->lr_padding = cpu_to_be64(src->lr_padding);
}
/**
 * Convert range \a src from big-endian to CPU byte order, storing the
 * result in \a dst (\a dst and \a src may alias).
 *
 * The CLASSERT is a compile-time guard: if a field is added to struct
 * lu_range without updating this function, the size sum no longer matches
 * and the build breaks.
 */
void range_be_to_cpu(struct lu_range *dst, const struct lu_range *src)
{
        /* check that all fields are converted */
        CLASSERT(sizeof(*src) ==
                 sizeof(src->lr_start) +
                 sizeof(src->lr_end) +
                 sizeof(src->lr_padding));
        dst->lr_start = be64_to_cpu(src->lr_start);
        dst->lr_end = be64_to_cpu(src->lr_end);
        /* convert padding too; otherwise dst->lr_padding is left
         * uninitialized. */
        dst->lr_padding = be64_to_cpu(src->lr_padding);
}
if (input != NULL)
*in = *input;
else
if (input != NULL)
*in = *input;
else
ptlrpc_request_set_replen(req);
ptlrpc_request_set_replen(req);
LASSERT(seq != NULL);
down(&seq->lcs_sem);
fid_zero(&seq->lcs_fid);
LASSERT(seq != NULL);
down(&seq->lcs_sem);
fid_zero(&seq->lcs_fid);
- range_zero(&seq->lcs_space);
+ range_init(&seq->lcs_space);
up(&seq->lcs_sem);
}
EXPORT_SYMBOL(seq_client_flush);
up(&seq->lcs_sem);
}
EXPORT_SYMBOL(seq_client_flush);
/**
 * Half-open sequence range [lr_start, lr_end) used by the FID/SEQ code.
 * All fields are __u64 so the struct has a fixed 24-byte wire layout
 * (see the range_*_to_* conversion helpers).
 */
struct lu_range {
        __u64 lr_start;
        __u64 lr_end;
        /** stub for compact fld work. */
        __u64 lr_padding;
};
-static inline __u64 range_space(struct lu_range *r)
+/**
+ * returns width of given range \a r
+ */
+
+static inline __u64 range_space(const struct lu_range *range)
- return r->lr_end - r->lr_start;
+ return range->lr_end - range->lr_start;
-static inline void range_zero(struct lu_range *r)
+/**
+ * initialize range to zero
+ */
+static inline void range_init(struct lu_range *range)
- r->lr_start = r->lr_end = 0;
+ range->lr_start = range->lr_end = 0;
-static inline int range_within(struct lu_range *r,
+/**
+ * check if given seq id \a s is within given range \a r
+ */
+static inline int range_within(struct lu_range *range,
- return s >= r->lr_start && s < r->lr_end;
+ return s >= range->lr_start && s < range->lr_end;
-static inline void range_alloc(struct lu_range *r,
- struct lu_range *s,
- __u64 w)
+/**
+ * allocate \a w units of sequence from range \a from.
+ */
+static inline void range_alloc(struct lu_range *to,
+ struct lu_range *from,
+ __u64 width)
- r->lr_start = s->lr_start;
- r->lr_end = s->lr_start + w;
- s->lr_start += w;
+ to->lr_start = from->lr_start;
+ to->lr_end = from->lr_start + width;
+ from->lr_start += width;
-static inline int range_is_sane(struct lu_range *r)
+static inline int range_is_sane(const struct lu_range *range)
- return (r->lr_end >= r->lr_start);
+ return (range->lr_end >= range->lr_start);
-static inline int range_is_zero(struct lu_range *r)
+static inline int range_is_zero(const struct lu_range *range)
- return (r->lr_start == 0 && r->lr_end == 0);
+ return (range->lr_start == 0 && range->lr_end == 0);
/**
 * returns non-zero if no sequence units remain in \a range
 * (i.e. its width is zero).
 */
static inline int range_is_exhausted(const struct lu_range *range)
{
        return range_space(range) == 0;
}

/* printf format for a range: zero-padded hex start/end bounds */
#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x]"