* for the same request.
*/
void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, obd_valid flags)
+ struct cl_req_attr *attr, u64 flags)
{
const struct cl_req_slice *slice;
struct cl_page *page;
/*FIXME: Just copy from obdo_from_inode*/
void obdo_from_la(struct obdo *dst, const struct lu_attr *la, __u64 valid)
{
- obd_flag newvalid = 0;
+ u32 newvalid = 0;
if (valid & LA_ATIME) {
dst->o_atime = la->la_atime;
EXPORT_SYMBOL(obdo_from_la);
/*FIXME: Just copy from obdo_from_inode*/
-void la_from_obdo(struct lu_attr *dst, const struct obdo *obdo, obd_flag valid)
+void la_from_obdo(struct lu_attr *dst, const struct obdo *obdo, u32 valid)
{
__u64 newvalid = 0;
EXPORT_SYMBOL(la_from_obdo);
void obdo_refresh_inode(struct inode *dst, const struct obdo *src,
- obd_flag valid)
+ u32 valid)
{
valid &= src->o_valid;
struct dt_object_format *dof, struct thandle *th)
{
struct dt_thread_info *dti = dt_info(env);
- obd_id lastid;
+ u64 lastid;
int rc;
ENTRY;
{
struct dt_thread_info *dti = dt_info(env);
struct ls_device *ls;
- obd_id lastid;
+ u64 lastid;
struct dt_object *o = NULL;
struct thandle *th;
__u32 first_oid = fid_oid(first_fid);
/* WARNING: the file systems must take care not to tinker with
attributes they don't manage (such as blocks). */
-void obdo_from_inode(struct obdo *dst, struct inode *src, obd_flag valid)
+void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid)
{
- obd_flag newvalid = 0;
+ u32 newvalid = 0;
if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
CDEBUG(D_INODE, "valid %x, new time %lu/%lu\n",
}
EXPORT_SYMBOL(obdo_from_inode);
-void obdo_cpy_md(struct obdo *dst, const struct obdo *src, obd_flag valid)
+void obdo_cpy_md(struct obdo *dst, const struct obdo *src, u32 valid)
{
CDEBUG(D_INODE, "src obdo "DOSTID" valid "LPX64", dst obdo "DOSTID"\n",
POSTID(&src->o_oi), src->o_valid, POSTID(&dst->o_oi));
EXPORT_SYMBOL(obdo_to_ioobj);
static void iattr_from_obdo(struct iattr *attr, const struct obdo *oa,
- obd_flag valid)
+ u32 valid)
{
valid &= oa->o_valid;
}
void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa,
- obd_flag valid)
+ u32 valid)
{
iattr_from_obdo(&op_data->op_attr, oa, valid);
if (valid & OBD_MD_FLBLOCKS) {
RETURN(0);
}
- static __u64 echo_next_id(struct obd_device *obddev)
+static u64 echo_next_id(struct obd_device *obddev)
{
- obd_id id;
+ u64 id;
spin_lock(&obddev->u.echo.eo_lock);
id = ++obddev->u.echo.eo_lastino;
struct obd_info *oinfo)
{
struct obd_device *obd = class_exp2obd(exp);
- obd_id id = ostid_id(&oinfo->oi_oa->o_oi);
+ u64 id = ostid_id(&oinfo->oi_oa->o_oi);
ENTRY;
if (!obd) {
}
static void
-echo_page_debug_setup(struct page *page, int rw, obd_id id,
+echo_page_debug_setup(struct page *page, int rw, u64 id,
__u64 offset, int len)
{
int page_offset = offset & ~CFS_PAGE_MASK;
}
static int
-echo_page_debug_check(struct page *page, obd_id id,
+echo_page_debug_check(struct page *page, u64 id,
__u64 offset, int len)
{
int page_offset = offset & ~CFS_PAGE_MASK;
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
(oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
struct niobuf_local *res = lb;
- obd_off offset = nb->rnb_offset;
+ u64 offset = nb->rnb_offset;
int len = nb->rnb_len;
while (len > 0) {
struct niobuf_local *lb, int verify)
{
struct niobuf_local *res = lb;
- obd_off start = rb->rnb_offset >> PAGE_CACHE_SHIFT;
- obd_off end = (rb->rnb_offset + rb->rnb_len + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ u64 start = rb->rnb_offset >> PAGE_CACHE_SHIFT;
+ u64 end = (rb->rnb_offset + rb->rnb_len + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
int count = (int)(end - start);
int rc = 0;
int i;
/** @} echo_helpers */
static int cl_echo_object_put(struct echo_object *eco);
-static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
+static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct page **pages, int npages, int async);
struct echo_thread_info {
}
static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
- obd_off start, obd_off end, int mode,
- __u64 *cookie , __u32 enqflags)
+ u64 start, u64 end, int mode,
+ __u64 *cookie , __u32 enqflags)
{
struct cl_io *io;
struct cl_lock *lck;
cl_page_list_add(&queue->c2_qout, page);
}
-static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
+static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct page **pages, int npages, int async)
{
struct lu_env *env;
/** @} echo_exports */
-static obd_id last_object_id;
+static u64 last_object_id;
#ifdef HAVE_SERVER_SUPPORT
static inline void echo_md_build_name(struct lu_name *lname, char *name,
eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc);
}
-static void
-echo_client_page_debug_setup(struct page *page, int rw, obd_id id,
- obd_off offset, obd_off count)
+static void echo_client_page_debug_setup(struct page *page, int rw, u64 id,
+ u64 offset, u64 count)
{
- char *addr;
- obd_off stripe_off;
- obd_id stripe_id;
- int delta;
+ char *addr;
+ u64 stripe_off;
+ u64 stripe_id;
+ int delta;
/* no partial pages on the client */
LASSERT(count == PAGE_CACHE_SIZE);
}
static int
-echo_client_page_debug_check(struct page *page, obd_id id, obd_off offset,
- obd_off count)
+echo_client_page_debug_check(struct page *page, u64 id, u64 offset, u64 count)
{
- obd_off stripe_off;
- obd_id stripe_id;
+ u64 stripe_off;
+ u64 stripe_id;
char *addr;
int delta;
int rc;
}
static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
- struct echo_object *eco, obd_off offset,
- obd_size count, int async,
- struct obd_trans_info *oti)
+ struct echo_object *eco, u64 offset,
+ u64 count, int async,
+ struct obd_trans_info *oti)
{
- obd_count npages;
+ size_t npages;
struct brw_page *pga;
struct brw_page *pgp;
struct page **pages;
- obd_off off;
- int i;
+ u64 off;
+ size_t i;
int rc;
int verify;
gfp_t gfp_mask;
- int brw_flags = 0;
+ u32 brw_flags = 0;
ENTRY;
verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
- if (count <= 0 ||
- (count & (~CFS_PAGE_MASK)) != 0)
- RETURN(-EINVAL);
+ if ((count & (~CFS_PAGE_MASK)) != 0)
+ RETURN(-EINVAL);
/* XXX think again with misaligned I/O */
npages = count >> PAGE_CACHE_SHIFT;
static int echo_client_prep_commit(const struct lu_env *env,
struct obd_export *exp, int rw,
struct obdo *oa, struct echo_object *eco,
- obd_off offset, obd_size count,
- obd_size batch, struct obd_trans_info *oti,
+ u64 offset, u64 count,
+ u64 batch, struct obd_trans_info *oti,
int async)
{
- struct obd_ioobj ioo;
- struct niobuf_local *lnb;
- struct niobuf_remote *rnb;
- obd_off off;
- obd_size npages, tot_pages;
+ struct obd_ioobj ioo;
+ struct niobuf_local *lnb;
+ struct niobuf_remote *rnb;
+ u64 off;
+ u64 npages, tot_pages;
int i, ret = 0, brw_flags = 0;
ENTRY;
* \retval negative number on error
*/
int ofd_auth_capa(struct obd_export *exp, const struct lu_fid *fid,
- obd_seq seq, struct lustre_capa *capa, __u64 opc)
+ u64 seq, struct lustre_capa *capa, __u64 opc)
{
struct filter_obd *filter = &exp->exp_obd->u.filter;
struct filter_capa_key *k;
RCL_CLIENT);
if (KEY_IS(KEY_LAST_ID)) {
- obd_id *last_id;
+ u64 *last_id;
struct ofd_seq *oseq;
req_capsule_extend(tsi->tsi_pill, &RQF_OST_GET_INFO_LAST_ID);
last_id = req_capsule_server_get(tsi->tsi_pill, &RMF_OBD_ID);
oseq = ofd_seq_load(tsi->tsi_env, ofd,
- (obd_seq)exp->exp_filter_data.fed_group);
+ (u64)exp->exp_filter_data.fed_group);
if (IS_ERR(oseq))
rc = -EFAULT;
else
struct lu_fid *fid = &info->fti_fid;
struct ost_id *oi = &oa->o_oi;
struct ofd_seq *oseq;
- obd_seq seq = ostid_seq(oi);
- obd_id end_id = ostid_id(oi);
- obd_id last;
- obd_id oid;
+ u64 seq = ostid_seq(oi);
+ u64 end_id = ostid_id(oi);
+ u64 last;
+ u64 oid;
int skip_orphan;
int rc = 0;
struct obdo *rep_oa;
struct obd_export *exp = tsi->tsi_exp;
struct ofd_device *ofd = ofd_exp(exp);
- obd_seq seq = ostid_seq(&oa->o_oi);
- obd_id oid = ostid_id(&oa->o_oi);
+ u64 seq = ostid_seq(&oa->o_oi);
+ u64 oid = ostid_id(&oa->o_oi);
struct ofd_seq *oseq;
int rc = 0, diff;
int sync_trans = 0;
}
if (diff > 0) {
cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT);
- obd_id next_id;
+ u64 next_id;
int created = 0;
int count;
struct ofd_device *ofd = ofd_exp(tsi->tsi_exp);
struct ofd_thread_info *fti = tsi2ofd_info(tsi);
struct lu_fid *fid = &fti->fti_fid;
- obd_id oid;
- obd_count count;
+ u64 oid;
+ u32 count;
int rc = 0;
ENTRY;
* \retval pointer to the requested ofd_seq structure
* \retval NULL if ofd_seq is not found
*/
-struct ofd_seq *ofd_seq_get(struct ofd_device *ofd, obd_seq seq)
+struct ofd_seq *ofd_seq_get(struct ofd_device *ofd, u64 seq)
{
struct ofd_seq *oseq;
*
* \retval the last object ID for this sequence
*/
-obd_id ofd_seq_last_oid(struct ofd_seq *oseq)
+u64 ofd_seq_last_oid(struct ofd_seq *oseq)
{
- obd_id id;
+ u64 id;
spin_lock(&oseq->os_last_oid_lock);
id = ostid_id(&oseq->os_oi);
* \param[in] oseq OFD sequence
* \param[in] id the new OID to set
*/
-void ofd_seq_last_oid_set(struct ofd_seq *oseq, obd_id id)
+void ofd_seq_last_oid_set(struct ofd_seq *oseq, u64 id)
{
spin_lock(&oseq->os_last_oid_lock);
if (likely(ostid_id(&oseq->os_oi) < id))
struct ofd_seq *oseq)
{
struct ofd_thread_info *info = ofd_info(env);
- obd_id tmp;
+ u64 tmp;
struct dt_object *obj = oseq->os_lastid_obj;
struct thandle *th;
int rc;
* \retval ERR_PTR pointer on error
*/
struct ofd_seq *ofd_seq_load(const struct lu_env *env, struct ofd_device *ofd,
- obd_seq seq)
+ u64 seq)
{
struct ofd_thread_info *info = ofd_info(env);
struct ofd_seq *oseq = NULL;
struct dt_object *dob;
- obd_id lastid;
+ u64 lastid;
int rc;
ENTRY;
/* Clients typically hold 2x their max_rpcs_in_flight of grant space */
#define OFD_GRANT_SHRINK_LIMIT(exp) (2ULL * 8 * exp_max_brw_size(exp))
-static inline obd_size ofd_grant_from_cli(struct obd_export *exp,
- struct ofd_device *ofd, obd_size val)
+static inline u64 ofd_grant_from_cli(struct obd_export *exp,
+ struct ofd_device *ofd, u64 val)
{
if (ofd_grant_compat(exp, ofd))
/* clients not supporting OBD_CONNECT_GRANT_PARAM actually
return val;
}
-static inline obd_size ofd_grant_to_cli(struct obd_export *exp,
- struct ofd_device *ofd, obd_size val)
+static inline u64 ofd_grant_to_cli(struct obd_export *exp,
+ struct ofd_device *ofd, u64 val)
{
if (ofd_grant_compat(exp, ofd))
return val >> (ofd->ofd_blockbits - COMPAT_BSIZE_SHIFT);
return val;
}
-static inline obd_size ofd_grant_chunk(struct obd_export *exp,
- struct ofd_device *ofd)
+static inline u64 ofd_grant_chunk(struct obd_export *exp,
+ struct ofd_device *ofd)
{
if (ofd_obd(ofd)->obd_self_export == exp)
/* Grant enough space to handle a big precreate request */
*/
void ofd_grant_sanity_check(struct obd_device *obd, const char *func)
{
- struct ofd_device *ofd = ofd_dev(obd->obd_lu_dev);
- struct obd_export *exp;
- obd_size maxsize;
- obd_size tot_dirty = 0;
- obd_size tot_pending = 0;
- obd_size tot_granted = 0;
- obd_size fo_tot_granted;
- obd_size fo_tot_pending;
- obd_size fo_tot_dirty;
+ struct ofd_device *ofd = ofd_dev(obd->obd_lu_dev);
+ struct obd_export *exp;
+ u64 maxsize;
+ u64 tot_dirty = 0;
+ u64 tot_pending = 0;
+ u64 tot_granted = 0;
+ u64 fo_tot_granted;
+ u64 fo_tot_pending;
+ u64 fo_tot_dirty;
if (list_empty(&obd->obd_exports))
return;
* of available space is requested
* \retval amount of non-allocated space, in bytes
*/
-static obd_size ofd_grant_space_left(struct obd_export *exp)
+static u64 ofd_grant_space_left(struct obd_export *exp)
{
- struct obd_device *obd = exp->exp_obd;
- struct ofd_device *ofd = ofd_exp(exp);
- obd_size tot_granted;
- obd_size left;
- obd_size avail;
- obd_size unstable;
+ struct obd_device *obd = exp->exp_obd;
+ struct ofd_device *ofd = ofd_exp(exp);
+ u64 tot_granted;
+ u64 left;
+ u64 avail;
+ u64 unstable;
ENTRY;
assert_spin_locked(&ofd->ofd_grant_lock);
* overhead estimate made by the OSD layer. If we grant all the free
* space, we have no way (grant space cannot be revoked yet) to
* adjust if the write overhead has been underestimated. */
- left -= min_t(obd_size, left, ofd_grant_reserved(ofd, avail));
+ left -= min_t(u64, left, ofd_grant_reserved(ofd, avail));
/* Align left on block size */
left &= ~((1ULL << ofd->ofd_blockbits) - 1);
oa->o_dirty = 0;
dirty = ofd_grant_from_cli(exp, ofd, oa->o_dirty);
- dropped = ofd_grant_from_cli(exp, ofd, (obd_size)oa->o_dropped);
+ dropped = ofd_grant_from_cli(exp, ofd, (u64)oa->o_dropped);
grant_chunk = ofd_grant_chunk(exp, ofd);
/* Update our accounting now so that statfs takes it into account.
* taken out
*/
static void ofd_grant_shrink(struct obd_export *exp, struct obdo *oa,
- obd_size left_space)
+ u64 left_space)
{
struct filter_export_data *fed;
struct ofd_device *ofd = ofd_exp(exp);
* \retval space (in bytes) that will be consumed to write the
* network buffer
*/
-static inline int ofd_grant_rnb_size(struct obd_export *exp,
+static inline u64 ofd_grant_rnb_size(struct obd_export *exp,
struct ofd_device *ofd,
struct niobuf_remote *rnb)
{
- obd_size blocksize;
- obd_size bytes;
- obd_size end;
+ u64 blocksize;
+ u64 bytes;
+ u64 end;
if (exp && ofd_grant_compat(exp, ofd))
blocksize = 1ULL << COMPAT_BSIZE_SHIFT;
bytes += blocksize - end;
if (exp)
 /* Apply per-export peculiarities if one is given */
- bytes = ofd_grant_from_cli(exp, ofd, (obd_size)bytes);
+ bytes = ofd_grant_from_cli(exp, ofd, bytes);
return bytes;
}
*/
static void ofd_grant_check(const struct lu_env *env, struct obd_export *exp,
struct obdo *oa, struct niobuf_remote *rnb,
- int niocount, obd_size *left)
+ int niocount, u64 *left)
{
struct filter_export_data *fed = &exp->exp_filter_data;
struct obd_device *obd = exp->exp_obd;
*
* \retval amount of grant space allocated
*/
-static long ofd_grant_alloc(struct obd_export *exp, obd_size curgrant,
- obd_size want, obd_size left, bool conservative)
+static long ofd_grant_alloc(struct obd_export *exp, u64 curgrant,
+ u64 want, u64 left, bool conservative)
{
struct obd_device *obd = exp->exp_obd;
struct ofd_device *ofd = ofd_exp(exp);
struct filter_export_data *fed = &exp->exp_filter_data;
long grant_chunk;
- obd_size grant;
+ u64 grant;
ENTRY;
* \retval amount of grant space currently owned by the client
*/
long ofd_grant_connect(const struct lu_env *env, struct obd_export *exp,
- obd_size want, bool new_conn)
+ u64 want, bool new_conn)
{
struct ofd_device *ofd = ofd_exp(exp);
struct filter_export_data *fed = &exp->exp_filter_data;
- obd_size left = 0;
+ u64 left = 0;
long grant;
int from_cache;
int force = 0; /* can use cached data */
}
ofd_grant_alloc(exp,
- ofd_grant_to_cli(exp, ofd, (obd_size)fed->fed_grant),
+ ofd_grant_to_cli(exp, ofd, (u64)fed->fed_grant),
want, left, new_conn);
/* return to client its current grant */
- grant = ofd_grant_to_cli(exp, ofd, (obd_size)fed->fed_grant);
+ grant = ofd_grant_to_cli(exp, ofd, (u64)fed->fed_grant);
ofd->ofd_tot_granted_clients++;
spin_unlock(&ofd->ofd_grant_lock);
{
struct ofd_device *ofd = ofd_exp(exp);
int do_shrink;
- obd_size left = 0;
+ u64 left = 0;
if (!oa)
return;
{
struct obd_device *obd = exp->exp_obd;
struct ofd_device *ofd = ofd_exp(exp);
- obd_size left;
+ u64 left;
int from_cache;
 int force = 0; /* can use cached data initially */
int rc;
struct ofd_thread_info *info = ofd_info(env);
struct ofd_device *ofd = ofd_exp(exp);
struct filter_export_data *fed = &exp->exp_filter_data;
- obd_size left = 0;
+ u64 left = 0;
unsigned long wanted;
ENTRY;
if (rc == 0) {
spin_lock(&ofd->ofd_osfs_lock);
/* Take pending out of cached statfs data */
- ofd->ofd_osfs.os_bavail -= min_t(obd_size,
+ ofd->ofd_osfs.os_bavail -= min_t(u64,
ofd->ofd_osfs.os_bavail,
pending >> ofd->ofd_blockbits);
if (ofd->ofd_statfs_inflight)
__u64 ofd_osfs_age;
int ofd_blockbits;
 /* writes which might be accounted twice in ofd_osfs.os_bavail */
- obd_size ofd_osfs_unstable;
+ u64 ofd_osfs_unstable;
/* counters used during statfs update, protected by ofd_osfs_lock.
* record when some statfs refresh are in progress */
int ofd_statfs_inflight;
/* track writes completed while statfs refresh is underway.
* tracking is only effective when ofd_statfs_inflight > 1 */
- obd_size ofd_osfs_inflight;
+ u64 ofd_osfs_inflight;
/* grants: all values in bytes */
/* grant lock to protect all grant counters */
spinlock_t ofd_grant_lock;
/* total amount of dirty data reported by clients in incoming obdo */
- obd_size ofd_tot_dirty;
+ u64 ofd_tot_dirty;
/* sum of filesystem space granted to clients for async writes */
- obd_size ofd_tot_granted;
+ u64 ofd_tot_granted;
/* grant used by I/Os in progress (between prepare and commit) */
- obd_size ofd_tot_pending;
+ u64 ofd_tot_pending;
/* free space threshold over which we stop granting space to clients
* ofd_grant_ratio is stored as a fixed-point fraction using
* OFD_GRANT_RATIO_SHIFT of the remaining free space, not in percentage
/* ofd_capa.c */
int ofd_update_capa_key(struct ofd_device *ofd, struct lustre_capa_key *key);
int ofd_auth_capa(struct obd_export *exp, const struct lu_fid *fid,
- obd_seq seq, struct lustre_capa *capa, __u64 opc);
+ u64 seq, struct lustre_capa *capa, __u64 opc);
void ofd_free_capa_keys(struct ofd_device *ofd);
/* ofd_dev.c */
int ofd_obd_disconnect(struct obd_export *exp);
/* ofd_fs.c */
-obd_id ofd_seq_last_oid(struct ofd_seq *oseq);
-void ofd_seq_last_oid_set(struct ofd_seq *oseq, obd_id id);
+u64 ofd_seq_last_oid(struct ofd_seq *oseq);
+void ofd_seq_last_oid_set(struct ofd_seq *oseq, u64 id);
int ofd_seq_last_oid_write(const struct lu_env *env, struct ofd_device *ofd,
struct ofd_seq *oseq);
int ofd_seqs_init(const struct lu_env *env, struct ofd_device *ofd);
-struct ofd_seq *ofd_seq_get(struct ofd_device *ofd, obd_seq seq);
+struct ofd_seq *ofd_seq_get(struct ofd_device *ofd, u64 seq);
void ofd_seq_put(const struct lu_env *env, struct ofd_seq *oseq);
int ofd_fs_setup(const struct lu_env *env, struct ofd_device *ofd,
void ofd_fs_cleanup(const struct lu_env *env, struct ofd_device *ofd);
int ofd_precreate_batch(struct ofd_device *ofd, int batch);
struct ofd_seq *ofd_seq_load(const struct lu_env *env, struct ofd_device *ofd,
- obd_seq seq);
+ u64 seq);
void ofd_seqs_fini(const struct lu_env *env, struct ofd_device *ofd);
void ofd_seqs_free(const struct lu_env *env, struct ofd_device *ofd);
const struct lu_fid *fid);
int ofd_object_ff_load(const struct lu_env *env, struct ofd_object *fo);
int ofd_precreate_objects(const struct lu_env *env, struct ofd_device *ofd,
- obd_id id, struct ofd_seq *oseq, int nr, int sync);
+ u64 id, struct ofd_seq *oseq, int nr, int sync);
void ofd_object_put(const struct lu_env *env, struct ofd_object *fo);
int ofd_attr_set(const struct lu_env *env, struct ofd_object *fo,
/* ofd_grants.c */
#define OFD_GRANT_RATIO_SHIFT 8
-static inline __u64 ofd_grant_reserved(struct ofd_device *ofd, obd_size bavail)
+static inline u64 ofd_grant_reserved(struct ofd_device *ofd, u64 bavail)
{
return (bavail * ofd->ofd_grant_ratio) >> OFD_GRANT_RATIO_SHIFT;
}
void ofd_grant_sanity_check(struct obd_device *obd, const char *func);
long ofd_grant_connect(const struct lu_env *env, struct obd_export *exp,
- obd_size want, bool new_conn);
+ u64 want, bool new_conn);
void ofd_grant_discard(struct obd_export *exp);
void ofd_grant_prepare_read(const struct lu_env *env, struct obd_export *exp,
struct obdo *oa);
LASSERT(objcount == 1);
if (unlikely(exp->exp_obd->obd_recovering)) {
- obd_seq seq = fid_seq(fid);
- obd_id oid = fid_oid(fid);
+ u64 seq = fid_seq(fid);
+ u64 oid = fid_oid(fid);
struct ofd_seq *oseq;
oseq = ofd_seq_load(env, ofd, seq);
}
while (diff > 0) {
- obd_id next_id = ofd_seq_last_oid(oseq) + 1;
+ u64 next_id = ofd_seq_last_oid(oseq) + 1;
int count = ofd_precreate_batch(ofd, diff);
rc = ofd_precreate_objects(env, ofd, next_id,
spin_lock(&ofd->ofd_osfs_lock);
if (cfs_time_before_64(ofd->ofd_osfs_age, max_age) || max_age == 0) {
- obd_size unstable;
+ u64 unstable;
/* statfs data are too old, get up-to-date one.
* we must be cautious here since multiple threads might be
* w/o the ofd_osfs_lock. Those ones got added to
* the cached statfs data that we are about to crunch.
* Take them into account in the new statfs data */
- osfs->os_bavail -= min_t(obd_size, osfs->os_bavail,
+ osfs->os_bavail -= min_t(u64, osfs->os_bavail,
unstable >> ofd->ofd_blockbits);
/* However, we don't really know if those writes got
* accounted in the statfs call, so tell
osfs->os_bfree << ofd->ofd_blockbits,
osfs->os_bavail << ofd->ofd_blockbits);
- osfs->os_bavail -= min_t(obd_size, osfs->os_bavail,
+ osfs->os_bavail -= min_t(u64, osfs->os_bavail,
((ofd->ofd_tot_dirty + ofd->ofd_tot_pending +
osfs->os_bsize - 1) >> ofd->ofd_blockbits));
struct filter_export_data *fed;
fed = &obd->obd_self_export->exp_filter_data;
- osfs->os_bavail -= min_t(obd_size, osfs->os_bavail,
+ osfs->os_bavail -= min_t(u64, osfs->os_bavail,
fed->fed_grant >> ofd->ofd_blockbits);
}
{
struct ofd_device *ofd = ofd_exp(exp);
struct ofd_thread_info *info;
- obd_seq seq = ostid_seq(&oa->o_oi);
+ u64 seq = ostid_seq(&oa->o_oi);
struct ofd_seq *oseq;
int rc = 0, diff = 1;
- obd_id next_id;
+ u64 next_id;
int count;
ENTRY;
* \retval negative value on error
*/
int ofd_precreate_objects(const struct lu_env *env, struct ofd_device *ofd,
- obd_id id, struct ofd_seq *oseq, int nr, int sync)
+ u64 id, struct ofd_seq *oseq, int nr, int sync)
{
struct ofd_thread_info *info = ofd_info(env);
struct ofd_object *fo = NULL;
struct thandle *th;
struct ofd_object **batch;
struct lu_fid *fid = &info->fti_fid;
- obd_id tmp;
+ u64 tmp;
int rc;
int i;
int objects = 0;