*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
/*
* lustre/include/lustre_osc.h
/* Write stats are actually protected by client_obd's lock. */
struct osc_stats {
+ ktime_t os_init;
uint64_t os_lockless_writes; /* by bytes */
uint64_t os_lockless_reads; /* by bytes */
- uint64_t os_lockless_truncates; /* by times */
} od_stats;
/* configuration item(s) */
time64_t od_contention_time;
- int od_lockless_truncate;
};
struct osc_extent;
/** true if this io is counted as active IO */
oi_is_active:1,
/** true if this io has CAP_SYS_RESOURCE */
- oi_cap_sys_resource:1;
+ oi_cap_sys_resource:1,
+ /** true if this io is issued by readahead */
+ oi_is_readahead:1;
/** how many LRU pages are reserved for this IO */
unsigned long oi_lru_reserved;
pgoff_t start, pgoff_t end);
void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
- enum cl_req_type crt, int brw_flags);
+ enum cl_req_type crt, int brw_flags, ktime_t submit_time);
int lru_queue_work(const struct lu_env *env, void *data);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
long target, bool force);
int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
u32 async_flags);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
- struct page *page, loff_t offset);
+ struct cl_page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops, cl_commit_cbt cb);
int osc_page_cache_add(const struct lu_env *env, struct osc_page *opg,
struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops);
-int osc_queue_sync_pages(const struct lu_env *env, const struct cl_io *io,
+int osc_queue_sync_pages(const struct lu_env *env, struct cl_io *io,
struct osc_object *obj, struct list_head *list,
int brw_flags);
int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
}
typedef bool (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
- struct osc_page *, void *);
+ void**, int, void *);
bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
struct osc_object *osc, pgoff_t start, pgoff_t end,
osc_page_gang_cbt cb, void *cbdata);
bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata);
+ void**, int, void *cbdata);
/* osc_dev.c */
int osc_device_init(const struct lu_env *env, struct lu_device *d,
int osc_object_glimpse(const struct lu_env *env, const struct cl_object *obj,
struct ost_lvb *lvb);
int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc);
-int osc_object_is_contended(struct osc_object *obj);
int osc_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
ldlm_iterator_t iter, void *data);
int osc_object_prune(const struct lu_env *env, struct cl_object *obj);
int osc_disconnect(struct obd_export *exp);
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie);
+int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
+ obd_enqueue_update_f upcall, void *cookie, int mode);
+void osc_update_next_shrink(struct client_obd *cli);
+void osc_schedule_grant_work(void);
/* osc_io.c */
int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios,
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios);
-int osc_io_rw_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios);
void osc_io_rw_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios);
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios);
struct cl_fsync_io *fio);
void osc_io_fsync_end(const struct lu_env *env,
const struct cl_io_slice *slice);
-void osc_read_ahead_release(const struct lu_env *env, void *cbdata);
+void osc_read_ahead_release(const struct lu_env *env, struct cl_read_ahead *ra);
int osc_io_lseek_start(const struct lu_env *env,
const struct cl_io_slice *slice);
void osc_io_lseek_end(const struct lu_env *env,
const struct cl_io_slice *slice);
+int osc_io_lru_reserve(const struct lu_env *env, const struct cl_io_slice *ios,
+ loff_t pos, size_t count);
+int osc_punch_start(const struct lu_env *env, struct cl_io *io,
+ struct cl_object *obj);
/* osc_lock.c */
void osc_lock_to_lockless(const struct lu_env *env, struct osc_lock *ols,
oe_ndelay:1,
/** direct IO pages */
oe_dio:1,
- /** this extent consists of RDMA only pages */
- oe_is_rdma_only;
+ /** this extent consists of pages that are not directly accessible
+ * from the CPU */
+ oe_is_rdma_only:1;
/** how many grants allocated for this extent.
* Grant allocated for this extent. There is no grant allocated
* for reading extents and sync write extents. */