cfs_time_t ops_submit_time;
};
+struct osc_brw_async_args {
+ /* NOTE(review): per-request argument block kept alive across an
+  * asynchronous bulk read/write (brw) RPC; the aa_ field meanings
+  * below are inferred from their names — confirm against the code
+  * that fills and consumes this struct (not visible in this chunk). */
+ struct obdo *aa_oa;
+ int aa_requested_nob;
+ int aa_nio_count;
+ u32 aa_page_count;
+ /* presumably the number of resend attempts so far — TODO confirm */
+ int aa_resends;
+ struct brw_page **aa_ppga;
+ struct client_obd *aa_cli;
+ /* list anchors; membership/ownership defined at the use sites */
+ struct list_head aa_oaps;
+ struct list_head aa_exts;
+};
+
extern struct kmem_cache *osc_lock_kmem;
extern struct kmem_cache *osc_object_kmem;
extern struct kmem_cache *osc_thread_kmem;
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
enum cl_req_type crt, int brw_flags);
int lru_queue_work(const struct lu_env *env, void *data);
+/* NOTE(review): newly exported here; presumably releases up to @target
+ * pages from the client's LRU — semantics of @force to be confirmed at
+ * the definition, which is not visible in this chunk. */
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ long target, bool force);
/* osc_cache.c */
int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops);
-int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
- struct list_head *list, int cmd, int brw_flags);
+int osc_queue_sync_pages(const struct lu_env *env, const struct cl_io *io,
+ struct osc_object *obj, struct list_head *list,
+ int brw_flags);
int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
__u64 size, struct osc_extent **extp);
void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext);
pgoff_t start, pgoff_t end);
int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc, int async);
+void osc_wake_cache_waiters(struct client_obd *cli);
static inline int osc_io_unplug_async(const struct lu_env *env,
struct client_obd *cli,
struct ptlrpc_request_set *set);
int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg);
-
+/* NOTE(review): OBD export connect/disconnect hooks newly exposed in
+ * this header; definitions are not visible in this chunk — verify the
+ * error-return convention (0 / negative errno is the usual OBD style). */
+int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
+ struct obd_device *obd, struct obd_uuid *cluuid,
+ struct obd_connect_data *data, void *localdata);
+int osc_disconnect(struct obd_export *exp);
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie);
int osc_io_write_start(const struct lu_env *env,
const struct cl_io_slice *slice);
void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice);
-int osc_io_fsync_start(const struct lu_env *env,
- const struct cl_io_slice *slice);
+/* Replaces osc_io_fsync_start (removed in this same hunk): takes the
+ * fsync descriptor and target object directly rather than a cl_io
+ * slice, so it can be called outside the cl_io state machine. */
+int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
+ struct cl_fsync_io *fio);
void osc_io_fsync_end(const struct lu_env *env,
const struct cl_io_slice *slice);
void osc_read_ahead_release(const struct lu_env *env, void *cbdata);
oe_hp:1,
/** this extent should be written back asap. set if one of pages is
* called by page WB daemon, or sync write or reading requests. */
- oe_urgent:1;
+ oe_urgent:1,
+ /** Non-delay RPC should be used for this extent. */
+ oe_ndelay:1;
/** how many grants allocated for this extent.
* Grant allocated for this extent. There is no grant allocated
* for reading extents and sync write extents. */
int oe_rc;
/** max pages per rpc when this extent was created */
unsigned int oe_mppr;
+ /** FLR: layout version when this osc_extent is publised */
+ __u32 oe_layout_version;
};
/** @} osc */