struct osc_async_page {
int oap_magic;
unsigned short oap_cmd;
- unsigned short oap_interrupted:1;
struct list_head oap_pending_item;
struct list_head oap_rpc_item;
return container_of(pga, struct osc_async_page, oap_brw_page);
}
-struct osc_cache_waiter {
- struct list_head ocw_entry;
- wait_queue_head_t ocw_waitq;
- struct osc_async_page *ocw_oap;
- int ocw_grant;
- int ocw_rc;
-};
-
struct osc_device {
struct cl_device od_cl;
struct obd_export *od_exp;
/** true if this io is lockless. */
unsigned int oi_lockless:1,
/** true if this io is counted as active IO */
- oi_is_active:1;
+ oi_is_active:1,
+ /** true if this io has CAP_SYS_RESOURCE */
+ oi_cap_sys_resource:1;
/** how many LRU pages are reserved for this IO */
unsigned long oi_lru_reserved;
struct osc_thread_info {
struct ldlm_res_id oti_resname;
union ldlm_policy_data oti_policy;
- struct cl_lock_descr oti_descr;
struct cl_attr oti_attr;
- struct lustre_handle oti_handle;
- struct cl_page_list oti_plist;
struct cl_io oti_io;
+ struct pagevec oti_pagevec;
void *oti_pvec[OTI_PVEC_SIZE];
/**
* Fields used by cl_lock_discard_pages().
* Just check if the desired lock exists, it won't hold reference
* count on lock.
*/
- OSC_DAP_FL_TEST_LOCK = 1 << 0,
+ OSC_DAP_FL_TEST_LOCK = BIT(0),
/**
* Return the lock even if it is being canceled.
*/
- OSC_DAP_FL_CANCELING = 1 << 1
+ OSC_DAP_FL_CANCELING = BIT(1),
};
/*
spin_unlock(&obj->oo_lock);
}
-static inline int osc_object_is_locked(struct osc_object *obj)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- return spin_is_locked(&obj->oo_lock);
-#else
- /*
- * It is not perfect to return true all the time.
- * But since this function is only used for assertion
- * and checking, it seems OK.
- */
- return 1;
-#endif
-}
+/* Assert that obj->oo_lock is currently held (checked only when the
+ * kernel's spinlock debugging / SMP support makes the check meaningful;
+ * compiles away otherwise, matching the old osc_object_is_locked()
+ * "return 1 on UP" behaviour). */
+#define assert_osc_object_is_locked(obj) \
+	assert_spin_locked(&(obj)->oo_lock)
static inline void osc_object_set_contended(struct osc_object *obj)
{
int aa_requested_nob;
int aa_nio_count;
u32 aa_page_count;
- int aa_resends;
+ s32 aa_resends;
struct brw_page **aa_ppga;
struct client_obd *aa_cli;
struct list_head aa_oaps;
extern struct kmem_cache *osc_session_kmem;
extern struct kmem_cache *osc_extent_kmem;
extern struct kmem_cache *osc_quota_kmem;
+extern struct kmem_cache *osc_obdo_kmem;
extern struct lu_context_key osc_key;
extern struct lu_context_key osc_session_key;
long target, bool force);
/* osc_cache.c */
-int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
u32 async_flags);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
struct page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops);
-int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
+ struct osc_page *ops, cl_commit_cbt cb);
+int osc_page_cache_add(const struct lu_env *env, struct osc_page *opg,
+ struct cl_io *io, cl_commit_cbt cb);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
pgoff_t start, pgoff_t end);
int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc, int async);
-void osc_wake_cache_waiters(struct client_obd *cli);
+/* Wake every task sleeping on cli->cl_cache_waiters. Replaces the old
+ * list-based implementation (struct osc_cache_waiter, removed above) with
+ * a plain wait queue; waiters now re-check grant availability themselves
+ * after waking — TODO(review) confirm callers were converted accordingly. */
+static inline void osc_wake_cache_waiters(struct client_obd *cli)
+{
+	wake_up(&cli->cl_cache_waiters);
+}
static inline int osc_io_unplug_async(const struct lu_env *env,
struct client_obd *cli,
(void)osc_io_unplug0(env, cli, osc, 0);
}
-typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
- struct osc_page *, void *);
-int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
- struct osc_object *osc, pgoff_t start, pgoff_t end,
- osc_page_gang_cbt cb, void *cbdata);
-int osc_discard_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata);
+typedef bool (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
+ struct osc_page *, void *);
+bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
+ struct osc_object *osc, pgoff_t start, pgoff_t end,
+ osc_page_gang_cbt cb, void *cbdata);
+bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata);
/* osc_dev.c */
int osc_device_init(const struct lu_env *env, struct lu_device *d,
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios);
-int osc_io_write_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios);
-void osc_io_write_iter_fini(const struct lu_env *env,
+int osc_io_rw_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios);
+void osc_io_rw_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios);
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_setattr_end(const struct lu_env *env,
const struct cl_lock_slice *slice);
void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data);
+unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
/*****************************************************************************
*
return (struct cl_object *)&obj->oo_cl;
}
+/* Map an obd_device to its containing osc_device through the embedded
+ * cl_device/lu_device (od_cl.cd_lu_dev). Parameter renamed d -> obd for
+ * clarity; behaviour unchanged. */
-static inline struct osc_device *obd2osc_dev(const struct obd_device *d)
+static inline struct osc_device *obd2osc_dev(const struct obd_device *obd)
{
-	return container_of0(d->obd_lu_dev, struct osc_device,
+	return container_of0(obd->obd_lu_dev, struct osc_device,
		od_cl.cd_lu_dev);
}
static inline pgoff_t osc_index(struct osc_page *opg)
{
-	return opg->ops_cl.cpl_index;
+	/* page index now lives on the cl_page (cp_osc_index) rather than on
+	 * the per-layer page slice */
+	return opg->ops_cl.cpl_page->cp_osc_index;
}
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
/** osc_object of this extent */
struct osc_object *oe_obj;
/** refcount, removed from red-black tree if reaches zero. */
- atomic_t oe_refc;
+ struct kref oe_refc;
/** busy if non-zero */
atomic_t oe_users;
/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
/** state of this extent */
enum osc_extent_state oe_state;
/** flags for this extent. */
- unsigned int oe_intree:1,
/** 0 is write, 1 is read */
- oe_rw:1,
+ unsigned int oe_rw:1,
/** sync extent, queued by osc_queue_sync_pages() */
oe_sync:1,
/** set if this extent has partial, sync pages.
* called by page WB daemon, or sync write or reading requests. */
oe_urgent:1,
/** Non-delay RPC should be used for this extent. */
- oe_ndelay:1;
+ oe_ndelay:1,
+ /** direct IO pages */
+ oe_dio:1;
/** how many grants allocated for this extent.
* Grant allocated for this extent. There is no grant allocated
* for reading extents and sync write extents. */
unsigned int oe_nr_pages;
/** list of pending oap pages. Pages in this list are NOT sorted. */
struct list_head oe_pages;
- /** Since an extent has to be written out in atomic, this is used to
- * remember the next page need to be locked to write this extent out.
- * Not used right now.
- */
- struct osc_page *oe_next_page;
/** start and end index of this extent, include start and end
* themselves. Page offset here is the page index of osc_pages.
* oe_start is used as keyword for red-black tree. */