#include <libcfs/libcfs.h>
#include <obd.h>
#include <cl_object.h>
+#include <lustre_crypto.h>
/** \defgroup osc osc
* @{
return container_of(pga, struct osc_async_page, oap_brw_page);
}
-struct osc_cache_waiter {
- struct list_head ocw_entry;
- wait_queue_head_t ocw_waitq;
- struct osc_async_page *ocw_oap;
- int ocw_grant;
- int ocw_rc;
-};
-
struct osc_device {
struct cl_device od_cl;
struct obd_export *od_exp;
* Just check if the desired lock exists, it won't hold reference
* count on lock.
*/
- OSC_DAP_FL_TEST_LOCK = 1 << 0,
+ OSC_DAP_FL_TEST_LOCK = BIT(0),
/**
* Return the lock even if it is being canceled.
*/
- OSC_DAP_FL_CANCELING = 1 << 1
+ OSC_DAP_FL_CANCELING = BIT(1),
};
/*
int aa_requested_nob;
int aa_nio_count;
u32 aa_page_count;
- int aa_resends;
+ s32 aa_resends;
struct brw_page **aa_ppga;
struct client_obd *aa_cli;
struct list_head aa_oaps;
pgoff_t start, pgoff_t end);
int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc, int async);
-void osc_wake_cache_waiters(struct client_obd *cli);
+/*
+ * Wake every thread sleeping on cli->cl_cache_waiters.
+ * NOTE(review): waiters presumably block here for cache/grant space
+ * (the old list-based osc_cache_waiter machinery was removed in favor
+ * of this plain wait queue) -- confirm against osc_enter_cache().
+ */
+static inline void osc_wake_cache_waiters(struct client_obd *cli)
+{
+ wake_up(&cli->cl_cache_waiters);
+}
static inline int osc_io_unplug_async(const struct lu_env *env,
struct client_obd *cli,
static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
{
- return container_of0(d, struct osc_device, od_cl.cd_lu_dev);
+ /* container_of_safe() propagates NULL/IS_ERR inputs instead of
+  * dereferencing them (container_of0 was renamed to this). */
+ return container_of_safe(d, struct osc_device, od_cl.cd_lu_dev);
}
static inline struct obd_export *osc_export(const struct osc_object *obj)
static inline struct osc_object *cl2osc(const struct cl_object *obj)
{
- return container_of0(obj, struct osc_object, oo_cl);
+ /* NULL/ERR_PTR-safe downcast from the cl_object slice to osc_object */
+ return container_of_safe(obj, struct osc_object, oo_cl);
}
static inline struct cl_object *osc2cl(const struct osc_object *obj)
static inline struct osc_device *obd2osc_dev(const struct obd_device *obd)
{
- return container_of0(obd->obd_lu_dev, struct osc_device,
- od_cl.cd_lu_dev);
+ /* obd->obd_lu_dev may be NULL/ERR_PTR; container_of_safe() passes
+  * such values through unchanged. */
+ return container_of_safe(obd->obd_lu_dev, struct osc_device,
+ od_cl.cd_lu_dev);
}
static inline struct lu_device *osc2lu_dev(struct osc_device *osc)
static inline struct osc_object *lu2osc(const struct lu_object *obj)
{
- return container_of0(obj, struct osc_object, oo_cl.co_lu);
+ /* NULL/ERR_PTR-safe downcast from the embedded lu_object */
+ return container_of_safe(obj, struct osc_object, oo_cl.co_lu);
}
static inline struct osc_io *cl2osc_io(const struct lu_env *env,
const struct cl_io_slice *slice)
{
- struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);
+ struct osc_io *oio = container_of(slice, struct osc_io, oi_cl);
LINVRNT(oio == osc_env_io(env));
return oio;
static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
{
- return container_of0(slice, struct osc_page, ops_cl);
+ /* NULL/ERR_PTR-safe downcast from the cl_page_slice embedded in osc_page */
+ return container_of_safe(slice, struct osc_page, ops_cl);
}
static inline struct osc_page *oap2osc(struct osc_async_page *oap)
{
- return container_of0(oap, struct osc_page, ops_oap);
+ /* NULL/ERR_PTR-safe map from the async page back to its osc_page */
+ return container_of_safe(oap, struct osc_page, ops_oap);
}
static inline pgoff_t osc_index(struct osc_page *opg)
{
- return opg->ops_cl.cpl_index;
+ /* The page index moved off the per-layer slice onto the cl_page
+  * itself (cp_osc_index); fetch it through the slice's back pointer. */
+ return opg->ops_cl.cpl_page->cp_osc_index;
}
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
{
- return container_of0(slice, struct osc_lock, ols_cl);
+ /* NULL/ERR_PTR-safe downcast from the cl_lock_slice to osc_lock */
+ return container_of_safe(slice, struct osc_lock, ols_cl);
}
static inline int osc_io_srvlock(struct osc_io *oio)
/** osc_object of this extent */
struct osc_object *oe_obj;
/** refcount, removed from red-black tree if reaches zero. */
- atomic_t oe_refc;
+ struct kref oe_refc;
/** busy if non-zero */
atomic_t oe_users;
/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
/** state of this extent */
enum osc_extent_state oe_state;
/** flags for this extent. */
- unsigned int oe_intree:1,
/** 0 is write, 1 is read */
- oe_rw:1,
+ unsigned int oe_rw:1,
/** sync extent, queued by osc_queue_sync_pages() */
oe_sync:1,
/** set if this extent has partial, sync pages.
/** Non-delay RPC should be used for this extent. */
oe_ndelay:1,
/** direct IO pages */
- oe_dio:1;
+ oe_dio:1,
+ /** this extent consists of RDMA only pages */
+ oe_is_rdma_only:1;
/** how many grants allocated for this extent.
* Grant allocated for this extent. There is no grant allocated
* for reading extents and sync write extents. */