* Fields in cl_attr that are being set.
*/
enum cl_attr_valid {
- CAT_SIZE = 1 << 0,
- CAT_KMS = 1 << 1,
- CAT_MTIME = 1 << 3,
- CAT_ATIME = 1 << 4,
- CAT_CTIME = 1 << 5,
- CAT_BLOCKS = 1 << 6,
- CAT_UID = 1 << 7,
- CAT_GID = 1 << 8,
- CAT_PROJID = 1 << 9
+ CAT_SIZE = BIT(0),
+ CAT_KMS = BIT(1),
+ CAT_MTIME = BIT(3),
+ CAT_ATIME = BIT(4),
+ CAT_CTIME = BIT(5),
+ CAT_BLOCKS = BIT(6),
+ CAT_UID = BIT(7),
+ CAT_GID = BIT(8),
+ CAT_PROJID = BIT(9),
};
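+/*
+ * A minimal usage sketch (illustrative, not part of this change): callers
+ * fill the matching fields of struct cl_attr and pass a mask built from
+ * the bits above, e.g., assuming the usual cl_object_attr_update() helper:
+ *
+ *	attr->cat_size  = new_size;
+ *	attr->cat_mtime = ktime_get_real_seconds();
+ *	cl_object_attr_update(env, obj, attr, CAT_SIZE | CAT_MTIME);
+ */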
/**
struct cl_page {
/** Reference counter. */
atomic_t cp_ref;
+ /* which slab kmem index this memory was allocated from */
+ int cp_kmem_index;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
/** vmpage */
struct cl_sync_io *cp_sync_io;
/** layout_entry + stripe index, composed using lov_comp_index() */
unsigned int cp_lov_index;
+ /** page index within the OSC object; replaces the per-slice cpl_index */
+ pgoff_t cp_osc_index;
};
/**
*/
struct cl_page_slice {
struct cl_page *cpl_page;
- pgoff_t cpl_index;
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
void (*cpo_clip)(const struct lu_env *env,
const struct cl_page_slice *slice,
int from, int to);
- /**
- * \pre the page was queued for transferring.
- * \post page is removed from client's pending list, or -EBUSY
- * is returned if it has already been in transferring.
- *
- * This is one of seldom page operation which is:
- * 0. called from top level;
- * 1. don't have vmpage locked;
- * 2. every layer should synchronize execution of its ->cpo_cancel()
- * with completion handlers. Osc uses client obd lock for this
- * purpose. Based on there is no vvp_page_cancel and
- * lov_page_cancel(), cpo_cancel is defacto protected by client lock.
- *
- * \see osc_page_cancel().
- */
- int (*cpo_cancel)(const struct lu_env *env,
- const struct cl_page_slice *slice);
/**
* Write out a page by the kernel. This is only called by ll_writepage
* right now.
struct cl_page_list {
unsigned pl_nr;
struct list_head pl_pages;
- struct task_struct *pl_owner;
};
/**
/* Maximum page index where the readahead window will end.
* This is determined by DLM lock coverage, RPC and stripe boundaries.
* cra_end_idx is included. */
- pgoff_t cra_end;
+ pgoff_t cra_end_idx;
/* optimal RPC size for this read, in pages */
- unsigned long cra_rpc_size;
+ unsigned long cra_rpc_pages;
/* Release callback. If readahead holds resources underneath, this
* function should be called to release it. */
- void (*cra_release)(const struct lu_env *env, void *cbdata);
+ void (*cra_release)(const struct lu_env *env, void *cbdata);
/* Callback data for cra_release routine */
- void *cra_cbdata;
+ void *cra_cbdata;
/* whether lock is in contention */
- bool cra_contention;
+ bool cra_contention;
};
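+/*
+ * A minimal usage sketch (illustrative, not part of this change): a layer
+ * that pins resources for the readahead window (a DLM lock reference, for
+ * instance) sets cra_release/cra_cbdata, and the consumer drops them with
+ * cl_read_ahead_release() once the window has been used:
+ *
+ *	struct cl_read_ahead ra = { 0 };
+ *
+ *	rc = cl_io_read_ahead(env, io, start, &ra);
+ *	if (rc == 0) {
+ *		... read ahead up to ra.cra_end_idx ...
+ *		cl_read_ahead_release(env, &ra);
+ *	}
+ */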
static inline void cl_read_ahead_release(const struct lu_env *env,
size_t crw_count;
int crw_nonblock;
};
+enum cl_setattr_subtype {
+ /** regular setattr **/
+ CL_SETATTR_REG = 1,
+ /** truncate(2) **/
+ CL_SETATTR_TRUNC,
+ /** fallocate(2) - mode preallocate **/
+ CL_SETATTR_FALLOCATE
+};
+
+struct cl_io_range {
+ loff_t cir_pos; /* starting file offset */
+ size_t cir_count; /* number of bytes */
+};
+
+/* State for one chunk of a read/write that has been split into sub-IOs;
+ * chunks are chained through cip_next.
+ */
+struct cl_io_pt {
+ struct cl_io_pt *cip_next; /* next chunk in the chain */
+ struct kiocb cip_iocb; /* kiocb for this chunk */
+ struct iov_iter cip_iter; /* iov_iter covering this chunk's range */
+ struct file *cip_file; /* file being read or written */
+ enum cl_io_type cip_iot; /* CIT_READ or CIT_WRITE */
+ unsigned int cip_need_restart:1; /* set if this sub-IO must be restarted */
+ loff_t cip_pos; /* starting file offset of this chunk */
+ size_t cip_count; /* number of bytes in this chunk */
+ ssize_t cip_result; /* bytes transferred, or negative error */
+};
/**
* State for io.
int sa_stripe_index;
struct ost_layout sa_layout;
const struct lu_fid *sa_parent_fid;
+ /* SETATTR interface is used for regular setattr,
+  * truncate(2) and fallocate(2) subtypes
+  */
+ enum cl_setattr_subtype sa_subtype;
+ /* The following are used for fallocate(2) */
+ int sa_falloc_mode;
+ loff_t sa_falloc_offset;
+ loff_t sa_falloc_len;
+ loff_t sa_falloc_end;
} ci_setattr;
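+ /*
+  * A minimal sketch (illustrative, not part of this change): a
+  * fallocate(2) caller is expected to tag the setattr io and fill
+  * the new fields, roughly:
+  *
+  *	io->u.ci_setattr.sa_subtype       = CL_SETATTR_FALLOCATE;
+  *	io->u.ci_setattr.sa_falloc_mode   = mode;
+  *	io->u.ci_setattr.sa_falloc_offset = offset;
+  *	io->u.ci_setattr.sa_falloc_len    = len;
+  *	io->u.ci_setattr.sa_falloc_end    = offset + len;
+  */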
struct cl_data_version_io {
u64 dv_data_version;
*/
ci_async_readahead:1,
/**
+ * Ignore lockless and do normal locking for this io.
+ */
+ ci_ignore_lockless:1,
+ /**
* Set if we've tried all mirrors for this read IO; if it's not set,
* the read IO will check the status of the to-be-read OSCs and
* fast-switch to another mirror if some of the OSTs are not healthy.
*/
ci_tried_all_mirrors:1;
/**
+ * Bypass quota check
+ */
+ unsigned ci_noquota:1;
+ /**
* How many times the read has been retried before this one.
* Set by the top level and consumed by the LOV.
*/
static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
{
- LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
- return container_of0(d, struct cl_device, cd_lu_dev);
+ LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
+ return container_of_safe(d, struct cl_device, cd_lu_dev);
}
static inline struct lu_device *cl2lu_dev(struct cl_device *d)
static inline struct cl_object *lu2cl(const struct lu_object *o)
{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
- return container_of0(o, struct cl_object, co_lu);
+ LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
+ return container_of_safe(o, struct cl_object, co_lu);
}
static inline const struct cl_object_conf *
lu2cl_conf(const struct lu_object_conf *conf)
{
- return container_of0(conf, struct cl_object_conf, coc_lu);
+ return container_of_safe(conf, struct cl_object_conf, coc_lu);
}
static inline struct cl_object *cl_object_next(const struct cl_object *obj)
{
- return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
+ return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
}
static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
{
- return container_of0(h, struct cl_object_header, coh_lu);
+ return container_of_safe(h, struct cl_object_header, coh_lu);
}
static inline struct cl_site *cl_object_site(const struct cl_object *obj)
{
- return lu2cl_site(obj->co_lu.lo_dev->ld_site);
+ return lu2cl_site(obj->co_lu.lo_dev->ld_site);
}
static inline
struct cl_object_header *cl_object_header(const struct cl_object *obj)
{
- return luh2coh(obj->co_lu.lo_header);
+ return luh2coh(obj->co_lu.lo_header);
}
static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
{
- return lu_device_init(&d->cd_lu_dev, t);
+ return lu_device_init(&d->cd_lu_dev, t);
}
static inline void cl_device_fini(struct cl_device *d)
{
- lu_device_fini(&d->cd_lu_dev);
+ lu_device_fini(&d->cd_lu_dev);
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj, pgoff_t index,
+ struct cl_object *obj,
const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
/** \defgroup cl_page cl_page
* @{ */
-enum {
- CLP_GANG_OKAY = 0,
- CLP_GANG_RESCHED,
- CLP_GANG_AGAIN,
- CLP_GANG_ABORT
-};
-/* callback of cl_page_gang_lookup() */
-
struct cl_page *cl_page_find (const struct lu_env *env,
struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
struct cl_page *pg, enum cl_req_type crt);
void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
int from, int to);
-int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
int cl_page_flush (const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
* Used at umounting time and signaled on BRW commit
*/
wait_queue_head_t ccc_unstable_waitq;
+ /**
+ * Serialize max_cache_mb write operation
+ */
+ struct mutex ccc_max_cache_mb_lock;
};
/**
* cl_cache functions
pgoff_t start, struct cl_read_ahead *ra);
void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
size_t nob);
-int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue);
/**
* True, iff \a io is an O_APPEND write(2).
static inline int cl_io_is_trunc(const struct cl_io *io)
{
return io->ci_type == CIT_SETATTR &&
- (io->u.ci_setattr.sa_avalid & ATTR_SIZE);
+ (io->u.ci_setattr.sa_avalid & ATTR_SIZE) &&
+ (io->u.ci_setattr.sa_subtype != CL_SETATTR_FALLOCATE);
+}
+
+static inline int cl_io_is_fallocate(const struct cl_io *io)
+{
+ return (io->ci_type == CIT_SETATTR) &&
+ (io->u.ci_setattr.sa_subtype == CL_SETATTR_FALLOCATE);
}
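+/*
+ * A minimal sketch (illustrative, not part of this change): with the
+ * subtype recorded, a setattr path can tell the two cases apart:
+ *
+ *	if (cl_io_is_trunc(io))
+ *		... regular truncate to the new size ...
+ *	else if (cl_io_is_fallocate(io))
+ *		... preallocation over [sa_falloc_offset, sa_falloc_end) ...
+ */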
struct cl_io *cl_io_top(struct cl_io *io);
* @{ */
struct cl_sync_io;
+struct cl_dio_aio;
typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *);
void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
- cl_sync_io_end_t *end);
+ struct cl_dio_aio *aio, cl_sync_io_end_t *end);
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout);
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
int ioret);
+struct cl_dio_aio *cl_aio_alloc(struct kiocb *iocb);
static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
{
- cl_sync_io_init_notify(anchor, nr, NULL);
+ cl_sync_io_init_notify(anchor, nr, NULL, NULL);
}
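+/*
+ * The synchronous pattern is unchanged (sketch): the anchor is initialized
+ * for N transfers, every completion calls cl_sync_io_note(), and the
+ * submitter waits:
+ *
+ *	struct cl_sync_io anchor;
+ *
+ *	cl_sync_io_init(&anchor, nr_pages);
+ *	... submit nr_pages transfers; each completion handler eventually
+ *	    calls cl_sync_io_note(env, &anchor, ioret) ...
+ *	rc = cl_sync_io_wait(env, &anchor, timeout);
+ */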
/**
wait_queue_head_t csi_waitq;
/** callback to invoke when this IO is finished */
cl_sync_io_end_t *csi_end_io;
+ /** aio private data */
+ struct cl_dio_aio *csi_aio;
+};
+
+/** To support Direct AIO */
+struct cl_dio_aio {
+ struct cl_sync_io cda_sync;
+ struct cl_page_list cda_pages;
+ struct kiocb *cda_iocb;
+ ssize_t cda_bytes;
};
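+/*
+ * A minimal sketch (illustrative, not part of this change): for async
+ * direct IO the submitter allocates the aio descriptor around the kiocb
+ * and hooks it into the anchor, so the end callback can complete the
+ * kiocb once every page has been noted. my_aio_end below is a
+ * hypothetical cl_sync_io_end_t callback:
+ *
+ *	struct cl_dio_aio *aio = cl_aio_alloc(iocb);
+ *
+ *	cl_sync_io_init_notify(&aio->cda_sync, nr, aio, my_aio_end);
+ *	... queue pages on aio->cda_pages and submit them; each completion
+ *	    calls cl_sync_io_note(env, &aio->cda_sync, ioret) ...
+ */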
/** @} cl_sync_io */