* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2016, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
/*
 * super-class definitions.
 */
+#include <linux/aio.h>
+#include <linux/fs.h>
+
#include <libcfs/libcfs.h>
-#include <libcfs/libcfs_ptask.h>
#include <lu_object.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <linux/pagevec.h>
#include <lustre_dlm.h>
struct obd_info;
struct cl_req_attr;
-extern struct cfs_ptask_engine *cl_io_engine;
-
/**
* Device in the client stack.
*
* Fields in cl_attr that are being set.
*/
enum cl_attr_valid {
- CAT_SIZE = 1 << 0,
- CAT_KMS = 1 << 1,
- CAT_MTIME = 1 << 3,
- CAT_ATIME = 1 << 4,
- CAT_CTIME = 1 << 5,
- CAT_BLOCKS = 1 << 6,
- CAT_UID = 1 << 7,
- CAT_GID = 1 << 8,
- CAT_PROJID = 1 << 9
+ CAT_SIZE = BIT(0),
+ CAT_KMS = BIT(1),
+ /* note: BIT(2) is unused -- the gap is preserved from the old 1 << n values */
+ CAT_MTIME = BIT(3),
+ CAT_ATIME = BIT(4),
+ CAT_CTIME = BIT(5),
+ CAT_BLOCKS = BIT(6),
+ CAT_UID = BIT(7),
+ CAT_GID = BIT(8),
+ CAT_PROJID = BIT(9),
};
/**
struct lu_buf cl_buf;
/** size of layout in lov_mds_md format. */
size_t cl_size;
- /** size of DoM component if exists or zero otherwise */
- u64 cl_dom_comp_size;
/** Layout generation. */
u32 cl_layout_gen;
/** whether layout is a composite one */
bool cl_is_composite;
+ /** Whether layout is a HSM released one */
+ bool cl_is_released;
};
/**
void (*coo_req_attr_set)(const struct lu_env *env,
struct cl_object *obj,
struct cl_req_attr *attr);
+ /**
+ * Flush \a obj data corresponding to \a lock. Used for DoM
+ * locks in llite's cancelling blocking ast callback.
+ */
+ int (*coo_object_flush)(const struct lu_env *env,
+ struct cl_object *obj,
+ struct ldlm_lock *lock);
};
/**
struct cl_page {
/** Reference counter. */
atomic_t cp_ref;
+ /* which slab kmem index this memory allocated from */
+ int cp_kmem_index;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
/** vmpage */
struct lu_ref_link cp_queue_ref;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
+ /** layout_entry + stripe index, composed using lov_comp_index() */
+ unsigned int cp_lov_index;
+ pgoff_t cp_osc_index;
};
/**
*/
struct cl_page_slice {
struct cl_page *cpl_page;
- pgoff_t cpl_index;
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
*/
int (*cpo_is_vmlocked)(const struct lu_env *env,
const struct cl_page_slice *slice);
+
+ /**
+ * Update file attributes when all we have is this page. Used for tiny
+ * writes to update attributes when we don't have a full cl_io.
+ */
+ void (*cpo_page_touch)(const struct lu_env *env,
+ const struct cl_page_slice *slice, size_t to);
/**
* Page destruction.
*/
const struct cl_page_slice *slice);
/** Destructor. Frees resources and slice itself. */
void (*cpo_fini)(const struct lu_env *env,
- struct cl_page_slice *slice);
+ struct cl_page_slice *slice,
+ struct pagevec *pvec);
/**
* Optional debugging helper. Prints given page slice.
*
void (*cpo_clip)(const struct lu_env *env,
const struct cl_page_slice *slice,
int from, int to);
- /**
- * \pre the page was queued for transferring.
- * \post page is removed from client's pending list, or -EBUSY
- * is returned if it has already been in transferring.
- *
- * This is one of seldom page operation which is:
- * 0. called from top level;
- * 1. don't have vmpage locked;
- * 2. every layer should synchronize execution of its ->cpo_cancel()
- * with completion handlers. Osc uses client obd lock for this
- * purpose. Based on there is no vvp_page_cancel and
- * lov_page_cancel(), cpo_cancel is defacto protected by client lock.
- *
- * \see osc_page_cancel().
- */
- int (*cpo_cancel)(const struct lu_env *env,
- const struct cl_page_slice *slice);
/**
* Write out a page by kernel. This is only called by ll_writepage
* right now.
* (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
* cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
*
- * Typical cl_lock consists of the two layers:
+ * Typical cl_lock consists of one layer:
*
- * - vvp_lock (vvp specific data), and
* - lov_lock (lov specific data).
*
* lov_lock contains an array of sub-locks. Each of these sub-locks is a
* normal cl_lock: it has a header (struct cl_lock) and a list of layers:
*
- * - lovsub_lock, and
* - osc_lock
*
* Each sub-lock is associated with a cl_object (representing stripe
/**
* Per-layer part of cl_lock
*
- * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
+ * \see lov_lock, osc_lock
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
/**
*
- * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
+ * \see lov_lock_ops, osc_lock_ops
*/
struct cl_lock_operations {
/** @{ */
* @anchor for resources
* \retval -ve failure
*
- * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
- * \see osc_lock_enqueue()
+ * \see lov_lock_enqueue(), osc_lock_enqueue()
*/
int (*clo_enqueue)(const struct lu_env *env,
const struct cl_lock_slice *slice,
/**
* Destructor. Frees resources and the slice.
*
- * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
- * \see osc_lock_fini()
+ * \see lov_lock_fini(), osc_lock_fini()
*/
void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
/**
/* A simple counted list of cl_pages queued for an operation. */
struct cl_page_list {
	unsigned pl_nr;			/* number of pages on pl_pages */
	struct list_head pl_pages;	/* the queued cl_pages */
- struct task_struct *pl_owner;
};
/**
};
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
- struct cl_page *);
+ struct pagevec *);
struct cl_read_ahead {
	/* Maximum page index the readahead window will end.
	 * This is determined by DLM lock coverage, RPC and stripe boundary.
	 * cra_end_idx is inclusive. */
- pgoff_t cra_end;
+ pgoff_t cra_end_idx;
	/* optimal RPC size for this read, by pages */
- unsigned long cra_rpc_size;
+ unsigned long cra_rpc_pages;
	/* Release callback. If readahead holds resources underneath, this
	 * function should be called to release it. */
- void (*cra_release)(const struct lu_env *env, void *cbdata);
+ void (*cra_release)(const struct lu_env *env, void *cbdata);
	/* Callback data for cra_release routine */
- void *cra_cbdata;
+ void *cra_cbdata;
	/* whether lock is in contention */
+ bool cra_contention;
};
static inline void cl_read_ahead_release(const struct lu_env *env,
CL_FSYNC_ALL = 3
};
+/* State shared by read and write IO: position, size and blocking mode. */
+struct cl_io_rw_common {
+ loff_t crw_pos; /* starting file offset */
+ size_t crw_count; /* number of bytes to transfer */
+ int crw_nonblock; /* non-zero for non-blocking IO (from the name; confirm at call sites) */
+};
+/* Identifies which operation is driving a CIT_SETATTR IO. */
+enum cl_setattr_subtype {
+ /** regular setattr **/
+ CL_SETATTR_REG = 1,
+ /** truncate(2) **/
+ CL_SETATTR_TRUNC,
+ /** fallocate(2) - mode preallocate **/
+ CL_SETATTR_FALLOCATE
+};
+
/* A contiguous byte range of an IO. */
struct cl_io_range {
	loff_t cir_pos;		/* starting file offset */
	size_t cir_count;	/* length in bytes */
};
/* Per-chunk state for a split IO.
 * NOTE(review): the cfs_ptask engine was removed, so cip_task is dropped
 * and the remaining fields are re-indented. */
struct cl_io_pt {
- struct cl_io_pt *cip_next;
- struct cfs_ptask cip_task;
- struct kiocb cip_iocb;
- struct iov_iter cip_iter;
- struct file *cip_file;
- enum cl_io_type cip_iot;
- unsigned int cip_need_restart:1;
- loff_t cip_pos;
- size_t cip_count;
- ssize_t cip_result;
+ struct cl_io_pt *cip_next; /* next chunk in the chain */
+ struct kiocb cip_iocb;
+ struct iov_iter cip_iter;
+ struct file *cip_file;
+ enum cl_io_type cip_iot; /* IO type of this chunk */
+ unsigned int cip_need_restart:1; /* set when the IO must be restarted */
+ loff_t cip_pos; /* file offset of this chunk */
+ size_t cip_count; /* byte count of this chunk */
+ ssize_t cip_result; /* bytes transferred, or negative errno -- TODO confirm */
};
/**
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
enum cl_io_lock_dmd ci_lockreq;
- union {
- struct cl_rw_io {
- struct iov_iter rw_iter;
- struct kiocb rw_iocb;
- struct cl_io_range rw_range;
- struct file *rw_file;
- unsigned int rw_nonblock:1,
- rw_append:1,
- rw_sync:1;
- int (*rw_ptask)(struct cfs_ptask *ptask);
- } ci_rw;
+ /** layout version when this IO occurs */
+ __u32 ci_layout_version;
+ union {
+ struct cl_rd_io {
+ struct cl_io_rw_common rd;
+ } ci_rd;
+ struct cl_wr_io {
+ struct cl_io_rw_common wr;
+ int wr_append;
+ int wr_sync;
+ } ci_wr;
+ struct cl_io_rw_common ci_rw;
struct cl_setattr_io {
struct ost_lvb sa_attr;
unsigned int sa_attr_flags;
- unsigned int sa_valid;
+ unsigned int sa_avalid; /* ATTR_* */
+ unsigned int sa_xvalid; /* OP_XVALID */
int sa_stripe_index;
struct ost_layout sa_layout;
const struct lu_fid *sa_parent_fid;
+ /* SETATTR interface is used for regular setattr, */
+ /* truncate(2) and fallocate(2) subtypes */
+ enum cl_setattr_subtype sa_subtype;
+ /* The following are used for fallocate(2) */
+ int sa_falloc_mode;
+ loff_t sa_falloc_offset;
+ loff_t sa_falloc_len;
+ loff_t sa_falloc_end;
} ci_setattr;
struct cl_data_version_io {
u64 dv_data_version;
+ u32 dv_layout_version;
int dv_flags;
} ci_data_version;
struct cl_fault_io {
*/
ci_ignore_layout:1,
/**
- * Need MDS intervention to complete a write. This usually means the
- * corresponding component is not initialized for the writing extent.
+ * Need MDS intervention to complete a write.
+ * Write intent is required for the following cases:
+ * 1. component being written is not initialized, or
+ * 2. the mirrored files are NOT in WRITE_PENDING state.
*/
ci_need_write_intent:1,
/**
* O_NOATIME
*/
ci_noatime:1,
- /** Set to 1 if parallel execution is allowed for current I/O? */
- ci_pio:1,
/* Tell sublayers not to expand LDLM locks requested for this IO */
ci_lock_no_expand:1,
/**
* mirror is inaccessible, non-delay RPC would error out quickly so
* that the upper layer can try to access the next mirror.
*/
- ci_ndelay:1;
+ ci_ndelay:1,
+ /**
+ * Set if IO is triggered by async workqueue readahead.
+ */
+ ci_async_readahead:1,
+ /**
+ * Ignore lockless and do normal locking for this io.
+ */
+ ci_ignore_lockless:1,
+ /**
+ * Set if we've tried all mirrors for this read IO, if it's not set,
+ * the read IO will check to-be-read OSCs' status, and make fast-switch
+ * another mirror if some of the OSTs are not healthy.
+ */
+ ci_tried_all_mirrors:1;
+ /**
+ * Bypass quota check
+ */
+ unsigned ci_noquota:1;
/**
* How many times the read has retried before this one.
* Set by the top level and consumed by the LOV.
*/
unsigned ci_ndelay_tried;
/**
+ * Designated mirror index for this I/O.
+ */
+ unsigned ci_designated_mirror;
+ /**
* Number of pages owned by this IO. For invariant checking.
*/
unsigned ci_owned_nr;
+ /**
+ * Range of write intent. Valid if ci_need_write_intent is set.
+ */
+ struct lu_extent ci_write_intent;
};
/** @} cl_io */
/* Convert a lu_device to its enclosing cl_device; the LASSERT shows that
 * NULL and ERR_PTR inputs are tolerated and passed through. */
static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
{
- LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
- return container_of0(d, struct cl_device, cd_lu_dev);
+ LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
+ return container_of_safe(d, struct cl_device, cd_lu_dev);
}
static inline struct lu_device *cl2lu_dev(struct cl_device *d)
/* Convert a lu_object slice to its enclosing cl_object (NULL/ERR_PTR pass
 * through, per the LASSERT). */
static inline struct cl_object *lu2cl(const struct lu_object *o)
{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
- return container_of0(o, struct cl_object, co_lu);
+ LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
+ return container_of_safe(o, struct cl_object, co_lu);
}
/* Convert a lu_object_conf to its enclosing cl_object_conf. */
static inline const struct cl_object_conf *
lu2cl_conf(const struct lu_object_conf *conf)
{
- return container_of0(conf, struct cl_object_conf, coc_lu);
+ return container_of_safe(conf, struct cl_object_conf, coc_lu);
}
/* Return the next slice of a layered cl_object; a NULL object yields NULL. */
static inline struct cl_object *cl_object_next(const struct cl_object *obj)
{
- return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
+ return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
}
/* Convert a lu_object_header to its enclosing cl_object_header. */
static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
{
- return container_of0(h, struct cl_object_header, coh_lu);
+ return container_of_safe(h, struct cl_object_header, coh_lu);
}
/* Return the cl_site of the device \a obj belongs to. */
static inline struct cl_site *cl_object_site(const struct cl_object *obj)
{
- return lu2cl_site(obj->co_lu.lo_dev->ld_site);
+ return lu2cl_site(obj->co_lu.lo_dev->ld_site);
}
/* Return the cl_object_header embedding \a obj's lu_object_header. */
static inline
struct cl_object_header *cl_object_header(const struct cl_object *obj)
{
- return luh2coh(obj->co_lu.lo_header);
+ return luh2coh(obj->co_lu.lo_header);
}
/* Initialize the embedded lu_device; returns lu_device_init()'s result. */
static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
{
- return lu_device_init(&d->cd_lu_dev, t);
+ return lu_device_init(&d->cd_lu_dev, t);
}
/* Finalize the embedded lu_device. */
static inline void cl_device_fini(struct cl_device *d)
{
- lu_device_fini(&d->cd_lu_dev);
+ lu_device_fini(&d->cd_lu_dev);
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj, pgoff_t index,
+ struct cl_object *obj,
const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
struct cl_layout *cl);
loff_t cl_object_maxbytes(struct cl_object *obj);
+int cl_object_flush(const struct lu_env *env, struct cl_object *obj,
+ struct ldlm_lock *lock);
+
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
/** \defgroup cl_page cl_page
* @{ */
-enum {
- CLP_GANG_OKAY = 0,
- CLP_GANG_RESCHED,
- CLP_GANG_AGAIN,
- CLP_GANG_ABORT
-};
-/* callback of cl_page_gang_lookup() */
-
struct cl_page *cl_page_find (const struct lu_env *env,
struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
void cl_page_get (struct cl_page *page);
void cl_page_put (const struct lu_env *env,
struct cl_page *page);
+void cl_pagevec_put (const struct lu_env *env,
+ struct cl_page *page,
+ struct pagevec *pvec);
void cl_page_print (const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
struct cl_page *pg, enum cl_req_type crt);
void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
int from, int to);
-int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
int cl_page_flush (const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
int cl_page_is_vmlocked(const struct lu_env *env,
const struct cl_page *pg);
+void cl_page_touch(const struct lu_env *env, const struct cl_page *pg,
+ size_t to);
void cl_page_export(const struct lu_env *env,
struct cl_page *pg, int uptodate);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
* Used at umounting time and signaled on BRW commit
*/
wait_queue_head_t ccc_unstable_waitq;
+ /**
+ * Serialize max_cache_mb write operation
+ */
+ struct mutex ccc_max_cache_mb_lock;
};
/**
* cl_cache functions
pgoff_t start, struct cl_read_ahead *ra);
void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
size_t nob);
-int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue);
/**
* True, iff \a io is an O_APPEND write(2).
*/
static inline int cl_io_is_append(const struct cl_io *io)
{
- return io->ci_type == CIT_WRITE && io->u.ci_rw.rw_append;
+ /* the append flag now lives in the write-specific ci_wr union arm */
+ return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
}
/* True iff \a io is a synchronous write (wr_sync set on the ci_wr arm). */
static inline int cl_io_is_sync_write(const struct cl_io *io)
{
- return io->ci_type == CIT_WRITE && io->u.ci_rw.rw_sync;
+ return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
}
static inline int cl_io_is_mkwrite(const struct cl_io *io)
*/
static inline int cl_io_is_trunc(const struct cl_io *io)
{
- return io->ci_type == CIT_SETATTR &&
- (io->u.ci_setattr.sa_valid & ATTR_SIZE);
+ /* truncate is a size-changing setattr that is NOT a fallocate subtype */
+ return io->ci_type == CIT_SETATTR &&
+ (io->u.ci_setattr.sa_avalid & ATTR_SIZE) &&
+ (io->u.ci_setattr.sa_subtype != CL_SETATTR_FALLOCATE);
+}
+
+/* True iff \a io is a fallocate(2)-driven setattr. */
+static inline int cl_io_is_fallocate(const struct cl_io *io)
+{
+ return (io->ci_type == CIT_SETATTR) &&
+ (io->u.ci_setattr.sa_subtype == CL_SETATTR_FALLOCATE);
}
struct cl_io *cl_io_top(struct cl_io *io);
/** \defgroup cl_sync_io cl_sync_io
* @{ */
+struct cl_sync_io;
+struct cl_dio_aio;
+
+typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *);
+
+void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
+ struct cl_dio_aio *aio, cl_sync_io_end_t *end);
+
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
+ long timeout);
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret);
+struct cl_dio_aio *cl_aio_alloc(struct kiocb *iocb);
+/* Initialize \a anchor for \a nr transfers, with no AIO context and no
+ * end-IO callback (both passed as NULL to cl_sync_io_init_notify()). */
+static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
+{
+ cl_sync_io_init_notify(anchor, nr, NULL, NULL);
+}
+
/**
* Anchor for synchronous transfer. This is allocated on a stack by thread
* doing synchronous transfer, and a pointer to this structure is set up in
atomic_t csi_sync_nr;
/** error code. */
int csi_sync_rc;
- /** barrier of destroy this structure */
- atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
/** callback to invoke when this IO is finished */
- void (*csi_end_io)(const struct lu_env *,
- struct cl_sync_io *);
+ cl_sync_io_end_t *csi_end_io;
+ /** aio private data */
+ struct cl_dio_aio *csi_aio;
};
-void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
- void (*end)(const struct lu_env *, struct cl_sync_io *));
-int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
- long timeout);
-void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
- int ioret);
-void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
+/** To support Direct AIO */
+struct cl_dio_aio {
+ struct cl_sync_io cda_sync; /* completion anchor for this AIO */
+ struct cl_page_list cda_pages; /* pages owned by this AIO */
+ struct kiocb *cda_iocb; /* originating kernel iocb */
+ ssize_t cda_bytes; /* byte count of the AIO -- TODO confirm accumulation semantics */
+};
/** @} cl_sync_io */