*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* read/write system call it is associated with the single user
 * thread that issued the system call).
*
- * - cl_req represents a collection of pages for a transfer. cl_req is
- * constructed by req-forming engine that tries to saturate
- * transport with large and continuous transfers.
- *
* Terminology
*
 * - to avoid confusion, high-level I/O operation like read or write system
/*
* super-class definitions.
*/
+#include <linux/aio.h>
+#include <linux/fs.h>
+
#include <libcfs/libcfs.h>
#include <lu_object.h>
#include <linux/atomic.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <linux/pagevec.h>
#include <lustre_dlm.h>
struct obd_info;
struct inode;
struct cl_device;
-struct cl_device_operations;
struct cl_object;
-struct cl_object_page_operations;
-struct cl_object_lock_operations;
struct cl_page;
struct cl_page_slice;
struct cl_io;
struct cl_io_slice;
-struct cl_req;
-struct cl_req_slice;
-
-/**
- * Operations for each data device in the client stack.
- *
- * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
- */
-struct cl_device_operations {
- /**
- * Initialize cl_req. This method is called top-to-bottom on all
- * devices in the stack to get them a chance to allocate layer-private
- * data, and to attach them to the cl_req by calling
- * cl_req_slice_add().
- *
- * \see osc_req_init(), lov_req_init(), lovsub_req_init()
- * \see vvp_req_init()
- */
- int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-};
+struct cl_req_attr;
/**
* Device in the client stack.
struct cl_device {
/** Super-class. */
struct lu_device cd_lu_dev;
- /** Per-layer operation vector. */
- const struct cl_device_operations *cd_ops;
};
/** \addtogroup cl_object cl_object
*/
loff_t cat_kms;
/** Modification time. Measured in seconds since epoch. */
- time_t cat_mtime;
+ time64_t cat_mtime;
/** Access time. Measured in seconds since epoch. */
- time_t cat_atime;
+ time64_t cat_atime;
/** Change time. Measured in seconds since epoch. */
- time_t cat_ctime;
+ time64_t cat_ctime;
/**
* Blocks allocated to this cl_object on the server file system.
*
/* nlink of the directory */
__u64 cat_nlink;
+
+ /* Project identifier for quota purposes. */
+ __u32 cat_projid;
};
/**
* Fields in cl_attr that are being set.
*/
enum cl_attr_valid {
- CAT_SIZE = 1 << 0,
- CAT_KMS = 1 << 1,
- CAT_MTIME = 1 << 3,
- CAT_ATIME = 1 << 4,
- CAT_CTIME = 1 << 5,
- CAT_BLOCKS = 1 << 6,
- CAT_UID = 1 << 7,
- CAT_GID = 1 << 8
+ CAT_SIZE = 1 << 0,
+ CAT_KMS = 1 << 1,
+ CAT_MTIME = 1 << 3,
+ CAT_ATIME = 1 << 4,
+ CAT_CTIME = 1 << 5,
+ CAT_BLOCKS = 1 << 6,
+ CAT_UID = 1 << 7,
+ CAT_GID = 1 << 8,
+ CAT_PROJID = 1 << 9
};
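/*
 * Usage sketch (illustrative only): a caller updating a subset of the
 * attributes pairs a cl_attr with a validity mask built from the flags
 * above.  cl_object_attr_update() is assumed here and is not shown in
 * this header fragment.
 *
 *	attr->cat_mtime  = ktime_get_real_seconds();
 *	attr->cat_projid = projid;
 *	rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_PROJID);
 */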
/**
*/
struct cl_object_conf {
/** Super-class. */
- struct lu_object_conf coc_lu;
- union {
- /**
- * Object layout. This is consumed by lov.
- */
- struct lustre_md *coc_md;
+ struct lu_object_conf coc_lu;
+ union {
+ /**
+ * Object layout. This is consumed by lov.
+ */
+ struct lu_buf coc_layout;
/**
* Description of particular stripe location in the
* cluster. This is consumed by osc.
OBJECT_CONF_WAIT = 2
};
+enum {
+ CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */
+ CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */
+};
+
+struct cl_layout {
+ /** the buffer to return the layout in lov_mds_md format. */
+ struct lu_buf cl_buf;
+ /** size of layout in lov_mds_md format. */
+ size_t cl_size;
+ /** Layout generation. */
+ u32 cl_layout_gen;
+ /** whether layout is a composite one */
+ bool cl_is_composite;
+ /** Whether layout is a HSM released one */
+ bool cl_is_released;
+};
+
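/*
 * Usage sketch (illustrative only): querying an object's layout through
 * cl_object_layout_get(), declared later in this header.  Buffer sizing
 * and error handling are simplified; "buf", "buflen" and consume_lmm()
 * are placeholders.
 *
 *	struct cl_layout cl = {
 *		.cl_buf.lb_buf = buf,
 *		.cl_buf.lb_len = buflen,
 *	};
 *
 *	rc = cl_object_layout_get(env, obj, &cl);
 *	if (rc == 0)
 *		consume_lmm(buf, cl.cl_size, cl.cl_layout_gen);
 */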
/**
* Operations implemented for each cl object layer.
*
* Object getstripe method.
*/
int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
- struct lov_user_md __user *lum);
- /**
- * Find whether there is any callback data (ldlm lock) attached upon
- * the object.
- */
- int (*coo_find_cbdata)(const struct lu_env *env, struct cl_object *obj,
- ldlm_iterator_t iter, void *data);
+ struct lov_user_md __user *lum, size_t size);
/**
* Get FIEMAP mapping from the object.
*/
struct ll_fiemap_info_key *fmkey,
struct fiemap *fiemap, size_t *buflen);
/**
- * Get attributes of the object from server. (top->bottom)
+ * Get layout and generation of the object.
*/
- int (*coo_obd_info_get)(const struct lu_env *env, struct cl_object *obj,
- struct obd_info *oinfo,
- struct ptlrpc_request_set *set);
+ int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_layout *layout);
/**
- * Get data version of the object. (top->bottom)
+ * Get maximum size of the object.
*/
- int (*coo_data_version)(const struct lu_env *env, struct cl_object *obj,
- __u64 *version, int flags);
+ loff_t (*coo_maxbytes)(struct cl_object *obj);
+ /**
+ * Set request attributes.
+ */
+ void (*coo_req_attr_set)(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_req_attr *attr);
+ /**
+ * Flush \a obj data corresponding to \a lock. Used for DoM
+ * locks in llite's blocking AST cancellation callback.
+ */
+ int (*coo_object_flush)(const struct lu_env *env,
+ struct cl_object *obj,
+ struct ldlm_lock *lock);
};
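/*
 * Illustrative only: a layer fills in the methods it implements; the names
 * below are placeholders rather than the actual per-layer vectors.
 *
 *	static const struct cl_object_operations foo_object_ops = {
 *		.coo_layout_get   = foo_object_layout_get,
 *		.coo_maxbytes     = foo_object_maxbytes,
 *		.coo_req_attr_set = foo_req_attr_set,
 *		.coo_object_flush = foo_object_flush,
 *	};
 */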
/**
*
* - [cl_page_state::CPS_PAGEOUT] page is dirty, the
* req-formation engine decides that it wants to include this page
- * into an cl_req being constructed, and yanks it from the cache;
+ * into an RPC being constructed, and yanks it from the cache;
*
* - [cl_page_state::CPS_FREEING] VM callback is executed to
* evict the page form the memory;
* Page is being read in, as a part of a transfer. This is quite
* similar to the cl_page_state::CPS_PAGEOUT state, except that
 * read-in is always "immediate"---there is no such thing as a sudden
- * construction of read cl_req from cached, presumably not up to date,
+ * construction of a read request from cached, presumably not up to date,
* pages.
*
* Underlying VM page is locked for the duration of transfer.
/** Transient page: a transient cl_page is used to bind a cl_page
 * to a vmpage that does not belong to the same object as the cl_page.
- * it is used in DirectIO, lockless IO and liblustre. */
+ * It is used in DirectIO and lockless IO. */
CPT_TRANSIENT,
};
struct cl_page {
/** Reference counter. */
atomic_t cp_ref;
- /** Transfer error. */
- int cp_error;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
/** vmpage */
struct list_head cp_batch;
/** List of slices. Immutable after creation. */
struct list_head cp_layers;
- /** Linkage of pages within cl_req. */
- struct list_head cp_flight;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
* by sub-io. Protected by a VM lock.
*/
struct cl_io *cp_owner;
- /**
- * Owning IO request in cl_page_state::CPS_PAGEOUT and
- * cl_page_state::CPS_PAGEIN states. This field is maintained only in
- * the top-level pages. Protected by a VM lock.
- */
- struct cl_req *cp_req;
/** List of references to this page, for debugging. */
struct lu_ref cp_reference;
/** Link to an object, for debugging. */
struct lu_ref_link cp_queue_ref;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
+ /** layout_entry + stripe index, composed using lov_comp_index() */
+ unsigned int cp_lov_index;
};
/**
/**
* Requested transfer type.
- * \ingroup cl_req
*/
enum cl_req_type {
CRT_READ,
*/
int (*cpo_is_vmlocked)(const struct lu_env *env,
const struct cl_page_slice *slice);
+
+ /**
+ * Update file attributes when all we have is this page. Used for tiny
+ * writes to update attributes when we don't have a full cl_io.
+ */
+ void (*cpo_page_touch)(const struct lu_env *env,
+ const struct cl_page_slice *slice, size_t to);
/**
* Page destruction.
*/
const struct cl_page_slice *slice);
/** Destructor. Frees resources and slice itself. */
void (*cpo_fini)(const struct lu_env *env,
- struct cl_page_slice *slice);
+ struct cl_page_slice *slice,
+ struct pagevec *pvec);
/**
* Optional debugging helper. Prints given page slice.
*
/**
* \name transfer
*
- * Transfer methods. See comment on cl_req for a description of
- * transfer formation and life-cycle.
+ * Transfer methods.
*
* @{
*/
int ioret);
/**
* Called when cached page is about to be added to the
- * cl_req as a part of req formation.
+ * ptlrpc request as a part of req formation.
*
* \return 0 : proceed with this page;
* \return -EAGAIN : skip this page;
* (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
* cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
*
- * Typical cl_lock consists of the two layers:
+ * A typical cl_lock consists of a single layer:
*
- * - vvp_lock (vvp specific data), and
* - lov_lock (lov specific data).
*
* lov_lock contains an array of sub-locks. Each of these sub-locks is a
* normal cl_lock: it has a header (struct cl_lock) and a list of layers:
*
- * - lovsub_lock, and
* - osc_lock
*
* Each sub-lock is associated with a cl_object (representing stripe
/**
* Per-layer part of cl_lock
*
- * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
+ * \see lov_lock, osc_lock
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
/**
*
- * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
+ * \see lov_lock_ops, osc_lock_ops
*/
struct cl_lock_operations {
/** @{ */
* @anchor for resources
* \retval -ve failure
*
- * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
- * \see osc_lock_enqueue()
+ * \see lov_lock_enqueue(), osc_lock_enqueue()
*/
int (*clo_enqueue)(const struct lu_env *env,
const struct cl_lock_slice *slice,
/**
* Destructor. Frees resources and the slice.
*
- * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
- * \see osc_lock_fini()
+ * \see lov_lock_fini(), osc_lock_fini()
*/
void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
/**
struct task_struct *pl_owner;
};
-/**
+/**
* A 2-queue of pages. A convenience data-type for common use case, 2-queue
* contains an incoming page list and an outgoing page list.
*/
/** IO types */
enum cl_io_type {
/** read system call */
- CIT_READ,
+ CIT_READ = 1,
/** write system call */
CIT_WRITE,
/** truncate, utime system calls */
CIT_SETATTR,
+ /** get data version */
+ CIT_DATA_VERSION,
/**
* page fault handling
*/
*/
CIT_FSYNC,
/**
+ * glimpse. An io context to acquire a glimpse lock.
+ */
+ CIT_GLIMPSE,
+ /**
* Miscellaneous io. This is used for occasional io activity that
* doesn't fit into other types. Currently this is used for:
*
* - VM induced page write-out. An io context for writing page out
* for memory cleansing;
*
- * - glimpse. An io context to acquire glimpse lock.
- *
* - grouplock. An io context to acquire group lock.
*
* CIT_MISC io is used simply as a context in which locks and pages
* cl_io_loop() is never called for it.
*/
CIT_MISC,
+ /**
+ * ladvise handling
+ * To give advice about access to a file
+ */
+ CIT_LADVISE,
CIT_OP_NR
};
};
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
- struct cl_page *);
+ struct pagevec *);
struct cl_read_ahead {
 /* Maximum page index at which the readahead window ends.
 * This is determined by DLM lock coverage, RPC and stripe boundaries.
 * cra_end_idx is included. */
- pgoff_t cra_end;
- /* Release routine. If readahead holds resources underneath, this
+ pgoff_t cra_end_idx;
+ /* optimal RPC size for this read, by pages */
+ unsigned long cra_rpc_pages;
+ /* Release callback. If readahead holds resources underneath, this
* function should be called to release it. */
- void (*cra_release)(const struct lu_env *env, void *cbdata);
+ void (*cra_release)(const struct lu_env *env, void *cbdata);
/* Callback data for cra_release routine */
- void *cra_cbdata;
+ void *cra_cbdata;
+ /* whether lock is in contention */
+ bool cra_contention;
};
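/*
 * Illustrative only: a layer's read-ahead method is expected to fill the
 * result roughly as below (the left-hand names are the fields above, the
 * right-hand values are placeholders):
 *
 *	ra->cra_end_idx    = last_index_covered_by_lock;
 *	ra->cra_rpc_pages  = optimal_rpc_size_in_pages;
 *	ra->cra_release    = foo_ra_release;
 *	ra->cra_cbdata     = foo_lock_handle;
 *	ra->cra_contention = lock_is_contended;
 */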
static inline void cl_read_ahead_release(const struct lu_env *env,
* -EWOULDBLOCK is returned immediately.
*/
CEF_NONBLOCK = 0x00000001,
- /**
- * take lock asynchronously (out of order), as it cannot
- * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing.
- */
- CEF_ASYNC = 0x00000002,
+ /**
+ * Tell lower layers this is a glimpse request, translated to
+ * LDLM_FL_HAS_INTENT at the LDLM layer.
+ *
+ * Also, because glimpse locks never block other locks, we count this
+ * as automatically compatible with other osc locks.
+ * (see osc_lock_compatible)
+ */
+ CEF_GLIMPSE = 0x00000002,
/**
 * tell the server to instruct (through a flag in the blocking ast) an
 * owner of the conflicting lock that it can drop dirty pages
* protected by this lock, without sending them to the server.
*/
CEF_DISCARD_DATA = 0x00000004,
- /**
- * tell the sub layers that it must be a `real' lock. This is used for
- * mmapped-buffer locks and glimpse locks that must be never converted
- * into lockless mode.
- *
- * \see vvp_mmap_locks(), cl_glimpse_lock().
- */
- CEF_MUST = 0x00000008,
+ /**
+ * tell the sub layers that it must be a `real' lock. This is used for
+ * mmapped-buffer locks, glimpse locks, and manually requested locks
+ * (LU_LADVISE_LOCKAHEAD), which must never be converted into lockless
+ * mode.
+ *
+ * \see vvp_mmap_locks(), cl_glimpse_lock, cl_request_lock().
+ */
+ CEF_MUST = 0x00000008,
/**
 * tell the sub layers to never request a `real' lock. This flag is
* not used currently.
*/
CEF_NEVER = 0x00000010,
/**
- * for async glimpse lock.
+ * tell the dlm layer this is a speculative lock request.
+ * Speculative lock requests are locks which are not requested as part
+ * of an I/O operation. Instead, they are requested because we expect
+ * to use them in the future. They are requested asynchronously at the
+ * ptlrpc layer.
+ *
+ * Currently used for asynchronous glimpse locks and manually requested
+ * locks (LU_LADVISE_LOCKAHEAD).
*/
- CEF_AGL = 0x00000020,
+ CEF_SPECULATIVE = 0x00000020,
/**
* enqueue a lock to test DLM lock existence.
*/
CEF_PEEK = 0x00000040,
/**
+ * Lock match only. Used by group lock in I/O as group lock
+ * is known to exist.
+ */
+ CEF_LOCK_MATCH = 0x00000080,
+ /**
+ * tell the DLM layer to lock only the requested range
+ */
+ CEF_LOCK_NO_EXPAND = 0x00000100,
+ /**
* mask of enq_flags.
*/
- CEF_MASK = 0x0000007f,
+ CEF_MASK = 0x000001ff,
};
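/*
 * Illustrative combinations only (cld_enq_flags is assumed to be the
 * enqueue-flags field of a lock descriptor, which is not shown in this
 * fragment):
 *
 *	// asynchronous glimpse: speculative, never lockless, never blocks
 *	descr->cld_enq_flags = CEF_GLIMPSE | CEF_SPECULATIVE | CEF_MUST |
 *			       CEF_NONBLOCK;
 *
 *	// I/O under an already-held group lock: match only
 *	descr->cld_enq_flags = CEF_LOCK_MATCH;
 */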
/**
};
struct cl_io_rw_common {
- loff_t crw_pos;
- size_t crw_count;
- int crw_nonblock;
+ loff_t crw_pos;
+ size_t crw_count;
+ int crw_nonblock;
};
-
/**
* State for io.
*
struct cl_lockset ci_lockset;
/** lock requirements, this is just a help info for sublayers. */
enum cl_io_lock_dmd ci_lockreq;
- union {
- struct cl_rd_io {
- struct cl_io_rw_common rd;
- } ci_rd;
+ /** layout version when this IO occurs */
+ __u32 ci_layout_version;
+ union {
+ struct cl_rd_io {
+ struct cl_io_rw_common rd;
+ } ci_rd;
struct cl_wr_io {
struct cl_io_rw_common wr;
int wr_append;
struct cl_setattr_io {
struct ost_lvb sa_attr;
unsigned int sa_attr_flags;
- unsigned int sa_valid;
+ unsigned int sa_avalid; /* ATTR_* */
+ unsigned int sa_xvalid; /* OP_XVALID */
int sa_stripe_index;
+ struct ost_layout sa_layout;
const struct lu_fid *sa_parent_fid;
- struct obd_capa *sa_capa;
} ci_setattr;
+ struct cl_data_version_io {
+ u64 dv_data_version;
+ u32 dv_layout_version;
+ int dv_flags;
+ } ci_data_version;
struct cl_fault_io {
/** page index within file. */
pgoff_t ft_index;
struct cl_fsync_io {
loff_t fi_start;
loff_t fi_end;
- struct obd_capa *fi_capa;
/** file system level fid */
struct lu_fid *fi_fid;
enum cl_fsync_mode fi_mode;
/* how many pages were written/discarded */
unsigned int fi_nr_written;
} ci_fsync;
+ struct cl_ladvise_io {
+ __u64 li_start;
+ __u64 li_end;
+ /** file system level fid */
+ struct lu_fid *li_fid;
+ enum lu_ladvise_type li_advice;
+ __u64 li_flags;
+ } ci_ladvise;
} u;
struct cl_2queue ci_queue;
size_t ci_nob;
*/
ci_ignore_layout:1,
/**
+ * Need MDS intervention to complete a write.
+ * Write intent is required for the following cases:
+ * 1. component being written is not initialized, or
+ * 2. the mirrored files are NOT in WRITE_PENDING state.
+ */
+ ci_need_write_intent:1,
+ /**
* Check if layout changed after the IO finishes. Mainly for HSM
 * requirement. If IO occurs to open files, it doesn't need to
 * verify layout because HSM won't release open files.
/**
* O_NOATIME
*/
- ci_noatime:1;
+ ci_noatime:1,
+ /* Tell sublayers not to expand LDLM locks requested for this IO */
+ ci_lock_no_expand:1,
+ /**
+ * Set if non-delay RPC should be used for this IO.
+ *
+ * If this file has multiple mirrors, and if the OSTs of the current
+ * mirror are inaccessible, non-delay RPC would error out quickly so
+ * that the upper layer can try to access the next mirror.
+ */
+ ci_ndelay:1,
+ /**
+ * Set if IO is triggered by async workqueue readahead.
+ */
+ ci_async_readahead:1,
+ /**
+ * Ignore lockless and do normal locking for this io.
+ */
+ ci_ignore_lockless:1,
+ /**
+ * Set if we've tried all mirrors for this read IO. If it's not set,
+ * the read IO will check the status of the to-be-read OSCs and
+ * fast-switch to another mirror if some of the OSTs are not healthy.
+ */
+ ci_tried_all_mirrors:1;
+ /**
+ * How many times the read has retried before this one.
+ * Set by the top level and consumed by the LOV.
+ */
+ unsigned ci_ndelay_tried;
+ /**
+ * Designated mirror index for this I/O.
+ */
+ unsigned ci_designated_mirror;
/**
* Number of pages owned by this IO. For invariant checking.
*/
unsigned ci_owned_nr;
+ /**
+ * Range of write intent. Valid if ci_need_write_intent is set.
+ */
+ struct lu_extent ci_write_intent;
};
/** @} cl_io */
-/** \addtogroup cl_req cl_req
- * @{ */
-/** \struct cl_req
- * Transfer.
- *
- * There are two possible modes of transfer initiation on the client:
- *
- * - immediate transfer: this is started when a high level io wants a page
- * or a collection of pages to be transferred right away. Examples:
- * read-ahead, synchronous read in the case of non-page aligned write,
- * page write-out as a part of extent lock cancellation, page write-out
- * as a part of memory cleansing. Immediate transfer can be both
- * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
- *
- * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
- * when io wants to transfer a page to the server some time later, when
- * it can be done efficiently. Example: pages dirtied by the write(2)
- * path.
- *
- * In any case, transfer takes place in the form of a cl_req, which is a
- * representation for a network RPC.
- *
- * Pages queued for an opportunistic transfer are cached until it is decided
- * that efficient RPC can be composed of them. This decision is made by "a
- * req-formation engine", currently implemented as a part of osc
- * layer. Req-formation depends on many factors: the size of the resulting
- * RPC, whether or not multi-object RPCs are supported by the server,
- * max-rpc-in-flight limitations, size of the dirty cache, etc.
- *
- * For the immediate transfer io submits a cl_page_list, that req-formation
- * engine slices into cl_req's, possibly adding cached pages to some of
- * the resulting req's.
- *
- * Whenever a page from cl_page_list is added to a newly constructed req, its
- * cl_page_operations::cpo_prep() layer methods are called. At that moment,
- * page state is atomically changed from cl_page_state::CPS_OWNED to
- * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
- * is zeroed, and cl_page::cp_req is set to the
- * req. cl_page_operations::cpo_prep() method at the particular layer might
- * return -EALREADY to indicate that it does not need to submit this page
- * at all. This is possible, for example, if page, submitted for read,
- * became up-to-date in the meantime; and for write, the page don't have
- * dirty bit marked. \see cl_io_submit_rw()
- *
- * Whenever a cached page is added to a newly constructed req, its
- * cl_page_operations::cpo_make_ready() layer methods are called. At that
- * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
- * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
- * req. cl_page_operations::cpo_make_ready() method at the particular layer
- * might return -EAGAIN to indicate that this page is not eligible for the
- * transfer right now.
- *
- * FUTURE
- *
- * Plan is to divide transfers into "priority bands" (indicated when
- * submitting cl_page_list, and queuing a page for the opportunistic transfer)
- * and allow glueing of cached pages to immediate transfers only within single
- * band. This would make high priority transfers (like lock cancellation or
- * memory pressure induced write-out) really high priority.
- *
- */
-
/**
* Per-transfer attributes.
*/
struct cl_req_attr {
+ enum cl_req_type cra_type;
+ u64 cra_flags;
+ struct cl_page *cra_page;
/** Generic attributes for the server consumption. */
struct obdo *cra_oa;
- /** Capability. */
- struct obd_capa *cra_capa;
/** Jobid */
char cra_jobid[LUSTRE_JOBID_SIZE];
};
-/**
- * Transfer request operations definable at every layer.
- *
- * Concurrency: transfer formation engine synchronizes calls to all transfer
- * methods.
- */
-struct cl_req_operations {
- /**
- * Invoked top-to-bottom by cl_req_prep() when transfer formation is
- * complete (all pages are added).
- *
- * \see osc_req_prep()
- */
- int (*cro_prep)(const struct lu_env *env,
- const struct cl_req_slice *slice);
- /**
- * Called top-to-bottom to fill in \a oa fields. This is called twice
- * with different flags, see bug 10150 and osc_build_req().
- *
- * \param obj an object from cl_req which attributes are to be set in
- * \a oa.
- *
- * \param oa struct obdo where attributes are placed
- *
- * \param flags \a oa fields to be filled.
- */
- void (*cro_attr_set)(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags);
- /**
- * Called top-to-bottom from cl_req_completion() to notify layers that
- * transfer completed. Has to free all state allocated by
- * cl_device_operations::cdo_req_init().
- */
- void (*cro_completion)(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret);
-};
-
-/**
- * A per-object state that (potentially multi-object) transfer request keeps.
- */
-struct cl_req_obj {
- /** object itself */
- struct cl_object *ro_obj;
- /** reference to cl_req_obj::ro_obj. For debugging. */
- struct lu_ref_link ro_obj_ref;
- /* something else? Number of pages for a given object? */
-};
-
-/**
- * Transfer request.
- *
- * Transfer requests are not reference counted, because IO sub-system owns
- * them exclusively and knows when to free them.
- *
- * Life cycle.
- *
- * cl_req is created by cl_req_alloc() that calls
- * cl_device_operations::cdo_req_init() device methods to allocate per-req
- * state in every layer.
- *
- * Then pages are added (cl_req_page_add()), req keeps track of all objects it
- * contains pages for.
- *
- * Once all pages were collected, cl_page_operations::cpo_prep() method is
- * called top-to-bottom. At that point layers can modify req, let it pass, or
- * deny it completely. This is to support things like SNS that have transfer
- * ordering requirements invisible to the individual req-formation engine.
- *
- * On transfer completion (or transfer timeout, or failure to initiate the
- * transfer of an allocated req), cl_req_operations::cro_completion() method
- * is called, after execution of cl_page_operations::cpo_completion() of all
- * req's pages.
- */
-struct cl_req {
- enum cl_req_type crq_type;
- /** A list of pages being transfered */
- struct list_head crq_pages;
- /** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
- /** An array of objects which pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
- /** Number of elements in cl_req::crq_objs[] */
- unsigned crq_nrobjs;
- struct list_head crq_layers;
-};
-
-/**
- * Per-layer state for request.
- */
-struct cl_req_slice {
- struct cl_req *crs_req;
- struct cl_device *crs_dev;
- struct list_head crs_linkage;
- const struct cl_req_operations *crs_ops;
-};
-
-/* @} cl_req */
-
enum cache_stats_item {
/** how many cache lookups were performed */
CS_lookup = 0,
const struct cl_lock_operations *ops);
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
struct cl_object *obj, const struct cl_io_operations *ops);
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
- struct cl_device *dev,
- const struct cl_req_operations *ops);
/** @} helpers */
/** \defgroup cl_object cl_object
int cl_object_prune (const struct lu_env *env, struct cl_object *obj);
void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
- struct lov_user_md __user *lum);
-int cl_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
- ldlm_iterator_t iter, void *data);
+ struct lov_user_md __user *lum, size_t size);
int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
size_t *buflen);
-int cl_object_obd_info_get(const struct lu_env *env, struct cl_object *obj,
- struct obd_info *oinfo,
- struct ptlrpc_request_set *set);
-int cl_object_data_version(const struct lu_env *env, struct cl_object *obj,
- __u64 *version, int flags);
+int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_layout *cl);
+loff_t cl_object_maxbytes(struct cl_object *obj);
+int cl_object_flush(const struct lu_env *env, struct cl_object *obj,
+ struct ldlm_lock *lock);
+
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
void cl_page_get (struct cl_page *page);
void cl_page_put (const struct lu_env *env,
struct cl_page *page);
+void cl_pagevec_put (const struct lu_env *env,
+ struct cl_page *page,
+ struct pagevec *pvec);
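/*
 * Illustrative only: cl_pagevec_put() lets callers batch the final page
 * release through a pagevec.  "plist" is assumed to be a cl_page_list and
 * pl_pages its list head; note that pagevec_init() takes an extra argument
 * on older kernels.
 *
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec);
 *	list_for_each_entry_safe(page, tmp, &plist->pl_pages, cp_batch)
 *		cl_pagevec_put(env, page, &pvec);
 *	pagevec_release(&pvec);
 */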
void cl_page_print (const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
int cl_page_is_vmlocked(const struct lu_env *env,
const struct cl_page *pg);
+void cl_page_touch(const struct lu_env *env, const struct cl_page *pg,
+ size_t to);
void cl_page_export(const struct lu_env *env,
struct cl_page *pg, int uptodate);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
size_t nob);
int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue);
-int cl_io_is_going (const struct lu_env *env);
/**
* True, iff \a io is an O_APPEND write(2).
*/
static inline int cl_io_is_append(const struct cl_io *io)
{
- return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
+ return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
}
static inline int cl_io_is_sync_write(const struct cl_io *io)
*/
static inline int cl_io_is_trunc(const struct cl_io *io)
{
- return io->ci_type == CIT_SETATTR &&
- (io->u.ci_setattr.sa_valid & ATTR_SIZE);
+ return io->ci_type == CIT_SETATTR &&
+ (io->u.ci_setattr.sa_avalid & ATTR_SIZE);
}
struct cl_io *cl_io_top(struct cl_io *io);
void cl_io_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_io *io);
-#define CL_IO_SLICE_CLEAN(foo_io, base) \
-do { \
- typeof(foo_io) __foo_io = (foo_io); \
- \
- CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \
- memset(&__foo_io->base + 1, 0, \
- (sizeof *__foo_io) - sizeof __foo_io->base); \
+#define CL_IO_SLICE_CLEAN(foo_io, base) \
+do { \
+ typeof(foo_io) __foo_io = (foo_io); \
+ \
+ memset(&__foo_io->base, 0, \
+ sizeof(*__foo_io) - offsetof(typeof(*__foo_io), base)); \
} while (0)
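/*
 * Example (struct and field names are hypothetical): with the form above,
 * the embedded cl_io_slice no longer has to be the first member; everything
 * from \a base to the end of the enclosing structure is zeroed.
 *
 *	struct foo_io {
 *		int			fio_keep;	// preserved
 *		struct cl_io_slice	fio_cl;		// zeroed from here on
 *		int			fio_scratch;	// zeroed
 *	};
 *
 *	CL_IO_SLICE_CLEAN(fio, fio_cl);
 */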
/** @} cl_io */
struct cl_page_list *plist, struct cl_page *page);
void cl_page_list_disown (const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
-int cl_page_list_own (const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
void cl_page_list_assume (const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
void cl_page_list_discard(const struct lu_env *env,
/** @} cl_page_list */
-/** \defgroup cl_req cl_req
- * @{ */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
- enum cl_req_type crt, int nr_objects);
-
-void cl_req_page_add (const struct lu_env *env, struct cl_req *req,
- struct cl_page *page);
-void cl_req_page_done (const struct lu_env *env, struct cl_page *page);
-int cl_req_prep (const struct lu_env *env, struct cl_req *req);
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags);
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
+void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+ struct cl_req_attr *attr);
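/*
 * Illustrative only: the transfer engine fills a cl_req_attr and asks the
 * object stack to set the matching request attributes.  The OBD_MD_* flags
 * and cl_object_top() are assumed from elsewhere; "oa" and "page" are
 * placeholders.
 *
 *	struct cl_req_attr attr = {
 *		.cra_type  = CRT_WRITE,
 *		.cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME,
 *		.cra_page  = page,
 *		.cra_oa    = oa,
 *	};
 *
 *	cl_req_attr_set(env, cl_object_top(obj), &attr);
 */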
/** \defgroup cl_sync_io cl_sync_io
* @{ */
+struct cl_sync_io;
+
+typedef void (cl_sync_io_end_t)(const struct lu_env *, struct cl_sync_io *);
+
+void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
+ cl_sync_io_end_t *end);
+
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
+ long timeout);
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret);
+static inline void cl_sync_io_init(struct cl_sync_io *anchor, int nr)
+{
+ cl_sync_io_init_notify(anchor, nr, NULL);
+}
+
/**
 * Anchor for synchronous transfer. This is allocated on the stack by the thread
 * doing the synchronous transfer, and a pointer to this structure is set up in
atomic_t csi_sync_nr;
/** error code. */
int csi_sync_rc;
- /** barrier of destroy this structure */
- atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
/** callback to invoke when this IO is finished */
- void (*csi_end_io)(const struct lu_env *,
- struct cl_sync_io *);
+ cl_sync_io_end_t *csi_end_io;
};
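/*
 * Typical synchronous-transfer pattern (simplified, error handling omitted);
 * the page submission step is only sketched:
 *
 *	struct cl_sync_io anchor;
 *
 *	cl_sync_io_init(&anchor, nr_pages);
 *	// submit nr_pages pages; each completion eventually calls
 *	// cl_sync_io_note(env, &anchor, ioret)
 *	rc = cl_sync_io_wait(env, &anchor, timeout);
 */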
-void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
- void (*end)(const struct lu_env *, struct cl_sync_io *));
-int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
- long timeout);
-void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
- int ioret);
-void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
-
/** @} cl_sync_io */
-/** @} cl_req */
-
/** \defgroup cl_env cl_env
*
* lu_env handling for a client.
 * - allocation and destruction of environments is amortized by caching
 * no-longer-used environments instead of destroying them;
*
- * - there is a notion of "current" environment, attached to the kernel
- * data structure representing current thread Top-level lustre code
- * allocates an environment and makes it current, then calls into
- * non-lustre code, that in turn calls lustre back. Low-level lustre
- * code thus called can fetch environment created by the top-level code
- * and reuse it, avoiding additional environment allocation.
- * Right now, three interfaces can attach the cl_env to running thread:
- * - cl_env_get
- * - cl_env_implant
- * - cl_env_reexit(cl_env_reenter had to be called priorly)
- *
* \see lu_env, lu_context, lu_context_key
* @{ */
-struct cl_env_nest {
- int cen_refcheck;
- void *cen_cookie;
-};
-
-struct lu_env *cl_env_peek (int *refcheck);
-struct lu_env *cl_env_get (int *refcheck);
-struct lu_env *cl_env_alloc (int *refcheck, __u32 tags);
-struct lu_env *cl_env_nested_get (struct cl_env_nest *nest);
-void cl_env_put (struct lu_env *env, int *refcheck);
-void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env);
-void *cl_env_reenter (void);
-void cl_env_reexit (void *cookie);
-void cl_env_implant (struct lu_env *env, int *refcheck);
-void cl_env_unplant (struct lu_env *env, int *refcheck);
-unsigned cl_env_cache_purge(unsigned nr);
-struct lu_env *cl_env_percpu_get (void);
-void cl_env_percpu_put (struct lu_env *env);
+struct lu_env *cl_env_get(__u16 *refcheck);
+struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags);
+void cl_env_put(struct lu_env *env, __u16 *refcheck);
+unsigned cl_env_cache_purge(unsigned nr);
+struct lu_env *cl_env_percpu_get(void);
+void cl_env_percpu_put(struct lu_env *env);
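/*
 * Typical usage of the simplified cl_env interface:
 *
 *	__u16 refcheck;
 *	struct lu_env *env;
 *
 *	env = cl_env_get(&refcheck);
 *	if (IS_ERR(env))
 *		return PTR_ERR(env);
 *	// ... use env ...
 *	cl_env_put(env, &refcheck);
 */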
/** @} cl_env */