* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#include <libcfs/libcfs.h>
#include <lu_object.h>
+#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
struct inode;
/**
* Device in the client stack.
*
- * \see ccc_device, lov_device, lovsub_device, osc_device
+ * \see vvp_device, lov_device, lovsub_device, osc_device
*/
struct cl_device {
/** Super-class. */
* be discarded from the memory, all its sub-objects are torn-down and
* destroyed too.
*
- * \see ccc_object, lov_object, lovsub_object, osc_object
+ * \see vvp_object, lov_object, lovsub_object, osc_object
*/
struct cl_object {
/** super class */
* \return the same convention as for
* cl_object_operations::coo_attr_get() is used.
*/
- int (*coo_attr_set)(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
+ int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid);
/**
* Update object configuration. Called top-to-bottom to modify object
* configuration.
*/
int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf);
- /**
- * Glimpse ast. Executed when glimpse ast arrives for a lock on this
- * object. Layers are supposed to fill parts of \a lvb that will be
- * shipped to the glimpse originator as a glimpse result.
- *
- * \see ccc_object_glimpse(), lovsub_object_glimpse(),
- * \see osc_object_glimpse()
- */
+ /**
+ * Glimpse ast. Executed when glimpse ast arrives for a lock on this
+ * object. Layers are supposed to fill parts of \a lvb that will be
+ * shipped to the glimpse originator as a glimpse result.
+ *
+ * \see vvp_object_glimpse(), lovsub_object_glimpse(),
+ * \see osc_object_glimpse()
+ */
int (*coo_glimpse)(const struct lu_env *env,
const struct cl_object *obj, struct ost_lvb *lvb);
/**
* mainly pages and locks.
*/
int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
+ /**
+ * Object getstripe method.
+ */
+ int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
+ struct lov_user_md __user *lum);
};
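/*
 * Illustrative sketch only (not part of this patch): a hypothetical layer
 * "foo" wiring up the renamed attribute method and the new getstripe method.
 * The foo_* names are assumptions; only the cl_object_operations members
 * come from this header.
 */
static int foo_attr_update(const struct lu_env *env, struct cl_object *obj,
			   const struct cl_attr *attr, unsigned valid)
{
	/* apply the attributes selected by \a valid to this layer's state */
	return 0;
}

static int foo_getstripe(const struct lu_env *env, struct cl_object *obj,
			 struct lov_user_md __user *lum)
{
	/* copy the object's striping description to user space via \a lum */
	return -EOPNOTSUPP;
}

static const struct cl_object_operations foo_ops = {
	.coo_attr_update = foo_attr_update,	/* formerly .coo_attr_set */
	.coo_getstripe	 = foo_getstripe,	/* new method */
};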
/**
list_for_each_entry_reverse((slice), \
&(obj)->co_lu.lo_header->loh_layers,\
co_lu.lo_linkage)
+
/** @} cl_object */
#define CL_PAGE_EOF ((pgoff_t)~0ull)
/**
* Per-layer part of cl_page.
*
- * \see ccc_page, lov_page, osc_page
+ * \see vvp_page, lov_page, osc_page
*/
struct cl_page_slice {
struct cl_page *cpl_page;
/**
* Per-layer part of cl_lock
*
- * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
+ * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
* @anchor for resources
* \retval -ve failure
*
- * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
+ * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
* \see osc_lock_enqueue()
*/
int (*clo_enqueue)(const struct lu_env *env,
void (*clo_cancel)(const struct lu_env *env,
const struct cl_lock_slice *slice);
/** @} */
- /**
- * Destructor. Frees resources and the slice.
- *
- * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
- * \see osc_lock_fini()
- */
+ /**
+ * Destructor. Frees resources and the slice.
+ *
+ * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
+ * \see osc_lock_fini()
+ */
void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
/**
* Optional debugging helper. Prints given lock slice.
* This is usually embedded into layer session data, rather than allocated
* dynamically.
*
- * \see vvp_io, lov_io, osc_io, ccc_io
+ * \see vvp_io, lov_io, osc_io
*/
struct cl_io_slice {
struct cl_io *cis_io;
/** page index within file. */
pgoff_t ft_index;
/** number of valid bytes on the faulted page. */
- int ft_nob;
+ size_t ft_nob;
/** writable page? for nopage() only */
int ft_writable;
/** page of an executable? */
*
* \param flags \a oa fields to be filled.
*/
- void (*cro_attr_set)(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, obd_valid flags);
+ void (*cro_attr_set)(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, u64 flags);
/**
* Called top-to-bottom from cl_req_completion() to notify layers that
* transfer completed. Has to free all state allocated by
void cl_object_attr_unlock(struct cl_object *o);
int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr);
-int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
+int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
const struct cl_attr *attr, unsigned valid);
int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
struct ost_lvb *lvb);
const struct cl_object_conf *conf);
int cl_object_prune (const struct lu_env *env, struct cl_object *obj);
void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
+ struct lov_user_md __user *lum);
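/*
 * Illustrative sketch only: the typical calling convention for the renamed
 * cl_object_attr_update().  cl_object_attr_lock() and the CAT_SIZE valid
 * bit are assumed from elsewhere in this header; foo_update_size() is a
 * hypothetical caller.
 */
static int foo_update_size(const struct lu_env *env, struct cl_object *obj,
			   loff_t new_size)
{
	struct cl_attr attr = { .cat_size = new_size };
	int rc;

	cl_object_attr_lock(obj);
	rc = cl_object_attr_update(env, obj, &attr, CAT_SIZE);
	cl_object_attr_unlock(obj);
	return rc;
}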
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
* Functions to discard, delete and export a cl_page.
*/
/** @{ */
-void cl_page_discard (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-void cl_page_delete (const struct lu_env *env, struct cl_page *pg);
-int cl_page_is_vmlocked (const struct lu_env *env,
- const struct cl_page *pg);
-void cl_page_export (const struct lu_env *env,
- struct cl_page *pg, int uptodate);
+void cl_page_discard(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
+void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
+int cl_page_is_vmlocked(const struct lu_env *env,
+ const struct cl_page *pg);
+void cl_page_export(const struct lu_env *env,
+ struct cl_page *pg, int uptodate);
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
struct cl_page *page, pgoff_t *max_index);
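/*
 * Illustrative sketch only: how the export/discard/delete helpers above
 * might be used when a read into a cached page finishes.  foo_read_done()
 * is a hypothetical caller, not taken from this patch.
 */
static void foo_read_done(const struct lu_env *env, struct cl_io *io,
			  struct cl_page *pg, int rc)
{
	if (rc == 0) {
		/* data arrived: mark the backing VM page up to date */
		cl_page_export(env, pg, 1);
	} else {
		/* failed read: drop the page from the cache entirely */
		cl_page_discard(env, io, pg);
		cl_page_delete(env, pg);
	}
}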
-loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
-pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
-int cl_page_size (const struct cl_object *obj);
-int cl_pages_prune (const struct lu_env *env, struct cl_object *obj);
+loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
+pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
+size_t cl_page_size(const struct cl_object *obj);
-void cl_lock_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_lock *lock);
+void cl_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_lock *lock);
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_lock_descr *descr);
+ lu_printer_t printer,
+ const struct cl_lock_descr *descr);
/* @} helper */
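/*
 * Illustrative sketch only: converting between byte offsets and page
 * indices with the helpers declared above.  foo_pages_spanned() is a
 * hypothetical helper used purely for illustration.
 */
static pgoff_t foo_pages_spanned(const struct cl_object *obj,
				 loff_t start, size_t count)
{
	pgoff_t first = cl_index(obj, start);		  /* page covering 'start' */
	pgoff_t last  = cl_index(obj, start + count - 1); /* page covering last byte */

	/* cl_offset(obj, first) gives the file offset that page begins at,
	 * and cl_page_size(obj) its size in bytes (a size_t after this change). */
	return last - first + 1;
}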
+/**
+ * Data structure managing a client's cached pages. It maintains a count
+ * of "unstable" pages and an LRU of clean pages; "unstable" pages are
+ * pages pinned by the ptlrpc layer for recovery purposes.
+ */
+struct cl_client_cache {
+ /**
+ * # of users (OSCs)
+ */
+ atomic_t ccc_users;
+ /**
+ * # of threads doing shrinking
+ */
+ unsigned int ccc_lru_shrinkers;
+ /**
+ * # of LRU entries available
+ */
+ atomic_long_t ccc_lru_left;
+ /**
+ * List of entities (OSCs) for this LRU cache
+ */
+ struct list_head ccc_lru;
+ /**
+ * Max # of LRU entries
+ */
+ unsigned long ccc_lru_max;
+ /**
+ * Lock to protect ccc_lru list
+ */
+ spinlock_t ccc_lru_lock;
+ /**
+ * Set if unstable check is enabled
+ */
+ unsigned int ccc_unstable_check:1;
+ /**
+ * # of unstable pages for this mount point
+ */
+ atomic_long_t ccc_unstable_nr;
+ /**
+ * Wait queue used to wait for the unstable page count to reach zero.
+ * Waited on at umount time and signaled on BRW commit.
+ */
+ wait_queue_head_t ccc_unstable_waitq;
+};
+
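/*
 * Illustrative sketch only: how a mounting client might initialize a
 * cl_client_cache.  foo_cache_init() and the lru_max parameter are
 * assumptions; only the field names come from the structure above.
 */
static void foo_cache_init(struct cl_client_cache *cache, unsigned long lru_max)
{
	atomic_set(&cache->ccc_users, 1);		/* the mount itself */
	cache->ccc_lru_max = lru_max;
	atomic_long_set(&cache->ccc_lru_left, lru_max);	/* every LRU slot free */
	spin_lock_init(&cache->ccc_lru_lock);
	INIT_LIST_HEAD(&cache->ccc_lru);
	cache->ccc_lru_shrinkers = 0;
	cache->ccc_unstable_check = 1;			/* account unstable pages */
	atomic_long_set(&cache->ccc_unstable_nr, 0);
	init_waitqueue_head(&cache->ccc_unstable_waitq);
}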
/** @} cl_page */
/** \defgroup cl_lock cl_lock
struct cl_page *page);
void cl_req_page_done (const struct lu_env *env, struct cl_page *page);
int cl_req_prep (const struct lu_env *env, struct cl_req *req);
-void cl_req_attr_set (const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, obd_valid flags);
+void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
+ struct cl_req_attr *attr, u64 flags);
void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
/** \defgroup cl_sync_io cl_sync_io