* \see cl_page_own()
* \see vvp_page_own(), lov_page_own()
*/
- void (*cpo_own)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
+ int (*cpo_own)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, int nonblock);
/** Called when ownership it yielded. Optional.
*
* \see cl_page_disown()
const struct cl_page_slice *slice,
struct cl_io *io);
/**
- * Announces whether the page contains valid data or not by @uptodate.
+ * Announces whether the page contains valid data or not by \a uptodate.
*
* \see cl_page_export()
* \see vvp_page_export()
const struct cl_lock_slice *slice,
struct cl_lock_closure *closure);
/**
- * Executed top-to-bottom when lock description changes (e.g., as a
+ * Executed bottom-to-top when lock description changes (e.g., as a
* result of server granting more generous lock than was requested).
*
* \see lovsub_lock_modify()
cfs_task_t *pl_owner;
};
-/** \addtogroup cl_page_list cl_page_list
+/**
* A 2-queue of pages. A convenience data-type for common use case, 2-queue
* contains an incoming page list and an outgoing page list.
*/
const struct cl_object_conf *conf);
void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_has_locks (struct cl_object *obj);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
struct cl_object *obj,
struct cl_io *io,
pgoff_t start, pgoff_t end,
- struct cl_page_list *plist);
+ struct cl_page_list *plist,
+ int nonblock);
struct cl_page *cl_page_find (const struct lu_env *env,
struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
int cl_page_own (const struct lu_env *env,
struct cl_io *io, struct cl_page *page);
+int cl_page_own_try (const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
void cl_page_assume (const struct lu_env *env,
struct cl_io *io, struct cl_page *page);
void cl_page_unassume (const struct lu_env *env,
* longer used environments instead of destroying them;
*
* - there is a notion of "current" environment, attached to the kernel
- * data structure representing current thread (current->journal_info in
- * Linux kernel). Top-level lustre code allocates an environment and makes
- * it current, then calls into non-lustre code, that in turn calls lustre
- * back. Low-level lustre code thus called can fetch environment created
- * by the top-level code and reuse it, avoiding additional environment
- * allocation.
+ *     data structure representing current thread. Top-level lustre code
+ * allocates an environment and makes it current, then calls into
+ * non-lustre code, that in turn calls lustre back. Low-level lustre
+ * code thus called can fetch environment created by the top-level code
+ * and reuse it, avoiding additional environment allocation.
+ * Right now, three interfaces can attach the cl_env to running thread:
+ * - cl_env_get
+ * - cl_env_implant
+ *         - cl_env_reexit (cl_env_reenter had to be called previously)
*
* \see lu_env, lu_context, lu_context_key
* @{ */