LU-1408 debug: initialize debug_msg_data if needed
[fs/lustre-release.git] lustre/include/cl_object.h
index 03ee887..ea0597a 100644
@@ -98,7 +98,6 @@
  * super-class definitions.
  */
 #include <lu_object.h>
-#include <lvfs.h>
 #ifdef __KERNEL__
 #        include <linux/mutex.h>
 #        include <linux/radix-tree.h>
@@ -388,6 +387,12 @@ struct cl_object_operations {
          */
         int (*coo_glimpse)(const struct lu_env *env,
                            const struct cl_object *obj, struct ost_lvb *lvb);
+       /**
+        * Object prune method. Called when the layout of this object is
+        * going to change, so each layer has to clean up its cache,
+        * mainly pages and locks.
+        */
+       int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
 };
 
 /**
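
The new coo_prune hook is easiest to see from a layer's side. Below is a minimal sketch of a layer wiring it up; my_object, my_object_glimpse, my_prune_pages and my_prune_locks are hypothetical names, not part of this patch:

/* Hypothetical layer implementation of coo_prune: drop the layer's
 * cached pages and locks before the layout changes.  All my_* names
 * are illustrative. */
static int my_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct my_object *mobj = my_object(obj);
	int rc;

	rc = my_prune_pages(env, mobj);		/* discard cached pages first */
	if (rc == 0)
		rc = my_prune_locks(env, mobj);	/* then the covering locks */
	return rc;
}

static const struct cl_object_operations my_object_ops = {
	.coo_glimpse = my_object_glimpse,
	.coo_prune   = my_object_prune,
};
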
@@ -402,15 +407,9 @@ struct cl_object_header {
          * mostly useless otherwise.
          */
         /** @{ */
-        /** Lock protecting page tree. */
-       spinlock_t               coh_page_guard;
        /** Lock protecting lock list. */
        spinlock_t               coh_lock_guard;
         /** @} locks */
-        /** Radix tree of cl_page's, cached for this object. */
-        struct radix_tree_root   coh_tree;
-        /** # of pages in radix tree. */
-        unsigned long            coh_pages;
         /** List of cl_lock's granted for this object. */
         cfs_list_t               coh_locks;
 
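
With the per-object page radix tree (coh_tree, coh_page_guard, coh_pages) gone, coh_lock_guard now only serializes the granted-lock list. A sketch of the remaining locking pattern, assuming the existing clio names cl_object_header() and cl_lock::cll_linkage; the loop body is illustrative:

/* Sketch: inspecting granted locks under coh_lock_guard. */
struct cl_object_header *hdr = cl_object_header(obj);
struct cl_lock *lock;

spin_lock(&hdr->coh_lock_guard);
cfs_list_for_each_entry(lock, &hdr->coh_locks, cll_linkage) {
	/* e.g. check for a conflicting extent ... */
}
spin_unlock(&hdr->coh_lock_guard);
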
@@ -760,7 +759,7 @@ struct cl_page {
         /**
          * Debug information, the task is owning the page.
          */
-        cfs_task_t              *cp_task;
+       struct task_struct      *cp_task;
         /**
          * Owning IO request in cl_page_state::CPS_PAGEOUT and
          * cl_page_state::CPS_PAGEIN states. This field is maintained only in
@@ -892,14 +891,6 @@ struct cl_page_operations {
         void  (*cpo_export)(const struct lu_env *env,
                             const struct cl_page_slice *slice, int uptodate);
         /**
-         * Unmaps page from the user space (if it is mapped).
-         *
-         * \see cl_page_unmap()
-         * \see vvp_page_unmap()
-         */
-        int (*cpo_unmap)(const struct lu_env *env,
-                         const struct cl_page_slice *slice, struct cl_io *io);
-        /**
          * Checks whether underlying VM page is locked (in the suitable
          * sense). Used for assertions.
          *
@@ -1022,26 +1013,6 @@ struct cl_page_operations {
                  */
                 int  (*cpo_make_ready)(const struct lu_env *env,
                                        const struct cl_page_slice *slice);
-                /**
-                 * Announce that this page is to be written out
-                 * opportunistically, that is, page is dirty, it is not
-                 * necessary to start write-out transfer right now, but
-                 * eventually page has to be written out.
-                 *
-                 * Main caller of this is the write path (see
-                 * vvp_io_commit_write()), using this method to build a
-                 * "transfer cache" from which large transfers are then
-                 * constructed by the req-formation engine.
-                 *
-                 * \todo XXX it would make sense to add page-age tracking
-                 * semantics here, and to oblige the req-formation engine to
-                 * send the page out not later than it is too old.
-                 *
-                 * \see cl_page_cache_add()
-                 */
-                int  (*cpo_cache_add)(const struct lu_env *env,
-                                      const struct cl_page_slice *slice,
-                                      struct cl_io *io);
         } io[CRT_NR];
         /**
  * Tell transfer engine that only [from, to] part of a page should be
@@ -1093,9 +1064,8 @@ struct cl_page_operations {
  */
 #define CL_PAGE_DEBUG(mask, env, page, format, ...)                     \
 do {                                                                    \
-        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);                \
-                                                                        \
         if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                   \
+                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);        \
                 cl_page_print(env, &msgdata, lu_cdebug_printer, page);  \
                 CDEBUG(mask, format , ## __VA_ARGS__);                  \
         }                                                               \
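
This hunk (and the matching CL_PAGE_HEADER and CL_LOCK_DEBUG hunks below) is the actual LU-1408 fix: LIBCFS_DEBUG_MSG_DATA_DECL used to declare and initialize msgdata unconditionally, even when the debug mask was off. Moving it inside the cfs_cdebug_show() branch means the message data is only initialized when it will actually be consumed. A minimal sketch of the resulting pattern, with a hypothetical MY_DEBUG macro:

/* Hypothetical macro showing the pattern this patch applies: the
 * msgdata declaration/initialization happens only on the branch that
 * actually emits the message. */
#define MY_DEBUG(mask, format, ...)                                     \
do {                                                                    \
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                   \
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);        \
		libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__);     \
	}                                                               \
} while (0)
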
@@ -1106,9 +1076,8 @@ do {                                                                    \
  */
 #define CL_PAGE_HEADER(mask, env, page, format, ...)                          \
 do {                                                                          \
-        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);                      \
-                                                                              \
         if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                         \
+                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);              \
                 cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
                 CDEBUG(mask, format , ## __VA_ARGS__);                        \
         }                                                                     \
@@ -1561,30 +1530,30 @@ struct cl_lock {
          */
         struct cl_lock_descr  cll_descr;
         /** Protected by cl_lock::cll_guard. */
-        enum cl_lock_state    cll_state;
-        /** signals state changes. */
-        cfs_waitq_t           cll_wq;
-        /**
-         * Recursive lock, most fields in cl_lock{} are protected by this.
-         *
-         * Locking rules: this mutex is never held across network
-         * communication, except when lock is being canceled.
-         *
-         * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
-         * on a top-lock. Other direction is implemented through a
-         * try-lock-repeat loop. Mutices of unrelated locks can be taken only
-         * by try-locking.
-         *
-         * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
-         */
+       enum cl_lock_state    cll_state;
+       /** signals state changes. */
+       wait_queue_head_t     cll_wq;
+       /**
+        * Recursive lock, most fields in cl_lock{} are protected by this.
+        *
+        * Locking rules: this mutex is never held across network
+        * communication, except when lock is being canceled.
+        *
+        * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+        * on a top-lock. Other direction is implemented through a
+        * try-lock-repeat loop. Mutexes of unrelated locks can be taken only
+        * by try-locking.
+        *
+        * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+        */
        struct mutex            cll_guard;
-        cfs_task_t           *cll_guarder;
+       struct task_struct    *cll_guarder;
         int                   cll_depth;
 
         /**
          * the owner for INTRANSIT state
          */
-        cfs_task_t           *cll_intransit_owner;
+       struct task_struct    *cll_intransit_owner;
         int                   cll_error;
         /**
          * Number of holds on a lock. A hold prevents a lock from being
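
The ordering rule documented above (sub-lock mutex before top-lock mutex, with the reverse direction done by try-lock) can be sketched as follows; cl_lock_mutex_get(), cl_lock_mutex_try() and cl_lock_mutex_put() are existing clio helpers, while the surrounding loop is illustrative:

/* Sketch: top-lock mutex is held, sub-lock mutex is wanted -- the
 * "other direction", so use try-lock and back off on contention. */
while (cl_lock_mutex_try(env, sublock) != 0) {
	cl_lock_mutex_put(env, toplock);
	/* let the current holder of the sub-lock mutex finish */
	cl_lock_mutex_get(env, sublock);
	cl_lock_mutex_put(env, sublock);
	cl_lock_mutex_get(env, toplock);
}
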
@@ -1831,9 +1800,8 @@ struct cl_lock_operations {
 
 #define CL_LOCK_DEBUG(mask, env, lock, format, ...)                     \
 do {                                                                    \
-        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);                \
-                                                                        \
         if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                   \
+                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);        \
                 cl_lock_print(env, &msgdata, lu_cdebug_printer, lock);  \
                 CDEBUG(mask, format , ## __VA_ARGS__);                  \
         }                                                               \
@@ -1869,9 +1837,9 @@ do {                                                                    \
  * @{
  */
 struct cl_page_list {
-        unsigned             pl_nr;
-        cfs_list_t           pl_pages;
-        cfs_task_t          *pl_owner;
+       unsigned             pl_nr;
+       cfs_list_t           pl_pages;
+       struct task_struct   *pl_owner;
 };
 
 /** 
@@ -2022,6 +1990,8 @@ struct cl_io_slice {
         cfs_list_t                     cis_linkage;
 };
 
+typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
+                               struct cl_page *);
 
 /**
  * Per-layer io operations.
@@ -2106,20 +2076,28 @@ struct cl_io_operations {
                 void (*cio_fini) (const struct lu_env *env,
                                   const struct cl_io_slice *slice);
         } op[CIT_OP_NR];
-        struct {
-                /**
-                 * Submit pages from \a queue->c2_qin for IO, and move
-                 * successfully submitted pages into \a queue->c2_qout. Return
-                 * non-zero if failed to submit even the single page. If
-                 * submission failed after some pages were moved into \a
-                 * queue->c2_qout, completion callback with non-zero ioret is
-                 * executed on them.
-                 */
-                int  (*cio_submit)(const struct lu_env *env,
-                                   const struct cl_io_slice *slice,
-                                   enum cl_req_type crt,
-                                  struct cl_2queue *queue);
-        } req_op[CRT_NR];
+
+       /**
+        * Submit pages from \a queue->c2_qin for IO, and move
+        * successfully submitted pages into \a queue->c2_qout. Return
+        * non-zero if it fails to submit even a single page. If
+        * submission fails after some pages were moved into \a
+        * queue->c2_qout, the completion callback is executed on them
+        * with a non-zero ioret.
+        */
+       int  (*cio_submit)(const struct lu_env *env,
+                       const struct cl_io_slice *slice,
+                       enum cl_req_type crt,
+                       struct cl_2queue *queue);
+       /**
+        * Queue an async page for write.
+        * The difference between cio_submit and cio_commit_async is
+        * that cio_submit is for urgent requests.
+        */
+       int  (*cio_commit_async)(const struct lu_env *env,
+                       const struct cl_io_slice *slice,
+                       struct cl_page_list *queue, int from, int to,
+                       cl_commit_cbt cb);
         /**
          * Read missing page.
          *
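
How the new cl_commit_cbt and cio_commit_async fit together, from the caller's perspective; my_page_committed and my_commit_pages are hypothetical names, and from/to are the byte offsets within the first and last pages of the queue:

/* Hypothetical commit callback: invoked per page, e.g. to release the
 * reference taken when the page was queued. */
static void my_page_committed(const struct lu_env *env, struct cl_io *io,
			      struct cl_page *page)
{
	/* ... per-page cleanup ... */
}

static int my_commit_pages(const struct lu_env *env, struct cl_io *io,
			   struct cl_page_list *queue, int from, int to)
{
	/* from/to bound the byte range in the first/last page */
	return cl_io_commit_async(env, io, queue, from, to,
				  my_page_committed);
}
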
@@ -2132,31 +2110,6 @@ struct cl_io_operations {
                              const struct cl_io_slice *slice,
                              const struct cl_page_slice *page);
         /**
-         * Prepare write of a \a page. Called bottom-to-top by a top-level
-         * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare page for
-         * get data from user-level buffer.
-         *
-         * \pre io->ci_type == CIT_WRITE
-         *
-         * \see vvp_io_prepare_write(), lov_io_prepare_write(),
-         * osc_io_prepare_write().
-         */
-        int (*cio_prepare_write)(const struct lu_env *env,
-                                 const struct cl_io_slice *slice,
-                                 const struct cl_page_slice *page,
-                                 unsigned from, unsigned to);
-        /**
-         *
-         * \pre io->ci_type == CIT_WRITE
-         *
-         * \see vvp_io_commit_write(), lov_io_commit_write(),
-         * osc_io_commit_write().
-         */
-        int (*cio_commit_write)(const struct lu_env *env,
-                                const struct cl_io_slice *slice,
-                                const struct cl_page_slice *page,
-                                unsigned from, unsigned to);
-        /**
          * Optional debugging helper. Print given io slice.
          */
         int (*cio_print)(const struct lu_env *env, void *cookie,
@@ -2778,6 +2731,15 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
        return (void *)((char *)page + clob->co_slice_off);
 }
 
+/**
+ * Return refcount of cl_object.
+ */
+static inline int cl_object_refc(struct cl_object *clob)
+{
+       struct lu_object_header *header = clob->co_lu.lo_header;
+       return cfs_atomic_read(&header->loh_ref);
+}
+
 /** @} cl_object */
 
 /** \defgroup cl_page cl_page
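
A small illustrative use of the new cl_object_refc() helper; my_object_is_shared is hypothetical:

/* Illustrative: decide whether anyone besides the caller still
 * references the object, e.g. before tearing down its layout. */
static bool my_object_is_shared(struct cl_object *clob)
{
	/* one reference is owned by the caller itself */
	return cl_object_refc(clob) > 1;
}
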
@@ -2788,25 +2750,16 @@ enum {
         CLP_GANG_AGAIN,
         CLP_GANG_ABORT
 };
-
 /* callback of cl_page_gang_lookup() */
-typedef int   (*cl_page_gang_cb_t)  (const struct lu_env *, struct cl_io *,
-                                     struct cl_page *, void *);
-int             cl_page_gang_lookup (const struct lu_env *env,
-                                     struct cl_object *obj,
-                                     struct cl_io *io,
-                                     pgoff_t start, pgoff_t end,
-                                     cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup      (struct cl_object_header *hdr,
-                                     pgoff_t index);
+
 struct cl_page *cl_page_find        (const struct lu_env *env,
                                      struct cl_object *obj,
                                      pgoff_t idx, struct page *vmpage,
                                      enum cl_page_type type);
-struct cl_page *cl_page_find_sub    (const struct lu_env *env,
-                                     struct cl_object *obj,
-                                     pgoff_t idx, struct page *vmpage,
-                                     struct cl_page *parent);
+struct cl_page *cl_page_alloc       (const struct lu_env *env,
+                                    struct cl_object *o, pgoff_t ind,
+                                    struct page *vmpage,
+                                    enum cl_page_type type);
 void            cl_page_get         (struct cl_page *page);
 void            cl_page_put         (const struct lu_env *env,
                                      struct cl_page *page);
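
Typical cl_page_find() usage, now that the gang-lookup helpers are gone from this header; error handling is abbreviated and the surrounding names (env, obj, vmpage) come from the caller:

/* Sketch: find-or-create the cl_page backing a VM page.  CPT_CACHEABLE
 * is the existing cl_page_type for pagecache pages. */
struct cl_page *page;

page = cl_page_find(env, obj, vmpage->index, vmpage, CPT_CACHEABLE);
if (IS_ERR(page))
	return PTR_ERR(page);
/* ... use the page ... */
cl_page_put(env, page);
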
@@ -2877,8 +2830,6 @@ int  cl_page_flush      (const struct lu_env *env, struct cl_io *io,
 void    cl_page_discard      (const struct lu_env *env, struct cl_io *io,
                               struct cl_page *pg);
 void    cl_page_delete       (const struct lu_env *env, struct cl_page *pg);
-int     cl_page_unmap        (const struct lu_env *env, struct cl_io *io,
-                              struct cl_page *pg);
 int     cl_page_is_vmlocked  (const struct lu_env *env,
                               const struct cl_page *pg);
 void    cl_page_export       (const struct lu_env *env,
@@ -3061,15 +3012,14 @@ int   cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
                            struct cl_lock_descr *descr);
 int   cl_io_read_page    (const struct lu_env *env, struct cl_io *io,
                           struct cl_page *page);
-int   cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
-                          struct cl_page *page, unsigned from, unsigned to);
-int   cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
-                          struct cl_page *page, unsigned from, unsigned to);
 int   cl_io_submit_rw    (const struct lu_env *env, struct cl_io *io,
                          enum cl_req_type iot, struct cl_2queue *queue);
 int   cl_io_submit_sync  (const struct lu_env *env, struct cl_io *io,
                          enum cl_req_type iot, struct cl_2queue *queue,
                          long timeout);
+int   cl_io_commit_async (const struct lu_env *env, struct cl_io *io,
+                         struct cl_page_list *queue, int from, int to,
+                         cl_commit_cbt cb);
 void  cl_io_rw_advance   (const struct lu_env *env, struct cl_io *io,
                           size_t nob);
 int   cl_io_cancel       (const struct lu_env *env, struct cl_io *io,
@@ -3131,6 +3081,12 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
         return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
 }
 
+static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
+{
+       LASSERT(plist->pl_nr > 0);
+       return cfs_list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
+}
+
 /**
  * Iterate over pages in a page list.
  */
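
The new cl_page_list_first() pairs naturally with cl_page_list_del() for draining a list front-to-back; the loop body here is illustrative:

/* Sketch: drain a page list from the head. */
while (plist->pl_nr > 0) {
	struct cl_page *page = cl_page_list_first(plist);

	/* ... process the page ... */
	cl_page_list_del(env, plist, page);
}
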
@@ -3147,6 +3103,8 @@ void cl_page_list_init   (struct cl_page_list *plist);
 void cl_page_list_add    (struct cl_page_list *plist, struct cl_page *page);
 void cl_page_list_move   (struct cl_page_list *dst, struct cl_page_list *src,
                           struct cl_page *page);
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+                         struct cl_page *page);
 void cl_page_list_splice (struct cl_page_list *list,
                           struct cl_page_list *head);
 void cl_page_list_del    (const struct lu_env *env,
@@ -3159,8 +3117,6 @@ void cl_page_list_assume (const struct lu_env *env,
                           struct cl_io *io, struct cl_page_list *plist);
 void cl_page_list_discard(const struct lu_env *env,
                           struct cl_io *io, struct cl_page_list *plist);
-int  cl_page_list_unmap  (const struct lu_env *env,
-                          struct cl_io *io, struct cl_page_list *plist);
 void cl_page_list_fini   (const struct lu_env *env, struct cl_page_list *plist);
 
 void cl_2queue_init     (struct cl_2queue *queue);
@@ -3206,7 +3162,7 @@ struct cl_sync_io {
        /** barrier of destroy this structure */
        cfs_atomic_t            csi_barrier;
        /** completion to be signaled when transfer is complete. */
-       cfs_waitq_t             csi_waitq;
+       wait_queue_head_t       csi_waitq;
 };
 
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
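
The handshake around the new wait-queue field, roughly as cl_io_submit_sync() drives it; cl_sync_io_note() and cl_sync_io_wait() are the companion helpers from the same API, and this fragment assumes the submission path has attached the anchor to every page:

/* Sketch of the cl_sync_io handshake (queue/io/timeout belong to the
 * caller).  Only the init/note/wait structure is shown. */
struct cl_sync_io anchor;
int rc;

cl_sync_io_init(&anchor, queue->c2_qin.pl_nr);
/* ... submit pages; each completion calls cl_sync_io_note(&anchor, ioret) ... */
rc = cl_sync_io_wait(env, io, &queue->c2_qout, &anchor, timeout);
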
@@ -3271,6 +3227,9 @@ void          *cl_env_reenter    (void);
 void           cl_env_reexit     (void *cookie);
 void           cl_env_implant    (struct lu_env *env, int *refcheck);
 void           cl_env_unplant    (struct lu_env *env, int *refcheck);
+unsigned       cl_env_cache_purge(unsigned nr);
+struct lu_env *cl_env_percpu_get (void);
+void           cl_env_percpu_put (struct lu_env *env);
 
 /** @} cl_env */
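
Finally, the new per-CPU env helpers; a minimal sketch of the intended usage, assuming the caller keeps the section short:

/* Sketch: borrow a pre-allocated per-CPU lu_env instead of paying for
 * cl_env_get()/cl_env_put() on a hot path. */
struct lu_env *env = cl_env_percpu_get();

/* ... a brief clio call sequence using env ... */

cl_env_percpu_put(env);
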