lustre/obdclass/cl_page.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Page.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <libcfs/list.h>

#include <cl_object.h>
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);

#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LASSERT(0);                                           \
          }                                                             \
  } while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */

#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define PINVRNT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LINVRNT(0);                                           \
          }                                                             \
  } while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define PINVRNT(env, page, exp) \
         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
/* Disable page statistics by default due to the huge performance penalty. */
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CS_PAGE_INC(o, item) \
        atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGE_DEC(o, item) \
        atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGESTATE_INC(o, state) \
        atomic_inc(&cl_object_site(o)->cs_pages_state[state])
#define CS_PAGESTATE_DEC(o, state) \
        atomic_dec(&cl_object_site(o)->cs_pages_state[state])
#else
#define CS_PAGE_INC(o, item)
#define CS_PAGE_DEC(o, item)
#define CS_PAGESTATE_INC(o, state)
#define CS_PAGESTATE_DEC(o, state)
#endif

/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can only be called if concurrent page
 * reclamation is somehow prevented, e.g., by keeping a lock on the VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(atomic_read(&page->cp_ref) > 0);
        atomic_inc(&page->cp_ref);
}

/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_lock_at()
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;
        ENTRY;

        cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        RETURN(slice);
        }
        RETURN(NULL);
}

static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj  = page->cp_obj;
        int pagesize = cl_object_header(obj)->coh_page_bufsize;

        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        ENTRY;
        while (!cfs_list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = cfs_list_entry(page->cp_layers.next,
                                       struct cl_page_slice, cpl_linkage);
                cfs_list_del_init(page->cp_layers.next);
                if (unlikely(slice->cpl_ops->cpo_fini != NULL))
                        slice->cpl_ops->cpo_fini(env, slice);
        }
        CS_PAGE_DEC(obj, total);
        CS_PAGESTATE_DEC(obj, page->cp_state);
        lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        OBD_FREE(page, pagesize);
        EXIT;
}

/**
 * Helper function updating page state. This is the only place in the code
 * where cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        /* bypass const. */
        *(enum cl_page_state *)&page->cp_state = state;
}

struct cl_page *cl_page_alloc(const struct lu_env *env,
                struct cl_object *o, pgoff_t ind, struct page *vmpage,
                enum cl_page_type type)
{
        struct cl_page          *page;
        struct lu_object_header *head;

        ENTRY;
        OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
                        GFP_NOFS);
        if (page != NULL) {
                int result = 0;
                atomic_set(&page->cp_ref, 1);
                page->cp_obj = o;
                cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
                                     page);
                page->cp_vmpage = vmpage;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                CFS_INIT_LIST_HEAD(&page->cp_layers);
                CFS_INIT_LIST_HEAD(&page->cp_batch);
                CFS_INIT_LIST_HEAD(&page->cp_flight);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                cfs_list_for_each_entry(o, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o, page,
                                                                  ind);
                                if (result != 0) {
                                        cl_page_delete0(env, page);
                                        cl_page_free(env, page);
                                        page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
                if (result == 0) {
                        CS_PAGE_INC(o, total);
                        CS_PAGE_INC(o, create);
                        CS_PAGESTATE_DEC(o, CPS_CACHED);
                }
        } else {
                page = ERR_PTR(-ENOMEM);
        }
        RETURN(page);
}
EXPORT_SYMBOL(cl_page_alloc);

/**
 * Returns the cl_page with index \a idx in the object \a o, associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First,
 * the cache is consulted: for a CPT_CACHEABLE page the existing cl_page (if
 * any) is reached through the VM page's ->private pointer. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In either case, an additional reference to the page is
 * acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
                             struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        struct cl_page          *page = NULL;
        struct cl_object_header *hdr;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
        might_sleep();

        ENTRY;

        hdr = cl_object_header(o);
        CS_PAGE_INC(o, lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        /* fast path. */
        if (type == CPT_CACHEABLE) {
                /* vmpage lock is used to protect the child/parent
                 * relationship */
                KLASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 *     - "vmpage" is locked (which prevents ->private from
                 *       concurrent updates), and
                 *
                 *     - "o" cannot be destroyed while current thread holds a
                 *       reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                if (page != NULL) {
                        CS_PAGE_INC(o, hit);
                        RETURN(page);
                }
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        RETURN(page);
}
EXPORT_SYMBOL(cl_page_find);
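
/*
 * Illustrative sketch (not compiled): a typical caller pattern for looking
 * up and pinning a cacheable page.  The helper name and the surrounding
 * assumptions (a locked VM page, a valid environment) are hypothetical;
 * real callers live in the llite and osc layers.
 */
#if 0
static int cl_page_find_example(const struct lu_env *env,
                                struct cl_object *obj, pgoff_t idx,
                                struct page *vmpage)
{
        struct cl_page *page;

        /* vmpage must be locked, see the KLASSERT() in cl_page_find(). */
        page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                return PTR_ERR(page);

        /* ... use the page under the reference acquired above ... */

        cl_page_put(env, page);
        return 0;
}
#endif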

static inline int cl_page_invariant(const struct cl_page *pg)
{
        /*
         * Page invariant is protected by a VM lock.
         */
        LINVRNT(cl_page_is_vmlocked(NULL, pg));

        return cl_page_in_use_noref(pg);
}

static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking.
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_CACHED]  = 0,
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction under memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                },
                [CPS_FREEING] = {
                        [CPS_CACHED]  = 0,
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                }
        };

        ENTRY;
        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        PASSERT(env, page, page->cp_state == old);
        PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));

        CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
        CS_PAGESTATE_INC(page->cp_obj, state);
        cl_page_state_set_trust(page, state);
        EXIT;
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}

/**
 * Acquires an additional reference to a page.
 *
 * This can only be called by a caller already possessing a reference to
 * \a page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        ENTRY;
        cl_page_get_trust(page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page.
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * immediately destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       atomic_read(&page->cp_ref));

        if (atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
                PASSERT(env, page, cfs_list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_page_put);
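
/*
 * Minimal sketch (not compiled) of reference pairing: every cl_page_get()
 * must be matched by a cl_page_put() in the same logical context.  The
 * helper below is hypothetical and only illustrates the contract.
 */
#if 0
static void cl_page_ref_example(const struct lu_env *env, struct cl_page *page)
{
        cl_page_get(page);      /* caller already holds one reference */
        /* ... hand the page to some other consumer ... */
        cl_page_put(env, page); /* drop the extra reference when done */
}
#endif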

/**
 * Returns the cl_page associated with a VM page and the given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
        struct cl_page *page;

        ENTRY;
        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by the
         *       page lock on the "vmpage". That works because object
         *       destruction proceeds bottom-to-top.
         */

        page = (struct cl_page *)vmpage->private;
        if (page != NULL) {
                cl_page_get_trust(page);
                LASSERT(page->cp_type == CPT_CACHEABLE);
        }
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
({                                                                      \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        int                         __result;                           \
        ptrdiff_t                   __op   = (_op);                     \
        int                       (*__method)_proto;                    \
                                                                        \
        __result = 0;                                                   \
        cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {     \
                __method = *(void **)((char *)__scan->cpl_ops +  __op);        \
                if (__method != NULL) {                                        \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
                        if (__result != 0)                              \
                                break;                                  \
                }                                                       \
        }                                                               \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)           \
({                                                                      \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        int                         __result;                           \
        ptrdiff_t                   __op   = (_op);                     \
        int                       (*__method)_proto;                    \
                                                                        \
        __result = 0;                                                   \
        list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
                                        cpl_linkage) {                  \
                __method = *(void **)((char *)__scan->cpl_ops +  __op); \
                if (__method != NULL) {                                 \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
                        if (__result != 0)                              \
                                break;                                  \
                }                                                       \
        }                                                               \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        cfs_list_for_each_entry(__scan, &__page->cp_layers,             \
                                cpl_linkage) {                          \
                __method = *(void **)((char *)__scan->cpl_ops +  __op); \
                if (__method != NULL)                                   \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
        }                                                               \
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        /* get to the bottom page. */                                   \
        cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                __method = *(void **)((char *)__scan->cpl_ops + __op);  \
                if (__method != NULL)                                   \
                        (*__method)(__env, __scan, ## __VA_ARGS__);     \
        }                                                               \
} while (0)
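
/*
 * Minimal sketch (not compiled) of how the dispatch macros above are used:
 * CL_PAGE_OP() turns a cl_page_operations member into an offset, and
 * CL_PAGE_INVOKE() walks cp_layers calling that method on every slice until
 * one of them returns non-zero.  cl_page_own0() below uses exactly this
 * pattern for cpo_own; the wrapper name here is hypothetical.
 */
#if 0
static int cl_page_invoke_own_example(const struct lu_env *env,
                                      struct cl_io *io, struct cl_page *pg,
                                      int nonblock)
{
        return CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                              (const struct lu_env *,
                               const struct cl_page_slice *,
                               struct cl_io *, int),
                              io, nonblock);
}
#endif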

static int cl_page_invoke(const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page, ptrdiff_t op)

{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(CL_PAGE_INVOKE(env, page, op,
                              (const struct lu_env *,
                               const struct cl_page_slice *, struct cl_io *),
                              io));
}

static void cl_page_invoid(const struct lu_env *env,
                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)

{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        ENTRY;
        CL_PAGE_INVOID(env, page, op,
                       (const struct lu_env *,
                        const struct cl_page_slice *, struct cl_io *), io);
        EXIT;
}

static void cl_page_owner_clear(struct cl_page *page)
{
        ENTRY;
        if (page->cp_owner != NULL) {
                LASSERT(page->cp_owner->ci_owned_nr > 0);
                page->cp_owner->ci_owned_nr--;
                page->cp_owner = NULL;
        }
        EXIT;
}

static void cl_page_owner_set(struct cl_page *page)
{
        ENTRY;
        LASSERT(page->cp_owner != NULL);
        page->cp_owner->ci_owned_nr++;
        EXIT;
}

void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        enum cl_page_state state;

        ENTRY;
        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in bottom-up order, so that
         * the uppermost layer (llite), responsible for VFS/VM interaction,
         * runs last and can release locks safely.
         */
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
        EXIT;
}

/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        struct cl_io *top = cl_io_top((struct cl_io *)io);
        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
}
EXPORT_SYMBOL(cl_page_is_owned);

/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., the page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or the page was owned by another thread, or is in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 * \see cl_page_own
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        ENTRY;
        io = cl_io_top(io);

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
        } else {
                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                        (const struct lu_env *,
                                         const struct cl_page_slice *,
                                         struct cl_io *, int),
                                        io, nonblock);
                if (result == 0) {
                        PASSERT(env, pg, pg->cp_owner == NULL);
                        PASSERT(env, pg, pg->cp_req == NULL);
                        pg->cp_owner = cl_io_top(io);
                        cl_page_owner_set(pg);
                        if (pg->cp_state != CPS_FREEING) {
                                cl_page_state_set(env, pg, CPS_OWNED);
                        } else {
                                cl_page_disown0(env, io, pg);
                                result = -ENOENT;
                        }
                }
        }
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        RETURN(result);
}

/**
 * Own a page. Might block.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblocking version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
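
/*
 * Minimal sketch (not compiled) of the two ownership entry points: the
 * blocking cl_page_own() versus the nonblocking cl_page_own_try().  The
 * helper and its fallback policy are hypothetical.
 */
#if 0
static int cl_page_own_example(const struct lu_env *env, struct cl_io *io,
                               struct cl_page *pg, int can_wait)
{
        int rc;

        if (can_wait)
                rc = cl_page_own(env, io, pg);      /* may block in cpo_own */
        else
                rc = cl_page_own_try(env, io, pg);  /* fails instead of waiting */

        if (rc == 0) {
                /* the page is now in CPS_OWNED and attributed to io */
                cl_page_disown(env, io, pg);
        }
        return rc;
}
#endif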


/**
 * Assume page ownership.
 *
 * Called when the page is already locked by the hosting VM.
 *
 * \pre !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

        ENTRY;
        io = cl_io_top(io);

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        PASSERT(env, pg, pg->cp_owner == NULL);
        pg->cp_owner = cl_io_top(io);
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
        EXIT;
}
EXPORT_SYMBOL(cl_page_assume);

/**
 * Releases page ownership without unlocking the page.
 *
 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
 * underlying VM page (as the VM is supposed to do this itself).
 *
 * \pre   cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        io = cl_io_top(io);
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
        EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);

/**
 * Releases page ownership.
 *
 * Moves page into cl_page_state::CPS_CACHED.
 *
 * \pre   cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_own()
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
                pg->cp_state == CPS_FREEING);

        ENTRY;
        io = cl_io_top(io);
        cl_page_disown0(env, io, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_disown);

/**
 * Called when page is to be removed from the object, e.g., as a result of
 * truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);

/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in an error-handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check the page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
        ENTRY;

        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);

        cl_page_state_set0(env, pg, CPS_FREEING);

        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
                       (const struct lu_env *, const struct cl_page_slice *));

        EXIT;
}

/**
 * Called when a decision is made to throw a page out of memory.
 *
 * Notifies all layers about page destruction by calling the
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves the page into cl_page_state::CPS_FREEING state (this is the only
 * place where a transition to this state happens).
 *
 * Eliminates all avenues through which new references to the page can be
 * obtained:
 *
 *     - removes the page from the radix trees,
 *
 *     - breaks the linkage from the VM page to the cl_page.
 *
 * Once the page reaches cl_page_state::CPS_FREEING, all remaining references
 * will drain after some time, at which point the page will be recycled.
 *
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        ENTRY;
        cl_page_delete0(env, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_delete);

/**
 * Marks the page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date according to the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);

/**
 * Returns true iff \a pg is VM-locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        int result;
        const struct cl_page_slice *slice;

        ENTRY;
        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
        RETURN(result == -EBUSY);
}
EXPORT_SYMBOL(cl_page_is_vmlocked);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        ENTRY;
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        ENTRY;
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
        EXIT;
}

/**
 * Prepares a page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that the
 * page is now under transfer.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        if (crt >= CRT_NR)
                return -EINVAL;
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);

        KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
                      equi(result == 0,
                           PageWriteback(cl_page_vmpage(pg)))));
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_prep);
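
/*
 * Minimal sketch (not compiled) of submitting an owned, cacheable page for
 * write-out via cl_page_prep().  Error handling and queueing are elided and
 * the helper name is hypothetical.
 */
#if 0
static int cl_page_prep_write_example(const struct lu_env *env,
                                      struct cl_io *io, struct cl_page *pg)
{
        int rc;

        /* pg must be owned by io, see the PINVRNT() in cl_page_prep(). */
        rc = cl_page_prep(env, io, pg, CRT_WRITE);
        if (rc == 0) {
                /* page is now in CPS_PAGEOUT with PG_writeback set */
        } else if (rc == -EALREADY) {
                /* some layer asked to skip this page */
                rc = 0;
        }
        return rc;
}
#endif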

/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        /* cl_page::cp_req already cleared by the caller (osc_completion()) */
        PASSERT(env, pg, pg->cp_req == NULL);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        cl_page_state_set(env, pg, CPS_CACHED);
        if (crt >= CRT_NR)
                return;
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor) {
                LASSERT(cl_page_is_vmlocked(env, pg));
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
        }
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, pg);

        if (anchor)
                cl_sync_io_note(anchor, ioret);

        EXIT;
}
EXPORT_SYMBOL(cl_page_completion);

/**
 * Notify layers that the transfer formation engine decided to yank this page
 * from the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);

        ENTRY;
        if (crt >= CRT_NR)
                RETURN(-EINVAL);
        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        if (result == 0) {
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_make_ready);

/**
 * Called when a page is being written back at the kernel's request.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *pg)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;

        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_flush);

/**
 * Checks whether the page is protected by an extent lock of at least the
 * required mode.
 *
 * \return the same as the cl_page_operations::cpo_is_under_lock() method.
 * \see cl_page_operations::cpo_is_under_lock()
 */
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page, pgoff_t *max_index)
{
        int rc;

        PINVRNT(env, page, cl_page_invariant(page));

        ENTRY;
        rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
                                    (const struct lu_env *,
                                     const struct cl_page_slice *,
                                     struct cl_io *, pgoff_t *),
                                    io, max_index);
        RETURN(rc);
}
EXPORT_SYMBOL(cl_page_is_under_lock);

/**
 * Tells the transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int, int),
                       from, to);
}
EXPORT_SYMBOL(cl_page_clip);

/**
 * Prints a human-readable representation of \a pg through \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p %d %d %d %p %p]\n",
                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_error, pg->cp_type,
                   pg->cp_owner, pg->cp_req);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints a human-readable representation of \a pg through \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        cl_page_header_print(env, cookie, printer, pg);
        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
                       (const struct lu_env *env,
                        const struct cl_page_slice *slice,
                        void *cookie, lu_printer_t p), cookie, printer);
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);

/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
                              (const struct lu_env *,
                               const struct cl_page_slice *));
}
EXPORT_SYMBOL(cl_page_cancel);

/**
 * Converts a page index into a byte offset within object \a obj.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

int cl_page_size(const struct cl_object *obj)
{
        return 1 << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
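
/*
 * Worked example (assuming PAGE_CACHE_SHIFT == 12, i.e. 4KiB pages):
 * cl_offset(obj, 3) == 3 << 12 == 12288, and cl_index(obj, 12288) == 3.
 * cl_index() rounds down, so cl_index(obj, 12289) == 3 as well.
 */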

/**
 * Adds a page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add
 * per-layer state to the page. The new state is added at the end of the
 * cl_page::cp_layers list, that is, at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj, pgoff_t index,
                       const struct cl_page_operations *ops)
{
        ENTRY;
        list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj  = obj;
        slice->cpl_index = index;
        slice->cpl_ops  = ops;
        slice->cpl_page = page;
        EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);
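
/*
 * Minimal sketch (not compiled) of how a layer's coo_page_init() method
 * would attach its slice with cl_page_slice_add().  The "foo" slice type
 * and operations vector are hypothetical, and cl_object_page_slice() is
 * assumed to be available from cl_object.h for locating the per-layer
 * buffer; see the osc/lov/vvp layers for the real implementations.
 */
#if 0
struct foo_page {
        struct cl_page_slice fp_cl;
};

static const struct cl_page_operations foo_page_ops;

static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
                         struct cl_page *page, pgoff_t index)
{
        struct foo_page *fp = cl_object_page_slice(obj, page);

        cl_page_slice_add(page, &fp->fp_cl, obj, index, &foo_page_ops);
        return 0;
}
#endif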

int  cl_page_init(void)
{
        return 0;
}

void cl_page_fini(void)
{
}