LU-4357 libcfs: restore __GFP_WAIT flag to memalloc calls
[fs/lustre-release.git] / lustre / obdclass / cl_page.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Client Lustre Page.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_CLASS
43
44 #include <libcfs/libcfs.h>
45 #include <obd_class.h>
46 #include <obd_support.h>
47 #include <libcfs/list.h>
48
49 #include <cl_object.h>
50 #include "cl_internal.h"
51
52 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
53
54 #ifdef LIBCFS_DEBUG
55 # define PASSERT(env, page, expr)                                       \
56   do {                                                                    \
57           if (unlikely(!(expr))) {                                      \
58                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
59                   LASSERT(0);                                           \
60           }                                                             \
61   } while (0)
62 #else /* !LIBCFS_DEBUG */
63 # define PASSERT(env, page, exp) \
64         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
65 #endif /* !LIBCFS_DEBUG */
66
67 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
68 # define PINVRNT(env, page, expr)                                       \
69   do {                                                                    \
70           if (unlikely(!(expr))) {                                      \
71                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
72                   LINVRNT(0);                                           \
73           }                                                             \
74   } while (0)
75 #else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
76 # define PINVRNT(env, page, exp) \
77          ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
78 #endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
79
80 /* Disable page statistics by default due to the huge performance penalty. */
81 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
82 #define CS_PAGE_INC(o, item) \
83         atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
84 #define CS_PAGE_DEC(o, item) \
85         atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
86 #define CS_PAGESTATE_INC(o, state) \
87         atomic_inc(&cl_object_site(o)->cs_pages_state[state])
88 #define CS_PAGESTATE_DEC(o, state) \
89         atomic_dec(&cl_object_site(o)->cs_pages_state[state])
90 #else
91 #define CS_PAGE_INC(o, item)
92 #define CS_PAGE_DEC(o, item)
93 #define CS_PAGESTATE_INC(o, state)
94 #define CS_PAGESTATE_DEC(o, state)
95 #endif
96
97 /**
98  * Internal version of cl_page_get().
99  *
100  * This function can be used to obtain an initial reference to a previously
101  * unreferenced cached object. It can be called only if concurrent page
102  * reclamation is somehow prevented, e.g., by holding a lock on the VM page
103  * associated with \a page.
104  *
105  * Use with care! Not exported.
106  */
107 static void cl_page_get_trust(struct cl_page *page)
108 {
109         LASSERT(atomic_read(&page->cp_ref) > 0);
110         atomic_inc(&page->cp_ref);
111 }
112
113 /**
114  * Returns a slice within a page, corresponding to the given layer in the
115  * device stack.
116  *
117  * \see cl_lock_at()
118  */
119 static const struct cl_page_slice *
120 cl_page_at_trusted(const struct cl_page *page,
121                    const struct lu_device_type *dtype)
122 {
123         const struct cl_page_slice *slice;
124         ENTRY;
125
126         cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
127                 if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
128                         RETURN(slice);
129         }
130         RETURN(NULL);
131 }
132
133 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
134 {
135         struct cl_object *obj  = page->cp_obj;
136         int pagesize = cl_object_header(obj)->coh_page_bufsize;
137
138         PASSERT(env, page, cfs_list_empty(&page->cp_batch));
139         PASSERT(env, page, page->cp_owner == NULL);
140         PASSERT(env, page, page->cp_req == NULL);
141         PASSERT(env, page, page->cp_state == CPS_FREEING);
142
143         ENTRY;
144         while (!cfs_list_empty(&page->cp_layers)) {
145                 struct cl_page_slice *slice;
146
147                 slice = cfs_list_entry(page->cp_layers.next,
148                                        struct cl_page_slice, cpl_linkage);
149                 cfs_list_del_init(page->cp_layers.next);
150                 if (unlikely(slice->cpl_ops->cpo_fini != NULL))
151                         slice->cpl_ops->cpo_fini(env, slice);
152         }
153         CS_PAGE_DEC(obj, total);
154         CS_PAGESTATE_DEC(obj, page->cp_state);
155         lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
156         cl_object_put(env, obj);
157         lu_ref_fini(&page->cp_reference);
158         OBD_FREE(page, pagesize);
159         EXIT;
160 }
161
162 /**
163  * Helper function for updating the page state. This is the only place in the
164  * code where the cl_page::cp_state field is mutated.
165  */
166 static inline void cl_page_state_set_trust(struct cl_page *page,
167                                            enum cl_page_state state)
168 {
169         /* bypass const. */
170         *(enum cl_page_state *)&page->cp_state = state;
171 }
172
173 struct cl_page *cl_page_alloc(const struct lu_env *env,
174                 struct cl_object *o, pgoff_t ind, struct page *vmpage,
175                 enum cl_page_type type)
176 {
177         struct cl_page          *page;
178         struct lu_object_header *head;
179
180         ENTRY;
181         OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
182                         GFP_NOFS);
183         if (page != NULL) {
184                 int result = 0;
185                 atomic_set(&page->cp_ref, 1);
186                 page->cp_obj = o;
187                 cl_object_get(o);
188                 lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
189                                      page);
190                 page->cp_vmpage = vmpage;
191                 cl_page_state_set_trust(page, CPS_CACHED);
192                 page->cp_type = type;
193                 CFS_INIT_LIST_HEAD(&page->cp_layers);
194                 CFS_INIT_LIST_HEAD(&page->cp_batch);
195                 CFS_INIT_LIST_HEAD(&page->cp_flight);
196                 mutex_init(&page->cp_mutex);
197                 lu_ref_init(&page->cp_reference);
198                 head = o->co_lu.lo_header;
199                 cfs_list_for_each_entry(o, &head->loh_layers,
200                                         co_lu.lo_linkage) {
201                         if (o->co_ops->coo_page_init != NULL) {
202                                 result = o->co_ops->coo_page_init(env, o, page,
203                                                                   ind);
204                                 if (result != 0) {
205                                         cl_page_delete0(env, page);
206                                         cl_page_free(env, page);
207                                         page = ERR_PTR(result);
208                                         break;
209                                 }
210                         }
211                 }
212                 if (result == 0) {
213                         CS_PAGE_INC(o, total);
214                         CS_PAGE_INC(o, create);
215                         CS_PAGESTATE_DEC(o, CPS_CACHED);
216                 }
217         } else {
218                 page = ERR_PTR(-ENOMEM);
219         }
220         RETURN(page);
221 }
222 EXPORT_SYMBOL(cl_page_alloc);
223
224 /**
225  * Returns a cl_page with index \a idx at the object \a o, and associated with
226  * the VM page \a vmpage.
227  *
228  * This is the main entry point into the cl_page caching interface. First, a
229  * cache (implemented as a per-object radix tree) is consulted. If the page is
230  * found there, it is returned immediately. Otherwise a new page is allocated
231  * and returned. In either case, an additional reference to the page is acquired.
232  *
233  * \see cl_object_find(), cl_lock_find()
234  */
235 struct cl_page *cl_page_find(const struct lu_env *env,
236                              struct cl_object *o,
237                              pgoff_t idx, struct page *vmpage,
238                              enum cl_page_type type)
239 {
240         struct cl_page          *page = NULL;
241         struct cl_object_header *hdr;
242
243         LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
244         might_sleep();
245
246         ENTRY;
247
248         hdr = cl_object_header(o);
249         CS_PAGE_INC(o, lookup);
250
251         CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
252                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
253         /* fast path. */
254         if (type == CPT_CACHEABLE) {
255                 /* vmpage lock is used to protect the child/parent
256                  * relationship */
257                 KLASSERT(PageLocked(vmpage));
258                 /*
259                  * cl_vmpage_page() can be called here without any locks as
260                  *
261                  *     - "vmpage" is locked (which prevents ->private from
262                  *       concurrent updates), and
263                  *
264                  *     - "o" cannot be destroyed while current thread holds a
265                  *       reference on it.
266                  */
267                 page = cl_vmpage_page(vmpage, o);
268                 if (page != NULL) {
269                         CS_PAGE_INC(o, hit);
270                         RETURN(page);
271                 }
272         }
273
274         /* allocate and initialize cl_page */
275         page = cl_page_alloc(env, o, idx, vmpage, type);
276         RETURN(page);
277 }
278 EXPORT_SYMBOL(cl_page_find);
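/*
 * Usage sketch (editorial addition, not part of the original source): a
 * minimal caller of cl_page_find() for a CPT_CACHEABLE page.  "env", "obj",
 * "index" and "vmpage" are assumed to be provided by the surrounding code,
 * and the VM page must already be locked, as required above.
 *
 *	struct cl_page *page;
 *
 *	lock_page(vmpage);
 *	page = cl_page_find(env, obj, index, vmpage, CPT_CACHEABLE);
 *	if (IS_ERR(page)) {
 *		unlock_page(vmpage);
 *		return PTR_ERR(page);
 *	}
 *	... use the page ...
 *	cl_page_put(env, page);	 drop the reference acquired by cl_page_find()
 */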
279
280 static inline int cl_page_invariant(const struct cl_page *pg)
281 {
282         /*
283          * Page invariant is protected by a VM lock.
284          */
285         LINVRNT(cl_page_is_vmlocked(NULL, pg));
286
287         return cl_page_in_use_noref(pg);
288 }
289
290 static void cl_page_state_set0(const struct lu_env *env,
291                                struct cl_page *page, enum cl_page_state state)
292 {
293         enum cl_page_state old;
294
295         /*
296          * Matrix of allowed state transitions [old][new], for sanity
297          * checking.
298          */
299         static const int allowed_transitions[CPS_NR][CPS_NR] = {
300                 [CPS_CACHED] = {
301                         [CPS_CACHED]  = 0,
302                         [CPS_OWNED]   = 1, /* io finds existing cached page */
303                         [CPS_PAGEIN]  = 0,
304                         [CPS_PAGEOUT] = 1, /* write-out from the cache */
305                         [CPS_FREEING] = 1, /* eviction on the memory pressure */
306                 },
307                 [CPS_OWNED] = {
308                         [CPS_CACHED]  = 1, /* release to the cache */
309                         [CPS_OWNED]   = 0,
310                         [CPS_PAGEIN]  = 1, /* start read immediately */
311                         [CPS_PAGEOUT] = 1, /* start write immediately */
312                         [CPS_FREEING] = 1, /* lock invalidation or truncate */
313                 },
314                 [CPS_PAGEIN] = {
315                         [CPS_CACHED]  = 1, /* io completion */
316                         [CPS_OWNED]   = 0,
317                         [CPS_PAGEIN]  = 0,
318                         [CPS_PAGEOUT] = 0,
319                         [CPS_FREEING] = 0,
320                 },
321                 [CPS_PAGEOUT] = {
322                         [CPS_CACHED]  = 1, /* io completion */
323                         [CPS_OWNED]   = 0,
324                         [CPS_PAGEIN]  = 0,
325                         [CPS_PAGEOUT] = 0,
326                         [CPS_FREEING] = 0,
327                 },
328                 [CPS_FREEING] = {
329                         [CPS_CACHED]  = 0,
330                         [CPS_OWNED]   = 0,
331                         [CPS_PAGEIN]  = 0,
332                         [CPS_PAGEOUT] = 0,
333                         [CPS_FREEING] = 0,
334                 }
335         };
336
337         ENTRY;
338         old = page->cp_state;
339         PASSERT(env, page, allowed_transitions[old][state]);
340         CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
341         PASSERT(env, page, page->cp_state == old);
342         PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));
343
344         CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
345         CS_PAGESTATE_INC(page->cp_obj, state);
346         cl_page_state_set_trust(page, state);
347         EXIT;
348 }
349
350 static void cl_page_state_set(const struct lu_env *env,
351                               struct cl_page *page, enum cl_page_state state)
352 {
353         cl_page_state_set0(env, page, state);
354 }
355
356 /**
357  * Acquires an additional reference to a page.
358  *
359  * This can be called only by a caller already holding a reference to \a
360  * page.
361  *
362  * \see cl_object_get(), cl_lock_get().
363  */
364 void cl_page_get(struct cl_page *page)
365 {
366         ENTRY;
367         cl_page_get_trust(page);
368         EXIT;
369 }
370 EXPORT_SYMBOL(cl_page_get);
371
372 /**
373  * Releases a reference to a page.
374  *
375  * When the last reference is released, the page is returned to the cache, unless it
376  * is in cl_page_state::CPS_FREEING state, in which case it is immediately
377  * destroyed.
378  *
379  * \see cl_object_put(), cl_lock_put().
380  */
381 void cl_page_put(const struct lu_env *env, struct cl_page *page)
382 {
383         ENTRY;
384         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
385                        atomic_read(&page->cp_ref));
386
387         if (atomic_dec_and_test(&page->cp_ref)) {
388                 LASSERT(page->cp_state == CPS_FREEING);
389
390                 LASSERT(atomic_read(&page->cp_ref) == 0);
391                 PASSERT(env, page, page->cp_owner == NULL);
392                 PASSERT(env, page, cfs_list_empty(&page->cp_batch));
393                 /*
394                  * Page is no longer reachable by other threads. Tear
395                  * it down.
396                  */
397                 cl_page_free(env, page);
398         }
399
400         EXIT;
401 }
402 EXPORT_SYMBOL(cl_page_put);
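/*
 * Reference-counting sketch (editorial addition): a caller that already holds
 * a reference can take an extra one with cl_page_get() before handing the
 * page to another context, which later drops it with cl_page_put().  "env"
 * and "page" are assumed to come from the caller; queue_for_later_use() is a
 * hypothetical consumer.
 *
 *	cl_page_get(page);            take a reference for the new user
 *	queue_for_later_use(page);    hypothetical consumer
 *	...
 *	cl_page_put(env, page);       the consumer drops it when done
 */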
403
404 /**
405  * Returns a cl_page associated with a VM page, and given cl_object.
406  */
407 struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
408 {
409         struct cl_page *page;
410
411         ENTRY;
412         KLASSERT(PageLocked(vmpage));
413
414         /*
415  * NOTE: absence of races and liveness of data are guaranteed by the page
416  *       lock on "vmpage". That works because object destruction proceeds
417  *       in a bottom-to-top pass.
418          */
419
420         page = (struct cl_page *)vmpage->private;
421         if (page != NULL) {
422                 cl_page_get_trust(page);
423                 LASSERT(page->cp_type == CPT_CACHEABLE);
424         }
425         RETURN(page);
426 }
427 EXPORT_SYMBOL(cl_vmpage_page);
428
429 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
430                                        const struct lu_device_type *dtype)
431 {
432         return cl_page_at_trusted(page, dtype);
433 }
434 EXPORT_SYMBOL(cl_page_at);
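/*
 * Layer-lookup sketch (editorial addition): given a device type, a layer can
 * find its own slice within a compound page with cl_page_at() and then
 * recover its private structure with container_of().  "my_device_type",
 * "struct my_page" and "mp_cl" are hypothetical names standing in for a real
 * layer.
 *
 *	const struct cl_page_slice *slice;
 *	struct my_page *mp;
 *
 *	slice = cl_page_at(page, &my_device_type);
 *	if (slice != NULL)
 *		mp = container_of(slice, struct my_page, mp_cl);
 */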
435
436 #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
437
438 #define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
439 ({                                                                      \
440         const struct lu_env        *__env  = (_env);                    \
441         struct cl_page             *__page = (_page);                   \
442         const struct cl_page_slice *__scan;                             \
443         int                         __result;                           \
444         ptrdiff_t                   __op   = (_op);                     \
445         int                       (*__method)_proto;                    \
446                                                                         \
447         __result = 0;                                                   \
448         cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {     \
449                 __method = *(void **)((char *)__scan->cpl_ops +  __op);        \
450                 if (__method != NULL) {                                        \
451                         __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
452                         if (__result != 0)                              \
453                                 break;                                  \
454                 }                                                       \
455         }                                                               \
456         if (__result > 0)                                               \
457                 __result = 0;                                           \
458         __result;                                                       \
459 })
460
461 #define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)           \
462 ({                                                                      \
463         const struct lu_env        *__env  = (_env);                    \
464         struct cl_page             *__page = (_page);                   \
465         const struct cl_page_slice *__scan;                             \
466         int                         __result;                           \
467         ptrdiff_t                   __op   = (_op);                     \
468         int                       (*__method)_proto;                    \
469                                                                         \
470         __result = 0;                                                   \
471         list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
472                                         cpl_linkage) {                  \
473                 __method = *(void **)((char *)__scan->cpl_ops +  __op); \
474                 if (__method != NULL) {                                 \
475                         __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
476                         if (__result != 0)                              \
477                                 break;                                  \
478                 }                                                       \
479         }                                                               \
480         if (__result > 0)                                               \
481                 __result = 0;                                           \
482         __result;                                                       \
483 })
484
485 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
486 do {                                                                    \
487         const struct lu_env        *__env  = (_env);                    \
488         struct cl_page             *__page = (_page);                   \
489         const struct cl_page_slice *__scan;                             \
490         ptrdiff_t                   __op   = (_op);                     \
491         void                      (*__method)_proto;                    \
492                                                                         \
493         cfs_list_for_each_entry(__scan, &__page->cp_layers,             \
494                                 cpl_linkage) {                          \
495                 __method = *(void **)((char *)__scan->cpl_ops +  __op); \
496                 if (__method != NULL)                                   \
497                         (*__method)(__env, __scan, ## __VA_ARGS__);     \
498         }                                                               \
499 } while (0)
500
501 #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
502 do {                                                                    \
503         const struct lu_env        *__env  = (_env);                    \
504         struct cl_page             *__page = (_page);                   \
505         const struct cl_page_slice *__scan;                             \
506         ptrdiff_t                   __op   = (_op);                     \
507         void                      (*__method)_proto;                    \
508                                                                         \
509         /* get to the bottom page. */                                   \
510         cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers,     \
511                                         cpl_linkage) {                  \
512                 __method = *(void **)((char *)__scan->cpl_ops + __op);  \
513                 if (__method != NULL)                                   \
514                         (*__method)(__env, __scan, ## __VA_ARGS__);     \
515         }                                                               \
516 } while (0)
517
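/*
 * Dispatch sketch (editorial addition): the CL_PAGE_* macros above implement
 * a generic "call this method on every layer" loop.  The method is selected
 * at the call site by passing offsetof(struct cl_page_operations, <method>)
 * (via CL_PAGE_OP()), and each iteration loads the function pointer stored at
 * that byte offset in the layer's cpl_ops.  Conceptually, for a single slice:
 *
 *	ptrdiff_t op = offsetof(struct cl_page_operations, cpo_own);
 *	int (*method)(const struct lu_env *, const struct cl_page_slice *,
 *		      struct cl_io *, int);
 *
 *	method = *(void **)((char *)slice->cpl_ops + op);
 *	if (method != NULL)
 *		result = method(env, slice, io, nonblock);
 *
 * A positive return value means "stop iterating, but report success", which
 * is why both invoking macros clamp __result > 0 to 0.
 */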
518 static int cl_page_invoke(const struct lu_env *env,
519                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
520
521 {
522         PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
523         ENTRY;
524         RETURN(CL_PAGE_INVOKE(env, page, op,
525                               (const struct lu_env *,
526                                const struct cl_page_slice *, struct cl_io *),
527                               io));
528 }
529
530 static void cl_page_invoid(const struct lu_env *env,
531                            struct cl_io *io, struct cl_page *page, ptrdiff_t op)
532
533 {
534         PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
535         ENTRY;
536         CL_PAGE_INVOID(env, page, op,
537                        (const struct lu_env *,
538                         const struct cl_page_slice *, struct cl_io *), io);
539         EXIT;
540 }
541
542 static void cl_page_owner_clear(struct cl_page *page)
543 {
544         ENTRY;
545         if (page->cp_owner != NULL) {
546                 LASSERT(page->cp_owner->ci_owned_nr > 0);
547                 page->cp_owner->ci_owned_nr--;
548                 page->cp_owner = NULL;
549                 page->cp_task = NULL;
550         }
551         EXIT;
552 }
553
554 static void cl_page_owner_set(struct cl_page *page)
555 {
556         ENTRY;
557         LASSERT(page->cp_owner != NULL);
558         page->cp_owner->ci_owned_nr++;
559         EXIT;
560 }
561
562 void cl_page_disown0(const struct lu_env *env,
563                      struct cl_io *io, struct cl_page *pg)
564 {
565         enum cl_page_state state;
566
567         ENTRY;
568         state = pg->cp_state;
569         PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
570         PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
571         cl_page_owner_clear(pg);
572
573         if (state == CPS_OWNED)
574                 cl_page_state_set(env, pg, CPS_CACHED);
575         /*
576          * Completion call-backs are executed in the bottom-up order, so that
577          * uppermost layer (llite), responsible for VFS/VM interaction runs
578          * last and can release locks safely.
579          */
580         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
581                                (const struct lu_env *,
582                                 const struct cl_page_slice *, struct cl_io *),
583                                io);
584         EXIT;
585 }
586
587 /**
588  * returns true, iff page is owned by the given io.
589  */
590 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
591 {
592         struct cl_io *top = cl_io_top((struct cl_io *)io);
593         LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
594         ENTRY;
595         RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
596 }
597 EXPORT_SYMBOL(cl_page_is_owned);
598
599 /**
600  * Try to own a page by IO.
601  *
602  * Waits until the page is in cl_page_state::CPS_CACHED state, and then switches
603  * it into cl_page_state::CPS_OWNED state.
604  *
605  * \pre  !cl_page_is_owned(pg, io)
606  * \post result == 0 iff cl_page_is_owned(pg, io)
607  *
608  * \retval 0   success
609  *
610  * \retval -ve failure, e.g., the page was destroyed (and landed in
611  *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
612  *             or the page was owned by another thread, or is in IO.
613  *
614  * \see cl_page_disown()
615  * \see cl_page_operations::cpo_own()
616  * \see cl_page_own_try()
617  * \see cl_page_own
618  */
619 static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
620                         struct cl_page *pg, int nonblock)
621 {
622         int result;
623
624         PINVRNT(env, pg, !cl_page_is_owned(pg, io));
625
626         ENTRY;
627         io = cl_io_top(io);
628
629         if (pg->cp_state == CPS_FREEING) {
630                 result = -ENOENT;
631         } else {
632                 result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
633                                         (const struct lu_env *,
634                                          const struct cl_page_slice *,
635                                          struct cl_io *, int),
636                                         io, nonblock);
637                 if (result == 0) {
638                         PASSERT(env, pg, pg->cp_owner == NULL);
639                         PASSERT(env, pg, pg->cp_req == NULL);
640                         pg->cp_owner = cl_io_top(io);
641                         pg->cp_task  = current;
642                         cl_page_owner_set(pg);
643                         if (pg->cp_state != CPS_FREEING) {
644                                 cl_page_state_set(env, pg, CPS_OWNED);
645                         } else {
646                                 cl_page_disown0(env, io, pg);
647                                 result = -ENOENT;
648                         }
649                 }
650         }
651         PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
652         RETURN(result);
653 }
654
655 /**
656  * Own a page; the caller might block.
657  *
658  * \see cl_page_own0()
659  */
660 int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
661 {
662         return cl_page_own0(env, io, pg, 0);
663 }
664 EXPORT_SYMBOL(cl_page_own);
665
666 /**
667  * Non-blocking version of cl_page_own().
668  *
669  * \see cl_page_own0()
670  */
671 int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
672                     struct cl_page *pg)
673 {
674         return cl_page_own0(env, io, pg, 1);
675 }
676 EXPORT_SYMBOL(cl_page_own_try);
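/*
 * Ownership sketch (editorial addition): a typical caller owns the page,
 * operates on it, and then releases ownership.  A -ENOENT result means the
 * page reached CPS_FREEING and should simply be skipped.  "env", "io" and
 * "pg" are assumed to be set up by the surrounding IO code.
 *
 *	rc = cl_page_own(env, io, pg);          may block
 *	if (rc == 0) {
 *		... operate on the owned page ...
 *		cl_page_disown(env, io, pg);
 *	} else {
 *		... page is being freed or unavailable, skip it ...
 *	}
 *
 * cl_page_own_try() follows the same pattern but fails instead of blocking
 * when another thread currently owns the page.
 */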
677
678
679 /**
680  * Assume page ownership.
681  *
682  * Called when page is already locked by the hosting VM.
683  *
684  * \pre !cl_page_is_owned(pg, io)
685  * \post cl_page_is_owned(pg, io)
686  *
687  * \see cl_page_operations::cpo_assume()
688  */
689 void cl_page_assume(const struct lu_env *env,
690                     struct cl_io *io, struct cl_page *pg)
691 {
692         PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
693
694         ENTRY;
695         io = cl_io_top(io);
696
697         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
698         PASSERT(env, pg, pg->cp_owner == NULL);
699         pg->cp_owner = cl_io_top(io);
700         pg->cp_task = current;
701         cl_page_owner_set(pg);
702         cl_page_state_set(env, pg, CPS_OWNED);
703         EXIT;
704 }
705 EXPORT_SYMBOL(cl_page_assume);
706
707 /**
708  * Releases page ownership without unlocking the page.
709  *
710  * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
711  * underlying VM page (as VM is supposed to do this itself).
712  *
713  * \pre   cl_page_is_owned(pg, io)
714  * \post !cl_page_is_owned(pg, io)
715  *
716  * \see cl_page_assume()
717  */
718 void cl_page_unassume(const struct lu_env *env,
719                       struct cl_io *io, struct cl_page *pg)
720 {
721         PINVRNT(env, pg, cl_page_is_owned(pg, io));
722         PINVRNT(env, pg, cl_page_invariant(pg));
723
724         ENTRY;
725         io = cl_io_top(io);
726         cl_page_owner_clear(pg);
727         cl_page_state_set(env, pg, CPS_CACHED);
728         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
729                                (const struct lu_env *,
730                                 const struct cl_page_slice *, struct cl_io *),
731                                io);
732         EXIT;
733 }
734 EXPORT_SYMBOL(cl_page_unassume);
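/*
 * Assume/unassume sketch (editorial addition): when the VM already holds the
 * page lock (e.g., inside a VM callback), ownership is taken with
 * cl_page_assume() rather than cl_page_own(), and released with
 * cl_page_unassume() while leaving the VM page locked.  "env", "io" and "pg"
 * are assumed to come from the caller.
 *
 *	cl_page_assume(env, io, pg);     VM page is already locked
 *	... operate on the owned page ...
 *	cl_page_unassume(env, io, pg);   still locked; the VM unlocks it later
 */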
735
736 /**
737  * Releases page ownership.
738  *
739  * Moves page into cl_page_state::CPS_CACHED.
740  *
741  * \pre   cl_page_is_owned(pg, io)
742  * \post !cl_page_is_owned(pg, io)
743  *
744  * \see cl_page_own()
745  * \see cl_page_operations::cpo_disown()
746  */
747 void cl_page_disown(const struct lu_env *env,
748                     struct cl_io *io, struct cl_page *pg)
749 {
750         PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
751                 pg->cp_state == CPS_FREEING);
752
753         ENTRY;
754         io = cl_io_top(io);
755         cl_page_disown0(env, io, pg);
756         EXIT;
757 }
758 EXPORT_SYMBOL(cl_page_disown);
759
760 /**
761  * Called when page is to be removed from the object, e.g., as a result of
762  * truncate.
763  *
764  * Calls cl_page_operations::cpo_discard() top-to-bottom.
765  *
766  * \pre cl_page_is_owned(pg, io)
767  *
768  * \see cl_page_operations::cpo_discard()
769  */
770 void cl_page_discard(const struct lu_env *env,
771                      struct cl_io *io, struct cl_page *pg)
772 {
773         PINVRNT(env, pg, cl_page_is_owned(pg, io));
774         PINVRNT(env, pg, cl_page_invariant(pg));
775
776         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
777 }
778 EXPORT_SYMBOL(cl_page_discard);
779
780 /**
781  * Version of cl_page_delete() that can be called for not fully constructed
782  * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
783  * path. Doesn't check page invariant.
784  */
785 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
786 {
787         ENTRY;
788
789         PASSERT(env, pg, pg->cp_state != CPS_FREEING);
790
791         /*
792          * Sever all ways to obtain new pointers to @pg.
793          */
794         cl_page_owner_clear(pg);
795
796         cl_page_state_set0(env, pg, CPS_FREEING);
797
798         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
799                        (const struct lu_env *, const struct cl_page_slice *));
800
801         EXIT;
802 }
803
804 /**
805  * Called when a decision is made to throw page out of memory.
806  *
807  * Notifies all layers about page destruction by calling
808  * cl_page_operations::cpo_delete() method top-to-bottom.
809  *
810  * Moves page into cl_page_state::CPS_FREEING state (this is the only place
811  * where transition to this state happens).
812  *
813  * Eliminates all venues through which new references to the page can be
814  * obtained:
815  *
816  *     - removes page from the radix trees,
817  *
818  *     - breaks linkage from VM page to cl_page.
819  *
820  * Once the page reaches cl_page_state::CPS_FREEING, all remaining references
821  * will drain after some time, at which point the page will be recycled.
822  *
823  * \pre  VM page is locked
824  * \post pg->cp_state == CPS_FREEING
825  *
826  * \see cl_page_operations::cpo_delete()
827  */
828 void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
829 {
830         PINVRNT(env, pg, cl_page_invariant(pg));
831         ENTRY;
832         cl_page_delete0(env, pg);
833         EXIT;
834 }
835 EXPORT_SYMBOL(cl_page_delete);
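/*
 * Removal sketch (editorial addition): schematic only; the real call sites
 * live in the llite/osc layers and differ in detail.  The VM page must be
 * locked across cl_page_delete(), and once the remaining references drain the
 * final cl_page_put() frees the page.
 *
 *	lock_page(vmpage);          satisfy the "VM page is locked" precondition
 *	cl_page_delete(env, pg);    page moves to CPS_FREEING
 *	unlock_page(vmpage);
 *	cl_page_put(env, pg);       drop the caller's own reference
 */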
836
837 /**
838  * Marks page up-to-date.
839  *
840  * Calls cl_page_operations::cpo_export() through all layers top-to-bottom. The
841  * layer responsible for VM interaction has to mark/clear the page as up-to-date
842  * according to the \a uptodate argument.
843  *
844  * \see cl_page_operations::cpo_export()
845  */
846 void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
847 {
848         PINVRNT(env, pg, cl_page_invariant(pg));
849         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
850                        (const struct lu_env *,
851                         const struct cl_page_slice *, int), uptodate);
852 }
853 EXPORT_SYMBOL(cl_page_export);
854
855 /**
856  * Returns true iff \a pg is VM-locked in a suitable sense by the calling
857  * thread.
858  */
859 int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
860 {
861         int result;
862         const struct cl_page_slice *slice;
863
864         ENTRY;
865         slice = container_of(pg->cp_layers.next,
866                              const struct cl_page_slice, cpl_linkage);
867         PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
868         /*
869          * Call ->cpo_is_vmlocked() directly instead of going through
870          * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
871          * cl_page_invariant().
872          */
873         result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
874         PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
875         RETURN(result == -EBUSY);
876 }
877 EXPORT_SYMBOL(cl_page_is_vmlocked);
878
879 static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
880 {
881         ENTRY;
882         RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
883 }
884
885 static void cl_page_io_start(const struct lu_env *env,
886                              struct cl_page *pg, enum cl_req_type crt)
887 {
888         /*
889          * Page is queued for IO, change its state.
890          */
891         ENTRY;
892         cl_page_owner_clear(pg);
893         cl_page_state_set(env, pg, cl_req_type_state(crt));
894         EXIT;
895 }
896
897 /**
898  * Prepares a page for immediate transfer. cl_page_operations::cpo_prep() is
899  * called top-to-bottom. Every layer either agrees to submit this page (by
900  * returning 0), or requests to omit this page (by returning -EALREADY). The
901  * layer handling interactions with the VM also has to inform the VM that the
902  * page is now under transfer.
903  */
904 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
905                  struct cl_page *pg, enum cl_req_type crt)
906 {
907         int result;
908
909         PINVRNT(env, pg, cl_page_is_owned(pg, io));
910         PINVRNT(env, pg, cl_page_invariant(pg));
911         PINVRNT(env, pg, crt < CRT_NR);
912
913         /*
914          * XXX this has to be called bottom-to-top, so that llite can set up
915          * PG_writeback without risking other layers deciding to skip this
916          * page.
917          */
918         if (crt >= CRT_NR)
919                 return -EINVAL;
920         result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
921         if (result == 0)
922                 cl_page_io_start(env, pg, crt);
923
924         KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
925                       equi(result == 0,
926                            PageWriteback(cl_page_vmpage(pg)))));
927         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
928         return result;
929 }
930 EXPORT_SYMBOL(cl_page_prep);
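/*
 * Submission sketch (editorial addition): the transfer path calls
 * cl_page_prep() on a page it already owns.  A return of -EALREADY means some
 * layer asked to omit the page rather than a hard failure; the real request
 * engine in osc handles that distinction.  "env", "io" and "pg" are assumed
 * to come from the caller.
 *
 *	rc = cl_page_prep(env, io, pg, CRT_WRITE);
 *	if (rc == 0)
 *		... add the page to the outgoing request ...
 *	else if (rc == -EALREADY)
 *		... skip this page, it does not need the transfer ...
 *	else
 *		... genuine error ...
 */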
931
932 /**
933  * Notify layers about transfer completion.
934  *
935  * Invoked by the transfer sub-system (which is a part of osc) to notify layers
936  * that a transfer, of which this page is a part, has completed.
937  *
938  * Completion call-backs are executed in the bottom-up order, so that
939  * uppermost layer (llite), responsible for the VFS/VM interaction runs last
940  * and can release locks safely.
941  *
942  * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
943  * \post pg->cp_state == CPS_CACHED
944  *
945  * \see cl_page_operations::cpo_completion()
946  */
947 void cl_page_completion(const struct lu_env *env,
948                         struct cl_page *pg, enum cl_req_type crt, int ioret)
949 {
950         struct cl_sync_io *anchor = pg->cp_sync_io;
951
952         PASSERT(env, pg, crt < CRT_NR);
953         /* cl_page::cp_req already cleared by the caller (osc_completion()) */
954         PASSERT(env, pg, pg->cp_req == NULL);
955         PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
956
957         ENTRY;
958         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
959         if (crt == CRT_READ && ioret == 0) {
960                 PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
961                 pg->cp_flags |= CPF_READ_COMPLETED;
962         }
963
964         cl_page_state_set(env, pg, CPS_CACHED);
965         if (crt >= CRT_NR)
966                 return;
967         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
968                                (const struct lu_env *,
969                                 const struct cl_page_slice *, int), ioret);
970         if (anchor) {
971                 LASSERT(cl_page_is_vmlocked(env, pg));
972                 LASSERT(pg->cp_sync_io == anchor);
973                 pg->cp_sync_io = NULL;
974         }
975         /*
976          * As page->cp_obj is pinned by a reference from page->cp_req, it is
977          * safe to call cl_page_put() without risking object destruction in a
978          * non-blocking context.
979          */
980         cl_page_put(env, pg);
981
982         if (anchor)
983                 cl_sync_io_note(anchor, ioret);
984
985         EXIT;
986 }
987 EXPORT_SYMBOL(cl_page_completion);
988
989 /**
990  * Notify layers that transfer formation engine decided to yank this page from
991  * the cache and to make it a part of a transfer.
992  *
993  * \pre  pg->cp_state == CPS_CACHED
994  * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
995  *
996  * \see cl_page_operations::cpo_make_ready()
997  */
998 int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
999                        enum cl_req_type crt)
1000 {
1001         int result;
1002
1003         PINVRNT(env, pg, crt < CRT_NR);
1004
1005         ENTRY;
1006         if (crt >= CRT_NR)
1007                 RETURN(-EINVAL);
1008         result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
1009                                 (const struct lu_env *,
1010                                  const struct cl_page_slice *));
1011         if (result == 0) {
1012                 PASSERT(env, pg, pg->cp_state == CPS_CACHED);
1013                 cl_page_io_start(env, pg, crt);
1014         }
1015         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
1016         RETURN(result);
1017 }
1018 EXPORT_SYMBOL(cl_page_make_ready);
1019
1020 /**
1021  * Called when a page is being written back at the kernel's request.
1022  *
1023  * \pre  cl_page_is_owned(pg, io)
1024  * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
1025  *
1026  * \see cl_page_operations::cpo_flush()
1027  */
1028 int cl_page_flush(const struct lu_env *env, struct cl_io *io,
1029                   struct cl_page *pg)
1030 {
1031         int result;
1032
1033         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1034         PINVRNT(env, pg, cl_page_invariant(pg));
1035
1036         ENTRY;
1037
1038         result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
1039
1040         CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
1041         RETURN(result);
1042 }
1043 EXPORT_SYMBOL(cl_page_flush);
1044
1045 /**
1046  * Checks whether the page is protected by an extent lock of at least the
1047  * required mode.
1048  *
1049  * \return the same as in cl_page_operations::cpo_is_under_lock() method.
1050  * \see cl_page_operations::cpo_is_under_lock()
1051  */
1052 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
1053                           struct cl_page *page, pgoff_t *max_index)
1054 {
1055         int rc;
1056
1057         PINVRNT(env, page, cl_page_invariant(page));
1058
1059         ENTRY;
1060         rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
1061                                     (const struct lu_env *,
1062                                      const struct cl_page_slice *,
1063                                      struct cl_io *, pgoff_t *),
1064                                     io, max_index);
1065         RETURN(rc);
1066 }
1067 EXPORT_SYMBOL(cl_page_is_under_lock);
1068
1069 /**
1070  * Tells transfer engine that only part of a page is to be transmitted.
1071  *
1072  * \see cl_page_operations::cpo_clip()
1073  */
1074 void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
1075                   int from, int to)
1076 {
1077         PINVRNT(env, pg, cl_page_invariant(pg));
1078
1079         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
1080         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
1081                        (const struct lu_env *,
1082                         const struct cl_page_slice *, int, int),
1083                        from, to);
1084 }
1085 EXPORT_SYMBOL(cl_page_clip);
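/*
 * Clipping sketch (editorial addition): when only part of a page contains
 * valid data, e.g. the last page of a file on write-out, the caller restricts
 * the transfer to the byte range from \a from to \a to within the page.
 * "valid_bytes" is a hypothetical value computed by the caller.
 *
 *	cl_page_clip(env, pg, 0, valid_bytes);
 */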
1086
1087 /**
1088  * Prints a human-readable representation of \a pg via \a printer.
1089  */
1090 void cl_page_header_print(const struct lu_env *env, void *cookie,
1091                           lu_printer_t printer, const struct cl_page *pg)
1092 {
1093         (*printer)(env, cookie,
1094                    "page@%p[%d %p %d %d %d %p %p %#x]\n",
1095                    pg, atomic_read(&pg->cp_ref), pg->cp_obj,
1096                    pg->cp_state, pg->cp_error, pg->cp_type,
1097                    pg->cp_owner, pg->cp_req, pg->cp_flags);
1098 }
1099 EXPORT_SYMBOL(cl_page_header_print);
1100
1101 /**
1102  * Prints a human-readable representation of \a pg via \a printer.
1103  */
1104 void cl_page_print(const struct lu_env *env, void *cookie,
1105                    lu_printer_t printer, const struct cl_page *pg)
1106 {
1107         cl_page_header_print(env, cookie, printer, pg);
1108         CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
1109                        (const struct lu_env *env,
1110                         const struct cl_page_slice *slice,
1111                         void *cookie, lu_printer_t p), cookie, printer);
1112         (*printer)(env, cookie, "end page@%p\n", pg);
1113 }
1114 EXPORT_SYMBOL(cl_page_print);
1115
1116 /**
1117  * Cancel a page which is still in a transfer.
1118  */
1119 int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
1120 {
1121         return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
1122                               (const struct lu_env *,
1123                                const struct cl_page_slice *));
1124 }
1125 EXPORT_SYMBOL(cl_page_cancel);
1126
1127 /**
1128  * Converts a page index into a byte offset within object \a obj.
1129  */
1130 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
1131 {
1132         return (loff_t)idx << PAGE_CACHE_SHIFT;
1133 }
1134 EXPORT_SYMBOL(cl_offset);
1135
1136 /**
1137  * Converts a byte offset within object \a obj into a page index.
1138  */
1139 pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
1140 {
1141         return offset >> PAGE_CACHE_SHIFT;
1142 }
1143 EXPORT_SYMBOL(cl_index);
1144
1145 int cl_page_size(const struct cl_object *obj)
1146 {
1147         return 1 << PAGE_CACHE_SHIFT;
1148 }
1149 EXPORT_SYMBOL(cl_page_size);
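/*
 * Index/offset arithmetic sketch (editorial addition): cl_offset() and
 * cl_index() are inverses up to truncation within a page.  With a 4096-byte
 * page size:
 *
 *	cl_offset(obj, 3)    == 12288    index -> byte offset
 *	cl_index(obj, 12288) == 3        byte offset -> index
 *	cl_index(obj, 12300) == 3        offsets inside a page round down
 *	cl_page_size(obj)    == 4096
 */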
1150
1151 /**
1152  * Adds page slice to the compound page.
1153  *
1154  * This is called by cl_object_operations::coo_page_init() methods to add a
1155  * per-layer state to the page. New state is added at the end of
1156  * cl_page::cp_layers list, that is, it is at the bottom of the stack.
1157  *
1158  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
1159  */
1160 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
1161                        struct cl_object *obj, pgoff_t index,
1162                        const struct cl_page_operations *ops)
1163 {
1164         ENTRY;
1165         list_add_tail(&slice->cpl_linkage, &page->cp_layers);
1166         slice->cpl_obj  = obj;
1167         slice->cpl_index = index;
1168         slice->cpl_ops  = ops;
1169         slice->cpl_page = page;
1170         EXIT;
1171 }
1172 EXPORT_SYMBOL(cl_page_slice_add);
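/*
 * Layer-initialization sketch (editorial addition): a layer's
 * cl_object_operations::coo_page_init() method typically embeds a
 * cl_page_slice in its private per-page structure and registers it here.
 * "struct my_page", "mp_cl" and "my_page_ops" are hypothetical names standing
 * in for a real layer such as vvp, lov or osc.
 *
 *	static int my_page_init(const struct lu_env *env, struct cl_object *obj,
 *				struct cl_page *page, pgoff_t index)
 *	{
 *		struct my_page *mp = ...;   layer-private per-page state
 *
 *		cl_page_slice_add(page, &mp->mp_cl, obj, index, &my_page_ops);
 *		return 0;
 *	}
 */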
1173
1174 int  cl_page_init(void)
1175 {
1176         return 0;
1177 }
1178
1179 void cl_page_fini(void)
1180 {
1181 }