lustre/obdclass/cl_page.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Page.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <libcfs/list.h>

#include <cl_object.h>
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                            int radix);

#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LASSERT(0);                                           \
          }                                                             \
  } while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */

#ifdef INVARIANT_CHECK
# define PINVRNT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LINVRNT(0);                                           \
          }                                                             \
  } while (0)
#else /* !INVARIANT_CHECK */
# define PINVRNT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !INVARIANT_CHECK */

/* Disable page statistics by default due to the huge performance penalty. */
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CS_PAGE_INC(o, item) \
        cfs_atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGE_DEC(o, item) \
        cfs_atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGESTATE_INC(o, state) \
        cfs_atomic_inc(&cl_object_site(o)->cs_pages_state[state])
#define CS_PAGESTATE_DEC(o, state) \
        cfs_atomic_dec(&cl_object_site(o)->cs_pages_state[state])
#else
#define CS_PAGE_INC(o, item)
#define CS_PAGE_DEC(o, item)
#define CS_PAGESTATE_INC(o, state)
#define CS_PAGESTATE_DEC(o, state)
#endif

/**
 * Internal version of cl_page_top(); it may be called only if the page is
 * known not to be freed, e.g., with the page referenced, the radix tree
 * lock held, or the page owned.
 */
static struct cl_page *cl_page_top_trusted(struct cl_page *page)
{
        while (page->cp_parent != NULL)
                page = page->cp_parent;
        return page;
}

/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by locking the page radix tree
 * (cl_object_header::coh_page_guard), or by keeping a lock on the VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
        cfs_atomic_inc(&page->cp_ref);
}

/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_lock_at()
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;
        ENTRY;

        page = cl_page_top_trusted((struct cl_page *)page);
        do {
                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                                RETURN(slice);
                }
                page = page->cp_child;
        } while (page != NULL);
        RETURN(NULL);
}

/**
 * Returns a page with given index in the given object, or NULL if no page is
 * found. Acquires a reference on \a page.
 *
 * Locking: called under cl_object_header::coh_page_guard spin-lock.
 */
struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
{
        struct cl_page *page;

        LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);

        page = radix_tree_lookup(&hdr->coh_tree, index);
        if (page != NULL)
                cl_page_get_trust(page);
        return page;
}
EXPORT_SYMBOL(cl_page_lookup);
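
/*
 * Usage sketch (hypothetical caller, illustrative only): the locking
 * protocol cl_page_lookup() expects.  coh_page_guard must be held across
 * the lookup, and the reference taken on a found page is dropped later
 * with cl_page_put().
 */
static struct cl_page *cl_page_peek_example(struct cl_object *obj,
                                            pgoff_t index)
{
        struct cl_object_header *hdr = cl_object_header(obj);
        struct cl_page          *page;

        spin_lock(&hdr->coh_page_guard);
        page = cl_page_lookup(hdr, index); /* takes a reference if found */
        spin_unlock(&hdr->coh_page_guard);
        return page; /* caller must cl_page_put() when done */
}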

/**
 * Iterates over pages covering the [start, end] range of \a obj, calling
 * \a cb on each page found.
 *
 * The lookup gives up before hogging the CPU for too long and returns
 * CLP_GANG_RESCHED; in that case the caller should implement retry logic.
 *
 * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
 * crucial in the face of [offset, EOF] locks.
 *
 * Processes at least one covered page (if any exists) before returning.
 */
int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                        struct cl_io *io, pgoff_t start, pgoff_t end,
                        cl_page_gang_cb_t cb, void *cbdata)
{
        struct cl_object_header *hdr;
        struct cl_page          *page;
        struct cl_page         **pvec;
        const struct cl_page_slice  *slice;
        const struct lu_device_type *dtype;
        pgoff_t                  idx;
        unsigned int             nr;
        unsigned int             i;
        unsigned int             j;
        int                      res = CLP_GANG_OKAY;
        int                      tree_lock = 1;
        ENTRY;

        idx = start;
        hdr = cl_object_header(obj);
        pvec = cl_env_info(env)->clt_pvec;
        dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
        spin_lock(&hdr->coh_page_guard);
        while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
                                            idx, CLT_PVEC_SIZE)) > 0) {
                int end_of_region = 0;
                idx = pvec[nr - 1]->cp_index + 1;
                for (i = 0, j = 0; i < nr; ++i) {
                        page = pvec[i];
                        pvec[i] = NULL;

                        LASSERT(page->cp_type == CPT_CACHEABLE);
                        if (page->cp_index > end) {
                                end_of_region = 1;
                                break;
                        }
                        if (page->cp_state == CPS_FREEING)
                                continue;

                        slice = cl_page_at_trusted(page, dtype);
                        /*
                         * Pages for an lsm-less file have no sub-page
                         * underneath for osc, in case of ...
                         */
                        PASSERT(env, page, slice != NULL);

                        page = slice->cpl_page;
                        /*
                         * Can safely call cl_page_get_trust() under
                         * radix-tree spin-lock.
                         *
                         * XXX not true, because @page belongs to an object
                         * other than @hdr's and is protected by a different
                         * tree lock.
                         */
                        cl_page_get_trust(page);
                        lu_ref_add_atomic(&page->cp_reference,
                                          "gang_lookup", cfs_current());
                        pvec[j++] = page;
                }

                /*
                 * Here a delicate locking dance is performed. The current
                 * thread holds a reference to a page, but has to own it
                 * before it can be placed into a queue. Owning implies
                 * waiting, so the radix-tree lock has to be released. After
                 * the wait, one has to check that the pages weren't
                 * truncated (cl_page_own() returns an error in that case).
                 */
                spin_unlock(&hdr->coh_page_guard);
                tree_lock = 0;

                for (i = 0; i < j; ++i) {
                        page = pvec[i];
                        if (res == CLP_GANG_OKAY)
                                res = (*cb)(env, io, page, cbdata);
                        lu_ref_del(&page->cp_reference,
                                   "gang_lookup", cfs_current());
                        cl_page_put(env, page);
                }
                if (nr < CLT_PVEC_SIZE || end_of_region)
                        break;

                if (res == CLP_GANG_OKAY && cfs_need_resched())
                        res = CLP_GANG_RESCHED;
                if (res != CLP_GANG_OKAY)
                        break;

                spin_lock(&hdr->coh_page_guard);
                tree_lock = 1;
        }
        if (tree_lock)
                spin_unlock(&hdr->coh_page_guard);
        RETURN(res);
}
EXPORT_SYMBOL(cl_page_gang_lookup);
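
/*
 * Usage sketch (hypothetical caller): driving cl_page_gang_lookup() with
 * the retry logic asked for above.  The no-op callback is an assumption
 * for the example; the retry-on-CLP_GANG_RESCHED loop mirrors the pattern
 * used by cl_pages_prune() below.
 */
static int cl_page_noop_cb(const struct lu_env *env, struct cl_io *io,
                           struct cl_page *page, void *cbdata)
{
        /* inspect the page here; keep scanning */
        return CLP_GANG_OKAY;
}

static int cl_page_scan_example(const struct lu_env *env,
                                struct cl_object *obj, struct cl_io *io,
                                pgoff_t start, pgoff_t end)
{
        int res;

        do {
                res = cl_page_gang_lookup(env, obj, io, start, end,
                                          cl_page_noop_cb, NULL);
                /* CLP_GANG_RESCHED means the CPU was yielded; just retry */
        } while (res == CLP_GANG_RESCHED);
        return res;
}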

static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj  = page->cp_obj;
        int pagesize = cl_object_header(obj)->coh_page_bufsize;

        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        PASSERT(env, page, page->cp_parent == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        ENTRY;
        cfs_might_sleep();
        while (!cfs_list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = cfs_list_entry(page->cp_layers.next,
                                       struct cl_page_slice, cpl_linkage);
                cfs_list_del_init(page->cp_layers.next);
                slice->cpl_ops->cpo_fini(env, slice);
        }
        CS_PAGE_DEC(obj, total);
        CS_PAGESTATE_DEC(obj, page->cp_state);
        lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        OBD_FREE(page, pagesize);
        EXIT;
}

/**
 * Helper function updating page state. This is the only place in the code
 * where cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        /* bypass const. */
        *(enum cl_page_state *)&page->cp_state = state;
}

static struct cl_page *cl_page_alloc(const struct lu_env *env,
                struct cl_object *o, pgoff_t ind, struct page *vmpage,
                enum cl_page_type type)
{
        struct cl_page          *page;
        struct lu_object_header *head;

        ENTRY;
        OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
                        CFS_ALLOC_IO);
        if (page != NULL) {
                /* initialized in case no layer defines ->coo_page_init() */
                int result = 0;

                cfs_atomic_set(&page->cp_ref, 1);
                if (type == CPT_CACHEABLE) /* for radix tree */
                        cfs_atomic_inc(&page->cp_ref);
                page->cp_obj = o;
                cl_object_get(o);
                page->cp_obj_ref = lu_object_ref_add(&o->co_lu, "cl_page",
                                                     page);
                page->cp_index = ind;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                CFS_INIT_LIST_HEAD(&page->cp_layers);
                CFS_INIT_LIST_HEAD(&page->cp_batch);
                CFS_INIT_LIST_HEAD(&page->cp_flight);
                mutex_init(&page->cp_mutex);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                cfs_list_for_each_entry(o, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o,
                                                                  page,
                                                                  vmpage);
                                if (result != 0) {
                                        cl_page_delete0(env, page, 0);
                                        cl_page_free(env, page);
                                        page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
                if (result == 0) {
                        CS_PAGE_INC(o, total);
                        CS_PAGE_INC(o, create);
                        CS_PAGESTATE_DEC(o, CPS_CACHED);
                }
        } else {
                page = ERR_PTR(-ENOMEM);
        }
        RETURN(page);
}

/**
 * Returns a cl_page with index \a idx in the object \a o, associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page
 * is found there, it is returned immediately. Otherwise a new page is
 * allocated and returned. In any case, an additional reference on the page
 * is acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
static struct cl_page *cl_page_find0(const struct lu_env *env,
                                     struct cl_object *o,
                                     pgoff_t idx, struct page *vmpage,
                                     enum cl_page_type type,
                                     struct cl_page *parent)
{
        struct cl_page          *page = NULL;
        struct cl_page          *ghost = NULL;
        struct cl_object_header *hdr;
        int err;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
        cfs_might_sleep();

        ENTRY;

        hdr = cl_object_header(o);
        CS_PAGE_INC(o, lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        /* fast path. */
        if (type == CPT_CACHEABLE) {
                /* vmpage lock is used to protect the child/parent
                 * relationship */
                KLASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 *     - "vmpage" is locked (which prevents ->private from
                 *       concurrent updates), and
                 *
                 *     - "o" cannot be destroyed while current thread holds a
                 *       reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                PINVRNT(env, page,
                        ergo(page != NULL,
                             cl_page_vmpage(env, page) == vmpage &&
                             (void *)radix_tree_lookup(&hdr->coh_tree,
                                                       idx) == page));
        }

        if (page != NULL) {
                CS_PAGE_INC(o, hit);
                RETURN(page);
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        if (IS_ERR(page))
                RETURN(page);

        if (type == CPT_TRANSIENT) {
                if (parent) {
                        LASSERT(page->cp_parent == NULL);
                        page->cp_parent = parent;
                        parent->cp_child = page;
                }
                RETURN(page);
        }

        /*
         * XXX optimization: use radix_tree_preload() here, and change tree
         * gfp mask to GFP_KERNEL in cl_object_header_init().
         */
        spin_lock(&hdr->coh_page_guard);
        err = radix_tree_insert(&hdr->coh_tree, idx, page);
        if (err != 0) {
                ghost = page;
                /*
                 * Noted by Jay: a lock on \a vmpage protects cl_page_find()
                 * from this race, but
                 *
                 *     0. it's better to have cl_page interface "locally
                 *     consistent" so that its correctness can be reasoned
                 *     about without appealing to the (obscure world of) VM
                 *     locking.
                 *
                 *     1. handling this race allows ->coh_tree to remain
                 *     consistent even when VM locking is somehow busted,
                 *     which is very useful during diagnosing and debugging.
                 */
                page = ERR_PTR(err);
                CL_PAGE_DEBUG(D_ERROR, env, ghost,
                              "fail to insert into radix tree: %d\n", err);
        } else {
                if (parent) {
                        LASSERT(page->cp_parent == NULL);
                        page->cp_parent = parent;
                        parent->cp_child = page;
                }
                hdr->coh_pages++;
        }
        spin_unlock(&hdr->coh_page_guard);

        if (unlikely(ghost != NULL)) {
                cl_page_delete0(env, ghost, 0);
                cl_page_free(env, ghost);
        }
        RETURN(page);
}

struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        return cl_page_find0(env, o, idx, vmpage, type, NULL);
}
EXPORT_SYMBOL(cl_page_find);

struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
                                 pgoff_t idx, struct page *vmpage,
                                 struct cl_page *parent)
{
        return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
}
EXPORT_SYMBOL(cl_page_find_sub);
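
/*
 * Usage sketch (hypothetical caller): looking up or creating the cl_page
 * for a locked VM page via cl_page_find().  CPT_CACHEABLE requires the
 * vmpage to be locked (see the KLASSERT in cl_page_find0()); the returned
 * reference is dropped with cl_page_put().
 */
static int cl_page_find_example(const struct lu_env *env,
                                struct cl_object *obj, pgoff_t idx,
                                struct page *vmpage)
{
        struct cl_page *page;

        KLASSERT(PageLocked(vmpage));
        page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                return PTR_ERR(page);
        /* ... use the page ... */
        cl_page_put(env, page);
        return 0;
}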

static inline int cl_page_invariant(const struct cl_page *pg)
{
        struct cl_object_header *header;
        struct cl_page          *parent;
        struct cl_page          *child;
        struct cl_io            *owner;

        /*
         * Page invariant is protected by a VM lock.
         */
        LINVRNT(cl_page_is_vmlocked(NULL, pg));

        header = cl_object_header(pg->cp_obj);
        parent = pg->cp_parent;
        child  = pg->cp_child;
        owner  = pg->cp_owner;

        return cl_page_in_use(pg) &&
                ergo(parent != NULL, parent->cp_child == pg) &&
                ergo(child != NULL, child->cp_parent == pg) &&
                ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
                ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
                ergo(owner != NULL && parent != NULL,
                     parent->cp_owner == pg->cp_owner->ci_parent) &&
                ergo(owner != NULL && child != NULL,
                     child->cp_owner->ci_parent == owner) &&
                /*
                 * Either page is early in initialization (has neither child
                 * nor parent yet), or it is in the object radix tree.
                 */
                ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
                     (void *)radix_tree_lookup(&header->coh_tree,
                                               pg->cp_index) == pg ||
                     (child == NULL && parent == NULL));
}

static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking.
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_CACHED]  = 0,
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction on memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                },
                [CPS_FREEING] = {
                        [CPS_CACHED]  = 0,
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                }
        };

        ENTRY;
        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        for (; page != NULL; page = page->cp_child) {
                PASSERT(env, page, page->cp_state == old);
                PASSERT(env, page,
                        equi(state == CPS_OWNED, page->cp_owner != NULL));

                CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
                CS_PAGESTATE_INC(page->cp_obj, state);
                cl_page_state_set_trust(page, state);
        }
        EXIT;
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}

/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by a caller already possessing a reference to
 * \a page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        ENTRY;
        cl_page_get_trust(page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page.
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in the cl_page_state::CPS_FREEING state, in which case it is
 * immediately destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);

        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       cfs_atomic_read(&page->cp_ref));

        if (cfs_atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
                PASSERT(env, page, cfs_list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_page_put);
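
/*
 * Usage sketch (hypothetical, illustrative only): cl_page_get()/
 * cl_page_put() pairing.  A caller that already holds a reference may hand
 * an extra one to another context; each cl_page_get() must be balanced by
 * a cl_page_put().
 */
static void cl_page_ref_example(const struct lu_env *env, struct cl_page *pg)
{
        cl_page_get(pg);        /* legal: we already own a reference */
        /* ... pass the page to another user ... */
        cl_page_put(env, pg);   /* balance the extra reference */
}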

/**
 * Returns a VM page associated with a given cl_page.
 */
cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
{
        const struct cl_page_slice *slice;

        /*
         * Find uppermost layer with ->cpo_vmpage() method, and return its
         * result.
         */
        page = cl_page_top(page);
        do {
                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        if (slice->cpl_ops->cpo_vmpage != NULL)
                                RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
                }
                page = page->cp_child;
        } while (page != NULL);
        LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
}
EXPORT_SYMBOL(cl_page_vmpage);

/**
 * Returns the cl_page associated with a VM page and the given cl_object.
 */
struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
{
        struct cl_page *top;
        struct cl_page *page;

        ENTRY;
        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". That works because object destruction has
         *       bottom-to-top pass.
         */

        /*
         * This loop assumes that ->private points to the top-most page. This
         * can be rectified easily.
         */
        top = (struct cl_page *)vmpage->private;
        if (top == NULL)
                RETURN(NULL);

        for (page = top; page != NULL; page = page->cp_child) {
                if (cl_object_same(page->cp_obj, obj)) {
                        cl_page_get_trust(page);
                        break;
                }
        }
        LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);
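
/*
 * Usage sketch (hypothetical caller): mapping a locked VM page back to the
 * cl_page of a particular object.  The page lock guarantees that
 * vmpage->private stays stable across the call.
 */
static int cl_vmpage_example(const struct lu_env *env,
                             cfs_page_t *vmpage, struct cl_object *obj)
{
        struct cl_page *page;

        KLASSERT(PageLocked(vmpage));
        page = cl_vmpage_page(vmpage, obj); /* takes a reference if found */
        if (page == NULL)
                return -ENOENT;
        /* ... use the page ... */
        cl_page_put(env, page);
        return 0;
}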

/**
 * Returns the top-page for a given page.
 *
 * \see cl_object_top(), cl_io_top()
 */
struct cl_page *cl_page_top(struct cl_page *page)
{
        return cl_page_top_trusted(page);
}
EXPORT_SYMBOL(cl_page_top);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
({                                                                      \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        int                         __result;                           \
        ptrdiff_t                   __op   = (_op);                     \
        int                       (*__method)_proto;                    \
                                                                        \
        __result = 0;                                                   \
        __page = cl_page_top(__page);                                   \
        do {                                                            \
                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL) {                         \
                                __result = (*__method)(__env, __scan,   \
                                                       ## __VA_ARGS__); \
                                if (__result != 0)                      \
                                        break;                          \
                        }                                               \
                }                                                       \
                __page = __page->cp_child;                              \
        } while (__page != NULL && __result == 0);                      \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        __page = cl_page_top(__page);                                   \
        do {                                                            \
                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL)                           \
                                (*__method)(__env, __scan,              \
                                            ## __VA_ARGS__);            \
                }                                                       \
                __page = __page->cp_child;                              \
        } while (__page != NULL);                                       \
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)               \
do {                                                                        \
        const struct lu_env        *__env  = (_env);                        \
        struct cl_page             *__page = (_page);                       \
        const struct cl_page_slice *__scan;                                 \
        ptrdiff_t                   __op   = (_op);                         \
        void                      (*__method)_proto;                        \
                                                                            \
        /* get to the bottom page. */                                       \
        while (__page->cp_child != NULL)                                    \
                __page = __page->cp_child;                                  \
        do {                                                                \
                cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
                                                cpl_linkage) {              \
                        __method = *(void **)((char *)__scan->cpl_ops +     \
                                              __op);                        \
                        if (__method != NULL)                               \
                                (*__method)(__env, __scan,                  \
                                            ## __VA_ARGS__);                \
                }                                                           \
                __page = __page->cp_parent;                                 \
        } while (__page != NULL);                                           \
} while (0)

static int cl_page_invoke(const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(CL_PAGE_INVOKE(env, page, op,
                              (const struct lu_env *,
                               const struct cl_page_slice *, struct cl_io *),
                              io));
}

static void cl_page_invoid(const struct lu_env *env,
                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        ENTRY;
        CL_PAGE_INVOID(env, page, op,
                       (const struct lu_env *,
                        const struct cl_page_slice *, struct cl_io *), io);
        EXIT;
}

static void cl_page_owner_clear(struct cl_page *page)
{
        ENTRY;
        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
                if (page->cp_owner != NULL) {
                        LASSERT(page->cp_owner->ci_owned_nr > 0);
                        page->cp_owner->ci_owned_nr--;
                        page->cp_owner = NULL;
                        page->cp_task = NULL;
                }
        }
        EXIT;
}

static void cl_page_owner_set(struct cl_page *page)
{
        ENTRY;
        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
                LASSERT(page->cp_owner != NULL);
                page->cp_owner->ci_owned_nr++;
        }
        EXIT;
}

void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        enum cl_page_state state;

        ENTRY;
        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg));
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * the uppermost layer (llite), responsible for VFS/VM interaction,
         * runs last and can release locks safely.
         */
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
        EXIT;
}

/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
}
EXPORT_SYMBOL(cl_page_is_owned);

/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., the page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or the page was owned by another thread, or in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 * \see cl_page_own
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
        } else {
                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                        (const struct lu_env *,
                                         const struct cl_page_slice *,
                                         struct cl_io *, int),
                                        io, nonblock);
                if (result == 0) {
                        PASSERT(env, pg, pg->cp_owner == NULL);
                        PASSERT(env, pg, pg->cp_req == NULL);
                        pg->cp_owner = io;
                        pg->cp_task  = current;
                        cl_page_owner_set(pg);
                        if (pg->cp_state != CPS_FREEING) {
                                cl_page_state_set(env, pg, CPS_OWNED);
                        } else {
                                cl_page_disown0(env, io, pg);
                                result = -ENOENT;
                        }
                }
        }
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        RETURN(result);
}

/**
 * Own a page; may block.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblock version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
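
/*
 * Usage sketch (hypothetical caller): taking ownership of a page before
 * operating on it.  A non-zero result means the page reached CPS_FREEING
 * (e.g., was truncated) and should simply be skipped.
 */
static int cl_page_own_example(const struct lu_env *env, struct cl_io *io,
                               struct cl_page *pg)
{
        int rc;

        rc = cl_page_own(env, io, pg);
        if (rc != 0)
                return rc; /* page is being freed; skip it */
        /* ... page is in CPS_OWNED here; operate on it ... */
        cl_page_disown(env, io, pg);
        return 0;
}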

/**
 * Assume page ownership.
 *
 * Called when page is already locked by the hosting VM.
 *
 * \pre !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        PASSERT(env, pg, pg->cp_owner == NULL);
        pg->cp_owner = io;
        pg->cp_task = current;
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
        EXIT;
}
EXPORT_SYMBOL(cl_page_assume);

/**
 * Releases page ownership without unlocking the page.
 *
 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
 * underlying VM page (as VM is supposed to do this itself).
 *
 * \pre   cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
        EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);
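
/*
 * Usage sketch (hypothetical caller): the assume/unassume pair, used when
 * the VM page is already locked, e.g., on a path entered from the kernel
 * with the page lock held, in contrast to cl_page_own().
 */
static void cl_page_assume_example(const struct lu_env *env,
                                   struct cl_io *io, struct cl_page *pg)
{
        /* the VM page must already be locked by the caller */
        cl_page_assume(env, io, pg);
        /* ... page is owned here; inspect or modify it ... */
        cl_page_unassume(env, io, pg); /* ownership dropped, page stays locked */
}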

/**
 * Releases page ownership.
 *
 * Moves page into cl_page_state::CPS_CACHED.
 *
 * \pre   cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_own()
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);
        cl_page_disown0(env, io, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_disown);

/**
 * Called when page is to be removed from the object, e.g., as a result of
 * truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);

/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in the error handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check the page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                            int radix)
{
        struct cl_page *tmp = pg;
        ENTRY;

        PASSERT(env, pg, pg == cl_page_top(pg));
        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);

        /*
         * Unexport the page first, before freeing it, so that the page
         * content is considered invalid. We have to do this because a
         * CPS_FREEING cl_page may NOT be under the protection of a cl_lock.
         * Afterwards, if this page is found by other threads, it will be
         * forced to be reread.
         */
        cl_page_export(env, pg, 0);
        cl_page_state_set0(env, pg, CPS_FREEING);

        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
                       (const struct lu_env *, const struct cl_page_slice *));

        if (tmp->cp_type == CPT_CACHEABLE) {
                if (!radix)
                        /* !radix means that @pg is not yet in the radix tree,
                         * skip removing it.
                         */
                        tmp = pg->cp_child;
                for (; tmp != NULL; tmp = tmp->cp_child) {
                        void                    *value;
                        struct cl_object_header *hdr;

                        hdr = cl_object_header(tmp->cp_obj);
                        spin_lock(&hdr->coh_page_guard);
                        value = radix_tree_delete(&hdr->coh_tree,
                                                  tmp->cp_index);
                        PASSERT(env, tmp, value == tmp);
                        PASSERT(env, tmp, hdr->coh_pages > 0);
                        hdr->coh_pages--;
                        spin_unlock(&hdr->coh_page_guard);
                        cl_page_put(env, tmp);
                }
        }

        EXIT;
}

/**
 * Called when a decision is made to throw page out of memory.
 *
 * Notifies all layers about page destruction by calling
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves page into cl_page_state::CPS_FREEING state (this is the only place
 * where transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes page from the radix trees,
 *
 *     - breaks linkage from VM page to cl_page.
 *
 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
 * drain after some time, at which point page will be recycled.
 *
 * \pre  pg == cl_page_top(pg)
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        ENTRY;
        cl_page_delete0(env, pg, 1);
        EXIT;
}
EXPORT_SYMBOL(cl_page_delete);

/**
 * Unmaps page from user virtual memory.
 *
 * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to unmap the page from user
 * space virtual memory.
 *
 * \see cl_page_operations::cpo_unmap()
 */
int cl_page_unmap(const struct lu_env *env,
                  struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
}
EXPORT_SYMBOL(cl_page_unmap);

/**
 * Marks page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date according to the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);

/**
 * Returns true, iff \a pg is VM locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        int result;
        const struct cl_page_slice *slice;

        ENTRY;
        pg = cl_page_top_trusted((struct cl_page *)pg);
        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
        RETURN(result == -EBUSY);
}
EXPORT_SYMBOL(cl_page_is_vmlocked);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        ENTRY;
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        ENTRY;
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
        EXIT;
}

/**
 * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that
 * the page is under transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        if (crt >= CRT_NR)
                return -EINVAL;
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);

        KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
                      equi(result == 0,
                           PageWriteback(cl_page_vmpage(env, pg)))));
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_prep);
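
/*
 * Usage sketch (hypothetical caller): preparing an owned page for an
 * immediate read transfer.  -EALREADY is not an error here; it means some
 * layer asked for this page to be omitted from the transfer.
 */
static int cl_page_prep_example(const struct lu_env *env, struct cl_io *io,
                                struct cl_page *pg)
{
        int rc;

        rc = cl_page_prep(env, io, pg, CRT_READ);
        if (rc == -EALREADY)
                return 0; /* page should be skipped, e.g., already uptodate */
        if (rc != 0)
                return rc;
        /* page is now in CPS_PAGEIN; hand it to the transfer engine */
        return 0;
}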

/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs
 * last and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        /* cl_page::cp_req already cleared by the caller (osc_completion()) */
        PASSERT(env, pg, pg->cp_req == NULL);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        if (crt == CRT_READ && ioret == 0) {
                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
                pg->cp_flags |= CPF_READ_COMPLETED;
        }

        cl_page_state_set(env, pg, CPS_CACHED);
        if (crt >= CRT_NR)
                return;
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor) {
                LASSERT(cl_page_is_vmlocked(env, pg));
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
        }
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, pg);

        if (anchor)
                cl_sync_io_note(anchor, ioret);

        EXIT;
}
EXPORT_SYMBOL(cl_page_completion);

/**
 * Notify layers that the transfer formation engine decided to yank this
 * page from the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);

        ENTRY;
        if (crt >= CRT_NR)
                RETURN(-EINVAL);
        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        if (result == 0) {
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_make_ready);

/**
 * Notify layers that a high-level io decided to place this page into a
 * cache for future transfer.
 *
 * The layer implementing the transfer engine (osc) has to register this
 * page in its queues.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_cache_add()
 */
int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
                      struct cl_page *pg, enum cl_req_type crt)
{
        const struct cl_page_slice *scan;
        int result = 0;

        PINVRNT(env, pg, crt < CRT_NR);
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;

        if (crt >= CRT_NR)
                RETURN(-EINVAL);

        cfs_list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
                if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
                        continue;

                result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
                if (result != 0)
                        break;
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_cache_add);

/**
 * Called when a page is being written back at the kernel's initiative.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *pg)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;

        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_flush);

/**
 * Checks whether the page is protected by an extent lock of at least the
 * required mode.
 *
 * \return the same as the cl_page_operations::cpo_is_under_lock() method.
 * \see cl_page_operations::cpo_is_under_lock()
 */
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page)
{
        int rc;

        PINVRNT(env, page, cl_page_invariant(page));

        ENTRY;
        rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
                            (const struct lu_env *,
                             const struct cl_page_slice *, struct cl_io *),
                            io);
        PASSERT(env, page, rc != 0);
        RETURN(rc);
}
EXPORT_SYMBOL(cl_page_is_under_lock);

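/**
 * Callback for cl_page_gang_lookup(), used by cl_pages_prune() below: owns,
 * unmaps, discards and then disowns every page found in the object.
 */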
static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
                         struct cl_page *page, void *cbdata)
{
        cl_page_own(env, io, page);
        cl_page_unmap(env, io, page);
        cl_page_discard(env, io, page);
        cl_page_disown(env, io, page);
        return CLP_GANG_OKAY;
}

/**
 * Purges all cached pages belonging to the object \a clobj.
 */
int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
{
        struct cl_thread_info   *info;
        struct cl_object        *obj = cl_object_top(clobj);
        struct cl_io            *io;
        int                      result;

        ENTRY;
        info  = cl_env_info(env);
        io    = &info->clt_io;

        /*
         * Initialize the io.  This is ugly since we never do IO in this
         * function; we just make cl_page_list functions happy. -jay
         */
        io->ci_obj = obj;
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, obj);
        if (result != 0) {
                cl_io_fini(env, io);
                RETURN(io->ci_result);
        }

        do {
                result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
                                             page_prune_cb, NULL);
                if (result == CLP_GANG_RESCHED)
                        cfs_cond_resched();
        } while (result != CLP_GANG_OKAY);

        cl_io_fini(env, io);
        RETURN(result);
}
EXPORT_SYMBOL(cl_pages_prune);

/**
 * Tells the transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int, int),
                       from, to);
}
EXPORT_SYMBOL(cl_page_clip);
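
/*
 * Illustration only: for a write touching just bytes [512, 1024) of an
 * owned page, the caller would clip the page before handing it to the
 * transfer engine:
 *
 *      cl_page_clip(env, pg, 512, 1024);
 *
 * so that only that byte range is transmitted.
 */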

/**
 * Prints a human readable representation of \a pg through \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
                   pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_index, pg->cp_parent, pg->cp_child,
                   pg->cp_state, pg->cp_error, pg->cp_type,
                   pg->cp_owner, pg->cp_req, pg->cp_flags);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints a human readable representation of \a pg and all of its layers
 * through \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        struct cl_page *scan;

        for (scan = cl_page_top((struct cl_page *)pg);
             scan != NULL; scan = scan->cp_child)
                cl_page_header_print(env, cookie, printer, scan);
        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
                       (const struct lu_env *env,
                        const struct cl_page_slice *slice,
                        void *cookie, lu_printer_t p), cookie, printer);
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);

/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
                              (const struct lu_env *,
                               const struct cl_page_slice *));
}
EXPORT_SYMBOL(cl_page_cancel);

/**
 * Converts a page index within object \a obj into a byte offset.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        /*
         * XXX for now.
         */
        return (loff_t)idx << CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        /*
         * XXX for now.
         */
        return offset >> CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

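/**
 * Returns the size, in bytes, of the pages of object \a obj; currently this
 * is always the system page size, independent of \a obj.
 */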
int cl_page_size(const struct cl_object *obj)
{
        return 1 << CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
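
/*
 * Worked example (assuming 4096-byte pages, i.e. CFS_PAGE_SHIFT == 12):
 *
 *      cl_offset(obj, 3)    == 12288    start of the fourth page
 *      cl_index(obj, 12288) == 3
 *      cl_index(obj, 12300) == 3        offsets round down to their page
 *      cl_page_size(obj)    == 4096
 */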

/**
 * Adds a page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. The new state is added at the end of the
 * cl_page::cp_layers list, that is, at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj,
                       const struct cl_page_operations *ops)
{
        ENTRY;
        cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj  = obj;
        slice->cpl_ops  = ops;
        slice->cpl_page = page;
        EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);
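
/*
 * Illustration only (hypothetical "foo" layer; allocation of the
 * layer-private foo_page structure is elided and the coo_page_init()
 * signature is abridged):
 *
 *      static int foo_page_init(const struct lu_env *env,
 *                               struct cl_object *obj,
 *                               struct cl_page *page, cfs_page_t *vmpage)
 *      {
 *              struct foo_page *fp = ...;
 *
 *              cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
 *              return 0;
 *      }
 */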
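
/**
 * Module initialization for the cl_page code; there is currently no global
 * state to set up.
 */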
int cl_page_init(void)
{
        return 0;
}
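
/**
 * Module cleanup counterpart of cl_page_init(); currently nothing to tear
 * down.
 */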
void cl_page_fini(void)
{
}