1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Client Lustre Page.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42 #ifndef EXPORT_SYMTAB
43 # define EXPORT_SYMTAB
44 #endif
45
46 #include <libcfs/libcfs.h>
47 #include <obd_class.h>
48 #include <obd_support.h>
49 #include <libcfs/list.h>
50
51 #include <cl_object.h>
52 #include "cl_internal.h"
53
54 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
55                             int radix);
56
57 static cfs_mem_cache_t      *cl_page_kmem = NULL;
58
59 static struct lu_kmem_descr cl_page_caches[] = {
60         {
61                 .ckd_cache = &cl_page_kmem,
62                 .ckd_name  = "cl_page_kmem",
63                 .ckd_size  = sizeof (struct cl_page)
64         },
65         {
66                 .ckd_cache = NULL
67         }
68 };
69
70 #ifdef LIBCFS_DEBUG
71 # define PASSERT(env, page, expr)                                       \
72   do {                                                                    \
73           if (unlikely(!(expr))) {                                      \
74                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
75                   LASSERT(0);                                           \
76           }                                                             \
77   } while (0)
78 #else /* !LIBCFS_DEBUG */
79 # define PASSERT(env, page, exp) \
80         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
81 #endif /* !LIBCFS_DEBUG */
82
83 #ifdef INVARIANT_CHECK
84 # define PINVRNT(env, page, expr)                                       \
85   do {                                                                    \
86           if (unlikely(!(expr))) {                                      \
87                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
88                   LINVRNT(0);                                           \
89           }                                                             \
90   } while (0)
91 #else /* !INVARIANT_CHECK */
92 # define PINVRNT(env, page, exp) \
93         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
94 #endif /* !INVARIANT_CHECK */
95
96 /**
97  * Internal version of cl_page_top(); it must be called with the page
98  * referenced or with coh_page_guard held.
99  */
100 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
101 {
102         LASSERT(cl_is_page(page));
103         while (page->cp_parent != NULL)
104                 page = page->cp_parent;
105         return page;
106 }
107
108 /**
109  * Internal version of cl_page_get().
110  *
111  * This function can be used to obtain an initial reference to a previously
112  * unreferenced cached object. It can be called only if concurrent page
113  * reclamation is somehow prevented, e.g., by locking the page radix-tree
114  * (cl_object_header::coh_page_guard), or by holding a lock on the VM page
115  * associated with \a page.
116  *
117  * Use with care! Not exported.
118  */
119 static void cl_page_get_trust(struct cl_page *page)
120 {
121         LASSERT(cl_is_page(page));
122         /*
123          * Checkless version for trusted users.
124          */
125         if (atomic_inc_return(&page->cp_ref) == 1)
126                 atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
127 }
128
129 /**
130  * Returns a slice within a page, corresponding to the given layer in the
131  * device stack.
132  *
133  * \see cl_lock_at()
134  */
135 static const struct cl_page_slice *
136 cl_page_at_trusted(const struct cl_page *page,
137                    const struct lu_device_type *dtype)
138 {
139         const struct cl_page_slice *slice;
140
141 #ifdef INVARIANT_CHECK
142         struct cl_object_header *ch = cl_object_header(page->cp_obj);
143
144         if (!atomic_read(&page->cp_ref))
145                 LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
146 #endif
147         ENTRY;
148
149         page = cl_page_top_trusted((struct cl_page *)page);
150         do {
151                 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
152                         if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
153                                 RETURN(slice);
154                 }
155                 page = page->cp_child;
156         } while (page != NULL);
157         RETURN(NULL);
158 }
159
160 /**
161  * Returns the page with the given index in the given object, or NULL if no
162  * page is found. Acquires a reference on the returned page.
163  *
164  * Locking: called under cl_object_header::coh_page_guard spin-lock.
165  */
166 struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
167 {
168         struct cl_page *page;
169
170         LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
171
172         page = radix_tree_lookup(&hdr->coh_tree, index);
173         if (page != NULL) {
174                 LASSERT(cl_is_page(page));
175                 cl_page_get_trust(page);
176         }
177         return page;
178 }
179 EXPORT_SYMBOL(cl_page_lookup);
180
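/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * looking a page up while holding coh_page_guard, as required by the comment
 * above.  The helper name and calling convention are hypothetical.
 */
#if 0
static void example_page_peek(const struct lu_env *env,
                              struct cl_object_header *hdr, pgoff_t index)
{
        struct cl_page *page;

        spin_lock(&hdr->coh_page_guard);
        page = cl_page_lookup(hdr, index);      /* takes a reference if found */
        spin_unlock(&hdr->coh_page_guard);
        if (page != NULL) {
                /* ... inspect the page ... */
                cl_page_put(env, page);         /* drop that reference */
        }
}
#endif
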
181 /**
182  * Collects into \a queue the pages of \a obj in the index range [start, end].
183  *
184  * The gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
185  * crucial in the face of [offset, EOF] locks.
186  */
187 void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
188                          struct cl_io *io, pgoff_t start, pgoff_t end,
189                          struct cl_page_list *queue, int nonblock)
190 {
191         struct cl_object_header *hdr;
192         struct cl_page          *page;
193         struct cl_page         **pvec;
194         const struct cl_page_slice  *slice;
195         const struct lu_device_type *dtype;
196         pgoff_t                  idx;
197         unsigned int             nr;
198         unsigned int             i;
199         unsigned int             j;
200         int                    (*page_own)(const struct lu_env *env,
201                                            struct cl_io *io,
202                                            struct cl_page *pg);
203         ENTRY;
204
205         page_own = nonblock ? cl_page_own_try : cl_page_own;
206
207         idx = start;
208         hdr = cl_object_header(obj);
209         pvec = cl_env_info(env)->clt_pvec;
210         dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
211         spin_lock(&hdr->coh_page_guard);
212         while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
213                                             idx, CLT_PVEC_SIZE)) > 0) {
214                 idx = pvec[nr - 1]->cp_index + 1;
215                 for (i = 0, j = 0; i < nr; ++i) {
216                         page = pvec[i];
217                         PASSERT(env, page, cl_is_page(page));
218                         pvec[i] = NULL;
219                         if (page->cp_index > end)
220                                 break;
221                         if (page->cp_state == CPS_FREEING)
222                                 continue;
223                         if (page->cp_type == CPT_TRANSIENT) {
224                                 /* Skip transient pages. */
225                                 continue;
226                         }
227
228                         slice = cl_page_at_trusted(page, dtype);
229                         /*
230                          * Pages of an lsm-less file have no underlying sub-page
231                          * for osc, in case of ...
232                          */
233                         PASSERT(env, page, slice != NULL);
234
235                         page = slice->cpl_page;
236                         /*
237                          * Can safely call cl_page_get_trust() under
238                          * radix-tree spin-lock.
239                          *
240                          * XXX not true, because @page belongs to an object other
241                          * than @hdr's and is protected by a different tree lock.
242                          */
243                         cl_page_get_trust(page);
244                         lu_ref_add_atomic(&page->cp_reference,
245                                           "page_list", cfs_current());
246                         pvec[j++] = page;
247                 }
248
249                 /*
250                  * Here a delicate locking dance is performed. The current thread
251                  * holds a reference to a page, but has to own it before it can be
252                  * placed into the queue. Owning implies waiting, so the radix-tree
253                  * lock has to be released. After the wait, one has to check that
254                  * the pages weren't truncated (cl_page_own() returns an error in
255                  * that case).
256                  */
257                 spin_unlock(&hdr->coh_page_guard);
258                 for (i = 0; i < j; ++i) {
259                         page = pvec[i];
260                         if (page_own(env, io, page) == 0)
261                                 cl_page_list_add(queue, page);
262                         lu_ref_del(&page->cp_reference,
263                                    "page_list", cfs_current());
264                         cl_page_put(env, page);
265                 }
266                 spin_lock(&hdr->coh_page_guard);
267                 if (nr < CLT_PVEC_SIZE)
268                         break;
269         }
270         spin_unlock(&hdr->coh_page_guard);
271         EXIT;
272 }
273 EXPORT_SYMBOL(cl_page_gang_lookup);
274
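/*
 * Illustrative sketch (editor's addition): gathering the owned, cached pages
 * of an extent without blocking.  cl_page_list_init() and
 * cl_page_list_disown() are assumed to be the page-list helpers from cl_io.c.
 */
#if 0
static void example_collect_extent(const struct lu_env *env,
                                   struct cl_object *obj, struct cl_io *io,
                                   pgoff_t start, pgoff_t end,
                                   struct cl_page_list *plist)
{
        cl_page_list_init(plist);
        /* nonblock == 1: pages that cannot be owned immediately are skipped */
        cl_page_gang_lookup(env, obj, io, start, end, plist, 1);
        /* ... act on the owned pages in plist ... */
        cl_page_list_disown(env, io, plist);
}
#endif
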
275 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
276 {
277         struct cl_object *obj  = page->cp_obj;
278         struct cl_site   *site = cl_object_site(obj);
279
280         PASSERT(env, page, cl_is_page(page));
281         PASSERT(env, page, list_empty(&page->cp_batch));
282         PASSERT(env, page, page->cp_owner == NULL);
283         PASSERT(env, page, page->cp_req == NULL);
284         PASSERT(env, page, page->cp_parent == NULL);
285         PASSERT(env, page, page->cp_state == CPS_FREEING);
286
287         ENTRY;
288         might_sleep();
289         while (!list_empty(&page->cp_layers)) {
290                 struct cl_page_slice *slice;
291
292                 slice = list_entry(page->cp_layers.next, struct cl_page_slice,
293                                    cpl_linkage);
294                 list_del_init(page->cp_layers.next);
295                 slice->cpl_ops->cpo_fini(env, slice);
296         }
297         atomic_dec(&site->cs_pages.cs_total);
298         atomic_dec(&site->cs_pages_state[page->cp_state]);
299         lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
300         cl_object_put(env, obj);
301         lu_ref_fini(&page->cp_reference);
302         OBD_SLAB_FREE_PTR(page, cl_page_kmem);
303         EXIT;
304 }
305
306 /**
307  * Helper function updating page state. This is the only place in the code
308  * where cl_page::cp_state field is mutated.
309  */
310 static inline void cl_page_state_set_trust(struct cl_page *page,
311                                            enum cl_page_state state)
312 {
313         /* bypass const. */
314         *(enum cl_page_state *)&page->cp_state = state;
315 }
316
317 static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
318                          pgoff_t ind, struct page *vmpage,
319                          enum cl_page_type type, struct cl_page **out)
320 {
321         struct cl_page          *page;
322         struct cl_page          *err  = NULL;
323         struct lu_object_header *head;
324         struct cl_site          *site = cl_object_site(o);
325         int                      result;
326
327         ENTRY;
328         result = +1;
329         OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
330         if (page != NULL) {
331                 atomic_set(&page->cp_ref, 1);
332                 page->cp_obj = o;
333                 cl_object_get(o);
334                 page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
335                                                      "cl_page", page);
336                 page->cp_index = ind;
337                 cl_page_state_set_trust(page, CPS_CACHED);
338                 page->cp_type = type;
339                 CFS_INIT_LIST_HEAD(&page->cp_layers);
340                 CFS_INIT_LIST_HEAD(&page->cp_batch);
341                 CFS_INIT_LIST_HEAD(&page->cp_flight);
342                 mutex_init(&page->cp_mutex);
343                 lu_ref_init(&page->cp_reference);
344                 head = o->co_lu.lo_header;
345                 list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
346                         if (o->co_ops->coo_page_init != NULL) {
347                                 err = o->co_ops->coo_page_init(env, o,
348                                                                page, vmpage);
349                                 if (err != NULL) {
350                                         cl_page_state_set_trust(page,
351                                                                 CPS_FREEING);
352                                         cl_page_free(env, page);
353                                         page = err;
354                                         break;
355                                 }
356                         }
357                 }
358                 if (err == NULL) {
359                         atomic_inc(&site->cs_pages.cs_busy);
360                         atomic_inc(&site->cs_pages.cs_total);
361                         atomic_inc(&site->cs_pages_state[CPS_CACHED]);
362                         atomic_inc(&site->cs_pages.cs_created);
363                         result = 0;
364                 }
365         } else
366                 page = ERR_PTR(-ENOMEM);
367         *out = page;
368         RETURN(result);
369 }
370
371 /**
372  * Returns the cl_page with index \a idx in the object \a o, associated with
373  * the VM page \a vmpage.
374  *
375  * This is the main entry point into the cl_page caching interface. First, the
376  * cache (implemented as a per-object radix tree) is consulted. If the page is
377  * found there, it is returned immediately. Otherwise a new page is allocated
378  * and returned. In either case, an additional reference to the page is acquired.
379  *
380  * \see cl_object_find(), cl_lock_find()
381  */
382 static struct cl_page *cl_page_find0(const struct lu_env *env,
383                                      struct cl_object *o,
384                                      pgoff_t idx, struct page *vmpage,
385                                      enum cl_page_type type,
386                                      struct cl_page *parent)
387 {
388         struct cl_page          *page;
389         struct cl_page          *ghost = NULL;
390         struct cl_object_header *hdr;
391         struct cl_site          *site = cl_object_site(o);
392         int err;
393
394         LINVRNT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
395         might_sleep();
396
397         ENTRY;
398
399         hdr = cl_object_header(o);
400         atomic_inc(&site->cs_pages.cs_lookup);
401
402         CDEBUG(D_PAGE, "%lu@"DFID" %p %lu %i\n",
403                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
404         /* fast path. */
405         if (type == CPT_CACHEABLE) {
406                 /*
407                  * cl_vmpage_page() can be called here without any locks as
408                  *
409                  *     - "vmpage" is locked (which prevents ->private from
410                  *       concurrent updates), and
411                  *
412                  *     - "o" cannot be destroyed while current thread holds a
413                  *       reference on it.
414                  */
415                 page = cl_vmpage_page(vmpage, o);
416                 PINVRNT(env, page,
417                         ergo(page != NULL,
418                              cl_page_vmpage(env, page) == vmpage &&
419                              (void *)radix_tree_lookup(&hdr->coh_tree,
420                                                        idx) == page));
421         } else {
422                 spin_lock(&hdr->coh_page_guard);
423                 page = cl_page_lookup(hdr, idx);
424                 spin_unlock(&hdr->coh_page_guard);
425         }
426         if (page != NULL) {
427                 atomic_inc(&site->cs_pages.cs_hit);
428                 RETURN(page);
429         }
430
431         /* allocate and initialize cl_page */
432         err = cl_page_alloc(env, o, idx, vmpage, type, &page);
433         if (err != 0)
434                 RETURN(page);
435         /*
436          * XXX optimization: use radix_tree_preload() here, and change tree
437          * gfp mask to GFP_KERNEL in cl_object_header_init().
438          */
439         spin_lock(&hdr->coh_page_guard);
440         err = radix_tree_insert(&hdr->coh_tree, idx, page);
441         if (err != 0) {
442                 ghost = page;
443                 /*
444                  * Noted by Jay: a lock on \a vmpage protects cl_page_find()
445                  * from this race, but
446                  *
447                  *     0. it's better to have cl_page interface "locally
448                  *     consistent" so that its correctness can be reasoned
449                  *     about without appealing to the (obscure world of) VM
450                  *     locking.
451                  *
452                  *     1. handling this race allows ->coh_tree to remain
453                  *     consistent even when VM locking is somehow busted,
454                  *     which is very useful during diagnosing and debugging.
455                  */
456                 page = ERR_PTR(err);
457                 if (err == -EEXIST) {
458                         /*
459                          * XXX in case of a lookup for CPT_TRANSIENT page,
460                          * nothing protects a CPT_CACHEABLE page from being
461                          * concurrently moved into CPS_FREEING state.
462                          */
463                         page = cl_page_lookup(hdr, idx);
464                         PASSERT(env, page, page != NULL);
465                         if (page->cp_type == CPT_TRANSIENT &&
466                             type == CPT_CACHEABLE) {
467                                 /* XXX: We should make sure that inode sem
468                                  * remains held for the lifetime of
469                                  * transient pages, so that it is impossible to
470                                  * have conflicting transient pages.
471                                  */
472                                 spin_unlock(&hdr->coh_page_guard);
473                                 cl_page_put(env, page);
474                                 spin_lock(&hdr->coh_page_guard);
475                                 page = ERR_PTR(-EBUSY);
476                         }
477                 }
478         } else {
479                 if (parent) {
480                         LASSERT(page->cp_parent == NULL);
481                         page->cp_parent = parent;
482                         parent->cp_child = page;
483                 }
484                 hdr->coh_pages++;
485         }
486         spin_unlock(&hdr->coh_page_guard);
487
488         if (unlikely(ghost != NULL)) {
489                 atomic_dec(&site->cs_pages.cs_busy);
490                 cl_page_delete0(env, ghost, 0);
491                 cl_page_free(env, ghost);
492         }
493         RETURN(page);
494 }
495
496 struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
497                              pgoff_t idx, struct page *vmpage,
498                              enum cl_page_type type)
499 {
500         return cl_page_find0(env, o, idx, vmpage, type, NULL);
501 }
502 EXPORT_SYMBOL(cl_page_find);
503
504
505 struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
506                                  pgoff_t idx, struct page *vmpage,
507                                  struct cl_page *parent)
508 {
509         return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
510 }
511 EXPORT_SYMBOL(cl_page_find_sub);
512
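/*
 * Illustrative sketch (editor's addition): the typical way a VM-facing layer
 * obtains the cl_page for a locked VM page.  The calling context (env, object,
 * index) is assumed to be set up by the caller; the helper is hypothetical.
 */
#if 0
static int example_find_page(const struct lu_env *env, struct cl_object *obj,
                             pgoff_t index, struct page *vmpage)
{
        struct cl_page *page;

        /* vmpage is expected to be locked by the caller for CPT_CACHEABLE */
        page = cl_page_find(env, obj, index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                return PTR_ERR(page);
        /* ... use the page ... */
        cl_page_put(env, page);         /* drop the reference cl_page_find() took */
        return 0;
}
#endif
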
513 static inline int cl_page_invariant(const struct cl_page *pg)
514 {
515         struct cl_object_header *header;
516         struct cl_page          *parent;
517         struct cl_page          *child;
518         struct cl_io            *owner;
519
520         LASSERT(cl_is_page(pg));
521         /*
522          * Page invariant is protected by a VM lock.
523          */
524         LINVRNT(cl_page_is_vmlocked(NULL, pg));
525
526         header = cl_object_header(pg->cp_obj);
527         parent = pg->cp_parent;
528         child  = pg->cp_child;
529         owner  = pg->cp_owner;
530
531         return atomic_read(&pg->cp_ref) > 0 &&
532                 ergo(parent != NULL, parent->cp_child == pg) &&
533                 ergo(child != NULL, child->cp_parent == pg) &&
534                 ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
535                 ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
536                 ergo(owner != NULL && parent != NULL,
537                      parent->cp_owner == pg->cp_owner->ci_parent) &&
538                 ergo(owner != NULL && child != NULL,
539                      child->cp_owner->ci_parent == owner) &&
540                 /*
541                  * Either page is early in initialization (has neither child
542                  * nor parent yet), or it is in the object radix tree.
543                  */
544                 ergo(pg->cp_state < CPS_FREEING,
545                      (void *)radix_tree_lookup(&header->coh_tree,
546                                                pg->cp_index) == pg ||
547                      (child == NULL && parent == NULL));
548 }
549
550 static void cl_page_state_set0(const struct lu_env *env,
551                                struct cl_page *page, enum cl_page_state state)
552 {
553         enum cl_page_state old;
554         struct cl_site *site = cl_object_site(page->cp_obj);
555
556         /*
557          * Matrix of allowed state transitions [old][new], for sanity
558          * checking.
559          */
560         static const int allowed_transitions[CPS_NR][CPS_NR] = {
561                 [CPS_CACHED] = {
562                         [CPS_CACHED]  = 0,
563                         [CPS_OWNED]   = 1, /* io finds existing cached page */
564                         [CPS_PAGEIN]  = 0,
565                         [CPS_PAGEOUT] = 1, /* write-out from the cache */
566                         [CPS_FREEING] = 1, /* eviction on the memory pressure */
567                 },
568                 [CPS_OWNED] = {
569                         [CPS_CACHED]  = 1, /* release to the cache */
570                         [CPS_OWNED]   = 0,
571                         [CPS_PAGEIN]  = 1, /* start read immediately */
572                         [CPS_PAGEOUT] = 1, /* start write immediately */
573                         [CPS_FREEING] = 1, /* lock invalidation or truncate */
574                 },
575                 [CPS_PAGEIN] = {
576                         [CPS_CACHED]  = 1, /* io completion */
577                         [CPS_OWNED]   = 0,
578                         [CPS_PAGEIN]  = 0,
579                         [CPS_PAGEOUT] = 0,
580                         [CPS_FREEING] = 0,
581                 },
582                 [CPS_PAGEOUT] = {
583                         [CPS_CACHED]  = 1, /* io completion */
584                         [CPS_OWNED]   = 0,
585                         [CPS_PAGEIN]  = 0,
586                         [CPS_PAGEOUT] = 0,
587                         [CPS_FREEING] = 0,
588                 },
589                 [CPS_FREEING] = {
590                         [CPS_CACHED]  = 0,
591                         [CPS_OWNED]   = 0,
592                         [CPS_PAGEIN]  = 0,
593                         [CPS_PAGEOUT] = 0,
594                         [CPS_FREEING] = 0,
595                 }
596         };
597
598         ENTRY;
599         old = page->cp_state;
600         PASSERT(env, page, allowed_transitions[old][state]);
601         CL_PAGE_HEADER(D_TRACE, env, page, "%i -> %i\n", old, state);
602         for (; page != NULL; page = page->cp_child) {
603                 PASSERT(env, page, page->cp_state == old);
604                 PASSERT(env, page,
605                         equi(state == CPS_OWNED, page->cp_owner != NULL));
606
607                 atomic_dec(&site->cs_pages_state[page->cp_state]);
608                 atomic_inc(&site->cs_pages_state[state]);
609                 cl_page_state_set_trust(page, state);
610         }
611         EXIT;
612 }
613
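/*
 * Illustrative lifecycle (editor's addition), as permitted by the transition
 * matrix in cl_page_state_set0() above: a cached page picked up for write-out
 * goes CPS_CACHED -> CPS_OWNED (io finds existing cached page)
 *              -> CPS_PAGEOUT (start write immediately)
 *              -> CPS_CACHED  (io completion),
 * while lock invalidation or truncate takes CPS_OWNED -> CPS_FREEING.
 */
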
614 static void cl_page_state_set(const struct lu_env *env,
615                               struct cl_page *page, enum cl_page_state state)
616 {
617         PINVRNT(env, page, cl_page_invariant(page));
618         cl_page_state_set0(env, page, state);
619 }
620
621 /**
622  * Acquires an additional reference to a page.
623  *
624  * This can only be called by a caller that already holds a reference to
625  * \a page.
626  *
627  * \see cl_object_get(), cl_lock_get().
628  */
629 void cl_page_get(struct cl_page *page)
630 {
631         ENTRY;
632         LASSERT(page->cp_state != CPS_FREEING);
633         cl_page_get_trust(page);
634         EXIT;
635 }
636 EXPORT_SYMBOL(cl_page_get);
637
638 /**
639  * Releases a reference to a page.
640  *
641  * When last reference is released, page is returned to the cache, unless it
642  * is in cl_page_state::CPS_FREEING state, in which case it is immediately
643  * destroyed.
644  *
645  * \see cl_object_put(), cl_lock_put().
646  */
647 void cl_page_put(const struct lu_env *env, struct cl_page *page)
648 {
649         struct cl_object_header *hdr;
650         struct cl_site *site = cl_object_site(page->cp_obj);
651
652         PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
653
654         ENTRY;
655         CL_PAGE_HEADER(D_TRACE, env, page, "%i\n", atomic_read(&page->cp_ref));
656
657         hdr = cl_object_header(cl_object_top(page->cp_obj));
658         if (atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
659                 atomic_dec(&site->cs_pages.cs_busy);
660                 /* We are going to access the page without a reference, but that
661                  * is safe because we hold the coh_page_guard lock, which means
662                  * nobody can free this page behind us.
663                  */
664                 if (page->cp_state == CPS_FREEING) {
665                         /* We drop the page reference and check the page state
666                          * while holding coh_page_guard, so if we get here this
667                          * really is the last reference to this page.
668                          */
669                         spin_unlock(&hdr->coh_page_guard);
670
671                         LASSERT(atomic_read(&page->cp_ref) == 0);
672                         PASSERT(env, page, page->cp_owner == NULL);
673                         PASSERT(env, page, list_empty(&page->cp_batch));
674                         /*
675                          * Page is no longer reachable by other threads. Tear
676                          * it down.
677                          */
678                         cl_page_free(env, page);
679
680                         EXIT;
681                         return;
682                 }
683                 spin_unlock(&hdr->coh_page_guard);
684         }
685
686         EXIT;
687 }
688 EXPORT_SYMBOL(cl_page_put);
689
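/*
 * Illustrative sketch (editor's addition): reference discipline.  A caller
 * that already holds a reference may take another one with cl_page_get() and
 * must balance it with cl_page_put().
 */
#if 0
static void example_extra_ref(const struct lu_env *env, struct cl_page *pg)
{
        /* caller must already hold a reference, see cl_page_get() above */
        cl_page_get(pg);                /* pin the page across some operation */
        /* ... */
        cl_page_put(env, pg);           /* release the extra reference */
}
#endif
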
690 /**
691  * Returns a VM page associated with a given cl_page.
692  */
693 cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
694 {
695         const struct cl_page_slice *slice;
696
697         /*
698          * Find uppermost layer with ->cpo_vmpage() method, and return its
699          * result.
700          */
701         page = cl_page_top(page);
702         do {
703                 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
704                         if (slice->cpl_ops->cpo_vmpage != NULL)
705                                 RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
706                 }
707                 page = page->cp_child;
708         } while (page != NULL);
709         LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
710 }
711 EXPORT_SYMBOL(cl_page_vmpage);
712
713 /**
714  * Returns a cl_page associated with a VM page, and given cl_object.
715  */
716 struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
717 {
718         struct cl_page *page;
719         struct cl_object_header *hdr;
720
721         ENTRY;
722         KLASSERT(PageLocked(vmpage));
723
724         /*
725          * NOTE: absence of races and liveness of data are guaranteed by page
726          *       lock on a "vmpage". This works because object destruction
727          *       proceeds bottom-to-top.
728          */
729
730         /*
731          * This loop assumes that ->private points to the top-most page. This
732          * can be rectified easily.
733          */
734         hdr = cl_object_header(cl_object_top(obj));
735         spin_lock(&hdr->coh_page_guard);
736         for (page = (void *)vmpage->private;
737              page != NULL; page = page->cp_child) {
738                 if (cl_object_same(page->cp_obj, obj)) {
739                         cl_page_get_trust(page);
740                         break;
741                 }
742         }
743         spin_unlock(&hdr->coh_page_guard);
744         LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
745         RETURN(page);
746 }
747 EXPORT_SYMBOL(cl_vmpage_page);
748
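/*
 * Illustrative sketch (editor's addition): translating a locked VM page back
 * into the cl_page of a given object, as llite-style code might do.  The
 * helper name is hypothetical.
 */
#if 0
static void example_from_vmpage(const struct lu_env *env,
                                struct cl_object *obj, cfs_page_t *vmpage)
{
        struct cl_page *page;

        LASSERT(PageLocked(vmpage));            /* required by cl_vmpage_page() */
        page = cl_vmpage_page(vmpage, obj);
        if (page != NULL) {
                /* ... use the page ... */
                cl_page_put(env, page);         /* drop the reference acquired above */
        }
}
#endif
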
749 /**
750  * Returns the top-page for a given page.
751  *
752  * \see cl_object_top(), cl_io_top()
753  */
754 struct cl_page *cl_page_top(struct cl_page *page)
755 {
756         return cl_page_top_trusted(page);
757 }
758 EXPORT_SYMBOL(cl_page_top);
759
760 /**
761  * Returns true if \a addr is an address of an allocated cl_page. Used in
762  * assertions. This check is optimistically imprecise, i.e., it occasionally
763  * returns true for incorrect addresses, but if it returns false, then the
764  * address is guaranteed to be incorrect. (Should be named cl_pagep().)
765  *
766  * \see cl_is_lock()
767  */
768 int cl_is_page(const void *addr)
769 {
770         return cfs_mem_is_in_cache(addr, cl_page_kmem);
771 }
772 EXPORT_SYMBOL(cl_is_page);
773
774 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
775                                        const struct lu_device_type *dtype)
776 {
777         return cl_page_at_trusted(page, dtype);
778 }
779 EXPORT_SYMBOL(cl_page_at);
780
781 #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
782
783 #define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
784 ({                                                                      \
785         const struct lu_env        *__env  = (_env);                    \
786         struct cl_page             *__page = (_page);                   \
787         const struct cl_page_slice *__scan;                             \
788         int                         __result;                           \
789         ptrdiff_t                   __op   = (_op);                     \
790         int                       (*__method)_proto;                    \
791                                                                         \
792         __result = 0;                                                   \
793         __page = cl_page_top(__page);                                   \
794         do {                                                            \
795                 list_for_each_entry(__scan, &__page->cp_layers,         \
796                                     cpl_linkage) {                      \
797                         __method = *(void **)((char *)__scan->cpl_ops + \
798                                               __op);                    \
799                         if (__method != NULL) {                         \
800                                 __result = (*__method)(__env, __scan,   \
801                                                        ## __VA_ARGS__); \
802                                 if (__result != 0)                      \
803                                         break;                          \
804                         }                                               \
805                 }                                                       \
806                 __page = __page->cp_child;                              \
807         } while (__page != NULL && __result == 0);                      \
808         if (__result > 0)                                               \
809                 __result = 0;                                           \
810         __result;                                                       \
811 })
812
813 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
814 do {                                                                    \
815         const struct lu_env        *__env  = (_env);                    \
816         struct cl_page             *__page = (_page);                   \
817         const struct cl_page_slice *__scan;                             \
818         ptrdiff_t                   __op   = (_op);                     \
819         void                      (*__method)_proto;                    \
820                                                                         \
821         __page = cl_page_top(__page);                                   \
822         do {                                                            \
823                 list_for_each_entry(__scan, &__page->cp_layers,         \
824                                     cpl_linkage) {                      \
825                         __method = *(void **)((char *)__scan->cpl_ops + \
826                                               __op);                    \
827                         if (__method != NULL)                           \
828                                 (*__method)(__env, __scan,              \
829                                             ## __VA_ARGS__);            \
830                 }                                                       \
831                 __page = __page->cp_child;                              \
832         } while (__page != NULL);                                       \
833 } while (0)
834
835 #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
836 do {                                                                    \
837         const struct lu_env        *__env  = (_env);                    \
838         struct cl_page             *__page = (_page);                   \
839         const struct cl_page_slice *__scan;                             \
840         ptrdiff_t                   __op   = (_op);                     \
841         void                      (*__method)_proto;                    \
842                                                                         \
843         /* get to the bottom page. */                                   \
844         while (__page->cp_child != NULL)                                \
845                 __page = __page->cp_child;                              \
846         do {                                                            \
847                 list_for_each_entry_reverse(__scan, &__page->cp_layers, \
848                                             cpl_linkage) {              \
849                         __method = *(void **)((char *)__scan->cpl_ops + \
850                                               __op);                    \
851                         if (__method != NULL)                           \
852                                 (*__method)(__env, __scan,              \
853                                             ## __VA_ARGS__);            \
854                 }                                                       \
855                 __page = __page->cp_parent;                             \
856         } while (__page != NULL);                                       \
857 } while (0)
858
859 static int cl_page_invoke(const struct lu_env *env,
860                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
861
862 {
863         PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
864         ENTRY;
865         RETURN(CL_PAGE_INVOKE(env, page, op,
866                               (const struct lu_env *,
867                                const struct cl_page_slice *, struct cl_io *),
868                               io));
869 }
870
871 static void cl_page_invoid(const struct lu_env *env,
872                            struct cl_io *io, struct cl_page *page, ptrdiff_t op)
873
874 {
875         PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
876         ENTRY;
877         CL_PAGE_INVOID(env, page, op,
878                        (const struct lu_env *,
879                         const struct cl_page_slice *, struct cl_io *), io);
880         EXIT;
881 }
882
883 static void cl_page_owner_clear(struct cl_page *page)
884 {
885         ENTRY;
886         for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
887                 if (page->cp_owner != NULL) {
888                         LASSERT(page->cp_owner->ci_owned_nr > 0);
889                         page->cp_owner->ci_owned_nr--;
890                         page->cp_owner = NULL;
891                         page->cp_task = NULL;
892                 }
893         }
894         EXIT;
895 }
896
897 static void cl_page_owner_set(struct cl_page *page)
898 {
899         ENTRY;
900         for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
901                 LASSERT(page->cp_owner != NULL);
902                 page->cp_owner->ci_owned_nr++;
903         }
904         EXIT;
905 }
906
907 void cl_page_disown0(const struct lu_env *env,
908                      struct cl_io *io, struct cl_page *pg)
909 {
910         enum cl_page_state state;
911
912         ENTRY;
913         state = pg->cp_state;
914         PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
915         PINVRNT(env, pg, cl_page_invariant(pg));
916         cl_page_owner_clear(pg);
917
918         if (state == CPS_OWNED)
919                 cl_page_state_set(env, pg, CPS_CACHED);
920         /*
921          * Completion call-backs are executed in bottom-up order, so that the
922          * uppermost layer (llite), responsible for VFS/VM interaction, runs
923          * last and can release locks safely.
924          */
925         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
926                                (const struct lu_env *,
927                                 const struct cl_page_slice *, struct cl_io *),
928                                io);
929         EXIT;
930 }
931
932 /**
933  * Returns true iff the page is owned by the given io.
934  */
935 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
936 {
937         LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
938         ENTRY;
939         RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
940 }
941 EXPORT_SYMBOL(cl_page_is_owned);
942
943 /**
944  * Try to own a page by IO.
945  *
946  * Waits until the page is in cl_page_state::CPS_CACHED state, and then
947  * switches it into cl_page_state::CPS_OWNED state.
948  *
949  * \pre  !cl_page_is_owned(pg, io)
950  * \post result == 0 iff cl_page_is_owned(pg, io)
951  *
952  * \retval 0   success
953  *
954  * \retval -ve failure, e.g., the page was destroyed (and landed in
955  *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
956  *             or the page was owned by another thread, or is under IO.
957  *
958  * \see cl_page_disown()
959  * \see cl_page_operations::cpo_own()
960  * \see cl_page_own_try()
961  * \see cl_page_own
962  */
963 static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
964                         struct cl_page *pg, int nonblock)
965 {
966         int result;
967
968         PINVRNT(env, pg, !cl_page_is_owned(pg, io));
969
970         ENTRY;
971         pg = cl_page_top(pg);
972         io = cl_io_top(io);
973
974         if (pg->cp_state == CPS_FREEING) {
975                 result = -EAGAIN;
976         } else {
977                 result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
978                                         (const struct lu_env *,
979                                          const struct cl_page_slice *,
980                                          struct cl_io *, int),
981                                         io, nonblock);
982                 if (result == 0) {
983                         PASSERT(env, pg, pg->cp_owner == NULL);
984                         PASSERT(env, pg, pg->cp_req == NULL);
985                         pg->cp_owner = io;
986                         pg->cp_task  = current;
987                         cl_page_owner_set(pg);
988                         if (pg->cp_state != CPS_FREEING) {
989                                 cl_page_state_set(env, pg, CPS_OWNED);
990                         } else {
991                                 cl_page_disown0(env, io, pg);
992                                 result = -EAGAIN;
993                         }
994                 }
995         }
996         PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
997         RETURN(result);
998 }
999
1000 /**
1001  * Own a page; the call may block.
1002  *
1003  * \see cl_page_own0()
1004  */
1005 int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
1006 {
1007         return cl_page_own0(env, io, pg, 0);
1008 }
1009 EXPORT_SYMBOL(cl_page_own);
1010
1011 /**
1012  * Nonblock version of cl_page_own().
1013  *
1014  * \see cl_page_own0()
1015  */
1016 int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
1017                     struct cl_page *pg)
1018 {
1019         return cl_page_own0(env, io, pg, 1);
1020 }
1021 EXPORT_SYMBOL(cl_page_own_try);
1022
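/*
 * Illustrative sketch (editor's addition): owning a page for the duration of
 * an operation and releasing ownership afterwards.  Error handling beyond the
 * own/disown pairing is elided; the helper name is hypothetical.
 */
#if 0
static int example_with_owned_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
{
        int rc;

        rc = cl_page_own(env, io, pg);          /* may block until CPS_CACHED */
        if (rc != 0)
                return rc;                      /* e.g. the page is being freed */
        /* ... operate on the owned page ... */
        cl_page_disown(env, io, pg);            /* back to CPS_CACHED */
        return 0;
}
#endif
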
1023
1024 /**
1025  * Assume page ownership.
1026  *
1027  * Called when page is already locked by the hosting VM.
1028  *
1029  * \pre !cl_page_is_owned(pg, io)
1030  * \post cl_page_is_owned(pg, io)
1031  *
1032  * \see cl_page_operations::cpo_assume()
1033  */
1034 void cl_page_assume(const struct lu_env *env,
1035                     struct cl_io *io, struct cl_page *pg)
1036 {
1037         PASSERT(env, pg, pg->cp_state < CPS_OWNED);
1038         PASSERT(env, pg, pg->cp_owner == NULL);
1039         PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
1040         PINVRNT(env, pg, cl_page_invariant(pg));
1041
1042         ENTRY;
1043         pg = cl_page_top(pg);
1044         io = cl_io_top(io);
1045
1046         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
1047         pg->cp_owner = io;
1048         pg->cp_task = current;
1049         cl_page_owner_set(pg);
1050         cl_page_state_set(env, pg, CPS_OWNED);
1051         EXIT;
1052 }
1053 EXPORT_SYMBOL(cl_page_assume);
1054
1055 /**
1056  * Releases page ownership without unlocking the page.
1057  *
1058  * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
1059  * underlying VM page (as VM is supposed to do this itself).
1060  *
1061  * \pre   cl_page_is_owned(pg, io)
1062  * \post !cl_page_is_owned(pg, io)
1063  *
1064  * \see cl_page_assume()
1065  */
1066 void cl_page_unassume(const struct lu_env *env,
1067                       struct cl_io *io, struct cl_page *pg)
1068 {
1069         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1070         PINVRNT(env, pg, cl_page_invariant(pg));
1071
1072         ENTRY;
1073         pg = cl_page_top(pg);
1074         io = cl_io_top(io);
1075         cl_page_owner_clear(pg);
1076         cl_page_state_set(env, pg, CPS_CACHED);
1077         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
1078                                (const struct lu_env *,
1079                                 const struct cl_page_slice *, struct cl_io *),
1080                                io);
1081         EXIT;
1082 }
1083 EXPORT_SYMBOL(cl_page_unassume);
1084
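/*
 * Illustrative sketch (editor's addition): taking and dropping ownership of a
 * page whose VM page the caller has already locked, using the
 * assume/unassume pair instead of own/disown.
 */
#if 0
static void example_assume_unassume(const struct lu_env *env,
                                    struct cl_io *io, struct cl_page *pg)
{
        cl_page_assume(env, io, pg);    /* VM page already locked by caller */
        /* ... operate on the owned page ... */
        cl_page_unassume(env, io, pg);  /* ownership released, VM lock kept */
}
#endif
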
1085 /**
1086  * Releases page ownership.
1087  *
1088  * Moves page into cl_page_state::CPS_CACHED.
1089  *
1090  * \pre   cl_page_is_owned(pg, io)
1091  * \post !cl_page_is_owned(pg, io)
1092  *
1093  * \see cl_page_own()
1094  * \see cl_page_operations::cpo_disown()
1095  */
1096 void cl_page_disown(const struct lu_env *env,
1097                     struct cl_io *io, struct cl_page *pg)
1098 {
1099         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1100
1101         ENTRY;
1102         pg = cl_page_top(pg);
1103         io = cl_io_top(io);
1104         cl_page_disown0(env, io, pg);
1105         EXIT;
1106 }
1107 EXPORT_SYMBOL(cl_page_disown);
1108
1109 /**
1110  * Called when page is to be removed from the object, e.g., as a result of
1111  * truncate.
1112  *
1113  * Calls cl_page_operations::cpo_discard() top-to-bottom.
1114  *
1115  * \pre cl_page_is_owned(pg, io)
1116  *
1117  * \see cl_page_operations::cpo_discard()
1118  */
1119 void cl_page_discard(const struct lu_env *env,
1120                      struct cl_io *io, struct cl_page *pg)
1121 {
1122         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1123         PINVRNT(env, pg, cl_page_invariant(pg));
1124
1125         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
1126 }
1127 EXPORT_SYMBOL(cl_page_discard);
1128
1129 /**
1130  * Version of cl_page_delete() that can be called for not fully constructed
1131  * pages, e.g., in an error-handling cl_page_find()->cl_page_delete0()
1132  * path. Doesn't check page invariant.
1133  */
1134 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
1135                             int radix)
1136 {
1137         struct cl_page *tmp = pg;
1138         ENTRY;
1139
1140         PASSERT(env, pg, pg == cl_page_top(pg));
1141         PASSERT(env, pg, pg->cp_state != CPS_FREEING);
1142
1143         /*
1144          * Sever all ways to obtain new pointers to @pg.
1145          */
1146         cl_page_owner_clear(pg);
1147
1148         /*
1149          * Unexport the page before freeing it, so that its
1150          * content is considered invalid.  We have to do this
1151          * because a CPS_FREEING cl_page may not be under the
1152          * protection of a cl_lock.  Afterwards, if this page is
1153          * found by other threads, it will be forced to be
1154          * re-read.
1155          */
1156         cl_page_export(env, pg, 0);
1157         cl_page_state_set0(env, pg, CPS_FREEING);
1158
1159         if (!radix)
1160                 /*
1161                  * !radix means that @pg is not yet in the radix tree, skip
1162                  * removing it.
1163                  */
1164                 tmp = pg->cp_child;
1165         for (; tmp != NULL; tmp = tmp->cp_child) {
1166                 void                    *value;
1167                 struct cl_object_header *hdr;
1168
1169                 hdr = cl_object_header(tmp->cp_obj);
1170                 spin_lock(&hdr->coh_page_guard);
1171                 value = radix_tree_delete(&hdr->coh_tree, tmp->cp_index);
1172                 PASSERT(env, tmp, value == tmp);
1173                 PASSERT(env, tmp, hdr->coh_pages > 0);
1174                 hdr->coh_pages--;
1175                 spin_unlock(&hdr->coh_page_guard);
1176         }
1177
1178         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
1179                        (const struct lu_env *, const struct cl_page_slice *));
1180         EXIT;
1181 }
1182
1183 /**
1184  * Called when a decision is made to throw page out of memory.
1185  *
1186  * Notifies all layers about page destruction by calling
1187  * cl_page_operations::cpo_delete() method top-to-bottom.
1188  *
1189  * Moves page into cl_page_state::CPS_FREEING state (this is the only place
1190  * where transition to this state happens).
1191  *
1192  * Eliminates all venues through which new references to the page can be
1193  * obtained:
1194  *
1195  *     - removes page from the radix trees,
1196  *
1197  *     - breaks linkage from VM page to cl_page.
1198  *
1199  * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
1200  * drain after some time, at which point page will be recycled.
1201  *
1202  * \pre  pg == cl_page_top(pg)
1203  * \pre  VM page is locked
1204  * \post pg->cp_state == CPS_FREEING
1205  *
1206  * \see cl_page_operations::cpo_delete()
1207  */
1208 void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
1209 {
1210         PINVRNT(env, pg, cl_page_invariant(pg));
1211         ENTRY;
1212         cl_page_delete0(env, pg, 1);
1213         EXIT;
1214 }
1215 EXPORT_SYMBOL(cl_page_delete);
1216
1217 /**
1218  * Unmaps page from user virtual memory.
1219  *
1220  * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
1221  * layer responsible for VM interaction has to unmap page from user space
1222  * virtual memory.
1223  *
1224  * \see cl_page_operations::cpo_unmap()
1225  */
1226 int cl_page_unmap(const struct lu_env *env,
1227                   struct cl_io *io, struct cl_page *pg)
1228 {
1229         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1230         PINVRNT(env, pg, cl_page_invariant(pg));
1231
1232         return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
1233 }
1234 EXPORT_SYMBOL(cl_page_unmap);
1235
1236 /**
1237  * Marks page up-to-date.
1238  *
1239  * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
1240  * layer responsible for VM interaction has to mark/clear page as up-to-date
1241  * by the \a uptodate argument.
1242  *
1243  * \see cl_page_operations::cpo_export()
1244  */
1245 void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
1246 {
1247         PINVRNT(env, pg, cl_page_invariant(pg));
1248         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
1249                        (const struct lu_env *,
1250                         const struct cl_page_slice *, int), uptodate);
1251 }
1252 EXPORT_SYMBOL(cl_page_export);
1253
1254 /**
1255  * Returns true, iff \a pg is VM locked in a suitable sense by the calling
1256  * thread.
1257  */
1258 int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
1259 {
1260         int result;
1261         const struct cl_page_slice *slice;
1262
1263         ENTRY;
1264         pg = cl_page_top_trusted((struct cl_page *)pg);
1265         slice = container_of(pg->cp_layers.next,
1266                              const struct cl_page_slice, cpl_linkage);
1267         PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
1268         /*
1269          * Call ->cpo_is_vmlocked() directly instead of going through
1270          * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
1271          * cl_page_invariant().
1272          */
1273         result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
1274         PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
1275         RETURN(result == -EBUSY);
1276 }
1277 EXPORT_SYMBOL(cl_page_is_vmlocked);
1278
1279 static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
1280 {
1281         ENTRY;
1282         RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
1283 }
1284
1285 static void cl_page_io_start(const struct lu_env *env,
1286                              struct cl_page *pg, enum cl_req_type crt)
1287 {
1288         /*
1289          * Page is queued for IO, change its state.
1290          */
1291         ENTRY;
1292         cl_page_owner_clear(pg);
1293         cl_page_state_set(env, pg, cl_req_type_state(crt));
1294         EXIT;
1295 }
1296
1297 /**
1298  * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
1299  * called top-to-bottom. Every layer either agrees to submit this page (by
1300  * returning 0), or requests to omit this page (by returning -EALREADY). The
1301  * layer handling interactions with the VM also has to inform the VM that the
1302  * page is now under transfer.
1303  */
1304 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
1305                  struct cl_page *pg, enum cl_req_type crt)
1306 {
1307         int result;
1308
1309         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1310         PINVRNT(env, pg, cl_page_invariant(pg));
1311         PINVRNT(env, pg, crt < CRT_NR);
1312
1313         /*
1314          * XXX this has to be called bottom-to-top, so that llite can set up
1315          * PG_writeback without risking other layers deciding to skip this
1316          * page.
1317          */
1318         result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
1319         if (result == 0)
1320                 cl_page_io_start(env, pg, crt);
1321
1322         KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
1323                       equi(result == 0,
1324                            PageWriteback(cl_page_vmpage(env, pg)))));
1325         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
1326         return result;
1327 }
1328 EXPORT_SYMBOL(cl_page_prep);
1329
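/*
 * Illustrative sketch (editor's addition): how a submitting caller might
 * interpret cl_page_prep() results; per the comment above, -EALREADY means a
 * layer asked to omit the page, so it can be skipped rather than treated as
 * an error.  The helper name is hypothetical.
 */
#if 0
static int example_prep_for_write(const struct lu_env *env, struct cl_io *io,
                                  struct cl_page *pg)
{
        int rc;

        rc = cl_page_prep(env, io, pg, CRT_WRITE);
        if (rc == -EALREADY)
                return 0;       /* some layer asked to omit this page */
        return rc;              /* 0: page is now in CPS_PAGEOUT */
}
#endif
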
1330 /**
1331  * Notify layers about transfer completion.
1332  *
1333  * Invoked by the transfer sub-system (which is a part of osc) to notify layers
1334  * that a transfer, of which this page is a part, has completed.
1335  *
1336  * Completion call-backs are executed in bottom-up order, so that the
1337  * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
1338  * and can release locks safely.
1339  *
1340  * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
1341  * \post pg->cp_state == CPS_CACHED
1342  *
1343  * \see cl_page_operations::cpo_completion()
1344  */
1345 void cl_page_completion(const struct lu_env *env,
1346                         struct cl_page *pg, enum cl_req_type crt, int ioret)
1347 {
1348         struct cl_sync_io *anchor = pg->cp_sync_io;
1349
1350         PASSERT(env, pg, crt < CRT_NR);
1351         /* cl_page::cp_req already cleared by the caller (osc_completion()) */
1352         PASSERT(env, pg, pg->cp_req == NULL);
1353         PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
1354         PINVRNT(env, pg, cl_page_invariant(pg));
1355
1356         ENTRY;
1357         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, ioret);
1358         if (crt == CRT_READ && ioret == 0) {
1359                 PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
1360                 pg->cp_flags |= CPF_READ_COMPLETED;
1361         }
1362
1363         cl_page_state_set(env, pg, CPS_CACHED);
1364         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
1365                                (const struct lu_env *,
1366                                 const struct cl_page_slice *, int), ioret);
1367         if (anchor) {
1368                 LASSERT(pg->cp_sync_io == anchor);
1369                 pg->cp_sync_io = NULL;
1370                 cl_sync_io_note(anchor, ioret);
1371         }
1372
1373         /* Don't assert the page writeback bit here because the lustre file
1374          * may be used as a backing store for swap space. In this case the
1375          * writeback bit is set by the VM, and obviously we should not clear
1376          * it here. Fortunately, such pages are all TRANSIENT pages. */
1377         KLASSERT(ergo(pg->cp_type == CPT_CACHEABLE,
1378                       !PageWriteback(cl_page_vmpage(env, pg))));
1379         EXIT;
1380 }
1381 EXPORT_SYMBOL(cl_page_completion);
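
/*
 * Illustrative sketch only: a hypothetical completion hook in the transfer
 * engine. The helper name is an assumption; it merely shows the calling
 * convention, with ioret being 0 on success or a negative errno on failure.
 */
#if 0
static void example_transfer_done(const struct lu_env *env,
                                  struct cl_page *pg, enum cl_req_type crt,
                                  int ioret)
{
        /* The caller must have cleared pg->cp_req already; the call below
         * moves the page back to CPS_CACHED and runs cpo_completion()
         * bottom-up, llite last. */
        cl_page_completion(env, pg, crt, ioret);
}
#endif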
1382
1383 /**
1384  * Notify layers that the transfer formation engine decided to yank this
1385  * page from the cache and make it part of a transfer.
1386  *
1387  * \pre  pg->cp_state == CPS_CACHED
1388  * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
1389  *
1390  * \see cl_page_operations::cpo_make_ready()
1391  */
1392 int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
1393                        enum cl_req_type crt)
1394 {
1395         int result;
1396
1397         PINVRNT(env, pg, crt < CRT_NR);
1398
1399         ENTRY;
1400         result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
1401                                 (const struct lu_env *,
1402                                  const struct cl_page_slice *));
1403         if (result == 0) {
1404                 PASSERT(env, pg, pg->cp_state == CPS_CACHED);
1405                 cl_page_io_start(env, pg, crt);
1406         }
1407         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
1408         RETURN(result);
1409 }
1410 EXPORT_SYMBOL(cl_page_make_ready);
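
/*
 * Illustrative sketch only: pulling a cached dirty page into a write-out.
 * The helper name is hypothetical and how the page was chosen is left out.
 */
#if 0
static int example_start_writeout(const struct lu_env *env,
                                  struct cl_page *pg)
{
        /* On success (0) the page moves from CPS_CACHED to CPS_PAGEOUT and
         * can be added to an outgoing RPC; a non-zero result means some
         * layer refused to make the page ready. */
        return cl_page_make_ready(env, pg, CRT_WRITE);
}
#endif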
1411
1412 /**
1413  * Notify layers that a high-level IO decided to place this page into a
1414  * cache for future transfer.
1415  *
1416  * The layer implementing transfer engine (osc) has to register this page in
1417  * its queues.
1418  *
1419  * \pre  cl_page_is_owned(pg, io)
1420  * \post ergo(result == 0,
1421  *            pg->cp_state == CPS_CACHED)
1422  *
1423  * \see cl_page_operations::cpo_cache_add()
1424  */
1425 int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
1426                       struct cl_page *pg, enum cl_req_type crt)
1427 {
1428         int result;
1429
1430         PINVRNT(env, pg, crt < CRT_NR);
1431         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1432         PINVRNT(env, pg, cl_page_invariant(pg));
1433
1434         ENTRY;
1435         result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_cache_add));
1436         if (result == 0) {
1437                 cl_page_owner_clear(pg);
1438                 cl_page_state_set(env, pg, CPS_CACHED);
1439         }
1440         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
1441         RETURN(result);
1442 }
1443 EXPORT_SYMBOL(cl_page_cache_add);
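
/*
 * Illustrative sketch only: a write that does not need immediate transfer
 * can park an owned page in the cache like this. The helper name is
 * hypothetical.
 */
#if 0
static int example_queue_for_later(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
{
        /* On success the page is disowned, moved to CPS_CACHED and
         * registered with the osc queues for a future CRT_WRITE transfer. */
        return cl_page_cache_add(env, io, pg, CRT_WRITE);
}
#endif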
1444
1445 /**
1446  * Checks whether the page is protected by any extent lock in at least the
1447  * required mode.
1448  *
1449  * \return the same as in cl_page_operations::cpo_is_under_lock() method.
1450  * \see cl_page_operations::cpo_is_under_lock()
1451  */
1452 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
1453                           struct cl_page *page)
1454 {
1455         int rc;
1456
1457         PINVRNT(env, page, cl_page_invariant(page));
1458
1459         ENTRY;
1460         rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
1461                             (const struct lu_env *,
1462                              const struct cl_page_slice *, struct cl_io *),
1463                             io);
1464         PASSERT(env, page, rc != 0);
1465         RETURN(rc);
1466 }
1467 EXPORT_SYMBOL(cl_page_is_under_lock);
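
/*
 * Illustrative sketch only: the return value semantics are defined by the
 * layers' cpo_is_under_lock() methods, so this shows just the calling
 * convention. The helper name is hypothetical.
 */
#if 0
static int example_page_covered(const struct lu_env *env, struct cl_io *io,
                                struct cl_page *pg)
{
        /* Always non-zero (see the PASSERT above); interpretation is up to
         * the caller and the layers, see cpo_is_under_lock(). */
        return cl_page_is_under_lock(env, io, pg);
}
#endif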
1468
1469 /**
1470  * Purges all cached pages belonging to the object \a clobj.
1471  */
1472 int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
1473 {
1474         struct cl_thread_info   *info;
1475         struct cl_object        *obj = cl_object_top(clobj);
1476         struct cl_io            *io;
1477         struct cl_page_list     *plist;
1478         int                      result;
1479
1480         ENTRY;
1481         info  = cl_env_info(env);
1482         plist = &info->clt_list;
1483         io    = &info->clt_io;
1484
1485         /*
1486          * Initialize the io. This is ugly, since we never do IO in this
1487          * function; it is only done to keep the cl_page_list functions happy. -jay
1488          */
1489         io->ci_obj = obj;
1490         result = cl_io_init(env, io, CIT_MISC, obj);
1491         if (result != 0) {
1492                 cl_io_fini(env, io);
1493                 RETURN(io->ci_result);
1494         }
1495
1496         cl_page_list_init(plist);
1497         cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, plist, 0);
1498         /*
1499          * Since we're purging the pages of an object, we don't care about
1500          * the possible outcomes of the following functions.
1501          */
1502         cl_page_list_unmap(env, io, plist);
1503         cl_page_list_discard(env, io, plist);
1504         cl_page_list_disown(env, io, plist);
1505         cl_page_list_fini(env, plist);
1506
1507         cl_io_fini(env, io);
1508         RETURN(result);
1509 }
1510 EXPORT_SYMBOL(cl_pages_prune);
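
/*
 * Illustrative sketch only: a hypothetical point where a layer drops all
 * cached pages of an object, e.g. before invalidating it; the trigger is an
 * assumption.
 */
#if 0
static int example_drop_object_pages(const struct lu_env *env,
                                     struct cl_object *obj)
{
        /* Every cached page of obj is unmapped from the VM, discarded and
         * disowned through a temporary CIT_MISC io. */
        return cl_pages_prune(env, obj);
}
#endif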
1511
1512 /**
1513  * Tells transfer engine that only part of a page is to be transmitted.
1514  *
1515  * \see cl_page_operations::cpo_clip()
1516  */
1517 void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
1518                   int from, int to)
1519 {
1520         PINVRNT(env, pg, cl_page_invariant(pg));
1521
1522         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", from, to);
1523         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
1524                        (const struct lu_env *,
1525                         const struct cl_page_slice *,int, int),
1526                        from, to);
1527 }
1528 EXPORT_SYMBOL(cl_page_clip);
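
/*
 * Illustrative sketch only: a write that ends inside a page could clip the
 * transfer like this. The helper name and the "bytes" argument are
 * hypothetical.
 */
#if 0
static void example_clip_tail(const struct lu_env *env, struct cl_page *pg,
                              int bytes)
{
        /* Only bytes [0, bytes) of the page will be transferred. */
        cl_page_clip(env, pg, 0, bytes);
}
#endif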
1529
1530 /**
1531  * Prints human readable representation of the header of \a pg via \a printer.
1532  */
1533 void cl_page_header_print(const struct lu_env *env, void *cookie,
1534                           lu_printer_t printer, const struct cl_page *pg)
1535 {
1536         (*printer)(env, cookie,
1537                    "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
1538                    pg, atomic_read(&pg->cp_ref), pg->cp_obj,
1539                    pg->cp_index, pg->cp_parent, pg->cp_child,
1540                    pg->cp_state, pg->cp_error, pg->cp_type,
1541                    pg->cp_owner, pg->cp_req, pg->cp_flags);
1542 }
1543 EXPORT_SYMBOL(cl_page_header_print);
1544
1545 /**
1546  * Prints human readable representation of \a pg and its layers via \a printer.
1547  */
1548 void cl_page_print(const struct lu_env *env, void *cookie,
1549                    lu_printer_t printer, const struct cl_page *pg)
1550 {
1551         struct cl_page *scan;
1552
1553         for (scan = cl_page_top((struct cl_page *)pg);
1554              scan != NULL; scan = scan->cp_child)
1555                 cl_page_header_print(env, cookie, printer, scan);
1556         CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
1557                        (const struct lu_env *env,
1558                         const struct cl_page_slice *slice,
1559                         void *cookie, lu_printer_t p), cookie, printer);
1560         (*printer)(env, cookie, "end page@%p\n", pg);
1561 }
1562 EXPORT_SYMBOL(cl_page_print);
1563
1564 /**
1565  * Cancel a page which is still in a transfer.
1566  */
1567 int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
1568 {
1569         return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
1570                               (const struct lu_env *,
1571                                const struct cl_page_slice *));
1572 }
1573 EXPORT_SYMBOL(cl_page_cancel);
1574
1575 /**
1576  * Converts a page index within object \a obj into a byte offset.
1577  */
1578 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
1579 {
1580         /*
1581          * XXX for now.
1582          */
1583         return (loff_t)idx << CFS_PAGE_SHIFT;
1584 }
1585 EXPORT_SYMBOL(cl_offset);
1586
1587 /**
1588  * Converts a byte offset within object \a obj into a page index.
1589  */
1590 pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
1591 {
1592         /*
1593          * XXX for now.
1594          */
1595         return offset >> CFS_PAGE_SHIFT;
1596 }
1597 EXPORT_SYMBOL(cl_index);
1598
1599 int cl_page_size(const struct cl_object *obj)
1600 {
1601         return 1 << CFS_PAGE_SHIFT;
1602 }
1603 EXPORT_SYMBOL(cl_page_size);
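
/*
 * Illustrative sketch only: how the three helpers above relate. With 4K
 * pages (CFS_PAGE_SHIFT == 12), cl_offset(obj, 3) is 12288 and
 * cl_index(obj, 12288 + 100) is 3 again. The helper name is hypothetical.
 */
#if 0
static void example_offset_index(const struct cl_object *obj, loff_t pos)
{
        pgoff_t idx    = cl_index(obj, pos);  /* page containing pos */
        loff_t  start  = cl_offset(obj, idx); /* first byte of that page */
        int     within = (int)(pos - start);  /* offset inside the page */

        LASSERT(within >= 0 && within < cl_page_size(obj));
}
#endif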
1604
1605 /**
1606  * Adds page slice to the compound page.
1607  *
1608  * This is called by cl_object_operations::coo_page_init() methods to add
1609  * per-layer state to the page. New state is added at the end of the
1610  * cl_page::cp_layers list, that is, at the bottom of the stack.
1611  *
1612  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
1613  */
1614 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
1615                        struct cl_object *obj,
1616                        const struct cl_page_operations *ops)
1617 {
1618         ENTRY;
1619         list_add_tail(&slice->cpl_linkage, &page->cp_layers);
1620         slice->cpl_obj  = obj;
1621         slice->cpl_ops  = ops;
1622         slice->cpl_page = page;
1623         EXIT;
1624 }
1625 EXPORT_SYMBOL(cl_page_slice_add);
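
/*
 * Illustrative sketch only: "foo" is an invented layer; its slice type,
 * operations vector and init helper are assumptions meant to show where
 * cl_page_slice_add() fits in a layer's page initialization.
 */
#if 0
struct foo_page {
        struct cl_page_slice fp_cl;     /* generic slice must be embedded */
        /* ... layer-private per-page state ... */
};

static const struct cl_page_operations foo_page_ops; /* layer's methods */

static void foo_page_attach(struct cl_page *page, struct cl_object *obj,
                            struct foo_page *fp)
{
        /* Links fp->fp_cl at the tail of page->cp_layers (bottom of the
         * stack) and records the owning object and operations vector. */
        cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
}
#endif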
1626
1627 int  cl_page_init(void)
1628 {
1629         return lu_kmem_init(cl_page_caches);
1630 }
1631
1632 void cl_page_fini(void)
1633 {
1634         lu_kmem_fini(cl_page_caches);
1635 }