1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Client Lustre Page.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42 #ifndef EXPORT_SYMTAB
43 # define EXPORT_SYMTAB
44 #endif
45
46 #include <libcfs/libcfs.h>
47 #include <obd_class.h>
48 #include <obd_support.h>
49 #include <libcfs/list.h>
50
51 #include <cl_object.h>
52 #include "cl_internal.h"
53
54 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
55                             int radix);
56
57 static cfs_mem_cache_t      *cl_page_kmem = NULL;
58
59 static struct lu_kmem_descr cl_page_caches[] = {
60         {
61                 .ckd_cache = &cl_page_kmem,
62                 .ckd_name  = "cl_page_kmem",
63                 .ckd_size  = sizeof (struct cl_page)
64         },
65         {
66                 .ckd_cache = NULL
67         }
68 };
69
70 #ifdef LIBCFS_DEBUG
71 # define PASSERT(env, page, expr)                                       \
72   do {                                                                    \
73           if (unlikely(!(expr))) {                                      \
74                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
75                   LASSERT(0);                                           \
76           }                                                             \
77   } while (0)
78 #else /* !LIBCFS_DEBUG */
79 # define PASSERT(env, page, exp) \
80         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
81 #endif /* !LIBCFS_DEBUG */
82
83 #ifdef INVARIANT_CHECK
84 # define PINVRNT(env, page, expr)                                       \
85   do {                                                                    \
86           if (unlikely(!(expr))) {                                      \
87                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
88                   LINVRNT(0);                                           \
89           }                                                             \
90   } while (0)
91 #else /* !INVARIANT_CHECK */
92 # define PINVRNT(env, page, exp) \
93         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
94 #endif /* !INVARIANT_CHECK */
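
/*
 * Note on the non-debug variants above: the (void)sizeof(...) forms keep the
 * "env", "page" and "exp" arguments syntactically used without evaluating
 * them, so builds without LIBCFS_DEBUG/INVARIANT_CHECK get neither "unused
 * variable" warnings nor any side effects from the assertion expressions.
 */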
95
96 /**
97  * Internal version of cl_page_top(); it should be called with the page
98  * referenced or with coh_page_guard held.
99  */
100 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
101 {
102         LASSERT(cl_is_page(page));
103         while (page->cp_parent != NULL)
104                 page = page->cp_parent;
105         return page;
106 }
107
108 /**
109  * Internal version of cl_page_get().
110  *
111  * This function can be used to obtain an initial reference to a previously
112  * unreferenced cached object. It can be called only if concurrent page
113  * reclamation is somehow prevented, e.g., by locking the page radix-tree
114  * (cl_object_header::coh_page_guard), or by holding a lock on the VM page
115  * associated with \a page.
116  *
117  * Use with care! Not exported.
118  */
119 static void cl_page_get_trust(struct cl_page *page)
120 {
121         LASSERT(cl_is_page(page));
122         /*
123          * Checkless version for trusted users.
124          */
125         if (cfs_atomic_inc_return(&page->cp_ref) == 1)
126                 cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
127 }
128
129 /**
130  * Returns a slice within a page, corresponding to the given layer in the
131  * device stack.
132  *
133  * \see cl_lock_at()
134  */
135 static const struct cl_page_slice *
136 cl_page_at_trusted(const struct cl_page *page,
137                    const struct lu_device_type *dtype)
138 {
139         const struct cl_page_slice *slice;
140
141 #ifdef INVARIANT_CHECK
142         struct cl_object_header *ch = cl_object_header(page->cp_obj);
143
144         if (!cfs_atomic_read(&page->cp_ref))
145                 LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
146 #endif
147         ENTRY;
148
149         page = cl_page_top_trusted((struct cl_page *)page);
150         do {
151                 cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
152                         if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
153                                 RETURN(slice);
154                 }
155                 page = page->cp_child;
156         } while (page != NULL);
157         RETURN(NULL);
158 }
159
160 /**
161  * Returns a page with given index in the given object, or NULL if no page is
162  * found. Acquires a reference on \a page.
163  *
164  * Locking: called under cl_object_header::coh_page_guard spin-lock.
165  */
166 struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
167 {
168         struct cl_page *page;
169
170         LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
171
172         page = radix_tree_lookup(&hdr->coh_tree, index);
173         if (page != NULL) {
174                 LASSERT(cl_is_page(page));
175                 cl_page_get_trust(page);
176         }
177         return page;
178 }
179 EXPORT_SYMBOL(cl_page_lookup);
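
/*
 * Illustrative sketch (not part of this file): a typical cl_page_lookup()
 * caller takes coh_page_guard around the lookup, as the non-CPT_CACHEABLE
 * branch of cl_page_find0() below does, and drops the acquired reference
 * with cl_page_put() when done. "env", "hdr" and "index" are assumed to be
 * supplied by the caller.
 *
 *        struct cl_page *page;
 *
 *        cfs_spin_lock(&hdr->coh_page_guard);
 *        page = cl_page_lookup(hdr, index);
 *        cfs_spin_unlock(&hdr->coh_page_guard);
 *        if (page != NULL) {
 *                ... use the page ...
 *                cl_page_put(env, page);
 *        }
 */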
180
181 /**
182  * Returns a list of pages covering the [start, end] index range of \a obj.
183  *
184  * The gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
185  * crucial in the face of [offset, EOF] locks.
186  */
187 void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
188                          struct cl_io *io, pgoff_t start, pgoff_t end,
189                          struct cl_page_list *queue, int nonblock)
190 {
191         struct cl_object_header *hdr;
192         struct cl_page          *page;
193         struct cl_page         **pvec;
194         const struct cl_page_slice  *slice;
195         const struct lu_device_type *dtype;
196         pgoff_t                  idx;
197         unsigned int             nr;
198         unsigned int             i;
199         unsigned int             j;
200         int                    (*page_own)(const struct lu_env *env,
201                                            struct cl_io *io,
202                                            struct cl_page *pg);
203         ENTRY;
204
205         page_own = nonblock ? cl_page_own_try : cl_page_own;
206
207         idx = start;
208         hdr = cl_object_header(obj);
209         pvec = cl_env_info(env)->clt_pvec;
210         dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
211         cfs_spin_lock(&hdr->coh_page_guard);
212         while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
213                                             idx, CLT_PVEC_SIZE)) > 0) {
214                 idx = pvec[nr - 1]->cp_index + 1;
215                 for (i = 0, j = 0; i < nr; ++i) {
216                         page = pvec[i];
217                         PASSERT(env, page, cl_is_page(page));
218                         pvec[i] = NULL;
219                         if (page->cp_index > end)
220                                 break;
221                         if (page->cp_state == CPS_FREEING)
222                                 continue;
223                         if (page->cp_type == CPT_TRANSIENT) {
224                                 /* God, we found a transient page!*/
225                                 continue;
226                         }
227
228                         slice = cl_page_at_trusted(page, dtype);
229                         /*
230                          * Pages of an lsm-less file have no underlying
231                          * sub-page at the osc layer, in case of ...
232                          */
233                         PASSERT(env, page, slice != NULL);
234
235                         page = slice->cpl_page;
236                         /*
237                          * Can safely call cl_page_get_trust() under
238                          * radix-tree spin-lock.
239                          *
240                          * XXX not true, because @page belongs to an object other
241                          * than @hdr's and is protected by a different tree lock.
242                          */
243                         cl_page_get_trust(page);
244                         lu_ref_add_atomic(&page->cp_reference,
245                                           "page_list", cfs_current());
246                         pvec[j++] = page;
247                 }
248
249                 /*
250                  * Here a delicate locking dance is performed. The current
251                  * thread holds a reference to a page, but has to own it
252                  * before it can be placed into the queue. Owning implies
253                  * waiting, so the radix-tree lock has to be released. After
254                  * the wait one has to check that the pages weren't truncated
255                  * (cl_page_own() returns an error in that case).
256                  */
257                 cfs_spin_unlock(&hdr->coh_page_guard);
258                 for (i = 0; i < j; ++i) {
259                         page = pvec[i];
260                         if (page_own(env, io, page) == 0)
261                                 cl_page_list_add(queue, page);
262                         lu_ref_del(&page->cp_reference,
263                                    "page_list", cfs_current());
264                         cl_page_put(env, page);
265                 }
266                 cfs_spin_lock(&hdr->coh_page_guard);
267                 if (nr < CLT_PVEC_SIZE)
268                         break;
269         }
270         cfs_spin_unlock(&hdr->coh_page_guard);
271         EXIT;
272 }
273 EXPORT_SYMBOL(cl_page_gang_lookup);
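
/*
 * Illustrative sketch (not part of this file): collecting and owning all
 * cached pages in an index range, then releasing them. This assumes the
 * cl_page_list helpers (cl_page_list_init(), cl_page_list_disown(),
 * cl_page_list_fini()) that normally accompany the cl_page_list type used
 * here, and caller-supplied "env", "obj", "io", "start" and "end".
 *
 *        struct cl_page_list plist;
 *
 *        cl_page_list_init(&plist);
 *        cl_page_gang_lookup(env, obj, io, start, end, &plist, 0);
 *        ... process the owned pages on the list ...
 *        cl_page_list_disown(env, io, &plist);
 *        cl_page_list_fini(env, &plist);
 */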
274
275 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
276 {
277         struct cl_object *obj  = page->cp_obj;
278         struct cl_site   *site = cl_object_site(obj);
279
280         PASSERT(env, page, cl_is_page(page));
281         PASSERT(env, page, cfs_list_empty(&page->cp_batch));
282         PASSERT(env, page, page->cp_owner == NULL);
283         PASSERT(env, page, page->cp_req == NULL);
284         PASSERT(env, page, page->cp_parent == NULL);
285         PASSERT(env, page, page->cp_state == CPS_FREEING);
286
287         ENTRY;
288         cfs_might_sleep();
289         while (!cfs_list_empty(&page->cp_layers)) {
290                 struct cl_page_slice *slice;
291
292                 slice = cfs_list_entry(page->cp_layers.next,
293                                        struct cl_page_slice, cpl_linkage);
294                 cfs_list_del_init(page->cp_layers.next);
295                 slice->cpl_ops->cpo_fini(env, slice);
296         }
297         cfs_atomic_dec(&site->cs_pages.cs_total);
298         cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
299         lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
300         cl_object_put(env, obj);
301         lu_ref_fini(&page->cp_reference);
302         OBD_SLAB_FREE_PTR(page, cl_page_kmem);
303         EXIT;
304 }
305
306 /**
307  * Helper function updating page state. This is the only place in the code
308  * where cl_page::cp_state field is mutated.
309  */
310 static inline void cl_page_state_set_trust(struct cl_page *page,
311                                            enum cl_page_state state)
312 {
313         /* bypass const. */
314         *(enum cl_page_state *)&page->cp_state = state;
315 }
316
317 static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
318                          pgoff_t ind, struct page *vmpage,
319                          enum cl_page_type type, struct cl_page **out)
320 {
321         struct cl_page          *page;
322         struct cl_page          *err  = NULL;
323         struct lu_object_header *head;
324         struct cl_site          *site = cl_object_site(o);
325         int                      result;
326
327         ENTRY;
328         result = +1;
329         OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
330         if (page != NULL) {
331                 cfs_atomic_set(&page->cp_ref, 1);
332                 page->cp_obj = o;
333                 cl_object_get(o);
334                 page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
335                                                      "cl_page", page);
336                 page->cp_index = ind;
337                 cl_page_state_set_trust(page, CPS_CACHED);
338                 page->cp_type = type;
339                 CFS_INIT_LIST_HEAD(&page->cp_layers);
340                 CFS_INIT_LIST_HEAD(&page->cp_batch);
341                 CFS_INIT_LIST_HEAD(&page->cp_flight);
342                 cfs_mutex_init(&page->cp_mutex);
343                 lu_ref_init(&page->cp_reference);
344                 head = o->co_lu.lo_header;
345                 cfs_list_for_each_entry(o, &head->loh_layers,
346                                         co_lu.lo_linkage) {
347                         if (o->co_ops->coo_page_init != NULL) {
348                                 err = o->co_ops->coo_page_init(env, o,
349                                                                page, vmpage);
350                                 if (err != NULL) {
351                                         cl_page_state_set_trust(page,
352                                                                 CPS_FREEING);
353                                         cl_page_free(env, page);
354                                         page = err;
355                                         break;
356                                 }
357                         }
358                 }
359                 if (err == NULL) {
360                         cfs_atomic_inc(&site->cs_pages.cs_busy);
361                         cfs_atomic_inc(&site->cs_pages.cs_total);
362                         cfs_atomic_inc(&site->cs_pages_state[CPS_CACHED]);
363                         cfs_atomic_inc(&site->cs_pages.cs_created);
364                         result = 0;
365                 }
366         } else
367                 page = ERR_PTR(-ENOMEM);
368         *out = page;
369         RETURN(result);
370 }
371
372 /**
373  * Returns a cl_page with index \a idx at the object \a o, and associated with
374  * the VM page \a vmpage.
375  *
376  * This is the main entry point into the cl_page caching interface. First, a
377  * cache (implemented as a per-object radix tree) is consulted. If the page is
378  * found there, it is returned immediately. Otherwise a new page is allocated
379  * and returned. In either case, an additional reference to the page is acquired.
380  *
381  * \see cl_object_find(), cl_lock_find()
382  */
383 static struct cl_page *cl_page_find0(const struct lu_env *env,
384                                      struct cl_object *o,
385                                      pgoff_t idx, struct page *vmpage,
386                                      enum cl_page_type type,
387                                      struct cl_page *parent)
388 {
389         struct cl_page          *page;
390         struct cl_page          *ghost = NULL;
391         struct cl_object_header *hdr;
392         struct cl_site          *site = cl_object_site(o);
393         int err;
394
395         LINVRNT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
396         cfs_might_sleep();
397
398         ENTRY;
399
400         hdr = cl_object_header(o);
401         cfs_atomic_inc(&site->cs_pages.cs_lookup);
402
403         CDEBUG(D_PAGE, "%lu@"DFID" %p %lu %i\n",
404                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
405         /* fast path. */
406         if (type == CPT_CACHEABLE) {
407                 /*
408                  * cl_vmpage_page() can be called here without any locks as
409                  *
410                  *     - "vmpage" is locked (which prevents ->private from
411                  *       concurrent updates), and
412                  *
413                  *     - "o" cannot be destroyed while current thread holds a
414                  *       reference on it.
415                  */
416                 page = cl_vmpage_page(vmpage, o);
417                 PINVRNT(env, page,
418                         ergo(page != NULL,
419                              cl_page_vmpage(env, page) == vmpage &&
420                              (void *)radix_tree_lookup(&hdr->coh_tree,
421                                                        idx) == page));
422         } else {
423                 cfs_spin_lock(&hdr->coh_page_guard);
424                 page = cl_page_lookup(hdr, idx);
425                 cfs_spin_unlock(&hdr->coh_page_guard);
426         }
427         if (page != NULL) {
428                 cfs_atomic_inc(&site->cs_pages.cs_hit);
429                 RETURN(page);
430         }
431
432         /* allocate and initialize cl_page */
433         err = cl_page_alloc(env, o, idx, vmpage, type, &page);
434         if (err != 0)
435                 RETURN(page);
436         /*
437          * XXX optimization: use radix_tree_preload() here, and change tree
438          * gfp mask to GFP_KERNEL in cl_object_header_init().
439          */
440         cfs_spin_lock(&hdr->coh_page_guard);
441         err = radix_tree_insert(&hdr->coh_tree, idx, page);
442         if (err != 0) {
443                 ghost = page;
444                 /*
445                  * Noted by Jay: a lock on \a vmpage protects cl_page_find()
446                  * from this race, but
447                  *
448                  *     0. it's better to have cl_page interface "locally
449                  *     consistent" so that its correctness can be reasoned
450                  *     about without appealing to the (obscure world of) VM
451                  *     locking.
452                  *
453                  *     1. handling this race allows ->coh_tree to remain
454                  *     consistent even when VM locking is somehow busted,
455                  *     which is very useful during diagnosing and debugging.
456                  */
457                 page = ERR_PTR(err);
458                 if (err == -EEXIST) {
459                         /*
460                          * XXX in case of a lookup for CPT_TRANSIENT page,
461                          * nothing protects a CPT_CACHEABLE page from being
462                          * concurrently moved into CPS_FREEING state.
463                          */
464                         page = cl_page_lookup(hdr, idx);
465                         PASSERT(env, page, page != NULL);
466                         if (page->cp_type == CPT_TRANSIENT &&
467                             type == CPT_CACHEABLE) {
468                                 /* XXX: We should make sure that the inode
469                                  * semaphore remains held for the lifetime of
470                                  * transient pages, so that conflicting
471                                  * transient pages cannot exist.
472                                  */
473                                 cfs_spin_unlock(&hdr->coh_page_guard);
474                                 cl_page_put(env, page);
475                                 cfs_spin_lock(&hdr->coh_page_guard);
476                                 page = ERR_PTR(-EBUSY);
477                         }
478                 }
479         } else {
480                 if (parent) {
481                         LASSERT(page->cp_parent == NULL);
482                         page->cp_parent = parent;
483                         parent->cp_child = page;
484                 }
485                 hdr->coh_pages++;
486         }
487         cfs_spin_unlock(&hdr->coh_page_guard);
488
489         if (unlikely(ghost != NULL)) {
490                 cfs_atomic_dec(&site->cs_pages.cs_busy);
491                 cl_page_delete0(env, ghost, 0);
492                 cl_page_free(env, ghost);
493         }
494         RETURN(page);
495 }
496
497 struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
498                              pgoff_t idx, struct page *vmpage,
499                              enum cl_page_type type)
500 {
501         return cl_page_find0(env, o, idx, vmpage, type, NULL);
502 }
503 EXPORT_SYMBOL(cl_page_find);
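
/*
 * Illustrative sketch (not part of this file): looking up (or creating) the
 * cl_page for a locked VM page. "env", "obj", "idx" and "vmpage" are assumed
 * to come from the caller; error handling follows the ERR_PTR() convention
 * used by cl_page_find0() above.
 *
 *        struct cl_page *page;
 *
 *        page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
 *        if (IS_ERR(page))
 *                return PTR_ERR(page);
 *        ... use the page ...
 *        cl_page_put(env, page);
 */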
504
505
506 struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
507                                  pgoff_t idx, struct page *vmpage,
508                                  struct cl_page *parent)
509 {
510         return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
511 }
512 EXPORT_SYMBOL(cl_page_find_sub);
513
514 static inline int cl_page_invariant(const struct cl_page *pg)
515 {
516         struct cl_object_header *header;
517         struct cl_page          *parent;
518         struct cl_page          *child;
519         struct cl_io            *owner;
520
521         LASSERT(cl_is_page(pg));
522         /*
523          * Page invariant is protected by a VM lock.
524          */
525         LINVRNT(cl_page_is_vmlocked(NULL, pg));
526
527         header = cl_object_header(pg->cp_obj);
528         parent = pg->cp_parent;
529         child  = pg->cp_child;
530         owner  = pg->cp_owner;
531
532         return cfs_atomic_read(&pg->cp_ref) > 0 &&
533                 ergo(parent != NULL, parent->cp_child == pg) &&
534                 ergo(child != NULL, child->cp_parent == pg) &&
535                 ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
536                 ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
537                 ergo(owner != NULL && parent != NULL,
538                      parent->cp_owner == pg->cp_owner->ci_parent) &&
539                 ergo(owner != NULL && child != NULL,
540                      child->cp_owner->ci_parent == owner) &&
541                 /*
542                  * Either page is early in initialization (has neither child
543                  * nor parent yet), or it is in the object radix tree.
544                  */
545                 ergo(pg->cp_state < CPS_FREEING,
546                      (void *)radix_tree_lookup(&header->coh_tree,
547                                                pg->cp_index) == pg ||
548                      (child == NULL && parent == NULL));
549 }
550
551 static void cl_page_state_set0(const struct lu_env *env,
552                                struct cl_page *page, enum cl_page_state state)
553 {
554         enum cl_page_state old;
555         struct cl_site *site = cl_object_site(page->cp_obj);
556
557         /*
558          * Matrix of allowed state transitions [old][new], for sanity
559          * checking.
560          */
561         static const int allowed_transitions[CPS_NR][CPS_NR] = {
562                 [CPS_CACHED] = {
563                         [CPS_CACHED]  = 0,
564                         [CPS_OWNED]   = 1, /* io finds existing cached page */
565                         [CPS_PAGEIN]  = 0,
566                         [CPS_PAGEOUT] = 1, /* write-out from the cache */
567                         [CPS_FREEING] = 1, /* eviction on the memory pressure */
568                 },
569                 [CPS_OWNED] = {
570                         [CPS_CACHED]  = 1, /* release to the cache */
571                         [CPS_OWNED]   = 0,
572                         [CPS_PAGEIN]  = 1, /* start read immediately */
573                         [CPS_PAGEOUT] = 1, /* start write immediately */
574                         [CPS_FREEING] = 1, /* lock invalidation or truncate */
575                 },
576                 [CPS_PAGEIN] = {
577                         [CPS_CACHED]  = 1, /* io completion */
578                         [CPS_OWNED]   = 0,
579                         [CPS_PAGEIN]  = 0,
580                         [CPS_PAGEOUT] = 0,
581                         [CPS_FREEING] = 0,
582                 },
583                 [CPS_PAGEOUT] = {
584                         [CPS_CACHED]  = 1, /* io completion */
585                         [CPS_OWNED]   = 0,
586                         [CPS_PAGEIN]  = 0,
587                         [CPS_PAGEOUT] = 0,
588                         [CPS_FREEING] = 0,
589                 },
590                 [CPS_FREEING] = {
591                         [CPS_CACHED]  = 0,
592                         [CPS_OWNED]   = 0,
593                         [CPS_PAGEIN]  = 0,
594                         [CPS_PAGEOUT] = 0,
595                         [CPS_FREEING] = 0,
596                 }
597         };
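        /*
         * Example of a legal lifecycle permitted by the matrix above for a
         * cached page going through write-out:
         *
         *        CPS_CACHED -> CPS_OWNED -> CPS_PAGEOUT -> CPS_CACHED ->
         *        CPS_FREEING
         */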
598
599         ENTRY;
600         old = page->cp_state;
601         PASSERT(env, page, allowed_transitions[old][state]);
602         CL_PAGE_HEADER(D_TRACE, env, page, "%i -> %i\n", old, state);
603         for (; page != NULL; page = page->cp_child) {
604                 PASSERT(env, page, page->cp_state == old);
605                 PASSERT(env, page,
606                         equi(state == CPS_OWNED, page->cp_owner != NULL));
607
608                 cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
609                 cfs_atomic_inc(&site->cs_pages_state[state]);
610                 cl_page_state_set_trust(page, state);
611         }
612         EXIT;
613 }
614
615 static void cl_page_state_set(const struct lu_env *env,
616                               struct cl_page *page, enum cl_page_state state)
617 {
618         PINVRNT(env, page, cl_page_invariant(page));
619         cl_page_state_set0(env, page, state);
620 }
621
622 /**
623  * Acquires an additional reference to a page.
624  *
625  * This can be called only by a caller that already holds a reference to
626  * \a page.
627  *
628  * \see cl_object_get(), cl_lock_get().
629  */
630 void cl_page_get(struct cl_page *page)
631 {
632         ENTRY;
633         LASSERT(page->cp_state != CPS_FREEING);
634         cl_page_get_trust(page);
635         EXIT;
636 }
637 EXPORT_SYMBOL(cl_page_get);
638
639 /**
640  * Releases a reference to a page.
641  *
642  * When the last reference is released, the page is returned to the cache,
643  * unless it is in the cl_page_state::CPS_FREEING state, in which case it is
644  * destroyed immediately.
645  *
646  * \see cl_object_put(), cl_lock_put().
647  */
648 void cl_page_put(const struct lu_env *env, struct cl_page *page)
649 {
650         struct cl_object_header *hdr;
651         struct cl_site *site = cl_object_site(page->cp_obj);
652
653         PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
654
655         ENTRY;
656         CL_PAGE_HEADER(D_TRACE, env, page, "%i\n",
657                        cfs_atomic_read(&page->cp_ref));
658
659         hdr = cl_object_header(cl_object_top(page->cp_obj));
660         if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
661                 cfs_atomic_dec(&site->cs_pages.cs_busy);
662                 /* We're going to access the page w/o a reference, but it's
663                  * ok because we hold coh_page_guard, which means nobody is
664                  * able to free this page behind us.
665                  */
666                 if (page->cp_state == CPS_FREEING) {
667                         /* We dropped the page reference and checked the page
668                          * state while holding coh_page_guard, so if we get
669                          * here this is REALLY the last reference to this page.
670                          */
671                         cfs_spin_unlock(&hdr->coh_page_guard);
672
673                         LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
674                         PASSERT(env, page, page->cp_owner == NULL);
675                         PASSERT(env, page, cfs_list_empty(&page->cp_batch));
676                         /*
677                          * Page is no longer reachable by other threads. Tear
678                          * it down.
679                          */
680                         cl_page_free(env, page);
681
682                         EXIT;
683                         return;
684                 }
685                 cfs_spin_unlock(&hdr->coh_page_guard);
686         }
687
688         EXIT;
689 }
690 EXPORT_SYMBOL(cl_page_put);
691
692 /**
693  * Returns a VM page associated with a given cl_page.
694  */
695 cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
696 {
697         const struct cl_page_slice *slice;
698
699         /*
700          * Find uppermost layer with ->cpo_vmpage() method, and return its
701          * result.
702          */
703         page = cl_page_top(page);
704         do {
705                 cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
706                         if (slice->cpl_ops->cpo_vmpage != NULL)
707                                 RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
708                 }
709                 page = page->cp_child;
710         } while (page != NULL);
711         LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
712 }
713 EXPORT_SYMBOL(cl_page_vmpage);
714
715 /**
716  * Returns a cl_page associated with a VM page, and given cl_object.
717  */
718 struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
719 {
720         struct cl_page *page;
721         struct cl_object_header *hdr;
722
723         ENTRY;
724         KLASSERT(PageLocked(vmpage));
725
726         /*
727          * NOTE: absence of races and liveness of data are guaranteed by page
728          *       lock on a "vmpage". That works because object destruction is
729          *       done in a bottom-to-top pass.
730          */
731
732         /*
733          * This loop assumes that ->private points to the top-most page. This
734          * can be rectified easily.
735          */
736         hdr = cl_object_header(cl_object_top(obj));
737         cfs_spin_lock(&hdr->coh_page_guard);
738         for (page = (void *)vmpage->private;
739              page != NULL; page = page->cp_child) {
740                 if (cl_object_same(page->cp_obj, obj)) {
741                         cl_page_get_trust(page);
742                         break;
743                 }
744         }
745         cfs_spin_unlock(&hdr->coh_page_guard);
746         LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
747         RETURN(page);
748 }
749 EXPORT_SYMBOL(cl_vmpage_page);
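
/*
 * Illustrative sketch (not part of this file): translating a locked VM page
 * back into the cl_page of a particular object. "env", "obj" and "vmpage"
 * are assumed caller-supplied; the returned reference must be dropped.
 *
 *        struct cl_page *page;
 *
 *        KLASSERT(PageLocked(vmpage));
 *        page = cl_vmpage_page(vmpage, obj);
 *        if (page != NULL) {
 *                ... use the page ...
 *                cl_page_put(env, page);
 *        }
 */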
750
751 /**
752  * Returns the top-page for a given page.
753  *
754  * \see cl_object_top(), cl_io_top()
755  */
756 struct cl_page *cl_page_top(struct cl_page *page)
757 {
758         return cl_page_top_trusted(page);
759 }
760 EXPORT_SYMBOL(cl_page_top);
761
762 /**
763  * Returns true if \a addr is an address of an allocated cl_page. Used in
764  * assertions. This check is optimistically imprecise, i.e., it occasionally
765  * returns true for incorrect addresses, but if it returns false, then the
766  * address is guaranteed to be incorrect. (Should be named cl_pagep().)
767  *
768  * \see cl_is_lock()
769  */
770 int cl_is_page(const void *addr)
771 {
772         return cfs_mem_is_in_cache(addr, cl_page_kmem);
773 }
774 EXPORT_SYMBOL(cl_is_page);
775
776 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
777                                        const struct lu_device_type *dtype)
778 {
779         return cl_page_at_trusted(page, dtype);
780 }
781 EXPORT_SYMBOL(cl_page_at);
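
/*
 * Illustrative sketch (not part of this file): finding the slice of a page
 * that belongs to the topmost device type, exactly as cl_page_gang_lookup()
 * above does. "obj" and "page" are assumed caller-supplied.
 *
 *        const struct lu_device_type *dtype;
 *        const struct cl_page_slice  *slice;
 *
 *        dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
 *        slice = cl_page_at(page, dtype);
 */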
782
783 #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
784
785 #define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
786 ({                                                                      \
787         const struct lu_env        *__env  = (_env);                    \
788         struct cl_page             *__page = (_page);                   \
789         const struct cl_page_slice *__scan;                             \
790         int                         __result;                           \
791         ptrdiff_t                   __op   = (_op);                     \
792         int                       (*__method)_proto;                    \
793                                                                         \
794         __result = 0;                                                   \
795         __page = cl_page_top(__page);                                   \
796         do {                                                            \
797                 cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
798                                         cpl_linkage) {                  \
799                         __method = *(void **)((char *)__scan->cpl_ops + \
800                                               __op);                    \
801                         if (__method != NULL) {                         \
802                                 __result = (*__method)(__env, __scan,   \
803                                                        ## __VA_ARGS__); \
804                                 if (__result != 0)                      \
805                                         break;                          \
806                         }                                               \
807                 }                                                       \
808                 __page = __page->cp_child;                              \
809         } while (__page != NULL && __result == 0);                      \
810         if (__result > 0)                                               \
811                 __result = 0;                                           \
812         __result;                                                       \
813 })
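
/*
 * Note on CL_PAGE_INVOKE() above: it walks the layers of every page in the
 * parent/child stack top-to-bottom and stops at the first non-zero return.
 * A negative value is propagated to the caller as an error; a positive value
 * also stops the iteration but is translated to 0, which lets a layer claim
 * the operation as fully handled without failing it.
 */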
814
815 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
816 do {                                                                    \
817         const struct lu_env        *__env  = (_env);                    \
818         struct cl_page             *__page = (_page);                   \
819         const struct cl_page_slice *__scan;                             \
820         ptrdiff_t                   __op   = (_op);                     \
821         void                      (*__method)_proto;                    \
822                                                                         \
823         __page = cl_page_top(__page);                                   \
824         do {                                                            \
825                 cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
826                                         cpl_linkage) {                  \
827                         __method = *(void **)((char *)__scan->cpl_ops + \
828                                               __op);                    \
829                         if (__method != NULL)                           \
830                                 (*__method)(__env, __scan,              \
831                                             ## __VA_ARGS__);            \
832                 }                                                       \
833                 __page = __page->cp_child;                              \
834         } while (__page != NULL);                                       \
835 } while (0)
836
837 #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)               \
838 do {                                                                        \
839         const struct lu_env        *__env  = (_env);                        \
840         struct cl_page             *__page = (_page);                       \
841         const struct cl_page_slice *__scan;                                 \
842         ptrdiff_t                   __op   = (_op);                         \
843         void                      (*__method)_proto;                        \
844                                                                             \
845         /* get to the bottom page. */                                       \
846         while (__page->cp_child != NULL)                                    \
847                 __page = __page->cp_child;                                  \
848         do {                                                                \
849                 cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
850                                                 cpl_linkage) {              \
851                         __method = *(void **)((char *)__scan->cpl_ops +     \
852                                               __op);                        \
853                         if (__method != NULL)                               \
854                                 (*__method)(__env, __scan,                  \
855                                             ## __VA_ARGS__);                \
856                 }                                                           \
857                 __page = __page->cp_parent;                                 \
858         } while (__page != NULL);                                           \
859 } while (0)
860
861 static int cl_page_invoke(const struct lu_env *env,
862                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
863
864 {
865         PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
866         ENTRY;
867         RETURN(CL_PAGE_INVOKE(env, page, op,
868                               (const struct lu_env *,
869                                const struct cl_page_slice *, struct cl_io *),
870                               io));
871 }
872
873 static void cl_page_invoid(const struct lu_env *env,
874                            struct cl_io *io, struct cl_page *page, ptrdiff_t op)
875
876 {
877         PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
878         ENTRY;
879         CL_PAGE_INVOID(env, page, op,
880                        (const struct lu_env *,
881                         const struct cl_page_slice *, struct cl_io *), io);
882         EXIT;
883 }
884
885 static void cl_page_owner_clear(struct cl_page *page)
886 {
887         ENTRY;
888         for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
889                 if (page->cp_owner != NULL) {
890                         LASSERT(page->cp_owner->ci_owned_nr > 0);
891                         page->cp_owner->ci_owned_nr--;
892                         page->cp_owner = NULL;
893                         page->cp_task = NULL;
894                 }
895         }
896         EXIT;
897 }
898
899 static void cl_page_owner_set(struct cl_page *page)
900 {
901         ENTRY;
902         for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
903                 LASSERT(page->cp_owner != NULL);
904                 page->cp_owner->ci_owned_nr++;
905         }
906         EXIT;
907 }
908
909 void cl_page_disown0(const struct lu_env *env,
910                      struct cl_io *io, struct cl_page *pg)
911 {
912         enum cl_page_state state;
913
914         ENTRY;
915         state = pg->cp_state;
916         PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
917         PINVRNT(env, pg, cl_page_invariant(pg));
918         cl_page_owner_clear(pg);
919
920         if (state == CPS_OWNED)
921                 cl_page_state_set(env, pg, CPS_CACHED);
922         /*
923          * Completion call-backs are executed in bottom-up order, so that the
924          * uppermost layer (llite), responsible for VFS/VM interaction, runs
925          * last and can release locks safely.
926          */
927         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
928                                (const struct lu_env *,
929                                 const struct cl_page_slice *, struct cl_io *),
930                                io);
931         EXIT;
932 }
933
934 /**
935  * Returns true iff the page is owned by the given io.
936  */
937 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
938 {
939         LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
940         ENTRY;
941         RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
942 }
943 EXPORT_SYMBOL(cl_page_is_owned);
944
945 /**
946  * Try to own a page by IO.
947  * Try to own a page on behalf of an IO.
948  *
949  * Waits until the page is in cl_page_state::CPS_CACHED state, then switches it
950  *
951  * \pre  !cl_page_is_owned(pg, io)
952  * \post result == 0 iff cl_page_is_owned(pg, io)
953  *
954  * \retval 0   success
955  *
956  * \retval -ve failure, e.g., the page was destroyed (and landed in
957  *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
958  *             or the page was owned by another thread, or is under IO.
959  *
960  * \see cl_page_disown()
961  * \see cl_page_operations::cpo_own()
962  * \see cl_page_own_try()
963  * \see cl_page_own
964  */
965 static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
966                         struct cl_page *pg, int nonblock)
967 {
968         int result;
969
970         PINVRNT(env, pg, !cl_page_is_owned(pg, io));
971
972         ENTRY;
973         pg = cl_page_top(pg);
974         io = cl_io_top(io);
975
976         if (pg->cp_state == CPS_FREEING) {
977                 result = -EAGAIN;
978         } else {
979                 result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
980                                         (const struct lu_env *,
981                                          const struct cl_page_slice *,
982                                          struct cl_io *, int),
983                                         io, nonblock);
984                 if (result == 0) {
985                         PASSERT(env, pg, pg->cp_owner == NULL);
986                         PASSERT(env, pg, pg->cp_req == NULL);
987                         pg->cp_owner = io;
988                         pg->cp_task  = current;
989                         cl_page_owner_set(pg);
990                         if (pg->cp_state != CPS_FREEING) {
991                                 cl_page_state_set(env, pg, CPS_OWNED);
992                         } else {
993                                 cl_page_disown0(env, io, pg);
994                                 result = -EAGAIN;
995                         }
996                 }
997         }
998         PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
999         RETURN(result);
1000 }
1001
1002 /**
1003  * Own a page, might be blocked.
1004  *
1005  * \see cl_page_own0()
1006  */
1007 int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
1008 {
1009         return cl_page_own0(env, io, pg, 0);
1010 }
1011 EXPORT_SYMBOL(cl_page_own);
1012
1013 /**
1014  * Nonblock version of cl_page_own().
1015  *
1016  * \see cl_page_own0()
1017  */
1018 int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
1019                     struct cl_page *pg)
1020 {
1021         return cl_page_own0(env, io, pg, 1);
1022 }
1023 EXPORT_SYMBOL(cl_page_own_try);
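
/*
 * Illustrative sketch (not part of this file): the usual ownership pattern,
 * matching how cl_page_gang_lookup() above uses cl_page_own()/cl_page_own_try()
 * and how owners later release pages. "env", "io" and "pg" are assumed
 * caller-supplied.
 *
 *        if (cl_page_own(env, io, pg) == 0) {
 *                ... the page is now in CPS_OWNED and can be discarded,
 *                    prepped for transfer, etc. ...
 *                cl_page_disown(env, io, pg);
 *        }
 */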
1024
1025
1026 /**
1027  * Assume page ownership.
1028  *
1029  * Called when page is already locked by the hosting VM.
1030  *
1031  * \pre !cl_page_is_owned(pg, io)
1032  * \post cl_page_is_owned(pg, io)
1033  *
1034  * \see cl_page_operations::cpo_assume()
1035  */
1036 void cl_page_assume(const struct lu_env *env,
1037                     struct cl_io *io, struct cl_page *pg)
1038 {
1039         PASSERT(env, pg, pg->cp_state < CPS_OWNED);
1040         PASSERT(env, pg, pg->cp_owner == NULL);
1041         PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
1042         PINVRNT(env, pg, cl_page_invariant(pg));
1043
1044         ENTRY;
1045         pg = cl_page_top(pg);
1046         io = cl_io_top(io);
1047
1048         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
1049         pg->cp_owner = io;
1050         pg->cp_task = current;
1051         cl_page_owner_set(pg);
1052         cl_page_state_set(env, pg, CPS_OWNED);
1053         EXIT;
1054 }
1055 EXPORT_SYMBOL(cl_page_assume);
1056
1057 /**
1058  * Releases page ownership without unlocking the page.
1059  *
1060  * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
1061  * underlying VM page (as VM is supposed to do this itself).
1062  *
1063  * \pre   cl_page_is_owned(pg, io)
1064  * \post !cl_page_is_owned(pg, io)
1065  *
1066  * \see cl_page_assume()
1067  */
1068 void cl_page_unassume(const struct lu_env *env,
1069                       struct cl_io *io, struct cl_page *pg)
1070 {
1071         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1072         PINVRNT(env, pg, cl_page_invariant(pg));
1073
1074         ENTRY;
1075         pg = cl_page_top(pg);
1076         io = cl_io_top(io);
1077         cl_page_owner_clear(pg);
1078         cl_page_state_set(env, pg, CPS_CACHED);
1079         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
1080                                (const struct lu_env *,
1081                                 const struct cl_page_slice *, struct cl_io *),
1082                                io);
1083         EXIT;
1084 }
1085 EXPORT_SYMBOL(cl_page_unassume);
1086
1087 /**
1088  * Releases page ownership.
1089  *
1090  * Moves page into cl_page_state::CPS_CACHED.
1091  *
1092  * \pre   cl_page_is_owned(pg, io)
1093  * \post !cl_page_is_owned(pg, io)
1094  *
1095  * \see cl_page_own()
1096  * \see cl_page_operations::cpo_disown()
1097  */
1098 void cl_page_disown(const struct lu_env *env,
1099                     struct cl_io *io, struct cl_page *pg)
1100 {
1101         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1102
1103         ENTRY;
1104         pg = cl_page_top(pg);
1105         io = cl_io_top(io);
1106         cl_page_disown0(env, io, pg);
1107         EXIT;
1108 }
1109 EXPORT_SYMBOL(cl_page_disown);
1110
1111 /**
1112  * Called when page is to be removed from the object, e.g., as a result of
1113  * truncate.
1114  *
1115  * Calls cl_page_operations::cpo_discard() top-to-bottom.
1116  *
1117  * \pre cl_page_is_owned(pg, io)
1118  *
1119  * \see cl_page_operations::cpo_discard()
1120  */
1121 void cl_page_discard(const struct lu_env *env,
1122                      struct cl_io *io, struct cl_page *pg)
1123 {
1124         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1125         PINVRNT(env, pg, cl_page_invariant(pg));
1126
1127         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
1128 }
1129 EXPORT_SYMBOL(cl_page_discard);
1130
1131 /**
1132  * Version of cl_page_delete() that can be called for pages that are not fully
1133  * constructed, e.g., in the error-handling cl_page_find()->cl_page_delete0()
1134  * path. Doesn't check the page invariant.
1135  */
1136 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
1137                             int radix)
1138 {
1139         struct cl_page *tmp = pg;
1140         ENTRY;
1141
1142         PASSERT(env, pg, pg == cl_page_top(pg));
1143         PASSERT(env, pg, pg->cp_state != CPS_FREEING);
1144
1145         /*
1146          * Sever all ways to obtain new pointers to @pg.
1147          */
1148         cl_page_owner_clear(pg);
1149
1150         /*
1151          * Unexport the page before freeing it, so that its content is
1152          * considered invalid.
1153          * We have to do this because a CPS_FREEING cl_page may NOT be
1154          * under the protection of a cl_lock.
1155          * Afterwards, if this page is found by other threads, it will be
1156          * forced to be re-read.
1157          */
1158         cl_page_export(env, pg, 0);
1159         cl_page_state_set0(env, pg, CPS_FREEING);
1160
1161         if (!radix)
1162                 /*
1163                  * !radix means that @pg is not yet in the radix tree, skip
1164                  * removing it.
1165                  */
1166                 tmp = pg->cp_child;
1167         for (; tmp != NULL; tmp = tmp->cp_child) {
1168                 void                    *value;
1169                 struct cl_object_header *hdr;
1170
1171                 hdr = cl_object_header(tmp->cp_obj);
1172                 cfs_spin_lock(&hdr->coh_page_guard);
1173                 value = radix_tree_delete(&hdr->coh_tree, tmp->cp_index);
1174                 PASSERT(env, tmp, value == tmp);
1175                 PASSERT(env, tmp, hdr->coh_pages > 0);
1176                 hdr->coh_pages--;
1177                 cfs_spin_unlock(&hdr->coh_page_guard);
1178         }
1179
1180         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
1181                        (const struct lu_env *, const struct cl_page_slice *));
1182         EXIT;
1183 }
1184
1185 /**
1186  * Called when a decision is made to throw page out of memory.
1187  *
1188  * Notifies all layers about page destruction by calling
1189  * cl_page_operations::cpo_delete() method top-to-bottom.
1190  *
1191  * Moves page into cl_page_state::CPS_FREEING state (this is the only place
1192  * where transition to this state happens).
1193  *
1194  * Eliminates all venues through which new references to the page can be
1195  * obtained:
1196  *
1197  *     - removes page from the radix trees,
1198  *
1199  *     - breaks linkage from VM page to cl_page.
1200  *
1201  * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
1202  * drain after some time, at which point page will be recycled.
1203  *
1204  * \pre  pg == cl_page_top(pg)
1205  * \pre  VM page is locked
1206  * \post pg->cp_state == CPS_FREEING
1207  *
1208  * \see cl_page_operations::cpo_delete()
1209  */
1210 void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
1211 {
1212         PINVRNT(env, pg, cl_page_invariant(pg));
1213         ENTRY;
1214         cl_page_delete0(env, pg, 1);
1215         EXIT;
1216 }
1217 EXPORT_SYMBOL(cl_page_delete);
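
/*
 * Illustrative sketch (not part of this file): removing a page on a path such
 * as truncate, combining cl_vmpage_page() and cl_page_delete(). The VM page
 * is assumed to be locked by the caller, satisfying the preconditions above;
 * "env", "obj" and "vmpage" are assumed caller-supplied.
 *
 *        struct cl_page *page;
 *
 *        page = cl_vmpage_page(vmpage, obj);
 *        if (page != NULL) {
 *                cl_page_delete(env, cl_page_top(page));
 *                cl_page_put(env, page);
 *        }
 */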
1218
1219 /**
1220  * Unmaps page from user virtual memory.
1221  *
1222  * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
1223  * layer responsible for VM interaction has to unmap page from user space
1224  * virtual memory.
1225  *
1226  * \see cl_page_operations::cpo_unmap()
1227  */
1228 int cl_page_unmap(const struct lu_env *env,
1229                   struct cl_io *io, struct cl_page *pg)
1230 {
1231         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1232         PINVRNT(env, pg, cl_page_invariant(pg));
1233
1234         return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
1235 }
1236 EXPORT_SYMBOL(cl_page_unmap);
1237
1238 /**
1239  * Marks page up-to-date.
1240  *
1241  * Calls cl_page_operations::cpo_export() through all layers top-to-bottom. The
1242  * layer responsible for VM interaction has to mark/clear the page as up-to-date
1243  * according to the \a uptodate argument.
1244  *
1245  * \see cl_page_operations::cpo_export()
1246  */
1247 void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
1248 {
1249         PINVRNT(env, pg, cl_page_invariant(pg));
1250         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
1251                        (const struct lu_env *,
1252                         const struct cl_page_slice *, int), uptodate);
1253 }
1254 EXPORT_SYMBOL(cl_page_export);
1255
1256 /**
1257  * Returns true, iff \a pg is VM locked in a suitable sense by the calling
1258  * thread.
1259  */
1260 int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
1261 {
1262         int result;
1263         const struct cl_page_slice *slice;
1264
1265         ENTRY;
1266         pg = cl_page_top_trusted((struct cl_page *)pg);
1267         slice = container_of(pg->cp_layers.next,
1268                              const struct cl_page_slice, cpl_linkage);
1269         PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
1270         /*
1271          * Call ->cpo_is_vmlocked() directly instead of going through
1272          * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
1273          * cl_page_invariant().
1274          */
1275         result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
1276         PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
1277         RETURN(result == -EBUSY);
1278 }
1279 EXPORT_SYMBOL(cl_page_is_vmlocked);
1280
1281 static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
1282 {
1283         ENTRY;
1284         RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
1285 }
1286
1287 static void cl_page_io_start(const struct lu_env *env,
1288                              struct cl_page *pg, enum cl_req_type crt)
1289 {
1290         /*
1291          * Page is queued for IO, change its state.
1292          */
1293         ENTRY;
1294         cl_page_owner_clear(pg);
1295         cl_page_state_set(env, pg, cl_req_type_state(crt));
1296         EXIT;
1297 }
1298
1299 /**
1300  * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
1301  * called top-to-bottom. Every layer either agrees to submit this page (by
1302  * returning 0), or requests to omit this page (by returning -EALREADY). Layer
1303  * handling interactions with the VM also has to inform the VM that the page
1304  * is now under transfer.
1305  */
1306 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
1307                  struct cl_page *pg, enum cl_req_type crt)
1308 {
1309         int result;
1310
1311         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1312         PINVRNT(env, pg, cl_page_invariant(pg));
1313         PINVRNT(env, pg, crt < CRT_NR);
1314
1315         /*
1316          * XXX this has to be called bottom-to-top, so that llite can set up
1317          * PG_writeback without risking other layers deciding to skip this
1318          * page.
1319          */
1320         result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
1321         if (result == 0)
1322                 cl_page_io_start(env, pg, crt);
1323
1324         KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
1325                       equi(result == 0,
1326                            PageWriteback(cl_page_vmpage(env, pg)))));
1327         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
1328         return result;
1329 }
1330 EXPORT_SYMBOL(cl_page_prep);
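
/*
 * Illustrative sketch (not part of this file): how a transfer-engine caller
 * might interpret cl_page_prep() results, following the convention described
 * above (0 means submit, -EALREADY means omit the page). "env", "io", "pg"
 * and "crt" are assumed caller-supplied.
 *
 *        rc = cl_page_prep(env, io, pg, crt);
 *        if (rc == 0)
 *                ... add pg to the request being formed ...
 *        else if (rc == -EALREADY)
 *                ... skip this page, it does not need the transfer ...
 *        else
 *                ... fail the submission with rc ...
 */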
1331
1332 /**
1333  * Notify layers about transfer completion.
1334  *
1335  * Invoked by the transfer sub-system (which is a part of osc) to notify layers
1336  * that a transfer, of which this page is a part, has completed.
1337  *
1338  * Completion call-backs are executed in the bottom-up order, so that
1339  * uppermost layer (llite), responsible for the VFS/VM interaction runs last
1340  * and can release locks safely.
1341  *
1342  * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
1343  * \post pg->cp_state == CPS_CACHED
1344  *
1345  * \see cl_page_operations::cpo_completion()
1346  */
1347 void cl_page_completion(const struct lu_env *env,
1348                         struct cl_page *pg, enum cl_req_type crt, int ioret)
1349 {
1350         struct cl_sync_io *anchor = pg->cp_sync_io;
1351
1352         PASSERT(env, pg, crt < CRT_NR);
1353         /* cl_page::cp_req already cleared by the caller (osc_completion()) */
1354         PASSERT(env, pg, pg->cp_req == NULL);
1355         PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
1356         PINVRNT(env, pg, cl_page_invariant(pg));
1357
1358         ENTRY;
1359         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, ioret);
1360         if (crt == CRT_READ && ioret == 0) {
1361                 PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
1362                 pg->cp_flags |= CPF_READ_COMPLETED;
1363         }
1364
1365         cl_page_state_set(env, pg, CPS_CACHED);
1366         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
1367                                (const struct lu_env *,
1368                                 const struct cl_page_slice *, int), ioret);
1369         if (anchor) {
1370                 LASSERT(pg->cp_sync_io == anchor);
1371                 pg->cp_sync_io = NULL;
1372                 cl_sync_io_note(anchor, ioret);
1373         }
1374
1375         /* Don't assert the page writeback bit here, because a Lustre file
1376          * may be used as a backend for swap space. In that case the writeback
1377          * bit is set by the VM, and we obviously must not clear it.
1378          * Fortunately, pages of this type are all TRANSIENT pages. */
1379         KLASSERT(ergo(pg->cp_type == CPT_CACHEABLE,
1380                       !PageWriteback(cl_page_vmpage(env, pg))));
1381         EXIT;
1382 }
1383 EXPORT_SYMBOL(cl_page_completion);
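
/*
 * Call-site sketch (illustrative): the osc transfer engine is expected to
 * clear cl_page::cp_req before notifying the layers, matching the PASSERT
 * above; "ioret" would be 0 or a negative errno decoded from the RPC reply.
 *
 *      pg->cp_req = NULL;
 *      cl_page_completion(env, pg, CRT_READ, ioret);
 */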
1384
1385 /**
1386  * Notify layers that transfer formation engine decided to yank this page from
1387  * the cache and to make it a part of a transfer.
1388  *
1389  * \pre  pg->cp_state == CPS_CACHED
1390  * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
1391  *
1392  * \see cl_page_operations::cpo_make_ready()
1393  */
1394 int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
1395                        enum cl_req_type crt)
1396 {
1397         int result;
1398
1399         PINVRNT(env, pg, crt < CRT_NR);
1400
1401         ENTRY;
1402         result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
1403                                 (const struct lu_env *,
1404                                  const struct cl_page_slice *));
1405         if (result == 0) {
1406                 PASSERT(env, pg, pg->cp_state == CPS_CACHED);
1407                 cl_page_io_start(env, pg, crt);
1408         }
1409         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
1410         RETURN(result);
1411 }
1412 EXPORT_SYMBOL(cl_page_make_ready);
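
/*
 * Usage sketch (illustrative): the transfer formation engine tries to pull
 * a cached dirty page into a write RPC; a non-zero result means some layer
 * refused and the page stays cached.
 *
 *      if (cl_page_make_ready(env, pg, CRT_WRITE) == 0)
 *              ...the page is now in CPS_PAGEOUT and part of the transfer...
 */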
1413
1414 /**
1415  * Notify layers that high level io decided to place this page into a cache
1416  * for future transfer.
1417  *
1418  * The layer implementing transfer engine (osc) has to register this page in
1419  * its queues.
1420  *
1421  * \pre  cl_page_is_owned(pg, io)
1422  * \post ergo(result == 0,
1423  *            pg->cp_state == CPS_CACHED)
1424  *
1425  * \see cl_page_operations::cpo_cache_add()
1426  */
1427 int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
1428                       struct cl_page *pg, enum cl_req_type crt)
1429 {
1430         int result;
1431
1432         PINVRNT(env, pg, crt < CRT_NR);
1433         PINVRNT(env, pg, cl_page_is_owned(pg, io));
1434         PINVRNT(env, pg, cl_page_invariant(pg));
1435
1436         ENTRY;
1437         result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_cache_add));
1438         if (result == 0) {
1439                 cl_page_owner_clear(pg);
1440                 cl_page_state_set(env, pg, CPS_CACHED);
1441         }
1442         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
1443         RETURN(result);
1444 }
1445 EXPORT_SYMBOL(cl_page_cache_add);
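
/*
 * Usage sketch (illustrative): a buffered write path hands an owned page to
 * the cache instead of transferring it immediately; on success ownership is
 * dropped and the page returns to CPS_CACHED while osc keeps it queued.
 *
 *      result = cl_page_cache_add(env, io, pg, CRT_WRITE);
 */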
1446
1447 /**
1448  * Checks whether the page is protected by an extent lock of at least the
1449  * required mode.
1450  *
1451  * \return the same as in cl_page_operations::cpo_is_under_lock() method.
1452  * \see cl_page_operations::cpo_is_under_lock()
1453  */
1454 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
1455                           struct cl_page *page)
1456 {
1457         int rc;
1458
1459         PINVRNT(env, page, cl_page_invariant(page));
1460
1461         ENTRY;
1462         rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
1463                             (const struct lu_env *,
1464                              const struct cl_page_slice *, struct cl_io *),
1465                             io);
1466         PASSERT(env, page, rc != 0);
1467         RETURN(rc);
1468 }
1469 EXPORT_SYMBOL(cl_page_is_under_lock);
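
/*
 * Sketch (illustrative; assumes the layers follow the same -EBUSY vs
 * -ENODATA convention that cl_page_is_vmlocked() asserts above -- an
 * assumption, not a documented guarantee):
 *
 *      rc = cl_page_is_under_lock(env, io, page);
 *      covered = (rc == -EBUSY);
 */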
1470
1471 /**
1472  * Purges all cached pages belonging to the object \a obj.
1473  */
1474 int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
1475 {
1476         struct cl_thread_info   *info;
1477         struct cl_object        *obj = cl_object_top(clobj);
1478         struct cl_io            *io;
1479         struct cl_page_list     *plist;
1480         int                      result;
1481
1482         ENTRY;
1483         info  = cl_env_info(env);
1484         plist = &info->clt_list;
1485         io    = &info->clt_io;
1486
1487         /*
1488          * Initialize the io. This is ugly, since we never actually do IO in
1489          * this function; we just keep the cl_page_list functions happy. -jay
1490          */
1491         io->ci_obj = obj;
1492         result = cl_io_init(env, io, CIT_MISC, obj);
1493         if (result != 0) {
1494                 cl_io_fini(env, io);
1495                 RETURN(io->ci_result);
1496         }
1497
1498         cl_page_list_init(plist);
1499         cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, plist, 0);
1500         /*
1501          * Since we're purging the pages of an object, we don't care about
1502          * the possible outcomes of the following functions.
1503          */
1504         cl_page_list_unmap(env, io, plist);
1505         cl_page_list_discard(env, io, plist);
1506         cl_page_list_disown(env, io, plist);
1507         cl_page_list_fini(env, plist);
1508
1509         cl_io_fini(env, io);
1510         RETURN(result);
1511 }
1512 EXPORT_SYMBOL(cl_pages_prune);
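
/*
 * Usage sketch (illustrative): typically invoked when all cached pages of
 * an object must be invalidated, e.g. on object destruction or a layout
 * change:
 *
 *      rc = cl_pages_prune(env, obj);
 */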
1513
1514 /**
1515  * Tells transfer engine that only part of a page is to be transmitted.
1516  *
1517  * \see cl_page_operations::cpo_clip()
1518  */
1519 void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
1520                   int from, int to)
1521 {
1522         PINVRNT(env, pg, cl_page_invariant(pg));
1523
1524         CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", from, to);
1525         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
1526                        (const struct lu_env *,
1527                         const struct cl_page_slice *, int, int),
1528                        from, to);
1529 }
1530 EXPORT_SYMBOL(cl_page_clip);
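
/*
 * Example (illustrative; assumes the libcfs CFS_PAGE_SIZE macro and a file
 * size that is not page-aligned): clip the last page of a file so that only
 * the bytes up to the end of the file are transferred:
 *
 *      cl_page_clip(env, pg, 0, size & (CFS_PAGE_SIZE - 1));
 */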
1531
1532 /**
1533  * Prints a human-readable representation of \a pg through \a printer.
1534  */
1535 void cl_page_header_print(const struct lu_env *env, void *cookie,
1536                           lu_printer_t printer, const struct cl_page *pg)
1537 {
1538         (*printer)(env, cookie,
1539                    "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
1540                    pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
1541                    pg->cp_index, pg->cp_parent, pg->cp_child,
1542                    pg->cp_state, pg->cp_error, pg->cp_type,
1543                    pg->cp_owner, pg->cp_req, pg->cp_flags);
1544 }
1545 EXPORT_SYMBOL(cl_page_header_print);
1546
1547 /**
1548  * Prints a human-readable representation of \a pg through \a printer.
1549  */
1550 void cl_page_print(const struct lu_env *env, void *cookie,
1551                    lu_printer_t printer, const struct cl_page *pg)
1552 {
1553         struct cl_page *scan;
1554
1555         for (scan = cl_page_top((struct cl_page *)pg);
1556              scan != NULL; scan = scan->cp_child)
1557                 cl_page_header_print(env, cookie, printer, scan);
1558         CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
1559                        (const struct lu_env *env,
1560                         const struct cl_page_slice *slice,
1561                         void *cookie, lu_printer_t p), cookie, printer);
1562         (*printer)(env, cookie, "end page@%p\n", pg);
1563 }
1564 EXPORT_SYMBOL(cl_page_print);
1565
1566 /**
1567  * Cancel a page which is still in a transfer.
1568  */
1569 int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
1570 {
1571         return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
1572                               (const struct lu_env *,
1573                                const struct cl_page_slice *));
1574 }
1575 EXPORT_SYMBOL(cl_page_cancel);
1576
1577 /**
1578  * Converts a page index within object \a obj into a byte offset.
1579  */
1580 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
1581 {
1582         /*
1583          * XXX for now: page size is a compile-time constant; obj is unused.
1584          */
1585         return (loff_t)idx << CFS_PAGE_SHIFT;
1586 }
1587 EXPORT_SYMBOL(cl_offset);
1588
1589 /**
1590  * Converts a byte offset within object \a obj into a page index.
1591  */
1592 pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
1593 {
1594         /*
1595          * XXX for now: page size is a compile-time constant; obj is unused.
1596          */
1597         return offset >> CFS_PAGE_SHIFT;
1598 }
1599 EXPORT_SYMBOL(cl_index);
1600
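/**
 * Returns the size, in bytes, of a (client) page of object \a obj.
 */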
1601 int cl_page_size(const struct cl_object *obj)
1602 {
1603         return 1 << CFS_PAGE_SHIFT;
1604 }
1605 EXPORT_SYMBOL(cl_page_size);
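
/*
 * Worked example (illustrative, assuming CFS_PAGE_SHIFT == 12, i.e. 4KB
 * pages):
 *
 *      cl_offset(obj, 3)     == 3 << 12     == 12288
 *      cl_index(obj, 12288)  == 12288 >> 12 == 3
 *      cl_page_size(obj)     == 1 << 12     == 4096
 */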
1606
1607 /**
1608  * Adds page slice to the compound page.
1609  *
1610  * This is called by cl_object_operations::coo_page_init() methods to add
1611  * per-layer state to the page. The new state is added at the end of the
1612  * cl_page::cp_layers list, that is, at the bottom of the stack.
1613  *
1614  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
1615  */
1616 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
1617                        struct cl_object *obj,
1618                        const struct cl_page_operations *ops)
1619 {
1620         ENTRY;
1621         cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
1622         slice->cpl_obj  = obj;
1623         slice->cpl_ops  = ops;
1624         slice->cpl_page = page;
1625         EXIT;
1626 }
1627 EXPORT_SYMBOL(cl_page_slice_add);
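
/*
 * Sketch of a layer's coo_page_init() method (illustrative; the slice type
 * "my_page", its embedded cl_page_slice "mps_cl", the allocation and the
 * ops table "my_page_ops" are all hypothetical stand-ins for what a real
 * layer such as osc or lov defines):
 *
 *      struct my_page *mp = ...allocate per-layer state...;
 *
 *      if (mp == NULL)
 *              return -ENOMEM;
 *      cl_page_slice_add(page, &mp->mps_cl, obj, &my_page_ops);
 *      return 0;
 */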
1628
1629 int  cl_page_init(void)
1630 {
1631         return lu_kmem_init(cl_page_caches);
1632 }
1633
1634 void cl_page_fini(void)
1635 {
1636         lu_kmem_fini(cl_page_caches);
1637 }