LU-10994 clio: remove vvp_page_print()
fs/lustre-release.git: lustre/obdclass/cl_page.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * Client Lustre Page.
32  *
33  *   Author: Nikita Danilov <nikita.danilov@sun.com>
34  *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_CLASS
38
39 #include <linux/list.h>
40 #include <libcfs/libcfs.h>
41 #include <obd_class.h>
42 #include <obd_support.h>
43
44 #include <cl_object.h>
45 #include "cl_internal.h"
46
47 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
48 static DEFINE_MUTEX(cl_page_kmem_mutex);
49
50 #ifdef LIBCFS_DEBUG
51 # define PASSERT(env, page, expr)                                       \
52   do {                                                                    \
53           if (unlikely(!(expr))) {                                      \
54                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
55                   LASSERT(0);                                           \
56           }                                                             \
57   } while (0)
58 #else /* !LIBCFS_DEBUG */
59 # define PASSERT(env, page, exp) \
60         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
61 #endif /* !LIBCFS_DEBUG */
62
63 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
64 # define PINVRNT(env, page, expr)                                       \
65   do {                                                                    \
66           if (unlikely(!(expr))) {                                      \
67                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
68                   LINVRNT(0);                                           \
69           }                                                             \
70   } while (0)
71 #else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
72 # define PINVRNT(env, page, exp) \
73          ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
74 #endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
75
76 /* Page statistics are disabled by default due to the huge performance penalty. */
77 static void cs_page_inc(const struct cl_object *obj,
78                         enum cache_stats_item item)
79 {
80 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
81         atomic_inc(&cl_object_site(obj)->cs_pages.cs_stats[item]);
82 #endif
83 }
84
85 static void cs_page_dec(const struct cl_object *obj,
86                         enum cache_stats_item item)
87 {
88 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
89         atomic_dec(&cl_object_site(obj)->cs_pages.cs_stats[item]);
90 #endif
91 }
92
93 static void cs_pagestate_inc(const struct cl_object *obj,
94                              enum cl_page_state state)
95 {
96 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
97         atomic_inc(&cl_object_site(obj)->cs_pages_state[state]);
98 #endif
99 }
100
101 static void cs_pagestate_dec(const struct cl_object *obj,
102                               enum cl_page_state state)
103 {
104 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
105         atomic_dec(&cl_object_site(obj)->cs_pages_state[state]);
106 #endif
107 }
108
109 /**
110  * Internal version of cl_page_get().
111  *
112  * This function can be used to obtain an initial reference to a previously
113  * unreferenced cached object. It can be called only if concurrent page
114  * reclamation is somehow prevented, e.g., by holding a lock on the VM page
115  * associated with \a page.
116  *
117  * Use with care! Not exported.
118  */
119 static void cl_page_get_trust(struct cl_page *page)
120 {
121         LASSERT(atomic_read(&page->cp_ref) > 0);
122         atomic_inc(&page->cp_ref);
123 }
124
125 static struct cl_page_slice *
126 cl_page_slice_get(const struct cl_page *cl_page, int index)
127 {
128         if (index < 0 || index >= cl_page->cp_layer_count)
129                 return NULL;
130
131         /* So that the cp_layer_offset values fit under 256 bytes, the
132          * offset is measured from the end of struct cl_page.
133          */
134         return (struct cl_page_slice *)((char *)cl_page + sizeof(*cl_page) +
135                                         cl_page->cp_layer_offset[index]);
136 }
137
138 #define cl_page_slice_for_each(cl_page, slice, i)               \
139         for (i = 0, slice = cl_page_slice_get(cl_page, 0);      \
140              i < (cl_page)->cp_layer_count;                     \
141              slice = cl_page_slice_get(cl_page, ++i))
142
143 #define cl_page_slice_for_each_reverse(cl_page, slice, i)       \
144         for (i = (cl_page)->cp_layer_count - 1,                 \
145              slice = cl_page_slice_get(cl_page, i); i >= 0;     \
146              slice = cl_page_slice_get(cl_page, --i))
147
148 /**
149  * Returns a slice within a cl_page, corresponding to the given layer in the
150  * device stack.
151  *
152  * \see cl_lock_at()
153  */
154 static const struct cl_page_slice *
155 cl_page_at_trusted(const struct cl_page *cl_page,
156                    const struct lu_device_type *dtype)
157 {
158         const struct cl_page_slice *slice;
159         int i;
160
161         ENTRY;
162
163         cl_page_slice_for_each(cl_page, slice, i) {
164                 if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
165                         RETURN(slice);
166         }
167
168         RETURN(NULL);
169 }
170
171 static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
172 {
173         int index = cl_page->cp_kmem_index;
174
175         if (index >= 0) {
176                 LASSERT(index < ARRAY_SIZE(cl_page_kmem_array));
177                 LASSERT(cl_page_kmem_size_array[index] == bufsize);
178                 OBD_SLAB_FREE(cl_page, cl_page_kmem_array[index], bufsize);
179         } else {
180                 OBD_FREE(cl_page, bufsize);
181         }
182 }
183
184 static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
185                          struct pagevec *pvec)
186 {
187         struct cl_object *obj  = cp->cp_obj;
188         unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
189         struct page *vmpage;
190
191         ENTRY;
192         PASSERT(env, cp, list_empty(&cp->cp_batch));
193         PASSERT(env, cp, cp->cp_owner == NULL);
194         PASSERT(env, cp, cp->cp_state == CPS_FREEING);
195
196         if (cp->cp_type == CPT_CACHEABLE) {
197                 /* vmpage->private was already cleared when page was
198                  * moved into CPS_FREEING state. */
199                 vmpage = cp->cp_vmpage;
200                 LASSERT(vmpage != NULL);
201                 LASSERT((struct cl_page *)vmpage->private != cp);
202
203                 if (pvec != NULL) {
204                         if (!pagevec_add(pvec, vmpage))
205                                 pagevec_release(pvec);
206                 } else {
207                         put_page(vmpage);
208                 }
209         }
210
211         cp->cp_layer_count = 0;
212         cs_page_dec(obj, CS_total);
213         cs_pagestate_dec(obj, cp->cp_state);
214         lu_object_ref_del_at(&obj->co_lu, &cp->cp_obj_ref, "cl_page", cp);
215         if (cp->cp_type != CPT_TRANSIENT)
216                 cl_object_put(env, obj);
217         lu_ref_fini(&cp->cp_reference);
218         __cl_page_free(cp, bufsize);
219         EXIT;
220 }
221
222 static struct cl_page *__cl_page_alloc(struct cl_object *o)
223 {
224         int i = 0;
225         struct cl_page *cl_page = NULL;
226         unsigned short bufsize = cl_object_header(o)->coh_page_bufsize;
227
228         if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_PAGE_ALLOC))
229                 return NULL;
230
231 check:
232         /* cl_page_kmem_array is expected to hold only 2-3 entries,
233          * so the lookup overhead should be low.
234          */
235         for ( ; i < ARRAY_SIZE(cl_page_kmem_array); i++) {
236                 if (smp_load_acquire(&cl_page_kmem_size_array[i])
237                     == bufsize) {
238                         OBD_SLAB_ALLOC_GFP(cl_page, cl_page_kmem_array[i],
239                                            bufsize, GFP_NOFS);
240                         if (cl_page)
241                                 cl_page->cp_kmem_index = i;
242                         return cl_page;
243                 }
244                 if (cl_page_kmem_size_array[i] == 0)
245                         break;
246         }
247
248         if (i < ARRAY_SIZE(cl_page_kmem_array)) {
249                 char cache_name[32];
250
251                 mutex_lock(&cl_page_kmem_mutex);
252                 if (cl_page_kmem_size_array[i]) {
253                         mutex_unlock(&cl_page_kmem_mutex);
254                         goto check;
255                 }
256                 snprintf(cache_name, sizeof(cache_name),
257                          "cl_page_kmem-%u", bufsize);
258                 cl_page_kmem_array[i] =
259                         kmem_cache_create(cache_name, bufsize,
260                                           0, 0, NULL);
261                 if (cl_page_kmem_array[i] == NULL) {
262                         mutex_unlock(&cl_page_kmem_mutex);
263                         return NULL;
264                 }
265                 smp_store_release(&cl_page_kmem_size_array[i],
266                                   bufsize);
267                 mutex_unlock(&cl_page_kmem_mutex);
268                 goto check;
269         } else {
270                 OBD_ALLOC_GFP(cl_page, bufsize, GFP_NOFS);
271                 if (cl_page)
272                         cl_page->cp_kmem_index = -1;
273         }
274
275         return cl_page;
276 }
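/*
 * Note (informational summary of the function above): __cl_page_alloc() keeps
 * one kmem cache per distinct coh_page_bufsize seen at runtime.
 * cl_page_kmem_size_array[] maps a slot to its buffer size and is published
 * with smp_store_release() so that the lock-free lookup via smp_load_acquire()
 * only sees fully created caches; cp_kmem_index remembers which slot was used
 * (or -1 for the plain OBD_ALLOC fallback) so that __cl_page_free() frees from
 * the matching cache.
 */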
277
278 struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
279                               pgoff_t ind, struct page *vmpage,
280                               enum cl_page_type type)
281 {
282         struct cl_page *cl_page;
283         struct cl_object *head;
284
285         ENTRY;
286
287         cl_page = __cl_page_alloc(o);
288         if (cl_page != NULL) {
289                 int result = 0;
290
291                 /*
292                  * Please fix cl_page:cp_state/type declaration if
293                  * these assertions fail in the future.
294                  */
295                 BUILD_BUG_ON((1 << CP_STATE_BITS) < CPS_NR); /* cp_state */
296                 BUILD_BUG_ON((1 << CP_TYPE_BITS) < CPT_NR); /* cp_type */
297                 atomic_set(&cl_page->cp_ref, 1);
298                 cl_page->cp_obj = o;
299                 if (type != CPT_TRANSIENT)
300                         cl_object_get(o);
301                 lu_object_ref_add_at(&o->co_lu, &cl_page->cp_obj_ref,
302                                      "cl_page", cl_page);
303                 cl_page->cp_vmpage = vmpage;
304                 cl_page->cp_state = CPS_CACHED;
305                 cl_page->cp_type = type;
306                 if (type == CPT_TRANSIENT)
307                         /* ref to correct inode will be added
308                          * in ll_direct_rw_pages
309                          */
310                         cl_page->cp_inode = NULL;
311                 else
312                         cl_page->cp_inode = page2inode(vmpage);
313                 INIT_LIST_HEAD(&cl_page->cp_batch);
314                 lu_ref_init(&cl_page->cp_reference);
315                 head = o;
316                 cl_page->cp_page_index = ind;
317                 cl_object_for_each(o, head) {
318                         if (o->co_ops->coo_page_init != NULL) {
319                                 result = o->co_ops->coo_page_init(env, o,
320                                                         cl_page, ind);
321                                 if (result != 0) {
322                                         cl_page_delete0(env, cl_page);
323                                         cl_page_free(env, cl_page, NULL);
324                                         cl_page = ERR_PTR(result);
325                                         break;
326                                 }
327                         }
328                 }
329                 if (result == 0) {
330                         cs_page_inc(o, CS_total);
331                         cs_page_inc(o, CS_create);
332                         cs_pagestate_dec(o, CPS_CACHED);
333                 }
334         } else {
335                 cl_page = ERR_PTR(-ENOMEM);
336         }
337         RETURN(cl_page);
338 }
339
340 /**
341  * Returns a cl_page with index \a idx at the object \a o, and associated with
342  * the VM page \a vmpage.
343  *
344  * This is the main entry point into the cl_page caching interface. First, a
345  * cache (implemented as a per-object radix tree) is consulted. If the page is
346  * found there, it is returned immediately. Otherwise a new page is allocated
347  * and returned. In either case, an additional reference to the page is acquired.
348  *
349  * \see cl_object_find(), cl_lock_find()
350  */
351 struct cl_page *cl_page_find(const struct lu_env *env,
352                              struct cl_object *o,
353                              pgoff_t idx, struct page *vmpage,
354                              enum cl_page_type type)
355 {
356         struct cl_page          *page = NULL;
357         struct cl_object_header *hdr;
358
359         LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
360         might_sleep();
361
362         ENTRY;
363
364         hdr = cl_object_header(o);
365         cs_page_inc(o, CS_lookup);
366
367         CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
368                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
369         /* fast path. */
370         if (type == CPT_CACHEABLE) {
371                 /* vmpage lock is used to protect the child/parent
372                  * relationship */
373                 LASSERT(PageLocked(vmpage));
374                 /*
375                  * cl_vmpage_page() can be called here without any locks as
376                  *
377                  *     - "vmpage" is locked (which prevents ->private from
378                  *       concurrent updates), and
379                  *
380                  *     - "o" cannot be destroyed while current thread holds a
381                  *       reference on it.
382                  */
383                 page = cl_vmpage_page(vmpage, o);
384                 if (page != NULL) {
385                         cs_page_inc(o, CS_hit);
386                         RETURN(page);
387                 }
388         }
389
390         /* allocate and initialize cl_page */
391         page = cl_page_alloc(env, o, idx, vmpage, type);
392         RETURN(page);
393 }
394 EXPORT_SYMBOL(cl_page_find);
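/*
 * Illustrative sketch (not part of the build): how a caller might combine
 * cl_page_find() with the ownership API for a CPT_CACHEABLE page.  The VM
 * page is locked before the lookup, as the fast path above requires; "io",
 * "obj" and "vmpage" are assumed to be set up by the caller.  Note that
 * cl_page_assume() relies on the vmpage lock already being held, and
 * cl_page_disown() drops it again via unlock_page().
 *
 *	lock_page(vmpage);
 *	page = cl_page_find(env, obj, vmpage->index, vmpage, CPT_CACHEABLE);
 *	if (IS_ERR(page)) {
 *		unlock_page(vmpage);
 *		return PTR_ERR(page);
 *	}
 *	cl_page_assume(env, io, page);
 *	... use the page under ownership ...
 *	cl_page_disown(env, io, page);
 *	cl_page_put(env, page);
 */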
395
396 static inline int cl_page_invariant(const struct cl_page *pg)
397 {
398         return cl_page_in_use_noref(pg);
399 }
400
401 static void cl_page_state_set0(const struct lu_env *env,
402                                struct cl_page *cl_page,
403                                enum cl_page_state state)
404 {
405         enum cl_page_state old;
406
407         /*
408          * Matrix of allowed state transitions [old][new], for sanity
409          * checking.
410          */
411         static const int allowed_transitions[CPS_NR][CPS_NR] = {
412                 [CPS_CACHED] = {
413                         [CPS_CACHED]  = 0,
414                         [CPS_OWNED]   = 1, /* io finds existing cached page */
415                         [CPS_PAGEIN]  = 0,
416                         [CPS_PAGEOUT] = 1, /* write-out from the cache */
417                         [CPS_FREEING] = 1, /* eviction on the memory pressure */
418                 },
419                 [CPS_OWNED] = {
420                         [CPS_CACHED]  = 1, /* release to the cache */
421                         [CPS_OWNED]   = 0,
422                         [CPS_PAGEIN]  = 1, /* start read immediately */
423                         [CPS_PAGEOUT] = 1, /* start write immediately */
424                         [CPS_FREEING] = 1, /* lock invalidation or truncate */
425                 },
426                 [CPS_PAGEIN] = {
427                         [CPS_CACHED]  = 1, /* io completion */
428                         [CPS_OWNED]   = 0,
429                         [CPS_PAGEIN]  = 0,
430                         [CPS_PAGEOUT] = 0,
431                         [CPS_FREEING] = 0,
432                 },
433                 [CPS_PAGEOUT] = {
434                         [CPS_CACHED]  = 1, /* io completion */
435                         [CPS_OWNED]   = 0,
436                         [CPS_PAGEIN]  = 0,
437                         [CPS_PAGEOUT] = 0,
438                         [CPS_FREEING] = 0,
439                 },
440                 [CPS_FREEING] = {
441                         [CPS_CACHED]  = 0,
442                         [CPS_OWNED]   = 0,
443                         [CPS_PAGEIN]  = 0,
444                         [CPS_PAGEOUT] = 0,
445                         [CPS_FREEING] = 0,
446                 }
447         };
448
449         ENTRY;
450         old = cl_page->cp_state;
451         PASSERT(env, cl_page, allowed_transitions[old][state]);
452         CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d -> %d\n", old, state);
453         PASSERT(env, cl_page, cl_page->cp_state == old);
454         PASSERT(env, cl_page, equi(state == CPS_OWNED,
455                                    cl_page->cp_owner != NULL));
456
457         cs_pagestate_dec(cl_page->cp_obj, cl_page->cp_state);
458         cs_pagestate_inc(cl_page->cp_obj, state);
459         cl_page->cp_state = state;
460         EXIT;
461 }
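/*
 * Reading the transition matrix above as page lifecycles (informational
 * summary, derived from allowed_transitions[][]):
 *
 *	read:			CACHED -> OWNED -> PAGEIN  -> CACHED
 *	write:			CACHED -> OWNED -> PAGEOUT -> CACHED
 *	cache write-out:	CACHED -> PAGEOUT -> CACHED
 *	teardown:		CACHED or OWNED -> FREEING
 *
 * CPS_FREEING is terminal: no transitions lead out of it.
 */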
462
463 static void cl_page_state_set(const struct lu_env *env,
464                               struct cl_page *page, enum cl_page_state state)
465 {
466         cl_page_state_set0(env, page, state);
467 }
468
469 /**
470  * Acquires an additional reference to a page.
471  *
472  * This can be called only by a caller already holding a reference to \a
473  * page.
474  *
475  * \see cl_object_get(), cl_lock_get().
476  */
477 void cl_page_get(struct cl_page *page)
478 {
479         ENTRY;
480         cl_page_get_trust(page);
481         EXIT;
482 }
483 EXPORT_SYMBOL(cl_page_get);
484
485 /**
486  * Releases a reference to a page, using the pagevec to release the pages
487  * in batches if one is provided.
488  *
489  * Users need to do a final pagevec_release() to release any trailing pages.
490  */
491 void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
492                   struct pagevec *pvec)
493 {
494         ENTRY;
495         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
496                        atomic_read(&page->cp_ref));
497
498         if (atomic_dec_and_test(&page->cp_ref)) {
499                 LASSERT(page->cp_state == CPS_FREEING);
500
501                 LASSERT(atomic_read(&page->cp_ref) == 0);
502                 PASSERT(env, page, page->cp_owner == NULL);
503                 PASSERT(env, page, list_empty(&page->cp_batch));
504                 /*
505                  * Page is no longer reachable by other threads. Tear
506                  * it down.
507                  */
508                 cl_page_free(env, page, pvec);
509         }
510
511         EXIT;
512 }
513 EXPORT_SYMBOL(cl_pagevec_put);
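/*
 * Illustrative sketch (not part of the build): releasing many pages through a
 * caller-owned pagevec, as described above.  "pages" and "count" are
 * hypothetical, and the pagevec_init() signature varies between kernel
 * versions (older kernels take an extra "cold" argument).
 *
 *	struct pagevec pvec;
 *	int i;
 *
 *	pagevec_init(&pvec);
 *	for (i = 0; i < count; i++)
 *		cl_pagevec_put(env, pages[i], &pvec);
 *	pagevec_release(&pvec);
 */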
514
515 /**
516  * Releases a reference to a page; a wrapper around cl_pagevec_put().
517  *
518  * When last reference is released, page is returned to the cache, unless it
519  * is in cl_page_state::CPS_FREEING state, in which case it is immediately
520  * destroyed.
521  *
522  * \see cl_object_put(), cl_lock_put().
523  */
524 void cl_page_put(const struct lu_env *env, struct cl_page *page)
525 {
526         cl_pagevec_put(env, page, NULL);
527 }
528 EXPORT_SYMBOL(cl_page_put);
529
530 /**
531  * Returns a cl_page associated with a VM page, and given cl_object.
532  */
533 struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
534 {
535         struct cl_page *page;
536
537         ENTRY;
538         LASSERT(PageLocked(vmpage));
539
540         /*
541  * NOTE: absence of races and liveness of data are guaranteed by the page
542  *       lock on the "vmpage". That works because object destruction
543  *       proceeds bottom-to-top.
544          */
545
546         page = (struct cl_page *)vmpage->private;
547         if (page != NULL) {
548                 cl_page_get_trust(page);
549                 LASSERT(page->cp_type == CPT_CACHEABLE);
550         }
551         RETURN(page);
552 }
553 EXPORT_SYMBOL(cl_vmpage_page);
554
555 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
556                                        const struct lu_device_type *dtype)
557 {
558         return cl_page_at_trusted(page, dtype);
559 }
560 EXPORT_SYMBOL(cl_page_at);
561
562 static void cl_page_owner_clear(struct cl_page *page)
563 {
564         ENTRY;
565         if (page->cp_owner != NULL) {
566                 LASSERT(page->cp_owner->ci_owned_nr > 0);
567                 page->cp_owner->ci_owned_nr--;
568                 page->cp_owner = NULL;
569         }
570         EXIT;
571 }
572
573 static void cl_page_owner_set(struct cl_page *page)
574 {
575         ENTRY;
576         LASSERT(page->cp_owner != NULL);
577         page->cp_owner->ci_owned_nr++;
578         EXIT;
579 }
580
581 void cl_page_disown0(const struct lu_env *env, struct cl_page *cp)
582 {
583         struct page *vmpage;
584         enum cl_page_state state;
585
586         ENTRY;
587         state = cp->cp_state;
588         PINVRNT(env, cp, state == CPS_OWNED || state == CPS_FREEING);
589         PINVRNT(env, cp, cl_page_invariant(cp) || state == CPS_FREEING);
590         cl_page_owner_clear(cp);
591
592         if (state == CPS_OWNED)
593                 cl_page_state_set(env, cp, CPS_CACHED);
594
595         if (cp->cp_type == CPT_CACHEABLE) {
596                 vmpage = cp->cp_vmpage;
597                 LASSERT(vmpage != NULL);
598                 LASSERT(PageLocked(vmpage));
599                 unlock_page(vmpage);
600         }
601
602         EXIT;
603 }
604
605 /**
606  * Returns true iff the page is owned by the given IO.
607  */
608 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
609 {
610         struct cl_io *top = cl_io_top((struct cl_io *)io);
611         LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
612         ENTRY;
613         RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
614 }
615 EXPORT_SYMBOL(cl_page_is_owned);
616
617 /**
618  * Try to own a page by IO.
619  *
620  * Waits until the page is in cl_page_state::CPS_CACHED state, and then
621  * switches it into cl_page_state::CPS_OWNED state.
622  *
623  * \pre  !cl_page_is_owned(cl_page, io)
624  * \post result == 0 iff cl_page_is_owned(cl_page, io)
625  *
626  * \retval 0   success
627  *
628  * \retval -ve failure, e.g., the cl_page was destroyed (and landed in
629  *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
630  *             or the page was owned by another thread, or was in IO.
631  *
632  * \see cl_page_disown()
633  * \see cl_page_own_try()
634  * \see cl_page_own
635  */
636 static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
637                         struct cl_page *cl_page, int nonblock)
638 {
639         struct page *vmpage = cl_page->cp_vmpage;
640         int result;
641
642         ENTRY;
643         PINVRNT(env, cl_page, !cl_page_is_owned(cl_page, io));
644
645         if (cl_page->cp_state == CPS_FREEING) {
646                 result = -ENOENT;
647                 goto out;
648         }
649
650         LASSERT(vmpage != NULL);
651
652         if (cl_page->cp_type == CPT_TRANSIENT) {
653                 /* OK */
654         } else if (nonblock) {
655                 if (!trylock_page(vmpage)) {
656                         result = -EAGAIN;
657                         goto out;
658                 }
659
660                 if (unlikely(PageWriteback(vmpage))) {
661                         unlock_page(vmpage);
662                         result = -EAGAIN;
663                         goto out;
664                 }
665         } else {
666                 lock_page(vmpage);
667                 wait_on_page_writeback(vmpage);
668         }
669
670         PASSERT(env, cl_page, cl_page->cp_owner == NULL);
671         cl_page->cp_owner = cl_io_top(io);
672         cl_page_owner_set(cl_page);
673
674         if (cl_page->cp_state == CPS_FREEING) {
675                 cl_page_disown0(env, cl_page);
676                 result = -ENOENT;
677                 goto out;
678         }
679
680         cl_page_state_set(env, cl_page, CPS_OWNED);
681         result = 0;
682 out:
683         PINVRNT(env, cl_page, ergo(result == 0,
684                 cl_page_invariant(cl_page)));
685         RETURN(result);
686 }
687
688 /**
689  * Own a page, might be blocked.
690  *
691  * \see cl_page_own0()
692  */
693 int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
694 {
695         return cl_page_own0(env, io, pg, 0);
696 }
697 EXPORT_SYMBOL(cl_page_own);
698
699 /**
700  * Nonblock version of cl_page_own().
701  *
702  * \see cl_page_own0()
703  */
704 int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
705                     struct cl_page *pg)
706 {
707         return cl_page_own0(env, io, pg, 1);
708 }
709 EXPORT_SYMBOL(cl_page_own_try);
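/*
 * Illustrative sketch (not part of the build): the ownership protocol as seen
 * from a hypothetical caller.  -EAGAIN from cl_page_own_try() means the
 * vmpage is locked elsewhere or under writeback; -ENOENT means the page has
 * already reached CPS_FREEING and should simply be dropped.
 *
 *	rc = cl_page_own_try(env, io, page);
 *	if (rc == -EAGAIN)
 *		rc = cl_page_own(env, io, page);
 *	if (rc == 0) {
 *		... page is in CPS_OWNED: queue, discard or flush it ...
 *		cl_page_disown(env, io, page);
 *	}
 *	cl_page_put(env, page);
 */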
710
711
712 /**
713  * Assume page ownership.
714  *
715  * Called when page is already locked by the hosting VM.
716  *
717  * \pre !cl_page_is_owned(cp, io)
718  * \post cl_page_is_owned(cp, io)
719  */
720 void cl_page_assume(const struct lu_env *env,
721                     struct cl_io *io, struct cl_page *cp)
722 {
723         struct page *vmpage;
724
725         ENTRY;
726         PINVRNT(env, cp, cl_object_same(cp->cp_obj, io->ci_obj));
727
728         if (cp->cp_type == CPT_CACHEABLE) {
729                 vmpage = cp->cp_vmpage;
730                 LASSERT(vmpage != NULL);
731                 LASSERT(PageLocked(vmpage));
732                 wait_on_page_writeback(vmpage);
733         }
734
735         PASSERT(env, cp, cp->cp_owner == NULL);
736         cp->cp_owner = cl_io_top(io);
737         cl_page_owner_set(cp);
738         cl_page_state_set(env, cp, CPS_OWNED);
739         EXIT;
740 }
741 EXPORT_SYMBOL(cl_page_assume);
742
743 /**
744  * Releases page ownership without unlocking the page.
745  *
746  * Moves cl_page into cl_page_state::CPS_CACHED without releasing a lock
747  * on the underlying VM page (as VM is supposed to do this itself).
748  *
749  * \pre   cl_page_is_owned(cp, io)
750  * \post !cl_page_is_owned(cp, io)
751  */
752 void cl_page_unassume(const struct lu_env *env,
753                       struct cl_io *io, struct cl_page *cp)
754 {
755         struct page *vmpage;
756
757         ENTRY;
758         PINVRNT(env, cp, cl_page_is_owned(cp, io));
759         PINVRNT(env, cp, cl_page_invariant(cp));
760
761         cl_page_owner_clear(cp);
762         cl_page_state_set(env, cp, CPS_CACHED);
763
764         if (cp->cp_type == CPT_CACHEABLE) {
765                 vmpage = cp->cp_vmpage;
766                 LASSERT(vmpage != NULL);
767                 LASSERT(PageLocked(vmpage));
768         }
769
770         EXIT;
771 }
772 EXPORT_SYMBOL(cl_page_unassume);
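/*
 * Illustrative sketch (not part of the build): assume/unassume pairing for a
 * CPT_CACHEABLE page whose vmpage was locked by the VM (e.g. a write_begin
 * style path).  The vmpage stays locked throughout; only the cl_page
 * ownership state changes.
 *
 *	LASSERT(PageLocked(vmpage));
 *	cl_page_assume(env, io, page);
 *	... modify the page data under ownership ...
 *	cl_page_unassume(env, io, page);
 *	... vmpage is still locked here ...
 */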
773
774 /**
775  * Releases page ownership.
776  *
777  * Moves page into cl_page_state::CPS_CACHED.
778  *
779  * \pre   cl_page_is_owned(pg, io)
780  * \post !cl_page_is_owned(pg, io)
781  *
782  * \see cl_page_own()
783  */
784 void cl_page_disown(const struct lu_env *env,
785                     struct cl_io *io, struct cl_page *pg)
786 {
787         PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
788                 pg->cp_state == CPS_FREEING);
789
790         cl_page_disown0(env, pg);
791 }
792 EXPORT_SYMBOL(cl_page_disown);
793
794 /**
795  * Called when cl_page is to be removed from the object, e.g.,
796  * as a result of truncate.
797  *
798  * Calls cl_page_operations::cpo_discard() top-to-bottom.
799  *
800  * \pre cl_page_is_owned(cl_page, io)
801  *
802  * \see cl_page_operations::cpo_discard()
803  */
804 void cl_page_discard(const struct lu_env *env,
805                      struct cl_io *io, struct cl_page *cp)
806 {
807         struct page *vmpage;
808         const struct cl_page_slice *slice;
809         int i;
810
811         PINVRNT(env, cp, cl_page_is_owned(cp, io));
812         PINVRNT(env, cp, cl_page_invariant(cp));
813
814         cl_page_slice_for_each(cp, slice, i) {
815                 if (slice->cpl_ops->cpo_discard != NULL)
816                         (*slice->cpl_ops->cpo_discard)(env, slice, io);
817         }
818
819         if (cp->cp_type == CPT_CACHEABLE) {
820                 vmpage = cp->cp_vmpage;
821                 LASSERT(vmpage != NULL);
822                 LASSERT(PageLocked(vmpage));
823                 generic_error_remove_page(vmpage->mapping, vmpage);
824         } else {
825                 cl_page_delete(env, cp);
826         }
827 }
828 EXPORT_SYMBOL(cl_page_discard);
829
830 /**
831  * Version of cl_page_delete() that can be called for cl_pages that are not
832  * fully constructed, e.g. in the error handling cl_page_find()->cl_page_delete0()
833  * path. Doesn't check the cl_page invariant.
834  */
835 static void cl_page_delete0(const struct lu_env *env, struct cl_page *cp)
836 {
837         struct page *vmpage;
838         const struct cl_page_slice *slice;
839         int refc;
840         int i;
841
842         ENTRY;
843         PASSERT(env, cp, cp->cp_state != CPS_FREEING);
844
845         /*
846          * Sever all ways to obtain new pointers to @cp.
847          */
848         cl_page_owner_clear(cp);
849         cl_page_state_set0(env, cp, CPS_FREEING);
850
851         cl_page_slice_for_each_reverse(cp, slice, i) {
852                 if (slice->cpl_ops->cpo_delete != NULL)
853                         (*slice->cpl_ops->cpo_delete)(env, slice);
854         }
855
856         if (cp->cp_type == CPT_CACHEABLE) {
857                 vmpage = cp->cp_vmpage;
858                 LASSERT(PageLocked(vmpage));
859                 LASSERT((struct cl_page *)vmpage->private == cp);
860
861                 /* Drop the reference count held in vvp_page_init */
862                 refc = atomic_dec_return(&cp->cp_ref);
863                 LASSERTF(refc >= 1, "page = %p, refc = %d\n", cp, refc);
864
865                 ClearPagePrivate(vmpage);
866                 vmpage->private = 0;
867
868                 /*
869                  * The reference from vmpage to cl_page is removed,
870                  * but the reference back is still here. It is removed
871                  * later in cl_page_free().
872                  */
873         }
874
875         EXIT;
876 }
877
878 /**
879  * Called when a decision is made to throw page out of memory.
880  *
881  * Notifies all layers about page destruction by calling
882  * cl_page_operations::cpo_delete() method top-to-bottom.
883  *
884  * Moves page into cl_page_state::CPS_FREEING state (this is the only place
885  * where transition to this state happens).
886  *
887  * Eliminates all avenues through which new references to the page can be
888  * obtained:
889  *
890  *     - removes page from the radix trees,
891  *
892  *     - breaks linkage from VM page to cl_page.
893  *
894  * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
895  * drain after some time, at which point page will be recycled.
896  *
897  * \pre  VM page is locked
898  * \post pg->cp_state == CPS_FREEING
899  *
900  * \see cl_page_operations::cpo_delete()
901  */
902 void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
903 {
904         PINVRNT(env, pg, cl_page_invariant(pg));
905         ENTRY;
906         cl_page_delete0(env, pg);
907         EXIT;
908 }
909 EXPORT_SYMBOL(cl_page_delete);
910
911 void cl_page_touch(const struct lu_env *env,
912                    const struct cl_page *cl_page, size_t to)
913 {
914         const struct cl_page_slice *slice;
915         int i;
916
917         ENTRY;
918
919         cl_page_slice_for_each(cl_page, slice, i) {
920                 if (slice->cpl_ops->cpo_page_touch != NULL)
921                         (*slice->cpl_ops->cpo_page_touch)(env, slice, to);
922         }
923
924         EXIT;
925 }
926 EXPORT_SYMBOL(cl_page_touch);
927
928 static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
929 {
930         ENTRY;
931         RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
932 }
933
934 static void cl_page_io_start(const struct lu_env *env,
935                              struct cl_page *pg, enum cl_req_type crt)
936 {
937         /*
938          * Page is queued for IO, change its state.
939          */
940         ENTRY;
941         cl_page_owner_clear(pg);
942         cl_page_state_set(env, pg, cl_req_type_state(crt));
943         EXIT;
944 }
945
946 /**
947  * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
948  * called top-to-bottom. Every layer either agrees to submit this page (by
949  * returning 0), or requests to omit this page (by returning -EALREADY). Layer
950  * handling interactions with the VM also has to inform VM that page is under
951  * transfer now.
952  */
953 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
954                  struct cl_page *cl_page, enum cl_req_type crt)
955 {
956         const struct cl_page_slice *slice;
957         int result = 0;
958         int i;
959
960         PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
961         PINVRNT(env, cl_page, cl_page_invariant(cl_page));
962         PINVRNT(env, cl_page, crt < CRT_NR);
963
964         /*
965          * this has to be called bottom-to-top, so that llite can set up
966          * PG_writeback without risking other layers deciding to skip this
967          * page.
968          */
969         if (crt >= CRT_NR)
970                 return -EINVAL;
971
972         if (cl_page->cp_type != CPT_TRANSIENT) {
973                 cl_page_slice_for_each(cl_page, slice, i) {
974                         if (slice->cpl_ops->io[crt].cpo_prep)
975                                 result =
976                                  (*slice->cpl_ops->io[crt].cpo_prep)(env,
977                                                                      slice,
978                                                                      io);
979                         if (result != 0)
980                                 break;
981                 }
982         }
983
984         if (result >= 0) {
985                 result = 0;
986                 cl_page_io_start(env, cl_page, crt);
987         }
988
989         CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
990         return result;
991 }
992 EXPORT_SYMBOL(cl_page_prep);
993
994 /**
995  * Notify layers about transfer completion.
996  *
997  * Invoked by transfer sub-system (which is a part of osc) to notify layers
998  * that a transfer, of which this page is a part, has completed.
999  *
1000  * Completion call-backs are executed in the bottom-up order, so that
1001  * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
1002  * and can release locks safely.
1003  *
1004  * \pre  cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
1005  * \post cl_page->cp_state == CPS_CACHED
1006  *
1007  * \see cl_page_operations::cpo_completion()
1008  */
1009 void cl_page_completion(const struct lu_env *env,
1010                         struct cl_page *cl_page, enum cl_req_type crt,
1011                         int ioret)
1012 {
1013         const struct cl_page_slice *slice;
1014         struct cl_sync_io *anchor = cl_page->cp_sync_io;
1015         int i;
1016
1017         ENTRY;
1018         PASSERT(env, cl_page, crt < CRT_NR);
1019         PASSERT(env, cl_page, cl_page->cp_state == cl_req_type_state(crt));
1020
1021         CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, ioret);
1022         cl_page_state_set(env, cl_page, CPS_CACHED);
1023         if (crt >= CRT_NR)
1024                 return;
1025
1026         cl_page_slice_for_each_reverse(cl_page, slice, i) {
1027                 if (slice->cpl_ops->io[crt].cpo_completion != NULL)
1028                         (*slice->cpl_ops->io[crt].cpo_completion)(env, slice,
1029                                                                   ioret);
1030         }
1031
1032         if (anchor != NULL) {
1033                 LASSERT(cl_page->cp_sync_io == anchor);
1034                 cl_page->cp_sync_io = NULL;
1035                 cl_sync_io_note(env, anchor, ioret);
1036         }
1037         EXIT;
1038 }
1039 EXPORT_SYMBOL(cl_page_completion);
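/*
 * Illustrative sketch (not part of the build): how cl_page_prep() and
 * cl_page_completion() bracket a transfer on the immediate-submit path.  The
 * actual RPC handling in between belongs to osc and is elided.
 *
 *	rc = cl_page_prep(env, io, page, CRT_WRITE);
 *	if (rc == 0) {
 *		... page is now CPS_PAGEOUT, hand it to the transfer engine ...
 *	} else if (rc == -EALREADY) {
 *		... a layer asked to omit this page, do not submit it ...
 *	}
 *
 *	... later, from transfer completion context, for submitted pages:
 *	cl_page_completion(env, page, CRT_WRITE, ioret);
 */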
1040
1041 /**
1042  * Notify layers that transfer formation engine decided to yank this page from
1043  * the cache and to make it a part of a transfer.
1044  *
1045  * \pre  cl_page->cp_state == CPS_CACHED
1046  * \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
1047  *
1048  * \see cl_page_operations::cpo_make_ready()
1049  */
1050 int cl_page_make_ready(const struct lu_env *env, struct cl_page *cl_page,
1051                        enum cl_req_type crt)
1052 {
1053         const struct cl_page_slice *slice;
1054         int result = 0;
1055         int i;
1056
1057         ENTRY;
1058         PINVRNT(env, cl_page, crt < CRT_NR);
1059         if (crt >= CRT_NR)
1060                 RETURN(-EINVAL);
1061
1062         cl_page_slice_for_each(cl_page, slice, i) {
1063                 if (slice->cpl_ops->io[crt].cpo_make_ready != NULL)
1064                         result = (*slice->cpl_ops->io[crt].cpo_make_ready)(env, slice);
1065                 if (result != 0)
1066                         break;
1067         }
1068
1069         if (result >= 0) {
1070                 result = 0;
1071                 PASSERT(env, cl_page, cl_page->cp_state == CPS_CACHED);
1072                 cl_page_io_start(env, cl_page, crt);
1073         }
1074         CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
1075
1076         RETURN(result);
1077 }
1078 EXPORT_SYMBOL(cl_page_make_ready);
1079
1080 /**
1081  * Called when a page is being written back at the kernel's initiative.
1082  *
1083  * \pre  cl_page_is_owned(cl_page, io)
1084  * \post ergo(result == 0, cl_page->cp_state == CPS_PAGEOUT)
1085  *
1086  * \see cl_page_operations::cpo_flush()
1087  */
1088 int cl_page_flush(const struct lu_env *env, struct cl_io *io,
1089                   struct cl_page *cl_page)
1090 {
1091         const struct cl_page_slice *slice;
1092         int result = 0;
1093         int i;
1094
1095         ENTRY;
1096         PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
1097         PINVRNT(env, cl_page, cl_page_invariant(cl_page));
1098
1099         cl_page_slice_for_each(cl_page, slice, i) {
1100                 if (slice->cpl_ops->cpo_flush != NULL)
1101                         result = (*slice->cpl_ops->cpo_flush)(env, slice, io);
1102                 if (result != 0)
1103                         break;
1104         }
1105         if (result > 0)
1106                 result = 0;
1107
1108         CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d\n", result);
1109         RETURN(result);
1110 }
1111 EXPORT_SYMBOL(cl_page_flush);
1112
1113 /**
1114  * Tells the transfer engine that only part of a page is to be transmitted.
1115  *
1116  * \see cl_page_operations::cpo_clip()
1117  */
1118 void cl_page_clip(const struct lu_env *env, struct cl_page *cl_page,
1119                   int from, int to)
1120 {
1121         const struct cl_page_slice *slice;
1122         int i;
1123
1124         PINVRNT(env, cl_page, cl_page_invariant(cl_page));
1125
1126         CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", from, to);
1127         cl_page_slice_for_each(cl_page, slice, i) {
1128                 if (slice->cpl_ops->cpo_clip != NULL)
1129                         (*slice->cpl_ops->cpo_clip)(env, slice, from, to);
1130         }
1131 }
1132 EXPORT_SYMBOL(cl_page_clip);
1133
1134 /**
1135  * Prints a human-readable representation of \a pg via \a printer.
1136  */
1137 void cl_page_header_print(const struct lu_env *env, void *cookie,
1138                           lu_printer_t printer, const struct cl_page *pg)
1139 {
1140         (*printer)(env, cookie,
1141                    "page@%p[%d %p %d %d %p]\n",
1142                    pg, atomic_read(&pg->cp_ref), pg->cp_obj,
1143                    pg->cp_state, pg->cp_type,
1144                    pg->cp_owner);
1145 }
1146 EXPORT_SYMBOL(cl_page_header_print);
1147
1148 /**
1149  * Prints a human-readable representation of \a cp via \a printer.
1150  */
1151 void cl_page_print(const struct lu_env *env, void *cookie,
1152                    lu_printer_t printer, const struct cl_page *cp)
1153 {
1154         struct page *vmpage = cp->cp_vmpage;
1155         const struct cl_page_slice *slice;
1156         int result = 0;
1157         int i;
1158
1159         cl_page_header_print(env, cookie, printer, cp);
1160
1161         (*printer)(env, cookie, "vmpage @%p", vmpage);
1162
1163         if (vmpage != NULL) {
1164                 (*printer)(env, cookie, " %lx %d:%d %lx %lu %slru",
1165                            (long)vmpage->flags, page_count(vmpage),
1166                            page_mapcount(vmpage), vmpage->private,
1167                            page_index(vmpage),
1168                            list_empty(&vmpage->lru) ? "not-" : "");
1169         }
1170
1171         (*printer)(env, cookie, "\n");
1172
1173         cl_page_slice_for_each(cp, slice, i) {
1174                 if (slice->cpl_ops->cpo_print != NULL)
1175                         result = (*slice->cpl_ops->cpo_print)(env, slice,
1176                                                               cookie, printer);
1177                 if (result != 0)
1178                         break;
1179         }
1180
1181         (*printer)(env, cookie, "end page@%p\n", cp);
1182 }
1183 EXPORT_SYMBOL(cl_page_print);
1184
1185 /**
1186  * Converts a page index within object \a obj into a byte offset.
1187  */
1188 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
1189 {
1190         return (loff_t)idx << PAGE_SHIFT;
1191 }
1192 EXPORT_SYMBOL(cl_offset);
1193
1194 /**
1195  * Converts a byte offset within object \a obj into a page index.
1196  */
1197 pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
1198 {
1199         return offset >> PAGE_SHIFT;
1200 }
1201 EXPORT_SYMBOL(cl_index);
1202
1203 size_t cl_page_size(const struct cl_object *obj)
1204 {
1205         return 1UL << PAGE_SHIFT;
1206 }
1207 EXPORT_SYMBOL(cl_page_size);
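/*
 * Worked example of the helpers above with 4 KiB pages (PAGE_SHIFT == 12):
 *
 *	cl_offset(obj, 3)    == 3 << 12     == 12288
 *	cl_index(obj, 12288) == 12288 >> 12 == 3
 *	cl_index(obj, 12289) == 3              (byte offsets round down)
 *	cl_page_size(obj)    == 4096
 */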
1208
1209 /**
1210  * Adds page slice to the compound page.
1211  *
1212  * This is called by cl_object_operations::coo_page_init() methods to add a
1213  * per-layer state to the page. New state is added after the existing
1214  * layers, that is, at the bottom of the stack.
1215  *
1216  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
1217  */
1218 void cl_page_slice_add(struct cl_page *cl_page, struct cl_page_slice *slice,
1219                        struct cl_object *obj,
1220                        const struct cl_page_operations *ops)
1221 {
1222         unsigned int offset = (char *)slice -
1223                         ((char *)cl_page + sizeof(*cl_page));
1224
1225         ENTRY;
1226         LASSERT(cl_page->cp_layer_count < CP_MAX_LAYER);
1227         LASSERT(offset < (1 << sizeof(cl_page->cp_layer_offset[0]) * 8));
1228         cl_page->cp_layer_offset[cl_page->cp_layer_count++] = offset;
1229         slice->cpl_obj  = obj;
1230         slice->cpl_ops  = ops;
1231         slice->cpl_page = cl_page;
1232
1233         EXIT;
1234 }
1235 EXPORT_SYMBOL(cl_page_slice_add);
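/*
 * Illustrative sketch (not part of the build): a hypothetical layer's
 * cl_object_operations::coo_page_init() method using cl_page_slice_add().
 * "struct foo_page" and "foo_page_ops" are made up for this example; the
 * slice is assumed to live inside the buffer sized by coh_page_bufsize and
 * to be located with cl_object_page_slice().
 *
 *	static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
 *				 struct cl_page *page, pgoff_t index)
 *	{
 *		struct foo_page *fp = cl_object_page_slice(obj, page);
 *
 *		cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
 *		return 0;
 *	}
 */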
1236
1237 /**
1238  * Allocate and initialize cl_cache, called by ll_init_sbi().
1239  */
1240 struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
1241 {
1242         struct cl_client_cache  *cache = NULL;
1243
1244         ENTRY;
1245         OBD_ALLOC(cache, sizeof(*cache));
1246         if (cache == NULL)
1247                 RETURN(NULL);
1248
1249         /* Initialize cache data */
1250         atomic_set(&cache->ccc_users, 1);
1251         cache->ccc_lru_max = lru_page_max;
1252         atomic_long_set(&cache->ccc_lru_left, lru_page_max);
1253         spin_lock_init(&cache->ccc_lru_lock);
1254         INIT_LIST_HEAD(&cache->ccc_lru);
1255
1256         /* turn unstable check off by default as it impacts performance */
1257         cache->ccc_unstable_check = 0;
1258         atomic_long_set(&cache->ccc_unstable_nr, 0);
1259         init_waitqueue_head(&cache->ccc_unstable_waitq);
1260         mutex_init(&cache->ccc_max_cache_mb_lock);
1261
1262         RETURN(cache);
1263 }
1264 EXPORT_SYMBOL(cl_cache_init);
1265
1266 /**
1267  * Increase cl_cache refcount
1268  */
1269 void cl_cache_incref(struct cl_client_cache *cache)
1270 {
1271         atomic_inc(&cache->ccc_users);
1272 }
1273 EXPORT_SYMBOL(cl_cache_incref);
1274
1275 /**
1276  * Decrease cl_cache refcount and free the cache if refcount=0.
1277  * Since llite, lov and osc all hold a cl_cache refcount,
1278  * the free will not cause a race. (LU-6173)
1279  */
1280 void cl_cache_decref(struct cl_client_cache *cache)
1281 {
1282         if (atomic_dec_and_test(&cache->ccc_users))
1283                 OBD_FREE(cache, sizeof(*cache));
1284 }
1285 EXPORT_SYMBOL(cl_cache_decref);
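/*
 * Illustrative sketch (not part of the build): typical cl_client_cache
 * lifecycle.  llite creates the cache at mount time, every additional holder
 * (e.g. when the cache is handed down to lov/osc) takes a reference with
 * cl_cache_incref(), and each holder, including the creator, eventually drops
 * its reference; the last cl_cache_decref() frees the structure.
 *
 *	cache = cl_cache_init(lru_page_max);
 *	if (cache == NULL)
 *		return -ENOMEM;
 *	...
 *	cl_cache_incref(cache);
 *	...
 *	cl_cache_decref(cache);
 */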