/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Client Lustre Page.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/list.h>
#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>

#include <cl_object.h>
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
static DEFINE_MUTEX(cl_page_kmem_mutex);

#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LASSERT(0);                                           \
          }                                                             \
  } while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */

#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define PINVRNT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LINVRNT(0);                                           \
          }                                                             \
  } while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define PINVRNT(env, page, exp) \
         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */

/* Page statistics are disabled by default due to the huge performance penalty. */
static void cs_page_inc(const struct cl_object *obj,
                        enum cache_stats_item item)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_inc(&cl_object_site(obj)->cs_pages.cs_stats[item]);
#endif
}

static void cs_page_dec(const struct cl_object *obj,
                        enum cache_stats_item item)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_dec(&cl_object_site(obj)->cs_pages.cs_stats[item]);
#endif
}

static void cs_pagestate_inc(const struct cl_object *obj,
                             enum cl_page_state state)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_inc(&cl_object_site(obj)->cs_pages_state[state]);
#endif
}

static void cs_pagestate_dec(const struct cl_object *obj,
                              enum cl_page_state state)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_dec(&cl_object_site(obj)->cs_pages_state[state]);
#endif
}

/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by keeping a lock on a VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(atomic_read(&page->cp_ref) > 0);
        atomic_inc(&page->cp_ref);
}

static struct cl_page_slice *
cl_page_slice_get(const struct cl_page *cl_page, int index)
{
        if (index < 0 || index >= cl_page->cp_layer_count)
                return NULL;

        /* So that the cp_layer_offset values fit under 256 bytes, they are
         * stored as offsets beyond the end of struct cl_page.
         */
        return (struct cl_page_slice *)((char *)cl_page + sizeof(*cl_page) +
                                        cl_page->cp_layer_offset[index]);
}

#define cl_page_slice_for_each(cl_page, slice, i)               \
        for (i = 0, slice = cl_page_slice_get(cl_page, 0);      \
             i < (cl_page)->cp_layer_count;                     \
             slice = cl_page_slice_get(cl_page, ++i))

#define cl_page_slice_for_each_reverse(cl_page, slice, i)       \
        for (i = (cl_page)->cp_layer_count - 1,                 \
             slice = cl_page_slice_get(cl_page, i); i >= 0;     \
             slice = cl_page_slice_get(cl_page, --i))
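
/*
 * For reference, the resulting in-memory layout is a single allocation of
 * coh_page_bufsize bytes: struct cl_page itself, immediately followed by the
 * per-layer slices addressed via cp_layer_offset[]. A rough sketch (the
 * number and identity of layers depends on the device stack, so the boxes
 * below are only illustrative):
 *
 *   +----------------+-------------------+-------------------+-----
 *   | struct cl_page | slice of layer 0  | slice of layer 1  | ...
 *   +----------------+-------------------+-------------------+-----
 *   ^                ^ cp_layer_offset[0]^ cp_layer_offset[1]
 *   cl_page
 */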

/**
 * Returns a slice within a cl_page, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_lock_at()
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *cl_page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;
        int i;

        ENTRY;

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        RETURN(slice);
        }

        RETURN(NULL);
}

static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
{
        int index = cl_page->cp_kmem_index;

        if (index >= 0) {
                LASSERT(index < ARRAY_SIZE(cl_page_kmem_array));
                LASSERT(cl_page_kmem_size_array[index] == bufsize);
                OBD_SLAB_FREE(cl_page, cl_page_kmem_array[index], bufsize);
        } else {
                OBD_FREE(cl_page, bufsize);
        }
}

static void cl_page_free(const struct lu_env *env, struct cl_page *cl_page,
                         struct pagevec *pvec)
{
        struct cl_object *obj  = cl_page->cp_obj;
        unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
        struct cl_page_slice *slice;
        int i;

        ENTRY;
        PASSERT(env, cl_page, list_empty(&cl_page->cp_batch));
        PASSERT(env, cl_page, cl_page->cp_owner == NULL);
        PASSERT(env, cl_page, cl_page->cp_state == CPS_FREEING);

        cl_page_slice_for_each(cl_page, slice, i) {
                if (unlikely(slice->cpl_ops->cpo_fini != NULL))
                        slice->cpl_ops->cpo_fini(env, slice, pvec);
        }
        cl_page->cp_layer_count = 0;
        cs_page_dec(obj, CS_total);
        cs_pagestate_dec(obj, cl_page->cp_state);
        lu_object_ref_del_at(&obj->co_lu, &cl_page->cp_obj_ref,
                             "cl_page", cl_page);
        if (cl_page->cp_type != CPT_TRANSIENT)
                cl_object_put(env, obj);
        lu_ref_fini(&cl_page->cp_reference);
        __cl_page_free(cl_page, bufsize);
        EXIT;
}

static struct cl_page *__cl_page_alloc(struct cl_object *o)
{
        int i = 0;
        struct cl_page *cl_page = NULL;
        unsigned short bufsize = cl_object_header(o)->coh_page_bufsize;

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_PAGE_ALLOC))
                return NULL;

check:
        /* cl_page_kmem_array is expected to hold only 2-3 entries, so the
         * lookup overhead should be low.
         */
        for ( ; i < ARRAY_SIZE(cl_page_kmem_array); i++) {
                if (smp_load_acquire(&cl_page_kmem_size_array[i])
                    == bufsize) {
                        OBD_SLAB_ALLOC_GFP(cl_page, cl_page_kmem_array[i],
                                           bufsize, GFP_NOFS);
                        if (cl_page)
                                cl_page->cp_kmem_index = i;
                        return cl_page;
                }
                if (cl_page_kmem_size_array[i] == 0)
                        break;
        }

        if (i < ARRAY_SIZE(cl_page_kmem_array)) {
                char cache_name[32];

                mutex_lock(&cl_page_kmem_mutex);
                if (cl_page_kmem_size_array[i]) {
                        mutex_unlock(&cl_page_kmem_mutex);
                        goto check;
                }
                snprintf(cache_name, sizeof(cache_name),
                         "cl_page_kmem-%u", bufsize);
                cl_page_kmem_array[i] =
                        kmem_cache_create(cache_name, bufsize,
                                          0, 0, NULL);
                if (cl_page_kmem_array[i] == NULL) {
                        mutex_unlock(&cl_page_kmem_mutex);
                        return NULL;
                }
                smp_store_release(&cl_page_kmem_size_array[i],
                                  bufsize);
                mutex_unlock(&cl_page_kmem_mutex);
                goto check;
        } else {
                OBD_ALLOC_GFP(cl_page, bufsize, GFP_NOFS);
                if (cl_page)
                        cl_page->cp_kmem_index = -1;
        }

        return cl_page;
}

struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                              pgoff_t ind, struct page *vmpage,
                              enum cl_page_type type)
{
        struct cl_page *cl_page;
        struct cl_object *head;

        ENTRY;

        cl_page = __cl_page_alloc(o);
        if (cl_page != NULL) {
                int result = 0;

                /*
                 * Please fix the cl_page::cp_state/cp_type declarations if
                 * these assertions fail in the future.
                 */
                BUILD_BUG_ON((1 << CP_STATE_BITS) < CPS_NR); /* cp_state */
                BUILD_BUG_ON((1 << CP_TYPE_BITS) < CPT_NR); /* cp_type */
                atomic_set(&cl_page->cp_ref, 1);
                cl_page->cp_obj = o;
                if (type != CPT_TRANSIENT)
                        cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &cl_page->cp_obj_ref,
                                     "cl_page", cl_page);
                cl_page->cp_vmpage = vmpage;
                cl_page->cp_state = CPS_CACHED;
                cl_page->cp_type = type;
                if (type == CPT_TRANSIENT)
                        /* a ref to the correct inode will be added
                         * in ll_direct_rw_pages
                         */
                        cl_page->cp_inode = NULL;
                else
                        cl_page->cp_inode = page2inode(vmpage);
                INIT_LIST_HEAD(&cl_page->cp_batch);
                lu_ref_init(&cl_page->cp_reference);
                head = o;
                cl_page->cp_page_index = ind;
                cl_object_for_each(o, head) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o,
                                                        cl_page, ind);
                                if (result != 0) {
                                        cl_page_delete0(env, cl_page);
                                        cl_page_free(env, cl_page, NULL);
                                        cl_page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
                if (result == 0) {
                        cs_page_inc(o, CS_total);
                        cs_page_inc(o, CS_create);
                        cs_pagestate_dec(o, CPS_CACHED);
                }
        } else {
                cl_page = ERR_PTR(-ENOMEM);
        }
        RETURN(cl_page);
}

/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In any case, an additional reference to the page is acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
                             struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        struct cl_page          *page = NULL;
        struct cl_object_header *hdr;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
        might_sleep();

        ENTRY;

        hdr = cl_object_header(o);
        cs_page_inc(o, CS_lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        /* fast path. */
        if (type == CPT_CACHEABLE) {
                /* the vmpage lock is used to protect the child/parent
                 * relationship
                 */
                LASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 *     - "vmpage" is locked (which prevents ->private from
                 *       concurrent updates), and
                 *
                 *     - "o" cannot be destroyed while current thread holds a
                 *       reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                if (page != NULL) {
                        cs_page_inc(o, CS_hit);
                        RETURN(page);
                }
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        RETURN(page);
}
EXPORT_SYMBOL(cl_page_find);
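
/*
 * A minimal usage sketch for the lookup path above (the caller names and
 * error handling are illustrative, not taken from this file): the caller
 * locks the VM page, looks up or creates the corresponding cl_page, and
 * drops its reference when done.
 *
 *      struct cl_page *page;
 *
 *      lock_page(vmpage);
 *      page = cl_page_find(env, obj, vmpage->index, vmpage, CPT_CACHEABLE);
 *      if (IS_ERR(page)) {
 *              unlock_page(vmpage);
 *              return PTR_ERR(page);
 *      }
 *      ... use the page ...
 *      cl_page_put(env, page);
 *      unlock_page(vmpage);
 */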

static inline int cl_page_invariant(const struct cl_page *pg)
{
        return cl_page_in_use_noref(pg);
}

static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *cl_page,
                               enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking.
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_CACHED]  = 0,
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction on memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                },
                [CPS_FREEING] = {
                        [CPS_CACHED]  = 0,
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                }
        };

        ENTRY;
        old = cl_page->cp_state;
        PASSERT(env, cl_page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d -> %d\n", old, state);
        PASSERT(env, cl_page, cl_page->cp_state == old);
        PASSERT(env, cl_page, equi(state == CPS_OWNED,
                                   cl_page->cp_owner != NULL));

        cs_pagestate_dec(cl_page->cp_obj, cl_page->cp_state);
        cs_pagestate_inc(cl_page->cp_obj, state);
        cl_page->cp_state = state;
        EXIT;
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}
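
/*
 * Taken together, the transition matrix above describes the normal page life
 * cycle: a page starts out CACHED, becomes OWNED by an io, moves to PAGEIN or
 * PAGEOUT while a transfer is in flight, returns to CACHED on completion, and
 * finally enters FREEING when it is torn down; FREEING is terminal.
 */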

/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by a caller already possessing a reference to
 * \a page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        ENTRY;
        cl_page_get_trust(page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page, using the pagevec to release the pages
 * in batch if provided.
 *
 * Users need to do a final pagevec_release() to release any trailing pages.
 */
void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
                  struct pagevec *pvec)
{
        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       atomic_read(&page->cp_ref));

        if (atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
                PASSERT(env, page, list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page, pvec);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_pagevec_put);
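
/*
 * An illustrative batched-release pattern for cl_pagevec_put() (the queue,
 * variable names and exact pagevec helpers are only a sketch and may differ
 * per kernel version): callers that drop many references in a row pass a
 * local pagevec and flush it once at the end, as noted above.
 *
 *      struct pagevec pvec;
 *
 *      pagevec_init(&pvec);
 *      list_for_each_entry_safe(page, next, &queue, cp_batch)
 *              cl_pagevec_put(env, page, &pvec);
 *      pagevec_release(&pvec);
 */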

/**
 * Releases a reference to a page; wrapper around cl_pagevec_put().
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * immediately destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        cl_pagevec_put(env, page, NULL);
}
EXPORT_SYMBOL(cl_page_put);

/**
 * Returns a cl_page associated with a VM page, and given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
        struct cl_page *page;

        ENTRY;
        LASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". That works because object destruction
         *       proceeds bottom-to-top.
         */

        page = (struct cl_page *)vmpage->private;
        if (page != NULL) {
                cl_page_get_trust(page);
                LASSERT(page->cp_type == CPT_CACHEABLE);
        }
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

static void cl_page_owner_clear(struct cl_page *page)
{
        ENTRY;
        if (page->cp_owner != NULL) {
                LASSERT(page->cp_owner->ci_owned_nr > 0);
                page->cp_owner->ci_owned_nr--;
                page->cp_owner = NULL;
        }
        EXIT;
}

static void cl_page_owner_set(struct cl_page *page)
{
        ENTRY;
        LASSERT(page->cp_owner != NULL);
        page->cp_owner->ci_owned_nr++;
        EXIT;
}

void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        enum cl_page_state state;
        int i;

        ENTRY;
        state = cl_page->cp_state;
        PINVRNT(env, cl_page, state == CPS_OWNED ||
                state == CPS_FREEING);
        PINVRNT(env, cl_page, cl_page_invariant(cl_page) ||
                state == CPS_FREEING);
        cl_page_owner_clear(cl_page);

        if (state == CPS_OWNED)
                cl_page_state_set(env, cl_page, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * the uppermost layer (llite), responsible for VFS/VM interaction,
         * runs last and can release locks safely.
         */
        cl_page_slice_for_each_reverse(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_disown != NULL)
                        (*slice->cpl_ops->cpo_disown)(env, slice, io);
        }

        EXIT;
}

/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        struct cl_io *top = cl_io_top((struct cl_io *)io);
        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
}
EXPORT_SYMBOL(cl_page_is_owned);

/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(cl_page, io)
 * \post result == 0 iff cl_page_is_owned(cl_page, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., cl_page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or the page was owned by another thread, or is in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 * \see cl_page_own
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *cl_page, int nonblock)
{
        const struct cl_page_slice *slice;
        int result = 0;
        int i;

        ENTRY;
        PINVRNT(env, cl_page, !cl_page_is_owned(cl_page, io));
        io = cl_io_top(io);

        if (cl_page->cp_state == CPS_FREEING) {
                result = -ENOENT;
                goto out;
        }

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_own)
                        result = (*slice->cpl_ops->cpo_own)(env, slice,
                                                            io, nonblock);
                if (result != 0)
                        break;
        }
        if (result > 0)
                result = 0;

        if (result == 0) {
                PASSERT(env, cl_page, cl_page->cp_owner == NULL);
                cl_page->cp_owner = cl_io_top(io);
                cl_page_owner_set(cl_page);
                if (cl_page->cp_state != CPS_FREEING) {
                        cl_page_state_set(env, cl_page, CPS_OWNED);
                } else {
                        cl_page_disown0(env, io, cl_page);
                        result = -ENOENT;
                }
        }

out:
        PINVRNT(env, cl_page, ergo(result == 0,
                cl_page_invariant(cl_page)));
        RETURN(result);
}

/**
 * Own a page, might block.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblock version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
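
/*
 * The ownership calls above are normally used in a bracketed pattern (a
 * sketch only; real callers add their own locking and error paths):
 *
 *      result = cl_page_own(env, io, page);
 *      if (result == 0) {
 *              ... operate on the page on behalf of io ...
 *              cl_page_disown(env, io, page);
 *      }
 */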


/**
 * Assume page ownership.
 *
 * Called when the page is already locked by the hosting VM.
 *
 * \pre !cl_page_is_owned(cl_page, io)
 * \post cl_page_is_owned(cl_page, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        int i;

        ENTRY;

        PINVRNT(env, cl_page,
                cl_object_same(cl_page->cp_obj, io->ci_obj));
        io = cl_io_top(io);

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_assume != NULL)
                        (*slice->cpl_ops->cpo_assume)(env, slice, io);
        }

        PASSERT(env, cl_page, cl_page->cp_owner == NULL);
        cl_page->cp_owner = cl_io_top(io);
        cl_page_owner_set(cl_page);
        cl_page_state_set(env, cl_page, CPS_OWNED);
        EXIT;
}
EXPORT_SYMBOL(cl_page_assume);

/**
 * Releases page ownership without unlocking the page.
 *
 * Moves cl_page into cl_page_state::CPS_CACHED without releasing a lock
 * on the underlying VM page (as the VM is supposed to do this itself).
 *
 * \pre   cl_page_is_owned(cl_page, io)
 * \post !cl_page_is_owned(cl_page, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        int i;

        ENTRY;
        PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
        PINVRNT(env, cl_page, cl_page_invariant(cl_page));

        io = cl_io_top(io);
        cl_page_owner_clear(cl_page);
        cl_page_state_set(env, cl_page, CPS_CACHED);

        cl_page_slice_for_each_reverse(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_unassume != NULL)
                        (*slice->cpl_ops->cpo_unassume)(env, slice, io);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);

/**
 * Releases page ownership.
 *
 * Moves the page into cl_page_state::CPS_CACHED.
 *
 * \pre   cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_own()
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
                pg->cp_state == CPS_FREEING);

        ENTRY;
        io = cl_io_top(io);
        cl_page_disown0(env, io, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_disown);

/**
 * Called when the cl_page is to be removed from the object, e.g.,
 * as a result of truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(cl_page, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        int i;

        PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
        PINVRNT(env, cl_page, cl_page_invariant(cl_page));

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_discard != NULL)
                        (*slice->cpl_ops->cpo_discard)(env, slice, io);
        }
}
EXPORT_SYMBOL(cl_page_discard);

/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * cl_pages, e.g. in the error handling path of
 * cl_page_find()->cl_page_delete0(). Doesn't check the cl_page invariant.
 */
static void cl_page_delete0(const struct lu_env *env,
                            struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        int i;

        ENTRY;

        PASSERT(env, cl_page, cl_page->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to \a cl_page.
         */
        cl_page_owner_clear(cl_page);
        cl_page_state_set0(env, cl_page, CPS_FREEING);

        cl_page_slice_for_each_reverse(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_delete != NULL)
                        (*slice->cpl_ops->cpo_delete)(env, slice);
        }

        EXIT;
}

/**
 * Called when a decision is made to throw a page out of memory.
 *
 * Notifies all layers about page destruction by calling
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves the page into cl_page_state::CPS_FREEING state (this is the only
 * place where the transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes the page from the radix trees,
 *
 *     - breaks linkage from the VM page to the cl_page.
 *
 * Once the page reaches cl_page_state::CPS_FREEING, all remaining references
 * will drain after some time, at which point the page will be recycled.
 *
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        ENTRY;
        cl_page_delete0(env, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_delete);

/**
 * Marks the page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date according to the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *cl_page,
                    int uptodate)
{
        const struct cl_page_slice *slice;
        int i;

        PINVRNT(env, cl_page, cl_page_invariant(cl_page));

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_export != NULL)
                        (*slice->cpl_ops->cpo_export)(env, slice, uptodate);
        }
}
EXPORT_SYMBOL(cl_page_export);

/**
 * Returns true if \a cl_page is VM locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env,
                        const struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        int result;

        ENTRY;
        slice = cl_page_slice_get(cl_page, 0);
        PASSERT(env, cl_page, slice->cpl_ops->cpo_is_vmlocked != NULL);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, cl_page, result == -EBUSY || result == -ENODATA);

        RETURN(result == -EBUSY);
}
EXPORT_SYMBOL(cl_page_is_vmlocked);

void cl_page_touch(const struct lu_env *env,
                   const struct cl_page *cl_page, size_t to)
{
        const struct cl_page_slice *slice;
        int i;

        ENTRY;

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_page_touch != NULL)
                        (*slice->cpl_ops->cpo_page_touch)(env, slice, to);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_page_touch);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        ENTRY;
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        ENTRY;
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
        EXIT;
}

/**
 * Prepares a page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that the
 * page is under transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *cl_page, enum cl_req_type crt)
{
        const struct cl_page_slice *slice;
        int result = 0;
        int i;

        PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
        PINVRNT(env, cl_page, cl_page_invariant(cl_page));
        PINVRNT(env, cl_page, crt < CRT_NR);

        /*
         * this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        if (crt >= CRT_NR)
                return -EINVAL;

        if (cl_page->cp_type != CPT_TRANSIENT) {
                cl_page_slice_for_each(cl_page, slice, i) {
                        if (slice->cpl_ops->cpo_own)
                                result =
                                 (*slice->cpl_ops->io[crt].cpo_prep)(env,
                                                                     slice,
                                                                     io);
                        if (result != 0)
                                break;
                }
        }

        if (result >= 0) {
                result = 0;
                cl_page_io_start(env, cl_page, crt);
        }

        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_prep);

/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
 * \post cl_page->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *cl_page, enum cl_req_type crt,
                        int ioret)
{
        const struct cl_page_slice *slice;
        struct cl_sync_io *anchor = cl_page->cp_sync_io;
        int i;

        ENTRY;
        PASSERT(env, cl_page, crt < CRT_NR);
        PASSERT(env, cl_page, cl_page->cp_state == cl_req_type_state(crt));

        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, ioret);
        cl_page_state_set(env, cl_page, CPS_CACHED);
        if (crt >= CRT_NR)
                return;

        cl_page_slice_for_each_reverse(cl_page, slice, i) {
                if (slice->cpl_ops->io[crt].cpo_completion != NULL)
                        (*slice->cpl_ops->io[crt].cpo_completion)(env, slice,
                                                                  ioret);
        }

        if (anchor != NULL) {
                LASSERT(cl_page->cp_sync_io == anchor);
                cl_page->cp_sync_io = NULL;
                cl_sync_io_note(env, anchor, ioret);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_page_completion);
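
/*
 * In other words, a transfer bracket looks like this (sketch): the page
 * enters PAGEIN/PAGEOUT through cl_page_prep() (immediate transfer) or
 * cl_page_make_ready() (write-out from the cache), and cl_page_completion()
 * moves it back to CACHED and notifies any cl_sync_io anchor attached to it.
 */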

/**
 * Notify layers that the transfer formation engine decided to yank this page
 * from the cache and to make it a part of a transfer.
 *
 * \pre  cl_page->cp_state == CPS_CACHED
 * \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *cl_page,
                       enum cl_req_type crt)
{
        const struct cl_page_slice *slice;
        int result = 0;
        int i;

        ENTRY;
        PINVRNT(env, cl_page, crt < CRT_NR);
        if (crt >= CRT_NR)
                RETURN(-EINVAL);

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->io[crt].cpo_make_ready != NULL)
                        result = (*slice->cpl_ops->io[crt].cpo_make_ready)(env,
                                                                        slice);
                if (result != 0)
                        break;
        }

        if (result >= 0) {
                result = 0;
                PASSERT(env, cl_page, cl_page->cp_state == CPS_CACHED);
                cl_page_io_start(env, cl_page, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);

        RETURN(result);
}
EXPORT_SYMBOL(cl_page_make_ready);

/**
 * Called when a page is being written back at the kernel's request.
 *
 * \pre  cl_page_is_owned(cl_page, io)
 * \post ergo(result == 0, cl_page->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        int result = 0;
        int i;

        ENTRY;
        PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
        PINVRNT(env, cl_page, cl_page_invariant(cl_page));

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_flush != NULL)
                        result = (*slice->cpl_ops->cpo_flush)(env, slice, io);
                if (result != 0)
                        break;
        }
        if (result > 0)
                result = 0;

        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d\n", result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_flush);

/**
 * Tells the transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *cl_page,
                  int from, int to)
{
        const struct cl_page_slice *slice;
        int i;

        PINVRNT(env, cl_page, cl_page_invariant(cl_page));

        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", from, to);
        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_clip != NULL)
                        (*slice->cpl_ops->cpo_clip)(env, slice, from, to);
        }
}
EXPORT_SYMBOL(cl_page_clip);

/**
 * Prints a human-readable representation of \a pg via \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p %d %d %p]\n",
                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_type,
                   pg->cp_owner);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints a human-readable representation of \a cl_page via \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        int result = 0;
        int i;

        cl_page_header_print(env, cookie, printer, cl_page);
        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_print != NULL)
                        result = (*slice->cpl_ops->cpo_print)(env, slice,
                                                             cookie, printer);
                if (result != 0)
                        break;
        }
        (*printer)(env, cookie, "end page@%p\n", cl_page);
}
EXPORT_SYMBOL(cl_page_print);

/**
 * Converts a page index within object \a obj into a byte offset.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);
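
/*
 * For example, with the usual 4KB pages (PAGE_SHIFT == 12):
 *
 *      cl_offset(obj, 3)     == 0x3000
 *      cl_index(obj, 0x3000) == 3
 *      cl_index(obj, 0x3fff) == 3
 *
 * i.e. cl_index() rounds a byte offset down to the containing page.
 */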

size_t cl_page_size(const struct cl_object *obj)
{
        return 1UL << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);

/**
 * Adds a page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. The new state is added at the end of
 * cl_page::cp_layer_offset[], that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *cl_page, struct cl_page_slice *slice,
                       struct cl_object *obj,
                       const struct cl_page_operations *ops)
{
        unsigned int offset = (char *)slice -
                        ((char *)cl_page + sizeof(*cl_page));

        ENTRY;
        LASSERT(cl_page->cp_layer_count < CP_MAX_LAYER);
        LASSERT(offset < (1 << sizeof(cl_page->cp_layer_offset[0]) * 8));
        cl_page->cp_layer_offset[cl_page->cp_layer_count++] = offset;
        slice->cpl_obj  = obj;
        slice->cpl_ops  = ops;
        slice->cpl_page = cl_page;

        EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);
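
/*
 * A schematic coo_page_init() method (the "foo" names, including
 * foo_slice_of(), are invented for illustration; real layers such as osc or
 * lov follow this shape): the layer locates its private slice storage for
 * the page and registers it with cl_page_slice_add().
 *
 *      static int foo_page_init(const struct lu_env *env,
 *                               struct cl_object *obj,
 *                               struct cl_page *cl_page, pgoff_t index)
 *      {
 *              struct foo_page *fp = foo_slice_of(obj, cl_page);
 *
 *              cl_page_slice_add(cl_page, &fp->fp_cl, obj, &foo_page_ops);
 *              return 0;
 *      }
 */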

/**
 * Allocate and initialize cl_cache, called by ll_init_sbi().
 */
struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
{
        struct cl_client_cache  *cache = NULL;

        ENTRY;
        OBD_ALLOC(cache, sizeof(*cache));
        if (cache == NULL)
                RETURN(NULL);

        /* Initialize cache data */
        atomic_set(&cache->ccc_users, 1);
        cache->ccc_lru_max = lru_page_max;
        atomic_long_set(&cache->ccc_lru_left, lru_page_max);
        spin_lock_init(&cache->ccc_lru_lock);
        INIT_LIST_HEAD(&cache->ccc_lru);

        /* turn unstable check off by default as it impacts performance */
        cache->ccc_unstable_check = 0;
        atomic_long_set(&cache->ccc_unstable_nr, 0);
        init_waitqueue_head(&cache->ccc_unstable_waitq);
        mutex_init(&cache->ccc_max_cache_mb_lock);

        RETURN(cache);
}
EXPORT_SYMBOL(cl_cache_init);

/**
 * Increase the cl_cache refcount.
 */
void cl_cache_incref(struct cl_client_cache *cache)
{
        atomic_inc(&cache->ccc_users);
}
EXPORT_SYMBOL(cl_cache_incref);

/**
 * Decrease the cl_cache refcount and free the cache if the refcount drops to
 * zero. Since llite, lov and osc all hold a cl_cache refcount, the free will
 * not cause a race. (LU-6173)
 */
void cl_cache_decref(struct cl_client_cache *cache)
{
        if (atomic_dec_and_test(&cache->ccc_users))
                OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(cl_cache_decref);
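
/*
 * Lifetime sketch for the client cache (illustrative only): the mount code
 * creates the cache (which starts with one reference), each additional layer
 * that caches a pointer takes a reference with cl_cache_incref(), and every
 * holder, including the creator, drops its reference with cl_cache_decref();
 * the last put frees the structure.
 *
 *      cache = cl_cache_init(lru_page_max);
 *      if (cache == NULL)
 *              return -ENOMEM;
 *      cl_cache_incref(cache);         for each additional user
 *      ...
 *      cl_cache_decref(cache);         once per holder at teardown
 */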