/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Page.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/list.h>
#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>

#include <cl_object.h>
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);

#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LASSERT(0);                                           \
          }                                                             \
  } while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */

#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define PINVRNT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LINVRNT(0);                                           \
          }                                                             \
  } while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define PINVRNT(env, page, exp) \
         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */

/* Disable page statistics by default due to the huge performance penalty. */
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CS_PAGE_INC(o, item) \
        atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGE_DEC(o, item) \
        atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGESTATE_INC(o, state) \
        atomic_inc(&cl_object_site(o)->cs_pages_state[state])
#define CS_PAGESTATE_DEC(o, state) \
        atomic_dec(&cl_object_site(o)->cs_pages_state[state])
#else
#define CS_PAGE_INC(o, item)
#define CS_PAGE_DEC(o, item)
#define CS_PAGESTATE_INC(o, state)
#define CS_PAGESTATE_DEC(o, state)
#endif

/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can only be called if concurrent page
 * reclamation is somehow prevented, e.g., by keeping a lock on a VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(atomic_read(&page->cp_ref) > 0);
        atomic_inc(&page->cp_ref);
}

/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_lock_at()
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;
        ENTRY;

        list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        RETURN(slice);
        }
        RETURN(NULL);
}

static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj  = page->cp_obj;
        int pagesize = cl_object_header(obj)->coh_page_bufsize;

        PASSERT(env, page, list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        ENTRY;
        while (!list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = list_entry(page->cp_layers.next,
                                   struct cl_page_slice, cpl_linkage);
                list_del_init(page->cp_layers.next);
                if (unlikely(slice->cpl_ops->cpo_fini != NULL))
                        slice->cpl_ops->cpo_fini(env, slice);
        }
        CS_PAGE_DEC(obj, total);
        CS_PAGESTATE_DEC(obj, page->cp_state);
        lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        OBD_FREE(page, pagesize);
        EXIT;
}

/**
 * Helper function updating page state. This is the only place in the code
 * where cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        /* bypass const. */
        *(enum cl_page_state *)&page->cp_state = state;
}

struct cl_page *cl_page_alloc(const struct lu_env *env,
                struct cl_object *o, pgoff_t ind, struct page *vmpage,
                enum cl_page_type type)
{
        struct cl_page          *page;
        struct lu_object_header *head;

        ENTRY;
        OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
                        GFP_NOFS);
        if (page != NULL) {
                int result = 0;

                atomic_set(&page->cp_ref, 1);
                page->cp_obj = o;
                cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
                                     page);
                page->cp_vmpage = vmpage;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                INIT_LIST_HEAD(&page->cp_layers);
                INIT_LIST_HEAD(&page->cp_batch);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                list_for_each_entry(o, &head->loh_layers,
                                    co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o, page,
                                                                  ind);
                                if (result != 0) {
                                        cl_page_delete0(env, page);
                                        cl_page_free(env, page);
                                        page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
                if (result == 0) {
                        CS_PAGE_INC(o, total);
                        CS_PAGE_INC(o, create);
                        CS_PAGESTATE_DEC(o, CPS_CACHED);
                }
        } else {
                page = ERR_PTR(-ENOMEM);
        }
        RETURN(page);
}

/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In either case, an additional reference to the page is
 * acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
                             struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        struct cl_page          *page = NULL;
        struct cl_object_header *hdr;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
        might_sleep();

        ENTRY;

        hdr = cl_object_header(o);
        CS_PAGE_INC(o, lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        /* fast path. */
        if (type == CPT_CACHEABLE) {
                /* vmpage lock is used to protect the child/parent
                 * relationship */
                KLASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 *     - "vmpage" is locked (which prevents ->private from
                 *       concurrent updates), and
                 *
                 *     - "o" cannot be destroyed while current thread holds a
                 *       reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                if (page != NULL) {
                        CS_PAGE_INC(o, hit);
                        RETURN(page);
                }
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        RETURN(page);
}
EXPORT_SYMBOL(cl_page_find);
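
/*
 * Illustrative usage sketch (not part of the original source): a hypothetical
 * caller that already holds the locked VM page could look up or create the
 * corresponding cl_page and drop its reference once done with it:
 *
 *      page = cl_page_find(env, obj, vmpage->index, vmpage, CPT_CACHEABLE);
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *      ... use the page, e.g., own it and queue it for IO ...
 *      cl_page_put(env, page);
 *
 * "obj" and "env" are assumed to be a cl_object and an lu_env already set up
 * by the caller.
 */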

static inline int cl_page_invariant(const struct cl_page *pg)
{
        return cl_page_in_use_noref(pg);
}

static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking.
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_CACHED]  = 0,
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction under memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                },
                [CPS_FREEING] = {
                        [CPS_CACHED]  = 0,
                        [CPS_OWNED]   = 0,
                        [CPS_PAGEIN]  = 0,
                        [CPS_PAGEOUT] = 0,
                        [CPS_FREEING] = 0,
                }
        };

        ENTRY;
        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        PASSERT(env, page, page->cp_state == old);
        PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));

        CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
        CS_PAGESTATE_INC(page->cp_obj, state);
        cl_page_state_set_trust(page, state);
        EXIT;
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}

/**
 * Acquires an additional reference to a page.
 *
 * This can only be called by a caller already possessing a reference to
 * \a page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        ENTRY;
        cl_page_get_trust(page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page.
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * immediately destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       atomic_read(&page->cp_ref));

        if (atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
                PASSERT(env, page, list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_page_put);

/**
 * Returns a cl_page associated with a VM page, and the given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
        struct cl_page *page;

        ENTRY;
        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". This works because object destruction
         *       proceeds bottom-to-top.
         */

        page = (struct cl_page *)vmpage->private;
        if (page != NULL) {
                cl_page_get_trust(page);
                LASSERT(page->cp_type == CPT_CACHEABLE);
        }
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

static void cl_page_owner_clear(struct cl_page *page)
{
        ENTRY;
        if (page->cp_owner != NULL) {
                LASSERT(page->cp_owner->ci_owned_nr > 0);
                page->cp_owner->ci_owned_nr--;
                page->cp_owner = NULL;
        }
        EXIT;
}

static void cl_page_owner_set(struct cl_page *page)
{
        ENTRY;
        LASSERT(page->cp_owner != NULL);
        page->cp_owner->ci_owned_nr++;
        EXIT;
}

void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        const struct cl_page_slice *slice;
        enum cl_page_state state;

        ENTRY;
        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * the uppermost layer (llite), responsible for VFS/VM interaction,
         * runs last and can release locks safely.
         */
        list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_disown != NULL)
                        (*slice->cpl_ops->cpo_disown)(env, slice, io);
        }

        EXIT;
}

/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        struct cl_io *top = cl_io_top((struct cl_io *)io);

        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
}
EXPORT_SYMBOL(cl_page_is_owned);

/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., the page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or the page is owned by another thread, or is in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 * \see cl_page_own
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result = 0;
        const struct cl_page_slice *slice;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        ENTRY;
        io = cl_io_top(io);

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
                goto out;
        }

        list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_own)
                        result = (*slice->cpl_ops->cpo_own)(env, slice,
                                                            io, nonblock);

                if (result != 0)
                        break;
        }
        if (result > 0)
                result = 0;

        if (result == 0) {
                PASSERT(env, pg, pg->cp_owner == NULL);
                pg->cp_owner = cl_io_top(io);
                cl_page_owner_set(pg);
                if (pg->cp_state != CPS_FREEING) {
                        cl_page_state_set(env, pg, CPS_OWNED);
                } else {
                        cl_page_disown0(env, io, pg);
                        result = -ENOENT;
                }
        }

out:
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        RETURN(result);
}

/**
 * Own a page; may block.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Non-blocking version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
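
/*
 * Illustrative sketch (an assumption, not code from this file): the typical
 * ownership round trip pairs cl_page_own()/cl_page_own_try() with
 * cl_page_disown():
 *
 *      if (cl_page_own(env, io, pg) == 0) {
 *              ... page is now in CPS_OWNED and can be manipulated by io ...
 *              cl_page_disown(env, io, pg);
 *      }
 *
 * cl_page_own_try() follows the same pattern but returns an error instead of
 * blocking when the page is owned elsewhere.
 */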

/**
 * Assume page ownership.
 *
 * Called when the page is already locked by the hosting VM.
 *
 * \pre !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        const struct cl_page_slice *slice;

        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

        ENTRY;
        io = cl_io_top(io);

        list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_assume != NULL)
                        (*slice->cpl_ops->cpo_assume)(env, slice, io);
        }

        PASSERT(env, pg, pg->cp_owner == NULL);
        pg->cp_owner = cl_io_top(io);
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
        EXIT;
}
EXPORT_SYMBOL(cl_page_assume);

/**
 * Releases page ownership without unlocking the page.
 *
 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
 * underlying VM page (as VM is supposed to do this itself).
 *
 * \pre   cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        const struct cl_page_slice *slice;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        io = cl_io_top(io);
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);

        list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_unassume != NULL)
                        (*slice->cpl_ops->cpo_unassume)(env, slice, io);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);

/**
 * Releases page ownership.
 *
 * Moves page into cl_page_state::CPS_CACHED.
 *
 * \pre   cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_own()
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
                pg->cp_state == CPS_FREEING);

        ENTRY;
        io = cl_io_top(io);
        cl_page_disown0(env, io, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_disown);

/**
 * Called when page is to be removed from the object, e.g., as a result of
 * truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        const struct cl_page_slice *slice;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_discard != NULL)
                        (*slice->cpl_ops->cpo_discard)(env, slice, io);
        }
}
EXPORT_SYMBOL(cl_page_discard);

/**
 * Version of cl_page_delete() that can be called for pages that are not
 * fully constructed, e.g. in an error handling
 * cl_page_find()->cl_page_delete0() path. Doesn't check the page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
        const struct cl_page_slice *slice;

        ENTRY;

        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);
        cl_page_state_set0(env, pg, CPS_FREEING);

        list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_delete != NULL)
                        (*slice->cpl_ops->cpo_delete)(env, slice);
        }

        EXIT;
}

/**
 * Called when a decision is made to throw page out of memory.
 *
 * Notifies all layers about page destruction by calling
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves page into cl_page_state::CPS_FREEING state (this is the only place
 * where transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes page from the radix trees,
 *
 *     - breaks linkage from VM page to cl_page.
 *
 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
 * drain after some time, at which point page will be recycled.
 *
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        ENTRY;
        cl_page_delete0(env, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_delete);

/**
 * Marks page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date according to the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        const struct cl_page_slice *slice;

        PINVRNT(env, pg, cl_page_invariant(pg));

        list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_export != NULL)
                        (*slice->cpl_ops->cpo_export)(env, slice, uptodate);
        }
}
EXPORT_SYMBOL(cl_page_export);

/**
 * Returns true iff \a pg is VM-locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        const struct cl_page_slice *slice;
        int result;

        ENTRY;
        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
        RETURN(result == -EBUSY);
}
EXPORT_SYMBOL(cl_page_is_vmlocked);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        ENTRY;
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        ENTRY;
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
        EXIT;
}

/**
 * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that the
 * page is now under transfer.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        const struct cl_page_slice *slice;
        int result = 0;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        if (crt >= CRT_NR)
                return -EINVAL;

        list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->io[crt].cpo_prep != NULL)
                        result = (*slice->cpl_ops->io[crt].cpo_prep)(env,
                                                                     slice,
                                                                     io);

                if (result != 0)
                        break;
        }

        if (result >= 0) {
                result = 0;
                cl_page_io_start(env, pg, crt);
        }

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_prep);
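
/*
 * Illustrative sketch (an assumption, not code from this file): a caller
 * submitting an owned page for write-out would check for the -EALREADY
 * opt-out described above:
 *
 *      rc = cl_page_prep(env, io, pg, CRT_WRITE);
 *      if (rc == 0)
 *              ... page is now in CPS_PAGEOUT and queued for transfer ...
 *      else if (rc == -EALREADY)
 *              ... some layer already handles this page, skip it ...
 */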

/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        const struct cl_page_slice *slice;
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        cl_page_state_set(env, pg, CPS_CACHED);
        if (crt >= CRT_NR)
                return;

        list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->io[crt].cpo_completion != NULL)
                        (*slice->cpl_ops->io[crt].cpo_completion)(env, slice,
                                                                  ioret);
        }

        if (anchor != NULL) {
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
                cl_sync_io_note(env, anchor, ioret);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_page_completion);

/**
 * Notify layers that the transfer formation engine decided to yank this page
 * from the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        const struct cl_page_slice *sli;
        int result = 0;

        PINVRNT(env, pg, crt < CRT_NR);

        ENTRY;
        if (crt >= CRT_NR)
                RETURN(-EINVAL);

        list_for_each_entry(sli, &pg->cp_layers, cpl_linkage) {
                if (sli->cpl_ops->io[crt].cpo_make_ready != NULL)
                        result = (*sli->cpl_ops->io[crt].cpo_make_ready)(env,
                                                                         sli);
                if (result != 0)
                        break;
        }

        if (result >= 0) {
                result = 0;
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_make_ready);

/**
 * Called when a page is being written back at the kernel's request.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *pg)
{
        const struct cl_page_slice *slice;
        int result = 0;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;

        list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_flush != NULL)
                        result = (*slice->cpl_ops->cpo_flush)(env, slice, io);
                if (result != 0)
                        break;
        }
        if (result > 0)
                result = 0;

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_flush);

/**
 * Tells transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        const struct cl_page_slice *slice;

        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_clip != NULL)
                        (*slice->cpl_ops->cpo_clip)(env, slice, from, to);
        }
}
EXPORT_SYMBOL(cl_page_clip);

/**
 * Prints a human readable representation of \a pg via \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p %d %d %p]\n",
                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_type,
                   pg->cp_owner);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints a human readable representation of \a pg and its slices via
 * \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        const struct cl_page_slice *slice;
        int result = 0;

        cl_page_header_print(env, cookie, printer, pg);
        list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_print != NULL)
                        result = (*slice->cpl_ops->cpo_print)(env, slice,
                                                             cookie, printer);
                if (result != 0)
                        break;
        }
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);

/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        const struct cl_page_slice *slice;
        int                         result = 0;

        list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                if (slice->cpl_ops->cpo_cancel != NULL)
                        result = (*slice->cpl_ops->cpo_cancel)(env, slice);
                if (result != 0)
                        break;
        }
        if (result > 0)
                result = 0;

        return result;
}

/**
 * Converts a page index within object \a obj into a byte offset.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

size_t cl_page_size(const struct cl_object *obj)
{
        return 1UL << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
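
/*
 * Illustrative sketch (not from the original source): with a 4096-byte
 * PAGE_SIZE (PAGE_SHIFT == 12) these helpers are simple shifts between page
 * indices and byte offsets:
 *
 *      cl_offset(obj, 3)     == 12288   (page index -> byte offset)
 *      cl_index(obj, 12288)  == 3       (byte offset -> page index)
 *      cl_page_size(obj)     == 4096
 */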

/**
 * Adds page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. New state is added at the end of
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj, pgoff_t index,
                       const struct cl_page_operations *ops)
{
        ENTRY;
        list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj  = obj;
        slice->cpl_index = index;
        slice->cpl_ops  = ops;
        slice->cpl_page = page;
        EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);
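
/*
 * Illustrative sketch (an assumption, with hypothetical names): a layer's
 * cl_object_operations::coo_page_init() method typically embeds its slice in
 * a layer-private structure and registers it with the page:
 *
 *      static int foo_page_init(const struct lu_env *env,
 *                               struct cl_object *obj,
 *                               struct cl_page *page, pgoff_t index)
 *      {
 *              struct foo_page *fp = ...;   (layer-private slice storage)
 *
 *              cl_page_slice_add(page, &fp->fp_cl, obj, index, &foo_page_ops);
 *              return 0;
 *      }
 *
 * "foo_page", "fp_cl" and "foo_page_ops" are placeholders, not names used by
 * any real layer.
 */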

/**
 * Allocate and initialize cl_cache, called by ll_init_sbi().
 */
struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
{
        struct cl_client_cache  *cache = NULL;

        ENTRY;
        OBD_ALLOC(cache, sizeof(*cache));
        if (cache == NULL)
                RETURN(NULL);

        /* Initialize cache data */
        atomic_set(&cache->ccc_users, 1);
        cache->ccc_lru_max = lru_page_max;
        atomic_long_set(&cache->ccc_lru_left, lru_page_max);
        spin_lock_init(&cache->ccc_lru_lock);
        INIT_LIST_HEAD(&cache->ccc_lru);

        /* turn unstable check off by default as it impacts performance */
        cache->ccc_unstable_check = 0;
        atomic_long_set(&cache->ccc_unstable_nr, 0);
        init_waitqueue_head(&cache->ccc_unstable_waitq);

        RETURN(cache);
}
EXPORT_SYMBOL(cl_cache_init);

/**
 * Increase cl_cache refcount.
 */
void cl_cache_incref(struct cl_client_cache *cache)
{
        atomic_inc(&cache->ccc_users);
}
EXPORT_SYMBOL(cl_cache_incref);

/**
 * Decrease cl_cache refcount and free the cache if the refcount drops to
 * zero. Since llite, lov and osc all hold a cl_cache refcount, the free
 * will not cause a race. (LU-6173)
 */
void cl_cache_decref(struct cl_client_cache *cache)
{
        if (atomic_dec_and_test(&cache->ccc_users))
                OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(cl_cache_decref);
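
/*
 * Illustrative sketch (not from the original source): the cache returned by
 * cl_cache_init() is reference counted; each additional user takes a
 * reference with cl_cache_incref() and drops it with cl_cache_decref(), and
 * the final cl_cache_decref() frees the structure:
 *
 *      cache = cl_cache_init(lru_page_max);
 *      if (cache == NULL)
 *              return -ENOMEM;
 *      ...
 *      cl_cache_decref(cache);
 */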