LU-3259 clio: Revise read ahead implementation
[fs/lustre-release.git] / lustre / obdclass / cl_page.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Client Lustre Page.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_CLASS
43
44 #include <libcfs/libcfs.h>
45 #include <obd_class.h>
46 #include <obd_support.h>
47 #include <libcfs/list.h>
48
49 #include <cl_object.h>
50 #include "cl_internal.h"
51
52 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
53
54 #ifdef LIBCFS_DEBUG
55 # define PASSERT(env, page, expr)                                       \
56   do {                                                                    \
57           if (unlikely(!(expr))) {                                      \
58                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
59                   LASSERT(0);                                           \
60           }                                                             \
61   } while (0)
62 #else /* !LIBCFS_DEBUG */
63 # define PASSERT(env, page, exp) \
64         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
65 #endif /* !LIBCFS_DEBUG */
66
67 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
68 # define PINVRNT(env, page, expr)                                       \
69   do {                                                                    \
70           if (unlikely(!(expr))) {                                      \
71                   CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
72                   LINVRNT(0);                                           \
73           }                                                             \
74   } while (0)
75 #else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
76 # define PINVRNT(env, page, exp) \
77          ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
78 #endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
79
80 /* Page statistics are disabled by default due to the huge performance penalty. */
81 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
82 #define CS_PAGE_INC(o, item) \
83         atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
84 #define CS_PAGE_DEC(o, item) \
85         atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
86 #define CS_PAGESTATE_INC(o, state) \
87         atomic_inc(&cl_object_site(o)->cs_pages_state[state])
88 #define CS_PAGESTATE_DEC(o, state) \
89         atomic_dec(&cl_object_site(o)->cs_pages_state[state])
90 #else
91 #define CS_PAGE_INC(o, item)
92 #define CS_PAGE_DEC(o, item)
93 #define CS_PAGESTATE_INC(o, state)
94 #define CS_PAGESTATE_DEC(o, state)
95 #endif
96
97 /**
98  * Internal version of cl_page_get().
99  *
100  * This function can be used to obtain an initial reference to a previously
101  * unreferenced cached object. It can be called only if concurrent page
102  * reclamation is somehow prevented, e.g., by holding a lock on the VM page
103  * associated with \a page.
104  *
105  * Use with care! Not exported.
106  */
107 static void cl_page_get_trust(struct cl_page *page)
108 {
109         LASSERT(atomic_read(&page->cp_ref) > 0);
110         atomic_inc(&page->cp_ref);
111 }
112
113 /**
114  * Returns a slice within a page, corresponding to the given layer in the
115  * device stack.
116  *
117  * \see cl_lock_at()
118  */
119 static const struct cl_page_slice *
120 cl_page_at_trusted(const struct cl_page *page,
121                    const struct lu_device_type *dtype)
122 {
123         const struct cl_page_slice *slice;
124         ENTRY;
125
126         list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
127                 if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
128                         RETURN(slice);
129         }
130         RETURN(NULL);
131 }
132
133 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
134 {
135         struct cl_object *obj  = page->cp_obj;
136         int pagesize = cl_object_header(obj)->coh_page_bufsize;
137
138         PASSERT(env, page, list_empty(&page->cp_batch));
139         PASSERT(env, page, page->cp_owner == NULL);
140         PASSERT(env, page, page->cp_req == NULL);
141         PASSERT(env, page, page->cp_state == CPS_FREEING);
142
143         ENTRY;
144         while (!list_empty(&page->cp_layers)) {
145                 struct cl_page_slice *slice;
146
147                 slice = list_entry(page->cp_layers.next,
148                                    struct cl_page_slice, cpl_linkage);
149                 list_del_init(page->cp_layers.next);
150                 if (unlikely(slice->cpl_ops->cpo_fini != NULL))
151                         slice->cpl_ops->cpo_fini(env, slice);
152         }
153         CS_PAGE_DEC(obj, total);
154         CS_PAGESTATE_DEC(obj, page->cp_state);
155         lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
156         cl_object_put(env, obj);
157         lu_ref_fini(&page->cp_reference);
158         OBD_FREE(page, pagesize);
159         EXIT;
160 }
161
162 /**
163  * Helper function updating page state. This is the only place in the code
164  * where cl_page::cp_state field is mutated.
165  */
166 static inline void cl_page_state_set_trust(struct cl_page *page,
167                                            enum cl_page_state state)
168 {
169         /* bypass const. */
170         *(enum cl_page_state *)&page->cp_state = state;
171 }
172
173 struct cl_page *cl_page_alloc(const struct lu_env *env,
174                 struct cl_object *o, pgoff_t ind, struct page *vmpage,
175                 enum cl_page_type type)
176 {
177         struct cl_page          *page;
178         struct lu_object_header *head;
179
180         ENTRY;
181         OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
182                         GFP_NOFS);
183         if (page != NULL) {
184                 int result = 0;
185                 atomic_set(&page->cp_ref, 1);
186                 page->cp_obj = o;
187                 cl_object_get(o);
188                 lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
189                                      page);
190                 page->cp_vmpage = vmpage;
191                 cl_page_state_set_trust(page, CPS_CACHED);
192                 page->cp_type = type;
193                 INIT_LIST_HEAD(&page->cp_layers);
194                 INIT_LIST_HEAD(&page->cp_batch);
195                 INIT_LIST_HEAD(&page->cp_flight);
196                 lu_ref_init(&page->cp_reference);
197                 head = o->co_lu.lo_header;
198                 list_for_each_entry(o, &head->loh_layers,
199                                     co_lu.lo_linkage) {
200                         if (o->co_ops->coo_page_init != NULL) {
201                                 result = o->co_ops->coo_page_init(env, o, page,
202                                                                   ind);
203                                 if (result != 0) {
204                                         cl_page_delete0(env, page);
205                                         cl_page_free(env, page);
206                                         page = ERR_PTR(result);
207                                         break;
208                                 }
209                         }
210                 }
211                 if (result == 0) {
212                         CS_PAGE_INC(o, total);
213                         CS_PAGE_INC(o, create);
214                         CS_PAGESTATE_DEC(o, CPS_CACHED);
215                 }
216         } else {
217                 page = ERR_PTR(-ENOMEM);
218         }
219         RETURN(page);
220 }
221
222 /**
223  * Returns a cl_page with index \a idx at the object \a o, and associated with
224  * the VM page \a vmpage.
225  *
226  * This is the main entry point into the cl_page caching interface. First, a
227  * cache (implemented as a per-object radix tree) is consulted. If the page is
228  * found there, it is returned immediately. Otherwise a new page is allocated
229  * and returned. In either case, an additional reference to the page is acquired.
230  *
231  * \see cl_object_find(), cl_lock_find()
232  */
233 struct cl_page *cl_page_find(const struct lu_env *env,
234                              struct cl_object *o,
235                              pgoff_t idx, struct page *vmpage,
236                              enum cl_page_type type)
237 {
238         struct cl_page          *page = NULL;
239         struct cl_object_header *hdr;
240
241         LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
242         might_sleep();
243
244         ENTRY;
245
246         hdr = cl_object_header(o);
247         CS_PAGE_INC(o, lookup);
248
249         CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
250                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
251         /* fast path. */
252         if (type == CPT_CACHEABLE) {
253                 /* vmpage lock is used to protect the child/parent
254                  * relationship */
255                 KLASSERT(PageLocked(vmpage));
256                 /*
257                  * cl_vmpage_page() can be called here without any locks as
258                  *
259                  *     - "vmpage" is locked (which prevents ->private from
260                  *       concurrent updates), and
261                  *
262                  *     - "o" cannot be destroyed while current thread holds a
263                  *       reference on it.
264                  */
265                 page = cl_vmpage_page(vmpage, o);
266                 if (page != NULL) {
267                         CS_PAGE_INC(o, hit);
268                         RETURN(page);
269                 }
270         }
271
272         /* allocate and initialize cl_page */
273         page = cl_page_alloc(env, o, idx, vmpage, type);
274         RETURN(page);
275 }
276 EXPORT_SYMBOL(cl_page_find);
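
/*
 * Usage sketch (illustrative only, not called from this file): a caller that
 * holds the lock on "vmpage" and a reference on "obj" can look up or create
 * the matching cl_page and drop its reference when done. "env", "obj", "idx"
 * and "vmpage" are assumed to come from the surrounding I/O context.
 *
 *	struct cl_page *page;
 *
 *	page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page under the vmpage lock ...
 *	cl_page_put(env, page);
 *	return 0;
 */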
277
278 static inline int cl_page_invariant(const struct cl_page *pg)
279 {
280         return cl_page_in_use_noref(pg);
281 }
282
283 static void cl_page_state_set0(const struct lu_env *env,
284                                struct cl_page *page, enum cl_page_state state)
285 {
286         enum cl_page_state old;
287
288         /*
289          * Matrix of allowed state transitions [old][new], for sanity
290          * checking.
291          */
292         static const int allowed_transitions[CPS_NR][CPS_NR] = {
293                 [CPS_CACHED] = {
294                         [CPS_CACHED]  = 0,
295                         [CPS_OWNED]   = 1, /* io finds existing cached page */
296                         [CPS_PAGEIN]  = 0,
297                         [CPS_PAGEOUT] = 1, /* write-out from the cache */
298                         [CPS_FREEING] = 1, /* eviction on the memory pressure */
299                 },
300                 [CPS_OWNED] = {
301                         [CPS_CACHED]  = 1, /* release to the cache */
302                         [CPS_OWNED]   = 0,
303                         [CPS_PAGEIN]  = 1, /* start read immediately */
304                         [CPS_PAGEOUT] = 1, /* start write immediately */
305                         [CPS_FREEING] = 1, /* lock invalidation or truncate */
306                 },
307                 [CPS_PAGEIN] = {
308                         [CPS_CACHED]  = 1, /* io completion */
309                         [CPS_OWNED]   = 0,
310                         [CPS_PAGEIN]  = 0,
311                         [CPS_PAGEOUT] = 0,
312                         [CPS_FREEING] = 0,
313                 },
314                 [CPS_PAGEOUT] = {
315                         [CPS_CACHED]  = 1, /* io completion */
316                         [CPS_OWNED]   = 0,
317                         [CPS_PAGEIN]  = 0,
318                         [CPS_PAGEOUT] = 0,
319                         [CPS_FREEING] = 0,
320                 },
321                 [CPS_FREEING] = {
322                         [CPS_CACHED]  = 0,
323                         [CPS_OWNED]   = 0,
324                         [CPS_PAGEIN]  = 0,
325                         [CPS_PAGEOUT] = 0,
326                         [CPS_FREEING] = 0,
327                 }
328         };
329
330         ENTRY;
331         old = page->cp_state;
332         PASSERT(env, page, allowed_transitions[old][state]);
333         CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
334         PASSERT(env, page, page->cp_state == old);
335         PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));
336
337         CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
338         CS_PAGESTATE_INC(page->cp_obj, state);
339         cl_page_state_set_trust(page, state);
340         EXIT;
341 }
342
343 static void cl_page_state_set(const struct lu_env *env,
344                               struct cl_page *page, enum cl_page_state state)
345 {
346         cl_page_state_set0(env, page, state);
347 }
348
349 /**
350  * Acquires an additional reference to a page.
351  *
352  * This can only be called by a caller already possessing a reference to
353  * \a page.
354  *
355  * \see cl_object_get(), cl_lock_get().
356  */
357 void cl_page_get(struct cl_page *page)
358 {
359         ENTRY;
360         cl_page_get_trust(page);
361         EXIT;
362 }
363 EXPORT_SYMBOL(cl_page_get);
364
365 /**
366  * Releases a reference to a page.
367  *
368  * When the last reference is released, the page is returned to the cache,
369  * unless it is in cl_page_state::CPS_FREEING state, in which case it is
370  * destroyed immediately.
371  *
372  * \see cl_object_put(), cl_lock_put().
373  */
374 void cl_page_put(const struct lu_env *env, struct cl_page *page)
375 {
376         ENTRY;
377         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
378                        atomic_read(&page->cp_ref));
379
380         if (atomic_dec_and_test(&page->cp_ref)) {
381                 LASSERT(page->cp_state == CPS_FREEING);
382
383                 LASSERT(atomic_read(&page->cp_ref) == 0);
384                 PASSERT(env, page, page->cp_owner == NULL);
385                 PASSERT(env, page, list_empty(&page->cp_batch));
386                 /*
387                  * Page is no longer reachable by other threads. Tear
388                  * it down.
389                  */
390                 cl_page_free(env, page);
391         }
392
393         EXIT;
394 }
395 EXPORT_SYMBOL(cl_page_put);
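
/*
 * Reference counting sketch (illustrative only; the helper name is a
 * placeholder): every cl_page_get() must be balanced by a cl_page_put()
 * in the same logical context.
 *
 *	static void example_peek(const struct lu_env *env, struct cl_page *page)
 *	{
 *		cl_page_get(page);
 *		... the page cannot be freed while this reference is held ...
 *		cl_page_put(env, page);
 *	}
 */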
396
397 /**
398  * Returns a cl_page associated with a VM page, and given cl_object.
399  */
400 struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
401 {
402         struct cl_page *page;
403
404         ENTRY;
405         KLASSERT(PageLocked(vmpage));
406
407         /*
408           * NOTE: absence of races and liveness of data are guaranteed by the
409           *       page lock on "vmpage". That works because object destruction
410           *       proceeds bottom-to-top.
411          */
412
413         page = (struct cl_page *)vmpage->private;
414         if (page != NULL) {
415                 cl_page_get_trust(page);
416                 LASSERT(page->cp_type == CPT_CACHEABLE);
417         }
418         RETURN(page);
419 }
420 EXPORT_SYMBOL(cl_vmpage_page);
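
/*
 * Fast-path sketch (illustrative only), mirroring the CPT_CACHEABLE branch
 * of cl_page_find(): with "vmpage" locked, the cl_page is recovered from
 * vmpage->private and carries an extra reference that the caller must drop.
 *
 *	lock_page(vmpage);
 *	page = cl_vmpage_page(vmpage, obj);
 *	if (page != NULL) {
 *		... inspect the page ...
 *		cl_page_put(env, page);
 *	}
 *	unlock_page(vmpage);
 */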
421
422 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
423                                        const struct lu_device_type *dtype)
424 {
425         return cl_page_at_trusted(page, dtype);
426 }
427 EXPORT_SYMBOL(cl_page_at);
428
429 #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
430
431 #define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
432 ({                                                                      \
433         const struct lu_env        *__env  = (_env);                    \
434         struct cl_page             *__page = (_page);                   \
435         const struct cl_page_slice *__scan;                             \
436         int                         __result;                           \
437         ptrdiff_t                   __op   = (_op);                     \
438         int                        (*__method)_proto;                   \
439                                                                         \
440         __result = 0;                                                   \
441         list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
442                 __method = *(void **)((char *)__scan->cpl_ops +  __op); \
443                 if (__method != NULL) {                                 \
444                         __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
445                         if (__result != 0)                              \
446                                 break;                                  \
447                 }                                                       \
448         }                                                               \
449         if (__result > 0)                                               \
450                 __result = 0;                                           \
451         __result;                                                       \
452 })
453
454 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
455 do {                                                                    \
456         const struct lu_env        *__env  = (_env);                    \
457         struct cl_page             *__page = (_page);                   \
458         const struct cl_page_slice *__scan;                             \
459         ptrdiff_t                   __op   = (_op);                     \
460         void                      (*__method)_proto;                    \
461                                                                         \
462         list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
463                 __method = *(void **)((char *)__scan->cpl_ops +  __op); \
464                 if (__method != NULL)                                   \
465                         (*__method)(__env, __scan, ## __VA_ARGS__);     \
466         }                                                               \
467 } while (0)
468
469 #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
470 do {                                                                    \
471         const struct lu_env        *__env  = (_env);                    \
472         struct cl_page             *__page = (_page);                   \
473         const struct cl_page_slice *__scan;                             \
474         ptrdiff_t                   __op   = (_op);                     \
475         void                      (*__method)_proto;                    \
476                                                                         \
477         /* get to the bottom page. */                                   \
478         list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
479                                     cpl_linkage) {                      \
480                 __method = *(void **)((char *)__scan->cpl_ops + __op);  \
481                 if (__method != NULL)                                   \
482                         (*__method)(__env, __scan, ## __VA_ARGS__);     \
483         }                                                               \
484 } while (0)
485
486 static int cl_page_invoke(const struct lu_env *env,
487                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
488
489 {
490         PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
491         ENTRY;
492         RETURN(CL_PAGE_INVOKE(env, page, op,
493                               (const struct lu_env *,
494                                const struct cl_page_slice *, struct cl_io *),
495                               io));
496 }
497
498 static void cl_page_invoid(const struct lu_env *env,
499                            struct cl_io *io, struct cl_page *page, ptrdiff_t op)
500
501 {
502         PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
503         ENTRY;
504         CL_PAGE_INVOID(env, page, op,
505                        (const struct lu_env *,
506                         const struct cl_page_slice *, struct cl_io *), io);
507         EXIT;
508 }
509
510 static void cl_page_owner_clear(struct cl_page *page)
511 {
512         ENTRY;
513         if (page->cp_owner != NULL) {
514                 LASSERT(page->cp_owner->ci_owned_nr > 0);
515                 page->cp_owner->ci_owned_nr--;
516                 page->cp_owner = NULL;
517         }
518         EXIT;
519 }
520
521 static void cl_page_owner_set(struct cl_page *page)
522 {
523         ENTRY;
524         LASSERT(page->cp_owner != NULL);
525         page->cp_owner->ci_owned_nr++;
526         EXIT;
527 }
528
529 void cl_page_disown0(const struct lu_env *env,
530                      struct cl_io *io, struct cl_page *pg)
531 {
532         enum cl_page_state state;
533
534         ENTRY;
535         state = pg->cp_state;
536         PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
537         PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
538         cl_page_owner_clear(pg);
539
540         if (state == CPS_OWNED)
541                 cl_page_state_set(env, pg, CPS_CACHED);
542         /*
543          * Completion call-backs are executed in bottom-up order, so that the
544          * uppermost layer (llite), responsible for VFS/VM interaction, runs
545          * last and can release locks safely.
546          */
547         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
548                                (const struct lu_env *,
549                                 const struct cl_page_slice *, struct cl_io *),
550                                io);
551         EXIT;
552 }
553
554 /**
555  * Returns true iff the page is owned by the given io.
556  */
557 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
558 {
559         struct cl_io *top = cl_io_top((struct cl_io *)io);
560         LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
561         ENTRY;
562         RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
563 }
564 EXPORT_SYMBOL(cl_page_is_owned);
565
566 /**
567  * Try to own a page by IO.
568  *
569  * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
570  * into cl_page_state::CPS_OWNED state.
571  *
572  * \pre  !cl_page_is_owned(pg, io)
573  * \post result == 0 iff cl_page_is_owned(pg, io)
574  *
575  * \retval 0   success
576  *
577  * \retval -ve failure, e.g., page was destroyed (and landed in
578  *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED).
579  *             or, page was owned by another thread, or in IO.
580  *
581  * \see cl_page_disown()
582  * \see cl_page_operations::cpo_own()
583  * \see cl_page_own_try()
584  * \see cl_page_own
585  */
586 static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
587                         struct cl_page *pg, int nonblock)
588 {
589         int result;
590
591         PINVRNT(env, pg, !cl_page_is_owned(pg, io));
592
593         ENTRY;
594         io = cl_io_top(io);
595
596         if (pg->cp_state == CPS_FREEING) {
597                 result = -ENOENT;
598         } else {
599                 result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
600                                         (const struct lu_env *,
601                                          const struct cl_page_slice *,
602                                          struct cl_io *, int),
603                                         io, nonblock);
604                 if (result == 0) {
605                         PASSERT(env, pg, pg->cp_owner == NULL);
606                         PASSERT(env, pg, pg->cp_req == NULL);
607                         pg->cp_owner = cl_io_top(io);
608                         cl_page_owner_set(pg);
609                         if (pg->cp_state != CPS_FREEING) {
610                                 cl_page_state_set(env, pg, CPS_OWNED);
611                         } else {
612                                 cl_page_disown0(env, io, pg);
613                                 result = -ENOENT;
614                         }
615                 }
616         }
617         PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
618         RETURN(result);
619 }
620
621 /**
622  * Own a page, might be blocked.
623  *
624  * \see cl_page_own0()
625  */
626 int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
627 {
628         return cl_page_own0(env, io, pg, 0);
629 }
630 EXPORT_SYMBOL(cl_page_own);
631
632 /**
633  * Nonblock version of cl_page_own().
634  *
635  * \see cl_page_own0()
636  */
637 int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
638                     struct cl_page *pg)
639 {
640         return cl_page_own0(env, io, pg, 1);
641 }
642 EXPORT_SYMBOL(cl_page_own_try);
643
644
645 /**
646  * Assume page ownership.
647  *
648  * Called when page is already locked by the hosting VM.
649  *
650  * \pre !cl_page_is_owned(pg, io)
651  * \post cl_page_is_owned(pg, io)
652  *
653  * \see cl_page_operations::cpo_assume()
654  */
655 void cl_page_assume(const struct lu_env *env,
656                     struct cl_io *io, struct cl_page *pg)
657 {
658         PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
659
660         ENTRY;
661         io = cl_io_top(io);
662
663         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
664         PASSERT(env, pg, pg->cp_owner == NULL);
665         pg->cp_owner = cl_io_top(io);
666         cl_page_owner_set(pg);
667         cl_page_state_set(env, pg, CPS_OWNED);
668         EXIT;
669 }
670 EXPORT_SYMBOL(cl_page_assume);
671
672 /**
673  * Releases page ownership without unlocking the page.
674  *
675  * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
676  * underlying VM page (as VM is supposed to do this itself).
677  *
678  * \pre   cl_page_is_owned(pg, io)
679  * \post !cl_page_is_owned(pg, io)
680  *
681  * \see cl_page_assume()
682  */
683 void cl_page_unassume(const struct lu_env *env,
684                       struct cl_io *io, struct cl_page *pg)
685 {
686         PINVRNT(env, pg, cl_page_is_owned(pg, io));
687         PINVRNT(env, pg, cl_page_invariant(pg));
688
689         ENTRY;
690         io = cl_io_top(io);
691         cl_page_owner_clear(pg);
692         cl_page_state_set(env, pg, CPS_CACHED);
693         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
694                                (const struct lu_env *,
695                                 const struct cl_page_slice *, struct cl_io *),
696                                io);
697         EXIT;
698 }
699 EXPORT_SYMBOL(cl_page_unassume);
700
701 /**
702  * Releases page ownership.
703  *
704  * Moves page into cl_page_state::CPS_CACHED.
705  *
706  * \pre   cl_page_is_owned(pg, io)
707  * \post !cl_page_is_owned(pg, io)
708  *
709  * \see cl_page_own()
710  * \see cl_page_operations::cpo_disown()
711  */
712 void cl_page_disown(const struct lu_env *env,
713                     struct cl_io *io, struct cl_page *pg)
714 {
715         PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
716                 pg->cp_state == CPS_FREEING);
717
718         ENTRY;
719         io = cl_io_top(io);
720         cl_page_disown0(env, io, pg);
721         EXIT;
722 }
723 EXPORT_SYMBOL(cl_page_disown);
724
725 /**
726  * Called when page is to be removed from the object, e.g., as a result of
727  * truncate.
728  *
729  * Calls cl_page_operations::cpo_discard() top-to-bottom.
730  *
731  * \pre cl_page_is_owned(pg, io)
732  *
733  * \see cl_page_operations::cpo_discard()
734  */
735 void cl_page_discard(const struct lu_env *env,
736                      struct cl_io *io, struct cl_page *pg)
737 {
738         PINVRNT(env, pg, cl_page_is_owned(pg, io));
739         PINVRNT(env, pg, cl_page_invariant(pg));
740
741         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
742 }
743 EXPORT_SYMBOL(cl_page_discard);
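
/*
 * Ownership sketch (illustrative only): a page has to be owned by the
 * top-level IO before it can be discarded, and ownership is released
 * afterwards. "env", "io" and "page" are assumed to come from the caller.
 *
 *	rc = cl_page_own(env, io, page);
 *	if (rc == 0) {
 *		cl_page_discard(env, io, page);
 *		cl_page_disown(env, io, page);
 *	}
 */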
744
745 /**
746  * Version of cl_page_delete() that can be called for not fully constructed
747  * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
748  * path. Doesn't check the page invariant.
749  */
750 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
751 {
752         ENTRY;
753
754         PASSERT(env, pg, pg->cp_state != CPS_FREEING);
755
756         /*
757          * Sever all ways to obtain new pointers to @pg.
758          */
759         cl_page_owner_clear(pg);
760
761         cl_page_state_set0(env, pg, CPS_FREEING);
762
763         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
764                        (const struct lu_env *, const struct cl_page_slice *));
765
766         EXIT;
767 }
768
769 /**
770  * Called when a decision is made to throw page out of memory.
771  *
772  * Notifies all layers about page destruction by calling
773  * cl_page_operations::cpo_delete() method top-to-bottom.
774  *
775  * Moves page into cl_page_state::CPS_FREEING state (this is the only place
776  * where transition to this state happens).
777  *
778  * Eliminates all venues through which new references to the page can be
779  * obtained:
780  *
781  *     - removes page from the radix trees,
782  *
783  *     - breaks linkage from VM page to cl_page.
784  *
785  * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
786  * drain after some time, at which point page will be recycled.
787  *
788  * \pre  VM page is locked
789  * \post pg->cp_state == CPS_FREEING
790  *
791  * \see cl_page_operations::cpo_delete()
792  */
793 void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
794 {
795         PINVRNT(env, pg, cl_page_invariant(pg));
796         ENTRY;
797         cl_page_delete0(env, pg);
798         EXIT;
799 }
800 EXPORT_SYMBOL(cl_page_delete);
801
802 /**
803  * Marks page up-to-date.
804  *
805  * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
806  * layer responsible for VM interaction has to mark/clear page as up-to-date
807  * by the \a uptodate argument.
808  *
809  * \see cl_page_operations::cpo_export()
810  */
811 void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
812 {
813         PINVRNT(env, pg, cl_page_invariant(pg));
814         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
815                        (const struct lu_env *,
816                         const struct cl_page_slice *, int), uptodate);
817 }
818 EXPORT_SYMBOL(cl_page_export);
819
820 /**
821  * Returns true iff \a pg is VM-locked in a suitable sense by the calling
822  * thread.
823  */
824 int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
825 {
826         int result;
827         const struct cl_page_slice *slice;
828
829         ENTRY;
830         slice = container_of(pg->cp_layers.next,
831                              const struct cl_page_slice, cpl_linkage);
832         PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
833         /*
834          * Call ->cpo_is_vmlocked() directly instead of going through
835          * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
836          * cl_page_invariant().
837          */
838         result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
839         PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
840         RETURN(result == -EBUSY);
841 }
842 EXPORT_SYMBOL(cl_page_is_vmlocked);
843
844 static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
845 {
846         ENTRY;
847         RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
848 }
849
850 static void cl_page_io_start(const struct lu_env *env,
851                              struct cl_page *pg, enum cl_req_type crt)
852 {
853         /*
854          * Page is queued for IO, change its state.
855          */
856         ENTRY;
857         cl_page_owner_clear(pg);
858         cl_page_state_set(env, pg, cl_req_type_state(crt));
859         EXIT;
860 }
861
862 /**
863  * Prepares a page for immediate transfer. cl_page_operations::cpo_prep() is
864  * called top-to-bottom. Every layer either agrees to submit this page (by
865  * returning 0), or requests to omit this page (by returning -EALREADY). The
866  * layer handling interactions with the VM also has to inform the VM that the
867  * page is now under transfer.
868  */
869 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
870                  struct cl_page *pg, enum cl_req_type crt)
871 {
872         int result;
873
874         PINVRNT(env, pg, cl_page_is_owned(pg, io));
875         PINVRNT(env, pg, cl_page_invariant(pg));
876         PINVRNT(env, pg, crt < CRT_NR);
877
878         /*
879          * XXX this has to be called bottom-to-top, so that llite can set up
880          * PG_writeback without risking other layers deciding to skip this
881          * page.
882          */
883         if (crt >= CRT_NR)
884                 return -EINVAL;
885         result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
886         if (result == 0)
887                 cl_page_io_start(env, pg, crt);
888
889         KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
890                       equi(result == 0,
891                            PageWriteback(cl_page_vmpage(pg)))));
892         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
893         return result;
894 }
895 EXPORT_SYMBOL(cl_page_prep);
896
897 /**
898  * Notify layers about transfer completion.
899  *
900  * Invoked by the transfer sub-system (which is a part of osc) to notify
901  * layers that a transfer, of which this page is a part, has completed.
902  *
903  * Completion call-backs are executed in bottom-up order, so that the
904  * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
905  * and can release locks safely.
906  *
907  * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
908  * \post pg->cp_state == CPS_CACHED
909  *
910  * \see cl_page_operations::cpo_completion()
911  */
912 void cl_page_completion(const struct lu_env *env,
913                         struct cl_page *pg, enum cl_req_type crt, int ioret)
914 {
915         struct cl_sync_io *anchor = pg->cp_sync_io;
916
917         PASSERT(env, pg, crt < CRT_NR);
918         /* cl_page::cp_req already cleared by the caller (osc_completion()) */
919         PASSERT(env, pg, pg->cp_req == NULL);
920         PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
921
922         ENTRY;
923         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
924         cl_page_state_set(env, pg, CPS_CACHED);
925         if (crt >= CRT_NR)
926                 return;
927         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
928                                (const struct lu_env *,
929                                 const struct cl_page_slice *, int), ioret);
930         if (anchor) {
931                 LASSERT(pg->cp_sync_io == anchor);
932                 pg->cp_sync_io = NULL;
933         }
934         /*
935          * As page->cp_obj is pinned by a reference from page->cp_req, it is
936          * safe to call cl_page_put() without risking object destruction in a
937          * non-blocking context.
938          */
939         cl_page_put(env, pg);
940
941         if (anchor != NULL)
942                 cl_sync_io_note(env, anchor, ioret);
943
944         EXIT;
945 }
946 EXPORT_SYMBOL(cl_page_completion);
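
/*
 * Transfer sketch (illustrative only): an owned page is handed to the
 * transfer engine with cl_page_prep(); when the RPC finishes, the osc layer
 * reports the result with cl_page_completion(), which also consumes one page
 * reference. The actual network transfer is elided here.
 *
 *	rc = cl_page_prep(env, io, page, CRT_READ);
 *	if (rc == 0) {
 *		... page is now in CPS_PAGEIN; submit the RPC ...
 *		... on RPC completion, the transfer code calls: ...
 *		cl_page_completion(env, page, CRT_READ, ioret);
 *	} else if (rc == -EALREADY) {
 *		... some layer decided this page needs no transfer ...
 *	}
 */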
947
948 /**
949  * Notify layers that the transfer formation engine decided to yank this page
950  * from the cache and make it part of a transfer.
951  *
952  * \pre  pg->cp_state == CPS_CACHED
953  * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
954  *
955  * \see cl_page_operations::cpo_make_ready()
956  */
957 int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
958                        enum cl_req_type crt)
959 {
960         int result;
961
962         PINVRNT(env, pg, crt < CRT_NR);
963
964         ENTRY;
965         if (crt >= CRT_NR)
966                 RETURN(-EINVAL);
967         result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
968                                 (const struct lu_env *,
969                                  const struct cl_page_slice *));
970         if (result == 0) {
971                 PASSERT(env, pg, pg->cp_state == CPS_CACHED);
972                 cl_page_io_start(env, pg, crt);
973         }
974         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
975         RETURN(result);
976 }
977 EXPORT_SYMBOL(cl_page_make_ready);
978
979 /**
980  * Called when a page is being written back at the kernel's initiative.
981  *
982  * \pre  cl_page_is_owned(pg, io)
983  * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
984  *
985  * \see cl_page_operations::cpo_flush()
986  */
987 int cl_page_flush(const struct lu_env *env, struct cl_io *io,
988                   struct cl_page *pg)
989 {
990         int result;
991
992         PINVRNT(env, pg, cl_page_is_owned(pg, io));
993         PINVRNT(env, pg, cl_page_invariant(pg));
994
995         ENTRY;
996
997         result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
998
999         CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
1000         RETURN(result);
1001 }
1002 EXPORT_SYMBOL(cl_page_flush);
1003
1004 /**
1005  * Tells the transfer engine that only part of a page is to be transmitted.
1006  *
1007  * \see cl_page_operations::cpo_clip()
1008  */
1009 void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
1010                   int from, int to)
1011 {
1012         PINVRNT(env, pg, cl_page_invariant(pg));
1013
1014         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
1015         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
1016                        (const struct lu_env *,
1017                         const struct cl_page_slice *, int, int),
1018                        from, to);
1019 }
1020 EXPORT_SYMBOL(cl_page_clip);
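
/*
 * Illustrative example (values are made up): a write that dirties only the
 * first 512 bytes of a page can restrict the transfer to that range before
 * the page is submitted.
 *
 *	cl_page_clip(env, page, 0, 512);
 */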
1021
1022 /**
1023  * Prints a human-readable representation of \a pg through \a printer.
1024  */
1025 void cl_page_header_print(const struct lu_env *env, void *cookie,
1026                           lu_printer_t printer, const struct cl_page *pg)
1027 {
1028         (*printer)(env, cookie,
1029                    "page@%p[%d %p %d %d %d %p %p]\n",
1030                    pg, atomic_read(&pg->cp_ref), pg->cp_obj,
1031                    pg->cp_state, pg->cp_error, pg->cp_type,
1032                    pg->cp_owner, pg->cp_req);
1033 }
1034 EXPORT_SYMBOL(cl_page_header_print);
1035
1036 /**
1037  * Prints a human-readable representation of \a pg through \a printer.
1038  */
1039 void cl_page_print(const struct lu_env *env, void *cookie,
1040                    lu_printer_t printer, const struct cl_page *pg)
1041 {
1042         cl_page_header_print(env, cookie, printer, pg);
1043         CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
1044                        (const struct lu_env *env,
1045                         const struct cl_page_slice *slice,
1046                         void *cookie, lu_printer_t p), cookie, printer);
1047         (*printer)(env, cookie, "end page@%p\n", pg);
1048 }
1049 EXPORT_SYMBOL(cl_page_print);
1050
1051 /**
1052  * Cancel a page which is still in a transfer.
1053  */
1054 int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
1055 {
1056         return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
1057                               (const struct lu_env *,
1058                                const struct cl_page_slice *));
1059 }
1060
1061 /**
1062  * Converts a page index within object \a obj into a byte offset.
1063  */
1064 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
1065 {
1066         return (loff_t)idx << PAGE_CACHE_SHIFT;
1067 }
1068 EXPORT_SYMBOL(cl_offset);
1069
1070 /**
1071  * Converts a byte offset within object \a obj into a page index.
1072  */
1073 pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
1074 {
1075         return offset >> PAGE_CACHE_SHIFT;
1076 }
1077 EXPORT_SYMBOL(cl_index);
1078
1079 size_t cl_page_size(const struct cl_object *obj)
1080 {
1081         return 1UL << PAGE_CACHE_SHIFT;
1082 }
1083 EXPORT_SYMBOL(cl_page_size);
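
/*
 * Worked example, assuming a 4096-byte (order 12) page size:
 *
 *	cl_offset(obj, 3)     == 12288	(3 << 12)
 *	cl_index(obj, 12800)  == 3	(12800 >> 12)
 *	cl_page_size(obj)     == 4096
 *
 * cl_index() rounds down, so any offset inside a page maps to that page's
 * index.
 */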
1084
1085 /**
1086  * Adds page slice to the compound page.
1087  *
1088  * This is called by cl_object_operations::coo_page_init() methods to add a
1089  * per-layer state to the page. The new state is added at the end of the
1090  * cl_page::cp_layers list, that is, at the bottom of the stack.
1091  *
1092  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
1093  */
1094 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
1095                        struct cl_object *obj, pgoff_t index,
1096                        const struct cl_page_operations *ops)
1097 {
1098         ENTRY;
1099         list_add_tail(&slice->cpl_linkage, &page->cp_layers);
1100         slice->cpl_obj  = obj;
1101         slice->cpl_index = index;
1102         slice->cpl_ops  = ops;
1103         slice->cpl_page = page;
1104         EXIT;
1105 }
1106 EXPORT_SYMBOL(cl_page_slice_add);
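
/*
 * A minimal sketch of a layer's cl_object_operations::coo_page_init() method
 * calling cl_page_slice_add(). The "foo" layer, foo_page_of() and
 * foo_page_ops are placeholders; real layers (llite/lov/osc) embed the slice
 * in their layer-private per-page structure in the same way.
 *
 *	static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
 *				 struct cl_page *page, pgoff_t index)
 *	{
 *		struct foo_page *fp = foo_page_of(obj, page);
 *
 *		cl_page_slice_add(page, &fp->fp_cl, obj, index, &foo_page_ops);
 *		return 0;
 *	}
 */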
1107
1108 /**
1109  * Allocate and initialize cl_cache, called by ll_init_sbi().
1110  */
1111 struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
1112 {
1113         struct cl_client_cache  *cache = NULL;
1114
1115         ENTRY;
1116         OBD_ALLOC(cache, sizeof(*cache));
1117         if (cache == NULL)
1118                 RETURN(NULL);
1119
1120         /* Initialize cache data */
1121         atomic_set(&cache->ccc_users, 1);
1122         cache->ccc_lru_max = lru_page_max;
1123         atomic_long_set(&cache->ccc_lru_left, lru_page_max);
1124         spin_lock_init(&cache->ccc_lru_lock);
1125         INIT_LIST_HEAD(&cache->ccc_lru);
1126
1127         /* turn unstable check off by default as it impacts performance */
1128         cache->ccc_unstable_check = 0;
1129         atomic_long_set(&cache->ccc_unstable_nr, 0);
1130         init_waitqueue_head(&cache->ccc_unstable_waitq);
1131
1132         RETURN(cache);
1133 }
1134 EXPORT_SYMBOL(cl_cache_init);
1135
1136 /**
1137  * Increase cl_cache refcount
1138  */
1139 void cl_cache_incref(struct cl_client_cache *cache)
1140 {
1141         atomic_inc(&cache->ccc_users);
1142 }
1143 EXPORT_SYMBOL(cl_cache_incref);
1144
1145 /**
1146  * Decrease the cl_cache refcount and free the cache when the refcount
1147  * reaches zero. Since llite, lov and osc each hold a cl_cache refcount,
1148  * the free cannot race with a concurrent user. (LU-6173)
1149  */
1150 void cl_cache_decref(struct cl_client_cache *cache)
1151 {
1152         if (atomic_dec_and_test(&cache->ccc_users))
1153                 OBD_FREE(cache, sizeof(*cache));
1154 }
1155 EXPORT_SYMBOL(cl_cache_decref);