/* fs/lustre-release.git: lustre/lov/lov_page.c @ 85edd0ee7fb6da27b0b8b2c20c39a49740887959 */
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_page for LOV layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LOV
43
44 #include "lov_cl_internal.h"
45
46 /** \addtogroup lov
47  *  @{
48  */
49
50 /*****************************************************************************
51  *
52  * Lov page operations.
53  *
54  */
55
56 static int lov_page_invariant(const struct cl_page_slice *slice)
57 {
58         const struct cl_page  *page = slice->cpl_page;
59         const struct cl_page  *sub  = lov_sub_page(slice);
60
61         return ergo(sub != NULL,
62                     page->cp_child == sub &&
63                     sub->cp_parent == page &&
64                     page->cp_state == sub->cp_state);
65 }
66
67 static void lov_page_fini(const struct lu_env *env,
68                           struct cl_page_slice *slice)
69 {
70         struct cl_page  *sub = lov_sub_page(slice);
71
72         LINVRNT(lov_page_invariant(slice));
73         ENTRY;
74
75         if (sub != NULL) {
76                 LASSERT(sub->cp_state == CPS_FREEING);
77                 lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
78                 sub->cp_parent = NULL;
79                 slice->cpl_page->cp_child = NULL;
80                 cl_page_put(env, sub);
81         }
82         EXIT;
83 }
84
85 static int lov_page_own(const struct lu_env *env,
86                         const struct cl_page_slice *slice, struct cl_io *io,
87                         int nonblock)
88 {
89         struct lov_io     *lio = lov_env_io(env);
90         struct lov_io_sub *sub;
91
92         LINVRNT(lov_page_invariant(slice));
93         LINVRNT(!cl2lov_page(slice)->lps_invalid);
94         ENTRY;
95
96         sub = lov_page_subio(env, lio, slice);
97         if (!IS_ERR(sub)) {
98                 lov_sub_page(slice)->cp_owner = sub->sub_io;
99                 lov_sub_put(sub);
100         } else
101                 LBUG(); /* Arrgh */
102         RETURN(0);
103 }
104
/*
 * cpo_assume: assuming ownership is implemented as a (re-)own;
 * the return value is deliberately discarded.
 */
static void lov_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        (void)lov_page_own(env, slice, io, 0);
}
110
111 static int lov_page_cache_add(const struct lu_env *env,
112                               const struct cl_page_slice *slice,
113                               struct cl_io *io)
114 {
115         struct lov_io     *lio = lov_env_io(env);
116         struct lov_io_sub *sub;
117         int rc = 0;
118
119         LINVRNT(lov_page_invariant(slice));
120         LINVRNT(!cl2lov_page(slice)->lps_invalid);
121         ENTRY;
122
123         sub = lov_page_subio(env, lio, slice);
124         if (!IS_ERR(sub)) {
125                 rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
126                                        slice->cpl_page->cp_child, CRT_WRITE);
127                 lov_sub_put(sub);
128         } else {
129                 rc = PTR_ERR(sub);
130                 CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
131         }
132         RETURN(rc);
133 }
134
135 static int lov_page_print(const struct lu_env *env,
136                           const struct cl_page_slice *slice,
137                           void *cookie, lu_printer_t printer)
138 {
139         struct lov_page *lp = cl2lov_page(slice);
140
141         return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp);
142 }
143
/*
 * Method table for pages with a sub-page (raid0 layout); installed by
 * lov_page_init_raid0(). Operations forward to the sub-page/sub-io.
 */
static const struct cl_page_operations lov_page_ops = {
        .cpo_fini   = lov_page_fini,
        .cpo_own    = lov_page_own,
        .cpo_assume = lov_page_assume,
        .io = {
                [CRT_WRITE] = {
                        .cpo_cache_add = lov_page_cache_add
                }
        },
        .cpo_print  = lov_page_print
};
155
/*
 * cpo_fini for pages of an "empty" layout (see lov_page_init_empty()):
 * such pages never acquire a sub-page, so there is nothing to release
 * beyond the sanity check.
 */
static void lov_empty_page_fini(const struct lu_env *env,
                                struct cl_page_slice *slice)
{
        LASSERT(slice->cpl_page->cp_child == NULL);
}
161
162 int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
163                         struct cl_page *page, struct page *vmpage)
164 {
165         struct lov_object *loo = cl2lov(obj);
166         struct lov_layout_raid0 *r0 = lov_r0(loo);
167         struct lov_io     *lio = lov_env_io(env);
168         struct cl_page    *subpage;
169         struct cl_object  *subobj;
170         struct lov_io_sub *sub;
171         struct lov_page   *lpg = cl_object_page_slice(obj, page);
172         loff_t             offset;
173         obd_off            suboff;
174         int                stripe;
175         int                rc;
176         ENTRY;
177
178         offset = cl_offset(obj, page->cp_index);
179         stripe = lov_stripe_number(loo->lo_lsm, offset);
180         LASSERT(stripe < r0->lo_nr);
181         rc = lov_stripe_offset(loo->lo_lsm, offset, stripe,
182                                &suboff);
183         LASSERT(rc == 0);
184
185         lpg->lps_invalid = 1;
186         cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
187
188         sub = lov_sub_get(env, lio, stripe);
189         if (IS_ERR(sub))
190                 RETURN(PTR_ERR(sub));
191
192         subobj = lovsub2cl(r0->lo_sub[stripe]);
193         subpage = cl_page_alloc(sub->sub_env, subobj, cl_index(subobj, suboff),
194                                 vmpage, page->cp_type);
195         if (!IS_ERR(subpage)) {
196                 subpage->cp_parent = page;
197                 page->cp_child = subpage;
198                 lpg->lps_invalid = 0;
199         } else
200                 rc = PTR_ERR(subpage);
201         lov_sub_put(sub);
202         RETURN(rc);
203 }
204
/*
 * Method table for pages without a sub-page ("empty" layout);
 * installed by lov_page_init_empty().
 */
static const struct cl_page_operations lov_empty_page_ops = {
        .cpo_fini   = lov_empty_page_fini,
        .cpo_print  = lov_page_print
};
209
210 int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
211                         struct cl_page *page, struct page *vmpage)
212 {
213         struct lov_page *lpg = cl_object_page_slice(obj, page);
214         void *addr;
215         ENTRY;
216
217         cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
218         addr = kmap(vmpage);
219         memset(addr, 0, cl_page_size(obj));
220         kunmap(vmpage);
221         cl_page_export(env, page, 1);
222         RETURN(0);
223 }
224
225
226 /** @} lov */
227