/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov
 *  @{
 */

/*****************************************************************************
 *
 * Lov page operations.
 *
 */

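/**
 * Invariant of a LOV page slice: when a sub-page exists, the top-level page
 * and the sub-page point at each other and are in the same state.
 */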
static int lov_page_invariant(const struct cl_page_slice *slice)
{
        const struct cl_page  *page = slice->cpl_page;
        const struct cl_page  *sub  = lov_sub_page(slice);

        return ergo(sub != NULL,
                    page->cp_child == sub &&
                    sub->cp_parent == page &&
                    page->cp_state == sub->cp_state);
}

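/**
 * Finalizes a LOV page slice: drops the "lov" reference on the sub-page,
 * severs the parent/child links and frees the slice itself.
 */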
static void lov_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct lov_page *lp  = cl2lov_page(slice);
        struct cl_page  *sub = lov_sub_page(slice);

        LINVRNT(lov_page_invariant(slice));
        ENTRY;

        if (sub != NULL) {
                LASSERT(sub->cp_state == CPS_FREEING);
                lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
                sub->cp_parent = NULL;
                slice->cpl_page->cp_child = NULL;
                cl_page_put(env, sub);
        }
        OBD_SLAB_FREE_PTR(lp, lov_page_kmem);
        EXIT;
}

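/**
 * Owns the page on behalf of the stripe sub-io: looks up the sub-io covering
 * this slice and records it as the owner of the sub-page.
 */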
static int lov_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct lov_io     *lio = lov_env_io(env);
        struct lov_io_sub *sub;

        LINVRNT(lov_page_invariant(slice));
        LINVRNT(!cl2lov_page(slice)->lps_invalid);
        ENTRY;

        sub = lov_page_subio(env, lio, slice);
        if (!IS_ERR(sub)) {
                lov_sub_page(slice)->cp_owner = sub->sub_io;
                lov_sub_put(sub);
        } else
                LBUG(); /* Arrgh */
        RETURN(0);
}

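/**
 * Assume is implemented by delegating to lov_page_own() in blocking mode.
 */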
static void lov_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        lov_page_own(env, slice, io, 0);
}

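/**
 * Adds the page to the write cache by forwarding the request to the sub-page
 * in the environment of the stripe sub-io.
 */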
static int lov_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *io)
{
        struct lov_io     *lio = lov_env_io(env);
        struct lov_io_sub *sub;
        int rc = 0;

        LINVRNT(lov_page_invariant(slice));
        LINVRNT(!cl2lov_page(slice)->lps_invalid);
        ENTRY;

        sub = lov_page_subio(env, lio, slice);
        if (!IS_ERR(sub)) {
                rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
                                       slice->cpl_page->cp_child, CRT_WRITE);
                lov_sub_put(sub);
        } else {
                rc = PTR_ERR(sub);
                CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
        }
        RETURN(rc);
}

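/**
 * Prints the LOV page slice for debugging.
 */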
static int lov_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct lov_page *lp = cl2lov_page(slice);

        return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp);
}

static const struct cl_page_operations lov_page_ops = {
        .cpo_fini   = lov_page_fini,
        .cpo_own    = lov_page_own,
        .cpo_assume = lov_page_assume,
        .io = {
                [CRT_WRITE] = {
                        .cpo_cache_add = lov_page_cache_add
                }
        },
        .cpo_print  = lov_page_print
};

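/**
 * Finalizes the page slice of a file with no stripes; such a page never has
 * a sub-page attached.
 */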
static void lov_empty_page_fini(const struct lu_env *env,
                                struct cl_page_slice *slice)
{
        struct lov_page *lp  = cl2lov_page(slice);

        LASSERT(slice->cpl_page->cp_child == NULL);
        ENTRY;
        OBD_SLAB_FREE_PTR(lp, lov_page_kmem);
        EXIT;
}

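/**
 * Initializes the page slice of a RAID0-striped object: maps the file offset
 * to a stripe and a stripe-local offset, allocates the slice, finds (or
 * creates) the sub-page on the target stripe's sub-object and links it to
 * the top-level page.
 */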
struct cl_page *lov_page_init_raid0(const struct lu_env *env,
                                    struct cl_object *obj, struct cl_page *page,
                                    cfs_page_t *vmpage)
{
        struct lov_object *loo = cl2lov(obj);
        struct lov_layout_raid0 *r0 = lov_r0(loo);
        struct lov_io     *lio = lov_env_io(env);
        struct cl_page    *subpage;
        struct cl_object  *subobj;
        struct lov_io_sub *sub;
        struct lov_page   *lpg;
        struct cl_page    *result;
        loff_t             offset;
        obd_off            suboff;
        int                stripe;
        int                rc;
        ENTRY;

        offset = cl_offset(obj, page->cp_index);
        stripe = lov_stripe_number(loo->lo_lsm, offset);
        LASSERT(stripe < r0->lo_nr);
        rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
        LASSERT(rc == 0);

        OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
        if (lpg == NULL)
                GOTO(out, result = ERR_PTR(-ENOMEM));

        lpg->lps_invalid = 1;
        cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);

        sub = lov_sub_get(env, lio, stripe);
        if (IS_ERR(sub))
                GOTO(out, result = (struct cl_page *)sub);

        subobj = lovsub2cl(r0->lo_sub[stripe]);
        subpage = cl_page_find_sub(sub->sub_env, subobj,
                                   cl_index(subobj, suboff), vmpage, page);
        lov_sub_put(sub);
        if (IS_ERR(subpage))
                GOTO(out, result = subpage);

        if (likely(subpage->cp_parent == page)) {
                lu_ref_add(&subpage->cp_reference, "lov", page);
                lpg->lps_invalid = 0;
                result = NULL;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
                CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
                /* do not fall through to the return with an uninitialized
                 * result if assertions are compiled out */
                result = ERR_PTR(-EIO);
                LASSERT(0);
        }

        EXIT;
out:
        return(result);
}


static const struct cl_page_operations lov_empty_page_ops = {
        .cpo_fini   = lov_empty_page_fini,
        .cpo_print  = lov_page_print
};

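/**
 * Initializes the page slice of an object with no stripes: zero-fills the
 * backing vmpage and exports it as up to date. Returns NULL on success and
 * ERR_PTR(-ENOMEM) if the slice cannot be allocated.
 */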
struct cl_page *lov_page_init_empty(const struct lu_env *env,
                                    struct cl_object *obj, struct cl_page *page,
                                    cfs_page_t *vmpage)
{
        struct lov_page   *lpg;
        int result = -ENOMEM;
        ENTRY;

        OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
        if (lpg != NULL) {
                void *addr;
                cl_page_slice_add(page, &lpg->lps_cl,
                                  obj, &lov_empty_page_ops);
                addr = cfs_kmap(vmpage);
                memset(addr, 0, cl_page_size(obj));
                cfs_kunmap(vmpage);
                cl_page_export(env, page, 1);
                result = 0;
        }
        RETURN(ERR_PTR(result));
}


/** @} lov */