/* lustre/lov/lov_lock.c */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov
 *  @{
 */

/*****************************************************************************
 *
 * Lov lock operations.
 *
 */

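/**
 * Picks the environment and IO with which to operate on the sub-lock
 * described by \a lls: uses the sub-IO's private environment when the
 * top-lock belongs to the object of the current IO, and falls back to the
 * parent's environment otherwise (see the FIXME below).
 */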
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   const struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the sub-IO's env and io to call the sub-lock
         * operations because an osc lock sometimes stores control variables
         * in the thread's IO information (currently only lockless
         * information). However, if the lock's host (object) differs from
         * the object of the current IO, there is no way to get the subenv
         * and subio because they are not initialized at all. As a temporary
         * fix, in this case we still borrow the parent's env to call
         * sub-lock operations.
         */
        if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io = io;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_index);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = &sub->sub_io;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}

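/**
 * Initializes the cl_lock embedded in \a lls with the environment and IO
 * obtained from lov_sublock_env_get().
 */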
static int lov_sublock_init(const struct lu_env *env,
                            const struct cl_lock *parent,
                            struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        int result;

        ENTRY;

        subenv = lov_sublock_env_get(env, parent, lls);
        if (!IS_ERR(subenv)) {
                result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
                                      subenv->lse_io);
        } else {
                /* an error occurred */
                result = PTR_ERR(subenv);
        }
        RETURN(result);
}

/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of the top-object and creates sub-locks on
 * every sub-object intersecting the top-lock extent. This is complicated by
 * the fact that the top-lock (which is being created) can be accessed
 * concurrently through already created sub-locks (possibly shared with other
 * top-locks).
 */
static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
                                          const struct cl_io *io,
                                          const struct cl_object *obj,
                                          struct cl_lock *lock)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_lock *lovlck;
        struct lu_extent ext;
        loff_t start;
        loff_t end;
        int result = 0;
        int i;
        int index;
        int nr;

        ENTRY;

        ext.e_start = cl_offset(obj, lock->cll_descr.cld_start);
        if (lock->cll_descr.cld_end == CL_PAGE_EOF)
                ext.e_end = OBD_OBJECT_EOF;
        else
                ext.e_end = cl_offset(obj, lock->cll_descr.cld_end + 1);

        nr = 0;
        lov_foreach_io_layout(index, lov_env_io(env), &ext) {
                struct lov_layout_raid0 *r0 = lov_r0(lov, index);

                for (i = 0; i < r0->lo_nr; i++) {
                        if (likely(r0->lo_sub[i])) { /* sparse layout */
                                if (lov_stripe_intersects(lov->lo_lsm, index, i,
                                                          &ext, &start, &end))
                                        nr++;
                                else if (cl_io_is_trunc(io) &&
                                         r0->lo_trunc_stripeno == i)
                                        nr++;
                        }
                }
        }
        /*
         * An aggressive lock request (from cl_setattr_ost) asking for an
         * [eof, -1) lock may come across an uninstantiated layout extent,
         * hence nr == 0 is possible.
         */

        OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
        if (!lovlck)
                RETURN(ERR_PTR(-ENOMEM));

        lovlck->lls_nr = nr;
        nr = 0;
        lov_foreach_io_layout(index, lov_env_io(env), &ext) {
                struct lov_layout_raid0 *r0 = lov_r0(lov, index);

                for (i = 0; i < r0->lo_nr; ++i) {
                        struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
                        struct cl_lock_descr *descr = &lls->sub_lock.cll_descr;
                        bool intersect = false;

                        if (unlikely(!r0->lo_sub[i]))
                                continue;

                        intersect = lov_stripe_intersects(lov->lo_lsm, index, i,
                                                          &ext, &start, &end);
                        if (intersect)
                                goto init_sublock;

                        if (cl_io_is_trunc(io) && i == r0->lo_trunc_stripeno)
                                goto init_sublock;

                        continue;

init_sublock:
                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = lock->cll_descr.cld_mode;
                        descr->cld_gid   = lock->cll_descr.cld_gid;
                        descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;

                        lls->sub_index = lov_comp_index(index, i);

                        /* initialize sub lock */
                        result = lov_sublock_init(env, lock, lls);
                        if (result < 0)
                                break;

                        lls->sub_initialized = 1;
                        nr++;
                }
        }
        LASSERT(ergo(result == 0, nr == lovlck->lls_nr));

        if (result != 0) {
                for (i = 0; i < nr; ++i) {
                        if (!lovlck->lls_sub[i].sub_initialized)
                                break;

                        cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
                }

                OBD_FREE_LARGE(lovlck,
                               offsetof(struct lov_lock, lls_sub[nr]));
                lovlck = ERR_PTR(result);
        }

        RETURN(lovlck);
}

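/**
 * Implementation of cl_lock_operations::clo_fini() for the lov layer:
 * finalizes every initialized sub-lock and frees the lov_lock itself.
 */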
static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lovlck;
        int i;

        ENTRY;
        lovlck = cl2lov_lock(slice);
        for (i = 0; i < lovlck->lls_nr; ++i) {
                LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
                if (lovlck->lls_sub[i].sub_initialized)
                        cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
        }
        OBD_FREE_LARGE(lovlck,
                       offsetof(struct lov_lock, lls_sub[lovlck->lls_nr]));
        EXIT;
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() for the lov layer.
 * This function is rather subtle, as it enqueues the top-lock (i.e.,
 * advances the top-lock state machine from CLS_QUEUING to CLS_ENQUEUED) by
 * juggling sub-lock state machines in the face of sub-lock sharing (by
 * multiple top-locks) and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, struct cl_sync_io *anchor)
{
        struct cl_lock *lock = slice->cls_lock;
        struct lov_lock *lovlck = cl2lov_lock(slice);
        int i;
        int rc = 0;

        ENTRY;

        for (i = 0; i < lovlck->lls_nr; ++i) {
                struct lov_lock_sub     *lls = &lovlck->lls_sub[i];
                struct lov_sublock_env  *subenv;

                subenv = lov_sublock_env_get(env, lock, lls);
                if (IS_ERR(subenv)) {
                        rc = PTR_ERR(subenv);
                        break;
                }

                rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
                                     &lls->sub_lock, anchor);
                if (rc != 0)
                        break;

                lls->sub_is_enqueued = 1;
        }
        RETURN(rc);
}

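/**
 * Implementation of cl_lock_operations::clo_cancel() for the lov layer:
 * cancels every sub-lock that is still marked as enqueued.
 */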
static void lov_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct cl_lock *lock = slice->cls_lock;
        struct lov_lock *lovlck = cl2lov_lock(slice);
        int i;

        ENTRY;

        for (i = 0; i < lovlck->lls_nr; ++i) {
                struct lov_lock_sub *lls = &lovlck->lls_sub[i];
                struct cl_lock *sublock = &lls->sub_lock;
                struct lov_sublock_env *subenv;

                if (!lls->sub_is_enqueued)
                        continue;

                lls->sub_is_enqueued = 0;
                subenv = lov_sublock_env_get(env, lock, lls);
                if (!IS_ERR(subenv)) {
                        cl_lock_cancel(subenv->lse_env, sublock);
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
                                      "lov_lock_cancel fails with %ld.\n",
                                      PTR_ERR(subenv));
                }
        }
}

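/**
 * Implementation of cl_lock_operations::clo_print() for the lov layer:
 * prints the number of sub-locks, then each sub-lock's index, enqueued
 * flag and description.
 */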
static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_is_enqueued);
                cl_lock_print(env, cookie, p, &sub->sub_lock);
        }
        return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_cancel    = lov_lock_cancel,
        .clo_print     = lov_lock_print
};

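/**
 * Creates a lov_lock together with its sub-locks via lov_lock_sub_init()
 * and attaches it to the top-lock as a new slice.
 */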
int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
                            struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result = 0;

        ENTRY;
        lck = lov_lock_sub_init(env, io, obj, lock);
        if (!IS_ERR(lck))
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
        else
                result = PTR_ERR(lck);
        RETURN(result);
}

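/*
 * Lock operations for "empty" lov objects (objects with no sub-locks to
 * manage): finalization only needs to free the slice itself.
 */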
static void lov_empty_lock_fini(const struct lu_env *env,
                                struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);

        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}

static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
                                lu_printer_t p,
                                const struct cl_lock_slice *slice)
{
        (*p)(env, cookie, "empty\n");
        return 0;
}

/* XXX: more methods will be added later. */
static const struct cl_lock_operations lov_empty_lock_ops = {
        .clo_fini  = lov_empty_lock_fini,
        .clo_print = lov_empty_lock_print
};

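/**
 * Allocates an empty lov_lock (one with no sub-locks) and attaches it to
 * the top-lock as a new slice.
 */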
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result = -ENOMEM;

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
        if (lck) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
                result = 0;
        }
        RETURN(result);
}

/** @} lov */