Whamcloud - gitweb
LU-14618 lov: correctly handling sub-lock init failure
[fs/lustre-release.git] / lustre / lov / lov_lock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * Implementation of cl_lock for LOV layer.
32  *
33  *   Author: Nikita Danilov <nikita.danilov@sun.com>
34  */
35
36 #define DEBUG_SUBSYSTEM S_LOV
37
38 #include "lov_cl_internal.h"
39
40 /** \addtogroup lov
41  *  @{
42  */
43
44 /*****************************************************************************
45  *
46  * Lov lock operations.
47  *
48  */
49
50 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
51                                                    const struct cl_lock *parent,
52                                                    struct lov_lock_sub *lls)
53 {
54         struct lov_sublock_env *subenv;
55         struct lov_io          *lio    = lov_env_io(env);
56         struct cl_io           *io     = lio->lis_cl.cis_io;
57         struct lov_io_sub      *sub;
58
59         subenv = &lov_env_session(env)->ls_subenv;
60
61         /*
62          * FIXME: We tend to use the subio's env & io to call the sublock
63          * lock operations because osc lock sometimes stores some control
64          * variables in thread's IO infomation(Now only lockless information).
65          * However, if the lock's host(object) is different from the object
66          * for current IO, we have no way to get the subenv and subio because
67          * they are not initialized at all. As a temp fix, in this case,
68          * we still borrow the parent's env to call sublock operations.
69          */
70         if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
71                 subenv->lse_env = env;
72                 subenv->lse_io = io;
73         } else {
74                 sub = lov_sub_get(env, lio, lls->sub_index);
75                 if (!IS_ERR(sub)) {
76                         subenv->lse_env = sub->sub_env;
77                         subenv->lse_io  = &sub->sub_io;
78                 } else {
79                         subenv = (void *)sub;
80                 }
81         }
82         return subenv;
83 }
84
85 static int lov_sublock_init(const struct lu_env *env,
86                             const struct cl_lock *parent,
87                             struct lov_lock_sub *lls)
88 {
89         struct lov_sublock_env *subenv;
90         int result;
91
92         ENTRY;
93
94         subenv = lov_sublock_env_get(env, parent, lls);
95         if (!IS_ERR(subenv)) {
96                 result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
97                                       subenv->lse_io);
98         } else {
99                 /* error occurs. */
100                 result = PTR_ERR(subenv);
101         }
102         RETURN(result);
103 }
104
105 /**
106  * Creates sub-locks for a given lov_lock for the first time.
107  *
108  * Goes through all sub-objects of top-object, and creates sub-locks on every
109  * sub-object intersecting with top-lock extent. This is complicated by the
110  * fact that top-lock (that is being created) can be accessed concurrently
111  * through already created sub-locks (possibly shared with other top-locks).
112  */
113 static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
114                                           const struct cl_io *io,
115                                           const struct cl_object *obj,
116                                           struct cl_lock *lock)
117 {
118         struct lov_object *lov = cl2lov(obj);
119         struct lov_lock *lovlck;
120         struct lu_extent ext;
121         loff_t start;
122         loff_t end;
123         int result = 0;
124         int i;
125         int index;
126         int nr;
127
128         ENTRY;
129
130         ext.e_start = cl_offset(obj, lock->cll_descr.cld_start);
131         if (lock->cll_descr.cld_end == CL_PAGE_EOF)
132                 ext.e_end = OBD_OBJECT_EOF;
133         else
134                 ext.e_end  = cl_offset(obj, lock->cll_descr.cld_end + 1);
135
136         nr = 0;
137         lov_foreach_io_layout(index, lov_env_io(env), &ext) {
138                 struct lov_layout_raid0 *r0 = lov_r0(lov, index);
139
140                 for (i = 0; i < r0->lo_nr; i++) {
141                         if (likely(r0->lo_sub[i])) {/* spare layout */
142                                 if (lov_stripe_intersects(lov->lo_lsm, index, i,
143                                                           &ext, &start, &end))
144                                         nr++;
145                                 else if (cl_io_is_trunc(io) &&
146                                          r0->lo_trunc_stripeno == i)
147                                         nr++;
148                         }
149                 }
150         }
151         /**
152          * Aggressive lock request (from cl_setattr_ost) which asks for
153          * [eof, -1) lock, could come across uninstantiated layout extent,
154          * hence a 0 nr is possible.
155          */
156
157         OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
158         if (!lovlck)
159                 RETURN(ERR_PTR(-ENOMEM));
160
161         lovlck->lls_nr = nr;
162         nr = 0;
163         lov_foreach_io_layout(index, lov_env_io(env), &ext) {
164                 struct lov_layout_raid0 *r0 = lov_r0(lov, index);
165
166                 for (i = 0; i < r0->lo_nr; ++i) {
167                         struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
168                         struct cl_lock_descr *descr = &lls->sub_lock.cll_descr;
169                         bool intersect = false;
170
171                         if (unlikely(!r0->lo_sub[i]))
172                                 continue;
173
174                         intersect = lov_stripe_intersects(lov->lo_lsm, index, i,
175                                                           &ext, &start, &end);
176                         if (intersect)
177                                 goto init_sublock;
178
179                         if (cl_io_is_trunc(io) && i == r0->lo_trunc_stripeno)
180                                 goto init_sublock;
181
182                         continue;
183
184 init_sublock:
185                         LASSERT(descr->cld_obj == NULL);
186                         descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
187                         descr->cld_start = cl_index(descr->cld_obj, start);
188                         descr->cld_end   = cl_index(descr->cld_obj, end);
189                         descr->cld_mode  = lock->cll_descr.cld_mode;
190                         descr->cld_gid   = lock->cll_descr.cld_gid;
191                         descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
192
193                         lls->sub_index = lov_comp_index(index, i);
194
195                         /* initialize sub lock */
196                         result = lov_sublock_init(env, lock, lls);
197                         if (result < 0)
198                                 break;
199
200                         lls->sub_initialized = 1;
201                         nr++;
202                 }
203                 if (result < 0)
204                         break;
205         }
206         LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
207
208         if (result != 0) {
209                 for (i = 0; i < nr; ++i) {
210                         if (!lovlck->lls_sub[i].sub_initialized)
211                                 break;
212
213                         cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
214                 }
215
216                 OBD_FREE_LARGE(lovlck,
217                                 offsetof(struct lov_lock, lls_sub[nr]));
218                 lovlck = ERR_PTR(result);
219         }
220
221         RETURN(lovlck);
222 }
223
224 static void lov_lock_fini(const struct lu_env *env,
225                           struct cl_lock_slice *slice)
226 {
227         struct lov_lock *lovlck;
228         int i;
229
230         ENTRY;
231         lovlck = cl2lov_lock(slice);
232         for (i = 0; i < lovlck->lls_nr; ++i) {
233                 LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
234                 if (lovlck->lls_sub[i].sub_initialized)
235                         cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
236         }
237         OBD_FREE_LARGE(lovlck,
238                        offsetof(struct lov_lock, lls_sub[lovlck->lls_nr]));
239         EXIT;
240 }
241
242 /**
243  * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
244  * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
245  * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
246  * state machines in the face of sub-locks sharing (by multiple top-locks),
247  * and concurrent sub-lock cancellations.
248  */
249 static int lov_lock_enqueue(const struct lu_env *env,
250                             const struct cl_lock_slice *slice,
251                             struct cl_io *io, struct cl_sync_io *anchor)
252 {
253         struct cl_lock *lock = slice->cls_lock;
254         struct lov_lock *lovlck = cl2lov_lock(slice);
255         int i;
256         int rc = 0;
257
258         ENTRY;
259
260         for (i = 0; i < lovlck->lls_nr; ++i) {
261                 struct lov_lock_sub     *lls = &lovlck->lls_sub[i];
262                 struct lov_sublock_env  *subenv;
263
264                 subenv = lov_sublock_env_get(env, lock, lls);
265                 if (IS_ERR(subenv)) {
266                         rc = PTR_ERR(subenv);
267                         break;
268                 }
269
270                 rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
271                                      &lls->sub_lock, anchor);
272                 if (rc != 0)
273                         break;
274
275                 lls->sub_is_enqueued = 1;
276         }
277         RETURN(rc);
278 }
279
280 static void lov_lock_cancel(const struct lu_env *env,
281                             const struct cl_lock_slice *slice)
282 {
283         struct cl_lock *lock = slice->cls_lock;
284         struct lov_lock *lovlck = cl2lov_lock(slice);
285         int i;
286
287         ENTRY;
288
289         for (i = 0; i < lovlck->lls_nr; ++i) {
290                 struct lov_lock_sub *lls = &lovlck->lls_sub[i];
291                 struct cl_lock *sublock = &lls->sub_lock;
292                 struct lov_sublock_env *subenv;
293
294                 if (!lls->sub_is_enqueued)
295                         continue;
296
297                 lls->sub_is_enqueued = 0;
298                 subenv = lov_sublock_env_get(env, lock, lls);
299                 if (!IS_ERR(subenv)) {
300                         cl_lock_cancel(subenv->lse_env, sublock);
301                 } else {
302                         CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
303                                       "lov_lock_cancel fails with %ld.\n",
304                                       PTR_ERR(subenv));
305                 }
306         }
307 }
308
309 static int lov_lock_print(const struct lu_env *env, void *cookie,
310                           lu_printer_t p, const struct cl_lock_slice *slice)
311 {
312         struct lov_lock *lck = cl2lov_lock(slice);
313         int i;
314
315         (*p)(env, cookie, "%d\n", lck->lls_nr);
316         for (i = 0; i < lck->lls_nr; ++i) {
317                 struct lov_lock_sub *sub;
318
319                 sub = &lck->lls_sub[i];
320                 (*p)(env, cookie, "    %d %x: ", i, sub->sub_is_enqueued);
321                 cl_lock_print(env, cookie, p, &sub->sub_lock);
322         }
323         return 0;
324 }
325
/* Lock operations for a fully-initialized composite lov lock; each method
 * fans out to the per-stripe sub-locks stored in struct lov_lock. */
static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_cancel    = lov_lock_cancel,
        .clo_print     = lov_lock_print
};
332
333 int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
334                             struct cl_lock *lock, const struct cl_io *io)
335 {
336         struct lov_lock *lck;
337         int result = 0;
338
339         ENTRY;
340         lck = lov_lock_sub_init(env, io, obj, lock);
341         if (!IS_ERR(lck))
342                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
343         else
344                 result = PTR_ERR(lck);
345         RETURN(result);
346 }
347
/* clo_fini for an "empty" lov lock: no sub-locks to clean up, just
 * return the lov_lock object to its slab cache. */
static void lov_empty_lock_fini(const struct lu_env *env,
                                struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);

        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}
355
/* clo_print for an "empty" lov lock: nothing to describe beyond a marker. */
static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
                        lu_printer_t p, const struct cl_lock_slice *slice)
{
        (*p)(env, cookie, "empty\n");
        return 0;
}
362
/* Lock operations for an "empty" lov lock (no sub-locks; presumably a
 * stripeless layout — see lov_lock_init_empty()). */
/* XXX: more methods will be added later. */
static const struct cl_lock_operations lov_empty_lock_ops = {
        .clo_fini  = lov_empty_lock_fini,
        .clo_print = lov_empty_lock_print
};
368
369 int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
370                         struct cl_lock *lock, const struct cl_io *io)
371 {
372         struct lov_lock *lck;
373         int result = -ENOMEM;
374
375         ENTRY;
376         OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
377         if (lck) {
378                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
379                 result = 0;
380         }
381         RETURN(result);
382 }
383
384 /** @} lov */