/*
 * Source: fs/lustre-release.git — lustre/lov/lov_lock.c
 * (browsed via Whamcloud gitweb; commit context: LU-4974 lod: Change
 * pool_desc to "[lod|lov]_pool_desc")
 */
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * Implementation of cl_lock for LOV layer.
32  *
33  *   Author: Nikita Danilov <nikita.danilov@sun.com>
34  */
35
36 #define DEBUG_SUBSYSTEM S_LOV
37
38 #include "lov_cl_internal.h"
39
40 /** \addtogroup lov
41  *  @{
42  */
43
44 /**
45  * Lov lock operations.
46  */
47 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
48                                                    const struct cl_lock *parent,
49                                                    struct lov_lock_sub *lls)
50 {
51         struct lov_sublock_env *subenv;
52         struct lov_io          *lio    = lov_env_io(env);
53         struct cl_io           *io     = lio->lis_cl.cis_io;
54         struct lov_io_sub      *sub;
55
56         subenv = &lov_env_session(env)->ls_subenv;
57
58         /*
59          * FIXME: We tend to use the subio's env & io to call the sublock
60          * lock operations because osc lock sometimes stores some control
61          * variables in thread's IO infomation(Now only lockless information).
62          * However, if the lock's host(object) is different from the object
63          * for current IO, we have no way to get the subenv and subio because
64          * they are not initialized at all. As a temp fix, in this case,
65          * we still borrow the parent's env to call sublock operations.
66          */
67         if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
68                 subenv->lse_env = env;
69                 subenv->lse_io = io;
70         } else {
71                 sub = lov_sub_get(env, lio, lls->sub_index);
72                 if (!IS_ERR(sub)) {
73                         subenv->lse_env = sub->sub_env;
74                         subenv->lse_io  = &sub->sub_io;
75                 } else {
76                         subenv = (void *)sub;
77                 }
78         }
79         return subenv;
80 }
81
82 static int lov_sublock_init(const struct lu_env *env,
83                             const struct cl_lock *parent,
84                             struct lov_lock_sub *lls)
85 {
86         struct lov_sublock_env *subenv;
87         int result;
88
89         ENTRY;
90
91         subenv = lov_sublock_env_get(env, parent, lls);
92         if (!IS_ERR(subenv)) {
93                 result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
94                                       subenv->lse_io);
95         } else {
96                 /* error occurs. */
97                 result = PTR_ERR(subenv);
98         }
99         RETURN(result);
100 }
101
102 /**
103  * Creates sub-locks for a given lov_lock for the first time.
104  *
105  * Goes through all sub-objects of top-object, and creates sub-locks on every
106  * sub-object intersecting with top-lock extent. This is complicated by the
107  * fact that top-lock (that is being created) can be accessed concurrently
108  * through already created sub-locks (possibly shared with other top-locks).
109  */
110 static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
111                                           const struct cl_io *io,
112                                           const struct cl_object *obj,
113                                           struct cl_lock *lock)
114 {
115         struct lov_object *lov = cl2lov(obj);
116         struct lov_io *lio = lov_env_io(env);
117         bool is_trunc = cl_io_is_trunc(io);
118         struct lov_lock *lovlck;
119         struct lu_extent ext;
120         loff_t start;
121         loff_t end;
122         int result = 0;
123         int i;
124         int index;
125         int nr;
126
127         ENTRY;
128
129         LASSERT(ergo(is_trunc, lio->lis_trunc_stripe_index != NULL));
130
131         ext.e_start = lock->cll_descr.cld_start << PAGE_SHIFT;
132         if (lock->cll_descr.cld_end == CL_PAGE_EOF)
133                 ext.e_end = OBD_OBJECT_EOF;
134         else
135                 ext.e_end  = (lock->cll_descr.cld_end + 1) << PAGE_SHIFT;
136
137         nr = 0;
138         lov_foreach_io_layout(index, lio, &ext) {
139                 struct lov_layout_raid0 *r0 = lov_r0(lov, index);
140
141                 for (i = 0; i < r0->lo_nr; i++) {
142                         if (likely(r0->lo_sub[i])) {/* spare layout */
143                                 if (lov_stripe_intersects(lov->lo_lsm, index, i, &ext, &start, &end) ||
144                                     (is_trunc && i == lio->lis_trunc_stripe_index[index]))
145                                         nr++;
146                         }
147                 }
148         }
149         /**
150          * Aggressive lock request (from cl_setattr_ost) which asks for
151          * [eof, -1) lock, could come across uninstantiated layout extent,
152          * hence a 0 nr is possible.
153          */
154
155         OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
156         if (!lovlck)
157                 RETURN(ERR_PTR(-ENOMEM));
158
159         lovlck->lls_nr = nr;
160         nr = 0;
161         lov_foreach_io_layout(index, lov_env_io(env), &ext) {
162                 struct lov_layout_raid0 *r0 = lov_r0(lov, index);
163
164                 for (i = 0; i < r0->lo_nr; ++i) {
165                         struct lov_lock_sub *lls;
166                         struct cl_lock_descr *descr;
167
168                         if (unlikely(!r0->lo_sub[i]))
169                                 continue;
170
171                         if (lov_stripe_intersects(lov->lo_lsm, index, i, &ext, &start, &end) ||
172                             (is_trunc && i == lio->lis_trunc_stripe_index[index]))
173                                 goto init_sublock;
174
175                         continue;
176 init_sublock:
177                         LASSERT(nr < lovlck->lls_nr);
178                         lls = &lovlck->lls_sub[nr];
179                         descr = &lls->sub_lock.cll_descr;
180                         LASSERT(descr->cld_obj == NULL);
181                         descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
182                         descr->cld_start = start >> PAGE_SHIFT;
183                         descr->cld_end   = end >> PAGE_SHIFT;
184                         descr->cld_mode  = lock->cll_descr.cld_mode;
185                         descr->cld_gid   = lock->cll_descr.cld_gid;
186                         descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
187
188                         lls->sub_index = lov_comp_index(index, i);
189
190                         /* initialize sub lock */
191                         result = lov_sublock_init(env, lock, lls);
192                         if (result < 0)
193                                 break;
194
195                         lls->sub_initialized = 1;
196                         nr++;
197                 }
198                 if (result < 0)
199                         break;
200         }
201         LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
202
203         if (result != 0) {
204                 for (i = 0; i < nr; ++i) {
205                         if (!lovlck->lls_sub[i].sub_initialized)
206                                 break;
207
208                         cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
209                 }
210
211                 OBD_FREE_LARGE(lovlck,
212                                 offsetof(struct lov_lock, lls_sub[nr]));
213                 lovlck = ERR_PTR(result);
214         }
215
216         RETURN(lovlck);
217 }
218
219 static void lov_lock_fini(const struct lu_env *env,
220                           struct cl_lock_slice *slice)
221 {
222         struct lov_lock *lovlck;
223         int i;
224
225         ENTRY;
226         lovlck = cl2lov_lock(slice);
227         for (i = 0; i < lovlck->lls_nr; ++i) {
228                 LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
229                 if (lovlck->lls_sub[i].sub_initialized)
230                         cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
231         }
232         OBD_FREE_LARGE(lovlck,
233                        offsetof(struct lov_lock, lls_sub[lovlck->lls_nr]));
234         EXIT;
235 }
236
237 /**
238  * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
239  * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
240  * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
241  * state machines in the face of sub-locks sharing (by multiple top-locks),
242  * and concurrent sub-lock cancellations.
243  */
244 static int lov_lock_enqueue(const struct lu_env *env,
245                             const struct cl_lock_slice *slice,
246                             struct cl_io *io, struct cl_sync_io *anchor)
247 {
248         struct cl_lock *lock = slice->cls_lock;
249         struct lov_lock *lovlck = cl2lov_lock(slice);
250         int i;
251         int rc = 0;
252
253         ENTRY;
254
255         for (i = 0; i < lovlck->lls_nr; ++i) {
256                 struct lov_lock_sub     *lls = &lovlck->lls_sub[i];
257                 struct lov_sublock_env  *subenv;
258
259                 subenv = lov_sublock_env_get(env, lock, lls);
260                 if (IS_ERR(subenv)) {
261                         rc = PTR_ERR(subenv);
262                         break;
263                 }
264
265                 rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
266                                      &lls->sub_lock, anchor);
267                 if (rc != 0)
268                         break;
269
270                 lls->sub_is_enqueued = 1;
271         }
272         RETURN(rc);
273 }
274
275 static void lov_lock_cancel(const struct lu_env *env,
276                             const struct cl_lock_slice *slice)
277 {
278         struct cl_lock *lock = slice->cls_lock;
279         struct lov_lock *lovlck = cl2lov_lock(slice);
280         int i;
281
282         ENTRY;
283
284         for (i = 0; i < lovlck->lls_nr; ++i) {
285                 struct lov_lock_sub *lls = &lovlck->lls_sub[i];
286                 struct cl_lock *sublock = &lls->sub_lock;
287                 struct lov_sublock_env *subenv;
288
289                 if (!lls->sub_is_enqueued)
290                         continue;
291
292                 lls->sub_is_enqueued = 0;
293                 subenv = lov_sublock_env_get(env, lock, lls);
294                 if (!IS_ERR(subenv)) {
295                         cl_lock_cancel(subenv->lse_env, sublock);
296                 } else {
297                         CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
298                                       "lov_lock_cancel fails with %ld.\n",
299                                       PTR_ERR(subenv));
300                 }
301         }
302 }
303
304 static int lov_lock_print(const struct lu_env *env, void *cookie,
305                           lu_printer_t p, const struct cl_lock_slice *slice)
306 {
307         struct lov_lock *lck = cl2lov_lock(slice);
308         int i;
309
310         (*p)(env, cookie, "%d\n", lck->lls_nr);
311         for (i = 0; i < lck->lls_nr; ++i) {
312                 struct lov_lock_sub *sub;
313
314                 sub = &lck->lls_sub[i];
315                 (*p)(env, cookie, "    %d %x: ", i, sub->sub_is_enqueued);
316                 cl_lock_print(env, cookie, p, &sub->sub_lock);
317         }
318         return 0;
319 }
320
/* cl_lock operations for fully instantiated (composite) lov locks */
static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_cancel    = lov_lock_cancel,
        .clo_print     = lov_lock_print
};
327
328 int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
329                             struct cl_lock *lock, const struct cl_io *io)
330 {
331         struct lov_lock *lck;
332         int result = 0;
333
334         ENTRY;
335         lck = lov_lock_sub_init(env, io, obj, lock);
336         if (!IS_ERR(lck))
337                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
338         else
339                 result = PTR_ERR(lck);
340         RETURN(result);
341 }
342
343 static void lov_empty_lock_fini(const struct lu_env *env,
344                                 struct cl_lock_slice *slice)
345 {
346         struct lov_lock *lck = cl2lov_lock(slice);
347
348         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
349 }
350
351 static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
352                         lu_printer_t p, const struct cl_lock_slice *slice)
353 {
354         (*p)(env, cookie, "empty\n");
355         return 0;
356 }
357
/* cl_lock operations for empty (stripeless) lov locks.
 * XXX: more methods will be added later. */
static const struct cl_lock_operations lov_empty_lock_ops = {
        .clo_fini  = lov_empty_lock_fini,
        .clo_print = lov_empty_lock_print
};
363
364 int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
365                         struct cl_lock *lock, const struct cl_io *io)
366 {
367         struct lov_lock *lck;
368         int result = -ENOMEM;
369
370         ENTRY;
371         OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
372         if (lck) {
373                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
374                 result = 0;
375         }
376         RETURN(result);
377 }
378
379 /** @} lov */