fs/lustre-release.git: lustre/lov/lov_lock.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov
 *  @{
 */

/*****************************************************************************
 *
 * Lov lock operations.
 *
 */

static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   const struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the sub-IO's env & io to call the sub-lock
         * operations because an osc lock sometimes stores control variables
         * in the thread's IO information (currently only lockless
         * information). However, if the lock's host (object) differs from
         * the object of the current IO, there is no way to get the subenv
         * and subio because they are not initialized at all. As a temporary
         * fix, in this case we still borrow the parent's env to call the
         * sub-lock operations.
         */
        if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io  = io;
                subenv->lse_sub = NULL;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_stripe);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = sub->sub_io;
                        subenv->lse_sub = sub;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}

static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
        if (subenv && subenv->lse_sub)
                lov_sub_put(subenv->lse_sub);
}
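
/*
 * Typical calling pattern (a sketch mirroring lov_sublock_init() and
 * lov_lock_enqueue() below):
 *
 *      subenv = lov_sublock_env_get(env, parent, lls);
 *      if (IS_ERR(subenv))
 *              return PTR_ERR(subenv);
 *      ... call cl_lock_*() using subenv->lse_env and subenv->lse_io ...
 *      lov_sublock_env_put(subenv);
 */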

static int lov_sublock_init(const struct lu_env *env,
                            const struct cl_lock *parent,
                            struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        int result;
        ENTRY;

        subenv = lov_sublock_env_get(env, parent, lls);
        if (!IS_ERR(subenv)) {
                result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
                                      subenv->lse_io);
                lov_sublock_env_put(subenv);
        } else {
                /* An error occurred while getting the sub-environment. */
                result = PTR_ERR(subenv);
        }
        RETURN(result);
}

/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of the top-object and creates sub-locks on
 * every sub-object intersecting the top-lock extent. This is complicated by
 * the fact that the top-lock (that is being created) can be accessed
 * concurrently through already created sub-locks (possibly shared with other
 * top-locks).
 */
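/*
 * Illustration (an assumed example, not taken from the layout code): with a
 * RAID0 layout of stripe_size = 1 MiB and stripe_count = 2, a top-lock on
 * file bytes [0, 3 MiB) covers stripe 0 through the file chunks [0, 1 MiB)
 * and [2 MiB, 3 MiB), and stripe 1 through [1 MiB, 2 MiB).
 * lov_stripe_intersects() reduces each overlap to one extent in the
 * sub-object's own offsets, here [0, 2 MiB - 1] for stripe 0 and
 * [0, 1 MiB - 1] for stripe 1, so the loops below create two sub-locks.
 */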
static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
                                          const struct cl_object *obj,
                                          struct cl_lock *lock)
{
        int result = 0;
        int i;
        int nr;
        obd_off start;
        obd_off end;
        obd_off file_start;
        obd_off file_end;

        struct lov_object       *loo    = cl2lov(obj);
        struct lov_layout_raid0 *r0     = lov_r0(loo);
        struct lov_lock         *lovlck;

        ENTRY;

        file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
        file_end   = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;

        for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                /*
                 * XXX for wide striping a smarter algorithm is desirable,
                 * breaking out of the loop early.
                 */
                if (likely(r0->lo_sub[i] != NULL) && /* sparse layout */
                    lov_stripe_intersects(loo->lo_lsm, i,
                                          file_start, file_end, &start, &end))
                        nr++;
        }
        LASSERT(nr > 0);
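
        /*
         * Allocate the lov_lock together with its variable-length lls_sub[]
         * array, sized for the number of intersecting stripes found above.
         */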
        OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
        if (lovlck == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        lovlck->lls_nr = nr;
        for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
                if (likely(r0->lo_sub[i] != NULL) &&
                    lov_stripe_intersects(loo->lo_lsm, i,
                                          file_start, file_end, &start, &end)) {
                        struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
                        struct cl_lock_descr *descr;

                        descr = &lls->sub_lock.cll_descr;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = lock->cll_descr.cld_mode;
                        descr->cld_gid   = lock->cll_descr.cld_gid;
                        descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;

                        lls->sub_stripe = i;

                        /* initialize sub lock */
                        result = lov_sublock_init(env, lock, lls);
                        if (result < 0)
                                break;

                        lls->sub_initialized = 1;
                        nr++;
                }
        }
        LASSERT(ergo(result == 0, nr == lovlck->lls_nr));

        if (result != 0) {
                for (i = 0; i < nr; ++i) {
                        if (!lovlck->lls_sub[i].sub_initialized)
                                break;

                        cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
                }

                OBD_FREE_LARGE(lovlck,
                               offsetof(struct lov_lock,
                                        lls_sub[lovlck->lls_nr]));
                lovlck = ERR_PTR(result);
        }

        RETURN(lovlck);
}
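
/*
 * Implementation of cl_lock_operations::clo_fini() for the lov layer:
 * finalize every sub-lock that was initialized and free the variable-size
 * lov_lock itself.
 */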
static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lovlck;
        int i;

        ENTRY;
        lovlck = cl2lov_lock(slice);
        for (i = 0; i < lovlck->lls_nr; ++i) {
                LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
                if (lovlck->lls_sub[i].sub_initialized)
                        cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
        }
        OBD_FREE_LARGE(lovlck,
                       offsetof(struct lov_lock, lls_sub[lovlck->lls_nr]));
        EXIT;
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() for the lov layer.
 * This function is rather subtle, as it enqueues the top-lock (i.e., advances
 * the top-lock state machine from CLS_QUEUING to CLS_ENQUEUED) by juggling
 * sub-lock state machines in the face of sub-lock sharing (by multiple
 * top-locks) and concurrent sub-lock cancellations.
 */
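/*
 * If enqueueing fails part-way through, the sub-locks that were already
 * enqueued keep sub_is_enqueued set; they are expected to be released
 * through lov_lock_cancel() below.
 */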
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, struct cl_sync_io *anchor)
{
        struct cl_lock          *lock   = slice->cls_lock;
        struct lov_lock         *lovlck = cl2lov_lock(slice);
        int                     i;
        int                     rc      = 0;

        ENTRY;

        for (i = 0; i < lovlck->lls_nr; ++i) {
                struct lov_lock_sub     *lls = &lovlck->lls_sub[i];
                struct lov_sublock_env  *subenv;

                subenv = lov_sublock_env_get(env, lock, lls);
                if (IS_ERR(subenv)) {
                        rc = PTR_ERR(subenv);
                        break;
                }

                rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
                                     &lls->sub_lock, anchor);
                lov_sublock_env_put(subenv);
                if (rc != 0)
                        break;

                lls->sub_is_enqueued = 1;
        }
        RETURN(rc);
}
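
/*
 * Implementation of cl_lock_operations::clo_cancel() for the lov layer:
 * cancel every sub-lock that lov_lock_enqueue() marked as enqueued, again
 * borrowing the matching sub-environment where one is available.
 */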
static void lov_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct cl_lock  *lock   = slice->cls_lock;
        struct lov_lock *lovlck = cl2lov_lock(slice);
        int i;

        ENTRY;

        for (i = 0; i < lovlck->lls_nr; ++i) {
                struct lov_lock_sub     *lls = &lovlck->lls_sub[i];
                struct cl_lock          *sublock = &lls->sub_lock;
                struct lov_sublock_env  *subenv;

                if (!lls->sub_is_enqueued)
                        continue;

                lls->sub_is_enqueued = 0;
                subenv = lov_sublock_env_get(env, lock, lls);
                if (!IS_ERR(subenv)) {
                        cl_lock_cancel(subenv->lse_env, sublock);
                        lov_sublock_env_put(subenv);
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
                                      "lov_lock_cancel fails with %ld.\n",
                                      PTR_ERR(subenv));
                }
        }
}
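
/*
 * Implementation of cl_lock_operations::clo_print() for the lov layer:
 * print the number of sub-locks, then each sub-lock's enqueued flag and
 * description.
 */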
static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int              i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_is_enqueued);
                cl_lock_print(env, cookie, p, &sub->sub_lock);
        }
        return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_cancel    = lov_lock_cancel,
        .clo_print     = lov_lock_print
};
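
/*
 * Top-lock initialization for a RAID0-striped object: build the array of
 * sub-locks covering the intersecting stripes and add this layer's slice to
 * the cl_lock.
 */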
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result = 0;

        ENTRY;
        lck = lov_lock_sub_init(env, obj, lock);
        if (!IS_ERR(lck))
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
        else
                result = PTR_ERR(lck);
        RETURN(result);
}

static void lov_empty_lock_fini(const struct lu_env *env,
                                struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);

        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}

static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
                                lu_printer_t p,
                                const struct cl_lock_slice *slice)
{
        (*p)(env, cookie, "empty\n");
        return 0;
}

/* XXX: more methods will be added later. */
static const struct cl_lock_operations lov_empty_lock_ops = {
        .clo_fini  = lov_empty_lock_fini,
        .clo_print = lov_empty_lock_print
};
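
/*
 * Lock initialization for an object without a striped layout: only an empty
 * slice is attached, so there are no sub-locks to enqueue or cancel.
 */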
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result = -ENOMEM;

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
                result = 0;
        }
        RETURN(result);
}

/** @} lov */