[fs/lustre-release.git] / lustre / osc / osc_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2011, 2012, Whamcloud, Inc.
33  */
34 /*
35  * This file is part of Lustre, http://www.lustre.org/
36  * Lustre is a trademark of Sun Microsystems, Inc.
37  *
38  * Implementation of cl_lock for OSC layer.
39  *
40  *   Author: Nikita Danilov <nikita.danilov@sun.com>
41  */
42
43 #define DEBUG_SUBSYSTEM S_OSC
44
45 #ifdef __KERNEL__
46 # include <libcfs/libcfs.h>
47 #else
48 # include <liblustre.h>
49 #endif
50 /* fid_build_reg_res_name() */
51 #include <lustre_fid.h>
52
53 #include "osc_cl_internal.h"
54
55 /** \addtogroup osc 
56  *  @{ 
57  */
58
59 #define _PAGEREF_MAGIC  (-10000000)
60
61 /*****************************************************************************
62  *
63  * Type conversions.
64  *
65  */
66
67 static const struct cl_lock_operations osc_lock_ops;
68 static const struct cl_lock_operations osc_lock_lockless_ops;
69 static void osc_lock_to_lockless(const struct lu_env *env,
70                                  struct osc_lock *ols, int force);
71 static int osc_lock_has_pages(struct osc_lock *olck);
72
73 int osc_lock_is_lockless(const struct osc_lock *olck)
74 {
75         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
76 }
77
78 /**
79  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
80  * pointer cannot be dereferenced, as lock is not protected from concurrent
81  * reclaim. This function is a helper for osc_lock_invariant().
82  */
83 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
84 {
85         struct ldlm_lock *lock;
86
87         lock = ldlm_handle2lock(handle);
88         if (lock != NULL)
89                 LDLM_LOCK_PUT(lock);
90         return lock;
91 }
92
93 /**
94  * Invariant that has to be true all of the time.
95  */
96 static int osc_lock_invariant(struct osc_lock *ols)
97 {
98         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
99         struct ldlm_lock *olock       = ols->ols_lock;
100         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
101
102         return
103                 ergo(osc_lock_is_lockless(ols),
104                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
105                 (ergo(olock != NULL, handle_used) &&
106                  ergo(olock != NULL,
107                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
108                  /*
109                   * Check that ->ols_handle and ->ols_lock are consistent, but
110                   * take into account that they are set at the different time.
111                   * take into account that they are set at different times.
112                  ergo(handle_used,
113                       ergo(lock != NULL && olock != NULL, lock == olock) &&
114                       ergo(lock == NULL, olock == NULL)) &&
115                  ergo(ols->ols_state == OLS_CANCELLED,
116                       olock == NULL && !handle_used) &&
117                  /*
118                   * DLM lock is destroyed only after we have seen cancellation
119                   * ast.
120                   */
121                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
122                       !olock->l_destroyed) &&
123                  ergo(ols->ols_state == OLS_GRANTED,
124                       olock != NULL &&
125                       olock->l_req_mode == olock->l_granted_mode &&
126                       ols->ols_hold));
127 }
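/*
 * Note on notation: ergo(a, b) is the libcfs implication helper,
 * roughly
 *
 *         #define ergo(a, b) (!(a) || (b))
 *
 * so, e.g., ergo(handle_used, ...) above reads "if ->ols_handle is in
 * use, then ...". The invariant is a conjunction of implications
 * rather than equalities, because ->ols_handle and ->ols_lock are set
 * at different times.
 */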
128
129 /*****************************************************************************
130  *
131  * Lock operations.
132  *
133  */
134
135 /**
136  * Breaks a link between osc_lock and dlm_lock.
137  */
138 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
139 {
140         struct ldlm_lock *dlmlock;
141
142         cfs_spin_lock(&osc_ast_guard);
143         dlmlock = olck->ols_lock;
144         if (dlmlock == NULL) {
145                 cfs_spin_unlock(&osc_ast_guard);
146                 return;
147         }
148
149         olck->ols_lock = NULL;
150         /* wb(); --- for all who check (ols->ols_lock != NULL) before
151          * call to osc_lock_detach() */
152         dlmlock->l_ast_data = NULL;
153         olck->ols_handle.cookie = 0ULL;
154         cfs_spin_unlock(&osc_ast_guard);
155
156         lock_res_and_lock(dlmlock);
157         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
158                 struct cl_object *obj = olck->ols_cl.cls_obj;
159                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
160                 __u64 old_kms;
161
162                 cl_object_attr_lock(obj);
163                 /* Must get the value under the lock to avoid possible races. */
164                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
165                 /* Update the kms. Need to loop over all granted locks.
166                  * Not a problem for the client */
167                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
168
169                 cl_object_attr_set(env, obj, attr, CAT_KMS);
170                 cl_object_attr_unlock(obj);
171         }
172         unlock_res_and_lock(dlmlock);
173
174         /* release a reference taken in osc_lock_upcall0(). */
175         LASSERT(olck->ols_has_ref);
176         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
177         LDLM_LOCK_RELEASE(dlmlock);
178         olck->ols_has_ref = 0;
179 }
180
181 static int osc_lock_unhold(struct osc_lock *ols)
182 {
183         int result = 0;
184
185         if (ols->ols_hold) {
186                 ols->ols_hold = 0;
187                 result = osc_cancel_base(&ols->ols_handle,
188                                          ols->ols_einfo.ei_mode);
189         }
190         return result;
191 }
192
193 static int osc_lock_unuse(const struct lu_env *env,
194                           const struct cl_lock_slice *slice)
195 {
196         struct osc_lock *ols = cl2osc_lock(slice);
197
198         LINVRNT(osc_lock_invariant(ols));
199
200         switch (ols->ols_state) {
201         case OLS_NEW:
202                 LASSERT(!ols->ols_hold);
203                 LASSERT(ols->ols_agl);
204                 return 0;
205         case OLS_UPCALL_RECEIVED:
206                 LASSERT(!ols->ols_hold);
207                 ols->ols_state = OLS_NEW;
208                 return 0;
209         case OLS_GRANTED:
210                 LASSERT(!ols->ols_glimpse);
211                 LASSERT(ols->ols_hold);
212                 /*
213                  * Move lock into OLS_RELEASED state before calling
214                  * osc_cancel_base() so that possible synchronous cancellation
215                  * (that always happens e.g., for liblustre) sees that lock is
216                  * released.
217                  */
218                 ols->ols_state = OLS_RELEASED;
219                 return osc_lock_unhold(ols);
220         default:
221                 CERROR("Impossible state: %d\n", ols->ols_state);
222                 LBUG();
223         }
224 }
225
226 static void osc_lock_fini(const struct lu_env *env,
227                           struct cl_lock_slice *slice)
228 {
229         struct osc_lock  *ols = cl2osc_lock(slice);
230
231         LINVRNT(osc_lock_invariant(ols));
232         /*
233          * ->ols_hold can still be true at this point if, for example, a
234          * thread that requested a lock was killed (and released a reference
235          * to the lock) before a reply from the server was received. In this
236          * case the lock is destroyed immediately after the upcall.
237          */
238         osc_lock_unhold(ols);
239         LASSERT(ols->ols_lock == NULL);
240         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
241                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
242
243         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
244 }
245
246 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
247                         struct ldlm_res_id *resname)
248 {
249         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
250         if (0) {
251                 /*
252                  * In the perfect world of the future, where ost servers talk
253                  * idif-fids...
254                  */
255                 fid_build_reg_res_name(fid, resname);
256         } else {
257                 /*
258                  * In reality, where the ost server expects ->lsm_object_id and
259                  * ->lsm_object_seq in the resource name (resname).
260                  */
261                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
262                                    resname);
263         }
264 }
265
266 static void osc_lock_build_policy(const struct lu_env *env,
267                                   const struct cl_lock *lock,
268                                   ldlm_policy_data_t *policy)
269 {
270         const struct cl_lock_descr *d = &lock->cll_descr;
271
272         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
273         policy->l_extent.gid = d->cld_gid;
274 }
275
276 static int osc_enq2ldlm_flags(__u32 enqflags)
277 {
278         int result = 0;
279
280         LASSERT((enqflags & ~CEF_MASK) == 0);
281
282         if (enqflags & CEF_NONBLOCK)
283                 result |= LDLM_FL_BLOCK_NOWAIT;
284         if (enqflags & CEF_ASYNC)
285                 result |= LDLM_FL_HAS_INTENT;
286         if (enqflags & CEF_DISCARD_DATA)
287                 result |= LDLM_AST_DISCARD_DATA;
288         return result;
289 }
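/*
 * For example, CEF_ASYNC is turned into LDLM_FL_HAS_INTENT, which
 * osc_lock_enqueue() below treats as a glimpse request.
 */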
290
291 /**
292  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
293  * pointers. Initialized in osc_init().
294  */
295 cfs_spinlock_t osc_ast_guard;
296
297 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
298 {
299         struct osc_lock *olck;
300
301         lock_res_and_lock(dlm_lock);
302         cfs_spin_lock(&osc_ast_guard);
303         olck = dlm_lock->l_ast_data;
304         if (olck != NULL) {
305                 struct cl_lock *lock = olck->ols_cl.cls_lock;
306                 /*
307                  * If osc_lock holds a reference on ldlm lock, return it even
308                  * when cl_lock is in CLS_FREEING state. This way
309                  *
310                  *         osc_ast_data_get(dlmlock) == NULL
311                  *
312                  * guarantees that all osc references on dlmlock were
313                  * released. osc_dlm_blocking_ast0() relies on that.
314                  */
315                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
316                         cl_lock_get_trust(lock);
317                         lu_ref_add_atomic(&lock->cll_reference,
318                                           "ast", cfs_current());
319                 } else
320                         olck = NULL;
321         }
322         cfs_spin_unlock(&osc_ast_guard);
323         unlock_res_and_lock(dlm_lock);
324         return olck;
325 }
326
327 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
328 {
329         struct cl_lock *lock;
330
331         lock = olck->ols_cl.cls_lock;
332         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
333         cl_lock_put(env, lock);
334 }
335
336 /**
337  * Updates object attributes from a lock value block (lvb) received together
338  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
339  * logic.
340  *
341  * This can be optimized to not update attributes when lock is a result of a
342  * local match.
343  *
344  * Called under lock and resource spin-locks.
345  */
346 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
347                                 int rc)
348 {
349         struct ost_lvb    *lvb;
350         struct cl_object  *obj;
351         struct lov_oinfo  *oinfo;
352         struct cl_attr    *attr;
353         unsigned           valid;
354
355         ENTRY;
356
357         if (!(olck->ols_flags & LDLM_FL_LVB_READY))
358                 RETURN_EXIT;
359
360         lvb   = &olck->ols_lvb;
361         obj   = olck->ols_cl.cls_obj;
362         oinfo = cl2osc(obj)->oo_oinfo;
363         attr  = &osc_env_info(env)->oti_attr;
364         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
365         cl_lvb2attr(attr, lvb);
366
367         cl_object_attr_lock(obj);
368         if (rc == 0) {
369                 struct ldlm_lock  *dlmlock;
370                 __u64 size;
371
372                 dlmlock = olck->ols_lock;
373                 LASSERT(dlmlock != NULL);
374
375                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
376                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
377                 size = lvb->lvb_size;
378                 /* Extend KMS up to the end of this lock and no further
379                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
380                 if (size > dlmlock->l_policy_data.l_extent.end)
381                         size = dlmlock->l_policy_data.l_extent.end + 1;
382                 if (size >= oinfo->loi_kms) {
383                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
384                                    ", kms="LPU64, lvb->lvb_size, size);
385                         valid |= CAT_KMS;
386                         attr->cat_kms = size;
387                 } else {
388                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
389                                    LPU64"; leaving kms="LPU64", end="LPU64,
390                                    lvb->lvb_size, oinfo->loi_kms,
391                                    dlmlock->l_policy_data.l_extent.end);
392                 }
393                 ldlm_lock_allow_match_locked(dlmlock);
394         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
395                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
396                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
397         } else
398                 valid = 0;
399
400         if (valid != 0)
401                 cl_object_attr_set(env, obj, attr, valid);
402
403         cl_object_attr_unlock(obj);
404
405         EXIT;
406 }
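/*
 * Worked example of the KMS clamping above: if the granted extent is
 * [0, 4095] and the returned lvb_size is 10000, size is clamped to
 * l_extent.end + 1 == 4096, so the known minimum size never claims
 * more than the region this lock actually protects.
 */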
407
408 /**
409  * Called when a lock is granted, from an upcall (when server returned a
410  * granted lock), or from completion AST, when server returned a blocked lock.
411  *
412  * Called under lock and resource spin-locks, which are released temporarily
413  * here.
414  */
415 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
416                              struct ldlm_lock *dlmlock, int rc)
417 {
418         struct ldlm_extent   *ext;
419         struct cl_lock       *lock;
420         struct cl_lock_descr *descr;
421
422         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
423
424         ENTRY;
425         if (olck->ols_state < OLS_GRANTED) {
426                 lock  = olck->ols_cl.cls_lock;
427                 ext   = &dlmlock->l_policy_data.l_extent;
428                 descr = &osc_env_info(env)->oti_descr;
429                 descr->cld_obj = lock->cll_descr.cld_obj;
430
431                 /* XXX check that ->l_granted_mode is valid. */
432                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
433                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
434                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
435                 descr->cld_gid   = ext->gid;
436                 /*
437                  * tell upper layers the extent of the lock that was actually
438                  * granted
439                  */
440                 olck->ols_state = OLS_GRANTED;
441                 osc_lock_lvb_update(env, olck, rc);
442
443                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
444                  * to take a semaphore on a parent lock. This is safe, because
445                  * spin-locks are needed to protect consistency of
446                  * dlmlock->l_*_mode and LVB, and we have finished processing
447                  * them. */
448                 unlock_res_and_lock(dlmlock);
449                 cl_lock_modify(env, lock, descr);
450                 cl_lock_signal(env, lock);
451                 LINVRNT(osc_lock_invariant(olck));
452                 lock_res_and_lock(dlmlock);
453         }
454         EXIT;
455 }
456
457 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
458
459 {
460         struct ldlm_lock *dlmlock;
461
462         ENTRY;
463
464         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
465         LASSERT(dlmlock != NULL);
466
467         lock_res_and_lock(dlmlock);
468         cfs_spin_lock(&osc_ast_guard);
469         LASSERT(dlmlock->l_ast_data == olck);
470         LASSERT(olck->ols_lock == NULL);
471         olck->ols_lock = dlmlock;
472         cfs_spin_unlock(&osc_ast_guard);
473
474         /*
475          * The lock might not be granted yet. In this case, the completion ast
476          * (osc_ldlm_completion_ast()) comes later and finishes lock
477          * granting.
478          */
479         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
480                 osc_lock_granted(env, olck, dlmlock, 0);
481         unlock_res_and_lock(dlmlock);
482
483         /*
484          * osc_enqueue_interpret() decrefs asynchronous locks, counter
485          * this.
486          */
487         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
488         olck->ols_hold = 1;
489
490         /* lock reference taken by ldlm_handle2lock_long() is owned by
491          * osc_lock and released in osc_lock_detach() */
492         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
493         olck->ols_has_ref = 1;
494 }
495
496 /**
497  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
498  * received from a server, or after osc_enqueue_base() matched a local DLM
499  * lock.
500  */
501 static int osc_lock_upcall(void *cookie, int errcode)
502 {
503         struct osc_lock         *olck  = cookie;
504         struct cl_lock_slice    *slice = &olck->ols_cl;
505         struct cl_lock          *lock  = slice->cls_lock;
506         struct lu_env           *env;
507         struct cl_env_nest       nest;
508
509         ENTRY;
510         env = cl_env_nested_get(&nest);
511         if (!IS_ERR(env)) {
512                 int rc;
513
514                 cl_lock_mutex_get(env, lock);
515
516                 LASSERT(lock->cll_state >= CLS_QUEUING);
517                 if (olck->ols_state == OLS_ENQUEUED) {
518                         olck->ols_state = OLS_UPCALL_RECEIVED;
519                         rc = ldlm_error2errno(errcode);
520                 } else if (olck->ols_state == OLS_CANCELLED) {
521                         rc = -EIO;
522                 } else {
523                         CERROR("Impossible state: %d\n", olck->ols_state);
524                         LBUG();
525                 }
526                 if (rc) {
527                         struct ldlm_lock *dlmlock;
528
529                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
530                         if (dlmlock != NULL) {
531                                 lock_res_and_lock(dlmlock);
532                                 cfs_spin_lock(&osc_ast_guard);
533                                 LASSERT(olck->ols_lock == NULL);
534                                 dlmlock->l_ast_data = NULL;
535                                 olck->ols_handle.cookie = 0ULL;
536                                 cfs_spin_unlock(&osc_ast_guard);
537                                 ldlm_lock_fail_match_locked(dlmlock);
538                                 unlock_res_and_lock(dlmlock);
539                                 LDLM_LOCK_PUT(dlmlock);
540                         }
541                 } else {
542                         if (olck->ols_glimpse) {
543                                 olck->ols_glimpse = 0;
544                                 olck->ols_agl = 0;
545                         }
546                         osc_lock_upcall0(env, olck);
547                 }
548
549                 /* Error handling, some errors are tolerable. */
550                 if (olck->ols_locklessable && rc == -EUSERS) {
551                         /* This is a tolerable error; turn this lock into
552                          * a lockless lock.
553                          */
554                         osc_object_set_contended(cl2osc(slice->cls_obj));
555                         LASSERT(slice->cls_ops == &osc_lock_ops);
556
557                         /* Change this lock to ldlmlock-less lock. */
558                         osc_lock_to_lockless(env, olck, 1);
559                         olck->ols_state = OLS_GRANTED;
560                         rc = 0;
561                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
562                         osc_lock_lvb_update(env, olck, rc);
563                         cl_lock_delete(env, lock);
564                         /* Hide the error. */
565                         rc = 0;
566                 }
567
568                 if (rc == 0) {
569                         cl_lock_signal(env, lock);
570                         /* del user for lock upcall cookie */
571                         cl_unuse_try(env, lock);
572                 } else {
573                         /* del user for lock upcall cookie */
574                         cl_lock_user_del(env, lock);
575                         cl_lock_error(env, lock, rc);
576                 }
577
578                 cl_lock_mutex_put(env, lock);
579
580                 /* release cookie reference, acquired by osc_lock_enqueue() */
581                 lu_ref_del(&lock->cll_reference, "upcall", lock);
582                 cl_lock_put(env, lock);
583
584                 cl_env_nested_put(&nest, env);
585         } else
586                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
587                 LBUG();
588         RETURN(errcode);
589 }
590
591 /**
592  * Core of osc_dlm_blocking_ast() logic.
593  */
594 static void osc_lock_blocking(const struct lu_env *env,
595                               struct ldlm_lock *dlmlock,
596                               struct osc_lock *olck, int blocking)
597 {
598         struct cl_lock *lock = olck->ols_cl.cls_lock;
599
600         LASSERT(olck->ols_lock == dlmlock);
601         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
602         LASSERT(!osc_lock_is_lockless(olck));
603
604         /*
605          * The lock might still be addref-ed here if, e.g., a blocking ast
606          * is sent for a failed lock.
607          */
608         osc_lock_unhold(olck);
609
610         if (blocking && olck->ols_state < OLS_BLOCKED)
611                 /*
612                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
613                  * because it recursively re-enters osc_lock_blocking(), with
614                  * the state set to OLS_CANCELLED.
615                  */
616                 olck->ols_state = OLS_BLOCKED;
617         /*
618          * cancel and destroy lock at least once no matter how blocking ast is
619          * entered (see comment above osc_ldlm_blocking_ast() for use
620          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
621          */
622         cl_lock_cancel(env, lock);
623         cl_lock_delete(env, lock);
624 }
625
626 /**
627  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
628  * and ldlm_lock caches.
629  */
630 static int osc_dlm_blocking_ast0(const struct lu_env *env,
631                                  struct ldlm_lock *dlmlock,
632                                  void *data, int flag)
633 {
634         struct osc_lock *olck;
635         struct cl_lock  *lock;
636         int result;
637         int cancel;
638
639         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
640
641         cancel = 0;
642         olck = osc_ast_data_get(dlmlock);
643         if (olck != NULL) {
644                 lock = olck->ols_cl.cls_lock;
645                 cl_lock_mutex_get(env, lock);
646                 LINVRNT(osc_lock_invariant(olck));
647                 if (olck->ols_ast_wait) {
648                         /* wake up osc_lock_use() */
649                         cl_lock_signal(env, lock);
650                         olck->ols_ast_wait = 0;
651                 }
652                 /*
653                  * Lock might have been canceled while this thread was
654                  * sleeping on the lock mutex, but olck is pinned in memory.
655                  */
656                 if (olck == dlmlock->l_ast_data) {
657                         /*
658                          * NOTE: DLM sends blocking AST's for failed locks
659                          *       (that are still in pre-OLS_GRANTED state)
660                          *       too, and they have to be canceled, otherwise
661                          *       the DLM lock is never destroyed and gets
662                          *       stuck in memory.
663                          *
664                          *       Alternatively, ldlm_cli_cancel() can be
665                          *       called here directly for osc_locks with
666                          *       ols_state < OLS_GRANTED to maintain an
667                          *       invariant that ->clo_cancel() is only called
668                          *       for locks that were granted.
669                          */
670                         LASSERT(data == olck);
671                         osc_lock_blocking(env, dlmlock,
672                                           olck, flag == LDLM_CB_BLOCKING);
673                 } else
674                         cancel = 1;
675                 cl_lock_mutex_put(env, lock);
676                 osc_ast_data_put(env, olck);
677         } else
678                 /*
679                  * DLM lock exists, but there is no cl_lock attached to it.
680                  * This is a `normal' race. cl_object and its cl_lock's can be
681                  * removed by memory pressure, together with all pages.
682                  */
683                 cancel = (flag == LDLM_CB_BLOCKING);
684
685         if (cancel) {
686                 struct lustre_handle *lockh;
687
688                 lockh = &osc_env_info(env)->oti_handle;
689                 ldlm_lock2handle(dlmlock, lockh);
690                 result = ldlm_cli_cancel(lockh);
691         } else
692                 result = 0;
693         return result;
694 }
695
696 /**
697  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
698  * some other lock, or is canceled. This function is installed as a
699  * ldlm_lock::l_blocking_ast() for client extent locks.
700  *
701  * Control flow is tricky, because ldlm uses the same call-back
702  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
703  *
704  * \param dlmlock lock for which ast occurred.
705  *
706  * \param new description of a conflicting lock in case of blocking ast.
707  *
708  * \param data value of dlmlock->l_ast_data
709  *
710  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
711  *             cancellation and blocking ast's.
712  *
713  * Possible use cases:
714  *
715  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
716  *       lock due to lock lru pressure, or explicit user request to purge
717  *       locks.
718  *
719  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
720  *       us that dlmlock conflicts with another lock that some client is
721  *       enqueuing. The lock is canceled.
722  *
723  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
724  *             ldlm_cli_cancel() that calls
725  *
726  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
727  *
728  *             recursively entering osc_ldlm_blocking_ast().
729  *
730  *     - client cancels the lock voluntarily (e.g., as part of early cancellation):
731  *
732  *           cl_lock_cancel()->
733  *             osc_lock_cancel()->
734  *               ldlm_cli_cancel()->
735  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
736  *
737  */
738 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
739                                  struct ldlm_lock_desc *new, void *data,
740                                  int flag)
741 {
742         struct lu_env     *env;
743         struct cl_env_nest nest;
744         int                result;
745
746         /*
747          * This can be called in the context of outer IO, e.g.,
748          *
749          *     cl_enqueue()->...
750          *       ->osc_enqueue_base()->...
751          *         ->ldlm_prep_elc_req()->...
752          *           ->ldlm_cancel_callback()->...
753          *             ->osc_ldlm_blocking_ast()
754          *
755          * A new environment has to be created to avoid corrupting the outer context.
756          */
757         env = cl_env_nested_get(&nest);
758         if (!IS_ERR(env)) {
759                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
760                 cl_env_nested_put(&nest, env);
761         } else {
762                 result = PTR_ERR(env);
763                 /*
764                  * XXX This should never happen, as cl_lock is
765                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
766                  * should be used.
767                  */
768                 LBUG();
769         }
770         if (result != 0) {
771                 if (result == -ENODATA)
772                         result = 0;
773                 else
774                         CERROR("BAST failed: %d\n", result);
775         }
776         return result;
777 }
778
779 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
780                                    int flags, void *data)
781 {
782         struct cl_env_nest nest;
783         struct lu_env     *env;
784         struct osc_lock   *olck;
785         struct cl_lock    *lock;
786         int result;
787         int dlmrc;
788
789         /* first, do dlm part of the work */
790         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
791         /* then, notify cl_lock */
792         env = cl_env_nested_get(&nest);
793         if (!IS_ERR(env)) {
794                 olck = osc_ast_data_get(dlmlock);
795                 if (olck != NULL) {
796                         lock = olck->ols_cl.cls_lock;
797                         cl_lock_mutex_get(env, lock);
798                         /*
799                          * ldlm_handle_cp_callback() copied LVB from request
800                          * to lock->l_lvb_data, store it in osc_lock.
801                          */
802                         LASSERT(dlmlock->l_lvb_data != NULL);
803                         lock_res_and_lock(dlmlock);
804                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
805                         if (olck->ols_lock == NULL) {
806                                 /*
807                                  * upcall (osc_lock_upcall()) hasn't yet been
808                                  * called. Do nothing now, upcall will bind
809                                  * olck to dlmlock and signal the waiters.
810                                  *
811                                  * This maintains an invariant that osc_lock
812                                  * and ldlm_lock are always bound when
813                                  * osc_lock is in OLS_GRANTED state.
814                                  */
815                         } else if (dlmlock->l_granted_mode ==
816                                    dlmlock->l_req_mode) {
817                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
818                         }
819                         unlock_res_and_lock(dlmlock);
820
821                         if (dlmrc != 0) {
822                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
823                                               "dlmlock returned %d\n", dlmrc);
824                                 cl_lock_error(env, lock, dlmrc);
825                         }
826                         cl_lock_mutex_put(env, lock);
827                         osc_ast_data_put(env, olck);
828                         result = 0;
829                 } else
830                         result = -ELDLM_NO_LOCK_DATA;
831                 cl_env_nested_put(&nest, env);
832         } else
833                 result = PTR_ERR(env);
834         return dlmrc ?: result;
835 }
836
837 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
838 {
839         struct ptlrpc_request  *req  = data;
840         struct osc_lock        *olck;
841         struct cl_lock         *lock;
842         struct cl_object       *obj;
843         struct cl_env_nest      nest;
844         struct lu_env          *env;
845         struct ost_lvb         *lvb;
846         struct req_capsule     *cap;
847         int                     result;
848
849         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
850
851         env = cl_env_nested_get(&nest);
852         if (!IS_ERR(env)) {
853                 /*
854                  * osc_ast_data_get() has to go after the environment is
855                  * allocated, because osc_ast_data_get() acquires a
856                  * reference to a lock, and that reference can only be
857                  * released with an environment.
858                  */
859                 olck = osc_ast_data_get(dlmlock);
860                 if (olck != NULL) {
861                         lock = olck->ols_cl.cls_lock;
862                         cl_lock_mutex_get(env, lock);
863                         cap = &req->rq_pill;
864                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
865                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
866                                              sizeof *lvb);
867                         result = req_capsule_server_pack(cap);
868                         if (result == 0) {
869                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
870                                 obj = lock->cll_descr.cld_obj;
871                                 result = cl_object_glimpse(env, obj, lvb);
872                         }
873                         cl_lock_mutex_put(env, lock);
874                         osc_ast_data_put(env, olck);
875                 } else {
876                         /*
877                          * These errors are normal races, so we don't want to
878                          * fill the console with messages by calling
879                          * ptlrpc_error()
880                          */
881                         lustre_pack_reply(req, 1, NULL, NULL);
882                         result = -ELDLM_NO_LOCK_DATA;
883                 }
884                 cl_env_nested_put(&nest, env);
885         } else
886                 result = PTR_ERR(env);
887         req->rq_status = result;
888         return result;
889 }
890
891 static unsigned long osc_lock_weigh(const struct lu_env *env,
892                                     const struct cl_lock_slice *slice)
893 {
894         /*
895          * don't need to grab coh_page_guard since we don't care about the
896          * exact number of pages.
897          */
898         return cl_object_header(slice->cls_obj)->coh_pages;
899 }
900
901 /**
902  * Get the weight of dlm lock for early cancellation.
903  *
 904  * XXX: it should return the number of pages covered by this \a dlmlock.
905  */
906 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
907 {
908         struct cl_env_nest       nest;
909         struct lu_env           *env;
910         struct osc_lock         *lock;
911         struct cl_lock          *cll;
912         unsigned long            weight;
913         ENTRY;
914
915         cfs_might_sleep();
916         /*
917          * osc_ldlm_weigh_ast has a complex context since it might be called
918          * because of lock canceling or from user input, so we have to make
919          * a new environment for it. It would probably be safe to reuse the
920          * upper context, because cl_lock_put() doesn't modify environment
921          * variables, but play it safe and allocate a fresh one.
922          */
923         env = cl_env_nested_get(&nest);
924         if (IS_ERR(env))
925                 /* Mostly because of lack of memory; tend to eliminate this lock */
926                 RETURN(0);
927
928         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
929         lock = osc_ast_data_get(dlmlock);
930         if (lock == NULL) {
931                 /* cl_lock was destroyed because of memory pressure.
932                  * It is reasonable to assign this type of lock
933                  * a lower cost.
934                  */
935                 GOTO(out, weight = 0);
936         }
937
938         cll = lock->ols_cl.cls_lock;
939         cl_lock_mutex_get(env, cll);
940         weight = cl_lock_weigh(env, cll);
941         cl_lock_mutex_put(env, cll);
942         osc_ast_data_put(env, lock);
943         EXIT;
944
945 out:
946         cl_env_nested_put(&nest, env);
947         return weight;
948 }
949
950 static void osc_lock_build_einfo(const struct lu_env *env,
951                                  const struct cl_lock *clock,
952                                  struct osc_lock *lock,
953                                  struct ldlm_enqueue_info *einfo)
954 {
955         enum cl_lock_mode mode;
956
957         mode = clock->cll_descr.cld_mode;
958         if (mode == CLM_PHANTOM)
959                 /*
960                  * For now, enqueue all glimpse locks in read mode. In the
961                  * future, client might choose to enqueue LCK_PW lock for
962                  * glimpse on a file opened for write.
963                  */
964                 mode = CLM_READ;
965
966         einfo->ei_type   = LDLM_EXTENT;
967         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
968         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
969         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
970         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
971         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
972         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
973 }
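/*
 * The einfo filled in here is later handed to osc_enqueue_base() by
 * osc_lock_enqueue(); ei_cbdata ends up in dlmlock->l_ast_data, which
 * is what osc_ast_data_get() reads back in the ASTs above.
 */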
974
975 /**
976  * Determine if the lock should be converted into a lockless lock.
977  *
978  * Steps to check:
979  * - if the lock has an explicite requirment for a non-lockless lock;
 980  * - whether the lock has an explicit requirement for a non-lockless lock;
 981  * - the io lock request type ci_lockreq;
 982  * - send the enqueue rpc to ost to make the further decision;
 983  * - special treatment for truncate lockless locks.
984  *  Additional policy can be implemented here, e.g., never do lockless-io
985  *  for large extents.
986  */
987 static void osc_lock_to_lockless(const struct lu_env *env,
988                                  struct osc_lock *ols, int force)
989 {
990         struct cl_lock_slice *slice = &ols->ols_cl;
991         struct cl_lock *lock        = slice->cls_lock;
992
993         LASSERT(ols->ols_state == OLS_NEW ||
994                 ols->ols_state == OLS_UPCALL_RECEIVED);
995
996         if (force) {
997                 ols->ols_locklessable = 1;
998                 LASSERT(cl_lock_is_mutexed(lock));
999                 slice->cls_ops = &osc_lock_lockless_ops;
1000         } else {
1001                 struct osc_io *oio     = osc_env_io(env);
1002                 struct cl_io  *io      = oio->oi_cl.cis_io;
1003                 struct cl_object *obj  = slice->cls_obj;
1004                 struct osc_object *oob = cl2osc(obj);
1005                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1006                 struct obd_connect_data *ocd;
1007
1008                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1009                         io->ci_lockreq == CILR_MAYBE ||
1010                         io->ci_lockreq == CILR_NEVER);
1011
1012                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1013                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
1014                                 (io->ci_lockreq == CILR_MAYBE) &&
1015                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1016                 if (io->ci_lockreq == CILR_NEVER ||
1017                         /* lockless IO */
1018                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1019                         /* lockless truncate */
1020                     (cl_io_is_trunc(io) &&
1021                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1022                       osd->od_lockless_truncate)) {
1023                         ols->ols_locklessable = 1;
1024                         slice->cls_ops = &osc_lock_lockless_ops;
1025                 }
1026         }
1027         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1028 }
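/*
 * Example of the non-forced path above: a CILR_MAYBE request against a
 * server that advertises OBD_CONNECT_SRVLOCK makes the lock
 * locklessable; if the object is also known to be contended
 * (osc_object_is_contended()), the slice is switched to
 * osc_lock_lockless_ops and no dlm lock is taken for the IO.
 */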
1029
1030 static int osc_lock_compatible(const struct osc_lock *qing,
1031                                const struct osc_lock *qed)
1032 {
1033         enum cl_lock_mode qing_mode;
1034         enum cl_lock_mode qed_mode;
1035
1036         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1037         if (qed->ols_glimpse &&
1038             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1039                 return 1;
1040
1041         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1042         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1043 }
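/*
 * I.e., two CLM_READ locks are always compatible, and a queued glimpse
 * lock is treated as compatible once its upcall has been received, or
 * whenever the enqueuing lock is a read lock.
 */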
1044
1045 /**
1046  * Cancel all conflicting locks and wait for them to be destroyed.
1047  *
1048  * This function is used for two purposes:
1049  *
 1050  *     - to cancel all conflicting locks early, before starting IO, and
 1051  *
 1052  *     - to guarantee that pages added to the page cache by lockless IO are never
1053  *       covered by locks other than lockless IO lock, and, hence, are not
1054  *       visible to other threads.
1055  */
1056 static int osc_lock_enqueue_wait(const struct lu_env *env,
1057                                  const struct osc_lock *olck)
1058 {
1059         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1060         struct cl_lock_descr    *descr   = &lock->cll_descr;
1061         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1062         struct cl_lock          *scan;
1063         struct cl_lock          *conflict= NULL;
1064         int lockless                     = osc_lock_is_lockless(olck);
1065         int rc                           = 0;
1066         ENTRY;
1067
1068         LASSERT(cl_lock_is_mutexed(lock));
1069
1070         /* let a glimpse lock enqueue anyway, because we actually
1071          * don't need to cancel any conflicting locks for it. */
1072         if (olck->ols_glimpse)
1073                 return 0;
1074
1075         cfs_spin_lock(&hdr->coh_lock_guard);
1076         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1077                 struct cl_lock_descr *cld = &scan->cll_descr;
1078                 const struct osc_lock *scan_ols;
1079
1080                 if (scan == lock)
1081                         break;
1082
1083                 if (scan->cll_state < CLS_QUEUING ||
1084                     scan->cll_state == CLS_FREEING ||
1085                     cld->cld_start > descr->cld_end ||
1086                     cld->cld_end < descr->cld_start)
1087                         continue;
1088
1089                 /* overlapped and living locks. */
1090
1091                 /* We're not supposed to give up group lock. */
1092                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1093                         LASSERT(descr->cld_mode != CLM_GROUP ||
1094                                 descr->cld_gid != scan->cll_descr.cld_gid);
1095                         continue;
1096                 }
1097
1098                 scan_ols = osc_lock_at(scan);
1099
1100                 /* We need to cancel the compatible locks if we're enqueuing
1101                  * a lockless lock, for example:
1102                  * imagine that client has PR lock on [0, 1000], and thread T0
1103                  * is doing lockless IO in [500, 1500] region. Concurrent
1104                  * thread T1 can see lockless data in [500, 1000], which is
1105                  * wrong, because these data are possibly stale. */
1106                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1107                         continue;
1108
1109                 /* Now @scan is conflicting with @lock; this means the current
1110                  * thread has to sleep until @scan is destroyed. */
1111                 if (scan_ols->ols_owner == osc_env_io(env)) {
1112                         CERROR("DEADLOCK POSSIBLE!\n");
1113                         CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
1114                         CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
1115                         libcfs_debug_dumpstack(NULL);
1116                 }
1117                 cl_lock_get_trust(scan);
1118                 conflict = scan;
1119                 break;
1120         }
1121         cfs_spin_unlock(&hdr->coh_lock_guard);
1122
1123         if (conflict) {
1124                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1125                         /* we want a group lock but a previous lock request
1126                          * conflicts; we do not wait but return 0 so the
1127                          * request is sent to the server
1128                          */
1129                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1130                                            "with %p, no wait, send to server\n",
1131                                lock, conflict);
1132                         cl_lock_put(env, conflict);
1133                         rc = 0;
1134                 } else {
1135                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1136                                            "will wait\n",
1137                                lock, conflict);
1138                         LASSERT(lock->cll_conflict == NULL);
1139                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1140                                    lock);
1141                         lock->cll_conflict = conflict;
1142                         rc = CLO_WAIT;
1143                 }
1144         }
1145         RETURN(rc);
1146 }
1147
1148 /**
1149  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1150  * layer. This initiates ldlm enqueue:
1151  *
1152  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1153  *
1154  *     - calls osc_enqueue_base() to do actual enqueue.
1155  *
1156  * osc_enqueue_base() is supplied with an upcall function that is executed
1157  * when lock is received either after a local cached ldlm lock is matched, or
1158  * when a reply from the server is received.
1159  *
1160  * This function does not wait for the network communication to complete.
1161  */
1162 static int osc_lock_enqueue(const struct lu_env *env,
1163                             const struct cl_lock_slice *slice,
1164                             struct cl_io *unused, __u32 enqflags)
1165 {
1166         struct osc_lock          *ols     = cl2osc_lock(slice);
1167         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1168         int result;
1169         ENTRY;
1170
1171         LASSERT(cl_lock_is_mutexed(lock));
1172         LASSERTF(ols->ols_state == OLS_NEW,
1173                  "Impossible state: %d\n", ols->ols_state);
1174
1175         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1176         if (enqflags & CEF_AGL) {
1177                 ols->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1178                 ols->ols_agl = 1;
1179         }
1180         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1181                 ols->ols_glimpse = 1;
1182         if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
1183                 /* try to convert this lock to a lockless lock */
1184                 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1185
1186         result = osc_lock_enqueue_wait(env, ols);
1187         if (result == 0) {
1188                 if (!osc_lock_is_lockless(ols)) {
1189                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1190                         struct osc_thread_info   *info = osc_env_info(env);
1191                         struct ldlm_res_id       *resname = &info->oti_resname;
1192                         ldlm_policy_data_t       *policy = &info->oti_policy;
1193                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1194
1195                         if (ols->ols_locklessable)
1196                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1197
1198                         /* a reference for lock, passed as an upcall cookie */
1199                         cl_lock_get(lock);
1200                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1201                         /* a user for lock also */
1202                         cl_lock_user_add(env, lock);
1203                         ols->ols_state = OLS_ENQUEUED;
1204
1205                         /*
1206                          * XXX: this is possible blocking point as
1207                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1208                          * LDLM_CP_CALLBACK.
1209                          */
1210                         osc_lock_build_res(env, obj, resname);
1211                         osc_lock_build_policy(env, lock, policy);
1212                         result = osc_enqueue_base(osc_export(obj), resname,
1213                                           &ols->ols_flags, policy,
1214                                           &ols->ols_lvb,
1215                                           obj->oo_oinfo->loi_kms_valid,
1216                                           osc_lock_upcall,
1217                                           ols, einfo, &ols->ols_handle,
1218                                           PTLRPCD_SET, 1, ols->ols_agl);
1219                         if (result != 0) {
1220                                 cl_lock_user_del(env, lock);
1221                                 lu_ref_del(&lock->cll_reference,
1222                                            "upcall", lock);
1223                                 cl_lock_put(env, lock);
1224                                 if (unlikely(result == -ECANCELED)) {
1225                                         ols->ols_state = OLS_NEW;
1226                                         result = 0;
1227                                 }
1228                         }
1229                 } else {
1230                         ols->ols_state = OLS_GRANTED;
1231                         ols->ols_owner = osc_env_io(env);
1232                 }
1233         }
1234         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1235         RETURN(result);
1236 }
1237
1238 static int osc_lock_wait(const struct lu_env *env,
1239                          const struct cl_lock_slice *slice)
1240 {
1241         struct osc_lock *olck = cl2osc_lock(slice);
1242         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1243
1244         LINVRNT(osc_lock_invariant(olck));
1245
1246         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
1247                 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1248                         return 0;
1249                 } else if (olck->ols_agl) {
1250                         olck->ols_state = OLS_NEW;
1251                 } else {
1252                         LASSERT(lock->cll_error);
1253                         return lock->cll_error;
1254                 }
1255         }
1256
1257         if (olck->ols_state == OLS_NEW) {
1258                 if (lock->cll_descr.cld_enq_flags & CEF_NO_REENQUEUE) {
1259                         return -ENAVAIL;
1260                 } else {
1261                         int rc;
1262
1263                         LASSERT(olck->ols_agl);
1264
1265                         rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC |
1266                                                                 CEF_MUST);
1267                         if (rc != 0)
1268                                 return rc;
1269                         else
1270                                 return CLO_REENQUEUED;
1271                 }
1272         }
1273
1274         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1275                      lock->cll_error == 0, olck->ols_lock != NULL));
1276
1277         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1278 }
1279
1280 /**
1281  * An implementation of cl_lock_operations::clo_use() method that pins cached
1282  * lock.
1283  */
1284 static int osc_lock_use(const struct lu_env *env,
1285                         const struct cl_lock_slice *slice)
1286 {
1287         struct osc_lock *olck = cl2osc_lock(slice);
1288         int rc;
1289
1290         LASSERT(!olck->ols_hold);
1291
1292         /*
1293          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1294          * flag is not set. This protects us from a concurrent blocking ast.
1295          */
1296         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1297         if (rc == 0) {
1298                 olck->ols_hold = 1;
1299                 olck->ols_state = OLS_GRANTED;
1300         } else {
1301                 struct cl_lock *lock;
1302
1303                 /*
1304                  * Lock is being cancelled somewhere within
1305                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1306                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1307                  * cl_lock mutex.
1308                  */
1309                 lock = slice->cls_lock;
1310                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1311                 LASSERT(lock->cll_users > 0);
1312                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1313                  * lock.*/
1314                 olck->ols_ast_wait = 1;
1315                 rc = CLO_WAIT;
1316         }
1317         return rc;
1318 }
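/*
 * In the failure branch above the dlm lock is being cancelled
 * concurrently; returning CLO_WAIT with ols_ast_wait set lets
 * osc_dlm_blocking_ast0() signal the cl_lock once it gets the mutex
 * (see "wake up osc_lock_use()" there), after which the caller retries.
 */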
1319
1320 static int osc_lock_flush(struct osc_lock *ols, int discard)
1321 {
1322         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1323         struct cl_env_nest    nest;
1324         struct lu_env        *env;
1325         int result = 0;
1326
1327         env = cl_env_nested_get(&nest);
1328         if (!IS_ERR(env)) {
1329                 result = cl_lock_page_out(env, lock, discard);
1330                 cl_env_nested_put(&nest, env);
1331         } else
1332                 result = PTR_ERR(env);
1333         if (result == 0) {
1334                 ols->ols_flush = 1;
1335                 LINVRNT(!osc_lock_has_pages(ols));
1336         }
1337         return result;
1338 }
1339
1340 /**
1341  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
 1342  * called (as part of cl_lock_cancel()) when a lock is canceled either
 1343  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
 1344  * conflict with some other lock somewhere in the cluster.  This function does the
1345  * following:
1346  *
1347  *     - invalidates all pages protected by this lock (after sending dirty
1348  *       ones to the server, as necessary);
1349  *
1350  *     - decref's underlying ldlm lock;
1351  *
1352  *     - cancels ldlm lock (ldlm_cli_cancel()).
1353  */
1354 static void osc_lock_cancel(const struct lu_env *env,
1355                             const struct cl_lock_slice *slice)
1356 {
1357         struct cl_lock   *lock    = slice->cls_lock;
1358         struct osc_lock  *olck    = cl2osc_lock(slice);
1359         struct ldlm_lock *dlmlock = olck->ols_lock;
1360         int               result  = 0;
1361         int               discard;
1362
1363         LASSERT(cl_lock_is_mutexed(lock));
1364         LINVRNT(osc_lock_invariant(olck));
1365
1366         if (dlmlock != NULL) {
1367                 int do_cancel;
1368
1369                 discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
1370                 result = osc_lock_flush(olck, discard);
1371                 osc_lock_unhold(olck);
1372
1373                 lock_res_and_lock(dlmlock);
1374                 /* Now that we are the only user of the dlm read/write
1375                  * reference, ->l_readers + ->l_writers should normally be
1376                  * zero. However, there is a corner case.
1377                  * See bug 18829 for details. */
1378                 do_cancel = (dlmlock->l_readers == 0 &&
1379                              dlmlock->l_writers == 0);
1380                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1381                 unlock_res_and_lock(dlmlock);
1382                 if (do_cancel)
1383                         result = ldlm_cli_cancel(&olck->ols_handle);
1384                 if (result < 0)
1385                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1386                                       "lock %p cancel failed with error %d\n",
1387                                       lock, result);
1388         }
1389         olck->ols_state = OLS_CANCELLED;
1390         olck->ols_flags &= ~LDLM_FL_LVB_READY;
1391         osc_lock_detach(env, olck);
1392 }
1393
1394 #ifdef INVARIANT_CHECK
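     /**
      * Page gang lookup callback used by osc_lock_has_pages(): a page is
      * acceptable only if \a lock is a read lock and the page is covered by
      * some other lock; otherwise the offending page is logged and the scan
      * is aborted.
      */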
1395 static int check_cb(const struct lu_env *env, struct cl_io *io,
1396                     struct cl_page *page, void *cbdata)
1397 {
1398         struct cl_lock *lock = cbdata;
1399
1400         if (lock->cll_descr.cld_mode == CLM_READ) {
1401                 struct cl_lock *tmp;
1402                 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1403                                      page, lock, 1, 0);
1404                 if (tmp != NULL) {
1405                         cl_lock_put(env, tmp);
1406                         return CLP_GANG_OKAY;
1407                 }
1408         }
1409
1410         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1411         CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1412         return CLP_GANG_ABORT;
1413 }
1414
1415 /**
1416  * Returns true iff there are pages under \a olck not protected by other
1417  * locks.
1418  */
1419 static int osc_lock_has_pages(struct osc_lock *olck)
1420 {
1421         struct cl_lock       *lock;
1422         struct cl_lock_descr *descr;
1423         struct cl_object     *obj;
1424         struct osc_object    *oob;
1425         struct cl_env_nest    nest;
1426         struct cl_io         *io;
1427         struct lu_env        *env;
1428         int                   result;
1429
1430         env = cl_env_nested_get(&nest);
1431         if (IS_ERR(env))
1432                 return 0;
1433
1434         obj   = olck->ols_cl.cls_obj;
1435         oob   = cl2osc(obj);
1436         io    = &oob->oo_debug_io;
1437         lock  = olck->ols_cl.cls_lock;
1438         descr = &lock->cll_descr;
1439
1440         cfs_mutex_lock(&oob->oo_debug_mutex);
1441
1442         io->ci_obj = cl_object_top(obj);
1443         cl_io_init(env, io, CIT_MISC, io->ci_obj);
1444         do {
1445                 result = cl_page_gang_lookup(env, obj, io,
1446                                              descr->cld_start, descr->cld_end,
1447                                              check_cb, (void *)lock);
1448                 if (result == CLP_GANG_ABORT)
1449                         break;
1450                 if (result == CLP_GANG_RESCHED)
1451                         cfs_cond_resched();
1452         } while (result != CLP_GANG_OKAY);
1453         cl_io_fini(env, io);
1454         cfs_mutex_unlock(&oob->oo_debug_mutex);
1455         cl_env_nested_put(&nest, env);
1456
1457         return (result == CLP_GANG_ABORT);
1458 }
1459 #else
1460 static int osc_lock_has_pages(struct osc_lock *olck)
1461 {
1462         return 0;
1463 }
1464 #endif /* INVARIANT_CHECK */
1465
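     /**
      * Implements cl_lock_operations::clo_delete() method for osc layer:
      * glimpse locks hold neither a dlm lock nor a reference, so they are
      * skipped; otherwise the dlm reference is released and the lock is
      * detached.
      */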
1466 static void osc_lock_delete(const struct lu_env *env,
1467                             const struct cl_lock_slice *slice)
1468 {
1469         struct osc_lock *olck;
1470
1471         olck = cl2osc_lock(slice);
1472         if (olck->ols_glimpse) {
1473                 LASSERT(!olck->ols_hold);
1474                 LASSERT(!olck->ols_lock);
1475                 return;
1476         }
1477
1478         LINVRNT(osc_lock_invariant(olck));
1479         LINVRNT(!osc_lock_has_pages(olck));
1480
1481         osc_lock_unhold(olck);
1482         osc_lock_detach(env, olck);
1483 }
1484
1485 /**
1486  * Implements cl_lock_operations::clo_state() method for osc layer.
1487  *
1488  * Maintains osc_lock::ols_owner field.
1489  *
1490  * This assumes that a lock always enters CLS_HELD (from some other state) in
1491  * the same IO context as the one that requested the lock. This should not be
1492  * a problem, because the context is by definition shared by all activity
1493  * pertaining to the same high-level IO.
1494  */
1495 static void osc_lock_state(const struct lu_env *env,
1496                            const struct cl_lock_slice *slice,
1497                            enum cl_lock_state state)
1498 {
1499         struct osc_lock *lock = cl2osc_lock(slice);
1500
1501         /*
1502          * XXX multiple io contexts can use the lock at the same time.
1503          */
1504         LINVRNT(osc_lock_invariant(lock));
1505         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1506                 struct osc_io *oio = osc_env_io(env);
1507
1508                 LASSERT(lock->ols_owner == NULL);
1509                 lock->ols_owner = oio;
1510         } else if (state != CLS_HELD)
1511                 lock->ols_owner = NULL;
1512 }
1513
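     /**
      * Implements cl_lock_operations::clo_print() method for osc layer:
      * prints the underlying ldlm lock, flags, handle cookie, state and
      * owner, followed by the lock value block.
      */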
1514 static int osc_lock_print(const struct lu_env *env, void *cookie,
1515                           lu_printer_t p, const struct cl_lock_slice *slice)
1516 {
1517         struct osc_lock *lock = cl2osc_lock(slice);
1518
1519         /*
1520          * XXX print ldlm lock and einfo properly.
1521          */
1522         (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1523              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1524              lock->ols_state, lock->ols_owner);
1525         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1526         return 0;
1527 }
1528
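     /**
      * Implements cl_lock_operations::clo_fits_into() method for osc layer:
      * decides whether an existing osc lock can be reused for the \a need
      * descriptor, taking the enqueue flags and the lock state into account.
      */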
1529 static int osc_lock_fits_into(const struct lu_env *env,
1530                               const struct cl_lock_slice *slice,
1531                               const struct cl_lock_descr *need,
1532                               const struct cl_io *io)
1533 {
1534         struct osc_lock *ols = cl2osc_lock(slice);
1535
1536         if (need->cld_enq_flags & CEF_NEVER)
1537                 return 0;
1538
1539         if (need->cld_mode == CLM_PHANTOM) {
1540                 if (ols->ols_agl)
1541                         return !(ols->ols_state > OLS_RELEASED);
1542
1543                 /*
1544                  * Note: a QUEUED lock cannot be matched here, otherwise
1545                  * it might cause deadlocks.
1546                  * Example, in a read process:
1547                  * P1: enqueues a read lock, creating sublock1.
1548                  * P2: enqueues a write lock, creating sublock2 (which
1549                  *     conflicts with sublock1).
1550                  * P1: the read lock is granted.
1551                  * P1: enqueues a glimpse lock (while holding sublock1 for
1552                  *     read), which matches sublock2 and waits for sublock2
1553                  *     to be granted. But sublock2 can never be granted,
1554                  *     because P1 will not release sublock1. Deadlock!
1555                  */
1556                 if (ols->ols_state < OLS_GRANTED ||
1557                     ols->ols_state > OLS_RELEASED)
1558                         return 0;
1559         } else if (need->cld_enq_flags & CEF_MUST) {
1560                 /*
1561                  * If the lock has never been enqueued, it cannot be
1562                  * matched, because the enqueue process brings in a lot of
1563                  * information that is used to determine properties such
1564                  * as lockless, CEF_MUST, etc.
1565                  */
1566                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1567                     ols->ols_locklessable)
1568                         return 0;
1569         }
1570         return 1;
1571 }
1572
1573 static const struct cl_lock_operations osc_lock_ops = {
1574         .clo_fini    = osc_lock_fini,
1575         .clo_enqueue = osc_lock_enqueue,
1576         .clo_wait    = osc_lock_wait,
1577         .clo_unuse   = osc_lock_unuse,
1578         .clo_use     = osc_lock_use,
1579         .clo_delete  = osc_lock_delete,
1580         .clo_state   = osc_lock_state,
1581         .clo_cancel  = osc_lock_cancel,
1582         .clo_weigh   = osc_lock_weigh,
1583         .clo_print   = osc_lock_print,
1584         .clo_fits_into = osc_lock_fits_into,
1585 };
1586
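     /**
      * Implements cl_lock_operations::clo_unuse() method for lockless locks:
      * the lock is cancelled and deleted as soon as it is no longer in use.
      */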
1587 static int osc_lock_lockless_unuse(const struct lu_env *env,
1588                                    const struct cl_lock_slice *slice)
1589 {
1590         struct osc_lock *ols = cl2osc_lock(slice);
1591         struct cl_lock *lock = slice->cls_lock;
1592
1593         LASSERT(ols->ols_state == OLS_GRANTED);
1594         LINVRNT(osc_lock_invariant(ols));
1595
1596         cl_lock_cancel(env, lock);
1597         cl_lock_delete(env, lock);
1598         return 0;
1599 }
1600
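     /**
      * Implements cl_lock_operations::clo_cancel() method for lockless
      * locks: flushes the pages covered by the lock (without discarding
      * them) and moves the lock to OLS_CANCELLED; no ldlm lock is cancelled.
      */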
1601 static void osc_lock_lockless_cancel(const struct lu_env *env,
1602                                      const struct cl_lock_slice *slice)
1603 {
1604         struct osc_lock   *ols  = cl2osc_lock(slice);
1605         int result;
1606
1607         result = osc_lock_flush(ols, 0);
1608         if (result)
1609                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1610                        ols, result);
1611         ols->ols_state = OLS_CANCELLED;
1612 }
1613
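     /**
      * Implements cl_lock_operations::clo_wait() method for lockless locks:
      * the enqueue upcall has already been received at this point, so just
      * report any error recorded on the cl_lock.
      */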
1614 static int osc_lock_lockless_wait(const struct lu_env *env,
1615                                   const struct cl_lock_slice *slice)
1616 {
1617         struct osc_lock *olck = cl2osc_lock(slice);
1618         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1619
1620         LINVRNT(osc_lock_invariant(olck));
1621         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1622
1623         return lock->cll_error;
1624 }
1625
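     /**
      * Implements cl_lock_operations::clo_state() method for lockless locks:
      * when the lock enters CLS_HELD, records the owning io and, if the lock
      * is taken on the io's host object, marks the io as lockless.
      */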
1626 static void osc_lock_lockless_state(const struct lu_env *env,
1627                                     const struct cl_lock_slice *slice,
1628                                     enum cl_lock_state state)
1629 {
1630         struct osc_lock *lock = cl2osc_lock(slice);
1631
1632         LINVRNT(osc_lock_invariant(lock));
1633         if (state == CLS_HELD) {
1634                 struct osc_io *oio  = osc_env_io(env);
1635
1636                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1637                 lock->ols_owner = oio;
1638
1639                 /* Mark the io as lockless if this lock is for the io's
1640                  * host object. */
1641                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1642                         oio->oi_lockless = 1;
1643         }
1644 }
1645
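     /**
      * Implements cl_lock_operations::clo_fits_into() method for lockless
      * locks: a lockless lock only matches CEF_NEVER requests issued by the
      * io that owns it (see bug 22147).
      */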
1646 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1647                                        const struct cl_lock_slice *slice,
1648                                        const struct cl_lock_descr *need,
1649                                        const struct cl_io *io)
1650 {
1651         struct osc_lock *lock = cl2osc_lock(slice);
1652
1653         if (!(need->cld_enq_flags & CEF_NEVER))
1654                 return 0;
1655
1656         /* A lockless lock should only be used by its owning io (bug 22147). */
1657         return (lock->ols_owner == osc_env_io(env));
1658 }
1659
1660 static const struct cl_lock_operations osc_lock_lockless_ops = {
1661         .clo_fini      = osc_lock_fini,
1662         .clo_enqueue   = osc_lock_enqueue,
1663         .clo_wait      = osc_lock_lockless_wait,
1664         .clo_unuse     = osc_lock_lockless_unuse,
1665         .clo_state     = osc_lock_lockless_state,
1666         .clo_fits_into = osc_lock_lockless_fits_into,
1667         .clo_cancel    = osc_lock_lockless_cancel,
1668         .clo_print     = osc_lock_print
1669 };
1670
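     /**
      * Allocates and initializes the osc_lock slice for \a lock: builds the
      * ldlm enqueue info, resets the page reference counter, sets the state
      * to OLS_NEW and adds the slice with osc_lock_ops. Returns 0 on success
      * and -ENOMEM if the slice cannot be allocated.
      */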
1671 int osc_lock_init(const struct lu_env *env,
1672                   struct cl_object *obj, struct cl_lock *lock,
1673                   const struct cl_io *unused)
1674 {
1675         struct osc_lock *clk;
1676         int result;
1677
1678         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1679         if (clk != NULL) {
1680                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1681                 cfs_atomic_set(&clk->ols_pageref, 0);
1682                 clk->ols_state = OLS_NEW;
1683                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1684                 result = 0;
1685         } else
1686                 result = -ENOMEM;
1687         return result;
1688 }
1689
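     /**
      * Returns 1 iff the osc_lock attached to \a dlm still has outstanding
      * page references (detected by atomically biasing ols_pageref with
      * _PAGEREF_MAGIC), and 0 otherwise.
      */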
1690 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1691 {
1692         struct osc_lock *olock;
1693         int              rc = 0;
1694
1695         cfs_spin_lock(&osc_ast_guard);
1696         olock = dlm->l_ast_data;
1697         /*
1698          * There is a very rare race with osc_page_addref_lock(), but it
1699          * does not matter: in the worst case we fail to cancel a lock
1700          * that we actually could, which is harmless.
1701          */
1702         if (olock != NULL &&
1703             cfs_atomic_add_return(_PAGEREF_MAGIC,
1704                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1705                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1706                 rc = 1;
1707         }
1708         cfs_spin_unlock(&osc_ast_guard);
1709         return rc;
1710 }
1711
1712 /** @} osc */