7791e89377a228c572fce2c74b83caafaae23bc2
[fs/lustre-release.git] / lustre / osc / osc_lock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc 
54  *  @{ 
55  */
56
57 #define _PAGEREF_MAGIC  (-10000000)
58
59 /*****************************************************************************
60  *
61  * Type conversions.
62  *
63  */
64
65 static const struct cl_lock_operations osc_lock_ops;
66 static const struct cl_lock_operations osc_lock_lockless_ops;
67 static void osc_lock_to_lockless(const struct lu_env *env,
68                                  struct osc_lock *ols, int force);
69 static int osc_lock_has_pages(struct osc_lock *olck);
70
71 int osc_lock_is_lockless(const struct osc_lock *olck)
72 {
73         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
74 }
75
76 /**
77  * Returns a weak pointer to the ldlm lock identified by a handle. The
78  * returned pointer cannot be dereferenced, as the lock is not protected from
79  * concurrent reclaim. This function is a helper for osc_lock_invariant().
80  */
81 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
82 {
83         struct ldlm_lock *lock;
84
85         lock = ldlm_handle2lock(handle);
86         if (lock != NULL)
87                 LDLM_LOCK_PUT(lock);
88         return lock;
89 }
90
91 /**
92  * Invariant that has to be true all of the time.
93  */
94 static int osc_lock_invariant(struct osc_lock *ols)
95 {
96         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
97         struct ldlm_lock *olock       = ols->ols_lock;
98         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
99
100         if (ergo(osc_lock_is_lockless(ols),
101                  ols->ols_locklessable && ols->ols_lock == NULL))
102                 return 1;
103
104         /*
105          * If all the following "ergo"s are true, return 1, otherwise 0
106          */
107         if (! ergo(olock != NULL, handle_used))
108                 return 0;
109
110         if (! ergo(olock != NULL,
111                    olock->l_handle.h_cookie == ols->ols_handle.cookie))
112                 return 0;
113
114         if (! ergo(handle_used,
115                    ergo(lock != NULL && olock != NULL, lock == olock) &&
116                    ergo(lock == NULL, olock == NULL)))
117                 return 0;
118         /*
119          * Check that ->ols_handle and ->ols_lock are consistent, but
120          * take into account that they are set at different times.
121          */
122         if (! ergo(ols->ols_state == OLS_CANCELLED,
123                    olock == NULL && !handle_used))
124                 return 0;
125         /*
126          * DLM lock is destroyed only after we have seen cancellation
127          * ast.
128          */
129         if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
130                    ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
131                 return 0;
132
133         if (! ergo(ols->ols_state == OLS_GRANTED,
134                    olock != NULL &&
135                    olock->l_req_mode == olock->l_granted_mode &&
136                    ols->ols_hold))
137                 return 0;
138         return 1;
139 }
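
/*
 * For illustration: the invariant above is phrased with the ergo() macro,
 * i.e. logical implication.  Assuming the usual libcfs definition,
 *
 *     #define ergo(a, b) (!(a) || (b))        (read: "a implies b")
 *
 * a check such as ergo(olock != NULL, handle_used) fails only when a dlm
 * lock is attached while the handle is not marked as used.
 */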
140
141 /*****************************************************************************
142  *
143  * Lock operations.
144  *
145  */
146
147 /**
148  * Breaks a link between osc_lock and dlm_lock.
149  */
150 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
151 {
152         struct ldlm_lock *dlmlock;
153
154         spin_lock(&osc_ast_guard);
155         dlmlock = olck->ols_lock;
156         if (dlmlock == NULL) {
157                 spin_unlock(&osc_ast_guard);
158                 return;
159         }
160
161         olck->ols_lock = NULL;
162         /* wb(); --- for all who check (ols->ols_lock != NULL) before
163          * the call to osc_lock_detach() */
164         dlmlock->l_ast_data = NULL;
165         olck->ols_handle.cookie = 0ULL;
166         spin_unlock(&osc_ast_guard);
167
168         lock_res_and_lock(dlmlock);
169         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
170                 struct cl_object *obj = olck->ols_cl.cls_obj;
171                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
172                 __u64 old_kms;
173
174                 cl_object_attr_lock(obj);
175                 /* Must get the value under the lock to avoid possible races. */
176                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
177                 /* Update the kms. Need to loop all granted locks.
178                  * Not a problem for the client */
179                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
180
181                 cl_object_attr_set(env, obj, attr, CAT_KMS);
182                 cl_object_attr_unlock(obj);
183         }
184         unlock_res_and_lock(dlmlock);
185
186         /* release a reference taken in osc_lock_upcall0(). */
187         LASSERT(olck->ols_has_ref);
188         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
189         LDLM_LOCK_RELEASE(dlmlock);
190         olck->ols_has_ref = 0;
191 }
192
193 static int osc_lock_unhold(struct osc_lock *ols)
194 {
195         int result = 0;
196
197         if (ols->ols_hold) {
198                 ols->ols_hold = 0;
199                 result = osc_cancel_base(&ols->ols_handle,
200                                          ols->ols_einfo.ei_mode);
201         }
202         return result;
203 }
204
205 static int osc_lock_unuse(const struct lu_env *env,
206                           const struct cl_lock_slice *slice)
207 {
208         struct osc_lock *ols = cl2osc_lock(slice);
209
210         LINVRNT(osc_lock_invariant(ols));
211
212         switch (ols->ols_state) {
213         case OLS_NEW:
214                 LASSERT(!ols->ols_hold);
215                 LASSERT(ols->ols_agl);
216                 return 0;
217         case OLS_UPCALL_RECEIVED:
218                 osc_lock_unhold(ols);
219         case OLS_ENQUEUED:
220                 LASSERT(!ols->ols_hold);
221                 osc_lock_detach(env, ols);
222                 ols->ols_state = OLS_NEW;
223                 return 0;
224         case OLS_GRANTED:
225                 LASSERT(!ols->ols_glimpse);
226                 LASSERT(ols->ols_hold);
227                 /*
228                  * Move lock into OLS_RELEASED state before calling
229                  * osc_cancel_base() so that possible synchronous cancellation
230                  * (that always happens e.g., for liblustre) sees that lock is
231                  * released.
232                  */
233                 ols->ols_state = OLS_RELEASED;
234                 return osc_lock_unhold(ols);
235         default:
236                 CERROR("Impossible state: %d\n", ols->ols_state);
237                 LBUG();
238         }
239 }
240
241 static void osc_lock_fini(const struct lu_env *env,
242                           struct cl_lock_slice *slice)
243 {
244         struct osc_lock  *ols = cl2osc_lock(slice);
245
246         LINVRNT(osc_lock_invariant(ols));
247         /*
248          * ->ols_hold can still be true at this point if, for example, a
249          * thread that requested a lock was killed (and released a reference
250          * to the lock) before a reply from the server was received. In this
251          * case the lock is destroyed immediately after the upcall.
252          */
253         osc_lock_unhold(ols);
254         LASSERT(ols->ols_lock == NULL);
255         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
256                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
257
258         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
259 }
260
261 static void osc_lock_build_policy(const struct lu_env *env,
262                                   const struct cl_lock *lock,
263                                   ldlm_policy_data_t *policy)
264 {
265         const struct cl_lock_descr *d = &lock->cll_descr;
266
267         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
268         policy->l_extent.gid = d->cld_gid;
269 }
270
271 static __u64 osc_enq2ldlm_flags(__u32 enqflags)
272 {
273         __u64 result = 0;
274
275         LASSERT((enqflags & ~CEF_MASK) == 0);
276
277         if (enqflags & CEF_NONBLOCK)
278                 result |= LDLM_FL_BLOCK_NOWAIT;
279         if (enqflags & CEF_ASYNC)
280                 result |= LDLM_FL_HAS_INTENT;
281         if (enqflags & CEF_DISCARD_DATA)
282                 result |= LDLM_FL_AST_DISCARD_DATA;
283         return result;
284 }
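
/*
 * For illustration: osc_enq2ldlm_flags() is a pure translation of cl-layer
 * enqueue flags into their DLM counterparts, so combined flags map to the
 * union of the corresponding LDLM bits, e.g.
 *
 *     osc_enq2ldlm_flags(CEF_NONBLOCK | CEF_ASYNC)
 *         == (LDLM_FL_BLOCK_NOWAIT | LDLM_FL_HAS_INTENT)
 */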
285
286 /**
287  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
288  * pointers. Initialized in osc_init().
289  */
290 spinlock_t osc_ast_guard;
291
292 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
293 {
294         struct osc_lock *olck;
295
296         lock_res_and_lock(dlm_lock);
297         spin_lock(&osc_ast_guard);
298         olck = dlm_lock->l_ast_data;
299         if (olck != NULL) {
300                 struct cl_lock *lock = olck->ols_cl.cls_lock;
301                 /*
302                  * If osc_lock holds a reference on ldlm lock, return it even
303                  * when cl_lock is in CLS_FREEING state. This way
304                  *
305                  *         osc_ast_data_get(dlmlock) == NULL
306                  *
307                  * guarantees that all osc references on dlmlock were
308                  * released. osc_dlm_blocking_ast0() relies on that.
309                  */
310                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
311                         cl_lock_get_trust(lock);
312                         lu_ref_add_atomic(&lock->cll_reference,
313                                           "ast", cfs_current());
314                 } else
315                         olck = NULL;
316         }
317         spin_unlock(&osc_ast_guard);
318         unlock_res_and_lock(dlm_lock);
319         return olck;
320 }
321
322 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
323 {
324         struct cl_lock *lock;
325
326         lock = olck->ols_cl.cls_lock;
327         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
328         cl_lock_put(env, lock);
329 }
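
/*
 * Usage sketch: callers are expected to pair the two helpers above, roughly
 *
 *     olck = osc_ast_data_get(dlmlock);
 *     if (olck != NULL) {
 *             ... use olck->ols_cl.cls_lock under its mutex ...
 *             osc_ast_data_put(env, olck);
 *     }
 *
 * which is the pattern followed by osc_dlm_blocking_ast0(),
 * osc_ldlm_completion_ast() and osc_ldlm_glimpse_ast() below.
 */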
330
331 /**
332  * Updates object attributes from a lock value block (lvb) received together
333  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
334  * logic.
335  *
336  * This can be optimized to not update attributes when lock is a result of a
337  * local match.
338  *
339  * Called under lock and resource spin-locks.
340  */
341 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
342                                 int rc)
343 {
344         struct ost_lvb    *lvb;
345         struct cl_object  *obj;
346         struct lov_oinfo  *oinfo;
347         struct cl_attr    *attr;
348         unsigned           valid;
349
350         ENTRY;
351
352         if (!(olck->ols_flags & LDLM_FL_LVB_READY))
353                 RETURN_EXIT;
354
355         lvb   = &olck->ols_lvb;
356         obj   = olck->ols_cl.cls_obj;
357         oinfo = cl2osc(obj)->oo_oinfo;
358         attr  = &osc_env_info(env)->oti_attr;
359         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
360         cl_lvb2attr(attr, lvb);
361
362         cl_object_attr_lock(obj);
363         if (rc == 0) {
364                 struct ldlm_lock  *dlmlock;
365                 __u64 size;
366
367                 dlmlock = olck->ols_lock;
368                 LASSERT(dlmlock != NULL);
369
370                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
371                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
372                 size = lvb->lvb_size;
373                 /* Extend KMS up to the end of this lock and no further
374                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
375                 if (size > dlmlock->l_policy_data.l_extent.end)
376                         size = dlmlock->l_policy_data.l_extent.end + 1;
377                 if (size >= oinfo->loi_kms) {
378                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
379                                    ", kms="LPU64, lvb->lvb_size, size);
380                         valid |= CAT_KMS;
381                         attr->cat_kms = size;
382                 } else {
383                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
384                                    LPU64"; leaving kms="LPU64", end="LPU64,
385                                    lvb->lvb_size, oinfo->loi_kms,
386                                    dlmlock->l_policy_data.l_extent.end);
387                 }
388                 ldlm_lock_allow_match_locked(dlmlock);
389         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
390                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
391                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
392         } else
393                 valid = 0;
394
395         if (valid != 0)
396                 cl_object_attr_set(env, obj, attr, valid);
397
398         cl_object_attr_unlock(obj);
399
400         EXIT;
401 }
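
/*
 * Worked example of the KMS clamping above (values are hypothetical): with
 * lvb->lvb_size == 10000 and a granted extent ending at byte 4095 (a lock on
 * [0, 4095]),
 *
 *     size = 10000;
 *     size > l_extent.end   =>   size = 4095 + 1 = 4096;
 *
 * so the known minimum size is raised to 4096 at most, and only if that is
 * still >= the currently recorded oinfo->loi_kms.
 */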
402
403 /**
404  * Called when a lock is granted, from an upcall (when server returned a
405  * granted lock), or from completion AST, when server returned a blocked lock.
406  *
407  * Called under lock and resource spin-locks, that are released temporarily
408  * here.
409  */
410 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
411                              struct ldlm_lock *dlmlock, int rc)
412 {
413         struct ldlm_extent   *ext;
414         struct cl_lock       *lock;
415         struct cl_lock_descr *descr;
416
417         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
418
419         ENTRY;
420         if (olck->ols_state < OLS_GRANTED) {
421                 lock  = olck->ols_cl.cls_lock;
422                 ext   = &dlmlock->l_policy_data.l_extent;
423                 descr = &osc_env_info(env)->oti_descr;
424                 descr->cld_obj = lock->cll_descr.cld_obj;
425
426                 /* XXX check that ->l_granted_mode is valid. */
427                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
428                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
429                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
430                 descr->cld_gid   = ext->gid;
431                 /*
432                  * tell upper layers the extent of the lock that was actually
433                  * granted
434                  */
435                 olck->ols_state = OLS_GRANTED;
436                 osc_lock_lvb_update(env, olck, rc);
437
438                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
439                  * to take a semaphore on a parent lock. This is safe, because
440                  * spin-locks are needed to protect consistency of
441                  * dlmlock->l_*_mode and LVB, and we have finished processing
442                  * them. */
443                 unlock_res_and_lock(dlmlock);
444                 cl_lock_modify(env, lock, descr);
445                 cl_lock_signal(env, lock);
446                 LINVRNT(osc_lock_invariant(olck));
447                 lock_res_and_lock(dlmlock);
448         }
449         EXIT;
450 }
451
452 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
453
454 {
455         struct ldlm_lock *dlmlock;
456
457         ENTRY;
458
459         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
460         LASSERT(dlmlock != NULL);
461
462         lock_res_and_lock(dlmlock);
463         spin_lock(&osc_ast_guard);
464         LASSERT(dlmlock->l_ast_data == olck);
465         LASSERT(olck->ols_lock == NULL);
466         olck->ols_lock = dlmlock;
467         spin_unlock(&osc_ast_guard);
468
469         /*
470          * The lock might not be granted yet. In this case, the completion
471          * ast (osc_ldlm_completion_ast()) comes later and finishes
472          * granting the lock.
473          */
474         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
475                 osc_lock_granted(env, olck, dlmlock, 0);
476         unlock_res_and_lock(dlmlock);
477
478         /*
479          * osc_enqueue_interpret() decrefs asynchronous locks, counter
480          * this.
481          */
482         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
483         olck->ols_hold = 1;
484
485         /* lock reference taken by ldlm_handle2lock_long() is owned by
486          * osc_lock and released in osc_lock_detach() */
487         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
488         olck->ols_has_ref = 1;
489 }
490
491 /**
492  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
493  * received from a server, or after osc_enqueue_base() matched a local DLM
494  * lock.
495  */
496 static int osc_lock_upcall(void *cookie, int errcode)
497 {
498         struct osc_lock         *olck  = cookie;
499         struct cl_lock_slice    *slice = &olck->ols_cl;
500         struct cl_lock          *lock  = slice->cls_lock;
501         struct lu_env           *env;
502         struct cl_env_nest       nest;
503
504         ENTRY;
505         env = cl_env_nested_get(&nest);
506         if (!IS_ERR(env)) {
507                 int rc;
508
509                 cl_lock_mutex_get(env, lock);
510
511                 LASSERT(lock->cll_state >= CLS_QUEUING);
512                 if (olck->ols_state == OLS_ENQUEUED) {
513                         olck->ols_state = OLS_UPCALL_RECEIVED;
514                         rc = ldlm_error2errno(errcode);
515                 } else if (olck->ols_state == OLS_CANCELLED) {
516                         rc = -EIO;
517                 } else {
518                         CERROR("Impossible state: %d\n", olck->ols_state);
519                         LBUG();
520                 }
521                 if (rc) {
522                         struct ldlm_lock *dlmlock;
523
524                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
525                         if (dlmlock != NULL) {
526                                 lock_res_and_lock(dlmlock);
527                                 spin_lock(&osc_ast_guard);
528                                 LASSERT(olck->ols_lock == NULL);
529                                 dlmlock->l_ast_data = NULL;
530                                 olck->ols_handle.cookie = 0ULL;
531                                 spin_unlock(&osc_ast_guard);
532                                 ldlm_lock_fail_match_locked(dlmlock);
533                                 unlock_res_and_lock(dlmlock);
534                                 LDLM_LOCK_PUT(dlmlock);
535                         }
536                 } else {
537                         if (olck->ols_glimpse)
538                                 olck->ols_glimpse = 0;
539                         osc_lock_upcall0(env, olck);
540                 }
541
542                 /* Error handling, some errors are tolerable. */
543                 if (olck->ols_locklessable && rc == -EUSERS) {
544                         /* This is a tolerable error, turn this lock into
545                          * lockless lock.
546                          */
547                         osc_object_set_contended(cl2osc(slice->cls_obj));
548                         LASSERT(slice->cls_ops == &osc_lock_ops);
549
550                         /* Change this lock to ldlmlock-less lock. */
551                         osc_lock_to_lockless(env, olck, 1);
552                         olck->ols_state = OLS_GRANTED;
553                         rc = 0;
554                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
555                         osc_lock_lvb_update(env, olck, rc);
556                         cl_lock_delete(env, lock);
557                         /* Hide the error. */
558                         rc = 0;
559                 }
560
561                 if (rc == 0) {
562                         /* In the AGL case, the RPC sponsor may exit cl_lock
563                          * processing before the related OSC lock upcall()
564                          * runs, without calling wait().  So update the lock
565                          * status from the enqueue result in the upcall. */
566                         if (olck->ols_agl) {
567                                 lock->cll_flags |= CLF_FROM_UPCALL;
568                                 cl_wait_try(env, lock);
569                                 lock->cll_flags &= ~CLF_FROM_UPCALL;
570                         }
571                         cl_lock_signal(env, lock);
572                         /* del user for lock upcall cookie */
573                         if (olck->ols_agl) {
574                                 if (!olck->ols_glimpse)
575                                         olck->ols_agl = 0;
576                                 cl_unuse_try(env, lock);
577                         }
578                 } else {
579                         /* del user for lock upcall cookie */
580                         if (olck->ols_agl)
581                                 cl_lock_user_del(env, lock);
582                         cl_lock_error(env, lock, rc);
583                 }
584
585                 /* release cookie reference, acquired by osc_lock_enqueue() */
586                 cl_lock_hold_release(env, lock, "upcall", lock);
587                 cl_lock_mutex_put(env, lock);
588
589                 lu_ref_del(&lock->cll_reference, "upcall", lock);
590                 /* This maybe the last reference, so must be called after
591                  * cl_lock_mutex_put(). */
592                 cl_lock_put(env, lock);
593
594                 cl_env_nested_put(&nest, env);
595         } else {
596                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
597                 LBUG();
598         }
599         RETURN(errcode);
600 }
601
602 /**
603  * Core of osc_dlm_blocking_ast() logic.
604  */
605 static void osc_lock_blocking(const struct lu_env *env,
606                               struct ldlm_lock *dlmlock,
607                               struct osc_lock *olck, int blocking)
608 {
609         struct cl_lock *lock = olck->ols_cl.cls_lock;
610
611         LASSERT(olck->ols_lock == dlmlock);
612         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
613         LASSERT(!osc_lock_is_lockless(olck));
614
615         /*
616          * Lock might be still addref-ed here, if e.g., blocking ast
617          * is sent for a failed lock.
618          */
619         osc_lock_unhold(olck);
620
621         if (blocking && olck->ols_state < OLS_BLOCKED)
622                 /*
623                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
624                  * because it recursively re-enters osc_lock_blocking(), with
625                  * the state set to OLS_CANCELLED.
626                  */
627                 olck->ols_state = OLS_BLOCKED;
628         /*
629          * cancel and destroy lock at least once no matter how blocking ast is
630          * entered (see comment above osc_ldlm_blocking_ast() for use
631          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
632          */
633         cl_lock_cancel(env, lock);
634         cl_lock_delete(env, lock);
635 }
636
637 /**
638  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
639  * and ldlm_lock caches.
640  */
641 static int osc_dlm_blocking_ast0(const struct lu_env *env,
642                                  struct ldlm_lock *dlmlock,
643                                  void *data, int flag)
644 {
645         struct osc_lock *olck;
646         struct cl_lock  *lock;
647         int result;
648         int cancel;
649
650         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
651
652         cancel = 0;
653         olck = osc_ast_data_get(dlmlock);
654         if (olck != NULL) {
655                 lock = olck->ols_cl.cls_lock;
656                 cl_lock_mutex_get(env, lock);
657                 LINVRNT(osc_lock_invariant(olck));
658                 if (olck->ols_ast_wait) {
659                         /* wake up osc_lock_use() */
660                         cl_lock_signal(env, lock);
661                         olck->ols_ast_wait = 0;
662                 }
663                 /*
664                  * Lock might have been canceled while this thread was
665                  * sleeping for lock mutex, but olck is pinned in memory.
666                  */
667                 if (olck == dlmlock->l_ast_data) {
668                         /*
669                          * NOTE: DLM sends blocking AST's for failed locks
670                          *       (that are still in pre-OLS_GRANTED state)
671                          *       too, and they have to be canceled otherwise
672                          *       DLM lock is never destroyed and stuck in
673                          *       the memory.
674                          *
675                          *       Alternatively, ldlm_cli_cancel() can be
676                          *       called here directly for osc_locks with
677                          *       ols_state < OLS_GRANTED to maintain an
678                          *       invariant that ->clo_cancel() is only called
679                          *       for locks that were granted.
680                          */
681                         LASSERT(data == olck);
682                         osc_lock_blocking(env, dlmlock,
683                                           olck, flag == LDLM_CB_BLOCKING);
684                 } else
685                         cancel = 1;
686                 cl_lock_mutex_put(env, lock);
687                 osc_ast_data_put(env, olck);
688         } else
689                 /*
690                  * DLM lock exists, but there is no cl_lock attached to it.
691                  * This is a `normal' race. cl_object and its cl_lock's can be
692                  * removed by memory pressure, together with all pages.
693                  */
694                 cancel = (flag == LDLM_CB_BLOCKING);
695
696         if (cancel) {
697                 struct lustre_handle *lockh;
698
699                 lockh = &osc_env_info(env)->oti_handle;
700                 ldlm_lock2handle(dlmlock, lockh);
701                 result = ldlm_cli_cancel(lockh, LCF_ASYNC);
702         } else
703                 result = 0;
704         return result;
705 }
706
707 /**
708  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
709  * some other lock, or is canceled. This function is installed as a
710  * ldlm_lock::l_blocking_ast() for client extent locks.
711  *
712  * Control flow is tricky, because ldlm uses the same call-back
713  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
714  *
715  * \param dlmlock lock for which ast occurred.
716  *
717  * \param new description of a conflicting lock in case of blocking ast.
718  *
719  * \param data value of dlmlock->l_ast_data
720  *
721  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
722  *             cancellation and blocking ast's.
723  *
724  * Possible use cases:
725  *
726  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
727  *       lock due to lock lru pressure, or explicit user request to purge
728  *       locks.
729  *
730  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
731  *       us that dlmlock conflicts with another lock that some client is
732  *       enqueuing. Lock is canceled.
733  *
734  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
735  *             ldlm_cli_cancel() that calls
736  *
737  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
738  *
739  *             recursively entering osc_ldlm_blocking_ast().
740  *
741  *     - client cancels the lock voluntarily (e.g., as a part of early cancellation):
742  *
743  *           cl_lock_cancel()->
744  *             osc_lock_cancel()->
745  *               ldlm_cli_cancel()->
746  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
747  *
748  */
749 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
750                                  struct ldlm_lock_desc *new, void *data,
751                                  int flag)
752 {
753         struct lu_env     *env;
754         struct cl_env_nest nest;
755         int                result;
756
757         /*
758          * This can be called in the context of outer IO, e.g.,
759          *
760          *     cl_enqueue()->...
761          *       ->osc_enqueue_base()->...
762          *         ->ldlm_prep_elc_req()->...
763          *           ->ldlm_cancel_callback()->...
764          *             ->osc_ldlm_blocking_ast()
765          *
766          * new environment has to be created to not corrupt outer context.
767          */
768         env = cl_env_nested_get(&nest);
769         if (!IS_ERR(env)) {
770                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
771                 cl_env_nested_put(&nest, env);
772         } else {
773                 result = PTR_ERR(env);
774                 /*
775                  * XXX This should never happen, as cl_lock is
776                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
777                  * should be used.
778                  */
779                 LBUG();
780         }
781         if (result != 0) {
782                 if (result == -ENODATA)
783                         result = 0;
784                 else
785                         CERROR("BAST failed: %d\n", result);
786         }
787         return result;
788 }
789
790 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
791                                    __u64 flags, void *data)
792 {
793         struct cl_env_nest nest;
794         struct lu_env     *env;
795         struct osc_lock   *olck;
796         struct cl_lock    *lock;
797         int result;
798         int dlmrc;
799
800         /* first, do dlm part of the work */
801         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
802         /* then, notify cl_lock */
803         env = cl_env_nested_get(&nest);
804         if (!IS_ERR(env)) {
805                 olck = osc_ast_data_get(dlmlock);
806                 if (olck != NULL) {
807                         lock = olck->ols_cl.cls_lock;
808                         cl_lock_mutex_get(env, lock);
809                         /*
810                          * ldlm_handle_cp_callback() copied LVB from request
811                          * to lock->l_lvb_data, store it in osc_lock.
812                          */
813                         LASSERT(dlmlock->l_lvb_data != NULL);
814                         lock_res_and_lock(dlmlock);
815                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
816                         if (olck->ols_lock == NULL) {
817                                 /*
818                                  * upcall (osc_lock_upcall()) hasn't yet been
819                                  * called. Do nothing now, upcall will bind
820                                  * olck to dlmlock and signal the waiters.
821                                  *
822                                  * This maintains an invariant that osc_lock
823                                  * and ldlm_lock are always bound when
824                                  * osc_lock is in OLS_GRANTED state.
825                                  */
826                         } else if (dlmlock->l_granted_mode ==
827                                    dlmlock->l_req_mode) {
828                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
829                         }
830                         unlock_res_and_lock(dlmlock);
831
832                         if (dlmrc != 0) {
833                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
834                                               "dlmlock returned %d\n", dlmrc);
835                                 cl_lock_error(env, lock, dlmrc);
836                         }
837                         cl_lock_mutex_put(env, lock);
838                         osc_ast_data_put(env, olck);
839                         result = 0;
840                 } else
841                         result = -ELDLM_NO_LOCK_DATA;
842                 cl_env_nested_put(&nest, env);
843         } else
844                 result = PTR_ERR(env);
845         return dlmrc ?: result;
846 }
847
848 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
849 {
850         struct ptlrpc_request  *req  = data;
851         struct osc_lock        *olck;
852         struct cl_lock         *lock;
853         struct cl_object       *obj;
854         struct cl_env_nest      nest;
855         struct lu_env          *env;
856         struct ost_lvb         *lvb;
857         struct req_capsule     *cap;
858         int                     result;
859
860         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
861
862         env = cl_env_nested_get(&nest);
863         if (!IS_ERR(env)) {
864                 /* osc_ast_data_get() has to go after the environment is
865                  * allocated, because osc_ast_data_get() acquires a
866                  * reference to a lock, and that reference can only be
867                  * released within an environment.
868                  */
869                 olck = osc_ast_data_get(dlmlock);
870                 if (olck != NULL) {
871                         lock = olck->ols_cl.cls_lock;
872                         /* Do not grab the mutex of cl_lock for glimpse.
873                          * See LU-1274 for details.
874                          * BTW, it's okay for cl_lock to be cancelled during
875                          * this period because server can handle this race.
876                          * See ldlm_server_glimpse_ast() for details.
877                          * cl_lock_mutex_get(env, lock); */
878                         cap = &req->rq_pill;
879                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
880                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
881                                              sizeof *lvb);
882                         result = req_capsule_server_pack(cap);
883                         if (result == 0) {
884                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
885                                 obj = lock->cll_descr.cld_obj;
886                                 result = cl_object_glimpse(env, obj, lvb);
887                         }
888                         if (!exp_connect_lvb_type(req->rq_export))
889                                 req_capsule_shrink(&req->rq_pill,
890                                                    &RMF_DLM_LVB,
891                                                    sizeof(struct ost_lvb_v1),
892                                                    RCL_SERVER);
893                         osc_ast_data_put(env, olck);
894                 } else {
895                         /*
896                          * These errors are normal races, so we don't want to
897                          * fill the console with messages by calling
898                          * ptlrpc_error()
899                          */
900                         lustre_pack_reply(req, 1, NULL, NULL);
901                         result = -ELDLM_NO_LOCK_DATA;
902                 }
903                 cl_env_nested_put(&nest, env);
904         } else
905                 result = PTR_ERR(env);
906         req->rq_status = result;
907         return result;
908 }
909
910 static unsigned long osc_lock_weigh(const struct lu_env *env,
911                                     const struct cl_lock_slice *slice)
912 {
913         /*
914          * don't need to grab coh_page_guard since we don't care about the
915          * exact number of pages.
916          */
917         return cl_object_header(slice->cls_obj)->coh_pages;
918 }
919
920 static void osc_lock_build_einfo(const struct lu_env *env,
921                                  const struct cl_lock *clock,
922                                  struct osc_lock *lock,
923                                  struct ldlm_enqueue_info *einfo)
924 {
925         enum cl_lock_mode mode;
926
927         mode = clock->cll_descr.cld_mode;
928         if (mode == CLM_PHANTOM)
929                 /*
930                  * For now, enqueue all glimpse locks in read mode. In the
931                  * future, client might choose to enqueue LCK_PW lock for
932                  * glimpse on a file opened for write.
933                  */
934                 mode = CLM_READ;
935
936         einfo->ei_type   = LDLM_EXTENT;
937         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
938         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
939         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
940         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
941         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
942 }
943
944 /**
945  * Determine if the lock should be converted into a lockless lock.
946  *
947  * Steps to check:
948  * - whether the lock has an explicit requirement for a non-lockless lock;
949  * - the io lock request type ci_lockreq;
950  * - send the enqueue RPC to the OST to make the further decision;
951  * - special treatment for the lockless truncate lock.
952  *
953  *  Additional policy can be implemented here, e.g., never do lockless-io
954  *  for large extents.
955  */
956 static void osc_lock_to_lockless(const struct lu_env *env,
957                                  struct osc_lock *ols, int force)
958 {
959         struct cl_lock_slice *slice = &ols->ols_cl;
960
961         LASSERT(ols->ols_state == OLS_NEW ||
962                 ols->ols_state == OLS_UPCALL_RECEIVED);
963
964         if (force) {
965                 ols->ols_locklessable = 1;
966                 slice->cls_ops = &osc_lock_lockless_ops;
967         } else {
968                 struct osc_io *oio     = osc_env_io(env);
969                 struct cl_io  *io      = oio->oi_cl.cis_io;
970                 struct cl_object *obj  = slice->cls_obj;
971                 struct osc_object *oob = cl2osc(obj);
972                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
973                 struct obd_connect_data *ocd;
974
975                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
976                         io->ci_lockreq == CILR_MAYBE ||
977                         io->ci_lockreq == CILR_NEVER);
978
979                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
980                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
981                                 (io->ci_lockreq == CILR_MAYBE) &&
982                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
983                 if (io->ci_lockreq == CILR_NEVER ||
984                         /* lockless IO */
985                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
986                         /* lockless truncate */
987                     (cl_io_is_trunc(io) &&
988                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
989                       osd->od_lockless_truncate)) {
990                         ols->ols_locklessable = 1;
991                         slice->cls_ops = &osc_lock_lockless_ops;
992                 }
993         }
994         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
995 }
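
/*
 * Condensed restatement of the non-forced decision above: the lock becomes
 * lockless when any of the following holds,
 *
 *     io->ci_lockreq == CILR_NEVER
 *  || (ols->ols_locklessable && osc_object_is_contended(oob))
 *  || (cl_io_is_trunc(io) &&
 *      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
 *      osd->od_lockless_truncate)
 *
 * where ols_locklessable itself requires CILR_MAYBE, an io type other than
 * CIT_SETATTR and OBD_CONNECT_SRVLOCK support on the export.
 */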
996
997 static int osc_lock_compatible(const struct osc_lock *qing,
998                                const struct osc_lock *qed)
999 {
1000         enum cl_lock_mode qing_mode;
1001         enum cl_lock_mode qed_mode;
1002
1003         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1004         if (qed->ols_glimpse &&
1005             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1006                 return 1;
1007
1008         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1009         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1010 }
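
/*
 * For illustration, setting the glimpse special case above aside, two osc
 * locks are compatible only when both are read locks:
 *
 *     qing \ qed     CLM_READ    CLM_WRITE
 *     CLM_READ           1           0
 *     CLM_WRITE          0           0
 *
 * A queued glimpse lock is additionally treated as compatible once its
 * upcall has been received, or whenever the enqueuing lock is a read lock.
 */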
1011
1012 /**
1013  * Cancel all conflicting locks and wait for them to be destroyed.
1014  *
1015  * This function is used for two purposes:
1016  *
1017  *     - early cancel all conflicting locks before starting IO, and
1018  *
1019  *     - guarantee that pages added to the page cache by lockless IO are never
1020  *       covered by locks other than lockless IO lock, and, hence, are not
1021  *       visible to other threads.
1022  */
1023 static int osc_lock_enqueue_wait(const struct lu_env *env,
1024                                  const struct osc_lock *olck)
1025 {
1026         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1027         struct cl_lock_descr    *descr   = &lock->cll_descr;
1028         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1029         struct cl_lock          *scan;
1030         struct cl_lock          *conflict= NULL;
1031         int lockless                     = osc_lock_is_lockless(olck);
1032         int rc                           = 0;
1033         ENTRY;
1034
1035         LASSERT(cl_lock_is_mutexed(lock));
1036
1037         /* make it enqueue anyway for glimpse lock, because we actually
1038          * don't need to cancel any conflicting locks. */
1039         if (olck->ols_glimpse)
1040                 return 0;
1041
1042         spin_lock(&hdr->coh_lock_guard);
1043         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1044                 struct cl_lock_descr *cld = &scan->cll_descr;
1045                 const struct osc_lock *scan_ols;
1046
1047                 if (scan == lock)
1048                         break;
1049
1050                 if (scan->cll_state < CLS_QUEUING ||
1051                     scan->cll_state == CLS_FREEING ||
1052                     cld->cld_start > descr->cld_end ||
1053                     cld->cld_end < descr->cld_start)
1054                         continue;
1055
1056                 /* overlapped and living locks. */
1057
1058                 /* We're not supposed to give up group lock. */
1059                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1060                         LASSERT(descr->cld_mode != CLM_GROUP ||
1061                                 descr->cld_gid != scan->cll_descr.cld_gid);
1062                         continue;
1063                 }
1064
1065                 scan_ols = osc_lock_at(scan);
1066
1067                 /* We need to cancel the compatible locks if we're enqueuing
1068                  * a lockless lock, for example:
1069                  * imagine that client has PR lock on [0, 1000], and thread T0
1070                  * is doing lockless IO in [500, 1500] region. Concurrent
1071                  * thread T1 can see lockless data in [500, 1000], which is
1072                  * wrong, because these data are possibly stale. */
1073                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1074                         continue;
1075
1076                 cl_lock_get_trust(scan);
1077                 conflict = scan;
1078                 break;
1079         }
1080         spin_unlock(&hdr->coh_lock_guard);
1081
1082         if (conflict) {
1083                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1084                         /* we want a group lock but a previous lock request
1085                          * conflicts; we do not wait but return 0 so the
1086                          * request is sent to the server
1087                          */
1088                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1089                                            "with %p, no wait, send to server\n",
1090                                lock, conflict);
1091                         cl_lock_put(env, conflict);
1092                         rc = 0;
1093                 } else {
1094                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1095                                            "will wait\n",
1096                                lock, conflict);
1097                         LASSERT(lock->cll_conflict == NULL);
1098                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1099                                    lock);
1100                         lock->cll_conflict = conflict;
1101                         rc = CLO_WAIT;
1102                 }
1103         }
1104         RETURN(rc);
1105 }
1106
1107 /**
1108  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1109  * layer. This initiates ldlm enqueue:
1110  *
1111  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1112  *
1113  *     - calls osc_enqueue_base() to do actual enqueue.
1114  *
1115  * osc_enqueue_base() is supplied with an upcall function that is executed
1116  * when lock is received either after a local cached ldlm lock is matched, or
1117  * when a reply from the server is received.
1118  *
1119  * This function does not wait for the network communication to complete.
1120  */
1121 static int osc_lock_enqueue(const struct lu_env *env,
1122                             const struct cl_lock_slice *slice,
1123                             struct cl_io *unused, __u32 enqflags)
1124 {
1125         struct osc_lock          *ols     = cl2osc_lock(slice);
1126         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1127         int result;
1128         ENTRY;
1129
1130         LASSERT(cl_lock_is_mutexed(lock));
1131         LASSERTF(ols->ols_state == OLS_NEW,
1132                  "Impossible state: %d\n", ols->ols_state);
1133
1134         LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
1135                 "lock = %p, ols = %p\n", lock, ols);
1136
1137         result = osc_lock_enqueue_wait(env, ols);
1138         if (result == 0) {
1139                 if (!osc_lock_is_lockless(ols)) {
1140                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1141                         struct osc_thread_info   *info = osc_env_info(env);
1142                         struct ldlm_res_id       *resname = &info->oti_resname;
1143                         ldlm_policy_data_t       *policy = &info->oti_policy;
1144                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1145
1146                         /* lock will be passed as upcall cookie,
1147                          * hold a ref to prevent it from being released. */
1148                         cl_lock_hold_add(env, lock, "upcall", lock);
1149                         /* a user for agl lock also */
1150                         if (ols->ols_agl)
1151                                 cl_lock_user_add(env, lock);
1152                         ols->ols_state = OLS_ENQUEUED;
1153
1154                         /*
1155                          * XXX: this is possible blocking point as
1156                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1157                          * LDLM_CP_CALLBACK.
1158                          */
1159                         ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
1160                         osc_lock_build_policy(env, lock, policy);
1161                         result = osc_enqueue_base(osc_export(obj), resname,
1162                                           &ols->ols_flags, policy,
1163                                           &ols->ols_lvb,
1164                                           obj->oo_oinfo->loi_kms_valid,
1165                                           osc_lock_upcall,
1166                                           ols, einfo, &ols->ols_handle,
1167                                           PTLRPCD_SET, 1, ols->ols_agl);
1168                         if (result != 0) {
1169                                 if (ols->ols_agl)
1170                                         cl_lock_user_del(env, lock);
1171                                 cl_lock_unhold(env, lock, "upcall", lock);
1172                                 if (unlikely(result == -ECANCELED)) {
1173                                         ols->ols_state = OLS_NEW;
1174                                         result = 0;
1175                                 }
1176                         }
1177                 } else {
1178                         ols->ols_state = OLS_GRANTED;
1179                         ols->ols_owner = osc_env_io(env);
1180                 }
1181         }
1182         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1183         RETURN(result);
1184 }
1185
1186 static int osc_lock_wait(const struct lu_env *env,
1187                          const struct cl_lock_slice *slice)
1188 {
1189         struct osc_lock *olck = cl2osc_lock(slice);
1190         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1191
1192         LINVRNT(osc_lock_invariant(olck));
1193
1194         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
1195                 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1196                         return 0;
1197                 } else if (olck->ols_agl) {
1198                         if (lock->cll_flags & CLF_FROM_UPCALL)
1199                                 /* It is from enqueue RPC reply upcall for
1200                                  * updating state. Do not re-enqueue. */
1201                                 return -ENAVAIL;
1202                         else
1203                                 olck->ols_state = OLS_NEW;
1204                 } else {
1205                         LASSERT(lock->cll_error);
1206                         return lock->cll_error;
1207                 }
1208         }
1209
1210         if (olck->ols_state == OLS_NEW) {
1211                 int rc;
1212
1213                 LASSERT(olck->ols_agl);
1214                 olck->ols_agl = 0;
1215                 rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
1216                 if (rc != 0)
1217                         return rc;
1218                 else
1219                         return CLO_REENQUEUED;
1220         }
1221
1222         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1223                      lock->cll_error == 0, olck->ols_lock != NULL));
1224
1225         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1226 }
1227
1228 /**
1229  * An implementation of cl_lock_operations::clo_use() method that pins cached
1230  * lock.
1231  */
1232 static int osc_lock_use(const struct lu_env *env,
1233                         const struct cl_lock_slice *slice)
1234 {
1235         struct osc_lock *olck = cl2osc_lock(slice);
1236         int rc;
1237
1238         LASSERT(!olck->ols_hold);
1239
1240         /*
1241          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1242          * flag is not set. This protects us from a concurrent blocking ast.
1243          */
1244         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1245         if (rc == 0) {
1246                 olck->ols_hold = 1;
1247                 olck->ols_state = OLS_GRANTED;
1248         } else {
1249                 struct cl_lock *lock;
1250
1251                 /*
1252                  * Lock is being cancelled somewhere within
1253                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1254                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1255                  * cl_lock mutex.
1256                  */
1257                 lock = slice->cls_lock;
1258                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1259                 LASSERT(lock->cll_users > 0);
1260                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1261                  * lock.*/
1262                 olck->ols_ast_wait = 1;
1263                 rc = CLO_WAIT;
1264         }
1265         return rc;
1266 }
1267
1268 static int osc_lock_flush(struct osc_lock *ols, int discard)
1269 {
1270         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1271         struct cl_env_nest    nest;
1272         struct lu_env        *env;
1273         int result = 0;
1274         ENTRY;
1275
1276         env = cl_env_nested_get(&nest);
1277         if (!IS_ERR(env)) {
1278                 struct osc_object    *obj   = cl2osc(ols->ols_cl.cls_obj);
1279                 struct cl_lock_descr *descr = &lock->cll_descr;
1280                 int rc = 0;
1281
1282                 if (descr->cld_mode >= CLM_WRITE) {
1283                         result = osc_cache_writeback_range(env, obj,
1284                                         descr->cld_start, descr->cld_end,
1285                                         1, discard);
1286                         LDLM_DEBUG(ols->ols_lock,
1287                                 "lock %p: %d pages were %s.\n", lock, result,
1288                                 discard ? "discarded" : "written");
1289                         if (result > 0)
1290                                 result = 0;
1291                 }
1292
1293                 rc = cl_lock_discard_pages(env, lock);
1294                 if (result == 0 && rc < 0)
1295                         result = rc;
1296
1297                 cl_env_nested_put(&nest, env);
1298         } else
1299                 result = PTR_ERR(env);
1300         if (result == 0) {
1301                 ols->ols_flush = 1;
1302                 LINVRNT(!osc_lock_has_pages(ols));
1303         }
1304         RETURN(result);
1305 }
1306
1307 /**
1308  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1309  * called (as part of cl_lock_cancel()) when lock is canceled either voluntary
1310  * (LRU pressure, early cancellation, umount, etc.) or due to the conflict
1311  * with some other lock some where in the cluster. This function does the
1312  * following:
1313  *
1314  *     - invalidates all pages protected by this lock (after sending dirty
1315  *       ones to the server, as necessary);
1316  *
1317  *     - decref's underlying ldlm lock;
1318  *
1319  *     - cancels ldlm lock (ldlm_cli_cancel()).
1320  */
1321 static void osc_lock_cancel(const struct lu_env *env,
1322                             const struct cl_lock_slice *slice)
1323 {
1324         struct cl_lock   *lock    = slice->cls_lock;
1325         struct osc_lock  *olck    = cl2osc_lock(slice);
1326         struct ldlm_lock *dlmlock = olck->ols_lock;
1327         int               result  = 0;
1328         int               discard;
1329
1330         LASSERT(cl_lock_is_mutexed(lock));
1331         LINVRNT(osc_lock_invariant(olck));
1332
1333         if (dlmlock != NULL) {
1334                 int do_cancel;
1335
1336                 discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
1337                 if (olck->ols_state >= OLS_GRANTED)
1338                         result = osc_lock_flush(olck, discard);
1339                 osc_lock_unhold(olck);
1340
1341                 lock_res_and_lock(dlmlock);
1342                 /* Now that we are the only user of the dlm read/write reference,
1343                  * ->l_readers and ->l_writers should normally both be zero.
1344                  * However, there is a corner case;
1345                  * see bug 18829 for details. */
1346                 do_cancel = (dlmlock->l_readers == 0 &&
1347                              dlmlock->l_writers == 0);
1348                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1349                 unlock_res_and_lock(dlmlock);
1350                 if (do_cancel)
1351                         result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
1352                 if (result < 0)
1353                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1354                                       "lock %p cancel failure with error(%d)\n",
1355                                       lock, result);
1356         }
1357         olck->ols_state = OLS_CANCELLED;
1358         olck->ols_flags &= ~LDLM_FL_LVB_READY;
1359         osc_lock_detach(env, olck);
1360 }
1361
1362 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
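/*
 * cl_page_gang_lookup() callback used by osc_lock_has_pages() below: a page
 * is tolerated only if, for a read lock, it is still covered by some other
 * lock; otherwise the page is reported and the scan is aborted.
 */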
1363 static int check_cb(const struct lu_env *env, struct cl_io *io,
1364                     struct cl_page *page, void *cbdata)
1365 {
1366         struct cl_lock *lock = cbdata;
1367
1368         if (lock->cll_descr.cld_mode == CLM_READ) {
1369                 struct cl_lock *tmp;
1370                 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1371                                      page, lock, 1, 0);
1372                 if (tmp != NULL) {
1373                         cl_lock_put(env, tmp);
1374                         return CLP_GANG_OKAY;
1375                 }
1376         }
1377
1378         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1379         CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1380         return CLP_GANG_ABORT;
1381 }
1382
1383 /**
1384  * Returns true iff there are pages under \a olck not protected by other
1385  * locks.
1386  */
1387 static int osc_lock_has_pages(struct osc_lock *olck)
1388 {
1389         struct cl_lock       *lock;
1390         struct cl_lock_descr *descr;
1391         struct cl_object     *obj;
1392         struct osc_object    *oob;
1393         struct cl_env_nest    nest;
1394         struct cl_io         *io;
1395         struct lu_env        *env;
1396         int                   result;
1397
1398         env = cl_env_nested_get(&nest);
1399         if (IS_ERR(env))
1400                 return 0;
1401
1402         obj   = olck->ols_cl.cls_obj;
1403         oob   = cl2osc(obj);
1404         io    = &oob->oo_debug_io;
1405         lock  = olck->ols_cl.cls_lock;
1406         descr = &lock->cll_descr;
1407
1408         mutex_lock(&oob->oo_debug_mutex);
1409
1410         io->ci_obj = cl_object_top(obj);
1411         io->ci_ignore_layout = 1;
1412         cl_io_init(env, io, CIT_MISC, io->ci_obj);
1413         do {
1414                 result = cl_page_gang_lookup(env, obj, io,
1415                                              descr->cld_start, descr->cld_end,
1416                                              check_cb, (void *)lock);
1417                 if (result == CLP_GANG_ABORT)
1418                         break;
1419                 if (result == CLP_GANG_RESCHED)
1420                         cond_resched();
1421         } while (result != CLP_GANG_OKAY);
1422         cl_io_fini(env, io);
1423         mutex_unlock(&oob->oo_debug_mutex);
1424         cl_env_nested_put(&nest, env);
1425
1426         return (result == CLP_GANG_ABORT);
1427 }
1428 #else
1429 static int osc_lock_has_pages(struct osc_lock *olck)
1430 {
1431         return 0;
1432 }
1433 #endif /* CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
1434
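/**
 * Implements cl_lock_operations::clo_delete() method for osc layer.
 *
 * A glimpse lock owns no ldlm lock and needs no cleanup; for any other lock
 * the hold on the underlying ldlm lock is released and the osc lock is
 * detached from it.
 */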
1435 static void osc_lock_delete(const struct lu_env *env,
1436                             const struct cl_lock_slice *slice)
1437 {
1438         struct osc_lock *olck;
1439
1440         olck = cl2osc_lock(slice);
1441         if (olck->ols_glimpse) {
1442                 LASSERT(!olck->ols_hold);
1443                 LASSERT(!olck->ols_lock);
1444                 return;
1445         }
1446
1447         LINVRNT(osc_lock_invariant(olck));
1448         LINVRNT(!osc_lock_has_pages(olck));
1449
1450         osc_lock_unhold(olck);
1451         osc_lock_detach(env, olck);
1452 }
1453
1454 /**
1455  * Implements cl_lock_operations::clo_state() method for osc layer.
1456  *
1457  * Maintains osc_lock::ols_owner field.
1458  *
1459  * This assumes that a lock always enters CLS_HELD (from some other state) in
1460  * the same IO context as the one that requested the lock. This should not be a
1461  * problem, because context is by definition shared by all activity pertaining
1462  * to the same high-level IO.
1463  */
1464 static void osc_lock_state(const struct lu_env *env,
1465                            const struct cl_lock_slice *slice,
1466                            enum cl_lock_state state)
1467 {
1468         struct osc_lock *lock = cl2osc_lock(slice);
1469
1470         /*
1471          * XXX multiple io contexts can use the lock at the same time.
1472          */
1473         LINVRNT(osc_lock_invariant(lock));
1474         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1475                 struct osc_io *oio = osc_env_io(env);
1476
1477                 LASSERT(lock->ols_owner == NULL);
1478                 lock->ols_owner = oio;
1479         } else if (state != CLS_HELD)
1480                 lock->ols_owner = NULL;
1481 }
1482
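/**
 * Implements cl_lock_operations::clo_print() method for osc layer: prints
 * the ldlm lock pointer, flags, handle cookie, state and owner of the osc
 * lock, followed by its lvb.
 */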
1483 static int osc_lock_print(const struct lu_env *env, void *cookie,
1484                           lu_printer_t p, const struct cl_lock_slice *slice)
1485 {
1486         struct osc_lock *lock = cl2osc_lock(slice);
1487
1488         /*
1489          * XXX print ldlm lock and einfo properly.
1490          */
1491         (*p)(env, cookie, "%p %#16llx "LPX64" %d %p ",
1492              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1493              lock->ols_state, lock->ols_owner);
1494         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1495         return 0;
1496 }
1497
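/**
 * Implements cl_lock_operations::clo_fits_into() method for osc layer.
 *
 * Decides whether the existing osc lock can be matched against \a need,
 * based on the enqueue flags and the current osc lock state.
 */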
1498 static int osc_lock_fits_into(const struct lu_env *env,
1499                               const struct cl_lock_slice *slice,
1500                               const struct cl_lock_descr *need,
1501                               const struct cl_io *io)
1502 {
1503         struct osc_lock *ols = cl2osc_lock(slice);
1504
1505         if (need->cld_enq_flags & CEF_NEVER)
1506                 return 0;
1507
1508         if (ols->ols_state >= OLS_CANCELLED)
1509                 return 0;
1510
1511         if (need->cld_mode == CLM_PHANTOM) {
1512                 if (ols->ols_agl)
1513                         return !(ols->ols_state > OLS_RELEASED);
1514
1515                 /*
1516                  * Note: a QUEUED lock can't be matched here, otherwise
1517                  * it might cause deadlocks.
1518                  * For example, in a read process:
1519                  * P1: enqueues a read lock, creating sublock1.
1520                  * P2: enqueues a write lock, creating sublock2 (conflicting
1521                  *     with sublock1).
1522                  * P1: the read lock is granted.
1523                  * P1: enqueues a glimpse lock (while holding sublock1 for read),
1524                  *     which matches sublock2 and waits for sublock2 to be granted.
1525                  *     But sublock2 cannot be granted, because P1
1526                  *     will not release sublock1. Bang!
1527                  */
1528                 if (ols->ols_state < OLS_GRANTED ||
1529                     ols->ols_state > OLS_RELEASED)
1530                         return 0;
1531         } else if (need->cld_enq_flags & CEF_MUST) {
1532                 /*
1533                  * If the lock has never been enqueued, it can't be matched,
1534                  * because the enqueue process brings in information that is
1535                  * needed to determine things such as lockless mode,
1536                  * CEF_MUST, etc.
1537                  */
1538                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1539                     ols->ols_locklessable)
1540                         return 0;
1541         }
1542         return 1;
1543 }
1544
1545 static const struct cl_lock_operations osc_lock_ops = {
1546         .clo_fini    = osc_lock_fini,
1547         .clo_enqueue = osc_lock_enqueue,
1548         .clo_wait    = osc_lock_wait,
1549         .clo_unuse   = osc_lock_unuse,
1550         .clo_use     = osc_lock_use,
1551         .clo_delete  = osc_lock_delete,
1552         .clo_state   = osc_lock_state,
1553         .clo_cancel  = osc_lock_cancel,
1554         .clo_weigh   = osc_lock_weigh,
1555         .clo_print   = osc_lock_print,
1556         .clo_fits_into = osc_lock_fits_into,
1557 };
1558
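/**
 * Implements cl_lock_operations::clo_unuse() for lockless locks: a lockless
 * lock is not cached, so when unused it is cancelled and deleted right away.
 */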
1559 static int osc_lock_lockless_unuse(const struct lu_env *env,
1560                                    const struct cl_lock_slice *slice)
1561 {
1562         struct osc_lock *ols = cl2osc_lock(slice);
1563         struct cl_lock *lock = slice->cls_lock;
1564
1565         LASSERT(ols->ols_state == OLS_GRANTED);
1566         LINVRNT(osc_lock_invariant(ols));
1567
1568         cl_lock_cancel(env, lock);
1569         cl_lock_delete(env, lock);
1570         return 0;
1571 }
1572
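/**
 * Implements cl_lock_operations::clo_cancel() for lockless locks: flushes
 * the pages covered by the lock and moves it to OLS_CANCELLED.
 */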
1573 static void osc_lock_lockless_cancel(const struct lu_env *env,
1574                                      const struct cl_lock_slice *slice)
1575 {
1576         struct osc_lock   *ols  = cl2osc_lock(slice);
1577         int result;
1578
1579         result = osc_lock_flush(ols, 0);
1580         if (result)
1581                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1582                        ols, result);
1583         ols->ols_state = OLS_CANCELLED;
1584 }
1585
1586 static int osc_lock_lockless_wait(const struct lu_env *env,
1587                                   const struct cl_lock_slice *slice)
1588 {
1589         struct osc_lock *olck = cl2osc_lock(slice);
1590         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1591
1592         LINVRNT(osc_lock_invariant(olck));
1593         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1594
1595         return lock->cll_error;
1596 }
1597
1598 static void osc_lock_lockless_state(const struct lu_env *env,
1599                                     const struct cl_lock_slice *slice,
1600                                     enum cl_lock_state state)
1601 {
1602         struct osc_lock *lock = cl2osc_lock(slice);
1603
1604         LINVRNT(osc_lock_invariant(lock));
1605         if (state == CLS_HELD) {
1606                 struct osc_io *oio  = osc_env_io(env);
1607
1608                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1609                 lock->ols_owner = oio;
1610
1611                 /* set the io to be lockless if this lock is for the
1612                  * io's host object */
1613                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1614                         oio->oi_lockless = 1;
1615         }
1616 }
1617
1618 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1619                                        const struct cl_lock_slice *slice,
1620                                        const struct cl_lock_descr *need,
1621                                        const struct cl_io *io)
1622 {
1623         struct osc_lock *lock = cl2osc_lock(slice);
1624
1625         if (!(need->cld_enq_flags & CEF_NEVER))
1626                 return 0;
1627
1628         /* A lockless lock should only be used by its owning io. See bug 22147. */
1629         return (lock->ols_owner == osc_env_io(env));
1630 }
1631
1632 static const struct cl_lock_operations osc_lock_lockless_ops = {
1633         .clo_fini      = osc_lock_fini,
1634         .clo_enqueue   = osc_lock_enqueue,
1635         .clo_wait      = osc_lock_lockless_wait,
1636         .clo_unuse     = osc_lock_lockless_unuse,
1637         .clo_state     = osc_lock_lockless_state,
1638         .clo_fits_into = osc_lock_lockless_fits_into,
1639         .clo_cancel    = osc_lock_lockless_cancel,
1640         .clo_print     = osc_lock_print
1641 };
1642
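/**
 * Allocates an osc_lock for \a lock and adds it to the cl_lock as the osc
 * slice: the cl enqueue flags are translated into ldlm flags, AGL and
 * glimpse locks are flagged accordingly, and locks enqueued without CEF_MUST
 * are considered for the lockless mode.
 */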
1643 int osc_lock_init(const struct lu_env *env,
1644                   struct cl_object *obj, struct cl_lock *lock,
1645                   const struct cl_io *unused)
1646 {
1647         struct osc_lock *clk;
1648         int result;
1649
1650         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, __GFP_IO);
1651         if (clk != NULL) {
1652                 __u32 enqflags = lock->cll_descr.cld_enq_flags;
1653
1654                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1655                 cfs_atomic_set(&clk->ols_pageref, 0);
1656                 clk->ols_state = OLS_NEW;
1657
1658                 clk->ols_flags = osc_enq2ldlm_flags(enqflags);
1659                 clk->ols_agl = !!(enqflags & CEF_AGL);
1660                 if (clk->ols_agl)
1661                         clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1662                 if (clk->ols_flags & LDLM_FL_HAS_INTENT)
1663                         clk->ols_glimpse = 1;
1664
1665                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1666
1667                 if (!(enqflags & CEF_MUST))
1668                         /* try to convert this lock to a lockless lock */
1669                         osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
1670                 if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
1671                         clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1672
1673                 LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
1674                                 lock, clk, clk->ols_flags);
1675
1676                 result = 0;
1677         } else
1678                 result = -ENOMEM;
1679         return result;
1680 }
1681
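/**
 * Returns 1 if the osc lock attached to \a dlm still holds page references
 * (so that cancelling it would be premature), 0 otherwise.
 *
 * osc_lock::ols_pageref is tested atomically by adding _PAGEREF_MAGIC to it:
 * if ols_pageref was 0 the addition yields exactly _PAGEREF_MAGIC and the
 * lock is considered page-free; otherwise (say, ols_pageref == 3, giving
 * _PAGEREF_MAGIC + 3) the magic is subtracted back and 1 is returned.
 */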
1682 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1683 {
1684         struct osc_lock *olock;
1685         int              rc = 0;
1686
1687         spin_lock(&osc_ast_guard);
1688         olock = dlm->l_ast_data;
1689         /*
1690          * There is a very rare race with osc_page_addref_lock(), but it
1691          * does not matter: in the worst case we skip cancelling a lock
1692          * that we actually could cancel, which does no harm.
1693          */
1694         if (olock != NULL &&
1695             cfs_atomic_add_return(_PAGEREF_MAGIC,
1696                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1697                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1698                 rc = 1;
1699         }
1700         spin_unlock(&osc_ast_guard);
1701         return rc;
1702 }
1703
1704 /** @} osc */