LU-68 Fix a race between lock cancel and write
fs/lustre-release.git: lustre/osc/osc_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc 
54  *  @{ 
55  */
56
57 #define _PAGEREF_MAGIC  (-10000000)
58
59 /*****************************************************************************
60  *
61  * Type conversions.
62  *
63  */
64
65 static const struct cl_lock_operations osc_lock_ops;
66 static const struct cl_lock_operations osc_lock_lockless_ops;
67 static void osc_lock_to_lockless(const struct lu_env *env,
68                                  struct osc_lock *ols, int force);
69 static int osc_lock_has_pages(struct osc_lock *olck);
70
71 int osc_lock_is_lockless(const struct osc_lock *olck)
72 {
73         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
74 }
75
76 /**
77  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
78  * pointer cannot be dereferenced, as lock is not protected from concurrent
79  * reclaim. This function is a helper for osc_lock_invariant().
80  */
81 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
82 {
83         struct ldlm_lock *lock;
84
85         lock = ldlm_handle2lock(handle);
86         if (lock != NULL)
87                 LDLM_LOCK_PUT(lock);
88         return lock;
89 }
90
91 /**
92  * Invariant that has to be true all of the time.
93  */
94 static int osc_lock_invariant(struct osc_lock *ols)
95 {
96         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
97         struct ldlm_lock *olock       = ols->ols_lock;
98         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
99
100         return
101                 ergo(osc_lock_is_lockless(ols),
102                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
103                 (ergo(olock != NULL, handle_used) &&
104                  ergo(olock != NULL,
105                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
106                  /*
107                   * Check that ->ols_handle and ->ols_lock are consistent, but
108                  * take into account that they are set at different times.
109                   */
110                  ergo(handle_used,
111                       ergo(lock != NULL && olock != NULL, lock == olock) &&
112                       ergo(lock == NULL, olock == NULL)) &&
113                  ergo(ols->ols_state == OLS_CANCELLED,
114                       olock == NULL && !handle_used) &&
115                  /*
116                   * DLM lock is destroyed only after we have seen cancellation
117                   * ast.
118                   */
119                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
120                       !olock->l_destroyed) &&
121                  ergo(ols->ols_state == OLS_GRANTED,
122                       olock != NULL &&
123                       olock->l_req_mode == olock->l_granted_mode &&
124                       ols->ols_hold));
125 }
126
127 /*****************************************************************************
128  *
129  * Lock operations.
130  *
131  */
132
133 /**
134  * Breaks a link between osc_lock and dlm_lock.
135  */
136 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
137 {
138         struct ldlm_lock *dlmlock;
139
140         cfs_spin_lock(&osc_ast_guard);
141         dlmlock = olck->ols_lock;
142         if (dlmlock == NULL) {
143                 cfs_spin_unlock(&osc_ast_guard);
144                 return;
145         }
146
147         olck->ols_lock = NULL;
148         /* wb(); --- for all who check (ols->ols_lock != NULL) before
149          * calling osc_lock_detach() */
150         dlmlock->l_ast_data = NULL;
151         olck->ols_handle.cookie = 0ULL;
152         cfs_spin_unlock(&osc_ast_guard);
153
154         lock_res_and_lock(dlmlock);
155         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
156                 struct cl_object *obj = olck->ols_cl.cls_obj;
157                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
158                 __u64 old_kms;
159
160                 cl_object_attr_lock(obj);
161                 /* Must get the value under the lock to avoid possible races. */
162                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
163                 /* Update the kms. Need to loop all granted locks.
164                  * Not a problem for the client */
165                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
166
167                 cl_object_attr_set(env, obj, attr, CAT_KMS);
168                 cl_object_attr_unlock(obj);
169         }
170         unlock_res_and_lock(dlmlock);
171
172         /* release a reference taken in osc_lock_upcall0(). */
173         LASSERT(olck->ols_has_ref);
174         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
175         LDLM_LOCK_RELEASE(dlmlock);
176         olck->ols_has_ref = 0;
177 }
178
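/**
 * Releases the enqueue "hold" on the underlying ldlm lock, if any, by
 * passing its handle and mode to osc_cancel_base(). The hold is taken in
 * osc_lock_upcall0() and osc_lock_use().
 */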
179 static int osc_lock_unhold(struct osc_lock *ols)
180 {
181         int result = 0;
182
183         if (ols->ols_hold) {
184                 ols->ols_hold = 0;
185                 result = osc_cancel_base(&ols->ols_handle,
186                                          ols->ols_einfo.ei_mode);
187         }
188         return result;
189 }
190
191 static int osc_lock_unuse(const struct lu_env *env,
192                           const struct cl_lock_slice *slice)
193 {
194         struct osc_lock *ols = cl2osc_lock(slice);
195
196         LASSERT(ols->ols_state == OLS_GRANTED ||
197                 ols->ols_state == OLS_UPCALL_RECEIVED);
198         LINVRNT(osc_lock_invariant(ols));
199
200         if (ols->ols_glimpse) {
201                 LASSERT(ols->ols_hold == 0);
202                 return 0;
203         }
204         LASSERT(ols->ols_hold);
205
206         /*
207          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
208          * so that possible synchronous cancellation (that always happens
209          * e.g., for liblustre) sees that lock is released.
210          */
211         ols->ols_state = OLS_RELEASED;
212         return osc_lock_unhold(ols);
213 }
214
215 static void osc_lock_fini(const struct lu_env *env,
216                           struct cl_lock_slice *slice)
217 {
218         struct osc_lock  *ols = cl2osc_lock(slice);
219
220         LINVRNT(osc_lock_invariant(ols));
221         /*
222          * ->ols_hold can still be true at this point if, for example, a
223          * thread that requested a lock was killed (and released a reference
224          * to the lock), before reply from a server was received. In this case
225          * lock is destroyed immediately after upcall.
226          */
227         osc_lock_unhold(ols);
228         LASSERT(ols->ols_lock == NULL);
229         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
230                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
231
232         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
233 }
234
235 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
236                         struct ldlm_res_id *resname)
237 {
238         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
239         if (0) {
240                 /*
241                  * In the perfect world of the future, where ost servers talk
242                  * idif-fids...
243                  */
244                 fid_build_reg_res_name(fid, resname);
245         } else {
246                 /*
247                  * In reality, the ost server expects ->lsm_object_id and
248                  * ->lsm_object_seq in the resource name.
249                  */
250                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
251                                    resname);
252         }
253 }
254
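/**
 * Builds an ldlm extent policy (byte range and group id) from the cl_lock
 * description of \a lock.
 */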
255 static void osc_lock_build_policy(const struct lu_env *env,
256                                   const struct cl_lock *lock,
257                                   ldlm_policy_data_t *policy)
258 {
259         const struct cl_lock_descr *d = &lock->cll_descr;
260
261         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
262         policy->l_extent.gid = d->cld_gid;
263 }
264
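/**
 * Translates cl_lock enqueue flags (CEF_*) into ldlm flags (LDLM_FL_*,
 * LDLM_AST_*).
 */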
265 static int osc_enq2ldlm_flags(__u32 enqflags)
266 {
267         int result = 0;
268
269         LASSERT((enqflags & ~CEF_MASK) == 0);
270
271         if (enqflags & CEF_NONBLOCK)
272                 result |= LDLM_FL_BLOCK_NOWAIT;
273         if (enqflags & CEF_ASYNC)
274                 result |= LDLM_FL_HAS_INTENT;
275         if (enqflags & CEF_DISCARD_DATA)
276                 result |= LDLM_AST_DISCARD_DATA;
277         return result;
278 }
279
280 /**
281  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
282  * pointers. Initialized in osc_init().
283  */
284 cfs_spinlock_t osc_ast_guard;
285
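/**
 * Returns the osc_lock attached to \a dlm_lock through ->l_ast_data, taking
 * a trusted reference on the corresponding cl_lock, or NULL if all osc
 * references on the dlm lock have already been released. Paired with
 * osc_ast_data_put().
 */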
286 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
287 {
288         struct osc_lock *olck;
289
290         lock_res_and_lock(dlm_lock);
291         cfs_spin_lock(&osc_ast_guard);
292         olck = dlm_lock->l_ast_data;
293         if (olck != NULL) {
294                 struct cl_lock *lock = olck->ols_cl.cls_lock;
295                 /*
296                  * If osc_lock holds a reference on ldlm lock, return it even
297                  * when cl_lock is in CLS_FREEING state. This way
298                  *
299                  *         osc_ast_data_get(dlmlock) == NULL
300                  *
301                  * guarantees that all osc references on dlmlock were
302                  * released. osc_dlm_blocking_ast0() relies on that.
303                  */
304                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
305                         cl_lock_get_trust(lock);
306                         lu_ref_add_atomic(&lock->cll_reference,
307                                           "ast", cfs_current());
308                 } else
309                         olck = NULL;
310         }
311         cfs_spin_unlock(&osc_ast_guard);
312         unlock_res_and_lock(dlm_lock);
313         return olck;
314 }
315
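/**
 * Releases the cl_lock reference taken by osc_ast_data_get().
 */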
316 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
317 {
318         struct cl_lock *lock;
319
320         lock = olck->ols_cl.cls_lock;
321         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
322         cl_lock_put(env, lock);
323 }
324
325 /**
326  * Updates object attributes from a lock value block (lvb) received together
327  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
328  * logic.
329  *
330  * This can be optimized to not update attributes when lock is a result of a
331  * local match.
332  *
333  * Called under lock and resource spin-locks.
334  */
335 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
336                                 int rc)
337 {
338         struct ost_lvb    *lvb;
339         struct cl_object  *obj;
340         struct lov_oinfo  *oinfo;
341         struct cl_attr    *attr;
342         unsigned           valid;
343
344         ENTRY;
345
346         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
347                 EXIT;
348                 return;
349         }
350
351         lvb   = &olck->ols_lvb;
352         obj   = olck->ols_cl.cls_obj;
353         oinfo = cl2osc(obj)->oo_oinfo;
354         attr  = &osc_env_info(env)->oti_attr;
355         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
356         cl_lvb2attr(attr, lvb);
357
358         cl_object_attr_lock(obj);
359         if (rc == 0) {
360                 struct ldlm_lock  *dlmlock;
361                 __u64 size;
362
363                 dlmlock = olck->ols_lock;
364                 LASSERT(dlmlock != NULL);
365
366                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
367                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
368                 size = lvb->lvb_size;
369                 /* Extend KMS up to the end of this lock and no further
370                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
371                 if (size > dlmlock->l_policy_data.l_extent.end)
372                         size = dlmlock->l_policy_data.l_extent.end + 1;
373                 if (size >= oinfo->loi_kms) {
374                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
375                                    ", kms="LPU64, lvb->lvb_size, size);
376                         valid |= CAT_KMS;
377                         attr->cat_kms = size;
378                 } else {
379                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
380                                    LPU64"; leaving kms="LPU64", end="LPU64,
381                                    lvb->lvb_size, oinfo->loi_kms,
382                                    dlmlock->l_policy_data.l_extent.end);
383                 }
384                 ldlm_lock_allow_match_locked(dlmlock);
385         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
386                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
387                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
388         } else
389                 valid = 0;
390
391         if (valid != 0)
392                 cl_object_attr_set(env, obj, attr, valid);
393
394         cl_object_attr_unlock(obj);
395
396         EXIT;
397 }
398
399 /**
400  * Called when a lock is granted, from an upcall (when server returned a
401  * granted lock), or from completion AST, when server returned a blocked lock.
402  *
403  * Called under lock and resource spin-locks, that are released temporarily
404  * here.
405  */
406 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
407                              struct ldlm_lock *dlmlock, int rc)
408 {
409         struct ldlm_extent   *ext;
410         struct cl_lock       *lock;
411         struct cl_lock_descr *descr;
412
413         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
414
415         ENTRY;
416         if (olck->ols_state < OLS_GRANTED) {
417                 lock  = olck->ols_cl.cls_lock;
418                 ext   = &dlmlock->l_policy_data.l_extent;
419                 descr = &osc_env_info(env)->oti_descr;
420                 descr->cld_obj = lock->cll_descr.cld_obj;
421
422                 /* XXX check that ->l_granted_mode is valid. */
423                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
424                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
425                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
426                 descr->cld_gid   = ext->gid;
427                 /*
428                  * tell upper layers the extent of the lock that was actually
429                  * granted
430                  */
431                 olck->ols_state = OLS_GRANTED;
432                 osc_lock_lvb_update(env, olck, rc);
433
434                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
435                  * to take a semaphore on a parent lock. This is safe, because
436                  * spin-locks are needed to protect consistency of
437                  * dlmlock->l_*_mode and LVB, and we have finished processing
438                  * them. */
439                 unlock_res_and_lock(dlmlock);
440                 cl_lock_modify(env, lock, descr);
441                 cl_lock_signal(env, lock);
442                 LINVRNT(osc_lock_invariant(olck));
443                 lock_res_and_lock(dlmlock);
444         }
445         EXIT;
446 }
447
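/**
 * Binds the osc_lock to the ldlm lock returned by the enqueue: stores the
 * dlm lock in ->ols_lock, takes an ldlm mode reference (ols_hold) and keeps
 * the lock reference (ols_has_ref) until osc_lock_detach(), and finishes
 * granting if the dlm lock is already granted.
 */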
448 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
449
450 {
451         struct ldlm_lock *dlmlock;
452
453         ENTRY;
454
455         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
456         LASSERT(dlmlock != NULL);
457
458         lock_res_and_lock(dlmlock);
459         cfs_spin_lock(&osc_ast_guard);
460         LASSERT(dlmlock->l_ast_data == olck);
461         LASSERT(olck->ols_lock == NULL);
462         olck->ols_lock = dlmlock;
463         cfs_spin_unlock(&osc_ast_guard);
464
465         /*
466          * Lock might not be granted yet. In this case, completion ast
467          * (osc_ldlm_completion_ast()) comes later and finishes lock
468          * granting.
469          */
470         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
471                 osc_lock_granted(env, olck, dlmlock, 0);
472         unlock_res_and_lock(dlmlock);
473
474         /*
475          * osc_enqueue_interpret() decrefs asynchronous locks; counteract
476          * this here.
477          */
478         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
479         olck->ols_hold = 1;
480
481         /* lock reference taken by ldlm_handle2lock_long() is owned by
482          * osc_lock and released in osc_lock_detach() */
483         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
484         olck->ols_has_ref = 1;
485 }
486
487 /**
488  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
489  * received from a server, or after osc_enqueue_base() matched a local DLM
490  * lock.
491  */
492 static int osc_lock_upcall(void *cookie, int errcode)
493 {
494         struct osc_lock         *olck  = cookie;
495         struct cl_lock_slice    *slice = &olck->ols_cl;
496         struct cl_lock          *lock  = slice->cls_lock;
497         struct lu_env           *env;
498         struct cl_env_nest       nest;
499
500         ENTRY;
501         env = cl_env_nested_get(&nest);
502         if (!IS_ERR(env)) {
503                 int rc;
504
505                 cl_lock_mutex_get(env, lock);
506
507                 LASSERT(lock->cll_state >= CLS_QUEUING);
508                 if (olck->ols_state == OLS_ENQUEUED) {
509                         olck->ols_state = OLS_UPCALL_RECEIVED;
510                         rc = ldlm_error2errno(errcode);
511                 } else if (olck->ols_state == OLS_CANCELLED) {
512                         rc = -EIO;
513                 } else {
514                         CERROR("Impossible state: %d\n", olck->ols_state);
515                         LBUG();
516                 }
517                 if (rc) {
518                         struct ldlm_lock *dlmlock;
519
520                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
521                         if (dlmlock != NULL) {
522                                 lock_res_and_lock(dlmlock);
523                                 cfs_spin_lock(&osc_ast_guard);
524                                 LASSERT(olck->ols_lock == NULL);
525                                 dlmlock->l_ast_data = NULL;
526                                 olck->ols_handle.cookie = 0ULL;
527                                 cfs_spin_unlock(&osc_ast_guard);
528                                 unlock_res_and_lock(dlmlock);
529                                 LDLM_LOCK_PUT(dlmlock);
530                         }
531                 } else {
532                         if (olck->ols_glimpse)
533                                 olck->ols_glimpse = 0;
534                         osc_lock_upcall0(env, olck);
535                 }
536
537                 /* Error handling, some errors are tolerable. */
538                 if (olck->ols_locklessable && rc == -EUSERS) {
539                         /* This is a tolerable error, turn this lock into
540                          * lockless lock.
541                          */
542                         osc_object_set_contended(cl2osc(slice->cls_obj));
543                         LASSERT(slice->cls_ops == &osc_lock_ops);
544
545                         /* Change this lock to ldlmlock-less lock. */
546                         osc_lock_to_lockless(env, olck, 1);
547                         olck->ols_state = OLS_GRANTED;
548                         rc = 0;
549                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
550                         osc_lock_lvb_update(env, olck, rc);
551                         cl_lock_delete(env, lock);
552                         /* Hide the error. */
553                         rc = 0;
554                 }
555
556                 if (rc == 0)
557                         /* on error, lock was signaled by cl_lock_error() */
558                         cl_lock_signal(env, lock);
559                 else
560                         cl_lock_error(env, lock, rc);
561
562                 cl_lock_mutex_put(env, lock);
563
564                 /* release cookie reference, acquired by osc_lock_enqueue() */
565                 lu_ref_del(&lock->cll_reference, "upcall", lock);
566                 cl_lock_put(env, lock);
567                 cl_env_nested_put(&nest, env);
568         } else
569                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
570                 LBUG();
571         RETURN(errcode);
572 }
573
574 /**
575  * Core of osc_dlm_blocking_ast() logic.
576  */
577 static void osc_lock_blocking(const struct lu_env *env,
578                               struct ldlm_lock *dlmlock,
579                               struct osc_lock *olck, int blocking)
580 {
581         struct cl_lock *lock = olck->ols_cl.cls_lock;
582
583         LASSERT(olck->ols_lock == dlmlock);
584         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
585         LASSERT(!osc_lock_is_lockless(olck));
586
587         /*
588          * Lock might be still addref-ed here, if e.g., blocking ast
589          * is sent for a failed lock.
590          */
591         osc_lock_unhold(olck);
592
593         if (blocking && olck->ols_state < OLS_BLOCKED)
594                 /*
595                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
596                  * because it recursively re-enters osc_lock_blocking(), with
597                  * the state set to OLS_CANCELLED.
598                  */
599                 olck->ols_state = OLS_BLOCKED;
600         /*
601          * cancel and destroy lock at least once no matter how blocking ast is
602          * entered (see comment above osc_ldlm_blocking_ast() for use
603          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
604          */
605         cl_lock_cancel(env, lock);
606         cl_lock_delete(env, lock);
607 }
608
609 /**
610  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
611  * and ldlm_lock caches.
612  */
613 static int osc_dlm_blocking_ast0(const struct lu_env *env,
614                                  struct ldlm_lock *dlmlock,
615                                  void *data, int flag)
616 {
617         struct osc_lock *olck;
618         struct cl_lock  *lock;
619         int result;
620         int cancel;
621
622         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
623
624         cancel = 0;
625         olck = osc_ast_data_get(dlmlock);
626         if (olck != NULL) {
627                 lock = olck->ols_cl.cls_lock;
628                 cl_lock_mutex_get(env, lock);
629                 LINVRNT(osc_lock_invariant(olck));
630                 if (olck->ols_ast_wait) {
631                         /* wake up osc_lock_use() */
632                         cl_lock_signal(env, lock);
633                         olck->ols_ast_wait = 0;
634                 }
635                 /*
636                  * Lock might have been canceled while this thread was
637                  * sleeping for lock mutex, but olck is pinned in memory.
638                  */
639                 if (olck == dlmlock->l_ast_data) {
640                         /*
641                          * NOTE: DLM sends blocking AST's for failed locks
642                          *       (that are still in pre-OLS_GRANTED state)
643                          *       too, and they have to be canceled otherwise
644                          *       DLM lock is never destroyed and stuck in
645                          *       the memory.
646                          *
647                          *       Alternatively, ldlm_cli_cancel() can be
648                          *       called here directly for osc_locks with
649                          *       ols_state < OLS_GRANTED to maintain an
650                          *       invariant that ->clo_cancel() is only called
651                          *       for locks that were granted.
652                          */
653                         LASSERT(data == olck);
654                         osc_lock_blocking(env, dlmlock,
655                                           olck, flag == LDLM_CB_BLOCKING);
656                 } else
657                         cancel = 1;
658                 cl_lock_mutex_put(env, lock);
659                 osc_ast_data_put(env, olck);
660         } else
661                 /*
662                  * DLM lock exists, but there is no cl_lock attached to it.
663                  * This is a `normal' race. cl_object and its cl_lock's can be
664                  * removed by memory pressure, together with all pages.
665                  */
666                 cancel = (flag == LDLM_CB_BLOCKING);
667
668         if (cancel) {
669                 struct lustre_handle *lockh;
670
671                 lockh = &osc_env_info(env)->oti_handle;
672                 ldlm_lock2handle(dlmlock, lockh);
673                 result = ldlm_cli_cancel(lockh);
674         } else
675                 result = 0;
676         return result;
677 }
678
679 /**
680  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
681  * some other lock, or is canceled. This function is installed as a
682  * ldlm_lock::l_blocking_ast() for client extent locks.
683  *
684  * Control flow is tricky, because ldlm uses the same call-back
685  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
686  *
687  * \param dlmlock lock for which ast occurred.
688  *
689  * \param new description of a conflicting lock in case of blocking ast.
690  *
691  * \param data value of dlmlock->l_ast_data
692  *
693  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
694  *             cancellation and blocking ast's.
695  *
696  * Possible use cases:
697  *
698  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
699  *       lock due to lock lru pressure, or explicit user request to purge
700  *       locks.
701  *
702  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
703  *       us that dlmlock conflicts with another lock that some client is
704  *       enqueuing. Lock is canceled.
705  *
706  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
707  *             ldlm_cli_cancel() that calls
708  *
709  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
710  *
711  *             recursively entering osc_ldlm_blocking_ast().
712  *
713  *     - client cancels lock voluntarily (e.g., as part of early cancellation):
714  *
715  *           cl_lock_cancel()->
716  *             osc_lock_cancel()->
717  *               ldlm_cli_cancel()->
718  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
719  *
720  */
721 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
722                                  struct ldlm_lock_desc *new, void *data,
723                                  int flag)
724 {
725         struct lu_env     *env;
726         struct cl_env_nest nest;
727         int                result;
728
729         /*
730          * This can be called in the context of outer IO, e.g.,
731          *
732          *     cl_enqueue()->...
733          *       ->osc_enqueue_base()->...
734          *         ->ldlm_prep_elc_req()->...
735          *           ->ldlm_cancel_callback()->...
736          *             ->osc_ldlm_blocking_ast()
737          *
738          * new environment has to be created to not corrupt outer context.
739          */
740         env = cl_env_nested_get(&nest);
741         if (!IS_ERR(env)) {
742                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
743                 cl_env_nested_put(&nest, env);
744         } else {
745                 result = PTR_ERR(env);
746                 /*
747                  * XXX This should never happen, as cl_lock is
748                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
749                  * should be used.
750                  */
751                 LBUG();
752         }
753         if (result != 0) {
754                 if (result == -ENODATA)
755                         result = 0;
756                 else
757                         CERROR("BAST failed: %d\n", result);
758         }
759         return result;
760 }
761
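/**
 * Completion AST for client extent locks: finishes the dlm-level part via
 * ldlm_completion_ast_async() and then updates the attached osc_lock/cl_lock
 * (copies the LVB and, if the lock is granted, moves it to OLS_GRANTED).
 */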
762 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
763                                    int flags, void *data)
764 {
765         struct cl_env_nest nest;
766         struct lu_env     *env;
767         struct osc_lock   *olck;
768         struct cl_lock    *lock;
769         int result;
770         int dlmrc;
771
772         /* first, do dlm part of the work */
773         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
774         /* then, notify cl_lock */
775         env = cl_env_nested_get(&nest);
776         if (!IS_ERR(env)) {
777                 olck = osc_ast_data_get(dlmlock);
778                 if (olck != NULL) {
779                         lock = olck->ols_cl.cls_lock;
780                         cl_lock_mutex_get(env, lock);
781                         /*
782                          * ldlm_handle_cp_callback() copied LVB from request
783                          * to lock->l_lvb_data, store it in osc_lock.
784                          */
785                         LASSERT(dlmlock->l_lvb_data != NULL);
786                         lock_res_and_lock(dlmlock);
787                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
788                         if (olck->ols_lock == NULL) {
789                                 /*
790                                  * upcall (osc_lock_upcall()) hasn't yet been
791                                  * called. Do nothing now, upcall will bind
792                                  * olck to dlmlock and signal the waiters.
793                                  *
794                                  * This maintains an invariant that osc_lock
795                                  * and ldlm_lock are always bound when
796                                  * osc_lock is in OLS_GRANTED state.
797                                  */
798                         } else if (dlmlock->l_granted_mode ==
799                                    dlmlock->l_req_mode) {
800                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
801                         }
802                         unlock_res_and_lock(dlmlock);
803
804                         if (dlmrc != 0) {
805                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
806                                               "dlmlock returned %d\n", dlmrc);
807                                 cl_lock_error(env, lock, dlmrc);
808                         }
809                         cl_lock_mutex_put(env, lock);
810                         osc_ast_data_put(env, olck);
811                         result = 0;
812                 } else
813                         result = -ELDLM_NO_LOCK_DATA;
814                 cl_env_nested_put(&nest, env);
815         } else
816                 result = PTR_ERR(env);
817         return dlmrc ?: result;
818 }
819
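/**
 * Glimpse AST for client extent locks: packs an up-to-date LVB for the
 * object into the reply, so that the server can return current object
 * attributes to the glimpsing client.
 */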
820 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
821 {
822         struct ptlrpc_request  *req  = data;
823         struct osc_lock        *olck;
824         struct cl_lock         *lock;
825         struct cl_object       *obj;
826         struct cl_env_nest      nest;
827         struct lu_env          *env;
828         struct ost_lvb         *lvb;
829         struct req_capsule     *cap;
830         int                     result;
831
832         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
833
834         env = cl_env_nested_get(&nest);
835         if (!IS_ERR(env)) {
836                 /*
837                  * osc_ast_data_get() has to go after environment is
838                  * allocated, because osc_ast_data_get() acquires a
839                  * reference to a lock, and that reference can only be
840                  * released within an environment.
841                  */
842                 olck = osc_ast_data_get(dlmlock);
843                 if (olck != NULL) {
844                         lock = olck->ols_cl.cls_lock;
845                         cl_lock_mutex_get(env, lock);
846                         cap = &req->rq_pill;
847                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
848                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
849                                              sizeof *lvb);
850                         result = req_capsule_server_pack(cap);
851                         if (result == 0) {
852                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
853                                 obj = lock->cll_descr.cld_obj;
854                                 result = cl_object_glimpse(env, obj, lvb);
855                         }
856                         cl_lock_mutex_put(env, lock);
857                         osc_ast_data_put(env, olck);
858                 } else {
859                         /*
860                          * These errors are normal races, so we don't want to
861                          * fill the console with messages by calling
862                          * ptlrpc_error()
863                          */
864                         lustre_pack_reply(req, 1, NULL, NULL);
865                         result = -ELDLM_NO_LOCK_DATA;
866                 }
867                 cl_env_nested_put(&nest, env);
868         } else
869                 result = PTR_ERR(env);
870         req->rq_status = result;
871         return result;
872 }
873
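/**
 * Implementation of cl_lock_operations::clo_weigh() for osc layer: uses the
 * number of pages cached for the object as an estimate of the lock's weight.
 */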
874 static unsigned long osc_lock_weigh(const struct lu_env *env,
875                                     const struct cl_lock_slice *slice)
876 {
877         /*
878          * don't need to grab coh_page_guard since we don't care about the
879          * exact number of pages.
880          */
881         return cl_object_header(slice->cls_obj)->coh_pages;
882 }
883
884 /**
885  * Get the weight of dlm lock for early cancellation.
886  *
887  * XXX: it should return the pages covered by this \a dlmlock.
888  */
889 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
890 {
891         struct cl_env_nest       nest;
892         struct lu_env           *env;
893         struct osc_lock         *lock;
894         struct cl_lock          *cll;
895         unsigned long            weight;
896         ENTRY;
897
898         cfs_might_sleep();
899         /*
900          * osc_ldlm_weigh_ast has a complex context since it might be called
901          * because of lock canceling, or from user's input. We have to make
902  * a new environment for it. It is probably safe to reuse the upper
903  * context, because cl_lock_put() does not modify environment variables,
904  * but a new environment is created here to be on the safe side.
905          */
906         env = cl_env_nested_get(&nest);
907         if (IS_ERR(env))
908                 /* Mostly because of lack of memory; tend to eliminate this lock */
909                 RETURN(0);
910
911         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
912         lock = osc_ast_data_get(dlmlock);
913         if (lock == NULL) {
914                 /* cl_lock was destroyed because of memory pressure.
915                  * It is reasonable to assign this type of lock
916                  * a lower cost.
917                  */
918                 GOTO(out, weight = 0);
919         }
920
921         cll = lock->ols_cl.cls_lock;
922         cl_lock_mutex_get(env, cll);
923         weight = cl_lock_weigh(env, cll);
924         cl_lock_mutex_put(env, cll);
925         osc_ast_data_put(env, lock);
926         EXIT;
927
928 out:
929         cl_env_nested_put(&nest, env);
930         return weight;
931 }
932
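/**
 * Fills the ldlm_enqueue_info used for this lock: lock type, mode (glimpse
 * locks are enqueued in read mode) and the osc callbacks, with the osc_lock
 * itself as the value for ->l_ast_data.
 */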
933 static void osc_lock_build_einfo(const struct lu_env *env,
934                                  const struct cl_lock *clock,
935                                  struct osc_lock *lock,
936                                  struct ldlm_enqueue_info *einfo)
937 {
938         enum cl_lock_mode mode;
939
940         mode = clock->cll_descr.cld_mode;
941         if (mode == CLM_PHANTOM)
942                 /*
943                  * For now, enqueue all glimpse locks in read mode. In the
944                  * future, client might choose to enqueue LCK_PW lock for
945                  * glimpse on a file opened for write.
946                  */
947                 mode = CLM_READ;
948
949         einfo->ei_type   = LDLM_EXTENT;
950         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
951         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
952         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
953         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
954         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
955         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
956 }
957
958 /**
959  * Determine if the lock should be converted into a lockless lock.
960  *
961  * Steps to check:
962  * - whether the lock has an explicit requirement for a non-lockless lock;
963  * - the io lock request type ci_lockreq;
964  * - send the enqueue rpc to the ost to make the final decision;
965  * - special treatment for lockless truncate
966  *
967  *  Additional policy can be implemented here, e.g., never do lockless-io
968  *  for large extents.
969  */
970 static void osc_lock_to_lockless(const struct lu_env *env,
971                                  struct osc_lock *ols, int force)
972 {
973         struct cl_lock_slice *slice = &ols->ols_cl;
974         struct cl_lock *lock        = slice->cls_lock;
975
976         LASSERT(ols->ols_state == OLS_NEW ||
977                 ols->ols_state == OLS_UPCALL_RECEIVED);
978
979         if (force) {
980                 ols->ols_locklessable = 1;
981                 LASSERT(cl_lock_is_mutexed(lock));
982                 slice->cls_ops = &osc_lock_lockless_ops;
983         } else {
984                 struct osc_io *oio     = osc_env_io(env);
985                 struct cl_io  *io      = oio->oi_cl.cis_io;
986                 struct cl_object *obj  = slice->cls_obj;
987                 struct osc_object *oob = cl2osc(obj);
988                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
989                 struct obd_connect_data *ocd;
990
991                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
992                         io->ci_lockreq == CILR_MAYBE ||
993                         io->ci_lockreq == CILR_NEVER);
994
995                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
996                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
997                                 (io->ci_lockreq == CILR_MAYBE) &&
998                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
999                 if (io->ci_lockreq == CILR_NEVER ||
1000                         /* lockless IO */
1001                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1002                         /* lockless truncate */
1003                     (cl_io_is_trunc(io) &&
1004                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1005                       osd->od_lockless_truncate)) {
1006                         ols->ols_locklessable = 1;
1007                         slice->cls_ops = &osc_lock_lockless_ops;
1008                 }
1009         }
1010         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1011 }
1012
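/**
 * Returns true iff the already queued lock \a qed is compatible with the lock
 * being enqueued (\a qing), i.e., \a qed does not have to be cancelled first:
 * a glimpse \a qed is compatible once its upcall was received, or when \a qing
 * is a read lock; otherwise only two read locks are compatible.
 */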
1013 static int osc_lock_compatible(const struct osc_lock *qing,
1014                                const struct osc_lock *qed)
1015 {
1016         enum cl_lock_mode qing_mode;
1017         enum cl_lock_mode qed_mode;
1018
1019         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1020         if (qed->ols_glimpse &&
1021             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1022                 return 1;
1023
1024         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1025         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1026 }
1027
1028 /**
1029  * Cancel all conflicting locks and wait for them to be destroyed.
1030  *
1031  * This function is used for two purposes:
1032  *
1033  *     - early cancel all conflicting locks before starting IO, and
1034  *
1035  *     - guarantee that pages added to the page cache by lockless IO are never
1036  *       covered by locks other than lockless IO lock, and, hence, are not
1037  *       visible to other threads.
1038  */
1039 static int osc_lock_enqueue_wait(const struct lu_env *env,
1040                                  const struct osc_lock *olck)
1041 {
1042         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1043         struct cl_lock_descr    *descr   = &lock->cll_descr;
1044         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1045         struct cl_lock          *scan;
1046         struct cl_lock          *conflict= NULL;
1047         int lockless                     = osc_lock_is_lockless(olck);
1048         int rc                           = 0;
1049         ENTRY;
1050
1051         LASSERT(cl_lock_is_mutexed(lock));
1052         LASSERT(lock->cll_state == CLS_QUEUING);
1053
1054         /* let a glimpse lock enqueue anyway, because we actually
1055          * don't need to cancel any conflicting locks for it. */
1056         if (olck->ols_glimpse)
1057                 return 0;
1058
1059         cfs_spin_lock(&hdr->coh_lock_guard);
1060         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1061                 struct cl_lock_descr *cld = &scan->cll_descr;
1062                 const struct osc_lock *scan_ols;
1063
1064                 if (scan == lock)
1065                         break;
1066
1067                 if (scan->cll_state < CLS_QUEUING ||
1068                     scan->cll_state == CLS_FREEING ||
1069                     cld->cld_start > descr->cld_end ||
1070                     cld->cld_end < descr->cld_start)
1071                         continue;
1072
1073                 /* overlapped and living locks. */
1074
1075                 /* We're not supposed to give up group lock. */
1076                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1077                         LASSERT(descr->cld_mode != CLM_GROUP ||
1078                                 descr->cld_gid != scan->cll_descr.cld_gid);
1079                         continue;
1080                 }
1081
1082                 scan_ols = osc_lock_at(scan);
1083
1084                 /* We need to cancel the compatible locks if we're enqueuing
1085                  * a lockless lock, for example:
1086                  * imagine that client has PR lock on [0, 1000], and thread T0
1087                  * is doing lockless IO in [500, 1500] region. Concurrent
1088                  * thread T1 can see lockless data in [500, 1000], which is
1089                  * wrong, because these data are possibly stale. */
1090                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1091                         continue;
1092
1093                 /* Now @scan is conflicting with @lock, which means the current
1094                  * thread has to sleep until @scan is destroyed. */
1095                 if (scan_ols->ols_owner == osc_env_io(env)) {
1096                         CERROR("DEADLOCK POSSIBLE!\n");
1097                         CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
1098                         CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
1099                         libcfs_debug_dumpstack(NULL);
1100                 }
1101                 cl_lock_get_trust(scan);
1102                 conflict = scan;
1103                 break;
1104         }
1105         cfs_spin_unlock(&hdr->coh_lock_guard);
1106
1107         if (conflict) {
1108                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1109                         /* we want a group lock but a previous lock request
1110                          * conflicts; we do not wait but return 0 so the
1111                          * request is sent to the server
1112                          */
1113                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1114                                            "with %p, no wait, send to server\n",
1115                                lock, conflict);
1116                         cl_lock_put(env, conflict);
1117                         rc = 0;
1118                 } else {
1119                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1120                                            "will wait\n",
1121                                lock, conflict);
1122                         LASSERT(lock->cll_conflict == NULL);
1123                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1124                                    lock);
1125                         lock->cll_conflict = conflict;
1126                         rc = CLO_WAIT;
1127                 }
1128         }
1129         RETURN(rc);
1130 }
1131
1132 /**
1133  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1134  * layer. This initiates ldlm enqueue:
1135  *
1136  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1137  *
1138  *     - calls osc_enqueue_base() to do actual enqueue.
1139  *
1140  * osc_enqueue_base() is supplied with an upcall function that is executed
1141  * when lock is received either after a local cached ldlm lock is matched, or
1142  * when a reply from the server is received.
1143  *
1144  * This function does not wait for the network communication to complete.
1145  */
1146 static int osc_lock_enqueue(const struct lu_env *env,
1147                             const struct cl_lock_slice *slice,
1148                             struct cl_io *unused, __u32 enqflags)
1149 {
1150         struct osc_lock          *ols     = cl2osc_lock(slice);
1151         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1152         int result;
1153         ENTRY;
1154
1155         LASSERT(cl_lock_is_mutexed(lock));
1156         LASSERT(lock->cll_state == CLS_QUEUING);
1157         LASSERT(ols->ols_state == OLS_NEW);
1158
1159         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1160         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1161                 ols->ols_glimpse = 1;
1162         if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
1163                 /* try to convert this lock to a lockless lock */
1164                 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1165
1166         result = osc_lock_enqueue_wait(env, ols);
1167         if (result == 0) {
1168                 if (!osc_lock_is_lockless(ols)) {
1169                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1170                         struct osc_thread_info   *info = osc_env_info(env);
1171                         struct ldlm_res_id       *resname = &info->oti_resname;
1172                         ldlm_policy_data_t       *policy = &info->oti_policy;
1173                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1174
1175                         if (ols->ols_locklessable)
1176                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1177
1178                         /* a reference for lock, passed as an upcall cookie */
1179                         cl_lock_get(lock);
1180                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1181                         ols->ols_state = OLS_ENQUEUED;
1182
1183                         /*
1184                          * XXX: this is possible blocking point as
1185                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1186                          * LDLM_CP_CALLBACK.
1187                          */
1188                         osc_lock_build_res(env, obj, resname);
1189                         osc_lock_build_policy(env, lock, policy);
1190                         result = osc_enqueue_base(osc_export(obj), resname,
1191                                           &ols->ols_flags, policy,
1192                                           &ols->ols_lvb,
1193                                           obj->oo_oinfo->loi_kms_valid,
1194                                           osc_lock_upcall,
1195                                           ols, einfo, &ols->ols_handle,
1196                                           PTLRPCD_SET, 1);
1197                         if (result != 0) {
1198                                 lu_ref_del(&lock->cll_reference,
1199                                            "upcall", lock);
1200                                 cl_lock_put(env, lock);
1201                         }
1202                 } else {
1203                         ols->ols_state = OLS_GRANTED;
1204                         ols->ols_owner = osc_env_io(env);
1205                 }
1206         }
1207         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1208         RETURN(result);
1209 }
1210
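/**
 * Implementation of cl_lock_operations::clo_wait() for osc layer: returns 0
 * once the lock is granted (or, for a glimpse lock, once the upcall has been
 * received), the lock error if one was set, and CLO_WAIT otherwise.
 */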
1211 static int osc_lock_wait(const struct lu_env *env,
1212                          const struct cl_lock_slice *slice)
1213 {
1214         struct osc_lock *olck = cl2osc_lock(slice);
1215         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1216
1217         LINVRNT(osc_lock_invariant(olck));
1218         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1219                 return 0;
1220
1221         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1222                      lock->cll_error == 0, olck->ols_lock != NULL));
1223
1224         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1225 }
1226
1227 /**
1228  * An implementation of cl_lock_operations::clo_use() method that pins cached
1229  * lock.
1230  */
1231 static int osc_lock_use(const struct lu_env *env,
1232                         const struct cl_lock_slice *slice)
1233 {
1234         struct osc_lock *olck = cl2osc_lock(slice);
1235         int rc;
1236
1237         LASSERT(!olck->ols_hold);
1238
1239         /*
1240          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1241          * flag is not set. This protects us from a concurrent blocking ast.
1242          */
1243         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1244         if (rc == 0) {
1245                 olck->ols_hold = 1;
1246                 olck->ols_state = OLS_GRANTED;
1247         } else {
1248                 struct cl_lock *lock;
1249
1250                 /*
1251                  * Lock is being cancelled somewhere within
1252                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1253                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1254                  * cl_lock mutex.
1255                  */
1256                 lock = slice->cls_lock;
1257                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1258                 LASSERT(lock->cll_users > 0);
1259                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1260                  * lock.*/
1261                 olck->ols_ast_wait = 1;
1262                 rc = CLO_WAIT;
1263         }
1264         return rc;
1265 }
1266
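/**
 * Writes out (and optionally discards) the pages covered by \a ols before the
 * lock is cancelled, by calling cl_lock_page_out() in a nested environment.
 */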
1267 static int osc_lock_flush(struct osc_lock *ols, int discard)
1268 {
1269         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1270         struct cl_env_nest    nest;
1271         struct lu_env        *env;
1272         int result = 0;
1273
1274         env = cl_env_nested_get(&nest);
1275         if (!IS_ERR(env)) {
1276                 result = cl_lock_page_out(env, lock, discard);
1277                 cl_env_nested_put(&nest, env);
1278         } else
1279                 result = PTR_ERR(env);
1280         if (result == 0) {
1281                 ols->ols_flush = 1;
1282                 LINVRNT(!osc_lock_has_pages(ols));
1283         }
1284         return result;
1285 }
1286
1287 /**
1288  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1289  * called (as part of cl_lock_cancel()) when lock is canceled either voluntary
1290  * (LRU pressure, early cancellation, umount, etc.) or due to the conflict
1291  * with some other lock some where in the cluster. This function does the
1292  * following:
1293  *
1294  *     - invalidates all pages protected by this lock (after sending dirty
1295  *       ones to the server, as necessary);
1296  *
1297  *     - decref's underlying ldlm lock;
1298  *
1299  *     - cancels ldlm lock (ldlm_cli_cancel()).
1300  */
1301 static void osc_lock_cancel(const struct lu_env *env,
1302                             const struct cl_lock_slice *slice)
1303 {
1304         struct cl_lock   *lock    = slice->cls_lock;
1305         struct osc_lock  *olck    = cl2osc_lock(slice);
1306         struct ldlm_lock *dlmlock = olck->ols_lock;
1307         int               result  = 0;
1308         int               discard;
1309
1310         LASSERT(cl_lock_is_mutexed(lock));
1311         LINVRNT(osc_lock_invariant(olck));
1312
1313         if (dlmlock != NULL) {
1314                 int do_cancel;
1315
1316                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1317                 result = osc_lock_flush(olck, discard);
1318                 osc_lock_unhold(olck);
1319
1320                 lock_res_and_lock(dlmlock);
1321                 /* Now that we're the only user of dlm read/write reference,
1322                  * in most cases ->l_readers + ->l_writers should be zero.
1323                  * However, there is a corner case.
1324                  * See bug 18829 for details.*/
1325                 do_cancel = (dlmlock->l_readers == 0 &&
1326                              dlmlock->l_writers == 0);
1327                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1328                 unlock_res_and_lock(dlmlock);
1329                 if (do_cancel)
1330                         result = ldlm_cli_cancel(&olck->ols_handle);
1331                 if (result < 0)
1332                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1333                                       "lock %p cancel failure with error(%d)\n",
1334                                       lock, result);
1335         }
1336         olck->ols_state = OLS_CANCELLED;
1337         osc_lock_detach(env, olck);
1338 }
1339
1340 void cl_lock_page_list_fixup(const struct lu_env *env,
1341                              struct cl_io *io, struct cl_lock *lock,
1342                              struct cl_page_list *queue);
1343
1344 #ifdef INVARIANT_CHECK
1345 /**
1346  * Returns true iff there are pages under \a olck not protected by other
1347  * locks.
1348  */
1349 static int osc_lock_has_pages(struct osc_lock *olck)
1350 {
1351         struct cl_lock       *lock;
1352         struct cl_lock_descr *descr;
1353         struct cl_object     *obj;
1354         struct osc_object    *oob;
1355         struct cl_page_list  *plist;
1356         struct cl_page       *page;
1357         struct cl_env_nest    nest;
1358         struct cl_io         *io;
1359         struct lu_env        *env;
1360         int                   result;
1361
1362         env = cl_env_nested_get(&nest);
1363         if (!IS_ERR(env)) {
1364                 obj   = olck->ols_cl.cls_obj;
1365                 oob   = cl2osc(obj);
1366                 io    = &oob->oo_debug_io;
1367                 lock  = olck->ols_cl.cls_lock;
1368                 descr = &lock->cll_descr;
1369                 plist = &osc_env_info(env)->oti_plist;
1370                 cl_page_list_init(plist);
1371
1372                 cfs_mutex_lock(&oob->oo_debug_mutex);
1373
1374                 io->ci_obj = cl_object_top(obj);
1375                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1376                 cl_page_gang_lookup(env, obj, io,
1377                                     descr->cld_start, descr->cld_end, plist, 0,
1378                                     NULL);
1379                 cl_lock_page_list_fixup(env, io, lock, plist);
1380                 if (plist->pl_nr > 0) {
1381                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1382                         cl_page_list_for_each(page, plist)
1383                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1384                 }
1385                 result = plist->pl_nr > 0;
1386                 cl_page_list_disown(env, io, plist);
1387                 cl_page_list_fini(env, plist);
1388                 cl_io_fini(env, io);
1389                 cfs_mutex_unlock(&oob->oo_debug_mutex);
1390                 cl_env_nested_put(&nest, env);
1391         } else
1392                 result = 0;
1393         return result;
1394 }
1395 #else
1396 static int osc_lock_has_pages(struct osc_lock *olck)
1397 {
1398         return 0;
1399 }
1400 #endif /* INVARIANT_CHECK */
1401
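/**
 * Implements cl_lock_operations::clo_delete() method for osc layer.
 *
 * Releases the hold on, and detaches, the underlying ldlm lock. Glimpse locks
 * are left alone: by this point they hold no reference to an ldlm lock, so
 * there is nothing to release.
 */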
1402 static void osc_lock_delete(const struct lu_env *env,
1403                             const struct cl_lock_slice *slice)
1404 {
1405         struct osc_lock *olck;
1406
1407         olck = cl2osc_lock(slice);
1408         if (olck->ols_glimpse) {
1409                 LASSERT(!olck->ols_hold);
1410                 LASSERT(!olck->ols_lock);
1411                 return;
1412         }
1413
1414         LINVRNT(osc_lock_invariant(olck));
1415         LINVRNT(!osc_lock_has_pages(olck));
1416
1417         osc_lock_unhold(olck);
1418         osc_lock_detach(env, olck);
1419 }
1420
1421 /**
1422  * Implements cl_lock_operations::clo_state() method for osc layer.
1423  *
1424  * Maintains osc_lock::ols_owner field.
1425  *
1426  * This assumes that a lock always enters CLS_HELD (from some other state) in
1427  * the same IO context as the one that requested the lock. This should not be
1428  * a problem, because the context is by definition shared by all activity
1429  * pertaining to the same high-level IO.
1430  */
1431 static void osc_lock_state(const struct lu_env *env,
1432                            const struct cl_lock_slice *slice,
1433                            enum cl_lock_state state)
1434 {
1435         struct osc_lock *lock = cl2osc_lock(slice);
1436
1437         /*
1438          * XXX multiple io contexts can use the lock at the same time.
1439          */
1440         LINVRNT(osc_lock_invariant(lock));
1441         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1442                 struct osc_io *oio = osc_env_io(env);
1443
1444                 LASSERT(lock->ols_owner == NULL);
1445                 lock->ols_owner = oio;
1446         } else if (state != CLS_HELD)
1447                 lock->ols_owner = NULL;
1448 }
1449
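/**
 * Implements cl_lock_operations::clo_print() method for osc layer: prints the
 * underlying ldlm lock pointer, the osc lock flags, handle cookie, state,
 * owning io and cached lvb.
 */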
1450 static int osc_lock_print(const struct lu_env *env, void *cookie,
1451                           lu_printer_t p, const struct cl_lock_slice *slice)
1452 {
1453         struct osc_lock *lock = cl2osc_lock(slice);
1454
1455         /*
1456          * XXX print ldlm lock and einfo properly.
1457          */
1458         (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1459              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1460              lock->ols_state, lock->ols_owner);
1461         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1462         return 0;
1463 }
1464
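/**
 * Implements cl_lock_operations::clo_fits_into() method for osc layer.
 *
 * Decides whether this osc lock can be matched against the request described
 * by \a need: CEF_NEVER requests are never matched, phantom (glimpse)
 * requests only match locks in states between OLS_GRANTED and OLS_RELEASED,
 * and CEF_MUST requests do not match locklessable locks whose enqueue has not
 * completed.
 */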
1465 static int osc_lock_fits_into(const struct lu_env *env,
1466                               const struct cl_lock_slice *slice,
1467                               const struct cl_lock_descr *need,
1468                               const struct cl_io *io)
1469 {
1470         struct osc_lock *ols = cl2osc_lock(slice);
1471
1472         if (need->cld_enq_flags & CEF_NEVER)
1473                 return 0;
1474
1475         if (need->cld_mode == CLM_PHANTOM) {
1476                 /*
1477                  * Note: a QUEUED lock cannot be matched here, otherwise
1478                  * it might cause deadlocks.
1479                  * For example, in a read process:
1480                  * P1: enqueues a read lock, creating sublock1.
1481                  * P2: enqueues a write lock, creating sublock2 (which
1482                  *     conflicts with sublock1).
1483                  * P1: the read lock is granted.
1484                  * P1: enqueues a glimpse lock (while holding sublock1 for
1485                  *     read), which matches sublock2 and waits for sublock2
1486                  *     to be granted. But sublock2 cannot be granted,
1487                  *     because P1 will not release sublock1. Deadlock!
1488                  */
1489                 if (ols->ols_state < OLS_GRANTED ||
1490                     ols->ols_state > OLS_RELEASED)
1491                         return 0;
1492         } else if (need->cld_enq_flags & CEF_MUST) {
1493                 /*
1494                  * If the lock has never been enqueued, it can't be matched,
1495                  * because the enqueue process brings in information that is
1496                  * used to determine things such as lockless mode,
1497                  * CEF_MUST, etc.
1498                  */
1499                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1500                     ols->ols_locklessable)
1501                         return 0;
1502         }
1503         return 1;
1504 }
1505
1506 static const struct cl_lock_operations osc_lock_ops = {
1507         .clo_fini    = osc_lock_fini,
1508         .clo_enqueue = osc_lock_enqueue,
1509         .clo_wait    = osc_lock_wait,
1510         .clo_unuse   = osc_lock_unuse,
1511         .clo_use     = osc_lock_use,
1512         .clo_delete  = osc_lock_delete,
1513         .clo_state   = osc_lock_state,
1514         .clo_cancel  = osc_lock_cancel,
1515         .clo_weigh   = osc_lock_weigh,
1516         .clo_print   = osc_lock_print,
1517         .clo_fits_into = osc_lock_fits_into,
1518 };
1519
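/**
 * Implements cl_lock_operations::clo_unuse() method for lockless osc locks.
 *
 * A lockless lock is not kept around once its user is done with it: the
 * cl_lock is cancelled and deleted as soon as it is unused.
 */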
1520 static int osc_lock_lockless_unuse(const struct lu_env *env,
1521                                    const struct cl_lock_slice *slice)
1522 {
1523         struct osc_lock *ols = cl2osc_lock(slice);
1524         struct cl_lock *lock = slice->cls_lock;
1525
1526         LASSERT(ols->ols_state == OLS_GRANTED);
1527         LINVRNT(osc_lock_invariant(ols));
1528
1529         cl_lock_cancel(env, lock);
1530         cl_lock_delete(env, lock);
1531         return 0;
1532 }
1533
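/**
 * Implements cl_lock_operations::clo_cancel() method for lockless osc locks:
 * there is no ldlm lock to cancel, so this only flushes the covered pages and
 * moves the lock to OLS_CANCELLED.
 */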
1534 static void osc_lock_lockless_cancel(const struct lu_env *env,
1535                                      const struct cl_lock_slice *slice)
1536 {
1537         struct osc_lock   *ols  = cl2osc_lock(slice);
1538         int result;
1539
1540         result = osc_lock_flush(ols, 0);
1541         if (result)
1542                 CERROR("Pages for lockless lock %p were not purged (%d)\n",
1543                        ols, result);
1544         ols->ols_state = OLS_CANCELLED;
1545 }
1546
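/**
 * Implements cl_lock_operations::clo_wait() method for lockless osc locks:
 * the lock must already have reached at least OLS_UPCALL_RECEIVED, so this
 * only reports any error recorded on the cl_lock.
 */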
1547 static int osc_lock_lockless_wait(const struct lu_env *env,
1548                                   const struct cl_lock_slice *slice)
1549 {
1550         struct osc_lock *olck = cl2osc_lock(slice);
1551         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1552
1553         LINVRNT(osc_lock_invariant(olck));
1554         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1555
1556         return lock->cll_error;
1557 }
1558
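/**
 * Implements cl_lock_operations::clo_state() method for lockless osc locks.
 *
 * When the lock enters CLS_HELD, the current io is recorded as the owner and,
 * if the lock is taken on the io's own object, the io itself is switched into
 * lockless mode.
 */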
1559 static void osc_lock_lockless_state(const struct lu_env *env,
1560                                     const struct cl_lock_slice *slice,
1561                                     enum cl_lock_state state)
1562 {
1563         struct osc_lock *lock = cl2osc_lock(slice);
1564
1565         LINVRNT(osc_lock_invariant(lock));
1566         if (state == CLS_HELD) {
1567                 struct osc_io *oio  = osc_env_io(env);
1568
1569                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1570                 lock->ols_owner = oio;
1571
1572                 /* set the io to be lockless if this lock is for the io's
1573                  * host object */
1574                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1575                         oio->oi_lockless = 1;
1576         }
1577 }
1578
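/**
 * Implements cl_lock_operations::clo_fits_into() method for lockless osc
 * locks: a lockless lock is only reused for CEF_NEVER requests coming from
 * the io that owns it.
 */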
1579 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1580                                        const struct cl_lock_slice *slice,
1581                                        const struct cl_lock_descr *need,
1582                                        const struct cl_io *io)
1583 {
1584         struct osc_lock *lock = cl2osc_lock(slice);
1585
1586         if (!(need->cld_enq_flags & CEF_NEVER))
1587                 return 0;
1588
1589         /* lockless lock should only be used by its owning io. b22147 */
1590         return (lock->ols_owner == osc_env_io(env));
1591 }
1592
1593 static const struct cl_lock_operations osc_lock_lockless_ops = {
1594         .clo_fini      = osc_lock_fini,
1595         .clo_enqueue   = osc_lock_enqueue,
1596         .clo_wait      = osc_lock_lockless_wait,
1597         .clo_unuse     = osc_lock_lockless_unuse,
1598         .clo_state     = osc_lock_lockless_state,
1599         .clo_fits_into = osc_lock_lockless_fits_into,
1600         .clo_cancel    = osc_lock_lockless_cancel,
1601         .clo_print     = osc_lock_print
1602 };
1603
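/**
 * Allocates and initializes the osc slice of \a lock: builds the ldlm enqueue
 * info, starts the lock in OLS_NEW state and adds the slice to \a lock with
 * the regular (non-lockless) operation vector.
 */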
1604 int osc_lock_init(const struct lu_env *env,
1605                   struct cl_object *obj, struct cl_lock *lock,
1606                   const struct cl_io *unused)
1607 {
1608         struct osc_lock *clk;
1609         int result;
1610
1611         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1612         if (clk != NULL) {
1613                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1614                 cfs_atomic_set(&clk->ols_pageref, 0);
1615                 clk->ols_state = OLS_NEW;
1616                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1617                 result = 0;
1618         } else
1619                 result = -ENOMEM;
1620         return result;
1621 }
1622
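/**
 * Returns 1 if the osc lock attached to \a dlm still has outstanding page
 * references (ols_pageref != 0), and 0 otherwise. When no references are
 * held, the _PAGEREF_MAGIC bias is left in ols_pageref, presumably so that a
 * later osc_page_addref_lock() can tell that the lock is on its way out.
 */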
1623 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1624 {
1625         struct osc_lock *olock;
1626         int              rc = 0;
1627
1628         cfs_spin_lock(&osc_ast_guard);
1629         olock = dlm->l_ast_data;
1630         /*
1631          * There is a very rare race with osc_page_addref_lock(), but it
1632          * does not matter: in the worst case we fail to cancel a lock
1633          * that we actually could cancel, which does no harm.
1634          */
1635         if (olock != NULL &&
1636             cfs_atomic_add_return(_PAGEREF_MAGIC,
1637                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1638                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1639                 rc = 1;
1640         }
1641         cfs_spin_unlock(&osc_ast_guard);
1642         return rc;
1643 }
1644
1645 /** @} osc */