b=16774 cancel unused osc locks before replay.
[fs/lustre-release.git] / lustre / osc / osc_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc 
54  *  @{ 
55  */
56
57 #define _PAGEREF_MAGIC  (-10000000)
58
59 /*****************************************************************************
60  *
61  * Type conversions.
62  *
63  */
64
65 static const struct cl_lock_operations osc_lock_ops;
66 static const struct cl_lock_operations osc_lock_lockless_ops;
67 static void osc_lock_to_lockless(const struct lu_env *env,
68                                  struct osc_lock *ols, int force);
69 static int osc_lock_has_pages(struct osc_lock *olck);
70
71 int osc_lock_is_lockless(const struct osc_lock *olck)
72 {
73         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
74 }
75
76 /**
77  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
78  * pointer cannot be dereferenced, as lock is not protected from concurrent
79  * reclaim. This function is a helper for osc_lock_invariant().
80  */
81 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
82 {
83         struct ldlm_lock *lock;
84
85         lock = ldlm_handle2lock(handle);
86         if (lock != NULL)
87                 LDLM_LOCK_PUT(lock);
88         return lock;
89 }
90
91 /**
92  * Invariant that has to be true all of the time.
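 *
 * Note: ergo(a, b) below is the libcfs "a implies b" helper (!a || b);
 * equi(a, b), used elsewhere in this file, holds when a and b are either
 * both true or both false.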
93  */
94 static int osc_lock_invariant(struct osc_lock *ols)
95 {
96         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
97         struct ldlm_lock *olock       = ols->ols_lock;
98         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
99
100         return
101                 ergo(osc_lock_is_lockless(ols),
102                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
103                 (ergo(olock != NULL, handle_used) &&
104                  ergo(olock != NULL,
105                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
106                  /*
107                   * Check that ->ols_handle and ->ols_lock are consistent, but
108                  * take into account that they are set at different times.
109                   */
110                  ergo(handle_used,
111                       ergo(lock != NULL && olock != NULL, lock == olock) &&
112                       ergo(lock == NULL, olock == NULL)) &&
113                  ergo(ols->ols_state == OLS_CANCELLED,
114                       olock == NULL && !handle_used) &&
115                  /*
116                   * DLM lock is destroyed only after we have seen cancellation
117                   * ast.
118                   */
119                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
120                       !olock->l_destroyed) &&
121                  ergo(ols->ols_state == OLS_GRANTED,
122                       olock != NULL &&
123                       olock->l_req_mode == olock->l_granted_mode &&
124                       ols->ols_hold));
125 }
126
127 /*****************************************************************************
128  *
129  * Lock operations.
130  *
131  */
132
133 /**
134  * Breaks a link between osc_lock and dlm_lock.
135  */
136 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
137 {
138         struct ldlm_lock *dlmlock;
139
140         cfs_spin_lock(&osc_ast_guard);
141         dlmlock = olck->ols_lock;
142         if (dlmlock == NULL) {
143                 cfs_spin_unlock(&osc_ast_guard);
144                 return;
145         }
146
147         olck->ols_lock = NULL;
148         /* wb(); --- for everyone who checks (ols->ols_lock != NULL) before
149          * calling osc_lock_detach() */
150         dlmlock->l_ast_data = NULL;
151         olck->ols_handle.cookie = 0ULL;
152         cfs_spin_unlock(&osc_ast_guard);
153
154         lock_res_and_lock(dlmlock);
155         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
156                 struct cl_object *obj = olck->ols_cl.cls_obj;
157                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
158                 __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
159
160                 /* Update the kms. Need to loop over all granted locks.
161                  * Not a problem for the client. */
162                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
163                 unlock_res_and_lock(dlmlock);
164
165                 cl_object_attr_lock(obj);
166                 cl_object_attr_set(env, obj, attr, CAT_KMS);
167                 cl_object_attr_unlock(obj);
168         } else
169                 unlock_res_and_lock(dlmlock);
170
171         /* release a reference taken in osc_lock_upcall0(). */
172         LASSERT(olck->ols_has_ref);
173         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
174         LDLM_LOCK_RELEASE(dlmlock);
175         olck->ols_has_ref = 0;
176 }
177
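/**
 * Releases the "hold" reference on the underlying ldlm lock, if any, by
 * passing ->ols_handle to osc_cancel_base().
 */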
178 static int osc_lock_unhold(struct osc_lock *ols)
179 {
180         int result = 0;
181
182         if (ols->ols_hold) {
183                 ols->ols_hold = 0;
184                 result = osc_cancel_base(&ols->ols_handle,
185                                          ols->ols_einfo.ei_mode);
186         }
187         return result;
188 }
189
190 static int osc_lock_unuse(const struct lu_env *env,
191                           const struct cl_lock_slice *slice)
192 {
193         struct osc_lock *ols = cl2osc_lock(slice);
194
195         LASSERT(ols->ols_state == OLS_GRANTED ||
196                 ols->ols_state == OLS_UPCALL_RECEIVED);
197         LINVRNT(osc_lock_invariant(ols));
198
199         if (ols->ols_glimpse) {
200                 LASSERT(ols->ols_hold == 0);
201                 return 0;
202         }
203         LASSERT(ols->ols_hold);
204
205         /*
206          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
207          * so that possible synchronous cancellation (that always happens
208          * e.g., for liblustre) sees that lock is released.
209          */
210         ols->ols_state = OLS_RELEASED;
211         return osc_lock_unhold(ols);
212 }
213
214 static void osc_lock_fini(const struct lu_env *env,
215                           struct cl_lock_slice *slice)
216 {
217         struct osc_lock  *ols = cl2osc_lock(slice);
218
219         LINVRNT(osc_lock_invariant(ols));
220         /*
221          * ->ols_hold can still be true at this point if, for example, a
222          * thread that requested a lock was killed (and released a reference
223          * to the lock) before a reply from the server was received. In this case
224          * the lock is destroyed immediately after the upcall.
225          */
226         osc_lock_unhold(ols);
227         LASSERT(ols->ols_lock == NULL);
228         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
229                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
230
231         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
232 }
233
234 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
235                         struct ldlm_res_id *resname)
236 {
237         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
238         if (0) {
239                 /*
240                  * In the perfect world of the future, where ost servers talk
241                  * idif-fids...
242                  */
243                 fid_build_reg_res_name(fid, resname);
244         } else {
245                 /*
246                  * In reality, the ost server expects ->lsm_object_id and
247                  * ->lsm_object_seq in the resource name.
248                  */
249                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
250                                    resname);
251         }
252 }
253
254 static void osc_lock_build_policy(const struct lu_env *env,
255                                   const struct cl_lock *lock,
256                                   ldlm_policy_data_t *policy)
257 {
258         const struct cl_lock_descr *d = &lock->cll_descr;
259
260         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
261         policy->l_extent.gid = d->cld_gid;
262 }
263
264 static int osc_enq2ldlm_flags(__u32 enqflags)
265 {
266         int result = 0;
267
268         LASSERT((enqflags & ~CEF_MASK) == 0);
269
270         if (enqflags & CEF_NONBLOCK)
271                 result |= LDLM_FL_BLOCK_NOWAIT;
272         if (enqflags & CEF_ASYNC)
273                 result |= LDLM_FL_HAS_INTENT;
274         if (enqflags & CEF_DISCARD_DATA)
275                 result |= LDLM_AST_DISCARD_DATA;
276         return result;
277 }
278
279 /**
280  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
281  * pointers. Initialized in osc_init().
282  */
283 cfs_spinlock_t osc_ast_guard;
284
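/**
 * Returns the osc_lock attached to \a dlm_lock through ->l_ast_data, taking
 * a trusted reference on the corresponding cl_lock, or NULL if no usable
 * osc_lock is attached. Must be balanced by a call to osc_ast_data_put().
 */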
285 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
286 {
287         struct osc_lock *olck;
288
289         lock_res_and_lock(dlm_lock);
290         cfs_spin_lock(&osc_ast_guard);
291         olck = dlm_lock->l_ast_data;
292         if (olck != NULL) {
293                 struct cl_lock *lock = olck->ols_cl.cls_lock;
294                 /*
295                  * If osc_lock holds a reference on ldlm lock, return it even
296                  * when cl_lock is in CLS_FREEING state. This way
297                  *
298                  *         osc_ast_data_get(dlmlock) == NULL
299                  *
300                  * guarantees that all osc references on dlmlock were
301                  * released. osc_dlm_blocking_ast0() relies on that.
302                  */
303                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
304                         cl_lock_get_trust(lock);
305                         lu_ref_add_atomic(&lock->cll_reference,
306                                           "ast", cfs_current());
307                 } else
308                         olck = NULL;
309         }
310         cfs_spin_unlock(&osc_ast_guard);
311         unlock_res_and_lock(dlm_lock);
312         return olck;
313 }
314
315 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
316 {
317         struct cl_lock *lock;
318
319         lock = olck->ols_cl.cls_lock;
320         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
321         cl_lock_put(env, lock);
322 }
323
324 /**
325  * Updates object attributes from a lock value block (lvb) received together
326  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
327  * logic.
328  *
329  * This can be optimized to not update attributes when lock is a result of a
330  * local match.
331  *
332  * Called under lock and resource spin-locks.
333  */
334 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
335                                 int rc)
336 {
337         struct ost_lvb    *lvb;
338         struct cl_object  *obj;
339         struct lov_oinfo  *oinfo;
340         struct cl_attr    *attr;
341         unsigned           valid;
342
343         ENTRY;
344
345         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
346                 EXIT;
347                 return;
348         }
349
350         lvb   = &olck->ols_lvb;
351         obj   = olck->ols_cl.cls_obj;
352         oinfo = cl2osc(obj)->oo_oinfo;
353         attr  = &osc_env_info(env)->oti_attr;
354         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
355         cl_lvb2attr(attr, lvb);
356
357         cl_object_attr_lock(obj);
358         if (rc == 0) {
359                 struct ldlm_lock  *dlmlock;
360                 __u64 size;
361
362                 dlmlock = olck->ols_lock;
363                 LASSERT(dlmlock != NULL);
364
365                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
366                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
367                 size = lvb->lvb_size;
368                 /* Extend KMS up to the end of this lock and no further
369                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
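                /* e.g., a lock on the byte extent [0, 4095] allows the KMS
                 * to be extended to at most 4096. */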
370                 if (size > dlmlock->l_policy_data.l_extent.end)
371                         size = dlmlock->l_policy_data.l_extent.end + 1;
372                 if (size >= oinfo->loi_kms) {
373                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
374                                    ", kms="LPU64, lvb->lvb_size, size);
375                         valid |= CAT_KMS;
376                         attr->cat_kms = size;
377                 } else {
378                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
379                                    LPU64"; leaving kms="LPU64", end="LPU64,
380                                    lvb->lvb_size, oinfo->loi_kms,
381                                    dlmlock->l_policy_data.l_extent.end);
382                 }
383                 ldlm_lock_allow_match_locked(dlmlock);
384         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
385                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
386                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
387         } else
388                 valid = 0;
389
390         if (valid != 0)
391                 cl_object_attr_set(env, obj, attr, valid);
392
393         cl_object_attr_unlock(obj);
394
395         EXIT;
396 }
397
398 /**
399  * Called when a lock is granted, from an upcall (when server returned a
400  * granted lock), or from completion AST, when server returned a blocked lock.
401  *
402  * Called under lock and resource spin-locks, that are released temporarily
403  * here.
404  */
405 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
406                              struct ldlm_lock *dlmlock, int rc)
407 {
408         struct ldlm_extent   *ext;
409         struct cl_lock       *lock;
410         struct cl_lock_descr *descr;
411
412         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
413
414         ENTRY;
415         if (olck->ols_state < OLS_GRANTED) {
416                 lock  = olck->ols_cl.cls_lock;
417                 ext   = &dlmlock->l_policy_data.l_extent;
418                 descr = &osc_env_info(env)->oti_descr;
419                 descr->cld_obj = lock->cll_descr.cld_obj;
420
421                 /* XXX check that ->l_granted_mode is valid. */
422                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
423                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
424                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
425                 descr->cld_gid   = ext->gid;
426                 /*
427                  * tell upper layers the extent of the lock that was actually
428                  * granted
429                  */
430                 olck->ols_state = OLS_GRANTED;
431                 osc_lock_lvb_update(env, olck, rc);
432
433                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
434                  * to take a semaphore on a parent lock. This is safe, because
435                  * spin-locks are needed to protect consistency of
436                  * dlmlock->l_*_mode and LVB, and we have finished processing
437                  * them. */
438                 unlock_res_and_lock(dlmlock);
439                 cl_lock_modify(env, lock, descr);
440                 cl_lock_signal(env, lock);
441                 LINVRNT(osc_lock_invariant(olck));
442                 lock_res_and_lock(dlmlock);
443         }
444         EXIT;
445 }
446
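/**
 * Binds the osc_lock to the ldlm lock obtained through ->ols_handle: stores
 * the dlm lock in ->ols_lock, takes the references that osc_lock keeps on it
 * and, if the lock is already granted, finishes granting.
 */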
447 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
448
449 {
450         struct ldlm_lock *dlmlock;
451
452         ENTRY;
453
454         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
455         LASSERT(dlmlock != NULL);
456
457         lock_res_and_lock(dlmlock);
458         cfs_spin_lock(&osc_ast_guard);
459         LASSERT(dlmlock->l_ast_data == olck);
460         LASSERT(olck->ols_lock == NULL);
461         olck->ols_lock = dlmlock;
462         cfs_spin_unlock(&osc_ast_guard);
463
464         /*
465          * Lock might not be granted yet. In this case, the completion AST
466          * (osc_ldlm_completion_ast()) comes later and finishes lock
467          * granting.
468          */
469         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
470                 osc_lock_granted(env, olck, dlmlock, 0);
471         unlock_res_and_lock(dlmlock);
472
473         /*
474          * osc_enqueue_interpret() decrefs asynchronous locks; counter
475          * that here.
476          */
477         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
478         olck->ols_hold = 1;
479
480         /* lock reference taken by ldlm_handle2lock_long() is owned by
481          * osc_lock and released in osc_lock_detach() */
482         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
483         olck->ols_has_ref = 1;
484 }
485
486 /**
487  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
488  * received from a server, or after osc_enqueue_base() matched a local DLM
489  * lock.
490  */
491 static int osc_lock_upcall(void *cookie, int errcode)
492 {
493         struct osc_lock         *olck  = cookie;
494         struct cl_lock_slice    *slice = &olck->ols_cl;
495         struct cl_lock          *lock  = slice->cls_lock;
496         struct lu_env           *env;
497         struct cl_env_nest       nest;
498
499         ENTRY;
500         env = cl_env_nested_get(&nest);
501         if (!IS_ERR(env)) {
502                 int rc;
503
504                 cl_lock_mutex_get(env, lock);
505
506                 LASSERT(lock->cll_state >= CLS_QUEUING);
507                 if (olck->ols_state == OLS_ENQUEUED) {
508                         olck->ols_state = OLS_UPCALL_RECEIVED;
509                         rc = ldlm_error2errno(errcode);
510                 } else if (olck->ols_state == OLS_CANCELLED) {
511                         rc = -EIO;
512                 } else {
513                         CERROR("Impossible state: %d\n", olck->ols_state);
514                         LBUG();
515                 }
516                 if (rc) {
517                         struct ldlm_lock *dlmlock;
518
519                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
520                         if (dlmlock != NULL) {
521                                 lock_res_and_lock(dlmlock);
522                                 cfs_spin_lock(&osc_ast_guard);
523                                 LASSERT(olck->ols_lock == NULL);
524                                 dlmlock->l_ast_data = NULL;
525                                 olck->ols_handle.cookie = 0ULL;
526                                 cfs_spin_unlock(&osc_ast_guard);
527                                 unlock_res_and_lock(dlmlock);
528                                 LDLM_LOCK_PUT(dlmlock);
529                         }
530                 } else {
531                         if (olck->ols_glimpse)
532                                 olck->ols_glimpse = 0;
533                         osc_lock_upcall0(env, olck);
534                 }
535
536                 /* Error handling, some errors are tolerable. */
537                 if (olck->ols_locklessable && rc == -EUSERS) {
538                         /* This is a tolerable error, turn this lock into a
539                          * lockless lock.
540                          */
541                         osc_object_set_contended(cl2osc(slice->cls_obj));
542                         LASSERT(slice->cls_ops == &osc_lock_ops);
543
544                         /* Change this into a lockless lock (no ldlm lock). */
545                         osc_lock_to_lockless(env, olck, 1);
546                         olck->ols_state = OLS_GRANTED;
547                         rc = 0;
548                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
549                         osc_lock_lvb_update(env, olck, rc);
550                         cl_lock_delete(env, lock);
551                         /* Hide the error. */
552                         rc = 0;
553                 }
554
555                 if (rc == 0)
556                         /* on error, lock was signaled by cl_lock_error() */
557                         cl_lock_signal(env, lock);
558                 else
559                         cl_lock_error(env, lock, rc);
560
561                 cl_lock_mutex_put(env, lock);
562
563                 /* release cookie reference, acquired by osc_lock_enqueue() */
564                 lu_ref_del(&lock->cll_reference, "upcall", lock);
565                 cl_lock_put(env, lock);
566                 cl_env_nested_put(&nest, env);
567         } else
568                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
569                 LBUG();
570         RETURN(errcode);
571 }
572
573 /**
574  * Core of osc_dlm_blocking_ast() logic.
575  */
576 static void osc_lock_blocking(const struct lu_env *env,
577                               struct ldlm_lock *dlmlock,
578                               struct osc_lock *olck, int blocking)
579 {
580         struct cl_lock *lock = olck->ols_cl.cls_lock;
581
582         LASSERT(olck->ols_lock == dlmlock);
583         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
584         LASSERT(!osc_lock_is_lockless(olck));
585
586         /*
587          * Lock might still be addref-ed here if, e.g., a blocking AST
588          * is sent for a failed lock.
589          */
590         osc_lock_unhold(olck);
591
592         if (blocking && olck->ols_state < OLS_BLOCKED)
593                 /*
594                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
595                  * because it recursively re-enters osc_lock_blocking(), with
596                  * the state set to OLS_CANCELLED.
597                  */
598                 olck->ols_state = OLS_BLOCKED;
599         /*
600          * cancel and destroy lock at least once no matter how blocking ast is
601          * entered (see comment above osc_ldlm_blocking_ast() for use
602          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
603          */
604         cl_lock_cancel(env, lock);
605         cl_lock_delete(env, lock);
606 }
607
608 /**
609  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
610  * and ldlm_lock caches.
611  */
612 static int osc_dlm_blocking_ast0(const struct lu_env *env,
613                                  struct ldlm_lock *dlmlock,
614                                  void *data, int flag)
615 {
616         struct osc_lock *olck;
617         struct cl_lock  *lock;
618         int result;
619         int cancel;
620
621         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
622
623         cancel = 0;
624         olck = osc_ast_data_get(dlmlock);
625         if (olck != NULL) {
626                 lock = olck->ols_cl.cls_lock;
627                 cl_lock_mutex_get(env, lock);
628                 LINVRNT(osc_lock_invariant(olck));
629                 if (olck->ols_ast_wait) {
630                         /* wake up osc_lock_use() */
631                         cl_lock_signal(env, lock);
632                         olck->ols_ast_wait = 0;
633                 }
634                 /*
635                  * Lock might have been canceled while this thread was
636          * sleeping on the lock mutex, but olck is pinned in memory.
637                  */
638                 if (olck == dlmlock->l_ast_data) {
639                         /*
640                          * NOTE: DLM sends blocking AST's for failed locks
641                          *       (that are still in pre-OLS_GRANTED state)
642                          *       too, and they have to be canceled otherwise
643                          *       the DLM lock is never destroyed and gets
644                          *       stuck in memory.
645                          *
646                          *       Alternatively, ldlm_cli_cancel() can be
647                          *       called here directly for osc_locks with
648                          *       ols_state < OLS_GRANTED to maintain an
649                          *       invariant that ->clo_cancel() is only called
650                          *       for locks that were granted.
651                          */
652                         LASSERT(data == olck);
653                         osc_lock_blocking(env, dlmlock,
654                                           olck, flag == LDLM_CB_BLOCKING);
655                 } else
656                         cancel = 1;
657                 cl_lock_mutex_put(env, lock);
658                 osc_ast_data_put(env, olck);
659         } else
660                 /*
661                  * DLM lock exists, but there is no cl_lock attached to it.
662                  * This is a `normal' race. cl_object and its cl_lock's can be
663                  * removed by memory pressure, together with all pages.
664                  */
665                 cancel = (flag == LDLM_CB_BLOCKING);
666
667         if (cancel) {
668                 struct lustre_handle *lockh;
669
670                 lockh = &osc_env_info(env)->oti_handle;
671                 ldlm_lock2handle(dlmlock, lockh);
672                 result = ldlm_cli_cancel(lockh);
673         } else
674                 result = 0;
675         return result;
676 }
677
678 /**
679  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
680  * some other lock, or is canceled. This function is installed as a
681  * ldlm_lock::l_blocking_ast() for client extent locks.
682  *
683  * Control flow is tricky, because ldlm uses the same call-back
684  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
685  *
686  * \param dlmlock lock for which ast occurred.
687  *
688  * \param new description of a conflicting lock in case of blocking ast.
689  *
690  * \param data value of dlmlock->l_ast_data
691  *
692  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
693  *             cancellation and blocking ast's.
694  *
695  * Possible use cases:
696  *
697  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
698  *       lock due to lock lru pressure, or explicit user request to purge
699  *       locks.
700  *
701  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
702  *       us that dlmlock conflicts with another lock that some client is
703  *       enqueuing. Lock is canceled.
704  *
705  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
706  *             ldlm_cli_cancel() that calls
707  *
708  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
709  *
710  *             recursively entering osc_ldlm_blocking_ast().
711  *
712  *     - client cancels lock voluntarily (e.g., as a part of early cancellation):
713  *
714  *           cl_lock_cancel()->
715  *             osc_lock_cancel()->
716  *               ldlm_cli_cancel()->
717  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
718  *
719  */
720 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
721                                  struct ldlm_lock_desc *new, void *data,
722                                  int flag)
723 {
724         struct lu_env     *env;
725         struct cl_env_nest nest;
726         int                result;
727
728         /*
729          * This can be called in the context of outer IO, e.g.,
730          *
731          *     cl_enqueue()->...
732          *       ->osc_enqueue_base()->...
733          *         ->ldlm_prep_elc_req()->...
734          *           ->ldlm_cancel_callback()->...
735          *             ->osc_ldlm_blocking_ast()
736          *
737          * A new environment has to be created so as not to corrupt the outer context.
738          */
739         env = cl_env_nested_get(&nest);
740         if (!IS_ERR(env)) {
741                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
742                 cl_env_nested_put(&nest, env);
743         } else {
744                 result = PTR_ERR(env);
745                 /*
746                  * XXX This should never happen; when it does, the cl_lock
747                  * gets stuck. A pre-allocated environment, a la
748                  * vvp_inode_fini_env, should be used instead.
749                  */
750                 LBUG();
751         }
752         if (result != 0) {
753                 if (result == -ENODATA)
754                         result = 0;
755                 else
756                         CERROR("BAST failed: %d\n", result);
757         }
758         return result;
759 }
760
761 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
762                                    int flags, void *data)
763 {
764         struct cl_env_nest nest;
765         struct lu_env     *env;
766         struct osc_lock   *olck;
767         struct cl_lock    *lock;
768         int result;
769         int dlmrc;
770
771         /* first, do dlm part of the work */
772         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
773         /* then, notify cl_lock */
774         env = cl_env_nested_get(&nest);
775         if (!IS_ERR(env)) {
776                 olck = osc_ast_data_get(dlmlock);
777                 if (olck != NULL) {
778                         lock = olck->ols_cl.cls_lock;
779                         cl_lock_mutex_get(env, lock);
780                         /*
781                          * ldlm_handle_cp_callback() copied LVB from request
782                          * to lock->l_lvb_data, store it in osc_lock.
783                          */
784                         LASSERT(dlmlock->l_lvb_data != NULL);
785                         lock_res_and_lock(dlmlock);
786                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
787                         if (olck->ols_lock == NULL) {
788                                 /*
789                                  * upcall (osc_lock_upcall()) hasn't yet been
790                                  * called. Do nothing now, upcall will bind
791                                  * olck to dlmlock and signal the waiters.
792                                  *
793                                  * This maintains an invariant that osc_lock
794                                  * and ldlm_lock are always bound when
795                                  * osc_lock is in OLS_GRANTED state.
796                                  */
797                         } else if (dlmlock->l_granted_mode ==
798                                    dlmlock->l_req_mode) {
799                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
800                         }
801                         unlock_res_and_lock(dlmlock);
802
803                         if (dlmrc != 0) {
804                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
805                                               "dlmlock returned %d\n", dlmrc);
806                                 cl_lock_error(env, lock, dlmrc);
807                         }
808                         cl_lock_mutex_put(env, lock);
809                         osc_ast_data_put(env, olck);
810                         result = 0;
811                 } else
812                         result = -ELDLM_NO_LOCK_DATA;
813                 cl_env_nested_put(&nest, env);
814         } else
815                 result = PTR_ERR(env);
816         return dlmrc ?: result;
817 }
818
819 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
820 {
821         struct ptlrpc_request  *req  = data;
822         struct osc_lock        *olck;
823         struct cl_lock         *lock;
824         struct cl_object       *obj;
825         struct cl_env_nest      nest;
826         struct lu_env          *env;
827         struct ost_lvb         *lvb;
828         struct req_capsule     *cap;
829         int                     result;
830
831         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
832
833         env = cl_env_nested_get(&nest);
834         if (!IS_ERR(env)) {
835                 /*
836                  * osc_ast_data_get() has to go after the environment is
837                  * allocated, because osc_ast_data_get() acquires a
838                  * reference to a lock, and that reference can only be
839                  * released with an environment.
840                  */
841                 olck = osc_ast_data_get(dlmlock);
842                 if (olck != NULL) {
843                         lock = olck->ols_cl.cls_lock;
844                         cl_lock_mutex_get(env, lock);
845                         cap = &req->rq_pill;
846                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
847                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
848                                              sizeof *lvb);
849                         result = req_capsule_server_pack(cap);
850                         if (result == 0) {
851                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
852                                 obj = lock->cll_descr.cld_obj;
853                                 result = cl_object_glimpse(env, obj, lvb);
854                         }
855                         cl_lock_mutex_put(env, lock);
856                         osc_ast_data_put(env, olck);
857                 } else {
858                         /*
859                          * These errors are normal races, so we don't want to
860                          * fill the console with messages by calling
861                          * ptlrpc_error()
862                          */
863                         lustre_pack_reply(req, 1, NULL, NULL);
864                         result = -ELDLM_NO_LOCK_DATA;
865                 }
866                 cl_env_nested_put(&nest, env);
867         } else
868                 result = PTR_ERR(env);
869         req->rq_status = result;
870         return result;
871 }
872
873 static unsigned long osc_lock_weigh(const struct lu_env *env,
874                                     const struct cl_lock_slice *slice)
875 {
876         /*
877          * don't need to grab coh_page_guard since we don't care about the
878          * exact number of pages...
879          */
880         return cl_object_header(slice->cls_obj)->coh_pages;
881 }
882
883 /**
884  * Get the weight of dlm lock for early cancellation.
885  *
886  * XXX: it should return the pages covered by this \a dlmlock.
887  */
888 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
889 {
890         struct cl_env_nest       nest;
891         struct lu_env           *env;
892         struct osc_lock         *lock;
893         struct cl_lock          *cll;
894         unsigned long            weight;
895         ENTRY;
896
897         cfs_might_sleep();
898         /*
899          * osc_ldlm_weigh_ast has a complex context since it might be called
900          * because of lock canceling, or from user input. We have to make
901          * a new environment for it. It is probably safe to reuse the upper
902          * context, because cl_lock_put() does not modify environment
903          * variables, but play it safe and allocate a new one.
904          */
905         env = cl_env_nested_get(&nest);
906         if (IS_ERR(env))
907                 /* Mostly due to lack of memory; tend to eliminate this lock. */
908                 RETURN(0);
909
910         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
911         lock = osc_ast_data_get(dlmlock);
912         if (lock == NULL) {
913                 /* cl_lock was destroyed because of memory pressure.
914                  * It is quite reasonable to assign this type of lock
915                  * a lower cost.
916                  */
917                 GOTO(out, weight = 0);
918         }
919
920         cll = lock->ols_cl.cls_lock;
921         cl_lock_mutex_get(env, cll);
922         weight = cl_lock_weigh(env, cll);
923         cl_lock_mutex_put(env, cll);
924         osc_ast_data_put(env, lock);
925         EXIT;
926
927 out:
928         cl_env_nested_put(&nest, env);
929         return weight;
930 }
931
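/**
 * Fills \a einfo for the ldlm enqueue: extent lock type, ldlm mode converted
 * from the cl_lock mode (glimpse/phantom locks are enqueued as reads for
 * now), the osc AST callbacks, and \a lock as the value that ends up in
 * ldlm_lock::l_ast_data.
 */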
932 static void osc_lock_build_einfo(const struct lu_env *env,
933                                  const struct cl_lock *clock,
934                                  struct osc_lock *lock,
935                                  struct ldlm_enqueue_info *einfo)
936 {
937         enum cl_lock_mode mode;
938
939         mode = clock->cll_descr.cld_mode;
940         if (mode == CLM_PHANTOM)
941                 /*
942                  * For now, enqueue all glimpse locks in read mode. In the
943                  * future, client might choose to enqueue LCK_PW lock for
944                  * glimpse on a file opened for write.
945                  */
946                 mode = CLM_READ;
947
948         einfo->ei_type   = LDLM_EXTENT;
949         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
950         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
951         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
952         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
953         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
954         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
955 }
956
957 /**
958  * Determine if the lock should be converted into a lockless lock.
959  *
960  * Steps to check:
961  * - whether the lock has an explicit requirement for a non-lockless lock;
962  * - the IO lock request type (ci_lockreq);
963  * - send the enqueue RPC to the OST to make the final decision;
964  * - special treatment for lockless truncate.
965  *
966  *  Additional policy can be implemented here, e.g., never do lockless-io
967  *  for large extents.
968  */
969 static void osc_lock_to_lockless(const struct lu_env *env,
970                                  struct osc_lock *ols, int force)
971 {
972         struct cl_lock_slice *slice = &ols->ols_cl;
973         struct cl_lock *lock        = slice->cls_lock;
974
975         LASSERT(ols->ols_state == OLS_NEW ||
976                 ols->ols_state == OLS_UPCALL_RECEIVED);
977
978         if (force) {
979                 ols->ols_locklessable = 1;
980                 LASSERT(cl_lock_is_mutexed(lock));
981                 slice->cls_ops = &osc_lock_lockless_ops;
982         } else {
983                 struct osc_io *oio     = osc_env_io(env);
984                 struct cl_io  *io      = oio->oi_cl.cis_io;
985                 struct cl_object *obj  = slice->cls_obj;
986                 struct osc_object *oob = cl2osc(obj);
987                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
988                 struct obd_connect_data *ocd;
989
990                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
991                         io->ci_lockreq == CILR_MAYBE ||
992                         io->ci_lockreq == CILR_NEVER);
993
994                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
995                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
996                                 (io->ci_lockreq == CILR_MAYBE) &&
997                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
998                 if (io->ci_lockreq == CILR_NEVER ||
999                         /* lockless IO */
1000                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1001                         /* lockless truncate */
1002                     (cl_io_is_trunc(io) &&
1003                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1004                       osd->od_lockless_truncate)) {
1005                         ols->ols_locklessable = 1;
1006                         slice->cls_ops = &osc_lock_lockless_ops;
1007                 }
1008         }
1009         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1010 }
1011
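/**
 * Returns true iff the already enqueued lock \a qed does not conflict with
 * the lock \a qing being enqueued: two read locks are compatible, and so is
 * a glimpse lock once its reply has been received, or when \a qing is a
 * read lock.
 */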
1012 static int osc_lock_compatible(const struct osc_lock *qing,
1013                                const struct osc_lock *qed)
1014 {
1015         enum cl_lock_mode qing_mode;
1016         enum cl_lock_mode qed_mode;
1017
1018         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1019         if (qed->ols_glimpse &&
1020             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1021                 return 1;
1022
1023         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1024         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1025 }
1026
1027 /**
1028  * Cancel all conflicting locks and wait for them to be destroyed.
1029  *
1030  * This function is used for two purposes:
1031  *
1032  *     - early cancel all conflicting locks before starting IO, and
1033  *
1034  *     - guarantee that pages added to the page cache by lockless IO are never
1035  *       covered by locks other than lockless IO lock, and, hence, are not
1036  *       visible to other threads.
1037  */
1038 static int osc_lock_enqueue_wait(const struct lu_env *env,
1039                                  const struct osc_lock *olck)
1040 {
1041         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1042         struct cl_lock_descr    *descr   = &lock->cll_descr;
1043         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1044         struct cl_lock          *scan;
1045         struct cl_lock          *conflict= NULL;
1046         int lockless                     = osc_lock_is_lockless(olck);
1047         int rc                           = 0;
1048         ENTRY;
1049
1050         LASSERT(cl_lock_is_mutexed(lock));
1051         LASSERT(lock->cll_state == CLS_QUEUING);
1052
1053         /* let a glimpse lock enqueue anyway, because we don't actually
1054          * need to cancel any conflicting locks for it. */
1055         if (olck->ols_glimpse)
1056                 return 0;
1057
1058         cfs_spin_lock(&hdr->coh_lock_guard);
1059         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1060                 struct cl_lock_descr *cld = &scan->cll_descr;
1061                 const struct osc_lock *scan_ols;
1062
1063                 if (scan == lock)
1064                         break;
1065
1066                 if (scan->cll_state < CLS_QUEUING ||
1067                     scan->cll_state == CLS_FREEING ||
1068                     cld->cld_start > descr->cld_end ||
1069                     cld->cld_end < descr->cld_start)
1070                         continue;
1071
1072                 /* overlapped and living locks. */
1073
1074                 /* We're not supposed to give up group lock. */
1075                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1076                         LASSERT(descr->cld_mode != CLM_GROUP ||
1077                                 descr->cld_gid != scan->cll_descr.cld_gid);
1078                         continue;
1079                 }
1080
1081                 scan_ols = osc_lock_at(scan);
1082
1083                 /* We need to cancel the compatible locks if we're enqueuing
1084                  * a lockless lock, for example:
1085                  * imagine that client has PR lock on [0, 1000], and thread T0
1086                  * is doing lockless IO in [500, 1500] region. Concurrent
1087                  * thread T1 can see lockless data in [500, 1000], which is
1088                  * wrong, because these data are possibly stale. */
1089                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1090                         continue;
1091
1092                 /* Now @scan is conflicting with @lock; this means the current
1093                  * thread has to sleep until @scan is destroyed. */
1094                 if (scan_ols->ols_owner == osc_env_io(env)) {
1095                         CERROR("DEADLOCK POSSIBLE!\n");
1096                         CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
1097                         CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
1098                         libcfs_debug_dumpstack(NULL);
1099                 }
1100                 cl_lock_get_trust(scan);
1101                 conflict = scan;
1102                 break;
1103         }
1104         cfs_spin_unlock(&hdr->coh_lock_guard);
1105
1106         if (conflict) {
1107                 CDEBUG(D_DLMTRACE, "lock %p conflicts with %p, will wait\n",
1108                        lock, conflict);
1109                 lu_ref_add(&conflict->cll_reference, "cancel-wait", lock);
1110                 LASSERT(lock->cll_conflict == NULL);
1111                 lock->cll_conflict = conflict;
1112                 rc = CLO_WAIT;
1113         }
1114         RETURN(rc);
1115 }
1116
1117 /**
1118  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1119  * layer. This initiates ldlm enqueue:
1120  *
1121  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1122  *
1123  *     - calls osc_enqueue_base() to do actual enqueue.
1124  *
1125  * osc_enqueue_base() is supplied with an upcall function that is executed
1126  * when lock is received either after a local cached ldlm lock is matched, or
1127  * when a reply from the server is received.
1128  *
1129  * This function does not wait for the network communication to complete.
1130  */
1131 static int osc_lock_enqueue(const struct lu_env *env,
1132                             const struct cl_lock_slice *slice,
1133                             struct cl_io *unused, __u32 enqflags)
1134 {
1135         struct osc_lock          *ols     = cl2osc_lock(slice);
1136         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1137         struct osc_object        *obj     = cl2osc(slice->cls_obj);
1138         struct osc_thread_info   *info    = osc_env_info(env);
1139         struct ldlm_res_id       *resname = &info->oti_resname;
1140         ldlm_policy_data_t       *policy  = &info->oti_policy;
1141         struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
1142         int result;
1143         ENTRY;
1144
1145         LASSERT(cl_lock_is_mutexed(lock));
1146         LASSERT(lock->cll_state == CLS_QUEUING);
1147         LASSERT(ols->ols_state == OLS_NEW);
1148
1149         osc_lock_build_res(env, obj, resname);
1150         osc_lock_build_policy(env, lock, policy);
1151         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1152         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1153                 ols->ols_glimpse = 1;
1154         if (!(enqflags & CEF_MUST))
1155                 /* try to convert this lock to a lockless lock */
1156                 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1157
1158         result = osc_lock_enqueue_wait(env, ols);
1159         if (result == 0) {
1160                 if (!osc_lock_is_lockless(ols)) {
1161                         if (ols->ols_locklessable)
1162                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1163
1164                         /* a reference for lock, passed as an upcall cookie */
1165                         cl_lock_get(lock);
1166                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1167                         ols->ols_state = OLS_ENQUEUED;
1168
1169                         /*
1170                          * XXX: this is a possible blocking point, as
1171                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1172                          * LDLM_CP_CALLBACK.
1173                          */
1174                         result = osc_enqueue_base(osc_export(obj), resname,
1175                                           &ols->ols_flags, policy,
1176                                           &ols->ols_lvb,
1177                                           obj->oo_oinfo->loi_kms_valid,
1178                                           osc_lock_upcall,
1179                                           ols, einfo, &ols->ols_handle,
1180                                           PTLRPCD_SET, 1);
1181                         if (result != 0) {
1182                                 lu_ref_del(&lock->cll_reference,
1183                                            "upcall", lock);
1184                                 cl_lock_put(env, lock);
1185                         }
1186                 } else {
1187                         ols->ols_state = OLS_GRANTED;
1188                         ols->ols_owner = osc_env_io(env);
1189                 }
1190         }
1191         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1192         RETURN(result);
1193 }
1194
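/**
 * Implementation of cl_lock_operations::clo_wait() method for osc layer:
 * returns 0 once the lock is granted (or, for a glimpse lock, once the
 * upcall has been received), the recorded lock error if any, and CLO_WAIT
 * otherwise.
 */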
1195 static int osc_lock_wait(const struct lu_env *env,
1196                          const struct cl_lock_slice *slice)
1197 {
1198         struct osc_lock *olck = cl2osc_lock(slice);
1199         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1200
1201         LINVRNT(osc_lock_invariant(olck));
1202         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1203                 return 0;
1204
1205         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1206                      lock->cll_error == 0, olck->ols_lock != NULL));
1207
1208         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1209 }
1210
1211 /**
1212  * An implementation of cl_lock_operations::clo_use() method that pins cached
1213  * lock.
1214  */
1215 static int osc_lock_use(const struct lu_env *env,
1216                         const struct cl_lock_slice *slice)
1217 {
1218         struct osc_lock *olck = cl2osc_lock(slice);
1219         int rc;
1220
1221         LASSERT(!olck->ols_hold);
1222
1223         /*
1224          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1225          * flag is not set. This protects us from a concurrent blocking ast.
1226          */
1227         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1228         if (rc == 0) {
1229                 olck->ols_hold = 1;
1230                 olck->ols_state = OLS_GRANTED;
1231         } else {
1232                 struct cl_lock *lock;
1233
1234                 /*
1235                  * Lock is being cancelled somewhere within
1236                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1237                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1238                  * cl_lock mutex.
1239                  */
1240                 lock = slice->cls_lock;
1241                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1242                 LASSERT(lock->cll_users > 0);
1243                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1244                  * lock.*/
1245                 olck->ols_ast_wait = 1;
1246                 rc = CLO_WAIT;
1247         }
1248         return rc;
1249 }
1250
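/**
 * Writes out (or discards, when \a discard is set) all pages covered by
 * \a ols prior to cancellation; sets ->ols_flush on success.
 */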
1251 static int osc_lock_flush(struct osc_lock *ols, int discard)
1252 {
1253         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1254         struct cl_env_nest    nest;
1255         struct lu_env        *env;
1256         int result = 0;
1257
1258         env = cl_env_nested_get(&nest);
1259         if (!IS_ERR(env)) {
1260                 result = cl_lock_page_out(env, lock, discard);
1261                 cl_env_nested_put(&nest, env);
1262         } else
1263                 result = PTR_ERR(env);
1264         if (result == 0) {
1265                 ols->ols_flush = 1;
1266                 LINVRNT(!osc_lock_has_pages(ols));
1267         }
1268         return result;
1269 }
1270
1271 /**
1272  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1273  * called (as part of cl_lock_cancel()) when a lock is canceled either voluntarily
1274  * (LRU pressure, early cancellation, umount, etc.) or due to a conflict
1275  * with some other lock somewhere in the cluster. This function does the
1276  * following:
1277  *
1278  *     - invalidates all pages protected by this lock (after sending dirty
1279  *       ones to the server, as necessary);
1280  *
1281  *     - decref's underlying ldlm lock;
1282  *
1283  *     - cancels ldlm lock (ldlm_cli_cancel()).
1284  */
1285 static void osc_lock_cancel(const struct lu_env *env,
1286                             const struct cl_lock_slice *slice)
1287 {
1288         struct cl_lock   *lock    = slice->cls_lock;
1289         struct osc_lock  *olck    = cl2osc_lock(slice);
1290         struct ldlm_lock *dlmlock = olck->ols_lock;
1291         int               result  = 0;
1292         int               discard;
1293
1294         LASSERT(cl_lock_is_mutexed(lock));
1295         LINVRNT(osc_lock_invariant(olck));
1296
1297         if (dlmlock != NULL) {
1298                 int do_cancel;
1299
1300                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1301                 result = osc_lock_flush(olck, discard);
1302                 osc_lock_unhold(olck);
1303
1304                 lock_res_and_lock(dlmlock);
1305                 /* Now that we're the only user of dlm read/write reference,
1306                  * ->l_readers + ->l_writers should mostly be zero.
1307                  * However, there is a corner case.
1308                  * See bug 18829 for details.*/
1309                 do_cancel = (dlmlock->l_readers == 0 &&
1310                              dlmlock->l_writers == 0);
1311                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1312                 unlock_res_and_lock(dlmlock);
1313                 if (do_cancel)
1314                         result = ldlm_cli_cancel(&olck->ols_handle);
1315                 if (result < 0)
1316                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1317                                       "lock %p cancel failure with error(%d)\n",
1318                                       lock, result);
1319         }
1320         olck->ols_state = OLS_CANCELLED;
1321         osc_lock_detach(env, olck);
1322 }
1323
1324 void cl_lock_page_list_fixup(const struct lu_env *env,
1325                              struct cl_io *io, struct cl_lock *lock,
1326                              struct cl_page_list *queue);
1327
1328 #ifdef INVARIANT_CHECK
1329 /**
1330  * Returns true iff there are pages under \a olck not protected by other
1331  * locks.
1332  */
1333 static int osc_lock_has_pages(struct osc_lock *olck)
1334 {
1335         struct cl_lock       *lock;
1336         struct cl_lock_descr *descr;
1337         struct cl_object     *obj;
1338         struct osc_object    *oob;
1339         struct cl_page_list  *plist;
1340         struct cl_page       *page;
1341         struct cl_env_nest    nest;
1342         struct cl_io         *io;
1343         struct lu_env        *env;
1344         int                   result;
1345
1346         env = cl_env_nested_get(&nest);
1347         if (!IS_ERR(env)) {
1348                 obj   = olck->ols_cl.cls_obj;
1349                 oob   = cl2osc(obj);
1350                 io    = &oob->oo_debug_io;
1351                 lock  = olck->ols_cl.cls_lock;
1352                 descr = &lock->cll_descr;
1353                 plist = &osc_env_info(env)->oti_plist;
1354                 cl_page_list_init(plist);
1355
1356                 cfs_mutex_lock(&oob->oo_debug_mutex);
1357
1358                 io->ci_obj = cl_object_top(obj);
1359                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1360                 cl_page_gang_lookup(env, obj, io,
1361                                     descr->cld_start, descr->cld_end, plist, 0,
1362                                     NULL);
1363                 cl_lock_page_list_fixup(env, io, lock, plist);
1364                 if (plist->pl_nr > 0) {
1365                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1366                         cl_page_list_for_each(page, plist)
1367                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1368                 }
1369                 result = plist->pl_nr > 0;
1370                 cl_page_list_disown(env, io, plist);
1371                 cl_page_list_fini(env, plist);
1372                 cl_io_fini(env, io);
1373                 cfs_mutex_unlock(&oob->oo_debug_mutex);
1374                 cl_env_nested_put(&nest, env);
1375         } else
1376                 result = 0;
1377         return result;
1378 }
1379 #else
1380 static int osc_lock_has_pages(struct osc_lock *olck)
1381 {
1382         return 0;
1383 }
1384 #endif /* INVARIANT_CHECK */
1385
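/**
 * Implements cl_lock_operations::clo_delete() method for osc layer.
 *
 * Releases the hold on the underlying ldlm lock and detaches from it. A
 * glimpse lock never takes a hold on the dlm lock (asserted below), so
 * there is nothing to release for it.
 */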
1386 static void osc_lock_delete(const struct lu_env *env,
1387                             const struct cl_lock_slice *slice)
1388 {
1389         struct osc_lock *olck;
1390
1391         olck = cl2osc_lock(slice);
1392         if (olck->ols_glimpse) {
1393                 LASSERT(!olck->ols_hold);
1394                 LASSERT(!olck->ols_lock);
1395                 return;
1396         }
1397
1398         LINVRNT(osc_lock_invariant(olck));
1399         LINVRNT(!osc_lock_has_pages(olck));
1400
1401         osc_lock_unhold(olck);
1402         osc_lock_detach(env, olck);
1403 }
1404
1405 /**
1406  * Implements cl_lock_operations::clo_state() method for osc layer.
1407  *
1408  * Maintains osc_lock::ols_owner field.
1409  *
1410  * This assumes that a lock always enters CLS_HELD (from some other state) in
1411  * the same IO context as the one that requested the lock. This should not be a
1412  * problem, because the context is by definition shared by all activity
1413  * pertaining to the same high-level IO.
1414  */
1415 static void osc_lock_state(const struct lu_env *env,
1416                            const struct cl_lock_slice *slice,
1417                            enum cl_lock_state state)
1418 {
1419         struct osc_lock *lock = cl2osc_lock(slice);
1420
1421         /*
1422          * XXX multiple io contexts can use the lock at the same time.
1423          */
1424         LINVRNT(osc_lock_invariant(lock));
1425         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1426                 struct osc_io *oio = osc_env_io(env);
1427
1428                 LASSERT(lock->ols_owner == NULL);
1429                 lock->ols_owner = oio;
1430         } else if (state != CLS_HELD)
1431                 lock->ols_owner = NULL;
1432 }
1433
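/**
 * Implements cl_lock_operations::clo_print() method for osc layer: dumps
 * the ldlm lock pointer, flags, handle cookie, osc lock state, owner and
 * the cached lvb of the lock slice.
 */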
1434 static int osc_lock_print(const struct lu_env *env, void *cookie,
1435                           lu_printer_t p, const struct cl_lock_slice *slice)
1436 {
1437         struct osc_lock *lock = cl2osc_lock(slice);
1438
1439         /*
1440          * XXX print ldlm lock and einfo properly.
1441          */
1442         (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1443              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1444              lock->ols_state, lock->ols_owner);
1445         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1446         return 0;
1447 }
1448
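/**
 * Implements cl_lock_operations::clo_fits_into() method for osc layer.
 *
 * Decides whether an existing osc lock can be matched against a lock
 * request described by \a need, issued as part of \a io; returns 0 when
 * the lock must not be matched.
 */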
1449 static int osc_lock_fits_into(const struct lu_env *env,
1450                               const struct cl_lock_slice *slice,
1451                               const struct cl_lock_descr *need,
1452                               const struct cl_io *io)
1453 {
1454         struct osc_lock *ols = cl2osc_lock(slice);
1455
1456         if (need->cld_enq_flags & CEF_NEVER)
1457                 return 0;
1458
1459         if (need->cld_mode == CLM_PHANTOM) {
1460                 /*
1461                  * Note: a QUEUED lock cannot be matched here, otherwise
1462                  * it might cause a deadlock.
1463                  * Example, in a read process:
1464                  * P1: enqueues a read lock, creating sublock1.
1465                  * P2: enqueues a write lock, creating sublock2 (which
1466                  *     conflicts with sublock1).
1467                  * P1: the read lock is granted.
1468                  * P1: enqueues a glimpse lock (while holding sublock1 for
1469                  *     read) that matches sublock2 and waits for sublock2
1470                  *     to be granted. But sublock2 can never be granted,
1471                  *     because P1 will not release sublock1. Deadlock!
1472                  */
1473                 if (ols->ols_state < OLS_GRANTED ||
1474                     ols->ols_state > OLS_RELEASED)
1475                         return 0;
1476         } else if (need->cld_enq_flags & CEF_MUST) {
1477                 /*
1478                  * If the lock has never been enqueued, it cannot be
1479                  * matched, because the enqueue process gathers the
1480                  * information needed to determine properties such as
1481                  * whether the lock is locklessable, CEF_MUST, etc.
1482                  */
1483                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1484                     ols->ols_locklessable)
1485                         return 0;
1486         }
1487         return 1;
1488 }
1489
1490 static const struct cl_lock_operations osc_lock_ops = {
1491         .clo_fini    = osc_lock_fini,
1492         .clo_enqueue = osc_lock_enqueue,
1493         .clo_wait    = osc_lock_wait,
1494         .clo_unuse   = osc_lock_unuse,
1495         .clo_use     = osc_lock_use,
1496         .clo_delete  = osc_lock_delete,
1497         .clo_state   = osc_lock_state,
1498         .clo_cancel  = osc_lock_cancel,
1499         .clo_weigh   = osc_lock_weigh,
1500         .clo_print   = osc_lock_print,
1501         .clo_fits_into = osc_lock_fits_into,
1502 };
1503
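/**
 * Implements cl_lock_operations::clo_enqueue() method for lockless osc
 * locks. A lockless lock is only created by converting a regular osc lock
 * (see osc_lock_to_lockless()), so this method is never supposed to be
 * called; LBUG() if it is.
 */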
1504 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1505                                      const struct cl_lock_slice *slice,
1506                                      struct cl_io *unused, __u32 enqflags)
1507 {
1508         LBUG();
1509         return 0;
1510 }
1511
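/**
 * Implements cl_lock_operations::clo_unuse() method for lockless osc
 * locks. A lockless lock is not cached once its user is done with it:
 * instead of being kept around for matching, it is cancelled and deleted
 * right away.
 */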
1512 static int osc_lock_lockless_unuse(const struct lu_env *env,
1513                                    const struct cl_lock_slice *slice)
1514 {
1515         struct osc_lock *ols = cl2osc_lock(slice);
1516         struct cl_lock *lock = slice->cls_lock;
1517
1518         LASSERT(ols->ols_state == OLS_GRANTED);
1519         LINVRNT(osc_lock_invariant(ols));
1520
1521         cl_lock_cancel(env, lock);
1522         cl_lock_delete(env, lock);
1523         return 0;
1524 }
1525
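/**
 * Implements cl_lock_operations::clo_cancel() method for lockless osc
 * locks: flushes the pages covered by the lock back to the server and
 * moves the lock into OLS_CANCELLED. There is no ldlm lock to cancel in
 * the lockless case.
 */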
1526 static void osc_lock_lockless_cancel(const struct lu_env *env,
1527                                      const struct cl_lock_slice *slice)
1528 {
1529         struct osc_lock   *ols  = cl2osc_lock(slice);
1530         int result;
1531
1532         result = osc_lock_flush(ols, 0);
1533         if (result)
1534                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1535                        ols, result);
1536         ols->ols_state = OLS_CANCELLED;
1537 }
1538
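/**
 * Implements cl_lock_operations::clo_wait() method for lockless osc
 * locks. By this point the lock is already past OLS_UPCALL_RECEIVED
 * (asserted below), so there is nothing to wait for; just report any
 * error recorded on the top-level cl_lock.
 */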
1539 static int osc_lock_lockless_wait(const struct lu_env *env,
1540                                   const struct cl_lock_slice *slice)
1541 {
1542         struct osc_lock *olck = cl2osc_lock(slice);
1543         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1544
1545         LINVRNT(osc_lock_invariant(olck));
1546         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1547
1548         return lock->cll_error;
1549 }
1550
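/**
 * Implements cl_lock_operations::clo_state() method for lockless osc
 * locks. In addition to tracking osc_lock::ols_owner, marks the owning io
 * as lockless (osc_io::oi_lockless) when the lock is taken on the io's
 * own object.
 */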
1551 static void osc_lock_lockless_state(const struct lu_env *env,
1552                                     const struct cl_lock_slice *slice,
1553                                     enum cl_lock_state state)
1554 {
1555         struct osc_lock *lock = cl2osc_lock(slice);
1556
1557         LINVRNT(osc_lock_invariant(lock));
1558         if (state == CLS_HELD) {
1559                 struct osc_io *oio  = osc_env_io(env);
1560
1561                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1562                 lock->ols_owner = oio;
1563
1564                 /* mark the io as lockless if this lock is for the
1565                  * io's own (host) object */
1566                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1567                         oio->oi_lockless = 1;
1568         }
1569 }
1570
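/**
 * Implements cl_lock_operations::clo_fits_into() method for lockless osc
 * locks: a lockless lock can only satisfy CEF_NEVER requests coming from
 * the io that owns the lock (see bug 22147).
 */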
1571 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1572                                        const struct cl_lock_slice *slice,
1573                                        const struct cl_lock_descr *need,
1574                                        const struct cl_io *io)
1575 {
1576         struct osc_lock *lock = cl2osc_lock(slice);
1577
1578         if (!(need->cld_enq_flags & CEF_NEVER))
1579                 return 0;
1580
1581         /* a lockless lock should only be used by its owning io; see bug 22147 */
1582         return (lock->ols_owner == osc_env_io(env));
1583 }
1584
1585 static const struct cl_lock_operations osc_lock_lockless_ops = {
1586         .clo_fini      = osc_lock_fini,
1587         .clo_enqueue   = osc_lock_lockless_enqueue,
1588         .clo_wait      = osc_lock_lockless_wait,
1589         .clo_unuse     = osc_lock_lockless_unuse,
1590         .clo_state     = osc_lock_lockless_state,
1591         .clo_fits_into = osc_lock_lockless_fits_into,
1592         .clo_cancel    = osc_lock_lockless_cancel,
1593         .clo_print     = osc_lock_print
1594 };
1595
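/**
 * Allocates and initializes the osc slice of \a lock: builds the ldlm
 * enqueue info, resets the page reference counter, sets the state to
 * OLS_NEW and attaches the slice to the cl_lock with osc_lock_ops.
 * Presumably registered as cl_object_operations::coo_lock_init by the osc
 * object code.
 */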
1596 int osc_lock_init(const struct lu_env *env,
1597                   struct cl_object *obj, struct cl_lock *lock,
1598                   const struct cl_io *unused)
1599 {
1600         struct osc_lock *clk;
1601         int result;
1602
1603         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1604         if (clk != NULL) {
1605                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1606                 cfs_atomic_set(&clk->ols_pageref, 0);
1607                 clk->ols_state = OLS_NEW;
1608                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1609                 result = 0;
1610         } else
1611                 result = -ENOMEM;
1612         return result;
1613 }
1614
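/**
 * Returns 1 iff the osc lock attached to \a dlm (via l_ast_data) still
 * has outstanding page references, 0 otherwise. When there are no
 * references, osc_lock::ols_pageref is left holding _PAGEREF_MAGIC,
 * presumably so that concurrent osc_page_addref_lock() calls cannot take
 * a new reference afterwards.
 */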
1615 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1616 {
1617         struct osc_lock *olock;
1618         int              rc = 0;
1619
1620         cfs_spin_lock(&osc_ast_guard);
1621         olock = dlm->l_ast_data;
1622         /*
1623          * There is a very rare race with osc_page_addref_lock(), but it
1624          * does not matter: in the worst case we skip cancelling a lock
1625          * that we actually could cancel, which is harmless.
1626          */
1627         if (olock != NULL &&
1628             cfs_atomic_add_return(_PAGEREF_MAGIC,
1629                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1630                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1631                 rc = 1;
1632         }
1633         cfs_spin_unlock(&osc_ast_guard);
1634         return rc;
1635 }
1636
1637 /** @} osc */