lustre/osc/osc_lock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_OSC
43
44 #ifdef __KERNEL__
45 # include <libcfs/libcfs.h>
46 #else
47 # include <liblustre.h>
48 #endif
49 /* fid_build_reg_res_name() */
50 #include <lustre_fid.h>
51
52 #include "osc_cl_internal.h"
53
54 /** \addtogroup osc
55  *  @{
56  */
57
58 /*****************************************************************************
59  *
60  * Type conversions.
61  *
62  */
63
64 static const struct cl_lock_operations osc_lock_ops;
65 static const struct cl_lock_operations osc_lock_lockless_ops;
66 static void osc_lock_to_lockless(const struct lu_env *env,
67                                  struct osc_lock *ols, int force);
68 static bool osc_lock_has_pages(struct osc_lock *olck);
69
70 int osc_lock_is_lockless(const struct osc_lock *olck)
71 {
72         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
73 }
74
75 /**
76  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
77  * pointer cannot be dereferenced, as lock is not protected from concurrent
78  * reclaim. This function is a helper for osc_lock_invariant().
79  */
80 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
81 {
82         struct ldlm_lock *lock;
83
84         lock = ldlm_handle2lock(handle);
85         if (lock != NULL)
86                 LDLM_LOCK_PUT(lock);
87         return lock;
88 }
89
90 /**
91  * Invariant that has to be true all of the time.
92  */
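/* Note (added): ergo(a, b) is the libcfs logical-implication helper,
 * equivalent to (!(a) || (b)); e.g. ergo(olock != NULL, handle_used) below
 * asserts that a cached dlm lock implies a valid lock handle. */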
93 static int osc_lock_invariant(struct osc_lock *ols)
94 {
95         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
96         struct ldlm_lock *olock       = ols->ols_lock;
97         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
98
99         if (ergo(osc_lock_is_lockless(ols),
100                  ols->ols_locklessable && ols->ols_lock == NULL))
101                 return 1;
102
103         /*
104          * If all the following "ergo"s are true, return 1, otherwise 0
105          */
106         if (! ergo(olock != NULL, handle_used))
107                 return 0;
108
109         if (! ergo(olock != NULL,
110                    olock->l_handle.h_cookie == ols->ols_handle.cookie))
111                 return 0;
112
113         if (! ergo(handle_used,
114                    ergo(lock != NULL && olock != NULL, lock == olock) &&
115                    ergo(lock == NULL, olock == NULL)))
116                 return 0;
117         /*
118          * Check that ->ols_handle and ->ols_lock are consistent, but
119          * take into account that they are set at different times.
120          */
121         if (! ergo(ols->ols_state == OLS_CANCELLED,
122                    olock == NULL && !handle_used))
123                 return 0;
124         /*
125          * DLM lock is destroyed only after we have seen cancellation
126          * ast.
127          */
128         if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
129                    !ldlm_is_destroyed(olock)))
130                 return 0;
131
132         if (! ergo(ols->ols_state == OLS_GRANTED,
133                    olock != NULL &&
134                    olock->l_req_mode == olock->l_granted_mode &&
135                    ols->ols_hold))
136                 return 0;
137         return 1;
138 }
139
140 /*****************************************************************************
141  *
142  * Lock operations.
143  *
144  */
145
146 /**
147  * Breaks a link between osc_lock and dlm_lock.
148  */
149 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
150 {
151         struct ldlm_lock *dlmlock;
152
153         spin_lock(&osc_ast_guard);
154         dlmlock = olck->ols_lock;
155         if (dlmlock == NULL) {
156                 spin_unlock(&osc_ast_guard);
157                 return;
158         }
159
160         olck->ols_lock = NULL;
161         /* wb(); --- for all who check (ols->ols_lock != NULL) before
162          * the call to osc_lock_detach() */
163         dlmlock->l_ast_data = NULL;
164         olck->ols_handle.cookie = 0ULL;
165         spin_unlock(&osc_ast_guard);
166
167         lock_res_and_lock(dlmlock);
168         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
169                 struct cl_object *obj = olck->ols_cl.cls_obj;
170                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
171                 __u64 old_kms;
172
173                 cl_object_attr_lock(obj);
174                 /* Must get the value under the lock to avoid possible races. */
175                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
176                 /* Update the kms. Need to loop over all granted locks.
177                  * Not a problem for the client. */
178                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
179
180                 cl_object_attr_set(env, obj, attr, CAT_KMS);
181                 cl_object_attr_unlock(obj);
182         }
183         unlock_res_and_lock(dlmlock);
184
185         /* release a reference taken in osc_lock_upcall0(). */
186         LASSERT(olck->ols_has_ref);
187         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
188         LDLM_LOCK_RELEASE(dlmlock);
189         olck->ols_has_ref = 0;
190 }
191
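/*
 * Drop the dlm lock reference tracked by ols_hold, if any, by passing the
 * lock handle and mode to osc_cancel_base().
 */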
192 static int osc_lock_unhold(struct osc_lock *ols)
193 {
194         int result = 0;
195
196         if (ols->ols_hold) {
197                 ols->ols_hold = 0;
198                 result = osc_cancel_base(&ols->ols_handle,
199                                          ols->ols_einfo.ei_mode);
200         }
201         return result;
202 }
203
204 static int osc_lock_unuse(const struct lu_env *env,
205                           const struct cl_lock_slice *slice)
206 {
207         struct osc_lock *ols = cl2osc_lock(slice);
208
209         LINVRNT(osc_lock_invariant(ols));
210
211         switch (ols->ols_state) {
212         case OLS_NEW:
213                 LASSERT(!ols->ols_hold);
214                 LASSERT(ols->ols_agl);
215                 return 0;
216         case OLS_UPCALL_RECEIVED:
217                 osc_lock_unhold(ols);
218         case OLS_ENQUEUED:
219                 LASSERT(!ols->ols_hold);
220                 osc_lock_detach(env, ols);
221                 ols->ols_state = OLS_NEW;
222                 return 0;
223         case OLS_GRANTED:
224                 LASSERT(!ols->ols_glimpse);
225                 LASSERT(ols->ols_hold);
226                 /*
227                  * Move lock into OLS_RELEASED state before calling
228                  * osc_cancel_base() so that possible synchronous cancellation
229                  * (that always happens e.g., for liblustre) sees that lock is
230                  * released.
231                  */
232                 ols->ols_state = OLS_RELEASED;
233                 return osc_lock_unhold(ols);
234         default:
235                 CERROR("Impossible state: %d\n", ols->ols_state);
236                 LBUG();
237         }
238 }
239
240 static void osc_lock_fini(const struct lu_env *env,
241                           struct cl_lock_slice *slice)
242 {
243         struct osc_lock  *ols = cl2osc_lock(slice);
244
245         LINVRNT(osc_lock_invariant(ols));
246         /*
247          * ->ols_hold can still be true at this point if, for example, a
248          * thread that requested a lock was killed (and released a reference
249          * to the lock), before reply from a server was received. In this case
250          * lock is destroyed immediately after upcall.
251          */
252         osc_lock_unhold(ols);
253         LASSERT(ols->ols_lock == NULL);
254         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
255 }
256
257 static void osc_lock_build_policy(const struct lu_env *env,
258                                   const struct cl_lock *lock,
259                                   ldlm_policy_data_t *policy)
260 {
261         const struct cl_lock_descr *d = &lock->cll_descr;
262
263         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
264         policy->l_extent.gid = d->cld_gid;
265 }
266
267 static __u64 osc_enq2ldlm_flags(__u32 enqflags)
268 {
269         __u64 result = 0;
270
271         LASSERT((enqflags & ~CEF_MASK) == 0);
272
273         if (enqflags & CEF_NONBLOCK)
274                 result |= LDLM_FL_BLOCK_NOWAIT;
275         if (enqflags & CEF_ASYNC)
276                 result |= LDLM_FL_HAS_INTENT;
277         if (enqflags & CEF_DISCARD_DATA)
278                 result |= LDLM_FL_AST_DISCARD_DATA;
279         return result;
280 }
281
282 /**
283  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
284  * pointers. Initialized in osc_init().
285  */
286 spinlock_t osc_ast_guard;
287
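/*
 * Return the osc_lock attached to a dlm lock via ldlm_lock::l_ast_data,
 * taking a trusted reference on the corresponding cl_lock. The caller must
 * release it with osc_ast_data_put(). A NULL result means all osc references
 * on the dlm lock have been released.
 */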
288 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
289 {
290         struct osc_lock *olck;
291
292         lock_res_and_lock(dlm_lock);
293         spin_lock(&osc_ast_guard);
294         olck = dlm_lock->l_ast_data;
295         if (olck != NULL) {
296                 struct cl_lock *lock = olck->ols_cl.cls_lock;
297                 /*
298                  * If osc_lock holds a reference on ldlm lock, return it even
299                  * when cl_lock is in CLS_FREEING state. This way
300                  *
301                  *         osc_ast_data_get(dlmlock) == NULL
302                  *
303                  * guarantees that all osc references on dlmlock were
304                  * released. osc_dlm_blocking_ast0() relies on that.
305                  */
306                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
307                         cl_lock_get_trust(lock);
308                         lu_ref_add_atomic(&lock->cll_reference,
309                                           "ast", current);
310                 } else
311                         olck = NULL;
312         }
313         spin_unlock(&osc_ast_guard);
314         unlock_res_and_lock(dlm_lock);
315         return olck;
316 }
317
318 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
319 {
320         struct cl_lock *lock;
321
322         lock = olck->ols_cl.cls_lock;
323         lu_ref_del(&lock->cll_reference, "ast", current);
324         cl_lock_put(env, lock);
325 }
326
327 /**
328  * Updates object attributes from a lock value block (lvb) received together
329  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
330  * logic.
331  *
332  * This can be optimized to not update attributes when lock is a result of a
333  * local match.
334  *
335  * Called under lock and resource spin-locks.
336  */
337 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
338                                 int rc)
339 {
340         struct ost_lvb    *lvb;
341         struct cl_object  *obj;
342         struct lov_oinfo  *oinfo;
343         struct cl_attr    *attr;
344         unsigned           valid;
345
346         ENTRY;
347
348         if (!(olck->ols_flags & LDLM_FL_LVB_READY))
349                 RETURN_EXIT;
350
351         lvb   = &olck->ols_lvb;
352         obj   = olck->ols_cl.cls_obj;
353         oinfo = cl2osc(obj)->oo_oinfo;
354         attr  = &osc_env_info(env)->oti_attr;
355         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
356         cl_lvb2attr(attr, lvb);
357
358         cl_object_attr_lock(obj);
359         if (rc == 0) {
360                 struct ldlm_lock  *dlmlock;
361                 __u64 size;
362
363                 dlmlock = olck->ols_lock;
364                 LASSERT(dlmlock != NULL);
365
366                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
367                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
368                 size = lvb->lvb_size;
369                 /* Extend KMS up to the end of this lock and no further.
370                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
371                 if (size > dlmlock->l_policy_data.l_extent.end)
372                         size = dlmlock->l_policy_data.l_extent.end + 1;
373                 if (size >= oinfo->loi_kms) {
374                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
375                                    ", kms="LPU64, lvb->lvb_size, size);
376                         valid |= CAT_KMS;
377                         attr->cat_kms = size;
378                 } else {
379                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
380                                    LPU64"; leaving kms="LPU64", end="LPU64,
381                                    lvb->lvb_size, oinfo->loi_kms,
382                                    dlmlock->l_policy_data.l_extent.end);
383                 }
384                 ldlm_lock_allow_match_locked(dlmlock);
385         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
386                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
387                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
388         } else
389                 valid = 0;
390
391         if (valid != 0)
392                 cl_object_attr_set(env, obj, attr, valid);
393
394         cl_object_attr_unlock(obj);
395
396         EXIT;
397 }
398
399 /**
400  * Called when a lock is granted, from an upcall (when server returned a
401  * granted lock), or from completion AST, when server returned a blocked lock.
402  *
403  * Called under lock and resource spin-locks, that are released temporarily
404  * here.
405  */
406 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
407                              struct ldlm_lock *dlmlock, int rc)
408 {
409         struct ldlm_extent   *ext;
410         struct cl_lock       *lock;
411         struct cl_lock_descr *descr;
412
413         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
414
415         ENTRY;
416         if (olck->ols_state < OLS_GRANTED) {
417                 lock  = olck->ols_cl.cls_lock;
418                 ext   = &dlmlock->l_policy_data.l_extent;
419                 descr = &osc_env_info(env)->oti_descr;
420                 descr->cld_obj = lock->cll_descr.cld_obj;
421
422                 /* XXX check that ->l_granted_mode is valid. */
423                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
424                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
425                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
426                 descr->cld_gid   = ext->gid;
427                 /*
428                  * tell upper layers the extent of the lock that was actually
429                  * granted
430                  */
431                 olck->ols_state = OLS_GRANTED;
432                 osc_lock_lvb_update(env, olck, rc);
433
434                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
435                  * to take a semaphore on a parent lock. This is safe, because
436                  * spin-locks are needed to protect consistency of
437                  * dlmlock->l_*_mode and LVB, and we have finished processing
438                  * them. */
439                 unlock_res_and_lock(dlmlock);
440                 cl_lock_modify(env, lock, descr);
441                 cl_lock_signal(env, lock);
442                 LINVRNT(osc_lock_invariant(olck));
443                 lock_res_and_lock(dlmlock);
444         }
445         EXIT;
446 }
447
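/*
 * Bind the dlm lock (found via ols_handle) to its osc_lock: set ols_lock,
 * take the references that the osc_lock will own (released later in
 * osc_lock_unhold()/osc_lock_detach()), and finish granting if the lock is
 * already granted.
 */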
448 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
449
450 {
451         struct ldlm_lock *dlmlock;
452
453         ENTRY;
454
455         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
456         LASSERT(dlmlock != NULL);
457
458         lock_res_and_lock(dlmlock);
459         spin_lock(&osc_ast_guard);
460         LASSERT(dlmlock->l_ast_data == olck);
461         LASSERT(olck->ols_lock == NULL);
462         olck->ols_lock = dlmlock;
463         spin_unlock(&osc_ast_guard);
464
465         /*
466          * Lock might be not yet granted. In this case, completion ast
467          * (osc_ldlm_completion_ast()) comes later and finishes lock
468          * granting.
469          */
470         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
471                 osc_lock_granted(env, olck, dlmlock, 0);
472         unlock_res_and_lock(dlmlock);
473
474         /*
475          * osc_enqueue_interpret() decrefs asynchronous locks, counter
476          * this.
477          */
478         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
479         olck->ols_hold = 1;
480
481         /* lock reference taken by ldlm_handle2lock_long() is owned by
482          * osc_lock and released in osc_lock_detach() */
483         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
484         olck->ols_has_ref = 1;
485 }
486
487 /**
488  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
489  * received from a server, or after osc_enqueue_base() matched a local DLM
490  * lock.
491  */
492 static int osc_lock_upcall(void *cookie, int errcode)
493 {
494         struct osc_lock         *olck  = cookie;
495         struct cl_lock_slice    *slice = &olck->ols_cl;
496         struct cl_lock          *lock  = slice->cls_lock;
497         struct lu_env           *env;
498         struct cl_env_nest       nest;
499
500         ENTRY;
501         env = cl_env_nested_get(&nest);
502         if (!IS_ERR(env)) {
503                 int rc;
504
505                 cl_lock_mutex_get(env, lock);
506
507                 LASSERT(lock->cll_state >= CLS_QUEUING);
508                 if (olck->ols_state == OLS_ENQUEUED) {
509                         olck->ols_state = OLS_UPCALL_RECEIVED;
510                         rc = ldlm_error2errno(errcode);
511                 } else if (olck->ols_state == OLS_CANCELLED) {
512                         rc = -EIO;
513                 } else {
514                         CERROR("Impossible state: %d\n", olck->ols_state);
515                         LBUG();
516                 }
517                 if (rc) {
518                         struct ldlm_lock *dlmlock;
519
520                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
521                         if (dlmlock != NULL) {
522                                 lock_res_and_lock(dlmlock);
523                                 spin_lock(&osc_ast_guard);
524                                 LASSERT(olck->ols_lock == NULL);
525                                 dlmlock->l_ast_data = NULL;
526                                 olck->ols_handle.cookie = 0ULL;
527                                 spin_unlock(&osc_ast_guard);
528                                 ldlm_lock_fail_match_locked(dlmlock);
529                                 unlock_res_and_lock(dlmlock);
530                                 LDLM_LOCK_PUT(dlmlock);
531                         }
532                 } else {
533                         if (olck->ols_glimpse)
534                                 olck->ols_glimpse = 0;
535                         osc_lock_upcall0(env, olck);
536                 }
537
538                 /* Error handling, some errors are tolerable. */
539                 if (olck->ols_locklessable && rc == -EUSERS) {
540                         /* This is a tolerable error; turn this lock into
541                          * a lockless lock.
542                          */
543                         osc_object_set_contended(cl2osc(slice->cls_obj));
544                         LASSERT(slice->cls_ops == &osc_lock_ops);
545
546                         /* Change this lock to an ldlm-lock-less lock. */
547                         osc_lock_to_lockless(env, olck, 1);
548                         olck->ols_state = OLS_GRANTED;
549                         rc = 0;
550                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
551                         osc_lock_lvb_update(env, olck, rc);
552                         cl_lock_delete(env, lock);
553                         /* Hide the error. */
554                         rc = 0;
555                 }
556
557                 if (rc == 0) {
558                         /* For the AGL case, the RPC sponsor may exit the cl_lock
559                          * processing without calling wait() before the related OSC
560                          * lock upcall(). So update the lock status according
561                          * to the enqueue result inside the AGL upcall(). */
562                         if (olck->ols_agl) {
563                                 lock->cll_flags |= CLF_FROM_UPCALL;
564                                 cl_wait_try(env, lock);
565                                 lock->cll_flags &= ~CLF_FROM_UPCALL;
566                         }
567                         cl_lock_signal(env, lock);
568                         /* del user for lock upcall cookie */
569                         if (olck->ols_agl) {
570                                 if (!olck->ols_glimpse)
571                                         olck->ols_agl = 0;
572                                 cl_unuse_try(env, lock);
573                         }
574                 } else {
575                         /* del user for lock upcall cookie */
576                         if (olck->ols_agl)
577                                 cl_lock_user_del(env, lock);
578                         cl_lock_error(env, lock, rc);
579                 }
580
581                 /* release cookie reference, acquired by osc_lock_enqueue() */
582                 cl_lock_hold_release(env, lock, "upcall", lock);
583                 cl_lock_mutex_put(env, lock);
584
585                 lu_ref_del(&lock->cll_reference, "upcall", lock);
586                 /* This may be the last reference, so cl_lock_put() must be
587                  * called after cl_lock_mutex_put(). */
588                 cl_lock_put(env, lock);
589
590                 cl_env_nested_put(&nest, env);
591         } else {
592                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
593                 LBUG();
594         }
595         RETURN(errcode);
596 }
597
598 /**
599  * Core of osc_dlm_blocking_ast() logic.
600  */
601 static void osc_lock_blocking(const struct lu_env *env,
602                               struct ldlm_lock *dlmlock,
603                               struct osc_lock *olck, int blocking)
604 {
605         struct cl_lock *lock = olck->ols_cl.cls_lock;
606
607         LASSERT(olck->ols_lock == dlmlock);
608         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
609         LASSERT(!osc_lock_is_lockless(olck));
610
611         /*
612          * The lock might still be addref-ed here if, e.g., a blocking ast
613          * is sent for a failed lock.
614          */
615         osc_lock_unhold(olck);
616
617         if (blocking && olck->ols_state < OLS_BLOCKED)
618                 /*
619                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
620                  * because it recursively re-enters osc_lock_blocking(), with
621                  * the state set to OLS_CANCELLED.
622                  */
623                 olck->ols_state = OLS_BLOCKED;
624         /*
625          * cancel and destroy lock at least once no matter how blocking ast is
626          * entered (see comment above osc_ldlm_blocking_ast() for use
627          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
628          */
629         cl_lock_cancel(env, lock);
630         cl_lock_delete(env, lock);
631 }
632
633 /**
634  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
635  * and ldlm_lock caches.
636  */
637 static int osc_dlm_blocking_ast0(const struct lu_env *env,
638                                  struct ldlm_lock *dlmlock,
639                                  void *data, int flag)
640 {
641         struct osc_lock *olck;
642         struct cl_lock  *lock;
643         int result;
644         int cancel;
645
646         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
647
648         cancel = 0;
649         olck = osc_ast_data_get(dlmlock);
650         if (olck != NULL) {
651                 lock = olck->ols_cl.cls_lock;
652                 cl_lock_mutex_get(env, lock);
653                 LINVRNT(osc_lock_invariant(olck));
654                 if (olck->ols_ast_wait) {
655                         /* wake up osc_lock_use() */
656                         cl_lock_signal(env, lock);
657                         olck->ols_ast_wait = 0;
658                 }
659                 /*
660                  * Lock might have been canceled while this thread was
661          * sleeping on the lock mutex, but olck is pinned in memory.
662                  */
663                 if (olck == dlmlock->l_ast_data) {
664                         /*
665                          * NOTE: DLM sends blocking AST's for failed locks
666                          *       (that are still in pre-OLS_GRANTED state)
667                          *       too, and they have to be canceled otherwise
668          *       the DLM lock is never destroyed and is stuck in
669          *       memory.
670                          *
671                          *       Alternatively, ldlm_cli_cancel() can be
672                          *       called here directly for osc_locks with
673                          *       ols_state < OLS_GRANTED to maintain an
674                          *       invariant that ->clo_cancel() is only called
675                          *       for locks that were granted.
676                          */
677                         LASSERT(data == olck);
678                         osc_lock_blocking(env, dlmlock,
679                                           olck, flag == LDLM_CB_BLOCKING);
680                 } else
681                         cancel = 1;
682                 cl_lock_mutex_put(env, lock);
683                 osc_ast_data_put(env, olck);
684         } else
685                 /*
686                  * DLM lock exists, but there is no cl_lock attached to it.
687                  * This is a `normal' race. cl_object and its cl_lock's can be
688                  * removed by memory pressure, together with all pages.
689                  */
690                 cancel = (flag == LDLM_CB_BLOCKING);
691
692         if (cancel) {
693                 struct lustre_handle *lockh;
694
695                 lockh = &osc_env_info(env)->oti_handle;
696                 ldlm_lock2handle(dlmlock, lockh);
697                 result = ldlm_cli_cancel(lockh, LCF_ASYNC);
698         } else
699                 result = 0;
700         return result;
701 }
702
703 /**
704  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
705  * some other lock, or is canceled. This function is installed as a
706  * ldlm_lock::l_blocking_ast() for client extent locks.
707  *
708  * Control flow is tricky, because ldlm uses the same call-back
709  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
710  *
711  * \param dlmlock lock for which ast occurred.
712  *
713  * \param new description of a conflicting lock in case of blocking ast.
714  *
715  * \param data value of dlmlock->l_ast_data
716  *
717  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
718  *             cancellation and blocking ast's.
719  *
720  * Possible use cases:
721  *
722  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
723  *       lock due to lock lru pressure, or explicit user request to purge
724  *       locks.
725  *
726  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
727  *       us that dlmlock conflicts with another lock that some client is
728  *       enqueuing. The lock is canceled.
729  *
730  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
731  *             ldlm_cli_cancel() that calls
732  *
733  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
734  *
735  *             recursively entering osc_ldlm_blocking_ast().
736  *
737  *     - client cancels the lock voluntarily (e.g., as part of early cancellation):
738  *
739  *           cl_lock_cancel()->
740  *             osc_lock_cancel()->
741  *               ldlm_cli_cancel()->
742  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
743  *
744  */
745 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
746                                  struct ldlm_lock_desc *new, void *data,
747                                  int flag)
748 {
749         struct lu_env     *env;
750         struct cl_env_nest nest;
751         int                result;
752
753         /*
754          * This can be called in the context of outer IO, e.g.,
755          *
756          *     cl_enqueue()->...
757          *       ->osc_enqueue_base()->...
758          *         ->ldlm_prep_elc_req()->...
759          *           ->ldlm_cancel_callback()->...
760          *             ->osc_ldlm_blocking_ast()
761          *
762          * A new environment has to be created so as not to corrupt the outer context.
763          */
764         env = cl_env_nested_get(&nest);
765         if (!IS_ERR(env)) {
766                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
767                 cl_env_nested_put(&nest, env);
768         } else {
769                 result = PTR_ERR(env);
770                 /*
771                  * XXX This should never happen, as cl_lock is
772                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
773                  * should be used.
774                  */
775                 LBUG();
776         }
777         if (result != 0) {
778                 if (result == -ENODATA)
779                         result = 0;
780                 else
781                         CERROR("BAST failed: %d\n", result);
782         }
783         return result;
784 }
785
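/*
 * Completion AST for client extent locks. Runs the generic ldlm completion
 * handling first, then propagates the (possibly granted) state and LVB to
 * the cl_lock, unless osc_lock_upcall() has not yet bound the osc_lock to
 * the dlm lock.
 */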
786 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
787                                    __u64 flags, void *data)
788 {
789         struct cl_env_nest nest;
790         struct lu_env     *env;
791         struct osc_lock   *olck;
792         struct cl_lock    *lock;
793         int result;
794         int dlmrc;
795
796         /* first, do dlm part of the work */
797         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
798         if (flags == LDLM_FL_WAIT_NOREPROC)
799                 return dlmrc;
800
801         /* then, notify cl_lock */
802         env = cl_env_nested_get(&nest);
803         if (!IS_ERR(env)) {
804                 olck = osc_ast_data_get(dlmlock);
805                 if (olck != NULL) {
806                         lock = olck->ols_cl.cls_lock;
807                         cl_lock_mutex_get(env, lock);
808                         /*
809                          * ldlm_handle_cp_callback() copied LVB from request
810                          * to lock->l_lvb_data, store it in osc_lock.
811                          */
812                         LASSERT(dlmlock->l_lvb_data != NULL);
813                         lock_res_and_lock(dlmlock);
814                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
815                         if (olck->ols_lock == NULL) {
816                                 /*
817                                  * upcall (osc_lock_upcall()) hasn't yet been
818                                  * called. Do nothing now, upcall will bind
819                                  * olck to dlmlock and signal the waiters.
820                                  *
821                                  * This maintains an invariant that osc_lock
822                                  * and ldlm_lock are always bound when
823                                  * osc_lock is in OLS_GRANTED state.
824                                  */
825                         } else if (dlmlock->l_granted_mode ==
826                                    dlmlock->l_req_mode) {
827                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
828                         }
829                         unlock_res_and_lock(dlmlock);
830
831                         if (dlmrc != 0) {
832                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
833                                               "dlmlock returned %d\n", dlmrc);
834                                 cl_lock_error(env, lock, dlmrc);
835                         }
836                         cl_lock_mutex_put(env, lock);
837                         osc_ast_data_put(env, olck);
838                         result = 0;
839                 } else
840                         result = -ELDLM_NO_LOCK_DATA;
841                 cl_env_nested_put(&nest, env);
842         } else
843                 result = PTR_ERR(env);
844         return dlmrc ?: result;
845 }
846
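/*
 * Glimpse AST: pack a reply carrying an up-to-date LVB obtained from
 * cl_object_glimpse(), deliberately without taking the cl_lock mutex
 * (see the LU-1274 note below).
 */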
847 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
848 {
849         struct ptlrpc_request  *req  = data;
850         struct osc_lock        *olck;
851         struct cl_lock         *lock;
852         struct cl_object       *obj;
853         struct cl_env_nest      nest;
854         struct lu_env          *env;
855         struct ost_lvb         *lvb;
856         struct req_capsule     *cap;
857         int                     result;
858
859         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
860
861         env = cl_env_nested_get(&nest);
862         if (!IS_ERR(env)) {
863                 /* osc_ast_data_get() has to go after the environment is
864                  * allocated, because osc_ast_data_get() acquires a
865                  * reference to a lock, and it can only be released
866                  * within an environment.
867                  */
868                 olck = osc_ast_data_get(dlmlock);
869                 if (olck != NULL) {
870                         lock = olck->ols_cl.cls_lock;
871                         /* Do not grab the mutex of cl_lock for glimpse.
872                          * See LU-1274 for details.
873                          * BTW, it's okay for cl_lock to be cancelled during
874                          * this period because server can handle this race.
875                          * See ldlm_server_glimpse_ast() for details.
876                          * cl_lock_mutex_get(env, lock); */
877                         cap = &req->rq_pill;
878                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
879                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
880                                              sizeof *lvb);
881                         result = req_capsule_server_pack(cap);
882                         if (result == 0) {
883                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
884                                 obj = lock->cll_descr.cld_obj;
885                                 result = cl_object_glimpse(env, obj, lvb);
886                         }
887                         if (!exp_connect_lvb_type(req->rq_export))
888                                 req_capsule_shrink(&req->rq_pill,
889                                                    &RMF_DLM_LVB,
890                                                    sizeof(struct ost_lvb_v1),
891                                                    RCL_SERVER);
892                         osc_ast_data_put(env, olck);
893                 } else {
894                         /*
895                          * These errors are normal races, so we don't want to
896                          * fill the console with messages by calling
897                          * ptlrpc_error()
898                          */
899                         lustre_pack_reply(req, 1, NULL, NULL);
900                         result = -ELDLM_NO_LOCK_DATA;
901                 }
902                 cl_env_nested_put(&nest, env);
903         } else
904                 result = PTR_ERR(env);
905         req->rq_status = result;
906         return result;
907 }
908
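/*
 * Page gang-lookup callback for osc_lock_weight(): abort as soon as a page
 * that is still in use (vm-locked, dirty or under writeback) is found,
 * counting it so that the covering lock gets a non-zero weight.
 */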
909 static int weigh_cb(const struct lu_env *env, struct cl_io *io,
910                     struct osc_page *ops, void *cbdata)
911 {
912         struct cl_page *page = ops->ops_cl.cpl_page;
913
914         if (cl_page_is_vmlocked(env, page)
915 #if defined(__KERNEL__)
916             || PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
917 #endif
918            ) {
919                 (*(unsigned long *)cbdata)++;
920                 return CLP_GANG_ABORT;
921         }
922
923         return CLP_GANG_OKAY;
924 }
925
926 static unsigned long osc_lock_weight(const struct lu_env *env,
927                                      const struct osc_lock *ols)
928 {
929         struct cl_io *io = &osc_env_info(env)->oti_io;
930         struct cl_lock_descr *descr = &ols->ols_cl.cls_lock->cll_descr;
931         struct cl_object *obj = ols->ols_cl.cls_obj;
932         unsigned long npages = 0;
933         int result;
934         ENTRY;
935
936         io->ci_obj = cl_object_top(obj);
937         io->ci_ignore_layout = 1;
938         result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
939         if (result != 0)
940                 RETURN(result);
941
942         do {
943                 result = osc_page_gang_lookup(env, io, cl2osc(obj),
944                                               descr->cld_start, descr->cld_end,
945                                               weigh_cb, (void *)&npages);
946                 if (result == CLP_GANG_ABORT)
947                         break;
948                 if (result == CLP_GANG_RESCHED)
949                         cond_resched();
950         } while (result != CLP_GANG_OKAY);
951         cl_io_fini(env, io);
952
953         return npages;
954 }
955
956 /**
957  * Get the weight of dlm lock for early cancellation.
958  */
959 unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
960 {
961         struct cl_env_nest       nest;
962         struct lu_env           *env;
963         struct osc_lock         *lock;
964         unsigned long            weight;
965         ENTRY;
966
967         might_sleep();
968         /*
969          * osc_ldlm_weigh_ast has a complex context since it might be called
970          * because of lock canceling, or from a user's input. We have to make
971          * a new environment for it. It is probably safe to use the upper
972          * context, because cl_lock_put() doesn't modify environment
973          * variables. But just in case...
974          */
975         env = cl_env_nested_get(&nest);
976         if (IS_ERR(env))
977                 /* Mostly because of lack of memory; do not eliminate this lock. */
978                 RETURN(1);
979
980         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
981         lock = osc_ast_data_get(dlmlock);
982         if (lock == NULL) {
983                 /* cl_lock was destroyed because of memory pressure.
984                  * It is more reasonable to assign this type of lock
985                  * a lower cost.
986                  */
987                 GOTO(out, weight = 0);
988         }
989
990         weight = osc_lock_weight(env, lock);
991         osc_ast_data_put(env, lock);
992         EXIT;
993
994 out:
995         cl_env_nested_put(&nest, env);
996         return weight;
997 }
998
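/*
 * Fill the ldlm enqueue descriptor: an extent lock in the ldlm mode matching
 * the cl_lock mode (glimpse/phantom locks are enqueued as read locks), with
 * the osc blocking, completion and glimpse callbacks, and the osc_lock as
 * the AST data.
 */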
999 static void osc_lock_build_einfo(const struct lu_env *env,
1000                                  const struct cl_lock *clock,
1001                                  struct osc_lock *lock,
1002                                  struct ldlm_enqueue_info *einfo)
1003 {
1004         enum cl_lock_mode mode;
1005
1006         mode = clock->cll_descr.cld_mode;
1007         if (mode == CLM_PHANTOM)
1008                 /*
1009                  * For now, enqueue all glimpse locks in read mode. In the
1010                  * future, client might choose to enqueue LCK_PW lock for
1011                  * glimpse on a file opened for write.
1012                  */
1013                 mode = CLM_READ;
1014
1015         einfo->ei_type   = LDLM_EXTENT;
1016         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
1017         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
1018         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
1019         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
1020         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
1021 }
1022
1023 /**
1024  * Determine if the lock should be converted into a lockless lock.
1025  *
1026  * Steps to check:
1027  * - whether the lock has an explicit requirement for a non-lockless lock;
1028  * - the io lock request type ci_lockreq;
1029  * - send the enqueue rpc to the ost to make the final decision;
1030  * - special treatment for lockless truncate.
1031  *
1032  *  Additional policy can be implemented here, e.g., never do lockless-io
1033  *  for large extents.
1034  */
1035 static void osc_lock_to_lockless(const struct lu_env *env,
1036                                  struct osc_lock *ols, int force)
1037 {
1038         struct cl_lock_slice *slice = &ols->ols_cl;
1039
1040         LASSERT(ols->ols_state == OLS_NEW ||
1041                 ols->ols_state == OLS_UPCALL_RECEIVED);
1042
1043         if (force) {
1044                 ols->ols_locklessable = 1;
1045                 slice->cls_ops = &osc_lock_lockless_ops;
1046         } else {
1047                 struct osc_io *oio     = osc_env_io(env);
1048                 struct cl_io  *io      = oio->oi_cl.cis_io;
1049                 struct cl_object *obj  = slice->cls_obj;
1050                 struct osc_object *oob = cl2osc(obj);
1051                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1052                 struct obd_connect_data *ocd;
1053
1054                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1055                         io->ci_lockreq == CILR_MAYBE ||
1056                         io->ci_lockreq == CILR_NEVER);
1057
1058                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1059                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
1060                                 (io->ci_lockreq == CILR_MAYBE) &&
1061                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1062                 if (io->ci_lockreq == CILR_NEVER ||
1063                         /* lockless IO */
1064                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1065                         /* lockless truncate */
1066                     (cl_io_is_trunc(io) &&
1067                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1068                       osd->od_lockless_truncate)) {
1069                         ols->ols_locklessable = 1;
1070                         slice->cls_ops = &osc_lock_lockless_ops;
1071                 }
1072         }
1073         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1074 }
1075
1076 static int osc_lock_compatible(const struct osc_lock *qing,
1077                                const struct osc_lock *qed)
1078 {
1079         enum cl_lock_mode qing_mode;
1080         enum cl_lock_mode qed_mode;
1081
1082         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1083         if (qed->ols_glimpse &&
1084             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1085                 return 1;
1086
1087         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1088         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1089 }
1090
1091 /**
1092  * Cancel all conflicting locks and wait for them to be destroyed.
1093  *
1094  * This function is used for two purposes:
1095  *
1096  *     - early cancel all conflicting locks before starting IO, and
1097  *
1098  *     - guarantee that pages added to the page cache by lockless IO are never
1099  *       covered by locks other than lockless IO lock, and, hence, are not
1100  *       visible to other threads.
1101  */
1102 static int osc_lock_enqueue_wait(const struct lu_env *env,
1103                                  const struct osc_lock *olck)
1104 {
1105         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1106         struct cl_lock_descr    *descr   = &lock->cll_descr;
1107         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1108         struct cl_lock          *scan;
1109         struct cl_lock          *conflict= NULL;
1110         int lockless                     = osc_lock_is_lockless(olck);
1111         int rc                           = 0;
1112         ENTRY;
1113
1114         LASSERT(cl_lock_is_mutexed(lock));
1115
1116         /* Let a glimpse lock enqueue anyway, because we actually
1117          * don't need to cancel any conflicting locks for it. */
1118         if (olck->ols_glimpse)
1119                 return 0;
1120
1121         spin_lock(&hdr->coh_lock_guard);
1122         list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1123                 struct cl_lock_descr *cld = &scan->cll_descr;
1124                 const struct osc_lock *scan_ols;
1125
1126                 if (scan == lock)
1127                         break;
1128
1129                 if (scan->cll_state < CLS_QUEUING ||
1130                     scan->cll_state == CLS_FREEING ||
1131                     cld->cld_start > descr->cld_end ||
1132                     cld->cld_end < descr->cld_start)
1133                         continue;
1134
1135                 /* overlapped and living locks. */
1136
1137                 /* We're not supposed to give up group lock. */
1138                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1139                         LASSERT(descr->cld_mode != CLM_GROUP ||
1140                                 descr->cld_gid != scan->cll_descr.cld_gid);
1141                         continue;
1142                 }
1143
1144                 scan_ols = osc_lock_at(scan);
1145
1146                 /* We need to cancel the compatible locks if we're enqueuing
1147                  * a lockless lock, for example:
1148                  * imagine that client has PR lock on [0, 1000], and thread T0
1149                  * is doing lockless IO in [500, 1500] region. Concurrent
1150                  * thread T1 can see lockless data in [500, 1000], which is
1151                  * wrong, because these data are possibly stale. */
1152                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1153                         continue;
1154
1155                 cl_lock_get_trust(scan);
1156                 conflict = scan;
1157                 break;
1158         }
1159         spin_unlock(&hdr->coh_lock_guard);
1160
1161         if (conflict) {
1162                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1163                         /* We want a group lock, but a previous lock request
1164                          * conflicts; we do not wait but return 0 so the
1165                          * request is sent to the server
1166                          */
1167                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1168                                            "with %p, no wait, send to server\n",
1169                                lock, conflict);
1170                         cl_lock_put(env, conflict);
1171                         rc = 0;
1172                 } else {
1173                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1174                                            "will wait\n",
1175                                lock, conflict);
1176                         LASSERT(lock->cll_conflict == NULL);
1177                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1178                                    lock);
1179                         lock->cll_conflict = conflict;
1180                         rc = CLO_WAIT;
1181                 }
1182         }
1183         RETURN(rc);
1184 }
1185
1186 /**
1187  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1188  * layer. This initiates ldlm enqueue:
1189  *
1190  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1191  *
1192  *     - calls osc_enqueue_base() to do actual enqueue.
1193  *
1194  * osc_enqueue_base() is supplied with an upcall function that is executed
1195  * when lock is received either after a local cached ldlm lock is matched, or
1196  * when a reply from the server is received.
1197  *
1198  * This function does not wait for the network communication to complete.
1199  */
1200 static int osc_lock_enqueue(const struct lu_env *env,
1201                             const struct cl_lock_slice *slice,
1202                             struct cl_io *unused, __u32 enqflags)
1203 {
1204         struct osc_lock          *ols     = cl2osc_lock(slice);
1205         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1206         int result;
1207         ENTRY;
1208
1209         LASSERT(cl_lock_is_mutexed(lock));
1210         LASSERTF(ols->ols_state == OLS_NEW,
1211                  "Impossible state: %d\n", ols->ols_state);
1212
1213         LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
1214                 "lock = %p, ols = %p\n", lock, ols);
1215
1216         result = osc_lock_enqueue_wait(env, ols);
1217         if (result == 0) {
1218                 if (!osc_lock_is_lockless(ols)) {
1219                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1220                         struct osc_thread_info   *info = osc_env_info(env);
1221                         struct ldlm_res_id       *resname = &info->oti_resname;
1222                         ldlm_policy_data_t       *policy = &info->oti_policy;
1223                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1224
1225                         /* The lock will be passed as the upcall cookie;
1226                          * hold a reference to prevent it from being released. */
1227                         cl_lock_hold_add(env, lock, "upcall", lock);
1228                         /* a user for agl lock also */
1229                         if (ols->ols_agl)
1230                                 cl_lock_user_add(env, lock);
1231                         ols->ols_state = OLS_ENQUEUED;
1232
1233                         /*
1234                          * XXX: this is a possible blocking point, as
1235                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1236                          * LDLM_CP_CALLBACK.
1237                          */
1238                         ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
1239                         osc_lock_build_policy(env, lock, policy);
1240                         result = osc_enqueue_base(osc_export(obj), resname,
1241                                           &ols->ols_flags, policy,
1242                                           &ols->ols_lvb,
1243                                           obj->oo_oinfo->loi_kms_valid,
1244                                           osc_lock_upcall,
1245                                           ols, einfo, &ols->ols_handle,
1246                                           PTLRPCD_SET, 1, ols->ols_agl);
1247                         if (result != 0) {
1248                                 if (ols->ols_agl)
1249                                         cl_lock_user_del(env, lock);
1250                                 cl_lock_unhold(env, lock, "upcall", lock);
1251                                 if (unlikely(result == -ECANCELED)) {
1252                                         ols->ols_state = OLS_NEW;
1253                                         result = 0;
1254                                 }
1255                         }
1256                 } else {
1257                         ols->ols_state = OLS_GRANTED;
1258                         ols->ols_owner = osc_env_io(env);
1259                 }
1260         }
1261         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1262         RETURN(result);
1263 }
1264
1265 static int osc_lock_wait(const struct lu_env *env,
1266                          const struct cl_lock_slice *slice)
1267 {
1268         struct osc_lock *olck = cl2osc_lock(slice);
1269         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1270
1271         LINVRNT(osc_lock_invariant(olck));
1272
1273         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
1274                 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1275                         return 0;
1276                 } else if (olck->ols_agl) {
1277                         if (lock->cll_flags & CLF_FROM_UPCALL)
1278                                 /* It is from enqueue RPC reply upcall for
1279                                  * updating state. Do not re-enqueue. */
1280                                 return -ENAVAIL;
1281                         else
1282                                 olck->ols_state = OLS_NEW;
1283                 } else {
1284                         LASSERT(lock->cll_error);
1285                         return lock->cll_error;
1286                 }
1287         }
1288
1289         if (olck->ols_state == OLS_NEW) {
1290                 int rc;
1291
1292                 LASSERT(olck->ols_agl);
1293                 olck->ols_agl = 0;
1294                 olck->ols_flags &= ~LDLM_FL_BLOCK_NOWAIT;
1295                 rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
1296                 if (rc != 0)
1297                         return rc;
1298                 else
1299                         return CLO_REENQUEUED;
1300         }
1301
1302         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1303                      lock->cll_error == 0, olck->ols_lock != NULL));
1304
1305         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1306 }
1307
1308 /**
1309  * An implementation of cl_lock_operations::clo_use() method that pins cached
1310  * lock.
1311  */
1312 static int osc_lock_use(const struct lu_env *env,
1313                         const struct cl_lock_slice *slice)
1314 {
1315         struct osc_lock *olck = cl2osc_lock(slice);
1316         int rc;
1317
1318         LASSERT(!olck->ols_hold);
1319
1320         /*
1321          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1322          * flag is not set. This protects us from a concurrent blocking ast.
1323          */
1324         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1325         if (rc == 0) {
1326                 olck->ols_hold = 1;
1327                 olck->ols_state = OLS_GRANTED;
1328         } else {
1329                 struct cl_lock *lock;
1330
1331                 /*
1332                  * Lock is being cancelled somewhere within
1333                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1334                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1335                  * cl_lock mutex.
1336                  */
1337                 lock = slice->cls_lock;
1338                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1339                 LASSERT(lock->cll_users > 0);
1340                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1341                  * lock.*/
1342                 olck->ols_ast_wait = 1;
1343                 rc = CLO_WAIT;
1344         }
1345         return rc;
1346 }
1347
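/**
 * Flushes the pages covered by \a ols: for CLM_WRITE or stronger locks the
 * dirty pages are written back (or dropped when \a discard is set), and all
 * pages in the lock's extent are then discarded, so that the lock can be
 * cancelled without leaving covered pages behind.
 */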
1348 static int osc_lock_flush(struct osc_lock *ols, int discard)
1349 {
1350         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1351         struct cl_env_nest    nest;
1352         struct lu_env        *env;
1353         int result = 0;
1354         ENTRY;
1355
1356         env = cl_env_nested_get(&nest);
1357         if (!IS_ERR(env)) {
1358                 struct osc_object    *obj   = cl2osc(ols->ols_cl.cls_obj);
1359                 struct cl_lock_descr *descr = &lock->cll_descr;
1360                 int rc = 0;
1361
1362                 if (descr->cld_mode >= CLM_WRITE) {
1363                         result = osc_cache_writeback_range(env, obj,
1364                                         descr->cld_start, descr->cld_end,
1365                                         1, discard);
1366                         LDLM_DEBUG(ols->ols_lock,
1367                                 "lock %p: %d pages were %s.\n", lock, result,
1368                                 discard ? "discarded" : "written");
1369                         if (result > 0)
1370                                 result = 0;
1371                 }
1372
1373                 rc = osc_lock_discard_pages(env, ols);
1374                 if (result == 0 && rc < 0)
1375                         result = rc;
1376
1377                 cl_env_nested_put(&nest, env);
1378         } else
1379                 result = PTR_ERR(env);
1380         if (result == 0) {
1381                 ols->ols_flush = 1;
1382                 LINVRNT(!osc_lock_has_pages(ols));
1383         }
1384         RETURN(result);
1385 }
1386
1387 /**
1388  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1389  * called (as part of cl_lock_cancel()) when a lock is cancelled either
1390  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
1391  * conflict with some other lock somewhere in the cluster. This function
1392  * does the following:
1393  *
1394  *     - invalidates all pages protected by this lock (after sending dirty
1395  *       ones to the server, as necessary);
1396  *
1397  *     - decref's underlying ldlm lock;
1398  *
1399  *     - cancels ldlm lock (ldlm_cli_cancel()).
1400  */
1401 static void osc_lock_cancel(const struct lu_env *env,
1402                             const struct cl_lock_slice *slice)
1403 {
1404         struct cl_lock   *lock    = slice->cls_lock;
1405         struct osc_lock  *olck    = cl2osc_lock(slice);
1406         struct ldlm_lock *dlmlock = olck->ols_lock;
1407
1408         LASSERT(cl_lock_is_mutexed(lock));
1409         LINVRNT(osc_lock_invariant(olck));
1410
1411         if (dlmlock != NULL) {
1412                 bool do_cancel;
1413                 int  result = 0;
1414
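                /* Write out the covered pages or, if LDLM_FL_DISCARD_DATA is
                 * set on the dlm lock, simply drop them, before the lock
                 * itself goes away. */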
1415                 if (olck->ols_state >= OLS_GRANTED)
1416                         result = osc_lock_flush(olck,
1417                                 ldlm_is_discard_data(dlmlock));
1418                 osc_lock_unhold(olck);
1419
1420                 lock_res_and_lock(dlmlock);
1421                 /* Now that we are the only user of the dlm read/write
1422                  * reference, ->l_readers + ->l_writers should normally
1423                  * be zero.  However, there is a corner case;
1424                  * see b=18829 for details. */
1425                 do_cancel = (dlmlock->l_readers == 0 &&
1426                              dlmlock->l_writers == 0);
1427                 ldlm_set_cbpending(dlmlock);
1428                 unlock_res_and_lock(dlmlock);
1429                 if (do_cancel)
1430                         result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
1431                 if (result < 0)
1432                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1433                                       "lock %p cancel failure with error(%d)\n",
1434                                       lock, result);
1435         }
1436         olck->ols_state = OLS_CANCELLED;
1437         olck->ols_flags &= ~LDLM_FL_LVB_READY;
1438         osc_lock_detach(env, olck);
1439 }
1440
1441 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
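/**
 * Gang-lookup callback for osc_lock_has_pages(): a page found under a
 * CLM_READ lock is acceptable if some other lock still covers it; any other
 * page is reported and aborts the scan.
 */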
1442 static int check_cb(const struct lu_env *env, struct cl_io *io,
1443                     struct osc_page *ops, void *cbdata)
1444 {
1445         struct cl_lock *lock = cbdata;
1446
1447         if (lock->cll_descr.cld_mode == CLM_READ) {
1448                 struct cl_lock *tmp;
1449                 tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj,
1450                                        osc_index(ops), lock, 1, 0);
1451                 if (tmp != NULL) {
1452                         cl_lock_put(env, tmp);
1453                         return CLP_GANG_OKAY;
1454                 }
1455         }
1456
1457         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1458         CL_PAGE_DEBUG(D_ERROR, env, ops->ops_cl.cpl_page, "\n");
1459         return CLP_GANG_ABORT;
1460 }
1461
1462 /**
1463  * Returns true iff there are pages under \a olck not protected by other
1464  * locks.
1465  */
1466 static bool osc_lock_has_pages(struct osc_lock *olck)
1467 {
1468         struct cl_lock       *lock;
1469         struct cl_lock_descr *descr;
1470         struct cl_object     *obj;
1471         struct osc_object    *oob;
1472         struct cl_env_nest    nest;
1473         struct cl_io         *io;
1474         struct lu_env        *env;
1475         bool                     has_pages;
1476         int                      rc;
1477
1478         env = cl_env_nested_get(&nest);
1479         if (IS_ERR(env))
1480                 return false;
1481
1482         obj   = olck->ols_cl.cls_obj;
1483         oob   = cl2osc(obj);
1484         io    = &oob->oo_debug_io;
1485         lock  = olck->ols_cl.cls_lock;
1486         descr = &lock->cll_descr;
1487
1488         mutex_lock(&oob->oo_debug_mutex);
1489         io->ci_obj = cl_object_top(obj);
1490         io->ci_ignore_layout = 1;
1491         rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1492         if (rc != 0)
1493                 GOTO(out, has_pages = false);
1494
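        /* Scan every page in the lock's extent.  CLP_GANG_RESCHED means the
         * lookup yielded the CPU and must be restarted; CLP_GANG_ABORT means
         * check_cb() found a page that no other lock protects. */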
1495         do {
1496                 rc = osc_page_gang_lookup(env, io, oob,
1497                                           descr->cld_start, descr->cld_end,
1498                                           check_cb, (void *)lock);
1499                 if (rc == CLP_GANG_ABORT)
1500                         break;
1501                 if (rc == CLP_GANG_RESCHED)
1502                         cond_resched();
1503         } while (rc != CLP_GANG_OKAY);
1504         has_pages = (rc == CLP_GANG_ABORT);
1505 out:
1506         cl_io_fini(env, io);
1507         mutex_unlock(&oob->oo_debug_mutex);
1508         cl_env_nested_put(&nest, env);
1509
1510         return has_pages;
1511 }
1512 #else
1513 static bool osc_lock_has_pages(struct osc_lock *olck)
1514 {
1515         return false;
1516 }
1517 #endif /* CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
1518
1519 static void osc_lock_delete(const struct lu_env *env,
1520                             const struct cl_lock_slice *slice)
1521 {
1522         struct osc_lock *olck;
1523
1524         olck = cl2osc_lock(slice);
1525         if (olck->ols_glimpse) {
1526                 LASSERT(!olck->ols_hold);
1527                 LASSERT(!olck->ols_lock);
1528                 return;
1529         }
1530
1531         LINVRNT(osc_lock_invariant(olck));
1532         LINVRNT(!osc_lock_has_pages(olck));
1533
1534         osc_lock_unhold(olck);
1535         osc_lock_detach(env, olck);
1536 }
1537
1538 /**
1539  * Implements cl_lock_operations::clo_state() method for osc layer.
1540  *
1541  * Maintains osc_lock::ols_owner field.
1542  *
1543  * This assumes that a lock always enters CLS_HELD (from some other state) in
1544  * the same IO context as the one that requested the lock. This should not be
1545  * a problem, because the context is by definition shared by all activity
1546  * pertaining to the same high-level IO.
1547  */
1548 static void osc_lock_state(const struct lu_env *env,
1549                            const struct cl_lock_slice *slice,
1550                            enum cl_lock_state state)
1551 {
1552         struct osc_lock *lock = cl2osc_lock(slice);
1553
1554         /*
1555          * XXX multiple io contexts can use the lock at the same time.
1556          */
1557         LINVRNT(osc_lock_invariant(lock));
1558         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1559                 struct osc_io *oio = osc_env_io(env);
1560
1561                 LASSERT(lock->ols_owner == NULL);
1562                 lock->ols_owner = oio;
1563         } else if (state != CLS_HELD)
1564                 lock->ols_owner = NULL;
1565 }
1566
1567 static int osc_lock_print(const struct lu_env *env, void *cookie,
1568                           lu_printer_t p, const struct cl_lock_slice *slice)
1569 {
1570         struct osc_lock *lock = cl2osc_lock(slice);
1571
1572         /*
1573          * XXX print ldlm lock and einfo properly.
1574          */
1575         (*p)(env, cookie, "%p "LPX64" "LPX64" %d %p ",
1576              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1577              lock->ols_state, lock->ols_owner);
1578         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1579         return 0;
1580 }
1581
1582 static int osc_lock_fits_into(const struct lu_env *env,
1583                               const struct cl_lock_slice *slice,
1584                               const struct cl_lock_descr *need,
1585                               const struct cl_io *io)
1586 {
1587         struct osc_lock *ols = cl2osc_lock(slice);
1588
1589         if (need->cld_enq_flags & CEF_NEVER)
1590                 return 0;
1591
1592         if (ols->ols_state >= OLS_CANCELLED)
1593                 return 0;
1594
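        /* CLM_PHANTOM locks protect no data and are used for glimpse
         * matching; the checks below avoid matching a lock that is still
         * queued (see the deadlock scenario in the comment further down). */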
1595         if (need->cld_mode == CLM_PHANTOM) {
1596                 if (ols->ols_agl)
1597                         return !(ols->ols_state > OLS_RELEASED);
1598
1599                 /*
1600                  * Note: a QUEUED lock cannot be matched here, otherwise
1601                  * it may cause deadlocks.
1602                  * Consider a reading process:
1603                  * P1: enqueues a read lock, creating sublock1.
1604                  * P2: enqueues a write lock, creating sublock2 (which
1605                  *     conflicts with sublock1).
1606                  * P1: the read lock is granted.
1607                  * P1: enqueues a glimpse lock (while holding sublock1 for
1608                  *     read), which matches sublock2 and waits for it to be
1609                  *     granted.  But sublock2 can never be granted, because
1610                  *     P1 will not release sublock1.  Deadlock!
1611                  */
1612                 if (ols->ols_state < OLS_GRANTED ||
1613                     ols->ols_state > OLS_RELEASED)
1614                         return 0;
1615         } else if (need->cld_enq_flags & CEF_MUST) {
1616                 /*
1617                  * If the lock has never been enqueued, it cannot be matched,
1618                  * because the enqueue process brings in information that is
1619                  * needed to determine things such as locklessness, CEF_MUST,
1620                  * etc.
1621                  */
1622                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1623                     ols->ols_locklessable)
1624                         return 0;
1625         }
1626         return 1;
1627 }
1628
1629 static const struct cl_lock_operations osc_lock_ops = {
1630         .clo_fini    = osc_lock_fini,
1631         .clo_enqueue = osc_lock_enqueue,
1632         .clo_wait    = osc_lock_wait,
1633         .clo_unuse   = osc_lock_unuse,
1634         .clo_use     = osc_lock_use,
1635         .clo_delete  = osc_lock_delete,
1636         .clo_state   = osc_lock_state,
1637         .clo_cancel  = osc_lock_cancel,
1638         .clo_print   = osc_lock_print,
1639         .clo_fits_into = osc_lock_fits_into,
1640 };
1641
1642 static int osc_lock_lockless_unuse(const struct lu_env *env,
1643                                    const struct cl_lock_slice *slice)
1644 {
1645         struct osc_lock *ols = cl2osc_lock(slice);
1646         struct cl_lock *lock = slice->cls_lock;
1647
1648         LASSERT(ols->ols_state == OLS_GRANTED);
1649         LINVRNT(osc_lock_invariant(ols));
1650
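        /* There is no dlm lock behind a lockless lock, so nothing can be
         * cached: tear the lock down as soon as it is unused. */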
1651         cl_lock_cancel(env, lock);
1652         cl_lock_delete(env, lock);
1653         return 0;
1654 }
1655
1656 static void osc_lock_lockless_cancel(const struct lu_env *env,
1657                                      const struct cl_lock_slice *slice)
1658 {
1659         struct osc_lock   *ols  = cl2osc_lock(slice);
1660         int result;
1661
1662         result = osc_lock_flush(ols, 0);
1663         if (result)
1664                 CERROR("Pages for lockless lock %p were not purged (%d)\n",
1665                        ols, result);
1666         ols->ols_state = OLS_CANCELLED;
1667 }
1668
1669 static int osc_lock_lockless_wait(const struct lu_env *env,
1670                                   const struct cl_lock_slice *slice)
1671 {
1672         struct osc_lock *olck = cl2osc_lock(slice);
1673         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1674
1675         LINVRNT(osc_lock_invariant(olck));
1676         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1677
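        /* A lockless lock is ready as soon as the enqueue upcall has run;
         * only a recorded error can make the wait fail. */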
1678         return lock->cll_error;
1679 }
1680
1681 static void osc_lock_lockless_state(const struct lu_env *env,
1682                                     const struct cl_lock_slice *slice,
1683                                     enum cl_lock_state state)
1684 {
1685         struct osc_lock *lock = cl2osc_lock(slice);
1686
1687         LINVRNT(osc_lock_invariant(lock));
1688         if (state == CLS_HELD) {
1689                 struct osc_io *oio  = osc_env_io(env);
1690
1691                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1692                 lock->ols_owner = oio;
1693
1694                 /* Set the io to be lockless if this lock is for the io's
1695                  * host object. */
1696                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1697                         oio->oi_lockless = 1;
1698         }
1699 }
1700
1701 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1702                                        const struct cl_lock_slice *slice,
1703                                        const struct cl_lock_descr *need,
1704                                        const struct cl_io *io)
1705 {
1706         struct osc_lock *lock = cl2osc_lock(slice);
1707
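        /* Only an enqueue that never wants a real dlm lock (CEF_NEVER) may
         * reuse an existing lockless lock. */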
1708         if (!(need->cld_enq_flags & CEF_NEVER))
1709                 return 0;
1710
1711         /* A lockless lock should only be used by its owning io; see b22147. */
1712         return (lock->ols_owner == osc_env_io(env));
1713 }
1714
1715 static const struct cl_lock_operations osc_lock_lockless_ops = {
1716         .clo_fini      = osc_lock_fini,
1717         .clo_enqueue   = osc_lock_enqueue,
1718         .clo_wait      = osc_lock_lockless_wait,
1719         .clo_unuse     = osc_lock_lockless_unuse,
1720         .clo_state     = osc_lock_lockless_state,
1721         .clo_fits_into = osc_lock_lockless_fits_into,
1722         .clo_cancel    = osc_lock_lockless_cancel,
1723         .clo_print     = osc_lock_print
1724 };
1725
1726 int osc_lock_init(const struct lu_env *env,
1727                   struct cl_object *obj, struct cl_lock *lock,
1728                   const struct cl_io *unused)
1729 {
1730         struct osc_lock *clk;
1731         int result;
1732
1733         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, GFP_NOFS);
1734         if (clk != NULL) {
1735                 __u32 enqflags = lock->cll_descr.cld_enq_flags;
1736
1737                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1738                 clk->ols_state = OLS_NEW;
1739
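                /* Map cl-level enqueue flags onto LDLM flags.  An AGL
                 * (asynchronous glimpse) lock must not block on the server
                 * (LDLM_FL_BLOCK_NOWAIT), and an intent enqueue
                 * (LDLM_FL_HAS_INTENT) is recorded as a glimpse. */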
1740                 clk->ols_flags = osc_enq2ldlm_flags(enqflags);
1741                 clk->ols_agl = !!(enqflags & CEF_AGL);
1742                 if (clk->ols_agl)
1743                         clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1744                 if (clk->ols_flags & LDLM_FL_HAS_INTENT)
1745                         clk->ols_glimpse = 1;
1746
1747                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1748
1749                 if (!(enqflags & CEF_MUST))
1750                         /* try to convert this lock to a lockless lock */
1751                         osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
1752                 if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
1753                         clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1754
1755                 LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags "LPX64"\n",
1756                                 lock, clk, clk->ols_flags);
1757
1758                 result = 0;
1759         } else
1760                 result = -ENOMEM;
1761         return result;
1762 }
1763
1764 /** @} osc */