LU-3321 clio: collapse layer of cl_page
[fs/lustre-release.git] / lustre / osc / osc_lock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_OSC
43
44 #ifdef __KERNEL__
45 # include <libcfs/libcfs.h>
46 #else
47 # include <liblustre.h>
48 #endif
49 /* fid_build_reg_res_name() */
50 #include <lustre_fid.h>
51
52 #include "osc_cl_internal.h"
53
54 /** \addtogroup osc 
55  *  @{ 
56  */
57
58 #define _PAGEREF_MAGIC  (-10000000)
59
60 /*****************************************************************************
61  *
62  * Type conversions.
63  *
64  */
65
66 static const struct cl_lock_operations osc_lock_ops;
67 static const struct cl_lock_operations osc_lock_lockless_ops;
68 static void osc_lock_to_lockless(const struct lu_env *env,
69                                  struct osc_lock *ols, int force);
70 static int osc_lock_has_pages(struct osc_lock *olck);
71
72 int osc_lock_is_lockless(const struct osc_lock *olck)
73 {
74         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
75 }
76
77 /**
78  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
79  * pointer cannot be dereferenced, as lock is not protected from concurrent
80  * reclaim. This function is a helper for osc_lock_invariant().
81  */
82 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
83 {
84         struct ldlm_lock *lock;
85
86         lock = ldlm_handle2lock(handle);
87         if (lock != NULL)
88                 LDLM_LOCK_PUT(lock);
89         return lock;
90 }
91
92 /**
93  * Invariant that has to be true all of the time.
94  */
95 static int osc_lock_invariant(struct osc_lock *ols)
96 {
97         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
98         struct ldlm_lock *olock       = ols->ols_lock;
99         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
100
101         if (ergo(osc_lock_is_lockless(ols),
102                  ols->ols_locklessable && ols->ols_lock == NULL))
103                 return 1;
104
105         /*
106          * If all the following "ergo"s are true, return 1, otherwise 0
107          */
108         if (! ergo(olock != NULL, handle_used))
109                 return 0;
110
111         if (! ergo(olock != NULL,
112                    olock->l_handle.h_cookie == ols->ols_handle.cookie))
113                 return 0;
114
115         if (! ergo(handle_used,
116                    ergo(lock != NULL && olock != NULL, lock == olock) &&
117                    ergo(lock == NULL, olock == NULL)))
118                 return 0;
119         /*
120          * Check that ->ols_handle and ->ols_lock are consistent, but
121          * take into account that they are set at different times.
122          */
123         if (! ergo(ols->ols_state == OLS_CANCELLED,
124                    olock == NULL && !handle_used))
125                 return 0;
126         /*
127          * DLM lock is destroyed only after we have seen cancellation
128          * ast.
129          */
130         if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
131                    !ldlm_is_destroyed(olock)))
132                 return 0;
133
134         if (! ergo(ols->ols_state == OLS_GRANTED,
135                    olock != NULL &&
136                    olock->l_req_mode == olock->l_granted_mode &&
137                    ols->ols_hold))
138                 return 0;
139         return 1;
140 }
141
142 /*****************************************************************************
143  *
144  * Lock operations.
145  *
146  */
147
148 /**
149  * Breaks a link between osc_lock and dlm_lock.
150  */
151 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
152 {
153         struct ldlm_lock *dlmlock;
154
155         spin_lock(&osc_ast_guard);
156         dlmlock = olck->ols_lock;
157         if (dlmlock == NULL) {
158                 spin_unlock(&osc_ast_guard);
159                 return;
160         }
161
162         olck->ols_lock = NULL;
163         /* wb(); --- for all who check (ols->ols_lock != NULL) before
164          * a call to osc_lock_detach() */
165         dlmlock->l_ast_data = NULL;
166         olck->ols_handle.cookie = 0ULL;
167         spin_unlock(&osc_ast_guard);
168
169         lock_res_and_lock(dlmlock);
170         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
171                 struct cl_object *obj = olck->ols_cl.cls_obj;
172                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
173                 __u64 old_kms;
174
175                 cl_object_attr_lock(obj);
176                 /* Must get the value under the lock to avoid possible races. */
177                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
178                 /* Update the kms. Need to loop over all granted locks.
179                  * Not a problem for the client */
180                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
181
182                 cl_object_attr_set(env, obj, attr, CAT_KMS);
183                 cl_object_attr_unlock(obj);
184         }
185         unlock_res_and_lock(dlmlock);
186
187         /* release a reference taken in osc_lock_upcall0(). */
188         LASSERT(olck->ols_has_ref);
189         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
190         LDLM_LOCK_RELEASE(dlmlock);
191         olck->ols_has_ref = 0;
192 }
193
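/**
 * Drops the enqueue ("hold") reference on the underlying DLM lock, if one
 * was taken in osc_lock_upcall0(), by passing the saved lock handle to
 * osc_cancel_base(). Returns the osc_cancel_base() result, or 0 when no
 * hold reference was held.
 */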
194 static int osc_lock_unhold(struct osc_lock *ols)
195 {
196         int result = 0;
197
198         if (ols->ols_hold) {
199                 ols->ols_hold = 0;
200                 result = osc_cancel_base(&ols->ols_handle,
201                                          ols->ols_einfo.ei_mode);
202         }
203         return result;
204 }
205
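/**
 * Implementation of cl_lock_operations::clo_unuse() for the osc layer:
 * depending on the current state, drops the hold reference, detaches the
 * DLM lock and moves the osc_lock back to OLS_NEW, or marks a granted lock
 * OLS_RELEASED before unholding it.
 */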
206 static int osc_lock_unuse(const struct lu_env *env,
207                           const struct cl_lock_slice *slice)
208 {
209         struct osc_lock *ols = cl2osc_lock(slice);
210
211         LINVRNT(osc_lock_invariant(ols));
212
213         switch (ols->ols_state) {
214         case OLS_NEW:
215                 LASSERT(!ols->ols_hold);
216                 LASSERT(ols->ols_agl);
217                 return 0;
218         case OLS_UPCALL_RECEIVED:
219                 osc_lock_unhold(ols);
220         case OLS_ENQUEUED:
221                 LASSERT(!ols->ols_hold);
222                 osc_lock_detach(env, ols);
223                 ols->ols_state = OLS_NEW;
224                 return 0;
225         case OLS_GRANTED:
226                 LASSERT(!ols->ols_glimpse);
227                 LASSERT(ols->ols_hold);
228                 /*
229                  * Move lock into OLS_RELEASED state before calling
230                  * osc_cancel_base() so that possible synchronous cancellation
231                  * (that always happens e.g., for liblustre) sees that lock is
232                  * released.
233                  */
234                 ols->ols_state = OLS_RELEASED;
235                 return osc_lock_unhold(ols);
236         default:
237                 CERROR("Impossible state: %d\n", ols->ols_state);
238                 LBUG();
239         }
240 }
241
242 static void osc_lock_fini(const struct lu_env *env,
243                           struct cl_lock_slice *slice)
244 {
245         struct osc_lock  *ols = cl2osc_lock(slice);
246
247         LINVRNT(osc_lock_invariant(ols));
248         /*
249          * ->ols_hold can still be true at this point if, for example, a
250          * thread that requested a lock was killed (and released a reference
251          * to the lock) before the reply from the server was received. In this
252          * case the lock is destroyed immediately after the upcall.
253          */
254         osc_lock_unhold(ols);
255         LASSERT(ols->ols_lock == NULL);
256         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
257                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
258
259         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
260 }
261
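/**
 * Builds the ldlm extent policy for an enqueue from the cl_lock descriptor:
 * the [cld_start, cld_end] page range is converted by osc_index2policy()
 * and the group lock id is copied into the policy.
 */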
262 static void osc_lock_build_policy(const struct lu_env *env,
263                                   const struct cl_lock *lock,
264                                   ldlm_policy_data_t *policy)
265 {
266         const struct cl_lock_descr *d = &lock->cll_descr;
267
268         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
269         policy->l_extent.gid = d->cld_gid;
270 }
271
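/**
 * Translates cl_lock enqueue flags (CEF_*) into the LDLM_FL_* flags passed
 * to the DLM enqueue.
 */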
272 static __u64 osc_enq2ldlm_flags(__u32 enqflags)
273 {
274         __u64 result = 0;
275
276         LASSERT((enqflags & ~CEF_MASK) == 0);
277
278         if (enqflags & CEF_NONBLOCK)
279                 result |= LDLM_FL_BLOCK_NOWAIT;
280         if (enqflags & CEF_ASYNC)
281                 result |= LDLM_FL_HAS_INTENT;
282         if (enqflags & CEF_DISCARD_DATA)
283                 result |= LDLM_FL_AST_DISCARD_DATA;
284         return result;
285 }
286
287 /**
288  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
289  * pointers. Initialized in osc_init().
290  */
291 spinlock_t osc_ast_guard;
292
293 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
294 {
295         struct osc_lock *olck;
296
297         lock_res_and_lock(dlm_lock);
298         spin_lock(&osc_ast_guard);
299         olck = dlm_lock->l_ast_data;
300         if (olck != NULL) {
301                 struct cl_lock *lock = olck->ols_cl.cls_lock;
302                 /*
303                  * If osc_lock holds a reference on ldlm lock, return it even
304                  * when cl_lock is in CLS_FREEING state. This way
305                  *
306                  *         osc_ast_data_get(dlmlock) == NULL
307                  *
308                  * guarantees that all osc references on dlmlock were
309                  * released. osc_dlm_blocking_ast0() relies on that.
310                  */
311                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
312                         cl_lock_get_trust(lock);
313                         lu_ref_add_atomic(&lock->cll_reference,
314                                           "ast", current);
315                 } else
316                         olck = NULL;
317         }
318         spin_unlock(&osc_ast_guard);
319         unlock_res_and_lock(dlm_lock);
320         return olck;
321 }
322
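/**
 * Releases the cl_lock reference taken by osc_ast_data_get().
 */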
323 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
324 {
325         struct cl_lock *lock;
326
327         lock = olck->ols_cl.cls_lock;
328         lu_ref_del(&lock->cll_reference, "ast", current);
329         cl_lock_put(env, lock);
330 }
331
332 /**
333  * Updates object attributes from a lock value block (lvb) received together
334  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
335  * logic.
336  *
337  * This can be optimized to not update attributes when lock is a result of a
338  * local match.
339  *
340  * Called under lock and resource spin-locks.
341  */
342 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
343                                 int rc)
344 {
345         struct ost_lvb    *lvb;
346         struct cl_object  *obj;
347         struct lov_oinfo  *oinfo;
348         struct cl_attr    *attr;
349         unsigned           valid;
350
351         ENTRY;
352
353         if (!(olck->ols_flags & LDLM_FL_LVB_READY))
354                 RETURN_EXIT;
355
356         lvb   = &olck->ols_lvb;
357         obj   = olck->ols_cl.cls_obj;
358         oinfo = cl2osc(obj)->oo_oinfo;
359         attr  = &osc_env_info(env)->oti_attr;
360         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
361         cl_lvb2attr(attr, lvb);
362
363         cl_object_attr_lock(obj);
364         if (rc == 0) {
365                 struct ldlm_lock  *dlmlock;
366                 __u64 size;
367
368                 dlmlock = olck->ols_lock;
369                 LASSERT(dlmlock != NULL);
370
371                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
372                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
373                 size = lvb->lvb_size;
374                 /* Extend KMS up to the end of this lock and no further.
375                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
376                 if (size > dlmlock->l_policy_data.l_extent.end)
377                         size = dlmlock->l_policy_data.l_extent.end + 1;
378                 if (size >= oinfo->loi_kms) {
379                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
380                                    ", kms="LPU64, lvb->lvb_size, size);
381                         valid |= CAT_KMS;
382                         attr->cat_kms = size;
383                 } else {
384                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
385                                    LPU64"; leaving kms="LPU64", end="LPU64,
386                                    lvb->lvb_size, oinfo->loi_kms,
387                                    dlmlock->l_policy_data.l_extent.end);
388                 }
389                 ldlm_lock_allow_match_locked(dlmlock);
390         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
391                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
392                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
393         } else
394                 valid = 0;
395
396         if (valid != 0)
397                 cl_object_attr_set(env, obj, attr, valid);
398
399         cl_object_attr_unlock(obj);
400
401         EXIT;
402 }
403
404 /**
405  * Called when a lock is granted, from an upcall (when server returned a
406  * granted lock), or from completion AST, when server returned a blocked lock.
407  *
408  * Called under lock and resource spin-locks, that are released temporarily
409  * here.
410  */
411 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
412                              struct ldlm_lock *dlmlock, int rc)
413 {
414         struct ldlm_extent   *ext;
415         struct cl_lock       *lock;
416         struct cl_lock_descr *descr;
417
418         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
419
420         ENTRY;
421         if (olck->ols_state < OLS_GRANTED) {
422                 lock  = olck->ols_cl.cls_lock;
423                 ext   = &dlmlock->l_policy_data.l_extent;
424                 descr = &osc_env_info(env)->oti_descr;
425                 descr->cld_obj = lock->cll_descr.cld_obj;
426
427                 /* XXX check that ->l_granted_mode is valid. */
428                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
429                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
430                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
431                 descr->cld_gid   = ext->gid;
432                 /*
433                  * tell upper layers the extent of the lock that was actually
434                  * granted
435                  */
436                 olck->ols_state = OLS_GRANTED;
437                 osc_lock_lvb_update(env, olck, rc);
438
439                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
440                  * to take a semaphore on a parent lock. This is safe, because
441                  * spin-locks are needed to protect consistency of
442                  * dlmlock->l_*_mode and LVB, and we have finished processing
443                  * them. */
444                 unlock_res_and_lock(dlmlock);
445                 cl_lock_modify(env, lock, descr);
446                 cl_lock_signal(env, lock);
447                 LINVRNT(osc_lock_invariant(olck));
448                 lock_res_and_lock(dlmlock);
449         }
450         EXIT;
451 }
452
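/**
 * Second half of a successful enqueue upcall, called by osc_lock_upcall():
 * binds the osc_lock to its ldlm_lock, takes the hold and lu_ref references
 * that are later dropped in osc_lock_unhold() and osc_lock_detach(), and
 * completes granting via osc_lock_granted() if the DLM lock is already
 * granted.
 */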
453 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
454
455 {
456         struct ldlm_lock *dlmlock;
457
458         ENTRY;
459
460         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
461         LASSERT(dlmlock != NULL);
462
463         lock_res_and_lock(dlmlock);
464         spin_lock(&osc_ast_guard);
465         LASSERT(dlmlock->l_ast_data == olck);
466         LASSERT(olck->ols_lock == NULL);
467         olck->ols_lock = dlmlock;
468         spin_unlock(&osc_ast_guard);
469
470         /*
471          * The lock might not be granted yet. In this case, the completion ast
472          * (osc_ldlm_completion_ast()) comes later and finishes lock
473          * granting.
474          */
475         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
476                 osc_lock_granted(env, olck, dlmlock, 0);
477         unlock_res_and_lock(dlmlock);
478
479         /*
480          * osc_enqueue_interpret() decrefs asynchronous locks, counter
481          * this.
482          */
483         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
484         olck->ols_hold = 1;
485
486         /* lock reference taken by ldlm_handle2lock_long() is owned by
487          * osc_lock and released in osc_lock_detach() */
488         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
489         olck->ols_has_ref = 1;
490 }
491
492 /**
493  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
494  * received from a server, or after osc_enqueue_base() matched a local DLM
495  * lock.
496  */
497 static int osc_lock_upcall(void *cookie, int errcode)
498 {
499         struct osc_lock         *olck  = cookie;
500         struct cl_lock_slice    *slice = &olck->ols_cl;
501         struct cl_lock          *lock  = slice->cls_lock;
502         struct lu_env           *env;
503         struct cl_env_nest       nest;
504
505         ENTRY;
506         env = cl_env_nested_get(&nest);
507         if (!IS_ERR(env)) {
508                 int rc;
509
510                 cl_lock_mutex_get(env, lock);
511
512                 LASSERT(lock->cll_state >= CLS_QUEUING);
513                 if (olck->ols_state == OLS_ENQUEUED) {
514                         olck->ols_state = OLS_UPCALL_RECEIVED;
515                         rc = ldlm_error2errno(errcode);
516                 } else if (olck->ols_state == OLS_CANCELLED) {
517                         rc = -EIO;
518                 } else {
519                         CERROR("Impossible state: %d\n", olck->ols_state);
520                         LBUG();
521                 }
522                 if (rc) {
523                         struct ldlm_lock *dlmlock;
524
525                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
526                         if (dlmlock != NULL) {
527                                 lock_res_and_lock(dlmlock);
528                                 spin_lock(&osc_ast_guard);
529                                 LASSERT(olck->ols_lock == NULL);
530                                 dlmlock->l_ast_data = NULL;
531                                 olck->ols_handle.cookie = 0ULL;
532                                 spin_unlock(&osc_ast_guard);
533                                 ldlm_lock_fail_match_locked(dlmlock);
534                                 unlock_res_and_lock(dlmlock);
535                                 LDLM_LOCK_PUT(dlmlock);
536                         }
537                 } else {
538                         if (olck->ols_glimpse)
539                                 olck->ols_glimpse = 0;
540                         osc_lock_upcall0(env, olck);
541                 }
542
543                 /* Error handling, some errors are tolerable. */
544                 if (olck->ols_locklessable && rc == -EUSERS) {
545                         /* This is a tolerable error; turn this lock into
546                          * a lockless lock.
547                          */
548                         osc_object_set_contended(cl2osc(slice->cls_obj));
549                         LASSERT(slice->cls_ops == &osc_lock_ops);
550
551                         /* Change this lock to ldlmlock-less lock. */
552                         osc_lock_to_lockless(env, olck, 1);
553                         olck->ols_state = OLS_GRANTED;
554                         rc = 0;
555                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
556                         osc_lock_lvb_update(env, olck, rc);
557                         cl_lock_delete(env, lock);
558                         /* Hide the error. */
559                         rc = 0;
560                 }
561
562                 if (rc == 0) {
563                         /* In the AGL case, the RPC sponsor may exit the cl_lock
564                          * processing without calling wait() before the related OSC
565                          * lock upcall(), so update the lock status according
566                          * to the enqueue result inside the AGL upcall(). */
567                         if (olck->ols_agl) {
568                                 lock->cll_flags |= CLF_FROM_UPCALL;
569                                 cl_wait_try(env, lock);
570                                 lock->cll_flags &= ~CLF_FROM_UPCALL;
571                         }
572                         cl_lock_signal(env, lock);
573                         /* del user for lock upcall cookie */
574                         if (olck->ols_agl) {
575                                 if (!olck->ols_glimpse)
576                                         olck->ols_agl = 0;
577                                 cl_unuse_try(env, lock);
578                         }
579                 } else {
580                         /* del user for lock upcall cookie */
581                         if (olck->ols_agl)
582                                 cl_lock_user_del(env, lock);
583                         cl_lock_error(env, lock, rc);
584                 }
585
586                 /* release cookie reference, acquired by osc_lock_enqueue() */
587                 cl_lock_hold_release(env, lock, "upcall", lock);
588                 cl_lock_mutex_put(env, lock);
589
590                 lu_ref_del(&lock->cll_reference, "upcall", lock);
591                 /* This may be the last reference, so cl_lock_put() must be
592                  * called after cl_lock_mutex_put(). */
593                 cl_lock_put(env, lock);
594
595                 cl_env_nested_put(&nest, env);
596         } else {
597                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
598                 LBUG();
599         }
600         RETURN(errcode);
601 }
602
603 /**
604  * Core of osc_dlm_blocking_ast() logic.
605  */
606 static void osc_lock_blocking(const struct lu_env *env,
607                               struct ldlm_lock *dlmlock,
608                               struct osc_lock *olck, int blocking)
609 {
610         struct cl_lock *lock = olck->ols_cl.cls_lock;
611
612         LASSERT(olck->ols_lock == dlmlock);
613         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
614         LASSERT(!osc_lock_is_lockless(olck));
615
616         /*
617          * The lock might still be addref-ed here if, e.g., a blocking ast
618          * is sent for a failed lock.
619          */
620         osc_lock_unhold(olck);
621
622         if (blocking && olck->ols_state < OLS_BLOCKED)
623                 /*
624                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
625                  * because it recursively re-enters osc_lock_blocking(), with
626                  * the state set to OLS_CANCELLED.
627                  */
628                 olck->ols_state = OLS_BLOCKED;
629         /*
630          * cancel and destroy lock at least once no matter how blocking ast is
631          * entered (see comment above osc_ldlm_blocking_ast() for use
632          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
633          */
634         cl_lock_cancel(env, lock);
635         cl_lock_delete(env, lock);
636 }
637
638 /**
639  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
640  * and ldlm_lock caches.
641  */
642 static int osc_dlm_blocking_ast0(const struct lu_env *env,
643                                  struct ldlm_lock *dlmlock,
644                                  void *data, int flag)
645 {
646         struct osc_lock *olck;
647         struct cl_lock  *lock;
648         int result;
649         int cancel;
650
651         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
652
653         cancel = 0;
654         olck = osc_ast_data_get(dlmlock);
655         if (olck != NULL) {
656                 lock = olck->ols_cl.cls_lock;
657                 cl_lock_mutex_get(env, lock);
658                 LINVRNT(osc_lock_invariant(olck));
659                 if (olck->ols_ast_wait) {
660                         /* wake up osc_lock_use() */
661                         cl_lock_signal(env, lock);
662                         olck->ols_ast_wait = 0;
663                 }
664                 /*
665                  * The lock might have been canceled while this thread was
666                  * sleeping on the lock mutex, but olck is pinned in memory.
667                  */
668                 if (olck == dlmlock->l_ast_data) {
669                         /*
670                          * NOTE: DLM sends blocking AST's for failed locks
671                          *       (that are still in pre-OLS_GRANTED state)
672                          *       too, and they have to be canceled, otherwise the
673                          *       DLM lock is never destroyed and stays stuck in
674                          *       memory.
675                          *
676                          *       Alternatively, ldlm_cli_cancel() can be
677                          *       called here directly for osc_locks with
678                          *       ols_state < OLS_GRANTED to maintain an
679                          *       invariant that ->clo_cancel() is only called
680                          *       for locks that were granted.
681                          */
682                         LASSERT(data == olck);
683                         osc_lock_blocking(env, dlmlock,
684                                           olck, flag == LDLM_CB_BLOCKING);
685                 } else
686                         cancel = 1;
687                 cl_lock_mutex_put(env, lock);
688                 osc_ast_data_put(env, olck);
689         } else
690                 /*
691                  * DLM lock exists, but there is no cl_lock attached to it.
692                  * This is a `normal' race. A cl_object and its cl_locks can be
693                  * removed by memory pressure, together with all pages.
694                  */
695                 cancel = (flag == LDLM_CB_BLOCKING);
696
697         if (cancel) {
698                 struct lustre_handle *lockh;
699
700                 lockh = &osc_env_info(env)->oti_handle;
701                 ldlm_lock2handle(dlmlock, lockh);
702                 result = ldlm_cli_cancel(lockh, LCF_ASYNC);
703         } else
704                 result = 0;
705         return result;
706 }
707
708 /**
709  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
710  * some other lock, or is canceled. This function is installed as a
711  * ldlm_lock::l_blocking_ast() for client extent locks.
712  *
713  * Control flow is tricky, because ldlm uses the same call-back
714  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
715  *
716  * \param dlmlock lock for which ast occurred.
717  *
718  * \param new description of a conflicting lock in case of blocking ast.
719  *
720  * \param data value of dlmlock->l_ast_data
721  *
722  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
723  *             cancellation and blocking ast's.
724  *
725  * Possible use cases:
726  *
727  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
728  *       lock due to lock lru pressure, or explicit user request to purge
729  *       locks.
730  *
731  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
732  *       us that dlmlock conflicts with another lock that some client is
733  *       enqueuing. The lock is canceled.
734  *
735  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
736  *             ldlm_cli_cancel() that calls
737  *
738  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
739  *
740  *             recursively entering osc_ldlm_blocking_ast().
741  *
742  *     - client cancels the lock voluntarily (e.g., as part of early cancellation):
743  *
744  *           cl_lock_cancel()->
745  *             osc_lock_cancel()->
746  *               ldlm_cli_cancel()->
747  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
748  *
749  */
750 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
751                                  struct ldlm_lock_desc *new, void *data,
752                                  int flag)
753 {
754         struct lu_env     *env;
755         struct cl_env_nest nest;
756         int                result;
757
758         /*
759          * This can be called in the context of outer IO, e.g.,
760          *
761          *     cl_enqueue()->...
762          *       ->osc_enqueue_base()->...
763          *         ->ldlm_prep_elc_req()->...
764          *           ->ldlm_cancel_callback()->...
765          *             ->osc_ldlm_blocking_ast()
766          *
767          * A new environment has to be created so as not to corrupt the outer context.
768          */
769         env = cl_env_nested_get(&nest);
770         if (!IS_ERR(env)) {
771                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
772                 cl_env_nested_put(&nest, env);
773         } else {
774                 result = PTR_ERR(env);
775                 /*
776                  * XXX This should never happen, as cl_lock is
777                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
778                  * should be used.
779                  */
780                 LBUG();
781         }
782         if (result != 0) {
783                 if (result == -ENODATA)
784                         result = 0;
785                 else
786                         CERROR("BAST failed: %d\n", result);
787         }
788         return result;
789 }
790
791 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
792                                    __u64 flags, void *data)
793 {
794         struct cl_env_nest nest;
795         struct lu_env     *env;
796         struct osc_lock   *olck;
797         struct cl_lock    *lock;
798         int result;
799         int dlmrc;
800
801         /* first, do dlm part of the work */
802         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
803         /* then, notify cl_lock */
804         env = cl_env_nested_get(&nest);
805         if (!IS_ERR(env)) {
806                 olck = osc_ast_data_get(dlmlock);
807                 if (olck != NULL) {
808                         lock = olck->ols_cl.cls_lock;
809                         cl_lock_mutex_get(env, lock);
810                         /*
811                          * ldlm_handle_cp_callback() copied LVB from request
812                          * to lock->l_lvb_data, store it in osc_lock.
813                          */
814                         LASSERT(dlmlock->l_lvb_data != NULL);
815                         lock_res_and_lock(dlmlock);
816                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
817                         if (olck->ols_lock == NULL) {
818                                 /*
819                                  * upcall (osc_lock_upcall()) hasn't yet been
820                                  * called. Do nothing now, upcall will bind
821                                  * olck to dlmlock and signal the waiters.
822                                  *
823                                  * This maintains an invariant that osc_lock
824                                  * and ldlm_lock are always bound when
825                                  * osc_lock is in OLS_GRANTED state.
826                                  */
827                         } else if (dlmlock->l_granted_mode ==
828                                    dlmlock->l_req_mode) {
829                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
830                         }
831                         unlock_res_and_lock(dlmlock);
832
833                         if (dlmrc != 0) {
834                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
835                                               "dlmlock returned %d\n", dlmrc);
836                                 cl_lock_error(env, lock, dlmrc);
837                         }
838                         cl_lock_mutex_put(env, lock);
839                         osc_ast_data_put(env, olck);
840                         result = 0;
841                 } else
842                         result = -ELDLM_NO_LOCK_DATA;
843                 cl_env_nested_put(&nest, env);
844         } else
845                 result = PTR_ERR(env);
846         return dlmrc ?: result;
847 }
848
849 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
850 {
851         struct ptlrpc_request  *req  = data;
852         struct osc_lock        *olck;
853         struct cl_lock         *lock;
854         struct cl_object       *obj;
855         struct cl_env_nest      nest;
856         struct lu_env          *env;
857         struct ost_lvb         *lvb;
858         struct req_capsule     *cap;
859         int                     result;
860
861         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
862
863         env = cl_env_nested_get(&nest);
864         if (!IS_ERR(env)) {
865                 /* osc_ast_data_get() has to go after the environment is
866                  * allocated, because osc_ast_data_get() acquires a
867                  * reference to a lock, and that reference can only be
868                  * released with an environment.
869                  */
870                 olck = osc_ast_data_get(dlmlock);
871                 if (olck != NULL) {
872                         lock = olck->ols_cl.cls_lock;
873                         /* Do not grab the mutex of cl_lock for glimpse.
874                          * See LU-1274 for details.
875                          * BTW, it's okay for cl_lock to be cancelled during
876                          * this period because server can handle this race.
877                          * See ldlm_server_glimpse_ast() for details.
878                          * cl_lock_mutex_get(env, lock); */
879                         cap = &req->rq_pill;
880                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
881                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
882                                              sizeof *lvb);
883                         result = req_capsule_server_pack(cap);
884                         if (result == 0) {
885                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
886                                 obj = lock->cll_descr.cld_obj;
887                                 result = cl_object_glimpse(env, obj, lvb);
888                         }
889                         if (!exp_connect_lvb_type(req->rq_export))
890                                 req_capsule_shrink(&req->rq_pill,
891                                                    &RMF_DLM_LVB,
892                                                    sizeof(struct ost_lvb_v1),
893                                                    RCL_SERVER);
894                         osc_ast_data_put(env, olck);
895                 } else {
896                         /*
897                          * These errors are normal races, so we don't want to
898                          * fill the console with messages by calling
899                          * ptlrpc_error()
900                          */
901                         lustre_pack_reply(req, 1, NULL, NULL);
902                         result = -ELDLM_NO_LOCK_DATA;
903                 }
904                 cl_env_nested_put(&nest, env);
905         } else
906                 result = PTR_ERR(env);
907         req->rq_status = result;
908         return result;
909 }
910
911 static unsigned long osc_lock_weigh(const struct lu_env *env,
912                                     const struct cl_lock_slice *slice)
913 {
914         /* TODO: check how many pages are covered by this lock */
915         return cl2osc(slice->cls_obj)->oo_npages;
916 }
917
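/**
 * Fills ldlm_enqueue_info for an extent lock enqueue: the ldlm mode is
 * translated from the cl_lock mode (glimpse, i.e. CLM_PHANTOM, locks are
 * enqueued in read mode for now), the blocking/completion/glimpse ASTs
 * defined above are installed, and the osc_lock is set as the value to be
 * stored in ldlm_lock::l_ast_data.
 */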
918 static void osc_lock_build_einfo(const struct lu_env *env,
919                                  const struct cl_lock *clock,
920                                  struct osc_lock *lock,
921                                  struct ldlm_enqueue_info *einfo)
922 {
923         enum cl_lock_mode mode;
924
925         mode = clock->cll_descr.cld_mode;
926         if (mode == CLM_PHANTOM)
927                 /*
928                  * For now, enqueue all glimpse locks in read mode. In the
929                  * future, client might choose to enqueue LCK_PW lock for
930                  * glimpse on a file opened for write.
931                  */
932                 mode = CLM_READ;
933
934         einfo->ei_type   = LDLM_EXTENT;
935         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
936         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
937         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
938         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
939         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
940 }
941
942 /**
943  * Determine whether the lock should be converted into a lockless lock.
944  *
945  * Steps to check:
946  * - whether the lock has an explicit requirement for a non-lockless lock;
947  * - the IO lock request type (ci_lockreq);
948  * - the enqueue RPC is sent to the OST to make the final decision;
949  * - special treatment for lockless truncate.
950  *
951  * Additional policy can be implemented here, e.g., never do lockless IO
952  * for large extents.
953  */
954 static void osc_lock_to_lockless(const struct lu_env *env,
955                                  struct osc_lock *ols, int force)
956 {
957         struct cl_lock_slice *slice = &ols->ols_cl;
958
959         LASSERT(ols->ols_state == OLS_NEW ||
960                 ols->ols_state == OLS_UPCALL_RECEIVED);
961
962         if (force) {
963                 ols->ols_locklessable = 1;
964                 slice->cls_ops = &osc_lock_lockless_ops;
965         } else {
966                 struct osc_io *oio     = osc_env_io(env);
967                 struct cl_io  *io      = oio->oi_cl.cis_io;
968                 struct cl_object *obj  = slice->cls_obj;
969                 struct osc_object *oob = cl2osc(obj);
970                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
971                 struct obd_connect_data *ocd;
972
973                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
974                         io->ci_lockreq == CILR_MAYBE ||
975                         io->ci_lockreq == CILR_NEVER);
976
977                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
978                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
979                                 (io->ci_lockreq == CILR_MAYBE) &&
980                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
981                 if (io->ci_lockreq == CILR_NEVER ||
982                         /* lockless IO */
983                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
984                         /* lockless truncate */
985                     (cl_io_is_trunc(io) &&
986                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
987                       osd->od_lockless_truncate)) {
988                         ols->ols_locklessable = 1;
989                         slice->cls_ops = &osc_lock_lockless_ops;
990                 }
991         }
992         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
993 }
994
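/**
 * Checks whether an already enqueued lock \a qed is compatible with the
 * lock \a qing being enqueued, i.e., does not have to be cancelled first:
 * a glimpse lock that has already received its upcall (or any glimpse lock
 * when \a qing is a read lock) is compatible, as are two read locks.
 */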
995 static int osc_lock_compatible(const struct osc_lock *qing,
996                                const struct osc_lock *qed)
997 {
998         enum cl_lock_mode qing_mode;
999         enum cl_lock_mode qed_mode;
1000
1001         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1002         if (qed->ols_glimpse &&
1003             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1004                 return 1;
1005
1006         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1007         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1008 }
1009
1010 /**
1011  * Cancel all conflicting locks and wait for them to be destroyed.
1012  *
1013  * This function is used for two purposes:
1014  *
1015  *     - early cancel all conflicting locks before starting IO, and
1016  *
1017  *     - guarantee that pages added to the page cache by lockless IO are never
1018  *       covered by locks other than lockless IO lock, and, hence, are not
1019  *       visible to other threads.
1020  */
1021 static int osc_lock_enqueue_wait(const struct lu_env *env,
1022                                  const struct osc_lock *olck)
1023 {
1024         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1025         struct cl_lock_descr    *descr   = &lock->cll_descr;
1026         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1027         struct cl_lock          *scan;
1028         struct cl_lock          *conflict= NULL;
1029         int lockless                     = osc_lock_is_lockless(olck);
1030         int rc                           = 0;
1031         ENTRY;
1032
1033         LASSERT(cl_lock_is_mutexed(lock));
1034
1035         /* Let a glimpse lock enqueue anyway, because we actually
1036          * don't need to cancel any conflicting locks for it. */
1037         if (olck->ols_glimpse)
1038                 return 0;
1039
1040         spin_lock(&hdr->coh_lock_guard);
1041         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1042                 struct cl_lock_descr *cld = &scan->cll_descr;
1043                 const struct osc_lock *scan_ols;
1044
1045                 if (scan == lock)
1046                         break;
1047
1048                 if (scan->cll_state < CLS_QUEUING ||
1049                     scan->cll_state == CLS_FREEING ||
1050                     cld->cld_start > descr->cld_end ||
1051                     cld->cld_end < descr->cld_start)
1052                         continue;
1053
1054                 /* overlapped and living locks. */
1055
1056                 /* We're not supposed to give up a group lock. */
1057                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1058                         LASSERT(descr->cld_mode != CLM_GROUP ||
1059                                 descr->cld_gid != scan->cll_descr.cld_gid);
1060                         continue;
1061                 }
1062
1063                 scan_ols = osc_lock_at(scan);
1064
1065                 /* We need to cancel the compatible locks if we're enqueuing
1066                  * a lockless lock, for example:
1067                  * imagine that client has PR lock on [0, 1000], and thread T0
1068                  * is doing lockless IO in [500, 1500] region. Concurrent
1069                  * thread T1 can see lockless data in [500, 1000], which is
1070                  * wrong, because these data are possibly stale. */
1071                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1072                         continue;
1073
1074                 cl_lock_get_trust(scan);
1075                 conflict = scan;
1076                 break;
1077         }
1078         spin_unlock(&hdr->coh_lock_guard);
1079
1080         if (conflict) {
1081                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1082                         /* we want a group lock but a previous lock request
1083                          * conflicts, we do not wait but return 0 so the
1084                          * request is sent to the server
1085                          */
1086                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1087                                            "with %p, no wait, send to server\n",
1088                                lock, conflict);
1089                         cl_lock_put(env, conflict);
1090                         rc = 0;
1091                 } else {
1092                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1093                                            "will wait\n",
1094                                lock, conflict);
1095                         LASSERT(lock->cll_conflict == NULL);
1096                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1097                                    lock);
1098                         lock->cll_conflict = conflict;
1099                         rc = CLO_WAIT;
1100                 }
1101         }
1102         RETURN(rc);
1103 }
1104
1105 /**
1106  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1107  * layer. This initiates ldlm enqueue:
1108  *
1109  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1110  *
1111  *     - calls osc_enqueue_base() to do actual enqueue.
1112  *
1113  * osc_enqueue_base() is supplied with an upcall function that is executed
1114  * when lock is received either after a local cached ldlm lock is matched, or
1115  * when a reply from the server is received.
1116  *
1117  * This function does not wait for the network communication to complete.
1118  */
1119 static int osc_lock_enqueue(const struct lu_env *env,
1120                             const struct cl_lock_slice *slice,
1121                             struct cl_io *unused, __u32 enqflags)
1122 {
1123         struct osc_lock          *ols     = cl2osc_lock(slice);
1124         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1125         int result;
1126         ENTRY;
1127
1128         LASSERT(cl_lock_is_mutexed(lock));
1129         LASSERTF(ols->ols_state == OLS_NEW,
1130                  "Impossible state: %d\n", ols->ols_state);
1131
1132         LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
1133                 "lock = %p, ols = %p\n", lock, ols);
1134
1135         result = osc_lock_enqueue_wait(env, ols);
1136         if (result == 0) {
1137                 if (!osc_lock_is_lockless(ols)) {
1138                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1139                         struct osc_thread_info   *info = osc_env_info(env);
1140                         struct ldlm_res_id       *resname = &info->oti_resname;
1141                         ldlm_policy_data_t       *policy = &info->oti_policy;
1142                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1143
1144                         /* The lock will be passed as the upcall cookie;
1145                          * hold a ref to prevent it from being released. */
1146                         cl_lock_hold_add(env, lock, "upcall", lock);
1147                         /* a user for agl lock also */
1148                         if (ols->ols_agl)
1149                                 cl_lock_user_add(env, lock);
1150                         ols->ols_state = OLS_ENQUEUED;
1151
1152                         /*
1153                          * XXX: this is possible blocking point as
1154                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1155                          * LDLM_CP_CALLBACK.
1156                          */
1157                         ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
1158                         osc_lock_build_policy(env, lock, policy);
1159                         result = osc_enqueue_base(osc_export(obj), resname,
1160                                           &ols->ols_flags, policy,
1161                                           &ols->ols_lvb,
1162                                           obj->oo_oinfo->loi_kms_valid,
1163                                           osc_lock_upcall,
1164                                           ols, einfo, &ols->ols_handle,
1165                                           PTLRPCD_SET, 1, ols->ols_agl);
1166                         if (result != 0) {
1167                                 if (ols->ols_agl)
1168                                         cl_lock_user_del(env, lock);
1169                                 cl_lock_unhold(env, lock, "upcall", lock);
1170                                 if (unlikely(result == -ECANCELED)) {
1171                                         ols->ols_state = OLS_NEW;
1172                                         result = 0;
1173                                 }
1174                         }
1175                 } else {
1176                         ols->ols_state = OLS_GRANTED;
1177                         ols->ols_owner = osc_env_io(env);
1178                 }
1179         }
1180         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1181         RETURN(result);
1182 }
1183
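/**
 * Implementation of cl_lock_operations::clo_wait() for the osc layer:
 * waits until the enqueue upcall has been received and the lock is granted.
 * An AGL lock that turns out to need a real enqueue is re-enqueued from
 * here and CLO_REENQUEUED is returned.
 */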
1184 static int osc_lock_wait(const struct lu_env *env,
1185                          const struct cl_lock_slice *slice)
1186 {
1187         struct osc_lock *olck = cl2osc_lock(slice);
1188         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1189
1190         LINVRNT(osc_lock_invariant(olck));
1191
1192         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
1193                 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1194                         return 0;
1195                 } else if (olck->ols_agl) {
1196                         if (lock->cll_flags & CLF_FROM_UPCALL)
1197                                 /* It is from enqueue RPC reply upcall for
1198                                  * updating state. Do not re-enqueue. */
1199                                 return -ENAVAIL;
1200                         else
1201                                 olck->ols_state = OLS_NEW;
1202                 } else {
1203                         LASSERT(lock->cll_error);
1204                         return lock->cll_error;
1205                 }
1206         }
1207
1208         if (olck->ols_state == OLS_NEW) {
1209                 int rc;
1210
1211                 LASSERT(olck->ols_agl);
1212                 olck->ols_agl = 0;
1213                 rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
1214                 if (rc != 0)
1215                         return rc;
1216                 else
1217                         return CLO_REENQUEUED;
1218         }
1219
1220         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1221                      lock->cll_error == 0, olck->ols_lock != NULL));
1222
1223         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1224 }
1225
1226 /**
1227  * An implementation of cl_lock_operations::clo_use() method that pins cached
1228  * lock.
1229  */
1230 static int osc_lock_use(const struct lu_env *env,
1231                         const struct cl_lock_slice *slice)
1232 {
1233         struct osc_lock *olck = cl2osc_lock(slice);
1234         int rc;
1235
1236         LASSERT(!olck->ols_hold);
1237
1238         /*
1239          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1240          * flag is not set. This protects us from a concurrent blocking ast.
1241          */
1242         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1243         if (rc == 0) {
1244                 olck->ols_hold = 1;
1245                 olck->ols_state = OLS_GRANTED;
1246         } else {
1247                 struct cl_lock *lock;
1248
1249                 /*
1250                  * Lock is being cancelled somewhere within
1251                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1252                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1253                  * cl_lock mutex.
1254                  */
1255                 lock = slice->cls_lock;
1256                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1257                 LASSERT(lock->cll_users > 0);
1258                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1259                  * lock.*/
1260                 olck->ols_ast_wait = 1;
1261                 rc = CLO_WAIT;
1262         }
1263         return rc;
1264 }
1265
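/**
 * Flushes pages covered by \a ols before the lock is cancelled: for a write
 * lock dirty pages are written back (or discarded when \a discard is set),
 * then all covered pages are discarded from the client cache by
 * osc_lock_discard_pages().
 */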
1266 static int osc_lock_flush(struct osc_lock *ols, int discard)
1267 {
1268         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1269         struct cl_env_nest    nest;
1270         struct lu_env        *env;
1271         int result = 0;
1272         ENTRY;
1273
1274         env = cl_env_nested_get(&nest);
1275         if (!IS_ERR(env)) {
1276                 struct osc_object    *obj   = cl2osc(ols->ols_cl.cls_obj);
1277                 struct cl_lock_descr *descr = &lock->cll_descr;
1278                 int rc = 0;
1279
1280                 if (descr->cld_mode >= CLM_WRITE) {
1281                         result = osc_cache_writeback_range(env, obj,
1282                                         descr->cld_start, descr->cld_end,
1283                                         1, discard);
1284                         LDLM_DEBUG(ols->ols_lock,
1285                                 "lock %p: %d pages were %s.\n", lock, result,
1286                                 discard ? "discarded" : "written");
1287                         if (result > 0)
1288                                 result = 0;
1289                 }
1290
1291                 rc = osc_lock_discard_pages(env, ols);
1292                 if (result == 0 && rc < 0)
1293                         result = rc;
1294
1295                 cl_env_nested_put(&nest, env);
1296         } else
1297                 result = PTR_ERR(env);
1298         if (result == 0) {
1299                 ols->ols_flush = 1;
1300                 LINVRNT(!osc_lock_has_pages(ols));
1301         }
1302         RETURN(result);
1303 }
1304
1305 /**
1306  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1307  * called (as part of cl_lock_cancel()) when lock is canceled either voluntary
1308  * (LRU pressure, early cancellation, umount, etc.) or due to the conflict
1309  * with some other lock some where in the cluster. This function does the
1310  * following:
1311  *
1312  *     - invalidates all pages protected by this lock (after sending dirty
1313  *       ones to the server, as necessary);
1314  *
1315  *     - decref's underlying ldlm lock;
1316  *
1317  *     - cancels ldlm lock (ldlm_cli_cancel()).
1318  */
1319 static void osc_lock_cancel(const struct lu_env *env,
1320                             const struct cl_lock_slice *slice)
1321 {
1322         struct cl_lock   *lock    = slice->cls_lock;
1323         struct osc_lock  *olck    = cl2osc_lock(slice);
1324         struct ldlm_lock *dlmlock = olck->ols_lock;
1325
1326         LASSERT(cl_lock_is_mutexed(lock));
1327         LINVRNT(osc_lock_invariant(olck));
1328
1329         if (dlmlock != NULL) {
1330                 bool do_cancel;
1331                 int  result = 0;
1332
1333                 if (olck->ols_state >= OLS_GRANTED)
1334                         result = osc_lock_flush(olck,
1335                                 ldlm_is_discard_data(dlmlock));
1336                 osc_lock_unhold(olck);
1337
1338                 lock_res_and_lock(dlmlock);
1339                 /* Since we were the only user of the dlm read/write
1340                  * reference, ->l_readers + ->l_writers should normally
1341                  * be zero by now.  However, there is a corner case.
1342                  * See b=18829 for details. */
1343                 do_cancel = (dlmlock->l_readers == 0 &&
1344                              dlmlock->l_writers == 0);
1345                 ldlm_set_cbpending(dlmlock);
1346                 unlock_res_and_lock(dlmlock);
1347                 if (do_cancel)
1348                         result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
1349                 if (result < 0)
1350                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1351                                       "lock %p cancel failed with error (%d)\n",
1352                                       lock, result);
1353         }
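             /* Mark the osc lock cancelled and detach it unconditionally,
              * whether or not an ldlm lock was attached above. */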
1354         olck->ols_state = OLS_CANCELLED;
1355         olck->ols_flags &= ~LDLM_FL_LVB_READY;
1356         osc_lock_detach(env, olck);
1357 }
1358
1359 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
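     /**
      * Per-page callback for the expensive debug check below.  A page under
      * a read lock is tolerated if some other lock at the same index still
      * covers it; otherwise the offending page is reported and the gang
      * lookup is aborted.
      */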
1360 static int check_cb(const struct lu_env *env, struct cl_io *io,
1361                     struct osc_page *ops, void *cbdata)
1362 {
1363         struct cl_lock *lock = cbdata;
1364
1365         if (lock->cll_descr.cld_mode == CLM_READ) {
1366                 struct cl_lock *tmp;
1367                 tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj,
1368                                        osc_index(ops), lock, 1, 0);
1369                 if (tmp != NULL) {
1370                         cl_lock_put(env, tmp);
1371                         return CLP_GANG_OKAY;
1372                 }
1373         }
1374
1375         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1376         CL_PAGE_DEBUG(D_ERROR, env, ops->ops_cl.cpl_page, "\n");
1377         return CLP_GANG_ABORT;
1378 }
1379
1380 /**
1381  * Returns true iff there are pages under \a olck not protected by other
1382  * locks.
1383  */
1384 static int osc_lock_has_pages(struct osc_lock *olck)
1385 {
1386         struct cl_lock       *lock;
1387         struct cl_lock_descr *descr;
1388         struct cl_object     *obj;
1389         struct osc_object    *oob;
1390         struct cl_env_nest    nest;
1391         struct cl_io         *io;
1392         struct lu_env        *env;
1393         int                   result;
1394
1395         env = cl_env_nested_get(&nest);
1396         if (IS_ERR(env))
1397                 return 0;
1398
1399         obj   = olck->ols_cl.cls_obj;
1400         oob   = cl2osc(obj);
1401         io    = &oob->oo_debug_io;
1402         lock  = olck->ols_cl.cls_lock;
1403         descr = &lock->cll_descr;
1404
1405         mutex_lock(&oob->oo_debug_mutex);
1406
1407         io->ci_obj = cl_object_top(obj);
1408         io->ci_ignore_layout = 1;
1409         cl_io_init(env, io, CIT_MISC, io->ci_obj);
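             /* Scan every osc_page in the lock extent.  CLP_GANG_RESCHED
              * asks us to yield the CPU and retry; CLP_GANG_ABORT means
              * check_cb() found a page not protected by another lock. */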
1410         do {
1411                 result = osc_page_gang_lookup(env, oob, io,
1412                                               descr->cld_start, descr->cld_end,
1413                                               check_cb, (void *)lock);
1414                 if (result == CLP_GANG_ABORT)
1415                         break;
1416                 if (result == CLP_GANG_RESCHED)
1417                         cond_resched();
1418         } while (result != CLP_GANG_OKAY);
1419         cl_io_fini(env, io);
1420         mutex_unlock(&oob->oo_debug_mutex);
1421         cl_env_nested_put(&nest, env);
1422
1423         return (result == CLP_GANG_ABORT);
1424 }
1425 #else
1426 static int osc_lock_has_pages(struct osc_lock *olck)
1427 {
1428         return 0;
1429 }
1430 #endif /* CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
1431
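     /**
      * Implements cl_lock_operations::clo_delete() method for osc layer.
      * Glimpse locks never hold an ldlm lock reference, so there is nothing
      * to release for them.
      */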
1432 static void osc_lock_delete(const struct lu_env *env,
1433                             const struct cl_lock_slice *slice)
1434 {
1435         struct osc_lock *olck;
1436
1437         olck = cl2osc_lock(slice);
1438         if (olck->ols_glimpse) {
1439                 LASSERT(!olck->ols_hold);
1440                 LASSERT(!olck->ols_lock);
1441                 return;
1442         }
1443
1444         LINVRNT(osc_lock_invariant(olck));
1445         LINVRNT(!osc_lock_has_pages(olck));
1446
1447         osc_lock_unhold(olck);
1448         osc_lock_detach(env, olck);
1449 }
1450
1451 /**
1452  * Implements cl_lock_operations::clo_state() method for osc layer.
1453  *
1454  * Maintains osc_lock::ols_owner field.
1455  *
1456  * This assumes that a lock always enters CLS_HELD (from some other state)
1457  * in the same IO context as the one that requested the lock. This should
1458  * not be a problem, because the context is by definition shared by all
1459  * activity pertaining to the same high-level IO.
1460  */
1461 static void osc_lock_state(const struct lu_env *env,
1462                            const struct cl_lock_slice *slice,
1463                            enum cl_lock_state state)
1464 {
1465         struct osc_lock *lock = cl2osc_lock(slice);
1466
1467         /*
1468          * XXX multiple io contexts can use the lock at the same time.
1469          */
1470         LINVRNT(osc_lock_invariant(lock));
1471         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1472                 struct osc_io *oio = osc_env_io(env);
1473
1474                 LASSERT(lock->ols_owner == NULL);
1475                 lock->ols_owner = oio;
1476         } else if (state != CLS_HELD)
1477                 lock->ols_owner = NULL;
1478 }
1479
1480 static int osc_lock_print(const struct lu_env *env, void *cookie,
1481                           lu_printer_t p, const struct cl_lock_slice *slice)
1482 {
1483         struct osc_lock *lock = cl2osc_lock(slice);
1484
1485         /*
1486          * XXX print ldlm lock and einfo properly.
1487          */
1488         (*p)(env, cookie, "%p %#16llx "LPX64" %d %p ",
1489              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1490              lock->ols_state, lock->ols_owner);
1491         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1492         return 0;
1493 }
1494
1495 static int osc_lock_fits_into(const struct lu_env *env,
1496                               const struct cl_lock_slice *slice,
1497                               const struct cl_lock_descr *need,
1498                               const struct cl_io *io)
1499 {
1500         struct osc_lock *ols = cl2osc_lock(slice);
1501
1502         if (need->cld_enq_flags & CEF_NEVER)
1503                 return 0;
1504
1505         if (ols->ols_state >= OLS_CANCELLED)
1506                 return 0;
1507
1508         if (need->cld_mode == CLM_PHANTOM) {
1509                 if (ols->ols_agl)
1510                         return !(ols->ols_state > OLS_RELEASED);
1511
1512                 /*
1513                  * Note: a QUEUED lock must not be matched here, otherwise
1514                  * it may cause a deadlock.
1515                  * Consider the read process:
1516                  * P1: enqueues a read lock, creating sublock1.
1517                  * P2: enqueues a write lock, creating sublock2 (which
1518                  *     conflicts with sublock1).
1519                  * P1: the read lock is granted.
1520                  * P1: enqueues a glimpse lock (while holding sublock1 for
1521                  *     read) that matches sublock2 and waits for sublock2
1522                  *     to be granted.  But sublock2 can never be granted,
1523                  *     because P1 will not release sublock1.  Deadlock!
1524                  */
1525                 if (ols->ols_state < OLS_GRANTED ||
1526                     ols->ols_state > OLS_RELEASED)
1527                         return 0;
1528         } else if (need->cld_enq_flags & CEF_MUST) {
1529                 /*
1530                  * If the lock has never been enqueued, it can't be
1531                  * matched, because the enqueue process gathers the
1532                  * information needed to decide things such as whether
1533                  * the lock is lockless, CEF_MUST, etc.
1534                  */
1535                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1536                     ols->ols_locklessable)
1537                         return 0;
1538         }
1539         return 1;
1540 }
1541
1542 static const struct cl_lock_operations osc_lock_ops = {
1543         .clo_fini    = osc_lock_fini,
1544         .clo_enqueue = osc_lock_enqueue,
1545         .clo_wait    = osc_lock_wait,
1546         .clo_unuse   = osc_lock_unuse,
1547         .clo_use     = osc_lock_use,
1548         .clo_delete  = osc_lock_delete,
1549         .clo_state   = osc_lock_state,
1550         .clo_cancel  = osc_lock_cancel,
1551         .clo_weigh   = osc_lock_weigh,
1552         .clo_print   = osc_lock_print,
1553         .clo_fits_into = osc_lock_fits_into,
1554 };
1555
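     /**
      * Implements cl_lock_operations::clo_unuse() for lockless locks.  A
      * lockless lock is not kept cached for reuse, so unuse cancels and
      * deletes the enclosing cl_lock right away.
      */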
1556 static int osc_lock_lockless_unuse(const struct lu_env *env,
1557                                    const struct cl_lock_slice *slice)
1558 {
1559         struct osc_lock *ols = cl2osc_lock(slice);
1560         struct cl_lock *lock = slice->cls_lock;
1561
1562         LASSERT(ols->ols_state == OLS_GRANTED);
1563         LINVRNT(osc_lock_invariant(ols));
1564
1565         cl_lock_cancel(env, lock);
1566         cl_lock_delete(env, lock);
1567         return 0;
1568 }
1569
1570 static void osc_lock_lockless_cancel(const struct lu_env *env,
1571                                      const struct cl_lock_slice *slice)
1572 {
1573         struct osc_lock   *ols  = cl2osc_lock(slice);
1574         int result;
1575
1576         result = osc_lock_flush(ols, 0);
1577         if (result)
1578                 CERROR("Pages for lockless lock %p were not purged (%d)\n",
1579                        ols, result);
1580         ols->ols_state = OLS_CANCELLED;
1581 }
1582
1583 static int osc_lock_lockless_wait(const struct lu_env *env,
1584                                   const struct cl_lock_slice *slice)
1585 {
1586         struct osc_lock *olck = cl2osc_lock(slice);
1587         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1588
1589         LINVRNT(osc_lock_invariant(olck));
1590         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1591
1592         return lock->cll_error;
1593 }
1594
1595 static void osc_lock_lockless_state(const struct lu_env *env,
1596                                     const struct cl_lock_slice *slice,
1597                                     enum cl_lock_state state)
1598 {
1599         struct osc_lock *lock = cl2osc_lock(slice);
1600
1601         LINVRNT(osc_lock_invariant(lock));
1602         if (state == CLS_HELD) {
1603                 struct osc_io *oio  = osc_env_io(env);
1604
1605                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1606                 lock->ols_owner = oio;
1607
1608                 /* mark the io as lockless if this lock is for the io's
1609                  * host object */
1610                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1611                         oio->oi_lockless = 1;
1612         }
1613 }
1614
1615 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1616                                        const struct cl_lock_slice *slice,
1617                                        const struct cl_lock_descr *need,
1618                                        const struct cl_io *io)
1619 {
1620         struct osc_lock *lock = cl2osc_lock(slice);
1621
1622         if (!(need->cld_enq_flags & CEF_NEVER))
1623                 return 0;
1624
1625         /* a lockless lock should only be used by its owning io.  See b22147. */
1626         return (lock->ols_owner == osc_env_io(env));
1627 }
1628
1629 static const struct cl_lock_operations osc_lock_lockless_ops = {
1630         .clo_fini      = osc_lock_fini,
1631         .clo_enqueue   = osc_lock_enqueue,
1632         .clo_wait      = osc_lock_lockless_wait,
1633         .clo_unuse     = osc_lock_lockless_unuse,
1634         .clo_state     = osc_lock_lockless_state,
1635         .clo_fits_into = osc_lock_lockless_fits_into,
1636         .clo_cancel    = osc_lock_lockless_cancel,
1637         .clo_print     = osc_lock_print
1638 };
1639
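     /**
      * Allocates the osc private part of \a lock, translates the cl enqueue
      * flags into LDLM flags, decides whether the lock can be lockless, and
      * adds the resulting slice to \a lock.
      */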
1640 int osc_lock_init(const struct lu_env *env,
1641                   struct cl_object *obj, struct cl_lock *lock,
1642                   const struct cl_io *unused)
1643 {
1644         struct osc_lock *clk;
1645         int result;
1646
1647         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, __GFP_IO);
1648         if (clk != NULL) {
1649                 __u32 enqflags = lock->cll_descr.cld_enq_flags;
1650
1651                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1652                 cfs_atomic_set(&clk->ols_pageref, 0);
1653                 clk->ols_state = OLS_NEW;
1654
1655                 clk->ols_flags = osc_enq2ldlm_flags(enqflags);
1656                 clk->ols_agl = !!(enqflags & CEF_AGL);
1657                 if (clk->ols_agl)
1658                         clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1659                 if (clk->ols_flags & LDLM_FL_HAS_INTENT)
1660                         clk->ols_glimpse = 1;
1661
1662                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1663
1664                 if (!(enqflags & CEF_MUST))
1665                         /* try to convert this lock to a lockless lock */
1666                         osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
1667                 if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
1668                         clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1669
1670                 LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
1671                                 lock, clk, clk->ols_flags);
1672
1673                 result = 0;
1674         } else
1675                 result = -ENOMEM;
1676         return result;
1677 }
1678
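     /**
      * Returns non-zero if the osc lock attached to \a dlm still has
      * outstanding page references.  The check is done by atomically adding
      * the large negative _PAGEREF_MAGIC bias: the result equals
      * _PAGEREF_MAGIC exactly when ols_pageref was zero; otherwise the bias
      * is removed again and 1 is returned.
      */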
1679 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1680 {
1681         struct osc_lock *olock;
1682         int              rc = 0;
1683
1684         spin_lock(&osc_ast_guard);
1685         olock = dlm->l_ast_data;
1686         /*
1687          * There is a very rare race with osc_page_addref_lock(), but it
1688          * does not matter: in the worst case we fail to cancel a lock
1689          * that we actually could cancel, which is harmless.
1690          */
1691         if (olock != NULL &&
1692             cfs_atomic_add_return(_PAGEREF_MAGIC,
1693                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1694                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1695                 rc = 1;
1696         }
1697         spin_unlock(&osc_ast_guard);
1698         return rc;
1699 }
1700
1701 /** @} osc */