1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc 
54  *  @{ 
55  */
56
57 #define _PAGEREF_MAGIC  (-10000000)
58
59 /*****************************************************************************
60  *
61  * Type conversions.
62  *
63  */
64
65 static const struct cl_lock_operations osc_lock_ops;
66 static const struct cl_lock_operations osc_lock_lockless_ops;
67 static void osc_lock_to_lockless(const struct lu_env *env,
68                                  struct osc_lock *ols, int force);
69 static int osc_lock_has_pages(struct osc_lock *olck);
70
71 int osc_lock_is_lockless(const struct osc_lock *olck)
72 {
73         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
74 }
75
76 /**
77  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
78  * pointer cannot be dereferenced, as lock is not protected from concurrent
79  * reclaim. This function is a helper for osc_lock_invariant().
80  */
81 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
82 {
83         struct ldlm_lock *lock;
84
85         lock = ldlm_handle2lock(handle);
86         if (lock != NULL)
87                 LDLM_LOCK_PUT(lock);
88         return lock;
89 }
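
/*
 * Usage sketch (illustrative only): the value returned by osc_handle_ptr()
 * may only be compared with other pointers, as osc_lock_invariant() does
 * below.  A caller that actually needs to use the lock must take its own
 * reference, e.g.:
 *
 * \code
 *      dlmlock = ldlm_handle2lock(&olck->ols_handle);
 *      if (dlmlock != NULL) {
 *              ... work with dlmlock under an elevated refcount ...
 *              LDLM_LOCK_PUT(dlmlock);
 *      }
 * \endcode
 */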
90
91 /**
92  * Invariant that has to be true all of the time.
93  */
94 static int osc_lock_invariant(struct osc_lock *ols)
95 {
96         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
97         struct ldlm_lock *olock       = ols->ols_lock;
98         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
99
100         return
101                 ergo(osc_lock_is_lockless(ols),
102                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
103                 (ergo(olock != NULL, handle_used) &&
104                  ergo(olock != NULL,
105                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
106                  /*
107                   * Check that ->ols_handle and ->ols_lock are consistent, but
108                   * take into account that they are set at different times.
109                   */
110                  ergo(handle_used,
111                       ergo(lock != NULL && olock != NULL, lock == olock) &&
112                       ergo(lock == NULL, olock == NULL)) &&
113                  ergo(ols->ols_state == OLS_CANCELLED,
114                       olock == NULL && !handle_used) &&
115                  /*
116                   * DLM lock is destroyed only after we have seen cancellation
117                   * ast.
118                   */
119                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
120                       !olock->l_destroyed) &&
121                  ergo(ols->ols_state == OLS_GRANTED,
122                       olock != NULL &&
123                       olock->l_req_mode == olock->l_granted_mode &&
124                       ols->ols_hold));
125 }
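
/*
 * Reading aid (illustrative only): the invariant above is written with the
 * libcfs helpers ergo() and equi(), which express logical implication and
 * equivalence, roughly:
 *
 * \code
 *      ergo(a, b)  is  (!(a) || (b))       "a implies b"
 *      equi(a, b)  is  (!!(a) == !!(b))    "a if and only if b"
 * \endcode
 *
 * For example, ergo(ols->ols_state == OLS_CANCELLED, olock == NULL &&
 * !handle_used) reads: once the osc_lock is cancelled it holds neither a
 * dlm lock nor a valid handle.
 */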
126
127 /*****************************************************************************
128  *
129  * Lock operations.
130  *
131  */
132
133 /**
134  * Breaks a link between osc_lock and dlm_lock.
135  */
136 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
137 {
138         struct ldlm_lock *dlmlock;
139
140         spin_lock(&osc_ast_guard);
141         dlmlock = olck->ols_lock;
142         if (dlmlock == NULL) {
143                 spin_unlock(&osc_ast_guard);
144                 return;
145         }
146
147         olck->ols_lock = NULL;
148         /* wb(); --- for all callers that check (ols->ols_lock != NULL)
149          * before calling osc_lock_detach() */
150         dlmlock->l_ast_data = NULL;
151         olck->ols_handle.cookie = 0ULL;
152         spin_unlock(&osc_ast_guard);
153
154         lock_res_and_lock(dlmlock);
155         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
156                 struct cl_object *obj = olck->ols_cl.cls_obj;
157                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
158                 __u64 old_kms;
159
160                 cl_object_attr_lock(obj);
161                 /* Must get the value under the lock to avoid possible races. */
162                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
163                 /* Update the kms. Need to loop over all granted locks.
164                  * Not a problem for the client */
165                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
166
167                 cl_object_attr_set(env, obj, attr, CAT_KMS);
168                 cl_object_attr_unlock(obj);
169         }
170         unlock_res_and_lock(dlmlock);
171
172         /* release a reference taken in osc_lock_upcall0(). */
173         LASSERT(olck->ols_has_ref);
174         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
175         LDLM_LOCK_RELEASE(dlmlock);
176         olck->ols_has_ref = 0;
177 }
178
179 static int osc_lock_unhold(struct osc_lock *ols)
180 {
181         int result = 0;
182
183         if (ols->ols_hold) {
184                 ols->ols_hold = 0;
185                 result = osc_cancel_base(&ols->ols_handle,
186                                          ols->ols_einfo.ei_mode);
187         }
188         return result;
189 }
190
191 static int osc_lock_unuse(const struct lu_env *env,
192                           const struct cl_lock_slice *slice)
193 {
194         struct osc_lock *ols = cl2osc_lock(slice);
195
196         LINVRNT(osc_lock_invariant(ols));
197
198         switch (ols->ols_state) {
199         case OLS_NEW:
200                 LASSERT(!ols->ols_hold);
201                 LASSERT(ols->ols_agl);
202                 return 0;
203         case OLS_UPCALL_RECEIVED:
204                 osc_lock_unhold(ols);
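                /* fall through: detach from the dlm lock as for OLS_ENQUEUED */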
205         case OLS_ENQUEUED:
206                 LASSERT(!ols->ols_hold);
207                 osc_lock_detach(env, ols);
208                 ols->ols_state = OLS_NEW;
209                 return 0;
210         case OLS_GRANTED:
211                 LASSERT(!ols->ols_glimpse);
212                 LASSERT(ols->ols_hold);
213                 /*
214                  * Move lock into OLS_RELEASED state before calling
215                  * osc_cancel_base() so that possible synchronous cancellation
216                  * (that always happens e.g., for liblustre) sees that lock is
217                  * released.
218                  */
219                 ols->ols_state = OLS_RELEASED;
220                 return osc_lock_unhold(ols);
221         default:
222                 CERROR("Impossible state: %d\n", ols->ols_state);
223                 LBUG();
224         }
225 }
226
227 static void osc_lock_fini(const struct lu_env *env,
228                           struct cl_lock_slice *slice)
229 {
230         struct osc_lock  *ols = cl2osc_lock(slice);
231
232         LINVRNT(osc_lock_invariant(ols));
233         /*
234          * ->ols_hold can still be true at this point if, for example, a
235          * thread that requested a lock was killed (and released a reference
236          * to the lock), before reply from a server was received. In this case
237          * lock is destroyed immediately after upcall.
238          */
239         osc_lock_unhold(ols);
240         LASSERT(ols->ols_lock == NULL);
241         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
242                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
243
244         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
245 }
246
247 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
248                         struct ldlm_res_id *resname)
249 {
250         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
251         if (0) {
252                 /*
253                  * In the perfect world of the future, where ost servers talk
254                  * idif-fids...
255                  */
256                 fid_build_reg_res_name(fid, resname);
257         } else {
258                 /*
259                  * In reality, where the OST server expects ->lsm_object_id and
260                  * ->lsm_object_seq in the resource name (resname).
261                  */
262                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
263                                    resname);
264         }
265 }
266
267 static void osc_lock_build_policy(const struct lu_env *env,
268                                   const struct cl_lock *lock,
269                                   ldlm_policy_data_t *policy)
270 {
271         const struct cl_lock_descr *d = &lock->cll_descr;
272
273         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
274         policy->l_extent.gid = d->cld_gid;
275 }
276
277 static __u64 osc_enq2ldlm_flags(__u32 enqflags)
278 {
279         __u64 result = 0;
280
281         LASSERT((enqflags & ~CEF_MASK) == 0);
282
283         if (enqflags & CEF_NONBLOCK)
284                 result |= LDLM_FL_BLOCK_NOWAIT;
285         if (enqflags & CEF_ASYNC)
286                 result |= LDLM_FL_HAS_INTENT;
287         if (enqflags & CEF_DISCARD_DATA)
288                 result |= LDLM_AST_DISCARD_DATA;
289         return result;
290 }
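
/*
 * Example (illustrative only): the translation above is a straight bit-by-bit
 * mapping, so
 *
 * \code
 *      __u64 flags = osc_enq2ldlm_flags(CEF_NONBLOCK | CEF_ASYNC);
 * \endcode
 *
 * yields flags == (LDLM_FL_BLOCK_NOWAIT | LDLM_FL_HAS_INTENT), and
 * CEF_DISCARD_DATA alone would yield LDLM_AST_DISCARD_DATA.
 */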
291
292 /**
293  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
294  * pointers. Initialized in osc_init().
295  */
296 spinlock_t osc_ast_guard;
297
298 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
299 {
300         struct osc_lock *olck;
301
302         lock_res_and_lock(dlm_lock);
303         spin_lock(&osc_ast_guard);
304         olck = dlm_lock->l_ast_data;
305         if (olck != NULL) {
306                 struct cl_lock *lock = olck->ols_cl.cls_lock;
307                 /*
308                  * If osc_lock holds a reference on ldlm lock, return it even
309                  * when cl_lock is in CLS_FREEING state. This way
310                  *
311                  *         osc_ast_data_get(dlmlock) == NULL
312                  *
313                  * guarantees that all osc references on dlmlock were
314                  * released. osc_dlm_blocking_ast0() relies on that.
315                  */
316                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
317                         cl_lock_get_trust(lock);
318                         lu_ref_add_atomic(&lock->cll_reference,
319                                           "ast", cfs_current());
320                 } else
321                         olck = NULL;
322         }
323         spin_unlock(&osc_ast_guard);
324         unlock_res_and_lock(dlm_lock);
325         return olck;
326 }
327
328 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
329 {
330         struct cl_lock *lock;
331
332         lock = olck->ols_cl.cls_lock;
333         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
334         cl_lock_put(env, lock);
335 }
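
/*
 * Usage sketch (illustrative only, mirroring the AST handlers below): a
 * successful osc_ast_data_get() pins the cl_lock, so it must always be
 * balanced by osc_ast_data_put() in the same environment:
 *
 * \code
 *      olck = osc_ast_data_get(dlmlock);
 *      if (olck != NULL) {
 *              ... inspect olck and olck->ols_cl.cls_lock ...
 *              osc_ast_data_put(env, olck);
 *      }
 * \endcode
 */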
336
337 /**
338  * Updates object attributes from a lock value block (lvb) received together
339  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
340  * logic.
341  *
342  * This can be optimized to not update attributes when lock is a result of a
343  * local match.
344  *
345  * Called under lock and resource spin-locks.
346  */
347 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
348                                 int rc)
349 {
350         struct ost_lvb    *lvb;
351         struct cl_object  *obj;
352         struct lov_oinfo  *oinfo;
353         struct cl_attr    *attr;
354         unsigned           valid;
355
356         ENTRY;
357
358         if (!(olck->ols_flags & LDLM_FL_LVB_READY))
359                 RETURN_EXIT;
360
361         lvb   = &olck->ols_lvb;
362         obj   = olck->ols_cl.cls_obj;
363         oinfo = cl2osc(obj)->oo_oinfo;
364         attr  = &osc_env_info(env)->oti_attr;
365         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
366         cl_lvb2attr(attr, lvb);
367
368         cl_object_attr_lock(obj);
369         if (rc == 0) {
370                 struct ldlm_lock  *dlmlock;
371                 __u64 size;
372
373                 dlmlock = olck->ols_lock;
374                 LASSERT(dlmlock != NULL);
375
376                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
377                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
378                 size = lvb->lvb_size;
379                 /* Extend KMS up to the end of this lock and no further.
380                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
381                 if (size > dlmlock->l_policy_data.l_extent.end)
382                         size = dlmlock->l_policy_data.l_extent.end + 1;
383                 if (size >= oinfo->loi_kms) {
384                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
385                                    ", kms="LPU64, lvb->lvb_size, size);
386                         valid |= CAT_KMS;
387                         attr->cat_kms = size;
388                 } else {
389                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
390                                    LPU64"; leaving kms="LPU64", end="LPU64,
391                                    lvb->lvb_size, oinfo->loi_kms,
392                                    dlmlock->l_policy_data.l_extent.end);
393                 }
394                 ldlm_lock_allow_match_locked(dlmlock);
395         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
396                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
397                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
398         } else
399                 valid = 0;
400
401         if (valid != 0)
402                 cl_object_attr_set(env, obj, attr, valid);
403
404         cl_object_attr_unlock(obj);
405
406         EXIT;
407 }
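
/*
 * Worked example (illustrative only): if the server reports lvb_size == 10000
 * for a lock covering the extent [0, 4095], the KMS candidate is clamped to
 * l_extent.end + 1 == 4096, since the lock says nothing about bytes beyond
 * its end:
 *
 * \code
 *      size = lvb->lvb_size;                              (10000)
 *      if (size > dlmlock->l_policy_data.l_extent.end)    (10000 > 4095)
 *              size = dlmlock->l_policy_data.l_extent.end + 1;   (now 4096)
 * \endcode
 *
 * The "size >= oinfo->loi_kms" check above then ensures that KMS is only
 * ever raised here, never lowered.
 */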
408
409 /**
410  * Called when a lock is granted, from an upcall (when server returned a
411  * granted lock), or from completion AST, when server returned a blocked lock.
412  *
413  * Called under lock and resource spin-locks, that are released temporarily
414  * here.
415  */
416 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
417                              struct ldlm_lock *dlmlock, int rc)
418 {
419         struct ldlm_extent   *ext;
420         struct cl_lock       *lock;
421         struct cl_lock_descr *descr;
422
423         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
424
425         ENTRY;
426         if (olck->ols_state < OLS_GRANTED) {
427                 lock  = olck->ols_cl.cls_lock;
428                 ext   = &dlmlock->l_policy_data.l_extent;
429                 descr = &osc_env_info(env)->oti_descr;
430                 descr->cld_obj = lock->cll_descr.cld_obj;
431
432                 /* XXX check that ->l_granted_mode is valid. */
433                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
434                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
435                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
436                 descr->cld_gid   = ext->gid;
437                 /*
438                  * tell upper layers the extent of the lock that was actually
439                  * granted
440                  */
441                 olck->ols_state = OLS_GRANTED;
442                 osc_lock_lvb_update(env, olck, rc);
443
444                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
445                  * to take a semaphore on a parent lock. This is safe, because
446                  * spin-locks are needed to protect consistency of
447                  * dlmlock->l_*_mode and LVB, and we have finished processing
448                  * them. */
449                 unlock_res_and_lock(dlmlock);
450                 cl_lock_modify(env, lock, descr);
451                 cl_lock_signal(env, lock);
452                 LINVRNT(osc_lock_invariant(olck));
453                 lock_res_and_lock(dlmlock);
454         }
455         EXIT;
456 }
457
458 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
460 {
461         struct ldlm_lock *dlmlock;
462
463         ENTRY;
464
465         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
466         LASSERT(dlmlock != NULL);
467
468         lock_res_and_lock(dlmlock);
469         spin_lock(&osc_ast_guard);
470         LASSERT(dlmlock->l_ast_data == olck);
471         LASSERT(olck->ols_lock == NULL);
472         olck->ols_lock = dlmlock;
473         spin_unlock(&osc_ast_guard);
474
475         /*
476          * Lock might not be granted yet. In this case, completion ast
477          * (osc_ldlm_completion_ast()) comes later and finishes lock
478          * granting.
479          */
480         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
481                 osc_lock_granted(env, olck, dlmlock, 0);
482         unlock_res_and_lock(dlmlock);
483
484         /*
485          * osc_enqueue_interpret() decrefs asynchronous locks, counter
486          * this.
487          */
488         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
489         olck->ols_hold = 1;
490
491         /* lock reference taken by ldlm_handle2lock_long() is owned by
492          * osc_lock and released in osc_lock_detach() */
493         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
494         olck->ols_has_ref = 1;
495 }
496
497 /**
498  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
499  * received from a server, or after osc_enqueue_base() matched a local DLM
500  * lock.
501  */
502 static int osc_lock_upcall(void *cookie, int errcode)
503 {
504         struct osc_lock         *olck  = cookie;
505         struct cl_lock_slice    *slice = &olck->ols_cl;
506         struct cl_lock          *lock  = slice->cls_lock;
507         struct lu_env           *env;
508         struct cl_env_nest       nest;
509
510         ENTRY;
511         env = cl_env_nested_get(&nest);
512         if (!IS_ERR(env)) {
513                 int rc;
514
515                 cl_lock_mutex_get(env, lock);
516
517                 LASSERT(lock->cll_state >= CLS_QUEUING);
518                 if (olck->ols_state == OLS_ENQUEUED) {
519                         olck->ols_state = OLS_UPCALL_RECEIVED;
520                         rc = ldlm_error2errno(errcode);
521                 } else if (olck->ols_state == OLS_CANCELLED) {
522                         rc = -EIO;
523                 } else {
524                         CERROR("Impossible state: %d\n", olck->ols_state);
525                         LBUG();
526                 }
527                 if (rc) {
528                         struct ldlm_lock *dlmlock;
529
530                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
531                         if (dlmlock != NULL) {
532                                 lock_res_and_lock(dlmlock);
533                                 spin_lock(&osc_ast_guard);
534                                 LASSERT(olck->ols_lock == NULL);
535                                 dlmlock->l_ast_data = NULL;
536                                 olck->ols_handle.cookie = 0ULL;
537                                 spin_unlock(&osc_ast_guard);
538                                 ldlm_lock_fail_match_locked(dlmlock);
539                                 unlock_res_and_lock(dlmlock);
540                                 LDLM_LOCK_PUT(dlmlock);
541                         }
542                 } else {
543                         if (olck->ols_glimpse)
544                                 olck->ols_glimpse = 0;
545                         osc_lock_upcall0(env, olck);
546                 }
547
548                 /* Error handling, some errors are tolerable. */
549                 if (olck->ols_locklessable && rc == -EUSERS) {
550                         /* This is a tolerable error, turn this lock into
551                          * a lockless lock.
552                          */
553                         osc_object_set_contended(cl2osc(slice->cls_obj));
554                         LASSERT(slice->cls_ops == &osc_lock_ops);
555
556                         /* Change this lock to an ldlmlock-less (lockless) lock. */
557                         osc_lock_to_lockless(env, olck, 1);
558                         olck->ols_state = OLS_GRANTED;
559                         rc = 0;
560                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
561                         osc_lock_lvb_update(env, olck, rc);
562                         cl_lock_delete(env, lock);
563                         /* Hide the error. */
564                         rc = 0;
565                 }
566
567                 if (rc == 0) {
568                         /* For the AGL case, the RPC sponsor may exit the cl_lock
569                          * processing without wait() being called before the
570                          * related OSC lock upcall(). So update the lock status
571                          * according to the enqueue result inside the AGL upcall(). */
572                         if (olck->ols_agl) {
573                                 lock->cll_flags |= CLF_FROM_UPCALL;
574                                 cl_wait_try(env, lock);
575                                 lock->cll_flags &= ~CLF_FROM_UPCALL;
576                                 if (!olck->ols_glimpse)
577                                         olck->ols_agl = 0;
578                         }
579                         cl_lock_signal(env, lock);
580                         /* del user for lock upcall cookie */
581                         cl_unuse_try(env, lock);
582                 } else {
583                         /* del user for lock upcall cookie */
584                         cl_lock_user_del(env, lock);
585                         cl_lock_error(env, lock, rc);
586                 }
587
588                 /* release cookie reference, acquired by osc_lock_enqueue() */
589                 cl_lock_hold_release(env, lock, "upcall", lock);
590                 cl_lock_mutex_put(env, lock);
591
592                 lu_ref_del(&lock->cll_reference, "upcall", lock);
593                 /* This may be the last reference, so must be called after
594                  * cl_lock_mutex_put(). */
595                 cl_lock_put(env, lock);
596
597                 cl_env_nested_put(&nest, env);
598         } else {
599                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
600                 LBUG();
601         }
602         RETURN(errcode);
603 }
604
605 /**
606  * Core of osc_dlm_blocking_ast() logic.
607  */
608 static void osc_lock_blocking(const struct lu_env *env,
609                               struct ldlm_lock *dlmlock,
610                               struct osc_lock *olck, int blocking)
611 {
612         struct cl_lock *lock = olck->ols_cl.cls_lock;
613
614         LASSERT(olck->ols_lock == dlmlock);
615         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
616         LASSERT(!osc_lock_is_lockless(olck));
617
618         /*
619          * Lock might still be addref-ed here if, e.g., a blocking ast
620          * is sent for a failed lock.
621          */
622         osc_lock_unhold(olck);
623
624         if (blocking && olck->ols_state < OLS_BLOCKED)
625                 /*
626                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
627                  * because it recursively re-enters osc_lock_blocking(), with
628                  * the state set to OLS_CANCELLED.
629                  */
630                 olck->ols_state = OLS_BLOCKED;
631         /*
632          * cancel and destroy lock at least once no matter how blocking ast is
633          * entered (see comment above osc_ldlm_blocking_ast() for use
634          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
635          */
636         cl_lock_cancel(env, lock);
637         cl_lock_delete(env, lock);
638 }
639
640 /**
641  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
642  * and ldlm_lock caches.
643  */
644 static int osc_dlm_blocking_ast0(const struct lu_env *env,
645                                  struct ldlm_lock *dlmlock,
646                                  void *data, int flag)
647 {
648         struct osc_lock *olck;
649         struct cl_lock  *lock;
650         int result;
651         int cancel;
652
653         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
654
655         cancel = 0;
656         olck = osc_ast_data_get(dlmlock);
657         if (olck != NULL) {
658                 lock = olck->ols_cl.cls_lock;
659                 cl_lock_mutex_get(env, lock);
660                 LINVRNT(osc_lock_invariant(olck));
661                 if (olck->ols_ast_wait) {
662                         /* wake up osc_lock_use() */
663                         cl_lock_signal(env, lock);
664                         olck->ols_ast_wait = 0;
665                 }
666                 /*
667                  * Lock might have been canceled while this thread was
668                  * sleeping for lock mutex, but olck is pinned in memory.
669                  */
670                 if (olck == dlmlock->l_ast_data) {
671                         /*
672                          * NOTE: DLM sends blocking AST's for failed locks
673                          *       (that are still in pre-OLS_GRANTED state)
674                          *       too, and they have to be canceled, otherwise
675                          *       the DLM lock is never destroyed and is stuck
676                          *       in memory.
677                          *
678                          *       Alternatively, ldlm_cli_cancel() can be
679                          *       called here directly for osc_locks with
680                          *       ols_state < OLS_GRANTED to maintain an
681                          *       invariant that ->clo_cancel() is only called
682                          *       for locks that were granted.
683                          */
684                         LASSERT(data == olck);
685                         osc_lock_blocking(env, dlmlock,
686                                           olck, flag == LDLM_CB_BLOCKING);
687                 } else
688                         cancel = 1;
689                 cl_lock_mutex_put(env, lock);
690                 osc_ast_data_put(env, olck);
691         } else
692                 /*
693                  * DLM lock exists, but there is no cl_lock attached to it.
694                  * This is a `normal' race. cl_object and its cl_lock's can be
695                  * removed by memory pressure, together with all pages.
696                  */
697                 cancel = (flag == LDLM_CB_BLOCKING);
698
699         if (cancel) {
700                 struct lustre_handle *lockh;
701
702                 lockh = &osc_env_info(env)->oti_handle;
703                 ldlm_lock2handle(dlmlock, lockh);
704                 result = ldlm_cli_cancel(lockh);
705         } else
706                 result = 0;
707         return result;
708 }
709
710 /**
711  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
712  * some other lock, or is canceled. This function is installed as a
713  * ldlm_lock::l_blocking_ast() for client extent locks.
714  *
715  * Control flow is tricky, because ldlm uses the same call-back
716  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
717  *
718  * \param dlmlock lock for which ast occurred.
719  *
720  * \param new description of a conflicting lock in case of blocking ast.
721  *
722  * \param data value of dlmlock->l_ast_data
723  *
724  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
725  *             cancellation and blocking ast's.
726  *
727  * Possible use cases:
728  *
729  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
730  *       lock due to lock lru pressure, or explicit user request to purge
731  *       locks.
732  *
733  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
734  *       us that dlmlock conflicts with another lock that some client is
735  *       enqueueing. Lock is canceled.
736  *
737  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
738  *             ldlm_cli_cancel() that calls
739  *
740  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
741  *
742  *             recursively entering osc_ldlm_blocking_ast().
743  *
744  *     - client cancels lock voluntarily (e.g., as part of early cancellation):
745  *
746  *           cl_lock_cancel()->
747  *             osc_lock_cancel()->
748  *               ldlm_cli_cancel()->
749  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
750  *
751  */
752 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
753                                  struct ldlm_lock_desc *new, void *data,
754                                  int flag)
755 {
756         struct lu_env     *env;
757         struct cl_env_nest nest;
758         int                result;
759
760         /*
761          * This can be called in the context of outer IO, e.g.,
762          *
763          *     cl_enqueue()->...
764          *       ->osc_enqueue_base()->...
765          *         ->ldlm_prep_elc_req()->...
766          *           ->ldlm_cancel_callback()->...
767          *             ->osc_ldlm_blocking_ast()
768          *
769          * A new environment has to be created so as not to corrupt the outer context.
770          */
771         env = cl_env_nested_get(&nest);
772         if (!IS_ERR(env)) {
773                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
774                 cl_env_nested_put(&nest, env);
775         } else {
776                 result = PTR_ERR(env);
777                 /*
778                  * XXX This should never happen, as cl_lock is
779                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
780                  * should be used.
781                  */
782                 LBUG();
783         }
784         if (result != 0) {
785                 if (result == -ENODATA)
786                         result = 0;
787                 else
788                         CERROR("BAST failed: %d\n", result);
789         }
790         return result;
791 }
792
793 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
794                                    __u64 flags, void *data)
795 {
796         struct cl_env_nest nest;
797         struct lu_env     *env;
798         struct osc_lock   *olck;
799         struct cl_lock    *lock;
800         int result;
801         int dlmrc;
802
803         /* first, do dlm part of the work */
804         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
805         /* then, notify cl_lock */
806         env = cl_env_nested_get(&nest);
807         if (!IS_ERR(env)) {
808                 olck = osc_ast_data_get(dlmlock);
809                 if (olck != NULL) {
810                         lock = olck->ols_cl.cls_lock;
811                         cl_lock_mutex_get(env, lock);
812                         /*
813                          * ldlm_handle_cp_callback() copied LVB from request
814                          * to lock->l_lvb_data, store it in osc_lock.
815                          */
816                         LASSERT(dlmlock->l_lvb_data != NULL);
817                         lock_res_and_lock(dlmlock);
818                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
819                         if (olck->ols_lock == NULL) {
820                                 /*
821                                  * upcall (osc_lock_upcall()) hasn't yet been
822                                  * called. Do nothing now, upcall will bind
823                                  * olck to dlmlock and signal the waiters.
824                                  *
825                                  * This maintains an invariant that osc_lock
826                                  * and ldlm_lock are always bound when
827                                  * osc_lock is in OLS_GRANTED state.
828                                  */
829                         } else if (dlmlock->l_granted_mode ==
830                                    dlmlock->l_req_mode) {
831                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
832                         }
833                         unlock_res_and_lock(dlmlock);
834
835                         if (dlmrc != 0) {
836                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
837                                               "dlmlock returned %d\n", dlmrc);
838                                 cl_lock_error(env, lock, dlmrc);
839                         }
840                         cl_lock_mutex_put(env, lock);
841                         osc_ast_data_put(env, olck);
842                         result = 0;
843                 } else
844                         result = -ELDLM_NO_LOCK_DATA;
845                 cl_env_nested_put(&nest, env);
846         } else
847                 result = PTR_ERR(env);
848         return dlmrc ?: result;
849 }
850
851 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
852 {
853         struct ptlrpc_request  *req  = data;
854         struct osc_lock        *olck;
855         struct cl_lock         *lock;
856         struct cl_object       *obj;
857         struct cl_env_nest      nest;
858         struct lu_env          *env;
859         struct ost_lvb         *lvb;
860         struct req_capsule     *cap;
861         int                     result;
862
863         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
864
865         env = cl_env_nested_get(&nest);
866         if (!IS_ERR(env)) {
867                 /* osc_ast_data_get() has to go after environment is
868                  * allocated, because osc_ast_data_get() acquires a
869                  * reference to a lock, and it can only be released in
870                  * an environment.
871                  */
872                 olck = osc_ast_data_get(dlmlock);
873                 if (olck != NULL) {
874                         lock = olck->ols_cl.cls_lock;
875                         /* Do not grab the mutex of cl_lock for glimpse.
876                          * See LU-1274 for details.
877                          * BTW, it's okay for cl_lock to be cancelled during
878                          * this period because server can handle this race.
879                          * See ldlm_server_glimpse_ast() for details.
880                          * cl_lock_mutex_get(env, lock); */
881                         cap = &req->rq_pill;
882                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
883                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
884                                              sizeof *lvb);
885                         result = req_capsule_server_pack(cap);
886                         if (result == 0) {
887                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
888                                 obj = lock->cll_descr.cld_obj;
889                                 result = cl_object_glimpse(env, obj, lvb);
890                         }
891                         if (!exp_connect_lvb_type(req->rq_export))
892                                 req_capsule_shrink(&req->rq_pill,
893                                                    &RMF_DLM_LVB,
894                                                    sizeof(struct ost_lvb_v1),
895                                                    RCL_SERVER);
896                         osc_ast_data_put(env, olck);
897                 } else {
898                         /*
899                          * These errors are normal races, so we don't want to
900                          * fill the console with messages by calling
901                          * ptlrpc_error()
902                          */
903                         lustre_pack_reply(req, 1, NULL, NULL);
904                         result = -ELDLM_NO_LOCK_DATA;
905                 }
906                 cl_env_nested_put(&nest, env);
907         } else
908                 result = PTR_ERR(env);
909         req->rq_status = result;
910         return result;
911 }
912
913 static unsigned long osc_lock_weigh(const struct lu_env *env,
914                                     const struct cl_lock_slice *slice)
915 {
916         /*
917          * We don't need to grab coh_page_guard since we don't care about
918          * the exact number of pages.
919          */
920         return cl_object_header(slice->cls_obj)->coh_pages;
921 }
922
923 /**
924  * Get the weight of dlm lock for early cancellation.
925  *
926  * XXX: it should return the pages covered by this \a dlmlock.
927  */
928 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
929 {
930         struct cl_env_nest       nest;
931         struct lu_env           *env;
932         struct osc_lock         *lock;
933         struct cl_lock          *cll;
934         unsigned long            weight;
935         ENTRY;
936
937         cfs_might_sleep();
938         /*
939          * osc_ldlm_weigh_ast has a complex context since it might be called
940          * because of lock canceling, or from user input. We have to make
941          * a new environment for it. It is probably safe to reuse the upper
942          * context because cl_lock_put() does not modify environment
943          * variables, but create a new one just in case.
944          */
945         env = cl_env_nested_get(&nest);
946         if (IS_ERR(env))
947                 /* Mostly due to lack of memory; tend to eliminate this lock. */
948                 RETURN(0);
949
950         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
951         lock = osc_ast_data_get(dlmlock);
952         if (lock == NULL) {
953                 /* cl_lock was destroyed because of memory pressure.
954                  * It is more reasonable to assign this type of lock
955                  * a lower cost.
956                  */
957                 GOTO(out, weight = 0);
958         }
959
960         cll = lock->ols_cl.cls_lock;
961         cl_lock_mutex_get(env, cll);
962         weight = cl_lock_weigh(env, cll);
963         cl_lock_mutex_put(env, cll);
964         osc_ast_data_put(env, lock);
965         EXIT;
966
967 out:
968         cl_env_nested_put(&nest, env);
969         return weight;
970 }
971
972 static void osc_lock_build_einfo(const struct lu_env *env,
973                                  const struct cl_lock *clock,
974                                  struct osc_lock *lock,
975                                  struct ldlm_enqueue_info *einfo)
976 {
977         enum cl_lock_mode mode;
978
979         mode = clock->cll_descr.cld_mode;
980         if (mode == CLM_PHANTOM)
981                 /*
982                  * For now, enqueue all glimpse locks in read mode. In the
983                  * future, client might choose to enqueue LCK_PW lock for
984                  * glimpse on a file opened for write.
985                  */
986                 mode = CLM_READ;
987
988         einfo->ei_type   = LDLM_EXTENT;
989         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
990         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
991         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
992         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
993         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
994         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
995 }
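
/*
 * Example (illustrative only): a glimpse (CLM_PHANTOM) cl_lock is thus
 * enqueued as a plain read-mode extent lock, so after this call roughly the
 * following holds:
 *
 * \code
 *      einfo->ei_type   == LDLM_EXTENT
 *      einfo->ei_mode   == osc_cl_lock2ldlm(CLM_READ)
 *      einfo->ei_cbdata == lock     (later stored in dlmlock->l_ast_data)
 * \endcode
 */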
996
997 /**
998  * Determine if the lock should be converted into a lockless lock.
999  *
1000  * Steps to check:
1001  * - whether the lock has an explicit requirement for a non-lockless lock;
1002  * - the io lock request type ci_lockreq;
1003  * - send the enqueue rpc to the ost to make the further decision;
1004  * - special treatment for truncate lockless locks.
1005  *
1006  *  Additional policy can be implemented here, e.g., never do lockless-io
1007  *  for large extents.
1008  */
1009 static void osc_lock_to_lockless(const struct lu_env *env,
1010                                  struct osc_lock *ols, int force)
1011 {
1012         struct cl_lock_slice *slice = &ols->ols_cl;
1013
1014         LASSERT(ols->ols_state == OLS_NEW ||
1015                 ols->ols_state == OLS_UPCALL_RECEIVED);
1016
1017         if (force) {
1018                 ols->ols_locklessable = 1;
1019                 slice->cls_ops = &osc_lock_lockless_ops;
1020         } else {
1021                 struct osc_io *oio     = osc_env_io(env);
1022                 struct cl_io  *io      = oio->oi_cl.cis_io;
1023                 struct cl_object *obj  = slice->cls_obj;
1024                 struct osc_object *oob = cl2osc(obj);
1025                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1026                 struct obd_connect_data *ocd;
1027
1028                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1029                         io->ci_lockreq == CILR_MAYBE ||
1030                         io->ci_lockreq == CILR_NEVER);
1031
1032                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1033                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
1034                                 (io->ci_lockreq == CILR_MAYBE) &&
1035                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1036                 if (io->ci_lockreq == CILR_NEVER ||
1037                         /* lockless IO */
1038                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1039                         /* lockless truncate */
1040                     (cl_io_is_trunc(io) &&
1041                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1042                       osd->od_lockless_truncate)) {
1043                         ols->ols_locklessable = 1;
1044                         slice->cls_ops = &osc_lock_lockless_ops;
1045                 }
1046         }
1047         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1048 }
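
/*
 * Decision sketch (illustrative only) for the non-forced case above:
 *
 * \code
 *      lockless = (io->ci_lockreq == CILR_NEVER) ||
 *                 (ols->ols_locklessable && osc_object_is_contended(oob)) ||
 *                 (cl_io_is_trunc(io) &&
 *                  (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
 *                  osd->od_lockless_truncate);
 * \endcode
 *
 * where ols_locklessable itself requires CILR_MAYBE, a non-SETATTR io and an
 * OBD_CONNECT_SRVLOCK-capable server.  Glimpse locks are never lockless, as
 * the final assertion checks.
 */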
1049
1050 static int osc_lock_compatible(const struct osc_lock *qing,
1051                                const struct osc_lock *qed)
1052 {
1053         enum cl_lock_mode qing_mode;
1054         enum cl_lock_mode qed_mode;
1055
1056         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1057         if (qed->ols_glimpse &&
1058             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1059                 return 1;
1060
1061         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1062         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1063 }
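
/*
 * Examples (illustrative only, with hypothetical osc_lock pointers):
 *
 * \code
 *      osc_lock_compatible(read_ols,  read_ols2)        == 1
 *      osc_lock_compatible(write_ols, read_ols)         == 0
 *      osc_lock_compatible(any_ols,   granted_glimpse)  == 1
 * \endcode
 *
 * i.e. only read/read pairs are compatible, with the extra allowance for
 * queued glimpse locks made by the first check above.
 */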
1064
1065 /**
1066  * Cancel all conflicting locks and wait for them to be destroyed.
1067  *
1068  * This function is used for two purposes:
1069  *
1070  *     - early cancel all conflicting locks before starting IO, and
1071  *
1072  *     - guarantee that pages added to the page cache by lockless IO are never
1073  *       covered by locks other than lockless IO lock, and, hence, are not
1074  *       visible to other threads.
1075  */
1076 static int osc_lock_enqueue_wait(const struct lu_env *env,
1077                                  const struct osc_lock *olck)
1078 {
1079         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1080         struct cl_lock_descr    *descr   = &lock->cll_descr;
1081         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1082         struct cl_lock          *scan;
1083         struct cl_lock          *conflict= NULL;
1084         int lockless                     = osc_lock_is_lockless(olck);
1085         int rc                           = 0;
1086         ENTRY;
1087
1088         LASSERT(cl_lock_is_mutexed(lock));
1089
1090         /* make it enqueue anyway for glimpse lock, because we actually
1091          * don't need to cancel any conflicting locks. */
1092         if (olck->ols_glimpse)
1093                 return 0;
1094
1095         spin_lock(&hdr->coh_lock_guard);
1096         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1097                 struct cl_lock_descr *cld = &scan->cll_descr;
1098                 const struct osc_lock *scan_ols;
1099
1100                 if (scan == lock)
1101                         break;
1102
1103                 if (scan->cll_state < CLS_QUEUING ||
1104                     scan->cll_state == CLS_FREEING ||
1105                     cld->cld_start > descr->cld_end ||
1106                     cld->cld_end < descr->cld_start)
1107                         continue;
1108
1109                 /* overlapped and living locks. */
1110
1111                 /* We're not supposed to give up group lock. */
1112                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1113                         LASSERT(descr->cld_mode != CLM_GROUP ||
1114                                 descr->cld_gid != scan->cll_descr.cld_gid);
1115                         continue;
1116                 }
1117
1118                 scan_ols = osc_lock_at(scan);
1119
1120                 /* We need to cancel the compatible locks if we're enqueuing
1121                  * a lockless lock, for example:
1122                  * imagine that client has PR lock on [0, 1000], and thread T0
1123                  * is doing lockless IO in [500, 1500] region. Concurrent
1124                  * thread T1 can see lockless data in [500, 1000], which is
1125                  * wrong, because these data are possibly stale. */
1126                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1127                         continue;
1128
1129                 cl_lock_get_trust(scan);
1130                 conflict = scan;
1131                 break;
1132         }
1133         spin_unlock(&hdr->coh_lock_guard);
1134
1135         if (conflict) {
1136                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1137                         /* we want a group lock but a previous lock request
1138                          * conflicts, we do not wait but return 0 so the
1139                          * request is sent to the server
1140                          */
1141                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1142                                            "with %p, no wait, send to server\n",
1143                                lock, conflict);
1144                         cl_lock_put(env, conflict);
1145                         rc = 0;
1146                 } else {
1147                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1148                                            "will wait\n",
1149                                lock, conflict);
1150                         LASSERT(lock->cll_conflict == NULL);
1151                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1152                                    lock);
1153                         lock->cll_conflict = conflict;
1154                         rc = CLO_WAIT;
1155                 }
1156         }
1157         RETURN(rc);
1158 }
1159
1160 /**
1161  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1162  * layer. This initiates ldlm enqueue:
1163  *
1164  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1165  *
1166  *     - calls osc_enqueue_base() to do actual enqueue.
1167  *
1168  * osc_enqueue_base() is supplied with an upcall function that is executed
1169  * when lock is received either after a local cached ldlm lock is matched, or
1170  * when a reply from the server is received.
1171  *
1172  * This function does not wait for the network communication to complete.
1173  */
1174 static int osc_lock_enqueue(const struct lu_env *env,
1175                             const struct cl_lock_slice *slice,
1176                             struct cl_io *unused, __u32 enqflags)
1177 {
1178         struct osc_lock          *ols     = cl2osc_lock(slice);
1179         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1180         int result;
1181         ENTRY;
1182
1183         LASSERT(cl_lock_is_mutexed(lock));
1184         LASSERTF(ols->ols_state == OLS_NEW,
1185                  "Impossible state: %d\n", ols->ols_state);
1186
1187         LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
1188                 "lock = %p, ols = %p\n", lock, ols);
1189
1190         result = osc_lock_enqueue_wait(env, ols);
1191         if (result == 0) {
1192                 if (!osc_lock_is_lockless(ols)) {
1193                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1194                         struct osc_thread_info   *info = osc_env_info(env);
1195                         struct ldlm_res_id       *resname = &info->oti_resname;
1196                         ldlm_policy_data_t       *policy = &info->oti_policy;
1197                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1198
1199                         /* lock will be passed as upcall cookie,
1200                          * hold a reference to prevent it from being released. */
1201                         cl_lock_hold_add(env, lock, "upcall", lock);
1202                         /* a user for lock also */
1203                         cl_lock_user_add(env, lock);
1204                         ols->ols_state = OLS_ENQUEUED;
1205
1206                         /*
1207                          * XXX: this is possible blocking point as
1208                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1209                          * LDLM_CP_CALLBACK.
1210                          */
1211                         osc_lock_build_res(env, obj, resname);
1212                         osc_lock_build_policy(env, lock, policy);
1213                         result = osc_enqueue_base(osc_export(obj), resname,
1214                                           &ols->ols_flags, policy,
1215                                           &ols->ols_lvb,
1216                                           obj->oo_oinfo->loi_kms_valid,
1217                                           osc_lock_upcall,
1218                                           ols, einfo, &ols->ols_handle,
1219                                           PTLRPCD_SET, 1, ols->ols_agl);
1220                         if (result != 0) {
1221                                 cl_lock_user_del(env, lock);
1222                                 cl_lock_unhold(env, lock, "upcall", lock);
1223                                 if (unlikely(result == -ECANCELED)) {
1224                                         ols->ols_state = OLS_NEW;
1225                                         result = 0;
1226                                 }
1227                         }
1228                 } else {
1229                         ols->ols_state = OLS_GRANTED;
1230                         ols->ols_owner = osc_env_io(env);
1231                 }
1232         }
1233         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1234         RETURN(result);
1235 }
1236
1237 static int osc_lock_wait(const struct lu_env *env,
1238                          const struct cl_lock_slice *slice)
1239 {
1240         struct osc_lock *olck = cl2osc_lock(slice);
1241         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1242
1243         LINVRNT(osc_lock_invariant(olck));
1244
1245         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
1246                 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1247                         return 0;
1248                 } else if (olck->ols_agl) {
1249                         if (lock->cll_flags & CLF_FROM_UPCALL)
1250                                 /* It is from enqueue RPC reply upcall for
1251                                  * updating state. Do not re-enqueue. */
1252                                 return -ENAVAIL;
1253                         else
1254                                 olck->ols_state = OLS_NEW;
1255                 } else {
1256                         LASSERT(lock->cll_error);
1257                         return lock->cll_error;
1258                 }
1259         }
1260
1261         if (olck->ols_state == OLS_NEW) {
1262                 int rc;
1263
1264                 LASSERT(olck->ols_agl);
1265                 olck->ols_agl = 0;
1266                 rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
1267                 if (rc != 0)
1268                         return rc;
1269                 else
1270                         return CLO_REENQUEUED;
1271         }
1272
1273         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1274                      lock->cll_error == 0, olck->ols_lock != NULL));
1275
1276         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1277 }
1278
1279 /**
1280  * An implementation of cl_lock_operations::clo_use() method that pins cached
1281  * lock.
1282  */
1283 static int osc_lock_use(const struct lu_env *env,
1284                         const struct cl_lock_slice *slice)
1285 {
1286         struct osc_lock *olck = cl2osc_lock(slice);
1287         int rc;
1288
1289         LASSERT(!olck->ols_hold);
1290
1291         /*
1292          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1293          * flag is not set. This protects us from a concurrent blocking ast.
1294          */
1295         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1296         if (rc == 0) {
1297                 olck->ols_hold = 1;
1298                 olck->ols_state = OLS_GRANTED;
1299         } else {
1300                 struct cl_lock *lock;
1301
1302                 /*
1303                  * Lock is being cancelled somewhere within
1304                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1305                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1306                  * cl_lock mutex.
1307                  */
1308                 lock = slice->cls_lock;
1309                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1310                 LASSERT(lock->cll_users > 0);
1311                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1312                  * lock.*/
1313                 olck->ols_ast_wait = 1;
1314                 rc = CLO_WAIT;
1315         }
1316         return rc;
1317 }
1318
1319 static int osc_lock_flush(struct osc_lock *ols, int discard)
1320 {
1321         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1322         struct cl_env_nest    nest;
1323         struct lu_env        *env;
1324         int result = 0;
1325         ENTRY;
1326
1327         env = cl_env_nested_get(&nest);
1328         if (!IS_ERR(env)) {
1329                 struct osc_object    *obj   = cl2osc(ols->ols_cl.cls_obj);
1330                 struct cl_lock_descr *descr = &lock->cll_descr;
1331                 int rc = 0;
1332
1333                 if (descr->cld_mode >= CLM_WRITE) {
1334                         result = osc_cache_writeback_range(env, obj,
1335                                         descr->cld_start, descr->cld_end,
1336                                         1, discard);
1337                         LDLM_DEBUG(ols->ols_lock,
1338                                 "lock %p: %d pages were %s.\n", lock, result,
1339                                 discard ? "discarded" : "written");
1340                         if (result > 0)
1341                                 result = 0;
1342                 }
1343
1344                 rc = cl_lock_discard_pages(env, lock);
1345                 if (result == 0 && rc < 0)
1346                         result = rc;
1347
1348                 cl_env_nested_put(&nest, env);
1349         } else
1350                 result = PTR_ERR(env);
1351         if (result == 0) {
1352                 ols->ols_flush = 1;
1353                 LINVRNT(!osc_lock_has_pages(ols));
1354         }
1355         RETURN(result);
1356 }
1357
1358 /**
1359  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1360  * called (as part of cl_lock_cancel()) when a lock is cancelled either
1361  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
1362  * conflict with some other lock somewhere in the cluster. This function does the
1363  * following:
1364  *
1365  *     - invalidates all pages protected by this lock (after sending dirty
1366  *       ones to the server, as necessary);
1367  *
1368  *     - decref's underlying ldlm lock;
1369  *
1370  *     - cancels ldlm lock (ldlm_cli_cancel()).
1371  */
1372 static void osc_lock_cancel(const struct lu_env *env,
1373                             const struct cl_lock_slice *slice)
1374 {
1375         struct cl_lock   *lock    = slice->cls_lock;
1376         struct osc_lock  *olck    = cl2osc_lock(slice);
1377         struct ldlm_lock *dlmlock = olck->ols_lock;
1378         int               result  = 0;
1379         int               discard;
1380
1381         LASSERT(cl_lock_is_mutexed(lock));
1382         LINVRNT(osc_lock_invariant(olck));
1383
1384         if (dlmlock != NULL) {
1385                 int do_cancel;
1386
1387                 discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
1388                 if (olck->ols_state >= OLS_GRANTED)
1389                         result = osc_lock_flush(olck, discard);
1390                 osc_lock_unhold(olck);
1391
1392                 lock_res_and_lock(dlmlock);
1393                 /* Now that we're the only user of the dlm read/write
1394                  * reference, ->l_readers and ->l_writers should usually
1395                  * both be zero.  However, there is a corner case.
1396                  * See bug 18829 for details. */
1397                 do_cancel = (dlmlock->l_readers == 0 &&
1398                              dlmlock->l_writers == 0);
1399                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1400                 unlock_res_and_lock(dlmlock);
1401                 if (do_cancel)
1402                         result = ldlm_cli_cancel(&olck->ols_handle);
1403                 if (result < 0)
1404                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1405                                       "lock %p cancel failure with error(%d)\n",
1406                                       lock, result);
1407         }
1408         olck->ols_state = OLS_CANCELLED;
1409         olck->ols_flags &= ~LDLM_FL_LVB_READY;
1410         osc_lock_detach(env, olck);
1411 }
1412
1413 #ifdef INVARIANT_CHECK
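/**
 * cl_page_gang_lookup() callback used by osc_lock_has_pages() below: returns
 * CLP_GANG_OKAY when \a page is still protected by some other lock, and
 * CLP_GANG_ABORT to report an unprotected page (which is then dumped for
 * debugging).
 */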
1414 static int check_cb(const struct lu_env *env, struct cl_io *io,
1415                     struct cl_page *page, void *cbdata)
1416 {
1417         struct cl_lock *lock = cbdata;
1418
1419         if (lock->cll_descr.cld_mode == CLM_READ) {
1420                 struct cl_lock *tmp;
1421                 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1422                                      page, lock, 1, 0);
1423                 if (tmp != NULL) {
1424                         cl_lock_put(env, tmp);
1425                         return CLP_GANG_OKAY;
1426                 }
1427         }
1428
1429         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1430         CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1431         return CLP_GANG_ABORT;
1432 }
1433
1434 /**
1435  * Returns true iff there are pages under \a olck not protected by other
1436  * locks.
1437  */
1438 static int osc_lock_has_pages(struct osc_lock *olck)
1439 {
1440         struct cl_lock       *lock;
1441         struct cl_lock_descr *descr;
1442         struct cl_object     *obj;
1443         struct osc_object    *oob;
1444         struct cl_env_nest    nest;
1445         struct cl_io         *io;
1446         struct lu_env        *env;
1447         int                   result;
1448
1449         env = cl_env_nested_get(&nest);
1450         if (IS_ERR(env))
1451                 return 0;
1452
1453         obj   = olck->ols_cl.cls_obj;
1454         oob   = cl2osc(obj);
1455         io    = &oob->oo_debug_io;
1456         lock  = olck->ols_cl.cls_lock;
1457         descr = &lock->cll_descr;
1458
1459         mutex_lock(&oob->oo_debug_mutex);
1460
1461         io->ci_obj = cl_object_top(obj);
1462         io->ci_ignore_layout = 1;
1463         cl_io_init(env, io, CIT_MISC, io->ci_obj);
1464         do {
1465                 result = cl_page_gang_lookup(env, obj, io,
1466                                              descr->cld_start, descr->cld_end,
1467                                              check_cb, (void *)lock);
1468                 if (result == CLP_GANG_ABORT)
1469                         break;
1470                 if (result == CLP_GANG_RESCHED)
1471                         cfs_cond_resched();
1472         } while (result != CLP_GANG_OKAY);
1473         cl_io_fini(env, io);
1474         mutex_unlock(&oob->oo_debug_mutex);
1475         cl_env_nested_put(&nest, env);
1476
1477         return (result == CLP_GANG_ABORT);
1478 }
1479 #else
1480 static int osc_lock_has_pages(struct osc_lock *olck)
1481 {
1482         return 0;
1483 }
1484 #endif /* INVARIANT_CHECK */
1485
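/**
 * Implements cl_lock_operations::clo_delete() method for the osc layer:
 * drops the hold on, and detaches from, the underlying ldlm lock.  Glimpse
 * locks are skipped, since by this point they neither hold nor reference the
 * dlm lock.
 */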
1486 static void osc_lock_delete(const struct lu_env *env,
1487                             const struct cl_lock_slice *slice)
1488 {
1489         struct osc_lock *olck;
1490
1491         olck = cl2osc_lock(slice);
1492         if (olck->ols_glimpse) {
1493                 LASSERT(!olck->ols_hold);
1494                 LASSERT(!olck->ols_lock);
1495                 return;
1496         }
1497
1498         LINVRNT(osc_lock_invariant(olck));
1499         LINVRNT(!osc_lock_has_pages(olck));
1500
1501         osc_lock_unhold(olck);
1502         osc_lock_detach(env, olck);
1503 }
1504
1505 /**
1506  * Implements cl_lock_operations::clo_state() method for osc layer.
1507  *
1508  * Maintains osc_lock::ols_owner field.
1509  *
1510  * This assumes that a lock always enters CLS_HELD (from some other state) in
1511  * the same IO context as the one that requested the lock. This should not be a
1512  * problem, because the context is by definition shared by all activity pertaining
1513  * to the same high-level IO.
1514  */
1515 static void osc_lock_state(const struct lu_env *env,
1516                            const struct cl_lock_slice *slice,
1517                            enum cl_lock_state state)
1518 {
1519         struct osc_lock *lock = cl2osc_lock(slice);
1520
1521         /*
1522          * XXX multiple io contexts can use the lock at the same time.
1523          */
1524         LINVRNT(osc_lock_invariant(lock));
1525         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1526                 struct osc_io *oio = osc_env_io(env);
1527
1528                 LASSERT(lock->ols_owner == NULL);
1529                 lock->ols_owner = oio;
1530         } else if (state != CLS_HELD)
1531                 lock->ols_owner = NULL;
1532 }
1533
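/**
 * Implements cl_lock_operations::clo_print() method for the osc layer:
 * prints the dlm lock pointer, flags, handle cookie, osc lock state, owner
 * and LVB of \a slice.
 */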
1534 static int osc_lock_print(const struct lu_env *env, void *cookie,
1535                           lu_printer_t p, const struct cl_lock_slice *slice)
1536 {
1537         struct osc_lock *lock = cl2osc_lock(slice);
1538
1539         /*
1540          * XXX print ldlm lock and einfo properly.
1541          */
1542         (*p)(env, cookie, "%p %#16llx "LPX64" %d %p ",
1543              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1544              lock->ols_state, lock->ols_owner);
1545         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1546         return 0;
1547 }
1548
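/**
 * Implements cl_lock_operations::clo_fits_into() method for the osc layer:
 * decides whether the existing lock \a slice can be matched against the
 * request described by \a need and \a io, taking the lock state and the
 * enqueue flags (CEF_NEVER, CEF_MUST, CLM_PHANTOM glimpses) into account.
 */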
1549 static int osc_lock_fits_into(const struct lu_env *env,
1550                               const struct cl_lock_slice *slice,
1551                               const struct cl_lock_descr *need,
1552                               const struct cl_io *io)
1553 {
1554         struct osc_lock *ols = cl2osc_lock(slice);
1555
1556         if (need->cld_enq_flags & CEF_NEVER)
1557                 return 0;
1558
1559         if (ols->ols_state >= OLS_CANCELLED)
1560                 return 0;
1561
1562         if (need->cld_mode == CLM_PHANTOM) {
1563                 if (ols->ols_agl)
1564                         return !(ols->ols_state > OLS_RELEASED);
1565
1566                 /*
1567                  * Note: a QUEUED lock can't be matched here, otherwise
1568                  * it might cause deadlocks.
1569                  * For example, in a read process:
1570                  * P1: enqueues a read lock, creating sublock1.
1571                  * P2: enqueues a write lock, creating sublock2 (which
1572                  *     conflicts with sublock1).
1573                  * P1: read lock is granted.
1574                  * P1: enqueues a glimpse lock (while holding sublock1 for
1575                  *     read), which matches sublock2 and waits for sublock2
1576                  *     to be granted.  But sublock2 cannot be granted,
1577                  *     because P1 will not release sublock1. Bang!
1578                  */
1579                 if (ols->ols_state < OLS_GRANTED ||
1580                     ols->ols_state > OLS_RELEASED)
1581                         return 0;
1582         } else if (need->cld_enq_flags & CEF_MUST) {
1583                 /*
1584                  * If the lock has never been enqueued, it can't be matched,
1585                  * because the enqueue process brings in a lot of information
1586                  * that is used to determine things such as locklessness,
1587                  * CEF_MUST, etc.
1588                  */
1589                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1590                     ols->ols_locklessable)
1591                         return 0;
1592         }
1593         return 1;
1594 }
1595
1596 static const struct cl_lock_operations osc_lock_ops = {
1597         .clo_fini    = osc_lock_fini,
1598         .clo_enqueue = osc_lock_enqueue,
1599         .clo_wait    = osc_lock_wait,
1600         .clo_unuse   = osc_lock_unuse,
1601         .clo_use     = osc_lock_use,
1602         .clo_delete  = osc_lock_delete,
1603         .clo_state   = osc_lock_state,
1604         .clo_cancel  = osc_lock_cancel,
1605         .clo_weigh   = osc_lock_weigh,
1606         .clo_print   = osc_lock_print,
1607         .clo_fits_into = osc_lock_fits_into,
1608 };
1609
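/*
 * The methods below form osc_lock_lockless_ops, the cl_lock_operations
 * vector installed (by osc_lock_to_lockless()) for locks operating in
 * lockless mode: they share clo_fini/clo_enqueue/clo_print with regular osc
 * locks, but have no dlm lock behind them and hence no clo_use, clo_delete
 * or clo_weigh methods.
 */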
1610 static int osc_lock_lockless_unuse(const struct lu_env *env,
1611                                    const struct cl_lock_slice *slice)
1612 {
1613         struct osc_lock *ols = cl2osc_lock(slice);
1614         struct cl_lock *lock = slice->cls_lock;
1615
1616         LASSERT(ols->ols_state == OLS_GRANTED);
1617         LINVRNT(osc_lock_invariant(ols));
1618
1619         cl_lock_cancel(env, lock);
1620         cl_lock_delete(env, lock);
1621         return 0;
1622 }
1623
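/**
 * Implements cl_lock_operations::clo_cancel() for lockless locks: there is
 * no ldlm lock to cancel, so cancellation reduces to flushing the pages
 * covered by the lock.
 */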
1624 static void osc_lock_lockless_cancel(const struct lu_env *env,
1625                                      const struct cl_lock_slice *slice)
1626 {
1627         struct osc_lock   *ols  = cl2osc_lock(slice);
1628         int result;
1629
1630         result = osc_lock_flush(ols, 0);
1631         if (result)
1632                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1633                        ols, result);
1634         ols->ols_state = OLS_CANCELLED;
1635 }
1636
1637 static int osc_lock_lockless_wait(const struct lu_env *env,
1638                                   const struct cl_lock_slice *slice)
1639 {
1640         struct osc_lock *olck = cl2osc_lock(slice);
1641         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1642
1643         LINVRNT(osc_lock_invariant(olck));
1644         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1645
1646         return lock->cll_error;
1647 }
1648
1649 static void osc_lock_lockless_state(const struct lu_env *env,
1650                                     const struct cl_lock_slice *slice,
1651                                     enum cl_lock_state state)
1652 {
1653         struct osc_lock *lock = cl2osc_lock(slice);
1654
1655         LINVRNT(osc_lock_invariant(lock));
1656         if (state == CLS_HELD) {
1657                 struct osc_io *oio  = osc_env_io(env);
1658
1659                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1660                 lock->ols_owner = oio;
1661
1662                 /* set the io to be lockless if this lock is for the
1663                  * io's host object */
1664                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1665                         oio->oi_lockless = 1;
1666         }
1667 }
1668
1669 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1670                                        const struct cl_lock_slice *slice,
1671                                        const struct cl_lock_descr *need,
1672                                        const struct cl_io *io)
1673 {
1674         struct osc_lock *lock = cl2osc_lock(slice);
1675
1676         if (!(need->cld_enq_flags & CEF_NEVER))
1677                 return 0;
1678
1679         /* lockless lock should only be used by its owning io. b22147 */
1680         return (lock->ols_owner == osc_env_io(env));
1681 }
1682
1683 static const struct cl_lock_operations osc_lock_lockless_ops = {
1684         .clo_fini      = osc_lock_fini,
1685         .clo_enqueue   = osc_lock_enqueue,
1686         .clo_wait      = osc_lock_lockless_wait,
1687         .clo_unuse     = osc_lock_lockless_unuse,
1688         .clo_state     = osc_lock_lockless_state,
1689         .clo_fits_into = osc_lock_lockless_fits_into,
1690         .clo_cancel    = osc_lock_lockless_cancel,
1691         .clo_print     = osc_lock_print
1692 };
1693
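/**
 * Allocates and initializes the osc slice of \a lock: translates the cl
 * enqueue flags into LDLM flags, flags AGL and glimpse requests, adds the
 * slice with the regular osc_lock_ops and then, unless CEF_MUST is set,
 * tries to convert the lock to a lockless one.
 */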
1694 int osc_lock_init(const struct lu_env *env,
1695                   struct cl_object *obj, struct cl_lock *lock,
1696                   const struct cl_io *unused)
1697 {
1698         struct osc_lock *clk;
1699         int result;
1700
1701         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1702         if (clk != NULL) {
1703                 __u32 enqflags = lock->cll_descr.cld_enq_flags;
1704
1705                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1706                 cfs_atomic_set(&clk->ols_pageref, 0);
1707                 clk->ols_state = OLS_NEW;
1708
1709                 clk->ols_flags = osc_enq2ldlm_flags(enqflags);
1710                 clk->ols_agl = !!(enqflags & CEF_AGL);
1711                 if (clk->ols_agl)
1712                         clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1713                 if (clk->ols_flags & LDLM_FL_HAS_INTENT)
1714                         clk->ols_glimpse = 1;
1715
1716                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1717
1718                 if (!(enqflags & CEF_MUST))
1719                         /* try to convert this lock to a lockless lock */
1720                         osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
1721                 if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
1722                         clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1723
1724                 LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
1725                                 lock, clk, clk->ols_flags);
1726
1727                 result = 0;
1728         } else
1729                 result = -ENOMEM;
1730         return result;
1731 }
1732
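/**
 * Returns non-zero iff osc pages still hold references on the osc lock
 * attached to \a dlm (see osc_lock::ols_pageref).  A single atomic
 * add-and-test against _PAGEREF_MAGIC keeps this race-tolerant, e.g.:
 *
 *     ols_pageref == 0:  0 + _PAGEREF_MAGIC == _PAGEREF_MAGIC  -> rc = 0
 *     ols_pageref == 3:  3 + _PAGEREF_MAGIC != _PAGEREF_MAGIC  -> rc = 1
 *
 * In the rc = 1 case the magic offset is subtracted again; otherwise it is
 * left in place, presumably so that later page-reference attempts can detect
 * that the lock is being torn down.
 */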
1733 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1734 {
1735         struct osc_lock *olock;
1736         int              rc = 0;
1737
1738         spin_lock(&osc_ast_guard);
1739         olock = dlm->l_ast_data;
1740         /*
1741          * there's a very rare race with osc_page_addref_lock(), but it
1742          * doesn't matter: in the worst case we fail to cancel a lock that
1743          * we actually could cancel, which is harmless.
1744          */
1745         if (olock != NULL &&
1746             cfs_atomic_add_return(_PAGEREF_MAGIC,
1747                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1748                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1749                 rc = 1;
1750         }
1751         spin_unlock(&osc_ast_guard);
1752         return rc;
1753 }
1754
1755 /** @} osc */