LU-1683 agl: increase lock cll_holds for AGL upcall
[fs/lustre-release.git] lustre/osc/osc_lock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc 
54  *  @{ 
55  */
56
57 #define _PAGEREF_MAGIC  (-10000000)
58
59 /*****************************************************************************
60  *
61  * Type conversions.
62  *
63  */
64
65 static const struct cl_lock_operations osc_lock_ops;
66 static const struct cl_lock_operations osc_lock_lockless_ops;
67 static void osc_lock_to_lockless(const struct lu_env *env,
68                                  struct osc_lock *ols, int force);
69 static int osc_lock_has_pages(struct osc_lock *olck);
70
71 int osc_lock_is_lockless(const struct osc_lock *olck)
72 {
73         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
74 }
75
76 /**
77  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
78  * pointer cannot be dereferenced, as lock is not protected from concurrent
79  * reclaim. This function is a helper for osc_lock_invariant().
80  */
81 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
82 {
83         struct ldlm_lock *lock;
84
85         lock = ldlm_handle2lock(handle);
86         if (lock != NULL)
87                 LDLM_LOCK_PUT(lock);
88         return lock;
89 }
90
91 /**
92  * Invariant that has to be true all of the time.
93  */
94 static int osc_lock_invariant(struct osc_lock *ols)
95 {
96         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
97         struct ldlm_lock *olock       = ols->ols_lock;
98         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
99
100         return
101                 ergo(osc_lock_is_lockless(ols),
102                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
103                 (ergo(olock != NULL, handle_used) &&
104                  ergo(olock != NULL,
105                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
106                  /*
107                   * Check that ->ols_handle and ->ols_lock are consistent, but
108                   * take into account that they are set at different times.
109                   */
110                  ergo(handle_used,
111                       ergo(lock != NULL && olock != NULL, lock == olock) &&
112                       ergo(lock == NULL, olock == NULL)) &&
113                  ergo(ols->ols_state == OLS_CANCELLED,
114                       olock == NULL && !handle_used) &&
115                  /*
116                   * DLM lock is destroyed only after we have seen cancellation
117                   * ast.
118                   */
119                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
120                       !olock->l_destroyed) &&
121                  ergo(ols->ols_state == OLS_GRANTED,
122                       olock != NULL &&
123                       olock->l_req_mode == olock->l_granted_mode &&
124                       ols->ols_hold));
125 }
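
The invariant above is written entirely with libcfs-style implication macros, where ergo(a, b) reads "a implies b" and equi(a, b) reads "a iff b". Below is a minimal standalone sketch of that idiom, assuming those usual definitions; the toy_* names and the simplified fields are hypothetical stand-ins, not the kernel structures.

#include <assert.h>
#include <stddef.h>

/* Assumed libcfs-style helpers: implication and equivalence. */
#define ergo(a, b) (!(a) || (b))        /* "a implies b" */
#define equi(a, b) (!!(a) == !!(b))     /* "a iff b"     */

/* Hypothetical, simplified stand-in for struct osc_lock. */
struct toy_lock {
        int   granted;  /* roughly OLS_GRANTED */
        int   hold;     /* roughly ols_hold    */
        void *dlm;      /* roughly ols_lock    */
};

/* Invariant in the same style as osc_lock_invariant(): a granted lock
 * must be bound to a DLM lock, and being bound goes together with
 * holding a reference. */
static int toy_lock_invariant(const struct toy_lock *ols)
{
        return ergo(ols->granted, ols->dlm != NULL) &&
               equi(ols->dlm != NULL, ols->hold);
}

int main(void)
{
        struct toy_lock ok  = { .granted = 1, .hold = 1, .dlm = (void *)1 };
        struct toy_lock bad = { .granted = 1, .hold = 0, .dlm = NULL };

        assert(toy_lock_invariant(&ok));
        assert(!toy_lock_invariant(&bad));
        return 0;
}
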
126
127 /*****************************************************************************
128  *
129  * Lock operations.
130  *
131  */
132
133 /**
134  * Breaks a link between osc_lock and dlm_lock.
135  */
136 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
137 {
138         struct ldlm_lock *dlmlock;
139
140         cfs_spin_lock(&osc_ast_guard);
141         dlmlock = olck->ols_lock;
142         if (dlmlock == NULL) {
143                 cfs_spin_unlock(&osc_ast_guard);
144                 return;
145         }
146
147         olck->ols_lock = NULL;
148         /* wb(); --- for everyone who checks (ols->ols_lock != NULL) before
149          * calling osc_lock_detach() */
150         dlmlock->l_ast_data = NULL;
151         olck->ols_handle.cookie = 0ULL;
152         cfs_spin_unlock(&osc_ast_guard);
153
154         lock_res_and_lock(dlmlock);
155         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
156                 struct cl_object *obj = olck->ols_cl.cls_obj;
157                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
158                 __u64 old_kms;
159
160                 cl_object_attr_lock(obj);
161                 /* Must get the value under the lock to avoid possible races. */
162                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
163                 /* Update the kms. Need to loop over all granted locks.
164                  * Not a problem for the client. */
165                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
166
167                 cl_object_attr_set(env, obj, attr, CAT_KMS);
168                 cl_object_attr_unlock(obj);
169         }
170         unlock_res_and_lock(dlmlock);
171
172         /* release a reference taken in osc_lock_upcall0(). */
173         LASSERT(olck->ols_has_ref);
174         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
175         LDLM_LOCK_RELEASE(dlmlock);
176         olck->ols_has_ref = 0;
177 }
178
179 static int osc_lock_unhold(struct osc_lock *ols)
180 {
181         int result = 0;
182
183         if (ols->ols_hold) {
184                 ols->ols_hold = 0;
185                 result = osc_cancel_base(&ols->ols_handle,
186                                          ols->ols_einfo.ei_mode);
187         }
188         return result;
189 }
190
191 static int osc_lock_unuse(const struct lu_env *env,
192                           const struct cl_lock_slice *slice)
193 {
194         struct osc_lock *ols = cl2osc_lock(slice);
195
196         LINVRNT(osc_lock_invariant(ols));
197
198         switch (ols->ols_state) {
199         case OLS_NEW:
200                 LASSERT(!ols->ols_hold);
201                 LASSERT(ols->ols_agl);
202                 return 0;
203         case OLS_ENQUEUED:
204         case OLS_UPCALL_RECEIVED:
205                 LASSERT(!ols->ols_hold);
206                 ols->ols_state = OLS_NEW;
207                 return 0;
208         case OLS_GRANTED:
209                 LASSERT(!ols->ols_glimpse);
210                 LASSERT(ols->ols_hold);
211                 /*
212                  * Move lock into OLS_RELEASED state before calling
213                  * osc_cancel_base() so that possible synchronous cancellation
214                  * (that always happens e.g., for liblustre) sees that lock is
215                  * released.
216                  */
217                 ols->ols_state = OLS_RELEASED;
218                 return osc_lock_unhold(ols);
219         default:
220                 CERROR("Impossible state: %d\n", ols->ols_state);
221                 LBUG();
222         }
223 }
224
225 static void osc_lock_fini(const struct lu_env *env,
226                           struct cl_lock_slice *slice)
227 {
228         struct osc_lock  *ols = cl2osc_lock(slice);
229
230         LINVRNT(osc_lock_invariant(ols));
231         /*
232          * ->ols_hold can still be true at this point if, for example, a
233          * thread that requested a lock was killed (and released a reference
234          * to the lock), before reply from a server was received. In this case
235          * lock is destroyed immediately after upcall.
236          */
237         osc_lock_unhold(ols);
238         LASSERT(ols->ols_lock == NULL);
239         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
240                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
241
242         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
243 }
244
245 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
246                         struct ldlm_res_id *resname)
247 {
248         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
249         if (0) {
250                 /*
251                  * In the perfect world of the future, where ost servers talk
252                  * idif-fids...
253                  */
254                 fid_build_reg_res_name(fid, resname);
255         } else {
256                 /*
257                  * In reality, the OST server expects ->lsm_object_id and
258                  * ->lsm_object_seq in the resource name.
259                  */
260                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
261                                    resname);
262         }
263 }
264
265 static void osc_lock_build_policy(const struct lu_env *env,
266                                   const struct cl_lock *lock,
267                                   ldlm_policy_data_t *policy)
268 {
269         const struct cl_lock_descr *d = &lock->cll_descr;
270
271         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
272         policy->l_extent.gid = d->cld_gid;
273 }
274
275 static int osc_enq2ldlm_flags(__u32 enqflags)
276 {
277         int result = 0;
278
279         LASSERT((enqflags & ~CEF_MASK) == 0);
280
281         if (enqflags & CEF_NONBLOCK)
282                 result |= LDLM_FL_BLOCK_NOWAIT;
283         if (enqflags & CEF_ASYNC)
284                 result |= LDLM_FL_HAS_INTENT;
285         if (enqflags & CEF_DISCARD_DATA)
286                 result |= LDLM_AST_DISCARD_DATA;
287         return result;
288 }
289
290 /**
291  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
292  * pointers. Initialized in osc_init().
293  */
294 cfs_spinlock_t osc_ast_guard;
295
296 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
297 {
298         struct osc_lock *olck;
299
300         lock_res_and_lock(dlm_lock);
301         cfs_spin_lock(&osc_ast_guard);
302         olck = dlm_lock->l_ast_data;
303         if (olck != NULL) {
304                 struct cl_lock *lock = olck->ols_cl.cls_lock;
305                 /*
306                  * If osc_lock holds a reference on ldlm lock, return it even
307                  * when cl_lock is in CLS_FREEING state. This way
308                  *
309                  *         osc_ast_data_get(dlmlock) == NULL
310                  *
311                  * guarantees that all osc references on dlmlock were
312                  * released. osc_dlm_blocking_ast0() relies on that.
313                  */
314                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
315                         cl_lock_get_trust(lock);
316                         lu_ref_add_atomic(&lock->cll_reference,
317                                           "ast", cfs_current());
318                 } else
319                         olck = NULL;
320         }
321         cfs_spin_unlock(&osc_ast_guard);
322         unlock_res_and_lock(dlm_lock);
323         return olck;
324 }
325
326 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
327 {
328         struct cl_lock *lock;
329
330         lock = olck->ols_cl.cls_lock;
331         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
332         cl_lock_put(env, lock);
333 }
334
335 /**
336  * Updates object attributes from a lock value block (lvb) received together
337  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
338  * logic.
339  *
340  * This can be optimized to not update attributes when the lock is the result
341  * of a local match.
342  *
343  * Called under lock and resource spin-locks.
344  */
345 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
346                                 int rc)
347 {
348         struct ost_lvb    *lvb;
349         struct cl_object  *obj;
350         struct lov_oinfo  *oinfo;
351         struct cl_attr    *attr;
352         unsigned           valid;
353
354         ENTRY;
355
356         if (!(olck->ols_flags & LDLM_FL_LVB_READY))
357                 RETURN_EXIT;
358
359         lvb   = &olck->ols_lvb;
360         obj   = olck->ols_cl.cls_obj;
361         oinfo = cl2osc(obj)->oo_oinfo;
362         attr  = &osc_env_info(env)->oti_attr;
363         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
364         cl_lvb2attr(attr, lvb);
365
366         cl_object_attr_lock(obj);
367         if (rc == 0) {
368                 struct ldlm_lock  *dlmlock;
369                 __u64 size;
370
371                 dlmlock = olck->ols_lock;
372                 LASSERT(dlmlock != NULL);
373
374                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
375                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
376                 size = lvb->lvb_size;
377                 /* Extend KMS up to the end of this lock and no further
378                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
379                 if (size > dlmlock->l_policy_data.l_extent.end)
380                         size = dlmlock->l_policy_data.l_extent.end + 1;
381                 if (size >= oinfo->loi_kms) {
382                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
383                                    ", kms="LPU64, lvb->lvb_size, size);
384                         valid |= CAT_KMS;
385                         attr->cat_kms = size;
386                 } else {
387                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
388                                    LPU64"; leaving kms="LPU64", end="LPU64,
389                                    lvb->lvb_size, oinfo->loi_kms,
390                                    dlmlock->l_policy_data.l_extent.end);
391                 }
392                 ldlm_lock_allow_match_locked(dlmlock);
393         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
394                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
395                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
396         } else
397                 valid = 0;
398
399         if (valid != 0)
400                 cl_object_attr_set(env, obj, attr, valid);
401
402         cl_object_attr_unlock(obj);
403
404         EXIT;
405 }
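
The KMS clamp above ("a lock on [x, y] means a KMS of up to y + 1 bytes") can be illustrated with a small standalone model. toy_kms_after_grant() below is a hypothetical userspace helper that mirrors the arithmetic; it is not the kernel code path.

#include <stdio.h>
#include <stdint.h>

/* The known minimum size may grow to the size reported in the LVB,
 * but never past the end of the granted extent.  A lock on
 * [start, end] covers bytes up to and including 'end', so the largest
 * size it can vouch for is end + 1.  If the result would shrink the
 * current KMS, keep the old value (the real code simply skips the
 * CAT_KMS update in that case). */
static uint64_t toy_kms_after_grant(uint64_t lvb_size, uint64_t extent_end,
                                    uint64_t old_kms)
{
        uint64_t size = lvb_size;

        if (size > extent_end)
                size = extent_end + 1;
        return size >= old_kms ? size : old_kms;
}

int main(void)
{
        /* The LVB says the object is 10000 bytes, but the lock only
         * covers [0, 4095]: KMS may only be raised to 4096. */
        printf("%llu\n", (unsigned long long)
               toy_kms_after_grant(10000, 4095, 1024));
        /* The lock covers the whole reported size: KMS follows the LVB. */
        printf("%llu\n", (unsigned long long)
               toy_kms_after_grant(10000, 1048575, 1024));
        return 0;
}
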
406
407 /**
408  * Called when a lock is granted, from an upcall (when server returned a
409  * granted lock), or from completion AST, when server returned a blocked lock.
410  *
411  * Called under lock and resource spin-locks, that are released temporarily
412  * here.
413  */
414 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
415                              struct ldlm_lock *dlmlock, int rc)
416 {
417         struct ldlm_extent   *ext;
418         struct cl_lock       *lock;
419         struct cl_lock_descr *descr;
420
421         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
422
423         ENTRY;
424         if (olck->ols_state < OLS_GRANTED) {
425                 lock  = olck->ols_cl.cls_lock;
426                 ext   = &dlmlock->l_policy_data.l_extent;
427                 descr = &osc_env_info(env)->oti_descr;
428                 descr->cld_obj = lock->cll_descr.cld_obj;
429
430                 /* XXX check that ->l_granted_mode is valid. */
431                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
432                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
433                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
434                 descr->cld_gid   = ext->gid;
435                 /*
436                  * tell upper layers the extent of the lock that was actually
437                  * granted
438                  */
439                 olck->ols_state = OLS_GRANTED;
440                 osc_lock_lvb_update(env, olck, rc);
441
442                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
443                  * to take a semaphore on a parent lock. This is safe, because
444                  * spin-locks are needed to protect consistency of
445                  * dlmlock->l_*_mode and LVB, and we have finished processing
446                  * them. */
447                 unlock_res_and_lock(dlmlock);
448                 cl_lock_modify(env, lock, descr);
449                 cl_lock_signal(env, lock);
450                 LINVRNT(osc_lock_invariant(olck));
451                 lock_res_and_lock(dlmlock);
452         }
453         EXIT;
454 }
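
The granted byte extent is translated into page indices via cl_index() before being handed to cl_lock_modify(). As a rough standalone illustration, assuming cl_index() amounts to an offset-to-page-index shift; the toy_* names and the 4 KiB page size are assumptions, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

/* Assumed: byte offset -> page index is a simple right shift. */
#define TOY_PAGE_SHIFT 12

static uint64_t toy_cl_index(uint64_t byte_offset)
{
        return byte_offset >> TOY_PAGE_SHIFT;
}

int main(void)
{
        /* A granted byte extent of [0, 1 MiB - 1] becomes the page
         * index range [0, 255] in the cl_lock descriptor. */
        printf("[%llu, %llu]\n",
               (unsigned long long)toy_cl_index(0),
               (unsigned long long)toy_cl_index((1 << 20) - 1));
        return 0;
}
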
455
456 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
457
458 {
459         struct ldlm_lock *dlmlock;
460
461         ENTRY;
462
463         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
464         LASSERT(dlmlock != NULL);
465
466         lock_res_and_lock(dlmlock);
467         cfs_spin_lock(&osc_ast_guard);
468         LASSERT(dlmlock->l_ast_data == olck);
469         LASSERT(olck->ols_lock == NULL);
470         olck->ols_lock = dlmlock;
471         cfs_spin_unlock(&osc_ast_guard);
472
473         /*
474          * Lock might not be granted yet. In this case, the completion ast
475          * (osc_ldlm_completion_ast()) comes later and finishes lock
476          * granting.
477          */
478         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
479                 osc_lock_granted(env, olck, dlmlock, 0);
480         unlock_res_and_lock(dlmlock);
481
482         /*
483          * osc_enqueue_interpret() decrefs asynchronous locks, counter
484          * this.
485          */
486         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
487         olck->ols_hold = 1;
488
489         /* lock reference taken by ldlm_handle2lock_long() is owned by
490          * osc_lock and released in osc_lock_detach() */
491         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
492         olck->ols_has_ref = 1;
493 }
494
495 /**
496  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
497  * received from a server, or after osc_enqueue_base() matched a local DLM
498  * lock.
499  */
500 static int osc_lock_upcall(void *cookie, int errcode)
501 {
502         struct osc_lock         *olck  = cookie;
503         struct cl_lock_slice    *slice = &olck->ols_cl;
504         struct cl_lock          *lock  = slice->cls_lock;
505         struct lu_env           *env;
506         struct cl_env_nest       nest;
507
508         ENTRY;
509         env = cl_env_nested_get(&nest);
510         if (!IS_ERR(env)) {
511                 int rc;
512
513                 cl_lock_mutex_get(env, lock);
514
515                 LASSERT(lock->cll_state >= CLS_QUEUING);
516                 if (olck->ols_state == OLS_ENQUEUED) {
517                         olck->ols_state = OLS_UPCALL_RECEIVED;
518                         rc = ldlm_error2errno(errcode);
519                 } else if (olck->ols_state == OLS_CANCELLED) {
520                         rc = -EIO;
521                 } else {
522                         CERROR("Impossible state: %d\n", olck->ols_state);
523                         LBUG();
524                 }
525                 if (rc) {
526                         struct ldlm_lock *dlmlock;
527
528                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
529                         if (dlmlock != NULL) {
530                                 lock_res_and_lock(dlmlock);
531                                 cfs_spin_lock(&osc_ast_guard);
532                                 LASSERT(olck->ols_lock == NULL);
533                                 dlmlock->l_ast_data = NULL;
534                                 olck->ols_handle.cookie = 0ULL;
535                                 cfs_spin_unlock(&osc_ast_guard);
536                                 ldlm_lock_fail_match_locked(dlmlock);
537                                 unlock_res_and_lock(dlmlock);
538                                 LDLM_LOCK_PUT(dlmlock);
539                         }
540                 } else {
541                         if (olck->ols_glimpse)
542                                 olck->ols_glimpse = 0;
543                         osc_lock_upcall0(env, olck);
544                 }
545
546                 /* Error handling, some errors are tolerable. */
547                 if (olck->ols_locklessable && rc == -EUSERS) {
548                         /* This is a tolerable error, turn this lock into
549                          * lockless lock.
550                          */
551                         osc_object_set_contended(cl2osc(slice->cls_obj));
552                         LASSERT(slice->cls_ops == &osc_lock_ops);
553
554                         /* Change this lock to ldlmlock-less lock. */
555                         osc_lock_to_lockless(env, olck, 1);
556                         olck->ols_state = OLS_GRANTED;
557                         rc = 0;
558                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
559                         osc_lock_lvb_update(env, olck, rc);
560                         cl_lock_delete(env, lock);
561                         /* Hide the error. */
562                         rc = 0;
563                 }
564
565                 if (rc == 0) {
566                         /* In the AGL case, the RPC sponsor may exit cl_lock
567                          * processing without calling wait() before the related
568                          * OSC lock upcall(). So update the lock status according
569                          * to the enqueue result inside the AGL upcall(). */
570                         if (olck->ols_agl) {
571                                 lock->cll_flags |= CLF_FROM_UPCALL;
572                                 cl_wait_try(env, lock);
573                                 lock->cll_flags &= ~CLF_FROM_UPCALL;
574                                 if (!olck->ols_glimpse)
575                                         olck->ols_agl = 0;
576                         }
577                         cl_lock_signal(env, lock);
578                         /* del user for lock upcall cookie */
579                         cl_unuse_try(env, lock);
580                 } else {
581                         /* del user for lock upcall cookie */
582                         cl_lock_user_del(env, lock);
583                         cl_lock_error(env, lock, rc);
584                 }
585
586                 /* release cookie reference, acquired by osc_lock_enqueue() */
587                 cl_lock_hold_release(env, lock, "upcall", lock);
588                 cl_lock_mutex_put(env, lock);
589
590                 lu_ref_del(&lock->cll_reference, "upcall", lock);
591                 /* This may be the last reference, so it must be called after
592                  * cl_lock_mutex_put(). */
593                 cl_lock_put(env, lock);
594
595                 cl_env_nested_put(&nest, env);
596         } else {
597                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
598                 LBUG();
599         }
600         RETURN(errcode);
601 }
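
The error handling above tolerates two failures: a contended server (-EUSERS on a locklessable lock) and a glimpse that returned only an LVB (-ENAVAIL). Here is a standalone userspace sketch of that disposition logic, using Linux errno values; toy_upcall_disposition() is illustrative, not the kernel function.

#include <errno.h>
#include <stdio.h>

/* How the upcall downgrades some enqueue failures instead of
 * propagating them. */
static const char *toy_upcall_disposition(int rc, int locklessable,
                                          int glimpse)
{
        if (rc == 0)
                return "granted";
        if (locklessable && rc == -EUSERS)
                return "convert to lockless lock";      /* tolerable */
        if (glimpse && rc == -ENAVAIL)
                return "keep glimpse LVB, drop lock";   /* tolerable */
        return "report error";
}

int main(void)
{
        printf("%s\n", toy_upcall_disposition(-EUSERS, 1, 0));
        printf("%s\n", toy_upcall_disposition(-ENAVAIL, 0, 1));
        printf("%s\n", toy_upcall_disposition(-EIO, 0, 0));
        return 0;
}
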
602
603 /**
604  * Core of osc_dlm_blocking_ast() logic.
605  */
606 static void osc_lock_blocking(const struct lu_env *env,
607                               struct ldlm_lock *dlmlock,
608                               struct osc_lock *olck, int blocking)
609 {
610         struct cl_lock *lock = olck->ols_cl.cls_lock;
611
612         LASSERT(olck->ols_lock == dlmlock);
613         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
614         LASSERT(!osc_lock_is_lockless(olck));
615
616         /*
617          * Lock might still be addref-ed here if, e.g., a blocking ast
618          * is sent for a failed lock.
619          */
620         osc_lock_unhold(olck);
621
622         if (blocking && olck->ols_state < OLS_BLOCKED)
623                 /*
624                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
625                  * because it recursively re-enters osc_lock_blocking(), with
626                  * the state set to OLS_CANCELLED.
627                  */
628                 olck->ols_state = OLS_BLOCKED;
629         /*
630          * cancel and destroy lock at least once no matter how blocking ast is
631          * entered (see comment above osc_ldlm_blocking_ast() for use
632          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
633          */
634         cl_lock_cancel(env, lock);
635         cl_lock_delete(env, lock);
636 }
637
638 /**
639  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
640  * and ldlm_lock caches.
641  */
642 static int osc_dlm_blocking_ast0(const struct lu_env *env,
643                                  struct ldlm_lock *dlmlock,
644                                  void *data, int flag)
645 {
646         struct osc_lock *olck;
647         struct cl_lock  *lock;
648         int result;
649         int cancel;
650
651         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
652
653         cancel = 0;
654         olck = osc_ast_data_get(dlmlock);
655         if (olck != NULL) {
656                 lock = olck->ols_cl.cls_lock;
657                 cl_lock_mutex_get(env, lock);
658                 LINVRNT(osc_lock_invariant(olck));
659                 if (olck->ols_ast_wait) {
660                         /* wake up osc_lock_use() */
661                         cl_lock_signal(env, lock);
662                         olck->ols_ast_wait = 0;
663                 }
664                 /*
665                  * Lock might have been canceled while this thread was
666                  * sleeping on the lock mutex, but olck is pinned in memory.
667                  */
668                 if (olck == dlmlock->l_ast_data) {
669                         /*
670                          * NOTE: DLM sends blocking AST's for failed locks
671                          *       (that are still in pre-OLS_GRANTED state)
672                          *       too, and they have to be canceled otherwise
673                          *       DLM lock is never destroyed and stuck in
674                          *       the memory.
675                          *
676                          *       Alternatively, ldlm_cli_cancel() can be
677                          *       called here directly for osc_locks with
678                          *       ols_state < OLS_GRANTED to maintain an
679                          *       invariant that ->clo_cancel() is only called
680                          *       for locks that were granted.
681                          */
682                         LASSERT(data == olck);
683                         osc_lock_blocking(env, dlmlock,
684                                           olck, flag == LDLM_CB_BLOCKING);
685                 } else
686                         cancel = 1;
687                 cl_lock_mutex_put(env, lock);
688                 osc_ast_data_put(env, olck);
689         } else
690                 /*
691                  * DLM lock exists, but there is no cl_lock attached to it.
692                  * This is a `normal' race. cl_object and its cl_lock's can be
693                  * removed by memory pressure, together with all pages.
694                  */
695                 cancel = (flag == LDLM_CB_BLOCKING);
696
697         if (cancel) {
698                 struct lustre_handle *lockh;
699
700                 lockh = &osc_env_info(env)->oti_handle;
701                 ldlm_lock2handle(dlmlock, lockh);
702                 result = ldlm_cli_cancel(lockh);
703         } else
704                 result = 0;
705         return result;
706 }
707
708 /**
709  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
710  * some other lock, or is canceled. This function is installed as a
711  * ldlm_lock::l_blocking_ast() for client extent locks.
712  *
713  * Control flow is tricky, because ldlm uses the same call-back
714  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
715  *
716  * \param dlmlock lock for which ast occurred.
717  *
718  * \param new description of a conflicting lock in case of blocking ast.
719  *
720  * \param data value of dlmlock->l_ast_data
721  *
722  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
723  *             cancellation and blocking ast's.
724  *
725  * Possible use cases:
726  *
727  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
728  *       lock due to lock lru pressure, or explicit user request to purge
729  *       locks.
730  *
731  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
732  *       us that dlmlock conflicts with another lock that some client is
733  *       enqueuing. Lock is canceled.
734  *
735  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
736  *             ldlm_cli_cancel() that calls
737  *
738  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
739  *
740  *             recursively entering osc_ldlm_blocking_ast().
741  *
742  *     - client cancels lock voluntarily (e.g., as a part of early cancellation):
743  *
744  *           cl_lock_cancel()->
745  *             osc_lock_cancel()->
746  *               ldlm_cli_cancel()->
747  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
748  *
749  */
750 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
751                                  struct ldlm_lock_desc *new, void *data,
752                                  int flag)
753 {
754         struct lu_env     *env;
755         struct cl_env_nest nest;
756         int                result;
757
758         /*
759          * This can be called in the context of outer IO, e.g.,
760          *
761          *     cl_enqueue()->...
762          *       ->osc_enqueue_base()->...
763          *         ->ldlm_prep_elc_req()->...
764          *           ->ldlm_cancel_callback()->...
765          *             ->osc_ldlm_blocking_ast()
766          *
767          * A new environment has to be created so as not to corrupt the outer context.
768          */
769         env = cl_env_nested_get(&nest);
770         if (!IS_ERR(env)) {
771                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
772                 cl_env_nested_put(&nest, env);
773         } else {
774                 result = PTR_ERR(env);
775                 /*
776                  * XXX This should never happen, as cl_lock is
777                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
778                  * should be used.
779                  */
780                 LBUG();
781         }
782         if (result != 0) {
783                 if (result == -ENODATA)
784                         result = 0;
785                 else
786                         CERROR("BAST failed: %d\n", result);
787         }
788         return result;
789 }
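
osc_dlm_blocking_ast0() decides whether to call ldlm_cli_cancel() directly based on whether a cl_lock is still attached, whether it is still bound to the DLM lock, and on the callback flag. A compact standalone model of that decision follows; the toy_* names are stand-ins, not kernel identifiers.

#include <stdio.h>

/* Stand-ins for LDLM_CB_BLOCKING / LDLM_CB_CANCELING. */
enum toy_cb_flag { TOY_CB_BLOCKING, TOY_CB_CANCELING };

/* Should the AST call ldlm_cli_cancel() itself?  If no cl_lock is
 * attached (the `normal' reclaim race), cancel only on a blocking
 * notification; if a cl_lock is attached but was detached from the
 * DLM lock while we waited for its mutex, cancel directly; otherwise
 * osc_lock_blocking() drives cancellation through the cl_lock. */
static int toy_should_cli_cancel(int cl_lock_attached, int still_bound,
                                 enum toy_cb_flag flag)
{
        if (!cl_lock_attached)
                return flag == TOY_CB_BLOCKING;
        return !still_bound;
}

int main(void)
{
        printf("%d\n", toy_should_cli_cancel(0, 0, TOY_CB_CANCELING)); /* 0 */
        printf("%d\n", toy_should_cli_cancel(0, 0, TOY_CB_BLOCKING));  /* 1 */
        printf("%d\n", toy_should_cli_cancel(1, 1, TOY_CB_BLOCKING));  /* 0 */
        return 0;
}
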
790
791 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
792                                    int flags, void *data)
793 {
794         struct cl_env_nest nest;
795         struct lu_env     *env;
796         struct osc_lock   *olck;
797         struct cl_lock    *lock;
798         int result;
799         int dlmrc;
800
801         /* first, do dlm part of the work */
802         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
803         /* then, notify cl_lock */
804         env = cl_env_nested_get(&nest);
805         if (!IS_ERR(env)) {
806                 olck = osc_ast_data_get(dlmlock);
807                 if (olck != NULL) {
808                         lock = olck->ols_cl.cls_lock;
809                         cl_lock_mutex_get(env, lock);
810                         /*
811                          * ldlm_handle_cp_callback() copied LVB from request
812                          * to lock->l_lvb_data, store it in osc_lock.
813                          */
814                         LASSERT(dlmlock->l_lvb_data != NULL);
815                         lock_res_and_lock(dlmlock);
816                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
817                         if (olck->ols_lock == NULL) {
818                                 /*
819                                  * upcall (osc_lock_upcall()) hasn't yet been
820                                  * called. Do nothing now, upcall will bind
821                                  * olck to dlmlock and signal the waiters.
822                                  *
823                                  * This maintains an invariant that osc_lock
824                                  * and ldlm_lock are always bound when
825                                  * osc_lock is in OLS_GRANTED state.
826                                  */
827                         } else if (dlmlock->l_granted_mode ==
828                                    dlmlock->l_req_mode) {
829                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
830                         }
831                         unlock_res_and_lock(dlmlock);
832
833                         if (dlmrc != 0) {
834                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
835                                               "dlmlock returned %d\n", dlmrc);
836                                 cl_lock_error(env, lock, dlmrc);
837                         }
838                         cl_lock_mutex_put(env, lock);
839                         osc_ast_data_put(env, olck);
840                         result = 0;
841                 } else
842                         result = -ELDLM_NO_LOCK_DATA;
843                 cl_env_nested_put(&nest, env);
844         } else
845                 result = PTR_ERR(env);
846         return dlmrc ?: result;
847 }
848
849 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
850 {
851         struct ptlrpc_request  *req  = data;
852         struct osc_lock        *olck;
853         struct cl_lock         *lock;
854         struct cl_object       *obj;
855         struct cl_env_nest      nest;
856         struct lu_env          *env;
857         struct ost_lvb         *lvb;
858         struct req_capsule     *cap;
859         int                     result;
860
861         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
862
863         env = cl_env_nested_get(&nest);
864         if (!IS_ERR(env)) {
865                 /* osc_ast_data_get() has to go after the environment is
866                  * allocated, because osc_ast_data_get() acquires a
867                  * reference to a lock, and it can only be released with an
868                  * environment.
869                  */
870                 olck = osc_ast_data_get(dlmlock);
871                 if (olck != NULL) {
872                         lock = olck->ols_cl.cls_lock;
873                         /* Do not grab the mutex of cl_lock for glimpse.
874                          * See LU-1274 for details.
875                          * BTW, it's okay for cl_lock to be cancelled during
876                          * this period because server can handle this race.
877                          * See ldlm_server_glimpse_ast() for details.
878                          * cl_lock_mutex_get(env, lock); */
879                         cap = &req->rq_pill;
880                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
881                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
882                                              sizeof *lvb);
883                         result = req_capsule_server_pack(cap);
884                         if (result == 0) {
885                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
886                                 obj = lock->cll_descr.cld_obj;
887                                 result = cl_object_glimpse(env, obj, lvb);
888                         }
889                         osc_ast_data_put(env, olck);
890                 } else {
891                         /*
892                          * These errors are normal races, so we don't want to
893                          * fill the console with messages by calling
894                          * ptlrpc_error()
895                          */
896                         lustre_pack_reply(req, 1, NULL, NULL);
897                         result = -ELDLM_NO_LOCK_DATA;
898                 }
899                 cl_env_nested_put(&nest, env);
900         } else
901                 result = PTR_ERR(env);
902         req->rq_status = result;
903         return result;
904 }
905
906 static unsigned long osc_lock_weigh(const struct lu_env *env,
907                                     const struct cl_lock_slice *slice)
908 {
909         /*
910          * don't need to grab coh_page_guard since we don't care about the
911          * exact number of pages.
912          */
913         return cl_object_header(slice->cls_obj)->coh_pages;
914 }
915
916 /**
917  * Get the weight of dlm lock for early cancellation.
918  *
919  * XXX: it should return the pages covered by this \a dlmlock.
920  */
921 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
922 {
923         struct cl_env_nest       nest;
924         struct lu_env           *env;
925         struct osc_lock         *lock;
926         struct cl_lock          *cll;
927         unsigned long            weight;
928         ENTRY;
929
930         cfs_might_sleep();
931         /*
932          * osc_ldlm_weigh_ast has a complex context since it might be called
933          * because of lock cancellation or from user input. We have to make
934          * a new environment for it. It is probably safe to use the upper
935          * context, because cl_lock_put() doesn't modify environment
936          * variables. But just in case, create a new one.
937          */
938         env = cl_env_nested_get(&nest);
939         if (IS_ERR(env))
940                 /* Mostly because of lack of memory; tend to eliminate this lock. */
941                 RETURN(0);
942
943         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
944         lock = osc_ast_data_get(dlmlock);
945         if (lock == NULL) {
946                 /* cl_lock was destroyed because of memory pressure.
947                  * It is reasonable to assign this type of lock
948                  * a lower cost.
949                  */
950                 GOTO(out, weight = 0);
951         }
952
953         cll = lock->ols_cl.cls_lock;
954         cl_lock_mutex_get(env, cll);
955         weight = cl_lock_weigh(env, cll);
956         cl_lock_mutex_put(env, cll);
957         osc_ast_data_put(env, lock);
958         EXIT;
959
960 out:
961         cl_env_nested_put(&nest, env);
962         return weight;
963 }
964
965 static void osc_lock_build_einfo(const struct lu_env *env,
966                                  const struct cl_lock *clock,
967                                  struct osc_lock *lock,
968                                  struct ldlm_enqueue_info *einfo)
969 {
970         enum cl_lock_mode mode;
971
972         mode = clock->cll_descr.cld_mode;
973         if (mode == CLM_PHANTOM)
974                 /*
975                  * For now, enqueue all glimpse locks in read mode. In the
976                  * future, client might choose to enqueue LCK_PW lock for
977                  * glimpse on a file opened for write.
978                  */
979                 mode = CLM_READ;
980
981         einfo->ei_type   = LDLM_EXTENT;
982         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
983         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
984         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
985         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
986         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
987         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
988 }
989
990 /**
991  * Determine if the lock should be converted into a lockless lock.
992  *
993  * Steps to check:
994  * - whether the lock has an explicit requirement for a non-lockless lock;
995  * - the io lock request type ci_lockreq;
996  * - send the enqueue rpc to the ost to make the further decision;
997  * - special treatment for lockless truncate (see the sketch after this function).
998  *
999  *  Additional policy can be implemented here, e.g., never do lockless-io
1000  *  for large extents.
1001  */
1002 static void osc_lock_to_lockless(const struct lu_env *env,
1003                                  struct osc_lock *ols, int force)
1004 {
1005         struct cl_lock_slice *slice = &ols->ols_cl;
1006         struct cl_lock *lock        = slice->cls_lock;
1007
1008         LASSERT(ols->ols_state == OLS_NEW ||
1009                 ols->ols_state == OLS_UPCALL_RECEIVED);
1010
1011         if (force) {
1012                 ols->ols_locklessable = 1;
1013                 LASSERT(cl_lock_is_mutexed(lock));
1014                 slice->cls_ops = &osc_lock_lockless_ops;
1015         } else {
1016                 struct osc_io *oio     = osc_env_io(env);
1017                 struct cl_io  *io      = oio->oi_cl.cis_io;
1018                 struct cl_object *obj  = slice->cls_obj;
1019                 struct osc_object *oob = cl2osc(obj);
1020                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1021                 struct obd_connect_data *ocd;
1022
1023                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1024                         io->ci_lockreq == CILR_MAYBE ||
1025                         io->ci_lockreq == CILR_NEVER);
1026
1027                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1028                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
1029                                 (io->ci_lockreq == CILR_MAYBE) &&
1030                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1031                 if (io->ci_lockreq == CILR_NEVER ||
1032                         /* lockless IO */
1033                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1034                         /* lockless truncate */
1035                     (cl_io_is_trunc(io) &&
1036                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1037                       osd->od_lockless_truncate)) {
1038                         ols->ols_locklessable = 1;
1039                         slice->cls_ops = &osc_lock_lockless_ops;
1040                 }
1041         }
1042         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1043 }
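
Below is a standalone sketch of the lockless decision implemented above, under the assumption that the relevant inputs can be reduced to booleans; all toy_* names and fields are hypothetical stand-ins for the kernel flags they are commented with.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, flattened view of the inputs used above. */
struct toy_ctx {
        bool force;               /* caller demands lockless         */
        bool lockreq_never;       /* CILR_NEVER                      */
        bool lockreq_maybe;       /* CILR_MAYBE                      */
        bool is_setattr;          /* CIT_SETATTR                     */
        bool srvlock_supported;   /* OBD_CONNECT_SRVLOCK             */
        bool contended;           /* osc_object_is_contended()       */
        bool is_trunc;            /* cl_io_is_trunc()                */
        bool trunc_lockless;      /* OBD_CONNECT_TRUNCLOCK + tunable */
};

static bool toy_use_lockless(const struct toy_ctx *c)
{
        bool locklessable;

        if (c->force)
                return true;
        locklessable = !c->is_setattr && c->lockreq_maybe &&
                       c->srvlock_supported;
        return c->lockreq_never ||
               (locklessable && c->contended) ||
               (c->is_trunc && c->trunc_lockless);
}

int main(void)
{
        struct toy_ctx contended_read = {
                .lockreq_maybe     = true,
                .srvlock_supported = true,
                .contended         = true,
        };

        printf("%d\n", toy_use_lockless(&contended_read));     /* 1 */
        return 0;
}
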
1044
1045 static int osc_lock_compatible(const struct osc_lock *qing,
1046                                const struct osc_lock *qed)
1047 {
1048         enum cl_lock_mode qing_mode;
1049         enum cl_lock_mode qed_mode;
1050
1051         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1052         if (qed->ols_glimpse &&
1053             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1054                 return 1;
1055
1056         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1057         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1058 }
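
The compatibility rule above can be restated as a small pure function; the sketch below is a standalone model with stand-in types, not the kernel code.

#include <assert.h>

enum toy_mode { TOY_READ, TOY_WRITE };

/* A queued glimpse lock never blocks the new one once its upcall has
 * arrived, or when the new lock is a read; otherwise only read vs.
 * read is compatible. */
static int toy_compatible(enum toy_mode qing_mode, enum toy_mode qed_mode,
                          int qed_is_glimpse, int qed_past_upcall)
{
        if (qed_is_glimpse && (qed_past_upcall || qing_mode == TOY_READ))
                return 1;
        return qing_mode == TOY_READ && qed_mode == TOY_READ;
}

int main(void)
{
        assert(toy_compatible(TOY_READ, TOY_READ, 0, 0));
        assert(!toy_compatible(TOY_WRITE, TOY_READ, 0, 0));
        assert(toy_compatible(TOY_WRITE, TOY_WRITE, 1, 1));
        return 0;
}
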
1059
1060 /**
1061  * Cancel all conflicting locks and wait for them to be destroyed.
1062  *
1063  * This function is used for two purposes:
1064  *
1065  *     - early cancel all conflicting locks before starting IO, and
1066  *
1067  *     - guarantee that pages added to the page cache by lockless IO are never
1068  *       covered by locks other than lockless IO lock, and, hence, are not
1069  *       visible to other threads.
1070  */
1071 static int osc_lock_enqueue_wait(const struct lu_env *env,
1072                                  const struct osc_lock *olck)
1073 {
1074         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1075         struct cl_lock_descr    *descr   = &lock->cll_descr;
1076         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1077         struct cl_lock          *scan;
1078         struct cl_lock          *conflict= NULL;
1079         int lockless                     = osc_lock_is_lockless(olck);
1080         int rc                           = 0;
1081         ENTRY;
1082
1083         LASSERT(cl_lock_is_mutexed(lock));
1084
1085         /* make it enqueue anyway for glimpse lock, because we actually
1086          * don't need to cancel any conflicting locks. */
1087         if (olck->ols_glimpse)
1088                 return 0;
1089
1090         cfs_spin_lock(&hdr->coh_lock_guard);
1091         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1092                 struct cl_lock_descr *cld = &scan->cll_descr;
1093                 const struct osc_lock *scan_ols;
1094
1095                 if (scan == lock)
1096                         break;
1097
1098                 if (scan->cll_state < CLS_QUEUING ||
1099                     scan->cll_state == CLS_FREEING ||
1100                     cld->cld_start > descr->cld_end ||
1101                     cld->cld_end < descr->cld_start)
1102                         continue;
1103
1104                 /* overlapped and living locks. */
1105
1106                 /* We're not supposed to give up group lock. */
1107                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1108                         LASSERT(descr->cld_mode != CLM_GROUP ||
1109                                 descr->cld_gid != scan->cll_descr.cld_gid);
1110                         continue;
1111                 }
1112
1113                 scan_ols = osc_lock_at(scan);
1114
1115                 /* We need to cancel the compatible locks if we're enqueuing
1116                  * a lockless lock, for example:
1117                  * imagine that client has PR lock on [0, 1000], and thread T0
1118                  * is doing lockless IO in [500, 1500] region. Concurrent
1119                  * thread T1 can see lockless data in [500, 1000], which is
1120                  * wrong, because these data are possibly stale. */
1121                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1122                         continue;
1123
1124                 cl_lock_get_trust(scan);
1125                 conflict = scan;
1126                 break;
1127         }
1128         cfs_spin_unlock(&hdr->coh_lock_guard);
1129
1130         if (conflict) {
1131                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1132                         /* we want a group lock but a previous lock request
1133                          * conflicts, so we do not wait but return 0 and the
1134                          * request is sent to the server
1135                          */
1136                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1137                                            "with %p, no wait, send to server\n",
1138                                lock, conflict);
1139                         cl_lock_put(env, conflict);
1140                         rc = 0;
1141                 } else {
1142                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1143                                            "will wait\n",
1144                                lock, conflict);
1145                         LASSERT(lock->cll_conflict == NULL);
1146                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1147                                    lock);
1148                         lock->cll_conflict = conflict;
1149                         rc = CLO_WAIT;
1150                 }
1151         }
1152         RETURN(rc);
1153 }
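
The scan above skips locks whose extents do not overlap the one being enqueued (cld_start > descr->cld_end || cld_end < descr->cld_start). The following standalone model shows that closed-interval overlap test, reusing the [0, 1000] vs. [500, 1500] example from the comment in the loop.

#include <assert.h>
#include <stdint.h>

/* Two closed page-index intervals [a_start, a_end] and
 * [b_start, b_end] overlap unless one ends before the other starts;
 * this is the negation of the skip condition in the scan above. */
static int toy_extents_overlap(uint64_t a_start, uint64_t a_end,
                               uint64_t b_start, uint64_t b_end)
{
        return !(a_start > b_end || a_end < b_start);
}

int main(void)
{
        /* The PR lock on [0, 1000] overlaps the lockless IO on
         * [500, 1500], so it must be cancelled before that IO runs. */
        assert(toy_extents_overlap(0, 1000, 500, 1500));
        assert(!toy_extents_overlap(0, 499, 500, 1500));
        return 0;
}
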
1154
1155 /**
1156  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1157  * layer. This initiates ldlm enqueue:
1158  *
1159  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1160  *
1161  *     - calls osc_enqueue_base() to do actual enqueue.
1162  *
1163  * osc_enqueue_base() is supplied with an upcall function that is executed
1164  * when lock is received either after a local cached ldlm lock is matched, or
1165  * when a reply from the server is received.
1166  *
1167  * This function does not wait for the network communication to complete.
1168  */
1169 static int osc_lock_enqueue(const struct lu_env *env,
1170                             const struct cl_lock_slice *slice,
1171                             struct cl_io *unused, __u32 enqflags)
1172 {
1173         struct osc_lock          *ols     = cl2osc_lock(slice);
1174         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1175         int result;
1176         ENTRY;
1177
1178         LASSERT(cl_lock_is_mutexed(lock));
1179         LASSERTF(ols->ols_state == OLS_NEW,
1180                  "Impossible state: %d\n", ols->ols_state);
1181
1182         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1183         if (enqflags & CEF_AGL) {
1184                 ols->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1185                 ols->ols_agl = 1;
1186         } else {
1187                 ols->ols_agl = 0;
1188         }
1189         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1190                 ols->ols_glimpse = 1;
1191         if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
1192                 /* try to convert this lock to a lockless lock */
1193                 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1194
1195         result = osc_lock_enqueue_wait(env, ols);
1196         if (result == 0) {
1197                 if (!osc_lock_is_lockless(ols)) {
1198                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1199                         struct osc_thread_info   *info = osc_env_info(env);
1200                         struct ldlm_res_id       *resname = &info->oti_resname;
1201                         ldlm_policy_data_t       *policy = &info->oti_policy;
1202                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1203
1204                         if (ols->ols_locklessable)
1205                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1206
1207                         /* lock will be passed as the upcall cookie;
1208                          * hold a ref to prevent it from being released. */
1209                         cl_lock_hold_add(env, lock, "upcall", lock);
1210                         /* a user for lock also */
1211                         cl_lock_user_add(env, lock);
1212                         ols->ols_state = OLS_ENQUEUED;
1213
1214                         /*
1215                          * XXX: this is possible blocking point as
1216                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1217                          * LDLM_CP_CALLBACK.
1218                          */
1219                         osc_lock_build_res(env, obj, resname);
1220                         osc_lock_build_policy(env, lock, policy);
1221                         result = osc_enqueue_base(osc_export(obj), resname,
1222                                           &ols->ols_flags, policy,
1223                                           &ols->ols_lvb,
1224                                           obj->oo_oinfo->loi_kms_valid,
1225                                           osc_lock_upcall,
1226                                           ols, einfo, &ols->ols_handle,
1227                                           PTLRPCD_SET, 1, ols->ols_agl);
1228                         if (result != 0) {
1229                                 cl_lock_user_del(env, lock);
1230                                 cl_lock_unhold(env, lock, "upcall", lock);
1231                                 if (unlikely(result == -ECANCELED)) {
1232                                         ols->ols_state = OLS_NEW;
1233                                         result = 0;
1234                                 }
1235                         }
1236                 } else {
1237                         ols->ols_state = OLS_GRANTED;
1238                         ols->ols_owner = osc_env_io(env);
1239                 }
1240         }
1241         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1242         RETURN(result);
1243 }
1244
1245 static int osc_lock_wait(const struct lu_env *env,
1246                          const struct cl_lock_slice *slice)
1247 {
1248         struct osc_lock *olck = cl2osc_lock(slice);
1249         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1250
1251         LINVRNT(osc_lock_invariant(olck));
1252
1253         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
1254                 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1255                         return 0;
1256                 } else if (olck->ols_agl) {
1257                         if (lock->cll_flags & CLF_FROM_UPCALL)
1258                                 /* It is from enqueue RPC reply upcall for
1259                                  * updating state. Do not re-enqueue. */
1260                                 return -ENAVAIL;
1261                         else
1262                                 olck->ols_state = OLS_NEW;
1263                 } else {
1264                         LASSERT(lock->cll_error);
1265                         return lock->cll_error;
1266                 }
1267         }
1268
1269         if (olck->ols_state == OLS_NEW) {
1270                 int rc;
1271
1272                 LASSERT(olck->ols_agl);
1273
1274                 rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
1275                 if (rc != 0)
1276                         return rc;
1277                 else
1278                         return CLO_REENQUEUED;
1279         }
1280
1281         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1282                      lock->cll_error == 0, olck->ols_lock != NULL));
1283
1284         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1285 }
1286
1287 /**
1288  * An implementation of cl_lock_operations::clo_use() method that pins cached
1289  * lock.
1290  */
1291 static int osc_lock_use(const struct lu_env *env,
1292                         const struct cl_lock_slice *slice)
1293 {
1294         struct osc_lock *olck = cl2osc_lock(slice);
1295         int rc;
1296
1297         LASSERT(!olck->ols_hold);
1298
1299         /*
1300          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1301          * flag is not set. This protects us from a concurrent blocking ast.
1302          */
1303         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1304         if (rc == 0) {
1305                 olck->ols_hold = 1;
1306                 olck->ols_state = OLS_GRANTED;
1307         } else {
1308                 struct cl_lock *lock;
1309
1310                 /*
1311                  * Lock is being cancelled somewhere within
1312                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1313                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1314                  * cl_lock mutex.
1315                  */
1316                 lock = slice->cls_lock;
1317                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1318                 LASSERT(lock->cll_users > 0);
1319                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1320                  * lock.*/
1321                 olck->ols_ast_wait = 1;
1322                 rc = CLO_WAIT;
1323         }
1324         return rc;
1325 }
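
osc_lock_use() relies on ldlm_lock_addref_try() to refuse to pin a lock once LDLM_FL_CBPENDING is set. Below is a standalone sketch of that check-then-addref idea; the real helper does the check and the increment under the resource lock, which is what makes them atomic with respect to the blocking AST. The toy_* names are stand-ins.

#include <errno.h>
#include <stdio.h>

/* Hypothetical, simplified DLM lock. */
struct toy_dlm_lock {
        int cbpending;   /* a blocking/cancel callback is pending */
        int readers;
};

/* Refuse to pin a lock once cancellation has started; otherwise take
 * a read reference. */
static int toy_addref_try(struct toy_dlm_lock *l)
{
        if (l->cbpending)
                return -EAGAIN;
        l->readers++;
        return 0;
}

int main(void)
{
        struct toy_dlm_lock cached    = { 0, 0 };
        struct toy_dlm_lock cancelled = { 1, 0 };

        printf("%d\n", toy_addref_try(&cached));     /* 0: lock reused      */
        printf("%d\n", toy_addref_try(&cancelled));  /* -EAGAIN: must wait  */
        return 0;
}
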
1326
1327 static int osc_lock_flush(struct osc_lock *ols, int discard)
1328 {
1329         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1330         struct cl_env_nest    nest;
1331         struct lu_env        *env;
1332         int result = 0;
1333         ENTRY;
1334
1335         env = cl_env_nested_get(&nest);
1336         if (!IS_ERR(env)) {
1337                 struct osc_object    *obj   = cl2osc(ols->ols_cl.cls_obj);
1338                 struct cl_lock_descr *descr = &lock->cll_descr;
1339                 int rc = 0;
1340
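                /* Only write-capable lock modes can cover dirty pages, so
                 * write-back is needed for CLM_WRITE (or stronger) locks only;
                 * read locks fall through to the page discard below. */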
1341                 if (descr->cld_mode >= CLM_WRITE) {
1342                         result = osc_cache_writeback_range(env, obj,
1343                                         descr->cld_start, descr->cld_end,
1344                                         1, discard);
1345                         CDEBUG(D_DLMTRACE, "write out %d pages for lock %p.\n",
1346                                result, lock);
1347                         if (result > 0)
1348                                 result = 0;
1349                 }
1350
1351                 rc = cl_lock_discard_pages(env, lock);
1352                 if (result == 0 && rc < 0)
1353                         result = rc;
1354
1355                 cl_env_nested_put(&nest, env);
1356         } else
1357                 result = PTR_ERR(env);
1358         if (result == 0) {
1359                 ols->ols_flush = 1;
1360                 LINVRNT(!osc_lock_has_pages(ols));
1361         }
1362         RETURN(result);
1363 }
1364
1365 /**
1366  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1367  * called (as part of cl_lock_cancel()) when lock is canceled either voluntary
1368  * called (as part of cl_lock_cancel()) when a lock is canceled either
1369  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
1370  * conflict with some other lock somewhere in the cluster. This function does the
1371  *
1372  *     - invalidates all pages protected by this lock (after sending dirty
1373  *       ones to the server, as necessary);
1374  *
1375  *     - decrefs the underlying ldlm lock;
1376  *
1377  *     - cancels ldlm lock (ldlm_cli_cancel()).
1378  */
1379 static void osc_lock_cancel(const struct lu_env *env,
1380                             const struct cl_lock_slice *slice)
1381 {
1382         struct cl_lock   *lock    = slice->cls_lock;
1383         struct osc_lock  *olck    = cl2osc_lock(slice);
1384         struct ldlm_lock *dlmlock = olck->ols_lock;
1385         int               result  = 0;
1386         int               discard;
1387
1388         LASSERT(cl_lock_is_mutexed(lock));
1389         LINVRNT(osc_lock_invariant(olck));
1390
1391         if (dlmlock != NULL) {
1392                 int do_cancel;
1393
1394                 discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
1395                 result = osc_lock_flush(olck, discard);
1396                 osc_lock_unhold(olck);
1397
1398                 lock_res_and_lock(dlmlock);
1399                 /* Now that we are the only user of the dlm read/write
1400                  * reference, ->l_readers and ->l_writers should normally
1401                  * both be zero. However, there is a corner case;
1402                  * see bug 18829 for details. */
1403                 do_cancel = (dlmlock->l_readers == 0 &&
1404                              dlmlock->l_writers == 0);
1405                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1406                 unlock_res_and_lock(dlmlock);
1407                 if (do_cancel)
1408                         result = ldlm_cli_cancel(&olck->ols_handle);
1409                 if (result < 0)
1410                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1411                                       "lock %p cancel failure with error(%d)\n",
1412                                       lock, result);
1413         }
1414         olck->ols_state = OLS_CANCELLED;
1415         olck->ols_flags &= ~LDLM_FL_LVB_READY;
1416         osc_lock_detach(env, olck);
1417 }
1418
1419 #ifdef INVARIANT_CHECK
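/**
 * Callback for cl_page_gang_lookup(), used by osc_lock_has_pages() below.
 * A page is acceptable (CLP_GANG_OKAY) when the lock being checked is a read
 * lock and some other lock covers the page as well; otherwise the page is
 * still protected only by this lock and CLP_GANG_ABORT is returned.
 */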
1420 static int check_cb(const struct lu_env *env, struct cl_io *io,
1421                     struct cl_page *page, void *cbdata)
1422 {
1423         struct cl_lock *lock = cbdata;
1424
1425         if (lock->cll_descr.cld_mode == CLM_READ) {
1426                 struct cl_lock *tmp;
1427                 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1428                                      page, lock, 1, 0);
1429                 if (tmp != NULL) {
1430                         cl_lock_put(env, tmp);
1431                         return CLP_GANG_OKAY;
1432                 }
1433         }
1434
1435         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1436         CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1437         return CLP_GANG_ABORT;
1438 }
1439
1440 /**
1441  * Returns true iff there are pages under \a olck not protected by other
1442  * locks.
1443  */
1444 static int osc_lock_has_pages(struct osc_lock *olck)
1445 {
1446         struct cl_lock       *lock;
1447         struct cl_lock_descr *descr;
1448         struct cl_object     *obj;
1449         struct osc_object    *oob;
1450         struct cl_env_nest    nest;
1451         struct cl_io         *io;
1452         struct lu_env        *env;
1453         int                   result;
1454
1455         env = cl_env_nested_get(&nest);
1456         if (IS_ERR(env))
1457                 return 0;
1458
1459         obj   = olck->ols_cl.cls_obj;
1460         oob   = cl2osc(obj);
1461         io    = &oob->oo_debug_io;
1462         lock  = olck->ols_cl.cls_lock;
1463         descr = &lock->cll_descr;
1464
1465         cfs_mutex_lock(&oob->oo_debug_mutex);
1466
1467         io->ci_obj = cl_object_top(obj);
1468         io->ci_ignore_layout = 1;
1469         cl_io_init(env, io, CIT_MISC, io->ci_obj);
1470         do {
1471                 result = cl_page_gang_lookup(env, obj, io,
1472                                              descr->cld_start, descr->cld_end,
1473                                              check_cb, (void *)lock);
1474                 if (result == CLP_GANG_ABORT)
1475                         break;
1476                 if (result == CLP_GANG_RESCHED)
1477                         cfs_cond_resched();
1478         } while (result != CLP_GANG_OKAY);
1479         cl_io_fini(env, io);
1480         cfs_mutex_unlock(&oob->oo_debug_mutex);
1481         cl_env_nested_put(&nest, env);
1482
1483         return (result == CLP_GANG_ABORT);
1484 }
1485 #else
1486 static int osc_lock_has_pages(struct osc_lock *olck)
1487 {
1488         return 0;
1489 }
1490 #endif /* INVARIANT_CHECK */
1491
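/**
 * Implements cl_lock_operations::clo_delete() method for osc layer. Glimpse
 * locks never hold a dlm lock, so there is nothing for them to release; for
 * all other locks the hold is dropped and the osc lock is detached from its
 * dlm lock.
 */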
1492 static void osc_lock_delete(const struct lu_env *env,
1493                             const struct cl_lock_slice *slice)
1494 {
1495         struct osc_lock *olck;
1496
1497         olck = cl2osc_lock(slice);
1498         if (olck->ols_glimpse) {
1499                 LASSERT(!olck->ols_hold);
1500                 LASSERT(!olck->ols_lock);
1501                 return;
1502         }
1503
1504         LINVRNT(osc_lock_invariant(olck));
1505         LINVRNT(!osc_lock_has_pages(olck));
1506
1507         osc_lock_unhold(olck);
1508         osc_lock_detach(env, olck);
1509 }
1510
1511 /**
1512  * Implements cl_lock_operations::clo_state() method for osc layer.
1513  *
1514  * Maintains osc_lock::ols_owner field.
1515  *
1516  * This assumes that a lock always enters CLS_HELD (from some other state) in
1517  * the same IO context as the one that requested the lock. This should not be a
1518  * problem, because the context is by definition shared by all activity
1519  * pertaining to the same high-level IO.
1520  */
1521 static void osc_lock_state(const struct lu_env *env,
1522                            const struct cl_lock_slice *slice,
1523                            enum cl_lock_state state)
1524 {
1525         struct osc_lock *lock = cl2osc_lock(slice);
1526
1527         /*
1528          * XXX multiple io contexts can use the lock at the same time.
1529          */
1530         LINVRNT(osc_lock_invariant(lock));
1531         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1532                 struct osc_io *oio = osc_env_io(env);
1533
1534                 LASSERT(lock->ols_owner == NULL);
1535                 lock->ols_owner = oio;
1536         } else if (state != CLS_HELD)
1537                 lock->ols_owner = NULL;
1538 }
1539
1540 static int osc_lock_print(const struct lu_env *env, void *cookie,
1541                           lu_printer_t p, const struct cl_lock_slice *slice)
1542 {
1543         struct osc_lock *lock = cl2osc_lock(slice);
1544
1545         /*
1546          * XXX print ldlm lock and einfo properly.
1547          */
1548         (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1549              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1550              lock->ols_state, lock->ols_owner);
1551         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1552         return 0;
1553 }
1554
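/**
 * Implements cl_lock_operations::clo_fits_into() method for osc layer:
 * decides whether the existing lock \a slice can be matched against a new
 * request described by \a need and \a io instead of enqueueing a new lock.
 */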
1555 static int osc_lock_fits_into(const struct lu_env *env,
1556                               const struct cl_lock_slice *slice,
1557                               const struct cl_lock_descr *need,
1558                               const struct cl_io *io)
1559 {
1560         struct osc_lock *ols = cl2osc_lock(slice);
1561
1562         if (need->cld_enq_flags & CEF_NEVER)
1563                 return 0;
1564
1565         if (ols->ols_state >= OLS_CANCELLED)
1566                 return 0;
1567
1568         if (need->cld_mode == CLM_PHANTOM) {
1569                 if (ols->ols_agl)
1570                         return !(ols->ols_state > OLS_RELEASED);
1571
1572                  * Note: a QUEUED lock can't be matched here, otherwise
1573                  * it might cause a deadlock.
1574                  * In the read process:
1575                  * P1: enqueues a read lock, creating sublock1.
1576                  * P2: enqueues a write lock, creating sublock2 (which
1577                  *     conflicts with sublock1).
1578                  * P1: the read lock is granted.
1579                  * P1: enqueues a glimpse lock (while holding sublock1 for
1580                  *     read), which matches sublock2 and waits for sublock2
1581                  *     to be granted.
1582                  *     But sublock2 can never be granted, because P1
1583                  *     will not release sublock1. Deadlock!
1584                  */
1585                 if (ols->ols_state < OLS_GRANTED ||
1586                     ols->ols_state > OLS_RELEASED)
1587                         return 0;
1588         } else if (need->cld_enq_flags & CEF_MUST) {
1589                 /*
1590                  * If the lock has never been enqueued, it can't be matched,
1591                  * because the enqueue process brings in information that is
1592                  * needed to determine things such as locklessness,
1593                  * CEF_MUST, etc.
1594                  */
1595                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1596                     ols->ols_locklessable)
1597                         return 0;
1598         }
1599         return 1;
1600 }
1601
1602 static const struct cl_lock_operations osc_lock_ops = {
1603         .clo_fini    = osc_lock_fini,
1604         .clo_enqueue = osc_lock_enqueue,
1605         .clo_wait    = osc_lock_wait,
1606         .clo_unuse   = osc_lock_unuse,
1607         .clo_use     = osc_lock_use,
1608         .clo_delete  = osc_lock_delete,
1609         .clo_state   = osc_lock_state,
1610         .clo_cancel  = osc_lock_cancel,
1611         .clo_weigh   = osc_lock_weigh,
1612         .clo_print   = osc_lock_print,
1613         .clo_fits_into = osc_lock_fits_into,
1614 };
1615
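/**
 * Implements cl_lock_operations::clo_unuse() method for lockless osc locks.
 * A lockless lock cannot be cached across IOs, so instead of being released
 * into the cache it is cancelled and deleted as soon as its user is done.
 */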
1616 static int osc_lock_lockless_unuse(const struct lu_env *env,
1617                                    const struct cl_lock_slice *slice)
1618 {
1619         struct osc_lock *ols = cl2osc_lock(slice);
1620         struct cl_lock *lock = slice->cls_lock;
1621
1622         LASSERT(ols->ols_state == OLS_GRANTED);
1623         LINVRNT(osc_lock_invariant(ols));
1624
1625         cl_lock_cancel(env, lock);
1626         cl_lock_delete(env, lock);
1627         return 0;
1628 }
1629
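/**
 * Implements cl_lock_operations::clo_cancel() method for lockless osc locks:
 * flushes the covered pages and marks the lock cancelled. There is no dlm
 * lock to cancel in the lockless case.
 */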
1630 static void osc_lock_lockless_cancel(const struct lu_env *env,
1631                                      const struct cl_lock_slice *slice)
1632 {
1633         struct osc_lock   *ols  = cl2osc_lock(slice);
1634         int result;
1635
1636         result = osc_lock_flush(ols, 0);
1637         if (result)
1638                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1639                        ols, result);
1640         ols->ols_state = OLS_CANCELLED;
1641 }
1642
1643 static int osc_lock_lockless_wait(const struct lu_env *env,
1644                                   const struct cl_lock_slice *slice)
1645 {
1646         struct osc_lock *olck = cl2osc_lock(slice);
1647         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1648
1649         LINVRNT(osc_lock_invariant(olck));
1650         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1651
1652         return lock->cll_error;
1653 }
1654
1655 static void osc_lock_lockless_state(const struct lu_env *env,
1656                                     const struct cl_lock_slice *slice,
1657                                     enum cl_lock_state state)
1658 {
1659         struct osc_lock *lock = cl2osc_lock(slice);
1660
1661         LINVRNT(osc_lock_invariant(lock));
1662         if (state == CLS_HELD) {
1663                 struct osc_io *oio  = osc_env_io(env);
1664
1665                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1666                 lock->ols_owner = oio;
1667
1668                 /* Set the io to be lockless if this lock is for the io's
1669                  * host object. */
1670                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1671                         oio->oi_lockless = 1;
1672         }
1673 }
1674
1675 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1676                                        const struct cl_lock_slice *slice,
1677                                        const struct cl_lock_descr *need,
1678                                        const struct cl_io *io)
1679 {
1680         struct osc_lock *lock = cl2osc_lock(slice);
1681
1682         if (!(need->cld_enq_flags & CEF_NEVER))
1683                 return 0;
1684
1685         /* A lockless lock should only be used by its owning io. See bug 22147. */
1686         return (lock->ols_owner == osc_env_io(env));
1687 }
1688
1689 static const struct cl_lock_operations osc_lock_lockless_ops = {
1690         .clo_fini      = osc_lock_fini,
1691         .clo_enqueue   = osc_lock_enqueue,
1692         .clo_wait      = osc_lock_lockless_wait,
1693         .clo_unuse     = osc_lock_lockless_unuse,
1694         .clo_state     = osc_lock_lockless_state,
1695         .clo_fits_into = osc_lock_lockless_fits_into,
1696         .clo_cancel    = osc_lock_lockless_cancel,
1697         .clo_print     = osc_lock_print
1698 };
1699
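/**
 * Allocates an osc_lock slice for \a lock and adds it to the cl_lock with
 * osc_lock_ops. The slice starts out in the OLS_NEW state; it may later be
 * switched to osc_lock_lockless_ops by osc_lock_to_lockless().
 */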
1700 int osc_lock_init(const struct lu_env *env,
1701                   struct cl_object *obj, struct cl_lock *lock,
1702                   const struct cl_io *unused)
1703 {
1704         struct osc_lock *clk;
1705         int result;
1706
1707         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1708         if (clk != NULL) {
1709                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1710                 cfs_atomic_set(&clk->ols_pageref, 0);
1711                 clk->ols_state = OLS_NEW;
1712                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1713                 result = 0;
1714         } else
1715                 result = -ENOMEM;
1716         return result;
1717 }
1718
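/**
 * Returns 1 if the osc_lock attached to dlm lock \a dlm still has outstanding
 * page references, and 0 otherwise. The check atomically adds the
 * _PAGEREF_MAGIC bias to ols_pageref: if the result differs from the bias,
 * pages still reference the lock and the bias is removed again; otherwise the
 * counter is left biased at _PAGEREF_MAGIC.
 */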
1719 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1720 {
1721         struct osc_lock *olock;
1722         int              rc = 0;
1723
1724         cfs_spin_lock(&osc_ast_guard);
1725         olock = dlm->l_ast_data;
1726         /*
1727          * There is a very rare race with osc_page_addref_lock(), but it
1728          * does not matter: in the worst case we skip cancelling a lock
1729          * that we actually could cancel, which is harmless.
1730          */
1731         if (olock != NULL &&
1732             cfs_atomic_add_return(_PAGEREF_MAGIC,
1733                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1734                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1735                 rc = 1;
1736         }
1737         cfs_spin_unlock(&osc_ast_guard);
1738         return rc;
1739 }
1740
1741 /** @} osc */