lustre/osc/osc_lock.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc 
54  *  @{ 
55  */
56
57 #define _PAGEREF_MAGIC  (-10000000)
58
59 /*****************************************************************************
60  *
61  * Type conversions.
62  *
63  */
64
65 static const struct cl_lock_operations osc_lock_ops;
66 static const struct cl_lock_operations osc_lock_lockless_ops;
67 static void osc_lock_to_lockless(const struct lu_env *env,
68                                  struct osc_lock *ols, int force);
69 static int osc_lock_has_pages(struct osc_lock *olck);
70
71 int osc_lock_is_lockless(const struct osc_lock *olck)
72 {
73         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
74 }
75
76 /**
77  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
78  * pointer cannot be dereferenced, as lock is not protected from concurrent
79  * reclaim. This function is a helper for osc_lock_invariant().
80  */
81 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
82 {
83         struct ldlm_lock *lock;
84
85         lock = ldlm_handle2lock(handle);
86         if (lock != NULL)
87                 LDLM_LOCK_PUT(lock);
88         return lock;
89 }
90
91 /**
92  * Invariant that has to be true all of the time.
93  */
94 static int osc_lock_invariant(struct osc_lock *ols)
95 {
96         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
97         struct ldlm_lock *olock       = ols->ols_lock;
98         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
99
100         return
101                 ergo(osc_lock_is_lockless(ols),
102                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
103                 (ergo(olock != NULL, handle_used) &&
104                  ergo(olock != NULL,
105                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
106                  /*
107                   * Check that ->ols_handle and ->ols_lock are consistent, but
108                   * take into account that they are set at different times.
109                   */
110                  ergo(handle_used,
111                       ergo(lock != NULL && olock != NULL, lock == olock) &&
112                       ergo(lock == NULL, olock == NULL)) &&
113                  ergo(ols->ols_state == OLS_CANCELLED,
114                       olock == NULL && !handle_used) &&
115                  /*
116                   * DLM lock is destroyed only after we have seen cancellation
117                   * ast.
118                   */
119                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
120                       !olock->l_destroyed) &&
121                  ergo(ols->ols_state == OLS_GRANTED,
122                       olock != NULL &&
123                       olock->l_req_mode == olock->l_granted_mode &&
124                       ols->ols_hold));
125 }
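/*
 * Illustrative note (a sketch, not part of the upstream logic): the invariant
 * above is built from the libcfs-style ergo() helper (equi() is used similarly
 * later in this file), assumed here to express logical implication and
 * equivalence:
 *
 *     #define ergo(a, b) (!(a) || (b))       // "a implies b"
 *     #define equi(a, b) (!!(a) == !!(b))    // "a iff b"
 *
 * For example, "a granted lock must be held" reads as ergo(granted, hold),
 * which is only false when granted is true and hold is false.  Chaining such
 * implications with && means a single violated implication fails the whole
 * invariant.
 */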
126
127 /*****************************************************************************
128  *
129  * Lock operations.
130  *
131  */
132
133 /**
134  * Breaks a link between osc_lock and dlm_lock.
135  */
136 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
137 {
138         struct ldlm_lock *dlmlock;
139
140         cfs_spin_lock(&osc_ast_guard);
141         dlmlock = olck->ols_lock;
142         if (dlmlock == NULL) {
143                 cfs_spin_unlock(&osc_ast_guard);
144                 return;
145         }
146
147         olck->ols_lock = NULL;
148         /* wb(); --- for all who check (ols->ols_lock != NULL) before
149          * calling osc_lock_detach() */
150         dlmlock->l_ast_data = NULL;
151         olck->ols_handle.cookie = 0ULL;
152         cfs_spin_unlock(&osc_ast_guard);
153
154         lock_res_and_lock(dlmlock);
155         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
156                 struct cl_object *obj = olck->ols_cl.cls_obj;
157                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
158                 __u64 old_kms;
159
160                 cl_object_attr_lock(obj);
161                 /* Must get the value under the lock to avoid possible races. */
162                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
163                 /* Update the kms. Need to loop over all granted locks;
164                  * not a problem for the client. */
165                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
166
167                 cl_object_attr_set(env, obj, attr, CAT_KMS);
168                 cl_object_attr_unlock(obj);
169         }
170         unlock_res_and_lock(dlmlock);
171
172         /* release a reference taken in osc_lock_upcall0(). */
173         LASSERT(olck->ols_has_ref);
174         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
175         LDLM_LOCK_RELEASE(dlmlock);
176         olck->ols_has_ref = 0;
177 }
178
179 static int osc_lock_unhold(struct osc_lock *ols)
180 {
181         int result = 0;
182
183         if (ols->ols_hold) {
184                 ols->ols_hold = 0;
185                 result = osc_cancel_base(&ols->ols_handle,
186                                          ols->ols_einfo.ei_mode);
187         }
188         return result;
189 }
190
191 static int osc_lock_unuse(const struct lu_env *env,
192                           const struct cl_lock_slice *slice)
193 {
194         struct osc_lock *ols = cl2osc_lock(slice);
195
196         LINVRNT(osc_lock_invariant(ols));
197
198         switch (ols->ols_state) {
199         case OLS_NEW:
200                 LASSERT(!ols->ols_hold);
201                 LASSERT(ols->ols_agl);
202                 return 0;
203         case OLS_UPCALL_RECEIVED:
204                 LASSERT(!ols->ols_hold);
205                 ols->ols_state = OLS_NEW;
206                 return 0;
207         case OLS_GRANTED:
208                 LASSERT(!ols->ols_glimpse);
209                 LASSERT(ols->ols_hold);
210                 /*
211                  * Move lock into OLS_RELEASED state before calling
212                  * osc_cancel_base() so that possible synchronous cancellation
213                  * (that always happens e.g., for liblustre) sees that lock is
214                  * released.
215                  */
216                 ols->ols_state = OLS_RELEASED;
217                 return osc_lock_unhold(ols);
218         default:
219                 CERROR("Impossible state: %d\n", ols->ols_state);
220                 LBUG();
221         }
222 }
223
224 static void osc_lock_fini(const struct lu_env *env,
225                           struct cl_lock_slice *slice)
226 {
227         struct osc_lock  *ols = cl2osc_lock(slice);
228
229         LINVRNT(osc_lock_invariant(ols));
230         /*
231          * ->ols_hold can still be true at this point if, for example, a
232          * thread that requested a lock was killed (and released a reference
233          * to the lock) before a reply from the server was received. In this
234          * case the lock is destroyed immediately after the upcall.
235          */
236         osc_lock_unhold(ols);
237         LASSERT(ols->ols_lock == NULL);
238         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
239                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
240
241         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
242 }
243
244 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
245                         struct ldlm_res_id *resname)
246 {
247         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
248         if (0) {
249                 /*
250                  * In the perfect world of the future, where ost servers talk
251                  * idif-fids...
252                  */
253                 fid_build_reg_res_name(fid, resname);
254         } else {
255                 /*
256                  * In reality, the ost server expects ->lsm_object_id and
257                  * ->lsm_object_seq in the resource name (resname).
258                  */
259                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
260                                    resname);
261         }
262 }
263
264 static void osc_lock_build_policy(const struct lu_env *env,
265                                   const struct cl_lock *lock,
266                                   ldlm_policy_data_t *policy)
267 {
268         const struct cl_lock_descr *d = &lock->cll_descr;
269
270         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
271         policy->l_extent.gid = d->cld_gid;
272 }
273
274 static int osc_enq2ldlm_flags(__u32 enqflags)
275 {
276         int result = 0;
277
278         LASSERT((enqflags & ~CEF_MASK) == 0);
279
280         if (enqflags & CEF_NONBLOCK)
281                 result |= LDLM_FL_BLOCK_NOWAIT;
282         if (enqflags & CEF_ASYNC)
283                 result |= LDLM_FL_HAS_INTENT;
284         if (enqflags & CEF_DISCARD_DATA)
285                 result |= LDLM_AST_DISCARD_DATA;
286         return result;
287 }
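/*
 * Usage sketch (illustrative only): osc_lock_enqueue() converts the cl-layer
 * enqueue flags into DLM flags with the helper above before enqueueing, e.g.:
 *
 *     __u32 enqflags = CEF_NONBLOCK | CEF_ASYNC;
 *     int   dlmflags = osc_enq2ldlm_flags(enqflags);
 *     // dlmflags == (LDLM_FL_BLOCK_NOWAIT | LDLM_FL_HAS_INTENT)
 *
 * Any bit outside CEF_MASK trips the LASSERT(), so only cl-layer CEF_* flags
 * may be passed here.
 */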
288
289 /**
290  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
291  * pointers. Initialized in osc_init().
292  */
293 cfs_spinlock_t osc_ast_guard;
294
295 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
296 {
297         struct osc_lock *olck;
298
299         lock_res_and_lock(dlm_lock);
300         cfs_spin_lock(&osc_ast_guard);
301         olck = dlm_lock->l_ast_data;
302         if (olck != NULL) {
303                 struct cl_lock *lock = olck->ols_cl.cls_lock;
304                 /*
305                  * If osc_lock holds a reference on ldlm lock, return it even
306                  * when cl_lock is in CLS_FREEING state. This way
307                  *
308                  *         osc_ast_data_get(dlmlock) == NULL
309                  *
310                  * guarantees that all osc references on dlmlock were
311                  * released. osc_dlm_blocking_ast0() relies on that.
312                  */
313                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
314                         cl_lock_get_trust(lock);
315                         lu_ref_add_atomic(&lock->cll_reference,
316                                           "ast", cfs_current());
317                 } else
318                         olck = NULL;
319         }
320         cfs_spin_unlock(&osc_ast_guard);
321         unlock_res_and_lock(dlm_lock);
322         return olck;
323 }
324
325 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
326 {
327         struct cl_lock *lock;
328
329         lock = olck->ols_cl.cls_lock;
330         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
331         cl_lock_put(env, lock);
332 }
333
334 /**
335  * Updates object attributes from a lock value block (lvb) received together
336  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
337  * logic.
338  *
339  * This can be optimized to not update attributes when lock is a result of a
340  * local match.
341  *
342  * Called under lock and resource spin-locks.
343  */
344 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
345                                 int rc)
346 {
347         struct ost_lvb    *lvb;
348         struct cl_object  *obj;
349         struct lov_oinfo  *oinfo;
350         struct cl_attr    *attr;
351         unsigned           valid;
352
353         ENTRY;
354
355         if (!(olck->ols_flags & LDLM_FL_LVB_READY))
356                 RETURN_EXIT;
357
358         lvb   = &olck->ols_lvb;
359         obj   = olck->ols_cl.cls_obj;
360         oinfo = cl2osc(obj)->oo_oinfo;
361         attr  = &osc_env_info(env)->oti_attr;
362         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
363         cl_lvb2attr(attr, lvb);
364
365         cl_object_attr_lock(obj);
366         if (rc == 0) {
367                 struct ldlm_lock  *dlmlock;
368                 __u64 size;
369
370                 dlmlock = olck->ols_lock;
371                 LASSERT(dlmlock != NULL);
372
373                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
374                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
375                 size = lvb->lvb_size;
376                 /* Extend KMS up to the end of this lock and no further.
377                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
378                 if (size > dlmlock->l_policy_data.l_extent.end)
379                         size = dlmlock->l_policy_data.l_extent.end + 1;
380                 if (size >= oinfo->loi_kms) {
381                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
382                                    ", kms="LPU64, lvb->lvb_size, size);
383                         valid |= CAT_KMS;
384                         attr->cat_kms = size;
385                 } else {
386                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
387                                    LPU64"; leaving kms="LPU64", end="LPU64,
388                                    lvb->lvb_size, oinfo->loi_kms,
389                                    dlmlock->l_policy_data.l_extent.end);
390                 }
391                 ldlm_lock_allow_match_locked(dlmlock);
392         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
393                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
394                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
395         } else
396                 valid = 0;
397
398         if (valid != 0)
399                 cl_object_attr_set(env, obj, attr, valid);
400
401         cl_object_attr_unlock(obj);
402
403         EXIT;
404 }
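/*
 * Worked example of the KMS clamping above, with hypothetical values: a lock
 * on the byte extent [0, 4095] covers 4096 bytes, so the known minimum size
 * may be raised at most to l_extent.end + 1.
 *
 *     __u64 lvb_size = 1000000;      // size reported in the LVB
 *     __u64 ext_end  = 4095;         // dlmlock->l_policy_data.l_extent.end
 *     __u64 kms      = lvb_size;
 *
 *     if (kms > ext_end)             // same clamp as in the code above
 *             kms = ext_end + 1;     // kms == 4096 here
 *
 * Only when the clamped value is >= the cached loi_kms is CAT_KMS added to
 * the attribute mask and written back through cl_object_attr_set().
 */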
405
406 /**
407  * Called when a lock is granted, from an upcall (when server returned a
408  * granted lock), or from completion AST, when server returned a blocked lock.
409  *
410  * Called under lock and resource spin-locks, that are released temporarily
411  * here.
412  */
413 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
414                              struct ldlm_lock *dlmlock, int rc)
415 {
416         struct ldlm_extent   *ext;
417         struct cl_lock       *lock;
418         struct cl_lock_descr *descr;
419
420         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
421
422         ENTRY;
423         if (olck->ols_state < OLS_GRANTED) {
424                 lock  = olck->ols_cl.cls_lock;
425                 ext   = &dlmlock->l_policy_data.l_extent;
426                 descr = &osc_env_info(env)->oti_descr;
427                 descr->cld_obj = lock->cll_descr.cld_obj;
428
429                 /* XXX check that ->l_granted_mode is valid. */
430                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
431                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
432                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
433                 descr->cld_gid   = ext->gid;
434                 /*
435                  * tell upper layers the extent of the lock that was actually
436                  * granted
437                  */
438                 olck->ols_state = OLS_GRANTED;
439                 osc_lock_lvb_update(env, olck, rc);
440
441                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
442                  * to take a semaphore on a parent lock. This is safe, because
443                  * spin-locks are needed to protect consistency of
444                  * dlmlock->l_*_mode and LVB, and we have finished processing
445                  * them. */
446                 unlock_res_and_lock(dlmlock);
447                 cl_lock_modify(env, lock, descr);
448                 cl_lock_signal(env, lock);
449                 LINVRNT(osc_lock_invariant(olck));
450                 lock_res_and_lock(dlmlock);
451         }
452         EXIT;
453 }
454
455 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
456
457 {
458         struct ldlm_lock *dlmlock;
459
460         ENTRY;
461
462         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
463         LASSERT(dlmlock != NULL);
464
465         lock_res_and_lock(dlmlock);
466         cfs_spin_lock(&osc_ast_guard);
467         LASSERT(dlmlock->l_ast_data == olck);
468         LASSERT(olck->ols_lock == NULL);
469         olck->ols_lock = dlmlock;
470         cfs_spin_unlock(&osc_ast_guard);
471
472         /*
473          * Lock might not be granted yet. In this case, the completion ast
474          * (osc_ldlm_completion_ast()) comes later and finishes lock
475          * granting.
476          */
477         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
478                 osc_lock_granted(env, olck, dlmlock, 0);
479         unlock_res_and_lock(dlmlock);
480
481         /*
482          * osc_enqueue_interpret() decrefs asynchronous locks, counter
483          * this.
484          */
485         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
486         olck->ols_hold = 1;
487
488         /* lock reference taken by ldlm_handle2lock_long() is owned by
489          * osc_lock and released in osc_lock_detach() */
490         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
491         olck->ols_has_ref = 1;
492 }
493
494 /**
495  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
496  * received from a server, or after osc_enqueue_base() matched a local DLM
497  * lock.
498  */
499 static int osc_lock_upcall(void *cookie, int errcode)
500 {
501         struct osc_lock         *olck  = cookie;
502         struct cl_lock_slice    *slice = &olck->ols_cl;
503         struct cl_lock          *lock  = slice->cls_lock;
504         struct lu_env           *env;
505         struct cl_env_nest       nest;
506
507         ENTRY;
508         env = cl_env_nested_get(&nest);
509         if (!IS_ERR(env)) {
510                 int rc;
511
512                 cl_lock_mutex_get(env, lock);
513
514                 LASSERT(lock->cll_state >= CLS_QUEUING);
515                 if (olck->ols_state == OLS_ENQUEUED) {
516                         olck->ols_state = OLS_UPCALL_RECEIVED;
517                         rc = ldlm_error2errno(errcode);
518                 } else if (olck->ols_state == OLS_CANCELLED) {
519                         rc = -EIO;
520                 } else {
521                         CERROR("Impossible state: %d\n", olck->ols_state);
522                         LBUG();
523                 }
524                 if (rc) {
525                         struct ldlm_lock *dlmlock;
526
527                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
528                         if (dlmlock != NULL) {
529                                 lock_res_and_lock(dlmlock);
530                                 cfs_spin_lock(&osc_ast_guard);
531                                 LASSERT(olck->ols_lock == NULL);
532                                 dlmlock->l_ast_data = NULL;
533                                 olck->ols_handle.cookie = 0ULL;
534                                 cfs_spin_unlock(&osc_ast_guard);
535                                 ldlm_lock_fail_match_locked(dlmlock);
536                                 unlock_res_and_lock(dlmlock);
537                                 LDLM_LOCK_PUT(dlmlock);
538                         }
539                 } else {
540                         if (olck->ols_glimpse)
541                                 olck->ols_glimpse = 0;
542                         osc_lock_upcall0(env, olck);
543                 }
544
545                 /* Error handling: some errors are tolerable. */
546                 if (olck->ols_locklessable && rc == -EUSERS) {
547                         /* This is a tolerable error; turn this lock into
548                          * a lockless lock.
549                          */
550                         osc_object_set_contended(cl2osc(slice->cls_obj));
551                         LASSERT(slice->cls_ops == &osc_lock_ops);
552
553                         /* Change this lock to ldlmlock-less lock. */
554                         osc_lock_to_lockless(env, olck, 1);
555                         olck->ols_state = OLS_GRANTED;
556                         rc = 0;
557                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
558                         osc_lock_lvb_update(env, olck, rc);
559                         cl_lock_delete(env, lock);
560                         /* Hide the error. */
561                         rc = 0;
562                 }
563
564                 if (rc == 0) {
565                         /* For the AGL case, the RPC sponsor may exit the
566                          * cl_lock processing without wait() being called
567                          * before the related OSC lock upcall(). So update the
568                          * lock status from the enqueue result in this upcall. */
569                         if (olck->ols_agl) {
570                                 lock->cll_flags |= CLF_FROM_UPCALL;
571                                 cl_wait_try(env, lock);
572                                 lock->cll_flags &= ~CLF_FROM_UPCALL;
573                                 if (!olck->ols_glimpse)
574                                         olck->ols_agl = 0;
575                         }
576                         cl_lock_signal(env, lock);
577                         /* del user for lock upcall cookie */
578                         cl_unuse_try(env, lock);
579                 } else {
580                         /* del user for lock upcall cookie */
581                         cl_lock_user_del(env, lock);
582                         cl_lock_error(env, lock, rc);
583                 }
584
585                 cl_lock_mutex_put(env, lock);
586
587                 /* release cookie reference, acquired by osc_lock_enqueue() */
588                 lu_ref_del(&lock->cll_reference, "upcall", lock);
589                 cl_lock_put(env, lock);
590
591                 cl_env_nested_put(&nest, env);
592         } else
593                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
594                 LBUG();
595         RETURN(errcode);
596 }
597
598 /**
599  * Core of osc_dlm_blocking_ast() logic.
600  */
601 static void osc_lock_blocking(const struct lu_env *env,
602                               struct ldlm_lock *dlmlock,
603                               struct osc_lock *olck, int blocking)
604 {
605         struct cl_lock *lock = olck->ols_cl.cls_lock;
606
607         LASSERT(olck->ols_lock == dlmlock);
608         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
609         LASSERT(!osc_lock_is_lockless(olck));
610
611         /*
612          * Lock might still be addref-ed here if, e.g., a blocking ast
613          * is sent for a failed lock.
614          */
615         osc_lock_unhold(olck);
616
617         if (blocking && olck->ols_state < OLS_BLOCKED)
618                 /*
619                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
620                  * because it recursively re-enters osc_lock_blocking(), with
621                  * the state set to OLS_CANCELLED.
622                  */
623                 olck->ols_state = OLS_BLOCKED;
624         /*
625          * cancel and destroy lock at least once no matter how blocking ast is
626          * entered (see comment above osc_ldlm_blocking_ast() for use
627          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
628          */
629         cl_lock_cancel(env, lock);
630         cl_lock_delete(env, lock);
631 }
632
633 /**
634  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
635  * and ldlm_lock caches.
636  */
637 static int osc_dlm_blocking_ast0(const struct lu_env *env,
638                                  struct ldlm_lock *dlmlock,
639                                  void *data, int flag)
640 {
641         struct osc_lock *olck;
642         struct cl_lock  *lock;
643         int result;
644         int cancel;
645
646         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
647
648         cancel = 0;
649         olck = osc_ast_data_get(dlmlock);
650         if (olck != NULL) {
651                 lock = olck->ols_cl.cls_lock;
652                 cl_lock_mutex_get(env, lock);
653                 LINVRNT(osc_lock_invariant(olck));
654                 if (olck->ols_ast_wait) {
655                         /* wake up osc_lock_use() */
656                         cl_lock_signal(env, lock);
657                         olck->ols_ast_wait = 0;
658                 }
659                 /*
660                  * Lock might have been canceled while this thread was
661                  * sleeping on the lock mutex, but olck is pinned in memory.
662                  */
663                 if (olck == dlmlock->l_ast_data) {
664                         /*
665                          * NOTE: DLM sends blocking AST's for failed locks
666                          *       (that are still in pre-OLS_GRANTED state)
667                          *       too, and they have to be canceled, otherwise
668                          *       the DLM lock is never destroyed and stays
669                          *       stuck in memory.
670                          *
671                          *       Alternatively, ldlm_cli_cancel() can be
672                          *       called here directly for osc_locks with
673                          *       ols_state < OLS_GRANTED to maintain an
674                          *       invariant that ->clo_cancel() is only called
675                          *       for locks that were granted.
676                          */
677                         LASSERT(data == olck);
678                         osc_lock_blocking(env, dlmlock,
679                                           olck, flag == LDLM_CB_BLOCKING);
680                 } else
681                         cancel = 1;
682                 cl_lock_mutex_put(env, lock);
683                 osc_ast_data_put(env, olck);
684         } else
685                 /*
686                  * DLM lock exists, but there is no cl_lock attached to it.
687                  * This is a `normal' race. cl_object and its cl_lock's can be
688                  * removed by memory pressure, together with all pages.
689                  */
690                 cancel = (flag == LDLM_CB_BLOCKING);
691
692         if (cancel) {
693                 struct lustre_handle *lockh;
694
695                 lockh = &osc_env_info(env)->oti_handle;
696                 ldlm_lock2handle(dlmlock, lockh);
697                 result = ldlm_cli_cancel(lockh);
698         } else
699                 result = 0;
700         return result;
701 }
702
703 /**
704  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
705  * some other lock, or is canceled. This function is installed as a
706  * ldlm_lock::l_blocking_ast() for client extent locks.
707  *
708  * Control flow is tricky, because ldlm uses the same call-back
709  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
710  *
711  * \param dlmlock lock for which ast occurred.
712  *
713  * \param new description of a conflicting lock in case of blocking ast.
714  *
715  * \param data value of dlmlock->l_ast_data
716  *
717  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
718  *             cancellation and blocking ast's.
719  *
720  * Possible use cases:
721  *
722  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
723  *       lock due to lock lru pressure, or explicit user request to purge
724  *       locks.
725  *
726  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
727  *       us that dlmlock conflicts with another lock that some client is
728  *       enqueueing. Lock is canceled.
729  *
730  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
731  *             ldlm_cli_cancel() that calls
732  *
733  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
734  *
735  *             recursively entering osc_ldlm_blocking_ast().
736  *
737  *     - client cancels the lock voluntarily (e.g., as part of early cancellation):
738  *
739  *           cl_lock_cancel()->
740  *             osc_lock_cancel()->
741  *               ldlm_cli_cancel()->
742  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
743  *
744  */
745 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
746                                  struct ldlm_lock_desc *new, void *data,
747                                  int flag)
748 {
749         struct lu_env     *env;
750         struct cl_env_nest nest;
751         int                result;
752
753         /*
754          * This can be called in the context of outer IO, e.g.,
755          *
756          *     cl_enqueue()->...
757          *       ->osc_enqueue_base()->...
758          *         ->ldlm_prep_elc_req()->...
759          *           ->ldlm_cancel_callback()->...
760          *             ->osc_ldlm_blocking_ast()
761          *
762          * new environment has to be created to not corrupt outer context.
763          */
764         env = cl_env_nested_get(&nest);
765         if (!IS_ERR(env)) {
766                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
767                 cl_env_nested_put(&nest, env);
768         } else {
769                 result = PTR_ERR(env);
770                 /*
771                  * XXX This should never happen, as cl_lock is
772                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
773                  * should be used.
774                  */
775                 LBUG();
776         }
777         if (result != 0) {
778                 if (result == -ENODATA)
779                         result = 0;
780                 else
781                         CERROR("BAST failed: %d\n", result);
782         }
783         return result;
784 }
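/*
 * Note on the cl_env_nested_get()/cl_env_nested_put() pattern used above and
 * in the callbacks below: an AST may run in the context of an outer IO that
 * already owns an environment, so a fresh nested environment is taken for the
 * duration of the callback.  A minimal sketch of the pattern:
 *
 *     struct cl_env_nest nest;
 *     struct lu_env *env = cl_env_nested_get(&nest);
 *
 *     if (!IS_ERR(env)) {
 *             // ... cl_lock work that needs an environment ...
 *             cl_env_nested_put(&nest, env);
 *     } else {
 *             // PTR_ERR(env); callers here either LBUG() or return the error
 *     }
 */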
785
786 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
787                                    int flags, void *data)
788 {
789         struct cl_env_nest nest;
790         struct lu_env     *env;
791         struct osc_lock   *olck;
792         struct cl_lock    *lock;
793         int result;
794         int dlmrc;
795
796         /* first, do dlm part of the work */
797         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
798         /* then, notify cl_lock */
799         env = cl_env_nested_get(&nest);
800         if (!IS_ERR(env)) {
801                 olck = osc_ast_data_get(dlmlock);
802                 if (olck != NULL) {
803                         lock = olck->ols_cl.cls_lock;
804                         cl_lock_mutex_get(env, lock);
805                         /*
806                          * ldlm_handle_cp_callback() copied LVB from request
807                          * to lock->l_lvb_data, store it in osc_lock.
808                          */
809                         LASSERT(dlmlock->l_lvb_data != NULL);
810                         lock_res_and_lock(dlmlock);
811                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
812                         if (olck->ols_lock == NULL) {
813                                 /*
814                                  * upcall (osc_lock_upcall()) hasn't yet been
815                                  * called. Do nothing now, upcall will bind
816                                  * olck to dlmlock and signal the waiters.
817                                  *
818                                  * This maintains an invariant that osc_lock
819                                  * and ldlm_lock are always bound when
820                                  * osc_lock is in OLS_GRANTED state.
821                                  */
822                         } else if (dlmlock->l_granted_mode ==
823                                    dlmlock->l_req_mode) {
824                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
825                         }
826                         unlock_res_and_lock(dlmlock);
827
828                         if (dlmrc != 0) {
829                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
830                                               "dlmlock returned %d\n", dlmrc);
831                                 cl_lock_error(env, lock, dlmrc);
832                         }
833                         cl_lock_mutex_put(env, lock);
834                         osc_ast_data_put(env, olck);
835                         result = 0;
836                 } else
837                         result = -ELDLM_NO_LOCK_DATA;
838                 cl_env_nested_put(&nest, env);
839         } else
840                 result = PTR_ERR(env);
841         return dlmrc ?: result;
842 }
843
844 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
845 {
846         struct ptlrpc_request  *req  = data;
847         struct osc_lock        *olck;
848         struct cl_lock         *lock;
849         struct cl_object       *obj;
850         struct cl_env_nest      nest;
851         struct lu_env          *env;
852         struct ost_lvb         *lvb;
853         struct req_capsule     *cap;
854         int                     result;
855
856         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
857
858         env = cl_env_nested_get(&nest);
859         if (!IS_ERR(env)) {
860                 /* osc_ast_data_get() has to go after environment is
861                  * allocated, because osc_ast_data_get() acquires a
862                  * reference to a lock, and that reference can only be
863                  * released with an environment.
864                  */
865                 olck = osc_ast_data_get(dlmlock);
866                 if (olck != NULL) {
867                         lock = olck->ols_cl.cls_lock;
868                         /* Do not grab the mutex of cl_lock for glimpse.
869                          * See LU-1274 for details.
870                          * BTW, it's okay for cl_lock to be cancelled during
871                          * this period because server can handle this race.
872                          * See ldlm_server_glimpse_ast() for details.
873                          * cl_lock_mutex_get(env, lock); */
874                         cap = &req->rq_pill;
875                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
876                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
877                                              sizeof *lvb);
878                         result = req_capsule_server_pack(cap);
879                         if (result == 0) {
880                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
881                                 obj = lock->cll_descr.cld_obj;
882                                 result = cl_object_glimpse(env, obj, lvb);
883                         }
884                         osc_ast_data_put(env, olck);
885                 } else {
886                         /*
887                          * These errors are normal races, so we don't want to
888                          * fill the console with messages by calling
889                          * ptlrpc_error()
890                          */
891                         lustre_pack_reply(req, 1, NULL, NULL);
892                         result = -ELDLM_NO_LOCK_DATA;
893                 }
894                 cl_env_nested_put(&nest, env);
895         } else
896                 result = PTR_ERR(env);
897         req->rq_status = result;
898         return result;
899 }
900
901 static unsigned long osc_lock_weigh(const struct lu_env *env,
902                                     const struct cl_lock_slice *slice)
903 {
904         /*
905          * don't need to grab coh_page_guard since we don't care about the
906          * exact number of pages.
907          */
908         return cl_object_header(slice->cls_obj)->coh_pages;
909 }
910
911 /**
912  * Get the weight of dlm lock for early cancellation.
913  *
914  * XXX: it should return the pages covered by this \a dlmlock.
915  */
916 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
917 {
918         struct cl_env_nest       nest;
919         struct lu_env           *env;
920         struct osc_lock         *lock;
921         struct cl_lock          *cll;
922         unsigned long            weight;
923         ENTRY;
924
925         cfs_might_sleep();
926         /*
927          * osc_ldlm_weigh_ast has a complex context since it might be called
928          * because of lock cancellation or from user input. We have to make
929          * a new environment for it. It is probably safe to use the upper
930          * context because cl_lock_put() doesn't modify environment variables,
931          * but allocate a nested one just in case.
932          */
933         env = cl_env_nested_get(&nest);
934         if (IS_ERR(env))
935                 /* Most likely due to lack of memory; tend to eliminate this lock */
936                 RETURN(0);
937
938         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
939         lock = osc_ast_data_get(dlmlock);
940         if (lock == NULL) {
941                 /* cl_lock was destroyed because of memory pressure.
942                  * It is reasonable to assign this type of lock
943                  * a lower cost.
944                  */
945                 GOTO(out, weight = 0);
946         }
947
948         cll = lock->ols_cl.cls_lock;
949         cl_lock_mutex_get(env, cll);
950         weight = cl_lock_weigh(env, cll);
951         cl_lock_mutex_put(env, cll);
952         osc_ast_data_put(env, lock);
953         EXIT;
954
955 out:
956         cl_env_nested_put(&nest, env);
957         return weight;
958 }
959
960 static void osc_lock_build_einfo(const struct lu_env *env,
961                                  const struct cl_lock *clock,
962                                  struct osc_lock *lock,
963                                  struct ldlm_enqueue_info *einfo)
964 {
965         enum cl_lock_mode mode;
966
967         mode = clock->cll_descr.cld_mode;
968         if (mode == CLM_PHANTOM)
969                 /*
970                  * For now, enqueue all glimpse locks in read mode. In the
971                  * future, client might choose to enqueue LCK_PW lock for
972                  * glimpse on a file opened for write.
973                  */
974                 mode = CLM_READ;
975
976         einfo->ei_type   = LDLM_EXTENT;
977         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
978         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
979         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
980         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
981         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
982         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
983 }
984
985 /**
986  * Determine if the lock should be converted into a lockless lock.
987  *
988  * Steps to check:
989  * - whether the lock has an explicit requirement for a non-lockless lock;
990  * - the io lock request type ci_lockreq;
991  * - send the enqueue rpc to the ost to make the final decision;
992  * - special treatment for the truncate lockless lock.
993  *
994  *  Additional policy can be implemented here, e.g., never do lockless-io
995  *  for large extents.
996  */
997 static void osc_lock_to_lockless(const struct lu_env *env,
998                                  struct osc_lock *ols, int force)
999 {
1000         struct cl_lock_slice *slice = &ols->ols_cl;
1001         struct cl_lock *lock        = slice->cls_lock;
1002
1003         LASSERT(ols->ols_state == OLS_NEW ||
1004                 ols->ols_state == OLS_UPCALL_RECEIVED);
1005
1006         if (force) {
1007                 ols->ols_locklessable = 1;
1008                 LASSERT(cl_lock_is_mutexed(lock));
1009                 slice->cls_ops = &osc_lock_lockless_ops;
1010         } else {
1011                 struct osc_io *oio     = osc_env_io(env);
1012                 struct cl_io  *io      = oio->oi_cl.cis_io;
1013                 struct cl_object *obj  = slice->cls_obj;
1014                 struct osc_object *oob = cl2osc(obj);
1015                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1016                 struct obd_connect_data *ocd;
1017
1018                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1019                         io->ci_lockreq == CILR_MAYBE ||
1020                         io->ci_lockreq == CILR_NEVER);
1021
1022                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1023                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
1024                                 (io->ci_lockreq == CILR_MAYBE) &&
1025                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1026                 if (io->ci_lockreq == CILR_NEVER ||
1027                         /* lockless IO */
1028                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1029                         /* lockless truncate */
1030                     (cl_io_is_trunc(io) &&
1031                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1032                       osd->od_lockless_truncate)) {
1033                         ols->ols_locklessable = 1;
1034                         slice->cls_ops = &osc_lock_lockless_ops;
1035                 }
1036         }
1037         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1038 }
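/*
 * Illustrative restatement of the non-forced decision above (no new logic,
 * just the predicate spelled out): with srvlock and trunclock denoting the
 * OBD_CONNECT_SRVLOCK and OBD_CONNECT_TRUNCLOCK connect flags, the lock goes
 * lockless when
 *
 *     io->ci_lockreq == CILR_NEVER ||
 *     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
 *     (cl_io_is_trunc(io) && trunclock && osd->od_lockless_truncate)
 *
 * where ols_locklessable itself requires a CILR_MAYBE request that is not a
 * SETATTR and a server advertising srvlock.  Glimpse locks are never made
 * lockless, as the trailing LASSERT(ergo(...)) verifies.
 */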
1039
1040 static int osc_lock_compatible(const struct osc_lock *qing,
1041                                const struct osc_lock *qed)
1042 {
1043         enum cl_lock_mode qing_mode;
1044         enum cl_lock_mode qed_mode;
1045
1046         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1047         if (qed->ols_glimpse &&
1048             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1049                 return 1;
1050
1051         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1052         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1053 }
1054
1055 /**
1056  * Cancel all conflicting locks and wait for them to be destroyed.
1057  *
1058  * This function is used for two purposes:
1059  *
1060  *     - early cancel all conflicting locks before starting IO, and
1061  *
1062  *     - guarantee that pages added to the page cache by lockless IO are never
1063  *       covered by locks other than lockless IO lock, and, hence, are not
1064  *       visible to other threads.
1065  */
1066 static int osc_lock_enqueue_wait(const struct lu_env *env,
1067                                  const struct osc_lock *olck)
1068 {
1069         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1070         struct cl_lock_descr    *descr   = &lock->cll_descr;
1071         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1072         struct cl_lock          *scan;
1073         struct cl_lock          *conflict= NULL;
1074         int lockless                     = osc_lock_is_lockless(olck);
1075         int rc                           = 0;
1076         ENTRY;
1077
1078         LASSERT(cl_lock_is_mutexed(lock));
1079
1080         /* make it enqueue anyway for glimpse lock, because we actually
1081          * don't need to cancel any conflicting locks. */
1082         if (olck->ols_glimpse)
1083                 return 0;
1084
1085         cfs_spin_lock(&hdr->coh_lock_guard);
1086         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1087                 struct cl_lock_descr *cld = &scan->cll_descr;
1088                 const struct osc_lock *scan_ols;
1089
1090                 if (scan == lock)
1091                         break;
1092
1093                 if (scan->cll_state < CLS_QUEUING ||
1094                     scan->cll_state == CLS_FREEING ||
1095                     cld->cld_start > descr->cld_end ||
1096                     cld->cld_end < descr->cld_start)
1097                         continue;
1098
1099                 /* overlapped and living locks. */
1100
1101                 /* We're not supposed to give up group lock. */
1102                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1103                         LASSERT(descr->cld_mode != CLM_GROUP ||
1104                                 descr->cld_gid != scan->cll_descr.cld_gid);
1105                         continue;
1106                 }
1107
1108                 scan_ols = osc_lock_at(scan);
1109
1110                 /* We need to cancel the compatible locks if we're enqueuing
1111                  * a lockless lock, for example:
1112                  * imagine that client has PR lock on [0, 1000], and thread T0
1113                  * is doing lockless IO in [500, 1500] region. Concurrent
1114                  * thread T1 can see lockless data in [500, 1000], which is
1115                  * wrong, because these data are possibly stale. */
1116                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1117                         continue;
1118
1119                 /* Now @scan is conflicting with @lock; this means the current
1120                  * thread has to sleep until @scan is destroyed. */
1121                 if (scan_ols->ols_owner == osc_env_io(env)) {
1122                         CERROR("DEADLOCK POSSIBLE!\n");
1123                         CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
1124                         CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
1125                         libcfs_debug_dumpstack(NULL);
1126                 }
1127                 cl_lock_get_trust(scan);
1128                 conflict = scan;
1129                 break;
1130         }
1131         cfs_spin_unlock(&hdr->coh_lock_guard);
1132
1133         if (conflict) {
1134                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1135                         /* we want a group lock but a previous lock request
1136                          * conflicts; we do not wait but return 0 so the
1137                          * request is sent to the server
1138                          */
1139                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1140                                            "with %p, no wait, send to server\n",
1141                                lock, conflict);
1142                         cl_lock_put(env, conflict);
1143                         rc = 0;
1144                 } else {
1145                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1146                                            "will wait\n",
1147                                lock, conflict);
1148                         LASSERT(lock->cll_conflict == NULL);
1149                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1150                                    lock);
1151                         lock->cll_conflict = conflict;
1152                         rc = CLO_WAIT;
1153                 }
1154         }
1155         RETURN(rc);
1156 }
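/*
 * The conflict scan above skips locks whose extents do not overlap the one
 * being enqueued.  A standalone sketch of that interval test, assuming
 * inclusive [start, end] page ranges as in struct cl_lock_descr:
 *
 *     static inline int cld_extents_overlap(pgoff_t s1, pgoff_t e1,
 *                                           pgoff_t s2, pgoff_t e2)
 *     {
 *             return !(s1 > e2 || e1 < s2);
 *     }
 *
 * which is exactly the negation of the "cld_start > descr->cld_end ||
 * cld_end < descr->cld_start" continue-condition in the loop.
 */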
1157
1158 /**
1159  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1160  * layer. This initiates ldlm enqueue:
1161  *
1162  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1163  *
1164  *     - calls osc_enqueue_base() to do actual enqueue.
1165  *
1166  * osc_enqueue_base() is supplied with an upcall function that is executed
1167  * when lock is received either after a local cached ldlm lock is matched, or
1168  * when a reply from the server is received.
1169  *
1170  * This function does not wait for the network communication to complete.
1171  */
1172 static int osc_lock_enqueue(const struct lu_env *env,
1173                             const struct cl_lock_slice *slice,
1174                             struct cl_io *unused, __u32 enqflags)
1175 {
1176         struct osc_lock          *ols     = cl2osc_lock(slice);
1177         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1178         int result;
1179         ENTRY;
1180
1181         LASSERT(cl_lock_is_mutexed(lock));
1182         LASSERTF(ols->ols_state == OLS_NEW,
1183                  "Impossible state: %d\n", ols->ols_state);
1184
1185         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1186         if (enqflags & CEF_AGL) {
1187                 ols->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
1188                 ols->ols_agl = 1;
1189         }
1190         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1191                 ols->ols_glimpse = 1;
1192         if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
1193                 /* try to convert this lock to a lockless lock */
1194                 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1195
1196         result = osc_lock_enqueue_wait(env, ols);
1197         if (result == 0) {
1198                 if (!osc_lock_is_lockless(ols)) {
1199                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1200                         struct osc_thread_info   *info = osc_env_info(env);
1201                         struct ldlm_res_id       *resname = &info->oti_resname;
1202                         ldlm_policy_data_t       *policy = &info->oti_policy;
1203                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1204
1205                         if (ols->ols_locklessable)
1206                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1207
1208                         /* a reference for lock, passed as an upcall cookie */
1209                         cl_lock_get(lock);
1210                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1211                         /* a user for lock also */
1212                         cl_lock_user_add(env, lock);
1213                         ols->ols_state = OLS_ENQUEUED;
1214
1215                         /*
1216                          * XXX: this is possible blocking point as
1217                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1218                          * LDLM_CP_CALLBACK.
1219                          */
1220                         osc_lock_build_res(env, obj, resname);
1221                         osc_lock_build_policy(env, lock, policy);
1222                         result = osc_enqueue_base(osc_export(obj), resname,
1223                                           &ols->ols_flags, policy,
1224                                           &ols->ols_lvb,
1225                                           obj->oo_oinfo->loi_kms_valid,
1226                                           osc_lock_upcall,
1227                                           ols, einfo, &ols->ols_handle,
1228                                           PTLRPCD_SET, 1, ols->ols_agl);
1229                         if (result != 0) {
1230                                 cl_lock_user_del(env, lock);
1231                                 lu_ref_del(&lock->cll_reference,
1232                                            "upcall", lock);
1233                                 cl_lock_put(env, lock);
1234                                 if (unlikely(result == -ECANCELED)) {
1235                                         ols->ols_state = OLS_NEW;
1236                                         result = 0;
1237                                 }
1238                         }
1239                 } else {
1240                         ols->ols_state = OLS_GRANTED;
1241                         ols->ols_owner = osc_env_io(env);
1242                 }
1243         }
1244         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1245         RETURN(result);
1246 }
1247
1248 static int osc_lock_wait(const struct lu_env *env,
1249                          const struct cl_lock_slice *slice)
1250 {
1251         struct osc_lock *olck = cl2osc_lock(slice);
1252         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1253
1254         LINVRNT(osc_lock_invariant(olck));
1255
1256         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
1257                 if (olck->ols_flags & LDLM_FL_LVB_READY) {
1258                         return 0;
1259                 } else if (olck->ols_agl) {
1260                         if (lock->cll_flags & CLF_FROM_UPCALL)
1261                                 /* It is from enqueue RPC reply upcall for
1262                                  * updating state. Do not re-enqueue. */
1263                                 return -ENAVAIL;
1264                         else
1265                                 olck->ols_state = OLS_NEW;
1266                 } else {
1267                         LASSERT(lock->cll_error);
1268                         return lock->cll_error;
1269                 }
1270         }
1271
1272         if (olck->ols_state == OLS_NEW) {
1273                 int rc;
1274
1275                 LASSERT(olck->ols_agl);
1276
1277                 rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
1278                 if (rc != 0)
1279                         return rc;
1280                 else
1281                         return CLO_REENQUEUED;
1282         }
1283
1284         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1285                      lock->cll_error == 0, olck->ols_lock != NULL));
1286
1287         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1288 }
1289
1290 /**
1291  * An implementation of cl_lock_operations::clo_use() method that pins cached
1292  * lock.
1293  */
1294 static int osc_lock_use(const struct lu_env *env,
1295                         const struct cl_lock_slice *slice)
1296 {
1297         struct osc_lock *olck = cl2osc_lock(slice);
1298         int rc;
1299
1300         LASSERT(!olck->ols_hold);
1301
1302         /*
1303          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1304          * flag is not set. This protects us from a concurrent blocking ast.
1305          */
1306         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1307         if (rc == 0) {
1308                 olck->ols_hold = 1;
1309                 olck->ols_state = OLS_GRANTED;
1310         } else {
1311                 struct cl_lock *lock;
1312
1313                 /*
1314                  * Lock is being cancelled somewhere within
1315                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1316                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1317                  * cl_lock mutex.
1318                  */
1319                 lock = slice->cls_lock;
1320                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1321                 LASSERT(lock->cll_users > 0);
1322                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1323                  * lock. */
1324                 olck->ols_ast_wait = 1;
1325                 rc = CLO_WAIT;
1326         }
1327         return rc;
1328 }
1329
1330 static int osc_lock_flush(struct osc_lock *ols, int discard)
1331 {
1332         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1333         struct cl_env_nest    nest;
1334         struct lu_env        *env;
1335         int result = 0;
1336
1337         env = cl_env_nested_get(&nest);
1338         if (!IS_ERR(env)) {
1339                 result = cl_lock_page_out(env, lock, discard);
1340                 cl_env_nested_put(&nest, env);
1341         } else
1342                 result = PTR_ERR(env);
1343         if (result == 0) {
1344                 ols->ols_flush = 1;
1345                 LINVRNT(!osc_lock_has_pages(ols));
1346         }
1347         return result;
1348 }
1349
1350 /**
1351  * Implements the cl_lock_operations::clo_cancel() method for the osc layer.
1352  * This is called (as part of cl_lock_cancel()) when the lock is cancelled
1353  * either voluntarily (LRU pressure, early cancellation, umount, etc.) or due
1354  * to a conflict with some other lock somewhere in the cluster. This function
1355  * does the following:
1356  *
1357  *     - invalidates all pages protected by this lock (after sending dirty
1358  *       ones to the server, as necessary);
1359  *
1360  *     - decrefs the underlying ldlm lock;
1361  *
1362  *     - cancels the ldlm lock (ldlm_cli_cancel()).
1363  */
1364 static void osc_lock_cancel(const struct lu_env *env,
1365                             const struct cl_lock_slice *slice)
1366 {
1367         struct cl_lock   *lock    = slice->cls_lock;
1368         struct osc_lock  *olck    = cl2osc_lock(slice);
1369         struct ldlm_lock *dlmlock = olck->ols_lock;
1370         int               result  = 0;
1371         int               discard;
1372
1373         LASSERT(cl_lock_is_mutexed(lock));
1374         LINVRNT(osc_lock_invariant(olck));
1375
1376         if (dlmlock != NULL) {
1377                 int do_cancel;
1378
1379                 discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
1380                 result = osc_lock_flush(olck, discard);
1381                 osc_lock_unhold(olck);
1382
1383                 lock_res_and_lock(dlmlock);
1384                 /* Now that we are the only user of the dlm read/write
1385                  * reference, ->l_readers + ->l_writers should normally be
1386                  * zero.  However, there is a corner case; see bug 18829
1387                  * for details. */
1388                 do_cancel = (dlmlock->l_readers == 0 &&
1389                              dlmlock->l_writers == 0);
1390                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1391                 unlock_res_and_lock(dlmlock);
1392                 if (do_cancel)
1393                         result = ldlm_cli_cancel(&olck->ols_handle);
1394                 if (result < 0)
1395                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1396                                       "lock %p cancel failed with error (%d)\n",
1397                                       lock, result);
1398         }
1399         olck->ols_state = OLS_CANCELLED;
1400         olck->ols_flags &= ~LDLM_FL_LVB_READY;
1401         osc_lock_detach(env, olck);
1402 }
1403
1404 #ifdef INVARIANT_CHECK
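     /**
      * Debug-only callback for cl_page_gang_lookup(), used by
      * osc_lock_has_pages() below: a page found under the lock passed in
      * \a cbdata is tolerated only if that lock is a read lock and the page
      * is also covered by some other lock; otherwise the page is reported
      * and the scan is aborted.
      */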
1405 static int check_cb(const struct lu_env *env, struct cl_io *io,
1406                     struct cl_page *page, void *cbdata)
1407 {
1408         struct cl_lock *lock = cbdata;
1409
1410         if (lock->cll_descr.cld_mode == CLM_READ) {
1411                 struct cl_lock *tmp;
1412                 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1413                                      page, lock, 1, 0);
1414                 if (tmp != NULL) {
1415                         cl_lock_put(env, tmp);
1416                         return CLP_GANG_OKAY;
1417                 }
1418         }
1419
1420         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1421         CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1422         return CLP_GANG_ABORT;
1423 }
1424
1425 /**
1426  * Returns true iff there are pages under \a olck not protected by other
1427  * locks.
1428  */
1429 static int osc_lock_has_pages(struct osc_lock *olck)
1430 {
1431         struct cl_lock       *lock;
1432         struct cl_lock_descr *descr;
1433         struct cl_object     *obj;
1434         struct osc_object    *oob;
1435         struct cl_env_nest    nest;
1436         struct cl_io         *io;
1437         struct lu_env        *env;
1438         int                   result;
1439
1440         env = cl_env_nested_get(&nest);
1441         if (IS_ERR(env))
1442                 return 0;
1443
1444         obj   = olck->ols_cl.cls_obj;
1445         oob   = cl2osc(obj);
1446         io    = &oob->oo_debug_io;
1447         lock  = olck->ols_cl.cls_lock;
1448         descr = &lock->cll_descr;
1449
1450         cfs_mutex_lock(&oob->oo_debug_mutex);
1451
1452         io->ci_obj = cl_object_top(obj);
1453         cl_io_init(env, io, CIT_MISC, io->ci_obj);
1454         do {
1455                 result = cl_page_gang_lookup(env, obj, io,
1456                                              descr->cld_start, descr->cld_end,
1457                                              check_cb, (void *)lock);
1458                 if (result == CLP_GANG_ABORT)
1459                         break;
1460                 if (result == CLP_GANG_RESCHED)
1461                         cfs_cond_resched();
1462         } while (result != CLP_GANG_OKAY);
1463         cl_io_fini(env, io);
1464         cfs_mutex_unlock(&oob->oo_debug_mutex);
1465         cl_env_nested_put(&nest, env);
1466
1467         return (result == CLP_GANG_ABORT);
1468 }
1469 #else
1470 static int osc_lock_has_pages(struct osc_lock *olck)
1471 {
1472         return 0;
1473 }
1474 #endif /* INVARIANT_CHECK */
1475
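     /**
      * Implements cl_lock_operations::clo_delete() for the osc layer: drops
      * the hold on, and detaches from, the underlying ldlm lock.  Glimpse
      * locks never hold a reference on an ldlm lock, so there is nothing to
      * release for them.
      */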
1476 static void osc_lock_delete(const struct lu_env *env,
1477                             const struct cl_lock_slice *slice)
1478 {
1479         struct osc_lock *olck;
1480
1481         olck = cl2osc_lock(slice);
1482         if (olck->ols_glimpse) {
1483                 LASSERT(!olck->ols_hold);
1484                 LASSERT(!olck->ols_lock);
1485                 return;
1486         }
1487
1488         LINVRNT(osc_lock_invariant(olck));
1489         LINVRNT(!osc_lock_has_pages(olck));
1490
1491         osc_lock_unhold(olck);
1492         osc_lock_detach(env, olck);
1493 }
1494
1495 /**
1496  * Implements the cl_lock_operations::clo_state() method for the osc layer.
1497  *
1498  * Maintains the osc_lock::ols_owner field.
1499  *
1500  * This assumes that the lock always enters CLS_HELD (from some other state)
1501  * in the same IO context as the one that requested the lock. This should not
1502  * be a problem, because the context is by definition shared by all activity
1503  * pertaining to the same high-level IO.
1504  */
1505 static void osc_lock_state(const struct lu_env *env,
1506                            const struct cl_lock_slice *slice,
1507                            enum cl_lock_state state)
1508 {
1509         struct osc_lock *lock = cl2osc_lock(slice);
1510
1511         /*
1512          * XXX multiple io contexts can use the lock at the same time.
1513          */
1514         LINVRNT(osc_lock_invariant(lock));
1515         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1516                 struct osc_io *oio = osc_env_io(env);
1517
1518                 LASSERT(lock->ols_owner == NULL);
1519                 lock->ols_owner = oio;
1520         } else if (state != CLS_HELD)
1521                 lock->ols_owner = NULL;
1522 }
1523
1524 static int osc_lock_print(const struct lu_env *env, void *cookie,
1525                           lu_printer_t p, const struct cl_lock_slice *slice)
1526 {
1527         struct osc_lock *lock = cl2osc_lock(slice);
1528
1529         /*
1530          * XXX print ldlm lock and einfo properly.
1531          */
1532         (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1533              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1534              lock->ols_state, lock->ols_owner);
1535         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1536         return 0;
1537 }
1538
1539 static int osc_lock_fits_into(const struct lu_env *env,
1540                               const struct cl_lock_slice *slice,
1541                               const struct cl_lock_descr *need,
1542                               const struct cl_io *io)
1543 {
1544         struct osc_lock *ols = cl2osc_lock(slice);
1545
1546         if (need->cld_enq_flags & CEF_NEVER)
1547                 return 0;
1548
1549         if (need->cld_mode == CLM_PHANTOM) {
1550                 if (ols->ols_agl)
1551                         return !(ols->ols_state > OLS_RELEASED);
1552
1553                 /*
1554                  * Note: a QUEUED lock cannot be matched here, otherwise
1555                  * it might cause deadlocks.
1556                  * Consider a read process:
1557                  * P1: enqueues a read lock, creating sublock1.
1558                  * P2: enqueues a write lock, creating sublock2 (which
1559                  *     conflicts with sublock1).
1560                  * P1: the read lock is granted.
1561                  * P1: enqueues a glimpse lock (while holding sublock1 for
1562                  *     read), which matches sublock2 and waits for it to be
1563                  *     granted.  But sublock2 can never be granted, because
1564                  *     P1 will not release sublock1.  Deadlock!
1565                  */
1566                 if (ols->ols_state < OLS_GRANTED ||
1567                     ols->ols_state > OLS_RELEASED)
1568                         return 0;
1569         } else if (need->cld_enq_flags & CEF_MUST) {
1570                 /*
1571                  * If the lock has never been enqueued, it cannot be matched,
1572                  * because the enqueue process brings in information that is
1573                  * needed to determine things such as locklessness,
1574                  * CEF_MUST, etc.
1575                  */
1576                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1577                     ols->ols_locklessable)
1578                         return 0;
1579         }
1580         return 1;
1581 }
1582
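     /*
      * Operations for regular, ldlm-backed osc locks.  A lock may later be
      * switched to the lockless variant (osc_lock_lockless_ops below, see
      * osc_lock_to_lockless()).
      */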
1583 static const struct cl_lock_operations osc_lock_ops = {
1584         .clo_fini    = osc_lock_fini,
1585         .clo_enqueue = osc_lock_enqueue,
1586         .clo_wait    = osc_lock_wait,
1587         .clo_unuse   = osc_lock_unuse,
1588         .clo_use     = osc_lock_use,
1589         .clo_delete  = osc_lock_delete,
1590         .clo_state   = osc_lock_state,
1591         .clo_cancel  = osc_lock_cancel,
1592         .clo_weigh   = osc_lock_weigh,
1593         .clo_print   = osc_lock_print,
1594         .clo_fits_into = osc_lock_fits_into,
1595 };
1596
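     /**
      * clo_unuse() for lockless locks: with no ldlm lock to cache, an unused
      * lockless lock is simply cancelled and deleted.
      */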
1597 static int osc_lock_lockless_unuse(const struct lu_env *env,
1598                                    const struct cl_lock_slice *slice)
1599 {
1600         struct osc_lock *ols = cl2osc_lock(slice);
1601         struct cl_lock *lock = slice->cls_lock;
1602
1603         LASSERT(ols->ols_state == OLS_GRANTED);
1604         LINVRNT(osc_lock_invariant(ols));
1605
1606         cl_lock_cancel(env, lock);
1607         cl_lock_delete(env, lock);
1608         return 0;
1609 }
1610
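     /**
      * clo_cancel() for lockless locks: there is no ldlm lock to cancel, so
      * cancellation amounts to flushing the covered pages and marking the
      * lock as cancelled.
      */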
1611 static void osc_lock_lockless_cancel(const struct lu_env *env,
1612                                      const struct cl_lock_slice *slice)
1613 {
1614         struct osc_lock   *ols  = cl2osc_lock(slice);
1615         int result;
1616
1617         result = osc_lock_flush(ols, 0);
1618         if (result)
1619                 CERROR("Pages for lockless lock %p were not purged (%d)\n",
1620                        ols, result);
1621         ols->ols_state = OLS_CANCELLED;
1622 }
1623
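     /**
      * clo_wait() for lockless locks: by the time this is called the enqueue
      * upcall has already been received, so only an error recorded on the
      * cl_lock (if any) needs to be returned.
      */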
1624 static int osc_lock_lockless_wait(const struct lu_env *env,
1625                                   const struct cl_lock_slice *slice)
1626 {
1627         struct osc_lock *olck = cl2osc_lock(slice);
1628         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1629
1630         LINVRNT(osc_lock_invariant(olck));
1631         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1632
1633         return lock->cll_error;
1634 }
1635
1636 static void osc_lock_lockless_state(const struct lu_env *env,
1637                                     const struct cl_lock_slice *slice,
1638                                     enum cl_lock_state state)
1639 {
1640         struct osc_lock *lock = cl2osc_lock(slice);
1641
1642         LINVRNT(osc_lock_invariant(lock));
1643         if (state == CLS_HELD) {
1644                 struct osc_io *oio  = osc_env_io(env);
1645
1646                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1647                 lock->ols_owner = oio;
1648
1649                 /* Set the io to be lockless if this lock is for the io's
1650                  * host object. */
1651                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1652                         oio->oi_lockless = 1;
1653         }
1654 }
1655
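     /**
      * clo_fits_into() for lockless locks: a lockless lock only matches
      * requests that carry CEF_NEVER, and only from the io that owns it.
      */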
1656 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1657                                        const struct cl_lock_slice *slice,
1658                                        const struct cl_lock_descr *need,
1659                                        const struct cl_io *io)
1660 {
1661         struct osc_lock *lock = cl2osc_lock(slice);
1662
1663         if (!(need->cld_enq_flags & CEF_NEVER))
1664                 return 0;
1665
1666         /* Lockless locks may only be used by their owning io; see bug 22147. */
1667         return (lock->ols_owner == osc_env_io(env));
1668 }
1669
1670 static const struct cl_lock_operations osc_lock_lockless_ops = {
1671         .clo_fini      = osc_lock_fini,
1672         .clo_enqueue   = osc_lock_enqueue,
1673         .clo_wait      = osc_lock_lockless_wait,
1674         .clo_unuse     = osc_lock_lockless_unuse,
1675         .clo_state     = osc_lock_lockless_state,
1676         .clo_fits_into = osc_lock_lockless_fits_into,
1677         .clo_cancel    = osc_lock_lockless_cancel,
1678         .clo_print     = osc_lock_print
1679 };
1680
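     /**
      * Allocates the osc layer's private lock state (osc_lock) for \a lock,
      * initializes it, and adds it as a slice with osc_lock_ops as the
      * operations vector.  This is the osc entry point of the lock
      * initialization path that descends from cl_lock_init() through the
      * layers; it is presumably wired up as this layer's lock-init method in
      * osc_object.c.
      */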
1681 int osc_lock_init(const struct lu_env *env,
1682                   struct cl_object *obj, struct cl_lock *lock,
1683                   const struct cl_io *unused)
1684 {
1685         struct osc_lock *clk;
1686         int result;
1687
1688         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1689         if (clk != NULL) {
1690                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1691                 cfs_atomic_set(&clk->ols_pageref, 0);
1692                 clk->ols_state = OLS_NEW;
1693                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1694                 result = 0;
1695         } else
1696                 result = -ENOMEM;
1697         return result;
1698 }
1699
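     /**
      * Returns non-zero iff the osc_lock attached to the ldlm lock \a dlm
      * still has pages pinned (ols_pageref != 0); callers can use this to
      * avoid cancelling a lock whose pages are still in use.
      *
      * The test is done with a single atomic add: _PAGEREF_MAGIC is a bias
      * far outside any realistic page-reference count, so (illustrative
      * sketch, with N standing for the current ols_pageref value):
      *
      *     N == 0:  add_return() == _PAGEREF_MAGIC      -> rc stays 0
      *     N  > 0:  add_return() == _PAGEREF_MAGIC + N  -> the bias is
      *              subtracted back and rc is set to 1
      *
      * This avoids taking any additional lock on ols_pageref; the rare race
      * with osc_page_addref_lock() is discussed in the comment below.
      */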
1700 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1701 {
1702         struct osc_lock *olock;
1703         int              rc = 0;
1704
1705         cfs_spin_lock(&osc_ast_guard);
1706         olock = dlm->l_ast_data;
1707         /*
1708          * There is a very rare race with osc_page_addref_lock(), but it
1709          * does not matter: in the worst case we fail to cancel a lock
1710          * that we actually could cancel, which does no harm.
1711          */
1712         if (olock != NULL &&
1713             cfs_atomic_add_return(_PAGEREF_MAGIC,
1714                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1715                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1716                 rc = 1;
1717         }
1718         cfs_spin_unlock(&osc_ast_guard);
1719         return rc;
1720 }
1721
1722 /** @} osc */