lustre/osc/osc_lock.c (fs/lustre-release.git, branch HEAD)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 /** \addtogroup osc osc @{ */
42
43 #define DEBUG_SUBSYSTEM S_OSC
44
45 #ifdef __KERNEL__
46 # include <libcfs/libcfs.h>
47 #else
48 # include <liblustre.h>
49 #endif
50 /* fid_build_reg_res_name() */
51 #include <lustre_fid.h>
52
53 #include "osc_cl_internal.h"
54
55 /*****************************************************************************
56  *
57  * Type conversions.
58  *
59  */
60
61 static const struct cl_lock_operations osc_lock_ops;
62 static const struct cl_lock_operations osc_lock_lockless_ops;
63 static void osc_lock_to_lockless(const struct lu_env *env,
64                                  struct osc_lock *ols, int force);
65
66 int osc_lock_is_lockless(const struct osc_lock *olck)
67 {
68         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
69 }
70
71 /**
72  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
73  * pointer cannot be dereferenced, as lock is not protected from concurrent
74  * reclaim. This function is a helper for osc_lock_invariant().
75  */
76 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
77 {
78         struct ldlm_lock *lock;
79
80         lock = ldlm_handle2lock(handle);
81         if (lock != NULL)
82                 LDLM_LOCK_PUT(lock);
83         return lock;
84 }
85
86 /**
87  * Invariant that has to be true all of the time.
88  */
89 static int osc_lock_invariant(struct osc_lock *ols)
90 {
91         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
92         struct ldlm_lock *olock       = ols->ols_lock;
93         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
94
95         return
96                 ergo(osc_lock_is_lockless(ols),
97                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
98                 (ergo(olock != NULL, handle_used) &&
99                  ergo(olock != NULL,
100                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
101                  /*
102                   * Check that ->ols_handle and ->ols_lock are consistent, but
103                   * take into account that they are set at the different time.
104                   * take into account that they are set at different times.
105                  ergo(handle_used,
106                       ergo(lock != NULL && olock != NULL, lock == olock) &&
107                       ergo(lock == NULL, olock == NULL)) &&
108                  ergo(ols->ols_state == OLS_CANCELLED,
109                       olock == NULL && !handle_used) &&
110                  /*
111                   * DLM lock is destroyed only after we have seen cancellation
112                   * ast.
113                   */
114                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
115                       !olock->l_destroyed) &&
116                  ergo(ols->ols_state == OLS_GRANTED,
117                       olock != NULL &&
118                       olock->l_req_mode == olock->l_granted_mode &&
119                       ols->ols_hold));
120 }
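/*
 * Note: the invariant above is written with the libcfs boolean helpers.
 * Roughly (for illustration; the authoritative definitions live in the
 * libcfs headers):
 *
 *     #define ergo(a, b) (!(a) || (b))        -- logical implication, a => b
 *     #define equi(a, b) (!!(a) == !!(b))     -- logical equivalence
 *
 * so, e.g., ergo(olock != NULL, handle_used) reads "if a dlm lock is
 * attached, then ->ols_handle must be in use".
 */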
121
122 /*****************************************************************************
123  *
124  * Lock operations.
125  *
126  */
127
128 /**
129  * Breaks a link between osc_lock and dlm_lock.
130  */
131 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
132 {
133         struct ldlm_lock *dlmlock;
134
135         spin_lock(&osc_ast_guard);
136         dlmlock = olck->ols_lock;
137         if (dlmlock == NULL) {
138                 spin_unlock(&osc_ast_guard);
139                 return;
140         }
141
142         olck->ols_lock = NULL;
143         /* wb(); --- for all who check (ols->ols_lock != NULL) before
144          * the call to osc_lock_detach() */
145         dlmlock->l_ast_data = NULL;
146         olck->ols_handle.cookie = 0ULL;
147         spin_unlock(&osc_ast_guard);
148
149         lock_res_and_lock(dlmlock);
150         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
151                 struct cl_object *obj = olck->ols_cl.cls_obj;
152                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
153                 __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
154
155                 /* Update the kms. Need to loop over all granted locks.
156                  * Not a problem for the client */
157                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
158                 unlock_res_and_lock(dlmlock);
159
160                 cl_object_attr_lock(obj);
161                 cl_object_attr_set(env, obj, attr, CAT_KMS);
162                 cl_object_attr_unlock(obj);
163         } else
164                 unlock_res_and_lock(dlmlock);
165
166         /* release a reference taken in osc_lock_upcall0(). */
167         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
168         LDLM_LOCK_RELEASE(dlmlock);
169 }
170
171 static int osc_lock_unuse(const struct lu_env *env,
172                           const struct cl_lock_slice *slice)
173 {
174         struct osc_lock *ols = cl2osc_lock(slice);
175         int result;
176
177         LASSERT(ols->ols_state == OLS_GRANTED ||
178                 ols->ols_state == OLS_UPCALL_RECEIVED);
179         LINVRNT(osc_lock_invariant(ols));
180
181         if (ols->ols_glimpse) {
182                 LASSERT(ols->ols_hold == 0);
183                 return 0;
184         }
185         LASSERT(ols->ols_hold);
186
187         /*
188          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
189          * so that possible synchronous cancellation (that always happens
190          * e.g., for liblustre) sees that lock is released.
191          */
192         ols->ols_state = OLS_RELEASED;
193         ols->ols_hold = 0;
194         result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
195         ols->ols_has_ref = 0;
196         return result;
197 }
198
199 static void osc_lock_fini(const struct lu_env *env,
200                           struct cl_lock_slice *slice)
201 {
202         struct osc_lock  *ols = cl2osc_lock(slice);
203
204         LINVRNT(osc_lock_invariant(ols));
205         /*
206          * ->ols_hold can still be true at this point if, for example, a
207          * thread that requested a lock was killed (and released a reference
208          * to the lock), before reply from a server was received. In this case
209          * lock is destroyed immediately after upcall.
210          */
211         if (ols->ols_hold)
212                 osc_lock_unuse(env, slice);
213         LASSERT(ols->ols_lock == NULL);
214
215         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
216 }
217
218 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
219                         struct ldlm_res_id *resname)
220 {
221         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
222         if (0) {
223                 /*
224                  * In the perfect world of the future, where ost servers talk
225                  * idif-fids...
226                  */
227                 fid_build_reg_res_name(fid, resname);
228         } else {
229                 /*
230                  * In reality, where the ost server expects ->lsm_object_id and
231                  * ->lsm_object_gr in the resource name.
232                  */
233                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
234                                    resname);
235         }
236 }
237
238 static void osc_lock_build_policy(const struct lu_env *env,
239                                   const struct cl_lock *lock,
240                                   ldlm_policy_data_t *policy)
241 {
242         const struct cl_lock_descr *d = &lock->cll_descr;
243
244         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
245 }
246
247 static int osc_enq2ldlm_flags(__u32 enqflags)
248 {
249         int result = 0;
250
251         LASSERT((enqflags & ~CEF_MASK) == 0);
252
253         if (enqflags & CEF_NONBLOCK)
254                 result |= LDLM_FL_BLOCK_NOWAIT;
255         if (enqflags & CEF_ASYNC)
256                 result |= LDLM_FL_HAS_INTENT;
257         if (enqflags & CEF_DISCARD_DATA)
258                 result |= LDLM_AST_DISCARD_DATA;
259         return result;
260 }
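/*
 * For illustration, the translation above is a plain flag mapping, e.g.:
 *
 *     osc_enq2ldlm_flags(CEF_NONBLOCK | CEF_DISCARD_DATA)
 *         == LDLM_FL_BLOCK_NOWAIT | LDLM_AST_DISCARD_DATA
 *
 * Any enqueue flag outside CEF_MASK trips the LASSERT above.
 */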
261
262 /**
263  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
264  * pointers. Initialized in osc_init().
265  */
266 spinlock_t osc_ast_guard;
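/*
 * As noted above, the guard is set up in osc_init(); presumably something
 * like the sketch below (shown for illustration only, the actual
 * initialization lives outside this file):
 *
 *     spin_lock_init(&osc_ast_guard);
 */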
267
268 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
269 {
270         struct osc_lock *olck;
271
272         lock_res_and_lock(dlm_lock);
273         spin_lock(&osc_ast_guard);
274         olck = dlm_lock->l_ast_data;
275         if (olck != NULL) {
276                 struct cl_lock *lock = olck->ols_cl.cls_lock;
277                 /*
278                  * If osc_lock holds a reference on ldlm lock, return it even
279                  * when cl_lock is in CLS_FREEING state. This way
280                  *
281                  *         osc_ast_data_get(dlmlock) == NULL
282                  *
283                  * guarantees that all osc references on dlmlock were
284                  * released. osc_dlm_blocking_ast0() relies on that.
285                  */
286                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
287                         cl_lock_get_trust(lock);
288                         lu_ref_add_atomic(&lock->cll_reference,
289                                           "ast", cfs_current());
290                 } else
291                         olck = NULL;
292         }
293         spin_unlock(&osc_ast_guard);
294         unlock_res_and_lock(dlm_lock);
295         return olck;
296 }
297
298 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
299 {
300         struct cl_lock *lock;
301
302         lock = olck->ols_cl.cls_lock;
303         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
304         cl_lock_put(env, lock);
305 }
306
307 /**
308  * Updates object attributes from a lock value block (lvb) received together
309  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
310  * logic.
311  *
312  * This can be optimized to not update attributes when lock is a result of a
313  * local match.
314  *
315  * Called under lock and resource spin-locks.
316  */
317 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
318                                 int rc)
319 {
320         struct ost_lvb    *lvb;
321         struct cl_object  *obj;
322         struct lov_oinfo  *oinfo;
323         struct cl_attr    *attr;
324         unsigned           valid;
325
326         ENTRY;
327
328         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
329                 EXIT;
330                 return;
331         }
332
333         lvb   = &olck->ols_lvb;
334         obj   = olck->ols_cl.cls_obj;
335         oinfo = cl2osc(obj)->oo_oinfo;
336         attr  = &osc_env_info(env)->oti_attr;
337         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
338         cl_lvb2attr(attr, lvb);
339
340         cl_object_attr_lock(obj);
341         if (rc == 0) {
342                 struct ldlm_lock  *dlmlock;
343                 __u64 size;
344
345                 dlmlock = olck->ols_lock;
346                 LASSERT(dlmlock != NULL);
347
348                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
349                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
350                 size = lvb->lvb_size;
351                 /* Extend KMS up to the end of this lock and no further.
352                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
353                 if (size > dlmlock->l_policy_data.l_extent.end)
354                         size = dlmlock->l_policy_data.l_extent.end + 1;
355                 if (size >= oinfo->loi_kms) {
356                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
357                                    ", kms="LPU64, lvb->lvb_size, size);
358                         valid |= CAT_KMS;
359                         attr->cat_kms = size;
360                 } else {
361                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
362                                    LPU64"; leaving kms="LPU64", end="LPU64,
363                                    lvb->lvb_size, oinfo->loi_kms,
364                                    dlmlock->l_policy_data.l_extent.end);
365                 }
366                 ldlm_lock_allow_match_locked(dlmlock);
367         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
368                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
369                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
370         } else
371                 valid = 0;
372
373         if (valid != 0)
374                 cl_object_attr_set(env, obj, attr, valid);
375
376         cl_object_attr_unlock(obj);
377
378         EXIT;
379 }
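/*
 * Worked example for the kms update above: if the granted extent is
 * [0, 65535] and the server-supplied lvb_size is 1048576, the candidate kms
 * is clamped to extent.end + 1 = 65536 (a lock on [x, y] covers at most
 * y + 1 bytes). loi_kms is then raised to 65536 only if that value is not
 * below the current kms; the size (rss) taken from the lvb is applied via
 * cl_object_attr_set() in either case.
 */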
380
381 /**
382  * Called when a lock is granted, from an upcall (when server returned a
383  * granted lock), or from completion AST, when server returned a blocked lock.
384  *
385  * Called under lock and resource spin-locks, that are released temporarily
386  * here.
387  */
388 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
389                              struct ldlm_lock *dlmlock, int rc)
390 {
391         struct ldlm_extent   *ext;
392         struct cl_lock       *lock;
393         struct cl_lock_descr *descr;
394
395         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
396
397         ENTRY;
398         if (olck->ols_state != OLS_GRANTED) {
399                 lock  = olck->ols_cl.cls_lock;
400                 ext   = &dlmlock->l_policy_data.l_extent;
401                 descr = &osc_env_info(env)->oti_descr;
402                 descr->cld_obj = lock->cll_descr.cld_obj;
403
404                 /* XXX check that ->l_granted_mode is valid. */
405                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
406                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
407                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
408                 /*
409                  * tell upper layers the extent of the lock that was actually
410                  * granted
411                  */
412                 olck->ols_state = OLS_GRANTED;
413                 osc_lock_lvb_update(env, olck, rc);
414
415                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
416                  * to take a semaphore on a parent lock. This is safe, because
417                  * spin-locks are needed to protect consistency of
418                  * dlmlock->l_*_mode and LVB, and we have finished processing
419                  * them. */
420                 unlock_res_and_lock(dlmlock);
421                 cl_lock_modify(env, lock, descr);
422                 cl_lock_signal(env, lock);
423                 LINVRNT(osc_lock_invariant(olck));
424                 lock_res_and_lock(dlmlock);
425         }
426         EXIT;
427 }
428
429 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
430
431 {
432         struct ldlm_lock *dlmlock;
433
434         ENTRY;
435
436         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
437         LASSERT(dlmlock != NULL);
438
439         lock_res_and_lock(dlmlock);
440         spin_lock(&osc_ast_guard);
441         LASSERT(dlmlock->l_ast_data == olck);
442         LASSERT(olck->ols_lock == NULL);
443         olck->ols_lock = dlmlock;
444         spin_unlock(&osc_ast_guard);
445
446         /*
447          * Lock might not be granted yet. In this case, the completion ast
448          * (osc_ldlm_completion_ast()) comes later and finishes lock
449          * granting.
450          */
451         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
452                 osc_lock_granted(env, olck, dlmlock, 0);
453         unlock_res_and_lock(dlmlock);
454
455         /*
456          * osc_enqueue_interpret() decrefs asynchronous locks; counter
457          * that here.
458          */
459         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
460         olck->ols_hold = olck->ols_has_ref = 1;
461
462         /* lock reference taken by ldlm_handle2lock_long() is owned by
463          * osc_lock and released in osc_lock_detach() */
464         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
465 }
466
467 /**
468  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
469  * received from a server, or after osc_enqueue_base() matched a local DLM
470  * lock.
471  */
472 static int osc_lock_upcall(void *cookie, int errcode)
473 {
474         struct osc_lock         *olck  = cookie;
475         struct cl_lock_slice    *slice = &olck->ols_cl;
476         struct cl_lock          *lock  = slice->cls_lock;
477         struct lu_env           *env;
478         struct cl_env_nest       nest;
479
480         ENTRY;
481         env = cl_env_nested_get(&nest);
482         if (!IS_ERR(env)) {
483                 int rc;
484
485                 cl_lock_mutex_get(env, lock);
486
487                 LASSERT(lock->cll_state >= CLS_QUEUING);
488                 if (olck->ols_state == OLS_ENQUEUED) {
489                         olck->ols_state = OLS_UPCALL_RECEIVED;
490                         rc = ldlm_error2errno(errcode);
491                 } else if (olck->ols_state == OLS_CANCELLED) {
492                         rc = -EIO;
493                 } else {
494                         CERROR("Impossible state: %i\n", olck->ols_state);
495                         LBUG();
496                 }
497                 if (rc) {
498                         struct ldlm_lock *dlmlock;
499
500                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
501                         if (dlmlock != NULL) {
502                                 lock_res_and_lock(dlmlock);
503                                 spin_lock(&osc_ast_guard);
504                                 LASSERT(olck->ols_lock == NULL);
505                                 dlmlock->l_ast_data = NULL;
506                                 olck->ols_handle.cookie = 0ULL;
507                                 spin_unlock(&osc_ast_guard);
508                                 unlock_res_and_lock(dlmlock);
509                                 LDLM_LOCK_PUT(dlmlock);
510                         }
511                 } else {
512                         if (olck->ols_glimpse)
513                                 olck->ols_glimpse = 0;
514                         osc_lock_upcall0(env, olck);
515                 }
516
517                 /* Error handling, some errors are tolerable. */
518                 if (olck->ols_locklessable && rc == -EUSERS) {
519                         /* This is a tolerable error, turn this lock into
520                          * a lockless lock.
521                          */
522                         osc_object_set_contended(cl2osc(slice->cls_obj));
523                         LASSERT(slice->cls_ops == &osc_lock_ops);
524
525                         /* Change this lock to ldlmlock-less lock. */
526                         osc_lock_to_lockless(env, olck, 1);
527                         olck->ols_state = OLS_GRANTED;
528                         rc = 0;
529                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
530                         osc_lock_lvb_update(env, olck, rc);
531                         cl_lock_delete(env, lock);
532                         /* Hide the error. */
533                         rc = 0;
534                 }
535
536                 if (rc == 0)
537                         /* on error, lock was signaled by cl_lock_error() */
538                         cl_lock_signal(env, lock);
539                 else
540                         cl_lock_error(env, lock, rc);
541
542                 cl_lock_mutex_put(env, lock);
543
544                 /* release cookie reference, acquired by osc_lock_enqueue() */
545                 lu_ref_del(&lock->cll_reference, "upcall", lock);
546                 cl_lock_put(env, lock);
547                 cl_env_nested_put(&nest, env);
548         } else
549                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
550                 LBUG();
551         RETURN(errcode);
552 }
553
554 /**
555  * Core of osc_dlm_blocking_ast() logic.
556  */
557 static void osc_lock_blocking(const struct lu_env *env,
558                               struct ldlm_lock *dlmlock,
559                               struct osc_lock *olck, int blocking)
560 {
561         struct cl_lock *lock = olck->ols_cl.cls_lock;
562
563         LASSERT(olck->ols_lock == dlmlock);
564         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
565         LASSERT(!osc_lock_is_lockless(olck));
566
567         if (olck->ols_hold)
568                 /*
569                  * Lock might still be addref-ed here if, e.g., a blocking ast
570                  * is sent for a failed lock.
571                  */
572                 osc_lock_unuse(env, &olck->ols_cl);
573
574         if (blocking && olck->ols_state < OLS_BLOCKED)
575                 /*
576                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
577                  * because it recursively re-enters osc_lock_blocking(), with
578                  * the state set to OLS_CANCELLED.
579                  */
580                 olck->ols_state = OLS_BLOCKED;
581         /*
582          * cancel and destroy lock at least once no matter how blocking ast is
583          * entered (see comment above osc_ldlm_blocking_ast() for use
584          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
585          */
586         cl_lock_cancel(env, lock);
587         cl_lock_delete(env, lock);
588 }
589
590 /**
591  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
592  * and ldlm_lock caches.
593  */
594 static int osc_dlm_blocking_ast0(const struct lu_env *env,
595                                  struct ldlm_lock *dlmlock,
596                                  void *data, int flag)
597 {
598         struct osc_lock *olck;
599         struct cl_lock  *lock;
600         int result;
601         int cancel;
602
603         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
604
605         cancel = 0;
606         olck = osc_ast_data_get(dlmlock);
607         if (olck != NULL) {
608                 lock = olck->ols_cl.cls_lock;
609                 cl_lock_mutex_get(env, lock);
610                 LINVRNT(osc_lock_invariant(olck));
611                 if (olck->ols_ast_wait) {
612                         /* wake up osc_lock_use() */
613                         cl_lock_signal(env, lock);
614                         olck->ols_ast_wait = 0;
615                 }
616                 /*
617                  * Lock might have been canceled while this thread was
618                  * sleeping on the lock mutex, but olck is pinned in memory.
619                  */
620                 if (olck == dlmlock->l_ast_data) {
621                         /*
622                          * NOTE: DLM sends blocking AST's for failed locks
623                          *       (that are still in pre-OLS_GRANTED state)
624                          *       too, and they have to be canceled otherwise
625                          *       DLM lock is never destroyed and is stuck
626                          *       in memory.
627                          *
628                          *       Alternatively, ldlm_cli_cancel() can be
629                          *       called here directly for osc_locks with
630                          *       ols_state < OLS_GRANTED to maintain an
631                          *       invariant that ->clo_cancel() is only called
632                          *       for locks that were granted.
633                          */
634                         LASSERT(data == olck);
635                         osc_lock_blocking(env, dlmlock,
636                                           olck, flag == LDLM_CB_BLOCKING);
637                 } else
638                         cancel = 1;
639                 cl_lock_mutex_put(env, lock);
640                 osc_ast_data_put(env, olck);
641         } else
642                 /*
643                  * DLM lock exists, but there is no cl_lock attached to it.
644                  * This is a `normal' race. cl_object and its cl_lock's can be
645                  * removed by memory pressure, together with all pages.
646                  */
647                 cancel = (flag == LDLM_CB_BLOCKING);
648
649         if (cancel) {
650                 struct lustre_handle *lockh;
651
652                 lockh = &osc_env_info(env)->oti_handle;
653                 ldlm_lock2handle(dlmlock, lockh);
654                 result = ldlm_cli_cancel(lockh);
655         } else
656                 result = 0;
657         return result;
658 }
659
660 /**
661  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
662  * some other lock, or is canceled. This function is installed as a
663  * ldlm_lock::l_blocking_ast() for client extent locks.
664  *
665  * Control flow is tricky, because ldlm uses the same call-back
666  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
667  *
668  * \param dlmlock lock for which ast occurred.
669  *
670  * \param new description of a conflicting lock in case of blocking ast.
671  *
672  * \param data value of dlmlock->l_ast_data
673  *
674  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
675  *             cancellation and blocking ast's.
676  *
677  * Possible use cases:
678  *
679  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
680  *       lock due to lock lru pressure, or explicit user request to purge
681  *       locks.
682  *
683  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
684  *       us that dlmlock conflicts with another lock that some client is
685  *       enqueuing. Lock is canceled.
686  *
687  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
688  *             ldlm_cli_cancel() that calls
689  *
690  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
691  *
692  *             recursively entering osc_ldlm_blocking_ast().
693  *
694  *     - client cancels the lock voluntarily (e.g., as a part of early cancellation):
695  *
696  *           cl_lock_cancel()->
697  *             osc_lock_cancel()->
698  *               ldlm_cli_cancel()->
699  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
700  *
701  */
702 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
703                                  struct ldlm_lock_desc *new, void *data,
704                                  int flag)
705 {
706         struct lu_env     *env;
707         struct cl_env_nest nest;
708         int                result;
709
710         /*
711          * This can be called in the context of outer IO, e.g.,
712          *
713          *     cl_enqueue()->...
714          *       ->osc_enqueue_base()->...
715          *         ->ldlm_prep_elc_req()->...
716          *           ->ldlm_cancel_callback()->...
717          *             ->osc_ldlm_blocking_ast()
718          *
719          * A new environment has to be created so as not to corrupt the outer context.
720          */
721         env = cl_env_nested_get(&nest);
722         if (!IS_ERR(env)) {
723                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
724                 cl_env_nested_put(&nest, env);
725         } else {
726                 result = PTR_ERR(env);
727                 /*
728                  * XXX This should never happen, as cl_lock is
729                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
730                  * should be used.
731                  */
732                 LBUG();
733         }
734         if (result != 0) {
735                 if (result == -ENODATA)
736                         result = 0;
737                 else
738                         CERROR("BAST failed: %d\n", result);
739         }
740         return result;
741 }
742
743 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
744                                    int flags, void *data)
745 {
746         struct cl_env_nest nest;
747         struct lu_env     *env;
748         struct osc_lock   *olck;
749         struct cl_lock    *lock;
750         int result;
751         int dlmrc;
752
753         /* first, do dlm part of the work */
754         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
755         /* then, notify cl_lock */
756         env = cl_env_nested_get(&nest);
757         if (!IS_ERR(env)) {
758                 olck = osc_ast_data_get(dlmlock);
759                 if (olck != NULL) {
760                         lock = olck->ols_cl.cls_lock;
761                         cl_lock_mutex_get(env, lock);
762                         /*
763                          * ldlm_handle_cp_callback() copied LVB from request
764                          * to lock->l_lvb_data, store it in osc_lock.
765                          */
766                         LASSERT(dlmlock->l_lvb_data != NULL);
767                         lock_res_and_lock(dlmlock);
768                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
769                         if (olck->ols_lock == NULL)
770                                 /*
771                                  * upcall (osc_lock_upcall()) hasn't yet been
772                                  * called. Do nothing now, upcall will bind
773                                  * olck to dlmlock and signal the waiters.
774                                  *
775                                  * This maintains an invariant that osc_lock
776                                  * and ldlm_lock are always bound when
777                                  * osc_lock is in OLS_GRANTED state.
778                                  */
779                                 ;
780                         else if (dlmlock->l_granted_mode != LCK_MINMODE)
781                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
782                         unlock_res_and_lock(dlmlock);
783                         if (dlmrc != 0)
784                                 cl_lock_error(env, lock, dlmrc);
785                         cl_lock_mutex_put(env, lock);
786                         osc_ast_data_put(env, olck);
787                         result = 0;
788                 } else
789                         result = -ELDLM_NO_LOCK_DATA;
790                 cl_env_nested_put(&nest, env);
791         } else
792                 result = PTR_ERR(env);
793         return dlmrc ?: result;
794 }
795
796 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
797 {
798         struct ptlrpc_request  *req  = data;
799         struct osc_lock        *olck;
800         struct cl_lock         *lock;
801         struct cl_object       *obj;
802         struct cl_env_nest      nest;
803         struct lu_env          *env;
804         struct ost_lvb         *lvb;
805         struct req_capsule     *cap;
806         int                     result;
807
808         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
809
810         env = cl_env_nested_get(&nest);
811         if (!IS_ERR(env)) {
812                 /*
813                  * osc_ast_data_get() has to go after the environment is
814                  * allocated, because osc_ast_data_get() acquires a
815                  * reference to a lock, and it can only be released
816                  * within an environment.
817                  */
818                 olck = osc_ast_data_get(dlmlock);
819                 if (olck != NULL) {
820                         lock = olck->ols_cl.cls_lock;
821                         cl_lock_mutex_get(env, lock);
822                         cap = &req->rq_pill;
823                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
824                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
825                                              sizeof *lvb);
826                         result = req_capsule_server_pack(cap);
827                         if (result == 0) {
828                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
829                                 obj = lock->cll_descr.cld_obj;
830                                 result = cl_object_glimpse(env, obj, lvb);
831                         }
832                         cl_lock_mutex_put(env, lock);
833                         osc_ast_data_put(env, olck);
834                 } else {
835                         /*
836                          * These errors are normal races, so we don't want to
837                          * fill the console with messages by calling
838                          * ptlrpc_error()
839                          */
840                         lustre_pack_reply(req, 1, NULL, NULL);
841                         result = -ELDLM_NO_LOCK_DATA;
842                 }
843                 cl_env_nested_put(&nest, env);
844         } else
845                 result = PTR_ERR(env);
846         req->rq_status = result;
847         return result;
848 }
849
850 static unsigned long osc_lock_weigh(const struct lu_env *env,
851                                     const struct cl_lock_slice *slice)
852 {
853         /*
854          * don't need to grab coh_page_guard since we don't care about the
855          * exact number of pages.
856          */
857         return cl_object_header(slice->cls_obj)->coh_pages;
858 }
859
860 /**
861  * Get the weight of dlm lock for early cancellation.
862  *
863  * XXX: it should return the pages covered by this \a dlmlock.
864  */
865 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
866 {
867         struct cl_env_nest       nest;
868         struct lu_env           *env;
869         struct osc_lock         *lock;
870         struct cl_lock          *cll;
871         unsigned long            weight;
872         ENTRY;
873
874         might_sleep();
875         /*
876          * osc_ldlm_weigh_ast has a complex context since it might be called
877          * because of lock cancellation, or from user input. We have to make
878          * a new environment for it. It is probably safe to use
879          * the upper context because cl_lock_put() doesn't modify environment
880          * variables. But just in case ..
881          */
882         env = cl_env_nested_get(&nest);
883         if (IS_ERR(env))
884                 /* Mostly because of lack of memory, tend to eliminate this lock */
885                 RETURN(0);
886
887         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
888         lock = osc_ast_data_get(dlmlock);
889         if (lock == NULL) {
890                 /* cl_lock was destroyed because of memory pressure.
891                  * It is reasonable to assign this type of lock
892                  * a lower cost.
893                  */
894                 GOTO(out, weight = 0);
895         }
896
897         cll = lock->ols_cl.cls_lock;
898         cl_lock_mutex_get(env, cll);
899         weight = cl_lock_weigh(env, cll);
900         cl_lock_mutex_put(env, cll);
901         osc_ast_data_put(env, lock);
902         EXIT;
903
904 out:
905         cl_env_nested_put(&nest, env);
906         return weight;
907 }
908
909 static void osc_lock_build_einfo(const struct lu_env *env,
910                                  const struct cl_lock *clock,
911                                  struct osc_lock *lock,
912                                  struct ldlm_enqueue_info *einfo)
913 {
914         enum cl_lock_mode mode;
915
916         mode = clock->cll_descr.cld_mode;
917         if (mode == CLM_PHANTOM)
918                 /*
919                  * For now, enqueue all glimpse locks in read mode. In the
920                  * future, client might choose to enqueue LCK_PW lock for
921                  * glimpse on a file opened for write.
922                  */
923                 mode = CLM_READ;
924
925         einfo->ei_type   = LDLM_EXTENT;
926         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
927         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
928         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
929         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
930         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
931         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
932 }
933
934 static int osc_lock_delete0(struct cl_lock *conflict)
935 {
936         struct cl_env_nest    nest;
937         struct lu_env        *env;
938         int rc = 0;
939
940         env = cl_env_nested_get(&nest);
941         if (!IS_ERR(env)) {
942                 cl_lock_delete(env, conflict);
943                 cl_env_nested_put(&nest, env);
944         } else
945                 rc = PTR_ERR(env);
946         return rc;
947 }
948 /**
949  * Cancels \a conflict lock and waits until it reaches CLS_FREEING state. This
950  * is called as a part of enqueuing to cancel conflicting locks early.
951  *
952  * \retval            0: success, \a conflict was cancelled and destroyed.
953  *
954  * \retval   CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
955  *                       released in the process. Repeat enqueuing.
956  *
957  * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
958  *                       either \a lock is non-blocking, or current thread
959  *                       holds other locks that prevent it from waiting
960  *                       for cancel to complete.
961  *
962  * \retval          -ve: other error, including -EINTR.
963  *
964  */
965 static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
966                                 struct cl_lock *conflict, int canwait)
967 {
968         int rc;
969
970         LASSERT(cl_lock_is_mutexed(lock));
971         LASSERT(cl_lock_is_mutexed(conflict));
972
973         rc = 0;
974         if (conflict->cll_state != CLS_FREEING) {
975                 cl_lock_cancel(env, conflict);
976                 rc = osc_lock_delete0(conflict);
977                 if (rc)
978                         return rc; 
979                 if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
980                         rc = -EWOULDBLOCK;
981                         if (cl_lock_nr_mutexed(env) > 2)
982                                 /*
983                                  * If mutexes of locks other than @lock and
984                                  * @scan are held by the current thread, it
985                                  * cannot wait on @scan state change in a
986                                  * dead-lock safe manner, so simply skip early
987                                  * cancellation in this case.
988                                  *
989                                  * This means that early cancellation doesn't
990                                  * work when there is even slight mutex
991                                  * contention, as top-lock's mutex is usually
992                                  * held at this time.
993                                  */
994                                 ;
995                         else if (canwait) {
996                                 /* Waiting for @scan to be destroyed */
997                                 cl_lock_mutex_put(env, lock);
998                                 do {
999                                         rc = cl_lock_state_wait(env, conflict);
1000                                 } while (!rc &&
1001                                          conflict->cll_state < CLS_FREEING);
1002                                 /* mutex was released, repeat enqueue. */
1003                                 rc = rc ?: CLO_REPEAT;
1004                                 cl_lock_mutex_get(env, lock);
1005                         }
1006                 }
1007                 LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
1008                 CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
1009                        conflict, rc ? "not":"", rc);
1010         }
1011         return rc;
1012 }
1013
1014 /**
1015  * Determine if the lock should be converted into a lockless lock.
1016  *
1017  * Steps to check:
1018  * - if the lock has an explicit requirement for a non-lockless lock;
1019  * - check the io lock request type ci_lockreq;
1020  * - send the enqueue rpc to the ost to make the further decision;
1021  * - special treatment for lockless truncate locks
1022  *
1023  *  Additional policy can be implemented here, e.g., never do lockless-io
1024  *  for large extents.
1025  */
1026 static void osc_lock_to_lockless(const struct lu_env *env,
1027                                  struct osc_lock *ols, int force)
1028 {
1029         struct cl_lock_slice *slice = &ols->ols_cl;
1030         struct cl_lock *lock        = slice->cls_lock;
1031
1032         LASSERT(ols->ols_state == OLS_NEW ||
1033                 ols->ols_state == OLS_UPCALL_RECEIVED);
1034
1035         if (force) {
1036                 ols->ols_locklessable = 1;
1037                 LASSERT(cl_lock_is_mutexed(lock));
1038                 slice->cls_ops = &osc_lock_lockless_ops;
1039         } else {
1040                 struct osc_io *oio     = osc_env_io(env);
1041                 struct cl_io  *io      = oio->oi_cl.cis_io;
1042                 struct cl_object *obj  = slice->cls_obj;
1043                 struct osc_object *oob = cl2osc(obj);
1044                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1045                 struct obd_connect_data *ocd;
1046
1047                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1048                         io->ci_lockreq == CILR_MAYBE ||
1049                         io->ci_lockreq == CILR_NEVER);
1050
1051                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1052                 ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
1053                                 (io->ci_lockreq == CILR_MAYBE) &&
1054                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1055                 if (io->ci_lockreq == CILR_NEVER ||
1056                         /* lockless IO */
1057                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1058                         /* lockless truncate */
1059                     (io->ci_type == CIT_TRUNC &&
1060                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1061                       osd->od_lockless_truncate)) {
1062                         ols->ols_locklessable = 1;
1063                         slice->cls_ops = &osc_lock_lockless_ops;
1064                 }
1065         }
1066         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1067 }
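/*
 * To summarize the decision above: with force == 0 the lock goes lockless
 * iff one of the following holds:
 *
 *     - io->ci_lockreq == CILR_NEVER, or
 *     - the lock is locklessable (CILR_MAYBE, not a truncate, and the server
 *       advertises OBD_CONNECT_SRVLOCK) and the object is currently
 *       contended, or
 *     - the io is a truncate, the server advertises OBD_CONNECT_TRUNCLOCK,
 *       and lockless truncate is enabled on this osc device.
 *
 * With force != 0 the conversion is unconditional.
 */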
1068
1069 /**
1070  * Cancel all conflicting locks and wait for them to be destroyed.
1071  *
1072  * This function is used for two purposes:
1073  *
1074  *     - early cancel all conflicting locks before starting IO, and
1075  *
1076  *     - guarantee that pages added to the page cache by lockless IO are never
1077  *       covered by locks other than lockless IO lock, and, hence, are not
1078  *       visible to other threads.
1079  */
1080 static int osc_lock_enqueue_wait(const struct lu_env *env,
1081                                  const struct osc_lock *olck)
1082 {
1083         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1084         struct cl_lock_descr    *descr   = &lock->cll_descr;
1085         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1086         struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
1087         struct cl_lock          *scan;
1088         struct cl_lock          *temp;
1089         int lockless                     = osc_lock_is_lockless(olck);
1090         int rc                           = 0;
1091         int canwait;
1092         int stop;
1093         ENTRY;
1094
1095         LASSERT(cl_lock_is_mutexed(lock));
1096         LASSERT(lock->cll_state == CLS_QUEUING);
1097
1098         /*
1099          * XXX This function could be sped up if we had asynchronous
1100          * cancellation.
1101          */
1102
1103         canwait =
1104                 !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
1105                 cl_lock_nr_mutexed(env) == 1;
1106         cl_lock_closure_init(env, closure, lock, canwait);
1107         spin_lock(&hdr->coh_lock_guard);
1108         list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
1109                 if (scan == lock)
1110                         continue;
1111
1112                 if (scan->cll_state < CLS_QUEUING ||
1113                     scan->cll_state == CLS_FREEING ||
1114                     scan->cll_descr.cld_start > descr->cld_end ||
1115                     scan->cll_descr.cld_end < descr->cld_start)
1116                         continue;
1117
1118                 /* overlapped and living locks. */
1119                 /* A tricky case for lockless pages:
1120                  * We need to cancel the compatible locks if we're enqueuing
1121                  * a lockless lock, for example:
1122                  * imagine that client has PR lock on [0, 1000], and thread T0
1123                  * is doing lockless IO in [500, 1500] region. Concurrent
1124                  * thread T1 can see lockless data in [500, 1000], which is
1125                  * wrong, because these data are possibly stale.
1126                  */
1127                 if (!lockless && cl_lock_compatible(scan, lock))
1128                         continue;
1129
1130                 /* Now @scan is conflicting with @lock, this means the current
1131                  * thread has to sleep until @scan is destroyed. */
1132                 cl_lock_get_trust(scan);
1133                 if (&temp->cll_linkage != &hdr->coh_locks)
1134                         cl_lock_get_trust(temp);
1135                 spin_unlock(&hdr->coh_lock_guard);
1136                 lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
1137
1138                 LASSERT(list_empty(&closure->clc_list));
1139                 rc = cl_lock_closure_build(env, scan, closure);
1140                 if (rc == 0) {
1141                         rc = osc_lock_cancel_wait(env, lock, scan, canwait);
1142                         cl_lock_disclosure(env, closure);
1143                         if (rc == -EWOULDBLOCK)
1144                                 rc = 0;
1145                 }
1146                 if (rc == CLO_REPEAT && !canwait)
1147                         /* cannot wait... no early cancellation. */
1148                         rc = 0;
1149
1150                 lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
1151                 cl_lock_put(env, scan);
1152                 spin_lock(&hdr->coh_lock_guard);
1153                 /*
1154                  * Lock list could have been modified, while spin-lock was
1155                  * released. Check that it is safe to continue.
1156                  */
1157                 stop = list_empty(&temp->cll_linkage);
1158                 if (&temp->cll_linkage != &hdr->coh_locks)
1159                         cl_lock_put(env, temp);
1160                 if (stop || rc != 0)
1161                         break;
1162         }
1163         spin_unlock(&hdr->coh_lock_guard);
1164         cl_lock_closure_fini(closure);
1165         RETURN(rc);
1166 }
1167
1168 /**
1169  * Deadlock avoidance for osc_lock_enqueue(). Consider the following scenario:
1170  *
1171  *     - Thread0: obtains PR:[0, 10]. Lock is busy.
1172  *
1173  *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
1174  *       PR:[0, 10], but cancellation of busy lock is postponed.
1175  *
1176  *     - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
1177  *       PW:[5, 50], and thread0 waits for the lock completion never
1178  *       releasing PR:[0, 10]---deadlock.
1179  *
1180  * The second PR lock can be a glimpse lock (it is to deal with that situation
1181  * that ll_glimpse_size() has a second argument, preventing local match of
1182  * not-yet-granted locks, see bug 10295). A similar situation is possible in
1183  * the case of a memory mapped user level buffer.
1184  *
1185  * To prevent this we can detect a situation where the current "thread" or "io"
1186  * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
1187  * the ols->ols_flags, or prevent local match with PW locks.
1188  */
1189 static int osc_deadlock_is_possible(const struct lu_env *env,
1190                                     struct cl_lock *lock)
1191 {
1192         struct cl_object        *obj;
1193         struct cl_object_header *head;
1194         struct cl_lock          *scan;
1195         struct osc_io           *oio;
1196
1197         int result;
1198
1199         ENTRY;
1200
1201         LASSERT(cl_lock_is_mutexed(lock));
1202
1203         oio  = osc_env_io(env);
1204         obj  = lock->cll_descr.cld_obj;
1205         head = cl_object_header(obj);
1206
1207         result = 0;
1208         spin_lock(&head->coh_lock_guard);
1209         list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1210                 if (scan != lock) {
1211                         struct osc_lock *oscan;
1212
1213                         oscan = osc_lock_at(scan);
1214                         LASSERT(oscan != NULL);
1215                         if (oscan->ols_owner == oio) {
1216                                 result = 1;
1217                                 break;
1218                         }
1219                 }
1220         }
1221         spin_unlock(&head->coh_lock_guard);
1222         RETURN(result);
1223 }
1224
1225 /**
1226  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1227  * layer. This initiates ldlm enqueue:
1228  *
1229  *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
1230  *
1231  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1232  *
1233  *     - calls osc_enqueue_base() to do actual enqueue.
1234  *
1235  * osc_enqueue_base() is supplied with an upcall function that is executed
1236  * when lock is received either after a local cached ldlm lock is matched, or
1237  * when a reply from the server is received.
1238  *
1239  * This function does not wait for the network communication to complete.
1240  */
1241 static int osc_lock_enqueue(const struct lu_env *env,
1242                             const struct cl_lock_slice *slice,
1243                             struct cl_io *_, __u32 enqflags)
1244 {
1245         struct osc_lock          *ols     = cl2osc_lock(slice);
1246         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1247         struct osc_object        *obj     = cl2osc(slice->cls_obj);
1248         struct osc_thread_info   *info    = osc_env_info(env);
1249         struct ldlm_res_id       *resname = &info->oti_resname;
1250         ldlm_policy_data_t       *policy  = &info->oti_policy;
1251         struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
1252         int result;
1253         ENTRY;
1254
1255         LASSERT(cl_lock_is_mutexed(lock));
1256         LASSERT(lock->cll_state == CLS_QUEUING);
1257         LASSERT(ols->ols_state == OLS_NEW);
1258
1259         osc_lock_build_res(env, obj, resname);
1260         osc_lock_build_policy(env, lock, policy);
1261         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1262         if (osc_deadlock_is_possible(env, lock))
1263                 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1264         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1265                 ols->ols_glimpse = 1;
1266
1267         result = osc_lock_enqueue_wait(env, ols);
1268         if (result == 0) {
1269                 if (!(enqflags & CEF_MUST))
1270                         /* try to convert this lock to a lockless lock */
1271                         osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1272                 if (!osc_lock_is_lockless(ols)) {
1273                         if (ols->ols_locklessable)
1274                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1275
1276                         /* a reference for lock, passed as an upcall cookie */
1277                         cl_lock_get(lock);
1278                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1279                         ols->ols_state = OLS_ENQUEUED;
1280
1281                         /*
1282                          * XXX: this is a possible blocking point as
1283                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1284                          * LDLM_CP_CALLBACK.
1285                          */
1286                         result = osc_enqueue_base(osc_export(obj), resname,
1287                                           &ols->ols_flags, policy,
1288                                           &ols->ols_lvb,
1289                                           obj->oo_oinfo->loi_kms_valid,
1290                                           osc_lock_upcall,
1291                                           ols, einfo, &ols->ols_handle,
1292                                           PTLRPCD_SET, 1);
1293                         if (result != 0) {
1294                                 lu_ref_del(&lock->cll_reference,
1295                                            "upcall", lock);
1296                                 cl_lock_put(env, lock);
1297                         }
1298                 } else {
1299                         ols->ols_state = OLS_GRANTED;
1300                 }
1301         }
1302         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1303         RETURN(result);
1304 }
1305
1306 static int osc_lock_wait(const struct lu_env *env,
1307                          const struct cl_lock_slice *slice)
1308 {
1309         struct osc_lock *olck = cl2osc_lock(slice);
1310         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1311
1312         LINVRNT(osc_lock_invariant(olck));
1313         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1314                 return 0;
1315
1316         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1317                      lock->cll_error == 0, olck->ols_lock != NULL));
1318
1319         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1320 }
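/*
 * The return statement above relies on the gcc "x ?: y" extension: it
 * reports lock->cll_error when it is non-zero, returns 0 once the lock has
 * reached OLS_GRANTED, and returns CLO_WAIT while the grant is still
 * pending.
 */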
1321
1322 /**
1323  * An implementation of cl_lock_operations::clo_use() method that pins cached
1324  * lock.
1325  */
1326 static int osc_lock_use(const struct lu_env *env,
1327                         const struct cl_lock_slice *slice)
1328 {
1329         struct osc_lock *olck = cl2osc_lock(slice);
1330         int rc;
1331
1332         LASSERT(!olck->ols_hold);
1333         /*
1334          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1335          * flag is not set. This protects us from a concurrent blocking ast.
1336          */
1337         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1338         if (rc == 0) {
1339                 olck->ols_hold = olck->ols_has_ref = 1;
1340                 olck->ols_state = OLS_GRANTED;
1341         } else {
1342                 struct cl_lock *lock;
1343
1344                 /*
1345                  * Lock is being cancelled somewhere within
1346                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1347                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1348                  * cl_lock mutex.
1349                  */
1350                 lock = slice->cls_lock;
1351                 LASSERT(lock->cll_state == CLS_CACHED);
1352                 LASSERT(lock->cll_users > 0);
1353                 LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING);
1354                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1355                  * lock. */
1356                 olck->ols_ast_wait = 1;
1357                 rc = CLO_WAIT;
1358         }
1359         return rc;
1360 }
1361
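/**
 * Writes out (and, if \a discard is set, discards) the pages covered by
 * \a ols by calling cl_lock_page_out() in a nested environment, and records
 * a successful flush in osc_lock::ols_flush.
 */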
1362 static int osc_lock_flush(struct osc_lock *ols, int discard)
1363 {
1364         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1365         struct cl_env_nest    nest;
1366         struct lu_env        *env;
1367         int result = 0;
1368
1369         env = cl_env_nested_get(&nest);
1370         if (!IS_ERR(env)) {
1371                 result = cl_lock_page_out(env, lock, discard);
1372                 cl_env_nested_put(&nest, env);
1373         } else
1374                 result = PTR_ERR(env);
1375         if (result == 0)
1376                 ols->ols_flush = 1;
1377         return result;
1378 }
1379
1380 /**
1381  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1382  * called (as part of cl_lock_cancel()) when a lock is cancelled either
1383  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
1384  * conflict with some other lock elsewhere in the cluster. This function does
1385  * the following:
1386  *
1387  *     - invalidates all pages protected by this lock (after sending dirty
1388  *       ones to the server, as necessary);
1389  *
1390  *     - decref's underlying ldlm lock;
1391  *
1392  *     - cancels ldlm lock (ldlm_cli_cancel()).
1393  */
1394 static void osc_lock_cancel(const struct lu_env *env,
1395                             const struct cl_lock_slice *slice)
1396 {
1397         struct cl_lock   *lock    = slice->cls_lock;
1398         struct osc_lock  *olck    = cl2osc_lock(slice);
1399         struct ldlm_lock *dlmlock = olck->ols_lock;
1400         int               result  = 0;
1401         int               discard;
1402
1403         LASSERT(cl_lock_is_mutexed(lock));
1404         LINVRNT(osc_lock_invariant(olck));
1405
1406         if (dlmlock != NULL) {
1407                 int do_cancel;
1408
1409                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1410                 result = osc_lock_flush(olck, discard);
1411                 if (olck->ols_hold)
1412                         osc_lock_unuse(env, slice);
1413
1414                 lock_res_and_lock(dlmlock);
1415         /* Now that we are the only user of the dlm read/write reference,
1416          * ->l_readers and ->l_writers should normally both be zero.
1417          * However, there is a corner case.
1418          * See bug 18829 for details. */
1419                 do_cancel = (dlmlock->l_readers == 0 &&
1420                              dlmlock->l_writers == 0);
1421                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1422                 unlock_res_and_lock(dlmlock);
1423                 if (do_cancel)
1424                         result = ldlm_cli_cancel(&olck->ols_handle);
1425                 if (result < 0)
1426                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1427                                       "lock %p cancel failed with error %d\n",
1428                                       lock, result);
1429         }
1430         olck->ols_state = OLS_CANCELLED;
1431         osc_lock_detach(env, olck);
1432 }
1433
1434 void cl_lock_page_list_fixup(const struct lu_env *env,
1435                              struct cl_io *io, struct cl_lock *lock,
1436                              struct cl_page_list *queue);
1437
1438 #ifdef INVARIANT_CHECK
1439 /**
1440  * Returns true iff there are pages under \a olck not protected by other
1441  * locks.
1442  */
1443 static int osc_lock_has_pages(struct osc_lock *olck)
1444 {
1445         struct cl_lock       *lock;
1446         struct cl_lock_descr *descr;
1447         struct cl_object     *obj;
1448         struct osc_object    *oob;
1449         struct cl_page_list  *plist;
1450         struct cl_page       *page;
1451         struct cl_env_nest    nest;
1452         struct cl_io         *io;
1453         struct lu_env        *env;
1454         int                   result;
1455
1456         env = cl_env_nested_get(&nest);
1457         if (!IS_ERR(env)) {
1458                 obj   = olck->ols_cl.cls_obj;
1459                 oob   = cl2osc(obj);
1460                 io    = &oob->oo_debug_io;
1461                 lock  = olck->ols_cl.cls_lock;
1462                 descr = &lock->cll_descr;
1463                 plist = &osc_env_info(env)->oti_plist;
1464                 cl_page_list_init(plist);
1465
1466                 mutex_lock(&oob->oo_debug_mutex);
1467
1468                 io->ci_obj = cl_object_top(obj);
1469                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1470                 cl_page_gang_lookup(env, obj, io,
1471                                     descr->cld_start, descr->cld_end, plist);
1472                 cl_lock_page_list_fixup(env, io, lock, plist);
1473                 if (plist->pl_nr > 0) {
1474                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1475                         cl_page_list_for_each(page, plist)
1476                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1477                 }
1478                 result = plist->pl_nr > 0;
1479                 cl_page_list_disown(env, io, plist);
1480                 cl_page_list_fini(env, plist);
1481                 cl_io_fini(env, io);
1482                 mutex_unlock(&oob->oo_debug_mutex);
1483                 cl_env_nested_put(&nest, env);
1484         } else
1485                 result = 0;
1486         return result;
1487 }
1488 #else
1489 # define osc_lock_has_pages(olck) (0)
1490 #endif /* INVARIANT_CHECK */
1491
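/**
 * Implements cl_lock_operations::clo_delete() method for osc layer: releases
 * the lock's dlm hold (if any) via osc_lock_unuse() and detaches it from the
 * underlying ldlm lock.
 */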
1492 static void osc_lock_delete(const struct lu_env *env,
1493                             const struct cl_lock_slice *slice)
1494 {
1495         struct osc_lock *olck;
1496
1497         olck = cl2osc_lock(slice);
1498         LINVRNT(osc_lock_invariant(olck));
1499         LINVRNT(!osc_lock_has_pages(olck));
1500
1501         if (olck->ols_hold)
1502                 osc_lock_unuse(env, slice);
1503         osc_lock_detach(env, olck);
1504 }
1505
1506 /**
1507  * Implements cl_lock_operations::clo_state() method for osc layer.
1508  *
1509  * Maintains osc_lock::ols_owner field.
1510  *
1511  * This assumes that a lock always enters CLS_HELD (from some other state) in
1512  * the same IO context as the one that requested the lock. This should not be
1513  * a problem, because the context is by definition shared by all activity
1514  * pertaining to the same high-level IO.
1515  */
1516 static void osc_lock_state(const struct lu_env *env,
1517                            const struct cl_lock_slice *slice,
1518                            enum cl_lock_state state)
1519 {
1520         struct osc_lock *lock = cl2osc_lock(slice);
1521         struct osc_io   *oio  = osc_env_io(env);
1522
1523         /*
1524          * XXX multiple io contexts can use the lock at the same time.
1525          */
1526         LINVRNT(osc_lock_invariant(lock));
1527         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1528                 LASSERT(lock->ols_owner == NULL);
1529                 lock->ols_owner = oio;
1530         } else if (state != CLS_HELD)
1531                 lock->ols_owner = NULL;
1532 }
1533
1534 static int osc_lock_print(const struct lu_env *env, void *cookie,
1535                           lu_printer_t p, const struct cl_lock_slice *slice)
1536 {
1537         struct osc_lock *lock = cl2osc_lock(slice);
1538
1539         /*
1540          * XXX print ldlm lock and einfo properly.
1541          */
1542         (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
1543              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1544              lock->ols_state, lock->ols_owner);
1545         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1546         return 0;
1547 }
1548
1549 static const struct cl_lock_operations osc_lock_ops = {
1550         .clo_fini    = osc_lock_fini,
1551         .clo_enqueue = osc_lock_enqueue,
1552         .clo_wait    = osc_lock_wait,
1553         .clo_unuse   = osc_lock_unuse,
1554         .clo_use     = osc_lock_use,
1555         .clo_delete  = osc_lock_delete,
1556         .clo_state   = osc_lock_state,
1557         .clo_cancel  = osc_lock_cancel,
1558         .clo_weigh   = osc_lock_weigh,
1559         .clo_print   = osc_lock_print
1560 };
1561
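/**
 * A lock is switched to the lockless operations only from within
 * osc_lock_enqueue() (via osc_lock_to_lockless()), so by the time a lock
 * carries these operations its enqueue has already been handled and this
 * method must never be reached.
 */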
1562 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1563                                      const struct cl_lock_slice *slice,
1564                                      struct cl_io *unused, __u32 enqflags)
1565 {
1566         LBUG();
1567         return 0;
1568 }
1569
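/**
 * clo_unuse() for a lockless lock does not move the lock to CLS_CACHED:
 * the top-level cl_lock is cancelled and deleted outright.
 */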
1570 static int osc_lock_lockless_unuse(const struct lu_env *env,
1571                                    const struct cl_lock_slice *slice)
1572 {
1573         struct osc_lock *ols = cl2osc_lock(slice);
1574         struct cl_lock *lock = slice->cls_lock;
1575
1576         LASSERT(ols->ols_state == OLS_GRANTED);
1577         LINVRNT(osc_lock_invariant(ols));
1578
1579         cl_lock_cancel(env, lock);
1580         cl_lock_delete(env, lock);
1581         return 0;
1582 }
1583
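/**
 * Cancellation of a lockless lock flushes (without discarding) the pages it
 * covers and marks the lock cancelled; no ldlm cancel is issued, as no dlm
 * lock backs a lockless lock.
 */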
1584 static void osc_lock_lockless_cancel(const struct lu_env *env,
1585                                      const struct cl_lock_slice *slice)
1586 {
1587         struct osc_lock   *ols  = cl2osc_lock(slice);
1588         int result;
1589
1590         result = osc_lock_flush(ols, 0);
1591         if (result)
1592                 CERROR("Pages for lockless lock %p were not purged (%d)\n",
1593                        ols, result);
1594         ols->ols_state = OLS_CANCELLED;
1595 }
1596
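/**
 * A lockless lock is granted during enqueue, so clo_wait() only has to
 * report an error, if any, recorded on the cl_lock.
 */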
1597 static int osc_lock_lockless_wait(const struct lu_env *env,
1598                                   const struct cl_lock_slice *slice)
1599 {
1600         struct osc_lock *olck = cl2osc_lock(slice);
1601         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1602
1603         LINVRNT(osc_lock_invariant(olck));
1604         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1605
1606         return lock->cll_error;
1607 }
1608
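/**
 * Implements cl_lock_operations::clo_state() for lockless locks: remembers
 * the owning IO when the lock enters CLS_HELD and, when the lock is on the
 * IO's host object, marks that IO as lockless.
 */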
1609 static void osc_lock_lockless_state(const struct lu_env *env,
1610                                     const struct cl_lock_slice *slice,
1611                                     enum cl_lock_state state)
1612 {
1613         struct osc_lock *lock = cl2osc_lock(slice);
1614         struct osc_io   *oio  = osc_env_io(env);
1615
1616         LINVRNT(osc_lock_invariant(lock));
1617         if (state == CLS_HELD) {
1618                 LASSERT(lock->ols_owner == NULL);
1619                 lock->ols_owner = oio;
1620
1621                 /* set the io to be lockless if this lock is for the
1622                  * io's host object */
1623                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1624                         oio->oi_lockless = 1;
1625         } else
1626                 lock->ols_owner = NULL;
1627 }
1628
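/**
 * A lockless lock is never matched against other lock requests:
 * clo_fits_into() unconditionally returns 0 so that the lock is not reused.
 */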
1629 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1630                                        const struct cl_lock_slice *slice,
1631                                        const struct cl_lock_descr *need,
1632                                        const struct cl_io *io)
1633 {
1634         return 0;
1635 }
1636
1637 static const struct cl_lock_operations osc_lock_lockless_ops = {
1638         .clo_fini      = osc_lock_fini,
1639         .clo_enqueue   = osc_lock_lockless_enqueue,
1640         .clo_wait      = osc_lock_lockless_wait,
1641         .clo_unuse     = osc_lock_lockless_unuse,
1642         .clo_state     = osc_lock_lockless_state,
1643         .clo_fits_into = osc_lock_lockless_fits_into,
1644         .clo_cancel    = osc_lock_lockless_cancel,
1645         .clo_print     = osc_lock_print
1646 };
1647
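/**
 * Allocates an osc_lock for \a lock on \a obj, initializes its ldlm enqueue
 * info, and adds it to the cl_lock as a slice using the regular
 * (ldlm-backed) osc_lock_ops.
 */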
1648 int osc_lock_init(const struct lu_env *env,
1649                   struct cl_object *obj, struct cl_lock *lock,
1650                   const struct cl_io *unused)
1651 {
1652         struct osc_lock *clk;
1653         int result;
1654
1655         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1656         if (clk != NULL) {
1657                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1658                 clk->ols_state = OLS_NEW;
1659                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1660                 result = 0;
1661         } else
1662                 result = -ENOMEM;
1663         return result;
1664 }
1665
1666
1667 /** @} osc */