1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 /** \addtogroup osc osc @{ */
42
43 #define DEBUG_SUBSYSTEM S_OSC
44
45 #ifdef __KERNEL__
46 # include <libcfs/libcfs.h>
47 #else
48 # include <liblustre.h>
49 #endif
50 /* fid_build_reg_res_name() */
51 #include <lustre_fid.h>
52
53 #include "osc_cl_internal.h"
54
55 /*****************************************************************************
56  *
57  * Type conversions.
58  *
59  */
60
61 static const struct cl_lock_operations osc_lock_ops;
62 static const struct cl_lock_operations osc_lock_lockless_ops;
63 static void osc_lock_to_lockless(const struct lu_env *env,
64                                  struct osc_lock *ols, int force);
65
66 int osc_lock_is_lockless(const struct osc_lock *olck)
67 {
68         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
69 }
70
71 /**
72  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
73  * pointer cannot be dereferenced, as lock is not protected from concurrent
74  * reclaim. This function is a helper for osc_lock_invariant().
75  */
76 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
77 {
78         struct ldlm_lock *lock;
79
80         lock = ldlm_handle2lock(handle);
81         if (lock != NULL)
82                 LDLM_LOCK_PUT(lock);
83         return lock;
84 }
85
86 /**
87  * Invariant that has to be true all of the time.
88  */
89 static int osc_lock_invariant(struct osc_lock *ols)
90 {
91         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
92         struct ldlm_lock *olock       = ols->ols_lock;
93         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
94
95         return
96                 ergo(osc_lock_is_lockless(ols),
97                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
98                 (ergo(olock != NULL, handle_used) &&
99                  ergo(olock != NULL,
100                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
101                  /*
102                   * Check that ->ols_handle and ->ols_lock are consistent, but
103                   * take into account that they are set at different times.
104                   */
105                  ergo(handle_used,
106                       ergo(lock != NULL && olock != NULL, lock == olock) &&
107                       ergo(lock == NULL, olock == NULL)) &&
108                  ergo(ols->ols_state == OLS_CANCELLED,
109                       olock == NULL && !handle_used) &&
110                  /*
111                   * DLM lock is destroyed only after we have seen cancellation
112                   * ast.
113                   */
114                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
115                       !olock->l_destroyed) &&
116                  ergo(ols->ols_state == OLS_GRANTED,
117                       olock != NULL &&
118                       olock->l_req_mode == olock->l_granted_mode &&
119                       ols->ols_hold));
120 }
121
122 /*****************************************************************************
123  *
124  * Lock operations.
125  *
126  */
127
128 /**
129  * Breaks a link between osc_lock and dlm_lock.
130  */
131 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
132 {
133         struct ldlm_lock *dlmlock;
134
135         spin_lock(&osc_ast_guard);
136         dlmlock = olck->ols_lock;
137         if (dlmlock == NULL) {
138                 spin_unlock(&osc_ast_guard);
139                 return;
140         }
141
142         olck->ols_lock = NULL;
143         /* wb(); --- for all who check (ols->ols_lock != NULL) before
144          * calling osc_lock_detach() */
145         dlmlock->l_ast_data = NULL;
146         olck->ols_handle.cookie = 0ULL;
147         spin_unlock(&osc_ast_guard);
148
149         lock_res_and_lock(dlmlock);
150         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
151                 struct cl_object *obj = olck->ols_cl.cls_obj;
152                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
153                 __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
154
155                 /* Update the kms. Need to loop all granted locks.
156                  * Not a problem for the client */
157                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
158                 unlock_res_and_lock(dlmlock);
159
160                 cl_object_attr_lock(obj);
161                 cl_object_attr_set(env, obj, attr, CAT_KMS);
162                 cl_object_attr_unlock(obj);
163         } else
164                 unlock_res_and_lock(dlmlock);
165
166         /* release a reference taken in osc_lock_upcall0(). */
167         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
168         LDLM_LOCK_RELEASE(dlmlock);
169 }
170
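/**
 * Releases the hold on the underlying ldlm lock: moves the osc_lock into
 * OLS_RELEASED state and drops the ldlm reference through osc_cancel_base().
 * Glimpse locks hold no reference and need no release.
 */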
171 static int osc_lock_unuse(const struct lu_env *env,
172                           const struct cl_lock_slice *slice)
173 {
174         struct osc_lock *ols = cl2osc_lock(slice);
175         int result;
176
177         LASSERT(ols->ols_state == OLS_GRANTED ||
178                 ols->ols_state == OLS_UPCALL_RECEIVED);
179         LINVRNT(osc_lock_invariant(ols));
180
181         if (ols->ols_glimpse) {
182                 LASSERT(ols->ols_hold == 0);
183                 return 0;
184         }
185         LASSERT(ols->ols_hold);
186
187         /*
188          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
189          * so that possible synchronous cancellation (that always happens
190          * e.g., for liblustre) sees that lock is released.
191          */
192         ols->ols_state = OLS_RELEASED;
193         ols->ols_hold = 0;
194         result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
195         ols->ols_has_ref = 0;
196         return result;
197 }
198
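/**
 * Finalizes an osc_lock slice: drops a still-held lock reference (if any),
 * detaches the osc_lock from its ldlm lock and frees the slice.
 */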
199 static void osc_lock_fini(const struct lu_env *env,
200                           struct cl_lock_slice *slice)
201 {
202         struct osc_lock  *ols = cl2osc_lock(slice);
203
204         LINVRNT(osc_lock_invariant(ols));
205         /*
206          * ->ols_hold can still be true at this point if, for example, a
207          * thread that requested a lock was killed (and released a reference
208          * to the lock) before a reply from the server was received. In this
209          * case the lock is destroyed immediately after the upcall.
210          */
211         if (ols->ols_hold)
212                 osc_lock_unuse(env, slice);
213         if (ols->ols_lock != NULL)
214                 osc_lock_detach(env, ols);
215
216         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
217 }
218
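/**
 * Builds the ldlm resource name for \a obj. Currently derived from the
 * object's ->loi_id and ->loi_gr, since ost servers do not speak idif-fids
 * yet (see the disabled fid_build_reg_res_name() branch below).
 */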
219 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
220                         struct ldlm_res_id *resname)
221 {
222         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
223         if (0) {
224                 /*
225                  * In the perfect world of the future, where ost servers talk
226                  * idif-fids...
227                  */
228                 fid_build_reg_res_name(fid, resname);
229         } else {
230                 /*
231                  * In reality, the ost server expects ->lsm_object_id and
232                  * ->lsm_object_gr in the resource name.
233                  */
234                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
235                                    resname);
236         }
237 }
238
239 static void osc_lock_build_policy(const struct lu_env *env,
240                                   const struct cl_lock *lock,
241                                   ldlm_policy_data_t *policy)
242 {
243         const struct cl_lock_descr *d = &lock->cll_descr;
244
245         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
246 }
247
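/**
 * Converts cl_lock enqueue flags (CEF_*) into ldlm flags (LDLM_*).
 */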
248 static int osc_enq2ldlm_flags(__u32 enqflags)
249 {
250         int result = 0;
251
252         LASSERT((enqflags & ~CEF_MASK) == 0);
253
254         if (enqflags & CEF_NONBLOCK)
255                 result |= LDLM_FL_BLOCK_NOWAIT;
256         if (enqflags & CEF_ASYNC)
257                 result |= LDLM_FL_HAS_INTENT;
258         if (enqflags & CEF_DISCARD_DATA)
259                 result |= LDLM_AST_DISCARD_DATA;
260         return result;
261 }
262
263 /**
264  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
265  * pointers. Initialized in osc_init().
266  */
267 spinlock_t osc_ast_guard;
268
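/**
 * Returns the osc_lock attached to \a dlm_lock through ->l_ast_data, taking
 * a trusted reference on the corresponding cl_lock, or NULL if no usable
 * osc_lock is attached. Callers balance this with osc_ast_data_put().
 */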
269 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
270 {
271         struct osc_lock *olck;
272
273         lock_res_and_lock(dlm_lock);
274         spin_lock(&osc_ast_guard);
275         olck = dlm_lock->l_ast_data;
276         if (olck != NULL) {
277                 struct cl_lock *lock = olck->ols_cl.cls_lock;
278                 /*
279                  * If osc_lock holds a reference on ldlm lock, return it even
280                  * when cl_lock is in CLS_FREEING state. This way
281                  *
282                  *         osc_ast_data_get(dlmlock) == NULL
283                  *
284                  * guarantees that all osc references on dlmlock were
285                  * released. osc_dlm_blocking_ast0() relies on that.
286                  */
287                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
288                         cl_lock_get_trust(lock);
289                         lu_ref_add_atomic(&lock->cll_reference,
290                                           "ast", cfs_current());
291                 } else
292                         olck = NULL;
293         }
294         spin_unlock(&osc_ast_guard);
295         unlock_res_and_lock(dlm_lock);
296         return olck;
297 }
298
299 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
300 {
301         struct cl_lock *lock;
302
303         lock = olck->ols_cl.cls_lock;
304         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
305         cl_lock_put(env, lock);
306 }
307
308 /**
309  * Updates object attributes from a lock value block (lvb) received together
310  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
311  * logic.
312  *
313  * This can be optimized to not update attributes when lock is a result of a
314  * local match.
315  */
316 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
317                                 int rc)
318 {
319         struct ost_lvb    *lvb;
320         struct cl_object  *obj;
321         struct lov_oinfo  *oinfo;
322         struct cl_attr    *attr;
323         unsigned           valid;
324
325         ENTRY;
326
327         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
328                 EXIT;
329                 return;
330         }
331
332         lvb   = &olck->ols_lvb;
333         obj   = olck->ols_cl.cls_obj;
334         oinfo = cl2osc(obj)->oo_oinfo;
335         attr  = &osc_env_info(env)->oti_attr;
336         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
337         cl_lvb2attr(attr, lvb);
338
339         cl_object_attr_lock(obj);
340         if (rc == 0) {
341                 struct ldlm_lock  *dlmlock;
342                 __u64 size;
343
344                 dlmlock = olck->ols_lock;
345                 LASSERT(dlmlock != NULL);
346
347                 size = lvb->lvb_size;
348                 /* Extend KMS up to the end of this lock and no further
349                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
350                 if (size > dlmlock->l_policy_data.l_extent.end)
351                         size = dlmlock->l_policy_data.l_extent.end + 1;
352                 if (size >= oinfo->loi_kms) {
353                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
354                                    ", kms="LPU64, lvb->lvb_size, size);
355                         valid |= CAT_KMS;
356                         attr->cat_kms = size;
357                 } else {
358                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
359                                    LPU64"; leaving kms="LPU64", end="LPU64,
360                                    lvb->lvb_size, oinfo->loi_kms,
361                                    dlmlock->l_policy_data.l_extent.end);
362                 }
363                 ldlm_lock_allow_match(dlmlock);
364         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
365                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
366                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
367         } else
368                 valid = 0;
369
370         if (valid != 0)
371                 cl_object_attr_set(env, obj, attr, valid);
372
373         cl_object_attr_unlock(obj);
374
375         EXIT;
376 }
377
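/**
 * Called when a lock is granted, from the upcall or from the completion ast:
 * records the actually granted extent in the cl_lock description, moves the
 * osc_lock into OLS_GRANTED state, updates attributes from the lvb and
 * signals waiting threads.
 */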
378 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
379                              struct ldlm_lock *dlmlock, int rc)
380 {
381         struct ldlm_extent   *ext;
382         struct cl_lock       *lock;
383         struct cl_lock_descr *descr;
384
385         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
386
387         ENTRY;
388         if (olck->ols_state != OLS_GRANTED) {
389                 lock  = olck->ols_cl.cls_lock;
390                 ext   = &dlmlock->l_policy_data.l_extent;
391                 descr = &osc_env_info(env)->oti_descr;
392                 descr->cld_obj = lock->cll_descr.cld_obj;
393
394                 /* XXX check that ->l_granted_mode is valid. */
395                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
396                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
397                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
398                 /*
399                  * tell upper layers the extent of the lock that was actually
400                  * granted
401                  */
402                 cl_lock_modify(env, lock, descr);
403                 LINVRNT(osc_lock_invariant(olck));
404                 olck->ols_state = OLS_GRANTED;
405                 osc_lock_lvb_update(env, olck, rc);
406                 cl_lock_signal(env, lock);
407         }
408         EXIT;
409 }
410
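/**
 * Successful-enqueue part of the lock upcall: binds the osc_lock to its ldlm
 * lock, takes references that are released in osc_lock_detach(), and finishes
 * granting if the ldlm lock is already granted.
 */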
411 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
412
413 {
414         struct ldlm_lock *dlmlock;
415
416         ENTRY;
417
418         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
419         LASSERT(dlmlock != NULL);
420
421         lock_res_and_lock(dlmlock);
422         spin_lock(&osc_ast_guard);
423         LASSERT(dlmlock->l_ast_data == olck);
424         LASSERT(olck->ols_lock == NULL);
425         olck->ols_lock = dlmlock;
426         spin_unlock(&osc_ast_guard);
427         unlock_res_and_lock(dlmlock);
428
429         /*
430          * Lock might be not yet granted. In this case, completion ast
431          * (osc_ldlm_completion_ast()) comes later and finishes lock
432          * granting.
433          */
434         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
435                 osc_lock_granted(env, olck, dlmlock, 0);
436         /*
437          * osc_enqueue_interpret() decrefs asynchronous locks; compensate
438          * for that here.
439          */
440         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
441         olck->ols_hold = olck->ols_has_ref = 1;
442
443         /* lock reference taken by ldlm_handle2lock_long() is owned by
444          * osc_lock and released in osc_lock_detach() */
445         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
446 }
447
448 /**
449  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
450  * received from a server, or after osc_enqueue_base() matched a local DLM
451  * lock.
452  */
453 static int osc_lock_upcall(void *cookie, int errcode)
454 {
455         struct osc_lock      *olck  = cookie;
456         struct cl_lock_slice *slice = &olck->ols_cl;
457         struct cl_lock       *lock  = slice->cls_lock;
458         struct lu_env        *env;
459
460         int refcheck;
461
462         ENTRY;
463         /*
464          * XXX environment should be created in ptlrpcd.
465          */
466         env = cl_env_get(&refcheck);
467         if (!IS_ERR(env)) {
468                 int rc;
469
470                 cl_lock_mutex_get(env, lock);
471
472                 LASSERT(lock->cll_state >= CLS_QUEUING);
473                 if (olck->ols_state == OLS_ENQUEUED) {
474                         olck->ols_state = OLS_UPCALL_RECEIVED;
475                         rc = ldlm_error2errno(errcode);
476                 } else if (olck->ols_state == OLS_CANCELLED) {
477                         rc = -EIO;
478                 } else {
479                         CERROR("Impossible state: %i\n", olck->ols_state);
480                         LBUG();
481                 }
482                 if (rc) {
483                         struct ldlm_lock *dlmlock;
484
485                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
486                         if (dlmlock != NULL) {
487                                 lock_res_and_lock(dlmlock);
488                                 spin_lock(&osc_ast_guard);
489                                 LASSERT(olck->ols_lock == NULL);
490                                 dlmlock->l_ast_data = NULL;
491                                 olck->ols_handle.cookie = 0ULL;
492                                 spin_unlock(&osc_ast_guard);
493                                 unlock_res_and_lock(dlmlock);
494                                 LDLM_LOCK_PUT(dlmlock);
495                         }
496                 } else {
497                         if (olck->ols_glimpse)
498                                 olck->ols_glimpse = 0;
499                         osc_lock_upcall0(env, olck);
500                 }
501
502                 /* Error handling, some errors are tolerable. */
503                 if (olck->ols_locklessable && rc == -EUSERS) {
504                         /* This is a tolerable error, turn this lock into
505                          * lockless lock.
506                          */
507                         osc_object_set_contended(cl2osc(slice->cls_obj));
508                         LASSERT(slice->cls_ops == &osc_lock_ops);
509
510                         /* Change this lock to ldlmlock-less lock. */
511                         osc_lock_to_lockless(env, olck, 1);
512                         olck->ols_state = OLS_GRANTED;
513                         rc = 0;
514                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
515                         osc_lock_lvb_update(env, olck, rc);
516                         cl_lock_delete(env, lock);
517                         /* Hide the error. */
518                         rc = 0;
519                 }
520
521                 if (rc == 0)
522                         /* on error, lock was signaled by cl_lock_error() */
523                         cl_lock_signal(env, lock);
524                 else
525                         cl_lock_error(env, lock, rc);
526
527                 cl_lock_mutex_put(env, lock);
528
529                 /* release cookie reference, acquired by osc_lock_enqueue() */
530                 lu_ref_del(&lock->cll_reference, "upcall", lock);
531                 cl_lock_put(env, lock);
532                 cl_env_put(env, &refcheck);
533         } else
534                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
535                 LBUG();
536         RETURN(errcode);
537 }
538
539 /**
540  * Core of osc_dlm_blocking_ast() logic.
541  */
542 static void osc_lock_blocking(const struct lu_env *env,
543                               struct ldlm_lock *dlmlock,
544                               struct osc_lock *olck, int blocking)
545 {
546         struct cl_lock *lock = olck->ols_cl.cls_lock;
547
548         LASSERT(olck->ols_lock == dlmlock);
549         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
550         LASSERT(!osc_lock_is_lockless(olck));
551
552         if (olck->ols_hold)
553                 /*
554                  * Lock might still be addref-ed here if, e.g., a blocking ast
555                  * is sent for a failed lock.
556                  */
557                 osc_lock_unuse(env, &olck->ols_cl);
558
559         if (blocking && olck->ols_state < OLS_BLOCKED)
560                 /*
561                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
562                  * because it recursively re-enters osc_lock_blocking(), with
563                  * the state set to OLS_CANCELLED.
564                  */
565                 olck->ols_state = OLS_BLOCKED;
566         /*
567          * cancel and destroy lock at least once no matter how blocking ast is
568          * entered (see comment above osc_ldlm_blocking_ast() for use
569          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
570          */
571         cl_lock_cancel(env, lock);
572         cl_lock_delete(env, lock);
573 }
574
575 /**
576  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
577  * and ldlm_lock caches.
578  */
579 static int osc_dlm_blocking_ast0(const struct lu_env *env,
580                                  struct ldlm_lock *dlmlock,
581                                  void *data, int flag)
582 {
583         struct osc_lock *olck;
584         struct cl_lock  *lock;
585         int result;
586         int cancel;
587
588         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
589
590         cancel = 0;
591         olck = osc_ast_data_get(dlmlock);
592         if (olck != NULL) {
593                 lock = olck->ols_cl.cls_lock;
594                 cl_lock_mutex_get(env, lock);
595                 LINVRNT(osc_lock_invariant(olck));
596                 if (olck->ols_ast_wait) {
597                         /* wake up osc_lock_use() */
598                         cl_lock_signal(env, lock);
599                         olck->ols_ast_wait = 0;
600                 }
601                 /*
602                  * Lock might have been canceled while this thread was
603                  * sleeping on the lock mutex, but olck is pinned in memory.
604                  */
605                 if (olck == dlmlock->l_ast_data) {
606                         /*
607                          * NOTE: DLM sends blocking AST's for failed locks
608                          *       (that are still in pre-OLS_GRANTED state)
609                          *       too, and they have to be canceled otherwise
610                          *       the DLM lock is never destroyed and is
611                          *       stuck in memory.
612                          *
613                          *       Alternatively, ldlm_cli_cancel() can be
614                          *       called here directly for osc_locks with
615                          *       ols_state < OLS_GRANTED to maintain an
616                          *       invariant that ->clo_cancel() is only called
617                          *       for locks that were granted.
618                          */
619                         LASSERT(data == olck);
620                         osc_lock_blocking(env, dlmlock,
621                                           olck, flag == LDLM_CB_BLOCKING);
622                 } else
623                         cancel = 1;
624                 cl_lock_mutex_put(env, lock);
625                 osc_ast_data_put(env, olck);
626         } else
627                 /*
628                  * DLM lock exists, but there is no cl_lock attached to it.
629                  * This is a `normal' race. cl_object and its cl_lock's can be
630                  * removed by memory pressure, together with all pages.
631                  */
632                 cancel = (flag == LDLM_CB_BLOCKING);
633
634         if (cancel) {
635                 struct lustre_handle *lockh;
636
637                 lockh = &osc_env_info(env)->oti_handle;
638                 ldlm_lock2handle(dlmlock, lockh);
639                 result = ldlm_cli_cancel(lockh);
640         } else
641                 result = 0;
642         return result;
643 }
644
645 /**
646  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
647  * some other lock, or is canceled. This function is installed as a
648  * ldlm_lock::l_blocking_ast() for client extent locks.
649  *
650  * Control flow is tricky, because ldlm uses the same call-back
651  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
652  *
653  * \param dlmlock lock for which ast occurred.
654  *
655  * \param new description of a conflicting lock in case of blocking ast.
656  *
657  * \param data value of dlmlock->l_ast_data
658  *
659  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
660  *             cancellation and blocking ast's.
661  *
662  * Possible use cases:
663  *
664  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
665  *       lock due to lock lru pressure, or explicit user request to purge
666  *       locks.
667  *
668  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
669  *       us that dlmlock conflicts with another lock that some client is
670  *       enqueuing. Lock is canceled.
671  *
672  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
673  *             ldlm_cli_cancel() that calls
674  *
675  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
676  *
677  *             recursively entering osc_ldlm_blocking_ast().
678  *
679  *     - client cancels lock voluntarily (e.g., as a part of early cancellation):
680  *
681  *           cl_lock_cancel()->
682  *             osc_lock_cancel()->
683  *               ldlm_cli_cancel()->
684  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
685  *
686  */
687 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
688                                  struct ldlm_lock_desc *new, void *data,
689                                  int flag)
690 {
691         struct lu_env     *env;
692         struct cl_env_nest nest;
693         int                result;
694
695         /*
696          * This can be called in the context of outer IO, e.g.,
697          *
698          *     cl_enqueue()->...
699          *       ->osc_enqueue_base()->...
700          *         ->ldlm_prep_elc_req()->...
701          *           ->ldlm_cancel_callback()->...
702          *             ->osc_ldlm_blocking_ast()
703          *
704          * new environment has to be created to not corrupt outer context.
705          */
706         env = cl_env_nested_get(&nest);
707         if (!IS_ERR(env))
708                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
709         else {
710                 result = PTR_ERR(env);
711                 /*
712                  * XXX This should never happen; if it does, the cl_lock is
713                  * stuck. A pre-allocated environment a la vvp_inode_fini_env
714                  * should be used.
715                  */
716                 LBUG();
717         }
718         if (result != 0) {
719                 if (result == -ENODATA)
720                         result = 0;
721                 else
722                         CERROR("BAST failed: %d\n", result);
723         }
724         cl_env_nested_put(&nest, env);
725         return result;
726 }
727
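/**
 * Completion ast for client extent locks (registered as einfo->ei_cb_cp in
 * osc_lock_build_einfo()): performs the dlm part of completion handling and
 * then propagates the result to the corresponding cl_lock.
 */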
728 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
729                                    int flags, void *data)
730 {
731         struct lu_env   *env;
732         void            *env_cookie;
733         struct osc_lock *olck;
734         struct cl_lock  *lock;
735         int refcheck;
736         int result;
737         int dlmrc;
738
739         /* first, do dlm part of the work */
740         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
741         /* then, notify cl_lock */
742         env_cookie = cl_env_reenter();
743         env = cl_env_get(&refcheck);
744         if (!IS_ERR(env)) {
745                 olck = osc_ast_data_get(dlmlock);
746                 if (olck != NULL) {
747                         lock = olck->ols_cl.cls_lock;
748                         cl_lock_mutex_get(env, lock);
749                         /*
750                          * ldlm_handle_cp_callback() copied LVB from request
751                          * to lock->l_lvb_data, store it in osc_lock.
752                          */
753                         LASSERT(dlmlock->l_lvb_data != NULL);
754                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
755                         if (olck->ols_lock == NULL)
756                                 /*
757                                  * upcall (osc_lock_upcall()) hasn't yet been
758                                  * called. Do nothing now, upcall will bind
759                                  * olck to dlmlock and signal the waiters.
760                                  *
761                                  * This maintains an invariant that osc_lock
762                                  * and ldlm_lock are always bound when
763                                  * osc_lock is in OLS_GRANTED state.
764                                  */
765                                 ;
766                         else if (dlmlock->l_granted_mode != LCK_MINMODE)
767                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
768                         if (dlmrc != 0)
769                                 cl_lock_error(env, lock, dlmrc);
770                         cl_lock_mutex_put(env, lock);
771                         osc_ast_data_put(env, olck);
772                         result = 0;
773                 } else
774                         result = -ELDLM_NO_LOCK_DATA;
775                 cl_env_put(env, &refcheck);
776         } else
777                 result = PTR_ERR(env);
778         cl_env_reexit(env_cookie);
779         return dlmrc ?: result;
780 }
781
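/**
 * Glimpse ast for client extent locks (registered as einfo->ei_cb_gl in
 * osc_lock_build_einfo()): packs an lvb obtained through cl_object_glimpse()
 * into the reply to a server glimpse request.
 */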
782 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
783 {
784         struct ptlrpc_request  *req  = data;
785         struct osc_lock        *olck;
786         struct cl_lock         *lock;
787         struct cl_object       *obj;
788         struct lu_env          *env;
789         struct ost_lvb         *lvb;
790         struct req_capsule     *cap;
791         int                     result;
792         int                     refcheck;
793
794         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
795
796         env = cl_env_get(&refcheck);
797         if (!IS_ERR(env)) {
798                 /*
799                  * osc_ast_data_get() has to go after environment is
800                  * allocated, because osc_ast_data_get() acquires a
801                  * reference to a lock, and it can only be released in
802                  * an environment.
803                  */
804                 olck = osc_ast_data_get(dlmlock);
805                 if (olck != NULL) {
806                         lock = olck->ols_cl.cls_lock;
807                         cl_lock_mutex_get(env, lock);
808                         cap = &req->rq_pill;
809                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
810                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
811                                              sizeof *lvb);
812                         result = req_capsule_server_pack(cap);
813                         if (result == 0) {
814                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
815                                 obj = lock->cll_descr.cld_obj;
816                                 result = cl_object_glimpse(env, obj, lvb);
817                         }
818                         cl_lock_mutex_put(env, lock);
819                         osc_ast_data_put(env, olck);
820                 } else {
821                         /*
822                          * These errors are normal races, so we don't want to
823                          * fill the console with messages by calling
824                          * ptlrpc_error()
825                          */
826                         lustre_pack_reply(req, 1, NULL, NULL);
827                         result = -ELDLM_NO_LOCK_DATA;
828                 }
829                 cl_env_put(env, &refcheck);
830         } else
831                 result = PTR_ERR(env);
832         req->rq_status = result;
833         return result;
834 }
835
836 static unsigned long osc_lock_weigh(const struct lu_env *env,
837                                     const struct cl_lock_slice *slice)
838 {
839         /*
840          * don't need to grab coh_page_guard since we don't care about the
841          * exact number of pages.
842          */
843         return cl_object_header(slice->cls_obj)->coh_pages;
844 }
845
846 /**
847  * Get the weight of dlm lock for early cancellation.
848  *
849  * XXX: it should return the pages covered by this \a dlmlock.
850  */
851 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
852 {
853         struct lu_env           *env;
854         int                      refcheck;
855         void                    *cookie;
856         struct osc_lock         *lock;
857         struct cl_lock          *cll;
858         unsigned long            weight;
859         ENTRY;
860
861         might_sleep();
862         cookie = cl_env_reenter();
863         /*
864          * osc_ldlm_weigh_ast has a complex context since it might be called
865          * because of lock cancellation or from user input. We have to make
866          * a new environment for it. It is probably safe to use the upper
867          * context because cl_lock_put() doesn't modify environment variables,
868          * but just in case ..
869          */
870         env = cl_env_get(&refcheck);
871         if (IS_ERR(env)) {
872                 /* Mostly due to lack of memory; tend to eliminate this lock */
873                 cl_env_reexit(cookie);
874                 RETURN(0);
875         }
876
877         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
878         lock = osc_ast_data_get(dlmlock);
879         if (lock == NULL) {
880                 /* cl_lock was destroyed because of memory pressure.
881                  * It is more reasonable to assign this type of lock
882                  * a lower cost.
883                  */
884                 GOTO(out, weight = 0);
885         }
886
887         cll = lock->ols_cl.cls_lock;
888         cl_lock_mutex_get(env, cll);
889         weight = cl_lock_weigh(env, cll);
890         cl_lock_mutex_put(env, cll);
891         osc_ast_data_put(env, lock);
892         EXIT;
893
894 out:
895         cl_env_put(env, &refcheck);
896         cl_env_reexit(cookie);
897         return weight;
898 }
899
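/**
 * Fills ldlm_enqueue_info for an extent lock: type, mode and the ast
 * call-backs defined above. Glimpse (CLM_PHANTOM) locks are enqueued in read
 * mode for now.
 */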
900 static void osc_lock_build_einfo(const struct lu_env *env,
901                                  const struct cl_lock *clock,
902                                  struct osc_lock *lock,
903                                  struct ldlm_enqueue_info *einfo)
904 {
905         enum cl_lock_mode mode;
906
907         mode = clock->cll_descr.cld_mode;
908         if (mode == CLM_PHANTOM)
909                 /*
910                  * For now, enqueue all glimpse locks in read mode. In the
911                  * future, client might choose to enqueue LCK_PW lock for
912                  * glimpse on a file opened for write.
913                  */
914                 mode = CLM_READ;
915
916         einfo->ei_type   = LDLM_EXTENT;
917         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
918         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
919         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
920         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
921         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
922         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
923 }
924
925 /**
926  * Cancels \a conflict lock and waits until it reaches CLS_FREEING state. This
927  * is called as a part of enqueuing to cancel conflicting locks early.
928  *
929  * \retval            0: success, \a conflict was cancelled and destroyed.
930  *
931  * \retval   CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
932  *                       released in the process. Repeat enqueuing.
933  *
934  * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
935  *                       either \a lock is non-blocking, or current thread
936  *                       holds other locks, that prevent it from waiting
937  *                       for cancel to complete.
938  *
939  * \retval          -ve: other error, including -EINTR.
940  *
941  */
942 static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
943                                 struct cl_lock *conflict, int canwait)
944 {
945         int rc;
946
947         LASSERT(cl_lock_is_mutexed(lock));
948         LASSERT(cl_lock_is_mutexed(conflict));
949
950         rc = 0;
951         if (conflict->cll_state != CLS_FREEING) {
952                 cl_lock_cancel(env, conflict);
953                 cl_lock_delete(env, conflict);
954                 if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
955                         rc = -EWOULDBLOCK;
956                         if (cl_lock_nr_mutexed(env) > 2)
957                                 /*
958                                  * If mutexes of locks other than @lock and
959                                  * @scan are held by the current thread, it
960                                  * cannot wait on @scan state change in a
961                                  * dead-lock safe manner, so simply skip early
962                                  * cancellation in this case.
963                                  *
964                                  * This means that early cancellation doesn't
965                                  * work when there is even slight mutex
966                                  * contention, as top-lock's mutex is usually
967                                  * held at this time.
968                                  */
969                                 ;
970                         else if (canwait) {
971                                 /* Waiting for @scan to be destroyed */
972                                 cl_lock_mutex_put(env, lock);
973                                 do {
974                                         rc = cl_lock_state_wait(env, conflict);
975                                 } while (!rc &&
976                                          conflict->cll_state < CLS_FREEING);
977                                 /* mutex was released, repeat enqueue. */
978                                 rc = rc ?: CLO_REPEAT;
979                                 cl_lock_mutex_get(env, lock);
980                         }
981                 }
982                 LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
983                 CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
984                        conflict, rc ? "not":"", rc);
985         }
986         return rc;
987 }
988
989 /**
990  * Determine if the lock should be converted into a lockless lock.
991  *
992  * Steps to check:
993  * - whether the lock has an explicit requirement for a non-lockless lock;
994  * - the io lock request type ci_lockreq;
995  * - send the enqueue rpc to the ost to make a further decision;
996  * - special treatment for lockless truncate locks.
997  *
998  *  Additional policy can be implemented here, e.g., never do lockless-io
999  *  for large extents.
1000  */
1001 static void osc_lock_to_lockless(const struct lu_env *env,
1002                                  struct osc_lock *ols, int force)
1003 {
1004         struct cl_lock_slice *slice = &ols->ols_cl;
1005         struct cl_lock *lock        = slice->cls_lock;
1006
1007         LASSERT(ols->ols_state == OLS_NEW ||
1008                 ols->ols_state == OLS_UPCALL_RECEIVED);
1009
1010         if (force) {
1011                 ols->ols_locklessable = 1;
1012                 LASSERT(cl_lock_is_mutexed(lock));
1013                 slice->cls_ops = &osc_lock_lockless_ops;
1014         } else {
1015                 struct osc_io *oio     = osc_env_io(env);
1016                 struct cl_io  *io      = oio->oi_cl.cis_io;
1017                 struct cl_object *obj  = slice->cls_obj;
1018                 struct osc_object *oob = cl2osc(obj);
1019                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1020                 struct obd_connect_data *ocd;
1021
1022                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1023                         io->ci_lockreq == CILR_MAYBE ||
1024                         io->ci_lockreq == CILR_NEVER);
1025
1026                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1027                 ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
1028                                 (io->ci_lockreq == CILR_MAYBE) &&
1029                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1030                 if (io->ci_lockreq == CILR_NEVER ||
1031                         /* lockless IO */
1032                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1033                         /* lockless truncate */
1034                     (io->ci_type == CIT_TRUNC &&
1035                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1036                       osd->od_lockless_truncate)) {
1037                         ols->ols_locklessable = 1;
1038                         slice->cls_ops = &osc_lock_lockless_ops;
1039                 }
1040         }
1041 }
1042
1043 /**
1044  * Cancel all conflicting locks and wait for them to be destroyed.
1045  *
1046  * This function is used for two purposes:
1047  *
1048  *     - early cancel all conflicting locks before starting IO, and
1049  *
1050  *     - guarantee that pages added to the page cache by lockless IO are never
1051  *       covered by locks other than lockless IO lock, and, hence, are not
1052  *       visible to other threads.
1053  */
1054 static int osc_lock_enqueue_wait(const struct lu_env *env,
1055                                  const struct osc_lock *olck)
1056 {
1057         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1058         struct cl_lock_descr    *descr   = &lock->cll_descr;
1059         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1060         struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
1061         struct cl_lock          *scan;
1062         struct cl_lock          *temp;
1063         int lockless                     = osc_lock_is_lockless(olck);
1064         int rc                           = 0;
1065         int canwait;
1066         int stop;
1067         ENTRY;
1068
1069         LASSERT(cl_lock_is_mutexed(lock));
1070         LASSERT(lock->cll_state == CLS_QUEUING);
1071
1072         /*
1073          * XXX This function could be sped up if we had asynchronous
1074          * cancellation.
1075          */
1076
1077         canwait =
1078                 !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
1079                 cl_lock_nr_mutexed(env) == 1;
1080         cl_lock_closure_init(env, closure, lock, canwait);
1081         spin_lock(&hdr->coh_lock_guard);
1082         list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
1083                 if (scan == lock)
1084                         continue;
1085
1086                 if (scan->cll_state < CLS_QUEUING ||
1087                     scan->cll_state == CLS_FREEING ||
1088                     scan->cll_descr.cld_start > descr->cld_end ||
1089                     scan->cll_descr.cld_end < descr->cld_start)
1090                         continue;
1091
1092                 /* overlapped and living locks. */
1093                 /* A tricky case for lockless pages:
1094                  * We need to cancel the compatible locks if we're enqueuing
1095                  * a lockless lock, for example:
1096                  * imagine that client has PR lock on [0, 1000], and thread T0
1097                  * is doing lockless IO in [500, 1500] region. Concurrent
1098                  * thread T1 can see lockless data in [500, 1000], which is
1099                  * wrong, because these data are possibly stale.
1100                  */
1101                 if (!lockless && cl_lock_compatible(scan, lock))
1102                         continue;
1103
1104                 /* Now @scan is conflicting with @lock, which means the current
1105                  * thread has to wait for @scan to be destroyed. */
1106                 cl_lock_get_trust(scan);
1107                 if (&temp->cll_linkage != &hdr->coh_locks)
1108                         cl_lock_get_trust(temp);
1109                 spin_unlock(&hdr->coh_lock_guard);
1110                 lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
1111
1112                 LASSERT(list_empty(&closure->clc_list));
1113                 rc = cl_lock_closure_build(env, scan, closure);
1114                 if (rc == 0) {
1115                         rc = osc_lock_cancel_wait(env, lock, scan, canwait);
1116                         cl_lock_disclosure(env, closure);
1117                         if (rc == -EWOULDBLOCK)
1118                                 rc = 0;
1119                 }
1120                 if (rc == CLO_REPEAT && !canwait)
1121                         /* cannot wait... no early cancellation. */
1122                         rc = 0;
1123
1124                 lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
1125                 cl_lock_put(env, scan);
1126                 spin_lock(&hdr->coh_lock_guard);
1127                 /*
1128                  * Lock list could have been modified, while spin-lock was
1129                  * released. Check that it is safe to continue.
1130                  */
1131                 stop = list_empty(&temp->cll_linkage);
1132                 if (&temp->cll_linkage != &hdr->coh_locks)
1133                         cl_lock_put(env, temp);
1134                 if (stop || rc != 0)
1135                         break;
1136         }
1137         spin_unlock(&hdr->coh_lock_guard);
1138         cl_lock_closure_fini(closure);
1139         RETURN(rc);
1140 }
1141
1142 /**
1143  * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
1144  *
1145  *     - Thread0: obtains PR:[0, 10]. Lock is busy.
1146  *
1147  *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
1148  *       PR:[0, 10], but cancellation of busy lock is postponed.
1149  *
1150  *     - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
1151  *       PW:[5, 50], and thread0 waits for the lock completion never
1152  *       releasing PR:[0, 10]---deadlock.
1153  *
1154  * The second PR lock can be a glimpse lock (it is to deal with that situation
1155  * that ll_glimpse_size() has a second argument, preventing local match of
1156  * not-yet-granted locks, see bug 10295). A similar situation is possible in
1157  * the case of a memory-mapped user-level buffer.
1158  *
1159  * To prevent this we can detect a situation when current "thread" or "io"
1160  * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
1161  * the ols->ols_flags, or prevent local match with PW locks.
1162  */
1163 static int osc_deadlock_is_possible(const struct lu_env *env,
1164                                     struct cl_lock *lock)
1165 {
1166         struct cl_object        *obj;
1167         struct cl_object_header *head;
1168         struct cl_lock          *scan;
1169         struct osc_io           *oio;
1170
1171         int result;
1172
1173         ENTRY;
1174
1175         LASSERT(cl_lock_is_mutexed(lock));
1176
1177         oio  = osc_env_io(env);
1178         obj  = lock->cll_descr.cld_obj;
1179         head = cl_object_header(obj);
1180
1181         result = 0;
1182         spin_lock(&head->coh_lock_guard);
1183         list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1184                 if (scan != lock) {
1185                         struct osc_lock *oscan;
1186
1187                         oscan = osc_lock_at(scan);
1188                         LASSERT(oscan != NULL);
1189                         if (oscan->ols_owner == oio) {
1190                                 result = 1;
1191                                 break;
1192                         }
1193                 }
1194         }
1195         spin_unlock(&head->coh_lock_guard);
1196         RETURN(result);
1197 }
1198
1199 /**
1200  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1201  * layer. This initiates ldlm enqueue:
1202  *
1203  *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
1204  *
1205  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1206  *
1207  *     - calls osc_enqueue_base() to do actual enqueue.
1208  *
1209  * osc_enqueue_base() is supplied with an upcall function that is executed
1210  * when lock is received either after a local cached ldlm lock is matched, or
1211  * when a reply from the server is received.
1212  *
1213  * This function does not wait for the network communication to complete.
1214  */
1215 static int osc_lock_enqueue(const struct lu_env *env,
1216                             const struct cl_lock_slice *slice,
1217                             struct cl_io *_, __u32 enqflags)
1218 {
1219         struct osc_lock          *ols     = cl2osc_lock(slice);
1220         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1221         struct osc_object        *obj     = cl2osc(slice->cls_obj);
1222         struct osc_thread_info   *info    = osc_env_info(env);
1223         struct ldlm_res_id       *resname = &info->oti_resname;
1224         ldlm_policy_data_t       *policy  = &info->oti_policy;
1225         struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
1226         int result;
1227         ENTRY;
1228
1229         LASSERT(cl_lock_is_mutexed(lock));
1230         LASSERT(lock->cll_state == CLS_QUEUING);
1231         LASSERT(ols->ols_state == OLS_NEW);
1232
1233         osc_lock_build_res(env, obj, resname);
1234         osc_lock_build_policy(env, lock, policy);
1235         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1236         if (osc_deadlock_is_possible(env, lock))
1237                 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1238         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1239                 ols->ols_glimpse = 1;
1240
1241         result = osc_lock_enqueue_wait(env, ols);
1242         if (result == 0) {
1243                 if (!(enqflags & CEF_MUST))
1244                         /* try to convert this lock to a lockless lock */
1245                         osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1246                 if (!osc_lock_is_lockless(ols)) {
1247                         if (ols->ols_locklessable)
1248                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1249
1250                         /* a reference for lock, passed as an upcall cookie */
1251                         cl_lock_get(lock);
1252                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1253                         ols->ols_state = OLS_ENQUEUED;
1254
1255                         /*
1256                          * XXX: this is possible blocking point as
1257                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1258                          * LDLM_CP_CALLBACK.
1259                          */
1260                         result = osc_enqueue_base(osc_export(obj), resname,
1261                                           &ols->ols_flags, policy,
1262                                           &ols->ols_lvb,
1263                                           obj->oo_oinfo->loi_kms_valid,
1264                                           osc_lock_upcall,
1265                                           ols, einfo, &ols->ols_handle,
1266                                           PTLRPCD_SET, 1);
1267                         if (result != 0) {
1268                                 lu_ref_del(&lock->cll_reference,
1269                                            "upcall", lock);
1270                                 cl_lock_put(env, lock);
1271                         }
1272                 } else {
1273                         ols->ols_state = OLS_GRANTED;
1274                 }
1275         }
1276
1277         RETURN(result);
1278 }
1279
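/**
 * Waits for the enqueue to complete: returns 0 once the lock is granted (or,
 * for glimpse locks, once the upcall was received), the lock error if one was
 * set, and CLO_WAIT otherwise.
 */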
1280 static int osc_lock_wait(const struct lu_env *env,
1281                          const struct cl_lock_slice *slice)
1282 {
1283         struct osc_lock *olck = cl2osc_lock(slice);
1284         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1285
1286         LINVRNT(osc_lock_invariant(olck));
1287         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1288                 return 0;
1289
1290         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1291                      lock->cll_error == 0, olck->ols_lock != NULL));
1292
1293         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1294 }
1295
1296 /**
1297  * An implementation of cl_lock_operations::clo_use() method that pins cached
1298  * lock.
1299  */
1300 static int osc_lock_use(const struct lu_env *env,
1301                         const struct cl_lock_slice *slice)
1302 {
1303         struct osc_lock *olck = cl2osc_lock(slice);
1304         int rc;
1305
1306         LASSERT(!olck->ols_hold);
1307         /*
1308          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1309          * flag is not set. This protects us from a concurrent blocking ast.
1310          */
1311         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1312         if (rc == 0) {
1313                 olck->ols_hold = olck->ols_has_ref = 1;
1314                 olck->ols_state = OLS_GRANTED;
1315         } else {
1316                 struct cl_lock *lock;
1317
1318                 /*
1319                  * Lock is being cancelled somewhere within
1320                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1321                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1322                  * cl_lock mutex.
1323                  */
1324                 lock = slice->cls_lock;
1325                 LASSERT(lock->cll_state == CLS_CACHED);
1326                 LASSERT(lock->cll_users > 0);
1327                 LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING);
1328                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1329                  * lock.*/
1330                 olck->ols_ast_wait = 1;
1331                 rc = CLO_WAIT;
1332         }
1333         return rc;
1334 }
1335
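/**
 * Pushes out pages covered by \a ols via cl_lock_page_out() in a nested
 * environment, honouring the \a discard flag; ->ols_flush is set on success.
 */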
1336 static int osc_lock_flush(struct osc_lock *ols, int discard)
1337 {
1338         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1339         struct cl_env_nest    nest;
1340         struct lu_env        *env;
1341         int result = 0;
1342
1343         env = cl_env_nested_get(&nest);
1344         if (!IS_ERR(env)) {
1345                 result = cl_lock_page_out(env, lock, discard);
1346                 cl_env_nested_put(&nest, env);
1347         } else
1348                 result = PTR_ERR(env);
1349         if (result == 0)
1350                 ols->ols_flush = 1;
1351         return result;
1352 }
1353
1354 /**
1355  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1356  * called (as part of cl_lock_cancel()) when a lock is canceled either
1357  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
1358  * conflict with some other lock somewhere in the cluster. This function
1359  * does the following:
1360  *
1361  *     - invalidates all pages protected by this lock (after sending dirty
1362  *       ones to the server, as necessary);
1363  *
1364  *     - decref's underlying ldlm lock;
1365  *
1366  *     - cancels ldlm lock (ldlm_cli_cancel()).
1367  */
1368 static void osc_lock_cancel(const struct lu_env *env,
1369                             const struct cl_lock_slice *slice)
1370 {
1371         struct cl_lock   *lock    = slice->cls_lock;
1372         struct osc_lock  *olck    = cl2osc_lock(slice);
1373         struct ldlm_lock *dlmlock = olck->ols_lock;
1374         int               result;
1375         int               discard;
1376
1377         LASSERT(cl_lock_is_mutexed(lock));
1378         LINVRNT(osc_lock_invariant(olck));
1379
1380         if (dlmlock != NULL) {
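                     /* discard pages rather than writing them back when the
                      * dlm lock carries LDLM_FL_DISCARD_DATA */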
1381                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1382                 result = osc_lock_flush(olck, discard);
1383                 if (olck->ols_hold)
1384                         osc_lock_unuse(env, slice);
1385                 LASSERT(dlmlock->l_readers == 0 && dlmlock->l_writers == 0);
1386                 result = ldlm_cli_cancel(&olck->ols_handle);
1387                 if (result < 0)
1388                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1389                                       "failed to cancel lock %p (error %d)\n",
1390                                       lock, result);
1391         }
1392         olck->ols_state = OLS_CANCELLED;
1393         osc_lock_detach(env, olck);
1394 }
1395
1396 void cl_lock_page_list_fixup(const struct lu_env *env,
1397                              struct cl_io *io, struct cl_lock *lock,
1398                              struct cl_page_list *queue);
1399
1400 #ifdef INVARIANT_CHECK
1401 /**
1402  * Returns true iff there are pages under \a olck not protected by other
1403  * locks.
1404  */
1405 static int osc_lock_has_pages(struct osc_lock *olck)
1406 {
1407         struct cl_lock       *lock;
1408         struct cl_lock_descr *descr;
1409         struct cl_object     *obj;
1410         struct osc_object    *oob;
1411         struct cl_page_list  *plist;
1412         struct cl_page       *page;
1413         struct cl_env_nest    nest;
1414         struct cl_io         *io;
1415         struct lu_env        *env;
1416         int                   result;
1417
1418         env = cl_env_nested_get(&nest);
1419         if (!IS_ERR(env)) {
1420                 obj   = olck->ols_cl.cls_obj;
1421                 oob   = cl2osc(obj);
1422                 io    = &oob->oo_debug_io;
1423                 lock  = olck->ols_cl.cls_lock;
1424                 descr = &lock->cll_descr;
1425                 plist = &osc_env_info(env)->oti_plist;
1426                 cl_page_list_init(plist);
1427
1428                 mutex_lock(&oob->oo_debug_mutex);
1429
1430                 io->ci_obj = cl_object_top(obj);
1431                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
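                     /* gather pages in the lock's extent; cl_lock_page_list_fixup()
                      * then drops pages covered by some other lock */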
1432                 cl_page_gang_lookup(env, obj, io,
1433                                     descr->cld_start, descr->cld_end, plist);
1434                 cl_lock_page_list_fixup(env, io, lock, plist);
1435                 if (plist->pl_nr > 0) {
1436                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1437                         cl_page_list_for_each(page, plist)
1438                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1439                 }
1440                 result = plist->pl_nr > 0;
1441                 cl_page_list_disown(env, io, plist);
1442                 cl_page_list_fini(env, plist);
1443                 cl_io_fini(env, io);
1444                 mutex_unlock(&oob->oo_debug_mutex);
1445                 cl_env_nested_put(&nest, env);
1446         } else
1447                 result = 0;
1448         return result;
1449 }
1450 #else
1451 # define osc_lock_has_pages(olck) (0)
1452 #endif /* INVARIANT_CHECK */
1453
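     /**
      * Implements cl_lock_operations::clo_delete() method for osc layer:
      * drops the hold on the underlying ldlm lock, if any, and detaches it
      * from the cl_lock being deleted.
      */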
1454 static void osc_lock_delete(const struct lu_env *env,
1455                             const struct cl_lock_slice *slice)
1456 {
1457         struct osc_lock *olck;
1458
1459         olck = cl2osc_lock(slice);
1460         LINVRNT(osc_lock_invariant(olck));
1461         LINVRNT(!osc_lock_has_pages(olck));
1462
1463         if (olck->ols_hold)
1464                 osc_lock_unuse(env, slice);
1465         osc_lock_detach(env, olck);
1466 }
1467
1468 /**
1469  * Implements cl_lock_operations::clo_state() method for osc layer.
1470  *
1471  * Maintains osc_lock::ols_owner field.
1472  *
1473  * This assumes that a lock always enters CLS_HELD (from some other state) in
1474  * the same IO context as the one that requested the lock. This should not be
1475  * a problem, because the context is by definition shared by all activity
1476  * pertaining to the same high-level IO.
1477  */
1478 static void osc_lock_state(const struct lu_env *env,
1479                            const struct cl_lock_slice *slice,
1480                            enum cl_lock_state state)
1481 {
1482         struct osc_lock *lock = cl2osc_lock(slice);
1483         struct osc_io   *oio  = osc_env_io(env);
1484
1485         /*
1486          * XXX multiple io contexts can use the lock at the same time.
1487          */
1488         LINVRNT(osc_lock_invariant(lock));
1489         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1490                 LASSERT(lock->ols_owner == NULL);
1491                 lock->ols_owner = oio;
1492         } else if (state != CLS_HELD)
1493                 lock->ols_owner = NULL;
1494 }
1495
1496 static int osc_lock_print(const struct lu_env *env, void *cookie,
1497                           lu_printer_t p, const struct cl_lock_slice *slice)
1498 {
1499         struct osc_lock *lock = cl2osc_lock(slice);
1500
1501         /*
1502          * XXX print ldlm lock and einfo properly.
1503          */
1504         (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
1505              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1506              lock->ols_state, lock->ols_owner);
1507         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1508         return 0;
1509 }
1510
1511 static const struct cl_lock_operations osc_lock_ops = {
1512         .clo_fini    = osc_lock_fini,
1513         .clo_enqueue = osc_lock_enqueue,
1514         .clo_wait    = osc_lock_wait,
1515         .clo_unuse   = osc_lock_unuse,
1516         .clo_use     = osc_lock_use,
1517         .clo_delete  = osc_lock_delete,
1518         .clo_state   = osc_lock_state,
1519         .clo_cancel  = osc_lock_cancel,
1520         .clo_weigh   = osc_lock_weigh,
1521         .clo_print   = osc_lock_print
1522 };
1523
1524 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1525                                      const struct cl_lock_slice *slice,
1526                                      struct cl_io *_, __u32 enqflags)
1527 {
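             /* never reached: a lockless lock is only created by converting
              * an already enqueued osc lock (osc_lock_to_lockless()), so an
              * enqueue through this slice is a bug */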
1528         LBUG();
1529         return 0;
1530 }
1531
1532 static int osc_lock_lockless_unuse(const struct lu_env *env,
1533                                    const struct cl_lock_slice *slice)
1534 {
1535         struct osc_lock *ols = cl2osc_lock(slice);
1536         struct cl_lock *lock = slice->cls_lock;
1537
1538         LASSERT(ols->ols_state == OLS_GRANTED);
1539         LINVRNT(osc_lock_invariant(ols));
1540
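             /*
              * A lockless lock cannot be cached: cancel and delete it outright
              * instead of caching it like a regular osc lock.
              */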
1541         cl_lock_cancel(env, lock);
1542         cl_lock_delete(env, lock);
1543         return 0;
1544 }
1545
1546 static void osc_lock_lockless_cancel(const struct lu_env *env,
1547                                      const struct cl_lock_slice *slice)
1548 {
1549         struct osc_lock   *ols  = cl2osc_lock(slice);
1550         int result;
1551
1552         result = osc_lock_flush(ols, 0);
1553         if (result)
1554                 CERROR("Pages for lockless lock %p were not purged (%d)\n",
1555                        ols, result);
1556         ols->ols_state = OLS_CANCELLED;
1557 }
1558
1559 static int osc_lock_lockless_wait(const struct lu_env *env,
1560                                   const struct cl_lock_slice *slice)
1561 {
1562         struct osc_lock *olck = cl2osc_lock(slice);
1563         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1564
1565         LINVRNT(osc_lock_invariant(olck));
1566         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1567
1568         return lock->cll_error;
1569 }
1570
1571 static void osc_lock_lockless_state(const struct lu_env *env,
1572                                     const struct cl_lock_slice *slice,
1573                                     enum cl_lock_state state)
1574 {
1575         struct osc_lock *lock = cl2osc_lock(slice);
1576         struct osc_io   *oio  = osc_env_io(env);
1577
1578         LINVRNT(osc_lock_invariant(lock));
1579         if (state == CLS_HELD) {
1580                 LASSERT(lock->ols_owner == NULL);
1581                 lock->ols_owner = oio;
1582
1583                 /* set the io to be lockless if this lock is for the io's
1584                  * host object */
1585                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1586                         oio->oi_lockless = 1;
1587         } else
1588                 lock->ols_owner = NULL;
1589 }
1590
1591 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1592                                        const struct cl_lock_slice *slice,
1593                                        const struct cl_lock_descr *need,
1594                                        const struct cl_io *io)
1595 {
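             /* a lockless lock never matches any other lock request */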
1596         return 0;
1597 }
1598
1599 static const struct cl_lock_operations osc_lock_lockless_ops = {
1600         .clo_fini      = osc_lock_fini,
1601         .clo_enqueue   = osc_lock_lockless_enqueue,
1602         .clo_wait      = osc_lock_lockless_wait,
1603         .clo_unuse     = osc_lock_lockless_unuse,
1604         .clo_state     = osc_lock_lockless_state,
1605         .clo_fits_into = osc_lock_lockless_fits_into,
1606         .clo_cancel    = osc_lock_lockless_cancel,
1607         .clo_print     = osc_lock_print
1608 };
1609
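     /**
      * Allocates the osc_lock slice for \a lock from osc_lock_kmem, fills in
      * the ldlm enqueue info, sets the initial OLS_NEW state and adds the
      * slice to \a lock. Returns 0 on success or -ENOMEM if the allocation
      * fails.
      */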
1610 int osc_lock_init(const struct lu_env *env,
1611                   struct cl_object *obj, struct cl_lock *lock,
1612                   const struct cl_io *_)
1613 {
1614         struct osc_lock *clk;
1615         int result;
1616
1617         OBD_SLAB_ALLOC_PTR(clk, osc_lock_kmem);
1618         if (clk != NULL) {
1619                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1620                 clk->ols_state = OLS_NEW;
1621                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1622                 result = 0;
1623         } else
1624                 result = -ENOMEM;
1625         return result;
1626 }
1627
1628
1629 /** @} osc */