lustre/osc/osc_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 /** \addtogroup osc osc @{ */
42
43 #define DEBUG_SUBSYSTEM S_OSC
44
45 #ifdef __KERNEL__
46 # include <libcfs/libcfs.h>
47 #else
48 # include <liblustre.h>
49 #endif
50 /* fid_build_reg_res_name() */
51 #include <lustre_fid.h>
52
53 #include "osc_cl_internal.h"
54
55 /*****************************************************************************
56  *
57  * Type conversions.
58  *
59  */
60
61 static const struct cl_lock_operations osc_lock_ops;
62 static const struct cl_lock_operations osc_lock_lockless_ops;
63
64 int osc_lock_is_lockless(const struct osc_lock *olck)
65 {
66         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
67 }
68
69 /**
70  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
71  * pointer cannot be dereferenced, as lock is not protected from concurrent
72  * reclaim. This function is a helper for osc_lock_invariant().
73  */
74 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
75 {
76         struct ldlm_lock *lock;
77
78         lock = ldlm_handle2lock(handle);
79         if (lock != NULL)
80                 LDLM_LOCK_PUT(lock);
81         return lock;
82 }
83
84 /**
85  * Invariant that has to be true all of the time.
86  */
87 static int osc_lock_invariant(struct osc_lock *ols)
88 {
89         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
90         struct ldlm_lock *olock       = ols->ols_lock;
91         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
92
93         return
94                 ergo(osc_lock_is_lockless(ols),
95                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
96                 (ergo(olock != NULL, handle_used) &&
97                  ergo(olock != NULL,
98                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
99                  /*
100                   * Check that ->ols_handle and ->ols_lock are consistent, but
101                   * take into account that they are set at different times.
102                   */
103                  ergo(handle_used,
104                       ergo(lock != NULL && olock != NULL, lock == olock) &&
105                       ergo(lock == NULL, olock == NULL)) &&
106                  ergo(ols->ols_state == OLS_CANCELLED,
107                       olock == NULL && !handle_used) &&
108                  /*
109                   * DLM lock is destroyed only after we have seen cancellation
110                   * ast.
111                   */
112                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
113                       !olock->l_destroyed) &&
114                  ergo(ols->ols_state == OLS_GRANTED,
115                       olock != NULL &&
116                       olock->l_req_mode == olock->l_granted_mode &&
117                       ols->ols_hold));
118 }
119
120 /*****************************************************************************
121  *
122  * Lock operations.
123  *
124  */
125
126 /**
127  * Breaks a link between osc_lock and dlm_lock.
128  */
129 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
130 {
131         struct ldlm_lock *dlmlock;
132
133         spin_lock(&osc_ast_guard);
134         dlmlock = olck->ols_lock;
135         if (dlmlock == NULL) {
136                 spin_unlock(&osc_ast_guard);
137                 return;
138         }
139
140         olck->ols_lock = NULL;
141         /* wb(); --- for all who check (ols->ols_lock != NULL) before
142          * calling osc_lock_detach() */
143         dlmlock->l_ast_data = NULL;
144         olck->ols_handle.cookie = 0ULL;
145         spin_unlock(&osc_ast_guard);
146
147         lock_res_and_lock(dlmlock);
148         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
149                 struct cl_object *obj = olck->ols_cl.cls_obj;
150                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
151                 __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
152
153                 /* Update the kms. This needs to loop over all granted
154                  * locks, which is not a problem for the client. */
155                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
156                 unlock_res_and_lock(dlmlock);
157
158                 cl_object_attr_lock(obj);
159                 cl_object_attr_set(env, obj, attr, CAT_KMS);
160                 cl_object_attr_unlock(obj);
161         } else
162                 unlock_res_and_lock(dlmlock);
163
164         /* release a reference taken in osc_lock_upcall0(). */
165         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
166         LDLM_LOCK_RELEASE(dlmlock);
167 }
168
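/**
 * Releases a granted (or upcall-received) lock: moves the osc_lock into
 * OLS_RELEASED state and releases the hold on the underlying ldlm lock
 * through osc_cancel_base(). Glimpse locks hold no reference and are a
 * no-op here.
 */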
169 static int osc_lock_unuse(const struct lu_env *env,
170                           const struct cl_lock_slice *slice)
171 {
172         struct osc_lock *ols = cl2osc_lock(slice);
173         int result;
174
175         LASSERT(ols->ols_state == OLS_GRANTED ||
176                 ols->ols_state == OLS_UPCALL_RECEIVED);
177         LINVRNT(osc_lock_invariant(ols));
178
179         if (ols->ols_glimpse) {
180                 LASSERT(ols->ols_hold == 0);
181                 return 0;
182         }
183         LASSERT(ols->ols_hold);
184
185         /*
186          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
187          * so that possible synchronous cancellation (that always happens
188          * e.g., for liblustre) sees that lock is released.
189          */
190         ols->ols_state = OLS_RELEASED;
191         ols->ols_hold = 0;
192         result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
193         ols->ols_has_ref = 0;
194         return result;
195 }
196
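/**
 * Finalizer for an osc lock slice: releases a still-held lock, detaches it
 * from its dlm lock and frees the slice.
 */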
197 static void osc_lock_fini(const struct lu_env *env,
198                           struct cl_lock_slice *slice)
199 {
200         struct osc_lock  *ols = cl2osc_lock(slice);
201
202         LINVRNT(osc_lock_invariant(ols));
203         /*
204          * ->ols_hold can still be true at this point if, for example, a
205          * thread that requested a lock was killed (and released a reference
206          * to the lock), before reply from a server was received. In this case
207          * lock is destroyed immediately after upcall.
208          */
209         if (ols->ols_hold)
210                 osc_lock_unuse(env, slice);
211         if (ols->ols_lock != NULL)
212                 osc_lock_detach(env, ols);
213
214         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
215 }
216
217 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
218                         struct ldlm_res_id *resname)
219 {
220         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
221         if (0) {
222                 /*
223                  * In the perfect world of the future, where ost servers talk
224                  * idif-fids...
225                  */
226                 fid_build_reg_res_name(fid, resname);
227         } else {
228                 /*
229                  * In reality, where ost server expects ->lsm_object_id and
230                  * ->lsm_object_gr in rename.
231                  */
232                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
233                                    resname);
234         }
235 }
236
237 static void osc_lock_build_policy(const struct lu_env *env,
238                                   const struct cl_lock *lock,
239                                   ldlm_policy_data_t *policy)
240 {
241         const struct cl_lock_descr *d = &lock->cll_descr;
242
243         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
244 }
245
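/**
 * Translates cl enqueue flags (CEF_*) into ldlm flags.
 */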
246 static int osc_enq2ldlm_flags(__u32 enqflags)
247 {
248         int result = 0;
249
250         LASSERT((enqflags & ~(CEF_NONBLOCK|CEF_ASYNC|CEF_DISCARD_DATA)) == 0);
251
252         if (enqflags & CEF_NONBLOCK)
253                 result |= LDLM_FL_BLOCK_NOWAIT;
254         if (enqflags & CEF_ASYNC)
255                 result |= LDLM_FL_HAS_INTENT;
256         if (enqflags & CEF_DISCARD_DATA)
257                 result |= LDLM_AST_DISCARD_DATA;
258         return result;
259 }
260
261 /**
262  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
263  * pointers. Initialized in osc_init().
264  */
265 spinlock_t osc_ast_guard;
266
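/**
 * Returns the osc_lock attached to \a dlm_lock through ->l_ast_data, taking
 * a trusted reference on the corresponding cl_lock, or NULL when there is no
 * such lock (or it is being freed and holds no osc references). Paired with
 * osc_ast_data_put().
 */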
267 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
268 {
269         struct osc_lock *olck;
270
271         lock_res_and_lock(dlm_lock);
272         spin_lock(&osc_ast_guard);
273         olck = dlm_lock->l_ast_data;
274         if (olck != NULL) {
275                 struct cl_lock *lock = olck->ols_cl.cls_lock;
276                 /*
277                  * If osc_lock holds a reference on ldlm lock, return it even
278                  * when cl_lock is in CLS_FREEING state. This way
279                  *
280                  *         osc_ast_data_get(dlmlock) == NULL
281                  *
282                  * guarantees that all osc references on dlmlock were
283                  * released. osc_dlm_blocking_ast0() relies on that.
284                  */
285                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
286                         cl_lock_get_trust(lock);
287                         lu_ref_add_atomic(&lock->cll_reference,
288                                           "ast", cfs_current());
289                 } else
290                         olck = NULL;
291         }
292         spin_unlock(&osc_ast_guard);
293         unlock_res_and_lock(dlm_lock);
294         return olck;
295 }
296
297 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
298 {
299         struct cl_lock *lock;
300
301         lock = olck->ols_cl.cls_lock;
302         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
303         cl_lock_put(env, lock);
304 }
305
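/**
 * Converts the lock into a lockless lock by installing the lockless
 * operations vector (osc_lock_lockless_ops) on its slice.
 */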
306 static void osc_lock_to_lockless(struct osc_lock *olck)
307 {
308         struct cl_lock_slice *slice = &olck->ols_cl;
309         struct cl_lock  *lock       = slice->cls_lock;
310
311         /*
312          * TODO: Determine under which conditions the lock has to be
313          * converted into a lockless (ldlm-less) lock.
314          */
315         LASSERT(cl_lock_is_mutexed(lock));
316         slice->cls_ops = &osc_lock_lockless_ops;
317 }
318
319 /**
320  * Updates object attributes from a lock value block (lvb) received together
321  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
322  * logic.
323  *
324  * This can be optimized to not update attributes when lock is a result of a
325  * local match.
326  */
327 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
328                                 int rc)
329 {
330         struct ost_lvb    *lvb;
331         struct cl_object  *obj;
332         struct lov_oinfo  *oinfo;
333         struct cl_attr    *attr;
334         unsigned           valid;
335
336         ENTRY;
337
338         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
339                 EXIT;
340                 return;
341         }
342
343         lvb   = &olck->ols_lvb;
344         obj   = olck->ols_cl.cls_obj;
345         oinfo = cl2osc(obj)->oo_oinfo;
346         attr  = &osc_env_info(env)->oti_attr;
347         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
348         cl_lvb2attr(attr, lvb);
349
350         cl_object_attr_lock(obj);
351         if (rc == 0) {
352                 struct ldlm_lock  *dlmlock;
353                 __u64 size;
354
355                 dlmlock = olck->ols_lock;
356                 LASSERT(dlmlock != NULL);
357
358                 size = lvb->lvb_size;
359                 /* Extend KMS up to the end of this lock and no further.
360                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
361                 if (size > dlmlock->l_policy_data.l_extent.end)
362                         size = dlmlock->l_policy_data.l_extent.end + 1;
363                 if (size >= oinfo->loi_kms) {
364                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
365                                    ", kms="LPU64, lvb->lvb_size, size);
366                         valid |= CAT_KMS;
367                         attr->cat_kms = size;
368                 } else {
369                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
370                                    LPU64"; leaving kms="LPU64", end="LPU64,
371                                    lvb->lvb_size, oinfo->loi_kms,
372                                    dlmlock->l_policy_data.l_extent.end);
373                 }
374                 ldlm_lock_allow_match(dlmlock);
375         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
376                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
377                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
378         } else
379                 valid = 0;
380
381         if (valid != 0)
382                 cl_object_attr_set(env, obj, attr, valid);
383
384         cl_object_attr_unlock(obj);
385
386         EXIT;
387 }
388
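/**
 * Called when the underlying dlm lock is granted: updates the cl_lock
 * descriptor to the actually granted extent and mode, moves the osc_lock
 * into OLS_GRANTED, updates object attributes from the lvb and signals
 * threads waiting on the cl_lock.
 */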
389 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
390                              struct ldlm_lock *dlmlock, int rc)
391 {
392         struct ldlm_extent   *ext;
393         struct cl_lock       *lock;
394         struct cl_lock_descr *descr;
395
396         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
397
398         ENTRY;
399         if (olck->ols_state != OLS_GRANTED) {
400                 lock  = olck->ols_cl.cls_lock;
401                 ext   = &dlmlock->l_policy_data.l_extent;
402                 descr = &osc_env_info(env)->oti_descr;
403                 descr->cld_obj = lock->cll_descr.cld_obj;
404
405                 /* XXX check that ->l_granted_mode is valid. */
406                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
407                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
408                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
409                 /*
410                  * tell upper layers the extent of the lock that was actually
411                  * granted
412                  */
413                 cl_lock_modify(env, lock, descr);
414                 LINVRNT(osc_lock_invariant(olck));
415                 olck->ols_state = OLS_GRANTED;
416                 osc_lock_lvb_update(env, olck, rc);
417                 cl_lock_signal(env, lock);
418         }
419         EXIT;
420 }
421
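/**
 * First part of upcall processing: binds the osc_lock to its dlm lock
 * (through ->l_ast_data), takes the lock references owned by osc_lock
 * (released later in osc_lock_detach()) and, if the lock is already granted,
 * finishes granting through osc_lock_granted().
 */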
422 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
423
424 {
425         struct ldlm_lock *dlmlock;
426
427         ENTRY;
428
429         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
430         LASSERT(dlmlock != NULL);
431
432         lock_res_and_lock(dlmlock);
433         spin_lock(&osc_ast_guard);
434         LASSERT(dlmlock->l_ast_data == olck);
435         LASSERT(olck->ols_lock == NULL);
436         olck->ols_lock = dlmlock;
437         spin_unlock(&osc_ast_guard);
438         unlock_res_and_lock(dlmlock);
439
440         /*
441          * Lock might not be granted yet. In this case, completion ast
442          * (osc_ldlm_completion_ast()) comes later and finishes lock
443          * granting.
444          */
445         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
446                 osc_lock_granted(env, olck, dlmlock, 0);
447         /*
448          * osc_enqueue_interpret() decrefs asynchronous locks, counter
449          * this.
450          */
451         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
452         olck->ols_hold = olck->ols_has_ref = 1;
453
454         /* lock reference taken by ldlm_handle2lock_long() is owned by
455          * osc_lock and released in osc_lock_detach() */
456         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
457 }
458
459 /**
460  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
461  * received from a server, or after osc_enqueue_base() matched a local DLM
462  * lock.
463  */
464 static int osc_lock_upcall(void *cookie, int errcode)
465 {
466         struct osc_lock      *olck  = cookie;
467         struct cl_lock_slice *slice = &olck->ols_cl;
468         struct cl_lock       *lock  = slice->cls_lock;
469         struct lu_env        *env;
470
471         int refcheck;
472
473         ENTRY;
474         /*
475          * XXX environment should be created in ptlrpcd.
476          */
477         env = cl_env_get(&refcheck);
478         if (!IS_ERR(env)) {
479                 int rc;
480
481                 cl_lock_mutex_get(env, lock);
482
483                 LASSERT(lock->cll_state >= CLS_QUEUING);
484                 if (olck->ols_state == OLS_ENQUEUED) {
485                         olck->ols_state = OLS_UPCALL_RECEIVED;
486                         rc = ldlm_error2errno(errcode);
487                 } else if (olck->ols_state == OLS_CANCELLED) {
488                         rc = -EIO;
489                 } else {
490                         CERROR("Impossible state: %i\n", olck->ols_state);
491                         LBUG();
492                 }
493                 if (rc) {
494                         struct ldlm_lock *dlmlock;
495
496                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
497                         if (dlmlock != NULL) {
498                                 lock_res_and_lock(dlmlock);
499                                 spin_lock(&osc_ast_guard);
500                                 LASSERT(olck->ols_lock == NULL);
501                                 dlmlock->l_ast_data = NULL;
502                                 olck->ols_handle.cookie = 0ULL;
503                                 spin_unlock(&osc_ast_guard);
504                                 unlock_res_and_lock(dlmlock);
505                                 LDLM_LOCK_PUT(dlmlock);
506                         }
507                 } else {
508                         if (olck->ols_glimpse)
509                                 olck->ols_glimpse = 0;
510                         osc_lock_upcall0(env, olck);
511                 }
512
513                 /* Error handling, some errors are tolerable. */
514                 if (olck->ols_locklessable && rc == -EUSERS) {
515                         /* This is a tolerable error, turn this lock into
516                          * a lockless lock.
517                          */
518                         osc_object_set_contended(cl2osc(slice->cls_obj));
519                         LASSERT(slice->cls_ops == &osc_lock_ops);
520
521                         /* Change this lock to ldlmlock-less lock. */
522                         osc_lock_to_lockless(olck);
523                         olck->ols_state = OLS_GRANTED;
524                         rc = 0;
525                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
526                         osc_lock_lvb_update(env, olck, rc);
527                         cl_lock_delete(env, lock);
528                         /* Hide the error. */
529                         rc = 0;
530                 }
531
532                 if (rc == 0)
533                         /* on error, lock was signaled by cl_lock_error() */
534                         cl_lock_signal(env, lock);
535                 else
536                         cl_lock_error(env, lock, rc);
537
538                 cl_lock_mutex_put(env, lock);
539
540                 /* release cookie reference, acquired by osc_lock_enqueue() */
541                 lu_ref_del(&lock->cll_reference, "upcall", lock);
542                 cl_lock_put(env, lock);
543                 cl_env_put(env, &refcheck);
544         } else
545                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
546                 LBUG();
547         RETURN(errcode);
548 }
549
550 /**
551  * Core of osc_dlm_blocking_ast() logic.
552  */
553 static void osc_lock_blocking(const struct lu_env *env,
554                               struct ldlm_lock *dlmlock,
555                               struct osc_lock *olck, int blocking)
556 {
557         struct cl_lock *lock = olck->ols_cl.cls_lock;
558
559         LASSERT(olck->ols_lock == dlmlock);
560         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
561         LASSERT(!osc_lock_is_lockless(olck));
562
563         if (olck->ols_hold)
564                 /*
565                  * Lock might still be addref-ed here if, e.g., blocking ast
566                  * is sent for a failed lock.
567                  */
568                 osc_lock_unuse(env, &olck->ols_cl);
569
570         if (blocking && olck->ols_state < OLS_BLOCKED)
571                 /*
572                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
573                  * because it recursively re-enters osc_lock_blocking(), with
574                  * the state set to OLS_CANCELLED.
575                  */
576                 olck->ols_state = OLS_BLOCKED;
577         /*
578          * cancel and destroy lock at least once no matter how blocking ast is
579          * entered (see comment above osc_ldlm_blocking_ast() for use
580          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
581          */
582         cl_lock_cancel(env, lock);
583         cl_lock_delete(env, lock);
584 }
585
586 /**
587  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
588  * and ldlm_lock caches.
589  */
590 static int osc_dlm_blocking_ast0(const struct lu_env *env,
591                                  struct ldlm_lock *dlmlock,
592                                  void *data, int flag)
593 {
594         struct osc_lock *olck;
595         struct cl_lock  *lock;
596         int result;
597         int cancel;
598
599         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
600
601         cancel = 0;
602         olck = osc_ast_data_get(dlmlock);
603         if (olck != NULL) {
604                 lock = olck->ols_cl.cls_lock;
605                 cl_lock_mutex_get(env, lock);
606                 LINVRNT(osc_lock_invariant(olck));
607                 if (olck->ols_ast_wait) {
608                         /* wake up osc_lock_use() */
609                         cl_lock_signal(env, lock);
610                         olck->ols_ast_wait = 0;
611                 }
612                 /*
613                  * Lock might have been canceled while this thread was
614                  * sleeping on the lock mutex, but olck is pinned in memory.
615                  */
616                 if (olck == dlmlock->l_ast_data) {
617                         /*
618                          * NOTE: DLM sends blocking AST's for failed locks
619                          *       (that are still in pre-OLS_GRANTED state)
620                          *       too, and they have to be canceled otherwise
621                          *       the DLM lock is never destroyed and gets
622                          *       stuck in memory.
623                          *
624                          *       Alternatively, ldlm_cli_cancel() can be
625                          *       called here directly for osc_locks with
626                          *       ols_state < OLS_GRANTED to maintain an
627                          *       invariant that ->clo_cancel() is only called
628                          *       for locks that were granted.
629                          */
630                         LASSERT(data == olck);
631                         osc_lock_blocking(env, dlmlock,
632                                           olck, flag == LDLM_CB_BLOCKING);
633                 } else
634                         cancel = 1;
635                 cl_lock_mutex_put(env, lock);
636                 osc_ast_data_put(env, olck);
637         } else
638                 /*
639                  * DLM lock exists, but there is no cl_lock attached to it.
640                  * This is a `normal' race. cl_object and its cl_lock's can be
641                  * removed by memory pressure, together with all pages.
642                  */
643                 cancel = (flag == LDLM_CB_BLOCKING);
644
645         if (cancel) {
646                 struct lustre_handle *lockh;
647
648                 lockh = &osc_env_info(env)->oti_handle;
649                 ldlm_lock2handle(dlmlock, lockh);
650                 result = ldlm_cli_cancel(lockh);
651         } else
652                 result = 0;
653         return result;
654 }
655
656 /**
657  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
658  * some other lock, or is canceled. This function is installed as a
659  * ldlm_lock::l_blocking_ast() for client extent locks.
660  *
661  * Control flow is tricky, because ldlm uses the same call-back
662  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
663  *
664  * \param dlmlock lock for which ast occurred.
665  *
666  * \param new description of a conflicting lock in case of blocking ast.
667  *
668  * \param data value of dlmlock->l_ast_data
669  *
670  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
671  *             cancellation and blocking ast's.
672  *
673  * Possible use cases:
674  *
675  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
676  *       lock due to lock lru pressure, or explicit user request to purge
677  *       locks.
678  *
679  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
680  *       us that dlmlock conflicts with another lock that some client is
681  *       enqueing. Lock is canceled.
682  *       enqueuing. Lock is canceled.
683  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
684  *             ldlm_cli_cancel() that calls
685  *
686  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
687  *
688  *             recursively entering osc_ldlm_blocking_ast().
689  *
690  *     - client cancels lock voluntarily (e.g., as a part of early cancellation):
691  *
692  *           cl_lock_cancel()->
693  *             osc_lock_cancel()->
694  *               ldlm_cli_cancel()->
695  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
696  *
697  */
698 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
699                                  struct ldlm_lock_desc *new, void *data,
700                                  int flag)
701 {
702         struct lu_env     *env;
703         struct cl_env_nest nest;
704         int                result;
705
706         /*
707          * This can be called in the context of outer IO, e.g.,
708          *
709          *     cl_enqueue()->...
710          *       ->osc_enqueue_base()->...
711          *         ->ldlm_prep_elc_req()->...
712          *           ->ldlm_cancel_callback()->...
713          *             ->osc_ldlm_blocking_ast()
714          *
715          * A new environment has to be created to avoid corrupting the outer context.
716          */
717         env = cl_env_nested_get(&nest);
718         if (!IS_ERR(env))
719                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
720         else {
721                 result = PTR_ERR(env);
722                 /*
723                  * XXX This should never happen, as cl_lock is
724                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
725                  * should be used.
726                  */
727                 LBUG();
728         }
729         if (result != 0) {
730                 if (result == -ENODATA)
731                         result = 0;
732                 else
733                         CERROR("BAST failed: %d\n", result);
734         }
735         cl_env_nested_put(&nest, env);
736         return result;
737 }
738
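/**
 * Completion AST installed for client extent locks. First does the dlm part
 * of the work (ldlm_completion_ast_async()), then updates the cl_lock side:
 * stores the LVB received from the server in the osc_lock and, once the
 * upcall has bound osc_lock to the dlm lock, finishes granting.
 */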
739 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
740                                    int flags, void *data)
741 {
742         struct lu_env   *env;
743         void            *env_cookie;
744         struct osc_lock *olck;
745         struct cl_lock  *lock;
746         int refcheck;
747         int result;
748         int dlmrc;
749
750         /* first, do dlm part of the work */
751         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
752         /* then, notify cl_lock */
753         env_cookie = cl_env_reenter();
754         env = cl_env_get(&refcheck);
755         if (!IS_ERR(env)) {
756                 olck = osc_ast_data_get(dlmlock);
757                 if (olck != NULL) {
758                         lock = olck->ols_cl.cls_lock;
759                         cl_lock_mutex_get(env, lock);
760                         /*
761                          * ldlm_handle_cp_callback() copied LVB from request
762                          * to lock->l_lvb_data, store it in osc_lock.
763                          */
764                         LASSERT(dlmlock->l_lvb_data != NULL);
765                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
766                         if (olck->ols_lock == NULL)
767                                 /*
768                                  * upcall (osc_lock_upcall()) hasn't yet been
769                                  * called. Do nothing now, upcall will bind
770                                  * olck to dlmlock and signal the waiters.
771                                  *
772                                  * This maintains an invariant that osc_lock
773                                  * and ldlm_lock are always bound when
774                                  * osc_lock is in OLS_GRANTED state.
775                                  */
776                                 ;
777                         else if (dlmlock->l_granted_mode != LCK_MINMODE)
778                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
779                         if (dlmrc != 0)
780                                 cl_lock_error(env, lock, dlmrc);
781                         cl_lock_mutex_put(env, lock);
782                         osc_ast_data_put(env, olck);
783                         result = 0;
784                 } else
785                         result = -ELDLM_NO_LOCK_DATA;
786                 cl_env_put(env, &refcheck);
787         } else
788                 result = PTR_ERR(env);
789         cl_env_reexit(env_cookie);
790         return dlmrc ?: result;
791 }
792
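/**
 * Glimpse AST handler for client extent locks: fills the LVB in the reply
 * with current object attributes obtained through cl_object_glimpse().
 */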
793 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
794 {
795         struct ptlrpc_request  *req  = data;
796         struct osc_lock        *olck;
797         struct cl_lock         *lock;
798         struct cl_object       *obj;
799         struct lu_env          *env;
800         struct ost_lvb         *lvb;
801         struct req_capsule     *cap;
802         int                     result;
803         int                     refcheck;
804
805         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
806
807         env = cl_env_get(&refcheck);
808         if (!IS_ERR(env)) {
809                 /*
810                  * osc_ast_data_get() has to go after environment is
811                  * allocated, because osc_ast_data_get() acquires a
812                  * reference to a lock, and that reference can only be
813                  * released within an environment.
814                  */
815                 olck = osc_ast_data_get(dlmlock);
816                 if (olck != NULL) {
817                         cap = &req->rq_pill;
818                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
819                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
820                                              sizeof *lvb);
821                         result = req_capsule_server_pack(cap);
822                         if (result == 0) {
823                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
824                                 lock = olck->ols_cl.cls_lock;
825                                 obj = lock->cll_descr.cld_obj;
826                                 result = cl_object_glimpse(env, obj, lvb);
827                         }
828                         osc_ast_data_put(env, olck);
829                 } else {
830                         /*
831                          * These errors are normal races, so we don't want to
832                          * fill the console with messages by calling
833                          * ptlrpc_error()
834                          */
835                         lustre_pack_reply(req, 1, NULL, NULL);
836                         result = -ELDLM_NO_LOCK_DATA;
837                 }
838                 cl_env_put(env, &refcheck);
839         } else
840                 result = PTR_ERR(env);
841         req->rq_status = result;
842         return result;
843 }
844
845 static unsigned long osc_lock_weigh(const struct lu_env *env,
846                                     const struct cl_lock_slice *slice)
847 {
848         /*
849          * don't need to grab coh_page_guard since we don't care about the
850          * exact number of pages.
851          */
852         return cl_object_header(slice->cls_obj)->coh_pages;
853 }
854
855 /**
856  * Get the weight of dlm lock for early cancellation.
857  *
858  * XXX: it should return the pages covered by this \a dlmlock.
859  */
860 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
861 {
862         struct lu_env           *env;
863         int                      refcheck;
864         void                    *cookie;
865         struct osc_lock         *lock;
866         struct cl_lock          *cll;
867         unsigned long            weight;
868         ENTRY;
869
870         might_sleep();
871         cookie = cl_env_reenter();
872         /*
873          * osc_ldlm_weigh_ast() runs in a complex context, since it might be
874          * called because of lock cancellation or from user input, so we have
875          * to create a new environment for it. It would probably be safe to
876          * use the upper context, because cl_lock_put() doesn't modify
877          * environment variables, but play it safe.
878          */
879         env = cl_env_get(&refcheck);
880         if (IS_ERR(env)) {
881                 /* Mostly because of lack of memory; tend to eliminate this lock. */
882                 cl_env_reexit(cookie);
883                 RETURN(0);
884         }
885
886         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
887         lock = osc_ast_data_get(dlmlock);
888         if (lock == NULL) {
889                 /* cl_lock was destroyed because of memory pressure.
890                  * It is reasonable to assign this type of lock
891                  * a lower cost.
892                  */
893                 GOTO(out, weight = 0);
894         }
895
896         cll = lock->ols_cl.cls_lock;
897         cl_lock_mutex_get(env, cll);
898         weight = cl_lock_weigh(env, cll);
899         cl_lock_mutex_put(env, cll);
900         osc_ast_data_put(env, lock);
901         EXIT;
902
903 out:
904         cl_env_put(env, &refcheck);
905         cl_env_reexit(cookie);
906         return weight;
907 }
908
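/**
 * Fills ldlm enqueue info for the lock: extent lock type, ldlm mode
 * (CLM_PHANTOM glimpse locks are enqueued in read mode) and the blocking,
 * completion, glimpse and weigh callbacks defined above.
 */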
909 static void osc_lock_build_einfo(const struct lu_env *env,
910                                  const struct cl_lock *clock,
911                                  struct osc_lock *lock,
912                                  struct ldlm_enqueue_info *einfo)
913 {
914         enum cl_lock_mode mode;
915
916         mode = clock->cll_descr.cld_mode;
917         if (mode == CLM_PHANTOM)
918                 /*
919                  * For now, enqueue all glimpse locks in read mode. In the
920                  * future, client might choose to enqueue LCK_PW lock for
921                  * glimpse on a file opened for write.
922                  */
923                 mode = CLM_READ;
924
925         einfo->ei_type   = LDLM_EXTENT;
926         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
927         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
928         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
929         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
930         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
931         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
932 }
933
934 /**
935  * Cancels \a conflict lock and waits until it reaches CLS_FREEING state. This
936  * is called as a part of enqueuing to cancel conflicting locks early.
937  *
938  * \retval            0: success, \a conflict was cancelled and destroyed.
939  *
940  * \retval   CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
941  *                       released in the process. Repeat enqueuing.
942  *
943  * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
944  *                       either \a lock is non-blocking, or current thread
945  *                       holds other locks, that prevent it from waiting
946  *                       for cancel to complete.
947  *
948  * \retval          -ve: other error, including -EINTR.
949  *
950  */
951 static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
952                                 struct cl_lock *conflict, int canwait)
953 {
954         int rc;
955
956         LASSERT(cl_lock_is_mutexed(lock));
957         LASSERT(cl_lock_is_mutexed(conflict));
958
959         rc = 0;
960         if (conflict->cll_state != CLS_FREEING) {
961                 cl_lock_cancel(env, conflict);
962                 cl_lock_delete(env, conflict);
963                 if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
964                         rc = -EWOULDBLOCK;
965                         if (cl_lock_nr_mutexed(env) > 2)
966                                 /*
967                                  * If mutices of locks other than @lock and
968                                  * @scan are held by the current thread, it
969                                  * cannot wait on @scan state change in a
970                                  * dead-lock safe manner, so simply skip early
971                                  * cancellation in this case.
972                                  *
973                                  * This means that early cancellation doesn't
974                                  * work when there is even slight mutex
975                                  * contention, as top-lock's mutex is usually
976                                  * held at this time.
977                                  */
978                                 ;
979                         else if (canwait) {
980                                 /* Waiting for @scan to be destroyed */
981                                 cl_lock_mutex_put(env, lock);
982                                 do {
983                                         rc = cl_lock_state_wait(env, conflict);
984                                 } while (!rc &&
985                                          conflict->cll_state < CLS_FREEING);
986                                 /* mutex was released, repeat enqueue. */
987                                 rc = rc ?: CLO_REPEAT;
988                                 cl_lock_mutex_get(env, lock);
989                         }
990                 }
991                 LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
992                 CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
993                        conflict, rc ? "not":"", rc);
994         }
995         return rc;
996 }
997
998 /**
999  * Cancel all conflicting locks and wait for them to be destroyed.
1000  *
1001  * This function is used for two purposes:
1002  *
1003  *     - early cancel all conflicting locks before starting IO, and
1004  *
1005  *     - guarantee that pages added to the page cache by lockless IO are never
1006  *       covered by locks other than lockless IO lock, and, hence, are not
1007  *       visible to other threads.
1008  */
1009 static int osc_lock_enqueue_wait(const struct lu_env *env,
1010                                  const struct osc_lock *olck)
1011 {
1012         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1013         struct cl_lock_descr    *descr   = &lock->cll_descr;
1014         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1015         struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
1016         struct cl_lock          *scan;
1017         struct cl_lock          *temp;
1018         int lockless                     = osc_lock_is_lockless(olck);
1019         int rc                           = 0;
1020         int canwait;
1021         int stop;
1022         ENTRY;
1023
1024         LASSERT(cl_lock_is_mutexed(lock));
1025         LASSERT(lock->cll_state == CLS_QUEUING);
1026
1027         /*
1028          * XXX This function could be sped up if we had asynchronous
1029          * cancellation.
1030          */
1031
1032         canwait =
1033                 !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
1034                 cl_lock_nr_mutexed(env) == 1;
1035         cl_lock_closure_init(env, closure, lock, canwait);
1036         spin_lock(&hdr->coh_lock_guard);
1037         list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
1038                 if (scan == lock)
1039                         continue;
1040
1041                 if (scan->cll_state < CLS_QUEUING ||
1042                     scan->cll_state == CLS_FREEING ||
1043                     scan->cll_descr.cld_start > descr->cld_end ||
1044                     scan->cll_descr.cld_end < descr->cld_start)
1045                         continue;
1046
1047                 /* overlapped and living locks. */
1048                 /* A tricky case for lockless pages:
1049                  * We need to cancel the compatible locks if we're enqueuing
1050                  * a lockless lock, for example:
1051                  * imagine that client has PR lock on [0, 1000], and thread T0
1052                  * is doing lockless IO in [500, 1500] region. Concurrent
1053                  * thread T1 can see lockless data in [500, 1000], which is
1054                  * wrong, because these data are possibly stale.
1055                  */
1056                 if (!lockless && cl_lock_compatible(scan, lock))
1057                         continue;
1058
1059                 /* Now @scan conflicts with @lock; this means the current
1060                  * thread has to sleep until @scan is destroyed. */
1061                 cl_lock_get_trust(scan);
1062                 if (&temp->cll_linkage != &hdr->coh_locks)
1063                         cl_lock_get_trust(temp);
1064                 spin_unlock(&hdr->coh_lock_guard);
1065                 lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
1066
1067                 LASSERT(list_empty(&closure->clc_list));
1068                 rc = cl_lock_closure_build(env, scan, closure);
1069                 if (rc == 0) {
1070                         rc = osc_lock_cancel_wait(env, lock, scan, canwait);
1071                         cl_lock_disclosure(env, closure);
1072                         if (rc == -EWOULDBLOCK)
1073                                 rc = 0;
1074                 }
1075                 if (rc == CLO_REPEAT && !canwait)
1076                         /* cannot wait... no early cancellation. */
1077                         rc = 0;
1078
1079                 lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
1080                 cl_lock_put(env, scan);
1081                 spin_lock(&hdr->coh_lock_guard);
1082                 /*
1083                  * Lock list could have been modified while the spin-lock was
1084                  * released. Check that it is safe to continue.
1085                  */
1086                 stop = list_empty(&temp->cll_linkage);
1087                 if (&temp->cll_linkage != &hdr->coh_locks)
1088                         cl_lock_put(env, temp);
1089                 if (stop || rc != 0)
1090                         break;
1091         }
1092         spin_unlock(&hdr->coh_lock_guard);
1093         cl_lock_closure_fini(closure);
1094         RETURN(rc);
1095 }
1096
1097 /**
1098  * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
1099  *
1100  *     - Thread0: obtains PR:[0, 10]. Lock is busy.
1101  *
1102  *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
1103  *       PR:[0, 10], but cancellation of busy lock is postponed.
1104  *
1105  *     - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
1106  *       PW:[5, 50], and thread0 waits for the lock completion never
1107  *       releasing PR:[0, 10]---deadlock.
1108  *
1109  * The second PR lock can be a glimpse lock (it is to deal with that situation
1110  * that ll_glimpse_size() has a second argument, preventing local match of
1111  * not-yet-granted locks, see bug 10295). A similar situation is possible in
1112  * the case of a memory-mapped user level buffer.
1113  *
1114  * To prevent this we can detect a situation when current "thread" or "io"
1115  * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
1116  * the ols->ols_flags, or prevent local match with PW locks.
1117  */
1118 static int osc_deadlock_is_possible(const struct lu_env *env,
1119                                     struct cl_lock *lock)
1120 {
1121         struct cl_object        *obj;
1122         struct cl_object_header *head;
1123         struct cl_lock          *scan;
1124         struct osc_io           *oio;
1125
1126         int result;
1127
1128         ENTRY;
1129
1130         LASSERT(cl_lock_is_mutexed(lock));
1131
1132         oio  = osc_env_io(env);
1133         obj  = lock->cll_descr.cld_obj;
1134         head = cl_object_header(obj);
1135
1136         result = 0;
1137         spin_lock(&head->coh_lock_guard);
1138         list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1139                 if (scan != lock) {
1140                         struct osc_lock *oscan;
1141
1142                         oscan = osc_lock_at(scan);
1143                         LASSERT(oscan != NULL);
1144                         if (oscan->ols_owner == oio) {
1145                                 result = 1;
1146                                 break;
1147                         }
1148                 }
1149         }
1150         spin_unlock(&head->coh_lock_guard);
1151         RETURN(result);
1152 }
1153
1154 /**
1155  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1156  * layer. This initiates ldlm enqueue:
1157  *
1158  *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
1159  *
1160  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1161  *
1162  *     - calls osc_enqueue_base() to do actual enqueue.
1163  *
1164  * osc_enqueue_base() is supplied with an upcall function that is executed
1165  * when lock is received either after a local cached ldlm lock is matched, or
1166  * when a reply from the server is received.
1167  *
1168  * This function does not wait for the network communication to complete.
1169  */
1170 static int osc_lock_enqueue(const struct lu_env *env,
1171                             const struct cl_lock_slice *slice,
1172                             struct cl_io *_, __u32 enqflags)
1173 {
1174         struct osc_lock          *ols     = cl2osc_lock(slice);
1175         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1176         struct osc_object        *obj     = cl2osc(slice->cls_obj);
1177         struct osc_thread_info   *info    = osc_env_info(env);
1178         struct ldlm_res_id       *resname = &info->oti_resname;
1179         ldlm_policy_data_t       *policy  = &info->oti_policy;
1180         struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
1181         int result;
1182         ENTRY;
1183
1184         LASSERT(cl_lock_is_mutexed(lock));
1185         LASSERT(lock->cll_state == CLS_QUEUING);
1186         LASSERT(ols->ols_state == OLS_NEW);
1187
1188         osc_lock_build_res(env, obj, resname);
1189         osc_lock_build_policy(env, lock, policy);
1190         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1191         if (ols->ols_locklessable)
1192                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1193         if (osc_deadlock_is_possible(env, lock))
1194                 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1195         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1196                 ols->ols_glimpse = 1;
1197
1198         result = osc_lock_enqueue_wait(env, ols);
1199         if (result == 0) {
1200                 /* a reference for lock, passed as an upcall cookie */
1201                 cl_lock_get(lock);
1202                 lu_ref_add(&lock->cll_reference, "upcall", lock);
1203                 ols->ols_state = OLS_ENQUEUED;
1204
1205                 /*
1206          * XXX: this is a possible blocking point, as
1207                  * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1208                  * LDLM_CP_CALLBACK.
1209                  */
1210                 result = osc_enqueue_base(osc_export(obj), resname,
1211                                           &ols->ols_flags, policy,
1212                                           &ols->ols_lvb,
1213                                           obj->oo_oinfo->loi_kms_valid,
1214                                           osc_lock_upcall,
1215                                           ols, einfo, &ols->ols_handle,
1216                                           PTLRPCD_SET, 1);
1217                 if (result != 0) {
1218                         lu_ref_del(&lock->cll_reference, "upcall", lock);
1219                         cl_lock_put(env, lock);
1220                 }
1221         }
1222
1223         RETURN(result);
1224 }
1225
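/**
 * Waits for the lock to be granted: returns 0 once the lock reached
 * OLS_GRANTED (or, for glimpse locks, once the upcall was received),
 * CLO_WAIT while the enqueue is still in flight, or the error recorded on
 * the cl_lock.
 */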
1226 static int osc_lock_wait(const struct lu_env *env,
1227                          const struct cl_lock_slice *slice)
1228 {
1229         struct osc_lock *olck = cl2osc_lock(slice);
1230         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1231
1232         LINVRNT(osc_lock_invariant(olck));
1233         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1234                 return 0;
1235
1236         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1237                      lock->cll_error == 0, olck->ols_lock != NULL));
1238
1239         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1240 }
1241
1242 /**
1243  * An implementation of cl_lock_operations::clo_use() method that pins cached
1244  * lock.
1245  */
1246 static int osc_lock_use(const struct lu_env *env,
1247                         const struct cl_lock_slice *slice)
1248 {
1249         struct osc_lock *olck = cl2osc_lock(slice);
1250         int rc;
1251
1252         LASSERT(!olck->ols_hold);
1253         /*
1254          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1255          * flag is not set. This protects us from a concurrent blocking ast.
1256          */
1257         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1258         if (rc == 0) {
1259                 olck->ols_hold = olck->ols_has_ref = 1;
1260                 olck->ols_state = OLS_GRANTED;
1261         } else {
1262                 struct cl_lock *lock;
1263
1264                 /*
1265                  * Lock is being cancelled somewhere within
1266                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1267                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1268                  * cl_lock mutex.
1269                  */
1270                 lock = slice->cls_lock;
1271                 LASSERT(lock->cll_state == CLS_CACHED);
1272                 LASSERT(lock->cll_users > 0);
1273                 LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING);
1274                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1275                  * lock.*/
1276                 olck->ols_ast_wait = 1;
1277                 rc = CLO_WAIT;
1278         }
1279         return rc;
1280 }
1281
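/**
 * Writes out (or, with \a discard set, discards) pages covered by the lock,
 * as a part of lock cancellation, and marks the lock as flushed.
 */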
1282 static int osc_lock_flush(struct osc_lock *ols, int discard)
1283 {
1284         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1285         struct cl_env_nest    nest;
1286         struct lu_env        *env;
1287         int result = 0;
1288
1289         env = cl_env_nested_get(&nest);
1290         if (!IS_ERR(env)) {
1291                 result = cl_lock_page_out(env, lock, discard);
1292                 cl_env_nested_put(&nest, env);
1293         } else
1294                 result = PTR_ERR(env);
1295         if (result == 0)
1296                 ols->ols_flush = 1;
1297         return result;
1298 }
1299
1300 /**
1301  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1302  * called (as part of cl_lock_cancel()) when lock is canceled either voluntary
1303  * (LRU pressure, early cancellation, umount, etc.) or due to the conflict
1304  * with some other lock some where in the cluster. This function does the
1305  * following:
1306  *
1307  *     - invalidates all pages protected by this lock (after sending dirty
1308  *       ones to the server, as necessary);
1309  *
1310  *     - decref's underlying ldlm lock;
1311  *
1312  *     - cancels ldlm lock (ldlm_cli_cancel()).
1313  */
1314 static void osc_lock_cancel(const struct lu_env *env,
1315                             const struct cl_lock_slice *slice)
1316 {
1317         struct cl_lock   *lock    = slice->cls_lock;
1318         struct osc_lock  *olck    = cl2osc_lock(slice);
1319         struct ldlm_lock *dlmlock = olck->ols_lock;
1320         int               result;
1321         int               discard;
1322
1323         LASSERT(cl_lock_is_mutexed(lock));
1324         LINVRNT(osc_lock_invariant(olck));
1325
1326         if (dlmlock != NULL) {
1327                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1328                 result = osc_lock_flush(olck, discard);
1329                 if (olck->ols_hold)
1330                         osc_lock_unuse(env, slice);
1331                 LASSERT(dlmlock->l_readers == 0 && dlmlock->l_writers == 0);
1332                 result = ldlm_cli_cancel(&olck->ols_handle);
1333                 if (result < 0)
1334                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1335                                       "lock %p cancel failure with error(%d)\n",
1336                                       lock, result);
1337         }
1338         olck->ols_state = OLS_CANCELLED;
1339         osc_lock_detach(env, olck);
1340 }
1341
1342 void cl_lock_page_list_fixup(const struct lu_env *env,
1343                              struct cl_io *io, struct cl_lock *lock,
1344                              struct cl_page_list *queue);
1345
1346 #ifdef INVARIANT_CHECK
1347 /**
1348  * Returns true iff there are pages under \a olck not protected by other
1349  * locks.
1350  */
1351 static int osc_lock_has_pages(struct osc_lock *olck)
1352 {
1353         struct cl_lock       *lock;
1354         struct cl_lock_descr *descr;
1355         struct cl_object     *obj;
1356         struct osc_object    *oob;
1357         struct cl_page_list  *plist;
1358         struct cl_page       *page;
1359         struct cl_env_nest    nest;
1360         struct cl_io         *io;
1361         struct lu_env        *env;
1362         int                   result;
1363
1364         env = cl_env_nested_get(&nest);
1365         if (!IS_ERR(env)) {
1366                 obj   = olck->ols_cl.cls_obj;
1367                 oob   = cl2osc(obj);
1368                 io    = &oob->oo_debug_io;
1369                 lock  = olck->ols_cl.cls_lock;
1370                 descr = &lock->cll_descr;
1371                 plist = &osc_env_info(env)->oti_plist;
1372                 cl_page_list_init(plist);
1373
1374                 mutex_lock(&oob->oo_debug_mutex);
1375
1376                 io->ci_obj = cl_object_top(obj);
1377                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1378                 cl_page_gang_lookup(env, obj, io,
1379                                     descr->cld_start, descr->cld_end, plist);
1380                 cl_lock_page_list_fixup(env, io, lock, plist);
1381                 if (plist->pl_nr > 0) {
1382                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1383                         cl_page_list_for_each(page, plist)
1384                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1385                 }
1386                 result = plist->pl_nr > 0;
1387                 cl_page_list_disown(env, io, plist);
1388                 cl_page_list_fini(env, plist);
1389                 cl_io_fini(env, io);
1390                 mutex_unlock(&oob->oo_debug_mutex);
1391                 cl_env_nested_put(&nest, env);
1392         } else
1393                 result = 0;
1394         return result;
1395 }
1396 #else
1397 # define osc_lock_has_pages(olck) (0)
1398 #endif /* INVARIANT_CHECK */
1399
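/**
 * Implements cl_lock_operations::clo_delete() method for the osc layer:
 * drops the hold on the underlying ldlm lock, if any, and detaches it from
 * the cl_lock.
 */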
1400 static void osc_lock_delete(const struct lu_env *env,
1401                             const struct cl_lock_slice *slice)
1402 {
1403         struct osc_lock *olck;
1404
1405         olck = cl2osc_lock(slice);
1406         LINVRNT(osc_lock_invariant(olck));
1407         LINVRNT(!osc_lock_has_pages(olck));
1408
1409         if (olck->ols_hold)
1410                 osc_lock_unuse(env, slice);
1411         osc_lock_detach(env, olck);
1412 }
1413
1414 /**
1415  * Implements cl_lock_operations::clo_state() method for the osc layer.
1416  *
1417  * Maintains the osc_lock::ols_owner field.
1418  *
1419  * This assumes that a lock always enters CLS_HELD (from some other state)
1420  * in the same IO context as the one that requested the lock. This should
1421  * not be a problem, because the context is by definition shared by all
1422  * activity pertaining to the same high-level IO.
1423  */
1424 static void osc_lock_state(const struct lu_env *env,
1425                            const struct cl_lock_slice *slice,
1426                            enum cl_lock_state state)
1427 {
1428         struct osc_lock *lock = cl2osc_lock(slice);
1429         struct osc_io   *oio  = osc_env_io(env);
1430
1431         /*
1432          * XXX multiple io contexts can use the lock at the same time.
1433          */
1434         LINVRNT(osc_lock_invariant(lock));
1435         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1436                 LASSERT(lock->ols_owner == NULL);
1437                 lock->ols_owner = oio;
1438         } else if (state != CLS_HELD)
1439                 lock->ols_owner = NULL;
1440 }
1441
1442 static int osc_lock_print(const struct lu_env *env, void *cookie,
1443                           lu_printer_t p, const struct cl_lock_slice *slice)
1444 {
1445         struct osc_lock *lock = cl2osc_lock(slice);
1446
1447         /*
1448          * XXX print ldlm lock and einfo properly.
1449          */
1450         (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
1451              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1452              lock->ols_state, lock->ols_owner);
1453         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1454         return 0;
1455 }
1456
1457 static const struct cl_lock_operations osc_lock_ops = {
1458         .clo_fini    = osc_lock_fini,
1459         .clo_enqueue = osc_lock_enqueue,
1460         .clo_wait    = osc_lock_wait,
1461         .clo_unuse   = osc_lock_unuse,
1462         .clo_use     = osc_lock_use,
1463         .clo_delete  = osc_lock_delete,
1464         .clo_state   = osc_lock_state,
1465         .clo_cancel  = osc_lock_cancel,
1466         .clo_weigh   = osc_lock_weigh,
1467         .clo_print   = osc_lock_print
1468 };
1469
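/**
 * Implements cl_lock_operations::clo_enqueue() for the lockless case: no
 * ldlm lock is enqueued on the client. After waiting for conflicting lock
 * requests (osc_lock_enqueue_wait()) the lock is marked granted right away;
 * extent locking for the actual IO is left to the server (see
 * OBD_CONNECT_SRVLOCK below).
 */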
1470 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1471                                      const struct cl_lock_slice *slice,
1472                                      struct cl_io *unused, __u32 enqflags)
1473 {
1474         struct osc_lock          *ols     = cl2osc_lock(slice);
1475         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1476         int result;
1477
1478         LASSERT(cl_lock_is_mutexed(lock));
1479         LASSERT(lock->cll_state == CLS_QUEUING);
1480         LASSERT(ols->ols_state == OLS_NEW);
1481
1482         result = osc_lock_enqueue_wait(env, ols);
1483         if (result == 0)
1484                 ols->ols_state = OLS_GRANTED;
1485         return result;
1486 }
1487
1488 static int osc_lock_lockless_unuse(const struct lu_env *env,
1489                                    const struct cl_lock_slice *slice)
1490 {
1491         struct osc_lock *ols = cl2osc_lock(slice);
1492         struct cl_lock *lock = slice->cls_lock;
1493
1494         LASSERT(ols->ols_state == OLS_GRANTED);
1495         LINVRNT(osc_lock_invariant(ols));
1496
1497         cl_lock_cancel(env, lock);
1498         cl_lock_delete(env, lock);
1499         return 0;
1500 }
1501
1502 static void osc_lock_lockless_cancel(const struct lu_env *env,
1503                                      const struct cl_lock_slice *slice)
1504 {
1505         struct osc_lock   *ols  = cl2osc_lock(slice);
1506         int result;
1507
1508         result = osc_lock_flush(ols, 0);
1509         if (result)
1510                 CERROR("Pages for lockless lock %p were not purged (%d)\n",
1511                        ols, result);
1512         ols->ols_state = OLS_CANCELLED;
1513 }
1514
1515 static int osc_lock_lockless_wait(const struct lu_env *env,
1516                                   const struct cl_lock_slice *slice)
1517 {
1518         struct osc_lock *olck = cl2osc_lock(slice);
1519         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1520
1521         LINVRNT(osc_lock_invariant(olck));
1522         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1523
1524         return lock->cll_error;
1525 }
1526
1527 static void osc_lock_lockless_state(const struct lu_env *env,
1528                                     const struct cl_lock_slice *slice,
1529                                     enum cl_lock_state state)
1530 {
1531         struct osc_lock *lock = cl2osc_lock(slice);
1532         struct osc_io   *oio  = osc_env_io(env);
1533
1534         LINVRNT(osc_lock_invariant(lock));
1535         if (state == CLS_HELD) {
1536                 LASSERT(lock->ols_owner == NULL);
1537                 lock->ols_owner = oio;
1538                 oio->oi_lockless = 1;
1539         } else
1540                 lock->ols_owner = NULL;
1541 }
1542
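/**
 * Implements cl_lock_operations::clo_fits_into() for the lockless case:
 * always returns 0 so that a lockless lock is never matched against other
 * lock requests; such a lock is private to the IO that created it.
 */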
1543 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1544                                        const struct cl_lock_slice *slice,
1545                                        const struct cl_lock_descr *need,
1546                                        const struct cl_io *io)
1547 {
1548         return 0;
1549 }
1550
1551 static const struct cl_lock_operations osc_lock_lockless_ops = {
1552         .clo_fini      = osc_lock_fini,
1553         .clo_enqueue   = osc_lock_lockless_enqueue,
1554         .clo_wait      = osc_lock_lockless_wait,
1555         .clo_unuse     = osc_lock_lockless_unuse,
1556         .clo_state     = osc_lock_lockless_state,
1557         .clo_fits_into = osc_lock_lockless_fits_into,
1558         .clo_cancel    = osc_lock_lockless_cancel,
1559         .clo_print     = osc_lock_print
1560 };
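
/*
 * The lockless operation vector shares clo_fini() and clo_print() with
 * osc_lock_ops, but defines no clo_use(), clo_delete() or clo_weigh():
 * a lockless lock is cancelled and deleted on unuse
 * (osc_lock_lockless_unuse()) instead of being cached for reuse.
 */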
1561
1562 int osc_lock_init(const struct lu_env *env,
1563                   struct cl_object *obj, struct cl_lock *lock,
1564                   const struct cl_io *io)
1565 {
1566         struct osc_lock   *clk;
1567         struct osc_io     *oio = osc_env_io(env);
1568         struct osc_object *oob = cl2osc(obj);
1569         int result;
1570
1571         OBD_SLAB_ALLOC_PTR(clk, osc_lock_kmem);
1572         if (clk != NULL) {
1573                 const struct cl_lock_operations *ops;
1574                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1575                 struct obd_connect_data *ocd;
1576
1577                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1578                 clk->ols_state = OLS_NEW;
1579
1580                 /*
1581                  * Check whether lockless IO should be used here.
1582                  * The following conditions must all be satisfied:
1583                  * - the current IO must be locklessable;
1584                  * - the stripe is in contention;
1585                  * - the requested lock is not a glimpse.
1586                  *
1587                  * If not, propagate the locklessable flag to the osc_lock
1588                  * and let the OST make the decision.
1589                  *
1590                  * Additional policy can be implemented here, e.g., never do
1591                  * lockless IO for large extents (see the sketch below).
1592                  */
1593                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1594                         io->ci_lockreq == CILR_MAYBE ||
1595                         io->ci_lockreq == CILR_NEVER);
1596                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1597                 clk->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
1598                                 (io->ci_lockreq == CILR_MAYBE) &&
1599                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1600                 ops = &osc_lock_ops;
1601                 if (io->ci_lockreq == CILR_NEVER ||
1602                     /* lockless IO */
1603                     (clk->ols_locklessable && osc_object_is_contended(oob)) ||
1604                      /* lockless truncate */
1605                     (io->ci_type == CIT_TRUNC &&
1606                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1607                      osd->od_lockless_truncate)) {
1608                         ops = &osc_lock_lockless_ops;
1609                         oio->oi_lockless     = 1;
1610                         clk->ols_locklessable = 1;
1611                 }
1612
1613                 cl_lock_slice_add(lock, &clk->ols_cl, obj, ops);
1614                 result = 0;
1615         } else
1616                 result = -ENOMEM;
1617         return result;
1618 }
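
/*
 * Illustrative sketch (hypothetical, not part of the original source) of the
 * "additional policy" mentioned in osc_lock_init() above: a predicate that
 * could be used to refuse lockless IO for large extents. Both the helper and
 * OSC_LOCKLESS_MAX_PAGES are invented names.
 */
#if 0
#define OSC_LOCKLESS_MAX_PAGES 256

static int osc_lock_extent_is_small(const struct cl_lock *lock)
{
        const struct cl_lock_descr *descr = &lock->cll_descr;

        /* cld_start and cld_end are inclusive page indices; the check is
         * also safe for full-object locks, where cld_end is very large. */
        return descr->cld_end - descr->cld_start < OSC_LOCKLESS_MAX_PAGES;
}
#endif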
1619
1620
1621 /** @} osc */