LU-884 clio: client in memory checksum
fs/lustre-release.git: lustre/osc/osc_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2011 Whamcloud, Inc.
33  *
34  */
35 /*
36  * This file is part of Lustre, http://www.lustre.org/
37  * Lustre is a trademark of Sun Microsystems, Inc.
38  *
39  * Implementation of cl_lock for OSC layer.
40  *
41  *   Author: Nikita Danilov <nikita.danilov@sun.com>
42  */
43
44 #define DEBUG_SUBSYSTEM S_OSC
45
46 #ifdef __KERNEL__
47 # include <libcfs/libcfs.h>
48 #else
49 # include <liblustre.h>
50 #endif
51 /* fid_build_reg_res_name() */
52 #include <lustre_fid.h>
53
54 #include "osc_cl_internal.h"
55
56 /** \addtogroup osc 
57  *  @{ 
58  */
59
60 #define _PAGEREF_MAGIC  (-10000000)
61
62 /*****************************************************************************
63  *
64  * Type conversions.
65  *
66  */
67
68 static const struct cl_lock_operations osc_lock_ops;
69 static const struct cl_lock_operations osc_lock_lockless_ops;
70 static void osc_lock_to_lockless(const struct lu_env *env,
71                                  struct osc_lock *ols, int force);
72 static int osc_lock_has_pages(struct osc_lock *olck);
73
74 int osc_lock_is_lockless(const struct osc_lock *olck)
75 {
76         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
77 }
78
79 /**
80  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
81  * pointer cannot be dereferenced, as lock is not protected from concurrent
82  * reclaim. This function is a helper for osc_lock_invariant().
83  */
84 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
85 {
86         struct ldlm_lock *lock;
87
88         lock = ldlm_handle2lock(handle);
89         if (lock != NULL)
90                 LDLM_LOCK_PUT(lock);
91         return lock;
92 }
93
94 /**
95  * Invariant that has to be true all of the time.
96  */
97 static int osc_lock_invariant(struct osc_lock *ols)
98 {
99         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
100         struct ldlm_lock *olock       = ols->ols_lock;
101         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
102
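        /*
         * The invariant is a disjunction: either the lockless case holds
         * (a lockless lock must be locklessable and have no dlm lock
         * attached), or the cached dlm lock, the lustre_handle and the
         * osc_lock state are mutually consistent.
         */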
103         return
104                 ergo(osc_lock_is_lockless(ols),
105                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
106                 (ergo(olock != NULL, handle_used) &&
107                  ergo(olock != NULL,
108                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
109                  /*
110                   * Check that ->ols_handle and ->ols_lock are consistent, but
111                   * take into account that they are set at different times.
112                   */
113                  ergo(handle_used,
114                       ergo(lock != NULL && olock != NULL, lock == olock) &&
115                       ergo(lock == NULL, olock == NULL)) &&
116                  ergo(ols->ols_state == OLS_CANCELLED,
117                       olock == NULL && !handle_used) &&
118                  /*
119                   * DLM lock is destroyed only after we have seen cancellation
120                   * ast.
121                   */
122                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
123                       !olock->l_destroyed) &&
124                  ergo(ols->ols_state == OLS_GRANTED,
125                       olock != NULL &&
126                       olock->l_req_mode == olock->l_granted_mode &&
127                       ols->ols_hold));
128 }
129
130 /*****************************************************************************
131  *
132  * Lock operations.
133  *
134  */
135
136 /**
137  * Breaks a link between osc_lock and dlm_lock.
138  */
139 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
140 {
141         struct ldlm_lock *dlmlock;
142
143         cfs_spin_lock(&osc_ast_guard);
144         dlmlock = olck->ols_lock;
145         if (dlmlock == NULL) {
146                 cfs_spin_unlock(&osc_ast_guard);
147                 return;
148         }
149
150         olck->ols_lock = NULL;
151         /* wb(); --- for anyone who checks (ols->ols_lock != NULL) before
152          * calling osc_lock_detach() */
153         dlmlock->l_ast_data = NULL;
154         olck->ols_handle.cookie = 0ULL;
155         cfs_spin_unlock(&osc_ast_guard);
156
157         lock_res_and_lock(dlmlock);
158         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
159                 struct cl_object *obj = olck->ols_cl.cls_obj;
160                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
161                 __u64 old_kms;
162
163                 cl_object_attr_lock(obj);
164                 /* Must get the value under the lock to avoid possible races. */
165                 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
166                 /* Update the kms. This needs to loop over all granted
167                  * locks, which is not a problem for the client. */
168                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
169
170                 cl_object_attr_set(env, obj, attr, CAT_KMS);
171                 cl_object_attr_unlock(obj);
172         }
173         unlock_res_and_lock(dlmlock);
174
175         /* release a reference taken in osc_lock_upcall0(). */
176         LASSERT(olck->ols_has_ref);
177         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
178         LDLM_LOCK_RELEASE(dlmlock);
179         olck->ols_has_ref = 0;
180 }
181
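/**
 * Drops the reference that pins the underlying ldlm lock in "held" state
 * (ols_hold), by delegating to osc_cancel_base() with the enqueue mode.
 */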
182 static int osc_lock_unhold(struct osc_lock *ols)
183 {
184         int result = 0;
185
186         if (ols->ols_hold) {
187                 ols->ols_hold = 0;
188                 result = osc_cancel_base(&ols->ols_handle,
189                                          ols->ols_einfo.ei_mode);
190         }
191         return result;
192 }
193
194 static int osc_lock_unuse(const struct lu_env *env,
195                           const struct cl_lock_slice *slice)
196 {
197         struct osc_lock *ols = cl2osc_lock(slice);
198
199         LASSERT(ols->ols_state == OLS_GRANTED ||
200                 ols->ols_state == OLS_UPCALL_RECEIVED);
201         LINVRNT(osc_lock_invariant(ols));
202
203         if (ols->ols_glimpse) {
204                 LASSERT(ols->ols_hold == 0);
205                 return 0;
206         }
207         LASSERT(ols->ols_hold);
208
209         /*
210          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
211          * so that possible synchronous cancellation (that always happens
212          * e.g., for liblustre) sees that lock is released.
213          */
214         ols->ols_state = OLS_RELEASED;
215         return osc_lock_unhold(ols);
216 }
217
218 static void osc_lock_fini(const struct lu_env *env,
219                           struct cl_lock_slice *slice)
220 {
221         struct osc_lock  *ols = cl2osc_lock(slice);
222
223         LINVRNT(osc_lock_invariant(ols));
224         /*
225          * ->ols_hold can still be true at this point if, for example, a
226          * thread that requested a lock was killed (and released a reference
227          * to the lock) before the reply from the server was received. In
228          * this case the lock is destroyed immediately after the upcall.
229          */
230         osc_lock_unhold(ols);
231         LASSERT(ols->ols_lock == NULL);
232         LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
233                 cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
234
235         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
236 }
237
238 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
239                         struct ldlm_res_id *resname)
240 {
241         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
242         if (0) {
243                 /*
244                  * In the perfect world of the future, where ost servers talk
245                  * idif-fids...
246                  */
247                 fid_build_reg_res_name(fid, resname);
248         } else {
249                 /*
250                  * In reality, the ost server expects ->lsm_object_id and
251                  * ->lsm_object_seq in the resource name.
252                  */
253                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
254                                    resname);
255         }
256 }
257
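/**
 * Fills an ldlm extent policy (byte range and group id) from the cl_lock
 * description of \a lock.
 */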
258 static void osc_lock_build_policy(const struct lu_env *env,
259                                   const struct cl_lock *lock,
260                                   ldlm_policy_data_t *policy)
261 {
262         const struct cl_lock_descr *d = &lock->cll_descr;
263
264         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
265         policy->l_extent.gid = d->cld_gid;
266 }
267
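/**
 * Translates cl_lock enqueue flags (CEF_*) into dlm flags understood by
 * the ldlm layer.
 */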
268 static int osc_enq2ldlm_flags(__u32 enqflags)
269 {
270         int result = 0;
271
272         LASSERT((enqflags & ~CEF_MASK) == 0);
273
274         if (enqflags & CEF_NONBLOCK)
275                 result |= LDLM_FL_BLOCK_NOWAIT;
276         if (enqflags & CEF_ASYNC)
277                 result |= LDLM_FL_HAS_INTENT;
278         if (enqflags & CEF_DISCARD_DATA)
279                 result |= LDLM_AST_DISCARD_DATA;
280         return result;
281 }
282
283 /**
284  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
285  * pointers. Initialized in osc_init().
286  */
287 cfs_spinlock_t osc_ast_guard;
288
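/**
 * Returns the osc_lock attached to \a dlm_lock through l_ast_data, taking a
 * trusted reference on the enclosing cl_lock. Must be paired with
 * osc_ast_data_put(). Returns NULL if no live osc_lock is attached.
 */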
289 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
290 {
291         struct osc_lock *olck;
292
293         lock_res_and_lock(dlm_lock);
294         cfs_spin_lock(&osc_ast_guard);
295         olck = dlm_lock->l_ast_data;
296         if (olck != NULL) {
297                 struct cl_lock *lock = olck->ols_cl.cls_lock;
298                 /*
299                  * If osc_lock holds a reference on ldlm lock, return it even
300                  * when cl_lock is in CLS_FREEING state. This way
301                  *
302                  *         osc_ast_data_get(dlmlock) == NULL
303                  *
304                  * guarantees that all osc references on dlmlock were
305                  * released. osc_dlm_blocking_ast0() relies on that.
306                  */
307                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
308                         cl_lock_get_trust(lock);
309                         lu_ref_add_atomic(&lock->cll_reference,
310                                           "ast", cfs_current());
311                 } else
312                         olck = NULL;
313         }
314         cfs_spin_unlock(&osc_ast_guard);
315         unlock_res_and_lock(dlm_lock);
316         return olck;
317 }
318
319 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
320 {
321         struct cl_lock *lock;
322
323         lock = olck->ols_cl.cls_lock;
324         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
325         cl_lock_put(env, lock);
326 }
327
328 /**
329  * Updates object attributes from a lock value block (lvb) received together
330  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
331  * logic.
332  *
333  * This can be optimized to not update attributes when lock is a result of a
334  * local match.
335  *
336  * Called under lock and resource spin-locks.
337  */
338 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
339                                 int rc)
340 {
341         struct ost_lvb    *lvb;
342         struct cl_object  *obj;
343         struct lov_oinfo  *oinfo;
344         struct cl_attr    *attr;
345         unsigned           valid;
346
347         ENTRY;
348
349         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
350                 EXIT;
351                 return;
352         }
353
354         lvb   = &olck->ols_lvb;
355         obj   = olck->ols_cl.cls_obj;
356         oinfo = cl2osc(obj)->oo_oinfo;
357         attr  = &osc_env_info(env)->oti_attr;
358         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
359         cl_lvb2attr(attr, lvb);
360
361         cl_object_attr_lock(obj);
362         if (rc == 0) {
363                 struct ldlm_lock  *dlmlock;
364                 __u64 size;
365
366                 dlmlock = olck->ols_lock;
367                 LASSERT(dlmlock != NULL);
368
369                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
370                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
371                 size = lvb->lvb_size;
372                 /* Extend KMS up to the end of this lock and no further.
373                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
374                 if (size > dlmlock->l_policy_data.l_extent.end)
375                         size = dlmlock->l_policy_data.l_extent.end + 1;
376                 if (size >= oinfo->loi_kms) {
377                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
378                                    ", kms="LPU64, lvb->lvb_size, size);
379                         valid |= CAT_KMS;
380                         attr->cat_kms = size;
381                 } else {
382                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
383                                    LPU64"; leaving kms="LPU64", end="LPU64,
384                                    lvb->lvb_size, oinfo->loi_kms,
385                                    dlmlock->l_policy_data.l_extent.end);
386                 }
387                 ldlm_lock_allow_match_locked(dlmlock);
388         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
389                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
390                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
391         } else
392                 valid = 0;
393
394         if (valid != 0)
395                 cl_object_attr_set(env, obj, attr, valid);
396
397         cl_object_attr_unlock(obj);
398
399         EXIT;
400 }
401
402 /**
403  * Called when a lock is granted, from an upcall (when server returned a
404  * granted lock), or from completion AST, when server returned a blocked lock.
405  *
406  * Called under lock and resource spin-locks, that are released temporarily
407  * here.
408  */
409 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
410                              struct ldlm_lock *dlmlock, int rc)
411 {
412         struct ldlm_extent   *ext;
413         struct cl_lock       *lock;
414         struct cl_lock_descr *descr;
415
416         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
417
418         ENTRY;
419         if (olck->ols_state < OLS_GRANTED) {
420                 lock  = olck->ols_cl.cls_lock;
421                 ext   = &dlmlock->l_policy_data.l_extent;
422                 descr = &osc_env_info(env)->oti_descr;
423                 descr->cld_obj = lock->cll_descr.cld_obj;
424
425                 /* XXX check that ->l_granted_mode is valid. */
426                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
427                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
428                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
429                 descr->cld_gid   = ext->gid;
430                 /*
431                  * tell upper layers the extent of the lock that was actually
432                  * granted
433                  */
434                 olck->ols_state = OLS_GRANTED;
435                 osc_lock_lvb_update(env, olck, rc);
436
437                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
438                  * to take a semaphore on a parent lock. This is safe, because
439                  * spin-locks are needed to protect consistency of
440                  * dlmlock->l_*_mode and LVB, and we have finished processing
441                  * them. */
442                 unlock_res_and_lock(dlmlock);
443                 cl_lock_modify(env, lock, descr);
444                 cl_lock_signal(env, lock);
445                 LINVRNT(osc_lock_invariant(olck));
446                 lock_res_and_lock(dlmlock);
447         }
448         EXIT;
449 }
450
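/**
 * Lock upcall, part one: binds the just-enqueued dlm lock to the osc_lock,
 * takes hold and lu_ref references on it, and, if the lock is already
 * granted, propagates the granted extent to the cl_lock.
 */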
451 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
452
453 {
454         struct ldlm_lock *dlmlock;
455
456         ENTRY;
457
458         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
459         LASSERT(dlmlock != NULL);
460
461         lock_res_and_lock(dlmlock);
462         cfs_spin_lock(&osc_ast_guard);
463         LASSERT(dlmlock->l_ast_data == olck);
464         LASSERT(olck->ols_lock == NULL);
465         olck->ols_lock = dlmlock;
466         cfs_spin_unlock(&osc_ast_guard);
467
468         /*
469          * The lock might not be granted yet. In this case, the completion ast
470          * (osc_ldlm_completion_ast()) comes later and finishes lock
471          * granting.
472          */
473         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
474                 osc_lock_granted(env, olck, dlmlock, 0);
475         unlock_res_and_lock(dlmlock);
476
477         /*
478          * osc_enqueue_interpret() decrefs asynchronous locks; counter
479          * that here by taking an extra reference.
480          */
481         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
482         olck->ols_hold = 1;
483
484         /* lock reference taken by ldlm_handle2lock_long() is owned by
485          * osc_lock and released in osc_lock_detach() */
486         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
487         olck->ols_has_ref = 1;
488 }
489
490 /**
491  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
492  * received from a server, or after osc_enqueue_base() matched a local DLM
493  * lock.
494  */
495 static int osc_lock_upcall(void *cookie, int errcode)
496 {
497         struct osc_lock         *olck  = cookie;
498         struct cl_lock_slice    *slice = &olck->ols_cl;
499         struct cl_lock          *lock  = slice->cls_lock;
500         struct lu_env           *env;
501         struct cl_env_nest       nest;
502
503         ENTRY;
504         env = cl_env_nested_get(&nest);
505         if (!IS_ERR(env)) {
506                 int rc;
507
508                 cl_lock_mutex_get(env, lock);
509
510                 LASSERT(lock->cll_state >= CLS_QUEUING);
511                 if (olck->ols_state == OLS_ENQUEUED) {
512                         olck->ols_state = OLS_UPCALL_RECEIVED;
513                         rc = ldlm_error2errno(errcode);
514                 } else if (olck->ols_state == OLS_CANCELLED) {
515                         rc = -EIO;
516                 } else {
517                         CERROR("Impossible state: %d\n", olck->ols_state);
518                         LBUG();
519                 }
520                 if (rc) {
521                         struct ldlm_lock *dlmlock;
522
523                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
524                         if (dlmlock != NULL) {
525                                 lock_res_and_lock(dlmlock);
526                                 cfs_spin_lock(&osc_ast_guard);
527                                 LASSERT(olck->ols_lock == NULL);
528                                 dlmlock->l_ast_data = NULL;
529                                 olck->ols_handle.cookie = 0ULL;
530                                 cfs_spin_unlock(&osc_ast_guard);
531                                 unlock_res_and_lock(dlmlock);
532                                 LDLM_LOCK_PUT(dlmlock);
533                         }
534                 } else {
535                         if (olck->ols_glimpse)
536                                 olck->ols_glimpse = 0;
537                         osc_lock_upcall0(env, olck);
538                 }
539
540                 /* Error handling, some errors are tolerable. */
541                 if (olck->ols_locklessable && rc == -EUSERS) {
542                         /* This is a tolerable error; turn this lock into
543                          * a lockless lock.
544                          */
545                         osc_object_set_contended(cl2osc(slice->cls_obj));
546                         LASSERT(slice->cls_ops == &osc_lock_ops);
547
548                         /* Change this lock to a lockless (no ldlm lock) lock. */
549                         osc_lock_to_lockless(env, olck, 1);
550                         olck->ols_state = OLS_GRANTED;
551                         rc = 0;
552                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
553                         osc_lock_lvb_update(env, olck, rc);
554                         cl_lock_delete(env, lock);
555                         /* Hide the error. */
556                         rc = 0;
557                 }
558
559                 if (rc == 0)
560                         /* on error, lock was signaled by cl_lock_error() */
561                         cl_lock_signal(env, lock);
562                 else
563                         cl_lock_error(env, lock, rc);
564
565                 cl_lock_mutex_put(env, lock);
566
567                 /* release cookie reference, acquired by osc_lock_enqueue() */
568                 lu_ref_del(&lock->cll_reference, "upcall", lock);
569                 cl_lock_put(env, lock);
570                 cl_env_nested_put(&nest, env);
571         } else
572                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
573                 LBUG();
574         RETURN(errcode);
575 }
576
577 /**
578  * Core of osc_dlm_blocking_ast() logic.
579  */
580 static void osc_lock_blocking(const struct lu_env *env,
581                               struct ldlm_lock *dlmlock,
582                               struct osc_lock *olck, int blocking)
583 {
584         struct cl_lock *lock = olck->ols_cl.cls_lock;
585
586         LASSERT(olck->ols_lock == dlmlock);
587         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
588         LASSERT(!osc_lock_is_lockless(olck));
589
590         /*
591          * The lock might still be addref-ed here, if, e.g., a blocking ast
592          * is sent for a failed lock.
593          */
594         osc_lock_unhold(olck);
595
596         if (blocking && olck->ols_state < OLS_BLOCKED)
597                 /*
598                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
599                  * because it recursively re-enters osc_lock_blocking(), with
600                  * the state set to OLS_CANCELLED.
601                  */
602                 olck->ols_state = OLS_BLOCKED;
603         /*
604          * cancel and destroy lock at least once no matter how blocking ast is
605          * entered (see comment above osc_ldlm_blocking_ast() for use
606          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
607          */
608         cl_lock_cancel(env, lock);
609         cl_lock_delete(env, lock);
610 }
611
612 /**
613  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
614  * and ldlm_lock caches.
615  */
616 static int osc_dlm_blocking_ast0(const struct lu_env *env,
617                                  struct ldlm_lock *dlmlock,
618                                  void *data, int flag)
619 {
620         struct osc_lock *olck;
621         struct cl_lock  *lock;
622         int result;
623         int cancel;
624
625         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
626
627         cancel = 0;
628         olck = osc_ast_data_get(dlmlock);
629         if (olck != NULL) {
630                 lock = olck->ols_cl.cls_lock;
631                 cl_lock_mutex_get(env, lock);
632                 LINVRNT(osc_lock_invariant(olck));
633                 if (olck->ols_ast_wait) {
634                         /* wake up osc_lock_use() */
635                         cl_lock_signal(env, lock);
636                         olck->ols_ast_wait = 0;
637                 }
638                 /*
639                  * Lock might have been canceled while this thread was
640                  * sleeping on the lock mutex, but olck is pinned in memory.
641                  */
642                 if (olck == dlmlock->l_ast_data) {
643                         /*
644                          * NOTE: DLM sends blocking AST's for failed locks
645                          *       (that are still in pre-OLS_GRANTED state)
646                          *       too, and they have to be canceled otherwise
647                          *       DLM lock is never destroyed and stuck in
648                          *       the memory.
649                          *
650                          *       Alternatively, ldlm_cli_cancel() can be
651                          *       called here directly for osc_locks with
652                          *       ols_state < OLS_GRANTED to maintain an
653                          *       invariant that ->clo_cancel() is only called
654                          *       for locks that were granted.
655                          */
656                         LASSERT(data == olck);
657                         osc_lock_blocking(env, dlmlock,
658                                           olck, flag == LDLM_CB_BLOCKING);
659                 } else
660                         cancel = 1;
661                 cl_lock_mutex_put(env, lock);
662                 osc_ast_data_put(env, olck);
663         } else
664                 /*
665                  * DLM lock exists, but there is no cl_lock attached to it.
666                  * This is a `normal' race. cl_object and its cl_lock's can be
667                  * removed by memory pressure, together with all pages.
668                  */
669                 cancel = (flag == LDLM_CB_BLOCKING);
670
671         if (cancel) {
672                 struct lustre_handle *lockh;
673
674                 lockh = &osc_env_info(env)->oti_handle;
675                 ldlm_lock2handle(dlmlock, lockh);
676                 result = ldlm_cli_cancel(lockh);
677         } else
678                 result = 0;
679         return result;
680 }
681
682 /**
683  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
684  * some other lock, or is canceled. This function is installed as a
685  * ldlm_lock::l_blocking_ast() for client extent locks.
686  *
687  * Control flow is tricky, because ldlm uses the same call-back
688  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
689  *
690  * \param dlmlock lock for which ast occurred.
691  *
692  * \param new description of a conflicting lock in case of blocking ast.
693  *
694  * \param data value of dlmlock->l_ast_data
695  *
696  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
697  *             cancellation and blocking ast's.
698  *
699  * Possible use cases:
700  *
701  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
702  *       lock due to lock lru pressure, or explicit user request to purge
703  *       locks.
704  *
705  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
706  *       us that dlmlock conflicts with another lock that some client is
707  *       enqueing. Lock is canceled.
708  *
709  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
710  *             ldlm_cli_cancel() that calls
711  *
712  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
713  *
714  *             recursively entering osc_ldlm_blocking_ast().
715  *
716  *     - client cancels the lock voluntarily (e.g., as a part of early cancellation):
717  *
718  *           cl_lock_cancel()->
719  *             osc_lock_cancel()->
720  *               ldlm_cli_cancel()->
721  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
722  *
723  */
724 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
725                                  struct ldlm_lock_desc *new, void *data,
726                                  int flag)
727 {
728         struct lu_env     *env;
729         struct cl_env_nest nest;
730         int                result;
731
732         /*
733          * This can be called in the context of outer IO, e.g.,
734          *
735          *     cl_enqueue()->...
736          *       ->osc_enqueue_base()->...
737          *         ->ldlm_prep_elc_req()->...
738          *           ->ldlm_cancel_callback()->...
739          *             ->osc_ldlm_blocking_ast()
740          *
741          * a new environment has to be created to avoid corrupting the outer context.
742          */
743         env = cl_env_nested_get(&nest);
744         if (!IS_ERR(env)) {
745                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
746                 cl_env_nested_put(&nest, env);
747         } else {
748                 result = PTR_ERR(env);
749                 /*
750                  * XXX This should never happen, as cl_lock is
751                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
752                  * should be used.
753                  */
754                 LBUG();
755         }
756         if (result != 0) {
757                 if (result == -ENODATA)
758                         result = 0;
759                 else
760                         CERROR("BAST failed: %d\n", result);
761         }
762         return result;
763 }
764
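/**
 * Completion AST for client extent locks: first lets ldlm finish its part of
 * the granting (ldlm_completion_ast_async()), then updates the osc_lock and
 * cl_lock state (LVB, granted extent) under the dlm spin-locks.
 */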
765 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
766                                    int flags, void *data)
767 {
768         struct cl_env_nest nest;
769         struct lu_env     *env;
770         struct osc_lock   *olck;
771         struct cl_lock    *lock;
772         int result;
773         int dlmrc;
774
775         /* first, do dlm part of the work */
776         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
777         /* then, notify cl_lock */
778         env = cl_env_nested_get(&nest);
779         if (!IS_ERR(env)) {
780                 olck = osc_ast_data_get(dlmlock);
781                 if (olck != NULL) {
782                         lock = olck->ols_cl.cls_lock;
783                         cl_lock_mutex_get(env, lock);
784                         /*
785                          * ldlm_handle_cp_callback() copied LVB from request
786                          * to lock->l_lvb_data, store it in osc_lock.
787                          */
788                         LASSERT(dlmlock->l_lvb_data != NULL);
789                         lock_res_and_lock(dlmlock);
790                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
791                         if (olck->ols_lock == NULL) {
792                                 /*
793                                  * upcall (osc_lock_upcall()) hasn't yet been
794                                  * called. Do nothing now, upcall will bind
795                                  * olck to dlmlock and signal the waiters.
796                                  *
797                                  * This maintains an invariant that osc_lock
798                                  * and ldlm_lock are always bound when
799                                  * osc_lock is in OLS_GRANTED state.
800                                  */
801                         } else if (dlmlock->l_granted_mode ==
802                                    dlmlock->l_req_mode) {
803                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
804                         }
805                         unlock_res_and_lock(dlmlock);
806
807                         if (dlmrc != 0) {
808                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
809                                               "dlmlock returned %d\n", dlmrc);
810                                 cl_lock_error(env, lock, dlmrc);
811                         }
812                         cl_lock_mutex_put(env, lock);
813                         osc_ast_data_put(env, olck);
814                         result = 0;
815                 } else
816                         result = -ELDLM_NO_LOCK_DATA;
817                 cl_env_nested_put(&nest, env);
818         } else
819                 result = PTR_ERR(env);
820         return dlmrc ?: result;
821 }
822
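/**
 * Glimpse AST for client extent locks: the server asks the client holding
 * the lock for an up-to-date lvb. The reply is packed with the attributes
 * obtained from cl_object_glimpse().
 */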
823 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
824 {
825         struct ptlrpc_request  *req  = data;
826         struct osc_lock        *olck;
827         struct cl_lock         *lock;
828         struct cl_object       *obj;
829         struct cl_env_nest      nest;
830         struct lu_env          *env;
831         struct ost_lvb         *lvb;
832         struct req_capsule     *cap;
833         int                     result;
834
835         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
836
837         env = cl_env_nested_get(&nest);
838         if (!IS_ERR(env)) {
839                 /*
840                  * osc_ast_data_get() has to go after the environment is
841                  * allocated, because osc_ast_data_get() acquires a
842                  * reference to a lock, and that reference can only be
843                  * released within an environment.
844                  */
845                 olck = osc_ast_data_get(dlmlock);
846                 if (olck != NULL) {
847                         lock = olck->ols_cl.cls_lock;
848                         cl_lock_mutex_get(env, lock);
849                         cap = &req->rq_pill;
850                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
851                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
852                                              sizeof *lvb);
853                         result = req_capsule_server_pack(cap);
854                         if (result == 0) {
855                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
856                                 obj = lock->cll_descr.cld_obj;
857                                 result = cl_object_glimpse(env, obj, lvb);
858                         }
859                         cl_lock_mutex_put(env, lock);
860                         osc_ast_data_put(env, olck);
861                 } else {
862                         /*
863                          * These errors are normal races, so we don't want to
864                          * fill the console with messages by calling
865                          * ptlrpc_error()
866                          */
867                         lustre_pack_reply(req, 1, NULL, NULL);
868                         result = -ELDLM_NO_LOCK_DATA;
869                 }
870                 cl_env_nested_put(&nest, env);
871         } else
872                 result = PTR_ERR(env);
873         req->rq_status = result;
874         return result;
875 }
876
877 static unsigned long osc_lock_weigh(const struct lu_env *env,
878                                     const struct cl_lock_slice *slice)
879 {
880         /*
881          * No need to grab coh_page_guard, since we don't care about the
882          * exact number of pages.
883          */
884         return cl_object_header(slice->cls_obj)->coh_pages;
885 }
886
887 /**
888  * Get the weight of dlm lock for early cancellation.
889  *
890  * XXX: it should return the pages covered by this \a dlmlock.
891  */
892 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
893 {
894         struct cl_env_nest       nest;
895         struct lu_env           *env;
896         struct osc_lock         *lock;
897         struct cl_lock          *cll;
898         unsigned long            weight;
899         ENTRY;
900
901         cfs_might_sleep();
902         /*
903          * osc_ldlm_weigh_ast() runs in a complex context: it can be called
904          * because of lock cancellation, or from user input. A new
905          * environment has to be made for it. It is probably safe to reuse
906          * the upper context, since cl_lock_put() does not modify environment
907          * variables, but allocate a nested one just in case.
908          */
909         env = cl_env_nested_get(&nest);
910         if (IS_ERR(env))
911                 /* Most likely due to lack of memory; prefer to eliminate this lock. */
912                 RETURN(0);
913
914         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
915         lock = osc_ast_data_get(dlmlock);
916         if (lock == NULL) {
917                 /* cl_lock was destroyed because of memory pressure.
918                  * It is more reasonable to assign this type of lock
919                  * a lower cost.
920                  */
921                 GOTO(out, weight = 0);
922         }
923
924         cll = lock->ols_cl.cls_lock;
925         cl_lock_mutex_get(env, cll);
926         weight = cl_lock_weigh(env, cll);
927         cl_lock_mutex_put(env, cll);
928         osc_ast_data_put(env, lock);
929         EXIT;
930
931 out:
932         cl_env_nested_put(&nest, env);
933         return weight;
934 }
935
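/**
 * Fills ldlm_enqueue_info for an extent lock: lock type, mode (glimpse,
 * i.e. CLM_PHANTOM, locks are enqueued as read locks for now) and the
 * osc_ldlm_*_ast() callbacks, with the osc_lock as ->l_ast_data.
 */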
936 static void osc_lock_build_einfo(const struct lu_env *env,
937                                  const struct cl_lock *clock,
938                                  struct osc_lock *lock,
939                                  struct ldlm_enqueue_info *einfo)
940 {
941         enum cl_lock_mode mode;
942
943         mode = clock->cll_descr.cld_mode;
944         if (mode == CLM_PHANTOM)
945                 /*
946                  * For now, enqueue all glimpse locks in read mode. In the
947                  * future, client might choose to enqueue LCK_PW lock for
948                  * glimpse on a file opened for write.
949                  */
950                 mode = CLM_READ;
951
952         einfo->ei_type   = LDLM_EXTENT;
953         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
954         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
955         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
956         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
957         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
958         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
959 }
960
961 /**
962  * Determine if the lock should be converted into a lockless lock.
963  *
964  * Steps to check:
965  * - whether the lock has an explicit requirement for a non-lockless lock;
966  * - the io lock request type, ci_lockreq;
967  * - send the enqueue rpc to the ost to make the further decision;
968  * - special treatment for lockless truncate.
969  *
970  *  Additional policy can be implemented here, e.g., never do lockless-io
971  *  for large extents.
972  */
973 static void osc_lock_to_lockless(const struct lu_env *env,
974                                  struct osc_lock *ols, int force)
975 {
976         struct cl_lock_slice *slice = &ols->ols_cl;
977         struct cl_lock *lock        = slice->cls_lock;
978
979         LASSERT(ols->ols_state == OLS_NEW ||
980                 ols->ols_state == OLS_UPCALL_RECEIVED);
981
982         if (force) {
983                 ols->ols_locklessable = 1;
984                 LASSERT(cl_lock_is_mutexed(lock));
985                 slice->cls_ops = &osc_lock_lockless_ops;
986         } else {
987                 struct osc_io *oio     = osc_env_io(env);
988                 struct cl_io  *io      = oio->oi_cl.cis_io;
989                 struct cl_object *obj  = slice->cls_obj;
990                 struct osc_object *oob = cl2osc(obj);
991                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
992                 struct obd_connect_data *ocd;
993
994                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
995                         io->ci_lockreq == CILR_MAYBE ||
996                         io->ci_lockreq == CILR_NEVER);
997
998                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
999                 ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
1000                                 (io->ci_lockreq == CILR_MAYBE) &&
1001                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1002                 if (io->ci_lockreq == CILR_NEVER ||
1003                         /* lockless IO */
1004                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1005                         /* lockless truncate */
1006                     (cl_io_is_trunc(io) &&
1007                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1008                       osd->od_lockless_truncate)) {
1009                         ols->ols_locklessable = 1;
1010                         slice->cls_ops = &osc_lock_lockless_ops;
1011                 }
1012         }
1013         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1014 }
1015
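/**
 * Returns true iff the lock being enqueued (\a qing) does not have to cancel
 * the already queued lock \a qed: both are read locks, or \a qed is a
 * glimpse lock that has already received its upcall or only competes with a
 * read lock.
 */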
1016 static int osc_lock_compatible(const struct osc_lock *qing,
1017                                const struct osc_lock *qed)
1018 {
1019         enum cl_lock_mode qing_mode;
1020         enum cl_lock_mode qed_mode;
1021
1022         qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
1023         if (qed->ols_glimpse &&
1024             (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
1025                 return 1;
1026
1027         qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
1028         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
1029 }
1030
1031 /**
1032  * Cancel all conflicting locks and wait for them to be destroyed.
1033  *
1034  * This function is used for two purposes:
1035  *
1036  *     - early cancel all conflicting locks before starting IO, and
1037  *
1038  *     - guarantee that pages added to the page cache by lockless IO are never
1039  *       covered by locks other than lockless IO lock, and, hence, are not
1040  *       visible to other threads.
1041  */
1042 static int osc_lock_enqueue_wait(const struct lu_env *env,
1043                                  const struct osc_lock *olck)
1044 {
1045         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1046         struct cl_lock_descr    *descr   = &lock->cll_descr;
1047         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1048         struct cl_lock          *scan;
1049         struct cl_lock          *conflict= NULL;
1050         int lockless                     = osc_lock_is_lockless(olck);
1051         int rc                           = 0;
1052         ENTRY;
1053
1054         LASSERT(cl_lock_is_mutexed(lock));
1055         LASSERT(lock->cll_state == CLS_QUEUING);
1056
1057         /* let a glimpse lock enqueue anyway, because we actually
1058          * don't need to cancel any conflicting locks. */
1059         if (olck->ols_glimpse)
1060                 return 0;
1061
1062         cfs_spin_lock(&hdr->coh_lock_guard);
1063         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
1064                 struct cl_lock_descr *cld = &scan->cll_descr;
1065                 const struct osc_lock *scan_ols;
1066
1067                 if (scan == lock)
1068                         break;
1069
1070                 if (scan->cll_state < CLS_QUEUING ||
1071                     scan->cll_state == CLS_FREEING ||
1072                     cld->cld_start > descr->cld_end ||
1073                     cld->cld_end < descr->cld_start)
1074                         continue;
1075
1076                 /* overlapped and living locks. */
1077
1078                 /* We're not supposed to give up group lock. */
1079                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1080                         LASSERT(descr->cld_mode != CLM_GROUP ||
1081                                 descr->cld_gid != scan->cll_descr.cld_gid);
1082                         continue;
1083                 }
1084
1085                 scan_ols = osc_lock_at(scan);
1086
1087                 /* We need to cancel the compatible locks if we're enqueuing
1088                  * a lockless lock, for example:
1089                  * imagine that client has PR lock on [0, 1000], and thread T0
1090                  * is doing lockless IO in [500, 1500] region. Concurrent
1091                  * thread T1 can see lockless data in [500, 1000], which is
1092                  * wrong, because these data are possibly stale. */
1093                 if (!lockless && osc_lock_compatible(olck, scan_ols))
1094                         continue;
1095
1096                 /* Now @scan is conflicting with @lock, which means the
1097                  * current thread has to sleep until @scan is destroyed. */
1098                 if (scan_ols->ols_owner == osc_env_io(env)) {
1099                         CERROR("DEADLOCK POSSIBLE!\n");
1100                         CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
1101                         CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
1102                         libcfs_debug_dumpstack(NULL);
1103                 }
1104                 cl_lock_get_trust(scan);
1105                 conflict = scan;
1106                 break;
1107         }
1108         cfs_spin_unlock(&hdr->coh_lock_guard);
1109
1110         if (conflict) {
1111                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
1112                         /* we want a group lock, but a previous lock request
1113                          * conflicts; we do not wait but return 0, so the
1114                          * request is sent to the server
1115                          */
1116                         CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
1117                                            "with %p, no wait, send to server\n",
1118                                lock, conflict);
1119                         cl_lock_put(env, conflict);
1120                         rc = 0;
1121                 } else {
1122                         CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
1123                                            "will wait\n",
1124                                lock, conflict);
1125                         LASSERT(lock->cll_conflict == NULL);
1126                         lu_ref_add(&conflict->cll_reference, "cancel-wait",
1127                                    lock);
1128                         lock->cll_conflict = conflict;
1129                         rc = CLO_WAIT;
1130                 }
1131         }
1132         RETURN(rc);
1133 }
1134
1135 /**
1136  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1137  * layer. This initiates ldlm enqueue:
1138  *
1139  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1140  *
1141  *     - calls osc_enqueue_base() to do actual enqueue.
1142  *
1143  * osc_enqueue_base() is supplied with an upcall function that is executed
1144  * when lock is received either after a local cached ldlm lock is matched, or
1145  * when a reply from the server is received.
1146  *
1147  * This function does not wait for the network communication to complete.
1148  */
1149 static int osc_lock_enqueue(const struct lu_env *env,
1150                             const struct cl_lock_slice *slice,
1151                             struct cl_io *unused, __u32 enqflags)
1152 {
1153         struct osc_lock          *ols     = cl2osc_lock(slice);
1154         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1155         int result;
1156         ENTRY;
1157
1158         LASSERT(cl_lock_is_mutexed(lock));
1159         LASSERT(lock->cll_state == CLS_QUEUING);
1160         LASSERT(ols->ols_state == OLS_NEW);
1161
1162         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1163         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1164                 ols->ols_glimpse = 1;
1165         if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
1166                 /* try to convert this lock to a lockless lock */
1167                 osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1168
1169         result = osc_lock_enqueue_wait(env, ols);
1170         if (result == 0) {
1171                 if (!osc_lock_is_lockless(ols)) {
1172                         struct osc_object        *obj = cl2osc(slice->cls_obj);
1173                         struct osc_thread_info   *info = osc_env_info(env);
1174                         struct ldlm_res_id       *resname = &info->oti_resname;
1175                         ldlm_policy_data_t       *policy = &info->oti_policy;
1176                         struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
1177
1178                         if (ols->ols_locklessable)
1179                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1180
1181                         /* a reference for lock, passed as an upcall cookie */
1182                         cl_lock_get(lock);
1183                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1184                         ols->ols_state = OLS_ENQUEUED;
1185
1186                         /*
1187                          * XXX: this is a possible blocking point as
1188                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1189                          * LDLM_CP_CALLBACK.
1190                          */
1191                         osc_lock_build_res(env, obj, resname);
1192                         osc_lock_build_policy(env, lock, policy);
1193                         result = osc_enqueue_base(osc_export(obj), resname,
1194                                           &ols->ols_flags, policy,
1195                                           &ols->ols_lvb,
1196                                           obj->oo_oinfo->loi_kms_valid,
1197                                           osc_lock_upcall,
1198                                           ols, einfo, &ols->ols_handle,
1199                                           PTLRPCD_SET, 1);
1200                         if (result != 0) {
1201                                 lu_ref_del(&lock->cll_reference,
1202                                            "upcall", lock);
1203                                 cl_lock_put(env, lock);
1204                         }
1205                 } else {
1206                         ols->ols_state = OLS_GRANTED;
1207                         ols->ols_owner = osc_env_io(env);
1208                 }
1209         }
1210         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1211         RETURN(result);
1212 }
1213
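/**
 * Implementation of cl_lock_operations::clo_wait() for osc layer: returns 0
 * once the lock is granted (or, for a glimpse lock, once the upcall has been
 * received), CLO_WAIT while the enqueue is still in flight, or the error
 * recorded on the cl_lock.
 */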
1214 static int osc_lock_wait(const struct lu_env *env,
1215                          const struct cl_lock_slice *slice)
1216 {
1217         struct osc_lock *olck = cl2osc_lock(slice);
1218         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1219
1220         LINVRNT(osc_lock_invariant(olck));
1221         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1222                 return 0;
1223
1224         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1225                      lock->cll_error == 0, olck->ols_lock != NULL));
1226
1227         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1228 }
1229
1230 /**
1231  * An implementation of cl_lock_operations::clo_use() method that pins cached
1232  * lock.
1233  */
1234 static int osc_lock_use(const struct lu_env *env,
1235                         const struct cl_lock_slice *slice)
1236 {
1237         struct osc_lock *olck = cl2osc_lock(slice);
1238         int rc;
1239
1240         LASSERT(!olck->ols_hold);
1241
1242         /*
1243          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1244          * flag is not set. This protects us from a concurrent blocking ast.
1245          */
1246         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1247         if (rc == 0) {
1248                 olck->ols_hold = 1;
1249                 olck->ols_state = OLS_GRANTED;
1250         } else {
1251                 struct cl_lock *lock;
1252
1253                 /*
1254                  * Lock is being cancelled somewhere within
1255                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1256                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1257                  * cl_lock mutex.
1258                  */
1259                 lock = slice->cls_lock;
1260                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1261                 LASSERT(lock->cll_users > 0);
1262                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1263                  * lock.*/
1264                 olck->ols_ast_wait = 1;
1265                 rc = CLO_WAIT;
1266         }
1267         return rc;
1268 }
1269
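/**
 * Writes out (and optionally discards) all pages covered by \a ols by
 * calling cl_lock_page_out() in a freshly allocated nested environment,
 * and marks the osc_lock as flushed (ols_flush) on success.
 */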
1270 static int osc_lock_flush(struct osc_lock *ols, int discard)
1271 {
1272         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1273         struct cl_env_nest    nest;
1274         struct lu_env        *env;
1275         int result = 0;
1276
1277         env = cl_env_nested_get(&nest);
1278         if (!IS_ERR(env)) {
1279                 result = cl_lock_page_out(env, lock, discard);
1280                 cl_env_nested_put(&nest, env);
1281         } else
1282                 result = PTR_ERR(env);
1283         if (result == 0) {
1284                 ols->ols_flush = 1;
1285                 LINVRNT(!osc_lock_has_pages(ols));
1286         }
1287         return result;
1288 }
1289
1290 /**
1291  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1292  * called (as part of cl_lock_cancel()) when a lock is canceled either
1293  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
1294  * conflict with some other lock somewhere in the cluster. This function
1295  * does the following:
1296  *
1297  *     - invalidates all pages protected by this lock (after sending dirty
1298  *       ones to the server, as necessary);
1299  *
1300  *     - decref's underlying ldlm lock;
1301  *
1302  *     - cancels ldlm lock (ldlm_cli_cancel()).
1303  */
1304 static void osc_lock_cancel(const struct lu_env *env,
1305                             const struct cl_lock_slice *slice)
1306 {
1307         struct cl_lock   *lock    = slice->cls_lock;
1308         struct osc_lock  *olck    = cl2osc_lock(slice);
1309         struct ldlm_lock *dlmlock = olck->ols_lock;
1310         int               result  = 0;
1311         int               discard;
1312
1313         LASSERT(cl_lock_is_mutexed(lock));
1314         LINVRNT(osc_lock_invariant(olck));
1315
1316         if (dlmlock != NULL) {
1317                 int do_cancel;
1318
1319                 discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
1320                 result = osc_lock_flush(olck, discard);
1321                 osc_lock_unhold(olck);
1322
1323                 lock_res_and_lock(dlmlock);
1324                 /* Now that we're the only user of the dlm read/write
1325                  * reference, ->l_readers + ->l_writers should normally be
1326                  * zero. However, there is a corner case.
1327                  * See bug 18829 for details. */
1328                 do_cancel = (dlmlock->l_readers == 0 &&
1329                              dlmlock->l_writers == 0);
1330                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1331                 unlock_res_and_lock(dlmlock);
1332                 if (do_cancel)
1333                         result = ldlm_cli_cancel(&olck->ols_handle);
1334                 if (result < 0)
1335                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1336                                       "lock %p cancel failure with error(%d)\n",
1337                                       lock, result);
1338         }
1339         olck->ols_state = OLS_CANCELLED;
1340         osc_lock_detach(env, olck);
1341 }
1342
1343 void cl_lock_page_list_fixup(const struct lu_env *env,
1344                              struct cl_io *io, struct cl_lock *lock,
1345                              struct cl_page_list *queue);
1346
1347 #ifdef INVARIANT_CHECK
1348 /**
1349  * Returns true iff there are pages under \a olck not protected by other
1350  * locks.
1351  */
1352 static int osc_lock_has_pages(struct osc_lock *olck)
1353 {
1354         struct cl_lock       *lock;
1355         struct cl_lock_descr *descr;
1356         struct cl_object     *obj;
1357         struct osc_object    *oob;
1358         struct cl_page_list  *plist;
1359         struct cl_page       *page;
1360         struct cl_env_nest    nest;
1361         struct cl_io         *io;
1362         struct lu_env        *env;
1363         int                   result;
1364
1365         env = cl_env_nested_get(&nest);
1366         if (!IS_ERR(env)) {
1367                 obj   = olck->ols_cl.cls_obj;
1368                 oob   = cl2osc(obj);
1369                 io    = &oob->oo_debug_io;
1370                 lock  = olck->ols_cl.cls_lock;
1371                 descr = &lock->cll_descr;
1372                 plist = &osc_env_info(env)->oti_plist;
1373                 cl_page_list_init(plist);
1374
1375                 cfs_mutex_lock(&oob->oo_debug_mutex);
1376
1377                 io->ci_obj = cl_object_top(obj);
1378                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1379                 cl_page_gang_lookup(env, obj, io,
1380                                     descr->cld_start, descr->cld_end, plist);
1381                 cl_lock_page_list_fixup(env, io, lock, plist);
1382                 if (plist->pl_nr > 0) {
1383                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1384                         cl_page_list_for_each(page, plist)
1385                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1386                 }
1387                 result = plist->pl_nr > 0;
1388                 cl_page_list_disown(env, io, plist);
1389                 cl_page_list_fini(env, plist);
1390                 cl_io_fini(env, io);
1391                 cfs_mutex_unlock(&oob->oo_debug_mutex);
1392                 cl_env_nested_put(&nest, env);
1393         } else
1394                 result = 0;
1395         return result;
1396 }
1397 #else
1398 static int osc_lock_has_pages(struct osc_lock *olck)
1399 {
1400         return 0;
1401 }
1402 #endif /* INVARIANT_CHECK */
1403
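/**
 * Implements cl_lock_operations::clo_delete() method for osc layer.
 *
 * A glimpse lock is expected to hold neither a dlm lock nor a reference on
 * it, so it is simply skipped; for all other locks the dlm hold and the
 * attachment to the ldlm lock are released.
 */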
1404 static void osc_lock_delete(const struct lu_env *env,
1405                             const struct cl_lock_slice *slice)
1406 {
1407         struct osc_lock *olck;
1408
1409         olck = cl2osc_lock(slice);
1410         if (olck->ols_glimpse) {
1411                 LASSERT(!olck->ols_hold);
1412                 LASSERT(!olck->ols_lock);
1413                 return;
1414         }
1415
1416         LINVRNT(osc_lock_invariant(olck));
1417         LINVRNT(!osc_lock_has_pages(olck));
1418
1419         osc_lock_unhold(olck);
1420         osc_lock_detach(env, olck);
1421 }
1422
1423 /**
1424  * Implements cl_lock_operations::clo_state() method for osc layer.
1425  *
1426  * Maintains osc_lock::ols_owner field.
1427  *
1428  * This assumes that a lock always enters CLS_HELD (from some other state) in
1429  * the same IO context as the one that requested the lock. This should not be
1430  * a problem, because the context is by definition shared by all activity
1431  * pertaining to the same high-level IO.
1432  */
1433 static void osc_lock_state(const struct lu_env *env,
1434                            const struct cl_lock_slice *slice,
1435                            enum cl_lock_state state)
1436 {
1437         struct osc_lock *lock = cl2osc_lock(slice);
1438
1439         /*
1440          * XXX multiple io contexts can use the lock at the same time.
1441          */
1442         LINVRNT(osc_lock_invariant(lock));
1443         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1444                 struct osc_io *oio = osc_env_io(env);
1445
1446                 LASSERT(lock->ols_owner == NULL);
1447                 lock->ols_owner = oio;
1448         } else if (state != CLS_HELD)
1449                 lock->ols_owner = NULL;
1450 }
1451
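/**
 * Implements cl_lock_operations::clo_print() method for osc layer: prints
 * the dlm lock pointer, flags, handle cookie, state, owner and lvb.
 */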
1452 static int osc_lock_print(const struct lu_env *env, void *cookie,
1453                           lu_printer_t p, const struct cl_lock_slice *slice)
1454 {
1455         struct osc_lock *lock = cl2osc_lock(slice);
1456
1457         /*
1458          * XXX print ldlm lock and einfo properly.
1459          */
1460         (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1461              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1462              lock->ols_state, lock->ols_owner);
1463         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1464         return 0;
1465 }
1466
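/**
 * Implements cl_lock_operations::clo_fits_into() method for osc layer.
 *
 * Decides whether an existing osc lock can be matched against the lock
 * description \a need instead of enqueuing a new one.
 */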
1467 static int osc_lock_fits_into(const struct lu_env *env,
1468                               const struct cl_lock_slice *slice,
1469                               const struct cl_lock_descr *need,
1470                               const struct cl_io *io)
1471 {
1472         struct osc_lock *ols = cl2osc_lock(slice);
1473
1474         if (need->cld_enq_flags & CEF_NEVER)
1475                 return 0;
1476
1477         if (need->cld_mode == CLM_PHANTOM) {
1478                 /*
1479                  * Note: a QUEUED lock cannot be matched here, otherwise
1480                  * it may cause a deadlock.
1481                  * For example, in a read process:
1482                  * P1: enqueues a read lock, creating sublock1.
1483                  * P2: enqueues a write lock, creating sublock2 (which
1484                  *     conflicts with sublock1).
1485                  * P1: read lock is granted.
1486                  * P1: enqueues a glimpse lock (while holding sublock1 for
1487                  *     read), which matches sublock2 and waits for sublock2
1488                  *     to be granted. But sublock2 can never be granted,
1489                  *     because P1 will not release sublock1. Deadlock!
1490                  */
1491                 if (ols->ols_state < OLS_GRANTED ||
1492                     ols->ols_state > OLS_RELEASED)
1493                         return 0;
1494         } else if (need->cld_enq_flags & CEF_MUST) {
1495                 /*
1496                  * If the lock has never been enqueued, it can't be
1497                  * matched, because the enqueue process brings in a lot of
1498                  * information that is used to decide things such as
1499                  * whether the lock is lockless, CEF_MUST, etc.
1500                  */
1501                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1502                     ols->ols_locklessable)
1503                         return 0;
1504         }
1505         return 1;
1506 }
1507
1508 static const struct cl_lock_operations osc_lock_ops = {
1509         .clo_fini    = osc_lock_fini,
1510         .clo_enqueue = osc_lock_enqueue,
1511         .clo_wait    = osc_lock_wait,
1512         .clo_unuse   = osc_lock_unuse,
1513         .clo_use     = osc_lock_use,
1514         .clo_delete  = osc_lock_delete,
1515         .clo_state   = osc_lock_state,
1516         .clo_cancel  = osc_lock_cancel,
1517         .clo_weigh   = osc_lock_weigh,
1518         .clo_print   = osc_lock_print,
1519         .clo_fits_into = osc_lock_fits_into,
1520 };
1521
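/**
 * Implements cl_lock_operations::clo_unuse() for lockless osc locks: a
 * lockless lock is only valid while granted to its owning io, so unusing it
 * simply cancels and deletes the top-level cl_lock.
 */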
1522 static int osc_lock_lockless_unuse(const struct lu_env *env,
1523                                    const struct cl_lock_slice *slice)
1524 {
1525         struct osc_lock *ols = cl2osc_lock(slice);
1526         struct cl_lock *lock = slice->cls_lock;
1527
1528         LASSERT(ols->ols_state == OLS_GRANTED);
1529         LINVRNT(osc_lock_invariant(ols));
1530
1531         cl_lock_cancel(env, lock);
1532         cl_lock_delete(env, lock);
1533         return 0;
1534 }
1535
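/**
 * Implements cl_lock_operations::clo_cancel() for lockless osc locks: writes
 * out the pages covered by the lock (complaining if that fails) and moves
 * the lock into OLS_CANCELLED state; no ldlm lock is involved.
 */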
1536 static void osc_lock_lockless_cancel(const struct lu_env *env,
1537                                      const struct cl_lock_slice *slice)
1538 {
1539         struct osc_lock   *ols  = cl2osc_lock(slice);
1540         int result;
1541
1542         result = osc_lock_flush(ols, 0);
1543         if (result)
1544                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1545                        ols, result);
1546         ols->ols_state = OLS_CANCELLED;
1547 }
1548
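/**
 * Implements cl_lock_operations::clo_wait() for lockless osc locks: by the
 * time this is called the upcall has already been received, so only the
 * error, if any, recorded on the cl_lock is returned.
 */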
1549 static int osc_lock_lockless_wait(const struct lu_env *env,
1550                                   const struct cl_lock_slice *slice)
1551 {
1552         struct osc_lock *olck = cl2osc_lock(slice);
1553         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1554
1555         LINVRNT(osc_lock_invariant(olck));
1556         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1557
1558         return lock->cll_error;
1559 }
1560
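/**
 * Implements cl_lock_operations::clo_state() for lockless osc locks: records
 * the owning io when the lock becomes CLS_HELD and marks that io as lockless
 * if the lock is taken on the io's host object.
 */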
1561 static void osc_lock_lockless_state(const struct lu_env *env,
1562                                     const struct cl_lock_slice *slice,
1563                                     enum cl_lock_state state)
1564 {
1565         struct osc_lock *lock = cl2osc_lock(slice);
1566
1567         LINVRNT(osc_lock_invariant(lock));
1568         if (state == CLS_HELD) {
1569                 struct osc_io *oio  = osc_env_io(env);
1570
1571                 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
1572                 lock->ols_owner = oio;
1573
1574                 /* Set the io to be lockless if this lock is for the io's
1575                  * host object. */
1576                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1577                         oio->oi_lockless = 1;
1578         }
1579 }
1580
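/**
 * Implements cl_lock_operations::clo_fits_into() for lockless osc locks: a
 * lockless lock can only be matched by a CEF_NEVER request coming from the
 * io that owns the lock.
 */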
1581 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1582                                        const struct cl_lock_slice *slice,
1583                                        const struct cl_lock_descr *need,
1584                                        const struct cl_io *io)
1585 {
1586         struct osc_lock *lock = cl2osc_lock(slice);
1587
1588         if (!(need->cld_enq_flags & CEF_NEVER))
1589                 return 0;
1590
1591         /* lockless lock should only be used by its owning io. b22147 */
1592         return (lock->ols_owner == osc_env_io(env));
1593 }
1594
1595 static const struct cl_lock_operations osc_lock_lockless_ops = {
1596         .clo_fini      = osc_lock_fini,
1597         .clo_enqueue   = osc_lock_enqueue,
1598         .clo_wait      = osc_lock_lockless_wait,
1599         .clo_unuse     = osc_lock_lockless_unuse,
1600         .clo_state     = osc_lock_lockless_state,
1601         .clo_fits_into = osc_lock_lockless_fits_into,
1602         .clo_cancel    = osc_lock_lockless_cancel,
1603         .clo_print     = osc_lock_print
1604 };
1605
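/**
 * Lock initialization entry point for the osc layer (apparently invoked when
 * a cl_lock is being built for an osc object): allocates an osc_lock slice
 * in OLS_NEW state, builds the ldlm enqueue info for it and adds it to
 * \a lock with osc_lock_ops. Returns 0 on success, -ENOMEM if the slab
 * allocation fails.
 */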
1606 int osc_lock_init(const struct lu_env *env,
1607                   struct cl_object *obj, struct cl_lock *lock,
1608                   const struct cl_io *unused)
1609 {
1610         struct osc_lock *clk;
1611         int result;
1612
1613         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1614         if (clk != NULL) {
1615                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1616                 cfs_atomic_set(&clk->ols_pageref, 0);
1617                 clk->ols_state = OLS_NEW;
1618                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1619                 result = 0;
1620         } else
1621                 result = -ENOMEM;
1622         return result;
1623 }
1624
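/**
 * Checks whether the dlm lock \a dlm still has osc page references, which is
 * used to decide whether the lock can safely be cancelled.
 *
 * _PAGEREF_MAGIC is atomically added to osc_lock::ols_pageref; if the result
 * differs from _PAGEREF_MAGIC the reference count was non-zero, so the magic
 * value is subtracted back and 1 is returned (pages still reference the
 * lock). Otherwise the magic value is left in place, apparently so that
 * racing users of ols_pageref can detect that the lock is going away, and 0
 * is returned.
 */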
1625 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
1626 {
1627         struct osc_lock *olock;
1628         int              rc = 0;
1629
1630         cfs_spin_lock(&osc_ast_guard);
1631         olock = dlm->l_ast_data;
1632         /*
1633          * There is a very rare race with osc_page_addref_lock(), but it
1634          * does not matter: in the worst case we simply fail to cancel a
1635          * lock that we actually could, which is harmless.
1636          */
1637         if (olock != NULL &&
1638             cfs_atomic_add_return(_PAGEREF_MAGIC,
1639                                   &olock->ols_pageref) != _PAGEREF_MAGIC) {
1640                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
1641                 rc = 1;
1642         }
1643         cfs_spin_unlock(&osc_ast_guard);
1644         return rc;
1645 }
1646
1647 /** @} osc */