[fs/lustre-release.git] lustre/osc/osc_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc 
54  *  @{ 
55  */
56
57 /*****************************************************************************
58  *
59  * Type conversions.
60  *
61  */
62
63 static const struct cl_lock_operations osc_lock_ops;
64 static const struct cl_lock_operations osc_lock_lockless_ops;
65 static void osc_lock_to_lockless(const struct lu_env *env,
66                                  struct osc_lock *ols, int force);
67 static int osc_lock_has_pages(struct osc_lock *olck);
68
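/**
 * Returns true iff this osc_lock has been converted into a lockless lock,
 * i.e., its cl_lock slice uses osc_lock_lockless_ops.
 */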
69 int osc_lock_is_lockless(const struct osc_lock *olck)
70 {
71         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
72 }
73
74 /**
75  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
76  * pointer cannot be dereferenced, as lock is not protected from concurrent
77  * reclaim. This function is a helper for osc_lock_invariant().
78  */
79 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
80 {
81         struct ldlm_lock *lock;
82
83         lock = ldlm_handle2lock(handle);
84         if (lock != NULL)
85                 LDLM_LOCK_PUT(lock);
86         return lock;
87 }
88
89 /**
90  * Invariant that has to be true all of the time.
91  */
92 static int osc_lock_invariant(struct osc_lock *ols)
93 {
94         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
95         struct ldlm_lock *olock       = ols->ols_lock;
96         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
97
98         return
99                 ergo(osc_lock_is_lockless(ols),
100                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
101                 (ergo(olock != NULL, handle_used) &&
102                  ergo(olock != NULL,
103                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
104                  /*
105                   * Check that ->ols_handle and ->ols_lock are consistent, but
106                   * take into account that they are set at different times.
107                   */
108                  ergo(handle_used,
109                       ergo(lock != NULL && olock != NULL, lock == olock) &&
110                       ergo(lock == NULL, olock == NULL)) &&
111                  ergo(ols->ols_state == OLS_CANCELLED,
112                       olock == NULL && !handle_used) &&
113                  /*
114                   * DLM lock is destroyed only after we have seen cancellation
115                   * ast.
116                   */
117                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
118                       !olock->l_destroyed) &&
119                  ergo(ols->ols_state == OLS_GRANTED,
120                       olock != NULL &&
121                       olock->l_req_mode == olock->l_granted_mode &&
122                       ols->ols_hold));
123 }
124
125 /*****************************************************************************
126  *
127  * Lock operations.
128  *
129  */
130
131 /**
132  * Breaks a link between osc_lock and dlm_lock.
133  */
134 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
135 {
136         struct ldlm_lock *dlmlock;
137
138         spin_lock(&osc_ast_guard);
139         dlmlock = olck->ols_lock;
140         if (dlmlock == NULL) {
141                 spin_unlock(&osc_ast_guard);
142                 return;
143         }
144
145         olck->ols_lock = NULL;
146         /* wb(); --- for everyone who checks (ols->ols_lock != NULL) before
147          * calling osc_lock_detach() */
148         dlmlock->l_ast_data = NULL;
149         olck->ols_handle.cookie = 0ULL;
150         spin_unlock(&osc_ast_guard);
151
152         lock_res_and_lock(dlmlock);
153         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
154                 struct cl_object *obj = olck->ols_cl.cls_obj;
155                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
156                 __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
157
158                 /* Update the kms. This needs to loop over all granted locks;
159                  * not a problem for the client. */
160                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
161                 unlock_res_and_lock(dlmlock);
162
163                 cl_object_attr_lock(obj);
164                 cl_object_attr_set(env, obj, attr, CAT_KMS);
165                 cl_object_attr_unlock(obj);
166         } else
167                 unlock_res_and_lock(dlmlock);
168
169         /* release a reference taken in osc_lock_upcall0(). */
170         LASSERT(olck->ols_has_ref);
171         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
172         LDLM_LOCK_RELEASE(dlmlock);
173         olck->ols_has_ref = 0;
174 }
175
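/**
 * Drops the ldlm lock hold, if any: clears ->ols_hold and releases the lock
 * via osc_cancel_base().
 */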
176 static int osc_lock_unhold(struct osc_lock *ols)
177 {
178         int result = 0;
179
180         if (ols->ols_hold) {
181                 ols->ols_hold = 0;
182                 result = osc_cancel_base(&ols->ols_handle,
183                                          ols->ols_einfo.ei_mode);
184         }
185         return result;
186 }
187
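/**
 * Implementation of cl_lock_operations::clo_unuse(): moves the lock into
 * OLS_RELEASED state and drops the ldlm hold (a no-op for glimpse locks).
 */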
188 static int osc_lock_unuse(const struct lu_env *env,
189                           const struct cl_lock_slice *slice)
190 {
191         struct osc_lock *ols = cl2osc_lock(slice);
192
193         LASSERT(ols->ols_state == OLS_GRANTED ||
194                 ols->ols_state == OLS_UPCALL_RECEIVED);
195         LINVRNT(osc_lock_invariant(ols));
196
197         if (ols->ols_glimpse) {
198                 LASSERT(ols->ols_hold == 0);
199                 return 0;
200         }
201         LASSERT(ols->ols_hold);
202
203         /*
204          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
205          * so that possible synchronous cancellation (that always happens
206          * e.g., for liblustre) sees that lock is released.
207          */
208         ols->ols_state = OLS_RELEASED;
209         return osc_lock_unhold(ols);
210 }
211
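/**
 * Implementation of cl_lock_operations::clo_fini(): releases a possibly
 * remaining ldlm hold and frees the osc_lock slice.
 */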
212 static void osc_lock_fini(const struct lu_env *env,
213                           struct cl_lock_slice *slice)
214 {
215         struct osc_lock  *ols = cl2osc_lock(slice);
216
217         LINVRNT(osc_lock_invariant(ols));
218         /*
219          * ->ols_hold can still be true at this point if, for example, a
220          * thread that requested a lock was killed (and released a reference
221          * to the lock) before a reply from the server was received. In this
222          * case the lock is destroyed immediately after the upcall.
223          */
224         osc_lock_unhold(ols);
225         LASSERT(ols->ols_lock == NULL);
226
227         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
228 }
229
230 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
231                         struct ldlm_res_id *resname)
232 {
233         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
234         if (0) {
235                 /*
236                  * In the perfect world of the future, where ost servers talk
237                  * idif-fids...
238                  */
239                 fid_build_reg_res_name(fid, resname);
240         } else {
241                 /*
242                  * In reality, the ost server expects ->lsm_object_id and
243                  * ->lsm_object_gr in the resource name.
244                  */
245                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
246                                    resname);
247         }
248 }
249
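/**
 * Fills an ldlm extent policy (byte range and group id) from the cl_lock
 * description.
 */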
250 static void osc_lock_build_policy(const struct lu_env *env,
251                                   const struct cl_lock *lock,
252                                   ldlm_policy_data_t *policy)
253 {
254         const struct cl_lock_descr *d = &lock->cll_descr;
255
256         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
257         policy->l_extent.gid = d->cld_gid;
258 }
259
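/**
 * Converts cl_lock enqueue flags (CEF_*) into ldlm flags.
 */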
260 static int osc_enq2ldlm_flags(__u32 enqflags)
261 {
262         int result = 0;
263
264         LASSERT((enqflags & ~CEF_MASK) == 0);
265
266         if (enqflags & CEF_NONBLOCK)
267                 result |= LDLM_FL_BLOCK_NOWAIT;
268         if (enqflags & CEF_ASYNC)
269                 result |= LDLM_FL_HAS_INTENT;
270         if (enqflags & CEF_DISCARD_DATA)
271                 result |= LDLM_AST_DISCARD_DATA;
272         return result;
273 }
274
275 /**
276  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
277  * pointers. Initialized in osc_init().
278  */
279 spinlock_t osc_ast_guard;
280
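/**
 * Returns the osc_lock attached to \a dlm_lock through ->l_ast_data, taking
 * a trusted reference on its cl_lock, or NULL when no osc references on the
 * dlm lock remain.
 */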
281 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
282 {
283         struct osc_lock *olck;
284
285         lock_res_and_lock(dlm_lock);
286         spin_lock(&osc_ast_guard);
287         olck = dlm_lock->l_ast_data;
288         if (olck != NULL) {
289                 struct cl_lock *lock = olck->ols_cl.cls_lock;
290                 /*
291                  * If osc_lock holds a reference on ldlm lock, return it even
292                  * when cl_lock is in CLS_FREEING state. This way
293                  *
294                  *         osc_ast_data_get(dlmlock) == NULL
295                  *
296                  * guarantees that all osc references on dlmlock were
297                  * released. osc_dlm_blocking_ast0() relies on that.
298                  */
299                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
300                         cl_lock_get_trust(lock);
301                         lu_ref_add_atomic(&lock->cll_reference,
302                                           "ast", cfs_current());
303                 } else
304                         olck = NULL;
305         }
306         spin_unlock(&osc_ast_guard);
307         unlock_res_and_lock(dlm_lock);
308         return olck;
309 }
310
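/**
 * Releases the cl_lock reference taken by a matching osc_ast_data_get().
 */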
311 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
312 {
313         struct cl_lock *lock;
314
315         lock = olck->ols_cl.cls_lock;
316         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
317         cl_lock_put(env, lock);
318 }
319
320 /**
321  * Updates object attributes from a lock value block (lvb) received together
322  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
323  * logic.
324  *
325  * This can be optimized to not update attributes when lock is a result of a
326  * local match.
327  *
328  * Called under lock and resource spin-locks.
329  */
330 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
331                                 int rc)
332 {
333         struct ost_lvb    *lvb;
334         struct cl_object  *obj;
335         struct lov_oinfo  *oinfo;
336         struct cl_attr    *attr;
337         unsigned           valid;
338
339         ENTRY;
340
341         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
342                 EXIT;
343                 return;
344         }
345
346         lvb   = &olck->ols_lvb;
347         obj   = olck->ols_cl.cls_obj;
348         oinfo = cl2osc(obj)->oo_oinfo;
349         attr  = &osc_env_info(env)->oti_attr;
350         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
351         cl_lvb2attr(attr, lvb);
352
353         cl_object_attr_lock(obj);
354         if (rc == 0) {
355                 struct ldlm_lock  *dlmlock;
356                 __u64 size;
357
358                 dlmlock = olck->ols_lock;
359                 LASSERT(dlmlock != NULL);
360
361                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
362                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
363                 size = lvb->lvb_size;
364                 /* Extend KMS up to the end of this lock and no further.
365                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
366                 if (size > dlmlock->l_policy_data.l_extent.end)
367                         size = dlmlock->l_policy_data.l_extent.end + 1;
368                 if (size >= oinfo->loi_kms) {
369                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
370                                    ", kms="LPU64, lvb->lvb_size, size);
371                         valid |= CAT_KMS;
372                         attr->cat_kms = size;
373                 } else {
374                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
375                                    LPU64"; leaving kms="LPU64", end="LPU64,
376                                    lvb->lvb_size, oinfo->loi_kms,
377                                    dlmlock->l_policy_data.l_extent.end);
378                 }
379                 ldlm_lock_allow_match_locked(dlmlock);
380         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
381                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
382                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
383         } else
384                 valid = 0;
385
386         if (valid != 0)
387                 cl_object_attr_set(env, obj, attr, valid);
388
389         cl_object_attr_unlock(obj);
390
391         EXIT;
392 }
393
394 /**
395  * Called when a lock is granted, from an upcall (when server returned a
396  * granted lock), or from completion AST, when server returned a blocked lock.
397  *
398  * Called under lock and resource spin-locks, that are released temporarily
399  * here.
400  */
401 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
402                              struct ldlm_lock *dlmlock, int rc)
403 {
404         struct ldlm_extent   *ext;
405         struct cl_lock       *lock;
406         struct cl_lock_descr *descr;
407
408         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
409
410         ENTRY;
411         if (olck->ols_state != OLS_GRANTED) {
412                 lock  = olck->ols_cl.cls_lock;
413                 ext   = &dlmlock->l_policy_data.l_extent;
414                 descr = &osc_env_info(env)->oti_descr;
415                 descr->cld_obj = lock->cll_descr.cld_obj;
416
417                 /* XXX check that ->l_granted_mode is valid. */
418                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
419                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
420                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
421                 descr->cld_gid   = ext->gid;
422                 /*
423                  * tell upper layers the extent of the lock that was actually
424                  * granted
425                  */
426                 olck->ols_state = OLS_GRANTED;
427                 osc_lock_lvb_update(env, olck, rc);
428
429                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
430                  * to take a semaphore on a parent lock. This is safe, because
431                  * spin-locks are needed to protect consistency of
432                  * dlmlock->l_*_mode and LVB, and we have finished processing
433                  * them. */
434                 unlock_res_and_lock(dlmlock);
435                 cl_lock_modify(env, lock, descr);
436                 cl_lock_signal(env, lock);
437                 LINVRNT(osc_lock_invariant(olck));
438                 lock_res_and_lock(dlmlock);
439         }
440         EXIT;
441 }
442
443 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
444 {
446         struct ldlm_lock *dlmlock;
447
448         ENTRY;
449
450         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
451         LASSERT(dlmlock != NULL);
452
453         lock_res_and_lock(dlmlock);
454         spin_lock(&osc_ast_guard);
455         LASSERT(dlmlock->l_ast_data == olck);
456         LASSERT(olck->ols_lock == NULL);
457         olck->ols_lock = dlmlock;
458         spin_unlock(&osc_ast_guard);
459
460         /*
461          * Lock might not be granted yet. In this case, the completion ast
462          * (osc_ldlm_completion_ast()) comes later and finishes lock
463          * granting.
464          */
465         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
466                 osc_lock_granted(env, olck, dlmlock, 0);
467         unlock_res_and_lock(dlmlock);
468
469         /*
470          * osc_enqueue_interpret() decrefs asynchronous locks, counter
471          * this.
472          */
473         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
474         olck->ols_hold = 1;
475
476         /* lock reference taken by ldlm_handle2lock_long() is owned by
477          * osc_lock and released in osc_lock_detach() */
478         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
479         olck->ols_has_ref = 1;
480 }
481
482 /**
483  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
484  * received from a server, or after osc_enqueue_base() matched a local DLM
485  * lock.
486  */
487 static int osc_lock_upcall(void *cookie, int errcode)
488 {
489         struct osc_lock         *olck  = cookie;
490         struct cl_lock_slice    *slice = &olck->ols_cl;
491         struct cl_lock          *lock  = slice->cls_lock;
492         struct lu_env           *env;
493         struct cl_env_nest       nest;
494
495         ENTRY;
496         env = cl_env_nested_get(&nest);
497         if (!IS_ERR(env)) {
498                 int rc;
499
500                 cl_lock_mutex_get(env, lock);
501
502                 LASSERT(lock->cll_state >= CLS_QUEUING);
503                 if (olck->ols_state == OLS_ENQUEUED) {
504                         olck->ols_state = OLS_UPCALL_RECEIVED;
505                         rc = ldlm_error2errno(errcode);
506                 } else if (olck->ols_state == OLS_CANCELLED) {
507                         rc = -EIO;
508                 } else {
509                         CERROR("Impossible state: %i\n", olck->ols_state);
510                         LBUG();
511                 }
512                 if (rc) {
513                         struct ldlm_lock *dlmlock;
514
515                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
516                         if (dlmlock != NULL) {
517                                 lock_res_and_lock(dlmlock);
518                                 spin_lock(&osc_ast_guard);
519                                 LASSERT(olck->ols_lock == NULL);
520                                 dlmlock->l_ast_data = NULL;
521                                 olck->ols_handle.cookie = 0ULL;
522                                 spin_unlock(&osc_ast_guard);
523                                 unlock_res_and_lock(dlmlock);
524                                 LDLM_LOCK_PUT(dlmlock);
525                         }
526                 } else {
527                         if (olck->ols_glimpse)
528                                 olck->ols_glimpse = 0;
529                         osc_lock_upcall0(env, olck);
530                 }
531
532                 /* Error handling; some errors are tolerable. */
533                 if (olck->ols_locklessable && rc == -EUSERS) {
534                         /* This is a tolerable error; turn this lock into
535                          * a lockless lock.
536                          */
537                         osc_object_set_contended(cl2osc(slice->cls_obj));
538                         LASSERT(slice->cls_ops == &osc_lock_ops);
539
540                         /* Change this lock into an ldlm-less (lockless) lock. */
541                         osc_lock_to_lockless(env, olck, 1);
542                         olck->ols_state = OLS_GRANTED;
543                         rc = 0;
544                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
545                         osc_lock_lvb_update(env, olck, rc);
546                         cl_lock_delete(env, lock);
547                         /* Hide the error. */
548                         rc = 0;
549                 }
550
551                 if (rc == 0)
552                         /* on error, lock was signaled by cl_lock_error() */
553                         cl_lock_signal(env, lock);
554                 else
555                         cl_lock_error(env, lock, rc);
556
557                 cl_lock_mutex_put(env, lock);
558
559                 /* release cookie reference, acquired by osc_lock_enqueue() */
560                 lu_ref_del(&lock->cll_reference, "upcall", lock);
561                 cl_lock_put(env, lock);
562                 cl_env_nested_put(&nest, env);
563         } else
564                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
565                 LBUG();
566         RETURN(errcode);
567 }
568
569 /**
570  * Core of osc_dlm_blocking_ast() logic.
571  */
572 static void osc_lock_blocking(const struct lu_env *env,
573                               struct ldlm_lock *dlmlock,
574                               struct osc_lock *olck, int blocking)
575 {
576         struct cl_lock *lock = olck->ols_cl.cls_lock;
577
578         LASSERT(olck->ols_lock == dlmlock);
579         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
580         LASSERT(!osc_lock_is_lockless(olck));
581
582         /*
583          * Lock might still be addref-ed here if, e.g., a blocking ast
584          * is sent for a failed lock.
585          */
586         osc_lock_unhold(olck);
587
588         if (blocking && olck->ols_state < OLS_BLOCKED)
589                 /*
590                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
591                  * because it recursively re-enters osc_lock_blocking(), with
592                  * the state set to OLS_CANCELLED.
593                  */
594                 olck->ols_state = OLS_BLOCKED;
595         /*
596          * cancel and destroy lock at least once no matter how blocking ast is
597          * entered (see comment above osc_ldlm_blocking_ast() for use
598          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
599          */
600         cl_lock_cancel(env, lock);
601         cl_lock_delete(env, lock);
602 }
603
604 /**
605  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
606  * and ldlm_lock caches.
607  */
608 static int osc_dlm_blocking_ast0(const struct lu_env *env,
609                                  struct ldlm_lock *dlmlock,
610                                  void *data, int flag)
611 {
612         struct osc_lock *olck;
613         struct cl_lock  *lock;
614         int result;
615         int cancel;
616
617         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
618
619         cancel = 0;
620         olck = osc_ast_data_get(dlmlock);
621         if (olck != NULL) {
622                 lock = olck->ols_cl.cls_lock;
623                 cl_lock_mutex_get(env, lock);
624                 LINVRNT(osc_lock_invariant(olck));
625                 if (olck->ols_ast_wait) {
626                         /* wake up osc_lock_use() */
627                         cl_lock_signal(env, lock);
628                         olck->ols_ast_wait = 0;
629                 }
630                 /*
631                  * Lock might have been canceled while this thread was
632                  * The lock might have been canceled while this thread was
633                  * sleeping on the lock mutex, but olck is pinned in memory.
634                 if (olck == dlmlock->l_ast_data) {
635                         /*
636                          * NOTE: DLM sends blocking AST's for failed locks
637                          *       (that are still in pre-OLS_GRANTED state)
638                          *       too, and they have to be canceled, otherwise
639                          *       the DLM lock is never destroyed and stays
640                          *       stuck in memory.
641                          *
642                          *       Alternatively, ldlm_cli_cancel() can be
643                          *       called here directly for osc_locks with
644                          *       ols_state < OLS_GRANTED to maintain an
645                          *       invariant that ->clo_cancel() is only called
646                          *       for locks that were granted.
647                          */
648                         LASSERT(data == olck);
649                         osc_lock_blocking(env, dlmlock,
650                                           olck, flag == LDLM_CB_BLOCKING);
651                 } else
652                         cancel = 1;
653                 cl_lock_mutex_put(env, lock);
654                 osc_ast_data_put(env, olck);
655         } else
656                 /*
657                  * DLM lock exists, but there is no cl_lock attached to it.
658                  * This is a `normal' race. cl_object and its cl_lock's can be
659                  * removed by memory pressure, together with all pages.
660                  */
661                 cancel = (flag == LDLM_CB_BLOCKING);
662
663         if (cancel) {
664                 struct lustre_handle *lockh;
665
666                 lockh = &osc_env_info(env)->oti_handle;
667                 ldlm_lock2handle(dlmlock, lockh);
668                 result = ldlm_cli_cancel(lockh);
669         } else
670                 result = 0;
671         return result;
672 }
673
674 /**
675  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
676  * some other lock, or is canceled. This function is installed as a
677  * ldlm_lock::l_blocking_ast() for client extent locks.
678  *
679  * Control flow is tricky, because ldlm uses the same call-back
680  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
681  *
682  * \param dlmlock lock for which ast occurred.
683  *
684  * \param new description of a conflicting lock in case of blocking ast.
685  *
686  * \param data value of dlmlock->l_ast_data
687  *
688  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
689  *             cancellation and blocking ast's.
690  *
691  * Possible use cases:
692  *
693  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
694  *       lock due to lock lru pressure, or explicit user request to purge
695  *       locks.
696  *
697  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
698  *       us that dlmlock conflicts with another lock that some client is
699  *       enqueuing. Lock is canceled.
700  *
701  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
702  *             ldlm_cli_cancel() that calls
703  *
704  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
705  *
706  *             recursively entering osc_ldlm_blocking_ast().
707  *
708  *     - client cancels lock voluntarily (e.g., as part of early cancellation):
709  *
710  *           cl_lock_cancel()->
711  *             osc_lock_cancel()->
712  *               ldlm_cli_cancel()->
713  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
714  *
715  */
716 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
717                                  struct ldlm_lock_desc *new, void *data,
718                                  int flag)
719 {
720         struct lu_env     *env;
721         struct cl_env_nest nest;
722         int                result;
723
724         /*
725          * This can be called in the context of outer IO, e.g.,
726          *
727          *     cl_enqueue()->...
728          *       ->osc_enqueue_base()->...
729          *         ->ldlm_prep_elc_req()->...
730          *           ->ldlm_cancel_callback()->...
731          *             ->osc_ldlm_blocking_ast()
732          *
733          * A new environment has to be created so as not to corrupt the outer
734          * context.
734          */
735         env = cl_env_nested_get(&nest);
736         if (!IS_ERR(env)) {
737                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
738                 cl_env_nested_put(&nest, env);
739         } else {
740                 result = PTR_ERR(env);
741                 /*
742                  * XXX This should never happen, as cl_lock is
743                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
744                  * should be used.
745                  */
746                 LBUG();
747         }
748         if (result != 0) {
749                 if (result == -ENODATA)
750                         result = 0;
751                 else
752                         CERROR("BAST failed: %d\n", result);
753         }
754         return result;
755 }
756
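/**
 * Completion ast for client extent locks: runs the ldlm part of completion
 * first, then copies the LVB into the osc_lock and, if the dlm lock is
 * already bound to the osc_lock and granted, finishes granting via
 * osc_lock_granted().
 */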
757 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
758                                    int flags, void *data)
759 {
760         struct cl_env_nest nest;
761         struct lu_env     *env;
762         struct osc_lock   *olck;
763         struct cl_lock    *lock;
764         int result;
765         int dlmrc;
766
767         /* first, do dlm part of the work */
768         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
769         /* then, notify cl_lock */
770         env = cl_env_nested_get(&nest);
771         if (!IS_ERR(env)) {
772                 olck = osc_ast_data_get(dlmlock);
773                 if (olck != NULL) {
774                         lock = olck->ols_cl.cls_lock;
775                         cl_lock_mutex_get(env, lock);
776                         /*
777                          * ldlm_handle_cp_callback() copied LVB from request
778                          * to lock->l_lvb_data, store it in osc_lock.
779                          */
780                         LASSERT(dlmlock->l_lvb_data != NULL);
781                         lock_res_and_lock(dlmlock);
782                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
783                         if (olck->ols_lock == NULL) {
784                                 /*
785                                  * upcall (osc_lock_upcall()) hasn't yet been
786                                  * called. Do nothing now, upcall will bind
787                                  * olck to dlmlock and signal the waiters.
788                                  *
789                                  * This maintains an invariant that osc_lock
790                                  * and ldlm_lock are always bound when
791                                  * osc_lock is in OLS_GRANTED state.
792                                  */
793                         } else if (dlmlock->l_granted_mode ==
794                                    dlmlock->l_req_mode) {
795                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
796                         }
797                         unlock_res_and_lock(dlmlock);
798
799                         if (dlmrc != 0) {
800                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
801                                               "dlmlock returned %d\n", dlmrc);
802                                 cl_lock_error(env, lock, dlmrc);
803                         }
804                         cl_lock_mutex_put(env, lock);
805                         osc_ast_data_put(env, olck);
806                         result = 0;
807                 } else
808                         result = -ELDLM_NO_LOCK_DATA;
809                 cl_env_nested_put(&nest, env);
810         } else
811                 result = PTR_ERR(env);
812         return dlmrc ?: result;
813 }
814
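/**
 * Glimpse ast for client extent locks: packs a reply with an LVB filled by
 * cl_object_glimpse(), or returns -ELDLM_NO_LOCK_DATA when no cl_lock is
 * attached to \a dlmlock.
 */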
815 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
816 {
817         struct ptlrpc_request  *req  = data;
818         struct osc_lock        *olck;
819         struct cl_lock         *lock;
820         struct cl_object       *obj;
821         struct cl_env_nest      nest;
822         struct lu_env          *env;
823         struct ost_lvb         *lvb;
824         struct req_capsule     *cap;
825         int                     result;
826
827         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
828
829         env = cl_env_nested_get(&nest);
830         if (!IS_ERR(env)) {
831                 /*
832                  * osc_ast_data_get() has to go after the environment is
833                  * allocated, because osc_ast_data_get() acquires a
834                  * reference to a lock, and it can only be released within
835                  * an environment.
836                  */
837                 olck = osc_ast_data_get(dlmlock);
838                 if (olck != NULL) {
839                         lock = olck->ols_cl.cls_lock;
840                         cl_lock_mutex_get(env, lock);
841                         cap = &req->rq_pill;
842                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
843                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
844                                              sizeof *lvb);
845                         result = req_capsule_server_pack(cap);
846                         if (result == 0) {
847                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
848                                 obj = lock->cll_descr.cld_obj;
849                                 result = cl_object_glimpse(env, obj, lvb);
850                         }
851                         cl_lock_mutex_put(env, lock);
852                         osc_ast_data_put(env, olck);
853                 } else {
854                         /*
855                          * These errors are normal races, so we don't want to
856                          * fill the console with messages by calling
857                          * ptlrpc_error()
858                          */
859                         lustre_pack_reply(req, 1, NULL, NULL);
860                         result = -ELDLM_NO_LOCK_DATA;
861                 }
862                 cl_env_nested_put(&nest, env);
863         } else
864                 result = PTR_ERR(env);
865         req->rq_status = result;
866         return result;
867 }
868
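/**
 * Implementation of cl_lock_operations::clo_weigh(): approximates the weight
 * of the lock by the total number of pages cached for the object.
 */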
869 static unsigned long osc_lock_weigh(const struct lu_env *env,
870                                     const struct cl_lock_slice *slice)
871 {
872         /*
873          * don't need to grab coh_page_guard since we don't care about the
874          * exact number of pages.
875          */
876         return cl_object_header(slice->cls_obj)->coh_pages;
877 }
878
879 /**
880  * Get the weight of dlm lock for early cancellation.
881  *
882  * XXX: it should return the pages covered by this \a dlmlock.
883  */
884 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
885 {
886         struct cl_env_nest       nest;
887         struct lu_env           *env;
888         struct osc_lock         *lock;
889         struct cl_lock          *cll;
890         unsigned long            weight;
891         ENTRY;
892
893         might_sleep();
894         /*
895          * osc_ldlm_weigh_ast() has a complex calling context since it might
896          * be called because of lock cancellation or from user input, so we
897          * have to make a new environment for it. It is probably safe to use
898          * the upper context because cl_lock_put() doesn't modify environment
899          * variables, but allocate a new one to be safe.
900          */
901         env = cl_env_nested_get(&nest);
902         if (IS_ERR(env))
903                 /* Mostly due to lack of memory; tend to eliminate this lock. */
904                 RETURN(0);
905
906         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
907         lock = osc_ast_data_get(dlmlock);
908         if (lock == NULL) {
909                 /* cl_lock was destroyed because of memory pressure.
910                  * It is reasonable to assign this type of lock
911                  * a lower cost.
912                  */
913                 GOTO(out, weight = 0);
914         }
915
916         cll = lock->ols_cl.cls_lock;
917         cl_lock_mutex_get(env, cll);
918         weight = cl_lock_weigh(env, cll);
919         cl_lock_mutex_put(env, cll);
920         osc_ast_data_put(env, lock);
921         EXIT;
922
923 out:
924         cl_env_nested_put(&nest, env);
925         return weight;
926 }
927
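/**
 * Fills the ldlm enqueue info for this lock: extent lock type, requested mode
 * (glimpse locks are enqueued in read mode) and the blocking, completion,
 * glimpse and weigh callbacks.
 */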
928 static void osc_lock_build_einfo(const struct lu_env *env,
929                                  const struct cl_lock *clock,
930                                  struct osc_lock *lock,
931                                  struct ldlm_enqueue_info *einfo)
932 {
933         enum cl_lock_mode mode;
934
935         mode = clock->cll_descr.cld_mode;
936         if (mode == CLM_PHANTOM)
937                 /*
938                  * For now, enqueue all glimpse locks in read mode. In the
939                  * future, client might choose to enqueue LCK_PW lock for
940                  * glimpse on a file opened for write.
941                  */
942                 mode = CLM_READ;
943
944         einfo->ei_type   = LDLM_EXTENT;
945         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
946         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
947         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
948         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
949         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
950         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
951 }
952
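/**
 * Deletes \a conflict in a freshly allocated nested environment; helper for
 * osc_lock_cancel_wait().
 */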
953 static int osc_lock_delete0(struct cl_lock *conflict)
954 {
955         struct cl_env_nest    nest;
956         struct lu_env        *env;
957         int rc = 0;
958
959         env = cl_env_nested_get(&nest);
960         if (!IS_ERR(env)) {
961                 cl_lock_delete(env, conflict);
962                 cl_env_nested_put(&nest, env);
963         } else
964                 rc = PTR_ERR(env);
965         return rc; 
966 }
967 /**
968  * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This
969  * is called as a part of enqueuing to cancel conflicting locks early.
970  *
971  * \retval            0: success, \a conflict was cancelled and destroyed.
972  *
973  * \retval   CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
974  *                       released in the process. Repeat enqueuing.
975  *
976  * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
977  *                       either \a lock is non-blocking, or the current thread
978  *                       holds other locks that prevent it from waiting
979  *                       for the cancel to complete.
980  *
981  * \retval          -ve: other error, including -EINTR.
982  *
983  */
984 static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
985                                 struct cl_lock *conflict, int canwait)
986 {
987         int rc;
988
989         LASSERT(cl_lock_is_mutexed(lock));
990         LASSERT(cl_lock_is_mutexed(conflict));
991
992         rc = 0;
993         if (conflict->cll_state != CLS_FREEING) {
994                 cl_lock_cancel(env, conflict);
995                 rc = osc_lock_delete0(conflict);
996                 if (rc)
997                         return rc; 
998                 if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
999                         rc = -EWOULDBLOCK;
1000                         if (cl_lock_nr_mutexed(env) > 2)
1001                                 /*
1002                                  * If mutexes of locks other than @lock and
1003                                  * @scan are held by the current thread, it
1004                                  * cannot wait on @scan's state change in a
1005                                  * dead-lock-safe manner, so simply skip early
1006                                  * cancellation in this case.
1007                                  *
1008                                  * This means that early cancellation doesn't
1009                                  * work when there is even slight mutex
1010                                  * contention, as top-lock's mutex is usually
1011                                  * held at this time.
1012                                  */
1013                                 ;
1014                         else if (canwait) {
1015                                 /* Waiting for @scan to be destroyed */
1016                                 cl_lock_mutex_put(env, lock);
1017                                 do {
1018                                         rc = cl_lock_state_wait(env, conflict);
1019                                 } while (!rc &&
1020                                          conflict->cll_state < CLS_FREEING);
1021                                 /* mutex was released, repeat enqueue. */
1022                                 rc = rc ?: CLO_REPEAT;
1023                                 cl_lock_mutex_get(env, lock);
1024                         }
1025                 }
1026                 LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
1027                 CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
1028                        conflict, rc ? "not":"", rc);
1029         }
1030         return rc;
1031 }
1032
1033 /**
1034  * Determine if the lock should be converted into a lockless lock.
1035  *
1036  * Steps to check:
1037  * - if the lock has an explicit requirement for a non-lockless lock;
1038  * - if the io lock request type ci_lockreq;
1039  * - send the enqueue rpc to the ost to make the final decision;
1040  * - special treatment for lockless truncate locks.
1041  *
1042  *  Additional policy can be implemented here, e.g., never do lockless-io
1043  *  for large extents.
1044  */
1045 static void osc_lock_to_lockless(const struct lu_env *env,
1046                                  struct osc_lock *ols, int force)
1047 {
1048         struct cl_lock_slice *slice = &ols->ols_cl;
1049         struct cl_lock *lock        = slice->cls_lock;
1050
1051         LASSERT(ols->ols_state == OLS_NEW ||
1052                 ols->ols_state == OLS_UPCALL_RECEIVED);
1053
1054         if (force) {
1055                 ols->ols_locklessable = 1;
1056                 LASSERT(cl_lock_is_mutexed(lock));
1057                 slice->cls_ops = &osc_lock_lockless_ops;
1058         } else {
1059                 struct osc_io *oio     = osc_env_io(env);
1060                 struct cl_io  *io      = oio->oi_cl.cis_io;
1061                 struct cl_object *obj  = slice->cls_obj;
1062                 struct osc_object *oob = cl2osc(obj);
1063                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1064                 struct obd_connect_data *ocd;
1065
1066                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1067                         io->ci_lockreq == CILR_MAYBE ||
1068                         io->ci_lockreq == CILR_NEVER);
1069
1070                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1071                 ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
1072                                 (io->ci_lockreq == CILR_MAYBE) &&
1073                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1074                 if (io->ci_lockreq == CILR_NEVER ||
1075                         /* lockless IO */
1076                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1077                         /* lockless truncate */
1078                     (io->ci_type == CIT_TRUNC &&
1079                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1080                       osd->od_lockless_truncate)) {
1081                         ols->ols_locklessable = 1;
1082                         slice->cls_ops = &osc_lock_lockless_ops;
1083                 }
1084         }
1085         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1086 }
1087
1088 /**
1089  * Cancel all conflicting locks and wait for them to be destroyed.
1090  *
1091  * This function is used for two purposes:
1092  *
1093  *     - early cancel all conflicting locks before starting IO, and
1094  *
1095  *     - guarantee that pages added to the page cache by lockless IO are never
1096  *       covered by locks other than lockless IO lock, and, hence, are not
1097  *       visible to other threads.
1098  */
1099 static int osc_lock_enqueue_wait(const struct lu_env *env,
1100                                  const struct osc_lock *olck)
1101 {
1102         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1103         struct cl_lock_descr    *descr   = &lock->cll_descr;
1104         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1105         struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
1106         struct cl_lock          *scan;
1107         struct cl_lock          *temp;
1108         int lockless                     = osc_lock_is_lockless(olck);
1109         int rc                           = 0;
1110         int canwait;
1111         int stop;
1112         ENTRY;
1113
1114         LASSERT(cl_lock_is_mutexed(lock));
1115         LASSERT(lock->cll_state == CLS_QUEUING);
1116
1117         /*
1118          * XXX This function could be sped up if we had asynchronous
1119          * cancellation.
1120          */
1121
1122         canwait =
1123                 !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
1124                 cl_lock_nr_mutexed(env) == 1;
1125         cl_lock_closure_init(env, closure, lock, canwait);
1126         spin_lock(&hdr->coh_lock_guard);
1127         list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
1128                 if (scan == lock)
1129                         continue;
1130
1131                 if (scan->cll_state < CLS_QUEUING ||
1132                     scan->cll_state == CLS_FREEING ||
1133                     scan->cll_descr.cld_start > descr->cld_end ||
1134                     scan->cll_descr.cld_end < descr->cld_start)
1135                         continue;
1136
1137                 /* overlapped and living locks. */
1138
1139                 /* We're not supposed to give up group lock. */
1140                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1141                         LASSERT(descr->cld_mode != CLM_GROUP ||
1142                                 descr->cld_gid != scan->cll_descr.cld_gid);
1143                         continue;
1144                 }
1145
1146                 /* A tricky case for lockless pages:
1147                  * We need to cancel the compatible locks if we're enqueuing
1148                  * a lockless lock, for example:
1149                  * imagine that client has PR lock on [0, 1000], and thread T0
1150                  * is doing lockless IO in [500, 1500] region. Concurrent
1151                  * thread T1 can see lockless data in [500, 1000], which is
1152                  * wrong, because these data are possibly stale.
1153                  */
1154                 if (!lockless && cl_lock_compatible(scan, lock))
1155                         continue;
1156
1157                 /* Now @scan is conflicting with @lock; this means the current
1158                  * thread has to wait for @scan to be destroyed. */
1159                 cl_lock_get_trust(scan);
1160                 if (&temp->cll_linkage != &hdr->coh_locks)
1161                         cl_lock_get_trust(temp);
1162                 spin_unlock(&hdr->coh_lock_guard);
1163                 lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
1164
1165                 LASSERT(list_empty(&closure->clc_list));
1166                 rc = cl_lock_closure_build(env, scan, closure);
1167                 if (rc == 0) {
1168                         rc = osc_lock_cancel_wait(env, lock, scan, canwait);
1169                         cl_lock_disclosure(env, closure);
1170                         if (rc == -EWOULDBLOCK)
1171                                 rc = 0;
1172                 }
1173                 if (rc == CLO_REPEAT && !canwait)
1174                         /* cannot wait... no early cancellation. */
1175                         rc = 0;
1176
1177                 lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
1178                 cl_lock_put(env, scan);
1179                 spin_lock(&hdr->coh_lock_guard);
1180                 /*
1181                  * The lock list could have been modified while the spin-lock was
1182                  * released. Check that it is safe to continue.
1183                  */
1184                 stop = list_empty(&temp->cll_linkage);
1185                 if (&temp->cll_linkage != &hdr->coh_locks)
1186                         cl_lock_put(env, temp);
1187                 if (stop || rc != 0)
1188                         break;
1189         }
1190         spin_unlock(&hdr->coh_lock_guard);
1191         cl_lock_closure_fini(closure);
1192         RETURN(rc);
1193 }
1194
1195 /**
1196  * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
1197  *
1198  *     - Thread0: obtains PR:[0, 10]. Lock is busy.
1199  *
1200  *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
1201  *       PR:[0, 10], but cancellation of busy lock is postponed.
1202  *
1203  *     - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
1204  *       PW:[5, 50], and thread0 waits for the lock completion never
1205  *       releasing PR:[0, 10]---deadlock.
1206  *
1207  * The second PR lock can be a glimpse lock (it is to deal with that situation
1208  * that ll_glimpse_size() has a second argument, preventing local match of
1209  * not-yet-granted locks, see bug 10295). A similar situation is possible in
1210  * the case of a memory-mapped user-level buffer.
1211  *
1212  * To prevent this we can detect a situation when current "thread" or "io"
1213  * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
1214  * the ols->ols_flags, or prevent local match with PW locks.
1215  */
1216 static int osc_deadlock_is_possible(const struct lu_env *env,
1217                                     struct cl_lock *lock)
1218 {
1219         struct cl_object        *obj;
1220         struct cl_object_header *head;
1221         struct cl_lock          *scan;
1222         struct osc_io           *oio;
1223
1224         int result;
1225
1226         ENTRY;
1227
1228         LASSERT(cl_lock_is_mutexed(lock));
1229
1230         oio  = osc_env_io(env);
1231         obj  = lock->cll_descr.cld_obj;
1232         head = cl_object_header(obj);
1233
1234         result = 0;
1235         spin_lock(&head->coh_lock_guard);
1236         list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1237                 if (scan != lock) {
1238                         struct osc_lock *oscan;
1239
1240                         oscan = osc_lock_at(scan);
1241                         LASSERT(oscan != NULL);
1242                         if (oscan->ols_owner == oio) {
1243                                 result = 1;
1244                                 break;
1245                         }
1246                 }
1247         }
1248         spin_unlock(&head->coh_lock_guard);
1249         RETURN(result);
1250 }
1251
1252 /**
1253  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1254  * layer. This initiates ldlm enqueue:
1255  *
1256  *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
1257  *
1258  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1259  *
1260  *     - calls osc_enqueue_base() to do actual enqueue.
1261  *
1262  * osc_enqueue_base() is supplied with an upcall function that is executed
1263  * when lock is received either after a local cached ldlm lock is matched, or
1264  * when a reply from the server is received.
1265  *
1266  * This function does not wait for the network communication to complete.
1267  */
1268 static int osc_lock_enqueue(const struct lu_env *env,
1269                             const struct cl_lock_slice *slice,
1270                             struct cl_io *unused, __u32 enqflags)
1271 {
1272         struct osc_lock          *ols     = cl2osc_lock(slice);
1273         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1274         struct osc_object        *obj     = cl2osc(slice->cls_obj);
1275         struct osc_thread_info   *info    = osc_env_info(env);
1276         struct ldlm_res_id       *resname = &info->oti_resname;
1277         ldlm_policy_data_t       *policy  = &info->oti_policy;
1278         struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
1279         int result;
1280         ENTRY;
1281
1282         LASSERT(cl_lock_is_mutexed(lock));
1283         LASSERT(lock->cll_state == CLS_QUEUING);
1284         LASSERT(ols->ols_state == OLS_NEW);
1285
1286         osc_lock_build_res(env, obj, resname);
1287         osc_lock_build_policy(env, lock, policy);
1288         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1289         if (osc_deadlock_is_possible(env, lock))
1290                 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1291         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1292                 ols->ols_glimpse = 1;
1293
1294         result = osc_lock_enqueue_wait(env, ols);
1295         if (result == 0) {
1296                 if (!(enqflags & CEF_MUST))
1297                         /* try to convert this lock to a lockless lock */
1298                         osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1299                 if (!osc_lock_is_lockless(ols)) {
1300                         if (ols->ols_locklessable)
1301                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1302
1303                         /* a reference for lock, passed as an upcall cookie */
1304                         cl_lock_get(lock);
1305                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1306                         ols->ols_state = OLS_ENQUEUED;
1307
1308                         /*
1309                          * XXX: this is a possible blocking point as
1310                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1311                          * LDLM_CP_CALLBACK.
1312                          */
1313                         result = osc_enqueue_base(osc_export(obj), resname,
1314                                           &ols->ols_flags, policy,
1315                                           &ols->ols_lvb,
1316                                           obj->oo_oinfo->loi_kms_valid,
1317                                           osc_lock_upcall,
1318                                           ols, einfo, &ols->ols_handle,
1319                                           PTLRPCD_SET, 1);
1320                         if (result != 0) {
1321                                 lu_ref_del(&lock->cll_reference,
1322                                            "upcall", lock);
1323                                 cl_lock_put(env, lock);
1324                         }
1325                 } else {
1326                         ols->ols_state = OLS_GRANTED;
1327                 }
1328         }
1329         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1330         RETURN(result);
1331 }
1332
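/**
 * Implementation of cl_lock_operations::clo_wait(): returns 0 once the lock
 * is granted (or, for a glimpse lock, once the upcall has been received), a
 * recorded error, or CLO_WAIT to keep waiting.
 */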
1333 static int osc_lock_wait(const struct lu_env *env,
1334                          const struct cl_lock_slice *slice)
1335 {
1336         struct osc_lock *olck = cl2osc_lock(slice);
1337         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1338
1339         LINVRNT(osc_lock_invariant(olck));
1340         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1341                 return 0;
1342
1343         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1344                      lock->cll_error == 0, olck->ols_lock != NULL));
1345
1346         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1347 }
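
/*
 * Note: the return statement above uses the GNU "?:" extension; written out
 * in full it is equivalent to:
 *
 *     if (lock->cll_error != 0)
 *             return lock->cll_error;         -- enqueue failed
 *     if (olck->ols_state >= OLS_GRANTED)
 *             return 0;                       -- lock is ready to use
 *     return CLO_WAIT;                        -- keep waiting
 */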
1348
1349 /**
1350  * An implementation of the cl_lock_operations::clo_use() method that pins a
1351  * cached lock.
1352  */
1353 static int osc_lock_use(const struct lu_env *env,
1354                         const struct cl_lock_slice *slice)
1355 {
1356         struct osc_lock *olck = cl2osc_lock(slice);
1357         int rc;
1358
1359         LASSERT(!olck->ols_hold);
1360
1361         /*
1362          * Atomically check for LDLM_FL_CBPENDING and addref the lock if this
1363          * flag is not set. This protects us from a concurrent blocking AST.
1364          */
1365         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1366         if (rc == 0) {
1367                 olck->ols_hold = 1;
1368                 olck->ols_state = OLS_GRANTED;
1369         } else {
1370                 struct cl_lock *lock;
1371
1372                 /*
1373                  * The lock is being cancelled somewhere within
1374                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1375                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired the
1376                  * cl_lock mutex.
1377                  */
1378                 lock = slice->cls_lock;
1379                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1380                 LASSERT(lock->cll_users > 0);
1381                 /* Set a flag for osc_dlm_blocking_ast0() to signal the
1382                  * lock. */
1383                 olck->ols_ast_wait = 1;
1384                 rc = CLO_WAIT;
1385         }
1386         return rc;
1387 }
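
/*
 * Note: a sketch of why ldlm_lock_addref_try() is used here rather than an
 * open-coded test (the variant below is hypothetical and deliberately racy):
 *
 *     if (!(dlmlock->l_flags & LDLM_FL_CBPENDING))
 *             ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
 *
 * The cancellation path can set LDLM_FL_CBPENDING between the test and the
 * addref, so the check and the reference acquisition have to be performed
 * atomically with respect to it, which is what ldlm_lock_addref_try() does.
 */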
1388
1389 static int osc_lock_flush(struct osc_lock *ols, int discard)
1390 {
1391         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1392         struct cl_env_nest    nest;
1393         struct lu_env        *env;
1394         int result = 0;
1395
1396         env = cl_env_nested_get(&nest);
1397         if (!IS_ERR(env)) {
1398                 result = cl_lock_page_out(env, lock, discard);
1399                 cl_env_nested_put(&nest, env);
1400         } else
1401                 result = PTR_ERR(env);
1402         if (result == 0) {
1403                 ols->ols_flush = 1;
1404                 LINVRNT(!osc_lock_has_pages(ols));
1405         }
1406         return result;
1407 }
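
/*
 * Note: osc_lock_flush() may be called from contexts that already own a cl
 * environment, hence the nested get/put pair.  The general pattern is:
 *
 *     struct cl_env_nest nest;
 *     struct lu_env     *env = cl_env_nested_get(&nest);
 *
 *     if (IS_ERR(env))
 *             return PTR_ERR(env);
 *     ...use env...
 *     cl_env_nested_put(&nest, env);
 *
 * cl_lock_page_out() then writes dirty pages covered by the lock back to the
 * server; when \a discard is set (LDLM_FL_DISCARD_DATA), they are expected to
 * be dropped rather than written back.
 */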
1408
1409 /**
1410  * Implements the cl_lock_operations::clo_cancel() method for the osc layer.
1411  * This is called (as part of cl_lock_cancel()) when a lock is cancelled
1412  * either voluntarily (LRU pressure, early cancellation, umount, etc.) or due
1413  * to a conflict with some other lock somewhere in the cluster. This function
1414  * does the following:
1415  *
1416  *     - invalidates all pages protected by this lock (after sending dirty
1417  *       ones to the server, as necessary);
1418  *
1419  *     - decrefs the underlying ldlm lock;
1420  *
1421  *     - cancels the ldlm lock (ldlm_cli_cancel()).
1422  */
1423 static void osc_lock_cancel(const struct lu_env *env,
1424                             const struct cl_lock_slice *slice)
1425 {
1426         struct cl_lock   *lock    = slice->cls_lock;
1427         struct osc_lock  *olck    = cl2osc_lock(slice);
1428         struct ldlm_lock *dlmlock = olck->ols_lock;
1429         int               result  = 0;
1430         int               discard;
1431
1432         LASSERT(cl_lock_is_mutexed(lock));
1433         LINVRNT(osc_lock_invariant(olck));
1434
1435         if (dlmlock != NULL) {
1436                 int do_cancel;
1437
1438                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1439                 result = osc_lock_flush(olck, discard);
1440                 osc_lock_unhold(olck);
1441
1442                 lock_res_and_lock(dlmlock);
1443                 /* Now that we are the only user of the dlm read/write
1444                  * reference, ->l_readers + ->l_writers should normally be
1445                  * zero. However, there is a corner case; see bug 18829 for
1446                  * details. */
1447                 do_cancel = (dlmlock->l_readers == 0 &&
1448                              dlmlock->l_writers == 0);
1449                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1450                 unlock_res_and_lock(dlmlock);
1451                 if (do_cancel)
1452                         result = ldlm_cli_cancel(&olck->ols_handle);
1453                 if (result < 0)
1454                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1455                                       "lock %p cancel failure with error(%d)\n",
1456                                       lock, result);
1457         }
1458         olck->ols_state = OLS_CANCELLED;
1459         osc_lock_detach(env, olck);
1460 }
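
/*
 * Note: the cancellation sequence above can be summarized as:
 *
 *     osc_lock_flush(olck, discard);            -- write out or drop pages
 *     osc_lock_unhold(olck);                    -- drop our dlm reference
 *     lock_res_and_lock(dlmlock);
 *     do_cancel = (l_readers == 0 && l_writers == 0);
 *     dlmlock->l_flags |= LDLM_FL_CBPENDING;    -- no new users from now on
 *     unlock_res_and_lock(dlmlock);
 *     if (do_cancel)
 *             ldlm_cli_cancel(&olck->ols_handle);
 *
 * If somebody else still holds a dlm reference (the bug 18829 corner case),
 * LDLM_FL_CBPENDING is expected to get the lock cancelled once that last
 * reference is dropped.
 */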
1461
1462 void cl_lock_page_list_fixup(const struct lu_env *env,
1463                              struct cl_io *io, struct cl_lock *lock,
1464                              struct cl_page_list *queue);
1465
1466 #ifdef INVARIANT_CHECK
1467 /**
1468  * Returns true iff there are pages under \a olck not protected by other
1469  * locks.
1470  */
1471 static int osc_lock_has_pages(struct osc_lock *olck)
1472 {
1473         struct cl_lock       *lock;
1474         struct cl_lock_descr *descr;
1475         struct cl_object     *obj;
1476         struct osc_object    *oob;
1477         struct cl_page_list  *plist;
1478         struct cl_page       *page;
1479         struct cl_env_nest    nest;
1480         struct cl_io         *io;
1481         struct lu_env        *env;
1482         int                   result;
1483
1484         env = cl_env_nested_get(&nest);
1485         if (!IS_ERR(env)) {
1486                 obj   = olck->ols_cl.cls_obj;
1487                 oob   = cl2osc(obj);
1488                 io    = &oob->oo_debug_io;
1489                 lock  = olck->ols_cl.cls_lock;
1490                 descr = &lock->cll_descr;
1491                 plist = &osc_env_info(env)->oti_plist;
1492                 cl_page_list_init(plist);
1493
1494                 mutex_lock(&oob->oo_debug_mutex);
1495
1496                 io->ci_obj = cl_object_top(obj);
1497                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1498                 cl_page_gang_lookup(env, obj, io,
1499                                     descr->cld_start, descr->cld_end, plist, 0);
1500                 cl_lock_page_list_fixup(env, io, lock, plist);
1501                 if (plist->pl_nr > 0) {
1502                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1503                         cl_page_list_for_each(page, plist)
1504                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1505                 }
1506                 result = plist->pl_nr > 0;
1507                 cl_page_list_disown(env, io, plist);
1508                 cl_page_list_fini(env, plist);
1509                 cl_io_fini(env, io);
1510                 mutex_unlock(&oob->oo_debug_mutex);
1511                 cl_env_nested_put(&nest, env);
1512         } else
1513                 result = 0;
1514         return result;
1515 }
1516 #else
1517 static int osc_lock_has_pages(struct osc_lock *olck)
1518 {
1519         return 0;
1520 }
1521 #endif /* INVARIANT_CHECK */
1522
1523 static void osc_lock_delete(const struct lu_env *env,
1524                             const struct cl_lock_slice *slice)
1525 {
1526         struct osc_lock *olck;
1527
1528         olck = cl2osc_lock(slice);
1529         if (olck->ols_glimpse) {
1530                 LASSERT(!olck->ols_hold);
1531                 LASSERT(!olck->ols_lock);
1532                 return;
1533         }
1534
1535         LINVRNT(osc_lock_invariant(olck));
1536         LINVRNT(!osc_lock_has_pages(olck));
1537
1538         osc_lock_unhold(olck);
1539         osc_lock_detach(env, olck);
1540 }
1541
1542 /**
1543  * Implements the cl_lock_operations::clo_state() method for the osc layer.
1544  *
1545  * Maintains the osc_lock::ols_owner field.
1546  *
1547  * This assumes that a lock always enters CLS_HELD (from some other state) in
1548  * the same IO context as the one that requested the lock. This should not be
1549  * a problem, because the context is by definition shared by all activity
1550  * pertaining to the same high-level IO.
1551  */
1552 static void osc_lock_state(const struct lu_env *env,
1553                            const struct cl_lock_slice *slice,
1554                            enum cl_lock_state state)
1555 {
1556         struct osc_lock *lock = cl2osc_lock(slice);
1557         struct osc_io   *oio  = osc_env_io(env);
1558
1559         /*
1560          * XXX multiple io contexts can use the lock at the same time.
1561          */
1562         LINVRNT(osc_lock_invariant(lock));
1563         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1564                 LASSERT(lock->ols_owner == NULL);
1565                 lock->ols_owner = oio;
1566         } else if (state != CLS_HELD)
1567                 lock->ols_owner = NULL;
1568 }
1569
1570 static int osc_lock_print(const struct lu_env *env, void *cookie,
1571                           lu_printer_t p, const struct cl_lock_slice *slice)
1572 {
1573         struct osc_lock *lock = cl2osc_lock(slice);
1574
1575         /*
1576          * XXX print ldlm lock and einfo properly.
1577          */
1578         (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1579              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1580              lock->ols_state, lock->ols_owner);
1581         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1582         return 0;
1583 }
1584
1585 static int osc_lock_fits_into(const struct lu_env *env,
1586                               const struct cl_lock_slice *slice,
1587                               const struct cl_lock_descr *need,
1588                               const struct cl_io *io)
1589 {
1590         struct osc_lock *ols = cl2osc_lock(slice);
1591
1592         if (need->cld_enq_flags & CEF_NEVER)
1593                 return 0;
1594
1595         if (need->cld_mode == CLM_PHANTOM) {
1596                 /*
1597                  * Note: a QUEUED lock cannot be matched here, otherwise
1598                  * it might cause a deadlock. For example, in the read
1599                  * path:
1600                  * P1: enqueues a read lock, creating sublock1.
1601                  * P2: enqueues a write lock, creating sublock2 (which
1602                  *     conflicts with sublock1).
1603                  * P1: the read lock is granted.
1604                  * P1: enqueues a glimpse lock (while still holding sublock1
1605                  *     for read), matches sublock2 and waits for sublock2 to
1606                  *     be granted. But sublock2 can never be granted, because
1607                  *     P1 will not release sublock1. Deadlock!
1608                  */
1609                 if (ols->ols_state < OLS_GRANTED ||
1610                         ols->ols_state > OLS_RELEASED)
1611                         return 0;
1612         } else if (need->cld_enq_flags & CEF_MUST) {
1613                 /*
1614                  * If the lock has never been enqueued, it cannot be matched,
1615                  * because the enqueue process brings in information that is
1616                  * needed to determine properties such as locklessness,
1617                  * CEF_MUST, etc.
1618                  */
1619                 if (ols->ols_state < OLS_GRANTED ||
1620                         ols->ols_state > OLS_RELEASED)
1621                         return 0;
1622                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
1623                         ols->ols_locklessable)
1624                         return 0;
1625         }
1626         return 1;
1627 }
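
/*
 * Note: the matching rules above boil down to:
 *
 *     CEF_NEVER request      -> never reuse an existing osc lock;
 *     CLM_PHANTOM (glimpse)  -> reuse only if OLS_GRANTED <= ols_state <=
 *                               OLS_RELEASED, i.e. never a queued lock, to
 *                               avoid the deadlock described above;
 *     CEF_MUST request       -> same state window, and in addition reject a
 *                               lock that may still become lockless
 *                               (ols_state < OLS_UPCALL_RECEIVED &&
 *                               ols_locklessable);
 *     anything else          -> the lock fits.
 */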
1628
1629 static const struct cl_lock_operations osc_lock_ops = {
1630         .clo_fini    = osc_lock_fini,
1631         .clo_enqueue = osc_lock_enqueue,
1632         .clo_wait    = osc_lock_wait,
1633         .clo_unuse   = osc_lock_unuse,
1634         .clo_use     = osc_lock_use,
1635         .clo_delete  = osc_lock_delete,
1636         .clo_state   = osc_lock_state,
1637         .clo_cancel  = osc_lock_cancel,
1638         .clo_weigh   = osc_lock_weigh,
1639         .clo_print   = osc_lock_print,
1640         .clo_fits_into = osc_lock_fits_into,
1641 };
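
/*
 * Note: these methods are not called directly.  The generic cl_lock code
 * dispatches through the slices attached to a cl_lock; conceptually (the
 * actual loop lives in cl_lock.c and is only sketched here):
 *
 *     list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
 *             if (slice->cls_ops->clo_enqueue != NULL) {
 *                     result = slice->cls_ops->clo_enqueue(env, slice,
 *                                                          io, enqflags);
 *                     if (result != 0)
 *                             break;
 *             }
 *     }
 *
 * so osc_lock_enqueue() above is the OSC layer's contribution to
 * cl_lock_enqueue() for this object.
 */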
1642
1643 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1644                                      const struct cl_lock_slice *slice,
1645                                      struct cl_io *unused, __u32 enqflags)
1646 {
1647         LBUG();
1648         return 0;
1649 }
1650
1651 static int osc_lock_lockless_unuse(const struct lu_env *env,
1652                                    const struct cl_lock_slice *slice)
1653 {
1654         struct osc_lock *ols = cl2osc_lock(slice);
1655         struct cl_lock *lock = slice->cls_lock;
1656
1657         LASSERT(ols->ols_state == OLS_GRANTED);
1658         LINVRNT(osc_lock_invariant(ols));
1659
1660         cl_lock_cancel(env, lock);
1661         cl_lock_delete(env, lock);
1662         return 0;
1663 }
1664
1665 static void osc_lock_lockless_cancel(const struct lu_env *env,
1666                                      const struct cl_lock_slice *slice)
1667 {
1668         struct osc_lock   *ols  = cl2osc_lock(slice);
1669         int result;
1670
1671         result = osc_lock_flush(ols, 0);
1672         if (result)
1673                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1674                        ols, result);
1675         ols->ols_state = OLS_CANCELLED;
1676 }
1677
1678 static int osc_lock_lockless_wait(const struct lu_env *env,
1679                                   const struct cl_lock_slice *slice)
1680 {
1681         struct osc_lock *olck = cl2osc_lock(slice);
1682         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1683
1684         LINVRNT(osc_lock_invariant(olck));
1685         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1686
1687         return lock->cll_error;
1688 }
1689
1690 static void osc_lock_lockless_state(const struct lu_env *env,
1691                                     const struct cl_lock_slice *slice,
1692                                     enum cl_lock_state state)
1693 {
1694         struct osc_lock *lock = cl2osc_lock(slice);
1695         struct osc_io   *oio  = osc_env_io(env);
1696
1697         LINVRNT(osc_lock_invariant(lock));
1698         if (state == CLS_HELD) {
1699                 LASSERT(lock->ols_owner == NULL);
1700                 lock->ols_owner = oio;
1701
1702                 /* set the io to be lockless if this lock is for the io's
1703                  * host object */
1704                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1705                         oio->oi_lockless = 1;
1706         } else
1707                 lock->ols_owner = NULL;
1708 }
1709
1710 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1711                                        const struct cl_lock_slice *slice,
1712                                        const struct cl_lock_descr *need,
1713                                        const struct cl_io *io)
1714 {
1715         return 0;
1716 }
1717
1718 static const struct cl_lock_operations osc_lock_lockless_ops = {
1719         .clo_fini      = osc_lock_fini,
1720         .clo_enqueue   = osc_lock_lockless_enqueue,
1721         .clo_wait      = osc_lock_lockless_wait,
1722         .clo_unuse     = osc_lock_lockless_unuse,
1723         .clo_state     = osc_lock_lockless_state,
1724         .clo_fits_into = osc_lock_lockless_fits_into,
1725         .clo_cancel    = osc_lock_lockless_cancel,
1726         .clo_print     = osc_lock_print
1727 };
1728
1729 int osc_lock_init(const struct lu_env *env,
1730                   struct cl_object *obj, struct cl_lock *lock,
1731                   const struct cl_io *unused)
1732 {
1733         struct osc_lock *clk;
1734         int result;
1735
1736         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1737         if (clk != NULL) {
1738                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1739                 clk->ols_state = OLS_NEW;
1740                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1741                 result = 0;
1742         } else
1743                 result = -ENOMEM;
1744         return result;
1745 }
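
/*
 * Note: osc_lock_init() is the entry point used by the generic layer when a
 * new cl_lock is created on an osc object.  It is expected to be registered
 * in the object's operations vector in osc_object.c (the structure name
 * below is illustrative; the exact wiring lives outside this file):
 *
 *     static const struct cl_object_operations osc_object_clo_ops = {
 *             ...
 *             .coo_lock_init = osc_lock_init,
 *     };
 *
 * A freshly initialized slice starts in OLS_NEW and is driven towards
 * OLS_GRANTED (and eventually OLS_CANCELLED) by the methods above.
 */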
1746
1747
1748 /** @} osc */