lustre/osc/osc_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 /** \addtogroup osc osc @{ */
42
43 #define DEBUG_SUBSYSTEM S_OSC
44
45 #ifdef __KERNEL__
46 # include <libcfs/libcfs.h>
47 #else
48 # include <liblustre.h>
49 #endif
50 /* fid_build_reg_res_name() */
51 #include <lustre_fid.h>
52
53 #include "osc_cl_internal.h"
54
55 /*****************************************************************************
56  *
57  * Type conversions.
58  *
59  */
60
61 static const struct cl_lock_operations osc_lock_ops;
62 static const struct cl_lock_operations osc_lock_lockless_ops;
63 static void osc_lock_to_lockless(const struct lu_env *env,
64                                  struct osc_lock *ols, int force);
65 static int osc_lock_has_pages(struct osc_lock *olck);
66
67 int osc_lock_is_lockless(const struct osc_lock *olck)
68 {
69         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
70 }
71
72 /**
73  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
74  * pointer cannot be dereferenced, as lock is not protected from concurrent
75  * reclaim. This function is a helper for osc_lock_invariant().
76  */
77 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
78 {
79         struct ldlm_lock *lock;
80
81         lock = ldlm_handle2lock(handle);
82         if (lock != NULL)
83                 LDLM_LOCK_PUT(lock);
84         return lock;
85 }
86
87 /**
88  * Invariant that has to be true all of the time.
89  */
90 static int osc_lock_invariant(struct osc_lock *ols)
91 {
92         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
93         struct ldlm_lock *olock       = ols->ols_lock;
94         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
95
96         return
97                 ergo(osc_lock_is_lockless(ols),
98                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
99                 (ergo(olock != NULL, handle_used) &&
100                  ergo(olock != NULL,
101                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
102                  /*
103                   * Check that ->ols_handle and ->ols_lock are consistent, but
104                   * take into account that they are set at different times.
105                   */
106                  ergo(handle_used,
107                       ergo(lock != NULL && olock != NULL, lock == olock) &&
108                       ergo(lock == NULL, olock == NULL)) &&
109                  ergo(ols->ols_state == OLS_CANCELLED,
110                       olock == NULL && !handle_used) &&
111                  /*
112                   * DLM lock is destroyed only after we have seen cancellation
113                   * ast.
114                   */
115                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
116                       !olock->l_destroyed) &&
117                  ergo(ols->ols_state == OLS_GRANTED,
118                       olock != NULL &&
119                       olock->l_req_mode == olock->l_granted_mode &&
120                       ols->ols_hold));
121 }
122
123 /*****************************************************************************
124  *
125  * Lock operations.
126  *
127  */
128
129 /**
130  * Breaks a link between osc_lock and dlm_lock.
131  */
132 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
133 {
134         struct ldlm_lock *dlmlock;
135
136         spin_lock(&osc_ast_guard);
137         dlmlock = olck->ols_lock;
138         if (dlmlock == NULL) {
139                 spin_unlock(&osc_ast_guard);
140                 return;
141         }
142
143         olck->ols_lock = NULL;
144         /* wb(); --- for all who check (ols->ols_lock != NULL) before
145          * the call to osc_lock_detach() */
146         dlmlock->l_ast_data = NULL;
147         olck->ols_handle.cookie = 0ULL;
148         spin_unlock(&osc_ast_guard);
149
150         lock_res_and_lock(dlmlock);
151         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
152                 struct cl_object *obj = olck->ols_cl.cls_obj;
153                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
154                 __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
155
156                 /* Update the kms. Strictly speaking this needs to loop over
157                  * all granted locks; not a problem for the client. */
158                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
159                 unlock_res_and_lock(dlmlock);
160
161                 cl_object_attr_lock(obj);
162                 cl_object_attr_set(env, obj, attr, CAT_KMS);
163                 cl_object_attr_unlock(obj);
164         } else
165                 unlock_res_and_lock(dlmlock);
166
167         /* release a reference taken in osc_lock_upcall0(). */
168         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
169         LDLM_LOCK_RELEASE(dlmlock);
170 }
171
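/**
 * Releases a granted lock: moves it into OLS_RELEASED and drops the ldlm
 * hold via osc_cancel_base(). Glimpse locks take no hold and are a no-op
 * here.
 */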
172 static int osc_lock_unuse(const struct lu_env *env,
173                           const struct cl_lock_slice *slice)
174 {
175         struct osc_lock *ols = cl2osc_lock(slice);
176         int result;
177
178         LASSERT(ols->ols_state == OLS_GRANTED ||
179                 ols->ols_state == OLS_UPCALL_RECEIVED);
180         LINVRNT(osc_lock_invariant(ols));
181
182         if (ols->ols_glimpse) {
183                 LASSERT(ols->ols_hold == 0);
184                 return 0;
185         }
186         LASSERT(ols->ols_hold);
187
188         /*
189          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
190          * so that possible synchronous cancellation (that always happens
191          * e.g., for liblustre) sees that lock is released.
192          */
193         ols->ols_state = OLS_RELEASED;
194         ols->ols_hold = 0;
195         result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
196         ols->ols_has_ref = 0;
197         return result;
198 }
199
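/**
 * Finalizes and frees the osc_lock slice. A hold may still be attached at
 * this point (e.g., when the enqueuing thread was killed before the server
 * replied), in which case the lock is unused first.
 */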
200 static void osc_lock_fini(const struct lu_env *env,
201                           struct cl_lock_slice *slice)
202 {
203         struct osc_lock  *ols = cl2osc_lock(slice);
204
205         LINVRNT(osc_lock_invariant(ols));
206         /*
207          * ->ols_hold can still be true at this point if, for example, a
208          * thread that requested a lock was killed (and released a reference
209          * to the lock), before reply from a server was received. In this case
210          * lock is destroyed immediately after upcall.
211          */
212         if (ols->ols_hold)
213                 osc_lock_unuse(env, slice);
214         LASSERT(ols->ols_lock == NULL);
215
216         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
217 }
218
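/**
 * Builds the ldlm resource name under which extent locks for \a obj live.
 * Currently derived from ->loi_id and ->loi_gr via osc_build_res_name();
 * fid-based names are not used yet (see the if (0) branch below).
 */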
219 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
220                         struct ldlm_res_id *resname)
221 {
222         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
223         if (0) {
224                 /*
225                  * In the perfect world of the future, where ost servers talk
226                  * idif-fids...
227                  */
228                 fid_build_reg_res_name(fid, resname);
229         } else {
230                 /*
231                  * In reality, the ost server expects ->lsm_object_id and
232                  * ->lsm_object_gr in the resource name.
233                  */
234                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
235                                    resname);
236         }
237 }
238
239 static void osc_lock_build_policy(const struct lu_env *env,
240                                   const struct cl_lock *lock,
241                                   ldlm_policy_data_t *policy)
242 {
243         const struct cl_lock_descr *d = &lock->cll_descr;
244
245         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
246         policy->l_extent.gid = d->cld_gid;
247 }
248
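/**
 * Translates cl enqueue flags (CEF_*) into ldlm flags (LDLM_FL_* and
 * LDLM_AST_*) understood by the dlm layer.
 */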
249 static int osc_enq2ldlm_flags(__u32 enqflags)
250 {
251         int result = 0;
252
253         LASSERT((enqflags & ~CEF_MASK) == 0);
254
255         if (enqflags & CEF_NONBLOCK)
256                 result |= LDLM_FL_BLOCK_NOWAIT;
257         if (enqflags & CEF_ASYNC)
258                 result |= LDLM_FL_HAS_INTENT;
259         if (enqflags & CEF_DISCARD_DATA)
260                 result |= LDLM_AST_DISCARD_DATA;
261         return result;
262 }
263
264 /**
265  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
266  * pointers. Initialized in osc_init().
267  */
268 spinlock_t osc_ast_guard;
269
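/**
 * Returns the osc_lock attached to \a dlm_lock through ->l_ast_data, taking
 * a trusted reference on the corresponding cl_lock, or NULL if no usable
 * osc_lock is attached. Must be paired with osc_ast_data_put().
 */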
270 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
271 {
272         struct osc_lock *olck;
273
274         lock_res_and_lock(dlm_lock);
275         spin_lock(&osc_ast_guard);
276         olck = dlm_lock->l_ast_data;
277         if (olck != NULL) {
278                 struct cl_lock *lock = olck->ols_cl.cls_lock;
279                 /*
280                  * If osc_lock holds a reference on ldlm lock, return it even
281                  * when cl_lock is in CLS_FREEING state. This way
282                  *
283                  *         osc_ast_data_get(dlmlock) == NULL
284                  *
285                  * guarantees that all osc references on dlmlock were
286                  * released. osc_dlm_blocking_ast0() relies on that.
287                  */
288                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
289                         cl_lock_get_trust(lock);
290                         lu_ref_add_atomic(&lock->cll_reference,
291                                           "ast", cfs_current());
292                 } else
293                         olck = NULL;
294         }
295         spin_unlock(&osc_ast_guard);
296         unlock_res_and_lock(dlm_lock);
297         return olck;
298 }
299
300 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
301 {
302         struct cl_lock *lock;
303
304         lock = olck->ols_cl.cls_lock;
305         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
306         cl_lock_put(env, lock);
307 }
308
309 /**
310  * Updates object attributes from a lock value block (lvb) received together
311  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
312  * logic.
313  *
314  * This can be optimized to not update attributes when lock is a result of a
315  * local match.
316  *
317  * Called under lock and resource spin-locks.
318  */
319 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
320                                 int rc)
321 {
322         struct ost_lvb    *lvb;
323         struct cl_object  *obj;
324         struct lov_oinfo  *oinfo;
325         struct cl_attr    *attr;
326         unsigned           valid;
327
328         ENTRY;
329
330         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
331                 EXIT;
332                 return;
333         }
334
335         lvb   = &olck->ols_lvb;
336         obj   = olck->ols_cl.cls_obj;
337         oinfo = cl2osc(obj)->oo_oinfo;
338         attr  = &osc_env_info(env)->oti_attr;
339         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
340         cl_lvb2attr(attr, lvb);
341
342         cl_object_attr_lock(obj);
343         if (rc == 0) {
344                 struct ldlm_lock  *dlmlock;
345                 __u64 size;
346
347                 dlmlock = olck->ols_lock;
348                 LASSERT(dlmlock != NULL);
349
350                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
351                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
352                 size = lvb->lvb_size;
353                 /* Extend KMS up to the end of this lock and no further
354                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
355                 if (size > dlmlock->l_policy_data.l_extent.end)
356                         size = dlmlock->l_policy_data.l_extent.end + 1;
357                 if (size >= oinfo->loi_kms) {
358                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
359                                    ", kms="LPU64, lvb->lvb_size, size);
360                         valid |= CAT_KMS;
361                         attr->cat_kms = size;
362                 } else {
363                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
364                                    LPU64"; leaving kms="LPU64", end="LPU64,
365                                    lvb->lvb_size, oinfo->loi_kms,
366                                    dlmlock->l_policy_data.l_extent.end);
367                 }
368                 ldlm_lock_allow_match_locked(dlmlock);
369         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
370                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
371                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
372         } else
373                 valid = 0;
374
375         if (valid != 0)
376                 cl_object_attr_set(env, obj, attr, valid);
377
378         cl_object_attr_unlock(obj);
379
380         EXIT;
381 }
382
383 /**
384  * Called when a lock is granted, from an upcall (when server returned a
385  * granted lock), or from completion AST, when server returned a blocked lock.
386  *
387  * Called under lock and resource spin-locks, that are released temporarily
388  * here.
389  */
390 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
391                              struct ldlm_lock *dlmlock, int rc)
392 {
393         struct ldlm_extent   *ext;
394         struct cl_lock       *lock;
395         struct cl_lock_descr *descr;
396
397         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
398
399         ENTRY;
400         if (olck->ols_state != OLS_GRANTED) {
401                 lock  = olck->ols_cl.cls_lock;
402                 ext   = &dlmlock->l_policy_data.l_extent;
403                 descr = &osc_env_info(env)->oti_descr;
404                 descr->cld_obj = lock->cll_descr.cld_obj;
405
406                 /* XXX check that ->l_granted_mode is valid. */
407                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
408                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
409                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
410                 descr->cld_gid   = ext->gid;
411                 /*
412                  * tell upper layers the extent of the lock that was actually
413                  * granted
414                  */
415                 olck->ols_state = OLS_GRANTED;
416                 osc_lock_lvb_update(env, olck, rc);
417
418                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
419                  * to take a semaphore on a parent lock. This is safe, because
420                  * spin-locks are needed to protect consistency of
421                  * dlmlock->l_*_mode and LVB, and we have finished processing
422                  * them. */
423                 unlock_res_and_lock(dlmlock);
424                 cl_lock_modify(env, lock, descr);
425                 cl_lock_signal(env, lock);
426                 LINVRNT(osc_lock_invariant(olck));
427                 lock_res_and_lock(dlmlock);
428         }
429         EXIT;
430 }
431
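/**
 * First part of the upcall for a successfully enqueued lock: binds the dlm
 * lock to the osc_lock, takes the lock reference and hold that are released
 * in osc_lock_detach()/osc_lock_unuse(), and completes granting if the dlm
 * lock is already granted.
 */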
432 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
433
434 {
435         struct ldlm_lock *dlmlock;
436
437         ENTRY;
438
439         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
440         LASSERT(dlmlock != NULL);
441
442         lock_res_and_lock(dlmlock);
443         spin_lock(&osc_ast_guard);
444         LASSERT(dlmlock->l_ast_data == olck);
445         LASSERT(olck->ols_lock == NULL);
446         olck->ols_lock = dlmlock;
447         spin_unlock(&osc_ast_guard);
448
449         /*
450          * Lock might be not yet granted. In this case, completion ast
451          * (osc_ldlm_completion_ast()) comes later and finishes lock
452          * granting.
453          */
454         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
455                 osc_lock_granted(env, olck, dlmlock, 0);
456         unlock_res_and_lock(dlmlock);
457
458         /*
459          * osc_enqueue_interpret() decrefs asynchronous locks, counter
460          * this.
461          */
462         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
463         olck->ols_hold = olck->ols_has_ref = 1;
464
465         /* lock reference taken by ldlm_handle2lock_long() is owned by
466          * osc_lock and released in osc_lock_detach() */
467         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
468 }
469
470 /**
471  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
472  * received from a server, or after osc_enqueue_base() matched a local DLM
473  * lock.
474  */
475 static int osc_lock_upcall(void *cookie, int errcode)
476 {
477         struct osc_lock         *olck  = cookie;
478         struct cl_lock_slice    *slice = &olck->ols_cl;
479         struct cl_lock          *lock  = slice->cls_lock;
480         struct lu_env           *env;
481         struct cl_env_nest       nest;
482
483         ENTRY;
484         env = cl_env_nested_get(&nest);
485         if (!IS_ERR(env)) {
486                 int rc;
487
488                 cl_lock_mutex_get(env, lock);
489
490                 LASSERT(lock->cll_state >= CLS_QUEUING);
491                 if (olck->ols_state == OLS_ENQUEUED) {
492                         olck->ols_state = OLS_UPCALL_RECEIVED;
493                         rc = ldlm_error2errno(errcode);
494                 } else if (olck->ols_state == OLS_CANCELLED) {
495                         rc = -EIO;
496                 } else {
497                         CERROR("Impossible state: %i\n", olck->ols_state);
498                         LBUG();
499                 }
500                 if (rc) {
501                         struct ldlm_lock *dlmlock;
502
503                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
504                         if (dlmlock != NULL) {
505                                 lock_res_and_lock(dlmlock);
506                                 spin_lock(&osc_ast_guard);
507                                 LASSERT(olck->ols_lock == NULL);
508                                 dlmlock->l_ast_data = NULL;
509                                 olck->ols_handle.cookie = 0ULL;
510                                 spin_unlock(&osc_ast_guard);
511                                 unlock_res_and_lock(dlmlock);
512                                 LDLM_LOCK_PUT(dlmlock);
513                         }
514                 } else {
515                         if (olck->ols_glimpse)
516                                 olck->ols_glimpse = 0;
517                         osc_lock_upcall0(env, olck);
518                 }
519
520                 /* Error handling, some errors are tolerable. */
521                 if (olck->ols_locklessable && rc == -EUSERS) {
522                         /* This is a tolerable error, turn this lock into
523                          * a lockless lock.
524                          */
525                         osc_object_set_contended(cl2osc(slice->cls_obj));
526                         LASSERT(slice->cls_ops == &osc_lock_ops);
527
528                         /* Change this lock into a lockless lock. */
529                         osc_lock_to_lockless(env, olck, 1);
530                         olck->ols_state = OLS_GRANTED;
531                         rc = 0;
532                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
533                         osc_lock_lvb_update(env, olck, rc);
534                         cl_lock_delete(env, lock);
535                         /* Hide the error. */
536                         rc = 0;
537                 }
538
539                 if (rc == 0)
540                         /* on error, lock was signaled by cl_lock_error() */
541                         cl_lock_signal(env, lock);
542                 else
543                         cl_lock_error(env, lock, rc);
544
545                 cl_lock_mutex_put(env, lock);
546
547                 /* release cookie reference, acquired by osc_lock_enqueue() */
548                 lu_ref_del(&lock->cll_reference, "upcall", lock);
549                 cl_lock_put(env, lock);
550                 cl_env_nested_put(&nest, env);
551         } else
552                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
553                 LBUG();
554         RETURN(errcode);
555 }
556
557 /**
558  * Core of osc_dlm_blocking_ast() logic.
559  */
560 static void osc_lock_blocking(const struct lu_env *env,
561                               struct ldlm_lock *dlmlock,
562                               struct osc_lock *olck, int blocking)
563 {
564         struct cl_lock *lock = olck->ols_cl.cls_lock;
565
566         LASSERT(olck->ols_lock == dlmlock);
567         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
568         LASSERT(!osc_lock_is_lockless(olck));
569
570         if (olck->ols_hold)
571                 /*
572                  * Lock might be still addref-ed here, if e.g., blocking ast
573                  * is sent for a failed lock.
574                  */
575                 osc_lock_unuse(env, &olck->ols_cl);
576
577         if (blocking && olck->ols_state < OLS_BLOCKED)
578                 /*
579                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
580                  * because it recursively re-enters osc_lock_blocking(), with
581                  * the state set to OLS_CANCELLED.
582                  */
583                 olck->ols_state = OLS_BLOCKED;
584         /*
585          * cancel and destroy lock at least once no matter how blocking ast is
586          * entered (see comment above osc_ldlm_blocking_ast() for use
587          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
588          */
589         cl_lock_cancel(env, lock);
590         cl_lock_delete(env, lock);
591 }
592
593 /**
594  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
595  * and ldlm_lock caches.
596  */
597 static int osc_dlm_blocking_ast0(const struct lu_env *env,
598                                  struct ldlm_lock *dlmlock,
599                                  void *data, int flag)
600 {
601         struct osc_lock *olck;
602         struct cl_lock  *lock;
603         int result;
604         int cancel;
605
606         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
607
608         cancel = 0;
609         olck = osc_ast_data_get(dlmlock);
610         if (olck != NULL) {
611                 lock = olck->ols_cl.cls_lock;
612                 cl_lock_mutex_get(env, lock);
613                 LINVRNT(osc_lock_invariant(olck));
614                 if (olck->ols_ast_wait) {
615                         /* wake up osc_lock_use() */
616                         cl_lock_signal(env, lock);
617                         olck->ols_ast_wait = 0;
618                 }
619                 /*
620                  * Lock might have been canceled while this thread was
621                  * sleeping for lock mutex, but olck is pinned in memory.
622                  */
623                 if (olck == dlmlock->l_ast_data) {
624                         /*
625                          * NOTE: DLM sends blocking AST's for failed locks
626                          *       (that are still in pre-OLS_GRANTED state)
627                          *       too, and they have to be canceled otherwise
628                          *       DLM lock is never destroyed and stuck in
629                          *       the memory.
630                          *
631                          *       Alternatively, ldlm_cli_cancel() can be
632                          *       called here directly for osc_locks with
633                          *       ols_state < OLS_GRANTED to maintain an
634                          *       invariant that ->clo_cancel() is only called
635                          *       for locks that were granted.
636                          */
637                         LASSERT(data == olck);
638                         osc_lock_blocking(env, dlmlock,
639                                           olck, flag == LDLM_CB_BLOCKING);
640                 } else
641                         cancel = 1;
642                 cl_lock_mutex_put(env, lock);
643                 osc_ast_data_put(env, olck);
644         } else
645                 /*
646                  * DLM lock exists, but there is no cl_lock attached to it.
647                  * This is a `normal' race. cl_object and its cl_lock's can be
648                  * removed by memory pressure, together with all pages.
649                  */
650                 cancel = (flag == LDLM_CB_BLOCKING);
651
652         if (cancel) {
653                 struct lustre_handle *lockh;
654
655                 lockh = &osc_env_info(env)->oti_handle;
656                 ldlm_lock2handle(dlmlock, lockh);
657                 result = ldlm_cli_cancel(lockh);
658         } else
659                 result = 0;
660         return result;
661 }
662
663 /**
664  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
665  * some other lock, or is canceled. This function is installed as a
666  * ldlm_lock::l_blocking_ast() for client extent locks.
667  *
668  * Control flow is tricky, because ldlm uses the same call-back
669  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
670  *
671  * \param dlmlock lock for which ast occurred.
672  *
673  * \param new description of a conflicting lock in case of blocking ast.
674  *
675  * \param data value of dlmlock->l_ast_data
676  *
677  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
678  *             cancellation and blocking ast's.
679  *
680  * Possible use cases:
681  *
682  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
683  *       lock due to lock lru pressure, or explicit user request to purge
684  *       locks.
685  *
686  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
687  *       us that dlmlock conflicts with another lock that some client is
688  *       enqueuing. Lock is canceled.
689  *
690  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
691  *             ldlm_cli_cancel() that calls
692  *
693  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
694  *
695  *             recursively entering osc_ldlm_blocking_ast().
696  *
697  *     - client cancels lock voluntarily (e.g., as a part of early cancellation):
698  *
699  *           cl_lock_cancel()->
700  *             osc_lock_cancel()->
701  *               ldlm_cli_cancel()->
702  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
703  *
704  */
705 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
706                                  struct ldlm_lock_desc *new, void *data,
707                                  int flag)
708 {
709         struct lu_env     *env;
710         struct cl_env_nest nest;
711         int                result;
712
713         /*
714          * This can be called in the context of outer IO, e.g.,
715          *
716          *     cl_enqueue()->...
717          *       ->osc_enqueue_base()->...
718          *         ->ldlm_prep_elc_req()->...
719          *           ->ldlm_cancel_callback()->...
720          *             ->osc_ldlm_blocking_ast()
721          *
722          * A new environment has to be created to avoid corrupting the outer context.
723          */
724         env = cl_env_nested_get(&nest);
725         if (!IS_ERR(env)) {
726                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
727                 cl_env_nested_put(&nest, env);
728         } else {
729                 result = PTR_ERR(env);
730                 /*
731                  * XXX This should never happen, as cl_lock is
732                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
733                  * should be used.
734                  */
735                 LBUG();
736         }
737         if (result != 0) {
738                 if (result == -ENODATA)
739                         result = 0;
740                 else
741                         CERROR("BAST failed: %d\n", result);
742         }
743         return result;
744 }
745
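/**
 * Completion AST, invoked by ldlm when the lock state changes (e.g., when a
 * lock is granted after having been blocked). Does the dlm part of the work
 * first, then updates the cl_lock: the LVB is copied into the osc_lock and,
 * if the dlm lock is granted and already bound to the osc_lock, the lock is
 * moved into OLS_GRANTED via osc_lock_granted().
 */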
746 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
747                                    int flags, void *data)
748 {
749         struct cl_env_nest nest;
750         struct lu_env     *env;
751         struct osc_lock   *olck;
752         struct cl_lock    *lock;
753         int result;
754         int dlmrc;
755
756         /* first, do dlm part of the work */
757         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
758         /* then, notify cl_lock */
759         env = cl_env_nested_get(&nest);
760         if (!IS_ERR(env)) {
761                 olck = osc_ast_data_get(dlmlock);
762                 if (olck != NULL) {
763                         lock = olck->ols_cl.cls_lock;
764                         cl_lock_mutex_get(env, lock);
765                         /*
766                          * ldlm_handle_cp_callback() copied LVB from request
767                          * to lock->l_lvb_data, store it in osc_lock.
768                          */
769                         LASSERT(dlmlock->l_lvb_data != NULL);
770                         lock_res_and_lock(dlmlock);
771                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
772                         if (olck->ols_lock == NULL)
773                                 /*
774                                  * upcall (osc_lock_upcall()) hasn't yet been
775                                  * called. Do nothing now, upcall will bind
776                                  * olck to dlmlock and signal the waiters.
777                                  *
778                                  * This maintains an invariant that osc_lock
779                                  * and ldlm_lock are always bound when
780                                  * osc_lock is in OLS_GRANTED state.
781                                  */
782                                 ;
783                         else if (dlmlock->l_granted_mode != LCK_MINMODE)
784                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
785                         unlock_res_and_lock(dlmlock);
786                         if (dlmrc != 0)
787                                 cl_lock_error(env, lock, dlmrc);
788                         cl_lock_mutex_put(env, lock);
789                         osc_ast_data_put(env, olck);
790                         result = 0;
791                 } else
792                         result = -ELDLM_NO_LOCK_DATA;
793                 cl_env_nested_put(&nest, env);
794         } else
795                 result = PTR_ERR(env);
796         return dlmrc ?: result;
797 }
798
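/**
 * Glimpse AST, invoked when the server asks the client holding the lock for
 * the up-to-date object state. Packs an ost_lvb reply filled in by
 * cl_object_glimpse(). A missing ->l_ast_data is a normal race and is
 * answered with -ELDLM_NO_LOCK_DATA.
 */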
799 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
800 {
801         struct ptlrpc_request  *req  = data;
802         struct osc_lock        *olck;
803         struct cl_lock         *lock;
804         struct cl_object       *obj;
805         struct cl_env_nest      nest;
806         struct lu_env          *env;
807         struct ost_lvb         *lvb;
808         struct req_capsule     *cap;
809         int                     result;
810
811         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
812
813         env = cl_env_nested_get(&nest);
814         if (!IS_ERR(env)) {
815                 /*
816                  * osc_ast_data_get() has to go after environment is
817                  * allocated, because osc_ast_data_get() acquires a
818                  * reference to a lock, and that reference can only be
819                  * released with an environment.
820                  */
821                 olck = osc_ast_data_get(dlmlock);
822                 if (olck != NULL) {
823                         lock = olck->ols_cl.cls_lock;
824                         cl_lock_mutex_get(env, lock);
825                         cap = &req->rq_pill;
826                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
827                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
828                                              sizeof *lvb);
829                         result = req_capsule_server_pack(cap);
830                         if (result == 0) {
831                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
832                                 obj = lock->cll_descr.cld_obj;
833                                 result = cl_object_glimpse(env, obj, lvb);
834                         }
835                         cl_lock_mutex_put(env, lock);
836                         osc_ast_data_put(env, olck);
837                 } else {
838                         /*
839                          * These errors are normal races, so we don't want to
840                          * fill the console with messages by calling
841                          * ptlrpc_error()
842                          */
843                         lustre_pack_reply(req, 1, NULL, NULL);
844                         result = -ELDLM_NO_LOCK_DATA;
845                 }
846                 cl_env_nested_put(&nest, env);
847         } else
848                 result = PTR_ERR(env);
849         req->rq_status = result;
850         return result;
851 }
852
853 static unsigned long osc_lock_weigh(const struct lu_env *env,
854                                     const struct cl_lock_slice *slice)
855 {
856         /*
857          * no need to grab coh_page_guard since we don't care about the exact
858          * number of pages.
859          */
860         return cl_object_header(slice->cls_obj)->coh_pages;
861 }
862
863 /**
864  * Get the weight of dlm lock for early cancellation.
865  *
866  * XXX: it should return the number of pages covered by this \a dlmlock.
867  */
868 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
869 {
870         struct cl_env_nest       nest;
871         struct lu_env           *env;
872         struct osc_lock         *lock;
873         struct cl_lock          *cll;
874         unsigned long            weight;
875         ENTRY;
876
877         might_sleep();
878         /*
879          * osc_ldlm_weigh_ast() has a complex calling context: it may be
880          * invoked because of lock cancellation or from user input, so a
881          * new environment has to be created for it. It is probably safe
882          * to use the upper context, since cl_lock_put() does not modify
883          * environment variables, but err on the side of caution here.
884          */
885         env = cl_env_nested_get(&nest);
886         if (IS_ERR(env))
887                 /* Most likely due to lack of memory; prefer to eliminate this lock */
888                 RETURN(0);
889
890         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
891         lock = osc_ast_data_get(dlmlock);
892         if (lock == NULL) {
893                 /* cl_lock was destroyed because of memory pressure.
894                  * It is more reasonable to assign this type of lock
895                  * a lower cost.
896                  */
897                 GOTO(out, weight = 0);
898         }
899
900         cll = lock->ols_cl.cls_lock;
901         cl_lock_mutex_get(env, cll);
902         weight = cl_lock_weigh(env, cll);
903         cl_lock_mutex_put(env, cll);
904         osc_ast_data_put(env, lock);
905         EXIT;
906
907 out:
908         cl_env_nested_put(&nest, env);
909         return weight;
910 }
911
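/**
 * Fills ldlm_enqueue_info for an extent lock: lock type, mode, and the
 * blocking, completion, glimpse and weigh callbacks. Glimpse (CLM_PHANTOM)
 * locks are enqueued in read mode for now. ->ei_cbdata points back to the
 * osc_lock and becomes ldlm_lock::l_ast_data.
 */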
912 static void osc_lock_build_einfo(const struct lu_env *env,
913                                  const struct cl_lock *clock,
914                                  struct osc_lock *lock,
915                                  struct ldlm_enqueue_info *einfo)
916 {
917         enum cl_lock_mode mode;
918
919         mode = clock->cll_descr.cld_mode;
920         if (mode == CLM_PHANTOM)
921                 /*
922                  * For now, enqueue all glimpse locks in read mode. In the
923                  * future, client might choose to enqueue LCK_PW lock for
924                  * glimpse on a file opened for write.
925                  */
926                 mode = CLM_READ;
927
928         einfo->ei_type   = LDLM_EXTENT;
929         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
930         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
931         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
932         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
933         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
934         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
935 }
936
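/**
 * Deletes \a conflict in a freshly created nested environment; a helper for
 * osc_lock_cancel_wait().
 */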
937 static int osc_lock_delete0(struct cl_lock *conflict)
938 {
939         struct cl_env_nest    nest;
940         struct lu_env        *env;
941         int    rc = 0;
942
943         env = cl_env_nested_get(&nest);
944         if (!IS_ERR(env)) {
945                 cl_lock_delete(env, conflict);
946                 cl_env_nested_put(&nest, env);
947         } else
948                 rc = PTR_ERR(env);
949         return rc;
950 }
951 /**
952  * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This
953  * is called as a part of enqueuing to cancel conflicting locks early.
954  *
955  * \retval            0: success, \a conflict was cancelled and destroyed.
956  *
957  * \retval   CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
958  *                       released in the process. Repeat enqueing.
959  *
960  * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
961  *                       either \a lock is non-blocking, or current thread
962  *                       holds other locks, that prevent it from waiting
963  *                       for cancel to complete.
964  *
965  * \retval          -ve: other error, including -EINTR.
966  *
967  */
968 static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
969                                 struct cl_lock *conflict, int canwait)
970 {
971         int rc;
972
973         LASSERT(cl_lock_is_mutexed(lock));
974         LASSERT(cl_lock_is_mutexed(conflict));
975
976         rc = 0;
977         if (conflict->cll_state != CLS_FREEING) {
978                 cl_lock_cancel(env, conflict);
979                 rc = osc_lock_delete0(conflict);
980                 if (rc)
981                         return rc;
982                 if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
983                         rc = -EWOULDBLOCK;
984                         if (cl_lock_nr_mutexed(env) > 2)
985                                 /*
986                                  * If mutexes of locks other than @lock and
987                                  * @conflict are held by the current thread, it
988                                  * cannot wait on @conflict's state change in a
989                                  * dead-lock safe manner, so simply skip early
990                                  * cancellation in this case.
991                                  *
992                                  * This means that early cancellation doesn't
993                                  * work when there is even slight mutex
994                                  * contention, as top-lock's mutex is usually
995                                  * held at this time.
996                                  */
997                                 ;
998                         else if (canwait) {
999                                 /* Waiting for @conflict to be destroyed */
1000                                 cl_lock_mutex_put(env, lock);
1001                                 do {
1002                                         rc = cl_lock_state_wait(env, conflict);
1003                                 } while (!rc &&
1004                                          conflict->cll_state < CLS_FREEING);
1005                                 /* mutex was released, repeat enqueue. */
1006                                 rc = rc ?: CLO_REPEAT;
1007                                 cl_lock_mutex_get(env, lock);
1008                         }
1009                 }
1010                 LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
1011                 CDEBUG(D_INFO, "lock %p was %sfreed now, rc (%d)\n",
1012                        conflict, rc ? "not " : "", rc);
1013         }
1014         return rc;
1015 }
1016
1017 /**
1018  * Determine if the lock should be converted into a lockless lock.
1019  *
1020  * Steps to check:
1021  * - whether the lock has an explicit requirement for a non-lockless lock;
1022  * - the io lock request type ci_lockreq;
1023  * - send the enqueue rpc to the ost to make the further decision;
1024  * - special treatment for lockless truncate.
1025  *
1026  *  Additional policy can be implemented here, e.g., never do lockless-io
1027  *  for large extents.
1028  */
1029 static void osc_lock_to_lockless(const struct lu_env *env,
1030                                  struct osc_lock *ols, int force)
1031 {
1032         struct cl_lock_slice *slice = &ols->ols_cl;
1033         struct cl_lock *lock        = slice->cls_lock;
1034
1035         LASSERT(ols->ols_state == OLS_NEW ||
1036                 ols->ols_state == OLS_UPCALL_RECEIVED);
1037
1038         if (force) {
1039                 ols->ols_locklessable = 1;
1040                 LASSERT(cl_lock_is_mutexed(lock));
1041                 slice->cls_ops = &osc_lock_lockless_ops;
1042         } else {
1043                 struct osc_io *oio     = osc_env_io(env);
1044                 struct cl_io  *io      = oio->oi_cl.cis_io;
1045                 struct cl_object *obj  = slice->cls_obj;
1046                 struct osc_object *oob = cl2osc(obj);
1047                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1048                 struct obd_connect_data *ocd;
1049
1050                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1051                         io->ci_lockreq == CILR_MAYBE ||
1052                         io->ci_lockreq == CILR_NEVER);
1053
1054                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1055                 ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
1056                                 (io->ci_lockreq == CILR_MAYBE) &&
1057                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1058                 if (io->ci_lockreq == CILR_NEVER ||
1059                         /* lockless IO */
1060                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1061                         /* lockless truncate */
1062                     (io->ci_type == CIT_TRUNC &&
1063                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1064                       osd->od_lockless_truncate)) {
1065                         ols->ols_locklessable = 1;
1066                         slice->cls_ops = &osc_lock_lockless_ops;
1067                 }
1068         }
1069         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1070 }
1071
1072 /**
1073  * Cancel all conflicting locks and wait for them to be destroyed.
1074  *
1075  * This function is used for two purposes:
1076  *
1077  *     - early cancel all conflicting locks before starting IO, and
1078  *
1079  *     - guarantee that pages added to the page cache by lockless IO are never
1080  *       covered by locks other than lockless IO lock, and, hence, are not
1081  *       visible to other threads.
1082  */
1083 static int osc_lock_enqueue_wait(const struct lu_env *env,
1084                                  const struct osc_lock *olck)
1085 {
1086         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1087         struct cl_lock_descr    *descr   = &lock->cll_descr;
1088         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1089         struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
1090         struct cl_lock          *scan;
1091         struct cl_lock          *temp;
1092         int lockless                     = osc_lock_is_lockless(olck);
1093         int rc                           = 0;
1094         int canwait;
1095         int stop;
1096         ENTRY;
1097
1098         LASSERT(cl_lock_is_mutexed(lock));
1099         LASSERT(lock->cll_state == CLS_QUEUING);
1100
1101         /*
1102          * XXX This function could be sped up if we had asynchronous
1103          * cancellation.
1104          */
1105
1106         canwait =
1107                 !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
1108                 cl_lock_nr_mutexed(env) == 1;
1109         cl_lock_closure_init(env, closure, lock, canwait);
1110         spin_lock(&hdr->coh_lock_guard);
1111         list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
1112                 if (scan == lock)
1113                         continue;
1114
1115                 if (scan->cll_state < CLS_QUEUING ||
1116                     scan->cll_state == CLS_FREEING ||
1117                     scan->cll_descr.cld_start > descr->cld_end ||
1118                     scan->cll_descr.cld_end < descr->cld_start)
1119                         continue;
1120
1121                 /* overlapped and living locks. */
1122
1123                 /* We're not supposed to give up group lock. */
1124                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1125                         LASSERT(descr->cld_mode != CLM_GROUP ||
1126                                 descr->cld_gid != scan->cll_descr.cld_gid);
1127                         continue;
1128                 }
1129
1130                 /* A tricky case for lockless pages:
1131                  * We need to cancel the compatible locks if we're enqueuing
1132                  * a lockless lock, for example:
1133                  * imagine that client has PR lock on [0, 1000], and thread T0
1134                  * is doing lockless IO in [500, 1500] region. Concurrent
1135                  * thread T1 can see lockless data in [500, 1000], which is
1136                  * wrong, because these data are possibly stale.
1137                  */
1138                 if (!lockless && cl_lock_compatible(scan, lock))
1139                         continue;
1140
1141                 /* Now @scan conflicts with @lock, which means the current
1142                  * thread has to sleep until @scan is destroyed. */
1143                 cl_lock_get_trust(scan);
1144                 if (&temp->cll_linkage != &hdr->coh_locks)
1145                         cl_lock_get_trust(temp);
1146                 spin_unlock(&hdr->coh_lock_guard);
1147                 lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
1148
1149                 LASSERT(list_empty(&closure->clc_list));
1150                 rc = cl_lock_closure_build(env, scan, closure);
1151                 if (rc == 0) {
1152                         rc = osc_lock_cancel_wait(env, lock, scan, canwait);
1153                         cl_lock_disclosure(env, closure);
1154                         if (rc == -EWOULDBLOCK)
1155                                 rc = 0;
1156                 }
1157                 if (rc == CLO_REPEAT && !canwait)
1158                         /* cannot wait... no early cancellation. */
1159                         rc = 0;
1160
1161                 lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
1162                 cl_lock_put(env, scan);
1163                 spin_lock(&hdr->coh_lock_guard);
1164                 /*
1165                  * Lock list could have been modified, while spin-lock was
1166                  * released. Check that it is safe to continue.
1167                  */
1168                 stop = list_empty(&temp->cll_linkage);
1169                 if (&temp->cll_linkage != &hdr->coh_locks)
1170                         cl_lock_put(env, temp);
1171                 if (stop || rc != 0)
1172                         break;
1173         }
1174         spin_unlock(&hdr->coh_lock_guard);
1175         cl_lock_closure_fini(closure);
1176         RETURN(rc);
1177 }
1178
1179 /**
1180  * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
1181  *
1182  *     - Thread0: obtains PR:[0, 10]. Lock is busy.
1183  *
1184  *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
1185  *       PR:[0, 10], but cancellation of busy lock is postponed.
1186  *
1187  *     - Thread0: enqueues PR:[30, 40]. Lock is locally matched to
1188  *       PW:[5, 50], and thread0 waits for the lock completion never
1189  *       releasing PR:[0, 10]---deadlock.
1190  *
1191  * The second PR lock can be a glimpse lock (it is to deal with that situation
1192  * that ll_glimpse_size() has a second argument, preventing local match of
1193  * not-yet-granted locks, see bug 10295). A similar situation is possible in
1194  * the case of a memory-mapped user-level buffer.
1195  *
1196  * To prevent this we can detect a situation when current "thread" or "io"
1197  * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
1198  * the ols->ols_flags, or prevent local match with PW locks.
1199  */
1200 static int osc_deadlock_is_possible(const struct lu_env *env,
1201                                     struct cl_lock *lock)
1202 {
1203         struct cl_object        *obj;
1204         struct cl_object_header *head;
1205         struct cl_lock          *scan;
1206         struct osc_io           *oio;
1207
1208         int result;
1209
1210         ENTRY;
1211
1212         LASSERT(cl_lock_is_mutexed(lock));
1213
1214         oio  = osc_env_io(env);
1215         obj  = lock->cll_descr.cld_obj;
1216         head = cl_object_header(obj);
1217
1218         result = 0;
1219         spin_lock(&head->coh_lock_guard);
1220         list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1221                 if (scan != lock) {
1222                         struct osc_lock *oscan;
1223
1224                         oscan = osc_lock_at(scan);
1225                         LASSERT(oscan != NULL);
1226                         if (oscan->ols_owner == oio) {
1227                                 result = 1;
1228                                 break;
1229                         }
1230                 }
1231         }
1232         spin_unlock(&head->coh_lock_guard);
1233         RETURN(result);
1234 }
1235
1236 /**
1237  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1238  * layer. This initiates ldlm enqueue:
1239  *
1240  *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
1241  *
1242  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1243  *
1244  *     - calls osc_enqueue_base() to do actual enqueue.
1245  *
1246  * osc_enqueue_base() is supplied with an upcall function that is executed
1247  * when lock is received either after a local cached ldlm lock is matched, or
1248  * when a reply from the server is received.
1249  *
1250  * This function does not wait for the network communication to complete.
1251  */
1252 static int osc_lock_enqueue(const struct lu_env *env,
1253                             const struct cl_lock_slice *slice,
1254                             struct cl_io *_, __u32 enqflags)
1255 {
1256         struct osc_lock          *ols     = cl2osc_lock(slice);
1257         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1258         struct osc_object        *obj     = cl2osc(slice->cls_obj);
1259         struct osc_thread_info   *info    = osc_env_info(env);
1260         struct ldlm_res_id       *resname = &info->oti_resname;
1261         ldlm_policy_data_t       *policy  = &info->oti_policy;
1262         struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
1263         int result;
1264         ENTRY;
1265
1266         LASSERT(cl_lock_is_mutexed(lock));
1267         LASSERT(lock->cll_state == CLS_QUEUING);
1268         LASSERT(ols->ols_state == OLS_NEW);
1269
1270         osc_lock_build_res(env, obj, resname);
1271         osc_lock_build_policy(env, lock, policy);
1272         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1273         if (osc_deadlock_is_possible(env, lock))
1274                 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1275         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1276                 ols->ols_glimpse = 1;
1277
1278         result = osc_lock_enqueue_wait(env, ols);
1279         if (result == 0) {
1280                 if (!(enqflags & CEF_MUST))
1281                         /* try to convert this lock to a lockless lock */
1282                         osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1283                 if (!osc_lock_is_lockless(ols)) {
1284                         if (ols->ols_locklessable)
1285                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1286
1287                         /* a reference for lock, passed as an upcall cookie */
1288                         cl_lock_get(lock);
1289                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1290                         ols->ols_state = OLS_ENQUEUED;
1291
1292                         /*
1293                          * XXX: this is possible blocking point as
1294                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1295                          * LDLM_CP_CALLBACK.
1296                          */
1297                         result = osc_enqueue_base(osc_export(obj), resname,
1298                                           &ols->ols_flags, policy,
1299                                           &ols->ols_lvb,
1300                                           obj->oo_oinfo->loi_kms_valid,
1301                                           osc_lock_upcall,
1302                                           ols, einfo, &ols->ols_handle,
1303                                           PTLRPCD_SET, 1);
1304                         if (result != 0) {
1305                                 lu_ref_del(&lock->cll_reference,
1306                                            "upcall", lock);
1307                                 cl_lock_put(env, lock);
1308                         }
1309                 } else {
1310                         ols->ols_state = OLS_GRANTED;
1311                 }
1312         }
1313         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1314         RETURN(result);
1315 }
1316
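/**
 * Waits until the lock is granted: returns 0 once the lock reached
 * OLS_GRANTED (glimpse locks are ready as soon as the upcall was received),
 * CLO_WAIT if the caller has to wait more, or the lock error, if any.
 */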
1317 static int osc_lock_wait(const struct lu_env *env,
1318                          const struct cl_lock_slice *slice)
1319 {
1320         struct osc_lock *olck = cl2osc_lock(slice);
1321         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1322
1323         LINVRNT(osc_lock_invariant(olck));
1324         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1325                 return 0;
1326
1327         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1328                      lock->cll_error == 0, olck->ols_lock != NULL));
1329
1330         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1331 }
1332
1333 /**
1334  * An implementation of cl_lock_operations::clo_use() method that pins cached
1335  * lock.
1336  */
1337 static int osc_lock_use(const struct lu_env *env,
1338                         const struct cl_lock_slice *slice)
1339 {
1340         struct osc_lock *olck = cl2osc_lock(slice);
1341         int rc;
1342
1343         LASSERT(!olck->ols_hold);
1344         /*
1345          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1346          * flag is not set. This protects us from a concurrent blocking ast.
1347          */
1348         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1349         if (rc == 0) {
1350                 olck->ols_hold = olck->ols_has_ref = 1;
1351                 olck->ols_state = OLS_GRANTED;
1352         } else {
1353                 struct cl_lock *lock;
1354
1355                 /*
1356                  * Lock is being cancelled somewhere within
1357                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1358                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1359                  * cl_lock mutex.
1360                  */
1361                 lock = slice->cls_lock;
1362                 LASSERT(lock->cll_state == CLS_CACHED);
1363                 LASSERT(lock->cll_users > 0);
1364                 /* set a flag for osc_dlm_blocking_ast0() to signal the
1365                  * lock. */
1366                 olck->ols_ast_wait = 1;
1367                 rc = CLO_WAIT;
1368         }
1369         return rc;
1370 }
1371
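/**
 * Writes out (and, if \a discard is set, discards) all pages covered by
 * \a ols by calling cl_lock_page_out() in a nested environment. On success,
 * osc_lock::ols_flush is set and the lock is expected to have no pages left.
 */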
1372 static int osc_lock_flush(struct osc_lock *ols, int discard)
1373 {
1374         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1375         struct cl_env_nest    nest;
1376         struct lu_env        *env;
1377         int result = 0;
1378
1379         env = cl_env_nested_get(&nest);
1380         if (!IS_ERR(env)) {
1381                 result = cl_lock_page_out(env, lock, discard);
1382                 cl_env_nested_put(&nest, env);
1383         } else
1384                 result = PTR_ERR(env);
1385         if (result == 0) {
1386                 ols->ols_flush = 1;
1387                 LINVRNT(!osc_lock_has_pages(ols));
1388         }
1389         return result;
1390 }
1391
1392 /**
1393  * Implements the cl_lock_operations::clo_cancel() method for the osc layer.
1394  * This is called (as part of cl_lock_cancel()) when a lock is cancelled
1395  * either voluntarily (LRU pressure, early cancellation, umount, etc.) or due
1396  * to a conflict with some other lock somewhere in the cluster. This function
1397  * does the following:
1398  *
1399  *     - invalidates all pages protected by this lock (after sending dirty
1400  *       ones to the server, as necessary);
1401  *
1402  *     - decrefs the underlying ldlm lock;
1403  *
1404  *     - cancels the ldlm lock (ldlm_cli_cancel()).
1405  */
1406 static void osc_lock_cancel(const struct lu_env *env,
1407                             const struct cl_lock_slice *slice)
1408 {
1409         struct cl_lock   *lock    = slice->cls_lock;
1410         struct osc_lock  *olck    = cl2osc_lock(slice);
1411         struct ldlm_lock *dlmlock = olck->ols_lock;
1412         int               result  = 0;
1413         int               discard;
1414
1415         LASSERT(cl_lock_is_mutexed(lock));
1416         LINVRNT(osc_lock_invariant(olck));
1417
1418         if (dlmlock != NULL) {
1419                 int do_cancel;
1420
1421                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1422                 result = osc_lock_flush(olck, discard);
1423                 if (olck->ols_hold)
1424                         osc_lock_unuse(env, slice);
1425
1426                 lock_res_and_lock(dlmlock);
1427                 /* Now that we are the only user of the dlm read/write
1428                  * reference, ->l_readers and ->l_writers should normally
1429                  * both be zero. However, there is a corner case.
1430                  * See bug 18829 for details. */
1431                 do_cancel = (dlmlock->l_readers == 0 &&
1432                              dlmlock->l_writers == 0);
1433                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1434                 unlock_res_and_lock(dlmlock);
1435                 if (do_cancel)
1436                         result = ldlm_cli_cancel(&olck->ols_handle);
1437                 if (result < 0)
1438                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1439                                       "lock %p cancel failure with error(%d)\n",
1440                                       lock, result);
1441         }
1442         olck->ols_state = OLS_CANCELLED;
1443         osc_lock_detach(env, olck);
1444 }
1445
1446 void cl_lock_page_list_fixup(const struct lu_env *env,
1447                              struct cl_io *io, struct cl_lock *lock,
1448                              struct cl_page_list *queue);
1449
1450 #ifdef INVARIANT_CHECK
1451 /**
1452  * Returns true iff there are pages under \a olck not protected by other
1453  * locks.
1454  */
1455 static int osc_lock_has_pages(struct osc_lock *olck)
1456 {
1457         struct cl_lock       *lock;
1458         struct cl_lock_descr *descr;
1459         struct cl_object     *obj;
1460         struct osc_object    *oob;
1461         struct cl_page_list  *plist;
1462         struct cl_page       *page;
1463         struct cl_env_nest    nest;
1464         struct cl_io         *io;
1465         struct lu_env        *env;
1466         int                   result;
1467
1468         env = cl_env_nested_get(&nest);
1469         if (!IS_ERR(env)) {
1470                 obj   = olck->ols_cl.cls_obj;
1471                 oob   = cl2osc(obj);
1472                 io    = &oob->oo_debug_io;
1473                 lock  = olck->ols_cl.cls_lock;
1474                 descr = &lock->cll_descr;
1475                 plist = &osc_env_info(env)->oti_plist;
1476                 cl_page_list_init(plist);
1477
1478                 mutex_lock(&oob->oo_debug_mutex);
1479
1480                 io->ci_obj = cl_object_top(obj);
1481                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1482                 cl_page_gang_lookup(env, obj, io,
1483                                     descr->cld_start, descr->cld_end, plist);
1484                 cl_lock_page_list_fixup(env, io, lock, plist);
1485                 if (plist->pl_nr > 0) {
1486                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1487                         cl_page_list_for_each(page, plist)
1488                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1489                 }
1490                 result = plist->pl_nr > 0;
1491                 cl_page_list_disown(env, io, plist);
1492                 cl_page_list_fini(env, plist);
1493                 cl_io_fini(env, io);
1494                 mutex_unlock(&oob->oo_debug_mutex);
1495                 cl_env_nested_put(&nest, env);
1496         } else
1497                 result = 0;
1498         return result;
1499 }
1500 #else
1501 static int osc_lock_has_pages(struct osc_lock *olck)
1502 {
1503         return 0;
1504 }
1505 #endif /* INVARIANT_CHECK */
1506
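/**
 * Implements cl_lock_operations::clo_delete() for the osc layer: drops the
 * ldlm hold (if any) and detaches the underlying ldlm lock when the cl_lock
 * is destroyed. Glimpse locks are expected to have neither a hold nor an
 * attached ldlm lock at this point, so they are handled as a no-op.
 */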
1507 static void osc_lock_delete(const struct lu_env *env,
1508                             const struct cl_lock_slice *slice)
1509 {
1510         struct osc_lock *olck;
1511
1512         olck = cl2osc_lock(slice);
1513         if (olck->ols_glimpse) {
1514                 LASSERT(!olck->ols_hold);
1515                 LASSERT(!olck->ols_lock);
1516                 return;
1517         }
1518
1519         LINVRNT(osc_lock_invariant(olck));
1520         LINVRNT(!osc_lock_has_pages(olck));
1521
1522         if (olck->ols_hold)
1523                 osc_lock_unuse(env, slice);
1524         osc_lock_detach(env, olck);
1525 }
1526
1527 /**
1528  * Implements the cl_lock_operations::clo_state() method for the osc layer.
1529  *
1530  * Maintains the osc_lock::ols_owner field.
1531  *
1532  * This assumes that a lock always enters CLS_HELD (from some other state) in
1533  * the same IO context as the one that requested the lock. This should not be
1534  * a problem, because the context is by definition shared by all activity
1535  * pertaining to the same high-level IO.
1536  */
1537 static void osc_lock_state(const struct lu_env *env,
1538                            const struct cl_lock_slice *slice,
1539                            enum cl_lock_state state)
1540 {
1541         struct osc_lock *lock = cl2osc_lock(slice);
1542         struct osc_io   *oio  = osc_env_io(env);
1543
1544         /*
1545          * XXX multiple io contexts can use the lock at the same time.
1546          */
1547         LINVRNT(osc_lock_invariant(lock));
1548         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1549                 LASSERT(lock->ols_owner == NULL);
1550                 lock->ols_owner = oio;
1551         } else if (state != CLS_HELD)
1552                 lock->ols_owner = NULL;
1553 }
1554
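/**
 * Implements cl_lock_operations::clo_print(): prints the ldlm lock pointer,
 * enqueue flags, handle cookie, osc_lock state and owner, followed by the
 * lock value block.
 */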
1555 static int osc_lock_print(const struct lu_env *env, void *cookie,
1556                           lu_printer_t p, const struct cl_lock_slice *slice)
1557 {
1558         struct osc_lock *lock = cl2osc_lock(slice);
1559
1560         /*
1561          * XXX print ldlm lock and einfo properly.
1562          */
1563         (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
1564              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1565              lock->ols_state, lock->ols_owner);
1566         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1567         return 0;
1568 }
1569
1570 static const struct cl_lock_operations osc_lock_ops = {
1571         .clo_fini    = osc_lock_fini,
1572         .clo_enqueue = osc_lock_enqueue,
1573         .clo_wait    = osc_lock_wait,
1574         .clo_unuse   = osc_lock_unuse,
1575         .clo_use     = osc_lock_use,
1576         .clo_delete  = osc_lock_delete,
1577         .clo_state   = osc_lock_state,
1578         .clo_cancel  = osc_lock_cancel,
1579         .clo_weigh   = osc_lock_weigh,
1580         .clo_print   = osc_lock_print
1581 };
1582
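/**
 * clo_enqueue() for lockless locks. By the time a lock is switched to
 * osc_lock_lockless_ops, osc_lock_enqueue() has already moved it to
 * OLS_GRANTED, so this method must never be called; hence the LBUG().
 */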
1583 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1584                                      const struct cl_lock_slice *slice,
1585                                      struct cl_io *_, __u32 enqflags)
1586 {
1587         LBUG();
1588         return 0;
1589 }
1590
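/**
 * clo_unuse() for lockless locks: there is no cached ldlm lock to keep
 * around, so the cl_lock is simply cancelled and deleted.
 */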
1591 static int osc_lock_lockless_unuse(const struct lu_env *env,
1592                                    const struct cl_lock_slice *slice)
1593 {
1594         struct osc_lock *ols = cl2osc_lock(slice);
1595         struct cl_lock *lock = slice->cls_lock;
1596
1597         LASSERT(ols->ols_state == OLS_GRANTED);
1598         LINVRNT(osc_lock_invariant(ols));
1599
1600         cl_lock_cancel(env, lock);
1601         cl_lock_delete(env, lock);
1602         return 0;
1603 }
1604
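/**
 * clo_cancel() for lockless locks: there is no ldlm lock to cancel, so only
 * the pages covered by the lock are flushed before the lock is moved to
 * OLS_CANCELLED.
 */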
1605 static void osc_lock_lockless_cancel(const struct lu_env *env,
1606                                      const struct cl_lock_slice *slice)
1607 {
1608         struct osc_lock   *ols  = cl2osc_lock(slice);
1609         int result;
1610
1611         result = osc_lock_flush(ols, 0);
1612         if (result)
1613                 CERROR("Pages for lockless lock %p were not purged (%d)\n",
1614                        ols, result);
1615         ols->ols_state = OLS_CANCELLED;
1616 }
1617
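/**
 * clo_wait() for lockless locks: by the time this is called the lock is
 * already granted, so only a pending error (if any) is reported.
 */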
1618 static int osc_lock_lockless_wait(const struct lu_env *env,
1619                                   const struct cl_lock_slice *slice)
1620 {
1621         struct osc_lock *olck = cl2osc_lock(slice);
1622         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1623
1624         LINVRNT(osc_lock_invariant(olck));
1625         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1626
1627         return lock->cll_error;
1628 }
1629
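/**
 * clo_state() for lockless locks: in addition to maintaining
 * osc_lock::ols_owner, marks the owning IO as lockless (oio->oi_lockless)
 * when the lock is for the IO's own object.
 */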
1630 static void osc_lock_lockless_state(const struct lu_env *env,
1631                                     const struct cl_lock_slice *slice,
1632                                     enum cl_lock_state state)
1633 {
1634         struct osc_lock *lock = cl2osc_lock(slice);
1635         struct osc_io   *oio  = osc_env_io(env);
1636
1637         LINVRNT(osc_lock_invariant(lock));
1638         if (state == CLS_HELD) {
1639                 LASSERT(lock->ols_owner == NULL);
1640                 lock->ols_owner = oio;
1641
1642                 /* set the io to be lockless if this lock is for the io's
1643                  * host object */
1644                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1645                         oio->oi_lockless = 1;
1646         } else
1647                 lock->ols_owner = NULL;
1648 }
1649
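/**
 * clo_fits_into() for lockless locks: always reports a mismatch, so a
 * lockless lock is never reused to satisfy other lock requests.
 */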
1650 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1651                                        const struct cl_lock_slice *slice,
1652                                        const struct cl_lock_descr *need,
1653                                        const struct cl_io *io)
1654 {
1655         return 0;
1656 }
1657
1658 static const struct cl_lock_operations osc_lock_lockless_ops = {
1659         .clo_fini      = osc_lock_fini,
1660         .clo_enqueue   = osc_lock_lockless_enqueue,
1661         .clo_wait      = osc_lock_lockless_wait,
1662         .clo_unuse     = osc_lock_lockless_unuse,
1663         .clo_state     = osc_lock_lockless_state,
1664         .clo_fits_into = osc_lock_lockless_fits_into,
1665         .clo_cancel    = osc_lock_lockless_cancel,
1666         .clo_print     = osc_lock_print
1667 };
1668
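/**
 * Allocates an osc_lock slice for \a lock and attaches it to \a obj with
 * osc_lock_ops; the new lock starts in the OLS_NEW state. Returns 0 on
 * success or -ENOMEM if the slice cannot be allocated.
 */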
1669 int osc_lock_init(const struct lu_env *env,
1670                   struct cl_object *obj, struct cl_lock *lock,
1671                   const struct cl_io *_)
1672 {
1673         struct osc_lock *clk;
1674         int result;
1675
1676         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1677         if (clk != NULL) {
1678                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1679                 clk->ols_state = OLS_NEW;
1680                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1681                 result = 0;
1682         } else
1683                 result = -ENOMEM;
1684         return result;
1685 }
1686
1687
1688 /** @} osc */