1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc
54  *  @{
55  */
56
57 /*****************************************************************************
58  *
59  * Type conversions.
60  *
61  */
62
63 static const struct cl_lock_operations osc_lock_ops;
64 static const struct cl_lock_operations osc_lock_lockless_ops;
65 static void osc_lock_to_lockless(const struct lu_env *env,
66                                  struct osc_lock *ols, int force);
67 static int osc_lock_has_pages(struct osc_lock *olck);
68
69 int osc_lock_is_lockless(const struct osc_lock *olck)
70 {
71         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
72 }
73
74 /**
75  * Returns a weak pointer to the ldlm lock identified by a handle. The
76  * returned pointer cannot be dereferenced, as the lock is not protected from
77  * concurrent reclaim. This function is a helper for osc_lock_invariant().
78  */
79 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
80 {
81         struct ldlm_lock *lock;
82
83         lock = ldlm_handle2lock(handle);
84         if (lock != NULL)
85                 LDLM_LOCK_PUT(lock);
86         return lock;
87 }
88
89 /**
90  * Invariant that has to be true all of the time.
91  */
92 static int osc_lock_invariant(struct osc_lock *ols)
93 {
94         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
95         struct ldlm_lock *olock       = ols->ols_lock;
96         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
97
98         return
99                 ergo(osc_lock_is_lockless(ols),
100                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
101                 (ergo(olock != NULL, handle_used) &&
102                  ergo(olock != NULL,
103                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
104                  /*
105                   * Check that ->ols_handle and ->ols_lock are consistent, but
106                   * take into account that they are set at different times.
107                   */
108                  ergo(handle_used,
109                       ergo(lock != NULL && olock != NULL, lock == olock) &&
110                       ergo(lock == NULL, olock == NULL)) &&
111                  ergo(ols->ols_state == OLS_CANCELLED,
112                       olock == NULL && !handle_used) &&
113                  /*
114                   * DLM lock is destroyed only after we have seen cancellation
115                   * ast.
116                   */
117                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
118                       !olock->l_destroyed) &&
119                  ergo(ols->ols_state == OLS_GRANTED,
120                       olock != NULL &&
121                       olock->l_req_mode == olock->l_granted_mode &&
122                       ols->ols_hold));
123 }
124
125 /*****************************************************************************
126  *
127  * Lock operations.
128  *
129  */
130
131 /**
132  * Breaks a link between osc_lock and dlm_lock.
133  */
134 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
135 {
136         struct ldlm_lock *dlmlock;
137
138         spin_lock(&osc_ast_guard);
139         dlmlock = olck->ols_lock;
140         if (dlmlock == NULL) {
141                 spin_unlock(&osc_ast_guard);
142                 return;
143         }
144
145         olck->ols_lock = NULL;
146         /* wb(); --- for all who check (ols->ols_lock != NULL) before
147          * calling osc_lock_detach() */
148         dlmlock->l_ast_data = NULL;
149         olck->ols_handle.cookie = 0ULL;
150         spin_unlock(&osc_ast_guard);
151
152         lock_res_and_lock(dlmlock);
153         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
154                 struct cl_object *obj = olck->ols_cl.cls_obj;
155                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
156                 __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
157
158                 /* Update the kms. Need to loop over all granted locks.
159                  * Not a problem for the client. */
160                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
161                 unlock_res_and_lock(dlmlock);
162
163                 cl_object_attr_lock(obj);
164                 cl_object_attr_set(env, obj, attr, CAT_KMS);
165                 cl_object_attr_unlock(obj);
166         } else
167                 unlock_res_and_lock(dlmlock);
168
169         /* release a reference taken in osc_lock_upcall0(). */
170         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
171         LDLM_LOCK_RELEASE(dlmlock);
172 }
173
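/**
 * Drops the hold on the underlying dlm lock: the osc_lock is moved into
 * OLS_RELEASED before osc_cancel_base() is called on the saved handle, so
 * that a possible synchronous cancellation already sees the lock as
 * released. Glimpse locks hold no reference and return immediately.
 * (Presumably used as cl_lock_operations::clo_unuse() via osc_lock_ops.)
 */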
174 static int osc_lock_unuse(const struct lu_env *env,
175                           const struct cl_lock_slice *slice)
176 {
177         struct osc_lock *ols = cl2osc_lock(slice);
178         int result;
179
180         LASSERT(ols->ols_state == OLS_GRANTED ||
181                 ols->ols_state == OLS_UPCALL_RECEIVED);
182         LINVRNT(osc_lock_invariant(ols));
183
184         if (ols->ols_glimpse) {
185                 LASSERT(ols->ols_hold == 0);
186                 return 0;
187         }
188         LASSERT(ols->ols_hold);
189
190         /*
191          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
192          * so that possible synchronous cancellation (which always happens,
193          * e.g., for liblustre) sees that the lock is released.
194          */
195         ols->ols_state = OLS_RELEASED;
196         ols->ols_hold = 0;
197         result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
198         ols->ols_has_ref = 0;
199         return result;
200 }
201
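/**
 * Finalizes an osc_lock: drops a still-held dlm reference (which can happen
 * when the enqueuing thread was killed before the server reply arrived, see
 * the comment in the body) and frees the slice back to osc_lock_kmem.
 * (Presumably installed as cl_lock_operations::clo_fini() in osc_lock_ops.)
 */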
202 static void osc_lock_fini(const struct lu_env *env,
203                           struct cl_lock_slice *slice)
204 {
205         struct osc_lock  *ols = cl2osc_lock(slice);
206
207         LINVRNT(osc_lock_invariant(ols));
208         /*
209          * ->ols_hold can still be true at this point if, for example, a
210          * thread that requested a lock was killed (and released a reference
211          * to the lock) before a reply from the server was received. In this
212          * case the lock is destroyed immediately after the upcall.
213          */
214         if (ols->ols_hold)
215                 osc_lock_unuse(env, slice);
216         LASSERT(ols->ols_lock == NULL);
217
218         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
219 }
220
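/**
 * Builds the ldlm resource name for \a obj. The fid-based path is compiled
 * out (the "if (0)" branch below); in practice the name is derived from
 * ->loi_id and ->loi_gr through osc_build_res_name().
 */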
221 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
222                         struct ldlm_res_id *resname)
223 {
224         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
225         if (0) {
226                 /*
227                  * In the perfect world of the future, where ost servers talk
228                  * idif-fids...
229                  */
230                 fid_build_reg_res_name(fid, resname);
231         } else {
232                 /*
233                  * In reality, the ost server expects ->lsm_object_id and
234                  * ->lsm_object_gr in the resource name.
235                  */
236                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
237                                    resname);
238         }
239 }
240
241 static void osc_lock_build_policy(const struct lu_env *env,
242                                   const struct cl_lock *lock,
243                                   ldlm_policy_data_t *policy)
244 {
245         const struct cl_lock_descr *d = &lock->cll_descr;
246
247         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
248         policy->l_extent.gid = d->cld_gid;
249 }
250
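/**
 * Translates cl enqueue flags (CEF_*) into the corresponding LDLM_FL_* /
 * LDLM_AST_* flags understood by the dlm layer.
 */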
251 static int osc_enq2ldlm_flags(__u32 enqflags)
252 {
253         int result = 0;
254
255         LASSERT((enqflags & ~CEF_MASK) == 0);
256
257         if (enqflags & CEF_NONBLOCK)
258                 result |= LDLM_FL_BLOCK_NOWAIT;
259         if (enqflags & CEF_ASYNC)
260                 result |= LDLM_FL_HAS_INTENT;
261         if (enqflags & CEF_DISCARD_DATA)
262                 result |= LDLM_AST_DISCARD_DATA;
263         return result;
264 }
265
266 /**
267  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
268  * pointers. Initialized in osc_init().
269  */
270 spinlock_t osc_ast_guard;
271
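/**
 * Returns the osc_lock attached to \a dlm_lock through ->l_ast_data, taking
 * a trusted reference on the corresponding cl_lock, or NULL if all osc
 * references on the dlm lock have been released. Must be paired with
 * osc_ast_data_put().
 */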
272 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
273 {
274         struct osc_lock *olck;
275
276         lock_res_and_lock(dlm_lock);
277         spin_lock(&osc_ast_guard);
278         olck = dlm_lock->l_ast_data;
279         if (olck != NULL) {
280                 struct cl_lock *lock = olck->ols_cl.cls_lock;
281                 /*
282                  * If osc_lock holds a reference on ldlm lock, return it even
283                  * when cl_lock is in CLS_FREEING state. This way
284                  *
285                  *         osc_ast_data_get(dlmlock) == NULL
286                  *
287                  * guarantees that all osc references on dlmlock were
288                  * released. osc_dlm_blocking_ast0() relies on that.
289                  */
290                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
291                         cl_lock_get_trust(lock);
292                         lu_ref_add_atomic(&lock->cll_reference,
293                                           "ast", cfs_current());
294                 } else
295                         olck = NULL;
296         }
297         spin_unlock(&osc_ast_guard);
298         unlock_res_and_lock(dlm_lock);
299         return olck;
300 }
301
302 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
303 {
304         struct cl_lock *lock;
305
306         lock = olck->ols_cl.cls_lock;
307         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
308         cl_lock_put(env, lock);
309 }
310
311 /**
312  * Updates object attributes from a lock value block (lvb) received together
313  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
314  * logic.
315  *
316  * This can be optimized to not update attributes when lock is a result of a
317  * local match.
318  *
319  * Called under lock and resource spin-locks.
320  */
321 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
322                                 int rc)
323 {
324         struct ost_lvb    *lvb;
325         struct cl_object  *obj;
326         struct lov_oinfo  *oinfo;
327         struct cl_attr    *attr;
328         unsigned           valid;
329
330         ENTRY;
331
332         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
333                 EXIT;
334                 return;
335         }
336
337         lvb   = &olck->ols_lvb;
338         obj   = olck->ols_cl.cls_obj;
339         oinfo = cl2osc(obj)->oo_oinfo;
340         attr  = &osc_env_info(env)->oti_attr;
341         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
342         cl_lvb2attr(attr, lvb);
343
344         cl_object_attr_lock(obj);
345         if (rc == 0) {
346                 struct ldlm_lock  *dlmlock;
347                 __u64 size;
348
349                 dlmlock = olck->ols_lock;
350                 LASSERT(dlmlock != NULL);
351
352                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
353                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
354                 size = lvb->lvb_size;
355                 /* Extend KMS up to the end of this lock and no further.
356                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
357                 if (size > dlmlock->l_policy_data.l_extent.end)
358                         size = dlmlock->l_policy_data.l_extent.end + 1;
359                 if (size >= oinfo->loi_kms) {
360                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
361                                    ", kms="LPU64, lvb->lvb_size, size);
362                         valid |= CAT_KMS;
363                         attr->cat_kms = size;
364                 } else {
365                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
366                                    LPU64"; leaving kms="LPU64", end="LPU64,
367                                    lvb->lvb_size, oinfo->loi_kms,
368                                    dlmlock->l_policy_data.l_extent.end);
369                 }
370                 ldlm_lock_allow_match_locked(dlmlock);
371         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
372                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
373                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
374         } else
375                 valid = 0;
376
377         if (valid != 0)
378                 cl_object_attr_set(env, obj, attr, valid);
379
380         cl_object_attr_unlock(obj);
381
382         EXIT;
383 }
384
385 /**
386  * Called when a lock is granted, from an upcall (when server returned a
387  * granted lock), or from completion AST, when server returned a blocked lock.
388  *
389  * Called under lock and resource spin-locks, which are released temporarily
390  * here.
391  */
392 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
393                              struct ldlm_lock *dlmlock, int rc)
394 {
395         struct ldlm_extent   *ext;
396         struct cl_lock       *lock;
397         struct cl_lock_descr *descr;
398
399         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
400
401         ENTRY;
402         if (olck->ols_state != OLS_GRANTED) {
403                 lock  = olck->ols_cl.cls_lock;
404                 ext   = &dlmlock->l_policy_data.l_extent;
405                 descr = &osc_env_info(env)->oti_descr;
406                 descr->cld_obj = lock->cll_descr.cld_obj;
407
408                 /* XXX check that ->l_granted_mode is valid. */
409                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
410                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
411                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
412                 descr->cld_gid   = ext->gid;
413                 /*
414                  * tell upper layers the extent of the lock that was actually
415                  * granted
416                  */
417                 olck->ols_state = OLS_GRANTED;
418                 osc_lock_lvb_update(env, olck, rc);
419
420                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
421                  * to take a semaphore on a parent lock. This is safe, because
422                  * spin-locks are needed to protect consistency of
423                  * dlmlock->l_*_mode and LVB, and we have finished processing
424                  * them. */
425                 unlock_res_and_lock(dlmlock);
426                 cl_lock_modify(env, lock, descr);
427                 cl_lock_signal(env, lock);
428                 LINVRNT(osc_lock_invariant(olck));
429                 lock_res_and_lock(dlmlock);
430         }
431         EXIT;
432 }
433
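/**
 * Second half of a successful enqueue upcall: binds the osc_lock to the dlm
 * lock found through ->ols_handle, finishes granting if the dlm lock is
 * already granted (otherwise the completion ast will do it), and takes the
 * references recorded in ->ols_hold / ->ols_has_ref.
 */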
434 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
436 {
437         struct ldlm_lock *dlmlock;
438
439         ENTRY;
440
441         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
442         LASSERT(dlmlock != NULL);
443
444         lock_res_and_lock(dlmlock);
445         spin_lock(&osc_ast_guard);
446         LASSERT(dlmlock->l_ast_data == olck);
447         LASSERT(olck->ols_lock == NULL);
448         olck->ols_lock = dlmlock;
449         spin_unlock(&osc_ast_guard);
450
451         /*
452          * Lock might not be granted yet. In this case, the completion ast
453          * (osc_ldlm_completion_ast()) comes later and finishes lock
454          * granting.
455          */
456         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
457                 osc_lock_granted(env, olck, dlmlock, 0);
458         unlock_res_and_lock(dlmlock);
459
460         /*
461          * osc_enqueue_interpret() decrefs asynchronous locks; take a
462          * reference here to counter this.
463          */
464         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
465         olck->ols_hold = olck->ols_has_ref = 1;
466
467         /* lock reference taken by ldlm_handle2lock_long() is owned by
468          * osc_lock and released in osc_lock_detach() */
469         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
470 }
471
472 /**
473  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
474  * received from a server, or after osc_enqueue_base() matched a local DLM
475  * lock.
476  */
477 static int osc_lock_upcall(void *cookie, int errcode)
478 {
479         struct osc_lock         *olck  = cookie;
480         struct cl_lock_slice    *slice = &olck->ols_cl;
481         struct cl_lock          *lock  = slice->cls_lock;
482         struct lu_env           *env;
483         struct cl_env_nest       nest;
484
485         ENTRY;
486         env = cl_env_nested_get(&nest);
487         if (!IS_ERR(env)) {
488                 int rc;
489
490                 cl_lock_mutex_get(env, lock);
491
492                 LASSERT(lock->cll_state >= CLS_QUEUING);
493                 if (olck->ols_state == OLS_ENQUEUED) {
494                         olck->ols_state = OLS_UPCALL_RECEIVED;
495                         rc = ldlm_error2errno(errcode);
496                 } else if (olck->ols_state == OLS_CANCELLED) {
497                         rc = -EIO;
498                 } else {
499                         CERROR("Impossible state: %i\n", olck->ols_state);
500                         LBUG();
501                 }
502                 if (rc) {
503                         struct ldlm_lock *dlmlock;
504
505                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
506                         if (dlmlock != NULL) {
507                                 lock_res_and_lock(dlmlock);
508                                 spin_lock(&osc_ast_guard);
509                                 LASSERT(olck->ols_lock == NULL);
510                                 dlmlock->l_ast_data = NULL;
511                                 olck->ols_handle.cookie = 0ULL;
512                                 spin_unlock(&osc_ast_guard);
513                                 unlock_res_and_lock(dlmlock);
514                                 LDLM_LOCK_PUT(dlmlock);
515                         }
516                 } else {
517                         if (olck->ols_glimpse)
518                                 olck->ols_glimpse = 0;
519                         osc_lock_upcall0(env, olck);
520                 }
521
522                 /* Error handling, some errors are tolerable. */
523                 if (olck->ols_locklessable && rc == -EUSERS) {
524                         /* This is a tolerable error; turn this lock into
525                          * a lockless lock.
526                          */
527                         osc_object_set_contended(cl2osc(slice->cls_obj));
528                         LASSERT(slice->cls_ops == &osc_lock_ops);
529
530                         /* Convert this into a lockless lock (one without an ldlm lock). */
531                         osc_lock_to_lockless(env, olck, 1);
532                         olck->ols_state = OLS_GRANTED;
533                         rc = 0;
534                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
535                         osc_lock_lvb_update(env, olck, rc);
536                         cl_lock_delete(env, lock);
537                         /* Hide the error. */
538                         rc = 0;
539                 }
540
541                 if (rc == 0)
542                         /* on error, lock was signaled by cl_lock_error() */
543                         cl_lock_signal(env, lock);
544                 else
545                         cl_lock_error(env, lock, rc);
546
547                 cl_lock_mutex_put(env, lock);
548
549                 /* release cookie reference, acquired by osc_lock_enqueue() */
550                 lu_ref_del(&lock->cll_reference, "upcall", lock);
551                 cl_lock_put(env, lock);
552                 cl_env_nested_put(&nest, env);
553         } else
554                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
555                 LBUG();
556         RETURN(errcode);
557 }
558
559 /**
560  * Core of osc_dlm_blocking_ast() logic.
561  */
562 static void osc_lock_blocking(const struct lu_env *env,
563                               struct ldlm_lock *dlmlock,
564                               struct osc_lock *olck, int blocking)
565 {
566         struct cl_lock *lock = olck->ols_cl.cls_lock;
567
568         LASSERT(olck->ols_lock == dlmlock);
569         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
570         LASSERT(!osc_lock_is_lockless(olck));
571
572         if (olck->ols_hold)
573                 /*
574                  * Lock might still be addref-ed here if, e.g., a blocking ast
575                  * is sent for a failed lock.
576                  */
577                 osc_lock_unuse(env, &olck->ols_cl);
578
579         if (blocking && olck->ols_state < OLS_BLOCKED)
580                 /*
581                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
582                  * because it recursively re-enters osc_lock_blocking(), with
583                  * the state set to OLS_CANCELLED.
584                  */
585                 olck->ols_state = OLS_BLOCKED;
586         /*
587          * cancel and destroy lock at least once no matter how blocking ast is
588          * entered (see comment above osc_ldlm_blocking_ast() for use
589          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
590          */
591         cl_lock_cancel(env, lock);
592         cl_lock_delete(env, lock);
593 }
594
595 /**
596  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
597  * and ldlm_lock caches.
598  */
599 static int osc_dlm_blocking_ast0(const struct lu_env *env,
600                                  struct ldlm_lock *dlmlock,
601                                  void *data, int flag)
602 {
603         struct osc_lock *olck;
604         struct cl_lock  *lock;
605         int result;
606         int cancel;
607
608         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
609
610         cancel = 0;
611         olck = osc_ast_data_get(dlmlock);
612         if (olck != NULL) {
613                 lock = olck->ols_cl.cls_lock;
614                 cl_lock_mutex_get(env, lock);
615                 LINVRNT(osc_lock_invariant(olck));
616                 if (olck->ols_ast_wait) {
617                         /* wake up osc_lock_use() */
618                         cl_lock_signal(env, lock);
619                         olck->ols_ast_wait = 0;
620                 }
621                 /*
622                  * Lock might have been canceled while this thread was
623                  * sleeping on the lock mutex, but olck is pinned in memory.
624                  */
625                 if (olck == dlmlock->l_ast_data) {
626                         /*
627                          * NOTE: DLM sends blocking AST's for failed locks
628                          *       (that are still in pre-OLS_GRANTED state)
629                          *       too, and they have to be canceled, otherwise
630                          *       the DLM lock is never destroyed and stays
631                          *       stuck in memory.
632                          *
633                          *       Alternatively, ldlm_cli_cancel() can be
634                          *       called here directly for osc_locks with
635                          *       ols_state < OLS_GRANTED to maintain an
636                          *       invariant that ->clo_cancel() is only called
637                          *       for locks that were granted.
638                          */
639                         LASSERT(data == olck);
640                         osc_lock_blocking(env, dlmlock,
641                                           olck, flag == LDLM_CB_BLOCKING);
642                 } else
643                         cancel = 1;
644                 cl_lock_mutex_put(env, lock);
645                 osc_ast_data_put(env, olck);
646         } else
647                 /*
648                  * DLM lock exists, but there is no cl_lock attached to it.
649                  * This is a `normal' race. cl_object and its cl_locks can be
650                  * removed by memory pressure, together with all pages.
651                  */
652                 cancel = (flag == LDLM_CB_BLOCKING);
653
654         if (cancel) {
655                 struct lustre_handle *lockh;
656
657                 lockh = &osc_env_info(env)->oti_handle;
658                 ldlm_lock2handle(dlmlock, lockh);
659                 result = ldlm_cli_cancel(lockh);
660         } else
661                 result = 0;
662         return result;
663 }
664
665 /**
666  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
667  * some other lock, or is canceled. This function is installed as a
668  * ldlm_lock::l_blocking_ast() for client extent locks.
669  *
670  * Control flow is tricky, because ldlm uses the same call-back
671  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
672  *
673  * \param dlmlock lock for which ast occurred.
674  *
675  * \param new description of a conflicting lock in case of blocking ast.
676  *
677  * \param data value of dlmlock->l_ast_data
678  *
679  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
680  *             cancellation and blocking ast's.
681  *
682  * Possible use cases:
683  *
684  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
685  *       lock due to lock lru pressure, or explicit user request to purge
686  *       locks.
687  *
688  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
689  *       us that dlmlock conflicts with another lock that some client is
690  *       enqueing. Lock is canceled.
691  *
692  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
693  *             ldlm_cli_cancel() that calls
694  *
695  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
696  *
697  *             recursively entering osc_ldlm_blocking_ast().
698  *
699  *     - client cancels the lock voluntarily (e.g., as a part of early cancellation):
700  *
701  *           cl_lock_cancel()->
702  *             osc_lock_cancel()->
703  *               ldlm_cli_cancel()->
704  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
705  *
706  */
707 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
708                                  struct ldlm_lock_desc *new, void *data,
709                                  int flag)
710 {
711         struct lu_env     *env;
712         struct cl_env_nest nest;
713         int                result;
714
715         /*
716          * This can be called in the context of outer IO, e.g.,
717          *
718          *     cl_enqueue()->...
719          *       ->osc_enqueue_base()->...
720          *         ->ldlm_prep_elc_req()->...
721          *           ->ldlm_cancel_callback()->...
722          *             ->osc_ldlm_blocking_ast()
723          *
724          * A new environment has to be created so as not to corrupt the outer context.
725          */
726         env = cl_env_nested_get(&nest);
727         if (!IS_ERR(env)) {
728                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
729                 cl_env_nested_put(&nest, env);
730         } else {
731                 result = PTR_ERR(env);
732                 /*
733                  * XXX This should never happen, as cl_lock is
734                  * stuck. Pre-allocated environment a la vvp_inode_fini_env
735                  * should be used.
736                  */
737                 LBUG();
738         }
739         if (result != 0) {
740                 if (result == -ENODATA)
741                         result = 0;
742                 else
743                         CERROR("BAST failed: %d\n", result);
744         }
745         return result;
746 }
747
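/**
 * Completion AST registered for client extent locks (see
 * osc_lock_build_einfo()). Runs the generic dlm completion handling first,
 * then copies the LVB into the osc_lock and, if the osc_lock is already
 * bound and the dlm lock has a granted mode, finishes granting through
 * osc_lock_granted().
 */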
748 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
749                                    int flags, void *data)
750 {
751         struct cl_env_nest nest;
752         struct lu_env     *env;
753         struct osc_lock   *olck;
754         struct cl_lock    *lock;
755         int result;
756         int dlmrc;
757
758         /* first, do dlm part of the work */
759         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
760         /* then, notify cl_lock */
761         env = cl_env_nested_get(&nest);
762         if (!IS_ERR(env)) {
763                 olck = osc_ast_data_get(dlmlock);
764                 if (olck != NULL) {
765                         lock = olck->ols_cl.cls_lock;
766                         cl_lock_mutex_get(env, lock);
767                         /*
768                          * ldlm_handle_cp_callback() copied LVB from request
769                          * to lock->l_lvb_data, store it in osc_lock.
770                          */
771                         LASSERT(dlmlock->l_lvb_data != NULL);
772                         lock_res_and_lock(dlmlock);
773                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
774                         if (olck->ols_lock == NULL)
775                                 /*
776                                  * upcall (osc_lock_upcall()) hasn't yet been
777                                  * called. Do nothing now, upcall will bind
778                                  * olck to dlmlock and signal the waiters.
779                                  *
780                                  * This maintains an invariant that osc_lock
781                                  * and ldlm_lock are always bound when
782                                  * osc_lock is in OLS_GRANTED state.
783                                  */
784                                 ;
785                         else if (dlmlock->l_granted_mode != LCK_MINMODE)
786                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
787                         unlock_res_and_lock(dlmlock);
788                         if (dlmrc != 0)
789                                 cl_lock_error(env, lock, dlmrc);
790                         cl_lock_mutex_put(env, lock);
791                         osc_ast_data_put(env, olck);
792                         result = 0;
793                 } else
794                         result = -ELDLM_NO_LOCK_DATA;
795                 cl_env_nested_put(&nest, env);
796         } else
797                 result = PTR_ERR(env);
798         return dlmrc ?: result;
799 }
800
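/**
 * Glimpse AST registered for client extent locks (see
 * osc_lock_build_einfo()). Packs a reply containing an up-to-date LVB
 * obtained through cl_object_glimpse(); if no cl_lock is attached any more,
 * it replies with -ELDLM_NO_LOCK_DATA, which is a normal race.
 */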
801 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
802 {
803         struct ptlrpc_request  *req  = data;
804         struct osc_lock        *olck;
805         struct cl_lock         *lock;
806         struct cl_object       *obj;
807         struct cl_env_nest      nest;
808         struct lu_env          *env;
809         struct ost_lvb         *lvb;
810         struct req_capsule     *cap;
811         int                     result;
812
813         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
814
815         env = cl_env_nested_get(&nest);
816         if (!IS_ERR(env)) {
817                 /*
818                  * osc_ast_data_get() has to go after environment is
819                  * allocated, because osc_ast_data_get() acquires a
820                  * reference to a lock, and that reference can only be
821                  * released within an environment.
822                  */
823                 olck = osc_ast_data_get(dlmlock);
824                 if (olck != NULL) {
825                         lock = olck->ols_cl.cls_lock;
826                         cl_lock_mutex_get(env, lock);
827                         cap = &req->rq_pill;
828                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
829                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
830                                              sizeof *lvb);
831                         result = req_capsule_server_pack(cap);
832                         if (result == 0) {
833                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
834                                 obj = lock->cll_descr.cld_obj;
835                                 result = cl_object_glimpse(env, obj, lvb);
836                         }
837                         cl_lock_mutex_put(env, lock);
838                         osc_ast_data_put(env, olck);
839                 } else {
840                         /*
841                          * These errors are normal races, so we don't want to
842                          * fill the console with messages by calling
843                          * ptlrpc_error()
844                          */
845                         lustre_pack_reply(req, 1, NULL, NULL);
846                         result = -ELDLM_NO_LOCK_DATA;
847                 }
848                 cl_env_nested_put(&nest, env);
849         } else
850                 result = PTR_ERR(env);
851         req->rq_status = result;
852         return result;
853 }
854
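/**
 * Returns a rough weight of the lock: the number of pages cached for the
 * object. (Presumably reached through cl_lock_weigh() from
 * osc_ldlm_weigh_ast() below, as cl_lock_operations::clo_weigh().)
 */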
855 static unsigned long osc_lock_weigh(const struct lu_env *env,
856                                     const struct cl_lock_slice *slice)
857 {
858         /*
859          * don't need to grab coh_page_guard since we don't care about the
860          * exact number of pages.
861          */
862         return cl_object_header(slice->cls_obj)->coh_pages;
863 }
864
865 /**
866  * Get the weight of dlm lock for early cancellation.
867  *
868  * XXX: it should return the pages covered by this \a dlmlock.
869  */
870 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
871 {
872         struct cl_env_nest       nest;
873         struct lu_env           *env;
874         struct osc_lock         *lock;
875         struct cl_lock          *cll;
876         unsigned long            weight;
877         ENTRY;
878
879         might_sleep();
880         /*
881          * osc_ldlm_weigh_ast has a complex context since it might be called
882          * because of lock cancellation, or from user input. We have to make
883          * a new environment for it. It would probably be safe to use the
884          * upper context because cl_lock_put() does not modify environment
885          * variables, but create a new one just in case.
886          */
887         env = cl_env_nested_get(&nest);
888         if (IS_ERR(env))
889                 /* Mostly because of lack of memory; tend to eliminate this lock. */
890                 RETURN(0);
891
892         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
893         lock = osc_ast_data_get(dlmlock);
894         if (lock == NULL) {
895                 /* cl_lock was destroyed because of memory pressure.
896                  * It is reasonable to assign this type of lock
897                  * a lower cost.
898                  */
899                 GOTO(out, weight = 0);
900         }
901
902         cll = lock->ols_cl.cls_lock;
903         cl_lock_mutex_get(env, cll);
904         weight = cl_lock_weigh(env, cll);
905         cl_lock_mutex_put(env, cll);
906         osc_ast_data_put(env, lock);
907         EXIT;
908
909 out:
910         cl_env_nested_put(&nest, env);
911         return weight;
912 }
913
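/**
 * Fills the ldlm_enqueue_info used for the dlm enqueue (passed to
 * osc_enqueue_base() by osc_lock_enqueue()): extent lock type, the requested
 * mode (phantom/glimpse locks are enqueued as reads), the osc_ldlm_*_ast()
 * callbacks above, and the osc_lock itself as the future ->l_ast_data.
 */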
914 static void osc_lock_build_einfo(const struct lu_env *env,
915                                  const struct cl_lock *clock,
916                                  struct osc_lock *lock,
917                                  struct ldlm_enqueue_info *einfo)
918 {
919         enum cl_lock_mode mode;
920
921         mode = clock->cll_descr.cld_mode;
922         if (mode == CLM_PHANTOM)
923                 /*
924                  * For now, enqueue all glimpse locks in read mode. In the
925                  * future, client might choose to enqueue LCK_PW lock for
926                  * glimpse on a file opened for write.
927                  */
928                 mode = CLM_READ;
929
930         einfo->ei_type   = LDLM_EXTENT;
931         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
932         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
933         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
934         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
935         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
936         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
937 }
938
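/**
 * Helper that deletes \a conflict under a freshly created nested
 * environment; used by osc_lock_cancel_wait() during early cancellation of
 * conflicting locks.
 */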
939 static int osc_lock_delete0(struct cl_lock *conflict)
940 {
941         struct cl_env_nest    nest;
942         struct lu_env        *env;
943         int    rc = 0;
944
945         env = cl_env_nested_get(&nest);
946         if (!IS_ERR(env)) {
947                 cl_lock_delete(env, conflict);
948                 cl_env_nested_put(&nest, env);
949         } else
950                 rc = PTR_ERR(env);
951         return rc;
952 }
953 /**
954  * Cancels \a conflict lock and waits until it reaches CLS_FREEING state. This
955  * is called as a part of enqueuing to cancel conflicting locks early.
956  *
957  * \retval            0: success, \a conflict was cancelled and destroyed.
958  *
959  * \retval   CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
960  *                       released in the process. Repeat enqueuing.
961  *
962  * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
963  *                       either \a lock is non-blocking, or the current thread
964  *                       holds other locks that prevent it from waiting
965  *                       for the cancel to complete.
966  *
967  * \retval          -ve: other error, including -EINTR.
968  *
969  */
970 static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
971                                 struct cl_lock *conflict, int canwait)
972 {
973         int rc;
974
975         LASSERT(cl_lock_is_mutexed(lock));
976         LASSERT(cl_lock_is_mutexed(conflict));
977
978         rc = 0;
979         if (conflict->cll_state != CLS_FREEING) {
980                 cl_lock_cancel(env, conflict);
981                 rc = osc_lock_delete0(conflict);
982                 if (rc)
983                         return rc;
984                 if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
985                         rc = -EWOULDBLOCK;
986                         if (cl_lock_nr_mutexed(env) > 2)
987                                 /*
988                                  * If mutexes of locks other than @lock and
989                                  * @scan are held by the current thread, it
990                                  * cannot wait on @scan state change in a
991                                  * dead-lock safe manner, so simply skip early
992                                  * cancellation in this case.
993                                  *
994                                  * This means that early cancellation doesn't
995                                  * work when there is even slight mutex
996                                  * contention, as top-lock's mutex is usually
997                                  * held at this time.
998                                  */
999                                 ;
1000                         else if (canwait) {
1001                                 /* Waiting for @scan to be destroyed */
1002                                 cl_lock_mutex_put(env, lock);
1003                                 do {
1004                                         rc = cl_lock_state_wait(env, conflict);
1005                                 } while (!rc &&
1006                                          conflict->cll_state < CLS_FREEING);
1007                                 /* mutex was released, repeat enqueue. */
1008                                 rc = rc ?: CLO_REPEAT;
1009                                 cl_lock_mutex_get(env, lock);
1010                         }
1011                 }
1012                 LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
1013                 CDEBUG(D_INFO, "lock %p was %sfreed now, rc (%d)\n",
1014                        conflict, rc ? "not " : "", rc);
1015         }
1016         return rc;
1017 }
1018
1019 /**
1020  * Determine if the lock should be converted into a lockless lock.
1021  *
1022  * Steps to check:
1023  * - whether the lock has an explicit requirement for a non-lockless lock;
1024  * - the io lock request type ci_lockreq;
1025  * - send the enqueue rpc to the ost to make the further decision;
1026  * - special treatment for lockless truncate locks.
1027  *
1028  *  Additional policy can be implemented here, e.g., never do lockless-io
1029  *  for large extents.
1030  */
1031 static void osc_lock_to_lockless(const struct lu_env *env,
1032                                  struct osc_lock *ols, int force)
1033 {
1034         struct cl_lock_slice *slice = &ols->ols_cl;
1035         struct cl_lock *lock        = slice->cls_lock;
1036
1037         LASSERT(ols->ols_state == OLS_NEW ||
1038                 ols->ols_state == OLS_UPCALL_RECEIVED);
1039
1040         if (force) {
1041                 ols->ols_locklessable = 1;
1042                 LASSERT(cl_lock_is_mutexed(lock));
1043                 slice->cls_ops = &osc_lock_lockless_ops;
1044         } else {
1045                 struct osc_io *oio     = osc_env_io(env);
1046                 struct cl_io  *io      = oio->oi_cl.cis_io;
1047                 struct cl_object *obj  = slice->cls_obj;
1048                 struct osc_object *oob = cl2osc(obj);
1049                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1050                 struct obd_connect_data *ocd;
1051
1052                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1053                         io->ci_lockreq == CILR_MAYBE ||
1054                         io->ci_lockreq == CILR_NEVER);
1055
1056                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1057                 ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
1058                                 (io->ci_lockreq == CILR_MAYBE) &&
1059                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1060                 if (io->ci_lockreq == CILR_NEVER ||
1061                         /* lockless IO */
1062                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1063                         /* lockless truncate */
1064                     (io->ci_type == CIT_TRUNC &&
1065                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1066                       osd->od_lockless_truncate)) {
1067                         ols->ols_locklessable = 1;
1068                         slice->cls_ops = &osc_lock_lockless_ops;
1069                 }
1070         }
1071         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1072 }
1073
1074 /**
1075  * Cancel all conflicting locks and wait for them to be destroyed.
1076  *
1077  * This function is used for two purposes:
1078  *
1079  *     - early cancel all conflicting locks before starting IO, and
1080  *
1081  *     - guarantee that pages added to the page cache by lockless IO are never
1082  *       covered by locks other than lockless IO lock, and, hence, are not
1083  *       visible to other threads.
1084  */
1085 static int osc_lock_enqueue_wait(const struct lu_env *env,
1086                                  const struct osc_lock *olck)
1087 {
1088         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1089         struct cl_lock_descr    *descr   = &lock->cll_descr;
1090         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1091         struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
1092         struct cl_lock          *scan;
1093         struct cl_lock          *temp;
1094         int lockless                     = osc_lock_is_lockless(olck);
1095         int rc                           = 0;
1096         int canwait;
1097         int stop;
1098         ENTRY;
1099
1100         LASSERT(cl_lock_is_mutexed(lock));
1101         LASSERT(lock->cll_state == CLS_QUEUING);
1102
1103         /*
1104          * XXX This function could be sped up if we had asynchronous
1105          * cancellation.
1106          */
1107
1108         canwait =
1109                 !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
1110                 cl_lock_nr_mutexed(env) == 1;
1111         cl_lock_closure_init(env, closure, lock, canwait);
1112         spin_lock(&hdr->coh_lock_guard);
1113         list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
1114                 if (scan == lock)
1115                         continue;
1116
1117                 if (scan->cll_state < CLS_QUEUING ||
1118                     scan->cll_state == CLS_FREEING ||
1119                     scan->cll_descr.cld_start > descr->cld_end ||
1120                     scan->cll_descr.cld_end < descr->cld_start)
1121                         continue;
1122
1123                 /* overlapping and live locks. */
1124
1125                 /* We're not supposed to give up group lock. */
1126                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1127                         LASSERT(descr->cld_mode != CLM_GROUP ||
1128                                 descr->cld_gid != scan->cll_descr.cld_gid);
1129                         continue;
1130                 }
1131
1132                 /* A tricky case for lockless pages:
1133                  * We need to cancel the compatible locks if we're enqueuing
1134                  * a lockless lock, for example:
1135                  * imagine that client has PR lock on [0, 1000], and thread T0
1136                  * is doing lockless IO in [500, 1500] region. Concurrent
1137                  * thread T1 can see lockless data in [500, 1000], which is
1138                  * wrong, because these data are possibly stale.
1139                  */
1140                 if (!lockless && cl_lock_compatible(scan, lock))
1141                         continue;
1142
1143                 /* Now @scan is conflicting with @lock; this means the current
1144                  * thread has to wait for @scan to be destroyed. */
1145                 cl_lock_get_trust(scan);
1146                 if (&temp->cll_linkage != &hdr->coh_locks)
1147                         cl_lock_get_trust(temp);
1148                 spin_unlock(&hdr->coh_lock_guard);
1149                 lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
1150
1151                 LASSERT(list_empty(&closure->clc_list));
1152                 rc = cl_lock_closure_build(env, scan, closure);
1153                 if (rc == 0) {
1154                         rc = osc_lock_cancel_wait(env, lock, scan, canwait);
1155                         cl_lock_disclosure(env, closure);
1156                         if (rc == -EWOULDBLOCK)
1157                                 rc = 0;
1158                 }
1159                 if (rc == CLO_REPEAT && !canwait)
1160                         /* cannot wait... no early cancellation. */
1161                         rc = 0;
1162
1163                 lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
1164                 cl_lock_put(env, scan);
1165                 spin_lock(&hdr->coh_lock_guard);
1166                 /*
1167                  * Lock list could have been modified while the spin-lock was
1168                  * released. Check that it is safe to continue.
1169                  */
1170                 stop = list_empty(&temp->cll_linkage);
1171                 if (&temp->cll_linkage != &hdr->coh_locks)
1172                         cl_lock_put(env, temp);
1173                 if (stop || rc != 0)
1174                         break;
1175         }
1176         spin_unlock(&hdr->coh_lock_guard);
1177         cl_lock_closure_fini(closure);
1178         RETURN(rc);
1179 }
1180
1181 /**
1182  * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
1183  *
1184  *     - Thread0: obtains PR:[0, 10]. Lock is busy.
1185  *
1186  *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
1187  *       PR:[0, 10], but cancellation of busy lock is postponed.
1188  *
1189  *     - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
1190  *       PW:[5, 50], and thread0 waits for the lock completion never
1191  *       releasing PR:[0, 10]---deadlock.
1192  *
1193  * The second PR lock can be a glimpse lock (it is to deal with that situation
1194  * that ll_glimpse_size() has a second argument, preventing local match of
1195  * not-yet-granted locks, see bug 10295). A similar situation is possible in
1196  * the case of a memory mapped user level buffer.
1197  *
1198  * To prevent this we can detect a situation when current "thread" or "io"
1199  * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
1200  * the ols->ols_flags, or prevent local match with PW locks.
1201  */
1202 static int osc_deadlock_is_possible(const struct lu_env *env,
1203                                     struct cl_lock *lock)
1204 {
1205         struct cl_object        *obj;
1206         struct cl_object_header *head;
1207         struct cl_lock          *scan;
1208         struct osc_io           *oio;
1209
1210         int result;
1211
1212         ENTRY;
1213
1214         LASSERT(cl_lock_is_mutexed(lock));
1215
1216         oio  = osc_env_io(env);
1217         obj  = lock->cll_descr.cld_obj;
1218         head = cl_object_header(obj);
1219
1220         result = 0;
1221         spin_lock(&head->coh_lock_guard);
1222         list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1223                 if (scan != lock) {
1224                         struct osc_lock *oscan;
1225
1226                         oscan = osc_lock_at(scan);
1227                         LASSERT(oscan != NULL);
1228                         if (oscan->ols_owner == oio) {
1229                                 result = 1;
1230                                 break;
1231                         }
1232                 }
1233         }
1234         spin_unlock(&head->coh_lock_guard);
1235         RETURN(result);
1236 }
1237
1238 /**
1239  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1240  * layer. This initiates ldlm enqueue:
1241  *
1242  *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
1243  *
1244  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1245  *
1246  *     - calls osc_enqueue_base() to do actual enqueue.
1247  *
1248  * osc_enqueue_base() is supplied with an upcall function that is executed
1249  * when lock is received either after a local cached ldlm lock is matched, or
1250  * when a reply from the server is received.
1251  *
1252  * This function does not wait for the network communication to complete.
1253  */
1254 static int osc_lock_enqueue(const struct lu_env *env,
1255                             const struct cl_lock_slice *slice,
1256                             struct cl_io *unused, __u32 enqflags)
1257 {
1258         struct osc_lock          *ols     = cl2osc_lock(slice);
1259         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1260         struct osc_object        *obj     = cl2osc(slice->cls_obj);
1261         struct osc_thread_info   *info    = osc_env_info(env);
1262         struct ldlm_res_id       *resname = &info->oti_resname;
1263         ldlm_policy_data_t       *policy  = &info->oti_policy;
1264         struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
1265         int result;
1266         ENTRY;
1267
1268         LASSERT(cl_lock_is_mutexed(lock));
1269         LASSERT(lock->cll_state == CLS_QUEUING);
1270         LASSERT(ols->ols_state == OLS_NEW);
1271
1272         osc_lock_build_res(env, obj, resname);
1273         osc_lock_build_policy(env, lock, policy);
1274         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1275         if (osc_deadlock_is_possible(env, lock))
1276                 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1277         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1278                 ols->ols_glimpse = 1;
1279
1280         result = osc_lock_enqueue_wait(env, ols);
1281         if (result == 0) {
1282                 if (!(enqflags & CEF_MUST))
1283                         /* try to convert this lock to a lockless lock */
1284                         osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1285                 if (!osc_lock_is_lockless(ols)) {
1286                         if (ols->ols_locklessable)
1287                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1288
1289                         /* a reference for lock, passed as an upcall cookie */
1290                         cl_lock_get(lock);
1291                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1292                         ols->ols_state = OLS_ENQUEUED;
1293
1294                         /*
1295                          * XXX: this is a possible blocking point, as
1296                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1297                          * LDLM_CP_CALLBACK.
1298                          */
1299                         result = osc_enqueue_base(osc_export(obj), resname,
1300                                           &ols->ols_flags, policy,
1301                                           &ols->ols_lvb,
1302                                           obj->oo_oinfo->loi_kms_valid,
1303                                           osc_lock_upcall,
1304                                           ols, einfo, &ols->ols_handle,
1305                                           PTLRPCD_SET, 1);
1306                         if (result != 0) {
1307                                 lu_ref_del(&lock->cll_reference,
1308                                            "upcall", lock);
1309                                 cl_lock_put(env, lock);
1310                         }
1311                 } else {
1312                         ols->ols_state = OLS_GRANTED;
1313                 }
1314         }
1315         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1316         RETURN(result);
1317 }
1318
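/**
 * Waits for the lock to be granted: returns 0 once the osc_lock has reached
 * OLS_GRANTED (or, for glimpse locks, once the upcall was received), the
 * lock error if one was signalled, and CLO_WAIT otherwise. (Presumably
 * installed as cl_lock_operations::clo_wait() in osc_lock_ops.)
 */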
1319 static int osc_lock_wait(const struct lu_env *env,
1320                          const struct cl_lock_slice *slice)
1321 {
1322         struct osc_lock *olck = cl2osc_lock(slice);
1323         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1324
1325         LINVRNT(osc_lock_invariant(olck));
1326         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1327                 return 0;
1328
1329         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1330                      lock->cll_error == 0, olck->ols_lock != NULL));
1331
1332         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1333 }
1334
1335 /**
1336  * An implementation of cl_lock_operations::clo_use() method that pins a
1337  * cached lock.
1338  */
1339 static int osc_lock_use(const struct lu_env *env,
1340                         const struct cl_lock_slice *slice)
1341 {
1342         struct osc_lock *olck = cl2osc_lock(slice);
1343         int rc;
1344
1345         LASSERT(!olck->ols_hold);
1346         /*
1347          * Atomically check for LDLM_FL_CBPENDING and addref the lock only if
1348          * this flag is not set. This protects us from a concurrent blocking AST.
1349          */
1350         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1351         if (rc == 0) {
1352                 olck->ols_hold = olck->ols_has_ref = 1;
1353                 olck->ols_state = OLS_GRANTED;
1354         } else {
1355                 struct cl_lock *lock;
1356
1357                 /*
1358                  * The lock is being cancelled somewhere within
1359                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1360                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1361                  * the cl_lock mutex.
1362                  */
1363                 lock = slice->cls_lock;
1364                 LASSERT(lock->cll_state == CLS_CACHED);
1365                 LASSERT(lock->cll_users > 0);
1366                 /* Set a flag for osc_dlm_blocking_ast0() to signal the
1367                  * lock. */
1368                 olck->ols_ast_wait = 1;
1369                 rc = CLO_WAIT;
1370         }
1371         return rc;
1372 }
1373
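/**
 * Writes out and optionally discards pages covered by \a ols, using a nested
 * cl environment. On success the lock is marked as flushed.
 */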
1374 static int osc_lock_flush(struct osc_lock *ols, int discard)
1375 {
1376         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1377         struct cl_env_nest    nest;
1378         struct lu_env        *env;
1379         int result = 0;
1380
1381         env = cl_env_nested_get(&nest);
1382         if (!IS_ERR(env)) {
1383                 result = cl_lock_page_out(env, lock, discard);
1384                 cl_env_nested_put(&nest, env);
1385         } else
1386                 result = PTR_ERR(env);
1387         if (result == 0) {
1388                 ols->ols_flush = 1;
1389                 LINVRNT(!osc_lock_has_pages(ols));
1390         }
1391         return result;
1392 }
1393
1394 /**
1395  * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
1396  * called (as part of cl_lock_cancel()) when a lock is cancelled either
1397  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
1398  * conflict with some other lock somewhere in the cluster. This function does
1399  * the following:
1400  *
1401  *     - invalidates all pages protected by this lock (after sending dirty
1402  *       ones to the server, as necessary);
1403  *
1404  *     - decrefs the underlying ldlm lock;
1405  *
1406  *     - cancels the ldlm lock (ldlm_cli_cancel()).
1407  */
1408 static void osc_lock_cancel(const struct lu_env *env,
1409                             const struct cl_lock_slice *slice)
1410 {
1411         struct cl_lock   *lock    = slice->cls_lock;
1412         struct osc_lock  *olck    = cl2osc_lock(slice);
1413         struct ldlm_lock *dlmlock = olck->ols_lock;
1414         int               result  = 0;
1415         int               discard;
1416
1417         LASSERT(cl_lock_is_mutexed(lock));
1418         LINVRNT(osc_lock_invariant(olck));
1419
1420         if (dlmlock != NULL) {
1421                 int do_cancel;
1422
1423                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1424                 result = osc_lock_flush(olck, discard);
1425                 if (olck->ols_hold)
1426                         osc_lock_unuse(env, slice);
1427
1428                 lock_res_and_lock(dlmlock);
1429                 /* Now that we are the only user of the dlm read/write
1430                  * reference, ->l_readers and ->l_writers should normally
1431                  * both be zero. However, there is a corner case;
1432                  * see bug 18829 for details. */
1433                 do_cancel = (dlmlock->l_readers == 0 &&
1434                              dlmlock->l_writers == 0);
1435                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1436                 unlock_res_and_lock(dlmlock);
1437                 if (do_cancel)
1438                         result = ldlm_cli_cancel(&olck->ols_handle);
1439                 if (result < 0)
1440                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1441                                       "lock %p cancel failure with error(%d)\n",
1442                                       lock, result);
1443         }
1444         olck->ols_state = OLS_CANCELLED;
1445         osc_lock_detach(env, olck);
1446 }
1447
1448 void cl_lock_page_list_fixup(const struct lu_env *env,
1449                              struct cl_io *io, struct cl_lock *lock,
1450                              struct cl_page_list *queue);
1451
1452 #ifdef INVARIANT_CHECK
1453 /**
1454  * Returns true iff there are pages under \a olck not protected by other
1455  * locks.
1456  */
1457 static int osc_lock_has_pages(struct osc_lock *olck)
1458 {
1459         struct cl_lock       *lock;
1460         struct cl_lock_descr *descr;
1461         struct cl_object     *obj;
1462         struct osc_object    *oob;
1463         struct cl_page_list  *plist;
1464         struct cl_page       *page;
1465         struct cl_env_nest    nest;
1466         struct cl_io         *io;
1467         struct lu_env        *env;
1468         int                   result;
1469
1470         env = cl_env_nested_get(&nest);
1471         if (!IS_ERR(env)) {
1472                 obj   = olck->ols_cl.cls_obj;
1473                 oob   = cl2osc(obj);
1474                 io    = &oob->oo_debug_io;
1475                 lock  = olck->ols_cl.cls_lock;
1476                 descr = &lock->cll_descr;
1477                 plist = &osc_env_info(env)->oti_plist;
1478                 cl_page_list_init(plist);
1479
1480                 mutex_lock(&oob->oo_debug_mutex);
1481
1482                 io->ci_obj = cl_object_top(obj);
1483                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1484                 cl_page_gang_lookup(env, obj, io,
1485                                     descr->cld_start, descr->cld_end, plist, 0);
1486                 cl_lock_page_list_fixup(env, io, lock, plist);
1487                 if (plist->pl_nr > 0) {
1488                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1489                         cl_page_list_for_each(page, plist)
1490                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1491                 }
1492                 result = plist->pl_nr > 0;
1493                 cl_page_list_disown(env, io, plist);
1494                 cl_page_list_fini(env, plist);
1495                 cl_io_fini(env, io);
1496                 mutex_unlock(&oob->oo_debug_mutex);
1497                 cl_env_nested_put(&nest, env);
1498         } else
1499                 result = 0;
1500         return result;
1501 }
1502 #else
1503 static int osc_lock_has_pages(struct osc_lock *olck)
1504 {
1505         return 0;
1506 }
1507 #endif /* INVARIANT_CHECK */
1508
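/**
 * Implements cl_lock_operations::clo_delete() method for osc layer: drops the
 * lock usage (if any) and detaches the underlying ldlm lock. Glimpse locks
 * have no ldlm lock attached, so nothing needs to be done for them.
 */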
1509 static void osc_lock_delete(const struct lu_env *env,
1510                             const struct cl_lock_slice *slice)
1511 {
1512         struct osc_lock *olck;
1513
1514         olck = cl2osc_lock(slice);
1515         if (olck->ols_glimpse) {
1516                 LASSERT(!olck->ols_hold);
1517                 LASSERT(!olck->ols_lock);
1518                 return;
1519         }
1520
1521         LINVRNT(osc_lock_invariant(olck));
1522         LINVRNT(!osc_lock_has_pages(olck));
1523
1524         if (olck->ols_hold)
1525                 osc_lock_unuse(env, slice);
1526         osc_lock_detach(env, olck);
1527 }
1528
1529 /**
1530  * Implements cl_lock_operations::clo_state() method for osc layer.
1531  *
1532  * Maintains osc_lock::ols_owner field.
1533  *
1534  * This assumes that a lock always enters CLS_HELD (from some other state) in
1535  * the same IO context as the one that requested the lock. This should not be
1536  * a problem, because the context is by definition shared by all activity
1537  * pertaining to the same high-level IO.
1538  */
1539 static void osc_lock_state(const struct lu_env *env,
1540                            const struct cl_lock_slice *slice,
1541                            enum cl_lock_state state)
1542 {
1543         struct osc_lock *lock = cl2osc_lock(slice);
1544         struct osc_io   *oio  = osc_env_io(env);
1545
1546         /*
1547          * XXX multiple io contexts can use the lock at the same time.
1548          */
1549         LINVRNT(osc_lock_invariant(lock));
1550         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1551                 LASSERT(lock->ols_owner == NULL);
1552                 lock->ols_owner = oio;
1553         } else if (state != CLS_HELD)
1554                 lock->ols_owner = NULL;
1555 }
1556
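/**
 * Implements cl_lock_operations::clo_print() method for osc layer: prints the
 * ldlm lock pointer, flags, handle cookie, state, owner and lvb.
 */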
1557 static int osc_lock_print(const struct lu_env *env, void *cookie,
1558                           lu_printer_t p, const struct cl_lock_slice *slice)
1559 {
1560         struct osc_lock *lock = cl2osc_lock(slice);
1561
1562         /*
1563          * XXX print ldlm lock and einfo properly.
1564          */
1565         (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
1566              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1567              lock->ols_state, lock->ols_owner);
1568         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1569         return 0;
1570 }
1571
1572 static const struct cl_lock_operations osc_lock_ops = {
1573         .clo_fini    = osc_lock_fini,
1574         .clo_enqueue = osc_lock_enqueue,
1575         .clo_wait    = osc_lock_wait,
1576         .clo_unuse   = osc_lock_unuse,
1577         .clo_use     = osc_lock_use,
1578         .clo_delete  = osc_lock_delete,
1579         .clo_state   = osc_lock_state,
1580         .clo_cancel  = osc_lock_cancel,
1581         .clo_weigh   = osc_lock_weigh,
1582         .clo_print   = osc_lock_print
1583 };
1584
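/**
 * clo_enqueue() for lockless locks. Locks are converted to lockless mode
 * inside osc_lock_enqueue() (see osc_lock_to_lockless()), so this method
 * should never be called; it only traps misuse with LBUG().
 */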
1585 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1586                                      const struct cl_lock_slice *slice,
1587                                      struct cl_io *unused, __u32 enqflags)
1588 {
1589         LBUG();
1590         return 0;
1591 }
1592
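/**
 * clo_unuse() for lockless locks: a lockless lock cannot be cached, so it is
 * cancelled and deleted as soon as it is no longer in use.
 */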
1593 static int osc_lock_lockless_unuse(const struct lu_env *env,
1594                                    const struct cl_lock_slice *slice)
1595 {
1596         struct osc_lock *ols = cl2osc_lock(slice);
1597         struct cl_lock *lock = slice->cls_lock;
1598
1599         LASSERT(ols->ols_state == OLS_GRANTED);
1600         LINVRNT(osc_lock_invariant(ols));
1601
1602         cl_lock_cancel(env, lock);
1603         cl_lock_delete(env, lock);
1604         return 0;
1605 }
1606
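/**
 * clo_cancel() for lockless locks: flushes the pages covered by the lock and
 * moves it into OLS_CANCELLED state; no ldlm cancellation is issued.
 */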
1607 static void osc_lock_lockless_cancel(const struct lu_env *env,
1608                                      const struct cl_lock_slice *slice)
1609 {
1610         struct osc_lock   *ols  = cl2osc_lock(slice);
1611         int result;
1612
1613         result = osc_lock_flush(ols, 0);
1614         if (result)
1615                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1616                        ols, result);
1617         ols->ols_state = OLS_CANCELLED;
1618 }
1619
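/**
 * clo_wait() for lockless locks: by the time this is called the upcall has
 * already been received, so only a recorded lock error can be returned.
 */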
1620 static int osc_lock_lockless_wait(const struct lu_env *env,
1621                                   const struct cl_lock_slice *slice)
1622 {
1623         struct osc_lock *olck = cl2osc_lock(slice);
1624         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1625
1626         LINVRNT(osc_lock_invariant(olck));
1627         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1628
1629         return lock->cll_error;
1630 }
1631
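/**
 * clo_state() for lockless locks: in addition to tracking the owning osc_io
 * (as osc_lock_state() does), marks the io as lockless when the lock is for
 * the io's host object.
 */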
1632 static void osc_lock_lockless_state(const struct lu_env *env,
1633                                     const struct cl_lock_slice *slice,
1634                                     enum cl_lock_state state)
1635 {
1636         struct osc_lock *lock = cl2osc_lock(slice);
1637         struct osc_io   *oio  = osc_env_io(env);
1638
1639         LINVRNT(osc_lock_invariant(lock));
1640         if (state == CLS_HELD) {
1641                 LASSERT(lock->ols_owner == NULL);
1642                 lock->ols_owner = oio;
1643
1644                 /* Set the io to be lockless if this lock is for the io's
1645                  * host object. */
1646                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1647                         oio->oi_lockless = 1;
1648         } else
1649                 lock->ols_owner = NULL;
1650 }
1651
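/**
 * clo_fits_into() for lockless locks: always returns 0, so a lockless lock is
 * never matched against and reused by another lock request.
 */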
1652 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1653                                        const struct cl_lock_slice *slice,
1654                                        const struct cl_lock_descr *need,
1655                                        const struct cl_io *io)
1656 {
1657         return 0;
1658 }
1659
1660 static const struct cl_lock_operations osc_lock_lockless_ops = {
1661         .clo_fini      = osc_lock_fini,
1662         .clo_enqueue   = osc_lock_lockless_enqueue,
1663         .clo_wait      = osc_lock_lockless_wait,
1664         .clo_unuse     = osc_lock_lockless_unuse,
1665         .clo_state     = osc_lock_lockless_state,
1666         .clo_fits_into = osc_lock_lockless_fits_into,
1667         .clo_cancel    = osc_lock_lockless_cancel,
1668         .clo_print     = osc_lock_print
1669 };
1670
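/**
 * Allocates an osc_lock slice for \a lock and adds it to the cl_lock with
 * osc_lock_ops. Returns 0 on success and -ENOMEM if the slab allocation
 * fails.
 */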
1671 int osc_lock_init(const struct lu_env *env,
1672                   struct cl_object *obj, struct cl_lock *lock,
1673                   const struct cl_io *unused)
1674 {
1675         struct osc_lock *clk;
1676         int result;
1677
1678         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1679         if (clk != NULL) {
1680                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1681                 clk->ols_state = OLS_NEW;
1682                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1683                 result = 0;
1684         } else
1685                 result = -ENOMEM;
1686         return result;
1687 }
1688
1689
1690 /** @} osc */