lustre/osc/osc_lock.c (fs/lustre-release.git @ e9d49fe5ab794a6a38c958c62022706047ce6620)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for OSC layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_OSC
42
43 #ifdef __KERNEL__
44 # include <libcfs/libcfs.h>
45 #else
46 # include <liblustre.h>
47 #endif
48 /* fid_build_reg_res_name() */
49 #include <lustre_fid.h>
50
51 #include "osc_cl_internal.h"
52
53 /** \addtogroup osc 
54  *  @{ 
55  */
56
57 /*****************************************************************************
58  *
59  * Type conversions.
60  *
61  */
62
63 static const struct cl_lock_operations osc_lock_ops;
64 static const struct cl_lock_operations osc_lock_lockless_ops;
65 static void osc_lock_to_lockless(const struct lu_env *env,
66                                  struct osc_lock *ols, int force);
67 static int osc_lock_has_pages(struct osc_lock *olck);
68
69 int osc_lock_is_lockless(const struct osc_lock *olck)
70 {
71         return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
72 }
73
74 /**
75  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
76  * pointer cannot be dereferenced, as lock is not protected from concurrent
77  * reclaim. This function is a helper for osc_lock_invariant().
78  */
79 static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
80 {
81         struct ldlm_lock *lock;
82
83         lock = ldlm_handle2lock(handle);
84         if (lock != NULL)
85                 LDLM_LOCK_PUT(lock);
86         return lock;
87 }
88
89 /**
90  * Invariant that has to be true all of the time.
91  */
92 static int osc_lock_invariant(struct osc_lock *ols)
93 {
94         struct ldlm_lock *lock        = osc_handle_ptr(&ols->ols_handle);
95         struct ldlm_lock *olock       = ols->ols_lock;
96         int               handle_used = lustre_handle_is_used(&ols->ols_handle);
97
98         return
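        /*
         * Note (added for clarity): ergo(a, b) below reads as "a implies b",
         * i.e. (!(a) || (b)).
         */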
99                 ergo(osc_lock_is_lockless(ols),
100                      ols->ols_locklessable && ols->ols_lock == NULL)  ||
101                 (ergo(olock != NULL, handle_used) &&
102                  ergo(olock != NULL,
103                       olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
104                  /*
105                   * Check that ->ols_handle and ->ols_lock are consistent, but
106                   * take into account that they are set at different times.
107                   */
108                  ergo(handle_used,
109                       ergo(lock != NULL && olock != NULL, lock == olock) &&
110                       ergo(lock == NULL, olock == NULL)) &&
111                  ergo(ols->ols_state == OLS_CANCELLED,
112                       olock == NULL && !handle_used) &&
113                  /*
114                   * DLM lock is destroyed only after we have seen cancellation
115                   * ast.
116                   */
117                  ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
118                       !olock->l_destroyed) &&
119                  ergo(ols->ols_state == OLS_GRANTED,
120                       olock != NULL &&
121                       olock->l_req_mode == olock->l_granted_mode &&
122                       ols->ols_hold));
123 }
124
125 /*****************************************************************************
126  *
127  * Lock operations.
128  *
129  */
130
131 /**
132  * Breaks a link between osc_lock and dlm_lock.
133  */
134 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
135 {
136         struct ldlm_lock *dlmlock;
137
138         /* reset the osc lock's state because it might be queued again. */
139         olck->ols_state = OLS_NEW;
140         spin_lock(&osc_ast_guard);
141         dlmlock = olck->ols_lock;
142         if (dlmlock == NULL) {
143                 spin_unlock(&osc_ast_guard);
144                 return;
145         }
146
147         olck->ols_lock = NULL;
148         /* wb(); --- for all who check (ols->ols_lock != NULL) before
149          * calling osc_lock_detach() */
150         dlmlock->l_ast_data = NULL;
151         olck->ols_handle.cookie = 0ULL;
152         spin_unlock(&osc_ast_guard);
153
154         lock_res_and_lock(dlmlock);
155         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
156                 struct cl_object *obj = olck->ols_cl.cls_obj;
157                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
158                 __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
159
160                 /* Update the kms. Need to loop over all granted locks.
161                  * Not a problem for the client. */
162                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
163                 unlock_res_and_lock(dlmlock);
164
165                 cl_object_attr_lock(obj);
166                 cl_object_attr_set(env, obj, attr, CAT_KMS);
167                 cl_object_attr_unlock(obj);
168         } else
169                 unlock_res_and_lock(dlmlock);
170
171         /* release a reference taken in osc_lock_upcall0(). */
172         LASSERT(olck->ols_has_ref);
173         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
174         LDLM_LOCK_RELEASE(dlmlock);
175         olck->ols_has_ref = 0;
176 }
177
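/*
 * Drops the "hold" reference on the ldlm lock (set in osc_lock_upcall0() and
 * osc_lock_use()) by calling osc_cancel_base(), if it is still held.
 */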
178 static int osc_lock_unhold(struct osc_lock *ols)
179 {
180         int result = 0;
181
182         if (ols->ols_hold) {
183                 ols->ols_hold = 0;
184                 result = osc_cancel_base(&ols->ols_handle,
185                                          ols->ols_einfo.ei_mode);
186         }
187         return result;
188 }
189
190 static int osc_lock_unuse(const struct lu_env *env,
191                           const struct cl_lock_slice *slice)
192 {
193         struct osc_lock *ols = cl2osc_lock(slice);
194
195         LASSERT(ols->ols_state == OLS_GRANTED ||
196                 ols->ols_state == OLS_UPCALL_RECEIVED);
197         LINVRNT(osc_lock_invariant(ols));
198
199         if (ols->ols_glimpse) {
200                 LASSERT(ols->ols_hold == 0);
201                 return 0;
202         }
203         LASSERT(ols->ols_hold);
204
205         /*
206          * Move lock into OLS_RELEASED state before calling osc_cancel_base()
207          * so that possible synchronous cancellation (that always happens
208          * e.g., for liblustre) sees that lock is released.
209          */
210         ols->ols_state = OLS_RELEASED;
211         return osc_lock_unhold(ols);
212 }
213
214 static void osc_lock_fini(const struct lu_env *env,
215                           struct cl_lock_slice *slice)
216 {
217         struct osc_lock  *ols = cl2osc_lock(slice);
218
219         LINVRNT(osc_lock_invariant(ols));
220         /*
221          * ->ols_hold can still be true at this point if, for example, a
222          * thread that requested a lock was killed (and released a reference
223          * to the lock) before the reply from the server was received. In this
224          * case the lock is destroyed immediately after the upcall.
225          */
226         osc_lock_unhold(ols);
227         LASSERT(ols->ols_lock == NULL);
228
229         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
230 }
231
232 void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
233                         struct ldlm_res_id *resname)
234 {
235         const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu);
236         if (0) {
237                 /*
238                  * In the perfect world of the future, where ost servers talk
239                  * idif-fids...
240                  */
241                 fid_build_reg_res_name(fid, resname);
242         } else {
243                 /*
244                  * In reality, the OST server expects ->lsm_object_id and
245                  * ->lsm_object_gr in the resource name.
246                  */
247                 osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
248                                    resname);
249         }
250 }
251
252 static void osc_lock_build_policy(const struct lu_env *env,
253                                   const struct cl_lock *lock,
254                                   ldlm_policy_data_t *policy)
255 {
256         const struct cl_lock_descr *d = &lock->cll_descr;
257
258         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
259         policy->l_extent.gid = d->cld_gid;
260 }
261
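/*
 * Translate cl_lock enqueue flags (CEF_*) into the ldlm flags (LDLM_FL_*,
 * LDLM_AST_*) understood by the DLM layer.
 */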
262 static int osc_enq2ldlm_flags(__u32 enqflags)
263 {
264         int result = 0;
265
266         LASSERT((enqflags & ~CEF_MASK) == 0);
267
268         if (enqflags & CEF_NONBLOCK)
269                 result |= LDLM_FL_BLOCK_NOWAIT;
270         if (enqflags & CEF_ASYNC)
271                 result |= LDLM_FL_HAS_INTENT;
272         if (enqflags & CEF_DISCARD_DATA)
273                 result |= LDLM_AST_DISCARD_DATA;
274         return result;
275 }
276
277 /**
278  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
279  * pointers. Initialized in osc_init().
280  */
281 spinlock_t osc_ast_guard;
282
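/*
 * Returns the osc_lock attached to \a dlm_lock (via ->l_ast_data), taking a
 * trusted reference on the enclosing cl_lock so that it cannot be freed while
 * the caller uses it. The reference is dropped by osc_ast_data_put().
 */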
283 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
284 {
285         struct osc_lock *olck;
286
287         lock_res_and_lock(dlm_lock);
288         spin_lock(&osc_ast_guard);
289         olck = dlm_lock->l_ast_data;
290         if (olck != NULL) {
291                 struct cl_lock *lock = olck->ols_cl.cls_lock;
292                 /*
293                  * If osc_lock holds a reference on ldlm lock, return it even
294                  * when cl_lock is in CLS_FREEING state. This way
295                  *
296                  *         osc_ast_data_get(dlmlock) == NULL
297                  *
298                  * guarantees that all osc references on dlmlock were
299                  * released. osc_dlm_blocking_ast0() relies on that.
300                  */
301                 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
302                         cl_lock_get_trust(lock);
303                         lu_ref_add_atomic(&lock->cll_reference,
304                                           "ast", cfs_current());
305                 } else
306                         olck = NULL;
307         }
308         spin_unlock(&osc_ast_guard);
309         unlock_res_and_lock(dlm_lock);
310         return olck;
311 }
312
313 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
314 {
315         struct cl_lock *lock;
316
317         lock = olck->ols_cl.cls_lock;
318         lu_ref_del(&lock->cll_reference, "ast", cfs_current());
319         cl_lock_put(env, lock);
320 }
321
322 /**
323  * Updates object attributes from a lock value block (lvb) received together
324  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
325  * logic.
326  *
327  * This can be optimized to not update attributes when lock is a result of a
328  * local match.
329  *
330  * Called under lock and resource spin-locks.
331  */
332 static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
333                                 int rc)
334 {
335         struct ost_lvb    *lvb;
336         struct cl_object  *obj;
337         struct lov_oinfo  *oinfo;
338         struct cl_attr    *attr;
339         unsigned           valid;
340
341         ENTRY;
342
343         if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
344                 EXIT;
345                 return;
346         }
347
348         lvb   = &olck->ols_lvb;
349         obj   = olck->ols_cl.cls_obj;
350         oinfo = cl2osc(obj)->oo_oinfo;
351         attr  = &osc_env_info(env)->oti_attr;
352         valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
353         cl_lvb2attr(attr, lvb);
354
355         cl_object_attr_lock(obj);
356         if (rc == 0) {
357                 struct ldlm_lock  *dlmlock;
358                 __u64 size;
359
360                 dlmlock = olck->ols_lock;
361                 LASSERT(dlmlock != NULL);
362
363                 /* re-grab LVB from a dlm lock under DLM spin-locks. */
364                 *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
365                 size = lvb->lvb_size;
366                 /* Extend KMS up to the end of this lock and no further.
367                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
368                 if (size > dlmlock->l_policy_data.l_extent.end)
369                         size = dlmlock->l_policy_data.l_extent.end + 1;
370                 if (size >= oinfo->loi_kms) {
371                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
372                                    ", kms="LPU64, lvb->lvb_size, size);
373                         valid |= CAT_KMS;
374                         attr->cat_kms = size;
375                 } else {
376                         LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
377                                    LPU64"; leaving kms="LPU64", end="LPU64,
378                                    lvb->lvb_size, oinfo->loi_kms,
379                                    dlmlock->l_policy_data.l_extent.end);
380                 }
381                 ldlm_lock_allow_match_locked(dlmlock);
382         } else if (rc == -ENAVAIL && olck->ols_glimpse) {
383                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
384                        " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
385         } else
386                 valid = 0;
387
388         if (valid != 0)
389                 cl_object_attr_set(env, obj, attr, valid);
390
391         cl_object_attr_unlock(obj);
392
393         EXIT;
394 }
395
396 /**
397  * Called when a lock is granted, from an upcall (when server returned a
398  * granted lock), or from completion AST, when server returned a blocked lock.
399  *
400  * Called under lock and resource spin-locks, which are released temporarily
401  * here.
402  */
403 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
404                              struct ldlm_lock *dlmlock, int rc)
405 {
406         struct ldlm_extent   *ext;
407         struct cl_lock       *lock;
408         struct cl_lock_descr *descr;
409
410         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
411
412         ENTRY;
413         if (olck->ols_state != OLS_GRANTED) {
414                 lock  = olck->ols_cl.cls_lock;
415                 ext   = &dlmlock->l_policy_data.l_extent;
416                 descr = &osc_env_info(env)->oti_descr;
417                 descr->cld_obj = lock->cll_descr.cld_obj;
418
419                 /* XXX check that ->l_granted_mode is valid. */
420                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
421                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
422                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
423                 descr->cld_gid   = ext->gid;
424                 /*
425                  * tell upper layers the extent of the lock that was actually
426                  * granted
427                  */
428                 olck->ols_state = OLS_GRANTED;
429                 osc_lock_lvb_update(env, olck, rc);
430
431                 /* release DLM spin-locks to allow cl_lock_{modify,signal}()
432                  * to take a semaphore on a parent lock. This is safe, because
433                  * spin-locks are needed to protect consistency of
434                  * dlmlock->l_*_mode and LVB, and we have finished processing
435                  * them. */
436                 unlock_res_and_lock(dlmlock);
437                 cl_lock_modify(env, lock, descr);
438                 cl_lock_signal(env, lock);
439                 LINVRNT(osc_lock_invariant(olck));
440                 lock_res_and_lock(dlmlock);
441         }
442         EXIT;
443 }
444
445 static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
446
447 {
448         struct ldlm_lock *dlmlock;
449
450         ENTRY;
451
452         dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
453         LASSERT(dlmlock != NULL);
454
455         lock_res_and_lock(dlmlock);
456         spin_lock(&osc_ast_guard);
457         LASSERT(dlmlock->l_ast_data == olck);
458         LASSERT(olck->ols_lock == NULL);
459         olck->ols_lock = dlmlock;
460         spin_unlock(&osc_ast_guard);
461
462         /*
463          * Lock might not be granted yet. In this case, completion ast
464          * (osc_ldlm_completion_ast()) comes later and finishes lock
465          * granting.
466          */
467         if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
468                 osc_lock_granted(env, olck, dlmlock, 0);
469         unlock_res_and_lock(dlmlock);
470
471         /*
472          * osc_enqueue_interpret() decrefs asynchronous locks; counter
473          * this by taking an extra reference here.
474          */
475         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
476         olck->ols_hold = 1;
477
478         /* lock reference taken by ldlm_handle2lock_long() is owned by
479          * osc_lock and released in osc_lock_detach() */
480         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
481         olck->ols_has_ref = 1;
482 }
483
484 /**
485  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
486  * received from a server, or after osc_enqueue_base() matched a local DLM
487  * lock.
488  */
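/*
 * osc_lock state progression as used in this file: OLS_NEW (initial) ->
 * OLS_ENQUEUED (enqueue issued) -> OLS_UPCALL_RECEIVED (this upcall ran) ->
 * OLS_GRANTED (DLM lock granted); release and cancellation paths use
 * OLS_RELEASED, OLS_BLOCKED and OLS_CANCELLED.
 */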
489 static int osc_lock_upcall(void *cookie, int errcode)
490 {
491         struct osc_lock         *olck  = cookie;
492         struct cl_lock_slice    *slice = &olck->ols_cl;
493         struct cl_lock          *lock  = slice->cls_lock;
494         struct lu_env           *env;
495         struct cl_env_nest       nest;
496
497         ENTRY;
498         env = cl_env_nested_get(&nest);
499         if (!IS_ERR(env)) {
500                 int rc;
501
502                 cl_lock_mutex_get(env, lock);
503
504                 LASSERT(lock->cll_state >= CLS_QUEUING);
505                 if (olck->ols_state == OLS_ENQUEUED) {
506                         olck->ols_state = OLS_UPCALL_RECEIVED;
507                         rc = ldlm_error2errno(errcode);
508                 } else if (olck->ols_state == OLS_CANCELLED) {
509                         rc = -EIO;
510                 } else {
511                         CERROR("Impossible state: %i\n", olck->ols_state);
512                         LBUG();
513                 }
514                 if (rc) {
515                         struct ldlm_lock *dlmlock;
516
517                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
518                         if (dlmlock != NULL) {
519                                 lock_res_and_lock(dlmlock);
520                                 spin_lock(&osc_ast_guard);
521                                 LASSERT(olck->ols_lock == NULL);
522                                 dlmlock->l_ast_data = NULL;
523                                 olck->ols_handle.cookie = 0ULL;
524                                 spin_unlock(&osc_ast_guard);
525                                 unlock_res_and_lock(dlmlock);
526                                 LDLM_LOCK_PUT(dlmlock);
527                         }
528                 } else {
529                         if (olck->ols_glimpse)
530                                 olck->ols_glimpse = 0;
531                         osc_lock_upcall0(env, olck);
532                 }
533
534                 /* Error handling, some errors are tolerable. */
535                 if (olck->ols_locklessable && rc == -EUSERS) {
536                         /* This is a tolerable error; turn this lock into
537                          * a lockless lock.
538                          */
539                         osc_object_set_contended(cl2osc(slice->cls_obj));
540                         LASSERT(slice->cls_ops == &osc_lock_ops);
541
542                         /* Change this lock to ldlmlock-less lock. */
543                         osc_lock_to_lockless(env, olck, 1);
544                         olck->ols_state = OLS_GRANTED;
545                         rc = 0;
546                 } else if (olck->ols_glimpse && rc == -ENAVAIL) {
547                         osc_lock_lvb_update(env, olck, rc);
548                         cl_lock_delete(env, lock);
549                         /* Hide the error. */
550                         rc = 0;
551                 }
552
553                 if (rc == 0)
554                         /* on error, lock was signaled by cl_lock_error() */
555                         cl_lock_signal(env, lock);
556                 else
557                         cl_lock_error(env, lock, rc);
558
559                 cl_lock_mutex_put(env, lock);
560
561                 /* release cookie reference, acquired by osc_lock_enqueue() */
562                 lu_ref_del(&lock->cll_reference, "upcall", lock);
563                 cl_lock_put(env, lock);
564                 cl_env_nested_put(&nest, env);
565         } else
566                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
567                 LBUG();
568         RETURN(errcode);
569 }
570
571 /**
572  * Core of osc_dlm_blocking_ast() logic.
573  */
574 static void osc_lock_blocking(const struct lu_env *env,
575                               struct ldlm_lock *dlmlock,
576                               struct osc_lock *olck, int blocking)
577 {
578         struct cl_lock *lock = olck->ols_cl.cls_lock;
579
580         LASSERT(olck->ols_lock == dlmlock);
581         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
582         LASSERT(!osc_lock_is_lockless(olck));
583
584         /*
585          * Lock might still be addref-ed here if, e.g., a blocking ast
586          * is sent for a failed lock.
587          */
588         osc_lock_unhold(olck);
589
590         if (blocking && olck->ols_state < OLS_BLOCKED)
591                 /*
592                  * Move osc_lock into OLS_BLOCKED before canceling the lock,
593                  * because it recursively re-enters osc_lock_blocking(), with
594                  * the state set to OLS_CANCELLED.
595                  */
596                 olck->ols_state = OLS_BLOCKED;
597         /*
598          * cancel and destroy lock at least once no matter how blocking ast is
599          * entered (see comment above osc_ldlm_blocking_ast() for use
600          * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
601          */
602         cl_lock_cancel(env, lock);
603         cl_lock_delete(env, lock);
604 }
605
606 /**
607  * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
608  * and ldlm_lock caches.
609  */
610 static int osc_dlm_blocking_ast0(const struct lu_env *env,
611                                  struct ldlm_lock *dlmlock,
612                                  void *data, int flag)
613 {
614         struct osc_lock *olck;
615         struct cl_lock  *lock;
616         int result;
617         int cancel;
618
619         LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
620
621         cancel = 0;
622         olck = osc_ast_data_get(dlmlock);
623         if (olck != NULL) {
624                 lock = olck->ols_cl.cls_lock;
625                 cl_lock_mutex_get(env, lock);
626                 LINVRNT(osc_lock_invariant(olck));
627                 if (olck->ols_ast_wait) {
628                         /* wake up osc_lock_use() */
629                         cl_lock_signal(env, lock);
630                         olck->ols_ast_wait = 0;
631                 }
632                 /*
633                  * Lock might have been canceled while this thread was
634          * sleeping on the lock mutex, but olck is pinned in memory.
635                  */
636                 if (olck == dlmlock->l_ast_data) {
637                         /*
638                          * NOTE: DLM sends blocking AST's for failed locks
639                          *       (that are still in pre-OLS_GRANTED state)
640                          *       too, and they have to be canceled, otherwise
641                          *       the DLM lock is never destroyed and gets stuck
642                          *       in memory.
643                          *
644                          *       Alternatively, ldlm_cli_cancel() can be
645                          *       called here directly for osc_locks with
646                          *       ols_state < OLS_GRANTED to maintain an
647                          *       invariant that ->clo_cancel() is only called
648                          *       for locks that were granted.
649                          */
650                         LASSERT(data == olck);
651                         osc_lock_blocking(env, dlmlock,
652                                           olck, flag == LDLM_CB_BLOCKING);
653                 } else
654                         cancel = 1;
655                 cl_lock_mutex_put(env, lock);
656                 osc_ast_data_put(env, olck);
657         } else
658                 /*
659                  * DLM lock exists, but there is no cl_lock attached to it.
660                  * This is a `normal' race. cl_object and its cl_lock's can be
661                  * removed by memory pressure, together with all pages.
662                  */
663                 cancel = (flag == LDLM_CB_BLOCKING);
664
665         if (cancel) {
666                 struct lustre_handle *lockh;
667
668                 lockh = &osc_env_info(env)->oti_handle;
669                 ldlm_lock2handle(dlmlock, lockh);
670                 result = ldlm_cli_cancel(lockh);
671         } else
672                 result = 0;
673         return result;
674 }
675
676 /**
677  * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
678  * some other lock, or is canceled. This function is installed as a
679  * ldlm_lock::l_blocking_ast() for client extent locks.
680  *
681  * Control flow is tricky, because ldlm uses the same call-back
682  * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
683  *
684  * \param dlmlock lock for which ast occurred.
685  *
686  * \param new description of a conflicting lock in case of blocking ast.
687  *
688  * \param data value of dlmlock->l_ast_data
689  *
690  * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
691  *             cancellation and blocking ast's.
692  *
693  * Possible use cases:
694  *
695  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
696  *       lock due to lock lru pressure, or explicit user request to purge
697  *       locks.
698  *
699  *     - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
700  *       us that dlmlock conflicts with another lock that some client is
701  *       enqueuing. Lock is canceled.
702  *
703  *           - cl_lock_cancel() is called. osc_lock_cancel() calls
704  *             ldlm_cli_cancel() that calls
705  *
706  *                  dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
707  *
708  *             recursively entering osc_ldlm_blocking_ast().
709  *
710  *     - client cancels lock voluntarily (e.g., as a part of early cancellation):
711  *
712  *           cl_lock_cancel()->
713  *             osc_lock_cancel()->
714  *               ldlm_cli_cancel()->
715  *                 dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
716  *
717  */
718 static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
719                                  struct ldlm_lock_desc *new, void *data,
720                                  int flag)
721 {
722         struct lu_env     *env;
723         struct cl_env_nest nest;
724         int                result;
725
726         /*
727          * This can be called in the context of outer IO, e.g.,
728          *
729          *     cl_enqueue()->...
730          *       ->osc_enqueue_base()->...
731          *         ->ldlm_prep_elc_req()->...
732          *           ->ldlm_cancel_callback()->...
733          *             ->osc_ldlm_blocking_ast()
734          *
735          * A new environment has to be created to avoid corrupting the outer context.
736          */
737         env = cl_env_nested_get(&nest);
738         if (!IS_ERR(env)) {
739                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
740                 cl_env_nested_put(&nest, env);
741         } else {
742                 result = PTR_ERR(env);
743                 /*
744                  * XXX This should never happen, as the cl_lock would get
745                  * stuck. A pre-allocated environment a la vvp_inode_fini_env
746                  * should be used.
747                  */
748                 LBUG();
749         }
750         if (result != 0) {
751                 if (result == -ENODATA)
752                         result = 0;
753                 else
754                         CERROR("BAST failed: %d\n", result);
755         }
756         return result;
757 }
758
759 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
760                                    int flags, void *data)
761 {
762         struct cl_env_nest nest;
763         struct lu_env     *env;
764         struct osc_lock   *olck;
765         struct cl_lock    *lock;
766         int result;
767         int dlmrc;
768
769         /* first, do dlm part of the work */
770         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
771         /* then, notify cl_lock */
772         env = cl_env_nested_get(&nest);
773         if (!IS_ERR(env)) {
774                 olck = osc_ast_data_get(dlmlock);
775                 if (olck != NULL) {
776                         lock = olck->ols_cl.cls_lock;
777                         cl_lock_mutex_get(env, lock);
778                         /*
779                          * ldlm_handle_cp_callback() copied LVB from request
780                          * to lock->l_lvb_data, store it in osc_lock.
781                          */
782                         LASSERT(dlmlock->l_lvb_data != NULL);
783                         lock_res_and_lock(dlmlock);
784                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
785                         if (olck->ols_lock == NULL) {
786                                 /*
787                                  * upcall (osc_lock_upcall()) hasn't yet been
788                                  * called. Do nothing now, upcall will bind
789                                  * olck to dlmlock and signal the waiters.
790                                  *
791                                  * This maintains an invariant that osc_lock
792                                  * and ldlm_lock are always bound when
793                                  * osc_lock is in OLS_GRANTED state.
794                                  */
795                         } else if (dlmlock->l_granted_mode ==
796                                    dlmlock->l_req_mode) {
797                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
798                         }
799                         unlock_res_and_lock(dlmlock);
800
801                         if (dlmrc != 0) {
802                                 CL_LOCK_DEBUG(D_ERROR, env, lock,
803                                               "dlmlock returned %d\n", dlmrc);
804                                 cl_lock_error(env, lock, dlmrc);
805                         }
806                         cl_lock_mutex_put(env, lock);
807                         osc_ast_data_put(env, olck);
808                         result = 0;
809                 } else
810                         result = -ELDLM_NO_LOCK_DATA;
811                 cl_env_nested_put(&nest, env);
812         } else
813                 result = PTR_ERR(env);
814         return dlmrc ?: result;
815 }
816
817 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
818 {
819         struct ptlrpc_request  *req  = data;
820         struct osc_lock        *olck;
821         struct cl_lock         *lock;
822         struct cl_object       *obj;
823         struct cl_env_nest      nest;
824         struct lu_env          *env;
825         struct ost_lvb         *lvb;
826         struct req_capsule     *cap;
827         int                     result;
828
829         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
830
831         env = cl_env_nested_get(&nest);
832         if (!IS_ERR(env)) {
833                  * osc_ast_data_get() has to go after the environment is
834                  * allocated, because osc_ast_data_get() acquires a
835                  * reference to a lock, and that reference can only be
836                  * released with an environment.
837                  * environment.
838                  */
839                 olck = osc_ast_data_get(dlmlock);
840                 if (olck != NULL) {
841                         lock = olck->ols_cl.cls_lock;
842                         cl_lock_mutex_get(env, lock);
843                         cap = &req->rq_pill;
844                         req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
845                         req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
846                                              sizeof *lvb);
847                         result = req_capsule_server_pack(cap);
848                         if (result == 0) {
849                                 lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
850                                 obj = lock->cll_descr.cld_obj;
851                                 result = cl_object_glimpse(env, obj, lvb);
852                         }
853                         cl_lock_mutex_put(env, lock);
854                         osc_ast_data_put(env, olck);
855                 } else {
856                         /*
857                          * These errors are normal races, so we don't want to
858                          * fill the console with messages by calling
859                          * ptlrpc_error()
860                          */
861                         lustre_pack_reply(req, 1, NULL, NULL);
862                         result = -ELDLM_NO_LOCK_DATA;
863                 }
864                 cl_env_nested_put(&nest, env);
865         } else
866                 result = PTR_ERR(env);
867         req->rq_status = result;
868         return result;
869 }
870
871 static unsigned long osc_lock_weigh(const struct lu_env *env,
872                                     const struct cl_lock_slice *slice)
873 {
874         /*
875          * don't need to grab coh_page_guard since we don't care about the
876          * exact number of pages.
877          */
878         return cl_object_header(slice->cls_obj)->coh_pages;
879 }
880
881 /**
882  * Get the weight of dlm lock for early cancellation.
883  *
884  * XXX: it should return the pages covered by this \a dlmlock.
885  */
886 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
887 {
888         struct cl_env_nest       nest;
889         struct lu_env           *env;
890         struct osc_lock         *lock;
891         struct cl_lock          *cll;
892         unsigned long            weight;
893         ENTRY;
894
895         might_sleep();
896         /*
897          * osc_ldlm_weigh_ast has a complex context since it might be called
898          * because of lock cancellation or from user input. We have to make
899          * a new environment for it. It is probably safe to use the upper
900          * context because cl_lock_put() does not modify environment
901          * variables. But just in case ..
902          */
903         env = cl_env_nested_get(&nest);
904         if (IS_ERR(env))
905                 /* Mostly because of lack of memory; tend to eliminate this lock */
906                 RETURN(0);
907
908         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
909         lock = osc_ast_data_get(dlmlock);
910         if (lock == NULL) {
911                 /* cl_lock was destroyed because of memory pressure.
912                  * It is more reasonable to assign this type of lock
913                  * a lower cost.
914                  */
915                 GOTO(out, weight = 0);
916         }
917
918         cll = lock->ols_cl.cls_lock;
919         cl_lock_mutex_get(env, cll);
920         weight = cl_lock_weigh(env, cll);
921         cl_lock_mutex_put(env, cll);
922         osc_ast_data_put(env, lock);
923         EXIT;
924
925 out:
926         cl_env_nested_put(&nest, env);
927         return weight;
928 }
929
930 static void osc_lock_build_einfo(const struct lu_env *env,
931                                  const struct cl_lock *clock,
932                                  struct osc_lock *lock,
933                                  struct ldlm_enqueue_info *einfo)
934 {
935         enum cl_lock_mode mode;
936
937         mode = clock->cll_descr.cld_mode;
938         if (mode == CLM_PHANTOM)
939                 /*
940                  * For now, enqueue all glimpse locks in read mode. In the
941                  * future, client might choose to enqueue LCK_PW lock for
942                  * glimpse on a file opened for write.
943                  */
944                 mode = CLM_READ;
945
946         einfo->ei_type   = LDLM_EXTENT;
947         einfo->ei_mode   = osc_cl_lock2ldlm(mode);
948         einfo->ei_cb_bl  = osc_ldlm_blocking_ast;
949         einfo->ei_cb_cp  = osc_ldlm_completion_ast;
950         einfo->ei_cb_gl  = osc_ldlm_glimpse_ast;
951         einfo->ei_cb_wg  = osc_ldlm_weigh_ast;
952         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
953 }
954
955 static int osc_lock_delete0(struct cl_lock *conflict)
956 {
957         struct cl_env_nest    nest;
958         struct lu_env        *env;
959         int    rc = 0;        
960
961         env = cl_env_nested_get(&nest);
962         if (!IS_ERR(env)) {
963                 cl_lock_delete(env, conflict);
964                 cl_env_nested_put(&nest, env);
965         } else
966                 rc = PTR_ERR(env);
967         return rc; 
968 }
969 /**
970  * Cancels \a conflict lock and waits until it reaches CLS_FREEING state. This
971  * is called as a part of enqueuing to cancel conflicting locks early.
972  *
973  * \retval            0: success, \a conflict was cancelled and destroyed.
974  *
975  * \retval   CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
976  *                       released in the process. Repeat enqueuing.
977  *
978  * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
979  *                       either \a lock is non-blocking, or current thread
980  *                       holds other locks, that prevent it from waiting
981  *                       for cancel to complete.
982  *
983  * \retval          -ve: other error, including -EINTR.
984  *
985  */
986 static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
987                                 struct cl_lock *conflict, int canwait)
988 {
989         int rc;
990
991         LASSERT(cl_lock_is_mutexed(lock));
992         LASSERT(cl_lock_is_mutexed(conflict));
993
994         rc = 0;
995         if (conflict->cll_state != CLS_FREEING) {
996                 cl_lock_cancel(env, conflict);
997                 rc = osc_lock_delete0(conflict);
998                 if (rc)
999                         return rc; 
1000                 if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
1001                         rc = -EWOULDBLOCK;
1002                         if (cl_lock_nr_mutexed(env) > 2)
1003                                 /*
1004                                  * If mutices of locks other than @lock and
1005                                  * @scan are held by the current thread, it
1006                                  * cannot wait on @scan state change in a
1007                                  * dead-lock safe matter, so simply skip early
1008                                  * dead-lock safe manner, so simply skip early
1009                                  *
1010                                  * This means that early cancellation doesn't
1011                                  * work when there is even slight mutex
1012                                  * contention, as top-lock's mutex is usually
1013                                  * held at this time.
1014                                  */
1015                                 ;
1016                         else if (canwait) {
1017                                 /* Waiting for @scan to be destroyed */
1018                                 cl_lock_mutex_put(env, lock);
1019                                 do {
1020                                         rc = cl_lock_state_wait(env, conflict);
1021                                 } while (!rc &&
1022                                          conflict->cll_state < CLS_FREEING);
1023                                 /* mutex was released, repeat enqueue. */
1024                                 rc = rc ?: CLO_REPEAT;
1025                                 cl_lock_mutex_get(env, lock);
1026                         }
1027                 }
1028                 LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
1029                 CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
1030                        conflict, rc ? "not":"", rc);
1031         }
1032         return rc;
1033 }
1034
1035 /**
1036  * Determine if the lock should be converted into a lockless lock.
1037  *
1038  * Steps to check:
1039  * - if the lock has an explicit requirement for a non-lockless lock;
1040  * - the io lock request type ci_lockreq;
1041  * - send the enqueue rpc to ost to make the final decision;
1042  * - special treatment for lockless truncate.
1043  *
1044  *  Additional policy can be implemented here, e.g., never do lockless-io
1045  *  for large extents.
1046  */
1047 static void osc_lock_to_lockless(const struct lu_env *env,
1048                                  struct osc_lock *ols, int force)
1049 {
1050         struct cl_lock_slice *slice = &ols->ols_cl;
1051         struct cl_lock *lock        = slice->cls_lock;
1052
1053         LASSERT(ols->ols_state == OLS_NEW ||
1054                 ols->ols_state == OLS_UPCALL_RECEIVED);
1055
1056         if (force) {
1057                 ols->ols_locklessable = 1;
1058                 LASSERT(cl_lock_is_mutexed(lock));
1059                 slice->cls_ops = &osc_lock_lockless_ops;
1060         } else {
1061                 struct osc_io *oio     = osc_env_io(env);
1062                 struct cl_io  *io      = oio->oi_cl.cis_io;
1063                 struct cl_object *obj  = slice->cls_obj;
1064                 struct osc_object *oob = cl2osc(obj);
1065                 const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
1066                 struct obd_connect_data *ocd;
1067
1068                 LASSERT(io->ci_lockreq == CILR_MANDATORY ||
1069                         io->ci_lockreq == CILR_MAYBE ||
1070                         io->ci_lockreq == CILR_NEVER);
1071
1072                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
1073                 ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
1074                                 (io->ci_lockreq == CILR_MAYBE) &&
1075                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
1076                 if (io->ci_lockreq == CILR_NEVER ||
1077                         /* lockless IO */
1078                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
1079                         /* lockless truncate */
1080                     (io->ci_type == CIT_TRUNC &&
1081                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
1082                       osd->od_lockless_truncate)) {
1083                         ols->ols_locklessable = 1;
1084                         slice->cls_ops = &osc_lock_lockless_ops;
1085                 }
1086         }
1087         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1088 }
1089
1090 /**
1091  * Cancel all conflicting locks and wait for them to be destroyed.
1092  *
1093  * This function is used for two purposes:
1094  *
1095  *     - early cancel all conflicting locks before starting IO, and
1096  *
1097  *     - guarantee that pages added to the page cache by lockless IO are never
1098  *       covered by locks other than lockless IO lock, and, hence, are not
1099  *       visible to other threads.
1100  */
1101 static int osc_lock_enqueue_wait(const struct lu_env *env,
1102                                  const struct osc_lock *olck)
1103 {
1104         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
1105         struct cl_lock_descr    *descr   = &lock->cll_descr;
1106         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
1107         struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
1108         struct cl_lock          *scan;
1109         struct cl_lock          *temp;
1110         int lockless                     = osc_lock_is_lockless(olck);
1111         int rc                           = 0;
1112         int canwait;
1113         int stop;
1114         ENTRY;
1115
1116         LASSERT(cl_lock_is_mutexed(lock));
1117         LASSERT(lock->cll_state == CLS_QUEUING);
1118
1119         /*
1120          * XXX This function could be sped up if we had asynchronous
1121          * cancellation.
1122          */
1123
1124         canwait =
1125                 !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
1126                 cl_lock_nr_mutexed(env) == 1;
1127         cl_lock_closure_init(env, closure, lock, canwait);
1128         spin_lock(&hdr->coh_lock_guard);
1129         list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
1130                 if (scan == lock)
1131                         continue;
1132
1133                 if (scan->cll_state < CLS_QUEUING ||
1134                     scan->cll_state == CLS_FREEING ||
1135                     scan->cll_descr.cld_start > descr->cld_end ||
1136                     scan->cll_descr.cld_end < descr->cld_start)
1137                         continue;
1138
1139                 /* overlapped and living locks. */
1140
1141                 /* We're not supposed to give up group lock. */
1142                 if (scan->cll_descr.cld_mode == CLM_GROUP) {
1143                         LASSERT(descr->cld_mode != CLM_GROUP ||
1144                                 descr->cld_gid != scan->cll_descr.cld_gid);
1145                         continue;
1146                 }
1147
1148                 /* A tricky case for lockless pages:
1149                  * We need to cancel the compatible locks if we're enqueuing
1150                  * a lockless lock, for example:
1151                  * imagine that client has PR lock on [0, 1000], and thread T0
1152                  * is doing lockless IO in [500, 1500] region. Concurrent
1153                  * thread T1 can see lockless data in [500, 1000], which is
1154                  * wrong, because these data are possibly stale.
1155                  */
1156                 if (!lockless && cl_lock_compatible(scan, lock))
1157                         continue;
1158
1159                 /* Now @scan is conflicting with @lock, this means the current
1160                  * thread has to sleep until @scan is destroyed. */
1161                 cl_lock_get_trust(scan);
1162                 if (&temp->cll_linkage != &hdr->coh_locks)
1163                         cl_lock_get_trust(temp);
1164                 spin_unlock(&hdr->coh_lock_guard);
1165                 lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
1166
1167                 LASSERT(list_empty(&closure->clc_list));
1168                 rc = cl_lock_closure_build(env, scan, closure);
1169                 if (rc == 0) {
1170                         rc = osc_lock_cancel_wait(env, lock, scan, canwait);
1171                         cl_lock_disclosure(env, closure);
1172                         if (rc == -EWOULDBLOCK)
1173                                 rc = 0;
1174                 }
1175                 if (rc == CLO_REPEAT && !canwait)
1176                         /* cannot wait... no early cancellation. */
1177                         rc = 0;
1178
1179                 lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
1180                 cl_lock_put(env, scan);
1181                 spin_lock(&hdr->coh_lock_guard);
1182                 /*
1183                  * Lock list could have been modified, while spin-lock was
1184                  * released. Check that it is safe to continue.
1185                  */
1186                 stop = list_empty(&temp->cll_linkage);
1187                 if (&temp->cll_linkage != &hdr->coh_locks)
1188                         cl_lock_put(env, temp);
1189                 if (stop || rc != 0)
1190                         break;
1191         }
1192         spin_unlock(&hdr->coh_lock_guard);
1193         cl_lock_closure_fini(closure);
1194         RETURN(rc);
1195 }
1196
1197 /**
1198  * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
1199  *
1200  *     - Thread0: obtains PR:[0, 10]. Lock is busy.
1201  *
1202  *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
1203  *       PR:[0, 10], but cancellation of busy lock is postponed.
1204  *
1205  *     - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
1206  *       PW:[5, 50], and thread0 waits for the lock completion never
1207  *       releasing PR:[0, 10]---deadlock.
1208  *
1209  * The second PR lock can be a glimpse lock (it is to deal with that situation
1210  * that ll_glimpse_size() has a second argument, preventing local match of
1211  * not-yet-granted locks, see bug 10295). Similar situation is possible in the
1212  * case of memory mapped user level buffer.
1213  *
1214  * To prevent this we can detect a situation when current "thread" or "io"
1215  * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
1216  * the ols->ols_flags, or prevent local match with PW locks.
1217  */
1218 static int osc_deadlock_is_possible(const struct lu_env *env,
1219                                     struct cl_lock *lock)
1220 {
1221         struct cl_object        *obj;
1222         struct cl_object_header *head;
1223         struct cl_lock          *scan;
1224         struct osc_io           *oio;
1225
1226         int result;
1227
1228         ENTRY;
1229
1230         LASSERT(cl_lock_is_mutexed(lock));
1231
1232         oio  = osc_env_io(env);
1233         obj  = lock->cll_descr.cld_obj;
1234         head = cl_object_header(obj);
1235
1236         result = 0;
1237         spin_lock(&head->coh_lock_guard);
1238         list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1239                 if (scan != lock) {
1240                         struct osc_lock *oscan;
1241
1242                         oscan = osc_lock_at(scan);
1243                         LASSERT(oscan != NULL);
1244                         if (oscan->ols_owner == oio) {
1245                                 result = 1;
1246                                 break;
1247                         }
1248                 }
1249         }
1250         spin_unlock(&head->coh_lock_guard);
1251         RETURN(result);
1252 }
1253
1254 /**
1255  * Implementation of cl_lock_operations::clo_enqueue() method for osc
1256  * layer. This initiates ldlm enqueue:
1257  *
1258  *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
1259  *
1260  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
1261  *
1262  *     - calls osc_enqueue_base() to do actual enqueue.
1263  *
1264  * osc_enqueue_base() is supplied with an upcall function that is executed
1265  * when lock is received either after a local cached ldlm lock is matched, or
1266  * when a reply from the server is received.
1267  *
1268  * This function does not wait for the network communication to complete.
1269  */
1270 static int osc_lock_enqueue(const struct lu_env *env,
1271                             const struct cl_lock_slice *slice,
1272                             struct cl_io *unused, __u32 enqflags)
1273 {
1274         struct osc_lock          *ols     = cl2osc_lock(slice);
1275         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
1276         struct osc_object        *obj     = cl2osc(slice->cls_obj);
1277         struct osc_thread_info   *info    = osc_env_info(env);
1278         struct ldlm_res_id       *resname = &info->oti_resname;
1279         ldlm_policy_data_t       *policy  = &info->oti_policy;
1280         struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
1281         int result;
1282         ENTRY;
1283
1284         LASSERT(cl_lock_is_mutexed(lock));
1285         LASSERT(lock->cll_state == CLS_QUEUING);
1286         LASSERT(ols->ols_state == OLS_NEW);
1287
1288         osc_lock_build_res(env, obj, resname);
1289         osc_lock_build_policy(env, lock, policy);
1290         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
1291         if (osc_deadlock_is_possible(env, lock))
1292                 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
1293         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
1294                 ols->ols_glimpse = 1;
1295
1296         result = osc_lock_enqueue_wait(env, ols);
1297         if (result == 0) {
1298                 if (!(enqflags & CEF_MUST))
1299                         /* try to convert this lock to a lockless lock */
1300                         osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
1301                 if (!osc_lock_is_lockless(ols)) {
1302                         if (ols->ols_locklessable)
1303                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
1304
1305                         /* a reference for lock, passed as an upcall cookie */
1306                         cl_lock_get(lock);
1307                         lu_ref_add(&lock->cll_reference, "upcall", lock);
1308                         ols->ols_state = OLS_ENQUEUED;
1309
1310                         /*
1311                          * XXX: this is possible blocking point as
1312                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
1313                          * LDLM_CP_CALLBACK.
1314                          */
1315                         result = osc_enqueue_base(osc_export(obj), resname,
1316                                           &ols->ols_flags, policy,
1317                                           &ols->ols_lvb,
1318                                           obj->oo_oinfo->loi_kms_valid,
1319                                           osc_lock_upcall,
1320                                           ols, einfo, &ols->ols_handle,
1321                                           PTLRPCD_SET, 1);
1322                         if (result != 0) {
1323                                 lu_ref_del(&lock->cll_reference,
1324                                            "upcall", lock);
1325                                 cl_lock_put(env, lock);
1326                         }
1327                 } else {
1328                         ols->ols_state = OLS_GRANTED;
1329                 }
1330         }
1331         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
1332         RETURN(result);
1333 }
1334
1335 static int osc_lock_wait(const struct lu_env *env,
1336                          const struct cl_lock_slice *slice)
1337 {
1338         struct osc_lock *olck = cl2osc_lock(slice);
1339         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1340
1341         LINVRNT(osc_lock_invariant(olck));
1342         if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
1343                 return 0;
1344
1345         LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
1346                      lock->cll_error == 0, olck->ols_lock != NULL));
1347
1348         return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
1349 }
1350
1351 /**
1352  * An implementation of the cl_lock_operations::clo_use() method that pins a
1353  * cached lock.
1354  */
1355 static int osc_lock_use(const struct lu_env *env,
1356                         const struct cl_lock_slice *slice)
1357 {
1358         struct osc_lock *olck = cl2osc_lock(slice);
1359         int rc;
1360
1361         LASSERT(!olck->ols_hold);
1362
1363         /*
1364          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
1365          * flag is not set. This protects us from a concurrent blocking ast.
1366          */
1367         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
1368         if (rc == 0) {
1369                 olck->ols_hold = 1;
1370                 olck->ols_state = OLS_GRANTED;
1371         } else {
1372                 struct cl_lock *lock;
1373
1374                 /*
1375                  * Lock is being cancelled somewhere within
1376                  * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
1377                  * set, but osc_ldlm_blocking_ast() hasn't yet acquired
1378                  * the cl_lock mutex.
1379                  */
1380                 lock = slice->cls_lock;
1381                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1382                 LASSERT(lock->cll_users > 0);
1383                 /* Set a flag for osc_dlm_blocking_ast0() to signal the
1384                  * lock. */
1385                 olck->ols_ast_wait = 1;
1386                 rc = CLO_WAIT;
1387         }
1388         return rc;
1389 }
1390
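/**
 * Writes out (and, if \a discard is set, discards) the pages covered by
 * \a ols before the lock is cancelled. On success the lock is marked as
 * flushed and must no longer protect any pages.
 */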
1391 static int osc_lock_flush(struct osc_lock *ols, int discard)
1392 {
1393         struct cl_lock       *lock  = ols->ols_cl.cls_lock;
1394         struct cl_env_nest    nest;
1395         struct lu_env        *env;
1396         int result = 0;
1397
1398         env = cl_env_nested_get(&nest);
1399         if (!IS_ERR(env)) {
1400                 result = cl_lock_page_out(env, lock, discard);
1401                 cl_env_nested_put(&nest, env);
1402         } else
1403                 result = PTR_ERR(env);
1404         if (result == 0) {
1405                 ols->ols_flush = 1;
1406                 LINVRNT(!osc_lock_has_pages(ols));
1407         }
1408         return result;
1409 }
1410
1411 /**
1412  * Implements the cl_lock_operations::clo_cancel() method for the osc layer.
1413  * This is called (as part of cl_lock_cancel()) when a lock is canceled either
1414  * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
1415  * conflict with some other lock somewhere in the cluster. This function does
1416  * the following:
1417  *
1418  *     - invalidates all pages protected by this lock (after sending dirty
1419  *       ones to the server, as necessary);
1420  *
1421  *     - decrefs the underlying ldlm lock;
1422  *
1423  *     - cancels the ldlm lock (ldlm_cli_cancel()).
1424  */
1425 static void osc_lock_cancel(const struct lu_env *env,
1426                             const struct cl_lock_slice *slice)
1427 {
1428         struct cl_lock   *lock    = slice->cls_lock;
1429         struct osc_lock  *olck    = cl2osc_lock(slice);
1430         struct ldlm_lock *dlmlock = olck->ols_lock;
1431         int               result  = 0;
1432         int               discard;
1433
1434         LASSERT(cl_lock_is_mutexed(lock));
1435         LINVRNT(osc_lock_invariant(olck));
1436
1437         if (dlmlock != NULL) {
1438                 int do_cancel;
1439
1440                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
1441                 result = osc_lock_flush(olck, discard);
1442                 osc_lock_unhold(olck);
1443
1444                 lock_res_and_lock(dlmlock);
1445                 /* Now that we're the only user of the dlm read/write reference,
1446                  * ->l_readers and ->l_writers should normally be zero.
1447                  * However, there is a corner case.
1448                  * See bug 18829 for details. */
1449                 do_cancel = (dlmlock->l_readers == 0 &&
1450                              dlmlock->l_writers == 0);
1451                 dlmlock->l_flags |= LDLM_FL_CBPENDING;
1452                 unlock_res_and_lock(dlmlock);
1453                 if (do_cancel)
1454                         result = ldlm_cli_cancel(&olck->ols_handle);
1455                 if (result < 0)
1456                         CL_LOCK_DEBUG(D_ERROR, env, lock,
1457                                       "lock %p cancel failure with error(%d)\n",
1458                                       lock, result);
1459         }
1460         olck->ols_state = OLS_CANCELLED;
1461         osc_lock_detach(env, olck);
1462 }
1463
1464 void cl_lock_page_list_fixup(const struct lu_env *env,
1465                              struct cl_io *io, struct cl_lock *lock,
1466                              struct cl_page_list *queue);
1467
1468 #ifdef INVARIANT_CHECK
1469 /**
1470  * Returns true iff there are pages under \a olck not protected by other
1471  * locks.
1472  */
1473 static int osc_lock_has_pages(struct osc_lock *olck)
1474 {
1475         struct cl_lock       *lock;
1476         struct cl_lock_descr *descr;
1477         struct cl_object     *obj;
1478         struct osc_object    *oob;
1479         struct cl_page_list  *plist;
1480         struct cl_page       *page;
1481         struct cl_env_nest    nest;
1482         struct cl_io         *io;
1483         struct lu_env        *env;
1484         int                   result;
1485
1486         env = cl_env_nested_get(&nest);
1487         if (!IS_ERR(env)) {
1488                 obj   = olck->ols_cl.cls_obj;
1489                 oob   = cl2osc(obj);
1490                 io    = &oob->oo_debug_io;
1491                 lock  = olck->ols_cl.cls_lock;
1492                 descr = &lock->cll_descr;
1493                 plist = &osc_env_info(env)->oti_plist;
1494                 cl_page_list_init(plist);
1495
1496                 mutex_lock(&oob->oo_debug_mutex);
1497
1498                 io->ci_obj = cl_object_top(obj);
1499                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
1500                 cl_page_gang_lookup(env, obj, io,
1501                                     descr->cld_start, descr->cld_end, plist, 0);
1502                 cl_lock_page_list_fixup(env, io, lock, plist);
1503                 if (plist->pl_nr > 0) {
1504                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
1505                         cl_page_list_for_each(page, plist)
1506                                 CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
1507                 }
1508                 result = plist->pl_nr > 0;
1509                 cl_page_list_disown(env, io, plist);
1510                 cl_page_list_fini(env, plist);
1511                 cl_io_fini(env, io);
1512                 mutex_unlock(&oob->oo_debug_mutex);
1513                 cl_env_nested_put(&nest, env);
1514         } else
1515                 result = 0;
1516         return result;
1517 }
1518 #else
1519 static int osc_lock_has_pages(struct osc_lock *olck)
1520 {
1521         return 0;
1522 }
1523 #endif /* INVARIANT_CHECK */
1524
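/**
 * Implements cl_lock_operations::clo_delete() method for osc layer: drops the
 * hold on the underlying ldlm lock and detaches it from the osc_lock. Glimpse
 * locks hold neither an ldlm reference nor pages, so they are skipped.
 */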
1525 static void osc_lock_delete(const struct lu_env *env,
1526                             const struct cl_lock_slice *slice)
1527 {
1528         struct osc_lock *olck;
1529
1530         olck = cl2osc_lock(slice);
1531         if (olck->ols_glimpse) {
1532                 LASSERT(!olck->ols_hold);
1533                 LASSERT(!olck->ols_lock);
1534                 return;
1535         }
1536
1537         LINVRNT(osc_lock_invariant(olck));
1538         LINVRNT(!osc_lock_has_pages(olck));
1539
1540         osc_lock_unhold(olck);
1541         osc_lock_detach(env, olck);
1542 }
1543
1544 /**
1545  * Implements cl_lock_operations::clo_state() method for osc layer.
1546  *
1547  * Maintains osc_lock::ols_owner field.
1548  *
1549  * This assumes that a lock always enters CLS_HELD (from some other state) in
1550  * the same IO context as the one that requested the lock. This should not be a
1551  * problem, because the context is by definition shared by all activity
1552  * pertaining to the same high-level IO.
1553  */
1554 static void osc_lock_state(const struct lu_env *env,
1555                            const struct cl_lock_slice *slice,
1556                            enum cl_lock_state state)
1557 {
1558         struct osc_lock *lock = cl2osc_lock(slice);
1559         struct osc_io   *oio  = osc_env_io(env);
1560
1561         /*
1562          * XXX multiple io contexts can use the lock at the same time.
1563          */
1564         LINVRNT(osc_lock_invariant(lock));
1565         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
1566                 LASSERT(lock->ols_owner == NULL);
1567                 lock->ols_owner = oio;
1568         } else if (state != CLS_HELD)
1569                 lock->ols_owner = NULL;
1570 }
1571
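/**
 * Implements cl_lock_operations::clo_print() method for osc layer: dumps the
 * osc_lock state (ldlm lock, flags, handle cookie, state, owner and lvb).
 */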
1572 static int osc_lock_print(const struct lu_env *env, void *cookie,
1573                           lu_printer_t p, const struct cl_lock_slice *slice)
1574 {
1575         struct osc_lock *lock = cl2osc_lock(slice);
1576
1577         /*
1578          * XXX print ldlm lock and einfo properly.
1579          */
1580         (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
1581              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
1582              lock->ols_state, lock->ols_owner);
1583         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
1584         return 0;
1585 }
1586
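/**
 * Implements cl_lock_operations::clo_fits_into() method for osc layer:
 * decides whether an existing osc lock can be matched against the request
 * described by \a need.
 */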
1587 static int osc_lock_fits_into(const struct lu_env *env,
1588                               const struct cl_lock_slice *slice,
1589                               const struct cl_lock_descr *need,
1590                               const struct cl_io *io)
1591 {
1592         struct osc_lock *ols = cl2osc_lock(slice);
1593
1594         /* If the lock has never been enqueued, it can't be matched, because
1595          * the enqueue process brings in information that is needed to
1596          * determine things such as lockless, CEF_MUST, etc.
1597          */
1598         if (ols->ols_state < OLS_ENQUEUED)
1599                 return 0;
1600
1601         /* Don't match this lock if it can still become a lockless lock.
1602          * This is because the new lock might be covering an mmap region,
1603          * in which case it must be cached at the local side. */
1604         if (ols->ols_state < OLS_UPCALL_RECEIVED && ols->ols_locklessable)
1605                 return 0;
1606
1607         /* If the lock is going to be canceled, there is no reason to match it. */
1608         if (ols->ols_state > OLS_RELEASED)
1609                 return 0;
1610
1611         /* go for it. */
1612         return 1;
1613 }
1614
1615 static const struct cl_lock_operations osc_lock_ops = {
1616         .clo_fini    = osc_lock_fini,
1617         .clo_enqueue = osc_lock_enqueue,
1618         .clo_wait    = osc_lock_wait,
1619         .clo_unuse   = osc_lock_unuse,
1620         .clo_use     = osc_lock_use,
1621         .clo_delete  = osc_lock_delete,
1622         .clo_state   = osc_lock_state,
1623         .clo_cancel  = osc_lock_cancel,
1624         .clo_weigh   = osc_lock_weigh,
1625         .clo_print   = osc_lock_print,
1626         .clo_fits_into = osc_lock_fits_into,
1627 };
1628
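/**
 * A lockless lock is only created by converting a regular lock from within
 * osc_lock_enqueue() (see osc_lock_to_lockless()), so clo_enqueue() is never
 * called on the lockless operations; LBUG() if it ever is.
 */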
1629 static int osc_lock_lockless_enqueue(const struct lu_env *env,
1630                                      const struct cl_lock_slice *slice,
1631                                      struct cl_io *unused, __u32 enqflags)
1632 {
1633         LBUG();
1634         return 0;
1635 }
1636
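/**
 * Implements cl_lock_operations::clo_unuse() method for lockless locks: a
 * lockless lock cannot be cached, so it is cancelled and deleted as soon as
 * its user releases it.
 */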
1637 static int osc_lock_lockless_unuse(const struct lu_env *env,
1638                                    const struct cl_lock_slice *slice)
1639 {
1640         struct osc_lock *ols = cl2osc_lock(slice);
1641         struct cl_lock *lock = slice->cls_lock;
1642
1643         LASSERT(ols->ols_state == OLS_GRANTED);
1644         LINVRNT(osc_lock_invariant(ols));
1645
1646         cl_lock_cancel(env, lock);
1647         cl_lock_delete(env, lock);
1648         return 0;
1649 }
1650
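/**
 * Implements cl_lock_operations::clo_cancel() method for lockless locks:
 * there is no ldlm lock to cancel, so only the covered pages are flushed out.
 */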
1651 static void osc_lock_lockless_cancel(const struct lu_env *env,
1652                                      const struct cl_lock_slice *slice)
1653 {
1654         struct osc_lock   *ols  = cl2osc_lock(slice);
1655         int result;
1656
1657         result = osc_lock_flush(ols, 0);
1658         if (result)
1659                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
1660                        ols, result);
1661         ols->ols_state = OLS_CANCELLED;
1662 }
1663
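/**
 * Implements cl_lock_operations::clo_wait() method for lockless locks: the
 * lock was already granted at enqueue time, so only an error recorded on the
 * cl_lock can be returned here.
 */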
1664 static int osc_lock_lockless_wait(const struct lu_env *env,
1665                                   const struct cl_lock_slice *slice)
1666 {
1667         struct osc_lock *olck = cl2osc_lock(slice);
1668         struct cl_lock  *lock = olck->ols_cl.cls_lock;
1669
1670         LINVRNT(osc_lock_invariant(olck));
1671         LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
1672
1673         return lock->cll_error;
1674 }
1675
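/**
 * Implements cl_lock_operations::clo_state() method for lockless locks:
 * tracks the owning IO and, when the lock is held for the IO's host object,
 * marks that IO as lockless.
 */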
1676 static void osc_lock_lockless_state(const struct lu_env *env,
1677                                     const struct cl_lock_slice *slice,
1678                                     enum cl_lock_state state)
1679 {
1680         struct osc_lock *lock = cl2osc_lock(slice);
1681         struct osc_io   *oio  = osc_env_io(env);
1682
1683         LINVRNT(osc_lock_invariant(lock));
1684         if (state == CLS_HELD) {
1685                 LASSERT(lock->ols_owner == NULL);
1686                 lock->ols_owner = oio;
1687
1688                 /* Set the io to be lockless if this lock is for the io's
1689                  * host object. */
1690                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
1691                         oio->oi_lockless = 1;
1692         } else
1693                 lock->ols_owner = NULL;
1694 }
1695
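/**
 * A lockless lock is never matched against other lock requests:
 * clo_fits_into() unconditionally fails the match.
 */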
1696 static int osc_lock_lockless_fits_into(const struct lu_env *env,
1697                                        const struct cl_lock_slice *slice,
1698                                        const struct cl_lock_descr *need,
1699                                        const struct cl_io *io)
1700 {
1701         return 0;
1702 }
1703
1704 static const struct cl_lock_operations osc_lock_lockless_ops = {
1705         .clo_fini      = osc_lock_fini,
1706         .clo_enqueue   = osc_lock_lockless_enqueue,
1707         .clo_wait      = osc_lock_lockless_wait,
1708         .clo_unuse     = osc_lock_lockless_unuse,
1709         .clo_state     = osc_lock_lockless_state,
1710         .clo_fits_into = osc_lock_lockless_fits_into,
1711         .clo_cancel    = osc_lock_lockless_cancel,
1712         .clo_print     = osc_lock_print
1713 };
1714
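/**
 * Allocates an osc_lock slice, fills in its enqueue info and attaches it to
 * \a lock with the regular (dlm based) lock operations; the conversion to a
 * lockless lock, if any, happens later in osc_lock_enqueue().
 */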
1715 int osc_lock_init(const struct lu_env *env,
1716                   struct cl_object *obj, struct cl_lock *lock,
1717                   const struct cl_io *unused)
1718 {
1719         struct osc_lock *clk;
1720         int result;
1721
1722         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
1723         if (clk != NULL) {
1724                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
1725                 clk->ols_state = OLS_NEW;
1726                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
1727                 result = 0;
1728         } else
1729                 result = -ENOMEM;
1730         return result;
1731 }
1732
1733
1734 /** @} osc */