/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_plain.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <lustre_lib.h>
#else
#include <liblustre.h>
#endif

#include "ldlm_internal.h"

#ifdef HAVE_SERVER_SUPPORT
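/*
 * Plain locks carry no per-lock policy data; compatibility is decided
 * purely by lock mode.  For reference, a sketch of the classic DLM
 * compatibility matrix that lockmode_compat() encodes (see the
 * LCK_COMPAT_* definitions in lustre_dlm.h for the authoritative table):
 *
 *            NL  CR  CW  PR  PW  EX
 *        NL   1   1   1   1   1   1
 *        CR   1   1   1   1   1   0
 *        CW   1   1   1   0   0   0
 *        PR   1   1   0   1   0   0
 *        PW   1   1   0   0   0   0
 *        EX   1   0   0   0   0   0
 *
 * ldlm_plain_compat_queue() walks @queue and returns 1 if @req is
 * compatible with every lock already there, 0 otherwise.  With a
 * non-NULL @work_list it keeps scanning after a conflict so that every
 * blocking lock gets queued for an AST.
 */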
static inline int
ldlm_plain_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                        cfs_list_t *work_list)
{
        cfs_list_t *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        int compat = 1;
        ENTRY;

        lockmode_verify(req_mode);

        cfs_list_for_each(tmp, queue) {
                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        RETURN(compat);

                /* Jump to the last lock of this mode group; locks of one
                 * mode are linked together through l_sl_mode, so the whole
                 * group can be handled as a unit. */
                tmp = &cfs_list_entry(lock->l_sl_mode.prev,
                                      struct ldlm_lock,
                                      l_sl_mode)->l_res_link;

                if (lockmode_compat(lock->l_req_mode, req_mode))
                        continue;

                if (!work_list)
                        RETURN(0);

                compat = 0;

                /* add locks of the mode group to @work_list as
                 * blocking locks for @req */
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, work_list);

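                /* The first lock of the mode group was queued just above;
                 * now walk the remaining members of the group. */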
                {
                        cfs_list_t *head;

                        head = &lock->l_sl_mode;
                        cfs_list_for_each_entry(lock, head, l_sl_mode)
                                if (lock->l_blocking_ast)
                                        ldlm_add_ast_work_item(lock, req,
                                                               work_list);
                }
        }

        RETURN(compat);
}

/* If first_enq is 0 (i.e., called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - must call this function with the resource lock held
 *
 * If first_enq is 1 (i.e., called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - must call this function with the resource lock held */
int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                            ldlm_error_t *err, cfs_list_t *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        CFS_LIST_HEAD(rpc_list);
        int rc;
        ENTRY;

        check_res_locked(res);
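        /* Plain locks do not use lock conversion, so the converting
         * queue is expected to be empty here. */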
        LASSERT(cfs_list_empty(&res->lr_converting));

        if (!first_enq) {
                LASSERT(work_list != NULL);
                rc = ldlm_plain_compat_queue(&res->lr_granted, lock, NULL);
                if (!rc)
                        RETURN(LDLM_ITER_STOP);
                rc = ldlm_plain_compat_queue(&res->lr_waiting, lock, NULL);
                if (!rc)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
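        /* Check the new lock against both queues in one pass; each fully
         * compatible queue contributes 1 to rc, so rc == 2 means the lock
         * can be granted immediately. */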
        rc = ldlm_plain_compat_queue(&res->lr_granted, lock, &rpc_list);
        rc += ldlm_plain_compat_queue(&res->lr_waiting, lock, &rpc_list);

        if (rc != 2) {
                /* If either compat_queue() call returned 0, we have
                 * blocking ASTs to send and the lock must go onto the
                 * waiting list.
                 *
                 * bug 2322: we used to unlink and re-add the lock here,
                 * which was a terrible folly -- if we goto restart, the
                 * lock could be re-ordered and a deadlock would follow,
                 * because the ASTs would never be sent! */
                if (cfs_list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                unlock_res(res);
                rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
                                       LDLM_WORK_BL_AST);
                lock_res(res);
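                /* While the resource was unlocked for ldlm_run_ast_work(),
                 * the granted and waiting queues may have changed, so
                 * -ERESTART forces a fresh compatibility check. */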
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
        } else {
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL);
        }
        RETURN(0);
}
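
/*
 * Illustrative sketch only: assume a PR lock is granted and an EX lock
 * is then enqueued on the same resource (first_enq == 1).
 *
 *   granted: [ PR ]        request: EX
 *
 *   - ldlm_plain_compat_queue(&lr_granted, ...) sees PR vs. EX, queues
 *     a blocking AST for the PR holder on rpc_list and returns 0;
 *   - rc != 2, so the EX lock is put on lr_waiting, the blocking AST is
 *     sent with the resource unlocked, and the caller gets
 *     LDLM_FL_BLOCK_GRANTED back in @flags;
 *   - when the PR lock is cancelled, the queue is reprocessed with
 *     first_enq == 0 and the EX lock is granted.
 */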
#endif /* HAVE_SERVER_SUPPORT */

void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                     ldlm_policy_data_t *lpolicy)
{
        /* No policy for plain locks */
}

void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                     ldlm_wire_policy_data_t *wpolicy)
{
        /* No policy for plain locks */
}