1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This code is issued under the GNU General Public License.
7 * See the file COPYING in this distribution
9 * by Cluster File Systems, Inc.
12 #define DEBUG_SUBSYSTEM S_LDLM
14 #include <linux/lustre_dlm.h>
15 #include <linux/obd_class.h>
16 #include <linux/obd.h>
/* Signal-interruption callback handed to LWI_TIMEOUT_INTR() by
 * ldlm_completion_ast() below.  Invoked if the lock wait is interrupted.
 * NOTE(review): the function body is elided in this extraction -- its
 * return-value semantics cannot be verified from here. */
18 static int interrupted_completion_wait(void *data)
/* Timeout callback handed to LWI_TIMEOUT_INTR() by ldlm_completion_ast():
 * fired when waiting for a lock grant exceeds obd_timeout.  Logs which lock
 * timed out and, when the lock carries an export, signals a connection
 * failure on that export's connection so upper layers can recover.
 * NOTE(review): interior lines are elided here (the leading NULL check's
 * 'if' is not visible, nor the return); comments describe visible code only. */
23 static int expired_completion_wait(void *data)
25 struct ldlm_lock *lock = data;
/* presumably inside an (elided) 'if (lock == NULL)' guard -- TODO confirm */
27 CERROR("NULL lock\n");
28 else if (!lock->l_export)
29 CERROR("lock %p has NULL export\n", lock);
/* lock has an export: report its connection as failed */
31 class_signal_connection_failure(lock->l_export->exp_connection);
/* Default client-side completion AST: wait until the lock is granted,
 * i.e. until l_req_mode == l_granted_mode.
 *
 * Wait policy selected by 'flags' (visible branches):
 *   - any of LDLM_FL_BLOCK_WAIT / _GRANTED / _CONV: reprocess the resource,
 *     then sleep on l_waitq with timeout obd_timeout*HZ, using
 *     expired_completion_wait / interrupted_completion_wait as callbacks;
 *   - LDLM_FL_WAIT_NOREPROC: sleep on l_waitq without reprocessing first;
 *   - 0: nothing to wait for -- just wake anyone sleeping on l_waitq.
 * NOTE(review): several lines are elided in this extraction (the l_wait_event
 * lwi argument, RETURN statements, debug-format continuations). */
35 int ldlm_completion_ast(struct ldlm_lock *lock, int flags)
/* timeout + interruptible wait descriptor for l_wait_event below */
37 struct l_wait_info lwi =
38 LWI_TIMEOUT_INTR(obd_timeout * HZ, expired_completion_wait,
39 interrupted_completion_wait, lock);
43 if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
44 LDLM_FL_BLOCK_CONV)) {
45 /* Go to sleep until the lock is granted. */
46 /* FIXME: or cancelled. */
47 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock,"
/* give other lock requests a chance before we block */
50 ldlm_reprocess_all(lock->l_resource);
51 rc = l_wait_event(lock->l_waitq,
52 (lock->l_req_mode == lock->l_granted_mode),
56 "client-side enqueue waking up: failed (%d)",
60 "client-side enqueue waking up: granted");
/* caller asked us to wait without reprocessing the resource first */
62 } else if (flags == LDLM_FL_WAIT_NOREPROC) {
63 rc = l_wait_event(lock->l_waitq,
64 (lock->l_req_mode == lock->l_granted_mode),
/* no blocking flags at all: lock is usable; wake existing waiters */
66 } else if (flags == 0) {
67 wake_up(&lock->l_waitq);
/* Enqueue a lock entirely within the local namespace (no RPC): create the
 * lock, take a mode reference, run ldlm_lock_enqueue(), copy back any
 * server-adjusted extent / resource name, and finally run the lock's
 * completion AST with the resulting flags.
 * NOTE(review): interior lines are elided (the ns_client guard's 'if', the
 * NULL check after ldlm_lock_create, GOTO labels, RETURN); comments are
 * limited to what the visible code shows. */
73 static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
74 struct lustre_handle *parent_lockh,
77 void *cookie, int cookielen,
80 ldlm_completion_callback completion,
81 ldlm_blocking_callback blocking,
84 struct lustre_handle *lockh)
86 struct ldlm_lock *lock;
/* NOTE(review): message says "cancel" but this is the enqueue path --
 * looks like a copy-paste from the cancel handler; worth fixing upstream */
90 CERROR("Trying to cancel local lock\n");
94 lock = ldlm_lock_create(ns, parent_lockh, res_id, type, mode, data, data_len);
96 GOTO(out_nolock, err = -ENOMEM);
97 LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
/* take the caller's mode reference and hand back a handle */
99 ldlm_lock_addref_internal(lock, mode);
100 ldlm_lock2handle(lock, lockh);
/* NULL l_connh marks this as a purely local (non-RPC) lock */
101 lock->l_connh = NULL;
103 err = ldlm_lock_enqueue(lock, cookie, cookielen, flags, completion,
/* extent locks: report the actually-granted extent back to the caller */
108 if (type == LDLM_EXTENT)
109 memcpy(cookie, &lock->l_extent, sizeof(lock->l_extent));
/* enqueue may have moved the lock to another resource; tell the caller */
110 if ((*flags) & LDLM_FL_LOCK_CHANGED)
111 memcpy(res_id, lock->l_resource->lr_name, sizeof(*res_id));
113 LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
/* run the completion AST with the final flag state */
116 if (lock->l_completion_ast)
117 lock->l_completion_ast(lock, *flags);
119 LDLM_DEBUG(lock, "client-side local enqueue END");
/* Client-side enqueue over the network: create a local lock, send an
 * LDLM_ENQUEUE RPC on 'connh', then fold the server's reply (remote handle,
 * possibly-changed mode, extent, or resource name) back into the local lock
 * before running ldlm_lock_enqueue() and the completion AST.
 *
 * Falls back to ldlm_cli_enqueue_local() near the top -- presumably when
 * 'connh' is NULL (the guarding 'if' is elided here; TODO confirm).  'req'
 * may be supplied by the caller (req_passed_in) or allocated here.
 * NOTE(review): many interior lines are elided in this extraction (error
 * paths, closing braces, RETURN/GOTO targets); comments are limited to
 * visible code. */
127 int ldlm_cli_enqueue(struct lustre_handle *connh,
128 struct ptlrpc_request *req,
129 struct ldlm_namespace *ns,
130 struct lustre_handle *parent_lock_handle,
133 void *cookie, int cookielen,
136 ldlm_completion_callback completion,
137 ldlm_blocking_callback blocking,
140 struct lustre_handle *lockh)
142 struct ldlm_lock *lock;
143 struct ldlm_request *body;
144 struct ldlm_reply *reply;
145 int rc, size = sizeof(*body), req_passed_in = 1;
/* no connection: service the enqueue without any RPC */
149 return ldlm_cli_enqueue_local(ns, parent_lock_handle, res_id,
150 type, cookie, cookielen, mode,
151 flags, completion, blocking, data,
155 lock = ldlm_lock_create(ns, parent_lock_handle, res_id, type, mode,
158 GOTO(out_nolock, rc = -ENOMEM);
159 LDLM_DEBUG(lock, "client-side enqueue START");
160 /* for the local lock, add the reference */
161 ldlm_lock_addref_internal(lock, mode);
162 ldlm_lock2handle(lock, lockh);
/* presumably inside an (elided) 'if (req == NULL)': allocate our own
 * request, clearing req_passed_in -- TODO confirm */
165 req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_ENQUEUE, 1,
168 GOTO(out, rc = -ENOMEM);
/* caller-supplied request must already have a correctly-sized buffer 0 */
170 } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
173 /* Dump all of this data into the request buffer */
174 body = lustre_msg_buf(req->rq_reqmsg, 0);
175 ldlm_lock2desc(lock, &body->lock_desc);
176 /* Phil: make this part of ldlm_lock2desc */
177 if (type == LDLM_EXTENT)
178 memcpy(&body->lock_desc.l_extent, cookie,
179 sizeof(body->lock_desc.l_extent));
180 body->lock_flags = *flags;
182 memcpy(&body->lock_handle1, lockh, sizeof(*lockh));
183 if (parent_lock_handle)
184 memcpy(&body->lock_handle2, parent_lock_handle,
185 sizeof(body->lock_handle2));
187 /* Continue as normal. */
/* only size the reply buffer for requests we allocated ourselves */
188 if (!req_passed_in) {
189 size = sizeof(*reply);
190 req->rq_replen = lustre_msg_size(1, &size);
192 lock->l_connh = connh;
193 lock->l_export = NULL;
/* ship the RPC; fold transport status and reply status into rc */
195 rc = ptlrpc_queue_wait(req);
196 /* FIXME: status check here? */
197 rc = ptlrpc_check_status(req, rc);
/* server refused or aborted: drop our reference and tear the lock down */
199 if (rc != ELDLM_OK) {
200 LDLM_DEBUG(lock, "client-side enqueue END (%s)",
201 rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED")
202 ldlm_lock_decref(lockh, mode);
203 /* FIXME: if we've already received a completion AST, this will
205 ldlm_lock_destroy(lock);
/* success: record the server's handle/extent/flags on the local lock */
209 reply = lustre_msg_buf(req->rq_repmsg, 0);
210 memcpy(&lock->l_remote_handle, &reply->lock_handle,
211 sizeof(lock->l_remote_handle));
212 if (type == LDLM_EXTENT)
213 memcpy(cookie, &reply->lock_extent, sizeof(reply->lock_extent));
214 *flags = reply->lock_flags;
216 CDEBUG(D_INFO, "remote handle: %p, flags: %d\n",
217 (void *)(unsigned long)reply->lock_handle.addr, *flags);
218 CDEBUG(D_INFO, "extent: %Lu -> %Lu\n",
219 (unsigned long long)reply->lock_extent.start,
220 (unsigned long long)reply->lock_extent.end);
222 /* If enqueue returned a blocked lock but the completion handler has
223 * already run, then it fixed up the resource and we don't need to do it
/* server changed the lock: adopt its mode and/or move the lock onto the
 * resource the server actually granted (e.g. after an intent operation) */
225 if ((*flags) & LDLM_FL_LOCK_CHANGED) {
226 int newmode = reply->lock_mode;
227 if (newmode && newmode != lock->l_req_mode) {
228 LDLM_DEBUG(lock, "server returned different mode %s",
229 ldlm_lockname[newmode]);
230 lock->l_req_mode = newmode;
233 if (reply->lock_resource_name[0] !=
234 lock->l_resource->lr_name[0]) {
235 CDEBUG(D_INFO, "remote intent success, locking %ld "
237 (long)reply->lock_resource_name[0],
238 (long)lock->l_resource->lr_name[0]);
240 ldlm_lock_change_resource(lock,
241 reply->lock_resource_name);
242 if (lock->l_resource == NULL) {
246 LDLM_DEBUG(lock, "client-side enqueue, new resource");
/* NOTE(review): presumably only frees requests we allocated ourselves
 * (!req_passed_in) -- the guarding condition is elided; TODO confirm */
251 ptlrpc_free_req(req);
/* now run the local enqueue state machine and the completion AST */
253 rc = ldlm_lock_enqueue(lock, cookie, cookielen, flags, completion,
255 if (lock->l_completion_ast)
256 lock->l_completion_ast(lock, *flags);
258 LDLM_DEBUG(lock, "client-side enqueue END");
/* Convenience wrapper: first try to match an existing compatible lock in the
 * namespace (ldlm_lock_match); if that fails -- the guarding condition is
 * elided here -- fall through to a full client-side enqueue.
 * NOTE(review): return-value plumbing between the two calls is not fully
 * visible in this extraction. */
266 int ldlm_match_or_enqueue(struct lustre_handle *connh,
267 struct ptlrpc_request *req,
268 struct ldlm_namespace *ns,
269 struct lustre_handle *parent_lock_handle,
272 void *cookie, int cookielen,
275 ldlm_completion_callback completion,
276 ldlm_blocking_callback blocking,
279 struct lustre_handle *lockh)
/* cheap path: reuse a matching granted lock if one exists */
283 rc = ldlm_lock_match(ns, res_id, type, cookie, cookielen, mode, lockh);
/* no match (condition elided): do the real enqueue */
285 rc = ldlm_cli_enqueue(connh, req, ns,
286 parent_lock_handle, res_id, type, cookie,
287 cookielen, mode, flags, completion,
288 blocking, data, data_len, lockh);
290 CERROR("ldlm_cli_enqueue: err: %d\n", rc);
/* Convert a lock's mode entirely locally (no RPC), then reprocess the
 * resource so any newly-compatible waiters can be granted.
 * NOTE(review): interior lines are elided (return statements, closing
 * braces); comments describe visible code only. */
296 static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
/* refuse on a client namespace -- this path is for server-local locks */
300 if (lock->l_resource->lr_namespace->ns_client) {
/* NOTE(review): message says "cancel" but this is the convert path --
 * apparent copy-paste from the cancel handler; worth fixing upstream */
301 CERROR("Trying to cancel local lock\n");
304 LDLM_DEBUG(lock, "client-side local convert");
306 ldlm_lock_convert(lock, new_mode, flags);
307 ldlm_reprocess_all(lock->l_resource);
309 LDLM_DEBUG(lock, "client-side local convert handler END");
/* Convert an existing lock (looked up from 'lockh') to 'new_mode' via an
 * LDLM_CONVERT RPC on the lock's l_connh.  When l_connh is NULL -- the
 * guarding check is elided here -- the conversion is done locally via
 * ldlm_cli_convert_local().  On reply, the conversion is applied locally,
 * the resource is reprocessed, and the completion AST is run with
 * LDLM_FL_WAIT_NOREPROC (wait for the grant without reprocessing again).
 * NOTE(review): interior lines (NULL checks, error paths, RETURN) are
 * elided in this extraction. */
314 int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
316 struct ldlm_request *body;
317 struct lustre_handle *connh;
318 struct ldlm_reply *reply;
319 struct ldlm_lock *lock;
320 struct ldlm_resource *res;
321 struct ptlrpc_request *req;
322 int rc, size = sizeof(*body);
325 lock = ldlm_handle2lock(lockh);
331 connh = lock->l_connh;
/* presumably reached when connh == NULL (elided check) -- TODO confirm */
334 return ldlm_cli_convert_local(lock, new_mode, flags);
336 LDLM_DEBUG(lock, "client-side convert");
338 req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_CONVERT, 1, &size,
341 GOTO(out, rc = -ENOMEM);
/* fill the request: the server identifies the lock by its remote handle */
343 body = lustre_msg_buf(req->rq_reqmsg, 0);
344 memcpy(&body->lock_handle1, &lock->l_remote_handle,
345 sizeof(body->lock_handle1));
347 body->lock_desc.l_req_mode = new_mode;
348 body->lock_flags = *flags;
350 size = sizeof(*reply);
351 req->rq_replen = lustre_msg_size(1, &size);
/* ship the RPC; fold transport status and reply status into rc */
353 rc = ptlrpc_queue_wait(req);
354 rc = ptlrpc_check_status(req, rc);
/* apply the conversion locally with the flags the server returned */
358 reply = lustre_msg_buf(req->rq_repmsg, 0);
359 res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
361 ldlm_reprocess_all(res);
362 /* Go to sleep until the lock is granted. */
363 /* FIXME: or cancelled. */
364 if (lock->l_completion_ast)
365 lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
369 ptlrpc_free_req(req);
/* Cancel the lock referenced by 'lockh': mark it CBPENDING under the
 * namespace lock (so it can take no new references), run the cancel
 * callback, send an LDLM_CANCEL RPC to the server via l_connh, then cancel
 * the lock locally.  A separate local-only branch (its guard is elided,
 * presumably l_connh == NULL) skips the RPC and reprocesses the resource.
 * NOTE(review): the remote and local-only paths are interleaved below
 * because many lines are elided in this extraction; comments are limited
 * to visible code. */
373 int ldlm_cli_cancel(struct lustre_handle *lockh)
375 struct ptlrpc_request *req;
376 struct ldlm_lock *lock;
377 struct ldlm_request *body;
378 int rc = 0, size = sizeof(*body);
381 lock = ldlm_handle2lock(lockh);
383 /* It's possible that the decref that we did just before this
384 * cancel was the last reader/writer, and caused a cancel before
385 * we could call this function. If we want to make this
386 * impossible (by adding a dec_and_cancel() or similar), then
387 * we can put the LBUG back. */
393 LDLM_DEBUG(lock, "client-side cancel");
394 /* Set this flag to prevent others from getting new references*/
395 l_lock(&lock->l_resource->lr_namespace->ns_lock);
396 lock->l_flags |= LDLM_FL_CBPENDING;
/* run the cancellation callback while still holding ns_lock */
397 ldlm_cancel_callback(lock);
398 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* tell the server, identifying the lock by its remote handle */
400 req = ptlrpc_prep_req(class_conn2cliimp(lock->l_connh),
401 LDLM_CANCEL, 1, &size, NULL);
403 GOTO(out, rc = -ENOMEM);
405 body = lustre_msg_buf(req->rq_reqmsg, 0);
406 memcpy(&body->lock_handle1, &lock->l_remote_handle,
407 sizeof(body->lock_handle1));
/* cancel replies carry no body */
409 req->rq_replen = lustre_msg_size(0, NULL);
411 rc = ptlrpc_queue_wait(req);
412 rc = ptlrpc_check_status(req, rc);
413 ptlrpc_free_req(req);
/* server notified: now drop the lock locally */
417 ldlm_lock_cancel(lock);
/* --- local-only branch (guard elided; presumably no l_connh) --- */
419 LDLM_DEBUG(lock, "client-side local cancel");
420 if (lock->l_resource->lr_namespace->ns_client) {
421 CERROR("Trying to cancel local lock\n");
424 ldlm_lock_cancel(lock);
425 ldlm_reprocess_all(lock->l_resource);
426 LDLM_DEBUG(lock, "client-side local cancel handler END");
435 /* Cancel all locks on a given resource that have 0 readers/writers.
437 * If 'local_only' is true, throw the locks away without trying to notify the
 * server.  (NOTE(review): the end of this comment is truncated in this
 * extraction.)
439 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, __u64 *res_id,
442 struct ldlm_resource *res;
443 struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
444 struct ldlm_ast_work *w;
447 res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
/* Pass 1: under ns_lock, collect every zero-reference granted lock into a
 * private work list, taking a reference on each so it survives pass 2. */
451 l_lock(&ns->ns_lock);
452 list_for_each(tmp, &res->lr_granted) {
453 struct ldlm_lock *lock;
454 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
/* skip locks that are still in use */
456 if (lock->l_readers || lock->l_writers)
459 /* Setting the CBPENDING flag is a little misleading, but
460 * prevents an important race; namely, once CBPENDING is set,
461 * the lock can accumulate no more readers/writers. Since
462 * readers and writers are already zero here, ldlm_lock_decref
463 * won't see this flag and call l_blocking_ast */
464 lock->l_flags |= LDLM_FL_CBPENDING;
466 OBD_ALLOC(w, sizeof(*w));
469 w->w_lock = LDLM_LOCK_GET(lock);
470 list_add(&w->w_list, &list);
472 l_unlock(&ns->ns_lock);
/* Pass 2: outside ns_lock (ldlm_cli_cancel takes it itself), cancel each
 * collected lock -- locally via ldlm_lock_cancel (presumably the
 * 'local_only' branch; its guard is elided) or via a cancel RPC -- then
 * drop the reference and free the work item. */
474 list_for_each_safe(tmp, next, &list) {
475 struct lustre_handle lockh;
477 w = list_entry(tmp, struct ldlm_ast_work, w_list);
480 ldlm_lock_cancel(w->w_lock);
482 ldlm_lock2handle(w->w_lock, &lockh);
483 rc = ldlm_cli_cancel(&lockh);
485 CERROR("ldlm_cli_cancel: %d\n", rc);
487 LDLM_LOCK_PUT(w->w_lock);
488 list_del(&w->w_list);
489 OBD_FREE(w, sizeof(*w));
/* drop the reference taken by ldlm_resource_get */
492 ldlm_resource_put(res);