1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This code is issued under the GNU General Public License.
7 * See the file COPYING in this distribution
9 * by Cluster File Systems, Inc.
12 #define DEBUG_SUBSYSTEM S_LDLM
14 #include <linux/lustre_dlm.h>
15 #include <linux/obd_class.h>
16 #include <linux/obd.h>
/* Interrupt callback passed to LWI_TIMEOUT_INTR() in ldlm_completion_ast().
 * NOTE(review): the body is not visible in this excerpt — presumably it just
 * records/permits the interruption and returns; confirm in the full source. */
18 static int interrupted_completion_wait(void *data)
/* Timeout callback for ldlm_completion_ast(): the client-side enqueue has
 * waited obd_timeout seconds without the lock being granted.  Walks the
 * chain lock -> l_connh -> obd -> client import -> connection, CERROR()ing
 * whichever link is NULL, and if a connection is found signals a connection
 * failure so upper layers can start recovery.
 * NOTE(review): lines are missing from this excerpt (the initial
 * `if (!lock)` head, the `else` before the signal call, and the return are
 * not visible) — verify the exact control flow against the full file. */
23 static int expired_completion_wait(void *data)
25         struct ldlm_lock *lock = data;
26         struct ptlrpc_connection *conn;
27         struct obd_device *obd;
/* Diagnose exactly which pointer in the chain is missing before giving up. */
30                 CERROR("NULL lock\n");
31         else if (!lock->l_connh)
32                 CERROR("lock %p has NULL connh\n", lock);
33         else if (!(obd = class_conn2obd(lock->l_connh)))
34                 CERROR("lock %p has NULL obd\n", lock);
35         else if (!(conn = obd->u.cli.cl_import.imp_connection))
36                 CERROR("lock %p has NULL connection\n", lock);
/* Full chain resolved: flag the connection as failed to trigger recovery. */
38                 class_signal_connection_failure(conn);
/* Alternate (compact) variant of the timeout callback above — same contract:
 * called when an enqueue wait times out; signals a connection failure on the
 * lock's import connection.  NOTE(review): two definitions of
 * expired_completion_wait() appear in this excerpt; they are presumably the
 * two arms of an #ifdef that is not visible here — confirm which one builds. */
44 static int expired_completion_wait(void *data)
46         struct ldlm_lock *lock = data;
47         struct ptlrpc_connection *conn =
48                 class_conn2cliimp(lock->l_connh)->imp_connection;
/* Missing import connection is only logged; nothing to signal in that case. */
51                 CERROR("lock %p has NULL import connection\n", lock);
55                 class_signal_connection_failure(conn);
/* Client-side completion AST: block the caller until @lock is granted.
 *
 * @lock:  the lock being waited on
 * @flags: LDLM_FL_BLOCK_* => sleep (with obd_timeout + interrupt handling)
 *         until l_req_mode == l_granted_mode;
 *         LDLM_FL_WAIT_NOREPROC => same wait but without reprocessing the
 *         resource first;
 *         0 => the lock is already granted, just wake any waiters.
 *
 * NOTE(review): several lines (the third l_wait_event argument, the rc
 * checks between the debug messages, and the RETURN) are missing from this
 * excerpt — the comments below only describe what is visible. */
60 int ldlm_completion_ast(struct ldlm_lock *lock, int flags)
/* On timeout expired_completion_wait() fires; on signal,
 * interrupted_completion_wait(). */
62         struct l_wait_info lwi =
63                 LWI_TIMEOUT_INTR(obd_timeout * HZ, expired_completion_wait,
64                                  interrupted_completion_wait, lock);
68         if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
69                      LDLM_FL_BLOCK_CONV)) {
70                 /* Go to sleep until the lock is granted. */
71                 /* FIXME: or cancelled. */
72                 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock,"
/* Give other blocked locks on this resource a chance before we sleep. */
75                 ldlm_reprocess_all(lock->l_resource);
76                 rc = l_wait_event(lock->l_waitq,
77                                   (lock->l_req_mode == lock->l_granted_mode),
81                                    "client-side enqueue waking up: failed (%d)",
85                 LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
87         } else if (flags == LDLM_FL_WAIT_NOREPROC) {
/* Caller already reprocessed (or must not) — wait for grant only. */
88                 rc = l_wait_event(lock->l_waitq,
89                                   (lock->l_req_mode == lock->l_granted_mode),
91         } else if (flags == 0) {
/* Lock granted immediately: wake anyone sleeping in the branches above. */
92                 wake_up(&lock->l_waitq);
/* Enqueue a lock entirely on this node (no RPC): used when there is no
 * connection handle, i.e. the namespace is local.  Creates the lock, takes a
 * mode reference, enqueues it, copies back the (possibly server-adjusted)
 * extent and resource id, and runs the completion AST if one is set.
 *
 * Returns 0/-ENOMEM style error via `err`; exact RETURN paths are not
 * visible in this excerpt.
 * NOTE(review): the guard around the "Trying to cancel local lock" CERROR
 * and several cleanup lines are missing here — confirm against full source. */
98 static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
99                                   struct lustre_handle *parent_lockh,
102                                  void *cookie, int cookielen,
105                                  ldlm_completion_callback completion,
106                                  ldlm_blocking_callback blocking,
109                                  struct lustre_handle *lockh)
111        struct ldlm_lock *lock;
/* NOTE(review): guard condition not visible; message suggests this rejects
 * a client namespace here. */
115                CERROR("Trying to cancel local lock\n");
119        lock = ldlm_lock_create(ns, parent_lockh, res_id, type, mode, data, data_len);
121                GOTO(out_nolock, err = -ENOMEM);
122        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
/* Reference the lock for the caller and hand back an opaque handle. */
124        ldlm_lock_addref_internal(lock, mode);
125        ldlm_lock2handle(lock, lockh);
126        lock->l_connh = NULL;
128        err = ldlm_lock_enqueue(lock, cookie, cookielen, flags, completion,
/* Extent locks report the actually-granted extent back through @cookie. */
133        if (type == LDLM_EXTENT)
134                memcpy(cookie, &lock->l_extent, sizeof(lock->l_extent));
/* The enqueue may have moved the lock to a different resource. */
135        if ((*flags) & LDLM_FL_LOCK_CHANGED)
136                memcpy(res_id, lock->l_resource->lr_name, sizeof(*res_id));
138        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
141        if (lock->l_completion_ast)
142                lock->l_completion_ast(lock, *flags);
144        LDLM_DEBUG(lock, "client-side local enqueue END");
/* Client-side enqueue of a lock on a remote lock server.
 *
 * Creates a local shadow lock, sends an LDLM_ENQUEUE RPC (either in the
 * caller-supplied @req or in a freshly prepared one), then reconciles the
 * local lock with the server's reply: remote handle, granted extent, flags,
 * possibly a changed mode or resource name.  Finishes with a local
 * ldlm_lock_enqueue() and the completion AST.
 *
 * Falls back to ldlm_cli_enqueue_local() — presumably when @connh is NULL
 * (the guard is not visible in this excerpt) — TODO confirm.
 *
 * NOTE(review): many lines are missing here (req NULL checks, RETURNs,
 * some branch heads).  Comments below only describe visible behavior. */
152 int ldlm_cli_enqueue(struct lustre_handle *connh,
153                     struct ptlrpc_request *req,
154                     struct ldlm_namespace *ns,
155                     struct lustre_handle *parent_lock_handle,
158                     void *cookie, int cookielen,
161                     ldlm_completion_callback completion,
162                     ldlm_blocking_callback blocking,
165                     struct lustre_handle *lockh)
167        struct ldlm_lock *lock;
168        struct ldlm_request *body;
169        struct ldlm_reply *reply;
170        int rc, size = sizeof(*body), req_passed_in = 1;
/* No connection: service the enqueue without an RPC. */
174                return ldlm_cli_enqueue_local(ns, parent_lock_handle, res_id,
175                                              type, cookie, cookielen, mode,
176                                              flags, completion, blocking, data,
180        lock = ldlm_lock_create(ns, parent_lock_handle, res_id, type, mode,
183                GOTO(out_nolock, rc = -ENOMEM);
184        LDLM_DEBUG(lock, "client-side enqueue START");
185        /* for the local lock, add the reference */
186        ldlm_lock_addref_internal(lock, mode);
187        ldlm_lock2handle(lock, lockh);
/* Caller gave no request: build one ourselves (req_passed_in stays 1 only
 * for caller-supplied requests; the assignment to 0 is not visible here). */
190                req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_ENQUEUE, 1,
193                        GOTO(out, rc = -ENOMEM);
/* Sanity-check a caller-supplied request's first buffer size. */
195        } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
198        /* Dump all of this data into the request buffer */
199        body = lustre_msg_buf(req->rq_reqmsg, 0);
200        ldlm_lock2desc(lock, &body->lock_desc);
201        /* Phil: make this part of ldlm_lock2desc */
202        if (type == LDLM_EXTENT)
203                memcpy(&body->lock_desc.l_extent, cookie,
204                       sizeof(body->lock_desc.l_extent));
205        body->lock_flags = *flags;
207        memcpy(&body->lock_handle1, lockh, sizeof(*lockh));
208        if (parent_lock_handle)
209                memcpy(&body->lock_handle2, parent_lock_handle,
210                       sizeof(body->lock_handle2));
212        /* Continue as normal. */
/* Only size the reply buffer for requests we prepared ourselves. */
213        if (!req_passed_in) {
214                size = sizeof(*reply);
215                req->rq_replen = lustre_msg_size(1, &size);
217        lock->l_connh = connh;
218        lock->l_export = NULL;
/* Send the enqueue and wait for the server's verdict. */
220        rc = ptlrpc_queue_wait(req);
221        /* FIXME: status check here? */
222        rc = ptlrpc_check_status(req, rc);
/* Server refused or aborted: drop our reference and destroy the shadow. */
224        if (rc != ELDLM_OK) {
225                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
226                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
227                ldlm_lock_decref(lockh, mode);
228                /* FIXME: if we've already received a completion AST, this will
230                ldlm_lock_destroy(lock);
/* Success: absorb the server's view of the lock into our local copy. */
234        reply = lustre_msg_buf(req->rq_repmsg, 0);
235        memcpy(&lock->l_remote_handle, &reply->lock_handle,
236               sizeof(lock->l_remote_handle));
237        if (type == LDLM_EXTENT)
238                memcpy(cookie, &reply->lock_extent, sizeof(reply->lock_extent));
239        *flags = reply->lock_flags;
241        CDEBUG(D_INFO, "remote handle: %p, flags: %d\n",
242               (void *)(unsigned long)reply->lock_handle.addr, *flags);
243        CDEBUG(D_INFO, "extent: %Lu -> %Lu\n",
244               (unsigned long long)reply->lock_extent.start,
245               (unsigned long long)reply->lock_extent.end);
247        /* If enqueue returned a blocked lock but the completion handler has
248         * already run, then it fixed up the resource and we don't need to do it
/* Server changed the lock (intent handling): adopt its mode/resource. */
250        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
251                int newmode = reply->lock_mode;
252                if (newmode && newmode != lock->l_req_mode) {
253                        LDLM_DEBUG(lock, "server returned different mode %s",
254                                   ldlm_lockname[newmode]);
255                        lock->l_req_mode = newmode;
258                if (reply->lock_resource_name[0] !=
259                    lock->l_resource->lr_name[0]) {
260                        CDEBUG(D_INFO, "remote intent success, locking %ld "
262                               (long)reply->lock_resource_name[0],
263                               (long)lock->l_resource->lr_name[0]);
265                        ldlm_lock_change_resource(lock,
266                                                  reply->lock_resource_name);
/* NOTE(review): the error handling for a failed resource change is not
 * visible in this excerpt. */
267                        if (lock->l_resource == NULL) {
271                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
/* Only free the request if we allocated it (guard not visible here). */
276                ptlrpc_free_req(req);
/* Finish with the normal local enqueue path + completion AST. */
278        rc = ldlm_lock_enqueue(lock, cookie, cookielen, flags, completion,
280        if (lock->l_completion_ast)
281                lock->l_completion_ast(lock, *flags);
283        LDLM_DEBUG(lock, "client-side enqueue END");
/* Try to satisfy the request with an already-held compatible lock
 * (ldlm_lock_match); only if no match is found, fall through to a full
 * ldlm_cli_enqueue() with the same arguments.
 * NOTE(review): the branch head between the match and the enqueue, and the
 * RETURN, are not visible in this excerpt — presumably enqueue runs only
 * when the match fails; confirm in the full file. */
291 int ldlm_match_or_enqueue(struct lustre_handle *connh,
292                          struct ptlrpc_request *req,
293                          struct ldlm_namespace *ns,
294                          struct lustre_handle *parent_lock_handle,
297                          void *cookie, int cookielen,
300                          ldlm_completion_callback completion,
301                          ldlm_blocking_callback blocking,
304                          struct lustre_handle *lockh)
308        rc = ldlm_lock_match(ns, res_id, type, cookie, cookielen, mode, lockh);
310                rc = ldlm_cli_enqueue(connh, req, ns,
311                                      parent_lock_handle, res_id, type, cookie,
312                                      cookielen, mode, flags, completion,
313                                      blocking, data, data_len, lockh);
315                        CERROR("ldlm_cli_enqueue: err: %d\n", rc);
/* Convert a lock's mode locally (no RPC): rejects client namespaces, then
 * performs the conversion and reprocesses the resource so newly-compatible
 * waiters can be granted.
 * NOTE(review): the error-return after the CERROR and the final RETURN are
 * not visible in this excerpt. */
321 static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
/* Local conversion only makes sense on the server side of the namespace. */
325        if (lock->l_resource->lr_namespace->ns_client) {
326                CERROR("Trying to cancel local lock\n");
329        LDLM_DEBUG(lock, "client-side local convert");
331        ldlm_lock_convert(lock, new_mode, flags);
332        ldlm_reprocess_all(lock->l_resource);
334        LDLM_DEBUG(lock, "client-side local convert handler END");
/* Convert an existing lock (identified by @lockh) to @new_mode.
 *
 * With no connection handle the conversion is done locally via
 * ldlm_cli_convert_local().  Otherwise an LDLM_CONVERT RPC is sent carrying
 * the remote handle and requested mode; on reply the local lock is converted
 * with the server's flags, the resource is reprocessed, and the completion
 * AST (if any) is invoked with LDLM_FL_WAIT_NOREPROC to wait for the grant.
 *
 * NOTE(review): the NULL-lock check after ldlm_handle2lock(), the rc checks
 * after queue_wait, and the RETURN/out labels are missing from this excerpt. */
339 int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
341        struct ldlm_request *body;
342        struct lustre_handle *connh;
343        struct ldlm_reply *reply;
344        struct ldlm_lock *lock;
345        struct ldlm_resource *res;
346        struct ptlrpc_request *req;
347        int rc, size = sizeof(*body);
350        lock = ldlm_handle2lock(lockh);
356        connh = lock->l_connh;
/* No connection => the lock is purely local; convert in place. */
359                return ldlm_cli_convert_local(lock, new_mode, flags);
361        LDLM_DEBUG(lock, "client-side convert");
363        req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_CONVERT, 1, &size,
366                GOTO(out, rc = -ENOMEM);
/* Identify the lock to the server by its remote handle. */
368        body = lustre_msg_buf(req->rq_reqmsg, 0);
369        memcpy(&body->lock_handle1, &lock->l_remote_handle,
370               sizeof(body->lock_handle1));
372        body->lock_desc.l_req_mode = new_mode;
373        body->lock_flags = *flags;
375        size = sizeof(*reply);
376        req->rq_replen = lustre_msg_size(1, &size);
378        rc = ptlrpc_queue_wait(req);
379        rc = ptlrpc_check_status(req, rc);
/* Apply the server's decision locally and wake compatible waiters. */
383        reply = lustre_msg_buf(req->rq_repmsg, 0);
384        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
386                ldlm_reprocess_all(res);
387        /* Go to sleep until the lock is granted. */
388        /* FIXME: or cancelled. */
389        if (lock->l_completion_ast)
390                lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
394        ptlrpc_free_req(req);
/* Cancel the lock behind @lockh: mark it CBPENDING (so no new references can
 * be taken), run the cancel callback, notify the server with an LDLM_CANCEL
 * RPC, then cancel the lock locally.
 *
 * A NULL lock from ldlm_handle2lock() is tolerated (see the race comment
 * below) rather than LBUG()ing.
 *
 * NOTE(review): this excerpt shows ldlm_lock_cancel() twice (remote path at
 * the end, plus a "client-side local cancel" branch) — these are presumably
 * alternate paths selected by code not visible here (e.g. l_connh == NULL
 * or a preprocessor arm); confirm in the full file. */
398 int ldlm_cli_cancel(struct lustre_handle *lockh)
400        struct ptlrpc_request *req;
401        struct ldlm_lock *lock;
402        struct ldlm_request *body;
403        int rc = 0, size = sizeof(*body);
406        lock = ldlm_handle2lock(lockh);
408        /* It's possible that the decref that we did just before this
409         * cancel was the last reader/writer, and caused a cancel before
410         * we could call this function. If we want to make this
411         * impossible (by adding a dec_and_cancel() or similar), then
412         * we can put the LBUG back. */
418        LDLM_DEBUG(lock, "client-side cancel");
419        /* Set this flag to prevent others from getting new references*/
420        l_lock(&lock->l_resource->lr_namespace->ns_lock);
421        lock->l_flags |= LDLM_FL_CBPENDING;
422        ldlm_cancel_callback(lock);
423        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* Tell the server to drop its copy of the lock. */
425        req = ptlrpc_prep_req(class_conn2cliimp(lock->l_connh),
426                              LDLM_CANCEL, 1, &size, NULL);
428                GOTO(out, rc = -ENOMEM);
430        body = lustre_msg_buf(req->rq_reqmsg, 0);
431        memcpy(&body->lock_handle1, &lock->l_remote_handle,
432               sizeof(body->lock_handle1));
/* Cancel expects no reply body. */
434        req->rq_replen = lustre_msg_size(0, NULL);
436        rc = ptlrpc_queue_wait(req);
437        rc = ptlrpc_check_status(req, rc);
438        ptlrpc_free_req(req);
/* Server notified (or not needed): drop the lock locally as well. */
442        ldlm_lock_cancel(lock);
444        LDLM_DEBUG(lock, "client-side local cancel");
445        if (lock->l_resource->lr_namespace->ns_client) {
446                CERROR("Trying to cancel local lock\n");
449        ldlm_lock_cancel(lock);
450        ldlm_reprocess_all(lock->l_resource);
451        LDLM_DEBUG(lock, "client-side local cancel handler END");
460 /* Cancel all locks on a given resource that have 0 readers/writers.
462  * If 'local_only' is true, throw the locks away without trying to notify the
 *
 * Two-phase algorithm: (1) under ns_lock, walk the granted list, flag each
 * unused lock CBPENDING and collect it (with a reference) onto a private
 * work list; (2) outside the lock, cancel each collected lock — via
 * ldlm_cli_cancel() when the server must be told — then drop the reference
 * and free the work item.
 *
 * NOTE(review): the `continue` for in-use locks, the OBD_ALLOC failure
 * branch, the local_only test, and the RETURN are not visible in this
 * excerpt — comments describe only what is shown. */
464 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, __u64 *res_id,
467        struct ldlm_resource *res;
468        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
469        struct ldlm_ast_work *w;
/* Look up (don't create) the resource; nothing to do if it doesn't exist. */
472        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
/* Phase 1: collect unused granted locks under the namespace lock. */
476        l_lock(&ns->ns_lock);
477        list_for_each(tmp, &res->lr_granted) {
478                struct ldlm_lock *lock;
479                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
/* Skip locks still in use. */
481                if (lock->l_readers || lock->l_writers)
484                /* Setting the CBPENDING flag is a little misleading, but
485                 * prevents an important race; namely, once CBPENDING is set,
486                 * the lock can accumulate no more readers/writers. Since
487                 * readers and writers are already zero here, ldlm_lock_decref
488                 * won't see this flag and call l_blocking_ast */
489                lock->l_flags |= LDLM_FL_CBPENDING;
491                OBD_ALLOC(w, sizeof(*w));
/* Hold a reference so the lock survives until phase 2 processes it. */
494                w->w_lock = LDLM_LOCK_GET(lock);
495                list_add(&w->w_list, &list);
497        l_unlock(&ns->ns_lock);
/* Phase 2: cancel each collected lock without holding ns_lock. */
499        list_for_each_safe(tmp, next, &list) {
500                struct lustre_handle lockh;
502                w = list_entry(tmp, struct ldlm_ast_work, w_list);
/* Local-only path (guard not visible): cancel without notifying server. */
505                        ldlm_lock_cancel(w->w_lock);
507                        ldlm_lock2handle(w->w_lock, &lockh);
508                        rc = ldlm_cli_cancel(&lockh);
510                                CERROR("ldlm_cli_cancel: %d\n", rc);
/* Release our phase-1 reference and free the work item. */
512                LDLM_LOCK_PUT(w->w_lock);
513                list_del(&w->w_list);
514                OBD_FREE(w, sizeof(*w));
517        ldlm_resource_put(res);