1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #define DEBUG_SUBSYSTEM S_LDLM
24 #include <linux/lustre_dlm.h>
25 #include <linux/obd_class.h>
26 #include <linux/obd.h>
/* Callback invoked when the l_wait_event() in ldlm_completion_ast() is
 * interrupted by a signal (passed as the interrupt handler to
 * LWI_TIMEOUT_INTR below).
 * NOTE(review): the body is missing from this extract (source numbering
 * jumps 28 -> 33); return semantics cannot be confirmed here. */
28 static int interrupted_completion_wait(void *data)
/* Timeout callback for the l_wait_event() in ldlm_completion_ast():
 * fired when a lock has been blocked for longer than obd_timeout.
 * This variant defensively walks lock -> connh -> obd -> import
 * connection, logging at each step that can be NULL; if a connection is
 * found, it is flagged as failed so recovery can begin.
 * NOTE(review): a second, shorter definition of this function follows
 * below — presumably the two are alternate #if/#else branches whose
 * preprocessor lines are missing from this extract.  Several body lines
 * (the leading "if" for the NULL-lock check, braces, return) are also
 * missing (numbering jumps 37 -> 40, 46 -> 48). */
33 static int expired_completion_wait(void *data)
35 struct ldlm_lock *lock = data;
36 struct ptlrpc_connection *conn;
37 struct obd_device *obd;
40 CERROR("NULL lock\n");
41 else if (!lock->l_connh)
42 CERROR("lock %p has NULL connh\n", lock);
43 else if (!(obd = class_conn2obd(lock->l_connh)))
44 CERROR("lock %p has NULL obd\n", lock);
45 else if (!(conn = obd->u.cli.cl_import.imp_connection))
46 CERROR("lock %p has NULL connection\n", lock);
/* All sanity checks passed: tell the connection layer this peer failed. */
48 class_signal_connection_failure(conn);
/* Alternate (non-defensive) definition of the lock-wait timeout callback:
 * dereferences the import connection directly and signals failure on it.
 * NOTE(review): duplicate of the function above — the #if/#else guards
 * that must select between them are not visible in this extract, nor are
 * the NULL-check "if" line and the return (numbering jumps 57 -> 60,
 * 60 -> 64). */
53 static int expired_completion_wait(void *data)
55 struct ldlm_lock *lock = data;
56 struct ptlrpc_connection *conn =
57 class_conn2cliimp(lock->l_connh)->imp_connection;
60 CERROR("lock %p has NULL import connection\n", lock);
64 class_signal_connection_failure(conn);
/* Client-side completion AST: block the caller until @lock is granted
 * (l_req_mode == l_granted_mode) or destroyed.  The wait uses an
 * obd_timeout-bounded, interruptible l_wait_event whose expiry handler
 * signals a connection failure (see expired_completion_wait above).
 * NOTE(review): several lines are missing from this extract (numbering
 * jumps 73 -> 77, 77 -> 81, 85 -> 89, 98 -> 100, 106 -> 111), including
 * the early-return for LDLM_FL_WAIT_NOREPROC, the full block-flag test,
 * the declaration of rc, and the RETURN statements. */
69 int ldlm_completion_ast(struct ldlm_lock *lock, int flags)
71 struct l_wait_info lwi =
72 LWI_TIMEOUT_INTR(obd_timeout * HZ, expired_completion_wait,
73 interrupted_completion_wait, lock);
/* LDLM_FL_WAIT_NOREPROC: caller only wants the wait, no reprocessing. */
77 if (flags == LDLM_FL_WAIT_NOREPROC)
81 wake_up(&lock->l_waitq);
/* If none of the blocking flags are set, the lock is already usable. */
85 if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
89 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
92 ldlm_reprocess_all(lock->l_resource);
95 /* Go to sleep until the lock is granted or cancelled. */
96 rc = l_wait_event(lock->l_waitq,
97 ((lock->l_req_mode == lock->l_granted_mode) ||
98 (lock->l_flags & LDLM_FL_DESTROYED)), &lwi);
100 if (lock->l_flags & LDLM_FL_DESTROYED) {
101 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
106 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
111 LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
/* Enqueue a lock entirely on this node (no RPC): create the lock in @ns,
 * take a mode reference, hand back its handle in @lockh, then run
 * ldlm_lock_enqueue() and the lock's completion AST if one is set.
 * Called from ldlm_cli_enqueue() when the connection handle is NULL.
 * NOTE(review): this extract is missing several parameter lines
 * (res_id/type, mode/flags, data/data_len — numbering jumps 116 -> 119,
 * 119 -> 122, 123 -> 126), the shadow-namespace "if" guard, the error
 * labels, and the RETURN — do not infer the full signature from here. */
115 static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
116 struct lustre_handle *parent_lockh,
119 void *cookie, int cookielen,
122 ldlm_completion_callback completion,
123 ldlm_blocking_callback blocking,
126 struct lustre_handle *lockh)
128 struct ldlm_lock *lock;
/* Local enqueue only makes sense on the server copy of the namespace. */
133 CERROR("Trying to enqueue local lock in a shadow namespace\n");
137 lock = ldlm_lock_create(ns, parent_lockh, res_id, type, mode, data,
140 GOTO(out_nolock, err = -ENOMEM);
141 LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
143 ldlm_lock_addref_internal(lock, mode);
144 ldlm_lock2handle(lock, lockh);
/* No connection handle: marks this lock as local-only. */
145 lock->l_connh = NULL;
147 err = ldlm_lock_enqueue(lock, cookie, cookielen, flags, completion,
/* For extent locks, copy the (possibly server-adjusted) extent back to
 * the caller's cookie; likewise propagate a changed resource id. */
152 if (type == LDLM_EXTENT)
153 memcpy(cookie, &lock->l_extent, sizeof(lock->l_extent));
154 if ((*flags) & LDLM_FL_LOCK_CHANGED)
155 memcpy(res_id, lock->l_resource->lr_name, sizeof(*res_id));
157 LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
160 if (lock->l_completion_ast)
161 lock->l_completion_ast(lock, *flags);
163 LDLM_DEBUG(lock, "client-side local enqueue END");
/* Client-side enqueue: send an LDLM_ENQUEUE RPC over @connh (or fall back
 * to ldlm_cli_enqueue_local() when there is no connection), then install
 * the server's reply — remote handle, flags, possibly a changed mode or
 * resource — into the local lock before running ldlm_lock_enqueue() and
 * the completion AST.  The caller may pass a pre-prepared @req; otherwise
 * one is allocated here (req_passed_in tracks which).
 * NOTE(review): this extract is missing many lines (parameter lines for
 * res_id/type, mode/flags, data/data_len; the NULL-connh test before the
 * local fallback; NULL checks after ldlm_lock_create/ptlrpc_prep_req;
 * several closing braces and GOTO/RETURN statements — see the jumps in
 * the embedded numbering, e.g. 174 -> 177, 189 -> 193, 206 -> 209).
 * Comments below describe only what the visible lines show. */
171 int ldlm_cli_enqueue(struct lustre_handle *connh,
172 struct ptlrpc_request *req,
173 struct ldlm_namespace *ns,
174 struct lustre_handle *parent_lock_handle,
177 void *cookie, int cookielen,
180 ldlm_completion_callback completion,
181 ldlm_blocking_callback blocking,
184 struct lustre_handle *lockh)
186 struct ldlm_lock *lock;
187 struct ldlm_request *body;
188 struct ldlm_reply *reply;
189 int rc, size = sizeof(*body), req_passed_in = 1;
/* No connection: service the enqueue entirely on this node. */
193 return ldlm_cli_enqueue_local(ns, parent_lock_handle, res_id,
194 type, cookie, cookielen, mode,
195 flags, completion, blocking, data,
199 lock = ldlm_lock_create(ns, parent_lock_handle, res_id, type, mode,
202 GOTO(out_nolock, rc = -ENOMEM);
203 LDLM_DEBUG(lock, "client-side enqueue START");
204 /* for the local lock, add the reference */
205 ldlm_lock_addref_internal(lock, mode);
206 ldlm_lock2handle(lock, lockh);
209 req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_ENQUEUE, 1,
212 GOTO(out, rc = -ENOMEM);
/* Caller-supplied request must have a correctly sized first buffer. */
214 } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
217 /* Dump all of this data into the request buffer */
218 body = lustre_msg_buf(req->rq_reqmsg, 0);
219 ldlm_lock2desc(lock, &body->lock_desc);
220 /* Phil: make this part of ldlm_lock2desc */
221 if (type == LDLM_EXTENT) {
222 memcpy(&body->lock_desc.l_extent, cookie,
223 sizeof(body->lock_desc.l_extent));
224 CDEBUG(D_INFO, "extent in body: "LPU64" -> "LPU64"\n",
225 body->lock_desc.l_extent.start,
226 body->lock_desc.l_extent.end);
228 body->lock_flags = *flags;
230 memcpy(&body->lock_handle1, lockh, sizeof(*lockh));
231 if (parent_lock_handle)
232 memcpy(&body->lock_handle2, parent_lock_handle,
233 sizeof(body->lock_handle2));
235 /* Continue as normal. */
/* Only size the reply buffer ourselves if we also built the request. */
236 if (!req_passed_in) {
237 size = sizeof(*reply);
238 req->rq_replen = lustre_msg_size(1, &size);
240 lock->l_connh = connh;
241 lock->l_export = NULL;
243 rc = ptlrpc_queue_wait(req);
244 /* FIXME: status check here? */
245 rc = ptlrpc_check_status(req, rc);
/* On failure drop our mode reference and destroy the local lock. */
247 if (rc != ELDLM_OK) {
248 LDLM_DEBUG(lock, "client-side enqueue END (%s)",
249 rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
250 ldlm_lock_decref(lockh, mode);
251 /* FIXME: if we've already received a completion AST, this will
253 ldlm_lock_destroy(lock);
/* Success: copy the server's view of the lock into our local copy. */
257 reply = lustre_msg_buf(req->rq_repmsg, 0);
258 memcpy(&lock->l_remote_handle, &reply->lock_handle,
259 sizeof(lock->l_remote_handle));
260 if (type == LDLM_EXTENT)
261 memcpy(cookie, &reply->lock_extent, sizeof(reply->lock_extent));
262 *flags = reply->lock_flags;
264 CDEBUG(D_INFO, "remote handle: %p, flags: %d\n",
265 (void *)(unsigned long)reply->lock_handle.addr, *flags);
266 CDEBUG(D_INFO, "requested extent: "LPU64" -> "LPU64", got extent "
267 LPU64" -> "LPU64"\n",
268 body->lock_desc.l_extent.start, body->lock_desc.l_extent.end,
269 reply->lock_extent.start, reply->lock_extent.end);
271 /* If enqueue returned a blocked lock but the completion handler has
272 * already run, then it fixed up the resource and we don't need to do it
274 if ((*flags) & LDLM_FL_LOCK_CHANGED) {
275 int newmode = reply->lock_mode;
276 if (newmode && newmode != lock->l_req_mode) {
277 LDLM_DEBUG(lock, "server returned different mode %s",
278 ldlm_lockname[newmode]);
279 lock->l_req_mode = newmode;
/* Server may also have moved the lock to a different resource
 * (e.g. after a remote intent); follow it locally. */
282 if (reply->lock_resource_name[0] !=
283 lock->l_resource->lr_name[0]) {
284 CDEBUG(D_INFO, "remote intent success, locking %ld "
286 (long)reply->lock_resource_name[0],
287 (long)lock->l_resource->lr_name[0]);
289 ldlm_lock_change_resource(lock,
290 reply->lock_resource_name);
291 if (lock->l_resource == NULL) {
295 LDLM_DEBUG(lock, "client-side enqueue, new resource");
300 ptlrpc_req_finished(req);
/* Install the (possibly updated) lock into the local namespace too. */
302 rc = ldlm_lock_enqueue(lock, cookie, cookielen, flags, completion,
304 if (lock->l_completion_ast)
305 lock->l_completion_ast(lock, *flags);
307 LDLM_DEBUG(lock, "client-side enqueue END");
/* Try to satisfy the request with an existing local lock via
 * ldlm_lock_match(); only if no match is found, fall through to a full
 * ldlm_cli_enqueue(), logging any enqueue error.
 * NOTE(review): parameter lines (res_id/type, mode/flags, data/data_len),
 * the rc declaration, the match/no-match branch lines, and the RETURN are
 * missing from this extract (numbering jumps 318 -> 321, 332 -> 334,
 * 337 -> 339). */
315 int ldlm_match_or_enqueue(struct lustre_handle *connh,
316 struct ptlrpc_request *req,
317 struct ldlm_namespace *ns,
318 struct lustre_handle *parent_lock_handle,
321 void *cookie, int cookielen,
324 ldlm_completion_callback completion,
325 ldlm_blocking_callback blocking,
328 struct lustre_handle *lockh)
332 rc = ldlm_lock_match(ns, res_id, type, cookie, cookielen, mode, lockh);
334 rc = ldlm_cli_enqueue(connh, req, ns,
335 parent_lock_handle, res_id, type, cookie,
336 cookielen, mode, flags, completion,
337 blocking, data, data_len, lockh);
339 CERROR("ldlm_cli_enqueue: err: %d\n", rc);
/* Convert a lock's mode without an RPC: refuse on a client (shadow)
 * namespace, otherwise call ldlm_lock_convert() and reprocess the
 * resource so newly compatible waiters can be granted.
 * NOTE(review): the error message says "cancel" but this is the convert
 * path — likely a copy/paste slip; also the remaining parameter line(s),
 * the error return, and the final RETURN are missing from this extract
 * (numbering jumps 345 -> 349, 350 -> 353). */
345 static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
/* Conversions are only legal on the server-side copy of the lock. */
349 if (lock->l_resource->lr_namespace->ns_client) {
350 CERROR("Trying to cancel local lock\n");
353 LDLM_DEBUG(lock, "client-side local convert");
355 ldlm_lock_convert(lock, new_mode, flags);
356 ldlm_reprocess_all(lock->l_resource);
358 LDLM_DEBUG(lock, "client-side local convert handler END");
363 /* FIXME: one of ldlm_cli_convert or the server side should reject attempted
364 * conversion of locks which are on the waiting or converting queue */
/* Convert the lock behind @lockh to @new_mode.  If the lock has no
 * connection handle it is converted locally via ldlm_cli_convert_local();
 * otherwise an LDLM_CONVERT RPC is sent carrying the remote handle, the
 * requested mode, and *flags, after which the local lock is converted
 * with the flags the server returned and the resource is reprocessed.
 * NOTE(review): the NULL-lock check after ldlm_handle2lock, the NULL-connh
 * test before the local fallback, the NULL checks on req/res, error
 * handling after ptlrpc_queue_wait, and the RETURN are missing from this
 * extract (numbering jumps 373 -> 376, 376 -> 382, 405 -> 409). */
365 int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
367 struct ldlm_request *body;
368 struct lustre_handle *connh;
369 struct ldlm_reply *reply;
370 struct ldlm_lock *lock;
371 struct ldlm_resource *res;
372 struct ptlrpc_request *req;
373 int rc, size = sizeof(*body);
376 lock = ldlm_handle2lock(lockh);
382 connh = lock->l_connh;
385 return ldlm_cli_convert_local(lock, new_mode, flags);
387 LDLM_DEBUG(lock, "client-side convert");
389 req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_CONVERT, 1, &size,
392 GOTO(out, rc = -ENOMEM);
/* The server identifies the lock by its remote handle, not ours. */
394 body = lustre_msg_buf(req->rq_reqmsg, 0);
395 memcpy(&body->lock_handle1, &lock->l_remote_handle,
396 sizeof(body->lock_handle1));
398 body->lock_desc.l_req_mode = new_mode;
399 body->lock_flags = *flags;
401 size = sizeof(*reply);
402 req->rq_replen = lustre_msg_size(1, &size);
404 rc = ptlrpc_queue_wait(req);
405 rc = ptlrpc_check_status(req, rc);
/* Apply the server-approved conversion to the local lock as well. */
409 reply = lustre_msg_buf(req->rq_repmsg, 0);
410 res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
412 ldlm_reprocess_all(res);
413 /* Go to sleep until the lock is granted. */
414 /* FIXME: or cancelled. */
415 if (lock->l_completion_ast)
416 lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
420 ptlrpc_req_finished(req);
/* Cancel the lock behind @lockh.  Marks the lock CBPENDING (under the
 * namespace lock) so it can take no new references, runs the cancel
 * callback, then — for a lock with a connection handle — sends an
 * LDLM_CANCEL RPC identified by the remote handle before cancelling the
 * local copy.  A trailing local-only path handles locks with no
 * connection (its "if connh" split is not visible in this extract).
 * NOTE(review): the NULL-lock early-return, the connh branch structure,
 * several closing braces, and the RETURN are missing here (numbering
 * jumps 429 -> 432, 438 -> 444, 464 -> 468, 472 -> 475). */
424 int ldlm_cli_cancel(struct lustre_handle *lockh)
426 struct ptlrpc_request *req;
427 struct ldlm_lock *lock;
428 struct ldlm_request *body;
429 int rc = 0, size = sizeof(*body);
432 lock = ldlm_handle2lock(lockh);
434 /* It's possible that the decref that we did just before this
435 * cancel was the last reader/writer, and caused a cancel before
436 * we could call this function. If we want to make this
437 * impossible (by adding a dec_and_cancel() or similar), then
438 * we can put the LBUG back. */
444 LDLM_DEBUG(lock, "client-side cancel");
445 /* Set this flag to prevent others from getting new references*/
446 l_lock(&lock->l_resource->lr_namespace->ns_lock);
447 lock->l_flags |= LDLM_FL_CBPENDING;
448 ldlm_cancel_callback(lock);
449 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
451 req = ptlrpc_prep_req(class_conn2cliimp(lock->l_connh),
452 LDLM_CANCEL, 1, &size, NULL);
454 GOTO(out, rc = -ENOMEM);
456 body = lustre_msg_buf(req->rq_reqmsg, 0);
457 memcpy(&body->lock_handle1, &lock->l_remote_handle,
458 sizeof(body->lock_handle1));
/* Cancel replies carry no body, only a status. */
460 req->rq_replen = lustre_msg_size(0, NULL);
462 rc = ptlrpc_queue_wait(req);
463 rc = ptlrpc_check_status(req, rc);
464 ptlrpc_req_finished(req);
468 ldlm_lock_cancel(lock);
/* Local-only cancel path (no connection handle). */
470 LDLM_DEBUG(lock, "client-side local cancel");
471 if (lock->l_resource->lr_namespace->ns_client) {
472 CERROR("Trying to cancel local lock\n");
475 ldlm_lock_cancel(lock);
476 ldlm_reprocess_all(lock->l_resource);
477 LDLM_DEBUG(lock, "client-side local cancel handler END");
486 /* Cancel all locks on a given resource that have 0 readers/writers.
488 * If 'local_only' is true, throw the locks away without trying to notify the
490 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, __u64 *res_id,
/* Two-phase design: collect candidate locks under ns_lock into a private
 * work list, then cancel them after dropping the lock — ldlm_cli_cancel()
 * takes ns_lock itself and may block on an RPC.
 * NOTE(review): the local_only parameter line, the NULL-res check, the
 * per-lock "continue" lines, the local_only branch inside the second
 * loop, and the RETURN are missing from this extract (numbering jumps
 * 490 -> 493, 508 -> 510, 531 -> 534, 543 -> 546). */
493 struct ldlm_resource *res;
494 struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
495 struct ldlm_ast_work *w;
498 res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
500 /* This is not a problem. */
501 CDEBUG(D_INFO, "No resource "LPU64"\n", res_id[0]);
/* Phase 1: under ns_lock, flag and collect every unused granted lock. */
505 l_lock(&ns->ns_lock);
506 list_for_each(tmp, &res->lr_granted) {
507 struct ldlm_lock *lock;
508 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
510 if (lock->l_readers || lock->l_writers)
513 /* Setting the CBPENDING flag is a little misleading, but
514 * prevents an important race; namely, once CBPENDING is set,
515 * the lock can accumulate no more readers/writers. Since
516 * readers and writers are already zero here, ldlm_lock_decref
517 * won't see this flag and call l_blocking_ast */
518 lock->l_flags |= LDLM_FL_CBPENDING;
520 OBD_ALLOC(w, sizeof(*w));
/* Hold a reference so the lock survives until phase 2 processes it. */
523 w->w_lock = LDLM_LOCK_GET(lock);
524 list_add(&w->w_list, &list);
526 l_unlock(&ns->ns_lock);
/* Phase 2: outside ns_lock, cancel each collected lock and free the
 * work item; _safe iteration because entries are deleted as we go. */
528 list_for_each_safe(tmp, next, &list) {
529 struct lustre_handle lockh;
531 w = list_entry(tmp, struct ldlm_ast_work, w_list);
534 ldlm_lock_cancel(w->w_lock);
536 ldlm_lock2handle(w->w_lock, &lockh);
537 rc = ldlm_cli_cancel(&lockh);
539 CERROR("ldlm_cli_cancel: %d\n", rc);
541 LDLM_LOCK_PUT(w->w_lock);
542 list_del(&w->w_list);
543 OBD_FREE(w, sizeof(*w));
546 ldlm_resource_put(res);