1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002 Cluster File Systems, Inc. <info@clusterfs.com>
5 * Copyright (c) 2002 Lawrence Livermore National Laboratory
6 * Author: James Newsome <newsome2@llnl.gov>
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #define DEBUG_SUBSYSTEM S_LDLM
26 #include <asm/atomic.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
30 #include <linux/lustre_dlm.h>
31 #include <linux/obd.h>
/*
 * Per-thread state for the regression test.  Threads are linked into
 * the global ctl_threads list (protected by ctl_lock) and signalled
 * through t_ctl_waitq.
 * NOTE(review): this listing is elided -- the t_flags field (used for
 * the SVC_* bits below) and the closing brace are not visible here.
 */
33 struct ldlm_test_thread {
34 struct obd_device *obddev;
35 struct ldlm_namespace *t_ns;
36 struct list_head t_link;
38 wait_queue_head_t t_ctl_waitq;
/*
 * One granted lock held by the regression test, queued on the global
 * lock_list so a worker thread can later convert or decref it.
 */
41 struct ldlm_test_lock {
42 struct list_head l_link;
43 struct lustre_handle l_lockh;
/* Tunables, set once by ldlm_regression_start(). */
46 static unsigned int max_locks;
47 static unsigned int num_resources;
48 static unsigned int num_extents;
50 static spinlock_t ctl_lock = SPIN_LOCK_UNLOCKED;
51 /* protect these with the ctl_lock */
52 static LIST_HEAD(ctl_threads);
53 static int regression_running = 0;
54 static LIST_HEAD(lock_list);
55 static int num_locks = 0;
57 /* cumulative stats for regression test */
58 static atomic_t locks_requested = ATOMIC_INIT(0);
59 static atomic_t converts_requested = ATOMIC_INIT(0);
60 static atomic_t locks_granted = ATOMIC_INIT(0);
61 static atomic_t locks_matched = ATOMIC_INIT(0);
63 /* making this a global avoids the problem of having pointers
64 * to garbage after the test exits.
66 static struct lustre_handle regress_connh;
/* Forward declarations for the random-action helpers driven by
 * ldlm_test_main(). */
68 static int ldlm_do_decrement(void);
69 static int ldlm_do_enqueue(struct ldlm_test_thread *thread);
70 static int ldlm_do_convert(void);
73 * blocking ast for regression test.
/*
 * Invoked when this lock blocks another request: no conversion is
 * attempted, the lock is simply cancelled client-side so the test can
 * make progress.
 * NOTE(review): listing is elided -- the declaration of 'rc' and the
 * function's return path / closing brace are not visible here.
 */
76 static int ldlm_test_blocking_ast(struct ldlm_lock *lock,
77 struct ldlm_lock_desc *new,
78 void *data, __u32 data_len)
81 struct lustre_handle lockh;
84 LDLM_DEBUG(lock, "We're blocking. Cancelling lock");
85 ldlm_lock2handle(lock, &lockh);
86 rc = ldlm_cli_cancel(&lockh);
/* Best-effort: a cancel failure is only logged. */
88 CERROR("ldlm_cli_cancel: %d\n", rc);
95 /* blocking ast for basic tests. noop */
/*
 * Blocking AST used by ldlm_test_basics()/ldlm_test_extents(); just
 * logs the callback so blocking behaviour is visible in the console.
 * NOTE(review): the return statement is elided from this listing.
 */
96 static int ldlm_blocking_ast(struct ldlm_lock *lock,
97 struct ldlm_lock_desc *new,
98 void *data, __u32 data_len)
101 CERROR("ldlm_blocking_ast: lock=%p, new=%p\n", lock, new);
105 /* Completion ast for regression test.
106 * Does not sleep when blocked.
/*
 * On a still-blocked lock this returns immediately (no sleeping, unlike
 * the normal ldlm_completion_ast).  On grant (or match, signalled by
 * LDLM_FL_WAIT_NOREPROC) it bumps the matching stat counter and queues
 * a ldlm_test_lock record on lock_list for later convert/decref.
 * NOTE(review): listing is elided -- the OBD_ALLOC-failure branch body,
 * the num_locks++ bookkeeping and the return path are not visible here.
 */
108 static int ldlm_test_completion_ast(struct ldlm_lock *lock, int flags)
110 struct ldlm_test_lock *lock_info;
113 if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
114 LDLM_FL_BLOCK_CONV)) {
116 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
/* Sanity check: we should only get here once the lock is granted. */
120 if (lock->l_granted_mode != lock->l_req_mode)
121 CERROR("completion ast called with non-granted lock\n");
123 /* add to list of granted locks */
125 if (flags & LDLM_FL_WAIT_NOREPROC) {
126 atomic_inc(&locks_matched);
127 LDLM_DEBUG(lock, "lock matched");
129 atomic_inc(&locks_granted);
130 LDLM_DEBUG(lock, "lock granted");
133 OBD_ALLOC(lock_info, sizeof(*lock_info));
134 if (lock_info == NULL) {
139 ldlm_lock2handle(lock, &lock_info->l_lockh);
/* lock_list is shared with the worker threads; ctl_lock protects it. */
141 spin_lock(&ctl_lock);
142 list_add_tail(&lock_info->l_link, &lock_list);
144 spin_unlock(&ctl_lock);
/*
 * Basic plain-lock sanity test run entirely in a private server-side
 * namespace: grant a CR lock, verify an EX lock on the same resource
 * blocks, convert the CR lock to NL, reprocess, then dump and free.
 * NOTE(review): listing is elided -- declarations of err/flags, the
 * error checks after each call and RETURN are not visible here.
 */
149 int ldlm_test_basics(struct obd_device *obddev)
151 struct ldlm_namespace *ns;
152 struct ldlm_resource *res;
153 __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
155 struct ldlm_lock *lock1, *lock;
159 ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
163 lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_CR, NULL, 0);
166 err = ldlm_lock_enqueue(lock1, NULL, 0, &flags,
167 ldlm_completion_ast, ldlm_blocking_ast);
/* An EX request on the same resource must be blocked by the CR lock. */
171 lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX, NULL, 0);
174 err = ldlm_lock_enqueue(lock, NULL, 0, &flags,
175 ldlm_completion_ast, ldlm_blocking_ast);
178 if (!(flags & LDLM_FL_BLOCK_GRANTED))
181 res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
184 ldlm_resource_dump(res);
/* Dropping lock1 to NL should let the blocked EX lock be granted on
 * reprocess. */
186 res = ldlm_lock_convert(lock1, LCK_NL, &flags);
188 ldlm_reprocess_all(res);
190 ldlm_resource_dump(res);
191 ldlm_namespace_free(ns);
/*
 * Extent-lock sanity test in a private server namespace: two PR locks
 * on overlapping extents should both be granted, an EX lock on a third
 * extent should block, and converting/cancelling the PR locks should
 * unblock it on reprocess.
 * NOTE(review): listing is elided -- declarations of err/flags, the
 * per-call error checks and RETURN are not visible here.
 */
196 int ldlm_test_extents(struct obd_device *obddev)
198 struct ldlm_namespace *ns;
199 struct ldlm_resource *res;
200 struct ldlm_lock *lock, *lock1, *lock2;
201 __u64 res_id[RES_NAME_SIZE] = {0, 0, 0};
202 struct ldlm_extent ext1 = {4, 6}, ext2 = {6, 9}, ext3 = {10, 11};
207 ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
212 lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR, NULL,
216 err = ldlm_lock_enqueue(lock1, &ext1, sizeof(ext1), &flags, NULL, NULL);
/* Extent enqueues are expected to report a resource change. */
219 if (!(flags & LDLM_FL_LOCK_CHANGED))
223 lock2 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR,
225 err = ldlm_lock_enqueue(lock2, &ext2, sizeof(ext2), &flags, NULL, NULL);
228 if (!(flags & LDLM_FL_LOCK_CHANGED))
/* EX on a disjoint extent of the same resource: should block behind
 * the granted PR locks, without a resource change. */
232 lock = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_EX, NULL, 0);
235 err = ldlm_lock_enqueue(lock, &ext3, sizeof(ext3), &flags,
239 if (!(flags & LDLM_FL_BLOCK_GRANTED))
241 if (flags & LDLM_FL_LOCK_CHANGED)
244 /* Convert/cancel blocking locks */
246 res = ldlm_lock_convert(lock1, LCK_NL, &flags);
248 ldlm_reprocess_all(res);
250 ldlm_lock_cancel(lock2);
252 ldlm_reprocess_all(res);
254 /* Dump the results */
255 res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
258 ldlm_resource_dump(res);
259 ldlm_namespace_free(ns);
/*
 * Exercise the client-side RPC path over an existing connection:
 * enqueue a PR extent lock, convert it to EX, dump it, then decref
 * the original PR reference.
 * NOTE(review): listing is elided -- declarations of err/flags, the
 * error handling between calls, LDLM_LOCK_PUT and RETURN are not
 * visible here.
 */
264 static int ldlm_test_network(struct obd_device *obddev,
265 struct lustre_handle *connh)
268 __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
269 struct ldlm_extent ext = {4, 6};
270 struct lustre_handle lockh1;
271 struct ldlm_lock *lock;
276 err = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, NULL, res_id,
277 LDLM_EXTENT, &ext, sizeof(ext), LCK_PR, &flags,
278 ldlm_completion_ast, NULL, NULL, 0, &lockh1);
280 CERROR("ldlm_cli_enqueue: %d\n", err);
283 err = ldlm_cli_convert(&lockh1, LCK_EX, &flags);
284 CERROR("ldlm_cli_convert: %d\n", err);
286 lock = ldlm_handle2lock(&lockh1);
287 ldlm_lock_dump(lock);
290 /* Need to decrement old mode. Don't bother incrementing new
291 * mode since the test is done.
294 ldlm_lock_decref(&lockh1, LCK_PR);
/*
 * Drop one held lock: pop the oldest ldlm_test_lock from lock_list,
 * decref it at its currently-granted mode and free the record.
 * Returns without action if no locks are held.
 * NOTE(review): listing is elided -- the num_locks-- bookkeeping, the
 * LDLM_LOCK_PUT matching ldlm_handle2lock and RETURN are not visible.
 */
299 static int ldlm_do_decrement(void)
301 struct ldlm_test_lock *lock_info;
302 struct ldlm_lock *lock;
306 spin_lock(&ctl_lock);
307 if(list_empty(&lock_list)) {
308 CERROR("lock_list is empty\n");
309 spin_unlock(&ctl_lock);
313 /* delete from list */
314 lock_info = list_entry(lock_list.next,
315 struct ldlm_test_lock, l_link);
316 list_del(lock_list.next);
318 spin_unlock(&ctl_lock);
320 /* decrement and free the info */
/* ldlm_handle2lock takes its own reference; decref at the granted
 * mode releases the reference taken when the lock was acquired. */
321 lock = ldlm_handle2lock(&lock_info->l_lockh);
322 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
325 OBD_FREE(lock_info, sizeof(*lock_info));
/*
 * Enqueue (or match) one lock with a randomly chosen resource, mode
 * and extent over the regression connection.  Called from the worker
 * loop in ldlm_test_main(); bumps the locks_requested stat.
 * NOTE(review): listing is elided -- the lock_mode declaration, part
 * of the debug-message arguments, the tail of the ext.end computation
 * and the error-check/RETURN path are not visible here.
 */
330 static int ldlm_do_enqueue(struct ldlm_test_thread *thread)
332 struct lustre_handle lockh;
333 __u64 res_id[3] = {0};
335 struct ldlm_extent ext;
336 unsigned char random;
337 int flags = 0, rc = 0;
340 /* Pick a random resource from 1 to num_resources */
341 get_random_bytes(&random, sizeof(random));
342 res_id[0] = random % num_resources;
344 /* Pick a random lock mode */
345 get_random_bytes(&random, sizeof(random));
346 lock_mode = random % LCK_NL + 1;
348 /* Pick a random extent */
349 get_random_bytes(&random, sizeof(random));
350 ext.start = random % num_extents;
351 get_random_bytes(&random, sizeof(random));
353 (num_extents - (int)ext.start) + ext.start;
355 LDLM_DEBUG_NOLOCK("about to enqueue with resource %d, mode %d,"
/* BUGFIX: "&regress_connh" had been corrupted to the registered-sign
 * character followed by "ress_connh" (an HTML-entity mangling of
 * "&reg"); restored the intended address-of expression. */
362 rc = ldlm_match_or_enqueue(&regress_connh,
364 thread->obddev->obd_namespace,
365 NULL, res_id, LDLM_EXTENT, &ext,
366 sizeof(ext), lock_mode, &flags,
367 ldlm_test_completion_ast,
368 ldlm_test_blocking_ast,
371 atomic_inc(&locks_requested);
374 CERROR("ldlm_cli_enqueue: %d\n", rc);
/*
 * Convert one held lock to a random mode: pop the oldest record from
 * lock_list, issue a client-side convert, then swap the reference from
 * the old granted mode to the new mode and free the record.
 * NOTE(review): listing is elided -- the lock_mode declaration, any
 * empty-list guard before list_entry, the num_locks bookkeeping,
 * LDLM_LOCK_PUT and RETURN are not visible here.
 */
381 static int ldlm_do_convert(void)
384 unsigned char random;
385 int flags = 0, rc = 0;
386 struct ldlm_test_lock *lock_info;
387 struct ldlm_lock *lock;
390 /* delete from list */
391 spin_lock(&ctl_lock);
392 lock_info = list_entry(lock_list.next, struct ldlm_test_lock, l_link);
393 list_del(lock_list.next);
395 spin_unlock(&ctl_lock);
397 /* Pick a random lock mode */
398 get_random_bytes(&random, sizeof(random));
399 lock_mode = random % LCK_NL + 1;
401 /* do the conversion */
402 rc = ldlm_cli_convert(&lock_info->l_lockh , lock_mode, &flags);
403 atomic_inc(&converts_requested);
406 CERROR("ldlm_cli_convert: %d\n", rc);
411 * Adjust reference counts.
412 * FIXME: This is technically a bit... wrong,
413 * since we don't know when/if the convert succeeded
415 ldlm_lock_addref(&lock_info->l_lockh, lock_mode);
416 lock = ldlm_handle2lock(&lock_info->l_lockh);
417 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
420 OBD_FREE(lock_info, sizeof(*lock_info));
/*
 * Worker-thread entry point for the regression test.  Each iteration
 * randomly decrements, converts or enqueues a lock; the probability of
 * decrementing grows linearly with the number of locks held so the
 * test hovers below max_locks.  Runs until SVC_STOPPING is set by
 * ldlm_regression_stop(), then signals SVC_STOPPED.
 * NOTE(review): listing is elided -- daemonize/setup lines, the
 * do_decrement/do_convert call sites under the if/else, the schedule()
 * yield and the final RETURN are not visible here.
 */
427 static int ldlm_test_main(void *data)
429 struct ldlm_test_thread *thread = data;
/* BUGFIX: "&current" had been corrupted to the currency-sign character
 * followed by "t" (an HTML-entity mangling of "&curren") in the four
 * signal-blocking lines below; restored the intended expressions. */
434 spin_lock_irq(&current->sigmask_lock);
435 sigfillset(&current->blocked);
436 recalc_sigpending(current);
437 spin_unlock_irq(&current->sigmask_lock);
439 sprintf(current->comm, "ldlm_test");
442 /* Record that the thread is running */
443 thread->t_flags |= SVC_RUNNING;
444 wake_up(&thread->t_ctl_waitq);
446 while (!(thread->t_flags & SVC_STOPPING)) {
447 unsigned char random;
/* BUGFIX: con_chance is initialized to 0 because its only assignment
 * is commented out (conversions disabled); otherwise the
 * "chance_left -= con_chance" below reads an indeterminate value. */
448 unsigned char dec_chance, con_chance = 0;
449 unsigned char chance_left = 100;
451 spin_lock(&ctl_lock);
452 /* probability of decrementing increases linearly
453 * as more locks are held.
455 dec_chance = chance_left * num_locks / max_locks;
456 chance_left -= dec_chance;
458 /* FIXME: conversions temporarily disabled
459 * until they are working correctly.
461 /* con_chance = chance_left * num_locks / max_locks; */
463 chance_left -= con_chance;
464 spin_unlock(&ctl_lock);
466 get_random_bytes(&random, sizeof(random));
468 random = random % 100;
469 if (random < dec_chance)
471 else if (random < (dec_chance + con_chance))
474 ldlm_do_enqueue(thread);
476 LDLM_DEBUG_NOLOCK("locks requested: %d, "
477 "conversions requested %d",
478 atomic_read(&locks_requested),
479 atomic_read(&converts_requested));
480 LDLM_DEBUG_NOLOCK("locks granted: %d, "
482 atomic_read(&locks_granted),
483 atomic_read(&locks_matched));
485 spin_lock(&ctl_lock);
486 LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
488 spin_unlock(&ctl_lock);
491 * We don't sleep after a lock being blocked, so let's
492 * make sure other things can run.
497 thread->t_flags |= SVC_STOPPED;
498 wake_up(&thread->t_ctl_waitq);
/*
 * Allocate, register and launch one regression worker thread, then
 * wait until it reports SVC_RUNNING before returning.
 * NOTE(review): listing is elided -- the allocation-failure and
 * kernel_thread-failure paths and RETURN are not visible here.
 */
503 static int ldlm_start_thread(struct obd_device *obddev,
504 struct lustre_handle *connh)
506 struct ldlm_test_thread *test;
510 OBD_ALLOC(test, sizeof(*test));
515 init_waitqueue_head(&test->t_ctl_waitq);
517 test->obddev = obddev;
/* Register on ctl_threads before starting so ldlm_regression_stop()
 * can always find the thread. */
519 spin_lock(&ctl_lock);
520 list_add(&test->t_link, &ctl_threads);
521 spin_unlock(&ctl_lock);
523 rc = kernel_thread(ldlm_test_main, (void *)test,
524 CLONE_VM | CLONE_FS | CLONE_FILES);
526 CERROR("cannot start thread\n");
/* Hand-shake: block until ldlm_test_main() has set SVC_RUNNING. */
529 wait_event(test->t_ctl_waitq, test->t_flags & SVC_RUNNING);
/*
 * Start the multi-threaded regression test: record the tunables and
 * connection handle in file-scope state, then spawn 'threads' worker
 * threads.  Refuses to start if a regression is already running.
 * NOTE(review): listing is elided -- declarations of i/rc, the early
 * return after the already-running error, the loop's error check and
 * RETURN are not visible here.
 */
534 int ldlm_regression_start(struct obd_device *obddev,
535 struct lustre_handle *connh,
536 unsigned int threads, unsigned int max_locks_in,
537 unsigned int num_resources_in,
538 unsigned int num_extents_in)
543 spin_lock(&ctl_lock);
544 if (regression_running) {
545 CERROR("You can't start the ldlm regression twice.\n");
546 spin_unlock(&ctl_lock);
549 regression_running = 1;
550 spin_unlock(&ctl_lock);
/* Worker threads read these globals; set them before any thread
 * starts. */
552 regress_connh = *connh;
553 max_locks = max_locks_in;
554 num_resources = num_resources_in;
555 num_extents = num_extents_in;
557 LDLM_DEBUG_NOLOCK("regression test started: threads: %d, max_locks: "
558 "%d, num_res: %d, num_ext: %d\n",
559 threads, max_locks_in, num_resources_in,
562 for (i = 0; i < threads; i++) {
563 rc = ldlm_start_thread(obddev, connh);
/* On thread-start failure, tear everything down again. */
570 ldlm_regression_stop();
/*
 * Stop the regression test: signal every worker thread to stop, wait
 * for each to finish, free the thread records, then decref and free
 * every lock still held on lock_list.
 * NOTE(review): listing is elided -- the early return after the
 * not-running error, LDLM_LOCK_PUT after ldlm_handle2lock and RETURN
 * are not visible here.
 */
574 int ldlm_regression_stop(void)
578 spin_lock(&ctl_lock);
579 if (!regression_running) {
580 CERROR("The ldlm regression isn't started.\n");
581 spin_unlock(&ctl_lock);
585 while (!list_empty(&ctl_threads)) {
586 struct ldlm_test_thread *thread;
587 thread = list_entry(ctl_threads.next, struct ldlm_test_thread,
590 thread->t_flags |= SVC_STOPPING;
/* Drop ctl_lock while waiting: the thread's main loop takes
 * ctl_lock itself, and we must not hold it across wait_event. */
592 spin_unlock(&ctl_lock);
593 wake_up(&thread->t_ctl_waitq);
594 wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
595 spin_lock(&ctl_lock);
597 list_del(&thread->t_link);
598 OBD_FREE(thread, sizeof(*thread));
601 /* decrement all held locks */
602 while (!list_empty(&lock_list)) {
603 struct ldlm_lock *lock;
604 struct ldlm_test_lock *lock_info =
605 list_entry(lock_list.next, struct ldlm_test_lock,
607 list_del(lock_list.next);
610 lock = ldlm_handle2lock(&lock_info->l_lockh);
611 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
614 OBD_FREE(lock_info, sizeof(*lock_info));
617 regression_running = 0;
618 spin_unlock(&ctl_lock);
623 int ldlm_test(struct obd_device *obddev, struct lustre_handle *connh)
626 rc = ldlm_test_basics(obddev);
630 rc = ldlm_test_extents(obddev);
634 rc = ldlm_test_network(obddev, connh);