1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002 Cluster File Systems, Inc. <info@clusterfs.com>
5 * Copyright (c) 2002 Lawrence Livermore National Laboratory
6 * Author: James Newsome <newsome2@llnl.gov>
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #define DEBUG_SUBSYSTEM S_LDLM
26 #include <asm/atomic.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
30 #include <linux/lustre_dlm.h>
31 #include <linux/obd.h>
/* Per-worker-thread state for the LDLM regression test.
 * NOTE(review): this listing skips original line 37 and the struct's
 * closing brace; at least one member is missing here — presumably the
 * t_flags word tested elsewhere against SVC_RUNNING/SVC_STOPPING/
 * SVC_STOPPED — confirm against the full file. */
33 struct ldlm_test_thread {
34 struct obd_device *obddev;
35 struct ldlm_namespace *t_ns;
36 struct list_head t_link;     /* linkage on the global ctl_threads list */
38 wait_queue_head_t t_ctl_waitq;  /* start/stop handshake with the control path */
/* Bookkeeping record for one granted lock, queued on the global
 * lock_list so test threads can later convert or decref it.
 * NOTE(review): the closing brace (original lines 44-45) is missing
 * from this listing. */
41 struct ldlm_test_lock {
42 struct list_head l_link;        /* linkage on lock_list (under ctl_lock) */
43 struct lustre_handle l_lockh;   /* handle of the granted lock */
/* Tunables set once by ldlm_regression_start() and then only read by
 * the test threads. */
46 static unsigned int max_locks;
47 static unsigned int num_resources;
48 static unsigned int num_extents;
50 static spinlock_t ctl_lock = SPIN_LOCK_UNLOCKED;
51 /* protect these with the ctl_lock */
52 static LIST_HEAD(ctl_threads);
53 static int regression_running = 0;
54 static LIST_HEAD(lock_list);
55 static int num_locks = 0;
57 /* cumulative stats for regression test */
58 static atomic_t locks_requested = ATOMIC_INIT(0);
59 static atomic_t converts_requested = ATOMIC_INIT(0);
60 static atomic_t locks_granted = ATOMIC_INIT(0);
61 static atomic_t locks_matched = ATOMIC_INIT(0);
63 /* making this a global avoids the problem of having pointers
64 * to garbage after the test exits.
66 static struct lustre_handle regress_connh;
/* Per-iteration actions executed by ldlm_test_main(). */
68 static int ldlm_do_decrement(void);
69 static int ldlm_do_enqueue(struct ldlm_test_thread *thread);
70 static int ldlm_do_convert(void);
/* Blocking AST used by the regression test: on LDLM_CB_BLOCKING it
 * cancels the lock it is called on; on LDLM_CB_CANCELING it only logs.
 * NOTE(review): the listing drops the parameter list tail, the switch
 * statement header (presumably switch on the flag argument), `break`s
 * and the function's return — confirm against the full file. */
73 * blocking ast for regression test.
76 static int ldlm_test_blocking_ast(struct ldlm_lock *lock,
77 struct ldlm_lock_desc *new,
81 struct lustre_handle lockh;
85 case LDLM_CB_BLOCKING:
86 LDLM_DEBUG(lock, "We're blocking. Cancelling lock");
87 ldlm_lock2handle(lock, &lockh);
88 rc = ldlm_cli_cancel(&lockh);
/* rc checked on a dropped line; only the error print survives here. */
90 CERROR("ldlm_cli_cancel: %d\n", rc);
94 case LDLM_CB_CANCELING:
95 LDLM_DEBUG(lock, "this lock is being cancelled");
104 /* blocking ast for basic tests. noop */
105 static int ldlm_blocking_ast(struct ldlm_lock *lock,
106 struct ldlm_lock_desc *new,
107 void *data, int flag)
/* Only logs its arguments; the flag argument and return statement are
 * on lines dropped from this listing. */
110 CERROR("ldlm_blocking_ast: lock=%p, new=%p, flag=%d\n", lock, new,
115 /* Completion ast for regression test.
116 * Does not sleep when blocked.
118 static int ldlm_test_completion_ast(struct ldlm_lock *lock, int flags, void *data)
120 struct ldlm_test_lock *lock_info;
/* A blocked lock is only logged; this AST returns without waiting. */
123 if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
124 LDLM_FL_BLOCK_CONV)) {
125 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
129 if (lock->l_granted_mode != lock->l_req_mode)
130 CERROR("completion ast called with non-granted lock\n");
132 /* add to list of granted locks */
/* WAIT_NOREPROC means the lock was matched rather than newly granted;
 * count it separately. */
134 if (flags & LDLM_FL_WAIT_NOREPROC) {
135 atomic_inc(&locks_matched);
136 LDLM_DEBUG(lock, "lock matched");
138 atomic_inc(&locks_granted);
139 LDLM_DEBUG(lock, "lock granted");
142 OBD_ALLOC(lock_info, sizeof(*lock_info));
143 if (lock_info == NULL) {
/* Allocation-failure handling is on dropped lines (144-147). */
148 ldlm_lock2handle(lock, &lock_info->l_lockh);
150 spin_lock(&ctl_lock);
151 list_add_tail(&lock_info->l_link, &lock_list);
/* NOTE(review): dropped line 152 presumably increments num_locks
 * under ctl_lock — confirm. */
153 spin_unlock(&ctl_lock);
/* Local (non-network) sanity test of plain-lock enqueue, blocking and
 * convert on a private server namespace.  Error checks after each call
 * are on lines dropped from this listing. */
158 int ldlm_test_basics(struct obd_device *obddev)
160 struct ldlm_namespace *ns;
161 struct ldlm_resource *res;
162 struct ldlm_res_id res_id = { .name = {1, 2, 3} };
164 struct ldlm_lock *lock1, *lock;
168 ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
/* First lock: concurrent-read, should be granted immediately. */
172 lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_CR, NULL, 0);
175 err = ldlm_lock_enqueue(ns, lock1, NULL, 0, &flags,
176 ldlm_completion_ast, ldlm_blocking_ast);
/* Second lock: exclusive on the same resource, so it must block. */
180 lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX, NULL, 0);
183 err = ldlm_lock_enqueue(ns, lock, NULL, 0, &flags,
184 ldlm_completion_ast, ldlm_blocking_ast);
187 if (!(flags & LDLM_FL_BLOCK_GRANTED))
190 res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
193 ldlm_resource_dump(res);
/* Downconvert the blocker to NL and reprocess so the EX lock can go. */
195 res = ldlm_lock_convert(lock1, LCK_NL, &flags);
197 ldlm_reprocess_all(res);
199 ldlm_resource_dump(res);
200 ldlm_namespace_free(ns, 0);
/* Local sanity test of extent locks: two compatible PR extents, then a
 * conflicting EX extent that must block, then convert/cancel to release
 * it.  Per-call error checks are on lines dropped from this listing. */
205 int ldlm_test_extents(struct obd_device *obddev)
207 struct ldlm_namespace *ns;
208 struct ldlm_resource *res;
209 struct ldlm_lock *lock, *lock1, *lock2;
210 struct ldlm_res_id res_id = { .name = {0} };
211 struct ldlm_extent ext1 = {4, 6}, ext2 = {6, 9}, ext3 = {10, 11};
216 ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
221 lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR, NULL,
225 err = ldlm_lock_enqueue(ns, lock1, &ext1, sizeof(ext1), &flags, NULL,
/* Extent enqueue is expected to move the lock to a new resource. */
229 if (!(flags & LDLM_FL_LOCK_CHANGED))
233 lock2 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR,
235 err = ldlm_lock_enqueue(ns, lock2, &ext2, sizeof(ext2), &flags, NULL,
239 if (!(flags & LDLM_FL_LOCK_CHANGED))
/* Conflicting EX extent: must block and must NOT change resource. */
243 lock = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_EX, NULL, 0);
246 err = ldlm_lock_enqueue(ns, lock, &ext3, sizeof(ext3), &flags,
250 if (!(flags & LDLM_FL_BLOCK_GRANTED))
252 if (flags & LDLM_FL_LOCK_CHANGED)
255 /* Convert/cancel blocking locks */
257 res = ldlm_lock_convert(lock1, LCK_NL, &flags);
259 ldlm_reprocess_all(res);
261 ldlm_lock_cancel(lock2);
263 ldlm_reprocess_all(res);
265 /* Dump the results */
266 res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
269 ldlm_resource_dump(res);
270 ldlm_namespace_free(ns, 0);
/* Network test: enqueue one PR extent lock over the given connection,
 * convert it to EX, dump it, then drop the PR reference.
 * NOTE(review): declarations of err/flags and the intermediate error
 * returns are on lines dropped from this listing. */
275 static int ldlm_test_network(struct obd_device *obddev,
276 struct lustre_handle *connh)
278 struct ldlm_res_id res_id = { .name = {1, 2, 3} };
279 struct ldlm_extent ext = {4, 6};
280 struct lustre_handle lockh1;
281 struct ldlm_lock *lock;
286 err = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, NULL, res_id,
287 LDLM_EXTENT, &ext, sizeof(ext), LCK_PR, &flags,
288 ldlm_completion_ast, NULL, NULL, 0, &lockh1);
290 CERROR("ldlm_cli_enqueue: %d\n", err);
293 err = ldlm_cli_convert(&lockh1, LCK_EX, &flags);
294 CERROR("ldlm_cli_convert: %d\n", err);
296 lock = ldlm_handle2lock(&lockh1);
297 ldlm_lock_dump(D_OTHER, lock);
300 /* Need to decrement old mode. Don't bother incrementing new
301 * mode since the test is done.
304 ldlm_lock_decref(&lockh1, LCK_PR);
/* Pop the oldest granted lock off lock_list and drop its reference at
 * its currently granted mode, freeing the bookkeeping record.
 * NOTE(review): dropped lines around 327 and 333-334 presumably
 * decrement num_locks and LDLM_LOCK_PUT() the ref taken by
 * ldlm_handle2lock() — confirm against the full file. */
309 static int ldlm_do_decrement(void)
311 struct ldlm_test_lock *lock_info;
312 struct ldlm_lock *lock;
316 spin_lock(&ctl_lock);
317 if(list_empty(&lock_list)) {
318 CERROR("lock_list is empty\n");
319 spin_unlock(&ctl_lock);
323 /* delete from list */
324 lock_info = list_entry(lock_list.next,
325 struct ldlm_test_lock, l_link);
326 list_del(lock_list.next);
328 spin_unlock(&ctl_lock);
330 /* decrement and free the info */
331 lock = ldlm_handle2lock(&lock_info->l_lockh);
332 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
335 OBD_FREE(lock_info, sizeof(*lock_info));
/* Enqueue (or match) one extent lock on a randomly chosen resource,
 * with a random mode and a random extent, counting the request in
 * locks_requested.  Grant/match accounting happens in
 * ldlm_test_completion_ast().
 * NOTE(review): the declaration of lock_mode (original line 344), the
 * ext.end assignment head (line 362) and the rc check before the
 * CERROR are on lines dropped from this listing.
 * Fix applied below: original line 369 had been mojibaked — the HTML
 * entity for "&reg" turned "&regress_connh" into "(R)ress_connh";
 * restored to &regress_connh, matching the global declared above. */
340 static int ldlm_do_enqueue(struct ldlm_test_thread *thread)
342 struct lustre_handle lockh;
343 struct ldlm_res_id res_id = { .name = {0} };
345 struct ldlm_extent ext;
346 unsigned char random;
347 int flags = 0, rc = 0;
350 /* Pick a random resource from 1 to num_resources */
351 get_random_bytes(&random, sizeof(random));
352 res_id.name[0] = random % num_resources;
354 /* Pick a random lock mode */
355 get_random_bytes(&random, sizeof(random));
356 lock_mode = random % LCK_NL + 1;
358 /* Pick a random extent */
359 get_random_bytes(&random, sizeof(random));
360 ext.start = random % num_extents;
361 get_random_bytes(&random, sizeof(random));
363 (num_extents - (int)ext.start) + ext.start;
365 LDLM_DEBUG_NOLOCK("about to enqueue with resource "LPX64", mode %d,"
366 " extent "LPX64" -> "LPX64, res_id.name[0], lock_mode,
369 rc = ldlm_match_or_enqueue(&regress_connh, NULL,
370 thread->obddev->obd_namespace,
371 NULL, res_id, LDLM_EXTENT, &ext,
372 sizeof(ext), lock_mode, &flags,
373 ldlm_test_completion_ast,
374 ldlm_test_blocking_ast,
377 atomic_inc(&locks_requested);
380 CERROR("ldlm_cli_enqueue: %d\n", rc);
/* Pop a granted lock off lock_list and ask the server to convert it to
 * a random mode, then move our reference from the old granted mode to
 * the new one (see the FIXME below about the race in doing so).
 * NOTE(review): unlike ldlm_do_decrement() there is no visible
 * list_empty() check before list_entry(); it may be on a dropped line
 * (394-395) — confirm, otherwise this dereferences the list head when
 * empty.  Dropped line 400 presumably decrements num_locks, and lines
 * 424-425 presumably LDLM_LOCK_PUT() the handle2lock reference. */
387 static int ldlm_do_convert(void)
390 unsigned char random;
391 int flags = 0, rc = 0;
392 struct ldlm_test_lock *lock_info;
393 struct ldlm_lock *lock;
396 /* delete from list */
397 spin_lock(&ctl_lock);
398 lock_info = list_entry(lock_list.next, struct ldlm_test_lock, l_link);
399 list_del(lock_list.next);
401 spin_unlock(&ctl_lock);
403 /* Pick a random lock mode */
404 get_random_bytes(&random, sizeof(random));
405 lock_mode = random % LCK_NL + 1;
407 /* do the conversion */
408 rc = ldlm_cli_convert(&lock_info->l_lockh , lock_mode, &flags);
409 atomic_inc(&converts_requested);
412 CERROR("ldlm_cli_convert: %d\n", rc);
417 * Adjust reference counts.
418 * FIXME: This is technically a bit... wrong,
419 * since we don't know when/if the convert succeeded
421 ldlm_lock_addref(&lock_info->l_lockh, lock_mode);
422 lock = ldlm_handle2lock(&lock_info->l_lockh);
423 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
426 OBD_FREE(lock_info, sizeof(*lock_info));
/* Main loop of one regression-test thread: block all signals, announce
 * SVC_RUNNING, then until SVC_STOPPING randomly pick between dropping a
 * held lock, converting one (currently disabled), or enqueueing a new
 * one, with the drop probability growing linearly with num_locks.
 * NOTE(review): the calls on the branch bodies (lines 482/484), the
 * schedule()/yield call implied by the comment near the bottom, the
 * #else/#endif of the version guard, and con_chance's initialisation
 * (likely `con_chance = 0;` on dropped line 474) are on lines missing
 * from this listing.
 * Fix applied below: four lines had been mojibaked — the HTML entity
 * for "&curren" turned "&current" into the currency sign; restored to
 * &current->blocked / &current->sigmask_lock, consistent with the
 * plain "current" uses on original lines 447 and 451. */
433 static int ldlm_test_main(void *data)
435 struct ldlm_test_thread *thread = data;
441 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
442 sigfillset(&current->blocked);
445 spin_lock_irqsave(&current->sigmask_lock, flags);
446 sigfillset(&current->blocked);
447 recalc_sigpending(current);
448 spin_unlock_irqrestore(&current->sigmask_lock, flags);
451 sprintf(current->comm, "ldlm_test");
454 /* Record that the thread is running */
455 thread->t_flags |= SVC_RUNNING;
456 wake_up(&thread->t_ctl_waitq);
458 while (!(thread->t_flags & SVC_STOPPING)) {
459 unsigned char random;
460 unsigned char dec_chance, con_chance;
461 unsigned char chance_left = 100;
463 spin_lock(&ctl_lock);
464 /* probability of decrementing increases linearly
465 * as more locks are held.
467 dec_chance = chance_left * num_locks / max_locks;
468 chance_left -= dec_chance;
470 /* FIXME: conversions temporarily disabled
471 * until they are working correctly.
473 /* con_chance = chance_left * num_locks / max_locks; */
475 chance_left -= con_chance;
476 spin_unlock(&ctl_lock);
478 get_random_bytes(&random, sizeof(random));
480 random = random % 100;
481 if (random < dec_chance)
483 else if (random < (dec_chance + con_chance))
486 ldlm_do_enqueue(thread);
488 LDLM_DEBUG_NOLOCK("locks requested: %d, "
489 "conversions requested %d",
490 atomic_read(&locks_requested),
491 atomic_read(&converts_requested));
492 LDLM_DEBUG_NOLOCK("locks granted: %d, "
494 atomic_read(&locks_granted),
495 atomic_read(&locks_matched));
497 spin_lock(&ctl_lock);
498 LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
500 spin_unlock(&ctl_lock);
503 * We don't sleep after a lock being blocked, so let's
504 * make sure other things can run.
509 thread->t_flags |= SVC_STOPPED;
510 wake_up(&thread->t_ctl_waitq);
/* Allocate one ldlm_test_thread, link it on ctl_threads, spawn
 * ldlm_test_main() as a kernel thread and wait for it to report
 * SVC_RUNNING before returning.
 * NOTE(review): the allocation-failure branch, the kernel_thread()
 * rc < 0 check (only its CERROR survives here) and the return value
 * are on lines dropped from this listing.  connh is accepted but no
 * use of it is visible in the surviving lines. */
515 static int ldlm_start_thread(struct obd_device *obddev,
516 struct lustre_handle *connh)
518 struct ldlm_test_thread *test;
522 OBD_ALLOC(test, sizeof(*test));
527 init_waitqueue_head(&test->t_ctl_waitq);
529 test->obddev = obddev;
531 spin_lock(&ctl_lock);
532 list_add(&test->t_link, &ctl_threads);
533 spin_unlock(&ctl_lock);
535 rc = kernel_thread(ldlm_test_main, (void *)test,
536 CLONE_VM | CLONE_FS | CLONE_FILES);
538 CERROR("cannot start thread\n");
541 wait_event(test->t_ctl_waitq, test->t_flags & SVC_RUNNING);
/* Entry point to start the regression test: record the tunables and the
 * connection handle in the file-scope globals, then spawn the requested
 * number of worker threads.  Refuses to start while already running.
 * On a thread-start failure the loop breaks out (dropped lines 576-581)
 * and everything is torn down via ldlm_regression_stop(). */
546 int ldlm_regression_start(struct obd_device *obddev,
547 struct lustre_handle *connh,
548 unsigned int threads, unsigned int max_locks_in,
549 unsigned int num_resources_in,
550 unsigned int num_extents_in)
555 spin_lock(&ctl_lock);
556 if (regression_running) {
557 CERROR("You can't start the ldlm regression twice.\n");
558 spin_unlock(&ctl_lock);
561 regression_running = 1;
562 spin_unlock(&ctl_lock);
/* Copy the connection handle into the global so AST callbacks and the
 * worker threads keep a valid handle after this frame is gone. */
564 regress_connh = *connh;
565 max_locks = max_locks_in;
566 num_resources = num_resources_in;
567 num_extents = num_extents_in;
569 LDLM_DEBUG_NOLOCK("regression test started: threads: %d, max_locks: "
570 "%d, num_res: %d, num_ext: %d\n",
571 threads, max_locks_in, num_resources_in,
574 for (i = 0; i < threads; i++) {
575 rc = ldlm_start_thread(obddev, connh);
582 ldlm_regression_stop();
/* Stop the regression test: signal every worker thread to stop, wait
 * for each to acknowledge SVC_STOPPED, free the thread records, then
 * drop and free every lock still on lock_list.  ctl_lock is released
 * around the wait_event() so the worker can take it while exiting.
 * NOTE(review): dropped lines 620-621/624-625 presumably decrement
 * num_locks and LDLM_LOCK_PUT() the handle2lock reference — confirm. */
586 int ldlm_regression_stop(void)
590 spin_lock(&ctl_lock);
591 if (!regression_running) {
592 CERROR("The ldlm regression isn't started.\n");
593 spin_unlock(&ctl_lock);
597 while (!list_empty(&ctl_threads)) {
598 struct ldlm_test_thread *thread;
599 thread = list_entry(ctl_threads.next, struct ldlm_test_thread,
602 thread->t_flags |= SVC_STOPPING;
604 spin_unlock(&ctl_lock);
605 wake_up(&thread->t_ctl_waitq);
606 wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
607 spin_lock(&ctl_lock);
609 list_del(&thread->t_link);
610 OBD_FREE(thread, sizeof(*thread));
613 /* decrement all held locks */
614 while (!list_empty(&lock_list)) {
615 struct ldlm_lock *lock;
616 struct ldlm_test_lock *lock_info =
617 list_entry(lock_list.next, struct ldlm_test_lock,
619 list_del(lock_list.next);
622 lock = ldlm_handle2lock(&lock_info->l_lockh);
623 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
626 OBD_FREE(lock_info, sizeof(*lock_info));
629 regression_running = 0;
630 spin_unlock(&ctl_lock);
635 int ldlm_test(struct obd_device *obddev, struct lustre_handle *connh)
638 rc = ldlm_test_basics(obddev);
642 rc = ldlm_test_extents(obddev);
646 rc = ldlm_test_network(obddev, connh);