1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002 Cluster File Systems, Inc. <info@clusterfs.com>
5 * Copyright (c) 2002 Lawrence Livermore National Laboratory
6 * Author: James Newsome <newsome2@llnl.gov>
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
24 #define DEBUG_SUBSYSTEM S_LDLM
26 #include <asm/atomic.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
30 #include <linux/lustre_dlm.h>
32 struct ldlm_test_thread {
33 struct obd_device *obddev;
34 struct ldlm_namespace *t_ns;
35 struct list_head t_link;
37 wait_queue_head_t t_ctl_waitq;
40 struct ldlm_test_lock {
41 struct list_head l_link;
42 struct lustre_handle l_lockh;
45 static unsigned int max_locks;
46 static unsigned int num_resources;
47 static unsigned int num_extents;
49 static spinlock_t ctl_lock = SPIN_LOCK_UNLOCKED;
50 /* protect these with the ctl_lock */
51 static LIST_HEAD(ctl_threads);
52 static int regression_running = 0;
53 static LIST_HEAD(lock_list);
54 static int num_locks = 0;
56 /* cumulative stats for regression test */
57 static atomic_t locks_requested = ATOMIC_INIT(0);
58 static atomic_t converts_requested = ATOMIC_INIT(0);
59 static atomic_t locks_granted = ATOMIC_INIT(0);
60 static atomic_t locks_matched = ATOMIC_INIT(0);
62 /* making this a global avoids the problem of having pointers
63 * to garbage after the test exits.
65 static struct lustre_handle regress_connh;
67 static int ldlm_do_decrement(void);
68 static int ldlm_do_enqueue(struct ldlm_test_thread *thread);
69 static int ldlm_do_convert(void);
72 * blocking ast for regression test.
75 static int ldlm_test_blocking_ast(struct ldlm_lock *lock,
76 struct ldlm_lock_desc *new,
77 void *data, __u32 data_len)
80 struct lustre_handle lockh;
83 LDLM_DEBUG(lock, "We're blocking. Cancelling lock");
84 ldlm_lock2handle(lock, &lockh);
85 rc = ldlm_cli_cancel(&lockh);
87 CERROR("ldlm_cli_cancel: %d\n", rc);
94 /* blocking ast for basic tests. noop */
95 static int ldlm_blocking_ast(struct ldlm_lock *lock,
96 struct ldlm_lock_desc *new,
97 void *data, __u32 data_len)
100 CERROR("ldlm_blocking_ast: lock=%p, new=%p\n", lock, new);
104 /* Completion ast for regression test.
105 * Does not sleep when blocked.
107 static int ldlm_test_completion_ast(struct ldlm_lock *lock, int flags)
109 struct ldlm_test_lock *lock_info;
112 if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
113 LDLM_FL_BLOCK_CONV)) {
115 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
119 if (lock->l_granted_mode != lock->l_req_mode)
120 CERROR("completion ast called with non-granted lock\n");
122 /* add to list of granted locks */
124 if (flags & LDLM_FL_WAIT_NOREPROC) {
125 atomic_inc(&locks_matched);
126 LDLM_DEBUG(lock, "lock matched");
128 atomic_inc(&locks_granted);
129 LDLM_DEBUG(lock, "lock granted");
132 OBD_ALLOC(lock_info, sizeof(*lock_info));
133 if (lock_info == NULL) {
138 ldlm_lock2handle(lock, &lock_info->l_lockh);
140 spin_lock(&ctl_lock);
141 list_add_tail(&lock_info->l_link, &lock_list);
143 spin_unlock(&ctl_lock);
148 int ldlm_test_basics(struct obd_device *obddev)
150 struct ldlm_namespace *ns;
151 struct ldlm_resource *res;
152 __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
154 struct ldlm_lock *lock1, *lock;
158 ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
162 lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_CR, NULL, 0);
165 err = ldlm_lock_enqueue(lock1, NULL, 0, &flags,
166 ldlm_completion_ast, ldlm_blocking_ast);
170 lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX, NULL, 0);
173 err = ldlm_lock_enqueue(lock, NULL, 0, &flags,
174 ldlm_completion_ast, ldlm_blocking_ast);
177 if (!(flags & LDLM_FL_BLOCK_GRANTED))
180 res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
183 ldlm_resource_dump(res);
185 res = ldlm_lock_convert(lock1, LCK_NL, &flags);
187 ldlm_reprocess_all(res);
189 ldlm_resource_dump(res);
190 ldlm_namespace_free(ns);
195 int ldlm_test_extents(struct obd_device *obddev)
197 struct ldlm_namespace *ns;
198 struct ldlm_resource *res;
199 struct ldlm_lock *lock, *lock1, *lock2;
200 __u64 res_id[RES_NAME_SIZE] = {0, 0, 0};
201 struct ldlm_extent ext1 = {4, 6}, ext2 = {6, 9}, ext3 = {10, 11};
206 ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
211 lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR, NULL,
215 err = ldlm_lock_enqueue(lock1, &ext1, sizeof(ext1), &flags, NULL, NULL);
218 if (!(flags & LDLM_FL_LOCK_CHANGED))
222 lock2 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR,
224 err = ldlm_lock_enqueue(lock2, &ext2, sizeof(ext2), &flags, NULL, NULL);
227 if (!(flags & LDLM_FL_LOCK_CHANGED))
231 lock = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_EX, NULL, 0);
234 err = ldlm_lock_enqueue(lock, &ext3, sizeof(ext3), &flags,
238 if (!(flags & LDLM_FL_BLOCK_GRANTED))
240 if (flags & LDLM_FL_LOCK_CHANGED)
243 /* Convert/cancel blocking locks */
245 res = ldlm_lock_convert(lock1, LCK_NL, &flags);
247 ldlm_reprocess_all(res);
249 ldlm_lock_cancel(lock2);
251 ldlm_reprocess_all(res);
253 /* Dump the results */
254 res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
257 ldlm_resource_dump(res);
258 ldlm_namespace_free(ns);
263 static int ldlm_test_network(struct obd_device *obddev,
264 struct lustre_handle *connh)
267 __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
268 struct ldlm_extent ext = {4, 6};
269 struct lustre_handle lockh1;
270 struct ldlm_lock *lock;
275 err = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, NULL, res_id,
276 LDLM_EXTENT, &ext, sizeof(ext), LCK_PR, &flags,
277 ldlm_completion_ast, NULL, NULL, 0, &lockh1);
279 CERROR("ldlm_cli_enqueue: %d\n", err);
282 err = ldlm_cli_convert(&lockh1, LCK_EX, &flags);
283 CERROR("ldlm_cli_convert: %d\n", err);
285 lock = ldlm_handle2lock(&lockh1);
286 ldlm_lock_dump(lock);
289 /* Need to decrement old mode. Don't bother incrementing new
290 * mode since the test is done.
293 ldlm_lock_decref(&lockh1, LCK_PR);
298 static int ldlm_do_decrement(void)
300 struct ldlm_test_lock *lock_info;
301 struct ldlm_lock *lock;
305 spin_lock(&ctl_lock);
306 if(list_empty(&lock_list)) {
307 CERROR("lock_list is empty\n");
308 spin_unlock(&ctl_lock);
312 /* delete from list */
313 lock_info = list_entry(lock_list.next,
314 struct ldlm_test_lock, l_link);
315 list_del(lock_list.next);
317 spin_unlock(&ctl_lock);
319 /* decrement and free the info */
320 lock = ldlm_handle2lock(&lock_info->l_lockh);
321 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
324 OBD_FREE(lock_info, sizeof(*lock_info));
329 static int ldlm_do_enqueue(struct ldlm_test_thread *thread)
331 struct lustre_handle lockh;
332 __u64 res_id[3] = {0};
334 struct ldlm_extent ext;
335 unsigned char random;
336 int flags = 0, rc = 0;
339 /* Pick a random resource from 1 to num_resources */
340 get_random_bytes(&random, sizeof(random));
341 res_id[0] = random % num_resources;
343 /* Pick a random lock mode */
344 get_random_bytes(&random, sizeof(random));
345 lock_mode = random % LCK_NL + 1;
347 /* Pick a random extent */
348 get_random_bytes(&random, sizeof(random));
349 ext.start = random % num_extents;
350 get_random_bytes(&random, sizeof(random));
352 (num_extents - (int)ext.start) + ext.start;
354 LDLM_DEBUG_NOLOCK("about to enqueue with resource %d, mode %d,"
361 rc = ldlm_match_or_enqueue(®ress_connh,
363 thread->obddev->obd_namespace,
364 NULL, res_id, LDLM_EXTENT, &ext,
365 sizeof(ext), lock_mode, &flags,
366 ldlm_test_completion_ast,
367 ldlm_test_blocking_ast,
370 atomic_inc(&locks_requested);
373 CERROR("ldlm_cli_enqueue: %d\n", rc);
380 static int ldlm_do_convert(void)
383 unsigned char random;
384 int flags = 0, rc = 0;
385 struct ldlm_test_lock *lock_info;
386 struct ldlm_lock *lock;
389 /* delete from list */
390 spin_lock(&ctl_lock);
391 lock_info = list_entry(lock_list.next, struct ldlm_test_lock, l_link);
392 list_del(lock_list.next);
394 spin_unlock(&ctl_lock);
396 /* Pick a random lock mode */
397 get_random_bytes(&random, sizeof(random));
398 lock_mode = random % LCK_NL + 1;
400 /* do the conversion */
401 rc = ldlm_cli_convert(&lock_info->l_lockh , lock_mode, &flags);
402 atomic_inc(&converts_requested);
405 CERROR("ldlm_cli_convert: %d\n", rc);
410 * Adjust reference counts.
411 * FIXME: This is technically a bit... wrong,
412 * since we don't know when/if the convert succeeded
414 ldlm_lock_addref(&lock_info->l_lockh, lock_mode);
415 lock = ldlm_handle2lock(&lock_info->l_lockh);
416 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
419 OBD_FREE(lock_info, sizeof(*lock_info));
426 static int ldlm_test_main(void *data)
428 struct ldlm_test_thread *thread = data;
433 spin_lock_irq(¤t->sigmask_lock);
434 sigfillset(¤t->blocked);
435 recalc_sigpending(current);
436 spin_unlock_irq(¤t->sigmask_lock);
438 sprintf(current->comm, "ldlm_test");
441 /* Record that the thread is running */
442 thread->t_flags |= SVC_RUNNING;
443 wake_up(&thread->t_ctl_waitq);
445 while (!(thread->t_flags & SVC_STOPPING)) {
446 unsigned char random;
447 unsigned char dec_chance, con_chance;
448 unsigned char chance_left = 100;
450 spin_lock(&ctl_lock);
451 /* probability of decrementing increases linearly
452 * as more locks are held.
454 dec_chance = chance_left * num_locks / max_locks;
455 chance_left -= dec_chance;
457 /* FIXME: conversions temporarily disabled
458 * until they are working correctly.
460 /* con_chance = chance_left * num_locks / max_locks; */
462 chance_left -= con_chance;
463 spin_unlock(&ctl_lock);
465 get_random_bytes(&random, sizeof(random));
467 random = random % 100;
468 if (random < dec_chance)
470 else if (random < (dec_chance + con_chance))
473 ldlm_do_enqueue(thread);
475 LDLM_DEBUG_NOLOCK("locks requested: %d, "
476 "conversions requested %d",
477 atomic_read(&locks_requested),
478 atomic_read(&converts_requested));
479 LDLM_DEBUG_NOLOCK("locks granted: %d, "
481 atomic_read(&locks_granted),
482 atomic_read(&locks_matched));
484 spin_lock(&ctl_lock);
485 LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
487 spin_unlock(&ctl_lock);
490 * We don't sleep after a lock being blocked, so let's
491 * make sure other things can run.
496 thread->t_flags |= SVC_STOPPED;
497 wake_up(&thread->t_ctl_waitq);
502 static int ldlm_start_thread(struct obd_device *obddev,
503 struct lustre_handle *connh)
505 struct ldlm_test_thread *test;
509 OBD_ALLOC(test, sizeof(*test));
514 init_waitqueue_head(&test->t_ctl_waitq);
516 test->obddev = obddev;
518 spin_lock(&ctl_lock);
519 list_add(&test->t_link, &ctl_threads);
520 spin_unlock(&ctl_lock);
522 rc = kernel_thread(ldlm_test_main, (void *)test,
523 CLONE_VM | CLONE_FS | CLONE_FILES);
525 CERROR("cannot start thread\n");
528 wait_event(test->t_ctl_waitq, test->t_flags & SVC_RUNNING);
533 int ldlm_regression_start(struct obd_device *obddev,
534 struct lustre_handle *connh,
535 unsigned int threads, unsigned int max_locks_in,
536 unsigned int num_resources_in,
537 unsigned int num_extents_in)
542 spin_lock(&ctl_lock);
543 if (regression_running) {
544 CERROR("You can't start the ldlm regression twice.\n");
545 spin_unlock(&ctl_lock);
548 regression_running = 1;
549 spin_unlock(&ctl_lock);
551 regress_connh = *connh;
552 max_locks = max_locks_in;
553 num_resources = num_resources_in;
554 num_extents = num_extents_in;
556 LDLM_DEBUG_NOLOCK("regression test started: threads: %d, max_locks: "
557 "%d, num_res: %d, num_ext: %d\n",
558 threads, max_locks_in, num_resources_in,
561 for (i = 0; i < threads; i++) {
562 rc = ldlm_start_thread(obddev, connh);
569 ldlm_regression_stop();
573 int ldlm_regression_stop(void)
577 spin_lock(&ctl_lock);
578 if (!regression_running) {
579 CERROR("The ldlm regression isn't started.\n");
580 spin_unlock(&ctl_lock);
584 while (!list_empty(&ctl_threads)) {
585 struct ldlm_test_thread *thread;
586 thread = list_entry(ctl_threads.next, struct ldlm_test_thread,
589 thread->t_flags |= SVC_STOPPING;
591 spin_unlock(&ctl_lock);
592 wake_up(&thread->t_ctl_waitq);
593 wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
594 spin_lock(&ctl_lock);
596 list_del(&thread->t_link);
597 OBD_FREE(thread, sizeof(*thread));
600 /* decrement all held locks */
601 while (!list_empty(&lock_list)) {
602 struct ldlm_lock *lock;
603 struct ldlm_test_lock *lock_info =
604 list_entry(lock_list.next, struct ldlm_test_lock,
606 list_del(lock_list.next);
609 lock = ldlm_handle2lock(&lock_info->l_lockh);
610 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
613 OBD_FREE(lock_info, sizeof(*lock_info));
616 regression_running = 0;
617 spin_unlock(&ctl_lock);
622 int ldlm_test(struct obd_device *obddev, struct lustre_handle *connh)
625 rc = ldlm_test_basics(obddev);
629 rc = ldlm_test_extents(obddev);
633 rc = ldlm_test_network(obddev, connh);