1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002 Cluster File Systems, Inc. <info@clusterfs.com>
5 * Copyright (c) 2002 Lawrence Livermore National Laboratory
6 * Author: James Newsome <newsome2@llnl.gov>
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #define DEBUG_SUBSYSTEM S_LDLM
26 #include <asm/atomic.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
30 #include <linux/lustre_dlm.h>
31 #include <linux/obd.h>
33 struct ldlm_test_thread {
34 struct obd_device *obddev;
35 struct ldlm_namespace *t_ns;
36 struct list_head t_link;
38 wait_queue_head_t t_ctl_waitq;
41 struct ldlm_test_lock {
42 struct list_head l_link;
43 struct lustre_handle l_lockh;
46 static unsigned int max_locks;
47 static unsigned int num_resources;
48 static unsigned int num_extents;
50 static spinlock_t ctl_lock = SPIN_LOCK_UNLOCKED;
51 /* protect these with the ctl_lock */
52 static LIST_HEAD(ctl_threads);
53 static int regression_running = 0;
54 static LIST_HEAD(lock_list);
55 static int num_locks = 0;
57 /* cumulative stats for regression test */
58 static atomic_t locks_requested = ATOMIC_INIT(0);
59 static atomic_t converts_requested = ATOMIC_INIT(0);
60 static atomic_t locks_granted = ATOMIC_INIT(0);
61 static atomic_t locks_matched = ATOMIC_INIT(0);
63 /* making this a global avoids the problem of having pointers
64 * to garbage after the test exits.
66 static struct lustre_handle regress_connh;
68 static int ldlm_do_decrement(void);
69 static int ldlm_do_enqueue(struct ldlm_test_thread *thread);
70 static int ldlm_do_convert(void);
73 * blocking ast for regression test.
76 static int ldlm_test_blocking_ast(struct ldlm_lock *lock,
77 struct ldlm_lock_desc *new,
81 struct lustre_handle lockh;
85 case LDLM_CB_BLOCKING:
86 LDLM_DEBUG(lock, "We're blocking. Cancelling lock");
87 ldlm_lock2handle(lock, &lockh);
88 rc = ldlm_cli_cancel(&lockh);
90 CERROR("ldlm_cli_cancel: %d\n", rc);
94 case LDLM_CB_CANCELING:
95 LDLM_DEBUG(lock, "this lock is being cancelled");
104 /* blocking ast for basic tests. noop */
105 static int ldlm_blocking_ast(struct ldlm_lock *lock,
106 struct ldlm_lock_desc *new,
107 void *data, int flag)
110 CERROR("ldlm_blocking_ast: lock=%p, new=%p, flag=%d\n", lock, new,
115 /* Completion ast for regression test.
116 * Does not sleep when blocked.
118 static int ldlm_test_completion_ast(struct ldlm_lock *lock, int flags, void *data)
120 struct ldlm_test_lock *lock_info;
123 if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
124 LDLM_FL_BLOCK_CONV)) {
125 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
129 if (lock->l_granted_mode != lock->l_req_mode)
130 CERROR("completion ast called with non-granted lock\n");
132 /* add to list of granted locks */
134 if (flags & LDLM_FL_WAIT_NOREPROC) {
135 atomic_inc(&locks_matched);
136 LDLM_DEBUG(lock, "lock matched");
138 atomic_inc(&locks_granted);
139 LDLM_DEBUG(lock, "lock granted");
142 OBD_ALLOC(lock_info, sizeof(*lock_info));
143 if (lock_info == NULL) {
148 ldlm_lock2handle(lock, &lock_info->l_lockh);
150 spin_lock(&ctl_lock);
151 list_add_tail(&lock_info->l_link, &lock_list);
153 spin_unlock(&ctl_lock);
158 int ldlm_test_basics(struct obd_device *obddev)
160 struct ldlm_namespace *ns;
161 struct ldlm_resource *res;
162 struct ldlm_res_id res_id = { .name = {1, 2, 3} };
164 struct ldlm_lock *lock1, *lock;
168 ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
172 lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_CR, NULL, 0);
175 err = ldlm_lock_enqueue(ns, lock1, NULL, 0, &flags,
176 ldlm_completion_ast, ldlm_blocking_ast);
180 lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX, NULL, 0);
183 err = ldlm_lock_enqueue(ns, lock, NULL, 0, &flags,
184 ldlm_completion_ast, ldlm_blocking_ast);
187 if (!(flags & LDLM_FL_BLOCK_GRANTED))
190 res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
193 ldlm_resource_dump(res);
195 res = ldlm_lock_convert(lock1, LCK_NL, &flags);
197 ldlm_reprocess_all(res);
199 ldlm_resource_dump(res);
200 ldlm_namespace_free(ns, 0);
205 int ldlm_test_extents(struct obd_device *obddev)
207 struct ldlm_namespace *ns;
208 struct ldlm_resource *res;
209 struct ldlm_lock *lock, *lock1, *lock2;
210 struct ldlm_res_id res_id = { .name = {0} };
211 struct ldlm_extent ext1 = {4, 6}, ext2 = {6, 9}, ext3 = {10, 11};
216 ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
221 lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR, NULL,0);
224 err = ldlm_lock_enqueue(ns, lock1, &ext1, sizeof(ext1), &flags, NULL,
228 if (!(flags & LDLM_FL_LOCK_CHANGED))
232 lock2 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR, NULL,0);
233 err = ldlm_lock_enqueue(ns, lock2, &ext2, sizeof(ext2), &flags, NULL,
237 if (!(flags & LDLM_FL_LOCK_CHANGED))
241 lock = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_EX, NULL, 0);
244 err = ldlm_lock_enqueue(ns, lock, &ext3, sizeof(ext3), &flags,
248 if (!(flags & LDLM_FL_BLOCK_GRANTED))
250 if (flags & LDLM_FL_LOCK_CHANGED)
253 /* Convert/cancel blocking locks */
255 res = ldlm_lock_convert(lock1, LCK_NL, &flags);
257 ldlm_reprocess_all(res);
259 ldlm_lock_cancel(lock2);
261 ldlm_reprocess_all(res);
263 /* Dump the results */
264 res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
267 ldlm_resource_dump(res);
268 ldlm_namespace_free(ns, 0);
273 static int ldlm_test_network(struct obd_device *obddev,
274 struct lustre_handle *connh)
276 struct ldlm_res_id res_id = { .name = {1, 2, 3} };
277 struct ldlm_extent ext = {4, 6};
278 struct lustre_handle lockh1;
279 struct ldlm_lock *lock;
284 err = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, NULL, res_id,
285 LDLM_EXTENT, &ext, sizeof(ext), LCK_PR, &flags,
286 ldlm_completion_ast, NULL, NULL, 0, &lockh1);
288 CERROR("ldlm_cli_enqueue: %d\n", err);
291 err = ldlm_cli_convert(&lockh1, LCK_EX, &flags);
292 CERROR("ldlm_cli_convert: %d\n", err);
294 lock = ldlm_handle2lock(&lockh1);
295 ldlm_lock_dump(D_OTHER, lock);
298 /* Need to decrement old mode. Don't bother incrementing new
299 * mode since the test is done.
302 ldlm_lock_decref(&lockh1, LCK_PR);
307 static int ldlm_do_decrement(void)
309 struct ldlm_test_lock *lock_info;
310 struct ldlm_lock *lock;
314 spin_lock(&ctl_lock);
315 if(list_empty(&lock_list)) {
316 CERROR("lock_list is empty\n");
317 spin_unlock(&ctl_lock);
321 /* delete from list */
322 lock_info = list_entry(lock_list.next,
323 struct ldlm_test_lock, l_link);
324 list_del(lock_list.next);
326 spin_unlock(&ctl_lock);
328 /* decrement and free the info */
329 lock = ldlm_handle2lock(&lock_info->l_lockh);
330 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
333 OBD_FREE(lock_info, sizeof(*lock_info));
338 static int ldlm_do_enqueue(struct ldlm_test_thread *thread)
340 struct lustre_handle lockh;
341 struct ldlm_res_id res_id = { .name = {0} };
343 struct ldlm_extent ext;
344 unsigned char random;
345 int flags = 0, rc = 0;
348 /* Pick a random resource from 1 to num_resources */
349 get_random_bytes(&random, sizeof(random));
350 res_id.name[0] = random % num_resources;
352 /* Pick a random lock mode */
353 get_random_bytes(&random, sizeof(random));
354 lock_mode = random % LCK_NL + 1;
356 /* Pick a random extent */
357 get_random_bytes(&random, sizeof(random));
358 ext.start = random % num_extents;
359 get_random_bytes(&random, sizeof(random));
361 (num_extents - (int)ext.start) + ext.start;
363 LDLM_DEBUG_NOLOCK("about to enqueue with resource "LPX64", mode %d,"
364 " extent "LPX64" -> "LPX64, res_id.name[0], lock_mode,
367 rc = ldlm_match_or_enqueue(®ress_connh, NULL,
368 thread->obddev->obd_namespace,
369 NULL, res_id, LDLM_EXTENT, &ext,
370 sizeof(ext), lock_mode, &flags,
371 ldlm_test_completion_ast,
372 ldlm_test_blocking_ast,
375 atomic_inc(&locks_requested);
378 CERROR("ldlm_cli_enqueue: %d\n", rc);
385 static int ldlm_do_convert(void)
388 unsigned char random;
389 int flags = 0, rc = 0;
390 struct ldlm_test_lock *lock_info;
391 struct ldlm_lock *lock;
394 /* delete from list */
395 spin_lock(&ctl_lock);
396 lock_info = list_entry(lock_list.next, struct ldlm_test_lock, l_link);
397 list_del(lock_list.next);
399 spin_unlock(&ctl_lock);
401 /* Pick a random lock mode */
402 get_random_bytes(&random, sizeof(random));
403 lock_mode = random % LCK_NL + 1;
405 /* do the conversion */
406 rc = ldlm_cli_convert(&lock_info->l_lockh , lock_mode, &flags);
407 atomic_inc(&converts_requested);
410 CERROR("ldlm_cli_convert: %d\n", rc);
415 * Adjust reference counts.
416 * FIXME: This is technically a bit... wrong,
417 * since we don't know when/if the convert succeeded
419 ldlm_lock_addref(&lock_info->l_lockh, lock_mode);
420 lock = ldlm_handle2lock(&lock_info->l_lockh);
421 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
424 OBD_FREE(lock_info, sizeof(*lock_info));
431 static int ldlm_test_main(void *data)
433 struct ldlm_test_thread *thread = data;
439 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
440 sigfillset(¤t->blocked);
443 spin_lock_irqsave(¤t->sigmask_lock, flags);
444 sigfillset(¤t->blocked);
445 recalc_sigpending(current);
446 spin_unlock_irqrestore(¤t->sigmask_lock, flags);
449 sprintf(current->comm, "ldlm_test");
452 /* Record that the thread is running */
453 thread->t_flags |= SVC_RUNNING;
454 wake_up(&thread->t_ctl_waitq);
456 while (!(thread->t_flags & SVC_STOPPING)) {
457 unsigned char random;
458 unsigned char dec_chance, con_chance;
459 unsigned char chance_left = 100;
461 spin_lock(&ctl_lock);
462 /* probability of decrementing increases linearly
463 * as more locks are held.
465 dec_chance = chance_left * num_locks / max_locks;
466 chance_left -= dec_chance;
468 /* FIXME: conversions temporarily disabled
469 * until they are working correctly.
471 /* con_chance = chance_left * num_locks / max_locks; */
473 chance_left -= con_chance;
474 spin_unlock(&ctl_lock);
476 get_random_bytes(&random, sizeof(random));
478 random = random % 100;
479 if (random < dec_chance)
481 else if (random < (dec_chance + con_chance))
484 ldlm_do_enqueue(thread);
486 LDLM_DEBUG_NOLOCK("locks requested: %d, "
487 "conversions requested %d",
488 atomic_read(&locks_requested),
489 atomic_read(&converts_requested));
490 LDLM_DEBUG_NOLOCK("locks granted: %d, "
492 atomic_read(&locks_granted),
493 atomic_read(&locks_matched));
495 spin_lock(&ctl_lock);
496 LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
498 spin_unlock(&ctl_lock);
501 * We don't sleep after a lock being blocked, so let's
502 * make sure other things can run.
507 thread->t_flags |= SVC_STOPPED;
508 wake_up(&thread->t_ctl_waitq);
513 static int ldlm_start_thread(struct obd_device *obddev,
514 struct lustre_handle *connh)
516 struct ldlm_test_thread *test;
520 OBD_ALLOC(test, sizeof(*test));
525 init_waitqueue_head(&test->t_ctl_waitq);
527 test->obddev = obddev;
529 spin_lock(&ctl_lock);
530 list_add(&test->t_link, &ctl_threads);
531 spin_unlock(&ctl_lock);
533 rc = kernel_thread(ldlm_test_main, (void *)test,
534 CLONE_VM | CLONE_FS | CLONE_FILES);
536 CERROR("cannot start thread\n");
539 wait_event(test->t_ctl_waitq, test->t_flags & SVC_RUNNING);
544 int ldlm_regression_start(struct obd_device *obddev,
545 struct lustre_handle *connh,
546 unsigned int threads, unsigned int max_locks_in,
547 unsigned int num_resources_in,
548 unsigned int num_extents_in)
553 spin_lock(&ctl_lock);
554 if (regression_running) {
555 CERROR("You can't start the ldlm regression twice.\n");
556 spin_unlock(&ctl_lock);
559 regression_running = 1;
560 spin_unlock(&ctl_lock);
562 regress_connh = *connh;
563 max_locks = max_locks_in;
564 num_resources = num_resources_in;
565 num_extents = num_extents_in;
567 LDLM_DEBUG_NOLOCK("regression test started: threads: %d, max_locks: "
568 "%d, num_res: %d, num_ext: %d\n",
569 threads, max_locks_in, num_resources_in,
572 for (i = 0; i < threads; i++) {
573 rc = ldlm_start_thread(obddev, connh);
580 ldlm_regression_stop();
584 int ldlm_regression_stop(void)
588 spin_lock(&ctl_lock);
589 if (!regression_running) {
590 CERROR("The ldlm regression isn't started.\n");
591 spin_unlock(&ctl_lock);
595 while (!list_empty(&ctl_threads)) {
596 struct ldlm_test_thread *thread;
597 thread = list_entry(ctl_threads.next, struct ldlm_test_thread,
600 thread->t_flags |= SVC_STOPPING;
602 spin_unlock(&ctl_lock);
603 wake_up(&thread->t_ctl_waitq);
604 wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
605 spin_lock(&ctl_lock);
607 list_del(&thread->t_link);
608 OBD_FREE(thread, sizeof(*thread));
611 /* decrement all held locks */
612 while (!list_empty(&lock_list)) {
613 struct ldlm_lock *lock;
614 struct ldlm_test_lock *lock_info =
615 list_entry(lock_list.next, struct ldlm_test_lock,
617 list_del(lock_list.next);
620 lock = ldlm_handle2lock(&lock_info->l_lockh);
621 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
624 OBD_FREE(lock_info, sizeof(*lock_info));
627 regression_running = 0;
628 spin_unlock(&ctl_lock);
633 int ldlm_test(struct obd_device *obddev, struct lustre_handle *connh)
636 rc = ldlm_test_basics(obddev);
640 rc = ldlm_test_extents(obddev);
644 rc = ldlm_test_network(obddev, connh);