Whamcloud - gitweb
ONLY UPDATE IF YOU NEED THIS (i.e. Andreas probably will)
[fs/lustre-release.git] / lustre / ldlm / ldlm_test.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (c) 2002 Cluster File Systems, Inc. <info@clusterfs.com>
5  * Copyright (c) 2002 Lawrence Livermore National Laboratory
6  *  Author: James Newsome <newsome2@llnl.gov>
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #define DEBUG_SUBSYSTEM S_LDLM
25
26 #include <asm/atomic.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
29
30 #include <linux/lustre_dlm.h>
31 #include <linux/obd.h>
32
/* Per-thread state for one regression-test worker thread. */
struct ldlm_test_thread {
        struct obd_device *obddev;      /* device whose namespace we enqueue in */
        struct ldlm_namespace *t_ns;    /* NOTE(review): never assigned in this file */
        struct list_head t_link;        /* entry on the ctl_threads list */
        __u32 t_flags;                  /* SVC_RUNNING / SVC_STOPPING / SVC_STOPPED */
        wait_queue_head_t t_ctl_waitq;  /* start/stop handshake with the controller */
};
40
/* Record of one granted/matched lock held by the regression test;
 * kept on lock_list until a worker decrements or converts it. */
struct ldlm_test_lock {
        struct list_head l_link;        /* entry on lock_list */
        struct lustre_handle l_lockh;   /* handle of the held lock */
};
45
/* Regression-test tunables, set once by ldlm_regression_start(). */
static unsigned int max_locks;          /* target number of concurrently held locks */
static unsigned int num_resources;      /* resource ids chosen in [0, num_resources) */
static unsigned int num_extents;        /* extent offsets chosen in [0, num_extents) */

static spinlock_t ctl_lock = SPIN_LOCK_UNLOCKED;
/* protect these with the ctl_lock */
static LIST_HEAD(ctl_threads);          /* all running ldlm_test_thread workers */
static int regression_running = 0;      /* non-zero while a regression is active */
static LIST_HEAD(lock_list);            /* ldlm_test_lock records for held locks */
static int num_locks = 0;               /* current length of lock_list */

/* cumulative stats for regression test */
static atomic_t locks_requested = ATOMIC_INIT(0);
static atomic_t converts_requested = ATOMIC_INIT(0);
static atomic_t locks_granted = ATOMIC_INIT(0);
static atomic_t locks_matched = ATOMIC_INIT(0);

/* making this a global avoids the problem of having pointers
 * to garbage after the test exits.
 */
static struct lustre_handle regress_connh;

static int ldlm_do_decrement(void);
static int ldlm_do_enqueue(struct ldlm_test_thread *thread);
static int ldlm_do_convert(void);
71
72 /*
73  * blocking ast for regression test.
74  * Just cancels lock
75  */
76 static int ldlm_test_blocking_ast(struct ldlm_lock *lock,
77                                   struct ldlm_lock_desc *new,
78                                   void *data, __u32 data_len, int flag)
79 {
80         int rc;
81         struct lustre_handle lockh;
82         ENTRY;
83
84         switch (flag) {
85         case LDLM_CB_BLOCKING:
86                 LDLM_DEBUG(lock, "We're blocking. Cancelling lock");
87                 ldlm_lock2handle(lock, &lockh);
88                 rc = ldlm_cli_cancel(&lockh);
89                 if (rc < 0) {
90                         CERROR("ldlm_cli_cancel: %d\n", rc);
91                         LBUG();
92                 }
93                 break;
94         case LDLM_CB_CANCELING:
95                 LDLM_DEBUG(lock, "this lock is being cancelled");
96                 break;
97         default:
98                 LBUG();
99         }
100
101         RETURN(0);
102 }
103
104 /* blocking ast for basic tests. noop */
105 static int ldlm_blocking_ast(struct ldlm_lock *lock,
106                              struct ldlm_lock_desc *new,
107                              void *data, __u32 data_len, int flag)
108 {
109         ENTRY;
110         CERROR("ldlm_blocking_ast: lock=%p, new=%p, flag=%d\n", lock, new,
111                flag);
112         RETURN(0);
113 }
114
/* Completion ast for regression test.
 * Does not sleep when blocked.
 */
static int ldlm_test_completion_ast(struct ldlm_lock *lock, int flags)
{
        struct ldlm_test_lock *lock_info;
        ENTRY;

        /* Still blocked: the regression workers never wait for grants,
         * so just log and let a later completion record the lock. */
        if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                     LDLM_FL_BLOCK_CONV)) {
                LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
                RETURN(0);
        }

        if (lock->l_granted_mode != lock->l_req_mode)
                CERROR("completion ast called with non-granted lock\n");

        /* add to list of granted locks */

        /* NOTE(review): WAIT_NOREPROC presumably indicates an existing
         * lock was matched (via ldlm_match_or_enqueue) rather than a new
         * one granted — verify against the LDLM flag semantics. */
        if (flags & LDLM_FL_WAIT_NOREPROC) {
                atomic_inc(&locks_matched);
                LDLM_DEBUG(lock, "lock matched");
        } else {
                atomic_inc(&locks_granted);
                LDLM_DEBUG(lock, "lock granted");
        }

        OBD_ALLOC(lock_info, sizeof(*lock_info));
        if (lock_info == NULL) {
                /* LBUG() panics, so the RETURN below is unreachable; on
                 * allocation failure the lock is never recorded on
                 * lock_list and its reference would never be dropped. */
                LBUG();
                RETURN(-ENOMEM);
        }

        ldlm_lock2handle(lock, &lock_info->l_lockh);

        /* publish the handle so a worker thread can decref/convert it */
        spin_lock(&ctl_lock);
        list_add_tail(&lock_info->l_link, &lock_list);
        num_locks++;
        spin_unlock(&ctl_lock);

        RETURN(0);
}
157
158 int ldlm_test_basics(struct obd_device *obddev)
159 {
160         struct ldlm_namespace *ns;
161         struct ldlm_resource *res;
162         __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
163         ldlm_error_t err;
164         struct ldlm_lock *lock1, *lock;
165         int flags;
166         ENTRY;
167
168         ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
169         if (ns == NULL)
170                 LBUG();
171
172         lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_CR, NULL, 0);
173         if (lock1 == NULL)
174                 LBUG();
175         err = ldlm_lock_enqueue(lock1, NULL, 0, &flags,
176                                 ldlm_completion_ast, ldlm_blocking_ast);
177         if (err != ELDLM_OK)
178                 LBUG();
179
180         lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX, NULL, 0);
181         if (lock == NULL)
182                 LBUG();
183         err = ldlm_lock_enqueue(lock, NULL, 0, &flags,
184                                 ldlm_completion_ast, ldlm_blocking_ast);
185         if (err != ELDLM_OK)
186                 LBUG();
187         if (!(flags & LDLM_FL_BLOCK_GRANTED))
188                 LBUG();
189
190         res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
191         if (res == NULL)
192                 LBUG();
193         ldlm_resource_dump(res);
194
195         res = ldlm_lock_convert(lock1, LCK_NL, &flags);
196         if (res != NULL)
197                 ldlm_reprocess_all(res);
198
199         ldlm_resource_dump(res);
200         ldlm_namespace_free(ns);
201
202         RETURN(0);
203 }
204
205 int ldlm_test_extents(struct obd_device *obddev)
206 {
207         struct ldlm_namespace *ns;
208         struct ldlm_resource *res;
209         struct ldlm_lock *lock, *lock1, *lock2;
210         __u64 res_id[RES_NAME_SIZE] = {0, 0, 0};
211         struct ldlm_extent ext1 = {4, 6}, ext2 = {6, 9}, ext3 = {10, 11};
212         ldlm_error_t err;
213         int flags;
214         ENTRY;
215
216         ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
217         if (ns == NULL)
218                 LBUG();
219
220         flags = 0;
221         lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR, NULL,
222                                  0);
223         if (lock1 == NULL)
224                 LBUG();
225         err = ldlm_lock_enqueue(lock1, &ext1, sizeof(ext1), &flags, NULL, NULL);
226         if (err != ELDLM_OK)
227                 LBUG();
228         if (!(flags & LDLM_FL_LOCK_CHANGED))
229                 LBUG();
230
231         flags = 0;
232         lock2 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR,
233                                 NULL, 0);
234         err = ldlm_lock_enqueue(lock2, &ext2, sizeof(ext2), &flags, NULL, NULL);
235         if (err != ELDLM_OK)
236                 LBUG();
237         if (!(flags & LDLM_FL_LOCK_CHANGED))
238                 LBUG();
239
240         flags = 0;
241         lock = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_EX, NULL, 0);
242         if (lock == NULL)
243                 LBUG();
244         err = ldlm_lock_enqueue(lock, &ext3, sizeof(ext3), &flags,
245                                 NULL, NULL);
246         if (err != ELDLM_OK)
247                 LBUG();
248         if (!(flags & LDLM_FL_BLOCK_GRANTED))
249                 LBUG();
250         if (flags & LDLM_FL_LOCK_CHANGED)
251                 LBUG();
252
253         /* Convert/cancel blocking locks */
254         flags = 0;
255         res = ldlm_lock_convert(lock1, LCK_NL, &flags);
256         if (res != NULL)
257                 ldlm_reprocess_all(res);
258
259         ldlm_lock_cancel(lock2);
260         if (res != NULL)
261                 ldlm_reprocess_all(res);
262
263         /* Dump the results */
264         res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
265         if (res == NULL)
266                 LBUG();
267         ldlm_resource_dump(res);
268         ldlm_namespace_free(ns);
269
270         RETURN(0);
271 }
272
273 static int ldlm_test_network(struct obd_device *obddev,
274                              struct lustre_handle *connh)
275 {
276
277         __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
278         struct ldlm_extent ext = {4, 6};
279         struct lustre_handle lockh1;
280         struct ldlm_lock *lock;
281         int flags = 0;
282         ldlm_error_t err;
283         ENTRY;
284
285         err = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, NULL, res_id,
286                                LDLM_EXTENT, &ext, sizeof(ext), LCK_PR, &flags,
287                                ldlm_completion_ast, NULL, NULL, 0, &lockh1);
288
289         CERROR("ldlm_cli_enqueue: %d\n", err);
290
291         flags = 0;
292         err = ldlm_cli_convert(&lockh1, LCK_EX, &flags);
293         CERROR("ldlm_cli_convert: %d\n", err);
294
295         lock = ldlm_handle2lock(&lockh1);
296         ldlm_lock_dump(lock);
297         ldlm_lock_put(lock);
298
299         /* Need to decrement old mode. Don't bother incrementing new
300          * mode since the test is done.
301          */
302         if (err == ELDLM_OK)
303                 ldlm_lock_decref(&lockh1, LCK_PR);
304
305         RETURN(err);
306 }
307
308 static int ldlm_do_decrement(void)
309 {
310         struct ldlm_test_lock *lock_info;
311         struct ldlm_lock *lock;
312         int rc = 0;
313         ENTRY;
314
315         spin_lock(&ctl_lock);
316         if(list_empty(&lock_list)) {
317                 CERROR("lock_list is empty\n");
318                 spin_unlock(&ctl_lock);
319                 RETURN(0);
320         }
321
322         /* delete from list */
323         lock_info = list_entry(lock_list.next,
324                         struct ldlm_test_lock, l_link);
325         list_del(lock_list.next);
326         num_locks--;
327         spin_unlock(&ctl_lock);
328
329         /* decrement and free the info */
330         lock = ldlm_handle2lock(&lock_info->l_lockh);
331         ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
332         ldlm_lock_put(lock);
333
334         OBD_FREE(lock_info, sizeof(*lock_info));
335
336         RETURN(rc);
337 }
338
339 static int ldlm_do_enqueue(struct ldlm_test_thread *thread)
340 {
341         struct lustre_handle lockh;
342         __u64 res_id[3] = {0};
343         __u32 lock_mode;
344         struct ldlm_extent ext;
345         unsigned char random;
346         int flags = 0, rc = 0;
347         ENTRY;
348
349         /* Pick a random resource from 1 to num_resources */
350         get_random_bytes(&random, sizeof(random));
351         res_id[0] = random % num_resources;
352
353         /* Pick a random lock mode */
354         get_random_bytes(&random, sizeof(random));
355         lock_mode = random % LCK_NL + 1;
356
357         /* Pick a random extent */
358         get_random_bytes(&random, sizeof(random));
359         ext.start = random % num_extents;
360         get_random_bytes(&random, sizeof(random));
361         ext.end = random %
362                 (num_extents - (int)ext.start) + ext.start;
363
364         LDLM_DEBUG_NOLOCK("about to enqueue with resource "LPX64", mode %d,"
365                           " extent "LPX64" -> "LPX64, res_id[0], lock_mode,
366                           ext.start, ext.end);
367
368         rc = ldlm_match_or_enqueue(&regress_connh, NULL,
369                                    thread->obddev->obd_namespace,
370                                    NULL, res_id, LDLM_EXTENT, &ext,
371                                    sizeof(ext), lock_mode, &flags,
372                                    ldlm_test_completion_ast,
373                                    ldlm_test_blocking_ast,
374                                    NULL, 0, &lockh);
375
376         atomic_inc(&locks_requested);
377
378         if (rc < 0) {
379                 CERROR("ldlm_cli_enqueue: %d\n", rc);
380                 LBUG();
381         }
382
383         RETURN(rc);
384 }
385
386 static int ldlm_do_convert(void)
387 {
388         __u32 lock_mode;
389         unsigned char random;
390         int flags = 0, rc = 0;
391         struct ldlm_test_lock *lock_info;
392         struct ldlm_lock *lock;
393         ENTRY;
394
395         /* delete from list */
396         spin_lock(&ctl_lock);
397         lock_info = list_entry(lock_list.next, struct ldlm_test_lock, l_link);
398         list_del(lock_list.next);
399         num_locks--;
400         spin_unlock(&ctl_lock);
401
402         /* Pick a random lock mode */
403         get_random_bytes(&random, sizeof(random));
404         lock_mode = random % LCK_NL + 1;
405
406         /* do the conversion */
407         rc = ldlm_cli_convert(&lock_info->l_lockh , lock_mode, &flags);
408         atomic_inc(&converts_requested);
409
410         if (rc < 0) {
411                 CERROR("ldlm_cli_convert: %d\n", rc);
412                 LBUG();
413         }
414
415         /*
416          *  Adjust reference counts.
417          *  FIXME: This is technically a bit... wrong,
418          *  since we don't know when/if the convert succeeded
419          */
420         ldlm_lock_addref(&lock_info->l_lockh, lock_mode);
421         lock = ldlm_handle2lock(&lock_info->l_lockh);
422         ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
423         ldlm_lock_put(lock);
424
425         OBD_FREE(lock_info, sizeof(*lock_info));
426
427         RETURN(rc);
428 }
429
430
431
/* Body of one regression worker thread: detach, then loop randomly
 * choosing between decrementing a held lock, converting one, or
 * enqueueing a new one, until told to stop. */
static int ldlm_test_main(void *data)
{
        struct ldlm_test_thread *thread = data;
        ENTRY;

        /* Standard 2.4-era kernel-thread setup: detach from the parent
         * and block all signals (API differs at 2.5, hence the ifdef). */
        lock_kernel();
        daemonize();
        spin_lock_irq(&current->sigmask_lock);
        sigfillset(&current->blocked);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
        recalc_sigpending();
#else
        recalc_sigpending(current);
#endif
        spin_unlock_irq(&current->sigmask_lock);

        sprintf(current->comm, "ldlm_test");
        unlock_kernel();

        /* Record that the thread is running */
        thread->t_flags |= SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        while (!(thread->t_flags & SVC_STOPPING)) {
                unsigned char random;
                unsigned char dec_chance, con_chance;
                unsigned char chance_left = 100;

                spin_lock(&ctl_lock);
                /* probability of decrementing increases linearly
                 * as more locks are held.
                 * NOTE(review): if num_locks transiently exceeds
                 * max_locks, dec_chance is truncated on assignment to
                 * unsigned char — confirm that is acceptable. */
                dec_chance = chance_left * num_locks / max_locks;
                chance_left -= dec_chance;

                /* FIXME: conversions temporarily disabled
                 * until they are working correctly.
                 */
                /* con_chance = chance_left * num_locks / max_locks; */
                con_chance = 0;
                chance_left -= con_chance;
                spin_unlock(&ctl_lock);

                get_random_bytes(&random, sizeof(random));

                /* roll 0..99 against the cumulative chance thresholds */
                random = random % 100;
                if (random < dec_chance)
                        ldlm_do_decrement();
                else if (random < (dec_chance + con_chance))
                        ldlm_do_convert();
                else
                        ldlm_do_enqueue(thread);

                LDLM_DEBUG_NOLOCK("locks requested: %d, "
                                  "conversions requested %d",
                                  atomic_read(&locks_requested),
                                  atomic_read(&converts_requested));
                LDLM_DEBUG_NOLOCK("locks granted: %d, "
                                  "locks matched: %d",
                                  atomic_read(&locks_granted),
                                  atomic_read(&locks_matched));

                spin_lock(&ctl_lock);
                LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
                                  num_locks);
                spin_unlock(&ctl_lock);

                /*
                 * We don't sleep after a lock being blocked, so let's
                 * make sure other things can run.
                 */
                schedule();
        }

        /* acknowledge the stop request so ldlm_regression_stop() can
         * reap us */
        thread->t_flags |= SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        RETURN(0);
}
511
512 static int ldlm_start_thread(struct obd_device *obddev,
513                              struct lustre_handle *connh)
514 {
515         struct ldlm_test_thread *test;
516         int rc;
517         ENTRY;
518
519         OBD_ALLOC(test, sizeof(*test));
520         if (test == NULL) {
521                 LBUG();
522                 RETURN(-ENOMEM);
523         }
524         init_waitqueue_head(&test->t_ctl_waitq);
525
526         test->obddev = obddev;
527
528         spin_lock(&ctl_lock);
529         list_add(&test->t_link, &ctl_threads);
530         spin_unlock(&ctl_lock);
531
532         rc = kernel_thread(ldlm_test_main, (void *)test,
533                            CLONE_VM | CLONE_FS | CLONE_FILES);
534         if (rc < 0) {
535                 CERROR("cannot start thread\n");
536                 RETURN(-EINVAL);
537         }
538         wait_event(test->t_ctl_waitq, test->t_flags & SVC_RUNNING);
539
540         RETURN(0);
541 }
542
543 int ldlm_regression_start(struct obd_device *obddev,
544                           struct lustre_handle *connh,
545                           unsigned int threads, unsigned int max_locks_in,
546                           unsigned int num_resources_in,
547                           unsigned int num_extents_in)
548 {
549         int i, rc = 0;
550         ENTRY;
551
552         spin_lock(&ctl_lock);
553         if (regression_running) {
554                 CERROR("You can't start the ldlm regression twice.\n");
555                 spin_unlock(&ctl_lock);
556                 RETURN(-EINVAL);
557         }
558         regression_running = 1;
559         spin_unlock(&ctl_lock);
560
561         regress_connh = *connh;
562         max_locks = max_locks_in;
563         num_resources = num_resources_in;
564         num_extents = num_extents_in;
565
566         LDLM_DEBUG_NOLOCK("regression test started: threads: %d, max_locks: "
567                           "%d, num_res: %d, num_ext: %d\n",
568                           threads, max_locks_in, num_resources_in,
569                           num_extents_in);
570
571         for (i = 0; i < threads; i++) {
572                 rc = ldlm_start_thread(obddev, connh);
573                 if (rc < 0)
574                         GOTO(cleanup, rc);
575         }
576
577  cleanup:
578         if (rc < 0)
579                 ldlm_regression_stop();
580         RETURN(rc);
581 }
582
583 int ldlm_regression_stop(void)
584 {
585         ENTRY;
586
587         spin_lock(&ctl_lock);
588         if (!regression_running) {
589                 CERROR("The ldlm regression isn't started.\n");
590                 spin_unlock(&ctl_lock);
591                 RETURN(-EINVAL);
592         }
593
594         while (!list_empty(&ctl_threads)) {
595                 struct ldlm_test_thread *thread;
596                 thread = list_entry(ctl_threads.next, struct ldlm_test_thread,
597                                     t_link);
598
599                 thread->t_flags |= SVC_STOPPING;
600
601                 spin_unlock(&ctl_lock);
602                 wake_up(&thread->t_ctl_waitq);
603                 wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
604                 spin_lock(&ctl_lock);
605
606                 list_del(&thread->t_link);
607                 OBD_FREE(thread, sizeof(*thread));
608         }
609
610         /* decrement all held locks */
611         while (!list_empty(&lock_list)) {
612                 struct ldlm_lock *lock;
613                 struct ldlm_test_lock *lock_info =
614                        list_entry(lock_list.next, struct ldlm_test_lock,
615                                    l_link);
616                 list_del(lock_list.next);
617                 num_locks--;
618
619                 lock = ldlm_handle2lock(&lock_info->l_lockh);
620                 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
621                 ldlm_lock_put(lock);
622
623                 OBD_FREE(lock_info, sizeof(*lock_info));
624         }
625
626         regression_running = 0;
627         spin_unlock(&ctl_lock);
628
629         RETURN(0);
630 }
631
632 int ldlm_test(struct obd_device *obddev, struct lustre_handle *connh)
633 {
634         int rc;
635         rc = ldlm_test_basics(obddev);
636         if (rc)
637                 RETURN(rc);
638
639         rc = ldlm_test_extents(obddev);
640         if (rc)
641                 RETURN(rc);
642
643         rc = ldlm_test_network(obddev, connh);
644         RETURN(rc);
645 }