/*
 * Source: fs/lustre-release.git — lustre/ldlm/ldlm_test.c
 * (gitweb snapshot ce7a73d40551b996e2d958eef531b3a1673c6b58)
 */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (c) 2002 Cluster File Systems, Inc. <info@clusterfs.com>
5  * Copyright (c) 2002 Lawrence Livermore National Laboratory
6  *  Author: James Newsome <newsome2@llnl.gov>
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #define DEBUG_SUBSYSTEM S_LDLM
25
26 #include <asm/atomic.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
29
30 #include <linux/lustre_dlm.h>
31 #include <linux/obd.h>
32
/* Per-worker state for the regression test.  Allocated by
 * ldlm_start_thread() and linked on ctl_threads (under ctl_lock). */
struct ldlm_test_thread {
        struct obd_device *obddev;     /* device whose namespace locks are taken in */
        struct ldlm_namespace *t_ns;   /* NOTE(review): never assigned in this file — confirm use */
        struct list_head t_link;       /* entry on ctl_threads; protected by ctl_lock */
        __u32 t_flags;                 /* SVC_RUNNING / SVC_STOPPING / SVC_STOPPED */
        wait_queue_head_t t_ctl_waitq; /* start/stop handshake with the controller */
};
40
/* Bookkeeping record for one granted lock; lives on lock_list
 * (protected by ctl_lock) until decremented/converted/torn down. */
struct ldlm_test_lock {
        struct list_head l_link;      /* entry on lock_list */
        struct lustre_handle l_lockh; /* handle of the granted lock */
};
45
/* Regression-test tuning knobs, set once by ldlm_regression_start(). */
static unsigned int max_locks;     /* target ceiling on concurrently held locks */
static unsigned int num_resources; /* resource ids drawn from [0, num_resources) */
static unsigned int num_extents;   /* extent offsets drawn from [0, num_extents) */

static spinlock_t ctl_lock = SPIN_LOCK_UNLOCKED;
/* protect these with the ctl_lock */
static LIST_HEAD(ctl_threads);     /* all live ldlm_test_thread records */
static int regression_running = 0; /* non-zero while the test is active */
static LIST_HEAD(lock_list);       /* ldlm_test_lock records for held locks */
static int num_locks = 0;          /* current length of lock_list */

/* cumulative stats for regression test */
static atomic_t locks_requested = ATOMIC_INIT(0);
static atomic_t converts_requested = ATOMIC_INIT(0);
static atomic_t locks_granted = ATOMIC_INIT(0);
static atomic_t locks_matched = ATOMIC_INIT(0);

/* making this a global avoids the problem of having pointers
 * to garbage after the test exits.
 */
static struct lustre_handle regress_connh;

/* Forward declarations for the worker-thread actions. */
static int ldlm_do_decrement(void);
static int ldlm_do_enqueue(struct ldlm_test_thread *thread);
static int ldlm_do_convert(void);
71
72 /*
73  * blocking ast for regression test.
74  * Just cancels lock
75  */
76 static int ldlm_test_blocking_ast(struct ldlm_lock *lock,
77                                   struct ldlm_lock_desc *new,
78                                   void *data, __u32 data_len, int flag)
79 {
80         int rc;
81         struct lustre_handle lockh;
82         ENTRY;
83
84         switch (flag) {
85         case LDLM_CB_BLOCKING:
86                 LDLM_DEBUG(lock, "We're blocking. Cancelling lock");
87                 ldlm_lock2handle(lock, &lockh);
88                 rc = ldlm_cli_cancel(&lockh);
89                 if (rc < 0) {
90                         CERROR("ldlm_cli_cancel: %d\n", rc);
91                         LBUG();
92                 }
93                 break;
94         case LDLM_CB_CANCELING:
95                 LDLM_DEBUG(lock, "this lock is being cancelled");
96                 break;
97         default:
98                 LBUG();
99         }
100
101         RETURN(0);
102 }
103
/* Blocking AST for the basic tests: logs the callback and does nothing
 * else.  (The basic tests resolve conflicts locally via convert/cancel,
 * so no action is needed here.)  Always returns 0. */
static int ldlm_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *new,
                             void *data, __u32 data_len, int flag)
{
        ENTRY;
        CERROR("ldlm_blocking_ast: lock=%p, new=%p, flag=%d\n", lock, new,
               flag);
        RETURN(0);
}
114
115 /* Completion ast for regression test.
116  * Does not sleep when blocked.
117  */
118 static int ldlm_test_completion_ast(struct ldlm_lock *lock, int flags)
119 {
120         struct ldlm_test_lock *lock_info;
121         ENTRY;
122
123         if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
124                      LDLM_FL_BLOCK_CONV)) {
125                 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
126                 RETURN(0);
127         }
128
129         if (lock->l_granted_mode != lock->l_req_mode)
130                 CERROR("completion ast called with non-granted lock\n");
131
132         /* add to list of granted locks */
133
134         if (flags & LDLM_FL_WAIT_NOREPROC) {
135                 atomic_inc(&locks_matched);
136                 LDLM_DEBUG(lock, "lock matched");
137         } else {
138                 atomic_inc(&locks_granted);
139                 LDLM_DEBUG(lock, "lock granted");
140         }
141
142         OBD_ALLOC(lock_info, sizeof(*lock_info));
143         if (lock_info == NULL) {
144                 LBUG();
145                 RETURN(-ENOMEM);
146         }
147
148         ldlm_lock2handle(lock, &lock_info->l_lockh);
149
150         spin_lock(&ctl_lock);
151         list_add_tail(&lock_info->l_link, &lock_list);
152         num_locks++;
153         spin_unlock(&ctl_lock);
154
155         RETURN(0);
156 }
157
158 int ldlm_test_basics(struct obd_device *obddev)
159 {
160         struct ldlm_namespace *ns;
161         struct ldlm_resource *res;
162         __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
163         ldlm_error_t err;
164         struct ldlm_lock *lock1, *lock;
165         int flags;
166         ENTRY;
167
168         ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
169         if (ns == NULL)
170                 LBUG();
171
172         lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_CR, NULL, 0);
173         if (lock1 == NULL)
174                 LBUG();
175         err = ldlm_lock_enqueue(lock1, NULL, 0, &flags,
176                                 ldlm_completion_ast, ldlm_blocking_ast);
177         if (err != ELDLM_OK)
178                 LBUG();
179
180         lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX, NULL, 0);
181         if (lock == NULL)
182                 LBUG();
183         err = ldlm_lock_enqueue(lock, NULL, 0, &flags,
184                                 ldlm_completion_ast, ldlm_blocking_ast);
185         if (err != ELDLM_OK)
186                 LBUG();
187         if (!(flags & LDLM_FL_BLOCK_GRANTED))
188                 LBUG();
189
190         res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
191         if (res == NULL)
192                 LBUG();
193         ldlm_resource_dump(res);
194
195         res = ldlm_lock_convert(lock1, LCK_NL, &flags);
196         if (res != NULL)
197                 ldlm_reprocess_all(res);
198
199         ldlm_resource_dump(res);
200         ldlm_namespace_free(ns);
201
202         RETURN(0);
203 }
204
205 int ldlm_test_extents(struct obd_device *obddev)
206 {
207         struct ldlm_namespace *ns;
208         struct ldlm_resource *res;
209         struct ldlm_lock *lock, *lock1, *lock2;
210         __u64 res_id[RES_NAME_SIZE] = {0, 0, 0};
211         struct ldlm_extent ext1 = {4, 6}, ext2 = {6, 9}, ext3 = {10, 11};
212         ldlm_error_t err;
213         int flags;
214         ENTRY;
215
216         ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
217         if (ns == NULL)
218                 LBUG();
219
220         flags = 0;
221         lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR, NULL,
222                                  0);
223         if (lock1 == NULL)
224                 LBUG();
225         err = ldlm_lock_enqueue(lock1, &ext1, sizeof(ext1), &flags, NULL, NULL);
226         if (err != ELDLM_OK)
227                 LBUG();
228         if (!(flags & LDLM_FL_LOCK_CHANGED))
229                 LBUG();
230
231         flags = 0;
232         lock2 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR,
233                                 NULL, 0);
234         err = ldlm_lock_enqueue(lock2, &ext2, sizeof(ext2), &flags, NULL, NULL);
235         if (err != ELDLM_OK)
236                 LBUG();
237         if (!(flags & LDLM_FL_LOCK_CHANGED))
238                 LBUG();
239
240         flags = 0;
241         lock = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_EX, NULL, 0);
242         if (lock == NULL)
243                 LBUG();
244         err = ldlm_lock_enqueue(lock, &ext3, sizeof(ext3), &flags,
245                                 NULL, NULL);
246         if (err != ELDLM_OK)
247                 LBUG();
248         if (!(flags & LDLM_FL_BLOCK_GRANTED))
249                 LBUG();
250         if (flags & LDLM_FL_LOCK_CHANGED)
251                 LBUG();
252
253         /* Convert/cancel blocking locks */
254         flags = 0;
255         res = ldlm_lock_convert(lock1, LCK_NL, &flags);
256         if (res != NULL)
257                 ldlm_reprocess_all(res);
258
259         ldlm_lock_cancel(lock2);
260         if (res != NULL)
261                 ldlm_reprocess_all(res);
262
263         /* Dump the results */
264         res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
265         if (res == NULL)
266                 LBUG();
267         ldlm_resource_dump(res);
268         ldlm_namespace_free(ns);
269
270         RETURN(0);
271 }
272
273 static int ldlm_test_network(struct obd_device *obddev,
274                              struct lustre_handle *connh)
275 {
276
277         __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
278         struct ldlm_extent ext = {4, 6};
279         struct lustre_handle lockh1;
280         struct ldlm_lock *lock;
281         int flags = 0;
282         ldlm_error_t err;
283         ENTRY;
284
285         err = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, NULL, res_id,
286                                LDLM_EXTENT, &ext, sizeof(ext), LCK_PR, &flags,
287                                ldlm_completion_ast, NULL, NULL, 0, &lockh1);
288
289         CERROR("ldlm_cli_enqueue: %d\n", err);
290
291         flags = 0;
292         err = ldlm_cli_convert(&lockh1, LCK_EX, &flags);
293         CERROR("ldlm_cli_convert: %d\n", err);
294
295         lock = ldlm_handle2lock(&lockh1);
296         ldlm_lock_dump(lock);
297         ldlm_lock_put(lock);
298
299         /* Need to decrement old mode. Don't bother incrementing new
300          * mode since the test is done.
301          */
302         if (err == ELDLM_OK)
303                 ldlm_lock_decref(&lockh1, LCK_PR);
304
305         RETURN(err);
306 }
307
308 static int ldlm_do_decrement(void)
309 {
310         struct ldlm_test_lock *lock_info;
311         struct ldlm_lock *lock;
312         int rc = 0;
313         ENTRY;
314
315         spin_lock(&ctl_lock);
316         if(list_empty(&lock_list)) {
317                 CERROR("lock_list is empty\n");
318                 spin_unlock(&ctl_lock);
319                 RETURN(0);
320         }
321
322         /* delete from list */
323         lock_info = list_entry(lock_list.next,
324                         struct ldlm_test_lock, l_link);
325         list_del(lock_list.next);
326         num_locks--;
327         spin_unlock(&ctl_lock);
328
329         /* decrement and free the info */
330         lock = ldlm_handle2lock(&lock_info->l_lockh);
331         ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
332         ldlm_lock_put(lock);
333
334         OBD_FREE(lock_info, sizeof(*lock_info));
335
336         RETURN(rc);
337 }
338
/*
 * Enqueue one randomly chosen extent lock on behalf of a worker thread:
 * random resource id, random mode, random extent.  A failed enqueue is
 * a fatal test error (LBUG).
 *
 * Assumes num_resources and num_extents are non-zero (they are set by
 * ldlm_regression_start()); a zero value would divide by zero below —
 * TODO confirm callers guarantee this.
 */
static int ldlm_do_enqueue(struct ldlm_test_thread *thread)
{
        struct lustre_handle lockh;
        __u64 res_id[3] = {0};
        __u32 lock_mode;
        struct ldlm_extent ext;
        unsigned char random;
        int flags = 0, rc = 0;
        ENTRY;

        /* Pick a random resource index in [0, num_resources) */
        get_random_bytes(&random, sizeof(random));
        res_id[0] = random % num_resources;

        /* Pick a random lock mode in [1, LCK_NL] */
        get_random_bytes(&random, sizeof(random));
        lock_mode = random % LCK_NL + 1;

        /* Pick a random extent: start in [0, num_extents),
         * end in [start, num_extents) */
        get_random_bytes(&random, sizeof(random));
        ext.start = random % num_extents;
        get_random_bytes(&random, sizeof(random));
        ext.end = random %
                (num_extents - (int)ext.start) + ext.start;

        LDLM_DEBUG_NOLOCK("about to enqueue with resource "LPX64", mode %d,"
                          " extent "LPX64" -> "LPX64, res_id[0], lock_mode,
                          ext.start, ext.end);

        /* Re-use an already-granted compatible lock if possible, else
         * enqueue; ldlm_test_completion_ast() records the result on
         * lock_list. */
        rc = ldlm_match_or_enqueue(&regress_connh, NULL,
                                   thread->obddev->obd_namespace,
                                   NULL, res_id, LDLM_EXTENT, &ext,
                                   sizeof(ext), lock_mode, &flags,
                                   ldlm_test_completion_ast,
                                   ldlm_test_blocking_ast,
                                   NULL, 0, &lockh);

        atomic_inc(&locks_requested);

        if (rc < 0) {
                CERROR("ldlm_cli_enqueue: %d\n", rc);
                LBUG();
        }

        RETURN(rc);
}
385
386 static int ldlm_do_convert(void)
387 {
388         __u32 lock_mode;
389         unsigned char random;
390         int flags = 0, rc = 0;
391         struct ldlm_test_lock *lock_info;
392         struct ldlm_lock *lock;
393         ENTRY;
394
395         /* delete from list */
396         spin_lock(&ctl_lock);
397         lock_info = list_entry(lock_list.next, struct ldlm_test_lock, l_link);
398         list_del(lock_list.next);
399         num_locks--;
400         spin_unlock(&ctl_lock);
401
402         /* Pick a random lock mode */
403         get_random_bytes(&random, sizeof(random));
404         lock_mode = random % LCK_NL + 1;
405
406         /* do the conversion */
407         rc = ldlm_cli_convert(&lock_info->l_lockh , lock_mode, &flags);
408         atomic_inc(&converts_requested);
409
410         if (rc < 0) {
411                 CERROR("ldlm_cli_convert: %d\n", rc);
412                 LBUG();
413         }
414
415         /*
416          *  Adjust reference counts.
417          *  FIXME: This is technically a bit... wrong,
418          *  since we don't know when/if the convert succeeded
419          */
420         ldlm_lock_addref(&lock_info->l_lockh, lock_mode);
421         lock = ldlm_handle2lock(&lock_info->l_lockh);
422         ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
423         ldlm_lock_put(lock);
424
425         OBD_FREE(lock_info, sizeof(*lock_info));
426
427         RETURN(rc);
428 }
429
430
431
/*
 * Main loop of one regression worker thread.  Each iteration randomly
 * decrements, converts, or enqueues a lock; the chance of decrementing
 * grows linearly with the number of locks currently held, so the test
 * hovers around max_locks.  Runs until SVC_STOPPING is set by
 * ldlm_regression_stop().
 *
 * Assumes max_locks != 0 (set by ldlm_regression_start()); zero would
 * divide by zero below — TODO confirm callers guarantee this.
 */
static int ldlm_test_main(void *data)
{
        struct ldlm_test_thread *thread = data;
        ENTRY;

        /* Detach into a kernel daemon and block all signals; the 2.4 and
         * 2.5+ kernels differ in how sigpending state is recalculated. */
        lock_kernel();
        daemonize();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
        sigfillset(&current->blocked);
        recalc_sigpending();
#else
        spin_lock_irq(&current->sigmask_lock);
        sigfillset(&current->blocked);
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);
#endif

        sprintf(current->comm, "ldlm_test");
        unlock_kernel();

        /* Record that the thread is running */
        thread->t_flags |= SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        while (!(thread->t_flags & SVC_STOPPING)) {
                unsigned char random;
                unsigned char dec_chance, con_chance;
                unsigned char chance_left = 100;

                spin_lock(&ctl_lock);
                /* probability of decrementing increases linearly
                 * as more locks are held.
                 */
                dec_chance = chance_left * num_locks / max_locks;
                chance_left -= dec_chance;

                /* FIXME: conversions temporarily disabled
                 * until they are working correctly.
                 */
                /* con_chance = chance_left * num_locks / max_locks; */
                con_chance = 0;
                chance_left -= con_chance;
                spin_unlock(&ctl_lock);

                /* Roll a percentage and dispatch on the computed odds. */
                get_random_bytes(&random, sizeof(random));

                random = random % 100;
                if (random < dec_chance)
                        ldlm_do_decrement();
                else if (random < (dec_chance + con_chance))
                        ldlm_do_convert();
                else
                        ldlm_do_enqueue(thread);

                LDLM_DEBUG_NOLOCK("locks requested: %d, "
                                  "conversions requested %d",
                                  atomic_read(&locks_requested),
                                  atomic_read(&converts_requested));
                LDLM_DEBUG_NOLOCK("locks granted: %d, "
                                  "locks matched: %d",
                                  atomic_read(&locks_granted),
                                  atomic_read(&locks_matched));

                /* num_locks is shared; read it under ctl_lock. */
                spin_lock(&ctl_lock);
                LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
                                  num_locks);
                spin_unlock(&ctl_lock);

                /*
                 * We don't sleep after a lock being blocked, so let's
                 * make sure other things can run.
                 */
                schedule();
        }

        thread->t_flags |= SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        RETURN(0);
}
512
513 static int ldlm_start_thread(struct obd_device *obddev,
514                              struct lustre_handle *connh)
515 {
516         struct ldlm_test_thread *test;
517         int rc;
518         ENTRY;
519
520         OBD_ALLOC(test, sizeof(*test));
521         if (test == NULL) {
522                 LBUG();
523                 RETURN(-ENOMEM);
524         }
525         init_waitqueue_head(&test->t_ctl_waitq);
526
527         test->obddev = obddev;
528
529         spin_lock(&ctl_lock);
530         list_add(&test->t_link, &ctl_threads);
531         spin_unlock(&ctl_lock);
532
533         rc = kernel_thread(ldlm_test_main, (void *)test,
534                            CLONE_VM | CLONE_FS | CLONE_FILES);
535         if (rc < 0) {
536                 CERROR("cannot start thread\n");
537                 RETURN(-EINVAL);
538         }
539         wait_event(test->t_ctl_waitq, test->t_flags & SVC_RUNNING);
540
541         RETURN(0);
542 }
543
/*
 * Start the ldlm regression test.
 *
 * Records the connection and tuning parameters in file-scope globals,
 * then spawns `threads` workers running ldlm_test_main().  If any
 * worker fails to start, the whole test is torn down again via
 * ldlm_regression_stop().
 *
 * Returns 0 on success, or a negative error if the test is already
 * running or a thread could not be started.
 */
int ldlm_regression_start(struct obd_device *obddev,
                          struct lustre_handle *connh,
                          unsigned int threads, unsigned int max_locks_in,
                          unsigned int num_resources_in,
                          unsigned int num_extents_in)
{
        int i, rc = 0;
        ENTRY;

        /* Only one instance may run at a time; ctl_lock guards the flag. */
        spin_lock(&ctl_lock);
        if (regression_running) {
                CERROR("You can't start the ldlm regression twice.\n");
                spin_unlock(&ctl_lock);
                RETURN(-EINVAL);
        }
        regression_running = 1;
        spin_unlock(&ctl_lock);

        /* Publish the test parameters for the worker threads. */
        regress_connh = *connh;
        max_locks = max_locks_in;
        num_resources = num_resources_in;
        num_extents = num_extents_in;

        LDLM_DEBUG_NOLOCK("regression test started: threads: %d, max_locks: "
                          "%d, num_res: %d, num_ext: %d\n",
                          threads, max_locks_in, num_resources_in,
                          num_extents_in);

        for (i = 0; i < threads; i++) {
                rc = ldlm_start_thread(obddev, connh);
                if (rc < 0)
                        GOTO(cleanup, rc);
        }

 cleanup:
        if (rc < 0)
                ldlm_regression_stop();
        RETURN(rc);
}
583
584 int ldlm_regression_stop(void)
585 {
586         ENTRY;
587
588         spin_lock(&ctl_lock);
589         if (!regression_running) {
590                 CERROR("The ldlm regression isn't started.\n");
591                 spin_unlock(&ctl_lock);
592                 RETURN(-EINVAL);
593         }
594
595         while (!list_empty(&ctl_threads)) {
596                 struct ldlm_test_thread *thread;
597                 thread = list_entry(ctl_threads.next, struct ldlm_test_thread,
598                                     t_link);
599
600                 thread->t_flags |= SVC_STOPPING;
601
602                 spin_unlock(&ctl_lock);
603                 wake_up(&thread->t_ctl_waitq);
604                 wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
605                 spin_lock(&ctl_lock);
606
607                 list_del(&thread->t_link);
608                 OBD_FREE(thread, sizeof(*thread));
609         }
610
611         /* decrement all held locks */
612         while (!list_empty(&lock_list)) {
613                 struct ldlm_lock *lock;
614                 struct ldlm_test_lock *lock_info =
615                        list_entry(lock_list.next, struct ldlm_test_lock,
616                                    l_link);
617                 list_del(lock_list.next);
618                 num_locks--;
619
620                 lock = ldlm_handle2lock(&lock_info->l_lockh);
621                 ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
622                 ldlm_lock_put(lock);
623
624                 OBD_FREE(lock_info, sizeof(*lock_info));
625         }
626
627         regression_running = 0;
628         spin_unlock(&ctl_lock);
629
630         RETURN(0);
631 }
632
633 int ldlm_test(struct obd_device *obddev, struct lustre_handle *connh)
634 {
635         int rc;
636         rc = ldlm_test_basics(obddev);
637         if (rc)
638                 RETURN(rc);
639
640         rc = ldlm_test_extents(obddev);
641         if (rc)
642                 RETURN(rc);
643
644         rc = ldlm_test_network(obddev, connh);
645         RETURN(rc);
646 }