fs/lustre-release.git: lustre/ldlm/ldlm_test.c
James Newsome's dlm stress test
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002 Cluster File Systems, Inc. <info@clusterfs.com>
 * Copyright (c) 2002 Lawrence Livermore National Laboratory
 *  Author: James Newsome <newsome2@llnl.gov>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include <asm/atomic.h>
#include <linux/types.h>
#include <linux/random.h>

#include <linux/lustre_dlm.h>

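/* Per-thread state for the regression test.  One of these is queued on
 * ctl_threads for every kernel thread forked by ldlm_start_thread(). */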
struct ldlm_test_thread {
        struct lustre_handle *connh;
        struct obd_device *obddev;
        struct ldlm_namespace *t_ns;
        struct list_head t_link;
        __u32 t_flags;
        wait_queue_head_t t_ctl_waitq;
};

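/* Bookkeeping for a granted lock: its handle and mode, queued on lock_list
 * by ldlm_test_completion_ast() so the lock can be released later. */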
struct ldlm_test_lock {
        struct list_head l_link;
        struct lustre_handle l_lockh;
        ldlm_mode_t l_mode;
};

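/* Upper bound on how many granted locks the completion AST will keep on
 * lock_list before it starts releasing the oldest one. */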
static const int max_locks = 10;

static spinlock_t ctl_lock = SPIN_LOCK_UNLOCKED;
/* protect these with the ctl_lock */
static LIST_HEAD(ctl_threads);
static int regression_running = 0;
static LIST_HEAD(lock_list);
static int num_locks = 0;

/* cumulative stats for regression test */
static atomic_t locks_requested = ATOMIC_INIT(0);
static atomic_t locks_granted = ATOMIC_INIT(0);
static atomic_t locks_matched = ATOMIC_INIT(0);

/*
 * blocking ast for regression test.
 * Just cancels the lock.
 */
static int ldlm_test_blocking_ast(struct ldlm_lock *lock,
                                  struct ldlm_lock_desc *new,
                                  void *data, __u32 data_len)
{
        int rc;
        struct lustre_handle lockh;
        ENTRY;

        LDLM_DEBUG_NOLOCK("We're blocking. Cancelling lock");
        ldlm_lock2handle(lock, &lockh);
        rc = ldlm_cli_cancel(&lockh);
        if (rc < 0) {
                CERROR("ldlm_cli_cancel: %d\n", rc);
                LBUG();
        }

        RETURN(0);
}

/* blocking ast for basic tests. noop */
static int ldlm_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *new,
                             void *data, __u32 data_len)
{
        ENTRY;
        CERROR("ldlm_blocking_ast: lock=%p, new=%p\n", lock, new);
        RETURN(0);
}

/* Completion ast for regression test.
 * Does not sleep when blocked.
 */
static int ldlm_test_completion_ast(struct ldlm_lock *lock, int flags)
{
        ENTRY;

        if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                      LDLM_FL_BLOCK_CONV)) {
                /* Do nothing */
                LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
                ldlm_lock_dump(lock);
        } else {
                /* add to list of granted locks */
                struct ldlm_test_lock *lock_info;

                if (flags == LDLM_FL_WAIT_NOREPROC) {
                        atomic_inc(&locks_matched);
                        LDLM_DEBUG(lock, "lock matched");
                } else if (flags == LDLM_FL_LOCK_CHANGED) {
                        atomic_inc(&locks_granted);
                        LDLM_DEBUG(lock, "lock changed and granted");
                } else {
                        atomic_inc(&locks_granted);
                        LDLM_DEBUG(lock, "lock granted");
                }

                OBD_ALLOC(lock_info, sizeof(*lock_info));
                if (lock_info == NULL) {
                        LBUG();
                        RETURN(-ENOMEM);
                }

                ldlm_lock2handle(lock, &lock_info->l_lockh);
                lock_info->l_mode = lock->l_granted_mode;

                spin_lock(&ctl_lock);
                list_add_tail(&lock_info->l_link, &lock_list);
                num_locks++;

                /* if we're over the max of granted locks, decref some */
                if (num_locks > max_locks) {
                        /* delete from list */
                        lock_info = list_entry(lock_list.next,
                                               struct ldlm_test_lock, l_link);
                        list_del(lock_list.next);
                        num_locks--;
                        spin_unlock(&ctl_lock);

                        /* decrement and free the info
                         * Don't hold ctl_lock here. The decref
                         * may result in another lock being granted
                         * and hence this function being called again.
                         */
                        LDLM_DEBUG_NOLOCK("Decrementing lock");
                        ldlm_lock_decref(&lock_info->l_lockh,
                                         lock_info->l_mode);
                        OBD_FREE(lock_info, sizeof(*lock_info));

                        spin_lock(&ctl_lock);
                }
                spin_unlock(&ctl_lock);
        }

        RETURN(0);
}

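/*
 * One-shot local sanity tests: ldlm_test_basics() exercises plain locks and
 * a convert, ldlm_test_extents() exercises conflicting extent locks.  Both
 * are currently commented out of ldlm_test() at the bottom of this file.
 */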
int ldlm_test_basics(struct obd_device *obddev)
{
        struct ldlm_namespace *ns;
        struct ldlm_resource *res;
        __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
        ldlm_error_t err;
        struct ldlm_lock *lock1, *lock;
        int flags = 0;

        ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
        if (ns == NULL)
                LBUG();

        lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_CR, NULL, 0);
        if (lock1 == NULL)
                LBUG();
        err = ldlm_lock_enqueue(lock1, NULL, 0, &flags,
                                ldlm_completion_ast, ldlm_blocking_ast);
        if (err != ELDLM_OK)
                LBUG();

        lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX, NULL, 0);
        if (lock == NULL)
                LBUG();
        err = ldlm_lock_enqueue(lock, NULL, 0, &flags,
                                ldlm_completion_ast, ldlm_blocking_ast);
        if (err != ELDLM_OK)
                LBUG();
        if (!(flags & LDLM_FL_BLOCK_GRANTED))
                LBUG();

        res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
        if (res == NULL)
                LBUG();
        ldlm_resource_dump(res);

        res = ldlm_lock_convert(lock1, LCK_NL, &flags);
        if (res != NULL) {
                ldlm_reprocess_all(res);
                ldlm_resource_dump(res);
        }
        ldlm_namespace_free(ns);

        return 0;
}


int ldlm_test_extents(struct obd_device *obddev)
{
        struct ldlm_namespace *ns;
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *lock1, *lock2;
        __u64 res_id[RES_NAME_SIZE] = {0, 0, 0};
        struct ldlm_extent ext1 = {4, 6}, ext2 = {6, 9}, ext3 = {10, 11};
        ldlm_error_t err;
        int flags;

        ns = ldlm_namespace_new("test_server", LDLM_NAMESPACE_SERVER);
        if (ns == NULL)
                LBUG();

        flags = 0;
        lock1 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR, NULL,
                                 0);
        if (lock1 == NULL)
                LBUG();
        err = ldlm_lock_enqueue(lock1, &ext1, sizeof(ext1), &flags, NULL, NULL);
        if (err != ELDLM_OK)
                LBUG();
        if (!(flags & LDLM_FL_LOCK_CHANGED))
                LBUG();

        flags = 0;
        lock2 = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PR,
                                 NULL, 0);
        if (lock2 == NULL)
                LBUG();
        err = ldlm_lock_enqueue(lock2, &ext2, sizeof(ext2), &flags, NULL, NULL);
        if (err != ELDLM_OK)
                LBUG();
        if (!(flags & LDLM_FL_LOCK_CHANGED))
                LBUG();

        flags = 0;
        lock = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_EX, NULL, 0);
        if (lock == NULL)
                LBUG();
        err = ldlm_lock_enqueue(lock, &ext3, sizeof(ext3), &flags,
                                NULL, NULL);
        if (err != ELDLM_OK)
                LBUG();
        if (!(flags & LDLM_FL_BLOCK_GRANTED))
                LBUG();
        if (flags & LDLM_FL_LOCK_CHANGED)
                LBUG();

        /* Convert/cancel blocking locks */
        flags = 0;
        res = ldlm_lock_convert(lock1, LCK_NL, &flags);
        if (res != NULL)
                ldlm_reprocess_all(res);

        ldlm_lock_cancel(lock2);
        if (res != NULL)
                ldlm_reprocess_all(res);

        /* Dump the results */
        res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
        if (res == NULL)
                LBUG();
        ldlm_resource_dump(res);
        ldlm_namespace_free(ns);

        return 0;
}

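/*
 * Network test: enqueue a PR extent lock over the given connection, convert
 * it to EX, and drop the reference if the convert succeeded.
 */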
static int ldlm_test_network(struct obd_device *obddev,
                             struct lustre_handle *connh)
{
        __u64 res_id[RES_NAME_SIZE] = {1, 2, 3};
        struct ldlm_extent ext = {4, 6};
        struct lustre_handle lockh1;
        int flags = 0;
        ldlm_error_t err;
        ENTRY;

        err = ldlm_cli_enqueue(connh, NULL, obddev->obd_namespace, NULL, res_id,
                               LDLM_EXTENT, &ext, sizeof(ext), LCK_PR, &flags,
                               ldlm_completion_ast, NULL, NULL, 0, &lockh1);

        CERROR("ldlm_cli_enqueue: %d\n", err);

        flags = 0;
        err = ldlm_cli_convert(&lockh1, LCK_EX, &flags);
        CERROR("ldlm_cli_convert: %d\n", err);

        if (err == ELDLM_OK)
                ldlm_lock_decref(&lockh1, LCK_EX);

        RETURN(err);
}

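/*
 * Main loop for each regression thread: pick a random resource, lock mode,
 * and extent, then ldlm_match_or_enqueue() it.  Granted locks are queued on
 * lock_list by ldlm_test_completion_ast(), which releases the oldest one
 * once more than max_locks are outstanding.
 */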
static int ldlm_test_main(void *data)
{
        struct ldlm_test_thread *thread = data;
        const unsigned int num_resources = 10;
        const unsigned int num_extent = 10;
        ENTRY;

        lock_kernel();
        daemonize();

        /* Block all signals so the stress thread cannot be interrupted. */
        spin_lock_irq(&current->sigmask_lock);
        sigfillset(&current->blocked);
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);

        sprintf(current->comm, "ldlm_test");
        unlock_kernel();

        /* Record that the thread is running */
        thread->t_flags |= SVC_RUNNING;
        wake_up(&thread->t_ctl_waitq);

        while (!(thread->t_flags & SVC_STOPPING)) {
                struct lustre_handle lockh;
                __u64 res_id[3] = {0};
                __u32 lock_mode;
                struct ldlm_extent ext;
                char random;
                int flags = 0, rc = 0;

                /* Pick a random resource from 0 to num_resources - 1 */
                get_random_bytes(&random, sizeof(random));
                res_id[0] = (unsigned char)random % num_resources;

                /* Pick a random lock mode */
                get_random_bytes(&random, sizeof(random));
                lock_mode = (unsigned char)random % LCK_NL + 1;

                /* Pick a random extent */
                get_random_bytes(&random, sizeof(random));
                ext.start = (unsigned int)random % num_extent;
                get_random_bytes(&random, sizeof(random));
                ext.end = (unsigned int)random %
                        (num_extent - (int)ext.start) + ext.start;

                LDLM_DEBUG_NOLOCK("about to enqueue with resource %d, mode %d,"
                                  " extent %d -> %d", (int)res_id[0], lock_mode,
                                  (int)ext.start, (int)ext.end);

                rc = ldlm_match_or_enqueue(thread->connh, NULL,
                                           thread->obddev->obd_namespace, NULL,
                                           res_id, LDLM_EXTENT, &ext,
                                           sizeof(ext), lock_mode, &flags,
                                           ldlm_test_completion_ast,
                                           ldlm_test_blocking_ast, NULL, 0,
                                           &lockh);

                atomic_inc(&locks_requested);
                if (rc < 0) {
                        CERROR("ldlm_match_or_enqueue: %d\n", rc);
                        LBUG();
                }

                LDLM_DEBUG_NOLOCK("locks requested: %d, granted: %d, "
                                  "matched: %d",
                                  atomic_read(&locks_requested),
                                  atomic_read(&locks_granted),
                                  atomic_read(&locks_matched));

                /* This is probably necessary since we don't sleep after a
                 * lock is blocked.
                 */
                schedule();
        }

        thread->t_flags |= SVC_STOPPED;
        wake_up(&thread->t_ctl_waitq);

        RETURN(0);
}

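/*
 * Thread control: ldlm_start_thread() forks a kernel thread running
 * ldlm_test_main() and waits for it to raise SVC_RUNNING.
 * ldlm_regression_stop() later sets SVC_STOPPING on each thread and waits
 * for SVC_STOPPED before unlinking and freeing it.
 */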
static int ldlm_start_thread(struct obd_device *obddev,
                             struct lustre_handle *connh)
{
        struct ldlm_test_thread *test;
        int rc;
        ENTRY;

        OBD_ALLOC(test, sizeof(*test));
        if (test == NULL) {
                LBUG();
                RETURN(-ENOMEM);
        }
        init_waitqueue_head(&test->t_ctl_waitq);

        test->connh = connh;
        test->obddev = obddev;

        spin_lock(&ctl_lock);
        list_add(&test->t_link, &ctl_threads);
        spin_unlock(&ctl_lock);

        rc = kernel_thread(ldlm_test_main, (void *)test,
                           CLONE_VM | CLONE_FS | CLONE_FILES);
        if (rc < 0) {
                CERROR("cannot start thread\n");
                /* don't leave a stale entry on ctl_threads */
                spin_lock(&ctl_lock);
                list_del(&test->t_link);
                spin_unlock(&ctl_lock);
                OBD_FREE(test, sizeof(*test));
                RETURN(-EINVAL);
        }
        wait_event(test->t_ctl_waitq, test->t_flags & SVC_RUNNING);

        RETURN(0);
}

int ldlm_regression_start(struct obd_device *obddev,
                          struct lustre_handle *connh, int count)
{
        int i, rc = 0;
        ENTRY;

        spin_lock(&ctl_lock);
        if (regression_running) {
                CERROR("You can't start the ldlm regression twice.\n");
                spin_unlock(&ctl_lock);
                RETURN(-EINVAL);
        }
        regression_running = 1;
        spin_unlock(&ctl_lock);

        for (i = 0; i < count; i++) {
                rc = ldlm_start_thread(obddev, connh);
                if (rc < 0)
                        GOTO(cleanup, rc);
        }

 cleanup:
        RETURN(rc);
}

int ldlm_regression_stop(void)
{
        ENTRY;

        spin_lock(&ctl_lock);
        if (!regression_running) {
                CERROR("The ldlm regression isn't started.\n");
                spin_unlock(&ctl_lock);
                RETURN(-EINVAL);
        }

        while (!list_empty(&ctl_threads)) {
                struct ldlm_test_thread *thread;
                thread = list_entry(ctl_threads.next, struct ldlm_test_thread,
                                    t_link);

                thread->t_flags |= SVC_STOPPING;
                spin_unlock(&ctl_lock);

                wake_up(&thread->t_ctl_waitq);
                wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);

                spin_lock(&ctl_lock);
                list_del(&thread->t_link);
                OBD_FREE(thread, sizeof(*thread));
        }

        regression_running = 0;
        spin_unlock(&ctl_lock);

        RETURN(0);
}

int ldlm_test(struct obd_device *obddev, struct lustre_handle *connh)
{
        int rc;
/*        rc = ldlm_test_basics(obddev);
        if (rc)
                RETURN(rc);

        rc = ldlm_test_extents(obddev);
        if (rc)
                RETURN(rc);
*/

        rc = ldlm_test_network(obddev, connh);
        RETURN(rc);
}
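
/*
 * A minimal sketch of how a caller might drive the entry points above,
 * assuming it already holds a connected obd_device and lustre_handle.
 * The dispatcher and its cmd values are hypothetical and only illustrate
 * the pairing of ldlm_regression_start()/ldlm_regression_stop(); the real
 * hooks live elsewhere in the tree.
 */
#if 0
static int ldlm_test_dispatch(struct obd_device *obddev,
                              struct lustre_handle *connh, int cmd, int count)
{
        switch (cmd) {
        case 0:                 /* one-shot network enqueue/convert test */
                return ldlm_test(obddev, connh);
        case 1:                 /* fork 'count' stress threads */
                return ldlm_regression_start(obddev, connh, count);
        case 2:                 /* stop and reap all stress threads */
                return ldlm_regression_stop();
        default:
                return -EINVAL;
        }
}
#endif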