4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
27 * This file is part of Lustre, http://www.lustre.org/
28 * Lustre is a trademark of Sun Microsystems, Inc.
30 * lustre/tests/parallel_grouplock.c
32 * Author: You Feng <youfeng@clusterfs.com>
40 #include <sys/types.h>
41 #include <asm/types.h>
44 #include <sys/ioctl.h>
50 #define LPGL_BUF_LEN 8192
51 #define LPGL_TEST_ITEMS 12
55 #define MAX_WAIT_TRIES 10
56 #define WAIT_TIME 1 /* secs */
57 #define ONE_MB 1048576 /* 1 MB */
58 #define MIN_LGBUF_SIZE 536870912 /* 512 MB */
59 #define MAX_LGBUF_SIZE 536870912 /* 512 MB */
60 // #define MAX_LGBUF_SIZE 1073741824 /* 1 GB */
73 char buf[LPGL_BUF_LEN];
76 char filename[MAX_FILENAME_LEN];
84 lgbuf_size = MAX_LGBUF_SIZE;
85 for (; lgbuf_size >= MIN_LGBUF_SIZE; lgbuf_size -= ONE_MB)
86 if ((lgbuf = (char *)malloc(lgbuf_size)) != NULL)
89 FAIL("malloc of large buffer failed.\n");
97 rc = read(fd, buf, sizeof(buf));
99 pos = lseek(fd, 0, SEEK_CUR);
100 FAILF("read of file %s at pos %d for %zu bytes returned %d: (%d) %s.\n",
101 filename, pos, sizeof(buf), rc, errno, strerror(errno));
102 } else if (rc != sizeof(buf)) {
103 pos = lseek(fd, 0, SEEK_CUR);
104 FAILF("read of file %s at pos %d for %zu bytes returned %d.\n",
105 filename, pos, sizeof(buf), rc);
/*
 * write_buf() - fill the global buf[] with the byte value 'index' and
 * write one buf-sized record to fd at offset index * sizeof(buf).
 * A failed or short write aborts the test via FAILF()/the test-harness
 * failure macros, reporting file name, offset and errno.
 */
110 write_buf(int fd, int index)
112 int pos = index * sizeof(buf);
115 memset(buf, index, sizeof(buf));
116 lseek(fd, pos, SEEK_SET);
117 rc = write(fd, buf, sizeof(buf));
/* rc < 0: syscall failure, report errno; rc != sizeof(buf): short write */
119 FAILF("write of file %s at pos %d for %zu bytes returned %d: (%d) %s.\n",
120 filename, pos, sizeof(buf), rc, errno, strerror(errno));
121 else if (rc != sizeof(buf))
122 FAILF("write of file %s at pos %d for %zu bytes returned %d.\n",
123 filename, pos, sizeof(buf), rc);
127 * task0 attempts GR(gid=1) -- granted immediately
128 * task1 attempts PR|PW -- blocked, goes on waiting list
129 * task2 attempts GR(gid=1) -> should be granted
130 * task2 writes to file and releases GR(gid=1)
131 * task0 waits for task2 to complete its processing
132 * task0 writes to file and releases GR(gid=1)
133 * task1 PR|PW should be granted and reads the file
/*
 * grouplock_test1() - three-task GROUP lock scenario (interleaving is
 * described in the comment block above this function).
 *
 * filename:    file under test (used in failure messages)
 * fd:          open descriptor for that file
 * blocking_op: READ or WRITE - the operation the blocked task issues
 *              while GR(gid=1) is held
 * unlock_op:   IOCTL or CLOSE - how the GROUP lock is released
 *
 * task0 coordinates: it polls completion messages from the other ranks
 * with MPI_Irecv/MPI_Test, bounded by MAX_WAIT_TRIES, so a lost wakeup
 * becomes a test failure rather than a hang.
 */
135 void grouplock_test1(char *filename, int fd, int blocking_op, int unlock_op)
137 MPI_Request req1, req2;
138 int iter, flag1, flag2, temp1, temp2;
/* Take the GROUP lock (gid=1) before letting the other ranks start. */
142 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
144 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
145 filename, errno, strerror(errno));
148 MPI_Barrier(MPI_COMM_WORLD);
152 if (blocking_op == WRITE) {
154 lseek(fd, 0, SEEK_SET);
157 for (i = 0; i <= 2; i++)
/* Signal task0 (tag 1) that this rank finished its blocking op. */
160 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
163 /* Wait for task1 to progress. This could be racey. */
/* Second GR(gid=1) request - must be granted while gid=1 is held. */
166 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
168 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
169 filename, errno, strerror(errno));
173 if (unlock_op == CLOSE)
176 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
179 FAILF("%s release GROUP_LOCK of file %s: (%d) %s.\n",
180 (unlock_op == CLOSE) ? "close" : "ioctl",
181 filename, errno, strerror(errno));
183 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
186 /* PR|PW task will tell us when it completes */
187 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
188 /* 2nd locking task will tell us when it completes */
189 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
191 /* Wait for task2 to complete. */
192 iter = MAX_WAIT_TRIES;
196 FAIL("2nd locking task is not progressing\n");
200 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
202 FAIL("PR|PW task progressed even though GROUP lock is held\n");
204 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
207 /* Make sure task1 is still waiting. */
208 iter = MAX_WAIT_TRIES;
212 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
214 FAIL("PR|PW task progressed even though GROUP lock is held\n");
219 /* Now we need to release the lock */
220 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
222 FAILF("ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
223 filename, errno, strerror(errno));
225 /* Wait for task1 to complete. */
226 iter = MAX_WAIT_TRIES;
230 FAIL("PR|PW task is not progressing even though GROUP lock was released\n");
234 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
242 * task0 attempts GR(gid=1) -- granted immediately
243 * task1 attempts GR(gid=2) -- blocked
244 * task2 attempts PR|PW -- blocked
245 * task3 attempts GR(gid=2) -- blocked
246 * task4 attempts GR(gid=1) -- should be granted
247 * task0,4 writes to file and releases GR(gid=1) --
248 * this allows task1 & 3's GR locks to be granted; task2 remains blocked.
249 * task1 & 3 write to file and release GR(gid=2)
250 * task2 PR|PW should be granted and reads the file.
/*
 * grouplock_test2() - five-task GROUP lock scenario (interleaving is
 * described in the comment block above this function).
 *
 * filename:    file under test (used in failure messages)
 * fd:          open descriptor for that file
 * blocking_op: READ or WRITE - the operation the PR|PW task issues
 *              while GR(gid=1) is held
 * unlock_op:   IOCTL or CLOSE - how GROUP locks are released
 *
 * task0 polls the other ranks' completion messages via MPI_Irecv +
 * MPI_Test with MAX_WAIT_TRIES retries, asserting at each stage that
 * only the tasks that SHOULD have progressed did.
 */
252 void grouplock_test2(char *filename, int fd, int blocking_op, int unlock_op)
254 int i, iter, rc, gid = 1;
255 int flag1, flag2, flag3, flag4;
256 int temp1, temp2, temp3, temp4;
257 MPI_Request req1, req2, req3, req4;
/* task0: take GR(gid=1) before the barrier releases the other ranks. */
260 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
262 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
263 filename, errno, strerror(errno));
266 MPI_Barrier(MPI_COMM_WORLD);
270 /* Wait for task2 to issue its read request. */
271 sleep(2 * WAIT_TIME);
274 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
276 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
277 filename, errno, strerror(errno));
281 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
284 * Do not release the locks until task 0 is ready to watch
285 * for reading task only
287 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
290 if (unlock_op == CLOSE)
293 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
295 FAILF("%s release GROUP_LOCK of file %s: (%d) %s.\n",
296 (unlock_op == CLOSE) ? "close" : "ioctl",
297 filename, errno, strerror(errno));
300 /* Give task1 a chance to request its GR lock. */
303 if (blocking_op == WRITE) {
305 lseek(fd, 0, SEEK_SET);
308 for (i = 0; i <= 3; i++)
311 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
314 /* Give task1 & 3 a chance to queue their GR locks. */
315 sleep(3 * WAIT_TIME);
317 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
319 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
320 filename, errno, strerror(errno));
324 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
326 FAILF("%s release GROUP_LOCK of file %s: (%d) %s.\n",
327 (unlock_op == CLOSE) ? "close" : "ioctl",
328 filename, errno, strerror(errno));
330 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
333 /* locking tasks will tell us when they complete */
334 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
335 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
336 MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
337 MPI_Irecv(&temp4, 1, MPI_INT, 4, 1, MPI_COMM_WORLD, &req4);
339 /* Make sure all tasks that should be blocked are waiting. */
340 iter = MAX_WAIT_TRIES;
344 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
345 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
346 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
348 FAIL("GROUP (gid=2) task progressed even though GROUP (gid=1) lock is held.\n");
350 FAIL("PR|PW task progressed even though GROUP (gid=1) lock is still held\n");
353 /* Wait for task4 to signal it has completed. */
354 iter = MAX_WAIT_TRIES;
358 FAIL("2nd task GROUP(gid=1) not progressing\n");
361 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
362 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
363 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
364 MPI_Test(&req4, &flag4, MPI_STATUS_IGNORE);
366 FAIL("GROUP (gid=2) task progressed even though GROUP (gid=1) lock is held.\n");
368 FAIL("PR|PW task progressed even though GROUP (gid=1) lock is still held\n");
373 /* Now let's release first lock */
374 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
376 FAILF("ioctl GROUP_UNLOCK of file %s returned %d",
379 /* Wait for task1 & 3 to signal they have their lock. */
380 iter = MAX_WAIT_TRIES;
384 FAIL("GROUP(gid=2) tasks not progressing\n");
387 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
388 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
389 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
391 fprintf(stderr, "task2 %d\n", flag2);
392 FAIL("PR task progressed even though GROUP lock was on the queue task\n");
394 } while (!(flag1 && flag3));
396 /* Make sure task2 is still waiting. */
397 iter = MAX_WAIT_TRIES;
401 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
403 FAIL("PR task progressed even though GR(gid=2) lock was active.\n");
406 /* Tell task1 & 3 to release their GR(gid=2) lock. */
407 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
408 MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
410 /* Wait for task2 (PR) to complete. */
411 iter = MAX_WAIT_TRIES;
415 FAIL("reading task is not progressing even though GROUP locks are released\n");
419 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
426 * Tests a bug that once existed in the group lock code;
427 * i.e. that a GR lock request on a O_NONBLOCK fd could fail even though
428 * there is no blocking GROUP lock ahead of it on the waitq.
430 * task0 starts a large write (PW). this test could be racey if this
431 * write finishes too quickly.
432 * task1 attempts GR(gid=1) -- blocked
433 * task2 attempts GR(gid=2) with a O_NONBLOCK fs. should not fail.
/*
 * grouplock_test3() - regression test: a GR lock request on an
 * O_NONBLOCK fd must not fail when no blocking GROUP lock is ahead of
 * it on the wait queue (see comment block above).
 *
 * Timing-sensitive: task0's large write (lgbuf, lgbuf_size bytes) must
 * still be in flight when the GR requests arrive; the sleeps below
 * only make this likely, not guaranteed.
 */
435 void grouplock_test3(char *filename, int fd)
437 MPI_Request req1, req2;
438 int iter, flag1, flag2, temp1, temp2;
/* rank 2 uses a non-blocking fd; its GR request must still succeed. */
443 } else if (rank == 2) {
444 rc = fcntl(fd, F_SETFL, O_NONBLOCK);
446 FAILF("fcntl(O_NONBLOCK) failed: (%d) %s.\n",
447 errno, strerror(errno));
450 MPI_Barrier(MPI_COMM_WORLD);
459 * Racey, we have to sleep just long enough for
460 * task0's write to start.
464 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
466 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
467 filename, errno, strerror(errno));
469 /* tell task0 we have the lock. */
470 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
472 /* the close of fd will release the lock. */
/* task0: the large PW write the GR requests must race against. */
475 rc = write(fd, lgbuf, lgbuf_size);
477 FAILF("write of file %s for %d bytes returned %d: (%d) %s.\n",
478 filename, lgbuf_size, rc, errno, strerror(errno));
479 else if (rc != lgbuf_size)
480 FAILF("write of file %s for %d bytes returned %d.\n",
481 filename, lgbuf_size, rc);
483 /* GR tasks will tell us when they complete */
484 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
485 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
487 /* Wait for task1 & 2 to complete. */
488 iter = MAX_WAIT_TRIES;
492 FAIL("GR(gid=1) tasks are not progressing even no conflicting locks exist.\n");
496 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
497 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
498 } while (!(flag1 && flag2));
504 * Tests a bug that once existed in the group lock code;
505 * i.e. extent locks without O_NONBLOCK that go on the waitq before a group
506 * lock request came in and was granted. The extent lock would timed out and
509 * task0 starts a large write (PW). this test could be racey if this
510 * write finishes too quickly.
511 * task1 attempts PR -- blocked
512 * task2 attempts GR(gid=1) -- blocked
513 * task0 completes write
514 * task1 should wakeup and complete its read
515 * task2 should wake up after task1 completes.
/*
 * grouplock_test4() - regression test: an extent (PR) lock queued
 * before a GROUP lock request must not time out while the GROUP lock
 * is held (see comment block above).
 *
 * Timing-sensitive: task0's large write (lgbuf, lgbuf_size bytes) must
 * outlast the setup of task1's PR and task2's GR requests; increase
 * lgbuf_size if the race is lost (see XXX note below).
 */
517 void grouplock_test4(char *filename, int fd)
520 int iter, flag1, temp1;
526 MPI_Barrier(MPI_COMM_WORLD);
531 * Racey, we have to sleep just long enough for
532 * task0's write to start.
534 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
537 /* tell task2 to go. */
538 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
542 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
545 /* Give task0 & 1 a chance to start. */
546 MPI_Recv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
548 sleep(2 * WAIT_TIME);
550 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
552 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
553 filename, errno, strerror(errno));
555 /* tell task0 we have the lock. */
556 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
559 * Do not release the locks until task 0 tells us to.
560 * for reading task only
562 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
565 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
567 FAILF("ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
568 filename, errno, strerror(errno));
571 /* tell task1 to go to avoid race */
572 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
/* task0: the large PW write whose duration the PR/GR requests race. */
573 rc = write(fd, lgbuf, lgbuf_size);
575 FAILF("write of file %s for %d bytes returned %d: (%d) %s.\n",
576 filename, lgbuf_size,
577 rc, errno, strerror(errno));
578 else if (rc != lgbuf_size)
579 FAILF("write of file %s for %d bytes returned %d.\n",
580 filename, lgbuf_size, rc);
582 /* wait for task2 to get its lock. */
583 MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
586 /* Tell task2 it's ok to release its GR(gid=1) lock. */
587 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
589 /* wait a really long time. */
590 sleep(180 * WAIT_TIME);
592 /* PR task will tell us when it completes */
593 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
596 * Make sure the PR task is successful and doesn't hang.
598 * XXX - To test properly we need to make sure the read
599 * gets queued before task2's group lock request.
600 * You may need to increase lgbuf_size.
602 iter = MAX_WAIT_TRIES;
606 FAIL("PR task is hung !\n");
610 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
618 * task0 attempts GR(gid=1) -- granted
619 * task1 attempts PR on non-blocking fd -> should return -EAGAIN
620 * task2 attempts PW on non-blocking fd -> should return -EAGAIN
621 * task3 attempts GR(gid=2) on non-blocking fd -> should return -EAGAIN
/*
 * grouplock_nonblock_test() - while task0 holds GR(gid=1), tasks 1-3
 * switch their fds to O_NONBLOCK and verify that a PR (read), a PW
 * (write) and a conflicting GR(gid=2) request each fail immediately
 * with EAGAIN instead of blocking (see comment block above).
 */
623 void grouplock_nonblock_test(char *filename, int fd)
625 MPI_Request req1, req2, req3;
626 int iter, flag1, flag2, flag3, temp1, temp2, temp3;
/* task0: take and hold GR(gid=1) for the duration of the checks. */
630 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
632 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
633 filename, errno, strerror(errno));
/* other ranks: make the fd non-blocking before attempting I/O. */
636 rc = fcntl(fd, F_SETFL, O_NONBLOCK);
638 FAILF("fcntl(O_NONBLOCK) failed: (%d) %s.\n",
639 errno, strerror(errno));
641 MPI_Barrier(MPI_COMM_WORLD);
/* each attempt must fail with -1/EAGAIN while GR(gid=1) is held. */
645 rc = read(fd, buf, sizeof(buf));
646 if ((rc != -1) || (errno != EAGAIN))
647 FAIL("PR lock succeeded while incompatible GROUP LOCK (gid=1) is still held\n");
649 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
652 rc = write(fd, buf, sizeof(buf));
653 if ((rc != -1) || (errno != EAGAIN))
654 FAIL("PW lock succeeded while incompatible GROUP LOCK (gid=1) is still held\n");
656 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
660 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
661 if ((rc != -1) || (errno != EAGAIN))
662 FAIL("GROUP_LOCK (gid=2) succeeded while incompatible GROUP LOCK (gid=1) is still held.\n");
664 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
667 /* reading task will tell us when it completes */
668 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
669 /* writing task will tell us when it completes */
670 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
671 /* 2nd locking task will tell us when it completes */
672 MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
674 iter = MAX_WAIT_TRIES;
678 FAIL("non-blocking tasks are not progressing\n");
681 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
682 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
683 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
684 } while (!(flag1 && flag2 && flag3));
/* all three non-blocking attempts observed; drop GR(gid=1). */
686 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
688 FAILF("ioctl GROUP_UNLOCK of file %s", filename);
693 /* Just test some error paths with invalid requests */
/*
 * grouplock_errorstest() - exercise GROUP lock ioctl error paths on a
 * single fd: double lock with the same gid, double lock with a
 * different gid, unlock with the wrong gid, and unlock of a
 * never-locked fd.  Each invalid request is expected to fail with
 * EINVAL; success (or any other errno) fails the test.
 */
694 void grouplock_errorstest(char *filename, int fd)
698 MPI_Barrier(MPI_COMM_WORLD);
/* take a valid GROUP lock first so the double-lock cases apply. */
702 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
704 FAILF("ioctl GROUP_LOCK of file %s: (%d) %s.\n",
705 filename, errno, strerror(errno));
707 /* second group lock on same fd, same gid */
708 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
711 FAILF("Double GROUP lock failed with errno %d instead of EINVAL\n",
714 FAIL("Taking second GROUP lock on same fd succeed\n");
717 /* second group lock on same fd, different gid */
718 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid + 1);
721 FAILF("Double GROUP lock with different gid failed with errno %d instead of EINVAL\n",
724 FAIL("Taking second GROUP lock on same fd, with different gid, succeeded.\n");
727 /* GROUP unlock with wrong gid */
728 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid + 1);
731 FAILF("GROUP_UNLOCK with wrong gid failed with errno %d instead of EINVAL\n",
734 FAIL("GROUP unlock with wrong gid succeed\n");
/* legitimate unlock with the correct gid must succeed. */
737 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
739 FAILF("ioctl GROUP_UNLOCK of file %s returned %d.",
744 /* unlock of never locked fd */
745 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
748 FAILF("GROUP_UNLOCK on never locked fd failed with errno %d instead of EINVAL.\n",
751 FAIL("GROUP unlock on never locked fd succeed\n");
/*
 * grouplock_file() - open (create/truncate, O_SYNC) <testdir>/<name>
 * on every rank, then dispatch to the subtest selected by 'subtest'
 * (1..LPGL_TEST_ITEMS); any other number fails the run.  Barriers
 * keep all ranks in lockstep around the open and the subtest.
 */
757 void grouplock_file(char *name, int subtest)
760 int flags = O_CREAT | O_RDWR | O_SYNC | O_TRUNC;
763 sprintf(filename, "%s/%s", testdir, name);
765 fd = open(filename, flags, mode);
767 FAILF("open of file %s: (%d) %s.\n",
768 filename, errno, strerror(errno));
770 MPI_Barrier(MPI_COMM_WORLD);
/* subtests 1-4: test1 over the READ/WRITE x IOCTL/CLOSE matrix. */
774 grouplock_test1(filename, fd, READ, IOCTL);
777 grouplock_test1(filename, fd, READ, CLOSE);
780 grouplock_test1(filename, fd, WRITE, IOCTL);
783 grouplock_test1(filename, fd, WRITE, CLOSE);
/* subtests 5-8: test2 over the same matrix. */
786 grouplock_test2(filename, fd, READ, IOCTL);
789 grouplock_test2(filename, fd, READ, CLOSE);
792 grouplock_test2(filename, fd, WRITE, IOCTL);
795 grouplock_test2(filename, fd, WRITE, CLOSE);
798 grouplock_nonblock_test(filename, fd);
801 grouplock_errorstest(filename, fd);
804 grouplock_test3(filename, fd);
807 grouplock_test4(filename, fd);
810 FAILF("wrong subtest number %d (should be <= %d)",
811 subtest, LPGL_TEST_ITEMS);
819 MPI_Barrier(MPI_COMM_WORLD);
/*
 * parallel_grouplock() - top-level driver: run either the single
 * subtest selected with -t (only_test) or all LPGL_TEST_ITEMS
 * subtests in order, labelling each via teststr.
 */
822 void parallel_grouplock(void)
828 sprintf(teststr, "subtest %d", only_test);
830 grouplock_file("parallel_grouplock", only_test);
/* no -t given: iterate over every subtest. */
833 for (i = 1; i <= LPGL_TEST_ITEMS; i++) {
834 sprintf(teststr, "subtest %d", i);
836 grouplock_file("parallel_grouplock", i);
/*
 * usage() - print the command-line help for 'proc' to stdout.
 * main() checks for -h/--help before MPI_Init, so this is safe to
 * reach without mpirun.
 *
 * Fix: "verbositly" -> "verbosity" in the -v help line.
 */
842 void usage(char *proc)
847 printf("Usage: %s [-h] -d <testdir> [-n <num>]\n", proc);
848 printf(" [-t <num>] [-v] [-V #] [-g]\n");
849 printf("\t-h: prints this help message\n");
850 printf("\t-d: the directory in which the tests will run\n");
851 printf("\t-n: repeat test # times\n");
852 printf("\t-t: run a particular test #\n");
853 printf("\t-v: increase the verbosity level by 1\n");
854 printf("\t-V: select a specific verbosity level\n");
855 printf("\t-g: debug mode\n");
864 int main(int argc, char *argv[])
866 int i, iterations = 1, c;
872 * Check for -h parameter before MPI_Init so the binary can be
873 * called directly, without, for instance, mpirun
875 for (i = 1; i < argc; ++i) {
876 if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help"))
880 MPI_Init(&argc, &argv);
881 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
882 MPI_Comm_size(MPI_COMM_WORLD, &size);
884 /* Parse command line options */
886 c = getopt(argc, argv, "d:ghn:t:vV:");
901 iterations = atoi(optarg);
904 only_test = atoi(optarg);
910 verbose = atoi(optarg);
916 printf("%s is running with %d task(es) %s\n",
917 argv[0], size, debug ? "in DEBUG mode" : "\b\b");
919 if (size < MIN_GLHOST) {
921 "Error: %d tasks run, but should be at least %d tasks to run the test!\n",
923 MPI_Abort(MPI_COMM_WORLD, 2);
926 if (!testdir && rank == 0) {
928 "Please specify a test directory! (\"%s -h\" for help)\n",
930 MPI_Abort(MPI_COMM_WORLD, 2);
935 for (i = 0; i < iterations; ++i) {
937 printf("%s: Running test #%s(iter %d)\n",
938 timestamp(), argv[0], i);
940 parallel_grouplock();
941 MPI_Barrier(MPI_COMM_WORLD);
945 printf("%s: All tests passed!\n", timestamp());