 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 * lustre/tests/parallel_grouplock.c
 * Author: You Feng <youfeng@clusterfs.com>
#include <sys/types.h>
#include <asm/types.h>
#include <sys/ioctl.h>
#define LPGL_BUF_LEN 8192
#define LPGL_TEST_ITEMS 12
#define MAX_WAIT_TRIES 10
#define WAIT_TIME 1 /* secs */
#define ONE_MB 1048576 /* 1 MB */
#define MIN_LGBUF_SIZE 536870912 /* 512 MB */
#define MAX_LGBUF_SIZE 536870912 /* 512 MB */
// #define MAX_LGBUF_SIZE 1073741824 /* 1 GB */
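/*
 * The large buffer sized by MIN/MAX_LGBUF_SIZE is what keeps task0's write
 * in flight long enough for the blocked lock requests in tests 3 and 4 to
 * queue behind it; those tests become racy if the write finishes too
 * quickly.  With MIN == MAX, only a single allocation size is attempted.
 */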
char buf[LPGL_BUF_LEN];
char filename[MAX_FILENAME_LEN];
char errmsg[MAX_FILENAME_LEN + 96];
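/*
 * Allocate the large I/O buffer: start at MAX_LGBUF_SIZE and back off in
 * 1 MB steps until malloc() succeeds or the size drops below MIN_LGBUF_SIZE.
 */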
        lgbuf_size = MAX_LGBUF_SIZE;
        for (; lgbuf_size >= MIN_LGBUF_SIZE; lgbuf_size -= ONE_MB)
                if ((lgbuf = (char *)malloc(lgbuf_size)) != NULL)
        FAIL("malloc of large buffer failed.\n");
        rc = read(fd, buf, sizeof(buf));
        pos = lseek(fd, 0, SEEK_CUR);
                "read of file %s at pos %d for %zu bytes returned %d: (%d) %s.\n",
                filename, pos, sizeof(buf), rc, errno, strerror(errno));
        } else if (rc != sizeof(buf)) {
                pos = lseek(fd, 0, SEEK_CUR);
                "read of file %s at pos %d for %zu bytes returned %d.\n",
                filename, pos, sizeof(buf), rc);
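/*
 * write_buf() writes one sizeof(buf) record at offset index * sizeof(buf),
 * filled with the index value so each record's content identifies its slot.
 */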
write_buf(int fd, int index)
        int pos = index * sizeof(buf);
        memset(buf, index, sizeof(buf));
        lseek(fd, pos, SEEK_SET);
        rc = write(fd, buf, sizeof(buf));
                "write of file %s at pos %d for %zu bytes returned %d: (%d) %s.\n",
                filename, pos, sizeof(buf), rc, errno, strerror(errno));
        } else if (rc != sizeof(buf)) {
                "write of file %s at pos %d for %zu bytes returned %d.\n",
                filename, pos, sizeof(buf), rc);
 * task0 attempts GR(gid=1) -- granted immediately
 * task1 attempts PR|PW -- blocked, goes on waiting list
 * task2 attempts GR(gid=1) -> should be granted
 * task2 writes to file and releases GR(gid=1)
 * task0 waits for task2 to complete its processing
 * task0 writes to file and releases GR(gid=1)
 * task1 PR|PW should be granted and reads the file
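 * (blocking_op selects whether the blocked task performs a read or a write
 * once its lock is granted, and unlock_op selects whether the GR(gid=1)
 * holders drop the lock with LL_IOC_GROUP_UNLOCK or by closing the fd;
 * grouplock_file() runs all four READ/WRITE x IOCTL/CLOSE combinations.)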
void grouplock_test1(char *filename, int fd, int blocking_op, int unlock_op)
        MPI_Request req1, req2;
        int iter, flag1, flag2, temp1, temp2;
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        MPI_Barrier(MPI_COMM_WORLD);
        if (blocking_op == WRITE) {
                lseek(fd, 0, SEEK_SET);
        for (i = 0; i <= 2; i++)
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        /* Wait for task1 to progress.  This could be racy. */
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        if (unlock_op == CLOSE)
        rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
                "%s release GROUP_LOCK of file %s: (%d) %s.\n",
                (unlock_op == CLOSE) ? "close" : "ioctl",
                filename, errno, strerror(errno));
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        /* PR|PW task will tell us when it completes */
        MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
        /* 2nd locking task will tell us when it completes */
        MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
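        /*
         * Waiting pattern used throughout these tests: poll the non-blocking
         * receives with MPI_Test() up to MAX_WAIT_TRIES times, sleeping
         * WAIT_TIME seconds between tries, and FAIL if a task that should
         * (or should not) have progressed does not match expectations.
         */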
        /* Wait for task2 to complete. */
        iter = MAX_WAIT_TRIES;
        FAIL("2nd locking task is not progressing\n");
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
        FAIL("PR|PW task progressed even though GROUP lock is held\n");
        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
        /* Make sure task1 is still waiting. */
        iter = MAX_WAIT_TRIES;
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
        FAIL("PR|PW task progressed even though GROUP lock is held\n");
        /* Now we need to release the lock */
        if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
                "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        /* Wait for task1 to complete. */
        iter = MAX_WAIT_TRIES;
        FAIL("PR|PW task is not progressing even though GROUP lock was released\n");
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
 * task0 attempts GR(gid=1) -- granted immediately
 * task1 attempts GR(gid=2) -- blocked
 * task2 attempts PR|PW -- blocked
 * task3 attempts GR(gid=2) -- blocked
 * task4 attempts GR(gid=1) -- should be granted
 * task0,4 write to file and release GR(gid=1) --
 *   this allows task1 & 3's GR(gid=2) locks to be granted; task2 remains blocked.
 * task1 & 3 write to file and release GR(gid=2)
 * task2 PR|PW should be granted and reads the file.
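 * (In the code, MPI rank N plays taskN: ranks 0 and 4 lock with gid=1,
 * ranks 1 and 3 are expected to lock with gid=2, and rank 2 is the PR|PW
 * task.)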
void grouplock_test2(char *filename, int fd, int blocking_op, int unlock_op)
        int i, iter, rc, gid = 1;
        int flag1, flag2, flag3, flag4;
        int temp1, temp2, temp3, temp4;
        MPI_Request req1, req2, req3, req4;
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        MPI_Barrier(MPI_COMM_WORLD);
        /* Wait for task2 to issue its read request. */
        sleep(2 * WAIT_TIME);
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
         * Do not release the lock until task0 is ready to verify that
         * only the reading task is still blocked.
        MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
        if (unlock_op == CLOSE)
        rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
                "%s release GROUP_LOCK of file %s: (%d) %s.\n",
                (unlock_op == CLOSE) ? "close" : "ioctl",
                filename, errno, strerror(errno));
        /* Give task1 a chance to request its GR lock. */
        if (blocking_op == WRITE) {
                lseek(fd, 0, SEEK_SET);
        for (i = 0; i <= 3; i++)
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        /* Give task1 & 3 a chance to queue their GR locks. */
        sleep(3 * WAIT_TIME);
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
                "%s release GROUP_LOCK of file %s: (%d) %s.\n",
                (unlock_op == CLOSE) ? "close" : "ioctl",
                filename, errno, strerror(errno));
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        /* locking tasks will tell us when they complete */
        MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
        MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
        MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
        MPI_Irecv(&temp4, 1, MPI_INT, 4, 1, MPI_COMM_WORLD, &req4);
        /* Make sure all tasks that should be blocked are waiting. */
        iter = MAX_WAIT_TRIES;
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
        MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
        FAIL("GROUP (gid=2) task progressed even though GROUP (gid=1) lock is held.\n");
        FAIL("PR|PW task progressed even though GROUP (gid=1) lock is still held\n");
        /* Wait for task4 to signal it has completed. */
        iter = MAX_WAIT_TRIES;
        FAIL("2nd GROUP(gid=1) task is not progressing\n");
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
        MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
        MPI_Test(&req4, &flag4, MPI_STATUS_IGNORE);
        FAIL("GROUP (gid=2) task progressed even though GROUP (gid=1) lock is held.\n");
        FAIL("PR|PW task progressed even though GROUP (gid=1) lock is still held\n");
        /* Now let's release the first lock. */
        if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
                "ioctl GROUP_UNLOCK of file %s returned %d",
        /* Wait for task1 & 3 to signal they have their lock. */
        iter = MAX_WAIT_TRIES;
        FAIL("GROUP(gid=2) tasks are not progressing\n");
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
        MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
        fprintf(stderr, "task2 %d\n", flag2);
        FAIL("PR task progressed even though GROUP locks are still held\n");
        } while (!(flag1 && flag3));
        /* Make sure task2 is still waiting. */
        iter = MAX_WAIT_TRIES;
        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
        FAIL("PR task progressed even though GR(gid=2) lock was active.\n");
        /* Tell task1 & 3 to release their GR(gid=2) lock. */
        MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
        MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
        /* Wait for task2 (PR) to complete. */
        iter = MAX_WAIT_TRIES;
        FAIL("reading task is not progressing even though GROUP locks are released\n");
        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
 * Tests a bug that once existed in the group lock code;
 * i.e. that a GR lock request on an O_NONBLOCK fd could fail even though
 * there is no blocking GROUP lock ahead of it on the waitq.
 * task0 starts a large write (PW).  This test could be racy if the
 * write finishes too quickly.
 * task1 attempts GR(gid=1) -- blocked
 * task2 attempts GR(gid=2) with an O_NONBLOCK fd; this should not fail.
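 * (Rank 2 switches its fd to non-blocking with fcntl(F_SETFL, O_NONBLOCK),
 * so its LL_IOC_GROUP_LOCK ioctl must succeed immediately instead of
 * blocking behind task0's in-flight write.)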
void grouplock_test3(char *filename, int fd)
        MPI_Request req1, req2;
        int iter, flag1, flag2, temp1, temp2;
        } else if (rank == 2) {
                rc = fcntl(fd, F_SETFL, O_NONBLOCK);
                sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
                        errno, strerror(errno));
        MPI_Barrier(MPI_COMM_WORLD);
         * Racy: we have to sleep just long enough for
         * task0's write to start.
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        /* tell task0 we have the lock. */
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        /* the close of fd will release the lock. */
        rc = write(fd, lgbuf, lgbuf_size);
                "write of file %s for %d bytes returned %d: (%d) %s.\n",
                filename, lgbuf_size,
                rc, errno, strerror(errno));
        } else if (rc != lgbuf_size) {
                "write of file %s for %d bytes returned %d.\n",
                filename, lgbuf_size, rc);
        /* GR tasks will tell us when they complete */
        MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
        MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
        /* Wait for task1 & 2 to complete. */
        iter = MAX_WAIT_TRIES;
        FAIL("GR(gid=1) tasks are not progressing even though no conflicting locks exist.\n");
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
        } while (!(flag1 && flag2));
 * Tests a bug that once existed in the group lock code;
 * i.e. extent locks without O_NONBLOCK that go on the waitq before a group
 * lock request came in and was granted.  The extent lock would time out and
 * task0 starts a large write (PW).  This test could be racy if the
 * write finishes too quickly.
 * task1 attempts PR -- blocked
 * task2 attempts GR(gid=1) -- blocked
 * task0 completes write
 * task1 should wake up and complete its read
 * task2 should wake up after task1 completes.
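 * (For the bug to be exercised, task1's PR read must already be queued
 * before task2's group lock request arrives; that is why task0 signals
 * task1 before starting its write and why lgbuf_size may need to be
 * increased -- see the XXX note further down.)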
void grouplock_test4(char *filename, int fd)
        int iter, flag1, temp1;
        MPI_Barrier(MPI_COMM_WORLD);
         * Racy: we have to sleep just long enough for
         * task0's write to start.
        MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
        /* tell task2 to go. */
        MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        /* Give task0 & 1 a chance to start. */
        MPI_Recv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
        sleep(2 * WAIT_TIME);
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        /* tell task0 we have the lock. */
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
         * Do not release the lock until task0 tells us to.
        MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
        rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
                "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        /* tell task1 to go to avoid race */
        MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
        rc = write(fd, lgbuf, lgbuf_size);
                "write of file %s for %d bytes returned %d: (%d) %s.\n",
                filename, lgbuf_size,
                rc, errno, strerror(errno));
        } else if (rc != lgbuf_size) {
                "write of file %s for %d bytes returned %d.\n",
                filename, lgbuf_size, rc);
        /* wait for task2 to get its lock. */
        MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
        /* Tell task2 it's ok to release its GR(gid=1) lock. */
        MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
        /* wait a really long time. */
        sleep(180 * WAIT_TIME);
        /* PR task will tell us when it completes */
        MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
         * Make sure the PR task is successful and doesn't hang.
         * XXX - To test properly we need to make sure the read
         * gets queued before task2's group lock request.
         * You may need to increase lgbuf_size.
        iter = MAX_WAIT_TRIES;
        FAIL("PR task is hung!\n");
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
 * task0 attempts GR(gid=1) -- granted
 * task1 attempts PR on non-blocking fd -> should return -EWOULDBLOCK
 * task2 attempts PW on non-blocking fd -> should return -EWOULDBLOCK
 * task3 attempts GR(gid=2) on non-blocking fd -> should return -EWOULDBLOCK
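 * (Ranks 1-3 operate on an fd set to O_NONBLOCK, so the read, the write and
 * the GROUP_LOCK ioctl are each expected to return -1 with errno set to
 * EWOULDBLOCK rather than block while task0 holds GR(gid=1).)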
void grouplock_nonblock_test(char *filename, int fd)
        MPI_Request req1, req2, req3;
        int iter, flag1, flag2, flag3, temp1, temp2, temp3;
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        rc = fcntl(fd, F_SETFL, O_NONBLOCK);
        sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
                errno, strerror(errno));
        MPI_Barrier(MPI_COMM_WORLD);
        rc = read(fd, buf, sizeof(buf));
        if ((rc != -1) || (errno != EWOULDBLOCK))
                FAIL("PR lock succeeded while incompatible GROUP LOCK (gid=1) is still held\n");
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        rc = write(fd, buf, sizeof(buf));
        if ((rc != -1) || (errno != EWOULDBLOCK))
                FAIL("PW lock succeeded while incompatible GROUP LOCK (gid=1) is still held\n");
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
        if ((rc != -1) || (errno != EWOULDBLOCK))
                FAIL("GROUP_LOCK (gid=2) succeeded while incompatible GROUP LOCK (gid=1) is still held.\n");
        MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        /* reading task will tell us when it completes */
        MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
        /* writing task will tell us when it completes */
        MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
        /* 2nd locking task will tell us when it completes */
        MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
        iter = MAX_WAIT_TRIES;
        FAIL("non-blocking tasks are not progressing\n");
        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
        MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
        } while (!(flag1 && flag2 && flag3));
        if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
                sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s",
/* Just test some error paths with invalid requests */
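/*
 * The invalid requests exercised here: a second group lock on the same fd
 * with the same gid, a second group lock with a different gid, an unlock
 * with the wrong gid, and an unlock on an fd that was never group-locked.
 * Each is expected to fail with EINVAL.
 */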
void grouplock_errorstest(char *filename, int fd)
        MPI_Barrier(MPI_COMM_WORLD);
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
                filename, errno, strerror(errno));
        /* second group lock on same fd, same gid */
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                if (errno != EINVAL) {
                        "Double GROUP lock failed with errno %d instead of EINVAL\n",
                FAIL("Taking a second GROUP lock on the same fd succeeded\n");
        /* second group lock on same fd, different gid */
        if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid + 1)) == -1) {
                if (errno != EINVAL) {
                        "Double GROUP lock with different gid failed with errno %d instead of EINVAL\n",
                FAIL("Taking a second GROUP lock on the same fd, with a different gid, succeeded.\n");
        /* GROUP unlock with wrong gid */
        if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid + 1)) == -1) {
                if (errno != EINVAL) {
                        "GROUP_UNLOCK with wrong gid failed with errno %d instead of EINVAL\n",
                FAIL("GROUP unlock with wrong gid succeeded\n");
        if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
                "ioctl GROUP_UNLOCK of file %s returned %d.",
        /* unlock of never locked fd */
        if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
                if (errno != EINVAL) {
                        "GROUP_UNLOCK on never locked fd failed with errno %d instead of EINVAL.\n",
                FAIL("GROUP unlock on a never locked fd succeeded\n");
void grouplock_file(char *name, int subtest)
        int flags = O_CREAT | O_RDWR | O_SYNC | O_TRUNC;
        sprintf(filename, "%s/%s", testdir, name);
        if ((fd = open(filename, flags, mode)) == -1) {
                sprintf(errmsg, "open of file %s: (%d) %s.\n",
                        filename, errno, strerror(errno));
        MPI_Barrier(MPI_COMM_WORLD);
        grouplock_test1(filename, fd, READ, IOCTL);
        grouplock_test1(filename, fd, READ, CLOSE);
        grouplock_test1(filename, fd, WRITE, IOCTL);
        grouplock_test1(filename, fd, WRITE, CLOSE);
        grouplock_test2(filename, fd, READ, IOCTL);
        grouplock_test2(filename, fd, READ, CLOSE);
        grouplock_test2(filename, fd, WRITE, IOCTL);
        grouplock_test2(filename, fd, WRITE, CLOSE);
        grouplock_nonblock_test(filename, fd);
        grouplock_errorstest(filename, fd);
        grouplock_test3(filename, fd);
        grouplock_test4(filename, fd);
        sprintf(errmsg, "wrong subtest number %d (should be <= %d)",
                subtest, LPGL_TEST_ITEMS);
        MPI_Barrier(MPI_COMM_WORLD);
void parallel_grouplock(void)
        sprintf(teststr, "subtest %d", only_test);
        grouplock_file("parallel_grouplock", only_test);
        for (i = 1; i <= LPGL_TEST_ITEMS; i++) {
                sprintf(teststr, "subtest %d", i);
                grouplock_file("parallel_grouplock", i);
void usage(char *proc)
        printf("Usage: %s [-h] -d <testdir> [-n <num>]\n", proc);
        printf("          [-t <num>] [-v] [-V #] [-g]\n");
        printf("\t-h: prints this help message\n");
        printf("\t-d: the directory in which the tests will run\n");
        printf("\t-n: repeat test # times\n");
        printf("\t-t: run a particular test #\n");
        printf("\t-v: increase the verbosity level by 1\n");
        printf("\t-V: select a specific verbosity level\n");
        printf("\t-g: debug mode\n");
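/*
 * Example invocation (illustrative only -- the launcher, task count and test
 * directory depend on the site setup; test2 uses ranks 0-4, so at least five
 * tasks are needed):
 *
 *   mpirun -np 5 ./parallel_grouplock -d /mnt/lustre/grouplock_dir -v
 */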
int main(int argc, char *argv[])
        int i, iterations = 1, c;
         * Check for -h parameter before MPI_Init so the binary can be
         * called directly, without, for instance, mpirun
        for (i = 1; i < argc; ++i) {
                if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help"))
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        /* Parse command line options */
        c = getopt(argc, argv, "d:ghn:t:vV:");
        iterations = atoi(optarg);
        only_test = atoi(optarg);
        verbose = atoi(optarg);
        printf("%s is running with %d task(s) %s\n",
               argv[0], size, debug ? "in DEBUG mode" : "\b\b");
        if (size < MIN_GLHOST) {
                "Error: running with %d tasks, but at least %d tasks are required for this test!\n",
                MPI_Abort(MPI_COMM_WORLD, 2);
        if (!testdir && rank == 0) {
                "Please specify a test directory! (\"%s -h\" for help)\n",
                MPI_Abort(MPI_COMM_WORLD, 2);
        for (i = 0; i < iterations; ++i) {
                printf("%s: Running test #%s (iter %d)\n",
                       timestamp(), argv[0], i);
                parallel_grouplock();
        MPI_Barrier(MPI_COMM_WORLD);
        printf("%s: All tests passed!\n", timestamp());