/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/tests/parallel_grouplock.c
 *
 * Author: You Feng <youfeng@clusterfs.com>
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <asm/types.h>
#include <sys/ioctl.h>
#include <mpi.h>
#include <lustre/lustre_user.h>	/* LL_IOC_GROUP_LOCK / LL_IOC_GROUP_UNLOCK */
#include "lp_utils.h"		/* shared test harness: FAIL(), timestamp(), ... */
#define LPGL_BUF_LEN 8192
#define LPGL_TEST_ITEMS 12

/* blocking_op / unlock_op selectors; the exact values are immaterial */
#define READ 1
#define WRITE 2
#define IOCTL 3
#define CLOSE 4

#define MIN_GLHOST 5			/* grouplock_test2 uses ranks 0-4 */

#define MAX_WAIT_TRIES 10
#define WAIT_TIME 1			/* secs */
#define ONE_MB 1048576			/* 1 MB */
#define MIN_LGBUF_SIZE 536870912	/* 512 MB */
#define MAX_LGBUF_SIZE 536870912	/* 512 MB */
// #define MAX_LGBUF_SIZE 1073741824	/* 1 GB */
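/*
 * FAIL(), timestamp() and MAX_FILENAME_LEN come from the shared test
 * harness (lp_utils.h above); their definitions are not part of this
 * file. A minimal stand-in for FAIL(), if this excerpt were built in
 * isolation, might look like the following -- an approximation, not the
 * harness's actual macro:
 */
#ifndef FAIL
#define FAIL(msg)							\
	do {								\
		fprintf(stderr, "rank %d: FAILED at %s:%d: %s",		\
			rank, __FILE__, __LINE__, (msg));		\
		MPI_Abort(MPI_COMM_WORLD, 1);				\
	} while (0)
#endif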
char buf[LPGL_BUF_LEN];
char filename[MAX_FILENAME_LEN];
char errmsg[MAX_FILENAME_LEN + 256];	/* the original +20 was too small for
					 * the longer messages built below */

/* shared test state; the full tree declares these alongside the harness */
int rank, size;
char *testdir;
int only_test;
int debug, verbose;
static char *lgbuf;
static int lgbuf_size;
static void
alloc_lgbuf(void)	/* helper name assumed; its signature was elided */
{
	lgbuf_size = MAX_LGBUF_SIZE;
	for (; lgbuf_size >= MIN_LGBUF_SIZE; lgbuf_size -= ONE_MB)
		if ((lgbuf = (char *)malloc(lgbuf_size)) != NULL)
			return;

	FAIL("malloc of large buffer failed.\n");
}
void read_buf(int fd, int index)
{
	int pos, rc;

	rc = read(fd, buf, sizeof(buf));
	if (rc == -1) {
		pos = lseek(fd, 0, SEEK_CUR);
		sprintf(errmsg, "read of file %s at pos %d for %zu bytes "
			"returned %d: (%d) %s.\n",
			filename, pos, sizeof(buf), rc, errno, strerror(errno));
		FAIL(errmsg);
	} else if (rc != sizeof(buf)) {
		pos = lseek(fd, 0, SEEK_CUR);
		sprintf(errmsg, "read of file %s at pos %d for %zu bytes "
			"returned %d.\n",
			filename, pos, sizeof(buf), rc);
		FAIL(errmsg);
	}
}
void write_buf(int fd, int index)
{
	int pos = index * sizeof(buf);
	int rc;

	memset(buf, index, sizeof(buf));
	lseek(fd, pos, SEEK_SET);
	rc = write(fd, buf, sizeof(buf));
	if (rc == -1) {
		sprintf(errmsg, "write of file %s at pos %d for %zu bytes "
			"returned %d: (%d) %s.\n",
			filename, pos, sizeof(buf), rc, errno, strerror(errno));
		FAIL(errmsg);
	} else if (rc != sizeof(buf)) {
		sprintf(errmsg, "write of file %s at pos %d for %zu bytes "
			"returned %d.\n",
			filename, pos, sizeof(buf), rc);
		FAIL(errmsg);
	}
}
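/*
 * The primitive under test: Lustre's group lock ioctl pair. Every
 * process that passes the same non-zero gid shares the lock, while
 * conflicting extent locks (PR/PW) queue behind it; closing the fd also
 * drops the lock, which the CLOSE unlock_op variants below rely on.
 * A minimal reference sequence -- a hypothetical helper, not used by
 * the tests themselves:
 */
static void group_lock_demo(int fd, int gid)
{
	/* join lock group "gid" (any non-zero id) */
	if (ioctl(fd, LL_IOC_GROUP_LOCK, gid) == -1)
		FAIL("GROUP_LOCK failed\n");

	/* ... grouped I/O: other holders of the same gid run concurrently,
	 * while PR/PW requests from outside the group wait ... */

	/* leave the group; the lock drops when the last holder leaves */
	if (ioctl(fd, LL_IOC_GROUP_UNLOCK, gid) == -1)
		FAIL("GROUP_UNLOCK failed\n");
}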
/*
 * task0 attempts GR(gid=1) -- granted immediately
 * task1 attempts PR|PW -- blocked, goes on waiting list
 * task2 attempts GR(gid=1) -- should be granted
 * task2 writes to the file and releases GR(gid=1)
 * task0 waits for task2 to complete its processing
 * task0 writes to the file and releases GR(gid=1)
 * task1's PR|PW is granted; it reads the file
 */
void grouplock_test1(char *filename, int fd, int blocking_op, int unlock_op)
{
	MPI_Request req1, req2;
	int iter, flag1, flag2, temp1, temp2;
	int i, rc, gid = 1;

	if (rank == 0) {
		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
	}

	MPI_Barrier(MPI_COMM_WORLD);
	switch (rank) {
	case 1:		/* the PR|PW task */
		if (blocking_op == WRITE) {
			write_buf(fd, rank);
			lseek(fd, 0, SEEK_SET);
		}
		for (i = 0; i <= 2; i++)
			read_buf(fd, i);
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		break;
	case 2:		/* second member of the gid=1 group */
		/* Wait for task1 to progress. This could be racy. */
		sleep(WAIT_TIME);

		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}

		write_buf(fd, rank);

		if (unlock_op == CLOSE)
			rc = close(fd);
		else
			rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
		if (rc == -1) {
			sprintf(errmsg, "%s release GROUP_LOCK of file %s: (%d) %s.\n",
				(unlock_op == CLOSE) ? "close" : "ioctl",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		break;
	case 0:		/* watch the other two tasks */
		/* PR|PW task will tell us when it completes */
		MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
		/* 2nd locking task will tell us when it completes */
		MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);

		/* Wait for task2 to complete. */
		iter = MAX_WAIT_TRIES;
		do {
			if (!--iter)
				FAIL("2nd locking task is not progressing\n");
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
			if (flag1)
				FAIL("PR|PW task progressed even though GROUP "
				     "lock is held\n");
			MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
		} while (!flag2);
		/* Make sure task1 is still waiting. */
		iter = MAX_WAIT_TRIES;
		do {
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
			if (flag1)
				FAIL("PR|PW task progressed even though "
				     "GROUP lock is held\n");
		} while (--iter);
		/* Now we need to release the lock */
		if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
		/* Wait for task1 to complete. */
		iter = MAX_WAIT_TRIES;
		do {
			if (!--iter)
				FAIL("PR|PW task is not progressing even "
				     "though GROUP lock was released\n");
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
		} while (!flag1);
		break;
	}
}
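/*
 * Every watcher loop in these tests follows the same shape: post one
 * MPI_Irecv per monitored task, then poll with MPI_Test, sleeping
 * WAIT_TIME between probes and giving up after MAX_WAIT_TRIES. A
 * hypothetical helper distilling that pattern -- the tests keep it
 * inline so each loop can FAIL with its own message:
 */
static void wait_for_completion(MPI_Request *req, char *timeout_msg)
{
	int iter = MAX_WAIT_TRIES, flag = 0;

	while (!flag) {
		if (!iter--)
			FAIL(timeout_msg);	/* watched task never finished */
		sleep(WAIT_TIME);
		MPI_Test(req, &flag, MPI_STATUS_IGNORE);
	}
}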
/*
 * task0 attempts GR(gid=1) -- granted immediately
 * task1 attempts GR(gid=2) -- blocked
 * task2 attempts PR|PW -- blocked
 * task3 attempts GR(gid=2) -- blocked
 * task4 attempts GR(gid=1) -- should be granted
 * task0 & 4 write to the file and release GR(gid=1) --
 *   this allows task1 & 3's GR(gid=2) locks to be granted; task2 remains
 *   blocked.
 * task1 & 3 write to the file and release GR(gid=2)
 * task2's PR|PW is granted; it reads the file
 */
void grouplock_test2(char *filename, int fd, int blocking_op, int unlock_op)
{
	int i, iter, rc, gid = 1;
	int flag1, flag2, flag3, flag4;
	int temp1, temp2, temp3, temp4;
	MPI_Request req1, req2, req3, req4;

	if (rank == 0) {
		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
	}

	MPI_Barrier(MPI_COMM_WORLD);
	switch (rank) {
	case 3:
		/* Wait for task2 to issue its read request. */
		sleep(2 * WAIT_TIME);
		/* fall through */
	case 1:
		gid = 2;
		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}

		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);

		/* Do not release the locks until task0 is ready to watch
		 * the reading task. */
		MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
			 MPI_STATUS_IGNORE);
		write_buf(fd, rank);

		if (unlock_op == CLOSE)
			rc = close(fd);
		else
			rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
		if (rc == -1) {
			sprintf(errmsg, "%s release GROUP_LOCK of file %s: (%d) %s.\n",
				(unlock_op == CLOSE) ? "close" : "ioctl",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
		break;
	case 2:		/* the PR|PW task */
		/* Give task1 a chance to request its GR lock. */
		sleep(WAIT_TIME);

		if (blocking_op == WRITE) {
			write_buf(fd, rank);
			lseek(fd, 0, SEEK_SET);
		}
		for (i = 0; i <= 3; i++)
			read_buf(fd, i);
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		break;
	case 4:		/* second member of the gid=1 group */
		/* Give task1 & 3 a chance to queue their GR locks. */
		sleep(2 * WAIT_TIME);

		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}

		write_buf(fd, rank);

		if (unlock_op == CLOSE)
			rc = close(fd);
		else
			rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
		if (rc == -1) {
			sprintf(errmsg, "%s release GROUP_LOCK of file %s: (%d) %s.\n",
				(unlock_op == CLOSE) ? "close" : "ioctl",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		break;
	case 0:		/* locking tasks will tell us when they complete */
		MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
		MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
		MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
		MPI_Irecv(&temp4, 1, MPI_INT, 4, 1, MPI_COMM_WORLD, &req4);
		/* Make sure all tasks that should be blocked are waiting. */
		iter = MAX_WAIT_TRIES;
		do {
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
			MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
			MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
			if (flag1 || flag3)
				FAIL("GROUP (gid=2) task progressed even though"
				     " GROUP (gid=1) lock is held.\n");
			if (flag2)
				FAIL("PR|PW task progressed even though "
				     "GROUP (gid=1) lock is still held\n");
		} while (--iter);
		/* Wait for task4 to signal it has completed. */
		iter = MAX_WAIT_TRIES;
		do {
			if (!--iter)
				FAIL("2nd GROUP(gid=1) task is not progressing\n");
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
			MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
			MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
			MPI_Test(&req4, &flag4, MPI_STATUS_IGNORE);
			if (flag1 || flag3)
				FAIL("GROUP (gid=2) task progressed even though"
				     " GROUP (gid=1) lock is held.\n");
			if (flag2)
				FAIL("PR|PW task progressed even though "
				     "GROUP (gid=1) lock is still held\n");
		} while (!flag4);
		/* Now let's release the first lock. */
		if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
				"returned %d", filename, rc);
			FAIL(errmsg);
		}
		/* Wait for task1 & 3 to signal they have their locks. */
		iter = MAX_WAIT_TRIES;
		do {
			if (!--iter)
				FAIL("GROUP(gid=2) tasks are not progressing\n");
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
			MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
			MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
			if (flag2) {
				fprintf(stderr, "task2 %d\n", flag2);
				FAIL("PR|PW task progressed even though a "
				     "GROUP lock was still queued\n");
			}
		} while (!(flag1 && flag3));
		/* Make sure task2 is still waiting. */
		iter = MAX_WAIT_TRIES;
		do {
			sleep(WAIT_TIME);
			MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
			if (flag2)
				FAIL("PR|PW task progressed even though "
				     "GR(gid=2) locks were active.\n");
		} while (--iter);
		/* Tell task1 & 3 to release their GR(gid=2) locks. */
		MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
		MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);

		/* Wait for task2 (PR|PW) to complete. */
		iter = MAX_WAIT_TRIES;
		do {
			if (!--iter)
				FAIL("reading task is not progressing even "
				     "though GROUP locks are released\n");
			sleep(WAIT_TIME);
			MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
		} while (!flag2);
		break;
	}
}
/*
 * Tests a bug that once existed in the group lock code:
 * a GR lock request on an O_NONBLOCK fd could fail even though
 * there was no blocking GROUP lock ahead of it on the waitq.
 *
 * task0 starts a large write (PW). This test could be racy if the
 * write finishes too quickly.
 * task1 attempts GR(gid=1) -- blocked
 * task2 attempts GR(gid=2) on an O_NONBLOCK fd -- should not fail
 */
void grouplock_test3(char *filename, int fd)
{
	MPI_Request req1, req2;
	int iter, flag1, flag2, temp1, temp2;
	int rc, gid = 1;
	if (rank == 0) {
		alloc_lgbuf();
	} else if (rank == 2) {
		rc = fcntl(fd, F_SETFL, O_NONBLOCK);
		if (rc == -1) {
			sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
				errno, strerror(errno));
			FAIL(errmsg);
		}
	}

	MPI_Barrier(MPI_COMM_WORLD);
	switch (rank) {
	case 2:		/* uses gid=2 on its O_NONBLOCK fd */
		gid = 2;
		/* fall through */
	case 1:
		/* Racy: we have to sleep just long enough for
		 * task0's write to start. */
		sleep(WAIT_TIME);

		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}

		/* tell task0 we have the lock. */
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);

		/* the close of fd will release the lock. */
		break;
	case 0:		/* start the large write */
		rc = write(fd, lgbuf, lgbuf_size);
		if (rc == -1) {
			sprintf(errmsg, "write of file %s for %d bytes "
				"returned %d: (%d) %s.\n",
				filename, lgbuf_size, rc, errno, strerror(errno));
			FAIL(errmsg);
		} else if (rc != lgbuf_size) {
			sprintf(errmsg, "write of file %s for %d bytes "
				"returned %d.\n",
				filename, lgbuf_size, rc);
			FAIL(errmsg);
		}
		/* GR tasks will tell us when they complete */
		MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
		MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);

		/* Wait for task1 & 2 to complete. */
		iter = MAX_WAIT_TRIES;
		do {
			if (!--iter)
				FAIL("GR tasks are not progressing even "
				     "though no conflicting locks exist.\n");
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
			MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
		} while (!(flag1 && flag2));
		break;
	}
}
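/*
 * Aside: on an O_NONBLOCK fd the GROUP_LOCK ioctl fails immediately with
 * EWOULDBLOCK instead of sleeping (grouplock_nonblock_test below depends
 * on exactly that). A hypothetical caller wanting try-then-retry
 * semantics could wrap it like this; nothing in these tests uses it:
 */
static int try_group_lock(int fd, int gid, int tries)
{
	while (tries-- > 0) {
		if (ioctl(fd, LL_IOC_GROUP_LOCK, gid) == 0)
			return 0;		/* lock granted */
		if (errno != EWOULDBLOCK)
			return -1;		/* hard error */
		sleep(WAIT_TIME);		/* busy: back off and retry */
	}
	errno = EWOULDBLOCK;
	return -1;				/* still busy after all tries */
}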
/*
 * Tests a bug that once existed in the group lock code:
 * extent locks without O_NONBLOCK that went on the waitq before a group
 * lock request came in and was granted; the extent lock would then time out.
 *
 * task0 starts a large write (PW). This test could be racy if the
 * write finishes too quickly.
 * task1 attempts PR -- blocked
 * task2 attempts GR(gid=1) -- blocked
 * task0 completes its write
 * task1 should wake up and complete its read
 * task2 should wake up and complete after task1 does
 */
void grouplock_test4(char *filename, int fd)
{
	MPI_Request req1;
	int iter, flag1, temp1;
	int rc, gid = 1;

	if (rank == 0)
		alloc_lgbuf();
	MPI_Barrier(MPI_COMM_WORLD);

	switch (rank) {
	case 1:		/* the PR task */
		/* Racy: we have to sleep just long enough for
		 * task0's write to start. */
		MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
			 MPI_STATUS_IGNORE);
		sleep(WAIT_TIME);

		/* tell task2 to go. */
		MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);

		/* issue the PR read that should block behind task0's PW */
		read_buf(fd, 0);
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		break;
	case 2:		/* the GR(gid=1) requester */
		/* Give task0 & 1 a chance to start. */
		MPI_Recv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
			 MPI_STATUS_IGNORE);
		sleep(2 * WAIT_TIME);

		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}

		/* tell task0 we have the lock. */
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		/* Do not release the lock until task0 tells us to;
		 * it is watching the reading task. */
		MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
			 MPI_STATUS_IGNORE);

		rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
		if (rc == -1) {
			sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
		break;
	case 0:		/* the large writer */
		/* tell task1 to go, to avoid a race */
		MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
		rc = write(fd, lgbuf, lgbuf_size);
		if (rc == -1) {
			sprintf(errmsg, "write of file %s for %d bytes "
				"returned %d: (%d) %s.\n",
				filename, lgbuf_size, rc, errno, strerror(errno));
			FAIL(errmsg);
		} else if (rc != lgbuf_size) {
			sprintf(errmsg, "write of file %s for %d bytes "
				"returned %d.\n",
				filename, lgbuf_size, rc);
			FAIL(errmsg);
		}
		/* wait for task2 to get its lock. */
		MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
			 MPI_STATUS_IGNORE);

		/* Tell task2 it's ok to release its GR(gid=1) lock. */
		MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);

		/* wait a really long time. */
		sleep(180 * WAIT_TIME);
		/* PR task will tell us when it completes */
		MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);

		/* Make sure the PR task is successful and does not hang.
		 *
		 * XXX - to test this properly we need to make sure the read
		 * gets queued before task2's group lock request.
		 * You may need to increase lgbuf_size. */
		iter = MAX_WAIT_TRIES;
		do {
			if (!--iter)
				FAIL("PR task is hung!\n");
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
		} while (!flag1);
		break;
	}
}
/*
 * task0 attempts GR(gid=1) -- granted
 * task1 attempts PR on a non-blocking fd -- should fail with EWOULDBLOCK
 * task2 attempts PW on a non-blocking fd -- should fail with EWOULDBLOCK
 * task3 attempts GR(gid=2) on a non-blocking fd -- should fail with EWOULDBLOCK
 */
void grouplock_nonblock_test(char *filename, int fd)
{
	MPI_Request req1, req2, req3;
	int iter, flag1, flag2, flag3, temp1, temp2, temp3;
	int rc, gid = 1;
	if (rank == 0) {
		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
	} else {	/* the other tasks use non-blocking fds */
		rc = fcntl(fd, F_SETFL, O_NONBLOCK);
		if (rc == -1) {
			sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
				errno, strerror(errno));
			FAIL(errmsg);
		}
	}

	MPI_Barrier(MPI_COMM_WORLD);
	switch (rank) {
	case 1:		/* PR should fail with EWOULDBLOCK */
		rc = read(fd, buf, sizeof(buf));
		if ((rc != -1) || (errno != EWOULDBLOCK))
			FAIL("PR lock succeeded while incompatible "
			     "GROUP LOCK (gid=1) is still held\n");
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		break;
	case 2:		/* PW should fail with EWOULDBLOCK */
		rc = write(fd, buf, sizeof(buf));
		if ((rc != -1) || (errno != EWOULDBLOCK))
			FAIL("PW lock succeeded while incompatible "
			     "GROUP LOCK (gid=1) is still held\n");
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		break;
	case 3:		/* a second group lock should fail with EWOULDBLOCK */
		gid = 2;
		rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
		if ((rc != -1) || (errno != EWOULDBLOCK))
			FAIL("GROUP_LOCK (gid=2) succeeded while incompatible "
			     "GROUP LOCK (gid=1) is still held.\n");
		MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
		break;
	case 0:		/* watch the three non-blocking tasks */
		/* reading task will tell us when it completes */
		MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
		/* writing task will tell us when it completes */
		MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
		/* 2nd locking task will tell us when it completes */
		MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);

		iter = MAX_WAIT_TRIES;
		do {
			if (!--iter)
				FAIL("non-blocking tasks are not progressing\n");
			sleep(WAIT_TIME);
			MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
			MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
			MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
		} while (!(flag1 && flag2 && flag3));
		if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
				"returned %d.", filename, rc);
			FAIL(errmsg);
		}
		break;
	}
}
/* Just test some error paths with invalid requests. */
void grouplock_errorstest(char *filename, int fd)
{
	int rc, gid = 1;

	MPI_Barrier(MPI_COMM_WORLD);

	switch (rank) {
	case 0:		/* this rank takes the lock, then misuses it */
		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
				filename, errno, strerror(errno));
			FAIL(errmsg);
		}
		/* second group lock on same fd, same gid */
		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
			if (errno != EINVAL) {
				sprintf(errmsg, "Double GROUP lock failed "
					"with errno %d instead of EINVAL\n",
					errno);
				FAIL(errmsg);
			}
		} else {
			FAIL("Taking second GROUP lock on same fd succeeded\n");
		}
		/* second group lock on same fd, different gid */
		if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid + 1)) == -1) {
			if (errno != EINVAL) {
				sprintf(errmsg, "Double GROUP lock with "
					"different gid failed with errno %d "
					"instead of EINVAL\n", errno);
				FAIL(errmsg);
			}
		} else {
			FAIL("Taking second GROUP lock on same fd, with "
			     "different gid, succeeded.\n");
		}
		/* GROUP unlock with wrong gid */
		if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid + 1)) == -1) {
			if (errno != EINVAL) {
				sprintf(errmsg, "GROUP_UNLOCK with wrong gid "
					"failed with errno %d instead of "
					"EINVAL\n", errno);
				FAIL(errmsg);
			}
		} else {
			FAIL("GROUP unlock with wrong gid succeeded\n");
		}
		if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
			sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
				"returned %d.", filename, rc);
			FAIL(errmsg);
		}
		break;
	case 1:		/* this rank never took the lock */
		/* unlock of never locked fd */
		if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
			if (errno != EINVAL) {
				sprintf(errmsg, "GROUP_UNLOCK on never locked "
					"fd failed with errno %d instead of "
					"EINVAL\n", errno);
				FAIL(errmsg);
			}
		} else {
			FAIL("GROUP unlock on never locked fd succeeded\n");
		}
		break;
	}
}
void grouplock_file(char *name, int subtest)
{
	int fd;
	int flags = O_CREAT | O_RDWR | O_SYNC | O_TRUNC;
	int mode = 0666;

	sprintf(filename, "%s/%s", testdir, name);

	if ((fd = open(filename, flags, mode)) == -1) {
		sprintf(errmsg, "open of file %s: (%d) %s.\n",
			filename, errno, strerror(errno));
		FAIL(errmsg);
	}
	MPI_Barrier(MPI_COMM_WORLD);

	switch (subtest) {
	case 1:  grouplock_test1(filename, fd, READ, IOCTL);	break;
	case 2:  grouplock_test1(filename, fd, READ, CLOSE);	break;
	case 3:  grouplock_test1(filename, fd, WRITE, IOCTL);	break;
	case 4:  grouplock_test1(filename, fd, WRITE, CLOSE);	break;
	case 5:  grouplock_test2(filename, fd, READ, IOCTL);	break;
	case 6:  grouplock_test2(filename, fd, READ, CLOSE);	break;
	case 7:  grouplock_test2(filename, fd, WRITE, IOCTL);	break;
	case 8:  grouplock_test2(filename, fd, WRITE, CLOSE);	break;
	case 9:  grouplock_nonblock_test(filename, fd);		break;
	case 10: grouplock_errorstest(filename, fd);		break;
	case 11: grouplock_test3(filename, fd);			break;
	case 12: grouplock_test4(filename, fd);			break;
	default:
		sprintf(errmsg, "wrong subtest number %d (should be <= %d)",
			subtest, LPGL_TEST_ITEMS);
		FAIL(errmsg);
	}
	close(fd);
	MPI_Barrier(MPI_COMM_WORLD);
}
void parallel_grouplock(void)
{
	char teststr[16];	/* labels the harness's per-subtest banners */
	int i;

	if (only_test) {
		sprintf(teststr, "subtest %d", only_test);
		grouplock_file("parallel_grouplock", only_test);
		return;
	}

	for (i = 1; i <= LPGL_TEST_ITEMS; i++) {
		sprintf(teststr, "subtest %d", i);
		grouplock_file("parallel_grouplock", i);
	}
}
void usage(char *proc)
{
	int i;

	if (rank == 0) {
		printf("Usage: %s [-h] -d <testdir> [-n <num>]\n", proc);
		printf("          [-t <num>] [-v] [-V #] [-g]\n");
		printf("\t-h: prints this help message\n");
		printf("\t-d: the directory in which the tests will run\n");
		printf("\t-n: repeat test # times\n");
		printf("\t-t: run a particular test #\n");
		printf("\t-v: increase the verbosity level by 1\n");
		printf("\t-V: select a specific verbosity level\n");
		printf("\t-g: debug mode\n");
	}

	MPI_Initialized(&i);
	if (i)
		MPI_Finalize();
	exit(0);
}
int main(int argc, char *argv[])
{
	int i, iterations = 1, c;

	/* Check for the -h parameter before MPI_Init so the binary can be
	 * called directly, without, for instance, mpirun. */
	for (i = 1; i < argc; ++i)
		if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help"))
			usage(argv[0]);

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	/* Parse command line options */
	while (1) {
		c = getopt(argc, argv, "d:ghn:t:vV:");
		if (c == -1)
			break;

		switch (c) {
		case 'd': testdir = optarg;		break;
		case 'g': debug = 1;			break;
		case 'h': usage(argv[0]);		break;
		case 'n': iterations = atoi(optarg);	break;
		case 't': only_test = atoi(optarg);	break;
		case 'v': verbose += 1;			break;
		case 'V': verbose = atoi(optarg);	break;
		}
	}
	if (rank == 0)
		printf("%s is running with %d task(s)%s\n",
		       argv[0], size, debug ? " in DEBUG mode" : "");

	if (size < MIN_GLHOST) {
		fprintf(stderr, "Error: %d tasks running, but at least %d "
			"tasks are needed to run this test!\n",
			size, MIN_GLHOST);
		MPI_Abort(MPI_COMM_WORLD, 2);
	}
	if (testdir == NULL && rank == 0) {
		fprintf(stderr, "Please specify a test directory! "
			"(\"%s -h\" for help)\n", argv[0]);
		MPI_Abort(MPI_COMM_WORLD, 2);
	}
	for (i = 0; i < iterations; ++i) {
		if (rank == 0)
			printf("%s: Running test #%s (iter %d)\n",
			       timestamp(), argv[0], i);

		parallel_grouplock();
		MPI_Barrier(MPI_COMM_WORLD);
	}

	if (rank == 0)
		printf("%s: All tests passed!\n", timestamp());

	MPI_Finalize();
	return 0;
}
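/*
 * Typical invocation -- the launcher and mount point are site-specific,
 * so this is an illustration, not part of the test:
 *
 *	mpirun -np 5 ./parallel_grouplock -d /mnt/lustre/somedir
 *
 * At least MIN_GLHOST tasks are required; grouplock_test2 alone uses
 * ranks 0 through 4.
 */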