4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
27 * This file is part of Lustre, http://www.lustre.org/
28 * Lustre is a trademark of Sun Microsystems, Inc.
30 * lustre/tests/parallel_grouplock.c
32 * Author: You Feng <youfeng@clusterfs.com>
40 #include <sys/types.h>
41 #include <asm/types.h>
44 #include <sys/ioctl.h>
48 #include <lustre/lustre_user.h>
51 #define LPGL_BUF_LEN 8192
52 #define LPGL_TEST_ITEMS 12
56 #define MAX_WAIT_TRIES 10
57 #define WAIT_TIME 1 /* secs */
58 #define ONE_MB 1048576 /* 1 MB */
59 #define MIN_LGBUF_SIZE 536870912 /* 512 MB */
60 #define MAX_LGBUF_SIZE 536870912 /* 512 MB */
61 // #define MAX_LGBUF_SIZE 1073741824 /* 1 GB */
/* Shared scratch buffer used by the per-chunk read/write helpers. */
74 char buf[LPGL_BUF_LEN];
/* Path of the file under test; built in grouplock_file() as "testdir/name". */
77 char filename[MAX_FILENAME_LEN];
/* Scratch buffer for composing FAIL() messages (path plus short suffix). */
78 char errmsg[MAX_FILENAME_LEN+20];
87 lgbuf_size = MAX_LGBUF_SIZE;
88 for (; lgbuf_size >= MIN_LGBUF_SIZE; lgbuf_size -= ONE_MB)
89 if ((lgbuf = (char *)malloc(lgbuf_size)) != NULL)
92 FAIL("malloc of large buffer failed.\n");
100 rc = read(fd, buf, sizeof(buf));
102 pos = lseek(fd, 0, SEEK_CUR);
103 sprintf(errmsg, "read of file %s at pos %d for %zu bytes "
104 "returned %d: (%d) %s.\n",
105 filename, pos, sizeof(buf), rc, errno, strerror(errno));
107 } else if (rc != sizeof(buf)) {
108 pos = lseek(fd, 0, SEEK_CUR);
109 sprintf(errmsg, "read of file %s at pos %d for %zu bytes "
111 filename, pos, sizeof(buf), rc);
/*
 * Write one sizeof(buf)-sized chunk at offset index * sizeof(buf),
 * filling the chunk with the byte value 'index' so readers can verify
 * content later.  Short writes and errors are reported via FAIL().
 */
117 write_buf(int fd, int index)
119 int pos = index * sizeof(buf);
122 memset(buf, index, sizeof(buf));
123 lseek(fd, pos, SEEK_SET);
124 rc = write(fd, buf, sizeof(buf));
/* write(2) error: include errno detail in the failure message. */
126 sprintf(errmsg, "write of file %s at pos %d for %zu bytes "
127 "returned %d: (%d) %s.\n",
128 filename, pos, sizeof(buf), rc, errno, strerror(errno));
/* Short write (rc >= 0 but != chunk size) is also a failure. */
130 } else if (rc != sizeof(buf)) {
131 sprintf(errmsg, "write of file %s at pos %d for %zu bytes "
133 filename, pos, sizeof(buf), rc);
139 * task0 attempts GR(gid=1) -- granted immediately
140 * task1 attempts PR|PW -- blocked, goes on waiting list
141 * task2 attempts GR(gid=1) -> should be granted
142 * task2 writes to file and releases GR(gid=1)
143 * task0 waits for task2 to complete its processing
144 * task0 writes to file and releases GR(gid=1)
145 * task1 PR|PW should be granted and reads the file
/*
 * Test 1 driver: each MPI rank runs the branch for its role in the
 * scenario described in the block comment above (task0/task2 hold
 * GR(gid=1); task1's PR|PW must stay blocked until both release).
 */
147 void grouplock_test1(char *filename, int fd, int blocking_op, int unlock_op)
149 MPI_Request req1, req2;
150 int iter, flag1, flag2, temp1, temp2;
/* Acquire the group lock; FAIL() with errno detail on ioctl error. */
154 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
156 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
157 filename, errno, strerror(errno));
162 MPI_Barrier(MPI_COMM_WORLD);
/* task1: issue the blocking PR (read) or PW (write) operation. */
166 if (blocking_op == WRITE) {
168 lseek(fd, 0, SEEK_SET);
171 for (i = 0; i <= 2; i++)
/* Report completion to task0 (payload is just the gid value). */
174 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
177 /* Wait for task1 to progress. This could be racy. */
180 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
182 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
183 filename, errno, strerror(errno));
/* Release via close(2) or the explicit UNLOCK ioctl, per unlock_op. */
189 if (unlock_op == CLOSE)
192 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
196 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
197 (unlock_op == CLOSE) ? "close" : "ioctl",
198 filename, errno, strerror(errno));
201 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
204 /* PR|PW task will tell us when it completes */
205 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
206 /* 2nd locking task will tell us when it completes */
207 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
209 /* Wait for task2 to complete. */
210 iter = MAX_WAIT_TRIES;
214 FAIL("2nd locking task is not progressing\n");
/* While waiting, task1 must NOT have progressed: the group lock is held. */
219 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
221 FAIL("PR|PW task progressed even though GROUP "
225 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
228 /* Make sure task1 is still waiting. */
229 iter = MAX_WAIT_TRIES;
233 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
235 FAIL("PR|PW task progressed even though "
236 "GROUP lock is held\n");
242 /* Now we need to release the lock */
243 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
245 "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
246 filename, errno, strerror(errno));
250 /* Wait for task1 to complete. */
251 iter = MAX_WAIT_TRIES;
255 FAIL("PR|PW task is not progressing even "
256 "though GROUP lock was released\n");
260 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
268 * task0 attempts GR(gid=1) -- granted immediately
269 * task1 attempts GR(gid=2) -- blocked
270 * task2 attempts PR|PW -- blocked
271 * task3 attempts GR(gid=2) -- blocked
272 * task4 attempts GR(gid=1) -- should be granted
273 * task0,4 writes to file and releases GR(gid=1) --
274 * this allows task2 & 3's GR locks to be granted; task4 remains blocked.
275 * task1 & 3 write to file and release GR(gid=2)
276 * task2 PR|PW should be granted and reads the file.
/*
 * Test 2 driver: two group-lock groups (gid=1 held by task0/task4,
 * gid=2 requested by task1/task3) plus a PR|PW task (task2); each MPI
 * rank executes its role from the scenario comment above.
 */
278 void grouplock_test2(char *filename, int fd, int blocking_op, int unlock_op)
280 int i, iter, rc, gid = 1;
281 int flag1, flag2, flag3, flag4;
282 int temp1, temp2, temp3, temp4;
283 MPI_Request req1, req2, req3, req4;
/* Acquire GR(gid=1); FAIL() with errno detail on error. */
286 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
288 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
289 filename, errno, strerror(errno));
294 MPI_Barrier(MPI_COMM_WORLD);
298 /* Wait for task2 to issue its read request. */
302 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
304 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
305 filename, errno, strerror(errno));
/* Report to task0 (message payload is just the gid value). */
311 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
313 /* Do not release the locks until task 0 is ready to watch
314 for reading task only */
315 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
/* Release via close(2) or the UNLOCK ioctl, per unlock_op. */
318 if (unlock_op == CLOSE)
321 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
324 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
325 (unlock_op == CLOSE) ? "close" : "ioctl",
326 filename, errno, strerror(errno));
331 /* Give task1 a chance to request its GR lock. */
334 if (blocking_op == WRITE) {
336 lseek(fd, 0, SEEK_SET);
339 for (i = 0; i <= 3; i++)
342 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
345 /* Give task1 & 3 a chance to queue their GR locks. */
348 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
350 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
351 filename, errno, strerror(errno));
357 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
360 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
361 (unlock_op == CLOSE) ? "close" : "ioctl",
362 filename, errno, strerror(errno));
366 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
369 /* locking tasks will tell us when they complete */
370 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
371 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
372 MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
373 MPI_Irecv(&temp4, 1, MPI_INT, 4, 1, MPI_COMM_WORLD, &req4);
375 /* Make sure all tasks that should be blocked are waiting. */
376 iter = MAX_WAIT_TRIES;
380 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
381 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
382 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
/* flag1/flag3 set would mean a gid=2 task got its lock too early. */
383 if (flag1 || flag3) {
384 FAIL("GROUP (gid=2) task progressed even though"
385 " GROUP (gid=1) lock is held.\n");
388 FAIL("PR|PW task progressed even though "
389 "GROUP (gid=1) lock is still held\n");
393 /* Wait for task4 to signal it has completed. */
394 iter = MAX_WAIT_TRIES;
398 FAIL("2nd task GROUP(gid=1) not progressing\n");
401 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
402 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
403 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
404 MPI_Test(&req4, &flag4, MPI_STATUS_IGNORE);
405 if (flag1 || flag3) {
406 FAIL("GROUP (gid=2) task progressed even though"
407 " GROUP (gid=1) lock is held.\n");
410 FAIL("PR|PW task progressed even though "
411 "GROUP (gid=1) lock is still held\n");
417 /* Now let's release first lock */
/* NOTE(review): unlike the other error paths here, this message
 * reports only rc and omits errno/strerror — confirm intentional. */
418 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
419 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
420 "returned %d", filename, rc);
424 /* Wait for task1 & 3 to signal they have their lock. */
425 iter = MAX_WAIT_TRIES;
429 FAIL("GROUP(gid=2) tasks not progressing\n");
432 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
433 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
434 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
436 fprintf(stderr, "task2 %d\n", flag2);
437 FAIL("PR task progressed even though GROUP lock"
438 " was on the queue task\n");
440 } while (!(flag1 && flag3));
442 /* Make sure task2 is still waiting. */
443 iter = MAX_WAIT_TRIES;
447 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
449 FAIL("PR task progressed even though GR(gid=2) "
450 "lock was active.\n");
454 /* Tell task1 & 3 to release their GR(gid=2) lock. */
455 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
456 MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
458 /* Wait for task2 (PR) to complete. */
459 iter = MAX_WAIT_TRIES;
463 FAIL("reading task is not progressing even "
464 "though GROUP locks are released\n");
468 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
475 * Tests a bug that once existed in the group lock code;
476 * i.e. that a GR lock request on a O_NONBLOCK fd could fail even though
477 * there is no blocking GROUP lock ahead of it on the waitq.
479 * task0 starts a large write (PW). This test could be racy if this
480 * write finishes too quickly.
481 * task1 attempts GR(gid=1) -- blocked
482 * task2 attempts GR(gid=2) with a O_NONBLOCK fs. should not fail.
/*
 * Test 3 driver: regression test for a group-lock bug where a GR
 * request on an O_NONBLOCK fd could fail with no blocking GROUP lock
 * ahead of it (see scenario comment above).
 */
484 void grouplock_test3(char *filename, int fd)
486 MPI_Request req1, req2;
487 int iter, flag1, flag2, temp1, temp2;
/* task2 switches its fd to non-blocking mode before requesting GR. */
492 } else if (rank == 2) {
493 rc = fcntl(fd, F_SETFL, O_NONBLOCK);
495 sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
496 errno, strerror(errno));
501 MPI_Barrier(MPI_COMM_WORLD);
509 /* Racy: we have to sleep just long enough for
510 * task0's write to start. */
513 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
515 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
516 filename, errno, strerror(errno));
520 /* tell task0 we have the lock. */
521 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
523 /* the close of fd will release the lock. */
/* task0: one large write from the pre-allocated lgbuf. */
526 rc = write(fd, lgbuf, lgbuf_size);
528 sprintf(errmsg, "write of file %s for %d bytes "
529 "returned %d: (%d) %s.\n",
530 filename, lgbuf_size,
531 rc, errno, strerror(errno));
/* A short write is also a failure. */
533 } else if (rc != lgbuf_size) {
534 sprintf(errmsg, "write of file %s for %d bytes "
536 filename, lgbuf_size, rc);
540 /* GR tasks will tell us when they complete */
541 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
542 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
544 /* Wait for task1 & 2 to complete. */
545 iter = MAX_WAIT_TRIES;
549 FAIL("GR(gid=1) tasks are not progressing even "
550 "no conflicting locks exist.\n");
554 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
555 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
556 } while (!(flag1 && flag2));
562 * Tests a bug that once existed in the group lock code;
563 * i.e. extent locks without O_NONBLOCK that go on the waitq before a group
564 * lock request came in and was granted. The extent lock would time out and
567 * task0 starts a large write (PW). This test could be racy if this
568 * write finishes too quickly.
569 * task1 attempts PR -- blocked
570 * task2 attempts GR(gid=1) -- blocked
571 * task0 completes write
572 * task1 should wakeup and complete its read
573 * task2 should wake up after task1 completes.
/*
 * Test 4 driver: regression test for extent locks queued before a
 * group-lock request (see scenario comment above).  task0 writes
 * large data, task1 reads (PR), task2 takes GR(gid=1).
 */
575 void grouplock_test4(char *filename, int fd)
578 int iter, flag1, temp1;
584 MPI_Barrier(MPI_COMM_WORLD);
588 /* Racy: we have to sleep just long enough for
589 * task0's write to start. */
590 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
595 /* tell task2 to go. */
596 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
600 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
603 /* Give task0 & 1 a chance to start. */
604 MPI_Recv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
609 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
611 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
612 filename, errno, strerror(errno));
616 /* tell task0 we have the lock. */
617 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
619 /* Do not release the locks until task 0 tells us to.
620 for reading task only */
621 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
624 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
627 "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
628 filename, errno, strerror(errno));
633 /* tell task1 to go to avoid race */
634 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
/* task0: one large write from the pre-allocated lgbuf. */
635 rc = write(fd, lgbuf, lgbuf_size);
637 sprintf(errmsg, "write of file %s for %d bytes "
638 "returned %d: (%d) %s.\n",
639 filename, lgbuf_size,
640 rc, errno, strerror(errno));
642 } else if (rc != lgbuf_size) {
643 sprintf(errmsg, "write of file %s for %d bytes "
645 filename, lgbuf_size, rc);
649 /* wait for task2 to get its lock. */
650 MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
653 /* Tell task2 it's ok to release its GR(gid=1) lock. */
654 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
656 /* wait a really long time. */
657 sleep(180 * WAIT_TIME);
659 /* PR task will tell us when it completes */
660 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
662 /* Make sure the PR task is successful and doesn't hang.
664 * XXX - To test properly we need to make sure the read
665 * gets queued before task2's group lock request.
666 * You may need to increase lgbuf_size.
668 iter = MAX_WAIT_TRIES;
672 FAIL("PR task is hung !\n");
676 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
684 * task0 attempts GR(gid=1) -- granted
685 * task1 attempts PR on non-blocking fd -> should return -EWOULDBLOCK
686 * task2 attempts PW on non-blocking fd -> should return -EWOULDBLOCK
687 * task3 attempts GR(gid=2) on non-blocking fd -> should return -EWOULDBLOCK
/*
 * Non-blocking test: while task0 holds GR(gid=1), tasks 1-3 each make a
 * conflicting request on an O_NONBLOCK fd and must get -1/EWOULDBLOCK
 * instead of blocking (see scenario comment above).
 */
689 void grouplock_nonblock_test(char *filename, int fd)
691 MPI_Request req1, req2, req3;
692 int iter, flag1, flag2, flag3, temp1, temp2, temp3;
/* task0 acquires GR(gid=1) and holds it for the whole test. */
696 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
698 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
699 filename, errno, strerror(errno));
/* Other ranks switch their fd to non-blocking mode first. */
704 rc = fcntl(fd, F_SETFL, O_NONBLOCK);
706 sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
707 errno, strerror(errno));
711 MPI_Barrier(MPI_COMM_WORLD);
/* task1: non-blocking read must fail with EWOULDBLOCK. */
715 rc = read(fd, buf, sizeof(buf));
716 if ((rc != -1) || (errno != EWOULDBLOCK)) {
717 FAIL("PR lock succeeded while incompatible "
718 "GROUP LOCK (gid=1) is still held\n");
721 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* task2: non-blocking write must fail with EWOULDBLOCK. */
724 rc = write(fd, buf, sizeof(buf));
725 if ((rc != -1) || (errno != EWOULDBLOCK)) {
726 FAIL("PW lock succeeded while incompatible "
727 "GROUP LOCK (gid=1) is still held\n");
730 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* task3: non-blocking GR with a different gid must also fail. */
734 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
735 if ((rc != -1) || (errno != EWOULDBLOCK)) {
736 FAIL("GROUP_LOCK (gid=2) succeeded while incompatible "
737 "GROUP LOCK (gid=1) is still held.\n");
740 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
743 /* reading task will tell us when it completes */
744 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
745 /* writing task will tell us when it completes */
746 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
747 /* 2nd locking task will tell us when it completes */
748 MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
750 iter = MAX_WAIT_TRIES;
754 FAIL("non-blocking tasks are not progressing\n");
757 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
758 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
759 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
760 } while (!(flag1 && flag2 && flag3));
/* All three non-blocking requests returned; drop the group lock. */
762 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
763 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s",
771 /* Just test some error paths with invalid requests */
/*
 * Error-path test: invalid group-lock requests must fail with EINVAL
 * (double lock on the same fd, unlock with the wrong gid, unlock of a
 * never-locked fd).
 */
772 void grouplock_errorstest(char *filename, int fd)
776 MPI_Barrier(MPI_COMM_WORLD);
/* Take a valid group lock to set up the double-lock cases. */
780 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
782 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
783 filename, errno, strerror(errno));
787 /* second group lock on same fd, same gid */
788 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
/* Must fail, and specifically with EINVAL. */
789 if (errno != EINVAL) {
790 sprintf(errmsg, "Double GROUP lock failed "
791 "with errno %d instead of EINVAL\n",
796 FAIL("Taking second GROUP lock on same fd succeed\n");
799 /* second group lock on same fd, different gid */
800 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid + 1)) == -1) {
801 if (errno != EINVAL) {
802 sprintf(errmsg, "Double GROUP lock with "
803 "different gid failed with errno %d "
804 "instead of EINVAL\n", errno);
808 FAIL("Taking second GROUP lock on same fd, with "
809 "different gid, succeeded.\n");
812 /* GROUP unlock with wrong gid */
813 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid + 1)) == -1) {
814 if (errno != EINVAL) {
815 sprintf(errmsg, "GROUP_UNLOCK with wrong gid "
816 "failed with errno %d instead of "
/* Correctly unlock with the matching gid. */
821 FAIL("GROUP unlock with wrong gid succeed\n");
824 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
825 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
826 "returned %d.", filename, rc);
832 /* unlock of never locked fd */
833 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
834 if (errno != EINVAL) {
835 sprintf(errmsg, "GROUP_UNLOCK on never locked "
836 "fd failed with errno %d instead of "
841 FAIL("GROUP unlock on never locked fd succeed\n");
/*
 * Create/open the test file under testdir and dispatch to the
 * numbered subtest (1..LPGL_TEST_ITEMS).  Unknown subtest numbers
 * are reported via errmsg/FAIL.
 */
847 void grouplock_file(char *name, int subtest)
850 int flags = O_CREAT|O_RDWR|O_SYNC|O_TRUNC;
/* Build the global 'filename' from the test directory and name. */
853 sprintf(filename, "%s/%s", testdir, name);
855 if ((fd = open(filename, flags, mode)) == -1) {
856 sprintf(errmsg, "open of file %s: (%d) %s.\n",
857 filename, errno, strerror(errno));
861 MPI_Barrier(MPI_COMM_WORLD);
/* Subtests 1-4: test1 in all READ/WRITE x IOCTL/CLOSE combinations. */
865 grouplock_test1(filename, fd, READ, IOCTL);
868 grouplock_test1(filename, fd, READ, CLOSE);
871 grouplock_test1(filename, fd, WRITE, IOCTL);
874 grouplock_test1(filename, fd, WRITE, CLOSE);
/* Subtests 5-8: test2 in the same four combinations. */
877 grouplock_test2(filename, fd, READ, IOCTL);
880 grouplock_test2(filename, fd, READ, CLOSE);
883 grouplock_test2(filename, fd, WRITE, IOCTL);
886 grouplock_test2(filename, fd, WRITE, CLOSE);
/* Remaining subtests: non-blocking, error paths, tests 3 and 4. */
889 grouplock_nonblock_test(filename, fd);
892 grouplock_errorstest(filename, fd);
895 grouplock_test3(filename, fd);
898 grouplock_test4(filename, fd);
901 sprintf(errmsg, "wrong subtest number %d (should be <= %d)",
902 subtest, LPGL_TEST_ITEMS);
911 MPI_Barrier(MPI_COMM_WORLD);
/*
 * Run either the single subtest selected with -t (only_test) or all
 * LPGL_TEST_ITEMS subtests in order, labelling each via teststr.
 */
914 void parallel_grouplock(void)
920 sprintf(teststr, "subtest %d", only_test);
922 grouplock_file("parallel_grouplock", only_test);
925 for (i = 1; i <= LPGL_TEST_ITEMS; i++) {
926 sprintf(teststr, "subtest %d", i);
928 grouplock_file("parallel_grouplock", i);
/*
 * Print the command-line help for 'proc' to stdout.
 * NOTE(review): the trailing MPI_Finalize guard suggests 'i' records
 * whether MPI_Init already ran — confirm against the elided lines.
 */
934 void usage(char *proc)
939 printf("Usage: %s [-h] -d <testdir> [-n <num>]\n", proc);
940 printf(" [-t <num>] [-v] [-V #] [-g]\n");
941 printf("\t-h: prints this help message\n");
942 printf("\t-d: the directory in which the tests will run\n");
943 printf("\t-n: repeat test # times\n");
944 printf("\t-t: run a particular test #\n");
945 printf("\t-v: increase the verbosity level by 1\n");
946 printf("\t-V: select a specific verbosity level\n");
947 printf("\t-g: debug mode\n");
951 if (i) MPI_Finalize();
/*
 * Entry point: handle -h before MPI_Init, parse options, validate the
 * rank count and test directory, then run 'iterations' rounds of the
 * parallel group-lock test suite.
 */
955 int main(int argc, char *argv[])
957 int i, iterations = 1, c;
962 /* Check for -h parameter before MPI_Init so the binary can be
963 called directly, without, for instance, mpirun */
964 for (i = 1; i < argc; ++i) {
965 if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help"))
969 MPI_Init(&argc, &argv);
970 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
971 MPI_Comm_size(MPI_COMM_WORLD, &size);
973 /* Parse command line options */
975 c = getopt(argc, argv, "d:ghn:t:vV:");
/* NOTE(review): atoi() silently accepts malformed numbers; strtol
 * with error checking would be stricter — confirm desired behavior. */
990 iterations = atoi(optarg);
993 only_test = atoi(optarg);
999 verbose = atoi(optarg);
1005 printf("%s is running with %d task(es) %s\n",
1006 argv[0], size, debug ? "in DEBUG mode" : "\b\b");
/* The suite needs at least MIN_GLHOST ranks; abort otherwise. */
1008 if (size < MIN_GLHOST) {
1009 fprintf(stderr, "Error: "
1010 "%d tasks run, but should be at least %d tasks to run "
1011 "the test!\n", size, MIN_GLHOST);
1012 MPI_Abort(MPI_COMM_WORLD, 2);
/* -d <testdir> is mandatory; only rank 0 prints the message. */
1015 if (testdir == NULL && rank == 0) {
1016 fprintf(stderr, "Please specify a test directory! "
1017 "(\"%s -h\" for help)\n",
1019 MPI_Abort(MPI_COMM_WORLD, 2);
1024 for (i = 0; i < iterations; ++i) {
1026 printf("%s: Running test #%s(iter %d)\n",
1027 timestamp(), argv[0], i);
1029 parallel_grouplock();
1030 MPI_Barrier(MPI_COMM_WORLD);
1034 printf("%s: All tests passed!\n", timestamp());