4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
31 * This file is part of Lustre, http://www.lustre.org/
32 * Lustre is a trademark of Sun Microsystems, Inc.
34 * lustre/tests/parallel_grouplock.c
36 * Author: You Feng <youfeng@clusterfs.com>
44 #include <sys/types.h>
45 #include <asm/types.h>
48 #include <sys/ioctl.h>
52 #include <lustre/lustre_user.h>
55 #define LPGL_BUF_LEN 8192
56 #define LPGL_TEST_ITEMS 12
60 #define MAX_WAIT_TRIES 10
61 #define WAIT_TIME 1 /* secs */
62 #define ONE_MB 1048576 /* 1 MB */
63 #define MIN_LGBUF_SIZE 536870912 /* 512 MB */
64 #define MAX_LGBUF_SIZE 536870912 /* 512 MB */
65 // #define MAX_LGBUF_SIZE 1073741824 /* 1 GB */
/* Shared per-record I/O buffer used by the read/write helpers below. */
78 char buf[LPGL_BUF_LEN];
/* Path of the file the current subtest operates on. */
81 char filename[MAX_FILENAME_LEN];
/* Scratch space for building FAIL() messages (filename plus suffix). */
82 char errmsg[MAX_FILENAME_LEN+20];
/* Allocate the "large" write buffer, backing off one megabyte at a
 * time from MAX_LGBUF_SIZE down to MIN_LGBUF_SIZE.  NOTE(review):
 * with MIN == MAX (both 512 MB) this loop tries exactly one size. */
91 lgbuf_size = MAX_LGBUF_SIZE;
92 for (; lgbuf_size >= MIN_LGBUF_SIZE; lgbuf_size -= ONE_MB)
93 if ((lgbuf = (char *)malloc(lgbuf_size)) != NULL)
/* No size in [MIN_LGBUF_SIZE, MAX_LGBUF_SIZE] could be allocated. */
96 FAIL("malloc of large buffer failed.\n");
/* Read one LPGL_BUF_LEN record into the shared buffer; a read error
 * or a short read fails the test via FAIL(errmsg). */
104 rc = read(fd, buf, sizeof(buf));
/* Error path: include the current file offset in the message. */
106 pos = lseek(fd, 0, SEEK_CUR);
107 sprintf(errmsg, "read of file %s at pos %d for %zu bytes "
108 "returned %d: (%d) %s.\n",
109 filename, pos, sizeof(buf), rc, errno, strerror(errno));
/* A short read (rc != sizeof(buf)) is also treated as a failure. */
111 } else if (rc != sizeof(buf)) {
112 pos = lseek(fd, 0, SEEK_CUR);
113 sprintf(errmsg, "read of file %s at pos %d for %zu bytes "
115 filename, pos, sizeof(buf), rc);
/* Write one LPGL_BUF_LEN record, filled with the record index, at
 * offset index * LPGL_BUF_LEN; FAIL on error or short write. */
121 write_buf(int fd, int index)
123 int pos = index * sizeof(buf);
/* Fill the buffer with the record index so each record is distinct. */
126 memset(buf, index, sizeof(buf));
127 lseek(fd, pos, SEEK_SET);
128 rc = write(fd, buf, sizeof(buf));
/* write() error: report position, size and errno detail. */
130 sprintf(errmsg, "write of file %s at pos %d for %zu bytes "
131 "returned %d: (%d) %s.\n",
132 filename, pos, sizeof(buf), rc, errno, strerror(errno));
/* A short write is also fatal for the test. */
134 } else if (rc != sizeof(buf)) {
135 sprintf(errmsg, "write of file %s at pos %d for %zu bytes "
137 filename, pos, sizeof(buf), rc);
143 * task0 attempts GR(gid=1) -- granted immediately
144 * task1 attempts PR|PW -- blocked, goes on waiting list
145 * task2 attempts GR(gid=1) -> should be granted
146 * task2 writes to file and releases GR(gid=1)
147 * task0 waits for task2 to complete its processing
148 * task0 writes to file and releases GR(gid=1)
149 * task1 PR|PW should be granted and reads the file
151 void grouplock_test1(char *filename, int fd, int blocking_op, int unlock_op)
153 MPI_Request req1, req2;
154 int iter, flag1, flag2, temp1, temp2;
/* task0: take the group lock (gid=1) before the other ranks run. */
158 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
160 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
161 filename, errno, strerror(errno));
/* Rendezvous: the lock is held before task1 issues its request. */
166 MPI_Barrier(MPI_COMM_WORLD);
/* task1: issue the conflicting PR|PW operation (read or write
 * depending on blocking_op); it should block on the group lock. */
170 if (blocking_op == WRITE) {
172 lseek(fd, 0, SEEK_SET);
175 for (i = 0; i <= 2; i++)
/* Notify task0 that the blocking operation has completed. */
178 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
181 /* Wait for task1 to progress. This could be racey. */
/* task2: a second GR(gid=1) request on the same gid must be
 * granted immediately (group locks with equal gid are compatible). */
184 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
186 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
187 filename, errno, strerror(errno));
/* Release either via close(fd) or via the GROUP_UNLOCK ioctl,
 * selected by the unlock_op parameter. */
193 if (unlock_op == CLOSE)
196 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
200 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
201 (unlock_op == CLOSE) ? "close" : "ioctl",
202 filename, errno, strerror(errno));
/* Notify task0 that task2 is done. */
205 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
208 /* PR|PW task will tell us when it completes */
209 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
210 /* 2nd locking task will tell us when it completes */
211 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
213 /* Wait for task2 to complete. */
214 iter = MAX_WAIT_TRIES;
/* Bounded poll: give up after MAX_WAIT_TRIES iterations. */
218 FAIL("2nd locking task is not progressing\n");
/* While waiting, task1 must NOT have progressed: the group lock
 * still excludes its PR|PW request. */
223 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
225 FAIL("PR|PW task progressed even though GROUP "
229 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
232 /* Make sure task1 is still waiting. */
233 iter = MAX_WAIT_TRIES;
237 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
239 FAIL("PR|PW task progressed even though "
240 "GROUP lock is held\n");
246 /* Now we need to release the lock */
247 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
249 "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
250 filename, errno, strerror(errno));
254 /* Wait for task1 to complete. */
255 iter = MAX_WAIT_TRIES;
/* With the group lock released, task1's PR|PW must now complete. */
259 FAIL("PR|PW task is not progressing even "
260 "though GROUP lock was released\n");
264 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
272 * task0 attempts GR(gid=1) -- granted immediately
273 * task1 attempts GR(gid=2) -- blocked
274 * task2 attempts PR|PW -- blocked
275 * task3 attempts GR(gid=2) -- blocked
276 * task4 attempts GR(gid=1) -- should be granted
277 * task0,4 writes to file and releases GR(gid=1) --
278 * this allows task2 & 3's GR locks to be granted; task4 remains blocked.
279 * task1 & 3 write to file and release GR(gid=2)
280 * task2 PR|PW should be granted and reads the file.
282 void grouplock_test2(char *filename, int fd, int blocking_op, int unlock_op)
284 int i, iter, rc, gid = 1;
285 int flag1, flag2, flag3, flag4;
286 int temp1, temp2, temp3, temp4;
287 MPI_Request req1, req2, req3, req4;
/* task0: hold GR(gid=1); everything with a different gid conflicts. */
290 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
292 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
293 filename, errno, strerror(errno));
298 MPI_Barrier(MPI_COMM_WORLD);
302 /* Wait for task2 to issue its read request. */
/* Queue a GR(gid=2) request; it should block behind gid=1. */
306 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
308 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
309 filename, errno, strerror(errno));
/* Notify task0 that this rank got its lock. */
315 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
317 /* Do not release the locks until task 0 is ready to watch
318 for reading task only */
319 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
/* Release via close() or the unlock ioctl per unlock_op. */
322 if (unlock_op == CLOSE)
325 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
328 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
329 (unlock_op == CLOSE) ? "close" : "ioctl",
330 filename, errno, strerror(errno));
335 /* Give task1 a chance to request its GR lock. */
/* task2: the blocking PR|PW operation (read or write). */
338 if (blocking_op == WRITE) {
340 lseek(fd, 0, SEEK_SET);
343 for (i = 0; i <= 3; i++)
346 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
349 /* Give task1 & 3 a chance to queue their GR locks. */
/* task4: GR(gid=1) matches task0's gid, so it must be granted
 * despite the queued gid=2 and PR|PW requests ahead of it. */
352 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
354 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
355 filename, errno, strerror(errno));
361 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
364 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
365 (unlock_op == CLOSE) ? "close" : "ioctl",
366 filename, errno, strerror(errno));
370 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
373 /* locking tasks will tell us when they complete */
374 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
375 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
376 MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
377 MPI_Irecv(&temp4, 1, MPI_INT, 4, 1, MPI_COMM_WORLD, &req4);
379 /* Make sure all tasks that should be blocked are waiting. */
380 iter = MAX_WAIT_TRIES;
384 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
385 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
386 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
/* flag1/flag3: gid=2 lockers; flag2: the PR|PW task. None may
 * progress while GR(gid=1) is still held. */
387 if (flag1 || flag3) {
388 FAIL("GROUP (gid=2) task progressed even though"
389 " GROUP (gid=1) lock is held.\n");
392 FAIL("PR|PW task progressed even though "
393 "GROUP (gid=1) lock is still held\n");
397 /* Wait for task4 to signal it has completed. */
398 iter = MAX_WAIT_TRIES;
402 FAIL("2nd task GROUP(gid=1) not progressing\n");
405 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
406 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
407 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
408 MPI_Test(&req4, &flag4, MPI_STATUS_IGNORE);
409 if (flag1 || flag3) {
410 FAIL("GROUP (gid=2) task progressed even though"
411 " GROUP (gid=1) lock is held.\n");
414 FAIL("PR|PW task progressed even though "
415 "GROUP (gid=1) lock is still held\n");
421 /* Now let's release first lock */
422 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
423 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
424 "returned %d", filename, rc);
428 /* Wait for task1 & 3 to signal they have their lock. */
429 iter = MAX_WAIT_TRIES;
433 FAIL("GROUP(gid=2) tasks not progressing\n");
436 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
437 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
438 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
/* The PR|PW task must still be excluded by the gid=2 locks. */
440 fprintf(stderr, "task2 %d\n", flag2);
441 FAIL("PR task progressed even though GROUP lock"
442 " was on the queue task\n");
444 } while (!(flag1 && flag3));
446 /* Make sure task2 is still waiting. */
447 iter = MAX_WAIT_TRIES;
451 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
453 FAIL("PR task progressed even though GR(gid=2) "
454 "lock was active.\n");
458 /* Tell task1 & 3 to release their GR(gid=2) lock. */
459 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
460 MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
462 /* Wait for task2 (PR) to complete. */
463 iter = MAX_WAIT_TRIES;
467 FAIL("reading task is not progressing even "
468 "though GROUP locks are released\n");
472 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
479 * Tests a bug that once existed in the group lock code;
480 * i.e. that a GR lock request on a O_NONBLOCK fd could fail even though
481 * there is no blocking GROUP lock ahead of it on the waitq.
483 * task0 starts a large write (PW). this test could be racey if this
484 * write finishes too quickly.
485 * task1 attempts GR(gid=1) -- blocked
486 * task2 attempts GR(gid=2) with a O_NONBLOCK fs. should not fail.
488 void grouplock_test3(char *filename, int fd)
490 MPI_Request req1, req2;
491 int iter, flag1, flag2, temp1, temp2;
/* rank 2 uses a non-blocking fd for its group lock request. */
496 } else if (rank == 2) {
497 rc = fcntl(fd, F_SETFL, O_NONBLOCK);
499 sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
500 errno, strerror(errno));
505 MPI_Barrier(MPI_COMM_WORLD);
513 /* Racey, we have to sleep just long enough for
514 * task0's write to start. */
/* Request the group lock behind task0's in-flight write. */
517 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
519 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
520 filename, errno, strerror(errno));
524 /* tell task0 we have the lock. */
525 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
527 /* the close of fd will release the lock. */
/* task0: the large write the GR requests queue behind. */
530 rc = write(fd, lgbuf, lgbuf_size);
532 sprintf(errmsg, "write of file %s for %d bytes "
533 "returned %d: (%d) %s.\n",
534 filename, lgbuf_size,
535 rc, errno, strerror(errno));
/* Short write is fatal too. */
537 } else if (rc != lgbuf_size) {
538 sprintf(errmsg, "write of file %s for %d bytes "
540 filename, lgbuf_size, rc);
544 /* GR tasks will tell us when they complete */
545 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
546 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
548 /* Wait for task1 & 2 to complete. */
549 iter = MAX_WAIT_TRIES;
553 FAIL("GR(gid=1) tasks are not progressing even "
554 "no conflicting locks exist.\n");
558 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
559 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
560 } while (!(flag1 && flag2));
566 * Tests a bug that once existed in the group lock code;
567 * i.e. extent locks without O_NONBLOCK that go on the waitq before a group
568 * lock request came in and was granted. The extent lock would time out and
571 * task0 starts a large write (PW). this test could be racey if this
572 * write finishes too quickly.
573 * task1 attempts PR -- blocked
574 * task2 attempts GR(gid=1) -- blocked
575 * task0 completes write
576 * task1 should wakeup and complete its read
577 * task2 should wakeup and after task1 complete.
579 void grouplock_test4(char *filename, int fd)
582 int iter, flag1, temp1;
588 MPI_Barrier(MPI_COMM_WORLD);
592 /* Racey, we have to sleep just long enough for
593 * task0's write to start. */
594 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
599 /* tell task2 to go. */
600 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
/* Report this rank's completion back to task0. */
604 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
607 /* Give task0 & 1 a chance to start. */
608 MPI_Recv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
/* task2: queue the GR(gid=1) request behind task1's PR. */
613 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
615 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
616 filename, errno, strerror(errno));
620 /* tell task0 we have the lock. */
621 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
623 /* Do not release the lock until task 0 tells us to
624 (reading-task flow only). */
625 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
628 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
631 "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
632 filename, errno, strerror(errno));
637 /* tell task1 to go to avoid race */
638 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
/* task0: the large blocking write both other tasks queue behind. */
639 rc = write(fd, lgbuf, lgbuf_size);
641 sprintf(errmsg, "write of file %s for %d bytes "
642 "returned %d: (%d) %s.\n",
643 filename, lgbuf_size,
644 rc, errno, strerror(errno));
646 } else if (rc != lgbuf_size) {
647 sprintf(errmsg, "write of file %s for %d bytes "
649 filename, lgbuf_size, rc);
653 /* wait for task2 to get its lock. */
654 MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
657 /* Tell task2 it's ok to release its GR(gid=1) lock. */
658 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
660 /* wait a really long time. */
661 sleep(180 * WAIT_TIME);
663 /* PR task will tell us when it completes */
664 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
666 /* Make sure the PR task is successful and doesn't hang.
668 * XXX - To test properly we need to make sure the read
669 * gets queued before task2's group lock request.
670 * You may need to increase lgbuf_size.
672 iter = MAX_WAIT_TRIES;
676 FAIL("PR task is hung !\n");
680 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
688 * task0 attempts GR(gid=1) -- granted
689 * task1 attempts PR on non-blocking fd -> should return -EWOULDBLOCK
690 * task2 attempts PW on non-blocking fd -> should return -EWOULDBLOCK
691 * task3 attempts GR(gid=2) on non-blocking fd -> should return -EWOULDBLOCK
693 void grouplock_nonblock_test(char *filename, int fd)
695 MPI_Request req1, req2, req3;
696 int iter, flag1, flag2, flag3, temp1, temp2, temp3;
/* task0: hold GR(gid=1) so every other request conflicts. */
700 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
702 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
703 filename, errno, strerror(errno));
/* The other ranks switch their fd to non-blocking mode. */
708 rc = fcntl(fd, F_SETFL, O_NONBLOCK);
710 sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
711 errno, strerror(errno));
715 MPI_Barrier(MPI_COMM_WORLD);
/* task1: non-blocking read must fail at once with EWOULDBLOCK. */
719 rc = read(fd, buf, sizeof(buf));
720 if ((rc != -1) || (errno != EWOULDBLOCK)) {
721 FAIL("PR lock succeeded while incompatible "
722 "GROUP LOCK (gid=1) is still held\n");
725 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* task2: non-blocking write must fail the same way. */
728 rc = write(fd, buf, sizeof(buf));
729 if ((rc != -1) || (errno != EWOULDBLOCK)) {
730 FAIL("PW lock succeeded while incompatible "
731 "GROUP LOCK (gid=1) is still held\n");
734 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* task3: a conflicting-gid GROUP_LOCK request must also fail. */
738 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
739 if ((rc != -1) || (errno != EWOULDBLOCK)) {
740 FAIL("GROUP_LOCK (gid=2) succeeded while incompatible "
741 "GROUP LOCK (gid=1) is still held.\n");
744 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
747 /* reading task will tell us when it completes */
748 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
749 /* writing task will tell us when it completes */
750 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
751 /* 2nd locking task will tell us when it completes */
752 MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
/* Bounded wait for all three non-blocking tasks to report back. */
754 iter = MAX_WAIT_TRIES;
758 FAIL("non-blocking tasks are not progressing\n");
761 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
762 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
763 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
764 } while (!(flag1 && flag2 && flag3));
/* Drop the group lock now that the negative checks are done. */
766 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
767 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s",
775 /* Just test some error paths with invalid requests */
776 void grouplock_errorstest(char *filename, int fd)
780 MPI_Barrier(MPI_COMM_WORLD);
/* Take an initial group lock to exercise double-lock behaviour. */
784 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
786 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
787 filename, errno, strerror(errno));
791 /* second group lock on same fd, same gid */
792 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
/* Expected failure mode is EINVAL; any other errno is a bug. */
793 if (errno != EINVAL) {
794 sprintf(errmsg, "Double GROUP lock failed "
795 "with errno %d instead of EINVAL\n",
/* Success here means the kernel wrongly granted a second lock. */
800 FAIL("Taking second GROUP lock on same fd succeed\n");
803 /* second group lock on same fd, different gid */
804 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid + 1)) == -1) {
805 if (errno != EINVAL) {
806 sprintf(errmsg, "Double GROUP lock with "
807 "different gid failed with errno %d "
808 "instead of EINVAL\n", errno);
812 FAIL("Taking second GROUP lock on same fd, with "
813 "different gid, succeeded.\n");
816 /* GROUP unlock with wrong gid */
817 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid + 1)) == -1) {
818 if (errno != EINVAL) {
819 sprintf(errmsg, "GROUP_UNLOCK with wrong gid "
820 "failed with errno %d instead of "
/* A wrong-gid unlock must not succeed. */
825 FAIL("GROUP unlock with wrong gid succeed\n");
/* The legitimate unlock with the correct gid must succeed. */
828 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
829 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
830 "returned %d.", filename, rc);
836 /* unlock of never locked fd */
837 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
838 if (errno != EINVAL) {
839 sprintf(errmsg, "GROUP_UNLOCK on never locked "
840 "fd failed with errno %d instead of "
845 FAIL("GROUP unlock on never locked fd succeed\n");
/* Open the shared test file and dispatch to the subtest selected by
 * number; all ranks synchronize before and after the subtest. */
851 void grouplock_file(char *name, int subtest)
854 int flags = O_CREAT|O_RDWR|O_SYNC|O_TRUNC;
/* Build the full path under the shared test directory. */
857 sprintf(filename, "%s/%s", testdir, name);
859 if ((fd = open(filename, flags, mode)) == -1) {
860 sprintf(errmsg, "open of file %s: (%d) %s.\n",
861 filename, errno, strerror(errno));
/* Make sure every rank has the file open before the subtest runs. */
865 MPI_Barrier(MPI_COMM_WORLD);
/* Subtests 1-4: grouplock_test1 over the (READ|WRITE) x
 * (IOCTL|CLOSE) combinations; 5-8: grouplock_test2 likewise;
 * then the nonblock, error-path and bug-regression tests. */
869 grouplock_test1(filename, fd, READ, IOCTL);
872 grouplock_test1(filename, fd, READ, CLOSE);
875 grouplock_test1(filename, fd, WRITE, IOCTL);
878 grouplock_test1(filename, fd, WRITE, CLOSE);
881 grouplock_test2(filename, fd, READ, IOCTL);
884 grouplock_test2(filename, fd, READ, CLOSE);
887 grouplock_test2(filename, fd, WRITE, IOCTL);
890 grouplock_test2(filename, fd, WRITE, CLOSE);
893 grouplock_nonblock_test(filename, fd);
896 grouplock_errorstest(filename, fd);
899 grouplock_test3(filename, fd);
902 grouplock_test4(filename, fd);
/* Unknown subtest number: report and fail. */
905 sprintf(errmsg, "wrong subtest number %d (should be <= %d)",
906 subtest, LPGL_TEST_ITEMS);
915 MPI_Barrier(MPI_COMM_WORLD);
/* Top-level driver: run either the single subtest selected with -t
 * (only_test), or all LPGL_TEST_ITEMS subtests in order. */
918 void parallel_grouplock(void)
924 sprintf(teststr, "subtest %d", only_test);
926 grouplock_file("parallel_grouplock", only_test);
/* No -t given: iterate over every subtest. */
929 for (i = 1; i <= LPGL_TEST_ITEMS; i++) {
930 sprintf(teststr, "subtest %d", i);
932 grouplock_file("parallel_grouplock", i);
/* Print command-line help for the test binary. */
938 void usage(char *proc)
943 printf("Usage: %s [-h] -d <testdir> [-n <num>]\n", proc);
944 printf(" [-t <num>] [-v] [-V #] [-g]\n");
945 printf("\t-h: prints this help message\n");
946 printf("\t-d: the directory in which the tests will run\n");
947 printf("\t-n: repeat test # times\n");
948 printf("\t-t: run a particular test #\n");
/* NOTE(review): "verbositly" typo in this user-facing string. */
949 printf("\t-v: increase the verbositly level by 1\n");
950 printf("\t-V: select a specific verbosity level\n");
951 printf("\t-g: debug mode\n");
/* Finalize MPI only when i is non-zero -- presumably i reflects
 * MPI_Initialized(); confirm against the non-visible lines. */
955 if (i) MPI_Finalize();
959 int main(int argc, char *argv[])
961 int i, iterations = 1, c;
966 /* Check for -h parameter before MPI_Init so the binary can be
967 called directly, without, for instance, mpirun */
968 for (i = 1; i < argc; ++i) {
969 if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help"))
973 MPI_Init(&argc, &argv);
974 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
975 MPI_Comm_size(MPI_COMM_WORLD, &size);
977 /* Parse command line options */
979 c = getopt(argc, argv, "d:ghn:t:vV:");
994 iterations = atoi(optarg);
997 only_test = atoi(optarg);
1003 verbose = atoi(optarg);
1009 printf("%s is running with %d task(es) %s\n",
1010 argv[0], size, debug ? "in DEBUG mode" : "\b\b");
1012 if (size < MIN_GLHOST) {
1013 fprintf(stderr, "Error: "
1014 "%d tasks run, but should be at least %d tasks to run "
1015 "the test!\n", size, MIN_GLHOST);
1016 MPI_Abort(MPI_COMM_WORLD, 2);
1019 if (testdir == NULL && rank == 0) {
1020 fprintf(stderr, "Please specify a test directory! "
1021 "(\"%s -h\" for help)\n",
1023 MPI_Abort(MPI_COMM_WORLD, 2);
1028 for (i = 0; i < iterations; ++i) {
1030 printf("%s: Running test #%s(iter %d)\n",
1031 timestamp(), argv[0], i);
1033 parallel_grouplock();
1034 MPI_Barrier(MPI_COMM_WORLD);
1038 printf("%s: All tests passed!\n", timestamp());