1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/tests/parallel_grouplock.c
38 * Author: You Feng <youfeng@clusterfs.com>
45 #include <sys/types.h>
46 #include <asm/types.h>
49 #include <sys/ioctl.h>
53 #include <libcfs/libcfs.h>
54 #include <lustre/lustre_user.h>
/* Size of one I/O record for the small read/write helpers. */
57 #define LPGL_BUF_LEN 8192
/* Number of subtests dispatched by grouplock_file()/parallel_grouplock(). */
58 #define LPGL_TEST_ITEMS 12
/* Polling bound when waiting on peer tasks: up to MAX_WAIT_TRIES tries,
 * WAIT_TIME seconds apart. */
62 #define MAX_WAIT_TRIES 10
63 #define WAIT_TIME 1 /* secs */
64 #define ONE_MB 1048576 /* 1 MB */
/* Size range for the large write buffer (lgbuf) used by tests 3 and 4. */
65 #define MIN_LGBUF_SIZE 536870912 /* 512 MB */
66 #define MAX_LGBUF_SIZE 536870912 /* 512 MB */
67 // #define MAX_LGBUF_SIZE 1073741824 /* 1 GB */
/* Shared one-record I/O buffer used by the read/write helpers below. */
80 char buf[LPGL_BUF_LEN];
/* Path of the file under test; built in grouplock_file() as testdir/name. */
83 char filename[MAX_FILENAME_LEN];
/* Scratch buffer for formatted diagnostics handed to FAIL(). */
84 char errmsg[MAX_FILENAME_LEN+20];
/*
 * Allocate the large write buffer: start at MAX_LGBUF_SIZE and back off
 * ONE_MB per attempt while the size stays >= MIN_LGBUF_SIZE; FAIL() only
 * if no size in that range could be malloc'd.
 */
93 lgbuf_size = MAX_LGBUF_SIZE;
94 for (; lgbuf_size >= MIN_LGBUF_SIZE; lgbuf_size -= ONE_MB)
95 if ((lgbuf = (char *)malloc(lgbuf_size)) != NULL)
98 FAIL("malloc of large buffer failed.\n");
/*
 * Read one sizeof(buf)-sized record from fd.  On a failed read the errno
 * diagnostic (with the current file offset) is formatted into errmsg; a
 * short read gets its own diagnostic reporting the byte count returned.
 */
106 rc = read(fd, buf, sizeof(buf));
108 pos = lseek(fd, 0, SEEK_CUR);
109 sprintf(errmsg, "read of file %s at pos %d for %zu bytes "
110 "returned %d: (%d) %s.\n",
111 filename, pos, sizeof(buf), rc, errno, strerror(errno));
/* Short read: rc is valid but fewer than sizeof(buf) bytes came back. */
113 } else if (rc != sizeof(buf)) {
114 pos = lseek(fd, 0, SEEK_CUR);
115 sprintf(errmsg, "read of file %s at pos %d for %zu bytes "
117 filename, pos, sizeof(buf), rc);
/*
 * write_buf() - fill the shared buf with the byte value 'index' and write
 * it as one sizeof(buf)-sized record at offset index * sizeof(buf).
 * Write errors and short writes are formatted into errmsg.
 */
123 write_buf(int fd, int index)
125 int pos = index * sizeof(buf);
128 memset(buf, index, sizeof(buf));
129 lseek(fd, pos, SEEK_SET);
130 rc = write(fd, buf, sizeof(buf));
132 sprintf(errmsg, "write of file %s at pos %d for %zu bytes "
133 "returned %d: (%d) %s.\n",
134 filename, pos, sizeof(buf), rc, errno, strerror(errno));
/* Short write: fewer than sizeof(buf) bytes were written. */
136 } else if (rc != sizeof(buf)) {
137 sprintf(errmsg, "write of file %s at pos %d for %zu bytes "
139 filename, pos, sizeof(buf), rc);
145 * task0 attempts GR(gid=1) -- granted immediately
146 * task1 attempts PR|PW -- blocked, goes on waiting list
147 * task2 attempts GR(gid=1) -> should be granted
148 * task2 writes to file and releases GR(gid=1)
149 * task0 waits for task2 to complete its processing
150 * task0 writes to file and releases GR(gid=1)
151 * task1 PR|PW should be granted and reads the file
/*
 * grouplock_test1() - run the scenario above.  blocking_op selects READ
 * or WRITE for task1's conflicting I/O; unlock_op selects CLOSE or IOCTL
 * for releasing the group lock (see the calls in grouplock_file()).
 */
153 void grouplock_test1(char *filename, int fd, int blocking_op, int unlock_op)
155 MPI_Request req1, req2;
156 int iter, flag1, flag2, temp1, temp2;
/* Acquire the GR(gid=1) group lock via the Lustre group-lock ioctl. */
160 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
162 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
163 filename, errno, strerror(errno));
168 MPI_Barrier(MPI_COMM_WORLD);
172 if (blocking_op == WRITE) {
174 lseek(fd, 0, SEEK_SET);
177 for (i = 0; i <= 2; i++)
/* Notify task0 (tag 1) that this task has completed its I/O. */
180 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
183 /* Wait for task1 to progress. This could be racey. */
186 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
188 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
189 filename, errno, strerror(errno));
/* Release via close(fd) or an explicit GROUP_UNLOCK ioctl, per unlock_op. */
195 if (unlock_op == CLOSE)
198 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
202 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
203 (unlock_op == CLOSE) ? "close" : "ioctl",
204 filename, errno, strerror(errno));
207 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
210 /* PR|PW task will tell us when it completes */
211 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
212 /* 2nd locking task will tell us when it completes */
213 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
215 /* Wait for task2 to complete. */
216 iter = MAX_WAIT_TRIES;
220 FAIL("2nd locking task is not progressing\n");
/* If task1's recv completed here, it bypassed the held group lock. */
225 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
227 FAIL("PR|PW task progressed even though GROUP "
231 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
234 /* Make sure task1 is still waiting. */
235 iter = MAX_WAIT_TRIES;
239 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
241 FAIL("PR|PW task progressed even though "
242 "GROUP lock is held\n");
248 /* Now we need to release the lock */
249 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
251 "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
252 filename, errno, strerror(errno));
256 /* Wait for task1 to complete. */
257 iter = MAX_WAIT_TRIES;
261 FAIL("PR|PW task is not progressing even "
262 "though GROUP lock was released\n");
266 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
274 * task0 attempts GR(gid=1) -- granted immediately
275 * task1 attempts GR(gid=2) -- blocked
276 * task2 attempts PR|PW -- blocked
277 * task3 attempts GR(gid=2) -- blocked
278 * task4 attempts GR(gid=1) -- should be granted
279 * task0,4 writes to file and releases GR(gid=1) --
280 * this allows task2 & 3's GR locks to be granted; task4 remains blocked.
281 * task1 & 3 write to file and release GR(gid=2)
282 * task2 PR|PW should be granted and reads the file.
/*
 * grouplock_test2() - run the five-task scenario above with two competing
 * group-lock gids plus a conflicting extent lock.  blocking_op selects
 * READ or WRITE for task2's I/O; unlock_op selects CLOSE or IOCTL for
 * releasing the group locks.
 */
284 void grouplock_test2(char *filename, int fd, int blocking_op, int unlock_op)
286 int i, iter, rc, gid = 1;
287 int flag1, flag2, flag3, flag4;
288 int temp1, temp2, temp3, temp4;
289 MPI_Request req1, req2, req3, req4;
292 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
294 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
295 filename, errno, strerror(errno));
300 MPI_Barrier(MPI_COMM_WORLD);
304 /* Wait for task2 to issue its read request. */
308 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
310 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
311 filename, errno, strerror(errno));
/* Tell task0 the lock was granted. */
317 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
319 /* Do not release the locks until task 0 is ready to watch
320 for reading task only */
321 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
324 if (unlock_op == CLOSE)
327 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
330 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
331 (unlock_op == CLOSE) ? "close" : "ioctl",
332 filename, errno, strerror(errno));
337 /* Give task1 a chance to request its GR lock. */
340 if (blocking_op == WRITE) {
342 lseek(fd, 0, SEEK_SET);
345 for (i = 0; i <= 3; i++)
348 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
351 /* Give task1 & 3 a chance to queue their GR locks. */
354 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
356 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
357 filename, errno, strerror(errno));
363 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
366 "%s release GROUP_LOCK of file %s: (%d) %s.\n",
367 (unlock_op == CLOSE) ? "close" : "ioctl",
368 filename, errno, strerror(errno));
372 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
375 /* locking tasks will tell us when they complete */
376 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
377 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
378 MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
379 MPI_Irecv(&temp4, 1, MPI_INT, 4, 1, MPI_COMM_WORLD, &req4);
381 /* Make sure all tasks that should be blocked are waiting. */
382 iter = MAX_WAIT_TRIES;
386 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
387 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
388 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
/* flag1/flag3 are the gid=2 tasks; neither may complete while gid=1 is held. */
389 if (flag1 || flag3) {
390 FAIL("GROUP (gid=2) task progressed even though"
391 " GROUP (gid=1) lock is held.\n");
394 FAIL("PR|PW task progressed even though "
395 "GROUP (gid=1) lock is still held\n");
399 /* Wait for task4 to signal it has completed. */
400 iter = MAX_WAIT_TRIES;
404 FAIL("2nd task GROUP(gid=1) not progressing\n");
407 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
408 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
409 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
410 MPI_Test(&req4, &flag4, MPI_STATUS_IGNORE);
411 if (flag1 || flag3) {
412 FAIL("GROUP (gid=2) task progressed even though"
413 " GROUP (gid=1) lock is held.\n");
416 FAIL("PR|PW task progressed even though "
417 "GROUP (gid=1) lock is still held\n");
423 /* Now let's release first lock */
424 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
425 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
426 "returned %d", filename, rc);
430 /* Wait for task1 & 3 to signal they have their lock. */
431 iter = MAX_WAIT_TRIES;
435 FAIL("GROUP(gid=2) tasks not progressing\n");
438 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
439 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
440 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
442 fprintf(stderr, "task2 %d\n", flag2);
443 FAIL("PR task progressed even though GROUP lock"
444 " was on the queue task\n");
446 } while (!(flag1 && flag3));
448 /* Make sure task2 is still waiting. */
449 iter = MAX_WAIT_TRIES;
453 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
455 FAIL("PR task progressed even though GR(gid=2) "
456 "lock was active.\n");
460 /* Tell task1 & 3 to release their GR(gid=2) lock. */
461 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
462 MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
464 /* Wait for task2 (PR) to complete. */
465 iter = MAX_WAIT_TRIES;
469 FAIL("reading task is not progressing even "
470 "though GROUP locks are released\n");
474 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
481 * Tests a bug that once existed in the group lock code;
482 * i.e. that a GR lock request on a O_NONBLOCK fd could fail even though
483 * there is no blocking GROUP lock ahead of it on the waitq.
485 * task0 starts a large write (PW). this test could be racey if this
486 * write finishes too quickly.
487 * task1 attempts GR(gid=1) -- blocked
488 * task2 attempts GR(gid=2) with a O_NONBLOCK fd. should not fail.
490 void grouplock_test3(char *filename, int fd)
492 MPI_Request req1, req2;
493 int iter, flag1, flag2, temp1, temp2;
/* task2 switches its fd to non-blocking before requesting the group lock. */
498 } else if (rank == 2) {
499 rc = fcntl(fd, F_SETFL, O_NONBLOCK);
501 sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
502 errno, strerror(errno));
507 MPI_Barrier(MPI_COMM_WORLD);
515 /* Racey, we have to sleep just long enough for
516 * task0's write to start. */
519 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
521 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
522 filename, errno, strerror(errno));
526 /* tell task0 we have the lock. */
527 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
529 /* the close of fd will release the lock. */
/* task0: the large PW write that the GR requests must queue behind. */
532 rc = write(fd, lgbuf, lgbuf_size);
534 sprintf(errmsg, "write of file %s for %d bytes "
535 "returned %d: (%d) %s.\n",
536 filename, lgbuf_size,
537 rc, errno, strerror(errno));
539 } else if (rc != lgbuf_size) {
540 sprintf(errmsg, "write of file %s for %d bytes "
542 filename, lgbuf_size, rc);
546 /* GR tasks will tell us when they complete */
547 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
548 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
550 /* Wait for task1 & 2 to complete. */
551 iter = MAX_WAIT_TRIES;
555 FAIL("GR(gid=1) tasks are not progressing even "
556 "no conflicting locks exist.\n");
560 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
561 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
562 } while (!(flag1 && flag2));
568 * Tests a bug that once existed in the group lock code;
569 * i.e. extent locks without O_NONBLOCK that go on the waitq before a group
570 * lock request came in and was granted. The extent lock would time out and
573 * task0 starts a large write (PW). this test could be racey if this
574 * write finishes too quickly.
575 * task1 attempts PR -- blocked
576 * task2 attempts GR(gid=1) -- blocked
577 * task0 completes write
578 * task1 should wakeup and complete its read
579 * task2 should wakeup and after task1 complete.
581 void grouplock_test4(char *filename, int fd)
584 int iter, flag1, temp1;
590 MPI_Barrier(MPI_COMM_WORLD);
594 /* Racey, we have to sleep just long enough for
595 * task0's write to start. */
596 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
601 /* tell task2 to go. */
602 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
/* Notify task0 that the read completed. */
606 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
609 /* Give task0 & 1 a chance to start. */
610 MPI_Recv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
615 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
617 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
618 filename, errno, strerror(errno));
622 /* tell task0 we have the lock. */
623 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
625 /* Do not release the locks until task 0 tells us to.
626 for reading task only */
627 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
630 rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
633 "ioctl GROUP_UNLOCK of file %s: (%d) %s.\n",
634 filename, errno, strerror(errno));
639 /* tell task1 to go to avoid race */
640 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
/* task0: the large PW write the blocked PR and GR must queue behind. */
641 rc = write(fd, lgbuf, lgbuf_size);
643 sprintf(errmsg, "write of file %s for %d bytes "
644 "returned %d: (%d) %s.\n",
645 filename, lgbuf_size,
646 rc, errno, strerror(errno));
648 } else if (rc != lgbuf_size) {
649 sprintf(errmsg, "write of file %s for %d bytes "
651 filename, lgbuf_size, rc);
655 /* wait for task2 to get its lock. */
656 MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
659 /* Tell task2 it's ok to release its GR(gid=1) lock. */
660 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
662 /* wait a really long time. */
663 sleep(180 * WAIT_TIME);
665 /* PR task will tell us when it completes */
666 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
668 /* Make sure the PR task is successful and doesn't hang.
670 * XXX - To test properly we need to make sure the read
671 * gets queued before task2's group lock request.
672 * You may need to increase lgbuf_size.
674 iter = MAX_WAIT_TRIES;
678 FAIL("PR task is hung !\n");
682 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
690 * task0 attempts GR(gid=1) -- granted
691 * task1 attempts PR on non-blocking fd -> should return -EWOULDBLOCK
692 * task2 attempts PW on non-blocking fd -> should return -EWOULDBLOCK
693 * task3 attempts GR(gid=2) on non-blocking fd -> should return -EWOULDBLOCK
/*
 * grouplock_nonblock_test() - while task0 holds GR(gid=1), every
 * conflicting operation issued on an O_NONBLOCK fd must fail immediately
 * with EWOULDBLOCK instead of blocking.
 */
695 void grouplock_nonblock_test(char *filename, int fd)
697 MPI_Request req1, req2, req3;
698 int iter, flag1, flag2, flag3, temp1, temp2, temp3;
702 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
704 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
705 filename, errno, strerror(errno));
/* Non-task0 ranks switch their fd to non-blocking mode. */
710 rc = fcntl(fd, F_SETFL, O_NONBLOCK);
712 sprintf(errmsg, "fcntl(O_NONBLOCK) failed: (%d) %s.\n",
713 errno, strerror(errno));
717 MPI_Barrier(MPI_COMM_WORLD);
/* task1: PR attempt must fail fast with EWOULDBLOCK. */
721 rc = read(fd, buf, sizeof(buf));
722 if ((rc != -1) || (errno != EWOULDBLOCK)) {
723 FAIL("PR lock succeeded while incompatible "
724 "GROUP LOCK (gid=1) is still held\n");
727 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* task2: PW attempt must fail fast with EWOULDBLOCK. */
730 rc = write(fd, buf, sizeof(buf));
731 if ((rc != -1) || (errno != EWOULDBLOCK)) {
732 FAIL("PW lock succeeded while incompatible "
733 "GROUP LOCK (gid=1) is still held\n");
736 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* task3: conflicting GROUP_LOCK attempt must fail fast with EWOULDBLOCK. */
740 rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid);
741 if ((rc != -1) || (errno != EWOULDBLOCK)) {
742 FAIL("GROUP_LOCK (gid=2) succeeded while incompatible "
743 "GROUP LOCK (gid=1) is still held.\n");
746 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
749 /* reading task will tell us when it completes */
750 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
751 /* writing task will tell us when it completes */
752 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
753 /* 2nd locking task will tell us when it completes */
754 MPI_Irecv(&temp3, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req3);
756 iter = MAX_WAIT_TRIES;
760 FAIL("non-blocking tasks are not progressing\n");
763 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
764 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
765 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
766 } while (!(flag1 && flag2 && flag3));
/* All three non-blocking attempts came back; drop the group lock. */
768 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
769 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s",
777 /* Just test some error paths with invalid requests */
/*
 * grouplock_errorstest() - single-task checks that invalid group-lock
 * requests fail with EINVAL: double lock (same and different gid), unlock
 * with the wrong gid, and unlock of a never-locked fd.
 */
778 void grouplock_errorstest(char *filename, int fd)
782 MPI_Barrier(MPI_COMM_WORLD);
786 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
788 "ioctl GROUP_LOCK of file %s: (%d) %s.\n",
789 filename, errno, strerror(errno));
793 /* second group lock on same fd, same gid */
794 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
795 if (errno != EINVAL) {
796 sprintf(errmsg, "Double GROUP lock failed "
797 "with errno %d instead of EINVAL\n",
802 FAIL("Taking second GROUP lock on same fd succeed\n");
805 /* second group lock on same fd, different gid */
806 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid + 1)) == -1) {
807 if (errno != EINVAL) {
808 sprintf(errmsg, "Double GROUP lock with "
809 "different gid failed with errno %d "
810 "instead of EINVAL\n", errno);
814 FAIL("Taking second GROUP lock on same fd, with "
815 "different gid, succeeded.\n");
818 /* GROUP unlock with wrong gid */
819 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid + 1)) == -1) {
820 if (errno != EINVAL) {
821 sprintf(errmsg, "GROUP_UNLOCK with wrong gid "
822 "failed with errno %d instead of "
827 FAIL("GROUP unlock with wrong gid succeed\n");
/* Legitimate unlock of the lock taken above; must succeed. */
830 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
831 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s "
832 "returned %d.", filename, rc);
838 /* unlock of never locked fd */
839 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
840 if (errno != EINVAL) {
841 sprintf(errmsg, "GROUP_UNLOCK on never locked "
842 "fd failed with errno %d instead of "
847 FAIL("GROUP unlock on never locked fd succeed\n");
/*
 * grouplock_file() - open/create the shared test file (truncated, O_SYNC)
 * under testdir, then dispatch the numbered subtest (1..LPGL_TEST_ITEMS)
 * to the matching grouplock_* routine.
 */
853 void grouplock_file(char *name, int subtest)
856 int flags = O_CREAT|O_RDWR|O_SYNC|O_TRUNC;
859 sprintf(filename, "%s/%s", testdir, name);
861 if ((fd = open(filename, flags, mode)) == -1) {
862 sprintf(errmsg, "open of file %s: (%d) %s.\n",
863 filename, errno, strerror(errno));
867 MPI_Barrier(MPI_COMM_WORLD);
/* Subtests 1-4: test1 over the READ/WRITE x IOCTL/CLOSE matrix. */
871 grouplock_test1(filename, fd, READ, IOCTL);
874 grouplock_test1(filename, fd, READ, CLOSE);
877 grouplock_test1(filename, fd, WRITE, IOCTL);
880 grouplock_test1(filename, fd, WRITE, CLOSE);
/* Subtests 5-8: test2 over the same matrix. */
883 grouplock_test2(filename, fd, READ, IOCTL);
886 grouplock_test2(filename, fd, READ, CLOSE);
889 grouplock_test2(filename, fd, WRITE, IOCTL);
892 grouplock_test2(filename, fd, WRITE, CLOSE);
895 grouplock_nonblock_test(filename, fd);
898 grouplock_errorstest(filename, fd);
901 grouplock_test3(filename, fd);
904 grouplock_test4(filename, fd);
907 sprintf(errmsg, "wrong subtest number %d (should be <= %d)",
908 subtest, LPGL_TEST_ITEMS);
917 MPI_Barrier(MPI_COMM_WORLD);
/*
 * parallel_grouplock() - top-level driver: run just 'only_test' when one
 * was selected on the command line, otherwise run every subtest from 1 to
 * LPGL_TEST_ITEMS in order.
 */
920 void parallel_grouplock(void)
926 sprintf(teststr, "subtest %d", only_test);
928 grouplock_file("parallel_grouplock", only_test);
931 for (i = 1; i <= LPGL_TEST_ITEMS; i++) {
932 sprintf(teststr, "subtest %d", i);
934 grouplock_file("parallel_grouplock", i);
/*
 * usage() - print the command-line help and shut down MPI if it was
 * initialized before exiting.
 * NOTE(review): "verbositly" in the -v help line is a typo in the printf
 * string literal; fixing it changes program output, so it is only flagged
 * here.
 */
940 void usage(char *proc)
945 printf("Usage: %s [-h] -d <testdir> [-n <num>]\n", proc);
946 printf(" [-t <num>] [-v] [-V #] [-g]\n");
947 printf("\t-h: prints this help message\n");
948 printf("\t-d: the directory in which the tests will run\n");
949 printf("\t-n: repeat test # times\n");
950 printf("\t-t: run a particular test #\n");
951 printf("\t-v: increase the verbositly level by 1\n");
952 printf("\t-V: select a specific verbosity level\n");
953 printf("\t-g: debug mode\n");
/* Finalize MPI only when it was initialized (i is presumably set by
 * MPI_Initialized above -- TODO confirm, the call is outside this view). */
957 if (i) MPI_Finalize();
/*
 * main() - parse options, initialize MPI, validate task count and test
 * directory, then run parallel_grouplock() for the requested number of
 * iterations.
 */
961 int main(int argc, char *argv[])
963 int i, iterations = 1, c;
968 /* Check for -h parameter before MPI_Init so the binary can be
969 called directly, without, for instance, mpirun */
970 for (i = 1; i < argc; ++i) {
971 if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help"))
975 MPI_Init(&argc, &argv);
976 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
977 MPI_Comm_size(MPI_COMM_WORLD, &size);
979 /* Parse command line options */
981 c = getopt(argc, argv, "d:ghn:t:vV:");
/* NOTE(review): atoi() silently yields 0 on bad input; strtol() with
 * error checking would be more robust for -n/-t/-V. */
996 iterations = atoi(optarg);
999 only_test = atoi(optarg);
1005 verbose = atoi(optarg);
1011 printf("%s is running with %d task(es) %s\n",
1012 argv[0], size, debug ? "in DEBUG mode" : "\b\b");
/* Need at least MIN_GLHOST tasks: the scenarios use up to 5 ranks. */
1014 if (size < MIN_GLHOST) {
1015 fprintf(stderr, "Error: "
1016 "%d tasks run, but should be at least %d tasks to run "
1017 "the test!\n", size, MIN_GLHOST);
1018 MPI_Abort(MPI_COMM_WORLD, 2);
1021 if (testdir == NULL && rank == 0) {
1022 fprintf(stderr, "Please specify a test directory! "
1023 "(\"%s -h\" for help)\n",
1025 MPI_Abort(MPI_COMM_WORLD, 2);
1030 for (i = 0; i < iterations; ++i) {
1032 printf("%s: Running test #%s(iter %d)\n",
1033 timestamp(), argv[0], i);
1035 parallel_grouplock();
1036 MPI_Barrier(MPI_COMM_WORLD);
1040 printf("%s: All tests passed!\n", timestamp());