1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/tests/parallel_grouplock.c
38 * Author: You Feng <youfeng@clusterfs.com>
45 #include <sys/types.h>
48 #include <sys/ioctl.h>
52 #include <liblustre.h>
53 #include <lustre/lustre_user.h>
54 #include <lustre/tests/lp_utils.h>
/* Size in bytes of the shared test file; also the size of the read
   buffers (buf/zeros) used by the subtests below. */
56 #define LPGL_FILEN 700000
/* Number of group-lock subtests dispatched by grouplock_file(). */
57 #define LPGL_TEST_ITEMS 7
61 /* waiting time in 0.1 s */
/* Upper bound on polling iterations when waiting for a peer task to
   report progress (each iteration presumably sleeps 0.1 s — the sleep
   call itself is in elided lines of this excerpt). */
62 #define MAX_WAITING_TIME 20
/*
 * NOTE(review): this listing is an elided excerpt of the original file;
 * interior lines (braces, FAIL() calls, loop bodies, some declarations)
 * are missing, so the fragment below is not compilable as shown.
 *
 * grouplock_test1 -- three MPI ranks verify Lustre group-lock
 * compatibility on one shared file descriptor:
 *   rank 0: takes CW(gid=1), then watches (via nonblocking receives)
 *           that rank 2 completes but rank 1 stays blocked; finally
 *           releases the lock and waits for the reader to finish.
 *   rank 1: performs a PR read of the whole file, which must block
 *           while any CW(gid=1) lock is held.
 *   rank 2: takes a second, compatible CW(gid=1) lock.
 * On any failure the error text is formatted into errmsg (FAIL path
 * is in elided lines).
 */
69 * process1 attempts CW(gid=1) -- granted immediately
70 * process2 attempts PR -- blocked, goes on waiting list
71 * process3 attempts CW(gid=1) -> should be granted, but may go on
74 void grouplock_test1(char *filename, int fd, char *errmsg)
76 int rc, count, gid = 1;
78 char zeros[LPGL_FILEN];
79 MPI_Request req1, req2;
/* Rank 0 (presumably — the rank test is in an elided line): take the
   initial CW(gid=1) group lock before releasing the other ranks. */
83 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
84 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
/* All ranks synchronize so the lock is in place before anyone reads. */
90 MPI_Barrier(MPI_COMM_WORLD);
/* Reader rank: compare buffer against zeros on a short read. */
93 memset(zeros, 0x0, sizeof(zeros));
94 lseek(fd, 0, SEEK_SET);
/* Tell rank 2 the read is about to start, then read the whole file;
   this read should block until the group lock is released. */
96 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
97 count = read(fd, buf, sizeof(buf));
98 if (count != sizeof(buf)) {
100 dump_diff(zeros, buf, count, 0);
101 sprintf(errmsg, "read of file %s return %d",
/* Notify rank 0 that the read completed. */
105 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
111 /* Wait for reading task to progress, this is probably somewhat
112 racey, though, may be adding usleep here would make things
/* Second-locker rank: wait for the reader's go-ahead, then take the
   compatible CW(gid=1) lock and report completion to rank 0. */
115 MPI_Recv(&temp, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
117 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
118 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
122 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* Rank 0: poll for up to MAX_WAITING_TIME ticks. Rank 2 (compatible
   lock) must complete; rank 1 (PR read) must NOT. */
126 int iter = MAX_WAITING_TIME;
129 /* reading task will tell us when it completes */
130 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
131 /* 2nd locking task will tell us when it completes */
132 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
137 FAIL("2nd locking task is not progressing\n");
140 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
141 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
143 FAIL("PR task progressed even though GROUP lock"
149 /* Now we need to release the lock */
/* Both lock holders (ranks 0 and 2) drop their CW(gid=1) locks so the
   blocked PR read can proceed. */
151 if (rank == 0 || rank == 2) {
152 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
153 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
/* Rank 0: bounded wait for the reader's completion message. */
160 int iter = MAX_WAITING_TIME;
166 FAIL("reading task is not progressing even "
167 "though GROUP lock was released\n");
171 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
/* Final synchronization before the next subtest. */
175 MPI_Barrier(MPI_COMM_WORLD);
/*
 * NOTE(review): elided excerpt — interior lines are missing; the
 * fragment below is not compilable as shown.
 *
 * grouplock_test2 -- four MPI ranks: rank 0 holds CW(gid=1); ranks 1
 * and 3 wait on CW(gid=2); rank 2 issues a blocked PR read.  Rank 0
 * verifies nobody progresses while gid=1 is held, releases it, then
 * verifies the gid=2 lockers progress while the PR read stays blocked,
 * and finally watches the reader complete once ranks 1/3 unlock.
 */
180 * process1 attempts CW(gid=1) -- granted immediately
181 * process2 attempts CW(gid=2) -- blocked
182 * process3 attempts PR -- blocked
183 * process4 attempts CW(gid=2) -- blocked
184 * process1 releases CW(gid=1) -- this allows process2's CW lock to be granted
185 * process3 remains blocked
187 void grouplock_test2(char *filename, int fd, char *errmsg)
189 int rc, count, gid = 1;
190 char buf[LPGL_FILEN];
191 char zeros[LPGL_FILEN];
192 MPI_Request req1, req2, req3;
193 int temp1, temp2, temp3;
/* Rank 0 (presumably — rank test elided): take CW(gid=1) first. */
196 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
197 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
203 MPI_Barrier(MPI_COMM_WORLD);
/* Ranks 1 and 3: wait for rank 2's go-ahead, attempt CW with a
   different gid (gid adjustment is in elided lines), then report
   completion to rank 0. */
205 if (rank == 1 || rank == 3) {
208 MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
212 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
213 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
217 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* Rank 2 (reader): signal rank 3, then issue the PR read that must
   block while any group lock is held. */
221 memset(zeros, 0x0, sizeof(zeros));
222 lseek(fd, 0, SEEK_SET);
224 MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
225 count = read(fd, buf, sizeof(buf));
226 if (count != sizeof(buf)) {
228 dump_diff(zeros, buf, count, 0);
229 sprintf(errmsg, "read of file %s return %d",
233 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* Rank 0: post nonblocking receives for all three peers, then poll. */
237 int iter = MAX_WAITING_TIME;
238 int flag1, flag2, flag3;
240 /* 2nd locking task will tell us when it completes */
241 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
242 /* 3rd locking task will tell us when it completes */
243 MPI_Irecv(&temp2, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req2);
244 /* reading task will tell us when it completes */
245 MPI_Irecv(&temp3, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req3);
/* While CW(gid=1) is held, no peer may progress. */
250 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
251 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
252 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
254 FAIL("PR task progressed even though GROUP lock"
257 if (flag1 || flag2) {
258 FAIL("GROUP (gid=2) task progressed even though"
259 " GROUP (gid=1) lock is held\n");
264 /* Now let's release first lock */
265 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
266 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
/* After the release: both gid=2 lockers must complete, but the PR
   read must still be blocked behind them. */
270 iter = MAX_WAITING_TIME;
274 FAIL("GROUP(gid=2) tasks are not progressing\n");
277 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
278 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
279 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
281 fprintf(stderr, "task1 %d, task3 %d\n", flag1,
283 FAIL("PR task progressed even though GROUP lock"
284 " was on the queue task\n");
286 } while (!(flag1 && flag2));
/* Tell ranks 1 and 3 that rank 0 is now watching only the reader. */
287 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
288 MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
291 if (rank == 1 || rank == 3) {
292 /* Do not release the locks until task 0 is ready to watch
293 for reading task only */
294 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
296 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
297 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
/* Rank 0: bounded wait for the reader once all locks are gone. */
304 int iter = MAX_WAITING_TIME;
310 FAIL("reading task is not progressing even "
311 "though GROUP locks are released\n");
315 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
319 MPI_Barrier(MPI_COMM_WORLD);
/*
 * NOTE(review): elided excerpt — interior lines are missing; the
 * fragment below is not compilable as shown.
 *
 * grouplock_test3 -- like test1, but the two CW(gid=1) locks are
 * released one at a time: after the FIRST unlock the PR reader must
 * still be blocked (the lock is held until every holder releases);
 * only after the SECOND unlock may the read complete.
 */
324 * process1 attempts CW(gid=1) -- granted
325 * process2 attempts PR -- blocked
326 * process3 attempts CW(gid=1) -> should be granted
327 * process3 releases CW(gid=1)
328 * process2 should remain blocked
329 * process1 releases CW(gid=1)
330 * process2's PR should be granted
332 * This is a lot like test1.
334 void grouplock_test3(char *filename, int fd, char *errmsg)
336 int rc, count, gid = 1;
337 char buf[LPGL_FILEN];
338 char zeros[LPGL_FILEN];
339 MPI_Request req1, req2;
/* Rank 0 (presumably — rank test elided): take CW(gid=1). */
343 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
344 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
350 MPI_Barrier(MPI_COMM_WORLD);
/* Reader rank: signal rank 2, then issue the PR read that should
   block while the group lock is held. */
353 memset(zeros, 0x0, sizeof(zeros));
354 lseek(fd, 0, SEEK_SET);
356 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
357 count = read(fd, buf, sizeof(buf));
358 if (count != sizeof(buf)) {
360 dump_diff(zeros, buf, count, 0);
361 sprintf(errmsg, "read of file %s return %d",
365 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
371 /* Wait for reading task to progress, this is probably somewhat
372 racey, though, may be adding usleep here would make things
/* Second locker: wait for reader's go-ahead, take the compatible
   CW(gid=1) lock, report to rank 0. */
375 MPI_Recv(&temp, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
377 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
378 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
382 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* Rank 0: poll — rank 2 must complete, reader must stay blocked. */
386 int iter = MAX_WAITING_TIME;
389 /* reading task will tell us when it completes */
390 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
391 /* 2nd locking task will tell us when it completes */
392 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
397 FAIL("2nd locking task is not progressing\n");
400 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
401 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
403 FAIL("PR task progressed even though GROUP lock"
409 /* Now we need to release the lock */
/* First unlock (one holder only — which rank releases here is in
   elided lines): the reader must REMAIN blocked afterwards. */
412 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
413 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
420 int iter = MAX_WAITING_TIME;
426 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
427 } while (!flag1 && iter);
429 FAIL("reading task is progressing even "
430 "though GROUP lock was not fully released\n");
/* Second unlock: now the reader must make progress. */
433 iter = MAX_WAITING_TIME;
435 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
436 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
444 FAIL("reading task is not progressing even "
445 "though GROUP lock was released\n");
449 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
453 MPI_Barrier(MPI_COMM_WORLD);
/*
 * NOTE(review): elided excerpt — interior lines are missing; the
 * fragment below is not compilable as shown.
 *
 * grouplock_test4 -- nonblocking-fd behavior: while rank 0 holds
 * CW(gid=1), a PR read on an O_NONBLOCK fd must fail with EWOULDBLOCK
 * instead of blocking, and a conflicting GROUP_LOCK ioctl must likewise
 * fail with EWOULDBLOCK.  Each expected failure is reported to rank 0,
 * which waits (bounded) for both before unlocking.
 */
458 * process1 attempts CW(gid=1) -- granted
459 * process2 attempts PR on non-blocking fd -> should return -EWOULDBLOCK
460 * process3 attempts CW(gid=2) on non-blocking fd -> should return -EWOULDBLOCK
462 void grouplock_test4(char *filename, int fd, char *errmsg)
464 int rc, count, gid = 1;
465 char buf[LPGL_FILEN];
466 char zeros[LPGL_FILEN];
/* Rank 0 (presumably — rank test elided): take CW(gid=1). */
469 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
470 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
476 MPI_Barrier(MPI_COMM_WORLD);
/* Reader rank: the nonblocking read must fail with EWOULDBLOCK —
   that is the expected (success) outcome here. */
479 memset(zeros, 0x0, sizeof(zeros));
480 lseek(fd, 0, SEEK_SET);
482 count = read(fd, buf, sizeof(buf));
483 if (count != sizeof(buf)) {
484 if (count == -1 && errno == EWOULDBLOCK) {
485 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
490 dump_diff(zeros, buf, count, 0);
491 sprintf(errmsg, "read of file %s return %d",
/* Reached only if the read succeeded — that is the failure case. */
495 FAIL("PR lock succeed while incompatible "
496 "GROUP LOCK (gid=1) is still held\n");
/* Conflicting-locker rank: GROUP_LOCK with a different gid (gid
   adjustment is in elided lines) must fail with EWOULDBLOCK. */
502 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
503 if (errno == EWOULDBLOCK) {
504 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
508 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
512 FAIL("GROUP_LOCK (gid=2) succeed while incompatible "
513 "GROUP LOCK (gid=1) is still held\n");
/* Rank 0: wait (bounded) until BOTH peers report the expected
   EWOULDBLOCK failures, then drop the lock. */
519 int iter = MAX_WAITING_TIME;
521 MPI_Request req1, req2;
524 /* reading task will tell us when it completes */
525 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
526 /* 2nd locking task will tell us when it completes */
527 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
532 FAIL("non-blocking tasks are not progressing\n");
535 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
536 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
537 } while (!(flag2 && flag1));
539 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
540 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s", filename);
/*
 * NOTE(review): elided excerpt — interior lines are missing; the
 * fragment below is not compilable as shown.
 *
 * grouplock_test5 -- rank 0 holds CW(gid=1); ranks 1 and 2 attempt a
 * conflicting CW lock (different gid — the gid change is in elided
 * lines) and must both stay blocked until rank 0 unlocks, after which
 * both must be granted and then release their locks.
 */
547 * process1 attempts CW(gid=1) -- granted
548 * process2 attempts CW(gid=2) -- blocked
549 * process3 attempts CW(gid=2) -- blocked
550 * process1 releases CW(gid=1)
551 * process2's CW(gid=2) should be granted
552 * process3's CW(gid=2) should be granted
554 * This is pretty much like test 3
556 void grouplock_test5(char *filename, int fd, char *errmsg)
559 MPI_Request req1, req2;
/* Rank 0 (presumably — rank test elided): take CW(gid=1). */
563 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
564 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
570 MPI_Barrier(MPI_COMM_WORLD);
/* Ranks 1 and 2: blocking attempt on the conflicting group lock;
   report to rank 0 once granted. */
572 if (rank == 2 || rank == 1) {
574 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
575 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
579 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
/* Rank 0: neither locker may progress while gid=1 is held. */
583 int iter = MAX_WAITING_TIME;
586 /* 3rd locking task will tell us when it completes */
587 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
588 /* 2nd locking task will tell us when it completes */
589 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
594 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
595 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
596 } while (!flag2 && !flag1 && iter);
/* NOTE(review): "incomptible" typo below is user-visible output; it
   can only be corrected by a code change, not a doc-only edit. */
598 FAIL("incomptible locking tasks are progressing\n");
602 /* Now we need to release the lock */
/* Rank 0 unlocks; both blocked lockers must now complete (bounded
   wait for both completion messages). */
605 int iter = MAX_WAITING_TIME;
607 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
608 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
616 FAIL("locking tasks are not progressing even "
617 "though incompatible lock released\n");
620 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
621 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
622 } while (!(flag1 && flag2));
/* Ranks 1 and 2 release their granted locks before the barrier. */
626 if ( rank == 1 || rank == 2) {
627 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
628 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
634 MPI_Barrier(MPI_COMM_WORLD);
/*
 * grouplock_test6 -- DISABLED (see XXX note below).  Its body is not
 * visible in this excerpt; per the original comments it is an empty
 * stub kept so the test-number dispatch in grouplock_file() stays
 * contiguous.
 */
640 * process1 attempts CW(gid=1) -- granted
641 * process2 attempts PW -- blocked
642 * process2 attempts CW(gid=2) -- blocked
643 * process3 attempts CW(gid=2) -- blocked
644 * process1 releases CW(gid=1)
645 * process2's CW(gid=2) should be granted
646 * process3's CW(gid=2) should be granted
648 * after process1 release CW(gid=1), there are two paths:
649 * path 1. process2 get PW
650 * path 2. process3 get CW(gid=2)
652 * green: Also about test6 - by definition if P* and CW lock are waiting,
653 * CW lock have bigger priority and should be granted first when it becomes
654 * possible. So after process1 releases its CW lock, process3 should always
655 * get CW lock, and when it will release it, process 2 will proceed with read
656 * and then with getting CW lock
658 * XXX This test does not make any sense at all the way it is described right
659 * now, hence disabled.
661 void grouplock_test6(char *filename, int fd, char *errmsg)
/*
 * NOTE(review): elided excerpt — interior lines are missing; the
 * fragment below is not compilable as shown.
 *
 * grouplock_errorstest -- exercises invalid-request error paths of the
 * group-lock ioctls; each case must fail with EINVAL:
 *   - double GROUP_LOCK on the same fd (same gid, and different gid)
 *   - GROUP_UNLOCK with the wrong gid
 *   - GROUP_UNLOCK on a never-locked fd
 * Work is split across ranks to avoid repeated fd open/close cycles.
 */
665 /* Just test some error paths with invalid requests */
666 void grouplock_errorstest(char *filename, int fd, char *errmsg)
671 /* To not do lots of separate tests with lots of fd opening/closing,
672 different parts of this test are performed in different processes */
/* Ranks 0 and 1: establish a valid group lock to test against. */
674 if (rank == 0 || rank == 1 ) {
675 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
676 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
682 /* second group lock on same fd, same gid */
/* Expected: failure with errno == EINVAL; success is a test failure. */
684 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
685 if (errno != EINVAL) {
686 sprintf(errmsg, "Double GROUP lock failed with errno %d instead of EINVAL\n", errno);
690 FAIL("Taking second GROUP lock on same fd succeed\n");
694 /* second group lock on same fd, different gid */
696 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid + 1)) == -1) {
697 if (errno != EINVAL) {
698 sprintf(errmsg, "Double GROUP lock different gid failed with errno %d instead of EINVAL\n", errno);
702 FAIL("Taking second GROUP lock on same fd, different gid, succeed\n");
706 /* GROUP unlock with wrong gid */
707 if (rank == 0 || rank == 1) {
708 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid + 1)) == -1) {
709 if (errno != EINVAL) {
710 sprintf(errmsg, "GROUP unlock with wrong gid failed with errno %d instead of EINVAL\n",
715 FAIL("GROUP unlock with wrong gid succeed\n");
/* Ranks 0 and 1: clean up the valid lock taken above. */
719 if (rank == 0 || rank == 1) {
720 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
721 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
727 /* unlock of never locked fd */
/* Some other rank (rank test elided): unlock without ever locking
   must fail with EINVAL. */
729 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
730 if (errno != EINVAL) {
731 sprintf(errmsg, "GROUP unlock on never locked fd failed with errno %d instead of EINVAL\n",
736 FAIL("GROUP unlock on never locked fd succeed\n");
/*
 * NOTE(review): elided excerpt — interior lines (switch statement,
 * FAIL calls, closing braces) are missing.
 *
 * grouplock_file -- open the shared test file under testdir and
 * dispatch subtest number `items` (1..LPGL_TEST_ITEMS) on it.
 * Test 4 needs an O_NONBLOCK fd (it checks EWOULDBLOCK semantics);
 * all other tests use a plain blocking O_RDWR fd.  Barriers bracket
 * the subtest so all ranks enter and leave together; the fd is
 * closed at the end.
 *
 * NOTE(review): sprintf into filename is unbounded — a long testdir
 * could overflow MAX_FILENAME_LEN; snprintf would be safer (would
 * need a code change, not a doc-only edit).
 */
741 void grouplock_file(char *name, int items)
744 char filename[MAX_FILENAME_LEN];
745 char errmsg[MAX_FILENAME_LEN+20];
747 sprintf(filename, "%s/%s", testdir, name);
/* First branch presumably selects the nonblocking open for test 4
   (the condition line is elided). */
750 if ((fd = open(filename, O_RDWR | O_NONBLOCK)) == -1) {
751 sprintf(errmsg, "open of file %s", filename);
754 } else if ((fd = open(filename, O_RDWR)) == -1) {
755 sprintf(errmsg, "open of file %s", filename);
759 MPI_Barrier(MPI_COMM_WORLD);
/* Dispatch on `items` (switch/case lines elided). */
763 grouplock_test1(filename, fd, errmsg);
766 grouplock_test2(filename, fd, errmsg);
769 grouplock_test3(filename, fd, errmsg);
772 grouplock_test4(filename, fd, errmsg);
775 grouplock_test5(filename, fd, errmsg);
778 grouplock_test6(filename, fd, errmsg);
781 grouplock_errorstest(filename, fd, errmsg);
/* Default case: out-of-range test number. */
784 sprintf(errmsg, "wrong test case number %d (should be <= %d)",
785 items, LPGL_TEST_ITEMS);
789 MPI_Barrier(MPI_COMM_WORLD);
791 if (close(fd) == -1) {
792 sprintf(errmsg, "close of file %s", filename);
/*
 * parallel_grouplock -- driver: for each subtest 1..LPGL_TEST_ITEMS,
 * create the shared test file, run the subtest via grouplock_file(),
 * then remove the file.  (Rank guards around create/remove are in
 * elided lines of this excerpt.)
 */
798 void parallel_grouplock(void)
802 for (i = 1;i <= LPGL_TEST_ITEMS;++i) {
804 create_file("parallel_grouplock", LPGL_FILEN, 0);
808 grouplock_file("parallel_grouplock", i);
812 remove_file("parallel_grouplock");
/*
 * usage -- print the command-line help (rank-0 guard is in elided
 * lines), then finalize MPI if it was initialized and exit.
 *
 * NOTE(review): "verbositly" in the -v line is a user-visible typo;
 * fixing it requires a code change, not a doc-only edit.
 */
817 void usage(char *proc)
822 printf("Usage: %s [-h] -d <testdir>\n", proc);
823 printf(" [-n \"13\"] [-v] [-V #] [-g]\n");
824 printf("\t-h: prints this help message\n");
825 printf("\t-d: the directory in which the tests will run\n");
826 printf("\t-n: repeat test # times\n");
827 printf("\t-v: increase the verbositly level by 1\n");
828 printf("\t-V: select a specific verbosity level\n");
829 printf("\t-g: debug mode\n");
/* `i` is presumably set by MPI_Initialized() in an elided line;
   only finalize when MPI was actually initialized (usage may be
   printed before MPI_Init when -h is given on the command line). */
833 if (i) MPI_Finalize();
837 int main(int argc, char *argv[])
840 int i, iterations = 1;
842 /* Check for -h parameter before MPI_Init so the binary can be
843 called directly, without, for instance, mpirun */
844 for (i = 1; i < argc; ++i) {
845 if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help"))
849 MPI_Init(&argc, &argv);
850 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
851 MPI_Comm_size(MPI_COMM_WORLD, &size);
853 // MPI_Comm_set_attr(MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &tr);
855 /* Parse command line options */
857 c = getopt(argc, argv, "d:ghn:vV:");
872 iterations = atoi(optarg);
878 verbose = atoi(optarg);
884 printf("%s is running with %d process(es) %s\n",
885 argv[0], size, debug ? "in DEBUG mode" : "\b\b");
887 if (size < MAX_GLHOST) {
888 fprintf(stderr, "Error: "
889 "should be at least four processes to run the test!\n");
890 MPI_Abort(MPI_COMM_WORLD, 2);
893 if (testdir == NULL && rank == 0) {
894 fprintf(stderr, "Please specify a test directory! "
895 "(\"%s -h\" for help)\n", argv[0]);
896 MPI_Abort(MPI_COMM_WORLD, 2);
901 for (i = 0; i < iterations; ++i) {
903 printf("%s: Running test #%s(iter %d)\n",
904 timestamp(), argv[0], i);
906 parallel_grouplock();
907 MPI_Barrier(MPI_COMM_WORLD);
911 printf("%s: All tests passed!\n", timestamp());