1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/tests/parallel_grouplock.c
38 * Author: You Feng <youfeng@clusterfs.com>
45 #include <sys/types.h>
48 #include <sys/ioctl.h>
52 #include <lustre/lustre_user.h>
53 #include <lustre/tests/lp_utils.h>
55 #define LPGL_FILEN 700000
56 #define LPGL_TEST_ITEMS 7
60 /* waiting time in 0.1 s */
61 #define MAX_WAITING_TIME 20
68 * process1 attempts CW(gid=1) -- granted immediately
69 * process2 attempts PR -- blocked, goes on waiting list
70 * process3 attempts CW(gid=1) -> should be granted, but may go on
73 void grouplock_test1(char *filename, int fd, char *errmsg)
75 int rc, count, gid = 1;
77 char zeros[LPGL_FILEN];
78 MPI_Request req1, req2;
82 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
83 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
89 MPI_Barrier(MPI_COMM_WORLD);
92 memset(zeros, 0x0, sizeof(zeros));
93 lseek(fd, 0, SEEK_SET);
95 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
96 count = read(fd, buf, sizeof(buf));
97 if (count != sizeof(buf)) {
99 dump_diff(zeros, buf, count, 0);
100 sprintf(errmsg, "read of file %s return %d",
104 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
110 /* Wait for reading task to progress, this is probably somewhat
111 racey, though, may be adding usleep here would make things
114 MPI_Recv(&temp, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
116 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
117 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
121 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
125 int iter = MAX_WAITING_TIME;
128 /* reading task will tell us when it completes */
129 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
130 /* 2nd locking task will tell us when it completes */
131 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
136 FAIL("2nd locking task is not progressing\n");
139 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
140 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
142 FAIL("PR task progressed even though GROUP lock"
148 /* Now we need to release the lock */
150 if (rank == 0 || rank == 2) {
151 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
152 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
159 int iter = MAX_WAITING_TIME;
165 FAIL("reading task is not progressing even "
166 "though GROUP lock was released\n");
170 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
174 MPI_Barrier(MPI_COMM_WORLD);
179 * process1 attempts CW(gid=1) -- granted immediately
180 * process2 attempts CW(gid=2) -- blocked
181 * process3 attempts PR -- blocked
182 * process4 attempts CW(gid=2) -- blocked
183 * process1 releases CW(gid=1) -- this allows process2's CW lock to be granted
184 process3 remains blocked
186 void grouplock_test2(char *filename, int fd, char *errmsg)
188 int rc, count, gid = 1;
189 char buf[LPGL_FILEN];
190 char zeros[LPGL_FILEN];
191 MPI_Request req1, req2, req3;
192 int temp1, temp2, temp3;
195 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
196 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
202 MPI_Barrier(MPI_COMM_WORLD);
204 if (rank == 1 || rank == 3) {
207 MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
211 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
212 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
216 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
220 memset(zeros, 0x0, sizeof(zeros));
221 lseek(fd, 0, SEEK_SET);
223 MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
224 count = read(fd, buf, sizeof(buf));
225 if (count != sizeof(buf)) {
227 dump_diff(zeros, buf, count, 0);
228 sprintf(errmsg, "read of file %s return %d",
232 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
236 int iter = MAX_WAITING_TIME;
237 int flag1, flag2, flag3;
239 /* 2nd locking task will tell us when it completes */
240 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
241 /* 3nd locking task will tell us when it completes */
242 MPI_Irecv(&temp2, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req2);
243 /* reading task will tell us when it completes */
244 MPI_Irecv(&temp3, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req3);
249 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
250 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
251 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
253 FAIL("PR task progressed even though GROUP lock"
256 if (flag1 || flag2) {
257 FAIL("GROUP (gid=2) task progressed even though"
258 " GROUP (gid=1) lock is held\n");
263 /* Now let's release first lock */
264 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
265 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
269 iter = MAX_WAITING_TIME;
273 FAIL("GROUP(gid=2) tasks are not progressing\n");
276 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
277 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
278 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
280 fprintf(stderr, "task1 %d, task3 %d\n", flag1,
282 FAIL("PR task progressed even though GROUP lock"
283 " was on the queue task\n");
285 } while (!(flag1 && flag2));
286 MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
287 MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
290 if (rank == 1 || rank == 3) {
291 /* Do not release the locks until task 0 is ready to watch
292 for reading task only */
293 MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
295 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
296 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
303 int iter = MAX_WAITING_TIME;
309 FAIL("reading task is not progressing even "
310 "though GROUP locks are released\n");
314 MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
318 MPI_Barrier(MPI_COMM_WORLD);
323 * process1 attempts CW(gid=1) -- granted
324 * process2 attempts PR -- blocked
325 * process3 attempts CW(gid=1) -> should be granted
326 * process3 releases CW(gid=1)
327 * process2 should remain blocked
328 * process1 releases CW(gid=1)
329 * process2's PR should be granted
331 * This is a lot like test1.
333 void grouplock_test3(char *filename, int fd, char *errmsg)
335 int rc, count, gid = 1;
336 char buf[LPGL_FILEN];
337 char zeros[LPGL_FILEN];
338 MPI_Request req1, req2;
342 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
343 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
349 MPI_Barrier(MPI_COMM_WORLD);
352 memset(zeros, 0x0, sizeof(zeros));
353 lseek(fd, 0, SEEK_SET);
355 MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
356 count = read(fd, buf, sizeof(buf));
357 if (count != sizeof(buf)) {
359 dump_diff(zeros, buf, count, 0);
360 sprintf(errmsg, "read of file %s return %d",
364 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
370 /* Wait for reading task to progress, this is probably somewhat
371 racey, though, may be adding usleep here would make things
374 MPI_Recv(&temp, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
376 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
377 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
381 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
385 int iter = MAX_WAITING_TIME;
388 /* reading task will tell us when it completes */
389 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
390 /* 2nd locking task will tell us when it completes */
391 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
396 FAIL("2nd locking task is not progressing\n");
399 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
400 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
402 FAIL("PR task progressed even though GROUP lock"
408 /* Now we need to release the lock */
411 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
412 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
419 int iter = MAX_WAITING_TIME;
425 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
426 } while (!flag1 && iter);
428 FAIL("reading task is progressing even "
429 "though GROUP lock was not fully released\n");
432 iter = MAX_WAITING_TIME;
434 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
435 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
443 FAIL("reading task is not progressing even "
444 "though GROUP lock was released\n");
448 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
452 MPI_Barrier(MPI_COMM_WORLD);
457 * process1 attempts CW(gid=1) -- granted
458 * process2 attempts PR on non-blocking fd -> should return -EWOULDBLOCK
459 * process3 attempts CW(gid=2) on non-blocking fd -> should return -EWOULDBLOCK
461 void grouplock_test4(char *filename, int fd, char *errmsg)
463 int rc, count, gid = 1;
464 char buf[LPGL_FILEN];
465 char zeros[LPGL_FILEN];
468 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
469 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
475 MPI_Barrier(MPI_COMM_WORLD);
478 memset(zeros, 0x0, sizeof(zeros));
479 lseek(fd, 0, SEEK_SET);
481 count = read(fd, buf, sizeof(buf));
482 if (count != sizeof(buf)) {
483 if (count == -1 && errno == EWOULDBLOCK) {
484 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
489 dump_diff(zeros, buf, count, 0);
490 sprintf(errmsg, "read of file %s return %d",
494 FAIL("PR lock succeed while incompatible "
495 "GROUP LOCK (gid=1) is still held\n");
501 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
502 if (errno == EWOULDBLOCK) {
503 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
507 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
511 FAIL("GROUP_LOCK (gid=2) succeed while incompatible "
512 "GROUP LOCK (gid=1) is still held\n");
518 int iter = MAX_WAITING_TIME;
520 MPI_Request req1, req2;
523 /* reading task will tell us when it completes */
524 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
525 /* 2nd locking task will tell us when it completes */
526 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
531 FAIL("non-blocking tasks are not progressing\n");
534 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
535 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
536 } while (!(flag2 && flag1));
538 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
539 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s", filename);
546 * process1 attempts CW(gid=1) -- granted
547 * process2 attempts CW(gid=2) -- blocked
548 * process3 attempts CW(gid=2) -- blocked
549 * process1 releases CW(gid=1)
550 * process2's CW(gid=2) should be granted
551 * process3's CW(gid=2) should be granted
553 * This is pretty much like test 3
555 void grouplock_test5(char *filename, int fd, char *errmsg)
558 MPI_Request req1, req2;
562 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
563 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
569 MPI_Barrier(MPI_COMM_WORLD);
571 if (rank == 2 || rank == 1) {
573 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
574 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
578 MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
582 int iter = MAX_WAITING_TIME;
585 /* 3rd locking task will tell us when it completes */
586 MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
587 /* 2nd locking task will tell us when it completes */
588 MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);
593 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
594 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
595 } while (!flag2 && !flag1 && iter);
597 FAIL("incomptible locking tasks are progressing\n");
601 /* Now we need to release the lock */
604 int iter = MAX_WAITING_TIME;
606 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
607 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
615 FAIL("locking tasks are not progressing even "
616 "though incompatible lock released\n");
619 MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
620 MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
621 } while (!(flag1 && flag2));
625 if ( rank == 1 || rank == 2) {
626 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
627 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
633 MPI_Barrier(MPI_COMM_WORLD);
637 * process1 attempts CW(gid=1) -- granted
638 * process2 attempts PW -- blocked
639 * process2 attempts CW(gid=2) -- blocked
640 * process3 attempts CW(gid=2) -- blocked
641 * process1 releases CW(gid=1)
642 * process2's CW(gid=2) should be granted
643 * process3's CW(gid=2) should be granted
645 * after process1 release CW(gid=1), there are two pathes:
646 * path 1. process2 get PW
647 * path 2. process3 get CW(gid=2)
649 * green: Also about test6 - by definition if P* and CW lock are waiting,
650 * CW lock have bigger priority and should be granted first when it becomes
651 * possible. So after process1 releases its CW lock, process3 should always
652 * get CW lock, and when it will release it, process 2 will proceed with read
653 * and then with getting CW lock
655 * XXX This test does not make any sence at all the way it is described right
656 * now, hence disabled.
658 void grouplock_test6(char *filename, int fd, char *errmsg)
662 /* Just test some error paths with invalid requests */
663 void grouplock_errorstest(char *filename, int fd, char *errmsg)
668 /* To not do lots of separate tests with lots of fd opening/closing,
669 different parts of this test are performed in different processes */
671 if (rank == 0 || rank == 1 ) {
672 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
673 sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
679 /* second group lock on same fd, same gid */
681 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
682 if (errno != EINVAL) {
683 sprintf(errmsg, "Double GROUP lock failed with errno %d instead of EINVAL\n", errno);
687 FAIL("Taking second GROUP lock on same fd succeed\n");
691 /* second group lock on same fd, different gid */
693 if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid + 1)) == -1) {
694 if (errno != EINVAL) {
695 sprintf(errmsg, "Double GROUP lock different gid failed with errno %d instead of EINVAL\n", errno);
699 FAIL("Taking second GROUP lock on same fd, different gid, succeed\n");
703 /* GROUP unlock with wrong gid */
704 if (rank == 0 || rank == 1) {
705 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid + 1)) == -1) {
706 if (errno != EINVAL) {
707 sprintf(errmsg, "GROUP unlock with wrong gid failed with errno %d instead of EINVAL\n",
712 FAIL("GROUP unlock with wrong gid succeed\n");
716 if (rank == 0 || rank == 1) {
717 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
718 sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
724 /* unlock of never locked fd */
726 if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
727 if (errno != EINVAL) {
728 sprintf(errmsg, "GROUP unlock on never locked fd failed with errno %d instead of EINVAL\n",
733 FAIL("GROUP unlock on never locked fd succeed\n");
738 void grouplock_file(char *name, int items)
741 char filename[MAX_FILENAME_LEN];
742 char errmsg[MAX_FILENAME_LEN+20];
744 sprintf(filename, "%s/%s", testdir, name);
747 if ((fd = open(filename, O_RDWR | O_NONBLOCK)) == -1) {
748 sprintf(errmsg, "open of file %s", filename);
751 } else if ((fd = open(filename, O_RDWR)) == -1) {
752 sprintf(errmsg, "open of file %s", filename);
756 MPI_Barrier(MPI_COMM_WORLD);
760 grouplock_test1(filename, fd, errmsg);
763 grouplock_test2(filename, fd, errmsg);
766 grouplock_test3(filename, fd, errmsg);
769 grouplock_test4(filename, fd, errmsg);
772 grouplock_test5(filename, fd, errmsg);
775 grouplock_test6(filename, fd, errmsg);
778 grouplock_errorstest(filename, fd, errmsg);
781 sprintf(errmsg, "wrong test case number %d (should be <= %d)",
782 items, LPGL_TEST_ITEMS);
786 MPI_Barrier(MPI_COMM_WORLD);
788 if (close(fd) == -1) {
789 sprintf(errmsg, "close of file %s", filename);
795 void parallel_grouplock(void)
799 for (i = 1;i <= LPGL_TEST_ITEMS;++i) {
801 create_file("parallel_grouplock", LPGL_FILEN, 0);
805 grouplock_file("parallel_grouplock", i);
809 remove_file("parallel_grouplock");
814 void usage(char *proc)
819 printf("Usage: %s [-h] -d <testdir>\n", proc);
820 printf(" [-n \"13\"] [-v] [-V #] [-g]\n");
821 printf("\t-h: prints this help message\n");
822 printf("\t-d: the directory in which the tests will run\n");
823 printf("\t-n: repeat test # times\n");
824 printf("\t-v: increase the verbositly level by 1\n");
825 printf("\t-V: select a specific verbosity level\n");
826 printf("\t-g: debug mode\n");
830 if (i) MPI_Finalize();
834 int main(int argc, char *argv[])
837 int i, iterations = 1;
839 /* Check for -h parameter before MPI_Init so the binary can be
840 called directly, without, for instance, mpirun */
841 for (i = 1; i < argc; ++i) {
842 if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help"))
846 MPI_Init(&argc, &argv);
847 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
848 MPI_Comm_size(MPI_COMM_WORLD, &size);
850 // MPI_Comm_set_attr(MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &tr);
852 /* Parse command line options */
854 c = getopt(argc, argv, "d:ghn:vV:");
869 iterations = atoi(optarg);
875 verbose = atoi(optarg);
881 printf("%s is running with %d process(es) %s\n",
882 argv[0], size, debug ? "in DEBUG mode" : "\b\b");
884 if (size < MAX_GLHOST) {
885 fprintf(stderr, "Error: "
886 "should be at least four processes to run the test!\n");
887 MPI_Abort(MPI_COMM_WORLD, 2);
890 if (testdir == NULL && rank == 0) {
891 fprintf(stderr, "Please specify a test directory! "
892 "(\"%s -h\" for help)\n", argv[0]);
893 MPI_Abort(MPI_COMM_WORLD, 2);
898 for (i = 0; i < iterations; ++i) {
900 printf("%s: Running test #%s(iter %d)\n",
901 timestamp(), argv[0], i);
903 parallel_grouplock();
904 MPI_Barrier(MPI_COMM_WORLD);
908 printf("%s: All tests passed!\n", timestamp());