A thread may be woken up improperly in the htree code. This patch
reschedules the thread to keep locking correct.
Change-Id: I6a8d1bbc0470b2577ca80faa304eb06f7913c218
Signed-off-by: Yang Sheng <ys@whamcloud.com>
Reviewed-on: https://review.whamcloud.com/34160
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: Jenkins
Reviewed-by: Wang Shilong <wshilong@ddn.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
===================================================================
--- /dev/null
+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
===================================================================
--- /dev/null
+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
+/*
+ * fs/ext4/htree_lock.c
+ *
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may be woken up spuriously, so
++ * re-check whether the lock was actually granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
bottleneck by introducing a parallel locking mechanism for entire
ldiskfs directories. This work will enable multiple application
threads to simultaneously lookup, create and unlink in parallel.
bottleneck by introducing a parallel locking mechanism for entire
ldiskfs directories. This work will enable multiple application
threads to simultaneously lookup, create and unlink in parallel.
This patch contains:
- pdirops support for ldiskfs
- integrate with osd-ldiskfs
This patch contains:
- pdirops support for ldiskfs
- integrate with osd-ldiskfs
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
+/*
+ * fs/ext4/htree_lock.c
+ *
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may be woken up spuriously, so
++ * re-check whether the lock was actually granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
+/*
+ * fs/ext4/htree_lock.c
+ *
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may be woken up spuriously, so
++ * re-check whether the lock was actually granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
+/*
+ * fs/ext4/htree_lock.c
+ *
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may be woken up spuriously, so
++ * re-check whether the lock was actually granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
+/*
+ * fs/ext4/htree_lock.c
+ *
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may be woken up spuriously, so
++ * re-check whether the lock was actually granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
index 0000000..99e7375
--- /dev/null
+++ b/fs/ext4/htree_lock.c
index 0000000..99e7375
--- /dev/null
+++ b/fs/ext4/htree_lock.c
+/*
+ * fs/ext4/htree_lock.c
+ *
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may be woken up spuriously, so
++ * re-check whether the lock was actually granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
index 0000000..99e7375
--- /dev/null
+++ b/fs/ext4/htree_lock.c
index 0000000..99e7375
--- /dev/null
+++ b/fs/ext4/htree_lock.c
+/*
+ * fs/ext4/htree_lock.c
+ *
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may be woken up spuriously, so
++ * re-check whether the lock was actually granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
===================================================================
--- /dev/null
+++ linux-4.15.0/fs/ext4/htree_lock.c
===================================================================
--- /dev/null
+++ linux-4.15.0/fs/ext4/htree_lock.c
+/*
+ * fs/ext4/htree_lock.c
+ *
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may be woken up spuriously, so
++ * re-check whether the lock was actually granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;