===================================================================
--- /dev/null
+++ linux-2.6.32-504.3.3.el6.x86_64/fs/ext4/htree_lock.c
-@@ -0,0 +1,880 @@
+@@ -0,0 +1,891 @@
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
++retry:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may have been woken up spuriously,
++ * so we must re-check whether the lock was really granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
bottleneck by introducing a parallel locking mechanism for entire
ldiskfs directories. This work enables multiple application
threads to perform lookups, creates and unlinks in parallel.
-
+
This patch contains:
- pdirops support for ldiskfs
- integrate with osd-ldiskfs
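
The repeated hunk below fixes a classic spurious-wakeup bug: after
schedule() returns, the thread re-checks whether it is still on
lh_blocked_list instead of assuming the wakeup means the lock was
granted. As a point of comparison, here is a minimal userspace sketch
of the same re-check-the-predicate rule using POSIX threads; the names
granted, wait_for_grant() and grant() are illustrative only and do not
appear in the patch:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool granted;		/* the wait predicate */

/* Block until the lock is granted. pthread_cond_wait() may return
 * spuriously, so the predicate is tested in a loop -- the same rule
 * the htree_lock fix enforces with its retry label. */
static void wait_for_grant(void)
{
	pthread_mutex_lock(&lock);
	while (!granted)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

/* Grant the lock: set the predicate first, then wake the waiter. */
static void grant(void)
{
	pthread_mutex_lock(&lock);
	granted = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *waiter(void *arg)
{
	(void)arg;
	wait_for_grant();
	printf("lock granted\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	grant();
	pthread_join(&t, NULL);
	return 0;
}

The predicate sits inside a while loop for exactly the reason the
patch adds its retry label: a return from the wait primitive does not
by itself prove the condition holds.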
===================================================================
--- /dev/null
+++ linux-3.10.0-229.1.2.fc21.x86_64/fs/ext4/htree_lock.c
-@@ -0,0 +1,880 @@
+@@ -0,0 +1,891 @@
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
++retry:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may have been woken up spuriously,
++ * so we must re-check whether the lock was really granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
index 0000000..99e7375
--- /dev/null
+++ b/fs/ext4/htree_lock.c
-@@ -0,0 +1,880 @@
+@@ -0,0 +1,891 @@
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
++retry:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may have been woken up spuriously,
++ * so we must re-check whether the lock was really granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
===================================================================
--- /dev/null
+++ linux-4.15.0/fs/ext4/htree_lock.c
-@@ -0,0 +1,880 @@
+@@ -0,0 +1,891 @@
+/*
+ * fs/ext4/htree_lock.c
+ *
+ lck->lk_task = current;
+ list_add_tail(&lck->lk_blocked_list, &lhead->lh_blocked_list);
+
++retry:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
+ /* wait to be given the lock */
+ if (lck->lk_task != NULL)
+ schedule();
-+ /* granted, no doubt. wake up will set me RUNNING */
++ /* granted, no doubt. wake up will set me RUNNING.
++ * However, the thread may have been woken up spuriously,
++ * so we must re-check whether the lock was really granted. */
++ if (!list_empty(&lck->lk_blocked_list)) {
++ htree_spin_lock(lhead, HTREE_DEP_ROOT);
++ if (list_empty(&lck->lk_blocked_list)) {
++ htree_spin_unlock(lhead, HTREE_DEP_ROOT);
++ return 0;
++ }
++ goto retry;
++ }
+ return 0; /* without lh_lock */
+ }
+ lhead->lh_ngranted[lck->lk_mode]++;
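
For reference, the patched wait path now matches the canonical
open-coded kernel wait loop sketched below; set_current_state(),
schedule(), __set_current_state() and the task states are the standard
Linux scheduler APIs, while condition stands in for the list_empty()
test on lk_blocked_list:

	/* Canonical open-coded kernel wait loop (illustrative sketch,
	 * not part of the patch): re-test the condition on every pass
	 * so a spurious wakeup simply goes back to sleep. */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (condition)	/* e.g. list_empty(&lck->lk_blocked_list) */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);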