+ err = ldlm_cli_convert(&lockh1, LCK_EX, &flags);
+ CERROR("ldlm_cli_convert: %d\n", err);
+
+ lock = ldlm_handle2lock(&lockh1);
+ ldlm_lock_dump(D_OTHER, lock);
+ ldlm_lock_put(lock);
+
+ /* Need to decrement old mode. Don't bother incrementing new
+ * mode since the test is done.
+ */
+ if (err == ELDLM_OK)
+ ldlm_lock_decref(&lockh1, LCK_PR);
+
+ RETURN(err);
+}
+
+/* Pop the oldest test lock off the global lock_list, drop the lock
+ * reference it holds (at its currently-granted mode), and free the
+ * bookkeeping structure.  Returns 0 on success or if the list is empty.
+ */
+static int ldlm_do_decrement(void)
+{
+ struct ldlm_test_lock *lock_info;
+ struct ldlm_lock *lock;
+ int rc = 0;
+ ENTRY;
+
+ spin_lock(&ctl_lock);
+ if (list_empty(&lock_list)) {
+ CERROR("lock_list is empty\n");
+ spin_unlock(&ctl_lock);
+ RETURN(0);
+ }
+
+ /* delete from list */
+ lock_info = list_entry(lock_list.next,
+ struct ldlm_test_lock, l_link);
+ list_del(&lock_info->l_link);
+ num_locks--;
+ spin_unlock(&ctl_lock);
+
+ /* decrement and free the info; the handle may have gone stale,
+ * so check the lookup before dereferencing the lock. */
+ lock = ldlm_handle2lock(&lock_info->l_lockh);
+ if (lock != NULL) {
+ ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
+ ldlm_lock_put(lock);
+ } else {
+ CERROR("invalid lock handle\n");
+ rc = -EINVAL;
+ }
+
+ OBD_FREE(lock_info, sizeof(*lock_info));
+
+ RETURN(rc);
+}
+
+/* Enqueue one randomly-chosen extent lock: random resource in
+ * [0, num_resources), random mode in [1, LCK_NL], random extent within
+ * [0, num_extents).  LBUGs on enqueue failure since this is test code.
+ */
+static int ldlm_do_enqueue(struct ldlm_test_thread *thread)
+{
+ struct lustre_handle lockh;
+ struct ldlm_res_id res_id = { .name = {0} };
+ __u32 lock_mode;
+ struct ldlm_extent ext;
+ unsigned char random;
+ int flags = 0, rc = 0;
+ ENTRY;
+
+ /* Pick a random resource from 1 to num_resources */
+ get_random_bytes(&random, sizeof(random));
+ res_id.name[0] = random % num_resources;
+
+ /* Pick a random lock mode */
+ get_random_bytes(&random, sizeof(random));
+ lock_mode = random % LCK_NL + 1;
+
+ /* Pick a random extent; end is drawn from [start, num_extents) so
+ * the extent is always well-formed (start <= end). */
+ get_random_bytes(&random, sizeof(random));
+ ext.start = random % num_extents;
+ get_random_bytes(&random, sizeof(random));
+ ext.end = random %
+ (num_extents - (int)ext.start) + ext.start;
+
+ LDLM_DEBUG_NOLOCK("about to enqueue with resource "LPX64", mode %d,"
+ " extent "LPX64" -> "LPX64, res_id.name[0], lock_mode,
+ ext.start, ext.end);
+
+ /* NOTE: fixed HTML-entity corruption here — the connection handle
+ * argument is &regress_connh, not "(R)ress_connh". */
+ rc = ldlm_match_or_enqueue(&regress_connh, NULL,
+ thread->obddev->obd_namespace,
+ NULL, res_id, LDLM_EXTENT, &ext,
+ sizeof(ext), lock_mode, &flags,
+ ldlm_test_completion_ast,
+ ldlm_test_blocking_ast,
+ NULL, 0, &lockh);
+
+ atomic_inc(&locks_requested);
+
+ if (rc < 0) {
+ CERROR("ldlm_cli_enqueue: %d\n", rc);
+ LBUG();
+ }
+
+ RETURN(rc);
+}
+
+/* Convert the oldest held test lock to a random mode, then fix up the
+ * reference counts (add at the new mode, drop at the old granted mode).
+ * Returns 0 if no locks are held.
+ */
+static int ldlm_do_convert(void)
+{
+ __u32 lock_mode;
+ unsigned char random;
+ int flags = 0, rc = 0;
+ struct ldlm_test_lock *lock_info;
+ struct ldlm_lock *lock;
+ ENTRY;
+
+ /* delete from list; guard against an empty list, which would make
+ * list_entry() return a garbage pointer (ldlm_do_decrement already
+ * checks this — converting must too). */
+ spin_lock(&ctl_lock);
+ if (list_empty(&lock_list)) {
+ CERROR("lock_list is empty\n");
+ spin_unlock(&ctl_lock);
+ RETURN(0);
+ }
+ lock_info = list_entry(lock_list.next, struct ldlm_test_lock, l_link);
+ list_del(&lock_info->l_link);
+ num_locks--;
+ spin_unlock(&ctl_lock);
+
+ /* Pick a random lock mode */
+ get_random_bytes(&random, sizeof(random));
+ lock_mode = random % LCK_NL + 1;
+
+ /* do the conversion */
+ rc = ldlm_cli_convert(&lock_info->l_lockh, lock_mode, &flags);
+ atomic_inc(&converts_requested);
+
+ if (rc < 0) {
+ CERROR("ldlm_cli_convert: %d\n", rc);
+ LBUG();
+ }
+
+ /*
+ * Adjust reference counts.
+ * FIXME: This is technically a bit... wrong,
+ * since we don't know when/if the convert succeeded
+ */
+ ldlm_lock_addref(&lock_info->l_lockh, lock_mode);
+ lock = ldlm_handle2lock(&lock_info->l_lockh);
+ if (lock != NULL) {
+ ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
+ ldlm_lock_put(lock);
+ } else {
+ CERROR("invalid lock handle after convert\n");
+ }
+
+ OBD_FREE(lock_info, sizeof(*lock_info));
+
+ RETURN(rc);
+}
+
+
+
+/* Main loop of one test thread: daemonize, then repeatedly pick a random
+ * action (decrement / convert / enqueue) until SVC_STOPPING is set.  The
+ * chance of decrementing grows linearly with the number of held locks so
+ * the thread never exceeds max_locks for long.
+ */
+static int ldlm_test_main(void *data)
+{
+ struct ldlm_test_thread *thread = data;
+ unsigned long flags;
+ ENTRY;
+
+ lock_kernel();
+ daemonize();
+ /* NOTE: fixed HTML-entity corruption below — these are all
+ * "&current", not the "(currency)t" mojibake. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+#else
+ spin_lock_irqsave(&current->sigmask_lock, flags);
+ sigfillset(&current->blocked);
+ recalc_sigpending(current);
+ spin_unlock_irqrestore(&current->sigmask_lock, flags);
+#endif
+
+ sprintf(current->comm, "ldlm_test");
+ unlock_kernel();
+
+ /* Record that the thread is running */
+ thread->t_flags |= SVC_RUNNING;
+ wake_up(&thread->t_ctl_waitq);
+
+ while (!(thread->t_flags & SVC_STOPPING)) {
+ unsigned char random;
+ unsigned char dec_chance, con_chance;
+ unsigned char chance_left = 100;
+
+ spin_lock(&ctl_lock);
+ /* probability of decrementing increases linearly
+ * as more locks are held.
+ */
+ dec_chance = chance_left * num_locks / max_locks;
+ chance_left -= dec_chance;
+
+ /* FIXME: conversions temporarily disabled
+ * until they are working correctly.
+ */
+ /* con_chance = chance_left * num_locks / max_locks; */
+ con_chance = 0;
+ chance_left -= con_chance;
+ spin_unlock(&ctl_lock);
+
+ get_random_bytes(&random, sizeof(random));
+
+ random = random % 100;
+ if (random < dec_chance)
+ ldlm_do_decrement();
+ else if (random < (dec_chance + con_chance))
+ ldlm_do_convert();
+ else
+ ldlm_do_enqueue(thread);
+
+ LDLM_DEBUG_NOLOCK("locks requested: %d, "
+ "conversions requested %d",
+ atomic_read(&locks_requested),
+ atomic_read(&converts_requested));
+ LDLM_DEBUG_NOLOCK("locks granted: %d, "
+ "locks matched: %d",
+ atomic_read(&locks_granted),
+ atomic_read(&locks_matched));
+
+ spin_lock(&ctl_lock);
+ LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
+ num_locks);
+ spin_unlock(&ctl_lock);
+
+ /*
+ * We don't sleep after a lock being blocked, so let's
+ * make sure other things can run.
+ */
+ schedule();
+ }
+
+ thread->t_flags |= SVC_STOPPED;
+ wake_up(&thread->t_ctl_waitq);
+
+ RETURN(0);
+}
+
+/* Allocate a test-thread descriptor, put it on ctl_threads, spawn the
+ * kernel thread and wait until it reports SVC_RUNNING.  Returns 0 on
+ * success or a negative errno.
+ */
+static int ldlm_start_thread(struct obd_device *obddev,
+ struct lustre_handle *connh)
+{
+ struct ldlm_test_thread *test;
+ int rc;
+ ENTRY;
+
+ OBD_ALLOC(test, sizeof(*test));
+ if (test == NULL) {
+ LBUG();
+ RETURN(-ENOMEM);
+ }
+ init_waitqueue_head(&test->t_ctl_waitq);
+
+ test->obddev = obddev;
+
+ spin_lock(&ctl_lock);
+ list_add(&test->t_link, &ctl_threads);
+ spin_unlock(&ctl_lock);
+
+ rc = kernel_thread(ldlm_test_main, (void *)test,
+ CLONE_VM | CLONE_FS | CLONE_FILES);
+ if (rc < 0) {
+ CERROR("cannot start thread\n");
+ /* undo the list insertion and free the descriptor, otherwise
+ * ctl_threads keeps a dangling entry and "test" leaks. */
+ spin_lock(&ctl_lock);
+ list_del(&test->t_link);
+ spin_unlock(&ctl_lock);
+ OBD_FREE(test, sizeof(*test));
+ RETURN(rc);
+ }
+ wait_event(test->t_ctl_waitq, test->t_flags & SVC_RUNNING);
+
+ RETURN(0);
+}