]) # LC_BIO_BI_PHYS_SEGMENTS
#
+# LC_HAVE_FLUSH_DELAYED_FPUT
+#
+# kernel commit v3.5-rc6-284-g4a9d4b024a31 adds flush_delayed_fput()
+# kernel commit v5.3-rc2-13-g7239a40ca8bf exports flush_delayed_fput()
+#
+AC_DEFUN([LC_HAVE_FLUSH_DELAYED_FPUT], [
+LB_CHECK_EXPORT([flush_delayed_fput], [fs/file_table.c],
+ [AC_DEFINE(HAVE_FLUSH_DELAYED_FPUT, 1,
+ [flush_delayed_fput() is exported by the kernel])])
+]) # LC_HAVE_FLUSH_DELAYED_FPUT
+
+#
# LC_LM_COMPARE_OWNER_EXISTS
#
# kernel 5.3-rc3 commit f85d93385e9fe6886a751f647f6812a89bf6bee3
# 5.3
LC_BIO_BI_PHYS_SEGMENTS
+ LC_HAVE_FLUSH_DELAYED_FPUT
LC_LM_COMPARE_OWNER_EXISTS
# 5.5
return 0;
}
+/* cfs_flush_delayed_fput(): drain the kernel's delayed-fput work so that
+ * descriptors opened via alloc_file_pseudo() are released promptly.
+ * When the kernel exports flush_delayed_fput() this maps straight onto it;
+ * otherwise a function pointer is declared and resolved at runtime through
+ * cfs_kallsyms_lookup_name() in the service thread startup path.
+ * NOTE(review): if that runtime lookup fails the pointer stays NULL and a
+ * call through it would oops — confirm the idle-path caller guards for NULL.
+ * Without server support this is compiled out to a no-op.
+ */
+#ifdef HAVE_SERVER_SUPPORT
+# ifdef HAVE_FLUSH_DELAYED_FPUT
+# define cfs_flush_delayed_fput() flush_delayed_fput()
+# else
+void (*cfs_flush_delayed_fput)(void);
+# endif /* HAVE_FLUSH_DELAYED_FPUT */
+#else /* !HAVE_SERVER_SUPPORT */
+#define cfs_flush_delayed_fput() do {} while (0)
+#endif /* HAVE_SERVER_SUPPORT */
+
/**
* Main thread body for service threads.
* Waits in a loop waiting for new requests to process to appear.
CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
svcpt->scp_nthrs_running);
+#ifdef HAVE_SERVER_SUPPORT
+#ifndef HAVE_FLUSH_DELAYED_FPUT
+ if (unlikely(cfs_flush_delayed_fput == NULL))
+ cfs_flush_delayed_fput =
+ cfs_kallsyms_lookup_name("flush_delayed_fput");
+#endif
+#endif
/* XXX maintain a list of all managed devices: insert here */
while (!ptlrpc_thread_stopping(thread)) {
+ bool idle = true;
+
if (ptlrpc_wait_event(svcpt, thread))
break;
if (ptlrpc_threads_need_create(svcpt)) {
/* Ignore return code - we tried... */
ptlrpc_start_thread(svcpt, 0);
+ idle = false;
}
/* reset le_ses to initial state */
if (counter++ < 100)
continue;
counter = 0;
+ idle = false;
}
if (ptlrpc_at_check(svcpt))
lu_context_enter(&env->le_ctx);
ptlrpc_server_handle_request(svcpt, thread);
lu_context_exit(&env->le_ctx);
+ idle = false;
}
if (ptlrpc_rqbd_pending(svcpt) &&
svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
svcpt->scp_nrqbds_posted);
+ idle = false;
}
+
+ /* If nothing to do, flush old alloc_file_pseudo() descriptors.
+ * This has internal atomicity so it is OK to call often.
+ * We could also do other idle tasks at this time.
+ */
+ if (idle)
+ cfs_flush_delayed_fput();
+
/*
* If the number of threads has been tuned downward and this
* thread should be stopped, then stop in reverse order so the