Add a @batch parameter to the LRU cancel path: if at least one lock is
cancelled, try to cancel at least @batch locks in total. This
functionality will be used in later patches.
Limit the LRU cancel to a single thread at a time; however, do not apply
this limit to callers that pass a @max limit (ELC), as otherwise the LRU
may be left not fully cleaned up.
Signed-off-by: Vitaly Fertman <c17818@cray.com>
Change-Id: Ide21c4a2b2209b8a721249466ea1e651c8532c8a
HPE-bug-id: LUS-8678
Reviewed-on: https://es-gerrit.dev.cray.com/157067
Reviewed-by: Andriy Skulysh <c17819@cray.com>
Reviewed-by: Alexey Lyashkov <c17817@cray.com>
Tested-by: Alexander Lezhoev <c17454@cray.com>
Reviewed-on: https://review.whamcloud.com/39561
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Gu Zheng <gzheng@ddn.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
LDLM_NS_TYPE_MGT, /**< MGT namespace */
};
LDLM_NS_TYPE_MGT, /**< MGT namespace */
};
+enum ldlm_namespace_flags {
+ /**
+ * Flag to indicate the LRU cancel is in progress.
+ * Used to limit the process by 1 thread only.
+ */
+ LDLM_LRU_CANCEL = 0
+};
+
struct kobject ns_kobj; /* sysfs object */
struct completion ns_kobj_unregister;
struct kobject ns_kobj; /* sysfs object */
struct completion ns_kobj_unregister;
+
+ /**
+ * To avoid another ns_lock usage, a separate bitops field.
+ */
+ unsigned long ns_flags;
* redundant unused locks are canceled locally;
* - also cancel locally unused aged locks;
* - do not cancel more than \a max locks;
* redundant unused locks are canceled locally;
* - also cancel locally unused aged locks;
* - do not cancel more than \a max locks;
+ * - if some locks are cancelled, try to cancel at least \a batch locks
* - GET the found locks and add them into the \a cancels list.
*
* A client lock can be added to the l_bl_ast list only when it is
* - GET the found locks and add them into the \a cancels list.
*
* A client lock can be added to the l_bl_ast list only when it is
* discard those pages.
*/
static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* discard those pages.
*/
static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
- struct list_head *cancels, int min, int max,
+ struct list_head *cancels,
+ int min, int max, int batch,
enum ldlm_lru_flags lru_flags)
{
ldlm_cancel_lru_policy_t pf;
enum ldlm_lru_flags lru_flags)
{
ldlm_cancel_lru_policy_t pf;
int no_wait = lru_flags & LDLM_LRU_FLAG_NO_WAIT;
ENTRY;
int no_wait = lru_flags & LDLM_LRU_FLAG_NO_WAIT;
ENTRY;
+ /*
+ * Let only 1 thread to proceed. However, not for those which have the
+ * @max limit given (ELC), as LRU may be left not cleaned up in full.
+ */
+ if (max == 0) {
+ if (test_and_set_bit(LDLM_LRU_CANCEL, &ns->ns_flags))
+ RETURN(0);
+ } else if (test_bit(LDLM_LRU_CANCEL, &ns->ns_flags))
+ RETURN(0);
+
LASSERT(ergo(max, min <= max));
LASSERT(ergo(max, min <= max));
+ /* No sense to give @batch for ELC */
+ LASSERT(ergo(max, batch == 0));
if (!ns_connect_lru_resize(ns))
min = max_t(int, min, ns->ns_nr_unused - ns->ns_max_unused);
if (!ns_connect_lru_resize(ns))
min = max_t(int, min, ns->ns_nr_unused - ns->ns_max_unused);
+ /* If at least 1 lock is to be cancelled, cancel at least @batch locks */
+ if (min && min < batch)
+ min = batch;
+
pf = ldlm_cancel_lru_policy(ns, lru_flags);
LASSERT(pf != NULL);
pf = ldlm_cancel_lru_policy(ns, lru_flags);
LASSERT(pf != NULL);
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference, __FUNCTION__, current);
added++;
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference, __FUNCTION__, current);
added++;
+ /* Once a lock added, batch the requested amount */
+ if (min == 0)
+ min = batch;
+
+ if (max == 0)
+ clear_bit(LDLM_LRU_CANCEL, &ns->ns_flags);
+
- added = ldlm_prepare_lru_list(ns, cancels, min, max, lru_flags);
+ added = ldlm_prepare_lru_list(ns, cancels, min, max, 0, lru_flags);
if (added <= 0)
return added;
if (added <= 0)
return added;
* Just prepare the list of locks, do not actually cancel them yet.
* Locks are cancelled later in a separate thread.
*/
* Just prepare the list of locks, do not actually cancel them yet.
* Locks are cancelled later in a separate thread.
*/
- count = ldlm_prepare_lru_list(ns, &cancels, min, 0, lru_flags);
+ count = ldlm_prepare_lru_list(ns, &cancels, min, 0, 0, lru_flags);
rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
if (rc == 0)
RETURN(count);
rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
if (rc == 0)
RETURN(count);
ns->ns_stopping = 0;
ns->ns_reclaim_start = 0;
ns->ns_last_pos = &ns->ns_unused_list;
ns->ns_stopping = 0;
ns->ns_reclaim_start = 0;
ns->ns_last_pos = &ns->ns_unused_list;
rc = ldlm_namespace_sysfs_register(ns);
if (rc) {
rc = ldlm_namespace_sysfs_register(ns);
if (rc) {