* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
cfs_list_t blwi_head;
int blwi_count;
cfs_completion_t blwi_comp;
- cfs_atomic_t blwi_ref_count;
+ int blwi_mode;
+ int blwi_mem_pressure;
};
#ifdef __KERNEL__
-static inline void ldlm_bl_work_item_get(struct ldlm_bl_work_item *blwi)
-{
- cfs_atomic_inc(&blwi->blwi_ref_count);
-}
-
-static inline void ldlm_bl_work_item_put(struct ldlm_bl_work_item *blwi)
-{
- if (cfs_atomic_dec_and_test(&blwi->blwi_ref_count))
- OBD_FREE(blwi, sizeof(*blwi));
-}
static inline int have_expired_locks(void)
{
if (!ldlm_request_cancel(req, dlm_req, 0))
req->rq_status = ESTALE;
- if (ptlrpc_reply(req) != 0)
- LBUG();
-
- RETURN(0);
+ RETURN(ptlrpc_reply(req));
}
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
}
#ifdef __KERNEL__
-static int __ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_bl_work_item *blwi,
- struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
- cfs_list_t *cancels, int count, int mode)
+static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
{
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
ENTRY;
- if (cancels && count == 0) {
- if (mode == LDLM_ASYNC)
- OBD_FREE(blwi, sizeof(*blwi));
- RETURN(0);
+ cfs_spin_lock(&blp->blp_lock);
+ if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+ /* add LDLM_FL_DISCARD_DATA requests to the priority list */
+ cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+ } else {
+ /* other blocking callbacks are added to the regular list */
+ cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
}
+ cfs_spin_unlock(&blp->blp_lock);
+
+ cfs_waitq_signal(&blp->blp_waitq);
+ /* cannot use blwi->blwi_mode as blwi could be already freed in
+ LDLM_ASYNC mode */
+ if (mode == LDLM_SYNC)
+ cfs_wait_for_completion(&blwi->blwi_comp);
+
+ RETURN(0);
+}
+
+static inline void init_blwi(struct ldlm_bl_work_item *blwi,
+ struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld,
+ cfs_list_t *cancels, int count,
+ struct ldlm_lock *lock,
+ int mode)
+{
cfs_init_completion(&blwi->blwi_comp);
- cfs_atomic_set(&blwi->blwi_ref_count, 1);
+ CFS_INIT_LIST_HEAD(&blwi->blwi_head);
+
+ if (cfs_memory_pressure_get())
+ blwi->blwi_mem_pressure = 1;
blwi->blwi_ns = ns;
+ blwi->blwi_mode = mode;
if (ld != NULL)
blwi->blwi_ld = *ld;
if (count) {
} else {
blwi->blwi_lock = lock;
}
-
- cfs_spin_lock(&blp->blp_lock);
- if (lock && lock->l_flags & LDLM_FL_DISCARD_DATA) {
- /* add LDLM_FL_DISCARD_DATA requests to the priority list */
- cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
- } else {
- /* other blocking callbacks are added to the regular list */
- cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
- }
- cfs_spin_unlock(&blp->blp_lock);
-
- if (mode == LDLM_SYNC) {
- /* keep ref count as object is on this stack for SYNC call */
- ldlm_bl_work_item_get(blwi);
- cfs_waitq_signal(&blp->blp_waitq);
- cfs_wait_for_completion(&blwi->blwi_comp);
- } else {
- cfs_waitq_signal(&blp->blp_waitq);
- }
-
- RETURN(0);
}
static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
- struct list_head *cancels, int count, int mode)
+ cfs_list_t *cancels, int count, int mode)
{
ENTRY;
+ if (cancels && count == 0)
+ RETURN(0);
+
if (mode == LDLM_SYNC) {
/* if it is synchronous call do minimum mem alloc, as it could
* be triggered from kernel shrinker
*/
struct ldlm_bl_work_item blwi;
memset(&blwi, 0, sizeof(blwi));
- /* have extra ref as this obj is on stack */
- RETURN(__ldlm_bl_to_thread(ns, &blwi, ld, lock, cancels, count, mode));
+ init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
+ RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
} else {
struct ldlm_bl_work_item *blwi;
OBD_ALLOC(blwi, sizeof(*blwi));
if (blwi == NULL)
RETURN(-ENOMEM);
+ init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);
- RETURN(__ldlm_bl_to_thread(ns, blwi, ld, lock, cancels, count, mode));
+ RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
}
}
/* added by ldlm_cleanup() */
break;
}
+ if (blwi->blwi_mem_pressure)
+ cfs_memory_pressure_set();
if (blwi->blwi_count) {
+ int count;
/* The special case when we cancel locks in lru
* asynchronously, we pass the list of locks here.
* Thus locks are marked LDLM_FL_CANCELING, but NOT
* canceled locally yet. */
- ldlm_cli_cancel_list_local(&blwi->blwi_head,
- blwi->blwi_count, 0);
- ldlm_cli_cancel_list(&blwi->blwi_head,
- blwi->blwi_count, NULL, 0);
+ count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
+ blwi->blwi_count,
+ LCF_BL_AST);
+ ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
} else {
ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
blwi->blwi_lock);
}
- cfs_complete(&blwi->blwi_comp);
- ldlm_bl_work_item_put(blwi);
+ if (blwi->blwi_mem_pressure)
+ cfs_memory_pressure_clr();
+
+ if (blwi->blwi_mode == LDLM_ASYNC)
+ OBD_FREE(blwi, sizeof(*blwi));
+ else
+ cfs_complete(&blwi->blwi_comp);
}
cfs_atomic_dec(&blp->blp_busy_threads);