cfs_list_t blwi_head;
int blwi_count;
cfs_completion_t blwi_comp;
- cfs_atomic_t blwi_ref_count;
+ int blwi_mode;
+ int blwi_mem_pressure;
};
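A minimal sketch of the ownership model the two new fields encode (illustration only, not patch text):

	/*
	 * Lifecycles distinguished by blwi_mode:
	 *   LDLM_SYNC  - blwi lives on the caller's stack; the caller
	 *                blocks on blwi_comp, so the worker only signals
	 *                completion and must never free the item.
	 *   LDLM_ASYNC - blwi is OBD_ALLOC()'d and the caller returns at
	 *                once; the worker owns the item and OBD_FREE()s it.
	 * blwi_mem_pressure snapshots cfs_memory_pressure_get() in the
	 * enqueuing task so the worker thread can reproduce that state.
	 */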
#ifdef __KERNEL__
-static inline void ldlm_bl_work_item_get(struct ldlm_bl_work_item *blwi)
-{
- cfs_atomic_inc(&blwi->blwi_ref_count);
-}
-
-static inline void ldlm_bl_work_item_put(struct ldlm_bl_work_item *blwi)
-{
- if (cfs_atomic_dec_and_test(&blwi->blwi_ref_count))
- OBD_FREE(blwi, sizeof(*blwi));
-}
static inline int have_expired_locks(void)
{
if (!ldlm_request_cancel(req, dlm_req, 0))
req->rq_status = ESTALE;
- if (ptlrpc_reply(req) != 0)
- LBUG();
-
- RETURN(0);
+ RETURN(ptlrpc_reply(req));
}
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
cfs_spin_unlock(&blp->blp_lock);
cfs_waitq_signal(&blp->blp_waitq);
+
+ /* cannot use blwi->blwi_mode here, as blwi may already have been
+    freed by the worker thread in LDLM_ASYNC mode */
if (mode == LDLM_SYNC)
- /* keep ref count as object is on this stack for SYNC call */
cfs_wait_for_completion(&blwi->blwi_comp);
RETURN(0);
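The ordering above is the subtle point: once blp_waitq is signalled, an LDLM_ASYNC item may already have been consumed and freed by the worker, so blwi must not be touched again. A minimal sketch of the race the comment guards against (hypothetical code, not in the patch):

	cfs_waitq_signal(&blp->blp_waitq);
	/* WRONG: for an LDLM_ASYNC item the worker may already have
	 * called OBD_FREE(blwi), so this read is a use after free */
	if (blwi->blwi_mode == LDLM_SYNC)
		cfs_wait_for_completion(&blwi->blwi_comp);

Hence the mode is passed to __ldlm_bl_to_thread() as an argument rather than re-read from the item.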
struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld,
cfs_list_t *cancels, int count,
- struct ldlm_lock *lock)
+ struct ldlm_lock *lock,
+ int mode)
{
cfs_init_completion(&blwi->blwi_comp);
- /* set ref count to 1 initially, supposed to be released in
- * ldlm_bl_thread_main(), if not allocated on the stack */
- cfs_atomic_set(&blwi->blwi_ref_count, 1);
CFS_INIT_LIST_HEAD(&blwi->blwi_head);
+ if (cfs_memory_pressure_get())
+ blwi->blwi_mem_pressure = 1;
+
blwi->blwi_ns = ns;
+ blwi->blwi_mode = mode;
if (ld != NULL)
blwi->blwi_ld = *ld;
if (count) {
*/
struct ldlm_bl_work_item blwi;
memset(&blwi, 0, sizeof(blwi));
- init_blwi(&blwi, ns, ld, cancels, count, lock);
- /* take extra ref as this obj is on stack */
- ldlm_bl_work_item_get(&blwi);
- RETURN(__ldlm_bl_to_thread(&blwi, mode));
+ init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
+ RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
} else {
struct ldlm_bl_work_item *blwi;
OBD_ALLOC(blwi, sizeof(*blwi));
if (blwi == NULL)
RETURN(-ENOMEM);
- init_blwi(blwi, ns, ld, cancels, count, lock);
- RETURN(__ldlm_bl_to_thread(blwi, mode));
+ init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);
+ RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
}
}
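For reference, a hedged caller-side sketch, assuming the enclosing function here is ldlm_bl_to_thread() taking (ns, ld, lock, cancels, count, mode); the inline fallback is an assumption modelled on the usual AST-handling pattern, not part of this patch:

	/* Hypothetical caller, for illustration only */
	if (ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC) != 0)
		/* no blocking thread took the item: handle the AST inline */
		ldlm_handle_bl_callback(ns, ld, lock);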
/* added by ldlm_cleanup() */
break;
}
+ if (blwi->blwi_mem_pressure)
+ cfs_memory_pressure_set();
if (blwi->blwi_count) {
+ int count;
/* The special case when we cancel locks in lru
* asynchronously, we pass the list of locks here.
- * Thus lock is marked LDLM_FL_CANCELING, and already
- * canceled locally. */
- ldlm_cli_cancel_list(&blwi->blwi_head,
- blwi->blwi_count, NULL, 0);
+ * Thus the locks are marked LDLM_FL_CANCELING, but are
+ * NOT yet canceled locally. */
+ count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
+ blwi->blwi_count,
+ LCF_BL_AST);
+ ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
} else {
ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
blwi->blwi_lock);
}
- cfs_complete(&blwi->blwi_comp);
- ldlm_bl_work_item_put(blwi);
+ if (blwi->blwi_mem_pressure)
+ cfs_memory_pressure_clr();
+
+ if (blwi->blwi_mode == LDLM_ASYNC)
+ OBD_FREE(blwi, sizeof(*blwi));
+ else
+ cfs_complete(&blwi->blwi_comp);
}
cfs_atomic_dec(&blp->blp_busy_threads);
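Two notes on the worker-loop changes above. Cancellation is now two-phase: ldlm_cli_cancel_list_local() cancels the LCF_BL_AST-marked locks locally and returns how many still need server-side cancellation, which ldlm_cli_cancel_list() then batches into cancel RPCs; this matches the corrected comment, since the enqueuer no longer cancels locally itself. The cfs_memory_pressure_set()/clr() bracket replays the enqueuing task's memory-pressure state in the worker; on Linux these libcfs wrappers act on the PF_MEMALLOC task flag, roughly as sketched below (assumed definitions, for illustration; the real ones live in libcfs):

	/*
	 * Assumed behaviour of the wrappers, per task:
	 *   cfs_memory_pressure_get() : current->flags & PF_MEMALLOC
	 *   cfs_memory_pressure_set() : current->flags |= PF_MEMALLOC
	 *   cfs_memory_pressure_clr() : current->flags &= ~PF_MEMALLOC
	 * A cancel queued by a task allocating under memory pressure is
	 * thus serviced with the same allocation privileges.
	 */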
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_resource_iterate);
EXPORT_SYMBOL(ldlm_cancel_resource_local);
+EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list);
/* ldlm_lockd.c */