void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_cli_cancel(struct lustre_handle *lockh);
+int ldlm_cli_cancel(struct lustre_handle *lockh,
+ ldlm_cancel_flags_t cancel_flags);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
ldlm_cancel_flags_t flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
}
/* ldlm_request.c */
-typedef enum {
- LDLM_ASYNC,
- LDLM_SYNC,
-} ldlm_sync_t;
-
/* Cancel lru flag, it indicates we cancel aged locks. */
enum {
LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */
* sending nor waiting for any rpcs) */
};
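+/* note: the LDLM_CANCEL_* values above select *which* unused locks get
+ * cancelled and travel in the "int flags" argument, while
+ * ldlm_cancel_flags_t (LCF_*), which replaces the removed ldlm_sync_t,
+ * controls *how* cancellation is performed (LCF_ASYNC = asynchronous,
+ * 0 = synchronous). */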
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
- int flags);
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
+ ldlm_cancel_flags_t cancel_flags, int flags);
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
cfs_list_t *cancels, int count, int max,
ldlm_cancel_flags_t cancel_flags, int flags);
/* ldlm_lockd.c */
int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
struct ldlm_lock *lock);
-int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- cfs_list_t *cancels, int count, int mode);
+int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld,
+ cfs_list_t *cancels, int count,
+ ldlm_cancel_flags_t cancel_flags);
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
* enqueue. */
if (!exp_connect_cancelset(lock->l_conn_export) &&
!ns_connect_lru_resize(ns))
- ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
} else {
LDLM_DEBUG(lock, "do not add lock into lru list");
unlock_res_and_lock(lock);
cfs_list_t blwi_head;
int blwi_count;
struct completion blwi_comp;
- int blwi_mode;
+ ldlm_cancel_flags_t blwi_flags;
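+ /* note: when LCF_ASYNC is set in blwi_flags, the work item is freed
+ * by the blocking thread that processes it; otherwise the caller
+ * waits on blwi_comp and owns the memory */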
int blwi_mem_pressure;
};
}
#ifdef __KERNEL__
-static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
+static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
+ ldlm_cancel_flags_t cancel_flags)
{
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
ENTRY;
cfs_waitq_signal(&blp->blp_waitq);
- /* can not use blwi->blwi_mode as blwi could be already freed in
- LDLM_ASYNC mode */
- if (mode == LDLM_SYNC)
+ /* cannot check blwi->blwi_flags here, as blwi may already have been
+ freed in LCF_ASYNC mode */
+ if (!(cancel_flags & LCF_ASYNC))
wait_for_completion(&blwi->blwi_comp);
RETURN(0);
}
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
- struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld,
- cfs_list_t *cancels, int count,
- struct ldlm_lock *lock,
- int mode)
+ struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld,
+ cfs_list_t *cancels, int count,
+ struct ldlm_lock *lock,
+ ldlm_cancel_flags_t cancel_flags)
{
init_completion(&blwi->blwi_comp);
CFS_INIT_LIST_HEAD(&blwi->blwi_head);
blwi->blwi_mem_pressure = 1;
blwi->blwi_ns = ns;
- blwi->blwi_mode = mode;
+ blwi->blwi_flags = cancel_flags;
if (ld != NULL)
blwi->blwi_ld = *ld;
if (count) {
* call ->l_blocking_ast itself.
*/
static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
- cfs_list_t *cancels, int count, int mode)
+ struct ldlm_lock_desc *ld,
+ struct ldlm_lock *lock,
+ cfs_list_t *cancels, int count,
+ ldlm_cancel_flags_t cancel_flags)
{
- ENTRY;
+ ENTRY;
- if (cancels && count == 0)
- RETURN(0);
+ if (cancels && count == 0)
+ RETURN(0);
- if (mode == LDLM_SYNC) {
- /* if it is synchronous call do minimum mem alloc, as it could
- * be triggered from kernel shrinker
- */
- struct ldlm_bl_work_item blwi;
- memset(&blwi, 0, sizeof(blwi));
- init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
- RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
- } else {
- struct ldlm_bl_work_item *blwi;
- OBD_ALLOC(blwi, sizeof(*blwi));
- if (blwi == NULL)
- RETURN(-ENOMEM);
- init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);
+ if (cancel_flags & LCF_ASYNC) {
+ struct ldlm_bl_work_item *blwi;
- RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
- }
+ OBD_ALLOC(blwi, sizeof(*blwi));
+ if (blwi == NULL)
+ RETURN(-ENOMEM);
+ init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
+
+ RETURN(__ldlm_bl_to_thread(blwi, cancel_flags));
+ } else {
+ /* if it is a synchronous call, do the minimum memory allocation,
+ * as it could be triggered from the kernel shrinker
+ */
+ struct ldlm_bl_work_item blwi;
+
+ memset(&blwi, 0, sizeof(blwi));
+ init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
+ RETURN(__ldlm_bl_to_thread(&blwi, cancel_flags));
+ }
}
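+/* note: with LCF_ASYNC the blwi above is heap-allocated and released by
+ * the blocking thread, so __ldlm_bl_to_thread() must not touch it after
+ * queueing; in the synchronous case the on-stack blwi stays valid until
+ * wait_for_completion() returns. */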
#endif
int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct ldlm_lock *lock)
+ struct ldlm_lock *lock)
{
#ifdef __KERNEL__
- RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
+ return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
#else
- RETURN(-ENOSYS);
+ return -ENOSYS;
#endif
}
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- cfs_list_t *cancels, int count, int mode)
+ cfs_list_t *cancels, int count,
+ ldlm_cancel_flags_t cancel_flags)
{
#ifdef __KERNEL__
- RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
+ return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
#else
- RETURN(-ENOSYS);
+ return -ENOSYS;
#endif
}
* which the server has already started a blocking callback on. */
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
- rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
+ rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
if (rc < 0)
CERROR("ldlm_cli_cancel: %d\n", rc);
}
count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
blwi->blwi_count,
LCF_BL_AST);
- ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
+ ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
+ blwi->blwi_flags);
} else {
ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
blwi->blwi_lock);
if (blwi->blwi_mem_pressure)
cfs_memory_pressure_clr();
- if (blwi->blwi_mode == LDLM_ASYNC)
- OBD_FREE(blwi, sizeof(*blwi));
- else
+ if (blwi->blwi_flags & LCF_ASYNC)
+ OBD_FREE(blwi, sizeof(*blwi));
+ else
complete(&blwi->blwi_comp);
}
* It may be called when SLV has changed much, this is why we do not
* take into account pl->pl_recalc_time here.
*/
- RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_SYNC,
- LDLM_CANCEL_LRUR));
+ RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC,
+ LDLM_CANCEL_LRUR));
}
/**
spin_unlock(&ns->ns_lock);
if (nr) {
- canceled = ldlm_cancel_lru(ns, nr, LDLM_ASYNC,
- LDLM_CANCEL_SHRINK);
+ canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC,
+ LDLM_CANCEL_SHRINK);
}
#ifdef __KERNEL__
/*
LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (rc < 0)
CERROR("ldlm_cli_cancel: %d\n", rc);
} else {
*
* Lock must not have any readers or writers by this time.
*/
-int ldlm_cli_cancel(struct lustre_handle *lockh)
+int ldlm_cli_cancel(struct lustre_handle *lockh,
+ ldlm_cancel_flags_t cancel_flags)
{
struct obd_export *exp;
int avail, flags, count = 1;
count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
LCF_BL_AST, flags);
}
- ldlm_cli_cancel_list(&cancels, count, NULL, 0);
+ ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_cancel);
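+/* example (illustrative): blocking-AST handlers in this patch typically
+ * cancel with LCF_ASYNC so the callback thread is not blocked:
+ *   ldlm_lock2handle(lock, &lockh);
+ *   rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ * passing 0 keeps the previous fully synchronous behaviour. */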
/**
* Cancel at least \a nr locks from given namespace LRU.
*
- * When called with LDLM_ASYNC the blocking callback will be handled
+ * When called with LCF_ASYNC the blocking callback will be handled
* in a thread and this function will return after the thread has been
- * asked to call the callback. When called with LDLM_SYNC the blocking
+ * asked to call the callback. When called without LCF_ASYNC the blocking
* callback will be performed in this function.
*/
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t mode,
- int flags)
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
+ ldlm_cancel_flags_t cancel_flags,
+ int flags)
{
- CFS_LIST_HEAD(cancels);
- int count, rc;
- ENTRY;
+ CFS_LIST_HEAD(cancels);
+ int count, rc;
+ ENTRY;
#ifndef __KERNEL__
- mode = LDLM_SYNC; /* force to be sync in user space */
+ cancel_flags &= ~LCF_ASYNC; /* force to be sync in user space */
#endif
- /* Just prepare the list of locks, do not actually cancel them yet.
- * Locks are cancelled later in a separate thread. */
- count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
- rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, mode);
- if (rc == 0)
- RETURN(count);
-
- RETURN(0);
+ /* Just prepare the list of locks, do not actually cancel them yet.
+ * Locks are cancelled later in a separate thread. */
+ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
+ rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
+ if (rc == 0)
+ RETURN(count);
+
+ RETURN(0);
}
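+/* example (illustrative): asynchronously flush up to 32 aged locks:
+ *   count = ldlm_cancel_lru(ns, 32, LCF_ASYNC, LDLM_CANCEL_AGED);
+ * returns the number of locks prepared for cancellation, or 0 if they
+ * could not be handed to the blocking thread. */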
/**
int canceled, unused = ns->ns_nr_unused;
/* Try to cancel all @ns_nr_unused locks. */
- canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC,
- LDLM_CANCEL_PASSED);
+ canceled = ldlm_cancel_lru(ns, unused, 0,
+ LDLM_CANCEL_PASSED);
if (canceled < unused) {
CDEBUG(D_DLMTRACE,
"not all requested locks are canceled, "
} else {
tmp = ns->ns_max_unused;
ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
ns->ns_max_unused = tmp;
}
return count;
"changing namespace %s unused locks from %u to %u\n",
ldlm_ns_name(ns), ns->ns_nr_unused,
(unsigned int)tmp);
- ldlm_cancel_lru(ns, tmp, LDLM_ASYNC, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
if (!lru_resize) {
CDEBUG(D_DLMTRACE,
ldlm_ns_name(ns), ns->ns_max_unused,
(unsigned int)tmp);
ns->ns_max_unused = (unsigned int)tmp;
- ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
/* Make sure that LRU resize was originally supported before
* turning it on here. */
unlock_res(res);
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (rc)
CERROR("ldlm_cli_cancel: %d\n", rc);
} else {
switch (flag) {
case LDLM_CB_BLOCKING:
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, 0);
if (rc < 0) {
CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
RETURN(rc);
switch (flag) {
case LDLM_CB_BLOCKING:
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, 0);
if (rc != ELDLM_OK)
CERROR("ldlm_cli_cancel failed: %d\n", rc);
break;
switch (flag) {
case LDLM_CB_BLOCKING:
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (rc < 0) {
CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
RETURN(rc);
switch (flag) {
case LDLM_CB_BLOCKING:
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (rc < 0) {
CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
RETURN(rc);
/* mgs wants the lock, give it up... */
LDLM_DEBUG(lock, "MGC blocking CB");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
break;
case LDLM_CB_CANCELING:
/* We've given up the lock, prepare ourselves to update. */
lockh = &osc_env_info(env)->oti_handle;
ldlm_lock2handle(dlmlock, lockh);
- result = ldlm_cli_cancel(lockh);
+ result = ldlm_cli_cancel(lockh, LCF_ASYNC);
} else
result = 0;
return result;
dlmlock->l_flags |= LDLM_FL_CBPENDING;
unlock_res_and_lock(dlmlock);
if (do_cancel)
- result = ldlm_cli_cancel(&olck->ols_handle);
+ result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
if (result < 0)
CL_LOCK_DEBUG(D_ERROR, env, lock,
"lock %p cancel failure with error(%d)\n",
LDLM_DEBUG(lock, "blocking AST on global quota lock");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
break;
}
case LDLM_CB_CANCELING: {
LDLM_DEBUG(lock, "blocking AST on ID quota lock");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
break;
}
case LDLM_CB_CANCELING: {