int count;
ENTRY;
+ /* Return, i.e. cancel nothing, only if ELC is supported (flag in
+ * export) but disabled through procfs (flag in NS).
+ *
+ * This distinguishes from a case when ELC is not supported originally,
+ * when we still want to cancel locks in advance and just cancel them
+ * locally, without sending any RPC. */
+ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
+ RETURN(0);
+
osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
if (res == NULL)
LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
lock_res_and_lock(lock);
- cfs_spin_lock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
- if (lock->l_ast_data == NULL)
- lock->l_ast_data = data;
- if (lock->l_ast_data == data)
- set = 1;
+ if (lock->l_ast_data == NULL)
+ lock->l_ast_data = data;
+ if (lock->l_ast_data == data)
+ set = 1;
- cfs_spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(lock);
+ spin_unlock(&osc_ast_guard);
+ unlock_res_and_lock(lock);
- return set;
+ return set;
}
static int osc_set_data_with_check(struct lustre_handle *lockh,
*flags &= ~LDLM_FL_BLOCK_GRANTED;
rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
- sizeof(*lvb), lockh, async);
+ sizeof(*lvb), LVB_T_OST, lockh, async);
if (rqset) {
if (!rc) {
struct osc_enqueue_args *aa;
/* Since the request might also come from lprocfs, we need to
 * sync this with client_disconnect_export. Bug 15684 */
- cfs_down_read(&obd->u.cli.cl_sem);
+ down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import)
imp = class_import_get(obd->u.cli.cl_import);
- cfs_up_read(&obd->u.cli.cl_sem);
+ up_read(&obd->u.cli.cl_sem);
if (!imp)
RETURN(-ENODEV);
RETURN(0);
}
- if (KEY_IS(KEY_LRU_SET)) {
+ if (KEY_IS(KEY_CACHE_SET)) {
struct client_obd *cli = &obd->u.cli;
- LASSERT(cli->cl_lru == NULL); /* only once */
- cli->cl_lru = (struct cl_client_lru *)val;
- cfs_atomic_inc(&cli->cl_lru->ccl_users);
- cli->cl_lru_left = &cli->cl_lru->ccl_page_left;
+ LASSERT(cli->cl_cache == NULL); /* only once */
+ cli->cl_cache = (struct cl_client_cache *)val;
+ cfs_atomic_inc(&cli->cl_cache->ccc_users);
+ cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
/* add this osc into entity list */
LASSERT(cfs_list_empty(&cli->cl_lru_osc));
- cfs_spin_lock(&cli->cl_lru->ccl_lock);
- cfs_list_add(&cli->cl_lru_osc, &cli->cl_lru->ccl_list);
- cfs_spin_unlock(&cli->cl_lru->ccl_lock);
+ spin_lock(&cli->cl_cache->ccc_lru_lock);
+ cfs_list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
+ spin_unlock(&cli->cl_cache->ccc_lru_lock);
RETURN(0);
}
- if (KEY_IS(KEY_LRU_SHRINK)) {
+ if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
struct client_obd *cli = &obd->u.cli;
int nr = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
int target = *(int *)val;
Even if something bad goes through, we'd get a -EINVAL from OST
anyway. */
- if (KEY_IS(KEY_GRANT_SHRINK))
- req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
- else
- req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
-
- if (req == NULL)
- RETURN(-ENOMEM);
-
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
- RCL_CLIENT, vallen);
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- RETURN(rc);
- }
+ req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
+ &RQF_OST_SET_GRANT_INFO :
+ &RQF_OBD_SET_INFO);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
+ RCL_CLIENT, keylen);
+ if (!KEY_IS(KEY_GRANT_SHRINK))
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
+ RCL_CLIENT, vallen);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
+ &RMF_OST_BODY :
+ &RMF_SETINFO_VAL);
memcpy(tmp, val, vallen);
if (KEY_IS(KEY_GRANT_SHRINK)) {
CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
/* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
ptlrpc_deactivate_import(imp);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_pingable = 0;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_pingable = 0;
+ spin_unlock(&imp->imp_lock);
break;
}
case OBD_CLEANUP_EXPORTS: {
ENTRY;
/* lru cleanup */
- if (cli->cl_lru != NULL) {
- LASSERT(cfs_atomic_read(&cli->cl_lru->ccl_users) > 0);
- cfs_spin_lock(&cli->cl_lru->ccl_lock);
+ if (cli->cl_cache != NULL) {
+ LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_users) > 0);
+ spin_lock(&cli->cl_cache->ccc_lru_lock);
cfs_list_del_init(&cli->cl_lru_osc);
- cfs_spin_unlock(&cli->cl_lru->ccl_lock);
+ spin_unlock(&cli->cl_cache->ccc_lru_lock);
cli->cl_lru_left = NULL;
- cfs_atomic_dec(&cli->cl_lru->ccl_users);
- cli->cl_lru = NULL;
+ cfs_atomic_dec(&cli->cl_cache->ccc_users);
+ cli->cl_cache = NULL;
}
/* free memory of osc quota cache */
};
extern struct lu_kmem_descr osc_caches[];
-extern cfs_spinlock_t osc_ast_guard;
-extern cfs_lock_class_key_t osc_ast_guard_class;
+extern spinlock_t osc_ast_guard;
+extern struct lock_class_key osc_ast_guard_class;
int __init osc_init(void)
{
RETURN(rc);
}
- cfs_spin_lock_init(&osc_ast_guard);
- cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+ spin_lock_init(&osc_ast_guard);
+ lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
- RETURN(rc);
+ RETURN(rc);
}
#ifdef __KERNEL__