int count;
ENTRY;
+ /* Return, i.e. cancel nothing, only if ELC is supported (flag in
+ * export) but disabled through procfs (flag in NS).
+ *
+ * This distinguishes from a case when ELC is not supported originally,
+ * when we still want to cancel locks in advance and just cancel them
+ * locally, without sending any RPC. */
+ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
+ RETURN(0);
+
osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
if (res == NULL)
return 0;
}
+/* Client-side entry point for OST object creation.
+ *
+ * With the removal of the OSC precreation machinery (this patch), creates
+ * for normal MDT sequences no longer reach this function; only two legacy
+ * paths remain, both forwarded synchronously to osc_real_create():
+ *   - recreation of lost OST objects (OBD_FL_RECREATE_OBJS), and
+ *   - creates for non-MDT sequences.
+ * Anything else hitting this function is a logic error and LBUG()s.
+ *
+ * \param env  execution environment (unused here; kept for the obd API)
+ * \param exp  export to the target OST
+ * \param oti  transaction info, passed through to osc_real_create()
+ * \param oa   object attributes; o_valid must include OBD_MD_FLGROUP
+ * \param ea   striping metadata pointer (must be non-NULL)
+ *
+ * \retval result of osc_real_create() on the two legacy paths,
+ *         0 otherwise (unreachable in practice — see LBUG below).
+ */
+int osc_create(const struct lu_env *env, struct obd_export *exp,
+               struct obdo *oa, struct lov_stripe_md **ea,
+               struct obd_trans_info *oti)
+{
+        int rc = 0;
+        ENTRY;
+
+        LASSERT(oa);
+        LASSERT(ea);
+        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+
+        /* Object recreation request — send the create RPC to the OST. */
+        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
+            oa->o_flags == OBD_FL_RECREATE_OBJS) {
+                RETURN(osc_real_create(exp, oa, ea, oti));
+        }
+
+        /* Non-MDT sequence: presumably echo-client or similar special
+         * users — still created via a direct RPC. */
+        if (!fid_seq_is_mdt(oa->o_seq))
+                RETURN(osc_real_create(exp, oa, ea, oti));
+
+        /* we should not get here anymore */
+        LBUG();
+
+        RETURN(rc);
+}
+
/* Destroy requests can be async always on the client, and we don't even really
* care about the return code since the client cannot do anything at all about
* a destroy failure.
LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
lock_res_and_lock(lock);
- cfs_spin_lock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
- if (lock->l_ast_data == NULL)
- lock->l_ast_data = data;
- if (lock->l_ast_data == data)
- set = 1;
+ if (lock->l_ast_data == NULL)
+ lock->l_ast_data = data;
+ if (lock->l_ast_data == data)
+ set = 1;
- cfs_spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(lock);
+ spin_unlock(&osc_ast_guard);
+ unlock_res_and_lock(lock);
- return set;
+ return set;
}
static int osc_set_data_with_check(struct lustre_handle *lockh,
*flags &= ~LDLM_FL_BLOCK_GRANTED;
rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
- sizeof(*lvb), lockh, async);
+ sizeof(*lvb), LVB_T_OST, lockh, async);
if (rqset) {
if (!rc) {
struct osc_enqueue_args *aa;
struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
struct obd_statfs *msfs;
- __u64 used;
ENTRY;
if (rc == -EBADR)
GOTO(out, rc = -EPROTO);
}
- /* Reinitialize the RDONLY and DEGRADED flags at the client
- * on each statfs, so they don't stay set permanently. */
- cfs_spin_lock(&cli->cl_oscc.oscc_lock);
-
- if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
- cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
- else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED))
- cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED;
-
- if (unlikely(msfs->os_state & OS_STATE_READONLY))
- cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY;
- else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY))
- cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY;
-
- /* Add a bit of hysteresis so this flag isn't continually flapping,
- * and ensure that new files don't get extremely fragmented due to
- * only a small amount of available space in the filesystem.
- * We want to set the NOSPC flag when there is less than ~0.1% free
- * and clear it when there is at least ~0.2% free space, so:
- * avail < ~0.1% max max = avail + used
- * 1025 * avail < avail + used used = blocks - free
- * 1024 * avail < used
- * 1024 * avail < blocks - free
- * avail < ((blocks - free) >> 10)
- *
- * On very large disk, say 16TB 0.1% will be 16 GB. We don't want to
- * lose that amount of space so in those cases we report no space left
- * if their is less than 1 GB left. */
- used = min_t(__u64,(msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30);
- if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) &&
- ((msfs->os_ffree < 32) || (msfs->os_bavail < used))))
- cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC;
- else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
- (msfs->os_ffree > 64) &&
- (msfs->os_bavail > (used << 1)))) {
- cli->cl_oscc.oscc_flags &= ~(OSCC_FLAG_NOSPC |
- OSCC_FLAG_NOSPC_BLK);
- }
-
- if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
- (msfs->os_bavail < used)))
- cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC_BLK;
-
- cfs_spin_unlock(&cli->cl_oscc.oscc_lock);
-
*aa->aa_oi->oi_osfs = *msfs;
out:
rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
/*Since the request might also come from lprocfs, so we need
*sync this with client_disconnect_export Bug15684*/
- cfs_down_read(&obd->u.cli.cl_sem);
+ down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import)
imp = class_import_get(obd->u.cli.cl_import);
- cfs_up_read(&obd->u.cli.cl_sem);
+ up_read(&obd->u.cli.cl_sem);
if (!imp)
RETURN(-ENODEV);
RETURN(-EINVAL);
}
-static int osc_setinfo_mds_connect_import(struct obd_import *imp)
-{
- struct llog_ctxt *ctxt;
- int rc = 0;
- ENTRY;
-
- ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
- if (ctxt) {
- rc = llog_initiator_connect(ctxt);
- llog_ctxt_put(ctxt);
- } else {
- /* XXX return an error? skip setting below flags? */
- }
-
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_server_timeout = 1;
- imp->imp_pingable = 1;
- cfs_spin_unlock(&imp->imp_lock);
- CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
-
- RETURN(rc);
-}
-
-static int osc_setinfo_mds_conn_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *aa, int rc)
-{
- ENTRY;
- if (rc != 0)
- RETURN(rc);
-
- RETURN(osc_setinfo_mds_connect_import(req->rq_import));
-}
-
static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
obd_count keylen, void *key, obd_count vallen,
void *val, struct ptlrpc_request_set *set)
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
- if (KEY_IS(KEY_NEXT_ID)) {
- obd_id new_val;
- struct osc_creator *oscc = &obd->u.cli.cl_oscc;
-
- if (vallen != sizeof(obd_id))
- RETURN(-ERANGE);
- if (val == NULL)
- RETURN(-EINVAL);
-
- if (vallen != sizeof(obd_id))
- RETURN(-EINVAL);
-
- /* avoid race between allocate new object and set next id
- * from ll_sync thread */
- cfs_spin_lock(&oscc->oscc_lock);
- new_val = *((obd_id*)val) + 1;
- if (new_val > oscc->oscc_next_id)
- oscc->oscc_next_id = new_val;
- cfs_spin_unlock(&oscc->oscc_lock);
- CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
- exp->exp_obd->obd_name,
- obd->u.cli.cl_oscc.oscc_next_id);
-
- RETURN(0);
- }
-
if (KEY_IS(KEY_CHECKSUM)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
RETURN(0);
}
- if (KEY_IS(KEY_LRU_SET)) {
+ if (KEY_IS(KEY_CACHE_SET)) {
struct client_obd *cli = &obd->u.cli;
- LASSERT(cli->cl_lru == NULL); /* only once */
- cli->cl_lru = (struct cl_client_lru *)val;
- cfs_atomic_inc(&cli->cl_lru->ccl_users);
- cli->cl_lru_left = &cli->cl_lru->ccl_page_left;
+ LASSERT(cli->cl_cache == NULL); /* only once */
+ cli->cl_cache = (struct cl_client_cache *)val;
+ cfs_atomic_inc(&cli->cl_cache->ccc_users);
+ cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
/* add this osc into entity list */
LASSERT(cfs_list_empty(&cli->cl_lru_osc));
- cfs_spin_lock(&cli->cl_lru->ccl_lock);
- cfs_list_add(&cli->cl_lru_osc, &cli->cl_lru->ccl_list);
- cfs_spin_unlock(&cli->cl_lru->ccl_lock);
+ spin_lock(&cli->cl_cache->ccc_lru_lock);
+ cfs_list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
+ spin_unlock(&cli->cl_cache->ccc_lru_lock);
RETURN(0);
}
- if (KEY_IS(KEY_LRU_SHRINK)) {
+ if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
struct client_obd *cli = &obd->u.cli;
int nr = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
int target = *(int *)val;
Even if something bad goes through, we'd get a -EINVAL from OST
anyway. */
- if (KEY_IS(KEY_GRANT_SHRINK))
- req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
- else
- req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
-
- if (req == NULL)
- RETURN(-ENOMEM);
-
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
- RCL_CLIENT, vallen);
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- RETURN(rc);
- }
+ req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
+ &RQF_OST_SET_GRANT_INFO :
+ &RQF_OBD_SET_INFO);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
+ RCL_CLIENT, keylen);
+ if (!KEY_IS(KEY_GRANT_SHRINK))
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
+ RCL_CLIENT, vallen);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
+ &RMF_OST_BODY :
+ &RMF_SETINFO_VAL);
memcpy(tmp, val, vallen);
- if (KEY_IS(KEY_MDS_CONN)) {
- struct osc_creator *oscc = &obd->u.cli.cl_oscc;
-
- oscc->oscc_oa.o_seq = (*(__u32 *)val);
- oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
- LASSERT_SEQ_IS_MDT(oscc->oscc_oa.o_seq);
- req->rq_no_delay = req->rq_no_resend = 1;
- req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
- } else if (KEY_IS(KEY_GRANT_SHRINK)) {
+ if (KEY_IS(KEY_GRANT_SHRINK)) {
struct osc_grant_args *aa;
struct obdo *oa;
switch (event) {
case IMP_EVENT_DISCON: {
- /* Only do this on the MDS OSC's */
- if (imp->imp_server_timeout) {
- struct osc_creator *oscc = &obd->u.cli.cl_oscc;
-
- cfs_spin_lock(&oscc->oscc_lock);
- oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
- cfs_spin_unlock(&oscc->oscc_lock);
- }
cli = &obd->u.cli;
client_obd_list_lock(&cli->cl_loi_list_lock);
cli->cl_avail_grant = 0;
break;
}
case IMP_EVENT_ACTIVE: {
- /* Only do this on the MDS OSC's */
- if (imp->imp_server_timeout) {
- struct osc_creator *oscc = &obd->u.cli.cl_oscc;
-
- cfs_spin_lock(&oscc->oscc_lock);
- oscc->oscc_flags &= ~(OSCC_FLAG_NOSPC |
- OSCC_FLAG_NOSPC_BLK);
- cfs_spin_unlock(&oscc->oscc_lock);
- }
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
break;
}
ptlrpc_lprocfs_register_obd(obd);
}
- oscc_init(obd);
/* We need to allocate a few requests more, because
* brw_interpret tries to create new requests before freeing
* previous ones, Ideally we want to have 2x max_rpcs_in_flight
CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
/* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
ptlrpc_deactivate_import(imp);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_pingable = 0;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_pingable = 0;
+ spin_unlock(&imp->imp_lock);
break;
}
case OBD_CLEANUP_EXPORTS: {
ENTRY;
/* lru cleanup */
- if (cli->cl_lru != NULL) {
- LASSERT(cfs_atomic_read(&cli->cl_lru->ccl_users) > 0);
- cfs_spin_lock(&cli->cl_lru->ccl_lock);
+ if (cli->cl_cache != NULL) {
+ LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_users) > 0);
+ spin_lock(&cli->cl_cache->ccc_lru_lock);
cfs_list_del_init(&cli->cl_lru_osc);
- cfs_spin_unlock(&cli->cl_lru->ccl_lock);
+ spin_unlock(&cli->cl_cache->ccc_lru_lock);
cli->cl_lru_left = NULL;
- cfs_atomic_dec(&cli->cl_lru->ccl_users);
- cli->cl_lru = NULL;
+ cfs_atomic_dec(&cli->cl_cache->ccc_users);
+ cli->cl_cache = NULL;
}
/* free memory of osc quota cache */
.o_statfs_async = osc_statfs_async,
.o_packmd = osc_packmd,
.o_unpackmd = osc_unpackmd,
- .o_precreate = osc_precreate,
.o_create = osc_create,
- .o_create_async = osc_create_async,
.o_destroy = osc_destroy,
.o_getattr = osc_getattr,
.o_getattr_async = osc_getattr_async,
};
extern struct lu_kmem_descr osc_caches[];
-extern cfs_spinlock_t osc_ast_guard;
-extern cfs_lock_class_key_t osc_ast_guard_class;
+extern spinlock_t osc_ast_guard;
+extern struct lock_class_key osc_ast_guard_class;
int __init osc_init(void)
{
RETURN(rc);
}
- cfs_spin_lock_init(&osc_ast_guard);
- cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+ spin_lock_init(&osc_ast_guard);
+ lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
- RETURN(rc);
+ RETURN(rc);
}
#ifdef __KERNEL__