With this patch the majority of md/dt methods in the new server stack get a struct lu_env argument instead of struct lu_context.
lu_env consists of two contexts:
- ->le_ctx: this is "local" context used to avoid allocating data on stack,
and to avoid short-term dynamic allocations. This is a replacement for the old
@ctx argument.
- ->le_ses: this is the "session": a context associated with a request. Data
that is shared between different stack levels lives here. E.g., credentials
(struct md_cred), and capability.
return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &cmm_lu_ops);
}
-int cmm_root_get(const struct lu_context *ctx, struct md_device *md,
+int cmm_root_get(const struct lu_env *env, struct md_device *md,
struct lu_fid *fid, struct md_ucred *uc)
{
struct cmm_device *cmm_dev = md2cmm_dev(md);
/* valid only on master MDS */
if (cmm_dev->cmm_local_num == 0)
- return cmm_child_ops(cmm_dev)->mdo_root_get(ctx,
+ return cmm_child_ops(cmm_dev)->mdo_root_get(env,
cmm_dev->cmm_child, fid, uc);
else
return -EINVAL;
}
-static int cmm_statfs(const struct lu_context *ctxt, struct md_device *md,
+static int cmm_statfs(const struct lu_env *env, struct md_device *md,
struct kstatfs *sfs, struct md_ucred *uc) {
struct cmm_device *cmm_dev = md2cmm_dev(md);
int rc;
ENTRY;
- rc = cmm_child_ops(cmm_dev)->mdo_statfs(ctxt,
+ rc = cmm_child_ops(cmm_dev)->mdo_statfs(env,
cmm_dev->cmm_child, sfs, uc);
RETURN (rc);
}
-static int cmm_maxsize_get(const struct lu_context *ctxt, struct md_device *md,
+static int cmm_maxsize_get(const struct lu_env *env, struct md_device *md,
int *md_size, int *cookie_size, struct md_ucred *uc)
{
struct cmm_device *cmm_dev = md2cmm_dev(md);
int rc;
ENTRY;
- rc = cmm_child_ops(cmm_dev)->mdo_maxsize_get(ctxt, cmm_dev->cmm_child,
+ rc = cmm_child_ops(cmm_dev)->mdo_maxsize_get(env, cmm_dev->cmm_child,
md_size, cookie_size, uc);
RETURN(rc);
}
RETURN(rc);
}
-static int cmm_update_capa_key(const struct lu_context *ctxt,
+static int cmm_update_capa_key(const struct lu_env *env,
struct md_device *md,
struct lustre_capa_key *key)
{
struct cmm_device *cmm_dev = md2cmm_dev(md);
int rc;
ENTRY;
- rc = cmm_child_ops(cmm_dev)->mdo_update_capa_key(ctxt,
+ rc = cmm_child_ops(cmm_dev)->mdo_update_capa_key(env,
cmm_dev->cmm_child,
key);
RETURN(rc);
/* --- cmm_lu_operations --- */
/* add new MDC to the CMM, create MDC lu_device and connect it to mdc_obd */
-static int cmm_add_mdc(const struct lu_context *ctx,
+static int cmm_add_mdc(const struct lu_env *env,
struct cmm_device *cm, struct lustre_cfg *cfg)
{
struct lu_device_type *ldt = &mdc_device_type;
}
}
spin_unlock(&cm->cmm_tgt_guard);
- ld = ldt->ldt_ops->ldto_device_alloc(ctx, ldt, cfg);
+ ld = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
ld->ld_site = cmm2lu_dev(cm)->ld_site;
- rc = ldt->ldt_ops->ldto_device_init(ctx, ld, NULL);
+ rc = ldt->ldt_ops->ldto_device_init(env, ld, NULL);
if (rc) {
- ldt->ldt_ops->ldto_device_free(ctx, ld);
+ ldt->ldt_ops->ldto_device_free(env, ld);
RETURN (rc);
}
/* pass config to the just created MDC */
- rc = ld->ld_ops->ldo_process_config(ctx, ld, cfg);
+ rc = ld->ld_ops->ldo_process_config(env, ld, cfg);
if (rc == 0) {
spin_lock(&cm->cmm_tgt_guard);
list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
mc_linkage) {
if (mc->mc_num == mdc_num) {
spin_unlock(&cm->cmm_tgt_guard);
- ldt->ldt_ops->ldto_device_fini(ctx, ld);
- ldt->ldt_ops->ldto_device_free(ctx, ld);
+ ldt->ldt_ops->ldto_device_fini(env, ld);
+ ldt->ldt_ops->ldto_device_free(env, ld);
RETURN(-EEXIST);
}
}
target.ft_srv = NULL;
target.ft_idx = mc->mc_num;
target.ft_exp = mc->mc_desc.cl_exp;
-
+
fld_client_add_target(ls->ls_client_fld, &target);
}
RETURN(rc);
}
-static void cmm_device_shutdown(const struct lu_context *ctx,
+static void cmm_device_shutdown(const struct lu_env *env,
struct cmm_device *cm)
{
struct mdc_device *mc, *tmp;
list_del_init(&mc->mc_linkage);
lu_device_put(cmm2lu_dev(cm));
- ld_m->ld_type->ldt_ops->ldto_device_fini(ctx, ld_m);
- ld_m->ld_type->ldt_ops->ldto_device_free(ctx, ld_m);
+ ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m);
+ ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m);
cm->cmm_tgt_count--;
}
spin_unlock(&cm->cmm_tgt_guard);
EXIT;
}
-static int cmm_device_mount(const struct lu_context *ctx,
+static int cmm_device_mount(const struct lu_env *env,
struct cmm_device *m, struct lustre_cfg *cfg)
{
const char *index = lustre_cfg_string(cfg, 2);
char *p;
-
+
LASSERT(index != NULL);
m->cmm_local_num = simple_strtol(index, &p, 10);
CERROR("Invalid index in lustre_cgf\n");
RETURN(-EINVAL);
}
-
+
RETURN(0);
}
-static int cmm_process_config(const struct lu_context *ctx,
+static int cmm_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
struct cmm_device *m = lu2cmm_dev(d);
switch(cfg->lcfg_command) {
case LCFG_ADD_MDC:
- err = cmm_add_mdc(ctx, m, cfg);
+ err = cmm_add_mdc(env, m, cfg);
/* the first ADD_MDC can be counted as setup is finished */
if ((m->cmm_flags & CMM_INITIALIZED) == 0)
m->cmm_flags |= CMM_INITIALIZED;
case LCFG_SETUP:
{
/* lower layers should be set up at first */
- err = next->ld_ops->ldo_process_config(ctx, next, cfg);
+ err = next->ld_ops->ldo_process_config(env, next, cfg);
if (err == 0)
- err = cmm_device_mount(ctx, m, cfg);
+ err = cmm_device_mount(env, m, cfg);
break;
}
case LCFG_CLEANUP:
{
- cmm_device_shutdown(ctx, m);
+ cmm_device_shutdown(env, m);
}
default:
- err = next->ld_ops->ldo_process_config(ctx, next, cfg);
+ err = next->ld_ops->ldo_process_config(env, next, cfg);
}
RETURN(err);
}
-static int cmm_recovery_complete(const struct lu_context *ctxt,
+static int cmm_recovery_complete(const struct lu_env *env,
struct lu_device *d)
{
struct cmm_device *m = lu2cmm_dev(d);
struct lu_device *next = md2lu_dev(m->cmm_child);
int rc;
ENTRY;
- rc = next->ld_ops->ldo_recovery_complete(ctxt, next);
+ rc = next->ld_ops->ldo_recovery_complete(env, next);
RETURN(rc);
}
};
/* --- lu_device_type operations --- */
-int cmm_upcall(const struct lu_context *ctxt, struct md_device *md,
+int cmm_upcall(const struct lu_env *env, struct md_device *md,
enum md_upcall_event ev)
{
struct md_device *upcall_dev;
upcall_dev = md->md_upcall.mu_upcall_dev;
LASSERT(upcall_dev);
- rc = upcall_dev->md_upcall.mu_upcall(ctxt, md->md_upcall.mu_upcall_dev, ev);
+ rc = upcall_dev->md_upcall.mu_upcall(env, md->md_upcall.mu_upcall_dev, ev);
RETURN(rc);
}
-static struct lu_device *cmm_device_alloc(const struct lu_context *ctx,
+static struct lu_device *cmm_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
{
RETURN (l);
}
-static void cmm_device_free(const struct lu_context *ctx, struct lu_device *d)
+static void cmm_device_free(const struct lu_env *env, struct lu_device *d)
{
struct cmm_device *m = lu2cmm_dev(d);
}
/* context key constructor/destructor */
-static void *cmm_thread_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *cmm_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct cmm_thread_info *info;
return info;
}
-static void cmm_thread_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void cmm_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct cmm_thread_info *info = data;
OBD_FREE_PTR(info);
struct lu_context_key cmm_thread_key = {
.lct_tags = LCT_MD_THREAD,
- .lct_init = cmm_thread_init,
- .lct_fini = cmm_thread_fini
+ .lct_init = cmm_key_init,
+ .lct_fini = cmm_key_fini
};
static int cmm_type_init(struct lu_device_type *t)
lu_context_key_degister(&cmm_thread_key);
}
-static int cmm_device_init(const struct lu_context *ctx,
+static int cmm_device_init(const struct lu_env *env,
struct lu_device *d, struct lu_device *next)
{
struct cmm_device *m = lu2cmm_dev(d);
RETURN(err);
}
-static struct lu_device *cmm_device_fini(const struct lu_context *ctx,
+static struct lu_device *cmm_device_fini(const struct lu_env *env,
struct lu_device *ld)
{
struct cmm_device *cm = lu2cmm_dev(ld);
*
* Copyright (C) 2006 Cluster File Systems, Inc.
* Author: Mike Pershin <tappro@clusterfs.com>
- *
+ *
* This file is part of the Lustre file system, http://www.lustre.org
* Lustre is a trademark of Cluster File Systems, Inc.
*
}
/* cmm_object.c */
-struct lu_object *cmm_object_alloc(const struct lu_context *ctx,
+struct lu_object *cmm_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *);
-int cmm_upcall(const struct lu_context *ctxt, struct md_device *md,
+int cmm_upcall(const struct lu_env *env, struct md_device *md,
enum md_upcall_event ev);
#ifdef HAVE_SPLIT_SUPPORT
/* cmm_split.c */
-int cml_try_to_split(const struct lu_context *ctx,
+int cml_try_to_split(const struct lu_env *env,
struct md_object *mo, struct md_ucred *uc);
#endif
static int cmm_fld_lookup(struct cmm_device *cm,
const struct lu_fid *fid, mdsno_t *mds,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
struct lu_site *ls;
int rc = 0;
ls = cm->cmm_md_dev.md_lu_dev.ld_site;
rc = fld_client_lookup(ls->ls_client_fld,
- fid_seq(fid), mds, ctx);
+ fid_seq(fid), mds, env);
if (rc) {
CERROR("can't find mds by seq "LPX64", rc %d\n",
fid_seq(fid), rc);
static struct md_dir_operations cmr_dir_ops;
static struct lu_object_operations cmr_obj_ops;
-struct lu_object *cmm_object_alloc(const struct lu_context *ctx,
+struct lu_object *cmm_object_alloc(const struct lu_env *env,
const struct lu_object_header *loh,
struct lu_device *ld)
{
cd = lu2cmm_dev(ld);
if (cd->cmm_flags & CMM_INITIALIZED) {
/* get object location */
- rc = cmm_fld_lookup(lu2cmm_dev(ld), fid, &mdsnum, ctx);
+ rc = cmm_fld_lookup(lu2cmm_dev(ld), fid, &mdsnum, env);
if (rc)
RETURN(NULL);
} else
}
/* lu_object operations */
-static void cml_object_free(const struct lu_context *ctx,
+static void cml_object_free(const struct lu_env *env,
struct lu_object *lo)
{
struct cml_object *clo = lu2cml_obj(lo);
OBD_FREE_PTR(clo);
}
-static int cml_object_init(const struct lu_context *ctx, struct lu_object *lo)
+static int cml_object_init(const struct lu_env *env, struct lu_object *lo)
{
struct cmm_device *cd = lu2cmm_dev(lo->lo_dev);
struct lu_device *c_dev;
if (c_dev == NULL) {
rc = -ENOENT;
} else {
- c_obj = c_dev->ld_ops->ldo_object_alloc(ctx,
+ c_obj = c_dev->ld_ops->ldo_object_alloc(env,
lo->lo_header, c_dev);
if (c_obj != NULL) {
lu_object_add(lo, c_obj);
RETURN(rc);
}
-static int cml_object_print(const struct lu_context *ctx, void *cookie,
+static int cml_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *lo)
{
- return (*p)(ctx, cookie, LUSTRE_CMM_NAME"-local@%p", lo);
+ return (*p)(env, cookie, LUSTRE_CMM_NAME"-local@%p", lo);
}
static struct lu_object_operations cml_obj_ops = {
};
/* CMM local md_object operations */
-static int cml_object_create(const struct lu_context *ctx,
+static int cml_object_create(const struct lu_env *env,
struct md_object *mo,
const struct md_create_spec *spec,
struct md_attr *attr,
{
int rc;
ENTRY;
- rc = mo_object_create(ctx, md_object_next(mo), spec, attr, uc);
+ rc = mo_object_create(env, md_object_next(mo), spec, attr, uc);
RETURN(rc);
}
-static int cml_permission(const struct lu_context *ctx,
+static int cml_permission(const struct lu_env *env,
struct md_object *mo, int mask, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_permission(ctx, md_object_next(mo), mask, uc);
+ rc = mo_permission(env, md_object_next(mo), mask, uc);
RETURN(rc);
}
-static int cml_attr_get(const struct lu_context *ctx, struct md_object *mo,
+static int cml_attr_get(const struct lu_env *env, struct md_object *mo,
struct md_attr *attr, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_attr_get(ctx, md_object_next(mo), attr, uc);
+ rc = mo_attr_get(env, md_object_next(mo), attr, uc);
RETURN(rc);
}
-static int cml_attr_set(const struct lu_context *ctx, struct md_object *mo,
+static int cml_attr_set(const struct lu_env *env, struct md_object *mo,
const struct md_attr *attr, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_attr_set(ctx, md_object_next(mo), attr, uc);
+ rc = mo_attr_set(env, md_object_next(mo), attr, uc);
RETURN(rc);
}
-static int cml_xattr_get(const struct lu_context *ctx, struct md_object *mo,
+static int cml_xattr_get(const struct lu_env *env, struct md_object *mo,
void *buf, int buflen, const char *name,
struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_xattr_get(ctx, md_object_next(mo), buf, buflen, name, uc);
+ rc = mo_xattr_get(env, md_object_next(mo), buf, buflen, name, uc);
RETURN(rc);
}
-static int cml_readlink(const struct lu_context *ctx, struct md_object *mo,
+static int cml_readlink(const struct lu_env *env, struct md_object *mo,
void *buf, int buflen, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_readlink(ctx, md_object_next(mo), buf, buflen, uc);
+ rc = mo_readlink(env, md_object_next(mo), buf, buflen, uc);
RETURN(rc);
}
-static int cml_xattr_list(const struct lu_context *ctx, struct md_object *mo,
+static int cml_xattr_list(const struct lu_env *env, struct md_object *mo,
void *buf, int buflen, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_xattr_list(ctx, md_object_next(mo), buf, buflen, uc);
+ rc = mo_xattr_list(env, md_object_next(mo), buf, buflen, uc);
RETURN(rc);
}
-static int cml_xattr_set(const struct lu_context *ctx, struct md_object *mo,
+static int cml_xattr_set(const struct lu_env *env, struct md_object *mo,
const void *buf, int buflen,
const char *name, int fl, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_xattr_set(ctx, md_object_next(mo), buf, buflen, name, fl, uc);
+ rc = mo_xattr_set(env, md_object_next(mo), buf, buflen, name, fl, uc);
RETURN(rc);
}
-static int cml_xattr_del(const struct lu_context *ctx, struct md_object *mo,
+static int cml_xattr_del(const struct lu_env *env, struct md_object *mo,
const char *name, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_xattr_del(ctx, md_object_next(mo), name, uc);
+ rc = mo_xattr_del(env, md_object_next(mo), name, uc);
RETURN(rc);
}
-static int cml_ref_add(const struct lu_context *ctx, struct md_object *mo,
+static int cml_ref_add(const struct lu_env *env, struct md_object *mo,
struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_ref_add(ctx, md_object_next(mo), uc);
+ rc = mo_ref_add(env, md_object_next(mo), uc);
RETURN(rc);
}
-static int cml_ref_del(const struct lu_context *ctx, struct md_object *mo,
+static int cml_ref_del(const struct lu_env *env, struct md_object *mo,
struct md_attr *ma, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_ref_del(ctx, md_object_next(mo), ma, uc);
+ rc = mo_ref_del(env, md_object_next(mo), ma, uc);
RETURN(rc);
}
-static int cml_open(const struct lu_context *ctx, struct md_object *mo,
+static int cml_open(const struct lu_env *env, struct md_object *mo,
int flags, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_open(ctx, md_object_next(mo), flags, uc);
+ rc = mo_open(env, md_object_next(mo), flags, uc);
RETURN(rc);
}
-static int cml_close(const struct lu_context *ctx, struct md_object *mo,
+static int cml_close(const struct lu_env *env, struct md_object *mo,
struct md_attr *ma, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_close(ctx, md_object_next(mo), ma, uc);
+ rc = mo_close(env, md_object_next(mo), ma, uc);
RETURN(rc);
}
-static int cml_readpage(const struct lu_context *ctxt, struct md_object *mo,
+static int cml_readpage(const struct lu_env *env, struct md_object *mo,
const struct lu_rdpg *rdpg, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_readpage(ctxt, md_object_next(mo), rdpg, uc);
+ rc = mo_readpage(env, md_object_next(mo), rdpg, uc);
RETURN(rc);
}
-static int cml_capa_get(const struct lu_context *ctxt, struct md_object *mo,
+static int cml_capa_get(const struct lu_env *env, struct md_object *mo,
struct lustre_capa *capa)
{
int rc;
ENTRY;
- rc = mo_capa_get(ctxt, md_object_next(mo), capa);
+ rc = mo_capa_get(env, md_object_next(mo), capa);
RETURN(rc);
}
};
/* md_dir operations */
-static int cml_lookup(const struct lu_context *ctx, struct md_object *mo_p,
+static int cml_lookup(const struct lu_env *env, struct md_object *mo_p,
const char *name, struct lu_fid *lf, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mdo_lookup(ctx, md_object_next(mo_p), name, lf, uc);
+ rc = mdo_lookup(env, md_object_next(mo_p), name, lf, uc);
RETURN(rc);
}
-static int cml_create(const struct lu_context *ctx,
+static int cml_create(const struct lu_env *env,
struct md_object *mo_p, const char *child_name,
struct md_object *mo_c, const struct md_create_spec *spec,
struct md_attr *ma, struct md_ucred *uc)
ENTRY;
#ifdef HAVE_SPLIT_SUPPORT
- rc = cml_try_to_split(ctx, mo_p, uc);
+ rc = cml_try_to_split(env, mo_p, uc);
if (rc)
RETURN(rc);
#endif
- rc = mdo_create(ctx, md_object_next(mo_p), child_name,
+ rc = mdo_create(env, md_object_next(mo_p), child_name,
md_object_next(mo_c), spec, ma, uc);
RETURN(rc);
}
-static int cml_create_data(const struct lu_context *ctx, struct md_object *p,
+static int cml_create_data(const struct lu_env *env, struct md_object *p,
struct md_object *o,
const struct md_create_spec *spec,
struct md_attr *ma, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mdo_create_data(ctx, md_object_next(p), md_object_next(o),
+ rc = mdo_create_data(env, md_object_next(p), md_object_next(o),
spec, ma, uc);
RETURN(rc);
}
-static int cml_link(const struct lu_context *ctx, struct md_object *mo_p,
+static int cml_link(const struct lu_env *env, struct md_object *mo_p,
struct md_object *mo_s, const char *name,
struct md_attr *ma, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mdo_link(ctx, md_object_next(mo_p), md_object_next(mo_s),
+ rc = mdo_link(env, md_object_next(mo_p), md_object_next(mo_s),
name, ma, uc);
RETURN(rc);
}
-static int cml_unlink(const struct lu_context *ctx, struct md_object *mo_p,
+static int cml_unlink(const struct lu_env *env, struct md_object *mo_p,
struct md_object *mo_c, const char *name,
struct md_attr *ma, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mdo_unlink(ctx, md_object_next(mo_p), md_object_next(mo_c),
+ rc = mdo_unlink(env, md_object_next(mo_p), md_object_next(mo_c),
name, ma, uc);
RETURN(rc);
}
/* rename is split to local/remote by location of new parent dir */
-struct md_object *md_object_find(const struct lu_context *ctx,
+struct md_object *md_object_find(const struct lu_env *env,
struct md_device *md,
const struct lu_fid *f)
{
struct md_object *m;
ENTRY;
- o = lu_object_find(ctx, md2lu_dev(md)->ld_site, f, BYPASS_CAPA);
+ o = lu_object_find(env, md2lu_dev(md)->ld_site, f, BYPASS_CAPA);
if (IS_ERR(o))
m = (struct md_object *)o;
else {
RETURN(m);
}
-static int __cmm_mode_get(const struct lu_context *ctx, struct md_device *md,
+static int __cmm_mode_get(const struct lu_env *env, struct md_device *md,
const struct lu_fid *lf, struct md_attr *ma,
struct md_ucred *uc)
{
struct cmm_thread_info *cmi;
- struct md_object *mo_s = md_object_find(ctx, md, lf);
+ struct md_object *mo_s = md_object_find(env, md, lf);
struct md_attr *tmp_ma;
int rc;
ENTRY;
if (IS_ERR(mo_s))
RETURN(PTR_ERR(mo_s));
-
- cmi = lu_context_key_get(ctx, &cmm_thread_key);
+
+ cmi = lu_context_key_get(&env->le_ctx, &cmm_thread_key);
LASSERT(cmi);
tmp_ma = &cmi->cmi_ma;
tmp_ma->ma_need = MA_INODE;
-
+
/* get type from src, can be remote req */
- rc = mo_attr_get(ctx, md_object_next(mo_s), tmp_ma, uc);
+ rc = mo_attr_get(env, md_object_next(mo_s), tmp_ma, uc);
if (rc == 0) {
ma->ma_attr.la_mode = tmp_ma->ma_attr.la_mode;
ma->ma_attr.la_flags = tmp_ma->ma_attr.la_flags;
ma->ma_attr.la_valid |= LA_MODE | LA_FLAGS;
}
- lu_object_put(ctx, &mo_s->mo_lu);
+ lu_object_put(env, &mo_s->mo_lu);
return rc;
}
-static int cml_rename(const struct lu_context *ctx, struct md_object *mo_po,
+static int cml_rename(const struct lu_env *env, struct md_object *mo_po,
struct md_object *mo_pn, const struct lu_fid *lf,
const char *s_name, struct md_object *mo_t,
const char *t_name, struct md_attr *ma,
int rc;
ENTRY;
- rc = __cmm_mode_get(ctx, md_obj2dev(mo_po), lf, ma, uc);
+ rc = __cmm_mode_get(env, md_obj2dev(mo_po), lf, ma, uc);
if (rc != 0)
RETURN(rc);
if (mo_t && lu_object_exists(&mo_t->mo_lu) < 0) {
/* mo_t is remote object and there is RPC to unlink it */
- rc = mo_ref_del(ctx, md_object_next(mo_t), ma, uc);
+ rc = mo_ref_del(env, md_object_next(mo_t), ma, uc);
if (rc)
RETURN(rc);
mo_t = NULL;
}
/* local rename, mo_t can be NULL */
- rc = mdo_rename(ctx, md_object_next(mo_po),
+ rc = mdo_rename(env, md_object_next(mo_po),
md_object_next(mo_pn), lf, s_name,
md_object_next(mo_t), t_name, ma, uc);
RETURN(rc);
}
-static int cml_rename_tgt(const struct lu_context *ctx, struct md_object *mo_p,
+static int cml_rename_tgt(const struct lu_env *env, struct md_object *mo_p,
struct md_object *mo_t, const struct lu_fid *lf,
const char *name, struct md_attr *ma,
struct md_ucred *uc)
int rc;
ENTRY;
- rc = mdo_rename_tgt(ctx, md_object_next(mo_p),
+ rc = mdo_rename_tgt(env, md_object_next(mo_p),
md_object_next(mo_t), lf, name, ma, uc);
RETURN(rc);
}
/* used only in case of rename_tgt() when target is not exist */
-static int cml_name_insert(const struct lu_context *ctx, struct md_object *p,
+static int cml_name_insert(const struct lu_env *env, struct md_object *p,
const char *name, const struct lu_fid *lf, int isdir,
struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mdo_name_insert(ctx, md_object_next(p), name, lf, isdir, uc);
+ rc = mdo_name_insert(env, md_object_next(p), name, lf, isdir, uc);
RETURN(rc);
}
/* Common method for remote and local use. */
-static int cmm_is_subdir(const struct lu_context *ctx, struct md_object *mo,
+static int cmm_is_subdir(const struct lu_env *env, struct md_object *mo,
const struct lu_fid *fid, struct lu_fid *sfid,
struct md_ucred *uc)
{
int rc;
ENTRY;
- cmi = lu_context_key_get(ctx, &cmm_thread_key);
- rc = __cmm_mode_get(ctx, md_obj2dev(mo), fid, &cmi->cmi_ma, uc);
+ cmi = lu_context_key_get(&env->le_ctx, &cmm_thread_key);
+ rc = __cmm_mode_get(env, md_obj2dev(mo), fid, &cmi->cmi_ma, uc);
if (rc)
RETURN(rc);
if (!S_ISDIR(cmi->cmi_ma.ma_attr.la_mode))
RETURN(0);
-
- rc = mdo_is_subdir(ctx, md_object_next(mo), fid, sfid, uc);
+
+ rc = mdo_is_subdir(env, md_object_next(mo), fid, sfid, uc);
RETURN(rc);
}
}
/* lu_object operations */
-static void cmr_object_free(const struct lu_context *ctx,
+static void cmr_object_free(const struct lu_env *env,
struct lu_object *lo)
{
struct cmr_object *cro = lu2cmr_obj(lo);
OBD_FREE_PTR(cro);
}
-static int cmr_object_init(const struct lu_context *ctx, struct lu_object *lo)
+static int cmr_object_init(const struct lu_env *env, struct lu_object *lo)
{
struct cmm_device *cd = lu2cmm_dev(lo->lo_dev);
struct lu_device *c_dev;
if (c_dev == NULL) {
rc = -ENOENT;
} else {
- c_obj = c_dev->ld_ops->ldo_object_alloc(ctx,
+ c_obj = c_dev->ld_ops->ldo_object_alloc(env,
lo->lo_header, c_dev);
if (c_obj != NULL) {
lu_object_add(lo, c_obj);
RETURN(rc);
}
-static int cmr_object_print(const struct lu_context *ctx, void *cookie,
+static int cmr_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *lo)
{
- return (*p)(ctx, cookie, LUSTRE_CMM_NAME"-remote@%p", lo);
+ return (*p)(env, cookie, LUSTRE_CMM_NAME"-remote@%p", lo);
}
static struct lu_object_operations cmr_obj_ops = {
};
/* CMM remote md_object operations. All are invalid */
-static int cmr_object_create(const struct lu_context *ctx,
+static int cmr_object_create(const struct lu_env *env,
struct md_object *mo,
const struct md_create_spec *spec,
struct md_attr *ma,
RETURN(-EFAULT);
}
-static int cmr_permission(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_permission(const struct lu_env *env, struct md_object *mo,
int mask, struct md_ucred *uc)
{
RETURN(-EREMOTE);
}
-static int cmr_attr_get(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_attr_get(const struct lu_env *env, struct md_object *mo,
struct md_attr *attr, struct md_ucred *uc)
{
RETURN(-EREMOTE);
}
-static int cmr_attr_set(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_attr_set(const struct lu_env *env, struct md_object *mo,
const struct md_attr *attr, struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_xattr_get(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_xattr_get(const struct lu_env *env, struct md_object *mo,
void *buf, int buflen, const char *name,
struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_readlink(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_readlink(const struct lu_env *env, struct md_object *mo,
void *buf, int buflen, struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_xattr_list(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_xattr_list(const struct lu_env *env, struct md_object *mo,
void *buf, int buflen, struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_xattr_set(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_xattr_set(const struct lu_env *env, struct md_object *mo,
const void *buf, int buflen, const char *name, int fl,
struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_xattr_del(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_xattr_del(const struct lu_env *env, struct md_object *mo,
const char *name, struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_ref_add(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_ref_add(const struct lu_env *env, struct md_object *mo,
struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_ref_del(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_ref_del(const struct lu_env *env, struct md_object *mo,
struct md_attr *ma, struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_open(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_open(const struct lu_env *env, struct md_object *mo,
int flags, struct md_ucred *uc)
{
RETURN(-EREMOTE);
}
-static int cmr_close(const struct lu_context *ctx, struct md_object *mo,
+static int cmr_close(const struct lu_env *env, struct md_object *mo,
struct md_attr *ma, struct md_ucred *uc)
{
RETURN(-EFAULT);
}
-static int cmr_readpage(const struct lu_context *ctxt, struct md_object *mo,
+static int cmr_readpage(const struct lu_env *env, struct md_object *mo,
const struct lu_rdpg *rdpg, struct md_ucred *uc)
{
RETURN(-EREMOTE);
}
-static int cmr_capa_get(const struct lu_context *ctxt, struct md_object *mo,
+static int cmr_capa_get(const struct lu_env *env, struct md_object *mo,
struct lustre_capa *capa)
{
RETURN(-EFAULT);
};
/* remote part of md_dir operations */
-static int cmr_lookup(const struct lu_context *ctx, struct md_object *mo_p,
+static int cmr_lookup(const struct lu_env *env, struct md_object *mo_p,
const char *name, struct lu_fid *lf, struct md_ucred *uc)
{
/*
* For more details see rollback HLD/DLD.
*
*/
-static int cmr_create(const struct lu_context *ctx, struct md_object *mo_p,
+static int cmr_create(const struct lu_env *env, struct md_object *mo_p,
const char *child_name, struct md_object *mo_c,
const struct md_create_spec *spec,
struct md_attr *ma, struct md_ucred *uc)
ENTRY;
/* check the SGID attr */
- cmi = lu_context_key_get(ctx, &cmm_thread_key);
+ cmi = lu_context_key_get(&env->le_ctx, &cmm_thread_key);
LASSERT(cmi);
tmp_ma = &cmi->cmi_ma;
tmp_ma->ma_need = MA_INODE;
- rc = mo_attr_get(ctx, md_object_next(mo_p), tmp_ma, uc);
+ rc = mo_attr_get(env, md_object_next(mo_p), tmp_ma, uc);
if (rc)
RETURN(rc);
-
+
if (tmp_ma->ma_attr.la_mode & S_ISGID) {
ma->ma_attr.la_gid = tmp_ma->ma_attr.la_gid;
if (S_ISDIR(ma->ma_attr.la_mode)) {
}
}
/* remote object creation and local name insert */
- rc = mo_object_create(ctx, md_object_next(mo_c), spec, ma, uc);
+ rc = mo_object_create(env, md_object_next(mo_c), spec, ma, uc);
if (rc == 0) {
- rc = mdo_name_insert(ctx, md_object_next(mo_p),
+ rc = mdo_name_insert(env, md_object_next(mo_p),
child_name, lu_object_fid(&mo_c->mo_lu),
S_ISDIR(ma->ma_attr.la_mode), uc);
}
RETURN(rc);
}
-static int cmr_link(const struct lu_context *ctx, struct md_object *mo_p,
+static int cmr_link(const struct lu_env *env, struct md_object *mo_p,
struct md_object *mo_s, const char *name,
struct md_attr *ma, struct md_ucred *uc)
{
        //XXX: make sure that the MDT checks the name doesn't already exist
- rc = mo_ref_add(ctx, md_object_next(mo_s), uc);
+ rc = mo_ref_add(env, md_object_next(mo_s), uc);
if (rc == 0) {
- rc = mdo_name_insert(ctx, md_object_next(mo_p),
+ rc = mdo_name_insert(env, md_object_next(mo_p),
name, lu_object_fid(&mo_s->mo_lu), 0, uc);
}
RETURN(rc);
}
-static int cmr_unlink(const struct lu_context *ctx, struct md_object *mo_p,
+static int cmr_unlink(const struct lu_env *env, struct md_object *mo_p,
struct md_object *mo_c, const char *name,
struct md_attr *ma, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mo_ref_del(ctx, md_object_next(mo_c), ma, uc);
+ rc = mo_ref_del(env, md_object_next(mo_c), ma, uc);
if (rc == 0) {
- rc = mdo_name_remove(ctx, md_object_next(mo_p), name, uc);
+ rc = mdo_name_remove(env, md_object_next(mo_p), name, uc);
}
RETURN(rc);
}
-static int cmr_rename(const struct lu_context *ctx,
+static int cmr_rename(const struct lu_env *env,
struct md_object *mo_po, struct md_object *mo_pn,
const struct lu_fid *lf, const char *s_name,
struct md_object *mo_t, const char *t_name,
{
int rc;
ENTRY;
-
+
/* get real type of src */
- rc = __cmm_mode_get(ctx, md_obj2dev(mo_po), lf, ma, uc);
+ rc = __cmm_mode_get(env, md_obj2dev(mo_po), lf, ma, uc);
if (rc != 0)
RETURN(rc);
/* the mo_pn is remote directory, so we cannot even know if there is
* mo_t or not. Therefore mo_t is NULL here but remote server should do
* lookup and process this further */
- rc = mdo_rename_tgt(ctx, md_object_next(mo_pn),
+ rc = mdo_rename_tgt(env, md_object_next(mo_pn),
NULL/* mo_t */, lf, t_name, ma, uc);
        /* only the old name is removed locally */
if (rc == 0)
- rc = mdo_name_remove(ctx, md_object_next(mo_po),
+ rc = mdo_name_remove(env, md_object_next(mo_po),
s_name, uc);
RETURN(rc);
/* part of cross-ref rename(). Used to insert new name in new parent
* and unlink target with same name if it exists */
-static int cmr_rename_tgt(const struct lu_context *ctx,
+static int cmr_rename_tgt(const struct lu_env *env,
struct md_object *mo_p, struct md_object *mo_t,
const struct lu_fid *lf, const char *name,
struct md_attr *ma, struct md_ucred *uc)
int rc;
ENTRY;
/* target object is remote one */
- rc = mo_ref_del(ctx, md_object_next(mo_t), ma, uc);
+ rc = mo_ref_del(env, md_object_next(mo_t), ma, uc);
/* continue locally with name handling only */
if (rc == 0)
- rc = mdo_rename_tgt(ctx, md_object_next(mo_p),
+ rc = mdo_rename_tgt(env, md_object_next(mo_p),
NULL, lf, name, ma, uc);
RETURN(rc);
}
return &(obj->cmo_obj.mo_lu.lo_header->loh_fid);
}
-static int cmm_expect_splitting(const struct lu_context *ctx,
+static int cmm_expect_splitting(const struct lu_env *env,
struct md_object *mo,
struct md_attr *ma,
struct md_ucred *uc)
if (ma->ma_lmv_size)
GOTO(cleanup, rc = CMM_NO_SPLIT_EXPECTED);
OBD_ALLOC_PTR(fid);
- rc = cmm_child_ops(cmm)->mdo_root_get(ctx, cmm->cmm_child, fid, uc);
+ rc = cmm_child_ops(cmm)->mdo_root_get(env, cmm->cmm_child, fid, uc);
if (rc)
GOTO(cleanup, rc);
#define cmm_md_size(stripes) \
(sizeof(struct lmv_stripe_md) + (stripes) * sizeof(struct lu_fid))
-static int cmm_alloc_fid(const struct lu_context *ctx, struct cmm_device *cmm,
+static int cmm_alloc_fid(const struct lu_env *env, struct cmm_device *cmm,
struct lu_fid *fid, int count)
{
struct mdc_device *mc, *tmp;
ls = cmm->cmm_md_dev.md_lu_dev.ld_site;
rc = fld_client_create(ls->ls_client_fld,
fid_seq(&fid[i]),
- mc->mc_num, ctx);
+ mc->mc_num, env);
}
if (rc < 0) {
spin_unlock(&cmm->cmm_tgt_guard);
RETURN(rc);
}
-struct cmm_object *cmm_object_find(const struct lu_context *ctxt,
+struct cmm_object *cmm_object_find(const struct lu_env *env,
struct cmm_device *d,
const struct lu_fid *f,
struct lustre_capa *capa)
struct cmm_object *m;
ENTRY;
- o = lu_object_find(ctxt, d->cmm_md_dev.md_lu_dev.ld_site, f,
+ o = lu_object_find(env, d->cmm_md_dev.md_lu_dev.ld_site, f,
capa);
if (IS_ERR(o))
m = (struct cmm_object *)o;
RETURN(m);
}
-static inline void cmm_object_put(const struct lu_context *ctxt,
+static inline void cmm_object_put(const struct lu_env *env,
struct cmm_object *o)
{
- lu_object_put(ctxt, &o->cmo_obj.mo_lu);
+ lu_object_put(env, &o->cmo_obj.mo_lu);
}
-static int cmm_creat_remote_obj(const struct lu_context *ctx,
+static int cmm_creat_remote_obj(const struct lu_env *env,
struct cmm_device *cmm,
struct lu_fid *fid, struct md_attr *ma,
const struct lmv_stripe_md *lmv,
int rc;
ENTRY;
- /* XXX Since capablity will not work with split. so we
+        /* XXX Since capability will not work with split, so we
* pass NULL capablity here */
- obj = cmm_object_find(ctx, cmm, fid, NULL);
+ obj = cmm_object_find(env, cmm, fid, NULL);
if (IS_ERR(obj))
RETURN(PTR_ERR(obj));
spec->u.sp_ea.eadata = lmv;
spec->u.sp_ea.eadatalen = lmv_size;
spec->sp_cr_flags |= MDS_CREATE_SLAVE_OBJ;
- rc = mo_object_create(ctx, md_object_next(&obj->cmo_obj),
+ rc = mo_object_create(env, md_object_next(&obj->cmo_obj),
spec, ma, uc);
OBD_FREE_PTR(spec);
- cmm_object_put(ctx, obj);
+ cmm_object_put(env, obj);
RETURN(rc);
}
-static int cmm_create_slave_objects(const struct lu_context *ctx,
+static int cmm_create_slave_objects(const struct lu_env *env,
struct md_object *mo, struct md_attr *ma,
struct md_ucred *uc)
{
lmv->mea_ids[0] = *lf;
- rc = cmm_alloc_fid(ctx, cmm, &lmv->mea_ids[1],
+ rc = cmm_alloc_fid(env, cmm, &lmv->mea_ids[1],
cmm->cmm_tgt_count);
if (rc)
GOTO(cleanup, rc);
slave_lmv->mea_magic = MEA_MAGIC_HASH_SEGMENT;
slave_lmv->mea_count = 0;
for (i = 1; i < cmm->cmm_tgt_count + 1; i ++) {
- rc = cmm_creat_remote_obj(ctx, cmm, &lmv->mea_ids[i], ma,
+ rc = cmm_creat_remote_obj(env, cmm, &lmv->mea_ids[i], ma,
slave_lmv, sizeof(slave_lmv), uc);
if (rc)
GOTO(cleanup, rc);
RETURN(rc);
}
-static int cmm_send_split_pages(const struct lu_context *ctx,
+static int cmm_send_split_pages(const struct lu_env *env,
struct md_object *mo, struct lu_rdpg *rdpg,
struct lu_fid *fid, int len,
struct md_ucred *uc)
int rc = 0;
ENTRY;
- obj = cmm_object_find(ctx, cmm, fid, NULL);
+ obj = cmm_object_find(env, cmm, fid, NULL);
if (IS_ERR(obj))
RETURN(PTR_ERR(obj));
- rc = mdc_send_page(cmm, ctx, md_object_next(&obj->cmo_obj),
+ rc = mdc_send_page(cmm, env, md_object_next(&obj->cmo_obj),
rdpg->rp_pages[0], len, uc);
- cmm_object_put(ctx, obj);
+ cmm_object_put(env, obj);
RETURN(rc);
}
-static int cmm_remove_entries(const struct lu_context *ctx,
+static int cmm_remove_entries(const struct lu_env *env,
struct md_object *mo, struct lu_rdpg *rdpg,
__u32 hash_end, __u32 *len, struct md_ucred *uc)
{
* will find better way */
OBD_ALLOC(name, ent->lde_namelen + 1);
memcpy(name, ent->lde_name, ent->lde_namelen);
- rc = mdo_name_remove(ctx, md_object_next(mo),
+ rc = mdo_name_remove(env, md_object_next(mo),
name, uc);
OBD_FREE(name, ent->lde_namelen + 1);
}
if (rc) {
/* FIXME: Do not know why it return -ENOENT
- * in some case
+                                 * in some cases
* */
if (rc != -ENOENT)
GOTO(unmap, rc);
RETURN(rc);
}
-static int cmm_split_entries(const struct lu_context *ctx,
+static int cmm_split_entries(const struct lu_env *env,
struct md_object *mo, struct lu_rdpg *rdpg,
struct lu_fid *lf, __u32 end, struct md_ucred *uc)
{
memset(kmap(rdpg->rp_pages[0]), 0, CFS_PAGE_SIZE);
kunmap(rdpg->rp_pages[0]);
- rc = mo_readpage(ctx, md_object_next(mo), rdpg, uc);
+ rc = mo_readpage(env, md_object_next(mo), rdpg, uc);
/* -E2BIG means it already reach the end of the dir */
if (rc) {
if (rc != -ERANGE) {
RETURN(rc);
}
}
-
+
/* Remove the old entries */
- rc = cmm_remove_entries(ctx, mo, rdpg, end, &len, uc);
+ rc = cmm_remove_entries(env, mo, rdpg, end, &len, uc);
if (rc)
RETURN(rc);
- /* Send page to slave object */
+ /* Send page to slave object */
if (len > 0) {
- rc = cmm_send_split_pages(ctx, mo, rdpg, lf, len, uc);
+ rc = cmm_send_split_pages(env, mo, rdpg, lf, len, uc);
if (rc)
RETURN(rc);
}
-
+
kmap(rdpg->rp_pages[0]);
ldp = page_address(rdpg->rp_pages[0]);
if (ldp->ldp_hash_end >= end) {
done = 1;
}
rdpg->rp_hash = ldp->ldp_hash_end;
- kunmap(rdpg->rp_pages[0]);
+ kunmap(rdpg->rp_pages[0]);
} while (!done);
RETURN(rc);
}
#define SPLIT_PAGE_COUNT 1
-static int cmm_scan_and_split(const struct lu_context *ctx,
+static int cmm_scan_and_split(const struct lu_env *env,
struct md_object *mo, struct md_attr *ma,
struct md_ucred *uc)
{
rdpg->rp_hash = i * hash_segement;
hash_end = rdpg->rp_hash + hash_segement;
- rc = cmm_split_entries(ctx, mo, rdpg, lf, hash_end, uc);
+ rc = cmm_split_entries(env, mo, rdpg, lf, hash_end, uc);
if (rc)
GOTO(cleanup, rc);
}
RETURN(rc);
}
-int cml_try_to_split(const struct lu_context *ctx, struct md_object *mo,
+int cml_try_to_split(const struct lu_env *env, struct md_object *mo,
struct md_ucred *uc)
{
struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
RETURN(-ENOMEM);
ma->ma_need = MA_INODE|MA_LMV;
- rc = mo_attr_get(ctx, mo, ma, uc);
+ rc = mo_attr_get(env, mo, ma, uc);
if (rc)
GOTO(cleanup, ma);
/* step1: checking whether the dir need to be splitted */
- rc = cmm_expect_splitting(ctx, mo, ma, uc);
+ rc = cmm_expect_splitting(env, mo, ma, uc);
if (rc != CMM_EXPECT_SPLIT)
GOTO(cleanup, rc = 0);
/* Disable trans for splitting, since there will be
- * so many trans in this one ops, confilct with current
+         * so many trans in this one ops, conflict with current
* recovery design */
- rc = cmm_upcall(ctx, &cmm->cmm_md_dev, MD_NO_TRANS);
+ rc = cmm_upcall(env, &cmm->cmm_md_dev, MD_NO_TRANS);
if (rc)
GOTO(cleanup, rc = 0);
/* step2: create slave objects */
- rc = cmm_create_slave_objects(ctx, mo, ma, uc);
+ rc = cmm_create_slave_objects(env, mo, ma, uc);
if (rc)
GOTO(cleanup, ma);
/* step3: scan and split the object */
- rc = cmm_scan_and_split(ctx, mo, ma, uc);
+ rc = cmm_scan_and_split(env, mo, ma, uc);
if (rc)
GOTO(cleanup, ma);
/* step4: set mea to the master object */
- rc = mo_xattr_set(ctx, md_object_next(mo), ma->ma_lmv,
+ rc = mo_xattr_set(env, md_object_next(mo), ma->ma_lmv,
ma->ma_lmv_size, MDS_LMV_MD_NAME, 0, uc);
- if (rc == -ERESTART)
- CWARN("Dir"DFID" has been split \n",
+ if (rc == -ERESTART)
+ CWARN("Dir"DFID" has been split \n",
PFID(lu_object_fid(&mo->mo_lu)));
cleanup:
if (ma->ma_lmv_size && ma->ma_lmv)
* mdc_add_obd() find that obd by uuid and connects to it.
* Local MDT uuid is used for connection
* */
-static int mdc_add_obd(const struct lu_context *ctx,
+static int mdc_add_obd(const struct lu_env *env,
struct mdc_device *mc, struct lustre_cfg *cfg)
{
struct mdc_cli_desc *desc = &mc->mc_desc;
RETURN(-ENOMEM);
/* The connection between MDS must be local */
ocd->ocd_connect_flags |= OBD_CONNECT_LCL_CLIENT;
- rc = obd_connect(ctx, conn, mdc, &mdc->obd_uuid, ocd);
+ rc = obd_connect(env, conn, mdc, &mdc->obd_uuid, ocd);
OBD_FREE_PTR(ocd);
if (rc) {
CERROR("target %s connect error %d\n",
CDEBUG(D_CONFIG, "disconnect from %s\n",
mdc_obd->obd_name);
-
+
rc = obd_fid_fini(desc->cl_exp);
if (rc)
CERROR("fid init error %d \n", rc);
obd_register_observer(mdc_obd, NULL);
-
+
/*TODO: Give the same shutdown flags as we have */
/*
desc->cl_exp->exp_obd->obd_force = mdt_obd->obd_force;
RETURN(rc);
}
-static int mdc_process_config(const struct lu_context *ctx,
+static int mdc_process_config(const struct lu_env *env,
struct lu_device *ld, struct lustre_cfg *cfg)
{
struct mdc_device *mc = lu2mdc_dev(ld);
ENTRY;
switch (cfg->lcfg_command) {
case LCFG_ADD_MDC:
- rc = mdc_add_obd(ctx, mc, cfg);
+ rc = mdc_add_obd(env, mc, cfg);
break;
default:
rc = -EOPNOTSUPP;
.ldo_process_config = mdc_process_config
};
-static int mdc_device_init(const struct lu_context *ctx,
+static int mdc_device_init(const struct lu_env *env,
struct lu_device *ld, struct lu_device *next)
{
return 0;
}
-static struct lu_device *mdc_device_fini(const struct lu_context *ctx,
+static struct lu_device *mdc_device_fini(const struct lu_env *env,
struct lu_device *ld)
{
struct mdc_device *mc = lu2mdc_dev(ld);
RETURN (NULL);
}
-struct lu_device *mdc_device_alloc(const struct lu_context *ctx,
+struct lu_device *mdc_device_alloc(const struct lu_env *env,
struct lu_device_type *ldt,
struct lustre_cfg *cfg)
{
RETURN (ld);
}
-void mdc_device_free(const struct lu_context *ctx, struct lu_device *ld)
+void mdc_device_free(const struct lu_env *env, struct lu_device *ld)
{
struct mdc_device *mc = lu2mdc_dev(ld);
/* context key constructor/destructor */
-static void *mdc_thread_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *mdc_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct mdc_thread_info *info;
return info;
}
-static void mdc_thread_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void mdc_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct mdc_thread_info *info = data;
OBD_FREE_PTR(info);
struct lu_context_key mdc_thread_key = {
.lct_tags = LCT_MD_THREAD|LCT_CL_THREAD,
- .lct_init = mdc_thread_init,
- .lct_fini = mdc_thread_fini
+ .lct_init = mdc_key_init,
+ .lct_fini = mdc_key_fini
};
int mdc_type_init(struct lu_device_type *ldt)
*
* Copyright (C) 2006 Cluster File Systems, Inc.
* Author: Mike Pershin <tappro@clusterfs.com>
- *
+ *
* This file is part of the Lustre file system, http://www.lustre.org
* Lustre is a trademark of Cluster File Systems, Inc.
*
return container_of0(ld, struct mdc_device, mc_md_dev.md_lu_dev);
}
-struct lu_object *mdc_object_alloc(const struct lu_context *,
+struct lu_object *mdc_object_alloc(const struct lu_env *,
const struct lu_object_header *,
struct lu_device *);
#ifdef HAVE_SPLIT_SUPPORT
-int mdc_send_page(struct cmm_device *cmm, const struct lu_context *ctx,
+int mdc_send_page(struct cmm_device *cmm, const struct lu_env *env,
struct md_object *mo, struct page *page, __u32 end,
struct md_ucred *uc);
#endif
extern struct lu_context_key mdc_thread_key;
-struct lu_object *mdc_object_alloc(const struct lu_context *ctx,
+struct lu_object *mdc_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *ld)
{
RETURN(NULL);
}
-static void mdc_object_free(const struct lu_context *ctx, struct lu_object *lo)
+static void mdc_object_free(const struct lu_env *env, struct lu_object *lo)
{
struct mdc_object *mco = lu2mdc_obj(lo);
lu_object_fini(lo);
OBD_FREE_PTR(mco);
}
-static int mdc_object_init(const struct lu_context *ctx, struct lu_object *lo)
+static int mdc_object_init(const struct lu_env *env, struct lu_object *lo)
{
ENTRY;
lo->lo_header->loh_attr |= LOHA_REMOTE;
RETURN(0);
}
-static int mdc_object_print(const struct lu_context *ctx, void *cookie,
+static int mdc_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *lo)
{
- return (*p)(ctx, cookie, LUSTRE_CMM_MDC_NAME"-object@%p", lo);
+ return (*p)(env, cookie, LUSTRE_CMM_MDC_NAME"-object@%p", lo);
}
static struct lu_object_operations mdc_obj_ops = {
};
/* md_object_operations */
-static
-struct mdc_thread_info *mdc_info_get(const struct lu_context *ctx)
+static
+struct mdc_thread_info *mdc_info_get(const struct lu_env *env)
{
struct mdc_thread_info *mci;
- mci = lu_context_key_get(ctx, &mdc_thread_key);
+ mci = lu_context_key_get(&env->le_ctx, &mdc_thread_key);
LASSERT(mci);
return mci;
}
-static
-struct mdc_thread_info *mdc_info_init(const struct lu_context *ctx)
+static
+struct mdc_thread_info *mdc_info_init(const struct lu_env *env)
{
struct mdc_thread_info *mci;
- mci = mdc_info_get(ctx);
+ mci = mdc_info_get(env);
memset(mci, 0, sizeof(*mci));
ma->ma_valid = MA_INODE;
}
-static int mdc_req2attr_update(const struct lu_context *ctx,
+static int mdc_req2attr_update(const struct lu_env *env,
struct md_attr *ma)
{
struct mdc_thread_info *mci;
struct mdt_body *body;
struct lov_mds_md *lov;
struct llog_cookie *cookie;
-
+
ENTRY;
- mci = mdc_info_get(ctx);
+ mci = mdc_info_get(env);
req = mci->mci_req;
LASSERT(req);
body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
CERROR("OBD_MD_FLEASIZE is set but eadatasize is zero\n");
RETURN(-EPROTO);
}
-
+
lov = lustre_swab_repbuf(req, REPLY_REC_OFF + 1,
body->eadatasize, NULL);
if (lov == NULL) {
ma->ma_valid |= MA_LOV;
if (!(body->valid & OBD_MD_FLCOOKIE))
RETURN(0);
-
+
if (body->aclsize == 0) {
CERROR("OBD_MD_FLCOOKIE is set but cookie size is zero\n");
RETURN(-EPROTO);
RETURN(0);
}
-static int mdc_attr_get(const struct lu_context *ctx, struct md_object *mo,
+static int mdc_attr_get(const struct lu_env *env, struct md_object *mo,
struct md_attr *ma, struct md_ucred *uc)
{
struct mdc_device *mc = md2mdc_dev(md_obj2dev(mo));
int rc;
ENTRY;
- mci = lu_context_key_get(ctx, &mdc_thread_key);
+ mci = lu_context_key_get(&env->le_ctx, &mdc_thread_key);
LASSERT(mci);
memset(&mci->mci_opdata, 0, sizeof(mci->mci_opdata));
if (rc == 0) {
/* get attr from request */
- rc = mdc_req2attr_update(ctx, ma);
+ rc = mdc_req2attr_update(env, ma);
}
ptlrpc_req_finished(mci->mci_req);
}
-static int mdc_object_create(const struct lu_context *ctx,
+static int mdc_object_create(const struct lu_env *env,
struct md_object *mo,
const struct md_create_spec *spec,
struct md_attr *ma,
ENTRY;
LASSERT(spec->u.sp_pfid != NULL);
- mci = mdc_info_init(ctx);
+ mci = mdc_info_init(env);
mci->mci_opdata.fid2 = *lu_object_fid(&mo->mo_lu);
/* parent fid is needed to create dotdot on the remote node */
mci->mci_opdata.fid1 = *(spec->u.sp_pfid);
symname = spec->u.sp_symname;
symlen = symname ? strlen(symname) + 1 : 0;
}
-
+
rc = md_create(mc->mc_desc.cl_exp, &mci->mci_opdata,
symname, symlen,
la->la_mode, uid, gid, cap, la->la_rdev,
if (rc == 0) {
/* get attr from request */
- rc = mdc_req2attr_update(ctx, ma);
+ rc = mdc_req2attr_update(env, ma);
}
ptlrpc_req_finished(mci->mci_req);
RETURN(rc);
}
-static int mdc_ref_add(const struct lu_context *ctx, struct md_object *mo,
+static int mdc_ref_add(const struct lu_env *env, struct md_object *mo,
struct md_ucred *uc)
{
struct mdc_device *mc = md2mdc_dev(md_obj2dev(mo));
int rc;
ENTRY;
- mci = lu_context_key_get(ctx, &mdc_thread_key);
+ mci = lu_context_key_get(&env->le_ctx, &mdc_thread_key);
LASSERT(mci);
memset(&mci->mci_opdata, 0, sizeof(mci->mci_opdata));
RETURN(rc);
}
-static int mdc_ref_del(const struct lu_context *ctx, struct md_object *mo,
+static int mdc_ref_del(const struct lu_env *env, struct md_object *mo,
struct md_attr *ma, struct md_ucred *uc)
{
struct mdc_device *mc = md2mdc_dev(md_obj2dev(mo));
int rc;
ENTRY;
- mci = mdc_info_init(ctx);
+ mci = mdc_info_init(env);
mci->mci_opdata.fid1 = *lu_object_fid(&mo->mo_lu);
mci->mci_opdata.mode = la->la_mode;
mci->mci_opdata.mod_time = la->la_ctime;
rc = md_unlink(mc->mc_desc.cl_exp, &mci->mci_opdata, &mci->mci_req);
if (rc == 0) {
/* get attr from request */
- rc = mdc_req2attr_update(ctx, ma);
+ rc = mdc_req2attr_update(env, ma);
}
ptlrpc_req_finished(mci->mci_req);
}
#ifdef HAVE_SPLIT_SUPPORT
-int mdc_send_page(struct cmm_device *cm, const struct lu_context *ctx,
+int mdc_send_page(struct cmm_device *cm, const struct lu_env *env,
struct md_object *mo, struct page *page, __u32 offset,
struct md_ucred *uc)
{
};
/* md_dir_operations */
-static int mdc_rename_tgt(const struct lu_context *ctx, struct md_object *mo_p,
+static int mdc_rename_tgt(const struct lu_env *env, struct md_object *mo_p,
struct md_object *mo_t, const struct lu_fid *lf,
const char *name, struct md_attr *ma,
struct md_ucred *uc)
int rc;
ENTRY;
- mci = mdc_info_init(ctx);
+ mci = mdc_info_init(env);
mci->mci_opdata.fid1 = *lu_object_fid(&mo_p->mo_lu);
mci->mci_opdata.fid2 = *lf;
mci->mci_opdata.mode = la->la_mode;
name, strlen(name), &mci->mci_req);
if (rc == 0) {
/* get attr from request */
- mdc_req2attr_update(ctx, ma);
+ mdc_req2attr_update(env, ma);
}
ptlrpc_req_finished(mci->mci_req);
RETURN(rc);
}
-static int mdc_is_subdir(const struct lu_context *ctx, struct md_object *mo,
+static int mdc_is_subdir(const struct lu_env *env, struct md_object *mo,
const struct lu_fid *fid, struct lu_fid *sfid,
struct md_ucred *uc)
{
int rc;
ENTRY;
- mci = mdc_info_init(ctx);
-
+ mci = mdc_info_init(env);
+
/* FIXME: capability for split! */
rc = md_is_subdir(mc->mc_desc.cl_exp, lu_object_fid(&mo->mo_lu),
fid, NULL, NULL, &mci->mci_req);
body = lustre_msg_buf(mci->mci_req->rq_repmsg, REPLY_REC_OFF,
sizeof(*body));
-
+
LASSERT(body->valid & (OBD_MD_FLMODE | OBD_MD_FLID) &&
(body->mode == 0 || body->mode == 1 || body->mode == EREMOTE));
/* assigns client to sequence controller node */
int seq_server_set_cli(struct lu_server_seq *seq,
struct lu_client_seq *cli,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
int rc = 0;
ENTRY;
if (cli == NULL) {
CDEBUG(D_INFO|D_WARNING, "%s: Detached "
- "sequence client %s\n", seq->lss_name,
+ "sequence client %s\n", seq->lss_name,
cli->lcs_name);
seq->lss_cli = cli;
RETURN(0);
/* get new range from controller only if super-sequence is not yet
* initialized from backing store or something else. */
if (range_is_zero(&seq->lss_super)) {
- rc = seq_client_alloc_super(cli, ctx);
+ rc = seq_client_alloc_super(cli, env);
if (rc) {
up(&seq->lss_sem);
CERROR("%s: Can't allocate super-sequence, "
seq->lss_super = cli->lcs_range;
/* save init seq to backing store. */
- rc = seq_store_write(seq, ctx);
+ rc = seq_store_write(seq, env);
if (rc) {
CERROR("%s: Can't write sequence state, "
"rc = %d\n", seq->lss_name, rc);
static int __seq_server_alloc_super(struct lu_server_seq *seq,
struct lu_range *in,
struct lu_range *out,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
struct lu_range *space = &seq->lss_space;
int rc;
} else {
if (range_space(space) < seq->lss_super_width) {
CWARN("%s: Sequences space to be exhausted soon. "
- "Only "LPU64" sequences left\n", seq->lss_name,
+ "Only "LPU64" sequences left\n", seq->lss_name,
range_space(space));
*out = *space;
space->lr_start = space->lr_end;
} else if (range_is_exhausted(space)) {
- CERROR("%s: Sequences space is exhausted\n",
+ CERROR("%s: Sequences space is exhausted\n",
seq->lss_name);
RETURN(-ENOSPC);
} else {
}
}
- rc = seq_store_write(seq, ctx);
+ rc = seq_store_write(seq, env);
if (rc) {
CERROR("%s: Can't save state, rc %d\n", seq->lss_name, rc);
RETURN(rc);
int seq_server_alloc_super(struct lu_server_seq *seq,
struct lu_range *in,
struct lu_range *out,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
int rc;
ENTRY;
down(&seq->lss_sem);
- rc = __seq_server_alloc_super(seq, in, out, ctx);
+ rc = __seq_server_alloc_super(seq, in, out, env);
up(&seq->lss_sem);
RETURN(rc);
static int __seq_server_alloc_meta(struct lu_server_seq *seq,
struct lu_range *in,
struct lu_range *out,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
struct lu_range *super = &seq->lss_super;
int rc = 0;
LASSERT(range_is_sane(super));
- /*
+ /*
* This is recovery case. Adjust super range if input range looks like
* it is allocated from new super.
*/
if (range_is_exhausted(super)) {
LASSERT(in->lr_start > super->lr_start);
- /*
+ /*
* Server cannot send to client empty range, this is why
* we check here that range from client is "newer" than
* exhausted super.
*/
super->lr_start = in->lr_start;
-
+
super->lr_end = super->lr_start +
LUSTRE_SEQ_SUPER_WIDTH;
} else {
- /*
+ /*
* Update super start by start from client's range. End
* should not be changed if range was not exhausted.
*/
RETURN(-EOPNOTSUPP);
}
- rc = seq_client_alloc_super(seq->lss_cli, ctx);
+ rc = seq_client_alloc_super(seq->lss_cli, env);
if (rc) {
CERROR("%s: Can't allocate super-sequence, "
"rc %d\n", seq->lss_name, rc);
range_alloc(out, super, seq->lss_meta_width);
}
- rc = seq_store_write(seq, ctx);
+ rc = seq_store_write(seq, env);
if (rc) {
CERROR("%s: Can't save state, rc = %d\n",
seq->lss_name, rc);
int seq_server_alloc_meta(struct lu_server_seq *seq,
struct lu_range *in,
struct lu_range *out,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
int rc;
ENTRY;
down(&seq->lss_sem);
- rc = __seq_server_alloc_meta(seq, in, out, ctx);
+ rc = __seq_server_alloc_meta(seq, in, out, env);
up(&seq->lss_sem);
RETURN(rc);
}
static int seq_server_handle(struct lu_site *site,
- const struct lu_context *ctx,
+ const struct lu_env *env,
__u32 opc, struct lu_range *in,
struct lu_range *out)
{
int rc;
ENTRY;
-
+
switch (opc) {
case SEQ_ALLOC_META:
if (!site->ls_server_seq) {
RETURN(-EINVAL);
}
rc = seq_server_alloc_meta(site->ls_server_seq,
- in, out, ctx);
+ in, out, env);
break;
case SEQ_ALLOC_SUPER:
if (!site->ls_control_seq) {
RETURN(-EINVAL);
}
rc = seq_server_alloc_super(site->ls_control_seq,
- in, out, ctx);
+ in, out, env);
break;
default:
rc = -EINVAL;
RETURN(rc);
}
-static int seq_req_handle(struct ptlrpc_request *req,
+static int seq_req_handle(struct ptlrpc_request *req, const struct lu_env *env,
struct seq_thread_info *info)
{
struct lu_range *out, *in = NULL;
opc = req_capsule_client_get(&info->sti_pill,
&RMF_SEQ_OPC);
if (opc != NULL) {
- const struct lu_context *ctx;
-
out = req_capsule_server_get(&info->sti_pill,
&RMF_SEQ_RANGE);
if (out == NULL)
LASSERT(!range_is_zero(in) && range_is_sane(in));
}
-
- ctx = req->rq_svc_thread->t_ctx;
- LASSERT(ctx != NULL);
- LASSERT(ctx->lc_thread == req->rq_svc_thread);
- rc = seq_server_handle(site, ctx, *opc, in, out);
+
+ rc = seq_server_handle(site, env, *opc, in, out);
} else
rc = -EPROTO;
RETURN(rc);
}
-static void *seq_thread_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *seq_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct seq_thread_info *info;
return info;
}
-static void seq_thread_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void seq_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct seq_thread_info *info = data;
OBD_FREE_PTR(info);
struct lu_context_key seq_thread_key = {
.lct_tags = LCT_MD_THREAD,
- .lct_init = seq_thread_init,
- .lct_fini = seq_thread_fini
+ .lct_init = seq_key_init,
+ .lct_fini = seq_key_fini
};
static void seq_thread_info_init(struct ptlrpc_request *req,
static int seq_handle(struct ptlrpc_request *req)
{
- const struct lu_context *ctx;
+ const struct lu_env *env;
struct seq_thread_info *info;
int rc;
-
- ctx = req->rq_svc_thread->t_ctx;
- LASSERT(ctx != NULL);
- LASSERT(ctx->lc_thread == req->rq_svc_thread);
- info = lu_context_key_get(ctx, &seq_thread_key);
+ env = req->rq_svc_thread->t_env;
+ LASSERT(env != NULL);
+
+ info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
LASSERT(info != NULL);
seq_thread_info_init(req, info);
- rc = seq_req_handle(req, info);
+ rc = seq_req_handle(req, env, info);
seq_thread_info_fini(info);
return rc;
struct dt_device *dev,
const char *prefix,
enum lu_mgr_type type,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
ENTRY;
seq->lss_super_width = LUSTRE_SEQ_SUPER_WIDTH;
seq->lss_meta_width = LUSTRE_SEQ_META_WIDTH;
- snprintf(seq->lss_name, sizeof(seq->lss_name),
+ snprintf(seq->lss_name, sizeof(seq->lss_name),
"%s-%s", (is_srv ? "srv" : "ctl"), prefix);
seq->lss_space = LUSTRE_SEQ_SPACE_RANGE;
seq->lss_super = LUSTRE_SEQ_ZERO_RANGE;
- rc = seq_store_init(seq, ctx, dev);
+ rc = seq_store_init(seq, env, dev);
if (rc)
GOTO(out, rc);
/* request backing store for saved sequence info */
- rc = seq_store_read(seq, ctx);
+ rc = seq_store_read(seq, env);
if (rc == -ENODATA) {
CDEBUG(D_INFO|D_WARNING, "%s: No data found "
"on storage, %s\n", seq->lss_name,
EXIT;
out:
if (rc)
- seq_server_fini(seq, ctx);
+ seq_server_fini(seq, env);
return rc;
}
EXPORT_SYMBOL(seq_server_init);
void seq_server_fini(struct lu_server_seq *seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
ENTRY;
seq_server_proc_fini(seq);
- seq_store_fini(seq, ctx);
+ seq_store_fini(seq, env);
EXIT;
}
{
printk(KERN_INFO "Lustre: Sequence Manager; "
"info@clusterfs.com\n");
-
+
seq_type_proc_dir = lprocfs_register(LUSTRE_SEQ_NAME,
proc_lustre_root,
NULL, NULL);
if (IS_ERR(seq_type_proc_dir))
return PTR_ERR(seq_type_proc_dir);
-
+
lu_context_key_register(&seq_thread_key);
return 0;
}
extern struct lu_context_key seq_thread_key;
int seq_store_init(struct lu_server_seq *seq,
- const struct lu_context *ctx,
+ const struct lu_env *env,
struct dt_device *dt);
void seq_store_fini(struct lu_server_seq *seq,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int seq_store_write(struct lu_server_seq *seq,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int seq_store_read(struct lu_server_seq *seq,
- const struct lu_context *ctx);
+ const struct lu_env *env);
#ifdef LPROCFS
extern struct lprocfs_vars seq_server_proc_list[];
/* zero out input range, this is not recovery yet. */
in = req_capsule_client_get(&pill, &RMF_SEQ_RANGE);
range_zero(in);
-
+
size[1] = sizeof(struct lu_range);
ptlrpc_req_set_repsize(req, 2, size);
/* Save server out to request for recovery case. */
*in = *out;
-
- CDEBUG(D_INFO, "%s: Allocated %s-sequence "DRANGE"]\n",
+
+ CDEBUG(D_INFO, "%s: Allocated %s-sequence "DRANGE"]\n",
seq->lcs_name, opcname, PRANGE(range));
-
+
EXIT;
out_req:
req_capsule_fini(&pill);
/* request sequence-controller node to allocate new super-sequence. */
static int __seq_client_alloc_super(struct lu_client_seq *seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
int rc;
-
+
#ifdef __KERNEL__
if (seq->lcs_srv) {
- LASSERT(ctx != NULL);
+ LASSERT(env != NULL);
rc = seq_server_alloc_super(seq->lcs_srv, NULL,
&seq->lcs_range,
- ctx);
+ env);
} else {
#endif
rc = seq_client_rpc(seq, &seq->lcs_range,
}
int seq_client_alloc_super(struct lu_client_seq *seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
int rc;
ENTRY;
down(&seq->lcs_sem);
- rc = __seq_client_alloc_super(seq, ctx);
+ rc = __seq_client_alloc_super(seq, env);
up(&seq->lcs_sem);
RETURN(rc);
/* request sequence-controller node to allocate new meta-sequence. */
static int __seq_client_alloc_meta(struct lu_client_seq *seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
int rc;
#ifdef __KERNEL__
if (seq->lcs_srv) {
- LASSERT(ctx != NULL);
+ LASSERT(env != NULL);
rc = seq_server_alloc_meta(seq->lcs_srv, NULL,
&seq->lcs_range,
- ctx);
+ env);
} else {
#endif
rc = seq_client_rpc(seq, &seq->lcs_range,
}
int seq_client_alloc_meta(struct lu_client_seq *seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
int rc;
ENTRY;
down(&seq->lcs_sem);
- rc = __seq_client_alloc_meta(seq, ctx);
+ rc = __seq_client_alloc_meta(seq, env);
up(&seq->lcs_sem);
RETURN(rc);
fid_oid(&seq->lcs_fid) >= seq->lcs_width)
{
seqno_t seqnr;
-
+
/* allocate new sequence for case client has no sequence at all
* or sequence is exhausted and should be switched. */
rc = __seq_client_alloc_seq(seq, &seqnr);
NULL, NULL);
if (IS_ERR(seq->lcs_proc_dir)) {
- CERROR("%s: LProcFS failed in seq-init\n",
+ CERROR("%s: LProcFS failed in seq-init\n",
seq->lcs_name);
rc = PTR_ERR(seq->lcs_proc_dir);
RETURN(rc);
/* this function implies that caller takes care about locking */
int seq_store_write(struct lu_server_seq *seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
struct dt_object *dt_obj = seq->lss_obj;
struct seq_thread_info *info;
ENTRY;
dt_dev = lu2dt_dev(seq->lss_obj->do_lu.lo_dev);
- info = lu_context_key_get(ctx, &seq_thread_key);
+ info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
LASSERT(info != NULL);
/* stub here, will fix it later */
info->sti_txn.tp_credits = SEQ_TXN_STORE_CREDITS;
- th = dt_dev->dd_ops->dt_trans_start(ctx, dt_dev, &info->sti_txn);
+ th = dt_dev->dd_ops->dt_trans_start(env, dt_dev, &info->sti_txn);
if (!IS_ERR(th)) {
/* store ranges in le format */
range_cpu_to_le(&info->sti_record.ssr_space, &seq->lss_space);
range_cpu_to_le(&info->sti_record.ssr_super, &seq->lss_super);
-
- rc = dt_obj->do_body_ops->dbo_write(ctx, dt_obj,
+
+ rc = dt_obj->do_body_ops->dbo_write(env, dt_obj,
(char *)&info->sti_record,
sizeof(info->sti_record),
&pos, th);
} else if (rc >= 0) {
rc = -EIO;
}
-
- dt_dev->dd_ops->dt_trans_stop(ctx, th);
+
+ dt_dev->dd_ops->dt_trans_stop(env, th);
} else {
rc = PTR_ERR(th);
}
/* this function implies that caller takes care about locking or locking is not
* needed (init time). */
int seq_store_read(struct lu_server_seq *seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
struct dt_object *dt_obj = seq->lss_obj;
struct seq_thread_info *info;
int rc;
ENTRY;
- info = lu_context_key_get(ctx, &seq_thread_key);
+ info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
LASSERT(info != NULL);
- rc = dt_obj->do_body_ops->dbo_read(ctx, dt_obj,
+ rc = dt_obj->do_body_ops->dbo_read(env, dt_obj,
(char *)&info->sti_record,
sizeof(info->sti_record), &pos);
-
+
if (rc == sizeof(info->sti_record)) {
range_le_to_cpu(&seq->lss_space, &info->sti_record.ssr_space);
range_le_to_cpu(&seq->lss_super, &info->sti_record.ssr_super);
CDEBUG(D_INFO|D_WARNING, "%s: Read ranges: Space - "
- DRANGE", Super - "DRANGE"\n", seq->lss_name,
+ DRANGE", Super - "DRANGE"\n", seq->lss_name,
PRANGE(&seq->lss_space), PRANGE(&seq->lss_super));
rc = 0;
} else if (rc == 0) {
rc = -ENODATA;
} else if (rc >= 0) {
- CERROR("%s: Read only %d bytes of %d\n", seq->lss_name,
+ CERROR("%s: Read only %d bytes of %d\n", seq->lss_name,
rc, sizeof(info->sti_record));
rc = -EIO;
}
}
int seq_store_init(struct lu_server_seq *seq,
- const struct lu_context *ctx,
+ const struct lu_env *env,
struct dt_device *dt)
{
struct dt_object *dt_obj;
name = seq->lss_type == LUSTRE_SEQ_SERVER ?
LUSTRE_SEQ_SRV_NAME : LUSTRE_SEQ_CTL_NAME;
-
- dt_obj = dt_store_open(ctx, dt, name, &fid);
+
+ dt_obj = dt_store_open(env, dt, name, &fid);
if (!IS_ERR(dt_obj)) {
seq->lss_obj = dt_obj;
rc = 0;
}
void seq_store_fini(struct lu_server_seq *seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
ENTRY;
if (seq->lss_obj != NULL) {
if (!IS_ERR(seq->lss_obj))
- lu_object_put(ctx, &seq->lss_obj->do_lu);
+ lu_object_put(env, &seq->lss_obj->do_lu);
seq->lss_obj = NULL;
}
-
+
EXIT;
}
#endif
{
printk(KERN_INFO "Lustre: Fid Location Database; "
"info@clusterfs.com\n");
-
+
fld_type_proc_dir = lprocfs_register(LUSTRE_FLD_NAME,
proc_lustre_root,
NULL, NULL);
if (IS_ERR(fld_type_proc_dir))
return PTR_ERR(fld_type_proc_dir);
-
+
lu_context_key_register(&fld_thread_key);
return 0;
}
/* insert index entry and update cache */
int fld_server_create(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq, mdsno_t mds)
{
- return fld_index_create(fld, ctx, seq, mds);
+ return fld_index_create(fld, env, seq, mds);
}
EXPORT_SYMBOL(fld_server_create);
/* delete index entry */
int fld_server_delete(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq)
{
- return fld_index_delete(fld, ctx, seq);
+ return fld_index_delete(fld, env, seq);
}
EXPORT_SYMBOL(fld_server_delete);
/* issue on-disk index lookup */
int fld_server_lookup(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq, mdsno_t *mds)
{
- return fld_index_lookup(fld, ctx, seq, mds);
+ return fld_index_lookup(fld, env, seq, mds);
}
EXPORT_SYMBOL(fld_server_lookup);
static int fld_server_handle(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
__u32 opc, struct md_fld *mf,
struct fld_thread_info *info)
{
switch (opc) {
case FLD_CREATE:
- rc = fld_server_create(fld, ctx,
+ rc = fld_server_create(fld, env,
mf->mf_seq, mf->mf_mds);
/* do not return -EEXIST error for resent case */
rc = 0;
break;
case FLD_DELETE:
- rc = fld_server_delete(fld, ctx, mf->mf_seq);
+ rc = fld_server_delete(fld, env, mf->mf_seq);
/* do not return -ENOENT error for resent case */
if ((info->fti_flags & MSG_RESENT) && rc == -ENOENT)
rc = 0;
break;
case FLD_LOOKUP:
- rc = fld_server_lookup(fld, ctx,
+ rc = fld_server_lookup(fld, env,
mf->mf_seq, &mf->mf_mds);
break;
default:
ENTRY;
site = req->rq_export->exp_obd->obd_lu_dev->ld_site;
-
+
rc = req_capsule_pack(&info->fti_pill);
if (rc)
RETURN(rc);
*out = *in;
rc = fld_server_handle(site->ls_server_fld,
- req->rq_svc_thread->t_ctx,
+ req->rq_svc_thread->t_env,
*opc, out, info);
} else
rc = -EPROTO;
int i;
info->fti_flags = lustre_msg_get_flags(req->rq_reqmsg);
-
+
/* mark rep buffer as req-layout stuff expects */
for (i = 0; i < ARRAY_SIZE(info->fti_rep_buf_size); i++)
info->fti_rep_buf_size[i] = -1;
static int fld_handle(struct ptlrpc_request *req)
{
- const struct lu_context *ctx;
+ const struct lu_env *env;
struct fld_thread_info *info;
int rc;
-
- ctx = req->rq_svc_thread->t_ctx;
- LASSERT(ctx != NULL);
- LASSERT(ctx->lc_thread == req->rq_svc_thread);
- info = lu_context_key_get(ctx, &fld_thread_key);
+ env = req->rq_svc_thread->t_env;
+ LASSERT(env != NULL);
+
+ info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
LASSERT(info != NULL);
fld_thread_info_init(req, info);
#endif
int fld_server_init(struct lu_server_fld *fld, struct dt_device *dt,
- const char *prefix, const struct lu_context *ctx)
+ const char *prefix, const struct lu_env *env)
{
int rc;
ENTRY;
snprintf(fld->lsf_name, sizeof(fld->lsf_name),
"srv-%s", prefix);
- rc = fld_index_init(fld, ctx, dt);
+ rc = fld_index_init(fld, env, dt);
if (rc)
GOTO(out, rc);
EXIT;
out:
if (rc)
- fld_server_fini(fld, ctx);
+ fld_server_fini(fld, env);
return rc;
}
EXPORT_SYMBOL(fld_server_init);
void fld_server_fini(struct lu_server_fld *fld,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
ENTRY;
fld_server_proc_fini(fld);
- fld_index_fini(fld, ctx);
-
+ fld_index_fini(fld, env);
+
EXIT;
}
EXPORT_SYMBOL(fld_server_fini);
extern struct lu_context_key fld_thread_key;
-static struct dt_key *fld_key(const struct lu_context *ctx,
+static struct dt_key *fld_key(const struct lu_env *env,
const seqno_t seq)
{
struct fld_thread_info *info;
ENTRY;
- info = lu_context_key_get(ctx, &fld_thread_key);
+ info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
LASSERT(info != NULL);
info->fti_key = cpu_to_be64(seq);
RETURN((void *)&info->fti_key);
}
-static struct dt_rec *fld_rec(const struct lu_context *ctx,
+static struct dt_rec *fld_rec(const struct lu_env *env,
const mdsno_t mds)
{
struct fld_thread_info *info;
ENTRY;
- info = lu_context_key_get(ctx, &fld_thread_key);
+ info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
LASSERT(info != NULL);
info->fti_rec = cpu_to_be64(mds);
}
int fld_index_create(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq, mdsno_t mds)
{
struct dt_object *dt_obj = fld->lsf_obj;
ENTRY;
dt_dev = lu2dt_dev(fld->lsf_obj->do_lu.lo_dev);
-
+
/* stub here, will fix it later */
txn.tp_credits = FLD_TXN_INDEX_INSERT_CREDITS;
- th = dt_dev->dd_ops->dt_trans_start(ctx, dt_dev, &txn);
+ th = dt_dev->dd_ops->dt_trans_start(env, dt_dev, &txn);
if (!IS_ERR(th)) {
- rc = dt_obj->do_index_ops->dio_insert(ctx, dt_obj,
- fld_rec(ctx, mds),
- fld_key(ctx, seq), th);
- dt_dev->dd_ops->dt_trans_stop(ctx, th);
+ rc = dt_obj->do_index_ops->dio_insert(env, dt_obj,
+ fld_rec(env, mds),
+ fld_key(env, seq), th);
+ dt_dev->dd_ops->dt_trans_stop(env, th);
} else
rc = PTR_ERR(th);
RETURN(rc);
}
int fld_index_delete(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq)
{
struct dt_object *dt_obj = fld->lsf_obj;
dt_dev = lu2dt_dev(fld->lsf_obj->do_lu.lo_dev);
txn.tp_credits = FLD_TXN_INDEX_DELETE_CREDITS;
- th = dt_dev->dd_ops->dt_trans_start(ctx, dt_dev, &txn);
+ th = dt_dev->dd_ops->dt_trans_start(env, dt_dev, &txn);
if (!IS_ERR(th)) {
- rc = dt_obj->do_index_ops->dio_delete(ctx, dt_obj,
- fld_key(ctx, seq), th);
- dt_dev->dd_ops->dt_trans_stop(ctx, th);
+ rc = dt_obj->do_index_ops->dio_delete(env, dt_obj,
+ fld_key(env, seq), th);
+ dt_dev->dd_ops->dt_trans_stop(env, th);
} else
rc = PTR_ERR(th);
RETURN(rc);
}
int fld_index_lookup(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq, mdsno_t *mds)
{
struct dt_object *dt_obj = fld->lsf_obj;
- struct dt_rec *rec = fld_rec(ctx, 0);
+ struct dt_rec *rec = fld_rec(env, 0);
int rc;
ENTRY;
- rc = dt_obj->do_index_ops->dio_lookup(ctx, dt_obj, rec,
- fld_key(ctx, seq));
+ rc = dt_obj->do_index_ops->dio_lookup(env, dt_obj, rec,
+ fld_key(env, seq));
if (rc == 0)
*mds = be64_to_cpu(*(__u64 *)rec);
RETURN(rc);
}
int fld_index_init(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
struct dt_device *dt)
{
struct dt_object *dt_obj;
int rc;
ENTRY;
- dt_obj = dt_store_open(ctx, dt, fld_index_name, &fid);
+ dt_obj = dt_store_open(env, dt, fld_index_name, &fid);
if (!IS_ERR(dt_obj)) {
fld->lsf_obj = dt_obj;
- rc = dt_obj->do_ops->do_index_try(ctx, dt_obj,
+ rc = dt_obj->do_ops->do_index_try(env, dt_obj,
&fld_index_features);
if (rc == 0)
LASSERT(dt_obj->do_index_ops != NULL);
}
void fld_index_fini(struct lu_server_fld *fld,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
ENTRY;
if (fld->lsf_obj != NULL) {
if (!IS_ERR(fld->lsf_obj))
- lu_object_put(ctx, &fld->lsf_obj->do_lu);
+ lu_object_put(env, &fld->lsf_obj->do_lu);
fld->lsf_obj = NULL;
}
EXIT;
};
int fld_index_init(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
struct dt_device *dt);
void fld_index_fini(struct lu_server_fld *fld,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int fld_index_create(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq, mdsno_t mds);
int fld_index_delete(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq);
int fld_index_lookup(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq, mdsno_t *mds);
#ifdef LPROCFS
list_add_tail(&target->ft_chain,
&fld->lcf_targets);
-
+
fld->lcf_count++;
spin_unlock(&fld->lcf_lock);
fld->lcf_count--;
list_del(&target->ft_chain);
spin_unlock(&fld->lcf_lock);
-
+
if (target->ft_exp != NULL)
class_export_put(target->ft_exp);
NULL, NULL);
if (IS_ERR(fld->lcf_proc_dir)) {
- CERROR("%s: LProcFS failed in fld-init\n",
+ CERROR("%s: LProcFS failed in fld-init\n",
fld->lcf_name);
rc = PTR_ERR(fld->lcf_proc_dir);
RETURN(rc);
rc = lprocfs_add_vars(fld->lcf_proc_dir,
fld_client_proc_list, fld);
if (rc) {
- CERROR("%s: Can't init FLD proc, rc %d\n",
+ CERROR("%s: Can't init FLD proc, rc %d\n",
fld->lcf_name, rc);
GOTO(out_cleanup, rc);
}
"cli-%s", prefix);
if (!hash_is_sane(hash)) {
- CERROR("%s: Wrong hash function %#x\n",
+ CERROR("%s: Wrong hash function %#x\n",
fld->lcf_name, hash);
RETURN(-EINVAL);
}
int fld_client_create(struct lu_client_fld *fld,
seqno_t seq, mdsno_t mds,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
struct md_fld md_fld = { .mf_seq = seq, .mf_mds = mds };
struct lu_fld_target *target;
#ifdef __KERNEL__
if (target->ft_srv != NULL) {
- LASSERT(ctx != NULL);
+ LASSERT(env != NULL);
rc = fld_server_create(target->ft_srv,
- ctx, seq, mds);
+ env, seq, mds);
} else {
#endif
rc = fld_client_rpc(target->ft_exp,
*/
fld_cache_insert(fld->lcf_cache, seq, mds);
} else {
- CERROR("%s: Can't create FLD entry, rc %d\n",
+ CERROR("%s: Can't create FLD entry, rc %d\n",
fld->lcf_name, rc);
}
RETURN(rc);
EXPORT_SYMBOL(fld_client_create);
int fld_client_delete(struct lu_client_fld *fld, seqno_t seq,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
struct md_fld md_fld = { .mf_seq = seq, .mf_mds = 0 };
struct lu_fld_target *target;
#ifdef __KERNEL__
if (target->ft_srv != NULL) {
- LASSERT(ctx != NULL);
+ LASSERT(env != NULL);
rc = fld_server_delete(target->ft_srv,
- ctx, seq);
+ env, seq);
} else {
#endif
rc = fld_client_rpc(target->ft_exp,
int fld_client_lookup(struct lu_client_fld *fld,
seqno_t seq, mdsno_t *mds,
- const struct lu_context *ctx)
+ const struct lu_env *env)
{
struct md_fld md_fld = { .mf_seq = seq, .mf_mds = 0 };
struct lu_fld_target *target;
#ifdef __KERNEL__
if (target->ft_srv != NULL) {
- LASSERT(ctx != NULL);
+ LASSERT(env != NULL);
rc = fld_server_lookup(target->ft_srv,
- ctx, seq, &md_fld.mf_mds);
+ env, seq, &md_fld.mf_mds);
} else {
#endif
rc = fld_client_rpc(target->ft_exp,
/*
* Return device-wide statistics.
*/
- int (*dt_statfs)(const struct lu_context *ctx,
+ int (*dt_statfs)(const struct lu_env *env,
struct dt_device *dev, struct kstatfs *sfs);
/*
* Start transaction, described by @param.
*/
- struct thandle *(*dt_trans_start)(const struct lu_context *ctx,
+ struct thandle *(*dt_trans_start)(const struct lu_env *env,
struct dt_device *dev,
struct txn_param *param);
/*
* Finish previously started transaction.
*/
- void (*dt_trans_stop)(const struct lu_context *ctx,
+ void (*dt_trans_stop)(const struct lu_env *env,
struct thandle *th);
/*
* Return fid of root index object.
*/
- int (*dt_root_get)(const struct lu_context *ctx,
+ int (*dt_root_get)(const struct lu_env *env,
struct dt_device *dev, struct lu_fid *f);
/*
* Return device configuration data.
*/
- void (*dt_conf_get)(const struct lu_context *ctx,
+ void (*dt_conf_get)(const struct lu_env *env,
const struct dt_device *dev,
struct dt_device_param *param);
/*
* handling device state, mostly for tests
*/
- int (*dt_sync)(const struct lu_context *ctx, struct dt_device *dev);
- void (*dt_ro)(const struct lu_context *ctx, struct dt_device *dev);
+ int (*dt_sync)(const struct lu_env *env, struct dt_device *dev);
+ void (*dt_ro)(const struct lu_env *env, struct dt_device *dev);
};
* Per-dt-object operations.
*/
struct dt_object_operations {
- void (*do_read_lock)(const struct lu_context *ctx,
+ void (*do_read_lock)(const struct lu_env *env,
struct dt_object *dt);
- void (*do_write_lock)(const struct lu_context *ctx,
+ void (*do_write_lock)(const struct lu_env *env,
struct dt_object *dt);
- void (*do_read_unlock)(const struct lu_context *ctx,
+ void (*do_read_unlock)(const struct lu_env *env,
struct dt_object *dt);
- void (*do_write_unlock)(const struct lu_context *ctx,
+ void (*do_write_unlock)(const struct lu_env *env,
struct dt_object *dt);
/*
* Note: following ->do_{x,}attr_{set,get}() operations are very
*
* precondition: lu_object_exists(&dt->do_lu);
*/
- int (*do_attr_get)(const struct lu_context *ctxt,
+ int (*do_attr_get)(const struct lu_env *env,
struct dt_object *dt, struct lu_attr *attr);
/*
* Set standard attributes.
*
* precondition: dt_object_exists(dt);
*/
- int (*do_attr_set)(const struct lu_context *ctxt,
+ int (*do_attr_set)(const struct lu_env *env,
struct dt_object *dt,
const struct lu_attr *attr,
struct thandle *handle);
*
* precondition: dt_object_exists(dt);
*/
- int (*do_xattr_get)(const struct lu_context *ctxt,
+ int (*do_xattr_get)(const struct lu_env *env,
struct dt_object *dt,
void *buf, int buf_len, const char *name);
/*
*
* precondition: dt_object_exists(dt);
*/
- int (*do_xattr_set)(const struct lu_context *ctxt,
+ int (*do_xattr_set)(const struct lu_env *env,
struct dt_object *dt,
const void *buf, int buf_len,
const char *name, int fl, struct thandle *handle);
*
* precondition: dt_object_exists(dt);
*/
- int (*do_xattr_del)(const struct lu_context *ctxt,
+ int (*do_xattr_del)(const struct lu_env *env,
struct dt_object *dt,
const char *name, struct thandle *handle);
/*
*
* precondition: dt_object_exists(dt);
*/
- int (*do_xattr_list)(const struct lu_context *ctxt,
+ int (*do_xattr_list)(const struct lu_env *env,
struct dt_object *dt, void *buf, int buf_len);
/*
* Create new object on this device.
* precondition: !dt_object_exists(dt);
* postcondition: ergo(result == 0, dt_object_exists(dt));
*/
- int (*do_create)(const struct lu_context *ctxt, struct dt_object *dt,
+ int (*do_create)(const struct lu_env *env, struct dt_object *dt,
struct lu_attr *attr, struct thandle *th);
/*
* Announce that this object is going to be used as an index. This
* Also probes for features. Operation is successful if all required
* features are supported.
*/
- int (*do_index_try)(const struct lu_context *ctxt,
+ int (*do_index_try)(const struct lu_env *env,
struct dt_object *dt,
const struct dt_index_features *feat);
/*
* Add nlink of the object
* precondition: dt_object_exists(dt);
*/
- void (*do_ref_add)(const struct lu_context *ctxt,
+ void (*do_ref_add)(const struct lu_env *env,
struct dt_object *dt, struct thandle *th);
/*
* Del nlink of the object
* precondition: dt_object_exists(dt);
*/
- void (*do_ref_del)(const struct lu_context *ctxt,
+ void (*do_ref_del)(const struct lu_env *env,
struct dt_object *dt, struct thandle *th);
- int (*do_readpage)(const struct lu_context *ctxt,
+ int (*do_readpage)(const struct lu_env *env,
struct dt_object *dt, const struct lu_rdpg *rdpg);
};
/*
* precondition: dt_object_exists(dt);
*/
- ssize_t (*dbo_read)(const struct lu_context *ctxt, struct dt_object *dt,
+ ssize_t (*dbo_read)(const struct lu_env *env, struct dt_object *dt,
void *buf, size_t count, loff_t *pos);
/*
* precondition: dt_object_exists(dt);
*/
- ssize_t (*dbo_write)(const struct lu_context *ctxt,
+ ssize_t (*dbo_write)(const struct lu_env *env,
struct dt_object *dt, const void *buf,
size_t count, loff_t *pos, struct thandle *handle);
};
/*
* precondition: dt_object_exists(dt);
*/
- int (*dio_lookup)(const struct lu_context *ctxt, struct dt_object *dt,
+ int (*dio_lookup)(const struct lu_env *env, struct dt_object *dt,
struct dt_rec *rec, const struct dt_key *key);
/*
* precondition: dt_object_exists(dt);
*/
- int (*dio_insert)(const struct lu_context *ctxt, struct dt_object *dt,
+ int (*dio_insert)(const struct lu_env *env, struct dt_object *dt,
const struct dt_rec *rec, const struct dt_key *key,
struct thandle *handle);
/*
* precondition: dt_object_exists(dt);
*/
- int (*dio_delete)(const struct lu_context *ctxt, struct dt_object *dt,
+ int (*dio_delete)(const struct lu_env *env, struct dt_object *dt,
const struct dt_key *key, struct thandle *handle);
/*
* Iterator interface
*
* precondition: dt_object_exists(dt);
*/
- struct dt_it *(*init)(const struct lu_context *ctxt,
+ struct dt_it *(*init)(const struct lu_env *env,
struct dt_object *dt, int writable);
- void (*fini)(const struct lu_context *ctxt,
+ void (*fini)(const struct lu_env *env,
struct dt_it *di);
- int (*get)(const struct lu_context *ctxt,
+ int (*get)(const struct lu_env *env,
struct dt_it *di,
const struct dt_key *key);
- void (*put)(const struct lu_context *ctxt,
+ void (*put)(const struct lu_env *env,
struct dt_it *di);
- int (*del)(const struct lu_context *ctxt,
+ int (*del)(const struct lu_env *env,
struct dt_it *di, struct thandle *th);
- int (*next)(const struct lu_context *ctxt,
+ int (*next)(const struct lu_env *env,
struct dt_it *di);
- struct dt_key *(*key)(const struct lu_context *ctxt,
+ struct dt_key *(*key)(const struct lu_env *env,
const struct dt_it *di);
- int (*key_size)(const struct lu_context *ctxt,
+ int (*key_size)(const struct lu_env *env,
const struct dt_it *di);
- struct dt_rec *(*rec)(const struct lu_context *ctxt,
+ struct dt_rec *(*rec)(const struct lu_env *env,
const struct dt_it *di);
- __u32 (*store)(const struct lu_context *ctxt,
+ __u32 (*store)(const struct lu_env *env,
const struct dt_it *di);
- int (*load)(const struct lu_context *ctxt,
+ int (*load)(const struct lu_env *env,
const struct dt_it *di, __u32 hash);
} dio_it;
};
* before each transaction commit.
*/
struct dt_txn_callback {
- int (*dtc_txn_start)(const struct lu_context *ctx,
+ int (*dtc_txn_start)(const struct lu_env *env,
struct txn_param *param, void *cookie);
- int (*dtc_txn_stop)(const struct lu_context *ctx,
+ int (*dtc_txn_stop)(const struct lu_env *env,
struct thandle *txn, void *cookie);
- int (*dtc_txn_commit)(const struct lu_context *ctx,
+ int (*dtc_txn_commit)(const struct lu_env *env,
struct thandle *txn, void *cookie);
void *dtc_cookie;
struct list_head dtc_linkage;
void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb);
void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb);
-int dt_txn_hook_start(const struct lu_context *ctx,
+int dt_txn_hook_start(const struct lu_env *env,
struct dt_device *dev, struct txn_param *param);
-int dt_txn_hook_stop(const struct lu_context *ctx, struct thandle *txn);
-int dt_txn_hook_commit(const struct lu_context *ctx, struct thandle *txn);
+int dt_txn_hook_stop(const struct lu_env *env, struct thandle *txn);
+int dt_txn_hook_commit(const struct lu_env *env, struct thandle *txn);
-int dt_try_as_dir(const struct lu_context *ctx, struct dt_object *obj);
-struct dt_object *dt_store_open(const struct lu_context *ctx,
+int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj);
+struct dt_object *dt_store_open(const struct lu_env *env,
struct dt_device *dt, const char *name,
struct lu_fid *fid);
struct lu_device;
struct lu_object_header;
struct lu_context;
+struct lu_env;
+
/*
* Operations common for data and meta-data devices.
*/
* postcondition: ergo(!IS_ERR(result), result->lo_dev == d &&
* result->lo_ops != NULL);
*/
- struct lu_object *(*ldo_object_alloc)(const struct lu_context *ctx,
+ struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
const struct lu_object_header *h,
struct lu_device *d);
/*
* process config specific for device
*/
- int (*ldo_process_config)(const struct lu_context *ctx,
+ int (*ldo_process_config)(const struct lu_env *env,
struct lu_device *, struct lustre_cfg *);
- int (*ldo_recovery_complete)(const struct lu_context *,
+ int (*ldo_recovery_complete)(const struct lu_env *,
struct lu_device *);
};
* Printer function is needed to provide some flexibility in (semi-)debugging
* output: possible implementations: printk, CDEBUG, sysfs/seq_file
*/
-typedef int (*lu_printer_t)(const struct lu_context *ctx,
+typedef int (*lu_printer_t)(const struct lu_env *env,
void *cookie, const char *format, ...)
__attribute__ ((format (printf, 3, 4)));
* stack. It's responsibility of this method to insert lower-layer
* object(s) it create into appropriate places of object stack.
*/
- int (*loo_object_init)(const struct lu_context *ctx,
+ int (*loo_object_init)(const struct lu_env *env,
struct lu_object *o);
/*
* Called (in top-to-bottom order) during object allocation after all
* layers were allocated and initialized. Can be used to perform
* initialization depending on lower layers.
*/
- int (*loo_object_start)(const struct lu_context *ctx,
+ int (*loo_object_start)(const struct lu_env *env,
struct lu_object *o);
/*
* Called before ->loo_object_free() to signal that object is being
* destroyed. Dual to ->loo_object_init().
*/
- void (*loo_object_delete)(const struct lu_context *ctx,
+ void (*loo_object_delete)(const struct lu_env *env,
struct lu_object *o);
/*
* Dual to ->ldo_object_alloc(). Called when object is removed from
* memory.
*/
- void (*loo_object_free)(const struct lu_context *ctx,
+ void (*loo_object_free)(const struct lu_env *env,
struct lu_object *o);
/*
* Called when last active reference to the object is released (and
* object returns to the cache). This method is optional.
*/
- void (*loo_object_release)(const struct lu_context *ctx,
+ void (*loo_object_release)(const struct lu_env *env,
struct lu_object *o);
/*
* Debugging helper. Print given object.
*/
- int (*loo_object_print)(const struct lu_context *ctx, void *cookie,
+ int (*loo_object_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
/*
* Optional debugging method. Returns true iff method is internally
/*
* Called to authorize action by capability.
*/
- int (*loo_object_auth)(const struct lu_context *ctx,
+ int (*loo_object_auth)(const struct lu_env *env,
const struct lu_object *o,
struct lustre_capa *capa,
__u64 opc);
/*
* Allocate new device.
*/
- struct lu_device *(*ldto_device_alloc)(const struct lu_context *ctx,
+ struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *lcfg);
/*
* Free device. Dual to ->ldto_device_alloc().
*/
- void (*ldto_device_free)(const struct lu_context *,
+ void (*ldto_device_free)(const struct lu_env *,
struct lu_device *);
/*
* Initialize the devices after allocation
*/
- int (*ldto_device_init)(const struct lu_context *ctx,
+ int (*ldto_device_init)(const struct lu_env *env,
struct lu_device *, struct lu_device *);
/*
* Finalize device. Dual to ->ldto_device_init(). Returns pointer to
* the next device in the stack.
*/
- struct lu_device *(*ldto_device_fini)(const struct lu_context *ctx,
+ struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
struct lu_device *);
/*
* object to the cache, unless lu_object_is_dying(o) holds. In the latter
* case, free object immediately.
*/
-void lu_object_put(const struct lu_context *ctxt,
+void lu_object_put(const struct lu_env *env,
struct lu_object *o);
/*
* Free @nr objects from the cold end of the site LRU list.
*/
-void lu_site_purge(const struct lu_context *ctx,
+void lu_site_purge(const struct lu_env *env,
struct lu_site *s, int nr);
/*
* it. Otherwise, create new object, insert it into cache and return it. In
* any case, additional reference is acquired on the returned object.
*/
-struct lu_object *lu_object_find(const struct lu_context *ctxt,
+struct lu_object *lu_object_find(const struct lu_env *env,
struct lu_site *s, const struct lu_fid *f,
struct lustre_capa *c);
/*
* Auth lu_object capability.
*/
-int lu_object_auth(const struct lu_context *ctxt, const struct lu_object *o,
+int lu_object_auth(const struct lu_env *env, const struct lu_object *o,
struct lustre_capa *capa, __u64 opc);
/*
/*
* Printer function emitting messages through libcfs_debug_msg().
*/
-int lu_cdebug_printer(const struct lu_context *ctx,
+int lu_cdebug_printer(const struct lu_env *env,
void *cookie, const char *format, ...);
/*
* Print object description followed by user-supplied message.
*/
-#define LU_OBJECT_DEBUG(mask, ctx, object, format, ...) \
+#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
({ \
static struct lu_cdebug_print_info __info = { \
.lpi_subsys = DEBUG_SUBSYSTEM, \
.lpi_fn = __FUNCTION__, \
.lpi_line = __LINE__ \
}; \
- lu_object_print(ctx, &__info, lu_cdebug_printer, object); \
+ lu_object_print(env, &__info, lu_cdebug_printer, object); \
CDEBUG(mask, format , ## __VA_ARGS__); \
})
/*
* Print human readable representation of the @o to the @f.
*/
-void lu_object_print(const struct lu_context *ctxt, void *cookie,
+void lu_object_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct lu_object *o);
/*
int lu_context_refill(const struct lu_context *ctx);
/*
+ * Environment.
+ */
+struct lu_env {
+ /*
+ * "Local" context, used to store data instead of allocating it on the stack.
+ */
+ struct lu_context le_ctx;
+ /*
+ * "Session" context for per-request data.
+ */
+ struct lu_context *le_ses;
+};
+
+int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags);
+void lu_env_fini(struct lu_env *env);
+
+/*
* One-time initializers, called at obdclass module initialization, not
* exported.
*/
struct dt_device *dev,
const char *prefix,
enum lu_mgr_type type,
- const struct lu_context *ctx);
+ const struct lu_env *env);
void seq_server_fini(struct lu_server_seq *seq,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int seq_server_alloc_super(struct lu_server_seq *seq,
struct lu_range *in,
struct lu_range *out,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int seq_server_alloc_meta(struct lu_server_seq *seq,
struct lu_range *in,
struct lu_range *out,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int seq_server_set_cli(struct lu_server_seq *seq,
struct lu_client_seq *cli,
- const struct lu_context *ctx);
+ const struct lu_env *env);
/* Client methods */
int seq_client_init(struct lu_client_seq *seq,
void seq_client_fini(struct lu_client_seq *seq);
int seq_client_alloc_super(struct lu_client_seq *seq,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int seq_client_alloc_meta(struct lu_client_seq *seq,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int seq_client_alloc_seq(struct lu_client_seq *seq,
seqno_t *seqnr);
};
struct fld_cache_info {
- /*
+ /*
* cache guard, protects fci_hash mostly because others immutable after
* init is finished.
*/
/* cache shrink threshold */
int fci_threshold;
-
+
/* prefered number of cached entries */
int fci_cache_size;
int fld_server_init(struct lu_server_fld *fld,
struct dt_device *dt,
const char *prefix,
- const struct lu_context *ctx);
+ const struct lu_env *env);
void fld_server_fini(struct lu_server_fld *fld,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int fld_server_create(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq, mdsno_t mds);
int fld_server_delete(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq);
int fld_server_lookup(struct lu_server_fld *fld,
- const struct lu_context *ctx,
+ const struct lu_env *env,
seqno_t seq, mdsno_t *mds);
/* Client methods */
int fld_client_lookup(struct lu_client_fld *fld,
seqno_t seq, mdsno_t *mds,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int fld_client_create(struct lu_client_fld *fld,
seqno_t seq, mdsno_t mds,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int fld_client_delete(struct lu_client_fld *fld,
seqno_t seq,
- const struct lu_context *ctx);
+ const struct lu_env *env);
int fld_client_add_target(struct lu_client_fld *fld,
struct lu_fld_target *tar);
};
struct lu_context;
+struct lu_env;
struct ptlrpc_request {
int rq_type; /* one of PTL_RPC_MSG_* */
unsigned int t_id; /* service thread index, from ptlrpc_start_threads */
cfs_waitq_t t_ctl_waitq;
- struct lu_context *t_ctx;
+ struct lu_env *t_env;
};
struct ptlrpc_request_buffer_desc {
/* ldlm/ldlm_lib.c */
int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
int client_obd_cleanup(struct obd_device *obddev);
-int client_connect_import(const struct lu_context *ctx,
+int client_connect_import(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid, struct obd_connect_data *);
int client_disconnect_export(struct obd_export *exp);
* Operations implemented for each md object (both directory and leaf).
*/
struct md_object_operations {
- int (*moo_permission)(const struct lu_context *ctxt,
+ int (*moo_permission)(const struct lu_env *env,
struct md_object *obj,
int mask,
struct md_ucred *uc);
- int (*moo_attr_get)(const struct lu_context *ctxt,
+ int (*moo_attr_get)(const struct lu_env *env,
struct md_object *obj,
struct md_attr *attr,
struct md_ucred *uc);
- int (*moo_attr_set)(const struct lu_context *ctxt,
+ int (*moo_attr_set)(const struct lu_env *env,
struct md_object *obj,
const struct md_attr *attr,
struct md_ucred *uc);
- int (*moo_xattr_get)(const struct lu_context *ctxt,
+ int (*moo_xattr_get)(const struct lu_env *env,
struct md_object *obj,
void *buf,
int buf_len,
const char *name,
struct md_ucred *uc);
- int (*moo_xattr_list)(const struct lu_context *ctxt,
+ int (*moo_xattr_list)(const struct lu_env *env,
struct md_object *obj,
void *buf,
int buf_len,
struct md_ucred *uc);
- int (*moo_xattr_set)(const struct lu_context *ctxt,
+ int (*moo_xattr_set)(const struct lu_env *env,
struct md_object *obj,
const void *buf,
int buf_len,
int fl,
struct md_ucred *uc);
- int (*moo_xattr_del)(const struct lu_context *ctxt,
+ int (*moo_xattr_del)(const struct lu_env *env,
struct md_object *obj,
const char *name,
struct md_ucred *uc);
- int (*moo_readpage)(const struct lu_context *ctxt,
+ int (*moo_readpage)(const struct lu_env *env,
struct md_object *obj,
const struct lu_rdpg *rdpg,
struct md_ucred *uc);
- int (*moo_readlink)(const struct lu_context *ctxt,
+ int (*moo_readlink)(const struct lu_env *env,
struct md_object *obj,
void *buf,
int buf_len,
struct md_ucred *uc);
/* part of cross-ref operation */
- int (*moo_object_create)(const struct lu_context *ctxt,
+ int (*moo_object_create)(const struct lu_env *env,
struct md_object *obj,
const struct md_create_spec *spec,
struct md_attr *ma,
struct md_ucred *uc);
- int (*moo_ref_add)(const struct lu_context * ctxt,
+ int (*moo_ref_add)(const struct lu_env *env,
struct md_object *obj,
struct md_ucred *uc);
- int (*moo_ref_del)(const struct lu_context *ctxt,
+ int (*moo_ref_del)(const struct lu_env *env,
struct md_object *obj,
struct md_attr *ma,
struct md_ucred *uc);
- int (*moo_open)(const struct lu_context *ctxt,
+ int (*moo_open)(const struct lu_env *env,
struct md_object *obj,
int flag,
struct md_ucred *uc);
- int (*moo_close)(const struct lu_context *ctxt,
+ int (*moo_close)(const struct lu_env *env,
struct md_object *obj,
struct md_attr *ma,
struct md_ucred *uc);
- int (*moo_capa_get)(const struct lu_context *, struct md_object *,
+ int (*moo_capa_get)(const struct lu_env *, struct md_object *,
struct lustre_capa *);
};
* Operations implemented for each directory object.
*/
struct md_dir_operations {
- int (*mdo_is_subdir) (const struct lu_context *ctxt,
+ int (*mdo_is_subdir) (const struct lu_env *env,
struct md_object *obj,
const struct lu_fid *fid,
struct lu_fid *sfid,
struct md_ucred *uc);
-
- int (*mdo_lookup)(const struct lu_context *ctxt,
+
+ int (*mdo_lookup)(const struct lu_env *env,
struct md_object *obj,
const char *name,
struct lu_fid *fid,
struct md_ucred *uc);
- int (*mdo_create)(const struct lu_context *ctxt,
+ int (*mdo_create)(const struct lu_env *env,
struct md_object *pobj,
const char *name,
struct md_object *child,
struct md_ucred *uc);
/* This method is used for creating data object for this meta object*/
- int (*mdo_create_data)(const struct lu_context *ctxt,
+ int (*mdo_create_data)(const struct lu_env *env,
struct md_object *p,
struct md_object *o,
const struct md_create_spec *spec,
struct md_attr *ma,
struct md_ucred *uc);
- int (*mdo_rename)(const struct lu_context *ctxt,
+ int (*mdo_rename)(const struct lu_env *env,
struct md_object *spobj,
struct md_object *tpobj,
const struct lu_fid *lf,
struct md_attr *ma,
struct md_ucred *uc);
- int (*mdo_link)(const struct lu_context *ctxt,
+ int (*mdo_link)(const struct lu_env *env,
struct md_object *tgt_obj,
struct md_object *src_obj,
const char *name,
struct md_attr *ma,
struct md_ucred *uc);
- int (*mdo_unlink)(const struct lu_context *ctxt,
+ int (*mdo_unlink)(const struct lu_env *env,
struct md_object *pobj,
struct md_object *cobj,
const char *name,
struct md_ucred *uc);
/* partial ops for cross-ref case */
- int (*mdo_name_insert)(const struct lu_context *ctxt,
+ int (*mdo_name_insert)(const struct lu_env *env,
struct md_object *obj,
const char *name,
const struct lu_fid *fid,
int isdir,
struct md_ucred *uc);
- int (*mdo_name_remove)(const struct lu_context *ctxt,
+ int (*mdo_name_remove)(const struct lu_env *env,
struct md_object *obj, const char *name,
struct md_ucred *uc);
- int (*mdo_rename_tgt)(const struct lu_context *ctxt,
+ int (*mdo_rename_tgt)(const struct lu_env *env,
struct md_object *pobj,
struct md_object *tobj,
const struct lu_fid *fid,
struct md_device_operations {
/* meta-data device related handlers. */
- int (*mdo_root_get)(const struct lu_context *ctx,
+ int (*mdo_root_get)(const struct lu_env *env,
struct md_device *m,
struct lu_fid *f,
struct md_ucred *uc);
- int (*mdo_maxsize_get)(const struct lu_context *ctx,
+ int (*mdo_maxsize_get)(const struct lu_env *env,
struct md_device *m,
int *md_size,
int *cookie_size,
struct md_ucred *uc);
- int (*mdo_statfs)(const struct lu_context *ctx,
+ int (*mdo_statfs)(const struct lu_env *env,
struct md_device *m,
struct kstatfs *sfs,
struct md_ucred *uc);
int (*mdo_init_capa_keys)(struct md_device *m,
struct lustre_capa_key *keys);
- int (*mdo_update_capa_key)(const struct lu_context *ctx,
+ int (*mdo_update_capa_key)(const struct lu_env *env,
struct md_device *m,
struct lustre_capa_key *key);
};
struct md_upcall {
struct md_device *mu_upcall_dev;
- int (*mu_upcall)(const struct lu_context *ctxt, struct md_device *md,
+ int (*mu_upcall)(const struct lu_env *env, struct md_device *md,
enum md_upcall_event ev);
};
}
/* md operations */
-static inline int mo_permission(const struct lu_context *cx,
+static inline int mo_permission(const struct lu_env *env,
struct md_object *m,
int mask,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_permission);
- return m->mo_ops->moo_permission(cx, m, mask, uc);
+ return m->mo_ops->moo_permission(env, m, mask, uc);
}
-static inline int mo_attr_get(const struct lu_context *cx,
+static inline int mo_attr_get(const struct lu_env *env,
struct md_object *m,
struct md_attr *at,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_attr_get);
- return m->mo_ops->moo_attr_get(cx, m, at, uc);
+ return m->mo_ops->moo_attr_get(env, m, at, uc);
}
-static inline int mo_readlink(const struct lu_context *cx,
+static inline int mo_readlink(const struct lu_env *env,
struct md_object *m,
void *buf,
int buf_len,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_readlink);
- return m->mo_ops->moo_readlink(cx, m, buf, buf_len, uc);
+ return m->mo_ops->moo_readlink(env, m, buf, buf_len, uc);
}
-static inline int mo_attr_set(const struct lu_context *cx,
+static inline int mo_attr_set(const struct lu_env *env,
struct md_object *m,
const struct md_attr *at,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_attr_set);
- return m->mo_ops->moo_attr_set(cx, m, at, uc);
+ return m->mo_ops->moo_attr_set(env, m, at, uc);
}
-static inline int mo_xattr_get(const struct lu_context *cx,
+static inline int mo_xattr_get(const struct lu_env *env,
struct md_object *m,
void *buf,
int buf_len,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_xattr_get);
- return m->mo_ops->moo_xattr_get(cx, m, buf, buf_len, name, uc);
+ return m->mo_ops->moo_xattr_get(env, m, buf, buf_len, name, uc);
}
-static inline int mo_xattr_del(const struct lu_context *cx,
+static inline int mo_xattr_del(const struct lu_env *env,
struct md_object *m,
const char *name,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_xattr_del);
- return m->mo_ops->moo_xattr_del(cx, m, name, uc);
+ return m->mo_ops->moo_xattr_del(env, m, name, uc);
}
-static inline int mo_xattr_set(const struct lu_context *cx,
+static inline int mo_xattr_set(const struct lu_env *env,
struct md_object *m,
const void *buf,
int buf_len,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_xattr_set);
- return m->mo_ops->moo_xattr_set(cx, m, buf, buf_len, name, flags, uc);
+ return m->mo_ops->moo_xattr_set(env, m, buf, buf_len, name, flags, uc);
}
-static inline int mo_xattr_list(const struct lu_context *cx,
+static inline int mo_xattr_list(const struct lu_env *env,
struct md_object *m,
void *buf,
int buf_len,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_xattr_list);
- return m->mo_ops->moo_xattr_list(cx, m, buf, buf_len, uc);
+ return m->mo_ops->moo_xattr_list(env, m, buf, buf_len, uc);
}
-static inline int mo_open(const struct lu_context *cx,
+static inline int mo_open(const struct lu_env *env,
struct md_object *m,
int flags,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_open);
- return m->mo_ops->moo_open(cx, m, flags, uc);
+ return m->mo_ops->moo_open(env, m, flags, uc);
}
-static inline int mo_close(const struct lu_context *cx,
+static inline int mo_close(const struct lu_env *env,
struct md_object *m,
struct md_attr *ma,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_close);
- return m->mo_ops->moo_close(cx, m, ma, uc);
+ return m->mo_ops->moo_close(env, m, ma, uc);
}
-static inline int mo_readpage(const struct lu_context *cx,
+static inline int mo_readpage(const struct lu_env *env,
struct md_object *m,
const struct lu_rdpg *rdpg,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_readpage);
- return m->mo_ops->moo_readpage(cx, m, rdpg, uc);
+ return m->mo_ops->moo_readpage(env, m, rdpg, uc);
}
-static inline int mo_object_create(const struct lu_context *cx,
+static inline int mo_object_create(const struct lu_env *env,
struct md_object *m,
const struct md_create_spec *spc,
struct md_attr *at,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_object_create);
- return m->mo_ops->moo_object_create(cx, m, spc, at, uc);
+ return m->mo_ops->moo_object_create(env, m, spc, at, uc);
}
-static inline int mo_ref_add(const struct lu_context *cx,
+static inline int mo_ref_add(const struct lu_env *env,
struct md_object *m,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_ref_add);
- return m->mo_ops->moo_ref_add(cx, m, uc);
+ return m->mo_ops->moo_ref_add(env, m, uc);
}
-static inline int mo_ref_del(const struct lu_context *cx,
+static inline int mo_ref_del(const struct lu_env *env,
struct md_object *m,
struct md_attr *ma,
struct md_ucred *uc)
{
LASSERT(m->mo_ops->moo_ref_del);
- return m->mo_ops->moo_ref_del(cx, m, ma, uc);
+ return m->mo_ops->moo_ref_del(env, m, ma, uc);
}
-static inline int mo_capa_get(const struct lu_context *cx,
+static inline int mo_capa_get(const struct lu_env *env,
struct md_object *m,
struct lustre_capa *c)
{
LASSERT(m->mo_ops->moo_capa_get);
- return m->mo_ops->moo_capa_get(cx, m, c);
+ return m->mo_ops->moo_capa_get(env, m, c);
}
-static inline int mdo_lookup(const struct lu_context *cx,
+static inline int mdo_lookup(const struct lu_env *env,
struct md_object *p,
const char *name,
struct lu_fid *f,
struct md_ucred *uc)
{
LASSERT(p->mo_dir_ops->mdo_lookup);
- return p->mo_dir_ops->mdo_lookup(cx, p, name, f, uc);
+ return p->mo_dir_ops->mdo_lookup(env, p, name, f, uc);
}
-static inline int mdo_create(const struct lu_context *cx,
+static inline int mdo_create(const struct lu_env *env,
struct md_object *p,
const char *child_name,
struct md_object *c,
struct md_ucred *uc)
{
LASSERT(c->mo_dir_ops->mdo_create);
- return c->mo_dir_ops->mdo_create(cx, p, child_name, c, spc, at, uc);
+ return c->mo_dir_ops->mdo_create(env, p, child_name, c, spc, at, uc);
}
-static inline int mdo_create_data(const struct lu_context *cx,
+static inline int mdo_create_data(const struct lu_env *env,
struct md_object *p,
struct md_object *c,
const struct md_create_spec *spec,
struct md_ucred *uc)
{
LASSERT(c->mo_dir_ops->mdo_create_data);
- return c->mo_dir_ops->mdo_create_data(cx, p, c, spec, ma, uc);
+ return c->mo_dir_ops->mdo_create_data(env, p, c, spec, ma, uc);
}
-static inline int mdo_rename(const struct lu_context *cx,
+static inline int mdo_rename(const struct lu_env *env,
struct md_object *sp,
struct md_object *tp,
const struct lu_fid *lf,
struct md_ucred *uc)
{
LASSERT(tp->mo_dir_ops->mdo_rename);
- return tp->mo_dir_ops->mdo_rename(cx, sp, tp, lf, sname, t, tname,
+ return tp->mo_dir_ops->mdo_rename(env, sp, tp, lf, sname, t, tname,
ma, uc);
}
-static inline int mdo_is_subdir(const struct lu_context *cx,
+static inline int mdo_is_subdir(const struct lu_env *env,
struct md_object *mo,
const struct lu_fid *fid,
struct lu_fid *sfid,
struct md_ucred *uc)
{
LASSERT(mo->mo_dir_ops->mdo_is_subdir);
- return mo->mo_dir_ops->mdo_is_subdir(cx, mo, fid, sfid, uc);
+ return mo->mo_dir_ops->mdo_is_subdir(env, mo, fid, sfid, uc);
}
-static inline int mdo_link(const struct lu_context *cx,
+static inline int mdo_link(const struct lu_env *env,
struct md_object *p,
struct md_object *s,
const char *name,
struct md_ucred *uc)
{
LASSERT(s->mo_dir_ops->mdo_link);
- return s->mo_dir_ops->mdo_link(cx, p, s, name, ma, uc);
+ return s->mo_dir_ops->mdo_link(env, p, s, name, ma, uc);
}
-static inline int mdo_unlink(const struct lu_context *cx,
+static inline int mdo_unlink(const struct lu_env *env,
struct md_object *p,
struct md_object *c,
const char *name,
struct md_ucred *uc)
{
LASSERT(c->mo_dir_ops->mdo_unlink);
- return c->mo_dir_ops->mdo_unlink(cx, p, c, name, ma, uc);
+ return c->mo_dir_ops->mdo_unlink(env, p, c, name, ma, uc);
}
-static inline int mdo_name_insert(const struct lu_context *cx,
+static inline int mdo_name_insert(const struct lu_env *env,
struct md_object *p,
const char *name,
const struct lu_fid *f,
struct md_ucred *uc)
{
LASSERT(p->mo_dir_ops->mdo_name_insert);
- return p->mo_dir_ops->mdo_name_insert(cx, p, name, f, isdir, uc);
+ return p->mo_dir_ops->mdo_name_insert(env, p, name, f, isdir, uc);
}
-static inline int mdo_name_remove(const struct lu_context *cx,
+static inline int mdo_name_remove(const struct lu_env *env,
struct md_object *p,
const char *name,
struct md_ucred *uc)
{
LASSERT(p->mo_dir_ops->mdo_name_remove);
- return p->mo_dir_ops->mdo_name_remove(cx, p, name, uc);
+ return p->mo_dir_ops->mdo_name_remove(env, p, name, uc);
}
-static inline int mdo_rename_tgt(const struct lu_context *cx,
+static inline int mdo_rename_tgt(const struct lu_env *env,
struct md_object *p,
struct md_object *t,
const struct lu_fid *lf,
{
if (t) {
LASSERT(t->mo_dir_ops->mdo_rename_tgt);
- return t->mo_dir_ops->mdo_rename_tgt(cx, p, t, lf, name,
+ return t->mo_dir_ops->mdo_rename_tgt(env, p, t, lf, name,
ma, uc);
} else {
LASSERT(p->mo_dir_ops->mdo_rename_tgt);
- return p->mo_dir_ops->mdo_rename_tgt(cx, p, t, lf, name,
+ return p->mo_dir_ops->mdo_rename_tgt(env, p, t, lf, name,
ma, uc);
}
}
struct semaphore fo_init_lock; /* group initialization lock */
int fo_committed_group;
-
+
spinlock_t fo_objidlock; /* protect fo_lastobjid */
spinlock_t fo_translock; /* protect fsd_last_transno */
struct file *fo_rcvd_filp;
unsigned int obd_attached:1, /* finished attach */
obd_set_up:1, /* finished setup */
obd_recovering:1, /* there are recoverable clients */
- obd_abort_recovery:1,/* somebody ioctl'ed us to abort */
+ obd_abort_recovery:1,/* somebody ioctl'ed us to abort */
obd_replayable:1, /* recovery is enabled; inform clients */
obd_no_transno:1, /* no committed-transno notification */
obd_no_recov:1, /* fail instead of retry messages */
* data. @ocd->ocd_connect_flags is modified to reflect flags actually
* granted by the target, which are guaranteed to be a subset of flags
* asked for. If @ocd == NULL, use default parameters. */
- int (*o_connect)(const struct lu_context *ctx,
+ int (*o_connect)(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *src,
struct obd_uuid *cluuid, struct obd_connect_data *ocd);
int (*o_reconnect)(struct obd_export *exp, struct obd_device *src,
int cmd, obd_off *);
/* llog related obd_methods */
- int (*o_llog_init)(struct obd_device *obd, struct obd_llogs *llog,
- struct obd_device *disk_obd, int count,
+ int (*o_llog_init)(struct obd_device *obd, struct obd_llogs *llog,
+ struct obd_device *disk_obd, int count,
struct llog_catid *logid, struct obd_uuid *uuid);
int (*o_llog_finish)(struct obd_device *obd, int count);
int (*o_llog_connect)(struct obd_export *, struct llogd_conn_body *);
-
+
/* metadata-only methods */
int (*o_pin)(struct obd_export *, const struct lu_fid *fid,
struct obd_capa *, struct obd_client_handle *, int flag);
int (*m_init_ea_size)(struct obd_export *, int, int, int);
int (*m_get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
- int, struct obd_export *, struct obd_export *,
+ int, struct obd_export *, struct obd_export *,
struct lustre_md *);
int (*m_free_lustre_md)(struct obd_export *, struct lustre_md *);
/* obd_config.c */
int class_process_config(struct lustre_cfg *lcfg);
-int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
+int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
struct lustre_cfg *lcfg, void *data);
int class_attach(struct lustre_cfg *lcfg);
int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
#ifdef __KERNEL__
ldt = obd->obd_type->typ_lu;
if (ldt != NULL) {
- struct lu_context ctx;
+ struct lu_env env;
- rc = lu_context_init(&ctx, ldt->ldt_ctx_tags);
+ rc = lu_env_init(&env, NULL, ldt->ldt_ctx_tags);
if (rc == 0) {
- lu_context_enter(&ctx);
- d = ldt->ldt_ops->ldto_device_alloc(&ctx, ldt, cfg);
- lu_context_exit(&ctx);
- lu_context_fini(&ctx);
+ d = ldt->ldt_ops->ldto_device_alloc(&env, ldt, cfg);
+ lu_env_fini(&env);
if (!IS_ERR(d)) {
obd->obd_lu_dev = d;
d->ld_obd = obd;
if (ldt != NULL) {
LASSERT(d != NULL);
if (cleanup_stage == OBD_CLEANUP_EXPORTS) {
- struct lu_context ctx;
-
- rc = lu_context_init(&ctx, ldt->ldt_ctx_tags);
+ struct lu_env env;
+
+ rc = lu_env_init(&env, NULL, ldt->ldt_ctx_tags);
if (rc == 0) {
- lu_context_enter(&ctx);
- ldt->ldt_ops->ldto_device_fini(&ctx, d);
- lu_context_exit(&ctx);
- lu_context_fini(&ctx);
+ ldt->ldt_ops->ldto_device_fini(&env, d);
+ lu_env_fini(&env);
}
} else {
rc = 0;
OBD_CHECK_DT_OP(obd, precleanup, 0);
rc = OBP(obd, precleanup)(obd, cleanup_stage);
}
-
+
OBD_COUNTER_INCREMENT(obd, precleanup);
RETURN(rc);
}
ldt = obd->obd_type->typ_lu;
d = obd->obd_lu_dev;
if (ldt != NULL) {
- struct lu_context ctx;
+ struct lu_env env;
LASSERT(d != NULL);
- rc = lu_context_init(&ctx, ldt->ldt_ctx_tags);
+ rc = lu_env_init(&env, NULL, ldt->ldt_ctx_tags);
if (rc == 0) {
- lu_context_enter(&ctx);
- ldt->ldt_ops->ldto_device_free(&ctx, d);
- lu_context_exit(&ctx);
- lu_context_fini(&ctx);
+ ldt->ldt_ops->ldto_device_free(&env, d);
+ lu_env_fini(&env);
obd->obd_lu_dev = NULL;
}
} else
ldt = obd->obd_type->typ_lu;
d = obd->obd_lu_dev;
if (ldt != NULL && d != NULL) {
- struct lu_context ctx;
+ struct lu_env env;
- rc = lu_context_init(&ctx, ldt->ldt_ctx_tags);
+ rc = lu_env_init(&env, NULL, ldt->ldt_ctx_tags);
if (rc == 0) {
- lu_context_enter(&ctx);
- rc = d->ld_ops->ldo_process_config(&ctx, d, data);
- lu_context_exit(&ctx);
- lu_context_fini(&ctx);
+ rc = d->ld_ops->ldo_process_config(&env, d, data);
+ lu_env_fini(&env);
}
} else
#endif
RETURN(rc);
}
-static inline int obd_connect(const struct lu_context *ctx,
+static inline int obd_connect(const struct lu_env *env,
struct lustre_handle *conn,struct obd_device *obd,
struct obd_uuid *cluuid,
struct obd_connect_data *d)
OBD_CHECK_DT_OP(obd, connect, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, connect);
- rc = OBP(obd, connect)(ctx, conn, obd, cluuid, d);
+ rc = OBP(obd, connect)(env, conn, obd, cluuid, d);
/* check that only subset is granted */
LASSERT(ergo(d != NULL,
(d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
struct llogd_conn_body *body)
{
ENTRY;
-
+
OBD_CHECK_DT_OP(exp->exp_obd, llog_connect, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, llog_connect);
---- linux.orig/fs/ext3/super.c 2006-08-25 12:39:48.000000000 +0400
-+++ linux/fs/ext3/super.c 2006-09-27 02:37:46.000000000 +0400
-@@ -1139,8 +1139,8 @@
+Index: iam/fs/ext3/super.c
+===================================================================
+--- iam.orig/fs/ext3/super.c 2006-09-28 22:41:30.000000000 +0400
++++ iam/fs/ext3/super.c 2006-09-28 22:41:31.000000000 +0400
+@@ -1168,8 +1168,8 @@ static int ext3_check_descriptors (struc
* e2fsck was run on this filesystem, and it must have already done the orphan
* inode cleanup for us, so we can safely abort without any further action.
*/
{
unsigned int s_flags = sb->s_flags;
int nr_orphans = 0, nr_truncates = 0;
-@@ -1227,7 +1227,9 @@
+@@ -1256,7 +1256,9 @@ static void ext3_orphan_cleanup (struct
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
#define log2(n) ffz(~(n))
-@@ -1641,9 +1643,8 @@
+@@ -1682,9 +1684,8 @@ static int ext3_fill_super (struct super
* superblock lock.
*/
EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
qsnet-rhel4-2.6.patch
linux-2.6-binutils-2.16.patch
vm-tunables-rhel4.patch
-2.6-rhel4-kgdb-ga.patch
+2.6-rhel4-kgdb-ga.patch
tcp-zero-copy-2.6.9-rhel4.patch
iallocsem_consistency.patch
raid5-stats.patch
}
/* ->o_connect() method for client side (OSC and MDC and MGC) */
-int client_connect_import(const struct lu_context *ctx,
+int client_connect_import(const struct lu_env *env,
struct lustre_handle *dlm_handle,
struct obd_device *obd, struct obd_uuid *cluuid,
struct obd_connect_data *data)
GOTO(out, rc = -ENODEV);
}
- /* Make sure the target isn't cleaned up while we're here. Yes,
- there's still a race between the above check and our incref here.
+ /* Make sure the target isn't cleaned up while we're here. Yes,
+ there's still a race between the above check and our incref here.
Really, class_uuid2obd should take the ref. */
targref = class_incref(target);
rc = -EBUSY;
} else {
dont_check_exports:
- rc = obd_connect(req->rq_svc_thread->t_ctx,
+ rc = obd_connect(req->rq_svc_thread->t_env,
&conn, target, &cluuid, data);
}
} else {
cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
export->exp_conn_cnt,
lustre_msg_get_conn_cnt(req->rq_reqmsg));
-
+
spin_unlock(&export->exp_lock);
GOTO(out, rc = -EALREADY);
}
out:
if (export)
export->exp_connecting = 0;
- if (targref)
+ if (targref)
class_decref(targref);
if (rc)
req->rq_status = rc;
void target_committed_to_req(struct ptlrpc_request *req)
{
struct obd_device *obd;
-
+
if (req == NULL || req->rq_export == NULL)
- return;
+ return;
obd = req->rq_export->exp_obd;
if (obd == NULL)
int repsize[2] = { sizeof(struct ptlrpc_body),
sizeof(struct qunit_data) };
ENTRY;
-
+
rc = lustre_pack_reply(req, 2, repsize, NULL);
if (rc) {
CERROR("packing reply failed!: rc = %d\n", rc);
if ((req->rq_export->exp_connect_flags & OBD_CONNECT_QUOTA64) &&
!OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT)) {
CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
- rep = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
+ rep = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
sizeof(struct qunit_data));
LASSERT(rep);
- qdata = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*qdata),
+ qdata = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*qdata),
lustre_swab_qdata);
} else {
CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
- rep = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
+ rep = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
sizeof(struct qunit_data_old));
LASSERT(rep);
- qdata_old = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*qdata_old),
+ qdata_old = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*qdata_old),
lustre_swab_qdata_old);
qdata = lustre_quota_old_to_new(qdata_old);
}
ENTRY;
LASSERT(data != NULL);
-
+
spin_lock(&lmv->lmv_lock);
for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
if (tgt->ltd_exp == NULL)
RETURN(rc);
}
- /*
+ /*
* XXX: make sure that ocd_connect_flags from all targets are
* the same. Otherwise one of MDTs runs wrong version or
* something like this. --umka
obd->obd_self_export->exp_connect_flags =
conn_data->ocd_connect_flags;
}
-
+
/* Pass the notification up the chain. */
if (obd->obd_observer)
rc = obd_notify(obd->obd_observer, watched, ev, data);
/* this is fake connect function. Its purpose is to initialize lmv and say
* caller that everything is okay. Real connection will be performed later. */
-static int lmv_connect(const struct lu_context *ctx,
+static int lmv_connect(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid, struct obd_connect_data *data)
{
target.ft_srv = NULL;
target.ft_exp = mdc_exp;
target.ft_idx = tgt->idx;
-
+
fld_client_add_target(&lmv->lmv_fld, &target);
mdc_data = &class_exp2cliimp(mdc_exp)->imp_connect_data;
} else {
struct lmv_obj *obj;
LASSERT(fid_is_sane(hint->ph_pfid));
-
+
obj = lmv_obj_grab(obd, hint->ph_pfid);
if (obj) {
- /* If the dir got split, alloc fid according
+ /* If the dir got split, alloc fid according
* to its hash
*/
struct lu_fid *rpid;
- *mds = raw_name2idx(obj->lo_hashtype,
+ *mds = raw_name2idx(obj->lo_hashtype,
obj->lo_objcount,
- hint->ph_cname->name,
+ hint->ph_cname->name,
hint->ph_cname->len);
rpid = &obj->lo_inodes[*mds].li_fid;
rc = lmv_fld_lookup(lmv, rpid, mds);
GOTO(exit, rc);
}
CDEBUG(D_INODE, "the obj "DFID" has been"
- "splitted,got MDS at "LPU64" by name %s\n",
- PFID(hint->ph_pfid), *mds,
+ "splitted,got MDS at "LPU64" by name %s\n",
+ PFID(hint->ph_pfid), *mds,
hint->ph_cname->name);
rc = 0;
/* default policy is to use parent MDS */
rc = lmv_fld_lookup(lmv, hint->ph_pfid, mds);
}
-
+
}
} else {
/* sequences among all tgts are not well balanced, allocate new
/* client switches to new sequence, setup fld */
if (rc > 0) {
LASSERT(fid_is_sane(fid));
-
+
rc = fld_client_create(&lmv->lmv_fld, fid_seq(fid),
mds, NULL);
if (rc) {
rc = md_setxattr(tgt_exp, fid, oc, valid, name,
input, input_size, output_size, flags, request);
-
+
RETURN(rc);
}
obj = lmv_obj_grab(obd, &op_data->fid1);
if (obj) {
mdsno_t mds;
-
+
mds = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
op_data->name, op_data->namelen);
op_data->fid1 = obj->lo_inodes[mds].li_fid;
if (rc)
GOTO(cleanup, rc);
-
+
if (it->d.lustre.it_data) {
struct ptlrpc_request *req;
req = (struct ptlrpc_request *)it->d.lustre.it_data;
obj = lmv_obj_grab(obd, &op_data->fid1);
if (obj) {
mdsno_t mds;
-
+
/* directory is split. look for right mds for this
* name */
mds = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
rc = md_enqueue(tgt_exp, lock_type, it, lock_mode, op_data, lockh,
lmm, lmmsize, cb_compl, cb_blocking, cb_data,
extra_lock_flags);
-
+
if (rc == 0 && it->it_op == IT_OPEN)
rc = lmv_enqueue_remote(exp, lock_type, it, lock_mode,
op_data, lockh, lmm, lmmsize,
ptlrpc_req_finished(*request);
RETURN(PTR_ERR(tgt_exp));
}
-
+
rc = md_getattr_name(tgt_exp, &rid, NULL, NULL, 1,
valid, ea_size, &req);
ptlrpc_req_finished(*request);
CDEBUG(D_OTHER, "forward to MDS #"LPU64" ("DFID")\n",
mds, PFID(&op_data->fid1));
-
+
op_data->fsuid = current->fsuid;
op_data->fsgid = current->fsgid;
op_data->cap = current->cap_effective;
RETURN(0);
}
-static int lmv_reset_hash_seg_end (struct lmv_obd *lmv, struct lmv_obj *obj,
+static int lmv_reset_hash_seg_end (struct lmv_obd *lmv, struct lmv_obj *obj,
const struct lu_fid *fid, int index,
struct lu_dirpage *dp)
{
struct lu_fid rid;
__u32 seg_end, max_hash = MAX_HASH_SIZE;
int rc = 0;
-
+
/*
* We have reached the end of this hash segment, and the start offset of
* next segment need to be gotten out from the next segment, set it to
do_div(max_hash, obj->lo_objcount);
seg_end = max_hash * index;
-
+
/* Get start offset from next segment */
rid = obj->lo_inodes[index].li_fid;
tgt_exp = lmv_get_export(lmv, &rid);
page = alloc_pages(GFP_KERNEL, 0);
if (!page)
GOTO(cleanup, rc = -ENOMEM);
-
+
rc = md_readpage(tgt_exp, &rid, NULL, seg_end, page, &tmp_req);
if (rc) {
- /* E2BIG means it already reached the end of the dir,
+ /* E2BIG means it already reached the end of the dir,
* no need reset the hash segment end */
- if (rc == -E2BIG)
- GOTO(cleanup, rc = 0);
+ if (rc == -E2BIG)
+ GOTO(cleanup, rc = 0);
if (rc != -ERANGE)
GOTO(cleanup, rc);
if (rc == -ERANGE)
rc = 0;
- }
+ }
kmap(page);
- next_dp = cfs_page_address(page);
- LASSERT(le32_to_cpu(next_dp->ldp_hash_start) >= seg_end);
+ next_dp = cfs_page_address(page);
+ LASSERT(le32_to_cpu(next_dp->ldp_hash_start) >= seg_end);
dp->ldp_hash_end = next_dp->ldp_hash_start;
kunmap(page);
CDEBUG(D_INFO,"reset h_end %x for split obj"DFID"o_count %d index %d\n",
le32_to_cpu(dp->ldp_hash_end), PFID(&rid), obj->lo_objcount,
- index);
+ index);
cleanup:
if (tmp_req)
ptlrpc_req_finished(tmp_req);
__u64 index = offset;
__u32 seg = MAX_HASH_SIZE;
lmv_obj_lock(obj);
-
+
LASSERT(obj->lo_objcount > 0);
do_div(seg, obj->lo_objcount);
do_div(index, seg);
GOTO(cleanup, PTR_ERR(tgt_exp));
rc = md_readpage(tgt_exp, &rid, oc, offset, page, request);
- if (rc)
+ if (rc)
GOTO(cleanup, rc);
if (obj && i < obj->lo_objcount - 1) {
struct lu_dirpage *dp;
__u32 end;
kmap(page);
- dp = cfs_page_address(page);
+ dp = cfs_page_address(page);
end = le32_to_cpu(dp->ldp_hash_end);
if (end == ~0ul)
rc = lmv_reset_hash_seg_end(lmv, obj, fid,
i + 1, dp);
kunmap(page);
- } else
+ } else
if (rc == -ERANGE)
rc = -EIO;
RETURN(rc);
}
-static int lmv_llog_init(struct obd_device *obd, struct obd_llogs* llogs,
- struct obd_device *tgt, int count,
+static int lmv_llog_init(struct obd_device *obd, struct obd_llogs* llogs,
+ struct obd_device *tgt, int count,
struct llog_catid *logid, struct obd_uuid *uuid)
{
struct llog_ctxt *ctxt;
}
int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
- int offset, struct obd_export *dt_exp,
+ int offset, struct obd_export *dt_exp,
struct obd_export *md_exp, struct lustre_md *md)
{
struct obd_device *obd = exp->exp_obd;
tgt_exp = lmv_get_export(lmv, och->och_fid);
if (IS_ERR(tgt_exp))
RETURN(PTR_ERR(tgt_exp));
-
+
RETURN(md_set_open_replay_data(tgt_exp, och, open_req));
}
/* ok to dec to 0 more than once -- ltd_exp's will be null */
if (atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
int i;
- CDEBUG(D_CONFIG, "destroying %d lov targets\n",
+ CDEBUG(D_CONFIG, "destroying %d lov targets\n",
lov->lov_death_row);
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_reap)
}
#define MAX_STRING_SIZE 128
-static int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
+static int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
struct obd_connect_data *data)
{
struct lov_obd *lov = &obd->u.lov;
#endif
rc = qos_add_tgt(obd, index);
- if (rc)
+ if (rc)
CERROR("qos_add_tgt failed %d\n", rc);
RETURN(0);
}
-static int lov_connect(const struct lu_context *ctx,
+static int lov_connect(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid, struct obd_connect_data *data)
{
/* Why should there ever be more than 1 connect? */
lov->lov_connects++;
LASSERT(lov->lov_connects == 1);
-
+
memset(&lov->lov_ocd, 0, sizeof(lov->lov_ocd));
if (data)
lov->lov_ocd = *data;
rc = lov_connect_obd(obd, i, lov->lov_tgts[i]->ltd_activate,
&lov->lov_ocd);
if (rc) {
- CERROR("%s: lov connect tgt %d failed: %d\n",
+ CERROR("%s: lov connect tgt %d failed: %d\n",
obd->obd_name, i, rc);
continue;
}
}
lov_putref(obd);
-
+
RETURN(0);
}
RETURN(0);
}
-static int lov_del_target(struct obd_device *obd, __u32 index,
+static int lov_del_target(struct obd_device *obd, __u32 index,
struct obd_uuid *uuidp, int gen);
static int lov_disconnect(struct obd_export *exp)
__u32 newsize, oldsize = 0;
newsize = max(lov->lov_tgt_size, (__u32)2);
- while (newsize < index + 1)
+ while (newsize < index + 1)
newsize = newsize << 1;
OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
if (newtgts == NULL) {
}
if (lov->lov_tgt_size) {
- memcpy(newtgts, lov->lov_tgts, sizeof(*newtgts) *
+ memcpy(newtgts, lov->lov_tgts, sizeof(*newtgts) *
lov->lov_tgt_size);
old = lov->lov_tgts;
oldsize = lov->lov_tgt_size;
CDEBUG(D_CONFIG, "tgts: %p size: %d\n",
lov->lov_tgts, lov->lov_tgt_size);
- }
+ }
OBD_ALLOC_PTR(tgt);
CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
index, tgt->ltd_gen, lov->desc.ld_tgt_count);
-
- if (lov->lov_connects == 0) {
+
+ if (lov->lov_connects == 0) {
/* lov_connect hasn't been called yet. We'll do the
lov_connect_obd on this target when that fn first runs,
because we don't know the connect flags yet. */
if (rc)
GOTO(out, rc);
- rc = lov_notify(obd, tgt->ltd_exp->exp_obd,
+ rc = lov_notify(obd, tgt->ltd_exp->exp_obd,
active ? OBD_NOTIFY_ACTIVE : OBD_NOTIFY_INACTIVE,
(void *)&index);
out:
if (rc) {
- CERROR("add failed (%d), deleting %s\n", rc,
+ CERROR("add failed (%d), deleting %s\n", rc,
obd_uuid2str(&tgt->ltd_uuid));
lov_del_target(obd, index, 0, 0);
}
}
/* Schedule a target for deletion */
-static int lov_del_target(struct obd_device *obd, __u32 index,
+static int lov_del_target(struct obd_device *obd, __u32 index,
struct obd_uuid *uuidp, int gen)
{
struct lov_obd *lov = &obd->u.lov;
CDEBUG(D_CONFIG, "uuid: %s idx: %d gen: %d exp: %p active: %d\n",
lov_uuid2str(lov, index), index,
- lov->lov_tgts[index]->ltd_gen, lov->lov_tgts[index]->ltd_exp,
+ lov->lov_tgts[index]->ltd_gen, lov->lov_tgts[index]->ltd_exp,
lov->lov_tgts[index]->ltd_active);
lov->lov_tgts[index]->ltd_reap = 1;
osc_obd = class_exp2obd(tgt->ltd_exp);
CDEBUG(D_CONFIG, "Removing tgt %s : %s\n",
- lov_uuid2str(lov, index),
+ lov_uuid2str(lov, index),
osc_obd ? osc_obd->obd_name : "<no obd>");
if (tgt->ltd_exp)
* shrink it. */
lov->lov_tgts[index] = NULL;
- OBD_FREE_PTR(tgt);
+ OBD_FREE_PTR(tgt);
/* Manual cleanup - no cleanup logs to clean up the osc's. We must
do it ourselves. And we can't do it from lov_cleanup,
if (lov->lov_tgts[i]) {
CERROR("lov tgt %d not cleaned!"
" deathrow=%d, lovrc=%d\n",
- i, lov->lov_death_row,
+ i, lov->lov_death_row,
atomic_read(&lov->lov_refcount));
lov_del_target(obd, i, 0, 0);
}
}
- OBD_FREE(lov->lov_tgts, sizeof(*lov->lov_tgts) *
+ OBD_FREE(lov->lov_tgts, sizeof(*lov->lov_tgts) *
lov->lov_tgt_size);
lov->lov_tgt_size = 0;
}
-
- if (lov->lov_qos.lq_rr_size)
+
+ if (lov->lov_qos.lq_rr_size)
OBD_FREE(lov->lov_qos.lq_rr_array, lov->lov_qos.lq_rr_size);
RETURN(0);
case LCFG_PARAM: {
struct lprocfs_static_vars lvars;
struct lov_desc *desc = &(obd->u.lov.desc);
-
+
if (!desc)
GOTO(out, rc = -EINVAL);
-
+
lprocfs_init_vars(lov, &lvars);
-
+
rc = class_process_proc_param(PARAM_LOV, lvars.obd_vars,
lcfg, obd);
GOTO(out, rc);
if (ost_uuid && !obd_uuid_equals(ost_uuid, &tgt->ltd_uuid))
continue;
- CDEBUG(D_CONFIG,"Clear orphans for %d:%s\n", i,
+ CDEBUG(D_CONFIG,"Clear orphans for %d:%s\n", i,
obd_uuid2str(ost_uuid));
memcpy(tmp_oa, src_oa, sizeof(*tmp_oa));
LASSERT(lov->lov_tgts[i]->ltd_exp);
/* XXX: LOV STACKING: use real "obj_mdp" sub-data */
- err = obd_create(lov->lov_tgts[i]->ltd_exp,
+ err = obd_create(lov->lov_tgts[i]->ltd_exp,
tmp_oa, &obj_mdp, oti);
if (err)
/* This export will be disabled until it is recovered,
req = list_entry(pos, struct lov_request, rq_link);
CDEBUG(D_INFO, "objid "LPX64"[%d] has subobj "LPX64" at idx "
- "%u\n", oinfo->oi_oa->o_id, req->rq_stripe,
+ "%u\n", oinfo->oi_oa->o_id, req->rq_stripe,
req->rq_oi.oi_oa->o_id, req->rq_idx);
rc = obd_getattr(lov->lov_tgts[req->rq_idx]->ltd_exp,
RETURN(rc);
}
-static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
+static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
void *data, int rc)
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
RETURN(rc);
CDEBUG(D_INFO, "objid "LPX64": %ux%u byte stripes\n",
- oinfo->oi_md->lsm_object_id, oinfo->oi_md->lsm_stripe_count,
+ oinfo->oi_md->lsm_object_id, oinfo->oi_md->lsm_stripe_count,
oinfo->oi_md->lsm_stripe_size);
list_for_each (pos, &lovset->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
CDEBUG(D_INFO, "objid "LPX64"[%d] has subobj "LPX64" at idx "
- "%u\n", oinfo->oi_oa->o_id, req->rq_stripe,
+ "%u\n", oinfo->oi_oa->o_id, req->rq_stripe,
req->rq_oi.oi_oa->o_id, req->rq_idx);
rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, rqset);
RETURN(-ENODEV);
/* for now, we only expect the following updates here */
- LASSERT(!(oinfo->oi_oa->o_valid & ~(OBD_MD_FLID | OBD_MD_FLTYPE |
- OBD_MD_FLMODE | OBD_MD_FLATIME |
+ LASSERT(!(oinfo->oi_oa->o_valid & ~(OBD_MD_FLID | OBD_MD_FLTYPE |
+ OBD_MD_FLMODE | OBD_MD_FLATIME |
OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLFLAGS | OBD_MD_FLSIZE |
- OBD_MD_FLGROUP | OBD_MD_FLUID |
+ OBD_MD_FLFLAGS | OBD_MD_FLSIZE |
+ OBD_MD_FLGROUP | OBD_MD_FLUID |
OBD_MD_FLGID | OBD_MD_FLINLINE |
OBD_MD_FLFID | OBD_MD_FLGENER)));
lov = &exp->exp_obd->u.lov;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
- rc = obd_setattr(lov->lov_tgts[req->rq_idx]->ltd_exp,
+ rc = obd_setattr(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, NULL);
err = lov_update_setattr_set(set, req, rc);
if (err) {
CERROR("error: setattr objid "LPX64" subobj "
LPX64" on OST idx %d: rc = %d\n",
- set->set_oi->oi_oa->o_id,
+ set->set_oi->oi_oa->o_id,
req->rq_oi.oi_oa->o_id, req->rq_idx, err);
if (!rc)
rc = err;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
- rc = obd_sync(lov->lov_tgts[req->rq_idx]->ltd_exp,
- req->rq_oi.oi_oa, NULL,
+ rc = obd_sync(lov->lov_tgts[req->rq_idx]->ltd_exp,
+ req->rq_oi.oi_oa, NULL,
req->rq_oi.oi_policy.l_extent.start,
req->rq_oi.oi_policy.l_extent.end, capa);
err = lov_update_common_set(set, req, rc);
/* Find an existing osc so we can get it's stupid sizeof(*oap).
Only because of this layering limitation will a client
mount with no osts fail */
- while (!lov->lov_tgts || !lov->lov_tgts[i] ||
+ while (!lov->lov_tgts || !lov->lov_tgts[i] ||
!lov->lov_tgts[i]->ltd_exp) {
i++;
- if (i >= lov->desc.ld_tgt_count)
+ if (i >= lov->desc.ld_tgt_count)
RETURN(-ENOMEDIUM);
}
rc = size_round(sizeof(*lap)) +
loi = lsm->lsm_oinfo;
for (i = 0; i < lsm->lsm_stripe_count; i++, loi++) {
- if (!lov->lov_tgts[loi->loi_ost_idx] ||
+ if (!lov->lov_tgts[loi->loi_ost_idx] ||
!lov->lov_tgts[loi->loi_ost_idx]->ltd_active) {
CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
continue;
if (!exp || !exp->exp_obd)
RETURN(-ENODEV);
-
+
LASSERT(lsm->lsm_object_gr > 0);
-
+
lov = &exp->exp_obd->u.lov;
for (i = 0,loi = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++,loi++) {
struct lov_stripe_md submd;
struct lov_stripe_md submd;
int err;
- if (!lov->lov_tgts[loi->loi_ost_idx] ||
+ if (!lov->lov_tgts[loi->loi_ost_idx] ||
!lov->lov_tgts[loi->loi_ost_idx]->ltd_active)
CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
struct lov_stripe_md submd;
int rc = 0;
- if (!lov->lov_tgts[loi->loi_ost_idx] ||
+ if (!lov->lov_tgts[loi->loi_ost_idx] ||
!lov->lov_tgts[loi->loi_ost_idx]->ltd_active)
CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
continue;
}
- err = obd_statfs(class_exp2obd(lov->lov_tgts[i]->ltd_exp),
+ err = obd_statfs(class_exp2obd(lov->lov_tgts[i]->ltd_exp),
&lov_sfs, max_age);
if (err) {
if (lov->lov_tgts[i]->ltd_active && !rc)
genp = (__u32 *)data->ioc_inlbuf3;
/* the uuid will be empty for deleted OSTs */
for (i = 0; i < count; i++, uuidp++, genp++) {
- if (!lov->lov_tgts[i])
+ if (!lov->lov_tgts[i])
continue;
*uuidp = lov->lov_tgts[i]->ltd_uuid;
*genp = lov->lov_tgts[i]->ltd_gen;
if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
continue;
- err = obd_set_info_async(lov->lov_tgts[i]->ltd_exp,
+ err = obd_set_info_async(lov->lov_tgts[i]->ltd_exp,
keylen, key, vallen, val, set);
if (!rc)
rc = err;
}
if (KEY_IS(KEY_MDS_CONN)) {
struct mds_group_info mgi;
-
+
LASSERT(vallen == sizeof(mgi));
mgi = (*(struct mds_group_info *)val);
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_exp)
continue;
-
- if (mgi.uuid && !obd_uuid_equals(mgi.uuid,
+
+ if (mgi.uuid && !obd_uuid_equals(mgi.uuid,
&lov->lov_tgts[i]->ltd_uuid))
continue;
if (!val && !lov->lov_tgts[i]->ltd_active)
continue;
err = obd_set_info_async(lov->lov_tgts[i]->ltd_exp,
- keylen, key, sizeof(int),
+ keylen, key, sizeof(int),
&mgi.group, set);
if (!rc)
rc = err;
}
GOTO(out, rc);
}
-
+
if (KEY_IS("unlinked")) {
if (vallen != 0 && KEY_IS("unlinked"))
GOTO(out, rc = -EINVAL);
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_exp)
continue;
-
+
if (val && !obd_uuid_equals(val, &lov->lov_tgts[i]->ltd_uuid))
continue;
do_div(start, ssize);
start = start * ssize;
- CDEBUG(D_DLMTRACE, "offset %Lu, stripe %lu, start %Lu, end %Lu\n",
+ CDEBUG(D_DLMTRACE, "offset %Lu, stripe %lu, start %Lu, end %Lu\n",
*offset, ssize, start, start + ssize - 1);
if (cmd == OBD_CALC_STRIPE_END) {
*offset = start + ssize - 1;
#include "mdd_internal.h"
-static struct thandle* mdd_trans_start(const struct lu_context *ctxt,
+static struct thandle* mdd_trans_start(const struct lu_env *env,
struct mdd_device *);
-static void mdd_trans_stop(const struct lu_context *ctxt,
+static void mdd_trans_stop(const struct lu_env *env,
struct mdd_device *mdd, int rc,
struct thandle *handle);
static struct dt_object* mdd_object_child(struct mdd_object *o);
-static void __mdd_ref_add(const struct lu_context *ctxt, struct mdd_object *obj,
+static void __mdd_ref_add(const struct lu_env *env, struct mdd_object *obj,
struct thandle *handle);
-static void __mdd_ref_del(const struct lu_context *ctxt, struct mdd_object *obj,
+static void __mdd_ref_del(const struct lu_env *env, struct mdd_object *obj,
struct thandle *handle);
-static int __mdd_lookup(const struct lu_context *ctxt,
+static int __mdd_lookup(const struct lu_env *env,
struct md_object *pobj,
const char *name, const struct lu_fid* fid,
int mask, struct md_ucred *uc);
-static int __mdd_lookup_locked(const struct lu_context *ctxt,
+static int __mdd_lookup_locked(const struct lu_env *env,
struct md_object *pobj,
const char *name, const struct lu_fid* fid,
int mask, struct md_ucred *uc);
-static int mdd_exec_permission_lite(const struct lu_context *ctxt,
+static int mdd_exec_permission_lite(const struct lu_env *env,
struct mdd_object *obj,
struct md_ucred *uc);
-static int __mdd_permission_internal(const struct lu_context *ctxt,
+static int __mdd_permission_internal(const struct lu_env *env,
struct mdd_object *obj,
int mask, int getattr,
struct md_ucred *uc);
/* Calculate the credits of each transaction here */
/* Note: we did not count into QUOTA here, If we mount with --data_journal
* we may need more*/
-enum {
-/* Insert/Delete IAM
+enum {
+/* Insert/Delete IAM
* EXT3_INDEX_EXTRA_TRANS_BLOCKS(8) + EXT3_SINGLEDATA_TRANS_BLOCKS 8
* XXX Note: maybe iam need more,since iam have more level than Ext3 htree
*/
-
INSERT_IAM_CREDITS = 16,
/* Insert/Delete Oi
- * same as IAM insert/delete 16
+ * same as IAM insert/delete 16
* */
INSERT_OI_CREDITS = 16,
-/* Create a object
+/* Create an object
* Same as create object in Ext3 filesystem, but did not count QUOTA i
- * EXT3_DATA_TRANS_BLOCKS(12) + INDEX_EXTRA_BLOCKS(8) +
+ * EXT3_DATA_TRANS_BLOCKS(12) + INDEX_EXTRA_BLOCKS(8) +
* 3(inode bits,groups, GDT)*/
CREATE_OBJECT_CREDITS = 23,
/* A log rec need EXT3_INDEX_EXTRA_TRANS_BLOCKS(8) +
* EXT3_SINGLEDATA_TRANS_BLOCKS(8))
*/
- LOG_REC_CREDIT = 16
+ LOG_REC_CREDIT = 16
};
-/* XXX we should know the ost count to calculate the llog */
+/* XXX we should know the ost count to calculate the llog */
#define DEFAULT_LSM_COUNT 4 /* FIXME later */
enum {
MDD_TXN_OBJECT_DESTROY_CREDITS = 20,
- /* OBJECT CREATE :OI_INSERT + CREATE */
+ /* OBJECT CREATE :OI_INSERT + CREATE */
MDD_TXN_OBJECT_CREATE_CREDITS = (INSERT_OI_CREDITS + \
CREATE_OBJECT_CREDITS),
/* ATTR SET: XATTR_SET + ATTR set(3)*/
MDD_TXN_ATTR_SET_CREDITS = (XATTR_SET_CREDITS + 3),
-
+
MDD_TXN_XATTR_SET_CREDITS = XATTR_SET_CREDITS,
-
+
MDD_TXN_INDEX_INSERT_CREDITS = INSERT_IAM_CREDITS,
MDD_TXN_INDEX_DELETE_CREDITS = INSERT_IAM_CREDITS,
MDD_TXN_LINK_CREDITS = INSERT_IAM_CREDITS,
-/*
+/*
* UNLINK CREDITS
* IAM_INSERT_CREDITS + UNLINK log
* Unlink log = ((EXT3_INDEX_EXTRA_TRANS_BLOCKS(8) +
* EXT3_SINGLEDATA_TRANS_BLOCKS(8)) * lsm stripe count
- * XXX we should know the ost count to calculate the llog
+ * XXX we should know the ost count to calculate the llog
*/
MDD_TXN_UNLINK_CREDITS = (INSERT_IAM_CREDITS +
LOG_REC_CREDIT*DEFAULT_LSM_COUNT),
-/*
- * RENAME CREDITS
+/*
+ * RENAME CREDITS
* 2 IAM_INSERT + 1 IAM_DELETE + UNLINK LOG
*/
MDD_TXN_RENAME_CREDITS = (3 * INSERT_IAM_CREDITS + \
LOG_REC_CREDIT * DEFAULT_LSM_COUNT),
/* CREATE_DATA CREDITS
- * SET_XATTR
+ * SET_XATTR
* */
MDD_TXN_CREATE_DATA_CREDITS = XATTR_SET_CREDITS,
-/* CREATE
- * IAM_INSERT + OI_INSERT + CREATE_OBJECT_CREDITS
+/* CREATE
+ * IAM_INSERT + OI_INSERT + CREATE_OBJECT_CREDITS
* SET_MD CREDITS is already counted in CREATE_OBJECT CREDITS */
MDD_TXN_MKDIR_CREDITS = (INSERT_IAM_CREDITS + INSERT_OI_CREDITS \
+ CREATE_OBJECT_CREDITS)
DEFINE_MDD_TXN_OP_DESC(MDD_TXN_CREATE_DATA);
DEFINE_MDD_TXN_OP_DESC(MDD_TXN_MKDIR);
-static void mdd_txn_param_build(const struct lu_context *ctx,
+static void mdd_txn_param_build(const struct lu_env *env,
const struct mdd_txn_op_descr *opd)
{
- mdd_ctx_info(ctx)->mti_param.tp_credits = opd->mod_credits;
+ mdd_env_info(env)->mti_param.tp_credits = opd->mod_credits;
}
#define mdd_get_group_info(group_info) do { \
return rc;
}
-static inline int mdd_permission_internal(const struct lu_context *ctxt,
+static inline int mdd_permission_internal(const struct lu_env *env,
struct mdd_object *obj, int mask,
struct md_ucred *uc)
{
- return __mdd_permission_internal(ctxt, obj, mask, 1, uc);
+ return __mdd_permission_internal(env, obj, mask, 1, uc);
}
-struct mdd_thread_info *mdd_ctx_info(const struct lu_context *ctx)
+struct mdd_thread_info *mdd_env_info(const struct lu_env *env)
{
struct mdd_thread_info *info;
- info = lu_context_key_get(ctx, &mdd_thread_key);
+ info = lu_context_key_get(&env->le_ctx, &mdd_thread_key);
LASSERT(info != NULL);
return info;
}
-static struct lu_object *mdd_object_alloc(const struct lu_context *ctxt,
+static struct lu_object *mdd_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *d)
{
}
}
-static int mdd_object_init(const struct lu_context *ctxt, struct lu_object *o)
+static int mdd_object_init(const struct lu_env *env, struct lu_object *o)
{
struct mdd_device *d = lu2mdd_dev(o->lo_dev);
struct lu_object *below;
ENTRY;
under = &d->mdd_child->dd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(ctxt, o->lo_header, under);
+ below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
if (below == NULL)
RETURN(-ENOMEM);
RETURN(0);
}
-static int mdd_get_flags(const struct lu_context *ctxt, struct mdd_object *obj);
+static int mdd_get_flags(const struct lu_env *env, struct mdd_object *obj);
-static int mdd_object_start(const struct lu_context *ctxt, struct lu_object *o)
+static int mdd_object_start(const struct lu_env *env, struct lu_object *o)
{
if (lu_object_exists(o))
- return mdd_get_flags(ctxt, lu2mdd_obj(o));
+ return mdd_get_flags(env, lu2mdd_obj(o));
else
return 0;
}
-static void mdd_object_free(const struct lu_context *ctxt, struct lu_object *o)
+static void mdd_object_free(const struct lu_env *env, struct lu_object *o)
{
struct mdd_object *mdd = lu2mdd_obj(o);
OBD_FREE_PTR(mdd);
}
-static int mdd_object_print(const struct lu_context *ctxt, void *cookie,
+static int mdd_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- return (*p)(ctxt, cookie, LUSTRE_MDD_NAME"-object@%p", o);
+ return (*p)(env, cookie, LUSTRE_MDD_NAME"-object@%p", o);
}
/* orphan handling is here */
-static void mdd_object_delete(const struct lu_context *ctxt,
+static void mdd_object_delete(const struct lu_env *env,
struct lu_object *o)
{
struct mdd_object *mdd_obj = lu2mdd_obj(o);
return;
if (test_bit(LU_OBJECT_ORPHAN, &o->lo_header->loh_flags)) {
- mdd_txn_param_build(ctxt, &MDD_TXN_MKDIR);
- handle = mdd_trans_start(ctxt, lu2mdd_dev(o->lo_dev));
+ mdd_txn_param_build(env, &MDD_TXN_MKDIR);
+ handle = mdd_trans_start(env, lu2mdd_dev(o->lo_dev));
if (IS_ERR(handle))
CERROR("Cannot get thandle\n");
else {
- mdd_write_lock(ctxt, mdd_obj);
+ mdd_write_lock(env, mdd_obj);
/* let's remove obj from the orphan list */
- __mdd_orphan_del(ctxt, mdd_obj, handle);
- mdd_write_unlock(ctxt, mdd_obj);
- mdd_trans_stop(ctxt, lu2mdd_dev(o->lo_dev),
+ __mdd_orphan_del(env, mdd_obj, handle);
+ mdd_write_unlock(env, mdd_obj);
+ mdd_trans_stop(env, lu2mdd_dev(o->lo_dev),
0, handle);
}
}
.loo_object_delete = mdd_object_delete
};
-struct mdd_object *mdd_object_find(const struct lu_context *ctxt,
+struct mdd_object *mdd_object_find(const struct lu_env *env,
struct mdd_device *d,
const struct lu_fid *f)
{
struct mdd_object *m;
ENTRY;
- o = lu_object_find(ctxt, mdd2lu_dev(d)->ld_site, f, BYPASS_CAPA);
+ o = lu_object_find(env, mdd2lu_dev(d)->ld_site, f, BYPASS_CAPA);
if (IS_ERR(o))
m = (struct mdd_object *)o;
else {
lo = lu_object_locate(o->lo_header, mdd2lu_dev(d)->ld_type);
/* remote object can't be located and should be put then */
if (lo == NULL)
- lu_object_put(ctxt, o);
+ lu_object_put(env, o);
m = lu2mdd_obj(lo);
}
RETURN(m);
}
/*Check whether it may create the cobj under the pobj*/
-static int mdd_may_create(const struct lu_context *ctxt,
+static int mdd_may_create(const struct lu_env *env,
struct mdd_object *pobj, struct mdd_object *cobj,
int need_check, struct md_ucred *uc)
{
/*check pobj may create or not*/
if (need_check)
- rc = mdd_permission_internal(ctxt, pobj,
+ rc = mdd_permission_internal(env, pobj,
MAY_WRITE | MAY_EXEC, uc);
RETURN(rc);
}
-static inline int __mdd_la_get(const struct lu_context *ctxt,
+static inline int __mdd_la_get(const struct lu_env *env,
struct mdd_object *obj, struct lu_attr *la)
{
struct dt_object *next = mdd_object_child(obj);
LASSERT(lu_object_exists(mdd2lu_obj(obj)));
- return next->do_ops->do_attr_get(ctxt, next, la);
+ return next->do_ops->do_attr_get(env, next, la);
}
static void mdd_flags_xlate(struct mdd_object *obj, __u32 flags)
obj->mod_flags |= IMMUTE_OBJ;
}
-static int mdd_get_flags(const struct lu_context *ctxt, struct mdd_object *obj)
+static int mdd_get_flags(const struct lu_env *env, struct mdd_object *obj)
{
- struct lu_attr *la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *la = &mdd_env_info(env)->mti_la;
int rc;
ENTRY;
- mdd_read_lock(ctxt, obj);
- rc = __mdd_la_get(ctxt, obj, la);
- mdd_read_unlock(ctxt, obj);
+ mdd_read_lock(env, obj);
+ rc = __mdd_la_get(env, obj, la);
+ mdd_read_unlock(env, obj);
if (rc == 0)
mdd_flags_xlate(obj, la->la_flags);
RETURN(rc);
* It's inline, so penalty for filesystems that don't use sticky bit is
* minimal.
*/
-static inline int mdd_is_sticky(const struct lu_context *ctxt,
+static inline int mdd_is_sticky(const struct lu_env *env,
struct mdd_object *pobj,
struct mdd_object *cobj,
struct md_ucred *uc)
{
- struct lu_attr *tmp_la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
int rc;
- rc = __mdd_la_get(ctxt, cobj, tmp_la);
+ rc = __mdd_la_get(env, cobj, tmp_la);
if (rc) {
return rc;
} else if (tmp_la->la_uid == uc->mu_fsuid) {
return 0;
} else {
- rc = __mdd_la_get(ctxt, pobj, tmp_la);
+ rc = __mdd_la_get(env, pobj, tmp_la);
if (rc)
return rc;
else if (!(tmp_la->la_mode & S_ISVTX))
}
/*Check whether it may delete the cobj under the pobj*/
-static int mdd_may_delete(const struct lu_context *ctxt,
+static int mdd_may_delete(const struct lu_env *env,
struct mdd_object *pobj, struct mdd_object *cobj,
int is_dir, int need_check, struct md_ucred *uc)
{
if (mdd_is_dead_obj(pobj))
RETURN(-ENOENT);
- if (mdd_is_sticky(ctxt, pobj, cobj, uc))
+ if (mdd_is_sticky(env, pobj, cobj, uc))
RETURN(-EPERM);
if (need_check)
- rc = mdd_permission_internal(ctxt, pobj,
+ rc = mdd_permission_internal(env, pobj,
MAY_WRITE | MAY_EXEC, uc);
}
RETURN(rc);
}
/* get only inode attributes */
-static int __mdd_iattr_get(const struct lu_context *ctxt,
+static int __mdd_iattr_get(const struct lu_env *env,
struct mdd_object *mdd_obj, struct md_attr *ma)
{
int rc = 0;
ENTRY;
- rc = __mdd_la_get(ctxt, mdd_obj, &ma->ma_attr);
+ rc = __mdd_la_get(env, mdd_obj, &ma->ma_attr);
if (rc == 0)
ma->ma_valid = MA_INODE;
RETURN(rc);
}
/* get lov EA only */
-static int __mdd_lmm_get(const struct lu_context *ctxt,
+static int __mdd_lmm_get(const struct lu_env *env,
struct mdd_object *mdd_obj, struct md_attr *ma)
{
int rc;
ENTRY;
LASSERT(ma->ma_lmm != NULL && ma->ma_lmm_size > 0);
- rc = mdd_get_md(ctxt, mdd_obj, ma->ma_lmm, &ma->ma_lmm_size,
+ rc = mdd_get_md(env, mdd_obj, ma->ma_lmm, &ma->ma_lmm_size,
MDS_LOV_MD_NAME);
if (rc > 0) {
ma->ma_valid |= MA_LOV;
}
/* get lmv EA only*/
-static int __mdd_lmv_get(const struct lu_context *ctxt,
+static int __mdd_lmv_get(const struct lu_env *env,
struct mdd_object *mdd_obj, struct md_attr *ma)
{
int rc;
- rc = mdd_get_md(ctxt, mdd_obj, ma->ma_lmv, &ma->ma_lmv_size,
+ rc = mdd_get_md(env, mdd_obj, ma->ma_lmv, &ma->ma_lmv_size,
MDS_LMV_MD_NAME);
if (rc > 0) {
ma->ma_valid |= MA_LMV;
RETURN(rc);
}
-static int mdd_attr_get_internal(const struct lu_context *ctxt,
+static int mdd_attr_get_internal(const struct lu_env *env,
struct mdd_object *mdd_obj,
struct md_attr *ma)
{
ENTRY;
if (ma->ma_need & MA_INODE)
- rc = __mdd_iattr_get(ctxt, mdd_obj, ma);
+ rc = __mdd_iattr_get(env, mdd_obj, ma);
if (rc == 0 && ma->ma_need & MA_LOV) {
if (S_ISREG(mdd_object_type(mdd_obj)) ||
S_ISDIR(mdd_object_type(mdd_obj)))
- rc = __mdd_lmm_get(ctxt, mdd_obj, ma);
+ rc = __mdd_lmm_get(env, mdd_obj, ma);
}
if (rc == 0 && ma->ma_need & MA_LMV) {
if (S_ISDIR(mdd_object_type(mdd_obj)))
- rc = __mdd_lmv_get(ctxt, mdd_obj, ma);
+ rc = __mdd_lmv_get(env, mdd_obj, ma);
}
CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64"\n",
rc, ma->ma_valid);
RETURN(rc);
}
-static inline int mdd_attr_get_internal_locked(const struct lu_context *ctxt,
+static inline int mdd_attr_get_internal_locked(const struct lu_env *env,
struct mdd_object *mdd_obj,
struct md_attr *ma)
{
int rc;
- mdd_read_lock(ctxt, mdd_obj);
- rc = mdd_attr_get_internal(ctxt, mdd_obj, ma);
- mdd_read_unlock(ctxt, mdd_obj);
+ mdd_read_lock(env, mdd_obj);
+ rc = mdd_attr_get_internal(env, mdd_obj, ma);
+ mdd_read_unlock(env, mdd_obj);
return rc;
}
/*
* No permission check is needed.
*/
-static int mdd_attr_get(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_attr_get(const struct lu_env *env, struct md_object *obj,
struct md_attr *ma, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
int rc;
ENTRY;
- rc = mdd_attr_get_internal_locked(ctxt, mdd_obj, ma);
+ rc = mdd_attr_get_internal_locked(env, mdd_obj, ma);
RETURN(rc);
}
/*
* No permission check is needed.
*/
-static int mdd_xattr_get(const struct lu_context *ctxt,
+static int mdd_xattr_get(const struct lu_env *env,
struct md_object *obj, void *buf, int buf_len,
const char *name, struct md_ucred *uc)
{
LASSERT(lu_object_exists(&obj->mo_lu));
next = mdd_object_child(mdd_obj);
- mdd_read_lock(ctxt, mdd_obj);
- rc = next->do_ops->do_xattr_get(ctxt, next, buf, buf_len, name);
- mdd_read_unlock(ctxt, mdd_obj);
+ mdd_read_lock(env, mdd_obj);
+ rc = next->do_ops->do_xattr_get(env, next, buf, buf_len, name);
+ mdd_read_unlock(env, mdd_obj);
RETURN(rc);
}
* Permission check is done when open,
* no need check again.
*/
-static int mdd_readlink(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_readlink(const struct lu_env *env, struct md_object *obj,
void *buf, int buf_len, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
LASSERT(lu_object_exists(&obj->mo_lu));
next = mdd_object_child(mdd_obj);
- mdd_read_lock(ctxt, mdd_obj);
- rc = next->do_body_ops->dbo_read(ctxt, next, buf, buf_len, &pos);
- mdd_read_unlock(ctxt, mdd_obj);
+ mdd_read_lock(env, mdd_obj);
+ rc = next->do_body_ops->dbo_read(env, next, buf, buf_len, &pos);
+ mdd_read_unlock(env, mdd_obj);
RETURN(rc);
}
-static int mdd_xattr_list(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_xattr_list(const struct lu_env *env, struct md_object *obj,
void *buf, int buf_len, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
LASSERT(lu_object_exists(&obj->mo_lu));
next = mdd_object_child(mdd_obj);
- mdd_read_lock(ctxt, mdd_obj);
- rc = next->do_ops->do_xattr_list(ctxt, next, buf, buf_len);
- mdd_read_unlock(ctxt, mdd_obj);
+ mdd_read_lock(env, mdd_obj);
+ rc = next->do_ops->do_xattr_list(env, next, buf, buf_len);
+ mdd_read_unlock(env, mdd_obj);
RETURN(rc);
}
-static int mdd_txn_start_cb(const struct lu_context *ctx,
+static int mdd_txn_start_cb(const struct lu_env *env,
struct txn_param *param, void *cookie)
{
return 0;
}
-static int mdd_txn_stop_cb(const struct lu_context *ctx,
+static int mdd_txn_stop_cb(const struct lu_env *env,
struct thandle *txn, void *cookie)
{
struct mdd_device *mdd = cookie;
return mds_lov_write_objids(obd);
}
-static int mdd_txn_commit_cb(const struct lu_context *ctx,
+static int mdd_txn_commit_cb(const struct lu_env *env,
struct thandle *txn, void *cookie)
{
return 0;
}
-static int mdd_device_init(const struct lu_context *ctx,
+static int mdd_device_init(const struct lu_env *env,
struct lu_device *d, struct lu_device *next)
{
struct mdd_device *mdd = lu2mdd_dev(d);
RETURN(rc);
}
-static struct lu_device *mdd_device_fini(const struct lu_context *ctx,
+static struct lu_device *mdd_device_fini(const struct lu_env *env,
struct lu_device *d)
{
struct mdd_device *mdd = lu2mdd_dev(d);
struct lu_device *next = &mdd->mdd_child->dd_lu_dev;
-
+
return next;
}
-static int mdd_mount(const struct lu_context *ctx, struct mdd_device *mdd)
+static int mdd_mount(const struct lu_env *env, struct mdd_device *mdd)
{
int rc;
struct dt_object *root;
ENTRY;
dt_txn_callback_add(mdd->mdd_child, &mdd->mdd_txn_cb);
- root = dt_store_open(ctx, mdd->mdd_child, mdd_root_dir_name,
+ root = dt_store_open(env, mdd->mdd_child, mdd_root_dir_name,
&mdd->mdd_root_fid);
if (!IS_ERR(root)) {
LASSERT(root != NULL);
- lu_object_put(ctx, &root->do_lu);
- rc = orph_index_init(ctx, mdd);
+ lu_object_put(env, &root->do_lu);
+ rc = orph_index_init(env, mdd);
} else
rc = PTR_ERR(root);
RETURN(rc);
}
-static void mdd_device_shutdown(const struct lu_context *ctxt,
+static void mdd_device_shutdown(const struct lu_env *env,
struct mdd_device *m)
{
dt_txn_callback_del(m->mdd_child, &m->mdd_txn_cb);
if (m->mdd_obd_dev)
- mdd_fini_obd(ctxt, m);
- orph_index_fini(ctxt, m);
+ mdd_fini_obd(env, m);
+ orph_index_fini(env, m);
}
-static int mdd_process_config(const struct lu_context *ctxt,
+static int mdd_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
struct mdd_device *m = lu2mdd_dev(d);
switch (cfg->lcfg_command) {
case LCFG_SETUP:
- rc = next->ld_ops->ldo_process_config(ctxt, next, cfg);
+ rc = next->ld_ops->ldo_process_config(env, next, cfg);
if (rc)
GOTO(out, rc);
- dt->dd_ops->dt_conf_get(ctxt, dt, &m->mdd_dt_conf);
+ dt->dd_ops->dt_conf_get(env, dt, &m->mdd_dt_conf);
- rc = mdd_init_obd(ctxt, m, cfg);
+ rc = mdd_init_obd(env, m, cfg);
if (rc) {
CERROR("lov init error %d \n", rc);
GOTO(out, rc);
}
- rc = mdd_mount(ctxt, m);
+ rc = mdd_mount(env, m);
if (rc)
GOTO(out, rc);
break;
case LCFG_CLEANUP:
- mdd_device_shutdown(ctxt, m);
+ mdd_device_shutdown(env, m);
default:
- rc = next->ld_ops->ldo_process_config(ctxt, next, cfg);
+ rc = next->ld_ops->ldo_process_config(env, next, cfg);
break;
}
out:
RETURN(rc);
}
-static int mdd_recovery_complete(const struct lu_context *ctxt,
+static int mdd_recovery_complete(const struct lu_env *env,
struct lu_device *d)
{
struct mdd_device *mdd = lu2mdd_dev(d);
int rc;
ENTRY;
/* TODO:
- rc = mdd_lov_set_nextid(ctx, mdd);
+ rc = mdd_lov_set_nextid(env, mdd);
if (rc) {
CERROR("%s: mdd_lov_set_nextid failed %d\n",
obd->obd_name, rc);
GOTO(out, rc);
}
- rc = mdd_cleanup_unlink_llog(ctx, mdd);
+ rc = mdd_cleanup_unlink_llog(env, mdd);
obd_notify(obd->u.mds.mds_osc_obd, NULL,
obd->obd_async_recov ? OBD_NOTIFY_SYNC_NONBLOCK :
obd->obd_recovering = 0;
obd->obd_type->typ_dt_ops->o_postrecov(obd);
/* TODO: orphans handling */
- __mdd_orphan_cleanup(ctxt, mdd);
- rc = next->ld_ops->ldo_recovery_complete(ctxt, next);
+ __mdd_orphan_cleanup(env, mdd);
+ rc = next->ld_ops->ldo_recovery_complete(env, next);
RETURN(rc);
}
.ldo_recovery_complete = mdd_recovery_complete
};
-void mdd_write_lock(const struct lu_context *ctxt, struct mdd_object *obj)
+void mdd_write_lock(const struct lu_env *env, struct mdd_object *obj)
{
struct dt_object *next = mdd_object_child(obj);
- next->do_ops->do_write_lock(ctxt, next);
+ next->do_ops->do_write_lock(env, next);
}
-void mdd_read_lock(const struct lu_context *ctxt, struct mdd_object *obj)
+void mdd_read_lock(const struct lu_env *env, struct mdd_object *obj)
{
struct dt_object *next = mdd_object_child(obj);
- next->do_ops->do_read_lock(ctxt, next);
+ next->do_ops->do_read_lock(env, next);
}
-void mdd_write_unlock(const struct lu_context *ctxt, struct mdd_object *obj)
+void mdd_write_unlock(const struct lu_env *env, struct mdd_object *obj)
{
struct dt_object *next = mdd_object_child(obj);
- next->do_ops->do_write_unlock(ctxt, next);
+ next->do_ops->do_write_unlock(env, next);
}
-void mdd_read_unlock(const struct lu_context *ctxt, struct mdd_object *obj)
+void mdd_read_unlock(const struct lu_env *env, struct mdd_object *obj)
{
struct dt_object *next = mdd_object_child(obj);
- next->do_ops->do_read_unlock(ctxt, next);
+ next->do_ops->do_read_unlock(env, next);
}
-static void mdd_lock2(const struct lu_context *ctxt,
+static void mdd_lock2(const struct lu_env *env,
struct mdd_object *o0, struct mdd_object *o1)
{
- mdd_write_lock(ctxt, o0);
- mdd_write_lock(ctxt, o1);
+ mdd_write_lock(env, o0);
+ mdd_write_lock(env, o1);
}
-static void mdd_unlock2(const struct lu_context *ctxt,
+static void mdd_unlock2(const struct lu_env *env,
struct mdd_object *o0, struct mdd_object *o1)
{
- mdd_write_unlock(ctxt, o1);
- mdd_write_unlock(ctxt, o0);
+ mdd_write_unlock(env, o1);
+ mdd_write_unlock(env, o0);
}
-static struct thandle* mdd_trans_start(const struct lu_context *ctxt,
+static struct thandle* mdd_trans_start(const struct lu_env *env,
struct mdd_device *mdd)
{
- struct txn_param *p = &mdd_ctx_info(ctxt)->mti_param;
+ struct txn_param *p = &mdd_env_info(env)->mti_param;
- return mdd_child_ops(mdd)->dt_trans_start(ctxt, mdd->mdd_child, p);
+ return mdd_child_ops(mdd)->dt_trans_start(env, mdd->mdd_child, p);
}
-static void mdd_trans_stop(const struct lu_context *ctxt,
+static void mdd_trans_stop(const struct lu_env *env,
struct mdd_device *mdd, int result,
struct thandle *handle)
{
handle->th_result = result;
- mdd_child_ops(mdd)->dt_trans_stop(ctxt, handle);
+ mdd_child_ops(mdd)->dt_trans_stop(env, handle);
}
-static int __mdd_object_create(const struct lu_context *ctxt,
+static int __mdd_object_create(const struct lu_env *env,
struct mdd_object *obj, struct md_attr *ma,
struct thandle *handle)
{
if (!lu_object_exists(mdd2lu_obj(obj))) {
next = mdd_object_child(obj);
- rc = next->do_ops->do_create(ctxt, next, attr, handle);
+ rc = next->do_ops->do_create(env, next, attr, handle);
} else
rc = -EEXIST;
RETURN(rc);
}
-int mdd_attr_set_internal(const struct lu_context *ctxt, struct mdd_object *o,
+int mdd_attr_set_internal(const struct lu_env *env, struct mdd_object *o,
const struct lu_attr *attr, struct thandle *handle)
{
struct dt_object *next;
LASSERT(lu_object_exists(mdd2lu_obj(o)));
next = mdd_object_child(o);
- return next->do_ops->do_attr_set(ctxt, next, attr, handle);
+ return next->do_ops->do_attr_set(env, next, attr, handle);
}
-int mdd_attr_set_internal_locked(const struct lu_context *ctxt,
+int mdd_attr_set_internal_locked(const struct lu_env *env,
struct mdd_object *o,
const struct lu_attr *attr,
struct thandle *handle)
{
int rc;
- mdd_write_lock(ctxt, o);
- rc = mdd_attr_set_internal(ctxt, o, attr, handle);
- mdd_write_unlock(ctxt, o);
+ mdd_write_lock(env, o);
+ rc = mdd_attr_set_internal(env, o, attr, handle);
+ mdd_write_unlock(env, o);
return rc;
}
-static int __mdd_xattr_set(const struct lu_context *ctxt, struct mdd_object *o,
+static int __mdd_xattr_set(const struct lu_env *env, struct mdd_object *o,
const void *buf, int buf_len, const char *name,
int fl, struct thandle *handle)
{
LASSERT(lu_object_exists(mdd2lu_obj(o)));
next = mdd_object_child(o);
if (buf && buf_len > 0) {
- rc = next->do_ops->do_xattr_set(ctxt, next, buf, buf_len, name,
+ rc = next->do_ops->do_xattr_set(env, next, buf, buf_len, name,
0, handle);
}else if (buf == NULL && buf_len == 0) {
- rc = next->do_ops->do_xattr_del(ctxt, next, name, handle);
+ rc = next->do_ops->do_xattr_del(env, next, name, handle);
}
RETURN(rc);
}
* This API is ported from mds_fix_attr but remove some unnecesssary stuff.
* and port to
*/
-int mdd_fix_attr(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_fix_attr(const struct lu_env *env, struct mdd_object *obj,
struct lu_attr *la, struct md_ucred *uc)
{
- struct lu_attr *tmp_la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
time_t now = CURRENT_SECONDS;
int rc;
ENTRY;
if (la->la_valid & (LA_NLINK | LA_RDEV | LA_BLKSIZE))
RETURN(-EPERM);
- rc = __mdd_la_get(ctxt, obj, tmp_la);
+ rc = __mdd_la_get(env, obj, tmp_la);
if (rc)
RETURN(rc);
if ((la->la_valid & (LA_MTIME | LA_ATIME | LA_CTIME)) &&
!(la->la_valid & ~(LA_MTIME | LA_ATIME | LA_CTIME))) {
if ((uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CAP_FOWNER))
+ !mdd_capable(uc, CAP_FOWNER))
RETURN(-EPERM);
}
/* For tuncate (or setsize), we should have MAY_WRITE perm */
if (la->la_valid & (LA_SIZE | LA_BLOCKS)) {
- rc = mdd_permission_internal(ctxt, obj, MAY_WRITE, uc);
+ rc = mdd_permission_internal(env, obj, MAY_WRITE, uc);
if (rc)
RETURN(rc);
- /*
+ /*
* For the "Size-on-MDS" setattr update, merge coming
* attributes with the set in the inode. BUG 10641
*/
if ((la->la_valid & LA_ATIME) &&
(la->la_atime < tmp_la->la_atime))
la->la_valid &= ~LA_ATIME;
-
- if ((la->la_valid & LA_CTIME) &&
+
+ if ((la->la_valid & LA_CTIME) &&
(la->la_ctime < tmp_la->la_ctime))
la->la_valid &= ~(LA_MTIME | LA_CTIME);
}
/* set attr and LOV EA at once, return updated attr */
-static int mdd_attr_set(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_attr_set(const struct lu_env *env, struct md_object *obj,
const struct md_attr *ma, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct thandle *handle;
struct lov_mds_md *lmm = NULL;
int rc = 0, lmm_size = 0, max_size = 0;
- struct lu_attr *la_copy = &mdd_ctx_info(ctxt)->mti_la_for_fix;
+ struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
ENTRY;
- mdd_txn_param_build(ctxt, &MDD_TXN_ATTR_SET);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_ATTR_SET);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
/*TODO: add lock here*/
/* start a log jounal handle if needed */
if (S_ISREG(mdd_object_type(mdd_obj)) &&
ma->ma_attr.la_valid & (LA_UID | LA_GID)) {
- max_size = mdd_lov_mdsize(ctxt, mdd);
+ max_size = mdd_lov_mdsize(env, mdd);
OBD_ALLOC(lmm, max_size);
if (lmm == NULL)
GOTO(cleanup, rc = -ENOMEM);
- rc = mdd_get_md_locked(ctxt, mdd_obj, lmm, &lmm_size,
+ rc = mdd_get_md_locked(env, mdd_obj, lmm, &lmm_size,
MDS_LOV_MD_NAME);
if (rc < 0)
ma->ma_attr.la_mtime, ma->ma_attr.la_ctime);
*la_copy = ma->ma_attr;
- mdd_write_lock(ctxt, mdd_obj);
- rc = mdd_fix_attr(ctxt, mdd_obj, la_copy, uc);
- mdd_write_unlock(ctxt, mdd_obj);
+ mdd_write_lock(env, mdd_obj);
+ rc = mdd_fix_attr(env, mdd_obj, la_copy, uc);
+ mdd_write_unlock(env, mdd_obj);
if (rc)
GOTO(cleanup, rc);
if (la_copy->la_valid & LA_FLAGS) {
- rc = mdd_attr_set_internal_locked(ctxt, mdd_obj, la_copy,
+ rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
handle);
if (rc == 0)
mdd_flags_xlate(mdd_obj, la_copy->la_flags);
} else if (la_copy->la_valid) { /* setattr */
- rc = mdd_attr_set_internal_locked(ctxt, mdd_obj, la_copy,
+ rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
handle);
/* journal chown/chgrp in llog, just like unlink */
if (rc == 0 && lmm_size){
mode = mdd_object_type(mdd_obj);
if (S_ISREG(mode) || S_ISDIR(mode)) {
/*TODO check permission*/
- rc = mdd_lov_set_md(ctxt, NULL, mdd_obj, ma->ma_lmm,
+ rc = mdd_lov_set_md(env, NULL, mdd_obj, ma->ma_lmm,
ma->ma_lmm_size, handle, 1);
}
}
cleanup:
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_trans_stop(env, mdd, rc, handle);
if (rc == 0 && lmm_size) {
/*set obd attr, if needed*/
- rc = mdd_lov_setattr_async(ctxt, mdd_obj, lmm, lmm_size);
+ rc = mdd_lov_setattr_async(env, mdd_obj, lmm, lmm_size);
}
if (lmm != NULL) {
OBD_FREE(lmm, max_size);
RETURN(rc);
}
-int mdd_xattr_set_txn(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_xattr_set_txn(const struct lu_env *env, struct mdd_object *obj,
const void *buf, int buf_len, const char *name, int fl,
struct thandle *handle)
{
int rc;
ENTRY;
- mdd_write_lock(ctxt, obj);
- rc = __mdd_xattr_set(ctxt, obj, buf, buf_len, name, fl, handle);
- mdd_write_unlock(ctxt, obj);
+ mdd_write_lock(env, obj);
+ rc = __mdd_xattr_set(env, obj, buf, buf_len, name, fl, handle);
+ mdd_write_unlock(env, obj);
RETURN(rc);
}
-static int mdd_xattr_sanity_check(const struct lu_context *ctxt,
+static int mdd_xattr_sanity_check(const struct lu_env *env,
struct mdd_object *obj,
struct md_ucred *uc)
{
- struct lu_attr *tmp_la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
int rc;
ENTRY;
if (mdd_is_immutable(obj) || mdd_is_append(obj))
RETURN(-EPERM);
- mdd_read_lock(ctxt, obj);
- rc = __mdd_la_get(ctxt, obj, tmp_la);
- mdd_read_unlock(ctxt, obj);
+ mdd_read_lock(env, obj);
+ rc = __mdd_la_get(env, obj, tmp_la);
+ mdd_read_unlock(env, obj);
if (rc)
RETURN(rc);
RETURN(rc);
}
-static int mdd_xattr_set(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_xattr_set(const struct lu_env *env, struct md_object *obj,
const void *buf, int buf_len, const char *name, int fl,
struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = mdd_xattr_sanity_check(ctxt, mdd_obj, uc);
+ rc = mdd_xattr_sanity_check(env, mdd_obj, uc);
if (rc)
RETURN(rc);
- mdd_txn_param_build(ctxt, &MDD_TXN_XATTR_SET);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_XATTR_SET);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- rc = mdd_xattr_set_txn(ctxt, md2mdd_obj(obj), buf, buf_len, name,
+ rc = mdd_xattr_set_txn(env, md2mdd_obj(obj), buf, buf_len, name,
fl, handle);
#ifdef HAVE_SPLIT_SUPPORT
if (rc == 0) {
rc = -ERESTART;
}
#endif
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
-static int __mdd_xattr_del(const struct lu_context *ctxt,struct mdd_device *mdd,
+static int __mdd_xattr_del(const struct lu_env *env,struct mdd_device *mdd,
struct mdd_object *obj,
const char *name, struct thandle *handle)
{
LASSERT(lu_object_exists(mdd2lu_obj(obj)));
next = mdd_object_child(obj);
- return next->do_ops->do_xattr_del(ctxt, next, name, handle);
+ return next->do_ops->do_xattr_del(env, next, name, handle);
}
-int mdd_xattr_del(const struct lu_context *ctxt, struct md_object *obj,
+int mdd_xattr_del(const struct lu_env *env, struct md_object *obj,
const char *name, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
int rc;
ENTRY;
- rc = mdd_xattr_sanity_check(ctxt, mdd_obj, uc);
+ rc = mdd_xattr_sanity_check(env, mdd_obj, uc);
if (rc)
RETURN(rc);
- mdd_txn_param_build(ctxt, &MDD_TXN_XATTR_SET);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_XATTR_SET);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_write_lock(ctxt, mdd_obj);
- rc = __mdd_xattr_del(ctxt, mdd, md2mdd_obj(obj), name, handle);
- mdd_write_unlock(ctxt, mdd_obj);
+ mdd_write_lock(env, mdd_obj);
+ rc = __mdd_xattr_del(env, mdd, md2mdd_obj(obj), name, handle);
+ mdd_write_unlock(env, mdd_obj);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
-static int __mdd_index_insert_only(const struct lu_context *ctxt,
+static int __mdd_index_insert_only(const struct lu_env *env,
struct mdd_object *pobj,
const struct lu_fid *lf,
const char *name, struct thandle *th)
struct dt_object *next = mdd_object_child(pobj);
ENTRY;
- if (dt_try_as_dir(ctxt, next))
- rc = next->do_index_ops->dio_insert(ctxt, next,
+ if (dt_try_as_dir(env, next))
+ rc = next->do_index_ops->dio_insert(env, next,
(struct dt_rec *)lf,
(struct dt_key *)name, th);
else
}
/* insert new index, add reference if isdir, update times */
-static int __mdd_index_insert(const struct lu_context *ctxt,
+static int __mdd_index_insert(const struct lu_env *env,
struct mdd_object *pobj, const struct lu_fid *lf,
const char *name, int isdir, struct thandle *th)
{
ENTRY;
#if 0
- struct lu_attr *la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *la = &mdd_env_info(env)->mti_la;
#endif
- if (dt_try_as_dir(ctxt, next))
- rc = next->do_index_ops->dio_insert(ctxt, next,
+ if (dt_try_as_dir(env, next))
+ rc = next->do_index_ops->dio_insert(env, next,
(struct dt_rec *)lf,
(struct dt_key *)name, th);
else
if (rc == 0) {
if (isdir)
- __mdd_ref_add(ctxt, pobj, th);
+ __mdd_ref_add(env, pobj, th);
#if 0
la->la_valid = LA_MTIME|LA_CTIME;
la->la_atime = ma->ma_attr.la_atime;
la->la_ctime = ma->ma_attr.la_ctime;
- rc = mdd_attr_set_internal(ctxt, mdd_obj, la, handle);
+ rc = mdd_attr_set_internal(env, mdd_obj, la, handle);
#endif
}
return rc;
}
-static int __mdd_index_delete(const struct lu_context *ctxt,
+static int __mdd_index_delete(const struct lu_env *env,
struct mdd_object *pobj, const char *name,
struct thandle *handle)
{
struct dt_object *next = mdd_object_child(pobj);
ENTRY;
- if (dt_try_as_dir(ctxt, next))
- rc = next->do_index_ops->dio_delete(ctxt, next,
+ if (dt_try_as_dir(env, next))
+ rc = next->do_index_ops->dio_delete(env, next,
(struct dt_key *)name, handle);
else
rc = -ENOTDIR;
RETURN(rc);
}
-static int mdd_link_sanity_check(const struct lu_context *ctxt,
+static int mdd_link_sanity_check(const struct lu_env *env,
struct mdd_object *tgt_obj,
struct mdd_object *src_obj,
struct md_ucred *uc)
int rc;
ENTRY;
- rc = mdd_may_create(ctxt, tgt_obj, NULL, 1, uc);
+ rc = mdd_may_create(env, tgt_obj, NULL, 1, uc);
if (rc)
RETURN(rc);
RETURN(rc);
}
-static int mdd_link(const struct lu_context *ctxt, struct md_object *tgt_obj,
+static int mdd_link(const struct lu_env *env, struct md_object *tgt_obj,
struct md_object *src_obj, const char *name,
struct md_attr *ma, struct md_ucred *uc)
{
struct mdd_object *mdd_tobj = md2mdd_obj(tgt_obj);
struct mdd_object *mdd_sobj = md2mdd_obj(src_obj);
struct mdd_device *mdd = mdo2mdd(src_obj);
- struct lu_attr *la_copy = &mdd_ctx_info(ctxt)->mti_la_for_fix;
+ struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
struct thandle *handle;
int rc;
ENTRY;
- mdd_txn_param_build(ctxt, &MDD_TXN_LINK);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_LINK);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_lock2(ctxt, mdd_tobj, mdd_sobj);
+ mdd_lock2(env, mdd_tobj, mdd_sobj);
- rc = mdd_link_sanity_check(ctxt, mdd_tobj, mdd_sobj, uc);
+ rc = mdd_link_sanity_check(env, mdd_tobj, mdd_sobj, uc);
if (rc)
GOTO(out, rc);
- rc = __mdd_index_insert_only(ctxt, mdd_tobj, mdo2fid(mdd_sobj),
+ rc = __mdd_index_insert_only(env, mdd_tobj, mdo2fid(mdd_sobj),
name, handle);
if (rc == 0)
- __mdd_ref_add(ctxt, mdd_sobj, handle);
+ __mdd_ref_add(env, mdd_sobj, handle);
*la_copy = ma->ma_attr;
la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_set_internal(ctxt, mdd_sobj, la_copy, handle);
+ rc = mdd_attr_set_internal(env, mdd_sobj, la_copy, handle);
if (rc)
GOTO(out, rc);
la_copy->la_valid = LA_CTIME | LA_MTIME;
- rc = mdd_attr_set_internal(ctxt, mdd_tobj, la_copy, handle);
+ rc = mdd_attr_set_internal(env, mdd_tobj, la_copy, handle);
out:
- mdd_unlock2(ctxt, mdd_tobj, mdd_sobj);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_unlock2(env, mdd_tobj, mdd_sobj);
+ mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
* -ve other error
*
*/
-static int mdd_dir_is_empty(const struct lu_context *ctx,
+static int mdd_dir_is_empty(const struct lu_env *env,
struct mdd_object *dir)
{
struct dt_it *it;
obj = mdd_object_child(dir);
iops = &obj->do_index_ops->dio_it;
- it = iops->init(ctx, obj, 0);
+ it = iops->init(env, obj, 0);
if (it != NULL) {
- result = iops->get(ctx, it, (const void *)"");
+ result = iops->get(env, it, (const void *)"");
if (result > 0) {
int i;
for (result = 0, i = 0; result == 0 && i < 3; ++i)
- result = iops->next(ctx, it);
+ result = iops->next(env, it);
if (result == 0)
result = -ENOTEMPTY;
else if (result == +1)
* Huh? Index contains no zero key?
*/
result = -EIO;
-
- iops->put(ctx, it);
- iops->fini(ctx, it);
+
+ iops->put(env, it);
+ iops->fini(env, it);
} else
result = -ENOMEM;
return result;
/* return md_attr back,
* if it is last unlink then return lov ea + llog cookie*/
-int __mdd_object_kill(const struct lu_context *ctxt,
+int __mdd_object_kill(const struct lu_env *env,
struct mdd_object *obj,
struct md_attr *ma)
{
if (S_ISREG(mdd_object_type(obj))) {
/* Return LOV & COOKIES unconditionally here. We clean evth up.
* Caller must be ready for that. */
- rc = __mdd_lmm_get(ctxt, obj, ma);
+ rc = __mdd_lmm_get(env, obj, ma);
if ((ma->ma_valid & MA_LOV))
- rc = mdd_unlink_log(ctxt, mdo2mdd(&obj->mod_obj),
+ rc = mdd_unlink_log(env, mdo2mdd(&obj->mod_obj),
obj, ma);
}
RETURN(rc);
}
/* caller should take a lock before calling */
-static int __mdd_finish_unlink(const struct lu_context *ctxt,
+static int __mdd_finish_unlink(const struct lu_env *env,
struct mdd_object *obj, struct md_attr *ma,
struct thandle *th)
{
int rc;
ENTRY;
- rc = __mdd_iattr_get(ctxt, obj, ma);
+ rc = __mdd_iattr_get(env, obj, ma);
if (rc == 0 && ma->ma_attr.la_nlink == 0) {
/* add new orphan and the object
* will be deleted during the object_put() */
- if (__mdd_orphan_add(ctxt, obj, th) == 0)
+ if (__mdd_orphan_add(env, obj, th) == 0)
set_bit(LU_OBJECT_ORPHAN,
&mdd2lu_obj(obj)->lo_header->loh_flags);
-
+
if (obj->mod_count == 0)
- rc = __mdd_object_kill(ctxt, obj, ma);
+ rc = __mdd_object_kill(env, obj, ma);
}
RETURN(rc);
}
-static int mdd_unlink_sanity_check(const struct lu_context *ctxt,
+static int mdd_unlink_sanity_check(const struct lu_env *env,
struct mdd_object *pobj,
struct mdd_object *cobj,
struct md_attr *ma,
int rc = 0;
ENTRY;
- rc = mdd_may_delete(ctxt, pobj, cobj,
+ rc = mdd_may_delete(env, pobj, cobj,
S_ISDIR(ma->ma_attr.la_mode), 1, uc);
if (rc)
RETURN(rc);
if (S_ISDIR(mdd_object_type(cobj))) {
- if (dt_try_as_dir(ctxt, dt_cobj))
- rc = mdd_dir_is_empty(ctxt, cobj);
+ if (dt_try_as_dir(env, dt_cobj))
+ rc = mdd_dir_is_empty(env, cobj);
else
rc = -ENOTDIR;
}
RETURN(rc);
}
-static int mdd_unlink(const struct lu_context *ctxt,
+static int mdd_unlink(const struct lu_env *env,
struct md_object *pobj, struct md_object *cobj,
const char *name, struct md_attr *ma, struct md_ucred *uc)
{
struct mdd_device *mdd = mdo2mdd(pobj);
struct mdd_object *mdd_pobj = md2mdd_obj(pobj);
struct mdd_object *mdd_cobj = md2mdd_obj(cobj);
- struct lu_attr *la_copy = &mdd_ctx_info(ctxt)->mti_la_for_fix;
+ struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
struct thandle *handle;
int rc;
ENTRY;
- mdd_txn_param_build(ctxt, &MDD_TXN_UNLINK);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_UNLINK);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_lock2(ctxt, mdd_pobj, mdd_cobj);
+ mdd_lock2(env, mdd_pobj, mdd_cobj);
- rc = mdd_unlink_sanity_check(ctxt, mdd_pobj, mdd_cobj, ma, uc);
+ rc = mdd_unlink_sanity_check(env, mdd_pobj, mdd_cobj, ma, uc);
if (rc)
GOTO(cleanup, rc);
- rc = __mdd_index_delete(ctxt, mdd_pobj, name, handle);
+ rc = __mdd_index_delete(env, mdd_pobj, name, handle);
if (rc)
GOTO(cleanup, rc);
- __mdd_ref_del(ctxt, mdd_cobj, handle);
+ __mdd_ref_del(env, mdd_cobj, handle);
*la_copy = ma->ma_attr;
if (S_ISDIR(lu_object_attr(&cobj->mo_lu))) {
/* unlink dot */
- __mdd_ref_del(ctxt, mdd_cobj, handle);
+ __mdd_ref_del(env, mdd_cobj, handle);
/* unlink dotdot */
- __mdd_ref_del(ctxt, mdd_pobj, handle);
+ __mdd_ref_del(env, mdd_pobj, handle);
} else {
la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_set_internal(ctxt, mdd_cobj, la_copy, handle);
+ rc = mdd_attr_set_internal(env, mdd_cobj, la_copy, handle);
if (rc)
GOTO(cleanup, rc);
}
la_copy->la_valid = LA_CTIME | LA_MTIME;
- rc = mdd_attr_set_internal(ctxt, mdd_pobj, la_copy, handle);
+ rc = mdd_attr_set_internal(env, mdd_pobj, la_copy, handle);
if (rc)
GOTO(cleanup, rc);
- rc = __mdd_finish_unlink(ctxt, mdd_cobj, ma, handle);
-
+ rc = __mdd_finish_unlink(env, mdd_cobj, ma, handle);
+
if (rc == 0)
obd_set_info_async(mdd2obd_dev(mdd)->u.mds.mds_osc_exp,
strlen("unlinked"), "unlinked", 0,
NULL, NULL);
cleanup:
- mdd_unlock2(ctxt, mdd_pobj, mdd_cobj);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_unlock2(env, mdd_pobj, mdd_cobj);
+ mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
/* partial unlink */
-static int mdd_ref_del(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_ref_del(const struct lu_env *env, struct md_object *obj,
struct md_attr *ma, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
int rc;
ENTRY;
- mdd_txn_param_build(ctxt, &MDD_TXN_XATTR_SET);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_XATTR_SET);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(-ENOMEM);
- mdd_write_lock(ctxt, mdd_obj);
+ mdd_write_lock(env, mdd_obj);
- rc = mdd_unlink_sanity_check(ctxt, NULL, mdd_obj, ma, uc);
+ rc = mdd_unlink_sanity_check(env, NULL, mdd_obj, ma, uc);
if (rc)
GOTO(cleanup, rc);
- __mdd_ref_del(ctxt, mdd_obj, handle);
+ __mdd_ref_del(env, mdd_obj, handle);
if (S_ISDIR(lu_object_attr(&obj->mo_lu))) {
/* unlink dot */
- __mdd_ref_del(ctxt, mdd_obj, handle);
+ __mdd_ref_del(env, mdd_obj, handle);
}
- rc = __mdd_finish_unlink(ctxt, mdd_obj, ma, handle);
+ rc = __mdd_finish_unlink(env, mdd_obj, ma, handle);
EXIT;
cleanup:
- mdd_write_unlock(ctxt, mdd_obj);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_write_unlock(env, mdd_obj);
+ mdd_trans_stop(env, mdd, rc, handle);
return rc;
}
-static int mdd_parent_fid(const struct lu_context *ctxt,
+static int mdd_parent_fid(const struct lu_env *env,
struct mdd_object *obj,
struct lu_fid *fid)
{
- return __mdd_lookup_locked(ctxt, &obj->mod_obj,
+ return __mdd_lookup_locked(env, &obj->mod_obj,
dotdot, fid, 0, NULL);
}
*
* otherwise: values < 0, errors.
*/
-static int mdd_is_parent(const struct lu_context *ctxt,
+static int mdd_is_parent(const struct lu_env *env,
struct mdd_device *mdd,
struct mdd_object *p1,
const struct lu_fid *lf,
ENTRY;
LASSERT(!lu_fid_eq(mdo2fid(p1), lf));
- pfid = &mdd_ctx_info(ctxt)->mti_fid;
+ pfid = &mdd_env_info(env)->mti_fid;
/* Do not lookup ".." in root, they do not exist there. */
if (lu_fid_eq(mdo2fid(p1), &mdd->mdd_root_fid))
RETURN(0);
for(;;) {
- rc = mdd_parent_fid(ctxt, p1, pfid);
+ rc = mdd_parent_fid(env, p1, pfid);
if (rc)
GOTO(out, rc);
if (lu_fid_eq(pfid, &mdd->mdd_root_fid))
if (lu_fid_eq(pfid, lf))
GOTO(out, rc = 1);
if (parent)
- mdd_object_put(ctxt, parent);
- parent = mdd_object_find(ctxt, mdd, pfid);
+ mdd_object_put(env, parent);
+ parent = mdd_object_find(env, mdd, pfid);
/* cross-ref parent */
if (parent == NULL) {
EXIT;
out:
if (parent && !IS_ERR(parent))
- mdd_object_put(ctxt, parent);
+ mdd_object_put(env, parent);
return rc;
}
-static int mdd_rename_lock(const struct lu_context *ctxt,
+static int mdd_rename_lock(const struct lu_env *env,
struct mdd_device *mdd,
struct mdd_object *src_pobj,
struct mdd_object *tgt_pobj)
ENTRY;
if (src_pobj == tgt_pobj) {
- mdd_write_lock(ctxt, src_pobj);
+ mdd_write_lock(env, src_pobj);
RETURN(0);
}
/* compared the parent child relationship of src_p&tgt_p */
if (lu_fid_eq(&mdd->mdd_root_fid, mdo2fid(src_pobj))){
- mdd_lock2(ctxt, src_pobj, tgt_pobj);
+ mdd_lock2(env, src_pobj, tgt_pobj);
RETURN(0);
} else if (lu_fid_eq(&mdd->mdd_root_fid, mdo2fid(tgt_pobj))) {
- mdd_lock2(ctxt, tgt_pobj, src_pobj);
+ mdd_lock2(env, tgt_pobj, src_pobj);
RETURN(0);
}
- rc = mdd_is_parent(ctxt, mdd, src_pobj, mdo2fid(tgt_pobj), NULL);
+ rc = mdd_is_parent(env, mdd, src_pobj, mdo2fid(tgt_pobj), NULL);
if (rc < 0)
RETURN(rc);
if (rc == 1) {
- mdd_lock2(ctxt, tgt_pobj, src_pobj);
+ mdd_lock2(env, tgt_pobj, src_pobj);
RETURN(0);
}
- mdd_lock2(ctxt, src_pobj, tgt_pobj);
+ mdd_lock2(env, src_pobj, tgt_pobj);
RETURN(0);
}
-static void mdd_rename_unlock(const struct lu_context *ctxt,
+static void mdd_rename_unlock(const struct lu_env *env,
struct mdd_object *src_pobj,
struct mdd_object *tgt_pobj)
{
- mdd_write_unlock(ctxt, src_pobj);
+ mdd_write_unlock(env, src_pobj);
if (src_pobj != tgt_pobj)
- mdd_write_unlock(ctxt, tgt_pobj);
+ mdd_write_unlock(env, tgt_pobj);
}
-static int mdd_rename_sanity_check(const struct lu_context *ctxt,
+static int mdd_rename_sanity_check(const struct lu_env *env,
struct mdd_object *src_pobj,
struct mdd_object *tgt_pobj,
const struct lu_fid *sfid,
int rc = 0, need_check = 1;
ENTRY;
- mdd_read_lock(ctxt, src_pobj);
- rc = mdd_may_delete(ctxt, src_pobj, sobj, src_is_dir, need_check, uc);
- mdd_read_unlock(ctxt, src_pobj);
+ mdd_read_lock(env, src_pobj);
+ rc = mdd_may_delete(env, src_pobj, sobj, src_is_dir, need_check, uc);
+ mdd_read_unlock(env, src_pobj);
if (rc)
RETURN(rc);
need_check = 0;
if (!tobj) {
- mdd_read_lock(ctxt, tgt_pobj);
- rc = mdd_may_create(ctxt, tgt_pobj, NULL, need_check, uc);
- mdd_read_unlock(ctxt, tgt_pobj);
+ mdd_read_lock(env, tgt_pobj);
+ rc = mdd_may_create(env, tgt_pobj, NULL, need_check, uc);
+ mdd_read_unlock(env, tgt_pobj);
} else {
- mdd_read_lock(ctxt, tgt_pobj);
- rc = mdd_may_delete(ctxt, tgt_pobj, tobj, src_is_dir,
+ mdd_read_lock(env, tgt_pobj);
+ rc = mdd_may_delete(env, tgt_pobj, tobj, src_is_dir,
need_check, uc);
- mdd_read_unlock(ctxt, tgt_pobj);
+ mdd_read_unlock(env, tgt_pobj);
if (!rc && S_ISDIR(mdd_object_type(tobj)) &&
- mdd_dir_is_empty(ctxt, tobj))
+ mdd_dir_is_empty(env, tobj))
RETURN(-ENOTEMPTY);
}
/* source should not be ancestor of target dir */
- if (!rc && src_is_dir && mdd_is_parent(ctxt, mdd, tgt_pobj, sfid, NULL))
+ if (!rc && src_is_dir && mdd_is_parent(env, mdd, tgt_pobj, sfid, NULL))
RETURN(-EINVAL);
RETURN(rc);
}
/* src object can be remote that is why we use only fid and type of object */
-static int mdd_rename(const struct lu_context *ctxt,
+static int mdd_rename(const struct lu_env *env,
struct md_object *src_pobj, struct md_object *tgt_pobj,
const struct lu_fid *lf, const char *sname,
struct md_object *tobj, const char *tname,
struct mdd_device *mdd = mdo2mdd(src_pobj);
struct mdd_object *mdd_spobj = md2mdd_obj(src_pobj);
struct mdd_object *mdd_tpobj = md2mdd_obj(tgt_pobj);
- struct mdd_object *mdd_sobj = mdd_object_find(ctxt, mdd, lf);
+ struct mdd_object *mdd_sobj = mdd_object_find(env, mdd, lf);
struct mdd_object *mdd_tobj = NULL;
- struct lu_attr *la_copy = &mdd_ctx_info(ctxt)->mti_la_for_fix;
+ struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
struct thandle *handle;
int is_dir;
int rc;
mdd_tobj = md2mdd_obj(tobj);
/*XXX: shouldn't this check be done under lock below? */
- rc = mdd_rename_sanity_check(ctxt, mdd_spobj, mdd_tpobj,
+ rc = mdd_rename_sanity_check(env, mdd_spobj, mdd_tpobj,
lf, is_dir, mdd_sobj, mdd_tobj, uc);
if (rc)
GOTO(out, rc);
- mdd_txn_param_build(ctxt, &MDD_TXN_RENAME);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_RENAME);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
GOTO(out, rc = PTR_ERR(handle));
/*FIXME: Should consider tobj and sobj too in rename_lock*/
- rc = mdd_rename_lock(ctxt, mdd, mdd_spobj, mdd_tpobj);
+ rc = mdd_rename_lock(env, mdd, mdd_spobj, mdd_tpobj);
if (rc)
GOTO(cleanup_unlocked, rc);
- rc = __mdd_index_delete(ctxt, mdd_spobj, sname, handle);
+ rc = __mdd_index_delete(env, mdd_spobj, sname, handle);
if (rc)
GOTO(cleanup, rc);
/*if sobj is dir, its parent object nlink should be dec too*/
if (is_dir)
- __mdd_ref_del(ctxt, mdd_spobj, handle);
+ __mdd_ref_del(env, mdd_spobj, handle);
- rc = __mdd_index_delete(ctxt, mdd_tpobj, tname, handle);
+ rc = __mdd_index_delete(env, mdd_tpobj, tname, handle);
/* tobj can be remote one,
* so we do index_delete unconditionally and -ENOENT is allowed */
if (rc != 0 && rc != -ENOENT)
GOTO(cleanup, rc);
- rc = __mdd_index_insert(ctxt, mdd_tpobj, lf, tname, is_dir, handle);
+ rc = __mdd_index_insert(env, mdd_tpobj, lf, tname, is_dir, handle);
if (rc)
GOTO(cleanup, rc);
la_copy->la_valid = LA_CTIME;
if (mdd_sobj) {
/*XXX: how to update ctime for remote sobj? */
- rc = mdd_attr_set_internal_locked(ctxt, mdd_sobj, la_copy, handle);
+ rc = mdd_attr_set_internal_locked(env, mdd_sobj, la_copy, handle);
if (rc)
GOTO(cleanup, rc);
}
if (tobj && lu_object_exists(&tobj->mo_lu)) {
- mdd_write_lock(ctxt, mdd_tobj);
- __mdd_ref_del(ctxt, mdd_tobj, handle);
+ mdd_write_lock(env, mdd_tobj);
+ __mdd_ref_del(env, mdd_tobj, handle);
/* remove dot reference */
if (is_dir)
- __mdd_ref_del(ctxt, mdd_tobj, handle);
+ __mdd_ref_del(env, mdd_tobj, handle);
la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_set_internal(ctxt, mdd_tobj, la_copy, handle);
+ rc = mdd_attr_set_internal(env, mdd_tobj, la_copy, handle);
if (rc)
GOTO(cleanup, rc);
- rc = __mdd_finish_unlink(ctxt, mdd_tobj, ma, handle);
- mdd_write_unlock(ctxt, mdd_tobj);
+ rc = __mdd_finish_unlink(env, mdd_tobj, ma, handle);
+ mdd_write_unlock(env, mdd_tobj);
if (rc)
GOTO(cleanup, rc);
}
la_copy->la_valid = LA_CTIME | LA_MTIME;
- rc = mdd_attr_set_internal(ctxt, mdd_spobj, la_copy, handle);
+ rc = mdd_attr_set_internal(env, mdd_spobj, la_copy, handle);
if (rc)
GOTO(cleanup, rc);
if (mdd_spobj != mdd_tpobj) {
la_copy->la_valid = LA_CTIME | LA_MTIME;
- rc = mdd_attr_set_internal(ctxt, mdd_tpobj, la_copy, handle);
+ rc = mdd_attr_set_internal(env, mdd_tpobj, la_copy, handle);
}
cleanup:
- mdd_rename_unlock(ctxt, mdd_spobj, mdd_tpobj);
+ mdd_rename_unlock(env, mdd_spobj, mdd_tpobj);
cleanup_unlocked:
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_trans_stop(env, mdd, rc, handle);
out:
if (mdd_sobj)
- mdd_object_put(ctxt, mdd_sobj);
+ mdd_object_put(env, mdd_sobj);
RETURN(rc);
}
static int
-__mdd_lookup(const struct lu_context *ctxt, struct md_object *pobj,
+__mdd_lookup(const struct lu_env *env, struct md_object *pobj,
const char *name, const struct lu_fid* fid, int mask,
struct md_ucred *uc)
{
RETURN(-ESTALE);
if (mask == MAY_EXEC)
- rc = mdd_exec_permission_lite(ctxt, mdd_obj, uc);
+ rc = mdd_exec_permission_lite(env, mdd_obj, uc);
else
- rc = mdd_permission_internal(ctxt, mdd_obj, mask, uc);
+ rc = mdd_permission_internal(env, mdd_obj, mask, uc);
if (rc)
RETURN(rc);
- if (S_ISDIR(mdd_object_type(mdd_obj)) && dt_try_as_dir(ctxt, dir))
- rc = dir->do_index_ops->dio_lookup(ctxt, dir, rec, key);
+ if (S_ISDIR(mdd_object_type(mdd_obj)) && dt_try_as_dir(env, dir))
+ rc = dir->do_index_ops->dio_lookup(env, dir, rec, key);
else
rc = -ENOTDIR;
}
static int
-__mdd_lookup_locked(const struct lu_context *ctxt, struct md_object *pobj,
+__mdd_lookup_locked(const struct lu_env *env, struct md_object *pobj,
const char *name, const struct lu_fid* fid, int mask,
struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(pobj);
int rc;
-
- mdd_read_lock(ctxt, mdd_obj);
- rc = __mdd_lookup(ctxt, pobj, name, fid, mask, uc);
- mdd_read_unlock(ctxt, mdd_obj);
- return rc;
+ mdd_read_lock(env, mdd_obj);
+ rc = __mdd_lookup(env, pobj, name, fid, mask, uc);
+ mdd_read_unlock(env, mdd_obj);
+
+ return rc;
}
-static int mdd_lookup(const struct lu_context *ctxt,
+static int mdd_lookup(const struct lu_env *env,
struct md_object *pobj, const char *name,
struct lu_fid* fid, struct md_ucred *uc)
{
int rc;
ENTRY;
- rc = __mdd_lookup_locked(ctxt, pobj, name, fid, MAY_EXEC, uc);
+ rc = __mdd_lookup_locked(env, pobj, name, fid, MAY_EXEC, uc);
RETURN(rc);
}
*
* returns < 0: if error
*/
-static int mdd_is_subdir(const struct lu_context *ctx,
+static int mdd_is_subdir(const struct lu_env *env,
struct md_object *mo, const struct lu_fid *fid,
struct lu_fid *sfid, struct md_ucred *uc)
{
if (!S_ISDIR(mdd_object_type(md2mdd_obj(mo))))
RETURN(0);
- rc = mdd_is_parent(ctx, mdd, md2mdd_obj(mo), fid, sfid);
+ rc = mdd_is_parent(env, mdd, md2mdd_obj(mo), fid, sfid);
RETURN(rc);
}
-static int __mdd_object_initialize(const struct lu_context *ctxt,
+static int __mdd_object_initialize(const struct lu_env *env,
const struct lu_fid *pfid,
struct mdd_object *child,
struct md_attr *ma, struct thandle *handle)
* (2) maybe, the child attributes should be set in OSD when creation.
*/
- rc = mdd_attr_set_internal(ctxt, child, &ma->ma_attr, handle);
+ rc = mdd_attr_set_internal(env, child, &ma->ma_attr, handle);
if (rc != 0)
RETURN(rc);
if (S_ISDIR(ma->ma_attr.la_mode)) {
/* add . and .. for newly created dir */
- __mdd_ref_add(ctxt, child, handle);
- rc = __mdd_index_insert_only(ctxt, child, mdo2fid(child),
+ __mdd_ref_add(env, child, handle);
+ rc = __mdd_index_insert_only(env, child, mdo2fid(child),
dot, handle);
if (rc == 0) {
- rc = __mdd_index_insert_only(ctxt, child, pfid,
+ rc = __mdd_index_insert_only(env, child, pfid,
dotdot, handle);
if (rc != 0) {
int rc2;
- rc2 = __mdd_index_delete(ctxt,
+ rc2 = __mdd_index_delete(env,
child, dot, handle);
if (rc2 != 0)
CERROR("Failure to cleanup after dotdot"
" creation: %d (%d)\n", rc2, rc);
else
- __mdd_ref_del(ctxt, child, handle);
+ __mdd_ref_del(env, child, handle);
}
}
}
/*
* XXX: Need MAY_WRITE to be checked?
*/
-static int mdd_cd_sanity_check(const struct lu_context *ctxt,
+static int mdd_cd_sanity_check(const struct lu_env *env,
struct mdd_object *obj, struct md_ucred *uc)
{
int rc = 0;
RETURN(-ENOENT);
#if 0
- mdd_read_lock(ctxt, obj);
- rc = mdd_permission_internal(ctxt, obj, MAY_WRITE, uc);
- mdd_read_unlock(ctxt, obj);
+ mdd_read_lock(env, obj);
+ rc = mdd_permission_internal(env, obj, MAY_WRITE, uc);
+ mdd_read_unlock(env, obj);
#endif
RETURN(rc);
}
-static int mdd_create_data(const struct lu_context *ctxt,
+static int mdd_create_data(const struct lu_env *env,
struct md_object *pobj, struct md_object *cobj,
const struct md_create_spec *spec,
struct md_attr *ma, struct md_ucred *uc)
int rc;
ENTRY;
- rc = mdd_cd_sanity_check(ctxt, son, uc);
+ rc = mdd_cd_sanity_check(env, son, uc);
if (rc)
RETURN(rc);
if (spec->sp_cr_flags & MDS_OPEN_DELAY_CREATE ||
!(spec->sp_cr_flags & FMODE_WRITE))
RETURN(0);
- rc = mdd_lov_create(ctxt, mdd, mdd_pobj, son, &lmm, &lmm_size, spec,
+ rc = mdd_lov_create(env, mdd, mdd_pobj, son, &lmm, &lmm_size, spec,
attr);
if (rc)
RETURN(rc);
- mdd_txn_param_build(ctxt, &MDD_TXN_CREATE_DATA);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_CREATE_DATA);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(rc = PTR_ERR(handle));
/* replay creates has objects already */
if (spec->u.sp_ea.no_lov_create) {
CDEBUG(D_INFO, "we already have lov ea\n");
- rc = mdd_lov_set_md(ctxt, mdd_pobj, son,
+ rc = mdd_lov_set_md(env, mdd_pobj, son,
(struct lov_mds_md *)spec->u.sp_ea.eadata,
spec->u.sp_ea.eadatalen, handle, 0);
} else
- rc = mdd_lov_set_md(ctxt, mdd_pobj, son, lmm,
+ rc = mdd_lov_set_md(env, mdd_pobj, son, lmm,
lmm_size, handle, 0);
if (rc == 0)
- rc = mdd_attr_get_internal_locked(ctxt, son, ma);
+ rc = mdd_attr_get_internal_locked(env, son, ma);
/* finish mdd_lov_create() stuff */
- mdd_lov_create_finish(ctxt, mdd, rc);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_lov_create_finish(env, mdd, rc);
+ mdd_trans_stop(env, mdd, rc, handle);
if (lmm)
OBD_FREE(lmm, lmm_size);
RETURN(rc);
}
-static int mdd_create_sanity_check(const struct lu_context *ctxt,
+static int mdd_create_sanity_check(const struct lu_env *env,
struct md_object *pobj,
const char *name, struct md_attr *ma,
struct md_ucred *uc)
{
- struct mdd_thread_info *info = mdd_ctx_info(ctxt);
+ struct mdd_thread_info *info = mdd_env_info(env);
struct lu_attr *la = &info->mti_la;
struct lu_fid *fid = &info->mti_fid;
struct mdd_object *obj = md2mdd_obj(pobj);
if (mdd_is_dead_obj(obj))
RETURN(-ENOENT);
- rc = __mdd_lookup_locked(ctxt, pobj, name, fid,
+ rc = __mdd_lookup_locked(env, pobj, name, fid,
MAY_WRITE | MAY_EXEC, uc);
if (rc != -ENOENT)
RETURN(rc ? : -EEXIST);
/* sgid check */
- mdd_read_lock(ctxt, obj);
- rc = __mdd_la_get(ctxt, obj, la);
- mdd_read_unlock(ctxt, obj);
+ mdd_read_lock(env, obj);
+ rc = __mdd_la_get(env, obj, la);
+ mdd_read_unlock(env, obj);
if (rc)
RETURN(rc);
/*
* Create object and insert it into namespace.
*/
-static int mdd_create(const struct lu_context *ctxt,
+static int mdd_create(const struct lu_env *env,
struct md_object *pobj, const char *name,
struct md_object *child,
const struct md_create_spec *spec,
struct mdd_device *mdd = mdo2mdd(pobj);
struct mdd_object *mdd_pobj = md2mdd_obj(pobj);
struct mdd_object *son = md2mdd_obj(child);
- struct lu_attr *la_copy = &mdd_ctx_info(ctxt)->mti_la_for_fix;
+ struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
struct lu_attr *attr = &ma->ma_attr;
struct lov_mds_md *lmm = NULL;
struct thandle *handle;
ENTRY;
/* sanity checks before big job */
- rc = mdd_create_sanity_check(ctxt, pobj, name, ma, uc);
+ rc = mdd_create_sanity_check(env, pobj, name, ma, uc);
if (rc)
RETURN(rc);
/* no RPC inside the transaction, so OST objects should be created at
* first */
if (S_ISREG(attr->la_mode)) {
- rc = mdd_lov_create(ctxt, mdd, mdd_pobj, son, &lmm, &lmm_size,
+ rc = mdd_lov_create(env, mdd, mdd_pobj, son, &lmm, &lmm_size,
spec, attr);
if (rc)
RETURN(rc);
}
- mdd_txn_param_build(ctxt, &MDD_TXN_MKDIR);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_MKDIR);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_write_lock(ctxt, mdd_pobj);
+ mdd_write_lock(env, mdd_pobj);
/*
* XXX check that link can be added to the parent in mkdir case.
* Maybe we should do the same. For now: creation-first.
*/
- mdd_write_lock(ctxt, son);
- rc = __mdd_object_create(ctxt, son, ma, handle);
+ mdd_write_lock(env, son);
+ rc = __mdd_object_create(env, son, ma, handle);
if (rc) {
- mdd_write_unlock(ctxt, son);
+ mdd_write_unlock(env, son);
GOTO(cleanup, rc);
}
created = 1;
- rc = __mdd_object_initialize(ctxt, mdo2fid(mdd_pobj),
+ rc = __mdd_object_initialize(env, mdo2fid(mdd_pobj),
son, ma, handle);
- mdd_write_unlock(ctxt, son);
+ mdd_write_unlock(env, son);
if (rc)
/*
* Object has no links, so it will be destroyed when last
*/
GOTO(cleanup, rc);
- rc = __mdd_index_insert(ctxt, mdd_pobj, mdo2fid(son),
+ rc = __mdd_index_insert(env, mdd_pobj, mdo2fid(son),
name, S_ISDIR(attr->la_mode), handle);
if (rc)
/* replay creates has objects already */
if (spec->u.sp_ea.no_lov_create) {
CDEBUG(D_INFO, "we already have lov ea\n");
- rc = mdd_lov_set_md(ctxt, mdd_pobj, son,
+ rc = mdd_lov_set_md(env, mdd_pobj, son,
(struct lov_mds_md *)spec->u.sp_ea.eadata,
spec->u.sp_ea.eadatalen, handle, 0);
} else
- rc = mdd_lov_set_md(ctxt, mdd_pobj, son, lmm,
+ rc = mdd_lov_set_md(env, mdd_pobj, son, lmm,
lmm_size, handle, 0);
if (rc) {
CERROR("error on stripe info copy %d \n", rc);
int sym_len = strlen(target_name);
loff_t pos = 0;
- rc = dt->do_body_ops->dbo_write(ctxt, dt, target_name,
+ rc = dt->do_body_ops->dbo_write(env, dt, target_name,
sym_len, &pos, handle);
if (rc == sym_len)
rc = 0;
*la_copy = ma->ma_attr;
la_copy->la_valid = LA_CTIME | LA_MTIME;
- rc = mdd_attr_set_internal(ctxt, mdd_pobj, la_copy, handle);
+ rc = mdd_attr_set_internal(env, mdd_pobj, la_copy, handle);
if (rc)
GOTO(cleanup, rc);
/* return attr back */
- rc = mdd_attr_get_internal_locked(ctxt, son, ma);
+ rc = mdd_attr_get_internal_locked(env, son, ma);
cleanup:
if (rc && created) {
int rc2 = 0;
if (inserted) {
- rc2 = __mdd_index_delete(ctxt, mdd_pobj, name, handle);
+ rc2 = __mdd_index_delete(env, mdd_pobj, name, handle);
if (rc2)
CERROR("error can not cleanup destroy %d\n",
rc2);
}
if (rc2 == 0)
- __mdd_ref_del(ctxt, son, handle);
+ __mdd_ref_del(env, son, handle);
}
/* finish mdd_lov_create() stuff */
- mdd_lov_create_finish(ctxt, mdd, rc);
+ mdd_lov_create_finish(env, mdd, rc);
if (lmm)
OBD_FREE(lmm, lmm_size);
- mdd_write_unlock(ctxt, mdd_pobj);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_write_unlock(env, mdd_pobj);
+ mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
/* partial operation */
-static int mdd_oc_sanity_check(const struct lu_context *ctxt,
+static int mdd_oc_sanity_check(const struct lu_env *env,
struct mdd_object *obj,
struct md_attr *ma,
struct md_ucred *uc)
RETURN(rc);
}
-static int mdd_object_create(const struct lu_context *ctxt,
+static int mdd_object_create(const struct lu_env *env,
struct md_object *obj,
const struct md_create_spec *spec,
struct md_attr *ma,
int rc;
ENTRY;
- rc = mdd_oc_sanity_check(ctxt, mdd_obj, ma, uc);
+ rc = mdd_oc_sanity_check(env, mdd_obj, ma, uc);
if (rc)
RETURN(rc);
- mdd_txn_param_build(ctxt, &MDD_TXN_OBJECT_CREATE);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_OBJECT_CREATE);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_write_lock(ctxt, mdd_obj);
- rc = __mdd_object_create(ctxt, mdd_obj, ma, handle);
+ mdd_write_lock(env, mdd_obj);
+ rc = __mdd_object_create(env, mdd_obj, ma, handle);
if (rc == 0 && spec->sp_cr_flags & MDS_CREATE_SLAVE_OBJ) {
/* if creating the slave object, set slave EA here */
- rc = __mdd_xattr_set(ctxt, mdd_obj, spec->u.sp_ea.eadata,
+ rc = __mdd_xattr_set(env, mdd_obj, spec->u.sp_ea.eadata,
spec->u.sp_ea.eadatalen, MDS_LMV_MD_NAME,
0, handle);
pfid = spec->u.sp_ea.fid;
}
if (rc == 0)
- rc = __mdd_object_initialize(ctxt, pfid, mdd_obj, ma, handle);
- mdd_write_unlock(ctxt, mdd_obj);
+ rc = __mdd_object_initialize(env, pfid, mdd_obj, ma, handle);
+ mdd_write_unlock(env, mdd_obj);
if (rc == 0)
- rc = mdd_attr_get_internal_locked(ctxt, mdd_obj, ma);
+ rc = mdd_attr_get_internal_locked(env, mdd_obj, ma);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
* Partial operation. Be aware, this is called with write lock taken, so we use
* locksless version of __mdd_lookup() here.
*/
-static int mdd_ni_sanity_check(const struct lu_context *ctxt,
+static int mdd_ni_sanity_check(const struct lu_env *env,
struct md_object *pobj,
const char *name,
const struct lu_fid *fid,
if (mdd_is_dead_obj(obj))
RETURN(-ENOENT);
- rc = __mdd_lookup(ctxt, pobj, name, fid, MAY_WRITE | MAY_EXEC, uc);
+ rc = __mdd_lookup(env, pobj, name, fid, MAY_WRITE | MAY_EXEC, uc);
if (rc != -ENOENT)
RETURN(rc ? : -EEXIST);
else
RETURN(0);
}
-static int mdd_name_insert(const struct lu_context *ctxt,
+static int mdd_name_insert(const struct lu_env *env,
struct md_object *pobj,
const char *name, const struct lu_fid *fid,
int isdir, struct md_ucred *uc)
int rc;
ENTRY;
- mdd_txn_param_build(ctxt, &MDD_TXN_INDEX_INSERT);
- handle = mdd_trans_start(ctxt, mdo2mdd(pobj));
+ mdd_txn_param_build(env, &MDD_TXN_INDEX_INSERT);
+ handle = mdd_trans_start(env, mdo2mdd(pobj));
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_write_lock(ctxt, mdd_obj);
- rc = mdd_ni_sanity_check(ctxt, pobj, name, fid, uc);
+ mdd_write_lock(env, mdd_obj);
+ rc = mdd_ni_sanity_check(env, pobj, name, fid, uc);
if (rc)
GOTO(out_unlock, rc);
- rc = __mdd_index_insert(ctxt, mdd_obj, fid, name, isdir, handle);
+ rc = __mdd_index_insert(env, mdd_obj, fid, name, isdir, handle);
out_unlock:
- mdd_write_unlock(ctxt, mdd_obj);
+ mdd_write_unlock(env, mdd_obj);
- mdd_trans_stop(ctxt, mdo2mdd(pobj), rc, handle);
+ mdd_trans_stop(env, mdo2mdd(pobj), rc, handle);
RETURN(rc);
}
* Be aware, this is called with write lock taken, so we use locksless version
* of __mdd_lookup() here.
*/
-static int mdd_nr_sanity_check(const struct lu_context *ctxt,
+static int mdd_nr_sanity_check(const struct lu_env *env,
struct md_object *pobj,
const char *name,
struct md_ucred *uc)
{
- struct mdd_thread_info *info = mdd_ctx_info(ctxt);
+ struct mdd_thread_info *info = mdd_env_info(env);
struct lu_fid *fid = &info->mti_fid;
struct mdd_object *obj = md2mdd_obj(pobj);
int rc;
if (mdd_is_dead_obj(obj))
RETURN(-ENOENT);
- rc = __mdd_lookup(ctxt, pobj, name, fid, MAY_WRITE | MAY_EXEC, uc);
+ rc = __mdd_lookup(env, pobj, name, fid, MAY_WRITE | MAY_EXEC, uc);
RETURN(rc);
}
-static int mdd_name_remove(const struct lu_context *ctxt,
+static int mdd_name_remove(const struct lu_env *env,
struct md_object *pobj,
const char *name,
struct md_ucred *uc)
int rc;
ENTRY;
- mdd_txn_param_build(ctxt, &MDD_TXN_INDEX_DELETE);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_INDEX_DELETE);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_write_lock(ctxt, mdd_obj);
- rc = mdd_nr_sanity_check(ctxt, pobj, name, uc);
+ mdd_write_lock(env, mdd_obj);
+ rc = mdd_nr_sanity_check(env, pobj, name, uc);
if (rc)
GOTO(out_unlock, rc);
- rc = __mdd_index_delete(ctxt, mdd_obj, name, handle);
+ rc = __mdd_index_delete(env, mdd_obj, name, handle);
out_unlock:
- mdd_write_unlock(ctxt, mdd_obj);
+ mdd_write_unlock(env, mdd_obj);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
-static int mdd_rt_sanity_check(const struct lu_context *ctxt,
+static int mdd_rt_sanity_check(const struct lu_env *env,
struct mdd_object *tgt_pobj,
struct mdd_object *tobj,
const struct lu_fid *sfid,
src_is_dir = S_ISDIR(ma->ma_attr.la_mode);
if (tobj) {
- rc = mdd_may_delete(ctxt, tgt_pobj, tobj, src_is_dir, 1, uc);
+ rc = mdd_may_delete(env, tgt_pobj, tobj, src_is_dir, 1, uc);
if (!rc && S_ISDIR(mdd_object_type(tobj)) &&
- mdd_dir_is_empty(ctxt, tobj))
+ mdd_dir_is_empty(env, tobj))
RETURN(-ENOTEMPTY);
} else {
- rc = mdd_may_create(ctxt, tgt_pobj, NULL, 1, uc);
+ rc = mdd_may_create(env, tgt_pobj, NULL, 1, uc);
}
/* source should not be ancestor of target dir */
- if (!rc && src_is_dir && mdd_is_parent(ctxt, mdd, tgt_pobj, sfid, NULL))
+ if (!rc && src_is_dir && mdd_is_parent(env, mdd, tgt_pobj, sfid, NULL))
RETURN(-EINVAL);
RETURN(rc);
}
-static int mdd_rename_tgt(const struct lu_context *ctxt,
+static int mdd_rename_tgt(const struct lu_env *env,
struct md_object *pobj, struct md_object *tobj,
const struct lu_fid *lf, const char *name,
struct md_attr *ma, struct md_ucred *uc)
int rc;
ENTRY;
- mdd_txn_param_build(ctxt, &MDD_TXN_RENAME);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_RENAME);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
if (tobj) {
mdd_tobj = md2mdd_obj(tobj);
- mdd_lock2(ctxt, mdd_tpobj, mdd_tobj);
+ mdd_lock2(env, mdd_tpobj, mdd_tobj);
} else {
- mdd_write_lock(ctxt, mdd_tpobj);
+ mdd_write_lock(env, mdd_tpobj);
}
/*TODO rename sanity checking*/
- rc = mdd_rt_sanity_check(ctxt, mdd_tpobj, mdd_tobj, lf, name, ma, uc);
+ rc = mdd_rt_sanity_check(env, mdd_tpobj, mdd_tobj, lf, name, ma, uc);
if (rc)
GOTO(cleanup, rc);
if (tobj) {
- rc = __mdd_index_delete(ctxt, mdd_tpobj, name, handle);
+ rc = __mdd_index_delete(env, mdd_tpobj, name, handle);
if (rc)
GOTO(cleanup, rc);
}
- rc = __mdd_index_insert_only(ctxt, mdd_tpobj, lf, name, handle);
+ rc = __mdd_index_insert_only(env, mdd_tpobj, lf, name, handle);
if (rc)
GOTO(cleanup, rc);
if (tobj && lu_object_exists(&tobj->mo_lu))
- __mdd_ref_del(ctxt, mdd_tobj, handle);
+ __mdd_ref_del(env, mdd_tobj, handle);
cleanup:
if (tobj)
- mdd_unlock2(ctxt, mdd_tpobj, mdd_tobj);
+ mdd_unlock2(env, mdd_tpobj, mdd_tobj);
else
- mdd_write_unlock(ctxt, mdd_tpobj);
- mdd_trans_stop(ctxt, mdd, rc, handle);
+ mdd_write_unlock(env, mdd_tpobj);
+ mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
/*
* No permission check is needed.
*/
-static int mdd_root_get(const struct lu_context *ctx,
+static int mdd_root_get(const struct lu_env *env,
struct md_device *m, struct lu_fid *f,
struct md_ucred *uc)
{
/*
* No permission check is needed.
*/
-static int mdd_statfs(const struct lu_context *ctx, struct md_device *m,
+static int mdd_statfs(const struct lu_env *env, struct md_device *m,
struct kstatfs *sfs, struct md_ucred *uc)
{
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
ENTRY;
- rc = mdd_child_ops(mdd)->dt_statfs(ctx, mdd->mdd_child, sfs);
+ rc = mdd_child_ops(mdd)->dt_statfs(env, mdd->mdd_child, sfs);
RETURN(rc);
}
/*
* No permission check is needed.
*/
-static int mdd_maxsize_get(const struct lu_context *ctx, struct md_device *m,
+static int mdd_maxsize_get(const struct lu_env *env, struct md_device *m,
int *md_size, int *cookie_size, struct md_ucred *uc)
{
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
ENTRY;
- *md_size = mdd_lov_mdsize(ctx, mdd);
- *cookie_size = mdd_lov_cookiesize(ctx, mdd);
+ *md_size = mdd_lov_mdsize(env, mdd);
+ *cookie_size = mdd_lov_cookiesize(env, mdd);
RETURN(0);
}
RETURN(0);
}
-static int mdd_update_capa_key(const struct lu_context *ctx,
+static int mdd_update_capa_key(const struct lu_env *env,
struct md_device *m,
struct lustre_capa_key *key)
{
RETURN(rc);
}
-static void __mdd_ref_add(const struct lu_context *ctxt, struct mdd_object *obj,
+static void __mdd_ref_add(const struct lu_env *env, struct mdd_object *obj,
struct thandle *handle)
{
struct dt_object *next;
LASSERT(lu_object_exists(mdd2lu_obj(obj)));
next = mdd_object_child(obj);
- next->do_ops->do_ref_add(ctxt, next, handle);
+ next->do_ops->do_ref_add(env, next, handle);
}
/*
* XXX: if permission check is needed here?
*/
-static int mdd_ref_add(const struct lu_context *ctxt,
+static int mdd_ref_add(const struct lu_env *env,
struct md_object *obj, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct thandle *handle;
ENTRY;
- mdd_txn_param_build(ctxt, &MDD_TXN_XATTR_SET);
- handle = mdd_trans_start(ctxt, mdd);
+ mdd_txn_param_build(env, &MDD_TXN_XATTR_SET);
+ handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(-ENOMEM);
- mdd_write_lock(ctxt, mdd_obj);
- __mdd_ref_add(ctxt, mdd_obj, handle);
- mdd_write_unlock(ctxt, mdd_obj);
+ mdd_write_lock(env, mdd_obj);
+ __mdd_ref_add(env, mdd_obj, handle);
+ mdd_write_unlock(env, mdd_obj);
- mdd_trans_stop(ctxt, mdd, 0, handle);
+ mdd_trans_stop(env, mdd, 0, handle);
RETURN(0);
}
static void
-__mdd_ref_del(const struct lu_context *ctxt, struct mdd_object *obj,
+__mdd_ref_del(const struct lu_env *env, struct mdd_object *obj,
struct thandle *handle)
{
struct dt_object *next = mdd_object_child(obj);
LASSERT(lu_object_exists(mdd2lu_obj(obj)));
- next->do_ops->do_ref_del(ctxt, next, handle);
+ next->do_ops->do_ref_del(env, next, handle);
}
/* do NOT or the MAY_*'s, you'll get the weakest */
return res;
}
-static int mdd_open_sanity_check(const struct lu_context *ctxt,
+static int mdd_open_sanity_check(const struct lu_env *env,
struct mdd_object *obj, int flag,
struct md_ucred *uc)
{
- struct lu_attr *tmp_la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
int mode = accmode(obj, flag);
int rc;
ENTRY;
if (mdd_is_dead_obj(obj))
RETURN(-ENOENT);
- rc = __mdd_la_get(ctxt, obj, tmp_la);
+ rc = __mdd_la_get(env, obj, tmp_la);
if (rc)
RETURN(rc);
RETURN(-EISDIR);
if (!(flag & MDS_OPEN_CREATED)) {
- rc = __mdd_permission_internal(ctxt, obj, mode, 0, uc);
+ rc = __mdd_permission_internal(env, obj, mode, 0, uc);
if (rc)
RETURN(rc);
}
RETURN(0);
}
-static int mdd_open(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_open(const struct lu_env *env, struct md_object *obj,
int flags, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
int rc = 0;
- mdd_write_lock(ctxt, mdd_obj);
+ mdd_write_lock(env, mdd_obj);
- rc = mdd_open_sanity_check(ctxt, mdd_obj, flags, uc);
+ rc = mdd_open_sanity_check(env, mdd_obj, flags, uc);
if (rc == 0)
mdd_obj->mod_count ++;
- mdd_write_unlock(ctxt, mdd_obj);
+ mdd_write_unlock(env, mdd_obj);
return rc;
}
/*
* No permission check is needed.
*/
-static int mdd_close(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_close(const struct lu_env *env, struct md_object *obj,
struct md_attr *ma, struct md_ucred *uc)
{
int rc;
struct mdd_object *mdd_obj = md2mdd_obj(obj);
ENTRY;
- mdd_write_lock(ctxt, mdd_obj);
+ mdd_write_lock(env, mdd_obj);
/* release open count */
mdd_obj->mod_count --;
- rc = __mdd_iattr_get(ctxt, mdd_obj, ma);
+ rc = __mdd_iattr_get(env, mdd_obj, ma);
if (rc == 0 && mdd_obj->mod_count == 0) {
if (ma->ma_attr.la_nlink == 0)
- rc = __mdd_object_kill(ctxt, mdd_obj, ma);
+ rc = __mdd_object_kill(env, mdd_obj, ma);
}
- mdd_write_unlock(ctxt, mdd_obj);
+ mdd_write_unlock(env, mdd_obj);
RETURN(rc);
}
-static int mdd_readpage_sanity_check(const struct lu_context *ctxt,
+static int mdd_readpage_sanity_check(const struct lu_env *env,
struct mdd_object *obj,
struct md_ucred *uc)
{
ENTRY;
if (S_ISDIR(mdd_object_type(obj)) &&
- dt_try_as_dir(ctxt, next))
- rc = mdd_permission_internal(ctxt, obj, MAY_READ, uc);
+ dt_try_as_dir(env, next))
+ rc = mdd_permission_internal(env, obj, MAY_READ, uc);
else
rc = -ENOTDIR;
RETURN(rc);
}
-static int mdd_readpage(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_readpage(const struct lu_env *env, struct md_object *obj,
const struct lu_rdpg *rdpg, struct md_ucred *uc)
{
struct dt_object *next;
LASSERT(lu_object_exists(mdd2lu_obj(mdd_obj)));
next = mdd_object_child(mdd_obj);
- mdd_read_lock(ctxt, mdd_obj);
- rc = mdd_readpage_sanity_check(ctxt, mdd_obj, uc);
+ mdd_read_lock(env, mdd_obj);
+ rc = mdd_readpage_sanity_check(env, mdd_obj, uc);
if (rc)
GOTO(out_unlock, rc);
- rc = next->do_ops->do_readpage(ctxt, next, rdpg);
+ rc = next->do_ops->do_readpage(env, next, rdpg);
out_unlock:
- mdd_read_unlock(ctxt, mdd_obj);
+ mdd_read_unlock(env, mdd_obj);
RETURN(rc);
}
}
#endif
-static int mdd_check_acl(const struct lu_context *ctxt, struct mdd_object *obj,
+static int mdd_check_acl(const struct lu_env *env, struct mdd_object *obj,
struct lu_attr* la, int mask, struct md_ucred *uc)
{
#ifdef CONFIG_FS_POSIX_ACL
ENTRY;
next = mdd_object_child(obj);
- buf_len = next->do_ops->do_xattr_get(ctxt, next, NULL, 0, "");
+ buf_len = next->do_ops->do_xattr_get(env, next, NULL, 0, "");
if (buf_len <= 0)
RETURN(buf_len ? : -EACCES);
OBD_ALLOC(buf, buf_len);
if (buf == NULL)
RETURN(-ENOMEM);
-
- rc = next->do_ops->do_xattr_get(ctxt, next, buf, buf_len, "");
+
+ rc = next->do_ops->do_xattr_get(env, next, buf, buf_len, "");
if (rc <= 0)
GOTO(out, rc = rc ? : -EACCES);
entry = ((posix_acl_xattr_header *)buf)->a_entries;
entry_count = (rc - 4) / sizeof(posix_acl_xattr_entry);
-
+
rc = mdd_posix_acl_permission(uc, la, mask, entry, entry_count);
out:
#endif
}
-static int mdd_exec_permission_lite(const struct lu_context *ctxt,
+static int mdd_exec_permission_lite(const struct lu_env *env,
struct mdd_object *obj,
struct md_ucred *uc)
{
- struct lu_attr *la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *la = &mdd_env_info(env)->mti_la;
umode_t mode;
int rc;
ENTRY;
if (uc->mu_valid == UCRED_INVALID)
RETURN(-EACCES);
- rc = __mdd_la_get(ctxt, obj, la);
+ rc = __mdd_la_get(env, obj, la);
if (rc)
RETURN(rc);
RETURN(-EACCES);
}
-static int __mdd_permission_internal(const struct lu_context *ctxt,
+static int __mdd_permission_internal(const struct lu_env *env,
struct mdd_object *obj,
int mask, int getattr,
struct md_ucred *uc)
{
- struct lu_attr *la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *la = &mdd_env_info(env)->mti_la;
__u32 mode;
int rc;
RETURN(-EACCES);
if (getattr) {
- rc = __mdd_la_get(ctxt, obj, la);
+ rc = __mdd_la_get(env, obj, la);
if (rc)
RETURN(rc);
}
if (((mode >> 3) & mask & S_IRWXO) != mask)
goto check_groups;
- rc = mdd_check_acl(ctxt, obj, la, mask, uc);
+ rc = mdd_check_acl(env, obj, la, mask, uc);
if (rc == -EACCES)
goto check_capabilities;
else if ((rc != -EAGAIN) && (rc != -EOPNOTSUPP))
RETURN(-EACCES);
}
-static inline int mdd_permission_internal_locked(const struct lu_context *ctxt,
+static inline int mdd_permission_internal_locked(const struct lu_env *env,
struct mdd_object *obj,
int mask, struct md_ucred *uc)
{
int rc;
- mdd_read_lock(ctxt, obj);
- rc = mdd_permission_internal(ctxt, obj, mask, uc);
- mdd_read_unlock(ctxt, obj);
+ mdd_read_lock(env, obj);
+ rc = mdd_permission_internal(env, obj, mask, uc);
+ mdd_read_unlock(env, obj);
return rc;
}
-static int mdd_permission(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_permission(const struct lu_env *env, struct md_object *obj,
int mask, struct md_ucred *uc)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
int rc;
ENTRY;
- rc = mdd_permission_internal_locked(ctxt, mdd_obj, mask, uc);
+ rc = mdd_permission_internal_locked(env, mdd_obj, mask, uc);
RETURN(rc);
}
-static int mdd_capa_get(const struct lu_context *ctxt, struct md_object *obj,
+static int mdd_capa_get(const struct lu_env *env, struct md_object *obj,
struct lustre_capa *capa)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
.o_owner = THIS_MODULE
};
-static struct lu_device *mdd_device_alloc(const struct lu_context *ctx,
+static struct lu_device *mdd_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *lcfg)
{
return l;
}
-static void mdd_device_free(const struct lu_context *ctx,
+static void mdd_device_free(const struct lu_env *env,
struct lu_device *lu)
{
struct mdd_device *m = lu2mdd_dev(lu);
struct obd_trans_info mti_oti;
};
-int mdd_init_obd(const struct lu_context *ctxt, struct mdd_device *mdd,
+int mdd_init_obd(const struct lu_env *env, struct mdd_device *mdd,
struct lustre_cfg *cfg);
-int mdd_fini_obd(const struct lu_context *, struct mdd_device *);
-int mdd_xattr_set_txn(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_fini_obd(const struct lu_env *, struct mdd_device *);
+int mdd_xattr_set_txn(const struct lu_env *env, struct mdd_object *obj,
const void *buf, int buf_len, const char *name, int fl,
struct thandle *txn);
-int mdd_lov_set_md(const struct lu_context *ctxt, struct mdd_object *pobj,
+int mdd_lov_set_md(const struct lu_env *env, struct mdd_object *pobj,
struct mdd_object *child, struct lov_mds_md *lmm,
int lmm_size, struct thandle *handle, int set_stripe);
-int mdd_lov_create(const struct lu_context *ctxt, struct mdd_device *mdd,
+int mdd_lov_create(const struct lu_env *env, struct mdd_device *mdd,
struct mdd_object *parent, struct mdd_object *child,
struct lov_mds_md **lmm, int *lmm_size,
const struct md_create_spec *spec, struct lu_attr *la);
-void mdd_lov_create_finish(const struct lu_context *ctxt,
+void mdd_lov_create_finish(const struct lu_env *env,
struct mdd_device *mdd, int rc);
-int mdd_get_md(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_get_md(const struct lu_env *env, struct mdd_object *obj,
void *md, int *md_size, const char *name);
-int mdd_get_md_locked(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_get_md_locked(const struct lu_env *env, struct mdd_object *obj,
void *md, int *md_size, const char *name);
-int mdd_unlink_log(const struct lu_context *ctxt, struct mdd_device *mdd,
+int mdd_unlink_log(const struct lu_env *env, struct mdd_device *mdd,
struct mdd_object *mdd_cobj, struct md_attr *ma);
-int mdd_attr_set_internal(const struct lu_context *ctxt, struct mdd_object *o,
+int mdd_attr_set_internal(const struct lu_env *env, struct mdd_object *o,
const struct lu_attr *attr, struct thandle *handle);
-int mdd_get_cookie_size(const struct lu_context *ctxt, struct mdd_device *mdd,
+int mdd_get_cookie_size(const struct lu_env *env, struct mdd_device *mdd,
struct lov_mds_md *lmm);
-int mdd_lov_setattr_async(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_lov_setattr_async(const struct lu_env *env, struct mdd_object *obj,
struct lov_mds_md *lmm, int lmm_size);
-struct mdd_thread_info *mdd_ctx_info(const struct lu_context *ctx);
+struct mdd_thread_info *mdd_env_info(const struct lu_env *env);
-void mdd_read_lock(const struct lu_context *ctxt, struct mdd_object *obj);
-void mdd_read_unlock(const struct lu_context *ctxt, struct mdd_object *obj);
-void mdd_write_lock(const struct lu_context *ctxt, struct mdd_object *obj);
-void mdd_write_unlock(const struct lu_context *ctxt, struct mdd_object *obj);
+void mdd_read_lock(const struct lu_env *env, struct mdd_object *obj);
+void mdd_read_unlock(const struct lu_env *env, struct mdd_object *obj);
+void mdd_write_lock(const struct lu_env *env, struct mdd_object *obj);
+void mdd_write_unlock(const struct lu_env *env, struct mdd_object *obj);
-int __mdd_orphan_cleanup(const struct lu_context *ctx, struct mdd_device *d);
-int __mdd_orphan_add(const struct lu_context *, struct mdd_object *,
+int __mdd_orphan_cleanup(const struct lu_env *env, struct mdd_device *d);
+int __mdd_orphan_add(const struct lu_env *, struct mdd_object *,
struct thandle *);
-int __mdd_orphan_del(const struct lu_context *, struct mdd_object *,
+int __mdd_orphan_del(const struct lu_env *, struct mdd_object *,
struct thandle *);
-int orph_index_init(const struct lu_context *ctx, struct mdd_device *mdd);
-void orph_index_fini(const struct lu_context *ctx, struct mdd_device *mdd);
-int __mdd_object_kill(const struct lu_context *, struct mdd_object *,
+int orph_index_init(const struct lu_env *env, struct mdd_device *mdd);
+void orph_index_fini(const struct lu_env *env, struct mdd_device *mdd);
+int __mdd_object_kill(const struct lu_env *, struct mdd_object *,
struct md_attr *);
-struct mdd_object *mdd_object_find(const struct lu_context *,
+struct mdd_object *mdd_object_find(const struct lu_env *,
struct mdd_device *,
const struct lu_fid *);
-static inline void mdd_object_put(const struct lu_context *ctxt,
+static inline void mdd_object_put(const struct lu_env *env,
struct mdd_object *o)
{
- lu_object_put(ctxt, &o->mod_obj.mo_lu);
+ lu_object_put(env, &o->mod_obj.mo_lu);
}
extern struct lu_device_operations mdd_lu_ops;
return lu_object_attr(&obj->mod_obj.mo_lu);
}
-static inline int mdd_lov_mdsize(const struct lu_context *ctxt,
+static inline int mdd_lov_mdsize(const struct lu_env *env,
struct mdd_device *mdd)
{
struct obd_device *obd = mdd2obd_dev(mdd);
return obd->u.mds.mds_max_mdsize;
}
-static inline int mdd_lov_cookiesize(const struct lu_context *ctxt,
+static inline int mdd_lov_cookiesize(const struct lu_env *env,
struct mdd_device *mdd)
{
struct obd_device *obd = mdd2obd_dev(mdd);
}
/* The obd is created for handling data stack for mdd */
-int mdd_init_obd(const struct lu_context *ctxt, struct mdd_device *mdd,
+int mdd_init_obd(const struct lu_env *env, struct mdd_device *mdd,
struct lustre_cfg *cfg)
{
struct lustre_cfg_bufs *bufs;
GOTO(class_detach, rc);
/*
* Add here for obd notify mechiasm,
- * when adding a new ost, the mds will notify this mdd
+ * when adding a new ost, the mds will notify this mdd
*/
obd->obd_upcall.onu_owner = mdd;
obd->obd_upcall.onu_upcall = mdd_lov_update;
RETURN(rc);
}
-int mdd_fini_obd(const struct lu_context *ctxt, struct mdd_device *mdd)
+int mdd_fini_obd(const struct lu_env *env, struct mdd_device *mdd)
{
struct lustre_cfg_bufs *bufs;
struct lustre_cfg *lcfg;
obd = mdd2obd_dev(mdd);
LASSERT(obd);
-
+
OBD_ALLOC_PTR(bufs);
if (!bufs)
RETURN(-ENOMEM);
RETURN(rc);
}
-int mdd_get_md(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_get_md(const struct lu_env *env, struct mdd_object *obj,
void *md, int *md_size, const char *name)
{
struct dt_object *next;
ENTRY;
next = mdd_object_child(obj);
- rc = next->do_ops->do_xattr_get(ctxt, next, md, *md_size, name);
+ rc = next->do_ops->do_xattr_get(env, next, md, *md_size, name);
/*
* XXX: handling of -ENODATA, the right way is to have ->do_md_get()
* exported by dt layer.
RETURN (rc);
}
-int mdd_get_md_locked(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_get_md_locked(const struct lu_env *env, struct mdd_object *obj,
void *md, int *md_size, const char *name)
{
int rc = 0;
- mdd_read_lock(ctxt, obj);
- rc = mdd_get_md(ctxt, obj, md, md_size, name);
- mdd_read_unlock(ctxt, obj);
+ mdd_read_lock(env, obj);
+ rc = mdd_get_md(env, obj, md, md_size, name);
+ mdd_read_unlock(env, obj);
return rc;
}
-static int mdd_lov_set_stripe_md(const struct lu_context *ctxt,
+static int mdd_lov_set_stripe_md(const struct lu_env *env,
struct mdd_object *obj, struct lov_mds_md *lmmp,
int lmm_size, struct thandle *handle)
{
RETURN(rc);
obd_free_memmd(lov_exp, &lsm);
- rc = mdd_xattr_set_txn(ctxt, obj, lmmp, lmm_size, MDS_LOV_MD_NAME, 0,
+ rc = mdd_xattr_set_txn(env, obj, lmmp, lmm_size, MDS_LOV_MD_NAME, 0,
handle);
CDEBUG(D_INFO, "set lov ea of "DFID" rc %d \n", PFID(mdo2fid(obj)), rc);
RETURN(rc);
}
-static int mdd_lov_set_dir_md(const struct lu_context *ctxt,
+static int mdd_lov_set_dir_md(const struct lu_env *env,
struct mdd_object *obj, struct lov_mds_md *lmmp,
int lmm_size, struct thandle *handle)
{
lum->lmm_stripe_offset == (typeof(lum->lmm_stripe_offset))(-1)) ||
/* lmm_stripe_size == -1 is deprecated in 1.4.6 */
lum->lmm_stripe_size == (typeof(lum->lmm_stripe_size))(-1)){
- rc = mdd_xattr_set_txn(ctxt, obj, NULL, 0, MDS_LOV_MD_NAME, 0,
+ rc = mdd_xattr_set_txn(env, obj, NULL, 0, MDS_LOV_MD_NAME, 0,
handle);
if (rc == -ENODATA)
rc = 0;
CDEBUG(D_INFO, "delete lov ea of "DFID" rc %d \n",
PFID(mdo2fid(obj)), rc);
} else {
- rc = mdd_lov_set_stripe_md(ctxt, obj, lmmp, lmm_size, handle);
+ rc = mdd_lov_set_stripe_md(env, obj, lmmp, lmm_size, handle);
}
RETURN(rc);
}
-int mdd_lov_set_md(const struct lu_context *ctxt, struct mdd_object *pobj,
+int mdd_lov_set_md(const struct lu_env *env, struct mdd_object *pobj,
struct mdd_object *child, struct lov_mds_md *lmmp,
int lmm_size, struct thandle *handle, int set_stripe)
{
mode = mdd_object_type(child);
if (S_ISREG(mode) && lmm_size > 0) {
if (set_stripe) {
- rc = mdd_lov_set_stripe_md(ctxt, child, lmmp, lmm_size,
+ rc = mdd_lov_set_stripe_md(env, child, lmmp, lmm_size,
handle);
} else {
- rc = mdd_xattr_set_txn(ctxt, child, lmmp, lmm_size,
+ rc = mdd_xattr_set_txn(env, child, lmmp, lmm_size,
MDS_LOV_MD_NAME, 0, handle);
}
} else if (S_ISDIR(mode)) {
if (lmmp == NULL && lmm_size == 0) {
- struct lov_mds_md *lmm = &mdd_ctx_info(ctxt)->mti_lmm;
+ struct lov_mds_md *lmm = &mdd_env_info(env)->mti_lmm;
int size = sizeof(lmm);
/* Get parent dir stripe and set */
if (pobj != NULL)
- rc = mdd_get_md(ctxt, pobj, &lmm, &size,
+ rc = mdd_get_md(env, pobj, &lmm, &size,
MDS_LOV_MD_NAME);
if (rc > 0) {
- rc = mdd_xattr_set_txn(ctxt, child, lmm, size,
+ rc = mdd_xattr_set_txn(env, child, lmm, size,
MDS_LOV_MD_NAME, 0, handle);
if (rc)
CERROR("error on copy stripe info: rc "
}
} else {
LASSERT(lmmp != NULL && lmm_size > 0);
- rc = mdd_lov_set_dir_md(ctxt, child, lmmp,
+ rc = mdd_lov_set_dir_md(env, child, lmmp,
lmm_size, handle);
}
}
return ((fid_seq(fid) - 1) * LUSTRE_SEQ_MAX_WIDTH + fid_oid(fid));
}
-static int mdd_lov_objid_alloc(const struct lu_context *ctxt,
+static int mdd_lov_objid_alloc(const struct lu_env *env,
struct mdd_device *mdd)
{
- struct mdd_thread_info *info = mdd_ctx_info(ctxt);
+ struct mdd_thread_info *info = mdd_env_info(env);
struct mds_obd *mds = &mdd->mdd_obd_dev->u.mds;
-
+
OBD_ALLOC(info->mti_oti.oti_objid,
mds->mds_lov_desc.ld_tgt_count * sizeof(obd_id));
return (info->mti_oti.oti_objid == NULL ? -ENOMEM : 0);
}
-static void mdd_lov_objid_update(const struct lu_context *ctxt,
+static void mdd_lov_objid_update(const struct lu_env *env,
struct mdd_device *mdd)
{
- struct mdd_thread_info *info = mdd_ctx_info(ctxt);
+ struct mdd_thread_info *info = mdd_env_info(env);
mds_lov_update_objids(mdd->mdd_obd_dev, info->mti_oti.oti_objid);
}
-static void mdd_lov_objid_from_lmm(const struct lu_context *ctx,
- struct mdd_device *mdd,
+static void mdd_lov_objid_from_lmm(const struct lu_env *env,
+ struct mdd_device *mdd,
struct lov_mds_md *lmm)
{
struct mds_obd *mds = &mdd->mdd_obd_dev->u.mds;
- struct mdd_thread_info *info = mdd_ctx_info(ctx);
+ struct mdd_thread_info *info = mdd_env_info(env);
mds_objids_from_lmm(info->mti_oti.oti_objid, lmm, &mds->mds_lov_desc);
}
-static void mdd_lov_objid_free(const struct lu_context *ctxt,
+static void mdd_lov_objid_free(const struct lu_env *env,
struct mdd_device *mdd)
{
- struct mdd_thread_info *info = mdd_ctx_info(ctxt);
+ struct mdd_thread_info *info = mdd_env_info(env);
struct mds_obd *mds = &mdd->mdd_obd_dev->u.mds;
OBD_FREE(info->mti_oti.oti_objid,
info->mti_oti.oti_objid = NULL;
}
-void mdd_lov_create_finish(const struct lu_context *ctxt,
+void mdd_lov_create_finish(const struct lu_env *env,
struct mdd_device *mdd, int rc)
{
- struct mdd_thread_info *info = mdd_ctx_info(ctxt);
+ struct mdd_thread_info *info = mdd_env_info(env);
if (info->mti_oti.oti_objid != NULL) {
if (rc == 0)
- mdd_lov_objid_update(ctxt, mdd);
- mdd_lov_objid_free(ctxt, mdd);
+ mdd_lov_objid_update(env, mdd);
+ mdd_lov_objid_free(env, mdd);
}
}
-int mdd_lov_create(const struct lu_context *ctxt, struct mdd_device *mdd,
+int mdd_lov_create(const struct lu_env *env, struct mdd_device *mdd,
struct mdd_object *parent, struct mdd_object *child,
struct lov_mds_md **lmm, int *lmm_size,
const struct md_create_spec *spec, struct lu_attr *la)
const void *eadata = spec->u.sp_ea.eadata;
__u32 create_flags = spec->sp_cr_flags;
int rc = 0;
- struct obd_trans_info *oti = &mdd_ctx_info(ctxt)->mti_oti;
+ struct obd_trans_info *oti = &mdd_env_info(env)->mti_oti;
ENTRY;
if (create_flags & MDS_OPEN_DELAY_CREATE ||
RETURN(0);
oti_init(oti, NULL);
- rc = mdd_lov_objid_alloc(ctxt, mdd);
+ rc = mdd_lov_objid_alloc(env, mdd);
if (rc != 0)
RETURN(rc);
/* replay case, should get lov from eadata */
if (spec->u.sp_ea.no_lov_create != 0) {
- mdd_lov_objid_from_lmm(ctxt, mdd, (struct lov_mds_md *)eadata);
+ mdd_lov_objid_from_lmm(env, mdd, (struct lov_mds_md *)eadata);
RETURN(0);
}
-
+
if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_ALLOC_OBDO))
GOTO(out_ids, rc = -ENOMEM);
oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLFLAGS |
OBD_MD_FLMODE | OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLGROUP;
oa->o_size = 0;
-
+
if (!(create_flags & MDS_OPEN_HAS_OBJS)) {
if (create_flags & MDS_OPEN_HAS_EA) {
LASSERT(eadata != NULL);
/* get lov ea from parent and set to lov */
struct lov_mds_md *__lmm;
int __lmm_size, returned_lmm_size;
- __lmm_size = mdd_lov_mdsize(ctxt, mdd);
+ __lmm_size = mdd_lov_mdsize(env, mdd);
returned_lmm_size = __lmm_size;
OBD_ALLOC(__lmm, __lmm_size);
if (__lmm == NULL)
GOTO(out_oa, rc = -ENOMEM);
- rc = mdd_get_md_locked(ctxt, parent, __lmm,
+ rc = mdd_get_md_locked(env, parent, __lmm,
&returned_lmm_size, MDS_LOV_MD_NAME);
if (rc > 0)
rc = obd_iocontrol(OBD_IOC_LOV_SETSTRIPE,
* attr is in charged by OST.
*/
if (la->la_size && la->la_valid & LA_SIZE) {
- struct obd_info *oinfo = &mdd_ctx_info(ctxt)->mti_oi;
+ struct obd_info *oinfo = &mdd_env_info(env)->mti_oi;
memset(oinfo, 0, sizeof(*oinfo));
oa->o_size = la->la_size;
/* when setting attr to ost, FLBKSZ is not needed */
- oa->o_valid &= ~OBD_MD_FLBLKSZ;
+ oa->o_valid &= ~OBD_MD_FLBLKSZ;
obdo_from_la(oa, la, OBD_MD_FLTYPE | OBD_MD_FLATIME |
OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLSIZE);
/* blksize should be changed after create data object */
la->la_valid |= LA_BLKSIZE;
la->la_blksize = oa->o_blksize;
-
+
rc = obd_packmd(lov_exp, lmm, lsm);
if (rc < 0) {
CERROR("cannot pack lsm, err = %d\n", rc);
out_ids:
if (lsm)
obd_free_memmd(lov_exp, &lsm);
- if (rc != 0)
- mdd_lov_objid_free(ctxt, mdd);
+ if (rc != 0)
+ mdd_lov_objid_free(env, mdd);
RETURN(rc);
}
-int mdd_unlink_log(const struct lu_context *ctxt, struct mdd_device *mdd,
+int mdd_unlink_log(const struct lu_env *env, struct mdd_device *mdd,
struct mdd_object *mdd_cobj, struct md_attr *ma)
{
struct obd_device *obd = mdd2obd_dev(mdd);
return 0;
}
-int mdd_lov_setattr_async(const struct lu_context *ctxt, struct mdd_object *obj,
+int mdd_lov_setattr_async(const struct lu_env *env, struct mdd_object *obj,
struct lov_mds_md *lmm, int lmm_size)
{
struct mdd_device *mdd = mdo2mdd(&obj->mod_obj);
struct obd_device *obd = mdd2obd_dev(mdd);
- struct lu_attr *tmp_la = &mdd_ctx_info(ctxt)->mti_la;
+ struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
struct dt_object *next = mdd_object_child(obj);
__u32 seq = lu_object_fid(mdd2lu_obj(obj))->f_seq;
__u32 oid = lu_object_fid(mdd2lu_obj(obj))->f_oid;
int rc = 0;
ENTRY;
- rc = next->do_ops->do_attr_get(ctxt, next, tmp_la);
+ rc = next->do_ops->do_attr_get(env, next, tmp_la);
if (rc)
RETURN(rc);
ORPH_OP_TRUNCATE
};
-static struct orph_key *orph_key_fill(const struct lu_context *ctx,
+static struct orph_key *orph_key_fill(const struct lu_env *env,
const struct lu_fid *lf, __u32 op)
{
- struct orph_key *key = &mdd_ctx_info(ctx)->mti_orph_key;
+ struct orph_key *key = &mdd_env_info(env)->mti_orph_key;
LASSERT(key);
key->ok_fid.f_seq = cpu_to_be64(fid_seq(lf));
key->ok_fid.f_oid = cpu_to_be32(fid_oid(lf));
return key;
}
-static int orph_index_insert(const struct lu_context *ctx,
+static int orph_index_insert(const struct lu_env *env,
struct mdd_object *obj, __u32 op,
loff_t *offset, struct thandle *th)
{
struct mdd_device *mdd = mdo2mdd(&obj->mod_obj);
struct dt_object *dor = mdd->mdd_orphans;
- struct orph_key *key = orph_key_fill(ctx, mdo2fid(obj), op);
+ struct orph_key *key = orph_key_fill(env, mdo2fid(obj), op);
int rc;
ENTRY;
- rc = dor->do_index_ops->dio_insert(ctx, dor, (struct dt_rec *)offset,
+ rc = dor->do_index_ops->dio_insert(env, dor, (struct dt_rec *)offset,
(struct dt_key *)key, th);
RETURN(rc);
}
-static int orph_index_delete(const struct lu_context *ctx,
+static int orph_index_delete(const struct lu_env *env,
struct mdd_object *obj, __u32 op,
struct thandle *th)
{
struct mdd_device *mdd = mdo2mdd(&obj->mod_obj);
struct dt_object *dor = mdd->mdd_orphans;
- struct orph_key *key = orph_key_fill(ctx, mdo2fid(obj), op);
+ struct orph_key *key = orph_key_fill(env, mdo2fid(obj), op);
int rc;
ENTRY;
LASSERT(dor);
- rc = dor->do_index_ops->dio_delete(ctx, dor,
+ rc = dor->do_index_ops->dio_delete(env, dor,
(struct dt_key *)key, th);
RETURN(rc);
}
-static inline struct orph_key *orph_key_empty(const struct lu_context *ctx,
+static inline struct orph_key *orph_key_empty(const struct lu_env *env,
__u32 op)
{
- struct orph_key *key = &mdd_ctx_info(ctx)->mti_orph_key;
+ struct orph_key *key = &mdd_env_info(env)->mti_orph_key;
LASSERT(key);
key->ok_fid.f_seq = 0;
key->ok_fid.f_oid = 0;
return key;
}
-static void orph_key_test_and_del(const struct lu_context *ctx,
+static void orph_key_test_and_del(const struct lu_env *env,
struct mdd_device *mdd,
const struct orph_key *key)
{
struct mdd_object *mdo;
- mdo = mdd_object_find(ctx, mdd, &key->ok_fid);
+ mdo = mdd_object_find(env, mdd, &key->ok_fid);
if (IS_ERR(mdo))
CERROR("Invalid orphan!\n");
else {
- mdd_write_lock(ctx, mdo);
+ mdd_write_lock(env, mdo);
if (mdo->mod_count == 0) {
/* non-opened orphan, let's delete it */
- struct md_attr *ma = &mdd_ctx_info(ctx)->mti_ma;
+ struct md_attr *ma = &mdd_env_info(env)->mti_ma;
CWARN("Found orphan!\n");
- __mdd_object_kill(ctx, mdo, ma);
+ __mdd_object_kill(env, mdo, ma);
/* TODO: now handle OST objects */
- //mdd_ost_objects_destroy(ctx, ma);
+ //mdd_ost_objects_destroy(env, ma);
/* TODO: destroy index entry */
}
- mdd_write_unlock(ctx, mdo);
- mdd_object_put(ctx, mdo);
- }
+ mdd_write_unlock(env, mdo);
+ mdd_object_put(env, mdo);
+ }
}
-static int orph_index_iterate(const struct lu_context *ctx,
+static int orph_index_iterate(const struct lu_env *env,
struct mdd_device *mdd)
{
struct dt_object *dt_obj = mdd->mdd_orphans;
struct dt_it *it;
struct dt_it_ops *iops;
- struct orph_key *key = orph_key_empty(ctx, 0);
+ struct orph_key *key = orph_key_empty(env, 0);
int result;
ENTRY;
iops = &dt_obj->do_index_ops->dio_it;
- it = iops->init(ctx, dt_obj, 1);
+ it = iops->init(env, dt_obj, 1);
if (it != NULL) {
- result = iops->get(ctx, it, (const void *)key);
+ result = iops->get(env, it, (const void *)key);
if (result > 0) {
int i;
/* main cycle */
for (result = 0, i = 0; result == +1; ++i) {
- key = (void *)iops->key(ctx, it);
- orph_key_test_and_del(ctx, mdd, key);
- result = iops->next(ctx, it);
+ key = (void *)iops->key(env, it);
+ orph_key_test_and_del(env, mdd, key);
+ result = iops->next(env, it);
}
- } else if (result == 0)
+ } else if (result == 0)
/* Index contains no zero key? */
result = -EIO;
-
- iops->put(ctx, it);
- iops->fini(ctx, it);
+
+ iops->put(env, it);
+ iops->fini(env, it);
} else
result = -ENOMEM;
RETURN(result);
}
-int orph_index_init(const struct lu_context *ctx, struct mdd_device *mdd)
+int orph_index_init(const struct lu_env *env, struct mdd_device *mdd)
{
struct lu_fid fid;
struct dt_object *d;
int rc;
ENTRY;
- d = dt_store_open(ctx, mdd->mdd_child, orph_index_name, &fid);
+ d = dt_store_open(env, mdd->mdd_child, orph_index_name, &fid);
if (!IS_ERR(d)) {
mdd->mdd_orphans = d;
- rc = d->do_ops->do_index_try(ctx, d, &orph_index_features);
+ rc = d->do_ops->do_index_try(env, d, &orph_index_features);
if (rc == 0)
LASSERT(d->do_index_ops != NULL);
else
RETURN(rc);
}
-void orph_index_fini(const struct lu_context *ctx, struct mdd_device *mdd)
+void orph_index_fini(const struct lu_env *env, struct mdd_device *mdd)
{
ENTRY;
if (mdd->mdd_orphans != NULL) {
- lu_object_put(ctx, &mdd->mdd_orphans->do_lu);
+ lu_object_put(env, &mdd->mdd_orphans->do_lu);
mdd->mdd_orphans = NULL;
}
EXIT;
}
-int __mdd_orphan_cleanup(const struct lu_context *ctx, struct mdd_device *d)
+int __mdd_orphan_cleanup(const struct lu_env *env, struct mdd_device *d)
{
- return orph_index_iterate(ctx, d);
+ return orph_index_iterate(env, d);
}
-int __mdd_orphan_add(const struct lu_context *ctx,
+int __mdd_orphan_add(const struct lu_env *env,
struct mdd_object *obj, struct thandle *th)
{
loff_t offset = 0;
- return orph_index_insert(ctx, obj, ORPH_OP_UNLINK, &offset, th);
+ return orph_index_insert(env, obj, ORPH_OP_UNLINK, &offset, th);
}
-int __mdd_orphan_del(const struct lu_context *ctx,
+int __mdd_orphan_del(const struct lu_env *env,
struct mdd_object *obj, struct thandle *th)
{
- return orph_index_delete(ctx, obj, ORPH_OP_UNLINK, th);
+ return orph_index_delete(env, obj, ORPH_OP_UNLINK, th);
}
/*
if (async)
oti.oti_flags |= OBD_MODE_ASYNC;
-
+
rc = obd_destroy(mds->mds_dt_exp, oa, lsm, &oti);
obdo_free(oa);
out_free_memmd:
struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
struct ldlm_res_id res_id = { .name = {0} };
int flags = LDLM_FL_ATOMIC_CB, rc;
- ldlm_policy_data_t policy = { .l_inodebits = { lockpart} };
+ ldlm_policy_data_t policy = { .l_inodebits = { lockpart} };
ENTRY;
if (IS_ERR(de))
res_id.name[0] = de->d_inode->i_ino;
res_id.name[1] = de->d_inode->i_generation;
- rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id,
- LDLM_IBITS, &policy, lock_mode, &flags,
+ rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id,
+ LDLM_IBITS, &policy, lock_mode, &flags,
ldlm_blocking_ast, ldlm_completion_ast,
NULL, NULL, 0, NULL, lockh);
if (rc != ELDLM_OK) {
* about that client, like open files, the last operation number it did
* on the server, etc.
*/
-static int mds_connect(const struct lu_context *ctx,
+static int mds_connect(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid, struct obd_connect_data *data)
{
if (resent_req == 0) {
if (name) {
- rc = mds_get_parent_child_locked(obd, &obd->u.mds,
+ rc = mds_get_parent_child_locked(obd, &obd->u.mds,
&body->fid1,
- &parent_lockh,
+ &parent_lockh,
&dparent, LCK_CR,
MDS_INODELOCK_UPDATE,
name, namesize,
LASSERT(dchild);
if (IS_ERR(dchild))
rc = PTR_ERR(dchild);
- }
+ }
if (rc)
GOTO(cleanup, rc);
} else {
/* If we're DISCONNECTing, the mds_export_data is already freed */
if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != MDS_DISCONNECT) {
struct mds_export_data *med = &req->rq_export->exp_mds_data;
-
+
/* I don't think last_xid is used for anyway, so I'm not sure
if we need to care about last_close_xid here.*/
lustre_msg_set_last_xid(req->rq_repmsg,
ENTRY;
/* setup 1:/dev/loop/0 2:ext3 3:mdsA 4:errors=remount-ro,iopen_nopriv */
-
+
CLASSERT(offsetof(struct obd_device, u.obt) ==
offsetof(struct obd_device, u.mds.mds_obt));
case OBD_CLEANUP_EARLY:
break;
case OBD_CLEANUP_EXPORTS:
- /*XXX Use this for mdd mds cleanup, so comment out
+ /*XXX Use this for mdd mds cleanup, so comment out
*this target_cleanup_recovery for this tmp MDD MDS
*Wangdi*/
if (strncmp(obd->obd_name, MDD_OBD_NAME, strlen(MDD_OBD_NAME)))
- target_cleanup_recovery(obd);
+ target_cleanup_recovery(obd);
mds_lov_early_clean(obd);
break;
case OBD_CLEANUP_SELF_EXP:
unlock_kernel();
must_relock++;
}
-
+
if (must_put) {
/* In case we didn't mount with lustre_get_mount -- old method*/
mntput(mds->mds_vfsmnt);
int rc;
lprocfs_init_vars(mds, &lvars);
-
+
rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, lcfg, obd);
return(rc);
}
CDEBUG(D_INFO, "obd %s setup \n", obd->obd_name);
if (strncmp(obd->obd_name, MDD_OBD_NAME, strlen(MDD_OBD_NAME)))
RETURN(0);
-
+
if (lcfg->lcfg_bufcount < 5) {
CERROR("invalid arg for setup %s\n", MDD_OBD_NAME);
RETURN(-EINVAL);
}
dev = lustre_cfg_string(lcfg, 4);
lmi = server_get_mount(dev);
- LASSERT(lmi != NULL);
-
+ LASSERT(lmi != NULL);
+
lsi = s2lsi(lmi->lmi_sb);
mnt = lmi->lmi_mnt;
/* FIXME: MDD LOV initialize objects.
CERROR("__iopen__ directory has no inode? rc = %d\n", rc);
GOTO(err_fid, rc);
}
-
+
/* open and test the lov objd file */
file = filp_open(LOV_OBJID, O_RDWR | O_CREAT, 0644);
if (IS_ERR(file)) {
obd->obd_async_recov = 1;
rc = mds_postsetup(obd);
obd->obd_async_recov = 0;
-
+
if (rc)
GOTO(err_objects, rc);
-
+
mds->mds_max_mdsize = sizeof(struct lov_mds_md);
mds->mds_max_cookiesize = sizeof(struct llog_cookie);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
RETURN(rc);
err_lov_objid:
- if (mds->mds_lov_objid_filp &&
+ if (mds->mds_lov_objid_filp &&
filp_close((struct file *)mds->mds_lov_objid_filp, 0))
CERROR("can't close %s after error\n", LOV_OBJID);
err_fid:
dput(mds->mds_fid_de);
LL_DQUOT_OFF(obd->u.obt.obt_sb);
fsfilt_put_ops(obd->obd_fsops);
-
+
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
RETURN(rc);
}
*
* lustre/mdt/mdt_capa.c
* Lustre Metadata Target (mdt) capability key read/write/update.
- *
+ *
* Copyright (C) 2005 Cluster File Systems, Inc.
* Author: Lai Siyao <lsy@clusterfs.com>
*
memcpy(tgt->lk_key, src->lk_key, sizeof(src->lk_key));
}
-static int write_capa_keys(const struct lu_context *ctx,
+static int write_capa_keys(const struct lu_env *env,
struct mdt_device *mdt,
struct lustre_capa_key *keys)
{
loff_t off = 0;
int i, rc;
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
- th = mdt_trans_start(ctx, mdt, MDT_TXN_CAPA_KEYS_WRITE_CREDITS);
+ th = mdt_trans_start(env, mdt, MDT_TXN_CAPA_KEYS_WRITE_CREDITS);
if (IS_ERR(th))
RETURN(PTR_ERR(th));
for (i = 0; i < 2; i++) {
lck_cpu_to_le(tmp, &keys[i]);
- rc = mdt_record_write(ctx, mdt->mdt_ck_obj, tmp, sizeof(*tmp),
+ rc = mdt_record_write(env, mdt->mdt_ck_obj, tmp, sizeof(*tmp),
&off, th);
if (rc)
break;
}
- mdt_trans_stop(ctx, mdt, th);
+ mdt_trans_stop(env, mdt, th);
CDEBUG(D_INFO, "write capability keys rc = %d:\n", rc);
return rc;
}
-static int read_capa_keys(const struct lu_context *ctx,
+static int read_capa_keys(const struct lu_env *env,
struct mdt_device *mdt,
struct lustre_capa_key *keys)
{
loff_t off = 0;
int i, rc;
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
tmp = &mti->mti_capa_key;
for (i = 0; i < 2; i++) {
- rc = mdt_record_read(ctx, mdt->mdt_ck_obj, tmp, sizeof(*tmp),
+ rc = mdt_record_read(env, mdt->mdt_ck_obj, tmp, sizeof(*tmp),
&off);
if (rc)
return rc;
return 0;
}
-int mdt_capa_keys_init(const struct lu_context *ctx, struct mdt_device *mdt)
+int mdt_capa_keys_init(const struct lu_env *env, struct mdt_device *mdt)
{
struct lustre_capa_key *keys = mdt->mdt_capa_keys;
struct mdt_thread_info *mti;
mdsnum = mdt->mdt_md_dev.md_lu_dev.ld_site->ls_node_id;
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
LASSERT(mti != NULL);
la = &mti->mti_attr.ma_attr;
obj = mdt->mdt_ck_obj;
- obj->do_ops->do_read_lock(ctx, obj);
- rc = obj->do_ops->do_attr_get(ctx, mdt->mdt_ck_obj, la);
- obj->do_ops->do_read_unlock(ctx, obj);
+ obj->do_ops->do_read_lock(env, obj);
+ rc = obj->do_ops->do_attr_get(env, mdt->mdt_ck_obj, la);
+ obj->do_ops->do_read_unlock(env, obj);
if (rc)
RETURN(rc);
DEBUG_CAPA_KEY(D_SEC, &keys[i], "initializing");
}
- rc = write_capa_keys(ctx, mdt, keys);
+ rc = write_capa_keys(env, mdt, keys);
if (rc) {
CERROR("error writing MDS %s: rc %d\n", CAPA_KEYS, rc);
RETURN(rc);
}
} else {
- rc = read_capa_keys(ctx, mdt, keys);
+ rc = read_capa_keys(env, mdt, keys);
if (rc) {
CERROR("error reading MDS %s: rc %d\n", CAPA_KEYS, rc);
RETURN(rc);
struct mdt_device *mdt = args;
struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
struct lustre_capa_key *tmp, *key = red_capa_key(mdt);
- struct lu_context ctx;
+ struct lu_env env;
struct mdt_thread_info *info;
struct md_device *next;
struct l_wait_info lwi = { 0 };
thread->t_flags = SVC_RUNNING;
cfs_waitq_signal(&thread->t_ctl_waitq);
- rc = lu_context_init(&ctx, LCT_MD_THREAD);
+ rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
if (rc)
RETURN(rc);
- thread->t_ctx = &ctx;
- ctx.lc_thread = thread;
+ thread->t_env = &env;
+ env.le_ctx.lc_thread = thread;
- lu_context_enter(&ctx);
- info = lu_context_key_get(&ctx, &mdt_thread_key);
+ info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
LASSERT(info != NULL);
tmp = &info->mti_capa_key;
make_capa_key(tmp, mdsnum, key->lk_keyid);
next = mdt->mdt_child;
- rc = next->md_ops->mdo_update_capa_key(&ctx, next, tmp);
+ rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
if (!rc) {
- rc = write_capa_keys(&ctx, mdt, mdt->mdt_capa_keys);
+ rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
if (!rc) {
spin_lock(&capa_lock);
mdt->mdt_capa_keys[0] = *key;
mod_timer(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
CDEBUG(D_SEC, "mdt_ck_timer %lu\n", mdt->mdt_ck_expiry);
}
- lu_context_exit(&ctx);
- lu_context_fini(&ctx);
+ lu_env_fini(&env);
thread->t_flags = SVC_STOPPED;
cfs_waitq_signal(&thread->t_ctl_waitq);
rc = -ENOMEM;
} else {
body = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY);
- rc = next->md_ops->mdo_root_get(info->mti_ctxt, next,
+ rc = next->md_ops->mdo_root_get(info->mti_env, next,
&body->fid1, NULL);
if (rc == 0)
body->valid |= OBD_MD_FLID;
} else {
osfs = req_capsule_server_get(&info->mti_pill,&RMF_OBD_STATFS);
/* XXX max_age optimisation is needed here. See mds_statfs */
- rc = next->md_ops->mdo_statfs(info->mti_ctxt, next,
+ rc = next->md_ops->mdo_statfs(info->mti_env, next,
&info->mti_u.ksfs, NULL);
statfs_pack(osfs, &info->mti_u.ksfs);
}
struct md_attr *ma = &info->mti_attr;
struct lu_attr *la = &ma->ma_attr;
struct req_capsule *pill = &info->mti_pill;
- const struct lu_context *ctxt = info->mti_ctxt;
+ const struct lu_env *env = info->mti_env;
struct mdt_body *repbody;
void *buffer;
int length;
ma->ma_lmm_size = req_capsule_get_size(pill, &RMF_MDT_MD,
RCL_SERVER);
}
- rc = mo_attr_get(ctxt, next, ma, NULL);
+ rc = mo_attr_get(env, next, ma, NULL);
if (rc == -EREMOTE) {
/* This object is located on remote node.*/
repbody->fid1 = *mdt_object_fid(o);
}
} else if (S_ISLNK(la->la_mode) &&
reqbody->valid & OBD_MD_LINKNAME) {
- rc = mo_readlink(ctxt, next, ma->ma_lmm, ma->ma_lmm_size, NULL);
+ rc = mo_readlink(env, next, ma->ma_lmm, ma->ma_lmm_size, NULL);
if (rc <= 0) {
CERROR("readlink failed: %d\n", rc);
rc = -EFAULT;
buffer = req_capsule_server_get(pill, &RMF_ACL);
length = req_capsule_get_size(pill, &RMF_ACL, RCL_SERVER);
if (length > 0) {
- rc = mo_xattr_get(ctxt, next, buffer, length,
+ rc = mo_xattr_get(env, next, buffer, length,
XATTR_NAME_ACL_ACCESS, NULL);
if (rc < 0) {
if (rc == -ENODATA || rc == -EOPNOTSUPP)
capa = req_capsule_server_get(&info->mti_pill, &RMF_CAPA1);
LASSERT(capa);
capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
- rc = mo_capa_get(ctxt, next, capa);
+ rc = mo_capa_get(env, next, capa);
if (rc)
RETURN(rc);
repbody->valid |= OBD_MD_FLMDSCAPA;
* directory case.
*/
LASSERT(fid_is_sane(&info->mti_body->fid2));
- rc = mdo_is_subdir(info->mti_ctxt, mdt_object_child(obj),
+ rc = mdo_is_subdir(info->mti_env, mdt_object_child(obj),
&info->mti_body->fid2, &repbody->fid1, NULL);
if (rc < 0)
RETURN(rc);
RETURN(rc);
/*step 2: lookup child's fid by name */
- rc = mdo_lookup(info->mti_ctxt, next, name, child_fid, &info->mti_uc);
+ rc = mdo_lookup(info->mti_env, next, name, child_fid, &info->mti_uc);
if (rc != 0) {
if (rc == -ENOENT)
mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
*step 3: find the child object by fid & lock it.
* regardless if it is local or remote.
*/
- child = mdt_object_find(info->mti_ctxt, info->mti_mdt, child_fid,
+ child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid,
BYPASS_CAPA);
if (IS_ERR(child))
GOTO(out_parent, rc = PTR_ERR(child));
}
EXIT;
out_child:
- mdt_object_put(info->mti_ctxt, child);
+ mdt_object_put(info->mti_env, child);
out_parent:
mdt_object_unlock(info, parent, lhp, 1);
out:
* will find better way */
OBD_ALLOC(name, ent->lde_namelen + 1);
memcpy(name, ent->lde_name, ent->lde_namelen);
- rc = mdo_name_insert(info->mti_ctxt,
+ rc = mdo_name_insert(info->mti_env,
md_object_next(&object->mot_obj),
name, lf, 0, NULL);
OBD_FREE(name, ent->lde_namelen + 1);
}
/* call lower layers to fill allocated pages with directory data */
- rc = mo_readpage(info->mti_ctxt, mdt_object_child(object), rdpg,
+ rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg,
&info->mti_uc);
if (rc) {
if (rc == -ERANGE)
next = mdt_object_child(info->mti_object);
info->mti_attr.ma_need = MA_INODE;
- rc = mo_attr_get(info->mti_ctxt, next,
+ rc = mo_attr_get(info->mti_env, next,
&info->mti_attr, NULL);
if (rc == 0) {
body = req_capsule_server_get(pill,
struct mdt_device *mdt = info->mti_mdt;
struct mdt_object *obj = info->mti_object;
struct mdt_body *body;
- struct lustre_capa *capa;
+ struct lustre_capa *capa;
int rc;
ENTRY;
*capa = obj->mot_header.loh_capa;
/* TODO: add capa check */
- rc = mo_capa_get(info->mti_ctxt, mdt_object_child(obj), capa);
+ rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa);
if (rc)
RETURN(rc);
return container_of0(o, struct mdt_object, mot_obj.mo_lu);
}
-struct mdt_object *mdt_object_find(const struct lu_context *ctxt,
+struct mdt_object *mdt_object_find(const struct lu_env *env,
struct mdt_device *d,
const struct lu_fid *f,
struct lustre_capa *c)
if (!d->mdt_opts.mo_mds_capa)
c = BYPASS_CAPA;
- o = lu_object_find(ctxt, d->mdt_md_dev.md_lu_dev.ld_site, f, c);
+ o = lu_object_find(env, d->mdt_md_dev.md_lu_dev.ld_site, f, c);
if (IS_ERR(o))
m = (struct mdt_object *)o;
else
{
struct mdt_object *o;
- o = mdt_object_find(info->mti_ctxt, info->mti_mdt, f, capa);
+ o = mdt_object_find(info->mti_env, info->mti_mdt, f, capa);
if (!IS_ERR(o)) {
int rc;
rc = mdt_object_lock(info, o, lh, ibits);
if (rc != 0) {
- mdt_object_put(info->mti_ctxt, o);
+ mdt_object_put(info->mti_env, o);
o = ERR_PTR(rc);
}
}
int decref)
{
mdt_object_unlock(info, o, lh, decref);
- mdt_object_put(info->mti_ctxt, o);
+ mdt_object_put(info->mti_env, o);
}
static struct mdt_handler *mdt_handler_find(__u32 opc,
const struct mdt_body *body;
struct lustre_capa *capa = NULL;
struct mdt_object *obj;
- const struct lu_context *ctx;
+ const struct lu_env *env;
struct req_capsule *pill;
int rc;
- ctx = info->mti_ctxt;
+ env = info->mti_env;
pill = &info->mti_pill;
body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
if (req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
capa = req_capsule_client_get(pill, &RMF_CAPA1);
- obj = mdt_object_find(ctx, info->mti_mdt, &body->fid1, capa);
+ obj = mdt_object_find(env, info->mti_mdt, &body->fid1, capa);
if (!IS_ERR(obj)) {
if ((flags & HABEO_CORPUS) &&
!lu_object_exists(&obj->mot_obj.mo_lu)) {
- mdt_object_put(ctx, obj);
+ mdt_object_put(env, obj);
rc = -ENOENT;
} else {
info->mti_object = obj;
mdt_lock_handle_init(&info->mti_lh[i]);
info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
- info->mti_ctxt = req->rq_svc_thread->t_ctx;
+ info->mti_env = req->rq_svc_thread->t_env;
info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
/* it can be NULL while CONNECT */
req_capsule_fini(&info->mti_pill);
if (info->mti_object != NULL) {
- mdt_object_put(info->mti_ctxt, info->mti_object);
+ mdt_object_put(info->mti_env, info->mti_object);
info->mti_object = NULL;
}
for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
static int mdt_handle_common(struct ptlrpc_request *req,
struct mdt_opc_slice *supported)
{
- struct lu_context *ctx;
+ struct lu_env *env;
struct mdt_thread_info *info;
int rc;
ENTRY;
- ctx = req->rq_svc_thread->t_ctx;
- LASSERT(ctx != NULL);
- LASSERT(ctx->lc_thread == req->rq_svc_thread);
- info = lu_context_key_get(ctx, &mdt_thread_key);
+ env = req->rq_svc_thread->t_env;
+ LASSERT(env != NULL);
+ LASSERT(env->le_ses != NULL);
+ LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
+ info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
LASSERT(info != NULL);
mdt_thread_info_init(req, info);
LASSERT(req != NULL);
- info = lu_context_key_get(req->rq_svc_thread->t_ctx, &mdt_thread_key);
+ info = lu_context_key_get(&req->rq_svc_thread->t_env->le_ctx,
+ &mdt_thread_key);
LASSERT(info != NULL);
pill = &info->mti_pill;
LASSERT(pill->rc_req == req);
/*
* Seq wrappers
*/
-static int mdt_seq_fini(const struct lu_context *ctx,
+static int mdt_seq_fini(const struct lu_env *env,
struct mdt_device *m)
{
struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site;
ENTRY;
if (ls && ls->ls_server_seq) {
- seq_server_fini(ls->ls_server_seq, ctx);
+ seq_server_fini(ls->ls_server_seq, env);
OBD_FREE_PTR(ls->ls_server_seq);
ls->ls_server_seq = NULL;
}
if (ls && ls->ls_control_seq) {
- seq_server_fini(ls->ls_control_seq, ctx);
+ seq_server_fini(ls->ls_control_seq, env);
OBD_FREE_PTR(ls->ls_control_seq);
ls->ls_control_seq = NULL;
}
RETURN(0);
}
-static int mdt_seq_init(const struct lu_context *ctx,
+static int mdt_seq_init(const struct lu_env *env,
const char *uuid,
struct mdt_device *m)
{
rc = seq_server_init(ls->ls_control_seq,
m->mdt_bottom, uuid,
LUSTRE_SEQ_CONTROLLER,
- ctx);
+ env);
if (rc)
GOTO(out_seq_fini, rc);
rc = seq_server_init(ls->ls_server_seq,
m->mdt_bottom, uuid,
LUSTRE_SEQ_SERVER,
- ctx);
+ env);
if (rc)
GOTO(out_seq_fini, rc = -ENOMEM);
rc = seq_server_set_cli(ls->ls_server_seq,
ls->ls_client_seq,
- ctx);
+ env);
}
EXIT;
out_seq_fini:
if (rc)
- mdt_seq_fini(ctx, m);
+ mdt_seq_fini(env, m);
return rc;
}
-static int mdt_md_connect(const struct lu_context *ctx,
+static int mdt_md_connect(const struct lu_env *env,
struct lustre_handle *conn,
struct obd_device *mdc)
{
RETURN(-ENOMEM);
/* The connection between MDS must be local */
ocd->ocd_connect_flags |= OBD_CONNECT_LCL_CLIENT;
- rc = obd_connect(ctx, conn, mdc, &mdc->obd_uuid, ocd);
+ rc = obd_connect(env, conn, mdc, &mdc->obd_uuid, ocd);
OBD_FREE_PTR(ocd);
* Init client sequence manager which is used by local MDS to talk to sequence
* controller on remote node.
*/
-static int mdt_seq_init_cli(const struct lu_context *ctx,
+static int mdt_seq_init_cli(const struct lu_env *env,
struct mdt_device *m,
struct lustre_cfg *cfg)
{
char *p, *index_string = lustre_cfg_string(cfg, 2);
ENTRY;
- info = lu_context_key_get(ctx, &mdt_thread_key);
+ info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
uuidp = &info->mti_u.uuid[0];
mdcuuidp = &info->mti_u.uuid[1];
CDEBUG(D_CONFIG, "connect to controller %s(%s)\n",
mdc->obd_name, mdc->obd_uuid.uuid);
- rc = mdt_md_connect(ctx, &conn, mdc);
+ rc = mdt_md_connect(env, &conn, mdc);
if (rc) {
CERROR("target %s connect error %d\n",
mdc->obd_name, rc);
rc = seq_server_set_cli(ls->ls_server_seq,
ls->ls_client_seq,
- ctx);
+ env);
}
}
/*
* FLD wrappers
*/
-static int mdt_fld_fini(const struct lu_context *ctx,
+static int mdt_fld_fini(const struct lu_env *env,
struct mdt_device *m)
{
struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site;
ENTRY;
if (ls && ls->ls_server_fld) {
- fld_server_fini(ls->ls_server_fld, ctx);
+ fld_server_fini(ls->ls_server_fld, env);
OBD_FREE_PTR(ls->ls_server_fld);
ls->ls_server_fld = NULL;
}
RETURN(0);
}
-static int mdt_fld_init(const struct lu_context *ctx,
+static int mdt_fld_init(const struct lu_env *env,
const char *uuid,
struct mdt_device *m)
{
RETURN(rc = -ENOMEM);
rc = fld_server_init(ls->ls_server_fld,
- m->mdt_bottom, uuid, ctx);
+ m->mdt_bottom, uuid, env);
if (rc) {
OBD_FREE_PTR(ls->ls_server_fld);
ls->ls_server_fld = NULL;
EXIT;
out_fld_fini:
if (rc)
- mdt_fld_fini(ctx, m);
+ mdt_fld_fini(env, m);
return rc;
}
return rc;
}
-static void mdt_stack_fini(const struct lu_context *ctx,
+static void mdt_stack_fini(const struct lu_env *env,
struct mdt_device *m, struct lu_device *top)
{
struct lu_device *d = top, *n;
struct mdt_thread_info *info;
ENTRY;
- info = lu_context_key_get(ctx, &mdt_thread_key);
+ info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
LASSERT(info != NULL);
bufs = &info->mti_u.bufs;
return;
}
LASSERT(top);
- top->ld_ops->ldo_process_config(ctx, top, lcfg);
+ top->ld_ops->ldo_process_config(env, top, lcfg);
lustre_cfg_free(lcfg);
- lu_site_purge(ctx, top->ld_site, ~0);
+ lu_site_purge(env, top->ld_site, ~0);
while (d != NULL) {
struct obd_type *type;
struct lu_device_type *ldt = d->ld_type;
/* each fini() returns next device in stack of layers
* * so we can avoid the recursion */
- n = ldt->ldt_ops->ldto_device_fini(ctx, d);
+ n = ldt->ldt_ops->ldto_device_fini(env, d);
lu_device_put(d);
- ldt->ldt_ops->ldto_device_free(ctx, d);
+ ldt->ldt_ops->ldto_device_free(env, d);
type = ldt->ldt_obd_type;
type->typ_refcnt--;
class_put_type(type);
m->mdt_child = NULL;
}
-static struct lu_device *mdt_layer_setup(const struct lu_context *ctx,
+static struct lu_device *mdt_layer_setup(const struct lu_env *env,
const char *typename,
struct lu_device *child,
struct lustre_cfg *cfg)
GOTO(out, rc = -ENODEV);
}
- rc = lu_context_refill(ctx);
+ rc = lu_context_refill(&env->le_ctx);
if (rc != 0) {
CERROR("Failure to refill context: '%d'\n", rc);
GOTO(out_type, rc);
}
+ if (env->le_ses != NULL) {
+ rc = lu_context_refill(env->le_ses);
+ if (rc != 0) {
+ CERROR("Failure to refill session: '%d'\n", rc);
+ GOTO(out_type, rc);
+ }
+ }
+
ldt = type->typ_lu;
if (ldt == NULL) {
CERROR("type: '%s'\n", typename);
}
ldt->ldt_obd_type = type;
- d = ldt->ldt_ops->ldto_device_alloc(ctx, ldt, cfg);
+ d = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
if (IS_ERR(d)) {
CERROR("Cannot allocate device: '%s'\n", typename);
GOTO(out_type, rc = -ENODEV);
d->ld_site = child->ld_site;
type->typ_refcnt++;
- rc = ldt->ldt_ops->ldto_device_init(ctx, d, child);
+ rc = ldt->ldt_ops->ldto_device_init(env, d, child);
if (rc) {
CERROR("can't init device '%s', rc %d\n", typename, rc);
GOTO(out_alloc, rc);
RETURN(d);
out_alloc:
- ldt->ldt_ops->ldto_device_free(ctx, d);
+ ldt->ldt_ops->ldto_device_free(env, d);
type->typ_refcnt--;
out_type:
class_put_type(type);
return ERR_PTR(rc);
}
-static int mdt_stack_init(const struct lu_context *ctx,
+static int mdt_stack_init(const struct lu_env *env,
struct mdt_device *m, struct lustre_cfg *cfg)
{
struct lu_device *d = &m->mdt_md_dev.md_lu_dev;
ENTRY;
/* init the stack */
- tmp = mdt_layer_setup(ctx, LUSTRE_OSD_NAME, d, cfg);
+ tmp = mdt_layer_setup(env, LUSTRE_OSD_NAME, d, cfg);
if (IS_ERR(tmp)) {
RETURN(PTR_ERR(tmp));
}
m->mdt_bottom = lu2dt_dev(tmp);
d = tmp;
- tmp = mdt_layer_setup(ctx, LUSTRE_MDD_NAME, d, cfg);
+ tmp = mdt_layer_setup(env, LUSTRE_MDD_NAME, d, cfg);
if (IS_ERR(tmp)) {
GOTO(out, rc = PTR_ERR(tmp));
}
d = tmp;
md = lu2md_dev(d);
- tmp = mdt_layer_setup(ctx, LUSTRE_CMM_NAME, d, cfg);
+ tmp = mdt_layer_setup(env, LUSTRE_CMM_NAME, d, cfg);
if (IS_ERR(tmp)) {
GOTO(out, rc = PTR_ERR(tmp));
}
/* process setup config */
tmp = &m->mdt_md_dev.md_lu_dev;
- rc = tmp->ld_ops->ldo_process_config(ctx, tmp, cfg);
+ rc = tmp->ld_ops->ldo_process_config(env, tmp, cfg);
GOTO(out, rc);
out:
/* fini from last known good lu_device */
if (rc)
- mdt_stack_fini(ctx, m, d);
+ mdt_stack_fini(env, m, d);
return rc;
}
-static void mdt_fini(const struct lu_context *ctx, struct mdt_device *m)
+static void mdt_fini(const struct lu_env *env, struct mdt_device *m)
{
struct lu_device *d = &m->mdt_md_dev.md_lu_dev;
struct lu_site *ls = d->ld_site;
d->ld_obd->obd_namespace = m->mdt_namespace = NULL;
}
- mdt_seq_fini(ctx, m);
+ mdt_seq_fini(env, m);
mdt_seq_fini_cli(m);
- mdt_fld_fini(ctx, m);
+ mdt_fld_fini(env, m);
if (m->mdt_rootsquash_info) {
OBD_FREE_PTR(m->mdt_rootsquash_info);
del_timer(&m->mdt_ck_timer);
mdt_ck_thread_stop(m);
- mdt_fs_cleanup(ctx, m);
+ mdt_fs_cleanup(env, m);
/* finish the stack */
- mdt_stack_fini(ctx, m, md2lu_dev(m->mdt_child));
+ mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
if (ls) {
lu_site_fini(ls);
EXIT;
}
-int mdt_postrecov(const struct lu_context *, struct mdt_device *);
+int mdt_postrecov(const struct lu_env *, struct mdt_device *);
-static int mdt_init0(const struct lu_context *ctx, struct mdt_device *m,
+static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
struct lu_device_type *ldt, struct lustre_cfg *cfg)
{
struct lprocfs_static_vars lvars;
int rc;
ENTRY;
- info = lu_context_key_get(ctx, &mdt_thread_key);
+ info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
LASSERT(info != NULL);
obd = class_name2obd(dev);
}
/* init the stack */
- rc = mdt_stack_init(ctx, m, cfg);
+ rc = mdt_stack_init(env, m, cfg);
if (rc) {
CERROR("can't init device stack, rc %d\n", rc);
GOTO(err_fini_site, rc);
LASSERT(num);
s->ls_node_id = simple_strtol(num, NULL, 10);
- rc = mdt_fld_init(ctx, obd->obd_name, m);
+ rc = mdt_fld_init(env, obd->obd_name, m);
if (rc)
GOTO(err_fini_stack, rc);
- rc = mdt_seq_init(ctx, obd->obd_name, m);
+ rc = mdt_seq_init(env, obd->obd_name, m);
if (rc)
GOTO(err_fini_fld, rc);
GOTO(err_capa, rc);
ping_evictor_start();
- rc = mdt_fs_setup(ctx, m, obd);
+ rc = mdt_fs_setup(env, m, obd);
if (rc)
GOTO(err_stop_service, rc);
if(obd->obd_recovering == 0)
- mdt_postrecov(ctx, m);
+ mdt_postrecov(env, m);
m->mdt_opts.mo_no_gss_support = 1;
ldlm_namespace_free(m->mdt_namespace, 0);
obd->obd_namespace = m->mdt_namespace = NULL;
err_fini_seq:
- mdt_seq_fini(ctx, m);
+ mdt_seq_fini(env, m);
err_fini_fld:
- mdt_fld_fini(ctx, m);
+ mdt_fld_fini(env, m);
err_fini_stack:
- mdt_stack_fini(ctx, m, md2lu_dev(m->mdt_child));
+ mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
err_fini_site:
lu_site_fini(s);
err_free_site:
}
/* used by MGS to process specific configurations */
-static int mdt_process_config(const struct lu_context *ctx,
+static int mdt_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
struct mdt_device *m = mdt_dev(d);
if (rc)
/* others are passed further */
- rc = next->ld_ops->ldo_process_config(ctx, next, cfg);
+ rc = next->ld_ops->ldo_process_config(env, next, cfg);
break;
}
case LCFG_ADD_MDC:
* Add mdc hook to get first MDT uuid and connect it to
* ls->controller to use for seq manager.
*/
- rc = mdt_seq_init_cli(ctx, mdt_dev(d), cfg);
+ rc = mdt_seq_init_cli(env, mdt_dev(d), cfg);
if (rc) {
CERROR("can't initialize controller export, "
"rc %d\n", rc);
}
default:
/* others are passed further */
- rc = next->ld_ops->ldo_process_config(ctx, next, cfg);
+ rc = next->ld_ops->ldo_process_config(env, next, cfg);
break;
}
RETURN(rc);
}
-static struct lu_object *mdt_object_alloc(const struct lu_context *ctxt,
+static struct lu_object *mdt_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *d)
{
RETURN(NULL);
}
-static int mdt_object_init(const struct lu_context *ctxt, struct lu_object *o)
+static int mdt_object_init(const struct lu_env *env, struct lu_object *o)
{
struct mdt_device *d = mdt_dev(o->lo_dev);
struct lu_device *under;
PFID(lu_object_fid(o)));
under = &d->mdt_child->md_lu_dev;
- below = under->ld_ops->ldo_object_alloc(ctxt, o->lo_header, under);
+ below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
if (below != NULL) {
lu_object_add(o, below);
} else
RETURN(rc);
}
-static void mdt_object_free(const struct lu_context *ctxt, struct lu_object *o)
+static void mdt_object_free(const struct lu_env *env, struct lu_object *o)
{
struct mdt_object *mo = mdt_obj(o);
struct lu_object_header *h;
EXIT;
}
-static int mdt_object_print(const struct lu_context *ctxt, void *cookie,
+static int mdt_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- return (*p)(ctxt, cookie, LUSTRE_MDT_NAME"-object@%p", o);
+ return (*p)(env, cookie, LUSTRE_MDT_NAME"-object@%p", o);
}
static struct lu_device_operations mdt_lu_ops = {
}
/* mds_connect copy */
-static int mdt_obd_connect(const struct lu_context *ctx,
+static int mdt_obd_connect(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid,
struct obd_connect_data *data)
int rc;
ENTRY;
- LASSERT(ctx != NULL);
+ LASSERT(env != NULL);
if (!conn || !obd || !cluuid)
RETURN(-EINVAL);
if (mcd != NULL) {
memcpy(mcd->mcd_uuid, cluuid, sizeof mcd->mcd_uuid);
med->med_mcd = mcd;
- rc = mdt_client_new(ctx, mdt, med);
+ rc = mdt_client_new(env, mdt, med);
if (rc != 0) {
OBD_FREE_PTR(mcd);
med->med_mcd = NULL;
struct obd_device *obd = export->exp_obd;
struct mdt_device *mdt;
struct mdt_thread_info *info;
- struct lu_context ctxt;
+ struct lu_env env;
struct md_attr *ma;
int rc = 0;
ENTRY;
mdt = mdt_dev(obd->obd_lu_dev);
LASSERT(mdt != NULL);
- rc = lu_context_init(&ctxt, LCT_MD_THREAD);
+ rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
if (rc)
RETURN(rc);
- lu_context_enter(&ctxt);
-
- info = lu_context_key_get(&ctxt, &mdt_thread_key);
+ info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
LASSERT(info != NULL);
memset(info, 0, sizeof *info);
- info->mti_ctxt = &ctxt;
+ info->mti_env = &env;
info->mti_mdt = mdt;
ma = &info->mti_attr;
}
spin_unlock(&med->med_open_lock);
info->mti_mdt = NULL;
- mdt_client_del(&ctxt, mdt, med);
+ mdt_client_del(&env, mdt, med);
out:
if (ma->ma_lmm)
OBD_FREE(ma->ma_lmm, mdt->mdt_max_mdsize);
if (ma->ma_cookie)
OBD_FREE(ma->ma_cookie, mdt->mdt_max_cookiesize);
- lu_context_exit(&ctxt);
- lu_context_fini(&ctxt);
+ lu_env_fini(&env);
RETURN(rc);
}
-static int mdt_upcall(const struct lu_context *ctx, struct md_device *md,
+static int mdt_upcall(const struct lu_env *env, struct md_device *md,
enum md_upcall_event ev)
{
struct mdt_device *m = mdt_dev(&md->md_lu_dev);
switch (ev) {
case MD_LOV_SYNC:
- rc = next->md_ops->mdo_maxsize_get(ctx, next,
+ rc = next->md_ops->mdo_maxsize_get(env, next,
&m->mdt_max_mdsize,
&m->mdt_max_cookiesize, NULL);
CDEBUG(D_INFO, "get max mdsize %d max cookiesize %d\n",
m->mdt_max_mdsize, m->mdt_max_cookiesize);
break;
case MD_NO_TRANS:
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
mti->mti_no_need_trans = 1;
CDEBUG(D_INFO, "disable mdt trans for this thread\n");
break;
static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg)
{
- struct lu_context ctxt;
+ struct lu_env env;
struct obd_device *obd= exp->exp_obd;
struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
struct dt_device *dt = mdt->mdt_bottom;
ENTRY;
CDEBUG(D_IOCTL, "handling ioctl cmd %#x\n", cmd);
- rc = lu_context_init(&ctxt, LCT_MD_THREAD);
+ rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
if (rc)
RETURN(rc);
- lu_context_enter(&ctxt);
switch (cmd) {
case OBD_IOC_SYNC:
- rc = dt->dd_ops->dt_sync(&ctxt, dt);
+ rc = dt->dd_ops->dt_sync(&env, dt);
break;
case OBD_IOC_SET_READONLY:
- rc = dt->dd_ops->dt_sync(&ctxt, dt);
- dt->dd_ops->dt_ro(&ctxt, dt);
+ rc = dt->dd_ops->dt_sync(&env, dt);
+ dt->dd_ops->dt_ro(&env, dt);
break;
case OBD_IOC_ABORT_RECOVERY:
rc = -EOPNOTSUPP;
}
- lu_context_exit(&ctxt);
- lu_context_fini(&ctxt);
+ lu_env_fini(&env);
RETURN(rc);
}
-int mdt_postrecov(const struct lu_context *ctx, struct mdt_device *mdt)
+int mdt_postrecov(const struct lu_env *env, struct mdt_device *mdt)
{
struct lu_device *ld = md2lu_dev(mdt->mdt_child);
int rc;
ENTRY;
- rc = ld->ld_ops->ldo_recovery_complete(ctx, ld);
+ rc = ld->ld_ops->ldo_recovery_complete(env, ld);
RETURN(rc);
}
int mdt_obd_postrecov(struct obd_device *obd)
{
- struct lu_context ctxt;
+ struct lu_env env;
int rc;
- rc = lu_context_init(&ctxt, LCT_MD_THREAD);
+ rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
if (rc)
RETURN(rc);
- lu_context_enter(&ctxt);
- rc = mdt_postrecov(&ctxt, mdt_dev(obd->obd_lu_dev));
- lu_context_exit(&ctxt);
- lu_context_fini(&ctxt);
+ rc = mdt_postrecov(&env, mdt_dev(obd->obd_lu_dev));
+ lu_env_fini(&env);
return rc;
}
};
-static struct lu_device* mdt_device_fini(const struct lu_context *ctx,
+static struct lu_device* mdt_device_fini(const struct lu_env *env,
struct lu_device *d)
{
struct mdt_device *m = mdt_dev(d);
- mdt_fini(ctx, m);
+ mdt_fini(env, m);
RETURN(NULL);
}
-static void mdt_device_free(const struct lu_context *ctx, struct lu_device *d)
+static void mdt_device_free(const struct lu_env *env, struct lu_device *d)
{
struct mdt_device *m = mdt_dev(d);
OBD_FREE_PTR(m);
}
-static struct lu_device *mdt_device_alloc(const struct lu_context *ctx,
+static struct lu_device *mdt_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
{
int rc;
l = &m->mdt_md_dev.md_lu_dev;
- rc = mdt_init0(ctx, m, t, cfg);
+ rc = mdt_init0(env, m, t, cfg);
if (rc != 0) {
OBD_FREE_PTR(m);
l = ERR_PTR(rc);
/*
* context key constructor/destructor
*/
-static void *mdt_thread_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *mdt_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct mdt_thread_info *info;
return info;
}
-static void mdt_thread_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void mdt_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct mdt_thread_info *info = data;
OBD_FREE_PTR(info);
struct lu_context_key mdt_thread_key = {
.lct_tags = LCT_MD_THREAD,
- .lct_init = mdt_thread_init,
- .lct_fini = mdt_thread_fini
+ .lct_init = mdt_key_init,
+ .lct_fini = mdt_key_fini
};
-static void *mdt_txn_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *mdt_txn_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct mdt_txn_info *txi;
return txi;
}
-static void mdt_txn_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void mdt_txn_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct mdt_txn_info *txi = data;
OBD_FREE_PTR(txi);
struct lu_context_key mdt_txn_key = {
.lct_tags = LCT_TX_HANDLE,
- .lct_init = mdt_txn_init,
- .lct_fini = mdt_txn_fini
+ .lct_init = mdt_txn_key_init,
+ .lct_fini = mdt_txn_key_fini
};
perm->rp_fsgid = uc->mu_o_fsgid;
perm->rp_access_perm = 0;
- if (mo_permission(info->mti_ctxt, next, MAY_READ, uc) == 0)
+ if (mo_permission(info->mti_env, next, MAY_READ, uc) == 0)
perm->rp_access_perm |= MAY_READ;
- if (mo_permission(info->mti_ctxt, next, MAY_WRITE, uc) == 0)
+ if (mo_permission(info->mti_env, next, MAY_WRITE, uc) == 0)
perm->rp_access_perm |= MAY_WRITE;
- if (mo_permission(info->mti_ctxt, next, MAY_EXEC, uc) == 0)
+ if (mo_permission(info->mti_env, next, MAY_EXEC, uc) == 0)
perm->rp_access_perm |= MAY_EXEC;
RETURN(0);
*/
struct req_capsule mti_pill;
- const struct lu_context *mti_ctxt;
+ const struct lu_env *mti_env;
struct mdt_device *mti_mdt;
/*
* number of buffers in reply message.
return info->mti_pill.rc_req;
}
-static inline void mdt_object_get(const struct lu_context *ctxt,
+static inline void mdt_object_get(const struct lu_env *env,
struct mdt_object *o)
{
lu_object_get(&o->mot_obj.mo_lu);
}
-static inline void mdt_object_put(const struct lu_context *ctxt,
+static inline void mdt_object_put(const struct lu_env *env,
struct mdt_object *o)
{
- lu_object_put(ctxt, &o->mot_obj.mo_lu);
+ lu_object_put(env, &o->mot_obj.mo_lu);
}
static inline const struct lu_fid *mdt_object_fid(struct mdt_object *o)
struct mdt_lock_handle *,
int decref);
-struct mdt_object *mdt_object_find(const struct lu_context *,
+struct mdt_object *mdt_object_find(const struct lu_env *,
struct mdt_device *,
const struct lu_fid *,
struct lustre_capa *);
void mdt_reconstruct(struct mdt_thread_info *, struct mdt_lock_handle *);
-int mdt_fs_setup(const struct lu_context *, struct mdt_device *,
+int mdt_fs_setup(const struct lu_env *, struct mdt_device *,
struct obd_device *);
-void mdt_fs_cleanup(const struct lu_context *, struct mdt_device *);
+void mdt_fs_cleanup(const struct lu_env *, struct mdt_device *);
-int mdt_client_del(const struct lu_context *ctxt,
+int mdt_client_del(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_export_data *med);
-int mdt_client_add(const struct lu_context *ctxt,
+int mdt_client_add(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_export_data *med,
int cl_idx);
-int mdt_client_new(const struct lu_context *ctxt,
+int mdt_client_new(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_export_data *med);
int mdt_handle_last_unlink(struct mdt_thread_info *, struct mdt_object *,
const struct md_attr *);
void mdt_reconstruct_open(struct mdt_thread_info *, struct mdt_lock_handle *);
-struct thandle* mdt_trans_start(const struct lu_context *ctx,
+struct thandle* mdt_trans_start(const struct lu_env *env,
struct mdt_device *mdt, int credits);
-void mdt_trans_stop(const struct lu_context *ctx,
+void mdt_trans_stop(const struct lu_env *env,
struct mdt_device *mdt, struct thandle *th);
-int mdt_record_write(const struct lu_context *ctx,
+int mdt_record_write(const struct lu_env *env,
struct dt_object *dt, const void *buf,
size_t count, loff_t *pos, struct thandle *th);
-int mdt_record_read(const struct lu_context *ctx,
+int mdt_record_read(const struct lu_env *env,
struct dt_object *dt, void *buf,
size_t count, loff_t *pos);
extern struct lu_context_key mdt_thread_key;
/* debug issues helper starts here*/
-static inline void mdt_fail_write(const struct lu_context *ctx,
+static inline void mdt_fail_write(const struct lu_env *env,
struct dt_device *dd, int id)
{
if (OBD_FAIL_CHECK(id)) {
CERROR(LUSTRE_MDT_NAME": obd_fail_loc=%x, fail write ops\n",
id);
- dd->dd_ops->dt_ro(ctx, dd);
+ dd->dd_ops->dt_ro(env, dd);
/* We set FAIL_ONCE because we never "un-fail" a device */
obd_fail_loc |= OBD_FAILED | OBD_FAIL_ONCE;
}
int mdt_ck_thread_start(struct mdt_device *mdt);
void mdt_ck_thread_stop(struct mdt_device *mdt);
void mdt_ck_timer_callback(unsigned long castmeharder);
-int mdt_capa_keys_init(const struct lu_context *ctx, struct mdt_device *mdt);
+int mdt_capa_keys_init(const struct lu_env *env, struct mdt_device *mdt);
static inline struct lustre_capa_key *red_capa_key(struct mdt_device *mdt)
{
RETURN(0);
ma->ma_need = MA_INODE | MA_LOV;
- rc = mdo_create_data(info->mti_ctxt,
+ rc = mdo_create_data(info->mti_env,
p ? mdt_object_child(p) : NULL,
mdt_object_child(o), spec, ma, &info->mti_uc);
RETURN(rc);
if (!S_ISREG(lu_object_attr(&o->mot_obj.mo_lu)))
RETURN(0);
-
+
spin_lock(&mdt->mdt_ioepoch_lock);
if (mdt_epoch_opened(o)) {
/* Epoch continues even if there is no writers yet. */
CDEBUG(D_INODE, "Closing epoch "LPU64" on "DFID". Count %d\n",
o->mot_ioepoch, PFID(mdt_object_fid(o)), o->mot_epochcount);
-
+
if (info->mti_attr.ma_attr.la_valid & LA_SIZE)
/* Do Size-on-MDS attribute update.
* Size-on-MDS is re-enabled inside. */
RETURN(0);
spin_lock(&info->mti_mdt->mdt_ioepoch_lock);
-
+
        /* Epoch closes only if client tells about it or eviction occurs. */
if (eviction || (info->mti_epoch->flags & MF_EPOCH_CLOSE)) {
LASSERT(o->mot_epochcount);
CDEBUG(D_INODE, "Closing epoch "LPU64" on "DFID". Count %d\n",
o->mot_ioepoch, PFID(mdt_object_fid(o)),
o->mot_epochcount);
-
+
if (!eviction)
achange = (info->mti_epoch->flags & MF_SOM_CHANGE);
-
+
rc = 0;
if (!eviction && !mdt_epoch_opened(o)) {
/* Epoch ends. Is an Size-on-MDS update needed? */
* update, re-ask client. */
rc = -EAGAIN;
} else if (!(la->la_valid & LA_SIZE) && achange) {
- /* Attributes were changed by the last writer
+ /* Attributes were changed by the last writer
* only but no Size-on-MDS update is received.*/
rc = -EAGAIN;
}
}
-
+
if (achange || eviction)
o->mot_flags |= MF_SOM_CHANGE;
}
-
+
opened = mdt_epoch_opened(o);
spin_unlock(&info->mti_mdt->mdt_ioepoch_lock);
static int mdt_mfd_open(struct mdt_thread_info *info,
struct mdt_object *p,
struct mdt_object *o,
- int flags,
+ int flags,
int created,
struct ldlm_reply *rep)
{
capa = req_capsule_server_get(&info->mti_pill, &RMF_CAPA1);
LASSERT(capa);
capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
- rc = mo_capa_get(info->mti_ctxt, mdt_object_child(o), capa);
+ rc = mo_capa_get(info->mti_env, mdt_object_child(o), capa);
if (rc)
RETURN(rc);
repbody->valid |= OBD_MD_FLMDSCAPA;
capa = req_capsule_server_get(&info->mti_pill, &RMF_CAPA2);
LASSERT(capa);
capa->lc_opc = CAPA_OPC_OSS_DEFAULT;
- rc = mo_capa_get(info->mti_ctxt, mdt_object_child(o), capa);
+ rc = mo_capa_get(info->mti_env, mdt_object_child(o), capa);
if (rc)
RETURN(rc);
repbody->valid |= OBD_MD_FLOSSCAPA;
mfd = NULL;
}
spin_unlock(&med->med_open_lock);
-
+
if (mfd != NULL) {
repbody->handle.cookie = mfd->mfd_handle.h_cookie;
RETURN(0);
if (rc)
RETURN(rc);
- rc = mo_open(info->mti_ctxt, mdt_object_child(o),
+ rc = mo_open(info->mti_env, mdt_object_child(o),
created ? flags | MDS_OPEN_CREATED : flags,
&info->mti_uc);
if (rc)
mfd = mdt_mfd_new();
if (mfd != NULL) {
-
+
                /* keep a reference on this object for this open,
                 * which is released by mdt_mfd_close() */
- mdt_object_get(info->mti_ctxt, o);
+ mdt_object_get(info->mti_env, o);
/* open handling */
mfd->mfd_mode = flags;
void mdt_reconstruct_open(struct mdt_thread_info *info,
struct mdt_lock_handle *lhc)
{
- const struct lu_context *ctxt = info->mti_ctxt;
+ const struct lu_env *env = info->mti_env;
struct mdt_device *mdt = info->mti_mdt;
struct req_capsule *pill = &info->mti_pill;
struct ptlrpc_request *req = mdt_info_req(info);
CERROR("This is reconstruct open: disp="LPX64", result=%d\n",
ldlm_rep->lock_policy_res1, req->rq_status);
- if (mdt_get_disposition(ldlm_rep, DISP_OPEN_CREATE) &&
+ if (mdt_get_disposition(ldlm_rep, DISP_OPEN_CREATE) &&
req->rq_status != 0) {
/* We did not create successfully, return error to client. */
mdt_shrink_reply(info, DLM_REPLY_REC_OFF + 1, 1, 1);
}
if (mdt_get_disposition(ldlm_rep, DISP_OPEN_CREATE)) {
- /*
- * We failed after creation, but we do not know in which step
+ /*
+ * We failed after creation, but we do not know in which step
* we failed. So try to check the child object.
*/
- parent = mdt_object_find(ctxt, mdt, rr->rr_fid1, rr->rr_capa1);
+ parent = mdt_object_find(env, mdt, rr->rr_fid1, rr->rr_capa1);
LASSERT(!IS_ERR(parent));
- child = mdt_object_find(ctxt, mdt, rr->rr_fid2, rr->rr_capa2);
+ child = mdt_object_find(env, mdt, rr->rr_fid2, rr->rr_capa2);
LASSERT(!IS_ERR(child));
rc = lu_object_exists(&child->mot_obj.mo_lu);
if (rc > 0) {
struct md_object *next;
next = mdt_object_child(child);
- rc = mo_attr_get(ctxt, next, ma, NULL);
+ rc = mo_attr_get(env, next, ma, NULL);
if (rc == 0)
- rc = mdt_mfd_open(info, parent, child,
+ rc = mdt_mfd_open(info, parent, child,
flags, 1, ldlm_rep);
} else if (rc < 0) {
/* the child object was created on remote server */
rc = 0;
} else if (rc == 0) {
/* the child does not exist, we should do regular open */
- mdt_object_put(ctxt, parent);
- mdt_object_put(ctxt, child);
+ mdt_object_put(env, parent);
+ mdt_object_put(env, child);
GOTO(regular_open, 0);
}
- mdt_object_put(ctxt, parent);
- mdt_object_put(ctxt, child);
+ mdt_object_put(env, parent);
+ mdt_object_put(env, child);
mdt_shrink_reply(info, DLM_REPLY_REC_OFF + 1, 1, 1);
GOTO(out, rc);
} else {
lustre_msg_set_status(req->rq_repmsg, req->rq_status);
}
-static int mdt_open_by_fid(struct mdt_thread_info* info,
+static int mdt_open_by_fid(struct mdt_thread_info* info,
struct ldlm_reply *rep)
{
__u32 flags = info->mti_spec.sp_cr_flags;
int rc;
ENTRY;
- o = mdt_object_find(info->mti_ctxt, info->mti_mdt, rr->rr_fid2,
+ o = mdt_object_find(info->mti_env, info->mti_mdt, rr->rr_fid2,
rr->rr_capa2);
- if (IS_ERR(o))
+ if (IS_ERR(o))
RETURN(rc = PTR_ERR(o));
rc = lu_object_exists(&o->mot_obj.mo_lu);
if (rc > 0) {
- const struct lu_context *ctxt = info->mti_ctxt;
+ const struct lu_env *env = info->mti_env;
mdt_set_disposition(info, rep, (DISP_IT_EXECD |
DISP_LOOKUP_EXECD |
DISP_LOOKUP_POS));
- rc = mo_attr_get(ctxt, mdt_object_child(o), ma, NULL);
+ rc = mo_attr_get(env, mdt_object_child(o), ma, NULL);
if (rc == 0)
rc = mdt_mfd_open(info, NULL, o, flags, 0, rep);
} else if (rc == 0) {
repbody->valid |= (OBD_MD_FLID | OBD_MD_MDS);
rc = 0;
}
- mdt_object_put(info->mti_ctxt, o);
+ mdt_object_put(info->mti_env, o);
RETURN(rc);
}
int rc;
ENTRY;
- o = mdt_object_find(info->mti_ctxt, info->mti_mdt, fid, BYPASS_CAPA);
- if (IS_ERR(o))
+ o = mdt_object_find(info->mti_env, info->mti_mdt, fid, BYPASS_CAPA);
+ if (IS_ERR(o))
RETURN(rc = PTR_ERR(o));
rc = lu_object_exists(&o->mot_obj.mo_lu);
if (rc > 0) {
- rc = mo_attr_get(info->mti_ctxt, mdt_object_child(o), ma, NULL);
+ rc = mo_attr_get(info->mti_env, mdt_object_child(o), ma, NULL);
if (rc == 0)
rc = mdt_mfd_open(info, NULL, o, flags, 0, rep);
} else if (rc == 0) {
CERROR("The object isn't on this server! FLD error?\n");
rc = -EFAULT;
}
- mdt_object_put(info->mti_ctxt, o);
+ mdt_object_put(info->mti_env, o);
RETURN(rc);
}
mdt_set_disposition(info, ldlm_rep,
(DISP_IT_EXECD | DISP_LOOKUP_EXECD));
-
+
if (rr->rr_name[0] == 0) {
/* this is cross-ref open */
mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
if (IS_ERR(parent))
GOTO(out, result = PTR_ERR(parent));
- result = mdo_lookup(info->mti_ctxt, mdt_object_child(parent),
+ result = mdo_lookup(info->mti_env, mdt_object_child(parent),
rr->rr_name, child_fid, &info->mti_uc);
if (result != 0 && result != -ENOENT && result != -ESTALE)
GOTO(out_parent, result);
mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
}
- child = mdt_object_find(info->mti_ctxt, mdt, child_fid, BYPASS_CAPA);
+ child = mdt_object_find(info->mti_env, mdt, child_fid, BYPASS_CAPA);
if (IS_ERR(child))
GOTO(out_parent, result = PTR_ERR(child));
if (result == -ENOENT) {
/* Not found and with MDS_OPEN_CREAT: let's create it. */
mdt_set_disposition(info, ldlm_rep, DISP_OPEN_CREATE);
- result = mdo_create(info->mti_ctxt,
+ result = mdo_create(info->mti_env,
mdt_object_child(parent),
rr->rr_name,
mdt_object_child(child),
&info->mti_attr,
&info->mti_uc);
if (result == -ERESTART) {
- mdt_clear_disposition(info, ldlm_rep, DISP_OPEN_CREATE);
+ mdt_clear_disposition(info, ldlm_rep, DISP_OPEN_CREATE);
GOTO(out_child, result);
}
- else {
+ else {
if (result != 0)
GOTO(out_child, result);
}
created = 1;
} else {
/* We have to get attr & lov ea for this object */
- result = mo_attr_get(info->mti_ctxt, mdt_object_child(child),
+ result = mo_attr_get(info->mti_env, mdt_object_child(child),
ma, NULL);
/*
* The object is on remote node, return its FID for remote open.
*/
if (result == -EREMOTE) {
int rc;
-
- /*
+
+ /*
* Check if this lock already was sent to client and
* this is resent case. For resent case do not take lock
* again, use what is already granted.
*/
LASSERT(lhc != NULL);
-
+
if (lustre_handle_is_used(&lhc->mlh_lh)) {
struct ldlm_lock *lock;
-
+
LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
MSG_RESENT);
-
+
lock = ldlm_handle2lock(&lhc->mlh_lh);
if (!lock) {
CERROR("Invalid lock handle "LPX64"\n",
} else {
mdt_lock_handle_init(lhc);
lhc->mlh_mode = LCK_CR;
-
+
rc = mdt_object_lock(info, child, lhc,
MDS_INODELOCK_LOOKUP);
}
}
/* Try to open it now. */
- result = mdt_mfd_open(info, parent, child, create_flags,
+ result = mdt_mfd_open(info, parent, child, create_flags,
created, ldlm_rep);
GOTO(finish_open, result);
int rc2;
ma->ma_need = 0;
ma->ma_cookie_size = 0;
- rc2 = mdo_unlink(info->mti_ctxt,
+ rc2 = mdo_unlink(info->mti_env,
mdt_object_child(parent),
mdt_object_child(child),
rr->rr_name,
CERROR("error in cleanup of open");
}
out_child:
- mdt_object_put(info->mti_ctxt, child);
+ mdt_object_put(info->mti_env, child);
out_parent:
mdt_object_unlock_put(info, parent, lh, result);
out:
}
ma->ma_need |= MA_INODE;
-
+
if (!MFD_CLOSED(mode))
- rc = mo_close(info->mti_ctxt, next, ma, NULL);
+ rc = mo_close(info->mti_env, next, ma, NULL);
else if (ret == -EAGAIN)
- rc = mo_attr_get(info->mti_ctxt, next, ma, NULL);
+ rc = mo_attr_get(info->mti_env, next, ma, NULL);
/* If the object is unlinked, do not try to re-enable SIZEONMDS */
if ((ret == -EAGAIN) && (ma->ma_valid & MA_INODE) &&
}
} else {
mdt_mfd_free(mfd);
- mdt_object_put(info->mti_ctxt, o);
+ mdt_object_put(info->mti_env, o);
}
RETURN(rc ? rc : ret);
rc = req_capsule_pack(&info->mti_pill);
/* Continue to close handle even if we can not pack reply */
if (rc == 0) {
- repbody = req_capsule_server_get(&info->mti_pill,
+ repbody = req_capsule_server_get(&info->mti_pill,
&RMF_MDT_BODY);
- ma->ma_lmm = req_capsule_server_get(&info->mti_pill,
+ ma->ma_lmm = req_capsule_server_get(&info->mti_pill,
&RMF_MDT_MD);
ma->ma_lmm_size = req_capsule_get_size(&info->mti_pill,
&RMF_MDT_MD,
/* Do not lose object before last unlink. */
o = mfd->mfd_object;
- mdt_object_get(info->mti_ctxt, o);
+ mdt_object_get(info->mti_env, o);
ret = mdt_mfd_close(info, mfd);
if (repbody != NULL)
rc = mdt_handle_last_unlink(info, o, ma);
- mdt_object_put(info->mti_ctxt, o);
+ mdt_object_put(info->mti_env, o);
}
if (repbody != NULL)
mdt_shrink_reply(info, REPLY_REC_OFF + 1, 0, 0);
if (MDT_FAIL_CHECK(OBD_FAIL_MDS_CLOSE_PACK))
RETURN(-ENOMEM);
-
+
RETURN(rc ? rc : ret);
}
rc = req_capsule_pack(&info->mti_pill);
if (rc)
RETURN(rc);
-
- repbody = req_capsule_server_get(&info->mti_pill,
+
+ repbody = req_capsule_server_get(&info->mti_pill,
&RMF_MDT_BODY);
repbody->eadatasize = 0;
repbody->aclsize = 0;
info->mti_epoch->handle.cookie);
rc = -ESTALE;
} else {
- LASSERT((mfd->mfd_mode == FMODE_EPOCH) ||
+ LASSERT((mfd->mfd_mode == FMODE_EPOCH) ||
(mfd->mfd_mode == FMODE_EPOCHLCK));
class_handle_unhash(&mfd->mfd_handle);
list_del_init(&mfd->mfd_list);
#include "mdt_internal.h"
-static int mdt_server_data_update(const struct lu_context *ctx,
+static int mdt_server_data_update(const struct lu_env *env,
struct mdt_device *mdt);
/* TODO: maybe this pair should be defined in dt_object.c */
-int mdt_record_read(const struct lu_context *ctx,
+int mdt_record_read(const struct lu_env *env,
struct dt_object *dt, void *buf,
size_t count, loff_t *pos)
{
LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
- rc = dt->do_body_ops->dbo_read(ctx, dt, buf, count, pos);
+ rc = dt->do_body_ops->dbo_read(env, dt, buf, count, pos);
if (rc == count)
rc = 0;
return rc;
}
-int mdt_record_write(const struct lu_context *ctx,
+int mdt_record_write(const struct lu_env *env,
struct dt_object *dt, const void *buf,
size_t count, loff_t *pos, struct thandle *th)
{
LASSERTF(dt != NULL, "dt is NULL when we want to write record\n");
LASSERT(th != NULL);
- rc = dt->do_body_ops->dbo_write(ctx, dt, buf, count, pos, th);
+ rc = dt->do_body_ops->dbo_write(env, dt, buf, count, pos, th);
if (rc == count)
rc = 0;
else if (rc >= 0)
MDT_TXN_LAST_RCVD_WRITE_CREDITS = 3
};
-struct thandle* mdt_trans_start(const struct lu_context *ctx,
+struct thandle* mdt_trans_start(const struct lu_env *env,
struct mdt_device *mdt, int credits)
{
struct mdt_thread_info *mti;
struct txn_param *p;
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
p = &mti->mti_txn_param;
p->tp_credits = credits;
- return mdt->mdt_bottom->dd_ops->dt_trans_start(ctx, mdt->mdt_bottom, p);
+ return mdt->mdt_bottom->dd_ops->dt_trans_start(env, mdt->mdt_bottom, p);
}
-void mdt_trans_stop(const struct lu_context *ctx,
+void mdt_trans_stop(const struct lu_env *env,
struct mdt_device *mdt, struct thandle *th)
{
- mdt->mdt_bottom->dd_ops->dt_trans_stop(ctx, th);
+ mdt->mdt_bottom->dd_ops->dt_trans_stop(env, th);
}
/* last_rcvd handling */
buf->mcd_last_close_result = cpu_to_le32(mcd->mcd_last_close_result);
}
-static int mdt_last_rcvd_header_read(const struct lu_context *ctx,
+static int mdt_last_rcvd_header_read(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_server_data *msd)
{
loff_t *off;
int rc;
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
/* temporary stuff for read */
tmp = &mti->mti_msd;
off = &mti->mti_off;
*off = 0;
- rc = mdt_record_read(ctx, mdt->mdt_last_rcvd,
+ rc = mdt_record_read(env, mdt->mdt_last_rcvd,
tmp, sizeof(*tmp), off);
if (rc == 0)
msd_le_to_cpu(tmp, msd);
return rc;
}
-static int mdt_last_rcvd_header_write(const struct lu_context *ctx,
+static int mdt_last_rcvd_header_write(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_server_data *msd)
{
loff_t *off;
int rc;
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
- th = mdt_trans_start(ctx, mdt, MDT_TXN_LAST_RCVD_WRITE_CREDITS);
+ th = mdt_trans_start(env, mdt, MDT_TXN_LAST_RCVD_WRITE_CREDITS);
if (IS_ERR(th))
RETURN(PTR_ERR(th));
tmp = &mti->mti_msd;
off = &mti->mti_off;
*off = 0;
-
+
msd_cpu_to_le(msd, tmp);
- rc = mdt_record_write(ctx, mdt->mdt_last_rcvd,
+ rc = mdt_record_write(env, mdt->mdt_last_rcvd,
tmp, sizeof(*tmp), off, th);
- mdt_trans_stop(ctx, mdt, th);
+ mdt_trans_stop(env, mdt, th);
CDEBUG(D_INFO, "write last_rcvd header rc = %d:\n"
"uuid = %s\n"
return rc;
}
-static int mdt_last_rcvd_read(const struct lu_context *ctx,
+static int mdt_last_rcvd_read(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_client_data *mcd, loff_t *off)
{
struct mdt_client_data *tmp;
int rc;
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
tmp = &mti->mti_mcd;
- rc = mdt_record_read(ctx, mdt->mdt_last_rcvd, tmp, sizeof(*tmp), off);
+ rc = mdt_record_read(env, mdt->mdt_last_rcvd, tmp, sizeof(*tmp), off);
if (rc == 0)
mcd_le_to_cpu(tmp, mcd);
return rc;
}
-static int mdt_last_rcvd_write(const struct lu_context *ctx,
+static int mdt_last_rcvd_write(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_client_data *mcd,
loff_t *off, struct thandle *th)
int rc;
LASSERT(th != NULL);
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
tmp = &mti->mti_mcd;
mcd_cpu_to_le(mcd, tmp);
- rc = mdt_record_write(ctx, mdt->mdt_last_rcvd,
+ rc = mdt_record_write(env, mdt->mdt_last_rcvd,
tmp, sizeof(*tmp), off, th);
CDEBUG(D_INFO, "write mcd @%d rc = %d:\n"
}
-static int mdt_clients_data_init(const struct lu_context *ctx,
+static int mdt_clients_data_init(const struct lu_env *env,
struct mdt_device *mdt,
unsigned long last_size)
{
__u64 last_transno;
struct obd_export *exp;
struct mdt_export_data *med;
-
+
if (!mcd) {
OBD_ALLOC_PTR(mcd);
if (!mcd)
off = msd->msd_client_start +
cl_idx * msd->msd_client_size;
- rc = mdt_last_rcvd_read(ctx, mdt, mcd, &off);
+ rc = mdt_last_rcvd_read(env, mdt, mcd, &off);
if (rc) {
CERROR("error reading MDS %s idx %d, off %llu: rc %d\n",
LAST_RCVD, cl_idx, off, rc);
med = &exp->exp_mdt_data;
med->med_mcd = mcd;
- rc = mdt_client_add(ctx, mdt, med, cl_idx);
+ rc = mdt_client_add(env, mdt, med, cl_idx);
LASSERTF(rc == 0, "rc = %d\n", rc); /* can't fail existing */
mcd = NULL;
exp->exp_replay_needed = 1;
RETURN(rc);
}
-static int mdt_server_data_init(const struct lu_context *ctx,
+static int mdt_server_data_init(const struct lu_env *env,
struct mdt_device *mdt)
{
struct mdt_server_data *msd = &mdt->mdt_msd;
CLASSERT(offsetof(struct mdt_client_data, mcd_padding) +
sizeof(mcd->mcd_padding) == LR_CLIENT_SIZE);
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
LASSERT(mti != NULL);
la = &mti->mti_attr.ma_attr;
obj = mdt->mdt_last_rcvd;
- obj->do_ops->do_read_lock(ctx, obj);
- rc = obj->do_ops->do_attr_get(ctx, mdt->mdt_last_rcvd, la);
- obj->do_ops->do_read_unlock(ctx, obj);
+ obj->do_ops->do_read_lock(env, obj);
+ rc = obj->do_ops->do_attr_get(env, mdt->mdt_last_rcvd, la);
+ obj->do_ops->do_read_unlock(env, obj);
if (rc)
RETURN(rc);
last_rcvd_size = (unsigned long)la->la_size;
-
+
if (last_rcvd_size == 0) {
LCONSOLE_WARN("%s: new disk, initializing\n", obd->obd_name);
OBD_INCOMPAT_COMMON_LR;
} else {
LCONSOLE_WARN("%s: used disk, loading\n", obd->obd_name);
- rc = mdt_last_rcvd_header_read(ctx, mdt, msd);
+ rc = mdt_last_rcvd_header_read(env, mdt, msd);
if (rc) {
CERROR("error reading MDS %s: rc %d\n", LAST_RCVD, rc);
GOTO(out, rc);
GOTO(out, rc = -EINVAL);
}
- rc = mdt_clients_data_init(ctx, mdt, last_rcvd_size);
+ rc = mdt_clients_data_init(env, mdt, last_rcvd_size);
if (rc)
GOTO(err_client, rc);
msd->msd_mount_count = mdt->mdt_mount_count;
/* save it, so mount count and last_transno is current */
- rc = mdt_server_data_update(ctx, mdt);
+ rc = mdt_server_data_update(env, mdt);
if (rc)
GOTO(err_client, rc);
return rc;
}
-static int mdt_server_data_update(const struct lu_context *ctx,
+static int mdt_server_data_update(const struct lu_env *env,
struct mdt_device *mdt)
{
struct mdt_server_data *msd = &mdt->mdt_msd;
msd->msd_last_transno = mdt->mdt_last_transno;
spin_unlock(&mdt->mdt_transno_lock);
- rc = mdt_last_rcvd_header_write(ctx, mdt, msd);
+ rc = mdt_last_rcvd_header_write(env, mdt, msd);
RETURN(rc);
}
-int mdt_client_new(const struct lu_context *ctx,
+int mdt_client_new(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_export_data *med)
{
LASSERT(bitmap != NULL);
if (!strcmp(med->med_mcd->mcd_uuid, obd->obd_uuid.uuid))
RETURN(0);
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
/* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
* there's no need for extra complication here
*/
LASSERTF(med->med_lr_off > 0, "med_lr_off = %llu\n", med->med_lr_off);
/* write new client data */
off = med->med_lr_off;
- th = mdt_trans_start(ctx, mdt, MDT_TXN_LAST_RCVD_WRITE_CREDITS);
+ th = mdt_trans_start(env, mdt, MDT_TXN_LAST_RCVD_WRITE_CREDITS);
if (IS_ERR(th))
RETURN(PTR_ERR(th));
-
- rc = mdt_last_rcvd_write(ctx, mdt, mcd, &off, th);
+
+ rc = mdt_last_rcvd_write(env, mdt, mcd, &off, th);
CDEBUG(D_INFO, "wrote client mcd at idx %u off %llu (len %u)\n",
cl_idx, med->med_lr_off, sizeof(*mcd));
- mdt_trans_stop(ctx, mdt, th);
+ mdt_trans_stop(env, mdt, th);
RETURN(rc);
}
* It should not be possible to fail adding an existing client - otherwise
* mdt_init_server_data() callsite needs to be fixed.
*/
-int mdt_client_add(const struct lu_context *ctx,
+int mdt_client_add(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_export_data *med, int cl_idx)
{
RETURN(rc);
}
-int mdt_client_del(const struct lu_context *ctx,
+int mdt_client_del(const struct lu_env *env,
struct mdt_device *mdt,
struct mdt_export_data *med)
{
LBUG();
}
- th = mdt_trans_start(ctx, mdt, MDT_TXN_LAST_RCVD_WRITE_CREDITS);
+ th = mdt_trans_start(env, mdt, MDT_TXN_LAST_RCVD_WRITE_CREDITS);
if (IS_ERR(th))
GOTO(free, rc = PTR_ERR(th));
mutex_down(&med->med_mcd_lock);
memset(mcd, 0, sizeof *mcd);
-
- rc = mdt_last_rcvd_write(ctx, mdt, mcd, &off, th);
+
+ rc = mdt_last_rcvd_write(env, mdt, mcd, &off, th);
mutex_up(&med->med_mcd_lock);
- mdt_trans_stop(ctx, mdt, th);
-
+ mdt_trans_stop(env, mdt, th);
+
CDEBUG(rc == 0 ? D_INFO : D_ERROR,
"zeroing out client idx %u in %s rc %d\n",
med->med_lr_idx, LAST_RCVD, rc);
-
+
spin_lock(&mdt->mdt_client_bitmap_lock);
clear_bit(med->med_lr_idx, mdt->mdt_client_bitmap);
spin_unlock(&mdt->mdt_client_bitmap_lock);
/* Make sure the server's last_transno is up to date. Do this
* after the client is freed so we know all the client's
* transactions have been committed. */
- mdt_server_data_update(ctx, mdt);
+ mdt_server_data_update(env, mdt);
EXIT;
free:
CERROR("client idx %d has offset %lld\n", med->med_lr_idx, off);
err = -EINVAL;
} else {
- err = mdt_last_rcvd_write(mti->mti_ctxt, mdt, mcd, &off, th);
+ err = mdt_last_rcvd_write(mti->mti_env, mdt, mcd, &off, th);
}
mutex_up(&med->med_mcd_lock);
RETURN(err);
extern struct lu_context_key mdt_thread_key;
/* add credits for last_rcvd update */
-static int mdt_txn_start_cb(const struct lu_context *ctx,
+static int mdt_txn_start_cb(const struct lu_env *env,
struct txn_param *param, void *cookie)
{
param->tp_credits += MDT_TXN_LAST_RCVD_WRITE_CREDITS;
}
/* Update last_rcvd records with latests transaction data */
-static int mdt_txn_stop_cb(const struct lu_context *ctx,
+static int mdt_txn_stop_cb(const struct lu_env *env,
struct thandle *txn, void *cookie)
{
struct mdt_device *mdt = cookie;
struct mdt_txn_info *txi;
struct mdt_thread_info *mti;
struct ptlrpc_request *req;
-
+
/* transno in two contexts - for commit_cb and for thread */
txi = lu_context_key_get(&txn->th_ctx, &mdt_txn_key);
- mti = lu_context_key_get(ctx, &mdt_thread_key);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
req = mdt_info_req(mti);
if (mti->mti_mdt == NULL || req == NULL || mti->mti_no_need_trans) {
}
/* commit callback, need to update last_commited value */
-static int mdt_txn_commit_cb(const struct lu_context *ctx,
+static int mdt_txn_commit_cb(const struct lu_env *env,
struct thandle *txn, void *cookie)
{
struct mdt_device *mdt = cookie;
return 0;
}
-int mdt_fs_setup(const struct lu_context *ctx, struct mdt_device *mdt,
+int mdt_fs_setup(const struct lu_env *env, struct mdt_device *mdt,
struct obd_device *obd)
{
struct lu_fid fid;
dt_txn_callback_add(mdt->mdt_bottom, &mdt->mdt_txn_cb);
- o = dt_store_open(ctx, mdt->mdt_bottom, LAST_RCVD, &fid);
+ o = dt_store_open(env, mdt->mdt_bottom, LAST_RCVD, &fid);
if(!IS_ERR(o)) {
mdt->mdt_last_rcvd = o;
- rc = mdt_server_data_init(ctx, mdt);
+ rc = mdt_server_data_init(env, mdt);
if (rc) {
- lu_object_put(ctx, &o->do_lu);
+ lu_object_put(env, &o->do_lu);
mdt->mdt_last_rcvd = NULL;
}
} else {
if (rc)
RETURN(rc);
- o = dt_store_open(ctx, mdt->mdt_bottom, CAPA_KEYS, &fid);
+ o = dt_store_open(env, mdt->mdt_bottom, CAPA_KEYS, &fid);
if(!IS_ERR(o)) {
struct md_device *next = mdt->mdt_child;
mdt->mdt_ck_obj = o;
- rc = mdt_capa_keys_init(ctx, mdt);
+ rc = mdt_capa_keys_init(env, mdt);
if (rc) {
- lu_object_put(ctx, &o->do_lu);
+ lu_object_put(env, &o->do_lu);
mdt->mdt_ck_obj = NULL;
RETURN(rc);
}
}
-void mdt_fs_cleanup(const struct lu_context *ctx, struct mdt_device *mdt)
+void mdt_fs_cleanup(const struct lu_env *env, struct mdt_device *mdt)
{
struct obd_device *obd = mdt->mdt_md_dev.md_lu_dev.ld_obd;
class_disconnect_exports(obd); /* cleans up client info too */
if (mdt->mdt_last_rcvd)
- lu_object_put(ctx, &mdt->mdt_last_rcvd->do_lu);
+ lu_object_put(env, &mdt->mdt_last_rcvd->do_lu);
mdt->mdt_last_rcvd = NULL;
if (mdt->mdt_ck_obj)
- lu_object_put(ctx, &mdt->mdt_ck_obj->do_lu);
+ lu_object_put(env, &mdt->mdt_ck_obj->do_lu);
mdt->mdt_ck_obj = NULL;
}
return;
/* if no error, so child was created with requested fid */
- child = mdt_object_find(mti->mti_ctxt, mdt, mti->mti_rr.rr_fid2,
+ child = mdt_object_find(mti->mti_env, mdt, mti->mti_rr.rr_fid2,
mti->mti_rr.rr_capa2);
LASSERT(!IS_ERR(child));
body = req_capsule_server_get(&mti->mti_pill, &RMF_MDT_BODY);
- rc = mo_attr_get(mti->mti_ctxt, mdt_object_child(child),
+ rc = mo_attr_get(mti->mti_env, mdt_object_child(child),
&mti->mti_attr, NULL);
if (rc == -EREMOTE) {
/* object was created on remote server */
}
mdt_pack_attr2body(body, &mti->mti_attr.ma_attr, mdt_object_fid(child));
mdt_body_reverse_idmap(mti, body);
- mdt_object_put(mti->mti_ctxt, child);
+ mdt_object_put(mti->mti_env, child);
}
static void mdt_reconstruct_setattr(struct mdt_thread_info *mti,
return;
body = req_capsule_server_get(&mti->mti_pill, &RMF_MDT_BODY);
- obj = mdt_object_find(mti->mti_ctxt, mdt, mti->mti_rr.rr_fid1,
+ obj = mdt_object_find(mti->mti_env, mdt, mti->mti_rr.rr_fid1,
mti->mti_rr.rr_capa1);
LASSERT(!IS_ERR(obj));
- mo_attr_get(mti->mti_ctxt, mdt_object_child(obj),
+ mo_attr_get(mti->mti_env, mdt_object_child(obj),
&mti->mti_attr, NULL);
mdt_pack_attr2body(body, &mti->mti_attr.ma_attr, mdt_object_fid(obj));
mdt_body_reverse_idmap(mti, body);
if (rec->ur_iattr.ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
body->valid |= OBD_MD_FLATIME;
*/
- mdt_object_put(mti->mti_ctxt, obj);
+ mdt_object_put(mti->mti_env, obj);
}
static void mdt_reconstruct_with_shrink(struct mdt_thread_info *mti,
if (IS_ERR(parent))
RETURN(PTR_ERR(parent));
- child = mdt_object_find(info->mti_ctxt, mdt, rr->rr_fid2, BYPASS_CAPA);
+ child = mdt_object_find(info->mti_env, mdt, rr->rr_fid2, BYPASS_CAPA);
if (!IS_ERR(child)) {
struct md_object *next = mdt_object_child(parent);
ma->ma_need = MA_INODE;
- mdt_fail_write(info->mti_ctxt, info->mti_mdt->mdt_bottom,
+ mdt_fail_write(info->mti_env, info->mti_mdt->mdt_bottom,
OBD_FAIL_MDS_REINT_CREATE_WRITE);
- rc = mdo_create(info->mti_ctxt, next, rr->rr_name,
+ rc = mdo_create(info->mti_env, next, rr->rr_name,
mdt_object_child(child),
&info->mti_spec, ma, &info->mti_uc);
if (rc == 0) {
mdt_object_fid(child));
mdt_body_reverse_idmap(info, repbody);
}
- mdt_object_put(info->mti_ctxt, child);
+ mdt_object_put(info->mti_env, child);
} else
rc = PTR_ERR(child);
mdt_object_unlock_put(info, parent, lh, rc);
repbody = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY);
- o = mdt_object_find(info->mti_ctxt, mdt, info->mti_rr.rr_fid2,
+ o = mdt_object_find(info->mti_env, mdt, info->mti_rr.rr_fid2,
BYPASS_CAPA);
if (!IS_ERR(o)) {
struct md_object *next = mdt_object_child(o);
ma->ma_need = MA_INODE;
- rc = mo_object_create(info->mti_ctxt, next, &info->mti_spec,
+ rc = mo_object_create(info->mti_env, next, &info->mti_spec,
ma, &info->mti_uc);
if (rc == 0) {
/* return fid & attr to client. */
mdt_object_fid(o));
mdt_body_reverse_idmap(info, repbody);
}
- mdt_object_put(info->mti_ctxt, o);
+ mdt_object_put(info->mti_env, o);
} else
rc = PTR_ERR(o);
* already. */
if (som_update && (info->mti_epoch->ioepoch != mo->mot_ioepoch))
RETURN(0);
-
+
lh = &info->mti_lh[MDT_LH_PARENT];
lh->mlh_mode = LCK_EX;
* skip setattr. */
if (som_update && (info->mti_epoch->ioepoch != mo->mot_ioepoch))
GOTO(out, rc = 0);
-
+
if (lu_object_assert_not_exists(&mo->mot_obj.mo_lu))
GOTO(out, rc = -ENOENT);
/* all attrs are packed into mti_attr in unpack_setattr */
- mdt_fail_write(info->mti_ctxt, info->mti_mdt->mdt_bottom,
+ mdt_fail_write(info->mti_env, info->mti_mdt->mdt_bottom,
OBD_FAIL_MDS_REINT_SETATTR_WRITE);
/* all attrs are packed into mti_attr in unpack_setattr */
- rc = mo_attr_set(info->mti_ctxt, mdt_object_child(mo), ma,
+ rc = mo_attr_set(info->mti_env, mdt_object_child(mo), ma,
&info->mti_uc);
if (rc != 0)
GOTO(out, rc);
CDEBUG(D_INODE, "Closing epoch "LPU64" on "DFID". Count %d\n",
mo->mot_ioepoch, PFID(mdt_object_fid(mo)),
mo->mot_epochcount);
-
+
mdt_sizeonmds_enable(info, mo);
}
-
+
EXIT;
out:
mdt_object_unlock(info, mo, lh, rc);
(unsigned int)ma->ma_attr.la_valid);
repbody = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY);
- mo = mdt_object_find(info->mti_ctxt, info->mti_mdt, rr->rr_fid1,
+ mo = mdt_object_find(info->mti_env, info->mti_mdt, rr->rr_fid1,
rr->rr_capa1);
if (IS_ERR(mo))
RETURN(rc = PTR_ERR(mo));
mfd = mdt_mfd_new();
if (mfd == NULL)
GOTO(out, rc = -ENOMEM);
-
+
/* FIXME: in recovery, need to pass old epoch here */
mdt_epoch_open(info, mo, 0);
repbody->ioepoch = mo->mot_ioepoch;
- mdt_object_get(info->mti_ctxt, mo);
+ mdt_object_get(info->mti_env, mo);
mfd->mfd_mode = FMODE_EPOCHLCK;
mfd->mfd_object = mo;
mfd->mfd_xid = req->rq_xid;
if (mfd == NULL) {
spin_unlock(&med->med_open_lock);
CDEBUG(D_INODE, "no handle for file close: "
- "fid = "DFID": cookie = "LPX64"\n",
+ "fid = "DFID": cookie = "LPX64"\n",
PFID(info->mti_rr.rr_fid1),
info->mti_epoch->handle.cookie);
GOTO(out, rc = -ESTALE);
ma->ma_need = MA_INODE;
next = mdt_object_child(mo);
- rc = mo_attr_get(info->mti_ctxt, next, ma, NULL);
+ rc = mo_attr_get(info->mti_env, next, ma, NULL);
if (rc != 0)
GOTO(out, rc);
capa = req_capsule_server_get(&info->mti_pill, &RMF_CAPA1);
LASSERT(capa);
capa->lc_opc = CAPA_OPC_OSS_DEFAULT | CAPA_OPC_OSS_TRUNC;
- rc = mo_capa_get(info->mti_ctxt, mdt_object_child(mo), capa);
+ rc = mo_capa_get(info->mti_env, mdt_object_child(mo), capa);
if (rc)
RETURN(rc);
repbody->valid |= OBD_MD_FLOSSCAPA;
mdt_body_reverse_idmap(info, repbody);
EXIT;
out:
- mdt_object_put(info->mti_ctxt, mo);
+ mdt_object_put(info->mti_env, mo);
return rc;
}
if (strlen(rr->rr_name) == 0) {
/* remote partial operation */
- rc = mo_ref_del(info->mti_ctxt, mdt_object_child(mp), ma,
+ rc = mo_ref_del(info->mti_env, mdt_object_child(mp), ma,
&info->mti_uc);
GOTO(out_unlock_parent, rc);
}
/* step 2: find & lock the child */
- rc = mdo_lookup(info->mti_ctxt, mdt_object_child(mp),
+ rc = mdo_lookup(info->mti_env, mdt_object_child(mp),
rr->rr_name, child_fid, &info->mti_uc);
if (rc != 0)
GOTO(out_unlock_parent, rc);
/* we will lock the child regardless it is local or remote. No harm. */
- mc = mdt_object_find(info->mti_ctxt, info->mti_mdt, child_fid,
+ mc = mdt_object_find(info->mti_env, info->mti_mdt, child_fid,
BYPASS_CAPA);
if (IS_ERR(mc))
GOTO(out_unlock_parent, rc = PTR_ERR(mc));
if (rc != 0)
GOTO(out_put_child, rc);
- mdt_fail_write(info->mti_ctxt, info->mti_mdt->mdt_bottom,
+ mdt_fail_write(info->mti_env, info->mti_mdt->mdt_bottom,
OBD_FAIL_MDS_REINT_UNLINK_WRITE);
/*
* whether need MA_LOV and MA_COOKIE.
*/
ma->ma_need = MA_INODE;
- rc = mdo_unlink(info->mti_ctxt, mdt_object_child(mp),
+ rc = mdo_unlink(info->mti_env, mdt_object_child(mp),
mdt_object_child(mc), rr->rr_name, ma, &info->mti_uc);
if (rc)
GOTO(out_unlock_child, rc);
out_unlock_child:
mdt_object_unlock(info, mc, child_lh, rc);
out_put_child:
- mdt_object_put(info->mti_ctxt, mc);
+ mdt_object_put(info->mti_env, mc);
out_unlock_parent:
mdt_object_unlock_put(info, mp, parent_lh, rc);
out:
/* step 1: lock the source */
lhs = &info->mti_lh[MDT_LH_PARENT];
lhs->mlh_mode = LCK_EX;
- ms = mdt_object_find_lock(info, rr->rr_fid1, lhs,
+ ms = mdt_object_find_lock(info, rr->rr_fid1, lhs,
MDS_INODELOCK_UPDATE, rr->rr_capa1);
if (IS_ERR(ms))
RETURN(PTR_ERR(ms));
if (strlen(rr->rr_name) == 0) {
/* remote partial operation */
- rc = mo_ref_add(info->mti_ctxt, mdt_object_child(ms),
+ rc = mo_ref_add(info->mti_env, mdt_object_child(ms),
&info->mti_uc);
GOTO(out_unlock_source, rc);
}
/*step 2: find & lock the target parent dir*/
lhp = &info->mti_lh[MDT_LH_CHILD];
lhp->mlh_mode = LCK_EX;
- mp = mdt_object_find_lock(info, rr->rr_fid2, lhp,
+ mp = mdt_object_find_lock(info, rr->rr_fid2, lhp,
MDS_INODELOCK_UPDATE, rr->rr_capa2);
if (IS_ERR(mp))
GOTO(out_unlock_source, rc = PTR_ERR(mp));
/* step 4: link it */
- mdt_fail_write(info->mti_ctxt, info->mti_mdt->mdt_bottom,
+ mdt_fail_write(info->mti_env, info->mti_mdt->mdt_bottom,
OBD_FAIL_MDS_REINT_LINK_WRITE);
- rc = mdo_link(info->mti_ctxt, mdt_object_child(mp),
+ rc = mdo_link(info->mti_env, mdt_object_child(mp),
mdt_object_child(ms), rr->rr_name, ma, &info->mti_uc);
GOTO(out_unlock_target, rc);
GOTO(out, rc = PTR_ERR(mtgtdir));
/*step 2: find & lock the target object if exists*/
- rc = mdo_lookup(info->mti_ctxt, mdt_object_child(mtgtdir),
+ rc = mdo_lookup(info->mti_env, mdt_object_child(mtgtdir),
rr->rr_tgt, tgt_fid, &info->mti_uc);
if (rc != 0 && rc != -ENOENT) {
GOTO(out_unlock_tgtdir, rc);
if (IS_ERR(mtgt))
GOTO(out_unlock_tgtdir, rc = PTR_ERR(mtgt));
- rc = mdo_rename_tgt(info->mti_ctxt, mdt_object_child(mtgtdir),
+ rc = mdo_rename_tgt(info->mti_env, mdt_object_child(mtgtdir),
mdt_object_child(mtgt), rr->rr_fid2,
rr->rr_tgt, ma, &info->mti_uc);
} else /* -ENOENT */ {
- rc = mdo_name_insert(info->mti_ctxt, mdt_object_child(mtgtdir),
+ rc = mdo_name_insert(info->mti_env, mdt_object_child(mtgtdir),
rr->rr_tgt, rr->rr_fid2,
S_ISDIR(ma->ma_attr.la_mode),
&info->mti_uc);
static int mdt_rename_lock(struct mdt_thread_info *info,
struct lustre_handle *lh)
{
- ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_UPDATE } };
+ ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_UPDATE } };
struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
int flags = LDLM_FL_ATOMIC_CB;
struct ldlm_res_id res_id;
ls = info->mti_mdt->mdt_md_dev.md_lu_dev.ld_site;
fid_build_res_name(&LUSTRE_BFL_FID, &res_id);
-
+
if (ls->ls_control_exp == NULL) {
- /*
+ /*
* Current node is controller, that is mdt0 where we should take
* BFL lock.
*/
EXIT;
}
-/*
+/*
* This is is_subdir() variant, it is CMD is cmm forwards it to correct
* target. Source should not be ancestor of target dir. May be other rename
* checks can be moved here later.
ENTRY;
do {
- dst = mdt_object_find(info->mti_ctxt, info->mti_mdt, &dst_fid,
+ dst = mdt_object_find(info->mti_env, info->mti_mdt, &dst_fid,
BYPASS_CAPA);
if (!IS_ERR(dst)) {
- rc = mdo_is_subdir(info->mti_ctxt,
+ rc = mdo_is_subdir(info->mti_env,
mdt_object_child(dst),
fid, &dst_fid, NULL);
- mdt_object_put(info->mti_ctxt, dst);
+ mdt_object_put(info->mti_env, dst);
if (rc < 0) {
CERROR("Error while doing mdo_is_subdir(), rc %d\n",
rc);
rc = PTR_ERR(dst);
}
} while (rc == EREMOTE);
-
+
RETURN(rc);
}
ENTRY;
+#if 0
DEBUG_REQ(D_INODE, req, "rename "DFID"/%s to "DFID"/%s",
PFID(rr->rr_fid1), rr->rr_name,
PFID(rr->rr_fid2), rr->rr_tgt);
+#endif
rc = req_capsule_get_size(pill, &RMF_NAME, RCL_CLIENT);
if (rc == 1) {
lh_tgtdirp = &info->mti_lh[MDT_LH_CHILD];
lh_tgtdirp->mlh_mode = LCK_EX;
if (lu_fid_eq(rr->rr_fid1, rr->rr_fid2)) {
- mdt_object_get(info->mti_ctxt, msrcdir);
+ mdt_object_get(info->mti_env, msrcdir);
mtgtdir = msrcdir;
} else {
- mtgtdir = mdt_object_find(info->mti_ctxt, info->mti_mdt,
+ mtgtdir = mdt_object_find(info->mti_env, info->mti_mdt,
rr->rr_fid2, rr->rr_capa2);
if (IS_ERR(mtgtdir))
GOTO(out_unlock_source, rc = PTR_ERR(mtgtdir));
-
+
rc = mdt_object_cr_lock(info, mtgtdir, lh_tgtdirp,
MDS_INODELOCK_UPDATE);
if (rc != 0) {
- mdt_object_put(info->mti_ctxt, mtgtdir);
+ mdt_object_put(info->mti_env, mtgtdir);
GOTO(out_unlock_source, rc);
}
}
/*step 3: find & lock the old object*/
- rc = mdo_lookup(info->mti_ctxt, mdt_object_child(msrcdir),
+ rc = mdo_lookup(info->mti_env, mdt_object_child(msrcdir),
rr->rr_name, old_fid, &info->mti_uc);
if (rc != 0)
GOTO(out_unlock_target, rc);
/*step 4: find & lock the new object*/
/* new target object may not exist now */
- rc = mdo_lookup(info->mti_ctxt, mdt_object_child(mtgtdir),
+ rc = mdo_lookup(info->mti_env, mdt_object_child(mtgtdir),
rr->rr_tgt, new_fid, &info->mti_uc);
if (rc == 0) {
/* the new_fid should have been filled at this moment*/
GOTO(out_unlock_old, rc = -EINVAL);
lh_newp->mlh_mode = LCK_EX;
- mnew = mdt_object_find(info->mti_ctxt, info->mti_mdt, new_fid,
+ mnew = mdt_object_find(info->mti_env, info->mti_mdt, new_fid,
BYPASS_CAPA);
if (IS_ERR(mnew))
GOTO(out_unlock_old, rc = PTR_ERR(mnew));
rc = mdt_object_cr_lock(info, mnew, lh_newp,
MDS_INODELOCK_FULL);
if (rc != 0) {
- mdt_object_put(info->mti_ctxt, mnew);
+ mdt_object_put(info->mti_env, mnew);
GOTO(out_unlock_old, rc);
}
} else if (rc != -EREMOTE && rc != -ENOENT)
ma->ma_need = MA_INODE | MA_LOV | MA_COOKIE;
- mdt_fail_write(info->mti_ctxt, info->mti_mdt->mdt_bottom,
+ mdt_fail_write(info->mti_env, info->mti_mdt->mdt_bottom,
OBD_FAIL_MDS_REINT_RENAME_WRITE);
/* Check if @dst is subdir of @src. */
if (rc)
GOTO(out_unlock_new, rc);
- rc = mdo_rename(info->mti_ctxt, mdt_object_child(msrcdir),
+ rc = mdo_rename(info->mti_env, mdt_object_child(msrcdir),
mdt_object_child(mtgtdir), old_fid, rr->rr_name,
(mnew ? mdt_object_child(mnew) : NULL),
rr->rr_tgt, ma, &info->mti_uc);
-
+
/* handle last link of tgt object */
if (rc == 0 && mnew)
mdt_handle_last_unlink(info, mnew, ma);
if (!strcmp(xattr_name, XATTR_NAME_LUSTRE_ACL))
rc = RMTACL_SIZE_MAX;
else
- rc = mo_xattr_get(info->mti_ctxt,
+ rc = mo_xattr_get(info->mti_env,
mdt_object_child(info->mti_object),
NULL, 0, xattr_name, NULL);
} else if ((valid & OBD_MD_FLXATTRLS) == OBD_MD_FLXATTRLS) {
- rc = mo_xattr_list(info->mti_ctxt,
+ rc = mo_xattr_list(info->mti_env,
mdt_object_child(info->mti_object),
NULL, 0, NULL);
} else {
rc = do_remote_getfacl(info, &body->fid1,
buf, buflen);
} else {
- rc = mo_xattr_get(info->mti_ctxt, next, buf, buflen,
+ rc = mo_xattr_get(info->mti_env, next, buf, buflen,
xattr_name, NULL);
}
} else if (info->mti_body->valid & OBD_MD_FLXATTRLS) {
CDEBUG(D_INODE, "listxattr\n");
- rc = mo_xattr_list(info->mti_ctxt, next, buf, buflen, NULL);
+ rc = mo_xattr_list(info->mti_env, next, buf, buflen, NULL);
if (rc < 0)
CDEBUG(D_OTHER, "listxattr failed: %d\n", rc);
} else
__u64 valid = info->mti_body->valid;
int rc = 0, rc1;
- if ((valid & OBD_MD_FLXATTR) == OBD_MD_FLXATTR) {
+ if ((valid & OBD_MD_FLXATTR) == OBD_MD_FLXATTR) {
char *xattr_name;
xattr_name = req_capsule_client_get(pill, &RMF_NAME);
struct req_capsule *pill = &info->mti_pill;
struct mdt_object *obj = info->mti_object;
struct mdt_body *body = (struct mdt_body *)info->mti_body;
- const struct lu_context *ctx = info->mti_ctxt;
+ const struct lu_env *env = info->mti_env;
struct md_object *child = mdt_object_child(obj);
__u64 valid = body->valid;
char *xattr_name;
if (body->flags & XATTR_CREATE)
flags |= LU_XATTR_CREATE;
- mdt_fail_write(ctx, info->mti_mdt->mdt_bottom,
+ mdt_fail_write(env, info->mti_mdt->mdt_bottom,
OBD_FAIL_MDS_SETXATTR_WRITE);
- rc = mo_xattr_set(ctx, child, xattr, xattr_len,
+ rc = mo_xattr_set(env, child, xattr, xattr_len,
xattr_name, flags, &info->mti_uc);
}
} else if ((valid & OBD_MD_FLXATTRRM) == OBD_MD_FLXATTRRM) {
- rc = mo_xattr_del(ctx, child, xattr_name, &info->mti_uc);
+ rc = mo_xattr_del(env, child, xattr_name, &info->mti_uc);
} else {
CERROR("valid bits: "LPX64"\n", body->valid);
rc = -EINVAL;
/* Establish a connection to the MGS.*/
-static int mgs_connect(const struct lu_context *ctx,
+static int mgs_connect(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid, struct obd_connect_data *data)
{
ENTRY;
rc = mgc_logname2resid(fsname, &res_id);
- if (!rc)
+ if (!rc)
rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id,
LDLM_PLAIN, NULL, LCK_EX,
&flags, ldlm_blocking_ast,
ldlm_completion_ast, NULL,
fsname, 0, NULL, lockh);
- if (rc)
+ if (rc)
CERROR("can't take cfg lock for %s (%d)\n", fsname, rc);
RETURN(rc);
mti->mti_svname, obd_export_nid2str(req->rq_export));
rc = mgs_check_target(obd, mti);
/* above will set appropriate mti flags */
- if (rc <= 0)
+ if (rc <= 0)
/* Nothing wrong, or fatal error */
GOTO(out_nolock, rc);
}
CERROR("Can't upgrade from 1.4 (%d)\n", rc);
GOTO(out, rc);
}
-
+
/* Turn off all other update-related flags; we're done. */
- mti->mti_flags &= ~(LDD_F_UPGRADE14 |
- LDD_F_VIRGIN | LDD_F_UPDATE |
+ mti->mti_flags &= ~(LDD_F_UPGRADE14 |
+ LDD_F_VIRGIN | LDD_F_UPDATE |
LDD_F_NEED_INDEX | LDD_F_WRITECONF);
mti->mti_flags |= LDD_F_REWRITE_LDD;
goto out;
}
if (mti->mti_flags & LDD_F_UPDATE) {
- CDEBUG(D_MGS, "updating %s, index=%d\n", mti->mti_svname,
+ CDEBUG(D_MGS, "updating %s, index=%d\n", mti->mti_svname,
mti->mti_stripe_index);
-
- /* create or update the target log
+
+ /* create or update the target log
and update the client/mdt logs */
rc = mgs_write_log_target(obd, mti);
if (rc) {
GOTO(out, rc);
}
- mti->mti_flags &= ~(LDD_F_VIRGIN | LDD_F_UPDATE |
+ mti->mti_flags &= ~(LDD_F_VIRGIN | LDD_F_UPDATE |
LDD_F_NEED_INDEX | LDD_F_WRITECONF);
mti->mti_flags |= LDD_F_REWRITE_LDD;
}
out_nolock:
CDEBUG(D_MGS, "replying with %s, index=%d, rc=%d\n", mti->mti_svname,
mti->mti_stripe_index, rc);
- lustre_pack_reply(req, 2, rep_size, NULL);
+ lustre_pack_reply(req, 2, rep_size, NULL);
/* send back the whole mti in the reply */
rep_mti = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
sizeof(*rep_mti));
}
LASSERT(current->journal_info == NULL);
-
- if (rc)
+
+ if (rc)
CDEBUG(D_CONFIG | D_ERROR, "MGS handle cmd=%d rc=%d\n", opc, rc);
else
CDEBUG(D_CONFIG, "MGS handle cmd=%d rc=%d\n", opc, rc);
#include <dt_object.h>
#include <libcfs/list.h>
-/* no lock is necessary to protect the list, because call-backs
+/* no lock is necessary to protect the list, because call-backs
* are added during system startup. Please refer to "struct dt_device".
*/
void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
}
EXPORT_SYMBOL(dt_txn_callback_del);
-int dt_txn_hook_start(const struct lu_context *ctxt,
+int dt_txn_hook_start(const struct lu_env *env,
struct dt_device *dev, struct txn_param *param)
{
int result;
list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
if (cb->dtc_txn_start == NULL)
continue;
- result = cb->dtc_txn_start(ctxt, param, cb->dtc_cookie);
+ result = cb->dtc_txn_start(env, param, cb->dtc_cookie);
if (result < 0)
break;
}
}
EXPORT_SYMBOL(dt_txn_hook_start);
-int dt_txn_hook_stop(const struct lu_context *ctxt, struct thandle *txn)
+int dt_txn_hook_stop(const struct lu_env *env, struct thandle *txn)
{
struct dt_device *dev = txn->th_dev;
struct dt_txn_callback *cb;
list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
if (cb->dtc_txn_stop == NULL)
continue;
- result = cb->dtc_txn_stop(ctxt, txn, cb->dtc_cookie);
+ result = cb->dtc_txn_stop(env, txn, cb->dtc_cookie);
if (result < 0)
break;
}
}
EXPORT_SYMBOL(dt_txn_hook_stop);
-int dt_txn_hook_commit(const struct lu_context *ctxt, struct thandle *txn)
+int dt_txn_hook_commit(const struct lu_env *env, struct thandle *txn)
{
struct dt_device *dev = txn->th_dev;
struct dt_txn_callback *cb;
list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
if (cb->dtc_txn_commit == NULL)
continue;
- result = cb->dtc_txn_commit(ctxt, txn, cb->dtc_cookie);
+ result = cb->dtc_txn_commit(env, txn, cb->dtc_cookie);
if (result < 0)
break;
}
}
EXPORT_SYMBOL(dt_object_fini);
-int dt_try_as_dir(const struct lu_context *ctx, struct dt_object *obj)
+int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj)
{
if (obj->do_index_ops == NULL)
- obj->do_ops->do_index_try(ctx, obj, &dt_directory_features);
+ obj->do_ops->do_index_try(env, obj, &dt_directory_features);
return obj->do_index_ops != NULL;
}
EXPORT_SYMBOL(dt_try_as_dir);
-static int dt_lookup(const struct lu_context *ctx, struct dt_object *dir,
+static int dt_lookup(const struct lu_env *env, struct dt_object *dir,
const char *name, struct lu_fid *fid)
{
struct dt_rec *rec = (struct dt_rec *)fid;
const struct dt_key *key = (const struct dt_key *)name;
int result;
- if (dt_try_as_dir(ctx, dir))
- result = dir->do_index_ops->dio_lookup(ctx, dir, rec, key);
+ if (dt_try_as_dir(env, dir))
+ result = dir->do_index_ops->dio_lookup(env, dir, rec, key);
else
result = -ENOTDIR;
return result;
}
-static struct dt_object *dt_locate(const struct lu_context *ctx,
+static struct dt_object *dt_locate(const struct lu_env *env,
struct dt_device *dev,
const struct lu_fid *fid)
{
struct lu_object *obj;
struct dt_object *dt;
- obj = lu_object_find(ctx, dev->dd_lu_dev.ld_site, fid, BYPASS_CAPA);
+ obj = lu_object_find(env, dev->dd_lu_dev.ld_site, fid, BYPASS_CAPA);
if (!IS_ERR(obj)) {
obj = lu_object_locate(obj->lo_header, dev->dd_lu_dev.ld_type);
LASSERT(obj != NULL);
return dt;
}
-struct dt_object *dt_store_open(const struct lu_context *ctx,
+struct dt_object *dt_store_open(const struct lu_env *env,
struct dt_device *dt, const char *name,
struct lu_fid *fid)
{
struct dt_object *root;
struct dt_object *child;
- result = dt->dd_ops->dt_root_get(ctx, dt, fid);
+ result = dt->dd_ops->dt_root_get(env, dt, fid);
if (result == 0) {
- root = dt_locate(ctx, dt, fid);
+ root = dt_locate(env, dt, fid);
if (!IS_ERR(root)) {
lu_object_bypass_capa(&root->do_lu);
- result = dt_lookup(ctx, root, name, fid);
+ result = dt_lookup(env, root, name, fid);
if (result == 0)
- child = dt_locate(ctx, dt, fid);
+ child = dt_locate(env, dt, fid);
else
child = ERR_PTR(result);
- lu_object_put(ctx, &root->do_lu);
+ lu_object_put(env, &root->do_lu);
} else {
CERROR("No root\n");
child = (void *)root;
#include <lu_object.h>
#include <libcfs/list.h>
-static void lu_object_free(const struct lu_context *ctx, struct lu_object *o);
+static void lu_object_free(const struct lu_env *env, struct lu_object *o);
/*
* Decrease reference counter on object. If last reference is freed, return
* object to the cache, unless lu_object_is_dying(o) holds. In the latter
* case, free object immediately.
*/
-void lu_object_put(const struct lu_context *ctxt, struct lu_object *o)
+void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
struct lu_object_header *top;
struct lu_site *site;
*/
list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
- o->lo_ops->loo_object_release(ctxt, o);
+ o->lo_ops->loo_object_release(env, o);
}
-- site->ls_busy;
if (lu_object_is_dying(top)) {
* Object was already removed from hash and lru above, can
* kill it.
*/
- lu_object_free(ctxt, orig);
+ lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
* This follows object creation protocol, described in the comment within
* struct lu_device_operations definition.
*/
-static struct lu_object *lu_object_alloc(const struct lu_context *ctxt,
+static struct lu_object *lu_object_alloc(const struct lu_env *env,
struct lu_site *s,
const struct lu_fid *f,
const struct lustre_capa *capa)
* Create top-level object slice. This will also create
* lu_object_header.
*/
- top = s->ls_top_dev->ld_ops->ldo_object_alloc(ctxt,
+ top = s->ls_top_dev->ld_ops->ldo_object_alloc(env,
NULL, s->ls_top_dev);
if (IS_ERR(top))
RETURN(top);
continue;
clean = 0;
scan->lo_header = top->lo_header;
- result = scan->lo_ops->loo_object_init(ctxt, scan);
+ result = scan->lo_ops->loo_object_init(env, scan);
if (result != 0) {
- lu_object_free(ctxt, top);
+ lu_object_free(env, top);
RETURN(ERR_PTR(result));
}
scan->lo_flags |= LU_OBJECT_ALLOCATED;
list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_start != NULL) {
- result = scan->lo_ops->loo_object_start(ctxt, scan);
+ result = scan->lo_ops->loo_object_start(env, scan);
if (result != 0) {
- lu_object_free(ctxt, top);
+ lu_object_free(env, top);
RETURN(ERR_PTR(result));
}
}
/*
* Free object.
*/
-static void lu_object_free(const struct lu_context *ctx, struct lu_object *o)
+static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
struct list_head splice;
struct lu_object *scan;
list_for_each_entry_reverse(scan,
&o->lo_header->loh_layers, lo_linkage) {
if (scan->lo_ops->loo_object_delete != NULL)
- scan->lo_ops->loo_object_delete(ctx, scan);
+ scan->lo_ops->loo_object_delete(env, scan);
}
-- o->lo_dev->ld_site->ls_total;
/*
o = container_of0(splice.next, struct lu_object, lo_linkage);
list_del_init(&o->lo_linkage);
LASSERT(o->lo_ops->loo_object_free != NULL);
- o->lo_ops->loo_object_free(ctx, o);
+ o->lo_ops->loo_object_free(env, o);
}
}
/*
* Free @nr objects from the cold end of the site LRU list.
*/
-void lu_site_purge(const struct lu_context *ctx, struct lu_site *s, int nr)
+void lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
struct list_head dispose;
struct lu_object_header *h;
h = container_of0(dispose.next,
struct lu_object_header, loh_lru);
list_del_init(&h->loh_lru);
- lu_object_free(ctx, lu_object_top(h));
+ lu_object_free(env, lu_object_top(h));
s->ls_stats.s_lru_purged ++;
}
}
/*
* Printer function emitting messages through libcfs_debug_msg().
*/
-int lu_cdebug_printer(const struct lu_context *ctx,
+int lu_cdebug_printer(const struct lu_env *env,
void *cookie, const char *format, ...)
{
struct lu_cdebug_print_info *info = cookie;
va_start(args, format);
- key = lu_context_key_get(ctx, &lu_cdebug_key);
+ key = lu_context_key_get(&env->le_ctx, &lu_cdebug_key);
LASSERT(key != NULL);
used = strlen(key->lck_area);
/*
* Print object header.
*/
-static void lu_object_header_print(const struct lu_context *ctx,
+static void lu_object_header_print(const struct lu_env *env,
void *cookie, lu_printer_t printer,
const struct lu_object_header *hdr)
{
- (*printer)(ctx, cookie, "header@%p[%#lx, %d, "DFID"%s%s]",
+ (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s]",
hdr, hdr->loh_flags, hdr->loh_ref, PFID(&hdr->loh_fid),
hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
list_empty(&hdr->loh_lru) ? "" : " lru");
/*
* Print human readable representation of the @o to the @printer.
*/
-void lu_object_print(const struct lu_context *ctx, void *cookie,
+void lu_object_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct lu_object *o)
{
static const char ruler[] = "........................................";
int depth;
top = o->lo_header;
- lu_object_header_print(ctx, cookie, printer, top);
- (*printer)(ctx, cookie, "\n");
+ lu_object_header_print(env, cookie, printer, top);
+ (*printer)(env, cookie, "\n");
list_for_each_entry(o, &top->loh_layers, lo_linkage) {
depth = o->lo_depth + 4;
LASSERT(o->lo_ops->loo_object_print != NULL);
/*
* print `.' @depth times.
*/
- (*printer)(ctx, cookie, "%*.*s", depth, depth, ruler);
- o->lo_ops->loo_object_print(ctx, cookie, printer, o);
- (*printer)(ctx, cookie, "\n");
+ (*printer)(env, cookie, "%*.*s", depth, depth, ruler);
+ o->lo_ops->loo_object_print(env, cookie, printer, o);
+ (*printer)(env, cookie, "\n");
}
}
EXPORT_SYMBOL(lu_object_print);
* it. Otherwise, create new object, insert it into cache and return it. In
* any case, additional reference is acquired on the returned object.
*/
-struct lu_object *lu_object_find(const struct lu_context *ctxt,
+struct lu_object *lu_object_find(const struct lu_env *env,
struct lu_site *s, const struct lu_fid *f,
struct lustre_capa *capa)
{
if (capa == BYPASS_CAPA) {
o->lo_header->loh_capa_bypass = 1;
} else {
- rc = lu_object_auth(ctxt, o, capa,
+ rc = lu_object_auth(env, o, capa,
CAPA_OPC_INDEX_LOOKUP);
if (rc)
return ERR_PTR(rc);
* Allocate new object. This may result in rather complicated
* operations, including fld queries, inode loading, etc.
*/
- o = lu_object_alloc(ctxt, s, f, capa);
+ o = lu_object_alloc(env, s, f, capa);
if (IS_ERR(o))
return o;
s->ls_stats.s_cache_race ++;
spin_unlock(&s->ls_guard);
if (o != NULL)
- lu_object_free(ctxt, o);
+ lu_object_free(env, o);
return shadow;
}
EXPORT_SYMBOL(lu_object_find);
-int lu_object_auth(const struct lu_context *ctxt, const struct lu_object *o,
+int lu_object_auth(const struct lu_env *env, const struct lu_object *o,
struct lustre_capa *capa, __u64 opc)
{
struct lu_object_header *top = o->lo_header;
list_for_each_entry(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_auth) {
- rc = o->lo_ops->loo_object_auth(ctxt, o, capa, opc);
+ rc = o->lo_ops->loo_object_auth(env, o, capa, opc);
if (rc)
return rc;
}
}
EXPORT_SYMBOL(lu_context_refill);
+int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags)
+{
+ int result;
+
+ env->le_ses = ses;
+ result = lu_context_init(&env->le_ctx, tags);
+ if (result == 0)
+ lu_context_enter(&env->le_ctx);
+ return result;
+}
+EXPORT_SYMBOL(lu_env_init);
+
+void lu_env_fini(struct lu_env *env)
+{
+ lu_context_exit(&env->le_ctx);
+ lu_context_fini(&env->le_ctx);
+ env->le_ses = NULL;
+}
+EXPORT_SYMBOL(lu_env_fini);
+
/*
* Initialization of global lu_* data.
*/
LPROC_ECHO_LAST = LPROC_ECHO_WRITE_BYTES +1
};
-static int echo_connect(const struct lu_context *ctx,
+static int echo_connect(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid, struct obd_connect_data *data)
{
}
if (!(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
- CERROR("obdo missing FLID valid flag: "LPX64"\n",
+ CERROR("obdo missing FLID valid flag: "LPX64"\n",
oinfo->oi_oa->o_valid);
RETURN(-EINVAL);
}
}
if (!(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
- CERROR("obdo missing FLID valid flag: "LPX64"\n",
+ CERROR("obdo missing FLID valid flag: "LPX64"\n",
oinfo->oi_oa->o_valid);
RETURN(-EINVAL);
}
RETURN(-ENOMEM);
}
- rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id, LDLM_PLAIN,
- NULL, LCK_NL, &lock_flags, NULL,
- ldlm_completion_ast, NULL, NULL,
+ rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id, LDLM_PLAIN,
+ NULL, LCK_NL, &lock_flags, NULL,
+ ldlm_completion_ast, NULL, NULL,
0, NULL, &obd->u.echo.eo_nl_lock);
LASSERT (rc == ELDLM_OK);
/* sleep until we have a page to send */
spin_unlock(&eas.eas_lock);
- rc = wait_event_interruptible(eas.eas_waitq,
+ rc = wait_event_interruptible(eas.eas_waitq,
eas_should_wake(&eas));
spin_lock(&eas.eas_lock);
if (rc && !eas.eas_rc)
/* now we just spin waiting for all the rpcs to complete */
while(eas.eas_in_flight) {
spin_unlock(&eas.eas_lock);
- wait_event_interruptible(eas.eas_waitq,
+ wait_event_interruptible(eas.eas_waitq,
eas.eas_in_flight == 0);
spin_lock(&eas.eas_lock);
}
RETURN(rc);
}
-static int echo_client_connect(const struct lu_context *ctx,
+static int echo_client_connect(const struct lu_env *env,
struct lustre_handle *conn,
struct obd_device *src, struct obd_uuid *cluuid,
struct obd_connect_data *data)
filter->fo_subdir_count = le16_to_cpu(fsd->lsd_subdir_count);
/* COMPAT_146 */
/* Assume old last_rcvd format unless I_C_LR is set */
- if (!(fsd->lsd_feature_incompat &
+ if (!(fsd->lsd_feature_incompat &
cpu_to_le32(OBD_INCOMPAT_COMMON_LR)))
fsd->lsd_last_transno = fsd->lsd_compat14;
/* end COMPAT_146 */
CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: "LPU64
" srv lr: "LPU64" fcd_group %d\n", fcd->fcd_uuid, cl_idx,
- last_rcvd, le64_to_cpu(fsd->lsd_last_transno),
+ last_rcvd, le64_to_cpu(fsd->lsd_last_transno),
le32_to_cpu(fcd->fcd_group));
if (IS_ERR(exp)) {
if (PTR_ERR(exp) == -EALREADY) {
f_dput(dentry);
}
OBD_FREE(filter->fo_dentry_O_groups,
- filter->fo_group_count *
+ filter->fo_group_count *
sizeof(*filter->fo_dentry_O_groups));
filter->fo_dentry_O_groups = NULL;
}
filp_close(filp, 0);
}
OBD_FREE(filter->fo_last_objid_files,
- filter->fo_group_count *
+ filter->fo_group_count *
sizeof(*filter->fo_last_objid_files));
filter->fo_last_objid_files = NULL;
}
}
if (filter->fo_last_objids != NULL) {
OBD_FREE(filter->fo_last_objids,
- filter->fo_group_count *
+ filter->fo_group_count *
sizeof(*filter->fo_last_objids));
filter->fo_last_objids = NULL;
}
}
filter_update_last_group(obd, group);
-
+
if (filp->f_dentry->d_inode->i_size == 0) {
filter->fo_last_objids[group] = FILTER_INIT_OBJID;
rc = filter_update_last_objid(obd, group, 1);
rc = filter_read_groups(obd, last_group, 1);
if (rc)
GOTO(cleanup, rc);
-
+
filp_close(filp, 0);
RETURN(0);
CERROR("error writing server data: rc = %d\n", rc);
for (i = 1; i < filter->fo_group_count; i++) {
- rc = filter_update_last_objid(obd, i,
+ rc = filter_update_last_objid(obd, i,
(i == filter->fo_group_count - 1));
if (rc)
CERROR("error writing group %d lastobjid: rc = %d\n",
RETURN(dchild);
}
-static int filter_prepare_destroy(struct obd_device *obd, obd_id objid,
+static int filter_prepare_destroy(struct obd_device *obd, obd_id objid,
obd_id group)
{
struct lustre_handle lockh;
ENTRY;
/* Tell the clients that the object is gone now and that they should
* throw away any cached pages. */
- rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id, LDLM_EXTENT,
- &policy, LCK_PW, &flags, ldlm_blocking_ast,
- ldlm_completion_ast, NULL, NULL, 0, NULL,
+ rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id, LDLM_EXTENT,
+ &policy, LCK_PW, &flags, ldlm_blocking_ast,
+ ldlm_completion_ast, NULL, NULL, 0, NULL,
&lockh);
/* We only care about the side-effects, just drop the lock. */
obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
obd->obd_lvfs_ctxt.fs = get_ds();
obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
-
+
sema_init(&filter->fo_init_lock, 1);
filter->fo_committed_group = 0;
-
+
rc = filter_prep(obd);
if (rc)
GOTO(err_ops, rc);
lop_add: llog_obd_origin_add
};
-static int filter_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
- struct obd_device *tgt, int count,
+static int filter_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
+ struct obd_device *tgt, int count,
struct llog_catid *catid,
struct obd_uuid *uuid)
{
{
int rc = 0;
ENTRY;
-
+
if (CTXTP(ctxt, cleanup))
rc = CTXTP(ctxt, cleanup)(ctxt);
-
+
if (ctxt->loc_exp)
class_export_put(ctxt->loc_exp);
OBD_FREE(ctxt, sizeof(*ctxt));
struct llog_ctxt *ctxt;
int rc = 0, rc2 = 0;
ENTRY;
-
+
ctxt = llog_get_context_from_llogs(llogs, LLOG_MDS_OST_REPL_CTXT);
if (ctxt)
rc = filter_group_llog_cleanup(ctxt);
struct llog_ctxt *ctxt;
int rc = 0, rc2 = 0;
ENTRY;
-
+
ctxt = llog_get_context(obd, LLOG_MDS_OST_REPL_CTXT);
if (ctxt)
rc = llog_cleanup(ctxt);
init:
if (export) {
fglog->exp = export;
- ctxt = llog_get_context_from_llogs(fglog->llogs,
+ ctxt = llog_get_context_from_llogs(fglog->llogs,
LLOG_MDS_OST_REPL_CTXT);
LASSERT(ctxt != NULL);
rc = llog_connect(ctxt, 1, &body->lgdc_logid,
&body->lgdc_gen, NULL);
if (rc != 0)
- CERROR("failed to connect rc %d idx %d\n", rc,
+ CERROR("failed to connect rc %d idx %d\n", rc,
body->lgdc_ctxt_idx);
RETURN(rc);
filter = &obd->u.filter;
spin_lock(&filter->fo_llog_list_lock);
while (!list_empty(&filter->fo_llog_list)) {
- log = list_entry(filter->fo_llog_list.next,
+ log = list_entry(filter->fo_llog_list.next,
struct filter_group_llog, list);
list_del(&log->list);
spin_unlock(&filter->fo_llog_list_lock);
spin_lock(&filter->fo_llog_list_lock);
}
spin_unlock(&filter->fo_llog_list_lock);
-
+
rc = obd_llog_finish(obd, 0);
if (rc)
CERROR("failed to cleanup llogging subsystem\n");
case OBD_CLEANUP_EXPORTS:
target_cleanup_recovery(obd);
break;
- case OBD_CLEANUP_SELF_EXP:
+ case OBD_CLEANUP_SELF_EXP:
rc = filter_llog_preclean(obd);
break;
case OBD_CLEANUP_OBD:
unlock_kernel();
must_relock++;
}
-
+
if (must_put) {
/* In case we didn't mount with lustre_get_mount -- old method*/
mntput(filter->fo_vfsmnt);
data->ocd_connect_flags &= OST_CONNECT_SUPPORTED;
exp->exp_connect_flags = data->ocd_connect_flags;
if (exp->exp_imp_reverse)
- exp->exp_imp_reverse->imp_connect_data.ocd_connect_flags
+ exp->exp_imp_reverse->imp_connect_data.ocd_connect_flags
= data->ocd_connect_flags;
data->ocd_version = LUSTRE_VERSION_CODE;
}
/* nearly identical to mds_connect */
-static int filter_connect(const struct lu_context *ctx,
+static int filter_connect(const struct lu_env *env,
struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid,
struct obd_connect_data *data)
LASSERT(exp != NULL);
fed = &exp->exp_filter_data;
-
+
rc = filter_connect_internal(exp, data);
if (rc)
GOTO(cleanup, rc);
rc = filter_client_add(obd, filter, fed, -1);
if (rc)
GOTO(cleanup, rc);
- }
+ }
CWARN("%s: Received MDS connection ("LPX64"); group %d\n",
obd->obd_name, exp->exp_handle.h_cookie, group);
if (group == 0)
GOTO(cleanup, rc);
-
+
if (fed->fed_group != 0 && fed->fed_group != group) {
CERROR("!!! This export (nid %s) used object group %d "
"earlier; now it's trying to use group %d! This could "
group = 1 << 30;
spin_lock(&filter->fo_llog_list_lock);
list_for_each_entry(nlog, &filter->fo_llog_list, list) {
-
+
if (nlog->group <= worked) {
/* this group is already synced */
continue;
}
-
+
if (group < nlog->group) {
/* we have group with smaller number to sync */
continue;
int filter_setattr(struct obd_export *exp, struct obd_info *oinfo,
struct obd_trans_info *oti)
{
- struct ldlm_res_id res_id = { .name = { oinfo->oi_oa->o_id, 0,
+ struct ldlm_res_id res_id = { .name = { oinfo->oi_oa->o_id, 0,
oinfo->oi_oa->o_gr, 0 } };
struct ldlm_valblock_ops *ns_lvbo;
struct filter_mod_data *fmd;
/* setup llog imports */
LASSERT(val != NULL);
group = (int)(*(__u32 *)val);
- LASSERT(group >= FILTER_GROUP_MDS0);
-
+ LASSERT(group >= FILTER_GROUP_MDS0);
+
llog = filter_grab_llog_for_group(obd, group, exp);
LASSERT(llog != NULL);
ctxt = llog_get_context_from_llogs(llog, LLOG_MDS_OST_REPL_CTXT);
int rc = 0;
lprocfs_init_vars(filter, &lvars);
-
+
rc = class_process_proc_param(PARAM_OST, lvars.obd_vars, lcfg, obd);
return(rc);
}
struct iam_container oo_container;
struct iam_descr oo_descr;
struct iam_path_descr *oo_ipd;
- const struct lu_context *oo_owner;
+ const struct lu_env *oo_owner;
};
/*
*/
struct dentry *od_obj_area;
- /* Thread context for transaction commit callback.
+ /* Environment for transaction commit callback.
* Currently, OSD is based on ext3/JBD. Transaction commit in ext3/JBD
* is serialized, that is there is no more than one transaction commit
* at a time (JBD journal_commit_transaction() is serialized).
- * This means that it's enough to have _one_ lu_context.
+ * This means that it's enough to have _one_ lu_env.
*/
- struct lu_context od_ctx_for_commit;
+ struct lu_env od_env_for_commit;
};
-static int osd_root_get (const struct lu_context *ctxt,
+static int osd_root_get (const struct lu_env *env,
struct dt_device *dev, struct lu_fid *f);
-static int osd_statfs (const struct lu_context *ctxt,
+static int osd_statfs (const struct lu_env *env,
struct dt_device *dev, struct kstatfs *sfs);
static int lu_device_is_osd (const struct lu_device *d);
static int osd_mod_init (void) __init;
static int osd_type_init (struct lu_device_type *t);
static void osd_type_fini (struct lu_device_type *t);
-static int osd_object_init (const struct lu_context *ctxt,
+static int osd_object_init (const struct lu_env *env,
struct lu_object *l);
-static void osd_object_release(const struct lu_context *ctxt,
+static void osd_object_release(const struct lu_env *env,
struct lu_object *l);
-static int osd_object_print (const struct lu_context *ctx, void *cookie,
+static int osd_object_print (const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
-static void osd_device_free (const struct lu_context *ctx,
+static void osd_device_free (const struct lu_env *env,
struct lu_device *m);
static void *osd_key_init (const struct lu_context *ctx,
struct lu_context_key *key);
struct lu_context_key *key, void *data);
static int osd_has_index (const struct osd_object *obj);
static void osd_object_init0 (struct osd_object *obj);
-static int osd_device_init (const struct lu_context *ctx,
+static int osd_device_init (const struct lu_env *env,
struct lu_device *d, struct lu_device *);
-static int osd_fid_lookup (const struct lu_context *ctx,
+static int osd_fid_lookup (const struct lu_env *env,
struct osd_object *obj,
const struct lu_fid *fid);
-static int osd_inode_getattr (const struct lu_context *ctx,
+static int osd_inode_getattr (const struct lu_env *env,
struct inode *inode, struct lu_attr *attr);
-static int osd_inode_setattr (const struct lu_context *ctx,
+static int osd_inode_setattr (const struct lu_env *env,
struct inode *inode, const struct lu_attr *attr);
static int osd_param_is_sane (const struct osd_device *dev,
const struct txn_param *param);
-static int osd_index_lookup (const struct lu_context *ctxt,
+static int osd_index_lookup (const struct lu_env *env,
struct dt_object *dt,
struct dt_rec *rec, const struct dt_key *key);
-static int osd_index_insert (const struct lu_context *ctxt,
+static int osd_index_insert (const struct lu_env *env,
struct dt_object *dt,
const struct dt_rec *rec,
const struct dt_key *key,
struct thandle *handle);
-static int osd_index_delete (const struct lu_context *ctxt,
+static int osd_index_delete (const struct lu_env *env,
struct dt_object *dt, const struct dt_key *key,
struct thandle *handle);
-static int osd_index_probe (const struct lu_context *ctxt,
+static int osd_index_probe (const struct lu_env *env,
struct osd_object *o,
const struct dt_index_features *feat);
-static int osd_index_try (const struct lu_context *ctx,
+static int osd_index_try (const struct lu_env *env,
struct dt_object *dt,
const struct dt_index_features *feat);
static void osd_index_fini (struct osd_object *o);
-static void osd_it_fini (const struct lu_context *ctx, struct dt_it *di);
-static int osd_it_get (const struct lu_context *ctx,
+static void osd_it_fini (const struct lu_env *env, struct dt_it *di);
+static int osd_it_get (const struct lu_env *env,
struct dt_it *di, const struct dt_key *key);
-static void osd_it_put (const struct lu_context *ctx, struct dt_it *di);
-static int osd_it_next (const struct lu_context *ctx, struct dt_it *di);
-static int osd_it_del (const struct lu_context *ctx, struct dt_it *di,
+static void osd_it_put (const struct lu_env *env, struct dt_it *di);
+static int osd_it_next (const struct lu_env *env, struct dt_it *di);
+static int osd_it_del (const struct lu_env *env, struct dt_it *di,
struct thandle *th);
-static int osd_it_key_size (const struct lu_context *ctx,
+static int osd_it_key_size (const struct lu_env *env,
const struct dt_it *di);
-static void osd_conf_get (const struct lu_context *ctx,
+static void osd_conf_get (const struct lu_env *env,
const struct dt_device *dev,
struct dt_device_param *param);
-static int osd_read_locked (const struct lu_context *ctx,
+static int osd_read_locked (const struct lu_env *env,
struct osd_object *o);
-static int osd_write_locked (const struct lu_context *ctx,
+static int osd_write_locked (const struct lu_env *env,
struct osd_object *o);
-static void osd_trans_stop (const struct lu_context *ctx,
+static void osd_trans_stop (const struct lu_env *env,
struct thandle *th);
static struct osd_object *osd_obj (const struct lu_object *o);
static struct osd_object *osd_dt_obj (const struct dt_object *d);
static struct osd_device *osd_obj2dev (const struct osd_object *o);
static struct lu_device *osd2lu_dev (struct osd_device *osd);
-static struct lu_device *osd_device_fini (const struct lu_context *ctx,
+static struct lu_device *osd_device_fini (const struct lu_env *env,
struct lu_device *d);
-static struct lu_device *osd_device_alloc (const struct lu_context *ctx,
+static struct lu_device *osd_device_alloc (const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg);
-static struct lu_object *osd_object_alloc (const struct lu_context *ctx,
+static struct lu_object *osd_object_alloc (const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *d);
static struct inode *osd_iget (struct osd_thread_info *info,
struct osd_device *dev,
const struct osd_inode_id *id);
static struct super_block *osd_sb (const struct osd_device *dev);
-static struct dt_it *osd_it_init (const struct lu_context *ctx,
+static struct dt_it *osd_it_init (const struct lu_env *env,
struct dt_object *dt, int wable);
-static struct dt_key *osd_it_key (const struct lu_context *ctx,
+static struct dt_key *osd_it_key (const struct lu_env *env,
const struct dt_it *di);
-static struct dt_rec *osd_it_rec (const struct lu_context *ctx,
+static struct dt_rec *osd_it_rec (const struct lu_env *env,
const struct dt_it *di);
-static struct timespec *osd_inode_time (const struct lu_context *ctx,
+static struct timespec *osd_inode_time (const struct lu_env *env,
struct inode *inode,
__u64 seconds);
-static struct thandle *osd_trans_start (const struct lu_context *ctx,
+static struct thandle *osd_trans_start (const struct lu_env *env,
struct dt_device *d,
struct txn_param *p);
static journal_t *osd_journal (const struct osd_device *dev);
#define osd_invariant(obj) (1)
#endif
-static int osd_read_locked(const struct lu_context *ctx, struct osd_object *o)
+static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
{
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
return oti->oti_r_locks > 0;
}
-static int osd_write_locked(const struct lu_context *ctx, struct osd_object *o)
+static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
{
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
- return oti->oti_w_locks > 0 && o->oo_owner == ctx;
+ return oti->oti_w_locks > 0 && o->oo_owner == env;
}
static void osd_fid_build_name(const struct lu_fid *fid, char *name)
}
/* helper to push us into KERNEL_DS context */
-static struct file *osd_rw_init(const struct lu_context *ctxt,
+static struct file *osd_rw_init(const struct lu_env *env,
struct inode *inode, mm_segment_t *seg)
{
- struct osd_thread_info *info = lu_context_key_get(ctxt, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
struct dentry *dentry = &info->oti_dentry;
struct file *file = &info->oti_file;
set_fs(*seg);
}
-static int osd_root_get(const struct lu_context *ctx,
+static int osd_root_get(const struct lu_env *env,
struct dt_device *dev, struct lu_fid *f)
{
struct inode *inode;
* OSD object methods.
*/
-static struct lu_object *osd_object_alloc(const struct lu_context *ctx,
+static struct lu_object *osd_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *d)
{
(LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
}
-static int osd_object_init(const struct lu_context *ctxt, struct lu_object *l)
+static int osd_object_init(const struct lu_env *env, struct lu_object *l)
{
struct osd_object *obj = osd_obj(l);
int result;
LASSERT(osd_invariant(obj));
- result = osd_fid_lookup(ctxt, obj, lu_object_fid(l));
+ result = osd_fid_lookup(env, obj, lu_object_fid(l));
if (result == 0) {
if (obj->oo_inode != NULL)
osd_object_init0(obj);
return result;
}
-static void osd_object_free(const struct lu_context *ctx, struct lu_object *l)
+static void osd_object_free(const struct lu_env *env, struct lu_object *l)
{
struct osd_object *obj = osd_obj(l);
OSD_TXN_RMENTRY_CREDITS = 20
};
-static int osd_inode_remove(const struct lu_context *ctx,
+static int osd_inode_remove(const struct lu_env *env,
struct osd_object *obj)
{
const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
struct osd_device *osd = osd_obj2dev(obj);
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
struct txn_param *prm = &oti->oti_txn;
struct thandle *th;
struct dentry *dentry;
int result;
prm->tp_credits = OSD_TXN_OI_DELETE_CREDITS + OSD_TXN_RMENTRY_CREDITS;
- th = osd_trans_start(ctx, &osd->od_dt_dev, prm);
+ th = osd_trans_start(env, &osd->od_dt_dev, prm);
if (!IS_ERR(th)) {
osd_oi_write_lock(&osd->od_oi);
result = osd_oi_delete(oti, &osd->od_oi, fid, th);
osd_oi_write_unlock(&osd->od_oi);
- /*
+ /*
* The following is added by huanghua@clusterfs.com as
- * a temporary hack, to remove the directory entry in
+ * a temporary hack to remove the directory entry in
* "*OBJ_TEMP*". We will finally do not use this hack,
* and at that time we will remove these code.
*/
dput(dentry);
} else
iput(obj->oo_inode);
- osd_trans_stop(ctx, th);
+ osd_trans_stop(env, th);
} else
result = PTR_ERR(th);
return result;
* Called just before object is freed. Releases all resources except for
* object itself (that is released by osd_object_free()).
*/
-static void osd_object_delete(const struct lu_context *ctx, struct lu_object *l)
+static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
{
struct osd_object *obj = osd_obj(l);
struct inode *inode = obj->oo_inode;
* ("*OBJ-TEMP*"), but name in that directory is _not_ counted in
* inode ->i_nlink.
*/
-
+
osd_index_fini(obj);
if (inode != NULL) {
int result;
if (osd_inode_unlinked(inode)) {
- result = osd_inode_remove(ctx, obj);
+ result = osd_inode_remove(env, obj);
if (result != 0)
- LU_OBJECT_DEBUG(D_ERROR, ctx, l,
+ LU_OBJECT_DEBUG(D_ERROR, env, l,
"Failed to cleanup: %d\n",
result);
} else
}
}
-static void osd_object_release(const struct lu_context *ctxt,
+static void osd_object_release(const struct lu_env *env,
struct lu_object *l)
{
struct osd_object *o = osd_obj(l);
set_bit(LU_OBJECT_HEARD_BANSHEE, &l->lo_header->loh_flags);
}
-static int osd_object_print(const struct lu_context *ctx, void *cookie,
+static int osd_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *l)
{
struct osd_object *o = osd_obj(l);
struct iam_descr *d;
d = o->oo_container.ic_descr;
- return (*p)(ctx, cookie, LUSTRE_OSD_NAME"-object@%p(i:%p:%lu/%u)[%s]",
+ return (*p)(env, cookie, LUSTRE_OSD_NAME"-object@%p(i:%p:%lu/%u)[%s]",
o, o->oo_inode,
o->oo_inode ? o->oo_inode->i_ino : 0UL,
o->oo_inode ? o->oo_inode->i_generation : 0,
d ? d->id_ops->id_name : "plain");
}
-static int osd_statfs(const struct lu_context *ctx,
+static int osd_statfs(const struct lu_env *env,
struct dt_device *d, struct kstatfs *sfs)
{
struct osd_device *osd = osd_dt_dev(d);
RETURN (result);
}
-static void osd_conf_get(const struct lu_context *ctx,
+static void osd_conf_get(const struct lu_env *env,
const struct dt_device *dev,
struct dt_device_param *param)
{
- /* This dd_ctx_for_commit is only for commit usage.
+ /* This od_env_for_commit is only for commit usage.
* see "struct dt_device"
*/
- dt_txn_hook_commit(&osd_dt_dev(dev)->od_ctx_for_commit, th);
+ dt_txn_hook_commit(&osd_dt_dev(dev)->od_env_for_commit, th);
}
lu_device_put(&dev->dd_lu_dev);
OBD_FREE_PTR(oh);
}
-static struct thandle *osd_trans_start(const struct lu_context *ctx,
+static struct thandle *osd_trans_start(const struct lu_env *env,
struct dt_device *d,
struct txn_param *p)
{
handle_t *jh;
struct osd_thandle *oh;
struct thandle *th;
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
int hook_res;
ENTRY;
- hook_res = dt_txn_hook_start(ctx, d, p);
+ hook_res = dt_txn_hook_start(env, d, p);
if (hook_res != 0)
RETURN(ERR_PTR(hook_res));
RETURN(th);
}
-static void osd_trans_stop(const struct lu_context *ctx, struct thandle *th)
+static void osd_trans_stop(const struct lu_env *env, struct thandle *th)
{
int result;
struct osd_thandle *oh;
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
ENTRY;
/*
* XXX temporary stuff. Some abstraction layer should be used.
*/
- result = dt_txn_hook_stop(ctx, th);
+ result = dt_txn_hook_stop(env, th);
if (result != 0)
CERROR("Failure in transaction hook: %d\n", result);
EXIT;
}
-static int osd_sync(const struct lu_context *ctx, struct dt_device *d)
+static int osd_sync(const struct lu_env *env, struct dt_device *d)
{
CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_NAME);
return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
}
-static void osd_ro(const struct lu_context *ctx, struct dt_device *d)
+static void osd_ro(const struct lu_env *env, struct dt_device *d)
{
ENTRY;
.dt_ro = osd_ro
};
-static void osd_object_read_lock(const struct lu_context *ctx,
+static void osd_object_read_lock(const struct lu_env *env,
struct dt_object *dt)
{
struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
LASSERT(osd_invariant(obj));
- LASSERT(obj->oo_owner != ctx);
+ LASSERT(obj->oo_owner != env);
down_read(&obj->oo_sem);
LASSERT(obj->oo_owner == NULL);
oti->oti_r_locks++;
}
-static void osd_object_write_lock(const struct lu_context *ctx,
+static void osd_object_write_lock(const struct lu_env *env,
struct dt_object *dt)
{
struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
LASSERT(osd_invariant(obj));
- LASSERT(obj->oo_owner != ctx);
+ LASSERT(obj->oo_owner != env);
down_write(&obj->oo_sem);
LASSERT(obj->oo_owner == NULL);
- obj->oo_owner = ctx;
+ obj->oo_owner = env;
oti->oti_w_locks++;
}
-static void osd_object_read_unlock(const struct lu_context *ctx,
+static void osd_object_read_unlock(const struct lu_env *env,
struct dt_object *dt)
{
struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
LASSERT(osd_invariant(obj));
LASSERT(oti->oti_r_locks > 0);
up_read(&obj->oo_sem);
}
-static void osd_object_write_unlock(const struct lu_context *ctx,
+static void osd_object_write_unlock(const struct lu_env *env,
struct dt_object *dt)
{
struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
LASSERT(osd_invariant(obj));
- LASSERT(obj->oo_owner == ctx);
+ LASSERT(obj->oo_owner == env);
LASSERT(oti->oti_w_locks > 0);
oti->oti_w_locks--;
obj->oo_owner = NULL;
up_write(&obj->oo_sem);
}
-static inline int osd_object_auth(const struct lu_context *ctx,
+static inline int osd_object_auth(const struct lu_env *env,
const struct lu_object *o,
__u64 opc)
{
- return o->lo_ops->loo_object_auth(ctx, o, lu_object_capa(o), opc);
+ return o->lo_ops->loo_object_auth(env, o, lu_object_capa(o), opc);
}
-static int osd_attr_get(const struct lu_context *ctxt,
+static int osd_attr_get(const struct lu_env *env,
struct dt_object *dt,
struct lu_attr *attr)
{
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
- LASSERT(osd_read_locked(ctxt, obj) || osd_write_locked(ctxt, obj));
+ LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_META_READ))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_META_READ))
return -EACCES;
- return osd_inode_getattr(ctxt, obj->oo_inode, attr);
+ return osd_inode_getattr(env, obj->oo_inode, attr);
}
-static int osd_attr_set(const struct lu_context *ctxt,
+static int osd_attr_set(const struct lu_env *env,
struct dt_object *dt,
const struct lu_attr *attr,
struct thandle *handle)
LASSERT(handle != NULL);
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
- LASSERT(osd_write_locked(ctxt, obj));
+ LASSERT(osd_write_locked(env, obj));
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_META_WRITE))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_META_WRITE))
return -EACCES;
- return osd_inode_setattr(ctxt, obj->oo_inode, attr);
+ return osd_inode_setattr(env, obj->oo_inode, attr);
}
-static struct timespec *osd_inode_time(const struct lu_context *ctx,
+static struct timespec *osd_inode_time(const struct lu_env *env,
struct inode *inode, __u64 seconds)
{
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
struct timespec *t = &oti->oti_time;
t->tv_sec = seconds;
return t;
}
-static int osd_inode_setattr(const struct lu_context *ctx,
+static int osd_inode_setattr(const struct lu_env *env,
struct inode *inode, const struct lu_attr *attr)
{
__u64 bits;
LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
if (bits & LA_ATIME)
- inode->i_atime = *osd_inode_time(ctx, inode, attr->la_atime);
+ inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
if (bits & LA_CTIME)
- inode->i_ctime = *osd_inode_time(ctx, inode, attr->la_ctime);
+ inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
if (bits & LA_MTIME)
- inode->i_mtime = *osd_inode_time(ctx, inode, attr->la_mtime);
+ inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
if (bits & LA_SIZE)
LDISKFS_I(inode)->i_disksize = inode->i_size = attr->la_size;
if (bits & LA_BLOCKS)
return result;
}
-static int osd_object_create(const struct lu_context *ctx, struct dt_object *dt,
+static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
struct lu_attr *attr, struct thandle *th)
{
const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
- struct osd_thread_info *info = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
int result;
ENTRY;
LASSERT(osd_invariant(obj));
LASSERT(!dt_object_exists(dt));
- LASSERT(osd_write_locked(ctx, obj));
+ LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
/*
* XXX missing: permission checks.
*/
- if (osd_object_auth(ctx, &dt->do_lu, CAPA_OPC_INDEX_INSERT))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_INDEX_INSERT))
RETURN(-EACCES);
/*
return result;
}
-static void osd_object_ref_add(const struct lu_context *ctxt,
+static void osd_object_ref_add(const struct lu_env *env,
struct dt_object *dt, struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
- LASSERT(osd_write_locked(ctxt, obj));
+ LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_META_WRITE)) {
- LU_OBJECT_DEBUG(D_ERROR, ctxt, &dt->do_lu,
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_META_WRITE)) {
+ LU_OBJECT_DEBUG(D_ERROR, env, &dt->do_lu,
"no capability to link!\n");
return;
}
inode->i_nlink ++;
mark_inode_dirty(inode);
} else
- LU_OBJECT_DEBUG(D_ERROR, ctxt, &dt->do_lu,
+ LU_OBJECT_DEBUG(D_ERROR, env, &dt->do_lu,
"Overflowed nlink\n");
LASSERT(osd_invariant(obj));
}
-static void osd_object_ref_del(const struct lu_context *ctxt,
+static void osd_object_ref_del(const struct lu_env *env,
struct dt_object *dt, struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
- LASSERT(osd_write_locked(ctxt, obj));
+ LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_META_WRITE)) {
- LU_OBJECT_DEBUG(D_ERROR, ctxt, &dt->do_lu,
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_META_WRITE)) {
+ LU_OBJECT_DEBUG(D_ERROR, env, &dt->do_lu,
"no capability to unlink!\n");
return;
}
inode->i_nlink --;
mark_inode_dirty(inode);
} else
- LU_OBJECT_DEBUG(D_ERROR, ctxt, &dt->do_lu,
+ LU_OBJECT_DEBUG(D_ERROR, env, &dt->do_lu,
"Underflowed nlink\n");
LASSERT(osd_invariant(obj));
}
-static int osd_xattr_get(const struct lu_context *ctxt, struct dt_object *dt,
+static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
void *buf, int size, const char *name)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = lu_context_key_get(ctxt, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
struct dentry *dentry = &info->oti_dentry;
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
- LASSERT(osd_read_locked(ctxt, obj) || osd_write_locked(ctxt, obj));
+ LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_META_READ))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_META_READ))
return -EACCES;
dentry->d_inode = inode;
return inode->i_op->getxattr(dentry, name, buf, size);
}
-static int osd_xattr_set(const struct lu_context *ctxt, struct dt_object *dt,
+static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
const void *buf, int size, const char *name, int fl,
struct thandle *handle)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = lu_context_key_get(ctxt, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
struct dentry *dentry = &info->oti_dentry;
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
- LASSERT(osd_write_locked(ctxt, obj));
+ LASSERT(osd_write_locked(env, obj));
LASSERT(handle != NULL);
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_META_WRITE))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_META_WRITE))
return -EACCES;
dentry->d_inode = inode;
return inode->i_op->setxattr(dentry, name, buf, size, fs_flags);
}
-static int osd_xattr_list(const struct lu_context *ctxt, struct dt_object *dt,
+static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
void *buf, int size)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = lu_context_key_get(ctxt, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
struct dentry *dentry = &info->oti_dentry;
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
- LASSERT(osd_read_locked(ctxt, obj) || osd_write_locked(ctxt, obj));
+ LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_META_READ))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_META_READ))
return -EACCES;
dentry->d_inode = inode;
return inode->i_op->listxattr(dentry, buf, size);
}
-static int osd_xattr_del(const struct lu_context *ctxt, struct dt_object *dt,
+static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
const char *name, struct thandle *handle)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = lu_context_key_get(ctxt, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
struct dentry *dentry = &info->oti_dentry;
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
- LASSERT(osd_write_locked(ctxt, obj));
+ LASSERT(osd_write_locked(env, obj));
LASSERT(handle != NULL);
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_META_WRITE))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_META_WRITE))
return -EACCES;
dentry->d_inode = inode;
return inode->i_op->removexattr(dentry, name);
}
-static int osd_dir_page_build(const struct lu_context *ctx, int first,
+static int osd_dir_page_build(const struct lu_env *env, int first,
void *area, int nob,
struct dt_it_ops *iops, struct dt_it *it,
__u32 *start, __u32 *end, struct lu_dirent **last)
{
int result;
- struct osd_thread_info *info = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
struct lu_fid *fid = &info->oti_fid;
struct lu_dirent *ent;
int recsize;
__u32 hash;
- name = (char *)iops->key(ctx, it);
- len = iops->key_size(ctx, it);
+ name = (char *)iops->key(env, it);
+ len = iops->key_size(env, it);
- *fid = *(struct lu_fid *)iops->rec(ctx, it);
+ *fid = *(struct lu_fid *)iops->rec(env, it);
fid_cpu_to_le(fid, fid);
recsize = (sizeof *ent + len + 3) & ~3;
- hash = iops->store(ctx, it);
+ hash = iops->store(env, it);
*end = hash;
CDEBUG(D_INODE, "%p %p %d "DFID": %#8.8x (%d)\"%*.*s\"\n",
area, ent, nob, PFID(fid), hash, len, len, len, name);
*last = ent;
ent = (void *)ent + recsize;
nob -= recsize;
- result = iops->next(ctx, it);
+ result = iops->next(env, it);
} else {
/*
* record doesn't fit into page, enlarge previous one.
return result;
}
-static int osd_readpage(const struct lu_context *ctxt,
+static int osd_readpage(const struct lu_env *env,
struct dt_object *dt, const struct lu_rdpg *rdpg)
{
struct dt_it *it;
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
LASSERT(osd_has_index(obj));
- LASSERT(osd_read_locked(ctxt, obj) || osd_write_locked(ctxt, obj));
+ LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
LASSERT(rdpg->rp_pages != NULL);
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_BODY_READ))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_BODY_READ))
return -EACCES;
if (rdpg->rp_count <= 0)
* iterating through directory and fill pages from @rdpg
*/
iops = &dt->do_index_ops->dio_it;
- it = iops->init(ctxt, dt, 0);
+ it = iops->init(env, dt, 0);
if (it == NULL)
return -ENOMEM;
/*
* XXX position iterator at rdpg->rp_hash
*/
- rc = iops->load(ctxt, it, rdpg->rp_hash);
-
- /* When spliting, it need read entries from some offset by computing
+ rc = iops->load(env, it, rdpg->rp_hash);
+
+ /* When splitting, it needs to read entries from some offset by computing
* not by some entries offset like readdir, so it might return 0 here.
*/
if (rc == 0)
rc1 = -ERANGE;
-
+
if (rc >= 0) {
struct page *pg; /* no, Richard, it _is_ initialized */
struct lu_dirent *last;
rc == 0 && nob > 0; i++, nob -= CFS_PAGE_SIZE) {
LASSERT(i < rdpg->rp_npages);
pg = rdpg->rp_pages[i];
- rc = osd_dir_page_build(ctxt, !i, kmap(pg),
+ rc = osd_dir_page_build(env, !i, kmap(pg),
min_t(int, nob, CFS_PAGE_SIZE),
iops, it,
&hash_start, &hash_end, &last);
last->lde_reclen = 0;
kunmap(pg);
}
- iops->put(ctxt, it);
+ iops->put(env, it);
if (rc > 0) {
/*
* end of directory.
dp->ldp_hash_end = hash_end;
kunmap(rdpg->rp_pages[0]);
}
- }
- iops->put(ctxt, it);
- iops->fini(ctxt, it);
-
+ }
+ iops->put(env, it);
+ iops->fini(env, it);
+
return rc ? rc : rc1;
}
* Body operations.
*/
-static ssize_t osd_read(const struct lu_context *ctxt, struct dt_object *dt,
+static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
void *buf, size_t count, loff_t *pos)
{
struct inode *inode = osd_dt_obj(dt)->oo_inode;
mm_segment_t seg;
ssize_t result;
- file = osd_rw_init(ctxt, inode, &seg);
+ file = osd_rw_init(env, inode, &seg);
/*
* We'd like to use vfs_read() here, but it messes with
* dnotify_parent() and locks.
return result;
}
-static ssize_t osd_write(const struct lu_context *ctxt, struct dt_object *dt,
+static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
const void *buf, size_t count, loff_t *pos,
struct thandle *handle)
{
LASSERT(handle != NULL);
- file = osd_rw_init(ctxt, inode, &seg);
+ file = osd_rw_init(env, inode, &seg);
if (file->f_op->write)
result = file->f_op->write(file, buf, count, pos);
else {
* Index operations.
*/
-static int osd_index_probe(const struct lu_context *ctxt, struct osd_object *o,
+static int osd_index_probe(const struct lu_env *env, struct osd_object *o,
const struct dt_index_features *feat)
{
struct iam_descr *descr;
* writable */);
}
-static int osd_index_try(const struct lu_context *ctx, struct dt_object *dt,
+static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
const struct dt_index_features *feat)
{
int result;
LASSERT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
- if (osd_object_auth(ctx, &dt->do_lu, CAPA_OPC_INDEX_LOOKUP))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_INDEX_LOOKUP))
RETURN(-EACCES);
if (osd_sb(osd_obj2dev(obj))->s_root->d_inode == obj->oo_inode) {
result = 0;
if (result == 0) {
- if (osd_index_probe(ctx, obj, feat))
+ if (osd_index_probe(env, obj, feat))
result = 0;
else
result = -ENOTDIR;
return result;
}
-static int osd_index_delete(const struct lu_context *ctxt, struct dt_object *dt,
+static int osd_index_delete(const struct lu_env *env, struct dt_object *dt,
const struct dt_key *key, struct thandle *handle)
{
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(obj->oo_ipd != NULL);
LASSERT(handle != NULL);
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_INDEX_DELETE))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_INDEX_DELETE))
RETURN(-EACCES);
oh = container_of0(handle, struct osd_thandle, ot_super);
RETURN(rc);
}
-static int osd_index_lookup(const struct lu_context *ctxt, struct dt_object *dt,
+static int osd_index_lookup(const struct lu_env *env, struct dt_object *dt,
struct dt_rec *rec, const struct dt_key *key)
{
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(obj->oo_container.ic_object == obj->oo_inode);
LASSERT(obj->oo_ipd != NULL);
- if (osd_object_auth(ctxt, &dt->do_lu, CAPA_OPC_INDEX_LOOKUP))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_INDEX_LOOKUP))
return -EACCES;
rc = iam_lookup(&obj->oo_container, (const struct iam_key *)key,
RETURN(rc);
}
-static int osd_index_insert(const struct lu_context *ctx, struct dt_object *dt,
+static int osd_index_insert(const struct lu_env *env, struct dt_object *dt,
const struct dt_rec *rec, const struct dt_key *key,
struct thandle *th)
{
LASSERT(obj->oo_ipd != NULL);
LASSERT(th != NULL);
- if (osd_object_auth(ctx, &dt->do_lu, CAPA_OPC_INDEX_INSERT))
+ if (osd_object_auth(env, &dt->do_lu, CAPA_OPC_INDEX_INSERT))
return -EACCES;
oh = container_of0(th, struct osd_thandle, ot_super);
struct iam_iterator oi_it;
};
-static struct dt_it *osd_it_init(const struct lu_context *ctx,
+static struct dt_it *osd_it_init(const struct lu_env *env,
struct dt_object *dt, int writable)
{
struct osd_it *it;
return (struct dt_it *)it;
}
-static void osd_it_fini(const struct lu_context *ctx, struct dt_it *di)
+static void osd_it_fini(const struct lu_env *env, struct dt_it *di)
{
struct osd_it *it = (struct osd_it *)di;
iam_it_fini(&it->oi_it);
- lu_object_put(ctx, &it->oi_obj->oo_dt.do_lu);
+ lu_object_put(env, &it->oi_obj->oo_dt.do_lu);
OBD_FREE_PTR(it);
}
-static int osd_it_get(const struct lu_context *ctx,
+static int osd_it_get(const struct lu_env *env,
struct dt_it *di, const struct dt_key *key)
{
struct osd_it *it = (struct osd_it *)di;
return iam_it_get(&it->oi_it, (const struct iam_key *)key);
}
-static void osd_it_put(const struct lu_context *ctx, struct dt_it *di)
+static void osd_it_put(const struct lu_env *env, struct dt_it *di)
{
struct osd_it *it = (struct osd_it *)di;
iam_it_put(&it->oi_it);
}
-static int osd_it_next(const struct lu_context *ctx, struct dt_it *di)
+static int osd_it_next(const struct lu_env *env, struct dt_it *di)
{
struct osd_it *it = (struct osd_it *)di;
return iam_it_next(&it->oi_it);
}
-static int osd_it_del(const struct lu_context *ctx, struct dt_it *di,
+static int osd_it_del(const struct lu_env *env, struct dt_it *di,
struct thandle *th)
{
struct osd_it *it = (struct osd_it *)di;
return iam_it_rec_delete(oh->ot_handle, &it->oi_it);
}
-static struct dt_key *osd_it_key(const struct lu_context *ctx,
+static struct dt_key *osd_it_key(const struct lu_env *env,
const struct dt_it *di)
{
struct osd_it *it = (struct osd_it *)di;
return (struct dt_key *)iam_it_key_get(&it->oi_it);
}
-static int osd_it_key_size(const struct lu_context *ctx, const struct dt_it *di)
+static int osd_it_key_size(const struct lu_env *env, const struct dt_it *di)
{
struct osd_it *it = (struct osd_it *)di;
return iam_it_key_size(&it->oi_it);
}
-static struct dt_rec *osd_it_rec(const struct lu_context *ctx,
+static struct dt_rec *osd_it_rec(const struct lu_env *env,
const struct dt_it *di)
{
struct osd_it *it = (struct osd_it *)di;
return (struct dt_rec *)iam_it_rec_get(&it->oi_it);
}
-static __u32 osd_it_store(const struct lu_context *ctxt, const struct dt_it *di)
+static __u32 osd_it_store(const struct lu_env *env, const struct dt_it *di)
{
struct osd_it *it = (struct osd_it *)di;
return iam_it_store(&it->oi_it);
}
-static int osd_it_load(const struct lu_context *ctxt,
+static int osd_it_load(const struct lu_env *env,
const struct dt_it *di, __u32 hash)
{
struct osd_it *it = (struct osd_it *)di;
}
};
-static int osd_index_compat_delete(const struct lu_context *ctxt,
+static int osd_index_compat_delete(const struct lu_env *env,
struct dt_object *dt,
const struct dt_key *key,
struct thandle *handle)
return 0;
}
-static int osd_index_compat_lookup(const struct lu_context *ctxt,
+static int osd_index_compat_lookup(const struct lu_env *env,
struct dt_object *dt,
struct dt_rec *rec, const struct dt_key *key)
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
- struct osd_thread_info *info = lu_context_key_get(ctxt, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
struct inode *dir;
int result;
/*
* XXX Temporary stuff.
*/
-static int osd_index_compat_insert(const struct lu_context *ctx,
+static int osd_index_compat_insert(const struct lu_env *env,
struct dt_object *dt,
const struct dt_rec *rec,
const struct dt_key *key, struct thandle *th)
struct lu_device *ludev = dt->do_lu.lo_dev;
struct lu_object *luch;
- struct osd_thread_info *info = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
int result;
LASSERT(osd_invariant(obj));
LASSERT(th != NULL);
- luch = lu_object_find(ctx, ludev->ld_site, fid, BYPASS_CAPA);
+ luch = lu_object_find(env, ludev->ld_site, fid, BYPASS_CAPA);
if (!IS_ERR(luch)) {
if (lu_object_exists(luch)) {
struct osd_object *child;
CERROR("Sorry.\n");
result = -ENOENT;
}
- lu_object_put(ctx, luch);
+ lu_object_put(env, luch);
} else
result = PTR_ERR(luch);
LASSERT(osd_invariant(obj));
OBD_ALLOC_PTR(info);
if (info != NULL)
- info->oti_ctx = ctx;
+ info->oti_env = container_of(ctx, struct lu_env, le_ctx);
else
info = ERR_PTR(-ENOMEM);
return info;
LASSERT(info->oti_txns == 0);
}
-static int osd_device_init(const struct lu_context *ctx,
+static int osd_device_init(const struct lu_env *env,
struct lu_device *d, struct lu_device *next)
{
- int rc;
- rc = lu_context_init(&osd_dev(d)->od_ctx_for_commit, LCT_MD_THREAD);
- if (rc == 0)
- lu_context_enter(&osd_dev(d)->od_ctx_for_commit);
- return rc;
+ return lu_env_init(&osd_dev(d)->od_env_for_commit, NULL, LCT_MD_THREAD);
}
-static int osd_shutdown(const struct lu_context *ctx, struct osd_device *o)
+static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
{
- struct osd_thread_info *info = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
ENTRY;
if (o->od_obj_area != NULL) {
dput(o->od_obj_area);
RETURN(0);
}
-static int osd_mount(const struct lu_context *ctx,
+static int osd_mount(const struct lu_env *env,
struct osd_device *o, struct lustre_cfg *cfg)
{
struct lustre_mount_info *lmi;
const char *dev = lustre_cfg_string(cfg, 0);
- struct osd_thread_info *info = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *info = lu_context_key_get(&env->le_ctx, &osd_key);
int result;
ENTRY;
result = PTR_ERR(d);
}
if (result != 0)
- osd_shutdown(ctx, o);
+ osd_shutdown(env, o);
RETURN(result);
}
-static struct lu_device *osd_device_fini(const struct lu_context *ctx,
+static struct lu_device *osd_device_fini(const struct lu_env *env,
struct lu_device *d)
{
ENTRY;
shrink_dcache_sb(osd_sb(osd_dev(d)));
- osd_sync(ctx, lu2dt_dev(d));
+ osd_sync(env, lu2dt_dev(d));
if (osd_dev(d)->od_mount)
server_put_mount(osd_dev(d)->od_mount->lmi_name,
osd_dev(d)->od_mount->lmi_mnt);
osd_dev(d)->od_mount = NULL;
- lu_context_exit(&osd_dev(d)->od_ctx_for_commit);
- lu_context_fini(&osd_dev(d)->od_ctx_for_commit);
+ lu_env_fini(&osd_dev(d)->od_env_for_commit);
RETURN(NULL);
}
-static struct lu_device *osd_device_alloc(const struct lu_context *ctx,
+static struct lu_device *osd_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
{
return l;
}
-static void osd_device_free(const struct lu_context *ctx, struct lu_device *d)
+static void osd_device_free(const struct lu_env *env, struct lu_device *d)
{
struct osd_device *o = osd_dev(d);
OBD_FREE_PTR(o);
}
-static int osd_process_config(const struct lu_context *ctx,
+static int osd_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
struct osd_device *o = osd_dev(d);
switch(cfg->lcfg_command) {
case LCFG_SETUP:
- err = osd_mount(ctx, o, cfg);
+ err = osd_mount(env, o, cfg);
break;
case LCFG_CLEANUP:
- err = osd_shutdown(ctx, o);
+ err = osd_shutdown(env, o);
break;
default:
err = -ENOTTY;
extern void ldiskfs_orphan_cleanup (struct super_block * sb,
struct ldiskfs_super_block * es);
-static int osd_recovery_complete(const struct lu_context *ctxt,
+static int osd_recovery_complete(const struct lu_env *env,
struct lu_device *d)
{
struct osd_device *o = osd_dev(d);
}
-static int osd_fid_lookup(const struct lu_context *ctx,
+static int osd_fid_lookup(const struct lu_env *env,
struct osd_object *obj, const struct lu_fid *fid)
{
struct osd_thread_info *info;
ENTRY;
- info = lu_context_key_get(ctx, &osd_key);
+ info = lu_context_key_get(&env->le_ctx, &osd_key);
dev = osd_dev(ldev);
id = &info->oti_id;
oi = &dev->od_oi;
RETURN(result);
}
-static int osd_inode_getattr(const struct lu_context *ctx,
+static int osd_inode_getattr(const struct lu_env *env,
struct inode *inode, struct lu_attr *attr)
{
attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
return osd_invariant(osd_obj(l));
}
-static int capa_is_sane(const struct lu_context *ctx,
+static int capa_is_sane(const struct lu_env *env,
struct lustre_capa *capa,
struct lustre_capa_key *keys)
{
struct obd_capa *c;
- struct osd_thread_info *oti = lu_context_key_get(ctx, &osd_key);
+ struct osd_thread_info *oti = lu_context_key_get(&env->le_ctx, &osd_key);
int i, rc = 0;
ENTRY;
RETURN(0);
}
-static int osd_object_capa_auth(const struct lu_context *ctx,
+static int osd_object_capa_auth(const struct lu_env *env,
const struct lu_object *obj,
struct lustre_capa *capa,
__u64 opc)
return -EACCES;
}
- if (!capa_is_sane(ctx, capa, obj->lo_dev->ld_site->ls_capa_keys)) {
+ if (!capa_is_sane(env, capa, obj->lo_dev->ld_site->ls_capa_keys)) {
DEBUG_CAPA(D_ERROR, capa, "insane");
return -EACCES;
}
struct osd_inode_id *id);
struct osd_thread_info {
- const struct lu_context *oti_ctx;
+ const struct lu_env *oti_env;
struct lu_fid oti_fid;
struct osd_inode_id oti_id;
struct osd_oi *oi, struct dt_device *dev)
{
int rc;
- struct dt_object *obj;
- const struct lu_context *ctx;
+ struct dt_object *obj;
+ const struct lu_env *env;
- ctx = info->oti_ctx;
+ env = info->oti_env;
/*
* Initialize ->oi_lock first, because of possible oi re-entrance in
* dt_store_open().
*/
init_rwsem(&oi->oi_lock);
- obj = dt_store_open(ctx, dev, oi_dirname, &info->oti_fid);
+ obj = dt_store_open(env, dev, oi_dirname, &info->oti_fid);
if (!IS_ERR(obj)) {
- rc = obj->do_ops->do_index_try(ctx, obj, &oi_index_features);
+ rc = obj->do_ops->do_index_try(env, obj, &oi_index_features);
if (rc == 0) {
LASSERT(obj->do_index_ops != NULL);
oi->oi_dir = obj;
} else {
CERROR("Wrong index \"%s\": %d\n", oi_dirname, rc);
- lu_object_put(ctx, &obj->do_lu);
+ lu_object_put(env, &obj->do_lu);
}
} else {
rc = PTR_ERR(obj);
void osd_oi_fini(struct osd_thread_info *info, struct osd_oi *oi)
{
if (oi->oi_dir != NULL) {
- lu_object_put(info->oti_ctx, &oi->oi_dir->do_lu);
+ lu_object_put(info->oti_env, &oi->oi_dir->do_lu);
oi->oi_dir = NULL;
}
}
rc = 0;
} else {
rc = oi->oi_dir->do_index_ops->dio_lookup
- (info->oti_ctx, oi->oi_dir,
+ (info->oti_env, oi->oi_dir,
(struct dt_rec *)id, oi_fid_key(info, fid));
osd_inode_id_init(id, id->oii_ino, id->oii_gen);
}
dev = lu2dt_dev(idx->do_lu.lo_dev);
id = &info->oti_id;
osd_inode_id_init(id, id0->oii_ino, id0->oii_gen);
- return idx->do_index_ops->dio_insert(info->oti_ctx, idx,
+ return idx->do_index_ops->dio_insert(info->oti_env, idx,
(const struct dt_rec *)id,
oi_fid_key(info, fid), th);
}
idx = oi->oi_dir;
dev = lu2dt_dev(idx->do_lu.lo_dev);
- return idx->do_index_ops->dio_delete(info->oti_ctx, idx,
+ return idx->do_index_ops->dio_delete(info->oti_env, idx,
oi_fid_key(info, fid), th);
}
CDEBUG(D_NET, "got req "LPD64"\n", request->rq_xid);
request->rq_svc_thread = thread;
+ request->rq_svc_thread->t_env->le_ses = &request->rq_session;
+
request->rq_export = class_conn2export(
lustre_msg_get_handle(request->rq_reqmsg));
#ifdef WITH_GROUP_INFO
struct group_info *ginfo = NULL;
#endif
- struct lu_context ctx;
+ struct lu_env env;
int rc = 0;
ENTRY;
goto out;
}
- rc = lu_context_init(&ctx, svc->srv_ctx_tags);
+ rc = lu_context_init(&env.le_ctx, svc->srv_ctx_tags);
if (rc)
goto out_srv_init;
- thread->t_ctx = &ctx;
- ctx.lc_thread = thread;
+ thread->t_env = &env;
+ env.le_ctx.lc_thread = thread;
/* Alloc reply state structure for this one */
OBD_ALLOC_GFP(rs, svc->srv_max_reply_size, CFS_ALLOC_STD);
if (!list_empty (&svc->srv_request_queue) &&
(svc->srv_n_difficult_replies == 0 ||
svc->srv_n_active_reqs < (svc->srv_nthreads - 1))) {
- lu_context_enter(&ctx);
+ lu_context_enter(&env.le_ctx);
ptlrpc_server_handle_request(svc, thread);
- lu_context_exit(&ctx);
+ lu_context_exit(&env.le_ctx);
}
if (!list_empty(&svc->srv_idle_rqbds) &&
svc->srv_done(thread);
out:
- lu_context_fini(&ctx);
+ lu_env_fini(&env);
CDEBUG(D_NET, "service thread %d exiting: rc %d\n", thread->t_id, rc);