* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct echo_page {
struct cl_page_slice ep_cl;
- cfs_mutex_t ep_lock;
+ /* serializes page own/disown (taken in echo_page_own(),
+ * released in echo_page_disown()) */
+ struct mutex ep_lock;
cfs_page_t *ep_vmpage;
};
return container_of(c, struct echo_object_conf, eoc_cl);
}
-static inline void lsm2fid(struct lov_stripe_md *lsm, struct lu_fid *fid)
-{
- fid_zero(fid);
- fid->f_seq = FID_SEQ_ECHO;
- /* truncated to 32 bits by assignment */
- fid->f_oid = lsm->lsm_object_id;
- fid->f_ver = lsm->lsm_object_id >> 32;
-}
/** @} echo_helpers */
static struct echo_object *cl_echo_object_find(struct echo_device *d,
unsigned long dummy;
};
-static cfs_mem_cache_t *echo_page_kmem;
static cfs_mem_cache_t *echo_lock_kmem;
static cfs_mem_cache_t *echo_object_kmem;
static cfs_mem_cache_t *echo_thread_kmem;
static struct lu_kmem_descr echo_caches[] = {
{
- .ckd_cache = &echo_page_kmem,
- .ckd_name = "echo_page_kmem",
- .ckd_size = sizeof (struct echo_page)
- },
- {
.ckd_cache = &echo_lock_kmem,
.ckd_name = "echo_lock_kmem",
.ckd_size = sizeof (struct echo_lock)
struct echo_page *ep = cl2echo_page(slice);
if (!nonblock)
- cfs_mutex_lock(&ep->ep_lock);
- else if (!cfs_mutex_trylock(&ep->ep_lock))
+ mutex_lock(&ep->ep_lock);
+ else if (!mutex_trylock(&ep->ep_lock))
return -EAGAIN;
return 0;
}
{
struct echo_page *ep = cl2echo_page(slice);
- LASSERT(cfs_mutex_is_locked(&ep->ep_lock));
- cfs_mutex_unlock(&ep->ep_lock);
+ LASSERT(mutex_is_locked(&ep->ep_lock));
+ mutex_unlock(&ep->ep_lock);
}
static void echo_page_discard(const struct lu_env *env,
static int echo_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- if (cfs_mutex_is_locked(&cl2echo_page(slice)->ep_lock))
+ if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
return -EBUSY;
return -ENODATA;
}
cfs_atomic_dec(&eco->eo_npages);
page_cache_release(vmpage);
- OBD_SLAB_FREE_PTR(ep, echo_page_kmem);
EXIT;
}
struct echo_page *ep = cl2echo_page(slice);
(*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
- ep, cfs_mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
+ ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
return 0;
}
*
* @{
*/
-static struct cl_page *echo_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+/* Initialize the echo page slice embedded in @page: record the backing
+ * VM page (taking a reference on it), initialize the ownership mutex,
+ * attach the slice to the cl_page and bump the object's page count.
+ * The slice is now allocated as part of the cl_page itself
+ * (cl_object_page_slice), so the old slab allocation and its failure
+ * path are gone and the function always returns 0. */
+static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct echo_page *ep;
+ struct echo_page *ep = cl_object_page_slice(obj, page);
+ struct echo_object *eco = cl2echo_obj(obj);
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(ep, echo_page_kmem, CFS_ALLOC_IO);
- if (ep != NULL) {
- struct echo_object *eco = cl2echo_obj(obj);
- ep->ep_vmpage = vmpage;
- page_cache_get(vmpage);
- cfs_mutex_init(&ep->ep_lock);
- cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
- cfs_atomic_inc(&eco->eo_npages);
- }
- RETURN(ERR_PTR(ep ? 0 : -ENOMEM));
+ ep->ep_vmpage = vmpage;
+ /* hold a VM page reference for the lifetime of the slice;
+ * dropped via page_cache_release() on slice teardown */
+ page_cache_get(vmpage);
+ mutex_init(&ep->ep_lock);
+ cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
+ cfs_atomic_inc(&eco->eo_npages);
+ RETURN(0);
}
static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
eco->eo_dev = ed;
cfs_atomic_set(&eco->eo_npages, 0);
+ cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
- cfs_spin_lock(&ec->ec_lock);
- cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
- cfs_spin_unlock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
+ cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+ spin_unlock(&ec->ec_lock);
- RETURN(0);
+ RETURN(0);
}
/* taken from osc_unpackmd() */
loi_init((*lsmp)->lsm_oinfo[0]);
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+ ostid_set_seq_echo(&(*lsmp)->lsm_oi);
RETURN(lsm_size);
}
LASSERT(cfs_atomic_read(&eco->eo_npages) == 0);
- cfs_spin_lock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
cfs_list_del_init(&eco->eo_obj_chain);
- cfs_spin_unlock(&ec->ec_lock);
+ spin_unlock(&ec->ec_lock);
lu_object_fini(obj);
lu_object_header_fini(obj->lo_header);
#define ECHO_SEQ_WIDTH 0xffffffff
static int echo_fid_init(struct echo_device *ed, char *obd_name,
- struct md_site *ms)
+ struct seq_server_site *ss)
{
char *prefix;
int rc;
snprintf(prefix, MAX_OBD_NAME + 5, "srv-%s", obd_name);
- /* Init client side sequence-manager */
- rc = seq_client_init(ed->ed_cl_seq, NULL,
- LUSTRE_SEQ_METADATA,
- prefix, ms->ms_server_seq);
+ /* Init client side sequence-manager */
+ rc = seq_client_init(ed->ed_cl_seq, NULL,
+ LUSTRE_SEQ_METADATA,
+ prefix, ss->ss_server_seq);
ed->ed_cl_seq->lcs_width = ECHO_SEQ_WIDTH;
OBD_FREE(prefix, MAX_OBD_NAME + 5);
if (rc)
ls = next->ld_site;
- cfs_spin_lock(&ls->ls_ld_lock);
- cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
- if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
- found = 1;
- break;
- }
- }
- cfs_spin_unlock(&ls->ls_ld_lock);
+ spin_lock(&ls->ls_ld_lock);
+ cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
+ if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ls->ls_ld_lock);
if (found == 0) {
CERROR("%s is not lu device type!\n",
ed->ed_site_myself.cs_lu = *ls;
ed->ed_site = &ed->ed_site_myself;
ed->ed_cl.cd_lu_dev.ld_site = &ed->ed_site_myself.cs_lu;
- rc = echo_fid_init(ed, obd->obd_name, lu_site2md(ls));
- if (rc) {
- CERROR("echo fid init error %d\n", rc);
- GOTO(out, rc);
- }
+ rc = echo_fid_init(ed, obd->obd_name, lu_site2seq(ls));
+ if (rc) {
+ CERROR("echo fid init error %d\n", rc);
+ GOTO(out, rc);
+ }
} else {
/* if echo client is to be stacked upon ost device, the next is
* NULL since ost is not a clio device so far */
* all of cached objects. Anyway, probably the echo device is being
* parallelly accessed.
*/
- cfs_spin_lock(&ec->ec_lock);
- cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
- eco->eo_deleted = 1;
- cfs_spin_unlock(&ec->ec_lock);
-
- /* purge again */
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
-
- CDEBUG(D_INFO,
- "Waiting for the reference of echo object to be dropped\n");
-
- /* Wait for the last reference to be dropped. */
- cfs_spin_lock(&ec->ec_lock);
- while (!cfs_list_empty(&ec->ec_objects)) {
- cfs_spin_unlock(&ec->ec_lock);
- CERROR("echo_client still has objects at cleanup time, "
- "wait for 1 second\n");
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
- cfs_time_seconds(1));
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
- cfs_spin_lock(&ec->ec_lock);
- }
- cfs_spin_unlock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
+ cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+ eco->eo_deleted = 1;
+ spin_unlock(&ec->ec_lock);
+
+ /* purge again */
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+
+ CDEBUG(D_INFO,
+ "Waiting for the reference of echo object to be dropped\n");
+
+ /* Wait for the last reference to be dropped. */
+ spin_lock(&ec->ec_lock);
+ while (!cfs_list_empty(&ec->ec_objects)) {
+ spin_unlock(&ec->ec_lock);
+ CERROR("echo_client still has objects at cleanup time, "
+ "wait for 1 second\n");
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ cfs_time_seconds(1));
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ spin_lock(&ec->ec_lock);
+ }
+ spin_unlock(&ec->ec_lock);
LASSERT(cfs_list_empty(&ec->ec_locks));
struct cl_object *obj;
struct lu_fid *fid;
int refcheck;
- ENTRY;
+ int rc;
+ ENTRY;
- LASSERT(lsmp);
- lsm = *lsmp;
- LASSERT(lsm);
- LASSERT(lsm->lsm_object_id);
+ LASSERT(lsmp);
+ lsm = *lsmp;
+ LASSERT(lsm);
+ LASSERTF(ostid_id(&lsm->lsm_oi) != 0, DOSTID"\n", POSTID(&lsm->lsm_oi));
+ LASSERTF(ostid_seq(&lsm->lsm_oi) == FID_SEQ_ECHO, DOSTID"\n",
+ POSTID(&lsm->lsm_oi));
/* Never return an object if the obd is to be freed. */
if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
if (!d->ed_next_islov) {
struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
LASSERT(oinfo != NULL);
- oinfo->loi_id = lsm->lsm_object_id;
- oinfo->loi_seq = lsm->lsm_object_seq;
+ oinfo->loi_oi = lsm->lsm_oi;
conf->eoc_cl.u.coc_oinfo = oinfo;
} else {
struct lustre_md *md;
}
conf->eoc_md = lsmp;
- fid = &info->eti_fid;
- lsm2fid(lsm, fid);
+ fid = &info->eti_fid;
+ rc = ostid_to_fid(fid, &lsm->lsm_oi, 0);
+ if (rc != 0)
+ GOTO(out, eco = ERR_PTR(rc));
/* In the function below, .hs_keycmp resolves to
* lu_obj_hop_keycmp() */
if (eco->eo_deleted) {
struct lu_object_header *loh = obj->co_lu.lo_header;
LASSERT(&eco->eo_hdr == luh2coh(loh));
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
}
cl_object_put(env, obj);
rc = cl_wait(env, lck);
if (rc == 0) {
el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- cfs_spin_lock(&ec->ec_lock);
- if (cfs_list_empty(&el->el_chain)) {
- cfs_list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- cfs_atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- cfs_spin_unlock(&ec->ec_lock);
- } else
- cl_lock_release(env, lck, "ec enqueue", cfs_current());
- }
- RETURN(rc);
+ spin_lock(&ec->ec_lock);
+ if (cfs_list_empty(&el->el_chain)) {
+ cfs_list_add(&el->el_chain, &ec->ec_locks);
+ el->el_cookie = ++ec->ec_unique;
+ }
+ cfs_atomic_inc(&el->el_refcount);
+ *cookie = el->el_cookie;
+ spin_unlock(&ec->ec_lock);
+ } else {
+ cl_lock_release(env, lck, "ec enqueue", cfs_current());
+ }
+ }
+ RETURN(rc);
}
static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
ENTRY;
LASSERT(ec != NULL);
- cfs_spin_lock (&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
cfs_list_for_each (el, &ec->ec_locks) {
ecl = cfs_list_entry (el, struct echo_lock, el_chain);
CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
break;
}
}
- cfs_spin_unlock (&ec->ec_lock);
+ spin_unlock(&ec->ec_lock);
if (!found)
RETURN(-ENOENT);
ENTRY;
+ if (ec_parent == NULL)
+ return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
RETURN(-ENXIO);
RETURN(ERR_PTR(rc));
}
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
RETURN(child);
ENTRY;
+ if (ec_parent == NULL)
+ return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
RETURN(-ENXIO);
ENTRY;
+ if (ec_parent == NULL)
+ return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
RETURN(-ENXIO);
int rc = 0;
int i;
+ if (ec_parent == NULL)
+ return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
return -ENXIO;
CDEBUG(D_RPCTRACE, "Start destroy object "DFID" %s %p\n",
PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
- rc = mdo_unlink(env, parent, lu2md(child), lname, ma);
- if (rc) {
- CERROR("Can not unlink child %s: rc = %d\n",
- lname->ln_name, rc);
- GOTO(out_put, rc);
- }
+ rc = mdo_unlink(env, parent, lu2md(child), lname, ma, 0);
+ if (rc) {
+ CERROR("Can not unlink child %s: rc = %d\n",
+ lname->ln_name, rc);
+ GOTO(out_put, rc);
+ }
CDEBUG(D_RPCTRACE, "End destroy object "DFID" %s %p\n",
PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
out_put:
RETURN(ERR_PTR(rc));
}
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
parent = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
if (IS_ERR(parent)) {
CERROR("Can not find the parent "DFID": rc = %ld\n",
RETURN(parent);
}
+/* Fill the env's lu_ucred from the current process credentials
+ * (uid/gid, fsuid/fsgid and the packed capability mask) before
+ * dispatching an MD operation on behalf of the caller.  Filesystem
+ * capabilities are dropped for non-root (fsuid != 0) callers.
+ * Must be paired with echo_ucred_fini(). */
+static void echo_ucred_init(struct lu_env *env)
+{
+ struct lu_ucred *ucred = lu_ucred(env);
+
+ ucred->uc_valid = UCRED_INVALID;
+
+ /* no supplementary groups */
+ ucred->uc_suppgids[0] = -1;
+ ucred->uc_suppgids[1] = -1;
+
+ ucred->uc_uid = ucred->uc_o_uid = cfs_curproc_uid();
+ ucred->uc_gid = ucred->uc_o_gid = cfs_curproc_gid();
+ ucred->uc_fsuid = ucred->uc_o_fsuid = cfs_curproc_fsuid();
+ ucred->uc_fsgid = ucred->uc_o_fsgid = cfs_curproc_fsgid();
+ ucred->uc_cap = cfs_curproc_cap_pack();
+
+ /* remove fs privilege for non-root user. */
+ if (ucred->uc_fsuid)
+ ucred->uc_cap &= ~CFS_CAP_FS_MASK;
+ ucred->uc_valid = UCRED_NEW;
+}
+
+/* Counterpart of echo_ucred_init(): mark the env's credential as
+ * no longer populated (back to UCRED_INIT) once the MD operation
+ * is complete. */
+static void echo_ucred_fini(struct lu_env *env)
+{
+ struct lu_ucred *ucred = lu_ucred(env);
+ ucred->uc_valid = UCRED_INIT;
+}
+
#define ECHO_MD_CTX_TAG (LCT_REMEMBER | LCT_MD_THREAD)
#define ECHO_MD_SES_TAG (LCT_REMEMBER | LCT_SESSION)
static int echo_md_handler(struct echo_device *ed, int command,
- char *path, int path_len, int id, int count,
- struct obd_ioctl_data *data)
+ char *path, int path_len, __u64 id, int count,
+ struct obd_ioctl_data *data)
{
struct echo_thread_info *info;
struct lu_device *ld = ed->ed_next;
GOTO(out_name, rc = -EFAULT);
}
+ echo_ucred_init(env);
+
switch (command) {
case ECHO_MD_CREATE:
case ECHO_MD_MKDIR: {
int stripe_count = (int)data->ioc_obdo2.o_misc;
int stripe_index = (int)data->ioc_obdo2.o_stripe_idx;
- fid->f_seq = data->ioc_obdo1.o_seq;
- fid->f_oid = (__u32)data->ioc_obdo1.o_id;
- fid->f_ver = 0;
+ rc = ostid_to_fid(fid, &data->ioc_obdo1.o_oi, 0);
+ if (rc != 0)
+ break;
+
/* In the function below, .hs_keycmp resolves to
* lu_obj_hop_keycmp() */
/* coverity[overrun-buffer-val] */
rc = -EINVAL;
break;
}
+ echo_ucred_fini(env);
+
out_name:
if (name != NULL)
OBD_FREE(name, namelen + 1);
idx = cfs_rand();
- /* setup stripes: indices + default ids if required */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (lsm->lsm_oinfo[i]->loi_id == 0)
- lsm->lsm_oinfo[i]->loi_id = lsm->lsm_object_id;
+ /* setup stripes: indices + default ids if required */
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0)
+ lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi;
- lsm->lsm_oinfo[i]->loi_ost_idx =
- (idx + i) % ec->ec_nstripes;
- }
+ lsm->lsm_oinfo[i]->loi_ost_idx =
+ (idx + i) % ec->ec_nstripes;
+ }
}
- /* setup object ID here for !on_target and LOV hint */
- if (oa->o_valid & OBD_MD_FLID)
- lsm->lsm_object_id = oa->o_id;
+ /* setup object ID here for !on_target and LOV hint */
+ if (oa->o_valid & OBD_MD_FLID) {
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+ lsm->lsm_oi = oa->o_oi;
+ }
- if (lsm->lsm_object_id == 0)
- lsm->lsm_object_id = ++last_object_id;
+ if (ostid_id(&lsm->lsm_oi) == 0)
+ ostid_set_id(&lsm->lsm_oi, ++last_object_id);
rc = 0;
- if (on_target) {
- /* Only echo objects are allowed to be created */
- LASSERT((oa->o_valid & OBD_MD_FLGROUP) &&
- (oa->o_seq == FID_SEQ_ECHO));
- rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
- if (rc != 0) {
- CERROR("Cannot create objects: rc = %d\n", rc);
- GOTO(failed, rc);
- }
- created = 1;
- }
+ if (on_target) {
+ /* Only echo objects are allowed to be created */
+ LASSERT((oa->o_valid & OBD_MD_FLGROUP) &&
+ (ostid_seq(&oa->o_oi) == FID_SEQ_ECHO));
+ rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
+ if (rc != 0) {
+ CERROR("Cannot create objects: rc = %d\n", rc);
+ GOTO(failed, rc);
+ }
+ created = 1;
+ }
/* See what object ID we were given */
- oa->o_id = lsm->lsm_object_id;
+ oa->o_oi = lsm->lsm_oi;
oa->o_valid |= OBD_MD_FLID;
eco = cl_echo_object_find(ed, &lsm);
GOTO(failed, rc = PTR_ERR(eco));
cl_echo_object_put(eco);
- CDEBUG(D_INFO, "oa->o_id = %lx\n", (long)oa->o_id);
+ CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
EXIT;
failed:
int rc;
ENTRY;
- if ((oa->o_valid & OBD_MD_FLID) == 0 ||
- oa->o_id == 0) /* disallow use of object id 0 */
- {
+ if ((oa->o_valid & OBD_MD_FLID) == 0 || ostid_id(&oa->o_oi) == 0) {
+ /* disallow use of object id 0 */
CERROR ("No valid oid\n");
RETURN(-EINVAL);
}
if (rc < 0)
RETURN(rc);
- lsm->lsm_object_id = oa->o_id;
- if (oa->o_valid & OBD_MD_FLGROUP)
- lsm->lsm_object_seq = oa->o_seq;
- else
- lsm->lsm_object_seq = FID_SEQ_ECHO;
+ lsm->lsm_oi = oa->o_oi;
+ if (!(oa->o_valid & OBD_MD_FLGROUP))
+ ostid_set_seq_echo(&lsm->lsm_oi);
rc = 0;
eco = cl_echo_object_find(ed, &lsm);
stripe_index = woffset / stripe_size;
- *idp = lsm->lsm_oinfo[stripe_index]->loi_id;
- *offp = offset * stripe_size + woffset % stripe_size;
+ *idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi);
+ *offp = offset * stripe_size + woffset % stripe_size;
}
static void
int brw_flags = 0;
ENTRY;
- verify = ((oa->o_id) != ECHO_PERSISTENT_OBJID &&
+ verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
(oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
- gfp_mask = ((oa->o_id & 2) == 0) ? CFS_ALLOC_STD : CFS_ALLOC_HIGHUSER;
+ gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? CFS_ALLOC_STD : CFS_ALLOC_HIGHUSER;
- LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
- LASSERT(lsm != NULL);
- LASSERT(lsm->lsm_object_id == oa->o_id);
+ LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
+ LASSERT(lsm != NULL);
+ LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi));
if (count <= 0 ||
(count & (~CFS_PAGE_MASK)) != 0)
pgp->off = off;
pgp->flag = brw_flags;
- if (verify)
- echo_client_page_debug_setup(lsm, pgp->pg, rw,
- oa->o_id, off, pgp->count);
+ if (verify)
+ echo_client_page_debug_setup(lsm, pgp->pg, rw,
+ ostid_id(&oa->o_oi), off,
+ pgp->count);
}
/* brw mode can only be used at client */
if (pgp->pg == NULL)
continue;
- if (verify) {
- int vrc;
- vrc = echo_client_page_debug_check(lsm, pgp->pg, oa->o_id,
- pgp->off, pgp->count);
- if (vrc != 0 && rc == 0)
- rc = vrc;
- }
- OBD_PAGE_FREE(pgp->pg);
+ if (verify) {
+ int vrc;
+ vrc = echo_client_page_debug_check(lsm, pgp->pg,
+ ostid_id(&oa->o_oi),
+ pgp->off, pgp->count);
+ if (vrc != 0 && rc == 0)
+ rc = vrc;
+ }
+ OBD_PAGE_FREE(pgp->pg);
}
OBD_FREE(pga, npages * sizeof(*pga));
OBD_FREE(pages, npages * sizeof(*pages));
ENTRY;
- if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
- (lsm != NULL && lsm->lsm_object_id != oa->o_id))
- RETURN(-EINVAL);
+ if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
+ (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
+ RETURN(-EINVAL);
npages = batch >> CFS_PAGE_SHIFT;
tot_pages = count >> CFS_PAGE_SHIFT;
if (async)
lnb[i].flags |= OBD_BRW_ASYNC;
- if (oa->o_id == ECHO_PERSISTENT_OBJID ||
- (oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
- (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
- continue;
-
- if (rw == OBD_BRW_WRITE)
- echo_client_page_debug_setup(lsm, page, rw,
- oa->o_id,
- rnb[i].offset,
- rnb[i].len);
- else
- echo_client_page_debug_check(lsm, page,
- oa->o_id,
- rnb[i].offset,
- rnb[i].len);
- }
+ if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
+ (oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
+ (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
+ continue;
+
+ if (rw == OBD_BRW_WRITE)
+ echo_client_page_debug_setup(lsm, page, rw,
+ ostid_id(&oa->o_oi),
+ rnb[i].offset,
+ rnb[i].len);
+ else
+ echo_client_page_debug_check(lsm, page,
+ ostid_id(&oa->o_oi),
+ rnb[i].offset,
+ rnb[i].len);
+ }
ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
rnb, npages, lnb, oti, ret);
memset(&dummy_oti, 0, sizeof(dummy_oti));
- oa = &data->ioc_obdo1;
- if (!(oa->o_valid & OBD_MD_FLGROUP)) {
- oa->o_valid |= OBD_MD_FLGROUP;
- oa->o_seq = FID_SEQ_ECHO;
- }
+ oa = &data->ioc_obdo1;
+ if (!(oa->o_valid & OBD_MD_FLGROUP)) {
+ oa->o_valid |= OBD_MD_FLGROUP;
+ ostid_set_seq_echo(&oa->o_oi);
+ }
/* This FID is unpacked just for validation at this point */
- rc = fid_ostid_unpack(&fid, &oa->o_oi, 0);
+ rc = ostid_to_fid(&fid, &oa->o_oi, 0);
if (rc < 0)
RETURN(rc);
data->ioc_plen1, &dummy_oti);
GOTO(out, rc);
- case OBD_IOC_ECHO_MD: {
- int count;
- int cmd;
- char *dir = NULL;
- int dirlen;
- __u64 id;
+ case OBD_IOC_ECHO_MD: {
+ int count;
+ int cmd;
+ char *dir = NULL;
+ int dirlen;
+ __u64 id;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- GOTO(out, rc = -EPERM);
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
- count = data->ioc_count;
- cmd = data->ioc_command;
+ count = data->ioc_count;
+ cmd = data->ioc_command;
- id = data->ioc_obdo2.o_id;
+ id = ostid_id(&data->ioc_obdo2.o_oi);
- dirlen = data->ioc_plen1;
- OBD_ALLOC(dir, dirlen + 1);
- if (dir == NULL)
- GOTO(out, rc = -ENOMEM);
+ dirlen = data->ioc_plen1;
+ OBD_ALLOC(dir, dirlen + 1);
+ if (dir == NULL)
+ GOTO(out, rc = -ENOMEM);
- if (cfs_copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
- OBD_FREE(dir, data->ioc_plen1 + 1);
- GOTO(out, rc = -EFAULT);
- }
+ if (cfs_copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
+ OBD_FREE(dir, data->ioc_plen1 + 1);
+ GOTO(out, rc = -EFAULT);
+ }
- rc = echo_md_handler(ed, cmd, dir, dirlen, id, count, data);
- OBD_FREE(dir, dirlen + 1);
- GOTO(out, rc);
- }
+ rc = echo_md_handler(ed, cmd, dir, dirlen, id, count, data);
+ OBD_FREE(dir, dirlen + 1);
+ GOTO(out, rc);
+ }
case OBD_IOC_ECHO_ALLOC_SEQ: {
struct lu_env *cl_env;
int refcheck;
if (cfs_copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
return -EFAULT;
- max_count = LUSTRE_SEQ_MAX_WIDTH;
- if (cfs_copy_to_user(data->ioc_pbuf2, &max_count,
- data->ioc_plen2))
- return -EFAULT;
- GOTO(out, rc);
+ max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH;
+ if (cfs_copy_to_user(data->ioc_pbuf2, &max_count,
+ data->ioc_plen2))
+ return -EFAULT;
+ GOTO(out, rc);
}
case OBD_IOC_DESTROY:
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EINVAL);
}
- cfs_spin_lock_init (&ec->ec_lock);
+ spin_lock_init(&ec->ec_lock);
CFS_INIT_LIST_HEAD (&ec->ec_objects);
CFS_INIT_LIST_HEAD (&ec->ec_locks);
ec->ec_unique = 0;
}
ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
+ OBD_CONNECT_BRW_SIZE |
OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
- OBD_CONNECT_64BITHASH;
+ OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE;
+ ocd->ocd_brw_size = DT_MAX_BRW_SIZE;
ocd->ocd_version = LUSTRE_VERSION_CODE;
ocd->ocd_group = FID_SEQ_ECHO;
rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
if (rc == 0) {
/* Turn off pinger because it connects to tgt obd directly. */
- cfs_spin_lock(&tgt->obd_dev_lock);
- cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
- cfs_spin_unlock(&tgt->obd_dev_lock);
+ spin_lock(&tgt->obd_dev_lock);
+ cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+ spin_unlock(&tgt->obd_dev_lock);
}
OBD_FREE(ocd, sizeof(*ocd));