-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <lustre_debug.h>
#include <lprocfs_status.h>
#include <cl_object.h>
+#include <lustre_fid.h>
+#include <lustre_acl.h>
+#include <lustre_net.h>
+#include <obd_lov.h>
#include "echo_internal.h"
struct cl_site *ed_site;
struct lu_device *ed_next;
int ed_next_islov;
+ int ed_next_ismd;
+ struct lu_client_seq *ed_cl_seq;
};
struct echo_object {
struct echo_page {
struct cl_page_slice ep_cl;
+ struct mutex ep_lock;
cfs_page_t *ep_vmpage;
};
};
#endif
-static int echo_client_setup(struct obd_device *obddev,
+static int echo_client_setup(const struct lu_env *env,
+ struct obd_device *obddev,
struct lustre_cfg *lcfg);
static int echo_client_cleanup(struct obd_device *obddev);
struct cl_io eti_io;
struct cl_lock_descr eti_descr;
struct lu_fid eti_fid;
+ struct lu_fid eti_fid2;
+ struct md_op_spec eti_spec;
+ struct lov_mds_md_v3 eti_lmm;
+ struct lov_user_md_v3 eti_lum;
+ struct md_attr eti_ma;
+ struct lu_name eti_lname;
+ /* per-thread values, can be re-used */
+ void *eti_big_lmm;
+ int eti_big_lmmsize;
+ char eti_name[20];
+ struct lu_buf eti_buf;
+ char eti_xattr_buf[LUSTRE_POSIX_ACL_MAX_SIZE];
};
/* No session used right now */
*
* @{
*/
+/* Return the VM page backing this cl_page slice. */
-cfs_page_t *echo_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
+static cfs_page_t *echo_page_vmpage(const struct lu_env *env,
+ const struct cl_page_slice *slice)
{
return cl2echo_page(slice)->ep_vmpage;
}
+/*
+ * Take ownership of an echo page for an IO.
+ *
+ * Ownership is modelled by ep_lock: block on the mutex normally, or
+ * trylock when @nonblock is set, returning -EAGAIN if the page is
+ * already owned.  Released by echo_page_disown().
+ */
+static int echo_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, int nonblock)
+{
+ struct echo_page *ep = cl2echo_page(slice);
+
+ if (!nonblock)
+ mutex_lock(&ep->ep_lock);
+ else if (!mutex_trylock(&ep->ep_lock))
+ return -EAGAIN;
+ return 0;
+}
+
+/*
+ * Release page ownership taken by echo_page_own().
+ * The page must currently be owned (ep_lock held).
+ */
+static void echo_page_disown(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct echo_page *ep = cl2echo_page(slice);
+
+ LASSERT(mutex_is_locked(&ep->ep_lock));
+ mutex_unlock(&ep->ep_lock);
+}
+
static void echo_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
static int echo_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- return 1;
+ /* Report lock state as an errno: -EBUSY when the page is owned
+ * (ep_lock held, see echo_page_own()), -ENODATA otherwise. */
+ if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
+ return -EBUSY;
+ return -ENODATA;
}
static void echo_page_completion(const struct lu_env *env,
{
struct echo_page *ep = cl2echo_page(slice);
- (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p vm@%p\n",
- ep, ep->ep_vmpage);
+ (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
+ ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
return 0;
}
static const struct cl_page_operations echo_page_ops = {
+ .cpo_own = echo_page_own,
+ .cpo_disown = echo_page_disown,
.cpo_discard = echo_page_discard,
.cpo_vmpage = echo_page_vmpage,
.cpo_fini = echo_page_fini,
struct echo_object *eco = cl2echo_obj(obj);
ep->ep_vmpage = vmpage;
page_cache_get(vmpage);
+ mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
cfs_atomic_inc(&eco->eo_npages);
}
static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
const struct lu_object_conf *conf)
{
- const struct cl_object_conf *cconf = lu2cl_conf(conf);
- struct echo_object_conf *econf = cl2echo_conf(cconf);
struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
struct echo_client_obd *ec = ed->ed_ec;
struct echo_object *eco = cl2echo_obj(lu2cl(obj));
lu_object_add(obj, below);
}
- LASSERT(econf->eoc_md);
- eco->eo_lsm = *econf->eoc_md;
+ if (!ed->ed_next_ismd) {
+ const struct cl_object_conf *cconf = lu2cl_conf(conf);
+ struct echo_object_conf *econf = cl2echo_conf(cconf);
+
+ LASSERT(econf->eoc_md);
+ eco->eo_lsm = *econf->eoc_md;
+ /* clear the lsm pointer so that it won't get freed. */
+ *econf->eoc_md = NULL;
+ } else {
+ eco->eo_lsm = NULL;
+ }
+
eco->eo_dev = ed;
cfs_atomic_set(&eco->eo_npages, 0);
- /* clear the lsm pointer so that it won't get freed. */
- *econf->eoc_md = NULL;
+ spin_lock(&ec->ec_lock);
+ cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+ spin_unlock(&ec->ec_lock);
- cfs_spin_lock(&ec->ec_lock);
- cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
- cfs_spin_unlock(&ec->ec_lock);
+ RETURN(0);
+}
- RETURN(0);
+/* taken from osc_unpackmd() */
+static int echo_alloc_memmd(struct echo_device *ed,
+ struct lov_stripe_md **lsmp)
+{
+ int lsm_size;
+
+ ENTRY;
+
+ /* If export is lov/osc then use their obd method */
+ if (ed->ed_next != NULL)
+ return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
+ /* OFD has no unpackmd method, do everything here */
+ lsm_size = lov_stripe_md_size(1);
+
+ LASSERT(*lsmp == NULL);
+ OBD_ALLOC(*lsmp, lsm_size);
+ if (*lsmp == NULL)
+ RETURN(-ENOMEM);
+
+ OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
+ if ((*lsmp)->lsm_oinfo[0] == NULL) {
+ OBD_FREE(*lsmp, lsm_size);
+ RETURN(-ENOMEM);
+ }
+
+ loi_init((*lsmp)->lsm_oinfo[0]);
+ (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+
+ RETURN(lsm_size);
+}
+
+/*
+ * Free an lsm allocated by echo_alloc_memmd(); mirrors its two paths
+ * (delegate to the stacked device, or free the local 1-stripe lsm).
+ * *lsmp is reset to NULL.
+ */
+static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp)
+{
+ int lsm_size;
+
+ ENTRY;
+
+ /* If export is lov/osc then use their obd method */
+ if (ed->ed_next != NULL)
+ return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
+ /* OFD has no unpackmd method, do everything here */
+ lsm_size = lov_stripe_md_size(1);
+
+ LASSERT(*lsmp != NULL);
+ OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
+ OBD_FREE(*lsmp, lsm_size);
+ *lsmp = NULL;
+ RETURN(0);
}
static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
{
struct echo_object *eco = cl2echo_obj(lu2cl(obj));
struct echo_client_obd *ec = eco->eo_dev->ed_ec;
- struct lov_stripe_md *lsm = eco->eo_lsm;
ENTRY;
+ /* All cached pages must be gone before the object is torn down. */
LASSERT(cfs_atomic_read(&eco->eo_npages) == 0);
- cfs_spin_lock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
cfs_list_del_init(&eco->eo_obj_chain);
- cfs_spin_unlock(&ec->ec_lock);
+ spin_unlock(&ec->ec_lock);
lu_object_fini(obj);
lu_object_header_fini(obj->lo_header);
- if (lsm)
- obd_free_memmd(ec->ec_exp, &lsm);
- OBD_SLAB_FREE_PTR(eco, echo_object_kmem);
- EXIT;
+ /* Release the striping md via the device-aware helper so the
+ * OFD (no stacked lov/osc) case is handled too. */
+ if (eco->eo_lsm)
+ echo_free_memmd(eco->eo_dev, &eco->eo_lsm);
+ OBD_SLAB_FREE_PTR(eco, echo_object_kmem);
+ EXIT;
}
static int echo_object_print(const struct lu_env *env, void *cookie,
return (*p)(env, cookie, "echoclient-object@%p", obj);
}
-
static const struct lu_object_operations echo_lu_obj_ops = {
.loo_object_init = echo_object_init,
.loo_object_delete = NULL,
static struct lu_device_operations echo_device_lu_ops = {
.ldo_object_alloc = echo_object_alloc,
};
+
/** @} echo_lu_dev_ops */
static struct cl_device_operations echo_device_cl_ops = {
static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
{
if (ed->ed_site) {
- cl_site_fini(ed->ed_site);
+ if (!ed->ed_next_ismd)
+ cl_site_fini(ed->ed_site);
ed->ed_site = NULL;
}
}
LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
+#define ECHO_SEQ_WIDTH 0xffffffff
+/*
+ * Set up the client-side metadata sequence manager for the echo device.
+ *
+ * Allocates ed->ed_cl_seq and initializes it against the server-side
+ * sequence manager in @ms.  On any failure everything allocated here is
+ * released and ed->ed_cl_seq is left NULL.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int echo_fid_init(struct echo_device *ed, char *obd_name,
+                         struct md_site *ms)
+{
+        char *prefix;
+        int rc;
+        ENTRY;
+
+        OBD_ALLOC_PTR(ed->ed_cl_seq);
+        if (ed->ed_cl_seq == NULL)
+                RETURN(-ENOMEM);
+
+        OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
+        if (prefix == NULL)
+                GOTO(out_free_seq, rc = -ENOMEM);
+
+        snprintf(prefix, MAX_OBD_NAME + 5, "srv-%s", obd_name);
+
+        /* Init client side sequence-manager */
+        rc = seq_client_init(ed->ed_cl_seq, NULL,
+                             LUSTRE_SEQ_METADATA,
+                             prefix, ms->ms_server_seq);
+        OBD_FREE(prefix, MAX_OBD_NAME + 5);
+        if (rc)
+                GOTO(out_free_seq, rc);
+
+        /* Only touch the sequence once it is known to be initialized. */
+        ed->ed_cl_seq->lcs_width = ECHO_SEQ_WIDTH;
+
+        RETURN(0);
+
+out_free_seq:
+        OBD_FREE_PTR(ed->ed_cl_seq);
+        ed->ed_cl_seq = NULL;
+        RETURN(rc);
+}
+
+/*
+ * Tear down the client-side sequence manager created by
+ * echo_fid_init().  Safe to call when it was never set up.
+ */
+static int echo_fid_fini(struct obd_device *obddev)
+{
+ struct echo_device *ed = obd2echo_dev(obddev);
+ ENTRY;
+
+ if (ed->ed_cl_seq != NULL) {
+ seq_client_fini(ed->ed_cl_seq);
+ OBD_FREE_PTR(ed->ed_cl_seq);
+ ed->ed_cl_seq = NULL;
+ }
+
+ RETURN(0);
+}
+
static struct lu_device *echo_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
cd->cd_ops = &echo_device_cl_ops;
cleanup = 2;
- rc = echo_site_init(env, ed);
- if (rc)
- GOTO(out, rc);
-
- cleanup = 3;
obd = class_name2obd(lustre_cfg_string(cfg, 0));
LASSERT(obd != NULL);
- rc = echo_client_setup(obd, cfg);
- if (rc)
- GOTO(out, rc);
- ed->ed_ec = &obd->u.echo_client;
+ LASSERT(env != NULL);
- cleanup = 4;
tgt = class_name2obd(lustre_cfg_string(cfg, 1));
- LASSERT(tgt != NULL);
- next = tgt->obd_lu_dev;
- if (next != NULL && !lu_device_is_cl(next))
- next = NULL;
+ if (tgt == NULL) {
+ CERROR("Can not find tgt device %s\n",
+ lustre_cfg_string(cfg, 1));
+ GOTO(out, rc = -ENODEV);
+ }
- /*
- * if echo client is to be stacked upon ost device, the next is NULL
- * since ost is not a clio device so far
- */
- tgt_type_name = tgt->obd_type->typ_name;
- if (next != NULL) {
- LASSERT(next != NULL);
- if (next->ld_site != NULL)
- GOTO(out, rc = -EBUSY);
-
- next->ld_site = &ed->ed_site->cs_lu;
- rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
- next->ld_type->ldt_name, NULL);
+ next = tgt->obd_lu_dev;
+ if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
+ ed->ed_next_ismd = 1;
+ } else {
+ ed->ed_next_ismd = 0;
+ rc = echo_site_init(env, ed);
if (rc)
GOTO(out, rc);
+ }
+ cleanup = 3;
- /* Trikcy case, I have to determine the obd type since clio
- * uses the different parameters to initialize objects for
- * lov & osc.
- */
- if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0)
- ed->ed_next_islov = 1;
- else
- LASSERT(strcmp(tgt_type_name, LUSTRE_OSC_NAME) == 0);
- } else
- LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
+ rc = echo_client_setup(env, obd, cfg);
+ if (rc)
+ GOTO(out, rc);
+
+ ed->ed_ec = &obd->u.echo_client;
+ cleanup = 4;
+
+ if (ed->ed_next_ismd) {
+ /* Suppose to connect to some Metadata layer */
+ struct lu_site *ls;
+ struct lu_device *ld;
+ int found = 0;
+
+ if (next == NULL) {
+ CERROR("%s is not lu device type!\n",
+ lustre_cfg_string(cfg, 1));
+ GOTO(out, rc = -EINVAL);
+ }
+
+ tgt_type_name = lustre_cfg_string(cfg, 2);
+ if (!tgt_type_name) {
+ CERROR("%s no type name for echo %s setup\n",
+ lustre_cfg_string(cfg, 1),
+ tgt->obd_type->typ_name);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ ls = next->ld_site;
+
+ spin_lock(&ls->ls_ld_lock);
+ cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
+ if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ls->ls_ld_lock);
+
+ if (found == 0) {
+ CERROR("%s is not lu device type!\n",
+ lustre_cfg_string(cfg, 1));
+ GOTO(out, rc = -EINVAL);
+ }
+
+ next = ld;
+ /* For MD echo client, it will use the site in MDS stack */
+ ed->ed_site_myself.cs_lu = *ls;
+ ed->ed_site = &ed->ed_site_myself;
+ ed->ed_cl.cd_lu_dev.ld_site = &ed->ed_site_myself.cs_lu;
+ rc = echo_fid_init(ed, obd->obd_name, lu_site2md(ls));
+ if (rc) {
+ CERROR("echo fid init error %d\n", rc);
+ GOTO(out, rc);
+ }
+ } else {
+ /* if echo client is to be stacked upon ost device, the next is
+ * NULL since ost is not a clio device so far */
+ if (next != NULL && !lu_device_is_cl(next))
+ next = NULL;
+
+ tgt_type_name = tgt->obd_type->typ_name;
+ if (next != NULL) {
+ LASSERT(next != NULL);
+ if (next->ld_site != NULL)
+ GOTO(out, rc = -EBUSY);
+
+ next->ld_site = &ed->ed_site->cs_lu;
+ rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
+ next->ld_type->ldt_name,
+ NULL);
+ if (rc)
+ GOTO(out, rc);
+
+ /* Tricky case, I have to determine the obd type since
+ * CLIO uses the different parameters to initialize
+ * objects for lov & osc. */
+ if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0)
+ ed->ed_next_islov = 1;
+ else
+ LASSERT(strcmp(tgt_type_name,
+ LUSTRE_OSC_NAME) == 0);
+ } else
+ LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
+ }
ed->ed_next = next;
RETURN(&cd->cd_lu_dev);
-
out:
switch(cleanup) {
case 4: {
struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
struct lu_device *next = ed->ed_next;
- while (next)
+ while (next && !ed->ed_next_ismd)
next = next->ld_type->ldt_ops->ldto_device_fini(env, next);
return NULL;
}
struct echo_object *eco;
struct lu_device *next = ed->ed_next;
- CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n", ed, next);
-
- /* destroy locks */
- cfs_spin_lock(&ec->ec_lock);
- while (!cfs_list_empty(&ec->ec_locks)) {
- struct echo_lock *ecl = cfs_list_entry(ec->ec_locks.next,
- struct echo_lock,
- el_chain);
- int still_used = 0;
-
- if (cfs_atomic_dec_and_test(&ecl->el_refcount))
- cfs_list_del_init(&ecl->el_chain);
- else
- still_used = 1;
- cfs_spin_unlock(&ec->ec_lock);
-
- CERROR("echo client: pending lock %p refs %d\n",
- ecl, cfs_atomic_read(&ecl->el_refcount));
-
- echo_lock_release(env, ecl, still_used);
- cfs_spin_lock(&ec->ec_lock);
- }
- cfs_spin_unlock(&ec->ec_lock);
+ CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
+ ed, next);
- LASSERT(ed->ed_site);
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
/* check if there are objects still alive.
* all of cached objects. Anyway, probably the echo device is being
* parallelly accessed.
*/
- cfs_spin_lock(&ec->ec_lock);
- cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
- eco->eo_deleted = 1;
- cfs_spin_unlock(&ec->ec_lock);
-
- /* purge again */
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
-
- CDEBUG(D_INFO,
- "Waiting for the reference of echo object to be dropped\n");
-
- /* Wait for the last reference to be dropped. */
- cfs_spin_lock(&ec->ec_lock);
- while (!cfs_list_empty(&ec->ec_objects)) {
- cfs_spin_unlock(&ec->ec_lock);
- CERROR("echo_client still has objects at cleanup time, "
- "wait for 1 second\n");
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
- cfs_time_seconds(1));
- cfs_spin_lock(&ec->ec_lock);
- }
- cfs_spin_unlock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
+ cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+ eco->eo_deleted = 1;
+ spin_unlock(&ec->ec_lock);
+
+ /* purge again */
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+
+ CDEBUG(D_INFO,
+ "Waiting for the reference of echo object to be dropped\n");
+
+ /* Wait for the last reference to be dropped. */
+ spin_lock(&ec->ec_lock);
+ while (!cfs_list_empty(&ec->ec_objects)) {
+ spin_unlock(&ec->ec_lock);
+ CERROR("echo_client still has objects at cleanup time, "
+ "wait for 1 second\n");
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ cfs_time_seconds(1));
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ spin_lock(&ec->ec_lock);
+ }
+ spin_unlock(&ec->ec_lock);
+
+ LASSERT(cfs_list_empty(&ec->ec_locks));
CDEBUG(D_INFO, "No object exists, exiting...\n");
echo_client_cleanup(d->ld_obd);
-
- while (next)
+ echo_fid_fini(d->ld_obd);
+ while (next && !ed->ed_next_ismd)
next = next->ld_type->ldt_ops->ldto_device_free(env, next);
LASSERT(ed->ed_site == lu2cl_site(d->ld_site));
.ldt_tags = LU_DEVICE_CL,
.ldt_name = LUSTRE_ECHO_CLIENT_NAME,
.ldt_ops = &echo_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
+ .ldt_ctx_tags = LCT_CL_THREAD | LCT_MD_THREAD | LCT_DT_THREAD,
};
/** @} echo_init */
fid = &info->eti_fid;
lsm2fid(lsm, fid);
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
if (IS_ERR(obj))
GOTO(out, eco = (void*)obj);
if (eco->eo_deleted) {
struct lu_object_header *loh = obj->co_lu.lo_header;
LASSERT(&eco->eo_hdr == luh2coh(loh));
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
- cl_object_prune(env, obj);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
}
cl_object_put(env, obj);
rc = cl_wait(env, lck);
if (rc == 0) {
el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- cfs_spin_lock(&ec->ec_lock);
- if (cfs_list_empty(&el->el_chain)) {
- cfs_list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- cfs_atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- cfs_spin_unlock(&ec->ec_lock);
- } else
- cl_lock_release(env, lck, "ec enqueue", cfs_current());
- }
- RETURN(rc);
+ spin_lock(&ec->ec_lock);
+ if (cfs_list_empty(&el->el_chain)) {
+ cfs_list_add(&el->el_chain, &ec->ec_locks);
+ el->el_cookie = ++ec->ec_unique;
+ }
+ cfs_atomic_inc(&el->el_refcount);
+ *cookie = el->el_cookie;
+ spin_unlock(&ec->ec_lock);
+ } else {
+ cl_lock_release(env, lck, "ec enqueue", cfs_current());
+ }
+ }
+ RETURN(rc);
}
static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
info = echo_env_info(env);
io = &info->eti_io;
+ io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco));
if (result < 0)
GOTO(out, result);
ENTRY;
LASSERT(ec != NULL);
- cfs_spin_lock (&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
cfs_list_for_each (el, &ec->ec_locks) {
ecl = cfs_list_entry (el, struct echo_lock, el_chain);
CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
break;
}
}
- cfs_spin_unlock (&ec->ec_lock);
+ spin_unlock(&ec->ec_lock);
if (!found)
RETURN(-ENOENT);
queue = &info->eti_queue;
cl_2queue_init(queue);
+
+ io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, obj);
if (rc < 0)
GOTO(out, rc);
if (async)
rc = cl_echo_async_brw(env, io, typ, queue);
else
- rc = cl_io_submit_sync(env, io, typ, queue,
- CRP_NORMAL, 0);
+ rc = cl_io_submit_sync(env, io, typ, queue, 0);
CDEBUG(D_INFO, "echo_client %s write returns %d\n",
async ? "async" : "sync", rc);
}
return (0);
}
-static int echo_create_object(struct echo_device *ed, int on_target,
- struct obdo *oa, void *ulsm, int ulsm_nob,
- struct obd_trans_info *oti)
+/*
+ * Format the numeric object id into @name and point @lname at it.
+ *
+ * NOTE(review): callers pass eti_name, a char[20]; a full-width 64-bit
+ * id formats to 20 digits plus NUL (21 bytes) — confirm ids stay small
+ * enough, or grow the buffer.
+ */
+static inline void echo_md_build_name(struct lu_name *lname, char *name,
+ __u64 id)
+{
+ sprintf(name, LPU64, id);
+ lname->ln_name = name;
+ lname->ln_namelen = strlen(name);
+}
+
+/* similar to mdt_attr_get_complex */
+/* similar to mdt_attr_get_complex */
+/*
+ * Fetch a LOV EA that did not fit in the caller's buffer.
+ *
+ * First probes the EA size with a NULL buffer, (re)allocates the
+ * per-thread big_lmm buffer to a power-of-two size if needed, then
+ * reads the EA into it.  On success MA_LOV is set and ma->ma_lmm
+ * points at the thread-local buffer (valid until the next call on
+ * this thread).
+ */
+static int echo_big_lmm_get(const struct lu_env *env, struct md_object *o,
+ struct md_attr *ma)
+{
+ struct echo_thread_info *info = echo_env_info(env);
+ int rc;
+
+ ENTRY;
+
+ LASSERT(ma->ma_lmm_size > 0);
+
+ /* Probe: with LU_BUF_NULL, mo_xattr_get returns the EA size. */
+ rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LOV);
+ if (rc < 0)
+ RETURN(rc);
+
+ /* big_lmm may need to be grown */
+ if (info->eti_big_lmmsize < rc) {
+ int size = size_roundup_power2(rc);
+
+ if (info->eti_big_lmmsize > 0) {
+ /* free old buffer */
+ LASSERT(info->eti_big_lmm);
+ OBD_FREE_LARGE(info->eti_big_lmm,
+ info->eti_big_lmmsize);
+ info->eti_big_lmm = NULL;
+ info->eti_big_lmmsize = 0;
+ }
+
+ OBD_ALLOC_LARGE(info->eti_big_lmm, size);
+ if (info->eti_big_lmm == NULL)
+ RETURN(-ENOMEM);
+ info->eti_big_lmmsize = size;
+ }
+ LASSERT(info->eti_big_lmmsize >= rc);
+
+ info->eti_buf.lb_buf = info->eti_big_lmm;
+ info->eti_buf.lb_len = info->eti_big_lmmsize;
+ rc = mo_xattr_get(env, o, &info->eti_buf, XATTR_NAME_LOV);
+ if (rc < 0)
+ RETURN(rc);
+
+ ma->ma_valid |= MA_LOV;
+ ma->ma_lmm = info->eti_big_lmm;
+ ma->ma_lmm_size = rc;
+
+ RETURN(0);
+}
+
+/*
+ * Gather the attributes requested in ma->ma_need from @next.
+ *
+ * MA_INODE comes from mo_attr_get(); MA_LOV from the LOV xattr,
+ * falling back to echo_big_lmm_get() when the caller's buffer is too
+ * small (-ERANGE); MA_ACL_DEF from the default-ACL xattr on
+ * directories.  ma->ma_valid is rebuilt from scratch and ma->ma_need
+ * is preserved across the call.
+ */
+int echo_attr_get_complex(const struct lu_env *env, struct md_object *next,
+ struct md_attr *ma)
+{
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_buf *buf = &info->eti_buf;
+ cfs_umode_t mode = lu_object_attr(&next->mo_lu);
+ int need = ma->ma_need;
+ int rc = 0, rc2;
+
+ ENTRY;
+
+ ma->ma_valid = 0;
+
+ if (need & MA_INODE) {
+ ma->ma_need = MA_INODE;
+ rc = mo_attr_get(env, next, ma);
+ if (rc)
+ GOTO(out, rc);
+ ma->ma_valid |= MA_INODE;
+ }
+
+ if (need & MA_LOV) {
+ if (S_ISREG(mode) || S_ISDIR(mode)) {
+ LASSERT(ma->ma_lmm_size > 0);
+ buf->lb_buf = ma->ma_lmm;
+ buf->lb_len = ma->ma_lmm_size;
+ rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LOV);
+ if (rc2 > 0) {
+ ma->ma_lmm_size = rc2;
+ ma->ma_valid |= MA_LOV;
+ } else if (rc2 == -ENODATA) {
+ /* no LOV EA */
+ ma->ma_lmm_size = 0;
+ } else if (rc2 == -ERANGE) {
+ /* EA larger than the caller's buffer; use
+ * the growable per-thread buffer instead. */
+ rc2 = echo_big_lmm_get(env, next, ma);
+ if (rc2 < 0)
+ GOTO(out, rc = rc2);
+ } else {
+ GOTO(out, rc = rc2);
+ }
+ }
+ }
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (need & MA_ACL_DEF && S_ISDIR(mode)) {
+ buf->lb_buf = ma->ma_acl;
+ buf->lb_len = ma->ma_acl_size;
+ rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
+ if (rc2 > 0) {
+ ma->ma_acl_size = rc2;
+ ma->ma_valid |= MA_ACL_DEF;
+ } else if (rc2 == -ENODATA) {
+ /* no ACLs */
+ ma->ma_acl_size = 0;
+ } else {
+ GOTO(out, rc = rc2);
+ }
+ }
+#endif
+out:
+ ma->ma_need = need;
+ CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
+ rc, ma->ma_valid, ma->ma_lmm);
+ RETURN(rc);
+}
+
+/*
+ * Create one child named @lname with fid @fid under @parent.
+ *
+ * Fails with -EEXIST when the name already exists; any lookup error
+ * other than -ENOENT is propagated.  The child is instantiated in the
+ * echo device's object cache (LOC_F_NEW forces a fresh object) and
+ * then created on the underlying MD device via mdo_create().
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+echo_md_create_internal(const struct lu_env *env, struct echo_device *ed,
+                        struct md_object *parent, struct lu_fid *fid,
+                        struct lu_name *lname, struct md_op_spec *spec,
+                        struct md_attr *ma)
+{
+        struct lu_object *ec_child, *child;
+        struct lu_device *ld = ed->ed_next;
+        struct echo_thread_info *info = echo_env_info(env);
+        struct lu_fid *fid2 = &info->eti_fid2;
+        struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
+        int rc;
+
+        ENTRY;
+
+        /* Use RETURN() (not bare return) so the ENTRY/RETURN trace
+         * pairing stays balanced on the early-exit paths. */
+        rc = mdo_lookup(env, parent, lname, fid2, spec);
+        if (rc == 0)
+                RETURN(-EEXIST);
+        else if (rc != -ENOENT)
+                RETURN(rc);
+
+        ec_child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev,
+                                     fid, &conf);
+        if (IS_ERR(ec_child)) {
+                CERROR("Can not find the child "DFID": rc = %ld\n", PFID(fid),
+                       PTR_ERR(ec_child));
+                RETURN(PTR_ERR(ec_child));
+        }
+
+        child = lu_object_locate(ec_child->lo_header, ld->ld_type);
+        if (child == NULL) {
+                CERROR("Can not locate the child "DFID"\n", PFID(fid));
+                GOTO(out_put, rc = -EINVAL);
+        }
+
+        CDEBUG(D_RPCTRACE, "Start creating object "DFID" %s %p\n",
+               PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
+
+        /*
+         * Do not perform lookup sanity check. We know that name does not exist.
+         */
+        spec->sp_cr_lookup = 0;
+        rc = mdo_create(env, parent, lname, lu2md(child), spec, ma);
+        if (rc) {
+                CERROR("Can not create child "DFID": rc = %d\n", PFID(fid), rc);
+                GOTO(out_put, rc);
+        }
+        CDEBUG(D_RPCTRACE, "End creating object "DFID" %s %p rc = %d\n",
+               PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent, rc);
+        EXIT;
+out_put:
+        lu_object_put(env, ec_child);
+        return rc;
+}
+
+/*
+ * Point ma->ma_lmm at a buffer suitable for the layer below.
+ *
+ * For non-MDD layers the small per-thread eti_lmm suffices; for MDD the
+ * (already allocated) per-thread big_lmm buffer is used.
+ */
+static int echo_set_lmm_size(const struct lu_env *env, struct lu_device *ld,
+ struct md_attr *ma)
+{
+ struct echo_thread_info *info = echo_env_info(env);
+
+ if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
+ ma->ma_lmm = (void *)&info->eti_lmm;
+ ma->ma_lmm_size = sizeof(info->eti_lmm);
+ } else {
+ LASSERT(info->eti_big_lmmsize);
+ ma->ma_lmm = info->eti_big_lmm;
+ ma->ma_lmm_size = info->eti_big_lmmsize;
+ }
+
+ return 0;
+}
+
+/*
+ * Create metadata objects under @ec_parent.
+ *
+ * If @name is given, create exactly one object by that name; otherwise
+ * create @count objects named by the sequential ids starting at @id
+ * (fid->f_oid is advanced in step).  A non-zero @stripe_count requests
+ * a striped layout; @stripe_count == -1 means "default striping"
+ * (no explicit EA is attached).
+ */
+static int echo_create_md_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ struct lu_fid *fid,
+ char *name, int namelen,
+ __u64 id, __u32 mode, int count,
+ int stripe_count, int stripe_offset)
+{
+ struct lu_object *parent;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ struct md_op_spec *spec = &info->eti_spec;
+ struct md_attr *ma = &info->eti_ma;
+ struct lu_device *ld = ed->ed_next;
+ int rc = 0;
+ int i;
+
+ ENTRY;
+
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ RETURN(-ENXIO);
+
+ memset(ma, 0, sizeof(*ma));
+ memset(spec, 0, sizeof(*spec));
+ if (stripe_count != 0) {
+ spec->sp_cr_flags |= FMODE_WRITE;
+ echo_set_lmm_size(env, ld, ma);
+ if (stripe_count != -1) {
+ /* Build an explicit V3 striping request EA. */
+ struct lov_user_md_v3 *lum = &info->eti_lum;
+
+ lum->lmm_magic = LOV_USER_MAGIC_V3;
+ lum->lmm_stripe_count = stripe_count;
+ lum->lmm_stripe_offset = stripe_offset;
+ lum->lmm_pattern = 0;
+ spec->u.sp_ea.eadata = lum;
+ spec->u.sp_ea.eadatalen = sizeof(*lum);
+ spec->sp_cr_flags |= MDS_OPEN_HAS_EA;
+ }
+ }
+
+ ma->ma_attr.la_mode = mode;
+ ma->ma_attr.la_valid = LA_CTIME | LA_MODE;
+ ma->ma_attr.la_ctime = cfs_time_current_64();
+
+ if (name != NULL) {
+ lname->ln_name = name;
+ lname->ln_namelen = namelen;
+ /* If name is specified, only create one object by name */
+ rc = echo_md_create_internal(env, ed, lu2md(parent), fid, lname,
+ spec, ma);
+ RETURN(rc);
+ }
+
+ /* Create multiple object sequenced by id */
+ for (i = 0; i < count; i++) {
+ char *tmp_name = info->eti_name;
+
+ echo_md_build_name(lname, tmp_name, id);
+
+ rc = echo_md_create_internal(env, ed, lu2md(parent), fid, lname,
+ spec, ma);
+ if (rc) {
+ CERROR("Can not create child %s: rc = %d\n", tmp_name,
+ rc);
+ break;
+ }
+ id++;
+ fid->f_oid++;
+ }
+
+ RETURN(rc);
+}
+
+/*
+ * Look up @lname under @parent and return the child lu_object from the
+ * echo device's cache (via lu_object_find_at on the fid returned by
+ * mdo_lookup).  Returns ERR_PTR on failure; the caller must
+ * lu_object_put() the result on success.
+ */
+static struct lu_object *echo_md_lookup(const struct lu_env *env,
+                                        struct echo_device *ed,
+                                        struct md_object *parent,
+                                        struct lu_name *lname)
+{
+        struct echo_thread_info *info = echo_env_info(env);
+        struct lu_fid *fid = &info->eti_fid;
+        struct lu_object *child;
+        int rc;
+        ENTRY;
+
+        /* Log the parent's fid: *fid is only an output buffer here and
+         * holds stale data until mdo_lookup() fills it. */
+        CDEBUG(D_INFO, "lookup %s in parent "DFID" %p\n", lname->ln_name,
+               PFID(lu_object_fid(&parent->mo_lu)), parent);
+        rc = mdo_lookup(env, parent, lname, fid, NULL);
+        if (rc) {
+                CERROR("lookup %s: rc = %d\n", lname->ln_name, rc);
+                RETURN(ERR_PTR(rc));
+        }
+
+        child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
+
+        RETURN(child);
+}
+
+/*
+ * "setattr" test pass: for @count children of @ec_parent named by
+ * sequential ids starting at @id, set a user xattr ("user.test1")
+ * on each child via mo_xattr_set(LU_XATTR_CREATE).
+ *
+ * Note: @name (eti_name) is reused — first as the child name for the
+ * lookup, then overwritten with the xattr name before mo_xattr_set().
+ */
+static int echo_setattr_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ __u64 id, int count)
+{
+ struct lu_object *parent;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ char *name = info->eti_name;
+ struct lu_device *ld = ed->ed_next;
+ struct lu_buf *buf = &info->eti_buf;
+ int rc = 0;
+ int i;
+
+ ENTRY;
+
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ RETURN(-ENXIO);
+
+ for (i = 0; i < count; i++) {
+ struct lu_object *ec_child, *child;
+
+ echo_md_build_name(lname, name, id);
+
+ ec_child = echo_md_lookup(env, ed, lu2md(parent), lname);
+ if (IS_ERR(ec_child)) {
+ CERROR("Can't find child %s: rc = %ld\n",
+ lname->ln_name, PTR_ERR(ec_child));
+ RETURN(PTR_ERR(ec_child));
+ }
+
+ child = lu_object_locate(ec_child->lo_header, ld->ld_type);
+ if (child == NULL) {
+ CERROR("Can not locate the child %s\n", lname->ln_name);
+ lu_object_put(env, ec_child);
+ rc = -EINVAL;
+ break;
+ }
+
+ CDEBUG(D_RPCTRACE, "Start setattr object "DFID"\n",
+ PFID(lu_object_fid(child)));
+
+ buf->lb_buf = info->eti_xattr_buf;
+ buf->lb_len = sizeof(info->eti_xattr_buf);
+
+ sprintf(name, "%s.test1", XATTR_USER_PREFIX);
+ rc = mo_xattr_set(env, lu2md(child), buf, name,
+ LU_XATTR_CREATE);
+ if (rc < 0) {
+ CERROR("Can not setattr child "DFID": rc = %d\n",
+ PFID(lu_object_fid(child)), rc);
+ lu_object_put(env, ec_child);
+ break;
+ }
+ CDEBUG(D_RPCTRACE, "End setattr object "DFID"\n",
+ PFID(lu_object_fid(child)));
+ id++;
+ lu_object_put(env, ec_child);
+ }
+ RETURN(rc);
+}
+
+/*
+ * "getattr" test pass: for @count children of @ec_parent named by
+ * sequential ids starting at @id, fetch the full attribute set
+ * (inode, LOV, parent fid, HSM, default ACL) via
+ * echo_attr_get_complex().
+ */
+static int echo_getattr_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ __u64 id, int count)
+{
+ struct lu_object *parent;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ char *name = info->eti_name;
+ struct md_attr *ma = &info->eti_ma;
+ struct lu_device *ld = ed->ed_next;
+ int rc = 0;
+ int i;
+
+ ENTRY;
+
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ RETURN(-ENXIO);
+
+ memset(ma, 0, sizeof(*ma));
+ ma->ma_need |= MA_INODE | MA_LOV | MA_PFID | MA_HSM | MA_ACL_DEF;
+ ma->ma_acl = info->eti_xattr_buf;
+ ma->ma_acl_size = sizeof(info->eti_xattr_buf);
+
+ for (i = 0; i < count; i++) {
+ struct lu_object *ec_child, *child;
+
+ /* ma_valid and the lmm buffer are reset per iteration;
+ * echo_attr_get_complex() may repoint ma_lmm. */
+ ma->ma_valid = 0;
+ echo_md_build_name(lname, name, id);
+ echo_set_lmm_size(env, ld, ma);
+
+ ec_child = echo_md_lookup(env, ed, lu2md(parent), lname);
+ if (IS_ERR(ec_child)) {
+ CERROR("Can't find child %s: rc = %ld\n",
+ lname->ln_name, PTR_ERR(ec_child));
+ RETURN(PTR_ERR(ec_child));
+ }
+
+ child = lu_object_locate(ec_child->lo_header, ld->ld_type);
+ if (child == NULL) {
+ CERROR("Can not locate the child %s\n", lname->ln_name);
+ lu_object_put(env, ec_child);
+ RETURN(-EINVAL);
+ }
+
+ CDEBUG(D_RPCTRACE, "Start getattr object "DFID"\n",
+ PFID(lu_object_fid(child)));
+ rc = echo_attr_get_complex(env, lu2md(child), ma);
+ if (rc) {
+ CERROR("Can not getattr child "DFID": rc = %d\n",
+ PFID(lu_object_fid(child)), rc);
+ lu_object_put(env, ec_child);
+ break;
+ }
+ CDEBUG(D_RPCTRACE, "End getattr object "DFID"\n",
+ PFID(lu_object_fid(child)));
+ id++;
+ lu_object_put(env, ec_child);
+ }
+
+ RETURN(rc);
+}
+
+/*
+ * "lookup" test pass: resolve @count names (sequential ids starting at
+ * @id) under @ec_parent via mdo_lookup(); the resulting fids are
+ * discarded — only success/failure matters.
+ */
+static int echo_lookup_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ __u64 id, int count)
+{
+ struct lu_object *parent;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ char *name = info->eti_name;
+ struct lu_fid *fid = &info->eti_fid;
+ struct lu_device *ld = ed->ed_next;
+ int rc = 0;
+ int i;
+
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ return -ENXIO;
+
+ /*prepare the requests*/
+ for (i = 0; i < count; i++) {
+ echo_md_build_name(lname, name, id);
+
+ CDEBUG(D_RPCTRACE, "Start lookup object "DFID" %s %p\n",
+ PFID(lu_object_fid(parent)), lname->ln_name, parent);
+
+ rc = mdo_lookup(env, lu2md(parent), lname, fid, NULL);
+ if (rc) {
+ CERROR("Can not lookup child %s: rc = %d\n", name, rc);
+ break;
+ }
+ CDEBUG(D_RPCTRACE, "End lookup object "DFID" %s %p\n",
+ PFID(lu_object_fid(parent)), lname->ln_name, parent);
+
+ id++;
+ }
+ return rc;
+}
+
+/*
+ * Unlink the child named @lname from @parent: look it up in the echo
+ * device cache, locate the underlying MD slice, and mdo_unlink() it.
+ * The cache reference is always dropped before returning.
+ */
+static int echo_md_destroy_internal(const struct lu_env *env,
+ struct echo_device *ed,
+ struct md_object *parent,
+ struct lu_name *lname,
+ struct md_attr *ma)
+{
+ struct lu_device *ld = ed->ed_next;
+ struct lu_object *ec_child;
+ struct lu_object *child;
+ int rc;
+
+ ENTRY;
+
+ ec_child = echo_md_lookup(env, ed, parent, lname);
+ if (IS_ERR(ec_child)) {
+ CERROR("Can't find child %s: rc = %ld\n", lname->ln_name,
+ PTR_ERR(ec_child));
+ RETURN(PTR_ERR(ec_child));
+ }
+
+ child = lu_object_locate(ec_child->lo_header, ld->ld_type);
+ if (child == NULL) {
+ CERROR("Can not locate the child %s\n", lname->ln_name);
+ GOTO(out_put, rc = -EINVAL);
+ }
+
+ CDEBUG(D_RPCTRACE, "Start destroy object "DFID" %s %p\n",
+ PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
+
+ rc = mdo_unlink(env, parent, lu2md(child), lname, ma);
+ if (rc) {
+ CERROR("Can not unlink child %s: rc = %d\n",
+ lname->ln_name, rc);
+ GOTO(out_put, rc);
+ }
+ CDEBUG(D_RPCTRACE, "End destroy object "DFID" %s %p\n",
+ PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
+out_put:
+ lu_object_put(env, ec_child);
+ return rc;
+}
+
+/*
+ * Destroy (unlink) children of @ec_parent.
+ *
+ * If @name is given, unlink exactly that one entry; otherwise unlink
+ * @count entries named by sequential ids starting at @id.
+ *
+ * Returns 0 on success, negative errno on the first failure.
+ */
+static int echo_destroy_object(const struct lu_env *env,
+                               struct echo_device *ed,
+                               struct lu_object *ec_parent,
+                               char *name, int namelen,
+                               __u64 id, __u32 mode,
+                               int count)
+{
+        struct echo_thread_info *info = echo_env_info(env);
+        struct lu_name *lname = &info->eti_lname;
+        struct md_attr *ma = &info->eti_ma;
+        struct lu_device *ld = ed->ed_next;
+        struct lu_object *parent;
+        int rc = 0;
+        int i;
+        ENTRY;
+
+        parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+        if (parent == NULL)
+                RETURN(-EINVAL);
+
+        memset(ma, 0, sizeof(*ma));
+        ma->ma_attr.la_mode = mode;
+        ma->ma_attr.la_valid = LA_CTIME;
+        ma->ma_attr.la_ctime = cfs_time_current_64();
+        ma->ma_need = MA_INODE;
+        ma->ma_valid = 0;
+
+        if (name != NULL) {
+                lname->ln_name = name;
+                lname->ln_namelen = namelen;
+                rc = echo_md_destroy_internal(env, ed, lu2md(parent), lname,
+                                              ma);
+                RETURN(rc);
+        }
+
+        /*prepare the requests*/
+        for (i = 0; i < count; i++) {
+                char *tmp_name = info->eti_name;
+
+                ma->ma_valid = 0;
+                echo_md_build_name(lname, tmp_name, id);
+
+                rc = echo_md_destroy_internal(env, ed, lu2md(parent), lname,
+                                              ma);
+                if (rc) {
+                        /* @name is NULL on this path — report the
+                         * generated child name instead. */
+                        CERROR("Can not unlink child %s: rc = %d\n",
+                               tmp_name, rc);
+                        break;
+                }
+                id++;
+        }
+
+        RETURN(rc);
+}
+
+/* Walk the "/"-separated @path from the filesystem root and return the
+ * lu_object of the last component (caller must lu_object_put() it), or
+ * ERR_PTR() on failure.  Empty components ("//", trailing "/") are
+ * skipped.  Only the MDD layer is supported right now. */
+static struct lu_object *echo_resolve_path(const struct lu_env *env,
+                                           struct echo_device *ed, char *path,
+                                           int path_len)
+{
+        struct lu_device        *ld = ed->ed_next;
+        struct md_device        *md = lu2md_dev(ld);
+        struct echo_thread_info *info = echo_env_info(env);
+        struct lu_fid           *fid = &info->eti_fid;
+        struct lu_name          *lname = &info->eti_lname;
+        struct lu_object        *parent = NULL;
+        struct lu_object        *child = NULL;
+        int rc = 0;
+        ENTRY;
+
+        /* Only support MDD layer right now: start from the MDD root fid */
+        rc = md->md_ops->mdo_root_get(env, md, fid);
+        if (rc) {
+                CERROR("get root error: rc = %d\n", rc);
+                RETURN(ERR_PTR(rc));
+        }
+
+        parent = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
+        if (IS_ERR(parent)) {
+                CERROR("Can not find the parent "DFID": rc = %ld\n",
+                       PFID(fid), PTR_ERR(parent));
+                RETURN(parent);
+        }
+
+        while (1) {
+                struct lu_object *ld_parent;
+                char *e;
+
+                e = strsep(&path, "/");
+                if (e == NULL)
+                        break;
+
+                /* skip empty path components; stop at end of path */
+                if (e[0] == 0) {
+                        if (!path || path[0] == '\0')
+                                break;
+                        continue;
+                }
+
+                lname->ln_name = e;
+                lname->ln_namelen = strlen(e);
+
+                /* locate the next-layer object before doing the md lookup */
+                ld_parent = lu_object_locate(parent->lo_header, ld->ld_type);
+                if (ld_parent == NULL) {
+                        lu_object_put(env, parent);
+                        rc = -EINVAL;
+                        break;
+                }
+
+                child = echo_md_lookup(env, ed, lu2md(ld_parent), lname);
+                /* drop the parent reference whether or not lookup
+                 * succeeded; on success @child holds its own reference */
+                lu_object_put(env, parent);
+                if (IS_ERR(child)) {
+                        rc = (int)PTR_ERR(child);
+                        CERROR("lookup %s under parent "DFID": rc = %d\n",
+                               lname->ln_name, PFID(lu_object_fid(ld_parent)),
+                               rc);
+                        break;
+                }
+                parent = child;
+        }
+        if (rc)
+                RETURN(ERR_PTR(rc));
+
+        RETURN(parent);
+}
+
+/* Fill the per-environment user credential from the current process
+ * identity; non-root users lose fs capabilities.  Paired with
+ * echo_ucred_fini(). */
+static void echo_ucred_init(struct lu_env *env)
+{
+        struct lu_ucred *ucred = lu_ucred(env);
+
+        ucred->uc_valid = UCRED_INVALID;
+
+        /* no supplementary groups */
+        ucred->uc_suppgids[0] = -1;
+        ucred->uc_suppgids[1] = -1;
+
+        ucred->uc_uid = ucred->uc_o_uid = cfs_curproc_uid();
+        ucred->uc_gid = ucred->uc_o_gid = cfs_curproc_gid();
+        ucred->uc_fsuid = ucred->uc_o_fsuid = cfs_curproc_fsuid();
+        ucred->uc_fsgid = ucred->uc_o_fsgid = cfs_curproc_fsgid();
+        ucred->uc_cap = cfs_curproc_cap_pack();
+
+        /* remove fs privilege for non-root user. */
+        if (ucred->uc_fsuid)
+                ucred->uc_cap &= ~CFS_CAP_FS_MASK;
+        ucred->uc_valid = UCRED_NEW;
+}
+
+/* Mark the per-environment credential unused again (counterpart of
+ * echo_ucred_init()). */
+static void echo_ucred_fini(struct lu_env *env)
+{
+        struct lu_ucred *ucred = lu_ucred(env);
+        ucred->uc_valid = UCRED_INIT;
+}
+
+/* lu_context/lu_session tags used for MD echo-client environments */
+#define ECHO_MD_CTX_TAG (LCT_REMEMBER | LCT_MD_THREAD)
+#define ECHO_MD_SES_TAG (LCT_REMEMBER | LCT_SESSION)
+
+/* Dispatch one ECHO_MD_* metadata test command (create/mkdir/destroy/
+ * lookup/getattr/setattr) against the MDD layer below this echo device.
+ * @path is resolved to the parent object; an optional entry name is
+ * copied from userspace (data->ioc_pbuf2/ioc_plen2). */
+static int echo_md_handler(struct echo_device *ed, int command,
+                           char *path, int path_len, int id, int count,
+                           struct obd_ioctl_data *data)
+{
+        struct echo_thread_info *info;
+        struct lu_device        *ld = ed->ed_next;
+        struct lu_env           *env;
+        int                      refcheck;
+        struct lu_object        *parent;
+        char                    *name = NULL;
+        int                      namelen = data->ioc_plen2;
+        int                      rc = 0;
+        ENTRY;
+
+        if (ld == NULL) {
+                CERROR("MD echo client is not being initialized properly\n");
+                RETURN(-EINVAL);
+        }
+
+        if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
+                CERROR("Only support MDD layer right now!\n");
+                RETURN(-EINVAL);
+        }
+
+        env = cl_env_get(&refcheck);
+        if (IS_ERR(env))
+                RETURN(PTR_ERR(env));
+
+        /* make sure the env carries MD-thread context/session tags */
+        rc = lu_env_refill_by_tags(env, ECHO_MD_CTX_TAG, ECHO_MD_SES_TAG);
+        if (rc != 0)
+                GOTO(out_env, rc);
+
+        /* init big_lmm buffer (per-thread; must not be in use already) */
+        info = echo_env_info(env);
+        LASSERT(info->eti_big_lmm == NULL);
+        OBD_ALLOC_LARGE(info->eti_big_lmm, MIN_MD_SIZE);
+        if (info->eti_big_lmm == NULL)
+                GOTO(out_env, rc = -ENOMEM);
+        info->eti_big_lmmsize = MIN_MD_SIZE;
+
+        parent = echo_resolve_path(env, ed, path, path_len);
+        if (IS_ERR(parent)) {
+                CERROR("Can not resolve the path %s: rc = %ld\n", path,
+                       PTR_ERR(parent));
+                GOTO(out_free, rc = PTR_ERR(parent));
+        }
+
+        if (namelen > 0) {
+                /* copy the optional entry name, NUL-terminated */
+                OBD_ALLOC(name, namelen + 1);
+                if (name == NULL)
+                        GOTO(out_put, rc = -ENOMEM);
+                if (cfs_copy_from_user(name, data->ioc_pbuf2, namelen))
+                        GOTO(out_name, rc = -EFAULT);
+        }
+
+        echo_ucred_init(env);
+
+        switch (command) {
+        case ECHO_MD_CREATE:
+        case ECHO_MD_MKDIR: {
+                struct echo_thread_info *info = echo_env_info(env);
+                __u32 mode = data->ioc_obdo2.o_mode;
+                struct lu_fid *fid = &info->eti_fid;
+                int stripe_count = (int)data->ioc_obdo2.o_misc;
+                int stripe_index = (int)data->ioc_obdo2.o_stripe_idx;
+
+                /* fid of the first object to create comes from obdo1 */
+                fid->f_seq = data->ioc_obdo1.o_seq;
+                fid->f_oid = (__u32)data->ioc_obdo1.o_id;
+                fid->f_ver = 0;
+                /* In the function below, .hs_keycmp resolves to
+                 * lu_obj_hop_keycmp() */
+                /* coverity[overrun-buffer-val] */
+                rc = echo_create_md_object(env, ed, parent, fid, name, namelen,
+                                           id, mode, count, stripe_count,
+                                           stripe_index);
+                break;
+        }
+        case ECHO_MD_DESTROY:
+        case ECHO_MD_RMDIR: {
+                __u32 mode = data->ioc_obdo2.o_mode;
+
+                rc = echo_destroy_object(env, ed, parent, name, namelen,
+                                         id, mode, count);
+                break;
+        }
+        case ECHO_MD_LOOKUP:
+                rc = echo_lookup_object(env, ed, parent, id, count);
+                break;
+        case ECHO_MD_GETATTR:
+                rc = echo_getattr_object(env, ed, parent, id, count);
+                break;
+        case ECHO_MD_SETATTR:
+                rc = echo_setattr_object(env, ed, parent, id, count);
+                break;
+        default:
+                CERROR("unknown command %d\n", command);
+                rc = -EINVAL;
+                break;
+        }
+        echo_ucred_fini(env);
+
+out_name:
+        if (name != NULL)
+                OBD_FREE(name, namelen + 1);
+out_put:
+        lu_object_put(env, parent);
+out_free:
+        LASSERT(info->eti_big_lmm);
+        OBD_FREE_LARGE(info->eti_big_lmm, info->eti_big_lmmsize);
+        info->eti_big_lmm = NULL;
+        info->eti_big_lmmsize = 0;
+out_env:
+        cl_env_put(env, &refcheck);
+        return rc;
+}
+
+static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
+ int on_target, struct obdo *oa, void *ulsm,
+ int ulsm_nob, struct obd_trans_info *oti)
{
struct echo_object *eco;
struct echo_client_obd *ec = ed->ed_ec;
RETURN(-EINVAL);
}
- rc = obd_alloc_memmd(ec->ec_exp, &lsm);
+ rc = echo_alloc_memmd(ed, &lsm);
if (rc < 0) {
- CERROR("Cannot allocate md, rc = %d\n", rc);
+ CERROR("Cannot allocate md: rc = %d\n", rc);
GOTO(failed, rc);
}
if (lsm->lsm_stripe_size == 0)
lsm->lsm_stripe_size = CFS_PAGE_SIZE;
- idx = ll_rand();
+ idx = cfs_rand();
/* setup stripes: indices + default ids if required */
for (i = 0; i < lsm->lsm_stripe_count; i++) {
/* Only echo objects are allowed to be created */
LASSERT((oa->o_valid & OBD_MD_FLGROUP) &&
(oa->o_seq == FID_SEQ_ECHO));
- rc = obd_create(ec->ec_exp, oa, &lsm, oti);
+ rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
if (rc != 0) {
- CERROR("Cannot create objects, rc = %d\n", rc);
+ CERROR("Cannot create objects: rc = %d\n", rc);
GOTO(failed, rc);
}
created = 1;
failed:
if (created && rc)
- obd_destroy(ec->ec_exp, oa, lsm, oti, NULL, NULL);
+ obd_destroy(env, ec->ec_exp, oa, lsm, oti, NULL, NULL);
if (lsm)
- obd_free_memmd(ec->ec_exp, &lsm);
+ echo_free_memmd(ed, &lsm);
if (rc)
- CERROR("create object failed with rc = %d\n", rc);
+ CERROR("create object failed with: rc = %d\n", rc);
return (rc);
}
static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
struct obdo *oa)
{
- struct echo_client_obd *ec = ed->ed_ec;
struct lov_stripe_md *lsm = NULL;
struct echo_object *eco;
int rc;
RETURN(-EINVAL);
}
- rc = obd_alloc_memmd(ec->ec_exp, &lsm);
+ rc = echo_alloc_memmd(ed, &lsm);
if (rc < 0)
RETURN(rc);
else
rc = PTR_ERR(eco);
if (lsm)
- obd_free_memmd(ec->ec_exp, &lsm);
+ echo_free_memmd(ed, &lsm);
RETURN(rc);
}
obd_size count, int async,
struct obd_trans_info *oti)
{
- struct echo_client_obd *ec = ed->ed_ec;
struct lov_stripe_md *lsm = eco->eo_lsm;
obd_count npages;
struct brw_page *pga;
int rc;
int verify;
int gfp_mask;
+ int brw_flags = 0;
ENTRY;
verify = ((oa->o_id) != ECHO_PERSISTENT_OBJID &&
/* XXX think again with misaligned I/O */
npages = count >> CFS_PAGE_SHIFT;
+ if (rw == OBD_BRW_WRITE)
+ brw_flags = OBD_BRW_ASYNC;
+
OBD_ALLOC(pga, npages * sizeof(*pga));
if (pga == NULL)
RETURN(-ENOMEM);
pages[i] = pgp->pg;
pgp->count = CFS_PAGE_SIZE;
pgp->off = off;
- pgp->flag = 0;
+ pgp->flag = brw_flags;
if (verify)
echo_client_page_debug_setup(lsm, pgp->pg, rw,
oa->o_id, off, pgp->count);
}
- if (ed->ed_next == NULL) {
- struct obd_info oinfo = { { { 0 } } };
- oinfo.oi_oa = oa;
- oinfo.oi_md = lsm;
- rc = obd_brw(rw, ec->ec_exp, &oinfo, npages, pga, oti);
- } else
- rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
+ /* brw mode can only be used at client */
+ LASSERT(ed->ed_next != NULL);
+ rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
out:
if (rc != 0 || rw != OBD_BRW_READ)
RETURN(rc);
}
-static int echo_client_prep_commit(struct obd_export *exp, int rw,
- struct obdo *oa, struct echo_object *eco,
- obd_off offset, obd_size count,
- obd_size batch, struct obd_trans_info *oti)
+static int echo_client_prep_commit(const struct lu_env *env,
+ struct obd_export *exp, int rw,
+ struct obdo *oa, struct echo_object *eco,
+ obd_off offset, obd_size count,
+ obd_size batch, struct obd_trans_info *oti,
+ int async)
{
struct lov_stripe_md *lsm = eco->eo_lsm;
struct obd_ioobj ioo;
struct niobuf_remote *rnb;
obd_off off;
obd_size npages, tot_pages;
- int i, ret = 0;
+ int i, ret = 0, brw_flags = 0;
+
ENTRY;
if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
if (lnb == NULL || rnb == NULL)
GOTO(out, ret = -ENOMEM);
+ if (rw == OBD_BRW_WRITE && async)
+ brw_flags |= OBD_BRW_ASYNC;
+
obdo_to_ioobj(oa, &ioo);
off = offset;
for (i = 0; i < npages; i++, off += CFS_PAGE_SIZE) {
rnb[i].offset = off;
rnb[i].len = CFS_PAGE_SIZE;
+ rnb[i].flags = brw_flags;
}
ioo.ioo_bufcnt = npages;
oti->oti_transno = 0;
lpages = npages;
- ret = obd_preprw(rw, exp, oa, 1, &ioo, rnb, &lpages, lnb, oti,
- NULL);
+ ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
+ lnb, oti, NULL);
if (ret != 0)
GOTO(out, ret);
LASSERT(lpages == npages);
if (page == NULL && lnb[i].rc == 0)
continue;
+ if (async)
+ lnb[i].flags |= OBD_BRW_ASYNC;
+
if (oa->o_id == ECHO_PERSISTENT_OBJID ||
(oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
(oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
rnb[i].len);
}
- ret = obd_commitrw(rw, exp, oa, 1,&ioo,rnb,npages,lnb,oti,ret);
+ ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
+ rnb, npages, lnb, oti, ret);
if (ret != 0)
GOTO(out, ret);
RETURN(ret);
}
-static int echo_client_brw_ioctl(int rw, struct obd_export *exp,
- struct obd_ioctl_data *data)
+static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
+ struct obd_export *exp,
+ struct obd_ioctl_data *data,
+ struct obd_trans_info *dummy_oti)
{
struct obd_device *obd = class_exp2obd(exp);
struct echo_device *ed = obd2echo_dev(obd);
struct echo_client_obd *ec = ed->ed_ec;
- struct obd_trans_info dummy_oti = { .oti_thread = NULL };
struct obdo *oa = &data->ioc_obdo1;
struct echo_object *eco;
int rc;
int async = 1;
+ long test_mode;
ENTRY;
LASSERT(oa->o_valid & OBD_MD_FLGROUP);
oa->o_valid &= ~OBD_MD_FLHANDLE;
- switch((long)data->ioc_pbuf1) {
- case 1:
+ /* OFD/obdfilter works only via prep/commit */
+ test_mode = (long)data->ioc_pbuf1;
+ if (test_mode == 1)
async = 0;
+
+ if (ed->ed_next == NULL && test_mode != 3) {
+ test_mode = 3;
+ data->ioc_plen1 = data->ioc_count;
+ }
+
+ /* Truncate batch size to maximum */
+ if (data->ioc_plen1 > PTLRPC_MAX_BRW_SIZE)
+ data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE;
+
+ switch (test_mode) {
+ case 1:
/* fall through */
case 2:
rc = echo_client_kbrw(ed, rw, oa,
eco, data->ioc_offset,
- data->ioc_count, async, &dummy_oti);
+ data->ioc_count, async, dummy_oti);
break;
case 3:
- rc = echo_client_prep_commit(ec->ec_exp, rw, oa,
- eco, data->ioc_offset,
- data->ioc_count, data->ioc_plen1,
- &dummy_oti);
+ rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa,
+ eco, data->ioc_offset,
+ data->ioc_count, data->ioc_plen1,
+ dummy_oti, async);
break;
default:
rc = -EINVAL;
}
static int
-echo_client_iocontrol(unsigned int cmd, struct obd_export *exp,
- int len, void *karg, void *uarg)
+echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
+ void *karg, void *uarg)
{
struct obd_device *obd = exp->exp_obd;
struct echo_device *ed = obd2echo_dev(obd);
struct echo_object *eco;
struct obd_ioctl_data *data = karg;
struct obd_trans_info dummy_oti;
+ struct lu_env *env;
struct oti_req_ack_lock *ack_lock;
struct obdo *oa;
+ struct lu_fid fid;
int rw = OBD_BRW_READ;
int rc = 0;
int i;
ENTRY;
- cfs_unlock_kernel();
-
memset(&dummy_oti, 0, sizeof(dummy_oti));
oa = &data->ioc_obdo1;
oa->o_valid |= OBD_MD_FLGROUP;
oa->o_seq = FID_SEQ_ECHO;
}
- /* assume we can touch filter native objects with echo device. */
- /* LASSERT(oa->o_seq == FID_SEQ_ECHO); */
+
+ /* This FID is unpacked just for validation at this point */
+ rc = fid_ostid_unpack(&fid, &oa->o_oi, 0);
+ if (rc < 0)
+ RETURN(rc);
+
+ OBD_ALLOC_PTR(env);
+ if (env == NULL)
+ RETURN(-ENOMEM);
+
+ rc = lu_env_init(env, LCT_DT_THREAD);
+ if (rc)
+ GOTO(out, rc = -ENOMEM);
switch (cmd) {
case OBD_IOC_CREATE: /* may create echo object */
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
- rc = echo_create_object (ed, 1, oa,
- data->ioc_pbuf1, data->ioc_plen1,
- &dummy_oti);
+ rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
+ data->ioc_plen1, &dummy_oti);
+ GOTO(out, rc);
+
+        case OBD_IOC_ECHO_MD: {
+                int count;
+                int cmd;
+                char *dir = NULL;
+                int dirlen;
+                __u64 id;
+
+                if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+                        GOTO(out, rc = -EPERM);
+
+                count = data->ioc_count;
+                cmd = data->ioc_command;
+
+                id = data->ioc_obdo2.o_id;
+
+                /* copy the user-supplied directory path, NUL-terminated */
+                dirlen = data->ioc_plen1;
+                OBD_ALLOC(dir, dirlen + 1);
+                if (dir == NULL)
+                        GOTO(out, rc = -ENOMEM);
+
+                if (cfs_copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
+                        /* free with the same size it was allocated with */
+                        OBD_FREE(dir, dirlen + 1);
+                        GOTO(out, rc = -EFAULT);
+                }
+
+                rc = echo_md_handler(ed, cmd, dir, dirlen, id, count, data);
+                OBD_FREE(dir, dirlen + 1);
+                GOTO(out, rc);
+        }
+        case OBD_IOC_ECHO_ALLOC_SEQ: {
+                struct lu_env *cl_env;
+                int            refcheck;
+                __u64          seq;
+                int            max_count;
+
+                if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+                        GOTO(out, rc = -EPERM);
+
+                cl_env = cl_env_get(&refcheck);
+                if (IS_ERR(cl_env))
+                        GOTO(out, rc = PTR_ERR(cl_env));
+
+                rc = lu_env_refill_by_tags(cl_env, ECHO_MD_CTX_TAG,
+                                           ECHO_MD_SES_TAG);
+                if (rc != 0) {
+                        cl_env_put(cl_env, &refcheck);
+                        GOTO(out, rc);
+                }
+
+                rc = seq_client_get_seq(cl_env, ed->ed_cl_seq, &seq);
+                cl_env_put(cl_env, &refcheck);
+                if (rc < 0) {
+                        CERROR("%s: Can not alloc seq: rc = %d\n",
+                               obd->obd_name, rc);
+                        GOTO(out, rc);
+                }
+
+                /* GOTO(out, ...) rather than a bare return: the out label
+                 * releases the lu_env allocated by this ioctl handler */
+                if (cfs_copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
+                        GOTO(out, rc = -EFAULT);
+
+                max_count = LUSTRE_SEQ_MAX_WIDTH;
+                if (cfs_copy_to_user(data->ioc_pbuf2, &max_count,
+                                     data->ioc_plen2))
+                        GOTO(out, rc = -EFAULT);
+                GOTO(out, rc);
+        }
case OBD_IOC_DESTROY:
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
- rc = echo_get_object (&eco, ed, oa);
+ rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- rc = obd_destroy(ec->ec_exp, oa, eco->eo_lsm,
+ rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm,
&dummy_oti, NULL, NULL);
if (rc == 0)
eco->eo_deleted = 1;
GOTO(out, rc);
case OBD_IOC_GETATTR:
- rc = echo_get_object (&eco, ed, oa);
+ rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
struct obd_info oinfo = { { { 0 } } };
oinfo.oi_md = eco->eo_lsm;
oinfo.oi_oa = oa;
- rc = obd_getattr(ec->ec_exp, &oinfo);
+ rc = obd_getattr(env, ec->ec_exp, &oinfo);
echo_put_object(eco);
}
GOTO(out, rc);
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
- rc = echo_get_object (&eco, ed, oa);
+ rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
struct obd_info oinfo = { { { 0 } } };
oinfo.oi_oa = oa;
oinfo.oi_md = eco->eo_lsm;
- rc = obd_setattr(ec->ec_exp, &oinfo, NULL);
+ rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
echo_put_object(eco);
}
GOTO(out, rc);
rw = OBD_BRW_WRITE;
/* fall through */
case OBD_IOC_BRW_READ:
- rc = echo_client_brw_ioctl(rw, exp, data);
+ rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
GOTO(out, rc);
case ECHO_IOC_GET_STRIPE:
echo_put_object(eco);
}
} else {
- rc = echo_create_object(ed, 0, oa,
+ rc = echo_create_object(env, ed, 0, oa,
data->ioc_pbuf1,
data->ioc_plen1, &dummy_oti);
}
}
EXIT;
- out:
+out:
+ lu_env_fini(env);
+ OBD_FREE_PTR(env);
/* XXX this should be in a helper also called by target_send_reply */
for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4;
ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
}
- cfs_lock_kernel();
-
return rc;
}
-static int echo_client_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
+static int echo_client_setup(const struct lu_env *env,
+ struct obd_device *obddev, struct lustre_cfg *lcfg)
{
struct echo_client_obd *ec = &obddev->u.echo_client;
struct obd_device *tgt;
RETURN(-EINVAL);
}
- cfs_spin_lock_init (&ec->ec_lock);
+ spin_lock_init(&ec->ec_lock);
CFS_INIT_LIST_HEAD (&ec->ec_objects);
CFS_INIT_LIST_HEAD (&ec->ec_locks);
ec->ec_unique = 0;
ec->ec_nstripes = 0;
+ if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
+ lu_context_tags_update(ECHO_MD_CTX_TAG);
+ lu_session_tags_update(ECHO_MD_SES_TAG);
+ RETURN(0);
+ }
+
OBD_ALLOC(ocd, sizeof(*ocd));
if (ocd == NULL) {
CERROR("Can't alloc ocd connecting to %s\n",
}
ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
- OBD_CONNECT_GRANT | OBD_CONNECT_FULL20;
+ OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
+ OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE;
ocd->ocd_version = LUSTRE_VERSION_CODE;
ocd->ocd_group = FID_SEQ_ECHO;
- rc = obd_connect(NULL, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
+ rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
if (rc == 0) {
/* Turn off pinger because it connects to tgt obd directly. */
- cfs_spin_lock(&tgt->obd_dev_lock);
- cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
- cfs_spin_unlock(&tgt->obd_dev_lock);
+ spin_lock(&tgt->obd_dev_lock);
+ cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+ spin_unlock(&tgt->obd_dev_lock);
}
OBD_FREE(ocd, sizeof(*ocd));
static int echo_client_cleanup(struct obd_device *obddev)
{
+ struct echo_device *ed = obd2echo_dev(obddev);
struct echo_client_obd *ec = &obddev->u.echo_client;
int rc;
ENTRY;
+ /*Do nothing for Metadata echo client*/
+ if (ed == NULL )
+ RETURN(0);
+
+ if (ed->ed_next_ismd) {
+ lu_context_tags_clear(ECHO_MD_CTX_TAG);
+ lu_session_tags_clear(ECHO_MD_SES_TAG);
+ RETURN(0);
+ }
+
if (!cfs_list_empty(&obddev->obd_exports)) {
CERROR("still has clients!\n");
RETURN(-EBUSY);
return rc;
}
-static struct obd_ops echo_obd_ops = {
+static struct obd_ops echo_client_obd_ops = {
.o_owner = THIS_MODULE,
#if 0
int rc;
lprocfs_echo_init_vars(&lvars);
- rc = class_register_type(&echo_obd_ops, NULL, lvars.module_vars,
- LUSTRE_ECHO_CLIENT_NAME, &echo_device_type);
- if (rc == 0)
- lu_kmem_init(echo_caches);
+
+ rc = lu_kmem_init(echo_caches);
+ if (rc == 0) {
+ rc = class_register_type(&echo_client_obd_ops, NULL,
+ lvars.module_vars,
+ LUSTRE_ECHO_CLIENT_NAME,
+ &echo_device_type);
+ if (rc)
+ lu_kmem_fini(echo_caches);
+ }
return rc;
}
lu_kmem_fini(echo_caches);
}
+#ifdef __KERNEL__
+/* Module init: register the persistent pages and the obdecho server
+ * type (server builds only), then the echo client.  On client failure
+ * the server-side registrations are rolled back. */
+static int __init obdecho_init(void)
+{
+        struct lprocfs_static_vars lvars;
+        int rc;
+
+        ENTRY;
+        LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
+
+        /* echo block size must evenly divide a page */
+        LASSERT(CFS_PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+
+        lprocfs_echo_init_vars(&lvars);
+
+# ifdef HAVE_SERVER_SUPPORT
+        rc = echo_persistent_pages_init();
+        if (rc != 0)
+                goto failed_0;
+
+        rc = class_register_type(&echo_obd_ops, NULL, lvars.module_vars,
+                                 LUSTRE_ECHO_NAME, NULL);
+        if (rc != 0)
+                goto failed_1;
+# endif
+
+        rc = echo_client_init();
+
+# ifdef HAVE_SERVER_SUPPORT
+        /* unwind the server-side registrations if the client part failed */
+        if (rc == 0)
+                RETURN(0);
+
+        class_unregister_type(LUSTRE_ECHO_NAME);
+failed_1:
+        echo_persistent_pages_fini();
+failed_0:
+# endif
+        RETURN(rc);
+}
+
+/* Module exit: tear down in reverse order of obdecho_init().
+ * NOTE(review): __exit is commented out in the original — presumably
+ * deliberate; keep as-is. */
+static void /*__exit*/ obdecho_exit(void)
+{
+        echo_client_exit();
+
+# ifdef HAVE_SERVER_SUPPORT
+        class_unregister_type(LUSTRE_ECHO_NAME);
+        echo_persistent_pages_fini();
+# endif
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Testing Echo OBD driver");
+MODULE_LICENSE("GPL");
+
+cfs_module(obdecho, LUSTRE_VERSION_STRING, obdecho_init, obdecho_exit);
+#endif /* __KERNEL__ */
+
/** @} echo_client */