#include <lustre_debug.h>
#include <lprocfs_status.h>
#include <cl_object.h>
+#include <md_object.h>
#include <lustre_fid.h>
#include <lustre_acl.h>
#include <lustre_net.h>
/*
 * Per-page state attached to a cl_page by the echo client.
 * NOTE(review): this hunk ports the legacy libcfs typedef cfs_page_t to the
 * native kernel "struct page"; the -/+ markers are diff lines from the
 * porting patch and are preserved verbatim.
 */
struct echo_page {
struct cl_page_slice ep_cl;
/* ep_lock serializes access to this page slice — presumably protects
 * per-page I/O state; confirm against the lock/unlock call sites. */
struct mutex ep_lock;
- cfs_page_t *ep_vmpage;
+ struct page *ep_vmpage;
};
struct echo_lock {
obd_off end, int mode, __u64 *cookie);
static int cl_echo_cancel (struct echo_device *d, __u64 cookie);
static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
- cfs_page_t **pages, int npages, int async);
+ struct page **pages, int npages, int async);
static struct echo_thread_info *echo_env_info(const struct lu_env *env);
/*
 * Per-thread scratch context for the echo client (allocated from
 * echo_thread_kmem, see the lu_context key init below in the file).
 * NOTE(review): the paired -/+ lines with identical content are
 * whitespace/re-indentation changes only (leading indentation is stripped
 * in this view).  The metadata-echo fields (eti_spec .. eti_xattr_buf) are
 * moved under HAVE_SERVER_SUPPORT because they are only used by the
 * server-side MD echo paths.
 */
struct echo_thread_info {
- struct echo_object_conf eti_conf;
- struct lustre_md eti_md;
+ struct echo_object_conf eti_conf;
+ struct lustre_md eti_md;
- struct cl_2queue eti_queue;
- struct cl_io eti_io;
- struct cl_lock_descr eti_descr;
- struct lu_fid eti_fid;
+ struct cl_2queue eti_queue;
+ struct cl_io eti_io;
+ struct cl_lock_descr eti_descr;
+ struct lu_fid eti_fid;
struct lu_fid eti_fid2;
- struct md_op_spec eti_spec;
- struct lov_mds_md_v3 eti_lmm;
- struct lov_user_md_v3 eti_lum;
- struct md_attr eti_ma;
- struct lu_name eti_lname;
+#ifdef HAVE_SERVER_SUPPORT
+ struct md_op_spec eti_spec;
+ struct lov_mds_md_v3 eti_lmm;
+ struct lov_user_md_v3 eti_lum;
+ struct md_attr eti_ma;
+ struct lu_name eti_lname;
/* per-thread values, can be re-used */
void *eti_big_lmm;
int eti_big_lmmsize;
- char eti_name[20];
- struct lu_buf eti_buf;
- char eti_xattr_buf[LUSTRE_POSIX_ACL_MAX_SIZE];
+ char eti_name[20];
+ struct lu_buf eti_buf;
+ char eti_xattr_buf[LUSTRE_POSIX_ACL_MAX_SIZE];
+#endif
};
/* No session used right now */
unsigned long dummy;
};
/*
 * File-scope slab caches for echo client objects.
 * NOTE(review): cfs_mem_cache_t is the removed libcfs wrapper around the
 * kernel slab allocator; the port uses struct kmem_cache directly.  The
 * commented-out echo_req_kmem line is also converted from C99 // style to
 * a block comment (kernel style prefers block comments for this).
 */
-static cfs_mem_cache_t *echo_lock_kmem;
-static cfs_mem_cache_t *echo_object_kmem;
-static cfs_mem_cache_t *echo_thread_kmem;
-static cfs_mem_cache_t *echo_session_kmem;
-//static cfs_mem_cache_t *echo_req_kmem;
+static struct kmem_cache *echo_lock_kmem;
+static struct kmem_cache *echo_object_kmem;
+static struct kmem_cache *echo_thread_kmem;
+static struct kmem_cache *echo_session_kmem;
+/* static struct kmem_cache *echo_req_kmem; */
static struct lu_kmem_descr echo_caches[] = {
{
*
* @{
*/
/*
 * cl_page_operations::cpo_vmpage() for the echo layer: return the VM page
 * backing this page slice.  Only the return type changes in this hunk
 * (cfs_page_t -> struct page); behavior is identical.
 */
-static cfs_page_t *echo_page_vmpage(const struct lu_env *env,
+static struct page *echo_page_vmpage(const struct lu_env *env,
const struct cl_page_slice *slice)
{
return cl2echo_page(slice)->ep_vmpage;
}
/*
 * cl_page_operations::cpo_fini() for the echo layer: drop the per-object
 * page count and release the page-cache reference taken when the slice
 * was initialized (see echo_page_init below in the file).
 * NOTE(review): the rewrite folds the two locals (ep, vmpage) into a
 * single cl2echo_page(slice)->ep_vmpage expression — no behavior change,
 * just fewer temporaries.
 */
static void echo_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
+ struct cl_page_slice *slice)
{
- struct echo_page *ep = cl2echo_page(slice);
- struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
- cfs_page_t *vmpage = ep->ep_vmpage;
- ENTRY;
+ struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
+ ENTRY;
- cfs_atomic_dec(&eco->eo_npages);
- page_cache_release(vmpage);
- EXIT;
+ cfs_atomic_dec(&eco->eo_npages);
+ page_cache_release(cl2echo_page(slice)->ep_vmpage);
+ EXIT;
}
static int echo_page_prep(const struct lu_env *env,
* @{
*/
static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct echo_page *ep = cl_object_page_slice(obj, page);
struct echo_object *eco = cl2echo_obj(obj);
struct echo_lock *el;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, __GFP_IO);
if (el != NULL) {
cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
el->el_object = cl2echo_obj(obj);
/* we're the top dev. */
LASSERT(hdr == NULL);
- OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, __GFP_IO);
if (eco != NULL) {
struct cl_object_header *hdr = &eco->eo_hdr;
{
struct echo_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, __GFP_IO);
if (info == NULL)
info = ERR_PTR(-ENOMEM);
return info;
{
struct echo_session_info *session;
- OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, __GFP_IO);
if (session == NULL)
session = ERR_PTR(-ENOMEM);
return session;
LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
/*
 * FID sequence setup for the metadata echo device.  This hunk moves the
 * whole helper (and its ECHO_SEQ_WIDTH constant) under
 * HAVE_SERVER_SUPPORT, since seq_server_site only exists in server
 * builds.  The function interior is elided by this chunk — only the
 * boundaries are visible here.
 */
-#define ECHO_SEQ_WIDTH 0xffffffff
+#ifdef HAVE_SERVER_SUPPORT
+# define ECHO_SEQ_WIDTH 0xffffffff
static int echo_fid_init(struct echo_device *ed, char *obd_name,
struct seq_server_site *ss)
{
RETURN(0);
}
+#endif /* HAVE_SERVER_SUPPORT */
static struct lu_device *echo_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
cleanup = 4;
if (ed->ed_next_ismd) {
+#ifdef HAVE_SERVER_SUPPORT
/* Suppose to connect to some Metadata layer */
struct lu_site *ls;
struct lu_device *ld;
CERROR("echo fid init error %d\n", rc);
GOTO(out, rc);
}
+#else /* !HAVE_SERVER_SUPPORT */
+ CERROR("Local operations are NOT supported on client side. "
+ "Only remote operations are supported. Metadata client "
+ "must be run on server side.\n");
+ GOTO(out, rc = -EOPNOTSUPP);
+#endif
} else {
/* if echo client is to be stacked upon ost device, the next is
* NULL since ost is not a clio device so far */
spin_unlock(&ec->ec_lock);
CERROR("echo_client still has objects at cleanup time, "
"wait for 1 second\n");
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
cfs_time_seconds(1));
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
spin_lock(&ec->ec_lock);
LASSERT(cfs_list_empty(&ec->ec_locks));
- CDEBUG(D_INFO, "No object exists, exiting...\n");
+ CDEBUG(D_INFO, "No object exists, exiting...\n");
- echo_client_cleanup(d->ld_obd);
- echo_fid_fini(d->ld_obd);
- while (next && !ed->ed_next_ismd)
- next = next->ld_type->ldt_ops->ldto_device_free(env, next);
+ echo_client_cleanup(d->ld_obd);
+#ifdef HAVE_SERVER_SUPPORT
+ echo_fid_fini(d->ld_obd);
+#endif
+ while (next && !ed->ed_next_ismd)
+ next = next->ld_type->ldt_ops->ldto_device_free(env, next);
LASSERT(ed->ed_site == lu2cl_site(d->ld_site));
echo_site_fini(env, ed);
*cookie = el->el_cookie;
spin_unlock(&ec->ec_lock);
} else {
- cl_lock_release(env, lck, "ec enqueue", cfs_current());
+ cl_lock_release(env, lck, "ec enqueue", current);
}
}
RETURN(rc);
RETURN(rc);
}
/*
 * This hunk replaces cl_echo_async_brw() (which walked the inbound page
 * queue calling cl_page_cache_add() per page) with echo_commit_callback(),
 * a per-page commit callback handed to cl_io_commit_async() by the caller
 * (see the cl_echo_object_brw hunk below in this patch).  The callback
 * moves each committed page onto the thread-local c2_qout queue.
 * NOTE(review): the LASSERT ties the io to the per-thread eti_io, i.e. it
 * assumes the callback only runs in the context that issued the I/O.
 */
-static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type unused, struct cl_2queue *queue)
+static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
{
- struct cl_page *clp;
- struct cl_page *temp;
- int result = 0;
- ENTRY;
+ struct echo_thread_info *info;
+ struct cl_2queue *queue;
- cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) {
- int rc;
- rc = cl_page_cache_add(env, io, clp, CRT_WRITE);
- if (rc == 0)
- continue;
- result = result ?: rc;
- }
- RETURN(result);
+ info = echo_env_info(env);
+ LASSERT(io == &info->eti_io);
+
+ queue = &info->eti_queue;
+ cl_page_list_add(&queue->c2_qout, page);
}
static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
- cfs_page_t **pages, int npages, int async)
+ struct page **pages, int npages, int async)
{
struct lu_env *env;
struct echo_thread_info *info;
rc = cl_echo_enqueue0(env, eco, offset,
- offset + npages * CFS_PAGE_SIZE - 1,
+ offset + npages * PAGE_CACHE_SIZE - 1,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
CEF_NEVER);
if (rc < 0)
async = async && (typ == CRT_WRITE);
if (async)
- rc = cl_echo_async_brw(env, io, typ, queue);
- else
+ rc = cl_io_commit_async(env, io, &queue->c2_qin,
+ 0, PAGE_SIZE,
+ echo_commit_callback);
+ else
rc = cl_io_submit_sync(env, io, typ, queue, 0);
CDEBUG(D_INFO, "echo_client %s write returns %d\n",
async ? "async" : "sync", rc);
if (nob > ulsm_nob)
return (-EINVAL);
- if (cfs_copy_to_user (ulsm, lsm, sizeof(ulsm)))
+ if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
return (-EFAULT);
for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (cfs_copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
+ if (copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
sizeof(lsm->lsm_oinfo[0])))
return (-EFAULT);
}
if (ulsm_nob < sizeof (*lsm))
return (-EINVAL);
- if (cfs_copy_from_user (lsm, ulsm, sizeof (*lsm)))
+ if (copy_from_user (lsm, ulsm, sizeof (*lsm)))
return (-EFAULT);
if (lsm->lsm_stripe_count > ec->ec_nstripes ||
for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (cfs_copy_from_user(lsm->lsm_oinfo[i],
+ if (copy_from_user(lsm->lsm_oinfo[i],
((struct lov_stripe_md *)ulsm)-> \
lsm_oinfo[i],
sizeof(lsm->lsm_oinfo[0])))
return (0);
}
+#ifdef HAVE_SERVER_SUPPORT
static inline void echo_md_build_name(struct lu_name *lname, char *name,
__u64 id)
{
{
struct echo_thread_info *info = echo_env_info(env);
struct lu_buf *buf = &info->eti_buf;
- cfs_umode_t mode = lu_object_attr(&next->mo_lu);
+ umode_t mode = lu_object_attr(&next->mo_lu);
int need = ma->ma_need;
int rc = 0, rc2;
GOTO(out_put, rc = -EINVAL);
}
+ if (lu_object_remote(child)) {
+ CERROR("Can not destroy remote object %s: rc = %d\n",
+ lname->ln_name, -EPERM);
+ GOTO(out_put, rc = -EPERM);
+ }
CDEBUG(D_RPCTRACE, "Start destroy object "DFID" %s %p\n",
PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
ucred->uc_suppgids[0] = -1;
ucred->uc_suppgids[1] = -1;
- ucred->uc_uid = ucred->uc_o_uid = cfs_curproc_uid();
- ucred->uc_gid = ucred->uc_o_gid = cfs_curproc_gid();
- ucred->uc_fsuid = ucred->uc_o_fsuid = cfs_curproc_fsuid();
- ucred->uc_fsgid = ucred->uc_o_fsgid = cfs_curproc_fsgid();
+ ucred->uc_uid = ucred->uc_o_uid = current_uid();
+ ucred->uc_gid = ucred->uc_o_gid = current_gid();
+ ucred->uc_fsuid = ucred->uc_o_fsuid = current_fsuid();
+ ucred->uc_fsgid = ucred->uc_o_fsgid = current_fsgid();
ucred->uc_cap = cfs_curproc_cap_pack();
/* remove fs privilege for non-root user. */
}
#define ECHO_MD_CTX_TAG (LCT_REMEMBER | LCT_MD_THREAD)
-#define ECHO_MD_SES_TAG (LCT_REMEMBER | LCT_SESSION)
+#define ECHO_MD_SES_TAG (LCT_REMEMBER | LCT_SESSION | LCT_SERVER_SESSION)
static int echo_md_handler(struct echo_device *ed, int command,
char *path, int path_len, __u64 id, int count,
struct obd_ioctl_data *data)
OBD_ALLOC(name, namelen + 1);
if (name == NULL)
GOTO(out_put, rc = -ENOMEM);
- if (cfs_copy_from_user(name, data->ioc_pbuf2, namelen))
+ if (copy_from_user(name, data->ioc_pbuf2, namelen))
GOTO(out_name, rc = -EFAULT);
}
cl_env_put(env, &refcheck);
return rc;
}
+#endif /* HAVE_SERVER_SUPPORT */
static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
int on_target, struct obdo *oa, void *ulsm,
lsm->lsm_stripe_count = ec->ec_nstripes;
if (lsm->lsm_stripe_size == 0)
- lsm->lsm_stripe_size = CFS_PAGE_SIZE;
+ lsm->lsm_stripe_size = PAGE_CACHE_SIZE;
idx = cfs_rand();
/*
 * Fill a page with the echo debug pattern (one stamp per
 * OBD_ECHO_BLOCK_SIZE block) before a BRW, so the peer can verify data
 * integrity.  This hunk only swaps compat wrappers for native kernel
 * APIs: cfs_page_t -> struct page, CFS_PAGE_SIZE -> PAGE_CACHE_SIZE,
 * cfs_kmap/cfs_kunmap -> kmap/kunmap.  Part of the loop body (the read
 * branch) is elided by this chunk.
 */
static void
echo_client_page_debug_setup(struct lov_stripe_md *lsm,
- cfs_page_t *page, int rw, obd_id id,
+ struct page *page, int rw, obd_id id,
obd_off offset, obd_off count)
{
char *addr;
int delta;
/* no partial pages on the client */
- LASSERT(count == CFS_PAGE_SIZE);
+ LASSERT(count == PAGE_CACHE_SIZE);
- addr = cfs_kmap(page);
+ addr = kmap(page);
- for (delta = 0; delta < CFS_PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
if (rw == OBD_BRW_WRITE) {
stripe_off = offset + delta;
stripe_id = id;
stripe_off, stripe_id);
}
- cfs_kunmap(page);
+ kunmap(page);
}
/*
 * Verify the echo debug pattern written by echo_client_page_debug_setup()
 * block by block; returns 0 on match, non-zero on mismatch (rc is
 * accumulated across blocks).  Same mechanical port as the setup helper:
 * cfs_page_t -> struct page, CFS_PAGE_SIZE -> PAGE_CACHE_SIZE,
 * cfs_kmap/cfs_kunmap -> kmap/kunmap.  The per-block check call is elided
 * by this chunk.
 */
static int echo_client_page_debug_check(struct lov_stripe_md *lsm,
- cfs_page_t *page, obd_id id,
+ struct page *page, obd_id id,
obd_off offset, obd_off count)
{
obd_off stripe_off;
int rc2;
/* no partial pages on the client */
- LASSERT(count == CFS_PAGE_SIZE);
+ LASSERT(count == PAGE_CACHE_SIZE);
- addr = cfs_kmap(page);
+ addr = kmap(page);
- for (rc = delta = 0; delta < CFS_PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
stripe_off = offset + delta;
stripe_id = id;
echo_get_stripe_off_id (lsm, &stripe_off, &stripe_id);
}
}
- cfs_kunmap(page);
+ kunmap(page);
return rc;
}
obd_count npages;
struct brw_page *pga;
struct brw_page *pgp;
- cfs_page_t **pages;
+ struct page **pages;
obd_off off;
int i;
int rc;
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
(oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
- gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? CFS_ALLOC_STD : CFS_ALLOC_HIGHUSER;
+ gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_IOFS : GFP_HIGHUSER;
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
LASSERT(lsm != NULL);
RETURN(-EINVAL);
/* XXX think again with misaligned I/O */
- npages = count >> CFS_PAGE_SHIFT;
+ npages = count >> PAGE_CACHE_SHIFT;
if (rw == OBD_BRW_WRITE)
brw_flags = OBD_BRW_ASYNC;
for (i = 0, pgp = pga, off = offset;
i < npages;
- i++, pgp++, off += CFS_PAGE_SIZE) {
+ i++, pgp++, off += PAGE_CACHE_SIZE) {
LASSERT (pgp->pg == NULL); /* for cleanup */
goto out;
pages[i] = pgp->pg;
- pgp->count = CFS_PAGE_SIZE;
+ pgp->count = PAGE_CACHE_SIZE;
pgp->off = off;
pgp->flag = brw_flags;
(lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
RETURN(-EINVAL);
- npages = batch >> CFS_PAGE_SHIFT;
- tot_pages = count >> CFS_PAGE_SHIFT;
+ npages = batch >> PAGE_CACHE_SHIFT;
+ tot_pages = count >> PAGE_CACHE_SHIFT;
OBD_ALLOC(lnb, npages * sizeof(struct niobuf_local));
OBD_ALLOC(rnb, npages * sizeof(struct niobuf_remote));
if (tot_pages < npages)
npages = tot_pages;
- for (i = 0; i < npages; i++, off += CFS_PAGE_SIZE) {
+ for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
rnb[i].offset = off;
- rnb[i].len = CFS_PAGE_SIZE;
+ rnb[i].len = PAGE_CACHE_SIZE;
rnb[i].flags = brw_flags;
}
LASSERT(lpages == npages);
for (i = 0; i < lpages; i++) {
- cfs_page_t *page = lnb[i].page;
+ struct page *page = lnb[i].page;
/* read past eof? */
if (page == NULL && lnb[i].rc == 0)
echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg)
{
+#ifdef HAVE_SERVER_SUPPORT
+ struct tgt_session_info *tsi;
+#endif
struct obd_device *obd = exp->exp_obd;
struct echo_device *ed = obd2echo_dev(obd);
struct echo_client_obd *ec = ed->ed_ec;
int rw = OBD_BRW_READ;
int rc = 0;
int i;
+#ifdef HAVE_SERVER_SUPPORT
+ struct lu_context echo_session;
+#endif
ENTRY;
memset(&dummy_oti, 0, sizeof(dummy_oti));
if (env == NULL)
RETURN(-ENOMEM);
- rc = lu_env_init(env, LCT_DT_THREAD);
- if (rc)
- GOTO(out, rc = -ENOMEM);
+ rc = lu_env_init(env, LCT_DT_THREAD);
+ if (rc)
+ GOTO(out_alloc, rc = -ENOMEM);
+#ifdef HAVE_SERVER_SUPPORT
+ env->le_ses = &echo_session;
+ rc = lu_context_init(env->le_ses, LCT_SERVER_SESSION | LCT_NOREF);
+ if (unlikely(rc < 0))
+ GOTO(out_env, rc);
+ lu_context_enter(env->le_ses);
+
+ tsi = tgt_ses_info(env);
+ tsi->tsi_exp = ec->ec_exp;
+#endif
switch (cmd) {
case OBD_IOC_CREATE: /* may create echo object */
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
data->ioc_plen1, &dummy_oti);
GOTO(out, rc);
+#ifdef HAVE_SERVER_SUPPORT
case OBD_IOC_ECHO_MD: {
int count;
int cmd;
count = data->ioc_count;
cmd = data->ioc_command;
- id = ostid_id(&data->ioc_obdo2.o_oi);
-
+ id = data->ioc_obdo2.o_oi.oi.oi_id;
dirlen = data->ioc_plen1;
OBD_ALLOC(dir, dirlen + 1);
if (dir == NULL)
GOTO(out, rc = -ENOMEM);
- if (cfs_copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
+ if (copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
OBD_FREE(dir, data->ioc_plen1 + 1);
GOTO(out, rc = -EFAULT);
}
GOTO(out, rc);
}
- if (cfs_copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
+ if (copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
return -EFAULT;
max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH;
- if (cfs_copy_to_user(data->ioc_pbuf2, &max_count,
+ if (copy_to_user(data->ioc_pbuf2, &max_count,
data->ioc_plen2))
return -EFAULT;
GOTO(out, rc);
}
+#endif /* HAVE_SERVER_SUPPORT */
case OBD_IOC_DESTROY:
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
EXIT;
out:
+#ifdef HAVE_SERVER_SUPPORT
+ lu_context_exit(env->le_ses);
+ lu_context_fini(env->le_ses);
+out_env:
+#endif
lu_env_fini(env);
+out_alloc:
OBD_FREE_PTR(env);
/* XXX this should be in a helper also called by target_send_reply */
ec->ec_unique = 0;
ec->ec_nstripes = 0;
- if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
- lu_context_tags_update(ECHO_MD_CTX_TAG);
- lu_session_tags_update(ECHO_MD_SES_TAG);
- RETURN(0);
- }
+ if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
+#ifdef HAVE_SERVER_SUPPORT
+ lu_context_tags_update(ECHO_MD_CTX_TAG);
+ lu_session_tags_update(ECHO_MD_SES_TAG);
+#else
+ CERROR("Local operations are NOT supported on client side. "
+ "Only remote operations are supported. Metadata client "
+ "must be run on server side.\n");
+#endif
+ RETURN(0);
+ }
OBD_ALLOC(ocd, sizeof(*ocd));
if (ocd == NULL) {
RETURN(0);
if (ed->ed_next_ismd) {
- lu_context_tags_clear(ECHO_MD_CTX_TAG);
- lu_session_tags_clear(ECHO_MD_SES_TAG);
+#ifdef HAVE_SERVER_SUPPORT
+ lu_context_tags_clear(ECHO_MD_CTX_TAG);
+ lu_session_tags_clear(ECHO_MD_SES_TAG);
+#else
+ CERROR("This is client-side only module, does not support "
+ "metadata echo client.\n");
+#endif
RETURN(0);
}
lprocfs_echo_init_vars(&lvars);
- rc = lu_kmem_init(echo_caches);
- if (rc == 0) {
- rc = class_register_type(&echo_client_obd_ops, NULL,
- lvars.module_vars,
- LUSTRE_ECHO_CLIENT_NAME,
- &echo_device_type);
- if (rc)
- lu_kmem_fini(echo_caches);
- }
- return rc;
+ rc = lu_kmem_init(echo_caches);
+ if (rc == 0) {
+ rc = class_register_type(&echo_client_obd_ops, NULL, NULL,
+#ifndef HAVE_ONLY_PROCFS_SEQ
+ lvars.module_vars,
+#endif
+ LUSTRE_ECHO_CLIENT_NAME,
+ &echo_device_type);
+ if (rc)
+ lu_kmem_fini(echo_caches);
+ }
+ return rc;
}
void echo_client_exit(void)
ENTRY;
LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
- LASSERT(CFS_PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+ LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
lprocfs_echo_init_vars(&lvars);
if (rc != 0)
goto failed_0;
- rc = class_register_type(&echo_obd_ops, NULL, lvars.module_vars,
- LUSTRE_ECHO_NAME, NULL);
- if (rc != 0)
- goto failed_1;
+ rc = class_register_type(&echo_obd_ops, NULL, NULL,
+#ifndef HAVE_ONLY_PROCFS_SEQ
+ lvars.module_vars,
+#endif
+ LUSTRE_ECHO_NAME, NULL);
+ if (rc != 0)
+ goto failed_1;
# endif
rc = echo_client_init();