-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_MDC
#ifdef __KERNEL__
#define REQUEST_MINOR 244
-static quota_interface_t *quota_interface;
-extern quota_interface_t mdc_quota_interface;
+/* Argument bundle stashed in a ptlrpc request's rq_async_args for the
+ * capability-renewal reply interpreter (mdc_interpret_renew_capa):
+ * the capa being renewed plus the callback invoked with the result. */
+struct mdc_renew_capa_args {
+ struct obd_capa *ra_oc;
+ renew_capa_cb_t ra_cb;
+};
static int mdc_cleanup(struct obd_device *obd);
struct md_open_data *mod = och->och_mod;
ENTRY;
- LASSERT(mod != LP_POISON && mod != NULL);
+ /**
+ * It is possible to not have \var mod in a case of eviction between
+ * lookup and ll_file_open().
+ **/
+ if (mod == NULL)
+ RETURN(0);
+
+ LASSERT(mod != LP_POISON);
mod->mod_och = NULL;
och->och_mod = NULL;
* exists and return no error in that case
*/
if (mod) {
+ DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
LASSERT(mod->mod_open_req != NULL);
if (mod->mod_open_req->rq_committed)
rc = 0;
EXPORT_SYMBOL(mdc_sendpage);
#endif
-int mdc_readpage(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, __u64 offset, struct page *page,
- struct ptlrpc_request **request)
+int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
+ struct page **pages, struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
struct ptlrpc_bulk_desc *desc;
+ int i;
+ cfs_waitq_t waitq;
+ int resends = 0;
+ struct l_wait_info lwi;
int rc;
ENTRY;
*request = NULL;
+ cfs_waitq_init(&waitq);
+
+restart_bulk:
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
if (req == NULL)
RETURN(-ENOMEM);
- mdc_set_capa_size(req, &RMF_CAPA1, oc);
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
if (rc) {
req->rq_request_portal = MDS_READPAGE_PORTAL;
ptlrpc_at_set_req_timeout(req);
- desc = ptlrpc_prep_bulk_imp(req, 1, BULK_PUT_SINK, MDS_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, BULK_PUT_SINK,
+ MDS_BULK_PORTAL);
if (desc == NULL) {
ptlrpc_request_free(req);
RETURN(-ENOMEM);
}
/* NB req now owns desc and will free it when it gets freed */
- ptlrpc_prep_bulk_page(desc, page, 0, CFS_PAGE_SIZE);
- mdc_readdir_pack(req, offset, CFS_PAGE_SIZE, fid, oc);
+ for (i = 0; i < op_data->op_npages; i++)
+ ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
+
+ mdc_readdir_pack(req, op_data->op_offset,
+ CFS_PAGE_SIZE * op_data->op_npages,
+ &op_data->op_fid1, op_data->op_capa1);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (rc) {
ptlrpc_req_finished(req);
- RETURN(rc);
+ if (rc != -ETIMEDOUT)
+ RETURN(rc);
+
+ resends++;
+ if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
+ CERROR("too many resend retries, returning error\n");
+ RETURN(-EIO);
+ }
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
+ l_wait_event(waitq, 0, &lwi);
+
+ goto restart_bulk;
}
rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
RETURN(rc);
}
- if (req->rq_bulk->bd_nob_transferred != CFS_PAGE_SIZE) {
+ if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
- req->rq_bulk->bd_nob_transferred, CFS_PAGE_SIZE);
+ req->rq_bulk->bd_nob_transferred,
+ CFS_PAGE_SIZE * op_data->op_npages);
ptlrpc_req_finished(req);
RETURN(-EPROTO);
}
RETURN(0);
}
-static int mdc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
+static int mdc_statfs(const struct lu_env *env,
+ struct obd_export *exp, struct obd_statfs *osfs,
__u64 max_age, __u32 flags)
{
+ struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
struct obd_statfs *msfs;
struct obd_import *imp = NULL;
/* Val is struct getinfo_fid2path result plus path */
vallen = sizeof(*gf) + gf->gf_pathlen;
- rc = obd_get_info(exp, keylen, key, &vallen, gf, NULL);
+ rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL);
if (rc)
GOTO(out, rc);
CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
cs->cs_fp, cs->cs_startrec);
+ /*
+ * It's important to daemonize here to close unused FDs.
+ * The write fd from pipe is already opened by the caller,
+ * so it's fine to clear all files here
+ */
+ cfs_daemonize("mdc_clg_send_thread");
+
OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
if (cs->cs_buf == NULL)
GOTO(out, rc = -ENOMEM);
GOTO(out, rc);
}
- /* We need the pipe fd open, so llog_process can't daemonize */
- rc = llog_cat_process_flags(llh, changelog_show_cb, cs,
- LLOG_FLAG_NODEAMON, 0, 0);
+ rc = llog_cat_process_flags(llh, changelog_show_cb, cs, 0, 0, 0);
/* Send EOF no matter what our result */
if ((kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch),
if (cs->cs_buf)
OBD_FREE(cs->cs_buf, CR_MAXSIZE);
OBD_FREE_PTR(cs);
+ /* detach from parent process so we get cleaned up */
+ cfs_daemonize("cl_send");
return rc;
}
/* New thread because we should return to user app before
writing into our pipe */
- rc = cfs_kernel_thread(mdc_changelog_send_thread, cs,
- CLONE_VM | CLONE_FILES);
+ rc = cfs_create_thread(mdc_changelog_send_thread, cs, CFS_DAEMON_FLAGS);
if (rc >= 0) {
CDEBUG(D_CHANGELOG, "start changelog thread: %d\n", rc);
return 0;
static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
struct lustre_kernelcomm *lk);
+/**
+ * Ask the MDS to start a quota check.
+ *
+ * Packs \a oqctl into an MDS_QUOTACHECK request and waits for the reply.
+ * The client-side status cli->cl_qchk_stat is primed with -ENODATA so a
+ * subsequent mdc_quota_poll_check() reports "quotacheck in progress", and
+ * is overwritten with the error code if the RPC fails.
+ *
+ * \param unused  obd_device argument required by the o_quotacheck method
+ *                signature; not referenced here
+ * \param exp     export to the MDS target
+ * \param oqctl   quota-control request body copied into the RPC
+ * \retval 0 on success, negative errno on RPC failure
+ */
+static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ struct ptlrpc_request *req;
+ struct obd_quotactl *body;
+ int rc;
+ ENTRY;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
+ MDS_QUOTACHECK);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ /* copy caller's request into the packed RPC body */
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ *body = *oqctl;
+
+ ptlrpc_request_set_replen(req);
+
+ /* the next poll will find -ENODATA, that means quotacheck is
+ * going on */
+ cli->cl_qchk_stat = -ENODATA;
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ cli->cl_qchk_stat = rc;
+ ptlrpc_req_finished(req);
+ RETURN(rc);
+}
+
+/**
+ * Report the status of a previously started quota check.
+ *
+ * Fills \a qchk with the target UUID and type name, then returns the
+ * cached quotacheck status set by mdc_quotacheck() (and updated when the
+ * server reply arrives).
+ *
+ * \param exp   export whose client_obd holds the cached status
+ * \param qchk  result structure filled in for the caller
+ * \retval cl_qchk_stat value; -EINTR if no quotacheck was started from
+ *         this client (CL_NOT_QUOTACHECKED)
+ */
+static int mdc_quota_poll_check(struct obd_export *exp,
+ struct if_quotacheck *qchk)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ int rc;
+ ENTRY;
+
+ qchk->obd_uuid = cli->cl_target_uuid;
+ /* NOTE(review): copies strlen() bytes with no NUL terminator —
+ * presumably obd_type is pre-zeroed or consumers bound the read;
+ * confirm against struct if_quotacheck users */
+ memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
+
+ rc = cli->cl_qchk_stat;
+ /* the client is not the previous one */
+ if (rc == CL_NOT_QUOTACHECKED)
+ rc = -EINTR;
+ RETURN(rc);
+}
+
+/**
+ * Send a quota-control (MDS_QUOTACTL) request to the MDS and copy the
+ * server's reply body back into \a oqctl.
+ *
+ * The request is sent with no resend (rq_no_resend = 1) and an adaptive
+ * timeout; a reply that cannot be unpacked turns a successful wait into
+ * -EPROTO.
+ *
+ * \param unused  obd_device argument required by the o_quotactl method
+ *                signature; not referenced here
+ * \param exp     export to the MDS target
+ * \param oqctl   in: request body; out: overwritten with the server reply
+ * \retval 0 on success, negative errno on RPC or unpack failure
+ */
+static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct ptlrpc_request *req;
+ struct obd_quotactl *oqc;
+ int rc;
+ ENTRY;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
+ MDS_QUOTACTL);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ *oqc = *oqctl;
+
+ ptlrpc_request_set_replen(req);
+ ptlrpc_at_set_req_timeout(req);
+ req->rq_no_resend = 1;
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
+
+ /* oqc is reused to point at the reply body; copy it back to the
+ * caller even if the wait returned an error, as long as a reply
+ * message with a quotactl record is present */
+ if (req->rq_repmsg &&
+ (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
+ *oqctl = *oqc;
+ } else if (!rc) {
+ CERROR ("Can't unpack obd_quotactl\n");
+ rc = -EPROTO;
+ }
+ ptlrpc_req_finished(req);
+
+ RETURN(rc);
+}
+
static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg)
{
struct ioc_changelog *icc = karg;
struct changelog_setinfo cs =
{.cs_recno = icc->icc_recno, .cs_id = icc->icc_id};
- rc = obd_set_info_async(exp, strlen(KEY_CHANGELOG_CLEAR),
+ rc = obd_set_info_async(NULL, exp, strlen(KEY_CHANGELOG_CLEAR),
KEY_CHANGELOG_CLEAR, sizeof(cs), &cs,
NULL);
GOTO(out, rc);
GOTO(out, rc);
}
case OBD_IOC_CLIENT_RECOVER:
- rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1);
+ rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0);
if (rc < 0)
GOTO(out, rc);
GOTO(out, rc = 0);
}
#endif
case OBD_IOC_POLL_QUOTACHECK:
- rc = lquota_poll_check(quota_interface, exp,
- (struct if_quotacheck *)karg);
+ rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg);
GOTO(out, rc);
case OBD_IOC_PING_TARGET:
rc = ptlrpc_obd_ping(obd);
GOTO(out, rc);
/*
- * Normally IOC_OBD_STATFS iocontrol is handled by LMV instead of MDC.
- * But when the cluster is upgraded from 1.8, there'd be no LMV layer
- * thus we might be called here. Eventually this code should be removed.
- * bz20731.
+ * Normally IOC_OBD_STATFS, OBD_IOC_QUOTACTL iocontrol are handled by
+ * LMV instead of MDC. But when the cluster is upgraded from 1.8,
+ * there'd be no LMV layer thus we might be called here. Eventually
+ * this code should be removed.
+ * bz20731, LU-592.
*/
case IOC_OBD_STATFS: {
struct obd_statfs stat_buf = {0};
(int) sizeof(struct obd_uuid))))
GOTO(out, rc = -EFAULT);
- rc = mdc_statfs(obd, &stat_buf,
- cfs_time_current_64() - CFS_HZ, 0);
+ rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ 0);
if (rc != 0)
GOTO(out, rc);
GOTO(out, rc = 0);
}
+ case OBD_IOC_QUOTACTL: {
+ struct if_quotactl *qctl = karg;
+ struct obd_quotactl *oqctl;
+
+ OBD_ALLOC_PTR(oqctl);
+ if (!oqctl)
+ RETURN(-ENOMEM);
+
+ QCTL_COPY(oqctl, qctl);
+ rc = obd_quotactl(exp, oqctl);
+ if (rc == 0) {
+ QCTL_COPY(qctl, oqctl);
+ qctl->qc_valid = QC_MDTIDX;
+ qctl->obd_uuid = obd->u.cli.cl_target_uuid;
+ }
+ OBD_FREE_PTR(oqctl);
+ break;
+ }
+ case LL_IOC_GET_CONNECT_FLAGS: {
+ if (cfs_copy_to_user(uarg, &exp->exp_connect_flags,
+ sizeof(__u64)))
+ GOTO(out, rc = -EFAULT);
+ else
+ GOTO(out, rc = 0);
+ }
default:
CERROR("mdc_ioctl(): unrecognised ioctl %#x\n", cmd);
GOTO(out, rc = -ENOTTY);
__swab32s(&h->hai_action);
lustre_swab_lu_fid(&h->hai_fid);
__swab64s(&h->hai_cookie);
- __swab64s(&h->hai_extent_start);
- __swab64s(&h->hai_extent_end);
+ __swab64s(&h->hai_extent.offset);
+ __swab64s(&h->hai_extent.length);
__swab64s(&h->hai_gid);
}
}
}
+/* Byte-swap a kernel-userspace-comm header in place for cross-endian
+ * wire compatibility; kuc_transport is a single byte and needs no swap. */
+static void lustre_swab_kuch(struct kuc_hdr *l)
+{
+ __swab16s(&l->kuc_magic);
+ /* __u8 l->kuc_transport */
+ __swab16s(&l->kuc_msgtype);
+ __swab16s(&l->kuc_msglen);
+}
+
static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
struct lustre_kernelcomm *lk)
{
rc = libcfs_kkuc_group_rem(lk->lk_uid,lk->lk_group);
else {
cfs_file_t *fp = cfs_get_fd(lk->lk_wfd);
- rc = libcfs_kkuc_group_add(fp, lk->lk_uid,lk->lk_group);
+ rc = libcfs_kkuc_group_add(fp, lk->lk_uid,lk->lk_group,
+ lk->lk_data);
if (rc && fp)
cfs_put_file(fp);
}
RETURN(rc);
}
-int mdc_set_info_async(struct obd_export *exp,
+int mdc_set_info_async(const struct lu_env *env,
+ struct obd_export *exp,
obd_count keylen, void *key,
obd_count vallen, void *val,
struct ptlrpc_request_set *set)
int rc = -EINVAL;
ENTRY;
- if (KEY_IS(KEY_INIT_RECOV)) {
- if (vallen != sizeof(int))
- RETURN(-EINVAL);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_initial_recov = *(int *)val;
- cfs_spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
- exp->exp_obd->obd_name, imp->imp_initial_recov);
- RETURN(0);
- }
- /* Turn off initial_recov after we try all backup servers once */
- if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
- if (vallen != sizeof(int))
- RETURN(-EINVAL);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_initial_recov_bk = *(int *)val;
- if (imp->imp_initial_recov_bk)
- imp->imp_initial_recov = 1;
- cfs_spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "%s: set imp_initial_recov_bk = %d\n",
- exp->exp_obd->obd_name, imp->imp_initial_recov_bk);
- RETURN(0);
- }
if (KEY_IS(KEY_READ_ONLY)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
RETURN(rc);
}
-int mdc_get_info(struct obd_export *exp, __u32 keylen, void *key,
- __u32 *vallen, void *val, struct lov_stripe_md *lsm)
+int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
{
int rc = -EINVAL;
max_easize = val;
*max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
RETURN(0);
- }
- if (KEY_IS(KEY_CONN_DATA)) {
+ } else if (KEY_IS(KEY_CONN_DATA)) {
struct obd_import *imp = class_exp2cliimp(exp);
struct obd_connect_data *data = val;
*data = imp->imp_connect_data;
RETURN(0);
+ } else if (KEY_IS(KEY_TGT_COUNT)) {
+ *((int *)val) = 1;
+ RETURN(0);
}
rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);
/*
* Flush current sequence to make client obtain new one
* from server in case of disconnect/reconnect.
- * If range is already empty then no need to flush it.
*/
- if (cli->cl_seq != NULL &&
- !range_is_exhausted(&cli->cl_seq->lcs_space)) {
+ if (cli->cl_seq != NULL)
seq_client_flush(cli->cl_seq);
- }
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
break;
case IMP_EVENT_OCD:
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
break;
-
+ case IMP_EVENT_DEACTIVATE:
+ case IMP_EVENT_ACTIVATE:
+ break;
default:
CERROR("Unknown import event %x\n", event);
LBUG();
struct client_obd *cli = &exp->exp_obd->u.cli;
struct lu_client_seq *seq = cli->cl_seq;
ENTRY;
- RETURN(seq_client_alloc_fid(seq, fid));
+ RETURN(seq_client_alloc_fid(NULL, seq, fid));
}
/* XXX This method is used only to clear current fid seq
return &cli->cl_target_uuid;
}
+/**
+ * Determine whether a lock may be canceled instead of replayed during
+ * recovery.
+ *
+ * \param lock  the LDLM lock under consideration
+ * \retval non-zero if the lock can be canceled, zero if it must be
+ *         replayed
+ */
+static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
+{
+ /* only inodebits locks are MDC-originated and cancelable here */
+ if (lock->l_resource->lr_type != LDLM_IBITS)
+ RETURN(0);
+
+ /* FIXME: if we ever get into a situation where there are too many
+ * opened files with open locks on a single node, then we really
+ * should replay these open locks to reget it */
+ if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
+ RETURN(0);
+
+ RETURN(1);
+}
+
static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
struct client_obd *cli = &obd->u.cli;
ptlrpcd_addref();
- OBD_ALLOC(cli->cl_setattr_lock, sizeof (*cli->cl_setattr_lock));
- if (!cli->cl_setattr_lock)
- GOTO(err_rpc_lock, rc = -ENOMEM);
- mdc_init_rpc_lock(cli->cl_setattr_lock);
-
OBD_ALLOC(cli->cl_close_lock, sizeof (*cli->cl_close_lock));
if (!cli->cl_close_lock)
- GOTO(err_setattr_lock, rc = -ENOMEM);
+ GOTO(err_rpc_lock, rc = -ENOMEM);
mdc_init_rpc_lock(cli->cl_close_lock);
rc = client_obd_setup(obd, cfg);
sptlrpc_lprocfs_cliobd_attach(obd);
ptlrpc_lprocfs_register_obd(obd);
+ ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
+
rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
if (rc) {
mdc_cleanup(obd);
err_close_lock:
OBD_FREE(cli->cl_close_lock, sizeof (*cli->cl_close_lock));
-err_setattr_lock:
- OBD_FREE(cli->cl_setattr_lock, sizeof (*cli->cl_setattr_lock));
err_rpc_lock:
OBD_FREE(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
ptlrpcd_decref();
if (obd->obd_type->typ_refcnt <= 1)
libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
- /* If we set up but never connected, the
- client import will not have been cleaned. */
- if (obd->u.cli.cl_import) {
- struct obd_import *imp;
- cfs_down_write(&obd->u.cli.cl_sem);
- imp = obd->u.cli.cl_import;
- CERROR("client import never connected\n");
- ptlrpc_invalidate_import(imp);
- class_destroy_import(imp);
- cfs_up_write(&obd->u.cli.cl_sem);
- obd->u.cli.cl_import = NULL;
- }
+ obd_cleanup_client_import(obd);
+ ptlrpc_lprocfs_unregister_obd(obd);
+ lprocfs_obd_cleanup(obd);
+
rc = obd_llog_finish(obd, 0);
if (rc != 0)
CERROR("failed to cleanup llogging subsystems\n");
struct client_obd *cli = &obd->u.cli;
OBD_FREE(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
- OBD_FREE(cli->cl_setattr_lock, sizeof (*cli->cl_setattr_lock));
OBD_FREE(cli->cl_close_lock, sizeof (*cli->cl_close_lock));
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
ptlrpcd_decref();
return client_obd_cleanup(obd);
}
static int mdc_interpret_renew_capa(const struct lu_env *env,
- struct ptlrpc_request *req, void *unused,
+ struct ptlrpc_request *req, void *args,
int status)
{
- struct obd_capa *oc = req->rq_async_args.pointer_arg[0];
- renew_capa_cb_t cb = req->rq_async_args.pointer_arg[1];
+ struct mdc_renew_capa_args *ra = args;
struct mdt_body *body = NULL;
struct lustre_capa *capa;
ENTRY;
GOTO(out, capa = ERR_PTR(-EFAULT));
EXIT;
out:
- cb(oc, capa);
+ ra->ra_cb(ra->ra_oc, capa);
return 0;
}
renew_capa_cb_t cb)
{
struct ptlrpc_request *req;
+ struct mdc_renew_capa_args *ra;
ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETATTR,
mdc_pack_body(req, &oc->c_capa.lc_fid, oc, OBD_MD_FLOSSCAPA, 0, -1, 0);
ptlrpc_request_set_replen(req);
- req->rq_async_args.pointer_arg[0] = oc;
- req->rq_async_args.pointer_arg[1] = cb;
+ CLASSERT(sizeof(*ra) <= sizeof(req->rq_async_args));
+ ra = ptlrpc_req_async_args(req);
+ ra->ra_oc = oc;
+ ra->ra_cb = cb;
req->rq_interpret_reply = mdc_interpret_renew_capa;
- ptlrpcd_add_req(req, PSCOPE_OTHER);
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
RETURN(0);
}
.o_get_info = mdc_get_info,
.o_process_config = mdc_process_config,
.o_get_uuid = mdc_get_uuid,
+ .o_quotactl = mdc_quotactl,
+ .o_quotacheck = mdc_quotacheck
};
struct md_ops mdc_md_ops = {
struct lprocfs_static_vars lvars = { 0 };
lprocfs_mdc_init_vars(&lvars);
- cfs_request_module("lquota");
- quota_interface = PORTAL_SYMBOL_GET(mdc_quota_interface);
- init_obd_quota_ops(quota_interface, &mdc_obd_ops);
-
rc = class_register_type(&mdc_obd_ops, &mdc_md_ops, lvars.module_vars,
LUSTRE_MDC_NAME, NULL);
- if (rc && quota_interface)
- PORTAL_SYMBOL_PUT(mdc_quota_interface);
-
RETURN(rc);
}
#ifdef __KERNEL__
static void /*__exit*/ mdc_exit(void)
{
- if (quota_interface)
- PORTAL_SYMBOL_PUT(mdc_quota_interface);
-
class_unregister_type(LUSTRE_MDC_NAME);
}