-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_LMV
#ifdef __KERNEL__
#include <linux/slab.h>
if (tgts->ltd_exp == NULL)
continue;
- obd_set_info_async(tgts->ltd_exp, sizeof(KEY_INTERMDS),
+ obd_set_info_async(NULL, tgts->ltd_exp, sizeof(KEY_INTERMDS),
KEY_INTERMDS, 0, NULL, NULL);
}
}
}
if (lmv->desc.ld_tgt_count == 0) {
+ lmv_init_unlock(lmv);
CERROR("%s: no targets configured.\n", obd->obd_name);
RETURN(-EINVAL);
}
(int) sizeof(struct obd_uuid))))
RETURN(-EFAULT);
- rc = obd_statfs(mdc_obd, &stat_buf,
+ rc = obd_statfs(NULL, lmv->tgts[index].ltd_exp, &stat_buf,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
0);
if (rc)
* New seq alloc and FLD setup should be atomic. Otherwise we may find
* on server that seq in new allocated fid is not yet known.
*/
- cfs_down(&tgt->ltd_fid_sem);
+ cfs_mutex_lock(&tgt->ltd_fid_mutex);
if (!tgt->ltd_active)
GOTO(out, rc = -ENODEV);
EXIT;
out:
- cfs_up(&tgt->ltd_fid_sem);
+ cfs_mutex_unlock(&tgt->ltd_fid_mutex);
return rc;
}
{
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
- mdsno_t mds;
+ mdsno_t mds = 0;
int rc;
ENTRY;
RETURN(-ENOMEM);
for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
- cfs_sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
+ cfs_mutex_init(&lmv->tgts[i].ltd_fid_mutex);
lmv->tgts[i].ltd_idx = i;
}
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
cfs_spin_lock_init(&lmv->lmv_lock);
- cfs_sema_init(&lmv->init_sem, 1);
+ cfs_mutex_init(&lmv->init_mutex);
rc = lmv_object_setup(obd);
if (rc) {
lprocfs_obd_setup(obd, lvars.obd_vars);
#ifdef LPROCFS
{
- rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd_status",
+ rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
0444, &lmv_proc_target_fops, obd);
if (rc)
- CWARN("Error adding target_obd_stats file (%d)\n", rc);
+ CWARN("%s: error adding LMV target_obd file: rc = %d\n",
+ obd->obd_name, rc);
}
#endif
rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
ENTRY;
fld_client_fini(&lmv->lmv_fld);
- lprocfs_obd_cleanup(obd);
lmv_object_cleanup(obd);
OBD_FREE(lmv->datas, lmv->datas_size);
OBD_FREE(lmv->tgts, lmv->tgts_size);
RETURN(rc);
}
-static int lmv_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- __u64 max_age, __u32 flags)
+static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
+ struct obd_statfs *osfs, __u64 max_age, __u32 flags)
{
+ struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_statfs *temp;
int rc = 0;
if (lmv->tgts[i].ltd_exp == NULL)
continue;
- rc = obd_statfs(lmv->tgts[i].ltd_exp->exp_obd, temp,
+ rc = obd_statfs(env, lmv->tgts[i].ltd_exp, temp,
max_age, flags);
if (rc) {
CERROR("can't stat MDS #%d (%s), error %d\n", i,
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *src_tgt;
- struct lmv_tgt_desc *tgt_tgt;
int rc;
int sidx;
int loop = 0;
op_data->op_cap = cfs_curproc_cap_pack();
src_tgt = lmv_get_target(lmv, mds1);
- tgt_tgt = lmv_get_target(lmv, mds2);
/*
* LOOKUP lock on src child (fid3) should also be cancelled for
val = le64_to_cpu(*hash);
if (val < hash_adj)
val += MAX_HASH_SIZE;
- if (val != DIR_END_OFF)
+ if (val != MDS_DIR_END_OFF)
*hash = cpu_to_le64(val - hash_adj);
}
return id ^ (id >> 32);
}
-static int lmv_readpage(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, __u64 offset64, struct page *page,
- struct ptlrpc_request **request)
+static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
+ struct page **pages, struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- struct lu_fid rid = *fid;
struct lmv_object *obj;
- __u64 offset;
+ struct lu_fid rid = op_data->op_fid1;
+ __u64 offset = op_data->op_offset;
__u64 hash_adj = 0;
__u32 rank = 0;
__u64 seg_size = 0;
int tgt0_idx = 0;
int rc;
int nr = 0;
+ int i;
+ /* number of pages read, in CFS_PAGE_SIZE */
+ int nrdpgs;
+ /* number of pages transferred in LU_PAGE_SIZE */
+ int nlupgs;
struct lmv_stripe *los;
struct lmv_tgt_desc *tgt;
struct lu_dirpage *dp;
struct lu_dirent *ent;
ENTRY;
- offset = offset64;
-
rc = lmv_check_connect(obd);
if (rc)
RETURN(rc);
* [R*MAX_HASH/N ... (R + 1)*MAX_HASH/N] there for we do hash_adj
* on hash values that we get.
*/
- obj = lmv_object_find_lock(obd, fid);
+ obj = lmv_object_find_lock(obd, &rid);
if (obj) {
nr = obj->lo_objcount;
LASSERT(nr > 0);
do_div(seg_size, nr);
los = obj->lo_stripes;
tgt = lmv_get_target(lmv, los[0].ls_mds);
- rank = lmv_node_rank(tgt->ltd_exp, fid) % nr;
+ rank = lmv_node_rank(tgt->ltd_exp, &rid) % nr;
tgt_tmp = offset;
do_div(tgt_tmp, seg_size);
tgt0_idx = do_div(tgt_tmp, nr);
if (IS_ERR(tgt))
GOTO(cleanup, rc = PTR_ERR(tgt));
- rc = md_readpage(tgt->ltd_exp, &rid, oc, offset, page, request);
+ op_data->op_fid1 = rid;
+ rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
if (rc)
GOTO(cleanup, rc);
- if (obj) {
- dp = cfs_kmap(page);
- lmv_hash_adjust(&dp->ldp_hash_start, hash_adj);
- lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
- LASSERT(le64_to_cpu(dp->ldp_hash_start) <= offset64);
+ nrdpgs = ((*request)->rq_bulk->bd_nob_transferred + CFS_PAGE_SIZE - 1)
+ >> CFS_PAGE_SHIFT;
+ nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
+ LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
+ LASSERT(nrdpgs > 0 && nrdpgs <= op_data->op_npages);
- for (ent = lu_dirent_start(dp); ent != NULL;
- ent = lu_dirent_next(ent))
- lmv_hash_adjust(&ent->lde_hash, hash_adj);
+ CDEBUG(D_INODE, "read %d(%d)/%d pages\n", nrdpgs, nlupgs,
+ op_data->op_npages);
- if (tgt0_idx != nr - 1) {
- __u64 end;
+ for (i = 0; i < nrdpgs; i++) {
+#if CFS_PAGE_SIZE > LU_PAGE_SIZE
+ struct lu_dirpage *first;
+ __u64 hash_end = 0;
+ __u32 flags = 0;
+#endif
+ struct lu_dirent *tmp = NULL;
- end = le64_to_cpu(dp->ldp_hash_end);
- if (end == DIR_END_OFF) {
+ dp = cfs_kmap(pages[i]);
+ if (obj) {
+ lmv_hash_adjust(&dp->ldp_hash_start, hash_adj);
+ lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
+ LASSERT(le64_to_cpu(dp->ldp_hash_start) <=
+ op_data->op_offset);
+
+ if ((tgt0_idx != nr - 1) &&
+ (le64_to_cpu(dp->ldp_hash_end) == MDS_DIR_END_OFF))
+ {
dp->ldp_hash_end = cpu_to_le32(seg_size *
(tgt0_idx + 1));
CDEBUG(D_INODE,
""DFID" reset end "LPX64" tgt %d\n",
PFID(&rid),
- (__u64)le64_to_cpu(dp->ldp_hash_end), tgt_idx);
+ (__u64)le64_to_cpu(dp->ldp_hash_end),
+ tgt_idx);
+ }
+ }
+
+ ent = lu_dirent_start(dp);
+#if CFS_PAGE_SIZE > LU_PAGE_SIZE
+ first = dp;
+ hash_end = dp->ldp_hash_end;
+repeat:
+#endif
+ nlupgs--;
+ for (tmp = ent; ent != NULL;
+ tmp = ent, ent = lu_dirent_next(ent)) {
+ if (obj)
+ lmv_hash_adjust(&ent->lde_hash, hash_adj);
+ }
+
+#if CFS_PAGE_SIZE > LU_PAGE_SIZE
+ dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
+ if (((unsigned long)dp & ~CFS_PAGE_MASK) && nlupgs > 0) {
+ ent = lu_dirent_start(dp);
+
+ if (obj) {
+ lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
+ if ((tgt0_idx != nr - 1) &&
+ (le64_to_cpu(dp->ldp_hash_end) ==
+ MDS_DIR_END_OFF)) {
+ hash_end = cpu_to_le32(seg_size *
+ (tgt0_idx + 1));
+ CDEBUG(D_INODE,
+ ""DFID" reset end "LPX64" tgt %d\n",
+ PFID(&rid),
+ (__u64)le64_to_cpu(hash_end),
+ tgt_idx);
+ }
+ }
+ hash_end = dp->ldp_hash_end;
+ flags = dp->ldp_flags;
+
+ if (tmp) {
+ /* enlarge the end entry lde_reclen from 0 to
+ * first entry of next lu_dirpage, in this way
+ * several lu_dirpages can be stored into one
+ * client page on client. */
+ tmp = ((void *)tmp) +
+ le16_to_cpu(tmp->lde_reclen);
+ tmp->lde_reclen =
+ cpu_to_le16((char *)(dp->ldp_entries) -
+ (char *)tmp);
+ goto repeat;
}
}
- cfs_kunmap(page);
+ first->ldp_hash_end = hash_end;
+ first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
+ first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
+#else
+ SET_BUT_UNUSED(tmp);
+#endif
+ cfs_kunmap(pages[i]);
}
EXIT;
cleanup:
static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
- int rc = 0;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int rc = 0;
switch (stage) {
case OBD_CLEANUP_EARLY:
* stack. */
break;
case OBD_CLEANUP_EXPORTS:
+ fld_client_proc_fini(&lmv->lmv_fld);
+ lprocfs_obd_cleanup(obd);
rc = obd_llog_finish(obd, 0);
if (rc != 0)
CERROR("failed to cleanup llogging subsystems\n");
RETURN(rc);
}
-static int lmv_get_info(struct obd_export *exp, __u32 keylen,
- void *key, __u32 *vallen, void *val,
+static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
struct lov_stripe_md *lsm)
{
struct obd_device *obd;
continue;
}
- if (!obd_get_info(tgts->ltd_exp, keylen, key,
+ if (!obd_get_info(env, tgts->ltd_exp, keylen, key,
vallen, val, NULL))
RETURN(0);
}
* Forwarding this request to first MDS, it should know LOV
* desc.
*/
- rc = obd_get_info(lmv->tgts[0].ltd_exp, keylen, key,
+ rc = obd_get_info(env, lmv->tgts[0].ltd_exp, keylen, key,
vallen, val, NULL);
if (!rc && KEY_IS(KEY_CONN_DATA)) {
exp->exp_connect_flags =
((struct obd_connect_data *)val)->ocd_connect_flags;
}
RETURN(rc);
+ } else if (KEY_IS(KEY_TGT_COUNT)) {
+ *((int *)val) = lmv->desc.ld_tgt_count;
+ RETURN(0);
}
CDEBUG(D_IOCTL, "Invalid key\n");
RETURN(-EINVAL);
}
-int lmv_set_info_async(struct obd_export *exp, obd_count keylen,
- void *key, obd_count vallen, void *val,
- struct ptlrpc_request_set *set)
+int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
+ obd_count keylen, void *key, obd_count vallen,
+ void *val, struct ptlrpc_request_set *set)
{
struct lmv_tgt_desc *tgt;
struct obd_device *obd;
if (!tgt->ltd_exp)
continue;
- err = obd_set_info_async(tgt->ltd_exp,
+ err = obd_set_info_async(env, tgt->ltd_exp,
keylen, key, vallen, val, set);
if (err && rc == 0)
rc = err;
RETURN(mea_size);
if (*lmmp && !lsm) {
- OBD_FREE(*lmmp, mea_size);
+ OBD_FREE_LARGE(*lmmp, mea_size);
*lmmp = NULL;
RETURN(0);
}
if (*lmmp == NULL) {
- OBD_ALLOC(*lmmp, mea_size);
+ OBD_ALLOC_LARGE(*lmmp, mea_size);
if (*lmmp == NULL)
RETURN(-ENOMEM);
}
return mea_size;
if (*lsmp != NULL && lmm == NULL) {
- OBD_FREE(*tmea, mea_size);
+ OBD_FREE_LARGE(*tmea, mea_size);
*lsmp = NULL;
RETURN(0);
}
LASSERT(mea_size == lmm_size);
- OBD_ALLOC(*tmea, mea_size);
+ OBD_ALLOC_LARGE(*tmea, mea_size);
if (*tmea == NULL)
RETURN(-ENOMEM);
}
int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
- __u32 *bits)
+ __u64 *bits)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_object *obj;
- struct lmv_tgt_desc *tgt;
+ struct lmv_tgt_desc *tgt = NULL;
int rc;
int sidx;
ENTRY;
if (rc)
RETURN(rc);
- if (!fid_is_sane(&op_data->op_fid2)) {
+ if (op_data->op_namelen) {
obj = lmv_object_find(obd, &op_data->op_fid1);
- if (obj && op_data->op_namelen) {
- sidx = raw_name2idx(obj->lo_hashtype,
- obj->lo_objcount,
+ if (obj) {
+ sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
(char *)op_data->op_name,
op_data->op_namelen);
op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
- tgt = lmv_get_target(lmv,
- obj->lo_stripes[sidx].ls_mds);
- CDEBUG(D_INODE,
- "Choose slave dir ("DFID") -> mds #%d\n",
- PFID(&op_data->op_fid1), tgt->ltd_idx);
- } else {
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- }
- if (obj)
+ tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
lmv_object_put(obj);
- } else {
- op_data->op_fid1 = op_data->op_fid2;
- tgt = lmv_find_target(lmv, &op_data->op_fid2);
- op_data->op_bias = MDS_CROSS_REF;
- /*
- * Unfortunately, we have to lie to MDC/MDS to retrieve
- * attributes llite needs.
- */
- if (minfo->mi_it.it_op & IT_LOOKUP)
- minfo->mi_it.it_op = IT_GETATTR;
+ }
}
+ if (tgt == NULL)
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
}
int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u32 *bits)
+ struct lu_fid *fid, __u64 *bits)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
RETURN(rc);
}
+/**
+ * For lmv, only need to send request to master MDT, and the master MDT will
+ * process with other slave MDTs. The only exception is Q_GETOQUOTA for which
+ * we directly fetch data from the slave MDTs.
+ */
+int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
+                 struct obd_quotactl *oqctl)
+{
+        struct obd_device     *obd = class_exp2obd(exp);
+        struct lmv_obd        *lmv = &obd->u.lmv;
+        struct lmv_tgt_desc   *tgt = &lmv->tgts[0];
+        int                    rc = 0, i;
+        __u64                  curspace, curinodes;
+        ENTRY;
+
+        /* The master MDT (index 0) must be configured and active before any
+         * quota command can be handled. */
+        if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
+                CERROR("master lmv inactive\n");
+                RETURN(-EIO);
+        }
+
+        /* All commands except Q_GETOQUOTA are simply forwarded to the master
+         * MDT, which coordinates the slaves itself. */
+        if (oqctl->qc_cmd != Q_GETOQUOTA) {
+                rc = obd_quotactl(tgt->ltd_exp, oqctl);
+                RETURN(rc);
+        }
+
+        /* Q_GETOQUOTA: fan out to every connected, active target and sum the
+         * per-target usage into a single result. */
+        curspace = curinodes = 0;
+        for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+                int err;
+                tgt = &lmv->tgts[i];
+
+                /* Slot configured but never connected: nothing to query. */
+                if (tgt->ltd_exp == NULL)
+                        continue;
+                /* Inactive targets are skipped (best effort); the totals may
+                 * therefore under-report while a target is down. */
+                if (!tgt->ltd_active) {
+                        CDEBUG(D_HA, "mdt %d is inactive.\n", i);
+                        continue;
+                }
+
+                err = obd_quotactl(tgt->ltd_exp, oqctl);
+                if (err) {
+                        CERROR("getquota on mdt %d failed. %d\n", i, err);
+                        /* Remember the first failure but keep aggregating the
+                         * targets that do answer. */
+                        if (!rc)
+                                rc = err;
+                } else {
+                        curspace += oqctl->qc_dqblk.dqb_curspace;
+                        curinodes += oqctl->qc_dqblk.dqb_curinodes;
+                }
+        }
+        /* Return the aggregate in the caller's oqctl. */
+        oqctl->qc_dqblk.dqb_curspace = curspace;
+        oqctl->qc_dqblk.dqb_curinodes = curinodes;
+
+        RETURN(rc);
+}
+
+/**
+ * Run quotacheck on every target MDT.
+ *
+ * Unlike lmv_quotactl()'s Q_GETOQUOTA path, quotacheck cannot be best-effort:
+ * a target that cannot participate makes the whole check unreliable, so any
+ * unconnected or inactive target aborts with -EIO.  Otherwise the first
+ * per-target error is returned after all targets have been visited.
+ */
+int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
+                   struct obd_quotactl *oqctl)
+{
+        struct obd_device     *obd = class_exp2obd(exp);
+        struct lmv_obd        *lmv = &obd->u.lmv;
+        struct lmv_tgt_desc   *tgt;
+        int                    i, rc = 0;
+        ENTRY;
+
+        for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
+                int err;
+
+                /* Guard ltd_exp as lmv_quotactl() does: a configured slot may
+                 * not be connected yet, and obd_quotacheck() must not be
+                 * handed a NULL export. */
+                if (tgt->ltd_exp == NULL || !tgt->ltd_active) {
+                        CERROR("lmv idx %d inactive\n", i);
+                        RETURN(-EIO);
+                }
+
+                err = obd_quotacheck(tgt->ltd_exp, oqctl);
+                if (err && !rc)
+                        rc = err;
+        }
+
+        RETURN(rc);
+}
struct obd_ops lmv_obd_ops = {
.o_owner = THIS_MODULE,
.o_notify = lmv_notify,
.o_get_uuid = lmv_get_uuid,
.o_iocontrol = lmv_iocontrol,
- .o_fid_delete = lmv_fid_delete
+ .o_fid_delete = lmv_fid_delete,
+ .o_quotacheck = lmv_quotacheck,
+ .o_quotactl = lmv_quotactl
};
struct md_ops lmv_md_ops = {
.m_revalidate_lock = lmv_revalidate_lock
};
-static quota_interface_t *quota_interface;
-extern quota_interface_t lmv_quota_interface;
-
int __init lmv_init(void)
{
struct lprocfs_static_vars lvars;
lprocfs_lmv_init_vars(&lvars);
- cfs_request_module("lquota");
- quota_interface = PORTAL_SYMBOL_GET(lmv_quota_interface);
- init_obd_quota_ops(quota_interface, &lmv_obd_ops);
-
rc = class_register_type(&lmv_obd_ops, &lmv_md_ops,
lvars.module_vars, LUSTRE_LMV_NAME, NULL);
- if (rc) {
- if (quota_interface)
- PORTAL_SYMBOL_PUT(lmv_quota_interface);
+ if (rc)
cfs_mem_cache_destroy(lmv_object_cache);
- }
return rc;
}
#ifdef __KERNEL__
static void lmv_exit(void)
{
- if (quota_interface)
- PORTAL_SYMBOL_PUT(lmv_quota_interface);
-
class_unregister_type(LUSTRE_LMV_NAME);
LASSERTF(cfs_atomic_read(&lmv_object_count) == 0,