* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include <liblustre.h>
#endif
-#include <lustre/lustre_idl.h>
#include <lustre_log.h>
#include <obd_support.h>
#include <lustre_lib.h>
/* object cache. */
cfs_mem_cache_t *lmv_object_cache;
-atomic_t lmv_object_count = ATOMIC_INIT(0);
+cfs_atomic_t lmv_object_count = CFS_ATOMIC_INIT(0);
static void lmv_activate_target(struct lmv_obd *lmv,
struct lmv_tgt_desc *tgt,
CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
lmv, uuid->uuid, activate);
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
if (tgt->ltd_exp == NULL)
continue;
EXIT;
out_lmv_lock:
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
return rc;
}
LASSERT(data != NULL);
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
if (tgt->ltd_exp == NULL)
continue;
break;
}
}
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
RETURN(0);
}
CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
+ cfs_atomic_read(&obd->obd_refcount));
#ifdef __KERNEL__
lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
if (lmv_proc_dir) {
struct proc_dir_entry *mdc_symlink;
- char name[MAX_STRING_SIZE + 1];
LASSERT(mdc_obd->obd_type != NULL);
LASSERT(mdc_obd->obd_type->typ_name != NULL);
- name[MAX_STRING_SIZE] = '\0';
- snprintf(name, MAX_STRING_SIZE, "../../../%s/%s",
- mdc_obd->obd_type->typ_name,
- mdc_obd->obd_name);
- mdc_symlink = proc_symlink(mdc_obd->obd_name,
- lmv_proc_dir, name);
+ mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name,
+ lmv_proc_dir,
+ "../../../%s/%s",
+ mdc_obd->obd_type->typ_name,
+ mdc_obd->obd_name);
if (mdc_symlink == NULL) {
CERROR("Could not register LMV target "
"/proc/fs/lustre/%s/%s/target_obds/%s.",
RETURN(-EINVAL);
}
- rc = obd_llog_init(obd, &obd->obd_olg, mdc_obd, 0, NULL, tgt_uuid);
+ rc = obd_llog_init(obd, &obd->obd_olg, mdc_obd, NULL);
if (rc) {
lmv_init_unlock(lmv);
CERROR("lmv failed to setup llogging subsystems\n");
}
}
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
tgt->ltd_uuid = *tgt_uuid;
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
if (lmv->connected) {
rc = lmv_connect_mdc(obd, tgt);
if (rc) {
- spin_lock(&lmv->lmv_lock);
+ cfs_spin_lock(&lmv->lmv_lock);
lmv->desc.ld_tgt_count--;
memset(tgt, 0, sizeof(*tgt));
- spin_unlock(&lmv->lmv_lock);
+ cfs_spin_unlock(&lmv->lmv_lock);
} else {
int easize = sizeof(struct lmv_stripe_md) +
lmv->desc.ld_tgt_count *
mdc_obd = class_exp2obd(tgt->ltd_exp);
- if (mdc_obd)
+ if (mdc_obd) {
+ mdc_obd->obd_force = obd->obd_force;
+ mdc_obd->obd_fail = obd->obd_fail;
mdc_obd->obd_no_recov = obd->obd_no_recov;
+ }
#ifdef __KERNEL__
lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
__u32 index;
memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
- LASSERT(data->ioc_plen1 == sizeof(struct obd_statfs));
-
if ((index >= count))
RETURN(-ENODEV);
if (!mdc_obd)
RETURN(-EINVAL);
+ /* copy UUID */
+ if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
+ min((int) data->ioc_plen2,
+ (int) sizeof(struct obd_uuid))))
+ RETURN(-EFAULT);
+
rc = obd_statfs(mdc_obd, &stat_buf,
- cfs_time_current_64() - HZ, 0);
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ 0);
if (rc)
RETURN(rc);
- if (copy_to_user(data->ioc_pbuf1, &stat_buf, data->ioc_plen1))
- RETURN(-EFAULT);
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
- data->ioc_plen2))
+ if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+ min((int) data->ioc_plen1,
+ (int) sizeof(stat_buf))))
RETURN(-EFAULT);
break;
}
OBD_FREE_PTR(oqctl);
break;
}
+ case OBD_IOC_CHANGELOG_SEND:
case OBD_IOC_CHANGELOG_CLEAR: {
- struct ioc_changelog_clear *icc = karg;
+ struct ioc_changelog *icc = karg;
if (icc->icc_mdtindex >= count)
RETURN(-ENODEV);
sizeof(*icc), icc, NULL);
break;
}
+ case LL_IOC_GET_CONNECT_FLAGS: {
+ rc = obd_iocontrol(cmd, lmv->tgts[0].ltd_exp, len, karg, uarg);
+ break;
+ }
default : {
for (i = 0; i < count; i++) {
int err;
+ struct obd_device *mdc_obd;
if (lmv->tgts[i].ltd_exp == NULL)
continue;
-
+ /* ll_umount_begin() sets force flag but for lmv, not
+ * mdc. Let's pass it through */
+ mdc_obd = class_exp2obd(lmv->tgts[i].ltd_exp);
+ mdc_obd->obd_force = obddev->obd_force;
err = obd_iocontrol(cmd, lmv->tgts[i].ltd_exp, len,
karg, uarg);
if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
* New seq alloc and FLD setup should be atomic. Otherwise we may find
* on server that seq in new allocated fid is not yet known.
*/
- down(&tgt->ltd_fid_sem);
+ cfs_down(&tgt->ltd_fid_sem);
if (!tgt->ltd_active)
GOTO(out, rc = -ENODEV);
EXIT;
out:
- up(&tgt->ltd_fid_sem);
+ cfs_up(&tgt->ltd_fid_sem);
return rc;
}
RETURN(-ENOMEM);
for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
- sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
+ cfs_sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
lmv->tgts[i].ltd_idx = i;
}
lmv->max_easize = 0;
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
- spin_lock_init(&lmv->lmv_lock);
- sema_init(&lmv->init_sem, 1);
+ cfs_spin_lock_init(&lmv->lmv_lock);
+ cfs_sema_init(&lmv->init_sem, 1);
rc = lmv_object_setup(obd);
if (rc) {
RETURN(rc);
}
-static int lmv_getattr(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, obd_valid valid, int ea_size,
+static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
if (rc)
RETURN(rc);
- tgt = lmv_find_target(lmv, fid);
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
- rc = md_getattr(tgt->ltd_exp, fid, oc, valid, ea_size, request);
+ if (op_data->op_valid & OBD_MD_MDTIDX) {
+ op_data->op_mds = tgt->ltd_idx;
+ RETURN(0);
+ }
+
+ rc = md_getattr(tgt->ltd_exp, op_data, request);
if (rc)
RETURN(rc);
- obj = lmv_object_find_lock(obd, fid);
+ obj = lmv_object_find_lock(obd, &op_data->op_fid1);
- CDEBUG(D_INODE, "GETATTR for "DFID" %s\n", PFID(fid),
+ CDEBUG(D_INODE, "GETATTR for "DFID" %s\n", PFID(&op_data->op_fid1),
obj ? "(split)" : "");
/*
RETURN(0);
}
+/**
+ * Run the lock iterator \a it (with opaque argument \a data) against the
+ * cached locks matching \a fid on every configured MDC target.
+ *
+ * With CMD a single object may hold locks in more than one MDT namespace
+ * (see the comment in the body), so all targets must be consulted rather
+ * than only the one that owns the inode.
+ *
+ * Returns the first non-zero value reported by md_find_cbdata() — the scan
+ * stops at that target — or 0 if every call returned 0.  (Presumably
+ * non-zero means "matching lock found"; confirm against md_find_cbdata()
+ * implementations in mdc.)
+ */
+static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
+                           ldlm_iterator_t it, void *data)
+{
+        struct obd_device *obd = exp->exp_obd;
+        struct lmv_obd *lmv = &obd->u.lmv;
+        int i;
+        int rc;
+        ENTRY;
+
+        /* Ensure all MDC targets are connected before touching ltd_exp. */
+        rc = lmv_check_connect(obd);
+        if (rc)
+                RETURN(rc);
+
+        CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
+
+        /*
+         * With CMD every object can have two locks in different namespaces:
+         * lookup lock in space of mds storing direntry and update/open lock in
+         * space of mds storing inode.
+         */
+        for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+                rc = md_find_cbdata(lmv->tgts[i].ltd_exp, fid, it, data);
+                if (rc)
+                        RETURN(rc);
+        }
+
+        RETURN(rc);
+}
+
+
static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
struct md_open_data *mod, struct ptlrpc_request **request)
{
struct lmv_tgt_desc *tgt;
struct lmv_object *obj;
struct lustre_md md;
+ struct md_op_data *op_data;
int mealen;
int rc;
__u64 valid;
/*
* Time to update mea of parent fid.
*/
- rc = md_getattr(tgt->ltd_exp, fid, NULL, valid, mealen, &req);
+
+ OBD_ALLOC_PTR(op_data);
+ if (op_data == NULL)
+ RETURN(-ENOMEM);
+
+ op_data->op_fid1 = *fid;
+ op_data->op_mode = mealen;
+ op_data->op_valid = valid;
+
+ rc = md_getattr(tgt->ltd_exp, op_data, &req);
+ OBD_FREE_PTR(op_data);
if (rc) {
CERROR("md_getattr() failed, error %d\n", rc);
GOTO(cleanup, rc);
}
static int
-lmv_getattr_name(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, const char *name, int namelen,
- obd_valid valid, int ea_size, __u32 suppgid,
+lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
struct ptlrpc_request **request)
{
struct ptlrpc_request *req = NULL;
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- struct lu_fid rid = *fid;
+ struct lu_fid rid = op_data->op_fid1;
struct lmv_tgt_desc *tgt;
struct mdt_body *body;
struct lmv_object *obj;
+ obd_valid valid = op_data->op_valid;
int rc;
int loop = 0;
int sidx;
obj = lmv_object_find(obd, &rid);
if (obj) {
sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
- name, namelen - 1);
+ op_data->op_name, op_data->op_namelen);
rid = obj->lo_stripes[sidx].ls_fid;
tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
+ op_data->op_mds = obj->lo_stripes[sidx].ls_mds;
valid &= ~OBD_MD_FLCKSPLIT;
lmv_object_put(obj);
} else {
tgt = lmv_find_target(lmv, &rid);
valid |= OBD_MD_FLCKSPLIT;
+ op_data->op_mds = tgt->ltd_idx;
}
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" - "DFID" -> mds #%d\n",
- namelen, name, PFID(fid), PFID(&rid), tgt->ltd_idx);
+ op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
+ PFID(&rid), tgt->ltd_idx);
- rc = md_getattr_name(tgt->ltd_exp, &rid, oc, name, namelen, valid,
- ea_size, suppgid, request);
+ op_data->op_valid = valid;
+ op_data->op_fid1 = rid;
+ rc = md_getattr_name(tgt->ltd_exp, op_data, request);
if (rc == 0) {
body = req_capsule_server_get(&(*request)->rq_pill,
&RMF_MDT_BODY);
RETURN(PTR_ERR(tgt));
}
- rc = md_getattr_name(tgt->ltd_exp, &rid, NULL, NULL,
- 1, valid | OBD_MD_FLCROSSREF,
- ea_size, suppgid, &req);
+ op_data->op_fid1 = rid;
+ op_data->op_valid |= OBD_MD_FLCROSSREF;
+ op_data->op_namelen = 0;
+ op_data->op_name = NULL;
+ rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
ptlrpc_req_finished(*request);
*request = req;
}
CDEBUG(D_INODE, "EARLY_CANCEL slave "DFID" -> mds #%d\n",
PFID(st_fid), tgt->ltd_idx);
rc = md_cancel_unused(tgt->ltd_exp, st_fid, &policy,
- mode, LDLM_FL_ASYNC, NULL);
+ mode, LCF_ASYNC, NULL);
if (rc)
GOTO(out_put_obj, rc);
} else {
CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
policy.l_inodebits.bits = bits;
rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
- mode, LDLM_FL_ASYNC, NULL);
+ mode, LCF_ASYNC, NULL);
} else {
CDEBUG(D_INODE,
"EARLY_CANCEL skip operation target %d on "DFID"\n",
CDEBUG(D_INODE, "Forward to mds #%x ("DFID")\n",
mds, PFID(&op_data->op_fid1));
- op_data->op_fsuid = current->fsuid;
- op_data->op_fsgid = current->fsgid;
+ op_data->op_fsuid = cfs_curproc_fsuid();
+ op_data->op_fsgid = cfs_curproc_fsgid();
op_data->op_cap = cfs_curproc_cap_pack();
tgt = lmv_get_target(lmv, mds);
RETURN(rc);
}
- op_data->op_fsuid = current->fsuid;
- op_data->op_fsgid = current->fsgid;
+ op_data->op_fsuid = cfs_curproc_fsuid();
+ op_data->op_fsgid = cfs_curproc_fsgid();
op_data->op_cap = cfs_curproc_cap_pack();
src_tgt = lmv_get_target(lmv, mds1);
CDEBUG(D_INODE,
""DFID" reset end "LPX64" tgt %d\n",
PFID(&rid),
- le64_to_cpu(dp->ldp_hash_end), tgt_idx);
+ (__u64)le64_to_cpu(dp->ldp_hash_end), tgt_idx);
}
}
cfs_kunmap(page);
op_data->op_bias |= MDS_CHECK_SPLIT;
}
- op_data->op_fsuid = current->fsuid;
- op_data->op_fsgid = current->fsgid;
+ op_data->op_fsuid = cfs_curproc_fsuid();
+ op_data->op_fsgid = cfs_curproc_fsgid();
op_data->op_cap = cfs_curproc_cap_pack();
/*
}
lmv = &obd->u.lmv;
- if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX) ||
- KEY_IS(KEY_INIT_RECOV_BACKUP)) {
+ if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX)) {
int i, err = 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
ldlm_policy_data_t *policy, ldlm_mode_t mode,
- int flags, void *opaque)
+ ldlm_cancel_flags_t flags, void *opaque)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
RETURN(rc);
}
-int lmv_revalidate_lock(struct obd_export *exp,
- struct lookup_intent *it,
- struct lu_fid *fid)
+int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
+ struct lu_fid *fid, __u32 *bits)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
- rc = md_revalidate_lock(tgt->ltd_exp, it, fid);
+ rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
RETURN(rc);
}
struct md_ops lmv_md_ops = {
.m_getstatus = lmv_getstatus,
.m_change_cbdata = lmv_change_cbdata,
+ .m_find_cbdata = lmv_find_cbdata,
.m_close = lmv_close,
.m_create = lmv_create,
.m_done_writing = lmv_done_writing,
lprocfs_lmv_init_vars(&lvars);
- request_module("lquota");
+ cfs_request_module("lquota");
quota_interface = PORTAL_SYMBOL_GET(lmv_quota_interface);
init_obd_quota_ops(quota_interface, &lmv_obd_ops);
class_unregister_type(LUSTRE_LMV_NAME);
- LASSERTF(atomic_read(&lmv_object_count) == 0,
+ LASSERTF(cfs_atomic_read(&lmv_object_count) == 0,
"Can't free lmv objects cache, %d object(s) busy\n",
- atomic_read(&lmv_object_count));
+ cfs_atomic_read(&lmv_object_count));
cfs_mem_cache_destroy(lmv_object_cache);
}