return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &cmm_lu_ops);
}
-int cmm_root_get(const struct lu_context *ctx, struct md_device *md,
+int cmm_get_root(const struct lu_context *ctx, struct md_device *md,
struct lu_fid *fid)
{
struct cmm_device *cmm_dev = md2cmm_dev(md);
/* valid only on master MDS */
if (cmm_dev->cmm_local_num == 0)
- return cmm_child_ops(cmm_dev)->mdo_root_get(ctx,
- cmm_dev->cmm_child, fid);
+ return cmm_child_ops(cmm_dev)->mdo_get_root(ctx,
+ cmm_dev->cmm_child, fid);
else
return -EINVAL;
}
}
static struct md_device_operations cmm_md_ops = {
- .mdo_root_get = cmm_root_get,
.mdo_statfs = cmm_statfs,
+ .mdo_get_root = cmm_get_root,
.mdo_get_maxsize = cmm_get_maxsize,
};
const struct lu_object_header *hdr,
struct lu_device *);
-int cmm_root_get(const struct lu_context *ctx, struct md_device *md,
+int cmm_get_root(const struct lu_context *ctx, struct md_device *md,
struct lu_fid *fid);
#ifdef HAVE_SPLIT_SUPPORT
GOTO(cleanup, rc = CMM_NO_SPLIT_EXPECTED);
OBD_ALLOC_PTR(fid);
- rc = cmm_root_get(ctx, &cmm->cmm_md_dev, fid);
+ rc = cmm_get_root(ctx, &cmm->cmm_md_dev, fid);
if (rc)
GOTO(cleanup, rc);
#include "fid_internal.h"
#ifdef __KERNEL__
-/*
- * Sequence space, starts from 0x400 to have first 0x400 sequences used for
- * special purposes. This means that if we have seq-with 10000 fids, we have
- * ~10M fids reserved for special purposes (igifs, etc.).
- */
-const struct lu_range LUSTRE_SEQ_SPACE_RANGE = {
- (0x400),
- ((__u64)~0ULL)
-};
-EXPORT_SYMBOL(LUSTRE_SEQ_SPACE_RANGE);
-
-/* zero range, used for init and other purposes */
-const struct lu_range LUSTRE_SEQ_ZERO_RANGE = {
- 0,
- 0
-};
-EXPORT_SYMBOL(LUSTRE_SEQ_ZERO_RANGE);
-
/* assigns client to sequence controller node */
int seq_server_set_cli(struct lu_server_seq *seq,
struct lu_client_seq *cli,
* fid/fid_internal.h
*
* Copyright (C) 2006 Cluster File Systems, Inc.
+ * Author: Yury Umanets <umka@clusterfs.com>
*
* This file is part of the Lustre file system, http://www.lustre.org
* Lustre is a trademark of Cluster File Systems, Inc.
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* license text for more details.
*/
-#ifndef _FID_INTERNAL_H
-#define _FID_INTERNAL_H
+#ifndef __FID_INTERNAL_H
+#define __FID_INTERNAL_H
#include <lustre/lustre_idl.h>
#include <dt_object.h>
#endif
-#endif
+#endif /* __FID_INTERNAL_H */
*
* Copyright (c) 2006 Cluster File Systems, Inc.
* Author: Nikita Danilov <nikita@clusterfs.com>
+ * Yury Umanets <umka@clusterfs.com>
*
* This file is part of the Lustre file system, http://www.lustre.org
* Lustre is a trademark of Cluster File Systems, Inc.
#include <lu_object.h>
#include <lustre_fid.h>
+/*
+ * Sequence space starts from 0x400, so the first 0x400 sequences are
+ * reserved for special purposes. With a sequence width of 10000 fids, this
+ * means ~10M fids are reserved for special purposes (igifs, etc.).
+ */
+const struct lu_range LUSTRE_SEQ_SPACE_RANGE = {
+ (0x400),
+ ((__u64)~0ULL)
+};
+EXPORT_SYMBOL(LUSTRE_SEQ_SPACE_RANGE);
+
+/* Zero range, used for init and other purposes. */
+const struct lu_range LUSTRE_SEQ_ZERO_RANGE = {
+ 0,
+ 0
+};
+EXPORT_SYMBOL(LUSTRE_SEQ_ZERO_RANGE);
+
+/* Lustre Big Filesystem Lock (BFL) fid, used to serialize renames. */
+const struct lu_fid LUSTRE_BFL_FID = { .f_seq = 0x0000000000000003,
+ .f_oid = 0x0000000000000001,
+ .f_ver = 0x0000000000000000 };
+EXPORT_SYMBOL(LUSTRE_BFL_FID);
+
void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
/* check that all fields are converted */
* FLD (Fids Location Database)
*
* Copyright (C) 2006 Cluster File Systems, Inc.
- * Author: WangDi <wangdi@clusterfs.com>
- * Yury Umanets <umka@clusterfs.com>
+ * Author: Yury Umanets <umka@clusterfs.com>
+ * WangDi <wangdi@clusterfs.com>
*
* This file is part of the Lustre file system, http://www.lustre.org
* Lustre is a trademark of Cluster File Systems, Inc.
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* license text for more details.
*/
-#ifndef _FLD_INTERNAL_H
-#define _FLD_INTERNAL_H
+#ifndef __FLD_INTERNAL_H
+#define __FLD_INTERNAL_H
#include <lustre/lustre_idl.h>
#include <dt_object.h>
return tar->ft_exp->exp_client_uuid.uuid;
}
-#endif
+#endif /* __FLD_INTERNAL_H */
/*
* Return fid of root index object.
*/
- int (*dt_root_get)(const struct lu_context *ctx,
+ int (*dt_get_root)(const struct lu_context *ctx,
struct dt_device *dev, struct lu_fid *f);
/*
* Return device configuration data.
*/
- void (*dt_conf_get)(const struct lu_context *ctx,
+ void (*dt_get_conf)(const struct lu_context *ctx,
const struct dt_device *dev,
struct dt_device_param *param);
/*
* Controller Seq Manager
*/
struct lu_server_seq *ls_control_seq;
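+ /*
+ * Export used to connect to the sequence controller node (mdt0);
+ * NULL if this node is the controller itself.
+ */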
+ struct obd_export *ls_control_exp;
/*
* Client Seq Manager
*/
struct lu_client_seq *ls_client_seq;
- struct obd_export *ls_client_exp;
/* statistical counters. Protected by nothing, races are accepted. */
struct {
#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
#define MEA_MAGIC_ALL_CHARS 0xb222a11c
#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
-#define MAX_HASH_SIZE 0x3fffffff
+#define MAX_HASH_SIZE 0x3fffffff
struct lmv_stripe_md {
__u32 mea_magic;
#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
- OBD_CONNECT_IBITS | OBD_CONNECT_JOIN)
+ OBD_CONNECT_IBITS | OBD_CONNECT_JOIN | \
+ OBD_CONNECT_ATTRFID)
#define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\
((patch)<<8) + (fix))
* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Copyright (C) 2006 Cluster File Systems, Inc.
+ * Author: Yury Umanets <umka@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
*
/* whole sequences space range and zero range definitions */
extern const struct lu_range LUSTRE_SEQ_SPACE_RANGE;
extern const struct lu_range LUSTRE_SEQ_ZERO_RANGE;
+extern const struct lu_fid LUSTRE_BFL_FID;
enum {
/* this is how may FIDs may be allocated in one sequence. 16384 for now */
void range_le_to_cpu(struct lu_range *dst, const struct lu_range *src);
void range_be_to_cpu(struct lu_range *dst, const struct lu_range *src);
-#endif /* __LINUX_OBD_CLASS_H */
+#endif /* __LINUX_FID_H */
struct md_device_operations {
/* meta-data device related handlers. */
- int (*mdo_root_get)(const struct lu_context *ctx,
+ int (*mdo_get_root)(const struct lu_context *ctx,
struct md_device *m, struct lu_fid *f);
- int (*mdo_statfs)(const struct lu_context *ctx,
- struct md_device *m, struct kstatfs *sfs);
int (*mdo_get_maxsize)(const struct lu_context *ctx,
struct md_device *m, int *md_size,
int *cookie_size);
+ int (*mdo_statfs)(const struct lu_context *ctx,
+ struct md_device *m, struct kstatfs *sfs);
};
enum md_upcall_event {
update:
obj->lo_inodes[i].li_size = (MAX_HASH_SIZE/obj->lo_objcount) *
- (i + 1);
+ (i + 1);
CDEBUG(D_OTHER, "fresh: %lu\n",
(unsigned long)obj->lo_inodes[i].li_size);
return rc;
}
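+/*
+ * Store the connect data reported by the MDC with @uuid in the LMV
+ * per-target array, so that its connect flags can be consulted later.
+ */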
+static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid,
+ struct obd_connect_data *data)
+{
+ struct lmv_tgt_desc *tgt;
+ int i;
+ ENTRY;
+
+ LASSERT(data != NULL);
+
+ spin_lock(&lmv->lmv_lock);
+ for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
+ if (tgt->ltd_exp == NULL)
+ continue;
+
+ if (obd_uuid_equals(uuid, &tgt->uuid)) {
+ lmv->datas[tgt->idx] = *data;
+ break;
+ }
+ }
+ spin_unlock(&lmv->lmv_lock);
+ RETURN(0);
+}
+
static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
enum obd_notify_event ev, void *data)
{
+ struct lmv_obd *lmv = &obd->u.lmv;
struct obd_uuid *uuid;
int rc;
ENTRY;
watched->obd_name);
RETURN(-EINVAL);
}
- uuid = &watched->u.cli.cl_target_uuid;
- /* Set MDC as active before notifying the observer, so the observer can
- * use the MDC normally. */
- rc = lmv_set_mdc_active(&obd->u.lmv, uuid,
- ev == OBD_NOTIFY_ACTIVE);
- if (rc) {
- CERROR("%sactivation of %s failed: %d\n",
- ev == OBD_NOTIFY_ACTIVE ? "" : "de",
- uuid->uuid, rc);
- RETURN(rc);
+ uuid = &watched->u.cli.cl_target_uuid;
+ if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
+ /*
+ * Set MDC as active before notifying the observer, so the
+ * observer can use the MDC normally.
+ */
+ rc = lmv_set_mdc_active(lmv, uuid,
+ ev == OBD_NOTIFY_ACTIVE);
+ if (rc) {
+ CERROR("%sactivation of %s failed: %d\n",
+ ev == OBD_NOTIFY_ACTIVE ? "" : "de",
+ uuid->uuid, rc);
+ RETURN(rc);
+ }
}
+ if (ev == OBD_NOTIFY_OCD) {
+ struct obd_connect_data *conn_data =
+ &watched->u.cli.cl_import->imp_connect_data;
+ /*
+ * Set connect data on the desired target and update exp_connect_flags.
+ */
+ rc = lmv_set_mdc_data(lmv, uuid, conn_data);
+ if (rc) {
+ CERROR("can't set connect data to target %s, rc %d\n",
+ uuid->uuid, rc);
+ RETURN(rc);
+ }
+
+ /*
+ * XXX: make sure that ocd_connect_flags from all targets are
+ * the same. Otherwise one of the MDTs is running a wrong version or
+ * something like this. --umka
+ */
+ obd->obd_self_export->exp_connect_flags =
+ conn_data->ocd_connect_flags;
+ }
+
+ /* Pass the notification up the chain. */
if (obd->obd_observer)
- /* pass the notification up the chain. */
rc = obd_notify(obd->obd_observer, watched, ev, data);
RETURN(rc);
if (obj && i < obj->lo_objcount - 1) {
struct lu_dirpage *dp;
__u32 end;
- /* This dirobj has been splitted, so we
- * check whether reach the end of one hash_segment
- * and reset ldp->ldp_hash_end
- */
+ /*
+ * This dirobj has been split, so check whether we have reached the
+ * end of one hash_segment and reset ldp->ldp_hash_end.
+ */
kmap(page);
dp = page_address(page);
end = le32_to_cpu(dp->ldp_hash_end);
if (end == ~0ul) {
__u32 hash_segment_end = (i + 1) *
- MAX_HASH_SIZE/obj->lo_objcount;
+ MAX_HASH_SIZE/obj->lo_objcount;
dp->ldp_hash_end = cpu_to_le32(hash_segment_end);
CDEBUG(D_INFO,"reset hash end %x for split obj "DFID"",
le32_to_cpu(dp->ldp_hash_end), PFID(&rid));
break;
}
case IMP_EVENT_OCD:
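+ /* Pass the new connect data up to the observer (e.g. LMV). */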
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
break;
default:
rc = next->ld_ops->ldo_process_config(ctxt, next, cfg);
if (rc)
GOTO(out, rc);
- dt->dd_ops->dt_conf_get(ctxt, dt, &m->mdd_dt_conf);
+ dt->dd_ops->dt_get_conf(ctxt, dt, &m->mdd_dt_conf);
rc = mdd_mount(ctxt, m);
if (rc)
RETURN(rc);
}
-static int mdd_root_get(const struct lu_context *ctx,
+static int mdd_get_root(const struct lu_context *ctx,
struct md_device *m, struct lu_fid *f)
{
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
}
struct md_device_operations mdd_ops = {
- .mdo_root_get = mdd_root_get,
.mdo_statfs = mdd_statfs,
+ .mdo_get_root = mdd_get_root,
.mdo_get_maxsize = mdd_get_maxsize,
};
rc = -ENOMEM;
else {
body = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY);
- rc = next->md_ops->mdo_root_get(info->mti_ctxt,
+ rc = next->md_ops->mdo_get_root(info->mti_ctxt,
next, &body->fid1);
if (rc == 0)
body->valid |= OBD_MD_FLID;
return -EOPNOTSUPP;
}
-/*
- * Build (DLM) resource name from fid.
- */
-struct ldlm_res_id *fid_build_res_name(const struct lu_fid *f,
- struct ldlm_res_id *name)
-{
- memset(name, 0, sizeof *name);
- name->name[0] = fid_seq(f);
- name->name[1] = fid_oid(f);
- name->name[2] = fid_ver(f);
- return name;
-}
-
/* issues dlm lock on passed @ns, @f stores it lock handle into @lh. */
int fid_lock(struct ldlm_namespace *ns, const struct lu_fid *f,
struct lustre_handle *lh, ldlm_mode_t mode,
CERROR("target %s connect error %d\n",
mdc->obd_name, rc);
} else {
- ls->ls_client_exp = class_conn2export(&conn);
+ ls->ls_control_exp = class_conn2export(&conn);
OBD_ALLOC_PTR(ls->ls_client_seq);
mdc->obd_name);
rc = seq_client_init(ls->ls_client_seq,
- ls->ls_client_exp,
+ ls->ls_control_exp,
LUSTRE_SEQ_METADATA,
prefix, NULL, NULL);
OBD_FREE(prefix, MAX_OBD_NAME + 5);
seq_server_set_cli(ls->ls_server_seq,
NULL, NULL);
- if (ls && ls->ls_client_exp) {
- rc = obd_disconnect(ls->ls_client_exp);
+ if (ls && ls->ls_control_exp) {
+ rc = obd_disconnect(ls->ls_control_exp);
if (rc) {
CERROR("failure to disconnect "
"obd: %d\n", rc);
}
- ls->ls_client_exp = NULL;
+ ls->ls_control_exp = NULL;
}
EXIT;
}
} \
} while(0)
+/*
+ * Build (DLM) resource name from fid.
+ */
+static inline struct ldlm_res_id *
+fid_build_res_name(const struct lu_fid *f,
+ struct ldlm_res_id *name)
+{
+ memset(name, 0, sizeof *name);
+ name->name[0] = fid_seq(f);
+ name->name[1] = fid_oid(f);
+ name->name[2] = fid_ver(f);
+ return name;
+}
+
#endif /* __KERNEL__ */
#endif /* _MDT_H */
return rc;
}
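+/*
+ * Take the global rename lock: enqueue an EX IBITS lock on the BFL fid
+ * resource. If this node is the controller (mdt0), enqueue locally;
+ * otherwise enqueue against the controller's export like any other client.
+ */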
+static int mdt_rename_lock(struct mdt_thread_info *info,
+ struct lustre_handle *lh)
+{
+ ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_UPDATE } };
+ struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
+ int flags = LDLM_FL_ATOMIC_CB;
+ struct ldlm_res_id res_id;
+ struct lu_site *ls;
+ int rc;
+ ENTRY;
+
+ ls = info->mti_mdt->mdt_md_dev.md_lu_dev.ld_site;
+ fid_build_res_name(&LUSTRE_BFL_FID, &res_id);
+
+ if (ls->ls_control_exp == NULL) {
+ /*
+ * The current node is the controller (mdt0), so take the BFL
+ * lock locally.
+ */
+ rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, &policy,
+ LCK_EX, &flags, ldlm_blocking_ast,
+ ldlm_completion_ast, NULL, NULL, 0,
+ NULL, lh);
+ } else {
+ /*
+ * Here mdt0 is a remote node, so issue the DLM lock the same way
+ * other clients do.
+ */
+ rc = ldlm_cli_enqueue(ls->ls_control_exp, NULL, res_id,
+ LDLM_IBITS, &policy, LCK_EX, &flags,
+ ldlm_blocking_ast, ldlm_completion_ast,
+ NULL, NULL, NULL, 0, NULL, lh, 0);
+ }
+
+ RETURN(rc);
+}
+
+static void mdt_rename_unlock(struct lustre_handle *lh)
+{
+ ENTRY;
+ ldlm_lock_decref(lh, LCK_EX);
+ EXIT;
+}
+
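+/* Rename sanity-check hook; nothing is checked so far. */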
+static int mdt_rename_check(struct mdt_thread_info *info)
+{
+ return 0;
+}
static int mdt_reint_rename(struct mdt_thread_info *info)
{
struct mdt_lock_handle *lh_newp;
struct lu_fid *old_fid = &info->mti_tmp_fid1;
struct lu_fid *new_fid = &info->mti_tmp_fid2;
+ struct lustre_handle rename_lh = { 0 };
int rc;
ENTRY;
RETURN(mdt_reint_rename_tgt(info));
}
+ rc = mdt_rename_lock(info, &rename_lh);
+ if (rc) {
+ CERROR("can't lock FS for rename, rc %d\n", rc);
+ RETURN(rc);
+ }
+
+ rc = mdt_rename_check(info);
+ if (rc)
+ GOTO(out, rc);
+
lh_newp = &info->mti_lh[MDT_LH_NEW];
/* step 1: lock the source dir */
mdt_object_unlock_put(info, mtgtdir, lh_tgtdirp, rc);
out_unlock_source:
mdt_object_unlock_put(info, msrcdir, lh_srcdirp, rc);
+ mdt_rename_unlock(&rename_lh);
out:
return rc;
}
struct dt_object *root;
struct dt_object *child;
- result = dt->dd_ops->dt_root_get(ctx, dt, fid);
+ result = dt->dd_ops->dt_get_root(ctx, dt, fid);
if (result == 0) {
root = dt_locate(ctx, dt, fid);
if (!IS_ERR(root)) {
struct lu_context od_ctx_for_commit;
};
-static int osd_root_get (const struct lu_context *ctxt,
+static int osd_get_root (const struct lu_context *ctxt,
struct dt_device *dev, struct lu_fid *f);
static int osd_statfs (const struct lu_context *ctxt,
struct dt_device *dev, struct kstatfs *sfs);
struct thandle *th);
static int osd_it_key_size (const struct lu_context *ctx,
const struct dt_it *di);
-static void osd_conf_get (const struct lu_context *ctx,
+static void osd_get_conf (const struct lu_context *ctx,
const struct dt_device *dev,
struct dt_device_param *param);
static int osd_read_locked (const struct lu_context *ctx,
set_fs(*seg);
}
-static int osd_root_get(const struct lu_context *ctx,
+static int osd_get_root(const struct lu_context *ctx,
struct dt_device *dev, struct lu_fid *f)
{
struct inode *inode;
RETURN (result);
}
-static void osd_conf_get(const struct lu_context *ctx,
+static void osd_get_conf(const struct lu_context *ctx,
const struct dt_device *dev,
struct dt_device_param *param)
{
static struct dt_device_operations osd_dt_ops = {
- .dt_root_get = osd_root_get,
+ .dt_get_root = osd_get_root,
.dt_statfs = osd_statfs,
.dt_trans_start = osd_trans_start,
.dt_trans_stop = osd_trans_stop,
- .dt_conf_get = osd_conf_get,
+ .dt_get_conf = osd_get_conf,
.dt_sync = osd_sync,
.dt_ro = osd_ro
};