-/* -*- MODE: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * mdd/mdd_handler.c
- * Lustre Metadata Server (mdd) routines
+ * GPL HEADER START
*
- * Copyright (C) 2006 Cluster File Systems, Inc.
- * Author: Wang Di <wangdi@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
*/
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/mdd/mdd_object.c
+ *
+ * Lustre Metadata Server (mdd) routines
+ *
+ * Author: Wang Di <wangdi@clusterfs.com>
+ */
+
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_MDS
#include <linux/module.h>
+#ifdef HAVE_EXT4_LDISKFS
+#include <ldiskfs/ldiskfs_jbd2.h>
+#else
#include <linux/jbd.h>
+#endif
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
/* fid_be_cpu(), fid_cpu_to_be(). */
#include <lustre_fid.h>
+#include <lustre_param.h>
+#ifdef HAVE_EXT4_LDISKFS
+#include <ldiskfs/ldiskfs.h>
+#else
#include <linux/ldiskfs_fs.h>
+#endif
#include <lustre_mds.h>
#include <lustre/lustre_idl.h>
#include "mdd_internal.h"
-static struct lu_object_operations mdd_lu_obj_ops;
+static const struct lu_object_operations mdd_lu_obj_ops;
+
+static int mdd_xattr_get(const struct lu_env *env,
+ struct md_object *obj, struct lu_buf *buf,
+ const char *name);
+
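+/* Fetch the private data pointer of the dt object underlying \a obj via
+ * mdo_data_get(); the object must already exist. */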
+int mdd_data_get(const struct lu_env *env, struct mdd_object *obj,
+ void **data)
+{
+ LASSERTF(mdd_object_exists(obj), "FID is "DFID"\n",
+ PFID(mdd_object_fid(obj)));
+ mdo_data_get(env, obj, data);
+ return 0;
+}
int mdd_la_get(const struct lu_env *env, struct mdd_object *obj,
struct lu_attr *la, struct lustre_capa *capa)
obj->mod_flags |= IMMUTE_OBJ;
}
+struct mdd_thread_info *mdd_env_info(const struct lu_env *env)
+{
+ struct mdd_thread_info *info;
+
+ info = lu_context_key_get(&env->le_ctx, &mdd_thread_key);
+ LASSERT(info != NULL);
+ return info;
+}
+
struct lu_buf *mdd_buf_get(const struct lu_env *env, void *area, ssize_t len)
{
struct lu_buf *buf;
return buf;
}
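+/* Free the memory held by \a buf (kmalloc'ed or vmalloc'ed, as recorded in
+ * lb_vmalloc) and reset it to an empty state. */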
+void mdd_buf_put(struct lu_buf *buf)
+{
+ if (buf == NULL || buf->lb_buf == NULL)
+ return;
+ if (buf->lb_vmalloc)
+ OBD_VFREE(buf->lb_buf, buf->lb_len);
+ else
+ OBD_FREE(buf->lb_buf, buf->lb_len);
+ buf->lb_buf = NULL;
+ buf->lb_len = 0;
+}
+
+const struct lu_buf *mdd_buf_get_const(const struct lu_env *env,
+ const void *area, ssize_t len)
+{
+ struct lu_buf *buf;
+
+ buf = &mdd_env_info(env)->mti_buf;
+ buf->lb_buf = (void *)area;
+ buf->lb_len = len;
+ return buf;
+}
+
+#define BUF_VMALLOC_SIZE (CFS_PAGE_SIZE<<2) /* 16k */
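+/* Return the per-environment \a mti_big_buf resized to hold at least \a len
+ * bytes.  Requests up to BUF_VMALLOC_SIZE try OBD_ALLOC first and fall back
+ * to OBD_VMALLOC; on allocation failure lb_buf is NULL and lb_len is 0. */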
+struct lu_buf *mdd_buf_alloc(const struct lu_env *env, ssize_t len)
+{
+ struct lu_buf *buf = &mdd_env_info(env)->mti_big_buf;
+
+ if ((len > buf->lb_len) && (buf->lb_buf != NULL)) {
+ if (buf->lb_vmalloc)
+ OBD_VFREE(buf->lb_buf, buf->lb_len);
+ else
+ OBD_FREE(buf->lb_buf, buf->lb_len);
+ buf->lb_buf = NULL;
+ }
+ if (buf->lb_buf == NULL) {
+ buf->lb_len = len;
+ if (buf->lb_len <= BUF_VMALLOC_SIZE) {
+ OBD_ALLOC(buf->lb_buf, buf->lb_len);
+ buf->lb_vmalloc = 0;
+ }
+ if (buf->lb_buf == NULL) {
+ OBD_VMALLOC(buf->lb_buf, buf->lb_len);
+ buf->lb_vmalloc = 1;
+ }
+ if (buf->lb_buf == NULL)
+ buf->lb_len = 0;
+ }
+ return buf;
+}
+
+/** Increase the size of the \a mti_big_buf.
+ * preserves old data in buffer
+ * old buffer remains unchanged on error
+ * \retval 0 or -ENOMEM
+ */
+int mdd_buf_grow(const struct lu_env *env, ssize_t len)
+{
+ struct lu_buf *oldbuf = &mdd_env_info(env)->mti_big_buf;
+ struct lu_buf buf;
+
+ LASSERT(len >= oldbuf->lb_len);
+ if (len > BUF_VMALLOC_SIZE) {
+ OBD_VMALLOC(buf.lb_buf, len);
+ buf.lb_vmalloc = 1;
+ } else {
+ OBD_ALLOC(buf.lb_buf, len);
+ buf.lb_vmalloc = 0;
+ }
+ if (buf.lb_buf == NULL)
+ return -ENOMEM;
+
+ buf.lb_len = len;
+ memcpy(buf.lb_buf, oldbuf->lb_buf, oldbuf->lb_len);
+
+ if (oldbuf->lb_vmalloc)
+ OBD_VFREE(oldbuf->lb_buf, oldbuf->lb_len);
+ else
+ OBD_FREE(oldbuf->lb_buf, oldbuf->lb_len);
+
+ memcpy(oldbuf, &buf, sizeof(buf));
+
+ return 0;
+}
+
struct llog_cookie *mdd_max_cookie_get(const struct lu_env *env,
struct mdd_device *mdd)
{
}
if (unlikely(mti->mti_max_cookie == NULL)) {
OBD_ALLOC(mti->mti_max_cookie, max_cookie_size);
- if (unlikely(mti->mti_max_cookie != NULL))
+ if (likely(mti->mti_max_cookie != NULL))
mti->mti_max_cookie_size = max_cookie_size;
}
+ if (likely(mti->mti_max_cookie != NULL))
+ memset(mti->mti_max_cookie, 0, mti->mti_max_cookie_size);
return mti->mti_max_cookie;
}
}
if (unlikely(mti->mti_max_lmm == NULL)) {
OBD_ALLOC(mti->mti_max_lmm, max_lmm_size);
- if (unlikely(mti->mti_max_lmm != NULL))
+ if (likely(mti->mti_max_lmm != NULL))
mti->mti_max_lmm_size = max_lmm_size;
}
return mti->mti_max_lmm;
}
-const struct lu_buf *mdd_buf_get_const(const struct lu_env *env,
- const void *area, ssize_t len)
-{
- struct lu_buf *buf;
-
- buf = &mdd_env_info(env)->mti_buf;
- buf->lb_buf = (void *)area;
- buf->lb_len = len;
- return buf;
-}
-
-struct mdd_thread_info *mdd_env_info(const struct lu_env *env)
-{
- struct mdd_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &mdd_thread_key);
- LASSERT(info != NULL);
- return info;
-}
-
struct lu_object *mdd_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *d)
}
}
-static int mdd_object_init(const struct lu_env *env, struct lu_object *o)
+static int mdd_object_init(const struct lu_env *env, struct lu_object *o,
+ const struct lu_object_conf *unused)
{
- struct mdd_device *d = lu2mdd_dev(o->lo_dev);
- struct lu_object *below;
+ struct mdd_device *d = lu2mdd_dev(o->lo_dev);
+ struct mdd_object *mdd_obj = lu2mdd_obj(o);
+ struct lu_object *below;
struct lu_device *under;
ENTRY;
- under = &d->mdd_child->dd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
- mdd_pdlock_init(lu2mdd_obj(o));
+ mdd_obj->mod_cltime = 0;
+ under = &d->mdd_child->dd_lu_dev;
+ below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
+ mdd_pdlock_init(mdd_obj);
if (below == NULL)
- RETURN(-ENOMEM);
+ RETURN(-ENOMEM);
lu_object_add(o, below);
+
RETURN(0);
}
static void mdd_object_free(const struct lu_env *env, struct lu_object *o)
{
struct mdd_object *mdd = lu2mdd_obj(o);
-
+
lu_object_fini(o);
OBD_FREE_PTR(mdd);
}
static int mdd_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- return (*p)(env, cookie, LUSTRE_MDD_NAME"-object@%p", o);
+ struct mdd_object *mdd = lu2mdd_obj((struct lu_object *)o);
+ return (*p)(env, cookie, LUSTRE_MDD_NAME"-object@%p(open_count=%d, "
+ "valid=%x, cltime="LPU64", flags=%lx)",
+ mdd, mdd->mod_count, mdd->mod_valid,
+ mdd->mod_cltime, mdd->mod_flags);
+}
+
+static const struct lu_object_operations mdd_lu_obj_ops = {
+ .loo_object_init = mdd_object_init,
+ .loo_object_start = mdd_object_start,
+ .loo_object_free = mdd_object_free,
+ .loo_object_print = mdd_object_print,
+};
+
+struct mdd_object *mdd_object_find(const struct lu_env *env,
+ struct mdd_device *d,
+ const struct lu_fid *f)
+{
+ return md2mdd_obj(md_object_find_slice(env, &d->mdd_md_dev, f));
}
-/* orphan handling is here */
-static void mdd_object_delete(const struct lu_env *env,
- struct lu_object *o)
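+/* Resolve \a path, relative to the filesystem root, into a fid by looking up
+ * one path component at a time with mdd_lookup(). */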
+static int mdd_path2fid(const struct lu_env *env, struct mdd_device *mdd,
+ const char *path, struct lu_fid *fid)
{
- struct mdd_object *mdd_obj = lu2mdd_obj(o);
- struct thandle *handle = NULL;
+ struct lu_buf *buf;
+ struct lu_fid *f = &mdd_env_info(env)->mti_fid;
+ struct mdd_object *obj;
+ struct lu_name *lname = &mdd_env_info(env)->mti_name;
+ char *name;
+ int rc = 0;
ENTRY;
- if (lu2mdd_dev(o->lo_dev)->mdd_orphans == NULL)
- return;
+ /* temp buffer for path element */
+ buf = mdd_buf_alloc(env, PATH_MAX);
+ if (buf->lb_buf == NULL)
+ RETURN(-ENOMEM);
+
+ lname->ln_name = name = buf->lb_buf;
+ lname->ln_namelen = 0;
+ *f = mdd->mdd_root_fid;
- if (mdd_obj->mod_flags & ORPHAN_OBJ) {
- mdd_txn_param_build(env, lu2mdd_dev(o->lo_dev),
- MDD_TXN_INDEX_DELETE_OP);
- handle = mdd_trans_start(env, lu2mdd_dev(o->lo_dev));
- if (IS_ERR(handle))
- CERROR("Cannot get thandle\n");
- else {
- mdd_write_lock(env, mdd_obj);
- /* let's remove obj from the orphan list */
- __mdd_orphan_del(env, mdd_obj, handle);
- mdd_write_unlock(env, mdd_obj);
- mdd_trans_stop(env, lu2mdd_dev(o->lo_dev),
- 0, handle);
+ while (1) {
+ while (*path == '/')
+ path++;
+ if (*path == '\0')
+ break;
+ while (*path != '/' && *path != '\0') {
+ *name = *path;
+ path++;
+ name++;
+ lname->ln_namelen++;
}
+
+ *name = '\0';
+ /* find obj corresponding to fid */
+ obj = mdd_object_find(env, mdd, f);
+ if (obj == NULL)
+ GOTO(out, rc = -EREMOTE);
+ if (IS_ERR(obj))
+ GOTO(out, rc = PTR_ERR(obj));
+ /* get child fid from parent and name */
+ rc = mdd_lookup(env, &obj->mod_obj, lname, f, NULL);
+ mdd_object_put(env, obj);
+ if (rc)
+ break;
+
+ name = buf->lb_buf;
+ lname->ln_namelen = 0;
}
+
+ if (!rc)
+ *fid = *f;
+out:
+ RETURN(rc);
}
-static struct lu_object_operations mdd_lu_obj_ops = {
- .loo_object_init = mdd_object_init,
- .loo_object_start = mdd_object_start,
- .loo_object_free = mdd_object_free,
- .loo_object_print = mdd_object_print,
- .loo_object_delete = mdd_object_delete
+/** The maximum depth that fid2path() will search.
+ * This is limited only because we want to store the fids for
+ * historical path lookup purposes.
+ */
+#define MAX_PATH_DEPTH 100
+
+/** mdd_path() lookup structure. */
+struct path_lookup_info {
+ __u64 pli_recno; /**< history point */
+ __u64 pli_currec; /**< current record */
+ struct lu_fid pli_fid;
+ struct lu_fid pli_fids[MAX_PATH_DEPTH]; /**< path, in fids */
+ struct mdd_object *pli_mdd_obj;
+ char *pli_path; /**< full path */
+ int pli_pathlen;
+ int pli_linkno; /**< which hardlink to follow */
+ int pli_fidcount; /**< number of \a pli_fids */
};
-struct mdd_object *mdd_object_find(const struct lu_env *env,
- struct mdd_device *d,
- const struct lu_fid *f)
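+/* Build the current full path of \a pli->pli_mdd_obj by following the link
+ * EA (parent fid + name) up to the root, then verify the result by resolving
+ * the path forward again with mdd_path2fid(); returns -EAGAIN on mismatch. */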
+static int mdd_path_current(const struct lu_env *env,
+ struct path_lookup_info *pli)
+{
+ struct mdd_device *mdd = mdo2mdd(&pli->pli_mdd_obj->mod_obj);
+ struct mdd_object *mdd_obj;
+ struct lu_buf *buf = NULL;
+ struct link_ea_header *leh;
+ struct link_ea_entry *lee;
+ struct lu_name *tmpname = &mdd_env_info(env)->mti_name;
+ struct lu_fid *tmpfid = &mdd_env_info(env)->mti_fid;
+ char *ptr;
+ int reclen;
+ int rc;
+ ENTRY;
+
+ ptr = pli->pli_path + pli->pli_pathlen - 1;
+ *ptr = 0;
+ --ptr;
+ pli->pli_fidcount = 0;
+ pli->pli_fids[0] = *(struct lu_fid *)mdd_object_fid(pli->pli_mdd_obj);
+
+ while (!mdd_is_root(mdd, &pli->pli_fids[pli->pli_fidcount])) {
+ mdd_obj = mdd_object_find(env, mdd,
+ &pli->pli_fids[pli->pli_fidcount]);
+ if (mdd_obj == NULL)
+ GOTO(out, rc = -EREMOTE);
+ if (IS_ERR(mdd_obj))
+ GOTO(out, rc = PTR_ERR(mdd_obj));
+ rc = lu_object_exists(&mdd_obj->mod_obj.mo_lu);
+ if (rc <= 0) {
+ mdd_object_put(env, mdd_obj);
+ if (rc == -1)
+ rc = -EREMOTE;
+ else if (rc == 0)
+ /* Do I need to error out here? */
+ rc = -ENOENT;
+ GOTO(out, rc);
+ }
+
+ /* Get parent fid and object name */
+ mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
+ buf = mdd_links_get(env, mdd_obj);
+ mdd_read_unlock(env, mdd_obj);
+ mdd_object_put(env, mdd_obj);
+ if (IS_ERR(buf))
+ GOTO(out, rc = PTR_ERR(buf));
+
+ leh = buf->lb_buf;
+ lee = (struct link_ea_entry *)(leh + 1); /* link #0 */
+ mdd_lee_unpack(lee, &reclen, tmpname, tmpfid);
+
+ /* If set, use link #linkno for path lookup, otherwise use
+ link #0. Only do this for the final path element. */
+ if ((pli->pli_fidcount == 0) &&
+ (pli->pli_linkno < leh->leh_reccount)) {
+ int count;
+ for (count = 0; count < pli->pli_linkno; count++) {
+ lee = (struct link_ea_entry *)
+ ((char *)lee + reclen);
+ mdd_lee_unpack(lee, &reclen, tmpname, tmpfid);
+ }
+ if (pli->pli_linkno < leh->leh_reccount - 1)
+ /* indicate to user there are more links */
+ pli->pli_linkno++;
+ }
+
+ /* Pack the name in the end of the buffer */
+ ptr -= tmpname->ln_namelen;
+ if (ptr - 1 <= pli->pli_path)
+ GOTO(out, rc = -EOVERFLOW);
+ strncpy(ptr, tmpname->ln_name, tmpname->ln_namelen);
+ *(--ptr) = '/';
+
+ /* Store the parent fid for historic lookup */
+ if (++pli->pli_fidcount >= MAX_PATH_DEPTH)
+ GOTO(out, rc = -EOVERFLOW);
+ pli->pli_fids[pli->pli_fidcount] = *tmpfid;
+ }
+
+ /* Verify that our path hasn't changed since we started the lookup.
+ Record the current index, and verify the path resolves to the
+ same fid. If it does, then the path is correct as of this index. */
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
+ pli->pli_currec = mdd->mdd_cl.mc_index;
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ rc = mdd_path2fid(env, mdd, ptr, &pli->pli_fid);
+ if (rc) {
+ CDEBUG(D_INFO, "mdd_path2fid(%s) failed %d\n", ptr, rc);
+ GOTO(out, rc = -EAGAIN);
+ }
+ if (!lu_fid_eq(&pli->pli_fids[0], &pli->pli_fid)) {
+ CDEBUG(D_INFO, "mdd_path2fid(%s) found another FID o="DFID
+ " n="DFID"\n", ptr, PFID(&pli->pli_fids[0]),
+ PFID(&pli->pli_fid));
+ GOTO(out, rc = -EAGAIN);
+ }
+ ptr++; /* skip leading / */
+ memmove(pli->pli_path, ptr, pli->pli_path + pli->pli_pathlen - ptr);
+
+ EXIT;
+out:
+ if (buf && !IS_ERR(buf) && buf->lb_vmalloc)
+ /* if we vmalloced a large buffer drop it */
+ mdd_buf_put(buf);
+
+ return rc;
+}
+
+static int mdd_path_historic(const struct lu_env *env,
+ struct path_lookup_info *pli)
{
- struct lu_object *o, *lo;
- struct mdd_object *m;
+ return 0;
+}
+
+/* Returns the full path to this fid, as of changelog record recno. */
+static int mdd_path(const struct lu_env *env, struct md_object *obj,
+ char *path, int pathlen, __u64 *recno, int *linkno)
+{
+ struct path_lookup_info *pli;
+ int tries = 3;
+ int rc = -EAGAIN;
ENTRY;
- o = lu_object_find(env, mdd2lu_dev(d)->ld_site, f);
- if (IS_ERR(o))
- m = (struct mdd_object *)o;
- else {
- lo = lu_object_locate(o->lo_header, mdd2lu_dev(d)->ld_type);
- /* remote object can't be located and should be put then */
- if (lo == NULL)
- lu_object_put(env, o);
- m = lu2mdd_obj(lo);
+ if (pathlen < 3)
+ RETURN(-EOVERFLOW);
+
+ if (mdd_is_root(mdo2mdd(obj), mdd_object_fid(md2mdd_obj(obj)))) {
+ path[0] = '\0';
+ RETURN(0);
+ }
+
+ OBD_ALLOC_PTR(pli);
+ if (pli == NULL)
+ RETURN(-ENOMEM);
+
+ pli->pli_mdd_obj = md2mdd_obj(obj);
+ pli->pli_recno = *recno;
+ pli->pli_path = path;
+ pli->pli_pathlen = pathlen;
+ pli->pli_linkno = *linkno;
+
+ /* Retry multiple times in case file is being moved */
+ while (tries-- && rc == -EAGAIN)
+ rc = mdd_path_current(env, pli);
+
+ /* For historical path lookup, the current links may not have existed
+ * at "recno" time. We must switch over to earlier links/parents
+ * by using the changelog records. If the earlier parent doesn't
+ * exist, we must search back through the changelog to reconstruct
+ * its parents, then check if it exists, etc.
+ * We may ignore this problem for the initial implementation and
+ * state that an "original" hardlink must still exist for us to find
+ * historic path name. */
+ if (pli->pli_recno != -1) {
+ rc = mdd_path_historic(env, pli);
+ } else {
+ *recno = pli->pli_currec;
+ /* Return next link index to caller */
+ *linkno = pli->pli_linkno;
}
- RETURN(m);
+
+ OBD_FREE_PTR(pli);
+
+ RETURN(rc);
}
int mdd_get_flags(const struct lu_env *env, struct mdd_object *obj)
ENTRY;
rc = mdd_la_get(env, obj, la, BYPASS_CAPA);
- if (rc == 0)
+ if (rc == 0) {
mdd_flags_xlate(obj, la->la_flags);
+ if (S_ISDIR(la->la_mode) && la->la_nlink == 1)
+ obj->mod_flags |= MNLINK_OBJ;
+ }
RETURN(rc);
}
RETURN(rc);
}
-static int mdd_get_default_md(struct mdd_object *mdd_obj,
- struct lov_mds_md *lmm, int *size)
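+/* Fill \a lmm, treated as a lov_user_md, with the file system default
+ * striping taken from the LOV descriptor; returns the number of bytes
+ * written, or 0 if \a lmm is NULL. */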
+int mdd_get_default_md(struct mdd_object *mdd_obj, struct lov_mds_md *lmm)
{
struct lov_desc *ldesc;
struct mdd_device *mdd = mdo2mdd(&mdd_obj->mod_obj);
+ struct lov_user_md *lum = (struct lov_user_md*)lmm;
ENTRY;
+ if (!lum)
+ RETURN(0);
+
ldesc = &mdd->mdd_obd_dev->u.mds.mds_lov_desc;
LASSERT(ldesc != NULL);
- if (!lmm)
- RETURN(0);
-
- lmm->lmm_magic = LOV_MAGIC_V1;
- lmm->lmm_object_gr = LOV_OBJECT_GROUP_DEFAULT;
- lmm->lmm_pattern = ldesc->ld_pattern;
- lmm->lmm_stripe_size = ldesc->ld_default_stripe_size;
- lmm->lmm_stripe_count = ldesc->ld_default_stripe_count;
- *size = sizeof(struct lov_mds_md);
+ lum->lmm_magic = LOV_MAGIC_V1;
+ lum->lmm_object_seq = LOV_OBJECT_GROUP_DEFAULT;
+ lum->lmm_pattern = ldesc->ld_pattern;
+ lum->lmm_stripe_size = ldesc->ld_default_stripe_size;
+ lum->lmm_stripe_count = ldesc->ld_default_stripe_count;
+ lum->lmm_stripe_offset = ldesc->ld_default_stripe_offset;
- RETURN(sizeof(struct lov_mds_md));
+ RETURN(sizeof(*lum));
}
/* get lov EA only */
RETURN(0);
rc = mdd_get_md(env, mdd_obj, ma->ma_lmm, &ma->ma_lmm_size,
- MDS_LOV_MD_NAME);
-
- if (rc == 0 && (ma->ma_need & MA_LOV_DEF)) {
- rc = mdd_get_default_md(mdd_obj, ma->ma_lmm,
- &ma->ma_lmm_size);
- }
-
+ XATTR_NAME_LOV);
+ if (rc == 0 && (ma->ma_need & MA_LOV_DEF))
+ rc = mdd_get_default_md(mdd_obj, ma->ma_lmm);
if (rc > 0) {
+ ma->ma_lmm_size = rc;
ma->ma_valid |= MA_LOV;
rc = 0;
}
int rc;
ENTRY;
- mdd_read_lock(env, mdd_obj);
+ mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = __mdd_lmm_get(env, mdd_obj, ma);
mdd_read_unlock(env, mdd_obj);
RETURN(rc);
RETURN(0);
rc = mdd_get_md(env, mdd_obj, ma->ma_lmv, &ma->ma_lmv_size,
- MDS_LMV_MD_NAME);
+ XATTR_NAME_LMV);
if (rc > 0) {
ma->ma_valid |= MA_LMV;
rc = 0;
RETURN(rc);
}
-static int mdd_attr_get_internal(const struct lu_env *env,
- struct mdd_object *mdd_obj,
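+/* Fill the HSM and SOM parts of \a ma, as requested by ma_need, from the
+ * on-disk LMA xattr; parts that are already valid are left untouched. */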
+static int __mdd_lma_get(const struct lu_env *env, struct mdd_object *mdd_obj,
+ struct md_attr *ma)
+{
+ struct mdd_thread_info *info = mdd_env_info(env);
+ struct lustre_mdt_attrs *lma =
+ (struct lustre_mdt_attrs *)info->mti_xattr_buf;
+ int lma_size;
+ int rc;
+ ENTRY;
+
+ /* If all needed data are already valid, nothing to do */
+ if ((ma->ma_valid & (MA_HSM | MA_SOM)) ==
+ (ma->ma_need & (MA_HSM | MA_SOM)))
+ RETURN(0);
+
+ /* Read LMA from disk EA */
+ lma_size = sizeof(info->mti_xattr_buf);
+ rc = mdd_get_md(env, mdd_obj, lma, &lma_size, XATTR_NAME_LMA);
+ if (rc <= 0)
+ RETURN(rc);
+
+ /* There is no need to check for LMA incompatibility here: that check is
+ * already done in osd_ea_fid_get() and would fail long before this code
+ * is called.
+ * So, if we get here, the LMA is compatible.
+ */
+
+ lustre_lma_swab(lma);
+
+ /* Copy HSM flags */
+ if (ma->ma_need & MA_HSM) {
+ if (lma->lma_compat & LMAC_HSM)
+ ma->ma_hsm.mh_flags = lma->lma_flags & HSM_FLAGS_MASK;
+ else
+ ma->ma_hsm.mh_flags = 0;
+ ma->ma_valid |= MA_HSM;
+ }
+
+ /* Copy SOM */
+ if (ma->ma_need & MA_SOM && lma->lma_compat & LMAC_SOM) {
+ LASSERT(ma->ma_som != NULL);
+ ma->ma_som->msd_ioepoch = lma->lma_ioepoch;
+ ma->ma_som->msd_size = lma->lma_som_size;
+ ma->ma_som->msd_blocks = lma->lma_som_blocks;
+ ma->ma_som->msd_mountid = lma->lma_som_mountid;
+ ma->ma_valid |= MA_SOM;
+ }
+
+ RETURN(0);
+}
+
+int mdd_attr_get_internal(const struct lu_env *env, struct mdd_object *mdd_obj,
struct md_attr *ma)
{
int rc = 0;
if (S_ISDIR(mdd_object_type(mdd_obj)))
rc = __mdd_lmv_get(env, mdd_obj, ma);
}
+ if (rc == 0 && ma->ma_need & (MA_HSM | MA_SOM)) {
+ if (S_ISREG(mdd_object_type(mdd_obj)))
+ rc = __mdd_lma_get(env, mdd_obj, ma);
+ }
#ifdef CONFIG_FS_POSIX_ACL
if (rc == 0 && ma->ma_need & MA_ACL_DEF) {
if (S_ISDIR(mdd_object_type(mdd_obj)))
- rc = mdd_acl_def_get(env, mdd_obj, ma);
+ rc = mdd_def_acl_get(env, mdd_obj, ma);
}
#endif
- CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64"\n",
- rc, ma->ma_valid);
+ CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
+ rc, ma->ma_valid, ma->ma_lmm);
RETURN(rc);
}
struct mdd_object *mdd_obj, struct md_attr *ma)
{
int rc;
- int needlock = ma->ma_need & (MA_LOV | MA_LMV | MA_ACL_DEF);
+ int needlock = ma->ma_need &
+ (MA_LOV | MA_LMV | MA_ACL_DEF | MA_HSM | MA_SOM);
if (needlock)
- mdd_read_lock(env, mdd_obj);
+ mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdd_attr_get_internal(env, mdd_obj, ma);
if (needlock)
mdd_read_unlock(env, mdd_obj);
LASSERT(mdd_object_exists(mdd_obj));
- mdd_read_lock(env, mdd_obj);
+ mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdo_xattr_get(env, mdd_obj, buf, name,
mdd_object_capa(env, mdd_obj));
mdd_read_unlock(env, mdd_obj);
LASSERT(mdd_object_exists(mdd_obj));
next = mdd_object_child(mdd_obj);
- mdd_read_lock(env, mdd_obj);
+ mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = next->do_body_ops->dbo_read(env, next, buf, &pos,
mdd_object_capa(env, mdd_obj));
mdd_read_unlock(env, mdd_obj);
ENTRY;
- mdd_read_lock(env, mdd_obj);
+ mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdo_xattr_list(env, mdd_obj, buf, mdd_object_capa(env, mdd_obj));
mdd_read_unlock(env, mdd_obj);
int mdd_object_create_internal(const struct lu_env *env, struct mdd_object *p,
struct mdd_object *c, struct md_attr *ma,
- struct thandle *handle)
+ struct thandle *handle,
+ const struct md_op_spec *spec)
{
struct lu_attr *attr = &ma->ma_attr;
struct dt_allocation_hint *hint = &mdd_env_info(env)->mti_hint;
+ struct dt_object_format *dof = &mdd_env_info(env)->mti_dof;
+ const struct dt_index_features *feat = spec->sp_feat;
int rc;
ENTRY;
struct dt_object *next = mdd_object_child(c);
LASSERT(next);
+ if (feat != &dt_directory_features && feat != NULL)
+ dof->dof_type = DFT_INDEX;
+ else
+ dof->dof_type = dt_mode_to_dft(attr->la_mode);
+
+ dof->u.dof_idx.di_feat = feat;
+
/* @hint will be initialized by underlying device. */
next->do_ops->do_ah_init(env, hint,
p ? mdd_object_child(p) : NULL,
attr->la_mode & S_IFMT);
- rc = mdo_create_obj(env, c, attr, hint, handle);
+
+ rc = mdo_create_obj(env, c, attr, hint, dof, handle);
LASSERT(ergo(rc == 0, mdd_object_exists(c)));
} else
rc = -EEXIST;
RETURN(rc);
}
+/**
+ * Make sure that ctime only ever increases.
+ */
+static inline int mdd_attr_check(const struct lu_env *env,
+ struct mdd_object *obj,
+ struct lu_attr *attr)
+{
+ struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
+ int rc;
+ ENTRY;
+
+ if (attr->la_valid & LA_CTIME) {
+ rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
+ if (rc)
+ RETURN(rc);
+
+ if (attr->la_ctime < tmp_la->la_ctime)
+ attr->la_valid &= ~(LA_MTIME | LA_CTIME);
+ else if (attr->la_valid == LA_CTIME &&
+ attr->la_ctime == tmp_la->la_ctime)
+ attr->la_valid &= ~LA_CTIME;
+ }
+ RETURN(0);
+}
-int mdd_attr_set_internal(const struct lu_env *env, struct mdd_object *obj,
- const struct lu_attr *attr, struct thandle *handle,
- const int needacl)
+int mdd_attr_set_internal(const struct lu_env *env,
+ struct mdd_object *obj,
+ struct lu_attr *attr,
+ struct thandle *handle,
+ int needacl)
{
int rc;
ENTRY;
RETURN(rc);
}
-int mdd_attr_set_internal_locked(const struct lu_env *env,
- struct mdd_object *o,
- const struct lu_attr *attr,
- struct thandle *handle, int needacl)
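+/* Like mdd_attr_set_internal(), but first call mdd_attr_check() so that
+ * mtime/ctime updates which would move ctime backwards are dropped. */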
+int mdd_attr_check_set_internal(const struct lu_env *env,
+ struct mdd_object *obj,
+ struct lu_attr *attr,
+ struct thandle *handle,
+ int needacl)
{
int rc;
ENTRY;
- needacl = needacl && (attr->la_valid & LA_MODE);
+ rc = mdd_attr_check(env, obj, attr);
+ if (rc)
+ RETURN(rc);
+
+ if (attr->la_valid)
+ rc = mdd_attr_set_internal(env, obj, attr, handle, needacl);
+ RETURN(rc);
+}
+static int mdd_attr_set_internal_locked(const struct lu_env *env,
+ struct mdd_object *obj,
+ struct lu_attr *attr,
+ struct thandle *handle,
+ int needacl)
+{
+ int rc;
+ ENTRY;
+
+ needacl = needacl && (attr->la_valid & LA_MODE);
+ if (needacl)
+ mdd_write_lock(env, obj, MOR_TGT_CHILD);
+ rc = mdd_attr_set_internal(env, obj, attr, handle, needacl);
if (needacl)
- mdd_write_lock(env, o);
+ mdd_write_unlock(env, obj);
+ RETURN(rc);
+}
- rc = mdd_attr_set_internal(env, o, attr, handle, needacl);
+int mdd_attr_check_set_internal_locked(const struct lu_env *env,
+ struct mdd_object *obj,
+ struct lu_attr *attr,
+ struct thandle *handle,
+ int needacl)
+{
+ int rc;
+ ENTRY;
+ needacl = needacl && (attr->la_valid & LA_MODE);
+ if (needacl)
+ mdd_write_lock(env, obj, MOR_TGT_CHILD);
+ rc = mdd_attr_check_set_internal(env, obj, attr, handle, needacl);
if (needacl)
- mdd_write_unlock(env, o);
+ mdd_write_unlock(env, obj);
RETURN(rc);
}
-static int __mdd_xattr_set(const struct lu_env *env, struct mdd_object *obj,
- const struct lu_buf *buf, const char *name,
- int fl, struct thandle *handle)
+int __mdd_xattr_set(const struct lu_env *env, struct mdd_object *obj,
+ const struct lu_buf *buf, const char *name,
+ int fl, struct thandle *handle)
{
struct lustre_capa *capa = mdd_object_capa(env, obj);
int rc = -EINVAL;
struct lu_attr *la, const struct md_attr *ma)
{
struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
- struct md_ucred *uc = md_ucred(env);
+ struct md_ucred *uc;
int rc;
ENTRY;
if (la->la_valid & (LA_NLINK | LA_RDEV | LA_BLKSIZE))
RETURN(-EPERM);
- /* This is only for set ctime when rename's source is on remote MDS. */
- if (unlikely(la->la_valid == LA_CTIME)) {
- rc = mdd_may_delete(env, NULL, obj, (struct md_attr *)ma, 1, 0);
- RETURN(rc);
- }
+ /* export destroy does not have ->le_ses, but we may want
+ * to drop LUSTRE_SOM_FL. */
+ if (!env->le_ses)
+ RETURN(0);
+
+ uc = md_ucred(env);
rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
if (rc)
RETURN(rc);
+ if (la->la_valid == LA_CTIME) {
+ if (!(ma->ma_attr_flags & MDS_PERM_BYPASS))
+ /* This is only for set ctime when rename's source is
+ * on remote MDS. */
+ rc = mdd_may_delete(env, NULL, obj,
+ (struct md_attr *)ma, 1, 0);
+ if (rc == 0 && la->la_ctime <= tmp_la->la_ctime)
+ la->la_valid &= ~LA_CTIME;
+ RETURN(rc);
+ }
+
if (la->la_valid == LA_ATIME) {
/* This is atime only set for read atime update on close. */
- if (la->la_atime <= tmp_la->la_atime + 0/*XXX:mds_atime_diff*/)
+ if (la->la_atime > tmp_la->la_atime &&
+ la->la_atime <= (tmp_la->la_atime +
+ mdd_obj2mdd_dev(obj)->mdd_atime_diff))
la->la_valid &= ~LA_ATIME;
RETURN(0);
}
-
+
/* Check if flags change. */
if (la->la_valid & LA_FLAGS) {
unsigned int oldflags = 0;
(LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL);
if ((uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CAP_FOWNER))
+ !mdd_capable(uc, CFS_CAP_FOWNER))
RETURN(-EPERM);
/* XXX: the IMMUTABLE and APPEND_ONLY flags can
if (mdd_is_immutable(obj))
oldflags |= LUSTRE_IMMUTABLE_FL;
if (mdd_is_append(obj))
- oldflags |= LUSTRE_APPEND_FL;
+ oldflags |= LUSTRE_APPEND_FL;
if ((oldflags ^ newflags) &&
- !mdd_capable(uc, CAP_LINUX_IMMUTABLE))
+ !mdd_capable(uc, CFS_CAP_LINUX_IMMUTABLE))
RETURN(-EPERM);
if (!S_ISDIR(tmp_la->la_mode))
if ((la->la_valid & (LA_MTIME | LA_ATIME | LA_CTIME)) &&
!(la->la_valid & ~(LA_MTIME | LA_ATIME | LA_CTIME))) {
if ((uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CAP_FOWNER)) {
+ !mdd_capable(uc, CFS_CAP_FOWNER)) {
rc = mdd_permission_internal_locked(env, obj, tmp_la,
- MAY_WRITE);
+ MAY_WRITE,
+ MOR_TGT_CHILD);
if (rc)
RETURN(rc);
}
/* Make sure a caller can chmod. */
if (la->la_valid & LA_MODE) {
- /*
- * Bypass la_vaild == LA_MODE,
- * this is for changing file with SUID or SGID.
- */
+ /* Bypass when la_valid == LA_MODE,
+ * this is for changing a file with SUID or SGID. */
if ((la->la_valid & ~LA_MODE) &&
+ !(ma->ma_attr_flags & MDS_PERM_BYPASS) &&
(uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CAP_FOWNER))
+ !mdd_capable(uc, CFS_CAP_FOWNER))
RETURN(-EPERM);
- if (la->la_mode == (umode_t) -1)
+ if (la->la_mode == (cfs_umode_t) -1)
la->la_mode = tmp_la->la_mode;
else
la->la_mode = (la->la_mode & S_IALLUGO) |
(tmp_la->la_mode & ~S_IALLUGO);
/* Also check the setgid bit! */
- if (!lustre_in_group_p(uc, (la->la_valid & LA_GID) ? la->la_gid :
- tmp_la->la_gid) && !mdd_capable(uc, CAP_FSETID))
+ if (!lustre_in_group_p(uc, (la->la_valid & LA_GID) ?
+ la->la_gid : tmp_la->la_gid) &&
+ !mdd_capable(uc, CFS_CAP_FSETID))
la->la_mode &= ~S_ISGID;
} else {
la->la_mode = tmp_la->la_mode;
la->la_uid = tmp_la->la_uid;
if (((uc->mu_fsuid != tmp_la->la_uid) ||
(la->la_uid != tmp_la->la_uid)) &&
- !mdd_capable(uc, CAP_CHOWN))
+ !mdd_capable(uc, CFS_CAP_CHOWN))
RETURN(-EPERM);
- /*
- * If the user or group of a non-directory has been
+ /* If the user or group of a non-directory has been
* changed by a non-root user, remove the setuid bit.
* 19981026 David C Niemi <niemi@tux.org>
*
* to avoid some races. This is the behavior we had in
* 2.0. The check for non-root was definitely wrong
* for 2.2 anyway, as it should have been using
- * CAP_FSETID rather than fsuid -- 19990830 SD.
- */
+ * CAP_FSETID rather than fsuid -- 19990830 SD. */
if (((tmp_la->la_mode & S_ISUID) == S_ISUID) &&
!S_ISDIR(tmp_la->la_mode)) {
la->la_mode &= ~S_ISUID;
if (((uc->mu_fsuid != tmp_la->la_uid) ||
((la->la_gid != tmp_la->la_gid) &&
!lustre_in_group_p(uc, la->la_gid))) &&
- !mdd_capable(uc, CAP_CHOWN))
+ !mdd_capable(uc, CFS_CAP_CHOWN))
RETURN(-EPERM);
- /*
- * Likewise, if the user or group of a non-directory
+ /* Likewise, if the user or group of a non-directory
* has been changed by a non-root user, remove the
* setgid bit UNLESS there is no group execute bit
* (this would be a file marked for mandatory
* locking). 19981026 David C Niemi <niemi@tux.org>
*
* Removed the fsuid check (see the comment above) --
- * 19990830 SD.
- */
+ * 19990830 SD. */
if (((tmp_la->la_mode & (S_ISGID | S_IXGRP)) ==
(S_ISGID | S_IXGRP)) && !S_ISDIR(tmp_la->la_mode)) {
la->la_mode &= ~S_ISGID;
}
}
- /* For truncate (or setsize), we should have MAY_WRITE perm */
- if (la->la_valid & (LA_SIZE | LA_BLOCKS)) {
- if (!((la->la_valid & MDS_OPEN_OWNEROVERRIDE) &&
- (uc->mu_fsuid == tmp_la->la_uid)) &&
- !(ma->ma_attr_flags & MDS_PERM_BYPASS)) {
- rc = mdd_permission_internal_locked(env, obj, tmp_la,
- MAY_WRITE);
- if (rc)
- RETURN(rc);
- }
-
+ /* In both the Size-on-MDS case and the truncate case,
+ * "la->la_valid & (LA_SIZE | LA_BLOCKS)" is true.
+ * We distinguish them by "ma->ma_attr_flags & MDS_SOM".
+ * In the SOM case it is set: the MAY_WRITE permission was already checked
+ * at open time, so there is no need to check it again. In the truncate
+ * case it is not set, so the MAY_WRITE permission must be checked here. */
+ if (ma->ma_attr_flags & MDS_SOM) {
/* For the "Size-on-MDS" setattr update, merge coming
* attributes with the set in the inode. BUG 10641 */
if ((la->la_valid & LA_ATIME) &&
if ((la->la_valid & LA_CTIME) &&
(la->la_ctime <= tmp_la->la_ctime))
la->la_valid &= ~(LA_MTIME | LA_CTIME);
- } else if (la->la_valid & LA_CTIME) {
- /* The pure setattr, it has the priority over what is already
- * set, do not drop it if ctime is equal. */
- if (la->la_ctime < tmp_la->la_ctime)
- la->la_valid &= ~(LA_ATIME | LA_MTIME | LA_CTIME);
+ } else {
+ if (la->la_valid & (LA_SIZE | LA_BLOCKS)) {
+ if (!((ma->ma_attr_flags & MDS_OPEN_OWNEROVERRIDE) &&
+ (uc->mu_fsuid == tmp_la->la_uid)) &&
+ !(ma->ma_attr_flags & MDS_PERM_BYPASS)) {
+ rc = mdd_permission_internal_locked(env, obj,
+ tmp_la, MAY_WRITE,
+ MOR_TGT_CHILD);
+ if (rc)
+ RETURN(rc);
+ }
+ }
+ if (la->la_valid & LA_CTIME) {
+ /* The pure setattr, it has the priority over what is
+ * already set, do not drop it if ctime is equal. */
+ if (la->la_ctime < tmp_la->la_ctime)
+ la->la_valid &= ~(LA_ATIME | LA_MTIME |
+ LA_CTIME);
+ }
}
RETURN(0);
}
+/** Store a data change changelog record
+ * If this fails, we must fail the whole transaction; we don't
+ * want the change to commit without the log entry.
+ * \param mdd_obj - mdd_object of change
+ * \param handle - transaction handle
+ */
+static int mdd_changelog_data_store(const struct lu_env *env,
+ struct mdd_device *mdd,
+ enum changelog_rec_type type,
+ int flags,
+ struct mdd_object *mdd_obj,
+ struct thandle *handle)
+{
+ const struct lu_fid *tfid = mdo2fid(mdd_obj);
+ struct llog_changelog_rec *rec;
+ struct lu_buf *buf;
+ int reclen;
+ int rc;
+
+ /* Not recording */
+ if (!(mdd->mdd_cl.mc_flags & CLM_ON))
+ RETURN(0);
+ if ((mdd->mdd_cl.mc_mask & (1 << type)) == 0)
+ RETURN(0);
+
+ LASSERT(handle != NULL);
+ LASSERT(mdd_obj != NULL);
+
+ if ((type >= CL_MTIME) && (type <= CL_ATIME) &&
+ cfs_time_before_64(mdd->mdd_cl.mc_starttime, mdd_obj->mod_cltime)) {
+ /* Don't need multiple updates in this log */
+ /* Don't check under lock - no big deal if we get an extra
+ entry */
+ RETURN(0);
+ }
+
+ reclen = llog_data_len(sizeof(*rec));
+ buf = mdd_buf_alloc(env, reclen);
+ if (buf->lb_buf == NULL)
+ RETURN(-ENOMEM);
+ rec = (struct llog_changelog_rec *)buf->lb_buf;
+
+ rec->cr.cr_flags = CLF_VERSION | (CLF_FLAGMASK & flags);
+ rec->cr.cr_type = (__u32)type;
+ rec->cr.cr_tfid = *tfid;
+ rec->cr.cr_namelen = 0;
+ mdd_obj->mod_cltime = cfs_time_current_64();
+
+ rc = mdd_changelog_llog_write(mdd, rec, handle);
+ if (rc < 0) {
+ CERROR("changelog failed: rc=%d op%d t"DFID"\n",
+ rc, type, PFID(tfid));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
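+/* Record a single changelog entry of \a type for \a obj inside its own
+ * transaction. */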
+int mdd_changelog(const struct lu_env *env, enum changelog_rec_type type,
+ int flags, struct md_object *obj)
+{
+ struct thandle *handle;
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct mdd_device *mdd = mdo2mdd(obj);
+ int rc;
+ ENTRY;
+
+ handle = mdd_trans_start(env, mdd);
+
+ if (IS_ERR(handle))
+ RETURN(PTR_ERR(handle));
+
+ rc = mdd_changelog_data_store(env, mdd, type, flags, mdd_obj,
+ handle);
+
+ mdd_trans_stop(env, mdd, rc, handle);
+
+ RETURN(rc);
+}
+
+/**
+ * Should be called with write lock held.
+ *
+ * \see mdd_lma_set_locked().
+ */
+static int __mdd_lma_set(const struct lu_env *env, struct mdd_object *mdd_obj,
+ const struct md_attr *ma, struct thandle *handle)
+{
+ struct mdd_thread_info *info = mdd_env_info(env);
+ struct lu_buf *buf;
+ struct lustre_mdt_attrs *lma =
+ (struct lustre_mdt_attrs *) info->mti_xattr_buf;
+ int lmasize = sizeof(struct lustre_mdt_attrs);
+ int rc = 0;
+
+ ENTRY;
+
+ /* If either the HSM or the SOM part is not valid, read the LMA from
+ * disk first */
+ if ((~ma->ma_valid) & (MA_HSM | MA_SOM)) {
+ rc = mdd_get_md(env, mdd_obj, lma, &lmasize, XATTR_NAME_LMA);
+ if (rc <= 0)
+ RETURN(rc);
+
+ lustre_lma_swab(lma);
+ } else {
+ memset(lma, 0, lmasize);
+ }
+
+ /* Copy HSM data */
+ if (ma->ma_valid & MA_HSM) {
+ lma->lma_flags |= ma->ma_hsm.mh_flags & HSM_FLAGS_MASK;
+ lma->lma_compat |= LMAC_HSM;
+ }
+
+ /* Copy SOM data */
+ if (ma->ma_valid & MA_SOM) {
+ LASSERT(ma->ma_som != NULL);
+ if (ma->ma_som->msd_ioepoch == IOEPOCH_INVAL) {
+ lma->lma_compat &= ~LMAC_SOM;
+ } else {
+ lma->lma_compat |= LMAC_SOM;
+ lma->lma_ioepoch = ma->ma_som->msd_ioepoch;
+ lma->lma_som_size = ma->ma_som->msd_size;
+ lma->lma_som_blocks = ma->ma_som->msd_blocks;
+ lma->lma_som_mountid = ma->ma_som->msd_mountid;
+ }
+ }
+
+ /* Copy FID */
+ memcpy(&lma->lma_self_fid, mdo2fid(mdd_obj), sizeof(lma->lma_self_fid));
+
+ lustre_lma_swab(lma);
+ buf = mdd_buf_get(env, lma, lmasize);
+ rc = __mdd_xattr_set(env, mdd_obj, buf, XATTR_NAME_LMA, 0, handle);
+
+ RETURN(rc);
+}
+
+/**
+ * Save LMA extended attributes with data from \a ma.
+ *
+ * HSM and Size-On-MDS data will be extracted from \a ma if they are valid;
+ * if not, the LMA EA will first be read from disk, modified and written back.
+ *
+ */
+static int mdd_lma_set_locked(const struct lu_env *env,
+ struct mdd_object *mdd_obj,
+ const struct md_attr *ma, struct thandle *handle)
+{
+ int rc;
+
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
+ rc = __mdd_lma_set(env, mdd_obj, ma, handle);
+ mdd_write_unlock(env, mdd_obj);
+ return rc;
+}
+
+/* Precedence for choosing record type when multiple
+ * attributes change: setattr > mtime > ctime > atime
+ * (ctime changes when mtime does, plus chmod/chown.
+ * atime and ctime are independent.) */
+static int mdd_attr_set_changelog(const struct lu_env *env,
+ struct md_object *obj, struct thandle *handle,
+ __u64 valid)
+{
+ struct mdd_device *mdd = mdo2mdd(obj);
+ int bits, type = 0;
+
+ bits = (valid & ~(LA_CTIME|LA_MTIME|LA_ATIME)) ? 1 << CL_SETATTR : 0;
+ bits |= (valid & LA_MTIME) ? 1 << CL_MTIME : 0;
+ bits |= (valid & LA_CTIME) ? 1 << CL_CTIME : 0;
+ bits |= (valid & LA_ATIME) ? 1 << CL_ATIME : 0;
+ bits = bits & mdd->mdd_cl.mc_mask;
+ if (bits == 0)
+ return 0;
+
+ /* The record type is the lowest non-masked set bit */
+ while (bits && ((bits & 1) == 0)) {
+ bits = bits >> 1;
+ type++;
+ }
+
+ /* FYI we only store the first CLF_FLAGMASK bits of la_valid */
+ return mdd_changelog_data_store(env, mdd, type, (int)valid,
+ md2mdd_obj(obj), handle);
+}
+
/* set attr and LOV EA at once, return updated attr */
static int mdd_attr_set(const struct lu_env *env, struct md_object *obj,
const struct md_attr *ma)
struct llog_cookie *logcookies = NULL;
int rc, lmm_size = 0, cookie_size = 0;
struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
+#ifdef HAVE_QUOTA_SUPPORT
+ struct obd_device *obd = mdd->mdd_obd_dev;
+ struct mds_obd *mds = &obd->u.mds;
+ unsigned int qnids[MAXQUOTAS] = { 0, 0 };
+ unsigned int qoids[MAXQUOTAS] = { 0, 0 };
+ int quota_opc = 0, block_count = 0;
+ int inode_pending[MAXQUOTAS] = { 0, 0 };
+ int block_pending[MAXQUOTAS] = { 0, 0 };
+#endif
ENTRY;
- mdd_txn_param_build(env, mdd, MDD_TXN_ATTR_SET_OP);
+ mdd_setattr_txn_param_build(env, obj, (struct md_attr *)ma,
+ MDD_TXN_ATTR_SET_OP);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
GOTO(cleanup, rc = -ENOMEM);
rc = mdd_get_md_locked(env, mdd_obj, lmm, &lmm_size,
- MDS_LOV_MD_NAME);
+ XATTR_NAME_LOV);
if (rc < 0)
GOTO(cleanup, rc);
}
- if (ma->ma_attr.la_valid & (ATTR_MTIME | ATTR_CTIME))
+ if (ma->ma_attr.la_valid & (LA_MTIME | LA_CTIME))
CDEBUG(D_INODE, "setting mtime "LPU64", ctime "LPU64"\n",
ma->ma_attr.la_mtime, ma->ma_attr.la_ctime);
if (rc)
GOTO(cleanup, rc);
+#ifdef HAVE_QUOTA_SUPPORT
+ if (mds->mds_quota && la_copy->la_valid & (LA_UID | LA_GID)) {
+ struct obd_export *exp = md_quota(env)->mq_exp;
+ struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
+
+ rc = mdd_la_get(env, mdd_obj, la_tmp, BYPASS_CAPA);
+ if (!rc) {
+ quota_opc = FSFILT_OP_SETATTR;
+ mdd_quota_wrapper(la_copy, qnids);
+ mdd_quota_wrapper(la_tmp, qoids);
+ /* get file quota for new owner */
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
+ qnids, inode_pending, 1, NULL, 0,
+ NULL, 0);
+ block_count = (la_tmp->la_blocks + 7) >> 3;
+ if (block_count) {
+ void *data = NULL;
+ mdd_data_get(env, mdd_obj, &data);
+ /* get block quota for new owner */
+ lquota_chkquota(mds_quota_interface_ref, obd,
+ exp, qnids, block_pending,
+ block_count, NULL,
+ LQUOTA_FLAGS_BLK, data, 1);
+ }
+ }
+ }
+#endif
+
if (la_copy->la_valid & LA_FLAGS) {
rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
handle, 1);
}
if (rc == 0 && ma->ma_valid & MA_LOV) {
- umode_t mode;
+ cfs_umode_t mode;
mode = mdd_object_type(mdd_obj);
if (S_ISREG(mode) || S_ISDIR(mode)) {
}
}
+ if (rc == 0 && ma->ma_valid & (MA_HSM | MA_SOM)) {
+ cfs_umode_t mode;
+
+ mode = mdd_object_type(mdd_obj);
+ if (S_ISREG(mode))
+ rc = mdd_lma_set_locked(env, mdd_obj, ma, handle);
+
+ }
cleanup:
+ if (rc == 0)
+ rc = mdd_attr_set_changelog(env, obj, handle,
+ ma->ma_attr.la_valid);
mdd_trans_stop(env, mdd, rc, handle);
if (rc == 0 && (lmm != NULL && lmm_size > 0 )) {
/*set obd attr, if needed*/
rc = mdd_lov_setattr_async(env, mdd_obj, lmm, lmm_size,
logcookies);
}
+#ifdef HAVE_QUOTA_SUPPORT
+ if (quota_opc) {
+ lquota_pending_commit(mds_quota_interface_ref, obd, qnids,
+ inode_pending, 0);
+ lquota_pending_commit(mds_quota_interface_ref, obd, qnids,
+ block_pending, 1);
+ /* Trigger dqrel/dqacq for original owner and new owner.
+ * If failed, the next call for lquota_chkquota will
+ * process it. */
+ lquota_adjust(mds_quota_interface_ref, obd, qnids, qoids, rc,
+ quota_opc);
+ }
+#endif
RETURN(rc);
}
int rc;
ENTRY;
- mdd_write_lock(env, obj);
+ mdd_write_lock(env, obj, MOR_TGT_CHILD);
rc = __mdd_xattr_set(env, obj, buf, name, fl, handle);
mdd_write_unlock(env, obj);
if (rc)
RETURN(rc);
- if ((uc->mu_fsuid != tmp_la->la_uid) && !mdd_capable(uc, CAP_FOWNER))
+ if ((uc->mu_fsuid != tmp_la->la_uid) &&
+ !mdd_capable(uc, CFS_CAP_FOWNER))
RETURN(-EPERM);
RETURN(rc);
}
+/**
+ * The caller should guarantee to update the object ctime
+ * after xattr_set if needed.
+ */
static int mdd_xattr_set(const struct lu_env *env, struct md_object *obj,
- const struct lu_buf *buf, const char *name, int fl)
+ const struct lu_buf *buf, const char *name,
+ int fl)
{
- struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct mdd_device *mdd = mdo2mdd(obj);
struct thandle *handle;
RETURN(rc);
mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
+ /* security-related changes may require sync */
+ if (!strcmp(name, XATTR_NAME_ACL_ACCESS) &&
+ mdd->mdd_sync_permission == 1)
+ txn_param_sync(&mdd_env_info(env)->mti_param);
+
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- rc = mdd_xattr_set_txn(env, md2mdd_obj(obj), buf, name,
- fl, handle);
- if (rc == 0) {
- la_copy->la_ctime = CURRENT_SECONDS;
- la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
- handle, 0);
- }
+ rc = mdd_xattr_set_txn(env, mdd_obj, buf, name, fl, handle);
+
+ /* Only record user xattr changes */
+ if ((rc == 0) && (strncmp("user.", name, 5) == 0))
+ rc = mdd_changelog_data_store(env, mdd, CL_XATTR, 0, mdd_obj,
+ handle);
mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
+/**
+ * The caller should guarantee to update the object ctime
+ * after xattr_set if needed.
+ */
int mdd_xattr_del(const struct lu_env *env, struct md_object *obj,
const char *name)
{
- struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct mdd_device *mdd = mdo2mdd(obj);
struct thandle *handle;
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_write_lock(env, mdd_obj);
- rc = mdo_xattr_del(env, md2mdd_obj(obj), name, handle,
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
+ rc = mdo_xattr_del(env, mdd_obj, name, handle,
mdd_object_capa(env, mdd_obj));
mdd_write_unlock(env, mdd_obj);
- if (rc == 0) {
- la_copy->la_ctime = CURRENT_SECONDS;
- la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
- handle, 0);
- }
+
+ /* Only record user xattr changes */
+ if ((rc == 0) && (strncmp("user.", name, 5) != 0))
+ rc = mdd_changelog_data_store(env, mdd, CL_XATTR, 0, mdd_obj,
+ handle);
mdd_trans_stop(env, mdd, rc, handle);
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct mdd_device *mdd = mdo2mdd(obj);
struct thandle *handle;
+#ifdef HAVE_QUOTA_SUPPORT
+ struct obd_device *obd = mdd->mdd_obd_dev;
+ struct mds_obd *mds = &obd->u.mds;
+ unsigned int qids[MAXQUOTAS] = { 0, 0 };
+ int quota_opc = 0;
+#endif
int rc;
ENTRY;
if (IS_ERR(handle))
RETURN(-ENOMEM);
- mdd_write_lock(env, mdd_obj);
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdd_unlink_sanity_check(env, NULL, mdd_obj, ma);
if (rc)
GOTO(cleanup, rc);
- mdo_ref_del(env, mdd_obj, handle);
+ __mdd_ref_del(env, mdd_obj, handle, 0);
if (S_ISDIR(lu_object_attr(&obj->mo_lu))) {
/* unlink dot */
- mdo_ref_del(env, mdd_obj, handle);
+ __mdd_ref_del(env, mdd_obj, handle, 1);
}
LASSERT(ma->ma_attr.la_valid & LA_CTIME);
la_copy->la_ctime = ma->ma_attr.la_ctime;
la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_set_internal(env, mdd_obj, la_copy, handle, 0);
+ rc = mdd_attr_check_set_internal(env, mdd_obj, la_copy, handle, 0);
if (rc)
GOTO(cleanup, rc);
rc = mdd_finish_unlink(env, mdd_obj, ma, handle);
+#ifdef HAVE_QUOTA_SUPPORT
+ if (mds->mds_quota && ma->ma_valid & MA_INODE &&
+ ma->ma_attr.la_nlink == 0 && mdd_obj->mod_count == 0) {
+ quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
+ mdd_quota_wrapper(&ma->ma_attr, qids);
+ }
+#endif
+
EXIT;
cleanup:
mdd_write_unlock(env, mdd_obj);
mdd_trans_stop(env, mdd, rc, handle);
+#ifdef HAVE_QUOTA_SUPPORT
+ if (quota_opc)
+ /* Trigger dqrel on the owner of child. If failed,
+ * the next call for lquota_chkquota will process it */
+ lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
+ quota_opc);
+#endif
return rc;
}
struct mdd_object *mdd_obj = md2mdd_obj(obj);
const struct lu_fid *pfid = spec->u.sp_pfid;
struct thandle *handle;
- int rc;
+#ifdef HAVE_QUOTA_SUPPORT
+ struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
+ struct mds_obd *mds = &obd->u.mds;
+ unsigned int qids[MAXQUOTAS] = { 0, 0 };
+ int quota_opc = 0, block_count = 0;
+ int inode_pending[MAXQUOTAS] = { 0, 0 };
+ int block_pending[MAXQUOTAS] = { 0, 0 };
+#endif
+ int rc = 0;
ENTRY;
+#ifdef HAVE_QUOTA_SUPPORT
+ if (mds->mds_quota) {
+ quota_opc = FSFILT_OP_CREATE_PARTIAL_CHILD;
+ mdd_quota_wrapper(&ma->ma_attr, qids);
+ /* get file quota for child */
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
+ qids, inode_pending, 1, NULL, 0,
+ NULL, 0);
+ switch (ma->ma_attr.la_mode & S_IFMT) {
+ case S_IFLNK:
+ case S_IFDIR:
+ block_count = 2;
+ break;
+ case S_IFREG:
+ block_count = 1;
+ break;
+ }
+ /* get block quota for child */
+ if (block_count)
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
+ qids, block_pending, block_count,
+ NULL, LQUOTA_FLAGS_BLK, NULL, 0);
+ }
+#endif
+
mdd_txn_param_build(env, mdd, MDD_TXN_OBJECT_CREATE_OP);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
- RETURN(PTR_ERR(handle));
+ GOTO(out_pending, rc = PTR_ERR(handle));
- mdd_write_lock(env, mdd_obj);
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdd_oc_sanity_check(env, mdd_obj, ma);
if (rc)
GOTO(unlock, rc);
- rc = mdd_object_create_internal(env, NULL, mdd_obj, ma, handle);
+ rc = mdd_object_create_internal(env, NULL, mdd_obj, ma, handle, spec);
if (rc)
GOTO(unlock, rc);
rc = __mdd_xattr_set(env, mdd_obj,
mdd_buf_get_const(env, lmv, lmv_size),
- MDS_LMV_MD_NAME, 0, handle);
+ XATTR_NAME_LMV, 0, handle);
if (rc)
GOTO(unlock, rc);
- rc = mdd_attr_set_internal(env, mdd_obj, &ma->ma_attr, handle, 0);
+ rc = mdd_attr_set_internal(env, mdd_obj, &ma->ma_attr,
+ handle, 0);
} else {
#ifdef CONFIG_FS_POSIX_ACL
if (spec->sp_cr_flags & MDS_CREATE_RMT_ACL) {
pfid = spec->u.sp_ea.fid;
}
#endif
- rc = mdd_object_initialize(env, pfid, mdd_obj, ma, handle);
+ rc = mdd_object_initialize(env, pfid, NULL, mdd_obj, ma, handle,
+ spec);
}
EXIT;
unlock:
- mdd_write_unlock(env, mdd_obj);
if (rc == 0)
- rc = mdd_attr_get_internal_locked(env, mdd_obj, ma);
+ rc = mdd_attr_get_internal(env, mdd_obj, ma);
+ mdd_write_unlock(env, mdd_obj);
mdd_trans_stop(env, mdd, rc, handle);
+out_pending:
+#ifdef HAVE_QUOTA_SUPPORT
+ if (quota_opc) {
+ lquota_pending_commit(mds_quota_interface_ref, obd, qids,
+ inode_pending, 0);
+ lquota_pending_commit(mds_quota_interface_ref, obd, qids,
+ block_pending, 1);
+ /* Trigger dqacq on the owner of child. If failed,
+ * the next call for lquota_chkquota will process it. */
+ lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
+ quota_opc);
+ }
+#endif
return rc;
}
if (IS_ERR(handle))
RETURN(-ENOMEM);
- mdd_write_lock(env, mdd_obj);
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdd_link_sanity_check(env, NULL, NULL, mdd_obj);
if (rc == 0)
- mdo_ref_add(env, mdd_obj, handle);
+ __mdd_ref_add(env, mdd_obj, handle);
mdd_write_unlock(env, mdd_obj);
if (rc == 0) {
LASSERT(ma->ma_attr.la_valid & LA_CTIME);
la_copy->la_ctime = ma->ma_attr.la_ctime;
la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
- handle, 0);
+ rc = mdd_attr_check_set_internal_locked(env, mdd_obj, la_copy,
+ handle, 0);
}
mdd_trans_stop(env, mdd, 0, handle);
if (uc && ((uc->mu_valid == UCRED_OLD) ||
(uc->mu_valid == UCRED_NEW)) &&
(uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CAP_FOWNER))
+ !mdd_capable(uc, CFS_CAP_FOWNER))
RETURN(-EPERM);
}
#endif
struct mdd_object *mdd_obj = md2mdd_obj(obj);
int rc = 0;
- mdd_write_lock(env, mdd_obj);
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdd_open_sanity_check(env, mdd_obj, flags);
if (rc == 0)
if (S_ISREG(mdd_object_type(obj))) {
/* Return LOV & COOKIES unconditionally here. We clean evth up.
* Caller must be ready for that. */
+
rc = __mdd_lmm_get(env, obj, ma);
if ((ma->ma_valid & MA_LOV))
rc = mdd_unlink_log(env, mdo2mdd(&obj->mod_obj),
static int mdd_close(const struct lu_env *env, struct md_object *obj,
struct md_attr *ma)
{
- int rc;
struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct mdd_device *mdd = mdo2mdd(obj);
struct thandle *handle;
+ int rc;
+ int reset = 1;
+
+#ifdef HAVE_QUOTA_SUPPORT
+ struct obd_device *obd = mdo2mdd(obj)->mdd_obd_dev;
+ struct mds_obd *mds = &obd->u.mds;
+ unsigned int qids[MAXQUOTAS] = { 0, 0 };
+ int quota_opc = 0;
+#endif
ENTRY;
rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- mdd_write_lock(env, mdd_obj);
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
/* release open count */
mdd_obj->mod_count --;
+ if (mdd_obj->mod_count == 0 && mdd_obj->mod_flags & ORPHAN_OBJ) {
+ /* remove link to object from orphan index */
+ rc = __mdd_orphan_del(env, mdd_obj, handle);
+ if (rc == 0) {
+ CDEBUG(D_HA, "Object "DFID" is deleted from orphan "
+ "list, OSS objects to be destroyed.\n",
+ PFID(mdd_object_fid(mdd_obj)));
+ } else {
+ CERROR("Object "DFID" can not be deleted from orphan "
+ "list, maybe cause OST objects can not be "
+ "destroyed (err: %d).\n",
+ PFID(mdd_object_fid(mdd_obj)), rc);
+ /* If object was not deleted from orphan list, do not
+ * destroy OSS objects, which will be done when next
+ * recovery. */
+ GOTO(out, rc);
+ }
+ }
+
rc = mdd_iattr_get(env, mdd_obj, ma);
- if (rc == 0 && mdd_obj->mod_count == 0 && ma->ma_attr.la_nlink == 0)
- rc = mdd_object_kill(env, mdd_obj, ma);
- else
+ /* The object may not have been in the orphan list originally; this is a
+ * rare case caused by mdd_finish_unlink() failure. */
+ if (rc == 0 && ma->ma_attr.la_nlink == 0) {
+#ifdef HAVE_QUOTA_SUPPORT
+ if (mds->mds_quota) {
+ quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
+ mdd_quota_wrapper(&ma->ma_attr, qids);
+ }
+#endif
+ /* MDS_CLOSE_CLEANUP means destroy OSS objects by MDS. */
+ if (ma->ma_valid & MA_FLAGS &&
+ ma->ma_attr_flags & MDS_CLOSE_CLEANUP) {
+ rc = mdd_lov_destroy(env, mdd, mdd_obj, &ma->ma_attr);
+ } else {
+ rc = mdd_object_kill(env, mdd_obj, ma);
+ if (rc == 0)
+ reset = 0;
+ }
+
+ if (rc != 0)
+ CERROR("Error when prepare to delete Object "DFID" , "
+ "which will cause OST objects can not be "
+ "destroyed.\n", PFID(mdd_object_fid(mdd_obj)));
+ }
+ EXIT;
+
+out:
+ if (reset)
ma->ma_valid &= ~(MA_LOV | MA_COOKIE);
-
+
mdd_write_unlock(env, mdd_obj);
mdd_trans_stop(env, mdo2mdd(obj), rc, handle);
- RETURN(rc);
+#ifdef HAVE_QUOTA_SUPPORT
+ if (quota_opc)
+ /* Trigger dqrel on the owner of child. If failed,
+ * the next call for lquota_chkquota will process it */
+ lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
+ quota_opc);
+#endif
+ return rc;
}
/*
RETURN(rc);
}
-static int mdd_dir_page_build(const struct lu_env *env, int first,
- void *area, int nob, struct dt_it_ops *iops,
- struct dt_it *it, __u32 *start, __u32 *end,
- struct lu_dirent **last)
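+/* Fill one readdir page (\a area, \a nob bytes) with lu_dirent records taken
+ * from the dt iterator \a it, and report the first and last hash packed. */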
+static int mdd_dir_page_build(const struct lu_env *env, struct mdd_device *mdd,
+ int first, void *area, int nob,
+ const struct dt_it_ops *iops, struct dt_it *it,
+ __u64 *start, __u64 *end,
+ struct lu_dirent **last, __u32 attr)
{
- struct lu_fid *fid = &mdd_env_info(env)->mti_fid2;
- struct mdd_thread_info *info = mdd_env_info(env);
- struct lu_fid_pack *pack = &info->mti_pack;
int result;
+ __u64 hash = 0;
struct lu_dirent *ent;
if (first) {
nob -= sizeof (struct lu_dirpage);
}
- LASSERT(nob > sizeof *ent);
-
ent = area;
- result = 0;
do {
- char *name;
int len;
int recsize;
- __u32 hash;
- name = (char *)iops->key(env, it);
len = iops->key_size(env, it);
- pack = (struct lu_fid_pack *)iops->rec(env, it);
- result = fid_unpack(pack, fid);
- if (result != 0)
- break;
+ /* IAM iterator can return record with zero len. */
+ if (len == 0)
+ goto next;
- recsize = (sizeof(*ent) + len + 3) & ~3;
hash = iops->store(env, it);
- *end = hash;
+ if (unlikely(first)) {
+ first = 0;
+ *start = hash;
+ }
- CDEBUG(D_INFO, "%p %p %d "DFID": %#8.8x (%d) \"%*.*s\"\n",
- name, ent, nob, PFID(fid), hash, len, len, len, name);
+ /* calculate max space required for lu_dirent */
+ recsize = lu_dirent_calc_size(len, attr);
if (nob >= recsize) {
- ent->lde_fid = *fid;
- fid_cpu_to_le(&ent->lde_fid, &ent->lde_fid);
- ent->lde_hash = hash;
- ent->lde_namelen = cpu_to_le16(len);
- ent->lde_reclen = cpu_to_le16(recsize);
- memcpy(ent->lde_name, name, len);
- if (first && ent == area)
- *start = hash;
- *last = ent;
- ent = (void *)ent + recsize;
- nob -= recsize;
- result = iops->next(env, it);
+ result = iops->rec(env, it, ent, attr);
+ if (result == -ESTALE)
+ goto next;
+ if (result != 0)
+ goto out;
+
+ /* osd might not able to pack all attributes,
+ * so recheck rec length */
+ recsize = le16_to_cpu(ent->lde_reclen);
} else {
/*
* record doesn't fit into page, enlarge previous one.
*/
- LASSERT(*last != NULL);
- (*last)->lde_reclen =
- cpu_to_le16(le16_to_cpu((*last)->lde_reclen) +
- nob);
- break;
+ if (*last) {
+ (*last)->lde_reclen =
+ cpu_to_le16(le16_to_cpu((*last)->lde_reclen) +
+ nob);
+ result = 0;
+ } else
+ result = -EINVAL;
+
+ goto out;
}
+ *last = ent;
+ ent = (void *)ent + recsize;
+ nob -= recsize;
+
+next:
+ result = iops->next(env, it);
+ if (result == -ESTALE)
+ goto next;
} while (result == 0);
+out:
+ *end = hash;
return result;
}
{
struct dt_it *it;
struct dt_object *next = mdd_object_child(obj);
- struct dt_it_ops *iops;
+ const struct dt_it_ops *iops;
struct page *pg;
- struct lu_dirent *last;
+ struct lu_dirent *last = NULL;
+ struct mdd_device *mdd = mdo2mdd(&obj->mod_obj);
int i;
int rc;
int nob;
- __u32 hash_start;
- __u32 hash_end;
+ __u64 hash_start;
+ __u64 hash_end = 0;
LASSERT(rdpg->rp_pages != NULL);
LASSERT(next->do_index_ops != NULL);
* iterate through directory and fill pages from @rdpg
*/
iops = &next->do_index_ops->dio_it;
- it = iops->init(env, next, 0, mdd_object_capa(env, obj));
+ it = iops->init(env, next, mdd_object_capa(env, obj));
if (IS_ERR(it))
return PTR_ERR(it);
rc = iops->load(env, it, rdpg->rp_hash);
- if (rc == 0)
+ if (rc == 0) {
/*
* Iterator didn't find record with exactly the key requested.
*
* state)---position it on the next item.
*/
rc = iops->next(env, it);
- else if (rc > 0)
+ } else if (rc > 0)
rc = 0;
/*
i++, nob -= CFS_PAGE_SIZE) {
LASSERT(i < rdpg->rp_npages);
pg = rdpg->rp_pages[i];
- rc = mdd_dir_page_build(env, !i, kmap(pg),
+ rc = mdd_dir_page_build(env, mdd, !i, cfs_kmap(pg),
min_t(int, nob, CFS_PAGE_SIZE), iops,
- it, &hash_start, &hash_end, &last);
- if (rc != 0 || i == rdpg->rp_npages - 1)
- last->lde_reclen = 0;
- kunmap(pg);
+ it, &hash_start, &hash_end, &last,
+ rdpg->rp_attrs);
+ if (rc != 0 || i == rdpg->rp_npages - 1) {
+ if (last)
+ last->lde_reclen = 0;
+ }
+ cfs_kunmap(pg);
}
if (rc > 0) {
/*
if (rc == 0) {
struct lu_dirpage *dp;
- dp = kmap(rdpg->rp_pages[0]);
- dp->ldp_hash_start = rdpg->rp_hash;
- dp->ldp_hash_end = hash_end;
+ dp = cfs_kmap(rdpg->rp_pages[0]);
+ dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
+ dp->ldp_hash_end = cpu_to_le64(hash_end);
if (i == 0)
/*
* No pages were processed, mark this.
*/
dp->ldp_flags |= LDF_EMPTY;
- dp->ldp_flags = cpu_to_le16(dp->ldp_flags);
- kunmap(rdpg->rp_pages[0]);
+
+ dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
+ cfs_kunmap(rdpg->rp_pages[0]);
}
iops->put(env, it);
iops->fini(env, it);
return rc;
}
-static int mdd_readpage(const struct lu_env *env, struct md_object *obj,
- const struct lu_rdpg *rdpg)
+int mdd_readpage(const struct lu_env *env, struct md_object *obj,
+ const struct lu_rdpg *rdpg)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
int rc;
LASSERT(mdd_object_exists(mdd_obj));
- mdd_read_lock(env, mdd_obj);
+ mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdd_readpage_sanity_check(env, mdd_obj);
if (rc)
GOTO(out_unlock, rc);
LASSERT(rdpg->rp_pages != NULL);
pg = rdpg->rp_pages[0];
- dp = (struct lu_dirpage*)kmap(pg);
+ dp = (struct lu_dirpage*)cfs_kmap(pg);
memset(dp, 0 , sizeof(struct lu_dirpage));
- dp->ldp_hash_start = rdpg->rp_hash;
- dp->ldp_hash_end = DIR_END_OFF;
+ dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
+ dp->ldp_hash_end = cpu_to_le64(DIR_END_OFF);
dp->ldp_flags |= LDF_EMPTY;
- dp->ldp_flags = cpu_to_le16(dp->ldp_flags);
- kunmap(pg);
+ dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
+ cfs_kunmap(pg);
GOTO(out_unlock, rc = 0);
}
return rc;
}
-struct md_object_operations mdd_obj_ops = {
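+/* Flush the dt object underlying \a obj to stable storage. */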
+static int mdd_object_sync(const struct lu_env *env, struct md_object *obj)
+{
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct dt_object *next;
+
+ LASSERT(mdd_object_exists(mdd_obj));
+ next = mdd_object_child(mdd_obj);
+ return next->do_ops->do_object_sync(env, next);
+}
+
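+/* mdd_version_get()/mdd_version_set() pass the object version straight
+ * through to the underlying dt object. */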
+static dt_obj_version_t mdd_version_get(const struct lu_env *env,
+ struct md_object *obj)
+{
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+
+ LASSERT(mdd_object_exists(mdd_obj));
+ return do_version_get(env, mdd_object_child(mdd_obj));
+}
+
+static void mdd_version_set(const struct lu_env *env, struct md_object *obj,
+ dt_obj_version_t version)
+{
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+
+ LASSERT(mdd_object_exists(mdd_obj));
+ do_version_set(env, mdd_object_child(mdd_obj), version);
+}
+
+const struct md_object_operations mdd_obj_ops = {
.moo_permission = mdd_permission,
.moo_attr_get = mdd_attr_get,
.moo_attr_set = mdd_attr_set,
.moo_close = mdd_close,
.moo_readpage = mdd_readpage,
.moo_readlink = mdd_readlink,
- .moo_capa_get = mdd_capa_get
+ .moo_changelog = mdd_changelog,
+ .moo_capa_get = mdd_capa_get,
+ .moo_object_sync = mdd_object_sync,
+ .moo_version_get = mdd_version_get,
+ .moo_version_set = mdd_version_set,
+ .moo_path = mdd_path,
+ .moo_file_lock = mdd_file_lock,
+ .moo_file_unlock = mdd_file_unlock,
};