-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Wang Di <wangdi@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_MDS
#include <linux/module.h>
-#ifdef HAVE_EXT4_LDISKFS
-#include <ldiskfs/ldiskfs_jbd2.h>
-#else
-#include <linux/jbd.h>
-#endif
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lprocfs_status.h>
/* fid_be_cpu(), fid_cpu_to_be(). */
#include <lustre_fid.h>
-
+#include <lustre_idmap.h>
#include <lustre_param.h>
-#ifdef HAVE_EXT4_LDISKFS
-#include <ldiskfs/ldiskfs.h>
-#else
-#include <linux/ldiskfs_fs.h>
-#endif
#include <lustre_mds.h>
#include <lustre/lustre_idl.h>
struct md_object *obj, struct lu_buf *buf,
const char *name);
-int mdd_data_get(const struct lu_env *env, struct mdd_object *obj,
- void **data)
-{
- LASSERTF(mdd_object_exists(obj), "FID is "DFID"\n",
- PFID(mdd_object_fid(obj)));
- mdo_data_get(env, obj, data);
- return 0;
-}
-
int mdd_la_get(const struct lu_env *env, struct mdd_object *obj,
- struct lu_attr *la, struct lustre_capa *capa)
-{
- LASSERTF(mdd_object_exists(obj), "FID is "DFID"\n",
- PFID(mdd_object_fid(obj)));
- return mdo_attr_get(env, obj, la, capa);
-}
-
-static void mdd_flags_xlate(struct mdd_object *obj, __u32 flags)
+ struct lu_attr *la)
{
- obj->mod_flags &= ~(APPEND_OBJ|IMMUTE_OBJ);
-
- if (flags & LUSTRE_APPEND_FL)
- obj->mod_flags |= APPEND_OBJ;
+ if (mdd_object_exists(obj) == 0) {
+ CERROR("%s: object "DFID" not found: rc = -2\n",
+ mdd_obj_dev_name(obj), PFID(mdd_object_fid(obj)));
+ return -ENOENT;
+ }
- if (flags & LUSTRE_IMMUTABLE_FL)
- obj->mod_flags |= IMMUTE_OBJ;
+ return mdo_attr_get(env, obj, la);
}
struct mdd_thread_info *mdd_env_info(const struct lu_env *env)
{
struct mdd_thread_info *info;
+ lu_env_refill((struct lu_env *)env);
info = lu_context_key_get(&env->le_ctx, &mdd_thread_key);
LASSERT(info != NULL);
return info;
struct lu_buf *mdd_buf_get(const struct lu_env *env, void *area, ssize_t len)
{
- struct lu_buf *buf;
+ struct lu_buf *buf;
- buf = &mdd_env_info(env)->mti_buf;
- buf->lb_buf = area;
- buf->lb_len = len;
- return buf;
-}
-
-void mdd_buf_put(struct lu_buf *buf)
-{
- if (buf == NULL || buf->lb_buf == NULL)
- return;
- OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
- buf->lb_buf = NULL;
- buf->lb_len = 0;
+ buf = &mdd_env_info(env)->mti_buf[0];
+ buf->lb_buf = area;
+ buf->lb_len = len;
+ return buf;
}
const struct lu_buf *mdd_buf_get_const(const struct lu_env *env,
const void *area, ssize_t len)
{
- struct lu_buf *buf;
-
- buf = &mdd_env_info(env)->mti_buf;
- buf->lb_buf = (void *)area;
- buf->lb_len = len;
- return buf;
-}
-
-struct lu_buf *mdd_buf_alloc(const struct lu_env *env, ssize_t len)
-{
- struct lu_buf *buf = &mdd_env_info(env)->mti_big_buf;
+ struct lu_buf *buf;
- if ((len > buf->lb_len) && (buf->lb_buf != NULL)) {
- OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
- buf->lb_buf = NULL;
- }
- if (buf->lb_buf == NULL) {
- buf->lb_len = len;
- OBD_ALLOC_LARGE(buf->lb_buf, buf->lb_len);
- if (buf->lb_buf == NULL)
- buf->lb_len = 0;
- }
- return buf;
-}
-
-/** Increase the size of the \a mti_big_buf.
- * preserves old data in buffer
- * old buffer remains unchanged on error
- * \retval 0 or -ENOMEM
- */
-int mdd_buf_grow(const struct lu_env *env, ssize_t len)
-{
- struct lu_buf *oldbuf = &mdd_env_info(env)->mti_big_buf;
- struct lu_buf buf;
-
- LASSERT(len >= oldbuf->lb_len);
- OBD_ALLOC_LARGE(buf.lb_buf, len);
-
- if (buf.lb_buf == NULL)
- return -ENOMEM;
-
- buf.lb_len = len;
- memcpy(buf.lb_buf, oldbuf->lb_buf, oldbuf->lb_len);
-
- OBD_FREE_LARGE(oldbuf->lb_buf, oldbuf->lb_len);
-
- memcpy(oldbuf, &buf, sizeof(buf));
-
- return 0;
+ buf = &mdd_env_info(env)->mti_buf[0];
+ buf->lb_buf = (void *)area;
+ buf->lb_len = len;
+ return buf;
}
-struct llog_cookie *mdd_max_cookie_get(const struct lu_env *env,
- struct mdd_device *mdd)
+struct lu_object *mdd_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *hdr,
+ struct lu_device *d)
{
- struct mdd_thread_info *mti = mdd_env_info(env);
- int max_cookie_size;
-
- max_cookie_size = mdd_lov_cookiesize(env, mdd);
- if (unlikely(mti->mti_max_cookie_size < max_cookie_size)) {
- if (mti->mti_max_cookie)
- OBD_FREE_LARGE(mti->mti_max_cookie,
- mti->mti_max_cookie_size);
- mti->mti_max_cookie = NULL;
- mti->mti_max_cookie_size = 0;
- }
- if (unlikely(mti->mti_max_cookie == NULL)) {
- OBD_ALLOC_LARGE(mti->mti_max_cookie, max_cookie_size);
- if (likely(mti->mti_max_cookie != NULL))
- mti->mti_max_cookie_size = max_cookie_size;
- }
- if (likely(mti->mti_max_cookie != NULL))
- memset(mti->mti_max_cookie, 0, mti->mti_max_cookie_size);
- return mti->mti_max_cookie;
-}
+ struct mdd_object *mdd_obj;
-struct lov_mds_md *mdd_max_lmm_get(const struct lu_env *env,
- struct mdd_device *mdd)
-{
- struct mdd_thread_info *mti = mdd_env_info(env);
- int max_lmm_size;
-
- max_lmm_size = mdd_lov_mdsize(env, mdd);
- if (unlikely(mti->mti_max_lmm_size < max_lmm_size)) {
- if (mti->mti_max_lmm)
- OBD_FREE_LARGE(mti->mti_max_lmm, mti->mti_max_lmm_size);
- mti->mti_max_lmm = NULL;
- mti->mti_max_lmm_size = 0;
- }
- if (unlikely(mti->mti_max_lmm == NULL)) {
- OBD_ALLOC_LARGE(mti->mti_max_lmm, max_lmm_size);
- if (likely(mti->mti_max_lmm != NULL))
- mti->mti_max_lmm_size = max_lmm_size;
- }
- return mti->mti_max_lmm;
-}
+ OBD_SLAB_ALLOC_PTR_GFP(mdd_obj, mdd_object_kmem, GFP_NOFS);
+ if (mdd_obj != NULL) {
+ struct lu_object *o;
-struct lu_object *mdd_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *d)
-{
- struct mdd_object *mdd_obj;
-
- OBD_ALLOC_PTR(mdd_obj);
- if (mdd_obj != NULL) {
- struct lu_object *o;
-
- o = mdd2lu_obj(mdd_obj);
- lu_object_init(o, NULL, d);
- mdd_obj->mod_obj.mo_ops = &mdd_obj_ops;
- mdd_obj->mod_obj.mo_dir_ops = &mdd_dir_ops;
- mdd_obj->mod_count = 0;
- o->lo_ops = &mdd_lu_obj_ops;
- return o;
- } else {
- return NULL;
- }
+ o = mdd2lu_obj(mdd_obj);
+ lu_object_init(o, NULL, d);
+ mdd_obj->mod_obj.mo_ops = &mdd_obj_ops;
+ mdd_obj->mod_obj.mo_dir_ops = &mdd_dir_ops;
+ mdd_obj->mod_count = 0;
+ o->lo_ops = &mdd_lu_obj_ops;
+ return o;
+ } else {
+ return NULL;
+ }
}
static int mdd_object_init(const struct lu_env *env, struct lu_object *o,
mdd_obj->mod_cltime = 0;
under = &d->mdd_child->dd_lu_dev;
below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
- mdd_pdlock_init(mdd_obj);
- if (below == NULL)
- RETURN(-ENOMEM);
+ if (IS_ERR(below))
+ RETURN(PTR_ERR(below));
lu_object_add(o, below);
static int mdd_object_start(const struct lu_env *env, struct lu_object *o)
{
- if (lu_object_exists(o))
- return mdd_get_flags(env, lu2mdd_obj(o));
- else
- return 0;
+ int rc = 0;
+
+ if (lu_object_exists(o)) {
+ struct mdd_object *mdd_obj = lu2mdd_obj(o);
+ struct lu_attr *attr = MDD_ENV_VAR(env, la_for_start);
+
+ rc = mdd_la_get(env, mdd_obj, attr);
+ }
+
+ return rc;
}
static void mdd_object_free(const struct lu_env *env, struct lu_object *o)
struct mdd_object *mdd = lu2mdd_obj(o);
lu_object_fini(o);
- OBD_FREE_PTR(mdd);
+ OBD_SLAB_FREE_PTR(mdd, mdd_object_kmem);
}
static int mdd_object_print(const struct lu_env *env, void *cookie,
return md2mdd_obj(md_object_find_slice(env, &d->mdd_md_dev, f));
}
-static int mdd_path2fid(const struct lu_env *env, struct mdd_device *mdd,
- const char *path, struct lu_fid *fid)
-{
- struct lu_buf *buf;
- struct lu_fid *f = &mdd_env_info(env)->mti_fid;
- struct mdd_object *obj;
- struct lu_name *lname = &mdd_env_info(env)->mti_name;
- char *name;
- int rc = 0;
- ENTRY;
-
- /* temp buffer for path element */
- buf = mdd_buf_alloc(env, PATH_MAX);
- if (buf->lb_buf == NULL)
- RETURN(-ENOMEM);
-
- lname->ln_name = name = buf->lb_buf;
- lname->ln_namelen = 0;
- *f = mdd->mdd_root_fid;
-
- while(1) {
- while (*path == '/')
- path++;
- if (*path == '\0')
- break;
- while (*path != '/' && *path != '\0') {
- *name = *path;
- path++;
- name++;
- lname->ln_namelen++;
- }
-
- *name = '\0';
- /* find obj corresponding to fid */
- obj = mdd_object_find(env, mdd, f);
- if (obj == NULL)
- GOTO(out, rc = -EREMOTE);
- if (IS_ERR(obj))
- GOTO(out, rc = PTR_ERR(obj));
- /* get child fid from parent and name */
- rc = mdd_lookup(env, &obj->mod_obj, lname, f, NULL);
- mdd_object_put(env, obj);
- if (rc)
- break;
-
- name = buf->lb_buf;
- lname->ln_namelen = 0;
- }
-
- if (!rc)
- *fid = *f;
-out:
- RETURN(rc);
-}
-
-/** The maximum depth that fid2path() will search.
- * This is limited only because we want to store the fids for
- * historical path lookup purposes.
- */
-#define MAX_PATH_DEPTH 100
-
-/** mdd_path() lookup structure. */
-struct path_lookup_info {
- __u64 pli_recno; /**< history point */
- __u64 pli_currec; /**< current record */
- struct lu_fid pli_fid;
- struct lu_fid pli_fids[MAX_PATH_DEPTH]; /**< path, in fids */
- struct mdd_object *pli_mdd_obj;
- char *pli_path; /**< full path */
- int pli_pathlen;
- int pli_linkno; /**< which hardlink to follow */
- int pli_fidcount; /**< number of \a pli_fids */
-};
-
-static int mdd_path_current(const struct lu_env *env,
- struct path_lookup_info *pli)
-{
- struct mdd_device *mdd = mdo2mdd(&pli->pli_mdd_obj->mod_obj);
- struct mdd_object *mdd_obj;
- struct lu_buf *buf = NULL;
- struct link_ea_header *leh;
- struct link_ea_entry *lee;
- struct lu_name *tmpname = &mdd_env_info(env)->mti_name;
- struct lu_fid *tmpfid = &mdd_env_info(env)->mti_fid;
- char *ptr;
- int reclen;
- int rc;
- ENTRY;
-
- ptr = pli->pli_path + pli->pli_pathlen - 1;
- *ptr = 0;
- --ptr;
- pli->pli_fidcount = 0;
- pli->pli_fids[0] = *(struct lu_fid *)mdd_object_fid(pli->pli_mdd_obj);
-
- while (!mdd_is_root(mdd, &pli->pli_fids[pli->pli_fidcount])) {
- mdd_obj = mdd_object_find(env, mdd,
- &pli->pli_fids[pli->pli_fidcount]);
- if (mdd_obj == NULL)
- GOTO(out, rc = -EREMOTE);
- if (IS_ERR(mdd_obj))
- GOTO(out, rc = PTR_ERR(mdd_obj));
- rc = lu_object_exists(&mdd_obj->mod_obj.mo_lu);
- if (rc <= 0) {
- mdd_object_put(env, mdd_obj);
- if (rc == -1)
- rc = -EREMOTE;
- else if (rc == 0)
- /* Do I need to error out here? */
- rc = -ENOENT;
- GOTO(out, rc);
- }
-
- /* Get parent fid and object name */
- mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
- buf = mdd_links_get(env, mdd_obj);
- mdd_read_unlock(env, mdd_obj);
- mdd_object_put(env, mdd_obj);
- if (IS_ERR(buf))
- GOTO(out, rc = PTR_ERR(buf));
-
- leh = buf->lb_buf;
- lee = (struct link_ea_entry *)(leh + 1); /* link #0 */
- mdd_lee_unpack(lee, &reclen, tmpname, tmpfid);
-
- /* If set, use link #linkno for path lookup, otherwise use
- link #0. Only do this for the final path element. */
- if ((pli->pli_fidcount == 0) &&
- (pli->pli_linkno < leh->leh_reccount)) {
- int count;
- for (count = 0; count < pli->pli_linkno; count++) {
- lee = (struct link_ea_entry *)
- ((char *)lee + reclen);
- mdd_lee_unpack(lee, &reclen, tmpname, tmpfid);
- }
- if (pli->pli_linkno < leh->leh_reccount - 1)
- /* indicate to user there are more links */
- pli->pli_linkno++;
- }
-
- /* Pack the name in the end of the buffer */
- ptr -= tmpname->ln_namelen;
- if (ptr - 1 <= pli->pli_path)
- GOTO(out, rc = -EOVERFLOW);
- strncpy(ptr, tmpname->ln_name, tmpname->ln_namelen);
- *(--ptr) = '/';
-
- /* Store the parent fid for historic lookup */
- if (++pli->pli_fidcount >= MAX_PATH_DEPTH)
- GOTO(out, rc = -EOVERFLOW);
- pli->pli_fids[pli->pli_fidcount] = *tmpfid;
- }
-
- /* Verify that our path hasn't changed since we started the lookup.
- Record the current index, and verify the path resolves to the
- same fid. If it does, then the path is correct as of this index. */
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
- pli->pli_currec = mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
- rc = mdd_path2fid(env, mdd, ptr, &pli->pli_fid);
- if (rc) {
- CDEBUG(D_INFO, "mdd_path2fid(%s) failed %d\n", ptr, rc);
- GOTO (out, rc = -EAGAIN);
- }
- if (!lu_fid_eq(&pli->pli_fids[0], &pli->pli_fid)) {
- CDEBUG(D_INFO, "mdd_path2fid(%s) found another FID o="DFID
- " n="DFID"\n", ptr, PFID(&pli->pli_fids[0]),
- PFID(&pli->pli_fid));
- GOTO(out, rc = -EAGAIN);
- }
- ptr++; /* skip leading / */
- memmove(pli->pli_path, ptr, pli->pli_path + pli->pli_pathlen - ptr);
-
- EXIT;
-out:
- if (buf && !IS_ERR(buf) && buf->lb_len > OBD_ALLOC_BIG)
- /* if we vmalloced a large buffer drop it */
- mdd_buf_put(buf);
-
- return rc;
-}
-
-static int mdd_path_historic(const struct lu_env *env,
- struct path_lookup_info *pli)
-{
- return 0;
-}
-
-/* Returns the full path to this fid, as of changelog record recno. */
-static int mdd_path(const struct lu_env *env, struct md_object *obj,
- char *path, int pathlen, __u64 *recno, int *linkno)
-{
- struct path_lookup_info *pli;
- int tries = 3;
- int rc = -EAGAIN;
- ENTRY;
-
- if (pathlen < 3)
- RETURN(-EOVERFLOW);
-
- if (mdd_is_root(mdo2mdd(obj), mdd_object_fid(md2mdd_obj(obj)))) {
- path[0] = '\0';
- RETURN(0);
- }
-
- OBD_ALLOC_PTR(pli);
- if (pli == NULL)
- RETURN(-ENOMEM);
-
- pli->pli_mdd_obj = md2mdd_obj(obj);
- pli->pli_recno = *recno;
- pli->pli_path = path;
- pli->pli_pathlen = pathlen;
- pli->pli_linkno = *linkno;
-
- /* Retry multiple times in case file is being moved */
- while (tries-- && rc == -EAGAIN)
- rc = mdd_path_current(env, pli);
-
- /* For historical path lookup, the current links may not have existed
- * at "recno" time. We must switch over to earlier links/parents
- * by using the changelog records. If the earlier parent doesn't
- * exist, we must search back through the changelog to reconstruct
- * its parents, then check if it exists, etc.
- * We may ignore this problem for the initial implementation and
- * state that an "original" hardlink must still exist for us to find
- * historic path name. */
- if (pli->pli_recno != -1) {
- rc = mdd_path_historic(env, pli);
- } else {
- *recno = pli->pli_currec;
- /* Return next link index to caller */
- *linkno = pli->pli_linkno;
- }
-
- OBD_FREE_PTR(pli);
-
- RETURN (rc);
-}
-
-int mdd_get_flags(const struct lu_env *env, struct mdd_object *obj)
-{
- struct lu_attr *la = &mdd_env_info(env)->mti_la;
- int rc;
-
- ENTRY;
- rc = mdd_la_get(env, obj, la, BYPASS_CAPA);
- if (rc == 0) {
- mdd_flags_xlate(obj, la->la_flags);
- if (S_ISDIR(la->la_mode) && la->la_nlink == 1)
- obj->mod_flags |= MNLINK_OBJ;
- }
- RETURN(rc);
-}
-
-/* get only inode attributes */
-int mdd_iattr_get(const struct lu_env *env, struct mdd_object *mdd_obj,
- struct md_attr *ma)
-{
- int rc = 0;
- ENTRY;
-
- if (ma->ma_valid & MA_INODE)
- RETURN(0);
-
- rc = mdd_la_get(env, mdd_obj, &ma->ma_attr,
- mdd_object_capa(env, mdd_obj));
- if (rc == 0)
- ma->ma_valid |= MA_INODE;
- RETURN(rc);
-}
-
-int mdd_get_default_md(struct mdd_object *mdd_obj, struct lov_mds_md *lmm)
-{
- struct lov_desc *ldesc;
- struct mdd_device *mdd = mdo2mdd(&mdd_obj->mod_obj);
- struct lov_user_md *lum = (struct lov_user_md*)lmm;
- ENTRY;
-
- if (!lum)
- RETURN(0);
-
- ldesc = &mdd->mdd_obd_dev->u.mds.mds_lov_desc;
- LASSERT(ldesc != NULL);
-
- lum->lmm_magic = LOV_MAGIC_V1;
- lum->lmm_object_seq = FID_SEQ_LOV_DEFAULT;
- lum->lmm_pattern = ldesc->ld_pattern;
- lum->lmm_stripe_size = ldesc->ld_default_stripe_size;
- lum->lmm_stripe_count = ldesc->ld_default_stripe_count;
- lum->lmm_stripe_offset = ldesc->ld_default_stripe_offset;
-
- RETURN(sizeof(*lum));
-}
-
-static int is_rootdir(struct mdd_object *mdd_obj)
-{
- const struct mdd_device *mdd_dev = mdd_obj2mdd_dev(mdd_obj);
- const struct lu_fid *fid = mdo2fid(mdd_obj);
-
- return lu_fid_eq(&mdd_dev->mdd_root_fid, fid);
-}
-
-/* get lov EA only */
-static int __mdd_lmm_get(const struct lu_env *env,
- struct mdd_object *mdd_obj, struct md_attr *ma)
-{
- int rc;
- ENTRY;
-
- if (ma->ma_valid & MA_LOV)
- RETURN(0);
-
- rc = mdd_get_md(env, mdd_obj, ma->ma_lmm, &ma->ma_lmm_size,
- XATTR_NAME_LOV);
- if (rc == 0 && (ma->ma_need & MA_LOV_DEF) && is_rootdir(mdd_obj))
- rc = mdd_get_default_md(mdd_obj, ma->ma_lmm);
- if (rc > 0) {
- ma->ma_lmm_size = rc;
- ma->ma_valid |= MA_LOV;
- rc = 0;
- }
- RETURN(rc);
-}
-
-/* get the first parent fid from link EA */
-static int mdd_pfid_get(const struct lu_env *env,
- struct mdd_object *mdd_obj, struct md_attr *ma)
-{
- struct lu_buf *buf;
- struct link_ea_header *leh;
- struct link_ea_entry *lee;
- struct lu_fid *pfid = &ma->ma_pfid;
- ENTRY;
-
- if (ma->ma_valid & MA_PFID)
- RETURN(0);
-
- buf = mdd_links_get(env, mdd_obj);
- if (IS_ERR(buf))
- RETURN(PTR_ERR(buf));
-
- leh = buf->lb_buf;
- lee = (struct link_ea_entry *)(leh + 1);
- memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
- fid_be_to_cpu(pfid, pfid);
- ma->ma_valid |= MA_PFID;
- if (buf->lb_len > OBD_ALLOC_BIG)
- /* if we vmalloced a large buffer drop it */
- mdd_buf_put(buf);
- RETURN(0);
-}
-
-int mdd_lmm_get_locked(const struct lu_env *env, struct mdd_object *mdd_obj,
- struct md_attr *ma)
-{
- int rc;
- ENTRY;
-
- mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = __mdd_lmm_get(env, mdd_obj, ma);
- mdd_read_unlock(env, mdd_obj);
- RETURN(rc);
-}
-
-/* get lmv EA only*/
-static int __mdd_lmv_get(const struct lu_env *env,
- struct mdd_object *mdd_obj, struct md_attr *ma)
-{
- int rc;
- ENTRY;
-
- if (ma->ma_valid & MA_LMV)
- RETURN(0);
-
- rc = mdd_get_md(env, mdd_obj, ma->ma_lmv, &ma->ma_lmv_size,
- XATTR_NAME_LMV);
- if (rc > 0) {
- ma->ma_valid |= MA_LMV;
- rc = 0;
- }
- RETURN(rc);
-}
-
-static int __mdd_lma_get(const struct lu_env *env, struct mdd_object *mdd_obj,
- struct md_attr *ma)
-{
- struct mdd_thread_info *info = mdd_env_info(env);
- struct lustre_mdt_attrs *lma =
- (struct lustre_mdt_attrs *)info->mti_xattr_buf;
- int lma_size;
- int rc;
- ENTRY;
-
- /* If all needed data are already valid, nothing to do */
- if ((ma->ma_valid & (MA_HSM | MA_SOM)) ==
- (ma->ma_need & (MA_HSM | MA_SOM)))
- RETURN(0);
-
- /* Read LMA from disk EA */
- lma_size = sizeof(info->mti_xattr_buf);
- rc = mdd_get_md(env, mdd_obj, lma, &lma_size, XATTR_NAME_LMA);
- if (rc <= 0)
- RETURN(rc);
-
- /* Useless to check LMA incompatibility because this is already done in
- * osd_ea_fid_get(), and this will fail long before this code is
- * called.
- * So, if we are here, LMA is compatible.
- */
-
- lustre_lma_swab(lma);
-
- /* Swab and copy LMA */
- if (ma->ma_need & MA_HSM) {
- if (lma->lma_compat & LMAC_HSM)
- ma->ma_hsm.mh_flags = lma->lma_flags & HSM_FLAGS_MASK;
- else
- ma->ma_hsm.mh_flags = 0;
- ma->ma_valid |= MA_HSM;
- }
-
- /* Copy SOM */
- if (ma->ma_need & MA_SOM && lma->lma_compat & LMAC_SOM) {
- LASSERT(ma->ma_som != NULL);
- ma->ma_som->msd_ioepoch = lma->lma_ioepoch;
- ma->ma_som->msd_size = lma->lma_som_size;
- ma->ma_som->msd_blocks = lma->lma_som_blocks;
- ma->ma_som->msd_mountid = lma->lma_som_mountid;
- ma->ma_valid |= MA_SOM;
- }
-
- RETURN(0);
-}
-
-int mdd_attr_get_internal(const struct lu_env *env, struct mdd_object *mdd_obj,
- struct md_attr *ma)
-{
- int rc = 0;
- ENTRY;
-
- if (ma->ma_need & MA_INODE)
- rc = mdd_iattr_get(env, mdd_obj, ma);
-
- if (rc == 0 && ma->ma_need & MA_LOV) {
- if (S_ISREG(mdd_object_type(mdd_obj)) ||
- S_ISDIR(mdd_object_type(mdd_obj)))
- rc = __mdd_lmm_get(env, mdd_obj, ma);
- }
- if (rc == 0 && ma->ma_need & MA_PFID && !(ma->ma_valid & MA_LOV)) {
- if (S_ISREG(mdd_object_type(mdd_obj)))
- rc = mdd_pfid_get(env, mdd_obj, ma);
- }
- if (rc == 0 && ma->ma_need & MA_LMV) {
- if (S_ISDIR(mdd_object_type(mdd_obj)))
- rc = __mdd_lmv_get(env, mdd_obj, ma);
- }
- if (rc == 0 && ma->ma_need & (MA_HSM | MA_SOM)) {
- if (S_ISREG(mdd_object_type(mdd_obj)))
- rc = __mdd_lma_get(env, mdd_obj, ma);
- }
-#ifdef CONFIG_FS_POSIX_ACL
- if (rc == 0 && ma->ma_need & MA_ACL_DEF) {
- if (S_ISDIR(mdd_object_type(mdd_obj)))
- rc = mdd_def_acl_get(env, mdd_obj, ma);
- }
-#endif
- CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
- rc, ma->ma_valid, ma->ma_lmm);
- RETURN(rc);
-}
-
-int mdd_attr_get_internal_locked(const struct lu_env *env,
- struct mdd_object *mdd_obj, struct md_attr *ma)
-{
- int rc;
- int needlock = ma->ma_need &
- (MA_LOV | MA_LMV | MA_ACL_DEF | MA_HSM | MA_SOM | MA_PFID);
-
- if (needlock)
- mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = mdd_attr_get_internal(env, mdd_obj, ma);
- if (needlock)
- mdd_read_unlock(env, mdd_obj);
- return rc;
-}
-
/*
* No permission check is needed.
*/
-static int mdd_attr_get(const struct lu_env *env, struct md_object *obj,
- struct md_attr *ma)
+int mdd_attr_get(const struct lu_env *env, struct md_object *obj,
+ struct md_attr *ma)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- int rc;
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ int rc;
- ENTRY;
- rc = mdd_attr_get_internal_locked(env, mdd_obj, ma);
- RETURN(rc);
+ ENTRY;
+
+ rc = mdd_la_get(env, mdd_obj, &ma->ma_attr);
+ if ((ma->ma_need & MA_INODE) != 0 && mdd_is_dead_obj(mdd_obj))
+ ma->ma_attr.la_nlink = 0;
+
+ RETURN(rc);
}
/*
ENTRY;
- LASSERT(mdd_object_exists(mdd_obj));
+ if (mdd_object_exists(mdd_obj) == 0) {
+ CERROR("%s: object "DFID" not found: rc = -2\n",
+ mdd_obj_dev_name(mdd_obj),PFID(mdd_object_fid(mdd_obj)));
+ return -ENOENT;
+ }
+
+ /* If the object has been delete from the namespace, then
+ * get linkEA should return -ENOENT as well */
+ if (unlikely((mdd_obj->mod_flags & (DEAD_OBJ | ORPHAN_OBJ)) &&
+ strcmp(name, XATTR_NAME_LINK) == 0))
+ RETURN(-ENOENT);
mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = mdo_xattr_get(env, mdd_obj, buf, name,
- mdd_object_capa(env, mdd_obj));
+ rc = mdo_xattr_get(env, mdd_obj, buf, name);
mdd_read_unlock(env, mdd_obj);
RETURN(rc);
* Permission check is done when open,
* no need check again.
*/
-static int mdd_readlink(const struct lu_env *env, struct md_object *obj,
- struct lu_buf *buf)
+int mdd_readlink(const struct lu_env *env, struct md_object *obj,
+ struct lu_buf *buf)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct dt_object *next;
int rc;
ENTRY;
- LASSERT(mdd_object_exists(mdd_obj));
+ if (mdd_object_exists(mdd_obj) == 0) {
+ CERROR("%s: object "DFID" not found: rc = -2\n",
+ mdd_obj_dev_name(mdd_obj),PFID(mdd_object_fid(mdd_obj)));
+ return -ENOENT;
+ }
next = mdd_object_child(mdd_obj);
- mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = next->do_body_ops->dbo_read(env, next, buf, &pos,
- mdd_object_capa(env, mdd_obj));
+ LASSERT(next != NULL);
+ LASSERT(next->do_body_ops != NULL);
+ LASSERT(next->do_body_ops->dbo_read != NULL);
+ mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
+ rc = dt_read(env, next, buf, &pos);
mdd_read_unlock(env, mdd_obj);
RETURN(rc);
}
ENTRY;
mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = mdo_xattr_list(env, mdd_obj, buf, mdd_object_capa(env, mdd_obj));
+ rc = mdo_xattr_list(env, mdd_obj, buf);
mdd_read_unlock(env, mdd_obj);
- RETURN(rc);
-}
-
-int mdd_object_create_internal(const struct lu_env *env, struct mdd_object *p,
- struct mdd_object *c, struct md_attr *ma,
- struct thandle *handle,
- const struct md_op_spec *spec)
+ if (rc < 0)
+ RETURN(rc);
+
+ /*
+ * Filter out XATTR_NAME_LINK if this is an orphan object. See
+ * mdd_xattr_get().
+ */
+ if (unlikely(mdd_obj->mod_flags & (DEAD_OBJ | ORPHAN_OBJ))) {
+ char *end = (char *)buf->lb_buf + rc;
+ char *p = buf->lb_buf;
+
+ while (p < end) {
+ char *next = p + strlen(p) + 1;
+
+ if (strcmp(p, XATTR_NAME_LINK) == 0) {
+ if (end - next > 0)
+ memmove(p, next, end - next);
+ rc -= next - p;
+ CDEBUG(D_INFO, "Filtered out "XATTR_NAME_LINK
+ " of orphan "DFID"\n",
+ PFID(mdd_object_fid(mdd_obj)));
+ break;
+ }
+
+ p = next;
+ }
+ }
+
+ RETURN(rc);
+}
+
+int mdd_declare_object_create_internal(const struct lu_env *env,
+ struct mdd_object *p,
+ struct mdd_object *c,
+ struct lu_attr *attr,
+ struct thandle *handle,
+ const struct md_op_spec *spec,
+ struct dt_allocation_hint *hint)
{
- struct lu_attr *attr = &ma->ma_attr;
- struct dt_allocation_hint *hint = &mdd_env_info(env)->mti_hint;
struct dt_object_format *dof = &mdd_env_info(env)->mti_dof;
const struct dt_index_features *feat = spec->sp_feat;
int rc;
ENTRY;
- if (!mdd_object_exists(c)) {
- struct dt_object *next = mdd_object_child(c);
- LASSERT(next);
-
- if (feat != &dt_directory_features && feat != NULL)
- dof->dof_type = DFT_INDEX;
- else
- dof->dof_type = dt_mode_to_dft(attr->la_mode);
-
- dof->u.dof_idx.di_feat = feat;
-
- /* @hint will be initialized by underlying device. */
- next->do_ops->do_ah_init(env, hint,
- p ? mdd_object_child(p) : NULL,
- attr->la_mode & S_IFMT);
-
- rc = mdo_create_obj(env, c, attr, hint, dof, handle);
- LASSERT(ergo(rc == 0, mdd_object_exists(c)));
- } else
- rc = -EEXIST;
+ if (feat != &dt_directory_features && feat != NULL) {
+ dof->dof_type = DFT_INDEX;
+ dof->u.dof_idx.di_feat = feat;
+
+ } else {
+ dof->dof_type = dt_mode_to_dft(attr->la_mode);
+ if (dof->dof_type == DFT_REGULAR) {
+ dof->u.dof_reg.striped =
+ md_should_create(spec->sp_cr_flags);
+ if (spec->sp_cr_flags & MDS_OPEN_HAS_EA)
+ dof->u.dof_reg.striped = 0;
+ /* is this replay? */
+ if (spec->no_create)
+ dof->u.dof_reg.striped = 0;
+ }
+ }
+
+ rc = mdo_declare_create_obj(env, c, attr, hint, dof, handle);
RETURN(rc);
}
-/**
- * Make sure the ctime is increased only.
- */
-static inline int mdd_attr_check(const struct lu_env *env,
- struct mdd_object *obj,
- struct lu_attr *attr)
+int mdd_object_create_internal(const struct lu_env *env, struct mdd_object *p,
+ struct mdd_object *c, struct lu_attr *attr,
+ struct thandle *handle,
+ const struct md_op_spec *spec,
+ struct dt_allocation_hint *hint)
{
- struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
+ struct dt_object_format *dof = &mdd_env_info(env)->mti_dof;
int rc;
ENTRY;
- if (attr->la_valid & LA_CTIME) {
- rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
- if (rc)
- RETURN(rc);
+ LASSERT(!mdd_object_exists(c));
- if (attr->la_ctime < tmp_la->la_ctime)
- attr->la_valid &= ~(LA_MTIME | LA_CTIME);
- else if (attr->la_valid == LA_CTIME &&
- attr->la_ctime == tmp_la->la_ctime)
- attr->la_valid &= ~LA_CTIME;
- }
- RETURN(0);
+ rc = mdo_create_obj(env, c, attr, hint, dof, handle);
+
+ RETURN(rc);
}
-int mdd_attr_set_internal(const struct lu_env *env,
- struct mdd_object *obj,
- struct lu_attr *attr,
- struct thandle *handle,
- int needacl)
+int mdd_attr_set_internal(const struct lu_env *env, struct mdd_object *obj,
+ const struct lu_attr *attr, struct thandle *handle,
+ int needacl)
{
int rc;
ENTRY;
- rc = mdo_attr_set(env, obj, attr, handle, mdd_object_capa(env, obj));
+ rc = mdo_attr_set(env, obj, attr, handle);
#ifdef CONFIG_FS_POSIX_ACL
if (!rc && (attr->la_valid & LA_MODE) && needacl)
rc = mdd_acl_chmod(env, obj, attr->la_mode, handle);
RETURN(rc);
}
-int mdd_attr_check_set_internal(const struct lu_env *env,
- struct mdd_object *obj,
- struct lu_attr *attr,
- struct thandle *handle,
- int needacl)
+int mdd_update_time(const struct lu_env *env, struct mdd_object *obj,
+ const struct lu_attr *oattr, struct lu_attr *attr,
+ struct thandle *handle)
{
- int rc;
- ENTRY;
+ int rc = 0;
+ ENTRY;
- rc = mdd_attr_check(env, obj, attr);
- if (rc)
- RETURN(rc);
+ LASSERT(attr->la_valid & LA_CTIME);
+ LASSERT(oattr != NULL);
- if (attr->la_valid)
- rc = mdd_attr_set_internal(env, obj, attr, handle, needacl);
- RETURN(rc);
-}
+ /* Make sure the ctime is increased only, however, it's not strictly
+ * reliable at here because there is not guarantee to hold lock on
+ * object, so we just bypass some unnecessary cmtime setting first
+ * and OSD has to check it again. */
+ if (attr->la_ctime < oattr->la_ctime)
+ attr->la_valid &= ~(LA_MTIME | LA_CTIME);
+ else if (attr->la_valid == LA_CTIME &&
+ attr->la_ctime == oattr->la_ctime)
+ attr->la_valid &= ~LA_CTIME;
-static int mdd_attr_set_internal_locked(const struct lu_env *env,
- struct mdd_object *obj,
- struct lu_attr *attr,
- struct thandle *handle,
- int needacl)
-{
- int rc;
- ENTRY;
-
- needacl = needacl && (attr->la_valid & LA_MODE);
- if (needacl)
- mdd_write_lock(env, obj, MOR_TGT_CHILD);
- rc = mdd_attr_set_internal(env, obj, attr, handle, needacl);
- if (needacl)
- mdd_write_unlock(env, obj);
- RETURN(rc);
-}
-
-int mdd_attr_check_set_internal_locked(const struct lu_env *env,
- struct mdd_object *obj,
- struct lu_attr *attr,
- struct thandle *handle,
- int needacl)
-{
- int rc;
- ENTRY;
-
- needacl = needacl && (attr->la_valid & LA_MODE);
- if (needacl)
- mdd_write_lock(env, obj, MOR_TGT_CHILD);
- rc = mdd_attr_check_set_internal(env, obj, attr, handle, needacl);
- if (needacl)
- mdd_write_unlock(env, obj);
- RETURN(rc);
-}
-
-int __mdd_xattr_set(const struct lu_env *env, struct mdd_object *obj,
- const struct lu_buf *buf, const char *name,
- int fl, struct thandle *handle)
-{
- struct lustre_capa *capa = mdd_object_capa(env, obj);
- int rc = -EINVAL;
- ENTRY;
-
- if (buf->lb_buf && buf->lb_len > 0)
- rc = mdo_xattr_set(env, obj, buf, name, 0, handle, capa);
- else if (buf->lb_buf == NULL && buf->lb_len == 0)
- rc = mdo_xattr_del(env, obj, name, handle, capa);
-
- RETURN(rc);
+ if (attr->la_valid != 0)
+ rc = mdd_attr_set_internal(env, obj, attr, handle, 0);
+ RETURN(rc);
}
/*
* This API is ported from mds_fix_attr but remove some unnecesssary stuff.
*/
static int mdd_fix_attr(const struct lu_env *env, struct mdd_object *obj,
- struct lu_attr *la, const struct md_attr *ma)
-{
- struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
- struct md_ucred *uc;
- int rc;
- ENTRY;
-
- if (!la->la_valid)
- RETURN(0);
-
- /* Do not permit change file type */
- if (la->la_valid & LA_TYPE)
- RETURN(-EPERM);
-
- /* They should not be processed by setattr */
- if (la->la_valid & (LA_NLINK | LA_RDEV | LA_BLKSIZE))
- RETURN(-EPERM);
-
- /* export destroy does not have ->le_ses, but we may want
- * to drop LUSTRE_SOM_FL. */
- if (!env->le_ses)
- RETURN(0);
-
- uc = md_ucred(env);
-
- rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
- if (rc)
- RETURN(rc);
-
- if (la->la_valid == LA_CTIME) {
- if (!(ma->ma_attr_flags & MDS_PERM_BYPASS))
- /* This is only for set ctime when rename's source is
- * on remote MDS. */
- rc = mdd_may_delete(env, NULL, obj,
- (struct md_attr *)ma, 1, 0);
- if (rc == 0 && la->la_ctime <= tmp_la->la_ctime)
- la->la_valid &= ~LA_CTIME;
- RETURN(rc);
- }
-
- if (la->la_valid == LA_ATIME) {
- /* This is atime only set for read atime update on close. */
- if (la->la_atime >= tmp_la->la_atime &&
- la->la_atime < (tmp_la->la_atime +
- mdd_obj2mdd_dev(obj)->mdd_atime_diff))
- la->la_valid &= ~LA_ATIME;
- RETURN(0);
- }
-
- /* Check if flags change. */
- if (la->la_valid & LA_FLAGS) {
- unsigned int oldflags = 0;
- unsigned int newflags = la->la_flags &
- (LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL);
-
- if ((uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CFS_CAP_FOWNER))
- RETURN(-EPERM);
-
- /* XXX: the IMMUTABLE and APPEND_ONLY flags can
- * only be changed by the relevant capability. */
- if (mdd_is_immutable(obj))
- oldflags |= LUSTRE_IMMUTABLE_FL;
- if (mdd_is_append(obj))
- oldflags |= LUSTRE_APPEND_FL;
- if ((oldflags ^ newflags) &&
- !mdd_capable(uc, CFS_CAP_LINUX_IMMUTABLE))
- RETURN(-EPERM);
-
- if (!S_ISDIR(tmp_la->la_mode))
- la->la_flags &= ~LUSTRE_DIRSYNC_FL;
- }
-
- if ((mdd_is_immutable(obj) || mdd_is_append(obj)) &&
- (la->la_valid & ~LA_FLAGS) &&
- !(ma->ma_attr_flags & MDS_PERM_BYPASS))
- RETURN(-EPERM);
-
- /* Check for setting the obj time. */
- if ((la->la_valid & (LA_MTIME | LA_ATIME | LA_CTIME)) &&
- !(la->la_valid & ~(LA_MTIME | LA_ATIME | LA_CTIME))) {
- if ((uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CFS_CAP_FOWNER)) {
- rc = mdd_permission_internal_locked(env, obj, tmp_la,
- MAY_WRITE,
- MOR_TGT_CHILD);
- if (rc)
- RETURN(rc);
- }
- }
-
- if (la->la_valid & LA_KILL_SUID) {
- la->la_valid &= ~LA_KILL_SUID;
- if ((tmp_la->la_mode & S_ISUID) &&
- !(la->la_valid & LA_MODE)) {
- la->la_mode = tmp_la->la_mode;
- la->la_valid |= LA_MODE;
- }
- la->la_mode &= ~S_ISUID;
- }
-
- if (la->la_valid & LA_KILL_SGID) {
- la->la_valid &= ~LA_KILL_SGID;
- if (((tmp_la->la_mode & (S_ISGID | S_IXGRP)) ==
- (S_ISGID | S_IXGRP)) &&
- !(la->la_valid & LA_MODE)) {
- la->la_mode = tmp_la->la_mode;
- la->la_valid |= LA_MODE;
- }
- la->la_mode &= ~S_ISGID;
- }
-
- /* Make sure a caller can chmod. */
- if (la->la_valid & LA_MODE) {
- if (!(ma->ma_attr_flags & MDS_PERM_BYPASS) &&
- (uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CFS_CAP_FOWNER))
- RETURN(-EPERM);
-
- if (la->la_mode == (cfs_umode_t) -1)
- la->la_mode = tmp_la->la_mode;
- else
- la->la_mode = (la->la_mode & S_IALLUGO) |
- (tmp_la->la_mode & ~S_IALLUGO);
-
- /* Also check the setgid bit! */
- if (!lustre_in_group_p(uc, (la->la_valid & LA_GID) ?
- la->la_gid : tmp_la->la_gid) &&
- !mdd_capable(uc, CFS_CAP_FSETID))
- la->la_mode &= ~S_ISGID;
- } else {
- la->la_mode = tmp_la->la_mode;
- }
-
- /* Make sure a caller can chown. */
- if (la->la_valid & LA_UID) {
- if (la->la_uid == (uid_t) -1)
- la->la_uid = tmp_la->la_uid;
- if (((uc->mu_fsuid != tmp_la->la_uid) ||
- (la->la_uid != tmp_la->la_uid)) &&
- !mdd_capable(uc, CFS_CAP_CHOWN))
- RETURN(-EPERM);
-
- /* If the user or group of a non-directory has been
- * changed by a non-root user, remove the setuid bit.
- * 19981026 David C Niemi <niemi@tux.org>
- *
- * Changed this to apply to all users, including root,
- * to avoid some races. This is the behavior we had in
- * 2.0. The check for non-root was definitely wrong
- * for 2.2 anyway, as it should have been using
- * CAP_FSETID rather than fsuid -- 19990830 SD. */
- if (((tmp_la->la_mode & S_ISUID) == S_ISUID) &&
- !S_ISDIR(tmp_la->la_mode)) {
- la->la_mode &= ~S_ISUID;
- la->la_valid |= LA_MODE;
- }
- }
-
- /* Make sure caller can chgrp. */
- if (la->la_valid & LA_GID) {
- if (la->la_gid == (gid_t) -1)
- la->la_gid = tmp_la->la_gid;
- if (((uc->mu_fsuid != tmp_la->la_uid) ||
- ((la->la_gid != tmp_la->la_gid) &&
- !lustre_in_group_p(uc, la->la_gid))) &&
- !mdd_capable(uc, CFS_CAP_CHOWN))
- RETURN(-EPERM);
-
- /* Likewise, if the user or group of a non-directory
- * has been changed by a non-root user, remove the
- * setgid bit UNLESS there is no group execute bit
- * (this would be a file marked for mandatory
- * locking). 19981026 David C Niemi <niemi@tux.org>
- *
- * Removed the fsuid check (see the comment above) --
- * 19990830 SD. */
- if (((tmp_la->la_mode & (S_ISGID | S_IXGRP)) ==
- (S_ISGID | S_IXGRP)) && !S_ISDIR(tmp_la->la_mode)) {
- la->la_mode &= ~S_ISGID;
- la->la_valid |= LA_MODE;
- }
- }
-
- /* For both Size-on-MDS case and truncate case,
- * "la->la_valid & (LA_SIZE | LA_BLOCKS)" are ture.
- * We distinguish them by "ma->ma_attr_flags & MDS_SOM".
- * For SOM case, it is true, the MAY_WRITE perm has been checked
- * when open, no need check again. For truncate case, it is false,
- * the MAY_WRITE perm should be checked here. */
- if (ma->ma_attr_flags & MDS_SOM) {
- /* For the "Size-on-MDS" setattr update, merge coming
- * attributes with the set in the inode. BUG 10641 */
- if ((la->la_valid & LA_ATIME) &&
- (la->la_atime <= tmp_la->la_atime))
- la->la_valid &= ~LA_ATIME;
-
- /* OST attributes do not have a priority over MDS attributes,
- * so drop times if ctime is equal. */
- if ((la->la_valid & LA_CTIME) &&
- (la->la_ctime <= tmp_la->la_ctime))
- la->la_valid &= ~(LA_MTIME | LA_CTIME);
- } else {
- if (la->la_valid & (LA_SIZE | LA_BLOCKS)) {
- if (!((ma->ma_attr_flags & MDS_OPEN_OWNEROVERRIDE) &&
- (uc->mu_fsuid == tmp_la->la_uid)) &&
- !(ma->ma_attr_flags & MDS_PERM_BYPASS)) {
- rc = mdd_permission_internal_locked(env, obj,
- tmp_la, MAY_WRITE,
- MOR_TGT_CHILD);
- if (rc)
- RETURN(rc);
- }
- }
- if (la->la_valid & LA_CTIME) {
- /* The pure setattr, it has the priority over what is
- * already set, do not drop it if ctime is equal. */
- if (la->la_ctime < tmp_la->la_ctime)
- la->la_valid &= ~(LA_ATIME | LA_MTIME |
- LA_CTIME);
- }
- }
-
- RETURN(0);
+ const struct lu_attr *oattr, struct lu_attr *la,
+ const unsigned long flags)
+{
+ struct lu_ucred *uc;
+ int rc = 0;
+ ENTRY;
+
+ if (!la->la_valid)
+ RETURN(0);
+
+ /* Do not permit change file type */
+ if (la->la_valid & LA_TYPE)
+ RETURN(-EPERM);
+
+ /* They should not be processed by setattr */
+ if (la->la_valid & (LA_NLINK | LA_RDEV | LA_BLKSIZE))
+ RETURN(-EPERM);
+
+ LASSERT(oattr != NULL);
+
+ /* export destroy does not have ->le_ses, but we may want
+ * to drop LUSTRE_SOM_FL. */
+ uc = lu_ucred_check(env);
+ if (uc == NULL)
+ RETURN(0);
+
+ if (la->la_valid == LA_CTIME) {
+ if (!(flags & MDS_PERM_BYPASS))
+ /* This is only for setting the ctime when the rename
+ * source is on a remote MDS. */
+ rc = mdd_may_delete(env, NULL, NULL, obj, oattr, NULL,
+ 1, 0);
+ if (rc == 0 && la->la_ctime <= oattr->la_ctime)
+ la->la_valid &= ~LA_CTIME;
+ RETURN(rc);
+ }
+
+ if (la->la_valid == LA_ATIME) {
+ /* This is atime only set for read atime update on close. */
+ if (la->la_atime >= oattr->la_atime &&
+ la->la_atime < (oattr->la_atime +
+ mdd_obj2mdd_dev(obj)->mdd_atime_diff))
+ la->la_valid &= ~LA_ATIME;
+ RETURN(0);
+ }
+
+ /* Check if flags change. */
+ if (la->la_valid & LA_FLAGS) {
+ unsigned int oldflags = oattr->la_flags &
+ (LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL);
+ unsigned int newflags = la->la_flags &
+ (LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL);
+
+ if ((uc->uc_fsuid != oattr->la_uid) &&
+ !md_capable(uc, CFS_CAP_FOWNER))
+ RETURN(-EPERM);
+
+ /* The IMMUTABLE and APPEND_ONLY flags can
+ * only be changed by the relevant capability. */
+ if ((oldflags ^ newflags) &&
+ !md_capable(uc, CFS_CAP_LINUX_IMMUTABLE))
+ RETURN(-EPERM);
+
+ if (!S_ISDIR(oattr->la_mode))
+ la->la_flags &= ~LUSTRE_DIRSYNC_FL;
+ }
+
+ if (oattr->la_flags & (LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL) &&
+ (la->la_valid & ~LA_FLAGS) &&
+ !(flags & MDS_PERM_BYPASS))
+ RETURN(-EPERM);
+
+ /* Check for setting the obj time. */
+ if ((la->la_valid & (LA_MTIME | LA_ATIME | LA_CTIME)) &&
+ !(la->la_valid & ~(LA_MTIME | LA_ATIME | LA_CTIME))) {
+ if ((uc->uc_fsuid != oattr->la_uid) &&
+ !md_capable(uc, CFS_CAP_FOWNER)) {
+ rc = mdd_permission_internal(env, obj, oattr,
+ MAY_WRITE);
+ if (rc)
+ RETURN(rc);
+ }
+ }
+
+ if (la->la_valid & LA_KILL_SUID) {
+ la->la_valid &= ~LA_KILL_SUID;
+ if ((oattr->la_mode & S_ISUID) &&
+ !(la->la_valid & LA_MODE)) {
+ la->la_mode = oattr->la_mode;
+ la->la_valid |= LA_MODE;
+ }
+ la->la_mode &= ~S_ISUID;
+ }
+
+ if (la->la_valid & LA_KILL_SGID) {
+ la->la_valid &= ~LA_KILL_SGID;
+ if (((oattr->la_mode & (S_ISGID | S_IXGRP)) ==
+ (S_ISGID | S_IXGRP)) &&
+ !(la->la_valid & LA_MODE)) {
+ la->la_mode = oattr->la_mode;
+ la->la_valid |= LA_MODE;
+ }
+ la->la_mode &= ~S_ISGID;
+ }
+
+ /* Make sure a caller can chmod. */
+ if (la->la_valid & LA_MODE) {
+ if (!(flags & MDS_PERM_BYPASS) &&
+ (uc->uc_fsuid != oattr->la_uid) &&
+ !md_capable(uc, CFS_CAP_FOWNER))
+ RETURN(-EPERM);
+
+ if (la->la_mode == (umode_t) -1)
+ la->la_mode = oattr->la_mode;
+ else
+ la->la_mode = (la->la_mode & S_IALLUGO) |
+ (oattr->la_mode & ~S_IALLUGO);
+
+ /* Also check the setgid bit! */
+ if (!lustre_in_group_p(uc, (la->la_valid & LA_GID) ?
+ la->la_gid : oattr->la_gid) &&
+ !md_capable(uc, CFS_CAP_FSETID))
+ la->la_mode &= ~S_ISGID;
+ } else {
+ la->la_mode = oattr->la_mode;
+ }
+
+ /* Make sure a caller can chown. */
+ if (la->la_valid & LA_UID) {
+ if (la->la_uid == (uid_t) -1)
+ la->la_uid = oattr->la_uid;
+ if (((uc->uc_fsuid != oattr->la_uid) ||
+ (la->la_uid != oattr->la_uid)) &&
+ !md_capable(uc, CFS_CAP_CHOWN))
+ RETURN(-EPERM);
+
+ /* If the user or group of a non-directory has been
+ * changed by a non-root user, remove the setuid bit.
+ * 19981026 David C Niemi <niemi@tux.org>
+ *
+ * Changed this to apply to all users, including root,
+ * to avoid some races. This is the behavior we had in
+ * 2.0. The check for non-root was definitely wrong
+ * for 2.2 anyway, as it should have been using
+ * CAP_FSETID rather than fsuid -- 19990830 SD. */
+ if (((oattr->la_mode & S_ISUID) == S_ISUID) &&
+ !S_ISDIR(oattr->la_mode)) {
+ la->la_mode &= ~S_ISUID;
+ la->la_valid |= LA_MODE;
+ }
+ }
+
+ /* Make sure caller can chgrp. */
+ if (la->la_valid & LA_GID) {
+ if (la->la_gid == (gid_t) -1)
+ la->la_gid = oattr->la_gid;
+ if (((uc->uc_fsuid != oattr->la_uid) ||
+ ((la->la_gid != oattr->la_gid) &&
+ !lustre_in_group_p(uc, la->la_gid))) &&
+ !md_capable(uc, CFS_CAP_CHOWN))
+ RETURN(-EPERM);
+
+ /* Likewise, if the user or group of a non-directory
+ * has been changed by a non-root user, remove the
+ * setgid bit UNLESS there is no group execute bit
+ * (this would be a file marked for mandatory
+ * locking). 19981026 David C Niemi <niemi@tux.org>
+ *
+ * Removed the fsuid check (see the comment above) --
+ * 19990830 SD. */
+ if (((oattr->la_mode & (S_ISGID | S_IXGRP)) ==
+ (S_ISGID | S_IXGRP)) && !S_ISDIR(oattr->la_mode)) {
+ la->la_mode &= ~S_ISGID;
+ la->la_valid |= LA_MODE;
+ }
+ }
+
+ /* For both the Size-on-MDS case and the truncate case,
+ * "la->la_valid & (LA_SIZE | LA_BLOCKS)" is true.
+ * We distinguish them by "flags & MDS_SOM".
+ * In the SOM case it is true: the MAY_WRITE permission was already
+ * checked at open time, so there is no need to check it again. In
+ * the truncate case it is false, so MAY_WRITE must be checked here. */
+ if (flags & MDS_SOM) {
+ /* For the "Size-on-MDS" setattr update, merge the incoming
+ * attributes with those already set in the inode. BUG 10641 */
+ if ((la->la_valid & LA_ATIME) &&
+ (la->la_atime <= oattr->la_atime))
+ la->la_valid &= ~LA_ATIME;
+
+ /* OST attributes do not have a priority over MDS attributes,
+ * so drop times if ctime is equal. */
+ if ((la->la_valid & LA_CTIME) &&
+ (la->la_ctime <= oattr->la_ctime))
+ la->la_valid &= ~(LA_MTIME | LA_CTIME);
+ } else {
+ if (la->la_valid & (LA_SIZE | LA_BLOCKS)) {
+ if (!((flags & MDS_OWNEROVERRIDE) &&
+ (uc->uc_fsuid == oattr->la_uid)) &&
+ !(flags & MDS_PERM_BYPASS)) {
+ rc = mdd_permission_internal(env, obj,
+ oattr, MAY_WRITE);
+ if (rc != 0)
+ RETURN(rc);
+ }
+ }
+ if (la->la_valid & LA_CTIME) {
+ /* A pure setattr takes priority over what is already
+ * set; do not drop it if the ctime is equal. */
+ if (la->la_ctime < oattr->la_ctime)
+ la->la_valid &= ~(LA_ATIME | LA_MTIME |
+ LA_CTIME);
+ }
+ }
+
+ RETURN(0);
}
/** Store a data change changelog record
* \param mdd_obj - mdd_object of change
* \param handle - transacion handle
*/
-static int mdd_changelog_data_store(const struct lu_env *env,
- struct mdd_device *mdd,
- enum changelog_rec_type type,
- int flags,
- struct mdd_object *mdd_obj,
- struct thandle *handle)
+int mdd_changelog_data_store(const struct lu_env *env, struct mdd_device *mdd,
+ enum changelog_rec_type type, int flags,
+ struct mdd_object *mdd_obj, struct thandle *handle)
{
- const struct lu_fid *tfid = mdo2fid(mdd_obj);
- struct llog_changelog_rec *rec;
- struct lu_buf *buf;
- int reclen;
- int rc;
+ const struct lu_ucred *uc = lu_ucred(env);
+ const struct lu_fid *tfid;
+ struct llog_changelog_rec *rec;
+ struct lu_buf *buf;
+ int reclen;
+ int rc;
/* Not recording */
if (!(mdd->mdd_cl.mc_flags & CLM_ON))
if ((mdd->mdd_cl.mc_mask & (1 << type)) == 0)
RETURN(0);
- LASSERT(handle != NULL);
LASSERT(mdd_obj != NULL);
+ LASSERT(handle != NULL);
+
+ tfid = mdo2fid(mdd_obj);
if ((type >= CL_MTIME) && (type <= CL_ATIME) &&
cfs_time_before_64(mdd->mdd_cl.mc_starttime, mdd_obj->mod_cltime)) {
RETURN(0);
}
- reclen = llog_data_len(sizeof(*rec));
- buf = mdd_buf_alloc(env, reclen);
- if (buf->lb_buf == NULL)
- RETURN(-ENOMEM);
- rec = (struct llog_changelog_rec *)buf->lb_buf;
-
- rec->cr.cr_flags = CLF_VERSION | (CLF_FLAGMASK & flags);
- rec->cr.cr_type = (__u32)type;
- rec->cr.cr_tfid = *tfid;
- rec->cr.cr_namelen = 0;
- mdd_obj->mod_cltime = cfs_time_current_64();
-
- rc = mdd_changelog_llog_write(mdd, rec, handle);
- if (rc < 0) {
- CERROR("changelog failed: rc=%d op%d t"DFID"\n",
- rc, type, PFID(tfid));
- return -EFAULT;
- }
+ flags = (flags & CLF_FLAGMASK) | CLF_VERSION;
+ if (uc != NULL && uc->uc_jobid[0] != '\0')
+ flags |= CLF_JOBID;
- return 0;
+ reclen = llog_data_len(changelog_rec_offset(flags & CLF_SUPPORTED));
+ buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_big_buf, reclen);
+ if (buf->lb_buf == NULL)
+ RETURN(-ENOMEM);
+ rec = buf->lb_buf;
+
+ rec->cr.cr_flags = flags;
+ rec->cr.cr_type = (__u32)type;
+ rec->cr.cr_tfid = *tfid;
+ rec->cr.cr_namelen = 0;
+ mdd_obj->mod_cltime = cfs_time_current_64();
+
+ if (flags & CLF_JOBID)
+ mdd_changelog_rec_ext_jobid(&rec->cr, uc->uc_jobid);
+
+ rc = mdd_changelog_store(env, mdd, rec, handle);
+
+ RETURN(rc);
}
-int mdd_changelog(const struct lu_env *env, enum changelog_rec_type type,
- int flags, struct md_object *obj)
+static int mdd_changelog(const struct lu_env *env, enum changelog_rec_type type,
+ int flags, struct md_object *obj)
{
struct thandle *handle;
struct mdd_object *mdd_obj = md2mdd_obj(obj);
int rc;
ENTRY;
- handle = mdd_trans_start(env, mdd);
-
+ handle = mdd_trans_create(env, mdd);
if (IS_ERR(handle))
- return(PTR_ERR(handle));
+ RETURN(PTR_ERR(handle));
+
+ rc = mdd_declare_changelog_store(env, mdd, NULL, NULL, handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ rc = mdd_trans_start(env, mdd, handle);
+ if (rc)
+ GOTO(stop, rc);
rc = mdd_changelog_data_store(env, mdd, type, flags, mdd_obj,
handle);
+stop:
mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
/**
- * Should be called with write lock held.
- *
- * \see mdd_lma_set_locked().
- */
-static int __mdd_lma_set(const struct lu_env *env, struct mdd_object *mdd_obj,
- const struct md_attr *ma, struct thandle *handle)
-{
- struct mdd_thread_info *info = mdd_env_info(env);
- struct lu_buf *buf;
- struct lustre_mdt_attrs *lma =
- (struct lustre_mdt_attrs *) info->mti_xattr_buf;
- int lmasize = sizeof(struct lustre_mdt_attrs);
- int rc = 0;
-
- ENTRY;
-
- /* Either HSM or SOM part is not valid, we need to read it before */
- if ((!ma->ma_valid) & (MA_HSM | MA_SOM)) {
- rc = mdd_get_md(env, mdd_obj, lma, &lmasize, XATTR_NAME_LMA);
- if (rc <= 0)
- RETURN(rc);
-
- lustre_lma_swab(lma);
- } else {
- memset(lma, 0, lmasize);
- }
-
- /* Copy HSM data */
- if (ma->ma_valid & MA_HSM) {
- lma->lma_flags |= ma->ma_hsm.mh_flags & HSM_FLAGS_MASK;
- lma->lma_compat |= LMAC_HSM;
- }
-
- /* Copy SOM data */
- if (ma->ma_valid & MA_SOM) {
- LASSERT(ma->ma_som != NULL);
- if (ma->ma_som->msd_ioepoch == IOEPOCH_INVAL) {
- lma->lma_compat &= ~LMAC_SOM;
- } else {
- lma->lma_compat |= LMAC_SOM;
- lma->lma_ioepoch = ma->ma_som->msd_ioepoch;
- lma->lma_som_size = ma->ma_som->msd_size;
- lma->lma_som_blocks = ma->ma_som->msd_blocks;
- lma->lma_som_mountid = ma->ma_som->msd_mountid;
- }
- }
-
- /* Copy FID */
- memcpy(&lma->lma_self_fid, mdo2fid(mdd_obj), sizeof(lma->lma_self_fid));
-
- lustre_lma_swab(lma);
- buf = mdd_buf_get(env, lma, lmasize);
- rc = __mdd_xattr_set(env, mdd_obj, buf, XATTR_NAME_LMA, 0, handle);
-
- RETURN(rc);
-}
-
-/**
* Save LMA extended attributes with data from \a ma.
*
* HSM and Size-On-MDS data will be extracted from \ma if they are valid, if
* not, LMA EA will be first read from disk, modified and write back.
*
*/
-static int mdd_lma_set_locked(const struct lu_env *env,
- struct mdd_object *mdd_obj,
- const struct md_attr *ma, struct thandle *handle)
-{
- int rc;
-
- mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = __mdd_lma_set(env, mdd_obj, ma, handle);
- mdd_write_unlock(env, mdd_obj);
- return rc;
-}
-
/* Precedence for choosing record type when multiple
* attributes change: setattr > mtime > ctime > atime
* (ctime changes when mtime does, plus chmod/chown.
struct mdd_device *mdd = mdo2mdd(obj);
int bits, type = 0;
- bits = (valid & ~(LA_CTIME|LA_MTIME|LA_ATIME)) ? 1 << CL_SETATTR : 0;
+ bits = (valid & LA_SIZE) ? 1 << CL_TRUNC : 0;
+ bits |= (valid & ~(LA_CTIME|LA_MTIME|LA_ATIME)) ? 1 << CL_SETATTR : 0;
bits |= (valid & LA_MTIME) ? 1 << CL_MTIME : 0;
bits |= (valid & LA_CTIME) ? 1 << CL_CTIME : 0;
bits |= (valid & LA_ATIME) ? 1 << CL_ATIME : 0;
bits = bits & mdd->mdd_cl.mc_mask;
+ /* This is an implementation limit rather than a protocol limit */
+ CLASSERT(CL_LAST <= sizeof(int) * 8);
if (bits == 0)
return 0;
- /* The record type is the lowest non-masked set bit */
- while (bits && ((bits & 1) == 0)) {
- bits = bits >> 1;
- type++;
+ /* The record type is the lowest non-masked set bit */
+ type = __ffs(bits);
+
+ /* FYI we only store the first CLF_FLAGMASK bits of la_valid */
+ return mdd_changelog_data_store(env, mdd, type, (int)valid,
+ md2mdd_obj(obj), handle);
+}
+
+static int mdd_declare_attr_set(const struct lu_env *env,
+ struct mdd_device *mdd,
+ struct mdd_object *obj,
+ const struct lu_attr *attr,
+ struct thandle *handle)
+{
+ int rc;
+
+ rc = mdo_declare_attr_set(env, obj, attr, handle);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (attr->la_valid & LA_MODE) {
+ mdd_read_lock(env, obj, MOR_TGT_CHILD);
+ rc = mdo_xattr_get(env, obj, &LU_BUF_NULL,
+ XATTR_NAME_ACL_ACCESS);
+ mdd_read_unlock(env, obj);
+ if (rc == -EOPNOTSUPP || rc == -ENODATA)
+ rc = 0;
+ else if (rc < 0)
+ return rc;
+
+ if (rc != 0) {
+ struct lu_buf *buf = mdd_buf_get(env, NULL, rc);
+ rc = mdo_declare_xattr_set(env, obj, buf,
+ XATTR_NAME_ACL_ACCESS, 0,
+ handle);
+ if (rc)
+ return rc;
+ }
}
+#endif
- /* FYI we only store the first CLF_FLAGMASK bits of la_valid */
- return mdd_changelog_data_store(env, mdd, type, (int)valid,
- md2mdd_obj(obj), handle);
+ rc = mdd_declare_changelog_store(env, mdd, NULL, NULL, handle);
+ return rc;
}
-/* set attr and LOV EA at once, return updated attr */
-static int mdd_attr_set(const struct lu_env *env, struct md_object *obj,
- const struct md_attr *ma)
+/*
+ * LU-3671
+ *
+ * permission changes may require sync operation, to mitigate performance
+ * impact, only do this for dir and when permission is reduced.
+ *
+ * For regular files, version is updated with permission change (see VBR), async
+ * permission won't cause any issue, while missing permission change on
+ * directory may affect accessibility of other objects after recovery.
+ */
+static inline bool permission_needs_sync(const struct lu_attr *old,
+ const struct lu_attr *new)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- struct mdd_device *mdd = mdo2mdd(obj);
- struct thandle *handle;
- struct lov_mds_md *lmm = NULL;
- struct llog_cookie *logcookies = NULL;
- int rc, lmm_size = 0, cookie_size = 0;
- struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
-#ifdef HAVE_QUOTA_SUPPORT
- struct obd_device *obd = mdd->mdd_obd_dev;
- struct mds_obd *mds = &obd->u.mds;
- unsigned int qnids[MAXQUOTAS] = { 0, 0 };
- unsigned int qoids[MAXQUOTAS] = { 0, 0 };
- int quota_opc = 0, block_count = 0;
- int inode_pending[MAXQUOTAS] = { 0, 0 };
- int block_pending[MAXQUOTAS] = { 0, 0 };
-#endif
- ENTRY;
+ if (!S_ISDIR(old->la_mode))
+ return false;
+
+ if (new->la_valid & (LA_UID | LA_GID))
+ return true;
+
+ if (new->la_valid & LA_MODE &&
+ new->la_mode & (S_ISUID | S_ISGID | S_ISVTX))
+ return true;
+
+ if ((new->la_valid & LA_MODE) &&
+ ((new->la_mode & old->la_mode) & S_IRWXUGO) !=
+ (old->la_mode & S_IRWXUGO))
+ return true;
+
+ return false;
+}
- *la_copy = ma->ma_attr;
- rc = mdd_fix_attr(env, mdd_obj, la_copy, ma);
- if (rc != 0)
- RETURN(rc);
+/* set attr and LOV EA at once, return updated attr */
+int mdd_attr_set(const struct lu_env *env, struct md_object *obj,
+ const struct md_attr *ma)
+{
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct mdd_device *mdd = mdo2mdd(obj);
+ struct thandle *handle;
+ struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
+ struct lu_attr *attr = MDD_ENV_VAR(env, cattr);
+ const struct lu_attr *la = &ma->ma_attr;
+ int rc;
+ ENTRY;
+
+ /* we do not use ->attr_set() for LOV/SOM/HSM EA any more */
+ LASSERT((ma->ma_valid & MA_LOV) == 0);
+ LASSERT((ma->ma_valid & MA_HSM) == 0);
+ LASSERT((ma->ma_valid & MA_SOM) == 0);
+
+ rc = mdd_la_get(env, mdd_obj, attr);
+ if (rc)
+ RETURN(rc);
+
+ *la_copy = ma->ma_attr;
+ rc = mdd_fix_attr(env, mdd_obj, attr, la_copy, ma->ma_attr_flags);
+ if (rc)
+ RETURN(rc);
/* setattr on "close" only change atime, or do nothing */
- if (ma->ma_valid == MA_INODE &&
- ma->ma_attr.la_valid == LA_ATIME && la_copy->la_valid == 0)
+ if (la->la_valid == LA_ATIME && la_copy->la_valid == 0)
RETURN(0);
- mdd_setattr_txn_param_build(env, obj, (struct md_attr *)ma,
- MDD_TXN_ATTR_SET_OP);
- handle = mdd_trans_start(env, mdd);
+ handle = mdd_trans_create(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- /*TODO: add lock here*/
- /* start a log jounal handle if needed */
- if (S_ISREG(mdd_object_type(mdd_obj)) &&
- ma->ma_attr.la_valid & (LA_UID | LA_GID)) {
- lmm_size = mdd_lov_mdsize(env, mdd);
- lmm = mdd_max_lmm_get(env, mdd);
- if (lmm == NULL)
- GOTO(cleanup, rc = -ENOMEM);
-
- rc = mdd_get_md_locked(env, mdd_obj, lmm, &lmm_size,
- XATTR_NAME_LOV);
-
- if (rc < 0)
- GOTO(cleanup, rc);
- }
- if (ma->ma_attr.la_valid & (LA_MTIME | LA_CTIME))
- CDEBUG(D_INODE, "setting mtime "LPU64", ctime "LPU64"\n",
- ma->ma_attr.la_mtime, ma->ma_attr.la_ctime);
-
-#ifdef HAVE_QUOTA_SUPPORT
- if (mds->mds_quota && la_copy->la_valid & (LA_UID | LA_GID)) {
- struct obd_export *exp = md_quota(env)->mq_exp;
- struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
-
- rc = mdd_la_get(env, mdd_obj, la_tmp, BYPASS_CAPA);
- if (!rc) {
- quota_opc = FSFILT_OP_SETATTR;
- mdd_quota_wrapper(la_copy, qnids);
- mdd_quota_wrapper(la_tmp, qoids);
- /* get file quota for new owner */
- lquota_chkquota(mds_quota_interface_ref, obd, exp,
- qnids, inode_pending, 1, NULL, 0,
- NULL, 0);
- block_count = (la_tmp->la_blocks + 7) >> 3;
- if (block_count) {
- void *data = NULL;
- mdd_data_get(env, mdd_obj, &data);
- /* get block quota for new owner */
- lquota_chkquota(mds_quota_interface_ref, obd,
- exp, qnids, block_pending,
- block_count, NULL,
- LQUOTA_FLAGS_BLK, data, 1);
- }
- }
- }
-#endif
+ rc = mdd_declare_attr_set(env, mdd, mdd_obj, la, handle);
+ if (rc)
+ GOTO(stop, rc);
- if (la_copy->la_valid & LA_FLAGS) {
- rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
- handle, 1);
- if (rc == 0)
- mdd_flags_xlate(mdd_obj, la_copy->la_flags);
- } else if (la_copy->la_valid) { /* setattr */
- rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
- handle, 1);
- /* journal chown/chgrp in llog, just like unlink */
- if (rc == 0 && lmm_size){
- cookie_size = mdd_lov_cookiesize(env, mdd);
- logcookies = mdd_max_cookie_get(env, mdd);
- if (logcookies == NULL)
- GOTO(cleanup, rc = -ENOMEM);
-
- if (mdd_setattr_log(env, mdd, ma, lmm, lmm_size,
- logcookies, cookie_size) <= 0)
- logcookies = NULL;
- }
- }
+ rc = mdd_trans_start(env, mdd, handle);
+ if (rc)
+ GOTO(stop, rc);
- if (rc == 0 && ma->ma_valid & MA_LOV) {
- cfs_umode_t mode;
+ if (mdd->mdd_sync_permission && permission_needs_sync(attr, la))
+ handle->th_sync = 1;
- mode = mdd_object_type(mdd_obj);
- if (S_ISREG(mode) || S_ISDIR(mode)) {
- rc = mdd_lsm_sanity_check(env, mdd_obj);
- if (rc)
- GOTO(cleanup, rc);
+ if (la->la_valid & (LA_MTIME | LA_CTIME))
+ CDEBUG(D_INODE, "setting mtime "LPU64", ctime "LPU64"\n",
+ la->la_mtime, la->la_ctime);
- rc = mdd_lov_set_md(env, NULL, mdd_obj, ma->ma_lmm,
- ma->ma_lmm_size, handle, 1);
- }
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
+ if (la_copy->la_valid & LA_FLAGS)
+ rc = mdd_attr_set_internal(env, mdd_obj, la_copy, handle, 1);
+ else if (la_copy->la_valid) /* setattr */
+ rc = mdd_attr_set_internal(env, mdd_obj, la_copy, handle, 1);
+ mdd_write_unlock(env, mdd_obj);
- }
- if (rc == 0 && ma->ma_valid & (MA_HSM | MA_SOM)) {
- cfs_umode_t mode;
+ if (rc == 0)
+ rc = mdd_attr_set_changelog(env, obj, handle, la->la_valid);
- mode = mdd_object_type(mdd_obj);
- if (S_ISREG(mode))
- rc = mdd_lma_set_locked(env, mdd_obj, ma, handle);
+ GOTO(stop, rc);
- }
-cleanup:
- if (rc == 0)
- rc = mdd_attr_set_changelog(env, obj, handle,
- ma->ma_attr.la_valid);
- mdd_trans_stop(env, mdd, rc, handle);
- if (rc == 0 && (lmm != NULL && lmm_size > 0 )) {
- /*set obd attr, if needed*/
- rc = mdd_lov_setattr_async(env, mdd_obj, lmm, lmm_size,
- logcookies);
- }
-#ifdef HAVE_QUOTA_SUPPORT
- if (quota_opc) {
- lquota_pending_commit(mds_quota_interface_ref, obd, qnids,
- inode_pending, 0);
- lquota_pending_commit(mds_quota_interface_ref, obd, qnids,
- block_pending, 1);
- /* Trigger dqrel/dqacq for original owner and new owner.
- * If failed, the next call for lquota_chkquota will
- * process it. */
- lquota_adjust(mds_quota_interface_ref, obd, qnids, qoids, rc,
- quota_opc);
- }
-#endif
- RETURN(rc);
+stop:
+ mdd_trans_stop(env, mdd, rc, handle);
+ return rc;
}
-int mdd_xattr_set_txn(const struct lu_env *env, struct mdd_object *obj,
- const struct lu_buf *buf, const char *name, int fl,
- struct thandle *handle)
+static int mdd_xattr_sanity_check(const struct lu_env *env,
+ struct mdd_object *obj,
+ const struct lu_attr *attr)
{
- int rc;
- ENTRY;
+ struct lu_ucred *uc = lu_ucred_assert(env);
+ ENTRY;
- mdd_write_lock(env, obj, MOR_TGT_CHILD);
- rc = __mdd_xattr_set(env, obj, buf, name, fl, handle);
- mdd_write_unlock(env, obj);
+ if (attr->la_flags & (LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL))
+ RETURN(-EPERM);
- RETURN(rc);
+ if ((uc->uc_fsuid != attr->la_uid) && !md_capable(uc, CFS_CAP_FOWNER))
+ RETURN(-EPERM);
+
+ RETURN(0);
}
-static int mdd_xattr_sanity_check(const struct lu_env *env,
- struct mdd_object *obj)
+static int mdd_declare_xattr_set(const struct lu_env *env,
+ struct mdd_device *mdd,
+ struct mdd_object *obj,
+ const struct lu_buf *buf,
+ const char *name,
+ int fl, struct thandle *handle)
{
- struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
- struct md_ucred *uc = md_ucred(env);
- int rc;
- ENTRY;
+ int rc;
- if (mdd_is_immutable(obj) || mdd_is_append(obj))
- RETURN(-EPERM);
+ rc = mdo_declare_xattr_set(env, obj, buf, name, fl, handle);
+ if (rc)
+ return rc;
- rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
- if (rc)
- RETURN(rc);
+ /* Only record user and layout xattr changes */
+ if (strncmp(XATTR_USER_PREFIX, name,
+ sizeof(XATTR_USER_PREFIX) - 1) == 0 ||
+ strcmp(XATTR_NAME_LOV, name) == 0) {
+ rc = mdd_declare_changelog_store(env, mdd, NULL, NULL, handle);
+ if (rc)
+ return rc;
+ }
- if ((uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CFS_CAP_FOWNER))
- RETURN(-EPERM);
+ /* If HSM data is modified, this could add a changelog */
+ if (strcmp(XATTR_NAME_HSM, name) == 0) {
+ rc = mdd_declare_changelog_store(env, mdd, NULL, NULL, handle);
+ if (rc)
+ return rc;
+ }
- RETURN(rc);
+ rc = mdd_declare_changelog_store(env, mdd, NULL, NULL, handle);
+ return rc;
}
+/*
+ * Compare current and future data of HSM EA and add a changelog if needed.
+ *
+ * Caller should have write-locked \param obj.
+ *
+ * \param buf - Future HSM EA content.
+ * \retval 0 if no changelog is needed or changelog was added properly.
+ * \retval -ve errno if there was a problem
+ */
+static int mdd_hsm_update_locked(const struct lu_env *env,
+ struct md_object *obj,
+ const struct lu_buf *buf,
+ struct thandle *handle)
+{
+ struct mdd_thread_info *info = mdd_env_info(env);
+ struct mdd_device *mdd = mdo2mdd(obj);
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct lu_buf *current_buf;
+ struct md_hsm *current_mh;
+ struct md_hsm *new_mh;
+ int rc;
+ ENTRY;
+
+ OBD_ALLOC_PTR(current_mh);
+ if (current_mh == NULL)
+ RETURN(-ENOMEM);
+
+ /* Read HSM attrs from disk */
+ CLASSERT(sizeof(struct hsm_attrs) <= sizeof(info->mti_xattr_buf));
+ current_buf = mdd_buf_get(env, info->mti_xattr_buf,
+ sizeof(info->mti_xattr_buf));
+ rc = mdo_xattr_get(env, mdd_obj, current_buf, XATTR_NAME_HSM);
+ rc = lustre_buf2hsm(current_buf->lb_buf, rc, current_mh);
+ if (rc < 0 && rc != -ENODATA)
+ GOTO(free, rc);
+ else if (rc == -ENODATA)
+ current_mh->mh_flags = 0;
+
+ /* Map future HSM xattr */
+ OBD_ALLOC_PTR(new_mh);
+ if (new_mh == NULL)
+ GOTO(free, rc = -ENOMEM);
+ lustre_buf2hsm(buf->lb_buf, buf->lb_len, new_mh);
+
+ /* If HSM flags are different, add a changelog */
+ rc = 0;
+ if (current_mh->mh_flags != new_mh->mh_flags) {
+ int flags = 0;
+ hsm_set_cl_event(&flags, HE_STATE);
+ if (new_mh->mh_flags & HS_DIRTY)
+ hsm_set_cl_flags(&flags, CLF_HSM_DIRTY);
+
+ rc = mdd_changelog_data_store(env, mdd, CL_HSM, flags, mdd_obj,
+ handle);
+ }
+
+ OBD_FREE_PTR(new_mh);
+free:
+ OBD_FREE_PTR(current_mh);
+ return(rc);
+}
+
+static int mdd_xattr_del(const struct lu_env *env, struct md_object *obj,
+ const char *name);
+
/**
* The caller should guarantee to update the object ctime
* after xattr_set if needed.
*/
static int mdd_xattr_set(const struct lu_env *env, struct md_object *obj,
- const struct lu_buf *buf, const char *name,
- int fl)
+ const struct lu_buf *buf, const char *name,
+ int fl)
+{
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct lu_attr *attr = MDD_ENV_VAR(env, cattr);
+ struct mdd_device *mdd = mdo2mdd(obj);
+ struct thandle *handle;
+ int rc;
+ ENTRY;
+
+ rc = mdd_la_get(env, mdd_obj, attr);
+ if (rc)
+ RETURN(rc);
+
+ rc = mdd_xattr_sanity_check(env, mdd_obj, attr);
+ if (rc)
+ RETURN(rc);
+
+ if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0 ||
+ strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) {
+ struct posix_acl *acl;
+
+ /* user may set empty ACL, which should be treated as removing
+ * ACL. */
+ acl = posix_acl_from_xattr(&init_user_ns, buf->lb_buf,
+ buf->lb_len);
+ if (acl == NULL) {
+ rc = mdd_xattr_del(env, obj, name);
+ RETURN(rc);
+ }
+ posix_acl_release(acl);
+ }
+
+ if (!strcmp(name, XATTR_NAME_ACL_ACCESS)) {
+ rc = mdd_acl_set(env, mdd_obj, attr, buf, fl);
+ RETURN(rc);
+ }
+
+ handle = mdd_trans_create(env, mdd);
+ if (IS_ERR(handle))
+ RETURN(PTR_ERR(handle));
+
+ rc = mdd_declare_xattr_set(env, mdd, mdd_obj, buf, name, fl, handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ rc = mdd_trans_start(env, mdd, handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
+
+ if (strcmp(XATTR_NAME_HSM, name) == 0) {
+ rc = mdd_hsm_update_locked(env, obj, buf, handle);
+ if (rc) {
+ mdd_write_unlock(env, mdd_obj);
+ GOTO(stop, rc);
+ }
+ }
+
+ rc = mdo_xattr_set(env, mdd_obj, buf, name, fl, handle);
+ mdd_write_unlock(env, mdd_obj);
+ if (rc)
+ GOTO(stop, rc);
+
+ if (strcmp(XATTR_NAME_LOV, name) == 0)
+ rc = mdd_changelog_data_store(env, mdd, CL_LAYOUT, 0, mdd_obj,
+ handle);
+ else if (strncmp(XATTR_USER_PREFIX, name,
+ sizeof(XATTR_USER_PREFIX) - 1) == 0 ||
+ strncmp(POSIX_ACL_XATTR_ACCESS, name,
+ sizeof(POSIX_ACL_XATTR_ACCESS) - 1) == 0 ||
+ strncmp(POSIX_ACL_XATTR_DEFAULT, name,
+ sizeof(POSIX_ACL_XATTR_DEFAULT) - 1) == 0)
+ rc = mdd_changelog_data_store(env, mdd, CL_XATTR, 0, mdd_obj,
+ handle);
+
+stop:
+ mdd_trans_stop(env, mdd, rc, handle);
+
+ RETURN(rc);
+}
+
+static int mdd_declare_xattr_del(const struct lu_env *env,
+ struct mdd_device *mdd,
+ struct mdd_object *obj,
+ const char *name,
+ struct thandle *handle)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- struct mdd_device *mdd = mdo2mdd(obj);
- struct thandle *handle;
- int rc;
- ENTRY;
-
- rc = mdd_xattr_sanity_check(env, mdd_obj);
- if (rc)
- RETURN(rc);
-
- mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
- /* security-replated changes may require sync */
- if (!strcmp(name, XATTR_NAME_ACL_ACCESS) &&
- mdd->mdd_sync_permission == 1)
- txn_param_sync(&mdd_env_info(env)->mti_param);
-
- handle = mdd_trans_start(env, mdd);
- if (IS_ERR(handle))
- RETURN(PTR_ERR(handle));
+ int rc;
- rc = mdd_xattr_set_txn(env, mdd_obj, buf, name, fl, handle);
+ rc = mdo_declare_xattr_del(env, obj, name, handle);
+ if (rc)
+ return rc;
- /* Only record user xattr changes */
- if ((rc == 0) && (strncmp("user.", name, 5) == 0))
- rc = mdd_changelog_data_store(env, mdd, CL_XATTR, 0, mdd_obj,
- handle);
- mdd_trans_stop(env, mdd, rc, handle);
+ /* Only record user xattr changes */
+ if ((strncmp(XATTR_USER_PREFIX, name,
+ sizeof(XATTR_USER_PREFIX) - 1) == 0))
+ rc = mdd_declare_changelog_store(env, mdd, NULL, NULL, handle);
- RETURN(rc);
+ return rc;
}
/**
* The caller should guarantee to update the object ctime
* after xattr_set if needed.
*/
-int mdd_xattr_del(const struct lu_env *env, struct md_object *obj,
- const char *name)
+static int mdd_xattr_del(const struct lu_env *env, struct md_object *obj,
+ const char *name)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- struct mdd_device *mdd = mdo2mdd(obj);
- struct thandle *handle;
- int rc;
- ENTRY;
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct lu_attr *attr = MDD_ENV_VAR(env, cattr);
+ struct mdd_device *mdd = mdo2mdd(obj);
+ struct thandle *handle;
+ int rc;
+ ENTRY;
- rc = mdd_xattr_sanity_check(env, mdd_obj);
- if (rc)
- RETURN(rc);
+ rc = mdd_la_get(env, mdd_obj, attr);
+ if (rc)
+ RETURN(rc);
- mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
- handle = mdd_trans_start(env, mdd);
+ rc = mdd_xattr_sanity_check(env, mdd_obj, attr);
+ if (rc)
+ RETURN(rc);
+
+ handle = mdd_trans_create(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
+ rc = mdd_declare_xattr_del(env, mdd, mdd_obj, name, handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ rc = mdd_trans_start(env, mdd, handle);
+ if (rc)
+ GOTO(stop, rc);
+
mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = mdo_xattr_del(env, mdd_obj, name, handle,
- mdd_object_capa(env, mdd_obj));
+ rc = mdo_xattr_del(env, mdd_obj, name, handle);
mdd_write_unlock(env, mdd_obj);
-
- /* Only record user xattr changes */
- if ((rc == 0) && (strncmp("user.", name, 5) != 0))
+ if (rc)
+ GOTO(stop, rc);
+
+ /* Only record system & user xattr changes */
+ if (strncmp(XATTR_USER_PREFIX, name,
+ sizeof(XATTR_USER_PREFIX) - 1) == 0 ||
+ strncmp(POSIX_ACL_XATTR_ACCESS, name,
+ sizeof(POSIX_ACL_XATTR_ACCESS) - 1) == 0 ||
+ strncmp(POSIX_ACL_XATTR_DEFAULT, name,
+ sizeof(POSIX_ACL_XATTR_DEFAULT) - 1) == 0)
rc = mdd_changelog_data_store(env, mdd, CL_XATTR, 0, mdd_obj,
handle);
+stop:
mdd_trans_stop(env, mdd, rc, handle);
RETURN(rc);
}
-/* partial unlink */
-static int mdd_ref_del(const struct lu_env *env, struct md_object *obj,
- struct md_attr *ma)
-{
- struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- struct mdd_device *mdd = mdo2mdd(obj);
- struct thandle *handle;
-#ifdef HAVE_QUOTA_SUPPORT
- struct obd_device *obd = mdd->mdd_obd_dev;
- struct mds_obd *mds = &obd->u.mds;
- unsigned int qids[MAXQUOTAS] = { 0, 0 };
- int quota_opc = 0;
-#endif
- int rc;
- ENTRY;
-
- /*
- * Check -ENOENT early here because we need to get object type
- * to calculate credits before transaction start
- */
- if (!mdd_object_exists(mdd_obj))
- RETURN(-ENOENT);
-
- LASSERT(mdd_object_exists(mdd_obj) > 0);
-
- rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
- if (rc)
- RETURN(rc);
-
- handle = mdd_trans_start(env, mdd);
- if (IS_ERR(handle))
- RETURN(-ENOMEM);
-
- mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
-
- rc = mdd_unlink_sanity_check(env, NULL, mdd_obj, ma);
- if (rc)
- GOTO(cleanup, rc);
-
- __mdd_ref_del(env, mdd_obj, handle, 0);
-
- if (S_ISDIR(lu_object_attr(&obj->mo_lu))) {
- /* unlink dot */
- __mdd_ref_del(env, mdd_obj, handle, 1);
- }
-
- LASSERT(ma->ma_attr.la_valid & LA_CTIME);
- la_copy->la_ctime = ma->ma_attr.la_ctime;
-
- la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_check_set_internal(env, mdd_obj, la_copy, handle, 0);
- if (rc)
- GOTO(cleanup, rc);
-
- rc = mdd_finish_unlink(env, mdd_obj, ma, handle);
-#ifdef HAVE_QUOTA_SUPPORT
- if (mds->mds_quota && ma->ma_valid & MA_INODE &&
- ma->ma_attr.la_nlink == 0 && mdd_obj->mod_count == 0) {
- quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
- mdd_quota_wrapper(&ma->ma_attr, qids);
- }
-#endif
-
-
- EXIT;
-cleanup:
- mdd_write_unlock(env, mdd_obj);
- mdd_trans_stop(env, mdd, rc, handle);
-#ifdef HAVE_QUOTA_SUPPORT
- if (quota_opc)
- /* Trigger dqrel on the owner of child. If failed,
- * the next call for lquota_chkquota will process it */
- lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
- quota_opc);
-#endif
- return rc;
-}
-
-/* partial operation */
-static int mdd_oc_sanity_check(const struct lu_env *env,
- struct mdd_object *obj,
- struct md_attr *ma)
-{
- int rc;
- ENTRY;
+/*
+ * read lov EA of an object
+ * return the lov EA in an allocated lu_buf
+ */
+int mdd_get_lov_ea(const struct lu_env *env, struct mdd_object *obj,
+ struct lu_buf *lmm_buf)
+{
+ struct lu_buf *buf = &mdd_env_info(env)->mti_big_buf;
+ int rc, bufsize;
+ ENTRY;
+
+repeat:
+ rc = mdo_xattr_get(env, obj, buf, XATTR_NAME_LOV);
+
+ if (rc == -ERANGE) {
+ /* mti_big_buf is allocated but is too small
+ * we need to increase it */
+ buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_big_buf,
+ buf->lb_len * 2);
+ if (buf->lb_buf == NULL)
+ GOTO(out, rc = -ENOMEM);
+ goto repeat;
+ }
+
+ if (rc < 0)
+ RETURN(rc);
+
+ if (rc == 0)
+ RETURN(-ENODATA);
+
+ bufsize = rc;
+ if (memcmp(buf, &LU_BUF_NULL, sizeof(*buf)) == 0) {
+ /* mti_big_buf was not allocated, so we have to
+ * allocate it based on the ea size */
+ buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_big_buf,
+ bufsize);
+ if (buf->lb_buf == NULL)
+ GOTO(out, rc = -ENOMEM);
+ goto repeat;
+ }
+
+ lu_buf_alloc(lmm_buf, bufsize);
+ if (lmm_buf->lb_buf == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ memcpy(lmm_buf->lb_buf, buf->lb_buf, bufsize);
+ rc = 0;
+ EXIT;
- switch (ma->ma_attr.la_mode & S_IFMT) {
- case S_IFREG:
- case S_IFDIR:
- case S_IFLNK:
- case S_IFCHR:
- case S_IFBLK:
- case S_IFIFO:
- case S_IFSOCK:
- rc = 0;
- break;
- default:
- rc = -EINVAL;
- break;
- }
- RETURN(rc);
+out:
+ if (rc < 0)
+ lu_buf_free(lmm_buf);
+ return rc;
}
-static int mdd_object_create(const struct lu_env *env,
- struct md_object *obj,
- const struct md_op_spec *spec,
- struct md_attr *ma)
+static int mdd_xattr_hsm_replace(const struct lu_env *env,
+ struct mdd_object *o, struct lu_buf *buf,
+ struct thandle *handle)
{
+ struct hsm_attrs *attrs;
+ __u32 hsm_flags;
+ int flags = 0;
+ int rc;
+ ENTRY;
- struct mdd_device *mdd = mdo2mdd(obj);
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- const struct lu_fid *pfid = spec->u.sp_pfid;
- struct thandle *handle;
-#ifdef HAVE_QUOTA_SUPPORT
- struct obd_device *obd = mdd->mdd_obd_dev;
- struct obd_export *exp = md_quota(env)->mq_exp;
- struct mds_obd *mds = &obd->u.mds;
- unsigned int qids[MAXQUOTAS] = { 0, 0 };
- int quota_opc = 0, block_count = 0;
- int inode_pending[MAXQUOTAS] = { 0, 0 };
- int block_pending[MAXQUOTAS] = { 0, 0 };
-#endif
- int rc = 0;
- ENTRY;
+ rc = mdo_xattr_set(env, o, buf, XATTR_NAME_HSM, LU_XATTR_REPLACE,
+ handle);
+ if (rc != 0)
+ RETURN(rc);
-#ifdef HAVE_QUOTA_SUPPORT
- if (mds->mds_quota) {
- quota_opc = FSFILT_OP_CREATE_PARTIAL_CHILD;
- mdd_quota_wrapper(&ma->ma_attr, qids);
- /* get file quota for child */
- lquota_chkquota(mds_quota_interface_ref, obd, exp,
- qids, inode_pending, 1, NULL, 0,
- NULL, 0);
- switch (ma->ma_attr.la_mode & S_IFMT) {
- case S_IFLNK:
- case S_IFDIR:
- block_count = 2;
- break;
- case S_IFREG:
- block_count = 1;
- break;
- }
- /* get block quota for child */
- if (block_count)
- lquota_chkquota(mds_quota_interface_ref, obd, exp,
- qids, block_pending, block_count,
- NULL, LQUOTA_FLAGS_BLK, NULL, 0);
- }
-#endif
+ attrs = buf->lb_buf;
+ hsm_flags = le32_to_cpu(attrs->hsm_flags);
+ if (!(hsm_flags & HS_RELEASED) || mdd_is_dead_obj(o))
+ RETURN(0);
- mdd_txn_param_build(env, mdd, MDD_TXN_OBJECT_CREATE_OP);
- handle = mdd_trans_start(env, mdd);
- if (IS_ERR(handle))
- GOTO(out_pending, rc = PTR_ERR(handle));
+ /* Add a changelog record for release. */
+ hsm_set_cl_event(&flags, HE_RELEASE);
+ rc = mdd_changelog_data_store(env, mdo2mdd(&o->mod_obj), CL_HSM,
+ flags, o, handle);
+ RETURN(rc);
+}
- mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = mdd_oc_sanity_check(env, mdd_obj, ma);
- if (rc)
- GOTO(unlock, rc);
+/*
+ * check if layout swapping between 2 objects is allowed
+ * the rules are:
+ * - only normal FIDs or non-system IGIFs
+ * - same type of objects
+ * - same owner/group (so quotas are still valid)
+ */
+static int mdd_layout_swap_allowed(const struct lu_env *env,
+ struct mdd_object *o1,
+ const struct lu_attr *attr1,
+ struct mdd_object *o2,
+ const struct lu_attr *attr2)
+{
+ const struct lu_fid *fid1, *fid2;
+ ENTRY;
- rc = mdd_object_create_internal(env, NULL, mdd_obj, ma, handle, spec);
- if (rc)
- GOTO(unlock, rc);
+ fid1 = mdo2fid(o1);
+ fid2 = mdo2fid(o2);
- if (spec->sp_cr_flags & MDS_CREATE_SLAVE_OBJ) {
- /* If creating the slave object, set slave EA here. */
- int lmv_size = spec->u.sp_ea.eadatalen;
- struct lmv_stripe_md *lmv;
+ if (!fid_is_norm(fid1) &&
+ (!fid_is_igif(fid1) || IS_ERR(mdd_links_get(env, o1))))
+ RETURN(-EBADF);
- lmv = (struct lmv_stripe_md *)spec->u.sp_ea.eadata;
- LASSERT(lmv != NULL && lmv_size > 0);
+ if (!fid_is_norm(fid2) &&
+ (!fid_is_igif(fid2) || IS_ERR(mdd_links_get(env, o2))))
+ RETURN(-EBADF);
- rc = __mdd_xattr_set(env, mdd_obj,
- mdd_buf_get_const(env, lmv, lmv_size),
- XATTR_NAME_LMV, 0, handle);
- if (rc)
- GOTO(unlock, rc);
+ if (mdd_object_type(o1) != mdd_object_type(o2)) {
+ if (S_ISDIR(mdd_object_type(o1)))
+ RETURN(-ENOTDIR);
+ if (S_ISREG(mdd_object_type(o1)))
+ RETURN(-EISDIR);
+ RETURN(-EBADF);
+ }
- rc = mdd_attr_set_internal(env, mdd_obj, &ma->ma_attr,
- handle, 0);
- } else {
-#ifdef CONFIG_FS_POSIX_ACL
- if (spec->sp_cr_flags & MDS_CREATE_RMT_ACL) {
- struct lu_buf *buf = &mdd_env_info(env)->mti_buf;
-
- buf->lb_buf = (void *)spec->u.sp_ea.eadata;
- buf->lb_len = spec->u.sp_ea.eadatalen;
- if ((buf->lb_len > 0) && (buf->lb_buf != NULL)) {
- rc = __mdd_acl_init(env, mdd_obj, buf,
- &ma->ma_attr.la_mode,
- handle);
- if (rc)
- GOTO(unlock, rc);
- else
- ma->ma_attr.la_valid |= LA_MODE;
- }
-
- pfid = spec->u.sp_ea.fid;
- }
-#endif
- rc = mdd_object_initialize(env, pfid, NULL, mdd_obj, ma, handle,
- spec);
- }
- EXIT;
-unlock:
- if (rc == 0)
- rc = mdd_attr_get_internal(env, mdd_obj, ma);
- mdd_write_unlock(env, mdd_obj);
+ if ((attr1->la_uid != attr2->la_uid) ||
+ (attr1->la_gid != attr2->la_gid))
+ RETURN(-EPERM);
- mdd_trans_stop(env, mdd, rc, handle);
-out_pending:
-#ifdef HAVE_QUOTA_SUPPORT
- if (quota_opc) {
- lquota_pending_commit(mds_quota_interface_ref, obd, qids,
- inode_pending, 0);
- lquota_pending_commit(mds_quota_interface_ref, obd, qids,
- block_pending, 1);
- /* Trigger dqacq on the owner of child. If failed,
- * the next call for lquota_chkquota will process it. */
- lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
- quota_opc);
- }
-#endif
- return rc;
+ RETURN(0);
}
-/* partial link */
-static int mdd_ref_add(const struct lu_env *env, struct md_object *obj,
- const struct md_attr *ma)
-{
- struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- struct mdd_device *mdd = mdo2mdd(obj);
- struct thandle *handle;
- int rc;
- ENTRY;
-
- mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
- handle = mdd_trans_start(env, mdd);
- if (IS_ERR(handle))
- RETURN(-ENOMEM);
-
- mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = mdd_link_sanity_check(env, NULL, NULL, mdd_obj);
- if (rc == 0)
- __mdd_ref_add(env, mdd_obj, handle);
- mdd_write_unlock(env, mdd_obj);
- if (rc == 0) {
- LASSERT(ma->ma_attr.la_valid & LA_CTIME);
- la_copy->la_ctime = ma->ma_attr.la_ctime;
-
- la_copy->la_valid = LA_CTIME;
- rc = mdd_attr_check_set_internal_locked(env, mdd_obj, la_copy,
- handle, 0);
- }
- mdd_trans_stop(env, mdd, 0, handle);
-
- RETURN(rc);
+/**
+ * swap layouts between 2 lustre objects
+ */
+static int mdd_swap_layouts(const struct lu_env *env, struct md_object *obj1,
+ struct md_object *obj2, __u64 flags)
+{
+ struct mdd_thread_info *info = mdd_env_info(env);
+ struct mdd_object *fst_o = md2mdd_obj(obj1);
+ struct mdd_object *snd_o = md2mdd_obj(obj2);
+ struct lu_attr *fst_la = MDD_ENV_VAR(env, cattr);
+ struct lu_attr *snd_la = MDD_ENV_VAR(env, tattr);
+ struct mdd_device *mdd = mdo2mdd(obj1);
+ struct lov_mds_md *fst_lmm, *snd_lmm;
+ struct lu_buf *fst_buf = &info->mti_buf[0];
+ struct lu_buf *snd_buf = &info->mti_buf[1];
+ struct lu_buf *fst_hsm_buf = &info->mti_buf[2];
+ struct lu_buf *snd_hsm_buf = &info->mti_buf[3];
+ struct ost_id *saved_oi = NULL;
+ struct thandle *handle;
+ __u16 fst_gen, snd_gen;
+ int fst_fl;
+ int rc;
+ int rc2;
+ ENTRY;
+
+ CLASSERT(ARRAY_SIZE(info->mti_buf) >= 4);
+ memset(info->mti_buf, 0, sizeof(info->mti_buf));
+
+ /* we have to sort the 2 obj, so locking will always
+ * be in the same order, even in case of 2 concurrent swaps */
+ rc = lu_fid_cmp(mdo2fid(fst_o), mdo2fid(snd_o));
+ if (rc == 0) /* same fid ? */
+ RETURN(-EPERM);
+
+ if (rc < 0)
+ swap(fst_o, snd_o);
+
+ rc = mdd_la_get(env, fst_o, fst_la);
+ if (rc != 0)
+ RETURN(rc);
+
+ rc = mdd_la_get(env, snd_o, snd_la);
+ if (rc != 0)
+ RETURN(rc);
+
+ /* check if layout swapping is allowed */
+ rc = mdd_layout_swap_allowed(env, fst_o, fst_la, snd_o, snd_la);
+ if (rc != 0)
+ RETURN(rc);
+
+ handle = mdd_trans_create(env, mdd);
+ if (IS_ERR(handle))
+ RETURN(PTR_ERR(handle));
+
+ /* objects are already sorted */
+ mdd_write_lock(env, fst_o, MOR_TGT_CHILD);
+ mdd_write_lock(env, snd_o, MOR_TGT_CHILD);
+
+ rc = mdd_get_lov_ea(env, fst_o, fst_buf);
+ if (rc < 0 && rc != -ENODATA)
+ GOTO(stop, rc);
+
+ rc = mdd_get_lov_ea(env, snd_o, snd_buf);
+ if (rc < 0 && rc != -ENODATA)
+ GOTO(stop, rc);
+
+ /* swapping 2 non-existent layouts is a success */
+ if (fst_buf->lb_buf == NULL && snd_buf->lb_buf == NULL)
+ GOTO(stop, rc = 0);
+
+ /* to ease inode migration between MDTs, it is better to
+ * start with the file having no layout (if any), so order the swap */
+ if (snd_buf->lb_buf == NULL) {
+ swap(fst_o, snd_o);
+ swap(fst_buf, snd_buf);
+ }
+
+ /* lmm and generation layout initialization */
+ if (fst_buf->lb_buf != NULL) {
+ fst_lmm = fst_buf->lb_buf;
+ fst_gen = le16_to_cpu(fst_lmm->lmm_layout_gen);
+ fst_fl = LU_XATTR_REPLACE;
+ } else {
+ fst_lmm = NULL;
+ fst_gen = 0;
+ fst_fl = LU_XATTR_CREATE;
+ }
+
+ snd_lmm = snd_buf->lb_buf;
+ snd_gen = le16_to_cpu(snd_lmm->lmm_layout_gen);
+
+ /* increase the generation layout numbers */
+ snd_gen++;
+ fst_gen++;
+
+ /* set the file specific informations in lmm */
+ if (fst_lmm != NULL) {
+ saved_oi = &info->mti_oa.o_oi;
+
+ *saved_oi = fst_lmm->lmm_oi;
+ fst_lmm->lmm_layout_gen = cpu_to_le16(snd_gen);
+ fst_lmm->lmm_oi = snd_lmm->lmm_oi;
+ snd_lmm->lmm_oi = *saved_oi;
+ } else {
+ if (snd_lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1))
+ snd_lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1_DEF);
+ else if (snd_lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))
+ snd_lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V3_DEF);
+ else
+ GOTO(stop, rc = -EPROTO);
+ }
+ snd_lmm->lmm_layout_gen = cpu_to_le16(fst_gen);
+
+ /* Prepare HSM attribute if it's required */
+ if (flags & SWAP_LAYOUTS_MDS_HSM) {
+ const int buflen = sizeof(struct hsm_attrs);
+
+ lu_buf_alloc(fst_hsm_buf, buflen);
+ lu_buf_alloc(snd_hsm_buf, buflen);
+ if (fst_hsm_buf->lb_buf == NULL || snd_hsm_buf->lb_buf == NULL)
+ GOTO(stop, rc = -ENOMEM);
+
+ /* Read HSM attribute */
+ rc = mdo_xattr_get(env, fst_o, fst_hsm_buf, XATTR_NAME_HSM);
+ if (rc < 0)
+ GOTO(stop, rc);
+
+ rc = mdo_xattr_get(env, snd_o, snd_hsm_buf, XATTR_NAME_HSM);
+ if (rc < 0)
+ GOTO(stop, rc);
+
+ rc = mdd_declare_xattr_set(env, mdd, fst_o, snd_hsm_buf,
+ XATTR_NAME_HSM, LU_XATTR_REPLACE,
+ handle);
+ if (rc < 0)
+ GOTO(stop, rc);
+
+ rc = mdd_declare_xattr_set(env, mdd, snd_o, fst_hsm_buf,
+ XATTR_NAME_HSM, LU_XATTR_REPLACE,
+ handle);
+ if (rc < 0)
+ GOTO(stop, rc);
+ }
+
+ /* prepare transaction */
+ rc = mdd_declare_xattr_set(env, mdd, fst_o, snd_buf, XATTR_NAME_LOV,
+ fst_fl, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ if (fst_buf->lb_buf != NULL)
+ rc = mdd_declare_xattr_set(env, mdd, snd_o, fst_buf,
+ XATTR_NAME_LOV, LU_XATTR_REPLACE,
+ handle);
+ else
+ rc = mdd_declare_xattr_del(env, mdd, snd_o, XATTR_NAME_LOV,
+ handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = mdd_trans_start(env, mdd, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ if (flags & SWAP_LAYOUTS_MDS_HSM) {
+ rc = mdd_xattr_hsm_replace(env, fst_o, snd_hsm_buf, handle);
+ if (rc < 0)
+ GOTO(stop, rc);
+
+ rc = mdd_xattr_hsm_replace(env, snd_o, fst_hsm_buf, handle);
+ if (rc < 0) {
+ rc2 = mdd_xattr_hsm_replace(env, fst_o, fst_hsm_buf,
+ handle);
+ if (rc2 < 0)
+ CERROR("%s: restore "DFID" HSM error: %d/%d\n",
+ mdd_obj_dev_name(fst_o),
+ PFID(mdo2fid(fst_o)), rc, rc2);
+ GOTO(stop, rc);
+ }
+ }
+
+ rc = mdo_xattr_set(env, fst_o, snd_buf, XATTR_NAME_LOV, fst_fl, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_MDS_HSM_SWAP_LAYOUTS))) {
+ rc = -EOPNOTSUPP;
+ } else {
+ if (fst_buf->lb_buf != NULL)
+ rc = mdo_xattr_set(env, snd_o, fst_buf, XATTR_NAME_LOV,
+ LU_XATTR_REPLACE, handle);
+ else
+ rc = mdo_xattr_del(env, snd_o, XATTR_NAME_LOV, handle);
+ }
+
+ if (rc != 0) {
+ int steps = 0;
+
+ /* failure on second file, but first was done, so we have
+ * to roll back first. */
+ if (fst_buf->lb_buf != NULL) {
+ fst_lmm->lmm_oi = *saved_oi;
+ fst_lmm->lmm_layout_gen = cpu_to_le16(fst_gen - 1);
+ rc2 = mdo_xattr_set(env, fst_o, fst_buf, XATTR_NAME_LOV,
+ LU_XATTR_REPLACE, handle);
+ } else {
+ rc2 = mdo_xattr_del(env, fst_o, XATTR_NAME_LOV, handle);
+ }
+ if (rc2 < 0)
+ goto do_lbug;
+
+ ++steps;
+ rc2 = mdd_xattr_hsm_replace(env, fst_o, fst_hsm_buf, handle);
+ if (rc2 < 0)
+ goto do_lbug;
+
+ ++steps;
+ rc2 = mdd_xattr_hsm_replace(env, snd_o, snd_hsm_buf, handle);
+
+ do_lbug:
+ if (rc2 < 0) {
+ /* very bad day */
+ CERROR("%s: unable to roll back layout swap. FIDs: "
+ DFID" and "DFID" error: %d/%d, steps: %d\n",
+ mdd_obj_dev_name(fst_o),
+ PFID(mdo2fid(snd_o)), PFID(mdo2fid(fst_o)),
+ rc, rc2, steps);
+ /* a solution to avoid journal commit is to panic,
+ * but it has strong consequences so we use LBUG to
+ * allow sysadmin to choose to panic or not
+ */
+ LBUG();
+ }
+ GOTO(stop, rc);
+ }
+
+ /* Issue one changelog record per file */
+ rc = mdd_changelog_data_store(env, mdd, CL_LAYOUT, 0, fst_o, handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ rc = mdd_changelog_data_store(env, mdd, CL_LAYOUT, 0, snd_o, handle);
+ if (rc)
+ GOTO(stop, rc);
+ EXIT;
+
+stop:
+ mdd_trans_stop(env, mdd, rc, handle);
+ mdd_write_unlock(env, snd_o);
+ mdd_write_unlock(env, fst_o);
+
+ lu_buf_free(fst_buf);
+ lu_buf_free(snd_buf);
+ lu_buf_free(fst_hsm_buf);
+ lu_buf_free(snd_hsm_buf);
+ return rc;
+}
+
+void mdd_object_make_hint(const struct lu_env *env, struct mdd_object *parent,
+ struct mdd_object *child, const struct lu_attr *attr,
+ const struct md_op_spec *spec,
+ struct dt_allocation_hint *hint)
+{
+ struct dt_object *np = parent ? mdd_object_child(parent) : NULL;
+ struct dt_object *nc = mdd_object_child(child);
+
+ memset(hint, 0, sizeof(*hint));
+
+ /* For striped directory, give striping EA to lod_ah_init, which will
+ * decide the stripe_offset and stripe count by it. */
+ if (S_ISDIR(attr->la_mode) &&
+ unlikely(spec != NULL && spec->sp_cr_flags & MDS_OPEN_HAS_EA)) {
+ hint->dah_eadata = spec->u.sp_ea.eadata;
+ hint->dah_eadata_len = spec->u.sp_ea.eadatalen;
+ } else {
+ hint->dah_eadata = NULL;
+ hint->dah_eadata_len = 0;
+ }
+
+ CDEBUG(D_INFO, DFID" eadata %p len %d\n", PFID(mdd_object_fid(child)),
+ hint->dah_eadata, hint->dah_eadata_len);
+ /* @hint will be initialized by underlying device. */
+ nc->do_ops->do_ah_init(env, hint, np, nc, attr->la_mode & S_IFMT);
}
/*
* do NOT or the MAY_*'s, you'll get the weakest
*/
-int accmode(const struct lu_env *env, struct lu_attr *la, int flags)
+int accmode(const struct lu_env *env, const struct lu_attr *la, int flags)
{
- int res = 0;
-
- /* Sadly, NFSD reopens a file repeatedly during operation, so the
- * "acc_mode = 0" allowance for newly-created files isn't honoured.
- * NFSD uses the MDS_OPEN_OWNEROVERRIDE flag to say that a file
- * owner can write to a file even if it is marked readonly to hide
- * its brokenness. (bug 5781) */
- if (flags & MDS_OPEN_OWNEROVERRIDE) {
- struct md_ucred *uc = md_ucred(env);
-
- if ((uc == NULL) || (uc->mu_valid == UCRED_INIT) ||
- (la->la_uid == uc->mu_fsuid))
- return 0;
- }
+ int res = 0;
- if (flags & FMODE_READ)
- res |= MAY_READ;
- if (flags & (FMODE_WRITE | MDS_OPEN_TRUNC | MDS_OPEN_APPEND))
- res |= MAY_WRITE;
- if (flags & MDS_FMODE_EXEC)
- res = MAY_EXEC;
- return res;
+ /* Sadly, NFSD reopens a file repeatedly during operation, so the
+ * "acc_mode = 0" allowance for newly-created files isn't honoured.
+ * NFSD uses the MDS_OPEN_OWNEROVERRIDE flag to say that a file
+ * owner can write to a file even if it is marked readonly to hide
+ * its brokenness. (bug 5781) */
+ if (flags & MDS_OPEN_OWNEROVERRIDE) {
+ struct lu_ucred *uc = lu_ucred_check(env);
+
+ if ((uc == NULL) || (la->la_uid == uc->uc_fsuid))
+ return 0;
+ }
+
+ if (flags & FMODE_READ)
+ res |= MAY_READ;
+ if (flags & (FMODE_WRITE | MDS_OPEN_TRUNC | MDS_OPEN_APPEND))
+ res |= MAY_WRITE;
+ if (flags & MDS_FMODE_EXEC)
+ res = MAY_EXEC;
+ return res;
}
static int mdd_open_sanity_check(const struct lu_env *env,
- struct mdd_object *obj, int flag)
+ struct mdd_object *obj,
+ const struct lu_attr *attr, int flag)
{
- struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
- int mode, rc;
- ENTRY;
-
- /* EEXIST check */
- if (mdd_is_dead_obj(obj))
- RETURN(-ENOENT);
+ int mode, rc;
+ ENTRY;
- rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
- if (rc)
- RETURN(rc);
+ /* EEXIST check */
+ if (mdd_is_dead_obj(obj))
+ RETURN(-ENOENT);
- if (S_ISLNK(tmp_la->la_mode))
- RETURN(-ELOOP);
+ if (S_ISLNK(attr->la_mode))
+ RETURN(-ELOOP);
- mode = accmode(env, tmp_la, flag);
+ mode = accmode(env, attr, flag);
- if (S_ISDIR(tmp_la->la_mode) && (mode & MAY_WRITE))
- RETURN(-EISDIR);
+ if (S_ISDIR(attr->la_mode) && (mode & MAY_WRITE))
+ RETURN(-EISDIR);
- if (!(flag & MDS_OPEN_CREATED)) {
- rc = mdd_permission_internal(env, obj, tmp_la, mode);
- if (rc)
- RETURN(rc);
- }
+ if (!(flag & MDS_OPEN_CREATED)) {
+ rc = mdd_permission_internal(env, obj, attr, mode);
+ if (rc)
+ RETURN(rc);
+ }
- if (S_ISFIFO(tmp_la->la_mode) || S_ISSOCK(tmp_la->la_mode) ||
- S_ISBLK(tmp_la->la_mode) || S_ISCHR(tmp_la->la_mode))
- flag &= ~MDS_OPEN_TRUNC;
+ if (S_ISFIFO(attr->la_mode) || S_ISSOCK(attr->la_mode) ||
+ S_ISBLK(attr->la_mode) || S_ISCHR(attr->la_mode))
+ flag &= ~MDS_OPEN_TRUNC;
- /* For writing append-only file must open it with append mode. */
- if (mdd_is_append(obj)) {
- if ((flag & FMODE_WRITE) && !(flag & MDS_OPEN_APPEND))
- RETURN(-EPERM);
- if (flag & MDS_OPEN_TRUNC)
- RETURN(-EPERM);
- }
+ /* For writing append-only file must open it with append mode. */
+ if (attr->la_flags & LUSTRE_APPEND_FL) {
+ if ((flag & FMODE_WRITE) && !(flag & MDS_OPEN_APPEND))
+ RETURN(-EPERM);
+ if (flag & MDS_OPEN_TRUNC)
+ RETURN(-EPERM);
+ }
#if 0
/*
* Now, flag -- O_NOATIME does not be packed by client.
*/
if (flag & O_NOATIME) {
- struct md_ucred *uc = md_ucred(env);
+ struct lu_ucred *uc = lu_ucred(env);
- if (uc && ((uc->mu_valid == UCRED_OLD) ||
- (uc->mu_valid == UCRED_NEW)) &&
- (uc->mu_fsuid != tmp_la->la_uid) &&
- !mdd_capable(uc, CFS_CAP_FOWNER))
- RETURN(-EPERM);
+ if (uc && ((uc->uc_valid == UCRED_OLD) ||
+ (uc->uc_valid == UCRED_NEW)) &&
+ (uc->uc_fsuid != attr->la_uid) &&
+ !md_capable(uc, CFS_CAP_FOWNER))
+ RETURN(-EPERM);
}
#endif
- RETURN(0);
+ RETURN(0);
}
static int mdd_open(const struct lu_env *env, struct md_object *obj,
- int flags)
+ int flags)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- int rc = 0;
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct lu_attr *attr = MDD_ENV_VAR(env, cattr);
+ int rc = 0;
- mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
- rc = mdd_open_sanity_check(env, mdd_obj, flags);
- if (rc == 0)
- mdd_obj->mod_count++;
+ rc = mdd_la_get(env, mdd_obj, attr);
+ if (rc != 0)
+ GOTO(out, rc);
- mdd_write_unlock(env, mdd_obj);
- return rc;
+ rc = mdd_open_sanity_check(env, mdd_obj, attr, flags);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ mdd_obj->mod_count++;
+ EXIT;
+out:
+ mdd_write_unlock(env, mdd_obj);
+ return rc;
}
-/* return md_attr back,
- * if it is last unlink then return lov ea + llog cookie*/
-int mdd_object_kill(const struct lu_env *env, struct mdd_object *obj,
- struct md_attr *ma)
+static int mdd_declare_close(const struct lu_env *env,
+ struct mdd_object *obj,
+ struct md_attr *ma,
+ struct thandle *handle)
{
- int rc = 0;
- ENTRY;
+ int rc;
- if (S_ISREG(mdd_object_type(obj))) {
- /* Return LOV & COOKIES unconditionally here. We clean evth up.
- * Caller must be ready for that. */
+ rc = orph_declare_index_delete(env, obj, handle);
+ if (rc)
+ return rc;
- rc = __mdd_lmm_get(env, obj, ma);
- if ((ma->ma_valid & MA_LOV))
- rc = mdd_unlink_log(env, mdo2mdd(&obj->mod_obj),
- obj, ma);
- }
- RETURN(rc);
+ return mdo_declare_destroy(env, obj, handle);
}
/*
* No permission check is needed.
*/
static int mdd_close(const struct lu_env *env, struct md_object *obj,
- struct md_attr *ma)
+ struct md_attr *ma, int mode)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct mdd_device *mdd = mdo2mdd(obj);
struct thandle *handle = NULL;
- int rc;
- int reset = 1;
-
-#ifdef HAVE_QUOTA_SUPPORT
- struct obd_device *obd = mdo2mdd(obj)->mdd_obd_dev;
- struct mds_obd *mds = &obd->u.mds;
- unsigned int qids[MAXQUOTAS] = { 0, 0 };
- int quota_opc = 0;
-#endif
+ int rc, is_orphan = 0;
ENTRY;
if (ma->ma_valid & MA_FLAGS && ma->ma_attr_flags & MDS_KEEP_ORPHAN) {
- mdd_obj->mod_count--;
+ mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
+ mdd_obj->mod_count--;
+ mdd_write_unlock(env, mdd_obj);
if (mdd_obj->mod_flags & ORPHAN_OBJ && !mdd_obj->mod_count)
CDEBUG(D_HA, "Object "DFID" is retained in orphan "
RETURN(0);
}
- /* check without any lock */
- if (mdd_obj->mod_count == 1 &&
- (mdd_obj->mod_flags & (ORPHAN_OBJ | DEAD_OBJ)) != 0) {
- again:
- rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
- if (rc)
- RETURN(rc);
- handle = mdd_trans_start(env, mdo2mdd(obj));
+ /* mdd_finish_unlink() will always set orphan object as DEAD_OBJ, but
+ * it might fail to add the object to orphan list (w/o ORPHAN_OBJ). */
+ /* check without any lock */
+ is_orphan = mdd_obj->mod_count == 1 &&
+ (mdd_obj->mod_flags & (ORPHAN_OBJ | DEAD_OBJ)) != 0;
+
+again:
+ if (is_orphan) {
+ handle = mdd_trans_create(env, mdo2mdd(obj));
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
+
+ rc = mdd_declare_close(env, mdd_obj, ma, handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ rc = mdd_declare_changelog_store(env, mdd, NULL, NULL, handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ rc = mdd_trans_start(env, mdo2mdd(obj), handle);
+ if (rc)
+ GOTO(stop, rc);
}
mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
- if (handle == NULL && mdd_obj->mod_count == 1 &&
- (mdd_obj->mod_flags & ORPHAN_OBJ) != 0) {
- mdd_write_unlock(env, mdd_obj);
- goto again;
- }
+ rc = mdd_la_get(env, mdd_obj, &ma->ma_attr);
+ if (rc != 0) {
+ CERROR("Failed to get lu_attr of "DFID": %d\n",
+ PFID(mdd_object_fid(mdd_obj)), rc);
+ GOTO(out, rc);
+ }
+
+ /* check again with lock */
+ is_orphan = (mdd_obj->mod_count == 1) &&
+ ((mdd_obj->mod_flags & (ORPHAN_OBJ | DEAD_OBJ)) != 0 ||
+ ma->ma_attr.la_nlink == 0);
+
+ if (is_orphan && handle == NULL) {
+ mdd_write_unlock(env, mdd_obj);
+ goto again;
+ }
+
+ mdd_obj->mod_count--; /*release open count */
+
+ if (!is_orphan)
+ GOTO(out, rc = 0);
+
+ /* Orphan object */
+ /* NB: Object maybe not in orphan list originally, it is rare case for
+ * mdd_finish_unlink() failure, in that case, the object doesn't have
+ * ORPHAN_OBJ flag */
+ if ((mdd_obj->mod_flags & ORPHAN_OBJ) != 0) {
+ /* remove link to object from orphan index */
+ LASSERT(handle != NULL);
+ rc = __mdd_orphan_del(env, mdd_obj, handle);
+ if (rc != 0) {
+ CERROR("%s: unable to delete "DFID" from orphan list: "
+ "rc = %d\n", lu_dev_name(mdd2lu_dev(mdd)),
+ PFID(mdd_object_fid(mdd_obj)), rc);
+ /* If object was not deleted from orphan list, do not
+ * destroy OSS objects, which will be done when next
+ * recovery. */
+ GOTO(out, rc);
+ }
+
+ CDEBUG(D_HA, "Object "DFID" is deleted from orphan "
+ "list, OSS objects to be destroyed.\n",
+ PFID(mdd_object_fid(mdd_obj)));
+ }
+
+ rc = mdo_destroy(env, mdd_obj, handle);
+
+ if (rc != 0) {
+ CERROR("%s: unable to delete "DFID" from orphan list: "
+ "rc = %d\n", lu_dev_name(mdd2lu_dev(mdd)),
+ PFID(mdd_object_fid(mdd_obj)), rc);
+ }
+ EXIT;
- /* release open count */
- mdd_obj->mod_count --;
+out:
+ mdd_write_unlock(env, mdd_obj);
- if (mdd_obj->mod_count == 0 && mdd_obj->mod_flags & ORPHAN_OBJ) {
- /* remove link to object from orphan index */
- rc = __mdd_orphan_del(env, mdd_obj, handle);
- if (rc == 0) {
- CDEBUG(D_HA, "Object "DFID" is deleted from orphan "
- "list, OSS objects to be destroyed.\n",
- PFID(mdd_object_fid(mdd_obj)));
- } else {
- CERROR("Object "DFID" can not be deleted from orphan "
- "list, maybe cause OST objects can not be "
- "destroyed (err: %d).\n",
- PFID(mdd_object_fid(mdd_obj)), rc);
- /* If object was not deleted from orphan list, do not
- * destroy OSS objects, which will be done when next
- * recovery. */
- GOTO(out, rc);
- }
- }
+ if (rc == 0 &&
+ (mode & (FMODE_WRITE | MDS_OPEN_APPEND | MDS_OPEN_TRUNC)) &&
+ !(ma->ma_valid & MA_FLAGS && ma->ma_attr_flags & MDS_RECOV_OPEN)) {
+ if (handle == NULL) {
+ handle = mdd_trans_create(env, mdo2mdd(obj));
+ if (IS_ERR(handle))
+ GOTO(stop, rc = PTR_ERR(handle));
- rc = mdd_iattr_get(env, mdd_obj, ma);
- /* Object maybe not in orphan list originally, it is rare case for
- * mdd_finish_unlink() failure. */
- if (rc == 0 && ma->ma_attr.la_nlink == 0) {
-#ifdef HAVE_QUOTA_SUPPORT
- if (mds->mds_quota) {
- quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
- mdd_quota_wrapper(&ma->ma_attr, qids);
- }
-#endif
- /* MDS_CLOSE_CLEANUP means destroy OSS objects by MDS. */
- if (ma->ma_valid & MA_FLAGS &&
- ma->ma_attr_flags & MDS_CLOSE_CLEANUP) {
- rc = mdd_lov_destroy(env, mdd, mdd_obj, &ma->ma_attr);
- } else {
- rc = mdd_object_kill(env, mdd_obj, ma);
- if (rc == 0)
- reset = 0;
+ rc = mdd_declare_changelog_store(env, mdd, NULL, NULL,
+ handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ rc = mdd_trans_start(env, mdo2mdd(obj), handle);
+ if (rc)
+ GOTO(stop, rc);
}
- if (rc != 0)
- CERROR("Error when prepare to delete Object "DFID" , "
- "which will cause OST objects can not be "
- "destroyed.\n", PFID(mdd_object_fid(mdd_obj)));
+ mdd_changelog_data_store(env, mdd, CL_CLOSE, mode,
+ mdd_obj, handle);
}
- EXIT;
-out:
- if (reset)
- ma->ma_valid &= ~(MA_LOV | MA_COOKIE);
+stop:
+ if (handle != NULL && !IS_ERR(handle))
+ mdd_trans_stop(env, mdd, rc, handle);
- mdd_write_unlock(env, mdd_obj);
- if (handle != NULL)
- mdd_trans_stop(env, mdo2mdd(obj), rc, handle);
-#ifdef HAVE_QUOTA_SUPPORT
- if (quota_opc)
- /* Trigger dqrel on the owner of child. If failed,
- * the next call for lquota_chkquota will process it */
- lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
- quota_opc);
-#endif
- return rc;
+ return rc;
}
/*
RETURN(rc);
}
-static int mdd_dir_page_build(const struct lu_env *env, struct mdd_device *mdd,
- int first, void *area, int nob,
- const struct dt_it_ops *iops, struct dt_it *it,
- __u64 *start, __u64 *end,
- struct lu_dirent **last, __u32 attr)
+static int mdd_dir_page_build(const struct lu_env *env, union lu_page *lp,
+ size_t nob, const struct dt_it_ops *iops,
+ struct dt_it *it, __u32 attr, void *arg)
{
- int result;
- __u64 hash = 0;
- struct lu_dirent *ent;
-
- if (first) {
- memset(area, 0, sizeof (struct lu_dirpage));
- area += sizeof (struct lu_dirpage);
- nob -= sizeof (struct lu_dirpage);
- }
+ struct lu_dirpage *dp = &lp->lp_dir;
+ void *area = dp;
+ int result;
+ __u64 hash = 0;
+ struct lu_dirent *ent;
+ struct lu_dirent *last = NULL;
+ struct lu_fid fid;
+ int first = 1;
+
+ if (nob < sizeof(*dp))
+ return -EINVAL;
+
+ memset(area, 0, sizeof (*dp));
+ area += sizeof (*dp);
+ nob -= sizeof (*dp);
ent = area;
do {
int len;
- int recsize;
+ size_t recsize;
- len = iops->key_size(env, it);
+ len = iops->key_size(env, it);
/* IAM iterator can return record with zero len. */
if (len == 0)
hash = iops->store(env, it);
if (unlikely(first)) {
first = 0;
- *start = hash;
+ dp->ldp_hash_start = cpu_to_le64(hash);
}
/* calculate max space required for lu_dirent */
recsize = lu_dirent_calc_size(len, attr);
if (nob >= recsize) {
- result = iops->rec(env, it, ent, attr);
+ result = iops->rec(env, it, (struct dt_rec *)ent, attr);
if (result == -ESTALE)
goto next;
if (result != 0)
/* osd might not able to pack all attributes,
* so recheck rec length */
recsize = le16_to_cpu(ent->lde_reclen);
- } else {
- /*
- * record doesn't fit into page, enlarge previous one.
- */
- if (*last) {
- (*last)->lde_reclen =
- cpu_to_le16(le16_to_cpu((*last)->lde_reclen) +
- nob);
- result = 0;
- } else
- result = -EINVAL;
+ if (le32_to_cpu(ent->lde_attrs) & LUDA_FID) {
+ fid_le_to_cpu(&fid, &ent->lde_fid);
+ if (fid_is_dot_lustre(&fid))
+ goto next;
+ }
+ } else {
+                        result = (last != NULL) ? 0 : -EINVAL;
goto out;
}
- *last = ent;
+ last = ent;
ent = (void *)ent + recsize;
nob -= recsize;
} while (result == 0);
out:
- *end = hash;
+ dp->ldp_hash_end = cpu_to_le64(hash);
+ if (last != NULL) {
+ if (last->lde_hash == dp->ldp_hash_end)
+ dp->ldp_flags |= cpu_to_le32(LDF_COLLIDE);
+ last->lde_reclen = 0; /* end mark */
+ }
+ if (result > 0)
+ /* end of directory */
+ dp->ldp_hash_end = cpu_to_le64(MDS_DIR_END_OFF);
+ else if (result < 0)
+ CWARN("build page failed: %d!\n", result);
return result;
}
-static int __mdd_readpage(const struct lu_env *env, struct mdd_object *obj,
- const struct lu_rdpg *rdpg)
-{
- struct dt_it *it;
- struct dt_object *next = mdd_object_child(obj);
- const struct dt_it_ops *iops;
- struct page *pg;
- struct lu_dirent *last = NULL;
- struct mdd_device *mdd = mdo2mdd(&obj->mod_obj);
- int i;
- int rc;
- int nob;
- __u64 hash_start;
- __u64 hash_end = 0;
-
- LASSERT(rdpg->rp_pages != NULL);
- LASSERT(next->do_index_ops != NULL);
-
- if (rdpg->rp_count <= 0)
- return -EFAULT;
-
- /*
- * iterate through directory and fill pages from @rdpg
- */
- iops = &next->do_index_ops->dio_it;
- it = iops->init(env, next, rdpg->rp_attrs, mdd_object_capa(env, obj));
- if (IS_ERR(it))
- return PTR_ERR(it);
-
- rc = iops->load(env, it, rdpg->rp_hash);
-
- if (rc == 0){
- /*
- * Iterator didn't find record with exactly the key requested.
- *
- * It is currently either
- *
- * - positioned above record with key less than
- * requested---skip it.
- *
- * - or not positioned at all (is in IAM_IT_SKEWED
- * state)---position it on the next item.
- */
- rc = iops->next(env, it);
- } else if (rc > 0)
- rc = 0;
-
- /*
- * At this point and across for-loop:
- *
- * rc == 0 -> ok, proceed.
- * rc > 0 -> end of directory.
- * rc < 0 -> error.
- */
- for (i = 0, nob = rdpg->rp_count; rc == 0 && nob > 0;
- i++, nob -= CFS_PAGE_SIZE) {
- LASSERT(i < rdpg->rp_npages);
- pg = rdpg->rp_pages[i];
- rc = mdd_dir_page_build(env, mdd, !i, cfs_kmap(pg),
- min_t(int, nob, CFS_PAGE_SIZE), iops,
- it, &hash_start, &hash_end, &last,
- rdpg->rp_attrs);
- if (rc != 0 || i == rdpg->rp_npages - 1) {
- if (last)
- last->lde_reclen = 0;
- }
- cfs_kunmap(pg);
- }
- if (rc > 0) {
- /*
- * end of directory.
- */
- hash_end = MDS_DIR_END_OFF;
- rc = 0;
- }
- if (rc == 0) {
- struct lu_dirpage *dp;
-
- dp = cfs_kmap(rdpg->rp_pages[0]);
- dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
- dp->ldp_hash_end = cpu_to_le64(hash_end);
- if (i == 0)
- /*
- * No pages were processed, mark this.
- */
- dp->ldp_flags |= LDF_EMPTY;
-
- dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
- cfs_kunmap(rdpg->rp_pages[0]);
- }
- iops->put(env, it);
- iops->fini(env, it);
-
- return rc;
-}
-
int mdd_readpage(const struct lu_env *env, struct md_object *obj,
const struct lu_rdpg *rdpg)
{
int rc;
ENTRY;
- LASSERT(mdd_object_exists(mdd_obj));
+ if (mdd_object_exists(mdd_obj) == 0) {
+                CERROR("%s: object "DFID" not found: rc = -2\n",
+                       mdd_obj_dev_name(mdd_obj), PFID(mdd_object_fid(mdd_obj)));
+ return -ENOENT;
+ }
mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
rc = mdd_readpage_sanity_check(env, mdd_obj);
struct page *pg;
struct lu_dirpage *dp;
- /*
- * According to POSIX, please do not return any entry to client:
- * even dot and dotdot should not be returned.
- */
- CWARN("readdir from dead object: "DFID"\n",
- PFID(mdd_object_fid(mdd_obj)));
+ /*
+ * According to POSIX, please do not return any entry to client:
+ * even dot and dotdot should not be returned.
+ */
+ CDEBUG(D_INODE, "readdir from dead object: "DFID"\n",
+ PFID(mdd_object_fid(mdd_obj)));
if (rdpg->rp_count <= 0)
GOTO(out_unlock, rc = -EFAULT);
LASSERT(rdpg->rp_pages != NULL);
pg = rdpg->rp_pages[0];
- dp = (struct lu_dirpage*)cfs_kmap(pg);
+ dp = (struct lu_dirpage *)kmap(pg);
memset(dp, 0 , sizeof(struct lu_dirpage));
dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
dp->ldp_hash_end = cpu_to_le64(MDS_DIR_END_OFF);
- dp->ldp_flags |= LDF_EMPTY;
- dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
- cfs_kunmap(pg);
- GOTO(out_unlock, rc = 0);
- }
-
- rc = __mdd_readpage(env, mdd_obj, rdpg);
-
- EXIT;
+ dp->ldp_flags = cpu_to_le32(LDF_EMPTY);
+ kunmap(pg);
+ GOTO(out_unlock, rc = LU_PAGE_SIZE);
+ }
+
+ rc = dt_index_walk(env, mdd_object_child(mdd_obj), rdpg,
+ mdd_dir_page_build, NULL);
+ if (rc >= 0) {
+ struct lu_dirpage *dp;
+
+ dp = kmap(rdpg->rp_pages[0]);
+ dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
+ if (rc == 0) {
+ /*
+ * No pages were processed, mark this for first page
+ * and send back.
+ */
+ dp->ldp_hash_end = cpu_to_le64(MDS_DIR_END_OFF);
+ dp->ldp_flags = cpu_to_le32(LDF_EMPTY);
+ rc = min_t(unsigned int, LU_PAGE_SIZE, rdpg->rp_count);
+ }
+ kunmap(rdpg->rp_pages[0]);
+ }
+
+ GOTO(out_unlock, rc);
out_unlock:
mdd_read_unlock(env, mdd_obj);
return rc;
static int mdd_object_sync(const struct lu_env *env, struct md_object *obj)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- struct dt_object *next;
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
- LASSERT(mdd_object_exists(mdd_obj));
- next = mdd_object_child(mdd_obj);
- return next->do_ops->do_object_sync(env, next);
+ if (mdd_object_exists(mdd_obj) == 0) {
+ int rc = -ENOENT;
+
+ CERROR("%s: object "DFID" not found: rc = %d\n",
+ mdd_obj_dev_name(mdd_obj),
+ PFID(mdd_object_fid(mdd_obj)), rc);
+ return rc;
+ }
+ return dt_object_sync(env, mdd_object_child(mdd_obj),
+ 0, OBD_OBJECT_EOF);
}
-static dt_obj_version_t mdd_version_get(const struct lu_env *env,
- struct md_object *obj)
+static int mdd_object_lock(const struct lu_env *env,
+ struct md_object *obj,
+ struct lustre_handle *lh,
+ struct ldlm_enqueue_info *einfo,
+ ldlm_policy_data_t *policy)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
-
- LASSERT(mdd_object_exists(mdd_obj));
- return do_version_get(env, mdd_object_child(mdd_obj));
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ return dt_object_lock(env, mdd_object_child(mdd_obj), lh,
+ einfo, policy);
}
-static void mdd_version_set(const struct lu_env *env, struct md_object *obj,
- dt_obj_version_t version)
+static int mdd_object_unlock(const struct lu_env *env,
+ struct md_object *obj,
+ struct ldlm_enqueue_info *einfo,
+ ldlm_policy_data_t *policy)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
-
- LASSERT(mdd_object_exists(mdd_obj));
- do_version_set(env, mdd_object_child(mdd_obj), version);
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ return dt_object_unlock(env, mdd_object_child(mdd_obj), einfo, policy);
}
const struct md_object_operations mdd_obj_ops = {
- .moo_permission = mdd_permission,
- .moo_attr_get = mdd_attr_get,
- .moo_attr_set = mdd_attr_set,
- .moo_xattr_get = mdd_xattr_get,
- .moo_xattr_set = mdd_xattr_set,
- .moo_xattr_list = mdd_xattr_list,
- .moo_xattr_del = mdd_xattr_del,
- .moo_object_create = mdd_object_create,
- .moo_ref_add = mdd_ref_add,
- .moo_ref_del = mdd_ref_del,
- .moo_open = mdd_open,
- .moo_close = mdd_close,
- .moo_readpage = mdd_readpage,
- .moo_readlink = mdd_readlink,
- .moo_changelog = mdd_changelog,
- .moo_capa_get = mdd_capa_get,
- .moo_object_sync = mdd_object_sync,
- .moo_version_get = mdd_version_get,
- .moo_version_set = mdd_version_set,
- .moo_path = mdd_path,
- .moo_file_lock = mdd_file_lock,
- .moo_file_unlock = mdd_file_unlock,
+ .moo_permission = mdd_permission,
+ .moo_attr_get = mdd_attr_get,
+ .moo_attr_set = mdd_attr_set,
+ .moo_xattr_get = mdd_xattr_get,
+ .moo_xattr_set = mdd_xattr_set,
+ .moo_xattr_list = mdd_xattr_list,
+ .moo_xattr_del = mdd_xattr_del,
+ .moo_swap_layouts = mdd_swap_layouts,
+ .moo_open = mdd_open,
+ .moo_close = mdd_close,
+ .moo_readpage = mdd_readpage,
+ .moo_readlink = mdd_readlink,
+ .moo_changelog = mdd_changelog,
+ .moo_object_sync = mdd_object_sync,
+ .moo_object_lock = mdd_object_lock,
+ .moo_object_unlock = mdd_object_unlock,
};