1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mdd/mdd_object.c
38 * Lustre Metadata Server (mdd) routines
40 * Author: Wang Di <wangdi@clusterfs.com>
44 # define EXPORT_SYMTAB
46 #define DEBUG_SUBSYSTEM S_MDS
48 #include <linux/module.h>
49 #include <linux/jbd.h>
51 #include <obd_class.h>
52 #include <obd_support.h>
53 #include <lprocfs_status.h>
54 /* fid_be_cpu(), fid_cpu_to_be(). */
55 #include <lustre_fid.h>
57 #include <lustre_param.h>
58 #include <linux/ldiskfs_fs.h>
59 #include <lustre_mds.h>
60 #include <lustre/lustre_idl.h>
62 #include "mdd_internal.h"
/* Forward declarations: the lu_object operations vector and
 * mdd_xattr_get() are defined later in this file but referenced
 * before their definitions. */
64 static const struct lu_object_operations mdd_lu_obj_ops;
66 static int mdd_xattr_get(const struct lu_env *env,
67 struct md_object *obj, struct lu_buf *buf,
/* Fetch body/data attributes of an existing object via the underlying
 * mdo layer.  The object must exist (asserted with its FID for
 * diagnosability). */
70 int mdd_data_get(const struct lu_env *env, struct mdd_object *obj,
73 LASSERTF(mdd_object_exists(obj), "FID is "DFID"\n",
74 PFID(mdd_object_fid(obj)));
75 mdo_data_get(env, obj, data);
/* Read the lu_attr of \a obj into \a la; \a capa authorizes the
 * access.  The object must already exist. */
79 int mdd_la_get(const struct lu_env *env, struct mdd_object *obj,
80 struct lu_attr *la, struct lustre_capa *capa)
82 LASSERTF(mdd_object_exists(obj), "FID is "DFID"\n",
83 PFID(mdd_object_fid(obj)));
84 return mdo_attr_get(env, obj, la, capa);
87 static void mdd_flags_xlate(struct mdd_object *obj, __u32 flags)
89 obj->mod_flags &= ~(APPEND_OBJ|IMMUTE_OBJ);
91 if (flags & LUSTRE_APPEND_FL)
92 obj->mod_flags |= APPEND_OBJ;
94 if (flags & LUSTRE_IMMUTABLE_FL)
95 obj->mod_flags |= IMMUTE_OBJ;
/* Return this thread's mdd_thread_info, kept in the env's lu_context
 * under mdd_thread_key.  Asserted non-NULL. */
98 struct mdd_thread_info *mdd_env_info(const struct lu_env *env)
100 struct mdd_thread_info *info;
102 info = lu_context_key_get(&env->le_ctx, &mdd_thread_key);
103 LASSERT(info != NULL);
/* Wrap an existing memory area in the per-thread lu_buf descriptor.
 * No allocation; ownership of \a area stays with the caller. */
107 struct lu_buf *mdd_buf_get(const struct lu_env *env, void *area, ssize_t len)
111 buf = &mdd_env_info(env)->mti_buf;
/* Free the payload of \a buf; NULL buffer or payload is a no-op.
 * Frees with OBD_VFREE or OBD_FREE to match how the payload was
 * allocated (see mdd_buf_alloc()). */
117 void mdd_buf_put(struct lu_buf *buf)
119 if (buf == NULL || buf->lb_buf == NULL)
122 OBD_VFREE(buf->lb_buf, buf->lb_len);
124 OBD_FREE(buf->lb_buf, buf->lb_len);
/* Const-input variant of mdd_buf_get(): wrap \a area in the
 * per-thread lu_buf, casting away const for the descriptor only. */
128 const struct lu_buf *mdd_buf_get_const(const struct lu_env *env,
129 const void *area, ssize_t len)
133 buf = &mdd_env_info(env)->mti_buf;
134 buf->lb_buf = (void *)area;
/* Threshold below which OBD_ALLOC (kmalloc) is used; larger requests
 * fall back to OBD_VMALLOC.  NOTE(review): the "16k" comment assumes
 * CFS_PAGE_SIZE == 4k — confirm on other page sizes. */
139 #define BUF_VMALLOC_SIZE (CFS_PAGE_SIZE<<2) /* 16k */
/* Return the per-thread "big buffer", reallocating it when the cached
 * one is smaller than \a len.  Existing contents are NOT preserved
 * (see mdd_buf_grow() for the preserving variant). */
140 struct lu_buf *mdd_buf_alloc(const struct lu_env *env, ssize_t len)
142 struct lu_buf *buf = &mdd_env_info(env)->mti_big_buf;
144 if ((len > buf->lb_len) && (buf->lb_buf != NULL)) {
146 OBD_VFREE(buf->lb_buf, buf->lb_len);
148 OBD_FREE(buf->lb_buf, buf->lb_len);
151 if (buf->lb_buf == NULL) {
153 if (buf->lb_len <= BUF_VMALLOC_SIZE) {
154 OBD_ALLOC(buf->lb_buf, buf->lb_len);
157 if (buf->lb_buf == NULL) {
158 OBD_VMALLOC(buf->lb_buf, buf->lb_len);
161 if (buf->lb_buf == NULL)
167 /** Increase the size of the \a mti_big_buf.
168 * preserves old data in buffer
169 * old buffer remains unchanged on error
170 * \retval 0 or -ENOMEM
/* Allocates a new buffer (vmalloc above BUF_VMALLOC_SIZE, kmalloc
 * otherwise), copies the old contents, frees the old payload with the
 * matching free routine, then installs the new descriptor. */
172 int mdd_buf_grow(const struct lu_env *env, ssize_t len)
174 struct lu_buf *oldbuf = &mdd_env_info(env)->mti_big_buf;
177 LASSERT(len >= oldbuf->lb_len);
178 if (len > BUF_VMALLOC_SIZE) {
179 OBD_VMALLOC(buf.lb_buf, len);
182 OBD_ALLOC(buf.lb_buf, len);
185 if (buf.lb_buf == NULL)
189 memcpy(buf.lb_buf, oldbuf->lb_buf, oldbuf->lb_len);
191 if (oldbuf->lb_vmalloc)
192 OBD_VFREE(oldbuf->lb_buf, oldbuf->lb_len);
194 OBD_FREE(oldbuf->lb_buf, oldbuf->lb_len);
/* Replace the old descriptor wholesale with the new one. */
196 memcpy(oldbuf, &buf, sizeof(buf));
/* Return a zeroed, thread-cached cookie buffer large enough for the
 * current maximum LOV cookie size; grown on demand.  Returns NULL if
 * allocation fails. */
201 struct llog_cookie *mdd_max_cookie_get(const struct lu_env *env,
202 struct mdd_device *mdd)
204 struct mdd_thread_info *mti = mdd_env_info(env);
207 max_cookie_size = mdd_lov_cookiesize(env, mdd);
/* Cached buffer too small: discard it so it is re-allocated below. */
208 if (unlikely(mti->mti_max_cookie_size < max_cookie_size)) {
209 if (mti->mti_max_cookie)
210 OBD_FREE(mti->mti_max_cookie, mti->mti_max_cookie_size);
211 mti->mti_max_cookie = NULL;
212 mti->mti_max_cookie_size = 0;
214 if (unlikely(mti->mti_max_cookie == NULL)) {
215 OBD_ALLOC(mti->mti_max_cookie, max_cookie_size);
216 if (likely(mti->mti_max_cookie != NULL))
217 mti->mti_max_cookie_size = max_cookie_size;
/* Always hand back a zeroed buffer, reused or fresh. */
219 if (likely(mti->mti_max_cookie != NULL))
220 memset(mti->mti_max_cookie, 0, mti->mti_max_cookie_size);
221 return mti->mti_max_cookie;
224 struct lov_mds_md *mdd_max_lmm_get(const struct lu_env *env,
225 struct mdd_device *mdd)
227 struct mdd_thread_info *mti = mdd_env_info(env);
230 max_lmm_size = mdd_lov_mdsize(env, mdd);
231 if (unlikely(mti->mti_max_lmm_size < max_lmm_size)) {
232 if (mti->mti_max_lmm)
233 OBD_FREE(mti->mti_max_lmm, mti->mti_max_lmm_size);
234 mti->mti_max_lmm = NULL;
235 mti->mti_max_lmm_size = 0;
237 if (unlikely(mti->mti_max_lmm == NULL)) {
238 OBD_ALLOC(mti->mti_max_lmm, max_lmm_size);
239 if (unlikely(mti->mti_max_lmm != NULL))
240 mti->mti_max_lmm_size = max_lmm_size;
242 return mti->mti_max_lmm;
/* lu_device_operations allocation hook: allocate an mdd_object,
 * initialize its embedded lu_object and install the mdd method
 * tables.  Returns NULL on allocation failure (implied by the
 * mdd_obj != NULL guard). */
245 struct lu_object *mdd_object_alloc(const struct lu_env *env,
246 const struct lu_object_header *hdr,
249 struct mdd_object *mdd_obj;
251 OBD_ALLOC_PTR(mdd_obj);
252 if (mdd_obj != NULL) {
255 o = mdd2lu_obj(mdd_obj);
256 lu_object_init(o, NULL, d);
257 mdd_obj->mod_obj.mo_ops = &mdd_obj_ops;
258 mdd_obj->mod_obj.mo_dir_ops = &mdd_dir_ops;
259 mdd_obj->mod_count = 0;
260 o->lo_ops = &mdd_lu_obj_ops;
/* loo_object_init: allocate the underlying (child device) object and
 * stack it beneath this mdd object in the lu_object compound.
 * NOTE(review): the second parameter is named "_" — presumably an
 * unused lu_object_conf; confirm against the ops signature. */
267 static int mdd_object_init(const struct lu_env *env, struct lu_object *o,
268 const struct lu_object_conf *_)
270 struct mdd_device *d = lu2mdd_dev(o->lo_dev);
271 struct mdd_object *mdd_obj = lu2mdd_obj(o);
272 struct lu_object *below;
273 struct lu_device *under;
276 mdd_obj->mod_cltime = 0;
277 under = &d->mdd_child->dd_lu_dev;
278 below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
279 mdd_pdlock_init(mdd_obj);
283 lu_object_add(o, below);
/* loo_object_start: once the object is fully stacked, load its flag
 * state from disk if it exists on storage. */
288 static int mdd_object_start(const struct lu_env *env, struct lu_object *o)
290 if (lu_object_exists(o))
291 return mdd_get_flags(env, lu2mdd_obj(o));
/* loo_object_free: release the mdd_object allocated in
 * mdd_object_alloc(). */
296 static void mdd_object_free(const struct lu_env *env, struct lu_object *o)
298 struct mdd_object *mdd = lu2mdd_obj(o);
304 static int mdd_object_print(const struct lu_env *env, void *cookie,
305 lu_printer_t p, const struct lu_object *o)
307 return (*p)(env, cookie, LUSTRE_MDD_NAME"-object@%p", o);
/* lu_object operations vector for mdd objects; forward-declared at
 * the top of this file. */
310 static const struct lu_object_operations mdd_lu_obj_ops = {
311 .loo_object_init = mdd_object_init,
312 .loo_object_start = mdd_object_start,
313 .loo_object_free = mdd_object_free,
314 .loo_object_print = mdd_object_print,
317 struct mdd_object *mdd_object_find(const struct lu_env *env,
318 struct mdd_device *d,
319 const struct lu_fid *f)
321 return md2mdd_obj(md_object_find_slice(env, &d->mdd_md_dev, f));
/* Resolve \a path (relative to the filesystem root) to a FID by
 * walking it component by component with mdd_lookup().  Used by
 * mdd_path_current() to verify a reconstructed path. */
324 static int mdd_path2fid(const struct lu_env *env, struct mdd_device *mdd,
325 const char *path, struct lu_fid *fid)
328 struct lu_fid *f = &mdd_env_info(env)->mti_fid;
329 struct mdd_object *obj;
330 struct lu_name *lname = &mdd_env_info(env)->mti_name;
335 /* temp buffer for path element */
336 buf = mdd_buf_alloc(env, PATH_MAX);
337 if (buf->lb_buf == NULL)
340 lname->ln_name = name = buf->lb_buf;
341 lname->ln_namelen = 0;
342 *f = mdd->mdd_root_fid;
/* Copy one path component (up to '/' or NUL) into the name buffer. */
349 while (*path != '/' && *path != '\0') {
357 /* find obj corresponding to fid */
358 obj = mdd_object_find(env, mdd, f);
360 GOTO(out, rc = -EREMOTE);
/* NOTE(review): PTR_ERR() of an ERR_PTR is already negative, so
 * -PTR_ERR(obj) yields a positive value here — confirm the intended
 * error-sign convention. */
362 GOTO(out, rc = -PTR_ERR(obj));
363 /* get child fid from parent and name */
364 rc = mdd_lookup(env, &obj->mod_obj, lname, f, NULL);
365 mdd_object_put(env, obj);
370 lname->ln_namelen = 0;
379 /** The maximum depth that fid2path() will search.
380 * This is limited only because we want to store the fids for
381 * historical path lookup purposes.
383 #define MAX_PATH_DEPTH 100
385 /** mdd_path() lookup structure. */
386 struct path_lookup_info {
387 __u64 pli_recno; /**< history point */
388 struct lu_fid pli_fid; /**< scratch fid used to re-verify the path */
389 struct lu_fid pli_fids[MAX_PATH_DEPTH]; /**< path, in fids */
390 struct mdd_object *pli_mdd_obj; /**< object whose path is wanted */
391 char *pli_path; /**< full path */
393 int pli_linkno; /**< which hardlink to follow */
394 int pli_fidcount; /**< number of \a pli_fids */
/* Build the current pathname of pli->pli_mdd_obj by walking link EAs
 * up to the root, packing components backwards into pli->pli_path,
 * then re-resolving the path forward to detect concurrent renames
 * (returns -EAGAIN in that case so the caller can retry). */
397 static int mdd_path_current(const struct lu_env *env,
398 struct path_lookup_info *pli)
400 struct mdd_device *mdd = mdo2mdd(&pli->pli_mdd_obj->mod_obj);
401 struct mdd_object *mdd_obj;
402 struct lu_buf *buf = NULL;
403 struct link_ea_header *leh;
404 struct link_ea_entry *lee;
405 struct lu_name *tmpname = &mdd_env_info(env)->mti_name;
406 struct lu_fid *tmpfid = &mdd_env_info(env)->mti_fid;
/* Start writing at the end of the path buffer; names are prepended. */
412 ptr = pli->pli_path + pli->pli_pathlen - 1;
415 pli->pli_fidcount = 0;
416 pli->pli_fids[0] = *(struct lu_fid *)mdd_object_fid(pli->pli_mdd_obj);
418 while (!mdd_is_root(mdd, &pli->pli_fids[pli->pli_fidcount])) {
419 mdd_obj = mdd_object_find(env, mdd,
420 &pli->pli_fids[pli->pli_fidcount]);
422 GOTO(out, rc = -EREMOTE);
/* NOTE(review): -PTR_ERR() gives a positive value for an ERR_PTR —
 * confirm intended error sign (same pattern as mdd_path2fid()). */
424 GOTO(out, rc = -PTR_ERR(mdd_obj));
425 rc = lu_object_exists(&mdd_obj->mod_obj.mo_lu);
427 mdd_object_put(env, mdd_obj);
431 /* Do I need to error out here? */
436 /* Get parent fid and object name */
437 mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
438 buf = mdd_links_get(env, mdd_obj);
439 mdd_read_unlock(env, mdd_obj);
440 mdd_object_put(env, mdd_obj);
442 GOTO(out, rc = PTR_ERR(buf));
445 lee = (struct link_ea_entry *)(leh + 1); /* link #0 */
446 mdd_lee_unpack(lee, &reclen, tmpname, tmpfid);
448 /* If set, use link #linkno for path lookup, otherwise use
449 link #0. Only do this for the final path element. */
450 if ((pli->pli_fidcount == 0) &&
451 (pli->pli_linkno < leh->leh_reccount)) {
453 for (count = 0; count < pli->pli_linkno; count++) {
454 lee = (struct link_ea_entry *)
455 ((char *)lee + reclen);
456 mdd_lee_unpack(lee, &reclen, tmpname, tmpfid);
458 if (pli->pli_linkno < leh->leh_reccount - 1)
459 /* indicate to user there are more links */
463 /* Pack the name in the end of the buffer */
464 ptr -= tmpname->ln_namelen;
465 if (ptr - 1 <= pli->pli_path)
466 GOTO(out, rc = -EOVERFLOW);
467 strncpy(ptr, tmpname->ln_name, tmpname->ln_namelen);
470 /* Store the parent fid for historic lookup */
471 if (++pli->pli_fidcount >= MAX_PATH_DEPTH)
472 GOTO(out, rc = -EOVERFLOW);
473 pli->pli_fids[pli->pli_fidcount] = *tmpfid;
476 /* Verify that our path hasn't changed since we started the lookup */
477 rc = mdd_path2fid(env, mdd, ptr, &pli->pli_fid);
479 CDEBUG(D_INFO, "mdd_path2fid(%s) failed %d\n", ptr, rc);
480 GOTO (out, rc = -EAGAIN);
482 if (!lu_fid_eq(&pli->pli_fids[0], &pli->pli_fid)) {
483 CDEBUG(D_INFO, "mdd_path2fid(%s) found another FID o="DFID
484 " n="DFID"\n", ptr, PFID(&pli->pli_fids[0]),
485 PFID(&pli->pli_fid));
486 GOTO(out, rc = -EAGAIN);
/* Shift the completed path to the front of the caller's buffer. */
489 memmove(pli->pli_path, ptr, pli->pli_path + pli->pli_pathlen - ptr);
493 if (buf && !IS_ERR(buf) && buf->lb_vmalloc)
494 /* if we vmalloced a large buffer drop it */
500 /* Returns the full path to this fid, as of changelog record recno. */
/* md_object fid2path entry point: fills \a path (length \a pathlen)
 * for \a obj, retrying mdd_path_current() while it reports -EAGAIN
 * (concurrent rename).  *linkno selects which hardlink to follow and
 * is updated for the caller on return. */
501 static int mdd_path(const struct lu_env *env, struct md_object *obj,
502 char *path, int pathlen, __u64 recno, int *linkno)
504 struct path_lookup_info *pli;
512 if (mdd_is_root(mdo2mdd(obj), mdd_object_fid(md2mdd_obj(obj)))) {
522 pli->pli_mdd_obj = md2mdd_obj(obj);
523 pli->pli_recno = recno;
524 pli->pli_path = path;
525 pli->pli_pathlen = pathlen;
526 pli->pli_linkno = *linkno;
528 /* Retry multiple times in case file is being moved */
529 while (tries-- && rc == -EAGAIN)
530 rc = mdd_path_current(env, pli);
532 #if 0 /* We need old path names only for replication */
533 /* For historical path lookup, the current links may not have existed
534 * at "recno" time. We must switch over to earlier links/parents
535 * by using the changelog records. If the earlier parent doesn't
536 * exist, we must search back through the changelog to reconstruct
537 * its parents, then check if it exists, etc.
538 * We may ignore this problem for the initial implementation and
539 * state that an "original" hardlink must still exist for us to find
540 * historic path name. */
541 if (pli->pli_recno != -1)
542 rc = mdd_path_historic(env, pli);
545 /* return next link index to caller */
546 *linkno = pli->pli_linkno;
/* Refresh obj->mod_flags from the on-disk attributes: translate
 * LUSTRE_*_FL bits and mark single-link directories with MNLINK_OBJ. */
553 int mdd_get_flags(const struct lu_env *env, struct mdd_object *obj)
555 struct lu_attr *la = &mdd_env_info(env)->mti_la;
559 rc = mdd_la_get(env, obj, la, BYPASS_CAPA);
561 mdd_flags_xlate(obj, la->la_flags);
562 if (S_ISDIR(la->la_mode) && la->la_nlink == 1)
563 obj->mod_flags |= MNLINK_OBJ;
568 /* get only inode attributes */
/* Skips the fetch entirely if MA_INODE is already valid in \a ma. */
569 int mdd_iattr_get(const struct lu_env *env, struct mdd_object *mdd_obj,
575 if (ma->ma_valid & MA_INODE)
578 rc = mdd_la_get(env, mdd_obj, &ma->ma_attr,
579 mdd_object_capa(env, mdd_obj));
581 ma->ma_valid |= MA_INODE;
/* Fill \a lmm with the filesystem-default striping (LOV_MAGIC_V1)
 * taken from the MDS LOV descriptor; sets *size and returns the
 * number of bytes written. */
585 int mdd_get_default_md(struct mdd_object *mdd_obj, struct lov_mds_md *lmm,
588 struct lov_desc *ldesc;
589 struct mdd_device *mdd = mdo2mdd(&mdd_obj->mod_obj);
592 ldesc = &mdd->mdd_obd_dev->u.mds.mds_lov_desc;
593 LASSERT(ldesc != NULL);
598 lmm->lmm_magic = LOV_MAGIC_V1;
599 lmm->lmm_object_gr = LOV_OBJECT_GROUP_DEFAULT;
600 lmm->lmm_pattern = ldesc->ld_pattern;
601 lmm->lmm_stripe_size = ldesc->ld_default_stripe_size;
602 lmm->lmm_stripe_count = ldesc->ld_default_stripe_count;
603 *size = sizeof(struct lov_mds_md);
605 RETURN(sizeof(struct lov_mds_md));
608 /* get lov EA only */
/* Lockless helper: read the LOV EA into ma->ma_lmm, falling back to
 * the filesystem default striping when MA_LOV_DEF is requested. */
609 static int __mdd_lmm_get(const struct lu_env *env,
610 struct mdd_object *mdd_obj, struct md_attr *ma)
615 if (ma->ma_valid & MA_LOV)
618 rc = mdd_get_md(env, mdd_obj, ma->ma_lmm, &ma->ma_lmm_size,
621 if (rc == 0 && (ma->ma_need & MA_LOV_DEF)) {
622 rc = mdd_get_default_md(mdd_obj, ma->ma_lmm,
627 ma->ma_valid |= MA_LOV;
/* Locked wrapper around __mdd_lmm_get(): takes the object read lock
 * for the duration of the EA fetch. */
633 int mdd_lmm_get_locked(const struct lu_env *env, struct mdd_object *mdd_obj,
639 mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
640 rc = __mdd_lmm_get(env, mdd_obj, ma);
641 mdd_read_unlock(env, mdd_obj);
/* Lockless helper: read the LMV (striped-dir) EA into ma->ma_lmv;
 * no-op when MA_LMV is already valid. */
646 static int __mdd_lmv_get(const struct lu_env *env,
647 struct mdd_object *mdd_obj, struct md_attr *ma)
652 if (ma->ma_valid & MA_LMV)
655 rc = mdd_get_md(env, mdd_obj, ma->ma_lmv, &ma->ma_lmv_size,
658 ma->ma_valid |= MA_LMV;
/* Gather all attribute groups requested in ma->ma_need: inode attrs,
 * LOV EA (regular files and dirs), LMV EA (dirs) and default ACL
 * (dirs, when POSIX ACLs are compiled in). */
664 static int mdd_attr_get_internal(const struct lu_env *env,
665 struct mdd_object *mdd_obj,
671 if (ma->ma_need & MA_INODE)
672 rc = mdd_iattr_get(env, mdd_obj, ma);
674 if (rc == 0 && ma->ma_need & MA_LOV) {
675 if (S_ISREG(mdd_object_type(mdd_obj)) ||
676 S_ISDIR(mdd_object_type(mdd_obj)))
677 rc = __mdd_lmm_get(env, mdd_obj, ma);
679 if (rc == 0 && ma->ma_need & MA_LMV) {
680 if (S_ISDIR(mdd_object_type(mdd_obj)))
681 rc = __mdd_lmv_get(env, mdd_obj, ma);
683 #ifdef CONFIG_FS_POSIX_ACL
684 if (rc == 0 && ma->ma_need & MA_ACL_DEF) {
685 if (S_ISDIR(mdd_object_type(mdd_obj)))
686 rc = mdd_def_acl_get(env, mdd_obj, ma);
689 CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64"\n",
/* Like mdd_attr_get_internal() but takes the object read lock when
 * any EA-backed group (LOV/LMV/default ACL) is requested. */
694 int mdd_attr_get_internal_locked(const struct lu_env *env,
695 struct mdd_object *mdd_obj, struct md_attr *ma)
698 int needlock = ma->ma_need & (MA_LOV | MA_LMV | MA_ACL_DEF);
701 mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
702 rc = mdd_attr_get_internal(env, mdd_obj, ma);
704 mdd_read_unlock(env, mdd_obj);
709 * No permission check is needed.
/* md_object_operations getattr entry point. */
711 static int mdd_attr_get(const struct lu_env *env, struct md_object *obj,
714 struct mdd_object *mdd_obj = md2mdd_obj(obj);
718 rc = mdd_attr_get_internal_locked(env, mdd_obj, ma);
723 * No permission check is needed.
/* md_object_operations xattr-get entry point: read xattr \a name into
 * \a buf under the object read lock. */
725 static int mdd_xattr_get(const struct lu_env *env,
726 struct md_object *obj, struct lu_buf *buf,
729 struct mdd_object *mdd_obj = md2mdd_obj(obj);
734 LASSERT(mdd_object_exists(mdd_obj));
736 mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
737 rc = mdo_xattr_get(env, mdd_obj, buf, name,
738 mdd_object_capa(env, mdd_obj));
739 mdd_read_unlock(env, mdd_obj);
745 * Permission check is done when open,
746 * no need check again.
/* Read the symlink target by issuing a body read on the underlying
 * dt object, under the object read lock. */
748 static int mdd_readlink(const struct lu_env *env, struct md_object *obj,
751 struct mdd_object *mdd_obj = md2mdd_obj(obj);
752 struct dt_object *next;
757 LASSERT(mdd_object_exists(mdd_obj));
759 next = mdd_object_child(mdd_obj);
760 mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
761 rc = next->do_body_ops->dbo_read(env, next, buf, &pos,
762 mdd_object_capa(env, mdd_obj));
763 mdd_read_unlock(env, mdd_obj);
768 * No permission check is needed.
/* List all xattr names of \a obj into \a buf under the read lock. */
770 static int mdd_xattr_list(const struct lu_env *env, struct md_object *obj,
773 struct mdd_object *mdd_obj = md2mdd_obj(obj);
778 mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
779 rc = mdo_xattr_list(env, mdd_obj, buf, mdd_object_capa(env, mdd_obj));
780 mdd_read_unlock(env, mdd_obj);
/* Create the on-disk object for \a c inside transaction \a handle:
 * pick a dt_object_format from the creation spec/mode, let the
 * underlying device fill the allocation hint (parent-aware), then
 * create.  No-op path when the object already exists is guarded by
 * the mdd_object_exists() check. */
785 int mdd_object_create_internal(const struct lu_env *env, struct mdd_object *p,
786 struct mdd_object *c, struct md_attr *ma,
787 struct thandle *handle,
788 const struct md_op_spec *spec)
790 struct lu_attr *attr = &ma->ma_attr;
791 struct dt_allocation_hint *hint = &mdd_env_info(env)->mti_hint;
792 struct dt_object_format *dof = &mdd_env_info(env)->mti_dof;
793 const struct dt_index_features *feat = spec->sp_feat;
797 if (!mdd_object_exists(c)) {
798 struct dt_object *next = mdd_object_child(c);
/* Non-directory index features force an index object; otherwise the
 * format is derived from the file mode. */
801 if (feat != &dt_directory_features && feat != NULL)
802 dof->dof_type = DFT_INDEX;
804 dof->dof_type = dt_mode_to_dft(attr->la_mode);
806 dof->u.dof_idx.di_feat = feat;
808 /* @hint will be initialized by underlying device. */
809 next->do_ops->do_ah_init(env, hint,
810 p ? mdd_object_child(p) : NULL,
811 attr->la_mode & S_IFMT);
813 rc = mdo_create_obj(env, c, attr, hint, dof, handle);
814 LASSERT(ergo(rc == 0, mdd_object_exists(c)));
822 * Make sure the ctime is increased only.
/* Drop time updates that would move ctime backwards: an older ctime
 * clears both MTIME and CTIME; a pure, equal ctime update is dropped
 * entirely. */
824 static inline int mdd_attr_check(const struct lu_env *env,
825 struct mdd_object *obj,
826 struct lu_attr *attr)
828 struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
832 if (attr->la_valid & LA_CTIME) {
833 rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
837 if (attr->la_ctime < tmp_la->la_ctime)
838 attr->la_valid &= ~(LA_MTIME | LA_CTIME);
839 else if (attr->la_valid == LA_CTIME &&
840 attr->la_ctime == tmp_la->la_ctime)
841 attr->la_valid &= ~LA_CTIME;
/* Apply \a attr to \a obj inside transaction \a handle; when the mode
 * changes and \a needacl is set, propagate the new mode into the ACL
 * (POSIX ACL builds only). */
846 int mdd_attr_set_internal(const struct lu_env *env,
847 struct mdd_object *obj,
848 struct lu_attr *attr,
849 struct thandle *handle,
855 rc = mdo_attr_set(env, obj, attr, handle, mdd_object_capa(env, obj));
856 #ifdef CONFIG_FS_POSIX_ACL
857 if (!rc && (attr->la_valid & LA_MODE) && needacl)
858 rc = mdd_acl_chmod(env, obj, attr->la_mode, handle);
/* mdd_attr_check() (ctime monotonicity) followed by
 * mdd_attr_set_internal(). */
863 int mdd_attr_check_set_internal(const struct lu_env *env,
864 struct mdd_object *obj,
865 struct lu_attr *attr,
866 struct thandle *handle,
872 rc = mdd_attr_check(env, obj, attr);
877 rc = mdd_attr_set_internal(env, obj, attr, handle, needacl);
/* Write-locked wrapper around mdd_attr_set_internal(); ACL work is
 * only relevant when the mode actually changes. */
881 static int mdd_attr_set_internal_locked(const struct lu_env *env,
882 struct mdd_object *obj,
883 struct lu_attr *attr,
884 struct thandle *handle,
890 needacl = needacl && (attr->la_valid & LA_MODE);
892 mdd_write_lock(env, obj, MOR_TGT_CHILD);
893 rc = mdd_attr_set_internal(env, obj, attr, handle, needacl);
895 mdd_write_unlock(env, obj);
/* Write-locked wrapper around mdd_attr_check_set_internal(); mirrors
 * mdd_attr_set_internal_locked() above. */
899 int mdd_attr_check_set_internal_locked(const struct lu_env *env,
900 struct mdd_object *obj,
901 struct lu_attr *attr,
902 struct thandle *handle,
908 needacl = needacl && (attr->la_valid & LA_MODE);
910 mdd_write_lock(env, obj, MOR_TGT_CHILD);
911 rc = mdd_attr_check_set_internal(env, obj, attr, handle, needacl);
913 mdd_write_unlock(env, obj);
/* Lockless xattr update: a non-empty buffer sets the xattr, a
 * NULL/zero-length buffer deletes it. */
917 int __mdd_xattr_set(const struct lu_env *env, struct mdd_object *obj,
918 const struct lu_buf *buf, const char *name,
919 int fl, struct thandle *handle)
921 struct lustre_capa *capa = mdd_object_capa(env, obj);
925 if (buf->lb_buf && buf->lb_len > 0)
926 rc = mdo_xattr_set(env, obj, buf, name, 0, handle, capa);
927 else if (buf->lb_buf == NULL && buf->lb_len == 0)
928 rc = mdo_xattr_del(env, obj, name, handle, capa);
934 * This gives the same functionality as the code between
935 * sys_chmod and inode_setattr
936 * chown_common and inode_setattr
937 * utimes and inode_setattr
938 * This API is ported from mds_fix_attr but removes some unnecessary stuff.
/* Sanitize a setattr request in \a la against the object's current
 * attributes and the caller's credentials: reject forbidden changes,
 * drop redundant time updates, and enforce chmod/chown/chgrp
 * permission rules (including setuid/setgid stripping). */
940 static int mdd_fix_attr(const struct lu_env *env, struct mdd_object *obj,
941 struct lu_attr *la, const struct md_attr *ma)
943 struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
944 struct md_ucred *uc = md_ucred(env);
951 /* Do not permit change file type */
952 if (la->la_valid & LA_TYPE)
955 /* They should not be processed by setattr */
956 if (la->la_valid & (LA_NLINK | LA_RDEV | LA_BLKSIZE))
959 rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
963 if (la->la_valid == LA_CTIME) {
964 if (!(ma->ma_attr_flags & MDS_PERM_BYPASS))
965 /* This is only for set ctime when rename's source is
967 rc = mdd_may_delete(env, NULL, obj,
968 (struct md_attr *)ma, 1, 0);
969 if (rc == 0 && la->la_ctime <= tmp_la->la_ctime)
970 la->la_valid &= ~LA_CTIME;
974 if (la->la_valid == LA_ATIME) {
975 /* This is atime only set for read atime update on close. */
976 if (la->la_atime <= tmp_la->la_atime +
977 mdd_obj2mdd_dev(obj)->mdd_atime_diff)
978 la->la_valid &= ~LA_ATIME;
982 /* Check if flags change. */
983 if (la->la_valid & LA_FLAGS) {
984 unsigned int oldflags = 0;
985 unsigned int newflags = la->la_flags &
986 (LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL)
988 if ((uc->mu_fsuid != tmp_la->la_uid) &&
989 !mdd_capable(uc, CFS_CAP_FOWNER))
992 /* XXX: the IMMUTABLE and APPEND_ONLY flags can
993 * only be changed by the relevant capability. */
994 if (mdd_is_immutable(obj))
995 oldflags |= LUSTRE_IMMUTABLE_FL;
996 if (mdd_is_append(obj))
997 oldflags |= LUSTRE_APPEND_FL;
998 if ((oldflags ^ newflags) &&
999 !mdd_capable(uc, CFS_CAP_LINUX_IMMUTABLE))
1002 if (!S_ISDIR(tmp_la->la_mode))
1003 la->la_flags &= ~LUSTRE_DIRSYNC_FL;
1006 if ((mdd_is_immutable(obj) || mdd_is_append(obj)) &&
1007 (la->la_valid & ~LA_FLAGS) &&
1008 !(ma->ma_attr_flags & MDS_PERM_BYPASS))
1011 /* Check for setting the obj time. */
1012 if ((la->la_valid & (LA_MTIME | LA_ATIME | LA_CTIME)) &&
1013 !(la->la_valid & ~(LA_MTIME | LA_ATIME | LA_CTIME))) {
1014 if ((uc->mu_fsuid != tmp_la->la_uid) &&
1015 !mdd_capable(uc, CFS_CAP_FOWNER)) {
1016 rc = mdd_permission_internal_locked(env, obj, tmp_la,
1024 /* Make sure a caller can chmod. */
1025 if (la->la_valid & LA_MODE) {
1026 /* Bypass la_valid == LA_MODE,
1027 * this is for changing file with SUID or SGID. */
1028 if ((la->la_valid & ~LA_MODE) &&
1029 !(ma->ma_attr_flags & MDS_PERM_BYPASS) &&
1030 (uc->mu_fsuid != tmp_la->la_uid) &&
1031 !mdd_capable(uc, CFS_CAP_FOWNER))
1034 if (la->la_mode == (umode_t) -1)
1035 la->la_mode = tmp_la->la_mode;
1037 la->la_mode = (la->la_mode & S_IALLUGO) |
1038 (tmp_la->la_mode & ~S_IALLUGO);
1040 /* Also check the setgid bit! */
1041 if (!lustre_in_group_p(uc, (la->la_valid & LA_GID) ?
1042 la->la_gid : tmp_la->la_gid) &&
1043 !mdd_capable(uc, CFS_CAP_FSETID))
1044 la->la_mode &= ~S_ISGID;
1046 la->la_mode = tmp_la->la_mode;
1049 /* Make sure a caller can chown. */
1050 if (la->la_valid & LA_UID) {
1051 if (la->la_uid == (uid_t) -1)
1052 la->la_uid = tmp_la->la_uid;
1053 if (((uc->mu_fsuid != tmp_la->la_uid) ||
1054 (la->la_uid != tmp_la->la_uid)) &&
1055 !mdd_capable(uc, CFS_CAP_CHOWN))
1058 /* If the user or group of a non-directory has been
1059 * changed by a non-root user, remove the setuid bit.
1060 * 19981026 David C Niemi <niemi@tux.org>
1062 * Changed this to apply to all users, including root,
1063 * to avoid some races. This is the behavior we had in
1064 * 2.0. The check for non-root was definitely wrong
1065 * for 2.2 anyway, as it should have been using
1066 * CAP_FSETID rather than fsuid -- 19990830 SD. */
1067 if (((tmp_la->la_mode & S_ISUID) == S_ISUID) &&
1068 !S_ISDIR(tmp_la->la_mode)) {
1069 la->la_mode &= ~S_ISUID;
1070 la->la_valid |= LA_MODE;
1074 /* Make sure caller can chgrp. */
1075 if (la->la_valid & LA_GID) {
1076 if (la->la_gid == (gid_t) -1)
1077 la->la_gid = tmp_la->la_gid;
1078 if (((uc->mu_fsuid != tmp_la->la_uid) ||
1079 ((la->la_gid != tmp_la->la_gid) &&
1080 !lustre_in_group_p(uc, la->la_gid))) &&
1081 !mdd_capable(uc, CFS_CAP_CHOWN))
1084 /* Likewise, if the user or group of a non-directory
1085 * has been changed by a non-root user, remove the
1086 * setgid bit UNLESS there is no group execute bit
1087 * (this would be a file marked for mandatory
1088 * locking). 19981026 David C Niemi <niemi@tux.org>
1090 * Removed the fsuid check (see the comment above) --
1092 if (((tmp_la->la_mode & (S_ISGID | S_IXGRP)) ==
1093 (S_ISGID | S_IXGRP)) && !S_ISDIR(tmp_la->la_mode)) {
1094 la->la_mode &= ~S_ISGID;
1095 la->la_valid |= LA_MODE;
1099 /* For both Size-on-MDS case and truncate case,
1100 * "la->la_valid & (LA_SIZE | LA_BLOCKS)" is true.
1101 * We distinguish them by "ma->ma_attr_flags & MDS_SOM".
1102 * For SOM case, it is true, the MAY_WRITE perm has been checked
1103 * when open, no need check again. For truncate case, it is false,
1104 * the MAY_WRITE perm should be checked here. */
1105 if (ma->ma_attr_flags & MDS_SOM) {
1106 /* For the "Size-on-MDS" setattr update, merge coming
1107 * attributes with the set in the inode. BUG 10641 */
1108 if ((la->la_valid & LA_ATIME) &&
1109 (la->la_atime <= tmp_la->la_atime))
1110 la->la_valid &= ~LA_ATIME;
1112 /* OST attributes do not have a priority over MDS attributes,
1113 * so drop times if ctime is equal. */
1114 if ((la->la_valid & LA_CTIME) &&
1115 (la->la_ctime <= tmp_la->la_ctime))
1116 la->la_valid &= ~(LA_MTIME | LA_CTIME);
1118 if (la->la_valid & (LA_SIZE | LA_BLOCKS)) {
1119 if (!((ma->ma_attr_flags & MDS_OPEN_OWNEROVERRIDE) &&
1120 (uc->mu_fsuid == tmp_la->la_uid)) &&
1121 !(ma->ma_attr_flags & MDS_PERM_BYPASS)) {
1122 rc = mdd_permission_internal_locked(env, obj,
1129 if (la->la_valid & LA_CTIME) {
1130 /* The pure setattr, it has the priority over what is
1131 * already set, do not drop it if ctime is equal. */
1132 if (la->la_ctime < tmp_la->la_ctime)
1133 la->la_valid &= ~(LA_ATIME | LA_MTIME |
1141 /** Store a data change changelog record
1142 * If this fails, we must fail the whole transaction; we don't
1143 * want the change to commit without the log entry.
1144 * \param mdd_obj - mdd_object of change
1145 * \param handle - transaction handle
1147 static int mdd_changelog_data_store(const struct lu_env *env,
1148 struct mdd_device *mdd,
1149 enum changelog_rec_type type,
1150 struct mdd_object *mdd_obj,
1151 struct thandle *handle)
1153 const struct lu_fid *tfid = mdo2fid(mdd_obj);
1154 struct llog_changelog_rec *rec;
/* Changelog disabled: nothing to record. */
1159 if (!(mdd->mdd_cl.mc_flags & CLM_ON))
1162 LASSERT(handle != NULL);
1163 LASSERT(mdd_obj != NULL);
/* Suppress duplicate SETATTR records for an object already logged
 * since the changelog was (re)started. */
1165 if ((type == CL_SETATTR) &&
1166 cfs_time_before_64(mdd->mdd_cl.mc_starttime, mdd_obj->mod_cltime)) {
1167 /* Don't need multiple updates in this log */
1168 /* Don't check under lock - no big deal if we get an extra
1173 reclen = llog_data_len(sizeof(*rec));
1174 buf = mdd_buf_alloc(env, reclen);
1175 if (buf->lb_buf == NULL)
1177 rec = (struct llog_changelog_rec *)buf->lb_buf;
1179 rec->cr_flags = CLF_VERSION;
1180 rec->cr_type = (__u32)type;
1181 rec->cr_tfid = *tfid;
1182 rec->cr_namelen = 0;
1183 mdd_obj->mod_cltime = cfs_time_current_64();
1185 rc = mdd_changelog_llog_write(mdd, rec, handle);
1187 CERROR("changelog failed: rc=%d op%d t"DFID"\n",
1188 rc, type, PFID(tfid));
1195 /* set attr and LOV EA at once, return updated attr */
/* md_object_operations setattr entry point: runs the whole update in
 * one transaction — fix up the requested attrs (mdd_fix_attr), apply
 * them, optionally journal chown/chgrp in the llog, update the LOV
 * EA, emit a changelog record, and (with quota support) adjust quota
 * for old/new owners. */
1196 static int mdd_attr_set(const struct lu_env *env, struct md_object *obj,
1197 const struct md_attr *ma)
1199 struct mdd_object *mdd_obj = md2mdd_obj(obj);
1200 struct mdd_device *mdd = mdo2mdd(obj);
1201 struct thandle *handle;
1202 struct lov_mds_md *lmm = NULL;
1203 struct llog_cookie *logcookies = NULL;
1204 int rc, lmm_size = 0, cookie_size = 0;
1205 struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
1206 #ifdef HAVE_QUOTA_SUPPORT
1207 struct obd_device *obd = mdd->mdd_obd_dev;
1208 struct mds_obd *mds = &obd->u.mds;
1209 unsigned int qnids[MAXQUOTAS] = { 0, 0 };
1210 unsigned int qoids[MAXQUOTAS] = { 0, 0 };
1211 int quota_opc = 0, block_count = 0;
1212 int inode_pending = 0, block_pending = 0;
1216 mdd_setattr_txn_param_build(env, obj, (struct md_attr *)ma,
1217 MDD_TXN_ATTR_SET_OP);
1218 handle = mdd_trans_start(env, mdd);
1220 RETURN(PTR_ERR(handle));
1221 /*TODO: add lock here*/
1222 /* start a log journal handle if needed */
1223 if (S_ISREG(mdd_object_type(mdd_obj)) &&
1224 ma->ma_attr.la_valid & (LA_UID | LA_GID)) {
1225 lmm_size = mdd_lov_mdsize(env, mdd);
1226 lmm = mdd_max_lmm_get(env, mdd);
1228 GOTO(cleanup, rc = -ENOMEM);
1230 rc = mdd_get_md_locked(env, mdd_obj, lmm, &lmm_size,
1237 if (ma->ma_attr.la_valid & (LA_MTIME | LA_CTIME))
1238 CDEBUG(D_INODE, "setting mtime "LPU64", ctime "LPU64"\n",
1239 ma->ma_attr.la_mtime, ma->ma_attr.la_ctime);
/* Work on a copy so the caller's md_attr stays untouched. */
1241 *la_copy = ma->ma_attr;
1242 rc = mdd_fix_attr(env, mdd_obj, la_copy, ma);
1246 #ifdef HAVE_QUOTA_SUPPORT
1247 if (mds->mds_quota && la_copy->la_valid & (LA_UID | LA_GID)) {
1248 struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
1250 rc = mdd_la_get(env, mdd_obj, la_tmp, BYPASS_CAPA);
1252 quota_opc = FSFILT_OP_SETATTR;
1253 mdd_quota_wrapper(la_copy, qnids);
1254 mdd_quota_wrapper(la_tmp, qoids);
1255 /* get file quota for new owner */
1256 lquota_chkquota(mds_quota_interface_ref, obd,
1257 qnids[USRQUOTA], qnids[GRPQUOTA], 1,
1258 &inode_pending, NULL, 0, NULL, 0);
1259 block_count = (la_tmp->la_blocks + 7) >> 3;
1262 mdd_data_get(env, mdd_obj, &data);
1263 /* get block quota for new owner */
1264 lquota_chkquota(mds_quota_interface_ref, obd,
1267 block_count, &block_pending,
1268 NULL, LQUOTA_FLAGS_BLK,
1275 if (la_copy->la_valid & LA_FLAGS) {
1276 rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
1279 mdd_flags_xlate(mdd_obj, la_copy->la_flags);
1280 } else if (la_copy->la_valid) { /* setattr */
1281 rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
1283 /* journal chown/chgrp in llog, just like unlink */
1284 if (rc == 0 && lmm_size){
1285 cookie_size = mdd_lov_cookiesize(env, mdd);
1286 logcookies = mdd_max_cookie_get(env, mdd);
1287 if (logcookies == NULL)
1288 GOTO(cleanup, rc = -ENOMEM);
1290 if (mdd_setattr_log(env, mdd, ma, lmm, lmm_size,
1291 logcookies, cookie_size) <= 0)
1296 if (rc == 0 && ma->ma_valid & MA_LOV) {
1299 mode = mdd_object_type(mdd_obj);
1300 if (S_ISREG(mode) || S_ISDIR(mode)) {
1301 rc = mdd_lsm_sanity_check(env, mdd_obj);
1305 rc = mdd_lov_set_md(env, NULL, mdd_obj, ma->ma_lmm,
1306 ma->ma_lmm_size, handle, 1);
1311 if ((rc == 0) && (ma->ma_attr.la_valid & (LA_MTIME | LA_CTIME)))
1312 rc = mdd_changelog_data_store(env, mdd, CL_SETATTR, mdd_obj,
1314 mdd_trans_stop(env, mdd, rc, handle);
1315 if (rc == 0 && (lmm != NULL && lmm_size > 0 )) {
1316 /*set obd attr, if needed*/
1317 rc = mdd_lov_setattr_async(env, mdd_obj, lmm, lmm_size,
1320 #ifdef HAVE_QUOTA_SUPPORT
1323 lquota_pending_commit(mds_quota_interface_ref, obd,
1324 qnids[USRQUOTA], qnids[GRPQUOTA],
1327 lquota_pending_commit(mds_quota_interface_ref, obd,
1328 qnids[USRQUOTA], qnids[GRPQUOTA],
1330 /* Trigger dqrel/dqacq for original owner and new owner.
1331 * If failed, the next call for lquota_chkquota will
1333 lquota_adjust(mds_quota_interface_ref, obd, qnids, qoids, rc,
/* Write-locked wrapper around __mdd_xattr_set() for use inside an
 * existing transaction. */
1340 int mdd_xattr_set_txn(const struct lu_env *env, struct mdd_object *obj,
1341 const struct lu_buf *buf, const char *name, int fl,
1342 struct thandle *handle)
1347 mdd_write_lock(env, obj, MOR_TGT_CHILD);
1348 rc = __mdd_xattr_set(env, obj, buf, name, fl, handle);
1349 mdd_write_unlock(env, obj);
/*
 * Permission check before modifying an xattr: refuse immutable or
 * append-only objects, then require the caller to be the object owner
 * unless it holds CFS_CAP_FOWNER.
 * NOTE(review): elided view -- the RETURN statements between the
 * visible checks are missing from this extract.
 */
1354 static int mdd_xattr_sanity_check(const struct lu_env *env,
1355 struct mdd_object *obj)
1357         struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
1358         struct md_ucred *uc = md_ucred(env);
1362         if (mdd_is_immutable(obj) || mdd_is_append(obj))
1365         rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
             /* owner check: non-owners need the FOWNER capability */
1369         if ((uc->mu_fsuid != tmp_la->la_uid) &&
1370             !mdd_capable(uc, CFS_CAP_FOWNER))
1377  * The caller should guarantee to update the object ctime
1378  * after xattr_set if needed.
/*
 * md_object_operations entry: sanity-check, start an XATTR_SET
 * transaction, set the xattr via mdd_xattr_set_txn(), record a
 * CL_XATTR changelog entry for "user." xattrs, and stop the
 * transaction.  (Elided view: some declarations and the error-return
 * lines between the visible statements are missing here.)
 */
1380 static int mdd_xattr_set(const struct lu_env *env, struct md_object *obj,
1381 const struct lu_buf *buf, const char *name,
1384         struct mdd_object *mdd_obj = md2mdd_obj(obj);
1385         struct mdd_device *mdd = mdo2mdd(obj);
1386         struct thandle *handle;
1390         rc = mdd_xattr_sanity_check(env, mdd_obj);
1394         mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
1395         handle = mdd_trans_start(env, mdd);
1397                 RETURN(PTR_ERR(handle));
1399         rc = mdd_xattr_set_txn(env, mdd_obj, buf, name, fl, handle);
1401         /* Only record user xattr changes */
1402         if ((rc == 0) && (mdd->mdd_cl.mc_flags & CLM_ON) &&
1403             (strncmp("user.", name, 5) == 0))
1404                 rc = mdd_changelog_data_store(env, mdd, CL_XATTR, mdd_obj,
1406         mdd_trans_stop(env, mdd, rc, handle);
1412  * The caller should guarantee to update the object ctime
1413  * after xattr_set if needed.
/*
 * md_object_operations entry: delete an xattr by name inside an
 * XATTR_SET transaction, optionally recording a CL_XATTR changelog
 * entry.  (Elided view: declarations and error-return lines between
 * the visible statements are missing here.)
 */
1415 int mdd_xattr_del(const struct lu_env *env, struct md_object *obj,
1418         struct mdd_object *mdd_obj = md2mdd_obj(obj);
1419         struct mdd_device *mdd = mdo2mdd(obj);
1420         struct thandle *handle;
1424         rc = mdd_xattr_sanity_check(env, mdd_obj);
1428         mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
1429         handle = mdd_trans_start(env, mdd);
1431                 RETURN(PTR_ERR(handle));
1433         mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
1434         rc = mdo_xattr_del(env, mdd_obj, name, handle,
1435                            mdd_object_capa(env, mdd_obj));
1436         mdd_write_unlock(env, mdd_obj);
1438         /* Only record user xattr changes */
             /* NOTE(review): the condition below is `!= 0`, i.e. it logs
              * every xattr EXCEPT "user." ones -- this contradicts the
              * comment above and the `== 0` test used in mdd_xattr_set().
              * Likely should be `== 0`; confirm against upstream before
              * changing, since this view is elided. */
1439         if ((rc == 0) && (mdd->mdd_cl.mc_flags & CLM_ON) &&
1440             (strncmp("user.", name, 5) != 0))
1441                 rc = mdd_changelog_data_store(env, mdd, CL_XATTR, mdd_obj,
1444         mdd_trans_stop(env, mdd, rc, handle);
1449 /* partial unlink */
/*
 * Drop one link on @obj without touching any namespace entry (the
 * directory-entry half of the unlink happens elsewhere): decrement
 * nlink (twice for directories, for the "." self-link), update ctime,
 * finish the unlink bookkeeping, and release quota for the child's
 * owner when the last link and last open are gone.
 * (Elided view: braces, ENTRY/GOTO/RETURN lines and some quota
 * declarations are missing from this extract.)
 */
1450 static int mdd_ref_del(const struct lu_env *env, struct md_object *obj,
1453         struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
1454         struct mdd_object *mdd_obj = md2mdd_obj(obj);
1455         struct mdd_device *mdd = mdo2mdd(obj);
1456         struct thandle *handle;
1457 #ifdef HAVE_QUOTA_SUPPORT
1458         struct obd_device *obd = mdd->mdd_obd_dev;
1459         struct mds_obd *mds = &obd->u.mds;
1460         unsigned int qids[MAXQUOTAS] = { 0, 0 };
1467          * Check -ENOENT early here because we need to get object type
1468          * to calculate credits before transaction start
1470         if (!mdd_object_exists(mdd_obj))
1473         LASSERT(mdd_object_exists(mdd_obj) > 0);
1475         rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
1479         handle = mdd_trans_start(env, mdd);
1483         mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
1485         rc = mdd_unlink_sanity_check(env, NULL, mdd_obj, ma);
             /* drop the regular link ... */
1489         __mdd_ref_del(env, mdd_obj, handle, 0);
             /* ... and, for a directory, the extra "." self-link too */
1491         if (S_ISDIR(lu_object_attr(&obj->mo_lu))) {
1493                 __mdd_ref_del(env, mdd_obj, handle, 1);
1496         LASSERT(ma->ma_attr.la_valid & LA_CTIME);
1497         la_copy->la_ctime = ma->ma_attr.la_ctime;
1499         la_copy->la_valid = LA_CTIME;
1500         rc = mdd_attr_check_set_internal(env, mdd_obj, la_copy, handle, 0);
1504         rc = mdd_finish_unlink(env, mdd_obj, ma, handle);
1505 #ifdef HAVE_QUOTA_SUPPORT
             /* last link removed and no open handles: schedule quota release */
1506         if (mds->mds_quota && ma->ma_valid & MA_INODE &&
1507             ma->ma_attr.la_nlink == 0 && mdd_obj->mod_count == 0) {
1508                 quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
1509                 mdd_quota_wrapper(&ma->ma_attr, qids);
1516         mdd_write_unlock(env, mdd_obj);
1517         mdd_trans_stop(env, mdd, rc, handle);
1518 #ifdef HAVE_QUOTA_SUPPORT
1520         /* Trigger dqrel on the owner of child. If failed,
1521          * the next call for lquota_chkquota will process it */
1522         lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
1528 /* partial operation */
/*
 * Validate the requested file type (S_IFMT bits of la_mode) before a
 * partial object create.  Almost the entire body -- the switch cases
 * and return values -- is elided from this extract; do not infer
 * which types are accepted from this view alone.
 */
1529 static int mdd_oc_sanity_check(const struct lu_env *env,
1530                                struct mdd_object *obj,
1536         switch (ma->ma_attr.la_mode & S_IFMT) {
/*
 * Create the object body only (no namespace entry) -- the "partial
 * create" used e.g. by CMD slave objects.  Visible steps: acquire
 * inode/block quota for the child, start an OBJECT_CREATE
 * transaction, sanity-check the mode, create the object, set the
 * slave LMV EA when MDS_CREATE_SLAVE_OBJ is requested, set the
 * initial attributes, initialize a remote ACL when
 * MDS_CREATE_RMT_ACL is set, link the object to its parent fid via
 * mdd_object_initialize(), read back the attributes, then stop the
 * transaction and settle pending quota.
 * (Elided view: braces, GOTO labels, error paths and some
 * declarations are missing from this extract.)
 */
1553 static int mdd_object_create(const struct lu_env *env,
1554                              struct md_object *obj,
1555                              const struct md_op_spec *spec,
1559         struct mdd_device *mdd = mdo2mdd(obj);
1560         struct mdd_object *mdd_obj = md2mdd_obj(obj);
1561         const struct lu_fid *pfid = spec->u.sp_pfid;
1562         struct thandle *handle;
1563 #ifdef HAVE_QUOTA_SUPPORT
1564         struct obd_device *obd = mdd->mdd_obd_dev;
1565         struct mds_obd *mds = &obd->u.mds;
1566         unsigned int qids[MAXQUOTAS] = { 0, 0 };
1567         int quota_opc = 0, block_count = 0;
1568         int inode_pending = 0, block_pending = 0;
1573 #ifdef HAVE_QUOTA_SUPPORT
1574         if (mds->mds_quota) {
1575                 quota_opc = FSFILT_OP_CREATE_PARTIAL_CHILD;
1576                 mdd_quota_wrapper(&ma->ma_attr, qids);
1577                 /* get file quota for child */
1578                 lquota_chkquota(mds_quota_interface_ref, obd, qids[USRQUOTA],
1579                                 qids[GRPQUOTA], 1, &inode_pending, NULL, 0,
1581                 switch (ma->ma_attr.la_mode & S_IFMT) {
1590                 /* get block quota for child */
1592                         lquota_chkquota(mds_quota_interface_ref, obd,
1593                                         qids[USRQUOTA], qids[GRPQUOTA],
1594                                         block_count, &block_pending, NULL,
1595                                         LQUOTA_FLAGS_BLK, NULL, 0);
1599         mdd_txn_param_build(env, mdd, MDD_TXN_OBJECT_CREATE_OP);
1600         handle = mdd_trans_start(env, mdd);
1602                 GOTO(out_pending, rc = PTR_ERR(handle));
1604         mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
1605         rc = mdd_oc_sanity_check(env, mdd_obj, ma);
1609         rc = mdd_object_create_internal(env, NULL, mdd_obj, ma, handle, spec);
1613         if (spec->sp_cr_flags & MDS_CREATE_SLAVE_OBJ) {
1614                 /* If creating the slave object, set slave EA here. */
1615                 int lmv_size = spec->u.sp_ea.eadatalen;
1616                 struct lmv_stripe_md *lmv;
1618                 lmv = (struct lmv_stripe_md *)spec->u.sp_ea.eadata;
1619                 LASSERT(lmv != NULL && lmv_size > 0);
1621                 rc = __mdd_xattr_set(env, mdd_obj,
1622                                      mdd_buf_get_const(env, lmv, lmv_size),
1623                                      XATTR_NAME_LMV, 0, handle);
1627                 rc = mdd_attr_set_internal(env, mdd_obj, &ma->ma_attr,
1630 #ifdef CONFIG_FS_POSIX_ACL
1631         if (spec->sp_cr_flags & MDS_CREATE_RMT_ACL) {
1632                 struct lu_buf *buf = &mdd_env_info(env)->mti_buf;
1634                 buf->lb_buf = (void *)spec->u.sp_ea.eadata;
1635                 buf->lb_len = spec->u.sp_ea.eadatalen;
1636                 if ((buf->lb_len > 0) && (buf->lb_buf != NULL)) {
1637                         rc = __mdd_acl_init(env, mdd_obj, buf,
1638                                             &ma->ma_attr.la_mode,
                         /* ACL init may have rewritten la_mode; propagate it */
1643                         ma->ma_attr.la_valid |= LA_MODE;
1646                 pfid = spec->u.sp_ea.fid;
1649         rc = mdd_object_initialize(env, pfid, NULL, mdd_obj, ma, handle,
1655                 rc = mdd_attr_get_internal(env, mdd_obj, ma);
1656         mdd_write_unlock(env, mdd_obj);
1658         mdd_trans_stop(env, mdd, rc, handle);
1660 #ifdef HAVE_QUOTA_SUPPORT
             /* commit the inode and block quota reserved above */
1663                 lquota_pending_commit(mds_quota_interface_ref, obd,
1664                                       qids[USRQUOTA], qids[GRPQUOTA],
1667                 lquota_pending_commit(mds_quota_interface_ref, obd,
1668                                       qids[USRQUOTA], qids[GRPQUOTA],
1670         /* Trigger dqacq on the owner of child. If failed,
1671          * the next call for lquota_chkquota will process it. */
1672         lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
1673                       FSFILT_OP_CREATE_PARTIAL_CHILD);
/*
 * Add one link to @obj (the object half of a hard link; the name
 * entry is handled elsewhere): bump nlink under the write lock, then
 * update ctime from the caller-supplied md_attr.
 * (Elided view: error paths and GOTO labels are missing here.)
 */
1680 static int mdd_ref_add(const struct lu_env *env, struct md_object *obj,
1681                        const struct md_attr *ma)
1683         struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
1684         struct mdd_object *mdd_obj = md2mdd_obj(obj);
1685         struct mdd_device *mdd = mdo2mdd(obj);
1686         struct thandle *handle;
1690         mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
1691         handle = mdd_trans_start(env, mdd);
1695         mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
1696         rc = mdd_link_sanity_check(env, NULL, NULL, mdd_obj);
1698                 __mdd_ref_add(env, mdd_obj, handle);
1699         mdd_write_unlock(env, mdd_obj);
1701         LASSERT(ma->ma_attr.la_valid & LA_CTIME);
1702         la_copy->la_ctime = ma->ma_attr.la_ctime;
1704         la_copy->la_valid = LA_CTIME;
1705         rc = mdd_attr_check_set_internal_locked(env, mdd_obj, la_copy,
             /* NOTE(review): transaction is stopped with literal 0, not rc,
              * unlike the other ops in this file which pass rc -- confirm
              * whether ignoring rc here is intentional. */
1708         mdd_trans_stop(env, mdd, 0, handle);
1714  * do NOT or the MAY_*'s, you'll get the weakest
/*
 * Translate MDS open flags into a MAY_* access mask for permission
 * checking.  MDS_OPEN_OWNEROVERRIDE lets the file owner (or a caller
 * with uninitialized creds) bypass the check entirely.
 * (Elided view: the `res |= MAY_*` / return lines between the visible
 * flag tests are missing from this extract.)
 */
1716 int accmode(const struct lu_env *env, struct lu_attr *la, int flags)
1720         /* Sadly, NFSD reopens a file repeatedly during operation, so the
1721          * "acc_mode = 0" allowance for newly-created files isn't honoured.
1722          * NFSD uses the MDS_OPEN_OWNEROVERRIDE flag to say that a file
1723          * owner can write to a file even if it is marked readonly to hide
1724          * its brokenness. (bug 5781) */
1725         if (flags & MDS_OPEN_OWNEROVERRIDE) {
1726                 struct md_ucred *uc = md_ucred(env);
1728                 if ((uc == NULL) || (uc->mu_valid == UCRED_INIT) ||
1729                     (la->la_uid == uc->mu_fsuid))
1733         if (flags & FMODE_READ)
1735         if (flags & (FMODE_WRITE | MDS_OPEN_TRUNC | MDS_OPEN_APPEND))
1737         if (flags & MDS_FMODE_EXEC)
/*
 * Validate an open request against the object's current attributes:
 * reject dead objects and symlinks, forbid write-opens of
 * directories, run the permission check unless the file was just
 * created (MDS_OPEN_CREATED), strip MDS_OPEN_TRUNC for special files,
 * enforce append-only semantics, and restrict O_NOATIME to the owner
 * or CFS_CAP_FOWNER holders.
 * (Elided view: RETURN/error lines between the checks are missing.)
 */
1742 static int mdd_open_sanity_check(const struct lu_env *env,
1743                                  struct mdd_object *obj, int flag)
1745         struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
1750         if (mdd_is_dead_obj(obj))
1753         rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
1757         if (S_ISLNK(tmp_la->la_mode))
1760         mode = accmode(env, tmp_la, flag);
1762         if (S_ISDIR(tmp_la->la_mode) && (mode & MAY_WRITE))
1765         if (!(flag & MDS_OPEN_CREATED)) {
1766                 rc = mdd_permission_internal(env, obj, tmp_la, mode);
             /* truncate is meaningless for FIFOs, sockets and device nodes */
1771         if (S_ISFIFO(tmp_la->la_mode) || S_ISSOCK(tmp_la->la_mode) ||
1772             S_ISBLK(tmp_la->la_mode) || S_ISCHR(tmp_la->la_mode))
1773                 flag &= ~MDS_OPEN_TRUNC;
1775         /* For writing append-only file must open it with append mode. */
1776         if (mdd_is_append(obj)) {
1777                 if ((flag & FMODE_WRITE) && !(flag & MDS_OPEN_APPEND))
1779                 if (flag & MDS_OPEN_TRUNC)
1785          * Note: the O_NOATIME flag is not packed by the client.
1787         if (flag & O_NOATIME) {
1788                 struct md_ucred *uc = md_ucred(env);
1790                 if (uc && ((uc->mu_valid == UCRED_OLD) ||
1791                            (uc->mu_valid == UCRED_NEW)) &&
1792                     (uc->mu_fsuid != tmp_la->la_uid) &&
1793                     !mdd_capable(uc, CFS_CAP_FOWNER))
/*
 * md_object_operations open entry: run the open sanity check and, on
 * success, bump the object's open count -- all under the write lock.
 */
1801 static int mdd_open(const struct lu_env *env, struct md_object *obj,
1804         struct mdd_object *mdd_obj = md2mdd_obj(obj);
1807         mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
1809         rc = mdd_open_sanity_check(env, mdd_obj, flags);
             /* count the open only if the sanity check passed */
1811                 mdd_obj->mod_count++;
1813         mdd_write_unlock(env, mdd_obj);
1817 /* return md_attr back,
1818  * if it is last unlink then return lov ea + llog cookie*/
/*
 * Destroy-time helper: for regular files, fetch the LOV EA into @ma
 * and, when present, write the unlink llog record so the caller can
 * hand LOV + cookies back to the client.
 * (Elided view: the non-regular-file path and the trailing lines of
 * this function are not visible in this extract.)
 */
1819 int mdd_object_kill(const struct lu_env *env, struct mdd_object *obj,
1825         if (S_ISREG(mdd_object_type(obj))) {
1826                 /* Return LOV & COOKIES unconditionally here. We clean evth up.
1827                  * Caller must be ready for that. */
1829                 rc = __mdd_lmm_get(env, obj, ma);
1830                 if ((ma->ma_valid & MA_LOV))
1831                         rc = mdd_unlink_log(env, mdo2mdd(&obj->mod_obj),
1838  * No permission check is needed.
/*
 * md_object_operations close entry: drop the open count inside an
 * UNLINK transaction.  On the last close, remove the object from the
 * orphan index if it was orphaned; if the link count is also zero,
 * kill the object (returning LOV EA + llog cookies via @ma) and
 * release the owner's quota.
 * (Elided view: braces, labels and some declarations are missing.)
 */
1840 static int mdd_close(const struct lu_env *env, struct md_object *obj,
1843         struct mdd_object *mdd_obj = md2mdd_obj(obj);
1844         struct thandle *handle;
1848 #ifdef HAVE_QUOTA_SUPPORT
1849         struct obd_device *obd = mdo2mdd(obj)->mdd_obd_dev;
1850         struct mds_obd *mds = &obd->u.mds;
1851         unsigned int qids[MAXQUOTAS] = { 0, 0 };
1856         rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
1859         handle = mdd_trans_start(env, mdo2mdd(obj));
1861                 RETURN(PTR_ERR(handle));
1863         mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
1864         /* release open count */
1865         mdd_obj->mod_count --;
1867         if (mdd_obj->mod_count == 0) {
1868                 /* remove link to object from orphan index */
1869                 if (mdd_obj->mod_flags & ORPHAN_OBJ)
1870                         __mdd_orphan_del(env, mdd_obj, handle);
1873         rc = mdd_iattr_get(env, mdd_obj, ma);
             /* last close of an unlinked object: destroy it now */
1875         if (mdd_obj->mod_count == 0 && ma->ma_attr.la_nlink == 0) {
1876                 rc = mdd_object_kill(env, mdd_obj, ma);
1877 #ifdef HAVE_QUOTA_SUPPORT
1878                 if (mds->mds_quota) {
1879                         quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
1880                         mdd_quota_wrapper(&ma->ma_attr, qids);
1889                 ma->ma_valid &= ~(MA_LOV | MA_COOKIE);
1891         mdd_write_unlock(env, mdd_obj);
1892         mdd_trans_stop(env, mdo2mdd(obj), rc, handle);
1893 #ifdef HAVE_QUOTA_SUPPORT
1895         /* Trigger dqrel on the owner of child. If failed,
1896          * the next call for lquota_chkquota will process it */
1897         lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
1904  * Permission check is done when open,
1905  * no need check again.
/*
 * readpage precondition: the object must be a directory with a usable
 * index iterator (dt_try_as_dir).  The return values are elided from
 * this extract.
 */
1907 static int mdd_readpage_sanity_check(const struct lu_env *env,
1908                                      struct mdd_object *obj)
1910         struct dt_object *next = mdd_object_child(obj);
1914         if (S_ISDIR(mdd_object_type(obj)) && dt_try_as_dir(env, next))
/*
 * Fill the optional per-entry attributes of a lu_dirent being built
 * for readdir: when LUDA_FID is requested, unpack the record's packed
 * fid into lde_fid (little-endian on the wire); when LUDA_TYPE is
 * also requested, look the object up and append its file type as a
 * luda_type, aligned after the name.  Finally byte-swap lde_attrs.
 * (Elided view: error handling between the visible steps is missing.)
 */
1922 static int mdd_append_attrs(const struct lu_env *env,
1923                             struct mdd_device *mdd,
1925                             const struct dt_it_ops *iops,
1927                             struct lu_dirent*ent)
1929         struct mdd_thread_info *info = mdd_env_info(env);
1930         struct lu_fid *fid = &info->mti_fid2;
1931         int len = cpu_to_le16(ent->lde_namelen);
1932         const unsigned align = sizeof(struct luda_type) - 1;
1933         struct lu_fid_pack *pack;
1934         struct mdd_object *obj;
1935         struct luda_type *lt;
1938         if (attr & LUDA_FID) {
1939                 pack = (struct lu_fid_pack *)iops->rec(env, it);
1945                 rc = fid_unpack(pack, fid);
1951                 fid_cpu_to_le(&ent->lde_fid, fid);
1952                 ent->lde_attrs = LUDA_FID;
1955         /* check if file type is required */
1956         if (attr & LUDA_TYPE) {
                 /* LUDA_TYPE is only meaningful together with LUDA_FID */
1957                 if (!(attr & LUDA_FID)) {
1958                         CERROR("wrong attr : [%x]\n",attr);
1963                 obj = mdd_object_find(env, mdd, fid);
1964                 if (obj == NULL) /* remote object */
1972                 if (mdd_object_exists(obj) == +1) {
                         /* round the name length up so luda_type is aligned */
1973                         len = (len + align) & ~align;
1975                         lt = (void *) ent->lde_name + len;
1976                         lt->lt_type = cpu_to_le16(mdd_object_type(obj));
1978                         ent->lde_attrs |= LUDA_TYPE;
1980                 mdd_object_put(env, obj);
1983         ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
/*
 * Fill one readdir page: starting from iterator @it, pack successive
 * lu_dirent records (hash, name, optional fid/type via
 * mdd_append_attrs) into @area until @nob bytes are exhausted or the
 * iterator ends.  When a record does not fit, the previous entry's
 * reclen is enlarged to absorb the leftover space.  *start/*end/*last
 * report the hash window and the final entry to the caller.
 * (Elided view: several loop-control and bookkeeping lines are
 * missing from this extract.)
 */
1987 static int mdd_dir_page_build(const struct lu_env *env, struct mdd_device *mdd,
1988                               int first, void *area, int nob,
1989                               const struct dt_it_ops *iops, struct dt_it *it,
1990                               __u64 *start, __u64 *end,
1991                               struct lu_dirent **last, __u32 attr)
1994         struct lu_dirent *ent;
             /* the page starts with a lu_dirpage header; entries follow it */
1998                 memset(area, 0, sizeof (struct lu_dirpage));
1999                 area += sizeof (struct lu_dirpage);
2000                 nob -= sizeof (struct lu_dirpage);
2009                 len = iops->key_size(env, it);
2011                 /* IAM iterator can return record with zero len. */
2015                 name = (char *)iops->key(env, it);
2016                 hash = iops->store(env, it);
2018                 if (unlikely(first)) {
2023                 recsize = lu_dirent_calc_size(len, attr);
2025                 CDEBUG(D_INFO, "%p %p %d "LPU64" (%d) \"%*.*s\"\n",
2026                        name, ent, nob, hash, len, len, len, name);
2028                 if (nob >= recsize) {
2029                         ent->lde_hash = cpu_to_le64(hash);
2030                         ent->lde_namelen = cpu_to_le16(len);
2031                         ent->lde_reclen = cpu_to_le16(recsize);
2032                         memcpy(ent->lde_name, name, len);
2034                         result = mdd_append_attrs(env, mdd, attr, iops, it, ent);
2039                          * record doesn't fit into page, enlarge previous one.
2042                                 (*last)->lde_reclen =
2043                                         cpu_to_le16(le16_to_cpu((*last)->lde_reclen) +
2052                 ent = (void *)ent + recsize;
2056                 result = iops->next(env, it);
2057         } while (result == 0);
/*
 * Core readdir: initialize the directory index iterator, position it
 * at rdpg->rp_hash, then fill rdpg->rp_pages one CFS_PAGE_SIZE chunk
 * at a time with mdd_dir_page_build().  Afterwards, stamp the first
 * page's lu_dirpage header with the hash window and, if nothing was
 * produced, the LDF_EMPTY flag.
 * (Elided view: braces, several error branches and the
 * last-page/hash_end bookkeeping lines are missing here.)
 */
2064 static int __mdd_readpage(const struct lu_env *env, struct mdd_object *obj,
2065                           const struct lu_rdpg *rdpg)
2068         struct dt_object *next = mdd_object_child(obj);
2069         const struct dt_it_ops *iops;
2071         struct lu_dirent *last = NULL;
2072         struct mdd_device *mdd = mdo2mdd(&obj->mod_obj);
2079         LASSERT(rdpg->rp_pages != NULL);
2080         LASSERT(next->do_index_ops != NULL);
2082         if (rdpg->rp_count <= 0)
2086          * iterate through directory and fill pages from @rdpg
2088         iops = &next->do_index_ops->dio_it;
2089         it = iops->init(env, next, mdd_object_capa(env, obj));
2093         rc = iops->load(env, it, rdpg->rp_hash);
2097          * Iterator didn't find record with exactly the key requested.
2099          * It is currently either
2101          *     - positioned above record with key less than
2102          *     requested---skip it.
2104          *     - or not positioned at all (is in IAM_IT_SKEWED
2105          *     state)---position it on the next item.
2107                 rc = iops->next(env, it);
2112          * At this point and across for-loop:
2114          *     rc == 0 -> ok, proceed.
2115          *     rc >  0 -> end of directory.
2118         for (i = 0, nob = rdpg->rp_count; rc == 0 && nob > 0;
2119              i++, nob -= CFS_PAGE_SIZE) {
2120                 LASSERT(i < rdpg->rp_npages);
2121                 pg = rdpg->rp_pages[i];
2122                 rc = mdd_dir_page_build(env, mdd, !i, cfs_kmap(pg),
2123                                         min_t(int, nob, CFS_PAGE_SIZE), iops,
2124                                         it, &hash_start, &hash_end, &last,
                 /* last page (or error): terminate the final entry */
2126                 if (rc != 0 || i == rdpg->rp_npages - 1) {
2128                                 last->lde_reclen = 0;
2136                         hash_end = DIR_END_OFF;
2140                 struct lu_dirpage *dp;
2142                 dp = cfs_kmap(rdpg->rp_pages[0]);
2143                 dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
2144                 dp->ldp_hash_end   = cpu_to_le64(hash_end);
2147                          * No pages were processed, mark this.
2149                         dp->ldp_flags |= LDF_EMPTY;
2151                 dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
2152                 cfs_kunmap(rdpg->rp_pages[0]);
2155         iops->fini(env, it);
/*
 * md_object_operations readpage entry: under the read lock, verify
 * the object is a readable directory.  A dead (unlinked) directory
 * returns a single empty page (POSIX: no entries, not even "." and
 * ".."); otherwise delegate to __mdd_readpage().
 * (Elided view: braces and some error-path lines are missing here.)
 */
2160 static int mdd_readpage(const struct lu_env *env, struct md_object *obj,
2161                         const struct lu_rdpg *rdpg)
2163         struct mdd_object *mdd_obj = md2mdd_obj(obj);
2167         LASSERT(mdd_object_exists(mdd_obj));
2169         mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
2170         rc = mdd_readpage_sanity_check(env, mdd_obj);
2172                 GOTO(out_unlock, rc);
2174         if (mdd_is_dead_obj(mdd_obj)) {
2176                 struct lu_dirpage *dp;
2179                  * According to POSIX, please do not return any entry to client:
2180                  * even dot and dotdot should not be returned.
2182                 CWARN("readdir from dead object: "DFID"\n",
2183                       PFID(mdd_object_fid(mdd_obj)));
2185                 if (rdpg->rp_count <= 0)
2186                         GOTO(out_unlock, rc = -EFAULT);
2187                 LASSERT(rdpg->rp_pages != NULL);
2189                 pg = rdpg->rp_pages[0];
2190                 dp = (struct lu_dirpage*)cfs_kmap(pg);
2191                 memset(dp, 0 , sizeof(struct lu_dirpage));
2192                 dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
2193                 dp->ldp_hash_end   = cpu_to_le64(DIR_END_OFF);
2194                 dp->ldp_flags |= LDF_EMPTY;
2195                 dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
2197                 GOTO(out_unlock, rc = 0);
2200         rc = __mdd_readpage(env, mdd_obj, rdpg);
2204         mdd_read_unlock(env, mdd_obj);
/*
 * md_object_operations sync entry: forward the sync request to the
 * underlying dt_object's do_object_sync method.
 */
2208 static int mdd_object_sync(const struct lu_env *env, struct md_object *obj)
2210         struct mdd_object *mdd_obj = md2mdd_obj(obj);
2211         struct dt_object *next;
2213         LASSERT(mdd_object_exists(mdd_obj));
2214         next = mdd_object_child(mdd_obj);
2215         return next->do_ops->do_object_sync(env, next);
2218 const struct md_object_operations mdd_obj_ops = {
2219 .moo_permission = mdd_permission,
2220 .moo_attr_get = mdd_attr_get,
2221 .moo_attr_set = mdd_attr_set,
2222 .moo_xattr_get = mdd_xattr_get,
2223 .moo_xattr_set = mdd_xattr_set,
2224 .moo_xattr_list = mdd_xattr_list,
2225 .moo_xattr_del = mdd_xattr_del,
2226 .moo_object_create = mdd_object_create,
2227 .moo_ref_add = mdd_ref_add,
2228 .moo_ref_del = mdd_ref_del,
2229 .moo_open = mdd_open,
2230 .moo_close = mdd_close,
2231 .moo_readpage = mdd_readpage,
2232 .moo_readlink = mdd_readlink,
2233 .moo_capa_get = mdd_capa_get,
2234 .moo_object_sync = mdd_object_sync,
2235 .moo_path = mdd_path,