/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/mdd/mdd_object.c
 *
 * Lustre Metadata Server (mdd) routines
 *
 * Author: Wang Di <wangdi@clusterfs.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_MDS

#include <linux/module.h>
#include <linux/jbd.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lprocfs_status.h>
/* fid_be_cpu(), fid_cpu_to_be(). */
#include <lustre_fid.h>

#include <linux/ldiskfs_fs.h>
#include <lustre_mds.h>
#include <lustre/lustre_idl.h>

#include "mdd_internal.h"

static const struct lu_object_operations mdd_lu_obj_ops;
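/*
 * Fetch the attributes of @obj from the underlying layer into @la.
 * The object must exist; the LASSERTF below prints the FID on misuse.
 */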
int mdd_la_get(const struct lu_env *env, struct mdd_object *obj,
               struct lu_attr *la, struct lustre_capa *capa)
{
        LASSERTF(mdd_object_exists(obj), "FID is "DFID"\n",
                 PFID(mdd_object_fid(obj)));
        return mdo_attr_get(env, obj, la, capa);
}
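/*
 * Translate the on-disk LUSTRE_APPEND_FL/LUSTRE_IMMUTABLE_FL flags
 * into the in-core APPEND_OBJ/IMMUTE_OBJ bits cached in mod_flags.
 */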
static void mdd_flags_xlate(struct mdd_object *obj, __u32 flags)
{
        obj->mod_flags &= ~(APPEND_OBJ|IMMUTE_OBJ);

        if (flags & LUSTRE_APPEND_FL)
                obj->mod_flags |= APPEND_OBJ;

        if (flags & LUSTRE_IMMUTABLE_FL)
                obj->mod_flags |= IMMUTE_OBJ;
}
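/*
 * Wrap @area/@len in the per-thread lu_buf from mdd_thread_info so
 * short-lived buffer descriptors need no allocation. Note the single
 * mti_buf is reused; the previous wrapping becomes invalid.
 */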
struct lu_buf *mdd_buf_get(const struct lu_env *env, void *area, ssize_t len)
{
        struct lu_buf *buf;

        buf = &mdd_env_info(env)->mti_buf;
        buf->lb_buf = area;
        buf->lb_len = len;
        return buf;
}
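/*
 * Return a zeroed llog-cookie buffer large enough for the current
 * default striping. The buffer is cached per thread and reallocated
 * only when the required size grows.
 */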
struct llog_cookie *mdd_max_cookie_get(const struct lu_env *env,
                                       struct mdd_device *mdd)
{
        struct mdd_thread_info *mti = mdd_env_info(env);
        int                     max_cookie_size;

        max_cookie_size = mdd_lov_cookiesize(env, mdd);
        if (unlikely(mti->mti_max_cookie_size < max_cookie_size)) {
                if (mti->mti_max_cookie)
                        OBD_FREE(mti->mti_max_cookie,
                                 mti->mti_max_cookie_size);
                mti->mti_max_cookie = NULL;
                mti->mti_max_cookie_size = 0;
        }

        if (unlikely(mti->mti_max_cookie == NULL)) {
                OBD_ALLOC(mti->mti_max_cookie, max_cookie_size);
                if (likely(mti->mti_max_cookie != NULL))
                        mti->mti_max_cookie_size = max_cookie_size;
        }

        if (likely(mti->mti_max_cookie != NULL))
                memset(mti->mti_max_cookie, 0, mti->mti_max_cookie_size);
        return mti->mti_max_cookie;
}
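/*
 * Per-thread LOV EA buffer with the same grow-only caching as
 * mdd_max_cookie_get() above; unlike the cookie buffer it is not
 * zeroed here, callers are expected to overwrite it.
 */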
struct lov_mds_md *mdd_max_lmm_get(const struct lu_env *env,
                                   struct mdd_device *mdd)
{
        struct mdd_thread_info *mti = mdd_env_info(env);
        int                     max_lmm_size;

        max_lmm_size = mdd_lov_mdsize(env, mdd);
        if (unlikely(mti->mti_max_lmm_size < max_lmm_size)) {
                if (mti->mti_max_lmm)
                        OBD_FREE(mti->mti_max_lmm, mti->mti_max_lmm_size);
                mti->mti_max_lmm = NULL;
                mti->mti_max_lmm_size = 0;
        }

        if (unlikely(mti->mti_max_lmm == NULL)) {
                OBD_ALLOC(mti->mti_max_lmm, max_lmm_size);
                if (likely(mti->mti_max_lmm != NULL))
                        mti->mti_max_lmm_size = max_lmm_size;
        }
        return mti->mti_max_lmm;
}
const struct lu_buf *mdd_buf_get_const(const struct lu_env *env,
                                       const void *area, ssize_t len)
{
        struct lu_buf *buf;

        buf = &mdd_env_info(env)->mti_buf;
        buf->lb_buf = (void *)area;
        buf->lb_len = len;
        return buf;
}
struct mdd_thread_info *mdd_env_info(const struct lu_env *env)
{
        struct mdd_thread_info *info;

        info = lu_context_key_get(&env->le_ctx, &mdd_thread_key);
        LASSERT(info != NULL);
        return info;
}
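/*
 * Allocate an mdd object and wire in the mdd operation vectors.
 * lu_object_init() is called with a NULL header here; the caller is
 * expected to link the object into its compound header.
 */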
struct lu_object *mdd_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *hdr,
                                   struct lu_device *d)
{
        struct mdd_object *mdd_obj;

        OBD_ALLOC_PTR(mdd_obj);
        if (mdd_obj != NULL) {
                struct lu_object *o;

                o = mdd2lu_obj(mdd_obj);
                lu_object_init(o, NULL, d);
                mdd_obj->mod_obj.mo_ops = &mdd_obj_ops;
                mdd_obj->mod_obj.mo_dir_ops = &mdd_dir_ops;
                mdd_obj->mod_count = 0;
                o->lo_ops = &mdd_lu_obj_ops;
                return o;
        } else {
                return NULL;
        }
}
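/*
 * Allocate the object of the underlying (child) device and stack it
 * below this mdd object inside the compound lu_object.
 */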
static int mdd_object_init(const struct lu_env *env, struct lu_object *o,
                           const struct lu_object_conf *_)
{
        struct mdd_device *d = lu2mdd_dev(o->lo_dev);
        struct lu_object  *below;
        struct lu_device  *under;
        ENTRY;

        under = &d->mdd_child->dd_lu_dev;
        below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
        mdd_pdlock_init(lu2mdd_obj(o));
        if (below == NULL)
                RETURN(-ENOMEM);

        lu_object_add(o, below);
        RETURN(0);
}
static int mdd_object_start(const struct lu_env *env, struct lu_object *o)
{
        if (lu_object_exists(o))
                return mdd_get_flags(env, lu2mdd_obj(o));
        else
                return 0;
}
static void mdd_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct mdd_object *mdd = lu2mdd_obj(o);

        lu_object_fini(o);
        OBD_FREE_PTR(mdd);
}
static int mdd_object_print(const struct lu_env *env, void *cookie,
                            lu_printer_t p, const struct lu_object *o)
{
        return (*p)(env, cookie, LUSTRE_MDD_NAME"-object@%p", o);
}
static const struct lu_object_operations mdd_lu_obj_ops = {
        .loo_object_init    = mdd_object_init,
        .loo_object_start   = mdd_object_start,
        .loo_object_free    = mdd_object_free,
        .loo_object_print   = mdd_object_print,
};
struct mdd_object *mdd_object_find(const struct lu_env *env,
                                   struct mdd_device *d,
                                   const struct lu_fid *f)
{
        return md2mdd_obj(md_object_find_slice(env, &d->mdd_md_dev, f));
}
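/*
 * Re-read the on-disk flags and refresh the cached mod_flags;
 * directories with la_nlink == 1 are additionally tagged MNLINK_OBJ.
 */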
int mdd_get_flags(const struct lu_env *env, struct mdd_object *obj)
{
        struct lu_attr *la = &mdd_env_info(env)->mti_la;
        int rc;
        ENTRY;

        rc = mdd_la_get(env, obj, la, BYPASS_CAPA);
        if (rc == 0) {
                mdd_flags_xlate(obj, la->la_flags);
                if (S_ISDIR(la->la_mode) && la->la_nlink == 1)
                        obj->mod_flags |= MNLINK_OBJ;
        }
        RETURN(rc);
}
/* get only inode attributes */
int mdd_iattr_get(const struct lu_env *env, struct mdd_object *mdd_obj,
                  struct md_attr *ma)
{
        int rc = 0;
        ENTRY;

        if (ma->ma_valid & MA_INODE)
                RETURN(0);

        rc = mdd_la_get(env, mdd_obj, &ma->ma_attr,
                        mdd_object_capa(env, mdd_obj));
        if (rc == 0)
                ma->ma_valid |= MA_INODE;
        RETURN(rc);
}
static int mdd_get_default_md(struct mdd_object *mdd_obj,
                              struct lov_mds_md *lmm, int *size)
{
        struct lov_desc *ldesc;
        struct mdd_device *mdd = mdo2mdd(&mdd_obj->mod_obj);
        ENTRY;

        ldesc = &mdd->mdd_obd_dev->u.mds.mds_lov_desc;
        LASSERT(ldesc != NULL);

        if (!lmm)
                RETURN(0);

        lmm->lmm_magic = LOV_MAGIC_V1;
        lmm->lmm_object_gr = LOV_OBJECT_GROUP_DEFAULT;
        lmm->lmm_pattern = ldesc->ld_pattern;
        lmm->lmm_stripe_size = ldesc->ld_default_stripe_size;
        lmm->lmm_stripe_count = ldesc->ld_default_stripe_count;
        *size = sizeof(struct lov_mds_md);

        RETURN(sizeof(struct lov_mds_md));
}
/* get lov EA only */
static int __mdd_lmm_get(const struct lu_env *env,
                         struct mdd_object *mdd_obj, struct md_attr *ma)
{
        int rc;
        ENTRY;

        if (ma->ma_valid & MA_LOV)
                RETURN(0);

        rc = mdd_get_md(env, mdd_obj, ma->ma_lmm, &ma->ma_lmm_size,
                        MDS_LOV_MD_NAME);
        if (rc == 0 && (ma->ma_need & MA_LOV_DEF)) {
                rc = mdd_get_default_md(mdd_obj, ma->ma_lmm,
                                        &ma->ma_lmm_size);
        }
        if (rc > 0) {
                ma->ma_valid |= MA_LOV;
                rc = 0;
        }
        RETURN(rc);
}
int mdd_lmm_get_locked(const struct lu_env *env, struct mdd_object *mdd_obj,
                       struct md_attr *ma)
{
        int rc;
        ENTRY;

        mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = __mdd_lmm_get(env, mdd_obj, ma);
        mdd_read_unlock(env, mdd_obj);
        RETURN(rc);
}
/* get lmv EA only */
static int __mdd_lmv_get(const struct lu_env *env,
                         struct mdd_object *mdd_obj, struct md_attr *ma)
{
        int rc;
        ENTRY;

        if (ma->ma_valid & MA_LMV)
                RETURN(0);

        rc = mdd_get_md(env, mdd_obj, ma->ma_lmv, &ma->ma_lmv_size,
                        MDS_LMV_MD_NAME);
        if (rc > 0) {
                ma->ma_valid |= MA_LMV;
                rc = 0;
        }
        RETURN(rc);
}
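/*
 * Gather the attribute groups requested in ma->ma_need: plain inode
 * attributes, the LOV EA for files and directories, the LMV EA for
 * directories, and (if configured) the default ACL.
 */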
static int mdd_attr_get_internal(const struct lu_env *env,
                                 struct mdd_object *mdd_obj,
                                 struct md_attr *ma)
{
        int rc = 0;
        ENTRY;

        if (ma->ma_need & MA_INODE)
                rc = mdd_iattr_get(env, mdd_obj, ma);

        if (rc == 0 && ma->ma_need & MA_LOV) {
                if (S_ISREG(mdd_object_type(mdd_obj)) ||
                    S_ISDIR(mdd_object_type(mdd_obj)))
                        rc = __mdd_lmm_get(env, mdd_obj, ma);
        }
        if (rc == 0 && ma->ma_need & MA_LMV) {
                if (S_ISDIR(mdd_object_type(mdd_obj)))
                        rc = __mdd_lmv_get(env, mdd_obj, ma);
        }
#ifdef CONFIG_FS_POSIX_ACL
        if (rc == 0 && ma->ma_need & MA_ACL_DEF) {
                if (S_ISDIR(mdd_object_type(mdd_obj)))
                        rc = mdd_def_acl_get(env, mdd_obj, ma);
        }
#endif
        CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64"\n",
               rc, ma->ma_valid);
        RETURN(rc);
}
int mdd_attr_get_internal_locked(const struct lu_env *env,
                                 struct mdd_object *mdd_obj,
                                 struct md_attr *ma)
{
        int rc;
        int needlock = ma->ma_need & (MA_LOV | MA_LMV | MA_ACL_DEF);

        if (needlock)
                mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = mdd_attr_get_internal(env, mdd_obj, ma);
        if (needlock)
                mdd_read_unlock(env, mdd_obj);
        return rc;
}
/*
 * No permission check is needed.
 */
static int mdd_attr_get(const struct lu_env *env, struct md_object *obj,
                        struct md_attr *ma)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        int                rc;
        ENTRY;

        rc = mdd_attr_get_internal_locked(env, mdd_obj, ma);
        RETURN(rc);
}
/*
 * No permission check is needed.
 */
static int mdd_xattr_get(const struct lu_env *env,
                         struct md_object *obj, struct lu_buf *buf,
                         const char *name)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        int rc;
        ENTRY;

        LASSERT(mdd_object_exists(mdd_obj));

        mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = mdo_xattr_get(env, mdd_obj, buf, name,
                           mdd_object_capa(env, mdd_obj));
        mdd_read_unlock(env, mdd_obj);

        RETURN(rc);
}
/*
 * Permission check is done when open,
 * no need check again.
 */
static int mdd_readlink(const struct lu_env *env, struct md_object *obj,
                        struct lu_buf *buf)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        struct dt_object  *next;
        loff_t             pos = 0;
        int                rc;
        ENTRY;

        LASSERT(mdd_object_exists(mdd_obj));

        next = mdd_object_child(mdd_obj);
        mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = next->do_body_ops->dbo_read(env, next, buf, &pos,
                                         mdd_object_capa(env, mdd_obj));
        mdd_read_unlock(env, mdd_obj);
        RETURN(rc);
}
/*
 * No permission check is needed.
 */
static int mdd_xattr_list(const struct lu_env *env, struct md_object *obj,
                          struct lu_buf *buf)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        int rc;
        ENTRY;

        mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = mdo_xattr_list(env, mdd_obj, buf, mdd_object_capa(env, mdd_obj));
        mdd_read_unlock(env, mdd_obj);

        RETURN(rc);
}
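/*
 * Create the object body at the OSD layer under @handle. @p, when
 * non-NULL, is used only as an allocation hint for the new object;
 * @spec->sp_feat selects the index features for directory objects.
 */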
int mdd_object_create_internal(const struct lu_env *env, struct mdd_object *p,
                               struct mdd_object *c, struct md_attr *ma,
                               struct thandle *handle,
                               const struct md_op_spec *spec)
{
        struct lu_attr *attr = &ma->ma_attr;
        struct dt_allocation_hint *hint = &mdd_env_info(env)->mti_hint;
        struct dt_object_format *dof = &mdd_env_info(env)->mti_dof;
        const struct dt_index_features *feat = spec->sp_feat;
        int rc;
        ENTRY;

        if (!mdd_object_exists(c)) {
                struct dt_object *next = mdd_object_child(c);
                LASSERT(next);

                if (feat != &dt_directory_features && feat != NULL)
                        dof->dof_type = DFT_INDEX;
                else
                        dof->dof_type = dt_mode_to_dft(attr->la_mode);

                dof->u.dof_idx.di_feat = feat;

                /* @hint will be initialized by underlying device. */
                next->do_ops->do_ah_init(env, hint,
                                         p ? mdd_object_child(p) : NULL,
                                         attr->la_mode & S_IFMT);
                rc = mdo_create_obj(env, c, attr, hint, dof, handle);
                LASSERT(ergo(rc == 0, mdd_object_exists(c)));
        } else
                rc = -EEXIST;

        RETURN(rc);
}
/**
 * Make sure the ctime is increased only.
 */
static inline int mdd_attr_check(const struct lu_env *env,
                                 struct mdd_object *obj,
                                 struct lu_attr *attr)
{
        struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
        int rc;
        ENTRY;

        if (attr->la_valid & LA_CTIME) {
                rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
                if (rc)
                        RETURN(rc);

                if (attr->la_ctime < tmp_la->la_ctime)
                        attr->la_valid &= ~(LA_MTIME | LA_CTIME);
                else if (attr->la_valid == LA_CTIME &&
                         attr->la_ctime == tmp_la->la_ctime)
                        attr->la_valid &= ~LA_CTIME;
        }
        RETURN(0);
}
int mdd_attr_set_internal(const struct lu_env *env,
                          struct mdd_object *obj,
                          struct lu_attr *attr,
                          struct thandle *handle,
                          int needacl)
{
        int rc;
        ENTRY;

        rc = mdo_attr_set(env, obj, attr, handle, mdd_object_capa(env, obj));
#ifdef CONFIG_FS_POSIX_ACL
        if (!rc && (attr->la_valid & LA_MODE) && needacl)
                rc = mdd_acl_chmod(env, obj, attr->la_mode, handle);
#endif
        RETURN(rc);
}
int mdd_attr_check_set_internal(const struct lu_env *env,
                                struct mdd_object *obj,
                                struct lu_attr *attr,
                                struct thandle *handle,
                                int needacl)
{
        int rc;
        ENTRY;

        rc = mdd_attr_check(env, obj, attr);
        if (rc)
                RETURN(rc);

        if (attr->la_valid)
                rc = mdd_attr_set_internal(env, obj, attr, handle, needacl);
        RETURN(rc);
}
static int mdd_attr_set_internal_locked(const struct lu_env *env,
                                        struct mdd_object *obj,
                                        struct lu_attr *attr,
                                        struct thandle *handle,
                                        int needacl)
{
        int rc;
        ENTRY;

        needacl = needacl && (attr->la_valid & LA_MODE);
        if (needacl)
                mdd_write_lock(env, obj, MOR_TGT_CHILD);
        rc = mdd_attr_set_internal(env, obj, attr, handle, needacl);
        if (needacl)
                mdd_write_unlock(env, obj);
        RETURN(rc);
}
int mdd_attr_check_set_internal_locked(const struct lu_env *env,
                                       struct mdd_object *obj,
                                       struct lu_attr *attr,
                                       struct thandle *handle,
                                       int needacl)
{
        int rc;
        ENTRY;

        needacl = needacl && (attr->la_valid & LA_MODE);
        if (needacl)
                mdd_write_lock(env, obj, MOR_TGT_CHILD);
        rc = mdd_attr_check_set_internal(env, obj, attr, handle, needacl);
        if (needacl)
                mdd_write_unlock(env, obj);
        RETURN(rc);
}
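/*
 * Set or delete an extended attribute inside an already-started
 * transaction; a (NULL, 0) buffer means removal of @name.
 */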
static int __mdd_xattr_set(const struct lu_env *env, struct mdd_object *obj,
                           const struct lu_buf *buf, const char *name,
                           int fl, struct thandle *handle)
{
        struct lustre_capa *capa = mdd_object_capa(env, obj);
        int rc = -EINVAL;
        ENTRY;

        if (buf->lb_buf && buf->lb_len > 0)
                rc = mdo_xattr_set(env, obj, buf, name, fl, handle, capa);
        else if (buf->lb_buf == NULL && buf->lb_len == 0)
                rc = mdo_xattr_del(env, obj, name, handle, capa);

        RETURN(rc);
}
/*
 * This gives the same functionality as the code between
 * sys_chmod and inode_setattr
 * chown_common and inode_setattr
 * utimes and inode_setattr
 * This API is ported from mds_fix_attr but removes some unnecessary stuff.
 */
static int mdd_fix_attr(const struct lu_env *env, struct mdd_object *obj,
                        struct lu_attr *la, const struct md_attr *ma)
{
        struct lu_attr  *tmp_la = &mdd_env_info(env)->mti_la;
        struct md_ucred *uc     = md_ucred(env);
        int rc;
        ENTRY;

        if (!la->la_valid)
                RETURN(0);

        /* Do not permit change file type */
        if (la->la_valid & LA_TYPE)
                RETURN(-EPERM);

        /* They should not be processed by setattr */
        if (la->la_valid & (LA_NLINK | LA_RDEV | LA_BLKSIZE))
                RETURN(-EPERM);

        rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
        if (rc)
                RETURN(rc);

        if (la->la_valid == LA_CTIME) {
                if (!(ma->ma_attr_flags & MDS_PERM_BYPASS))
                        /* This is only for set ctime when rename's source is
                         * on remote MDS. */
                        rc = mdd_may_delete(env, NULL, obj,
                                            (struct md_attr *)ma, 1, 0);
                if (rc == 0 && la->la_ctime <= tmp_la->la_ctime)
                        la->la_valid &= ~LA_CTIME;
                RETURN(rc);
        }

        if (la->la_valid == LA_ATIME) {
                /* This is atime only set for read atime update on close. */
                if (la->la_atime <= tmp_la->la_atime +
                                    mdd_obj2mdd_dev(obj)->mdd_atime_diff)
                        la->la_valid &= ~LA_ATIME;
                RETURN(0);
        }

        /* Check if flags change. */
        if (la->la_valid & LA_FLAGS) {
                unsigned int oldflags = 0;
                unsigned int newflags = la->la_flags &
                                (LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL);

                if ((uc->mu_fsuid != tmp_la->la_uid) &&
                    !mdd_capable(uc, CFS_CAP_FOWNER))
                        RETURN(-EPERM);

                /* XXX: the IMMUTABLE and APPEND_ONLY flags can
                 * only be changed by the relevant capability. */
                if (mdd_is_immutable(obj))
                        oldflags |= LUSTRE_IMMUTABLE_FL;
                if (mdd_is_append(obj))
                        oldflags |= LUSTRE_APPEND_FL;
                if ((oldflags ^ newflags) &&
                    !mdd_capable(uc, CFS_CAP_LINUX_IMMUTABLE))
                        RETURN(-EPERM);

                if (!S_ISDIR(tmp_la->la_mode))
                        la->la_flags &= ~LUSTRE_DIRSYNC_FL;
        }

        if ((mdd_is_immutable(obj) || mdd_is_append(obj)) &&
            (la->la_valid & ~LA_FLAGS) &&
            !(ma->ma_attr_flags & MDS_PERM_BYPASS))
                RETURN(-EPERM);

        /* Check for setting the obj time. */
        if ((la->la_valid & (LA_MTIME | LA_ATIME | LA_CTIME)) &&
            !(la->la_valid & ~(LA_MTIME | LA_ATIME | LA_CTIME))) {
                if ((uc->mu_fsuid != tmp_la->la_uid) &&
                    !mdd_capable(uc, CFS_CAP_FOWNER)) {
                        rc = mdd_permission_internal_locked(env, obj, tmp_la,
                                                            MAY_WRITE,
                                                            MOR_TGT_CHILD);
                        if (rc)
                                RETURN(rc);
                }
        }

        /* Make sure a caller can chmod. */
        if (la->la_valid & LA_MODE) {
                /* Bypass la_valid == LA_MODE,
                 * this is for changing file with SUID or SGID. */
                if ((la->la_valid & ~LA_MODE) &&
                    !(ma->ma_attr_flags & MDS_PERM_BYPASS) &&
                    (uc->mu_fsuid != tmp_la->la_uid) &&
                    !mdd_capable(uc, CFS_CAP_FOWNER))
                        RETURN(-EPERM);

                if (la->la_mode == (umode_t) -1)
                        la->la_mode = tmp_la->la_mode;
                else
                        la->la_mode = (la->la_mode & S_IALLUGO) |
                                      (tmp_la->la_mode & ~S_IALLUGO);

                /* Also check the setgid bit! */
                if (!lustre_in_group_p(uc, (la->la_valid & LA_GID) ?
                                       la->la_gid : tmp_la->la_gid) &&
                    !mdd_capable(uc, CFS_CAP_FSETID))
                        la->la_mode &= ~S_ISGID;
        } else {
                la->la_mode = tmp_la->la_mode;
        }

        /* Make sure a caller can chown. */
        if (la->la_valid & LA_UID) {
                if (la->la_uid == (uid_t) -1)
                        la->la_uid = tmp_la->la_uid;
                if (((uc->mu_fsuid != tmp_la->la_uid) ||
                     (la->la_uid != tmp_la->la_uid)) &&
                    !mdd_capable(uc, CFS_CAP_CHOWN))
                        RETURN(-EPERM);

                /* If the user or group of a non-directory has been
                 * changed by a non-root user, remove the setuid bit.
                 * 19981026 David C Niemi <niemi@tux.org>
                 *
                 * Changed this to apply to all users, including root,
                 * to avoid some races. This is the behavior we had in
                 * 2.0. The check for non-root was definitely wrong
                 * for 2.2 anyway, as it should have been using
                 * CAP_FSETID rather than fsuid -- 19990830 SD. */
                if (((tmp_la->la_mode & S_ISUID) == S_ISUID) &&
                    !S_ISDIR(tmp_la->la_mode)) {
                        la->la_mode &= ~S_ISUID;
                        la->la_valid |= LA_MODE;
                }
        }

        /* Make sure caller can chgrp. */
        if (la->la_valid & LA_GID) {
                if (la->la_gid == (gid_t) -1)
                        la->la_gid = tmp_la->la_gid;
                if (((uc->mu_fsuid != tmp_la->la_uid) ||
                     ((la->la_gid != tmp_la->la_gid) &&
                      !lustre_in_group_p(uc, la->la_gid))) &&
                    !mdd_capable(uc, CFS_CAP_CHOWN))
                        RETURN(-EPERM);

                /* Likewise, if the user or group of a non-directory
                 * has been changed by a non-root user, remove the
                 * setgid bit UNLESS there is no group execute bit
                 * (this would be a file marked for mandatory
                 * locking).  19981026 David C Niemi <niemi@tux.org>
                 *
                 * Removed the fsuid check (see the comment above) --
                 * 19990830 SD. */
                if (((tmp_la->la_mode & (S_ISGID | S_IXGRP)) ==
                     (S_ISGID | S_IXGRP)) && !S_ISDIR(tmp_la->la_mode)) {
                        la->la_mode &= ~S_ISGID;
                        la->la_valid |= LA_MODE;
                }
        }

        /* For both Size-on-MDS case and truncate case,
         * "la->la_valid & (LA_SIZE | LA_BLOCKS)" is true.
         * We distinguish them by "ma->ma_attr_flags & MDS_SOM".
         * For SOM case, it is true, the MAY_WRITE perm has been checked
         * when open, no need check again. For truncate case, it is false,
         * the MAY_WRITE perm should be checked here. */
        if (ma->ma_attr_flags & MDS_SOM) {
                /* For the "Size-on-MDS" setattr update, merge coming
                 * attributes with the set in the inode. BUG 10641 */
                if ((la->la_valid & LA_ATIME) &&
                    (la->la_atime <= tmp_la->la_atime))
                        la->la_valid &= ~LA_ATIME;

                /* OST attributes do not have a priority over MDS attributes,
                 * so drop times if ctime is equal. */
                if ((la->la_valid & LA_CTIME) &&
                    (la->la_ctime <= tmp_la->la_ctime))
                        la->la_valid &= ~(LA_MTIME | LA_CTIME);
        } else {
                if (la->la_valid & (LA_SIZE | LA_BLOCKS)) {
                        if (!((ma->ma_attr_flags & MDS_OPEN_OWNEROVERRIDE) &&
                              (uc->mu_fsuid == tmp_la->la_uid)) &&
                            !(ma->ma_attr_flags & MDS_PERM_BYPASS)) {
                                rc = mdd_permission_internal_locked(env, obj,
                                                            tmp_la, MAY_WRITE,
                                                            MOR_TGT_CHILD);
                                if (rc)
                                        RETURN(rc);
                        }
                }
                if (la->la_valid & LA_CTIME) {
                        /* The pure setattr, it has the priority over what is
                         * already set, do not drop it if ctime is equal. */
                        if (la->la_ctime < tmp_la->la_ctime)
                                la->la_valid &= ~(LA_ATIME | LA_MTIME |
                                                  LA_CTIME);
                }
        }

        RETURN(0);
}
/* set attr and LOV EA at once, return updated attr */
static int mdd_attr_set(const struct lu_env *env, struct md_object *obj,
                        const struct md_attr *ma)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        struct mdd_device *mdd = mdo2mdd(obj);
        struct thandle *handle;
        struct lov_mds_md *lmm = NULL;
        struct llog_cookie *logcookies = NULL;
        int rc, lmm_size = 0, cookie_size = 0;
        struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
#ifdef HAVE_QUOTA_SUPPORT
        struct obd_device *obd = mdd->mdd_obd_dev;
        struct mds_obd *mds = &obd->u.mds;
        unsigned int qnids[MAXQUOTAS] = { 0, 0 };
        unsigned int qoids[MAXQUOTAS] = { 0, 0 };
        int quota_opc = 0, block_count = 0;
        int inode_pending = 0, block_pending = 0;
#endif
        ENTRY;

        mdd_setattr_txn_param_build(env, obj, (struct md_attr *)ma,
                                    MDD_TXN_ATTR_SET_OP);
        handle = mdd_trans_start(env, mdd);
        if (IS_ERR(handle))
                RETURN(PTR_ERR(handle));
        /*TODO: add lock here*/
        /* start a log journal handle if needed */
        if (S_ISREG(mdd_object_type(mdd_obj)) &&
            ma->ma_attr.la_valid & (LA_UID | LA_GID)) {
                lmm_size = mdd_lov_mdsize(env, mdd);
                lmm = mdd_max_lmm_get(env, mdd);
                if (lmm == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                rc = mdd_get_md_locked(env, mdd_obj, lmm, &lmm_size,
                                       MDS_LOV_MD_NAME);

                if (rc < 0)
                        GOTO(cleanup, rc);
        }

        if (ma->ma_attr.la_valid & (LA_MTIME | LA_CTIME))
                CDEBUG(D_INODE, "setting mtime "LPU64", ctime "LPU64"\n",
                       ma->ma_attr.la_mtime, ma->ma_attr.la_ctime);

        *la_copy = ma->ma_attr;
        rc = mdd_fix_attr(env, mdd_obj, la_copy, ma);
        if (rc)
                GOTO(cleanup, rc);

#ifdef HAVE_QUOTA_SUPPORT
        if (mds->mds_quota && la_copy->la_valid & (LA_UID | LA_GID)) {
                struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;

                rc = mdd_la_get(env, mdd_obj, la_tmp, BYPASS_CAPA);
                if (!rc) {
                        quota_opc = FSFILT_OP_SETATTR;
                        mdd_quota_wrapper(la_copy, qnids);
                        mdd_quota_wrapper(la_tmp, qoids);
                        /* get file quota for new owner */
                        lquota_chkquota(mds_quota_interface_ref, obd,
                                        qnids[USRQUOTA], qnids[GRPQUOTA], 1,
                                        &inode_pending, NULL, 0);
                        block_count = (la_tmp->la_blocks + 7) >> 3;
                        if (block_count)
                                /* get block quota for new owner */
                                lquota_chkquota(mds_quota_interface_ref, obd,
                                                qnids[USRQUOTA],
                                                qnids[GRPQUOTA],
                                                block_count, &block_pending,
                                                NULL, LQUOTA_FLAGS_BLK);
                }
        }
#endif

        if (la_copy->la_valid & LA_FLAGS) {
                rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
                                                  handle, 1);
                if (rc == 0)
                        mdd_flags_xlate(mdd_obj, la_copy->la_flags);
        } else if (la_copy->la_valid) {            /* setattr */
                rc = mdd_attr_set_internal_locked(env, mdd_obj, la_copy,
                                                  handle, 1);
                /* journal chown/chgrp in llog, just like unlink */
                if (rc == 0 && lmm_size) {
                        cookie_size = mdd_lov_cookiesize(env, mdd);
                        logcookies = mdd_max_cookie_get(env, mdd);
                        if (logcookies == NULL)
                                GOTO(cleanup, rc = -ENOMEM);

                        if (mdd_setattr_log(env, mdd, ma, lmm, lmm_size,
                                            logcookies, cookie_size) <= 0)
                                logcookies = NULL;
                }
        }

        if (rc == 0 && ma->ma_valid & MA_LOV) {
                umode_t mode;

                mode = mdd_object_type(mdd_obj);
                if (S_ISREG(mode) || S_ISDIR(mode)) {
                        rc = mdd_lsm_sanity_check(env, mdd_obj);
                        if (rc)
                                GOTO(cleanup, rc);

                        rc = mdd_lov_set_md(env, NULL, mdd_obj, ma->ma_lmm,
                                            ma->ma_lmm_size, handle, 1);
                }
        }
cleanup:
        mdd_trans_stop(env, mdd, rc, handle);
        if (rc == 0 && (lmm != NULL && lmm_size > 0)) {
                /*set obd attr, if needed*/
                rc = mdd_lov_setattr_async(env, mdd_obj, lmm, lmm_size,
                                           logcookies);
        }
#ifdef HAVE_QUOTA_SUPPORT
        if (quota_opc) {
                if (inode_pending)
                        lquota_pending_commit(mds_quota_interface_ref, obd,
                                              qnids[USRQUOTA], qnids[GRPQUOTA],
                                              inode_pending, 0);
                if (block_pending)
                        lquota_pending_commit(mds_quota_interface_ref, obd,
                                              qnids[USRQUOTA], qnids[GRPQUOTA],
                                              block_pending, 1);
                /* Trigger dqrel/dqacq for original owner and new owner.
                 * If failed, the next call for lquota_chkquota will
                 * process it. */
                lquota_adjust(mds_quota_interface_ref, obd, qnids, qoids, rc,
                              quota_opc);
        }
#endif
        RETURN(rc);
}
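/* xattr set helper: takes the object write lock around __mdd_xattr_set() */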
int mdd_xattr_set_txn(const struct lu_env *env, struct mdd_object *obj,
                      const struct lu_buf *buf, const char *name, int fl,
                      struct thandle *handle)
{
        int rc;
        ENTRY;

        mdd_write_lock(env, obj, MOR_TGT_CHILD);
        rc = __mdd_xattr_set(env, obj, buf, name, fl, handle);
        mdd_write_unlock(env, obj);

        RETURN(rc);
}
static int mdd_xattr_sanity_check(const struct lu_env *env,
                                  struct mdd_object *obj)
{
        struct lu_attr  *tmp_la = &mdd_env_info(env)->mti_la;
        struct md_ucred *uc     = md_ucred(env);
        int rc;
        ENTRY;

        if (mdd_is_immutable(obj) || mdd_is_append(obj))
                RETURN(-EPERM);

        rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
        if (rc)
                RETURN(rc);

        if ((uc->mu_fsuid != tmp_la->la_uid) &&
            !mdd_capable(uc, CFS_CAP_FOWNER))
                RETURN(-EPERM);

        RETURN(rc);
}
/**
 * The caller should guarantee to update the object ctime
 * after xattr_set if needed.
 */
static int mdd_xattr_set(const struct lu_env *env, struct md_object *obj,
                         const struct lu_buf *buf, const char *name,
                         int fl)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        struct mdd_device *mdd = mdo2mdd(obj);
        struct thandle *handle;
        int rc;
        ENTRY;

        rc = mdd_xattr_sanity_check(env, mdd_obj);
        if (rc)
                RETURN(rc);

        mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
        handle = mdd_trans_start(env, mdd);
        if (IS_ERR(handle))
                RETURN(PTR_ERR(handle));

        rc = mdd_xattr_set_txn(env, mdd_obj, buf, name, fl, handle);
        mdd_trans_stop(env, mdd, rc, handle);

        RETURN(rc);
}
/**
 * The caller should guarantee to update the object ctime
 * after xattr_set if needed.
 */
int mdd_xattr_del(const struct lu_env *env, struct md_object *obj,
                  const char *name)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        struct mdd_device *mdd = mdo2mdd(obj);
        struct thandle *handle;
        int rc;
        ENTRY;

        rc = mdd_xattr_sanity_check(env, mdd_obj);
        if (rc)
                RETURN(rc);

        mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
        handle = mdd_trans_start(env, mdd);
        if (IS_ERR(handle))
                RETURN(PTR_ERR(handle));

        mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = mdo_xattr_del(env, mdd_obj, name, handle,
                           mdd_object_capa(env, mdd_obj));
        mdd_write_unlock(env, mdd_obj);
        mdd_trans_stop(env, mdd, rc, handle);

        RETURN(rc);
}
/* partial unlink */
static int mdd_ref_del(const struct lu_env *env, struct md_object *obj,
                       struct md_attr *ma)
{
        struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        struct mdd_device *mdd = mdo2mdd(obj);
        struct thandle *handle;
#ifdef HAVE_QUOTA_SUPPORT
        struct obd_device *obd = mdd->mdd_obd_dev;
        struct mds_obd *mds = &obd->u.mds;
        unsigned int qids[MAXQUOTAS] = { 0, 0 };
        int quota_opc = 0;
#endif
        int rc;
        ENTRY;

        /*
         * Check -ENOENT early here because we need to get object type
         * to calculate credits before transaction start
         */
        if (!mdd_object_exists(mdd_obj))
                RETURN(-ENOENT);

        LASSERT(mdd_object_exists(mdd_obj) > 0);

        rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
        if (rc)
                RETURN(rc);

        handle = mdd_trans_start(env, mdd);
        if (IS_ERR(handle))
                RETURN(PTR_ERR(handle));

        mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);

        rc = mdd_unlink_sanity_check(env, NULL, mdd_obj, ma);
        if (rc)
                GOTO(cleanup, rc);

        __mdd_ref_del(env, mdd_obj, handle, 0);

        if (S_ISDIR(lu_object_attr(&obj->mo_lu))) {
                /* unlink dot */
                __mdd_ref_del(env, mdd_obj, handle, 1);
        }

        LASSERT(ma->ma_attr.la_valid & LA_CTIME);
        la_copy->la_ctime = ma->ma_attr.la_ctime;

        la_copy->la_valid = LA_CTIME;
        rc = mdd_attr_check_set_internal(env, mdd_obj, la_copy, handle, 0);
        if (rc)
                GOTO(cleanup, rc);

        rc = mdd_finish_unlink(env, mdd_obj, ma, handle);
#ifdef HAVE_QUOTA_SUPPORT
        if (mds->mds_quota && ma->ma_valid & MA_INODE &&
            ma->ma_attr.la_nlink == 0 && mdd_obj->mod_count == 0) {
                quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
                mdd_quota_wrapper(&ma->ma_attr, qids);
        }
#endif

        EXIT;
cleanup:
        mdd_write_unlock(env, mdd_obj);
        mdd_trans_stop(env, mdd, rc, handle);
#ifdef HAVE_QUOTA_SUPPORT
        if (quota_opc)
                /* Trigger dqrel on the owner of child. If failed,
                 * the next call for lquota_chkquota will process it */
                lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
                              quota_opc);
#endif
        return rc;
}
/* partial operation */
static int mdd_oc_sanity_check(const struct lu_env *env,
                               struct mdd_object *obj,
                               struct md_attr *ma)
{
        int rc;
        ENTRY;

        switch (ma->ma_attr.la_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFIFO:
        case S_IFSOCK:
                rc = 0;
                break;
        default:
                rc = -EINVAL;
                break;
        }
        RETURN(rc);
}
static int mdd_object_create(const struct lu_env *env,
                             struct md_object *obj,
                             const struct md_op_spec *spec,
                             struct md_attr *ma)
{
        struct mdd_device *mdd = mdo2mdd(obj);
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        const struct lu_fid *pfid = spec->u.sp_pfid;
        struct thandle *handle;
#ifdef HAVE_QUOTA_SUPPORT
        struct obd_device *obd = mdd->mdd_obd_dev;
        struct mds_obd *mds = &obd->u.mds;
        unsigned int qids[MAXQUOTAS] = { 0, 0 };
        int quota_opc = 0, block_count = 0;
        int inode_pending = 0, block_pending = 0;
#endif
        int rc = 0;
        ENTRY;

#ifdef HAVE_QUOTA_SUPPORT
        if (mds->mds_quota) {
                quota_opc = FSFILT_OP_CREATE_PARTIAL_CHILD;
                mdd_quota_wrapper(&ma->ma_attr, qids);
                /* get file quota for child */
                lquota_chkquota(mds_quota_interface_ref, obd, qids[USRQUOTA],
                                qids[GRPQUOTA], 1, &inode_pending, NULL, 0);
                switch (ma->ma_attr.la_mode & S_IFMT) {
                case S_IFLNK:
                case S_IFDIR:
                        block_count = 2;
                        break;
                case S_IFREG:
                        block_count = 1;
                        break;
                }
                /* get block quota for child */
                if (block_count)
                        lquota_chkquota(mds_quota_interface_ref, obd,
                                        qids[USRQUOTA], qids[GRPQUOTA],
                                        block_count, &block_pending, NULL,
                                        LQUOTA_FLAGS_BLK);
        }
#endif

        mdd_txn_param_build(env, mdd, MDD_TXN_OBJECT_CREATE_OP);
        handle = mdd_trans_start(env, mdd);
        if (IS_ERR(handle))
                GOTO(out_pending, rc = PTR_ERR(handle));

        mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = mdd_oc_sanity_check(env, mdd_obj, ma);
        if (rc)
                GOTO(unlock, rc);

        rc = mdd_object_create_internal(env, NULL, mdd_obj, ma, handle, spec);
        if (rc)
                GOTO(unlock, rc);

        if (spec->sp_cr_flags & MDS_CREATE_SLAVE_OBJ) {
                /* If creating the slave object, set slave EA here. */
                int lmv_size = spec->u.sp_ea.eadatalen;
                struct lmv_stripe_md *lmv;

                lmv = (struct lmv_stripe_md *)spec->u.sp_ea.eadata;
                LASSERT(lmv != NULL && lmv_size > 0);

                rc = __mdd_xattr_set(env, mdd_obj,
                                     mdd_buf_get_const(env, lmv, lmv_size),
                                     MDS_LMV_MD_NAME, 0, handle);
                if (rc)
                        GOTO(unlock, rc);

                rc = mdd_attr_set_internal(env, mdd_obj, &ma->ma_attr,
                                           handle, 0);
        } else {
#ifdef CONFIG_FS_POSIX_ACL
                if (spec->sp_cr_flags & MDS_CREATE_RMT_ACL) {
                        struct lu_buf *buf = &mdd_env_info(env)->mti_buf;

                        buf->lb_buf = (void *)spec->u.sp_ea.eadata;
                        buf->lb_len = spec->u.sp_ea.eadatalen;
                        if ((buf->lb_len > 0) && (buf->lb_buf != NULL)) {
                                rc = __mdd_acl_init(env, mdd_obj, buf,
                                                    &ma->ma_attr.la_mode,
                                                    handle);
                                if (rc)
                                        GOTO(unlock, rc);
                                else
                                        ma->ma_attr.la_valid |= LA_MODE;
                        }

                        pfid = spec->u.sp_ea.fid;
                }
#endif
                rc = mdd_object_initialize(env, pfid, mdd_obj, ma, handle,
                                           spec);
        }
        EXIT;
unlock:
        if (rc == 0)
                rc = mdd_attr_get_internal(env, mdd_obj, ma);
        mdd_write_unlock(env, mdd_obj);

        mdd_trans_stop(env, mdd, rc, handle);
out_pending:
#ifdef HAVE_QUOTA_SUPPORT
        if (quota_opc) {
                if (inode_pending)
                        lquota_pending_commit(mds_quota_interface_ref, obd,
                                              qids[USRQUOTA], qids[GRPQUOTA],
                                              inode_pending, 0);
                if (block_pending)
                        lquota_pending_commit(mds_quota_interface_ref, obd,
                                              qids[USRQUOTA], qids[GRPQUOTA],
                                              block_pending, 1);
                /* Trigger dqacq on the owner of child. If failed,
                 * the next call for lquota_chkquota will process it. */
                lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
                              FSFILT_OP_CREATE_PARTIAL_CHILD);
        }
#endif
        RETURN(rc);
}
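/* partial link: bump the link count of @obj and update its ctime */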
static int mdd_ref_add(const struct lu_env *env, struct md_object *obj,
                       const struct md_attr *ma)
{
        struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        struct mdd_device *mdd = mdo2mdd(obj);
        struct thandle *handle;
        int rc;
        ENTRY;

        mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
        handle = mdd_trans_start(env, mdd);
        if (IS_ERR(handle))
                RETURN(PTR_ERR(handle));

        mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = mdd_link_sanity_check(env, NULL, NULL, mdd_obj);
        if (rc == 0)
                __mdd_ref_add(env, mdd_obj, handle);
        mdd_write_unlock(env, mdd_obj);
        if (rc == 0) {
                LASSERT(ma->ma_attr.la_valid & LA_CTIME);
                la_copy->la_ctime = ma->ma_attr.la_ctime;

                la_copy->la_valid = LA_CTIME;
                rc = mdd_attr_check_set_internal_locked(env, mdd_obj, la_copy,
                                                        handle, 0);
        }
        mdd_trans_stop(env, mdd, 0, handle);

        RETURN(rc);
}
/*
 * Do NOT OR the MAY_* values together, or you will get the weakest
 * access mode.
 */
int accmode(const struct lu_env *env, struct lu_attr *la, int flags)
{
        int res = 0;

        /* Sadly, NFSD reopens a file repeatedly during operation, so the
         * "acc_mode = 0" allowance for newly-created files isn't honoured.
         * NFSD uses the MDS_OPEN_OWNEROVERRIDE flag to say that a file
         * owner can write to a file even if it is marked readonly to hide
         * its brokenness. (bug 5781) */
        if (flags & MDS_OPEN_OWNEROVERRIDE) {
                struct md_ucred *uc = md_ucred(env);

                if ((uc == NULL) || (uc->mu_valid == UCRED_INIT) ||
                    (la->la_uid == uc->mu_fsuid))
                        return 0;
        }

        if (flags & FMODE_READ)
                res |= MAY_READ;
        if (flags & (FMODE_WRITE | MDS_OPEN_TRUNC | MDS_OPEN_APPEND))
                res |= MAY_WRITE;
        if (flags & MDS_FMODE_EXEC)
                res |= MAY_EXEC;
        return res;
}
static int mdd_open_sanity_check(const struct lu_env *env,
                                 struct mdd_object *obj, int flag)
{
        struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
        int mode, rc;
        ENTRY;

        /* EEXIST check */
        if (mdd_is_dead_obj(obj))
                RETURN(-ENOENT);

        rc = mdd_la_get(env, obj, tmp_la, BYPASS_CAPA);
        if (rc)
                RETURN(rc);

        if (S_ISLNK(tmp_la->la_mode))
                RETURN(-ELOOP);

        mode = accmode(env, tmp_la, flag);

        if (S_ISDIR(tmp_la->la_mode) && (mode & MAY_WRITE))
                RETURN(-EISDIR);

        if (!(flag & MDS_OPEN_CREATED)) {
                rc = mdd_permission_internal(env, obj, tmp_la, mode);
                if (rc)
                        RETURN(rc);
        }

        if (S_ISFIFO(tmp_la->la_mode) || S_ISSOCK(tmp_la->la_mode) ||
            S_ISBLK(tmp_la->la_mode) || S_ISCHR(tmp_la->la_mode))
                flag &= ~MDS_OPEN_TRUNC;

        /* For writing append-only file must open it with append mode. */
        if (mdd_is_append(obj)) {
                if ((flag & FMODE_WRITE) && !(flag & MDS_OPEN_APPEND))
                        RETURN(-EPERM);
                if (flag & MDS_OPEN_TRUNC)
                        RETURN(-EPERM);
        }

#if 0
        /*
         * For now, the O_NOATIME flag is not packed by the client.
         */
        if (flag & O_NOATIME) {
                struct md_ucred *uc = md_ucred(env);

                if (uc && ((uc->mu_valid == UCRED_OLD) ||
                    (uc->mu_valid == UCRED_NEW)) &&
                    (uc->mu_fsuid != tmp_la->la_uid) &&
                    !mdd_capable(uc, CFS_CAP_FOWNER))
                        RETURN(-EPERM);
        }
#endif

        RETURN(0);
}
static int mdd_open(const struct lu_env *env, struct md_object *obj,
                    int flags)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        int rc = 0;

        mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);

        rc = mdd_open_sanity_check(env, mdd_obj, flags);
        if (rc == 0)
                mdd_obj->mod_count++;

        mdd_write_unlock(env, mdd_obj);
        return rc;
}
/* return md_attr back,
 * if it is last unlink then return lov ea + llog cookie */
int mdd_object_kill(const struct lu_env *env, struct mdd_object *obj,
                    struct md_attr *ma)
{
        int rc = 0;
        ENTRY;

        if (S_ISREG(mdd_object_type(obj))) {
                /* Return LOV & COOKIES unconditionally here. We clean
                 * everything up. Caller must be ready for that. */
                rc = __mdd_lmm_get(env, obj, ma);
                if ((ma->ma_valid & MA_LOV))
                        rc = mdd_unlink_log(env, mdo2mdd(&obj->mod_obj),
                                            obj, ma);
        }
        RETURN(rc);
}
/*
 * No permission check is needed.
 */
static int mdd_close(const struct lu_env *env, struct md_object *obj,
                     struct md_attr *ma)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        struct thandle    *handle;
        int rc;
#ifdef HAVE_QUOTA_SUPPORT
        struct obd_device *obd = mdo2mdd(obj)->mdd_obd_dev;
        struct mds_obd *mds = &obd->u.mds;
        unsigned int qids[MAXQUOTAS] = { 0, 0 };
        int quota_opc = 0;
#endif
        ENTRY;

        rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
        if (rc)
                RETURN(rc);
        handle = mdd_trans_start(env, mdo2mdd(obj));
        if (IS_ERR(handle))
                RETURN(PTR_ERR(handle));

        mdd_write_lock(env, mdd_obj, MOR_TGT_CHILD);
        /* release open count */
        mdd_obj->mod_count--;

        if (mdd_obj->mod_count == 0) {
                /* remove link to object from orphan index */
                if (mdd_obj->mod_flags & ORPHAN_OBJ)
                        __mdd_orphan_del(env, mdd_obj, handle);
        }

        rc = mdd_iattr_get(env, mdd_obj, ma);
        if (rc == 0) {
                if (mdd_obj->mod_count == 0 && ma->ma_attr.la_nlink == 0) {
                        rc = mdd_object_kill(env, mdd_obj, ma);
#ifdef HAVE_QUOTA_SUPPORT
                        if (mds->mds_quota) {
                                quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
                                mdd_quota_wrapper(&ma->ma_attr, qids);
                        }
#endif
                } else {
                        /* not the last unlink: no LOV EA or llog cookies
                         * to hand back */
                        ma->ma_valid &= ~(MA_LOV | MA_COOKIE);
                }
        }

        mdd_write_unlock(env, mdd_obj);
        mdd_trans_stop(env, mdo2mdd(obj), rc, handle);
#ifdef HAVE_QUOTA_SUPPORT
        if (quota_opc)
                /* Trigger dqrel on the owner of child. If failed,
                 * the next call for lquota_chkquota will process it */
                lquota_adjust(mds_quota_interface_ref, obd, qids, 0, rc,
                              quota_opc);
#endif
        RETURN(rc);
}
/*
 * Permission check is done when open,
 * no need check again.
 */
static int mdd_readpage_sanity_check(const struct lu_env *env,
                                     struct mdd_object *obj)
{
        struct dt_object *next = mdd_object_child(obj);
        int rc;
        ENTRY;

        if (S_ISDIR(mdd_object_type(obj)) && dt_try_as_dir(env, next))
                rc = 0;
        else
                rc = -ENOTDIR;

        RETURN(rc);
}
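/*
 * Fill one page with lu_dirent records read from the iterator @it.
 * @start/@end return the hash range covered by the page and *last
 * points at the last record emitted, so the caller can terminate it
 * or extend it over unused space.
 */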
static int mdd_dir_page_build(const struct lu_env *env, int first,
                              void *area, int nob, const struct dt_it_ops *iops,
                              struct dt_it *it, __u64 *start, __u64 *end,
                              struct lu_dirent **last)
{
        struct lu_fid          *fid  = &mdd_env_info(env)->mti_fid2;
        struct mdd_thread_info *info = mdd_env_info(env);
        struct lu_fid_pack     *pack = &info->mti_pack;
        int                     result;
        struct lu_dirent       *ent;

        if (first) {
                memset(area, 0, sizeof (struct lu_dirpage));
                area += sizeof (struct lu_dirpage);
                nob  -= sizeof (struct lu_dirpage);
        }

        LASSERT(nob > sizeof *ent);

        ent  = area;
        result = 0;
        do {
                char  *name;
                int    len;
                int    recsize;
                __u64  hash;

                name = (char *)iops->key(env, it);
                len  = iops->key_size(env, it);

                pack = (struct lu_fid_pack *)iops->rec(env, it);
                result = fid_unpack(pack, fid);
                if (result != 0)
                        break;

                recsize = (sizeof(*ent) + len + 7) & ~7;
                hash = iops->store(env, it);
                *end = hash;

                CDEBUG(D_INFO, "%p %p %d "DFID": "LPU64" (%d) \"%*.*s\"\n",
                       name, ent, nob, PFID(fid), hash, len, len, len, name);

                if (nob >= recsize) {
                        ent->lde_fid = *fid;
                        fid_cpu_to_le(&ent->lde_fid, &ent->lde_fid);
                        ent->lde_hash = hash;
                        ent->lde_namelen = cpu_to_le16(len);
                        ent->lde_reclen  = cpu_to_le16(recsize);
                        memcpy(ent->lde_name, name, len);
                        if (first && ent == area)
                                *start = hash;
                        *last = ent;
                        ent = (void *)ent + recsize;
                        nob -= recsize;
                        result = iops->next(env, it);
                } else {
                        /*
                         * record doesn't fit into page, enlarge previous one.
                         */
                        LASSERT(*last != NULL);
                        (*last)->lde_reclen =
                                cpu_to_le16(le16_to_cpu((*last)->lde_reclen) +
                                            nob);
                        break;
                }
        } while (result == 0);

        return result;
}
static int __mdd_readpage(const struct lu_env *env, struct mdd_object *obj,
                          const struct lu_rdpg *rdpg)
{
        struct dt_object        *next = mdd_object_child(obj);
        const struct dt_it_ops  *iops;
        struct page             *pg;
        struct lu_dirent        *last = NULL;
        struct dt_it            *it;
        int   i;
        int   rc;
        int   nob;
        __u64 hash_start;
        __u64 hash_end = 0;

        LASSERT(rdpg->rp_pages != NULL);
        LASSERT(next->do_index_ops != NULL);

        if (rdpg->rp_count <= 0)
                return -EFAULT;

        /*
         * iterate through directory and fill pages from @rdpg
         */
        iops = &next->do_index_ops->dio_it;
        it = iops->init(env, next, mdd_object_capa(env, obj));
        if (it == NULL)
                return -ENOMEM;

        rc = iops->load(env, it, rdpg->rp_hash);

        if (rc == 0)
                /*
                 * Iterator didn't find record with exactly the key requested.
                 *
                 * It is currently either
                 *
                 *     - positioned above record with key less than
                 *     requested---skip it.
                 *
                 *     - or not positioned at all (is in IAM_IT_SKEWED
                 *     state)---position it on the next item.
                 */
                rc = iops->next(env, it);
        else if (rc > 0)
                rc = 0;

        /*
         * At this point and across for-loop:
         *
         *  rc == 0 -> ok, proceed.
         *  rc >  0 -> end of directory.
         *  rc <  0 -> error.
         */
        for (i = 0, nob = rdpg->rp_count; rc == 0 && nob > 0;
             i++, nob -= CFS_PAGE_SIZE) {
                LASSERT(i < rdpg->rp_npages);
                pg = rdpg->rp_pages[i];
                rc = mdd_dir_page_build(env, !i, cfs_kmap(pg),
                                        min_t(int, nob, CFS_PAGE_SIZE), iops,
                                        it, &hash_start, &hash_end, &last);
                if (rc != 0 || i == rdpg->rp_npages - 1)
                        last->lde_reclen = 0;
                cfs_kunmap(pg);
        }
        if (rc > 0) {
                /*
                 * end of directory.
                 */
                hash_end = DIR_END_OFF;
                rc = 0;
        }
        if (rc == 0) {
                struct lu_dirpage *dp;

                dp = cfs_kmap(rdpg->rp_pages[0]);
                dp->ldp_hash_start = rdpg->rp_hash;
                dp->ldp_hash_end   = hash_end;
                if (i == 0)
                        /*
                         * No pages were processed, mark this.
                         */
                        dp->ldp_flags |= LDF_EMPTY;
                dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
                cfs_kunmap(rdpg->rp_pages[0]);
        }
        iops->put(env, it);
        iops->fini(env, it);

        return rc;
}
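/*
 * moo_readpage() entry point: dead (unlinked but still open)
 * directories get a single empty page, everything else is filled in
 * by walking the directory index.
 */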
static int mdd_readpage(const struct lu_env *env, struct md_object *obj,
                        const struct lu_rdpg *rdpg)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        int rc;
        ENTRY;

        LASSERT(mdd_object_exists(mdd_obj));

        mdd_read_lock(env, mdd_obj, MOR_TGT_CHILD);
        rc = mdd_readpage_sanity_check(env, mdd_obj);
        if (rc)
                GOTO(out_unlock, rc);

        if (mdd_is_dead_obj(mdd_obj)) {
                struct page *pg;
                struct lu_dirpage *dp;

                /*
                 * According to POSIX, please do not return any entry to client:
                 * even dot and dotdot should not be returned.
                 */
                CWARN("readdir from dead object: "DFID"\n",
                      PFID(mdd_object_fid(mdd_obj)));

                if (rdpg->rp_count <= 0)
                        GOTO(out_unlock, rc = -EFAULT);
                LASSERT(rdpg->rp_pages != NULL);

                pg = rdpg->rp_pages[0];
                dp = (struct lu_dirpage *)cfs_kmap(pg);
                memset(dp, 0, sizeof(struct lu_dirpage));
                dp->ldp_hash_start = rdpg->rp_hash;
                dp->ldp_hash_end   = DIR_END_OFF;
                dp->ldp_flags |= LDF_EMPTY;
                dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
                cfs_kunmap(pg);
                GOTO(out_unlock, rc = 0);
        }

        rc = __mdd_readpage(env, mdd_obj, rdpg);

        EXIT;
out_unlock:
        mdd_read_unlock(env, mdd_obj);
        return rc;
}
static int mdd_object_sync(const struct lu_env *env, struct md_object *obj)
{
        struct mdd_object *mdd_obj = md2mdd_obj(obj);
        struct dt_object *next;

        LASSERT(mdd_object_exists(mdd_obj));
        next = mdd_object_child(mdd_obj);
        return next->do_ops->do_object_sync(env, next);
}
const struct md_object_operations mdd_obj_ops = {
        .moo_permission    = mdd_permission,
        .moo_attr_get      = mdd_attr_get,
        .moo_attr_set      = mdd_attr_set,
        .moo_xattr_get     = mdd_xattr_get,
        .moo_xattr_set     = mdd_xattr_set,
        .moo_xattr_list    = mdd_xattr_list,
        .moo_xattr_del     = mdd_xattr_del,
        .moo_object_create = mdd_object_create,
        .moo_ref_add       = mdd_ref_add,
        .moo_ref_del       = mdd_ref_del,
        .moo_open          = mdd_open,
        .moo_close         = mdd_close,
        .moo_readpage      = mdd_readpage,
        .moo_readlink      = mdd_readlink,
        .moo_capa_get      = mdd_capa_get,
        .moo_object_sync   = mdd_object_sync,
};