1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mdd/mdd_dir.c
38 * Lustre Metadata Server (mdd) routines
40 * Author: Wang Di <wangdi@clusterfs.com>
44 # define EXPORT_SYMTAB
46 #define DEBUG_SUBSYSTEM S_MDS
48 #include <linux/module.h>
49 #include <linux/jbd.h>
51 #include <obd_class.h>
52 #include <lustre_ver.h>
53 #include <obd_support.h>
54 #include <lprocfs_status.h>
56 #include <linux/ldiskfs_fs.h>
57 #include <lustre_mds.h>
58 #include <lustre/lustre_idl.h>
59 #include <lustre_fid.h>
61 #include "mdd_internal.h"
/* Canonical entry names for the current ("." ) and parent ("..") directory. */
63 static const char dot[] = ".";
64 static const char dotdot[] = "..";
66 static struct lu_name lname_dotdot = {
71 static int __mdd_lookup(const struct lu_env *env, struct md_object *pobj,
72 const struct lu_name *lname, struct lu_fid* fid,
/*
 * Look up @lname under parent @pobj while holding a pdirops (per-filename)
 * read lock on the parent; the actual index lookup is delegated to
 * __mdd_lookup().  On success the child's fid is stored in @fid.
 * @mask is the permission mask (e.g. MAY_EXEC) checked by __mdd_lookup().
 */
75 __mdd_lookup_locked(const struct lu_env *env, struct md_object *pobj,
76 const struct lu_name *lname, struct lu_fid* fid, int mask)
78 const char *name = lname->ln_name;
79 struct mdd_object *mdd_obj = md2mdd_obj(pobj);
80 struct dynlock_handle *dlh;
/* Take the per-name read lock on the parent directory. */
83 dlh = mdd_pdo_read_lock(env, mdd_obj, name, MOR_TGT_PARENT);
84 if (unlikely(dlh == NULL))
86 rc = __mdd_lookup(env, pobj, lname, fid, mask);
87 mdd_pdo_read_unlock(env, mdd_obj, dlh);
/*
 * md_dir_operations lookup entry point: resolve @lname under @pobj into
 * @fid, requiring MAY_EXEC (search) permission on the parent.
 * @spec is presumably unused here — the work is done by the locked helper.
 */
92 static int mdd_lookup(const struct lu_env *env,
93 struct md_object *pobj, const struct lu_name *lname,
94 struct lu_fid* fid, struct md_op_spec *spec)
98 rc = __mdd_lookup_locked(env, pobj, lname, fid, MAY_EXEC);
/*
 * Fetch the fid of @obj's parent directory by looking up its ".." entry
 * (no permission mask — internal use during ancestry walks).
 */
103 static int mdd_parent_fid(const struct lu_env *env, struct mdd_object *obj,
106 return __mdd_lookup_locked(env, &obj->mod_obj, &lname_dotdot, fid, 0);
110 * For root fid use special function, which does not compare version component
111 * of fid. Version component is different for root fids on all MDTs.
/*
 * Test whether @fid is the filesystem root fid.  Only sequence and object
 * id are compared; the version component is deliberately ignored (see the
 * comment above: root fids differ in version across MDTs).
 */
113 static int mdd_is_root(struct mdd_device *mdd, const struct lu_fid *fid)
115 return fid_seq(&mdd->mdd_root_fid) == fid_seq(fid) &&
116 fid_oid(&mdd->mdd_root_fid) == fid_oid(fid);
120 * return 1: if lf is the fid of the ancestor of p1;
123 * return -EREMOTE: if remote object is found, in this
124 * case fid of remote object is saved to @pf;
126 * otherwise: values < 0, errors.
/*
 * Walk upward from @p1 toward the root, comparing each ancestor fid
 * against @lf.  Per the comment block above: returns 1 if @lf is an
 * ancestor of @p1, 0 otherwise, -EREMOTE when a cross-MDT parent is met
 * (its fid is saved for the caller), or a negative errno on error.
 * Precondition: @p1 itself must not equal @lf (asserted below).
 */
128 static int mdd_is_parent(const struct lu_env *env,
129 struct mdd_device *mdd,
130 struct mdd_object *p1,
131 const struct lu_fid *lf,
134 struct mdd_object *parent = NULL;
139 LASSERT(!lu_fid_eq(mdo2fid(p1), lf));
/* Scratch fid from per-thread env info; reused on every loop iteration. */
140 pfid = &mdd_env_info(env)->mti_fid;
142 /* Check for root first. */
143 if (mdd_is_root(mdd, mdo2fid(p1)))
147 /* this is done recursively, bypass capa for each obj */
148 mdd_set_capainfo(env, 4, p1, BYPASS_CAPA);
/* Read the ".." fid of the current object into pfid. */
149 rc = mdd_parent_fid(env, p1, pfid);
152 if (mdd_is_root(mdd, pfid))
154 if (lu_fid_eq(pfid, lf))
/* Drop the previous ancestor reference before taking the next one. */
157 mdd_object_put(env, parent);
158 parent = mdd_object_find(env, mdd, pfid);
160 /* cross-ref parent */
161 if (parent == NULL) {
164 GOTO(out, rc = -EREMOTE);
165 } else if (IS_ERR(parent))
166 GOTO(out, rc = PTR_ERR(parent));
/* Release the last ancestor reference held when the walk ends. */
171 if (parent && !IS_ERR(parent))
172 mdd_object_put(env, parent);
177 * No permission check is needed.
179 * returns 1: if fid is ancestor of @mo;
180 * returns 0: if fid is not a ancestor of @mo;
182 * returns EREMOTE if remote object is found, fid of remote object is saved to
185 * returns < 0: if error
/*
 * md_object_operations entry: decide whether @fid names an ancestor of
 * @mo (see contract in the comment block above: 1/0/EREMOTE/negative).
 * Non-directory objects are rejected up front; the real work is done by
 * mdd_is_parent().
 */
187 static int mdd_is_subdir(const struct lu_env *env,
188 struct md_object *mo, const struct lu_fid *fid,
191 struct mdd_device *mdd = mdo2mdd(mo);
195 if (!S_ISDIR(mdd_object_type(md2mdd_obj(mo))))
198 rc = mdd_is_parent(env, mdd, md2mdd_obj(mo), fid, sfid);
202 } else if (rc == 1) {
203 /* found @fid is parent */
211 * Check that @dir contains no entries except (possibly) dot and dotdot.
216 * -ENOTDIR not a directory object
217 * -ENOTEMPTY not empty
/*
 * Check that @dir contains no entries other than "." and "..", by
 * iterating the underlying dt index: positioning at the zero key and
 * stepping at most 3 times.  More than two entries means not empty
 * (-ENOTEMPTY); a non-directory object yields -ENOTDIR per the comment
 * block above.
 */
221 static int mdd_dir_is_empty(const struct lu_env *env,
222 struct mdd_object *dir)
225 struct dt_object *obj;
226 const struct dt_it_ops *iops;
230 obj = mdd_object_child(dir);
231 if (!dt_try_as_dir(env, obj))
/* Initialize an index iterator on the directory, bypassing capabilities. */
234 iops = &obj->do_index_ops->dio_it;
235 it = iops->init(env, obj, BYPASS_CAPA);
237 result = iops->get(env, it, (const void *)"");
/* Step past at most "." and ".."; a third entry proves non-emptiness. */
240 for (result = 0, i = 0; result == 0 && i < 3; ++i)
241 result = iops->next(env, it);
244 else if (result == +1)
246 } else if (result == 0)
248 * Huh? Index contains no zero key?
/*
 * Check whether another link (or child entry) may be added to @obj:
 * fails once la_nlink reaches the backend's ddp_max_nlink limit.
 * Directories are exempt — their subdir-count limit is handled via the
 * MNLINK_OBJ trick (see __mdd_ref_add()).
 */
259 static int __mdd_may_link(const struct lu_env *env, struct mdd_object *obj)
261 struct mdd_device *m = mdd_obj2mdd_dev(obj);
262 struct lu_attr *la = &mdd_env_info(env)->mti_la;
266 rc = mdd_la_get(env, obj, la, BYPASS_CAPA);
271 * Subdir count limitation can be broken through.
273 if (la->la_nlink >= m->mdd_dt_conf.ddp_max_nlink &&
274 !S_ISDIR(la->la_mode))
281 * Check whether it may create the cobj under the pobj.
/*
 * Check whether @cobj may be created under @pobj:
 * - fails if @cobj already exists or the parent is a dead object;
 * - with @check_perm, requires MAY_WRITE | MAY_EXEC on the parent;
 * - with @check_nlink, also verifies the parent's link-count headroom.
 */
284 int mdd_may_create(const struct lu_env *env, struct mdd_object *pobj,
285 struct mdd_object *cobj, int check_perm, int check_nlink)
290 if (cobj && mdd_object_exists(cobj))
293 if (mdd_is_dead_obj(pobj))
297 rc = mdd_permission_internal_locked(env, pobj, NULL,
298 MAY_WRITE | MAY_EXEC,
301 if (!rc && check_nlink)
302 rc = __mdd_may_link(env, pobj);
308 * Check whether can unlink from the pobj in the case of "cobj == NULL".
/*
 * Check whether an entry may be removed from @pobj when the child object
 * itself is not available (cobj == NULL, remote case — see comment above).
 * Rejects dead parents, append-only/immutable attributes in @ma, parents
 * without MAY_WRITE | MAY_EXEC, and append-only parents.
 */
310 int mdd_may_unlink(const struct lu_env *env, struct mdd_object *pobj,
311 const struct md_attr *ma)
316 if (mdd_is_dead_obj(pobj))
319 if ((ma->ma_attr.la_valid & LA_FLAGS) &&
320 (ma->ma_attr.la_flags & (LUSTRE_APPEND_FL | LUSTRE_IMMUTABLE_FL)))
323 rc = mdd_permission_internal_locked(env, pobj, NULL,
324 MAY_WRITE | MAY_EXEC,
329 if (mdd_is_append(pobj))
336 * pobj == NULL is remote ops case, under such case, pobj's
337 * VTX feature has been checked already, no need check again.
/*
 * Sticky-bit (S_ISVTX) deletion check, mirroring POSIX semantics: in a
 * sticky directory only the directory owner, the file owner, or a user
 * with CFS_CAP_FOWNER may delete @cobj.  Returns non-zero when deletion
 * must be refused.  pobj == NULL means the remote case where the parent's
 * VTX bit was already checked (see comment above).
 */
339 static inline int mdd_is_sticky(const struct lu_env *env,
340 struct mdd_object *pobj,
341 struct mdd_object *cobj)
343 struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
344 struct md_ucred *uc = md_ucred(env);
348 rc = mdd_la_get(env, pobj, tmp_la, BYPASS_CAPA);
/* Not sticky, or caller owns the directory: deletion is allowed. */
352 if (!(tmp_la->la_mode & S_ISVTX) ||
353 (tmp_la->la_uid == uc->mu_fsuid))
357 rc = mdd_la_get(env, cobj, tmp_la, BYPASS_CAPA);
361 if (tmp_la->la_uid == uc->mu_fsuid)
/* Otherwise only a FOWNER-capable caller may delete. */
364 return !mdd_capable(uc, CFS_CAP_FOWNER);
368 * Check whether it may delete the cobj from the pobj.
/*
 * Check whether @cobj may be deleted from @pobj.  Verifies existence and
 * liveness, parent write/exec permission (when @check_perm), append-only
 * parents, sticky-bit rules (unless MDS_VTX_BYPASS), immutable/append
 * children, directory-vs-non-directory type consistency against
 * ma->ma_attr.la_mode, protection of the root directory, and — when
 * @check_empty — directory emptiness.
 */
371 int mdd_may_delete(const struct lu_env *env, struct mdd_object *pobj,
372 struct mdd_object *cobj, struct md_attr *ma,
373 int check_perm, int check_empty)
379 if (!mdd_object_exists(cobj))
383 if (mdd_is_dead_obj(pobj))
387 rc = mdd_permission_internal_locked(env, pobj, NULL,
388 MAY_WRITE | MAY_EXEC,
394 if (mdd_is_append(pobj))
398 if (!(ma->ma_attr_flags & MDS_VTX_BYPASS) &&
399 mdd_is_sticky(env, pobj, cobj))
402 if (mdd_is_immutable(cobj) || mdd_is_append(cobj))
405 if ((ma->ma_attr.la_valid & LA_FLAGS) &&
406 (ma->ma_attr.la_flags & (LUSTRE_APPEND_FL | LUSTRE_IMMUTABLE_FL)))
/* Caller claims a directory: the object must really be one, and not root. */
409 if (S_ISDIR(ma->ma_attr.la_mode)) {
410 struct mdd_device *mdd = mdo2mdd(&cobj->mod_obj);
412 if (!S_ISDIR(mdd_object_type(cobj)))
415 if (lu_fid_eq(mdo2fid(cobj), &mdd->mdd_root_fid))
417 } else if (S_ISDIR(mdd_object_type(cobj)))
420 if (S_ISDIR(ma->ma_attr.la_mode) && check_empty)
421 rc = mdd_dir_is_empty(env, cobj);
428 * has mdd_write_lock on src already, but not on tgt yet
/*
 * Pre-checks for hard-linking @src_obj into @tgt_obj under @lname
 * (caller already holds mdd_write_lock on src — see comment above):
 * name length, immutable/append-only source, no hard links to
 * directories, create permission on the target parent, and source
 * link-count headroom.
 */
430 int mdd_link_sanity_check(const struct lu_env *env,
431 struct mdd_object *tgt_obj,
432 const struct lu_name *lname,
433 struct mdd_object *src_obj)
435 struct mdd_device *m = mdd_obj2mdd_dev(src_obj);
439 /* Local ops, no lookup before link, check filename length here. */
440 if (lname && (lname->ln_namelen > m->mdd_dt_conf.ddp_max_name_len))
441 RETURN(-ENAMETOOLONG);
443 if (mdd_is_immutable(src_obj) || mdd_is_append(src_obj))
/* POSIX forbids hard links to directories. */
446 if (S_ISDIR(mdd_object_type(src_obj)))
449 LASSERT(src_obj != tgt_obj);
451 rc = mdd_may_create(env, tgt_obj, NULL, 1, 0);
456 rc = __mdd_may_link(env, src_obj);
462 * If subdir count is up to ddp_max_nlink, then enable MNLINK_OBJ flag and
463 * assign i_nlink to 1 which means the i_nlink for subdir count is not reliable
464 * (maybe too large to be represented). It is a trick to break through the
465 * "i_nlink" limitation for subdir count.
/*
 * Increment @obj's link count inside transaction @handle.  For
 * directories hitting ddp_max_nlink, switch to the MNLINK_OBJ mode
 * described in the comment block above: pin la_nlink at 1 and stop
 * maintaining an exact subdir count.
 */
467 void __mdd_ref_add(const struct lu_env *env, struct mdd_object *obj,
468 struct thandle *handle)
470 struct lu_attr *tmp_la = &mdd_env_info(env)->mti_la;
471 struct mdd_device *m = mdd_obj2mdd_dev(obj);
473 if (!mdd_is_mnlink(obj)) {
474 if (S_ISDIR(mdd_object_type(obj))) {
475 if (mdd_la_get(env, obj, tmp_la, BYPASS_CAPA))
/* At the limit: flag the object and freeze nlink at 1. */
478 if (tmp_la->la_nlink >= m->mdd_dt_conf.ddp_max_nlink) {
479 obj->mod_flags |= MNLINK_OBJ;
480 tmp_la->la_nlink = 1;
481 tmp_la->la_valid = LA_NLINK;
482 mdd_attr_set_internal(env, obj, tmp_la, handle,
487 mdo_ref_add(env, obj, handle);
/*
 * Decrement @obj's link count inside transaction @handle.  For MNLINK
 * objects (frozen nlink, see __mdd_ref_add()) the decrement is skipped
 * except when removing the "." self-reference (@is_dot).
 */
491 void __mdd_ref_del(const struct lu_env *env, struct mdd_object *obj,
492 struct thandle *handle, int is_dot)
494 if (!mdd_is_mnlink(obj) || is_dot)
495 mdo_ref_del(env, obj, handle);
498 /* insert named index, add reference if isdir */
/*
 * Insert entry @name -> @lf into directory @pobj's index and, when
 * @is_dir, bump the parent's link count (the new child's ".." reference)
 * under mdd_write_lock.  The caller's CFS_CAP_SYS_RESOURCE capability is
 * forwarded so quota can be bypassed for privileged inserts.
 */
499 static int __mdd_index_insert(const struct lu_env *env, struct mdd_object *pobj,
500 const struct lu_fid *lf, const char *name, int is_dir,
501 struct thandle *handle, struct lustre_capa *capa)
503 struct dt_object *next = mdd_object_child(pobj);
507 if (dt_try_as_dir(env, next)) {
508 struct md_ucred *uc = md_ucred(env);
510 rc = next->do_index_ops->dio_insert(env, next,
511 __mdd_fid_rec(env, lf),
512 (const struct dt_key *)name,
513 handle, capa, uc->mu_cap &
514 CFS_CAP_SYS_RESOURCE_MASK);
/* New subdirectory: its ".." adds a reference to the parent. */
521 mdd_write_lock(env, pobj, MOR_TGT_PARENT);
522 __mdd_ref_add(env, pobj, handle);
523 mdd_write_unlock(env, pobj);
529 /* delete named index, drop reference if isdir */
/*
 * Remove entry @name from directory @pobj's index.  When a directory
 * entry is removed, drop the parent's corresponding reference; deleting
 * the "." entry is flagged specially so MNLINK handling in
 * __mdd_ref_del() still decrements.
 */
530 static int __mdd_index_delete(const struct lu_env *env, struct mdd_object *pobj,
531 const char *name, int is_dir, struct thandle *handle,
532 struct lustre_capa *capa)
534 struct dt_object *next = mdd_object_child(pobj);
538 if (dt_try_as_dir(env, next)) {
539 rc = next->do_index_ops->dio_delete(env, next,
540 (struct dt_key *)name,
542 if (rc == 0 && is_dir) {
/* Detect removal of the "." self-entry. */
545 if (name != NULL && name[0] == '.' && name[1] == 0)
547 mdd_write_lock(env, pobj, MOR_TGT_PARENT);
548 __mdd_ref_del(env, pobj, handle, is_dot);
549 mdd_write_unlock(env, pobj);
/*
 * Insert entry @name -> @lf into @pobj's index WITHOUT adjusting any
 * link counts — the "insert only" counterpart of __mdd_index_insert(),
 * used where nlink bookkeeping is handled by the caller (e.g. link,
 * rename_tgt, "."/".." creation).
 */
558 __mdd_index_insert_only(const struct lu_env *env, struct mdd_object *pobj,
559 const struct lu_fid *lf, const char *name,
560 struct thandle *handle, struct lustre_capa *capa)
562 struct dt_object *next = mdd_object_child(pobj);
566 if (dt_try_as_dir(env, next)) {
567 struct md_ucred *uc = md_ucred(env);
569 rc = next->do_index_ops->dio_insert(env, next,
570 __mdd_fid_rec(env, lf),
571 (const struct dt_key *)name,
572 handle, capa, uc->mu_cap &
573 CFS_CAP_SYS_RESOURCE_MASK);
/*
 * Hard-link operation: add entry @lname for @src_obj under @tgt_obj.
 * Sequence: optional quota pre-check on the target parent, start
 * transaction, take pdirops write lock on the parent and write lock on
 * the source, sanity-check, insert the name, bump the source's nlink,
 * update parent ctime/mtime and source ctime, then unwind locks,
 * transaction, and quota reservations.
 */
580 static int mdd_link(const struct lu_env *env, struct md_object *tgt_obj,
581 struct md_object *src_obj, const struct lu_name *lname,
584 const char *name = lname->ln_name;
585 struct lu_attr *la = &mdd_env_info(env)->mti_la_for_fix;
586 struct mdd_object *mdd_tobj = md2mdd_obj(tgt_obj);
587 struct mdd_object *mdd_sobj = md2mdd_obj(src_obj);
588 struct mdd_device *mdd = mdo2mdd(src_obj);
589 struct dynlock_handle *dlh;
590 struct thandle *handle;
591 #ifdef HAVE_QUOTA_SUPPORT
592 struct obd_device *obd = mdd->mdd_obd_dev;
593 struct mds_obd *mds = &obd->u.mds;
594 unsigned int qids[MAXQUOTAS] = { 0, 0 };
595 int quota_opc = 0, rec_pending = 0;
/* Reserve block quota against the target parent's owner before starting. */
600 #ifdef HAVE_QUOTA_SUPPORT
601 if (mds->mds_quota) {
602 struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
604 rc = mdd_la_get(env, mdd_tobj, la_tmp, BYPASS_CAPA);
606 quota_opc = FSFILT_OP_LINK;
607 mdd_quota_wrapper(la_tmp, qids);
608 /* get block quota for parent */
609 lquota_chkquota(mds_quota_interface_ref, obd,
610 qids[USRQUOTA], qids[GRPQUOTA], 1,
611 &rec_pending, NULL, LQUOTA_FLAGS_BLK);
/* Open the transaction for the link operation. */
616 mdd_txn_param_build(env, mdd, MDD_TXN_LINK_OP);
617 handle = mdd_trans_start(env, mdd);
619 GOTO(out_pending, rc = PTR_ERR(handle));
621 dlh = mdd_pdo_write_lock(env, mdd_tobj, name, MOR_TGT_CHILD);
623 GOTO(out_trans, rc = -ENOMEM);
624 mdd_write_lock(env, mdd_sobj, MOR_TGT_CHILD);
626 rc = mdd_link_sanity_check(env, mdd_tobj, lname, mdd_sobj);
628 GOTO(out_unlock, rc);
630 rc = __mdd_index_insert_only(env, mdd_tobj, mdo2fid(mdd_sobj),
632 mdd_object_capa(env, mdd_tobj));
634 GOTO(out_unlock, rc);
636 __mdd_ref_add(env, mdd_sobj, handle);
/* Client must supply the operation ctime; propagate it to both objects. */
638 LASSERT(ma->ma_attr.la_valid & LA_CTIME);
639 la->la_ctime = la->la_mtime = ma->ma_attr.la_ctime;
641 la->la_valid = LA_CTIME | LA_MTIME;
642 rc = mdd_attr_check_set_internal_locked(env, mdd_tobj, la, handle, 0);
644 GOTO(out_unlock, rc);
646 la->la_valid = LA_CTIME;
647 rc = mdd_attr_check_set_internal(env, mdd_sobj, la, handle, 0);
650 mdd_write_unlock(env, mdd_sobj);
651 mdd_pdo_write_unlock(env, mdd_tobj, dlh);
653 mdd_trans_stop(env, mdd, rc, handle);
655 #ifdef HAVE_QUOTA_SUPPORT
658 lquota_pending_commit(mds_quota_interface_ref, obd,
659 qids[USRQUOTA], qids[GRPQUOTA],
661 /* Trigger dqacq for the parent owner. If failed,
662 * the next call for lquota_chkquota will process it. */
663 lquota_adjust(mds_quota_interface_ref, obd, 0, qids, rc,
670 /* caller should take a lock before calling */
/*
 * Post-unlink bookkeeping (caller must hold the object lock, see comment
 * above).  When the link count has dropped to zero: if the object is
 * still open (mod_count), park it on the orphan list so it is destroyed
 * at last close; otherwise mark it DEAD and destroy it now via
 * mdd_object_kill().
 */
671 int mdd_finish_unlink(const struct lu_env *env,
672 struct mdd_object *obj, struct md_attr *ma,
679 rc = mdd_iattr_get(env, obj, ma);
680 if (rc == 0 && ma->ma_attr.la_nlink == 0) {
681 /* add new orphan and the object
682 * will be deleted during mdd_close() */
683 if (obj->mod_count) {
684 rc = __mdd_orphan_add(env, obj, th);
686 obj->mod_flags |= ORPHAN_OBJ;
689 obj->mod_flags |= DEAD_OBJ;
690 if (!(obj->mod_flags & ORPHAN_OBJ)) {
691 rc = mdd_object_kill(env, obj, ma);
/* Object survives (nlink > 0): striping/cookie data is not returned. */
698 ma->ma_valid &= ~(MA_LOV | MA_COOKIE);
705 * has mdd_write_lock on cobj already, but not on pobj yet
/*
 * Pre-checks for unlinking @cobj from @pobj: defers entirely to
 * mdd_may_delete() with permission and emptiness checks enabled
 * (caller already holds mdd_write_lock on cobj, see comment above).
 */
707 int mdd_unlink_sanity_check(const struct lu_env *env, struct mdd_object *pobj,
708 struct mdd_object *cobj, struct md_attr *ma)
713 rc = mdd_may_delete(env, pobj, cobj, ma, 1, 1);
/*
 * Unlink operation: remove entry @lname for @cobj from @pobj.
 * Sequence: build (possibly llog-backed) transaction parameters, start
 * transaction, take pdirops write lock on the parent and write lock on
 * the child, sanity-check, delete the index entry, drop the child's
 * nlink (twice for directories, covering "."), update times, finish the
 * unlink (orphan/destroy), then release quota as needed.
 */
718 static int mdd_unlink(const struct lu_env *env, struct md_object *pobj,
719 struct md_object *cobj, const struct lu_name *lname,
722 const char *name = lname->ln_name;
723 struct lu_attr *la = &mdd_env_info(env)->mti_la_for_fix;
724 struct mdd_object *mdd_pobj = md2mdd_obj(pobj);
725 struct mdd_object *mdd_cobj = md2mdd_obj(cobj);
726 struct mdd_device *mdd = mdo2mdd(pobj);
727 struct dynlock_handle *dlh;
728 struct thandle *handle;
729 #ifdef HAVE_QUOTA_SUPPORT
730 struct obd_device *obd = mdd->mdd_obd_dev;
731 struct mds_obd *mds = &obd->u.mds;
732 unsigned int qcids[MAXQUOTAS] = { 0, 0 };
733 unsigned int qpids[MAXQUOTAS] = { 0, 0 };
/* The child must exist locally; cross-ref cases are handled elsewhere. */
739 LASSERTF(mdd_object_exists(mdd_cobj) > 0, "FID is "DFID"\n",
740 PFID(mdd_object_fid(mdd_cobj)));
742 rc = mdd_log_txn_param_build(env, cobj, ma, MDD_TXN_UNLINK_OP);
746 handle = mdd_trans_start(env, mdd);
748 RETURN(PTR_ERR(handle));
751 dlh = mdd_pdo_write_lock(env, mdd_pobj, name, MOR_TGT_PARENT);
753 GOTO(out_trans, rc = -ENOMEM);
754 mdd_write_lock(env, mdd_cobj, MOR_TGT_CHILD);
756 is_dir = S_ISDIR(ma->ma_attr.la_mode);
757 rc = mdd_unlink_sanity_check(env, mdd_pobj, mdd_cobj, ma);
761 rc = __mdd_index_delete(env, mdd_pobj, name, is_dir, handle,
762 mdd_object_capa(env, mdd_pobj));
/* Drop the name's reference; for a directory also drop the "." ref. */
766 __mdd_ref_del(env, mdd_cobj, handle, 0);
769 __mdd_ref_del(env, mdd_cobj, handle, 1);
771 LASSERT(ma->ma_attr.la_valid & LA_CTIME);
772 la->la_ctime = la->la_mtime = ma->ma_attr.la_ctime;
774 la->la_valid = LA_CTIME | LA_MTIME;
775 rc = mdd_attr_check_set_internal_locked(env, mdd_pobj, la, handle, 0);
779 la->la_valid = LA_CTIME;
780 rc = mdd_attr_check_set_internal(env, mdd_cobj, la, handle, 0);
784 rc = mdd_finish_unlink(env, mdd_cobj, ma, handle);
/* Choose quota release: full unlink if last close, else parent-only. */
785 #ifdef HAVE_QUOTA_SUPPORT
786 if (mds->mds_quota && ma->ma_valid & MA_INODE &&
787 ma->ma_attr.la_nlink == 0) {
788 struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
790 rc = mdd_la_get(env, mdd_pobj, la_tmp, BYPASS_CAPA);
792 mdd_quota_wrapper(la_tmp, qpids);
793 if (mdd_cobj->mod_count == 0) {
794 quota_opc = FSFILT_OP_UNLINK;
795 mdd_quota_wrapper(&ma->ma_attr, qcids);
797 quota_opc = FSFILT_OP_UNLINK_PARTIAL_PARENT;
/* Notify the OSC export about the unlink (async, best-effort). */
804 obd_set_info_async(mdd2obd_dev(mdd)->u.mds.mds_osc_exp,
805 sizeof(KEY_UNLINKED), KEY_UNLINKED, 0,
809 mdd_write_unlock(env, mdd_cobj);
810 mdd_pdo_write_unlock(env, mdd_pobj, dlh);
812 mdd_trans_stop(env, mdd, rc, handle);
813 #ifdef HAVE_QUOTA_SUPPORT
815 /* Trigger dqrel on the owner of child and parent. If failed,
816 * the next call for lquota_chkquota will process it. */
817 lquota_adjust(mds_quota_interface_ref, obd, qcids, qpids, rc,
823 /* has not lock on pobj yet */
/*
 * Sanity check for name-insert: may be skipped entirely via
 * MDS_PERM_BYPASS, otherwise verifies create permission on @pobj
 * (nlink headroom checked only for directories).  No lock on pobj is
 * held yet (see comment above).
 */
824 static int mdd_ni_sanity_check(const struct lu_env *env,
825 struct md_object *pobj,
826 const struct md_attr *ma)
828 struct mdd_object *obj = md2mdd_obj(pobj);
832 if (ma->ma_attr_flags & MDS_PERM_BYPASS)
835 rc = mdd_may_create(env, obj, NULL, 1, S_ISDIR(ma->ma_attr.la_mode));
/*
 * Insert a bare name entry @lname -> @fid into directory @pobj (used for
 * cross-MDT operations where only the index entry lives here).  Handles
 * quota reservation (or capability elevation when MDS_QUOTA_IGNORE),
 * transaction setup, pdirops locking, sanity check, the index insert
 * (with parent nlink update for directories), and a conditional parent
 * ctime/mtime update driven by LA_CTIME in @ma.
 */
843 static int mdd_name_insert(const struct lu_env *env,
844 struct md_object *pobj,
845 const struct lu_name *lname,
846 const struct lu_fid *fid,
847 const struct md_attr *ma)
849 const char *name = lname->ln_name;
850 struct lu_attr *la = &mdd_env_info(env)->mti_la_for_fix;
851 struct mdd_object *mdd_obj = md2mdd_obj(pobj);
852 struct mdd_device *mdd = mdo2mdd(pobj);
853 struct dynlock_handle *dlh;
854 struct thandle *handle;
855 int is_dir = S_ISDIR(ma->ma_attr.la_mode);
856 #ifdef HAVE_QUOTA_SUPPORT
857 struct md_ucred *uc = md_ucred(env);
858 struct obd_device *obd = mdd->mdd_obd_dev;
859 struct mds_obd *mds = &obd->u.mds;
860 unsigned int qids[MAXQUOTAS] = { 0, 0 };
861 int quota_opc = 0, rec_pending = 0;
/* Saved so the temporarily-elevated capability mask can be restored. */
862 cfs_cap_t save = uc->mu_cap;
867 #ifdef HAVE_QUOTA_SUPPORT
868 if (mds->mds_quota) {
869 if (!(ma->ma_attr_flags & MDS_QUOTA_IGNORE)) {
870 struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
872 rc = mdd_la_get(env, mdd_obj, la_tmp, BYPASS_CAPA);
874 quota_opc = FSFILT_OP_LINK;
875 mdd_quota_wrapper(la_tmp, qids);
876 /* get block quota for parent */
877 lquota_chkquota(mds_quota_interface_ref, obd,
878 qids[USRQUOTA], qids[GRPQUOTA],
879 1, &rec_pending, NULL,
/* Quota ignored: grant SYS_RESOURCE so the insert bypasses quota. */
883 uc->mu_cap |= CFS_CAP_SYS_RESOURCE_MASK;
887 mdd_txn_param_build(env, mdd, MDD_TXN_INDEX_INSERT_OP);
888 handle = mdd_trans_start(env, mdo2mdd(pobj));
890 GOTO(out_pending, rc = PTR_ERR(handle));
892 dlh = mdd_pdo_write_lock(env, mdd_obj, name, MOR_TGT_PARENT);
894 GOTO(out_trans, rc = -ENOMEM);
896 rc = mdd_ni_sanity_check(env, pobj, ma);
898 GOTO(out_unlock, rc);
900 rc = __mdd_index_insert(env, mdd_obj, fid, name, is_dir,
901 handle, BYPASS_CAPA);
903 GOTO(out_unlock, rc);
906 * For some case, no need update obj's ctime (LA_CTIME is not set),
908 * For other cases, update obj's ctime (LA_CTIME is set),
911 if (ma->ma_attr.la_valid & LA_CTIME) {
912 la->la_ctime = la->la_mtime = ma->ma_attr.la_ctime;
913 la->la_valid = LA_CTIME | LA_MTIME;
914 rc = mdd_attr_check_set_internal_locked(env, mdd_obj, la,
919 mdd_pdo_write_unlock(env, mdd_obj, dlh);
921 mdd_trans_stop(env, mdo2mdd(pobj), rc, handle);
923 #ifdef HAVE_QUOTA_SUPPORT
924 if (mds->mds_quota) {
927 lquota_pending_commit(mds_quota_interface_ref,
929 qids[GRPQUOTA], 1, 1);
930 /* Trigger dqacq for the parent owner. If failed,
931 * the next call for lquota_chkquota will process it*/
932 lquota_adjust(mds_quota_interface_ref, obd, 0, qids,
942 /* has not lock on pobj yet */
/*
 * Sanity check for name-remove: may be skipped via MDS_PERM_BYPASS,
 * otherwise uses the child-less unlink check (mdd_may_unlink()) since
 * only the parent is local.  No lock on pobj is held yet.
 */
943 static int mdd_nr_sanity_check(const struct lu_env *env,
944 struct md_object *pobj,
945 const struct md_attr *ma)
947 struct mdd_object *obj = md2mdd_obj(pobj);
951 if (ma->ma_attr_flags & MDS_PERM_BYPASS)
954 rc = mdd_may_unlink(env, obj, ma);
/*
 * Remove a bare name entry @lname from directory @pobj (cross-MDT
 * counterpart of mdd_name_insert()): quota bookkeeping for the parent,
 * transaction, pdirops lock, sanity check, index delete (with parent
 * nlink update for directories), and conditional parent ctime/mtime
 * update driven by LA_CTIME in @ma.
 */
962 static int mdd_name_remove(const struct lu_env *env,
963 struct md_object *pobj,
964 const struct lu_name *lname,
965 const struct md_attr *ma)
967 const char *name = lname->ln_name;
968 struct lu_attr *la = &mdd_env_info(env)->mti_la_for_fix;
969 struct mdd_object *mdd_obj = md2mdd_obj(pobj);
970 struct mdd_device *mdd = mdo2mdd(pobj);
971 struct dynlock_handle *dlh;
972 struct thandle *handle;
973 int is_dir = S_ISDIR(ma->ma_attr.la_mode);
974 #ifdef HAVE_QUOTA_SUPPORT
975 struct obd_device *obd = mdd->mdd_obd_dev;
976 struct mds_obd *mds = &obd->u.mds;
977 unsigned int qids[MAXQUOTAS] = { 0, 0 };
/* Record the parent owner's ids for the later quota release. */
983 #ifdef HAVE_QUOTA_SUPPORT
984 if (mds->mds_quota) {
985 struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
987 rc = mdd_la_get(env, mdd_obj, la_tmp, BYPASS_CAPA);
989 quota_opc = FSFILT_OP_UNLINK_PARTIAL_PARENT;
990 mdd_quota_wrapper(la_tmp, qids);
994 mdd_txn_param_build(env, mdd, MDD_TXN_INDEX_DELETE_OP);
995 handle = mdd_trans_start(env, mdd);
997 GOTO(out_pending, rc = PTR_ERR(handle));
999 dlh = mdd_pdo_write_lock(env, mdd_obj, name, MOR_TGT_PARENT);
1001 GOTO(out_trans, rc = -ENOMEM);
1003 rc = mdd_nr_sanity_check(env, pobj, ma);
1005 GOTO(out_unlock, rc);
1007 rc = __mdd_index_delete(env, mdd_obj, name, is_dir,
1008 handle, BYPASS_CAPA);
1010 GOTO(out_unlock, rc);
1013 * For some case, no need update obj's ctime (LA_CTIME is not set),
1015 * For other cases, update obj's ctime (LA_CTIME is set),
1018 if (ma->ma_attr.la_valid & LA_CTIME) {
1019 la->la_ctime = la->la_mtime = ma->ma_attr.la_ctime;
1020 la->la_valid = LA_CTIME | LA_MTIME;
1021 rc = mdd_attr_check_set_internal_locked(env, mdd_obj, la,
1026 mdd_pdo_write_unlock(env, mdd_obj, dlh);
1028 mdd_trans_stop(env, mdd, rc, handle);
1030 #ifdef HAVE_QUOTA_SUPPORT
1031 /* Trigger dqrel for the parent owner.
1032 * If failed, the next call for lquota_chkquota will process it. */
1034 lquota_adjust(mds_quota_interface_ref, obd, 0, qids, rc,
1042 * has mdd_write_lock on tobj already, but not on tgt_pobj yet
/*
 * Sanity check for rename-target: may be skipped via MDS_PERM_BYPASS;
 * otherwise validates deletion of the existing target @tobj from
 * @tgt_pobj via mdd_may_delete() (see the XXX note below on why tobj is
 * expected to be non-NULL here and why tgt_pobj's nlink is not checked).
 */
1044 static int mdd_rt_sanity_check(const struct lu_env *env,
1045 struct mdd_object *tgt_pobj,
1046 struct mdd_object *tobj,
1052 if (unlikely(ma->ma_attr_flags & MDS_PERM_BYPASS))
1055 /* XXX: for mdd_rename_tgt, "tobj == NULL" does not mean tobj not
1056 * exist. In fact, tobj must exist, otherwise the call trace will be:
1057 * mdt_reint_rename_tgt -> mdo_name_insert -> ... -> mdd_name_insert.
1058 * When get here, tobj must be NOT NULL, the other case has been
1059 * processed in cmr_rename_tgt before mdd_rename_tgt and enable
1061 * So check may_delete, but not check nlink of tgt_pobj. */
1063 rc = mdd_may_delete(env, tgt_pobj, tobj, ma, 1, 1);
/*
 * Target-side half of a cross-MDT rename: replace entry @lname in target
 * parent @pobj so it points at source fid @lf, unlinking the previously
 * named object @tobj if present.  The entry is deleted and re-inserted
 * with is_dir == 0 deliberately, since the parent's nlink is unchanged
 * by a same-directory replacement (see inline comment).  Quota is
 * reserved for the parent when no target existed, and released for a
 * target whose last link went away.
 */
1068 static int mdd_rename_tgt(const struct lu_env *env,
1069 struct md_object *pobj, struct md_object *tobj,
1070 const struct lu_fid *lf, const struct lu_name *lname,
1073 const char *name = lname->ln_name;
1074 struct lu_attr *la = &mdd_env_info(env)->mti_la_for_fix;
1075 struct mdd_object *mdd_tpobj = md2mdd_obj(pobj);
1076 struct mdd_object *mdd_tobj = md2mdd_obj(tobj);
1077 struct mdd_device *mdd = mdo2mdd(pobj);
1078 struct dynlock_handle *dlh;
1079 struct thandle *handle;
1080 #ifdef HAVE_QUOTA_SUPPORT
1081 struct obd_device *obd = mdd->mdd_obd_dev;
1082 struct mds_obd *mds = &obd->u.mds;
1083 unsigned int qcids[MAXQUOTAS] = { 0, 0 };
1084 unsigned int qpids[MAXQUOTAS] = { 0, 0 };
1085 int quota_opc = 0, rec_pending = 0;
/* No pre-existing target: the insert may consume parent block quota. */
1090 #ifdef HAVE_QUOTA_SUPPORT
1091 if (mds->mds_quota && !tobj) {
1092 struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
1094 rc = mdd_la_get(env, mdd_tpobj, la_tmp, BYPASS_CAPA);
1096 quota_opc = FSFILT_OP_LINK;
1097 mdd_quota_wrapper(la_tmp, qpids);
1098 /* get block quota for target parent */
1099 lquota_chkquota(mds_quota_interface_ref, obd,
1100 qpids[USRQUOTA], qpids[GRPQUOTA], 1,
1101 &rec_pending, NULL, LQUOTA_FLAGS_BLK);
1105 mdd_txn_param_build(env, mdd, MDD_TXN_RENAME_TGT_OP);
1106 handle = mdd_trans_start(env, mdd);
1108 GOTO(out_pending, rc = PTR_ERR(handle));
1110 dlh = mdd_pdo_write_lock(env, mdd_tpobj, name, MOR_TGT_PARENT);
1112 GOTO(out_trans, rc = -ENOMEM);
1114 mdd_write_lock(env, mdd_tobj, MOR_TGT_CHILD);
1116 rc = mdd_rt_sanity_check(env, mdd_tpobj, mdd_tobj, ma);
1121 * If rename_tgt is called then we should just re-insert name with
1122 * correct fid, no need to dec/inc parent nlink if obj is dir.
1124 rc = __mdd_index_delete(env, mdd_tpobj, name, 0, handle, BYPASS_CAPA);
1128 rc = __mdd_index_insert_only(env, mdd_tpobj, lf, name, handle,
/* Propagate the client-supplied ctime to parent (and target below). */
1133 LASSERT(ma->ma_attr.la_valid & LA_CTIME);
1134 la->la_ctime = la->la_mtime = ma->ma_attr.la_ctime;
1136 la->la_valid = LA_CTIME | LA_MTIME;
1137 rc = mdd_attr_check_set_internal_locked(env, mdd_tpobj, la, handle, 0);
1142 * For tobj is remote case cmm layer has processed
1143 * and pass NULL tobj to here. So when tobj is NOT NULL,
1144 * it must be local one.
1146 if (tobj && mdd_object_exists(mdd_tobj)) {
1147 __mdd_ref_del(env, mdd_tobj, handle, 0);
1149 /* Remove dot reference. */
1150 if (S_ISDIR(ma->ma_attr.la_mode))
1151 __mdd_ref_del(env, mdd_tobj, handle, 1);
1153 la->la_valid = LA_CTIME;
1154 rc = mdd_attr_check_set_internal(env, mdd_tobj, la, handle, 0);
1158 rc = mdd_finish_unlink(env, mdd_tobj, ma, handle);
/* Replaced target fully unlinked and unopened: release its quota. */
1162 #ifdef HAVE_QUOTA_SUPPORT
1163 if (mds->mds_quota && ma->ma_valid & MA_INODE &&
1164 ma->ma_attr.la_nlink == 0 && mdd_tobj->mod_count == 0) {
1165 quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
1166 mdd_quota_wrapper(&ma->ma_attr, qcids);
1173 mdd_write_unlock(env, mdd_tobj);
1174 mdd_pdo_write_unlock(env, mdd_tpobj, dlh);
1176 mdd_trans_stop(env, mdd, rc, handle);
1178 #ifdef HAVE_QUOTA_SUPPORT
1179 if (mds->mds_quota) {
1181 lquota_pending_commit(mds_quota_interface_ref, obd,
1186 /* Trigger dqrel/dqacq on the target owner of child and
1187 * parent. If failed, the next call for lquota_chkquota
1188 * will process it. */
1189 lquota_adjust(mds_quota_interface_ref, obd, qcids,
1190 qpids, rc, quota_opc);
1197 * The permission has been checked when obj created, no need check again.
/*
 * Sanity check before creating object data: the child must exist and not
 * be dead.  No permission check — it was done at object creation (see
 * comment above).
 */
1199 static int mdd_cd_sanity_check(const struct lu_env *env,
1200 struct mdd_object *obj)
1205 if (!obj || mdd_is_dead_obj(obj))
/*
 * Create the data (LOV striping) part of an already-created metadata
 * object @cobj: allocate/obtain the LOV EA (or reuse the replayed one
 * supplied in @spec), store it on the child within a transaction, read
 * back the attributes, and update the lov_objid bookkeeping.
 */
1212 static int mdd_create_data(const struct lu_env *env, struct md_object *pobj,
1213 struct md_object *cobj, const struct md_op_spec *spec,
1216 struct mdd_device *mdd = mdo2mdd(cobj);
1217 struct mdd_object *mdd_pobj = md2mdd_obj(pobj);
1218 struct mdd_object *son = md2mdd_obj(cobj);
1219 struct lu_attr *attr = &ma->ma_attr;
1220 struct lov_mds_md *lmm = NULL;
1222 struct thandle *handle;
1226 rc = mdd_cd_sanity_check(env, son);
/* Some open flags do not require object creation at all. */
1230 if (!md_should_create(spec->sp_cr_flags))
1233 rc = mdd_lov_create(env, mdd, mdd_pobj, son, &lmm, &lmm_size,
1238 mdd_txn_param_build(env, mdd, MDD_TXN_CREATE_DATA_OP);
1239 handle = mdd_trans_start(env, mdd);
1241 GOTO(out_free, rc = PTR_ERR(handle));
1244 * XXX: Setting the lov ea is not locked but setting the attr is locked?
1245 * Should this be fixed?
1248 /* Replay creates has objects already */
1250 if (spec->u.sp_ea.no_lov_create) {
1251 CDEBUG(D_INFO, "we already have lov ea\n");
1252 rc = mdd_lov_set_md(env, mdd_pobj, son,
1253 (struct lov_mds_md *)spec->u.sp_ea.eadata,
1254 spec->u.sp_ea.eadatalen, handle, 0);
1257 /* No need mdd_lsm_sanity_check here */
1258 rc = mdd_lov_set_md(env, mdd_pobj, son, lmm,
1259 lmm_size, handle, 0);
1262 rc = mdd_attr_get_internal_locked(env, son, ma);
1264 /* update lov_objid data, must be before transaction stop! */
1266 mdd_lov_objid_update(mdd, lmm);
1268 mdd_trans_stop(env, mdd, rc, handle);
1270 /* Finish mdd_lov_create() stuff. */
1271 mdd_lov_create_finish(env, mdd, lmm, lmm_size, spec);
/*
 * Core lookup: resolve @lname under @pobj into @fid after checking that
 * the parent is alive, local, within name-length limits, and that the
 * caller has @mask permission on it.  The entry is read from the dt
 * index as a packed fid and unpacked into @fid.
 */
1276 __mdd_lookup(const struct lu_env *env, struct md_object *pobj,
1277 const struct lu_name *lname, struct lu_fid* fid, int mask)
1279 const char *name = lname->ln_name;
1280 const struct dt_key *key = (const struct dt_key *)name;
1281 struct mdd_object *mdd_obj = md2mdd_obj(pobj);
1282 struct mdd_device *m = mdo2mdd(pobj);
1283 struct dt_object *dir = mdd_object_child(mdd_obj);
1284 struct lu_fid_pack *pack = &mdd_env_info(env)->mti_pack;
1288 if (unlikely(mdd_is_dead_obj(mdd_obj)))
/* rc == 0: object absent; rc < 0: object lives on a remote MDT. */
1291 rc = mdd_object_exists(mdd_obj);
1292 if (unlikely(rc == 0))
1294 else if (unlikely(rc < 0)) {
1295 CERROR("Object "DFID" locates on remote server\n",
1296 PFID(mdo2fid(mdd_obj)));
1300 /* The common filename length check. */
1301 if (unlikely(lname->ln_namelen > m->mdd_dt_conf.ddp_max_name_len))
1302 RETURN(-ENAMETOOLONG);
1304 rc = mdd_permission_internal_locked(env, mdd_obj, NULL, mask,
1309 if (likely(S_ISDIR(mdd_object_type(mdd_obj)) &&
1310 dt_try_as_dir(env, dir))) {
1311 rc = dir->do_index_ops->dio_lookup(env, dir,
1312 (struct dt_rec *)pack, key,
1313 mdd_object_capa(env, mdd_obj));
1315 rc = fid_unpack(pack, fid);
/*
 * Initialize a freshly created object @child within transaction @handle:
 * set its attributes from @ma, and for directories add the "." (plus a
 * self nlink reference) and ".." (-> @pfid) entries, cleaning up "." if
 * the ".." insert fails.
 */
1324 int mdd_object_initialize(const struct lu_env *env, const struct lu_fid *pfid,
1325 struct mdd_object *child, struct md_attr *ma,
1326 struct thandle *handle, const struct md_op_spec *spec)
1332 * Update attributes for child.
1335 * (1) the valid bits should be converted between Lustre and Linux;
1336 * (2) maybe, the child attributes should be set in OSD when creation.
1339 rc = mdd_attr_set_internal(env, child, &ma->ma_attr, handle, 0);
1343 if (S_ISDIR(ma->ma_attr.la_mode)) {
1344 /* Add "." and ".." for newly created dir */
1345 __mdd_ref_add(env, child, handle);
1346 rc = __mdd_index_insert_only(env, child, mdo2fid(child),
1347 dot, handle, BYPASS_CAPA);
1349 rc = __mdd_index_insert_only(env, child, pfid,
/* ".." failed: best-effort removal of the "." entry just added. */
1355 rc2 = __mdd_index_delete(env, child, dot, 1,
1356 handle, BYPASS_CAPA);
1358 CERROR("Failure to cleanup after dotdot"
1359 " creation: %d (%d)\n", rc2, rc);
1366 /* has not lock on pobj yet */
/*
 * Pre-create checks (no lock on pobj yet): reject dead parents, detect
 * an existing name (optional lookup, -EEXIST), verify MAY_WRITE on the
 * parent, apply setgid inheritance (group id always; S_ISGID propagated
 * to new subdirectories), and enforce per-type limits such as symlink
 * target length against the backend block size.
 */
1367 static int mdd_create_sanity_check(const struct lu_env *env,
1368 struct md_object *pobj,
1369 const struct lu_name *lname,
1371 struct md_op_spec *spec)
1373 struct mdd_thread_info *info = mdd_env_info(env);
1374 struct lu_attr *la = &info->mti_la;
1375 struct lu_fid *fid = &info->mti_fid;
1376 struct mdd_object *obj = md2mdd_obj(pobj);
1377 struct mdd_device *m = mdo2mdd(pobj);
1378 int lookup = spec->sp_cr_lookup;
1383 if (mdd_is_dead_obj(obj))
1387 * In some cases this lookup is not needed - we know before if name
1388 * exists or not because MDT performs lookup for it.
1389 * name length check is done in lookup.
1393 * Check if the name already exist, though it will be checked in
1394 * _index_insert also, for avoiding rolling back if exists
1397 rc = __mdd_lookup_locked(env, pobj, lname, fid,
1398 MAY_WRITE | MAY_EXEC);
/* Lookup success means the name exists: map rc==0 to -EEXIST. */
1400 RETURN(rc ? : -EEXIST);
1403 * Check WRITE permission for the parent.
1404 * EXEC permission have been checked
1405 * when lookup before create already.
1407 rc = mdd_permission_internal_locked(env, obj, NULL, MAY_WRITE,
1414 rc = mdd_la_get(env, obj, la, BYPASS_CAPA);
/* setgid directory: child inherits gid; subdirs also inherit S_ISGID. */
1418 if (la->la_mode & S_ISGID) {
1419 ma->ma_attr.la_gid = la->la_gid;
1420 if (S_ISDIR(ma->ma_attr.la_mode)) {
1421 ma->ma_attr.la_mode |= S_ISGID;
1422 ma->ma_attr.la_valid |= LA_MODE;
1426 switch (ma->ma_attr.la_mode & S_IFMT) {
1428 unsigned int symlen = strlen(spec->u.sp_symname) + 1;
1430 if (symlen > (1 << m->mdd_dt_conf.ddp_block_shift))
1431 RETURN(-ENAMETOOLONG);
1451 * Create object and insert it into namespace.
1453 static int mdd_create(const struct lu_env *env,
1454 struct md_object *pobj,
1455 const struct lu_name *lname,
1456 struct md_object *child,
1457 struct md_op_spec *spec,
/* NOTE(review): partial listing -- embedded original line numbers jump,
 * so statements below are NOT contiguous; verify against full source. */
1460 struct mdd_thread_info *info = mdd_env_info(env);
1461 struct lu_attr *la = &info->mti_la_for_fix;
1462 struct md_attr *ma_acl = &info->mti_ma;
1463 struct mdd_object *mdd_pobj = md2mdd_obj(pobj);
1464 struct mdd_object *son = md2mdd_obj(child);
1465 struct mdd_device *mdd = mdo2mdd(pobj);
1466 struct lu_attr *attr = &ma->ma_attr;
1467 struct lov_mds_md *lmm = NULL;
1468 struct thandle *handle;
1469 struct dynlock_handle *dlh;
1470 const char *name = lname->ln_name;
/* Flags tracking how far the operation got, for cleanup on failure. */
1471 int rc, created = 0, initialized = 0, inserted = 0, lmm_size = 0;
1472 int got_def_acl = 0;
1473 #ifdef HAVE_QUOTA_SUPPORT
1474 struct obd_device *obd = mdd->mdd_obd_dev;
1475 struct mds_obd *mds = &obd->u.mds;
1476 unsigned int qcids[MAXQUOTAS] = { 0, 0 };
1477 unsigned int qpids[MAXQUOTAS] = { 0, 0 };
1478 int quota_opc = 0, block_count = 0;
1479 int inode_pending = 0, block_pending = 0, parent_pending = 0;
1484 * Two operations have to be performed:
1486 * - an allocation of a new object (->do_create()), and
1488 * - an insertion into a parent index (->dio_insert()).
1490 * Due to locking, operation order is not important, when both are
1491 * successful, *but* error handling cases are quite different:
1493 * - if insertion is done first, and following object creation fails,
1494 * insertion has to be rolled back, but this operation might fail
1495 * also leaving us with dangling index entry.
1497 * - if creation is done first, it has to be undone if insertion
1498 * fails, leaving us with leaked space, which is neither good, nor
1501 * It seems that creation-first is simplest solution, but it is
1502 * sub-optimal in the frequent
1507 * case, because second mkdir is bound to create object, only to
1508 * destroy it immediately.
1510 * To avoid this follow local file systems that do double lookup:
1512 * 0. lookup -> -EEXIST (mdd_create_sanity_check())
1514 * 1. create (mdd_object_create_internal())
1516 * 2. insert (__mdd_index_insert(), lookup again)
1519 /* Sanity checks before big job. */
1520 rc = mdd_create_sanity_check(env, pobj, lname, ma, spec);
1524 #ifdef HAVE_QUOTA_SUPPORT
1525 if (mds->mds_quota) {
1526 struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
1528 rc = mdd_la_get(env, mdd_pobj, la_tmp, BYPASS_CAPA);
1532 quota_opc = FSFILT_OP_CREATE;
1533 mdd_quota_wrapper(&ma->ma_attr, qcids);
1534 mdd_quota_wrapper(la_tmp, qpids);
1535 /* get file quota for child */
1536 lquota_chkquota(mds_quota_interface_ref, obd,
1537 qcids[USRQUOTA], qcids[GRPQUOTA], 1,
1538 &inode_pending, NULL, 0);
1539 switch (ma->ma_attr.la_mode & S_IFMT) {
/* If child and parent have the same owner ids, one block-quota
 * check covers both; otherwise check each separately below. */
1548 if (qcids[USRQUOTA] == qpids[USRQUOTA] &&
1549 qcids[GRPQUOTA] == qpids[GRPQUOTA]) {
1553 /* get block quota for child and parent */
1555 lquota_chkquota(mds_quota_interface_ref, obd,
1556 qcids[USRQUOTA], qcids[GRPQUOTA],
1558 &block_pending, NULL,
1561 lquota_chkquota(mds_quota_interface_ref, obd,
1562 qpids[USRQUOTA], qpids[GRPQUOTA], 1,
1563 &parent_pending, NULL,
1570 * No RPC inside the transaction, so OST objects should be created at
1573 if (S_ISREG(attr->la_mode)) {
1574 rc = mdd_lov_create(env, mdd, mdd_pobj, son, &lmm, &lmm_size,
1577 GOTO(out_pending, rc);
/* Fetch the parent's default ACL for everything except symlinks
 * (symlinks never carry ACLs). */
1580 if (!S_ISLNK(attr->la_mode)) {
1581 ma_acl->ma_acl_size = sizeof info->mti_xattr_buf;
1582 ma_acl->ma_acl = info->mti_xattr_buf;
1583 ma_acl->ma_need = MA_ACL_DEF;
1584 ma_acl->ma_valid = 0;
1586 mdd_read_lock(env, mdd_pobj, MOR_TGT_PARENT);
1587 rc = mdd_def_acl_get(env, mdd_pobj, ma_acl);
1588 mdd_read_unlock(env, mdd_pobj);
1591 else if (ma_acl->ma_valid & MA_ACL_DEF)
1595 mdd_txn_param_build(env, mdd, MDD_TXN_MKDIR_OP);
1596 handle = mdd_trans_start(env, mdd);
1598 GOTO(out_free, rc = PTR_ERR(handle));
/* Take the parent's PDO (parallel directory operation) lock keyed
 * by the new entry name. */
1600 dlh = mdd_pdo_write_lock(env, mdd_pobj, name, MOR_TGT_PARENT);
1602 GOTO(out_trans, rc = -ENOMEM);
1604 mdd_write_lock(env, son, MOR_TGT_CHILD);
1605 rc = mdd_object_create_internal(env, mdd_pobj, son, ma, handle, spec);
1607 mdd_write_unlock(env, son);
1613 #ifdef CONFIG_FS_POSIX_ACL
1615 struct lu_buf *acl_buf = &info->mti_buf;
1616 acl_buf->lb_buf = ma_acl->ma_acl;
1617 acl_buf->lb_len = ma_acl->ma_acl_size;
/* __mdd_acl_init may modify la_mode (mask bits from the ACL), so
 * LA_MODE is re-marked valid afterwards. */
1619 rc = __mdd_acl_init(env, son, acl_buf, &attr->la_mode, handle);
1621 mdd_write_unlock(env, son);
1624 ma->ma_attr.la_valid |= LA_MODE;
1629 rc = mdd_object_initialize(env, mdo2fid(mdd_pobj),
1630 son, ma, handle, spec);
1631 mdd_write_unlock(env, son);
1634 * Object has no links, so it will be destroyed when last
1635 * reference is released. (XXX not now.)
1641 rc = __mdd_index_insert(env, mdd_pobj, mdo2fid(son),
1642 name, S_ISDIR(attr->la_mode), handle,
1643 mdd_object_capa(env, mdd_pobj));
1650 /* No need mdd_lsm_sanity_check here */
1651 rc = mdd_lov_set_md(env, mdd_pobj, son, lmm, lmm_size, handle, 0);
1653 CERROR("error on stripe info copy %d \n", rc);
1656 if (lmm && lmm_size > 0) {
1657 /* Set Lov here, do not get lmm again later */
1658 memcpy(ma->ma_lmm, lmm, lmm_size);
1659 ma->ma_lmm_size = lmm_size;
1660 ma->ma_valid |= MA_LOV;
/* For symlinks, write the target path into the object body inside
 * the same transaction. */
1663 if (S_ISLNK(attr->la_mode)) {
1664 struct md_ucred *uc = md_ucred(env);
1665 struct dt_object *dt = mdd_object_child(son);
1666 const char *target_name = spec->u.sp_symname;
1667 int sym_len = strlen(target_name);
1668 const struct lu_buf *buf;
1671 buf = mdd_buf_get_const(env, target_name, sym_len);
1672 rc = dt->do_body_ops->dbo_write(env, dt, buf, &pos, handle,
1673 mdd_object_capa(env, son),
1675 CFS_CAP_SYS_RESOURCE_MASK);
/* Presumably a short write is mapped to -EFAULT here -- the check
 * itself is in a missing line; confirm in full source. */
1680 GOTO(cleanup, rc = -EFAULT);
1684 la->la_valid = LA_CTIME | LA_MTIME;
1685 rc = mdd_attr_check_set_internal_locked(env, mdd_pobj, la, handle, 0);
1689 /* Return attr back. */
1690 rc = mdd_attr_get_internal_locked(env, son, ma);
/* Error path: undo whatever succeeded, in reverse order (index
 * entry first, then object references), using the same handle. */
1693 if (rc && created) {
1697 rc2 = __mdd_index_delete(env, mdd_pobj, name,
1698 S_ISDIR(attr->la_mode),
1699 handle, BYPASS_CAPA);
1701 CERROR("error can not cleanup destroy %d\n",
1706 mdd_write_lock(env, son, MOR_TGT_CHILD);
1707 __mdd_ref_del(env, son, handle, 0);
1708 if (initialized && S_ISDIR(attr->la_mode))
1709 __mdd_ref_del(env, son, handle, 1);
1710 mdd_write_unlock(env, son);
1714 /* update lov_objid data, must be before transaction stop! */
1716 mdd_lov_objid_update(mdd, lmm);
1718 mdd_pdo_write_unlock(env, mdd_pobj, dlh);
1720 mdd_trans_stop(env, mdd, rc, handle);
1722 /* finish lov_create stuff, free all temporary data */
1723 mdd_lov_create_finish(env, mdd, lmm, lmm_size, spec);
1725 #ifdef HAVE_QUOTA_SUPPORT
/* Commit any quota reservations taken before the transaction. */
1728 lquota_pending_commit(mds_quota_interface_ref, obd,
1729 qcids[USRQUOTA], qcids[GRPQUOTA],
1732 lquota_pending_commit(mds_quota_interface_ref, obd,
1733 qcids[USRQUOTA], qcids[GRPQUOTA],
1736 lquota_pending_commit(mds_quota_interface_ref, obd,
1737 qpids[USRQUOTA], qpids[GRPQUOTA],
1739 /* Trigger dqacq on the owner of child and parent. If failed,
1740 * the next call for lquota_chkquota will process it. */
1741 lquota_adjust(mds_quota_interface_ref, obd, qcids, qpids, rc,
1749 * Get locks on parents in proper order
1750 * RETURN: < 0 - error, rename_order if successful
/* NOTE(review): partial listing -- lines are not contiguous; the
 * MDD_RN_* return paths after mdd_is_parent() are in missing lines. */
1758 static int mdd_rename_order(const struct lu_env *env,
1759 struct mdd_device *mdd,
1760 struct mdd_object *src_pobj,
1761 struct mdd_object *tgt_pobj)
1763 /* order of locking, 1 - tgt-src, 0 - src-tgt*/
/* Same parent on both sides: a single lock order suffices. */
1767 if (src_pobj == tgt_pobj)
1768 RETURN(MDD_RN_SAME);
1770 /* compare the parent/child relationship of src_p & tgt_p */
/* The filesystem root can never be a descendant, so root as either
 * parent decides the order immediately without a tree walk. */
1771 if (lu_fid_eq(&mdd->mdd_root_fid, mdo2fid(src_pobj))){
1773 } else if (lu_fid_eq(&mdd->mdd_root_fid, mdo2fid(tgt_pobj))) {
1776 rc = mdd_is_parent(env, mdd, src_pobj, mdo2fid(tgt_pobj), NULL);
1789 /* No mdd_write/read_lock is held on any object yet. */
1790 static int mdd_rename_sanity_check(const struct lu_env *env,
1791 struct mdd_object *src_pobj,
1792 struct mdd_object *tgt_pobj,
1793 struct mdd_object *sobj,
1794 struct mdd_object *tobj,
/* NOTE(review): partial listing -- lines are not contiguous; RETURN
 * statements and some braces are in missing lines. */
/* Skip all permission checks when the caller (MDT) already did them. */
1800 if (unlikely(ma->ma_attr_flags & MDS_PERM_BYPASS))
1803 /* XXX: when get here, sobj must NOT be NULL,
1804 * the other case has been processed in cml_rename
1805 * before mdd_rename and enable MDS_PERM_BYPASS. */
1807 rc = mdd_may_delete(env, src_pobj, sobj, ma, 1, 0);
1811 /* XXX: when get here, "tobj == NULL" means tobj must
1812 * NOT exist (neither on remote MDS, such case has been
1813 * processed in cml_rename before mdd_rename and enable
1815 * So check may_create, but not check may_unlink. */
1817 rc = mdd_may_create(env, tgt_pobj, NULL,
1818 (src_pobj != tgt_pobj), 0);
1820 rc = mdd_may_delete(env, tgt_pobj, tobj, ma,
1821 (src_pobj != tgt_pobj), 1);
/* Moving a directory to a new parent adds a ".." link to tgt_pobj,
 * so the target parent must be allowed to gain a link. */
1823 if (!rc && !tobj && (src_pobj != tgt_pobj) &&
1824 S_ISDIR(ma->ma_attr.la_mode))
1825 rc = __mdd_may_link(env, tgt_pobj);
1830 /* src object can be remote that is why we use only fid and type of object */
1831 static int mdd_rename(const struct lu_env *env,
1832 struct md_object *src_pobj, struct md_object *tgt_pobj,
1833 const struct lu_fid *lf, const struct lu_name *lsname,
1834 struct md_object *tobj, const struct lu_name *ltname,
/* NOTE(review): partial listing -- embedded original line numbers jump,
 * so statements below are NOT contiguous; verify against full source. */
1837 const char *sname = lsname->ln_name;
1838 const char *tname = ltname->ln_name;
1839 struct lu_attr *la = &mdd_env_info(env)->mti_la_for_fix;
1840 struct mdd_object *mdd_spobj = md2mdd_obj(src_pobj);
1841 struct mdd_object *mdd_tpobj = md2mdd_obj(tgt_pobj);
1842 struct mdd_device *mdd = mdo2mdd(src_pobj);
1843 struct mdd_object *mdd_sobj = NULL;
1844 struct mdd_object *mdd_tobj = NULL;
1845 struct dynlock_handle *sdlh, *tdlh;
1846 struct thandle *handle;
1847 const struct lu_fid *tpobj_fid = mdo2fid(mdd_tpobj);
1851 #ifdef HAVE_QUOTA_SUPPORT
1852 struct obd_device *obd = mdd->mdd_obd_dev;
1853 struct mds_obd *mds = &obd->u.mds;
1854 unsigned int qspids[MAXQUOTAS] = { 0, 0 };
1855 unsigned int qtcids[MAXQUOTAS] = { 0, 0 };
1856 unsigned int qtpids[MAXQUOTAS] = { 0, 0 };
1857 int quota_opc = 0, rec_pending = 0;
/* Caller must have supplied the source object's type in ma. */
1861 LASSERT(ma->ma_attr.la_mode & S_IFMT);
1862 is_dir = S_ISDIR(ma->ma_attr.la_mode);
1865 mdd_tobj = md2mdd_obj(tobj);
1867 #ifdef HAVE_QUOTA_SUPPORT
1868 if (mds->mds_quota) {
1869 struct lu_attr *la_tmp = &mdd_env_info(env)->mti_la;
1871 rc = mdd_la_get(env, mdd_spobj, la_tmp, BYPASS_CAPA);
1873 mdd_quota_wrapper(la_tmp, qspids);
1875 rc = mdd_la_get(env, mdd_tpobj, la_tmp,
1878 quota_opc = FSFILT_OP_LINK;
1879 mdd_quota_wrapper(la_tmp, qtpids);
1880 /* get block quota for target parent */
1881 lquota_chkquota(mds_quota_interface_ref,
1882 obd, qtpids[USRQUOTA],
1883 qtpids[GRPQUOTA], 1,
1891 mdd_txn_param_build(env, mdd, MDD_TXN_RENAME_OP);
1892 handle = mdd_trans_start(env, mdd);
1894 GOTO(out_pending, rc = PTR_ERR(handle));
1896 /* FIXME: Should consider tobj and sobj too in rename_lock. */
/* Decide parent-lock ordering first to avoid an AB-BA deadlock with
 * a concurrent rename in the opposite direction. */
1897 rc = mdd_rename_order(env, mdd, mdd_spobj, mdd_tpobj);
1899 GOTO(cleanup_unlocked, rc);
1901 /* Get locks in determined order */
1902 if (rc == MDD_RN_SAME) {
1903 sdlh = mdd_pdo_write_lock(env, mdd_spobj,
1904 sname, MOR_SRC_PARENT);
1905 /* check hashes to determine do we need one lock or two */
1906 if (mdd_name2hash(sname) != mdd_name2hash(tname))
1907 tdlh = mdd_pdo_write_lock(env, mdd_tpobj, tname,
1911 } else if (rc == MDD_RN_SRCTGT) {
1912 sdlh = mdd_pdo_write_lock(env, mdd_spobj, sname,MOR_SRC_PARENT);
1913 tdlh = mdd_pdo_write_lock(env, mdd_tpobj, tname,MOR_TGT_PARENT);
1915 tdlh = mdd_pdo_write_lock(env, mdd_tpobj, tname,MOR_SRC_PARENT);
1916 sdlh = mdd_pdo_write_lock(env, mdd_spobj, sname,MOR_TGT_PARENT);
1918 if (sdlh == NULL || tdlh == NULL)
1919 GOTO(cleanup, rc = -ENOMEM);
/* Source object is referenced by fid only because it may be remote. */
1921 mdd_sobj = mdd_object_find(env, mdd, lf);
1922 rc = mdd_rename_sanity_check(env, mdd_spobj, mdd_tpobj,
1923 mdd_sobj, mdd_tobj, ma);
/* Remove the source name from its parent. */
1927 rc = __mdd_index_delete(env, mdd_spobj, sname, is_dir, handle,
1928 mdd_object_capa(env, mdd_spobj));
1932 /* "mv dir1 dir2" needs "dir1/.." link update */
1934 rc = __mdd_index_delete(env, mdd_sobj, dotdot, is_dir, handle,
1935 mdd_object_capa(env, mdd_spobj));
1939 rc = __mdd_index_insert(env, mdd_sobj, tpobj_fid, dotdot,
1941 mdd_object_capa(env, mdd_tpobj));
1947 * Here tobj can be remote one, so we do index_delete unconditionally
1948 * and -ENOENT is allowed.
1950 rc = __mdd_index_delete(env, mdd_tpobj, tname, is_dir, handle,
1951 mdd_object_capa(env, mdd_tpobj));
1952 if (rc != 0 && rc != -ENOENT)
/* Insert the new name pointing at the (possibly remote) source fid. */
1955 rc = __mdd_index_insert(env, mdd_tpobj, lf, tname, is_dir, handle,
1956 mdd_object_capa(env, mdd_tpobj));
1960 LASSERT(ma->ma_attr.la_valid & LA_CTIME);
1961 la->la_ctime = la->la_mtime = ma->ma_attr.la_ctime;
1963 /* XXX: mdd_sobj must be local one if it is NOT NULL. */
1965 la->la_valid = LA_CTIME;
1966 rc = mdd_attr_check_set_internal_locked(env, mdd_sobj, la,
1973 * For tobj is remote case cmm layer has processed
1974 * and set tobj to NULL then. So when tobj is NOT NULL,
1975 * it must be local one.
1977 if (tobj && mdd_object_exists(mdd_tobj)) {
/* Replaced target loses its name reference; directories also lose
 * the extra "." reference. */
1978 mdd_write_lock(env, mdd_tobj, MOR_TGT_CHILD);
1979 __mdd_ref_del(env, mdd_tobj, handle, 0);
1981 /* Remove dot reference. */
1983 __mdd_ref_del(env, mdd_tobj, handle, 1);
1985 la->la_valid = LA_CTIME;
1986 rc = mdd_attr_check_set_internal(env, mdd_tobj, la, handle, 0);
1990 rc = mdd_finish_unlink(env, mdd_tobj, ma, handle);
1991 mdd_write_unlock(env, mdd_tobj);
1995 #ifdef HAVE_QUOTA_SUPPORT
/* If the replaced target is now fully unlinked and unopened, release
 * its quota below. */
1996 if (mds->mds_quota && ma->ma_valid & MA_INODE &&
1997 ma->ma_attr.la_nlink == 0 && mdd_tobj->mod_count == 0) {
1998 quota_opc = FSFILT_OP_UNLINK_PARTIAL_CHILD;
1999 mdd_quota_wrapper(&ma->ma_attr, qtcids);
2004 la->la_valid = LA_CTIME | LA_MTIME;
2005 rc = mdd_attr_check_set_internal_locked(env, mdd_spobj, la, handle, 0);
2009 if (mdd_spobj != mdd_tpobj) {
2010 la->la_valid = LA_CTIME | LA_MTIME;
2011 rc = mdd_attr_check_set_internal_locked(env, mdd_tpobj, la,
/* Drop the parent PDO locks; tdlh may alias sdlh in the same-parent,
 * same-hash case, so unlock it only when distinct. */
2017 if (likely(tdlh) && sdlh != tdlh)
2018 mdd_pdo_write_unlock(env, mdd_tpobj, tdlh);
2020 mdd_pdo_write_unlock(env, mdd_spobj, sdlh);
2022 mdd_trans_stop(env, mdd, rc, handle);
2024 mdd_object_put(env, mdd_sobj);
2026 #ifdef HAVE_QUOTA_SUPPORT
2027 if (mds->mds_quota) {
2029 lquota_pending_commit(mds_quota_interface_ref, obd,
2033 /* Trigger dqrel on the source owner of parent.
2034 * If failed, the next call for lquota_chkquota will
2036 lquota_adjust(mds_quota_interface_ref, obd, 0, qspids, rc,
2037 FSFILT_OP_UNLINK_PARTIAL_PARENT);
2039 /* Trigger dqrel/dqacq on the target owner of child and
2040 * parent. If failed, the next call for lquota_chkquota
2041 * will process it. */
2042 lquota_adjust(mds_quota_interface_ref, obd, qtcids,
2043 qtpids, rc, quota_opc);
2049 const struct md_dir_operations mdd_dir_ops = {
2050 .mdo_is_subdir = mdd_is_subdir,
2051 .mdo_lookup = mdd_lookup,
2052 .mdo_create = mdd_create,
2053 .mdo_rename = mdd_rename,
2054 .mdo_link = mdd_link,
2055 .mdo_unlink = mdd_unlink,
2056 .mdo_name_insert = mdd_name_insert,
2057 .mdo_name_remove = mdd_name_remove,
2058 .mdo_rename_tgt = mdd_rename_tgt,
2059 .mdo_create_data = mdd_create_data