4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
34 #include <linux/sched.h>
36 #include <linux/quotaops.h>
37 #include <linux/highmem.h>
38 #include <linux/pagemap.h>
39 #include <linux/security.h>
40 #include <linux/user_namespace.h>
41 #ifdef HAVE_UIDGID_HEADER
42 # include <linux/uidgid.h>
45 #define DEBUG_SUBSYSTEM S_LLITE
47 #include <obd_support.h>
48 #include <lustre_fid.h>
49 #include <lustre_dlm.h>
50 #include "llite_internal.h"
52 static int ll_create_it(struct inode *dir, struct dentry *dentry,
53 struct lookup_intent *it,
54 void *secctx, __u32 secctxlen);
56 /* called from iget5_locked->find_inode() under inode_lock spinlock */
/* Inode-cache comparator passed to iget5_locked(): decides whether a cached
 * inode matches the lustre_md (MDS reply) passed via @opaque, by comparing
 * the cached lli_fid against the FID in the reply body.
 * NOTE(review): runs under a spinlock (see comment above), so it must not
 * sleep. Lines are elided in this extract; the rejection/return paths are
 * not visible here. */
57 static int ll_test_inode(struct inode *inode, void *opaque)
59 struct ll_inode_info *lli = ll_i2info(inode);
60 struct lustre_md *md = opaque;
/* An MDS body without OBD_MD_FLID carries no FID and cannot identify
 * an inode — log and reject. */
62 if (unlikely(!(md->body->mbo_valid & OBD_MD_FLID))) {
63 CERROR("MDS body missing FID\n");
/* Match only when the cached FID equals the FID in the reply body. */
67 if (!lu_fid_eq(&lli->lli_fid, &md->body->mbo_fid1))
/* Inode-cache initializer passed to iget5_locked(): seeds a freshly
 * allocated inode from the mdt_body in the lustre_md @opaque.
 * Copies the FID into lli_fid and sets the file-type bits of i_mode from
 * the server-supplied mode. Requires OBD_MD_FLID and OBD_MD_FLTYPE to be
 * valid in the body; error paths are elided in this extract. */
73 static int ll_set_inode(struct inode *inode, void *opaque)
75 struct ll_inode_info *lli = ll_i2info(inode);
76 struct mdt_body *body = ((struct lustre_md *)opaque)->body;
78 if (unlikely(!(body->mbo_valid & OBD_MD_FLID))) {
79 CERROR("MDS body missing FID\n");
83 lli->lli_fid = body->mbo_fid1;
/* Without the type bits we cannot set S_IFMT below. */
84 if (unlikely(!(body->mbo_valid & OBD_MD_FLTYPE))) {
85 CERROR("Can not initialize inode "DFID" without object type: "
87 PFID(&lli->lli_fid), body->mbo_valid);
/* Keep any existing permission bits, take only the file type from the
 * server-provided mode. */
91 inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mbo_mode & S_IFMT);
92 if (unlikely(inode->i_mode == 0)) {
93 CERROR("Invalid inode "DFID" type\n", PFID(&lli->lli_fid));
104 * Get an inode by inode number(@hash), which is already instantiated by
105 * the intent lookup).
/* Look up or create the VFS inode for @hash using iget5_locked() with the
 * ll_test_inode/ll_set_inode callbacks above. For an I_NEW inode, reads
 * attributes via ll_read_inode2() and, for regular files without a cl
 * object yet, initializes one via cl_file_inode_init(); on failure the
 * inode is marked bad and unlocked. For an existing good inode, refreshes
 * it with ll_update_inode(). Error/return lines are elided in this
 * extract. */
107 struct inode *ll_iget(struct super_block *sb, ino_t hash,
108 struct lustre_md *md)
116 inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md);
118 RETURN(ERR_PTR(-ENOMEM));
120 if (inode->i_state & I_NEW) {
121 rc = ll_read_inode2(inode, md);
122 if (rc == 0 && S_ISREG(inode->i_mode) &&
123 ll_i2info(inode)->lli_clob == NULL)
124 rc = cl_file_inode_init(inode, md);
127 /* Let's clear directory lsm here, otherwise
128 * make_bad_inode() will reset the inode mode
129 * to regular, then ll_clear_inode will not
130 * be able to clear lsm_md */
131 if (S_ISDIR(inode->i_mode))
132 ll_dir_clear_lsm_md(inode);
133 make_bad_inode(inode);
134 unlock_new_inode(inode);
/* Success path for a new inode: no xattrs cached yet, publish it. */
138 inode_has_no_xattr(inode);
139 unlock_new_inode(inode);
141 } else if (is_bad_inode(inode)) {
143 inode = ERR_PTR(-ESTALE);
144 } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
145 rc = ll_update_inode(inode, md);
146 CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n",
147 PFID(&md->body->mbo_fid1), inode, rc);
/* Same lsm_md-clearing rationale as the I_NEW failure path above. */
149 if (S_ISDIR(inode->i_mode))
150 ll_dir_clear_lsm_md(inode);
/* Walk every alias (dentry) of directory @dir and invalidate all of its
 * negative children (subdir entries whose d_inode is NULL), so stale
 * negative dcache entries are dropped after a server-side change.
 * Takes each alias's d_lock while scanning d_subdirs; the dcache is
 * locked/unlocked via ll_lock_dcache()/ll_unlock_dcache() (the lock call
 * is elided in this extract). */
159 static void ll_invalidate_negative_children(struct inode *dir)
161 struct dentry *dentry, *tmp_subdir;
162 DECLARE_LL_D_HLIST_NODE_PTR(p);
165 ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry) {
166 spin_lock(&dentry->d_lock);
167 if (!list_empty(&dentry->d_subdirs)) {
168 struct dentry *child;
170 list_for_each_entry_safe(child, tmp_subdir,
/* Only negative entries are invalidated here. */
173 if (child->d_inode == NULL)
174 d_lustre_invalidate(child, 1);
177 spin_unlock(&dentry->d_lock);
179 ll_unlock_dcache(dir);
/* Inode-cache comparator for ilookup5()-style lookups: non-zero when the
 * cached inode's FID equals the lu_fid passed via @opaque. */
182 int ll_test_inode_by_fid(struct inode *inode, void *opaque)
184 return lu_fid_eq(&ll_i2info(inode)->lli_fid, opaque);
/* Flush/discard Data-on-MDT (DoM) cached data when a DOM lock is being
 * cancelled. If no cl_object exists yet, just truncate the page cache.
 * Otherwise fetch the layout, and when a DoM component exists, sync
 * (or discard, if the lock carries the discard-data flag) the byte range
 * covered by the DoM component and drop those pages. Early-return lines
 * are elided in this extract. */
187 int ll_dom_lock_cancel(struct inode *inode, struct ldlm_lock *lock)
190 struct ll_inode_info *lli = ll_i2info(inode);
191 struct cl_layout clt = { .cl_layout_gen = 0, };
198 if (!lli->lli_clob) {
199 /* due to DoM read on open, there may exist pages for Lustre
200 * regular file even though cl_object is not set up yet. */
201 truncate_inode_pages(inode->i_mapping, 0);
203 env = cl_env_get(&refcheck);
205 RETURN(PTR_ERR(env));
/* Perform the client-side cleanup for each inodebit in @to_cancel when an
 * MDS lock is cancelled: drop the xattr cache (XATTR), close open handles
 * (OPEN), flush DoM data (DOM), invalidate the layout (LAYOUT), refresh
 * atime and — for directories — truncate the page cache and invalidate
 * negative children (UPDATE), and drop aliases (LOOKUP/PERM). Bits still
 * protected by another lock are masked out via ll_have_md_lock(). Many
 * early-return and brace lines are elided in this extract. */
230 void ll_lock_cancel_bits(struct ldlm_lock *lock, __u64 to_cancel)
232 struct inode *inode = ll_inode_from_resource_lock(lock);
233 __u64 bits = to_cancel;
/* Sanity: the lock resource must name this inode's FID. */
239 if (!fid_res_name_eq(ll_inode2fid(inode),
240 &lock->l_resource->lr_name)) {
241 LDLM_ERROR(lock, "data mismatch with object "DFID"(%p)",
242 PFID(ll_inode2fid(inode)), inode);
246 if (bits & MDS_INODELOCK_XATTR) {
247 if (S_ISDIR(inode->i_mode))
248 ll_i2info(inode)->lli_def_stripe_offset = -1;
249 ll_xattr_cache_destroy(inode);
250 bits &= ~MDS_INODELOCK_XATTR;
253 /* For OPEN locks we differentiate between lock modes
254 * LCK_CR, LCK_CW, LCK_PR - bug 22891 */
255 if (bits & MDS_INODELOCK_OPEN)
256 ll_have_md_lock(inode, &bits, lock->l_req_mode);
258 if (bits & MDS_INODELOCK_OPEN) {
261 switch (lock->l_req_mode) {
272 LDLM_ERROR(lock, "bad lock mode for OPEN lock");
276 ll_md_real_close(inode, fmode);
278 bits &= ~MDS_INODELOCK_OPEN;
/* Mask out bits still covered by some other granted lock. */
281 if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
282 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM |
284 ll_have_md_lock(inode, &bits, LCK_MINMODE);
286 if (bits & MDS_INODELOCK_DOM) {
287 rc = ll_dom_lock_cancel(inode, lock);
289 CDEBUG(D_INODE, "cannot flush DoM data "
291 PFID(ll_inode2fid(inode)), rc);
/* On flush failure, tell LDLM to ignore this lock for KMS. */
292 lock_res_and_lock(lock);
293 ldlm_set_kms_ignore(lock);
294 unlock_res_and_lock(lock);
297 if (bits & MDS_INODELOCK_LAYOUT) {
298 struct cl_object_conf conf = {
299 .coc_opc = OBJECT_CONF_INVALIDATE,
303 rc = ll_layout_conf(inode, &conf);
305 CDEBUG(D_INODE, "cannot invalidate layout of "
307 PFID(ll_inode2fid(inode)), rc);
/* Losing UPDATE means cached atime may be stale; refresh on next use. */
310 if (bits & MDS_INODELOCK_UPDATE) {
311 struct ll_inode_info *lli = ll_i2info(inode);
313 lli->lli_update_atime = 1;
316 if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
317 struct ll_inode_info *lli = ll_i2info(inode);
319 CDEBUG(D_INODE, "invalidating inode "DFID" lli = %p, "
320 "pfid = "DFID"\n", PFID(ll_inode2fid(inode)),
321 lli, PFID(&lli->lli_pfid));
322 truncate_inode_pages(inode->i_mapping, 0);
324 if (unlikely(!fid_is_zero(&lli->lli_pfid))) {
325 struct inode *master_inode = NULL;
328 /* This is slave inode, since all of the child dentry
329 * is connected on the master inode, so we have to
330 * invalidate the negative children on master inode */
331 CDEBUG(D_INODE, "Invalidate s"DFID" m"DFID"\n",
332 PFID(ll_inode2fid(inode)), PFID(&lli->lli_pfid));
334 hash = cl_fid_build_ino(&lli->lli_pfid,
335 ll_need_32bit_api(ll_i2sbi(inode)));
337 /* Do not lookup the inode with ilookup5, otherwise
338 * it will cause dead lock,
339 * 1. Client1 send chmod req to the MDT0, then on MDT0,
340 * it enqueues master and all of its slaves lock,
341 * (mdt_attr_set() -> mdt_lock_slaves()), after gets
342 * master and stripe0 lock, it will send the enqueue
343 * req (for stripe1) to MDT1, then MDT1 finds the lock
344 * has been granted to client2. Then MDT1 sends blocking
346 * 2. At the same time, client2 tries to unlink
347 * the striped dir (rm -rf striped_dir), and during
348 * lookup, it will hold the master inode of the striped
349 * directory, whose inode state is NEW, then tries to
350 * revalidate all of its slaves, (ll_prep_inode()->
351 * ll_iget()->ll_read_inode2()-> ll_update_inode().).
352 * And it will be blocked on the server side because
354 * 3. Then the client get the blocking_ast req, cancel
355 * the lock, but being blocked if using ->ilookup5()),
356 * because master inode state is NEW. */
357 master_inode = ilookup5_nowait(inode->i_sb, hash,
358 ll_test_inode_by_fid,
359 (void *)&lli->lli_pfid);
361 ll_invalidate_negative_children(master_inode);
365 ll_invalidate_negative_children(inode);
/* Never invalidate aliases of the root dentry itself. */
369 if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
370 inode->i_sb->s_root != NULL &&
371 inode != inode->i_sb->s_root->d_inode)
372 ll_invalidate_aliases(inode);
377 /* Check if the given lock may be downgraded instead of canceling and
378 * that convert is really needed. */
/* Returns whether the blocking-AST handler should convert (downgrade)
 * @lock rather than cancel it outright. Requires the server to support
 * lock convert (exp_connect_lock_convert), a non-empty set of both
 * wanted (cancel) and remaining bits, DOM involvement, a lock mode that
 * is convertible, and a lock that is not too old (ns_dirty_age_limit).
 * The switch body and return lines are elided in this extract. */
379 int ll_md_need_convert(struct ldlm_lock *lock)
381 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
383 __u64 wanted = lock->l_policy_data.l_inodebits.cancel_bits;
384 __u64 bits = lock->l_policy_data.l_inodebits.bits & ~wanted;
385 enum ldlm_mode mode = LCK_MINMODE;
387 if (!lock->l_conn_export ||
388 !exp_connect_lock_convert(lock->l_conn_export))
391 if (!wanted || !bits || ldlm_is_cancel(lock))
394 /* do not convert locks other than DOM for now */
395 if (!((bits | wanted) & MDS_INODELOCK_DOM))
398 /* We may have already remaining bits in some other lock so
399 * lock convert will leave us just extra lock for the same bit.
400 * Check if client has other lock with the same bits and the same
401 * or lower mode and don't convert if any.
403 switch (lock->l_req_mode) {
415 /* do not convert other modes */
419 /* is the lock too old to be converted? */
420 lock_res_and_lock(lock);
421 if (ktime_after(ktime_get(),
422 ktime_add(lock->l_last_used,
423 ktime_set(ns->ns_dirty_age_limit, 0)))) {
424 unlock_res_and_lock(lock);
427 unlock_res_and_lock(lock);
429 inode = ll_inode_from_resource_lock(lock);
430 ll_have_md_lock(inode, &bits, mode);
/* LDLM blocking AST callback for MDS inodebit locks.
 * LDLM_CB_BLOCKING: either convert the lock (LCF_CONVERT, after flushing
 * DOM data via ll_lock_cancel_bits while cancel bits are still set) or
 * cancel it asynchronously.
 * LDLM_CB_CANCELING: run ll_lock_cancel_bits() on the bits actually being
 * dropped (for a converted lock, the cancel_bits; otherwise the full set).
 * The switch statement and return lines are elided in this extract. */
435 int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
436 void *data, int flag)
438 struct lustre_handle lockh;
439 __u64 bits = lock->l_policy_data.l_inodebits.bits;
445 case LDLM_CB_BLOCKING:
447 __u64 cancel_flags = LCF_ASYNC;
449 if (ll_md_need_convert(lock)) {
450 cancel_flags |= LCF_CONVERT;
451 /* For lock convert some cancel actions may require
452 * this lock with non-dropped canceled bits, e.g. page
453 * flush for DOM lock. So call ll_lock_cancel_bits()
454 * here while canceled bits are still set.
456 bits = lock->l_policy_data.l_inodebits.cancel_bits;
457 if (bits & MDS_INODELOCK_DOM)
458 ll_lock_cancel_bits(lock, MDS_INODELOCK_DOM);
460 ldlm_lock2handle(lock, &lockh);
461 rc = ldlm_cli_cancel(&lockh, cancel_flags);
463 CDEBUG(D_INODE, "ldlm_cli_cancel: rc = %d\n", rc);
468 case LDLM_CB_CANCELING:
469 if (ldlm_is_converting(lock)) {
470 /* this is called on already converted lock, so
471 * ibits has remained bits only and cancel_bits
472 * are bits that were dropped.
473 * Note that DOM lock is handled prior lock convert
474 * and is excluded here.
476 bits = lock->l_policy_data.l_inodebits.cancel_bits &
479 LASSERT(ldlm_is_canceling(lock));
481 ll_lock_cancel_bits(lock, bits);
/* Return the inode's GID (mapped to init_user_ns) if the current task is
 * a member of that group, otherwise INVALID_GID — i.e. the supplementary
 * group id worth sending to the MDS for this inode, if any. */
490 __u32 ll_i2suppgid(struct inode *i)
492 if (in_group_p(i->i_gid))
493 return (__u32)from_kgid(&init_user_ns, i->i_gid);
495 return (__u32) __kgid_val(INVALID_GID);
498 /* Pack the required supplementary groups into the supplied groups array.
499 * If we don't need to use the groups from the target inode(s) then we
500 * instead pack one or more groups from the user's supplementary group
501 * array in case it might be useful. Not needed if doing an MDS-side upcall. */
/* Fills suppgids[0] from @i1 and suppgids[1] from @i2 via ll_i2suppgid();
 * intermediate lines (including any NULL handling for i2) are elided in
 * this extract. */
502 void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
505 LASSERT(suppgids != NULL);
507 suppgids[0] = ll_i2suppgid(i1);
510 suppgids[1] = ll_i2suppgid(i2);
516 * try to reuse three types of dentry:
517 * 1. unhashed alias, this one is unhashed by d_invalidate (but it may be valid
518 * by concurrent .revalidate).
519 * 2. INVALID alias (common case for no valid ldlm lock held, but this flag may
520 * be cleared by others calling d_lustre_revalidate).
521 * 3. DISCONNECTED alias.
/* Scan the aliases of @inode under the dcache lock looking for one to
 * reuse in place of @dentry: prefer an alias with the same parent/name
 * (invalid_alias) over a DCACHE_DISCONNECTED directory alias
 * (discon_alias). Returns the chosen alias or NULL; reference-taking
 * lines are elided in this extract. */
523 static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
525 struct dentry *alias, *discon_alias, *invalid_alias;
526 DECLARE_LL_D_HLIST_NODE_PTR(p);
528 if (ll_d_hlist_empty(&inode->i_dentry))
531 discon_alias = invalid_alias = NULL;
533 ll_lock_dcache(inode);
534 ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry) {
535 LASSERT(alias != dentry);
537 spin_lock(&alias->d_lock);
538 if ((alias->d_flags & DCACHE_DISCONNECTED) &&
539 S_ISDIR(inode->i_mode))
540 /* LASSERT(last_discon == NULL); LU-405, bz 20055 */
541 discon_alias = alias;
542 else if (alias->d_parent == dentry->d_parent &&
543 alias->d_name.hash == dentry->d_name.hash &&
544 alias->d_name.len == dentry->d_name.len &&
545 memcmp(alias->d_name.name, dentry->d_name.name,
546 dentry->d_name.len) == 0)
547 invalid_alias = alias;
548 spin_unlock(&alias->d_lock);
/* Same-name alias wins over a disconnected one. */
553 alias = invalid_alias ?: discon_alias ?: NULL;
555 spin_lock(&alias->d_lock);
557 spin_unlock(&alias->d_lock);
559 ll_unlock_dcache(inode);
565 * Similar to d_splice_alias(), but lustre treats invalid alias
566 * similar to DCACHE_DISCONNECTED, and tries to use it anyway.
/* Attach @inode to a dentry: reuse an existing alias found by
 * ll_find_alias() when possible, otherwise instantiate/hash @de itself.
 * The actual splice/instantiate lines are elided in this extract; only
 * the debug traces for both outcomes are visible. */
568 struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
574 new = ll_find_alias(inode, de);
584 "Reuse dentry %p inode %p refc %d flags %#x\n",
585 new, new->d_inode, ll_d_count(new), new->d_flags);
593 CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
594 de, de->d_inode, ll_d_count(de), de->d_flags);
/* Finish an intent lookup once the MDS reply @request is available:
 * for a positive lookup, build the inode (ll_prep_inode), finish a DoM
 * open if needed, record lock bits, apply any server-returned security
 * context, then splice the dentry via ll_splice_alias(). Afterwards
 * revalidate the dentry if the appropriate LOOKUP/UPDATE lock is held.
 * On error with an open intent, the open is cleaned up on the server.
 * Many brace/return lines are elided in this extract. */
598 static int ll_lookup_it_finish(struct ptlrpc_request *request,
599 struct lookup_intent *it,
600 struct inode *parent, struct dentry **de,
601 void *secctx, __u32 secctxlen)
603 struct inode *inode = NULL;
606 struct dentry *alias;
609 /* NB 1 request reference will be taken away by ll_intent_lock()
611 CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
613 if (!it_disposition(it, DISP_LOOKUP_NEG)) {
614 struct req_capsule *pill = &request->rq_pill;
615 struct mdt_body *body = req_capsule_server_get(pill,
618 rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
622 if (it->it_op & IT_OPEN)
623 ll_dom_finish_open(inode, request, it);
625 ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
627 /* We used to query real size from OSTs here, but actually
628 * this is not needed. For stat() calls size would be updated
629 * from subsequent do_revalidate()->ll_inode_revalidate_it() in
631 * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
632 * Everybody else who needs correct file size would call
633 * ll_glimpse_size or some equivalent themselves anyway.
637 /* If security context was returned by MDT, put it in
638 * inode now to save an extra getxattr from security hooks,
639 * and avoid deadlock.
641 if (body->mbo_valid & OBD_MD_SECCTX) {
642 secctx = req_capsule_server_get(pill, &RMF_FILE_SECCTX);
643 secctxlen = req_capsule_get_size(pill,
648 CDEBUG(D_SEC, "server returned security context"
650 PFID(ll_inode2fid(inode)));
653 if (secctx != NULL && secctxlen != 0) {
655 rc = security_inode_notifysecctx(inode, secctx,
/* Failure to set the context is logged but not fatal. */
659 CWARN("cannot set security context for "
661 PFID(ll_inode2fid(inode)), rc);
665 /* Only hash *de if it is unhashed (new dentry).
666 * Atomic_open may pass in hashed dentries for open.
668 alias = ll_splice_alias(inode, *de);
670 GOTO(out, rc = PTR_ERR(alias));
674 if (!it_disposition(it, DISP_LOOKUP_NEG)) {
675 /* we have lookup lock - unhide dentry */
676 if (bits & MDS_INODELOCK_LOOKUP)
677 d_lustre_revalidate(*de);
678 } else if (!it_disposition(it, DISP_OPEN_CREATE)) {
679 /* If file created on server, don't depend on parent UPDATE
680 * lock to unhide it. It is left hidden and next lookup can
681 * find it in ll_splice_alias.
683 /* Check that parent has UPDATE lock. */
684 struct lookup_intent parent_it = {
686 .it_lock_handle = 0 };
687 struct lu_fid fid = ll_i2info(parent)->lli_fid;
689 /* If it is striped directory, get the real stripe parent */
690 if (unlikely(ll_i2info(parent)->lli_lsm_md != NULL)) {
691 rc = md_get_fid_from_lsm(ll_i2mdexp(parent),
692 ll_i2info(parent)->lli_lsm_md,
694 (*de)->d_name.len, &fid);
699 if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it, &fid,
701 d_lustre_revalidate(*de);
702 ll_intent_release(&parent_it);
709 if (rc != 0 && it->it_op & IT_OPEN)
710 ll_open_cleanup((*de)->d_sb, request);
/* Core intent-based lookup: builds md_op_data for @dentry under @parent,
 * optionally initializes/requests a file security context, sends the
 * intent RPC via md_intent_lock() (retrying once with a corrected
 * suppgid on -EACCES for open-deny), then finishes via
 * ll_lookup_it_finish(). @secctx/@secctxlen, when non-NULL, receive the
 * created file's security context for the caller (ll_atomic_open).
 * Returns the (possibly different) dentry, NULL when @dentry itself was
 * used, or an ERR_PTR. Many brace/return/label lines are elided in this
 * extract. */
715 static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
716 struct lookup_intent *it,
717 void **secctx, __u32 *secctxlen)
719 struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
720 struct dentry *save = dentry, *retval;
721 struct ptlrpc_request *req = NULL;
722 struct md_op_data *op_data = NULL;
725 char secctx_name[XATTR_NAME_MAX + 1];
729 if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
730 RETURN(ERR_PTR(-ENAMETOOLONG));
732 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p), intent=%s\n",
733 dentry->d_name.len, dentry->d_name.name,
734 PFID(ll_inode2fid(parent)), parent, LL_IT2STR(it));
736 if (d_mountpoint(dentry))
737 CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
739 if (it == NULL || it->it_op == IT_GETXATTR)
/* Statahead fast path for plain GETATTR lookups. */
742 if (it->it_op == IT_GETATTR && dentry_may_statahead(parent, dentry)) {
743 rc = ll_statahead(parent, &dentry, 0);
745 RETURN(dentry == save ? NULL : dentry);
748 if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE &&
749 dentry->d_sb->s_flags & MS_RDONLY)
750 RETURN(ERR_PTR(-EROFS));
752 if (it->it_op & IT_CREAT)
753 opc = LUSTRE_OPC_CREATE;
755 opc = LUSTRE_OPC_ANY;
757 op_data = ll_prep_md_op_data(NULL, parent, NULL, dentry->d_name.name,
758 dentry->d_name.len, 0, opc, NULL);
760 GOTO(out, retval = ERR_CAST(op_data));
762 /* enforce umask if acl disabled or MDS doesn't support umask */
763 if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
764 it->it_create_mode &= ~current_umask();
766 if (it->it_op & IT_CREAT &&
767 ll_i2sbi(parent)->ll_flags & LL_SBI_FILE_SECCTX) {
768 rc = ll_dentry_init_security(dentry, it->it_create_mode,
770 &op_data->op_file_secctx_name,
771 &op_data->op_file_secctx,
772 &op_data->op_file_secctx_size);
774 GOTO(out, retval = ERR_PTR(rc));
/* Hand the created context back to the caller (see out: path,
 * which clears it in op_data so it is not freed here). */
776 *secctx = op_data->op_file_secctx;
777 if (secctxlen != NULL)
778 *secctxlen = op_data->op_file_secctx_size;
782 if (secctxlen != NULL)
786 /* ask for security context upon intent */
787 if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_OPEN)) {
788 /* get name of security xattr to request to server */
789 rc = ll_listsecurity(parent, secctx_name,
790 sizeof(secctx_name));
792 CDEBUG(D_SEC, "cannot get security xattr name for "
794 PFID(ll_inode2fid(parent)), rc);
796 op_data->op_file_secctx_name = secctx_name;
797 op_data->op_file_secctx_name_size = rc;
798 CDEBUG(D_SEC, "'%.*s' is security xattr for "DFID"\n",
799 rc, secctx_name, PFID(ll_inode2fid(parent)));
803 rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req,
804 &ll_md_blocking_ast, 0);
805 /* If the MDS allows the client to chgrp (CFS_SETGRP_PERM), but the
806 * client does not know which suppgid should be sent to the MDS, or
807 * some other(s) changed the target file's GID after this RPC sent
808 * to the MDS with the suppgid as the original GID, then we should
809 * try again with right suppgid. */
810 if (rc == -EACCES && it->it_op & IT_OPEN &&
811 it_disposition(it, DISP_OPEN_DENY)) {
812 struct mdt_body *body;
814 LASSERT(req != NULL);
816 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
/* Give up if we already sent this GID, or are not a member. */
817 if (op_data->op_suppgids[0] == body->mbo_gid ||
818 op_data->op_suppgids[1] == body->mbo_gid ||
819 !in_group_p(make_kgid(&init_user_ns, body->mbo_gid)))
820 GOTO(out, retval = ERR_PTR(-EACCES));
822 fid_zero(&op_data->op_fid2);
823 op_data->op_suppgids[1] = body->mbo_gid;
824 ptlrpc_req_finished(req);
826 ll_intent_release(it);
827 rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req,
828 &ll_md_blocking_ast, 0);
832 GOTO(out, retval = ERR_PTR(rc));
834 /* dir layout may change */
835 ll_unlock_md_op_lsm(op_data);
836 rc = ll_lookup_it_finish(req, it, parent, &dentry,
837 secctx != NULL ? *secctx : NULL,
838 secctxlen != NULL ? *secctxlen : 0);
840 ll_intent_release(it);
841 GOTO(out, retval = ERR_PTR(rc));
/* Special files cannot keep an MDS open handle. */
844 if ((it->it_op & IT_OPEN) && dentry->d_inode &&
845 !S_ISREG(dentry->d_inode->i_mode) &&
846 !S_ISDIR(dentry->d_inode->i_mode)) {
847 ll_release_openhandle(dentry, it);
849 ll_lookup_finish_locks(it, dentry);
851 GOTO(out, retval = (dentry == save) ? NULL : dentry);
854 if (op_data != NULL && !IS_ERR(op_data)) {
855 if (secctx != NULL && secctxlen != NULL) {
856 /* caller needs sec ctx info, so reset it in op_data to
857 * prevent it from being freed */
858 op_data->op_file_secctx = NULL;
859 op_data->op_file_secctx_size = 0;
861 ll_finish_md_op_data(op_data);
864 ptlrpc_req_finished(req);
868 #ifdef HAVE_IOP_ATOMIC_OPEN
/* .lookup inode operation (atomic_open kernels): runs ll_lookup_it()
 * with a GETATTR intent, or with no intent for LOOKUP_PARENT/OPEN/CREATE
 * flags (open/create are handled by ll_atomic_open below). The
 * CREATE-without-OPEN case is optimized away when the caller has write
 * permission on the parent. Return lines are elided in this extract. */
869 static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
872 struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
875 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p), flags=%u\n",
876 dentry->d_name.len, dentry->d_name.name,
877 PFID(ll_inode2fid(parent)), parent, flags);
880 * Optimize away (CREATE && !OPEN). Let .create handle the race.
881 * but only if we have write permissions there, otherwise we need
882 * to proceed with lookup. LU-4185
884 if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN) &&
885 (inode_permission(parent, MAY_WRITE | MAY_EXEC) == 0))
888 if (flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE))
892 de = ll_lookup_it(parent, dentry, itp, NULL, NULL);
895 ll_intent_release(itp);
901 * For cached negative dentry and new dentry, handle lookup/create/open
/* .atomic_open inode operation: allocates a lookup_intent (IT_OPEN, plus
 * IT_CREAT for O_CREAT), runs ll_lookup_it() which may create the file
 * server-side, then either finishes the create via ll_create_it() (with
 * any security context returned by the lookup) and finish_open(), or
 * falls back to finish_no_open(). FIFOs are never opened through the MDS
 * handle (OBD_CONNECT_NODEVOH). Allocation-failure, unhash, and return
 * lines are elided in this extract. */
904 static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
905 struct file *file, unsigned open_flags,
906 umode_t mode, int *opened)
908 struct lookup_intent *it;
910 long long lookup_flags = LOOKUP_OPEN;
916 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p), file %p,"
917 "open_flags %x, mode %x opened %d\n",
918 dentry->d_name.len, dentry->d_name.name,
919 PFID(ll_inode2fid(dir)), dir, file, open_flags, mode, *opened);
921 /* Only negative dentries enter here */
922 LASSERT(dentry->d_inode == NULL);
924 if (!d_unhashed(dentry)) {
925 /* A valid negative dentry that just passed revalidation,
926 * there's little point to try and open it server-side,
927 * even though there's a minuscule chance it might succeed.
928 * Either way it's a valid race to just return -ENOENT here.
930 if (!(open_flags & O_CREAT))
933 /* Otherwise we just unhash it to be rehashed afresh via
934 * lookup if necessary
939 OBD_ALLOC(it, sizeof(*it));
944 if (open_flags & O_CREAT) {
945 it->it_op |= IT_CREAT;
946 lookup_flags |= LOOKUP_CREATE;
948 it->it_create_mode = (mode & S_IALLUGO) | S_IFREG;
949 it->it_flags = (open_flags & ~O_ACCMODE) | OPEN_FMODE(open_flags);
950 it->it_flags &= ~MDS_OPEN_FL_INTERNAL;
952 /* Dentry added to dcache tree in ll_lookup_it */
953 de = ll_lookup_it(dir, dentry, it, &secctx, &secctxlen);
959 CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_CREATE_FILE_PAUSE, cfs_fail_val);
962 if (it_disposition(it, DISP_OPEN_CREATE)) {
963 /* Dentry instantiated in ll_create_it. */
964 rc = ll_create_it(dir, dentry, it, secctx, secctxlen);
965 security_release_secctx(secctx, secctxlen);
967 /* We dget in ll_splice_alias. */
973 *opened |= FILE_CREATED;
975 if (dentry->d_inode && it_disposition(it, DISP_OPEN_OPEN)) {
977 if (S_ISFIFO(dentry->d_inode->i_mode)) {
978 /* We cannot call open here as it might
979 * deadlock. This case is unreachable in
980 * practice because of OBD_CONNECT_NODEVOH. */
981 rc = finish_no_open(file, de);
/* Stash the intent for ll_file_open() to consume. */
983 file->private_data = it;
984 rc = finish_open(file, dentry, NULL, opened);
985 /* We dget in ll_splice_alias. finish_open takes
986 * care of dget for fd open.
992 rc = finish_no_open(file, de);
997 ll_intent_release(it);
998 OBD_FREE(it, sizeof(*it));
1003 #else /* !HAVE_IOP_ATOMIC_OPEN */
/* Pre-atomic_open kernels: translate the nameidata open_intent @oit into
 * a freshly allocated lookup_intent. LOOKUP_OPEN yields IT_OPEN (plus
 * IT_CREAT when LOOKUP_CREATE and the mount is not read-only), otherwise
 * IT_GETATTR. Caller owns and frees the returned intent. Allocation and
 * return lines are elided in this extract. */
1004 static struct lookup_intent *
1005 ll_convert_intent(struct open_intent *oit, int lookup_flags, bool is_readonly)
1007 struct lookup_intent *it;
1011 return ERR_PTR(-ENOMEM);
1013 if (lookup_flags & LOOKUP_OPEN) {
1014 it->it_op = IT_OPEN;
1015 /* Avoid file creation for ro bind mount point(is_readonly) */
1016 if ((lookup_flags & LOOKUP_CREATE) && !is_readonly)
1017 it->it_op |= IT_CREAT;
1018 it->it_create_mode = (oit->create_mode & S_IALLUGO) | S_IFREG;
1019 it->it_flags = ll_namei_to_lookup_intent_flag(oit->flags &
1020 ~(is_readonly ? O_CREAT : 0));
1021 it->it_flags &= ~MDS_OPEN_FL_INTERNAL;
1023 it->it_op = IT_GETATTR;
/* .lookup inode operation (pre-atomic_open kernels, nameidata-based):
 * reuses an intent stashed on the dentry by a prior pass, or builds one
 * from the nameidata via ll_convert_intent(), then runs ll_lookup_it().
 * On an open disposition the file is instantiated here via
 * lookup_instantiate_filp(); on a create disposition the intent is
 * stashed on the dentry for ll_create_nd (which MUST run, or the intent
 * leaks). Many brace/return lines are elided in this extract. */
1029 static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
1030 struct nameidata *nd)
1035 if (nd && !(nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT))) {
1036 struct lookup_intent *it;
1038 if (ll_d2d(dentry) && ll_d2d(dentry)->lld_it) {
1039 it = ll_d2d(dentry)->lld_it;
1040 ll_d2d(dentry)->lld_it = NULL;
1043 * Optimize away (CREATE && !OPEN). Let .create handle
1044 * the race. But only if we have write permissions
1045 * there, otherwise we need to proceed with lookup.
1048 if ((nd->flags & LOOKUP_CREATE) &&
1049 !(nd->flags & LOOKUP_OPEN) &&
1050 (inode_permission(parent,
1051 MAY_WRITE | MAY_EXEC) == 0))
1054 it = ll_convert_intent(&nd->intent.open, nd->flags,
1055 (nd->path.mnt->mnt_flags & MNT_READONLY) ||
1056 (nd->path.mnt->mnt_sb->s_flags & MS_RDONLY));
1058 RETURN((struct dentry *)it);
1061 de = ll_lookup_it(parent, dentry, it, NULL, NULL);
1064 if ((nd->flags & LOOKUP_OPEN) && !IS_ERR(dentry)) { /* Open */
1065 if (dentry->d_inode &&
1066 it_disposition(it, DISP_OPEN_OPEN)) { /* nocreate */
1067 if (S_ISFIFO(dentry->d_inode->i_mode)) {
1068 /* We cannot call open here as it might
1069 * deadlock. This case is unreachable in
1070 * practice because of
1071 * OBD_CONNECT_NODEVOH. */
1075 nd->intent.open.file->private_data = it;
1076 filp = lookup_instantiate_filp(nd,
1082 de = (struct dentry *)filp;
1085 } else if (it_disposition(it, DISP_OPEN_CREATE)) {
1086 /* XXX This can only reliably work on assumption
1087 * that there are NO hashed negative dentries.*/
1088 ll_d2d(dentry)->lld_it = it;
1089 it = NULL; /* Will be freed in ll_create_nd */
1090 /* We absolutely depend on ll_create_nd to be
1091 * called to not leak this intent and possible
1092 * data attached to it */
1097 ll_intent_release(it);
1098 OBD_FREE(it, sizeof(*it));
/* No usable intent: plain (intent-less) lookup. */
1101 de = ll_lookup_it(parent, dentry, NULL, NULL, NULL);
1106 #endif /* HAVE_IOP_ATOMIC_OPEN */
1108 /* We depend on "mode" being set with the proper file type/umask by now */
/* Build the new file's inode from the create-intent reply held in
 * @it->it_request (ll_prep_inode), then stash the inode pointer into the
 * granted lock via ll_set_lock_data(). Consumes the DISP_ENQ_CREATE_REF
 * request reference; error/return lines are elided in this extract. */
1109 static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
1111 struct inode *inode = NULL;
1112 struct ptlrpc_request *request = NULL;
1113 struct ll_sb_info *sbi = ll_i2sbi(dir);
1117 LASSERT(it && it->it_disposition);
1119 LASSERT(it_disposition(it, DISP_ENQ_CREATE_REF));
1120 request = it->it_request;
1121 it_clear_disposition(it, DISP_ENQ_CREATE_REF);
1122 rc = ll_prep_inode(&inode, request, dir->i_sb, it);
1124 GOTO(out, inode = ERR_PTR(rc));
1126 /* Pause to allow for a race with concurrent access by fid */
1127 OBD_FAIL_TIMEOUT(OBD_FAIL_LLITE_CREATE_NODE_PAUSE, cfs_fail_val);
1129 /* We asked for a lock on the directory, but were granted a
1130 * lock on the inode. Since we finally have an inode pointer,
1131 * stuff it in the lock. */
1132 CDEBUG(D_DLMTRACE, "setting l_ast_data to inode "DFID"(%p)\n",
1133 PFID(ll_inode2fid(inode)), inode);
1134 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
1137 ptlrpc_req_finished(request);
1142 * By the time this is called, we already have created the directory cache
1143 * entry for the new file, but it is so far negative - it has no inode.
1145 * We defer creating the OBD object(s) until open, to keep the intent and
1146 * non-intent code paths similar, and also because we do not have the MDS
1147 * inode number before calling ll_create_node() (which is needed for LOV),
1148 * so we would need to do yet another RPC to the MDS to store the LOV EA
1149 * data on the MDS. If needed, we would pass the PACKED lmm as data and
1150 * lmm_size in datalen (the MDS still has code which will handle that).
1152 * If the create succeeds, we fill in the inode information
1153 * with d_instantiate().
/* Finish a server-side create: check the open-create error in the
 * intent, build the inode via ll_create_node(), apply @secctx before
 * d_instantiate() (when LL_SBI_FILE_SECCTX is set), instantiate the
 * dentry, and otherwise initialize security via ll_inode_init_security().
 * Brace/return lines are elided in this extract. */
1155 static int ll_create_it(struct inode *dir, struct dentry *dentry,
1156 struct lookup_intent *it,
1157 void *secctx, __u32 secctxlen)
1159 struct inode *inode;
1163 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p), intent=%s\n",
1164 dentry->d_name.len, dentry->d_name.name,
1165 PFID(ll_inode2fid(dir)), dir, LL_IT2STR(it));
1167 rc = it_open_error(DISP_OPEN_CREATE, it);
1171 inode = ll_create_node(dir, it);
1173 RETURN(PTR_ERR(inode));
1175 if ((ll_i2sbi(inode)->ll_flags & LL_SBI_FILE_SECCTX) &&
1178 /* must be done before d_instantiate, because it calls
1179 * security_d_instantiate, which means a getxattr if security
1180 * context is not set yet */
1181 rc = security_inode_notifysecctx(inode, secctx, secctxlen);
1182 inode_unlock(inode);
1187 d_instantiate(dentry, inode);
1189 if (!(ll_i2sbi(inode)->ll_flags & LL_SBI_FILE_SECCTX)) {
1190 rc = ll_inode_init_security(dentry, inode, dir);
/* Advance @inode's m/ctime from the mdt_body in @request's reply, but
 * only forward (never set a timestamp backwards) and only for fields the
 * server marked valid (OBD_MD_FLMTIME/OBD_MD_FLCTIME). */
1198 void ll_update_times(struct ptlrpc_request *request, struct inode *inode)
1200 struct mdt_body *body = req_capsule_server_get(&request->rq_pill,
1204 if (body->mbo_valid & OBD_MD_FLMTIME &&
1205 body->mbo_mtime > LTIME_S(inode->i_mtime)) {
1206 CDEBUG(D_INODE, "setting fid "DFID" mtime from %lu to %llu"
1207 "\n", PFID(ll_inode2fid(inode)),
1208 LTIME_S(inode->i_mtime), body->mbo_mtime);
1209 LTIME_S(inode->i_mtime) = body->mbo_mtime;
1212 if (body->mbo_valid & OBD_MD_FLCTIME &&
1213 body->mbo_ctime > LTIME_S(inode->i_ctime))
1214 LTIME_S(inode->i_ctime) = body->mbo_ctime;
/* Common MDS-create helper behind mknod/create/mkdir/symlink: prepares
 * md_op_data for @dchild under @dir (with @tgt as symlink target when
 * non-NULL), optionally initializes a file security context, issues
 * md_create(), retries once on -EREMOTE for striped directories after
 * refreshing the default LMV stripe offset, then builds the child inode,
 * applies the security context before d_instantiate(), and instantiates
 * the dentry. Error-label and return lines are elided in this extract. */
1217 static int ll_new_node(struct inode *dir, struct dentry *dchild,
1218 const char *tgt, umode_t mode, int rdev, __u32 opc)
1220 struct qstr *name = &dchild->d_name;
1221 struct ptlrpc_request *request = NULL;
1222 struct md_op_data *op_data;
1223 struct inode *inode = NULL;
1224 struct ll_sb_info *sbi = ll_i2sbi(dir);
/* Symlink target length includes the trailing NUL. */
1229 if (unlikely(tgt != NULL))
1230 tgt_len = strlen(tgt) + 1;
1233 op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name,
1234 name->len, 0, opc, NULL);
1235 if (IS_ERR(op_data))
1236 GOTO(err_exit, err = PTR_ERR(op_data));
1238 if (sbi->ll_flags & LL_SBI_FILE_SECCTX) {
1239 err = ll_dentry_init_security(dchild, mode, &dchild->d_name,
1240 &op_data->op_file_secctx_name,
1241 &op_data->op_file_secctx,
1242 &op_data->op_file_secctx_size);
1244 GOTO(err_exit, err);
1247 err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode,
1248 from_kuid(&init_user_ns, current_fsuid()),
1249 from_kgid(&init_user_ns, current_fsgid()),
1250 cfs_curproc_cap_pack(), rdev, &request);
1251 if (err < 0 && err != -EREMOTE)
1252 GOTO(err_exit, err);
1254 /* If the client doesn't know where to create a subdirectory (or
1255 * in case of a race that sends the RPC to the wrong MDS), the
1256 * MDS will return -EREMOTE and the client will fetch the layout
1257 * of the directory, then create the directory on the right MDT. */
1258 if (unlikely(err == -EREMOTE)) {
1259 struct ll_inode_info *lli = ll_i2info(dir);
1260 struct lmv_user_md *lum;
1264 ptlrpc_req_finished(request);
1267 err2 = ll_dir_getstripe(dir, (void **)&lum, &lumsize, &request,
1268 OBD_MD_DEFAULT_MEA);
1270 /* Update stripe_offset and retry */
1271 lli->lli_def_stripe_offset = lum->lum_stripe_offset;
1272 } else if (err2 == -ENODATA &&
1273 lli->lli_def_stripe_offset != -1) {
1274 /* If there are no default stripe EA on the MDT, but the
1275 * client has default stripe, then it probably means
1276 * default stripe EA has just been deleted. */
1277 lli->lli_def_stripe_offset = -1;
1279 GOTO(err_exit, err);
1282 ptlrpc_req_finished(request);
1284 ll_finish_md_op_data(op_data);
1288 ll_update_times(request, dir);
1290 CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_NEWNODE_PAUSE, cfs_fail_val);
1292 err = ll_prep_inode(&inode, request, dchild->d_sb, NULL);
1294 GOTO(err_exit, err);
1296 if (sbi->ll_flags & LL_SBI_FILE_SECCTX) {
1298 /* must be done before d_instantiate, because it calls
1299 * security_d_instantiate, which means a getxattr if security
1300 * context is not set yet */
1301 err = security_inode_notifysecctx(inode,
1302 op_data->op_file_secctx,
1303 op_data->op_file_secctx_size);
1304 inode_unlock(inode);
1306 GOTO(err_exit, err);
1309 d_instantiate(dchild, inode);
1311 if (!(sbi->ll_flags & LL_SBI_FILE_SECCTX)) {
1312 err = ll_inode_init_security(dchild, inode, dir);
1314 GOTO(err_exit, err);
1319 if (request != NULL)
1320 ptlrpc_req_finished(request);
1322 if (!IS_ERR_OR_NULL(op_data))
1323 ll_finish_md_op_data(op_data);
/*
 * VFS ->mknod(): create a special file (or a regular file for the
 * mode == 0 / S_IFREG cases) named @dchild in @dir via ll_new_node().
 */
1328 static int ll_mknod(struct inode *dir, struct dentry *dchild, ll_umode_t mode,
1331 struct qstr *name = &dchild->d_name;
1335 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p) mode %o dev %x\n",
1336 name->len, name->name, PFID(ll_inode2fid(dir)), dir,
/* apply the umask locally unless the server applies it for us
 * (umask is delegated to the MDS only when POSIX ACLs are in use) */
1339 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
1340 mode &= ~current_umask();
1342 switch (mode & S_IFMT) {
1344 mode |= S_IFREG; /* for mode = 0 case, fallthrough */
/* rdev is encoded in the legacy 16-bit format for the wire protocol */
1350 err = ll_new_node(dir, dchild, NULL, mode, old_encode_dev(rdev),
1361 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKNOD, 1);
1366 #ifdef HAVE_IOP_ATOMIC_OPEN
/*
 * VFS ->create() on kernels with atomic_open: handles only the plain
 * (non-intent) create; open intents go through ll_atomic_open().
 */
1368 * Plain create. Intent create is handled in atomic_open.
1370 static int ll_create_nd(struct inode *dir, struct dentry *dentry,
1371 umode_t mode, bool want_excl)
/* test hook: optional delay injected before the create */
1375 CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_CREATE_FILE_PAUSE, cfs_fail_val);
1377 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p), "
1378 "flags=%u, excl=%d\n", dentry->d_name.len,
1379 dentry->d_name.name, PFID(ll_inode2fid(dir)),
1380 dir, mode, want_excl);
1382 /* Using mknod(2) to create a regular file is designed to not recognize
1383 * volatile file name, so we use ll_mknod() here. */
1384 rc = ll_mknod(dir, dentry, mode, 0);
1386 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1);
1388 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, unhashed %d\n",
1389 dentry->d_name.len, dentry->d_name.name, d_unhashed(dentry));
1393 #else /* !HAVE_IOP_ATOMIC_OPEN */
/*
 * VFS ->create() on older kernels without atomic_open: the nameidata
 * may carry an open intent, in which case the file is also opened and
 * the intent handed to the resulting struct file.
 */
1394 static int ll_create_nd(struct inode *dir, struct dentry *dentry,
1395 ll_umode_t mode, struct nameidata *nd)
1397 struct ll_dentry_data *lld = ll_d2d(dentry);
1398 struct lookup_intent *it = NULL;
1401 CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_CREATE_FILE_PAUSE, cfs_fail_val);
1407 /* LU-8559: use LUSTRE_OPC_CREATE for non atomic open case
1408 * so that volatile file name is recognized.
1409 * Mknod(2), however, is designed to not recognize volatile
1410 * file name to avoid inode leak under orphan directory until
1412 return ll_new_node(dir, dentry, NULL, mode, 0,
1418 /* Was there an error? Propagate it! */
1419 if (it->it_status) {
1424 rc = ll_create_it(dir, dentry, it, NULL, 0);
1425 if (nd && (nd->flags & LOOKUP_OPEN) && dentry->d_inode) { /* Open */
/* hand the intent to the new file; released on close */
1428 nd->intent.open.file->private_data = it;
1429 filp = lookup_instantiate_filp(nd, dentry, NULL);
1435 ll_intent_release(it);
1436 OBD_FREE(it, sizeof(*it));
1439 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1);
1443 #endif /* HAVE_IOP_ATOMIC_OPEN */
/*
 * VFS ->symlink(): create symlink @dchild in @dir pointing at
 * @oldpath, via ll_new_node() with LUSTRE_OPC_SYMLINK.
 */
1445 static int ll_symlink(struct inode *dir, struct dentry *dchild,
1446 const char *oldpath)
1448 struct qstr *name = &dchild->d_name;
1452 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p), target=%.*s\n",
/* 3000 is just the %.*s precision bounding how much of the
 * target string is printed in the debug log */
1453 name->len, name->name, PFID(ll_inode2fid(dir)),
1454 dir, 3000, oldpath);
1456 err = ll_new_node(dir, dchild, oldpath, S_IFLNK | S_IRWXUGO, 0,
1457 LUSTRE_OPC_SYMLINK);
1460 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
/*
 * VFS ->link(): create hard link @new_dentry in @dir to the inode of
 * @old_dentry, via an MDS link RPC (md_link).
 */
1465 static int ll_link(struct dentry *old_dentry, struct inode *dir,
1466 struct dentry *new_dentry)
1468 struct inode *src = old_dentry->d_inode;
1469 struct qstr *name = &new_dentry->d_name;
1470 struct ll_sb_info *sbi = ll_i2sbi(dir);
1471 struct ptlrpc_request *request = NULL;
1472 struct md_op_data *op_data;
1476 CDEBUG(D_VFSTRACE, "VFS Op: inode="DFID"(%p), dir="DFID"(%p), "
1477 "target=%.*s\n", PFID(ll_inode2fid(src)), src,
1478 PFID(ll_inode2fid(dir)), dir, name->len, name->name);
/* op_data carries both the link source inode and the target dir */
1480 op_data = ll_prep_md_op_data(NULL, src, dir, name->name, name->len,
1481 0, LUSTRE_OPC_ANY, NULL);
1482 if (IS_ERR(op_data))
1483 RETURN(PTR_ERR(op_data));
1485 err = md_link(sbi->ll_md_exp, op_data, &request);
1486 ll_finish_md_op_data(op_data);
/* refresh the directory times from the reply and account the op */
1490 ll_update_times(request, dir);
1491 ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1);
1494 ptlrpc_req_finished(request);
/*
 * VFS ->mkdir(): create directory @dchild in @dir via ll_new_node()
 * with LUSTRE_OPC_MKDIR.
 */
1498 static int ll_mkdir(struct inode *dir, struct dentry *dchild, ll_umode_t mode)
1500 struct qstr *name = &dchild->d_name;
1504 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p)\n",
1505 name->len, name->name, PFID(ll_inode2fid(dir)), dir);
/* apply the umask locally unless the server applies it for us */
1507 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
1508 mode &= ~current_umask();
/* keep only permission + sticky bits and force the directory type */
1510 mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
1512 err = ll_new_node(dir, dchild, NULL, mode, 0, LUSTRE_OPC_MKDIR);
1514 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
/*
 * VFS ->rmdir(): remove directory @dchild from @dir via an MDS unlink
 * RPC (md_unlink) with S_IFDIR set in op_data.
 */
1519 static int ll_rmdir(struct inode *dir, struct dentry *dchild)
1521 struct qstr *name = &dchild->d_name;
1522 struct ptlrpc_request *request = NULL;
1523 struct md_op_data *op_data;
1527 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p)\n",
1528 name->len, name->name, PFID(ll_inode2fid(dir)), dir);
/* refuse to remove a mountpoint, as vfs_rmdir would */
1530 if (unlikely(d_mountpoint(dchild)))
1533 op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name, name->len,
1534 S_IFDIR, LUSTRE_OPC_ANY, NULL);
1535 if (IS_ERR(op_data))
1536 RETURN(PTR_ERR(op_data));
/* pass the victim's FID so the MDS can verify the name still
 * matches the expected object */
1538 if (dchild->d_inode != NULL)
1539 op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
1541 op_data->op_fid2 = op_data->op_fid3;
1542 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
1543 ll_finish_md_op_data(op_data);
1545 ll_update_times(request, dir);
1546 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
1549 ptlrpc_req_finished(request);
/*
 * Remove a directory entry by name only (no dentry), using the
 * CLI_RM_ENTRY flag on the unlink RPC.  Used outside the normal VFS
 * rmdir path.
 *
 * NOTE(review): @namelen is used for the debug message but
 * strlen(name) is passed to ll_prep_md_op_data(); confirm callers
 * always pass a NUL-terminated name with namelen == strlen(name).
 */
1556 int ll_rmdir_entry(struct inode *dir, char *name, int namelen)
1558 struct ptlrpc_request *request = NULL;
1559 struct md_op_data *op_data;
1563 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p)\n",
1564 namelen, name, PFID(ll_inode2fid(dir)), dir);
1566 op_data = ll_prep_md_op_data(NULL, dir, NULL, name, strlen(name),
1567 S_IFDIR, LUSTRE_OPC_ANY, NULL);
1568 if (IS_ERR(op_data))
1569 RETURN(PTR_ERR(op_data));
/* CLI_RM_ENTRY asks the MDS to remove only the name entry */
1570 op_data->op_cli_flags |= CLI_RM_ENTRY;
1571 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
1572 ll_finish_md_op_data(op_data);
1574 ll_update_times(request, dir);
1575 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
1578 ptlrpc_req_finished(request);
/*
 * VFS ->unlink(): remove name @dchild from @dir via an MDS unlink RPC,
 * then update the victim inode's link count from the reply so it can
 * be freed without waiting for a revalidation.
 */
1582 static int ll_unlink(struct inode *dir, struct dentry *dchild)
1584 struct qstr *name = &dchild->d_name;
1585 struct ptlrpc_request *request = NULL;
1586 struct md_op_data *op_data;
1587 struct mdt_body *body;
1590 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p)\n",
1591 name->len, name->name, PFID(ll_inode2fid(dir)), dir);
1594 * XXX: unlink bind mountpoint maybe call to here,
1595 * just check it as vfs_unlink does.
1597 if (unlikely(d_mountpoint(dchild)))
1600 op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name, name->len, 0,
1601 LUSTRE_OPC_ANY, NULL);
1602 if (IS_ERR(op_data))
1603 RETURN(PTR_ERR(op_data));
/* send the victim's FID so the MDS unlinks the expected object */
1605 op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
1607 op_data->op_fid2 = op_data->op_fid3;
1608 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
1609 ll_finish_md_op_data(op_data);
1614 * The server puts attributes in on the last unlink, use them to update
1615 * the link count so the inode can be freed immediately.
1617 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
1618 if (body->mbo_valid & OBD_MD_FLNLINK)
1619 set_nlink(dchild->d_inode, body->mbo_nlink);
1621 ll_update_times(request, dir);
1622 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_UNLINK, 1);
1625 ptlrpc_req_finished(request);
/*
 * VFS ->rename(): rename @src_dchild in @src to @tgt_dchild in @tgt
 * via an MDS rename RPC (md_rename), then d_move() the dentries on
 * success.  The flags parameter exists only on kernels with
 * HAVE_IOPS_RENAME_WITH_FLAGS.
 */
1629 static int ll_rename(struct inode *src, struct dentry *src_dchild,
1630 struct inode *tgt, struct dentry *tgt_dchild
1631 #ifdef HAVE_IOPS_RENAME_WITH_FLAGS
1632 , unsigned int flags
1636 struct qstr *src_name = &src_dchild->d_name;
1637 struct qstr *tgt_name = &tgt_dchild->d_name;
1638 struct ptlrpc_request *request = NULL;
1639 struct ll_sb_info *sbi = ll_i2sbi(src);
1640 struct md_op_data *op_data;
1644 #ifdef HAVE_IOPS_RENAME_WITH_FLAGS
1649 CDEBUG(D_VFSTRACE, "VFS Op:oldname=%.*s, src_dir="DFID
1650 "(%p), newname=%.*s, tgt_dir="DFID"(%p)\n",
1651 src_name->len, src_name->name,
1652 PFID(ll_inode2fid(src)), src, tgt_name->len,
1653 tgt_name->name, PFID(ll_inode2fid(tgt)), tgt);
/* refuse to rename over or from a mountpoint */
1655 if (unlikely(d_mountpoint(src_dchild) || d_mountpoint(tgt_dchild)))
1658 op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
1659 LUSTRE_OPC_ANY, NULL);
1660 if (IS_ERR(op_data))
1661 RETURN(PTR_ERR(op_data));
/* fid3 = source object, fid4 = existing target object (if any),
 * so the MDS can verify both ends of the rename */
1663 if (src_dchild->d_inode != NULL)
1664 op_data->op_fid3 = *ll_inode2fid(src_dchild->d_inode);
1666 if (tgt_dchild->d_inode != NULL)
1667 op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode);
1669 err = md_rename(sbi->ll_md_exp, op_data,
1670 src_name->name, src_name->len,
1671 tgt_name->name, tgt_name->len, &request);
1672 ll_finish_md_op_data(op_data);
1674 ll_update_times(request, src);
1675 ll_update_times(request, tgt);
1676 ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
1679 ptlrpc_req_finished(request);
/* on success, move the dentry in the dcache to its new parent */
1682 d_move(src_dchild, tgt_dchild);
/*
 * inode_operations for Lustre directories.  Which entries exist
 * depends on kernel-version feature macros (HAVE_IOP_ATOMIC_OPEN,
 * HAVE_IOP_XATTR, HAVE_IOP_GET_ACL, HAVE_IOP_SET_ACL).
 */
1687 const struct inode_operations ll_dir_inode_operations = {
1689 #ifdef HAVE_IOP_ATOMIC_OPEN
1690 .atomic_open = ll_atomic_open,
1692 .lookup = ll_lookup_nd,
1693 .create = ll_create_nd,
1694 /* We need all these non-raw things for NFSD, to not patch it. */
1695 .unlink = ll_unlink,
1698 .symlink = ll_symlink,
1700 .rename = ll_rename,
1701 .setattr = ll_setattr,
1702 .getattr = ll_getattr,
1703 .permission = ll_inode_permission,
1704 #ifdef HAVE_IOP_XATTR
1705 .setxattr = ll_setxattr,
1706 .getxattr = ll_getxattr,
1707 .removexattr = ll_removexattr,
1709 .listxattr = ll_listxattr,
1710 #ifdef HAVE_IOP_GET_ACL
1711 .get_acl = ll_get_acl,
1713 #ifdef HAVE_IOP_SET_ACL
1714 .set_acl = ll_set_acl,
1718 const struct inode_operations ll_special_inode_operations = {
1719 .setattr = ll_setattr,
1720 .getattr = ll_getattr,
1721 .permission = ll_inode_permission,
1722 #ifdef HAVE_IOP_XATTR
1723 .setxattr = ll_setxattr,
1724 .getxattr = ll_getxattr,
1725 .removexattr = ll_removexattr,
1727 .listxattr = ll_listxattr,
1728 #ifdef HAVE_IOP_GET_ACL
1729 .get_acl = ll_get_acl,
1731 #ifdef HAVE_IOP_SET_ACL
1732 .set_acl = ll_set_acl,