4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
33 * Directory code for lustre client.
37 #include <linux/pagemap.h>
39 #include <linux/version.h>
40 #include <linux/security.h>
41 #include <linux/user_namespace.h>
42 #include <linux/uidgid.h>
43 #include <linux/uaccess.h>
44 #include <linux/buffer_head.h> // for wait_on_buffer
45 #include <linux/pagevec.h>
47 #define DEBUG_SUBSYSTEM S_LLITE
49 #include <obd_support.h>
50 #include <obd_class.h>
51 #include <uapi/linux/lustre/lustre_ioctl.h>
52 #include <lustre_lib.h>
53 #include <lustre_dlm.h>
54 #include <lustre_compat.h>
55 #include <lustre_fid.h>
56 #include <lustre_kernelcomm.h>
57 #include <lustre_swab.h>
58 #include <lustre_quota.h>
59 #include <libcfs/libcfs_crypto.h>
61 #include "llite_internal.h"
64 * (new) readdir implementation overview.
66 * Original lustre readdir implementation cached exact copy of raw directory
67 * pages on the client. These pages were indexed in client page cache by
68 * logical offset in the directory file. This design, while very simple and
69 * intuitive had some inherent problems:
71 * . it implies that byte offset to the directory entry serves as a
72 * telldir(3)/seekdir(3) cookie, but that offset is not stable: in
73 * ext3/htree directory entries may move due to splits, and more
76 * . it is incompatible with the design of split directories for cmd3,
77 * that assumes that names are distributed across nodes based on their
78 * hash, and so readdir should be done in hash order.
80 * New readdir implementation does readdir in hash order, and uses hash of a
81 * file name as a telldir/seekdir cookie. This led to a number of complications:
83 * . hash is not unique, so it cannot be used to index cached directory
84 * pages on the client (note, that it requires a whole pageful of hash
85 * collided entries to cause two pages to have identical hashes);
87 * . hash is not unique, so it cannot, strictly speaking, be used as an
88 * entry cookie. ext3/htree has the same problem and lustre implementation
89 * mimics their solution: seekdir(hash) positions directory at the first
90 * entry with the given hash.
96 * Client caches directory pages using hash of the first entry as an index. As
97 * noted above hash is not unique, so this solution doesn't work as is:
98 * special processing is needed for "page hash chains" (i.e., sequences of
99 * pages filled with entries all having the same hash value).
101 * First, such chains have to be detected. To this end, server returns to the
102 * client the hash of the first entry on the page next to one returned. When
103 * client detects that this hash is the same as hash of the first entry on the
104 * returned page, page hash collision has to be handled. Pages in the
105 * hash chain, except first one, are termed "overflow pages".
107 * Proposed (unimplemented) solution to the index uniqueness problem is to
108 * not cache overflow pages. Instead, when page hash collision is
109 * detected, all overflow pages from emerging chain should be
110 * immediately requested from the server and placed in a special data
111 * structure. This data structure can be used by ll_readdir() to
112 * process entries from overflow pages. When readdir invocation
113 * finishes, overflow pages are discarded. If page hash collision chain
114 * weren't completely processed, next call to readdir will again detect
115 * page hash collision, again read overflow pages in, process next
116 * portion of entries and again discard the pages. This is not as
117 * wasteful as it looks, because, given reasonable hash, page hash
118 * collisions are extremely rare.
120 * 1. directory positioning
122 * When seekdir(hash) is called.
124 * seekdir() sets the location in the directory stream from which the next
125 * readdir() call will start. mdc_page_locate() is used to find page with
126 * starting hash and will issue RPC to fetch that page. If there is a hash
127 * collision the concerned page is removed.
132 * identification of and access to overflow pages
136 * Page in MDS_READPAGE RPC is packed in LU_PAGE_SIZE, and each page contains
137 * a header lu_dirpage which describes the start/end hash, and whether this
138 * page is empty (contains no dir entry) or hash collide with next page.
139 * After client receives reply, several pages will be integrated into dir page
140 * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the
141 * lu_dirpage for this integrated page will be adjusted. See
142 * mdc_adjust_dirpages().
/*
 * ll_get_dir_page() - fetch one hash-indexed directory page for @dir.
 *
 * Delegates to md_read_page() on the directory's MD export, passing
 * ll_md_blocking_ast as the blocking AST for the DLM lock taken on the page.
 * If the caller supplied @partial_readdir_rc, any partial-readdir status
 * reported by the MD layer is propagated back through it.
 *
 * NOTE(review): interior lines (declarations of rc/page, error handling and
 * the return path) are elided in this extract — confirm against full source.
 */
145 struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
146 __u64 offset, int *partial_readdir_rc)
148 struct md_readdir_info mrinfo = {
149 .mr_blocking_ast = ll_md_blocking_ast };
/* read the directory page at hash @offset through the MD layer */
153 rc = md_read_page(ll_i2mdexp(dir), op_data, &mrinfo, offset, &page);
/* only report partial-readdir status when the caller asked for it */
157 if (partial_readdir_rc && mrinfo.mr_partial_readdir_rc)
158 *partial_readdir_rc = mrinfo.mr_partial_readdir_rc;
/*
 * ll_release_page() - release a directory page obtained via ll_get_dir_page().
 *
 * Pages of striped directories are always dropped from the page cache,
 * because they are assembled temporarily in the LMV layer rather than being
 * genuine cached copies of a single MDT page.
 *
 * NOTE(review): interior lines are elided in this extract.
 */
163 void ll_release_page(struct inode *inode, struct page *page,
168 /* Always remove the page for striped dir, because the page is
169 * built from temporarily in LMV layer */
170 if (inode && ll_dir_striped(inode)) {
/* the page may already have been truncated; only delete if still mapped */
177 if (likely(page->mapping != NULL))
178 cfs_delete_from_page_cache(page);
/*
 * ll_dir_read() - walk directory pages in hash order starting at *ppos and
 * emit each entry to the VFS (dir_emit() on dir_context kernels, filldir()
 * callback otherwise).
 *
 * Encrypted directories convert on-disk names to user-visible names via
 * ll_fname_disk_to_usr() before emitting. The walk continues page by page
 * (ldp_hash_end gives the next page's start hash) until the end of the
 * directory (MDS_DIR_END_OFF) or until the emit callback signals done.
 *
 * NOTE(review): many interior lines (rc/done/pos declarations, error paths,
 * loop close braces) are elided in this extract — verify against full source.
 */
184 #ifdef HAVE_DIR_CONTEXT
185 int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data,
186 struct dir_context *ctx, int *partial_readdir_rc)
189 int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data,
190 void *cookie, filldir_t filldir, int *partial_readdir_rc)
193 struct ll_sb_info *sbi = ll_i2sbi(inode);
195 bool is_api32 = ll_need_32bit_api(sbi);
196 bool is_hash64 = test_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
199 struct llcrypt_str lltr = LLTR_INIT(NULL, 0);
/* encrypted dir: pre-allocate a buffer for decrypted/user-visible names */
203 if (IS_ENCRYPTED(inode)) {
204 rc = llcrypt_fname_alloc_buffer(inode, NAME_MAX, &lltr);
209 page = ll_get_dir_page(inode, op_data, pos, partial_readdir_rc);
211 while (rc == 0 && !done) {
212 struct lu_dirpage *dp;
213 struct lu_dirent *ent;
222 hash = MDS_DIR_END_OFF;
223 dp = page_address(page);
/* iterate all dirents on this page until done or page exhausted */
224 for (ent = lu_dirent_start(dp); ent != NULL && !done;
225 ent = lu_dirent_next(ent)) {
232 hash = le64_to_cpu(ent->lde_hash);
233 if (hash < pos) /* Skip until we find target hash */
236 namelen = le16_to_cpu(ent->lde_namelen);
237 if (namelen == 0) /* Skip dummy record */
240 if (is_api32 && is_hash64)
244 fid_le_to_cpu(&fid, &ent->lde_fid);
245 ino = cl_fid_build_ino(&fid, is_api32);
246 type = S_DT(lu_dirent_type_get(ent));
247 /* For ll_nfs_get_name_filldir(), it will try to access
248 * 'ent' through 'lde_name', so the parameter 'name'
249 * for 'filldir()' must be part of the 'ent'. */
250 #ifdef HAVE_DIR_CONTEXT
252 if (!IS_ENCRYPTED(inode)) {
253 done = !dir_emit(ctx, ent->lde_name, namelen,
256 /* Directory is encrypted */
257 int save_len = lltr.len;
258 struct llcrypt_str de_name =
259 LLTR_INIT(ent->lde_name, namelen);
261 rc = ll_fname_disk_to_usr(inode, 0, 0, &de_name,
269 done = !dir_emit(ctx, de_name.name, de_name.len,
273 /* HAVE_DIR_CONTEXT was introduced in kernel 3.11, whereas
274 * IS_ENCRYPTED arrived with kernel 4.14.
275 * So there is no need to handle encryption case here.
277 done = filldir(cookie, ent->lde_name, namelen, lhash,
284 ll_release_page(inode, page, false);
/* ldp_hash_end of this page is the start hash of the next page */
288 next = le64_to_cpu(dp->ldp_hash_end);
290 if (pos == MDS_DIR_END_OFF) {
291 /* End of directory reached. */
293 ll_release_page(inode, page, false);
295 /* Normal case: continue to the next page.*/
296 ll_release_page(inode, page,
297 le32_to_cpu(dp->ldp_flags) &
300 page = ll_get_dir_page(inode, op_data, pos,
304 #ifdef HAVE_DIR_CONTEXT
309 llcrypt_fname_free_buffer(&lltr);
/*
 * ll_iterate()/ll_readdir() - VFS readdir entry point for Lustre directories.
 *
 * Handles fscrypt key preparation, resolves the parent FID for striped
 * directories (needed to synthesize ".." — see lmv_read_page()), builds the
 * md_op_data, and delegates the actual walk to ll_dir_read(). Records
 * per-file-descriptor partial-readdir status and tallies readdir latency.
 *
 * NOTE(review): interior lines (pos/rc declarations, #else/#endif pairs,
 * GOTO targets, return) are elided in this extract.
 */
313 #ifdef HAVE_DIR_CONTEXT
314 static int ll_iterate(struct file *filp, struct dir_context *ctx)
316 static int ll_readdir(struct file *filp, void *cookie, filldir_t filldir)
319 struct inode *inode = file_inode(filp);
320 struct ll_file_data *lfd = filp->private_data;
321 struct ll_sb_info *sbi = ll_i2sbi(inode);
322 bool hash64 = test_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
323 int api32 = ll_need_32bit_api(sbi);
324 struct md_op_data *op_data;
325 struct lu_fid pfid = { 0 };
326 ktime_t kstart = ktime_get();
327 /* result of possible partial readdir */
328 int partial_readdir_rc = 0;
334 LASSERT(lfd != NULL);
338 "VFS Op:inode="DFID"(%p) pos/size%lu/%llu 32bit_api %d\n",
339 PFID(ll_inode2fid(inode)),
340 inode, (unsigned long)pos, i_size_read(inode), api32);
/* -ENOKEY is tolerated: encrypted dirs are listable without the key */
342 if (IS_ENCRYPTED(inode)) {
343 rc = llcrypt_prepare_readdir(inode);
344 if (rc && rc != -ENOKEY)
348 if (pos == MDS_DIR_END_OFF)
352 if (unlikely(ll_dir_striped(inode))) {
353 struct dentry *parent = dget_parent(file_dentry(filp));
354 struct inode *i_dir = d_inode(parent);
356 /* Only needed for striped dir to fill ..see lmv_read_page() */
358 struct obd_export *exp = ll_i2mdexp(i_dir);
359 __u64 ibits = MDS_INODELOCK_LOOKUP;
/* use the cached parent FID if we still hold a LOOKUP lock on it */
361 if (ll_have_md_lock(exp, i_dir, &ibits, LCK_MINMODE))
362 pfid = *ll_inode2fid(i_dir);
366 /* If it can not find in cache, do lookup .. on the master
368 if (fid_is_zero(&pfid)) {
369 rc = ll_dir_get_parent_fid(inode, &pfid);
375 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
376 LUSTRE_OPC_ANY, inode);
378 GOTO(out, rc = PTR_ERR(op_data));
380 /* foreign dirs are browsed out of Lustre */
381 if (unlikely(lmv_dir_foreign(op_data->op_lso1))) {
382 ll_finish_md_op_data(op_data);
386 op_data->op_fid3 = pfid;
388 #ifdef HAVE_DIR_CONTEXT
390 rc = ll_dir_read(inode, &pos, op_data, ctx, &partial_readdir_rc);
393 rc = ll_dir_read(inode, &pos, op_data, cookie, filldir,
394 &partial_readdir_rc);
/* remember the first partial-readdir result for this open file */
397 if (!lfd->fd_partial_readdir_rc)
398 lfd->fd_partial_readdir_rc = partial_readdir_rc;
400 if (pos == MDS_DIR_END_OFF) {
402 pos = LL_DIR_END_OFF_32BIT;
404 pos = LL_DIR_END_OFF;
409 #ifdef HAVE_DIR_CONTEXT
414 ll_finish_md_op_data(op_data);
418 ll_stats_ops_tally(sbi, LPROC_LL_READDIR,
419 ktime_us_delta(ktime_get(), kstart));
425 * Create striped directory with specified stripe(@lump)
427 * \param[in] dparent the parent of the directory.
428 * \param[in] lump the specified stripes.
429 * \param[in] dirname the name of the directory.
430 * \param[in] mode the specified mode of the directory.
432 * \retval =0 if striped directory is being created successfully.
433 * <0 if creation failed.
435 static int ll_dir_setdirstripe(struct dentry *dparent, struct lmv_user_md *lump,
436 size_t len, const char *dirname, umode_t mode,
/*
 * NOTE(review): interior lines (struct body close, error-path labels,
 * RETURN) are elided in this extract — verify against full source.
 */
439 struct inode *parent = dparent->d_inode;
440 struct ptlrpc_request *request = NULL;
441 struct md_op_data *op_data;
442 struct ll_sb_info *sbi = ll_i2sbi(parent);
443 struct inode *inode = NULL;
/* fake dentry: real enough for security-context and fscrypt helpers below */
444 struct dentry dentry = {
448 .len = strlen(dirname),
449 .hash = ll_full_name_hash(dparent, dirname,
452 .d_sb = dparent->d_sb,
454 bool encrypt = false;
459 if (unlikely(!lmv_user_magic_supported(lump->lum_magic)))
462 if (lump->lum_magic != LMV_MAGIC_FOREIGN) {
464 "VFS Op:inode="DFID"(%p) name=%s stripe_offset=%d stripe_count=%u, hash_type=%x\n",
465 PFID(ll_inode2fid(parent)), parent, dirname,
466 (int)lump->lum_stripe_offset, lump->lum_stripe_count,
467 lump->lum_hash_type);
469 struct lmv_foreign_md *lfm = (struct lmv_foreign_md *)lump;
472 "VFS Op:inode="DFID"(%p) name %s foreign, length %u, value '%.*s'\n",
473 PFID(ll_inode2fid(parent)), parent, dirname,
474 lfm->lfm_length, lfm->lfm_length, lfm->lfm_value);
/* multi-stripe dirs need server-side DIR_STRIPE support */
477 if (lump->lum_stripe_count > 1 &&
478 !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE))
481 if (IS_DEADDIR(parent) &&
482 !CFS_FAIL_CHECK(OBD_FAIL_LLITE_NO_CHECK_DEAD))
485 /* MDS < 2.14 doesn't support 'crush' hash type, and cannot handle
486 * unknown hash if client doesn't set a valid one. switch to fnv_1a_64.
488 if (CFS_FAIL_CHECK(OBD_FAIL_LMV_UNKNOWN_STRIPE)) {
489 lump->lum_hash_type = cfs_fail_val;
490 } else if (!(exp_connect_flags2(sbi->ll_md_exp) & OBD_CONNECT2_CRUSH)) {
491 enum lmv_hash_type type = lump->lum_hash_type &
494 if (type >= LMV_HASH_TYPE_CRUSH ||
495 type == LMV_HASH_TYPE_UNKNOWN)
496 lump->lum_hash_type = (lump->lum_hash_type ^ type) |
497 LMV_HASH_TYPE_FNV_1A_64;
500 hash_flags = lump->lum_hash_type & ~LMV_HASH_TYPE_MASK;
501 if (hash_flags & ~LMV_HASH_FLAG_KNOWN)
/* magic only matches in swabbed form: convert user MD to host endian */
504 if (unlikely(!lmv_user_magic_supported(cpu_to_le32(lump->lum_magic))))
505 lustre_swab_lmv_user_md(lump);
507 if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
508 mode &= ~current_umask();
509 mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
510 op_data = ll_prep_md_op_data(NULL, parent, NULL, dirname,
511 strlen(dirname), mode, LUSTRE_OPC_MKDIR,
514 RETURN(PTR_ERR(op_data));
516 op_data->op_dir_depth = ll_i2info(parent)->lli_inherit_depth ?:
517 ll_i2info(parent)->lli_dir_depth;
/* encrypted parent (or test dummy mode): require the key before creating */
519 if (ll_sbi_has_encrypt(sbi) &&
520 (IS_ENCRYPTED(parent) ||
521 unlikely(ll_sb_has_test_dummy_encryption(parent->i_sb)))) {
522 err = llcrypt_prepare_readdir(parent);
524 GOTO(out_op_data, err);
525 if (!llcrypt_has_encryption_key(parent))
526 GOTO(out_op_data, err = -ENOKEY);
530 if (test_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags)) {
531 /* selinux_dentry_init_security() uses dentry->d_parent and name
532 * to determine the security context for the file. So our fake
533 * dentry should be real enough for this purpose. */
534 err = ll_dentry_init_security(&dentry, mode, &dentry.d_name,
535 &op_data->op_file_secctx_name,
536 &op_data->op_file_secctx_name_size,
537 &op_data->op_file_secctx,
538 &op_data->op_file_secctx_size,
539 &op_data->op_file_secctx_slot);
541 GOTO(out_op_data, err);
545 err = llcrypt_inherit_context(parent, NULL, op_data, false);
547 GOTO(out_op_data, err);
550 op_data->op_cli_flags |= CLI_SET_MEA;
552 op_data->op_bias |= MDS_SETSTRIPE_CREATE;
554 err = md_create(sbi->ll_md_exp, op_data, lump, len, mode,
555 from_kuid(&init_user_ns, current_fsuid()),
556 from_kgid(&init_user_ns, current_fsgid()),
557 current_cap(), 0, &request);
559 GOTO(out_request, err);
561 CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_SETDIRSTRIPE_PAUSE, cfs_fail_val);
563 err = ll_prep_inode(&inode, &request->rq_pill, parent->i_sb, NULL);
565 GOTO(out_inode, err);
567 dentry.d_inode = inode;
569 if (test_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags))
570 err = ll_inode_notifysecctx(inode, op_data->op_file_secctx,
571 op_data->op_file_secctx_size);
573 err = ll_inode_init_security(&dentry, inode, parent);
576 GOTO(out_inode, err);
579 err = ll_set_encflags(inode, op_data->op_file_encctx,
580 op_data->op_file_encctx_size, false);
582 GOTO(out_inode, err);
588 ptlrpc_req_finished(request);
590 ll_finish_md_op_data(op_data);
/*
 * ll_dir_setstripe() - set the default LOV/LMV striping on a directory.
 *
 * Validates the user-supplied layout magic, computes the EA size for each
 * supported magic (LOV v1/v3/composite/specific, LMV user MD), swabs
 * user-endian input to little endian where needed, then sends it to the MDS
 * via md_setattr() (server-side lov_setstripe() does the final swabbing).
 *
 * NOTE(review): interior lines (break statements, default case close,
 * RETURN) are elided in this extract.
 */
595 int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
598 struct ll_sb_info *sbi = ll_i2sbi(inode);
599 struct md_op_data *op_data;
600 struct ptlrpc_request *req = NULL;
/* per-magic EA size; also fixes up hash type / endianness where needed */
606 switch (lump->lmm_magic) {
607 case LOV_USER_MAGIC_V1:
608 lum_size = sizeof(struct lov_user_md_v1);
610 case LOV_USER_MAGIC_V3:
611 lum_size = sizeof(struct lov_user_md_v3);
613 case LOV_USER_MAGIC_COMP_V1:
614 lum_size = ((struct lov_comp_md_v1 *)lump)->lcm_size;
616 case LMV_USER_MAGIC: {
617 struct lmv_user_md *lmv = (struct lmv_user_md *)lump;
619 /* MDS < 2.14 doesn't support 'crush' hash type, and
620 * cannot handle unknown hash if client doesn't set a
621 * valid one. switch to fnv_1a_64.
623 if (!(exp_connect_flags2(sbi->ll_md_exp) &
624 OBD_CONNECT2_CRUSH)) {
625 enum lmv_hash_type type = lmv->lum_hash_type &
628 if (type >= LMV_HASH_TYPE_CRUSH ||
629 type == LMV_HASH_TYPE_UNKNOWN)
631 (lmv->lum_hash_type ^ type) |
632 LMV_HASH_TYPE_FNV_1A_64;
634 if (lmv->lum_magic != cpu_to_le32(LMV_USER_MAGIC))
635 lustre_swab_lmv_user_md(lmv);
636 lum_size = sizeof(*lmv);
639 case LOV_USER_MAGIC_SPECIFIC: {
640 struct lov_user_md_v3 *v3 =
641 (struct lov_user_md_v3 *)lump;
642 if (v3->lmm_stripe_count > LOV_MAX_STRIPE_COUNT)
644 lum_size = lov_user_md_size(v3->lmm_stripe_count,
645 LOV_USER_MAGIC_SPECIFIC);
650 "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
651 lump->lmm_magic, LOV_USER_MAGIC_V1,
656 /* This is coming from userspace, so should be in
657 * local endian. But the MDS would like it in little
658 * endian, so we swab it before we send it.
660 if ((__swab32(lump->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
661 le32_to_cpu(LOV_MAGIC_MAGIC))
662 lustre_swab_lov_user_md(lump, 0);
664 lum_size = sizeof(struct lov_user_md_v1);
667 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
668 LUSTRE_OPC_ANY, NULL);
670 RETURN(PTR_ERR(op_data));
672 /* swabbing is done in lov_setstripe() on server side */
673 rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req);
674 ll_finish_md_op_data(op_data);
675 ptlrpc_req_finished(req);
682 /* get default LMV from client cache */
/*
 * ll_dir_get_default_lmv() - fill @lum from the cached default LMV of
 * @inode; fall back to the filesystem-wide default (root inode) on -ENODATA.
 * For inherited defaults, the max_inherit / max_inherit_rr counters are
 * decremented by the directory depth, or the default is dropped/cleared
 * when inheritance no longer reaches this depth.
 *
 * NOTE(review): interior lines (rc declaration, loop back to fs default,
 * out label, RETURN) are elided in this extract.
 */
683 static int ll_dir_get_default_lmv(struct inode *inode, struct lmv_user_md *lum)
685 struct ll_inode_info *lli = ll_i2info(inode);
686 const struct lmv_stripe_md *lsm;
687 bool fs_dmv_got = false;
/* read the cached default LSM under lli_lsm_sem */
692 if (lli->lli_def_lsm_obj) {
693 down_read(&lli->lli_lsm_sem);
694 lsm = &lli->lli_def_lsm_obj->lso_lsm;
696 lum->lum_magic = lsm->lsm_md_magic;
697 lum->lum_stripe_count = lsm->lsm_md_stripe_count;
698 lum->lum_stripe_offset = lsm->lsm_md_master_mdt_index;
699 lum->lum_hash_type = lsm->lsm_md_hash_type;
700 lum->lum_max_inherit = lsm->lsm_md_max_inherit;
701 lum->lum_max_inherit_rr = lsm->lsm_md_max_inherit_rr;
704 up_read(&lli->lli_lsm_sem);
/* no per-dir default: retry with the filesystem-wide default on root */
707 if (rc == -ENODATA && !is_root_inode(inode) && !fs_dmv_got) {
708 lli = ll_i2info(inode->i_sb->s_root->d_inode);
/* adjust inherit depths of an fs-wide default relative to this dir */
713 if (!rc && fs_dmv_got) {
714 lli = ll_i2info(inode);
715 if (lum->lum_max_inherit != LMV_INHERIT_UNLIMITED) {
716 if (lum->lum_max_inherit == LMV_INHERIT_NONE ||
717 lum->lum_max_inherit < LMV_INHERIT_END ||
718 lum->lum_max_inherit > LMV_INHERIT_MAX ||
719 lum->lum_max_inherit <= lli->lli_dir_depth)
720 GOTO(out, rc = -ENODATA);
722 lum->lum_max_inherit -= lli->lli_dir_depth;
725 if (lum->lum_max_inherit_rr != LMV_INHERIT_RR_UNLIMITED) {
726 if (lum->lum_max_inherit_rr == LMV_INHERIT_NONE ||
727 lum->lum_max_inherit_rr < LMV_INHERIT_RR_END ||
728 lum->lum_max_inherit_rr > LMV_INHERIT_RR_MAX ||
729 lum->lum_max_inherit_rr <= lli->lli_dir_depth)
730 lum->lum_max_inherit_rr = LMV_INHERIT_RR_NONE;
732 if (lum->lum_max_inherit_rr > lli->lli_dir_depth)
733 lum->lum_max_inherit_rr -= lli->lli_dir_depth;
/*
 * ll_dir_get_default_layout() - fetch the default LOV/LMV EA of @inode
 * (or of the filesystem root when @type == GET_DEFAULT_LAYOUT_ROOT) from
 * the MDS via md_getattr(), swab it to host endian per magic, and return
 * the MD buffer and size through @plmm / @plmm_size. The ptlrpc request is
 * returned to the caller via @request so the buffer stays pinned.
 *
 * NOTE(review): interior lines (rc/fid declarations, case labels for
 * LOV_MAGIC_V1/V3 and LMV magics, break statements, out label, RETURN)
 * are elided in this extract.
 */
740 int ll_dir_get_default_layout(struct inode *inode, void **plmm, int *plmm_size,
741 struct ptlrpc_request **request, u64 valid,
742 enum get_default_layout_type type)
744 struct ll_sb_info *sbi = ll_i2sbi(inode);
745 struct mdt_body *body;
746 struct lov_mds_md *lmm = NULL;
747 struct ptlrpc_request *req = NULL;
748 int lmm_size = OBD_MAX_DEFAULT_EA_SIZE;
749 struct md_op_data *op_data;
755 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, lmm_size,
756 LUSTRE_OPC_ANY, NULL);
758 RETURN(PTR_ERR(op_data));
760 op_data->op_valid = valid | OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
762 if (type == GET_DEFAULT_LAYOUT_ROOT) {
763 lu_root_fid(&op_data->op_fid1);
764 fid = op_data->op_fid1;
766 fid = *ll_inode2fid(inode);
769 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
770 ll_finish_md_op_data(op_data);
772 CDEBUG(D_INFO, "md_getattr failed on inode "DFID": rc %d\n",
777 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
778 LASSERT(body != NULL);
780 lmm_size = body->mbo_eadatasize;
/* no striping EA present on this directory */
782 if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
784 GOTO(out, rc = -ENODATA);
787 lmm = req_capsule_server_sized_get(&req->rq_pill,
788 &RMF_MDT_MD, lmm_size);
789 LASSERT(lmm != NULL);
791 /* This is coming from the MDS, so is probably in
792 * little endian. We convert it to host endian before
793 * passing it to userspace.
795 /* We don't swab objects for directories */
796 switch (le32_to_cpu(lmm->lmm_magic)) {
799 case LOV_MAGIC_COMP_V1:
800 case LOV_USER_MAGIC_SPECIFIC:
801 if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
802 lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
805 if (LMV_MAGIC != cpu_to_le32(LMV_MAGIC))
806 lustre_swab_lmv_mds_md((union lmv_mds_md *)lmm);
809 if (LMV_USER_MAGIC != cpu_to_le32(LMV_USER_MAGIC))
810 lustre_swab_lmv_user_md((struct lmv_user_md *)lmm);
812 case LMV_MAGIC_FOREIGN: {
813 struct lmv_foreign_md *lfm = (struct lmv_foreign_md *)lmm;
/* foreign MD has its own fixed header fields to swab individually */
815 if (LMV_MAGIC_FOREIGN != cpu_to_le32(LMV_MAGIC_FOREIGN)) {
816 __swab32s(&lfm->lfm_magic);
817 __swab32s(&lfm->lfm_length);
818 __swab32s(&lfm->lfm_type);
819 __swab32s(&lfm->lfm_flags);
825 CERROR("%s: unknown magic: %lX: rc = %d\n", sbi->ll_fsname,
826 (unsigned long)lmm->lmm_magic, rc);
830 *plmm_size = lmm_size;
836 * This function will be used to get default LOV/LMV/Default LMV
837 * @valid will be used to indicate which stripe it will retrieve.
838 * If the directory does not have its own default layout, then the
839 * function will request the default layout from root FID.
840 * OBD_MD_MEA LMV stripe EA
841 * OBD_MD_DEFAULT_MEA Default LMV stripe EA
842 * otherwise Default LOV EA.
843 * Each time, it can only retrieve 1 stripe EA
/*
 * NOTE(review): interior lines (lmm_size/rc declarations, assignments to
 * *plmm/*request, RETURN) are elided in this extract.
 */
845 int ll_dir_getstripe_default(struct inode *inode, void **plmm, int *plmm_size,
846 struct ptlrpc_request **request,
847 struct ptlrpc_request **root_request,
850 struct ptlrpc_request *req = NULL;
851 struct ptlrpc_request *root_req = NULL;
852 struct lov_mds_md *lmm = NULL;
857 rc = ll_dir_get_default_layout(inode, (void **)&lmm, &lmm_size,
/* dir had no default of its own: fall back to the fs-wide root default */
859 if (rc == -ENODATA && !fid_is_root(ll_inode2fid(inode)) &&
860 !(valid & OBD_MD_MEA) && root_request != NULL) {
861 int rc2 = ll_dir_get_default_layout(inode, (void **)&lmm,
862 &lmm_size, &root_req, valid,
863 GET_DEFAULT_LAYOUT_ROOT);
869 *plmm_size = lmm_size;
871 if (root_request != NULL)
872 *root_request = root_req;
878 * This function will be used to get default LOV/LMV/Default LMV
879 * @valid will be used to indicate which stripe it will retrieve
880 * OBD_MD_MEA LMV stripe EA
881 * OBD_MD_DEFAULT_MEA Default LMV stripe EA
882 * otherwise Default LOV EA.
883 * Each time, it can only retrieve 1 stripe EA
/*
 * Like ll_dir_getstripe_default() but without the fall-back to the root
 * (filesystem-wide) default layout.
 * NOTE(review): interior lines (lmm_size/rc, *plmm/*request assignments,
 * RETURN) are elided in this extract.
 */
885 int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
886 struct ptlrpc_request **request, u64 valid)
888 struct ptlrpc_request *req = NULL;
889 struct lov_mds_md *lmm = NULL;
894 rc = ll_dir_get_default_layout(inode, (void **)&lmm, &lmm_size,
898 *plmm_size = lmm_size;
/*
 * ll_get_mdt_idx_by_fid() - ask the MD layer which MDT index holds @fid.
 * Uses a getattr with MF_GET_MDT_IDX set; the answer comes back in
 * op_data->op_mds.
 * NOTE(review): interior lines (mdt_index/rc declarations, error check,
 * RETURN) are elided in this extract.
 */
904 int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid)
906 struct md_op_data *op_data;
911 OBD_ALLOC_PTR(op_data);
915 op_data->op_flags |= MF_GET_MDT_IDX;
916 op_data->op_fid1 = *fid;
/* NULL request: we only want the side-band op_mds answer, not the reply */
917 rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
918 mdt_index = op_data->op_mds;
919 OBD_FREE_PTR(op_data);
927 * Get MDT index for the inode.
/* Thin wrapper: resolve sbi and FID from @inode, then query by FID. */
929 int ll_get_mdt_idx(struct inode *inode)
931 return ll_get_mdt_idx_by_fid(ll_i2sbi(inode), ll_inode2fid(inode));
935 * Generic handler to do any pre-copy work.
937 * It sends a first hsm_progress (with extent length == 0) to coordinator as a
938 * first information for it that real work has started.
940 * Moreover, for a ARCHIVE request, it will sample the file data version and
941 * store it in \a copy.
943 * \return 0 on success.
/*
 * NOTE(review): interior lines (rc/rc2 declarations, iput of the inode,
 * progress label, GOTO targets) are elided in this extract.
 */
945 static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
947 struct ll_sb_info *sbi = ll_s2sbi(sb);
948 struct hsm_progress_kernel hpk;
953 /* Forge a hsm_progress based on data from copy. */
954 hpk.hpk_fid = copy->hc_hai.hai_fid;
955 hpk.hpk_cookie = copy->hc_hai.hai_cookie;
956 hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
957 hpk.hpk_extent.length = 0;
960 hpk.hpk_data_version = 0;
963 /* For archive request, we need to read the current file version. */
964 if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
966 __u64 data_version = 0;
968 /* Get inode for this fid */
969 inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
971 hpk.hpk_flags |= HP_FLAG_RETRY;
972 /* hpk_errval is >= 0 */
973 hpk.hpk_errval = -PTR_ERR(inode);
974 GOTO(progress, rc = PTR_ERR(inode));
977 /* Read current file data version */
978 rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
981 CDEBUG(D_HSM, "Could not read file data version of "
982 DFID" (rc = %d). Archive request ("
983 "%#llx) could not be done.\n",
984 PFID(&copy->hc_hai.hai_fid), rc,
985 copy->hc_hai.hai_cookie);
986 hpk.hpk_flags |= HP_FLAG_RETRY;
987 /* hpk_errval must be >= 0 */
988 hpk.hpk_errval = -rc;
992 /* Store in the hsm_copy for later copytool use.
993 * Always modified even if no lsm. */
994 copy->hc_data_version = data_version;
998 /* On error, the request should be considered as completed */
999 if (hpk.hpk_errval > 0)
1000 hpk.hpk_flags |= HP_FLAG_COMPLETED;
/* tell the coordinator that the copy work has started */
1002 rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
1005 /* Return first error */
1006 RETURN(rc != 0 ? rc : rc2);
1010 * Generic handler to do any post-copy work.
1012 * It will send the last hsm_progress update to coordinator to inform it
1013 * that copy is finished and whether it was successful or not.
1016 * - for ARCHIVE request, it will sample the file data version and compare it
1017 * with the version saved in ll_ioc_copy_start(). If they do not match, copy
1018 * will be considered as failed.
1019 * - for RESTORE request, it will sample the file data version and send it to
1020 * coordinator which is useful if the file was imported as 'released'.
1022 * \return 0 on success.
/*
 * NOTE(review): interior lines (rc/rc2 declarations, iput, progress label,
 * GOTO targets, RETURN paths) are elided in this extract.
 */
1024 static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
1026 struct ll_sb_info *sbi = ll_s2sbi(sb);
1027 struct hsm_progress_kernel hpk;
1032 /* If you modify the logic here, also check llapi_hsm_copy_end(). */
1033 /* Take care: copy->hc_hai.hai_action, len, gid and data are not
1034 * initialized if copy_end was called with copy == NULL.
1037 /* Forge a hsm_progress based on data from copy. */
1038 hpk.hpk_fid = copy->hc_hai.hai_fid;
1039 hpk.hpk_cookie = copy->hc_hai.hai_cookie;
1040 hpk.hpk_extent = copy->hc_hai.hai_extent;
1041 hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
1042 hpk.hpk_errval = copy->hc_errval;
1043 hpk.hpk_data_version = 0;
1045 /* For archive request, we need to check the file data was not changed.
1047 * For restore request, we need to send the file data version, this is
1048 * useful when the file was created using hsm_import.
1050 if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
1051 (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
1052 (copy->hc_errval == 0)) {
1053 struct inode *inode;
1054 __u64 data_version = 0;
1056 /* Get lsm for this fid */
1057 inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
1058 if (IS_ERR(inode)) {
1059 hpk.hpk_flags |= HP_FLAG_RETRY;
1060 /* hpk_errval must be >= 0 */
1061 hpk.hpk_errval = -PTR_ERR(inode);
1062 GOTO(progress, rc = PTR_ERR(inode));
1065 rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
1069 "Could not read file data version. Request could not be confirmed.\n");
1070 if (hpk.hpk_errval == 0)
1071 hpk.hpk_errval = -rc;
1075 /* Store in the hsm_copy for later copytool use.
1076 * Always modified even if no lsm. */
1077 hpk.hpk_data_version = data_version;
1079 /* File could have been stripped during archiving, so we need
1080 * to check anyway. */
1081 if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
1082 (copy->hc_data_version != data_version)) {
1083 CDEBUG(D_HSM, "File data version mismatched. "
1084 "File content was changed during archiving. "
1085 DFID", start:%#llx current:%#llx\n",
1086 PFID(&copy->hc_hai.hai_fid),
1087 copy->hc_data_version, data_version);
1088 /* File was changed, send error to cdt. Do not ask for
1089 * retry because if a file is modified frequently,
1090 * the cdt will loop on retried archive requests.
1091 * The policy engine will ask for a new archive later
1092 * when the file will not be modified for some tunable
1094 hpk.hpk_flags &= ~HP_FLAG_RETRY;
1096 /* hpk_errval must be >= 0 */
1097 hpk.hpk_errval = -rc;
/* final progress update to the coordinator for this request */
1104 rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
1107 /* Return first error */
1108 RETURN(rc != 0 ? rc : rc2);
/*
 * copy_and_ct_start() - copy a lustre_kernelcomm from userspace, translate
 * between the old archive-id bitmask format and the new LK_FLG_DATANR
 * array format (in both directions, depending on what the agent sent and
 * what the MDS supports), then start the copytool via obd_iocontrol().
 *
 * NOTE(review): interior lines (rc/i/count declarations, swap of lk/tmp,
 * out_lk label, frees, RETURN) are elided in this extract.
 */
1112 static int copy_and_ct_start(int cmd, struct obd_export *exp,
1113 const struct lustre_kernelcomm __user *data)
1115 struct lustre_kernelcomm *lk;
1116 struct lustre_kernelcomm *tmp;
1117 size_t size = sizeof(*lk);
1122 /* copy data from userspace to get numbers of archive_id */
1123 OBD_ALLOC(lk, size);
1127 if (copy_from_user(lk, data, size))
1128 GOTO(out_lk, rc = -EFAULT);
1130 if (lk->lk_flags & LK_FLG_STOP)
1133 if (!(lk->lk_flags & LK_FLG_DATANR)) {
1134 __u32 archive_mask = lk->lk_data_count;
1137 /* old hsm agent to old MDS */
1138 if (!exp_connect_archive_id_array(exp))
1141 /* old hsm agent to new MDS */
1142 lk->lk_flags |= LK_FLG_DATANR;
1144 if (archive_mask == 0)
/* expand the bitmask into an explicit array of archive ids */
1147 count = hweight32(archive_mask);
1148 new_size = offsetof(struct lustre_kernelcomm, lk_data[count]);
1149 OBD_ALLOC(tmp, new_size);
1151 GOTO(out_lk, rc = -ENOMEM);
1153 memcpy(tmp, lk, size);
1154 tmp->lk_data_count = count;
/* bit i set in the mask means archive id (i + 1) is requested */
1160 for (i = 0; i < sizeof(archive_mask) * 8; i++) {
1161 if (BIT(i) & archive_mask) {
1162 lk->lk_data[count] = i + 1;
1169 /* new hsm agent to new mds */
1170 if (lk->lk_data_count > 0) {
1171 new_size = offsetof(struct lustre_kernelcomm,
1172 lk_data[lk->lk_data_count]);
1173 OBD_ALLOC(tmp, new_size);
1175 GOTO(out_lk, rc = -ENOMEM);
/* re-copy with the full variable-length lk_data array included */
1181 if (copy_from_user(lk, data, size))
1182 GOTO(out_lk, rc = -EFAULT);
1185 /* new hsm agent to old MDS */
1186 if (!exp_connect_archive_id_array(exp)) {
1189 if (lk->lk_data_count > LL_HSM_ORIGIN_MAX_ARCHIVE)
1190 GOTO(out_lk, rc = -EINVAL);
1192 for (i = 0; i < lk->lk_data_count; i++) {
1193 if (lk->lk_data[i] > LL_HSM_ORIGIN_MAX_ARCHIVE) {
1195 CERROR("%s: archive id %d requested but only [0 - %zu] supported: rc = %d\n",
1196 exp->exp_obd->obd_name, lk->lk_data[i],
1197 LL_HSM_ORIGIN_MAX_ARCHIVE, rc);
1201 if (lk->lk_data[i] == 0) {
1206 archives |= (1 << (lk->lk_data[i] - 1));
1208 lk->lk_flags &= ~LK_FLG_DATANR;
1209 lk->lk_data_count = archives;
1212 rc = obd_iocontrol(cmd, exp, size, lk, NULL);
/*
 * check_owner() - permission check for quota operations: the caller must
 * match @id as effective uid (user quota) or belong to group @id (group
 * quota), depending on @type.
 * NOTE(review): the switch/case structure and return values are elided in
 * this extract — only the two identity checks are visible.
 */
1218 static int check_owner(int type, int id)
1222 if (!uid_eq(current_euid(), make_kuid(&init_user_ns, id)))
1226 if (!in_egroup_p(make_kgid(&init_user_ns, id)))
/* slab cache for if_quotactl_iter records; quotactl_iter_lock serializes
 * creation/teardown of quota iterations */
1235 struct kmem_cache *quota_iter_slab;
1236 static DEFINE_MUTEX(quotactl_iter_lock);
/* One in-progress quota iteration: identified by lqil_mark, linked on the
 * per-superblock list and owning a list of per-id iter records. */
1238 struct ll_quotactl_iter_list {
1239 __u64 lqil_mark; /* iter identifier */
1240 __u32 lqil_flags; /* what has been done */
1241 pid_t lqil_pid; /* debug calling task */
1242 time64_t lqil_iter_time; /* the time to iter */
1243 struct list_head lqil_sbi_list; /* list on ll_sb_info */
1244 struct list_head lqil_quotactl_iter_list; /* list of quota iters */
/*
 * ll_quota_iter_check_and_cleanup() - walk the superblock's quota-iteration
 * list and free stale iterations together with all their per-id records.
 * The visible age test (86400 s) suggests iterations older than a day are
 * reclaimed; the exact @check semantics are not visible in this extract.
 */
1247 void ll_quota_iter_check_and_cleanup(struct ll_sb_info *sbi, bool check)
1249 struct if_quotactl_iter *iter_rec = NULL;
1250 struct ll_quotactl_iter_list *tmp, *ll_iter = NULL;
1253 mutex_lock(&quotactl_iter_lock);
1255 list_for_each_entry_safe(ll_iter, tmp, &sbi->ll_all_quota_list,
/* skip iterations younger than one day (86400 seconds) */
1258 ll_iter->lqil_iter_time > (ktime_get_seconds() - 86400))
/* free every per-id record attached to this iteration */
1261 while ((iter_rec = list_first_entry_or_null(
1262 &ll_iter->lqil_quotactl_iter_list,
1263 struct if_quotactl_iter,
1264 qci_link)) != NULL) {
1265 list_del_init(&iter_rec->qci_link);
1266 OBD_SLAB_FREE_PTR(iter_rec, quota_iter_slab);
1269 list_del_init(&ll_iter->lqil_sbi_list);
1270 OBD_FREE_PTR(ll_iter);
1274 mutex_unlock(&quotactl_iter_lock);
1277 /* iterate the quota usage from all QSDs */
/*
 * quotactl_iter_acct() - parse a buffer of (qid, lquota_acct_rec) pairs and
 * accumulate usage (inodes for MD, block space otherwise) into matching
 * if_quotactl_iter records on @quota_list, allocating a new record when an
 * id is seen for the first time.
 * NOTE(review): interior lines (cur/qid declarations, loop bookkeeping,
 * record-found branch, RETURN) are elided in this extract.
 */
1278 static int quotactl_iter_acct(struct list_head *quota_list, void *buffer,
1279 __u64 size, __u64 *count, __u32 qtype, bool is_md)
1281 struct if_quotactl_iter *tmp, *iter = NULL;
1282 struct lquota_acct_rec *acct;
1288 while (cur < size) {
/* need at least one full (qid, acct) pair remaining in the buffer */
1290 (sizeof(qid) + sizeof(*acct))) {
1295 qid = *((__u64 *)(buffer + cur));
1297 acct = (struct lquota_acct_rec *)(buffer + cur);
1298 cur += sizeof(*acct);
/* look for an existing record for this quota id */
1301 list_for_each_entry(tmp, quota_list, qci_link) {
1302 if (tmp->qci_qc.qc_id == (__u32)qid) {
1309 CDEBUG(D_QUOTA, "can't find the iter record for %llu\n",
1315 OBD_SLAB_ALLOC_PTR(iter, quota_iter_slab);
1321 INIT_LIST_HEAD(&iter->qci_link);
1322 iter->qci_qc.qc_id = 0;
1323 iter->qci_qc.qc_type = qtype;
1326 list_add(&iter->qci_link, quota_list);
/* MD targets contribute inode counts; data targets contribute space */
1330 iter->qci_qc.qc_dqblk.dqb_valid |= QIF_INODES;
1331 iter->qci_qc.qc_dqblk.dqb_curinodes += acct->ispace;
1332 iter->qci_qc.qc_dqblk.dqb_curspace += acct->bspace;
1334 iter->qci_qc.qc_dqblk.dqb_valid |= QIF_SPACE;
1335 iter->qci_qc.qc_dqblk.dqb_curspace += acct->bspace;
1342 /* iterate all quota settings from QMT */
/*
 * quotactl_iter_glb() - fold master-side (QMT) global limit records from a
 * raw @buffer of (qid, lquota_glb_rec) pairs into @quota_list, keeping the
 * list sorted by id (see the insertion scan below) and filling inode
 * limits (when @is_md) or block limits into the dqblk.
 * NOTE(review): interior lines are elided in this listing; assignments to
 * the *hardlimit/*softlimit fields are split across missing lines.
 */
1343 static int quotactl_iter_glb(struct list_head *quota_list, void *buffer,
1344 __u64 size, __u64 *count, __u32 qtype, bool is_md)
1346 struct if_quotactl_iter *tmp, *iter = NULL;
1347 struct lquota_glb_rec *glb;
1349 bool inserted = false;
1354 while (cur < size) {
/* guard: remaining bytes must hold a full (qid, glb) pair */
1356 (sizeof(qid) + sizeof(*glb))) {
1361 qid = *((__u64 *)(buffer + cur));
1363 glb = (struct lquota_glb_rec *)(buffer + cur);
1364 cur += sizeof(*glb);
/* reuse an existing record for this id if present */
1367 list_for_each_entry(tmp, quota_list, qci_link) {
1368 if (tmp->qci_qc.qc_id == (__u32)qid) {
1375 OBD_SLAB_ALLOC_PTR(iter, quota_iter_slab);
1381 INIT_LIST_HEAD(&iter->qci_link);
/* insert in ascending id order */
1384 list_for_each_entry(tmp, quota_list, qci_link) {
1385 if (tmp->qci_qc.qc_id < qid)
1389 list_add_tail(&iter->qci_link,
1395 list_add_tail(&iter->qci_link, quota_list);
1397 iter->qci_qc.qc_type = qtype;
1398 iter->qci_qc.qc_id = (__u32)qid;
/* MDT globals populate inode limits, OST globals block limits */
1403 iter->qci_qc.qc_dqblk.dqb_valid |= QIF_ILIMITS;
1404 iter->qci_qc.qc_dqblk.dqb_ihardlimit =
1406 iter->qci_qc.qc_dqblk.dqb_isoftlimit =
1408 iter->qci_qc.qc_dqblk.dqb_itime = glb->qbr_time;
1410 iter->qci_qc.qc_dqblk.dqb_valid |= QIF_BLIMITS;
1411 iter->qci_qc.qc_dqblk.dqb_bhardlimit =
1413 iter->qci_qc.qc_dqblk.dqb_bsoftlimit =
1415 iter->qci_qc.qc_dqblk.dqb_btime = glb->qbr_time;
1422 /* iterate the quota setting from QMT and all QSDs to get the quota information
1423 * for all users or groups
/*
 * quotactl_iter() - implement LUSTRE_Q_ITERQUOTA: run three obd_quotactl
 * iterations (QMT globals, MDT accounting, OST accounting), merge their
 * buffers into a sorted per-id result list, stamp it with a unique
 * lqil_mark, and park it on sbi->ll_all_quota_list for a later
 * LUSTRE_Q_GETALLQUOTA fetch.  The three trailing while-loops are the
 * error-path cleanup that frees any unconsumed lquota_iter buffers.
 * NOTE(review): mutex_lock("actl_iter_lock) here and at the end looks like
 * text-mangled mutex_lock(&quotactl_iter_lock) — verify against upstream.
 * NOTE(review): interior lines (error checks after each obd_quotactl,
 * labels, RETURN) are elided in this listing.
 */
1425 static int quotactl_iter(struct ll_sb_info *sbi, struct if_quotactl *qctl)
1427 struct list_head iter_quota_glb_list;
1428 struct list_head iter_obd_quota_md_list;
1429 struct list_head iter_obd_quota_dt_list;
1430 struct ll_quotactl_iter_list *ll_iter;
1431 struct lquota_iter *iter;
1432 struct obd_quotactl *oqctl;
1438 OBD_ALLOC_PTR(ll_iter);
1439 if (ll_iter == NULL)
1442 INIT_LIST_HEAD(&ll_iter->lqil_sbi_list);
1443 INIT_LIST_HEAD(&ll_iter->lqil_quotactl_iter_list);
1445 mutex_lock("actl_iter_lock);
/* expire stale results from earlier iterations first */
1447 if (!list_empty(&sbi->ll_all_quota_list))
1448 ll_quota_iter_check_and_cleanup(sbi, true);
1450 INIT_LIST_HEAD(&iter_quota_glb_list);
1451 INIT_LIST_HEAD(&iter_obd_quota_md_list);
1452 INIT_LIST_HEAD(&iter_obd_quota_dt_list);
1454 OBD_ALLOC_PTR(oqctl);
1456 GOTO(out, rc = -ENOMEM);
/* pass 1: global limits from the QMT via the MD export */
1458 QCTL_COPY(oqctl, qctl);
1459 oqctl->qc_iter_list = (__u64)&iter_quota_glb_list;
1460 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
/* pass 2: MDT slave accounting */
1464 QCTL_COPY(oqctl, qctl);
1465 oqctl->qc_cmd = LUSTRE_Q_ITEROQUOTA;
1466 oqctl->qc_iter_list = (__u64)&iter_obd_quota_md_list;
1467 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
/* pass 3: OST slave accounting */
1471 QCTL_COPY(oqctl, qctl);
1472 oqctl->qc_cmd = LUSTRE_Q_ITEROQUOTA;
1473 oqctl->qc_iter_list = (__u64)&iter_obd_quota_dt_list;
1474 rc = obd_quotactl(sbi->ll_dt_exp, oqctl);
/* merge global-limit buffers; each buffer holds MD data in the first
 * half and DT data in the second half (LQUOTA_ITER_BUFLEN / 2 below) */
1479 while ((iter = list_first_entry_or_null(&iter_quota_glb_list,
1480 struct lquota_iter, li_link))) {
1483 buffer = iter->li_buffer;
1484 rc = quotactl_iter_glb(&ll_iter->lqil_quotactl_iter_list,
1485 buffer, iter->li_md_size, &count,
1486 oqctl->qc_type, true);
1490 buffer = iter->li_buffer + LQUOTA_ITER_BUFLEN / 2;
1491 rc = quotactl_iter_glb(&ll_iter->lqil_quotactl_iter_list,
1492 buffer, iter->li_dt_size, &count,
1493 oqctl->qc_type, false);
1498 list_del_init(&iter->li_link);
1499 OBD_FREE_LARGE(iter,
1500 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
/* merge MDT accounting buffers */
1503 while ((iter = list_first_entry_or_null(&iter_obd_quota_md_list,
1504 struct lquota_iter, li_link))) {
1505 rc = quotactl_iter_acct(&ll_iter->lqil_quotactl_iter_list,
1506 iter->li_buffer, iter->li_md_size,
1507 &count, oqctl->qc_type, true);
1511 list_del_init(&iter->li_link);
1512 OBD_FREE_LARGE(iter,
1513 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
/* merge OST accounting buffers */
1516 while ((iter = list_first_entry_or_null(&iter_obd_quota_dt_list,
1517 struct lquota_iter, li_link))) {
1518 rc = quotactl_iter_acct(&ll_iter->lqil_quotactl_iter_list,
1519 iter->li_buffer, iter->li_dt_size,
1520 &count, oqctl->qc_type, false);
1524 list_del_init(&iter->li_link);
1525 OBD_FREE_LARGE(iter,
1526 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
/* build a (pid, type, time)-derived mark identifying this result set */
1529 ll_iter->lqil_mark = ((__u64)current->pid << 32) |
1530 ((__u32)qctl->qc_type << 8) |
1531 (ktime_get_seconds() & 0xFFFFFF);
1532 ll_iter->lqil_flags = qctl->qc_type;
1533 ll_iter->lqil_pid = current->pid;
1534 ll_iter->lqil_iter_time = ktime_get_seconds();
1536 list_add(&ll_iter->lqil_sbi_list, &sbi->ll_all_quota_list);
/* report result count and mark back to the caller's if_quotactl */
1538 qctl->qc_allquota_count = count;
1539 qctl->qc_allquota_mark = ll_iter->lqil_mark;
/* error path: drop partial results and any unconsumed buffers */
1543 ll_quota_iter_check_and_cleanup(sbi, true);
1545 while ((iter = list_first_entry_or_null(&iter_quota_glb_list,
1546 struct lquota_iter, li_link))) {
1547 list_del_init(&iter->li_link);
1548 OBD_FREE_LARGE(iter,
1549 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
1552 while ((iter = list_first_entry_or_null(&iter_obd_quota_md_list,
1553 struct lquota_iter, li_link))) {
1554 list_del_init(&iter->li_link);
1555 OBD_FREE_LARGE(iter,
1556 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
1559 while ((iter = list_first_entry_or_null(&iter_obd_quota_dt_list,
1560 struct lquota_iter, li_link))) {
1561 list_del_init(&iter->li_link);
1562 OBD_FREE_LARGE(iter,
1563 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
1566 OBD_FREE_PTR(ll_iter);
1569 OBD_FREE_PTR(oqctl);
1571 mutex_unlock("actl_iter_lock);
/*
 * quotactl_getallquota() - implement LUSTRE_Q_GETALLQUOTA: find the cached
 * iteration result set whose lqil_mark matches qctl->qc_allquota_mark,
 * copy each per-id if_quotactl record into the user buffer
 * (qc_allquota_buffer, qc_allquota_buflen bytes), and free records as they
 * are consumed.  The second drain loop frees leftovers on error.
 * NOTE(review): mutex_lock("actl_iter_lock) appears to be text-mangled
 * mutex_lock(&quotactl_iter_lock) — verify against upstream.
 * NOTE(review): interior lines (match/exit handling, final list teardown)
 * are elided in this listing.
 */
1575 static int quotactl_getallquota(struct ll_sb_info *sbi,
1576 struct if_quotactl *qctl)
1578 struct ll_quotactl_iter_list *ll_iter = NULL;
1579 struct if_quotactl_iter *iter = NULL;
1580 void __user *buffer = (void __user *)qctl->qc_allquota_buffer;
1581 __u64 cur = 0, count = qctl->qc_allquota_buflen;
1586 mutex_lock("actl_iter_lock);
/* locate the result set matching the caller-supplied mark */
1588 while ((ll_iter = list_first_entry_or_null(&sbi->ll_all_quota_list,
1589 struct ll_quotactl_iter_list,
1590 lqil_sbi_list)) != NULL) {
1591 if (qctl->qc_allquota_mark == ll_iter->lqil_mark)
1596 mutex_unlock("actl_iter_lock);
/* stream records to userspace until the buffer is exhausted */
1600 while ((iter = list_first_entry_or_null(
1601 &ll_iter->lqil_quotactl_iter_list,
1602 struct if_quotactl_iter, qci_link))) {
1603 if (count - cur < sizeof(struct if_quotactl)) {
1608 if (copy_to_user(buffer + cur, &iter->qci_qc,
1609 sizeof(struct if_quotactl))) {
1614 cur += sizeof(struct if_quotactl);
1616 list_del_init(&iter->qci_link);
1617 OBD_SLAB_FREE_PTR(iter, quota_iter_slab);
1620 /* cleanup in case of error */
1621 while ((iter = list_first_entry_or_null(
1622 &ll_iter->lqil_quotactl_iter_list,
1623 struct if_quotactl_iter, qci_link))) {
1624 list_del_init(&iter->qci_link);
1625 OBD_SLAB_FREE_PTR(iter, quota_iter_slab);
1628 mutex_unlock("actl_iter_lock);
/*
 * quotactl_ioctl() - permission-check and dispatch a Lustre quotactl
 * request.  SET-type commands require CAP_SYS_ADMIN and a writable sb;
 * GET-type commands require ownership of the id (check_owner()) or
 * CAP_SYS_ADMIN.  ITERQUOTA/GETALLQUOTA route to the iteration helpers
 * above; per-OBD queries (valid != QC_GENERAL) go through obd_iocontrol;
 * everything else is forwarded to the MDT via obd_quotactl, with a
 * fallback that sums space/inode usage from OSTs and MDTs when the
 * server did not report QIF_SPACE.
 * NOTE(review): interior lines (switch header, RETURN paths, the
 * valid-target sub-switch) are elided in this listing.
 */
1633 int quotactl_ioctl(struct super_block *sb, struct if_quotactl *qctl)
1635 struct ll_sb_info *sbi = ll_s2sbi(sb);
1636 int cmd = qctl->qc_cmd;
1637 int type = qctl->qc_type;
1638 int id = qctl->qc_id;
1639 int valid = qctl->qc_valid;
/* SET-type commands: admin only, and never on a read-only mount */
1647 case LUSTRE_Q_SETDEFAULT:
1648 case LUSTRE_Q_SETQUOTAPOOL:
1649 case LUSTRE_Q_SETINFOPOOL:
1650 case LUSTRE_Q_SETDEFAULT_POOL:
1651 case LUSTRE_Q_DELETEQID:
1652 case LUSTRE_Q_RESETQID:
1653 if (!capable(CAP_SYS_ADMIN))
1656 if (sb->s_flags & SB_RDONLY)
/* GET-type commands: owner of the id, or admin */
1660 case LUSTRE_Q_GETDEFAULT:
1661 case LUSTRE_Q_GETQUOTAPOOL:
1662 case LUSTRE_Q_GETDEFAULT_POOL:
1663 case LUSTRE_Q_ITERQUOTA:
1664 case LUSTRE_Q_GETALLQUOTA:
1665 if (check_owner(type, id) &&
1666 (!capable(CAP_SYS_ADMIN)))
1670 case LUSTRE_Q_GETINFOPOOL:
1673 CERROR("%s: unsupported quotactl op: %#x: rc = %d\n",
1674 sbi->ll_fsname, cmd, -EOPNOTSUPP);
1675 RETURN(-EOPNOTSUPP);
1678 if (cmd == LUSTRE_Q_ITERQUOTA) {
1679 rc = quotactl_iter(sbi, qctl);
1680 } else if (cmd == LUSTRE_Q_GETALLQUOTA) {
1681 rc = quotactl_getallquota(sbi, qctl);
1682 } else if (valid != QC_GENERAL) {
/* translate generic commands to their per-OBD equivalents */
1683 if (cmd == Q_GETINFO)
1684 qctl->qc_cmd = Q_GETOINFO;
1685 else if (cmd == Q_GETQUOTA ||
1686 cmd == LUSTRE_Q_GETQUOTAPOOL)
1687 qctl->qc_cmd = Q_GETOQUOTA;
/* route to MD or DT export depending on the requested target
 * (selector lines elided in this listing) */
1693 rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
1694 sizeof(*qctl), qctl, NULL);
1697 rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
1698 sizeof(*qctl), qctl, NULL);
1701 rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
1702 sizeof(*qctl), qctl, NULL);
1704 rc = obd_iocontrol(OBD_IOC_QUOTACTL,
1706 sizeof(*qctl), qctl, NULL);
/* general path: forward to the MDT via obd_quotactl */
1717 struct obd_quotactl *oqctl;
1718 int oqctl_len = sizeof(*oqctl);
1720 if (LUSTRE_Q_CMD_IS_POOL(cmd))
1721 oqctl_len += LOV_MAXPOOLNAME + 1;
1723 OBD_ALLOC(oqctl, oqctl_len);
1727 QCTL_COPY(oqctl, qctl);
1728 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
1730 OBD_FREE(oqctl, oqctl_len);
1733 /* If QIF_SPACE is not set, client should collect the
1734 * space usage from OSSs by itself
1736 if ((cmd == Q_GETQUOTA || cmd == LUSTRE_Q_GETQUOTAPOOL) &&
1737 !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
1738 !oqctl->qc_dqblk.dqb_curspace) {
1739 struct obd_quotactl *oqctl_tmp;
1740 int qctl_len = sizeof(*oqctl_tmp) + LOV_MAXPOOLNAME + 1;
1742 OBD_ALLOC(oqctl_tmp, qctl_len);
1743 if (oqctl_tmp == NULL)
1744 GOTO(out, rc = -ENOMEM);
1746 if (cmd == LUSTRE_Q_GETQUOTAPOOL) {
1747 oqctl_tmp->qc_cmd = LUSTRE_Q_GETQUOTAPOOL;
1748 memcpy(oqctl_tmp->qc_poolname,
1750 LOV_MAXPOOLNAME + 1);
1752 oqctl_tmp->qc_cmd = Q_GETOQUOTA;
1754 oqctl_tmp->qc_id = oqctl->qc_id;
1755 oqctl_tmp->qc_type = oqctl->qc_type;
1757 /* collect space usage from OSTs */
1758 oqctl_tmp->qc_dqblk.dqb_curspace = 0;
1759 rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
1760 if (!rc || rc == -EREMOTEIO) {
1761 oqctl->qc_dqblk.dqb_curspace =
1762 oqctl_tmp->qc_dqblk.dqb_curspace;
1763 oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
1766 /* collect space & inode usage from MDTs */
1767 oqctl_tmp->qc_cmd = Q_GETOQUOTA;
1768 oqctl_tmp->qc_dqblk.dqb_curspace = 0;
1769 oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
1770 rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
1771 if (!rc || rc == -EREMOTEIO) {
1772 oqctl->qc_dqblk.dqb_curspace +=
1773 oqctl_tmp->qc_dqblk.dqb_curspace;
1774 oqctl->qc_dqblk.dqb_curinodes =
1775 oqctl_tmp->qc_dqblk.dqb_curinodes;
1776 oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
1778 oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
1781 OBD_FREE(oqctl_tmp, qctl_len);
1784 QCTL_COPY(qctl, oqctl);
1785 OBD_FREE(oqctl, oqctl_len);
/*
 * ll_rmfid() - handle LL_IOC_RMFID: bulk-remove files by FID.  Copies the
 * user's fid_array in (bounded by OBD_MAX_FIDS_IN_ARRAY), and for
 * subdirectory mounts first filters the list through fid2path so that
 * only FIDs fully visible under the mount are sent to the MDC
 * (md_rmfid()); per-FID result codes are returned to userspace through
 * the f_ver field of each fid.
 * NOTE(review): interior lines (several GOTO targets, the visible/hidden
 * classification branch, labels free_lfa/free_rcs/free_lfa_new) are
 * elided in this listing.
 */
1791 static int ll_rmfid(struct file *file, void __user *arg)
1793 const struct fid_array __user *ufa = arg;
1794 struct inode *inode = file_inode(file);
1795 struct ll_sb_info *sbi = ll_i2sbi(inode);
1796 struct fid_array *lfa = NULL, *lfa_new = NULL;
1797 int i, rc, *rcs = NULL;
1799 bool lfa_flag = false; /* lfa already free'ed */
/* permission: CAP_DAC_READ_SEARCH, or the user_fid2path mount flag */
1803 if (!capable(CAP_DAC_READ_SEARCH) &&
1804 !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
1806 /* Only need to get the buflen */
1807 if (get_user(nr, &ufa->fa_nr))
1809 /* DoS protection */
1810 if (nr > OBD_MAX_FIDS_IN_ARRAY)
1813 size = offsetof(struct fid_array, fa_fids[nr]);
1814 OBD_ALLOC(lfa, size);
1817 OBD_ALLOC_PTR_ARRAY(rcs, nr);
1819 GOTO(free_lfa, rc = -ENOMEM);
1821 if (copy_from_user(lfa, arg, size))
1822 GOTO(free_rcs, rc = -EFAULT);
1824 /* In case of subdirectory mount, we need to make sure all the files
1825 * for which we want to remove FID are visible in the namespace.
1827 if (!fid_is_root(&sbi->ll_root_fid)) {
1828 int path_len = PATH_MAX, linkno;
1829 struct getinfo_fid2path *gf;
1830 int idx, last_idx = nr - 1;
1834 OBD_ALLOC(lfa_new, size);
1836 GOTO(free_rcs, rc = -ENOMEM);
1839 gf = kmalloc(sizeof(*gf) + path_len + 1, GFP_NOFS);
1841 GOTO(free_lfa_new, rc = -ENOMEM);
/* walk every link of every fid through fid2path */
1843 for (idx = 0; idx < nr; idx++) {
1846 memset(gf, 0, sizeof(*gf) + path_len + 1);
1847 gf->gf_fid = lfa->fa_fids[idx];
1848 gf->gf_pathlen = path_len;
1849 gf->gf_linkno = linkno;
1850 rc = __ll_fid2path(inode, gf,
1851 sizeof(*gf) + gf->gf_pathlen,
/* grow the path buffer and retry on overflow */
1853 if (rc == -ENAMETOOLONG) {
1854 struct getinfo_fid2path *tmpgf;
1856 path_len += PATH_MAX;
1857 tmpgf = krealloc(gf,
1858 sizeof(*gf) + path_len + 1,
1862 GOTO(free_lfa_new, rc = -ENOMEM);
/* linkno unchanged => last link processed */
1869 if (gf->gf_linkno == linkno)
1871 linkno = gf->gf_linkno;
1875 /* All the links for this fid are visible in the
1876 * mounted subdir. So add it to the list of fids
1879 lfa_new->fa_fids[lfa_new->fa_nr++] =
1882 /* At least one link for this fid is not visible
1883 * in the mounted subdir. So add it at the end
1884 * of the list that will be hidden to lower
1885 * layers, and set -ENOENT as ret code.
1887 lfa_new->fa_fids[last_idx] = lfa->fa_fids[idx];
1888 rcs[last_idx--] = rc;
/* swap in the filtered array; original lfa is done with */
1892 OBD_FREE(lfa, size);
1896 if (lfa->fa_nr == 0)
1897 GOTO(free_rcs, rc = rcs[nr - 1]);
1899 /* Call mdc_iocontrol */
1900 rc = md_rmfid(ll_i2mdexp(file_inode(file)), lfa, rcs, NULL);
/* return per-fid status via the (otherwise unused) f_ver field */
1903 for (i = 0; i < nr; i++)
1905 lfa->fa_fids[i].f_ver = rcs[i];
1906 if (copy_to_user(arg, lfa, size))
1911 OBD_FREE(lfa_new, size);
1913 OBD_FREE_PTR_ARRAY(rcs, nr);
1916 OBD_FREE(lfa, size);
1921 /* This function tries to get a single name component,
1922 * to send to the server. No actual path traversal involved,
1923 * so we limit to NAME_MAX */
/*
 * ll_getname() - copy one NUL-terminated name component from userspace
 * into a freshly allocated NAME_MAX+1 buffer; returns the buffer or an
 * ERR_PTR (-ENOMEM, -ENAMETOOLONG, or presumably -EFAULT from
 * strncpy_from_user — that branch is elided in this listing).  Caller
 * frees with ll_putname().
 */
1924 static char *ll_getname(const char __user *filename)
1929 OBD_ALLOC(tmp, NAME_MAX + 1)
1932 return ERR_PTR(-ENOMEM);
1934 len = strncpy_from_user(tmp, filename, NAME_MAX + 1);
1937 else if (len > NAME_MAX)
1938 ret = -ENAMETOOLONG;
1941 OBD_FREE(tmp, NAME_MAX + 1);
/* human-readable names for LU_LADVISE_* advice values, used in
 * diagnostics by the LL_IOC_LADVISE2 handler below */
1947 static const char *const ladvise_names[] = LU_LADVISE_NAMES;
/* release a buffer obtained from ll_getname().
 * NOTE(review): the trailing semicolon inside the macro body means call
 * sites effectively expand to two statements — harmless with the current
 * brace style, but fragile in an unbraced if/else; confirm upstream. */
1949 #define ll_putname(filename) OBD_FREE(filename, NAME_MAX + 1);
/*
 * ll_dir_ioctl() - ioctl dispatcher for Lustre directories.  Handles
 * MD lookup, LMV/LOV stripe get/set, entry removal, rmfid, quotactl,
 * HSM requests/progress/copytool control, directory migration, ladvise2
 * and PCC detach; unknown commands fall through to ll_iocontrol() and
 * finally to the DT export.
 * NOTE(review): this listing elides many interior lines of the switch
 * (the switch header itself, break/RETURN statements, several labels),
 * as shown by jumps in the embedded original-line numbering; comments
 * below describe only what is visible.
 */
1951 static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1953 struct dentry *dentry = file_dentry(file);
1954 struct inode *inode = file_inode(file);
1955 struct ll_sb_info *sbi = ll_i2sbi(inode);
1956 struct obd_ioctl_data *data = NULL;
1957 void __user *uarg = (void __user *)arg;
1961 CDEBUG(D_VFSTRACE|D_IOCTL, "VFS Op:inode="DFID"(%pK) cmd=%x arg=%lx\n",
1962 PFID(ll_inode2fid(inode)), inode, cmd, arg);
1964 /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
1965 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
1968 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
/* name -> FID lookup on the MDC */
1970 case IOC_MDC_LOOKUP: {
1971 int namelen, len = 0;
1974 rc = obd_ioctl_getdata(&data, &len, uarg);
1978 filename = data->ioc_inlbuf1;
1979 namelen = strlen(filename);
1981 CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
1982 GOTO(out_free, rc = -EINVAL);
1985 rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL);
1987 CERROR("%s: lookup %.*s failed: rc = %d\n",
1988 sbi->ll_fsname, namelen, filename, rc);
1992 OBD_FREE_LARGE(data, len);
/* create a striped (LMV) subdirectory */
1995 case LL_IOC_LMV_SETSTRIPE: {
1996 struct lmv_user_md *lum;
2001 bool createonly = false;
2005 rc = obd_ioctl_getdata(&data, &len, uarg);
2009 if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
2010 data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0)
2011 GOTO(lmv_out_free, rc = -EINVAL);
2013 filename = data->ioc_inlbuf1;
2014 namelen = data->ioc_inllen1;
2017 CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
2018 GOTO(lmv_out_free, rc = -EINVAL);
2020 lum = (struct lmv_user_md *)data->ioc_inlbuf2;
2021 lumlen = data->ioc_inllen2;
/* validate magic and that the buffer is large enough for it */
2023 if (!lmv_user_magic_supported(lum->lum_magic)) {
2024 CERROR("%s: wrong lum magic %x : rc = %d\n", filename,
2025 lum->lum_magic, -EINVAL);
2026 GOTO(lmv_out_free, rc = -EINVAL);
2029 if ((lum->lum_magic == LMV_USER_MAGIC ||
2030 lum->lum_magic == LMV_USER_MAGIC_SPECIFIC) &&
2031 lumlen < sizeof(*lum)) {
2032 CERROR("%s: wrong lum size %d for magic %x : rc = %d\n",
2033 filename, lumlen, lum->lum_magic, -EINVAL);
2034 GOTO(lmv_out_free, rc = -EINVAL);
2037 if (lum->lum_magic == LMV_MAGIC_FOREIGN &&
2038 lumlen < sizeof(struct lmv_foreign_md)) {
2039 CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
2040 filename, lum->lum_magic, lumlen, -EFAULT);
2041 GOTO(lmv_out_free, rc = -EINVAL);
2044 mode = data->ioc_type;
2045 createonly = data->ioc_obdo1.o_flags & OBD_FL_OBDMDEXISTS;
2046 rc = ll_dir_setdirstripe(dentry, lum, lumlen, filename, mode,
2049 OBD_FREE_LARGE(data, len);
/* set default LMV layout on this directory */
2053 case LL_IOC_LMV_SET_DEFAULT_STRIPE: {
2054 struct lmv_user_md lum;
2055 struct lmv_user_md __user *ulump = uarg;
2058 if (copy_from_user(&lum, ulump, sizeof(lum)))
2061 if (lum.lum_magic != LMV_USER_MAGIC)
2064 rc = ll_dir_setstripe(inode, (struct lov_user_md *)&lum, 0);
/* set (default) LOV file layout on this directory */
2068 case LL_IOC_LOV_SETSTRIPE_NEW:
2069 case LL_IOC_LOV_SETSTRIPE: {
2070 struct lov_user_md_v3 *lumv3 = NULL;
2071 struct lov_user_md_v1 lumv1;
2072 struct lov_user_md_v1 *lumv1_ptr = &lumv1;
2073 struct lov_user_md_v1 __user *lumv1p = uarg;
2074 struct lov_user_md_v3 __user *lumv3p = uarg;
2076 int set_default = 0;
2078 BUILD_BUG_ON(sizeof(struct lov_user_md_v3) <=
2079 sizeof(struct lov_comp_md_v1));
2080 BUILD_BUG_ON(sizeof(*lumv3) != sizeof(*lumv3p));
2081 /* first try with v1 which is smaller than v3 */
2082 if (copy_from_user(&lumv1, lumv1p, sizeof(lumv1)))
2085 if (is_root_inode(inode))
2088 switch (lumv1.lmm_magic) {
2089 case LOV_USER_MAGIC_V3:
2090 case LOV_USER_MAGIC_SPECIFIC:
2091 lum_size = ll_lov_user_md_size(&lumv1);
2094 OBD_ALLOC(lumv3, lum_size);
2097 if (copy_from_user(lumv3, lumv3p, lum_size))
2098 GOTO(out, rc = -EFAULT);
2099 lumv1_ptr = (struct lov_user_md_v1 *)lumv3;
2101 case LOV_USER_MAGIC_V1:
2104 GOTO(out, rc = -EOPNOTSUPP);
2107 /* in v1 and v3 cases lumv1 points to data */
2108 rc = ll_dir_setstripe(inode, lumv1_ptr, set_default);
2111 OBD_FREE(lumv3, lum_size);
/* read LMV stripe layout back to userspace */
2114 case LL_IOC_LMV_GETSTRIPE: {
2115 struct lmv_user_md __user *ulmv = uarg;
2116 struct lmv_user_md lum;
2117 struct ptlrpc_request *request = NULL;
2118 union lmv_mds_md *lmm = NULL;
2121 struct lmv_user_md *tmp = NULL;
2125 int max_stripe_count;
2129 if (copy_from_user(&lum, ulmv, sizeof(*ulmv)))
2132 /* get default LMV */
2133 if (lum.lum_magic == LMV_USER_MAGIC &&
2134 lum.lum_type != LMV_TYPE_RAW) {
2135 rc = ll_dir_get_default_lmv(inode, &lum);
2139 if (copy_to_user(ulmv, &lum, sizeof(lum)))
2145 max_stripe_count = lum.lum_stripe_count;
2146 /* lum_magic will indicate which stripe the ioctl will like
2147 * to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC
2148 * is for default LMV stripe */
2149 if (lum.lum_magic == LMV_MAGIC_V1)
2150 valid |= OBD_MD_MEA;
2151 else if (lum.lum_magic == LMV_USER_MAGIC)
2152 valid |= OBD_MD_DEFAULT_MEA;
2156 rc = ll_dir_getstripe_default(inode, (void **)&lmm, &lmmsize,
2157 &request, NULL, valid);
2159 GOTO(finish_req, rc);
2161 /* get default LMV in raw mode */
2162 if (lum.lum_magic == LMV_USER_MAGIC) {
2163 if (copy_to_user(ulmv, lmm, lmmsize))
2164 GOTO(finish_req, rc = -EFAULT);
2165 GOTO(finish_req, rc);
2168 /* if foreign LMV case, fake stripes number */
2169 if (lmm->lmv_magic == LMV_MAGIC_FOREIGN) {
2170 struct lmv_foreign_md *lfm;
2172 lfm = (struct lmv_foreign_md *)lmm;
2173 if (lfm->lfm_length < XATTR_SIZE_MAX -
2174 offsetof(typeof(*lfm), lfm_value)) {
2175 __u32 size = lfm->lfm_length +
2176 offsetof(typeof(*lfm), lfm_value);
2178 stripe_count = lmv_foreign_to_md_stripes(size);
2180 CERROR("%s: invalid %d foreign size returned: rc = %d\n",
2181 sbi->ll_fsname, lfm->lfm_length,
2186 stripe_count = lmv_mds_md_stripe_count_get(lmm);
/* user buffer too small: report needed count via -E2BIG */
2188 if (max_stripe_count < stripe_count) {
2189 lum.lum_stripe_count = stripe_count;
2190 if (copy_to_user(ulmv, &lum, sizeof(lum)))
2191 GOTO(finish_req, rc = -EFAULT);
2192 GOTO(finish_req, rc = -E2BIG);
2195 /* enough room on user side and foreign case */
2196 if (lmm->lmv_magic == LMV_MAGIC_FOREIGN) {
2197 struct lmv_foreign_md *lfm;
2200 lfm = (struct lmv_foreign_md *)lmm;
2201 size = lfm->lfm_length +
2202 offsetof(struct lmv_foreign_md, lfm_value);
2203 if (copy_to_user(ulmv, lfm, size))
2204 GOTO(finish_req, rc = -EFAULT);
2205 GOTO(finish_req, rc);
/* build a user-visible lmv_user_md with per-stripe MDT indices */
2208 lum_size = lmv_user_md_size(stripe_count,
2209 LMV_USER_MAGIC_SPECIFIC);
2210 OBD_ALLOC(tmp, lum_size);
2212 GOTO(finish_req, rc = -ENOMEM);
2214 mdt_index = ll_get_mdt_idx(inode);
2216 GOTO(out_tmp, rc = -ENOMEM);
2218 tmp->lum_magic = LMV_MAGIC_V1;
2219 tmp->lum_stripe_count = 0;
2220 tmp->lum_stripe_offset = mdt_index;
2221 tmp->lum_hash_type = lmv_mds_md_hash_type_get(lmm);
2222 for (i = 0; i < stripe_count; i++) {
2225 fid_le_to_cpu(&fid, &lmm->lmv_md_v1.lmv_stripe_fids[i]);
2226 if (fid_is_sane(&fid)) {
2227 mdt_index = ll_get_mdt_idx_by_fid(sbi, &fid);
2229 GOTO(out_tmp, rc = mdt_index);
2231 tmp->lum_objects[i].lum_mds = mdt_index;
2232 tmp->lum_objects[i].lum_fid = fid;
2235 tmp->lum_stripe_count++;
2238 if (copy_to_user(ulmv, tmp, lum_size))
2239 GOTO(out_tmp, rc = -EFAULT);
2241 OBD_FREE(tmp, lum_size);
2243 ptlrpc_req_finished(request);
2246 case LL_IOC_REMOVE_ENTRY: {
2247 char *filename = NULL;
2251 /* Here is a little hack to avoid sending REINT_RMENTRY to
2252 * unsupported server, which might crash the server(LU-2730),
2253 * Because both LVB_TYPE and REINT_RMENTRY will be supported
2254 * on 2.4, we use OBD_CONNECT_LVB_TYPE to detect whether the
2255 * server will support REINT_RMENTRY XXX*/
2256 if (!(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_LVB_TYPE))
2257 RETURN(-EOPNOTSUPP);
2259 filename = ll_getname(uarg);
2260 if (IS_ERR(filename))
2261 RETURN(PTR_ERR(filename));
2263 namelen = strlen(filename);
2265 GOTO(out_rmdir, rc = -EINVAL);
2267 rc = ll_rmdir_entry(inode, filename, namelen);
2270 ll_putname(filename);
2274 RETURN(ll_rmfid(file, uarg));
2275 case LL_IOC_LOV_SWAP_LAYOUTS:
/* stripe/stat retrieval family: shared handler body below */
2277 case LL_IOC_LOV_GETSTRIPE:
2278 case LL_IOC_LOV_GETSTRIPE_NEW:
2279 case LL_IOC_MDC_GETINFO_V1:
2280 case LL_IOC_MDC_GETINFO_V2:
2281 case IOC_MDC_GETFILEINFO_V1:
2282 case IOC_MDC_GETFILEINFO_V2:
2283 case IOC_MDC_GETFILESTRIPE: {
2284 struct ptlrpc_request *request = NULL;
2285 struct ptlrpc_request *root_request = NULL;
2286 struct lov_user_md __user *lump;
2287 struct lov_mds_md *lmm = NULL;
2288 struct mdt_body *body;
2289 char *filename = NULL;
2290 lstat_t __user *statp = NULL;
2291 lstatx_t __user *stxp = NULL;
2292 __u64 __user *flagsp = NULL;
2293 __u32 __user *lmmsizep = NULL;
2294 struct lu_fid __user *fidp = NULL;
/* GETFILE* variants take a child name; GETINFO* act on inode */
2298 if (cmd == IOC_MDC_GETFILEINFO_V1 ||
2299 cmd == IOC_MDC_GETFILEINFO_V2 ||
2300 cmd == IOC_MDC_GETFILESTRIPE) {
2301 filename = ll_getname(uarg);
2302 if (IS_ERR(filename))
2303 RETURN(PTR_ERR(filename));
2305 rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
2306 &lmmsize, &request);
2308 rc = ll_dir_getstripe_default(inode, (void **)&lmm,
2314 body = req_capsule_server_get(&request->rq_pill,
2316 LASSERT(body != NULL);
/* -ENODATA (no layout) is tolerated for the *INFO commands */
2321 if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO_V1 ||
2322 cmd == LL_IOC_MDC_GETINFO_V1 ||
2323 cmd == IOC_MDC_GETFILEINFO_V2 ||
2324 cmd == LL_IOC_MDC_GETINFO_V2)) {
/* compute where in the user struct each output field lives */
2332 if (cmd == IOC_MDC_GETFILESTRIPE ||
2333 cmd == LL_IOC_LOV_GETSTRIPE ||
2334 cmd == LL_IOC_LOV_GETSTRIPE_NEW) {
2336 } else if (cmd == IOC_MDC_GETFILEINFO_V1 ||
2337 cmd == LL_IOC_MDC_GETINFO_V1){
2338 struct lov_user_mds_data_v1 __user *lmdp;
2341 statp = &lmdp->lmd_st;
2342 lump = &lmdp->lmd_lmm;
2344 struct lov_user_mds_data __user *lmdp;
2347 fidp = &lmdp->lmd_fid;
2348 stxp = &lmdp->lmd_stx;
2349 flagsp = &lmdp->lmd_flags;
2350 lmmsizep = &lmdp->lmd_lmmsize;
2351 lump = &lmdp->lmd_lmm;
2355 /* If the file has no striping then zero out *lump so
2356 * that the caller isn't confused by garbage. */
2357 if (clear_user(lump, sizeof(*lump)))
2358 GOTO(out_req, rc = -EFAULT);
2359 } else if (copy_to_user(lump, lmm, lmmsize)) {
2360 if (copy_to_user(lump, lmm, sizeof(*lump)))
2361 GOTO(out_req, rc = -EFAULT);
2364 api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
/* V1: legacy lstat_t output */
2366 if (cmd == IOC_MDC_GETFILEINFO_V1 ||
2367 cmd == LL_IOC_MDC_GETINFO_V1) {
2370 st.st_dev = inode->i_sb->s_dev;
2371 st.st_mode = body->mbo_mode;
2372 st.st_nlink = body->mbo_nlink;
2373 st.st_uid = body->mbo_uid;
2374 st.st_gid = body->mbo_gid;
2375 st.st_rdev = body->mbo_rdev;
2376 if (llcrypt_require_key(inode) == -ENOKEY)
2377 st.st_size = round_up(st.st_size,
2378 LUSTRE_ENCRYPTION_UNIT_SIZE);
2380 st.st_size = body->mbo_size;
2381 st.st_blksize = PAGE_SIZE;
2382 st.st_blocks = body->mbo_blocks;
2383 st.st_atime = body->mbo_atime;
2384 st.st_mtime = body->mbo_mtime;
2385 st.st_ctime = body->mbo_ctime;
2386 st.st_ino = cl_fid_build_ino(&body->mbo_fid1,
2389 if (copy_to_user(statp, &st, sizeof(st)))
2390 GOTO(out_req, rc = -EFAULT);
/* V2: statx-style lstatx_t output */
2391 } else if (cmd == IOC_MDC_GETFILEINFO_V2 ||
2392 cmd == LL_IOC_MDC_GETINFO_V2) {
2393 lstatx_t stx = { 0 };
2394 __u64 valid = body->mbo_valid;
2396 stx.stx_blksize = PAGE_SIZE;
2397 stx.stx_nlink = body->mbo_nlink;
2398 stx.stx_uid = body->mbo_uid;
2399 stx.stx_gid = body->mbo_gid;
2400 stx.stx_mode = body->mbo_mode;
2401 stx.stx_ino = cl_fid_build_ino(&body->mbo_fid1,
2403 if (llcrypt_require_key(inode) == -ENOKEY)
2404 stx.stx_size = round_up(stx.stx_size,
2405 LUSTRE_ENCRYPTION_UNIT_SIZE);
2407 stx.stx_size = body->mbo_size;
2408 stx.stx_blocks = body->mbo_blocks;
2409 stx.stx_atime.tv_sec = body->mbo_atime;
2410 stx.stx_ctime.tv_sec = body->mbo_ctime;
2411 stx.stx_mtime.tv_sec = body->mbo_mtime;
2412 stx.stx_btime.tv_sec = body->mbo_btime;
2413 stx.stx_rdev_major = MAJOR(body->mbo_rdev);
2414 stx.stx_rdev_minor = MINOR(body->mbo_rdev);
2415 stx.stx_dev_major = MAJOR(inode->i_sb->s_dev);
2416 stx.stx_dev_minor = MINOR(inode->i_sb->s_dev);
2417 stx.stx_mask |= STATX_BASIC_STATS | STATX_BTIME;
2419 stx.stx_attributes_mask = STATX_ATTR_IMMUTABLE |
2421 #ifdef HAVE_LUSTRE_CRYPTO
2422 stx.stx_attributes_mask |= STATX_ATTR_ENCRYPTED;
2424 if (body->mbo_valid & OBD_MD_FLFLAGS) {
2425 stx.stx_attributes |= body->mbo_flags;
2426 /* if Lustre specific LUSTRE_ENCRYPT_FL flag is
2427 * set, also set ext4 equivalent to please statx
2429 if (body->mbo_flags & LUSTRE_ENCRYPT_FL)
2430 stx.stx_attributes |= STATX_ATTR_ENCRYPTED;
2433 /* For a striped directory, the size and blocks returned
2434 * from MDT is not correct.
2435 * The size and blocks are aggregated by client across
2437 * Thus for a striped directory, do not return the valid
2438 * FLSIZE and FLBLOCKS flags to the caller.
2439 * However, this whould be better decided by the MDS
2440 * instead of the client.
2442 if (cmd == LL_IOC_MDC_GETINFO_V2 &&
2443 ll_dir_striped(inode))
2444 valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
2446 if (flagsp && copy_to_user(flagsp, &valid,
2448 GOTO(out_req, rc = -EFAULT);
2450 if (fidp && copy_to_user(fidp, &body->mbo_fid1,
2452 GOTO(out_req, rc = -EFAULT);
2454 if (!(valid & OBD_MD_FLSIZE))
2455 stx.stx_mask &= ~STATX_SIZE;
2456 if (!(valid & OBD_MD_FLBLOCKS))
2457 stx.stx_mask &= ~STATX_BLOCKS;
2459 if (stxp && copy_to_user(stxp, &stx, sizeof(stx)))
2460 GOTO(out_req, rc = -EFAULT);
2462 if (lmmsizep && copy_to_user(lmmsizep, &lmmsize,
2464 GOTO(out_req, rc = -EFAULT);
2469 ptlrpc_req_finished(request);
2470 ptlrpc_req_finished(root_request);
2472 ll_putname(filename);
/* quota control: copy in (plus pool name if needed), dispatch, copy out */
2475 case OBD_IOC_QUOTACTL: {
2476 struct if_quotactl *qctl;
2477 int qctl_len = sizeof(*qctl) + LOV_MAXPOOLNAME + 1;
2479 OBD_ALLOC(qctl, qctl_len);
2483 if (copy_from_user(qctl, uarg, sizeof(*qctl)))
2484 GOTO(out_quotactl, rc = -EFAULT);
2486 if (LUSTRE_Q_CMD_IS_POOL(qctl->qc_cmd)) {
2487 char __user *from = uarg +
2488 offsetof(typeof(*qctl), qc_poolname);
2489 if (copy_from_user(qctl->qc_poolname, from,
2490 LOV_MAXPOOLNAME + 1))
2491 GOTO(out_quotactl, rc = -EFAULT);
2494 rc = quotactl_ioctl(inode->i_sb, qctl);
2495 if ((rc == 0 || rc == -ENODATA) &&
2496 copy_to_user(uarg, qctl, sizeof(*qctl)))
2499 OBD_FREE(qctl, qctl_len);
2502 case LL_IOC_GETOBDCOUNT: {
2504 struct obd_export *exp;
2506 if (copy_from_user(&count, uarg, sizeof(count)))
2509 /* get ost count when count is zero, get mdt count otherwise */
2510 exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
2511 vallen = sizeof(count);
2512 rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
2513 KEY_TGT_COUNT, &vallen, &count);
2515 CERROR("%s: get target count failed: rc = %d\n",
2516 sbi->ll_fsname, rc);
2520 if (copy_to_user(uarg, &count, sizeof(count)))
2525 case LL_IOC_GET_CONNECT_FLAGS:
2526 RETURN(obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, uarg));
2527 case LL_IOC_FID2MDTIDX: {
2528 struct obd_export *exp = ll_i2mdexp(inode);
2532 if (copy_from_user(&fid, uarg, sizeof(fid)))
2535 /* Call mdc_iocontrol */
2536 rc = obd_iocontrol(LL_IOC_FID2MDTIDX, exp, sizeof(fid), &fid,
2537 (__u32 __user *)&index);
/* HSM: forward a user request, releasing files locally for HUA_RELEASE */
2543 case LL_IOC_HSM_REQUEST: {
2544 struct hsm_user_request *hur;
2551 /* We don't know the true size yet; copy the fixed-size part */
2552 if (copy_from_user(hur, uarg, sizeof(*hur))) {
2557 /* Compute the whole struct size */
2558 totalsize = hur_len(hur);
2563 /* Final size will be more than double totalsize */
2564 if (totalsize >= MDS_MAXREQSIZE / 3)
2567 OBD_ALLOC_LARGE(hur, totalsize);
2571 /* Copy the whole struct */
2572 if (copy_from_user(hur, uarg, totalsize))
2573 GOTO(out_hur, rc = -EFAULT);
2575 if (hur->hur_request.hr_action == HUA_RELEASE) {
2576 const struct lu_fid *fid;
2580 for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
2581 fid = &hur->hur_user_item[i].hui_fid;
2582 f = search_inode_for_lustre(inode->i_sb, fid);
2588 rc = ll_hsm_release(f);
2594 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
2598 OBD_FREE_LARGE(hur, totalsize);
2602 case LL_IOC_HSM_PROGRESS: {
2603 struct hsm_progress_kernel hpk;
2604 struct hsm_progress hp;
2606 if (copy_from_user(&hp, uarg, sizeof(hp)))
2609 hpk.hpk_fid = hp.hp_fid;
2610 hpk.hpk_cookie = hp.hp_cookie;
2611 hpk.hpk_extent = hp.hp_extent;
2612 hpk.hpk_flags = hp.hp_flags;
2613 hpk.hpk_errval = hp.hp_errval;
2614 hpk.hpk_data_version = 0;
2616 /* File may not exist in Lustre; all progress
2617 * reported to Lustre root */
2618 rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
2622 case LL_IOC_HSM_CT_START:
2623 if (!capable(CAP_SYS_ADMIN))
2626 rc = copy_and_ct_start(cmd, sbi->ll_md_exp, uarg);
2629 case LL_IOC_HSM_COPY_START: {
2630 struct hsm_copy *copy;
2633 OBD_ALLOC_PTR(copy);
2636 if (copy_from_user(copy, uarg, sizeof(*copy))) {
2641 rc = ll_ioc_copy_start(inode->i_sb, copy);
2642 if (copy_to_user(uarg, copy, sizeof(*copy)))
2648 case LL_IOC_HSM_COPY_END: {
2649 struct hsm_copy *copy;
2652 OBD_ALLOC_PTR(copy);
2655 if (copy_from_user(copy, uarg, sizeof(*copy))) {
2660 rc = ll_ioc_copy_end(inode->i_sb, copy);
2661 if (copy_to_user(uarg, copy, sizeof(*copy)))
/* migrate a directory entry to another MDT */
2667 case LL_IOC_MIGRATE: {
2668 struct lmv_user_md *lum;
2675 rc = obd_ioctl_getdata(&data, &len, uarg);
2679 if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
2680 data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0)
2681 GOTO(migrate_free, rc = -EINVAL);
2683 filename = data->ioc_inlbuf1;
2684 namelen = data->ioc_inllen1;
2685 flags = data->ioc_type;
2687 if (namelen < 1 || namelen != strlen(filename) + 1) {
2688 CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
2689 GOTO(migrate_free, rc = -EINVAL);
2692 lum = (struct lmv_user_md *)data->ioc_inlbuf2;
2693 if (lum->lum_magic != LMV_USER_MAGIC &&
2694 lum->lum_magic != LMV_USER_MAGIC_SPECIFIC) {
2696 CERROR("%s: wrong lum magic %x: rc = %d\n",
2697 filename, lum->lum_magic, rc);
2698 GOTO(migrate_free, rc);
2701 rc = ll_migrate(inode, file, lum, filename, flags);
2703 OBD_FREE_LARGE(data, len);
/* ladvise2: currently only LU_LADVISE_AHEAD (stat-ahead) is accepted */
2707 case LL_IOC_LADVISE2: {
2708 struct llapi_lu_ladvise2 *ladvise;
2710 OBD_ALLOC_PTR(ladvise);
2711 if (ladvise == NULL)
2714 if (copy_from_user(ladvise, uarg, sizeof(*ladvise)))
2715 GOTO(out_ladvise, rc = -EFAULT);
2717 switch (ladvise->lla_advice) {
2718 case LU_LADVISE_AHEAD:
2719 if (ladvise->lla_start >= ladvise->lla_end) {
2721 "%s: Invalid range (%llu %llu) for %s\n",
2722 sbi->ll_fsname, ladvise->lla_start,
2724 ladvise_names[ladvise->lla_advice]);
2725 GOTO(out_ladvise, rc = -EINVAL);
2729 * Currently we only support name indexing format
2732 if (ladvise->lla_ahead_mode != LU_AH_NAME_INDEX) {
2734 "%s: Invalid access mode (%d) for %s\n",
2735 sbi->ll_fsname, ladvise->lla_ahead_mode,
2736 ladvise_names[ladvise->lla_advice]);
2737 GOTO(out_ladvise, rc = -EINVAL);
2740 /* Currently we only support stat-ahead operations. */
2741 if (!(ladvise->lla_access_flags & ACCESS_FL_STAT)) {
2743 "%s: Invalid access flags (%x) for %s\n",
2745 ladvise->lla_access_flags,
2746 ladvise_names[ladvise->lla_advice]);
2747 GOTO(out_ladvise, rc = -EINVAL);
2750 rc = ll_ioctl_ahead(file, ladvise);
2756 OBD_FREE_PTR(ladvise);
/* detach a file from the persistent client cache by FID */
2759 case LL_IOC_PCC_DETACH_BY_FID: {
2760 struct lu_pcc_detach_fid *detach;
2762 struct inode *inode2;
2765 OBD_ALLOC_PTR(detach);
2769 if (copy_from_user(detach, uarg, sizeof(*detach)))
2770 GOTO(out_detach, rc = -EFAULT);
2772 fid = &detach->pccd_fid;
2773 ino = cl_fid_build_ino(fid, ll_need_32bit_api(sbi));
2774 inode2 = ilookup5(inode->i_sb, ino, ll_test_inode_by_fid, fid);
2776 /* Target inode is not in inode cache, and PCC file
2777 * has aleady released, return immdiately.
2779 GOTO(out_detach, rc = 0);
2781 if (!S_ISREG(inode2->i_mode))
2782 GOTO(out_iput, rc = -EINVAL);
2784 if (!inode_owner_or_capable(&nop_mnt_idmap, inode2))
2785 GOTO(out_iput, rc = -EPERM);
2787 rc = pcc_ioctl_detach(inode2, detach->pccd_opt);
2791 OBD_FREE_PTR(detach);
/* default: generic handler, then fall through to the DT export */
2795 rc = ll_iocontrol(inode, file, cmd, uarg);
2798 RETURN(obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, uarg));
/*
 * llseek for directories.  Lustre directory "offsets" are hash cookies,
 * not byte offsets, so the end-of-directory value depends on whether the
 * caller uses the 32-bit API, and the per-fd readdir cursor (fd->lfd_pos)
 * must be kept consistent with file->f_pos.
 * NOTE(review): the file is line-sampled; the SEEK_SET/SEEK_CUR/SEEK_END
 * switch labels and the final return are outside the visible lines.
 */
2802 static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
2804 struct inode *inode = file->f_mapping->host;
2805 struct ll_file_data *fd = file->private_data;
2806 struct ll_sb_info *sbi = ll_i2sbi(inode);
2807 int api32 = ll_need_32bit_api(sbi);
2808 loff_t ret = -EINVAL;
/* Serialize against concurrent readdir/seek on this directory. */
2811 ll_inode_lock(inode);
/* Relative seek: bias by the current position. */
2816 offset += file->f_pos;
/* End-relative seek: bias by the API-appropriate end-of-directory value. */
2822 offset += LL_DIR_END_OFF_32BIT;
2824 offset += LL_DIR_END_OFF;
/* Accept the offset only when it lies within the valid hash range. */
2831 ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
2832 (!api32 && offset <= LL_DIR_END_OFF))) {
2833 if (offset != file->f_pos) {
2836 hash64 = test_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
/* End-of-directory maps to the MDS end-of-dir sentinel cookie... */
2837 if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
2838 (!api32 && offset == LL_DIR_END_OFF))
2839 fd->lfd_pos = MDS_DIR_END_OFF;
/* ...a 32-bit offset on a 64-bit-hash mount occupies the high half... */
2840 else if (api32 && hash64)
2841 fd->lfd_pos = offset << 32;
/* ...otherwise the offset is already a full hash cookie. */
2843 fd->lfd_pos = offset;
2844 file->f_pos = offset;
/* f_version reset — presumably so the next readdir notices the position
 * change and revalidates; TODO(review) confirm against ll_readdir. */
2845 file->f_version = 0;
2852 ll_inode_unlock(inode);
/* Open a directory: directories share the regular-file open path. */
2856 static int ll_dir_open(struct inode *inode, struct file *file)
2859 RETURN(ll_file_open(inode, file));
/* Release a directory: directories share the regular-file release path. */
2862 static int ll_dir_release(struct inode *inode, struct file *file)
2865 RETURN(ll_file_release(inode, file));
2868 /* notify error if partially read striped directory */
2869 static int ll_dir_flush(struct file *file, fl_owner_t id)
/* Return — and clear, so it is reported only once — any error recorded
 * by readdir while reading a striped directory; the VFS calls .flush on
 * every close of the fd. */
2871 struct ll_file_data *lfd = file->private_data;
2872 int rc = lfd->fd_partial_readdir_rc;
2874 lfd->fd_partial_readdir_rc = 0;
/* VFS file operations for Lustre directories. */
2879 const struct file_operations ll_dir_operations = {
2880 .llseek = ll_dir_seek,
2881 .open = ll_dir_open,
2882 .release = ll_dir_release,
/* read(2) on a directory fd fails with -EISDIR via generic_read_dir */
2883 .read = generic_read_dir,
2884 #ifdef HAVE_DIR_CONTEXT
2885 .iterate_shared = ll_iterate,
/* kernels without the iterate API fall back to the classic readdir hook */
2887 .readdir = ll_readdir,
2889 .unlocked_ioctl = ll_dir_ioctl,
2891 .flush = ll_dir_flush,