4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
33 * Directory code for lustre client.
37 #include <linux/pagemap.h>
39 #include <linux/version.h>
40 #include <linux/security.h>
41 #include <linux/user_namespace.h>
42 #include <linux/uidgid.h>
43 #include <linux/uaccess.h>
44 #include <linux/buffer_head.h> // for wait_on_buffer
45 #include <linux/pagevec.h>
47 #define DEBUG_SUBSYSTEM S_LLITE
49 #include <obd_support.h>
50 #include <obd_class.h>
51 #include <uapi/linux/lustre/lustre_ioctl.h>
52 #include <lustre_lib.h>
53 #include <lustre_dlm.h>
54 #include <lustre_compat.h>
55 #include <lustre_fid.h>
56 #include <lustre_kernelcomm.h>
57 #include <lustre_swab.h>
58 #include <lustre_quota.h>
59 #include <libcfs/libcfs_crypto.h>
61 #include "llite_internal.h"
64 * (new) readdir implementation overview.
66 * Original lustre readdir implementation cached exact copy of raw directory
67 * pages on the client. These pages were indexed in client page cache by
68 * logical offset in the directory file. This design, while very simple and
69 * intuitive had some inherent problems:
71 * . it implies that byte offset to the directory entry serves as a
72 * telldir(3)/seekdir(3) cookie, but that offset is not stable: in
73 * ext3/htree directory entries may move due to splits, and more
76 * . it is incompatible with the design of split directories for cmd3,
77 * that assumes that names are distributed across nodes based on their
78 * hash, and so readdir should be done in hash order.
80 * New readdir implementation does readdir in hash order, and uses hash of a
81 * file name as a telldir/seekdir cookie. This led to number of complications:
83 * . hash is not unique, so it cannot be used to index cached directory
84 * pages on the client (note, that it requires a whole pageful of hash
85 * collided entries to cause two pages to have identical hashes);
87 * . hash is not unique, so it cannot, strictly speaking, be used as an
88 * entry cookie. ext3/htree has the same problem and lustre implementation
89 * mimics their solution: seekdir(hash) positions directory at the first
90 * entry with the given hash.
96 * Client caches directory pages using hash of the first entry as an index. As
97 * noted above hash is not unique, so this solution doesn't work as is:
98 * special processing is needed for "page hash chains" (i.e., sequences of
99 * pages filled with entries all having the same hash value).
101 * First, such chains have to be detected. To this end, server returns to the
102 * client the hash of the first entry on the page next to one returned. When
103 * client detects that this hash is the same as hash of the first entry on the
104 * returned page, page hash collision has to be handled. Pages in the
105 * hash chain, except first one, are termed "overflow pages".
107 * Proposed (unimplemented) solution to the index uniqueness problem is to
108 * not cache overflow pages. Instead, when page hash collision is
109 * detected, all overflow pages from emerging chain should be
110 * immediately requested from the server and placed in a special data
111 * structure. This data structure can be used by ll_readdir() to
112 * process entries from overflow pages. When readdir invocation
113 * finishes, overflow pages are discarded. If page hash collision chain
114 * weren't completely processed, next call to readdir will again detect
115 * page hash collision, again read overflow pages in, process next
116 * portion of entries and again discard the pages. This is not as
117 * wasteful as it looks, because, given reasonable hash, page hash
118 * collisions are extremely rare.
120 * 1. directory positioning
122 * When seekdir(hash) is called.
124 * seekdir() sets the location in the directory stream from which the next
125 * readdir() call will start. mdc_page_locate() is used to find page with
126 * starting hash and will issue RPC to fetch that page. If there is a hash
127 * collision the concerned page is removed.
132 * identification of and access to overflow pages
136 * Page in MDS_READPAGE RPC is packed in LU_PAGE_SIZE, and each page contains
137 * a header lu_dirpage which describes the start/end hash, and whether this
138 * page is empty (contains no dir entry) or its hash collides with the next page.
139 * After client receives reply, several pages will be integrated into dir page
140 * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the
141 * lu_dirpage for this integrated page will be adjusted. See
142 * mdc_adjust_dirpages().
145 struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
146 __u64 offset, int *partial_readdir_rc)
148 struct md_readdir_info mrinfo = {
149 .mr_blocking_ast = ll_md_blocking_ast };
153 rc = md_read_page(ll_i2mdexp(dir), op_data, &mrinfo, offset, &page);
157 if (partial_readdir_rc && mrinfo.mr_partial_readdir_rc)
158 *partial_readdir_rc = mrinfo.mr_partial_readdir_rc;
163 void ll_release_page(struct inode *inode, struct page *page,
168 /* Always remove the page for striped dir, because the page is
169 * built from temporarily in LMV layer
171 if (inode && ll_dir_striped(inode)) {
178 if (likely(page->mapping != NULL))
179 cfs_delete_from_page_cache(page);
185 #ifdef HAVE_DIR_CONTEXT
186 int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data,
187 struct dir_context *ctx, int *partial_readdir_rc)
190 int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data,
191 void *cookie, filldir_t filldir, int *partial_readdir_rc)
194 struct ll_sb_info *sbi = ll_i2sbi(inode);
196 bool is_api32 = ll_need_32bit_api(sbi);
197 bool is_hash64 = test_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
200 struct llcrypt_str lltr = LLTR_INIT(NULL, 0);
205 if (IS_ENCRYPTED(inode)) {
206 rc = llcrypt_fname_alloc_buffer(inode, NAME_MAX, &lltr);
211 page = ll_get_dir_page(inode, op_data, pos, partial_readdir_rc);
213 while (rc == 0 && !done) {
214 struct lu_dirpage *dp;
215 struct lu_dirent *ent;
224 hash = MDS_DIR_END_OFF;
225 dp = page_address(page);
226 for (ent = lu_dirent_start(dp); ent != NULL && !done;
227 ent = lu_dirent_next(ent)) {
234 hash = le64_to_cpu(ent->lde_hash);
235 if (hash < pos) /* Skip until we find target hash */
238 namelen = le16_to_cpu(ent->lde_namelen);
239 if (namelen == 0) /* Skip dummy record */
242 if (is_api32 && is_hash64)
246 fid_le_to_cpu(&fid, &ent->lde_fid);
247 ino = cl_fid_build_ino(&fid, is_api32);
248 type = S_DT(lu_dirent_type_get(ent));
249 /* For ll_nfs_get_name_filldir(), it will try to access
250 * 'ent' through 'lde_name', so the parameter 'name'
251 * for 'filldir()' must be part of the 'ent'.
253 #ifdef HAVE_DIR_CONTEXT
255 if (!IS_ENCRYPTED(inode)) {
256 done = !dir_emit(ctx, ent->lde_name, namelen,
259 /* Directory is encrypted */
260 int save_len = lltr.len;
261 struct llcrypt_str de_name =
262 LLTR_INIT(ent->lde_name, namelen);
264 rc = ll_fname_disk_to_usr(inode, 0, 0, &de_name,
272 done = !dir_emit(ctx, de_name.name, de_name.len,
276 /* HAVE_DIR_CONTEXT is defined from kernel 3.11, whereas
277 * IS_ENCRYPTED is brought by kernel 4.14.
278 * So there is no need to handle encryption case here.
280 done = filldir(cookie, ent->lde_name, namelen, lhash,
287 ll_release_page(inode, page, false);
291 next = le64_to_cpu(dp->ldp_hash_end);
293 if (pos == MDS_DIR_END_OFF) {
294 /* End of directory reached. */
296 ll_release_page(inode, page, false);
298 /* Normal case: continue to the next page.*/
299 ll_release_page(inode, page,
300 le32_to_cpu(dp->ldp_flags) &
303 page = ll_get_dir_page(inode, op_data, pos,
307 #ifdef HAVE_DIR_CONTEXT
312 llcrypt_fname_free_buffer(&lltr);
316 #ifdef HAVE_DIR_CONTEXT
317 static int ll_iterate(struct file *filp, struct dir_context *ctx)
319 static int ll_readdir(struct file *filp, void *cookie, filldir_t filldir)
322 struct inode *inode = file_inode(filp);
323 struct ll_file_data *lfd = filp->private_data;
324 struct ll_sb_info *sbi = ll_i2sbi(inode);
325 bool hash64 = test_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
326 int api32 = ll_need_32bit_api(sbi);
327 struct md_op_data *op_data;
328 struct lu_fid pfid = { 0 };
329 ktime_t kstart = ktime_get();
330 /* result of possible partial readdir */
331 int partial_readdir_rc = 0;
337 LASSERT(lfd != NULL);
341 "VFS Op:inode="DFID"(%p) pos/size%lu/%llu 32bit_api %d\n",
342 PFID(ll_inode2fid(inode)),
343 inode, (unsigned long)pos, i_size_read(inode), api32);
345 if (IS_ENCRYPTED(inode)) {
346 rc = llcrypt_prepare_readdir(inode);
347 if (rc && rc != -ENOKEY)
351 if (pos == MDS_DIR_END_OFF)
355 if (unlikely(ll_dir_striped(inode))) {
356 struct dentry *parent = dget_parent(file_dentry(filp));
357 struct inode *i_dir = d_inode(parent);
359 /* Only needed for striped dir to fill ..see lmv_read_page() */
361 struct obd_export *exp = ll_i2mdexp(i_dir);
362 __u64 ibits = MDS_INODELOCK_LOOKUP;
364 if (ll_have_md_lock(exp, i_dir, &ibits, LCK_MINMODE))
365 pfid = *ll_inode2fid(i_dir);
369 /* If it can not find in cache, do lookup on the master obj */
370 if (fid_is_zero(&pfid)) {
371 rc = ll_dir_get_parent_fid(inode, &pfid);
377 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
378 LUSTRE_OPC_ANY, inode);
380 GOTO(out, rc = PTR_ERR(op_data));
382 /* foreign dirs are browsed out of Lustre */
383 if (unlikely(lmv_dir_foreign(op_data->op_lso1))) {
384 ll_finish_md_op_data(op_data);
388 op_data->op_fid3 = pfid;
390 #ifdef HAVE_DIR_CONTEXT
392 rc = ll_dir_read(inode, &pos, op_data, ctx, &partial_readdir_rc);
395 rc = ll_dir_read(inode, &pos, op_data, cookie, filldir,
396 &partial_readdir_rc);
399 if (!lfd->fd_partial_readdir_rc)
400 lfd->fd_partial_readdir_rc = partial_readdir_rc;
402 if (pos == MDS_DIR_END_OFF) {
404 pos = LL_DIR_END_OFF_32BIT;
406 pos = LL_DIR_END_OFF;
411 #ifdef HAVE_DIR_CONTEXT
416 ll_finish_md_op_data(op_data);
420 ll_stats_ops_tally(sbi, LPROC_LL_READDIR,
421 ktime_us_delta(ktime_get(), kstart));
427 * Create striped directory with specified stripe(@lump)
429 * \param[in] dparent the parent of the directory.
430 * \param[in] lump the specified stripes.
431 * \param[in] dirname the name of the directory.
432 * \param[in] mode the specified mode of the directory.
434 * \retval =0 if striped directory is being created successfully.
435 * <0 if the creation failed.
437 static int ll_dir_setdirstripe(struct dentry *dparent, struct lmv_user_md *lump,
438 size_t len, const char *dirname, umode_t mode,
441 struct inode *parent = dparent->d_inode;
442 struct ptlrpc_request *request = NULL;
443 struct md_op_data *op_data;
444 struct ll_sb_info *sbi = ll_i2sbi(parent);
445 struct inode *inode = NULL;
446 struct dentry dentry = {
450 .len = strlen(dirname),
451 .hash = ll_full_name_hash(dparent, dirname,
454 .d_sb = dparent->d_sb,
456 bool encrypt = false;
461 if (unlikely(!lmv_user_magic_supported(lump->lum_magic)))
464 if (lump->lum_magic != LMV_MAGIC_FOREIGN) {
466 "VFS Op:inode="DFID"(%p) name=%s stripe_offset=%d stripe_count=%u, hash_type=%x\n",
467 PFID(ll_inode2fid(parent)), parent, dirname,
468 (int)lump->lum_stripe_offset, lump->lum_stripe_count,
469 lump->lum_hash_type);
471 struct lmv_foreign_md *lfm = (struct lmv_foreign_md *)lump;
474 "VFS Op:inode="DFID"(%p) name %s foreign, length %u, value '%.*s'\n",
475 PFID(ll_inode2fid(parent)), parent, dirname,
476 lfm->lfm_length, lfm->lfm_length, lfm->lfm_value);
479 if (lump->lum_stripe_count > 1 &&
480 !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE))
483 if (IS_DEADDIR(parent) &&
484 !CFS_FAIL_CHECK(OBD_FAIL_LLITE_NO_CHECK_DEAD))
487 /* MDS < 2.14 doesn't support 'crush' hash type, and cannot handle
488 * unknown hash if client doesn't set a valid one. switch to fnv_1a_64.
490 if (CFS_FAIL_CHECK(OBD_FAIL_LMV_UNKNOWN_STRIPE)) {
491 lump->lum_hash_type = cfs_fail_val;
492 } else if (!(exp_connect_flags2(sbi->ll_md_exp) & OBD_CONNECT2_CRUSH)) {
493 enum lmv_hash_type type = lump->lum_hash_type &
496 if (type >= LMV_HASH_TYPE_CRUSH ||
497 type == LMV_HASH_TYPE_UNKNOWN)
498 lump->lum_hash_type = (lump->lum_hash_type ^ type) |
499 LMV_HASH_TYPE_FNV_1A_64;
502 hash_flags = lump->lum_hash_type & ~LMV_HASH_TYPE_MASK;
503 if (hash_flags & ~LMV_HASH_FLAG_KNOWN)
506 if (unlikely(!lmv_user_magic_supported(cpu_to_le32(lump->lum_magic))))
507 lustre_swab_lmv_user_md(lump);
509 if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
510 mode &= ~current_umask();
511 mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
512 op_data = ll_prep_md_op_data(NULL, parent, NULL, dirname,
513 strlen(dirname), mode, LUSTRE_OPC_MKDIR,
516 RETURN(PTR_ERR(op_data));
518 op_data->op_dir_depth = ll_i2info(parent)->lli_inherit_depth ?:
519 ll_i2info(parent)->lli_dir_depth;
521 if (ll_sbi_has_encrypt(sbi) &&
522 (IS_ENCRYPTED(parent) ||
523 unlikely(ll_sb_has_test_dummy_encryption(parent->i_sb)))) {
524 err = llcrypt_prepare_readdir(parent);
526 GOTO(out_op_data, err);
527 if (!llcrypt_has_encryption_key(parent))
528 GOTO(out_op_data, err = -ENOKEY);
532 if (test_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags)) {
533 /* selinux_dentry_init_security() uses dentry->d_parent and name
534 * to determine the security context for the file. So our fake
535 * dentry should be real enough for this purpose.
537 err = ll_dentry_init_security(&dentry, mode, &dentry.d_name,
538 &op_data->op_file_secctx_name,
539 &op_data->op_file_secctx_name_size,
540 &op_data->op_file_secctx,
541 &op_data->op_file_secctx_size,
542 &op_data->op_file_secctx_slot);
544 GOTO(out_op_data, err);
548 err = llcrypt_inherit_context(parent, NULL, op_data, false);
550 GOTO(out_op_data, err);
553 op_data->op_cli_flags |= CLI_SET_MEA;
555 op_data->op_bias |= MDS_SETSTRIPE_CREATE;
557 err = md_create(sbi->ll_md_exp, op_data, lump, len, mode,
558 from_kuid(&init_user_ns, current_fsuid()),
559 from_kgid(&init_user_ns, current_fsgid()),
560 current_cap(), 0, &request);
562 GOTO(out_request, err);
564 CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_SETDIRSTRIPE_PAUSE, cfs_fail_val);
566 err = ll_prep_inode(&inode, &request->rq_pill, parent->i_sb, NULL);
568 GOTO(out_inode, err);
570 dentry.d_inode = inode;
572 if (test_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags))
573 err = ll_inode_notifysecctx(inode, op_data->op_file_secctx,
574 op_data->op_file_secctx_size);
576 err = ll_inode_init_security(&dentry, inode, parent);
579 GOTO(out_inode, err);
582 err = ll_set_encflags(inode, op_data->op_file_encctx,
583 op_data->op_file_encctx_size, false);
585 GOTO(out_inode, err);
591 ptlrpc_req_finished(request);
593 ll_finish_md_op_data(op_data);
598 int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
601 struct ll_sb_info *sbi = ll_i2sbi(inode);
602 struct md_op_data *op_data;
603 struct ptlrpc_request *req = NULL;
609 switch (lump->lmm_magic) {
610 case LOV_USER_MAGIC_V1:
611 lum_size = sizeof(struct lov_user_md_v1);
613 case LOV_USER_MAGIC_V3:
614 lum_size = sizeof(struct lov_user_md_v3);
616 case LOV_USER_MAGIC_COMP_V1:
617 lum_size = ((struct lov_comp_md_v1 *)lump)->lcm_size;
619 case LMV_USER_MAGIC: {
620 struct lmv_user_md *lmv = (struct lmv_user_md *)lump;
622 /* MDS < 2.14 doesn't support 'crush' hash type, and
623 * cannot handle unknown hash if client doesn't set a
624 * valid one. switch to fnv_1a_64.
626 if (!(exp_connect_flags2(sbi->ll_md_exp) &
627 OBD_CONNECT2_CRUSH)) {
628 enum lmv_hash_type type = lmv->lum_hash_type &
631 if (type >= LMV_HASH_TYPE_CRUSH ||
632 type == LMV_HASH_TYPE_UNKNOWN)
634 (lmv->lum_hash_type ^ type) |
635 LMV_HASH_TYPE_FNV_1A_64;
637 if (lmv->lum_magic != cpu_to_le32(LMV_USER_MAGIC))
638 lustre_swab_lmv_user_md(lmv);
639 lum_size = sizeof(*lmv);
642 case LOV_USER_MAGIC_SPECIFIC: {
643 struct lov_user_md_v3 *v3 =
644 (struct lov_user_md_v3 *)lump;
645 if (v3->lmm_stripe_count > LOV_MAX_STRIPE_COUNT)
647 lum_size = lov_user_md_size(v3->lmm_stripe_count,
648 LOV_USER_MAGIC_SPECIFIC);
653 "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
654 lump->lmm_magic, LOV_USER_MAGIC_V1,
659 /* This is coming from userspace, so should be in
660 * local endian. But the MDS would like it in little
661 * endian, so we swab it before we send it.
663 if ((__swab32(lump->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
664 le32_to_cpu(LOV_MAGIC_MAGIC))
665 lustre_swab_lov_user_md(lump, 0);
667 lum_size = sizeof(struct lov_user_md_v1);
670 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
671 LUSTRE_OPC_ANY, NULL);
673 RETURN(PTR_ERR(op_data));
675 /* swabbing is done in lov_setstripe() on server side */
676 rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req);
677 ll_finish_md_op_data(op_data);
678 ptlrpc_req_finished(req);
685 /* get default LMV from client cache */
686 static int ll_dir_get_default_lmv(struct inode *inode, struct lmv_user_md *lum)
688 struct ll_inode_info *lli = ll_i2info(inode);
689 const struct lmv_stripe_md *lsm;
690 bool fs_dmv_got = false;
695 if (lli->lli_def_lsm_obj) {
696 down_read(&lli->lli_lsm_sem);
697 lsm = &lli->lli_def_lsm_obj->lso_lsm;
699 lum->lum_magic = lsm->lsm_md_magic;
700 lum->lum_stripe_count = lsm->lsm_md_stripe_count;
701 lum->lum_stripe_offset = lsm->lsm_md_master_mdt_index;
702 lum->lum_hash_type = lsm->lsm_md_hash_type;
703 lum->lum_max_inherit = lsm->lsm_md_max_inherit;
704 lum->lum_max_inherit_rr = lsm->lsm_md_max_inherit_rr;
707 up_read(&lli->lli_lsm_sem);
710 if (rc == -ENODATA && !is_root_inode(inode) && !fs_dmv_got) {
711 lli = ll_i2info(inode->i_sb->s_root->d_inode);
716 if (!rc && fs_dmv_got) {
717 lli = ll_i2info(inode);
718 if (lum->lum_max_inherit != LMV_INHERIT_UNLIMITED) {
719 if (lum->lum_max_inherit == LMV_INHERIT_NONE ||
720 lum->lum_max_inherit < LMV_INHERIT_END ||
721 lum->lum_max_inherit > LMV_INHERIT_MAX ||
722 lum->lum_max_inherit <= lli->lli_dir_depth)
723 GOTO(out, rc = -ENODATA);
725 lum->lum_max_inherit -= lli->lli_dir_depth;
728 if (lum->lum_max_inherit_rr != LMV_INHERIT_RR_UNLIMITED) {
729 if (lum->lum_max_inherit_rr == LMV_INHERIT_NONE ||
730 lum->lum_max_inherit_rr < LMV_INHERIT_RR_END ||
731 lum->lum_max_inherit_rr > LMV_INHERIT_RR_MAX ||
732 lum->lum_max_inherit_rr <= lli->lli_dir_depth)
733 lum->lum_max_inherit_rr = LMV_INHERIT_RR_NONE;
735 if (lum->lum_max_inherit_rr > lli->lli_dir_depth)
736 lum->lum_max_inherit_rr -= lli->lli_dir_depth;
743 int ll_dir_get_default_layout(struct inode *inode, void **plmm, int *plmm_size,
744 struct ptlrpc_request **request, u64 valid,
745 enum get_default_layout_type type)
747 struct ll_sb_info *sbi = ll_i2sbi(inode);
748 struct mdt_body *body;
749 struct lov_mds_md *lmm = NULL;
750 struct ptlrpc_request *req = NULL;
751 int lmm_size = OBD_MAX_DEFAULT_EA_SIZE;
752 struct md_op_data *op_data;
758 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, lmm_size,
759 LUSTRE_OPC_ANY, NULL);
761 RETURN(PTR_ERR(op_data));
763 op_data->op_valid = valid | OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
765 if (type == GET_DEFAULT_LAYOUT_ROOT) {
766 lu_root_fid(&op_data->op_fid1);
767 fid = op_data->op_fid1;
769 fid = *ll_inode2fid(inode);
772 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
773 ll_finish_md_op_data(op_data);
775 CDEBUG(D_INFO, "md_getattr failed on inode "DFID": rc %d\n",
780 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
781 LASSERT(body != NULL);
783 lmm_size = body->mbo_eadatasize;
785 if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
787 GOTO(out, rc = -ENODATA);
790 lmm = req_capsule_server_sized_get(&req->rq_pill,
791 &RMF_MDT_MD, lmm_size);
792 LASSERT(lmm != NULL);
794 /* This is coming from the MDS, so is probably in
795 * little endian. We convert it to host endian before
796 * passing it to userspace.
798 /* We don't swab objects for directories */
799 switch (le32_to_cpu(lmm->lmm_magic)) {
802 case LOV_MAGIC_COMP_V1:
803 case LOV_USER_MAGIC_SPECIFIC:
804 if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
805 lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
808 if (LMV_MAGIC != cpu_to_le32(LMV_MAGIC))
809 lustre_swab_lmv_mds_md((union lmv_mds_md *)lmm);
812 if (LMV_USER_MAGIC != cpu_to_le32(LMV_USER_MAGIC))
813 lustre_swab_lmv_user_md((struct lmv_user_md *)lmm);
815 case LMV_MAGIC_FOREIGN: {
816 struct lmv_foreign_md *lfm = (struct lmv_foreign_md *)lmm;
818 if (LMV_MAGIC_FOREIGN != cpu_to_le32(LMV_MAGIC_FOREIGN)) {
819 __swab32s(&lfm->lfm_magic);
820 __swab32s(&lfm->lfm_length);
821 __swab32s(&lfm->lfm_type);
822 __swab32s(&lfm->lfm_flags);
828 CERROR("%s: unknown magic: %lX: rc = %d\n", sbi->ll_fsname,
829 (unsigned long)lmm->lmm_magic, rc);
833 *plmm_size = lmm_size;
839 * This function will be used to get default LOV/LMV/Default LMV
840 * @valid will be used to indicate which stripe it will retrieve.
841 * If the directory does not have its own default layout, then the
842 * function will request the default layout from root FID.
843 * OBD_MD_MEA LMV stripe EA
844 * OBD_MD_DEFAULT_MEA Default LMV stripe EA
845 * otherwise Default LOV EA.
846 * Each time, it can only retrieve 1 stripe EA
848 int ll_dir_getstripe_default(struct inode *inode, void **plmm, int *plmm_size,
849 struct ptlrpc_request **request,
850 struct ptlrpc_request **root_request,
853 struct ptlrpc_request *req = NULL;
854 struct ptlrpc_request *root_req = NULL;
855 struct lov_mds_md *lmm = NULL;
860 rc = ll_dir_get_default_layout(inode, (void **)&lmm, &lmm_size,
862 if (rc == -ENODATA && !fid_is_root(ll_inode2fid(inode)) &&
863 !(valid & OBD_MD_MEA) && root_request != NULL) {
864 int rc2 = ll_dir_get_default_layout(inode, (void **)&lmm,
865 &lmm_size, &root_req, valid,
866 GET_DEFAULT_LAYOUT_ROOT);
872 *plmm_size = lmm_size;
874 if (root_request != NULL)
875 *root_request = root_req;
881 * This function will be used to get default LOV/LMV/Default LMV
882 * @valid will be used to indicate which stripe it will retrieve
883 * OBD_MD_MEA LMV stripe EA
884 * OBD_MD_DEFAULT_MEA Default LMV stripe EA
885 * otherwise Default LOV EA.
886 * Each time, it can only retrieve 1 stripe EA
888 int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
889 struct ptlrpc_request **request, u64 valid)
891 struct ptlrpc_request *req = NULL;
892 struct lov_mds_md *lmm = NULL;
897 rc = ll_dir_get_default_layout(inode, (void **)&lmm, &lmm_size,
901 *plmm_size = lmm_size;
907 int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid)
909 struct md_op_data *op_data;
914 OBD_ALLOC_PTR(op_data);
918 op_data->op_flags |= MF_GET_MDT_IDX;
919 op_data->op_fid1 = *fid;
920 rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
921 mdt_index = op_data->op_mds;
922 OBD_FREE_PTR(op_data);
930 * Get MDT index for the inode.
932 int ll_get_mdt_idx(struct inode *inode)
934 return ll_get_mdt_idx_by_fid(ll_i2sbi(inode), ll_inode2fid(inode));
938 * Generic handler to do any pre-copy work.
940 * It sends a first hsm_progress (with extent length == 0) to coordinator as a
941 * first information for it that real work has started.
943 * Moreover, for a ARCHIVE request, it will sample the file data version and
944 * store it in \a copy.
946 * \return 0 on success.
948 static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
950 struct ll_sb_info *sbi = ll_s2sbi(sb);
951 struct hsm_progress_kernel hpk;
956 /* Forge a hsm_progress based on data from copy. */
957 hpk.hpk_fid = copy->hc_hai.hai_fid;
958 hpk.hpk_cookie = copy->hc_hai.hai_cookie;
959 hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
960 hpk.hpk_extent.length = 0;
963 hpk.hpk_data_version = 0;
966 /* For archive request, we need to read the current file version. */
967 if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
969 __u64 data_version = 0;
971 /* Get inode for this fid */
972 inode = search_inode_for_lustre(sb, ©->hc_hai.hai_fid);
974 hpk.hpk_flags |= HP_FLAG_RETRY;
975 /* hpk_errval is >= 0 */
976 hpk.hpk_errval = -PTR_ERR(inode);
977 GOTO(progress, rc = PTR_ERR(inode));
980 /* Read current file data version */
981 rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
984 CDEBUG(D_HSM, "Could not read file data version of "
985 DFID" (rc = %d). Archive request ("
986 "%#llx) could not be done.\n",
987 PFID(©->hc_hai.hai_fid), rc,
988 copy->hc_hai.hai_cookie);
989 hpk.hpk_flags |= HP_FLAG_RETRY;
990 /* hpk_errval must be >= 0 */
991 hpk.hpk_errval = -rc;
995 /* Store in the hsm_copy for later copytool use.
996 * Always modified even if no lsm.
998 copy->hc_data_version = data_version;
1002 /* On error, the request should be considered as completed */
1003 if (hpk.hpk_errval > 0)
1004 hpk.hpk_flags |= HP_FLAG_COMPLETED;
1006 rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
1009 /* Return first error */
1010 RETURN(rc != 0 ? rc : rc2);
1014 * Generic handler to do any post-copy work.
1016 * It will send the last hsm_progress update to coordinator to inform it
1017 * that copy is finished and whether it was successful or not.
1020 * - for ARCHIVE request, it will sample the file data version and compare it
1021 * with the version saved in ll_ioc_copy_start(). If they do not match, copy
1022 * will be considered as failed.
1023 * - for RESTORE request, it will sample the file data version and send it to
1024 * coordinator which is useful if the file was imported as 'released'.
1026 * \return 0 on success.
1028 static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
1030 struct ll_sb_info *sbi = ll_s2sbi(sb);
1031 struct hsm_progress_kernel hpk;
1036 /* If you modify the logic here, also check llapi_hsm_copy_end(). */
1037 /* Take care: copy->hc_hai.hai_action, len, gid and data are not
1038 * initialized if copy_end was called with copy == NULL.
1041 /* Forge a hsm_progress based on data from copy. */
1042 hpk.hpk_fid = copy->hc_hai.hai_fid;
1043 hpk.hpk_cookie = copy->hc_hai.hai_cookie;
1044 hpk.hpk_extent = copy->hc_hai.hai_extent;
1045 hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
1046 hpk.hpk_errval = copy->hc_errval;
1047 hpk.hpk_data_version = 0;
1049 /* For archive request, we need to check the file data was not changed.
1051 * For restore request, we need to send the file data version, this is
1052 * useful when the file was created using hsm_import.
1054 if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
1055 (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
1056 (copy->hc_errval == 0)) {
1057 struct inode *inode;
1058 __u64 data_version = 0;
1060 /* Get lsm for this fid */
1061 inode = search_inode_for_lustre(sb, ©->hc_hai.hai_fid);
1062 if (IS_ERR(inode)) {
1063 hpk.hpk_flags |= HP_FLAG_RETRY;
1064 /* hpk_errval must be >= 0 */
1065 hpk.hpk_errval = -PTR_ERR(inode);
1066 GOTO(progress, rc = PTR_ERR(inode));
1069 rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
1073 "Could not read file data version. Request could not be confirmed.\n");
1074 if (hpk.hpk_errval == 0)
1075 hpk.hpk_errval = -rc;
1079 /* Store in the hsm_copy for later copytool use.
1080 * Always modified even if no lsm.
1082 hpk.hpk_data_version = data_version;
1084 /* File could have been stripped during archiving, so we need
1087 if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
1088 (copy->hc_data_version != data_version)) {
1089 CDEBUG(D_HSM, "File data version mismatched. "
1090 "File content was changed during archiving. "
1091 DFID", start:%#llx current:%#llx\n",
1092 PFID(©->hc_hai.hai_fid),
1093 copy->hc_data_version, data_version);
1094 /* File was changed, send error to cdt. Do not ask for
1095 * retry because if a file is modified frequently,
1096 * the cdt will loop on retried archive requests.
1097 * The policy engine will ask for a new archive later
1098 * when the file will not be modified for some tunable
1101 hpk.hpk_flags &= ~HP_FLAG_RETRY;
1103 /* hpk_errval must be >= 0 */
1104 hpk.hpk_errval = -rc;
1111 rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
1114 /* Return first error */
1115 RETURN(rc != 0 ? rc : rc2);
1119 static int copy_and_ct_start(int cmd, struct obd_export *exp,
1120 const struct lustre_kernelcomm __user *data)
1122 struct lustre_kernelcomm *lk;
1123 struct lustre_kernelcomm *tmp;
1124 size_t size = sizeof(*lk);
1129 /* copy data from userspace to get numbers of archive_id */
1130 OBD_ALLOC(lk, size);
1134 if (copy_from_user(lk, data, size))
1135 GOTO(out_lk, rc = -EFAULT);
1137 if (lk->lk_flags & LK_FLG_STOP)
1140 if (!(lk->lk_flags & LK_FLG_DATANR)) {
1141 __u32 archive_mask = lk->lk_data_count;
1144 /* old hsm agent to old MDS */
1145 if (!exp_connect_archive_id_array(exp))
1148 /* old hsm agent to new MDS */
1149 lk->lk_flags |= LK_FLG_DATANR;
1151 if (archive_mask == 0)
1154 count = hweight32(archive_mask);
1155 new_size = offsetof(struct lustre_kernelcomm, lk_data[count]);
1156 OBD_ALLOC(tmp, new_size);
1158 GOTO(out_lk, rc = -ENOMEM);
1160 memcpy(tmp, lk, size);
1161 tmp->lk_data_count = count;
1167 for (i = 0; i < sizeof(archive_mask) * 8; i++) {
1168 if (BIT(i) & archive_mask) {
1169 lk->lk_data[count] = i + 1;
1176 /* new hsm agent to new mds */
1177 if (lk->lk_data_count > 0) {
1178 new_size = offsetof(struct lustre_kernelcomm,
1179 lk_data[lk->lk_data_count]);
1180 OBD_ALLOC(tmp, new_size);
1182 GOTO(out_lk, rc = -ENOMEM);
1188 if (copy_from_user(lk, data, size))
1189 GOTO(out_lk, rc = -EFAULT);
1192 /* new hsm agent to old MDS */
1193 if (!exp_connect_archive_id_array(exp)) {
1196 if (lk->lk_data_count > LL_HSM_ORIGIN_MAX_ARCHIVE)
1197 GOTO(out_lk, rc = -EINVAL);
1199 for (i = 0; i < lk->lk_data_count; i++) {
1200 if (lk->lk_data[i] > LL_HSM_ORIGIN_MAX_ARCHIVE) {
1202 CERROR("%s: archive id %d requested but only [0 - %zu] supported: rc = %d\n",
1203 exp->exp_obd->obd_name, lk->lk_data[i],
1204 LL_HSM_ORIGIN_MAX_ARCHIVE, rc);
1208 if (lk->lk_data[i] == 0) {
1213 archives |= (1 << (lk->lk_data[i] - 1));
1215 lk->lk_flags &= ~LK_FLG_DATANR;
1216 lk->lk_data_count = archives;
1219 rc = obd_iocontrol(cmd, exp, size, lk, NULL);
1225 static int check_owner(int type, int id)
1229 if (!uid_eq(current_euid(), make_kuid(&init_user_ns, id)))
1233 if (!in_egroup_p(make_kgid(&init_user_ns, id)))
1242 struct kmem_cache *quota_iter_slab;
1243 static DEFINE_MUTEX(quotactl_iter_lock);
1245 struct ll_quotactl_iter_list {
1246 __u64 lqil_mark; /* iter identifier */
1247 __u32 lqil_flags; /* what has been done */
1248 pid_t lqil_pid; /* debug calling task */
1249 time64_t lqil_iter_time; /* the time to iter */
1250 struct list_head lqil_sbi_list; /* list on ll_sb_info */
1251 struct list_head lqil_quotactl_iter_list; /* list of quota iters */
1254 void ll_quota_iter_check_and_cleanup(struct ll_sb_info *sbi, bool check)
1256 struct if_quotactl_iter *iter_rec = NULL;
1257 struct ll_quotactl_iter_list *tmp, *ll_iter = NULL;
1260 mutex_lock("actl_iter_lock);
1262 list_for_each_entry_safe(ll_iter, tmp, &sbi->ll_all_quota_list,
1265 ll_iter->lqil_iter_time > (ktime_get_seconds() - 86400))
1268 while ((iter_rec = list_first_entry_or_null(
1269 &ll_iter->lqil_quotactl_iter_list,
1270 struct if_quotactl_iter,
1271 qci_link)) != NULL) {
1272 list_del_init(&iter_rec->qci_link);
1273 OBD_SLAB_FREE_PTR(iter_rec, quota_iter_slab);
1276 list_del_init(&ll_iter->lqil_sbi_list);
1277 OBD_FREE_PTR(ll_iter);
1281 mutex_unlock("actl_iter_lock);
1284 /* iterate the quota usage from all QSDs */
/*
 * quotactl_iter_acct() - parse a buffer of (qid, lquota_acct_rec) pairs
 * returned by a quota slave and accumulate the usage into the per-id
 * if_quotactl_iter records on @quota_list, allocating a new record from
 * quota_iter_slab for ids not seen yet.  @is_md apparently selects MDT
 * accounting (inodes + space) vs OST accounting (space only) -- the
 * governing "if (is_md)" line is missing from this excerpt, TODO confirm.
 */
1285 static int quotactl_iter_acct(struct list_head *quota_list, void *buffer,
1286 __u64 size, __u64 *count, __u32 qtype, bool is_md)
1288 struct if_quotactl_iter *tmp, *iter = NULL;
1289 struct lquota_acct_rec *acct;
/* walk the packed records until the buffer is consumed */
1295 while (cur < size) {
1297 (sizeof(qid) + sizeof(*acct))) {
/* each record is a raw __u64 qid followed by an lquota_acct_rec */
1302 qid = *((__u64 *)(buffer + cur));
1304 acct = (struct lquota_acct_rec *)(buffer + cur);
1305 cur += sizeof(*acct);
/* look for an existing record for this id (id compared as __u32) */
1308 list_for_each_entry(tmp, quota_list, qci_link) {
1309 if (tmp->qci_qc.qc_id == (__u32)qid) {
1316 CDEBUG(D_QUOTA, "can't find the iter record for %llu\n",
/* no record yet: allocate one from the slab and add it to the list */
1322 OBD_SLAB_ALLOC_PTR(iter, quota_iter_slab);
1328 INIT_LIST_HEAD(&iter->qci_link);
1329 iter->qci_qc.qc_id = 0;
1330 iter->qci_qc.qc_type = qtype;
1333 list_add(&iter->qci_link, quota_list);
/* MDT branch (presumably is_md): inode count and space both accumulate */
1337 iter->qci_qc.qc_dqblk.dqb_valid |= QIF_INODES;
1338 iter->qci_qc.qc_dqblk.dqb_curinodes += acct->ispace;
1339 iter->qci_qc.qc_dqblk.dqb_curspace += acct->bspace;
/* OST branch: only block space usage accumulates */
1341 iter->qci_qc.qc_dqblk.dqb_valid |= QIF_SPACE;
1342 iter->qci_qc.qc_dqblk.dqb_curspace += acct->bspace;
1349 /* iterate all quota settings from QMT */
/*
 * quotactl_iter_glb() - parse a buffer of (qid, lquota_glb_rec) pairs from
 * the quota master and record the limits into per-id if_quotactl_iter
 * entries on @quota_list.  New entries appear to be inserted in qc_id
 * order (see the "qc_id < qid" scan below) -- TODO confirm, surrounding
 * lines are missing.  @is_md apparently selects inode limits vs block
 * limits, as in quotactl_iter_acct().
 */
1350 static int quotactl_iter_glb(struct list_head *quota_list, void *buffer,
1351 __u64 size, __u64 *count, __u32 qtype, bool is_md)
1353 struct if_quotactl_iter *tmp, *iter = NULL;
1354 struct lquota_glb_rec *glb;
1356 bool inserted = false;
/* walk the packed records until the buffer is consumed */
1361 while (cur < size) {
1363 (sizeof(qid) + sizeof(*glb))) {
/* each record is a raw __u64 qid followed by an lquota_glb_rec */
1368 qid = *((__u64 *)(buffer + cur));
1370 glb = (struct lquota_glb_rec *)(buffer + cur);
1371 cur += sizeof(*glb);
/* reuse an existing record for this id if one was already created */
1374 list_for_each_entry(tmp, quota_list, qci_link) {
1375 if (tmp->qci_qc.qc_id == (__u32)qid) {
1382 OBD_SLAB_ALLOC_PTR(iter, quota_iter_slab);
1388 INIT_LIST_HEAD(&iter->qci_link);
/* find the first entry with qc_id >= qid to keep the list sorted */
1391 list_for_each_entry(tmp, quota_list, qci_link) {
1392 if (tmp->qci_qc.qc_id < qid)
1396 list_add_tail(&iter->qci_link,
/* not inserted mid-list: append at the tail */
1402 list_add_tail(&iter->qci_link, quota_list);
1404 iter->qci_qc.qc_type = qtype;
1405 iter->qci_qc.qc_id = (__u32)qid;
/* MDT branch (presumably is_md): record inode hard/soft limits + grace */
1410 iter->qci_qc.qc_dqblk.dqb_valid |= QIF_ILIMITS;
1411 iter->qci_qc.qc_dqblk.dqb_ihardlimit =
1413 iter->qci_qc.qc_dqblk.dqb_isoftlimit =
1415 iter->qci_qc.qc_dqblk.dqb_itime = glb->qbr_time;
/* OST branch: record block hard/soft limits + grace */
1417 iter->qci_qc.qc_dqblk.dqb_valid |= QIF_BLIMITS;
1418 iter->qci_qc.qc_dqblk.dqb_bhardlimit =
1420 iter->qci_qc.qc_dqblk.dqb_bsoftlimit =
1422 iter->qci_qc.qc_dqblk.dqb_btime = glb->qbr_time;
1429 /* iterate the quota setting from QMT and all QSDs to get the quota information
1430 * for all users or groups
/*
 * quotactl_iter() - implement LUSTRE_Q_ITERQUOTA: ask the MDT (global
 * limits from the QMT) and both MDT/OST exports (usage from the QSDs,
 * via LUSTRE_Q_ITEROQUOTA) for per-id quota data, merge everything into
 * a new ll_quotactl_iter_list, publish it on sbi->ll_all_quota_list, and
 * return its count and unique mark in @qctl for a later
 * LUSTRE_Q_GETALLQUOTA fetch.  On failure all partial state is freed.
 */
1432 static int quotactl_iter(struct ll_sb_info *sbi, struct if_quotactl *qctl)
1434 struct list_head iter_quota_glb_list;
1435 struct list_head iter_obd_quota_md_list;
1436 struct list_head iter_obd_quota_dt_list;
1437 struct ll_quotactl_iter_list *ll_iter;
1438 struct lquota_iter *iter;
1439 struct obd_quotactl *oqctl;
1445 OBD_ALLOC_PTR(ll_iter);
1446 if (ll_iter == NULL)
1449 INIT_LIST_HEAD(&ll_iter->lqil_sbi_list);
1450 INIT_LIST_HEAD(&ll_iter->lqil_quotactl_iter_list);
/* NOTE(review): "actl_iter_lock is mangled text -- almost certainly
 * &quotactl_iter_lock with "&quot" consumed by an HTML unescape;
 * should read mutex_lock(&quotactl_iter_lock) */
1452 mutex_lock("actl_iter_lock);
/* drop any stale previous iterations before starting a new one */
1454 if (!list_empty(&sbi->ll_all_quota_list))
1455 ll_quota_iter_check_and_cleanup(sbi, true);
1457 INIT_LIST_HEAD(&iter_quota_glb_list);
1458 INIT_LIST_HEAD(&iter_obd_quota_md_list);
1459 INIT_LIST_HEAD(&iter_obd_quota_dt_list);
1461 OBD_ALLOC_PTR(oqctl);
1463 GOTO(out, rc = -ENOMEM);
/* 1st RPC: global limits from the QMT via the MDT export.
 * NOTE(review): a kernel stack pointer is smuggled through the __u64
 * qc_iter_list field -- the server side fills these lists in place */
1465 QCTL_COPY(oqctl, qctl);
1466 oqctl->qc_iter_list = (__u64)&iter_quota_glb_list;
1467 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
/* 2nd RPC: per-slave usage from the MDTs */
1471 QCTL_COPY(oqctl, qctl);
1472 oqctl->qc_cmd = LUSTRE_Q_ITEROQUOTA;
1473 oqctl->qc_iter_list = (__u64)&iter_obd_quota_md_list;
1474 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
/* 3rd RPC: per-slave usage from the OSTs */
1478 QCTL_COPY(oqctl, qctl);
1479 oqctl->qc_cmd = LUSTRE_Q_ITEROQUOTA;
1480 oqctl->qc_iter_list = (__u64)&iter_obd_quota_dt_list;
1481 rc = obd_quotactl(sbi->ll_dt_exp, oqctl);
/* merge the global-limit buffers; each lquota_iter buffer holds the MDT
 * half first and the OST half at LQUOTA_ITER_BUFLEN / 2 */
1486 while ((iter = list_first_entry_or_null(&iter_quota_glb_list,
1487 struct lquota_iter, li_link))) {
1490 buffer = iter->li_buffer;
1491 rc = quotactl_iter_glb(&ll_iter->lqil_quotactl_iter_list,
1492 buffer, iter->li_md_size, &count,
1493 oqctl->qc_type, true);
1497 buffer = iter->li_buffer + LQUOTA_ITER_BUFLEN / 2;
1498 rc = quotactl_iter_glb(&ll_iter->lqil_quotactl_iter_list,
1499 buffer, iter->li_dt_size, &count,
1500 oqctl->qc_type, false);
1505 list_del_init(&iter->li_link);
1506 OBD_FREE_LARGE(iter,
1507 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
/* merge MDT usage buffers (is_md = true) */
1510 while ((iter = list_first_entry_or_null(&iter_obd_quota_md_list,
1511 struct lquota_iter, li_link))) {
1512 rc = quotactl_iter_acct(&ll_iter->lqil_quotactl_iter_list,
1513 iter->li_buffer, iter->li_md_size,
1514 &count, oqctl->qc_type, true);
1518 list_del_init(&iter->li_link);
1519 OBD_FREE_LARGE(iter,
1520 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
/* merge OST usage buffers (is_md = false) */
1523 while ((iter = list_first_entry_or_null(&iter_obd_quota_dt_list,
1524 struct lquota_iter, li_link))) {
1525 rc = quotactl_iter_acct(&ll_iter->lqil_quotactl_iter_list,
1526 iter->li_buffer, iter->li_dt_size,
1527 &count, oqctl->qc_type, false);
1531 list_del_init(&iter->li_link);
1532 OBD_FREE_LARGE(iter,
1533 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
/* build a (mostly) unique mark from pid, quota type and time; userspace
 * hands it back in LUSTRE_Q_GETALLQUOTA to find this iteration */
1536 ll_iter->lqil_mark = ((__u64)current->pid << 32) |
1537 ((__u32)qctl->qc_type << 8) |
1538 (ktime_get_seconds() & 0xFFFFFF);
1539 ll_iter->lqil_flags = qctl->qc_type;
1540 ll_iter->lqil_pid = current->pid;
1541 ll_iter->lqil_iter_time = ktime_get_seconds();
1543 list_add(&ll_iter->lqil_sbi_list, &sbi->ll_all_quota_list);
1545 qctl->qc_allquota_count = count;
1546 qctl->qc_allquota_mark = ll_iter->lqil_mark;
/* error path: free everything collected so far, including ll_iter */
1550 ll_quota_iter_check_and_cleanup(sbi, true);
1552 while ((iter = list_first_entry_or_null(&iter_quota_glb_list,
1553 struct lquota_iter, li_link))) {
1554 list_del_init(&iter->li_link);
1555 OBD_FREE_LARGE(iter,
1556 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
1559 while ((iter = list_first_entry_or_null(&iter_obd_quota_md_list,
1560 struct lquota_iter, li_link))) {
1561 list_del_init(&iter->li_link);
1562 OBD_FREE_LARGE(iter,
1563 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
1566 while ((iter = list_first_entry_or_null(&iter_obd_quota_dt_list,
1567 struct lquota_iter, li_link))) {
1568 list_del_init(&iter->li_link);
1569 OBD_FREE_LARGE(iter,
1570 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
1573 OBD_FREE_PTR(ll_iter);
1576 OBD_FREE_PTR(oqctl);
/* NOTE(review): same "actl_iter_lock mangling -- &quotactl_iter_lock */
1578 mutex_unlock("actl_iter_lock);
/*
 * quotactl_getallquota() - implement LUSTRE_Q_GETALLQUOTA: find the
 * iteration previously published by quotactl_iter() via its mark, copy
 * as many if_quotactl records as fit into the user buffer
 * (qc_allquota_buffer / qc_allquota_buflen), and free each record as it
 * is consumed.  Records are also drained on the error path so the
 * iteration never leaks.
 */
1582 static int quotactl_getallquota(struct ll_sb_info *sbi,
1583 struct if_quotactl *qctl)
1585 struct ll_quotactl_iter_list *ll_iter = NULL;
1586 struct if_quotactl_iter *iter = NULL;
1587 void __user *buffer = (void __user *)qctl->qc_allquota_buffer;
1588 __u64 cur = 0, count = qctl->qc_allquota_buflen;
/* NOTE(review): "actl_iter_lock is mangled text -- almost certainly
 * &quotactl_iter_lock with "&quot" consumed by an HTML unescape;
 * should read mutex_lock(&quotactl_iter_lock) */
1593 mutex_lock("actl_iter_lock);
/* locate the iteration matching the caller-supplied mark */
1595 while ((ll_iter = list_first_entry_or_null(&sbi->ll_all_quota_list,
1596 struct ll_quotactl_iter_list,
1597 lqil_sbi_list)) != NULL) {
1598 if (qctl->qc_allquota_mark == ll_iter->lqil_mark)
/* NOTE(review): same "actl_iter_lock mangling -- &quotactl_iter_lock */
1603 mutex_unlock("actl_iter_lock);
/* copy records out one if_quotactl at a time, freeing as we go */
1607 while ((iter = list_first_entry_or_null(
1608 &ll_iter->lqil_quotactl_iter_list,
1609 struct if_quotactl_iter, qci_link))) {
/* user buffer exhausted: stop (remaining records handled below) */
1610 if (count - cur < sizeof(struct if_quotactl)) {
1615 if (copy_to_user(buffer + cur, &iter->qci_qc,
1616 sizeof(struct if_quotactl))) {
1621 cur += sizeof(struct if_quotactl);
1623 list_del_init(&iter->qci_link);
1624 OBD_SLAB_FREE_PTR(iter, quota_iter_slab);
1627 /* cleanup in case of error */
1628 while ((iter = list_first_entry_or_null(
1629 &ll_iter->lqil_quotactl_iter_list,
1630 struct if_quotactl_iter, qci_link))) {
1631 list_del_init(&iter->qci_link);
1632 OBD_SLAB_FREE_PTR(iter, quota_iter_slab);
/* NOTE(review): same "actl_iter_lock mangling -- &quotactl_iter_lock */
1635 mutex_unlock("actl_iter_lock);
/*
 * quotactl_ioctl() - dispatch a Lustre quotactl request: permission
 * checks per command class, then either the iteration helpers above,
 * direct OBD ioctls for per-target (QC_GENERAL != valid) queries, or a
 * regular obd_quotactl() to the MDT.  For Q_GETQUOTA-style commands
 * where the server did not return space usage, the client aggregates
 * usage itself from OSTs and MDTs.
 */
1640 int quotactl_ioctl(struct super_block *sb, struct if_quotactl *qctl)
1642 struct ll_sb_info *sbi = ll_s2sbi(sb);
1643 int cmd = qctl->qc_cmd;
1644 int type = qctl->qc_type;
1645 int id = qctl->qc_id;
1646 int valid = qctl->qc_valid;
/* all "set"-type commands: admin only, and refuse on read-only mounts */
1654 case LUSTRE_Q_SETDEFAULT:
1655 case LUSTRE_Q_SETQUOTAPOOL:
1656 case LUSTRE_Q_SETINFOPOOL:
1657 case LUSTRE_Q_SETDEFAULT_POOL:
1658 case LUSTRE_Q_DELETEQID:
1659 case LUSTRE_Q_RESETQID:
1660 if (!capable(CAP_SYS_ADMIN))
1663 if (sb->s_flags & SB_RDONLY)
/* "get"-type commands: owner of the id, or admin */
1667 case LUSTRE_Q_GETDEFAULT:
1668 case LUSTRE_Q_GETQUOTAPOOL:
1669 case LUSTRE_Q_GETDEFAULT_POOL:
1670 case LUSTRE_Q_ITERQUOTA:
1671 case LUSTRE_Q_GETALLQUOTA:
1672 if (check_owner(type, id) &&
1673 (!capable(CAP_SYS_ADMIN)))
1677 case LUSTRE_Q_GETINFOPOOL:
1680 CERROR("%s: unsupported quotactl op: %#x: rc = %d\n",
1681 sbi->ll_fsname, cmd, -EOPNOTSUPP);
1682 RETURN(-EOPNOTSUPP);
/* iteration commands are handled entirely on the client side */
1685 if (cmd == LUSTRE_Q_ITERQUOTA) {
1686 rc = quotactl_iter(sbi, qctl);
1687 } else if (cmd == LUSTRE_Q_GETALLQUOTA) {
1688 rc = quotactl_getallquota(sbi, qctl);
1689 } else if (valid != QC_GENERAL) {
/* per-target query: rewrite the command to its OBD ("O") variant */
1690 if (cmd == Q_GETINFO)
1691 qctl->qc_cmd = Q_GETOINFO;
1692 else if (cmd == Q_GETQUOTA ||
1693 cmd == LUSTRE_Q_GETQUOTAPOOL)
1694 qctl->qc_cmd = Q_GETOQUOTA;
/* route the OBD ioctl to the MDT or OST export -- the selecting
 * conditions (on qc_valid) are missing from this excerpt */
1700 rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
1701 sizeof(*qctl), qctl, NULL);
1704 rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
1705 sizeof(*qctl), qctl, NULL);
1708 rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
1709 sizeof(*qctl), qctl, NULL);
1711 rc = obd_iocontrol(OBD_IOC_QUOTACTL,
1713 sizeof(*qctl), qctl, NULL);
/* general case: send an obd_quotactl to the MDT; pool commands carry
 * an extra pool name after the fixed struct */
1724 struct obd_quotactl *oqctl;
1725 int oqctl_len = sizeof(*oqctl);
1727 if (LUSTRE_Q_CMD_IS_POOL(cmd))
1728 oqctl_len += LOV_MAXPOOLNAME + 1;
1730 OBD_ALLOC(oqctl, oqctl_len);
1734 QCTL_COPY(oqctl, qctl);
1735 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
1737 OBD_FREE(oqctl, oqctl_len);
1740 /* If QIF_SPACE is not set, client should collect the
1741 * space usage from OSSs by itself
1743 if ((cmd == Q_GETQUOTA || cmd == LUSTRE_Q_GETQUOTAPOOL) &&
1744 !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
1745 !oqctl->qc_dqblk.dqb_curspace) {
1746 struct obd_quotactl *oqctl_tmp;
1747 int qctl_len = sizeof(*oqctl_tmp) + LOV_MAXPOOLNAME + 1;
1749 OBD_ALLOC(oqctl_tmp, qctl_len);
1750 if (oqctl_tmp == NULL)
1751 GOTO(out, rc = -ENOMEM);
1753 if (cmd == LUSTRE_Q_GETQUOTAPOOL) {
1754 oqctl_tmp->qc_cmd = LUSTRE_Q_GETQUOTAPOOL;
1755 memcpy(oqctl_tmp->qc_poolname,
1757 LOV_MAXPOOLNAME + 1);
1759 oqctl_tmp->qc_cmd = Q_GETOQUOTA;
1761 oqctl_tmp->qc_id = oqctl->qc_id;
1762 oqctl_tmp->qc_type = oqctl->qc_type;
1764 /* collect space usage from OSTs */
1765 oqctl_tmp->qc_dqblk.dqb_curspace = 0;
1766 rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
/* -EREMOTEIO tolerated: partial results still usable */
1767 if (!rc || rc == -EREMOTEIO) {
1768 oqctl->qc_dqblk.dqb_curspace =
1769 oqctl_tmp->qc_dqblk.dqb_curspace;
1770 oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
1773 /* collect space & inode usage from MDTs */
1774 oqctl_tmp->qc_cmd = Q_GETOQUOTA;
1775 oqctl_tmp->qc_dqblk.dqb_curspace = 0;
1776 oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
1777 rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
1778 if (!rc || rc == -EREMOTEIO) {
1779 oqctl->qc_dqblk.dqb_curspace +=
1780 oqctl_tmp->qc_dqblk.dqb_curspace;
1781 oqctl->qc_dqblk.dqb_curinodes =
1782 oqctl_tmp->qc_dqblk.dqb_curinodes;
1783 oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
/* MDT query failed: the aggregated space value is unreliable */
1785 oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
1788 OBD_FREE(oqctl_tmp, qctl_len);
/* copy the (possibly client-aggregated) result back to the caller */
1791 QCTL_COPY(qctl, oqctl);
1792 OBD_FREE(oqctl, oqctl_len);
/*
 * ll_rmfid() - handle LL_IOC_RMFID: remove files by FID.  Copies a
 * fid_array from userspace, and for subdirectory mounts first filters
 * it through fid2path so only FIDs fully visible inside the mounted
 * subtree are passed to md_rmfid(); hidden FIDs are parked at the tail
 * of the array with -ENOENT as their result.  Per-fid results are
 * returned to userspace in f_ver of each fid.
 */
1798 static int ll_rmfid(struct file *file, void __user *arg)
1800 const struct fid_array __user *ufa = arg;
1801 struct inode *inode = file_inode(file);
1802 struct ll_sb_info *sbi = ll_i2sbi(inode);
1803 struct fid_array *lfa = NULL, *lfa_new = NULL;
1804 int i, rc, *rcs = NULL;
1806 bool lfa_flag = false; /* lfa already free'ed */
/* permission: CAP_DAC_READ_SEARCH, or the user_fid2path mount option */
1810 if (!capable(CAP_DAC_READ_SEARCH) &&
1811 !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
1813 /* Only need to get the buflen */
1814 if (get_user(nr, &ufa->fa_nr))
1816 /* DoS protection */
1817 if (nr > OBD_MAX_FIDS_IN_ARRAY)
1820 size = offsetof(struct fid_array, fa_fids[nr]);
1821 OBD_ALLOC(lfa, size);
1824 OBD_ALLOC_PTR_ARRAY(rcs, nr);
1826 GOTO(free_lfa, rc = -ENOMEM);
1828 if (copy_from_user(lfa, arg, size))
1829 GOTO(free_rcs, rc = -EFAULT);
1831 /* In case of subdirectory mount, we need to make sure all the files
1832 * for which we want to remove FID are visible in the namespace.
1834 if (!fid_is_root(&sbi->ll_root_fid)) {
1835 int path_len = PATH_MAX, linkno;
1836 struct getinfo_fid2path *gf;
1837 int idx, last_idx = nr - 1;
1841 OBD_ALLOC(lfa_new, size);
1843 GOTO(free_rcs, rc = -ENOMEM);
1846 gf = kmalloc(sizeof(*gf) + path_len + 1, GFP_NOFS);
1848 GOTO(free_lfa_new, rc = -ENOMEM);
/* resolve every hard link of every fid against the mounted subdir */
1850 for (idx = 0; idx < nr; idx++) {
1853 memset(gf, 0, sizeof(*gf) + path_len + 1);
1854 gf->gf_fid = lfa->fa_fids[idx];
1855 gf->gf_pathlen = path_len;
1856 gf->gf_linkno = linkno;
1857 rc = __ll_fid2path(inode, gf,
1858 sizeof(*gf) + gf->gf_pathlen,
/* path buffer too small: grow by PATH_MAX and retry */
1860 if (rc == -ENAMETOOLONG) {
1861 struct getinfo_fid2path *tmpgf;
1863 path_len += PATH_MAX;
1864 tmpgf = krealloc(gf,
1865 sizeof(*gf) + path_len + 1,
1869 GOTO(free_lfa_new, rc = -ENOMEM);
/* same linkno returned: all links of this fid have been walked */
1876 if (gf->gf_linkno == linkno)
1878 linkno = gf->gf_linkno;
1882 /* All the links for this fid are visible in the
1883 * mounted subdir. So add it to the list of fids
1886 lfa_new->fa_fids[lfa_new->fa_nr++] =
1889 /* At least one link for this fid is not visible
1890 * in the mounted subdir. So add it at the end
1891 * of the list that will be hidden to lower
1892 * layers, and set -ENOENT as ret code.
1894 lfa_new->fa_fids[last_idx] = lfa->fa_fids[idx];
1895 rcs[last_idx--] = rc;
/* NOTE(review): lfa is freed here yet dereferenced at line 1903 below;
 * the missing lines 1900-1902 presumably reassign lfa = lfa_new (and
 * set lfa_flag) -- TODO confirm against the full source */
1899 OBD_FREE(lfa, size);
1903 if (lfa->fa_nr == 0)
1904 GOTO(free_rcs, rc = rcs[nr - 1]);
1906 /* Call mdc_iocontrol */
1907 rc = md_rmfid(ll_i2mdexp(file_inode(file)), lfa, rcs, NULL);
/* return per-fid results to userspace through each fid's f_ver field */
1910 for (i = 0; i < nr; i++)
1912 lfa->fa_fids[i].f_ver = rcs[i];
1913 if (copy_to_user(arg, lfa, size))
1918 OBD_FREE(lfa_new, size);
1920 OBD_FREE_PTR_ARRAY(rcs, nr);
1923 OBD_FREE(lfa, size);
1928 /* This function tries to get a single name component, to send to the server.
1929 * No actual path traversal involved, so we limit to NAME_MAX
/*
 * ll_getname() - copy a single NUL-terminated name component from
 * userspace into a NAME_MAX+1 buffer allocated with OBD_ALLOC.  Returns
 * the buffer, or ERR_PTR on failure (caller frees via ll_putname()).
 */
1931 static char *ll_getname(const char __user *filename)
1936 OBD_ALLOC(tmp, NAME_MAX + 1)
1939 return ERR_PTR(-ENOMEM);
/* copy up to NAME_MAX+1 bytes; len > NAME_MAX means the name (plus its
 * terminator) did not fit */
1941 len = strncpy_from_user(tmp, filename, NAME_MAX + 1);
1944 else if (len > NAME_MAX)
1945 ret = -ENAMETOOLONG;
/* error path: free the buffer before returning ERR_PTR */
1948 OBD_FREE(tmp, NAME_MAX + 1);
/* human-readable names for llapi_lu_ladvise2 advice values, used in
 * error messages of the LL_IOC_LADVISE2 handler below */
1954 static const char *const ladvise_names[] = LU_LADVISE_NAMES;
/* free a name obtained from ll_getname().
 * NOTE(review): the trailing semicolon inside the macro body means call
 * sites expand to a double statement ("...;;"); harmless in existing
 * uses but unsafe in an unbraced if/else -- consider dropping it */
1956 #define ll_putname(filename) OBD_FREE(filename, NAME_MAX + 1);
/*
 * ll_dir_ioctl() - ioctl dispatcher for Lustre directories.  One large
 * switch on cmd: striping get/set (LOV and LMV), quotactl, HSM request/
 * progress/copytool control, FID removal, directory migration, ladvise
 * and PCC detach, with a fall-through to ll_iocontrol()/obd_iocontrol()
 * for everything else.  NOTE(review): many interior lines (case labels,
 * braces, RETURN statements) are missing from this excerpt, so several
 * per-case comments below are necessarily hedged.
 */
1958 static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1960 struct dentry *dentry = file_dentry(file);
1961 struct inode *inode = file_inode(file);
1962 struct ll_sb_info *sbi = ll_i2sbi(inode);
1963 struct obd_ioctl_data *data = NULL;
1964 void __user *uarg = (void __user *)arg;
1968 CDEBUG(D_VFSTRACE|D_IOCTL, "VFS Op:inode="DFID"(%pK) cmd=%x arg=%lx\n",
1969 PFID(ll_inode2fid(inode)), inode, cmd, arg);
1971 /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
1972 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
1975 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
/* lookup a name in this directory and resolve it to a FID */
1977 case IOC_MDC_LOOKUP: {
1978 int namelen, len = 0;
1981 rc = obd_ioctl_getdata(&data, &len, uarg);
1985 filename = data->ioc_inlbuf1;
1986 namelen = strlen(filename);
1988 CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
1989 GOTO(out_free, rc = -EINVAL);
1992 rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL);
1994 CERROR("%s: lookup %.*s failed: rc = %d\n",
1995 sbi->ll_fsname, namelen, filename, rc);
1999 OBD_FREE_LARGE(data, len);
/* create a striped subdirectory: name in inlbuf1, lmv_user_md in
 * inlbuf2, validated against its magic before use */
2002 case LL_IOC_LMV_SETSTRIPE: {
2003 struct lmv_user_md *lum;
2008 bool createonly = false;
2012 rc = obd_ioctl_getdata(&data, &len, uarg);
2016 if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
2017 data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0)
2018 GOTO(lmv_out_free, rc = -EINVAL);
2020 filename = data->ioc_inlbuf1;
2021 namelen = data->ioc_inllen1;
2024 CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
2025 GOTO(lmv_out_free, rc = -EINVAL);
2027 lum = (struct lmv_user_md *)data->ioc_inlbuf2;
2028 lumlen = data->ioc_inllen2;
2030 if (!lmv_user_magic_supported(lum->lum_magic)) {
2031 CERROR("%s: wrong lum magic %x : rc = %d\n", filename,
2032 lum->lum_magic, -EINVAL);
2033 GOTO(lmv_out_free, rc = -EINVAL);
2036 if ((lum->lum_magic == LMV_USER_MAGIC ||
2037 lum->lum_magic == LMV_USER_MAGIC_SPECIFIC) &&
2038 lumlen < sizeof(*lum)) {
2039 CERROR("%s: wrong lum size %d for magic %x : rc = %d\n",
2040 filename, lumlen, lum->lum_magic, -EINVAL);
2041 GOTO(lmv_out_free, rc = -EINVAL);
2044 if (lum->lum_magic == LMV_MAGIC_FOREIGN &&
2045 lumlen < sizeof(struct lmv_foreign_md)) {
2046 CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
2047 filename, lum->lum_magic, lumlen, -EFAULT);
2048 GOTO(lmv_out_free, rc = -EINVAL);
2051 mode = data->ioc_type;
2052 createonly = data->ioc_obdo1.o_flags & OBD_FL_OBDMDEXISTS;
2053 rc = ll_dir_setdirstripe(dentry, lum, lumlen, filename, mode,
2056 OBD_FREE_LARGE(data, len);
/* set this directory's default LMV stripe template */
2060 case LL_IOC_LMV_SET_DEFAULT_STRIPE: {
2061 struct lmv_user_md lum;
2062 struct lmv_user_md __user *ulump = uarg;
2065 if (copy_from_user(&lum, ulump, sizeof(lum)))
2068 if (lum.lum_magic != LMV_USER_MAGIC)
2071 rc = ll_dir_setstripe(inode, (struct lov_user_md *)&lum, 0);
/* set the LOV (file layout) default for this directory */
2075 case LL_IOC_LOV_SETSTRIPE_NEW:
2076 case LL_IOC_LOV_SETSTRIPE: {
2077 struct lov_user_md_v3 *lumv3 = NULL;
2078 struct lov_user_md_v1 lumv1;
2079 struct lov_user_md_v1 *lumv1_ptr = &lumv1;
2080 struct lov_user_md_v1 __user *lumv1p = uarg;
2081 struct lov_user_md_v3 __user *lumv3p = uarg;
2083 int set_default = 0;
2085 BUILD_BUG_ON(sizeof(struct lov_user_md_v3) <=
2086 sizeof(struct lov_comp_md_v1));
2087 BUILD_BUG_ON(sizeof(*lumv3) != sizeof(*lumv3p));
2088 /* first try with v1 which is smaller than v3 */
2089 if (copy_from_user(&lumv1, lumv1p, sizeof(lumv1)))
2092 if (is_root_inode(inode))
2095 switch (lumv1.lmm_magic) {
2096 case LOV_USER_MAGIC_V3:
2097 case LOV_USER_MAGIC_SPECIFIC:
/* v3/specific: re-copy with the real (larger) size */
2098 lum_size = ll_lov_user_md_size(&lumv1);
2101 OBD_ALLOC(lumv3, lum_size);
2104 if (copy_from_user(lumv3, lumv3p, lum_size))
2105 GOTO(out, rc = -EFAULT);
2106 lumv1_ptr = (struct lov_user_md_v1 *)lumv3;
2108 case LOV_USER_MAGIC_V1:
2111 GOTO(out, rc = -EOPNOTSUPP);
2114 /* in v1 and v3 cases lumv1 points to data */
2115 rc = ll_dir_setstripe(inode, lumv1_ptr, set_default);
2118 OBD_FREE(lumv3, lum_size);
/* fetch this directory's LMV layout (normal, default or foreign) */
2121 case LL_IOC_LMV_GETSTRIPE: {
2122 struct lmv_user_md __user *ulmv = uarg;
2123 struct lmv_user_md lum;
2124 struct ptlrpc_request *request = NULL;
2125 union lmv_mds_md *lmm = NULL;
2128 struct lmv_user_md *tmp = NULL;
2132 int max_stripe_count;
2136 if (copy_from_user(&lum, ulmv, sizeof(*ulmv)))
2139 /* get default LMV */
2140 if (lum.lum_magic == LMV_USER_MAGIC &&
2141 lum.lum_type != LMV_TYPE_RAW) {
2142 rc = ll_dir_get_default_lmv(inode, &lum);
2146 if (copy_to_user(ulmv, &lum, sizeof(lum)))
2152 max_stripe_count = lum.lum_stripe_count;
2153 /* lum_magic will indicate which stripe the ioctl will like
2154 * to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC
2155 * is for default LMV stripe
2157 if (lum.lum_magic == LMV_MAGIC_V1)
2158 valid |= OBD_MD_MEA;
2159 else if (lum.lum_magic == LMV_USER_MAGIC)
2160 valid |= OBD_MD_DEFAULT_MEA;
2164 rc = ll_dir_getstripe_default(inode, (void **)&lmm, &lmmsize,
2165 &request, NULL, valid);
2167 GOTO(finish_req, rc);
2169 /* get default LMV in raw mode */
2170 if (lum.lum_magic == LMV_USER_MAGIC) {
2171 if (copy_to_user(ulmv, lmm, lmmsize))
2172 GOTO(finish_req, rc = -EFAULT);
2173 GOTO(finish_req, rc);
2176 /* if foreign LMV case, fake stripes number */
2177 if (lmm->lmv_magic == LMV_MAGIC_FOREIGN) {
2178 struct lmv_foreign_md *lfm;
2180 lfm = (struct lmv_foreign_md *)lmm;
2181 if (lfm->lfm_length < XATTR_SIZE_MAX -
2182 offsetof(typeof(*lfm), lfm_value)) {
2183 __u32 size = lfm->lfm_length +
2184 offsetof(typeof(*lfm), lfm_value);
2186 stripe_count = lmv_foreign_to_md_stripes(size);
2188 CERROR("%s: invalid %d foreign size returned: rc = %d\n",
2189 sbi->ll_fsname, lfm->lfm_length,
2194 stripe_count = lmv_mds_md_stripe_count_get(lmm);
/* user buffer too small: report required count via -E2BIG */
2196 if (max_stripe_count < stripe_count) {
2197 lum.lum_stripe_count = stripe_count;
2198 if (copy_to_user(ulmv, &lum, sizeof(lum)))
2199 GOTO(finish_req, rc = -EFAULT);
2200 GOTO(finish_req, rc = -E2BIG);
2203 /* enough room on user side and foreign case */
2204 if (lmm->lmv_magic == LMV_MAGIC_FOREIGN) {
2205 struct lmv_foreign_md *lfm;
2208 lfm = (struct lmv_foreign_md *)lmm;
2209 size = lfm->lfm_length +
2210 offsetof(struct lmv_foreign_md, lfm_value);
2211 if (copy_to_user(ulmv, lfm, size))
2212 GOTO(finish_req, rc = -EFAULT);
2213 GOTO(finish_req, rc);
/* normal case: build a user-visible lmv_user_md with one entry per
 * stripe, resolving each stripe FID to its MDT index */
2216 lum_size = lmv_user_md_size(stripe_count,
2217 LMV_USER_MAGIC_SPECIFIC);
2218 OBD_ALLOC(tmp, lum_size);
2220 GOTO(finish_req, rc = -ENOMEM);
2222 mdt_index = ll_get_mdt_idx(inode);
2224 GOTO(out_tmp, rc = -ENOMEM);
2226 tmp->lum_magic = LMV_MAGIC_V1;
2227 tmp->lum_stripe_count = 0;
2228 tmp->lum_stripe_offset = mdt_index;
2229 tmp->lum_hash_type = lmv_mds_md_hash_type_get(lmm);
2230 for (i = 0; i < stripe_count; i++) {
2233 fid_le_to_cpu(&fid, &lmm->lmv_md_v1.lmv_stripe_fids[i]);
2234 if (fid_is_sane(&fid)) {
2235 mdt_index = ll_get_mdt_idx_by_fid(sbi, &fid);
2237 GOTO(out_tmp, rc = mdt_index);
2239 tmp->lum_objects[i].lum_mds = mdt_index;
2240 tmp->lum_objects[i].lum_fid = fid;
2243 tmp->lum_stripe_count++;
2246 if (copy_to_user(ulmv, tmp, lum_size))
2247 GOTO(out_tmp, rc = -EFAULT);
2249 OBD_FREE(tmp, lum_size);
2251 ptlrpc_req_finished(request);
/* remove a directory entry by name (server-side REINT_RMENTRY) */
2254 case LL_IOC_REMOVE_ENTRY: {
2255 char *filename = NULL;
2259 /* Here is a little hack to avoid sending REINT_RMENTRY to
2260 * unsupported server, which might crash the server(LU-2730),
2261 * Because both LVB_TYPE and REINT_RMENTRY will be supported
2262 * on 2.4, we use OBD_CONNECT_LVB_TYPE to detect whether the
2263 * server will support REINT_RMENTRY XXX
2265 if (!(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_LVB_TYPE))
2266 RETURN(-EOPNOTSUPP);
2268 filename = ll_getname(uarg);
2269 if (IS_ERR(filename))
2270 RETURN(PTR_ERR(filename));
2272 namelen = strlen(filename);
2274 GOTO(out_rmdir, rc = -EINVAL);
2276 rc = ll_rmdir_entry(inode, filename, namelen);
2279 ll_putname(filename);
2283 RETURN(ll_rmfid(file, uarg));
2284 case LL_IOC_LOV_SWAP_LAYOUTS:
/* all the GETSTRIPE/GETINFO variants share one handler: fetch the LOV
 * EA (by name for the IOC_MDC_* forms, for the dir itself otherwise),
 * copy the layout to the user struct, then fill stat/statx data for
 * the *GETINFO* forms from the mdt_body */
2286 case LL_IOC_LOV_GETSTRIPE:
2287 case LL_IOC_LOV_GETSTRIPE_NEW:
2288 case LL_IOC_MDC_GETINFO_V1:
2289 case LL_IOC_MDC_GETINFO_V2:
2290 case IOC_MDC_GETFILEINFO_V1:
2291 case IOC_MDC_GETFILEINFO_V2:
2292 case IOC_MDC_GETFILESTRIPE: {
2293 struct ptlrpc_request *request = NULL;
2294 struct ptlrpc_request *root_request = NULL;
2295 struct lov_user_md __user *lump;
2296 struct lov_mds_md *lmm = NULL;
2297 struct mdt_body *body;
2298 char *filename = NULL;
2299 lstat_t __user *statp = NULL;
2300 lstatx_t __user *stxp = NULL;
2301 __u64 __user *flagsp = NULL;
2302 __u32 __user *lmmsizep = NULL;
2303 struct lu_fid __user *fidp = NULL;
2307 if (cmd == IOC_MDC_GETFILEINFO_V1 ||
2308 cmd == IOC_MDC_GETFILEINFO_V2 ||
2309 cmd == IOC_MDC_GETFILESTRIPE) {
2310 filename = ll_getname(uarg);
2311 if (IS_ERR(filename))
2312 RETURN(PTR_ERR(filename));
2314 rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
2315 &lmmsize, &request);
2317 rc = ll_dir_getstripe_default(inode, (void **)&lmm,
2323 body = req_capsule_server_get(&request->rq_pill,
2325 LASSERT(body != NULL);
/* -ENODATA (no layout) is not fatal for the *GETINFO* forms: stat
 * data can still be returned with a zeroed layout */
2330 if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO_V1 ||
2331 cmd == LL_IOC_MDC_GETINFO_V1 ||
2332 cmd == IOC_MDC_GETFILEINFO_V2 ||
2333 cmd == LL_IOC_MDC_GETINFO_V2)) {
/* pick the user-space destination pointers per command variant */
2341 if (cmd == IOC_MDC_GETFILESTRIPE ||
2342 cmd == LL_IOC_LOV_GETSTRIPE ||
2343 cmd == LL_IOC_LOV_GETSTRIPE_NEW) {
2345 } else if (cmd == IOC_MDC_GETFILEINFO_V1 ||
2346 cmd == LL_IOC_MDC_GETINFO_V1){
2347 struct lov_user_mds_data_v1 __user *lmdp;
2350 statp = &lmdp->lmd_st;
2351 lump = &lmdp->lmd_lmm;
2353 struct lov_user_mds_data __user *lmdp;
2356 fidp = &lmdp->lmd_fid;
2357 stxp = &lmdp->lmd_stx;
2358 flagsp = &lmdp->lmd_flags;
2359 lmmsizep = &lmdp->lmd_lmmsize;
2360 lump = &lmdp->lmd_lmm;
2364 /* If the file has no striping then zero out *lump so
2365 * that the caller isn't confused by garbage.
2367 if (clear_user(lump, sizeof(*lump)))
2368 GOTO(out_req, rc = -EFAULT);
2369 } else if (copy_to_user(lump, lmm, lmmsize)) {
/* full copy failed: retry with just the header before erroring */
2370 if (copy_to_user(lump, lmm, sizeof(*lump)))
2371 GOTO(out_req, rc = -EFAULT);
2374 api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
/* V1 forms return an lstat_t built from the mdt_body */
2376 if (cmd == IOC_MDC_GETFILEINFO_V1 ||
2377 cmd == LL_IOC_MDC_GETINFO_V1) {
2380 st.st_dev = inode->i_sb->s_dev;
2381 st.st_mode = body->mbo_mode;
2382 st.st_nlink = body->mbo_nlink;
2383 st.st_uid = body->mbo_uid;
2384 st.st_gid = body->mbo_gid;
2385 st.st_rdev = body->mbo_rdev;
/* without the encryption key, report size rounded to the crypto unit */
2386 if (llcrypt_require_key(inode) == -ENOKEY)
2387 st.st_size = round_up(st.st_size,
2388 LUSTRE_ENCRYPTION_UNIT_SIZE);
2390 st.st_size = body->mbo_size;
2391 st.st_blksize = PAGE_SIZE;
2392 st.st_blocks = body->mbo_blocks;
2393 st.st_atime = body->mbo_atime;
2394 st.st_mtime = body->mbo_mtime;
2395 st.st_ctime = body->mbo_ctime;
2396 st.st_ino = cl_fid_build_ino(&body->mbo_fid1,
2399 if (copy_to_user(statp, &st, sizeof(st)))
2400 GOTO(out_req, rc = -EFAULT);
/* V2 forms return an lstatx_t plus fid, flags and lmmsize */
2401 } else if (cmd == IOC_MDC_GETFILEINFO_V2 ||
2402 cmd == LL_IOC_MDC_GETINFO_V2) {
2403 lstatx_t stx = { 0 };
2404 __u64 valid = body->mbo_valid;
2406 stx.stx_blksize = PAGE_SIZE;
2407 stx.stx_nlink = body->mbo_nlink;
2408 stx.stx_uid = body->mbo_uid;
2409 stx.stx_gid = body->mbo_gid;
2410 stx.stx_mode = body->mbo_mode;
2411 stx.stx_ino = cl_fid_build_ino(&body->mbo_fid1,
2413 if (llcrypt_require_key(inode) == -ENOKEY)
2414 stx.stx_size = round_up(stx.stx_size,
2415 LUSTRE_ENCRYPTION_UNIT_SIZE);
2417 stx.stx_size = body->mbo_size;
2418 stx.stx_blocks = body->mbo_blocks;
2419 stx.stx_atime.tv_sec = body->mbo_atime;
2420 stx.stx_ctime.tv_sec = body->mbo_ctime;
2421 stx.stx_mtime.tv_sec = body->mbo_mtime;
2422 stx.stx_btime.tv_sec = body->mbo_btime;
2423 stx.stx_rdev_major = MAJOR(body->mbo_rdev);
2424 stx.stx_rdev_minor = MINOR(body->mbo_rdev);
2425 stx.stx_dev_major = MAJOR(inode->i_sb->s_dev);
2426 stx.stx_dev_minor = MINOR(inode->i_sb->s_dev);
2427 stx.stx_mask |= STATX_BASIC_STATS | STATX_BTIME;
2429 stx.stx_attributes_mask = STATX_ATTR_IMMUTABLE |
2431 #ifdef HAVE_LUSTRE_CRYPTO
2432 stx.stx_attributes_mask |= STATX_ATTR_ENCRYPTED;
2434 if (body->mbo_valid & OBD_MD_FLFLAGS) {
2435 stx.stx_attributes |= body->mbo_flags;
2436 /* if Lustre specific LUSTRE_ENCRYPT_FL flag is
2437 * set, also set ext4 equivalent to please statx
2439 if (body->mbo_flags & LUSTRE_ENCRYPT_FL)
2440 stx.stx_attributes |=
2441 STATX_ATTR_ENCRYPTED;
2444 /* For a striped directory, the size and blocks returned
2445 * from MDT is not correct.
2446 * The size and blocks are aggregated by client across
2448 * Thus for a striped directory, do not return the valid
2449 * FLSIZE and FLBLOCKS flags to the caller.
2450 * However, this whould be better decided by the MDS
2451 * instead of the client.
2453 if (cmd == LL_IOC_MDC_GETINFO_V2 &&
2454 ll_dir_striped(inode))
2455 valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
2457 if (flagsp && copy_to_user(flagsp, &valid,
2459 GOTO(out_req, rc = -EFAULT);
2461 if (fidp && copy_to_user(fidp, &body->mbo_fid1,
2463 GOTO(out_req, rc = -EFAULT);
/* drop SIZE/BLOCKS from the statx mask when the server said invalid */
2465 if (!(valid & OBD_MD_FLSIZE))
2466 stx.stx_mask &= ~STATX_SIZE;
2467 if (!(valid & OBD_MD_FLBLOCKS))
2468 stx.stx_mask &= ~STATX_BLOCKS;
2470 if (stxp && copy_to_user(stxp, &stx, sizeof(stx)))
2471 GOTO(out_req, rc = -EFAULT);
2473 if (lmmsizep && copy_to_user(lmmsizep, &lmmsize,
2475 GOTO(out_req, rc = -EFAULT);
2480 ptlrpc_req_finished(request);
2481 ptlrpc_req_finished(root_request);
2483 ll_putname(filename);
/* quotactl: copy the fixed struct (plus pool name when applicable)
 * and delegate to quotactl_ioctl() above */
2486 case OBD_IOC_QUOTACTL: {
2487 struct if_quotactl *qctl;
2488 int qctl_len = sizeof(*qctl) + LOV_MAXPOOLNAME + 1;
2490 OBD_ALLOC(qctl, qctl_len);
2494 if (copy_from_user(qctl, uarg, sizeof(*qctl)))
2495 GOTO(out_quotactl, rc = -EFAULT);
2497 if (LUSTRE_Q_CMD_IS_POOL(qctl->qc_cmd)) {
2498 char __user *from = uarg +
2499 offsetof(typeof(*qctl), qc_poolname);
2500 if (copy_from_user(qctl->qc_poolname, from,
2501 LOV_MAXPOOLNAME + 1))
2502 GOTO(out_quotactl, rc = -EFAULT);
2505 rc = quotactl_ioctl(inode->i_sb, qctl);
/* -ENODATA still carries usable data back to userspace */
2506 if ((rc == 0 || rc == -ENODATA) &&
2507 copy_to_user(uarg, qctl, sizeof(*qctl)))
2510 OBD_FREE(qctl, qctl_len);
2513 case LL_IOC_GETOBDCOUNT: {
2515 struct obd_export *exp;
2517 if (copy_from_user(&count, uarg, sizeof(count)))
2520 /* get ost count when count is zero, get mdt count otherwise */
2521 exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
2522 vallen = sizeof(count);
2523 rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
2524 KEY_TGT_COUNT, &vallen, &count);
2526 CERROR("%s: get target count failed: rc = %d\n",
2527 sbi->ll_fsname, rc);
2531 if (copy_to_user(uarg, &count, sizeof(count)))
2536 case LL_IOC_GET_CONNECT_FLAGS:
2537 RETURN(obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, uarg));
/* resolve a FID to the index of the MDT holding it */
2538 case LL_IOC_FID2MDTIDX: {
2539 struct obd_export *exp = ll_i2mdexp(inode);
2543 if (copy_from_user(&fid, uarg, sizeof(fid)))
2546 /* Call mdc_iocontrol */
2547 rc = obd_iocontrol(LL_IOC_FID2MDTIDX, exp, sizeof(fid), &fid,
2548 (__u32 __user *)&index);
/* HSM request: variable-size struct, copied twice (header then full);
 * HUA_RELEASE is executed on the client per-fid via ll_hsm_release() */
2554 case LL_IOC_HSM_REQUEST: {
2555 struct hsm_user_request *hur;
2562 /* We don't know the true size yet; copy the fixed-size part */
/* NOTE(review): hur's initial allocation is in lines missing from this
 * excerpt (presumably OBD_ALLOC_PTR before this copy) -- TODO confirm */
2563 if (copy_from_user(hur, uarg, sizeof(*hur))) {
2568 /* Compute the whole struct size */
2569 totalsize = hur_len(hur);
2574 /* Final size will be more than double totalsize */
2575 if (totalsize >= MDS_MAXREQSIZE / 3)
2578 OBD_ALLOC_LARGE(hur, totalsize);
2582 /* Copy the whole struct */
2583 if (copy_from_user(hur, uarg, totalsize))
2584 GOTO(out_hur, rc = -EFAULT);
2586 if (hur->hur_request.hr_action == HUA_RELEASE) {
2587 const struct lu_fid *fid;
2591 for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
2592 fid = &hur->hur_user_item[i].hui_fid;
2593 f = search_inode_for_lustre(inode->i_sb, fid);
2599 rc = ll_hsm_release(f);
2605 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
2609 OBD_FREE_LARGE(hur, totalsize);
/* translate userspace hsm_progress into the kernel variant and send */
2613 case LL_IOC_HSM_PROGRESS: {
2614 struct hsm_progress_kernel hpk;
2615 struct hsm_progress hp;
2617 if (copy_from_user(&hp, uarg, sizeof(hp)))
2620 hpk.hpk_fid = hp.hp_fid;
2621 hpk.hpk_cookie = hp.hp_cookie;
2622 hpk.hpk_extent = hp.hp_extent;
2623 hpk.hpk_flags = hp.hp_flags;
2624 hpk.hpk_errval = hp.hp_errval;
2625 hpk.hpk_data_version = 0;
2627 /* File may not exist in Lustre; all progress
2628 * reported to Lustre root
2630 rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
2634 case LL_IOC_HSM_CT_START:
2635 if (!capable(CAP_SYS_ADMIN))
2638 rc = copy_and_ct_start(cmd, sbi->ll_md_exp, uarg);
/* copytool copy start/end: round-trip an hsm_copy through the llite
 * helpers, writing the updated struct back to userspace */
2641 case LL_IOC_HSM_COPY_START: {
2642 struct hsm_copy *copy;
2645 OBD_ALLOC_PTR(copy);
2648 if (copy_from_user(copy, uarg, sizeof(*copy))) {
2653 rc = ll_ioc_copy_start(inode->i_sb, copy);
2654 if (copy_to_user(uarg, copy, sizeof(*copy)))
2660 case LL_IOC_HSM_COPY_END: {
2661 struct hsm_copy *copy;
2664 OBD_ALLOC_PTR(copy);
2667 if (copy_from_user(copy, uarg, sizeof(*copy))) {
2672 rc = ll_ioc_copy_end(inode->i_sb, copy);
2673 if (copy_to_user(uarg, copy, sizeof(*copy)))
/* migrate a directory entry to another MDT; name in inlbuf1, target
 * lmv_user_md in inlbuf2 */
2679 case LL_IOC_MIGRATE: {
2680 struct lmv_user_md *lum;
2687 rc = obd_ioctl_getdata(&data, &len, uarg);
2691 if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
2692 data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0)
2693 GOTO(migrate_free, rc = -EINVAL);
2695 filename = data->ioc_inlbuf1;
2696 namelen = data->ioc_inllen1;
2697 flags = data->ioc_type;
2699 if (namelen < 1 || namelen != strlen(filename) + 1) {
2700 CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
2701 GOTO(migrate_free, rc = -EINVAL);
2704 lum = (struct lmv_user_md *)data->ioc_inlbuf2;
2705 if (lum->lum_magic != LMV_USER_MAGIC &&
2706 lum->lum_magic != LMV_USER_MAGIC_SPECIFIC) {
2708 CERROR("%s: wrong lum magic %x: rc = %d\n",
2709 filename, lum->lum_magic, rc);
2710 GOTO(migrate_free, rc);
2713 rc = ll_migrate(inode, file, lum, filename, flags);
2715 OBD_FREE_LARGE(data, len);
/* ladvise2: currently only LU_LADVISE_AHEAD (stat-ahead) is accepted,
 * after validating range, mode and access flags */
2719 case LL_IOC_LADVISE2: {
2720 struct llapi_lu_ladvise2 *ladvise;
2722 OBD_ALLOC_PTR(ladvise);
2723 if (ladvise == NULL)
2726 if (copy_from_user(ladvise, uarg, sizeof(*ladvise)))
2727 GOTO(out_ladvise, rc = -EFAULT);
2729 switch (ladvise->lla_advice) {
2730 case LU_LADVISE_AHEAD:
2731 if (ladvise->lla_start >= ladvise->lla_end) {
2733 "%s: Invalid range (%llu %llu) for %s\n",
2734 sbi->ll_fsname, ladvise->lla_start,
2736 ladvise_names[ladvise->lla_advice]);
2737 GOTO(out_ladvise, rc = -EINVAL);
2741 * Currently we only support name indexing format
2744 if (ladvise->lla_ahead_mode != LU_AH_NAME_INDEX) {
2746 "%s: Invalid access mode (%d) for %s\n",
2747 sbi->ll_fsname, ladvise->lla_ahead_mode,
2748 ladvise_names[ladvise->lla_advice]);
2749 GOTO(out_ladvise, rc = -EINVAL);
2752 /* Currently we only support stat-ahead operations. */
2753 if (!(ladvise->lla_access_flags & ACCESS_FL_STAT)) {
2755 "%s: Invalid access flags (%x) for %s\n",
2757 ladvise->lla_access_flags,
2758 ladvise_names[ladvise->lla_advice]);
2759 GOTO(out_ladvise, rc = -EINVAL);
2762 rc = ll_ioctl_ahead(file, ladvise);
2768 OBD_FREE_PTR(ladvise);
/* detach a file from PCC by FID: look the inode up in the icache and
 * detach only if present, a regular file, and owned by the caller */
2771 case LL_IOC_PCC_DETACH_BY_FID: {
2772 struct lu_pcc_detach_fid *detach;
2774 struct inode *inode2;
2777 OBD_ALLOC_PTR(detach);
2781 if (copy_from_user(detach, uarg, sizeof(*detach)))
2782 GOTO(out_detach, rc = -EFAULT);
2784 fid = &detach->pccd_fid;
2785 ino = cl_fid_build_ino(fid, ll_need_32bit_api(sbi));
2786 inode2 = ilookup5(inode->i_sb, ino, ll_test_inode_by_fid, fid);
2788 /* Target inode is not in inode cache, and PCC file
2789 * has aleady released, return immdiately.
2791 GOTO(out_detach, rc = 0);
2793 if (!S_ISREG(inode2->i_mode))
2794 GOTO(out_iput, rc = -EINVAL);
2796 if (!inode_owner_or_capable(&nop_mnt_idmap, inode2))
2797 GOTO(out_iput, rc = -EPERM);
2799 rc = pcc_ioctl_detach(inode2, detach->pccd_opt);
2803 OBD_FREE_PTR(detach);
/* default: try the generic llite handler, then fall through to the
 * data (OST) export */
2807 rc = ll_iocontrol(inode, file, cmd, uarg);
2810 RETURN(obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, uarg));
/* llseek for Lustre directories.  Directory positions are readdir hash
 * cookies, not byte offsets; the valid end-of-directory offset differs
 * between 32-bit-API clients (LL_DIR_END_OFF_32BIT) and 64-bit ones
 * (LL_DIR_END_OFF).  NOTE(review): several interior lines (the origin
 * switch cases, validity test and closing braces) are elided from this
 * listing; comments describe only what is visible. */
2814 static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
2816 struct inode *inode = file->f_mapping->host;
2817 struct ll_file_data *fd = file->private_data;
2818 struct ll_sb_info *sbi = ll_i2sbi(inode);
2819 int api32 = ll_need_32bit_api(sbi);
2820 loff_t ret = -EINVAL;
/* Serialize against concurrent readdir/seek on this directory. */
2823 ll_inode_lock(inode);
/* SEEK_CUR: relative to the current readdir position. */
2828 offset += file->f_pos;
/* SEEK_END: relative to the API-dependent end-of-dir sentinel. */
2834 offset += LL_DIR_END_OFF_32BIT;
2836 offset += LL_DIR_END_OFF;
/* Accept the offset only if it is within the hash range valid for
 * this client's API width. */
2843 ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
2844 (!api32 && offset <= LL_DIR_END_OFF))) {
2845 if (offset != file->f_pos) {
2848 hash64 = test_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
/* End sentinel maps to the MDS end-of-directory cookie. */
2849 if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
2850 (!api32 && offset == LL_DIR_END_OFF))
2851 fd->lfd_pos = MDS_DIR_END_OFF;
/* 32-bit API on a 64-bit-hash server: widen the 32-bit
 * cookie back into the top half of the 64-bit hash. */
2852 else if (api32 && hash64)
2853 fd->lfd_pos = offset << 32;
2855 fd->lfd_pos = offset;
2856 file->f_pos = offset;
/* Invalidate cached readdir state tied to the old position. */
2857 file->f_version = 0;
2864 ll_inode_unlock(inode);
2868 static int ll_dir_open(struct inode *inode, struct file *file)
2871 RETURN(ll_file_open(inode, file));
2874 static int ll_dir_release(struct inode *inode, struct file *file)
2877 RETURN(ll_file_release(inode, file));
2880 /* notify error if partially read striped directory */
2881 static int ll_dir_flush(struct file *file, fl_owner_t id)
2883 struct ll_file_data *lfd = file->private_data;
2884 int rc = lfd->fd_partial_readdir_rc;
2886 lfd->fd_partial_readdir_rc = 0;
/* Directory file operations wired into the VFS.  NOTE(review): the
 * #else/#endif of the HAVE_DIR_CONTEXT conditional and the closing of
 * the initializer are elided from this listing. */
2891 const struct file_operations ll_dir_operations = {
2892 .llseek = ll_dir_seek,
2893 .open = ll_dir_open,
2894 .release = ll_dir_release,
/* Directories are not byte-readable; return -EISDIR via helper. */
2895 .read = generic_read_dir,
/* Kernels with dir_context use ->iterate_shared; older ones fall
 * back to the classic ->readdir callback. */
2896 #ifdef HAVE_DIR_CONTEXT
2897 .iterate_shared = ll_iterate,
2899 .readdir = ll_readdir,
2901 .unlocked_ioctl = ll_dir_ioctl,
/* Reports deferred partial-readdir errors at close(2) time. */
2903 .flush = ll_dir_flush,