4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
33 * Author: Peter Braam <braam@clusterfs.com>
34 * Author: Phil Schwan <phil@clusterfs.com>
35 * Author: Andreas Dilger <adilger@clusterfs.com>
38 #define DEBUG_SUBSYSTEM S_LLITE
39 #include <lustre_dlm.h>
40 #include <linux/pagemap.h>
41 #include <linux/file.h>
42 #include <linux/sched.h>
43 #include <linux/user_namespace.h>
44 #include <linux/uidgid.h>
45 #include <linux/falloc.h>
46 #include <linux/ktime.h>
48 #include <uapi/linux/lustre/lustre_ioctl.h>
49 #include <uapi/linux/llcrypt.h>
50 #include <lustre_swab.h>
52 #include "cl_object.h"
53 #include "llite_internal.h"
54 #include "vvp_internal.h"
57 struct inode *sp_inode;
62 __u64 pa_data_version;
68 ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
70 static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
73 static struct ll_file_data *ll_file_data_get(void)
75 struct ll_file_data *fd;
77 OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, GFP_NOFS);
81 fd->fd_write_failed = false;
82 pcc_file_init(&fd->fd_pcc_file);
87 static void ll_file_data_put(struct ll_file_data *fd)
90 OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
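/*
 * ll_file_data_get()/ll_file_data_put() allocate and free the per-open
 * ll_file_data from its dedicated slab cache; GFP_NOFS is used so the
 * allocation cannot recurse back into the filesystem under memory pressure.
 */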
94 * Packs all the attributes into @op_data for the CLOSE rpc.
96 static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
97 struct obd_client_handle *och)
101 ll_prep_md_op_data(op_data, inode, NULL, NULL,
102 0, 0, LUSTRE_OPC_ANY, NULL);
104 op_data->op_attr.ia_mode = inode->i_mode;
105 op_data->op_attr.ia_atime = inode->i_atime;
106 op_data->op_attr.ia_mtime = inode->i_mtime;
107 op_data->op_attr.ia_ctime = inode->i_ctime;
108 /* In case of encrypted file without the key, visible size was rounded
109 * up to next LUSTRE_ENCRYPTION_UNIT_SIZE, and clear text size was
110 * stored into lli_lazysize in ll_merge_attr(), so set proper file size
111 * now that we are closing.
113 if (llcrypt_require_key(inode) == -ENOKEY &&
114 ll_i2info(inode)->lli_attr_valid & OBD_MD_FLLAZYSIZE)
115 op_data->op_attr.ia_size = ll_i2info(inode)->lli_lazysize;
117 op_data->op_attr.ia_size = i_size_read(inode);
118 op_data->op_attr.ia_valid |= (ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
119 ATTR_MTIME | ATTR_MTIME_SET |
121 op_data->op_xvalid |= OP_XVALID_CTIME_SET;
122 op_data->op_attr_blocks = inode->i_blocks;
123 op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
124 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
125 op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
126 op_data->op_open_handle = och->och_open_handle;
128 if (och->och_flags & FMODE_WRITE &&
129 test_and_clear_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags))
130 /* For HSM: if inode data has been modified, pack it so that
131 * MDT can set data dirty flag in the archive. */
132 op_data->op_bias |= MDS_DATA_MODIFIED;
138 * Perform a close, possibly with a bias.
139 * The meaning of "data" depends on the value of "bias".
141 * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
142 * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
145 static int ll_close_inode_openhandle(struct inode *inode,
146 struct obd_client_handle *och,
147 enum mds_op_bias bias, void *data)
149 struct obd_export *md_exp = ll_i2mdexp(inode);
150 const struct ll_inode_info *lli = ll_i2info(inode);
151 struct md_op_data *op_data;
152 struct ptlrpc_request *req = NULL;
156 if (class_exp2obd(md_exp) == NULL) {
157 CERROR("%s: invalid MDC connection handle closing "DFID"\n",
158 ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
162 OBD_ALLOC_PTR(op_data);
163 /* We leak the openhandle and request here on error, but not much can be
164 * done in the OOM case since the app won't retry the close on error either. */
166 GOTO(out, rc = -ENOMEM);
168 ll_prepare_close(inode, op_data, och);
170 case MDS_CLOSE_LAYOUT_MERGE:
171 /* merge blocks from the victim inode */
172 op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
173 op_data->op_attr.ia_valid |= ATTR_SIZE;
174 op_data->op_xvalid |= OP_XVALID_BLOCKS;
176 case MDS_CLOSE_LAYOUT_SPLIT:
177 case MDS_CLOSE_LAYOUT_SWAP: {
178 struct split_param *sp = data;
180 LASSERT(data != NULL);
181 op_data->op_bias |= bias;
182 op_data->op_data_version = 0;
183 op_data->op_lease_handle = och->och_lease_handle;
184 if (bias == MDS_CLOSE_LAYOUT_SPLIT) {
185 op_data->op_fid2 = *ll_inode2fid(sp->sp_inode);
186 op_data->op_mirror_id = sp->sp_mirror_id;
188 op_data->op_fid2 = *ll_inode2fid(data);
193 case MDS_CLOSE_RESYNC_DONE: {
194 struct ll_ioc_lease *ioc = data;
196 LASSERT(data != NULL);
197 op_data->op_attr_blocks +=
198 ioc->lil_count * op_data->op_attr_blocks;
199 op_data->op_attr.ia_valid |= ATTR_SIZE;
200 op_data->op_xvalid |= OP_XVALID_BLOCKS;
201 op_data->op_bias |= MDS_CLOSE_RESYNC_DONE;
203 op_data->op_lease_handle = och->och_lease_handle;
204 op_data->op_data = &ioc->lil_ids[0];
205 op_data->op_data_size =
206 ioc->lil_count * sizeof(ioc->lil_ids[0]);
210 case MDS_PCC_ATTACH: {
211 struct pcc_param *param = data;
213 LASSERT(data != NULL);
214 op_data->op_bias |= MDS_HSM_RELEASE | MDS_PCC_ATTACH;
215 op_data->op_archive_id = param->pa_archive_id;
216 op_data->op_data_version = param->pa_data_version;
217 op_data->op_lease_handle = och->och_lease_handle;
221 case MDS_HSM_RELEASE:
222 LASSERT(data != NULL);
223 op_data->op_bias |= MDS_HSM_RELEASE;
224 op_data->op_data_version = *(__u64 *)data;
225 op_data->op_lease_handle = och->och_lease_handle;
226 op_data->op_attr.ia_valid |= ATTR_SIZE;
227 op_data->op_xvalid |= OP_XVALID_BLOCKS;
231 LASSERT(data == NULL);
235 if (!(op_data->op_attr.ia_valid & ATTR_SIZE))
236 op_data->op_xvalid |= OP_XVALID_LAZYSIZE;
237 if (!(op_data->op_xvalid & OP_XVALID_BLOCKS))
238 op_data->op_xvalid |= OP_XVALID_LAZYBLOCKS;
240 rc = md_close(md_exp, op_data, och->och_mod, &req);
241 if (rc != 0 && rc != -EINTR)
242 CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
243 md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc);
245 if (rc == 0 && op_data->op_bias & bias) {
246 struct mdt_body *body;
248 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
249 if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
252 if (bias & MDS_PCC_ATTACH) {
253 struct pcc_param *param = data;
255 param->pa_layout_gen = body->mbo_layout_gen;
259 ll_finish_md_op_data(op_data);
263 md_clear_open_replay_data(md_exp, och);
264 och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
267 ptlrpc_req_finished(req); /* This is close request */
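/*
 * Close the cached MDS open handle matching @fmode (read, write or exec)
 * via ll_close_inode_openhandle(), unless other users of that open mode
 * remain, in which case the handle is kept for them.
 */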
271 int ll_md_real_close(struct inode *inode, fmode_t fmode)
273 struct ll_inode_info *lli = ll_i2info(inode);
274 struct obd_client_handle **och_p;
275 struct obd_client_handle *och;
280 if (fmode & FMODE_WRITE) {
281 och_p = &lli->lli_mds_write_och;
282 och_usecount = &lli->lli_open_fd_write_count;
283 } else if (fmode & FMODE_EXEC) {
284 och_p = &lli->lli_mds_exec_och;
285 och_usecount = &lli->lli_open_fd_exec_count;
287 LASSERT(fmode & FMODE_READ);
288 och_p = &lli->lli_mds_read_och;
289 och_usecount = &lli->lli_open_fd_read_count;
292 mutex_lock(&lli->lli_och_mutex);
293 if (*och_usecount > 0) {
294 /* There are still users of this handle, so skip
296 mutex_unlock(&lli->lli_och_mutex);
302 mutex_unlock(&lli->lli_och_mutex);
305 /* There might be a race and this handle may already
307 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
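/*
 * Per-descriptor close: drop any group lock, clean up a leftover lease or
 * fd_och, decrement the open count for this descriptor's open mode, and only
 * talk to the MDS when no cached OPEN lock allows skipping the close RPC.
 */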
313 static int ll_md_close(struct inode *inode, struct file *file)
315 union ldlm_policy_data policy = {
316 .l_inodebits = { MDS_INODELOCK_OPEN },
318 __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
319 struct ll_file_data *fd = file->private_data;
320 struct ll_inode_info *lli = ll_i2info(inode);
321 struct lustre_handle lockh;
322 enum ldlm_mode lockmode;
326 /* clear group lock, if present */
327 if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
328 ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
330 mutex_lock(&lli->lli_och_mutex);
331 if (fd->fd_lease_och != NULL) {
333 struct obd_client_handle *lease_och;
335 lease_och = fd->fd_lease_och;
336 fd->fd_lease_och = NULL;
337 mutex_unlock(&lli->lli_och_mutex);
339 /* Usually the lease is not released when the
340 * application crashes, so we need to release it here. */
341 rc = ll_lease_close(lease_och, inode, &lease_broken);
343 mutex_lock(&lli->lli_och_mutex);
345 CDEBUG_LIMIT(rc ? D_ERROR : D_INODE,
346 "Clean up lease "DFID" %d/%d\n",
347 PFID(&lli->lli_fid), rc, lease_broken);
350 if (fd->fd_och != NULL) {
351 struct obd_client_handle *och;
355 mutex_unlock(&lli->lli_och_mutex);
357 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
361 /* Let's see if we have a good enough OPEN lock on the file and if
362 we can skip talking to the MDS */
363 if (fd->fd_omode & FMODE_WRITE) {
365 LASSERT(lli->lli_open_fd_write_count);
366 lli->lli_open_fd_write_count--;
367 } else if (fd->fd_omode & FMODE_EXEC) {
369 LASSERT(lli->lli_open_fd_exec_count);
370 lli->lli_open_fd_exec_count--;
373 LASSERT(lli->lli_open_fd_read_count);
374 lli->lli_open_fd_read_count--;
376 mutex_unlock(&lli->lli_och_mutex);
378 /* LU-4398: do not cache write open lock if the file has exec bit */
379 if ((lockmode == LCK_CW && inode->i_mode & S_IXUGO) ||
380 !md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
381 LDLM_IBITS, &policy, lockmode, &lockh))
382 rc = ll_md_real_close(inode, fd->fd_omode);
385 file->private_data = NULL;
386 ll_file_data_put(fd);
391 /* While this function returns an error code, the fput() in the caller does
392 * not, so we need to make every effort to clean up all of our state here.
393 * Also, applications rarely check close errors, and even if an error is
394 * returned they will not retry the close call.
396 int ll_file_release(struct inode *inode, struct file *file)
398 struct ll_file_data *fd;
399 struct ll_sb_info *sbi = ll_i2sbi(inode);
400 struct ll_inode_info *lli = ll_i2info(inode);
401 ktime_t kstart = ktime_get();
406 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
407 PFID(ll_inode2fid(inode)), inode);
409 fd = file->private_data;
412 /* The last ref on @file, maybe not from the owner pid of statahead,
413 * because parent and child processes can share the same file handle. */
414 if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
415 ll_deauthorize_statahead(inode, fd);
417 if (is_root_inode(inode)) {
418 file->private_data = NULL;
419 ll_file_data_put(fd);
423 pcc_file_release(inode, file);
425 if (!S_ISDIR(inode->i_mode)) {
426 if (lli->lli_clob != NULL)
427 lov_read_and_clear_async_rc(lli->lli_clob);
428 lli->lli_async_rc = 0;
431 lli->lli_close_fd_time = ktime_get();
433 rc = ll_md_close(inode, file);
435 if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
436 libcfs_debug_dumplog();
439 if (!rc && !is_root_inode(inode))
440 ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
441 ktime_us_delta(ktime_get(), kstart));
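/*
 * Filler callback for read_cache_page() used by ll_dom_finish_open() below:
 * copy the Data-on-MDT bytes described by @data (a struct niobuf_local) into
 * the page, zero the remainder, and decrypt if the inode is encrypted.
 */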
445 static inline int ll_dom_readpage(void *data, struct page *page)
447 /* since ll_dom_readpage is a page cache helper, it is safe to assume
448 * mapping and host pointers are set here
451 struct niobuf_local *lnb = data;
455 inode = page2inode(page);
457 kaddr = kmap_atomic(page);
458 memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
459 if (lnb->lnb_len < PAGE_SIZE)
460 memset(kaddr + lnb->lnb_len, 0,
461 PAGE_SIZE - lnb->lnb_len);
462 flush_dcache_page(page);
463 SetPageUptodate(page);
464 kunmap_atomic(kaddr);
466 if (inode && IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
467 if (!llcrypt_has_encryption_key(inode))
468 CDEBUG(D_SEC, "no enc key for "DFID"\n",
469 PFID(ll_inode2fid(inode)));
471 unsigned int offs = 0;
473 while (offs < PAGE_SIZE) {
474 /* decrypt only if page is not empty */
475 if (memcmp(page_address(page) + offs,
476 page_address(ZERO_PAGE(0)),
477 LUSTRE_ENCRYPTION_UNIT_SIZE) == 0)
480 rc = llcrypt_decrypt_pagecache_blocks(page,
481 LUSTRE_ENCRYPTION_UNIT_SIZE,
486 offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
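/*
 * Handle read-on-open: if the MDS packed inline DOM file data into the open
 * reply (RMF_NIOBUF_INLINE), copy it into the page cache so the first reads
 * after open can be served without additional RPCs.
 */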
495 void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req)
499 struct ll_inode_info *lli = ll_i2info(inode);
500 struct cl_object *obj = lli->lli_clob;
501 struct address_space *mapping = inode->i_mapping;
503 struct niobuf_remote *rnb;
504 struct mdt_body *body;
506 unsigned long index, start;
507 struct niobuf_local lnb;
516 if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
520 rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
521 if (rnb == NULL || rnb->rnb_len == 0)
524 /* LU-11595: the server may return the whole file, which is always OK, or
525 * it may return just the file tail, whose offset must be aligned with the
526 * client PAGE_SIZE to be usable on this client; if the server's PAGE_SIZE
527 * is smaller, the offset may not be aligned and that data is just ignored.
529 if (rnb->rnb_offset & ~PAGE_MASK)
532 /* The server returns either the whole file or just the file tail, whichever
533 * fits in the reply buffer; in both cases the total size should equal the file size.
535 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
536 if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size &&
537 !(inode && IS_ENCRYPTED(inode))) {
538 CERROR("%s: server returns off/len %llu/%u but size %llu\n",
539 ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
540 rnb->rnb_len, body->mbo_dom_size);
544 env = cl_env_get(&refcheck);
547 io = vvp_env_thread_io(env);
549 io->ci_ignore_layout = 1;
550 rc = cl_io_init(env, io, CIT_MISC, obj);
554 CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
555 rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);
557 data = (char *)rnb + sizeof(*rnb);
559 lnb.lnb_file_offset = rnb->rnb_offset;
560 start = lnb.lnb_file_offset >> PAGE_SHIFT;
562 LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
563 lnb.lnb_page_offset = 0;
565 struct cl_page *page;
567 lnb.lnb_data = data + (index << PAGE_SHIFT);
568 lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
569 if (lnb.lnb_len > PAGE_SIZE)
570 lnb.lnb_len = PAGE_SIZE;
572 vmpage = read_cache_page(mapping, index + start,
573 ll_dom_readpage, &lnb);
574 if (IS_ERR(vmpage)) {
575 CWARN("%s: cannot fill page %lu for "DFID
576 " with data: rc = %li\n",
577 ll_i2sbi(inode)->ll_fsname, index + start,
578 PFID(lu_object_fid(&obj->co_lu)),
583 if (vmpage->mapping == NULL) {
586 /* page was truncated */
589 /* attach VM page to CL page cache */
590 page = cl_page_find(env, obj, vmpage->index, vmpage,
593 ClearPageUptodate(vmpage);
598 cl_page_export(env, page, 1);
599 cl_page_put(env, page);
603 } while (rnb->rnb_len > (index << PAGE_SHIFT));
607 cl_env_put(env, &refcheck);
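/*
 * Send an IT_OPEN intent to the MDS for @de.  The name is only packed into
 * the request when the server does not support open-by-FID (or when fault
 * injection forces open by name); otherwise the FID alone is used.
 */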
612 static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
613 struct lookup_intent *itp)
615 struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
616 struct dentry *parent = de->d_parent;
619 struct md_op_data *op_data;
620 struct ptlrpc_request *req = NULL;
624 LASSERT(parent != NULL);
625 LASSERT(itp->it_flags & MDS_OPEN_BY_FID);
627 /* if server supports open-by-fid, or file name is invalid, don't pack
628 * name in open request */
629 if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_OPEN_BY_NAME) ||
630 !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID)) {
632 len = de->d_name.len;
633 name = kmalloc(len + 1, GFP_NOFS);
638 spin_lock(&de->d_lock);
639 if (len != de->d_name.len) {
640 spin_unlock(&de->d_lock);
644 memcpy(name, de->d_name.name, len);
646 spin_unlock(&de->d_lock);
648 if (!lu_name_is_valid_2(name, len)) {
654 op_data = ll_prep_md_op_data(NULL, parent->d_inode, de->d_inode,
655 name, len, 0, LUSTRE_OPC_OPEN, NULL);
656 if (IS_ERR(op_data)) {
658 RETURN(PTR_ERR(op_data));
660 op_data->op_data = lmm;
661 op_data->op_data_size = lmmsize;
663 OBD_FAIL_TIMEOUT(OBD_FAIL_LLITE_OPEN_DELAY, cfs_fail_val);
665 rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
666 &ll_md_blocking_ast, 0);
668 ll_finish_md_op_data(op_data);
670 /* reason for keeping our own exit path - don't flood the log
671 * with -ESTALE error messages.
673 if (!it_disposition(itp, DISP_OPEN_OPEN) ||
674 it_open_error(DISP_OPEN_OPEN, itp))
676 ll_release_openhandle(de, itp);
680 if (it_disposition(itp, DISP_LOOKUP_NEG))
681 GOTO(out, rc = -ENOENT);
683 if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
684 rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
685 CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
689 rc = ll_prep_inode(&de->d_inode, &req->rq_pill, NULL, itp);
691 if (!rc && itp->it_lock_mode) {
694 /* If we got a lock back and it has a LOOKUP bit set,
695 * make sure the dentry is marked as valid so we can find it.
696 * We don't need to care about actual hashing since other parts
697 * of the kernel will deal with that later.
699 ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
700 if (bits & MDS_INODELOCK_LOOKUP) {
701 d_lustre_revalidate(de);
702 ll_update_dir_depth(parent->d_inode, de->d_inode);
705 /* if DoM bit returned along with LAYOUT bit then there
706 * can be read-on-open data returned.
708 if (bits & MDS_INODELOCK_DOM && bits & MDS_INODELOCK_LAYOUT)
709 ll_dom_finish_open(de->d_inode, req);
713 ptlrpc_req_finished(req);
714 ll_intent_drop_lock(itp);
716 /* We did open by fid, but by the time we got to the server, the object
717 * disappeared. This is possible if the object was unlinked, but it's
718 * also possible if the object was unlinked by a rename. In the case
719 * of an object renamed over our existing one, we can't fail this open.
720 * O_CREAT also goes through this path if we had an existing dentry,
721 * and it's obviously wrong to return ENOENT for O_CREAT.
723 * Instead let's return -ESTALE, and the VFS will retry the open with
724 * LOOKUP_REVAL, which we catch in ll_revalidate_dentry and fail to
725 * revalidate, causing a lookup. This causes extra lookups in the case
726 * where we had a dentry in cache but the file is being unlinked and we
727 * lose the race with unlink, but this should be very rare.
735 static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
736 struct obd_client_handle *och)
738 struct mdt_body *body;
740 body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
741 och->och_open_handle = body->mbo_open_handle;
742 och->och_fid = body->mbo_fid1;
743 och->och_lease_handle.cookie = it->it_lock_handle;
744 och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
745 och->och_flags = it->it_flags;
747 return md_set_open_replay_data(md_exp, och, it);
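/*
 * Finish the client-side part of an open: fill @och from the intent reply
 * when one is supplied, attach @fd as file->private_data and initialize the
 * readahead state for this file descriptor.
 */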
750 static int ll_local_open(struct file *file, struct lookup_intent *it,
751 struct ll_file_data *fd, struct obd_client_handle *och)
753 struct inode *inode = file_inode(file);
756 LASSERT(!file->private_data);
763 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
768 file->private_data = fd;
769 ll_readahead_init(inode, &fd->fd_ras);
770 fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
775 void ll_track_file_opens(struct inode *inode)
777 struct ll_inode_info *lli = ll_i2info(inode);
778 struct ll_sb_info *sbi = ll_i2sbi(inode);
780 /* do not skew results with delays from never-opened inodes */
781 if (ktime_to_ns(lli->lli_close_fd_time))
782 ll_stats_ops_tally(sbi, LPROC_LL_INODE_OPCLTM,
783 ktime_us_delta(ktime_get(), lli->lli_close_fd_time));
785 if (ktime_after(ktime_get(),
786 ktime_add_ms(lli->lli_close_fd_time,
787 sbi->ll_oc_max_ms))) {
788 lli->lli_open_fd_count = 1;
789 lli->lli_close_fd_time = ns_to_ktime(0);
791 lli->lli_open_fd_count++;
794 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_OCOUNT,
795 lli->lli_open_fd_count);
798 /* Open a file, and (for the very first open) create objects on the OSTs at
799 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
800 * creation or open until ll_lov_setstripe() ioctl is called.
802 * If we already have the stripe MD locally then we don't request it in
803 * md_open(), by passing a lmm_size = 0.
805 * It is up to the application to ensure no other processes open this file
806 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
807 * used. We might be able to avoid races of that sort by getting lli_open_sem
808 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
809 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
811 int ll_file_open(struct inode *inode, struct file *file)
813 struct ll_inode_info *lli = ll_i2info(inode);
814 struct lookup_intent *it, oit = { .it_op = IT_OPEN,
815 .it_flags = file->f_flags };
816 struct obd_client_handle **och_p = NULL;
817 __u64 *och_usecount = NULL;
818 struct ll_file_data *fd;
819 ktime_t kstart = ktime_get();
823 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
824 PFID(ll_inode2fid(inode)), inode, file->f_flags);
826 it = file->private_data; /* XXX: compat macro */
827 file->private_data = NULL; /* prevent ll_local_open assertion */
829 if (S_ISREG(inode->i_mode)) {
830 rc = ll_file_open_encrypt(inode, file);
832 if (it && it->it_disposition)
833 ll_release_openhandle(file_dentry(file), it);
834 GOTO(out_nofiledata, rc);
838 fd = ll_file_data_get();
840 GOTO(out_nofiledata, rc = -ENOMEM);
843 if (S_ISDIR(inode->i_mode))
844 ll_authorize_statahead(inode, fd);
846 ll_track_file_opens(inode);
847 if (is_root_inode(inode)) {
848 file->private_data = fd;
852 if (!it || !it->it_disposition) {
853 /* Convert f_flags into access mode. We cannot use file->f_mode,
854 * because everything but O_ACCMODE mask was stripped from
856 if ((oit.it_flags + 1) & O_ACCMODE)
858 if (file->f_flags & O_TRUNC)
859 oit.it_flags |= FMODE_WRITE;
861 /* The kernel only calls f_op->open in dentry_open(). filp_open() calls
862 * dentry_open() after a call to open_namei() that checks permissions.
863 * Only nfsd_open() calls dentry_open() directly without checking
864 * permissions, and because of that the code below is safe.
866 if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
867 oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
869 /* We do not want O_EXCL here, presumably we opened the file
870 * already? XXX - NFS implications? */
871 oit.it_flags &= ~O_EXCL;
873 /* bug20584: if "it_flags" contains O_CREAT, the file will be
874 * created if necessary, so "IT_CREAT" should be set to stay
875 * consistent with it */
876 if (oit.it_flags & O_CREAT)
877 oit.it_op |= IT_CREAT;
883 /* Let's see if we have file open on MDS already. */
884 if (it->it_flags & FMODE_WRITE) {
885 och_p = &lli->lli_mds_write_och;
886 och_usecount = &lli->lli_open_fd_write_count;
887 } else if (it->it_flags & FMODE_EXEC) {
888 och_p = &lli->lli_mds_exec_och;
889 och_usecount = &lli->lli_open_fd_exec_count;
891 och_p = &lli->lli_mds_read_och;
892 och_usecount = &lli->lli_open_fd_read_count;
895 mutex_lock(&lli->lli_och_mutex);
896 if (*och_p) { /* Open handle is present */
897 if (it_disposition(it, DISP_OPEN_OPEN)) {
898 /* Well, there's an extra open request that we do not need,
899 * so let's close it somehow. This will decref the request. */
900 rc = it_open_error(DISP_OPEN_OPEN, it);
902 mutex_unlock(&lli->lli_och_mutex);
903 GOTO(out_openerr, rc);
906 ll_release_openhandle(file_dentry(file), it);
910 rc = ll_local_open(file, it, fd, NULL);
913 mutex_unlock(&lli->lli_och_mutex);
914 GOTO(out_openerr, rc);
917 LASSERT(*och_usecount == 0);
918 if (!it->it_disposition) {
919 struct dentry *dentry = file_dentry(file);
920 struct ll_sb_info *sbi = ll_i2sbi(inode);
921 struct ll_dentry_data *ldd;
923 /* We cannot just request the lock handle now; the new ELC code
924 * means that one of the other OPEN locks for this file
925 * could be cancelled, and since the blocking AST handler
926 * would attempt to grab och_mutex as well, that would
927 * result in a deadlock
929 mutex_unlock(&lli->lli_och_mutex);
931 * Normally called under two situations:
933 * 2. A race/condition on MDS resulting in no open
934 * handle to be returned from LOOKUP|OPEN request,
935 * for example if the target entry was a symlink.
937 * In the NFS path we know there's pathological behavior,
938 * so we always enable open lock caching when coming
939 * from there. It's detected by setting a flag in
942 * After reaching a certain number of opens of this inode
943 * we always ask for an open lock on it to handle
944 * bad userspace actors that open and close files
945 * in a loop for absolutely no good reason
948 ldd = ll_d2d(dentry);
949 if (filename_is_volatile(dentry->d_name.name,
952 /* There really is nothing here, but this
953 * makes it more readable, I think.
954 * We do not want an open lock for volatile
955 * files under any circumstances
957 } else if (ldd && ldd->lld_nfs_dentry) {
958 /* NFS path. This also happens to catch
959 * open-by-fh files, I guess
961 it->it_flags |= MDS_OPEN_LOCK;
962 /* clear the flag for future lookups */
963 ldd->lld_nfs_dentry = 0;
964 } else if (sbi->ll_oc_thrsh_count > 0) {
965 /* Take MDS_OPEN_LOCK with many opens */
966 if (lli->lli_open_fd_count >=
967 sbi->ll_oc_thrsh_count)
968 it->it_flags |= MDS_OPEN_LOCK;
970 /* If this is open after we just closed */
971 else if (ktime_before(ktime_get(),
972 ktime_add_ms(lli->lli_close_fd_time,
973 sbi->ll_oc_thrsh_ms)))
974 it->it_flags |= MDS_OPEN_LOCK;
978 * Always specify MDS_OPEN_BY_FID because we don't want
979 * to get file with different fid.
981 it->it_flags |= MDS_OPEN_BY_FID;
982 rc = ll_intent_file_open(dentry, NULL, 0, it);
984 GOTO(out_openerr, rc);
988 OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
990 GOTO(out_och_free, rc = -ENOMEM);
994 /* md_intent_lock() didn't get a request ref if there was an
995 * open error, so don't do cleanup on the request here
997 /* XXX (green): Shouldn't we bail out on any error here, not
998 * just an open error? */
999 rc = it_open_error(DISP_OPEN_OPEN, it);
1001 GOTO(out_och_free, rc);
1003 LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
1004 "inode %p: disposition %x, status %d\n", inode,
1005 it_disposition(it, ~0), it->it_status);
1007 rc = ll_local_open(file, it, fd, *och_p);
1009 GOTO(out_och_free, rc);
1012 rc = pcc_file_open(inode, file);
1014 GOTO(out_och_free, rc);
1016 mutex_unlock(&lli->lli_och_mutex);
1020 /* Must do this outside the lli_och_mutex lock to prevent a deadlock where a
1021 different kind of OPEN lock for this same inode gets cancelled
1022 by ldlm_cancel_lru */
1023 if (!S_ISREG(inode->i_mode))
1024 GOTO(out_och_free, rc);
1025 cl_lov_delay_create_clear(&file->f_flags);
1026 GOTO(out_och_free, rc);
1030 if (och_p && *och_p) {
1031 OBD_FREE(*och_p, sizeof(struct obd_client_handle));
1032 *och_p = NULL; /* OBD_FREE writes some magic there */
1035 mutex_unlock(&lli->lli_och_mutex);
1038 if (lli->lli_opendir_key == fd)
1039 ll_deauthorize_statahead(inode, fd);
1042 ll_file_data_put(fd);
1044 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN,
1045 ktime_us_delta(ktime_get(), kstart));
1049 if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
1050 ptlrpc_req_finished(it->it_request);
1051 it_clear_disposition(it, DISP_ENQ_OPEN_REF);
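/*
 * Blocking AST for lease locks: a blocking callback simply cancels the lock
 * asynchronously; ll_lease_close_intent() later detects the broken lease by
 * checking ldlm_is_cancel() on the lease lock.
 */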
1057 static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
1058 struct ldlm_lock_desc *desc, void *data, int flag)
1061 struct lustre_handle lockh;
1065 case LDLM_CB_BLOCKING:
1066 ldlm_lock2handle(lock, &lockh);
1067 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
1069 CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
1073 case LDLM_CB_CANCELING:
1081 * When setting a lease on a file, we take ownership of the lli_mds_*_och
1082 * and save it as fd->fd_och so as to force the client to reopen the file even
1083 * if it already has an open lock in cache.
1085 static int ll_lease_och_acquire(struct inode *inode, struct file *file,
1086 struct lustre_handle *old_open_handle)
1088 struct ll_inode_info *lli = ll_i2info(inode);
1089 struct ll_file_data *fd = file->private_data;
1090 struct obd_client_handle **och_p;
1091 __u64 *och_usecount;
1095 /* Get the openhandle of the file */
1096 mutex_lock(&lli->lli_och_mutex);
1097 if (fd->fd_lease_och != NULL)
1098 GOTO(out_unlock, rc = -EBUSY);
1100 if (fd->fd_och == NULL) {
1101 if (file->f_mode & FMODE_WRITE) {
1102 LASSERT(lli->lli_mds_write_och != NULL);
1103 och_p = &lli->lli_mds_write_och;
1104 och_usecount = &lli->lli_open_fd_write_count;
1106 LASSERT(lli->lli_mds_read_och != NULL);
1107 och_p = &lli->lli_mds_read_och;
1108 och_usecount = &lli->lli_open_fd_read_count;
1111 if (*och_usecount > 1)
1112 GOTO(out_unlock, rc = -EBUSY);
1114 fd->fd_och = *och_p;
1119 *old_open_handle = fd->fd_och->och_open_handle;
1123 mutex_unlock(&lli->lli_och_mutex);
1128 * Release ownership on lli_mds_*_och when putting back a file lease.
1130 static int ll_lease_och_release(struct inode *inode, struct file *file)
1132 struct ll_inode_info *lli = ll_i2info(inode);
1133 struct ll_file_data *fd = file->private_data;
1134 struct obd_client_handle **och_p;
1135 struct obd_client_handle *old_och = NULL;
1136 __u64 *och_usecount;
1140 mutex_lock(&lli->lli_och_mutex);
1141 if (file->f_mode & FMODE_WRITE) {
1142 och_p = &lli->lli_mds_write_och;
1143 och_usecount = &lli->lli_open_fd_write_count;
1145 och_p = &lli->lli_mds_read_och;
1146 och_usecount = &lli->lli_open_fd_read_count;
1149 /* The file may have been opened by another process (broken lease), so
1150 * *och_p is not NULL. In this case we should simply increase the usecount
1153 if (*och_p != NULL) {
1154 old_och = fd->fd_och;
1157 *och_p = fd->fd_och;
1161 mutex_unlock(&lli->lli_och_mutex);
1163 if (old_och != NULL)
1164 rc = ll_close_inode_openhandle(inode, old_och, 0, NULL);
1170 * Acquire a lease and open the file.
1172 static struct obd_client_handle *
1173 ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
1176 struct lookup_intent it = { .it_op = IT_OPEN };
1177 struct ll_sb_info *sbi = ll_i2sbi(inode);
1178 struct md_op_data *op_data;
1179 struct ptlrpc_request *req = NULL;
1180 struct lustre_handle old_open_handle = { 0 };
1181 struct obd_client_handle *och = NULL;
1186 if (fmode != FMODE_WRITE && fmode != FMODE_READ)
1187 RETURN(ERR_PTR(-EINVAL));
1190 if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
1191 RETURN(ERR_PTR(-EPERM));
1193 rc = ll_lease_och_acquire(inode, file, &old_open_handle);
1195 RETURN(ERR_PTR(rc));
1200 RETURN(ERR_PTR(-ENOMEM));
1202 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
1203 LUSTRE_OPC_ANY, NULL);
1204 if (IS_ERR(op_data))
1205 GOTO(out, rc = PTR_ERR(op_data));
1207 /* To tell the MDT this openhandle is from the same owner */
1208 op_data->op_open_handle = old_open_handle;
1210 it.it_flags = fmode | open_flags;
1211 it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
1212 rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
1213 &ll_md_blocking_lease_ast,
1214 /* LDLM_FL_NO_LRU: do not put the lease lock into the LRU list, otherwise
1215 * it can be cancelled, which may mislead applications into thinking the lease is
1217 * LDLM_FL_EXCL: set this flag so that it won't be matched by a normal
1218 * open in ll_md_blocking_ast(). Otherwise, as ll_md_blocking_lease_ast()
1219 * doesn't deal with the openhandle, a normal openhandle would be leaked. */
1220 LDLM_FL_NO_LRU | LDLM_FL_EXCL);
1221 ll_finish_md_op_data(op_data);
1222 ptlrpc_req_finished(req);
1224 GOTO(out_release_it, rc);
1226 if (it_disposition(&it, DISP_LOOKUP_NEG))
1227 GOTO(out_release_it, rc = -ENOENT);
1229 rc = it_open_error(DISP_OPEN_OPEN, &it);
1231 GOTO(out_release_it, rc);
1233 LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
1234 rc = ll_och_fill(sbi->ll_md_exp, &it, och);
1236 GOTO(out_release_it, rc);
1238 if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
1239 GOTO(out_close, rc = -EOPNOTSUPP);
1241 /* lease already granted, handle the lease lock */
1242 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
1243 if (!it.it_lock_mode ||
1244 !(it.it_lock_bits & MDS_INODELOCK_OPEN)) {
1245 /* an open lock must be returned for a lease */
1246 CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
1247 PFID(ll_inode2fid(inode)), it.it_lock_mode,
1249 GOTO(out_close, rc = -EPROTO);
1252 ll_intent_release(&it);
1256 /* Cancel open lock */
1257 if (it.it_lock_mode != 0) {
1258 ldlm_lock_decref_and_cancel(&och->och_lease_handle,
1260 it.it_lock_mode = 0;
1261 och->och_lease_handle.cookie = 0ULL;
1263 rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
1265 CERROR("%s: error closing file "DFID": %d\n",
1266 sbi->ll_fsname, PFID(&ll_i2info(inode)->lli_fid), rc2);
1267 och = NULL; /* och has been freed in ll_close_inode_openhandle() */
1269 ll_intent_release(&it);
1273 RETURN(ERR_PTR(rc));
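/*
 * A lease pairs ll_lease_open() above with ll_lease_close() (or
 * ll_lease_close_intent() when a close bias is needed), roughly as in this
 * sketch (error handling omitted):
 *
 *	och = ll_lease_open(inode, file, FMODE_READ, 0);
 *	if (!IS_ERR(och))
 *		rc = ll_lease_close(och, inode, &lease_broken);
 */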
1277 * Check whether a layout swap can be done between two inodes.
1279 * \param[in] inode1 First inode to check
1280 * \param[in] inode2 Second inode to check
1282 * \retval 0 on success, layout swap can be performed between both inodes
1283 * \retval negative error code if requirements are not met
1285 static int ll_check_swap_layouts_validity(struct inode *inode1,
1286 struct inode *inode2)
1288 if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
1291 if (inode_permission(&init_user_ns, inode1, MAY_WRITE) ||
1292 inode_permission(&init_user_ns, inode2, MAY_WRITE))
1295 if (inode1->i_sb != inode2->i_sb)
1301 static int ll_swap_layouts_close(struct obd_client_handle *och,
1302 struct inode *inode, struct inode *inode2)
1304 const struct lu_fid *fid1 = ll_inode2fid(inode);
1305 const struct lu_fid *fid2;
1309 CDEBUG(D_INODE, "%s: biased close of file "DFID"\n",
1310 ll_i2sbi(inode)->ll_fsname, PFID(fid1));
1312 rc = ll_check_swap_layouts_validity(inode, inode2);
1314 GOTO(out_free_och, rc);
1316 /* We now know that inode2 is a Lustre inode */
1317 fid2 = ll_inode2fid(inode2);
1319 rc = lu_fid_cmp(fid1, fid2);
1321 GOTO(out_free_och, rc = -EINVAL);
1323 /* Close the file and {swap,merge} layouts between inode & inode2.
1324 * NB: lease lock handle is released in mdc_close_layout_swap_pack()
1325 * because we still need it to pack l_remote_handle to MDT. */
1326 rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP,
1329 och = NULL; /* freed in ll_close_inode_openhandle() */
1339 * Release the lease and close the file.
1340 * It will check whether the lease has ever been broken.
1342 static int ll_lease_close_intent(struct obd_client_handle *och,
1343 struct inode *inode,
1344 bool *lease_broken, enum mds_op_bias bias,
1347 struct ldlm_lock *lock;
1348 bool cancelled = true;
1352 lock = ldlm_handle2lock(&och->och_lease_handle);
1354 lock_res_and_lock(lock);
1355 cancelled = ldlm_is_cancel(lock);
1356 unlock_res_and_lock(lock);
1357 LDLM_LOCK_PUT(lock);
1360 CDEBUG(D_INODE, "lease for "DFID" broken? %d, bias: %x\n",
1361 PFID(&ll_i2info(inode)->lli_fid), cancelled, bias);
1363 if (lease_broken != NULL)
1364 *lease_broken = cancelled;
1366 if (!cancelled && !bias)
1367 ldlm_cli_cancel(&och->och_lease_handle, 0);
1369 if (cancelled) { /* no need to execute intent */
1374 rc = ll_close_inode_openhandle(inode, och, bias, data);
1378 static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
1381 return ll_lease_close_intent(och, inode, lease_broken, 0, NULL);
1385 * After the lease is taken, send the MDS_REINT_RESYNC RPC to the MDT
1387 static int ll_lease_file_resync(struct obd_client_handle *och,
1388 struct inode *inode, unsigned long arg)
1390 struct ll_sb_info *sbi = ll_i2sbi(inode);
1391 struct md_op_data *op_data;
1392 struct ll_ioc_lease_id ioc;
1393 __u64 data_version_unused;
1397 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1398 LUSTRE_OPC_ANY, NULL);
1399 if (IS_ERR(op_data))
1400 RETURN(PTR_ERR(op_data));
1402 if (copy_from_user(&ioc, (struct ll_ioc_lease_id __user *)arg,
1406 /* before starting file resync, it's necessary to clean up the page cache
1407 * in client memory, otherwise once the layout version is increased,
1408 * writing back cached data will be denied by the OSTs. */
1409 rc = ll_data_version(inode, &data_version_unused, LL_DV_WR_FLUSH);
1413 op_data->op_lease_handle = och->och_lease_handle;
1414 op_data->op_mirror_id = ioc.lil_mirror_id;
1415 rc = md_file_resync(sbi->ll_md_exp, op_data);
1421 ll_finish_md_op_data(op_data);
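/*
 * Merge attributes cached on the MDS with those obtained from the OSTs:
 * timestamps keep the most recent value, while size and blocks come from the
 * cl_object attributes, all updated under the inode size lock.
 */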
1425 int ll_merge_attr(const struct lu_env *env, struct inode *inode)
1427 struct ll_inode_info *lli = ll_i2info(inode);
1428 struct cl_object *obj = lli->lli_clob;
1429 struct cl_attr *attr = vvp_env_thread_attr(env);
1437 ll_inode_size_lock(inode);
1439 /* Merge timestamps the most recently obtained from MDS with
1440 * timestamps obtained from OSTs.
1442 * Do not overwrite atime of inode because it may be refreshed
1443 * by file_accessed() function. If the read was served by cache
1444 * data, there is no RPC to be sent so that atime may not be
1445 * transferred to OSTs at all. MDT only updates atime at close time
1446 * if it's at least 'mdd.*.atime_diff' older.
1447 * All in all, atime in Lustre does not strictly comply with
1448 * POSIX. Solving this problem would require sending an RPC to the MDT for
1449 * each read, which would hurt performance.
1451 if (test_and_clear_bit(LLIF_UPDATE_ATIME, &lli->lli_flags) ||
1452 inode->i_atime.tv_sec < lli->lli_atime)
1453 inode->i_atime.tv_sec = lli->lli_atime;
1455 inode->i_mtime.tv_sec = lli->lli_mtime;
1456 inode->i_ctime.tv_sec = lli->lli_ctime;
1458 mtime = inode->i_mtime.tv_sec;
1459 atime = inode->i_atime.tv_sec;
1460 ctime = inode->i_ctime.tv_sec;
1462 cl_object_attr_lock(obj);
1463 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_MERGE))
1466 rc = cl_object_attr_get(env, obj, attr);
1467 cl_object_attr_unlock(obj);
1470 GOTO(out_size_unlock, rc = (rc == -ENODATA ? 0 : rc));
1472 if (atime < attr->cat_atime)
1473 atime = attr->cat_atime;
1475 if (ctime < attr->cat_ctime)
1476 ctime = attr->cat_ctime;
1478 if (mtime < attr->cat_mtime)
1479 mtime = attr->cat_mtime;
1481 CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n",
1482 PFID(&lli->lli_fid), attr->cat_size);
1484 if (llcrypt_require_key(inode) == -ENOKEY) {
1485 /* Without the key, round up encrypted file size to next
1486 * LUSTRE_ENCRYPTION_UNIT_SIZE. Clear text size is put in
1487 * lli_lazysize for proper file size setting at close time.
1489 lli->lli_attr_valid |= OBD_MD_FLLAZYSIZE;
1490 lli->lli_lazysize = attr->cat_size;
1491 attr->cat_size = round_up(attr->cat_size,
1492 LUSTRE_ENCRYPTION_UNIT_SIZE);
1494 i_size_write(inode, attr->cat_size);
1495 inode->i_blocks = attr->cat_blocks;
1497 inode->i_mtime.tv_sec = mtime;
1498 inode->i_atime.tv_sec = atime;
1499 inode->i_ctime.tv_sec = ctime;
1502 ll_inode_size_unlock(inode);
1508 * Set the designated mirror for I/O.
1510 * So far only read, write, and truncate support issuing I/O to a
1511 * designated mirror.
1513 void ll_io_set_mirror(struct cl_io *io, const struct file *file)
1515 struct ll_file_data *fd = file->private_data;
1517 /* clear the layout version for generic (non-resync) I/O in case it carries
1518 * a stale layout version due to I/O restart */
1519 io->ci_layout_version = 0;
1521 /* FLR: disable non-delay for designated mirror I/O because obviously
1522 * only one mirror is available */
1523 if (fd->fd_designated_mirror > 0) {
1525 io->ci_designated_mirror = fd->fd_designated_mirror;
1526 io->ci_layout_version = fd->fd_layout_version;
1529 CDEBUG(D_VFSTRACE, "%s: designated mirror: %d\n",
1530 file->f_path.dentry->d_name.name, io->ci_designated_mirror);
1533 static bool file_is_noatime(const struct file *file)
1535 const struct vfsmount *mnt = file->f_path.mnt;
1536 const struct inode *inode = file_inode((struct file *)file);
1538 /* Adapted from file_accessed() and touch_atime().*/
1539 if (file->f_flags & O_NOATIME)
1542 if (inode->i_flags & S_NOATIME)
1545 if (IS_NOATIME(inode))
1548 if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
1551 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1554 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
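/*
 * Initialize a cl_io from the file's open flags: nonblocking and append/sync
 * behaviour for writes, the DLM lock requirement (never/maybe/mandatory),
 * noatime handling and the designated mirror for FLR files.
 */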
1560 void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
1561 struct vvp_io_args *args)
1563 struct inode *inode = file_inode(file);
1564 struct ll_file_data *fd = file->private_data;
1566 io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
1567 io->ci_lock_no_expand = fd->ll_lock_no_expand;
1569 if (iot == CIT_WRITE) {
1570 io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
1571 io->u.ci_wr.wr_sync = !!(file->f_flags & O_SYNC ||
1572 file->f_flags & O_DIRECT ||
1574 #ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
1575 io->u.ci_wr.wr_sync |= !!(args &&
1576 (args->u.normal.via_iocb->ki_flags &
1581 io->ci_obj = ll_i2info(inode)->lli_clob;
1582 io->ci_lockreq = CILR_MAYBE;
1583 if (ll_file_nolock(file)) {
1584 io->ci_lockreq = CILR_NEVER;
1585 io->ci_no_srvlock = 1;
1586 } else if (file->f_flags & O_APPEND) {
1587 io->ci_lockreq = CILR_MANDATORY;
1589 io->ci_noatime = file_is_noatime(file);
1590 io->ci_async_readahead = false;
1592 /* FLR: only use non-delay I/O for read as there is only one
1593 * available mirror for write. */
1594 io->ci_ndelay = !(iot == CIT_WRITE);
1596 ll_io_set_mirror(io, file);
1599 static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
1602 struct ll_inode_info *lli = ll_i2info(inode);
1603 struct ll_sb_info *sbi = ll_i2sbi(inode);
1604 enum obd_heat_type sample_type;
1605 enum obd_heat_type iobyte_type;
1606 __u64 now = ktime_get_real_seconds();
1608 if (!ll_sbi_has_file_heat(sbi) ||
1609 lli->lli_heat_flags & LU_HEAT_FLAG_OFF)
1612 if (iot == CIT_READ) {
1613 sample_type = OBD_HEAT_READSAMPLE;
1614 iobyte_type = OBD_HEAT_READBYTE;
1615 } else if (iot == CIT_WRITE) {
1616 sample_type = OBD_HEAT_WRITESAMPLE;
1617 iobyte_type = OBD_HEAT_WRITEBYTE;
1622 spin_lock(&lli->lli_heat_lock);
1623 obd_heat_add(&lli->lli_heat_instances[sample_type], now, 1,
1624 sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
1625 obd_heat_add(&lli->lli_heat_instances[iobyte_type], now, count,
1626 sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
1627 spin_unlock(&lli->lli_heat_lock);
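/*
 * Common read/write back-end: set up a cl_io for the requested range, take
 * the range lock for writes and direct reads, run cl_io_loop() (restarting
 * when needed, e.g. for FLR mirror retries or partial IO) and account the
 * result in stats and file heat.
 */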
1631 ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
1632 struct file *file, enum cl_io_type iot,
1633 loff_t *ppos, size_t count)
1635 struct vvp_io *vio = vvp_env_io(env);
1636 struct inode *inode = file_inode(file);
1637 struct ll_inode_info *lli = ll_i2info(inode);
1638 struct ll_sb_info *sbi = ll_i2sbi(inode);
1639 struct ll_file_data *fd = file->private_data;
1640 struct range_lock range;
1641 bool range_locked = false;
1646 unsigned int retried = 0, dio_lock = 0;
1647 bool is_aio = false;
1648 bool is_parallel_dio = false;
1649 struct cl_dio_aio *ci_aio = NULL;
1651 bool partial_io = false;
1652 size_t max_io_pages, max_cached_pages;
1656 CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, count: %zu\n",
1657 file_dentry(file)->d_name.name,
1658 iot == CIT_READ ? "read" : "write", *ppos, count);
1660 max_io_pages = PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT;
1661 max_cached_pages = sbi->ll_cache->ccc_lru_max;
1662 if (max_io_pages > (max_cached_pages >> 2))
1663 max_io_pages = max_cached_pages >> 2;
1665 io = vvp_env_thread_io(env);
1666 if (file->f_flags & O_DIRECT) {
1667 if (!is_sync_kiocb(args->u.normal.via_iocb))
1670 /* the kernel does not support AIO on pipes, and parallel DIO
1671 * uses part of the AIO path, so we must not do parallel dio
1674 is_parallel_dio = !iov_iter_is_pipe(args->u.normal.via_iter) &&
1677 if (!ll_sbi_has_parallel_dio(sbi))
1678 is_parallel_dio = false;
1680 ci_aio = cl_aio_alloc(args->u.normal.via_iocb,
1681 ll_i2info(inode)->lli_clob, NULL);
1683 GOTO(out, rc = -ENOMEM);
1688 * The IO block size needs to be aware of the cached page limit, otherwise
1689 * if we have a small max_cached_mb but a large block IO is issued, the IO
1690 * could never finish and would block the whole client.
1692 if (file->f_flags & O_DIRECT)
1695 per_bytes = min(max_io_pages << PAGE_SHIFT, count);
1696 partial_io = per_bytes < count;
1697 io = vvp_env_thread_io(env);
1698 ll_io_init(io, file, iot, args);
1699 io->ci_aio = ci_aio;
1700 io->ci_dio_lock = dio_lock;
1701 io->ci_ndelay_tried = retried;
1702 io->ci_parallel_dio = is_parallel_dio;
1704 if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
1705 if (file->f_flags & O_APPEND)
1706 range_lock_init(&range, 0, LUSTRE_EOF);
1708 range_lock_init(&range, *ppos, *ppos + per_bytes - 1);
1710 vio->vui_fd = file->private_data;
1711 vio->vui_iter = args->u.normal.via_iter;
1712 vio->vui_iocb = args->u.normal.via_iocb;
1713 /* Direct IO reads must also take the range lock,
1714 * or multiple reads will try to work on the same pages.
1715 * See LU-6227 for details.
1717 if (((iot == CIT_WRITE) ||
1718 (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
1719 !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1720 CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
1722 rc = range_lock(&lli->lli_write_tree, &range);
1726 range_locked = true;
1729 ll_cl_add(inode, env, io, LCC_RW);
1730 rc = cl_io_loop(env, io);
1731 ll_cl_remove(inode, env);
1733 if (range_locked && !is_parallel_dio) {
1734 CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
1736 range_unlock(&lli->lli_write_tree, &range);
1737 range_locked = false;
1740 /* cl_io_rw_init() handled IO */
1744 /* NB: parallel DIO may be disabled during I/O submission;
1745 * if that occurs, async RPCs are resolved before we get here, and this
1746 * wait call completes immediately.
1748 if (is_parallel_dio) {
1749 struct cl_sync_io *anchor = &io->ci_aio->cda_sync;
1751 /* for dio, EIOCBQUEUED is an implementation detail,
1752 * and we don't return it to userspace
1754 if (rc == -EIOCBQUEUED)
1757 rc2 = cl_sync_io_wait_recycle(env, anchor, 0, 0);
1762 range_unlock(&lli->lli_write_tree, &range);
1763 range_locked = false;
1768 * In order to move AIO forward, ci_nob was increased,
1769 * but that doesn't mean the IO has finished, it just
1770 * means the IO has been submitted; we will always return
1771 * EIOCBQUEUED to the caller, so we can only return the
1772 * number of bytes in the non-AIO case.
1774 if (io->ci_nob > 0) {
1777 result += io->ci_nob;
1778 *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
1783 count -= io->ci_nob;
1785 /* prepare IO restart */
1787 args->u.normal.via_iter = vio->vui_iter;
1791 * Reexpand iov count because it was zero
1794 iov_iter_reexpand(vio->vui_iter, count);
1795 if (per_bytes == io->ci_nob)
1796 io->ci_need_restart = 1;
1800 cl_io_fini(env, io);
1803 "%s: %d io complete with rc: %d, result: %zd, restart: %d\n",
1804 file->f_path.dentry->d_name.name,
1805 iot, rc, result, io->ci_need_restart);
1807 if ((rc == 0 || rc == -ENODATA || rc == -ENOLCK) &&
1808 count > 0 && io->ci_need_restart) {
1810 "%s: restart %s from %lld, count: %zu, ret: %zd, rc: %d\n",
1811 file_dentry(file)->d_name.name,
1812 iot == CIT_READ ? "read" : "write",
1813 *ppos, count, result, rc);
1814 /* preserve the tried count for FLR */
1815 retried = io->ci_ndelay_tried;
1816 dio_lock = io->ci_dio_lock;
1822 * VFS will call aio_complete() if no -EIOCBQUEUED
1823 * is returned for AIO, so we can not call aio_complete()
1826 if (rc != -EIOCBQUEUED)
1827 io->ci_aio->cda_no_aio_complete = 1;
1829 * Drop one extra reference so that end_io() can be
1830 * called for this IO context; we call it after
1831 * we make sure all AIO requests have been processed.
1833 cl_sync_io_note(env, &io->ci_aio->cda_sync,
1834 rc == -EIOCBQUEUED ? 0 : rc);
1836 cl_aio_free(env, io->ci_aio);
1841 if (iot == CIT_READ) {
1843 ll_stats_ops_tally(ll_i2sbi(inode),
1844 LPROC_LL_READ_BYTES, result);
1845 } else if (iot == CIT_WRITE) {
1847 ll_stats_ops_tally(ll_i2sbi(inode),
1848 LPROC_LL_WRITE_BYTES, result);
1849 fd->fd_write_failed = false;
1850 } else if (result == 0 && rc == 0) {
1853 fd->fd_write_failed = true;
1855 fd->fd_write_failed = false;
1856 } else if (rc != -ERESTARTSYS) {
1857 fd->fd_write_failed = true;
1861 CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
1863 ll_heat_add(inode, iot, result);
1865 RETURN(result > 0 ? result : rc);
1869 * The purpose of fast read is to overcome per I/O overhead and improve IOPS
1870 * especially for small I/O.
1872 * To serve a read request, CLIO has to create and initialize a cl_io and
1873 * then request a DLM lock. This has turned out to have significant overhead
1874 * and affects the performance of small I/O dramatically.
1876 * It's not necessary to create a cl_io for each I/O. With the help of read
1877 * ahead, most of the pages being read are already in the memory cache and we can
1878 * read those pages directly because if the pages exist, the corresponding DLM
1879 * lock must exist and so the page content must be valid.
1881 * In the fast read implementation, llite speculatively finds and reads pages
1882 * in the memory cache. There are three scenarios for fast read:
1883 * - If the page exists and is uptodate, the kernel VM will provide the data and
1884 * CLIO won't be involved;
1885 * - If the page was brought into memory by read ahead, it will be exported
1886 * and read ahead parameters will be updated;
1887 * - Otherwise the page is not in memory, we can't do fast read. Therefore,
1888 * it will go back and invoke normal read, i.e., a cl_io will be created
1889 * and DLM lock will be requested.
1891 * POSIX compliance: the POSIX standard states that read is intended to be atomic.
1892 * The Lustre read implementation is in line with the Linux kernel read implementation
1893 * and neither of them complies with the POSIX standard in this matter. Fast read
1894 * doesn't make the situation worse on single node but it may interleave write
1895 * results from multiple nodes due to short read handling in ll_file_aio_read().
1897 * \param env - lu_env
1898 * \param iocb - kiocb from kernel
1899 * \param iter - user space buffers where the data will be copied
1901 * \retval - number of bytes that have been read, or an error code if an error occurred.
1904 ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter)
1908 if (!ll_sbi_has_fast_read(ll_i2sbi(file_inode(iocb->ki_filp))))
1911 /* NB: we can't do direct IO for fast read because it would need a lock
1912 * to make the IO engine happy. */
1913 if (iocb->ki_filp->f_flags & O_DIRECT)
1916 result = generic_file_read_iter(iocb, iter);
1918 /* If the first page is not in the cache, generic_file_aio_read() will
1919 * return -ENODATA.
1920 * See the corresponding code in ll_readpage(). */
1921 if (result == -ENODATA)
1925 ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
1926 ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
1927 LPROC_LL_READ_BYTES, result);
1934 * Read from a file (through the page cache).
1936 static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1939 struct vvp_io_args *args;
1940 struct file *file = iocb->ki_filp;
1944 ktime_t kstart = ktime_get();
1949 CDEBUG(D_VFSTRACE|D_IOTRACE, "file %s:"DFID", ppos: %lld, count: %zu\n",
1950 file_dentry(file)->d_name.name,
1951 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
1952 iov_iter_count(to));
1954 if (!iov_iter_count(to))
1958 * Currently when a PCC read fails, we do not fall back to the
1959 * normal read path, we just return the error.
1960 * The reason is that: for RW-PCC, the file data may be modified
1961 * in the PCC and inconsistent with the data on the OSTs (or the file
1962 * data may have been removed from the Lustre file system), at this
1963 * time, falling back to the normal read path may read the wrong
1965 * TODO: for RO-PCC (readonly PCC), fall back to the normal read
1966 * path: read the data from the data copy on the OSTs.
1968 result = pcc_file_read_iter(iocb, to, &cached);
1972 ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to));
1974 result = ll_do_fast_read(iocb, to);
1975 if (result < 0 || iov_iter_count(to) == 0)
1978 env = cl_env_get(&refcheck);
1980 RETURN(PTR_ERR(env));
1982 args = ll_env_args(env);
1983 args->u.normal.via_iter = to;
1984 args->u.normal.via_iocb = iocb;
1986 rc2 = ll_file_io_generic(env, args, file, CIT_READ,
1987 &iocb->ki_pos, iov_iter_count(to));
1990 else if (result == 0)
1993 cl_env_put(env, &refcheck);
1996 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
1997 file->private_data, iocb->ki_pos, result,
1999 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ,
2000 ktime_us_delta(ktime_get(), kstart));
2007 * Similar trick to ll_do_fast_read, this improves write speed for tiny writes.
2008 * If a page is already in the page cache and dirty (and some other things -
2009 * See ll_tiny_write_begin for the instantiation of these rules), then we can
2010 * write to it without doing a full I/O, because Lustre already knows about it
2011 * and will write it out. This saves a lot of processing time.
2013 * All writes here are within one page, so exclusion is handled by the page
2014 * lock on the vm page. We do not do tiny writes for writes which touch
2015 * multiple pages because it's very unlikely that multiple sequential pages
2016 * are already dirty.
2018 * We limit these to < PAGE_SIZE because PAGE_SIZE writes are relatively common
2019 * and are unlikely to be to already dirty pages.
2021 * Attribute updates are important here, we do them in ll_tiny_write_end.
2023 static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter)
2025 ssize_t count = iov_iter_count(iter);
2026 struct file *file = iocb->ki_filp;
2027 struct inode *inode = file_inode(file);
2028 bool lock_inode = !IS_NOSEC(inode);
2033 /* Restrict writes to a single page and < PAGE_SIZE. See the comment at the
2034 * top of the function for why.
2036 if (count >= PAGE_SIZE ||
2037 (iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
2040 if (unlikely(lock_inode))
2042 result = __generic_file_write_iter(iocb, iter);
2044 if (unlikely(lock_inode))
2045 inode_unlock(inode);
2047 /* If the page is not already dirty, ll_tiny_write_begin returns
2048 * -ENODATA. We continue on to normal write.
2050 if (result == -ENODATA)
2054 ll_heat_add(inode, CIT_WRITE, result);
2055 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
2058 CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);
2064 * Write to a file (through the page cache).
2066 static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2068 struct vvp_io_args *args;
2070 ssize_t rc_tiny = 0, rc_normal;
2071 struct file *file = iocb->ki_filp;
2074 ktime_t kstart = ktime_get();
2079 CDEBUG(D_VFSTRACE|D_IOTRACE, "file %s:"DFID", ppos: %lld, count: %zu\n",
2080 file_dentry(file)->d_name.name,
2081 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
2082 iov_iter_count(from));
2084 if (!iov_iter_count(from))
2085 GOTO(out, rc_normal = 0);
2088 * When a PCC write fails, we usually do not fall back to the normal
2089 * write path, we just return the error. But there is a special case when the
2090 * returned error code is -ENOSPC due to running out of space on the PCC HSM
2091 * backend. In this case, it will fall back to the normal I/O path and
2092 * retry the I/O. As the file is in HSM released state, it will restore
2093 * the file data to OSTs first and redo the write again. And the
2094 * restore process will revoke the layout lock and detach the file
2095 * from PCC cache automatically.
2097 result = pcc_file_write_iter(iocb, from, &cached);
2098 if (cached && result != -ENOSPC && result != -EDQUOT)
2099 GOTO(out, rc_normal = result);
2101 /* NB: we can't do direct IO for tiny writes because they use the page
2102 * cache, we can't do sync writes because tiny writes can't flush
2103 * pages, and we can't do append writes because we can't guarantee the
2104 * required DLM locks are held to protect file size.
2106 if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
2107 !(file->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
2108 rc_tiny = ll_do_tiny_write(iocb, from);
2110 /* In case of error, go on and try normal write - Only stop if tiny
2111 * write completed I/O.
2113 if (iov_iter_count(from) == 0)
2114 GOTO(out, rc_normal = rc_tiny);
2116 env = cl_env_get(&refcheck);
2118 RETURN(PTR_ERR(env));
2120 args = ll_env_args(env);
2121 args->u.normal.via_iter = from;
2122 args->u.normal.via_iocb = iocb;
2124 rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE,
2125 &iocb->ki_pos, iov_iter_count(from));
2127 /* On success, combine bytes written. */
2128 if (rc_tiny >= 0 && rc_normal > 0)
2129 rc_normal += rc_tiny;
2130 /* On error, only return error from normal write if tiny write did not
2131 * write any bytes. Otherwise return bytes written by tiny write.
2133 else if (rc_tiny > 0)
2134 rc_normal = rc_tiny;
2136 cl_env_put(env, &refcheck);
2138 if (rc_normal > 0) {
2139 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
2140 file->private_data, iocb->ki_pos,
2142 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE,
2143 ktime_us_delta(ktime_get(), kstart));
2149 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
2151 * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
2153 static int ll_file_get_iov_count(const struct iovec *iov,
2154 unsigned long *nr_segs, size_t *count,
2160 for (seg = 0; seg < *nr_segs; seg++) {
2161 const struct iovec *iv = &iov[seg];
2164 * If any segment has a negative length, or the cumulative
2165 * length ever wraps negative then return -EINVAL.
2168 if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
2170 if (access_ok(access_flags, iv->iov_base, iv->iov_len))
2175 cnt -= iv->iov_len; /* This segment is no good */
2182 static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
2183 unsigned long nr_segs, loff_t pos)
2190 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_READ);
2197 # ifdef HAVE_IOV_ITER_INIT_DIRECTION
2198 iov_iter_init(&to, READ, iov, nr_segs, iov_count);
2199 # else /* !HAVE_IOV_ITER_INIT_DIRECTION */
2200 iov_iter_init(&to, iov, nr_segs, iov_count, 0);
2201 # endif /* HAVE_IOV_ITER_INIT_DIRECTION */
2203 result = ll_file_read_iter(iocb, &to);
2208 static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
2211 struct iovec iov = { .iov_base = buf, .iov_len = count };
2220 init_sync_kiocb(&kiocb, file);
2221 kiocb.ki_pos = *ppos;
2222 #ifdef HAVE_KIOCB_KI_LEFT
2223 kiocb.ki_left = count;
2224 #elif defined(HAVE_KI_NBYTES)
2225 kiocb.ki_nbytes = count;
2228 result = ll_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
2229 *ppos = kiocb.ki_pos;
2235 * Write to a file (through the page cache).
2238 static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2239 unsigned long nr_segs, loff_t pos)
2241 struct iov_iter from;
2246 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_WRITE);
2253 # ifdef HAVE_IOV_ITER_INIT_DIRECTION
2254 iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
2255 # else /* !HAVE_IOV_ITER_INIT_DIRECTION */
2256 iov_iter_init(&from, iov, nr_segs, iov_count, 0);
2257 # endif /* HAVE_IOV_ITER_INIT_DIRECTION */
2259 result = ll_file_write_iter(iocb, &from);
2264 static ssize_t ll_file_write(struct file *file, const char __user *buf,
2265 size_t count, loff_t *ppos)
2267 struct iovec iov = { .iov_base = (void __user *)buf,
2277 init_sync_kiocb(&kiocb, file);
2278 kiocb.ki_pos = *ppos;
2279 #ifdef HAVE_KIOCB_KI_LEFT
2280 kiocb.ki_left = count;
2281 #elif defined(HAVE_KI_NBYTES)
2282 kiocb.ki_nbytes = count;
2285 result = ll_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
2286 *ppos = kiocb.ki_pos;
2290 #endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
2292 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
2293 __u64 flags, struct lov_user_md *lum, int lum_size)
2295 struct lookup_intent oit = {
2297 .it_flags = flags | MDS_OPEN_BY_FID,
2302 if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
2303 le32_to_cpu(LOV_MAGIC_MAGIC)) {
2304 /* this code will only exist for big-endian systems */
2305 lustre_swab_lov_user_md(lum, 0);
2308 ll_inode_size_lock(inode);
2309 rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
2311 GOTO(out_unlock, rc);
2313 ll_release_openhandle(dentry, &oit);
2316 ll_inode_size_unlock(inode);
2317 ll_intent_release(&oit);
2322 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
2323 struct lov_mds_md **lmmp, int *lmm_size,
2324 struct ptlrpc_request **request)
2326 struct ll_sb_info *sbi = ll_i2sbi(inode);
2327 struct mdt_body *body;
2328 struct lov_mds_md *lmm = NULL;
2329 struct ptlrpc_request *req = NULL;
2330 struct md_op_data *op_data;
2335 rc = ll_get_default_mdsize(sbi, &lmmsize);
2339 op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
2340 strlen(filename), lmmsize,
2341 LUSTRE_OPC_ANY, NULL);
2342 if (IS_ERR(op_data))
2343 RETURN(PTR_ERR(op_data));
2345 op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
2346 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
2347 ll_finish_md_op_data(op_data);
2349 CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
2354 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2355 LASSERT(body != NULL); /* checked by mdc_getattr_name */
2357 lmmsize = body->mbo_eadatasize;
2359 if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
2361 GOTO(out, rc = -ENODATA);
2363 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
2364 LASSERT(lmm != NULL);
2366 if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
2367 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
2368 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_COMP_V1) &&
2369 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_FOREIGN))
2370 GOTO(out, rc = -EPROTO);
2373 * This is coming from the MDS, so it is probably in
2374 * little endian. We convert it to host endian before
2375 * passing it to userspace.
2377 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
2378 int stripe_count = 0;
2380 if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
2381 lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
2382 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
2383 if (le32_to_cpu(lmm->lmm_pattern) &
2384 LOV_PATTERN_F_RELEASED)
2386 lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
2388 /* if the function is called for a directory, we should
2389 * avoid swabbing non-existent lsm objects
2391 if (lmm->lmm_magic == LOV_MAGIC_V1 &&
2392 S_ISREG(body->mbo_mode))
2393 lustre_swab_lov_user_md_objects(
2394 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
2396 else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
2397 S_ISREG(body->mbo_mode))
2398 lustre_swab_lov_user_md_objects(
2399 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
2401 } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
2402 lustre_swab_lov_comp_md_v1(
2403 (struct lov_comp_md_v1 *)lmm);
2407 if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) {
2408 struct lov_comp_md_v1 *comp_v1 = NULL;
2409 struct lov_comp_md_entry_v1 *ent;
2410 struct lov_user_md_v1 *v1;
2414 comp_v1 = (struct lov_comp_md_v1 *)lmm;
2415 /* Dump the striping information */
2416 for (; i < comp_v1->lcm_entry_count; i++) {
2417 ent = &comp_v1->lcm_entries[i];
2418 off = ent->lcme_offset;
2419 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2421 "comp[%d]: stripe_count=%u, stripe_size=%u\n",
2422 i, v1->lmm_stripe_count, v1->lmm_stripe_size);
2426 * Return valid stripe_count and stripe_size instead of 0 for
2427 * DoM files to avoid divide-by-zero for older userspace that
2428 * calls this ioctl, e.g. lustre ADIO driver.
2430 if (lmm->lmm_stripe_count == 0)
2431 lmm->lmm_stripe_count = 1;
2432 if (lmm->lmm_stripe_size == 0) {
2433 /* Since the first component of the file data is placed
2434 * on the MDT for faster access, report the stripe_size
2435 * of the next OST component instead. */
2438 if (lmm->lmm_pattern == LOV_PATTERN_MDT)
2439 i = comp_v1->lcm_entry_count > 1 ? 1 : 0;
2441 i = comp_v1->lcm_entry_count > 1 ?
2442 comp_v1->lcm_entry_count - 1 : 0;
2443 ent = &comp_v1->lcm_entries[i];
2444 off = ent->lcme_offset;
2445 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2446 lmm->lmm_stripe_size = v1->lmm_stripe_size;
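/*
 * Illustrative example (not from the original source): for a DoM file
 * whose first component lives on the MDT and whose next component uses a
 * 4MB stripe_size, lmm_stripe_size is reported as 4MB instead of 0, so
 * old userspace doing size/stripe_size arithmetic does not divide by zero.
 */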
2451 *lmm_size = lmmsize;
2456 static int ll_lov_setea(struct inode *inode, struct file *file,
2459 __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
2460 struct lov_user_md *lump;
2461 int lum_size = sizeof(struct lov_user_md) +
2462 sizeof(struct lov_user_ost_data);
2466 if (!capable(CAP_SYS_ADMIN))
2469 OBD_ALLOC_LARGE(lump, lum_size);
2473 if (copy_from_user(lump, arg, lum_size))
2474 GOTO(out_lump, rc = -EFAULT);
2476 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, lump,
2478 cl_lov_delay_create_clear(&file->f_flags);
2481 OBD_FREE_LARGE(lump, lum_size);
2485 static int ll_file_getstripe(struct inode *inode, void __user *lum, size_t size)
2492 env = cl_env_get(&refcheck);
2494 RETURN(PTR_ERR(env));
2496 rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum, size);
2497 cl_env_put(env, &refcheck);
2501 static int ll_lov_setstripe(struct inode *inode, struct file *file,
2504 struct lov_user_md __user *lum = (struct lov_user_md __user *)arg;
2505 struct lov_user_md *klum;
2507 __u64 flags = FMODE_WRITE;
2510 rc = ll_copy_user_md(lum, &klum);
2515 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, klum,
2520 rc = put_user(0, &lum->lmm_stripe_count);
2524 rc = ll_layout_refresh(inode, &gen);
2528 rc = ll_file_getstripe(inode, arg, lum_size);
2529 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2530 ll_i2info(inode)->lli_clob) {
2531 struct iattr attr = { 0 };
2533 rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, &attr,
2534 OP_XVALID_FLAGS, LUSTRE_ENCRYPT_FL);
2537 cl_lov_delay_create_clear(&file->f_flags);
2540 OBD_FREE_LARGE(klum, lum_size);
2546 ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
2548 struct ll_inode_info *lli = ll_i2info(inode);
2549 struct cl_object *obj = lli->lli_clob;
2550 struct ll_file_data *fd = file->private_data;
2551 struct ll_grouplock grouplock;
2556 CWARN("group id for group lock must not be 0\n");
2560 if (ll_file_nolock(file))
2561 RETURN(-EOPNOTSUPP);
2563 if (file->f_flags & O_NONBLOCK) {
2564 if (!mutex_trylock(&lli->lli_group_mutex))
2567 mutex_lock(&lli->lli_group_mutex);
2569 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
2570 CWARN("group lock already existed with gid %lu\n",
2571 fd->fd_grouplock.lg_gid);
2572 GOTO(out, rc = -EINVAL);
2574 if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
2575 if (file->f_flags & O_NONBLOCK)
2576 GOTO(out, rc = -EAGAIN);
2577 mutex_unlock(&lli->lli_group_mutex);
2578 wait_var_event(&lli->lli_group_users, !lli->lli_group_users);
2579 GOTO(retry, rc = 0);
2581 LASSERT(fd->fd_grouplock.lg_lock == NULL);
2584 * XXX: group lock needs to protect all OST objects while PFL
2585 * can add new OST objects during the IO, so we'd instantiate
2586 * all OST objects before getting its group lock.
2591 struct cl_layout cl = {
2592 .cl_is_composite = false,
2594 struct lu_extent ext = {
2596 .e_end = OBD_OBJECT_EOF,
2599 env = cl_env_get(&refcheck);
2601 GOTO(out, rc = PTR_ERR(env));
2603 rc = cl_object_layout_get(env, obj, &cl);
2604 if (rc >= 0 && cl.cl_is_composite)
2605 rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
2608 cl_env_put(env, &refcheck);
2613 rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
2614 arg, (file->f_flags & O_NONBLOCK), &grouplock);
2619 fd->fd_flags |= LL_FILE_GROUP_LOCKED;
2620 fd->fd_grouplock = grouplock;
2621 if (lli->lli_group_users == 0)
2622 lli->lli_group_gid = grouplock.lg_gid;
2623 lli->lli_group_users++;
2625 CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
2627 mutex_unlock(&lli->lli_group_mutex);
2632 static int ll_put_grouplock(struct inode *inode, struct file *file,
2635 struct ll_inode_info *lli = ll_i2info(inode);
2636 struct ll_file_data *fd = file->private_data;
2637 struct ll_grouplock grouplock;
2641 mutex_lock(&lli->lli_group_mutex);
2642 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
2643 CWARN("no group lock held\n");
2644 GOTO(out, rc = -EINVAL);
2647 LASSERT(fd->fd_grouplock.lg_lock != NULL);
2649 if (fd->fd_grouplock.lg_gid != arg) {
2650 CWARN("group lock %lu doesn't match current id %lu\n",
2651 arg, fd->fd_grouplock.lg_gid);
2652 GOTO(out, rc = -EINVAL);
2655 grouplock = fd->fd_grouplock;
2656 memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
2657 fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
2659 cl_put_grouplock(&grouplock);
2661 lli->lli_group_users--;
2662 if (lli->lli_group_users == 0) {
2663 lli->lli_group_gid = 0;
2664 wake_up_var(&lli->lli_group_users);
2666 CDEBUG(D_INFO, "group lock %lu released\n", arg);
2669 mutex_unlock(&lli->lli_group_mutex);
2675 * Close inode open handle
2677 * \param dentry [in] dentry which contains the inode
2678 * \param it [in,out] intent which contains open info and result
2681 * \retval <0 failure
2683 int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
2685 struct inode *inode = dentry->d_inode;
2686 struct obd_client_handle *och;
2692 /* Root ? Do nothing. */
2693 if (is_root_inode(inode))
2696 /* No open handle to close? Move away */
2697 if (!it_disposition(it, DISP_OPEN_OPEN))
2700 LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
2702 OBD_ALLOC(och, sizeof(*och));
2704 GOTO(out, rc = -ENOMEM);
2706 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
2710 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
2712 /* this one is in place of ll_file_open */
2713 if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
2714 ptlrpc_req_finished(it->it_request);
2715 it_clear_disposition(it, DISP_ENQ_OPEN_REF);
2721 * Get size for inode for which FIEMAP mapping is requested.
2722 * Makes the FIEMAP get_info call and returns the result.
2723 * \param fiemap kernel buffer to hold extents
2724 * \param num_bytes kernel buffer size
2726 static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
2732 struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
2735 /* Checks for fiemap flags */
2736 if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
2737 fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
2741 /* Check for FIEMAP_FLAG_SYNC */
2742 if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
2743 rc = filemap_fdatawrite(inode->i_mapping);
2748 env = cl_env_get(&refcheck);
2750 RETURN(PTR_ERR(env));
2752 if (i_size_read(inode) == 0) {
2753 rc = ll_glimpse_size(inode);
2758 fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
2759 obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
2760 obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
2762 /* If filesize is 0, then there would be no objects for mapping */
2763 if (fmkey.lfik_oa.o_size == 0) {
2764 fiemap->fm_mapped_extents = 0;
2768 fmkey.lfik_fiemap = *fiemap;
2770 rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
2771 &fmkey, fiemap, &num_bytes);
2773 cl_env_put(env, &refcheck);
2777 int ll_fid2path(struct inode *inode, void __user *arg)
2779 struct obd_export *exp = ll_i2mdexp(inode);
2780 const struct getinfo_fid2path __user *gfin = arg;
2782 struct getinfo_fid2path *gfout;
2788 if (!capable(CAP_DAC_READ_SEARCH) &&
2789 !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
2792 /* Only need to get the buflen */
2793 if (get_user(pathlen, &gfin->gf_pathlen))
2796 if (pathlen > PATH_MAX)
2799 outsize = sizeof(*gfout) + pathlen;
2800 OBD_ALLOC(gfout, outsize);
2804 if (copy_from_user(gfout, arg, sizeof(*gfout)))
2805 GOTO(gf_free, rc = -EFAULT);
2806 /* append root FID after gfout to let the MDT know the root FID so that
2807 * it can look up the correct path; this is mainly for fileset.
2808 * Old servers without fileset mount support will ignore this. */
2809 *gfout->gf_u.gf_root_fid = *ll_inode2fid(inode);
2811 /* Call mdc_iocontrol */
2812 rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
2816 if (copy_to_user(arg, gfout, outsize))
2820 OBD_FREE(gfout, outsize);
2825 ll_ioc_data_version(struct inode *inode, struct ioc_data_version *ioc)
2827 struct cl_object *obj = ll_i2info(inode)->lli_clob;
2835 ioc->idv_version = 0;
2836 ioc->idv_layout_version = UINT_MAX;
2838 /* If no file object has been initialized, we consider its version to be 0. */
2842 env = cl_env_get(&refcheck);
2844 RETURN(PTR_ERR(env));
2846 io = vvp_env_thread_io(env);
2848 io->u.ci_data_version.dv_data_version = 0;
2849 io->u.ci_data_version.dv_layout_version = UINT_MAX;
2850 io->u.ci_data_version.dv_flags = ioc->idv_flags;
2853 if (cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj) == 0)
2854 result = cl_io_loop(env, io);
2856 result = io->ci_result;
2858 ioc->idv_version = io->u.ci_data_version.dv_data_version;
2859 ioc->idv_layout_version = io->u.ci_data_version.dv_layout_version;
2861 cl_io_fini(env, io);
2863 if (unlikely(io->ci_need_restart))
2866 cl_env_put(env, &refcheck);
2872 * Read the data_version for inode.
2874 * This value is computed using the stripe object versions on the OSTs.
2875 * The version is computed using server-side locking.
2877 * @param flags whether to sync on the OST side:
2879 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
2880 * LL_DV_WR_FLUSH: drop all cached pages, LCK_PW on OSTs
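/*
 * Illustrative usage sketch (not from the original source), similar to the
 * data version check done in ll_swap_layouts() below:
 *
 *	__u64 dv = 0;
 *	int rc = ll_data_version(inode, &dv, 0);
 *
 *	if (rc == 0 && dv != previously_sampled_dv)
 *		the file data has changed in the meantime
 */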
2882 int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
2884 struct ioc_data_version ioc = { .idv_flags = flags };
2887 rc = ll_ioc_data_version(inode, &ioc);
2889 *data_version = ioc.idv_version;
2895 * Trigger an HSM release request for the provided inode.
2897 int ll_hsm_release(struct inode *inode)
2900 struct obd_client_handle *och = NULL;
2901 __u64 data_version = 0;
2907 CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
2908 ll_i2sbi(inode)->ll_fsname,
2909 PFID(&ll_i2info(inode)->lli_fid));
2911 och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
2913 GOTO(out, rc = PTR_ERR(och));
2915 /* Grab latest data_version and [am]time values */
2916 rc = ll_data_version(inode, &data_version,
2917 LL_DV_WR_FLUSH | LL_DV_SZ_UPDATE);
2921 env = cl_env_get(&refcheck);
2923 GOTO(out, rc = PTR_ERR(env));
2925 rc = ll_merge_attr(env, inode);
2926 cl_env_put(env, &refcheck);
2928 /* If an error happened, we have the wrong size for the file; do not release it. */
2934 /* Release the file.
2935 * NB: lease lock handle is released in mdc_hsm_release_pack() because
2936 * we still need it to pack l_remote_handle to MDT. */
2937 rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
2943 if (och != NULL && !IS_ERR(och)) /* close the file */
2944 ll_lease_close(och, inode, NULL);
2949 struct ll_swap_stack {
2952 struct inode *inode1;
2953 struct inode *inode2;
2958 static int ll_swap_layouts(struct file *file1, struct file *file2,
2959 struct lustre_swap_layouts *lsl)
2961 struct mdc_swap_layouts msl;
2962 struct md_op_data *op_data;
2965 struct ll_swap_stack *llss = NULL;
2968 OBD_ALLOC_PTR(llss);
2972 llss->inode1 = file_inode(file1);
2973 llss->inode2 = file_inode(file2);
2975 rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
2979 /* we use 2 bools because they are easier to swap than 2 bits */
2980 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
2981 llss->check_dv1 = true;
2983 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
2984 llss->check_dv2 = true;
2986 /* we cannot use lsl->sl_dvX directly because we may swap them */
2987 llss->dv1 = lsl->sl_dv1;
2988 llss->dv2 = lsl->sl_dv2;
2990 rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
2991 if (rc == 0) /* same file, done! */
2994 if (rc < 0) { /* sequentialize it */
2995 swap(llss->inode1, llss->inode2);
2997 swap(llss->dv1, llss->dv2);
2998 swap(llss->check_dv1, llss->check_dv2);
3002 if (gid != 0) { /* application asks to flush dirty cache */
3003 rc = ll_get_grouplock(llss->inode1, file1, gid);
3007 rc = ll_get_grouplock(llss->inode2, file2, gid);
3009 ll_put_grouplock(llss->inode1, file1, gid);
3014 /* ultimate check: before swapping the layouts we check whether the
3015 * data version has changed (if requested) */
3016 if (llss->check_dv1) {
3017 rc = ll_data_version(llss->inode1, &dv, 0);
3020 if (dv != llss->dv1)
3021 GOTO(putgl, rc = -EAGAIN);
3024 if (llss->check_dv2) {
3025 rc = ll_data_version(llss->inode2, &dv, 0);
3028 if (dv != llss->dv2)
3029 GOTO(putgl, rc = -EAGAIN);
3032 /* struct md_op_data is used to send the swap args to the MDT;
3033 * only the flags are missing, so we pass struct mdc_swap_layouts
3034 * through md_op_data->op_data */
3035 /* flags from user space have to be converted before they are sent to
3036 * the server; no flag is sent today, they are only used on the client */
3039 op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
3040 0, LUSTRE_OPC_ANY, &msl);
3041 if (IS_ERR(op_data))
3042 GOTO(free, rc = PTR_ERR(op_data));
3044 rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
3045 sizeof(*op_data), op_data, NULL);
3046 ll_finish_md_op_data(op_data);
3053 ll_put_grouplock(llss->inode2, file2, gid);
3054 ll_put_grouplock(llss->inode1, file1, gid);
3064 int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
3066 struct obd_export *exp = ll_i2mdexp(inode);
3067 struct md_op_data *op_data;
3071 /* Detect out-of-range masks */
3072 if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
3075 /* Non-root users are forbidden to set or clear flags which are
3076 * NOT defined in HSM_USER_MASK. */
3077 if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
3078 !capable(CAP_SYS_ADMIN))
3081 if (!exp_connect_archive_id_array(exp)) {
3082 /* Detect out-of-range archive id */
3083 if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
3084 (hss->hss_archive_id > LL_HSM_ORIGIN_MAX_ARCHIVE))
3088 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3089 LUSTRE_OPC_ANY, hss);
3090 if (IS_ERR(op_data))
3091 RETURN(PTR_ERR(op_data));
3093 rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, exp, sizeof(*op_data),
3096 ll_finish_md_op_data(op_data);
3101 static int ll_hsm_import(struct inode *inode, struct file *file,
3102 struct hsm_user_import *hui)
3104 struct hsm_state_set *hss = NULL;
3105 struct iattr *attr = NULL;
3109 if (!S_ISREG(inode->i_mode))
3115 GOTO(out, rc = -ENOMEM);
3117 hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
3118 hss->hss_archive_id = hui->hui_archive_id;
3119 hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
3120 rc = ll_hsm_state_set(inode, hss);
3124 OBD_ALLOC_PTR(attr);
3126 GOTO(out, rc = -ENOMEM);
3128 attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
3129 attr->ia_mode |= S_IFREG;
3130 attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
3131 attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
3132 attr->ia_size = hui->hui_size;
3133 attr->ia_mtime.tv_sec = hui->hui_mtime;
3134 attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
3135 attr->ia_atime.tv_sec = hui->hui_atime;
3136 attr->ia_atime.tv_nsec = hui->hui_atime_ns;
3138 attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
3139 ATTR_UID | ATTR_GID |
3140 ATTR_MTIME | ATTR_MTIME_SET |
3141 ATTR_ATIME | ATTR_ATIME_SET;
3145 rc = ll_setattr_raw(file_dentry(file), attr, 0, true);
3149 inode_unlock(inode);
3161 static inline long ll_lease_type_from_fmode(fmode_t fmode)
3163 return ((fmode & FMODE_READ) ? LL_LEASE_RDLCK : 0) |
3164 ((fmode & FMODE_WRITE) ? LL_LEASE_WRLCK : 0);
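/*
 * For example, a lease obtained with FMODE_READ | FMODE_WRITE maps to
 * LL_LEASE_RDLCK | LL_LEASE_WRLCK, while fmode == 0 maps to 0 (no lease).
 */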
3167 static int ll_file_futimes_3(struct file *file, const struct ll_futimes_3 *lfu)
3169 struct inode *inode = file_inode(file);
3171 .ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
3172 ATTR_MTIME | ATTR_MTIME_SET |
3175 .tv_sec = lfu->lfu_atime_sec,
3176 .tv_nsec = lfu->lfu_atime_nsec,
3179 .tv_sec = lfu->lfu_mtime_sec,
3180 .tv_nsec = lfu->lfu_mtime_nsec,
3183 .tv_sec = lfu->lfu_ctime_sec,
3184 .tv_nsec = lfu->lfu_ctime_nsec,
3190 if (!capable(CAP_SYS_ADMIN))
3193 if (!S_ISREG(inode->i_mode))
3197 rc = ll_setattr_raw(file_dentry(file), &ia, OP_XVALID_CTIME_SET,
3199 inode_unlock(inode);
3204 static enum cl_lock_mode cl_mode_user_to_kernel(enum lock_mode_user mode)
3207 case MODE_READ_USER:
3209 case MODE_WRITE_USER:
3216 static const char *const user_lockname[] = LOCK_MODE_NAMES;
3218 /* Used to allow the upper layers of the client to request an LDLM lock
3219 * without doing an actual read or write.
3221 * Used for ladvise lockahead to manually request specific locks.
3223 * \param[in] file file this ladvise lock request is on
3224 * \param[in] ladvise ladvise struct describing this lock request
3226 * \retval 0 success, no detailed result available (sync requests
3227 * and requests sent to the server [not handled locally]
3228 * cannot return detailed results)
3229 * \retval LLA_RESULT_{SAME,DIFFERENT} - detailed result of the lock request,
3230 * see definitions for details.
3231 * \retval negative negative errno on error
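/*
 * Illustrative sketch (not from the original source) of how a caller might
 * fill a lockahead advice before it reaches this function; the field values
 * below are examples only:
 *
 *	struct llapi_lu_ladvise ladvise = {
 *		.lla_advice          = LU_LADVISE_LOCKAHEAD,
 *		.lla_lockahead_mode  = MODE_WRITE_USER,
 *		.lla_peradvice_flags = LF_ASYNC,
 *		.lla_start           = 0,
 *		.lla_end             = 1048576,
 *	};
 */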
3233 int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise)
3235 struct lu_env *env = NULL;
3236 struct cl_io *io = NULL;
3237 struct cl_lock *lock = NULL;
3238 struct cl_lock_descr *descr = NULL;
3239 struct dentry *dentry = file->f_path.dentry;
3240 struct inode *inode = dentry->d_inode;
3241 enum cl_lock_mode cl_mode;
3242 off_t start = ladvise->lla_start;
3243 off_t end = ladvise->lla_end;
3250 "Lock request: file=%pd, inode=%p, mode=%s start=%llu, end=%llu\n",
3251 dentry, dentry->d_inode,
3252 user_lockname[ladvise->lla_lockahead_mode], (__u64) start,
3255 cl_mode = cl_mode_user_to_kernel(ladvise->lla_lockahead_mode);
3257 GOTO(out, result = cl_mode);
3259 /* Get IO environment */
3260 result = cl_io_get(inode, &env, &io, &refcheck);
3264 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
3267 * nothing to do for this io. This currently happens when
3268 * stripe sub-objects are not yet created.
3270 result = io->ci_result;
3271 } else if (result == 0) {
3272 lock = vvp_env_lock(env);
3273 descr = &lock->cll_descr;
3275 descr->cld_obj = io->ci_obj;
3276 /* Convert byte offsets to pages */
3277 descr->cld_start = cl_index(io->ci_obj, start);
3278 descr->cld_end = cl_index(io->ci_obj, end);
3279 descr->cld_mode = cl_mode;
3280 /* CEF_MUST is used because we do not want to convert a
3281 * lockahead request to a lockless lock */
3282 descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND;
3284 if (ladvise->lla_peradvice_flags & LF_ASYNC)
3285 descr->cld_enq_flags |= CEF_SPECULATIVE;
3287 result = cl_lock_request(env, io, lock);
3289 /* On success, we need to release the lock */
3291 cl_lock_release(env, lock);
3293 cl_io_fini(env, io);
3294 cl_env_put(env, &refcheck);
3296 /* -ECANCELED indicates a matching lock with a different extent
3297 * was already present, and -EEXIST indicates a matching lock
3298 * on exactly the same extent was already present.
3299 * We convert them to positive values for userspace to make
3300 * recognizing true errors easier.
3301 * Note we can only return these detailed results on async requests,
3302 * as sync requests look the same as i/o requests for locking. */
3303 if (result == -ECANCELED)
3304 result = LLA_RESULT_DIFFERENT;
3305 else if (result == -EEXIST)
3306 result = LLA_RESULT_SAME;
3311 static const char *const ladvise_names[] = LU_LADVISE_NAMES;
3313 static int ll_ladvise_sanity(struct inode *inode,
3314 struct llapi_lu_ladvise *ladvise)
3316 struct ll_sb_info *sbi = ll_i2sbi(inode);
3317 enum lu_ladvise_type advice = ladvise->lla_advice;
3318 /* Note the per-advice flags field is 32 bits wide, so per-advice flags
3319 * must be in the first 32 bits of enum ladvise_flags */
3320 __u32 flags = ladvise->lla_peradvice_flags;
3321 /* 3 lines at 80 characters per line, should be plenty */
3324 if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) {
3327 "%s: advice with value '%d' not recognized, last supported advice is %s (value '%d'): rc = %d\n",
3328 sbi->ll_fsname, advice,
3329 ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc);
3333 /* Per-advice checks */
3335 case LU_LADVISE_LOCKNOEXPAND:
3336 if (flags & ~LF_LOCKNOEXPAND_MASK) {
3338 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
3339 "rc = %d\n", sbi->ll_fsname, flags,
3340 ladvise_names[advice], rc);
3344 case LU_LADVISE_LOCKAHEAD:
3345 /* Currently only READ and WRITE modes can be requested */
3346 if (ladvise->lla_lockahead_mode >= MODE_MAX_USER ||
3347 ladvise->lla_lockahead_mode == 0) {
3349 CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: "
3350 "rc = %d\n", sbi->ll_fsname,
3351 ladvise->lla_lockahead_mode,
3352 ladvise_names[advice], rc);
3356 case LU_LADVISE_WILLREAD:
3357 case LU_LADVISE_DONTNEED:
3359 /* Note fall through above - These checks apply to all advices
3360 * except LOCKNOEXPAND */
3361 if (flags & ~LF_DEFAULT_MASK) {
3363 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
3364 "rc = %d\n", sbi->ll_fsname, flags,
3365 ladvise_names[advice], rc);
3368 if (ladvise->lla_start >= ladvise->lla_end) {
3370 CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) "
3371 "for %s: rc = %d\n", sbi->ll_fsname,
3372 ladvise->lla_start, ladvise->lla_end,
3373 ladvise_names[advice], rc);
3385 * Give file access advices
3387 * The ladvise interface is similar to the Linux fadvise() system call, except
3388 * it forwards the advices directly from the Lustre client to the server. The
3389 * server-side code will apply appropriate read-ahead and caching techniques
3390 * for the corresponding files.
3392 * A typical workload for ladvise is e.g. a bunch of different clients are
3393 * doing small random reads of a file, so prefetching pages into OSS cache
3394 * with big linear reads before the random IO is a net benefit. Fetching
3395 * all that data into each client cache with fadvise() may not be, due to
3396 * much more data being sent to the client.
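/*
 * Illustrative sketch (not from the original source) of the expected input
 * for the LL_IOC_LADVISE ioctl handled later in this file: userspace fills a
 * struct llapi_ladvise_hdr with lah_magic = LADVISE_MAGIC, lah_count set to
 * the number of advices, one struct llapi_lu_ladvise per advice in
 * lah_advise[], and then issues ioctl(fd, LL_IOC_LADVISE, &hdr).
 */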
3398 static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags,
3399 struct llapi_lu_ladvise *ladvise)
3403 struct cl_ladvise_io *lio;
3408 env = cl_env_get(&refcheck);
3410 RETURN(PTR_ERR(env));
3412 io = vvp_env_thread_io(env);
3413 io->ci_obj = ll_i2info(inode)->lli_clob;
3415 /* initialize parameters for ladvise */
3416 lio = &io->u.ci_ladvise;
3417 lio->li_start = ladvise->lla_start;
3418 lio->li_end = ladvise->lla_end;
3419 lio->li_fid = ll_inode2fid(inode);
3420 lio->li_advice = ladvise->lla_advice;
3421 lio->li_flags = flags;
3423 if (cl_io_init(env, io, CIT_LADVISE, io->ci_obj) == 0)
3424 rc = cl_io_loop(env, io);
3428 cl_io_fini(env, io);
3429 cl_env_put(env, &refcheck);
3433 static int ll_lock_noexpand(struct file *file, int flags)
3435 struct ll_file_data *fd = file->private_data;
3437 fd->ll_lock_no_expand = !(flags & LF_UNSET);
3442 int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
3445 struct fsxattr fsxattr;
3447 if (copy_from_user(&fsxattr,
3448 (const struct fsxattr __user *)arg,
3452 fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
3453 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
3454 fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
3455 fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
3456 if (copy_to_user((struct fsxattr __user *)arg,
3457 &fsxattr, sizeof(fsxattr)))
3463 int ll_ioctl_check_project(struct inode *inode, __u32 xflags,
3467 * Project Quota ID state is only allowed to change from within the init
3468 * namespace. Enforce that restriction only if we are trying to change
3469 * the quota ID state. Everything else is allowed in user namespaces.
3471 if (current_user_ns() == &init_user_ns) {
3473 * Caller is allowed to change the project ID. If it is being
3474 * changed, make sure that the new value is valid.
3476 if (ll_i2info(inode)->lli_projid != projid &&
3477 !projid_valid(make_kprojid(&init_user_ns, projid)))
3483 if (ll_i2info(inode)->lli_projid != projid)
3486 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
3487 if (!(xflags & FS_XFLAG_PROJINHERIT))
3490 if (xflags & FS_XFLAG_PROJINHERIT)
3497 static int ll_set_project(struct inode *inode, __u32 xflags, __u32 projid)
3499 struct md_op_data *op_data;
3500 struct ptlrpc_request *req = NULL;
3501 struct cl_object *obj;
3502 unsigned int inode_flags;
3505 rc = ll_ioctl_check_project(inode, xflags, projid);
3509 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3510 LUSTRE_OPC_ANY, NULL);
3511 if (IS_ERR(op_data))
3512 RETURN(PTR_ERR(op_data));
3514 inode_flags = ll_xflags_to_inode_flags(xflags);
3515 op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags);
3516 if (xflags & FS_XFLAG_PROJINHERIT)
3517 op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
3518 op_data->op_projid = projid;
3519 op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
3520 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
3521 ptlrpc_req_finished(req);
3523 GOTO(out_fsxattr, rc);
3524 ll_update_inode_flags(inode, op_data->op_attr_flags);
3526 /* Avoid the OST RPC if this ioctl only sets the project inherit flag */
3527 if (xflags == 0 || xflags == FS_XFLAG_PROJINHERIT)
3528 GOTO(out_fsxattr, rc);
3530 obj = ll_i2info(inode)->lli_clob;
3532 struct iattr attr = { 0 };
3534 rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS, xflags);
3538 ll_finish_md_op_data(op_data);
3542 int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
3545 struct fsxattr fsxattr;
3549 if (copy_from_user(&fsxattr,
3550 (const struct fsxattr __user *)arg,
3554 RETURN(ll_set_project(inode, fsxattr.fsx_xflags,
3555 fsxattr.fsx_projid));
3558 int ll_ioctl_project(struct file *file, unsigned int cmd,
3561 struct lu_project lu_project;
3562 struct dentry *dentry = file_dentry(file);
3563 struct inode *inode = file_inode(file);
3564 struct dentry *child_dentry = NULL;
3565 int rc = 0, name_len;
3567 if (copy_from_user(&lu_project,
3568 (const struct lu_project __user *)arg,
3569 sizeof(lu_project)))
3572 /* apply child dentry if name is valid */
3573 name_len = strnlen(lu_project.project_name, NAME_MAX);
3574 if (name_len > 0 && name_len <= NAME_MAX) {
3576 child_dentry = lookup_one_len(lu_project.project_name,
3578 inode_unlock(inode);
3579 if (IS_ERR(child_dentry)) {
3580 rc = PTR_ERR(child_dentry);
3583 inode = child_dentry->d_inode;
3588 } else if (name_len > NAME_MAX) {
3593 switch (lu_project.project_type) {
3594 case LU_PROJECT_SET:
3595 rc = ll_set_project(inode, lu_project.project_xflags,
3596 lu_project.project_id);
3598 case LU_PROJECT_GET:
3599 lu_project.project_xflags =
3600 ll_inode_flags_to_xflags(inode->i_flags);
3601 if (test_bit(LLIF_PROJECT_INHERIT,
3602 &ll_i2info(inode)->lli_flags))
3603 lu_project.project_xflags |= FS_XFLAG_PROJINHERIT;
3604 lu_project.project_id = ll_i2info(inode)->lli_projid;
3605 if (copy_to_user((struct lu_project __user *)arg,
3606 &lu_project, sizeof(lu_project))) {
3616 if (!IS_ERR_OR_NULL(child_dentry))
3621 static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
3624 struct inode *inode = file_inode(file);
3625 struct ll_file_data *fd = file->private_data;
3626 struct ll_inode_info *lli = ll_i2info(inode);
3627 struct obd_client_handle *och = NULL;
3628 struct split_param sp;
3629 struct pcc_param param;
3630 bool lease_broken = false;
3632 enum mds_op_bias bias = 0;
3633 struct file *layout_file = NULL;
3635 size_t data_size = 0;
3636 bool attached = false;
3641 mutex_lock(&lli->lli_och_mutex);
3642 if (fd->fd_lease_och != NULL) {
3643 och = fd->fd_lease_och;
3644 fd->fd_lease_och = NULL;
3646 mutex_unlock(&lli->lli_och_mutex);
3651 fmode = och->och_flags;
3653 switch (ioc->lil_flags) {
3654 case LL_LEASE_RESYNC_DONE:
3655 if (ioc->lil_count > IOC_IDS_MAX)
3656 GOTO(out_lease_close, rc = -EINVAL);
3658 data_size = offsetof(typeof(*ioc), lil_ids[ioc->lil_count]);
3659 OBD_ALLOC(data, data_size);
3661 GOTO(out_lease_close, rc = -ENOMEM);
3663 if (copy_from_user(data, (void __user *)arg, data_size))
3664 GOTO(out_lease_close, rc = -EFAULT);
3666 bias = MDS_CLOSE_RESYNC_DONE;
3668 case LL_LEASE_LAYOUT_MERGE: {
3671 if (ioc->lil_count != 1)
3672 GOTO(out_lease_close, rc = -EINVAL);
3674 arg += sizeof(*ioc);
3675 if (copy_from_user(&fd, (void __user *)arg, sizeof(__u32)))
3676 GOTO(out_lease_close, rc = -EFAULT);
3678 layout_file = fget(fd);
3680 GOTO(out_lease_close, rc = -EBADF);
3682 if ((file->f_flags & O_ACCMODE) == O_RDONLY ||
3683 (layout_file->f_flags & O_ACCMODE) == O_RDONLY)
3684 GOTO(out_lease_close, rc = -EPERM);
3686 data = file_inode(layout_file);
3687 bias = MDS_CLOSE_LAYOUT_MERGE;
3690 case LL_LEASE_LAYOUT_SPLIT: {
3694 if (ioc->lil_count != 2)
3695 GOTO(out_lease_close, rc = -EINVAL);
3697 arg += sizeof(*ioc);
3698 if (copy_from_user(&fdv, (void __user *)arg, sizeof(__u32)))
3699 GOTO(out_lease_close, rc = -EFAULT);
3701 arg += sizeof(__u32);
3702 if (copy_from_user(&mirror_id, (void __user *)arg,
3704 GOTO(out_lease_close, rc = -EFAULT);
3706 layout_file = fget(fdv);
3708 GOTO(out_lease_close, rc = -EBADF);
3710 /* if layout_file == file, it means to destroy the mirror */
3711 sp.sp_inode = file_inode(layout_file);
3712 sp.sp_mirror_id = (__u16)mirror_id;
3714 bias = MDS_CLOSE_LAYOUT_SPLIT;
3717 case LL_LEASE_PCC_ATTACH:
3718 if (ioc->lil_count != 1)
3721 if (IS_ENCRYPTED(inode))
3722 RETURN(-EOPNOTSUPP);
3724 arg += sizeof(*ioc);
3725 if (copy_from_user(¶m.pa_archive_id, (void __user *)arg,
3727 GOTO(out_lease_close, rc2 = -EFAULT);
3729 rc2 = pcc_readwrite_attach(file, inode, param.pa_archive_id);
3731 GOTO(out_lease_close, rc2);
3734 /* Grab latest data version */
3735 rc2 = ll_data_version(inode, ¶m.pa_data_version,
3738 GOTO(out_lease_close, rc2);
3741 bias = MDS_PCC_ATTACH;
3744 /* without close intent */
3749 rc = ll_lease_close_intent(och, inode, &lease_broken, bias, data);
3753 rc = ll_lease_och_release(inode, file);
3762 switch (ioc->lil_flags) {
3763 case LL_LEASE_RESYNC_DONE:
3765 OBD_FREE(data, data_size);
3767 case LL_LEASE_LAYOUT_MERGE:
3768 case LL_LEASE_LAYOUT_SPLIT:
3772 ll_layout_refresh(inode, &fd->fd_layout_version);
3774 case LL_LEASE_PCC_ATTACH:
3777 rc = pcc_readwrite_attach_fini(file, inode,
3778 param.pa_layout_gen,
3785 rc = ll_lease_type_from_fmode(fmode);
3789 static long ll_file_set_lease(struct file *file, struct ll_ioc_lease *ioc,
3792 struct inode *inode = file_inode(file);
3793 struct ll_inode_info *lli = ll_i2info(inode);
3794 struct ll_file_data *fd = file->private_data;
3795 struct obd_client_handle *och = NULL;
3796 __u64 open_flags = 0;
3802 switch (ioc->lil_mode) {
3803 case LL_LEASE_WRLCK:
3804 if (!(file->f_mode & FMODE_WRITE))
3806 fmode = FMODE_WRITE;
3808 case LL_LEASE_RDLCK:
3809 if (!(file->f_mode & FMODE_READ))
3813 case LL_LEASE_UNLCK:
3814 RETURN(ll_file_unlock_lease(file, ioc, arg));
3819 CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
3821 /* apply for lease */
3822 if (ioc->lil_flags & LL_LEASE_RESYNC)
3823 open_flags = MDS_OPEN_RESYNC;
3824 och = ll_lease_open(inode, file, fmode, open_flags);
3826 RETURN(PTR_ERR(och));
3828 if (ioc->lil_flags & LL_LEASE_RESYNC) {
3829 rc = ll_lease_file_resync(och, inode, arg);
3831 ll_lease_close(och, inode, NULL);
3834 rc = ll_layout_refresh(inode, &fd->fd_layout_version);
3836 ll_lease_close(och, inode, NULL);
3842 mutex_lock(&lli->lli_och_mutex);
3843 if (fd->fd_lease_och == NULL) {
3844 fd->fd_lease_och = och;
3847 mutex_unlock(&lli->lli_och_mutex);
3849 /* currently impossible since only exclusive leases are supported */
3850 ll_lease_close(och, inode, &lease_broken);
3856 static void ll_heat_get(struct inode *inode, struct lu_heat *heat)
3858 struct ll_inode_info *lli = ll_i2info(inode);
3859 struct ll_sb_info *sbi = ll_i2sbi(inode);
3860 __u64 now = ktime_get_real_seconds();
3863 spin_lock(&lli->lli_heat_lock);
3864 heat->lh_flags = lli->lli_heat_flags;
3865 for (i = 0; i < heat->lh_count; i++)
3866 heat->lh_heat[i] = obd_heat_get(&lli->lli_heat_instances[i],
3867 now, sbi->ll_heat_decay_weight,
3868 sbi->ll_heat_period_second);
3869 spin_unlock(&lli->lli_heat_lock);
3872 static int ll_heat_set(struct inode *inode, enum lu_heat_flag flags)
3874 struct ll_inode_info *lli = ll_i2info(inode);
3877 spin_lock(&lli->lli_heat_lock);
3878 if (flags & LU_HEAT_FLAG_CLEAR)
3879 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
3881 if (flags & LU_HEAT_FLAG_OFF)
3882 lli->lli_heat_flags |= LU_HEAT_FLAG_OFF;
3884 lli->lli_heat_flags &= ~LU_HEAT_FLAG_OFF;
3886 spin_unlock(&lli->lli_heat_lock);
3892 ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3894 struct inode *inode = file_inode(file);
3895 struct ll_file_data *fd = file->private_data;
3899 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%x\n",
3900 PFID(ll_inode2fid(inode)), inode, cmd);
3901 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
3903 /* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
3904 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
3908 case LL_IOC_GETFLAGS:
3909 /* Get the current value of the file flags */
3910 return put_user(fd->fd_flags, (int __user *)arg);
3911 case LL_IOC_SETFLAGS:
3912 case LL_IOC_CLRFLAGS:
3913 /* Set or clear specific file flags */
3914 /* XXX This probably needs checks to ensure the flags are
3915 * not abused, and to handle any flag side effects.
3917 if (get_user(flags, (int __user *) arg))
3920 if (cmd == LL_IOC_SETFLAGS) {
3921 if ((flags & LL_FILE_IGNORE_LOCK) &&
3922 !(file->f_flags & O_DIRECT)) {
3923 CERROR("%s: unable to disable locking on "
3924 "non-O_DIRECT file\n", current->comm);
3928 fd->fd_flags |= flags;
3930 fd->fd_flags &= ~flags;
3933 case LL_IOC_LOV_SETSTRIPE:
3934 case LL_IOC_LOV_SETSTRIPE_NEW:
3935 RETURN(ll_lov_setstripe(inode, file, (void __user *)arg));
3936 case LL_IOC_LOV_SETEA:
3937 RETURN(ll_lov_setea(inode, file, (void __user *)arg));
3938 case LL_IOC_LOV_SWAP_LAYOUTS: {
3940 struct lustre_swap_layouts lsl;
3942 if (copy_from_user(&lsl, (char __user *)arg,
3943 sizeof(struct lustre_swap_layouts)))
3946 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
3949 file2 = fget(lsl.sl_fd);
3953 /* O_WRONLY or O_RDWR */
3954 if ((file2->f_flags & O_ACCMODE) == O_RDONLY)
3955 GOTO(out, rc = -EPERM);
3957 if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
3958 struct inode *inode2;
3959 struct ll_inode_info *lli;
3960 struct obd_client_handle *och = NULL;
3962 lli = ll_i2info(inode);
3963 mutex_lock(&lli->lli_och_mutex);
3964 if (fd->fd_lease_och != NULL) {
3965 och = fd->fd_lease_och;
3966 fd->fd_lease_och = NULL;
3968 mutex_unlock(&lli->lli_och_mutex);
3970 GOTO(out, rc = -ENOLCK);
3971 inode2 = file_inode(file2);
3972 rc = ll_swap_layouts_close(och, inode, inode2);
3974 rc = ll_swap_layouts(file, file2, &lsl);
3980 case LL_IOC_LOV_GETSTRIPE:
3981 case LL_IOC_LOV_GETSTRIPE_NEW:
3982 RETURN(ll_file_getstripe(inode, (void __user *)arg, 0));
3983 case FS_IOC_GETFLAGS:
3984 case FS_IOC_SETFLAGS:
3985 RETURN(ll_iocontrol(inode, file, cmd, arg));
3986 case FSFILT_IOC_GETVERSION:
3987 case FS_IOC_GETVERSION:
3988 RETURN(put_user(inode->i_generation, (int __user *)arg));
3989 /* We need to special case any other ioctls we want to handle,
3990 * to send them to the MDS/OST as appropriate and to properly
3991 * network encode the arg field. */
3992 case FS_IOC_SETVERSION:
3995 case LL_IOC_GROUP_LOCK:
3996 RETURN(ll_get_grouplock(inode, file, arg));
3997 case LL_IOC_GROUP_UNLOCK:
3998 RETURN(ll_put_grouplock(inode, file, arg));
3999 case IOC_OBD_STATFS:
4000 RETURN(ll_obd_statfs(inode, (void __user *)arg));
4002 case LL_IOC_FLUSHCTX:
4003 RETURN(ll_flush_ctx(inode));
4004 case LL_IOC_PATH2FID: {
4005 if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
4006 sizeof(struct lu_fid)))
4011 case LL_IOC_GETPARENT:
4012 RETURN(ll_getparent(file, (struct getparent __user *)arg));
4014 case OBD_IOC_FID2PATH:
4015 RETURN(ll_fid2path(inode, (void __user *)arg));
4016 case LL_IOC_DATA_VERSION: {
4017 struct ioc_data_version idv;
4020 if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
4023 idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
4024 rc = ll_ioc_data_version(inode, &idv);
4027 copy_to_user((char __user *)arg, &idv, sizeof(idv)))
4033 case LL_IOC_GET_MDTIDX: {
4036 mdtidx = ll_get_mdt_idx(inode);
4040 if (put_user((int)mdtidx, (int __user *)arg))
4045 case OBD_IOC_GETNAME_OLD:
4046 case OBD_IOC_GETDTNAME:
4047 case OBD_IOC_GETMDNAME:
4048 RETURN(ll_get_obd_name(inode, cmd, arg));
4049 case LL_IOC_HSM_STATE_GET: {
4050 struct md_op_data *op_data;
4051 struct hsm_user_state *hus;
4058 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4059 LUSTRE_OPC_ANY, hus);
4060 if (IS_ERR(op_data)) {
4062 RETURN(PTR_ERR(op_data));
4065 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
4068 if (copy_to_user((void __user *)arg, hus, sizeof(*hus)))
4071 ll_finish_md_op_data(op_data);
4075 case LL_IOC_HSM_STATE_SET: {
4076 struct hsm_state_set *hss;
4083 if (copy_from_user(hss, (char __user *)arg, sizeof(*hss))) {
4088 rc = ll_hsm_state_set(inode, hss);
4093 case LL_IOC_HSM_ACTION: {
4094 struct md_op_data *op_data;
4095 struct hsm_current_action *hca;
4103 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4104 LUSTRE_OPC_ANY, hca);
4105 if (IS_ERR(op_data)) {
4107 RETURN(PTR_ERR(op_data));
4110 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
4113 GOTO(skip_copy, rc);
4115 /* The hsm_current_action retrieved from the server could
4116 * contain corrupt information. If the data is incorrect,
4117 * collect debug information. We still send the data, even if
4118 * incorrect, to user land to handle.
4120 action = hsm_user_action2name(hca->hca_action);
4121 if (strcmp(action, "UNKNOWN") == 0 ||
4122 hca->hca_state > HPS_DONE) {
4124 "HSM current state %s action %s, offset = %llu, length %llu\n",
4125 hsm_progress_state2name(hca->hca_state), action,
4126 hca->hca_location.offset, hca->hca_location.length);
4129 if (copy_to_user((char __user *)arg, hca, sizeof(*hca)))
4132 ll_finish_md_op_data(op_data);
4136 case LL_IOC_SET_LEASE_OLD: {
4137 struct ll_ioc_lease ioc = { .lil_mode = (__u32)arg };
4139 RETURN(ll_file_set_lease(file, &ioc, 0));
4141 case LL_IOC_SET_LEASE: {
4142 struct ll_ioc_lease ioc;
4144 if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
4147 RETURN(ll_file_set_lease(file, &ioc, arg));
4149 case LL_IOC_GET_LEASE: {
4150 struct ll_inode_info *lli = ll_i2info(inode);
4151 struct ldlm_lock *lock = NULL;
4154 mutex_lock(&lli->lli_och_mutex);
4155 if (fd->fd_lease_och != NULL) {
4156 struct obd_client_handle *och = fd->fd_lease_och;
4158 lock = ldlm_handle2lock(&och->och_lease_handle);
4160 lock_res_and_lock(lock);
4161 if (!ldlm_is_cancel(lock))
4162 fmode = och->och_flags;
4164 unlock_res_and_lock(lock);
4165 LDLM_LOCK_PUT(lock);
4168 mutex_unlock(&lli->lli_och_mutex);
4170 RETURN(ll_lease_type_from_fmode(fmode));
4172 case LL_IOC_HSM_IMPORT: {
4173 struct hsm_user_import *hui;
4179 if (copy_from_user(hui, (void __user *)arg, sizeof(*hui))) {
4184 rc = ll_hsm_import(inode, file, hui);
4189 case LL_IOC_FUTIMES_3: {
4190 struct ll_futimes_3 lfu;
4192 if (copy_from_user(&lfu,
4193 (const struct ll_futimes_3 __user *)arg,
4197 RETURN(ll_file_futimes_3(file, &lfu));
4199 case LL_IOC_LADVISE: {
4200 struct llapi_ladvise_hdr *k_ladvise_hdr;
4201 struct llapi_ladvise_hdr __user *u_ladvise_hdr;
4204 int alloc_size = sizeof(*k_ladvise_hdr);
4207 u_ladvise_hdr = (void __user *)arg;
4208 OBD_ALLOC_PTR(k_ladvise_hdr);
4209 if (k_ladvise_hdr == NULL)
4212 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
4213 GOTO(out_ladvise, rc = -EFAULT);
4215 if (k_ladvise_hdr->lah_magic != LADVISE_MAGIC ||
4216 k_ladvise_hdr->lah_count < 1)
4217 GOTO(out_ladvise, rc = -EINVAL);
4219 num_advise = k_ladvise_hdr->lah_count;
4220 if (num_advise >= LAH_COUNT_MAX)
4221 GOTO(out_ladvise, rc = -EFBIG);
4223 OBD_FREE_PTR(k_ladvise_hdr);
4224 alloc_size = offsetof(typeof(*k_ladvise_hdr),
4225 lah_advise[num_advise]);
4226 OBD_ALLOC(k_ladvise_hdr, alloc_size);
4227 if (k_ladvise_hdr == NULL)
4231 * TODO: submit multiple advices to one server in a single RPC
4233 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
4234 GOTO(out_ladvise, rc = -EFAULT);
4236 for (i = 0; i < num_advise; i++) {
4237 struct llapi_lu_ladvise *k_ladvise =
4238 &k_ladvise_hdr->lah_advise[i];
4239 struct llapi_lu_ladvise __user *u_ladvise =
4240 &u_ladvise_hdr->lah_advise[i];
4242 rc = ll_ladvise_sanity(inode, k_ladvise);
4244 GOTO(out_ladvise, rc);
4246 switch (k_ladvise->lla_advice) {
4247 case LU_LADVISE_LOCKNOEXPAND:
4248 rc = ll_lock_noexpand(file,
4249 k_ladvise->lla_peradvice_flags);
4250 GOTO(out_ladvise, rc);
4251 case LU_LADVISE_LOCKAHEAD:
4253 rc = ll_file_lock_ahead(file, k_ladvise);
4256 GOTO(out_ladvise, rc);
4259 &u_ladvise->lla_lockahead_result))
4260 GOTO(out_ladvise, rc = -EFAULT);
4263 rc = ll_ladvise(inode, file,
4264 k_ladvise_hdr->lah_flags,
4267 GOTO(out_ladvise, rc);
4274 OBD_FREE(k_ladvise_hdr, alloc_size);
4277 case LL_IOC_FLR_SET_MIRROR: {
4278 /* mirror I/O must be direct to avoid polluting page cache
4280 if (!(file->f_flags & O_DIRECT))
4283 fd->fd_designated_mirror = (__u32)arg;
4286 case FS_IOC_FSGETXATTR:
4287 RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
4288 case FS_IOC_FSSETXATTR:
4289 RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
4290 case LL_IOC_PROJECT:
4291 RETURN(ll_ioctl_project(file, cmd, arg));
4293 RETURN(put_user(PAGE_SIZE, (int __user *)arg));
4294 case LL_IOC_HEAT_GET: {
4295 struct lu_heat uheat;
4296 struct lu_heat *heat;
4299 if (copy_from_user(&uheat, (void __user *)arg, sizeof(uheat)))
4302 if (uheat.lh_count > OBD_HEAT_COUNT)
4303 uheat.lh_count = OBD_HEAT_COUNT;
4305 size = offsetof(typeof(uheat), lh_heat[uheat.lh_count]);
4306 OBD_ALLOC(heat, size);
4310 heat->lh_count = uheat.lh_count;
4311 ll_heat_get(inode, heat);
4312 rc = copy_to_user((char __user *)arg, heat, size);
4313 OBD_FREE(heat, size);
4314 RETURN(rc ? -EFAULT : 0);
4316 case LL_IOC_HEAT_SET: {
4319 if (copy_from_user(&flags, (void __user *)arg, sizeof(flags)))
4322 rc = ll_heat_set(inode, flags);
4325 case LL_IOC_PCC_DETACH: {
4326 struct lu_pcc_detach *detach;
4328 OBD_ALLOC_PTR(detach);
4332 if (copy_from_user(detach,
4333 (const struct lu_pcc_detach __user *)arg,
4335 GOTO(out_detach_free, rc = -EFAULT);
4337 if (!S_ISREG(inode->i_mode))
4338 GOTO(out_detach_free, rc = -EINVAL);
4340 if (!inode_owner_or_capable(&init_user_ns, inode))
4341 GOTO(out_detach_free, rc = -EPERM);
4343 rc = pcc_ioctl_detach(inode, detach->pccd_opt);
4345 OBD_FREE_PTR(detach);
4348 case LL_IOC_PCC_STATE: {
4349 struct lu_pcc_state __user *ustate =
4350 (struct lu_pcc_state __user *)arg;
4351 struct lu_pcc_state *state;
4353 OBD_ALLOC_PTR(state);
4357 if (copy_from_user(state, ustate, sizeof(*state)))
4358 GOTO(out_state, rc = -EFAULT);
4360 rc = pcc_ioctl_state(file, inode, state);
4362 GOTO(out_state, rc);
4364 if (copy_to_user(ustate, state, sizeof(*state)))
4365 GOTO(out_state, rc = -EFAULT);
4368 OBD_FREE_PTR(state);
4371 #ifdef HAVE_LUSTRE_CRYPTO
4372 case LL_IOC_SET_ENCRYPTION_POLICY:
4373 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4375 return llcrypt_ioctl_set_policy(file, (const void __user *)arg);
4376 case LL_IOC_GET_ENCRYPTION_POLICY_EX:
4377 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4379 return llcrypt_ioctl_get_policy_ex(file, (void __user *)arg);
4380 case LL_IOC_ADD_ENCRYPTION_KEY:
4381 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4383 return llcrypt_ioctl_add_key(file, (void __user *)arg);
4384 case LL_IOC_REMOVE_ENCRYPTION_KEY:
4385 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4387 return llcrypt_ioctl_remove_key(file, (void __user *)arg);
4388 case LL_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4389 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4391 return llcrypt_ioctl_remove_key_all_users(file,
4392 (void __user *)arg);
4393 case LL_IOC_GET_ENCRYPTION_KEY_STATUS:
4394 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4396 return llcrypt_ioctl_get_key_status(file, (void __user *)arg);
4399 case LL_IOC_UNLOCK_FOREIGN: {
4400 struct dentry *dentry = file_dentry(file);
4402 /* if not a foreign symlink do nothing */
4403 if (ll_foreign_is_removable(dentry, true)) {
4405 "prevent unlink of non-foreign file ("DFID")\n",
4406 PFID(ll_inode2fid(inode)));
4407 RETURN(-EOPNOTSUPP);
4413 RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
4414 (void __user *)arg));
4418 loff_t ll_lseek(struct file *file, loff_t offset, int whence)
4420 struct inode *inode = file_inode(file);
4423 struct cl_lseek_io *lsio;
4430 env = cl_env_get(&refcheck);
4432 RETURN(PTR_ERR(env));
4434 io = vvp_env_thread_io(env);
4435 io->ci_obj = ll_i2info(inode)->lli_clob;
4436 ll_io_set_mirror(io, file);
4438 lsio = &io->u.ci_lseek;
4439 lsio->ls_start = offset;
4440 lsio->ls_whence = whence;
4441 lsio->ls_result = -ENXIO;
4444 rc = cl_io_init(env, io, CIT_LSEEK, io->ci_obj);
4446 struct vvp_io *vio = vvp_env_io(env);
4448 vio->vui_fd = file->private_data;
4449 rc = cl_io_loop(env, io);
4453 retval = rc ? : lsio->ls_result;
4454 cl_io_fini(env, io);
4455 } while (unlikely(io->ci_need_restart));
4457 cl_env_put(env, &refcheck);
4459 /* Without the key, the SEEK_HOLE return value has to be
4460 * rounded up to the next LUSTRE_ENCRYPTION_UNIT_SIZE.
4462 if (llcrypt_require_key(inode) == -ENOKEY && whence == SEEK_HOLE)
4463 retval = round_up(retval, LUSTRE_ENCRYPTION_UNIT_SIZE);
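/*
 * Illustrative example (not from the original source), assuming
 * LUSTRE_ENCRYPTION_UNIT_SIZE is 4096: an apparent hole offset of 5000
 * would be reported as 8192 when the encryption key is not available.
 */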
4468 static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
4470 struct inode *inode = file_inode(file);
4471 loff_t retval = offset, eof = 0;
4472 ktime_t kstart = ktime_get();
4476 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
4477 PFID(ll_inode2fid(inode)), inode, retval, retval,
4480 if (origin == SEEK_END) {
4481 retval = ll_glimpse_size(inode);
4484 eof = i_size_read(inode);
4487 if (origin == SEEK_HOLE || origin == SEEK_DATA) {
4491 /* flush local cache first if any */
4492 cl_sync_file_range(inode, offset, OBD_OBJECT_EOF,
4495 retval = ll_lseek(file, offset, origin);
4498 retval = vfs_setpos(file, retval, ll_file_maxbytes(inode));
4500 retval = generic_file_llseek_size(file, offset, origin,
4501 ll_file_maxbytes(inode), eof);
4504 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
4505 ktime_us_delta(ktime_get(), kstart));
4509 static int ll_flush(struct file *file, fl_owner_t id)
4511 struct inode *inode = file_inode(file);
4512 struct ll_inode_info *lli = ll_i2info(inode);
4513 struct ll_file_data *fd = file->private_data;
4516 LASSERT(!S_ISDIR(inode->i_mode));
4518 /* catch async errors that were recorded back when async writeback
4519 * failed for pages in this mapping. */
4520 rc = lli->lli_async_rc;
4521 lli->lli_async_rc = 0;
4522 if (lli->lli_clob != NULL) {
4523 err = lov_read_and_clear_async_rc(lli->lli_clob);
4528 /* The application has already been told about the write failure.
4529 * Do not report the failure again. */
4530 if (fd->fd_write_failed)
4532 return rc ? -EIO : 0;
4536 * Called to make sure a portion of the file has been written out.
4537 * If @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to the OSTs.
4539 * Return how many pages have been written.
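/*
 * Illustrative usage sketch (not from the original source): flushing the
 * whole file locally, without sending OST_SYNC RPCs, could look like
 *
 *	rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 0);
 *
 * where a non-negative return value is the number of pages written.
 */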
4541 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
4542 enum cl_fsync_mode mode, int ignore_layout)
4546 struct cl_fsync_io *fio;
4551 if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
4552 mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
4555 env = cl_env_get(&refcheck);
4557 RETURN(PTR_ERR(env));
4559 io = vvp_env_thread_io(env);
4560 io->ci_obj = ll_i2info(inode)->lli_clob;
4561 io->ci_ignore_layout = ignore_layout;
4563 /* initialize parameters for sync */
4564 fio = &io->u.ci_fsync;
4565 fio->fi_start = start;
4567 fio->fi_fid = ll_inode2fid(inode);
4568 fio->fi_mode = mode;
4569 fio->fi_nr_written = 0;
4571 if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
4572 result = cl_io_loop(env, io);
4574 result = io->ci_result;
4576 result = fio->fi_nr_written;
4577 cl_io_fini(env, io);
4578 cl_env_put(env, &refcheck);
4584 * When dentry is provided (the 'else' case), file_dentry() may be
4585 * null and dentry must be used directly rather than pulled from
4586 * file_dentry() as is done otherwise.
4589 int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
4591 struct dentry *dentry = file_dentry(file);
4592 struct inode *inode = dentry->d_inode;
4593 struct ll_inode_info *lli = ll_i2info(inode);
4594 struct ptlrpc_request *req;
4595 ktime_t kstart = ktime_get();
4601 "VFS Op:inode="DFID"(%p), start %lld, end %lld, datasync %d\n",
4602 PFID(ll_inode2fid(inode)), inode, start, end, datasync);
4604 /* fsync's caller has already called _fdata{sync,write}, we want
4605 * that IO to finish before calling the osc and mdc sync methods */
4606 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
4608 /* catch async errors that were recorded back when async writeback
4609 * failed for pages in this mapping. */
4610 if (!S_ISDIR(inode->i_mode)) {
4611 err = lli->lli_async_rc;
4612 lli->lli_async_rc = 0;
4615 if (lli->lli_clob != NULL) {
4616 err = lov_read_and_clear_async_rc(lli->lli_clob);
4622 err = md_fsync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
4626 ptlrpc_req_finished(req);
4628 if (S_ISREG(inode->i_mode)) {
4629 struct ll_file_data *fd = file->private_data;
4632 /* Sync metadata on MDT first, and then sync the cached data
4635 err = pcc_fsync(file, start, end, datasync, &cached);
4637 err = cl_sync_file_range(inode, start, end,
4639 if (rc == 0 && err < 0)
4642 fd->fd_write_failed = true;
4644 fd->fd_write_failed = false;
4648 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
4649 ktime_us_delta(ktime_get(), kstart));
4654 ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
4656 struct inode *inode = file_inode(file);
4657 struct ll_sb_info *sbi = ll_i2sbi(inode);
4658 struct ldlm_enqueue_info einfo = {
4659 .ei_type = LDLM_FLOCK,
4660 .ei_cb_cp = ldlm_flock_completion_ast,
4661 .ei_cbdata = file_lock,
4663 struct md_op_data *op_data;
4664 struct lustre_handle lockh = { 0 };
4665 union ldlm_policy_data flock = { { 0 } };
4666 int fl_type = file_lock->fl_type;
4667 ktime_t kstart = ktime_get();
4673 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
4674 PFID(ll_inode2fid(inode)), file_lock);
4676 if (file_lock->fl_flags & FL_FLOCK) {
4677 LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
4678 /* flocks are whole-file locks */
4679 flock.l_flock.end = OFFSET_MAX;
4680 /* For flocks the owner is determined by the local file descriptor */
4681 flock.l_flock.owner = (unsigned long)file_lock->fl_file;
4682 } else if (file_lock->fl_flags & FL_POSIX) {
4683 flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
4684 flock.l_flock.start = file_lock->fl_start;
4685 flock.l_flock.end = file_lock->fl_end;
4689 flock.l_flock.pid = file_lock->fl_pid;
4691 #if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner)
4692 /* Somewhat ugly workaround for svc lockd.
4693 * lockd installs a custom fl_lmops->lm_compare_owner that checks
4694 * that the fl_owner is the same (which it always is on the local node,
4695 * I guess, between lockd processes) and then compares the pid.
4696 * As such we assign the pid to the owner field to make it all work;
4697 * a conflict with normal locks is unlikely since the pid space and
4698 * the pointer space for current->files do not intersect. */
4699 if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
4700 flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
4705 einfo.ei_mode = LCK_PR;
4708 /* An unlock request may or may not have any relation to
4709 * existing locks so we may not be able to pass a lock handle
4710 * via a normal ldlm_lock_cancel() request. The request may even
4711 * unlock a byte range in the middle of an existing lock. In
4712 * order to process an unlock request we need all of the same
4713 * information that is given with a normal read or write record
4714 * lock request. To avoid creating another ldlm unlock (cancel)
4715 * message we'll treat a LCK_NL flock request as an unlock. */
4716 einfo.ei_mode = LCK_NL;
4719 einfo.ei_mode = LCK_PW;
4722 CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", fl_type);
4737 flags = LDLM_FL_BLOCK_NOWAIT;
4743 flags = LDLM_FL_TEST_LOCK;
4746 CERROR("unknown fcntl lock command: %d\n", cmd);
4750 /* Save the old mode so that if the mode in the lock changes we
4751 * can decrement the appropriate reader or writer refcount. */
4752 file_lock->fl_type = einfo.ei_mode;
4754 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4755 LUSTRE_OPC_ANY, NULL);
4756 if (IS_ERR(op_data))
4757 RETURN(PTR_ERR(op_data));
4759 CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, "
4760 "start=%llu, end=%llu\n", PFID(ll_inode2fid(inode)),
4761 flock.l_flock.pid, flags, einfo.ei_mode,
4762 flock.l_flock.start, flock.l_flock.end);
4764 rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, &lockh,
4767 /* Restore the file lock type if not TEST lock. */
4768 if (!(flags & LDLM_FL_TEST_LOCK))
4769 file_lock->fl_type = fl_type;
4771 #ifdef HAVE_LOCKS_LOCK_FILE_WAIT
4772 if ((rc == 0 || file_lock->fl_type == F_UNLCK) &&
4773 !(flags & LDLM_FL_TEST_LOCK))
4774 rc2 = locks_lock_file_wait(file, file_lock);
4776 if ((file_lock->fl_flags & FL_FLOCK) &&
4777 (rc == 0 || file_lock->fl_type == F_UNLCK))
4778 rc2 = flock_lock_file_wait(file, file_lock);
4779 if ((file_lock->fl_flags & FL_POSIX) &&
4780 (rc == 0 || file_lock->fl_type == F_UNLCK) &&
4781 !(flags & LDLM_FL_TEST_LOCK))
4782 rc2 = posix_lock_file_wait(file, file_lock);
4783 #endif /* HAVE_LOCKS_LOCK_FILE_WAIT */
4785 if (rc2 && file_lock->fl_type != F_UNLCK) {
4786 einfo.ei_mode = LCK_NL;
4787 md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data,
4792 ll_finish_md_op_data(op_data);
4795 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK,
4796 ktime_us_delta(ktime_get(), kstart));
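/*
 * Editor's note (illustrative sketch, not part of the original source):
 * ll_file_flock() serves both BSD flock() locks (FL_FLOCK: whole-file,
 * owner keyed on the struct file) and POSIX fcntl() byte-range locks
 * (FL_POSIX: owner keyed on fl_owner).  A minimal userspace sketch,
 * assuming a file on a Lustre mount at /mnt/lustre/f:
 *
 *	#include <fcntl.h>
 *	#include <sys/file.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/lustre/f", O_RDWR);
 *		struct flock fl = {
 *			.l_type   = F_WRLCK,	// enqueued as LCK_PW above
 *			.l_whence = SEEK_SET,
 *			.l_start  = 0,
 *			.l_len    = 4096,
 *		};
 *
 *		flock(fd, LOCK_EX);		// whole-file FL_FLOCK lock
 *		flock(fd, LOCK_UN);
 *
 *		fcntl(fd, F_SETLKW, &fl);	// byte-range FL_POSIX lock
 *		fl.l_type = F_UNLCK;		// unlock maps to LCK_NL above
 *		fcntl(fd, F_SETLK, &fl);
 *		return close(fd);
 *	}
 */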
4800 int ll_get_fid_by_name(struct inode *parent, const char *name,
4801 int namelen, struct lu_fid *fid,
4802 struct inode **inode)
4804 struct md_op_data *op_data = NULL;
4805 struct mdt_body *body;
4806 struct ptlrpc_request *req;
4810 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
4811 LUSTRE_OPC_ANY, NULL);
4812 if (IS_ERR(op_data))
4813 RETURN(PTR_ERR(op_data));
4815 op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
4816 rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
4817 ll_finish_md_op_data(op_data);
4821 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
4823 GOTO(out_req, rc = -EFAULT);
4825 *fid = body->mbo_fid1;
4828 rc = ll_prep_inode(inode, &req->rq_pill, parent->i_sb, NULL);
4830 ptlrpc_req_finished(req);
4834 int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum,
4835 const char *name, __u32 flags)
4837 struct dentry *dchild = NULL;
4838 struct inode *child_inode = NULL;
4839 struct md_op_data *op_data;
4840 struct ptlrpc_request *request = NULL;
4841 struct obd_client_handle *och = NULL;
4843 struct mdt_body *body;
4844 __u64 data_version = 0;
4845 size_t namelen = strlen(name);
4846 int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
4847 bool oldformat = false;
4851 CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
4852 PFID(ll_inode2fid(parent)), name,
4853 lum->lum_stripe_offset, lum->lum_stripe_count);
4855 if (lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC) &&
4856 lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC_SPECIFIC))
4857 lustre_swab_lmv_user_md(lum);
4859 /* Get child FID first */
4860 qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
4863 dchild = d_lookup(file_dentry(file), &qstr);
4865 if (dchild->d_inode)
4866 child_inode = igrab(dchild->d_inode);
4871 rc = ll_get_fid_by_name(parent, name, namelen, NULL,
4880 if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
4881 OBD_CONNECT2_DIR_MIGRATE)) {
4882 if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
4883 ll_dir_striped(child_inode)) {
4884 CERROR("%s: MDT doesn't support striped directory "
4885 "migration!\n", ll_i2sbi(parent)->ll_fsname);
4886 GOTO(out_iput, rc = -EOPNOTSUPP);
4891 * lfs migrate command needs to be blocked on the client
4892 * by checking the migrate FID against the FID of the filesystem root.
4895 if (is_root_inode(child_inode))
4896 GOTO(out_iput, rc = -EINVAL);
4898 if (IS_ENCRYPTED(parent)) {
4899 if (unlikely(!llcrypt_policy_has_filename_enc(parent)))
4901 } else if (IS_ENCRYPTED(child_inode) &&
4902 unlikely(!llcrypt_policy_has_filename_enc(child_inode))) {
4905 if (unlikely(oldformat)) {
4907 "cannot migrate old format encrypted "DFID", please move to new enc dir first\n",
4908 PFID(ll_inode2fid(child_inode)));
4909 GOTO(out_iput, rc = -EUCLEAN);
4912 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
4913 child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
4914 if (IS_ERR(op_data))
4915 GOTO(out_iput, rc = PTR_ERR(op_data));
4917 inode_lock(child_inode);
4918 op_data->op_fid3 = *ll_inode2fid(child_inode);
4919 if (!fid_is_sane(&op_data->op_fid3)) {
4920 CERROR("%s: migrate %s, but FID "DFID" is insane\n",
4921 ll_i2sbi(parent)->ll_fsname, name,
4922 PFID(&op_data->op_fid3));
4923 GOTO(out_unlock, rc = -EINVAL);
4926 op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
4927 op_data->op_data = lum;
4928 op_data->op_data_size = lumlen;
4930 /* migrate dirent only for subdirs if MDS_MIGRATE_NSONLY set */
4931 if (S_ISDIR(child_inode->i_mode) && (flags & MDS_MIGRATE_NSONLY) &&
4932 lmv_dir_layout_changing(ll_i2info(parent)->lli_lsm_md))
4933 op_data->op_bias |= MDS_MIGRATE_NSONLY;
4936 if (S_ISREG(child_inode->i_mode)) {
4937 och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
4941 GOTO(out_unlock, rc);
4944 rc = ll_data_version(child_inode, &data_version,
4947 GOTO(out_close, rc);
4949 op_data->op_open_handle = och->och_open_handle;
4950 op_data->op_data_version = data_version;
4951 op_data->op_lease_handle = och->och_lease_handle;
4952 op_data->op_bias |= MDS_CLOSE_MIGRATE;
4954 spin_lock(&och->och_mod->mod_open_req->rq_lock);
4955 och->och_mod->mod_open_req->rq_replay = 0;
4956 spin_unlock(&och->och_mod->mod_open_req->rq_lock);
4959 rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data,
4960 op_data->op_name, op_data->op_namelen,
4961 op_data->op_name, op_data->op_namelen, &request);
4963 LASSERT(request != NULL);
4964 ll_update_times(request, parent);
4967 if (rc == 0 || rc == -EAGAIN) {
4968 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
4969 LASSERT(body != NULL);
4971 /* If the server does release the layout lock, then we clean up
4972 * the client och here, otherwise release it in out_close: */
4973 if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
4974 obd_mod_put(och->och_mod);
4975 md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
4977 och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
4983 if (request != NULL) {
4984 ptlrpc_req_finished(request);
4988 /* Try again if the lease has been cancelled. */
4989 if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
4994 ll_lease_close(och, child_inode, NULL);
4996 clear_nlink(child_inode);
4998 inode_unlock(child_inode);
4999 ll_finish_md_op_data(op_data);
5006 ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
5008 struct ll_file_data *fd = file->private_data;
5012 * To avoid a flood of warning messages, only print one message
5013 * per file. The overall message rate on the client is also limited
5014 * by CDEBUG_LIMIT.
5016 if (!(fd->fd_flags & LL_FILE_FLOCK_WARNING)) {
5017 fd->fd_flags |= LL_FILE_FLOCK_WARNING;
5018 CDEBUG_LIMIT(D_CONSOLE,
5019 "flock disabled, mount with '-o [local]flock' to enable\r\n");
5025 * Test if some locks matching bits and l_req_mode are acquired
5026 * - the bits can be in different locks
5027 * - if found, clear the common lock bits in *bits
5028 * - the bits not found are kept in *bits
5030 * \param bits [IN] searched lock bits
5031 * \param l_req_mode [IN] searched lock mode
5032 * \retval boolean, true iff all bits are found
5034 int ll_have_md_lock(struct inode *inode, __u64 *bits, enum ldlm_mode l_req_mode)
5036 struct lustre_handle lockh;
5037 union ldlm_policy_data policy;
5038 enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
5039 (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
5048 fid = &ll_i2info(inode)->lli_fid;
5049 CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
5050 ldlm_lockname[mode]);
5052 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
5053 for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
5054 policy.l_inodebits.bits = *bits & BIT(i);
5055 if (policy.l_inodebits.bits == 0)
5058 if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
5059 &policy, mode, &lockh)) {
5060 struct ldlm_lock *lock;
5062 lock = ldlm_handle2lock(&lockh);
5065 ~(lock->l_policy_data.l_inodebits.bits);
5066 LDLM_LOCK_PUT(lock);
5068 *bits &= ~policy.l_inodebits.bits;
5075 enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
5076 struct lustre_handle *lockh, __u64 flags,
5077 enum ldlm_mode mode)
5079 union ldlm_policy_data policy = { .l_inodebits = { bits } };
5084 fid = &ll_i2info(inode)->lli_fid;
5085 CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
5087 rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
5088 fid, LDLM_IBITS, &policy, mode, lockh);
5093 static int ll_inode_revalidate_fini(struct inode *inode, int rc)
5095 /* Already unlinked. Just update nlink and return success */
5096 if (rc == -ENOENT) {
5098 /* If it is a striped directory and there is a bad stripe,
5099 * let's revalidate the dentry again instead of returning an error. */
5101 if (ll_dir_striped(inode))
5104 /* This path cannot be hit for regular files unless in
5105 * case of obscure races, so no need to validate size. */
5107 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
5109 } else if (rc != 0) {
5110 CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
5111 "%s: revalidate FID "DFID" error: rc = %d\n",
5112 ll_i2sbi(inode)->ll_fsname,
5113 PFID(ll_inode2fid(inode)), rc);
5119 static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op)
5121 struct inode *parent;
5122 struct inode *inode = dentry->d_inode;
5123 struct obd_export *exp = ll_i2mdexp(inode);
5124 struct lookup_intent oit = {
5127 struct ptlrpc_request *req = NULL;
5128 struct md_op_data *op_data;
5129 const char *name = NULL;
5134 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
5135 PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
5137 if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) {
5138 parent = dentry->d_parent->d_inode;
5139 name = dentry->d_name.name;
5140 namelen = dentry->d_name.len;
5145 op_data = ll_prep_md_op_data(NULL, parent, inode, name, namelen, 0,
5146 LUSTRE_OPC_ANY, NULL);
5147 if (IS_ERR(op_data))
5148 RETURN(PTR_ERR(op_data));
5150 /* Call getattr by fid */
5151 if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID)
5152 op_data->op_flags = MF_GETATTR_BY_FID;
5153 rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0);
5154 ll_finish_md_op_data(op_data);
5156 rc = ll_inode_revalidate_fini(inode, rc);
5160 rc = ll_revalidate_it_finish(req, &oit, dentry);
5162 ll_intent_release(&oit);
5166 /* Unlinked? Unhash dentry, so it is not picked up later by
5167 * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
5168 * here to preserve get_cwd functionality on 2.6.
5170 if (!dentry->d_inode->i_nlink)
5171 d_lustre_invalidate(dentry);
5173 ll_lookup_finish_locks(&oit, dentry);
5175 ptlrpc_req_finished(req);
5180 static int ll_merge_md_attr(struct inode *inode)
5182 struct ll_inode_info *lli = ll_i2info(inode);
5183 struct cl_attr attr = { 0 };
5186 LASSERT(lli->lli_lsm_md != NULL);
5188 if (!lmv_dir_striped(lli->lli_lsm_md))
5191 down_read(&lli->lli_lsm_sem);
5192 rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
5193 &attr, ll_md_blocking_ast);
5194 up_read(&lli->lli_lsm_sem);
5198 spin_lock(&inode->i_lock);
5199 set_nlink(inode, attr.cat_nlink);
5200 spin_unlock(&inode->i_lock);
5202 inode->i_blocks = attr.cat_blocks;
5203 i_size_write(inode, attr.cat_size);
5205 ll_i2info(inode)->lli_atime = attr.cat_atime;
5206 ll_i2info(inode)->lli_mtime = attr.cat_mtime;
5207 ll_i2info(inode)->lli_ctime = attr.cat_ctime;
5212 int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
5213 unsigned int flags, bool foreign)
5215 struct inode *inode = de->d_inode;
5216 struct ll_sb_info *sbi = ll_i2sbi(inode);
5217 struct ll_inode_info *lli = ll_i2info(inode);
5218 struct inode *dir = de->d_parent->d_inode;
5219 bool need_glimpse = true;
5220 ktime_t kstart = ktime_get();
5223 /* The OST object(s) determine the file size, blocks and mtime. */
5224 if (!(request_mask & STATX_SIZE || request_mask & STATX_BLOCKS ||
5225 request_mask & STATX_MTIME))
5226 need_glimpse = false;
5228 if (dentry_may_statahead(dir, de))
5229 ll_start_statahead(dir, de, need_glimpse &&
5230 !(flags & AT_STATX_DONT_SYNC));
5232 if (flags & AT_STATX_DONT_SYNC)
5233 GOTO(fill_attr, rc = 0);
5235 rc = ll_inode_revalidate(de, IT_GETATTR);
5239 /* foreign files/dirs are always of zero length, so there is no
5240 * need to validate the size.
5242 if (S_ISREG(inode->i_mode) && !foreign) {
5246 GOTO(fill_attr, rc);
5248 rc = pcc_inode_getattr(inode, request_mask, flags, &cached);
5249 if (cached && rc < 0)
5253 GOTO(fill_attr, rc);
5256 * If the returned attr is masked with OBD_MD_FLSIZE &
5257 * OBD_MD_FLBLOCKS & OBD_MD_FLMTIME, it means that the file size
5258 * or blocks obtained from MDT is strictly correct, and the file
5259 * is usually not being modified by clients, and the [a|m|c]time
5260 * got from MDT is also strictly correct.
5261 * In this case, there is no need to send glimpse
5262 * RPCs to the OSTs for file attributes such as the size and blocks.
5264 if (lli->lli_attr_valid & OBD_MD_FLSIZE &&
5265 lli->lli_attr_valid & OBD_MD_FLBLOCKS &&
5266 lli->lli_attr_valid & OBD_MD_FLMTIME) {
5267 inode->i_mtime.tv_sec = lli->lli_mtime;
5268 if (lli->lli_attr_valid & OBD_MD_FLATIME)
5269 inode->i_atime.tv_sec = lli->lli_atime;
5270 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
5271 inode->i_ctime.tv_sec = lli->lli_ctime;
5272 GOTO(fill_attr, rc);
5275 /* In case of restore, the MDT has the right size and has
5276 * already sent it back without granting the layout lock, so the
5277 * inode is up-to-date and a glimpse is useless.
5278 * Also, to glimpse we need the layout; during a running
5279 * restore the MDT holds the layout lock, so the glimpse will
5280 * block up to the end of the restore (getattr will block)
5282 if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
5283 rc = ll_glimpse_size(inode);
5288 /* If the object isn't a regular file then don't validate size. */
5289 /* a foreign dir is not a striped dir */
5290 if (ll_dir_striped(inode) && !foreign) {
5291 rc = ll_merge_md_attr(inode);
5296 if (lli->lli_attr_valid & OBD_MD_FLATIME)
5297 inode->i_atime.tv_sec = lli->lli_atime;
5298 if (lli->lli_attr_valid & OBD_MD_FLMTIME)
5299 inode->i_mtime.tv_sec = lli->lli_mtime;
5300 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
5301 inode->i_ctime.tv_sec = lli->lli_ctime;
5305 OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
5307 if (ll_need_32bit_api(sbi)) {
5308 stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
5309 stat->dev = ll_compat_encode_dev(inode->i_sb->s_dev);
5310 stat->rdev = ll_compat_encode_dev(inode->i_rdev);
5312 stat->ino = inode->i_ino;
5313 stat->dev = inode->i_sb->s_dev;
5314 stat->rdev = inode->i_rdev;
5317 /* foreign symlink to be exposed as a real symlink */
5319 stat->mode = inode->i_mode;
5321 stat->mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
5323 stat->uid = inode->i_uid;
5324 stat->gid = inode->i_gid;
5325 stat->atime = inode->i_atime;
5326 stat->mtime = inode->i_mtime;
5327 stat->ctime = inode->i_ctime;
5328 /* stat->blksize is used to report the preferred IO size */
5329 if (sbi->ll_stat_blksize)
5330 stat->blksize = sbi->ll_stat_blksize;
5331 else if (S_ISREG(inode->i_mode))
5332 stat->blksize = 1 << min(PTLRPC_MAX_BRW_BITS + 1,
5333 LL_MAX_BLKSIZE_BITS);
5335 stat->blksize = 1 << inode->i_sb->s_blocksize_bits;
5337 stat->nlink = inode->i_nlink;
5338 stat->size = i_size_read(inode);
5339 stat->blocks = inode->i_blocks;
5341 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
5342 if (flags & AT_STATX_DONT_SYNC) {
5343 if (stat->size == 0 &&
5344 lli->lli_attr_valid & OBD_MD_FLLAZYSIZE)
5345 stat->size = lli->lli_lazysize;
5346 if (stat->blocks == 0 &&
5347 lli->lli_attr_valid & OBD_MD_FLLAZYBLOCKS)
5348 stat->blocks = lli->lli_lazyblocks;
5351 if (lli->lli_attr_valid & OBD_MD_FLBTIME) {
5352 stat->result_mask |= STATX_BTIME;
5353 stat->btime.tv_sec = lli->lli_btime;
5356 stat->attributes_mask = STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
5357 stat->attributes |= ll_inode_to_ext_flags(inode->i_flags);
5358 stat->result_mask &= request_mask;
5361 ll_stats_ops_tally(sbi, LPROC_LL_GETATTR,
5362 ktime_us_delta(ktime_get(), kstart));
5367 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
5368 int ll_getattr(struct user_namespace *mnt_userns, const struct path *path,
5369 struct kstat *stat, u32 request_mask, unsigned int flags)
5371 return ll_getattr_dentry(path->dentry, stat, request_mask, flags,
5375 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
5377 return ll_getattr_dentry(de, stat, STATX_BASIC_STATS,
5378 AT_STATX_SYNC_AS_STAT, false);
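/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the AT_STATX_DONT_SYNC handling above lets userspace ask for whatever
 * attributes the client already has (including the lazy size/blocks kept
 * by the MDT) without forcing revalidation or OST glimpses.  A minimal
 * statx(2) sketch, assuming a file on a Lustre mount at /mnt/lustre/f:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct statx stx;
 *
 *		// cheap, possibly stale attributes: no OST glimpse forced
 *		statx(AT_FDCWD, "/mnt/lustre/f", AT_STATX_DONT_SYNC,
 *		      STATX_SIZE | STATX_BTIME, &stx);
 *		if (stx.stx_mask & STATX_BTIME)
 *			printf("btime %lld\n",
 *			       (long long)stx.stx_btime.tv_sec);
 *		return 0;
 *	}
 */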
5382 int cl_falloc(struct file *file, struct inode *inode, int mode, loff_t offset,
5389 loff_t size = i_size_read(inode);
5393 env = cl_env_get(&refcheck);
5395 RETURN(PTR_ERR(env));
5397 io = vvp_env_thread_io(env);
5398 io->ci_obj = ll_i2info(inode)->lli_clob;
5399 ll_io_set_mirror(io, file);
5401 io->ci_verify_layout = 1;
5402 io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu);
5403 io->u.ci_setattr.sa_falloc_mode = mode;
5404 io->u.ci_setattr.sa_falloc_offset = offset;
5405 io->u.ci_setattr.sa_falloc_end = offset + len;
5406 io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE;
5408 CDEBUG(D_INODE, "UID %u GID %u\n",
5409 from_kuid(&init_user_ns, inode->i_uid),
5410 from_kgid(&init_user_ns, inode->i_gid));
5412 io->u.ci_setattr.sa_falloc_uid = from_kuid(&init_user_ns, inode->i_uid);
5413 io->u.ci_setattr.sa_falloc_gid = from_kgid(&init_user_ns, inode->i_gid);
5415 if (io->u.ci_setattr.sa_falloc_end > size) {
5416 loff_t newsize = io->u.ci_setattr.sa_falloc_end;
5418 /* Check new size against VFS/VM file size limit and rlimit */
5419 rc = inode_newsize_ok(inode, newsize);
5422 if (newsize > ll_file_maxbytes(inode)) {
5423 CDEBUG(D_INODE, "file size too large %llu > %llu\n",
5424 (unsigned long long)newsize,
5425 ll_file_maxbytes(inode));
5432 rc = cl_io_init(env, io, CIT_SETATTR, io->ci_obj);
5434 rc = cl_io_loop(env, io);
5437 cl_io_fini(env, io);
5438 } while (unlikely(io->ci_need_restart));
5441 cl_env_put(env, &refcheck);
5445 long ll_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
5447 struct inode *inode = file_inode(filp);
5450 if (offset < 0 || len <= 0)
5453 * Encrypted inodes can't handle collapse range or zero range or insert
5454 * range since we would need to re-encrypt blocks with a different IV or
5455 * XTS tweak (which are based on the logical block number).
5456 * Similar to what ext4 does.
5458 if (IS_ENCRYPTED(inode) &&
5459 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
5460 FALLOC_FL_ZERO_RANGE)))
5461 RETURN(-EOPNOTSUPP);
5464 * Only mode == 0 (which is standard prealloc) and PUNCH_HOLE are supported.
5465 * The rest of the mode options are not supported yet.
5467 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
5468 RETURN(-EOPNOTSUPP);
5470 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1);
5472 rc = cl_falloc(filp, inode, mode, offset, len);
5474 * ENOTSUPP (524) is an NFSv3 specific error code erroneously
5475 * used by Lustre in several places. Returning it here would
5476 * confuse applications that explicitly test for EOPNOTSUPP
5477 * (95) and fall back to ftruncate().
5479 if (rc == -ENOTSUPP)
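/*
 * Editor's note (illustrative sketch, not part of the original source):
 * as the checks above show, only mode 0 / FALLOC_FL_KEEP_SIZE
 * (preallocation) and FALLOC_FL_PUNCH_HOLE are accepted, and the
 * collapse/insert/zero-range modes are additionally rejected on encrypted
 * inodes.  A minimal userspace sketch, assuming /mnt/lustre/f exists:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/lustre/f", O_RDWR);
 *
 *		// preallocate 1 MiB without changing the apparent size
 *		fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 *		// punch a 64 KiB hole (PUNCH_HOLE requires KEEP_SIZE)
 *		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			  0, 64 << 10);
 *
 *		// e.g. FALLOC_FL_COLLAPSE_RANGE fails with EOPNOTSUPP
 *		return close(fd);
 *	}
 */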
5485 static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5486 __u64 start, __u64 len)
5490 struct fiemap *fiemap;
5491 unsigned int extent_count = fieinfo->fi_extents_max;
5493 num_bytes = sizeof(*fiemap) + (extent_count *
5494 sizeof(struct fiemap_extent));
5495 OBD_ALLOC_LARGE(fiemap, num_bytes);
5500 fiemap->fm_flags = fieinfo->fi_flags;
5501 fiemap->fm_extent_count = fieinfo->fi_extents_max;
5502 fiemap->fm_start = start;
5503 fiemap->fm_length = len;
5504 if (extent_count > 0 &&
5505 copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
5506 sizeof(struct fiemap_extent)) != 0)
5507 GOTO(out, rc = -EFAULT);
5509 rc = ll_do_fiemap(inode, fiemap, num_bytes);
5511 if (IS_ENCRYPTED(inode)) {
5514 for (i = 0; i < fiemap->fm_mapped_extents; i++)
5515 fiemap->fm_extents[i].fe_flags |=
5516 FIEMAP_EXTENT_DATA_ENCRYPTED |
5517 FIEMAP_EXTENT_ENCODED;
5520 fieinfo->fi_flags = fiemap->fm_flags;
5521 fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
5522 if (extent_count > 0 &&
5523 copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
5524 fiemap->fm_mapped_extents *
5525 sizeof(struct fiemap_extent)) != 0)
5526 GOTO(out, rc = -EFAULT);
5528 OBD_FREE_LARGE(fiemap, num_bytes);
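/*
 * Editor's note (illustrative sketch, not part of the original source):
 * ll_fiemap() above backs the standard FIEMAP ioctl and tags extents of
 * encrypted files with FIEMAP_EXTENT_DATA_ENCRYPTED/FIEMAP_EXTENT_ENCODED.
 * A minimal userspace sketch asking for the first few extents of a file:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	int main(void)
 *	{
 *		char buf[sizeof(struct fiemap) +
 *			 8 * sizeof(struct fiemap_extent)] = { 0 };
 *		struct fiemap *fm = (struct fiemap *)buf;
 *		int fd = open("/mnt/lustre/f", O_RDONLY);
 *
 *		fm->fm_length = FIEMAP_MAX_OFFSET;
 *		fm->fm_extent_count = 8;
 *		ioctl(fd, FS_IOC_FIEMAP, fm);
 *		// fm->fm_mapped_extents extents are in fm->fm_extents[]
 *		return close(fd);
 *	}
 */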
5532 int ll_inode_permission(struct user_namespace *mnt_userns, struct inode *inode,
5536 struct ll_sb_info *sbi;
5537 struct root_squash_info *squash;
5538 struct cred *cred = NULL;
5539 const struct cred *old_cred = NULL;
5540 bool squash_id = false;
5541 ktime_t kstart = ktime_get();
5545 if (mask & MAY_NOT_BLOCK)
5549 * as the root inode is NOT validated in the lookup operation,
5550 * we need to do it before the permission check.
5553 if (is_root_inode(inode)) {
5554 rc = ll_inode_revalidate(inode->i_sb->s_root, IT_LOOKUP);
5559 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
5560 PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
5562 /* squash fsuid/fsgid if needed */
5563 sbi = ll_i2sbi(inode);
5564 squash = &sbi->ll_squash;
5565 if (unlikely(squash->rsi_uid != 0 &&
5566 uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
5567 !test_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags))) {
5571 CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
5572 __kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
5573 squash->rsi_uid, squash->rsi_gid);
5575 /* update current process's credentials
5576 * and FS capability */
5577 cred = prepare_creds();
5581 cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
5582 cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
5583 cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective);
5584 cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
5586 old_cred = override_creds(cred);
5589 rc = generic_permission(mnt_userns, inode, mask);
5590 /* restore current process's credentials and FS capability */
5592 revert_creds(old_cred);
5597 ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM,
5598 ktime_us_delta(ktime_get(), kstart));
5603 /* -o localflock - only provides locally consistent flock locks */
5604 static const struct file_operations ll_file_operations = {
5605 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5606 # ifdef HAVE_SYNC_READ_WRITE
5607 .read = new_sync_read,
5608 .write = new_sync_write,
5610 .read_iter = ll_file_read_iter,
5611 .write_iter = ll_file_write_iter,
5612 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5613 .read = ll_file_read,
5614 .aio_read = ll_file_aio_read,
5615 .write = ll_file_write,
5616 .aio_write = ll_file_aio_write,
5617 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5618 .unlocked_ioctl = ll_file_ioctl,
5619 .open = ll_file_open,
5620 .release = ll_file_release,
5621 .mmap = ll_file_mmap,
5622 .llseek = ll_file_seek,
5623 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5624 .splice_read = generic_file_splice_read,
5626 .splice_read = pcc_file_splice_read,
5630 .fallocate = ll_fallocate,
5633 static const struct file_operations ll_file_operations_flock = {
5634 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5635 # ifdef HAVE_SYNC_READ_WRITE
5636 .read = new_sync_read,
5637 .write = new_sync_write,
5638 # endif /* HAVE_SYNC_READ_WRITE */
5639 .read_iter = ll_file_read_iter,
5640 .write_iter = ll_file_write_iter,
5641 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5642 .read = ll_file_read,
5643 .aio_read = ll_file_aio_read,
5644 .write = ll_file_write,
5645 .aio_write = ll_file_aio_write,
5646 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5647 .unlocked_ioctl = ll_file_ioctl,
5648 .open = ll_file_open,
5649 .release = ll_file_release,
5650 .mmap = ll_file_mmap,
5651 .llseek = ll_file_seek,
5652 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5653 .splice_read = generic_file_splice_read,
5655 .splice_read = pcc_file_splice_read,
5659 .flock = ll_file_flock,
5660 .lock = ll_file_flock,
5661 .fallocate = ll_fallocate,
5664 /* These are for -o noflock - to return ENOSYS on flock calls */
5665 static const struct file_operations ll_file_operations_noflock = {
5666 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5667 # ifdef HAVE_SYNC_READ_WRITE
5668 .read = new_sync_read,
5669 .write = new_sync_write,
5670 # endif /* HAVE_SYNC_READ_WRITE */
5671 .read_iter = ll_file_read_iter,
5672 .write_iter = ll_file_write_iter,
5673 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5674 .read = ll_file_read,
5675 .aio_read = ll_file_aio_read,
5676 .write = ll_file_write,
5677 .aio_write = ll_file_aio_write,
5678 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5679 .unlocked_ioctl = ll_file_ioctl,
5680 .open = ll_file_open,
5681 .release = ll_file_release,
5682 .mmap = ll_file_mmap,
5683 .llseek = ll_file_seek,
5684 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5685 .splice_read = generic_file_splice_read,
5687 .splice_read = pcc_file_splice_read,
5691 .flock = ll_file_noflock,
5692 .lock = ll_file_noflock,
5693 .fallocate = ll_fallocate,
5696 const struct inode_operations ll_file_inode_operations = {
5697 .setattr = ll_setattr,
5698 .getattr = ll_getattr,
5699 .permission = ll_inode_permission,
5700 #ifdef HAVE_IOP_XATTR
5701 .setxattr = ll_setxattr,
5702 .getxattr = ll_getxattr,
5703 .removexattr = ll_removexattr,
5705 .listxattr = ll_listxattr,
5706 .fiemap = ll_fiemap,
5707 .get_acl = ll_get_acl,
5708 #ifdef HAVE_IOP_SET_ACL
5709 .set_acl = ll_set_acl,
5713 const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi)
5715 const struct file_operations *fops = &ll_file_operations_noflock;
5717 if (test_bit(LL_SBI_FLOCK, sbi->ll_flags))
5718 fops = &ll_file_operations_flock;
5719 else if (test_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags))
5720 fops = &ll_file_operations;
5725 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
5727 struct ll_inode_info *lli = ll_i2info(inode);
5728 struct cl_object *obj = lli->lli_clob;
5737 env = cl_env_get(&refcheck);
5739 RETURN(PTR_ERR(env));
5741 rc = cl_conf_set(env, lli->lli_clob, conf);
5745 if (conf->coc_opc == OBJECT_CONF_SET) {
5746 struct ldlm_lock *lock = conf->coc_lock;
5747 struct cl_layout cl = {
5751 LASSERT(lock != NULL);
5752 LASSERT(ldlm_has_layout(lock));
5754 /* the lock can only be allowed to match after the layout is
5755 * applied to the inode, otherwise a stale layout would be
5756 * seen. Applying the layout should happen before dropping
5757 * the intent lock. */
5758 ldlm_lock_allow_match(lock);
5760 rc = cl_object_layout_get(env, obj, &cl);
5765 DFID": layout version change: %u -> %u\n",
5766 PFID(&lli->lli_fid), ll_layout_version_get(lli),
5768 ll_layout_version_set(lli, cl.cl_layout_gen);
5772 cl_env_put(env, &refcheck);
5774 RETURN(rc < 0 ? rc : 0);
5777 /* Fetch layout from MDT with getxattr request, if it's not ready yet */
5778 static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
5781 struct ll_sb_info *sbi = ll_i2sbi(inode);
5782 struct ptlrpc_request *req;
5789 CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
5790 PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
5791 lock->l_lvb_data, lock->l_lvb_len);
5793 if (lock->l_lvb_data != NULL)
5796 /* if layout lock was granted right away, the layout is returned
5797 * within DLM_LVB of dlm reply; otherwise if the lock was ever
5798 * blocked and then granted via completion ast, we have to fetch
5799 * layout here. Please note that we can't use the LVB buffer in the
5800 * completion AST because it is not large enough */
5801 rc = ll_get_default_mdsize(sbi, &lmmsize);
5805 rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), OBD_MD_FLXATTR,
5806 XATTR_NAME_LOV, lmmsize, &req);
5809 GOTO(out, rc = 0); /* empty layout */
5816 if (lmmsize == 0) /* empty layout */
5819 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
5821 GOTO(out, rc = -EFAULT);
5823 OBD_ALLOC_LARGE(lvbdata, lmmsize);
5824 if (lvbdata == NULL)
5825 GOTO(out, rc = -ENOMEM);
5827 memcpy(lvbdata, lmm, lmmsize);
5828 lock_res_and_lock(lock);
5829 if (unlikely(lock->l_lvb_data == NULL)) {
5830 lock->l_lvb_type = LVB_T_LAYOUT;
5831 lock->l_lvb_data = lvbdata;
5832 lock->l_lvb_len = lmmsize;
5835 unlock_res_and_lock(lock);
5838 OBD_FREE_LARGE(lvbdata, lmmsize);
5843 ptlrpc_req_finished(req);
5848 * Apply the layout to the inode. Layout lock is held and will be released in this function.
5851 static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
5852 struct inode *inode)
5854 struct ll_inode_info *lli = ll_i2info(inode);
5855 struct ll_sb_info *sbi = ll_i2sbi(inode);
5856 struct ldlm_lock *lock;
5857 struct cl_object_conf conf;
5860 bool wait_layout = false;
5863 LASSERT(lustre_handle_is_used(lockh));
5865 lock = ldlm_handle2lock(lockh);
5866 LASSERT(lock != NULL);
5868 if (!ldlm_has_layout(lock))
5869 GOTO(out, rc = -EAGAIN);
5871 LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
5872 PFID(&lli->lli_fid), inode);
5874 /* in case this is a caching lock, reinstate it with the new inode */
5875 md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
5877 lock_res_and_lock(lock);
5878 lvb_ready = ldlm_is_lvb_ready(lock);
5879 unlock_res_and_lock(lock);
5881 /* checking lvb_ready is racy but this is okay. The worst case is
5882 * that multiple processes may configure the file at the same time. */
5886 rc = ll_layout_fetch(inode, lock);
5890 /* for layout lock, lmm is stored in lock's lvb.
5891 * lvb_data is immutable if the lock is held so it's safe to access it
5894 * set layout to file. It is unlikely that this will fail, as the
5895 * old layout has surely been eliminated */
5896 memset(&conf, 0, sizeof conf);
5897 conf.coc_opc = OBJECT_CONF_SET;
5898 conf.coc_inode = inode;
5899 conf.coc_lock = lock;
5900 conf.u.coc_layout.lb_buf = lock->l_lvb_data;
5901 conf.u.coc_layout.lb_len = lock->l_lvb_len;
5902 rc = ll_layout_conf(inode, &conf);
5904 /* refresh layout failed, need to wait */
5905 wait_layout = rc == -EBUSY;
5908 LDLM_LOCK_PUT(lock);
5909 ldlm_lock_decref(lockh, mode);
5911 /* wait for IO to complete if it's still being used. */
5913 CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
5914 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
5916 memset(&conf, 0, sizeof conf);
5917 conf.coc_opc = OBJECT_CONF_WAIT;
5918 conf.coc_inode = inode;
5919 rc = ll_layout_conf(inode, &conf);
5923 CDEBUG(D_INODE, "%s file="DFID" waiting layout return: %d\n",
5924 sbi->ll_fsname, PFID(&lli->lli_fid), rc);
5930 * Issue layout intent RPC to MDS.
5931 * \param inode [in] file inode
5932 * \param intent [in] layout intent
5934 * \retval 0 on success
5935 * \retval < 0 error code
5937 static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
5939 struct ll_inode_info *lli = ll_i2info(inode);
5940 struct ll_sb_info *sbi = ll_i2sbi(inode);
5941 struct md_op_data *op_data;
5942 struct lookup_intent it;
5943 struct ptlrpc_request *req;
5947 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
5948 0, 0, LUSTRE_OPC_ANY, NULL);
5949 if (IS_ERR(op_data))
5950 RETURN(PTR_ERR(op_data));
5952 op_data->op_data = intent;
5953 op_data->op_data_size = sizeof(*intent);
5955 memset(&it, 0, sizeof(it));
5956 it.it_op = IT_LAYOUT;
5957 if (intent->li_opc == LAYOUT_INTENT_WRITE ||
5958 intent->li_opc == LAYOUT_INTENT_TRUNC)
5959 it.it_flags = FMODE_WRITE;
5961 LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
5962 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
5964 rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
5965 &ll_md_blocking_ast, 0);
5966 if (it.it_request != NULL)
5967 ptlrpc_req_finished(it.it_request);
5968 it.it_request = NULL;
5970 ll_finish_md_op_data(op_data);
5972 /* set lock data in case this is a new lock */
5974 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
5976 ll_intent_drop_lock(&it);
5982 * This function checks if there exists a LAYOUT lock on the client side,
5983 * or enqueues it if it doesn't have one in cache.
5985 * This function will not hold the layout lock, so it may be revoked any time after
5986 * this function returns. Any operations that depend on the layout should be redone in that case.
5989 * This function should be called before lov_io_init() to get an up-to-date
5990 * layout version; the caller should save the version number, and after IO
5991 * is finished, this function should be called again to verify that the layout
5992 * was not changed during the IO.
5994 int ll_layout_refresh(struct inode *inode, __u32 *gen)
5996 struct ll_inode_info *lli = ll_i2info(inode);
5997 struct ll_sb_info *sbi = ll_i2sbi(inode);
5998 struct lustre_handle lockh;
5999 struct layout_intent intent = {
6000 .li_opc = LAYOUT_INTENT_ACCESS,
6002 enum ldlm_mode mode;
6006 *gen = ll_layout_version_get(lli);
6007 if (!test_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags) ||
6008 *gen != CL_LAYOUT_GEN_NONE)
6012 LASSERT(fid_is_sane(ll_inode2fid(inode)));
6013 LASSERT(S_ISREG(inode->i_mode));
6015 /* take layout lock mutex to enqueue layout lock exclusively. */
6016 mutex_lock(&lli->lli_layout_mutex);
6019 /* mostly the layout lock is cached on the local side, so try to
6020 * match it before grabbing the layout lock mutex. */
6021 mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
6022 LCK_CR | LCK_CW | LCK_PR |
6024 if (mode != 0) { /* hit cached lock */
6025 rc = ll_layout_lock_set(&lockh, mode, inode);
6031 rc = ll_layout_intent(inode, &intent);
6037 *gen = ll_layout_version_get(lli);
6038 mutex_unlock(&lli->lli_layout_mutex);
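/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the usage contract above amounts to "refresh before IO, remember the
 * generation, refresh again afterwards and redo the IO if it changed".
 * A rough caller-side sketch, where do_the_io() is a hypothetical helper:
 *
 *	__u32 gen_before, gen_after;
 *	int rc;
 *
 *	do {
 *		rc = ll_layout_refresh(inode, &gen_before);
 *		if (rc < 0)
 *			break;
 *		rc = do_the_io(inode);		// hypothetical IO step
 *		if (rc < 0)
 *			break;
 *		rc = ll_layout_refresh(inode, &gen_after);
 *	} while (rc == 0 && gen_after != gen_before);
 */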
6044 * Issue layout intent RPC indicating where in a file an IO is about to write.
6046 * \param[in] inode file inode.
6047 * \param[in] ext write range with start offset of file in bytes where
6048 * an IO is about to write, and exclusive end offset in bytes
6051 * \retval 0 on success
6052 * \retval < 0 error code
6054 int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
6055 struct lu_extent *ext)
6057 struct layout_intent intent = {
6059 .li_extent.e_start = ext->e_start,
6060 .li_extent.e_end = ext->e_end,
6065 rc = ll_layout_intent(inode, &intent);
6071 * This function sends a restore request to the MDT
6073 int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
6075 struct hsm_user_request *hur;
6079 len = sizeof(struct hsm_user_request) +
6080 sizeof(struct hsm_user_item);
6081 OBD_ALLOC(hur, len);
6085 hur->hur_request.hr_action = HUA_RESTORE;
6086 hur->hur_request.hr_archive_id = 0;
6087 hur->hur_request.hr_flags = 0;
6088 memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
6089 sizeof(hur->hur_user_item[0].hui_fid));
6090 hur->hur_user_item[0].hui_extent.offset = offset;
6091 hur->hur_user_item[0].hui_extent.length = length;
6092 hur->hur_request.hr_itemcount = 1;
6093 rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,