// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */

/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE
#include <lustre_dlm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/user_namespace.h>
#include <linux/capability.h>
#include <linux/uidgid.h>
#include <linux/falloc.h>
#include <linux/ktime.h>
#ifdef HAVE_LINUX_FILELOCK_HEADER
#include <linux/filelock.h>
#endif
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_swab.h>
#include <libcfs/linux/linux-misc.h>

#include "cl_object.h"
#include "llite_internal.h"
#include "vvp_internal.h"

struct split_param {
	struct inode	*sp_inode;
	__u16		 sp_mirror_id;
};

struct pcc_param {
	__u64	pa_data_version;
	__u32	pa_archive_id;
	__u32	pa_layout_gen;
};

struct swap_layouts_param {
	struct inode	*slp_inode;
	__u64		 slp_dv1;
	__u64		 slp_dv2;
};

static int
ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);

static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
			  bool *lease_broken);

static struct ll_file_data *ll_file_data_get(void)
{
	struct ll_file_data *lfd;

	OBD_SLAB_ALLOC_PTR_GFP(lfd, ll_file_data_slab, GFP_NOFS);
	if (lfd == NULL)
		return NULL;

	lfd->fd_write_failed = false;
	pcc_file_init(&lfd->fd_pcc_file);

	return lfd;
}

static void ll_file_data_put(struct ll_file_data *lfd)
{
	if (lfd != NULL)
		OBD_SLAB_FREE_PTR(lfd, ll_file_data_slab);
}

/* Packs all the attributes into @op_data for the CLOSE rpc. */
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
			     struct obd_client_handle *och)
{
	ll_prep_md_op_data(op_data, inode, NULL, NULL,
			   0, 0, LUSTRE_OPC_ANY, NULL);

	op_data->op_attr.ia_mode = inode->i_mode;
	op_data->op_attr.ia_atime = inode_get_atime(inode);
	op_data->op_attr.ia_mtime = inode_get_mtime(inode);
	op_data->op_attr.ia_ctime = inode_get_ctime(inode);
	/* In case of encrypted file without the key, visible size was rounded
	 * up to next LUSTRE_ENCRYPTION_UNIT_SIZE, and clear text size was
	 * stored into lli_lazysize in ll_merge_attr(), so set proper file size
	 * now that we are closing.
	 */
	if (IS_ENCRYPTED(inode) && !ll_has_encryption_key(inode) &&
	    ll_i2info(inode)->lli_attr_valid & OBD_MD_FLLAZYSIZE) {
		op_data->op_attr.ia_size = ll_i2info(inode)->lli_lazysize;
		if (IS_PCCCOPY(inode)) {
			inode->i_flags &= ~S_PCCCOPY;
			i_size_write(inode, op_data->op_attr.ia_size);
		}
	} else {
		op_data->op_attr.ia_size = i_size_read(inode);
	}
	op_data->op_attr.ia_valid |= (ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
				      ATTR_MTIME | ATTR_MTIME_SET |
				      ATTR_CTIME);
	op_data->op_xvalid |= OP_XVALID_CTIME_SET;
	op_data->op_attr_blocks = inode->i_blocks;
	op_data->op_attr_flags = ll_inode2ext_flags(inode);
	op_data->op_open_handle = och->och_open_handle;
	if (och->och_flags & MDS_FMODE_WRITE &&
	    test_and_clear_bit(LLIF_DATA_MODIFIED,
			       &ll_i2info(inode)->lli_flags))
		/* For HSM: if inode data has been modified, pack it so that
		 * MDT can set data dirty flag in the archive.
		 */
		op_data->op_bias |= MDS_DATA_MODIFIED;
}

/*
 * Perform a close, possibly with a bias.
 * The meaning of "data" depends on the value of "bias".
 *
 * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
 * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to a
 * struct swap_layouts_param containing the inode to swap with and the old and
 * new data version.
 */
static int ll_close_inode_openhandle(struct inode *inode,
				     struct obd_client_handle *och,
				     enum mds_op_bias bias, void *data)
{
	struct obd_export *md_exp = ll_i2mdexp(inode);
	const struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc;

	ENTRY;

	if (class_exp2obd(md_exp) == NULL) {
		rc = 0;
		CERROR("%s: invalid MDC connection handle closing "DFID": rc = %d\n",
		       ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid), rc);
		GOTO(out, rc);
	}

	OBD_ALLOC_PTR(op_data);
	/* We leak openhandle and request here on error, but not much to be
	 * done in OOM case since app won't retry close on error either.
	 */
	if (op_data == NULL)
		GOTO(out, rc = -ENOMEM);

	ll_prepare_close(inode, op_data, och);
	switch (bias) {
	case MDS_CLOSE_LAYOUT_MERGE:
		/* merge blocks from the victim inode */
		op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		fallthrough;
	case MDS_CLOSE_LAYOUT_SPLIT: {
		struct split_param *sp = data;

		LASSERT(data != NULL);
		op_data->op_bias |= bias;
		op_data->op_data_version = 0;
		op_data->op_lease_handle = och->och_lease_handle;
		if (bias == MDS_CLOSE_LAYOUT_SPLIT) {
			op_data->op_fid2 = *ll_inode2fid(sp->sp_inode);
			op_data->op_mirror_id = sp->sp_mirror_id;
		} else { /* MDS_CLOSE_LAYOUT_MERGE */
			op_data->op_fid2 = *ll_inode2fid(data);
		}
		break;
	}
	case MDS_CLOSE_LAYOUT_SWAP: {
		struct swap_layouts_param *slp = data;

		LASSERT(data != NULL);
		op_data->op_bias |= (bias | MDS_CLOSE_LAYOUT_SWAP_HSM);
		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_fid2 = *ll_inode2fid(slp->slp_inode);
		op_data->op_data_version = slp->slp_dv1;
		op_data->op_data_version2 = slp->slp_dv2;
		break;
	}
	case MDS_CLOSE_RESYNC_DONE: {
		struct ll_ioc_lease *ioc = data;

		LASSERT(data != NULL);
		op_data->op_attr_blocks +=
			ioc->lil_count * op_data->op_attr_blocks;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		op_data->op_bias |= MDS_CLOSE_RESYNC_DONE;

		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_data = &ioc->lil_ids[0];
		op_data->op_data_size =
			ioc->lil_count * sizeof(ioc->lil_ids[0]);
		break;
	}
	case MDS_PCC_ATTACH: {
		struct pcc_param *param = data;

		LASSERT(data != NULL);
		op_data->op_bias |= MDS_HSM_RELEASE | MDS_PCC_ATTACH;
		op_data->op_archive_id = param->pa_archive_id;
		op_data->op_data_version = param->pa_data_version;
		op_data->op_lease_handle = och->och_lease_handle;
		break;
	}
	case MDS_HSM_RELEASE:
		LASSERT(data != NULL);
		op_data->op_bias |= MDS_HSM_RELEASE;
		op_data->op_data_version = *(__u64 *)data;
		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		break;
	default:
		LASSERT(data == NULL);
		break;
	}

	if (!(op_data->op_attr.ia_valid & ATTR_SIZE))
		op_data->op_xvalid |= OP_XVALID_LAZYSIZE;
	if (!(op_data->op_xvalid & OP_XVALID_BLOCKS))
		op_data->op_xvalid |= OP_XVALID_LAZYBLOCKS;

	rc = md_close(md_exp, op_data, och->och_mod, &req);
	if (rc != 0 && rc != -EINTR)
		CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
		       md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc);

	if (rc == 0 && op_data->op_bias & bias) {
		struct mdt_body *body;

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
			rc = -EBUSY;

		if (bias & MDS_PCC_ATTACH) {
			struct pcc_param *param = data;

			param->pa_layout_gen = body->mbo_layout_gen;
		}
	}

	ll_finish_md_op_data(op_data);
	EXIT;
out:
	md_clear_open_replay_data(md_exp, och);
	och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
	OBD_FREE_PTR(och);

	ptlrpc_req_put(req);	/* This is close request */
	return rc;
}

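/*
 * Illustrative sketch (editor's note, not part of the call graph): an HSM
 * release is a biased close, driven roughly like this by a caller that holds
 * a valid lease openhandle "och" and the file's current data version "dv":
 *
 *	__u64 dv;
 *	rc = ll_data_version(inode, &dv, LL_DV_RD_FLUSH);
 *	if (rc == 0)
 *		rc = ll_close_inode_openhandle(inode, och,
 *					       MDS_HSM_RELEASE, &dv);
 *
 * If the lease was broken in the meantime the MDT does not execute the
 * intent, OBD_MD_CLOSE_INTENT_EXECED is missing from the reply, and the
 * close returns -EBUSY as seen above.
 */
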
/**
 * ll_md_real_close() - called when a file is closed. Called from
 * ll_file_release().
 * @inode: inode which is getting closed
 * @fd_open_mode: MDS open flags passed from the client
 *
 * Return:
 * * %0 on success
 * * %negative errno on failure
 */
int ll_md_real_close(struct inode *inode, enum mds_open_flags fd_open_mode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct obd_client_handle **och_p;
	struct obd_client_handle *och;
	__u64 *och_usecount;
	int rc = 0;

	ENTRY;

	if (fd_open_mode & MDS_FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else if (fd_open_mode & MDS_FMODE_EXEC) {
		och_p = &lli->lli_mds_exec_och;
		och_usecount = &lli->lli_open_fd_exec_count;
	} else {
		LASSERT(fd_open_mode & MDS_FMODE_READ);
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	mutex_lock(&lli->lli_och_mutex);
	if (*och_usecount > 0) {
		/* There are still users of this handle, so skip freeing it */
		mutex_unlock(&lli->lli_och_mutex);
		RETURN(0);
	}

	och = *och_p;
	*och_p = NULL;
	mutex_unlock(&lli->lli_och_mutex);

	if (och != NULL) {
		/* There might be a race and this handle may already be closed. */
		rc = ll_close_inode_openhandle(inode, och, 0, NULL);
	}

	RETURN(rc);
}

static int ll_md_close(struct inode *inode, struct file *file)
{
	union ldlm_policy_data policy = {
		.l_inodebits = { MDS_INODELOCK_OPEN },
	};
	__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
	struct ll_file_data *lfd = file->private_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lustre_handle lockh;
	enum ldlm_mode lockmode;
	int rc = 0;

	ENTRY;

	/* clear group lock, if present */
	if (unlikely(lfd->lfd_file_flags & LL_FILE_GROUP_LOCKED))
		ll_put_grouplock(inode, file, lfd->fd_grouplock.lg_gid);

	mutex_lock(&lli->lli_och_mutex);
	if (lfd->fd_lease_och != NULL) {
		bool lease_broken;
		struct obd_client_handle *lease_och;

		lease_och = lfd->fd_lease_och;
		lfd->fd_lease_och = NULL;
		mutex_unlock(&lli->lli_och_mutex);

		/* Usually the lease is not released when the
		 * application crashed, we need to release here.
		 */
		rc = ll_lease_close(lease_och, inode, &lease_broken);

		mutex_lock(&lli->lli_och_mutex);

		CDEBUG_LIMIT(rc ? D_ERROR : D_INODE,
			     "Clean up lease "DFID" %d/%d\n",
			     PFID(&lli->lli_fid), rc, lease_broken);
	}

	if (lfd->fd_och != NULL) {
		struct obd_client_handle *och;

		och = lfd->fd_och;
		lfd->fd_och = NULL;
		mutex_unlock(&lli->lli_och_mutex);

		rc = ll_close_inode_openhandle(inode, och, 0, NULL);
		GOTO(out, rc);
	}

	/* Let's see if we have good enough OPEN lock on the file and if we can
	 * skip talking to MDS
	 */
	if (lfd->fd_open_mode & MDS_FMODE_WRITE) {
		lockmode = LCK_CW;
		LASSERT(lli->lli_open_fd_write_count);
		lli->lli_open_fd_write_count--;
	} else if (lfd->fd_open_mode & MDS_FMODE_EXEC) {
		lockmode = LCK_PR;
		LASSERT(lli->lli_open_fd_exec_count);
		lli->lli_open_fd_exec_count--;
	} else {
		lockmode = LCK_CR;
		LASSERT(lli->lli_open_fd_read_count);
		lli->lli_open_fd_read_count--;
	}
	mutex_unlock(&lli->lli_och_mutex);

	/* LU-4398: do not cache write open lock if the file has exec bit */
	if ((lockmode == LCK_CW && inode->i_mode & 0111) ||
	    !md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
			   LDLM_IBITS, &policy, lockmode, 0, &lockh))
		rc = ll_md_real_close(inode, lfd->fd_open_mode);

out:
	file->private_data = NULL;
	ll_file_data_put(lfd);

	RETURN(rc);
}

/* While this returns an error code, fput() the caller does not, so we need
 * to make every effort to clean up all of our state here. Also, applications
 * rarely check close errors and even if an error is returned they will not
 * re-try the close call.
 */
int ll_file_release(struct inode *inode, struct file *file)
{
	struct ll_file_data *lfd;
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_inode_info *lli = ll_i2info(inode);
	ktime_t kstart = ktime_get();
	int rc;

	ENTRY;

	CDEBUG(D_VFSTRACE|D_IOTRACE,
	       "START file "DNAME":"DFID"(%p), flags %o\n",
	       encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
	       inode, file->f_flags);

	lfd = file->private_data;
	LASSERT(lfd != NULL);

	/* The last ref on @file may not be the owner pid of statahead,
	 * because parent and child process can share the same file handle.
	 */
	if (S_ISDIR(inode->i_mode) &&
	    (lli->lli_opendir_key == lfd || lfd->fd_sai))
		ll_deauthorize_statahead(inode, lfd);

	if (is_root_inode(inode)) {
		file->private_data = NULL;
		ll_file_data_put(lfd);
		GOTO(out, rc = 0);
	}

	pcc_file_release(inode, file);

	if (!S_ISDIR(inode->i_mode)) {
		if (lli->lli_clob != NULL)
			lov_read_and_clear_async_rc(lli->lli_clob);
		lli->lli_async_rc = 0;
	}

	lli->lli_close_fd_time = ktime_get();

	rc = ll_md_close(inode, file);

	if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
		libcfs_debug_dumplog();

out:
	if (!rc && !is_root_inode(inode))
		ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
				   ktime_us_delta(ktime_get(), kstart));
	CDEBUG(D_IOTRACE,
	       "COMPLETED file "DNAME":"DFID"(%p), flags %o, rc = %d\n",
	       encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
	       inode, file->f_flags, rc);
	RETURN(rc);
}

static inline int ll_dom_readpage(void *data, struct page *page)
{
	/* since ll_dom_readpage is a page cache helper, it is safe to assume
	 * mapping and host pointers are set here
	 */
	struct inode *inode;
	struct niobuf_local *lnb = data;
	void *kaddr;
	int rc = 0;

	inode = page2inode(page);

	kaddr = kmap_atomic(page);
	memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
	if (lnb->lnb_len < PAGE_SIZE)
		memset(kaddr + lnb->lnb_len, 0,
		       PAGE_SIZE - lnb->lnb_len);
	kunmap_atomic(kaddr);

	if (inode && IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
		if (!ll_has_encryption_key(inode)) {
			CDEBUG(D_SEC, "no enc key for "DFID"\n",
			       PFID(ll_inode2fid(inode)));
			rc = -ENOKEY;
		} else {
			unsigned int offs = 0;

			while (offs < PAGE_SIZE) {
				/* decrypt only if page is not empty */
				if (memcmp(page_address(page) + offs,
					   page_address(ZERO_PAGE(0)),
					   LUSTRE_ENCRYPTION_UNIT_SIZE) == 0)
					break;

				rc = llcrypt_decrypt_pagecache_blocks(page,
						LUSTRE_ENCRYPTION_UNIT_SIZE,
						offs);
				if (rc)
					break;

				offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
			}
		}
	}

	if (rc == 0) {
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	unlock_page(page);

	return rc;
}

#ifdef HAVE_READ_CACHE_PAGE_WANTS_FILE
static inline int ll_dom_read_folio(struct file *file, struct folio *folio0)
{
	return ll_dom_readpage(file->private_data, folio_page(folio0, 0));
}
#else
#define ll_dom_read_folio	ll_dom_readpage
#endif

void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req)
{
	struct lu_env *env;
	struct cl_io *io;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct address_space *mapping = inode->i_mapping;
	struct page *vmpage;
	struct niobuf_remote *rnb;
	struct mdt_body *body;
	char *data;
	unsigned long index, start;
	struct niobuf_local lnb;
	__u16 refcheck;
	int rc;

	ENTRY;

	if (obj == NULL)
		RETURN_EXIT;

	if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
				       RCL_SERVER))
		RETURN_EXIT;

	rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
	if (rnb == NULL || rnb->rnb_len == 0)
		RETURN_EXIT;

	/* LU-11595: Server may return whole file and that is OK always, or
	 * it may return just file tail and its offset must be aligned with
	 * client PAGE_SIZE to be used on that client; if server's PAGE_SIZE
	 * is smaller, the offset may not be aligned and that data is just
	 * ignored.
	 */
	if (rnb->rnb_offset & ~PAGE_MASK)
		RETURN_EXIT;

	/* Server returns whole file or just file tail if it fills in reply
	 * buffer, in both cases total size should be equal to the file size.
	 */
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size &&
	    !(inode && IS_ENCRYPTED(inode))) {
		CERROR("%s: server returns off/len %llu/%u but size %llu\n",
		       ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
		       rnb->rnb_len, body->mbo_dom_size);
		RETURN_EXIT;
	}

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN_EXIT;
	io = vvp_env_new_io(env);
	io->ci_obj = obj;
	rc = cl_io_init(env, io, CIT_MISC, obj);
	if (rc)
		GOTO(out_io, rc);

	CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
	       rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);

	data = (char *)rnb + sizeof(*rnb);

	lnb.lnb_file_offset = rnb->rnb_offset;
	start = lnb.lnb_file_offset >> PAGE_SHIFT;
	index = 0;
	LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
	lnb.lnb_page_offset = 0;
	do {
		struct cl_page *page;

		lnb.lnb_data = data + (index << PAGE_SHIFT);
		lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
		if (lnb.lnb_len > PAGE_SIZE)
			lnb.lnb_len = PAGE_SIZE;

		vmpage = ll_read_cache_page(mapping, index + start,
					    ll_dom_read_folio, &lnb);
		if (IS_ERR(vmpage)) {
			CWARN("%s: cannot fill page %lu for "DFID" with data: rc = %li\n",
			      ll_i2sbi(inode)->ll_fsname, index + start,
			      PFID(lu_object_fid(&obj->co_lu)),
			      PTR_ERR(vmpage));
			break;
		}
		lock_page(vmpage);
		if (vmpage->mapping == NULL) {
			unlock_page(vmpage);
			put_page(vmpage);
			/* page was truncated */
			break;
		}
		/* attach VM page to CL page cache */
		page = cl_page_find(env, obj, vmpage->index, vmpage,
				    CPT_CACHEABLE);
		if (IS_ERR(page)) {
			ClearPageUptodate(vmpage);
			unlock_page(vmpage);
			put_page(vmpage);
			break;
		}
		SetPageUptodate(vmpage);
		cl_page_put(env, page);
		unlock_page(vmpage);
		put_page(vmpage);
		index++;
	} while (rnb->rnb_len > (index << PAGE_SHIFT));

out_io:
	cl_io_fini(env, io);
	cl_env_put(env, &refcheck);
	EXIT;
}

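/*
 * Note: ll_dom_finish_open() is best effort. Any failure while copying the
 * inline reply buffer into the page cache (allocation failure, truncate
 * race) only forfeits the read-on-open optimization; the data is simply
 * fetched again later through the normal Data-on-MDT read path.
 */
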
void ll_dir_finish_open(struct inode *inode, struct ptlrpc_request *req)
{
	struct obd_export *exp = ll_i2mdexp(inode);
	struct page *page;
	struct lu_dirpage *dp;
	void *data;
	__u64 hash;
	unsigned long offset;
	unsigned long npages;
	bool is_hash64;
	int rc;
	int i;

	ENTRY;

	if (!exp_connect_open_readdir(exp))
		RETURN_EXIT;

	if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
				       RCL_SERVER))
		RETURN_EXIT;

	data = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
	if (data == NULL)
		RETURN_EXIT;

	npages = req_capsule_get_size(&req->rq_pill, &RMF_NIOBUF_INLINE,
				      RCL_SERVER);
	if (npages < sizeof(*dp))
		RETURN_EXIT;

	npages = DIV_ROUND_UP(npages, PAGE_SIZE);
	is_hash64 = test_bit(LL_SBI_64BIT_HASH, ll_i2sbi(inode)->ll_flags);

	for (i = 0; i < npages; i++) {
		page = __page_cache_alloc(mapping_gfp_mask(inode->i_mapping));
		if (!page)
			break;

		SetPageUptodate(page);

		dp = kmap_atomic(page);
		memcpy(dp, data, PAGE_SIZE);
		hash = le64_to_cpu(dp->ldp_hash_start);
		kunmap_atomic(dp);

		offset = hash_x_index(hash, is_hash64);

		prefetchw(&page->flags);
		rc = add_to_page_cache_lru(page, inode->i_mapping, offset,
					   GFP_KERNEL);
		if (rc == 0)
			unlock_page(page);

		put_page(page);
		data += PAGE_SIZE;
	}
	EXIT;
}

static int ll_intent_file_open(struct dentry *de, void *lmm, ssize_t lmmsize,
			       struct lookup_intent *itp)
{
	struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
	struct dentry *parent = dget_parent(de);
	char *name = NULL;
	int len = 0;
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc;

	ENTRY;

	LASSERT(parent != NULL);
	LASSERT(itp->it_open_flags & MDS_OPEN_BY_FID);

	/* if server supports open-by-fid, or file name is invalid, don't pack
	 * name in open request
	 */
	if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_OPEN_BY_NAME) ||
	    !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID)) {
retry:
		len = de->d_name.len;
		name = kmalloc(len + 1, GFP_NOFS);
		if (!name)
			GOTO(out_put, rc = -ENOMEM);

		/* race here */
		spin_lock(&de->d_lock);
		if (len != de->d_name.len) {
			spin_unlock(&de->d_lock);
			kfree(name);
			goto retry;
		}
		memcpy(name, de->d_name.name, len);
		name[len] = '\0';
		spin_unlock(&de->d_lock);

		if (!lu_name_is_valid_2(name, len)) {
			kfree(name);
			GOTO(out_put, rc = -ESTALE);
		}
	}

	op_data = ll_prep_md_op_data(NULL, parent->d_inode, de->d_inode,
				     name, len, 0, LUSTRE_OPC_OPEN, NULL);
	if (IS_ERR(op_data)) {
		kfree(name);
		GOTO(out_put, rc = PTR_ERR(op_data));
	}
	op_data->op_data = lmm;
	op_data->op_data_size = lmmsize;

	if (!sbi->ll_dir_open_read && S_ISDIR(de->d_inode->i_mode))
		op_data->op_cli_flags &= ~CLI_READ_ON_OPEN;

	CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_OPEN_DELAY, cfs_fail_val);

	rc = ll_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
			    &ll_md_blocking_ast, 0, true);
	kfree(name);
	ll_finish_md_op_data(op_data);
	if (rc == -ESTALE) {
		/* reason to keep own exit path - don't flood log
		 * with messages with -ESTALE errors.
		 */
		if (!it_disposition(itp, DISP_OPEN_OPEN) ||
		    it_open_error(DISP_OPEN_OPEN, itp))
			GOTO(out, rc);
		ll_release_openhandle(de, itp);
		GOTO(out, rc);
	}

	if (it_disposition(itp, DISP_LOOKUP_NEG))
		GOTO(out, rc = -ENOENT);

	if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
		rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
		CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
		GOTO(out, rc);
	}

	rc = ll_prep_inode(&de->d_inode, &req->rq_pill, NULL, itp);

	if (!rc && itp->it_lock_mode) {
		enum mds_ibits_locks bits = MDS_INODELOCK_NONE;

		/* if DoM bit returned along with LAYOUT bit then there
		 * can be read-on-open data returned.
		 */
		ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
		if (bits & MDS_INODELOCK_DOM && bits & MDS_INODELOCK_LAYOUT)
			ll_dom_finish_open(de->d_inode, req);
		if (bits & MDS_INODELOCK_UPDATE && S_ISDIR(de->d_inode->i_mode))
			ll_dir_finish_open(de->d_inode, req);
	}
	/* open may not fetch LOOKUP lock, update dir depth and default LMV
	 * anyway.
	 */
	if (!rc && !d_lustre_invalid(de) && S_ISDIR(de->d_inode->i_mode))
		ll_update_dir_depth_dmv(parent->d_inode, de);

out:
	ptlrpc_req_put(req);
	ll_intent_drop_lock(itp);

	/* We did open by fid, but by the time we got to the server, the object
	 * disappeared. This is possible if the object was unlinked, but it's
	 * also possible if the object was unlinked by a rename. In the case
	 * of an object renamed over our existing one, we can't fail this open.
	 * O_CREAT also goes through this path if we had an existing dentry,
	 * and it's obviously wrong to return ENOENT for O_CREAT.
	 *
	 * Instead let's return -ESTALE, and the VFS will retry the open with
	 * LOOKUP_REVAL, which we catch in ll_revalidate_dentry and fail to
	 * revalidate, causing a lookup. This causes extra lookups in the case
	 * where we had a dentry in cache but the file is being unlinked and we
	 * lose the race with unlink, but this should be very rare.
	 */
	if (rc == -ENOENT)
		rc = -ESTALE;

out_put:
	dput(parent);
	RETURN(rc);
}

static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
		       struct obd_client_handle *och)
{
	struct mdt_body *body;

	body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
	och->och_open_handle = body->mbo_open_handle;
	och->och_fid = body->mbo_fid1;
	och->och_lease_handle.cookie = it->it_lock_handle;
	och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
	och->och_flags = it->it_open_flags;

	return md_set_open_replay_data(md_exp, och, it);
}

/**
 * ll_kernel_to_mds_open_flags() - Convert kernel flags to MDS flags (Access
 * mode and open flags)
 * @kernel_open_flags: kernel input (struct file.f_flags)
 *
 * Return: MDS open flags
 */
enum mds_open_flags ll_kernel_to_mds_open_flags(unsigned int kernel_open_flags)
{
	enum mds_open_flags mds_open_flags = MDS_FMODE_CLOSED;

	if (kernel_open_flags & FMODE_READ)
		mds_open_flags |= MDS_FMODE_READ;

	if (kernel_open_flags & FMODE_WRITE)
		mds_open_flags |= MDS_FMODE_WRITE;

	if (kernel_open_flags & O_CREAT)
		mds_open_flags |= MDS_OPEN_CREAT;

	if (kernel_open_flags & O_EXCL)
		mds_open_flags |= MDS_OPEN_EXCL;

	if (kernel_open_flags & O_TRUNC)
		mds_open_flags |= MDS_OPEN_TRUNC;

	if (kernel_open_flags & O_APPEND)
		mds_open_flags |= MDS_OPEN_APPEND;

	if (kernel_open_flags & O_SYNC)
		mds_open_flags |= MDS_OPEN_SYNC;

	if (kernel_open_flags & O_DIRECTORY)
		mds_open_flags |= MDS_OPEN_DIRECTORY;

	/* FMODE_EXEC is only valid with fmode_t, use __FMODE_EXEC instead,
	 * which indicates the file is opened for execution with sys_execve
	 */
	if (kernel_open_flags & __FMODE_EXEC)
		mds_open_flags |= MDS_FMODE_EXECUTE;

	if (ll_lov_delay_create_is_set(kernel_open_flags))
		mds_open_flags |= O_LOV_DELAY_CREATE;

	if (kernel_open_flags & O_LARGEFILE)
		mds_open_flags |= MDS_OPEN_LARGEFILE;

	if (kernel_open_flags & O_NONBLOCK)
		mds_open_flags |= MDS_OPEN_NORESTORE;

	if (kernel_open_flags & O_NOCTTY)
		mds_open_flags |= MDS_OPEN_NOCTTY;

	if (kernel_open_flags & O_NONBLOCK)
		mds_open_flags |= MDS_OPEN_NONBLOCK;

	if (kernel_open_flags & O_NOFOLLOW)
		mds_open_flags |= MDS_OPEN_NOFOLLOW;

	if (kernel_open_flags & FASYNC)
		mds_open_flags |= MDS_OPEN_FASYNC;

	return mds_open_flags;
}

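/*
 * Example (illustrative only): the caller, ll_file_open(), bumps f_flags so
 * the O_ACCMODE bits line up with FMODE_READ/FMODE_WRITE before calling in.
 * An open(2) with O_WRONLY|O_CREAT|O_EXCL therefore maps to
 * MDS_FMODE_WRITE | MDS_OPEN_CREAT | MDS_OPEN_EXCL, with MDS_OPEN_LARGEFILE
 * added when O_LARGEFILE is set. Note O_NONBLOCK deliberately contributes
 * two MDS flags above (MDS_OPEN_NORESTORE and MDS_OPEN_NONBLOCK).
 */
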
static int ll_local_open(struct file *file, struct lookup_intent *it,
			 struct ll_file_data *lfd,
			 struct obd_client_handle *och)
{
	struct inode *inode = file_inode(file);

	ENTRY;

	LASSERT(!file->private_data);
	LASSERT(lfd != NULL);

	if (och) {
		int rc;

		rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
		if (rc != 0)
			RETURN(rc);
	}

	file->private_data = lfd;
	ll_readahead_init(inode, &lfd->fd_ras);
	lfd->fd_open_mode = it->it_open_flags & (MDS_FMODE_READ |
						 MDS_FMODE_WRITE |
						 MDS_FMODE_EXEC);

	RETURN(0);
}

void ll_track_file_opens(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	/* do not skew results with delays from never-opened inodes */
	if (ktime_to_ns(lli->lli_close_fd_time))
		ll_stats_ops_tally(sbi, LPROC_LL_INODE_OPCLTM,
			   ktime_us_delta(ktime_get(), lli->lli_close_fd_time));

	if (ktime_after(ktime_get(),
			ktime_add_ms(lli->lli_close_fd_time,
				     sbi->ll_oc_max_ms))) {
		lli->lli_open_fd_count = 1;
		lli->lli_close_fd_time = ns_to_ktime(0);
	} else {
		lli->lli_open_fd_count++;
	}

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_OCOUNT,
			   lli->lli_open_fd_count);
}

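/*
 * Example of the heuristic above: an open arriving more than ll_oc_max_ms
 * after the last close restarts lli_open_fd_count at 1, while rapid
 * open/close cycles keep incrementing it until ll_file_open() crosses its
 * threshold and requests MDS_OPEN_LOCK. (The tunables are assumed to be the
 * "opencache_*" llite parameters; see ll_file_open() below.)
 */
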
/**
 * ll_file_open() - setup and handle file open
 * @inode: inode of the file being opened
 * @file: open file pointer in the kernel
 *
 * Open a file, and (for the very first open) create objects on the OSTs at
 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
 * creation or open until ll_lov_setstripe() ioctl is called.
 *
 * If we already have the stripe MD locally then we don't request it in
 * md_open(), by passing a lmm_size = 0.
 *
 * It is up to the application to ensure no other processes open this file
 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
 * used. We might be able to avoid races of that sort by getting lli_open_sem
 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
 *
 * Return:
 * * %0 on success
 * * %negative errno on failure
 */
int ll_file_open(struct inode *inode, struct file *file)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lookup_intent *it, oit = { .it_op = IT_OPEN,
					  .it_open_flags = file->f_flags };
	struct obd_client_handle **och_p = NULL;
	__u64 *och_usecount = NULL;
	struct ll_file_data *lfd;
	ktime_t kstart = ktime_get();
	int rc = 0;

	ENTRY;

	CDEBUG(D_VFSTRACE|D_IOTRACE,
	       "START file "DNAME":"DFID"(%p), flags %o\n",
	       encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
	       inode, file->f_flags);

	it = file->private_data; /* XXX: compat macro */
	file->private_data = NULL; /* prevent ll_local_open assertion */

	if (S_ISREG(inode->i_mode)) {
		rc = ll_file_open_encrypt(inode, file);
		if (rc) {
			if (it && it->it_disposition)
				ll_release_openhandle(file_dentry(file), it);
			GOTO(out_nofiledata, rc);
		}
	}

	lfd = ll_file_data_get();
	if (lfd == NULL)
		GOTO(out_nofiledata, rc = -ENOMEM);

	lfd->fd_file = file;
	if (S_ISDIR(inode->i_mode))
		ll_authorize_statahead(inode, lfd);

	ll_track_file_opens(inode);
	if (is_root_inode(inode)) {
		file->private_data = lfd;
		RETURN(0);
	}

	if (!it || !it->it_disposition) {
		unsigned int kernel_flags = file->f_flags;

		/* Convert f_flags into access mode. We cannot use file->f_mode,
		 * because everything but O_ACCMODE mask was stripped from there
		 */
		if ((oit.it_open_flags + MDS_FMODE_READ) & O_ACCMODE)
			kernel_flags++;

		oit.it_open_flags = ll_kernel_to_mds_open_flags(kernel_flags);

		if (file->f_flags & O_TRUNC)
			oit.it_open_flags |= MDS_FMODE_WRITE;

		/* kernel only calls f_op->open in dentry_open.  filp_open
		 * calls dentry_open after the call to open_namei that checks
		 * permissions.  Only nfsd_open calls dentry_open directly
		 * without checking permissions, and because of that the code
		 * below is safe.
		 */
		if (oit.it_open_flags & (MDS_FMODE_WRITE | MDS_FMODE_READ))
			oit.it_open_flags |= MDS_OPEN_OWNEROVERRIDE;

		/* We do not want O_EXCL here, presumably we opened the file
		 * already? XXX - NFS implications?
		 */
		oit.it_open_flags &= ~MDS_OPEN_EXCL;

		/* bug20584, if "it_open_flags" contains O_CREAT, file will be
		 * created if necessary, then "IT_CREAT" should be set to keep
		 * consistent with it
		 */
		if (oit.it_open_flags & MDS_OPEN_CREAT)
			oit.it_op |= IT_CREAT;

		it = &oit;
	}

restart:
	/* Let's see if we have file open on MDS already. */
	if (it->it_open_flags & MDS_FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else if (it->it_open_flags & MDS_FMODE_EXEC) {
		och_p = &lli->lli_mds_exec_och;
		och_usecount = &lli->lli_open_fd_exec_count;
	} else {
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	mutex_lock(&lli->lli_och_mutex);
	if (*och_p) { /* Open handle is present */
		if (it_disposition(it, DISP_OPEN_OPEN)) {
			/* Well, there's extra open request that we do not need,
			 * let's close it somehow. This will decref request.
			 */
			rc = it_open_error(DISP_OPEN_OPEN, it);
			if (rc) {
				mutex_unlock(&lli->lli_och_mutex);
				GOTO(out_openerr, rc);
			}

			ll_release_openhandle(file_dentry(file), it);
		}
		(*och_usecount)++;

		rc = ll_local_open(file, it, lfd, NULL);
		if (rc) {
			(*och_usecount)--;
			mutex_unlock(&lli->lli_och_mutex);
			GOTO(out_openerr, rc);
		}
	} else {
		LASSERT(*och_usecount == 0);
		if (!it->it_disposition) {
			struct dentry *dentry = file_dentry(file);
			struct ll_sb_info *sbi = ll_i2sbi(inode);
			int open_threshold = sbi->ll_oc_thrsh_count;

			/* We cannot just request lock handle now, new ELC code
			 * means that one of other OPEN locks for this file
			 * could be cancelled, and since blocking ast handler
			 * would attempt to grab och_mutex as well, that would
			 * result in a deadlock
			 */
			mutex_unlock(&lli->lli_och_mutex);
			/*
			 * Normally called under two situations:
			 * 1. fhandle / NFS export.
			 * 2. A race/condition on MDS resulting in no open
			 *    handle to be returned from LOOKUP|OPEN request,
			 *    for example if the target entry was a symlink.
			 *
			 * For NFSv3 we need to always cache the open lock
			 * for pre 5.5 Linux kernels.
			 *
			 * After reaching number of opens of this inode
			 * we always ask for an open lock on it to handle
			 * bad userspace actors that open and close files
			 * in a loop for absolutely no good reason
			 */
			/* fhandle / NFS path. */
			if (lli->lli_open_thrsh_count != UINT_MAX)
				open_threshold = lli->lli_open_thrsh_count;

			if (filename_is_volatile(dentry->d_name.name,
						 dentry->d_name.len, NULL)) {
				/* There really is nothing here, but this
				 * makes it more readable I think.
				 * We do not want openlock for volatile
				 * files under any circumstances
				 */
			} else if (open_threshold > 0) {
				/* Take MDS_OPEN_LOCK with many opens */
				if (lli->lli_open_fd_count >= open_threshold)
					it->it_open_flags |= MDS_OPEN_LOCK;

				/* If this is open after we just closed */
				else if (ktime_before(ktime_get(),
					    ktime_add_ms(lli->lli_close_fd_time,
							 sbi->ll_oc_thrsh_ms)))
					it->it_open_flags |= MDS_OPEN_LOCK;
			}

			/*
			 * Always specify MDS_OPEN_BY_FID because we don't want
			 * to get file with different fid.
			 */
			it->it_open_flags |= MDS_OPEN_BY_FID;
			rc = ll_intent_file_open(dentry, NULL, 0, it);
			if (rc)
				GOTO(out_openerr, rc);

			goto restart;
		}
		OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
		if (!*och_p)
			GOTO(out_och_free, rc = -ENOMEM);

		(*och_usecount)++;

		/* md_intent_lock() didn't get a request ref if there was an
		 * open error, so don't do cleanup on the request here
		 * (bug 3430)
		 */
		/* XXX (green): Should not we bail out on any error here, not
		 * just open error?
		 */
		rc = it_open_error(DISP_OPEN_OPEN, it);
		if (rc != 0)
			GOTO(out_och_free, rc);

		LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
			 "inode %px: disposition %x, status %d\n", inode,
			 it_disposition(it, ~0), it->it_status);

		rc = ll_local_open(file, it, lfd, *och_p);
		if (rc)
			GOTO(out_och_free, rc);
	}

	mutex_unlock(&lli->lli_och_mutex);

	/* It is not from atomic_open(). */
	if (it == &oit) {
		rc = pcc_file_open(inode, file);
		if (rc)
			GOTO(out_och_free, rc);
	}

	lfd = NULL;

	/* Must do this outside lli_och_mutex lock to prevent deadlock where
	 * different kind of OPEN lock for this same inode gets cancelled by
	 * ldlm_cancel_lru
	 */
	if (!S_ISREG(inode->i_mode))
		GOTO(out_och_free, rc);
	cl_lov_delay_create_clear(&file->f_flags);
	GOTO(out_och_free, rc);

out_och_free:
	if (rc) {
		if (och_p && *och_p) {
			OBD_FREE(*och_p, sizeof(struct obd_client_handle));
			*och_p = NULL; /* OBD_FREE writes some magic there */
			(*och_usecount)--;
		}
		mutex_unlock(&lli->lli_och_mutex);

out_openerr:
		if (lli->lli_opendir_key == lfd)
			ll_deauthorize_statahead(inode, lfd);

		if (lfd != NULL)
			ll_file_data_put(lfd);
	} else {
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN,
				   ktime_us_delta(ktime_get(), kstart));
	}

out_nofiledata:
	if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
		ptlrpc_req_put(it->it_request);
		it_clear_disposition(it, DISP_ENQ_OPEN_REF);
	}

	CDEBUG(D_IOTRACE,
	       "COMPLETED file "DNAME":"DFID"(%p), flags %o, rc = %d\n",
	       encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
	       inode, file->f_flags, rc);

	RETURN(rc);
}

static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
				    struct ldlm_lock_desc *desc,
				    void *data, int flag)
{
	struct lustre_handle lockh;
	int rc;

	ENTRY;

	switch (flag) {
	case LDLM_CB_BLOCKING:
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		if (rc < 0) {
			CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
			RETURN(rc);
		}
		break;
	case LDLM_CB_CANCELING:
		/* do nothing */
		break;
	}
	RETURN(0);
}

/*
 * When setting a lease on a file, we take ownership of the lli_mds_*_och
 * and save it as lfd->fd_och so as to force the client to reopen the file
 * even if it has an open lock in cache already.
 */
static int ll_lease_och_acquire(struct inode *inode, struct file *file,
				struct lustre_handle *old_open_handle)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_file_data *lfd = file->private_data;
	struct obd_client_handle **och_p;
	__u64 *och_usecount;
	int rc = 0;

	ENTRY;

	/* Get the openhandle of the file */
	mutex_lock(&lli->lli_och_mutex);
	if (lfd->fd_lease_och != NULL)
		GOTO(out_unlock, rc = -EBUSY);

	if (lfd->fd_och == NULL) {
		if (file->f_mode & FMODE_WRITE) {
			LASSERT(lli->lli_mds_write_och != NULL);
			och_p = &lli->lli_mds_write_och;
			och_usecount = &lli->lli_open_fd_write_count;
		} else {
			LASSERT(lli->lli_mds_read_och != NULL);
			och_p = &lli->lli_mds_read_och;
			och_usecount = &lli->lli_open_fd_read_count;
		}

		if (*och_usecount > 1)
			GOTO(out_unlock, rc = -EBUSY);

		lfd->fd_och = *och_p;
		*och_p = NULL;
		*och_usecount = 0;
	}

	*old_open_handle = lfd->fd_och->och_open_handle;

	EXIT;
out_unlock:
	mutex_unlock(&lli->lli_och_mutex);
	return rc;
}

/* Release ownership on lli_mds_*_och when putting back a file lease. */
static int ll_lease_och_release(struct inode *inode, struct file *file)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_file_data *lfd = file->private_data;
	struct obd_client_handle **och_p;
	struct obd_client_handle *old_och = NULL;
	__u64 *och_usecount;
	int rc = 0;

	ENTRY;

	mutex_lock(&lli->lli_och_mutex);
	if (file->f_mode & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else {
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	/* The file may have been open by another process (broken lease) so
	 * *och_p is not NULL. In this case we should simply increase usecount
	 * and close fd_och.
	 */
	if (*och_p != NULL) {
		old_och = lfd->fd_och;
		(*och_usecount)++;
	} else {
		*och_p = lfd->fd_och;
		*och_usecount = 1;
	}
	lfd->fd_och = NULL;
	mutex_unlock(&lli->lli_och_mutex);

	if (old_och != NULL)
		rc = ll_close_inode_openhandle(inode, old_och, 0, NULL);

	RETURN(rc);
}

/**
 * ll_lease_open() - Acquire a lease (blocking other open() calls on this
 * file) and open the file.
 * @inode: inode of the file being opened
 * @file: file to open
 * @fmode: kernel open mode (FMODE_READ or FMODE_WRITE)
 * @open_flags: MDS open flags passed from the client
 *
 * Return:
 * * populated obd_client_handle object on success
 * * ERR_PTR(negative errno) on failure
 */
static struct obd_client_handle *
ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
	      enum mds_open_flags open_flags)
{
	struct lookup_intent it = { .it_op = IT_OPEN };
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	struct lustre_handle old_open_handle = { 0 };
	struct obd_client_handle *och = NULL;
	int rc;
	int rc2;

	ENTRY;

	if (fmode != FMODE_WRITE && fmode != FMODE_READ)
		RETURN(ERR_PTR(-EINVAL));

	if (file != NULL) {
		if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
			RETURN(ERR_PTR(-EPERM));

		rc = ll_lease_och_acquire(inode, file, &old_open_handle);
		if (rc)
			RETURN(ERR_PTR(rc));
	}

	OBD_ALLOC_PTR(och);
	if (och == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		GOTO(out, rc = PTR_ERR(op_data));

	/* To tell the MDT this openhandle is from the same owner */
	op_data->op_open_handle = old_open_handle;

	it.it_open_flags = fmode | open_flags;
	it.it_open_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
	rc = ll_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
			    &ll_md_blocking_lease_ast,
	/* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise
	 * it can be cancelled which may mislead applications that the lease is
	 * broken;
	 * LDLM_FL_EXCL: Set this flag so that it won't be matched by normal
	 * open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast
	 * doesn't deal with openhandle, so normal openhandle will be leaked.
	 */
			    LDLM_FL_NO_LRU | LDLM_FL_EXCL,
			    true);
	ll_finish_md_op_data(op_data);
	ptlrpc_req_put(req);
	if (rc < 0)
		GOTO(out_release_it, rc);

	if (it_disposition(&it, DISP_LOOKUP_NEG))
		GOTO(out_release_it, rc = -ENOENT);

	rc = it_open_error(DISP_OPEN_OPEN, &it);
	if (rc)
		GOTO(out_release_it, rc);

	LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
	rc = ll_och_fill(sbi->ll_md_exp, &it, och);
	if (rc)
		GOTO(out_release_it, rc);

	if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
		GOTO(out_close, rc = -EOPNOTSUPP);

	/* lease already acquired, handle lease lock */
	ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
	if (!it.it_lock_mode ||
	    !(it.it_lock_bits & MDS_INODELOCK_OPEN)) {
		/* open lock must return for lease */
		rc = -EPROTO;
		CERROR("%s: "DFID" lease granted but no open lock, %d/%lu: rc = %d\n",
		       sbi->ll_fsname, PFID(ll_inode2fid(inode)),
		       it.it_lock_mode, it.it_lock_bits, rc);
		GOTO(out_close, rc);
	}

	ll_intent_release(&it);
	RETURN(och);

out_close:
	/* Cancel open lock */
	if (it.it_lock_mode != 0) {
		ldlm_lock_decref_and_cancel(&och->och_lease_handle,
					    it.it_lock_mode);
		it.it_lock_mode = 0;
		och->och_lease_handle.cookie = 0ULL;
	}
	rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
	if (rc2 < 0)
		CERROR("%s: error closing file "DFID": %d\n",
		       sbi->ll_fsname, PFID(&ll_i2info(inode)->lli_fid), rc2);
	och = NULL; /* och has been freed in ll_close_inode_openhandle() */
out_release_it:
	ll_intent_release(&it);
out:
	if (och != NULL)
		OBD_FREE_PTR(och);
	RETURN(ERR_PTR(rc));
}

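/*
 * Illustrative usage (sketch, not a real caller): the lease ioctls acquire
 * a lease before a layout operation roughly as follows, assuming "file" was
 * opened with FMODE_WRITE:
 *
 *	struct obd_client_handle *och;
 *	bool lease_broken;
 *
 *	och = ll_lease_open(inode, file, FMODE_WRITE, 0);
 *	if (IS_ERR(och))
 *		return PTR_ERR(och);
 *	// ... work under the lease; any conflicting open breaks it ...
 *	rc = ll_lease_close(och, inode, &lease_broken);
 */
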
/**
 * ll_check_swap_layouts_validity() - Check whether a layout swap can be done
 * between two inodes.
 * @inode1: First inode to check
 * @inode2: Second inode to check
 *
 * Return:
 * * %0 on success, layout swap can be performed between both inodes
 * * %negative error code if requirements are not met
 */
static int ll_check_swap_layouts_validity(struct inode *inode1,
					  struct inode *inode2)
{
	if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
		return -EINVAL;

	if (inode_permission(&nop_mnt_idmap, inode1, MAY_WRITE) ||
	    inode_permission(&nop_mnt_idmap, inode2, MAY_WRITE))
		return -EPERM;

	if (inode1->i_sb != inode2->i_sb)
		return -EXDEV;

	return 0;
}

static int ll_swap_layouts_close(struct obd_client_handle *och,
				 struct inode *inode, struct inode *inode2,
				 struct lustre_swap_layouts *lsl)
{
	const struct lu_fid *fid1 = ll_inode2fid(inode);
	struct swap_layouts_param slp;
	const struct lu_fid *fid2;
	int rc;

	ENTRY;

	CDEBUG(D_INODE, "%s: biased close of file "DFID"\n",
	       ll_i2sbi(inode)->ll_fsname, PFID(fid1));

	rc = ll_check_swap_layouts_validity(inode, inode2);
	if (rc < 0)
		GOTO(out_free_och, rc);

	/* We now know that inode2 is a lustre inode */
	fid2 = ll_inode2fid(inode2);

	rc = lu_fid_cmp(fid1, fid2);
	if (rc == 0)
		GOTO(out_free_och, rc = -EINVAL);

	/* Close the file and {swap,merge} layouts between inode & inode2.
	 * NB: local lease handle is released in mdc_close_intent_pack()
	 * because we still need it to pack l_remote_handle to MDT.
	 */
	slp.slp_inode = inode2;
	slp.slp_dv1 = lsl->sl_dv1;
	slp.slp_dv2 = lsl->sl_dv2;
	rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP, &slp);

	och = NULL; /* freed in ll_close_inode_openhandle() */

out_free_och:
	if (och != NULL)
		OBD_FREE_PTR(och);

	RETURN(rc);
}

/*
 * Release lease and close the file.
 * It will check if the lease has ever been broken.
 */
static int ll_lease_close_intent(struct obd_client_handle *och,
				 struct inode *inode,
				 bool *lease_broken, enum mds_op_bias bias,
				 void *data)
{
	struct ldlm_lock *lock;
	bool cancelled = true;
	int rc;

	ENTRY;

	lock = ldlm_handle2lock(&och->och_lease_handle);
	if (lock != NULL) {
		lock_res_and_lock(lock);
		cancelled = ldlm_is_cancel(lock);
		unlock_res_and_lock(lock);
		ldlm_lock_put(lock);
	}

	CDEBUG(D_INODE, "lease for "DFID" broken? %d, bias: %x\n",
	       PFID(&ll_i2info(inode)->lli_fid), cancelled, bias);

	if (lease_broken != NULL)
		*lease_broken = cancelled;

	if (!cancelled && !bias)
		ldlm_cli_cancel(&och->och_lease_handle, 0);

	if (cancelled) { /* no need to execute intent */
		bias = 0;
		data = NULL;
	}

	rc = ll_close_inode_openhandle(inode, och, bias, data);
	RETURN(rc);
}

static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
			  bool *lease_broken)
{
	return ll_lease_close_intent(och, inode, lease_broken, 0, NULL);
}

/* After lease is taken, send the RPC MDS_REINT_RESYNC to the MDT */
static int ll_lease_file_resync(struct obd_client_handle *och,
				struct inode *inode, void __user *uarg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ll_ioc_lease_id ioc;
	__u64 data_version_unused;
	int rc;

	ENTRY;

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		RETURN(PTR_ERR(op_data));

	if (copy_from_user(&ioc, uarg, sizeof(ioc)))
		GOTO(out, rc = -EFAULT);

	/* before starting file resync, it's necessary to clean up page cache
	 * in client memory, otherwise once the layout version is increased,
	 * writing back cached data will be denied by the OSTs.
	 */
	rc = ll_data_version(inode, &data_version_unused, LL_DV_WR_FLUSH);
	if (rc)
		GOTO(out, rc);

	op_data->op_lease_handle = och->och_lease_handle;
	op_data->op_mirror_id = ioc.lil_mirror_id;
	rc = md_file_resync(sbi->ll_md_exp, op_data);
	if (rc)
		GOTO(out, rc);

	EXIT;
out:
	ll_finish_md_op_data(op_data);
	return rc;
}

static int ll_merge_attr_nolock(const struct lu_env *env, struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct cl_attr *attr = vvp_env_new_attr(env);
	s64 atime;
	s64 mtime;
	s64 ctime;
	int rc = 0;

	ENTRY;

	/* Merge timestamps the most recently obtained from MDS with
	 * timestamps obtained from OSTs.
	 *
	 * Do not overwrite atime of inode because it may be refreshed
	 * by file_accessed() function. If the read was served by cache
	 * data, there is no RPC to be sent so that atime may not be
	 * transferred to OSTs at all. MDT only updates atime at close time
	 * if it's at least 'mdd.*.atime_diff' older.
	 * All in all, the atime in Lustre does not strictly comply with
	 * POSIX. Solving this problem would need to send an RPC to MDT for
	 * each read, and that would hurt performance.
	 */
	if (test_and_clear_bit(LLIF_UPDATE_ATIME, &lli->lli_flags) ||
	    inode_get_atime_sec(inode) < lli->lli_atime)
		inode_set_atime(inode, lli->lli_atime, 0);

	inode_set_mtime(inode, lli->lli_mtime, 0);
	inode_set_ctime(inode, lli->lli_ctime, 0);

	mtime = inode_get_mtime_sec(inode);
	atime = inode_get_atime_sec(inode);
	ctime = inode_get_ctime_sec(inode);

	cl_object_attr_lock(obj);
	if (CFS_FAIL_CHECK(OBD_FAIL_MDC_MERGE))
		rc = -EINVAL;
	else
		rc = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);

	if (rc != 0)
		GOTO(out, rc = (rc == -ENODATA ? 0 : rc));

	CFS_RACE(OBD_FAIL_LLITE_STAT_RACE2);
	/* let an awakened stat thread have a chance to get intermediate
	 * attributes from inode
	 */
	CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_STAT_RACE2, 1);
	if (atime < attr->cat_atime)
		atime = attr->cat_atime;

	if (ctime < attr->cat_ctime)
		ctime = attr->cat_ctime;

	if (mtime < attr->cat_mtime)
		mtime = attr->cat_mtime;

	CDEBUG(D_VFSTRACE, DFID" updating i_size %llu i_blocks %llu\n",
	       PFID(&lli->lli_fid), attr->cat_size, attr->cat_blocks);

	if (IS_ENCRYPTED(inode) && !ll_has_encryption_key(inode)) {
		/* Without the key, round up encrypted file size to next
		 * LUSTRE_ENCRYPTION_UNIT_SIZE. Clear text size is put in
		 * lli_lazysize for proper file size setting at close time.
		 */
		lli->lli_attr_valid |= OBD_MD_FLLAZYSIZE;
		lli->lli_lazysize = attr->cat_size;
		attr->cat_size = round_up(attr->cat_size,
					  LUSTRE_ENCRYPTION_UNIT_SIZE);
	}
	i_size_write(inode, attr->cat_size);
	inode->i_blocks = attr->cat_blocks;

	inode_set_mtime(inode, mtime, 0);
	inode_set_atime(inode, atime, 0);
	inode_set_ctime(inode, ctime, 0);

	EXIT;
out:
	return rc;
}

int ll_merge_attr(const struct lu_env *env, struct inode *inode)
{
	int rc;

	ll_inode_size_lock(inode);
	rc = ll_merge_attr_nolock(env, inode);
	ll_inode_size_unlock(inode);

	return rc;
}

/* Use to update size and blocks on inode for LSOM if there is no contention */
int ll_merge_attr_try(const struct lu_env *env, struct inode *inode)
{
	int rc = 0;

	if (ll_inode_size_trylock(inode)) {
		rc = ll_merge_attr_nolock(env, inode);
		ll_inode_size_unlock(inode);
	}

	return rc;
}

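/*
 * Unlike ll_merge_attr(), the trylock variant above returns 0 without
 * merging when the inode size lock is contended: for LSOM updates a
 * slightly stale size/blocks value is acceptable, whereas blocking here
 * is not.
 */
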
/*
 * Set designated mirror for I/O.
 *
 * So far only read, write, and truncate can support issuing I/O to a
 * designated mirror.
 */
void ll_io_set_mirror(struct cl_io *io, const struct file *file)
{
	struct ll_file_data *lfd = file->private_data;

	/* clear layout version for generic(non-resync) I/O in case it carries
	 * stale layout version due to I/O restart
	 */
	io->ci_layout_version = 0;

	/* FLR: disable non-delay for designated mirror I/O because obviously
	 * only one mirror is available
	 */
	if (lfd->fd_designated_mirror > 0) {
		io->ci_ndelay = 0;
		io->ci_designated_mirror = lfd->fd_designated_mirror;
		io->ci_layout_version = lfd->fd_layout_version;
	}

	CDEBUG(D_VFSTRACE, DNAME": designated mirror: %d\n",
	       encode_fn_file(file), io->ci_designated_mirror);
}

/* This is relatime_need_update() from Linux 5.17, which is not exported */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec64 now)
{
	struct timespec64 ts;
	struct timespec64 atime;

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/* Is mtime younger than atime? If yes, update atime: */
	atime = inode_get_atime(inode);
	ts = inode_get_mtime(inode);
	if (timespec64_compare(&ts, &atime) >= 0)
		return 1;
	/* Is ctime younger than atime? If yes, update atime: */
	ts = inode_get_ctime(inode);
	if (timespec64_compare(&ts, &atime) >= 0)
		return 1;

	/* Is the previous atime value older than a day? If yes, update atime */
	if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
		return 1;
	/* Good, we can skip the atime update: */
	return 0;
}

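/*
 * Example: under MNT_RELATIME an atime newer than both mtime and ctime is
 * left alone, but once it is at least a day old (the 24*60*60 second check
 * above) the next access refreshes it anyway.
 */
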
/* Very similar to kernel function: !__atime_needs_update() */
static bool file_is_noatime(const struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file_inode((struct file *)file);
	struct timespec64 now;

	if (file->f_flags & O_NOATIME)
		return true;

	if (inode->i_flags & S_NOATIME)
		return true;

	if (IS_NOATIME(inode))
		return true;

	if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
		return true;

	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return true;

	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
		return true;

	now = current_time(inode);

	if (!relatime_need_update(mnt, inode, now))
		return true;

	return false;
}

void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
		struct vvp_io_args *args)
{
	struct inode *inode = file_inode(file);
	struct ll_file_data *lfd = file->private_data;
	int flags = vvp_io_args_flags(file, args);

	io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
	io->ci_lock_no_expand = lfd->lfd_lock_no_expand;

	if (iot == CIT_WRITE) {
		io->u.ci_wr.wr_append = iocb_ki_flags_check(flags, APPEND);
		io->u.ci_wr.wr_sync = !!(iocb_ki_flags_check(flags, SYNC) ||
					 iocb_ki_flags_check(flags, DSYNC) ||
					 IS_SYNC(inode));
	}

	io->ci_iocb_nowait = iocb_ki_flags_check(flags, NOWAIT);

	io->ci_obj = ll_i2info(inode)->lli_clob;
	io->ci_lockreq = CILR_MAYBE;
	if (ll_file_nolock(file)) {
		io->ci_lockreq = CILR_NEVER;
		io->ci_no_srvlock = 1;
	} else if (iocb_ki_flags_check(flags, APPEND)) {
		io->ci_lockreq = CILR_MANDATORY;
	}
	io->ci_noatime = file_is_noatime(file);
	io->ci_async_readahead = false;

	/* FLR: only use non-delay I/O for read as there is only one
	 * available mirror for write.
	 */
	io->ci_ndelay = !(iot == CIT_WRITE);
	/* unaligned DIO has compat issues with some older servers, but we find
	 * out if there are such servers while setting up the IO, so it starts
	 * out allowed
	 */
	io->ci_allow_unaligned_dio = true;
	io->ci_hybrid_switched = args->via_hybrid_switched;

	ll_io_set_mirror(io, file);
}

static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
			__u64 count)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	enum obd_heat_type sample_type;
	enum obd_heat_type iobyte_type;
	__u64 now = ktime_get_real_seconds();

	if (!ll_sbi_has_file_heat(sbi) ||
	    lli->lli_heat_flags & LU_HEAT_FLAG_OFF)
		return;

	if (iot == CIT_READ) {
		sample_type = OBD_HEAT_READSAMPLE;
		iobyte_type = OBD_HEAT_READBYTE;
	} else if (iot == CIT_WRITE) {
		sample_type = OBD_HEAT_WRITESAMPLE;
		iobyte_type = OBD_HEAT_WRITEBYTE;
	} else {
		return;
	}

	spin_lock(&lli->lli_heat_lock);
	obd_heat_add(&lli->lli_heat_instances[sample_type], now, 1,
		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
	obd_heat_add(&lli->lli_heat_instances[iobyte_type], now, count,
		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
	spin_unlock(&lli->lli_heat_lock);
}

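/*
 * Example: a 1 MiB read adds one event to OBD_HEAT_READSAMPLE and 1048576
 * units to OBD_HEAT_READBYTE; obd_heat_add() decays both instances by
 * ll_heat_decay_weight over each elapsed ll_heat_period_second interval.
 */
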
static bool
ll_hybrid_bio_dio_switch_check(struct file *file, struct kiocb *iocb,
			       enum cl_io_type iot, size_t count)
{
	/* we can only do this with IOCB_FLAGS, since we can't modify f_flags
	 * because they're visible in userspace. so we check for IOCB_DIRECT
	 */
#ifdef IOCB_DIRECT
	struct inode *inode = file_inode(file);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	int op = LPROC_LL_HYBRID_NOSWITCH;
	int dio_switch = false;

	ENTRY;

	/* it doesn't make sense to switch unless it's READ or WRITE */
	if (iot != CIT_WRITE && iot != CIT_READ)
		RETURN(false);

	/* Already using direct I/O, no need to switch. */
	if (iocb->ki_flags & IOCB_DIRECT)
		RETURN(false);

	if (!test_bit(LL_SBI_HYBRID_IO, sbi->ll_flags))
		RETURN(false);

	/* we only log hybrid IO stats if we hit the actual switching logic -
	 * not if hybrid IO is disabled or the IO was never a candidate to
	 * switch
	 */
	if (iot == CIT_WRITE &&
	    count >= sbi->ll_hybrid_io_write_threshold_bytes) {
		op = LPROC_LL_HYBRID_WRITESIZE_SWITCH;
		GOTO(out, dio_switch = true);
	}

	if (iot == CIT_READ &&
	    count >= sbi->ll_hybrid_io_read_threshold_bytes) {
		op = LPROC_LL_HYBRID_READSIZE_SWITCH;
		GOTO(out, dio_switch = true);
	}

out:
	ll_stats_ops_tally(sbi, op, 1);
	RETURN(dio_switch);
#else
	return false;
#endif
}

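/*
 * Example (illustrative threshold only): if
 * ll_hybrid_io_write_threshold_bytes were 2 MiB, a 4 MiB buffered write
 * would be switched to direct I/O and tallied as
 * LPROC_LL_HYBRID_WRITESIZE_SWITCH, while a 64 KiB write would stay
 * buffered and count as LPROC_LL_HYBRID_NOSWITCH.
 */
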
static ssize_t
ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
		   struct file *file, enum cl_io_type iot,
		   loff_t *ppos, size_t bytes)
{
	struct inode *inode = file_inode(file);
	struct ll_file_data *lfd = file->private_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct vvp_io *vio = vvp_env_io(env);
	struct cl_dio_aio *ci_dio_aio = NULL;
	struct range_lock range;
	struct cl_io *io;
	int flags = vvp_io_args_flags(file, args);
	bool is_parallel_dio = false;
	bool range_locked = false;
	unsigned int retried = 0;
	bool dio_lock = false;
	bool is_aio = false;
	size_t max_io_bytes;
	ssize_t result = 0;
	ssize_t rc2 = 0;
	int retries = 1000;
	size_t per_bytes;
	int rc = 0;

	ENTRY;

	CDEBUG(D_VFSTRACE, DNAME": %s ppos: %llu, bytes: %zu\n",
	       encode_fn_file(file),
	       iot == CIT_READ ? "read" : "write", *ppos, bytes);

	max_io_bytes = min_t(size_t, PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT,
			     sbi->ll_cache->ccc_lru_max >> 2) << PAGE_SHIFT;

	io = vvp_env_new_io(env);
	if (iocb_ki_flags_check(flags, DIRECT)) {
		if (iocb_ki_flags_check(flags, APPEND))
			dio_lock = true;
		if (!is_sync_kiocb(args->u.normal.via_iocb) &&
		    /* hybrid IO is also potentially async */
		    !args->via_hybrid_switched)
			is_aio = true;

		/* the kernel does not support AIO on pipes, and parallel DIO
		 * uses part of the AIO path, so we must not do parallel dio
		 * to pipes
		 */
		is_parallel_dio = !iov_iter_is_pipe(args->u.normal.via_iter) &&
				  !is_aio;

		if (!ll_sbi_has_parallel_dio(sbi))
			is_parallel_dio = false;

		ci_dio_aio = cl_dio_aio_alloc(args->u.normal.via_iocb,
					      ll_i2info(inode)->lli_clob,
					      is_aio);
		if (!ci_dio_aio)
			GOTO(out, rc = -ENOMEM);
	}

restart:
	/*
	 * The IO block size needs to be aware of the cached page limit;
	 * otherwise, with a small max_cached_mb but a large block IO issued,
	 * the IO could not finish and would block the whole client.
	 */
	if (iocb_ki_flags_check(flags, DIRECT) || bytes < max_io_bytes)
		per_bytes = bytes;
	else
		per_bytes = max_io_bytes;

	io = vvp_env_new_io(env);
	ll_io_init(io, file, iot, args);
	io->ci_dio_aio = ci_dio_aio;
	io->ci_dio_lock = dio_lock;
	io->ci_ndelay_tried = retried;
	io->ci_parallel_dio = is_parallel_dio;

	if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
		if (iocb_ki_flags_check(flags, APPEND))
			range_lock_init(&range, 0, LUSTRE_EOF);
		else
			range_lock_init(&range, *ppos, *ppos + per_bytes - 1);

		vio->vui_fd = file->private_data;
		vio->vui_iter = args->u.normal.via_iter;
		vio->vui_iocb = args->u.normal.via_iocb;
		/* Direct IO reads must also take range lock,
		 * or multiple reads will try to work on the same pages
		 * See LU-6227 for details.
		 */
		if (((iot == CIT_WRITE) ||
		     (iot == CIT_READ && iocb_ki_flags_check(flags, DIRECT))) &&
		    !(vio->vui_fd->lfd_file_flags & LL_FILE_GROUP_LOCKED)) {
			CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
			       RL_PARA(&range));
			rc = range_lock(&lli->lli_write_tree, &range);
			if (rc < 0)
				GOTO(out, rc);

			range_locked = true;
		}

		ll_cl_add(inode, env, io, LCC_RW);
		rc = cl_io_loop(env, io);
		ll_cl_remove(inode, env);
	} else {
		/* cl_io_rw_init() handled IO */
		rc = io->ci_result;
	}

	if (io->ci_dio_aio && !is_aio) {
		struct cl_sync_io *anchor = &io->ci_dio_aio->cda_sync;

		/* for dio, EIOCBQUEUED is an implementation detail,
		 * and we don't return it to userspace
		 */
		if (rc == -EIOCBQUEUED)
			rc = 0;

		/* N/B: parallel DIO may be disabled during i/o submission;
		 * if that occurs, I/O shifts to sync, so it's all resolved
		 * before we get here, and this wait call completes
		 * immediately.
		 */
		rc2 = cl_sync_io_wait_recycle(env, anchor, 0, 0);
		if (rc2 < 0)
			rc = rc2;
	}

	if (range_locked) {
		CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
		       RL_PARA(&range));
		range_unlock(&lli->lli_write_tree, &range);
		range_locked = false;
	}

	if (io->ci_bytes > 0) {
		if (rc2 == 0 || rc2 == -EIOCBQUEUED) {
			result += io->ci_bytes;
			*ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
		}

		bytes -= io->ci_bytes;

		/* prepare IO restart */
		if (bytes > 0)
			args->u.normal.via_iter = vio->vui_iter;

		if (iocb_ki_flags_check(flags, APPEND)) {
			/*
			 * Reexpand iov count because it was zero
			 * after IO finished.
			 */
			iov_iter_reexpand(vio->vui_iter, bytes);
			if (per_bytes == io->ci_bytes)
				io->ci_need_restart = 1;
		}
	}
out:
	cl_io_fini(env, io);

	CDEBUG(D_VFSTRACE,
	       DNAME": %d io complete with rc: %d, result: %zd, restart: %d\n",
	       encode_fn_file(file), iot, rc, result, io->ci_need_restart);

	if ((!rc || rc == -ENODATA || rc == -ENOLCK || rc == -EIOCBQUEUED) &&
	    bytes > 0 && io->ci_need_restart && retries-- > 0) {
		CDEBUG(D_VFSTRACE,
		       DNAME": restart %s from ppos=%lld bytes=%zu retries=%u ret=%zd: rc = %d\n",
		       encode_fn_file(file), iot == CIT_READ ? "read" : "write",
		       *ppos, bytes, retries, result, rc);
		/* preserve the tried count for FLR */
		retried = io->ci_ndelay_tried;
		dio_lock = io->ci_dio_lock;
		goto restart;
	}

	/* update inode size */
	if (io->ci_type == CIT_WRITE)
		ll_merge_attr(env, inode);

	if (io->ci_dio_aio) {
		/* set the number of bytes successfully moved in the aio */
		if (result > 0)
			io->ci_dio_aio->cda_bytes = result;
		/*
		 * VFS will call aio_complete() if no -EIOCBQUEUED
		 * is returned for AIO, so we can not call aio_complete()
		 * in our end_io(). (cda_no_aio_complete is always set for
		 * normal DIO.)
		 *
		 * NB: Setting cda_no_aio_complete like this is safe because
		 * the atomic_dec_and_lock in cl_sync_io_note has implicit
		 * memory barriers, so this will be seen by whichever thread
		 * completes the DIO/AIO, even if it's not this one.
		 */
		if (is_aio && rc != -EIOCBQUEUED)
			io->ci_dio_aio->cda_no_aio_complete = 1;
		/* if an aio enqueued successfully (-EIOCBQUEUED), then Lustre
		 * will call aio_complete rather than the vfs, so we return 0
		 * to tell the VFS we're handling it
		 */
		else if (is_aio) /* rc == -EIOCBQUEUED */
			result = 0;
		/*
		 * Drop the reference held by the llite layer on this top level
		 * IO context.
		 *
		 * For DIO, this frees it here, since IO is complete, and for
		 * AIO, we will call aio_complete() (and then free this top
		 * level context) once all the outstanding chunks of this AIO
		 * have completed.
		 */
		cl_sync_io_note(env, &io->ci_dio_aio->cda_sync,
				rc == -EIOCBQUEUED ? 0 : rc);
		if (!is_aio) {
			LASSERT(io->ci_dio_aio->cda_creator_free);
			cl_dio_aio_free(env, io->ci_dio_aio);
			io->ci_dio_aio = NULL;
		}
	}

	if (iot == CIT_READ) {
		if (result > 0) {
			ll_stats_ops_tally(ll_i2sbi(inode),
					   LPROC_LL_READ_BYTES, result);
			if (args->via_hybrid_switched)
				ll_stats_ops_tally(ll_i2sbi(inode),
						   LPROC_LL_HIO_READ, result);
		}
	} else if (iot == CIT_WRITE) {
		if (result > 0) {
			ll_stats_ops_tally(ll_i2sbi(inode),
					   LPROC_LL_WRITE_BYTES, result);
			if (args->via_hybrid_switched)
				ll_stats_ops_tally(ll_i2sbi(inode),
						   LPROC_LL_HIO_WRITE, result);
			lfd->fd_write_failed = false;
		} else if (result == 0 && rc == 0) {
			rc = io->ci_result;
			if (rc < 0)
				lfd->fd_write_failed = true;
			else
				lfd->fd_write_failed = false;
		} else if (rc != -ERESTARTSYS) {
			lfd->fd_write_failed = true;
		}
	}

	CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
	if (result > 0)
		ll_heat_add(inode, iot, result);

	RETURN(result > 0 ? result : rc);
}

2211 * ll_do_fast_read() - read data directly from the page cache
2212 * @iocb: kiocb from kernel
2213 * @iter: user space buffers where the data will be copied
2215 * The purpose of fast read is to overcome the per-I/O overhead and improve
2216 * IOPS, especially for small I/O.
2218 * To serve a read request, CLIO has to create and initialize a cl_io and
2219 * then request a DLM lock. This has turned out to have significant overhead
2220 * and affects the performance of small I/O dramatically.
2222 * It's not necessary to create a cl_io for each I/O. With the help of read
2223 * ahead, most of the pages being read are already in the memory cache and we
2224 * can read those pages directly: if a page exists, the corresponding DLM lock
2225 * must exist as well, so the page content is guaranteed to be valid.
2227 * In the fast read implementation, llite speculatively finds and reads pages
2228 * in the memory cache. There are three scenarios for fast read:
2229 * - If the page exists and is uptodate, the kernel VM will provide the data
2230 *   and CLIO won't be involved;
2231 * - If the page was brought into memory by read ahead, it will be exported
2232 * and read ahead parameters will be updated;
2233 * - Otherwise the page is not in memory, we can't do fast read. Therefore,
2234 * it will go back and invoke normal read, i.e., a cl_io will be created
2235 * and DLM lock will be requested.
2237 * POSIX compliance: the POSIX standard states that read is intended to be
2238 * atomic. The Lustre read implementation is in line with the Linux kernel one
2239 * and neither complies with the POSIX standard in this matter. Fast read
2240 * doesn't make the situation worse on a single node but it may interleave
2241 * write results from multiple nodes due to short read handling in ll_file_aio_read().
2243 * Returns: the number of bytes read, or a negative error code on failure.
2246 ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter)
2248 struct ll_inode_info *lli = ll_i2info(file_inode(iocb->ki_filp));
2249 int flags = iocb_ki_flags_get(iocb->ki_filp, iocb);
2252 if (!ll_sbi_has_fast_read(ll_i2sbi(file_inode(iocb->ki_filp))))
2255 /* NB: we can't do direct IO for fast read because it would need a lock
2256 * to make the IO engine happy.
2258 if (iocb_ki_flags_check(flags, DIRECT))
2261 if (ll_layout_version_get(lli) == CL_LAYOUT_GEN_NONE)
2264 result = generic_file_read_iter(iocb, iter);
2266 /* If the first page is not in cache, generic_file_aio_read() will
2267 * return -ENODATA. Fall back to the full read path.
2268 * See corresponding code in ll_readpage().
2270 * If we raced with page deletion, we might get EIO. Rather than add
2271 * locking to the fast path for this rare case, fall back to the full
2272 * read path. (See vvp_io_read_start() for the rest of the handling.)
2274 if (result == -ENODATA || result == -EIO)
2278 ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
2279 ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
2280 LPROC_LL_READ_BYTES, result);
2287 * file_read_confine_iter() - Confine read iter lest read beyond the EOF
2288 * @env: execution environment for this thread
2289 * @iocb: kernel iocb
2290 * @to: reader iov_iter
2295 * * >0 @iocb->ki_pos has passed the EOF
2297 static int file_read_confine_iter(struct lu_env *env, struct kiocb *iocb,
2298 struct iov_iter *to)
2301 struct cl_attr *attr = vvp_env_new_attr(env);
2302 struct file *file = iocb->ki_filp;
2303 struct inode *inode = file_inode(file);
2304 struct ll_inode_info *lli = ll_i2info(inode);
2305 struct cl_object *obj = lli->lli_clob;
2306 loff_t read_end = iocb->ki_pos + iov_iter_count(to);
2315 io = vvp_env_new_io(env);
2317 rc = cl_io_init(env, io, CIT_MISC, obj);
2321 cl_object_attr_lock(lli->lli_clob);
2322 rc = cl_object_attr_get(env, lli->lli_clob, attr);
2323 cl_object_attr_unlock(lli->lli_clob);
2326 cl_io_fini(env, io);
2330 kms = attr->cat_kms;
2331 /* if read beyond end-of-file, adjust read count */
2332 if (kms > 0 && (iocb->ki_pos >= kms || read_end > kms)) {
2333 rc = ll_glimpse_size(inode);
2337 size = i_size_read(inode);
2338 if (iocb->ki_pos >= size || read_end > size) {
2340 DNAME": read [%llu, %llu] over eof, kms %llu, file_size %llu.\n",
2341 encode_fn_file(file), iocb->ki_pos, read_end,
2344 if (iocb->ki_pos >= size)
2347 if (read_end > size)
2348 iov_iter_truncate(to, size - iocb->ki_pos);
2355 #ifdef HAVE_IOV_ITER_INIT_DIRECTION
2356 # define ll_iov_iter_init(i, d, v, n, l) \
2357 iov_iter_init((i), (d), (v), (n), (l))
2359 # define ll_iov_iter_init(i, d, v, n, l) \
2360 iov_iter_init((i), (v), (n), (l), 0)
2363 typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);
2365 static ssize_t do_loop_readv_writev(struct kiocb *iocb, const struct iovec *iov,
2366 int rw, unsigned long nr_segs, iter_fn_t fn)
2368 const struct iovec *vector = iov;
2371 while (nr_segs > 0) {
2374 size_t len = vector->iov_len;
2376 ll_iov_iter_init(&i, rw, vector, 1, len);
2394 * Check whether we need to loop over the iovec and submit each segment separately.
2395 * This is needed when:
2396 * - Prior to the introduction of HAVE_DIO_ITER
2397 * - unaligned direct i/o
2398 * Returns true for the above cases and false otherwise.
2400 * Note that looping is always safe although it is preferable to pass the
2401 * iovec down unmodified when the appropriate support is available.
2403 static bool is_unaligned_directio(struct kiocb *iocb, struct iov_iter *iter,
2404 enum cl_io_type io_type)
2406 #ifdef HAVE_DIO_ITER
2407 struct file *file = iocb->ki_filp;
2408 int iocb_flags = iocb_ki_flags_get(file, iocb);
2409 bool direct_io = iocb_ki_flags_check(iocb_flags, DIRECT);
2410 bool unaligned = false;
2412 /* This I/O could be switched to direct i/o if the kernel is new enough */
2414 if (ll_hybrid_bio_dio_switch_check(file, iocb, io_type,
2415 iov_iter_count(iter)))
2420 if (iocb->ki_pos & ~PAGE_MASK)
2423 unaligned = ll_iov_iter_is_unaligned(iter);
2428 #endif /* HAVE_DIO_ITER */
2431 /* Read from a file (through the page cache) */
2432 static ssize_t do_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2435 struct vvp_io_args *args;
2436 struct file *file = iocb->ki_filp;
2437 loff_t orig_ki_pos = iocb->ki_pos;
2441 ktime_t kstart = ktime_get();
2443 bool stale_data = false;
2446 CDEBUG(D_VFSTRACE|D_IOTRACE,
2447 "START file "DNAME":"DFID", ppos: %lld, count: %zu\n",
2448 encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
2449 iocb->ki_pos, iov_iter_count(to));
2451 if (!iov_iter_count(to))
2454 env = cl_env_get(&refcheck);
2456 RETURN(PTR_ERR(env));
2458 result = file_read_confine_iter(env, iocb, to);
2461 else if (result > 0)
2464 CFS_FAIL_TIMEOUT_ORSET(OBD_FAIL_LLITE_READ_PAUSE, CFS_FAIL_ONCE,
2467 * Currently, when a PCC read fails, we do not fall back to the
2468 * normal read path, but just return the error.
2469 * The reason is that for RW-PCC the file data may have been
2470 * modified in the PCC and be inconsistent with the data on the
2471 * OSTs (or the file data may have been removed from the Lustre
2472 * file system); in that case, falling back to the normal read
2473 * path could return the wrong data.
2474 * TODO: for RO-PCC (readonly PCC), fall back to the normal read
2475 * path: read the data from the copy on the OSTs.
2477 result = pcc_file_read_iter(iocb, to, &cached);
2481 ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to));
2483 args = ll_env_args(env);
2484 args->u.normal.via_iter = to;
2485 args->u.normal.via_iocb = iocb;
2487 if (ll_hybrid_bio_dio_switch_check(file, iocb, CIT_READ,
2488 iov_iter_count(to)) ||
2489 CFS_FAIL_CHECK(OBD_FAIL_LLITE_FORCE_BIO_AS_DIO)) {
2491 iocb->ki_flags |= IOCB_DIRECT;
2492 CDEBUG(D_VFSTRACE, "switching to DIO\n");
2493 args->via_hybrid_switched = 1;
2497 result = ll_do_fast_read(iocb, to);
2498 if (result < 0 || iov_iter_count(to) == 0)
2501 rc2 = ll_file_io_generic(env, args, file, CIT_READ,
2502 &iocb->ki_pos, iov_iter_count(to));
2505 else if (result == 0)
2509 cl_env_put(env, &refcheck);
2511 if (stale_data && result > 0) {
2513 * we've reached EOF before the read, so the data read is cached
2516 iocb->ki_pos = orig_ki_pos;
2517 iov_iter_truncate(to, 0);
2522 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
2523 file->private_data, iocb->ki_pos, result,
2525 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ,
2526 ktime_us_delta(ktime_get(), kstart));
2530 "COMPLETED: file "DNAME":"DFID", ppos: %lld, count: %zu, rc = %zu\n",
2531 encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
2532 iocb->ki_pos, iov_iter_count(to), result);
2537 static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2539 if (iter->nr_segs > 1 && is_unaligned_directio(iocb, iter, CIT_READ))
2540 return do_loop_readv_writev(iocb, iter->__iov, READ,
2541 iter->nr_segs, do_file_read_iter);
2542 return do_file_read_iter(iocb, iter);
2546 * Similar trick to ll_do_fast_read, this improves write speed for tiny writes.
2547 * If a page is already in the page cache and dirty (and some other things -
2548 * See ll_tiny_write_begin for the instantiation of these rules), then we can
2549 * write to it without doing a full I/O, because Lustre already knows about it
2550 * and will write it out. This saves a lot of processing time.
2552 * All writes here are within one page, so exclusion is handled by the page
2553 * lock on the vm page. We do not do tiny writes for writes which touch
2554 * multiple pages because it's very unlikely multiple sequential pages are
2555 * already dirty.
2557 * We limit these to < PAGE_SIZE because PAGE_SIZE writes are relatively
2558 * common and are unlikely to target already-dirty pages.
2560 * Attribute updates are important here, we do them in ll_tiny_write_end.
2562 static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter)
2564 ssize_t count = iov_iter_count(iter);
2565 struct file *file = iocb->ki_filp;
2566 struct inode *inode = file_inode(file);
2567 bool lock_inode = !IS_NOSEC(inode);
2571 /* Restrict writes to a single page and < PAGE_SIZE. See the comment at
2572 * the top of this function for why.
2574 if (count >= PAGE_SIZE ||
2575 (iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
2577 /* With aarch64's 64KB pages, maxbytes can fall inside a single page. */
2578 if (iocb->ki_pos + count > ll_file_maxbytes(inode))
2581 if (unlikely(lock_inode))
2582 ll_inode_lock(inode);
2583 result = __generic_file_write_iter(iocb, iter);
2585 if (unlikely(lock_inode))
2586 ll_inode_unlock(inode);
2588 /* If the page is not already dirty, ll_tiny_write_begin returns
2589 * -ENODATA. We continue on to the normal write path.
2591 if (result == -ENODATA)
2595 ll_heat_add(inode, CIT_WRITE, result);
2596 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
2599 CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);
2604 /* Write to a file (through the page cache). */
2605 static ssize_t do_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2607 struct file *file = iocb->ki_filp;
2608 struct vvp_io_args *args;
2610 int flags = iocb_ki_flags_get(file, iocb);
2611 ktime_t kstart = ktime_get();
2612 bool hybrid_switched = false;
2613 ssize_t rc_tiny = 0;
2620 CDEBUG(D_VFSTRACE|D_IOTRACE,
2621 "START file "DNAME":"DFID", ppos: %lld, count: %zu\n",
2622 encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
2623 iocb->ki_pos, iov_iter_count(from));
2625 if (!iov_iter_count(from))
2626 GOTO(out, rc_normal = 0);
2629 * When a PCC write fails, we usually do not fall back to the normal
2630 * write path, but just return the error. There is a special case,
2631 * however, when the returned error code is -ENOSPC due to running out
2632 * of space on the PCC HSM backend. In that case, we fall back to the
2633 * normal I/O path and retry the I/O. As the file is in HSM released
2634 * state, the write will first restore the file data to the OSTs and
2635 * then be redone. The restore process will revoke the layout lock and
2636 * detach the file from the PCC cache automatically.
2638 result = pcc_file_write_iter(iocb, from, &cached);
2639 if (cached && result != -ENOSPC && result != -EDQUOT)
2640 GOTO(out, rc_normal = result);
2642 if (ll_hybrid_bio_dio_switch_check(file, iocb, CIT_WRITE,
2643 iov_iter_count(from)) ||
2644 CFS_FAIL_CHECK(OBD_FAIL_LLITE_FORCE_BIO_AS_DIO)) {
2646 iocb->ki_flags |= IOCB_DIRECT;
2647 CDEBUG(D_VFSTRACE, "switching to DIO\n");
2648 hybrid_switched = true;
2652 /* NB: we can't do direct IO for tiny writes because they use the page
2653 * cache, we can't do sync writes because tiny writes can't flush
2654 * pages, and we can't do append writes because we can't guarantee the
2655 * required DLM locks are held to protect file size.
2657 if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
2659 (ki_flag(DIRECT) | ki_flag(DSYNC) | ki_flag(SYNC) |
2661 rc_tiny = ll_do_tiny_write(iocb, from);
2663 /* In case of error, go on and try the normal write - only stop if the
2664 * tiny write completed the I/O.
2666 if (iov_iter_count(from) == 0)
2667 GOTO(out, rc_normal = rc_tiny);
2669 env = cl_env_get(&refcheck);
2671 RETURN(PTR_ERR(env));
2673 args = ll_env_args(env);
2674 args->u.normal.via_iter = from;
2675 args->u.normal.via_iocb = iocb;
2676 args->via_hybrid_switched = hybrid_switched;
2678 rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE,
2679 &iocb->ki_pos, iov_iter_count(from));
2681 /* On success, combine bytes written. */
2682 if (rc_tiny >= 0 && rc_normal > 0)
2683 rc_normal += rc_tiny;
2684 /* On error, only return error from normal write if tiny write did not
2685 * write any bytes. Otherwise return bytes written by tiny write.
2687 else if (rc_tiny > 0)
2688 rc_normal = rc_tiny;
2690 cl_env_put(env, &refcheck);
2692 if (rc_normal > 0) {
2693 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
2694 file->private_data, iocb->ki_pos,
2696 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE,
2697 ktime_us_delta(ktime_get(), kstart));
2701 "COMPLETED: file "DNAME":"DFID", ppos: %lld, count: %zu, rc = %zu\n",
2702 encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
2703 iocb->ki_pos, iov_iter_count(from), rc_normal);
2708 static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
2710 if (iter->nr_segs > 1 && is_unaligned_directio(iocb, iter, CIT_WRITE))
2711 return do_loop_readv_writev(iocb, iter->__iov, WRITE,
2712 iter->nr_segs, do_file_write_iter);
2713 return do_file_write_iter(iocb, iter);
2716 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
2718 * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
2720 static int ll_file_get_iov_count(const struct iovec *iov,
2721 unsigned long *nr_segs, size_t *count,
2727 for (seg = 0; seg < *nr_segs; seg++) {
2728 const struct iovec *iv = &iov[seg];
2731 * If any segment has a negative length, or the cumulative
2732 * length ever wraps negative then return -EINVAL.
2735 if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
2737 if (access_ok(access_flags, iv->iov_base, iv->iov_len))
2742 cnt -= iv->iov_len; /* This segment is no good */
2749 static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
2750 unsigned long nr_segs, loff_t pos)
2757 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_READ);
2764 ll_iov_iter_init(&to, READ, iov, nr_segs, iov_count);
2766 RETURN(ll_file_read_iter(iocb, &to));
2769 static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
2772 struct iovec iov = { .iov_base = buf, .iov_len = count };
2780 init_sync_kiocb(&kiocb, file);
2781 kiocb.ki_pos = *ppos;
2782 #ifdef HAVE_KIOCB_KI_LEFT
2783 kiocb.ki_left = count;
2784 #elif defined(HAVE_KI_NBYTES)
2785 kiocb.ki_nbytes = count;
2788 result = ll_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
2789 *ppos = kiocb.ki_pos;
2794 /* Write to a file (through the page cache). AIO stuff */
2795 static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2796 unsigned long nr_segs, loff_t pos)
2798 struct iov_iter from;
2803 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_WRITE);
2810 ll_iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
2812 RETURN(ll_file_write_iter(iocb, &from));
2815 static ssize_t ll_file_write(struct file *file, const char __user *buf,
2816 size_t count, loff_t *ppos)
2818 struct iovec iov = { .iov_base = (void __user *)buf,
2827 init_sync_kiocb(&kiocb, file);
2828 kiocb.ki_pos = *ppos;
2829 #ifdef HAVE_KIOCB_KI_LEFT
2830 kiocb.ki_left = count;
2831 #elif defined(HAVE_KI_NBYTES)
2832 kiocb.ki_nbytes = count;
2835 result = ll_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
2836 *ppos = kiocb.ki_pos;
2840 #endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
2842 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
2843 __u64 flags, struct lov_user_md *lum,
2846 struct lookup_intent oit = {
2848 .it_open_flags = flags | MDS_OPEN_BY_FID,
2853 if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
2854 le32_to_cpu(LOV_MAGIC_MAGIC)) {
2855 /* this code will only exist for big-endian systems */
2856 lustre_swab_lov_user_md(lum, 0);
2859 ll_inode_size_lock(inode);
2860 rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
2862 GOTO(out_unlock, rc);
2864 ll_release_openhandle(dentry, &oit);
2867 ll_inode_size_unlock(inode);
2868 ll_intent_release(&oit);
2873 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
2874 struct lov_mds_md **lmmp, int *lmm_size,
2875 struct ptlrpc_request **request)
2877 struct ll_sb_info *sbi = ll_i2sbi(inode);
2878 struct mdt_body *body;
2879 struct lov_mds_md *lmm = NULL;
2880 struct ptlrpc_request *req = NULL;
2881 struct md_op_data *op_data;
2886 rc = ll_get_default_mdsize(sbi, &lmmsize);
2890 namesize = filename ? strlen(filename) : 0;
2891 op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namesize,
2892 lmmsize, LUSTRE_OPC_ANY, NULL);
2893 if (IS_ERR(op_data))
2894 RETURN(PTR_ERR(op_data));
2896 op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
2897 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
2898 ll_finish_md_op_data(op_data);
2900 CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
2901 encode_fn_len(filename, namesize), rc);
2905 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2906 LASSERT(body != NULL); /* checked by mdc_getattr_name */
2908 lmmsize = body->mbo_eadatasize;
2910 if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
2912 GOTO(out, rc = -ENODATA);
2914 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
2915 LASSERT(lmm != NULL);
2917 if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
2918 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
2919 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_COMP_V1) &&
2920 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_FOREIGN))
2921 GOTO(out, rc = -EPROTO);
2924 * This is coming from the MDS, so is probably in
2925 * little endian. We convert it to host endian before
2926 * passing it to userspace.
2928 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
2929 int stripe_count = 0;
2931 if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
2932 lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
2933 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
2934 if (le32_to_cpu(lmm->lmm_pattern) &
2935 LOV_PATTERN_F_RELEASED)
2937 lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
2939 /* if this function is called for a directory, we should
2940 * avoid swabbing non-existent lsm objects
2942 if (lmm->lmm_magic == LOV_MAGIC_V1 &&
2943 S_ISREG(body->mbo_mode))
2944 lustre_swab_lov_user_md_objects(
2945 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
2947 else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
2948 S_ISREG(body->mbo_mode))
2949 lustre_swab_lov_user_md_objects(
2950 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
2952 } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
2953 lustre_swab_lov_comp_md_v1(
2954 (struct lov_comp_md_v1 *)lmm);
2958 if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) {
2959 struct lov_comp_md_v1 *comp_v1 = NULL;
2960 struct lov_comp_md_entry_v1 *ent;
2961 struct lov_user_md_v1 *v1 = NULL;
2965 comp_v1 = (struct lov_comp_md_v1 *)lmm;
2966 /* Dump the striping information */
2967 for (; i < comp_v1->lcm_entry_count; i++) {
2968 ent = &comp_v1->lcm_entries[i];
2969 off = ent->lcme_offset;
2970 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2972 "comp[%d]: stripe_count=%u, stripe_size=%u\n",
2973 i, v1->lmm_stripe_count, v1->lmm_stripe_size);
2975 if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_LOV_COMP_MAGIC) &&
2976 (cfs_fail_val == i + 1)))
2977 v1->lmm_magic = LOV_MAGIC_BAD;
2979 if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_LOV_COMP_PATTERN) &&
2980 (cfs_fail_val == i + 1)))
2981 v1->lmm_pattern = LOV_PATTERN_BAD;
2985 GOTO(out, rc = -EINVAL);
2987 lmm->lmm_stripe_count = v1->lmm_stripe_count;
2988 lmm->lmm_stripe_size = v1->lmm_stripe_size;
2990 * Return valid stripe_count and stripe_size instead of 0 for
2991 * DoM files to avoid divide-by-zero for older userspace that
2992 * calls this ioctl, e.g. lustre ADIO driver.
2994 if (lmm->lmm_stripe_count == 0)
2995 lmm->lmm_stripe_count = 1;
2996 if (lmm->lmm_stripe_size == 0) {
2997 /* Since the first component of the file data is placed
2998 * on the MDT for faster access, the stripe_size of the
2999 * second component is what applications which are
3002 if (lmm->lmm_pattern & LOV_PATTERN_MDT)
3003 i = comp_v1->lcm_entry_count > 1 ? 1 : 0;
3005 i = comp_v1->lcm_entry_count > 1 ?
3006 comp_v1->lcm_entry_count - 1 : 0;
3007 ent = &comp_v1->lcm_entries[i];
3008 off = ent->lcme_offset;
3009 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
3010 lmm->lmm_stripe_size = v1->lmm_stripe_size;
3015 *lmm_size = lmmsize;
3020 static int ll_lov_setea(struct inode *inode, struct file *file,
3023 __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
3024 struct lov_user_md *lump;
3025 ssize_t lum_size = sizeof(*lump) + sizeof(struct lov_user_ost_data);
3029 if (!capable(CAP_SYS_ADMIN))
3032 OBD_ALLOC_LARGE(lump, lum_size);
3036 if (copy_from_user(lump, arg, lum_size))
3037 GOTO(out_lump, rc = -EFAULT);
3039 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, lump,
3041 cl_lov_delay_create_clear(&file->f_flags);
3044 OBD_FREE_LARGE(lump, lum_size);
3048 static int ll_file_getstripe(struct inode *inode, void __user *lum, size_t size)
3055 /* exit before doing any work if pointer is bad */
3056 if (unlikely(!ll_access_ok(lum, sizeof(struct lov_user_md))))
3059 env = cl_env_get(&refcheck);
3061 RETURN(PTR_ERR(env));
3063 rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum, size);
3064 cl_env_put(env, &refcheck);
3068 static ssize_t ll_lov_setstripe(struct inode *inode, struct file *file,
3071 struct lov_user_md __user *lum = arg;
3072 struct lov_user_md *klum;
3075 __u64 flags = FMODE_WRITE;
3078 lum_size = ll_copy_user_md(lum, &klum);
3082 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, klum,
3087 rc = put_user(0, &lum->lmm_stripe_count);
3091 rc = ll_layout_refresh(inode, &gen);
3095 rc = ll_file_getstripe(inode, arg, lum_size);
3096 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
3097 ll_i2info(inode)->lli_clob) {
3098 struct iattr attr = { 0 };
3100 rc = cl_setattr_ost(inode, &attr, OP_XVALID_FLAGS,
3104 cl_lov_delay_create_clear(&file->f_flags);
3107 OBD_FREE_LARGE(klum, lum_size);
3113 ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
3115 struct ll_inode_info *lli = ll_i2info(inode);
3116 struct cl_object *obj = lli->lli_clob;
3117 struct ll_file_data *lfd = file->private_data;
3118 struct ll_grouplock grouplock;
3125 CWARN("%s: group id for group lock on "DFID" is 0: rc = %d\n",
3126 ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid), rc);
3130 if (ll_file_nolock(file))
3131 RETURN(-EOPNOTSUPP);
3133 if (file->f_flags & O_NONBLOCK) {
3134 if (!mutex_trylock(&lli->lli_group_mutex))
3137 mutex_lock(&lli->lli_group_mutex);
3140 if (lfd->lfd_file_flags & LL_FILE_GROUP_LOCKED) {
3142 CWARN("%s: group lock already exists with gid %lu on "DFID": rc = %d\n",
3143 ll_i2sbi(inode)->ll_fsname, lfd->fd_grouplock.lg_gid,
3144 PFID(&lli->lli_fid), rc);
3147 if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
3148 if (file->f_flags & O_NONBLOCK)
3149 GOTO(out, rc = -EAGAIN);
3150 mutex_unlock(&lli->lli_group_mutex);
3151 wait_var_event(&lli->lli_group_users, !lli->lli_group_users);
3152 GOTO(retry, rc = 0);
3154 LASSERT(lfd->fd_grouplock.lg_lock == NULL);
3157 * XXX: the group lock needs to protect all OST objects, while PFL
3158 * can add new OST objects during the IO, so we instantiate
3159 * all OST objects before taking the group lock.
3164 struct cl_layout cl = {
3165 .cl_is_composite = false,
3167 struct lu_extent ext = {
3169 .e_end = OBD_OBJECT_EOF,
3172 env = cl_env_get(&refcheck);
3174 GOTO(out, rc = PTR_ERR(env));
3176 rc = cl_object_layout_get(env, obj, &cl);
3177 if (rc >= 0 && cl.cl_is_composite)
3178 rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
3181 cl_env_put(env, &refcheck);
3186 rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
3187 arg, (file->f_flags & O_NONBLOCK), &grouplock);
3192 lfd->lfd_file_flags |= LL_FILE_GROUP_LOCKED;
3193 lfd->fd_grouplock = grouplock;
3194 if (lli->lli_group_users == 0)
3195 lli->lli_group_gid = grouplock.lg_gid;
3196 lli->lli_group_users++;
3198 CDEBUG(D_INFO, "group lock %lu obtained on "DFID"\n",
3199 arg, PFID(&lli->lli_fid));
3201 mutex_unlock(&lli->lli_group_mutex);
3206 static int ll_put_grouplock(struct inode *inode, struct file *file,
3209 struct ll_inode_info *lli = ll_i2info(inode);
3210 struct ll_file_data *lfd = file->private_data;
3211 struct ll_grouplock grouplock;
3215 mutex_lock(&lli->lli_group_mutex);
3216 if (!(lfd->lfd_file_flags & LL_FILE_GROUP_LOCKED)) {
3218 CWARN("%s: no group lock held on "DFID": rc = %d\n",
3219 ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid), rc);
3223 LASSERT(lfd->fd_grouplock.lg_lock != NULL);
3225 if (lfd->fd_grouplock.lg_gid != arg) {
3227 CWARN("%s: group lock %lu doesn't match current id %lu on "DFID": rc = %d\n",
3228 ll_i2sbi(inode)->ll_fsname, arg, lfd->fd_grouplock.lg_gid,
3229 PFID(&lli->lli_fid), rc);
3233 grouplock = lfd->fd_grouplock;
3234 memset(&lfd->fd_grouplock, 0, sizeof(lfd->fd_grouplock));
3235 lfd->lfd_file_flags &= ~LL_FILE_GROUP_LOCKED;
3237 cl_put_grouplock(&grouplock);
3239 lli->lli_group_users--;
3240 if (lli->lli_group_users == 0) {
3241 lli->lli_group_gid = 0;
3242 wake_up_var(&lli->lli_group_users);
3244 CDEBUG(D_INFO, "group lock %lu on "DFID" released\n", arg,
3245 PFID(&lli->lli_fid));
3248 mutex_unlock(&lli->lli_group_mutex);
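/*
 * Example (userspace sketch, not part of this kernel file): taking and
 * dropping a Lustre group lock around I/O that must exclude other group
 * users, via the LL_IOC_GROUP_LOCK/LL_IOC_GROUP_UNLOCK ioctls handled
 * above. The I/O helper name and error handling are illustrative only.
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/lustre/lustre_user.h>
 *
 *	static int do_io_under_grouplock(int fd, long gid)
 *	{
 *		int rc;
 *
 *		if (ioctl(fd, LL_IOC_GROUP_LOCK, gid) < 0)
 *			return -errno;
 *		rc = do_application_io(fd);	// hypothetical I/O helper
 *		if (ioctl(fd, LL_IOC_GROUP_UNLOCK, gid) < 0 && rc >= 0)
 *			rc = -errno;
 *		return rc;
 *	}
 */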
3254 * ll_release_openhandle() - Close inode open handle
3255 * @dentry: dentry which contains the inode
3256 * @it: [in,out] intent which contains open info and result
3262 int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
3264 struct inode *inode = dentry->d_inode;
3265 struct obd_client_handle *och;
3271 /* Root? Do nothing. */
3272 if (is_root_inode(inode))
3275 /* No open handle to close? Move away */
3276 if (!it_disposition(it, DISP_OPEN_OPEN))
3279 LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
3281 OBD_ALLOC(och, sizeof(*och));
3283 GOTO(out, rc = -ENOMEM);
3285 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
3289 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
3291 /* this one is in place of ll_file_open */
3292 if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
3293 ptlrpc_req_put(it->it_request);
3294 it_clear_disposition(it, DISP_ENQ_OPEN_REF);
3300 * Get the size of the inode for which the FIEMAP mapping is requested.
3301 * Make the FIEMAP get_info call and return the result.
3302 * \param fiemap kernel buffer to hold extents
3303 * \param num_bytes kernel buffer size
3305 static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
3311 struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
3314 /* Checks for fiemap flags */
3315 if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
3316 fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
3320 /* Check for FIEMAP_FLAG_SYNC */
3321 if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
3322 rc = filemap_write_and_wait(inode->i_mapping);
3327 env = cl_env_get(&refcheck);
3329 RETURN(PTR_ERR(env));
3331 if (i_size_read(inode) == 0) {
3332 rc = ll_glimpse_size(inode);
3337 fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLPROJID;
3338 obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
3339 obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
3341 /* If filesize is 0, then there would be no objects for mapping */
3342 if (fmkey.lfik_oa.o_size == 0) {
3343 fiemap->fm_mapped_extents = 0;
3347 fmkey.lfik_fiemap = *fiemap;
3349 rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
3350 &fmkey, fiemap, &num_bytes);
3352 cl_env_put(env, &refcheck);
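/*
 * Example (userspace sketch, not part of this kernel file): querying the
 * extent mapping that ll_do_fiemap() ultimately serves, using the standard
 * FS_IOC_FIEMAP ioctl from <linux/fiemap.h>. The extent buffer size is
 * illustrative.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	static int print_extent_count(int fd)
 *	{
 *		struct fiemap *fm;
 *		int rc;
 *
 *		fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
 *		if (!fm)
 *			return -ENOMEM;
 *		fm->fm_start = 0;
 *		fm->fm_length = FIEMAP_MAX_OFFSET;
 *		fm->fm_flags = FIEMAP_FLAG_SYNC;
 *		fm->fm_extent_count = 32;
 *		rc = ioctl(fd, FS_IOC_FIEMAP, fm);
 *		if (!rc)
 *			printf("%u extents mapped\n", fm->fm_mapped_extents);
 *		free(fm);
 *		return rc ? -errno : 0;
 *	}
 */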
3356 static int fid2path_for_enc_file(struct inode *parent, char *gfpath,
3359 struct dentry *de = NULL, *de_parent = d_find_any_alias(parent);
3360 struct llcrypt_str lltr = LLTR_INIT(NULL, 0);
3361 struct llcrypt_str de_name;
3362 char *p, *ptr = gfpath;
3363 size_t len = 0, len_orig = 0;
3364 int enckey = -1, nameenc = -1;
3368 while ((p = strsep(&gfpath, "/")) != NULL) {
3376 len_orig = strlen(p);
3378 rc = sscanf(p, "["SFID"]", RFID(&fid));
3380 p = strchr(p, ']') + 1;
3386 if (!IS_ENCRYPTED(parent)) {
3387 if (gfpathlen < len + 1) {
3392 memmove(ptr, p, len);
3396 gfpathlen -= len + 1;
3400 /* From here, we know parent is encrypted */
3403 rc = llcrypt_prepare_readdir(parent);
3404 if (rc && rc != -ENOKEY) {
3411 if (llcrypt_has_encryption_key(parent))
3417 llcrypt_policy_has_filename_enc(parent);
3420 /* Even if names are not encrypted, we still need to call
3421 * ll_fname_disk_to_usr in order to decode names as they are
3422 * coming from the wire.
3424 rc = llcrypt_fname_alloc_buffer(parent, NAME_MAX + 1, &lltr);
3432 rc = ll_fname_disk_to_usr(parent, 0, 0, &de_name,
3435 llcrypt_fname_free_buffer(&lltr);
3439 lltr.name[lltr.len] = '\0';
3441 if (lltr.len <= len_orig && gfpathlen >= lltr.len + 1) {
3442 memcpy(ptr, lltr.name, lltr.len);
3447 gfpathlen -= lltr.len + 1;
3451 llcrypt_fname_free_buffer(&lltr);
3453 if (rc == -EOVERFLOW) {
3460 /* We reached the end of the string, which means
3461 * we are dealing with the last component in the path.
3462 * So save a useless lookup and exit.
3468 if (enckey == 0 || nameenc == 0)
3471 ll_inode_lock(parent);
3472 de = lookup_one_len(p, de_parent, len);
3473 ll_inode_unlock(parent);
3474 if (IS_ERR_OR_NULL(de) || !de->d_inode) {
3480 parent = de->d_inode;
3487 if (!IS_ERR_OR_NULL(de))
3493 * The FID provided could be either an MDT FID or an OST FID; both need to
3495 * 1. query the fldb server for the actual type of this FID
3496 * 2a. if it's an OST-FID, try OSC_IOCONTROL(FID2PATH) with given FID, which
3497 * should return the corresponding parent FID, i.e. the MDT FID
3498 * 2b. otherwise it's a MDT FID already, continue to step 3
3499 * 3. take the MDT FID calling MDC_IOCONTROL(FID2PATH)
3501 int __ll_fid2path(struct inode *inode, struct getinfo_fid2path *gfout,
3502 size_t outsize, __u32 pathlen_orig)
3504 struct obd_export *exp = ll_i2mdexp(inode);
3505 struct obd_device *md_exp = ll_i2sbi(inode)->ll_md_exp->exp_obd;
3506 struct lmv_obd *lmv = &md_exp->u.lmv;
3507 struct lu_seq_range res = {0};
3510 rc = fld_client_lookup(&lmv->lmv_fld, fid_seq(&gfout->gf_fid),
3511 LU_SEQ_RANGE_ANY, NULL, &res);
3514 "%s: Error looking for target idx. Seq %#llx: rc=%d\n",
3515 md_exp->obd_name, fid_seq(&gfout->gf_fid), rc);
3519 /* Call osc_iocontrol */
3520 if (res.lsr_flags == LU_SEQ_RANGE_OST) {
3521 __u64 gf_recno = gfout->gf_recno;
3522 __u32 gf_linkno = gfout->gf_linkno;
3523 struct obd_export *dt_exp = ll_i2dtexp(inode);
3525 /* Pass 'ost_idx' down to the lower layer via u.gf_root_fid,
3526 * which is a non-functional field in the OST context
3528 gfout->gf_u.gf_root_fid->f_oid = res.lsr_index;
3530 rc = obd_iocontrol(OBD_IOC_FID2PATH, dt_exp, outsize, gfout,
3534 "%s: Err on FID2PATH(OST), Seq %#llx: rc=%d\n",
3535 md_exp->obd_name, fid_seq(&gfout->gf_fid), rc);
3538 gfout->gf_recno = gf_recno;
3539 gfout->gf_linkno = gf_linkno;
3542 /* Append the root FID after gfout to let the MDT know the root FID so
3543 * that it can look up the correct path; this is mainly for fileset.
3544 * Old servers without fileset mount support will ignore this.
3546 *gfout->gf_u.gf_root_fid = *ll_inode2fid(inode);
3548 /* Call mdc_iocontrol */
3549 rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
3551 if (!rc && gfout->gf_pathlen && gfout->gf_u.gf_path[0] == '/') {
3552 /* by convention, server side (mdt_path_current()) puts
3553 * a leading '/' to tell client that we are dealing with
3556 rc = fid2path_for_enc_file(inode, gfout->gf_u.gf_path,
3558 if (!rc && strlen(gfout->gf_u.gf_path) > pathlen_orig)
3565 int ll_fid2path(struct inode *inode, void __user *arg)
3567 const struct getinfo_fid2path __user *gfin = arg;
3568 __u32 pathlen, pathlen_orig;
3569 struct getinfo_fid2path *gfout;
3574 if (!capable(CAP_DAC_READ_SEARCH) &&
3575 !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
3578 /* Only need to get the buflen */
3579 if (get_user(pathlen, &gfin->gf_pathlen))
3582 pathlen_orig = pathlen;
3585 outsize = sizeof(*gfout) + pathlen;
3586 OBD_ALLOC(gfout, outsize);
3590 if (copy_from_user(gfout, arg, sizeof(*gfout)))
3591 GOTO(gf_free, rc = -EFAULT);
3593 gfout->gf_pathlen = pathlen;
3594 rc = __ll_fid2path(inode, gfout, outsize, pathlen_orig);
3598 if (copy_to_user(arg, gfout, sizeof(*gfout) + pathlen_orig))
3602 OBD_FREE(gfout, outsize);
3603 if (rc == -ENAMETOOLONG) {
3604 pathlen += PATH_MAX;
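/*
 * Example (userspace sketch, not part of this kernel file): resolving a FID
 * to a path with liblustreapi, which wraps the FID2PATH ioctl above and is
 * the usual consumer of the -ENAMETOOLONG retry. Assumes the documented
 * llapi_fid2path() interface from <lustre/lustreapi.h>.
 *
 *	#include <stdio.h>
 *	#include <limits.h>
 *	#include <lustre/lustreapi.h>
 *
 *	static int fid_to_path(const char *mnt, const char *fidstr)
 *	{
 *		char path[PATH_MAX];
 *		long long recno = -1;
 *		int linkno = 0;
 *		int rc;
 *
 *		rc = llapi_fid2path(mnt, fidstr, path, sizeof(path),
 *				    &recno, &linkno);
 *		if (!rc)
 *			printf("%s\n", path);
 *		return rc;
 *	}
 */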
3611 ll_ioc_data_version(struct inode *inode, struct ioc_data_version *ioc)
3613 struct cl_object *obj = ll_i2info(inode)->lli_clob;
3620 ioc->idv_version = 0;
3621 ioc->idv_layout_version = UINT_MAX;
3623 /* If no file object is initialized, we consider its version to be 0. */
3627 env = cl_env_get(&refcheck);
3629 RETURN(PTR_ERR(env));
3631 io = vvp_env_new_io(env);
3633 io->u.ci_data_version.dv_data_version = 0;
3634 io->u.ci_data_version.dv_layout_version = UINT_MAX;
3635 io->u.ci_data_version.dv_flags = ioc->idv_flags;
3638 if (cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj) == 0)
3639 result = cl_io_loop(env, io);
3641 result = io->ci_result;
3643 ioc->idv_version = io->u.ci_data_version.dv_data_version;
3644 ioc->idv_layout_version = io->u.ci_data_version.dv_layout_version;
3645 cl_io_fini(env, io);
3647 if (unlikely(io->ci_need_restart))
3650 cl_env_put(env, &refcheck);
3656 * ll_data_version() - retrieve the data version of a file
3657 * @inode: inode of the file for which the data version is being queried
3658 * @data_version: store the retrieved data version
3659 * @flags: whether to sync on the OST side;
3661 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
3662 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
3664 * This value is computed using the stripe object versions on the OSTs.
3665 * The version is computed using server-side locking.
3671 int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
3673 struct ioc_data_version ioc = { .idv_flags = flags };
3676 rc = ll_ioc_data_version(inode, &ioc);
3678 *data_version = ioc.idv_version;
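/*
 * Example (userspace sketch, not part of this kernel file): fetching a
 * file's data version through the LL_IOC_DATA_VERSION ioctl, flushing
 * dirty pages first with LL_DV_RD_FLUSH as described above. Field and
 * flag names are as used in this file; error handling is illustrative.
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/lustre/lustre_user.h>
 *
 *	static int get_data_version(int fd, unsigned long long *dv)
 *	{
 *		struct ioc_data_version idv = { .idv_flags = LL_DV_RD_FLUSH };
 *
 *		if (ioctl(fd, LL_IOC_DATA_VERSION, &idv) < 0)
 *			return -errno;
 *		*dv = idv.idv_version;
 *		return 0;
 *	}
 */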
3683 /* Trigger a HSM release request for the provided inode. */
3684 int ll_hsm_release(struct inode *inode)
3687 struct obd_client_handle *och = NULL;
3688 __u64 data_version = 0;
3693 CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
3694 ll_i2sbi(inode)->ll_fsname,
3695 PFID(&ll_i2info(inode)->lli_fid));
3697 och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
3699 GOTO(out, rc = PTR_ERR(och));
3701 /* Grab latest data_version and [am]time values */
3702 rc = ll_data_version(inode, &data_version,
3703 LL_DV_WR_FLUSH | LL_DV_SZ_UPDATE);
3707 env = cl_env_get(&refcheck);
3709 GOTO(out, rc = PTR_ERR(env));
3711 rc = ll_merge_attr(env, inode);
3712 cl_env_put(env, &refcheck);
3714 /* If an error happens, we have the wrong size for the file.
3720 /* Release the file. NB: lease lock handle is released in
3721 * mdc_hsm_release_pack() because we still need it to pack
3722 * l_remote_handle to MDT.
3724 rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
3730 if (och != NULL && !IS_ERR(och)) /* close the file */
3731 ll_lease_close(och, inode, NULL);
3736 struct ll_swap_stack {
3739 struct inode *inode1;
3740 struct inode *inode2;
3745 static int ll_swap_layouts(struct file *file1, struct file *file2,
3746 struct lustre_swap_layouts *lsl)
3748 struct mdc_swap_layouts msl;
3749 struct md_op_data *op_data;
3752 struct ll_swap_stack *llss = NULL;
3755 OBD_ALLOC_PTR(llss);
3759 llss->inode1 = file_inode(file1);
3760 llss->inode2 = file_inode(file2);
3762 rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
3766 /* we use two bools because they are easier to swap than two bits */
3767 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
3768 llss->check_dv1 = true;
3770 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
3771 llss->check_dv2 = true;
3773 /* we cannot use lsl->sl_dvX directly because we may swap them */
3774 llss->dv1 = lsl->sl_dv1;
3775 llss->dv2 = lsl->sl_dv2;
3777 rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
3778 if (rc == 0) /* same file, done! */
3781 if (rc < 0) { /* sequentialize it */
3782 swap(llss->inode1, llss->inode2);
3784 swap(llss->dv1, llss->dv2);
3785 swap(llss->check_dv1, llss->check_dv2);
3789 if (gid != 0) { /* application asks to flush dirty cache */
3790 rc = ll_get_grouplock(llss->inode1, file1, gid);
3794 rc = ll_get_grouplock(llss->inode2, file2, gid);
3796 ll_put_grouplock(llss->inode1, file1, gid);
3801 /* ultimate check: before swapping the layouts we check whether the
3802 * data version has changed (if requested)
3804 if (llss->check_dv1) {
3805 rc = ll_data_version(llss->inode1, &dv, 0);
3808 if (dv != llss->dv1)
3809 GOTO(putgl, rc = -EAGAIN);
3812 if (llss->check_dv2) {
3813 rc = ll_data_version(llss->inode2, &dv, 0);
3816 if (dv != llss->dv2)
3817 GOTO(putgl, rc = -EAGAIN);
3820 /* struct md_op_data is used to send the swap args to the MDT;
3821 * only the flags are missing, so we pass struct mdc_swap_layouts
3822 * through md_op_data->op_data
3824 * flags from user space have to be converted before they are sent to
3825 * the server; no flag is sent today, they are only used on the client
3829 op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
3830 0, LUSTRE_OPC_ANY, &msl);
3831 if (IS_ERR(op_data))
3832 GOTO(free, rc = PTR_ERR(op_data));
3834 rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
3835 sizeof(*op_data), op_data, NULL);
3836 ll_finish_md_op_data(op_data);
3843 ll_put_grouplock(llss->inode2, file2, gid);
3844 ll_put_grouplock(llss->inode1, file1, gid);
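/*
 * Example (userspace sketch, not part of this kernel file): requesting a
 * layout swap between two open files with LL_IOC_LOV_SWAP_LAYOUTS, using
 * the data-version check and group-lock flush described above. Field names
 * are assumed to match struct lustre_swap_layouts in the Lustre user
 * headers (llapi_swap_layouts() in liblustreapi wraps this ioctl); the gid
 * value is illustrative.
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/lustre/lustre_user.h>
 *
 *	static int swap_layouts(int fd1, int fd2, __u64 dv1)
 *	{
 *		struct lustre_swap_layouts lsl = {
 *			.sl_fd    = fd2,
 *			.sl_flags = SWAP_LAYOUTS_CHECK_DV1,
 *			.sl_gid   = 1234,	// nonzero: flush via group lock
 *			.sl_dv1   = dv1,
 *		};
 *
 *		return ioctl(fd1, LL_IOC_LOV_SWAP_LAYOUTS, &lsl) < 0 ?
 *		       -errno : 0;
 *	}
 */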
3853 int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
3855 struct obd_export *exp = ll_i2mdexp(inode);
3856 struct md_op_data *op_data;
3860 /* Detect out-of-range masks */
3861 if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
3864 /* Non-root users are forbidden to set or clear flags which are
3865 * NOT defined in HSM_USER_MASK.
3867 if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
3868 !capable(CAP_SYS_ADMIN))
3871 if (!exp_connect_archive_id_array(exp)) {
3872 /* Detect out-of-range archive id */
3873 if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
3874 (hss->hss_archive_id > LL_HSM_ORIGIN_MAX_ARCHIVE))
3878 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3879 LUSTRE_OPC_ANY, hss);
3880 if (IS_ERR(op_data))
3881 RETURN(PTR_ERR(op_data));
3883 rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, exp, sizeof(*op_data),
3886 ll_finish_md_op_data(op_data);
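/*
 * Example (userspace sketch, not part of this kernel file): marking a file
 * dirty in HSM via the LL_IOC_HSM_STATE_SET ioctl that lands in
 * ll_hsm_state_set() above. The mask values are from the HSM flags this
 * function validates; error handling is illustrative.
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/lustre/lustre_user.h>
 *
 *	static int hsm_set_dirty(int fd)
 *	{
 *		struct hsm_state_set hss = {
 *			.hss_valid   = HSS_SETMASK,
 *			.hss_setmask = HS_DIRTY,
 *		};
 *
 *		return ioctl(fd, LL_IOC_HSM_STATE_SET, &hss) < 0 ? -errno : 0;
 *	}
 */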
3891 static int ll_hsm_data_version_sync(struct inode *inode, __u64 data_version)
3893 struct obd_export *exp = ll_i2mdexp(inode);
3894 struct md_op_data *op_data;
3901 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3902 LUSTRE_OPC_ANY, NULL);
3903 if (IS_ERR(op_data))
3904 RETURN(PTR_ERR(op_data));
3906 op_data->op_data_version = data_version;
3908 rc = obd_iocontrol(LL_IOC_HSM_DATA_VERSION, exp, sizeof(*op_data),
3911 ll_finish_md_op_data(op_data);
3916 static int ll_hsm_import(struct inode *inode, struct file *file,
3917 struct hsm_user_import *hui)
3919 struct hsm_state_set *hss = NULL;
3920 struct iattr *attr = NULL;
3924 if (!S_ISREG(inode->i_mode))
3930 GOTO(out, rc = -ENOMEM);
3932 hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
3933 hss->hss_archive_id = hui->hui_archive_id;
3934 hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
3935 rc = ll_hsm_state_set(inode, hss);
3939 OBD_ALLOC_PTR(attr);
3941 GOTO(out, rc = -ENOMEM);
3943 attr->ia_mode = hui->hui_mode & (0777);
3944 attr->ia_mode |= S_IFREG;
3945 attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
3946 attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
3947 attr->ia_size = hui->hui_size;
3948 attr->ia_mtime.tv_sec = hui->hui_mtime;
3949 attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
3950 attr->ia_atime.tv_sec = hui->hui_atime;
3951 attr->ia_atime.tv_nsec = hui->hui_atime_ns;
3953 attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
3954 ATTR_UID | ATTR_GID |
3955 ATTR_MTIME | ATTR_MTIME_SET |
3956 ATTR_ATIME | ATTR_ATIME_SET;
3959 /* inode lock owner set in ll_setattr_raw() */
3960 rc = ll_setattr_raw(file_dentry(file), attr, 0, true);
3963 inode_unlock(inode);
3973 static inline long ll_lease_type_from_open_flags(enum mds_open_flags
3976 return ((fd_open_mode & MDS_FMODE_READ) ? LL_LEASE_RDLCK : 0) |
3977 ((fd_open_mode & MDS_FMODE_WRITE) ? LL_LEASE_WRLCK : 0);
3980 static int ll_file_futimes_3(struct file *file, const struct ll_futimes_3 *lfu)
3982 struct inode *inode = file_inode(file);
3984 .ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
3985 ATTR_MTIME | ATTR_MTIME_SET |
3988 .tv_sec = lfu->lfu_atime_sec,
3989 .tv_nsec = lfu->lfu_atime_nsec,
3992 .tv_sec = lfu->lfu_mtime_sec,
3993 .tv_nsec = lfu->lfu_mtime_nsec,
3996 .tv_sec = lfu->lfu_ctime_sec,
3997 .tv_nsec = lfu->lfu_ctime_nsec,
4003 if (!capable(CAP_SYS_ADMIN))
4006 if (!S_ISREG(inode->i_mode))
4010 /* inode lock owner set in ll_setattr_raw() */
4011 rc = ll_setattr_raw(file_dentry(file), &ia, OP_XVALID_CTIME_SET,
4013 inode_unlock(inode);
4018 static enum cl_lock_mode cl_mode_user_to_kernel(enum lock_mode_user mode)
4021 case MODE_READ_USER:
4023 case MODE_WRITE_USER:
4030 static const char *const user_lockname[] = LOCK_MODE_NAMES;
4033 * ll_file_lock_ahead() -
4034 * @file: file this ladvise lock request is on
4035 * @ladvise: ladvise struct describing this lock request
4037 * Used to allow the upper layers of the client to request an LDLM lock
4038 * without doing an actual read or write.
4040 * Used for ladvise lockahead to manually request specific locks.
4043 * * %0: success, no detailed result available (sync requests
4044 * and requests sent to the server [not handled locally]
4045 * cannot return detailed results)
4046 * * %<0: negative errno on error
4047 * * LLA_RESULT_{SAME,DIFFERENT} - detailed result of the lock request,
4048 * see definitions for details.
4050 int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise)
4052 struct lu_env *env = NULL;
4053 struct cl_io *io = NULL;
4054 struct cl_lock *lock = NULL;
4055 struct cl_lock_descr *descr = NULL;
4056 struct dentry *dentry = file->f_path.dentry;
4057 struct inode *inode = dentry->d_inode;
4058 enum cl_lock_mode cl_mode;
4059 off_t start = ladvise->lla_start;
4060 off_t end = ladvise->lla_end;
4066 "Lock request: file="DNAME", inode=%p, mode=%s start=%llu, end=%llu\n",
4067 encode_fn_dentry(dentry), dentry->d_inode,
4068 user_lockname[ladvise->lla_lockahead_mode], (__u64) start,
4071 cl_mode = cl_mode_user_to_kernel(ladvise->lla_lockahead_mode);
4073 GOTO(out, result = cl_mode);
4075 /* Get IO environment */
4076 result = cl_io_get(inode, &env, &io, &refcheck);
4080 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
4083 * nothing to do for this io. This currently happens when
4084 * stripe sub-objects are not yet created.
4086 result = io->ci_result;
4087 } else if (result == 0) {
4088 lock = vvp_env_new_lock(env);
4089 descr = &lock->cll_descr;
4091 descr->cld_obj = io->ci_obj;
4092 /* Convert byte offsets to pages */
4093 descr->cld_start = start >> PAGE_SHIFT;
4094 descr->cld_end = end >> PAGE_SHIFT;
4095 descr->cld_mode = cl_mode;
4096 /* CEF_MUST is used because we do not want to convert a
4097 * lockahead request to a lockless lock
4099 descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND;
4101 if (ladvise->lla_peradvice_flags & LF_ASYNC)
4102 descr->cld_enq_flags |= CEF_SPECULATIVE;
4104 result = cl_lock_request(env, io, lock);
4106 /* On success, we need to release the lock */
4108 cl_lock_release(env, lock);
4110 cl_io_fini(env, io);
4111 cl_env_put(env, &refcheck);
4113 /* -ECANCELED indicates a matching lock with a different extent
4114 * was already present, and -EEXIST indicates a matching lock
4115 * on exactly the same extent was already present.
4116 * We convert them to positive values for userspace to make
4117 * recognizing true errors easier.
4118 * Note we can only return these detailed results on async requests,
4119 * as sync requests look the same as I/O requests for locking.
4121 if (result == -ECANCELED)
4122 result = LLA_RESULT_DIFFERENT;
4123 else if (result == -EEXIST)
4124 result = LLA_RESULT_SAME;
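/*
 * Example (userspace sketch, not part of this kernel file): requesting an
 * asynchronous write lock ahead of time. Assumes the llapi_ladvise()
 * wrapper from <lustre/lustreapi.h> and the LU_LADVISE_LOCKAHEAD fields
 * handled above; the extent is illustrative.
 *
 *	#include <lustre/lustreapi.h>
 *
 *	static int request_write_lock(int fd, __u64 start, __u64 end)
 *	{
 *		struct llapi_lu_ladvise advice = {
 *			.lla_advice          = LU_LADVISE_LOCKAHEAD,
 *			.lla_lockahead_mode  = MODE_WRITE_USER,
 *			.lla_peradvice_flags = LF_ASYNC,
 *			.lla_start           = start,
 *			.lla_end             = end,
 *		};
 *		int rc;
 *
 *		rc = llapi_ladvise(fd, 0, 1, &advice);
 *		// on success: 0, LLA_RESULT_SAME or LLA_RESULT_DIFFERENT
 *		return rc ? rc : advice.lla_lockahead_result;
 *	}
 */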
4129 static const char *const ladvise_names[] = LU_LADVISE_NAMES;
4131 static int ll_ladvise_sanity(struct inode *inode,
4132 struct llapi_lu_ladvise *ladvise)
4134 struct ll_sb_info *sbi = ll_i2sbi(inode);
4135 enum lu_ladvise_type advice = ladvise->lla_advice;
4136 /* Note the per-advice flags field is 32 bits wide, so per-advice flags
4137 * must fit in the first 32 bits of enum ladvise_flags
4139 __u32 flags = ladvise->lla_peradvice_flags;
4140 /* 3 lines at 80 characters per line, should be plenty */
4143 if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) {
4146 "%s: advice with value '%d' not recognized, last supported advice is %s (value '%d'): rc = %d\n",
4147 sbi->ll_fsname, advice,
4148 ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc);
4152 /* Per-advice checks */
4154 case LU_LADVISE_LOCKNOEXPAND:
4155 if (flags & ~LF_LOCKNOEXPAND_MASK) {
4157 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: rc = %d\n",
4158 sbi->ll_fsname, flags,
4159 ladvise_names[advice], rc);
4163 case LU_LADVISE_LOCKAHEAD:
4164 /* Currently only READ and WRITE modes can be requested */
4165 if (ladvise->lla_lockahead_mode >= MODE_MAX_USER ||
4166 ladvise->lla_lockahead_mode == 0) {
4168 CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: rc = %d\n",
4170 ladvise->lla_lockahead_mode,
4171 ladvise_names[advice], rc);
4175 case LU_LADVISE_WILLREAD:
4176 case LU_LADVISE_DONTNEED:
4178 /* Note the fall-through above - these checks apply to all advices
4179 * except LOCKNOEXPAND
4181 if (flags & ~LF_DEFAULT_MASK) {
4183 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: rc = %d\n",
4184 sbi->ll_fsname, flags,
4185 ladvise_names[advice], rc);
4188 if (ladvise->lla_start >= ladvise->lla_end) {
4190 CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) for %s: rc = %d\n",
4192 ladvise->lla_start, ladvise->lla_end,
4193 ladvise_names[advice], rc);
4205 * Give file access advice
4207 * The ladvise interface is similar to the Linux fadvise() system call, except
4208 * it forwards the advice directly from the Lustre client to the server. The
4209 * server-side code will apply appropriate read-ahead and caching techniques
4210 * for the corresponding files.
4212 * A typical workload for ladvise is e.g. a bunch of different clients doing
4213 * small random reads of a file, so prefetching pages into the OSS cache with
4214 * big linear reads before the random IO is a net benefit. Fetching all that
4215 * data into each client cache with fadvise() may not be, due to much more
4216 * data being sent to the client.
4218 static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags,
4219 struct llapi_lu_ladvise *ladvise)
4223 struct cl_ladvise_io *lio;
4228 env = cl_env_get(&refcheck);
4230 RETURN(PTR_ERR(env));
4232 io = vvp_env_new_io(env);
4233 io->ci_obj = ll_i2info(inode)->lli_clob;
4235 /* initialize parameters for ladvise */
4236 lio = &io->u.ci_ladvise;
4237 lio->lio_start = ladvise->lla_start;
4238 lio->lio_end = ladvise->lla_end;
4239 lio->lio_fid = ll_inode2fid(inode);
4240 lio->lio_advice = ladvise->lla_advice;
4241 lio->lio_flags = flags;
4243 if (cl_io_init(env, io, CIT_LADVISE, io->ci_obj) == 0)
4244 rc = cl_io_loop(env, io);
4248 cl_io_fini(env, io);
4249 cl_env_put(env, &refcheck);
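/*
 * Example (userspace sketch, not part of this kernel file): asking the
 * server to prefetch a region into the OSS cache, the typical ladvise
 * workload described above. Assumes the llapi_ladvise() wrapper from
 * <lustre/lustreapi.h>; the range is illustrative.
 *
 *	#include <lustre/lustreapi.h>
 *
 *	static int prefetch_region(int fd, __u64 start, __u64 end)
 *	{
 *		struct llapi_lu_ladvise advice = {
 *			.lla_advice = LU_LADVISE_WILLREAD,
 *			.lla_start  = start,
 *			.lla_end    = end,
 *		};
 *
 *		return llapi_ladvise(fd, 0, 1, &advice);
 *	}
 */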
4253 static int ll_lock_noexpand(struct file *file, int flags)
4255 struct ll_file_data *lfd = file->private_data;
4257 lfd->lfd_lock_no_expand = !(flags & LF_UNSET);
4262 #ifndef HAVE_FILEATTR_GET
4263 int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
4266 struct fsxattr fsxattr;
4268 if (copy_from_user(&fsxattr, uarg, sizeof(fsxattr)))
4271 fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
4272 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
4273 fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
4274 fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
4275 if (copy_to_user(uarg, &fsxattr, sizeof(fsxattr)))
4282 int ll_ioctl_check_project(struct inode *inode, __u32 xflags,
4286 * Project Quota ID state is only allowed to change from within the init
4287 * namespace. Enforce that restriction only if we are trying to change
4288 * the quota ID state. Everything else is allowed in user namespaces.
4290 if (current_user_ns() == &init_user_ns) {
4292 * The caller is allowed to change the project ID. If it is being
4293 * changed, make sure that the new value is valid.
4295 if (ll_i2info(inode)->lli_projid != projid &&
4296 !projid_valid(make_kprojid(&init_user_ns, projid)))
4302 if (ll_i2info(inode)->lli_projid != projid)
4305 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
4306 if (!(xflags & FS_XFLAG_PROJINHERIT))
4309 if (xflags & FS_XFLAG_PROJINHERIT)
4316 int ll_set_project(struct inode *inode, __u32 xflags, __u32 projid)
4318 struct ptlrpc_request *req = NULL;
4319 struct md_op_data *op_data;
4322 CDEBUG(D_QUOTA, DFID" xflags=%x projid=%u\n",
4323 PFID(ll_inode2fid(inode)), xflags, projid);
4324 rc = ll_ioctl_check_project(inode, xflags, projid);
4328 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4329 LUSTRE_OPC_ANY, NULL);
4330 if (IS_ERR(op_data))
4331 RETURN(PTR_ERR(op_data));
4333 op_data->op_attr_flags = ll_xflags_to_ext_flags(xflags);
4335 /* pass projid to md_op_data */
4336 op_data->op_projid = projid;
4338 op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
4339 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
4340 ptlrpc_req_put(req);
4342 GOTO(out_fsxattr, rc);
4343 ll_update_inode_flags(inode, op_data->op_attr_flags);
4345 /* Avoid an OST RPC if this ioctl is only setting the project inherit flag */
4346 if (xflags == 0 || xflags == FS_XFLAG_PROJINHERIT)
4347 GOTO(out_fsxattr, rc);
4349 if (ll_i2info(inode)->lli_clob) {
4350 struct iattr attr = { 0 };
4352 rc = cl_setattr_ost(inode, &attr, OP_XVALID_FLAGS, xflags);
4356 ll_finish_md_op_data(op_data);
4360 #ifndef HAVE_FILEATTR_GET
4361 int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
4364 struct fsxattr fsxattr;
4367 if (copy_from_user(&fsxattr, uarg, sizeof(fsxattr)))
4370 RETURN(ll_set_project(inode, fsxattr.fsx_xflags,
4371 fsxattr.fsx_projid));
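/*
 * Example (userspace sketch, not part of this kernel file): reading and
 * updating a file's project ID through the standard FS_IOC_FSGETXATTR/
 * FS_IOC_FSSETXATTR ioctls served by the handlers above. Error handling
 * is illustrative.
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	static int set_project_id(int fd, __u32 projid)
 *	{
 *		struct fsxattr fsx;
 *
 *		if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx) < 0)
 *			return -errno;
 *		fsx.fsx_projid = projid;
 *		fsx.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *		return ioctl(fd, FS_IOC_FSSETXATTR, &fsx) < 0 ? -errno : 0;
 *	}
 */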
4375 int ll_ioctl_project(struct file *file, unsigned int cmd, void __user *uarg)
4377 struct lu_project lu_project;
4378 struct dentry *dentry = file_dentry(file);
4379 struct inode *inode = file_inode(file);
4380 struct dentry *child_dentry = NULL;
4381 int rc = 0, name_len;
4383 if (copy_from_user(&lu_project, uarg, sizeof(lu_project)))
4386 /* apply child dentry if name is valid */
4387 name_len = strnlen(lu_project.project_name, NAME_MAX + 1);
4388 if (name_len > 0 && name_len <= NAME_MAX) {
4389 ll_inode_lock(inode);
4390 child_dentry = lookup_one_len(lu_project.project_name,
4392 ll_inode_unlock(inode);
4393 if (IS_ERR(child_dentry)) {
4394 rc = PTR_ERR(child_dentry);
4397 inode = child_dentry->d_inode;
4402 } else if (name_len > NAME_MAX) {
4407 switch (lu_project.project_type) {
4408 case LU_PROJECT_SET:
4409 rc = ll_set_project(inode, lu_project.project_xflags,
4410 lu_project.project_id);
4412 case LU_PROJECT_GET:
4413 lu_project.project_xflags =
4414 ll_inode_flags_to_xflags(inode->i_flags);
4415 if (test_bit(LLIF_PROJECT_INHERIT,
4416 &ll_i2info(inode)->lli_flags))
4417 lu_project.project_xflags |= FS_XFLAG_PROJINHERIT;
4418 lu_project.project_id = ll_i2info(inode)->lli_projid;
4419 if (copy_to_user(uarg, &lu_project, sizeof(lu_project))) {
4429 if (!IS_ERR_OR_NULL(child_dentry))
4434 static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
4437 struct inode *inode = file_inode(file);
4438 struct ll_file_data *lfd = file->private_data;
4439 struct ll_inode_info *lli = ll_i2info(inode);
4440 struct obd_client_handle *och = NULL;
4441 struct split_param sp;
4442 struct pcc_param param;
4443 bool lease_broken = false;
4444 enum mds_open_flags open_flags = MDS_FMODE_CLOSED;
4445 enum mds_op_bias bias = 0;
4447 struct file *layout_file = NULL;
4449 size_t data_size = 0;
4450 bool attached = false;
4454 mutex_lock(&lli->lli_och_mutex);
4455 if (lfd->fd_lease_och != NULL) {
4456 och = lfd->fd_lease_och;
4457 lfd->fd_lease_och = NULL;
4459 mutex_unlock(&lli->lli_och_mutex);
4464 open_flags = och->och_flags;
4466 switch (ioc->lil_flags) {
4467 case LL_LEASE_RESYNC_DONE:
4468 if (ioc->lil_count > IOC_IDS_MAX)
4469 GOTO(out_lease_close, rc = -EINVAL);
4471 data_size = offsetof(typeof(*ioc), lil_ids[ioc->lil_count]);
4472 OBD_ALLOC(data, data_size);
4474 GOTO(out_lease_close, rc = -ENOMEM);
4476 if (copy_from_user(data, uarg, data_size))
4477 GOTO(out_lease_close, rc = -EFAULT);
4479 bias = MDS_CLOSE_RESYNC_DONE;
4481 case LL_LEASE_LAYOUT_MERGE:
4482 if (ioc->lil_count != 1)
4483 GOTO(out_lease_close, rc = -EINVAL);
4485 uarg += sizeof(*ioc);
4486 if (copy_from_user(&fdv, uarg, sizeof(fdv)))
4487 GOTO(out_lease_close, rc = -EFAULT);
4489 layout_file = fget(fdv);
4491 GOTO(out_lease_close, rc = -EBADF);
4493 if ((file->f_flags & O_ACCMODE) == O_RDONLY ||
4494 (layout_file->f_flags & O_ACCMODE) == O_RDONLY)
4495 GOTO(out_lease_close, rc = -EPERM);
4497 data = file_inode(layout_file);
4498 bias = MDS_CLOSE_LAYOUT_MERGE;
4500 case LL_LEASE_LAYOUT_SPLIT: {
4503 if (ioc->lil_count != 2)
4504 GOTO(out_lease_close, rc = -EINVAL);
4506 uarg += sizeof(*ioc);
4507 if (copy_from_user(&fdv, uarg, sizeof(fdv)))
4508 GOTO(out_lease_close, rc = -EFAULT);
4510 uarg += sizeof(fdv);
4511 if (copy_from_user(&mirror_id, uarg, sizeof(mirror_id)))
4512 GOTO(out_lease_close, rc = -EFAULT);
4513 if (mirror_id >= MIRROR_ID_NEG)
4514 GOTO(out_lease_close, rc = -EINVAL);
4516 layout_file = fget(fdv);
4518 GOTO(out_lease_close, rc = -EBADF);
4520 /* if layout_file == file, it means to destroy the mirror */
4521 sp.sp_inode = file_inode(layout_file);
4522 sp.sp_mirror_id = (__u16)mirror_id;
4524 bias = MDS_CLOSE_LAYOUT_SPLIT;
4527 case LL_LEASE_PCC_ATTACH:
4528 if (ioc->lil_count != 1)
4531 /* PCC-RW is not supported for encrypted files. */
4532 if (IS_ENCRYPTED(inode))
4533 RETURN(-EOPNOTSUPP);
4535 uarg += sizeof(*ioc);
4536 if (copy_from_user(¶m.pa_archive_id, uarg, sizeof(__u32)))
4537 GOTO(out_lease_close, rc2 = -EFAULT);
4539 rc2 = pcc_readwrite_attach(file, inode, param.pa_archive_id);
4541 GOTO(out_lease_close, rc2);
4544 /* Grab latest data version */
4545 rc2 = ll_data_version(inode, ¶m.pa_data_version,
4548 GOTO(out_lease_close, rc2);
4551 bias = MDS_PCC_ATTACH;
4554 /* without close intent */
4559 rc = ll_lease_close_intent(och, inode, &lease_broken, bias, data);
4563 rc = ll_lease_och_release(inode, file);
4568 open_flags = MDS_FMODE_CLOSED;
4572 if (ioc->lil_flags == LL_LEASE_RESYNC_DONE && data)
4573 OBD_FREE(data, data_size);
4578 if (ioc->lil_flags == LL_LEASE_PCC_ATTACH) {
4581 rc = pcc_readwrite_attach_fini(file, inode,
4582 param.pa_layout_gen,
4587 ll_layout_refresh(inode, &lfd->fd_layout_version);
4590 rc = ll_lease_type_from_open_flags(open_flags);
4594 static long ll_file_set_lease(struct file *file, struct ll_ioc_lease *ioc,
4597 struct inode *inode = file_inode(file);
4598 struct ll_inode_info *lli = ll_i2info(inode);
4599 struct ll_file_data *lfd = file->private_data;
4600 struct obd_client_handle *och = NULL;
4601 enum mds_open_flags open_flags = MDS_FMODE_CLOSED;
4603 fmode_t fmode; /* kernel permissions */
4607 switch (ioc->lil_mode) {
4608 case LL_LEASE_WRLCK:
4609 if (!(file->f_mode & FMODE_WRITE))
4611 fmode = FMODE_WRITE;
4613 case LL_LEASE_RDLCK:
4614 if (!(file->f_mode & FMODE_READ))
4618 case LL_LEASE_UNLCK:
4619 RETURN(ll_file_unlock_lease(file, ioc, uarg));
4624 CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
4626 /* apply for lease */
4627 if (ioc->lil_flags & LL_LEASE_RESYNC)
4628 open_flags = MDS_OPEN_RESYNC;
4629 och = ll_lease_open(inode, file, fmode, open_flags);
4631 RETURN(PTR_ERR(och));
4633 if (ioc->lil_flags & LL_LEASE_RESYNC) {
4634 rc = ll_lease_file_resync(och, inode, uarg);
4636 ll_lease_close(och, inode, NULL);
4639 rc = ll_layout_refresh(inode, &lfd->fd_layout_version);
4641 ll_lease_close(och, inode, NULL);
4647 mutex_lock(&lli->lli_och_mutex);
4648 if (lfd->fd_lease_och == NULL) {
4649 lfd->fd_lease_och = och;
4652 mutex_unlock(&lli->lli_och_mutex);
4654 /* impossible, since only exclusive leases are supported for now */
4655 ll_lease_close(och, inode, &lease_broken);
4661 static void ll_heat_get(struct inode *inode, struct lu_heat *heat)
4663 struct ll_inode_info *lli = ll_i2info(inode);
4664 struct ll_sb_info *sbi = ll_i2sbi(inode);
4665 __u64 now = ktime_get_real_seconds();
4668 spin_lock(&lli->lli_heat_lock);
4669 heat->lh_flags = lli->lli_heat_flags;
4670 for (i = 0; i < heat->lh_count; i++)
4671 heat->lh_heat[i] = obd_heat_get(&lli->lli_heat_instances[i],
4672 now, sbi->ll_heat_decay_weight,
4673 sbi->ll_heat_period_second);
4674 spin_unlock(&lli->lli_heat_lock);
4677 static int ll_heat_set(struct inode *inode, enum lu_heat_flag flags)
4679 struct ll_inode_info *lli = ll_i2info(inode);
4682 spin_lock(&lli->lli_heat_lock);
4683 if (flags & LU_HEAT_FLAG_CLEAR)
4684 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
4686 if (flags & LU_HEAT_FLAG_OFF)
4687 lli->lli_heat_flags |= LU_HEAT_FLAG_OFF;
4689 lli->lli_heat_flags &= ~LU_HEAT_FLAG_OFF;
4691 spin_unlock(&lli->lli_heat_lock);
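/*
 * Illustrative userspace sketch (not part of this file): the
 * LL_IOC_HEAT_GET/SET cases in ll_file_ioctl() below expect a
 * struct lu_heat header followed by lh_count counters, mirroring
 * ll_heat_get(). Field and enum names are assumed from the uapi
 * headers:
 *
 *	size_t size = offsetof(struct lu_heat, lh_heat[OBD_HEAT_COUNT]);
 *	struct lu_heat *heat = calloc(1, size);
 *
 *	heat->lh_count = OBD_HEAT_COUNT;
 *	if (ioctl(fd, LL_IOC_HEAT_GET, heat) == 0)
 *		printf("read samples: %llu\n",
 *		       (unsigned long long)heat->lh_heat[OBD_HEAT_READSAMPLE]);
 */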
4697 ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4699 struct inode *inode = file_inode(file);
4700 struct ll_file_data *lfd = file->private_data;
4701 struct ll_sb_info *sbi = ll_i2sbi(inode);
4702 void __user *uarg = (void __user *)arg;
4706 CDEBUG(D_VFSTRACE|D_IOCTL, "VFS Op:inode="DFID"(%pK) cmd=%x arg=%lx\n",
4707 PFID(ll_inode2fid(inode)), inode, cmd, arg);
4708 ll_stats_ops_tally(sbi, LPROC_LL_IOCTL, 1);
4710 /* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
4711 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
4714 /* can't do a generic karg == NULL check here, since it is too noisy and
4715 * we need to return -ENOTTY for unsupported ioctls instead of -EINVAL.
4718 case LL_IOC_GETFLAGS:
4719 /* Get the current value of the Lustre file flags */
4720 return put_user(lfd->lfd_file_flags, (int __user *)arg);
4721 case LL_IOC_SETFLAGS:
4722 case LL_IOC_CLRFLAGS: {
4723 enum ll_file_flags lfd_file_flags;
4725 /* Set or clear specific Lustre file flags */
4726 /* XXX This probably needs checks to ensure the flags are
4727 * not abused, and to handle any flag side effects.
4729 if (get_user(lfd_file_flags, (int __user *)arg))
4732 /* LL_FILE_GROUP_LOCKED is managed via its own ioctls */
4733 if (lfd_file_flags & LL_FILE_GROUP_LOCKED)
4736 if (cmd == LL_IOC_SETFLAGS) {
4737 if ((lfd_file_flags & LL_FILE_IGNORE_LOCK) &&
4738 !(file->f_flags & O_DIRECT)) {
4740 CERROR("%s: unable to disable locking on non-O_DIRECT file "DFID": rc = %d\n",
4741 current->comm, PFID(ll_inode2fid(inode)),
4746 lfd->lfd_file_flags |= lfd_file_flags;
4748 lfd->lfd_file_flags &= ~lfd_file_flags;
4752 case LL_IOC_LOV_SETSTRIPE:
4753 case LL_IOC_LOV_SETSTRIPE_NEW:
4754 if (sbi->ll_enable_setstripe_gid != -1 &&
4755 !capable(CAP_SYS_RESOURCE) &&
4756 /* in_group_p always returns true for gid == 0, so we check
4757 * for this case directly
4759 (sbi->ll_enable_setstripe_gid == 0 ||
4760 !in_group_p(KGIDT_INIT(sbi->ll_enable_setstripe_gid)))) {
4761 /* for lfs we return EACCES, so we can print an error message */
4764 if (!strcmp(current->comm, "lfs"))
4766 /* otherwise, setstripe is refused silently so
4767 * applications do not fail
4772 RETURN(ll_lov_setstripe(inode, file, uarg));
4773 case LL_IOC_LOV_SETEA:
4774 RETURN(ll_lov_setea(inode, file, uarg));
4775 case LL_IOC_LOV_SWAP_LAYOUTS: {
4777 struct lustre_swap_layouts lsl;
4779 if (copy_from_user(&lsl, uarg, sizeof(lsl)))
4782 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
4785 file2 = fget(lsl.sl_fd);
4789 /* O_WRONLY or O_RDWR */
4790 if ((file2->f_flags & O_ACCMODE) == O_RDONLY)
4791 GOTO(out, rc = -EPERM);
4793 if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
4794 struct obd_client_handle *och = NULL;
4795 struct ll_inode_info *lli;
4796 struct inode *inode2;
4798 lli = ll_i2info(inode);
4799 mutex_lock(&lli->lli_och_mutex);
4800 if (lfd->fd_lease_och != NULL) {
4801 och = lfd->fd_lease_och;
4802 lfd->fd_lease_och = NULL;
4804 mutex_unlock(&lli->lli_och_mutex);
4806 GOTO(out, rc = -ENOLCK);
4807 inode2 = file_inode(file2);
4808 rc = ll_swap_layouts_close(och, inode, inode2, &lsl);
4810 rc = ll_swap_layouts(file, file2, &lsl);
4816 case LL_IOC_LOV_GETSTRIPE:
4817 case LL_IOC_LOV_GETSTRIPE_NEW:
4818 RETURN(ll_file_getstripe(inode, uarg, 0));
4819 case LL_IOC_GROUP_LOCK:
4820 RETURN(ll_get_grouplock(inode, file, arg));
4821 case LL_IOC_GROUP_UNLOCK:
4822 RETURN(ll_put_grouplock(inode, file, arg));
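/*
 * The LL_IOC_DATA_VERSION case below returns the file's data version,
 * optionally flushing cached pages first. A minimal userspace sketch,
 * assuming the uapi definitions from lustre_ioctl.h:
 *
 *	struct ioc_data_version idv = { .idv_flags = LL_DV_RD_FLUSH };
 *
 *	if (ioctl(fd, LL_IOC_DATA_VERSION, &idv) == 0)
 *		printf("data version: %llu\n",
 *		       (unsigned long long)idv.idv_version);
 */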
4823 case LL_IOC_DATA_VERSION: {
4824 struct ioc_data_version idv;
4827 if (copy_from_user(&idv, uarg, sizeof(idv)))
4830 idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
4831 rc = ll_ioc_data_version(inode, &idv);
4833 if (rc == 0 && copy_to_user(uarg, &idv, sizeof(idv)))
4838 case LL_IOC_HSM_STATE_GET: {
4839 struct md_op_data *op_data;
4840 struct hsm_user_state *hus;
4843 if (!ll_access_ok(uarg, sizeof(*hus)))
4850 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4851 LUSTRE_OPC_ANY, hus);
4852 if (IS_ERR(op_data)) {
4853 rc = PTR_ERR(op_data);
4855 rc = obd_iocontrol(cmd, ll_i2mdexp(inode),
4856 sizeof(*op_data), op_data, NULL);
4858 if (copy_to_user(uarg, hus, sizeof(*hus)))
4861 ll_finish_md_op_data(op_data);
4866 case LL_IOC_HSM_STATE_SET: {
4867 struct hsm_state_set *hss;
4874 if (copy_from_user(hss, uarg, sizeof(*hss)))
4877 rc = ll_hsm_state_set(inode, hss);
4882 case LL_IOC_HSM_ACTION: {
4883 struct md_op_data *op_data;
4884 struct hsm_current_action *hca;
4888 if (!ll_access_ok(uarg, sizeof(*hca)))
4895 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4896 LUSTRE_OPC_ANY, hca);
4897 if (IS_ERR(op_data)) {
4899 RETURN(PTR_ERR(op_data));
4902 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
4905 GOTO(skip_copy, rc);
4907 /* The hsm_current_action retrieved from the server could
4908 * contain corrupt information. If the data is incorrect,
4909 * collect debug information. We still send the data to
4910 * userland to handle, even if incorrect.
4911 */
4912 action = hsm_user_action2name(hca->hca_action);
4913 if (strcmp(action, "UNKNOWN") == 0 ||
4914 hca->hca_state > HPS_DONE) {
4916 "HSM current state %s action %s, offset = %llu, length %llu\n",
4917 hsm_progress_state2name(hca->hca_state), action,
4918 hca->hca_location.offset,
4919 hca->hca_location.length);
4922 if (copy_to_user(uarg, hca, sizeof(*hca)))
4925 ll_finish_md_op_data(op_data);
4929 case LL_IOC_HSM_DATA_VERSION: {
4932 if (get_user(data_version, (u64 __user *)arg))
4935 rc = ll_hsm_data_version_sync(inode, data_version);
4939 case LL_IOC_SET_LEASE_OLD: {
4940 struct ll_ioc_lease ioc = { .lil_mode = arg };
4942 RETURN(ll_file_set_lease(file, &ioc, 0));
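/*
 * LL_IOC_SET_LEASE below supersedes the _OLD variant by passing a full
 * struct ll_ioc_lease, which also carries flags and follower ids. A
 * hedged sketch of taking and dropping a read lease from userspace
 * (field names assumed from the uapi header):
 *
 *	struct ll_ioc_lease ioc = { .lil_mode = LL_LEASE_RDLCK };
 *
 *	ioctl(fd, LL_IOC_SET_LEASE, &ioc);	(acquire)
 *	ioc.lil_mode = LL_LEASE_UNLCK;
 *	ioctl(fd, LL_IOC_SET_LEASE, &ioc);	(release; returns lease state)
 */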
4944 case LL_IOC_SET_LEASE: {
4945 struct ll_ioc_lease ioc;
4947 if (copy_from_user(&ioc, uarg, sizeof(ioc)))
4950 RETURN(ll_file_set_lease(file, &ioc, uarg));
4952 case LL_IOC_GET_LEASE: {
4953 struct ll_inode_info *lli = ll_i2info(inode);
4954 struct ldlm_lock *lock = NULL;
4955 enum mds_open_flags open_flags = MDS_FMODE_CLOSED;
4957 mutex_lock(&lli->lli_och_mutex);
4958 if (lfd->fd_lease_och != NULL) {
4959 struct obd_client_handle *och = lfd->fd_lease_och;
4961 lock = ldlm_handle2lock(&och->och_lease_handle);
4963 lock_res_and_lock(lock);
4964 if (!ldlm_is_cancel(lock))
4965 open_flags = och->och_flags;
4967 unlock_res_and_lock(lock);
4968 ldlm_lock_put(lock);
4971 mutex_unlock(&lli->lli_och_mutex);
4973 RETURN(ll_lease_type_from_open_flags(open_flags));
4975 case LL_IOC_HSM_IMPORT: {
4976 struct hsm_user_import *hui;
4982 if (copy_from_user(hui, uarg, sizeof(*hui)))
4985 rc = ll_hsm_import(inode, file, hui);
4990 case LL_IOC_FUTIMES_3: {
4991 struct ll_futimes_3 lfu;
4993 if (copy_from_user(&lfu, uarg, sizeof(lfu)))
4996 RETURN(ll_file_futimes_3(file, &lfu));
4998 case LL_IOC_LADVISE: {
4999 struct llapi_ladvise_hdr *k_ladvise_hdr;
5000 struct llapi_ladvise_hdr __user *u_ladvise_hdr;
5003 int alloc_size = sizeof(*k_ladvise_hdr);
5006 u_ladvise_hdr = uarg;
5007 OBD_ALLOC_PTR(k_ladvise_hdr);
5008 if (k_ladvise_hdr == NULL)
5011 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
5012 GOTO(out_ladvise, rc = -EFAULT);
5014 if (k_ladvise_hdr->lah_magic != LADVISE_MAGIC ||
5015 k_ladvise_hdr->lah_count < 1)
5016 GOTO(out_ladvise, rc = -EINVAL);
5018 num_advise = k_ladvise_hdr->lah_count;
5019 if (num_advise >= LAH_COUNT_MAX)
5020 GOTO(out_ladvise, rc = -EFBIG);
5022 OBD_FREE_PTR(k_ladvise_hdr);
5023 alloc_size = offsetof(typeof(*k_ladvise_hdr),
5024 lah_advise[num_advise]);
5025 OBD_ALLOC(k_ladvise_hdr, alloc_size);
5026 if (k_ladvise_hdr == NULL)
5030 * TODO: submit multiple advices to one server in a single RPC
5032 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
5033 GOTO(out_ladvise, rc = -EFAULT);
5035 for (i = 0; i < num_advise; i++) {
5036 struct llapi_lu_ladvise *k_ladvise =
5037 &k_ladvise_hdr->lah_advise[i];
5038 struct llapi_lu_ladvise __user *u_ladvise =
5039 &u_ladvise_hdr->lah_advise[i];
5041 rc = ll_ladvise_sanity(inode, k_ladvise);
5043 GOTO(out_ladvise, rc);
5045 switch (k_ladvise->lla_advice) {
5046 case LU_LADVISE_LOCKNOEXPAND:
5047 rc = ll_lock_noexpand(file,
5048 k_ladvise->lla_peradvice_flags);
5049 GOTO(out_ladvise, rc);
5050 case LU_LADVISE_LOCKAHEAD:
5052 rc = ll_file_lock_ahead(file, k_ladvise);
5055 GOTO(out_ladvise, rc);
5058 &u_ladvise->lla_lockahead_result))
5059 GOTO(out_ladvise, rc = -EFAULT);
5062 rc = ll_ladvise(inode, file,
5063 k_ladvise_hdr->lah_flags,
5066 GOTO(out_ladvise, rc);
5073 OBD_FREE(k_ladvise_hdr, alloc_size);
5076 case LL_IOC_FLR_SET_MIRROR: {
5077 /* mirror I/O must be direct to avoid polluting page cache
5080 if (!(file->f_flags & O_DIRECT))
5083 lfd->fd_designated_mirror = arg;
5086 case LL_IOC_HEAT_GET: {
5087 struct lu_heat uheat;
5088 struct lu_heat *heat;
5091 if (copy_from_user(&uheat, uarg, sizeof(uheat)))
5094 if (uheat.lh_count > OBD_HEAT_COUNT)
5095 uheat.lh_count = OBD_HEAT_COUNT;
5097 size = offsetof(typeof(uheat), lh_heat[uheat.lh_count]);
5098 OBD_ALLOC(heat, size);
5102 heat->lh_count = uheat.lh_count;
5103 ll_heat_get(inode, heat);
5104 rc = copy_to_user(uarg, heat, size);
5105 OBD_FREE(heat, size);
5106 RETURN(rc ? -EFAULT : 0);
5108 case LL_IOC_HEAT_SET: {
5111 if (copy_from_user(&heat_flags, uarg, sizeof(heat_flags)))
5114 rc = ll_heat_set(inode, heat_flags);
5117 case LL_IOC_PCC_ATTACH: {
5118 struct lu_pcc_attach *attach;
5120 if (!S_ISREG(inode->i_mode))
5123 if (!pcc_inode_permission(inode))
5126 OBD_ALLOC_PTR(attach);
5130 if (copy_from_user(attach,
5131 (const struct lu_pcc_attach __user *)arg,
5133 GOTO(out_pcc, rc = -EFAULT);
5135 /* We only support pcc for encrypted files if we have the
5136 * encryption key and if it is PCC-RO.
5138 if (IS_ENCRYPTED(inode) &&
5139 (!llcrypt_has_encryption_key(inode) ||
5140 attach->pcca_type != LU_PCC_READONLY))
5141 GOTO(out_pcc, rc = -EOPNOTSUPP);
5143 rc = pcc_ioctl_attach(file, inode, attach);
5145 OBD_FREE_PTR(attach);
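/*
 * Sketch of the matching userspace side of LL_IOC_PCC_ATTACH above
 * (roughly what an "lfs pcc attach" style tool would issue); the
 * struct layout is assumed from the uapi header:
 *
 *	struct lu_pcc_attach attach = {
 *		.pcca_type = LU_PCC_READONLY,
 *		.pcca_id   = archive_id,
 *	};
 *
 *	ioctl(fd, LL_IOC_PCC_ATTACH, &attach);
 */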
5148 case LL_IOC_PCC_DETACH: {
5149 struct lu_pcc_detach *detach;
5151 OBD_ALLOC_PTR(detach);
5155 if (copy_from_user(detach, uarg, sizeof(*detach)))
5156 GOTO(out_detach_free, rc = -EFAULT);
5158 if (!S_ISREG(inode->i_mode))
5159 GOTO(out_detach_free, rc = -EINVAL);
5161 if (!pcc_inode_permission(inode))
5162 GOTO(out_detach_free, rc = -EPERM);
5164 rc = pcc_ioctl_detach(inode, &detach->pccd_flags);
5166 GOTO(out_detach_free, rc);
5168 if (copy_to_user((char __user *)arg, detach, sizeof(*detach)))
5169 GOTO(out_detach_free, rc = -EFAULT);
5171 OBD_FREE_PTR(detach);
5174 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 18, 53, 0)
5175 case LL_IOC_PCC_STATE: {
5176 struct lu_pcc_state __user *ustate = uarg;
5177 struct lu_pcc_state *state;
5179 OBD_ALLOC_PTR(state);
5183 if (copy_from_user(state, ustate, sizeof(*state)))
5184 GOTO(out_state, rc = -EFAULT);
5186 rc = pcc_ioctl_state(file, inode, state);
5188 GOTO(out_state, rc);
5190 if (copy_to_user(ustate, state, sizeof(*state)))
5191 GOTO(out_state, rc = -EFAULT);
5194 OBD_FREE_PTR(state);
5199 rc = ll_iocontrol(inode, file, cmd, uarg);
5202 RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL, uarg));
5206 static loff_t ll_lseek(struct file *file, loff_t offset, int whence)
5208 struct inode *inode = file_inode(file);
5211 struct cl_lseek_io *lsio;
5217 env = cl_env_get(&refcheck);
5219 RETURN(PTR_ERR(env));
5221 io = vvp_env_new_io(env);
5222 io->ci_obj = ll_i2info(inode)->lli_clob;
5223 ll_io_set_mirror(io, file);
5225 lsio = &io->u.ci_lseek;
5226 lsio->ls_start = offset;
5227 lsio->ls_whence = whence;
5228 lsio->ls_result = -ENXIO;
5231 rc = cl_io_init(env, io, CIT_LSEEK, io->ci_obj);
5233 struct vvp_io *vio = vvp_env_io(env);
5235 vio->vui_fd = file->private_data;
5236 rc = cl_io_loop(env, io);
5240 retval = rc ? : lsio->ls_result;
5241 cl_io_fini(env, io);
5242 } while (unlikely(io->ci_need_restart));
5244 cl_env_put(env, &refcheck);
5246 /* Without the key, SEEK_HOLE return value has to be
5247 * rounded up to the next LUSTRE_ENCRYPTION_UNIT_SIZE.
5249 if (IS_ENCRYPTED(inode) && !ll_has_encryption_key(inode) &&
5250 whence == SEEK_HOLE)
5251 retval = round_up(retval, LUSTRE_ENCRYPTION_UNIT_SIZE);
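/*
 * For reference, the CIT_LSEEK io above backs the standard lseek(2)
 * extensions. A sparse-file copy tool, for example, would walk extents
 * with:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * where errno == ENXIO (mapped from the ls_result default above) means
 * there is no further data or hole past the given offset.
 */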
5256 #define LU_SEEK_NAMES { \
5257 [SEEK_SET] = "SEEK_SET", \
5258 [SEEK_CUR] = "SEEK_CUR", \
5259 [SEEK_DATA] = "SEEK_DATA", \
5260 [SEEK_HOLE] = "SEEK_HOLE", \
5261 [SEEK_END] = "SEEK_END", \
5262 }
5263 static const char *const ll_seek_names[] = LU_SEEK_NAMES;
5265 static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
5267 struct inode *inode = file_inode(file);
5268 loff_t retval = offset, eof = 0;
5269 ktime_t kstart = ktime_get();
5272 CDEBUG(D_VFSTRACE|D_IOTRACE,
5273 "START file "DNAME":"DFID", offset: %lld, type: %s\n",
5274 encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
5275 offset, ll_seek_names[origin]);
5277 if (origin == SEEK_END) {
5278 retval = ll_glimpse_size(inode);
5281 eof = i_size_read(inode);
5284 if (origin == SEEK_HOLE || origin == SEEK_DATA) {
5288 /* flush local cache first if any */
5289 cl_sync_file_range(inode, offset, OBD_OBJECT_EOF,
5292 retval = ll_lseek(file, offset, origin);
5295 retval = vfs_setpos(file, retval, ll_file_maxbytes(inode));
5297 retval = generic_file_llseek_size(file, offset, origin,
5298 ll_file_maxbytes(inode), eof);
5301 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
5302 ktime_us_delta(ktime_get(), kstart));
5303 CDEBUG(D_VFSTRACE|D_IOTRACE,
5304 "COMPLETED file "DNAME":"DFID", offset: %lld, type: %s, rc = %lld\n",
5305 encode_fn_file(file), PFID(ll_inode2fid(file_inode(file))),
5306 offset, ll_seek_names[origin], retval);
5311 static int ll_flush(struct file *file, fl_owner_t id)
5313 struct inode *inode = file_inode(file);
5314 struct ll_inode_info *lli = ll_i2info(inode);
5315 struct ll_file_data *lfd = file->private_data;
5318 LASSERT(!S_ISDIR(inode->i_mode));
5320 /* catch async errors that were recorded back when async writeback
5321 * failed for pages in this mapping.
5323 rc = lli->lli_async_rc;
5324 lli->lli_async_rc = 0;
5325 if (lli->lli_clob != NULL) {
5326 err = lov_read_and_clear_async_rc(lli->lli_clob);
5331 /* The application has already been told about the write failure.
5332 * Do not report the failure again.
5333 */
5334 if (lfd->fd_write_failed)
5336 return rc ? -EIO : 0;
5340 * Called to make sure a portion of the file has been written out.
5341 * If @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to the OST.
5343 * Return how many pages have been written.
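 *
 * For illustration (not taken from the original source), a typical
 * whole-file flush such as the one issued from the fsync path below
 * would be:
 *
 *	rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
 *				CL_FSYNC_ALL, 0);
 *
 * where a negative result is an error and a non-negative result is the
 * number of pages written.
 */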
5345 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
5346 enum cl_fsync_mode mode, int ignore_layout)
5350 struct cl_fsync_io *fio;
5355 if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
5356 mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL &&
5357 mode != CL_FSYNC_RECLAIM)
5360 env = cl_env_get(&refcheck);
5362 RETURN(PTR_ERR(env));
5364 io = vvp_env_new_io(env);
5365 io->ci_obj = ll_i2info(inode)->lli_clob;
5366 cl_object_get(io->ci_obj);
5367 io->ci_ignore_layout = ignore_layout;
5369 /* initialize parameters for sync */
5370 fio = &io->u.ci_fsync;
5371 fio->fi_start = start;
5373 fio->fi_fid = ll_inode2fid(inode);
5374 fio->fi_mode = mode;
5375 fio->fi_nr_written = 0;
5377 if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
5378 result = cl_io_loop(env, io);
5380 result = io->ci_result;
5382 result = fio->fi_nr_written;
5383 cl_io_fini(env, io);
5384 cl_object_put(env, io->ci_obj);
5385 cl_env_put(env, &refcheck);
5391 * When dentry is provided (the 'else' case), file_dentry() may be
5392 * null and dentry must be used directly rather than pulled from
5393 * file_dentry() as is done otherwise.
5395 int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
5397 struct dentry *dentry = file_dentry(file);
5398 struct inode *inode = dentry->d_inode;
5399 struct ll_inode_info *lli = ll_i2info(inode);
5400 struct ptlrpc_request *req;
5401 ktime_t kstart = ktime_get();
5406 "VFS Op:inode="DFID"(%p), start %lld, end %lld, datasync %d\n",
5407 PFID(ll_inode2fid(inode)), inode, start, end, datasync);
5409 /* fsync's caller has already called _fdata{sync,write}, we want
5410 * that IO to finish before calling the osc and mdc sync methods
5412 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
5414 /* catch async errors that were recorded back when async writeback
5415 * failed for pages in this mapping.
5417 if (!S_ISDIR(inode->i_mode)) {
5418 err = lli->lli_async_rc;
5419 lli->lli_async_rc = 0;
5422 if (lli->lli_clob != NULL) {
5423 err = lov_read_and_clear_async_rc(lli->lli_clob);
5429 if (S_ISREG(inode->i_mode) && !lli->lli_synced_to_mds) {
5431 * only the first sync on MDS makes sense,
5432 * everything else is stored on OSTs
5434 err = md_fsync(ll_i2sbi(inode)->ll_md_exp,
5435 ll_inode2fid(inode), &req);
5439 lli->lli_synced_to_mds = true;
5440 ptlrpc_req_put(req);
5444 if (S_ISREG(inode->i_mode)) {
5445 struct ll_file_data *lfd = file->private_data;
5448 /* Sync metadata on MDT first, and then sync the cached data
5451 err = pcc_fsync(file, start, end, datasync, &cached);
5453 err = cl_sync_file_range(inode, start, end,
5455 if (rc == 0 && err < 0)
5458 lfd->fd_write_failed = true;
5460 lfd->fd_write_failed = false;
5464 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
5465 ktime_us_delta(ktime_get(), kstart));
5469 static int ll_file_flc2policy(struct file_lock *file_lock, int cmd,
5470 union ldlm_policy_data *flock)
5474 if (file_lock->C_FLC_FLAGS & FL_FLOCK) {
5475 LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
5476 /* flocks are whole-file locks */
5477 flock->l_flock.end = OFFSET_MAX;
5478 /* For flocks, the owner is determined by the local file descriptor */
5479 flock->l_flock.owner = (unsigned long)file_lock->C_FLC_FILE;
5480 } else if (file_lock->C_FLC_FLAGS & FL_POSIX) {
5481 flock->l_flock.owner = (unsigned long)file_lock->C_FLC_OWNER;
5482 flock->l_flock.start = file_lock->fl_start;
5483 flock->l_flock.end = file_lock->fl_end;
5487 flock->l_flock.pid = file_lock->C_FLC_PID;
5489 #if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner)
5490 /* Somewhat ugly workaround for svc lockd.
5491 * lockd installs a custom fl_lmops->lm_compare_owner that checks
5492 * that the fl_owner is the same (which it always is between lockd
5493 * processes on the local node) and then compares the pid.
5494 * As such we assign the pid to the owner field to make it all work;
5495 * conflict with normal locks is unlikely since the pid space and
5496 * the pointer space for current->files do not intersect.
5497 */
5498 if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
5499 flock->l_flock.owner = (unsigned long)file_lock->C_FLC_PID;
5505 static int ll_file_flock_lock(struct file *file, struct file_lock *file_lock)
5509 /* We don't need to sleep on conflicting locks.
5510 * It is called in the following use cases:
5511 * 1. adding new lock - no conflicts exist as it is already granted
5513 * 2. unlock - never conflicts with anything.
5515 file_lock->C_FLC_FLAGS &= ~FL_SLEEP;
5516 #ifdef HAVE_LOCKS_LOCK_FILE_WAIT
5517 rc = locks_lock_file_wait(file, file_lock);
5519 if (file_lock->C_FLC_FLAGS & FL_FLOCK)
5520 rc = flock_lock_file_wait(file, file_lock);
5521 else if (file_lock->C_FLC_FLAGS & FL_POSIX)
5522 rc = posix_lock_file(file, file_lock, NULL);
5523 #endif /* HAVE_LOCKS_LOCK_FILE_WAIT */
5525 CDEBUG_LIMIT(rc == -ENOENT ? D_DLMTRACE : D_ERROR,
5526 "kernel lock failed: rc = %d\n", rc);
5531 static int ll_flock_upcall(void *cookie, int err);
5533 ll_flock_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
5535 static int ll_file_flock_async_unlock(struct inode *inode,
5536 struct file_lock *file_lock)
5538 struct ll_sb_info *sbi = ll_i2sbi(inode);
5539 struct ldlm_enqueue_info einfo = { .ei_type = LDLM_FLOCK,
5541 ll_flock_completion_ast_async,
5543 .ei_cbdata = NULL };
5544 union ldlm_policy_data flock = { {0} };
5545 struct md_op_data *op_data;
5549 rc = ll_file_flc2policy(file_lock, F_SETLK, &flock);
5553 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
5554 LUSTRE_OPC_ANY, NULL);
5555 if (IS_ERR(op_data))
5556 RETURN(PTR_ERR(op_data));
5558 rc = md_enqueue_async(sbi->ll_md_exp, &einfo, ll_flock_upcall,
5559 op_data, &flock, 0);
5561 ll_finish_md_op_data(op_data);
5566 /* This function is called only once after the ldlm callback. Args are
5567 * already detached from the lock, so locking isn't needed.
5568 * It should only report the lock status to the kernel.
5569 */
5570 static void ll_file_flock_async_cb(struct ldlm_flock_info *args)
5572 struct file_lock *file_lock = args->fa_fl;
5573 struct file_lock *flc = &args->fa_flc;
5574 struct file *file = args->fa_file;
5575 struct inode *inode = file->f_path.dentry->d_inode;
5576 int err = args->fa_err;
5580 CDEBUG(D_INFO, "err=%d file_lock=%p file=%p start=%llu end=%llu\n",
5581 err, file_lock, file, flc->fl_start, flc->fl_end);
5583 /* The kernel is responsible for resolving grant vs F_CANCELLK or
5584 * grant vs. cleanup races; it may happen that the CANCELED flag
5585 * isn't set and err == 0, because F_CANCELLK/cleanup happens between
5586 * ldlm_flock_completion_ast_async() and ll_flock_run_flock_cb().
5587 * In this case notify() returns an error for the already canceled flock.
5588 */
5589 if (!(args->fa_flags & FA_FL_CANCELED)) {
5590 struct file_lock notify_lock;
5592 locks_init_lock(&notify_lock);
5593 locks_copy_lock(&notify_lock, flc);
5596 ll_file_flock_lock(file, flc);
5598 wait_event_idle(args->fa_waitq, args->fa_ready);
5600 #ifdef HAVE_LM_GRANT_2ARGS
5601 rc = args->fa_notify(&notify_lock, err);
5602 #else
5603 rc = args->fa_notify(&notify_lock, NULL, err);
5604 #endif
5606 CDEBUG_LIMIT(D_ERROR,
5607 "notify failed file_lock=%p err=%d\n",
5610 flc->C_FLC_TYPE = F_UNLCK;
5611 ll_file_flock_lock(file, flc);
5612 ll_file_flock_async_unlock(inode, flc);
5622 static void ll_flock_run_flock_cb(struct ldlm_flock_info *args)
5625 ll_file_flock_async_cb(args);
5630 static int ll_flock_upcall(void *cookie, int err)
5632 struct ldlm_flock_info *args;
5633 struct ldlm_lock *lock = cookie;
5636 CERROR("ldlm_cli_enqueue_fini lock=%p : rc = %d\n", lock, err);
5638 lock_res_and_lock(lock);
5639 args = lock->l_ast_data;
5640 lock->l_ast_data = NULL;
5641 unlock_res_and_lock(lock);
5645 ll_flock_run_flock_cb(args);
5652 ll_flock_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
5654 struct ldlm_flock_info *args;
5658 args = ldlm_flock_completion_ast_async(lock, flags, data);
5659 if (args && args->fa_flags & FA_FL_CANCELED) {
5660 /* lock was cancelled in a race */
5661 struct inode *inode = args->fa_file->f_path.dentry->d_inode;
5663 ll_file_flock_async_unlock(inode, &args->fa_flc);
5666 ll_flock_run_flock_cb(args);
5672 ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
5674 struct inode *inode = file_inode(file);
5675 struct ll_sb_info *sbi = ll_i2sbi(inode);
5676 struct ldlm_enqueue_info einfo = {
5677 .ei_type = LDLM_FLOCK,
5678 .ei_cb_cp = ldlm_flock_completion_ast,
5681 struct md_op_data *op_data;
5682 struct lustre_handle lockh = { 0 };
5683 union ldlm_policy_data flock = { { 0 } };
5684 int fl_type = file_lock->C_FLC_TYPE;
5685 ktime_t kstart = ktime_get();
5687 struct ldlm_flock_info *cb_data = NULL;
5691 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
5692 PFID(ll_inode2fid(inode)), file_lock);
5694 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
5696 rc = ll_file_flc2policy(file_lock, cmd, &flock);
5702 einfo.ei_mode = LCK_PR;
5705 /* An unlock request may or may not have any relation to
5706 * existing locks so we may not be able to pass a lock handle
5707 * via a normal ldlm_lock_cancel() request. The request may even
5708 * unlock a byte range in the middle of an existing lock. In
5709 * order to process an unlock request we need all of the same
5710 * information that is given with a normal read or write record
5711 * lock request. To avoid creating another ldlm unlock (cancel)
5712 * message we'll treat a LCK_NL flock request as an unlock.
5714 einfo.ei_mode = LCK_NL;
5717 einfo.ei_mode = LCK_PW;
5721 CERROR("%s: fcntl from '%s' unknown lock type=%d: rc = %d\n",
5722 sbi->ll_fsname, current->comm, fl_type, rc);
5737 flags = LDLM_FL_BLOCK_NOWAIT;
5743 flags = LDLM_FL_TEST_LOCK;
5746 CDEBUG(D_DLMTRACE, "F_CANCELLK owner=%llx %llu-%llu\n",
5747 flock.l_flock.owner, flock.l_flock.start,
5749 file_lock->C_FLC_TYPE = F_UNLCK;
5750 einfo.ei_mode = LCK_NL;
5754 CERROR("%s: fcntl from '%s' unknown lock command=%d: rc = %d\n",
5755 sbi->ll_fsname, current->comm, cmd, rc);
5760 "inode="DFID", pid=%u, owner=%#llx, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
5761 PFID(ll_inode2fid(inode)), flock.l_flock.pid,
5762 flock.l_flock.owner, flags, einfo.ei_mode,
5763 flock.l_flock.start, flock.l_flock.end);
5765 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
5766 LUSTRE_OPC_ANY, NULL);
5767 if (IS_ERR(op_data))
5768 RETURN(PTR_ERR(op_data));
5770 OBD_ALLOC_PTR(cb_data);
5772 GOTO(out, rc = -ENOMEM);
5774 cb_data->fa_file = file;
5775 cb_data->fa_fl = file_lock;
5776 cb_data->fa_mode = einfo.ei_mode;
5777 init_waitqueue_head(&cb_data->fa_waitq);
5778 locks_init_lock(&cb_data->fa_flc);
5779 locks_copy_lock(&cb_data->fa_flc, file_lock);
5780 if (cmd == F_CANCELLK)
5781 cb_data->fa_flags |= FA_FL_CANCEL_RQST;
5782 einfo.ei_cbdata = cb_data;
5784 if (file_lock->fl_lmops && file_lock->fl_lmops->lm_grant &&
5785 file_lock->C_FLC_TYPE != F_UNLCK &&
5786 flags == LDLM_FL_BLOCK_NOWAIT /* F_SETLK/F_SETLK64 */) {
5788 cb_data->fa_notify = file_lock->fl_lmops->lm_grant;
5789 flags = (file_lock->C_FLC_FLAGS & FL_SLEEP) ?
5790 0 : LDLM_FL_BLOCK_NOWAIT;
5791 einfo.ei_cb_cp = ll_flock_completion_ast_async;
5794 rc = md_enqueue_async(sbi->ll_md_exp, &einfo,
5795 ll_flock_upcall, op_data, &flock, flags);
5798 OBD_FREE_PTR(cb_data);
5801 rc = FILE_LOCK_DEFERRED;
5804 if (file_lock->C_FLC_TYPE == F_UNLCK &&
5805 flags != LDLM_FL_TEST_LOCK) {
5806 /* We unlock the kernel lock before the ldlm one to avoid a race
5807 * with reordering of unlock & lock responses from the server.
5808 */
5810 cb_data->fa_flc.C_FLC_FLAGS |= FL_EXISTS;
5811 rc = ll_file_flock_lock(file, &cb_data->fa_flc);
5813 if (rc == -ENOENT) {
5814 if (!(file_lock->C_FLC_TYPE &
5818 CDEBUG_LIMIT(D_ERROR,
5819 "local unlock failed rc=%d\n",
5822 OBD_FREE_PTR(cb_data);
5828 rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data,
5832 if (!rc && file_lock->C_FLC_TYPE != F_UNLCK &&
5833 !(flags & LDLM_FL_TEST_LOCK)) {
5836 rc2 = ll_file_flock_lock(file, file_lock);
5839 einfo.ei_mode = LCK_NL;
5840 cb_data->fa_mode = einfo.ei_mode;
5841 md_enqueue(sbi->ll_md_exp, &einfo, &flock,
5842 op_data, &lockh, flags);
5846 OBD_FREE_PTR(cb_data);
5850 ll_finish_md_op_data(op_data);
5852 cb_data->fa_ready = 1;
5853 wake_up(&cb_data->fa_waitq);
5856 if (rc == 0 && (flags & LDLM_FL_TEST_LOCK) &&
5857 file_lock->C_FLC_TYPE != F_UNLCK) {
5858 struct file_lock flbuf = { .fl_ops = NULL, };
5859 /* The parallel-scale-nfs test_2 checks this line */
5860 char __maybe_unused *str = "Invoke locks_copy_lock for NFSv3";
5862 /* Take an extra reference for the lock owner while
5863 * working with lockd.
5864 */
5865 locks_copy_lock(&flbuf, file_lock);
5869 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK,
5870 ktime_us_delta(ktime_get(), kstart));
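/*
 * For reference, both BSD flock(2) and POSIX fcntl(2) locks funnel
 * through ll_file_flock() above (see the file_operations tables at the
 * end of this file), so a cluster-wide advisory lock from userspace is
 * simply:
 *
 *	flock(fd, LOCK_EX);		(needs the -o flock mount option)
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLKW, &fl);	(POSIX byte-range equivalent)
 */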
5874 int ll_get_fid_by_name(struct inode *parent, const char *name,
5875 int namelen, struct lu_fid *fid,
5876 struct inode **inode)
5878 struct md_op_data *op_data = NULL;
5879 struct mdt_body *body;
5880 struct ptlrpc_request *req;
5884 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
5885 LUSTRE_OPC_ANY, NULL);
5886 if (IS_ERR(op_data))
5887 RETURN(PTR_ERR(op_data));
5889 op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
5890 rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
5891 ll_finish_md_op_data(op_data);
5895 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
5897 GOTO(out_req, rc = -EFAULT);
5899 *fid = body->mbo_fid1;
5902 rc = ll_prep_inode(inode, &req->rq_pill, parent->i_sb, NULL);
5904 ptlrpc_req_put(req);
5908 int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum,
5909 const char *name, __u32 flags)
5911 struct dentry *dchild = NULL;
5912 struct inode *child_inode = NULL;
5913 struct md_op_data *op_data;
5914 struct ptlrpc_request *request = NULL;
5915 struct obd_client_handle *och = NULL;
5917 struct mdt_body *body;
5918 __u64 data_version = 0;
5919 size_t namelen = strlen(name);
5920 int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
5921 bool locked = false;
5925 CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
5926 PFID(ll_inode2fid(parent)), encode_fn_len(name, namelen),
5927 lum->lum_stripe_offset, lum->lum_stripe_count);
5929 if (lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC) &&
5930 lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC_SPECIFIC))
5931 lustre_swab_lmv_user_md(lum);
5933 /* Get child FID first */
5934 qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
5937 dchild = d_lookup(file_dentry(file), &qstr);
5939 if (dchild->d_inode)
5940 child_inode = igrab(dchild->d_inode);
5945 rc = ll_get_fid_by_name(parent, name, namelen, NULL,
5954 if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
5955 OBD_CONNECT2_DIR_MIGRATE)) {
5956 if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
5957 ll_dir_striped(child_inode)) {
5959 CERROR("%s: MDT doesn't support stripe directory migration!: rc = %d\n",
5960 ll_i2sbi(parent)->ll_fsname, rc);
5966 * lfs migrate command needs to be blocked on the client
5967 * by checking the migrate FID against the FID of the
5968 * filesystem root.
5969 */
5970 if (is_root_inode(child_inode))
5971 GOTO(out_iput, rc = -EINVAL);
5974 * setxattr(), used for finishing the dir migration, has the same
5975 * capability check for updating attributes in the "trusted" namespace.
5976 */
5977 if (!capable(CAP_SYS_ADMIN))
5978 GOTO(out_iput, rc = -EPERM);
5980 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
5981 child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
5982 if (IS_ERR(op_data))
5983 GOTO(out_iput, rc = PTR_ERR(op_data));
5985 op_data->op_fid3 = *ll_inode2fid(child_inode);
5986 if (!fid_is_sane(&op_data->op_fid3)) {
5988 CERROR("%s: migrate %s, but FID "DFID" is insane: rc = %d\n",
5989 ll_i2sbi(parent)->ll_fsname, name,
5990 PFID(&op_data->op_fid3), rc);
5994 op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
5995 op_data->op_data = lum;
5996 op_data->op_data_size = lumlen;
5998 /* migrate dirent only for subdirs if MDS_MIGRATE_NSONLY set */
5999 if (S_ISDIR(child_inode->i_mode) && (flags & MDS_MIGRATE_NSONLY) &&
6000 lmv_dir_layout_changing(op_data->op_lso1))
6001 op_data->op_bias |= MDS_MIGRATE_NSONLY;
6004 if (S_ISREG(child_inode->i_mode)) {
6005 och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
6012 rc = ll_data_version(child_inode, &data_version,
6015 GOTO(out_close, rc);
6017 op_data->op_open_handle = och->och_open_handle;
6018 op_data->op_data_version = data_version;
6019 op_data->op_lease_handle = och->och_lease_handle;
6020 op_data->op_bias |= MDS_CLOSE_MIGRATE;
6022 spin_lock(&och->och_mod->mod_open_req->rq_lock);
6023 och->och_mod->mod_open_req->rq_replay = 0;
6024 spin_unlock(&och->och_mod->mod_open_req->rq_lock);
6026 LASSERT(locked == false);
6027 ll_inode_lock(child_inode);
6030 rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data,
6031 op_data->op_name, op_data->op_namelen,
6032 op_data->op_name, op_data->op_namelen, &request);
6034 LASSERT(request != NULL);
6035 ll_update_times(request, parent);
6038 if (rc == 0 || rc == -EAGAIN) {
6039 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
6040 LASSERT(body != NULL);
6042 /* If the server does release the layout lock, then we clean up
6043 * the client och here; otherwise release it in out_close:
6044 */
6045 if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
6046 obd_mod_put(och->och_mod);
6047 md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
6049 och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
6055 if (request != NULL) {
6056 ptlrpc_req_put(request);
6060 /* Try again if the lease has been cancelled. */
6061 if (rc == -EAGAIN && S_ISREG(child_inode->i_mode)) {
6062 LASSERT(locked == true);
6063 ll_inode_unlock(child_inode);
6070 ll_lease_close(och, child_inode, NULL);
6072 clear_nlink(child_inode);
6074 ll_finish_md_op_data(op_data);
6077 ll_inode_unlock(child_inode);
6083 ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
6085 struct ll_file_data *lfd = file->private_data;
6089 * In order to avoid a flood of warning messages, only print one
6090 * message per file. The overall message rate on the client is
6091 * limited by CDEBUG_LIMIT too.
6093 if (!(lfd->lfd_file_flags & LL_FILE_FLOCK_WARNING)) {
6094 lfd->lfd_file_flags |= LL_FILE_FLOCK_WARNING;
6095 CDEBUG_LIMIT(D_CONSOLE,
6096 "flock disabled, mount with '-o [local]flock' to enable\r\n");
6102 * test if some locks matching bits and l_req_mode are acquired
6103 * - bits can be in different locks
6104 * - if found clear the common lock bits in *bits
6105 * - the bits not found are kept in *bits
6107 * \param bits [IN] searched lock bits
6108 * \param l_req_mode [IN] searched lock mode
6109 * \param match_flags [IN] match flags
6110 * \retval boolean, true iff all bits are found
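 *
 * A hedged usage sketch, testing whether both LOOKUP and UPDATE bits
 * are cached locally in any mode:
 *
 *	enum mds_ibits_locks bits = MDS_INODELOCK_LOOKUP |
 *				    MDS_INODELOCK_UPDATE;
 *	bool all_held = ll_have_md_lock(exp, inode, &bits,
 *					LCK_MODE_MIN, 0);
 *
 * On return the missing bits, if any, remain set in "bits".
 */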
6112 int ll_have_md_lock(struct obd_export *exp, struct inode *inode,
6113 enum mds_ibits_locks *bits, enum ldlm_mode l_req_mode,
6114 enum ldlm_match_flags match_flags)
6116 struct lustre_handle lockh;
6117 union ldlm_policy_data policy;
6118 enum ldlm_mode mode = (l_req_mode == LCK_MODE_MIN) ?
6119 (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
6128 fid = &ll_i2info(inode)->lli_fid;
6129 CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
6130 ldlm_lockname[mode]);
6132 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
6133 for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
6134 policy.l_inodebits.bits = *bits & BIT(i);
6135 if (policy.l_inodebits.bits == MDS_INODELOCK_NONE)
6138 if (md_lock_match(exp, flags, fid, LDLM_IBITS, &policy, mode,
6139 match_flags, &lockh)) {
6140 struct ldlm_lock *lock;
6142 lock = ldlm_handle2lock(&lockh);
6145 ~(lock->l_policy_data.l_inodebits.bits);
6146 ldlm_lock_put(lock);
6148 *bits &= ~policy.l_inodebits.bits;
6155 enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
6156 struct lustre_handle *lockh, __u64 flags,
6157 enum ldlm_mode mode)
6159 union ldlm_policy_data policy = { .l_inodebits = { bits } };
6164 fid = &ll_i2info(inode)->lli_fid;
6165 CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
6167 rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
6168 fid, LDLM_IBITS, &policy, mode, 0, lockh);
6173 static int ll_inode_revalidate_fini(struct inode *inode, int rc)
6175 /* Already unlinked. Just update nlink and return success */
6176 if (rc == -ENOENT) {
6178 /* If it is a striped directory with a bad stripe,
6179 * let's revalidate the dentry again instead of returning
6180 * an error.
6181 */
6182 if (ll_dir_striped(inode))
6185 /* This path cannot be hit for regular files unless in
6186 * case of obscure races, so no need to validate
6189 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
6191 } else if (rc != 0) {
6192 CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
6193 "%s: revalidate FID "DFID" error: rc = %d\n",
6194 ll_i2sbi(inode)->ll_fsname,
6195 PFID(ll_inode2fid(inode)), rc);
6201 static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op)
6203 struct dentry *parent = NULL;
6205 struct inode *inode = dentry->d_inode;
6206 struct obd_export *exp = ll_i2mdexp(inode);
6207 struct lookup_intent oit = {
6210 struct ptlrpc_request *req = NULL;
6211 struct md_op_data *op_data;
6212 const char *name = NULL;
6218 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name="DNAME"\n",
6219 PFID(ll_inode2fid(inode)), inode, encode_fn_dentry(dentry));
6221 /* Call getattr by fid */
6222 if ((exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) &&
6223 !d_lustre_invalid(dentry)) {
6224 flags = MF_GETATTR_BY_FID;
6225 parent = dget_parent(dentry);
6226 dir = d_inode(parent);
6227 name = dentry->d_name.name;
6228 namelen = dentry->d_name.len;
6233 op_data = ll_prep_md_op_data(NULL, dir, inode, name, namelen, 0,
6234 LUSTRE_OPC_ANY, NULL);
6237 if (IS_ERR(op_data))
6238 RETURN(PTR_ERR(op_data));
6240 op_data->op_flags |= flags;
6241 rc = ll_intent_lock(exp, op_data, &oit, &req,
6242 &ll_md_blocking_ast, 0, true);
6243 ll_finish_md_op_data(op_data);
6245 rc = ll_inode_revalidate_fini(inode, rc);
6249 rc = ll_revalidate_it_finish(req, &oit, dentry);
6251 ll_intent_release(&oit);
6255 /* Unlinked? Unhash dentry, so it is not picked up later by
6256 * do_lookup() -> ll_revalidate_it(). We cannot use d_drop here to
6257 * preserve get_cwd functionality on 2.6 (bug 10503).
6259 if (!dentry->d_inode->i_nlink)
6260 d_lustre_invalidate(dentry);
6262 ll_lookup_finish_locks(&oit, dentry);
6264 ptlrpc_req_put(req);
6269 static int ll_merge_md_attr(struct inode *inode)
6271 struct ll_inode_info *lli = ll_i2info(inode);
6272 struct lmv_stripe_object *lsm_obj;
6273 struct cl_attr attr = { 0 };
6276 if (!ll_dir_striped(inode))
6279 down_read(&lli->lli_lsm_sem);
6280 if (!ll_dir_striped_locked(inode)) {
6281 up_read(&lli->lli_lsm_sem);
6284 LASSERT(lli->lli_lsm_obj != NULL);
6286 lsm_obj = lmv_stripe_object_get(lli->lli_lsm_obj);
6287 up_read(&lli->lli_lsm_sem);
6289 rc = md_merge_attr(ll_i2mdexp(inode), lsm_obj,
6290 &attr, ll_md_blocking_ast);
6291 lmv_stripe_object_put(&lsm_obj);
6295 spin_lock(&inode->i_lock);
6296 set_nlink(inode, attr.cat_nlink);
6297 spin_unlock(&inode->i_lock);
6299 inode->i_blocks = attr.cat_blocks;
6300 i_size_write(inode, attr.cat_size);
6302 ll_i2info(inode)->lli_atime = attr.cat_atime;
6303 ll_i2info(inode)->lli_mtime = attr.cat_mtime;
6304 ll_i2info(inode)->lli_ctime = attr.cat_ctime;
6309 int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
6310 unsigned int flags, bool foreign)
6312 struct inode *inode = de->d_inode;
6313 struct ll_sb_info *sbi = ll_i2sbi(inode);
6314 struct ll_inode_info *lli = ll_i2info(inode);
6315 struct dentry *parent;
6317 bool need_glimpse = true;
6318 ktime_t kstart = ktime_get();
6321 CDEBUG(D_VFSTRACE|D_IOTRACE,
6322 "START file "DNAME":"DFID"(%p), request_mask %d, flags %u, foreign %d\n",
6323 encode_fn_dentry(de), PFID(ll_inode2fid(inode)), inode,
6324 request_mask, flags, foreign);
6326 /* The OST object(s) determine the file size, blocks and mtime. */
6327 if (!(request_mask & STATX_SIZE || request_mask & STATX_BLOCKS ||
6328 request_mask & STATX_MTIME))
6329 need_glimpse = false;
6331 parent = dget_parent(de);
6332 dir = d_inode(parent);
6333 ll_statahead_enter(dir, de);
6334 if (dentry_may_statahead(dir, de))
6335 ll_start_statahead(dir, de, need_glimpse &&
6336 !(flags & AT_STATX_DONT_SYNC));
6339 if (flags & AT_STATX_DONT_SYNC)
6340 GOTO(fill_attr, rc = 0);
6342 rc = ll_inode_revalidate(de, IT_GETATTR);
6346 /* foreign files/dirs are always of zero length, so there is
6347 * no need to validate the size.
6348 */
6349 if (S_ISREG(inode->i_mode) && !foreign) {
6353 GOTO(fill_attr, rc);
6355 rc = pcc_inode_getattr(inode, request_mask, flags, &cached);
6356 if (cached && rc < 0)
6360 GOTO(fill_attr, rc);
6363 * If the returned attr is masked with OBD_MD_FLSIZE &
6364 * OBD_MD_FLBLOCKS & OBD_MD_FLMTIME, it means that the file size
6365 * or blocks obtained from MDT is strictly correct, and the file
6366 * is usually not being modified by clients, and the [a|m|c]time
6367 * got from MDT is also strictly correct.
6368 * Under this circumstance, it does not need to send glimpse
6369 * RPCs to OSTs for file attributes such as the size and blocks.
6371 if (lli->lli_attr_valid & OBD_MD_FLSIZE &&
6372 lli->lli_attr_valid & OBD_MD_FLBLOCKS &&
6373 lli->lli_attr_valid & OBD_MD_FLMTIME) {
6374 inode_set_mtime(inode, lli->lli_mtime, 0);
6375 if (lli->lli_attr_valid & OBD_MD_FLATIME)
6376 inode_set_atime(inode, lli->lli_atime, 0);
6377 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
6378 inode_set_ctime(inode, lli->lli_ctime, 0);
6379 GOTO(fill_attr, rc);
6382 /* In case of restore, the MDT has the right size and has
6383 * already sent it back without granting the layout lock,
6384 * so the inode is up-to-date and glimpse is useless.
6385 * Also to glimpse we need the layout; in case of a running
6386 * restore the MDT holds the layout lock so the glimpse will
6387 * block until the end of restore (getattr will block).
6388 */
6389 if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
6390 rc = ll_glimpse_size(inode);
6395 /* If the object isn't a regular file then don't validate size;
6396 * a foreign dir is not a striped dir.
6397 */
6399 rc = ll_merge_md_attr(inode);
6404 if (lli->lli_attr_valid & OBD_MD_FLATIME)
6405 inode_set_atime(inode, lli->lli_atime, 0);
6406 if (lli->lli_attr_valid & OBD_MD_FLMTIME)
6407 inode_set_mtime(inode, lli->lli_mtime, 0);
6408 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
6409 inode_set_ctime(inode, lli->lli_ctime, 0);
6413 CFS_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
6415 if (ll_need_32bit_api(sbi)) {
6416 stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
6417 stat->dev = ll_compat_encode_dev(inode->i_sb->s_dev);
6418 stat->rdev = ll_compat_encode_dev(inode->i_rdev);
6420 stat->ino = inode->i_ino;
6421 stat->dev = inode->i_sb->s_dev;
6422 stat->rdev = inode->i_rdev;
6425 /* foreign symlink to be exposed as a real symlink */
6427 stat->mode = inode->i_mode;
6429 stat->mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
6431 CFS_FAIL_CHECK_RESET(OBD_FAIL_LLITE_STAT_RACE1,
6432 OBD_FAIL_LLITE_STAT_RACE2);
6433 /* pause to let another stat make intermediate changes to the inode */
6434 CFS_RACE(OBD_FAIL_LLITE_STAT_RACE2);
6437 * ll_merge_attr() (in case of regular files) does not update
6438 * the inode's timestamps atomically. Protect against intermediate
6439 * changes.
6440 */
6441 if (!S_ISDIR(inode->i_mode))
6442 ll_inode_size_lock(inode);
6443 stat->uid = inode->i_uid;
6444 stat->gid = inode->i_gid;
6445 stat->atime = inode_get_atime(inode);
6446 stat->mtime = inode_get_mtime(inode);
6447 stat->ctime = inode_get_ctime(inode);
6449 /* stat->blksize is used to report the preferred IO size */
6450 if (sbi->ll_stat_blksize)
6451 stat->blksize = sbi->ll_stat_blksize;
6452 else if (S_ISREG(inode->i_mode))
6453 stat->blksize = min(PTLRPC_MAX_BRW_SIZE,
6454 1U << LL_MAX_BLKSIZE_BITS);
6455 else if (S_ISDIR(inode->i_mode))
6456 stat->blksize = min(MD_MAX_BRW_SIZE,
6457 1U << LL_MAX_BLKSIZE_BITS);
6459 stat->blksize = 1 << inode->i_sb->s_blocksize_bits;
6461 stat->nlink = inode->i_nlink;
6462 stat->size = i_size_read(inode);
6463 stat->blocks = inode->i_blocks;
6465 if (!S_ISDIR(inode->i_mode))
6466 ll_inode_size_unlock(inode);
6468 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
6469 if (flags & AT_STATX_DONT_SYNC) {
6470 if (stat->size == 0 &&
6471 lli->lli_attr_valid & OBD_MD_FLLAZYSIZE)
6472 stat->size = lli->lli_lazysize;
6473 if (stat->blocks == 0 &&
6474 lli->lli_attr_valid & OBD_MD_FLLAZYBLOCKS)
6475 stat->blocks = lli->lli_lazyblocks;
6478 if (lli->lli_attr_valid & OBD_MD_FLBTIME) {
6479 stat->result_mask |= STATX_BTIME;
6480 stat->btime.tv_sec = lli->lli_btime;
6483 stat->attributes_mask = STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
6484 #ifdef HAVE_LUSTRE_CRYPTO
6485 stat->attributes_mask |= STATX_ATTR_ENCRYPTED;
6487 stat->attributes |= ll_inode_to_ext_flags(inode->i_flags);
6488 /* if Lustre specific LUSTRE_ENCRYPT_FL flag is set, also set
6489 * ext4 equivalent to please statx
6491 if (stat->attributes & LUSTRE_ENCRYPT_FL)
6492 stat->attributes |= STATX_ATTR_ENCRYPTED;
6493 stat->result_mask &= request_mask;
6496 ll_stats_ops_tally(sbi, LPROC_LL_GETATTR,
6497 ktime_us_delta(ktime_get(), kstart));
6500 "COMPLETED file "DNAME":"DFID"(%p), request_mask %d, flags %u, foreign %d\n",
6501 encode_fn_dentry(de), PFID(ll_inode2fid(inode)), inode,
6502 request_mask, flags, foreign);
6507 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
6508 int ll_getattr(struct mnt_idmap *map, const struct path *path,
6509 struct kstat *stat, u32 request_mask, unsigned int flags)
6511 return ll_getattr_dentry(path->dentry, stat, request_mask, flags,
6515 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
6517 return ll_getattr_dentry(de, stat, STATX_BASIC_STATS,
6518 AT_STATX_SYNC_AS_STAT, false);
6522 static int cl_falloc(struct file *file, struct inode *inode, int mode,
6523 loff_t offset, loff_t len)
6525 loff_t size = i_size_read(inode);
6532 env = cl_env_get(&refcheck);
6534 RETURN(PTR_ERR(env));
6536 io = vvp_env_new_io(env);
6537 io->ci_obj = ll_i2info(inode)->lli_clob;
6538 ll_io_set_mirror(io, file);
6540 io->ci_verify_layout = 1;
6541 io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu);
6542 io->u.ci_setattr.sa_falloc_mode = mode;
6543 io->u.ci_setattr.sa_falloc_offset = offset;
6544 io->u.ci_setattr.sa_falloc_end = offset + len;
6545 io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE;
6547 CDEBUG(D_INODE, "UID %u GID %u PRJID %u\n",
6548 from_kuid(&init_user_ns, inode->i_uid),
6549 from_kgid(&init_user_ns, inode->i_gid),
6550 ll_i2info(inode)->lli_projid);
6552 io->u.ci_setattr.sa_attr_uid = from_kuid(&init_user_ns, inode->i_uid);
6553 io->u.ci_setattr.sa_attr_gid = from_kgid(&init_user_ns, inode->i_gid);
6554 io->u.ci_setattr.sa_attr_projid = ll_i2info(inode)->lli_projid;
6556 if (io->u.ci_setattr.sa_falloc_end > size) {
6557 loff_t newsize = io->u.ci_setattr.sa_falloc_end;
6559 /* Check new size against VFS/VM file size limit and rlimit */
6560 rc = inode_newsize_ok(inode, newsize);
6563 if (newsize > ll_file_maxbytes(inode)) {
6564 CDEBUG(D_INODE, "file size too large %llu > %llu\n",
6565 (unsigned long long)newsize,
6566 ll_file_maxbytes(inode));
6573 rc = cl_io_init(env, io, CIT_SETATTR, io->ci_obj);
6575 rc = cl_io_loop(env, io);
6578 cl_io_fini(env, io);
6579 } while (unlikely(io->ci_need_restart));
6582 cl_env_put(env, &refcheck);
6586 static long ll_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
6588 struct inode *inode = file_inode(filp);
6591 CDEBUG(D_VFSTRACE, "VFS Op: "DNAME", mode %x, offset %lld, len %lld\n",
6592 encode_fn_file(filp), mode, offset, len);
6594 if (offset < 0 || len <= 0)
6597 * Encrypted inodes can't handle collapse range or zero range or insert
6598 * range since we would need to re-encrypt blocks with a different IV or
6599 * XTS tweak (which are based on the logical block number).
6600 * Similar to what ext4 does.
6602 if (IS_ENCRYPTED(inode) &&
6603 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
6604 FALLOC_FL_ZERO_RANGE)))
6605 RETURN(-EOPNOTSUPP);
6608 * mode == 0 (which is standard prealloc) and PUNCH/ZERO are supported.
6609 * The rest of the mode options are not supported yet.
6611 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
6612 FALLOC_FL_ZERO_RANGE))
6613 RETURN(-EOPNOTSUPP);
6615 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1);
6617 rc = cl_falloc(filp, inode, mode, offset, len);
6619 * ENOTSUPP (524) is an NFSv3-specific error code erroneously
6620 * used by Lustre in several places. Returning it here would
6621 * confuse applications that explicitly test for EOPNOTSUPP
6622 * (95) and fall back to ftruncate().
6623 */
6624 if (rc == -ENOTSUPP)
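/*
 * For reference, the supported modes map directly onto the fallocate(2)
 * flags; e.g. punching a hole without changing the file size:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 */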
6630 static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6631 __u64 start, __u64 len)
6635 struct fiemap *fiemap;
6636 unsigned int extent_count = fieinfo->fi_extents_max;
6638 num_bytes = sizeof(*fiemap) + (extent_count *
6639 sizeof(struct fiemap_extent));
6640 OBD_ALLOC_LARGE(fiemap, num_bytes);
6644 fiemap->fm_flags = fieinfo->fi_flags;
6645 fiemap->fm_extent_count = fieinfo->fi_extents_max;
6646 fiemap->fm_start = start;
6647 fiemap->fm_length = len;
6648 if (extent_count > 0 &&
6649 copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
6650 sizeof(struct fiemap_extent)) != 0)
6651 GOTO(out, rc = -EFAULT);
6653 rc = ll_do_fiemap(inode, fiemap, num_bytes);
6655 if (IS_ENCRYPTED(inode) && extent_count > 0) {
6658 for (i = 0; i < fiemap->fm_mapped_extents; i++)
6659 fiemap->fm_extents[i].fe_flags |=
6660 FIEMAP_EXTENT_DATA_ENCRYPTED |
6661 FIEMAP_EXTENT_ENCODED;
6664 fieinfo->fi_flags = fiemap->fm_flags;
6665 fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
6666 if (extent_count > 0 &&
6667 copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
6668 fiemap->fm_mapped_extents *
6669 sizeof(struct fiemap_extent)) != 0)
6670 GOTO(out, rc = -EFAULT);
6672 OBD_FREE_LARGE(fiemap, num_bytes);
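/*
 * ll_fiemap() above serves the generic FS_IOC_FIEMAP interface, so a
 * userspace extent walk is the stock fiemap pattern (sketch, using the
 * definitions from linux/fiemap.h):
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   8 * sizeof(struct fiemap_extent));
 *
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 8;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 */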
6676 int ll_inode_permission(struct mnt_idmap *idmap, struct inode *inode, int mask)
6679 struct ll_sb_info *sbi;
6680 struct root_squash_info *squash;
6681 struct cred *cred = NULL;
6682 const struct cred *old_cred = NULL;
6683 bool squash_id = false;
6684 ktime_t kstart = ktime_get();
6687 if (mask & MAY_NOT_BLOCK)
6691 * as root inode are NOT getting validated in lookup operation,
6692 * need to revalidate PERM before permission check.
6694 if (is_root_inode(inode)) {
6695 rc = ll_inode_revalidate(inode->i_sb->s_root, IT_GETATTR);
6700 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
6701 PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
6703 /* squash fsuid/fsgid if needed */
6704 sbi = ll_i2sbi(inode);
6705 squash = &sbi->ll_squash;
6706 if (unlikely(squash->rsi_uid != 0 &&
6707 uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
6708 !test_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags))) {
6712 CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
6713 __kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
6714 squash->rsi_uid, squash->rsi_gid);
6716 /* update current process's credentials and FS capability */
6717 cred = prepare_creds();
6721 cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
6722 cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
6723 cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective);
6724 cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
6726 old_cred = override_creds(cred);
6729 rc = generic_permission(idmap, inode, mask);
6730 /* restore current process's credentials and FS capability */
6732 revert_creds(old_cred);
6737 ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM,
6738 ktime_us_delta(ktime_get(), kstart));
6743 # define ll_splice_read pcc_file_splice_read
6745 /* -o localflock - only provides locally consistent flock locks */
6746 static const struct file_operations ll_file_operations = {
6747 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
6748 # ifdef HAVE_SYNC_READ_WRITE
6749 .read = new_sync_read,
6750 .write = new_sync_write,
6752 .read_iter = ll_file_read_iter,
6753 .write_iter = ll_file_write_iter,
6754 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6755 .read = ll_file_read,
6756 .aio_read = ll_file_aio_read,
6757 .write = ll_file_write,
6758 .aio_write = ll_file_aio_write,
6759 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6760 .unlocked_ioctl = ll_file_ioctl,
6761 .open = ll_file_open,
6762 .release = ll_file_release,
6763 .mmap = ll_file_mmap,
6764 .llseek = ll_file_seek,
6765 .splice_read = ll_splice_read,
6766 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
6767 .splice_write = iter_file_splice_write,
6771 .fallocate = ll_fallocate,
6774 static const struct file_operations ll_file_operations_flock = {
6775 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
6776 # ifdef HAVE_SYNC_READ_WRITE
6777 .read = new_sync_read,
6778 .write = new_sync_write,
6779 # endif /* HAVE_SYNC_READ_WRITE */
6780 .read_iter = ll_file_read_iter,
6781 .write_iter = ll_file_write_iter,
6782 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6783 .read = ll_file_read,
6784 .aio_read = ll_file_aio_read,
6785 .write = ll_file_write,
6786 .aio_write = ll_file_aio_write,
6787 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6788 .unlocked_ioctl = ll_file_ioctl,
6789 .open = ll_file_open,
6790 .release = ll_file_release,
6791 .mmap = ll_file_mmap,
6792 .llseek = ll_file_seek,
6793 .splice_read = ll_splice_read,
6794 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
6795 .splice_write = iter_file_splice_write,
6799 .flock = ll_file_flock,
6800 .lock = ll_file_flock,
6801 .fallocate = ll_fallocate,
6804 /* These are for -o noflock - to return ENOSYS on flock calls */
6805 static const struct file_operations ll_file_operations_noflock = {
6806 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
6807 # ifdef HAVE_SYNC_READ_WRITE
6808 .read = new_sync_read,
6809 .write = new_sync_write,
6810 # endif /* HAVE_SYNC_READ_WRITE */
6811 .read_iter = ll_file_read_iter,
6812 .write_iter = ll_file_write_iter,
6813 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6814 .read = ll_file_read,
6815 .aio_read = ll_file_aio_read,
6816 .write = ll_file_write,
6817 .aio_write = ll_file_aio_write,
6818 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6819 .unlocked_ioctl = ll_file_ioctl,
6820 .open = ll_file_open,
6821 .release = ll_file_release,
6822 .mmap = ll_file_mmap,
6823 .llseek = ll_file_seek,
6824 .splice_read = ll_splice_read,
6825 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
6826 .splice_write = iter_file_splice_write,
6830 .flock = ll_file_noflock,
6831 .lock = ll_file_noflock,
6832 .fallocate = ll_fallocate,
6835 const struct inode_operations ll_file_inode_operations = {
6836 .setattr = ll_setattr,
6837 .getattr = ll_getattr,
6838 .permission = ll_inode_permission,
6839 #ifdef HAVE_IOP_XATTR
6840 .setxattr = ll_setxattr,
6841 .getxattr = ll_getxattr,
6842 .removexattr = ll_removexattr,
6844 .listxattr = ll_listxattr,
6845 .fiemap = ll_fiemap,
6846 #ifdef HAVE_IOP_GET_INODE_ACL
6847 .get_inode_acl = ll_get_inode_acl,
6849 .get_acl = ll_get_acl,
6850 #ifdef HAVE_IOP_SET_ACL
6851 .set_acl = ll_set_acl,
6853 #ifdef HAVE_FILEATTR_GET
6854 .fileattr_get = ll_fileattr_get,
6855 .fileattr_set = ll_fileattr_set,
6859 const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi)
6861 const struct file_operations *fops = &ll_file_operations_noflock;
6863 if (test_bit(LL_SBI_FLOCK, sbi->ll_flags))
6864 fops = &ll_file_operations_flock;
6865 else if (test_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags))
6866 fops = &ll_file_operations;
6871 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
6873 struct ll_inode_info *lli = ll_i2info(inode);
6874 struct cl_object *obj = lli->lli_clob;
6883 env = cl_env_get(&refcheck);
6885 RETURN(PTR_ERR(env));
6887 rc = cl_conf_set(env, lli->lli_clob, conf);
6891 if (conf->coc_opc == OBJECT_CONF_SET) {
6892 struct ldlm_lock *lock = conf->coc_lock;
6893 struct cl_layout cl = {
6897 LASSERT(lock != NULL);
6898 LASSERT(ldlm_has_layout(lock));
6900 /* it can only be allowed to match after the layout is
6901 * applied to the inode, otherwise a false layout would be
6902 * seen. Applying the layout should happen before dropping
6903 * the lock.
6904 */
6905 ldlm_lock_allow_match(lock);
6907 rc = cl_object_layout_get(env, obj, &cl);
6912 DFID": layout version change: %u -> %u\n",
6913 PFID(&lli->lli_fid), ll_layout_version_get(lli),
6915 ll_layout_version_set(lli, cl.cl_layout_gen);
6919 cl_env_put(env, &refcheck);
6921 RETURN(rc < 0 ? rc : 0);
/* Fetch layout from MDT with getxattr request, if it's not ready yet */
static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req;
	void *lvbdata;
	void *lmm;
	int lmmsize;
	int rc;

	ENTRY;

	CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%u\n",
	       PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
	       lock->l_lvb_data, lock->l_lvb_len);

	if (lock->l_lvb_data != NULL)
		RETURN(0);

	/* if layout lock was granted right away, the layout is returned
	 * within DLM_LVB of dlm reply; otherwise if the lock was ever
	 * blocked and then granted via completion ast, we have to fetch
	 * layout here. Please note that we can't use the LVB buffer in
	 * completion AST because it doesn't have a large enough buffer.
	 */
	rc = ll_get_default_mdsize(sbi, &lmmsize);
	if (rc < 0)
		RETURN(rc);

	rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), OBD_MD_FLXATTR,
			 XATTR_NAME_LOV, lmmsize, ll_i2projid(inode), &req);
	if (rc < 0) {
		if (rc == -ENODATA)
			GOTO(out, rc = 0); /* empty layout */
		else
			RETURN(rc);
	}

	lmmsize = rc;
	rc = 0;
	if (lmmsize == 0) /* empty layout */
		GOTO(out, rc = 0);

	lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
	if (lmm == NULL)
		GOTO(out, rc = -EFAULT);

	OBD_ALLOC_LARGE(lvbdata, lmmsize);
	if (lvbdata == NULL)
		GOTO(out, rc = -ENOMEM);

	memcpy(lvbdata, lmm, lmmsize);
	lock_res_and_lock(lock);
	if (unlikely(lock->l_lvb_data == NULL)) {
		lock->l_lvb_type = LVB_T_LAYOUT;
		lock->l_lvb_data = lvbdata;
		lock->l_lvb_len = lmmsize;
		lvbdata = NULL;
	}
	unlock_res_and_lock(lock);

	/* a racing thread may have installed its own LVB buffer above; in
	 * that case ours was not handed over and must be freed here
	 */
	if (lvbdata)
		OBD_FREE_LARGE(lvbdata, lmmsize);

	EXIT;

out:
	ptlrpc_req_put(req);
	return rc;
}
/**
 * Apply the layout to the inode. Layout lock is held and will be released
 * before this function returns.
 */
static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
			      struct inode *inode, bool try)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ldlm_lock *lock;
	struct cl_object_conf conf;
	int rc = 0;
	bool lvb_ready;
	bool wait_layout = false;

	ENTRY;

	LASSERT(lustre_handle_is_used(lockh));
	lock = ldlm_handle2lock(lockh);
	LASSERT(lock != NULL);

	if (!ldlm_has_layout(lock))
		GOTO(out, rc = -EAGAIN);

	LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
		   PFID(&lli->lli_fid), inode);

	/* in case this is a caching lock and reinstate with new inode */
	md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);

	lock_res_and_lock(lock);
	lvb_ready = ldlm_is_lvb_ready(lock);
	unlock_res_and_lock(lock);

	/* checking lvb_ready is racy but this is okay. The worst case is
	 * that multiple processes may configure the file at the same time.
	 */
	if (lvb_ready)
		GOTO(out, rc = 0);

	rc = ll_layout_fetch(inode, lock);
	if (rc < 0)
		GOTO(out, rc);
	/* for layout lock, lmm is stored in lock's lvb.
	 * lvb_data is immutable if the lock is held so it's safe to access it
	 * without res lock.
	 *
	 * set layout to file. Unlikely this will fail as old layout was
	 * surely eliminated.
	 */
	memset(&conf, 0, sizeof(conf));
	conf.coc_opc = OBJECT_CONF_SET;
	conf.coc_inode = inode;
	conf.coc_lock = lock;
	conf.coc_try = try;
	conf.u.coc_layout.lb_buf = lock->l_lvb_data;
	conf.u.coc_layout.lb_len = lock->l_lvb_len;
	rc = ll_layout_conf(inode, &conf);

	/* refresh layout failed, need to wait */
	wait_layout = rc == -EBUSY;
	EXIT;
out:
	ldlm_lock_put(lock);
	ldlm_lock_decref(lockh, mode);

	/* wait for IO to complete if it's still being used. */
	if (wait_layout) {
		CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
		       sbi->ll_fsname, PFID(&lli->lli_fid), inode);

		memset(&conf, 0, sizeof(conf));
		conf.coc_opc = OBJECT_CONF_WAIT;
		conf.coc_inode = inode;
		rc = ll_layout_conf(inode, &conf);
		if (rc == 0)
			rc = -EAGAIN;

		CDEBUG(D_INODE, "%s file="DFID" waiting layout return: %d\n",
		       sbi->ll_fsname, PFID(&lli->lli_fid), rc);
	}

	if (rc == -ERESTARTSYS) {
		__u16 refcheck;
		struct lu_env *env;
		struct cl_object *obj = lli->lli_clob;

		env = cl_env_get(&refcheck);
		if (IS_ERR(env))
			RETURN(PTR_ERR(env));

		CDEBUG(D_INODE, "prune without lock "DFID"\n",
		       PFID(lu_object_fid(&obj->co_lu)));

		trunc_sem_down_write(&lli->lli_trunc_sem);
		cl_object_prune(env, obj);
		trunc_sem_up_write(&lli->lli_trunc_sem);
		cl_env_put(env, &refcheck);
		rc = -EAGAIN;
	}

	RETURN(rc);
}
/**
 * ll_layout_intent() - Issue layout intent RPC to MDS.
 * @inode: file inode
 * @intent: layout intent
 *
 * Return: %0 on success, negative error code on failure
 */
static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct lookup_intent it;
	struct ptlrpc_request *req;
	int rc;

	ENTRY;

	op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
				     0, 0, LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		RETURN(PTR_ERR(op_data));

	op_data->op_data = intent;
	op_data->op_data_size = sizeof(*intent);

	memset(&it, 0, sizeof(it));
	it.it_op = IT_LAYOUT;
	if (intent->lai_opc == LAYOUT_INTENT_WRITE ||
	    intent->lai_opc == LAYOUT_INTENT_TRUNC ||
	    intent->lai_opc == LAYOUT_INTENT_PCCRO_SET ||
	    intent->lai_opc == LAYOUT_INTENT_PCCRO_CLEAR)
		it.it_open_flags = MDS_FMODE_WRITE;

	LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
			  sbi->ll_fsname, PFID(&lli->lli_fid), inode);

	rc = ll_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
			    &ll_md_blocking_ast, 0, true);
	if (it.it_request != NULL)
		ptlrpc_req_put(it.it_request);
	it.it_request = NULL;

	ll_finish_md_op_data(op_data);
	/* set lock data in case this is a new lock */
	if (rc == 0 && it.it_lock_mode)
		ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);

	ll_intent_drop_lock(&it);

	RETURN(rc);
}
/*
 * This function checks if there exists a LAYOUT lock on the client side,
 * or enqueues it if it doesn't have one in cache.
 *
 * This function will not hold the layout lock, so it may be revoked any time
 * after this function returns. Any operation that depends on the layout
 * should be redone in that case.
 *
 * This function should be called before lov_io_init() to get an uptodate
 * layout version; the caller should save the version number, and after IO
 * is finished, this function should be called again to verify that the
 * layout has not changed while the IO was in flight.
 */
int ll_layout_refresh(struct inode *inode, __u32 *gen)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct lustre_handle lockh;
	struct layout_intent intent = {
		.lai_opc = LAYOUT_INTENT_ACCESS,
	};
	enum ldlm_mode mode;
	bool try = true;
	int rc;

	ENTRY;

	*gen = ll_layout_version_get(lli);
	if (!test_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags) ||
	    *gen != CL_LAYOUT_GEN_NONE)
		RETURN(0);

	LASSERT(fid_is_sane(ll_inode2fid(inode)));
	LASSERT(S_ISREG(inode->i_mode));

	while (1) {
		/* mostly layout lock is caching on the local side, so try to
		 * match it before grabbing layout lock mutex.
		 */
		mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
				       LCK_CR | LCK_CW | LCK_PR |
				       LCK_PW | LCK_EX);
		if (mode != 0) { /* hit cached lock */
			rc = ll_layout_lock_set(&lockh, mode, inode, try);
			try = false;
			if (rc == -EAGAIN)
				continue;
			break;
		}

		/* take layout lock mutex to enqueue layout lock exclusively. */
		mutex_lock(&lli->lli_layout_mutex);
		rc = ll_layout_intent(inode, &intent);
		mutex_unlock(&lli->lli_layout_mutex);
		if (rc != 0)
			break;
	}

	if (rc == 0)
		*gen = ll_layout_version_get(lli);

	RETURN(rc);
}
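/*
 * Usage sketch of the save/verify contract described above (hypothetical
 * caller; the real IO paths live in the cl_io machinery):
 *
 *	__u32 gen_before, gen_after;
 *
 *	rc = ll_layout_refresh(inode, &gen_before);
 *	// ... submit IO against this layout ...
 *	rc = ll_layout_refresh(inode, &gen_after);
 *	if (rc == 0 && gen_after != gen_before)
 *		// layout changed while IO was in flight; redo the IO
 */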
/**
 * ll_layout_write_intent() - Issue layout intent RPC indicating where in a
 * file an IO is about to write.
 * @inode: file inode.
 * @opc: type of layout operation being requested
 * @ext: write range with start offset of file in bytes where an IO is
 * about to write, and exclusive end offset in bytes.
 *
 * Return: %0 on success, negative error code on failure
 */
int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
			   struct lu_extent *ext)
{
	struct layout_intent intent = {
		.lai_opc = opc,
		.lai_extent.e_start = ext->e_start,
		.lai_extent.e_end = ext->e_end,
	};
	int rc;

	ENTRY;

	rc = ll_layout_intent(inode, &intent);

	RETURN(rc);
}
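/*
 * Usage sketch (hypothetical caller): a write about to dirty the range
 * [pos, pos + count) would instantiate that part of the layout first;
 * "pos" and "count" are illustrative names:
 *
 *	struct lu_extent ext = {
 *		.e_start = pos,
 *		.e_end   = pos + count,
 *	};
 *	rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE, &ext);
 */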
/* This function sends a restore request to the MDT */
int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct hsm_user_request *hur;
	int len, rc;

	ENTRY;

	len = sizeof(struct hsm_user_request) +
	      sizeof(struct hsm_user_item);
	OBD_ALLOC(hur, len);
	if (hur == NULL)
		RETURN(-ENOMEM);

	hur->hur_request.hr_action = HUA_RESTORE;
	hur->hur_request.hr_archive_id = 0;
	hur->hur_request.hr_flags = 0;
	memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
	       sizeof(hur->hur_user_item[0].hui_fid));
	hur->hur_user_item[0].hui_extent.offset = offset;
	hur->hur_user_item[0].hui_extent.length = length;
	hur->hur_request.hr_itemcount = 1;
	rc = mutex_lock_interruptible(&lli->lli_layout_mutex);
	if (rc)
		GOTO(out_free, rc);

	rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,
			   len, hur, NULL);
	mutex_unlock(&lli->lli_layout_mutex);
out_free:
	OBD_FREE(hur, len);

	RETURN(rc);
}
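/*
 * Usage sketch (hypothetical caller): an IO path that finds the file
 * HSM-released could request a restore of the range it needs, then
 * refresh the layout once the coordinator has staged the data back:
 *
 *	rc = ll_layout_restore(inode, offset, count);
 *	if (rc == 0)
 *		rc = ll_layout_refresh(inode, &gen);
 */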