/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE
#include <lustre_dlm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/user_namespace.h>
#include <linux/uidgid.h>
#include <linux/falloc.h>
#include <linux/ktime.h>
#ifdef HAVE_LINUX_FILELOCK_HEADER
#include <linux/filelock.h>
#endif

#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_swab.h>
#include <libcfs/linux/linux-misc.h>

#include "cl_object.h"
#include "llite_internal.h"
#include "vvp_internal.h"
struct split_param {
	struct inode	*sp_inode;
	__u16		 sp_mirror_id;
};

struct pcc_param {
	__u64	pa_data_version;
	__u32	pa_archive_id;
	__u32	pa_layout_gen;
};

struct swap_layouts_param {
	struct inode	*slp_inode;
	__u64		 slp_dv1;
	__u64		 slp_dv2;
};

static int
ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);

static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
			  bool *lease_broken);
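
/* Allocate per-open file state from the ll_file_data slab cache; it is
 * released again in ll_file_data_put().
 */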
static struct ll_file_data *ll_file_data_get(void)
{
	struct ll_file_data *fd;

	OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, GFP_NOFS);
	if (fd == NULL)
		return NULL;

	fd->fd_write_failed = false;
	pcc_file_init(&fd->fd_pcc_file);

	return fd;
}

static void ll_file_data_put(struct ll_file_data *fd)
{
	if (fd != NULL)
		OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
}
/**
 * Packs all the attributes into @op_data for the CLOSE rpc.
 */
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
			     struct obd_client_handle *och)
{
	ll_prep_md_op_data(op_data, inode, NULL, NULL,
			   0, 0, LUSTRE_OPC_ANY, NULL);

	op_data->op_attr.ia_mode = inode->i_mode;
	op_data->op_attr.ia_atime = inode_get_atime(inode);
	op_data->op_attr.ia_mtime = inode_get_mtime(inode);
	op_data->op_attr.ia_ctime = inode_get_ctime(inode);
	/* In case of an encrypted file without the key, the visible size was
	 * rounded up to the next LUSTRE_ENCRYPTION_UNIT_SIZE, and the clear
	 * text size was stored into lli_lazysize in ll_merge_attr(), so set
	 * the proper file size now that we are closing.
	 */
	if (llcrypt_require_key(inode) == -ENOKEY &&
	    ll_i2info(inode)->lli_attr_valid & OBD_MD_FLLAZYSIZE)
		op_data->op_attr.ia_size = ll_i2info(inode)->lli_lazysize;
	else
		op_data->op_attr.ia_size = i_size_read(inode);
	op_data->op_attr.ia_valid |= (ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
				      ATTR_MTIME | ATTR_MTIME_SET |
				      ATTR_CTIME);
	op_data->op_xvalid |= OP_XVALID_CTIME_SET;
	op_data->op_attr_blocks = inode->i_blocks;
	op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
	if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
		op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
	op_data->op_open_handle = och->och_open_handle;

	if (och->och_flags & FMODE_WRITE &&
	    test_and_clear_bit(LLIF_DATA_MODIFIED,
			       &ll_i2info(inode)->lli_flags))
		/* For HSM: if inode data has been modified, pack it so that
		 * the MDT can set the data-dirty flag in the archive.
		 */
		op_data->op_bias |= MDS_DATA_MODIFIED;
}
/**
 * Perform a close, possibly with a bias.
 * The meaning of "data" depends on the value of "bias".
 *
 * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
 * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to a
 * struct swap_layouts_param containing the inode to swap with and the old and
 * new data versions.
 */
static int ll_close_inode_openhandle(struct inode *inode,
				     struct obd_client_handle *och,
				     enum mds_op_bias bias, void *data)
{
	struct obd_export *md_exp = ll_i2mdexp(inode);
	const struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc;

	if (class_exp2obd(md_exp) == NULL) {
		CERROR("%s: invalid MDC connection handle closing "DFID"\n",
		       ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
		GOTO(out, rc = 0);
	}

	OBD_ALLOC_PTR(op_data);
	/* We leak the openhandle and request here on error, but there is not
	 * much to be done in the OOM case since the app won't retry the close
	 * on error either.
	 */
	if (op_data == NULL)
		GOTO(out, rc = -ENOMEM);
	ll_prepare_close(inode, op_data, och);
	switch (bias) {
	case MDS_CLOSE_LAYOUT_MERGE:
		/* merge blocks from the victim inode */
		op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		fallthrough;
	case MDS_CLOSE_LAYOUT_SPLIT: {
		struct split_param *sp = data;

		LASSERT(data != NULL);
		op_data->op_bias |= bias;
		op_data->op_data_version = 0;
		op_data->op_lease_handle = och->och_lease_handle;
		if (bias == MDS_CLOSE_LAYOUT_SPLIT) {
			op_data->op_fid2 = *ll_inode2fid(sp->sp_inode);
			op_data->op_mirror_id = sp->sp_mirror_id;
		} else { /* MDS_CLOSE_LAYOUT_MERGE */
			op_data->op_fid2 = *ll_inode2fid(data);
		}
		break;
	}
	case MDS_CLOSE_LAYOUT_SWAP: {
		struct swap_layouts_param *slp = data;

		LASSERT(data != NULL);
		op_data->op_bias |= (bias | MDS_CLOSE_LAYOUT_SWAP_HSM);
		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_fid2 = *ll_inode2fid(slp->slp_inode);
		op_data->op_data_version = slp->slp_dv1;
		op_data->op_data_version2 = slp->slp_dv2;
		break;
	}
	case MDS_CLOSE_RESYNC_DONE: {
		struct ll_ioc_lease *ioc = data;

		LASSERT(data != NULL);
		op_data->op_attr_blocks +=
			ioc->lil_count * op_data->op_attr_blocks;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		op_data->op_bias |= MDS_CLOSE_RESYNC_DONE;

		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_data = &ioc->lil_ids[0];
		op_data->op_data_size =
			ioc->lil_count * sizeof(ioc->lil_ids[0]);
		break;
	}
	case MDS_PCC_ATTACH: {
		struct pcc_param *param = data;

		LASSERT(data != NULL);
		op_data->op_bias |= MDS_HSM_RELEASE | MDS_PCC_ATTACH;
		op_data->op_archive_id = param->pa_archive_id;
		op_data->op_data_version = param->pa_data_version;
		op_data->op_lease_handle = och->och_lease_handle;
		break;
	}
	case MDS_HSM_RELEASE:
		LASSERT(data != NULL);
		op_data->op_bias |= MDS_HSM_RELEASE;
		op_data->op_data_version = *(__u64 *)data;
		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		break;

	default:
		LASSERT(data == NULL);
		break;
	}

	if (!(op_data->op_attr.ia_valid & ATTR_SIZE))
		op_data->op_xvalid |= OP_XVALID_LAZYSIZE;
	if (!(op_data->op_xvalid & OP_XVALID_BLOCKS))
		op_data->op_xvalid |= OP_XVALID_LAZYBLOCKS;

	rc = md_close(md_exp, op_data, och->och_mod, &req);
	if (rc != 0 && rc != -EINTR)
		CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
		       md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc);

	if (rc == 0 && op_data->op_bias & bias) {
		struct mdt_body *body;

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
			rc = -EBUSY;

		if (bias & MDS_PCC_ATTACH) {
			struct pcc_param *param = data;

			param->pa_layout_gen = body->mbo_layout_gen;
		}
	}

	ll_finish_md_op_data(op_data);

out:
	md_clear_open_replay_data(md_exp, och);
	och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
	OBD_FREE_PTR(och);

	ptlrpc_req_finished(req);	/* This is close request */
	return rc;
}
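
/* Close the per-open-mode MDS handle (read/write/exec) unless it is still
 * in use by other file descriptors on this inode.
 */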
int ll_md_real_close(struct inode *inode, fmode_t fmode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct obd_client_handle **och_p;
	struct obd_client_handle *och;
	__u64 *och_usecount;
	int rc = 0;

	if (fmode & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else if (fmode & FMODE_EXEC) {
		och_p = &lli->lli_mds_exec_och;
		och_usecount = &lli->lli_open_fd_exec_count;
	} else {
		LASSERT(fmode & FMODE_READ);
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	mutex_lock(&lli->lli_och_mutex);
	if (*och_usecount > 0) {
		/* There are still users of this handle, so skip
		 * freeing it.
		 */
		mutex_unlock(&lli->lli_och_mutex);
		return 0;
	}

	och = *och_p;
	*och_p = NULL;
	mutex_unlock(&lli->lli_och_mutex);

	if (och != NULL) {
		/* There might be a race and this handle may already
		 * be closed.
		 */
		rc = ll_close_inode_openhandle(inode, och, 0, NULL);
	}

	return rc;
}
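
/* Per-descriptor close: drop the group lock, lease and open handle held by
 * this file descriptor, and skip the MDS close RPC when a cached OPEN lock
 * makes it unnecessary.
 */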
static int ll_md_close(struct inode *inode, struct file *file)
{
	union ldlm_policy_data policy = {
		.l_inodebits	= { MDS_INODELOCK_OPEN },
	};
	__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
	struct ll_file_data *fd = file->private_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lustre_handle lockh;
	enum ldlm_mode lockmode;
	int rc = 0;

	/* clear group lock, if present */
	if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
		ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);

	mutex_lock(&lli->lli_och_mutex);
	if (fd->fd_lease_och != NULL) {
		bool lease_broken;
		struct obd_client_handle *lease_och;

		lease_och = fd->fd_lease_och;
		fd->fd_lease_och = NULL;
		mutex_unlock(&lli->lli_och_mutex);

		/* Usually the lease is not released when the application
		 * crashes, so we need to release it here.
		 */
		rc = ll_lease_close(lease_och, inode, &lease_broken);

		mutex_lock(&lli->lli_och_mutex);

		CDEBUG_LIMIT(rc ? D_ERROR : D_INODE,
			     "Clean up lease "DFID" %d/%d\n",
			     PFID(&lli->lli_fid), rc, lease_broken);
	}

	if (fd->fd_och != NULL) {
		struct obd_client_handle *och;

		och = fd->fd_och;
		fd->fd_och = NULL;
		mutex_unlock(&lli->lli_och_mutex);

		rc = ll_close_inode_openhandle(inode, och, 0, NULL);
		GOTO(out, rc);
	}

	/* Let's see if we have a good enough OPEN lock on the file and if
	 * we can skip talking to the MDS
	 */
	if (fd->fd_omode & FMODE_WRITE) {
		lockmode = LCK_CW;
		LASSERT(lli->lli_open_fd_write_count);
		lli->lli_open_fd_write_count--;
	} else if (fd->fd_omode & FMODE_EXEC) {
		lockmode = LCK_PR;
		LASSERT(lli->lli_open_fd_exec_count);
		lli->lli_open_fd_exec_count--;
	} else {
		lockmode = LCK_CR;
		LASSERT(lli->lli_open_fd_read_count);
		lli->lli_open_fd_read_count--;
	}
	mutex_unlock(&lli->lli_och_mutex);

	/* LU-4398: do not cache write open lock if the file has exec bit */
	if ((lockmode == LCK_CW && inode->i_mode & S_IXUGO) ||
	    !md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
			   LDLM_IBITS, &policy, lockmode, &lockh))
		rc = ll_md_real_close(inode, fd->fd_omode);

out:
	file->private_data = NULL;
	ll_file_data_put(fd);

	return rc;
}
/* While this returns an error code, the caller (fput()) does not use it, so
 * we need to make every effort to clean up all of our state here. Also,
 * applications rarely check close errors, and even if an error is returned
 * they will not re-try the close call.
 */
int ll_file_release(struct inode *inode, struct file *file)
{
	struct ll_file_data *fd;
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_inode_info *lli = ll_i2info(inode);
	ktime_t kstart = ktime_get();
	int rc;

	CDEBUG(D_VFSTRACE|D_IOTRACE,
	       "START file %s:"DFID"(%p), flags %o\n",
	       file_dentry(file)->d_name.name,
	       PFID(ll_inode2fid(file_inode(file))), inode, file->f_flags);

	fd = file->private_data;
	LASSERT(fd != NULL);

	/* The last ref on @file may not be the owner pid of the statahead,
	 * because parent and child processes can share the same file handle.
	 */
	if (S_ISDIR(inode->i_mode) &&
	    (lli->lli_opendir_key == fd || fd->fd_sai))
		ll_deauthorize_statahead(inode, fd);

	if (is_root_inode(inode)) {
		file->private_data = NULL;
		ll_file_data_put(fd);
		GOTO(out, rc = 0);
	}

	pcc_file_release(inode, file);

	if (!S_ISDIR(inode->i_mode)) {
		if (lli->lli_clob != NULL)
			lov_read_and_clear_async_rc(lli->lli_clob);
		lli->lli_async_rc = 0;
	}

	lli->lli_close_fd_time = ktime_get();

	rc = ll_md_close(inode, file);

	if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
		libcfs_debug_dumplog();

out:
	if (!rc && !is_root_inode(inode))
		ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
				   ktime_us_delta(ktime_get(), kstart));
	CDEBUG(D_VFSTRACE|D_IOTRACE,
	       "COMPLETED file %s:"DFID"(%p), flags %o, rc = %d\n",
	       file_dentry(file)->d_name.name,
	       PFID(ll_inode2fid(file_inode(file))), inode, file->f_flags,
	       rc);

	return rc;
}
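
/* Copy inline Data-on-MDT file data from the reply buffer into a page cache
 * page, zero-filling the tail and decrypting in place when the file is
 * encrypted and the key is available.
 */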
static inline int ll_dom_readpage(void *data, struct page *page)
{
	/* since ll_dom_readpage is a page cache helper, it is safe to assume
	 * mapping and host pointers are set here
	 */
	struct inode *inode;
	struct niobuf_local *lnb = data;
	void *kaddr;
	int rc = 0;

	inode = page2inode(page);

	kaddr = kmap_atomic(page);
	memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
	if (lnb->lnb_len < PAGE_SIZE)
		memset(kaddr + lnb->lnb_len, 0,
		       PAGE_SIZE - lnb->lnb_len);
	kunmap_atomic(kaddr);

	if (inode && IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
		if (!llcrypt_has_encryption_key(inode)) {
			CDEBUG(D_SEC, "no enc key for "DFID"\n",
			       PFID(ll_inode2fid(inode)));
			rc = -ENOKEY;
		} else {
			unsigned int offs = 0;

			while (offs < PAGE_SIZE) {
				/* decrypt only if page is not empty */
				if (memcmp(page_address(page) + offs,
					   page_address(ZERO_PAGE(0)),
					   LUSTRE_ENCRYPTION_UNIT_SIZE) == 0)
					break;

				rc = llcrypt_decrypt_pagecache_blocks(page,
						LUSTRE_ENCRYPTION_UNIT_SIZE,
						offs);
				if (rc)
					break;

				offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
			}
		}
	}

	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);

	return rc;
}

#ifdef HAVE_READ_CACHE_PAGE_WANTS_FILE
static inline int ll_dom_read_folio(struct file *file, struct folio *folio0)
{
	return ll_dom_readpage(file->private_data, folio_page(folio0, 0));
}
#else
#define ll_dom_read_folio	ll_dom_readpage
#endif
void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req)
{
	struct lu_env *env;
	struct cl_io *io;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct address_space *mapping = inode->i_mapping;
	struct page *vmpage;
	struct niobuf_remote *rnb;
	struct mdt_body *body;
	char *data;
	unsigned long index, start;
	struct niobuf_local lnb;
	__u16 refcheck;
	int rc;

	if (obj == NULL)
		return;

	if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
				       RCL_SERVER))
		return;

	rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
	if (rnb == NULL || rnb->rnb_len == 0)
		return;

	/* LU-11595: The server may always return the whole file, or it may
	 * return just the file tail, whose offset must be aligned with the
	 * client PAGE_SIZE to be usable on this client; if the server's
	 * PAGE_SIZE is smaller, the offset may not be aligned, and that data
	 * is just ignored.
	 */
	if (rnb->rnb_offset & ~PAGE_MASK)
		return;

	/* Server returns whole file or just file tail if it fills in reply
	 * buffer, in both cases total size should be equal to the file size.
	 */
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size &&
	    !(inode && IS_ENCRYPTED(inode))) {
		CERROR("%s: server returns off/len %llu/%u but size %llu\n",
		       ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
		       rnb->rnb_len, body->mbo_dom_size);
		return;
	}

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return;
	io = vvp_env_thread_io(env);
	io->ci_obj = obj;
	rc = cl_io_init(env, io, CIT_MISC, obj);
	if (rc)
		GOTO(out_io, rc);

	CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
	       rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);

	data = (char *)rnb + sizeof(*rnb);

	lnb.lnb_file_offset = rnb->rnb_offset;
	start = lnb.lnb_file_offset >> PAGE_SHIFT;
	index = 0;
	LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
	lnb.lnb_page_offset = 0;
	do {
		struct cl_page *page;

		lnb.lnb_data = data + (index << PAGE_SHIFT);
		lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
		if (lnb.lnb_len > PAGE_SIZE)
			lnb.lnb_len = PAGE_SIZE;

		vmpage = ll_read_cache_page(mapping, index + start,
					    ll_dom_read_folio, &lnb);
		if (IS_ERR(vmpage)) {
			CWARN("%s: cannot fill page %lu for "DFID
			      " with data: rc = %li\n",
			      ll_i2sbi(inode)->ll_fsname, index + start,
			      PFID(lu_object_fid(&obj->co_lu)),
			      PTR_ERR(vmpage));
			break;
		}
		lock_page(vmpage);
		if (vmpage->mapping == NULL) {
			unlock_page(vmpage);
			put_page(vmpage);
			/* page was truncated */
			break;
		}
		/* attach VM page to CL page cache */
		page = cl_page_find(env, obj, vmpage->index, vmpage,
				    CPT_CACHEABLE);
		if (IS_ERR(page)) {
			ClearPageUptodate(vmpage);
			unlock_page(vmpage);
			put_page(vmpage);
			break;
		}
		SetPageUptodate(vmpage);
		cl_page_put(env, page);
		unlock_page(vmpage);
		put_page(vmpage);
		index++;
	} while (rnb->rnb_len > (index << PAGE_SHIFT));

out_io:
	cl_io_fini(env, io);
	cl_env_put(env, &refcheck);
}
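
/* Send an open intent to the MDS (normally by FID), then set up the lock
 * data and any read-on-open data returned with the reply.
 */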
static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
			       struct lookup_intent *itp)
{
	struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
	struct dentry *parent = dget_parent(de);
	char *name = NULL;
	int len = 0;
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc;

	LASSERT(parent != NULL);
	LASSERT(itp->it_open_flags & MDS_OPEN_BY_FID);

	/* If the server supports open-by-fid, or the file name is invalid,
	 * don't pack the name in the open request.
	 */
	if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_OPEN_BY_NAME) ||
	    !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID)) {
retry:
		len = de->d_name.len;
		name = kmalloc(len + 1, GFP_NOFS);
		if (!name)
			GOTO(out_put, rc = -ENOMEM);

		/* race here */
		spin_lock(&de->d_lock);
		if (len != de->d_name.len) {
			spin_unlock(&de->d_lock);
			kfree(name);
			goto retry;
		}
		memcpy(name, de->d_name.name, len);
		name[len] = '\0';
		spin_unlock(&de->d_lock);

		if (!lu_name_is_valid_2(name, len)) {
			kfree(name);
			GOTO(out_put, rc = -ESTALE);
		}
	}

	op_data = ll_prep_md_op_data(NULL, parent->d_inode, de->d_inode,
				     name, len, 0, LUSTRE_OPC_OPEN, NULL);
	if (IS_ERR(op_data)) {
		kfree(name);
		GOTO(out_put, rc = PTR_ERR(op_data));
	}
	op_data->op_data = lmm;
	op_data->op_data_size = lmmsize;

	CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_OPEN_DELAY, cfs_fail_val);

	rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
			    &ll_md_blocking_ast, 0);
	kfree(name);
	ll_finish_md_op_data(op_data);
	if (rc == -ESTALE) {
		/* Keep our own exit path here so we don't flood the log
		 * with -ESTALE error messages.
		 */
		if (!it_disposition(itp, DISP_OPEN_OPEN) ||
		    it_open_error(DISP_OPEN_OPEN, itp))
			GOTO(out, rc = 0);
		ll_release_openhandle(de, itp);
		GOTO(out, rc);
	}

	if (it_disposition(itp, DISP_LOOKUP_NEG))
		GOTO(out, rc = -ENOENT);

	if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
		rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
		CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
		GOTO(out, rc);
	}

	rc = ll_prep_inode(&de->d_inode, &req->rq_pill, NULL, itp);

	if (!rc && itp->it_lock_mode) {
		__u64 bits = 0;

		/* If we got a lock back and it has a LOOKUP bit set,
		 * make sure the dentry is marked as valid so we can find it.
		 * We don't need to care about actual hashing since other bits
		 * of kernel will deal with that later.
		 */
		ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
		if (bits & MDS_INODELOCK_LOOKUP)
			d_lustre_revalidate(de);

		/* if the DoM bit is returned along with the LAYOUT bit then
		 * there can be read-on-open data returned.
		 */
		if (bits & MDS_INODELOCK_DOM && bits & MDS_INODELOCK_LAYOUT)
			ll_dom_finish_open(de->d_inode, req);
	}

	/* open may not fetch LOOKUP lock, update dir depth and default LMV
	 * anyway.
	 */
	if (!rc && S_ISDIR(de->d_inode->i_mode))
		ll_update_dir_depth_dmv(parent->d_inode, de);

out:
	ptlrpc_req_finished(req);
	ll_intent_drop_lock(itp);

	/* We did open by fid, but by the time we got to the server, the object
	 * disappeared. This is possible if the object was unlinked, but it's
	 * also possible if the object was unlinked by a rename. In the case
	 * of an object renamed over our existing one, we can't fail this open.
	 * O_CREAT also goes through this path if we had an existing dentry,
	 * and it's obviously wrong to return ENOENT for O_CREAT.
	 *
	 * Instead let's return -ESTALE, and the VFS will retry the open with
	 * LOOKUP_REVAL, which we catch in ll_revalidate_dentry and fail to
	 * revalidate, causing a lookup. This causes extra lookups in the case
	 * where we had a dentry in cache but the file is being unlinked and we
	 * lose the race with unlink, but this should be very rare.
	 */
	if (rc == -ENOENT)
		rc = -ESTALE;

out_put:
	dput(parent);
	return rc;
}
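
/* Fill @och from the MDT open reply body and register it for open replay. */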
static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
		       struct obd_client_handle *och)
{
	struct mdt_body *body;

	body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
	och->och_open_handle = body->mbo_open_handle;
	och->och_fid = body->mbo_fid1;
	och->och_lease_handle.cookie = it->it_lock_handle;
	och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
	och->och_flags = it->it_open_flags;

	return md_set_open_replay_data(md_exp, och, it);
}
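
/* Complete the client-side open: attach @fd to the file as its private
 * data; for a first open, also fill @och from the intent reply.
 */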
static int ll_local_open(struct file *file, struct lookup_intent *it,
			 struct ll_file_data *fd, struct obd_client_handle *och)
{
	struct inode *inode = file_inode(file);

	LASSERT(!file->private_data);

	LASSERT(fd != NULL);

	if (och) {
		int rc;

		rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
		if (rc != 0)
			return rc;
	}

	file->private_data = fd;
	ll_readahead_init(inode, &fd->fd_ras);
	fd->fd_omode = it->it_open_flags & (FMODE_READ | FMODE_WRITE |
					    FMODE_EXEC);

	return 0;
}
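
/* Record open/close interval statistics used to decide when an inode is
 * re-opened often enough to be worth an MDS open lock (see ll_file_open()).
 */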
void ll_track_file_opens(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	/* do not skew results with delays from never-opened inodes */
	if (ktime_to_ns(lli->lli_close_fd_time))
		ll_stats_ops_tally(sbi, LPROC_LL_INODE_OPCLTM,
			ktime_us_delta(ktime_get(), lli->lli_close_fd_time));

	if (ktime_after(ktime_get(),
			ktime_add_ms(lli->lli_close_fd_time,
				     sbi->ll_oc_max_ms))) {
		lli->lli_open_fd_count = 1;
		lli->lli_close_fd_time = ns_to_ktime(0);
	} else {
		lli->lli_open_fd_count++;
	}

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_OCOUNT,
			   lli->lli_open_fd_count);
}
/* Open a file, and (for the very first open) create objects on the OSTs at
 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
 * creation or open until the ll_lov_setstripe() ioctl is called.
 *
 * If we already have the stripe MD locally then we don't request it in
 * md_open(), by passing a lmm_size = 0.
 *
 * It is up to the application to ensure no other processes open this file
 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
 * used. We might be able to avoid races of that sort by getting lli_open_sem
 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
 */
int ll_file_open(struct inode *inode, struct file *file)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lookup_intent *it, oit = { .it_op = IT_OPEN,
					  .it_open_flags = file->f_flags };
	struct obd_client_handle **och_p = NULL;
	__u64 *och_usecount = NULL;
	struct ll_file_data *fd;
	ktime_t kstart = ktime_get();
	int rc = 0;

	CDEBUG(D_VFSTRACE|D_IOTRACE,
	       "START file %s:"DFID"(%p), flags %o\n",
	       file_dentry(file)->d_name.name,
	       PFID(ll_inode2fid(file_inode(file))), inode, file->f_flags);

	it = file->private_data; /* XXX: compat macro */
	file->private_data = NULL; /* prevent ll_local_open assertion */

	if (S_ISREG(inode->i_mode)) {
		rc = ll_file_open_encrypt(inode, file);
		if (rc) {
			if (it && it->it_disposition)
				ll_release_openhandle(file_dentry(file), it);
			GOTO(out_nofiledata, rc);
		}
	}

	fd = ll_file_data_get();
	if (fd == NULL)
		GOTO(out_nofiledata, rc = -ENOMEM);

	if (S_ISDIR(inode->i_mode))
		ll_authorize_statahead(inode, fd);

	ll_track_file_opens(inode);
	if (is_root_inode(inode)) {
		file->private_data = fd;
		RETURN(0);
	}
	if (!it || !it->it_disposition) {
		/* Convert f_flags into access mode. We cannot use file->f_mode,
		 * because everything but the O_ACCMODE mask was stripped from
		 * it.
		 */
		if ((oit.it_open_flags + 1) & O_ACCMODE)
			oit.it_open_flags++;
		if (file->f_flags & O_TRUNC)
			oit.it_open_flags |= FMODE_WRITE;

		/* The kernel only calls f_op->open in dentry_open; filp_open
		 * calls dentry_open after a call to open_namei, which checks
		 * permissions. Only nfsd_open calls dentry_open directly,
		 * without checking permissions, and because of that the code
		 * below is safe.
		 */
		if (oit.it_open_flags & (FMODE_WRITE | FMODE_READ))
			oit.it_open_flags |= MDS_OPEN_OWNEROVERRIDE;

		/* We do not want O_EXCL here, presumably we opened the file
		 * already? XXX - NFS implications?
		 */
		oit.it_open_flags &= ~O_EXCL;

		/* bug20584: if "it_open_flags" contains O_CREAT, the file will
		 * be created if necessary, so "IT_CREAT" should be set to keep
		 * consistent with it.
		 */
		if (oit.it_open_flags & O_CREAT)
			oit.it_op |= IT_CREAT;

		it = &oit;
	}

restart:
	/* Let's see if we have file open on MDS already. */
	if (it->it_open_flags & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else if (it->it_open_flags & FMODE_EXEC) {
		och_p = &lli->lli_mds_exec_och;
		och_usecount = &lli->lli_open_fd_exec_count;
	} else {
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	mutex_lock(&lli->lli_och_mutex);
	if (*och_p) { /* Open handle is present */
		if (it_disposition(it, DISP_OPEN_OPEN)) {
			/* Well, there's an extra open request that we do not
			 * need; let's close it somehow. This will decref the
			 * request.
			 */
			rc = it_open_error(DISP_OPEN_OPEN, it);
			if (rc) {
				mutex_unlock(&lli->lli_och_mutex);
				GOTO(out_openerr, rc);
			}

			ll_release_openhandle(file_dentry(file), it);
		}
		(*och_usecount)++;

		rc = ll_local_open(file, it, fd, NULL);
		if (rc) {
			(*och_usecount)--;
			mutex_unlock(&lli->lli_och_mutex);
			GOTO(out_openerr, rc);
		}
	} else {
		LASSERT(*och_usecount == 0);
		if (!it->it_disposition) {
			struct dentry *dentry = file_dentry(file);
			struct ll_sb_info *sbi = ll_i2sbi(inode);
			int open_threshold = sbi->ll_oc_thrsh_count;

			/* We cannot just request the lock handle now; the new
			 * ELC code means that one of the other OPEN locks for
			 * this file could be cancelled, and since the blocking
			 * AST handler would attempt to grab och_mutex as well,
			 * that would result in a deadlock.
			 */
			mutex_unlock(&lli->lli_och_mutex);
			/*
			 * Normally called under two situations:
			 * 1. fhandle / NFS export.
			 * 2. A race/condition on MDS resulting in no open
			 *    handle to be returned from LOOKUP|OPEN request,
			 *    for example if the target entry was a symlink.
			 *
			 * For NFSv3 we need to always cache the open lock
			 * for pre 5.5 Linux kernels.
			 *
			 * After reaching a number of opens of this inode
			 * we always ask for an open lock on it to handle
			 * bad userspace actors that open and close files
			 * in a loop for absolutely no good reason.
			 */
			/* fhandle / NFS path. */
			if (lli->lli_open_thrsh_count != UINT_MAX)
				open_threshold = lli->lli_open_thrsh_count;

			if (filename_is_volatile(dentry->d_name.name,
						 dentry->d_name.len,
						 NULL)) {
				/* There really is nothing here, but this
				 * makes it more readable, I think.
				 * We do not want an openlock for volatile
				 * files under any circumstances.
				 */
			} else if (open_threshold > 0) {
				/* Take MDS_OPEN_LOCK with many opens */
				if (lli->lli_open_fd_count >= open_threshold)
					it->it_open_flags |= MDS_OPEN_LOCK;

				/* If this is open after we just closed */
				else if (ktime_before(ktime_get(),
					    ktime_add_ms(lli->lli_close_fd_time,
							 sbi->ll_oc_thrsh_ms)))
					it->it_open_flags |= MDS_OPEN_LOCK;
			}

			/*
			 * Always specify MDS_OPEN_BY_FID because we don't want
			 * to get a file with a different fid.
			 */
			it->it_open_flags |= MDS_OPEN_BY_FID;
			rc = ll_intent_file_open(dentry, NULL, 0, it);
			if (rc)
				GOTO(out_openerr, rc);

			goto restart;
		}
		OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
		if (!*och_p)
			GOTO(out_och_free, rc = -ENOMEM);

		(*och_usecount)++;

		/* md_intent_lock() didn't get a request ref if there was an
		 * open error, so don't do cleanup on the request here.
		 */
		/* XXX (green): Shouldn't we bail out on any error here, not
		 * just open errors?
		 */
		rc = it_open_error(DISP_OPEN_OPEN, it);
		if (rc != 0)
			GOTO(out_och_free, rc);

		LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
			 "inode %px: disposition %x, status %d\n", inode,
			 it_disposition(it, ~0), it->it_status);

		rc = ll_local_open(file, it, fd, *och_p);
		if (rc)
			GOTO(out_och_free, rc);
	}

	rc = pcc_file_open(inode, file);
	if (rc)
		GOTO(out_och_free, rc);
	mutex_unlock(&lli->lli_och_mutex);

	/* Must do this outside the lli_och_mutex lock to prevent a deadlock
	 * where a different kind of OPEN lock for this same inode gets
	 * cancelled by ldlm_cancel_lru.
	 */
	if (!S_ISREG(inode->i_mode))
		GOTO(out_och_free, rc);
	cl_lov_delay_create_clear(&file->f_flags);
	GOTO(out_och_free, rc);

out_och_free:
	if (rc) {
		if (och_p && *och_p) {
			OBD_FREE(*och_p, sizeof(struct obd_client_handle));
			*och_p = NULL; /* OBD_FREE writes some magic there */
			(*och_usecount)--;
		}
		mutex_unlock(&lli->lli_och_mutex);

out_openerr:
		if (lli->lli_opendir_key == fd)
			ll_deauthorize_statahead(inode, fd);

		if (fd != NULL)
			ll_file_data_put(fd);
	} else {
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN,
				   ktime_us_delta(ktime_get(), kstart));
	}

out_nofiledata:
	if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
		ptlrpc_req_finished(it->it_request);
		it_clear_disposition(it, DISP_ENQ_OPEN_REF);
	}

	CDEBUG(D_VFSTRACE|D_IOTRACE,
	       "COMPLETED file %s:"DFID"(%p), flags %o, rc = %d\n",
	       file_dentry(file)->d_name.name,
	       PFID(ll_inode2fid(file_inode(file))), inode, file->f_flags,
	       rc);

	return rc;
}
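
/* Blocking AST for file lease DLM locks: cancel the lease lock when a
 * conflict arrives.
 */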
static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
		struct ldlm_lock_desc *desc, void *data, int flag)
{
	struct lustre_handle lockh;
	int rc;

	switch (flag) {
	case LDLM_CB_BLOCKING:
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		if (rc < 0) {
			CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
			return rc;
		}
		break;
	case LDLM_CB_CANCELING:
		/* do nothing */
		break;
	}
	return 0;
}
/*
 * When setting a lease on a file, we take ownership of the lli_mds_*_och
 * and save it as fd->fd_och so as to force the client to reopen the file
 * even if it has an open lock in cache already.
 */
static int ll_lease_och_acquire(struct inode *inode, struct file *file,
				struct lustre_handle *old_open_handle)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_file_data *fd = file->private_data;
	struct obd_client_handle **och_p;
	__u64 *och_usecount;
	int rc = 0;

	/* Get the openhandle of the file */
	mutex_lock(&lli->lli_och_mutex);
	if (fd->fd_lease_och != NULL)
		GOTO(out_unlock, rc = -EBUSY);

	if (fd->fd_och == NULL) {
		if (file->f_mode & FMODE_WRITE) {
			LASSERT(lli->lli_mds_write_och != NULL);
			och_p = &lli->lli_mds_write_och;
			och_usecount = &lli->lli_open_fd_write_count;
		} else {
			LASSERT(lli->lli_mds_read_och != NULL);
			och_p = &lli->lli_mds_read_och;
			och_usecount = &lli->lli_open_fd_read_count;
		}

		if (*och_usecount > 1)
			GOTO(out_unlock, rc = -EBUSY);

		fd->fd_och = *och_p;
		*och_p = NULL;
		*och_usecount = 0;
	}

	*old_open_handle = fd->fd_och->och_open_handle;

out_unlock:
	mutex_unlock(&lli->lli_och_mutex);
	return rc;
}
/*
 * Release ownership on lli_mds_*_och when putting back a file lease.
 */
static int ll_lease_och_release(struct inode *inode, struct file *file)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_file_data *fd = file->private_data;
	struct obd_client_handle **och_p;
	struct obd_client_handle *old_och = NULL;
	__u64 *och_usecount;
	int rc = 0;

	mutex_lock(&lli->lli_och_mutex);
	if (file->f_mode & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else {
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	/* The file may have been opened by another process (broken lease), so
	 * *och_p is not NULL. In this case we should simply increase the
	 * usecount and close this openhandle.
	 */
	if (*och_p != NULL) {
		old_och = fd->fd_och;
		(*och_usecount)++;
	} else {
		*och_p = fd->fd_och;
		*och_usecount = 1;
	}
	fd->fd_och = NULL;
	mutex_unlock(&lli->lli_och_mutex);

	if (old_och != NULL)
		rc = ll_close_inode_openhandle(inode, old_och, 0, NULL);

	return rc;
}
/*
 * Acquire a lease and open the file.
 */
static struct obd_client_handle *
ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
	      __u64 open_flags)
{
	struct lookup_intent it = { .it_op = IT_OPEN };
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	struct lustre_handle old_open_handle = { 0 };
	struct obd_client_handle *och = NULL;
	int rc;
	int rc2;

	if (fmode != FMODE_WRITE && fmode != FMODE_READ)
		return ERR_PTR(-EINVAL);

	if (file != NULL) {
		if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
			return ERR_PTR(-EPERM);

		rc = ll_lease_och_acquire(inode, file, &old_open_handle);
		if (rc)
			return ERR_PTR(rc);
	}

	OBD_ALLOC_PTR(och);
	if (och == NULL)
		return ERR_PTR(-ENOMEM);

	op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		GOTO(out, rc = PTR_ERR(op_data));

	/* To tell the MDT this openhandle is from the same owner */
	op_data->op_open_handle = old_open_handle;

	it.it_open_flags = fmode | open_flags;
	it.it_open_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
	rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
			    &ll_md_blocking_lease_ast,
	/* LDLM_FL_NO_LRU: Don't put the lease lock into the LRU list,
	 * otherwise it can be cancelled, which may mislead applications into
	 * thinking that the lease is broken.
	 * LDLM_FL_EXCL: Set this flag so that the lock won't be matched by a
	 * normal open in ll_md_blocking_ast(). Otherwise, since
	 * ll_md_blocking_lease_ast doesn't deal with the openhandle, a normal
	 * openhandle would be leaked.
	 */
			    LDLM_FL_NO_LRU | LDLM_FL_EXCL);
	ll_finish_md_op_data(op_data);
	ptlrpc_req_finished(req);
	if (rc < 0)
		GOTO(out_release_it, rc);

	if (it_disposition(&it, DISP_LOOKUP_NEG))
		GOTO(out_release_it, rc = -ENOENT);

	rc = it_open_error(DISP_OPEN_OPEN, &it);
	if (rc)
		GOTO(out_release_it, rc);

	LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
	rc = ll_och_fill(sbi->ll_md_exp, &it, och);
	if (rc)
		GOTO(out_release_it, rc);

	if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
		GOTO(out_close, rc = -EOPNOTSUPP);

	/* lease already acquired; handle the lease lock */
	ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
	if (!it.it_lock_mode ||
	    !(it.it_lock_bits & MDS_INODELOCK_OPEN)) {
		/* an open lock must be returned for a lease */
		CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
		       PFID(ll_inode2fid(inode)), it.it_lock_mode,
		       it.it_lock_bits);
		GOTO(out_close, rc = -EPROTO);
	}

	ll_intent_release(&it);
	return och;

out_close:
	/* Cancel open lock */
	if (it.it_lock_mode != 0) {
		ldlm_lock_decref_and_cancel(&och->och_lease_handle,
					    it.it_lock_mode);
		it.it_lock_mode = 0;
		och->och_lease_handle.cookie = 0ULL;
	}
	rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
	if (rc2 < 0)
		CERROR("%s: error closing file "DFID": %d\n",
		       sbi->ll_fsname, PFID(&ll_i2info(inode)->lli_fid), rc2);
	och = NULL; /* och has been freed in ll_close_inode_openhandle() */

out_release_it:
	ll_intent_release(&it);
out:
	if (och != NULL)
		OBD_FREE_PTR(och);
	return ERR_PTR(rc);
}
/**
 * Check whether a layout swap can be done between two inodes.
 *
 * \param[in] inode1	First inode to check
 * \param[in] inode2	Second inode to check
 *
 * \retval 0 on success, layout swap can be performed between both inodes
 * \retval negative error code if requirements are not met
 */
static int ll_check_swap_layouts_validity(struct inode *inode1,
					  struct inode *inode2)
{
	if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
		return -EINVAL;

	if (inode_permission(&nop_mnt_idmap, inode1, MAY_WRITE) ||
	    inode_permission(&nop_mnt_idmap, inode2, MAY_WRITE))
		return -EPERM;

	if (inode1->i_sb != inode2->i_sb)
		return -EXDEV;

	return 0;
}
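
/* Biased close requesting that the MDT atomically swap the layouts of
 * @inode and @inode2.
 */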
static int ll_swap_layouts_close(struct obd_client_handle *och,
				 struct inode *inode, struct inode *inode2,
				 struct lustre_swap_layouts *lsl)
{
	const struct lu_fid *fid1 = ll_inode2fid(inode);
	struct swap_layouts_param slp;
	const struct lu_fid *fid2;
	int rc;

	CDEBUG(D_INODE, "%s: biased close of file "DFID"\n",
	       ll_i2sbi(inode)->ll_fsname, PFID(fid1));

	rc = ll_check_swap_layouts_validity(inode, inode2);
	if (rc < 0)
		GOTO(out_free_och, rc);

	/* We now know that inode2 is a lustre inode */
	fid2 = ll_inode2fid(inode2);

	rc = lu_fid_cmp(fid1, fid2);
	if (rc == 0)
		GOTO(out_free_och, rc = -EINVAL);

	/* Close the file and {swap,merge} layouts between inode & inode2.
	 * NB: the local lease handle is released in mdc_close_intent_pack()
	 * because we still need it to pack l_remote_handle to the MDT.
	 */
	slp.slp_inode = inode2;
	slp.slp_dv1 = lsl->sl_dv1;
	slp.slp_dv2 = lsl->sl_dv2;
	rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP, &slp);

	och = NULL; /* freed in ll_close_inode_openhandle() */

out_free_och:
	if (och != NULL)
		OBD_FREE_PTR(och);

	return rc;
}
/**
 * Release lease and close the file.
 * It will check if the lease has ever been broken.
 */
static int ll_lease_close_intent(struct obd_client_handle *och,
				 struct inode *inode,
				 bool *lease_broken, enum mds_op_bias bias,
				 void *data)
{
	struct ldlm_lock *lock;
	bool cancelled = true;
	int rc;

	lock = ldlm_handle2lock(&och->och_lease_handle);
	if (lock != NULL) {
		lock_res_and_lock(lock);
		cancelled = ldlm_is_cancel(lock);
		unlock_res_and_lock(lock);
		LDLM_LOCK_PUT(lock);
	}

	CDEBUG(D_INODE, "lease for "DFID" broken? %d, bias: %x\n",
	       PFID(&ll_i2info(inode)->lli_fid), cancelled, bias);

	if (lease_broken != NULL)
		*lease_broken = cancelled;

	if (!cancelled && !bias)
		ldlm_cli_cancel(&och->och_lease_handle, 0);

	if (cancelled) { /* no need to execute intent */
		bias = 0;
		data = NULL;
	}

	rc = ll_close_inode_openhandle(inode, och, bias, data);
	return rc;
}

static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
			  bool *lease_broken)
{
	return ll_lease_close_intent(och, inode, lease_broken, 0, NULL);
}
/**
 * After a lease is taken, send the RPC MDS_REINT_RESYNC to the MDT
 */
static int ll_lease_file_resync(struct obd_client_handle *och,
				struct inode *inode, void __user *uarg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ll_ioc_lease_id ioc;
	__u64 data_version_unused;
	int rc;

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	if (copy_from_user(&ioc, uarg, sizeof(ioc)))
		GOTO(out, rc = -EFAULT);

	/* Before starting a file resync, it's necessary to clean up the page
	 * cache in client memory; otherwise, once the layout version is
	 * increased, writing back cached data will be denied by the OSTs.
	 */
	rc = ll_data_version(inode, &data_version_unused, LL_DV_WR_FLUSH);
	if (rc)
		GOTO(out, rc);

	op_data->op_lease_handle = och->och_lease_handle;
	op_data->op_mirror_id = ioc.lil_mirror_id;
	rc = md_file_resync(sbi->ll_md_exp, op_data);
	if (rc)
		GOTO(out, rc);

out:
	ll_finish_md_op_data(op_data);
	return rc;
}
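
/* Merge OST attributes (timestamps, size, blocks) into the inode; the
 * caller must hold the inode size lock, see ll_merge_attr() below.
 */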
static int ll_merge_attr_nolock(const struct lu_env *env, struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct cl_attr *attr = vvp_env_thread_attr(env);
	s64 atime;
	s64 mtime;
	s64 ctime;
	int rc = 0;

	/* Merge the timestamps most recently obtained from the MDS with
	 * the timestamps obtained from the OSTs.
	 *
	 * Do not overwrite atime of inode because it may be refreshed
	 * by file_accessed() function. If the read was served by cache
	 * data, there is no RPC to be sent so that atime may not be
	 * transferred to OSTs at all. MDT only updates atime at close time
	 * if it's at least 'mdd.*.atime_diff' older.
	 * All in all, the atime in Lustre does not strictly comply with
	 * POSIX. Solving this problem would require sending an RPC to the
	 * MDT for each read, which would hurt performance.
	 */
	if (test_and_clear_bit(LLIF_UPDATE_ATIME, &lli->lli_flags) ||
	    inode_get_atime_sec(inode) < lli->lli_atime)
		inode_set_atime(inode, lli->lli_atime, 0);

	inode_set_mtime(inode, lli->lli_mtime, 0);
	inode_set_ctime(inode, lli->lli_ctime, 0);

	mtime = inode_get_mtime_sec(inode);
	atime = inode_get_atime_sec(inode);
	ctime = inode_get_ctime_sec(inode);

	cl_object_attr_lock(obj);
	if (CFS_FAIL_CHECK(OBD_FAIL_MDC_MERGE))
		rc = -EINVAL;
	else
		rc = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);

	if (rc != 0)
		GOTO(out, rc = (rc == -ENODATA ? 0 : rc));

	if (atime < attr->cat_atime)
		atime = attr->cat_atime;

	if (ctime < attr->cat_ctime)
		ctime = attr->cat_ctime;

	if (mtime < attr->cat_mtime)
		mtime = attr->cat_mtime;

	CDEBUG(D_VFSTRACE, DFID" updating i_size %llu i_blocks %llu\n",
	       PFID(&lli->lli_fid), attr->cat_size, attr->cat_blocks);

	if (llcrypt_require_key(inode) == -ENOKEY) {
		/* Without the key, round up the encrypted file size to the
		 * next LUSTRE_ENCRYPTION_UNIT_SIZE. The clear text size is put
		 * in lli_lazysize for proper file size setting at close time.
		 */
		lli->lli_attr_valid |= OBD_MD_FLLAZYSIZE;
		lli->lli_lazysize = attr->cat_size;
		attr->cat_size = round_up(attr->cat_size,
					  LUSTRE_ENCRYPTION_UNIT_SIZE);
	}
	i_size_write(inode, attr->cat_size);
	inode->i_blocks = attr->cat_blocks;

	inode_set_mtime(inode, mtime, 0);
	inode_set_atime(inode, atime, 0);
	inode_set_ctime(inode, ctime, 0);

out:
	return rc;
}
int ll_merge_attr(const struct lu_env *env, struct inode *inode)
{
	int rc;

	ll_inode_size_lock(inode);
	rc = ll_merge_attr_nolock(env, inode);
	ll_inode_size_unlock(inode);

	return rc;
}

/* Used to update size and blocks on the inode for LSOM if there is no
 * contention */
int ll_merge_attr_try(const struct lu_env *env, struct inode *inode)
{
	int rc = 0;

	if (ll_inode_size_trylock(inode)) {
		rc = ll_merge_attr_nolock(env, inode);
		ll_inode_size_unlock(inode);
	}

	return rc;
}
/**
 * Set designated mirror for I/O.
 *
 * So far only read, write, and truncate can issue I/O to
 * a designated mirror.
 */
void ll_io_set_mirror(struct cl_io *io, const struct file *file)
{
	struct ll_file_data *fd = file->private_data;

	/* clear the layout version for generic (non-resync) I/O in case it
	 * carries a stale layout version due to I/O restart
	 */
	io->ci_layout_version = 0;

	/* FLR: disable non-delay for designated mirror I/O because obviously
	 * only one mirror is available
	 */
	if (fd->fd_designated_mirror > 0) {
		io->ci_ndelay = 0;
		io->ci_designated_mirror = fd->fd_designated_mirror;
		io->ci_layout_version = fd->fd_layout_version;
	}

	CDEBUG(D_VFSTRACE, "%s: designated mirror: %d\n",
	       file->f_path.dentry->d_name.name, io->ci_designated_mirror);
}
/*
 * This is relatime_need_update() from Linux 5.17, which is not exported.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec64 now)
{
	struct timespec64 ts;
	struct timespec64 atime;

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	atime = inode_get_atime(inode);
	ts = inode_get_mtime(inode);
	if (timespec64_compare(&ts, &atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	ts = inode_get_ctime(inode);
	if (timespec64_compare(&ts, &atime) >= 0)
		return 1;
	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/*
 * Very similar to kernel function: !__atime_needs_update()
 */
static bool file_is_noatime(const struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file_inode((struct file *)file);
	struct timespec64 now;

	if (file->f_flags & O_NOATIME)
		return true;

	if (inode->i_flags & S_NOATIME)
		return true;

	if (IS_NOATIME(inode))
		return true;

	if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
		return true;

	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return true;

	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
		return true;

	now = current_time(inode);

	if (!relatime_need_update(mnt, inode, now))
		return true;

	return false;
}
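
/* Initialize common cl_io fields from the file and I/O arguments: sync and
 * append flags, lock policy, atime handling and FLR mirror selection.
 */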
void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
		struct vvp_io_args *args)
{
	struct inode *inode = file_inode(file);
	struct ll_file_data *fd = file->private_data;
	int flags = vvp_io_args_flags(file, args);

	io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
	io->ci_lock_no_expand = fd->ll_lock_no_expand;

	if (iot == CIT_WRITE) {
		io->u.ci_wr.wr_append = iocb_ki_flags_check(flags, APPEND);
		io->u.ci_wr.wr_sync = !!(iocb_ki_flags_check(flags, SYNC) ||
					 iocb_ki_flags_check(flags, DSYNC) ||
					 IS_SYNC(inode));
	}

	io->ci_iocb_nowait = iocb_ki_flags_check(flags, NOWAIT);

	io->ci_obj = ll_i2info(inode)->lli_clob;
	io->ci_lockreq = CILR_MAYBE;
	if (ll_file_nolock(file)) {
		io->ci_lockreq = CILR_NEVER;
		io->ci_no_srvlock = 1;
	} else if (iocb_ki_flags_check(flags, APPEND)) {
		io->ci_lockreq = CILR_MANDATORY;
	}
	io->ci_noatime = file_is_noatime(file);
	io->ci_async_readahead = false;

	/* FLR: only use non-delay I/O for read, as there is only one
	 * available mirror for write.
	 */
	io->ci_ndelay = !(iot == CIT_WRITE);
	/* unaligned DIO has compat issues with some older servers, but we
	 * find out if there are such servers while setting up the IO, so it
	 * starts out allowed
	 */
	io->ci_allow_unaligned_dio = true;

	io->ci_hybrid_switched = args->via_hybrid_switched;

	ll_io_set_mirror(io, file);
}
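
/* Account one I/O sample and its byte count in the inode's file heat
 * instances when file heat tracking is enabled.
 */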
static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
			__u64 count)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	enum obd_heat_type sample_type;
	enum obd_heat_type iobyte_type;
	__u64 now = ktime_get_real_seconds();

	if (!ll_sbi_has_file_heat(sbi) ||
	    lli->lli_heat_flags & LU_HEAT_FLAG_OFF)
		return;

	if (iot == CIT_READ) {
		sample_type = OBD_HEAT_READSAMPLE;
		iobyte_type = OBD_HEAT_READBYTE;
	} else if (iot == CIT_WRITE) {
		sample_type = OBD_HEAT_WRITESAMPLE;
		iobyte_type = OBD_HEAT_WRITEBYTE;
	} else {
		return;
	}

	spin_lock(&lli->lli_heat_lock);
	obd_heat_add(&lli->lli_heat_instances[sample_type], now, 1,
		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
	obd_heat_add(&lli->lli_heat_instances[iobyte_type], now, count,
		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
	spin_unlock(&lli->lli_heat_lock);
}
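
/* Decide whether a buffered read/write should be transparently switched to
 * direct I/O (hybrid I/O).
 */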
static bool
ll_hybrid_bio_dio_switch_check(struct file *file, struct kiocb *iocb,
			       enum cl_io_type iot, size_t count)
{
	/* we can only do this with IOCB_FLAGS, since we can't modify f_flags
	 * because they're visible in userspace, so we check for IOCB_DIRECT
	 */
#ifdef IOCB_DIRECT
	struct inode *inode = file_inode(file);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	/* it doesn't make sense to switch unless it's READ or WRITE */
	if (iot != CIT_WRITE && iot != CIT_READ)
		return false;

	if (!iocb)
		return false;

	/* Already using direct I/O, no need to switch. */
	if (iocb->ki_flags & IOCB_DIRECT)
		return false;

	if (!test_bit(LL_SBI_HYBRID_IO, sbi->ll_flags))
		return false;
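
/* Common read/write engine: set up the cl_io, take the range lock when
 * needed, run the cl_io loop, and restart short I/Os; large buffered I/Os
 * are split into chunks bounded by the page cache limit.
 */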
static ssize_t
ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
		   struct file *file, enum cl_io_type iot,
		   loff_t *ppos, size_t bytes)
{
	struct inode *inode = file_inode(file);
	struct ll_file_data *fd = file->private_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct vvp_io *vio = vvp_env_io(env);
	struct cl_dio_aio *ci_dio_aio = NULL;
	struct range_lock range;
	struct cl_io *io;
	int flags = vvp_io_args_flags(file, args);
	bool is_parallel_dio = false;
	bool range_locked = false;
	unsigned int retried = 0;
	bool dio_lock = false;
	bool is_aio = false;
	bool partial_io = false;
	size_t max_io_bytes;
	ssize_t result = 0;
	int retries = 1000;
	size_t per_bytes;
	int rc2 = 0;
	int rc = 0;

	CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, bytes: %zu\n",
	       file_dentry(file)->d_name.name,
	       iot == CIT_READ ? "read" : "write", *ppos, bytes);

	max_io_bytes = min_t(size_t, PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT,
			     sbi->ll_cache->ccc_lru_max >> 2) << PAGE_SHIFT;

	if (iocb_ki_flags_check(flags, DIRECT)) {
		if (iocb_ki_flags_check(flags, APPEND))
			dio_lock = true;
		if (!is_sync_kiocb(args->u.normal.via_iocb) &&
		    /* hybrid IO is also potentially async */
		    !args->via_hybrid_switched)
			is_aio = true;

		/* the kernel does not support AIO on pipes, and parallel DIO
		 * uses part of the AIO path, so we must not do parallel dio
		 * to pipes
		 */
		is_parallel_dio = !iov_iter_is_pipe(args->u.normal.via_iter) &&
				  !is_aio;

		if (!ll_sbi_has_parallel_dio(sbi))
			is_parallel_dio = false;

		ci_dio_aio = cl_dio_aio_alloc(args->u.normal.via_iocb,
					      ll_i2info(inode)->lli_clob,
					      is_aio);
		if (!ci_dio_aio)
			GOTO(out, rc = -ENOMEM);
	}

restart:
	/* The IO block size needs to be aware of the cached page limit;
	 * otherwise, with a small max_cached_mb but a large block IO issued,
	 * the IO could never finish and would block the whole client.
	 */
	if (iocb_ki_flags_check(flags, DIRECT) || bytes < max_io_bytes) {
		per_bytes = bytes;
		partial_io = false;
	} else {
		per_bytes = max_io_bytes;
		partial_io = true;
	}
	io = vvp_env_thread_io(env);
	ll_io_init(io, file, iot, args);
	io->ci_dio_aio = ci_dio_aio;
	io->ci_dio_lock = dio_lock;
	io->ci_ndelay_tried = retried;
	io->ci_parallel_dio = is_parallel_dio;

	if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
		if (iocb_ki_flags_check(flags, APPEND))
			range_lock_init(&range, 0, LUSTRE_EOF);
		else
			range_lock_init(&range, *ppos, *ppos + per_bytes - 1);

		vio->vui_fd = file->private_data;
		vio->vui_iter = args->u.normal.via_iter;
		vio->vui_iocb = args->u.normal.via_iocb;
		/* Direct IO reads must also take the range lock, or multiple
		 * reads will try to work on the same pages.
		 * See LU-6227 for details.
		 */
		if (((iot == CIT_WRITE) ||
		     (iot == CIT_READ && iocb_ki_flags_check(flags, DIRECT))) &&
		    !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
			CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
			       RL_PARA(&range));
			rc = range_lock(&lli->lli_write_tree, &range);
			if (rc < 0)
				GOTO(out, rc);

			range_locked = true;
		}

		ll_cl_add(inode, env, io, LCC_RW);
		rc = cl_io_loop(env, io);
		ll_cl_remove(inode, env);
	} else {
		/* cl_io_rw_init() handled IO */
		rc = io->ci_result;
	}

	if (io->ci_dio_aio && !is_aio) {
		struct cl_sync_io *anchor = &io->ci_dio_aio->cda_sync;

		/* for dio, EIOCBQUEUED is an implementation detail,
		 * and we don't return it to userspace
		 */
		if (rc == -EIOCBQUEUED)
			rc = 0;

		/* NB: parallel DIO may be disabled during i/o submission;
		 * if that occurs, I/O shifts to sync, so it's all resolved
		 * before we get here, and this wait call completes
		 * immediately.
		 */
		rc2 = cl_sync_io_wait_recycle(env, anchor, 0, 0);
		if (rc2 < 0)
			rc = rc2;
	}

	if (range_locked) {
		CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
		       RL_PARA(&range));
		range_unlock(&lli->lli_write_tree, &range);
		range_locked = false;
	}

	if (io->ci_bytes > 0) {
		if (rc2 == 0) {
			result += io->ci_bytes;
			*ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
		}

		if (partial_io) {
			bytes -= io->ci_bytes;

			/* prepare IO restart */
			args->u.normal.via_iter = vio->vui_iter;

			/*
			 * Reexpand iov count because it was zero
			 */
			iov_iter_reexpand(vio->vui_iter, bytes);
			if (per_bytes == io->ci_bytes)
				io->ci_need_restart = 1;
		}
	}

out:
	cl_io_fini(env, io);

	CDEBUG(D_VFSTRACE,
	       "%s: %d io complete with rc: %d, result: %zd, restart: %d\n",
	       file->f_path.dentry->d_name.name,
	       iot, rc, result, io->ci_need_restart);

	if ((!rc || rc == -ENODATA || rc == -ENOLCK || rc == -EIOCBQUEUED) &&
	    bytes > 0 && io->ci_need_restart && retries-- > 0) {
		CDEBUG(D_VFSTRACE,
		       "%s: restart %s from ppos=%lld bytes=%zu retries=%u ret=%zd: rc = %d\n",
		       file_dentry(file)->d_name.name,
		       iot == CIT_READ ? "read" : "write",
		       *ppos, bytes, retries, result, rc);
		/* preserve the tried count for FLR */
		retried = io->ci_ndelay_tried;
		dio_lock = io->ci_dio_lock;
		goto restart;
	}

	/* update inode size */
	if (io->ci_type == CIT_WRITE)
		ll_merge_attr(env, inode);

	if (io->ci_dio_aio) {
		/* set the number of bytes successfully moved in the aio */
		if (result > 0)
			io->ci_dio_aio->cda_bytes = result;
		/*
		 * VFS will call aio_complete() if no -EIOCBQUEUED
		 * is returned for AIO, so we can not call aio_complete()
		 * in our end_io(). (cda_no_aio_complete is always set for
		 * normal DIO.)
		 *
		 * NB: Setting cda_no_aio_complete like this is safe because
		 * the atomic_dec_and_lock in cl_sync_io_note has implicit
		 * memory barriers, so this will be seen by whichever thread
		 * completes the DIO/AIO, even if it's not this one.
		 */
		if (is_aio && rc != -EIOCBQUEUED)
			io->ci_dio_aio->cda_no_aio_complete = 1;
		/* if an aio enqueued successfully (-EIOCBQUEUED), then Lustre
		 * will call aio_complete rather than the vfs, so we return 0
		 * to tell the VFS we're handling it
		 */
		else if (is_aio) /* rc == -EIOCBQUEUED */
			result = 0;
		/*
		 * Drop the reference held by the llite layer on this top level
		 * IO context.
		 *
		 * For DIO, this frees it here, since IO is complete, and for
		 * AIO, we will call aio_complete() (and then free this top
		 * level context) once all the outstanding chunks of this AIO
		 * have completed.
		 */
		cl_sync_io_note(env, &io->ci_dio_aio->cda_sync,
				rc == -EIOCBQUEUED ? 0 : rc);
		if (!is_aio) {
			LASSERT(io->ci_dio_aio->cda_creator_free);
			cl_dio_aio_free(env, io->ci_dio_aio);
			io->ci_dio_aio = NULL;
		}
	}
	if (iot == CIT_READ) {
		if (result > 0)
			ll_stats_ops_tally(ll_i2sbi(inode),
					   LPROC_LL_READ_BYTES, result);
	} else if (iot == CIT_WRITE) {
		if (result > 0) {
			ll_stats_ops_tally(ll_i2sbi(inode),
					   LPROC_LL_WRITE_BYTES, result);
			fd->fd_write_failed = false;
		} else if (result == 0 && rc == 0) {
			rc = io->ci_result;
			if (rc < 0)
				fd->fd_write_failed = true;
			else
				fd->fd_write_failed = false;
		} else if (rc != -ERESTARTSYS) {
			fd->fd_write_failed = true;
		}
	}

	CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
	if (result > 0)
		ll_heat_add(inode, iot, result);

	return result > 0 ? result : rc;
}
/**
 * The purpose of fast read is to overcome the per-I/O overhead and improve
 * IOPS, especially for small I/O.
 *
 * To serve a read request, CLIO has to create and initialize a cl_io and
 * then request a DLM lock. This has turned out to have significant overhead
 * and affects the performance of small I/O dramatically.
 *
 * It's not necessary to create a cl_io for each I/O. Under the help of read
 * ahead, most of the pages being read are already in memory cache and we can
 * read those pages directly because if the pages exist, the corresponding DLM
 * lock must exist so that page content must be valid.
 *
 * In the fast read implementation, llite speculatively finds and reads pages
 * in memory cache. There are three scenarios for fast read:
 *   - If the page exists and is uptodate, kernel VM will provide the data and
 *     CLIO won't be intervened;
 *   - If the page was brought into memory by read ahead, it will be exported
 *     and read ahead parameters will be updated;
 *   - Otherwise the page is not in memory, we can't do fast read. Therefore,
 *     it will go back and invoke normal read, i.e., a cl_io will be created
 *     and a DLM lock will be requested.
 *
 * POSIX compliance: the POSIX standard states that read is intended to be
 * atomic. The Lustre read implementation is in line with the Linux kernel
 * read implementation and neither of them complies with the POSIX standard
 * in this matter. Fast read doesn't make the situation worse on a single
 * node but it may interleave write results from multiple nodes due to the
 * short read handling in ll_file_aio_read().
 *
 * \param env - lu_env
 * \param iocb - kiocb from kernel
 * \param iter - user space buffers where the data will be copied
 *
 * \retval - number of bytes read, or error code if an error occurred.
 */
static ssize_t
ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ll_inode_info *lli = ll_i2info(file_inode(iocb->ki_filp));
	int flags = iocb_ki_flags_get(iocb->ki_filp, iocb);
	ssize_t result;

	if (!ll_sbi_has_fast_read(ll_i2sbi(file_inode(iocb->ki_filp))))
		return 0;

	/* NB: we can't do direct IO for fast read because it will need a lock
	 * to make the IO engine happy.
	 */
	if (iocb_ki_flags_check(flags, DIRECT))
		return 0;

	if (ll_layout_version_get(lli) == CL_LAYOUT_GEN_NONE)
		return 0;

	result = generic_file_read_iter(iocb, iter);

	/* If the first page is not in cache, generic_file_aio_read() will
	 * return -ENODATA. Fall back to the full read path.
	 * See the corresponding code in ll_readpage().
	 *
	 * If we raced with page deletion, we might get EIO. Rather than add
	 * locking to the fast path for this rare case, fall back to the full
	 * read path. (See vvp_io_read_start() for the rest of the handling.)
	 */
	if (result == -ENODATA || result == -EIO)
		result = 0;

	if (result > 0) {
		ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
		ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
				   LPROC_LL_READ_BYTES, result);
	}

	return result;
}
/**
 * Confine the read iter lest we read beyond the EOF
 *
 * \param iocb [in]	kernel iocb
 * \param to [in]	reader iov_iter
 *
 * \retval <0	failure
 * \retval 0	success
 * \retval >0	@iocb->ki_pos has passed the EOF
 */
static int file_read_confine_iter(struct lu_env *env, struct kiocb *iocb,
				  struct iov_iter *to)
{
	struct cl_io *io;
	struct cl_attr *attr = vvp_env_thread_attr(env);
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	loff_t read_end = iocb->ki_pos + iov_iter_count(to);
	loff_t kms;
	loff_t size;
	int rc = 0;

	if (obj == NULL)
		return rc;

	io = vvp_env_thread_io(env);
	io->ci_obj = obj;
	rc = cl_io_init(env, io, CIT_MISC, obj);
	if (rc != 0)
		GOTO(fini_io, rc);

	cl_object_attr_lock(lli->lli_clob);
	rc = cl_object_attr_get(env, lli->lli_clob, attr);
	cl_object_attr_unlock(lli->lli_clob);

fini_io:
	cl_io_fini(env, io);
	if (rc != 0)
		return rc;

	kms = attr->cat_kms;
	/* if read beyond end-of-file, adjust read count */
	if (kms > 0 && (iocb->ki_pos >= kms || read_end > kms)) {
		rc = ll_glimpse_size(inode);
		if (rc != 0)
			return rc;

		size = i_size_read(inode);
		if (iocb->ki_pos >= size || read_end > size) {
			CDEBUG(D_VFSTRACE,
			       "%s: read [%llu, %llu] over eof, kms %llu, file_size %llu.\n",
			       file_dentry(file)->d_name.name,
			       iocb->ki_pos, read_end, kms, size);

			if (iocb->ki_pos >= size)
				return 1;

			if (read_end > size)
				iov_iter_truncate(to, size - iocb->ki_pos);
		}
	}

	return rc;
}

#ifdef HAVE_IOV_ITER_INIT_DIRECTION
# define ll_iov_iter_init(i, d, v, n, l) \
	 iov_iter_init((i), (d), (v), (n), (l))
#else
# define ll_iov_iter_init(i, d, v, n, l) \
	 iov_iter_init((i), (v), (n), (l), 0)
#endif

typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);
2192 static ssize_t do_loop_readv_writev(struct kiocb *iocb, const struct iovec *iov,
2193 int rw, unsigned long nr_segs, iter_fn_t fn)
2195 const struct iovec *vector = iov;
2198 while (nr_segs > 0) {
2201 size_t len = vector->iov_len;
2203 ll_iov_iter_init(&i, rw, vector, 1, len);
2221 * Check if we need to loop over the iovec and submit each segment in a loop.
2222 * This is needed when:
2223 * - Prior to the introduction of HAVE_DIO_ITER
2224 * - unaligned direct i/o
2225 * Returns true for the above cases and false otherwise.
2227 * Note that looping is always safe although it is preferable to pass the
2228 * iovec down unmodified when the appropriate support is available.
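 *
 * Hypothetical examples (4096-byte pages): a direct read at ki_pos 4096
 * with two page-aligned 4096-byte segments can be passed down unmodified,
 * while the same read at ki_pos 100, or with a 100-byte segment, is
 * unaligned and is submitted one segment at a time through
 * do_loop_readv_writev().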
2230 static bool is_unaligned_directio(struct kiocb *iocb, struct iov_iter *iter,
2231 enum cl_io_type io_type)
2233 #ifdef HAVE_DIO_ITER
2234 struct file *file = iocb->ki_filp;
2235 int iocb_flags = iocb_ki_flags_get(file, iocb);
2236 bool direct_io = iocb_ki_flags_check(iocb_flags, DIRECT);
2237 bool unaligned = false;
2239 /* This I/O could be switched to direct i/o if the kernel is new enough */
2241 if (ll_hybrid_bio_dio_switch_check(file, iocb, io_type,
2242 iov_iter_count(iter)))
2247 if (iocb->ki_pos & ~PAGE_MASK)
2249 else if (iov_iter_count(iter) & ~PAGE_MASK)
2251 else if (ll_iov_iter_alignment(iter) & ~PAGE_MASK)
2257 #endif /* HAVE_DIO_ITER */
2261 * Read from a file (through the page cache).
2263 static ssize_t do_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2266 struct vvp_io_args *args;
2267 struct file *file = iocb->ki_filp;
2268 loff_t orig_ki_pos = iocb->ki_pos;
2272 ktime_t kstart = ktime_get();
2274 bool stale_data = false;
2278 CDEBUG(D_VFSTRACE|D_IOTRACE,
2279 "START file %s:"DFID", ppos: %lld, count: %zu\n",
2280 file_dentry(file)->d_name.name,
2281 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
2282 iov_iter_count(to));
2284 if (!iov_iter_count(to))
2287 env = cl_env_get(&refcheck);
2289 RETURN(PTR_ERR(env));
2291 result = file_read_confine_iter(env, iocb, to);
2294 else if (result > 0)
2297 CFS_FAIL_TIMEOUT_ORSET(OBD_FAIL_LLITE_READ_PAUSE, CFS_FAIL_ONCE,
2300 * Currently when a PCC read fails, we do not fall back to the
2301 * normal read path, just return the error.
2302 * The reason is that for RW-PCC the file data may have been
2303 * modified in the PCC and be inconsistent with the data on the
2304 * OSTs (or the file data may have been removed from the Lustre
2305 * file system), so falling back to the normal read path may read stale data.
2307 * TODO: for RO-PCC (readonly PCC), fall back to normal read
2308 * path: read data from data copy on OSTs.
2310 result = pcc_file_read_iter(iocb, to, &cached);
2314 ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to));
2316 args = ll_env_args(env);
2317 args->u.normal.via_iter = to;
2318 args->u.normal.via_iocb = iocb;
2320 if (ll_hybrid_bio_dio_switch_check(file, iocb, CIT_READ,
2321 iov_iter_count(to)) ||
2322 CFS_FAIL_CHECK(OBD_FAIL_LLITE_FORCE_BIO_AS_DIO)) {
2324 iocb->ki_flags |= IOCB_DIRECT;
2325 CDEBUG(D_VFSTRACE, "switching to DIO\n");
2326 args->via_hybrid_switched = 1;
2330 result = ll_do_fast_read(iocb, to);
2331 if (result < 0 || iov_iter_count(to) == 0)
2334 rc2 = ll_file_io_generic(env, args, file, CIT_READ,
2335 &iocb->ki_pos, iov_iter_count(to));
2338 else if (result == 0)
2342 cl_env_put(env, &refcheck);
2344 if (stale_data && result > 0) {
2346 * we've reached EOF before the read; the data read is stale cached data.
2349 iocb->ki_pos = orig_ki_pos;
2350 iov_iter_truncate(to, 0);
2355 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
2356 file->private_data, iocb->ki_pos, result,
2358 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ,
2359 ktime_us_delta(ktime_get(), kstart));
2363 "COMPLETED: file %s:"DFID", ppos: %lld, count: %zu, rc = %zu\n",
2364 file_dentry(file)->d_name.name,
2365 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
2366 iov_iter_count(to), result);
2371 static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2373 if (iter->nr_segs > 1 && is_unaligned_directio(iocb, iter, CIT_READ))
2374 return do_loop_readv_writev(iocb, iter->__iov, READ,
2375 iter->nr_segs, do_file_read_iter);
2376 return do_file_read_iter(iocb, iter);
2380 * Similar trick to ll_do_fast_read, this improves write speed for tiny writes.
2381 * If a page is already in the page cache and dirty (and some other things -
2382 * See ll_tiny_write_begin for the instantiation of these rules), then we can
2383 * write to it without doing a full I/O, because Lustre already knows about it
2384 * and will write it out. This saves a lot of processing time.
2386 * All writes here are within one page, so exclusion is handled by the page
2387 * lock on the vm page. We do not do tiny writes for writes which touch
2388 * multiple pages because it's very unlikely multiple sequential pages
2389 * are already dirty.
2391 * We limit these to < PAGE_SIZE because PAGE_SIZE writes are relatively common
2392 * and are unlikely to target already-dirty pages.
2394 * Attribute updates are important here; we do them in ll_tiny_write_end.
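 *
 * Example (illustrative numbers): a 200-byte append at offset 4100 stays
 * inside one page, so if that page is already cached and dirty the write
 * completes via __generic_file_write_iter() without building a cl_io; a
 * PAGE_SIZE or page-crossing write always takes the normal path.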
2396 static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter)
2398 ssize_t count = iov_iter_count(iter);
2399 struct file *file = iocb->ki_filp;
2400 struct inode *inode = file_inode(file);
2401 bool lock_inode = !IS_NOSEC(inode);
2406 /* Restrict writes to a single page and < PAGE_SIZE. See the comment at
2407 * the top of this function for why.
2409 if (count >= PAGE_SIZE ||
2410 (iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
2413 if (unlikely(lock_inode))
2414 ll_inode_lock(inode);
2415 result = __generic_file_write_iter(iocb, iter);
2417 if (unlikely(lock_inode))
2418 ll_inode_unlock(inode);
2420 /* If the page is not already dirty, ll_tiny_write_begin returns
2421 * -ENODATA. We continue on to normal write.
2423 if (result == -ENODATA)
2427 ll_heat_add(inode, CIT_WRITE, result);
2428 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
2431 CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);
2437 * Write to a file (through the page cache).
2439 static ssize_t do_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2441 struct file *file = iocb->ki_filp;
2442 struct vvp_io_args *args;
2444 int flags = iocb_ki_flags_get(file, iocb);
2445 ktime_t kstart = ktime_get();
2446 bool hybrid_switched = false;
2447 ssize_t rc_tiny = 0;
2455 CDEBUG(D_VFSTRACE|D_IOTRACE,
2456 "START file %s:"DFID", ppos: %lld, count: %zu\n",
2457 file_dentry(file)->d_name.name,
2458 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
2459 iov_iter_count(from));
2461 if (!iov_iter_count(from))
2462 GOTO(out, rc_normal = 0);
2465 * When a PCC write fails, we usually do not fall back to the normal
2466 * write path, just return the error. But there is a special case when
2467 * the returned error code is -ENOSPC due to running out of space on
2468 * the PCC HSM backend. In that case, we fall back to the normal I/O
2469 * path and retry the I/O: as the file is in the HSM released state,
2470 * the write first restores the file data to the OSTs and is then
2471 * redone. The restore process revokes the layout lock and detaches
2472 * the file from the PCC cache automatically.
2474 result = pcc_file_write_iter(iocb, from, &cached);
2475 if (cached && result != -ENOSPC && result != -EDQUOT)
2476 GOTO(out, rc_normal = result);
2478 if (ll_hybrid_bio_dio_switch_check(file, iocb, CIT_WRITE,
2479 iov_iter_count(from)) ||
2480 CFS_FAIL_CHECK(OBD_FAIL_LLITE_FORCE_BIO_AS_DIO)) {
2482 iocb->ki_flags |= IOCB_DIRECT;
2483 CDEBUG(D_VFSTRACE, "switching to DIO\n");
2484 hybrid_switched = true;
2488 /* NB: we can't do direct IO for tiny writes because they use the page
2489 * cache, we can't do sync writes because tiny writes can't flush
2490 * pages, and we can't do append writes because we can't guarantee the
2491 * required DLM locks are held to protect file size.
2493 if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
2495 (ki_flag(DIRECT) | ki_flag(DSYNC) | ki_flag(SYNC) | ki_flag(APPEND))))
2496 rc_tiny = ll_do_tiny_write(iocb, from);
2498 /* In case of error, go on and try the normal write - only stop if the
2499 * tiny write completed the I/O.
2501 if (iov_iter_count(from) == 0)
2502 GOTO(out, rc_normal = rc_tiny);
2504 env = cl_env_get(&refcheck);
2506 RETURN(PTR_ERR(env));
2508 args = ll_env_args(env);
2509 args->u.normal.via_iter = from;
2510 args->u.normal.via_iocb = iocb;
2511 args->via_hybrid_switched = hybrid_switched;
2513 rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE,
2514 &iocb->ki_pos, iov_iter_count(from));
2516 /* On success, combine bytes written. */
2517 if (rc_tiny >= 0 && rc_normal > 0)
2518 rc_normal += rc_tiny;
2519 /* On error, only return error from normal write if tiny write did not
2520 * write any bytes. Otherwise return bytes written by tiny write.
2522 else if (rc_tiny > 0)
2523 rc_normal = rc_tiny;
2525 cl_env_put(env, &refcheck);
2527 if (rc_normal > 0) {
2528 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
2529 file->private_data, iocb->ki_pos,
2531 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE,
2532 ktime_us_delta(ktime_get(), kstart));
2536 "COMPLETED: file %s:"DFID", ppos: %lld, count: %zu, rc = %zu\n",
2537 file_dentry(file)->d_name.name,
2538 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
2539 iov_iter_count(from), rc_normal);
2544 static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
2546 if (iter->nr_segs > 1 && is_unaligned_directio(iocb, iter, CIT_WRITE))
2547 return do_loop_readv_writev(iocb, iter->__iov, WRITE,
2548 iter->nr_segs, do_file_write_iter);
2549 return do_file_write_iter(iocb, iter);
2552 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
2554 * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
2556 static int ll_file_get_iov_count(const struct iovec *iov,
2557 unsigned long *nr_segs, size_t *count,
2563 for (seg = 0; seg < *nr_segs; seg++) {
2564 const struct iovec *iv = &iov[seg];
2567 * If any segment has a negative length, or the cumulative
2568 * length ever wraps negative then return -EINVAL.
2571 if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
2573 if (access_ok(access_flags, iv->iov_base, iv->iov_len))
2578 cnt -= iv->iov_len; /* This segment is no good */
2585 static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
2586 unsigned long nr_segs, loff_t pos)
2594 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_READ);
2601 ll_iov_iter_init(&to, READ, iov, nr_segs, iov_count);
2603 RETURN(ll_file_read_iter(iocb, &to));
2606 static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
2609 struct iovec iov = { .iov_base = buf, .iov_len = count };
2618 init_sync_kiocb(&kiocb, file);
2619 kiocb.ki_pos = *ppos;
2620 #ifdef HAVE_KIOCB_KI_LEFT
2621 kiocb.ki_left = count;
2622 #elif defined(HAVE_KI_NBYTES)
2623 kiocb.ki_nbytes = count;
2626 result = ll_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
2627 *ppos = kiocb.ki_pos;
2633 * Write to a file (through the page cache).
2636 static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2637 unsigned long nr_segs, loff_t pos)
2639 struct iov_iter from;
2645 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_WRITE);
2652 ll_iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
2654 RETURN(ll_file_write_iter(iocb, &from));
2657 static ssize_t ll_file_write(struct file *file, const char __user *buf,
2658 size_t count, loff_t *ppos)
2660 struct iovec iov = { .iov_base = (void __user *)buf,
2670 init_sync_kiocb(&kiocb, file);
2671 kiocb.ki_pos = *ppos;
2672 #ifdef HAVE_KIOCB_KI_LEFT
2673 kiocb.ki_left = count;
2674 #elif defined(HAVE_KI_NBYTES)
2675 kiocb.ki_nbytes = count;
2678 result = ll_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
2679 *ppos = kiocb.ki_pos;
2683 #endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
2685 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
2686 __u64 flags, struct lov_user_md *lum, int lum_size)
2688 struct lookup_intent oit = {
2690 .it_open_flags = flags | MDS_OPEN_BY_FID,
2695 if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
2696 le32_to_cpu(LOV_MAGIC_MAGIC)) {
2697 /* this code will only exist for big-endian systems */
2698 lustre_swab_lov_user_md(lum, 0);
2701 ll_inode_size_lock(inode);
2702 rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
2704 GOTO(out_unlock, rc);
2706 ll_release_openhandle(dentry, &oit);
2709 ll_inode_size_unlock(inode);
2710 ll_intent_release(&oit);
2715 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
2716 struct lov_mds_md **lmmp, int *lmm_size,
2717 struct ptlrpc_request **request)
2719 struct ll_sb_info *sbi = ll_i2sbi(inode);
2720 struct mdt_body *body;
2721 struct lov_mds_md *lmm = NULL;
2722 struct ptlrpc_request *req = NULL;
2723 struct md_op_data *op_data;
2728 rc = ll_get_default_mdsize(sbi, &lmmsize);
2732 op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
2733 strlen(filename), lmmsize,
2734 LUSTRE_OPC_ANY, NULL);
2735 if (IS_ERR(op_data))
2736 RETURN(PTR_ERR(op_data));
2738 op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
2739 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
2740 ll_finish_md_op_data(op_data);
2742 CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
2747 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2748 LASSERT(body != NULL); /* checked by mdc_getattr_name */
2750 lmmsize = body->mbo_eadatasize;
2752 if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
2754 GOTO(out, rc = -ENODATA);
2756 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
2757 LASSERT(lmm != NULL);
2759 if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
2760 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
2761 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_COMP_V1) &&
2762 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_FOREIGN))
2763 GOTO(out, rc = -EPROTO);
2766 * This is coming from the MDS, so is probably in
2767 * little endian. We convert it to host endian before
2768 * passing it to userspace.
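 *
 * E.g. on a big-endian host cpu_to_le32(LOV_MAGIC) != LOV_MAGIC holds, so
 * the branch below swabs the lmm fields in place; on little-endian hosts
 * the wire data is already in host order and the branch is skipped.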
2770 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
2771 int stripe_count = 0;
2773 if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
2774 lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
2775 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
2776 if (le32_to_cpu(lmm->lmm_pattern) &
2777 LOV_PATTERN_F_RELEASED)
2779 lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
2781 /* if this function is called for a directory, we should
2782 * avoid swabbing non-existent lsm objects
2784 if (lmm->lmm_magic == LOV_MAGIC_V1 &&
2785 S_ISREG(body->mbo_mode))
2786 lustre_swab_lov_user_md_objects(
2787 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
2789 else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
2790 S_ISREG(body->mbo_mode))
2791 lustre_swab_lov_user_md_objects(
2792 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
2794 } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
2795 lustre_swab_lov_comp_md_v1(
2796 (struct lov_comp_md_v1 *)lmm);
2800 if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) {
2801 struct lov_comp_md_v1 *comp_v1 = NULL;
2802 struct lov_comp_md_entry_v1 *ent;
2803 struct lov_user_md_v1 *v1 = NULL;
2807 comp_v1 = (struct lov_comp_md_v1 *)lmm;
2808 /* Dump the striping information */
2809 for (; i < comp_v1->lcm_entry_count; i++) {
2810 ent = &comp_v1->lcm_entries[i];
2811 off = ent->lcme_offset;
2812 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2814 "comp[%d]: stripe_count=%u, stripe_size=%u\n",
2815 i, v1->lmm_stripe_count, v1->lmm_stripe_size);
2817 if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_LOV_COMP_MAGIC) &&
2818 (cfs_fail_val == i + 1)))
2819 v1->lmm_magic = LOV_MAGIC_BAD;
2821 if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_LOV_COMP_PATTERN) &&
2822 (cfs_fail_val == i + 1)))
2823 v1->lmm_pattern = LOV_PATTERN_BAD;
2827 GOTO(out, rc = -EINVAL);
2829 lmm->lmm_stripe_count = v1->lmm_stripe_count;
2830 lmm->lmm_stripe_size = v1->lmm_stripe_size;
2832 * Return valid stripe_count and stripe_size instead of 0 for
2833 * DoM files to avoid divide-by-zero for older userspace that
2834 * calls this ioctl, e.g. lustre ADIO driver.
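 *
 * Example (assumed layout): a DoM file with a 1MiB MDT component followed
 * by an OST component using a 4MiB stripe_size is reported here with
 * stripe_count = 1 and stripe_size = 4MiB instead of 0/0.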
2836 if (lmm->lmm_stripe_count == 0)
2837 lmm->lmm_stripe_count = 1;
2838 if (lmm->lmm_stripe_size == 0) {
2839 /* Since the first component of the file data is placed
2840 * on the MDT for faster access, report the stripe_size
2841 * of the following OST component, which is what
2842 * applications doing large IOs will actually use.
2843 */
2844 if (lmm->lmm_pattern & LOV_PATTERN_MDT)
2845 i = comp_v1->lcm_entry_count > 1 ? 1 : 0;
2847 i = comp_v1->lcm_entry_count > 1 ?
2848 comp_v1->lcm_entry_count - 1 : 0;
2849 ent = &comp_v1->lcm_entries[i];
2850 off = ent->lcme_offset;
2851 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2852 lmm->lmm_stripe_size = v1->lmm_stripe_size;
2857 *lmm_size = lmmsize;
2862 static int ll_lov_setea(struct inode *inode, struct file *file,
2865 __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
2866 struct lov_user_md *lump;
2867 int lum_size = sizeof(*lump) + sizeof(struct lov_user_ost_data);
2871 if (!capable(CAP_SYS_ADMIN))
2874 OBD_ALLOC_LARGE(lump, lum_size);
2878 if (copy_from_user(lump, arg, lum_size))
2879 GOTO(out_lump, rc = -EFAULT);
2881 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, lump,
2883 cl_lov_delay_create_clear(&file->f_flags);
2886 OBD_FREE_LARGE(lump, lum_size);
2890 static int ll_file_getstripe(struct inode *inode, void __user *lum, size_t size)
2897 /* exit before doing any work if pointer is bad */
2898 if (unlikely(!ll_access_ok(lum, sizeof(struct lov_user_md))))
2901 env = cl_env_get(&refcheck);
2903 RETURN(PTR_ERR(env));
2905 rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum, size);
2906 cl_env_put(env, &refcheck);
2910 static int ll_lov_setstripe(struct inode *inode, struct file *file,
2913 struct lov_user_md __user *lum = arg;
2914 struct lov_user_md *klum;
2916 __u64 flags = FMODE_WRITE;
2919 rc = ll_copy_user_md(lum, &klum);
2924 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, klum,
2929 rc = put_user(0, &lum->lmm_stripe_count);
2933 rc = ll_layout_refresh(inode, &gen);
2937 rc = ll_file_getstripe(inode, arg, lum_size);
2938 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2939 ll_i2info(inode)->lli_clob) {
2940 struct iattr attr = { 0 };
2942 rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, &attr,
2943 OP_XVALID_FLAGS, LUSTRE_ENCRYPT_FL);
2946 cl_lov_delay_create_clear(&file->f_flags);
2949 OBD_FREE_LARGE(klum, lum_size);
2955 ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
2957 struct ll_inode_info *lli = ll_i2info(inode);
2958 struct cl_object *obj = lli->lli_clob;
2959 struct ll_file_data *fd = file->private_data;
2960 struct ll_grouplock grouplock;
2965 CWARN("group id for group lock must not be 0\n");
2969 if (ll_file_nolock(file))
2970 RETURN(-EOPNOTSUPP);
2972 if (file->f_flags & O_NONBLOCK) {
2973 if (!mutex_trylock(&lli->lli_group_mutex))
2976 mutex_lock(&lli->lli_group_mutex);
2979 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
2980 CWARN("group lock already existed with gid %lu\n",
2981 fd->fd_grouplock.lg_gid);
2982 GOTO(out, rc = -EINVAL);
2984 if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
2985 if (file->f_flags & O_NONBLOCK)
2986 GOTO(out, rc = -EAGAIN);
2987 mutex_unlock(&lli->lli_group_mutex);
2988 wait_var_event(&lli->lli_group_users, !lli->lli_group_users);
2989 GOTO(retry, rc = 0);
2991 LASSERT(fd->fd_grouplock.lg_lock == NULL);
2994 * XXX: the group lock needs to protect all OST objects, while PFL
2995 * can add new OST objects during the IO, so we have to instantiate
2996 * all OST objects before taking the group lock.
3001 struct cl_layout cl = {
3002 .cl_is_composite = false,
3004 struct lu_extent ext = {
3006 .e_end = OBD_OBJECT_EOF,
3009 env = cl_env_get(&refcheck);
3011 GOTO(out, rc = PTR_ERR(env));
3013 rc = cl_object_layout_get(env, obj, &cl);
3014 if (rc >= 0 && cl.cl_is_composite)
3015 rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
3018 cl_env_put(env, &refcheck);
3023 rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
3024 arg, (file->f_flags & O_NONBLOCK), &grouplock);
3029 fd->fd_flags |= LL_FILE_GROUP_LOCKED;
3030 fd->fd_grouplock = grouplock;
3031 if (lli->lli_group_users == 0)
3032 lli->lli_group_gid = grouplock.lg_gid;
3033 lli->lli_group_users++;
3035 CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
3037 mutex_unlock(&lli->lli_group_mutex);
3042 static int ll_put_grouplock(struct inode *inode, struct file *file,
3045 struct ll_inode_info *lli = ll_i2info(inode);
3046 struct ll_file_data *fd = file->private_data;
3047 struct ll_grouplock grouplock;
3051 mutex_lock(&lli->lli_group_mutex);
3052 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
3053 CWARN("no group lock held\n");
3054 GOTO(out, rc = -EINVAL);
3057 LASSERT(fd->fd_grouplock.lg_lock != NULL);
3059 if (fd->fd_grouplock.lg_gid != arg) {
3060 CWARN("group lock %lu doesn't match current id %lu\n",
3061 arg, fd->fd_grouplock.lg_gid);
3062 GOTO(out, rc = -EINVAL);
3065 grouplock = fd->fd_grouplock;
3066 memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
3067 fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
3069 cl_put_grouplock(&grouplock);
3071 lli->lli_group_users--;
3072 if (lli->lli_group_users == 0) {
3073 lli->lli_group_gid = 0;
3074 wake_up_var(&lli->lli_group_users);
3076 CDEBUG(D_INFO, "group lock %lu released\n", arg);
3079 mutex_unlock(&lli->lli_group_mutex);
3085 * Close inode open handle
3087 * \param dentry [in] dentry which contains the inode
3088 * \param it [in,out] intent which contains open info and result
3091 * \retval <0 failure
3093 int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
3095 struct inode *inode = dentry->d_inode;
3096 struct obd_client_handle *och;
3102 /* Root ? Do nothing. */
3103 if (is_root_inode(inode))
3106 /* No open handle to close? Move away */
3107 if (!it_disposition(it, DISP_OPEN_OPEN))
3110 LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
3112 OBD_ALLOC(och, sizeof(*och));
3114 GOTO(out, rc = -ENOMEM);
3116 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
3120 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
3122 /* this one is in place of ll_file_open */
3123 if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
3124 ptlrpc_req_finished(it->it_request);
3125 it_clear_disposition(it, DISP_ENQ_OPEN_REF);
3131 * Get the size of the inode for which the FIEMAP mapping is requested.
3132 * Make the FIEMAP get_info call and return the result.
3133 * \param fiemap	kernel buffer to hold the extents
3134 * \param num_bytes	kernel buffer size
3136 static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
3142 struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
3145 /* Checks for fiemap flags */
3146 if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
3147 fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
3151 /* Check for FIEMAP_FLAG_SYNC */
3152 if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
3153 rc = filemap_write_and_wait(inode->i_mapping);
3158 env = cl_env_get(&refcheck);
3160 RETURN(PTR_ERR(env));
3162 if (i_size_read(inode) == 0) {
3163 rc = ll_glimpse_size(inode);
3168 fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLPROJID;
3169 obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
3170 obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
3172 /* If filesize is 0, then there would be no objects for mapping */
3173 if (fmkey.lfik_oa.o_size == 0) {
3174 fiemap->fm_mapped_extents = 0;
3178 fmkey.lfik_fiemap = *fiemap;
3180 rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
3181 &fmkey, fiemap, &num_bytes);
3183 cl_env_put(env, &refcheck);
3187 static int fid2path_for_enc_file(struct inode *parent, char *gfpath,
3190 struct dentry *de = NULL, *de_parent = d_find_any_alias(parent);
3191 struct llcrypt_str lltr = LLTR_INIT(NULL, 0);
3192 struct llcrypt_str de_name;
3193 char *p, *ptr = gfpath;
3194 size_t len = 0, len_orig = 0;
3195 int enckey = -1, nameenc = -1;
3199 while ((p = strsep(&gfpath, "/")) != NULL) {
3207 len_orig = strlen(p);
3209 rc = sscanf(p, "["SFID"]", RFID(&fid));
3211 p = strchr(p, ']') + 1;
3217 if (!IS_ENCRYPTED(parent)) {
3218 if (gfpathlen < len + 1) {
3223 memmove(ptr, p, len);
3227 gfpathlen -= len + 1;
3231 /* From here, we know parent is encrypted */
3234 rc = llcrypt_prepare_readdir(parent);
3235 if (rc && rc != -ENOKEY) {
3242 if (llcrypt_has_encryption_key(parent))
3248 llcrypt_policy_has_filename_enc(parent);
3251 /* Even if names are not encrypted, we still need to call
3252 * ll_fname_disk_to_usr in order to decode names as they are
3253 * coming from the wire.
3255 rc = llcrypt_fname_alloc_buffer(parent, NAME_MAX + 1, &lltr);
3263 rc = ll_fname_disk_to_usr(parent, 0, 0, &de_name,
3266 llcrypt_fname_free_buffer(&lltr);
3270 lltr.name[lltr.len] = '\0';
3272 if (lltr.len <= len_orig && gfpathlen >= lltr.len + 1) {
3273 memcpy(ptr, lltr.name, lltr.len);
3278 gfpathlen -= lltr.len + 1;
3282 llcrypt_fname_free_buffer(&lltr);
3284 if (rc == -EOVERFLOW) {
3291 /* We reached the end of the string, which means
3292 * we are dealing with the last component in the path.
3293 * So save a useless lookup and exit.
3299 if (enckey == 0 || nameenc == 0)
3302 ll_inode_lock(parent);
3303 de = lookup_one_len(p, de_parent, len);
3304 ll_inode_unlock(parent);
3305 if (IS_ERR_OR_NULL(de) || !de->d_inode) {
3311 parent = de->d_inode;
3318 if (!IS_ERR_OR_NULL(de))
3323 int __ll_fid2path(struct inode *inode, struct getinfo_fid2path *gfout,
3324 size_t outsize, __u32 pathlen_orig)
3326 struct obd_export *exp = ll_i2mdexp(inode);
3329 /* Append the root FID after gfout to let the MDT know the root FID so
3330 * that it can look up the correct path; this is mainly for fileset.
3331 * Old servers without fileset mount support will ignore this.
3333 *gfout->gf_u.gf_root_fid = *ll_inode2fid(inode);
3335 /* Call mdc_iocontrol */
3336 rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
3338 if (!rc && gfout->gf_pathlen && gfout->gf_u.gf_path[0] == '/') {
3339 /* by convention, the server side (mdt_path_current()) puts
3340 * a leading '/' to tell the client that we are dealing with
3343 rc = fid2path_for_enc_file(inode, gfout->gf_u.gf_path,
3345 if (!rc && strlen(gfout->gf_u.gf_path) > pathlen_orig)
3352 int ll_fid2path(struct inode *inode, void __user *arg)
3354 const struct getinfo_fid2path __user *gfin = arg;
3355 __u32 pathlen, pathlen_orig;
3356 struct getinfo_fid2path *gfout;
3362 if (!capable(CAP_DAC_READ_SEARCH) &&
3363 !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
3366 /* Only need to get the buflen */
3367 if (get_user(pathlen, &gfin->gf_pathlen))
3370 if (pathlen > PATH_MAX)
3372 pathlen_orig = pathlen;
3375 outsize = sizeof(*gfout) + pathlen;
3376 OBD_ALLOC(gfout, outsize);
3380 if (copy_from_user(gfout, arg, sizeof(*gfout)))
3381 GOTO(gf_free, rc = -EFAULT);
3383 gfout->gf_pathlen = pathlen;
3384 rc = __ll_fid2path(inode, gfout, outsize, pathlen_orig);
3388 if (copy_to_user(arg, gfout, sizeof(*gfout) + pathlen_orig))
3392 OBD_FREE(gfout, outsize);
3393 if (rc == -ENAMETOOLONG) {
3394 pathlen += PATH_MAX;
3401 ll_ioc_data_version(struct inode *inode, struct ioc_data_version *ioc)
3403 struct cl_object *obj = ll_i2info(inode)->lli_clob;
3411 ioc->idv_version = 0;
3412 ioc->idv_layout_version = UINT_MAX;
3414 /* If no file object is initialized, we consider its version to be 0. */
3418 env = cl_env_get(&refcheck);
3420 RETURN(PTR_ERR(env));
3422 io = vvp_env_thread_io(env);
3424 io->u.ci_data_version.dv_data_version = 0;
3425 io->u.ci_data_version.dv_layout_version = UINT_MAX;
3426 io->u.ci_data_version.dv_flags = ioc->idv_flags;
3429 if (cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj) == 0)
3430 result = cl_io_loop(env, io);
3432 result = io->ci_result;
3434 ioc->idv_version = io->u.ci_data_version.dv_data_version;
3435 ioc->idv_layout_version = io->u.ci_data_version.dv_layout_version;
3436 cl_io_fini(env, io);
3438 if (unlikely(io->ci_need_restart))
3441 cl_env_put(env, &refcheck);
3447 * Read the data_version for inode.
3449 * This value is computed from the stripe object versions on the OSTs.
3450 * The version is computed using server side locking.
3452 * @param flags	whether to do a sync on the OST side:
3454 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
3455 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
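 *
 * A minimal usage sketch (see e.g. ll_hsm_release() below for a real
 * caller, which uses LL_DV_WR_FLUSH | LL_DV_SZ_UPDATE):
 *
 *	__u64 dv = 0;
 *
 *	rc = ll_data_version(inode, &dv, LL_DV_RD_FLUSH);
 *	if (rc == 0)
 *		.. dv now reflects the flushed file data ..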
3457 int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
3459 struct ioc_data_version ioc = { .idv_flags = flags };
3462 rc = ll_ioc_data_version(inode, &ioc);
3464 *data_version = ioc.idv_version;
3470 * Trigger a HSM release request for the provided inode.
3472 int ll_hsm_release(struct inode *inode)
3475 struct obd_client_handle *och = NULL;
3476 __u64 data_version = 0;
3482 CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
3483 ll_i2sbi(inode)->ll_fsname,
3484 PFID(&ll_i2info(inode)->lli_fid));
3486 och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
3488 GOTO(out, rc = PTR_ERR(och));
3490 /* Grab latest data_version and [am]time values */
3491 rc = ll_data_version(inode, &data_version,
3492 LL_DV_WR_FLUSH | LL_DV_SZ_UPDATE);
3496 env = cl_env_get(&refcheck);
3498 GOTO(out, rc = PTR_ERR(env));
3500 rc = ll_merge_attr(env, inode);
3501 cl_env_put(env, &refcheck);
3503 /* If an error happens, we have the wrong size for the file.
3509 /* Release the file.
3510 * NB: lease lock handle is released in mdc_hsm_release_pack() because
3511 * we still need it to pack l_remote_handle to MDT. */
3512 rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
3518 if (och != NULL && !IS_ERR(och)) /* close the file */
3519 ll_lease_close(och, inode, NULL);
3524 struct ll_swap_stack {
3527 struct inode *inode1;
3528 struct inode *inode2;
3533 static int ll_swap_layouts(struct file *file1, struct file *file2,
3534 struct lustre_swap_layouts *lsl)
3536 struct mdc_swap_layouts msl;
3537 struct md_op_data *op_data;
3540 struct ll_swap_stack *llss = NULL;
3543 OBD_ALLOC_PTR(llss);
3547 llss->inode1 = file_inode(file1);
3548 llss->inode2 = file_inode(file2);
3550 rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
3554 /* we use 2 bools because they are easier to swap than 2 bits */
3555 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
3556 llss->check_dv1 = true;
3558 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
3559 llss->check_dv2 = true;
3561 /* we cannot use lsl->sl_dvX directly because we may swap them */
3562 llss->dv1 = lsl->sl_dv1;
3563 llss->dv2 = lsl->sl_dv2;
3565 rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
3566 if (rc == 0) /* same file, done! */
3569 if (rc < 0) { /* sequentialize it */
3570 swap(llss->inode1, llss->inode2);
3572 swap(llss->dv1, llss->dv2);
3573 swap(llss->check_dv1, llss->check_dv2);
3577 if (gid != 0) { /* application asks to flush dirty cache */
3578 rc = ll_get_grouplock(llss->inode1, file1, gid);
3582 rc = ll_get_grouplock(llss->inode2, file2, gid);
3584 ll_put_grouplock(llss->inode1, file1, gid);
3589 /* ultimate check: before swapping the layouts we check whether
3590 * the data version has changed (if requested) */
3591 if (llss->check_dv1) {
3592 rc = ll_data_version(llss->inode1, &dv, 0);
3595 if (dv != llss->dv1)
3596 GOTO(putgl, rc = -EAGAIN);
3599 if (llss->check_dv2) {
3600 rc = ll_data_version(llss->inode2, &dv, 0);
3603 if (dv != llss->dv2)
3604 GOTO(putgl, rc = -EAGAIN);
3607 /* struct md_op_data is used to send the swap args to the MDT;
3608 * only the flags are missing, so we use struct mdc_swap_layouts
3609 * through md_op_data->op_data */
3610 /* flags from user space have to be converted before they are sent to
3611 * the server; no flag is sent today, they are only used on the client */
3614 op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
3615 0, LUSTRE_OPC_ANY, &msl);
3616 if (IS_ERR(op_data))
3617 GOTO(free, rc = PTR_ERR(op_data));
3619 rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
3620 sizeof(*op_data), op_data, NULL);
3621 ll_finish_md_op_data(op_data);
3628 ll_put_grouplock(llss->inode2, file2, gid);
3629 ll_put_grouplock(llss->inode1, file1, gid);
3639 int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
3641 struct obd_export *exp = ll_i2mdexp(inode);
3642 struct md_op_data *op_data;
3646 /* Detect out-of-range masks */
3647 if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
3650 /* Non-root users are forbidden to set or clear flags which are
3651 * NOT defined in HSM_USER_MASK. */
3652 if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
3653 !capable(CAP_SYS_ADMIN))
3656 if (!exp_connect_archive_id_array(exp)) {
3657 /* Detect out-of-range archive id */
3658 if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
3659 (hss->hss_archive_id > LL_HSM_ORIGIN_MAX_ARCHIVE))
3663 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3664 LUSTRE_OPC_ANY, hss);
3665 if (IS_ERR(op_data))
3666 RETURN(PTR_ERR(op_data));
3668 rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, exp, sizeof(*op_data),
3671 ll_finish_md_op_data(op_data);
3676 static int ll_hsm_import(struct inode *inode, struct file *file,
3677 struct hsm_user_import *hui)
3679 struct hsm_state_set *hss = NULL;
3680 struct iattr *attr = NULL;
3684 if (!S_ISREG(inode->i_mode))
3690 GOTO(out, rc = -ENOMEM);
3692 hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
3693 hss->hss_archive_id = hui->hui_archive_id;
3694 hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
3695 rc = ll_hsm_state_set(inode, hss);
3699 OBD_ALLOC_PTR(attr);
3701 GOTO(out, rc = -ENOMEM);
3703 attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
3704 attr->ia_mode |= S_IFREG;
3705 attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
3706 attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
3707 attr->ia_size = hui->hui_size;
3708 attr->ia_mtime.tv_sec = hui->hui_mtime;
3709 attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
3710 attr->ia_atime.tv_sec = hui->hui_atime;
3711 attr->ia_atime.tv_nsec = hui->hui_atime_ns;
3713 attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
3714 ATTR_UID | ATTR_GID |
3715 ATTR_MTIME | ATTR_MTIME_SET |
3716 ATTR_ATIME | ATTR_ATIME_SET;
3719 /* inode lock owner set in ll_setattr_raw() */
3720 rc = ll_setattr_raw(file_dentry(file), attr, 0, true);
3723 inode_unlock(inode);
3735 static inline long ll_lease_type_from_fmode(fmode_t fmode)
3737 return ((fmode & FMODE_READ) ? LL_LEASE_RDLCK : 0) |
3738 ((fmode & FMODE_WRITE) ? LL_LEASE_WRLCK : 0);
3741 static int ll_file_futimes_3(struct file *file, const struct ll_futimes_3 *lfu)
3743 struct inode *inode = file_inode(file);
3745 .ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
3746 ATTR_MTIME | ATTR_MTIME_SET |
3749 .tv_sec = lfu->lfu_atime_sec,
3750 .tv_nsec = lfu->lfu_atime_nsec,
3753 .tv_sec = lfu->lfu_mtime_sec,
3754 .tv_nsec = lfu->lfu_mtime_nsec,
3757 .tv_sec = lfu->lfu_ctime_sec,
3758 .tv_nsec = lfu->lfu_ctime_nsec,
3764 if (!capable(CAP_SYS_ADMIN))
3767 if (!S_ISREG(inode->i_mode))
3771 /* inode lock owner set in ll_setattr_raw() */
3772 rc = ll_setattr_raw(file_dentry(file), &ia, OP_XVALID_CTIME_SET,
3774 inode_unlock(inode);
3779 static enum cl_lock_mode cl_mode_user_to_kernel(enum lock_mode_user mode)
3782 case MODE_READ_USER:
3784 case MODE_WRITE_USER:
3791 static const char *const user_lockname[] = LOCK_MODE_NAMES;
3793 /* Used to allow the upper layers of the client to request an LDLM lock
3794 * without doing an actual read or write.
3796 * Used for ladvise lockahead to manually request specific locks.
3798 * \param[in] file file this ladvise lock request is on
3799 * \param[in] ladvise ladvise struct describing this lock request
3801 * \retval 0 success, no detailed result available (sync requests
3802 * and requests sent to the server [not handled locally]
3803 * cannot return detailed results)
3804 * \retval LLA_RESULT_{SAME,DIFFERENT} - detailed result of the lock request,
3805 * see definitions for details.
3806 * \retval negative negative errno on error
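 *
 * Hypothetical request (field names from struct llapi_lu_ladvise; values
 * are illustrative): ask for a write lock on the first 1MiB ahead of the
 * actual writes:
 *
 *	ladvise.lla_advice          = LU_LADVISE_LOCKAHEAD;
 *	ladvise.lla_lockahead_mode  = MODE_WRITE_USER;
 *	ladvise.lla_peradvice_flags = LF_ASYNC;
 *	ladvise.lla_start           = 0;
 *	ladvise.lla_end             = 1048576;
 *	rc = ll_file_lock_ahead(file, &ladvise);
 *
 * A repeated identical async request would then get back LLA_RESULT_SAME.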
3808 int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise)
3810 struct lu_env *env = NULL;
3811 struct cl_io *io = NULL;
3812 struct cl_lock *lock = NULL;
3813 struct cl_lock_descr *descr = NULL;
3814 struct dentry *dentry = file->f_path.dentry;
3815 struct inode *inode = dentry->d_inode;
3816 enum cl_lock_mode cl_mode;
3817 off_t start = ladvise->lla_start;
3818 off_t end = ladvise->lla_end;
3825 "Lock request: file=%pd, inode=%p, mode=%s start=%llu, end=%llu\n",
3826 dentry, dentry->d_inode,
3827 user_lockname[ladvise->lla_lockahead_mode], (__u64) start,
3830 cl_mode = cl_mode_user_to_kernel(ladvise->lla_lockahead_mode);
3832 GOTO(out, result = cl_mode);
3834 /* Get IO environment */
3835 result = cl_io_get(inode, &env, &io, &refcheck);
3839 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
3842 * nothing to do for this io. This currently happens when
3843 * stripe sub-objects are not yet created.
3845 result = io->ci_result;
3846 } else if (result == 0) {
3847 lock = vvp_env_lock(env);
3848 descr = &lock->cll_descr;
3850 descr->cld_obj = io->ci_obj;
3851 /* Convert byte offsets to pages */
3852 descr->cld_start = start >> PAGE_SHIFT;
3853 descr->cld_end = end >> PAGE_SHIFT;
3854 descr->cld_mode = cl_mode;
3855 /* CEF_MUST is used because we do not want to convert a
3856 * lockahead request to a lockless lock */
3857 descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND;
3859 if (ladvise->lla_peradvice_flags & LF_ASYNC)
3860 descr->cld_enq_flags |= CEF_SPECULATIVE;
3862 result = cl_lock_request(env, io, lock);
3864 /* On success, we need to release the lock */
3866 cl_lock_release(env, lock);
3868 cl_io_fini(env, io);
3869 cl_env_put(env, &refcheck);
3871 /* -ECANCELED indicates a matching lock with a different extent
3872 * was already present, and -EEXIST indicates a matching lock
3873 * on exactly the same extent was already present.
3874 * We convert them to positive values for userspace to make
3875 * recognizing true errors easier.
3876 * Note we can only return these detailed results on async requests,
3877 * as sync requests look the same as i/o requests for locking. */
3878 if (result == -ECANCELED)
3879 result = LLA_RESULT_DIFFERENT;
3880 else if (result == -EEXIST)
3881 result = LLA_RESULT_SAME;
3886 static const char *const ladvise_names[] = LU_LADVISE_NAMES;
3888 static int ll_ladvise_sanity(struct inode *inode,
3889 struct llapi_lu_ladvise *ladvise)
3891 struct ll_sb_info *sbi = ll_i2sbi(inode);
3892 enum lu_ladvise_type advice = ladvise->lla_advice;
3893 /* Note the peradvice flags field is 32 bits, so per-advice flags must
3894 * be in the first 32 bits of enum ladvise_flags */
3895 __u32 flags = ladvise->lla_peradvice_flags;
3896 /* 3 lines at 80 characters per line, should be plenty */
3899 if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) {
3902 "%s: advice with value '%d' not recognized, last supported advice is %s (value '%d'): rc = %d\n",
3903 sbi->ll_fsname, advice,
3904 ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc);
3908 /* Per-advice checks */
3910 case LU_LADVISE_LOCKNOEXPAND:
3911 if (flags & ~LF_LOCKNOEXPAND_MASK) {
3913 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
3914 "rc = %d\n", sbi->ll_fsname, flags,
3915 ladvise_names[advice], rc);
3919 case LU_LADVISE_LOCKAHEAD:
3920 /* Currently only READ and WRITE modes can be requested */
3921 if (ladvise->lla_lockahead_mode >= MODE_MAX_USER ||
3922 ladvise->lla_lockahead_mode == 0) {
3924 CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: "
3925 "rc = %d\n", sbi->ll_fsname,
3926 ladvise->lla_lockahead_mode,
3927 ladvise_names[advice], rc);
3931 case LU_LADVISE_WILLREAD:
3932 case LU_LADVISE_DONTNEED:
3934 /* Note fall through above - These checks apply to all advices
3935 * except LOCKNOEXPAND */
3936 if (flags & ~LF_DEFAULT_MASK) {
3938 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
3939 "rc = %d\n", sbi->ll_fsname, flags,
3940 ladvise_names[advice], rc);
3943 if (ladvise->lla_start >= ladvise->lla_end) {
3945 CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) "
3946 "for %s: rc = %d\n", sbi->ll_fsname,
3947 ladvise->lla_start, ladvise->lla_end,
3948 ladvise_names[advice], rc);
3960 * Give file access advice
3962 * The ladvise interface is similar to the Linux fadvise() system call,
3963 * except it forwards the advices directly from the Lustre client to the
3964 * server. The server side code will apply the appropriate read-ahead and
3965 * caching techniques for the corresponding files.
3967 * A typical workload for ladvise is e.g. a bunch of different clients
3968 * doing small random reads of a file, so prefetching pages into OSS cache
3969 * with big linear reads before the random IO is a net benefit. Fetching
3970 * all that data into each client cache with fadvise() may not be, due to
3971 * much more data being sent to the client.
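 *
 * Sketch of that use case (illustrative): a single client first issues
 * LU_LADVISE_WILLREAD for the whole file so the OSS warms its cache with
 * large sequential reads, after which many clients perform their small
 * random reads against the already-cached data.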
3973 static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags,
3974 struct llapi_lu_ladvise *ladvise)
3978 struct cl_ladvise_io *lio;
3983 env = cl_env_get(&refcheck);
3985 RETURN(PTR_ERR(env));
3987 io = vvp_env_thread_io(env);
3988 io->ci_obj = ll_i2info(inode)->lli_clob;
3990 /* initialize parameters for ladvise */
3991 lio = &io->u.ci_ladvise;
3992 lio->lio_start = ladvise->lla_start;
3993 lio->lio_end = ladvise->lla_end;
3994 lio->lio_fid = ll_inode2fid(inode);
3995 lio->lio_advice = ladvise->lla_advice;
3996 lio->lio_flags = flags;
3998 if (cl_io_init(env, io, CIT_LADVISE, io->ci_obj) == 0)
3999 rc = cl_io_loop(env, io);
4003 cl_io_fini(env, io);
4004 cl_env_put(env, &refcheck);
4008 static int ll_lock_noexpand(struct file *file, int flags)
4010 struct ll_file_data *fd = file->private_data;
4012 fd->ll_lock_no_expand = !(flags & LF_UNSET);
4017 #ifndef HAVE_FILEATTR_GET
4018 int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
4021 struct fsxattr fsxattr;
4023 if (copy_from_user(&fsxattr, uarg, sizeof(fsxattr)))
4026 fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
4027 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
4028 fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
4029 fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
4030 if (copy_to_user(uarg, &fsxattr, sizeof(fsxattr)))
4037 int ll_ioctl_check_project(struct inode *inode, __u32 xflags,
4041 * Project Quota ID state is only allowed to change from within the init
4042 * namespace. Enforce that restriction only if we are trying to change
4043 * the quota ID state. Everything else is allowed in user namespaces.
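 *
 * E.g. from inside a container's user namespace any attempt to change the
 * inode's project ID is rejected below, while from the init namespace a
 * change is accepted only if the new ID maps to a valid kprojid.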
4045 if (current_user_ns() == &init_user_ns) {
4047 * The caller is allowed to change the project ID. If it is being
4048 * changed, make sure that the new value is valid.
4050 if (ll_i2info(inode)->lli_projid != projid &&
4051 !projid_valid(make_kprojid(&init_user_ns, projid)))
4057 if (ll_i2info(inode)->lli_projid != projid)
4060 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
4061 if (!(xflags & FS_XFLAG_PROJINHERIT))
4064 if (xflags & FS_XFLAG_PROJINHERIT)
4071 int ll_set_project(struct inode *inode, __u32 xflags, __u32 projid)
4073 struct ptlrpc_request *req = NULL;
4074 struct md_op_data *op_data;
4075 struct cl_object *obj;
4076 unsigned int inode_flags;
4079 CDEBUG(D_QUOTA, DFID" xflags=%x projid=%u\n",
4080 PFID(ll_inode2fid(inode)), xflags, projid);
4081 rc = ll_ioctl_check_project(inode, xflags, projid);
4085 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4086 LUSTRE_OPC_ANY, NULL);
4087 if (IS_ERR(op_data))
4088 RETURN(PTR_ERR(op_data));
4090 inode_flags = ll_xflags_to_inode_flags(xflags);
4091 op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags);
4092 if (xflags & FS_XFLAG_PROJINHERIT)
4093 op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
4095 /* pass projid to md_op_data */
4096 op_data->op_projid = projid;
4098 op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
4099 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
4100 ptlrpc_req_finished(req);
4102 GOTO(out_fsxattr, rc);
4103 ll_update_inode_flags(inode, op_data->op_attr_flags);
4105 /* Avoid the OST RPC if this ioctl is only setting the project inherit flag */
4106 if (xflags == 0 || xflags == FS_XFLAG_PROJINHERIT)
4107 GOTO(out_fsxattr, rc);
4109 obj = ll_i2info(inode)->lli_clob;
4111 struct iattr attr = { 0 };
4113 rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS, xflags);
4117 ll_finish_md_op_data(op_data);
4121 #ifndef HAVE_FILEATTR_GET
4122 int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
4125 struct fsxattr fsxattr;
4129 if (copy_from_user(&fsxattr, uarg, sizeof(fsxattr)))
4132 RETURN(ll_set_project(inode, fsxattr.fsx_xflags,
4133 fsxattr.fsx_projid));
4137 int ll_ioctl_project(struct file *file, unsigned int cmd, void __user *uarg)
4139 struct lu_project lu_project;
4140 struct dentry *dentry = file_dentry(file);
4141 struct inode *inode = file_inode(file);
4142 struct dentry *child_dentry = NULL;
4143 int rc = 0, name_len;
4145 if (copy_from_user(&lu_project, uarg, sizeof(lu_project)))
4148 /* apply child dentry if name is valid */
4149 name_len = strnlen(lu_project.project_name, NAME_MAX);
4150 if (name_len > 0 && name_len <= NAME_MAX) {
4151 ll_inode_lock(inode);
4152 child_dentry = lookup_one_len(lu_project.project_name,
4154 ll_inode_unlock(inode);
4155 if (IS_ERR(child_dentry)) {
4156 rc = PTR_ERR(child_dentry);
4159 inode = child_dentry->d_inode;
4164 } else if (name_len > NAME_MAX) {
4169 switch (lu_project.project_type) {
4170 case LU_PROJECT_SET:
4171 rc = ll_set_project(inode, lu_project.project_xflags,
4172 lu_project.project_id);
4174 case LU_PROJECT_GET:
4175 lu_project.project_xflags =
4176 ll_inode_flags_to_xflags(inode->i_flags);
4177 if (test_bit(LLIF_PROJECT_INHERIT,
4178 &ll_i2info(inode)->lli_flags))
4179 lu_project.project_xflags |= FS_XFLAG_PROJINHERIT;
4180 lu_project.project_id = ll_i2info(inode)->lli_projid;
4181 if (copy_to_user(uarg, &lu_project, sizeof(lu_project))) {
4191 if (!IS_ERR_OR_NULL(child_dentry))
4196 static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
4199 struct inode *inode = file_inode(file);
4200 struct ll_file_data *fd = file->private_data;
4201 struct ll_inode_info *lli = ll_i2info(inode);
4202 struct obd_client_handle *och = NULL;
4203 struct split_param sp;
4204 struct pcc_param param;
4205 bool lease_broken = false;
4207 enum mds_op_bias bias = 0;
4209 struct file *layout_file = NULL;
4211 size_t data_size = 0;
4212 bool attached = false;
4217 mutex_lock(&lli->lli_och_mutex);
4218 if (fd->fd_lease_och != NULL) {
4219 och = fd->fd_lease_och;
4220 fd->fd_lease_och = NULL;
4222 mutex_unlock(&lli->lli_och_mutex);
4227 fmode = och->och_flags;
4229 switch (ioc->lil_flags) {
4230 case LL_LEASE_RESYNC_DONE:
4231 if (ioc->lil_count > IOC_IDS_MAX)
4232 GOTO(out_lease_close, rc = -EINVAL);
4234 data_size = offsetof(typeof(*ioc), lil_ids[ioc->lil_count]);
4235 OBD_ALLOC(data, data_size);
4237 GOTO(out_lease_close, rc = -ENOMEM);
4239 if (copy_from_user(data, uarg, data_size))
4240 GOTO(out_lease_close, rc = -EFAULT);
4242 bias = MDS_CLOSE_RESYNC_DONE;
4244 case LL_LEASE_LAYOUT_MERGE:
4245 if (ioc->lil_count != 1)
4246 GOTO(out_lease_close, rc = -EINVAL);
4248 uarg += sizeof(*ioc);
4249 if (copy_from_user(&fdv, uarg, sizeof(fdv)))
4250 GOTO(out_lease_close, rc = -EFAULT);
4252 layout_file = fget(fdv);
4254 GOTO(out_lease_close, rc = -EBADF);
4256 if ((file->f_flags & O_ACCMODE) == O_RDONLY ||
4257 (layout_file->f_flags & O_ACCMODE) == O_RDONLY)
4258 GOTO(out_lease_close, rc = -EPERM);
4260 data = file_inode(layout_file);
4261 bias = MDS_CLOSE_LAYOUT_MERGE;
4263 case LL_LEASE_LAYOUT_SPLIT: {
4266 if (ioc->lil_count != 2)
4267 GOTO(out_lease_close, rc = -EINVAL);
4269 uarg += sizeof(*ioc);
4270 if (copy_from_user(&fdv, uarg, sizeof(fdv)))
4271 GOTO(out_lease_close, rc = -EFAULT);
4273 uarg += sizeof(fdv);
4274 if (copy_from_user(&mirror_id, uarg, sizeof(mirror_id)))
4275 GOTO(out_lease_close, rc = -EFAULT);
4276 if (mirror_id >= MIRROR_ID_NEG)
4277 GOTO(out_lease_close, rc = -EINVAL);
4279 layout_file = fget(fdv);
4281 GOTO(out_lease_close, rc = -EBADF);
4283 /* if layout_file == file, it means to destroy the mirror */
4284 sp.sp_inode = file_inode(layout_file);
4285 sp.sp_mirror_id = (__u16)mirror_id;
4287 bias = MDS_CLOSE_LAYOUT_SPLIT;
4290 case LL_LEASE_PCC_ATTACH:
4291 if (ioc->lil_count != 1)
4294 if (IS_ENCRYPTED(inode))
4295 RETURN(-EOPNOTSUPP);
4297 uarg += sizeof(*ioc);
4298 if (copy_from_user(¶m.pa_archive_id, uarg, sizeof(__u32)))
4299 GOTO(out_lease_close, rc2 = -EFAULT);
4301 rc2 = pcc_readwrite_attach(file, inode, param.pa_archive_id);
4303 GOTO(out_lease_close, rc2);
4306 /* Grab latest data version */
4307 rc2 = ll_data_version(inode, ¶m.pa_data_version,
4310 GOTO(out_lease_close, rc2);
4313 bias = MDS_PCC_ATTACH;
4316 /* without close intent */
4321 rc = ll_lease_close_intent(och, inode, &lease_broken, bias, data);
4325 rc = ll_lease_och_release(inode, file);
4334 if (ioc->lil_flags == LL_LEASE_RESYNC_DONE && data)
4335 OBD_FREE(data, data_size);
4340 if (ioc->lil_flags == LL_LEASE_PCC_ATTACH) {
4343 rc = pcc_readwrite_attach_fini(file, inode,
4344 param.pa_layout_gen,
4349 ll_layout_refresh(inode, &fd->fd_layout_version);
4352 rc = ll_lease_type_from_fmode(fmode);
4356 static long ll_file_set_lease(struct file *file, struct ll_ioc_lease *ioc,
4359 struct inode *inode = file_inode(file);
4360 struct ll_inode_info *lli = ll_i2info(inode);
4361 struct ll_file_data *fd = file->private_data;
4362 struct obd_client_handle *och = NULL;
4363 __u64 open_flags = 0;
4369 switch (ioc->lil_mode) {
4370 case LL_LEASE_WRLCK:
4371 if (!(file->f_mode & FMODE_WRITE))
4373 fmode = FMODE_WRITE;
4375 case LL_LEASE_RDLCK:
4376 if (!(file->f_mode & FMODE_READ))
4380 case LL_LEASE_UNLCK:
4381 RETURN(ll_file_unlock_lease(file, ioc, uarg));
4386 CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
4388 /* apply for lease */
4389 if (ioc->lil_flags & LL_LEASE_RESYNC)
4390 open_flags = MDS_OPEN_RESYNC;
4391 och = ll_lease_open(inode, file, fmode, open_flags);
4393 RETURN(PTR_ERR(och));
4395 if (ioc->lil_flags & LL_LEASE_RESYNC) {
4396 rc = ll_lease_file_resync(och, inode, uarg);
4398 ll_lease_close(och, inode, NULL);
4401 rc = ll_layout_refresh(inode, &fd->fd_layout_version);
4403 ll_lease_close(och, inode, NULL);
4409 mutex_lock(&lli->lli_och_mutex);
4410 if (fd->fd_lease_och == NULL) {
4411 fd->fd_lease_och = och;
4414 mutex_unlock(&lli->lli_och_mutex);
4416 /* impossible, as only exclusive leases are supported for now */
4417 ll_lease_close(och, inode, &lease_broken);
4423 static void ll_heat_get(struct inode *inode, struct lu_heat *heat)
4425 struct ll_inode_info *lli = ll_i2info(inode);
4426 struct ll_sb_info *sbi = ll_i2sbi(inode);
4427 __u64 now = ktime_get_real_seconds();
4430 spin_lock(&lli->lli_heat_lock);
4431 heat->lh_flags = lli->lli_heat_flags;
4432 for (i = 0; i < heat->lh_count; i++)
4433 heat->lh_heat[i] = obd_heat_get(&lli->lli_heat_instances[i],
4434 now, sbi->ll_heat_decay_weight,
4435 sbi->ll_heat_period_second);
4436 spin_unlock(&lli->lli_heat_lock);
4439 static int ll_heat_set(struct inode *inode, enum lu_heat_flag flags)
4441 struct ll_inode_info *lli = ll_i2info(inode);
4444 spin_lock(&lli->lli_heat_lock);
4445 if (flags & LU_HEAT_FLAG_CLEAR)
4446 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
4448 if (flags & LU_HEAT_FLAG_OFF)
4449 lli->lli_heat_flags |= LU_HEAT_FLAG_OFF;
4451 lli->lli_heat_flags &= ~LU_HEAT_FLAG_OFF;
4453 spin_unlock(&lli->lli_heat_lock);
4459 ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4461 struct inode *inode = file_inode(file);
4462 struct ll_file_data *fd = file->private_data;
4463 void __user *uarg = (void __user *)arg;
4467 CDEBUG(D_VFSTRACE|D_IOCTL, "VFS Op:inode="DFID"(%pK) cmd=%x arg=%lx\n",
4468 PFID(ll_inode2fid(inode)), inode, cmd, arg);
4469 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
4471 /* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
4472 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
4475 /* can't do a generic karg == NULL check here, since it is too noisy and
4476 * we need to return -ENOTTY for unsupported ioctls instead of -EINVAL.
4479 case LL_IOC_GETFLAGS:
4480 /* Get the current value of the file flags */
4481 return put_user(fd->fd_flags, (int __user *)arg);
4482 case LL_IOC_SETFLAGS:
4483 case LL_IOC_CLRFLAGS:
4484 /* Set or clear specific file flags */
4485 /* XXX This probably needs checks to ensure the flags are
4486 * not abused, and to handle any flag side effects.
4488 if (get_user(flags, (int __user *)arg))
4491 /* LL_FILE_GROUP_LOCKED is managed via its own ioctls */
4492 if (flags & LL_FILE_GROUP_LOCKED)
4495 if (cmd == LL_IOC_SETFLAGS) {
4496 if ((flags & LL_FILE_IGNORE_LOCK) &&
4497 !(file->f_flags & O_DIRECT)) {
4499 CERROR("%s: unable to disable locking on non-O_DIRECT file "DFID": rc = %d\n",
4500 current->comm, PFID(ll_inode2fid(inode)),
4505 fd->fd_flags |= flags;
4507 fd->fd_flags &= ~flags;
4510 case LL_IOC_LOV_SETSTRIPE:
4511 case LL_IOC_LOV_SETSTRIPE_NEW:
4512 RETURN(ll_lov_setstripe(inode, file, uarg));
4513 case LL_IOC_LOV_SETEA:
4514 RETURN(ll_lov_setea(inode, file, uarg));
4515 case LL_IOC_LOV_SWAP_LAYOUTS: {
4517 struct lustre_swap_layouts lsl;
4519 if (copy_from_user(&lsl, uarg, sizeof(lsl)))
4522 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
4525 file2 = fget(lsl.sl_fd);
4529 /* O_WRONLY or O_RDWR */
4530 if ((file2->f_flags & O_ACCMODE) == O_RDONLY)
4531 GOTO(out, rc = -EPERM);
4533 if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
4534 struct obd_client_handle *och = NULL;
4535 struct ll_inode_info *lli;
4536 struct inode *inode2;
4538 lli = ll_i2info(inode);
4539 mutex_lock(&lli->lli_och_mutex);
4540 if (fd->fd_lease_och != NULL) {
4541 och = fd->fd_lease_och;
4542 fd->fd_lease_och = NULL;
4544 mutex_unlock(&lli->lli_och_mutex);
4546 GOTO(out, rc = -ENOLCK);
4547 inode2 = file_inode(file2);
4548 rc = ll_swap_layouts_close(och, inode, inode2, &lsl);
4550 rc = ll_swap_layouts(file, file2, &lsl);
4556 case LL_IOC_LOV_GETSTRIPE:
4557 case LL_IOC_LOV_GETSTRIPE_NEW:
4558 RETURN(ll_file_getstripe(inode, uarg, 0));
4559 case LL_IOC_GROUP_LOCK:
4560 RETURN(ll_get_grouplock(inode, file, arg));
4561 case LL_IOC_GROUP_UNLOCK:
4562 RETURN(ll_put_grouplock(inode, file, arg));
4563 case LL_IOC_DATA_VERSION: {
4564 struct ioc_data_version idv;
4567 if (copy_from_user(&idv, uarg, sizeof(idv)))
4570 idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
4571 rc = ll_ioc_data_version(inode, &idv);
4573 if (rc == 0 && copy_to_user(uarg, &idv, sizeof(idv)))
4578 case LL_IOC_HSM_STATE_GET: {
4579 struct md_op_data *op_data;
4580 struct hsm_user_state *hus;
4583 if (!ll_access_ok(uarg, sizeof(*hus)))
4590 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4591 LUSTRE_OPC_ANY, hus);
4592 if (IS_ERR(op_data)) {
4593 rc = PTR_ERR(op_data);
4595 rc = obd_iocontrol(cmd, ll_i2mdexp(inode),
4596 sizeof(*op_data), op_data, NULL);
4598 if (copy_to_user(uarg, hus, sizeof(*hus)))
4601 ll_finish_md_op_data(op_data);
4606 case LL_IOC_HSM_STATE_SET: {
4607 struct hsm_state_set *hss;
4614 if (copy_from_user(hss, uarg, sizeof(*hss)))
4617 rc = ll_hsm_state_set(inode, hss);
4622 case LL_IOC_HSM_ACTION: {
4623 struct md_op_data *op_data;
4624 struct hsm_current_action *hca;
4628 if (!ll_access_ok(uarg, sizeof(*hca)))
4635 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4636 LUSTRE_OPC_ANY, hca);
4637 if (IS_ERR(op_data)) {
4639 RETURN(PTR_ERR(op_data));
4642 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
4645 GOTO(skip_copy, rc);
4647 /* The hsm_current_action retrieved from the server could
4648 * contain corrupt information. If the data is incorrect,
4649 * collect debug information. We still send the data to
4650 * userland to handle, even if it is incorrect.
4652 action = hsm_user_action2name(hca->hca_action);
4653 if (strcmp(action, "UNKNOWN") == 0 ||
4654 hca->hca_state > HPS_DONE) {
4656 "HSM current state %s action %s, offset = %llu, length %llu\n",
4657 hsm_progress_state2name(hca->hca_state), action,
4658 hca->hca_location.offset, hca->hca_location.length);
4661 if (copy_to_user(uarg, hca, sizeof(*hca)))
4664 ll_finish_md_op_data(op_data);
4668 case LL_IOC_SET_LEASE_OLD: {
4669 struct ll_ioc_lease ioc = { .lil_mode = arg };
4671 RETURN(ll_file_set_lease(file, &ioc, 0));
4673 case LL_IOC_SET_LEASE: {
4674 struct ll_ioc_lease ioc;
4676 if (copy_from_user(&ioc, uarg, sizeof(ioc)))
4679 RETURN(ll_file_set_lease(file, &ioc, uarg));
4681 case LL_IOC_GET_LEASE: {
4682 struct ll_inode_info *lli = ll_i2info(inode);
4683 struct ldlm_lock *lock = NULL;
4686 mutex_lock(&lli->lli_och_mutex);
4687 if (fd->fd_lease_och != NULL) {
4688 struct obd_client_handle *och = fd->fd_lease_och;
4690 lock = ldlm_handle2lock(&och->och_lease_handle);
4692 lock_res_and_lock(lock);
4693 if (!ldlm_is_cancel(lock))
4694 fmode = och->och_flags;
4696 unlock_res_and_lock(lock);
4697 LDLM_LOCK_PUT(lock);
4700 mutex_unlock(&lli->lli_och_mutex);
4702 RETURN(ll_lease_type_from_fmode(fmode));
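/*
 * Example (userspace sketch): request a write lease and read it back,
 * exercising the LL_IOC_SET_LEASE/LL_IOC_GET_LEASE cases above.  Assumes
 * <linux/lustre/lustre_user.h>; error handling trimmed.
 *
 *	struct ll_ioc_lease ioc = { .lil_mode = LL_LEASE_WRLCK };
 *
 *	if (ioctl(fd, LL_IOC_SET_LEASE, &ioc) >= 0 &&
 *	    ioctl(fd, LL_IOC_GET_LEASE) == LL_LEASE_WRLCK)
 *		puts("write lease granted");
 */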
4704 case LL_IOC_HSM_IMPORT: {
4705 struct hsm_user_import *hui;
4711 if (copy_from_user(hui, uarg, sizeof(*hui)))
4714 rc = ll_hsm_import(inode, file, hui);
4719 case LL_IOC_FUTIMES_3: {
4720 struct ll_futimes_3 lfu;
4722 if (copy_from_user(&lfu, uarg, sizeof(lfu)))
4725 RETURN(ll_file_futimes_3(file, &lfu));
4727 case LL_IOC_LADVISE: {
4728 struct llapi_ladvise_hdr *k_ladvise_hdr;
4729 struct llapi_ladvise_hdr __user *u_ladvise_hdr;
4732 int alloc_size = sizeof(*k_ladvise_hdr);
4735 u_ladvise_hdr = uarg;
4736 OBD_ALLOC_PTR(k_ladvise_hdr);
4737 if (k_ladvise_hdr == NULL)
4740 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
4741 GOTO(out_ladvise, rc = -EFAULT);
4743 if (k_ladvise_hdr->lah_magic != LADVISE_MAGIC ||
4744 k_ladvise_hdr->lah_count < 1)
4745 GOTO(out_ladvise, rc = -EINVAL);
4747 num_advise = k_ladvise_hdr->lah_count;
4748 if (num_advise >= LAH_COUNT_MAX)
4749 GOTO(out_ladvise, rc = -EFBIG);
4751 OBD_FREE_PTR(k_ladvise_hdr);
4752 alloc_size = offsetof(typeof(*k_ladvise_hdr),
4753 lah_advise[num_advise]);
4754 OBD_ALLOC(k_ladvise_hdr, alloc_size);
4755 if (k_ladvise_hdr == NULL)
4759 * TODO: submit multiple advices to one server in a single RPC
4761 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
4762 GOTO(out_ladvise, rc = -EFAULT);
4764 for (i = 0; i < num_advise; i++) {
4765 struct llapi_lu_ladvise *k_ladvise =
4766 &k_ladvise_hdr->lah_advise[i];
4767 struct llapi_lu_ladvise __user *u_ladvise =
4768 &u_ladvise_hdr->lah_advise[i];
4770 rc = ll_ladvise_sanity(inode, k_ladvise);
4772 GOTO(out_ladvise, rc);
4774 switch (k_ladvise->lla_advice) {
4775 case LU_LADVISE_LOCKNOEXPAND:
4776 rc = ll_lock_noexpand(file,
4777 k_ladvise->lla_peradvice_flags);
4778 GOTO(out_ladvise, rc);
4779 case LU_LADVISE_LOCKAHEAD:
4781 rc = ll_file_lock_ahead(file, k_ladvise);
4784 GOTO(out_ladvise, rc);
4787 &u_ladvise->lla_lockahead_result))
4788 GOTO(out_ladvise, rc = -EFAULT);
4791 rc = ll_ladvise(inode, file,
4792 k_ladvise_hdr->lah_flags,
4795 GOTO(out_ladvise, rc);
4802 OBD_FREE(k_ladvise_hdr, alloc_size);
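/*
 * Example (userspace sketch): ask the servers to prefetch the first
 * 16 MiB with LU_LADVISE_WILLREAD, passing the header checks above.
 * Assumes <linux/lustre/lustre_user.h>; error handling trimmed.
 *
 *	struct {
 *		struct llapi_ladvise_hdr hdr;
 *		struct llapi_lu_ladvise  adv;
 *	} req = {
 *		.hdr = { .lah_magic = LADVISE_MAGIC, .lah_count = 1 },
 *		.adv = {
 *			.lla_advice = LU_LADVISE_WILLREAD,
 *			.lla_start  = 0,
 *			.lla_end    = 16 << 20,
 *		},
 *	};
 *
 *	if (ioctl(fd, LL_IOC_LADVISE, &req.hdr) == -1)
 *		perror("ladvise");
 */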
4805 case LL_IOC_FLR_SET_MIRROR: {
4806 /* mirror I/O must be direct to avoid polluting page cache
4808 if (!(file->f_flags & O_DIRECT))
4811 fd->fd_designated_mirror = arg;
4814 case LL_IOC_HEAT_GET: {
4815 struct lu_heat uheat;
4816 struct lu_heat *heat;
4819 if (copy_from_user(&uheat, uarg, sizeof(uheat)))
4822 if (uheat.lh_count > OBD_HEAT_COUNT)
4823 uheat.lh_count = OBD_HEAT_COUNT;
4825 size = offsetof(typeof(uheat), lh_heat[uheat.lh_count]);
4826 OBD_ALLOC(heat, size);
4830 heat->lh_count = uheat.lh_count;
4831 ll_heat_get(inode, heat);
4832 rc = copy_to_user(uarg, heat, size);
4833 OBD_FREE(heat, size);
4834 RETURN(rc ? -EFAULT : 0);
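/*
 * Example (userspace sketch): read the file access heat counters exported
 * by the LL_IOC_HEAT_GET case above.  Assumes
 * <linux/lustre/lustre_user.h>; error handling trimmed.
 *
 *	struct {
 *		struct lu_heat lh;
 *		__u64 slots[OBD_HEAT_COUNT];
 *	} h = { .lh = { .lh_count = OBD_HEAT_COUNT } };
 *
 *	if (ioctl(fd, LL_IOC_HEAT_GET, &h.lh) == 0)
 *		printf("read samples: %llu\n",
 *		       (unsigned long long)h.lh.lh_heat[OBD_HEAT_READSAMPLE]);
 */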
4836 case LL_IOC_HEAT_SET: {
4839 if (copy_from_user(&flags, uarg, sizeof(flags)))
4842 rc = ll_heat_set(inode, flags);
4845 case LL_IOC_PCC_ATTACH: {
4846 struct lu_pcc_attach *attach;
4848 if (!S_ISREG(inode->i_mode))
4851 if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
4854 OBD_ALLOC_PTR(attach);
4858 if (copy_from_user(attach,
4859 (const struct lu_pcc_attach __user *)arg,
4861 GOTO(out_pcc, rc = -EFAULT);
4863 rc = pcc_ioctl_attach(file, inode, attach);
4865 OBD_FREE_PTR(attach);
4868 case LL_IOC_PCC_DETACH: {
4869 struct lu_pcc_detach *detach;
4871 OBD_ALLOC_PTR(detach);
4875 if (copy_from_user(detach, uarg, sizeof(*detach)))
4876 GOTO(out_detach_free, rc = -EFAULT);
4878 if (!S_ISREG(inode->i_mode))
4879 GOTO(out_detach_free, rc = -EINVAL);
4881 if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
4882 GOTO(out_detach_free, rc = -EPERM);
4884 rc = pcc_ioctl_detach(inode, detach->pccd_opt);
4886 OBD_FREE_PTR(detach);
4889 case LL_IOC_PCC_STATE: {
4890 struct lu_pcc_state __user *ustate = uarg;
4891 struct lu_pcc_state *state;
4893 OBD_ALLOC_PTR(state);
4897 if (copy_from_user(state, ustate, sizeof(*state)))
4898 GOTO(out_state, rc = -EFAULT);
4900 rc = pcc_ioctl_state(file, inode, state);
4902 GOTO(out_state, rc);
4904 if (copy_to_user(ustate, state, sizeof(*state)))
4905 GOTO(out_state, rc = -EFAULT);
4908 OBD_FREE_PTR(state);
4912 rc = ll_iocontrol(inode, file, cmd, uarg);
4915 RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL, uarg));
4919 static loff_t ll_lseek(struct file *file, loff_t offset, int whence)
4921 struct inode *inode = file_inode(file);
4924 struct cl_lseek_io *lsio;
4931 env = cl_env_get(&refcheck);
4933 RETURN(PTR_ERR(env));
4935 io = vvp_env_thread_io(env);
4936 io->ci_obj = ll_i2info(inode)->lli_clob;
4937 ll_io_set_mirror(io, file);
4939 lsio = &io->u.ci_lseek;
4940 lsio->ls_start = offset;
4941 lsio->ls_whence = whence;
4942 lsio->ls_result = -ENXIO;
4945 rc = cl_io_init(env, io, CIT_LSEEK, io->ci_obj);
4947 struct vvp_io *vio = vvp_env_io(env);
4949 vio->vui_fd = file->private_data;
4950 rc = cl_io_loop(env, io);
4954 retval = rc ? : lsio->ls_result;
4955 cl_io_fini(env, io);
4956 } while (unlikely(io->ci_need_restart));
4958 cl_env_put(env, &refcheck);
4960 /* Without the key, the SEEK_HOLE return value has to be
4961 * rounded up to the next LUSTRE_ENCRYPTION_UNIT_SIZE.
4963 if (llcrypt_require_key(inode) == -ENOKEY && whence == SEEK_HOLE)
4964 retval = round_up(retval, LUSTRE_ENCRYPTION_UNIT_SIZE);
4969 #define LU_SEEK_NAMES { \
4970 [SEEK_SET] = "SEEK_SET", \
4971 [SEEK_CUR] = "SEEK_CUR", \
4972 [SEEK_DATA] = "SEEK_DATA", \
4973 [SEEK_HOLE] = "SEEK_HOLE", \
4976 static const char *const ll_seek_names[] = LU_SEEK_NAMES;
4978 static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
4980 struct inode *inode = file_inode(file);
4981 loff_t retval = offset, eof = 0;
4982 ktime_t kstart = ktime_get();
4986 CDEBUG(D_VFSTRACE|D_IOTRACE,
4987 "START file %s:"DFID", offset: %lld, type: %s\n",
4988 file_dentry(file)->d_name.name,
4989 PFID(ll_inode2fid(file_inode(file))), offset,
4990 ll_seek_names[origin]);
4992 if (origin == SEEK_END) {
4993 retval = ll_glimpse_size(inode);
4996 eof = i_size_read(inode);
4999 if (origin == SEEK_HOLE || origin == SEEK_DATA) {
5003 /* flush local cache first if any */
5004 cl_sync_file_range(inode, offset, OBD_OBJECT_EOF,
5007 retval = ll_lseek(file, offset, origin);
5010 retval = vfs_setpos(file, retval, ll_file_maxbytes(inode));
5012 retval = generic_file_llseek_size(file, offset, origin,
5013 ll_file_maxbytes(inode), eof);
5016 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
5017 ktime_us_delta(ktime_get(), kstart));
5018 CDEBUG(D_VFSTRACE|D_IOTRACE,
5019 "COMPLETED file %s:"DFID", offset: %lld, type: %s, rc = %lld\n",
5020 file_dentry(file)->d_name.name,
5021 PFID(ll_inode2fid(file_inode(file))), offset,
5022 ll_seek_names[origin], retval);
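/*
 * Example (userspace sketch): locate the first hole of a sparse file;
 * SEEK_HOLE/SEEK_DATA requests are served by ll_lseek() above after the
 * local cache is flushed.
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);
 *
 *	if (hole == (off_t)-1)
 *		perror("lseek");
 *	else
 *		printf("first hole at %lld\n", (long long)hole);
 */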
5027 static int ll_flush(struct file *file, fl_owner_t id)
5029 struct inode *inode = file_inode(file);
5030 struct ll_inode_info *lli = ll_i2info(inode);
5031 struct ll_file_data *fd = file->private_data;
5034 LASSERT(!S_ISDIR(inode->i_mode));
5036 /* catch async errors that were recorded back when async writeback
5037 * failed for pages in this mapping. */
5038 rc = lli->lli_async_rc;
5039 lli->lli_async_rc = 0;
5040 if (lli->lli_clob != NULL) {
5041 err = lov_read_and_clear_async_rc(lli->lli_clob);
5046 /* The application has already been told about the write failure.
5047 * Do not report the failure again. */
5048 if (fd->fd_write_failed)
5050 return rc ? -EIO : 0;
5054 * Called to make sure a portion of the file has been written out.
5055 * If @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to the OST.
5057 * Return how many pages have been written.
5059 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
5060 enum cl_fsync_mode mode, int ignore_layout)
5064 struct cl_fsync_io *fio;
5069 if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
5070 mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL &&
5071 mode != CL_FSYNC_RECLAIM)
5074 env = cl_env_get(&refcheck);
5076 RETURN(PTR_ERR(env));
5078 io = vvp_env_thread_io(env);
5079 io->ci_obj = ll_i2info(inode)->lli_clob;
5080 cl_object_get(io->ci_obj);
5081 io->ci_ignore_layout = ignore_layout;
5083 /* initialize parameters for sync */
5084 fio = &io->u.ci_fsync;
5085 fio->fi_start = start;
5087 fio->fi_fid = ll_inode2fid(inode);
5088 fio->fi_mode = mode;
5089 fio->fi_nr_written = 0;
5091 if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
5092 result = cl_io_loop(env, io);
5094 result = io->ci_result;
5096 result = fio->fi_nr_written;
5097 cl_io_fini(env, io);
5098 cl_object_put(env, io->ci_obj);
5099 cl_env_put(env, &refcheck);
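/*
 * Example (in-kernel sketch): a typical caller flushes and discards the
 * cached pages of the whole file before a layout change, ignoring layout
 * mismatches; a return value >= 0 is the number of pages written.
 *
 *	rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
 *				CL_FSYNC_DISCARD, 1);
 *	if (rc < 0)
 *		return rc;
 */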
5105 * When dentry is provided (the 'else' case), file_dentry() may be
5106 * null and dentry must be used directly rather than pulled from
5107 * file_dentry() as is done otherwise.
5110 int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
5112 struct dentry *dentry = file_dentry(file);
5113 struct inode *inode = dentry->d_inode;
5114 struct ll_inode_info *lli = ll_i2info(inode);
5115 struct ptlrpc_request *req;
5116 ktime_t kstart = ktime_get();
5122 "VFS Op:inode="DFID"(%p), start %lld, end %lld, datasync %d\n",
5123 PFID(ll_inode2fid(inode)), inode, start, end, datasync);
5125 /* fsync's caller has already called _fdata{sync,write}, we want
5126 * that IO to finish before calling the osc and mdc sync methods */
5127 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
5129 /* catch async errors that were recorded back when async writeback
5130 * failed for pages in this mapping. */
5131 if (!S_ISDIR(inode->i_mode)) {
5132 err = lli->lli_async_rc;
5133 lli->lli_async_rc = 0;
5136 if (lli->lli_clob != NULL) {
5137 err = lov_read_and_clear_async_rc(lli->lli_clob);
5143 if (S_ISREG(inode->i_mode) && !lli->lli_synced_to_mds) {
5145 * only the first sync on MDS makes sense,
5146 * everything else is stored on OSTs
5148 err = md_fsync(ll_i2sbi(inode)->ll_md_exp,
5149 ll_inode2fid(inode), &req);
5153 lli->lli_synced_to_mds = true;
5154 ptlrpc_req_finished(req);
5158 if (S_ISREG(inode->i_mode)) {
5159 struct ll_file_data *fd = file->private_data;
5162 /* Sync metadata on MDT first, and then sync the cached data
5165 err = pcc_fsync(file, start, end, datasync, &cached);
5167 err = cl_sync_file_range(inode, start, end,
5169 if (rc == 0 && err < 0)
5172 fd->fd_write_failed = true;
5174 fd->fd_write_failed = false;
5178 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
5179 ktime_us_delta(ktime_get(), kstart));
5184 ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
5186 struct inode *inode = file_inode(file);
5187 struct ll_sb_info *sbi = ll_i2sbi(inode);
5188 struct ldlm_enqueue_info einfo = {
5189 .ei_type = LDLM_FLOCK,
5190 .ei_cb_cp = ldlm_flock_completion_ast,
5191 .ei_cbdata = file_lock,
5193 struct md_op_data *op_data;
5194 struct lustre_handle lockh = { 0 };
5195 union ldlm_policy_data flock = { { 0 } };
5196 struct file_lock flbuf = *file_lock;
5197 int fl_type = file_lock->fl_type;
5198 ktime_t kstart = ktime_get();
5204 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
5205 PFID(ll_inode2fid(inode)), file_lock);
5207 if (file_lock->fl_flags & FL_FLOCK) {
5208 LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
5209 /* flocks are whole-file locks */
5210 flock.l_flock.end = OFFSET_MAX;
5211 /* For flocks the owner is determined by the local file descriptor */
5212 flock.l_flock.owner = (unsigned long)file_lock->fl_file;
5213 } else if (file_lock->fl_flags & FL_POSIX) {
5214 flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
5215 flock.l_flock.start = file_lock->fl_start;
5216 flock.l_flock.end = file_lock->fl_end;
5220 flock.l_flock.pid = file_lock->fl_pid;
5222 #if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner)
5223 /* Somewhat ugly workaround for svc lockd.
5224 * lockd installs a custom fl_lmops->lm_compare_owner that checks
5225 * that the fl_owner is the same (which it always is between lockd
5226 * processes on the local node) and then compares the pid.
5227 * As such we assign the pid to the owner field to make it all work;
5228 * conflict with normal locks is unlikely since the pid space and
5229 * the pointer space for current->files do not intersect */
5230 if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
5231 flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
5236 einfo.ei_mode = LCK_PR;
5239 /* An unlock request may or may not have any relation to
5240 * existing locks so we may not be able to pass a lock handle
5241 * via a normal ldlm_lock_cancel() request. The request may even
5242 * unlock a byte range in the middle of an existing lock. In
5243 * order to process an unlock request we need all of the same
5244 * information that is given with a normal read or write record
5245 * lock request. To avoid creating another ldlm unlock (cancel)
5246 * message we'll treat a LCK_NL flock request as an unlock. */
5247 einfo.ei_mode = LCK_NL;
5250 einfo.ei_mode = LCK_PW;
5254 CERROR("%s: fcntl from '%s' unknown lock type=%d: rc = %d\n",
5255 sbi->ll_fsname, current->comm, fl_type, rc);
5270 flags = LDLM_FL_BLOCK_NOWAIT;
5276 flags = LDLM_FL_TEST_LOCK;
5278 * To work with lockd we should check the local lock first,
5279 * else the lock_owner could disappear in the conflict case.
5281 posix_test_lock(file, &flbuf);
5285 CERROR("%s: fcntl from '%s' unknown lock command=%d: rc = %d\n",
5286 sbi->ll_fsname, current->comm, cmd, rc);
5290 /* Save the old mode so that if the mode in the lock changes we
5291 * can decrement the appropriate reader or writer refcount. */
5292 file_lock->fl_type = einfo.ei_mode;
5294 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
5295 LUSTRE_OPC_ANY, NULL);
5296 if (IS_ERR(op_data))
5297 RETURN(PTR_ERR(op_data));
5299 CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, "
5300 "start=%llu, end=%llu\n", PFID(ll_inode2fid(inode)),
5301 flock.l_flock.pid, flags, einfo.ei_mode,
5302 flock.l_flock.start, flock.l_flock.end);
5304 rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, &lockh,
5307 /* Restore the file lock type if not TEST lock. */
5308 if (!(flags & LDLM_FL_TEST_LOCK))
5309 file_lock->fl_type = fl_type;
5311 #ifdef HAVE_LOCKS_LOCK_FILE_WAIT
5312 if ((rc == 0 || file_lock->fl_type == F_UNLCK) &&
5313 !(flags & LDLM_FL_TEST_LOCK))
5314 rc2 = locks_lock_file_wait(file, file_lock);
5316 if ((file_lock->fl_flags & FL_FLOCK) &&
5317 (rc == 0 || file_lock->fl_type == F_UNLCK))
5318 rc2 = flock_lock_file_wait(file, file_lock);
5319 if ((file_lock->fl_flags & FL_POSIX) &&
5320 (rc == 0 || file_lock->fl_type == F_UNLCK) &&
5321 !(flags & LDLM_FL_TEST_LOCK))
5322 rc2 = posix_lock_file_wait(file, file_lock);
5323 #endif /* HAVE_LOCKS_LOCK_FILE_WAIT */
5325 if (rc2 && file_lock->fl_type != F_UNLCK) {
5326 einfo.ei_mode = LCK_NL;
5327 md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data,
5332 ll_finish_md_op_data(op_data);
5334 if (rc == 0 && (flags & LDLM_FL_TEST_LOCK) &&
5335 flbuf.fl_type != file_lock->fl_type) { /* Verify local & remote */
5336 CERROR("Flock LR mismatch! inode="DFID", flags=%#llx, mode=%u, "
5337 "pid=%u/%u, start=%llu/%llu, end=%llu/%llu,type=%u/%u\n",
5338 PFID(ll_inode2fid(inode)), flags, einfo.ei_mode,
5339 file_lock->fl_pid, flbuf.fl_pid,
5340 file_lock->fl_start, flbuf.fl_start,
5341 file_lock->fl_end, flbuf.fl_end,
5342 file_lock->fl_type, flbuf.fl_type);
5348 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK,
5349 ktime_us_delta(ktime_get(), kstart));
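/*
 * Example (userspace sketch): a POSIX write lock on the first 4 KiB is
 * translated by ll_file_flock() above into an LDLM flock enqueue when
 * the client is mounted with -o flock.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 4096,
 *	};
 *
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		perror("fcntl");
 */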
5353 int ll_get_fid_by_name(struct inode *parent, const char *name,
5354 int namelen, struct lu_fid *fid,
5355 struct inode **inode)
5357 struct md_op_data *op_data = NULL;
5358 struct mdt_body *body;
5359 struct ptlrpc_request *req;
5363 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
5364 LUSTRE_OPC_ANY, NULL);
5365 if (IS_ERR(op_data))
5366 RETURN(PTR_ERR(op_data));
5368 op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
5369 rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
5370 ll_finish_md_op_data(op_data);
5374 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
5376 GOTO(out_req, rc = -EFAULT);
5378 *fid = body->mbo_fid1;
5381 rc = ll_prep_inode(inode, &req->rq_pill, parent->i_sb, NULL);
5383 ptlrpc_req_finished(req);
5387 int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum,
5388 const char *name, __u32 flags)
5390 struct dentry *dchild = NULL;
5391 struct inode *child_inode = NULL;
5392 struct md_op_data *op_data;
5393 struct ptlrpc_request *request = NULL;
5394 struct obd_client_handle *och = NULL;
5396 struct mdt_body *body;
5397 __u64 data_version = 0;
5398 size_t namelen = strlen(name);
5399 int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
5400 bool locked = false;
5404 CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
5405 PFID(ll_inode2fid(parent)), name,
5406 lum->lum_stripe_offset, lum->lum_stripe_count);
5408 if (lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC) &&
5409 lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC_SPECIFIC))
5410 lustre_swab_lmv_user_md(lum);
5412 /* Get child FID first */
5413 qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
5416 dchild = d_lookup(file_dentry(file), &qstr);
5418 if (dchild->d_inode)
5419 child_inode = igrab(dchild->d_inode);
5424 rc = ll_get_fid_by_name(parent, name, namelen, NULL,
5433 if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
5434 OBD_CONNECT2_DIR_MIGRATE)) {
5435 if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
5436 ll_dir_striped(child_inode)) {
5437 CERROR("%s: MDT doesn't support stripe directory "
5438 "migration!\n", ll_i2sbi(parent)->ll_fsname);
5439 GOTO(out_iput, rc = -EOPNOTSUPP);
5444 * lfs migrate command needs to be blocked on the client
5445 * by checking the migrate FID against the FID of the
5448 if (is_root_inode(child_inode))
5449 GOTO(out_iput, rc = -EINVAL);
5451 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
5452 child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
5453 if (IS_ERR(op_data))
5454 GOTO(out_iput, rc = PTR_ERR(op_data));
5456 op_data->op_fid3 = *ll_inode2fid(child_inode);
5457 if (!fid_is_sane(&op_data->op_fid3)) {
5458 CERROR("%s: migrate %s, but FID "DFID" is insane\n",
5459 ll_i2sbi(parent)->ll_fsname, name,
5460 PFID(&op_data->op_fid3));
5461 GOTO(out_data, rc = -EINVAL);
5464 op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
5465 op_data->op_data = lum;
5466 op_data->op_data_size = lumlen;
5468 /* migrate the dirent only for subdirs if MDS_MIGRATE_NSONLY is set */
5469 if (S_ISDIR(child_inode->i_mode) && (flags & MDS_MIGRATE_NSONLY) &&
5470 lmv_dir_layout_changing(op_data->op_lso1))
5471 op_data->op_bias |= MDS_MIGRATE_NSONLY;
5474 if (S_ISREG(child_inode->i_mode)) {
5475 och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
5482 rc = ll_data_version(child_inode, &data_version,
5485 GOTO(out_close, rc);
5487 op_data->op_open_handle = och->och_open_handle;
5488 op_data->op_data_version = data_version;
5489 op_data->op_lease_handle = och->och_lease_handle;
5490 op_data->op_bias |= MDS_CLOSE_MIGRATE;
5492 spin_lock(&och->och_mod->mod_open_req->rq_lock);
5493 och->och_mod->mod_open_req->rq_replay = 0;
5494 spin_unlock(&och->och_mod->mod_open_req->rq_lock);
5496 LASSERT(locked == false);
5497 ll_inode_lock(child_inode);
5500 rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data,
5501 op_data->op_name, op_data->op_namelen,
5502 op_data->op_name, op_data->op_namelen, &request);
5504 LASSERT(request != NULL);
5505 ll_update_times(request, parent);
5508 if (rc == 0 || rc == -EAGAIN) {
5509 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
5510 LASSERT(body != NULL);
5512 /* If the server does release the layout lock, then we clean up
5513 * the client och here; otherwise release it in out_close: */
5514 if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
5515 obd_mod_put(och->och_mod);
5516 md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
5518 och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
5524 if (request != NULL) {
5525 ptlrpc_req_finished(request);
5529 /* Try again if the lease has been cancelled. */
5530 if (rc == -EAGAIN && S_ISREG(child_inode->i_mode)) {
5531 LASSERT(locked == true);
5532 ll_inode_unlock(child_inode);
5539 ll_lease_close(och, child_inode, NULL);
5541 clear_nlink(child_inode);
5543 ll_finish_md_op_data(op_data);
5546 ll_inode_unlock(child_inode);
5552 ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
5554 struct ll_file_data *fd = file->private_data;
5558 * In order to avoid a flood of warning messages, only print one message
5559 * per file. The overall message rate on the client is also limited
5560 * by CDEBUG_LIMIT.
5562 if (!(fd->fd_flags & LL_FILE_FLOCK_WARNING)) {
5563 fd->fd_flags |= LL_FILE_FLOCK_WARNING;
5564 CDEBUG_LIMIT(D_CONSOLE,
5565 "flock disabled, mount with '-o [local]flock' to enable\r\n");
5571 * test if some locks matching bits and l_req_mode are acquired
5572 * - bits can be in different locks
5573 * - if found, clear the common lock bits in *bits
5574 * - the bits not found are kept in *bits
5576 * \param bits [IN] searched lock bits
5577 * \param l_req_mode [IN] searched lock mode
5578 * \retval boolean, true iff all bits are found
5580 int ll_have_md_lock(struct obd_export *exp, struct inode *inode, __u64 *bits,
5581 enum ldlm_mode l_req_mode)
5583 struct lustre_handle lockh;
5584 union ldlm_policy_data policy;
5585 enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
5586 (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
5595 fid = &ll_i2info(inode)->lli_fid;
5596 CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
5597 ldlm_lockname[mode]);
5599 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
5600 for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
5601 policy.l_inodebits.bits = *bits & BIT(i);
5602 if (policy.l_inodebits.bits == 0)
5605 if (md_lock_match(exp, flags, fid, LDLM_IBITS, &policy, mode,
5607 struct ldlm_lock *lock;
5609 lock = ldlm_handle2lock(&lockh);
5612 ~(lock->l_policy_data.l_inodebits.bits);
5613 LDLM_LOCK_PUT(lock);
5615 *bits &= ~policy.l_inodebits.bits;
5622 enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
5623 struct lustre_handle *lockh, __u64 flags,
5624 enum ldlm_mode mode)
5626 union ldlm_policy_data policy = { .l_inodebits = { bits } };
5631 fid = &ll_i2info(inode)->lli_fid;
5632 CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
5634 rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
5635 fid, LDLM_IBITS, &policy, mode, lockh);
5640 static int ll_inode_revalidate_fini(struct inode *inode, int rc)
5642 /* Already unlinked. Just update nlink and return success */
5643 if (rc == -ENOENT) {
5645 /* If it is a striped directory and there is a bad stripe,
5646 * let's revalidate the dentry again instead of returning
5648 if (ll_dir_striped(inode))
5651 /* This path cannot be hit for regular files unless in
5652 * case of obscure races, so no need to validate
5654 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
5656 } else if (rc != 0) {
5657 CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
5658 "%s: revalidate FID "DFID" error: rc = %d\n",
5659 ll_i2sbi(inode)->ll_fsname,
5660 PFID(ll_inode2fid(inode)), rc);
5666 static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op)
5668 struct dentry *parent = NULL;
5670 struct inode *inode = dentry->d_inode;
5671 struct obd_export *exp = ll_i2mdexp(inode);
5672 struct lookup_intent oit = {
5675 struct ptlrpc_request *req = NULL;
5676 struct md_op_data *op_data;
5677 const char *name = NULL;
5682 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
5683 PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
5685 if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) {
5686 parent = dget_parent(dentry);
5687 dir = d_inode(parent);
5688 name = dentry->d_name.name;
5689 namelen = dentry->d_name.len;
5694 op_data = ll_prep_md_op_data(NULL, dir, inode, name, namelen, 0,
5695 LUSTRE_OPC_ANY, NULL);
5698 if (IS_ERR(op_data))
5699 RETURN(PTR_ERR(op_data));
5701 /* Call getattr by fid */
5702 if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID)
5703 op_data->op_flags = MF_GETATTR_BY_FID;
5704 rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0);
5705 ll_finish_md_op_data(op_data);
5707 rc = ll_inode_revalidate_fini(inode, rc);
5711 rc = ll_revalidate_it_finish(req, &oit, dentry);
5713 ll_intent_release(&oit);
5717 /* Unlinked? Unhash dentry, so it is not picked up later by
5718 * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
5719 * here to preserve get_cwd functionality on 2.6.
5721 if (!dentry->d_inode->i_nlink)
5722 d_lustre_invalidate(dentry);
5724 ll_lookup_finish_locks(&oit, dentry);
5726 ptlrpc_req_finished(req);
5731 static int ll_merge_md_attr(struct inode *inode)
5733 struct ll_inode_info *lli = ll_i2info(inode);
5734 struct lmv_stripe_object *lsm_obj;
5735 struct cl_attr attr = { 0 };
5738 if (!ll_dir_striped(inode))
5741 down_read(&lli->lli_lsm_sem);
5742 if (!ll_dir_striped_locked(inode)) {
5743 up_read(&lli->lli_lsm_sem);
5746 LASSERT(lli->lli_lsm_obj != NULL);
5748 lsm_obj = lmv_stripe_object_get(lli->lli_lsm_obj);
5749 up_read(&lli->lli_lsm_sem);
5751 rc = md_merge_attr(ll_i2mdexp(inode), lsm_obj,
5752 &attr, ll_md_blocking_ast);
5753 lmv_stripe_object_put(&lsm_obj);
5757 spin_lock(&inode->i_lock);
5758 set_nlink(inode, attr.cat_nlink);
5759 spin_unlock(&inode->i_lock);
5761 inode->i_blocks = attr.cat_blocks;
5762 i_size_write(inode, attr.cat_size);
5764 ll_i2info(inode)->lli_atime = attr.cat_atime;
5765 ll_i2info(inode)->lli_mtime = attr.cat_mtime;
5766 ll_i2info(inode)->lli_ctime = attr.cat_ctime;
5771 int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
5772 unsigned int flags, bool foreign)
5774 struct inode *inode = de->d_inode;
5775 struct ll_sb_info *sbi = ll_i2sbi(inode);
5776 struct ll_inode_info *lli = ll_i2info(inode);
5777 struct dentry *parent;
5779 bool need_glimpse = true;
5780 ktime_t kstart = ktime_get();
5783 CDEBUG(D_VFSTRACE|D_IOTRACE,
5784 "START file %s:"DFID"(%p), request_mask %d, flags %u, foreign %d\n",
5785 de->d_name.name, PFID(ll_inode2fid(inode)), inode,
5786 request_mask, flags, foreign);
5788 /* The OST object(s) determine the file size, blocks and mtime. */
5789 if (!(request_mask & STATX_SIZE || request_mask & STATX_BLOCKS ||
5790 request_mask & STATX_MTIME))
5791 need_glimpse = false;
5793 parent = dget_parent(de);
5794 dir = d_inode(parent);
5795 ll_statahead_enter(dir, de);
5796 if (dentry_may_statahead(dir, de))
5797 ll_start_statahead(dir, de, need_glimpse &&
5798 !(flags & AT_STATX_DONT_SYNC));
5801 if (flags & AT_STATX_DONT_SYNC)
5802 GOTO(fill_attr, rc = 0);
5804 rc = ll_inode_revalidate(de, IT_GETATTR);
5808 /* foreign files/dirs are always of zero length, so we don't
5809 * need to validate the size.
5811 if (S_ISREG(inode->i_mode) && !foreign) {
5815 GOTO(fill_attr, rc);
5817 rc = pcc_inode_getattr(inode, request_mask, flags, &cached);
5818 if (cached && rc < 0)
5822 GOTO(fill_attr, rc);
5825 * If the returned attr is masked with OBD_MD_FLSIZE &
5826 * OBD_MD_FLBLOCKS & OBD_MD_FLMTIME, it means that the file size
5827 * and blocks obtained from the MDT are strictly correct, the file
5828 * is usually not being modified by clients, and the [a|m|c]time
5829 * obtained from the MDT is also strictly correct.
5830 * Under these circumstances, there is no need to send glimpse
5831 * RPCs to the OSTs for file attributes such as the size and blocks.
5833 if (lli->lli_attr_valid & OBD_MD_FLSIZE &&
5834 lli->lli_attr_valid & OBD_MD_FLBLOCKS &&
5835 lli->lli_attr_valid & OBD_MD_FLMTIME) {
5836 inode_set_mtime(inode, lli->lli_mtime, 0);
5837 if (lli->lli_attr_valid & OBD_MD_FLATIME)
5838 inode_set_atime(inode, lli->lli_atime, 0);
5839 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
5840 inode_set_ctime(inode, lli->lli_ctime, 0);
5841 GOTO(fill_attr, rc);
5844 /* In case of restore, the MDT has the right size and has
5845 * already sent it back without granting the layout lock;
5846 * the inode is up-to-date, so a glimpse is useless.
5847 * Also, to glimpse we need the layout: during a running
5848 * restore the MDT holds the layout lock, so the glimpse will
5849 * block until the end of the restore (getattr will block)
5851 if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
5852 rc = ll_glimpse_size(inode);
5857 /* If the object isn't a regular file then don't validate its size. */
5858 /* a foreign dir is not a striped dir */
5860 rc = ll_merge_md_attr(inode);
5865 if (lli->lli_attr_valid & OBD_MD_FLATIME)
5866 inode_set_atime(inode, lli->lli_atime, 0);
5867 if (lli->lli_attr_valid & OBD_MD_FLMTIME)
5868 inode_set_mtime(inode, lli->lli_mtime, 0);
5869 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
5870 inode_set_ctime(inode, lli->lli_ctime, 0);
5874 CFS_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
5876 if (ll_need_32bit_api(sbi)) {
5877 stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
5878 stat->dev = ll_compat_encode_dev(inode->i_sb->s_dev);
5879 stat->rdev = ll_compat_encode_dev(inode->i_rdev);
5881 stat->ino = inode->i_ino;
5882 stat->dev = inode->i_sb->s_dev;
5883 stat->rdev = inode->i_rdev;
5886 /* foreign symlink to be exposed as a real symlink */
5888 stat->mode = inode->i_mode;
5890 stat->mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
5892 stat->uid = inode->i_uid;
5893 stat->gid = inode->i_gid;
5894 stat->atime = inode_get_atime(inode);
5895 stat->mtime = inode_get_mtime(inode);
5896 stat->ctime = inode_get_ctime(inode);
5897 /* stat->blksize is used to report the preferred IO size */
5898 if (sbi->ll_stat_blksize)
5899 stat->blksize = sbi->ll_stat_blksize;
5900 else if (S_ISREG(inode->i_mode))
5901 stat->blksize = min(PTLRPC_MAX_BRW_SIZE,
5902 1U << LL_MAX_BLKSIZE_BITS);
5903 else if (S_ISDIR(inode->i_mode))
5904 stat->blksize = min(MD_MAX_BRW_SIZE,
5905 1U << LL_MAX_BLKSIZE_BITS);
5907 stat->blksize = 1 << inode->i_sb->s_blocksize_bits;
5909 stat->nlink = inode->i_nlink;
5910 stat->size = i_size_read(inode);
5911 stat->blocks = inode->i_blocks;
5913 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
5914 if (flags & AT_STATX_DONT_SYNC) {
5915 if (stat->size == 0 &&
5916 lli->lli_attr_valid & OBD_MD_FLLAZYSIZE)
5917 stat->size = lli->lli_lazysize;
5918 if (stat->blocks == 0 &&
5919 lli->lli_attr_valid & OBD_MD_FLLAZYBLOCKS)
5920 stat->blocks = lli->lli_lazyblocks;
5923 if (lli->lli_attr_valid & OBD_MD_FLBTIME) {
5924 stat->result_mask |= STATX_BTIME;
5925 stat->btime.tv_sec = lli->lli_btime;
5928 stat->attributes_mask = STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
5929 #ifdef HAVE_LUSTRE_CRYPTO
5930 stat->attributes_mask |= STATX_ATTR_ENCRYPTED;
5932 stat->attributes |= ll_inode_to_ext_flags(inode->i_flags);
5933 /* if Lustre specific LUSTRE_ENCRYPT_FL flag is set, also set
5934 * ext4 equivalent to please statx
5936 if (stat->attributes & LUSTRE_ENCRYPT_FL)
5937 stat->attributes |= STATX_ATTR_ENCRYPTED;
5938 stat->result_mask &= request_mask;
5941 ll_stats_ops_tally(sbi, LPROC_LL_GETATTR,
5942 ktime_us_delta(ktime_get(), kstart));
5945 "COMPLETED file %s:"DFID"(%p), request_mask %d, flags %u, foreign %d\n",
5946 de->d_name.name, PFID(ll_inode2fid(inode)), inode,
5947 request_mask, flags, foreign);
5952 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
5953 int ll_getattr(struct mnt_idmap *map, const struct path *path,
5954 struct kstat *stat, u32 request_mask, unsigned int flags)
5956 return ll_getattr_dentry(path->dentry, stat, request_mask, flags,
5960 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
5962 return ll_getattr_dentry(de, stat, STATX_BASIC_STATS,
5963 AT_STATX_SYNC_AS_STAT, false);
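/*
 * Example (userspace sketch, hypothetical path): a "lazy" stat that
 * avoids glimpse RPCs by passing AT_STATX_DONT_SYNC, taking the
 * fill_attr shortcut in ll_getattr_dentry() above.
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "/mnt/lustre/f0", AT_STATX_DONT_SYNC,
 *		  STATX_SIZE | STATX_BTIME, &stx) == 0)
 *		printf("size=%llu\n", (unsigned long long)stx.stx_size);
 */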
5967 static int cl_falloc(struct file *file, struct inode *inode, int mode,
5968 loff_t offset, loff_t len)
5970 loff_t size = i_size_read(inode);
5978 env = cl_env_get(&refcheck);
5980 RETURN(PTR_ERR(env));
5982 io = vvp_env_thread_io(env);
5983 io->ci_obj = ll_i2info(inode)->lli_clob;
5984 ll_io_set_mirror(io, file);
5986 io->ci_verify_layout = 1;
5987 io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu);
5988 io->u.ci_setattr.sa_falloc_mode = mode;
5989 io->u.ci_setattr.sa_falloc_offset = offset;
5990 io->u.ci_setattr.sa_falloc_end = offset + len;
5991 io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE;
5993 CDEBUG(D_INODE, "UID %u GID %u PRJID %u\n",
5994 from_kuid(&init_user_ns, inode->i_uid),
5995 from_kgid(&init_user_ns, inode->i_gid),
5996 ll_i2info(inode)->lli_projid);
5998 io->u.ci_setattr.sa_falloc_uid = from_kuid(&init_user_ns, inode->i_uid);
5999 io->u.ci_setattr.sa_falloc_gid = from_kgid(&init_user_ns, inode->i_gid);
6000 io->u.ci_setattr.sa_falloc_projid = ll_i2info(inode)->lli_projid;
6002 if (io->u.ci_setattr.sa_falloc_end > size) {
6003 loff_t newsize = io->u.ci_setattr.sa_falloc_end;
6005 /* Check new size against VFS/VM file size limit and rlimit */
6006 rc = inode_newsize_ok(inode, newsize);
6009 if (newsize > ll_file_maxbytes(inode)) {
6010 CDEBUG(D_INODE, "file size too large %llu > %llu\n",
6011 (unsigned long long)newsize,
6012 ll_file_maxbytes(inode));
6019 rc = cl_io_init(env, io, CIT_SETATTR, io->ci_obj);
6021 rc = cl_io_loop(env, io);
6024 cl_io_fini(env, io);
6025 } while (unlikely(io->ci_need_restart));
6028 cl_env_put(env, &refcheck);
6032 static long ll_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
6034 struct inode *inode = file_inode(filp);
6037 if (offset < 0 || len <= 0)
6040 * Encrypted inodes can't handle collapse range, zero range, or insert
6041 * range, since we would need to re-encrypt blocks with a different IV or
6042 * XTS tweak (which are based on the logical block number).
6043 * This is similar to what ext4 does.
6045 if (IS_ENCRYPTED(inode) &&
6046 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
6047 FALLOC_FL_ZERO_RANGE)))
6048 RETURN(-EOPNOTSUPP);
6051 * Only mode == 0 (which is standard prealloc) and PUNCH are supported.
6052 * The rest of the mode options are not supported yet.
6054 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
6055 RETURN(-EOPNOTSUPP);
6057 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1);
6059 rc = cl_falloc(filp, inode, mode, offset, len);
6061 * ENOTSUPP (524) is an NFSv3-specific error code erroneously
6062 * used by Lustre in several places. Returning it here would
6063 * confuse applications that explicitly test for EOPNOTSUPP
6064 * (95) and fall back to ftruncate().
6066 if (rc == -ENOTSUPP)
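/*
 * Example (userspace sketch): punch a hole over the first megabyte;
 * only mode 0 and PUNCH_HOLE (with KEEP_SIZE) reach cl_falloc() above.
 *
 *	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		      0, 1 << 20) == -1)
 *		perror("fallocate");
 */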
6072 static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6073 __u64 start, __u64 len)
6077 struct fiemap *fiemap;
6078 unsigned int extent_count = fieinfo->fi_extents_max;
6080 num_bytes = sizeof(*fiemap) + (extent_count *
6081 sizeof(struct fiemap_extent));
6082 OBD_ALLOC_LARGE(fiemap, num_bytes);
6087 fiemap->fm_flags = fieinfo->fi_flags;
6088 fiemap->fm_extent_count = fieinfo->fi_extents_max;
6089 fiemap->fm_start = start;
6090 fiemap->fm_length = len;
6091 if (extent_count > 0 &&
6092 copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
6093 sizeof(struct fiemap_extent)) != 0)
6094 GOTO(out, rc = -EFAULT);
6096 rc = ll_do_fiemap(inode, fiemap, num_bytes);
6098 if (IS_ENCRYPTED(inode) && extent_count > 0) {
6101 for (i = 0; i < fiemap->fm_mapped_extents; i++)
6102 fiemap->fm_extents[i].fe_flags |=
6103 FIEMAP_EXTENT_DATA_ENCRYPTED |
6104 FIEMAP_EXTENT_ENCODED;
6107 fieinfo->fi_flags = fiemap->fm_flags;
6108 fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
6109 if (extent_count > 0 &&
6110 copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
6111 fiemap->fm_mapped_extents *
6112 sizeof(struct fiemap_extent)) != 0)
6113 GOTO(out, rc = -EFAULT);
6115 OBD_FREE_LARGE(fiemap, num_bytes);
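/*
 * Example (userspace sketch): a single-extent FIEMAP request, handled by
 * ll_fiemap() above.  Error handling trimmed.
 *
 *	struct {
 *		struct fiemap fm;
 *		struct fiemap_extent fe[1];
 *	} req = {
 *		.fm = {
 *			.fm_start	 = 0,
 *			.fm_length	 = FIEMAP_MAX_OFFSET,
 *			.fm_extent_count = 1,
 *		},
 *	};
 *
 *	if (ioctl(fd, FS_IOC_FIEMAP, &req.fm) == 0 &&
 *	    req.fm.fm_mapped_extents > 0)
 *		printf("first extent: logical %llu, length %llu\n",
 *		       (unsigned long long)req.fe[0].fe_logical,
 *		       (unsigned long long)req.fe[0].fe_length);
 */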
6119 int ll_inode_permission(struct mnt_idmap *idmap, struct inode *inode, int mask)
6122 struct ll_sb_info *sbi;
6123 struct root_squash_info *squash;
6124 struct cred *cred = NULL;
6125 const struct cred *old_cred = NULL;
6126 bool squash_id = false;
6127 ktime_t kstart = ktime_get();
6131 if (mask & MAY_NOT_BLOCK)
6135 * As the root inode is NOT validated in the lookup operation,
6136 * we need to revalidate PERM before the permission check.
6138 if (is_root_inode(inode)) {
6139 rc = ll_inode_revalidate(inode->i_sb->s_root, IT_GETATTR);
6144 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
6145 PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
6147 /* squash fsuid/fsgid if needed */
6148 sbi = ll_i2sbi(inode);
6149 squash = &sbi->ll_squash;
6150 if (unlikely(squash->rsi_uid != 0 &&
6151 uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
6152 !test_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags))) {
6156 CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
6157 __kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
6158 squash->rsi_uid, squash->rsi_gid);
6160 /* update current process's credentials
6161 * and FS capability */
6162 cred = prepare_creds();
6166 cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
6167 cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
6168 cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective);
6169 cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
6171 old_cred = override_creds(cred);
6174 rc = generic_permission(idmap, inode, mask);
6175 /* restore current process's credentials and FS capability */
6177 revert_creds(old_cred);
6182 ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM,
6183 ktime_us_delta(ktime_get(), kstart));
6188 #if defined(HAVE_FILEMAP_SPLICE_READ)
6189 # define ll_splice_read filemap_splice_read
6190 #elif !defined(HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT)
6191 # define ll_splice_read generic_file_splice_read
6193 # define ll_splice_read pcc_file_splice_read
6196 /* -o localflock - only provides locally consistent flock locks */
6197 static const struct file_operations ll_file_operations = {
6198 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
6199 # ifdef HAVE_SYNC_READ_WRITE
6200 .read = new_sync_read,
6201 .write = new_sync_write,
6203 .read_iter = ll_file_read_iter,
6204 .write_iter = ll_file_write_iter,
6205 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6206 .read = ll_file_read,
6207 .aio_read = ll_file_aio_read,
6208 .write = ll_file_write,
6209 .aio_write = ll_file_aio_write,
6210 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6211 .unlocked_ioctl = ll_file_ioctl,
6212 .open = ll_file_open,
6213 .release = ll_file_release,
6214 .mmap = ll_file_mmap,
6215 .llseek = ll_file_seek,
6216 .splice_read = ll_splice_read,
6217 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
6218 .splice_write = iter_file_splice_write,
6222 .fallocate = ll_fallocate,
6225 static const struct file_operations ll_file_operations_flock = {
6226 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
6227 # ifdef HAVE_SYNC_READ_WRITE
6228 .read = new_sync_read,
6229 .write = new_sync_write,
6230 # endif /* HAVE_SYNC_READ_WRITE */
6231 .read_iter = ll_file_read_iter,
6232 .write_iter = ll_file_write_iter,
6233 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6234 .read = ll_file_read,
6235 .aio_read = ll_file_aio_read,
6236 .write = ll_file_write,
6237 .aio_write = ll_file_aio_write,
6238 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6239 .unlocked_ioctl = ll_file_ioctl,
6240 .open = ll_file_open,
6241 .release = ll_file_release,
6242 .mmap = ll_file_mmap,
6243 .llseek = ll_file_seek,
6244 .splice_read = ll_splice_read,
6245 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
6246 .splice_write = iter_file_splice_write,
6250 .flock = ll_file_flock,
6251 .lock = ll_file_flock,
6252 .fallocate = ll_fallocate,
6255 /* These are for -o noflock - to return ENOSYS on flock calls */
6256 static const struct file_operations ll_file_operations_noflock = {
6257 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
6258 # ifdef HAVE_SYNC_READ_WRITE
6259 .read = new_sync_read,
6260 .write = new_sync_write,
6261 # endif /* HAVE_SYNC_READ_WRITE */
6262 .read_iter = ll_file_read_iter,
6263 .write_iter = ll_file_write_iter,
6264 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6265 .read = ll_file_read,
6266 .aio_read = ll_file_aio_read,
6267 .write = ll_file_write,
6268 .aio_write = ll_file_aio_write,
6269 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
6270 .unlocked_ioctl = ll_file_ioctl,
6271 .open = ll_file_open,
6272 .release = ll_file_release,
6273 .mmap = ll_file_mmap,
6274 .llseek = ll_file_seek,
6275 .splice_read = ll_splice_read,
6276 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
6277 .splice_write = iter_file_splice_write,
6281 .flock = ll_file_noflock,
6282 .lock = ll_file_noflock,
6283 .fallocate = ll_fallocate,
6286 const struct inode_operations ll_file_inode_operations = {
6287 .setattr = ll_setattr,
6288 .getattr = ll_getattr,
6289 .permission = ll_inode_permission,
6290 #ifdef HAVE_IOP_XATTR
6291 .setxattr = ll_setxattr,
6292 .getxattr = ll_getxattr,
6293 .removexattr = ll_removexattr,
6295 .listxattr = ll_listxattr,
6296 .fiemap = ll_fiemap,
6297 .get_acl = ll_get_acl,
6298 #ifdef HAVE_IOP_SET_ACL
6299 .set_acl = ll_set_acl,
6301 #ifdef HAVE_FILEATTR_GET
6302 .fileattr_get = ll_fileattr_get,
6303 .fileattr_set = ll_fileattr_set,
6307 const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi)
6309 const struct file_operations *fops = &ll_file_operations_noflock;
6311 if (test_bit(LL_SBI_FLOCK, sbi->ll_flags))
6312 fops = &ll_file_operations_flock;
6313 else if (test_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags))
6314 fops = &ll_file_operations;
6319 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
6321 struct ll_inode_info *lli = ll_i2info(inode);
6322 struct cl_object *obj = lli->lli_clob;
6331 env = cl_env_get(&refcheck);
6333 RETURN(PTR_ERR(env));
6335 rc = cl_conf_set(env, lli->lli_clob, conf);
6339 if (conf->coc_opc == OBJECT_CONF_SET) {
6340 struct ldlm_lock *lock = conf->coc_lock;
6341 struct cl_layout cl = {
6345 LASSERT(lock != NULL);
6346 LASSERT(ldlm_has_layout(lock));
6348 /* it can only be allowed to match after the layout is
6349 * applied to the inode, otherwise a false layout would be
6350 * seen. Applying the layout should happen before dropping
6351 * the intent lock. */
6352 ldlm_lock_allow_match(lock);
6354 rc = cl_object_layout_get(env, obj, &cl);
6359 DFID": layout version change: %u -> %u\n",
6360 PFID(&lli->lli_fid), ll_layout_version_get(lli),
6362 ll_layout_version_set(lli, cl.cl_layout_gen);
6366 cl_env_put(env, &refcheck);
6368 RETURN(rc < 0 ? rc : 0);
6371 /* Fetch layout from MDT with getxattr request, if it's not ready yet */
6372 static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
6375 struct ll_sb_info *sbi = ll_i2sbi(inode);
6376 struct ptlrpc_request *req;
6383 CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
6384 PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
6385 lock->l_lvb_data, lock->l_lvb_len);
6387 if (lock->l_lvb_data != NULL)
6390 /* if the layout lock was granted right away, the layout is returned
6391 * within the DLM_LVB of the dlm reply; otherwise, if the lock was ever
6392 * blocked and then granted via completion AST, we have to fetch the
6393 * layout here. Please note that we can't use the LVB buffer in the
6394 * completion AST because it isn't large enough */
6395 rc = ll_get_default_mdsize(sbi, &lmmsize);
6399 rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), OBD_MD_FLXATTR,
6400 XATTR_NAME_LOV, lmmsize, &req);
6403 GOTO(out, rc = 0); /* empty layout */
6410 if (lmmsize == 0) /* empty layout */
6413 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
6415 GOTO(out, rc = -EFAULT);
6417 OBD_ALLOC_LARGE(lvbdata, lmmsize);
6418 if (lvbdata == NULL)
6419 GOTO(out, rc = -ENOMEM);
6421 memcpy(lvbdata, lmm, lmmsize);
6422 lock_res_and_lock(lock);
6423 if (unlikely(lock->l_lvb_data == NULL)) {
6424 lock->l_lvb_type = LVB_T_LAYOUT;
6425 lock->l_lvb_data = lvbdata;
6426 lock->l_lvb_len = lmmsize;
6429 unlock_res_and_lock(lock);
6432 OBD_FREE_LARGE(lvbdata, lmmsize);
6437 ptlrpc_req_finished(req);
6442 * Apply the layout to the inode. Layout lock is held and will be released
6445 static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
6446 struct inode *inode, bool try)
6448 struct ll_inode_info *lli = ll_i2info(inode);
6449 struct ll_sb_info *sbi = ll_i2sbi(inode);
6450 struct ldlm_lock *lock;
6451 struct cl_object_conf conf;
6454 bool wait_layout = false;
6457 LASSERT(lustre_handle_is_used(lockh));
6459 lock = ldlm_handle2lock(lockh);
6460 LASSERT(lock != NULL);
6462 if (!ldlm_has_layout(lock))
6463 GOTO(out, rc = -EAGAIN);
6465 LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
6466 PFID(&lli->lli_fid), inode);
6468 /* in case this is a cached lock, reinstate it with the new inode */
6469 md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
6471 lock_res_and_lock(lock);
6472 lvb_ready = ldlm_is_lvb_ready(lock);
6473 unlock_res_and_lock(lock);
6475 /* checking lvb_ready is racy but this is okay. The worst case is
6476 * that multiple processes may configure the file at the same time. */
6480 rc = ll_layout_fetch(inode, lock);
6484 /* for the layout lock, the lmm is stored in the lock's lvb.
6485 * lvb_data is immutable if the lock is held, so it's safe to access it
6488 * set the layout on the file. It is unlikely this will fail, as the
6489 * old layout was surely eliminated */
6490 memset(&conf, 0, sizeof conf);
6491 conf.coc_opc = OBJECT_CONF_SET;
6492 conf.coc_inode = inode;
6493 conf.coc_lock = lock;
6495 conf.u.coc_layout.lb_buf = lock->l_lvb_data;
6496 conf.u.coc_layout.lb_len = lock->l_lvb_len;
6497 rc = ll_layout_conf(inode, &conf);
6499 /* refresh layout failed, need to wait */
6500 wait_layout = rc == -EBUSY;
6503 LDLM_LOCK_PUT(lock);
6504 ldlm_lock_decref(lockh, mode);
6506 /* wait for IO to complete if it's still being used. */
6508 CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
6509 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
6511 memset(&conf, 0, sizeof conf);
6512 conf.coc_opc = OBJECT_CONF_WAIT;
6513 conf.coc_inode = inode;
6514 rc = ll_layout_conf(inode, &conf);
6518 CDEBUG(D_INODE, "%s file="DFID" waiting layout return: %d\n",
6519 sbi->ll_fsname, PFID(&lli->lli_fid), rc);
6522 if (rc == -ERESTARTSYS) {
6525 struct cl_object * obj = lli->lli_clob;
6527 env = cl_env_get(&refcheck);
6529 RETURN(PTR_ERR(env));
6531 CDEBUG(D_INODE, "prune without lock "DFID"\n",
6532 PFID(lu_object_fid(&obj->co_lu)));
6534 trunc_sem_down_write(&lli->lli_trunc_sem);
6535 cl_object_prune(env, obj);
6536 trunc_sem_up_write(&lli->lli_trunc_sem);
6537 cl_env_put(env, &refcheck);
6546 * Issue layout intent RPC to MDS.
6547 * \param inode [in] file inode
6548 * \param intent [in] layout intent
6550 * \retval 0 on success
6551 * \retval < 0 error code
6553 static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
6555 struct ll_inode_info *lli = ll_i2info(inode);
6556 struct ll_sb_info *sbi = ll_i2sbi(inode);
6557 struct md_op_data *op_data;
6558 struct lookup_intent it;
6559 struct ptlrpc_request *req;
6563 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
6564 0, 0, LUSTRE_OPC_ANY, NULL);
6565 if (IS_ERR(op_data))
6566 RETURN(PTR_ERR(op_data));
6568 op_data->op_data = intent;
6569 op_data->op_data_size = sizeof(*intent);
6571 memset(&it, 0, sizeof(it));
6572 it.it_op = IT_LAYOUT;
6573 if (intent->lai_opc == LAYOUT_INTENT_WRITE ||
6574 intent->lai_opc == LAYOUT_INTENT_TRUNC)
6575 it.it_open_flags = FMODE_WRITE;
6577 LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
6578 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
6580 rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
6581 &ll_md_blocking_ast, 0);
6582 if (it.it_request != NULL)
6583 ptlrpc_req_finished(it.it_request);
6584 it.it_request = NULL;
6586 ll_finish_md_op_data(op_data);
6588 /* set lock data in case this is a new lock */
6590 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
6592 ll_intent_drop_lock(&it);
6598 * This function checks if there exists a LAYOUT lock on the client side,
6599 * or enqueues it if it doesn't have one in cache.
6601 * This function will not hold the layout lock, so it may be revoked any time
6602 * after this function returns. Any operations that depend on the layout should be redone
6605 * This function should be called before lov_io_init() to get an up-to-date
6606 * layout version; the caller should save the version number, and after IO
6607 * is finished, this function should be called again to verify that the layout
6608 * was not changed during the IO.
6610 int ll_layout_refresh(struct inode *inode, __u32 *gen)
6612 struct ll_inode_info *lli = ll_i2info(inode);
6613 struct ll_sb_info *sbi = ll_i2sbi(inode);
6614 struct lustre_handle lockh;
6615 struct layout_intent intent = {
6616 .lai_opc = LAYOUT_INTENT_ACCESS,
6618 enum ldlm_mode mode;
6623 *gen = ll_layout_version_get(lli);
6624 if (!test_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags) ||
6625 *gen != CL_LAYOUT_GEN_NONE)
6629 LASSERT(fid_is_sane(ll_inode2fid(inode)));
6630 LASSERT(S_ISREG(inode->i_mode));
6633 /* mostly the layout lock is cached on the local side, so try to
6634 * match it before grabbing the layout lock mutex. */
6635 mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
6636 LCK_CR | LCK_CW | LCK_PR |
6638 if (mode != 0) { /* hit cached lock */
6639 rc = ll_layout_lock_set(&lockh, mode, inode, try);
6646 /* take layout lock mutex to enqueue layout lock exclusively. */
6647 mutex_lock(&lli->lli_layout_mutex);
6648 rc = ll_layout_intent(inode, &intent);
6649 mutex_unlock(&lli->lli_layout_mutex);
6655 *gen = ll_layout_version_get(lli);
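/*
 * Example (in-kernel sketch) of the save-and-verify pattern described
 * above: record the layout generation before IO and check it afterwards.
 *
 *	__u32 gen_before, gen_after;
 *
 *	rc = ll_layout_refresh(inode, &gen_before);
 *	...				(submit IO, e.g. via lov_io_init)
 *	rc = ll_layout_refresh(inode, &gen_after);
 *	if (rc == 0 && gen_after != gen_before)
 *		...			(layout changed during IO, redo it)
 */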
6661 * Issue layout intent RPC indicating where in a file an IO is about to write.
6663 * \param[in] inode file inode.
6664 * \param[in] ext write range with the start offset of the file in bytes where
6665 * an IO is about to write, and the exclusive end offset in
6668 * \retval 0 on success
6669 * \retval < 0 error code
6671 int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
6672 struct lu_extent *ext)
6674 struct layout_intent intent = {
6676 .lai_extent.e_start = ext->e_start,
6677 .lai_extent.e_end = ext->e_end,
6682 rc = ll_layout_intent(inode, &intent);
6688 * This function sends a restore request to the MDT
6690 int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
6692 struct ll_inode_info *lli = ll_i2info(inode);
6693 struct hsm_user_request *hur;
6698 len = sizeof(struct hsm_user_request) +
6699 sizeof(struct hsm_user_item);
6700 OBD_ALLOC(hur, len);
6704 hur->hur_request.hr_action = HUA_RESTORE;
6705 hur->hur_request.hr_archive_id = 0;
6706 hur->hur_request.hr_flags = 0;
6707 memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
6708 sizeof(hur->hur_user_item[0].hui_fid));
6709 hur->hur_user_item[0].hui_extent.offset = offset;
6710 hur->hur_user_item[0].hui_extent.length = length;
6711 hur->hur_request.hr_itemcount = 1;
6712 rc = mutex_lock_interruptible(&lli->lli_layout_mutex);
6715 rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,
6717 mutex_unlock(&lli->lli_layout_mutex);