 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_dlm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/user_namespace.h>
#include <linux/uidgid.h>
#include <linux/falloc.h>
#include <linux/ktime.h>
#ifdef HAVE_LINUX_FILELOCK_HEADER
#include <linux/filelock.h>
#endif

#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_swab.h>

#include "cl_object.h"
#include "llite_internal.h"
#include "vvp_internal.h"
	struct inode *sp_inode;

	__u64 pa_data_version;

static int
ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);

static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
			  bool *lease_broken);

static struct ll_file_data *ll_file_data_get(void)
{
	struct ll_file_data *fd;
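
	/* GFP_NOFS keeps this allocation from recursing back into the
	 * filesystem under memory pressure while I/O is in flight.
	 */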
	OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, GFP_NOFS);
	if (fd == NULL)
		return NULL;

	fd->fd_write_failed = false;
	pcc_file_init(&fd->fd_pcc_file);

	return fd;
}

static void ll_file_data_put(struct ll_file_data *fd)
{
	if (fd != NULL)
		OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
}

/**
 * Packs all the attributes into @op_data for the CLOSE rpc.
 */
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
			     struct obd_client_handle *och)
{
	ll_prep_md_op_data(op_data, inode, NULL, NULL,
			   0, 0, LUSTRE_OPC_ANY, NULL);

	op_data->op_attr.ia_mode = inode->i_mode;
	op_data->op_attr.ia_atime = inode->i_atime;
	op_data->op_attr.ia_mtime = inode->i_mtime;
	op_data->op_attr.ia_ctime = inode->i_ctime;
	/* In case of an encrypted file without the key, the visible size was
	 * rounded up to the next LUSTRE_ENCRYPTION_UNIT_SIZE, and the clear
	 * text size was stored into lli_lazysize in ll_merge_attr(), so set
	 * the proper file size now that we are closing.
	 */
	if (llcrypt_require_key(inode) == -ENOKEY &&
	    ll_i2info(inode)->lli_attr_valid & OBD_MD_FLLAZYSIZE)
		op_data->op_attr.ia_size = ll_i2info(inode)->lli_lazysize;
	else
		op_data->op_attr.ia_size = i_size_read(inode);
	op_data->op_attr.ia_valid |= (ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
				      ATTR_MTIME | ATTR_MTIME_SET |
				      ATTR_CTIME);
	op_data->op_xvalid |= OP_XVALID_CTIME_SET;
	op_data->op_attr_blocks = inode->i_blocks;
	op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
	if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
		op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
	op_data->op_open_handle = och->och_open_handle;

	if (och->och_flags & FMODE_WRITE &&
	    test_and_clear_bit(LLIF_DATA_MODIFIED,
			       &ll_i2info(inode)->lli_flags))
		/* For HSM: if inode data has been modified, pack it so that
		 * the MDT can set the data-dirty flag in the archive.
		 */
		op_data->op_bias |= MDS_DATA_MODIFIED;
/**
 * Perform a close, possibly with a bias.
 * The meaning of "data" depends on the value of "bias".
 *
 * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
 * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode
 * to swap layouts with.
 */
static int ll_close_inode_openhandle(struct inode *inode,
				     struct obd_client_handle *och,
				     enum mds_op_bias bias, void *data)
{
	struct obd_export *md_exp = ll_i2mdexp(inode);
	const struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc;

	ENTRY;
	if (class_exp2obd(md_exp) == NULL) {
		CERROR("%s: invalid MDC connection handle closing "DFID"\n",
		       ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
		GOTO(out, rc = 0);
	}

	OBD_ALLOC_PTR(op_data);
	/* We leak the openhandle and request here on error, but not much can
	 * be done in the OOM case since the app won't retry close on error
	 * either.
	 */
	if (op_data == NULL)
		GOTO(out, rc = -ENOMEM);

	ll_prepare_close(inode, op_data, och);
	switch (bias) {
	case MDS_CLOSE_LAYOUT_MERGE:
		/* merge blocks from the victim inode */
		op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		fallthrough;
	case MDS_CLOSE_LAYOUT_SPLIT:
	case MDS_CLOSE_LAYOUT_SWAP: {
		struct split_param *sp = data;

		LASSERT(data != NULL);
		op_data->op_bias |= bias;
		op_data->op_data_version = 0;
		op_data->op_lease_handle = och->och_lease_handle;
		if (bias == MDS_CLOSE_LAYOUT_SPLIT) {
			op_data->op_fid2 = *ll_inode2fid(sp->sp_inode);
			op_data->op_mirror_id = sp->sp_mirror_id;
		} else {
			op_data->op_fid2 = *ll_inode2fid(data);
		}
		break;
	}

	case MDS_CLOSE_RESYNC_DONE: {
		struct ll_ioc_lease *ioc = data;

		LASSERT(data != NULL);
		op_data->op_attr_blocks +=
			ioc->lil_count * op_data->op_attr_blocks;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		op_data->op_bias |= MDS_CLOSE_RESYNC_DONE;

		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_data = &ioc->lil_ids[0];
		op_data->op_data_size =
			ioc->lil_count * sizeof(ioc->lil_ids[0]);
		break;
	}

	case MDS_PCC_ATTACH: {
		struct pcc_param *param = data;

		LASSERT(data != NULL);
		op_data->op_bias |= MDS_HSM_RELEASE | MDS_PCC_ATTACH;
		op_data->op_archive_id = param->pa_archive_id;
		op_data->op_data_version = param->pa_data_version;
		op_data->op_lease_handle = och->och_lease_handle;
		break;
	}

	case MDS_HSM_RELEASE:
		LASSERT(data != NULL);
		op_data->op_bias |= MDS_HSM_RELEASE;
		op_data->op_data_version = *(__u64 *)data;
		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		break;

	default:
		LASSERT(data == NULL);
		break;
	}
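
	/* If size or blocks were not packed above, mark them lazy so the MDT
	 * treats them as LSOM hints rather than authoritative values.
	 */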
	if (!(op_data->op_attr.ia_valid & ATTR_SIZE))
		op_data->op_xvalid |= OP_XVALID_LAZYSIZE;
	if (!(op_data->op_xvalid & OP_XVALID_BLOCKS))
		op_data->op_xvalid |= OP_XVALID_LAZYBLOCKS;

	rc = md_close(md_exp, op_data, och->och_mod, &req);
	if (rc != 0 && rc != -EINTR)
		CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
		       md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc);

	if (rc == 0 && op_data->op_bias & bias) {
		struct mdt_body *body;

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
			rc = -EBUSY;

		if (bias & MDS_PCC_ATTACH) {
			struct pcc_param *param = data;

			param->pa_layout_gen = body->mbo_layout_gen;
		}
	}

	ll_finish_md_op_data(op_data);

out:
	md_clear_open_replay_data(md_exp, och);
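	/* poison the handle cookie so any late user of this och is caught */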
	och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;

	if (req) /* This is the close request */
		ptlrpc_req_finished(req);

	return rc;
}

int ll_md_real_close(struct inode *inode, fmode_t fmode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct obd_client_handle **och_p;
	struct obd_client_handle *och;
	__u64 *och_usecount;
	int rc = 0;

	ENTRY;
	if (fmode & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else if (fmode & FMODE_EXEC) {
		och_p = &lli->lli_mds_exec_och;
		och_usecount = &lli->lli_open_fd_exec_count;
	} else {
		LASSERT(fmode & FMODE_READ);
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	mutex_lock(&lli->lli_och_mutex);
	if (*och_usecount > 0) {
		/* There are still users of this handle, so skip
		 * freeing it.
		 */
		mutex_unlock(&lli->lli_och_mutex);
		RETURN(0);
	}

	och = *och_p;
	*och_p = NULL;
	mutex_unlock(&lli->lli_och_mutex);

	if (och != NULL) {
		/* There might be a race and this handle may already
		 * be closed.
		 */
		rc = ll_close_inode_openhandle(inode, och, 0, NULL);
	}

	RETURN(rc);
}
static int ll_md_close(struct inode *inode, struct file *file)
{
	union ldlm_policy_data policy = {
		.l_inodebits = { MDS_INODELOCK_OPEN },
	};
	__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
	struct ll_file_data *fd = file->private_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lustre_handle lockh;
	enum ldlm_mode lockmode;
	int rc = 0;

	ENTRY;
	/* clear group lock, if present */
	if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
		ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);

	mutex_lock(&lli->lli_och_mutex);
	if (fd->fd_lease_och != NULL) {
		bool lease_broken;
		struct obd_client_handle *lease_och;

		lease_och = fd->fd_lease_och;
		fd->fd_lease_och = NULL;
		mutex_unlock(&lli->lli_och_mutex);

		/* Usually the lease is not released when the
		 * application crashes, so we need to release it here.
		 */
		rc = ll_lease_close(lease_och, inode, &lease_broken);

		mutex_lock(&lli->lli_och_mutex);

		CDEBUG_LIMIT(rc ? D_ERROR : D_INODE,
			     "Clean up lease "DFID" %d/%d\n",
			     PFID(&lli->lli_fid), rc, lease_broken);
	}

	if (fd->fd_och != NULL) {
		struct obd_client_handle *och;

		och = fd->fd_och;
		fd->fd_och = NULL;
		mutex_unlock(&lli->lli_och_mutex);

		rc = ll_close_inode_openhandle(inode, och, 0, NULL);
		GOTO(out, rc);
	}

	/* Let's see if we have a good enough OPEN lock on the file and
	 * whether we can skip talking to the MDS.
	 */
	if (fd->fd_omode & FMODE_WRITE) {
		lockmode = LCK_CW;
		LASSERT(lli->lli_open_fd_write_count);
		lli->lli_open_fd_write_count--;
	} else if (fd->fd_omode & FMODE_EXEC) {
		lockmode = LCK_PR;
		LASSERT(lli->lli_open_fd_exec_count);
		lli->lli_open_fd_exec_count--;
	} else {
		lockmode = LCK_CR;
		LASSERT(lli->lli_open_fd_read_count);
		lli->lli_open_fd_read_count--;
	}
	mutex_unlock(&lli->lli_och_mutex);

	/* LU-4398: do not cache a write open lock if the file has the exec
	 * bit set.
	 */
	if ((lockmode == LCK_CW && inode->i_mode & S_IXUGO) ||
	    !md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
			   LDLM_IBITS, &policy, lockmode, &lockh))
		rc = ll_md_real_close(inode, fd->fd_omode);

out:
	file->private_data = NULL;
	ll_file_data_put(fd);

	RETURN(rc);
}
/* While this returns an error code, the caller (fput()) ignores it, so we
 * need to make every effort to clean up all of our state here. Also,
 * applications rarely check close errors, and even if an error is returned
 * they will not retry the close call.
 */
int ll_file_release(struct inode *inode, struct file *file)
{
	struct ll_file_data *fd;
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_inode_info *lli = ll_i2info(inode);
	ktime_t kstart = ktime_get();
	int rc = 0;

	ENTRY;
	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(ll_inode2fid(inode)), inode);

	fd = file->private_data;
	LASSERT(fd != NULL);

	/* The last ref on @file, maybe not the owner pid of statahead,
	 * because parent and child processes can share the same file handle.
	 */
	if (S_ISDIR(inode->i_mode) &&
	    (lli->lli_opendir_key == fd || fd->fd_sai))
		ll_deauthorize_statahead(inode, fd);

	if (is_root_inode(inode)) {
		file->private_data = NULL;
		ll_file_data_put(fd);
		GOTO(out, rc = 0);
	}

	pcc_file_release(inode, file);

	if (!S_ISDIR(inode->i_mode)) {
		if (lli->lli_clob != NULL)
			lov_read_and_clear_async_rc(lli->lli_clob);
		lli->lli_async_rc = 0;
	}

	lli->lli_close_fd_time = ktime_get();

	rc = ll_md_close(inode, file);

	if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
		libcfs_debug_dumplog();

out:
	if (!rc && !is_root_inode(inode))
		ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
				   ktime_us_delta(ktime_get(), kstart));

	RETURN(rc);
}
static inline int ll_dom_readpage(void *data, struct page *page)
{
	/* since ll_dom_readpage is a page cache helper, it is safe to assume
	 * mapping and host pointers are set here
	 */
	struct niobuf_local *lnb = data;
	struct inode *inode;
	char *kaddr;
	int rc = 0;

	inode = page2inode(page);

	kaddr = kmap_atomic(page);
	memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
	if (lnb->lnb_len < PAGE_SIZE)
		memset(kaddr + lnb->lnb_len, 0,
		       PAGE_SIZE - lnb->lnb_len);
	kunmap_atomic(kaddr);
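
	/* Data returned inline with the open reply is still encrypted;
	 * decrypt it in place, one encryption unit at a time, when the key
	 * is available.
	 */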
	if (inode && IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
		if (!llcrypt_has_encryption_key(inode)) {
			CDEBUG(D_SEC, "no enc key for "DFID"\n",
			       PFID(ll_inode2fid(inode)));
		} else {
			unsigned int offs = 0;

			while (offs < PAGE_SIZE) {
				/* decrypt only if page is not empty */
				if (memcmp(page_address(page) + offs,
					   page_address(ZERO_PAGE(0)),
					   LUSTRE_ENCRYPTION_UNIT_SIZE) == 0)
					break;

				rc = llcrypt_decrypt_pagecache_blocks(page,
						LUSTRE_ENCRYPTION_UNIT_SIZE,
						offs);
				if (rc)
					break;

				offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
			}
		}
	}

	flush_dcache_page(page);
	SetPageUptodate(page);

	return rc;
}

#ifdef HAVE_READ_CACHE_PAGE_WANTS_FILE
static inline int ll_dom_read_folio(struct file *file, struct folio *folio0)
{
	return ll_dom_readpage(file->private_data, folio_page(folio0, 0));
}
#else
#define ll_dom_read_folio	ll_dom_readpage
#endif
void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req)
{
	struct lu_env *env;
	struct cl_io *io;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct address_space *mapping = inode->i_mapping;
	struct page *vmpage;
	struct niobuf_remote *rnb;
	struct mdt_body *body;
	char *data;
	unsigned long index, start;
	struct niobuf_local lnb;
	__u16 refcheck;
	int rc;

	ENTRY;
	if (obj == NULL)
		RETURN_EXIT;

	if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
				       RCL_SERVER))
		RETURN_EXIT;

	rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
	if (rnb == NULL || rnb->rnb_len == 0)
		RETURN_EXIT;

	/* LU-11595: The server may return the whole file, and that is always
	 * OK, or it may return just the file tail, whose offset must be
	 * aligned with the client PAGE_SIZE to be usable on this client; if
	 * the server's PAGE_SIZE is smaller, the offset may not be aligned,
	 * and that data is simply ignored.
	 */
	if (rnb->rnb_offset & ~PAGE_MASK)
		RETURN_EXIT;

	/* The server returns the whole file, or just the file tail if the
	 * whole file does not fit in the reply buffer; in both cases the
	 * total size should equal the file size.
	 */
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size &&
	    !(inode && IS_ENCRYPTED(inode))) {
		CERROR("%s: server returns off/len %llu/%u but size %llu\n",
		       ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
		       rnb->rnb_len, body->mbo_dom_size);
		RETURN_EXIT;
	}
	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN_EXIT;

	io = vvp_env_thread_io(env);
	io->ci_ignore_layout = 1;
	rc = cl_io_init(env, io, CIT_MISC, obj);
	if (rc)
		GOTO(out_io, rc);

	CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
	       rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);

	data = (char *)rnb + sizeof(*rnb);

	lnb.lnb_file_offset = rnb->rnb_offset;
	start = lnb.lnb_file_offset >> PAGE_SHIFT;
	index = 0;
	LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
	lnb.lnb_page_offset = 0;
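
	/* Walk the inline reply buffer page by page, instantiating page
	 * cache pages and copying the data into them.
	 */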
	do {
		struct cl_page *page;

		lnb.lnb_data = data + (index << PAGE_SHIFT);
		lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
		if (lnb.lnb_len > PAGE_SIZE)
			lnb.lnb_len = PAGE_SIZE;

		vmpage = ll_read_cache_page(mapping, index + start,
					    ll_dom_read_folio, &lnb);
		if (IS_ERR(vmpage)) {
			CWARN("%s: cannot fill page %lu for "DFID
			      " with data: rc = %li\n",
			      ll_i2sbi(inode)->ll_fsname, index + start,
			      PFID(lu_object_fid(&obj->co_lu)),
			      PTR_ERR(vmpage));
			break;
		}
		lock_page(vmpage);
		if (vmpage->mapping == NULL) {
			unlock_page(vmpage);
			put_page(vmpage);
			/* page was truncated */
			break;
		}
		/* attach VM page to CL page cache */
		page = cl_page_find(env, obj, vmpage->index, vmpage,
				    CPT_CACHEABLE);
		if (IS_ERR(page)) {
			ClearPageUptodate(vmpage);
			unlock_page(vmpage);
			put_page(vmpage);
			break;
		}
		SetPageUptodate(vmpage);
		cl_page_put(env, page);
		unlock_page(vmpage);
		put_page(vmpage);
		index++;
	} while (rnb->rnb_len > (index << PAGE_SHIFT));

out_io:
	cl_io_fini(env, io);
	cl_env_put(env, &refcheck);
	EXIT;
}
static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
			       struct lookup_intent *itp)
{
	struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
	struct dentry *parent = de->d_parent;
	char *name = NULL;
	int len = 0;
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc;

	ENTRY;
	LASSERT(parent != NULL);
	LASSERT(itp->it_flags & MDS_OPEN_BY_FID);

	/* if the server supports open-by-fid, or the file name is invalid,
	 * don't pack the name in the open request
	 */
	if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_OPEN_BY_NAME) ||
	    !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID)) {
retry:
		len = de->d_name.len;
		name = kmalloc(len + 1, GFP_NOFS);
		if (!name)
			RETURN(-ENOMEM);

		/* race here */
		spin_lock(&de->d_lock);
		if (len != de->d_name.len) {
			spin_unlock(&de->d_lock);
			kfree(name);
			goto retry;
		}
		memcpy(name, de->d_name.name, len);
		name[len] = '\0';
		spin_unlock(&de->d_lock);

		if (!lu_name_is_valid_2(name, len)) {
			kfree(name);
			RETURN(-ESTALE);
		}
	}

	op_data = ll_prep_md_op_data(NULL, parent->d_inode, de->d_inode,
				     name, len, 0, LUSTRE_OPC_OPEN, NULL);
	if (IS_ERR(op_data)) {
		kfree(name);
		RETURN(PTR_ERR(op_data));
	}
	op_data->op_data = lmm;
	op_data->op_data_size = lmmsize;

	CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_OPEN_DELAY, cfs_fail_val);

	rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
			    &ll_md_blocking_ast, 0);
	kfree(name);
	ll_finish_md_op_data(op_data);
	if (rc == -ESTALE) {
		/* reason for keeping our own exit path - don't flood the log
		 * with -ESTALE error messages.
		 */
		if (!it_disposition(itp, DISP_OPEN_OPEN) ||
		    it_open_error(DISP_OPEN_OPEN, itp))
			GOTO(out, rc);
		ll_release_openhandle(de, itp);
		GOTO(out, rc);
	}

	if (it_disposition(itp, DISP_LOOKUP_NEG))
		GOTO(out, rc = -ENOENT);

	if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
		rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
		CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
		GOTO(out, rc);
	}

	rc = ll_prep_inode(&de->d_inode, &req->rq_pill, NULL, itp);

	if (!rc && itp->it_lock_mode) {
		__u64 bits = 0;

		/* If we got a lock back and it has a LOOKUP bit set,
		 * make sure the dentry is marked as valid so we can find it.
		 * We don't need to care about actual hashing since other
		 * parts of the kernel will deal with that later.
		 */
		ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
		if (bits & MDS_INODELOCK_LOOKUP)
			d_lustre_revalidate(de);

		/* if the DoM bit is returned along with the LAYOUT bit, then
		 * there can be read-on-open data returned.
		 */
		if (bits & MDS_INODELOCK_DOM && bits & MDS_INODELOCK_LAYOUT)
			ll_dom_finish_open(de->d_inode, req);
	}
	/* open may not fetch the LOOKUP lock, so update dir depth and the
	 * default LMV anyway.
	 */
	if (!rc && S_ISDIR(de->d_inode->i_mode))
		ll_update_dir_depth_dmv(parent->d_inode, de);

out:
	ptlrpc_req_finished(req);
	ll_intent_drop_lock(itp);

	/* We did open by fid, but by the time we got to the server, the object
	 * disappeared. This is possible if the object was unlinked, but it's
	 * also possible if the object was unlinked by a rename. In the case
	 * of an object renamed over our existing one, we can't fail this open.
	 * O_CREAT also goes through this path if we had an existing dentry,
	 * and it's obviously wrong to return ENOENT for O_CREAT.
	 *
	 * Instead let's return -ESTALE, and the VFS will retry the open with
	 * LOOKUP_REVAL, which we catch in ll_revalidate_dentry and fail to
	 * revalidate, causing a lookup. This causes extra lookups in the case
	 * where we had a dentry in cache but the file is being unlinked and we
	 * lose the race with unlink, but this should be very rare.
	 */
	if (rc == -ENOENT)
		rc = -ESTALE;

	RETURN(rc);
}
static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
		       struct obd_client_handle *och)
{
	struct mdt_body *body;

	body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
	och->och_open_handle = body->mbo_open_handle;
	och->och_fid = body->mbo_fid1;
	och->och_lease_handle.cookie = it->it_lock_handle;
	och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
	och->och_flags = it->it_flags;
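
	/* register the open with the replay machinery so the handle
	 * survives MDS recovery
	 */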
	return md_set_open_replay_data(md_exp, och, it);
}

static int ll_local_open(struct file *file, struct lookup_intent *it,
			 struct ll_file_data *fd,
			 struct obd_client_handle *och)
{
	struct inode *inode = file_inode(file);
	int rc;

	ENTRY;
	LASSERT(!file->private_data);
	LASSERT(fd != NULL);

	if (och) {
		rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
		if (rc != 0)
			RETURN(rc);
	}

	file->private_data = fd;
	ll_readahead_init(inode, &fd->fd_ras);
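	/* remember the open mode so that release can find the matching
	 * MDS open handle to put back
	 */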
	fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);

	RETURN(0);
}

void ll_track_file_opens(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	/* do not skew results with delays from never-opened inodes */
	if (ktime_to_ns(lli->lli_close_fd_time))
		ll_stats_ops_tally(sbi, LPROC_LL_INODE_OPCLTM,
			ktime_us_delta(ktime_get(), lli->lli_close_fd_time));
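
	/* if the previous close was long enough ago, restart the open
	 * interval counting from this open
	 */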
	if (ktime_after(ktime_get(),
			ktime_add_ms(lli->lli_close_fd_time,
				     sbi->ll_oc_max_ms))) {
		lli->lli_open_fd_count = 1;
		lli->lli_close_fd_time = ns_to_ktime(0);
	} else {
		lli->lli_open_fd_count++;
	}

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_OCOUNT,
			   lli->lli_open_fd_count);
}
/* Open a file, and (for the very first open) create objects on the OSTs at
 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
 * creation or open until the ll_lov_setstripe() ioctl is called.
 *
 * If we already have the stripe MD locally, then we don't request it in
 * md_open() by passing lmm_size = 0.
 *
 * It is up to the application to ensure no other processes open this file
 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
 * used. We might be able to avoid races of that sort by getting lli_open_sem
 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
 */
int ll_file_open(struct inode *inode, struct file *file)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lookup_intent *it, oit = { .it_op = IT_OPEN,
					  .it_flags = file->f_flags };
	struct obd_client_handle **och_p = NULL;
	__u64 *och_usecount = NULL;
	struct ll_file_data *fd;
	ktime_t kstart = ktime_get();
	int rc = 0;

	ENTRY;
	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
	       PFID(ll_inode2fid(inode)), inode, file->f_flags);

	it = file->private_data; /* XXX: compat macro */
	file->private_data = NULL; /* prevent ll_local_open assertion */

	if (S_ISREG(inode->i_mode)) {
		rc = ll_file_open_encrypt(inode, file);
		if (rc) {
			if (it && it->it_disposition)
				ll_release_openhandle(file_dentry(file), it);
			GOTO(out_nofiledata, rc);
		}
	}

	fd = ll_file_data_get();
	if (fd == NULL)
		GOTO(out_nofiledata, rc = -ENOMEM);

	if (S_ISDIR(inode->i_mode))
		ll_authorize_statahead(inode, fd);

	ll_track_file_opens(inode);
	if (is_root_inode(inode)) {
		file->private_data = fd;
		RETURN(0);
	}

	if (!it || !it->it_disposition) {
		/* Convert f_flags into access mode. We cannot use
		 * file->f_mode, because everything but the O_ACCMODE mask
		 * was stripped from there.
		 */
		if ((oit.it_flags + 1) & O_ACCMODE)
			oit.it_flags++;
		if (file->f_flags & O_TRUNC)
			oit.it_flags |= FMODE_WRITE;

		/* The kernel only calls f_op->open in dentry_open. filp_open
		 * calls dentry_open after a call to open_namei, which checks
		 * permissions. Only nfsd_open calls dentry_open directly
		 * without checking permissions, which is why the code below
		 * is safe.
		 */
		if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
			oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;

		/* We do not want O_EXCL here, presumably we opened the file
		 * already? XXX - NFS implications?
		 */
		oit.it_flags &= ~O_EXCL;

		/* bug20584: if "it_flags" contains O_CREAT, the file will be
		 * created if necessary, so "IT_CREAT" should be set to stay
		 * consistent with it.
		 */
		if (oit.it_flags & O_CREAT)
			oit.it_op |= IT_CREAT;
		it = &oit;
	}

restart:
	/* Let's see if we have the file open on the MDS already. */
	if (it->it_flags & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else if (it->it_flags & FMODE_EXEC) {
		och_p = &lli->lli_mds_exec_och;
		och_usecount = &lli->lli_open_fd_exec_count;
	} else {
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	mutex_lock(&lli->lli_och_mutex);
	if (*och_p) { /* Open handle is present */
		if (it_disposition(it, DISP_OPEN_OPEN)) {
			/* Well, there's an extra open request that we do not
			 * need; let's close it somehow. This will decref the
			 * request.
			 */
			rc = it_open_error(DISP_OPEN_OPEN, it);
			if (rc) {
				mutex_unlock(&lli->lli_och_mutex);
				GOTO(out_openerr, rc);
			}

			ll_release_openhandle(file_dentry(file), it);
		}

		rc = ll_local_open(file, it, fd, NULL);
		if (rc) {
			mutex_unlock(&lli->lli_och_mutex);
			GOTO(out_openerr, rc);
		}
	} else {
		LASSERT(*och_usecount == 0);
		if (!it->it_disposition) {
			struct dentry *dentry = file_dentry(file);
			struct ll_sb_info *sbi = ll_i2sbi(inode);
			int open_threshold = sbi->ll_oc_thrsh_count;

			/* We cannot just request a lock handle now, the new
			 * ELC code means that one of the other OPEN locks for
			 * this file could be cancelled, and since the
			 * blocking AST handler would attempt to grab
			 * och_mutex as well, that would result in a deadlock.
			 */
			mutex_unlock(&lli->lli_och_mutex);
			/*
			 * Normally called under two situations:
			 * 1. fhandle / NFS export.
			 * 2. A race/condition on MDS resulting in no open
			 *    handle to be returned from LOOKUP|OPEN request,
			 *    for example if the target entry was a symlink.
			 *
			 * For NFSv3 we need to always cache the open lock
			 * for pre 5.5 Linux kernels.
			 *
			 * After reaching a number of opens of this inode
			 * we always ask for an open lock on it to handle
			 * bad userspace actors that open and close files
			 * in a loop for absolutely no good reason.
			 */
			/* fhandle / NFS path. */
			if (lli->lli_open_thrsh_count != UINT_MAX)
				open_threshold = lli->lli_open_thrsh_count;

			if (filename_is_volatile(dentry->d_name.name,
						 dentry->d_name.len, NULL)) {
				/* There really is nothing here, but this
				 * makes it more readable, I think.
				 * We do not want an openlock for volatile
				 * files under any circumstances.
				 */
			} else if (open_threshold > 0) {
				/* Take MDS_OPEN_LOCK with many opens */
				if (lli->lli_open_fd_count >= open_threshold)
					it->it_flags |= MDS_OPEN_LOCK;

				/* If this is open after we just closed */
				else if (ktime_before(ktime_get(),
					    ktime_add_ms(lli->lli_close_fd_time,
							 sbi->ll_oc_thrsh_ms)))
					it->it_flags |= MDS_OPEN_LOCK;
			}

			/*
			 * Always specify MDS_OPEN_BY_FID because we don't want
			 * to get a file with a different fid.
			 */
			it->it_flags |= MDS_OPEN_BY_FID;
			rc = ll_intent_file_open(dentry, NULL, 0, it);
			if (rc)
				GOTO(out_openerr, rc);
			goto restart;
		}

		OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
		if (!*och_p)
			GOTO(out_och_free, rc = -ENOMEM);
	}

	/* md_intent_lock() didn't get a request ref if there was an
	 * open error, so don't do cleanup on the request here
	 * (only ldlm lock cleanup)
	 */
	/* XXX (green): Shouldn't we bail out on any error here, not
	 * just open error?
	 */
	rc = it_open_error(DISP_OPEN_OPEN, it);
	if (rc != 0)
		GOTO(out_och_free, rc);

	LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
		 "inode %p: disposition %x, status %d\n", inode,
		 it_disposition(it, ~0), it->it_status);

	rc = ll_local_open(file, it, fd, *och_p);
	if (rc)
		GOTO(out_och_free, rc);

	rc = pcc_file_open(inode, file);
	if (rc)
		GOTO(out_och_free, rc);

	mutex_unlock(&lli->lli_och_mutex);

	/* Must do this outside the lli_och_mutex lock to prevent a deadlock
	 * where a different kind of OPEN lock for this same inode gets
	 * cancelled by ldlm_cancel_lru.
	 */
	if (!S_ISREG(inode->i_mode))
		GOTO(out_och_free, rc);
	cl_lov_delay_create_clear(&file->f_flags);
	GOTO(out_och_free, rc);

out_och_free:
	if (rc) {
		if (och_p && *och_p) {
			OBD_FREE(*och_p, sizeof(struct obd_client_handle));
			*och_p = NULL; /* OBD_FREE writes some magic there */
		}

		mutex_unlock(&lli->lli_och_mutex);

out_openerr:
		if (lli->lli_opendir_key == fd)
			ll_deauthorize_statahead(inode, fd);

		ll_file_data_put(fd);
	} else {
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN,
				   ktime_us_delta(ktime_get(), kstart));
	}

out_nofiledata:
	if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
		ptlrpc_req_finished(it->it_request);
		it_clear_disposition(it, DISP_ENQ_OPEN_REF);
	}

	return rc;
}
static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
				    struct ldlm_lock_desc *desc,
				    void *data, int flag)
{
	struct lustre_handle lockh;
	int rc;

	ENTRY;
	switch (flag) {
	case LDLM_CB_BLOCKING:
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		if (rc < 0) {
			CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
			RETURN(rc);
		}
		break;
	case LDLM_CB_CANCELING:
		/* do nothing */
		break;
	}
	RETURN(0);
}

/**
 * When setting a lease on a file, we take ownership of the lli_mds_*_och
 * and save it as fd->fd_och so as to force the client to reopen the file
 * even if it has an open lock in cache already.
 */
static int ll_lease_och_acquire(struct inode *inode, struct file *file,
				struct lustre_handle *old_open_handle)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_file_data *fd = file->private_data;
	struct obd_client_handle **och_p;
	__u64 *och_usecount;
	int rc = 0;

	ENTRY;
	/* Get the openhandle of the file */
	mutex_lock(&lli->lli_och_mutex);
	if (fd->fd_lease_och != NULL)
		GOTO(out_unlock, rc = -EBUSY);

	if (fd->fd_och == NULL) {
		if (file->f_mode & FMODE_WRITE) {
			LASSERT(lli->lli_mds_write_och != NULL);
			och_p = &lli->lli_mds_write_och;
			och_usecount = &lli->lli_open_fd_write_count;
		} else {
			LASSERT(lli->lli_mds_read_och != NULL);
			och_p = &lli->lli_mds_read_och;
			och_usecount = &lli->lli_open_fd_read_count;
		}

		if (*och_usecount > 1)
			GOTO(out_unlock, rc = -EBUSY);

		fd->fd_och = *och_p;
		*och_p = NULL;
	}

	*old_open_handle = fd->fd_och->och_open_handle;

out_unlock:
	mutex_unlock(&lli->lli_och_mutex);
	RETURN(rc);
}
/**
 * Release ownership on lli_mds_*_och when putting back a file lease.
 */
static int ll_lease_och_release(struct inode *inode, struct file *file)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_file_data *fd = file->private_data;
	struct obd_client_handle **och_p;
	struct obd_client_handle *old_och = NULL;
	__u64 *och_usecount;
	int rc = 0;

	ENTRY;
	mutex_lock(&lli->lli_och_mutex);
	if (file->f_mode & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else {
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	/* The file may have been opened by another process (broken lease), so
	 * *och_p is not NULL. In this case we should simply increase the
	 * usecount and close fd_och.
	 */
	if (*och_p != NULL) {
		old_och = fd->fd_och;
		(*och_usecount)++;
	} else {
		*och_p = fd->fd_och;
		*och_usecount = 1;
	}
	fd->fd_och = NULL;
	mutex_unlock(&lli->lli_och_mutex);

	if (old_och != NULL)
		rc = ll_close_inode_openhandle(inode, old_och, 0, NULL);

	RETURN(rc);
}
/**
 * Acquire a lease and open the file.
 */
static struct obd_client_handle *
ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
	      __u64 open_flags)
{
	struct lookup_intent it = { .it_op = IT_OPEN };
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	struct lustre_handle old_open_handle = { 0 };
	struct obd_client_handle *och = NULL;
	int rc;
	int rc2;

	ENTRY;
	if (fmode != FMODE_WRITE && fmode != FMODE_READ)
		RETURN(ERR_PTR(-EINVAL));

	if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
		RETURN(ERR_PTR(-EPERM));

	rc = ll_lease_och_acquire(inode, file, &old_open_handle);
	if (rc)
		RETURN(ERR_PTR(rc));

	OBD_ALLOC_PTR(och);
	if (och == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		GOTO(out, rc = PTR_ERR(op_data));

	/* To tell the MDT this openhandle is from the same owner */
	op_data->op_open_handle = old_open_handle;

	it.it_flags = fmode | open_flags;
	it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
	rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
			    &ll_md_blocking_lease_ast,
	/* LDLM_FL_NO_LRU: To not put the lease lock into the LRU list,
	 * otherwise it can be cancelled, which may mislead applications into
	 * thinking the lease is still valid.
	 * LDLM_FL_EXCL: Set this flag so that it won't be matched by a normal
	 * open in ll_md_blocking_ast(). Otherwise, as
	 * ll_md_blocking_lease_ast doesn't deal with the openhandle, the
	 * normal openhandle would be leaked.
	 */
			    LDLM_FL_NO_LRU | LDLM_FL_EXCL);
	ll_finish_md_op_data(op_data);
	ptlrpc_req_finished(req);
	if (rc < 0)
		GOTO(out_release_it, rc);
	if (it_disposition(&it, DISP_LOOKUP_NEG))
		GOTO(out_release_it, rc = -ENOENT);

	rc = it_open_error(DISP_OPEN_OPEN, &it);
	if (rc)
		GOTO(out_release_it, rc);

	LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
	rc = ll_och_fill(sbi->ll_md_exp, &it, och);
	if (rc)
		GOTO(out_release_it, rc);

	if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
		GOTO(out_close, rc = -EOPNOTSUPP);

	/* lease already acquired, handle the lease lock */
	ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
	if (!it.it_lock_mode ||
	    !(it.it_lock_bits & MDS_INODELOCK_OPEN)) {
		/* an open lock must be returned for a lease */
		CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
		       PFID(ll_inode2fid(inode)), it.it_lock_mode,
		       it.it_lock_bits);
		GOTO(out_close, rc = -EPROTO);
	}

	ll_intent_release(&it);
	RETURN(och);

out_close:
	/* Cancel the open lock */
	if (it.it_lock_mode != 0) {
		ldlm_lock_decref_and_cancel(&och->och_lease_handle,
					    it.it_lock_mode);
		it.it_lock_mode = 0;
		och->och_lease_handle.cookie = 0ULL;
	}
	rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
	if (rc2 < 0)
		CERROR("%s: error closing file "DFID": %d\n",
		       sbi->ll_fsname, PFID(&ll_i2info(inode)->lli_fid), rc2);
	och = NULL; /* och has been freed in ll_close_inode_openhandle() */

out_release_it:
	ll_intent_release(&it);
out:
	if (och != NULL)
		OBD_FREE_PTR(och);
	RETURN(ERR_PTR(rc));
}
/**
 * Check whether a layout swap can be done between two inodes.
 *
 * \param[in] inode1	First inode to check
 * \param[in] inode2	Second inode to check
 *
 * \retval 0 on success, layout swap can be performed between both inodes
 * \retval negative error code if requirements are not met
 */
static int ll_check_swap_layouts_validity(struct inode *inode1,
					  struct inode *inode2)
{
	if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
		return -EINVAL;

	if (inode_permission(&nop_mnt_idmap, inode1, MAY_WRITE) ||
	    inode_permission(&nop_mnt_idmap, inode2, MAY_WRITE))
		return -EPERM;

	if (inode1->i_sb != inode2->i_sb)
		return -EXDEV;

	return 0;
}

static int ll_swap_layouts_close(struct obd_client_handle *och,
				 struct inode *inode, struct inode *inode2)
{
	const struct lu_fid *fid1 = ll_inode2fid(inode);
	const struct lu_fid *fid2;
	int rc;

	ENTRY;
	CDEBUG(D_INODE, "%s: biased close of file "DFID"\n",
	       ll_i2sbi(inode)->ll_fsname, PFID(fid1));

	rc = ll_check_swap_layouts_validity(inode, inode2);
	if (rc < 0)
		GOTO(out_free_och, rc);

	/* We now know that inode2 is a lustre inode */
	fid2 = ll_inode2fid(inode2);

	rc = lu_fid_cmp(fid1, fid2);
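	/* a zero fid comparison means both fids are equal: swapping a
	 * file's layout with itself makes no sense
	 */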
	if (rc == 0)
		GOTO(out_free_och, rc = -EINVAL);

	/* Close the file and {swap,merge} layouts between inode & inode2.
	 * NB: the local lease handle is released in mdc_close_intent_pack()
	 * because we still need it to pack the l_remote_handle to the MDT.
	 */
	rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP,
				       inode2);
	if (rc == 0)
		och = NULL; /* freed in ll_close_inode_openhandle() */

out_free_och:
	if (och != NULL)
		OBD_FREE_PTR(och);

	RETURN(rc);
}
/**
 * Release the lease and close the file.
 * It will check whether the lease has ever been broken.
 */
static int ll_lease_close_intent(struct obd_client_handle *och,
				 struct inode *inode,
				 bool *lease_broken, enum mds_op_bias bias,
				 void *data)
{
	struct ldlm_lock *lock;
	bool cancelled = true;
	int rc;

	ENTRY;
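	/* if the lease lock has already been cancelled, the lease was broken
	 * and the close intent below is skipped
	 */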
	lock = ldlm_handle2lock(&och->och_lease_handle);
	if (lock != NULL) {
		lock_res_and_lock(lock);
		cancelled = ldlm_is_cancel(lock);
		unlock_res_and_lock(lock);
		LDLM_LOCK_PUT(lock);
	}

	CDEBUG(D_INODE, "lease for "DFID" broken? %d, bias: %x\n",
	       PFID(&ll_i2info(inode)->lli_fid), cancelled, bias);

	if (lease_broken != NULL)
		*lease_broken = cancelled;

	if (!cancelled && !bias)
		ldlm_cli_cancel(&och->och_lease_handle, 0);

	if (cancelled) { /* no need to execute the intent */
		bias = 0;
		data = NULL;
	}

	rc = ll_close_inode_openhandle(inode, och, bias, data);
	RETURN(rc);
}

static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
			  bool *lease_broken)
{
	return ll_lease_close_intent(och, inode, lease_broken, 0, NULL);
}
/**
 * After the lease is taken, send the RPC MDS_REINT_RESYNC to the MDT
 */
static int ll_lease_file_resync(struct obd_client_handle *och,
				struct inode *inode, void __user *uarg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ll_ioc_lease_id ioc;
	__u64 data_version_unused;
	int rc;

	ENTRY;
	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		RETURN(PTR_ERR(op_data));

	if (copy_from_user(&ioc, uarg, sizeof(ioc)))
		GOTO(out, rc = -EFAULT);

	/* before starting a file resync, it's necessary to clean up the page
	 * cache in client memory, otherwise once the layout version is
	 * increased, writing back cached data will be denied by the OSTs.
	 */
	rc = ll_data_version(inode, &data_version_unused, LL_DV_WR_FLUSH);
	if (rc)
		GOTO(out, rc);

	op_data->op_lease_handle = och->och_lease_handle;
	op_data->op_mirror_id = ioc.lil_mirror_id;
	rc = md_file_resync(sbi->ll_md_exp, op_data);

out:
	ll_finish_md_op_data(op_data);
	RETURN(rc);
}
static int ll_merge_attr_nolock(const struct lu_env *env, struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct cl_attr *attr = vvp_env_thread_attr(env);
	s64 atime;
	s64 mtime;
	s64 ctime;
	int rc = 0;

	ENTRY;
	/* Merge the timestamps most recently obtained from the MDS with
	 * the timestamps obtained from the OSTs.
	 *
	 * Do not overwrite atime of the inode, because it may be refreshed
	 * by the file_accessed() function. If the read was served by cached
	 * data, there is no RPC to be sent, so atime may not be transferred
	 * to the OSTs at all. The MDT only updates atime at close time if
	 * it's at least 'mdd.*.atime_diff' older.
	 * All in all, the atime in Lustre does not strictly comply with
	 * POSIX. Solving this problem would require sending an RPC to the
	 * MDT for each read, which would hurt performance.
	 */
	if (test_and_clear_bit(LLIF_UPDATE_ATIME, &lli->lli_flags) ||
	    inode->i_atime.tv_sec < lli->lli_atime)
		inode->i_atime.tv_sec = lli->lli_atime;

	inode->i_mtime.tv_sec = lli->lli_mtime;
	inode->i_ctime.tv_sec = lli->lli_ctime;

	mtime = inode->i_mtime.tv_sec;
	atime = inode->i_atime.tv_sec;
	ctime = inode->i_ctime.tv_sec;

	cl_object_attr_lock(obj);
	if (CFS_FAIL_CHECK(OBD_FAIL_MDC_MERGE))
		rc = -EINVAL;
	else
		rc = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);

	if (rc != 0)
		GOTO(out, rc = (rc == -ENODATA ? 0 : rc));

	if (atime < attr->cat_atime)
		atime = attr->cat_atime;

	if (ctime < attr->cat_ctime)
		ctime = attr->cat_ctime;

	if (mtime < attr->cat_mtime)
		mtime = attr->cat_mtime;

	CDEBUG(D_VFSTRACE, DFID" updating i_size %llu i_blocks %llu\n",
	       PFID(&lli->lli_fid), attr->cat_size, attr->cat_blocks);

	if (llcrypt_require_key(inode) == -ENOKEY) {
		/* Without the key, round up the encrypted file size to the
		 * next LUSTRE_ENCRYPTION_UNIT_SIZE. The clear text size is
		 * put in lli_lazysize for proper file size setting at close
		 * time.
		 */
		lli->lli_attr_valid |= OBD_MD_FLLAZYSIZE;
		lli->lli_lazysize = attr->cat_size;
		attr->cat_size = round_up(attr->cat_size,
					  LUSTRE_ENCRYPTION_UNIT_SIZE);
	}
	i_size_write(inode, attr->cat_size);
	inode->i_blocks = attr->cat_blocks;

	inode->i_mtime.tv_sec = mtime;
	inode->i_atime.tv_sec = atime;
	inode->i_ctime.tv_sec = ctime;

out:
	RETURN(rc);
}
int ll_merge_attr(const struct lu_env *env, struct inode *inode)
{
	int rc;

	ll_inode_size_lock(inode);
	rc = ll_merge_attr_nolock(env, inode);
	ll_inode_size_unlock(inode);

	return rc;
}

/* Used to update size and blocks on the inode for LSOM if there is no
 * contention */
int ll_merge_attr_try(const struct lu_env *env, struct inode *inode)
{
	int rc = 0;

	if (ll_inode_size_trylock(inode)) {
		rc = ll_merge_attr_nolock(env, inode);
		ll_inode_size_unlock(inode);
	}

	return rc;
}
/**
 * Set the designated mirror for I/O.
 *
 * So far only read, write, and truncate can issue I/O to a
 * designated mirror.
 */
void ll_io_set_mirror(struct cl_io *io, const struct file *file)
{
	struct ll_file_data *fd = file->private_data;

	/* clear the layout version for generic (non-resync) I/O in case it
	 * carries a stale layout version due to I/O restart
	 */
	io->ci_layout_version = 0;

	/* FLR: disable non-delay for designated mirror I/O because obviously
	 * only one mirror is available
	 */
	if (fd->fd_designated_mirror > 0) {
		io->ci_designated_mirror = fd->fd_designated_mirror;
		io->ci_layout_version = fd->fd_layout_version;
	}

	CDEBUG(D_VFSTRACE, "%s: designated mirror: %d\n",
	       file->f_path.dentry->d_name.name, io->ci_designated_mirror);
}
/*
 * This is relatime_need_update() from Linux 5.17, which is not exported.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec64 now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/* Is mtime younger than atime? If yes, update atime: */
	if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/* Is ctime younger than atime? If yes, update atime: */
	if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;
	/* Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/* Good, we can skip the atime update: */
	return 0;
}

/*
 * Very similar to kernel function: !__atime_needs_update()
 */
static bool file_is_noatime(const struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file_inode((struct file *)file);
	struct timespec64 now;

	if (file->f_flags & O_NOATIME)
		return true;

	if (inode->i_flags & S_NOATIME)
		return true;

	if (IS_NOATIME(inode))
		return true;

	if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
		return true;

	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return true;

	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
		return true;

	now = current_time(inode);

	if (!relatime_need_update(mnt, inode, now))
		return true;

	return false;
}
void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
		struct vvp_io_args *args)
{
	struct inode *inode = file_inode(file);
	struct ll_file_data *fd = file->private_data;

	io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
	io->ci_lock_no_expand = fd->ll_lock_no_expand;

	if (iot == CIT_WRITE) {
		io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
		io->u.ci_wr.wr_sync = !!(file->f_flags & O_SYNC ||
					 file->f_flags & O_DIRECT ||
					 IS_SYNC(inode));
#ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
		io->u.ci_wr.wr_sync |= !!(args &&
					  (args->u.normal.via_iocb->ki_flags &
					   IOCB_DSYNC));
#endif
	}

#ifdef IOCB_NOWAIT
	io->ci_iocb_nowait = !!(args &&
				(args->u.normal.via_iocb->ki_flags &
				 IOCB_NOWAIT));
#endif

	io->ci_obj = ll_i2info(inode)->lli_clob;
	io->ci_lockreq = CILR_MAYBE;
	if (ll_file_nolock(file)) {
		io->ci_lockreq = CILR_NEVER;
		io->ci_no_srvlock = 1;
	} else if (file->f_flags & O_APPEND) {
		io->ci_lockreq = CILR_MANDATORY;
	}
	io->ci_noatime = file_is_noatime(file);
	io->ci_async_readahead = false;

	/* FLR: only use non-delay I/O for read, as there is only one
	 * available mirror for write.
	 */
	io->ci_ndelay = !(iot == CIT_WRITE);

	ll_io_set_mirror(io, file);
}
static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
			ssize_t count)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	enum obd_heat_type sample_type;
	enum obd_heat_type iobyte_type;
	__u64 now = ktime_get_real_seconds();

	if (!ll_sbi_has_file_heat(sbi) ||
	    lli->lli_heat_flags & LU_HEAT_FLAG_OFF)
		return;

	if (iot == CIT_READ) {
		sample_type = OBD_HEAT_READSAMPLE;
		iobyte_type = OBD_HEAT_READBYTE;
	} else if (iot == CIT_WRITE) {
		sample_type = OBD_HEAT_WRITESAMPLE;
		iobyte_type = OBD_HEAT_WRITEBYTE;
	} else {
		return;
	}

	spin_lock(&lli->lli_heat_lock);
	obd_heat_add(&lli->lli_heat_instances[sample_type], now, 1,
		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
	obd_heat_add(&lli->lli_heat_instances[iobyte_type], now, count,
		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
	spin_unlock(&lli->lli_heat_lock);
}
static ssize_t
ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
		   struct file *file, enum cl_io_type iot,
		   loff_t *ppos, size_t count)
{
	struct vvp_io *vio = vvp_env_io(env);
	struct inode *inode = file_inode(file);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_file_data *fd = file->private_data;
	struct range_lock range;
	bool range_locked = false;
	struct cl_io *io;
	ssize_t result = 0;
	int rc = 0;
	int retries = 1000;
	unsigned int retried = 0, dio_lock = 0;
	bool is_aio = false;
	bool is_parallel_dio = false;
	struct cl_dio_aio *ci_dio_aio = NULL;
	size_t per_bytes;
	bool partial_io = false;
	size_t max_io_pages, max_cached_pages;

	ENTRY;
	CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, count: %zu\n",
	       file_dentry(file)->d_name.name,
	       iot == CIT_READ ? "read" : "write", *ppos, count);
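
	/* cap the bytes handled in one pass so that a single large IO cannot
	 * pin more than a quarter of the client page cache
	 */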
	max_io_pages = PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT;
	max_cached_pages = sbi->ll_cache->ccc_lru_max;
	if (max_io_pages > (max_cached_pages >> 2))
		max_io_pages = max_cached_pages >> 2;

	io = vvp_env_thread_io(env);
	if (file->f_flags & O_DIRECT) {
		if (file->f_flags & O_APPEND)
			dio_lock = 1;
		if (!is_sync_kiocb(args->u.normal.via_iocb))
			is_aio = true;

		/* the kernel does not support AIO on pipes, and parallel DIO
		 * uses part of the AIO path, so we must not do parallel dio
		 * in that case
		 */
		is_parallel_dio = !iov_iter_is_pipe(args->u.normal.via_iter) &&
				  !is_aio;

		if (!ll_sbi_has_parallel_dio(sbi))
			is_parallel_dio = false;

		ci_dio_aio = cl_dio_aio_alloc(args->u.normal.via_iocb,
					      ll_i2info(inode)->lli_clob,
					      is_aio);
		if (ci_dio_aio == NULL)
			GOTO(out, rc = -ENOMEM);
	}

restart:
	/*
	 * The IO block size needs to be aware of the cached page limit;
	 * otherwise, with a small max_cached_mb but a large block IO issued,
	 * the IO could never finish and would block the whole client.
	 */
	if (file->f_flags & O_DIRECT)
		per_bytes = count;
	else
		per_bytes = min(max_io_pages << PAGE_SHIFT, count);
	partial_io = per_bytes < count;
	io = vvp_env_thread_io(env);
	ll_io_init(io, file, iot, args);
	io->ci_dio_aio = ci_dio_aio;
	io->ci_dio_lock = dio_lock;
	io->ci_ndelay_tried = retried;
	io->ci_parallel_dio = is_parallel_dio;
	if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
		if (file->f_flags & O_APPEND)
			range_lock_init(&range, 0, LUSTRE_EOF);
		else
			range_lock_init(&range, *ppos, *ppos + per_bytes - 1);

		vio->vui_fd = file->private_data;
		vio->vui_iter = args->u.normal.via_iter;
		vio->vui_iocb = args->u.normal.via_iocb;
		/* Direct IO reads must also take the range lock, or multiple
		 * reads will try to work on the same pages.
		 * See LU-6227 for details.
		 */
		if (((iot == CIT_WRITE) ||
		     (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
		    !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
			CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
			       RL_PARA(&range));
			rc = range_lock(&lli->lli_write_tree, &range);
			if (rc < 0)
				GOTO(out, rc);

			range_locked = true;
		}

		ll_cl_add(inode, env, io, LCC_RW);
		rc = cl_io_loop(env, io);
		ll_cl_remove(inode, env);
	} else {
		/* cl_io_rw_init() handled the IO */
		rc = io->ci_result;
	}

	if (io->ci_dio_aio && !is_aio) {
		struct cl_sync_io *anchor = &io->ci_dio_aio->cda_sync;
		ssize_t rc2;

		/* for dio, EIOCBQUEUED is an implementation detail,
		 * and we don't return it to userspace
		 */
		if (rc == -EIOCBQUEUED)
			rc = 0;

		/* NB: parallel DIO may be disabled during I/O submission;
		 * if that occurs, I/O shifts to sync, so it's all resolved
		 * before we get here, and this wait call completes
		 * immediately.
		 */
		rc2 = cl_sync_io_wait_recycle(env, anchor, 0, 0);
		if (rc2 < 0)
			rc = rc2;
	}

	if (range_locked) {
		CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
		       RL_PARA(&range));
		range_unlock(&lli->lli_write_tree, &range);
		range_locked = false;
	}
	if (io->ci_nob > 0) {
		result += io->ci_nob;
		*ppos = io->u.ci_wr.wr.crw_pos; /* for splice */

		count -= io->ci_nob;

		/* prepare IO restart */
		args->u.normal.via_iter = vio->vui_iter;
		if (partial_io) {
			/*
			 * Reexpand the iov count because it was zeroed
			 * after the IO finished.
			 */
			iov_iter_reexpand(vio->vui_iter, count);
			if (per_bytes == io->ci_nob)
				io->ci_need_restart = 1;
		}
	}

out:
	cl_io_fini(env, io);

	CDEBUG(D_VFSTRACE,
	       "%s: %d io complete with rc: %d, result: %zd, restart: %d\n",
	       file->f_path.dentry->d_name.name,
	       iot, rc, result, io->ci_need_restart);

	if ((!rc || rc == -ENODATA || rc == -ENOLCK || rc == -EIOCBQUEUED) &&
	    count > 0 && io->ci_need_restart && retries-- > 0) {
		CDEBUG(D_VFSTRACE,
		       "%s: restart %s from ppos=%lld count=%zu retries=%u ret=%zd: rc = %d\n",
		       file_dentry(file)->d_name.name,
		       iot == CIT_READ ? "read" : "write",
		       *ppos, count, retries, result, rc);
		/* preserve the tried count for FLR */
		retried = io->ci_ndelay_tried;
		dio_lock = io->ci_dio_lock;
		goto restart;
	}
	if (io->ci_dio_aio) {
		/* set the number of bytes successfully moved in the aio */
		if (result > 0)
			io->ci_dio_aio->cda_bytes = result;
		/*
		 * VFS will call aio_complete() if no -EIOCBQUEUED
		 * is returned for AIO, so we cannot call aio_complete()
		 * in our end_io(). (cda_no_aio_complete is always set for
		 * normal DIO.)
		 *
		 * NB: Setting cda_no_aio_complete like this is safe because
		 * the atomic_dec_and_lock in cl_sync_io_note has implicit
		 * memory barriers, so this will be seen by whichever thread
		 * completes the DIO/AIO, even if it's not this one.
		 */
		if (is_aio && rc != -EIOCBQUEUED)
			io->ci_dio_aio->cda_no_aio_complete = 1;
		/* if an aio was enqueued successfully (-EIOCBQUEUED), then
		 * Lustre will call aio_complete rather than the VFS, so we
		 * return 0 to tell the VFS we're handling it
		 */
		else if (is_aio) /* rc == -EIOCBQUEUED */
			rc = 0;
		/*
		 * Drop the reference held by the llite layer on this top level
		 * IO context.
		 *
		 * For DIO, this frees it here, since the IO is complete, and
		 * for AIO, we will call aio_complete() (and then free this
		 * top level context) once all the outstanding chunks of this
		 * AIO have completed.
		 */
		cl_sync_io_note(env, &io->ci_dio_aio->cda_sync,
				rc == -EIOCBQUEUED ? 0 : rc);
		if (!is_aio) {
			LASSERT(io->ci_dio_aio->cda_creator_free);
			cl_dio_aio_free(env, io->ci_dio_aio);
			io->ci_dio_aio = NULL;
		}
	}
	if (iot == CIT_READ) {
		if (result > 0)
			ll_stats_ops_tally(ll_i2sbi(inode),
					   LPROC_LL_READ_BYTES, result);
	} else if (iot == CIT_WRITE) {
		if (result > 0) {
			ll_stats_ops_tally(ll_i2sbi(inode),
					   LPROC_LL_WRITE_BYTES, result);
			fd->fd_write_failed = false;
		} else if (result == 0 && rc == 0) {
			rc = io->ci_result;
			if (rc < 0)
				fd->fd_write_failed = true;
			else
				fd->fd_write_failed = false;
		} else if (rc != -ERESTARTSYS) {
			fd->fd_write_failed = true;
		}
	}

	CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);

	ll_heat_add(inode, iot, result);

	RETURN(result > 0 ? result : rc);
}
/**
 * The purpose of fast read is to overcome the per-I/O overhead and improve
 * IOPS, especially for small I/O.
 *
 * To serve a read request, CLIO has to create and initialize a cl_io and
 * then request a DLM lock. This has turned out to have significant overhead
 * and affects the performance of small I/O dramatically.
 *
 * It's not necessary to create a cl_io for each I/O. With the help of read
 * ahead, most of the pages being read are already in the memory cache and we
 * can read those pages directly, because if the pages exist, the
 * corresponding DLM lock must exist, so the page content must be valid.
 *
 * In the fast read implementation, llite speculatively finds and reads pages
 * in the memory cache. There are three scenarios for fast read:
 *   - If the page exists and is uptodate, the kernel VM will provide the
 *     data and CLIO won't intervene;
 *   - If the page was brought into memory by read ahead, it will be exported
 *     and read ahead parameters will be updated;
 *   - Otherwise the page is not in memory and we can't do fast read.
 *     Therefore, it will go back and invoke normal read, i.e., a cl_io will
 *     be created and a DLM lock will be requested.
 *
 * POSIX compliance: the POSIX standard states that read is intended to be
 * atomic. The Lustre read implementation is in line with the Linux kernel
 * read implementation and neither of them complies with the POSIX standard
 * in this matter. Fast read doesn't make the situation worse on a single
 * node, but it may interleave write results from multiple nodes due to the
 * short read handling in ll_file_aio_read().
 *
 * \param env - lu_env
 * \param iocb - kiocb from kernel
 * \param iter - user space buffers where the data will be copied
 *
 * \retval - number of bytes read, or error code if an error occurred.
 */
static ssize_t
ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ll_inode_info *lli = ll_i2info(file_inode(iocb->ki_filp));
	ssize_t result;

	if (!ll_sbi_has_fast_read(ll_i2sbi(file_inode(iocb->ki_filp))))
		return 0;

	/* NB: we can't do direct IO for fast read because it will need a lock
	 * to make the IO engine happy.
	 */
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return 0;

	if (ll_layout_version_get(lli) == CL_LAYOUT_GEN_NONE)
		return 0;

	result = generic_file_read_iter(iocb, iter);

	/* If the first page is not in cache, generic_file_aio_read() will
	 * return -ENODATA. Fall back to the full read path.
	 * See the corresponding code in ll_readpage().
	 *
	 * If we raced with page deletion, we might get EIO. Rather than add
	 * locking to the fast path for this rare case, fall back to the full
	 * read path. (See vvp_io_read_start() for the rest of the handling.)
	 */
	if (result == -ENODATA || result == -EIO)
		result = 0;

	if (result > 0) {
		ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
		ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
				   LPROC_LL_READ_BYTES, result);
	}

	return result;
}
/**
 * Confine the read iter lest we read beyond the EOF
 *
 * \param iocb [in]	kernel iocb
 * \param to [in]	reader iov_iter
 *
 * \retval <0	failure
 * \retval 0	success
 * \retval >0	@iocb->ki_pos has passed the EOF
 */
static int file_read_confine_iter(struct lu_env *env, struct kiocb *iocb,
				  struct iov_iter *to)
{
	struct cl_attr *attr = vvp_env_thread_attr(env);
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ll_inode_info *lli = ll_i2info(inode);
	loff_t read_end = iocb->ki_pos + iov_iter_count(to);
	loff_t kms;
	loff_t size;
	int rc = 0;

	cl_object_attr_lock(lli->lli_clob);
	rc = cl_object_attr_get(env, lli->lli_clob, attr);
	cl_object_attr_unlock(lli->lli_clob);
	if (rc != 0)
		return rc;

	kms = attr->cat_kms;
	/* if read beyond end-of-file, adjust read count */
	if (kms > 0 && (iocb->ki_pos >= kms || read_end > kms)) {
		rc = ll_glimpse_size(inode);
		if (rc != 0)
			return rc;

		size = i_size_read(inode);
		if (iocb->ki_pos >= size || read_end > size) {
			CDEBUG(D_VFSTRACE,
			       "%s: read [%llu, %llu] over eof, kms %llu, file_size %llu.\n",
			       file_dentry(file)->d_name.name,
			       iocb->ki_pos, read_end, kms, size);

			if (iocb->ki_pos >= size)
				return 1;

			if (read_end > size)
				iov_iter_truncate(to, size - iocb->ki_pos);
		}
	}

	return rc;
}
/*
 * Read from a file (through the page cache).
 */
static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct lu_env *env;
	struct vvp_io_args *args;
	struct file *file = iocb->ki_filp;
	ssize_t result;
	ssize_t rc2;
	__u16 refcheck;
	ktime_t kstart = ktime_get();
	bool cached;
	bool stale_data = false;

	ENTRY;
	CDEBUG(D_VFSTRACE|D_IOTRACE,
	       "START file %s:"DFID", ppos: %lld, count: %zu\n",
	       file_dentry(file)->d_name.name,
	       PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
	       iov_iter_count(to));

	if (!iov_iter_count(to))
		RETURN(0);

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));
2115 else if (result > 0)
2119 * Currently when PCC read failed, we do not fall back to the
2120 * normal read path, just return the error.
2121 * The resaon is that: for RW-PCC, the file data may be modified
2122 * in the PCC and inconsistent with the data on OSTs (or file
2123 * data has been removed from the Lustre file system), at this
2124 * time, fallback to the normal read path may read the wrong
2126 * TODO: for RO-PCC (readonly PCC), fall back to normal read
2127 * path: read data from data copy on OSTs.
2129 result = pcc_file_read_iter(iocb, to, &cached);
2133 ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to));
2135 result = ll_do_fast_read(iocb, to);
2136 if (result < 0 || iov_iter_count(to) == 0)
2139 args = ll_env_args(env);
2140 args->u.normal.via_iter = to;
2141 args->u.normal.via_iocb = iocb;
2143 rc2 = ll_file_io_generic(env, args, file, CIT_READ,
2144 &iocb->ki_pos, iov_iter_count(to));
2147 else if (result == 0)
2151 cl_env_put(env, &refcheck);
2153 if (stale_data && result > 0) {
2155 * we've reached EOF before the read, the data read are cached
2158 iov_iter_truncate(to, 0);
2163 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
2164 file->private_data, iocb->ki_pos, result,
2166 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ,
2167 ktime_us_delta(ktime_get(), kstart));
2171 "COMPLETED: file %s:"DFID", ppos: %lld, count: %zu\n",
2172 file_dentry(file)->d_name.name,
2173 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
2174 iov_iter_count(to));
2180 * Similar trick to ll_do_fast_read, this improves write speed for tiny writes.
2181 * If a page is already in the page cache and dirty (and some other things -
2182 * See ll_tiny_write_begin for the instantiation of these rules), then we can
2183 * write to it without doing a full I/O, because Lustre already knows about it
2184 * and will write it out. This saves a lot of processing time.
2186 * All writes here are within one page, so exclusion is handled by the page
2187 * lock on the vm page. We do not do tiny writes for writes which touch
2188 * multiple pages because it's very unlikely multiple sequential pages are
2189 * already dirty.
2191 * We limit these to < PAGE_SIZE because PAGE_SIZE writes are relatively common
2192 * and are unlikely to be to already dirty pages.
2194 * Attribute updates are important here, we do them in ll_tiny_write_end.
2196 static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter)
2198 ssize_t count = iov_iter_count(iter);
2199 struct file *file = iocb->ki_filp;
2200 struct inode *inode = file_inode(file);
2201 bool lock_inode = !IS_NOSEC(inode);
2206 /* Restrict writes to single page and < PAGE_SIZE. See comment at top
2207 * of function for why.
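	 * Worked example (assuming PAGE_SIZE of 4096): pos = 4000 and
	 * count = 200 give (4000 & 4095) + 200 = 4200 > 4096, so the write
	 * would span two pages and is not treated as a tiny write.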
2209 if (count >= PAGE_SIZE ||
2210 (iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
2213 if (unlikely(lock_inode))
2214 ll_inode_lock(inode);
2215 result = __generic_file_write_iter(iocb, iter);
2217 if (unlikely(lock_inode))
2218 ll_inode_unlock(inode);
2220 /* If the page is not already dirty, ll_tiny_write_begin returns
2221 * -ENODATA. We continue on to normal write.
2223 if (result == -ENODATA)
2227 ll_heat_add(inode, CIT_WRITE, result);
2228 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
2231 CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);
2237 * Write to a file (through the page cache).
2239 static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2241 struct vvp_io_args *args;
2243 ssize_t rc_tiny = 0, rc_normal;
2244 struct file *file = iocb->ki_filp;
2247 ktime_t kstart = ktime_get();
2252 CDEBUG(D_VFSTRACE|D_IOTRACE,
2253 "START file %s:"DFID", ppos: %lld, count: %zu\n",
2254 file_dentry(file)->d_name.name,
2255 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
2256 iov_iter_count(from));
2258 if (!iov_iter_count(from))
2259 GOTO(out, rc_normal = 0);
2262 * When a PCC write fails, we usually do not fall back to the normal
2263 * write path; we just return the error. But there is a special case:
2264 * when the returned error code is -ENOSPC, due to running out of space
2265 * on the PCC HSM backend, we fall back to the normal I/O path and
2266 * retry the I/O. As the file is in HSM released state, the write will
2267 * first restore the file data to the OSTs and then be redone. The
2268 * restore process will revoke the layout lock and detach the file
2269 * from the PCC cache automatically.
2271 result = pcc_file_write_iter(iocb, from, &cached);
2272 if (cached && result != -ENOSPC && result != -EDQUOT)
2273 GOTO(out, rc_normal = result);
2275 /* NB: we can't do direct IO for tiny writes because they use the page
2276 * cache, we can't do sync writes because tiny writes can't flush
2277 * pages, and we can't do append writes because we can't guarantee the
2278 * required DLM locks are held to protect file size.
2280 if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
2281 !(file->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
2282 rc_tiny = ll_do_tiny_write(iocb, from);
2284 /* In case of error, go on and try the normal write - only stop if the
2285 * tiny write completed the I/O.
2287 if (iov_iter_count(from) == 0)
2288 GOTO(out, rc_normal = rc_tiny);
2290 env = cl_env_get(&refcheck);
2292 RETURN(PTR_ERR(env));
2294 args = ll_env_args(env);
2295 args->u.normal.via_iter = from;
2296 args->u.normal.via_iocb = iocb;
2298 rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE,
2299 &iocb->ki_pos, iov_iter_count(from));
2301 /* On success, combine bytes written. */
2302 if (rc_tiny >= 0 && rc_normal > 0)
2303 rc_normal += rc_tiny;
2304 /* On error, only return error from normal write if tiny write did not
2305 * write any bytes. Otherwise return bytes written by tiny write.
2307 else if (rc_tiny > 0)
2308 rc_normal = rc_tiny;
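	/* Worked example: if the tiny write stored 100 bytes and the normal
	 * write then wrote 4000 more, we return 4100; if the normal write
	 * failed but the tiny write stored 100 bytes, we return 100 so that
	 * partial progress is not lost.
	 */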
2310 cl_env_put(env, &refcheck);
2312 if (rc_normal > 0) {
2313 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
2314 file->private_data, iocb->ki_pos,
2316 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE,
2317 ktime_us_delta(ktime_get(), kstart));
2321 "COMPLETED: file %s:"DFID", ppos: %lld, count: %zu\n",
2322 file_dentry(file)->d_name.name,
2323 PFID(ll_inode2fid(file_inode(file))), iocb->ki_pos,
2324 iov_iter_count(from));
2329 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
2331 * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
2333 static int ll_file_get_iov_count(const struct iovec *iov,
2334 unsigned long *nr_segs, size_t *count,
2340 for (seg = 0; seg < *nr_segs; seg++) {
2341 const struct iovec *iv = &iov[seg];
2344 * If any segment has a negative length, or the cumulative
2345 * length ever wraps negative then return -EINVAL.
2348 if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
2350 if (access_ok(access_flags, iv->iov_base, iv->iov_len))
2355 cnt -= iv->iov_len; /* This segment is no good */
2362 static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
2363 unsigned long nr_segs, loff_t pos)
2370 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_READ);
2377 # ifdef HAVE_IOV_ITER_INIT_DIRECTION
2378 iov_iter_init(&to, READ, iov, nr_segs, iov_count);
2379 # else /* !HAVE_IOV_ITER_INIT_DIRECTION */
2380 iov_iter_init(&to, iov, nr_segs, iov_count, 0);
2381 # endif /* HAVE_IOV_ITER_INIT_DIRECTION */
2383 result = ll_file_read_iter(iocb, &to);
2388 static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
2391 struct iovec iov = { .iov_base = buf, .iov_len = count };
2400 init_sync_kiocb(&kiocb, file);
2401 kiocb.ki_pos = *ppos;
2402 #ifdef HAVE_KIOCB_KI_LEFT
2403 kiocb.ki_left = count;
2404 #elif defined(HAVE_KI_NBYTES)
2405 kiocb.ki_nbytes = count;
2408 result = ll_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
2409 *ppos = kiocb.ki_pos;
2415 * Write to a file (through the page cache).
2418 static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2419 unsigned long nr_segs, loff_t pos)
2421 struct iov_iter from;
2426 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_WRITE);
2433 # ifdef HAVE_IOV_ITER_INIT_DIRECTION
2434 iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
2435 # else /* !HAVE_IOV_ITER_INIT_DIRECTION */
2436 iov_iter_init(&from, iov, nr_segs, iov_count, 0);
2437 # endif /* HAVE_IOV_ITER_INIT_DIRECTION */
2439 result = ll_file_write_iter(iocb, &from);
2444 static ssize_t ll_file_write(struct file *file, const char __user *buf,
2445 size_t count, loff_t *ppos)
2447 struct iovec iov = { .iov_base = (void __user *)buf,
2457 init_sync_kiocb(&kiocb, file);
2458 kiocb.ki_pos = *ppos;
2459 #ifdef HAVE_KIOCB_KI_LEFT
2460 kiocb.ki_left = count;
2461 #elif defined(HAVE_KI_NBYTES)
2462 kiocb.ki_nbytes = count;
2465 result = ll_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
2466 *ppos = kiocb.ki_pos;
2470 #endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
2472 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
2473 __u64 flags, struct lov_user_md *lum, int lum_size)
2475 struct lookup_intent oit = {
2477 .it_flags = flags | MDS_OPEN_BY_FID,
2482 if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
2483 le32_to_cpu(LOV_MAGIC_MAGIC)) {
2484 /* this code will only exist for big-endian systems */
2485 lustre_swab_lov_user_md(lum, 0);
2488 ll_inode_size_lock(inode);
2489 rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
2491 GOTO(out_unlock, rc);
2493 ll_release_openhandle(dentry, &oit);
2496 ll_inode_size_unlock(inode);
2497 ll_intent_release(&oit);
2502 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
2503 struct lov_mds_md **lmmp, int *lmm_size,
2504 struct ptlrpc_request **request)
2506 struct ll_sb_info *sbi = ll_i2sbi(inode);
2507 struct mdt_body *body;
2508 struct lov_mds_md *lmm = NULL;
2509 struct ptlrpc_request *req = NULL;
2510 struct md_op_data *op_data;
2515 rc = ll_get_default_mdsize(sbi, &lmmsize);
2519 op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
2520 strlen(filename), lmmsize,
2521 LUSTRE_OPC_ANY, NULL);
2522 if (IS_ERR(op_data))
2523 RETURN(PTR_ERR(op_data));
2525 op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
2526 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
2527 ll_finish_md_op_data(op_data);
2529 CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
2534 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2535 LASSERT(body != NULL); /* checked by mdc_getattr_name */
2537 lmmsize = body->mbo_eadatasize;
2539 if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
2541 GOTO(out, rc = -ENODATA);
2543 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
2544 LASSERT(lmm != NULL);
2546 if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
2547 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
2548 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_COMP_V1) &&
2549 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_FOREIGN))
2550 GOTO(out, rc = -EPROTO);
2553 * This is coming from the MDS, so is probably in
2554 * little endian. We convert it to host endian before
2555 * passing it to userspace.
2557 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
2558 int stripe_count = 0;
2560 if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
2561 lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
2562 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
2563 if (le32_to_cpu(lmm->lmm_pattern) &
2564 LOV_PATTERN_F_RELEASED)
2566 lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
2568 /* if the function is called for a directory, we should
2569 * avoid swabbing non-existent lsm objects
2571 if (lmm->lmm_magic == LOV_MAGIC_V1 &&
2572 S_ISREG(body->mbo_mode))
2573 lustre_swab_lov_user_md_objects(
2574 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
2576 else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
2577 S_ISREG(body->mbo_mode))
2578 lustre_swab_lov_user_md_objects(
2579 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
2581 } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
2582 lustre_swab_lov_comp_md_v1(
2583 (struct lov_comp_md_v1 *)lmm);
2587 if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) {
2588 struct lov_comp_md_v1 *comp_v1 = NULL;
2589 struct lov_comp_md_entry_v1 *ent;
2590 struct lov_user_md_v1 *v1 = NULL;
2594 comp_v1 = (struct lov_comp_md_v1 *)lmm;
2595 /* Dump the striping information */
2596 for (; i < comp_v1->lcm_entry_count; i++) {
2597 ent = &comp_v1->lcm_entries[i];
2598 off = ent->lcme_offset;
2599 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2601 "comp[%d]: stripe_count=%u, stripe_size=%u\n",
2602 i, v1->lmm_stripe_count, v1->lmm_stripe_size);
2606 GOTO(out, rc = -EINVAL);
2608 lmm->lmm_stripe_count = v1->lmm_stripe_count;
2609 lmm->lmm_stripe_size = v1->lmm_stripe_size;
2611 * Return valid stripe_count and stripe_size instead of 0 for
2612 * DoM files to avoid divide-by-zero for older userspace that
2613 * calls this ioctl, e.g. lustre ADIO driver.
2615 if (lmm->lmm_stripe_count == 0)
2616 lmm->lmm_stripe_count = 1;
2617 if (lmm->lmm_stripe_size == 0) {
2618 /* Since the first component of the file data is placed
2619 * on the MDT for faster access, the stripe_size of the
2620 * second component is what matters for applications doing large IOs.
2623 if (lmm->lmm_pattern & LOV_PATTERN_MDT)
2624 i = comp_v1->lcm_entry_count > 1 ? 1 : 0;
2626 i = comp_v1->lcm_entry_count > 1 ?
2627 comp_v1->lcm_entry_count - 1 : 0;
2628 ent = &comp_v1->lcm_entries[i];
2629 off = ent->lcme_offset;
2630 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2631 lmm->lmm_stripe_size = v1->lmm_stripe_size;
2636 *lmm_size = lmmsize;
2641 static int ll_lov_setea(struct inode *inode, struct file *file,
2644 __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
2645 struct lov_user_md *lump;
2646 int lum_size = sizeof(*lump) + sizeof(struct lov_user_ost_data);
2650 if (!capable(CAP_SYS_ADMIN))
2653 OBD_ALLOC_LARGE(lump, lum_size);
2657 if (copy_from_user(lump, arg, lum_size))
2658 GOTO(out_lump, rc = -EFAULT);
2660 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, lump,
2662 cl_lov_delay_create_clear(&file->f_flags);
2665 OBD_FREE_LARGE(lump, lum_size);
2669 static int ll_file_getstripe(struct inode *inode, void __user *lum, size_t size)
2676 /* exit before doing any work if pointer is bad */
2677 if (unlikely(!ll_access_ok(lum, sizeof(struct lov_user_md))))
2680 env = cl_env_get(&refcheck);
2682 RETURN(PTR_ERR(env));
2684 rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum, size);
2685 cl_env_put(env, &refcheck);
2689 static int ll_lov_setstripe(struct inode *inode, struct file *file,
2692 struct lov_user_md __user *lum = arg;
2693 struct lov_user_md *klum;
2695 __u64 flags = FMODE_WRITE;
2698 rc = ll_copy_user_md(lum, &klum);
2703 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, klum,
2708 rc = put_user(0, &lum->lmm_stripe_count);
2712 rc = ll_layout_refresh(inode, &gen);
2716 rc = ll_file_getstripe(inode, arg, lum_size);
2717 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2718 ll_i2info(inode)->lli_clob) {
2719 struct iattr attr = { 0 };
2721 rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, &attr,
2722 OP_XVALID_FLAGS, LUSTRE_ENCRYPT_FL);
2725 cl_lov_delay_create_clear(&file->f_flags);
2728 OBD_FREE_LARGE(klum, lum_size);
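/* Take a group lock on behalf of userspace (the LL_IOC_GROUP_LOCK ioctl).
 * Illustrative usage from an application, assuming an open Lustre file
 * descriptor fd and a nonzero group id gid chosen by the application:
 *	ioctl(fd, LL_IOC_GROUP_LOCK, gid);
 *	... I/O cooperatively shared within the group ...
 *	ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
 */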
2734 ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
2736 struct ll_inode_info *lli = ll_i2info(inode);
2737 struct cl_object *obj = lli->lli_clob;
2738 struct ll_file_data *fd = file->private_data;
2739 struct ll_grouplock grouplock;
2744 CWARN("group id for group lock must not be 0\n");
2748 if (ll_file_nolock(file))
2749 RETURN(-EOPNOTSUPP);
2751 if (file->f_flags & O_NONBLOCK) {
2752 if (!mutex_trylock(&lli->lli_group_mutex))
2755 mutex_lock(&lli->lli_group_mutex);
2758 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
2759 CWARN("group lock already existed with gid %lu\n",
2760 fd->fd_grouplock.lg_gid);
2761 GOTO(out, rc = -EINVAL);
2763 if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
2764 if (file->f_flags & O_NONBLOCK)
2765 GOTO(out, rc = -EAGAIN);
2766 mutex_unlock(&lli->lli_group_mutex);
2767 wait_var_event(&lli->lli_group_users, !lli->lli_group_users);
2768 GOTO(retry, rc = 0);
2770 LASSERT(fd->fd_grouplock.lg_lock == NULL);
2773 * XXX: group lock needs to protect all OST objects while PFL
2774 * can add new OST objects during the IO, so we instantiate
2775 * all OST objects before taking the group lock.
2780 struct cl_layout cl = {
2781 .cl_is_composite = false,
2783 struct lu_extent ext = {
2785 .e_end = OBD_OBJECT_EOF,
2788 env = cl_env_get(&refcheck);
2790 GOTO(out, rc = PTR_ERR(env));
2792 rc = cl_object_layout_get(env, obj, &cl);
2793 if (rc >= 0 && cl.cl_is_composite)
2794 rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
2797 cl_env_put(env, &refcheck);
2802 rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
2803 arg, (file->f_flags & O_NONBLOCK), &grouplock);
2808 fd->fd_flags |= LL_FILE_GROUP_LOCKED;
2809 fd->fd_grouplock = grouplock;
2810 if (lli->lli_group_users == 0)
2811 lli->lli_group_gid = grouplock.lg_gid;
2812 lli->lli_group_users++;
2814 CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
2816 mutex_unlock(&lli->lli_group_mutex);
2821 static int ll_put_grouplock(struct inode *inode, struct file *file,
2824 struct ll_inode_info *lli = ll_i2info(inode);
2825 struct ll_file_data *fd = file->private_data;
2826 struct ll_grouplock grouplock;
2830 mutex_lock(&lli->lli_group_mutex);
2831 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
2832 CWARN("no group lock held\n");
2833 GOTO(out, rc = -EINVAL);
2836 LASSERT(fd->fd_grouplock.lg_lock != NULL);
2838 if (fd->fd_grouplock.lg_gid != arg) {
2839 CWARN("group lock %lu doesn't match current id %lu\n",
2840 arg, fd->fd_grouplock.lg_gid);
2841 GOTO(out, rc = -EINVAL);
2844 grouplock = fd->fd_grouplock;
2845 memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
2846 fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
2848 cl_put_grouplock(&grouplock);
2850 lli->lli_group_users--;
2851 if (lli->lli_group_users == 0) {
2852 lli->lli_group_gid = 0;
2853 wake_up_var(&lli->lli_group_users);
2855 CDEBUG(D_INFO, "group lock %lu released\n", arg);
2858 mutex_unlock(&lli->lli_group_mutex);
2864 * Close inode open handle
2866 * \param dentry [in] dentry which contains the inode
2867 * \param it [in,out] intent which contains open info and result
2870 * \retval <0 failure
2872 int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
2874 struct inode *inode = dentry->d_inode;
2875 struct obd_client_handle *och;
2881 /* Root ? Do nothing. */
2882 if (is_root_inode(inode))
2885 /* No open handle to close? Move away */
2886 if (!it_disposition(it, DISP_OPEN_OPEN))
2889 LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
2891 OBD_ALLOC(och, sizeof(*och));
2893 GOTO(out, rc = -ENOMEM);
2895 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
2899 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
2901 /* this one is in place of ll_file_open */
2902 if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
2903 ptlrpc_req_finished(it->it_request);
2904 it_clear_disposition(it, DISP_ENQ_OPEN_REF);
2910 * Get size for inode for which FIEMAP mapping is requested.
2911 * Make the FIEMAP get_info call and return the result.
2912 * \param fiemap kernel buffer to hold extents
2913 * \param num_bytes kernel buffer size
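 *
 * Illustrative sizing (standard fiemap convention, not specific to this
 * function): a caller wanting up to N extents back allocates
 * sizeof(struct fiemap) + N * sizeof(struct fiemap_extent) bytes and
 * passes that as num_bytes.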
2915 static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
2921 struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
2924 /* Checks for fiemap flags */
2925 if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
2926 fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
2930 /* Check for FIEMAP_FLAG_SYNC */
2931 if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
2932 rc = filemap_fdatawrite(inode->i_mapping);
2937 env = cl_env_get(&refcheck);
2939 RETURN(PTR_ERR(env));
2941 if (i_size_read(inode) == 0) {
2942 rc = ll_glimpse_size(inode);
2947 fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLPROJID;
2948 obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
2949 obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
2951 /* If filesize is 0, then there would be no objects for mapping */
2952 if (fmkey.lfik_oa.o_size == 0) {
2953 fiemap->fm_mapped_extents = 0;
2957 fmkey.lfik_fiemap = *fiemap;
2959 rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
2960 &fmkey, fiemap, &num_bytes);
2962 cl_env_put(env, &refcheck);
2966 static int fid2path_for_enc_file(struct inode *parent, char *gfpath,
2969 struct dentry *de = NULL, *de_parent = d_find_any_alias(parent);
2970 struct llcrypt_str lltr = LLTR_INIT(NULL, 0);
2971 struct llcrypt_str de_name;
2972 char *p, *ptr = gfpath;
2973 size_t len = 0, len_orig = 0;
2974 int enckey = -1, nameenc = -1;
2978 while ((p = strsep(&gfpath, "/")) != NULL) {
2986 len_orig = strlen(p);
2988 rc = sscanf(p, "["SFID"]", RFID(&fid));
2990 p = strchr(p, ']') + 1;
2996 if (!IS_ENCRYPTED(parent)) {
2997 if (gfpathlen < len + 1) {
3002 memmove(ptr, p, len);
3006 gfpathlen -= len + 1;
3010 /* From here, we know parent is encrypted */
3013 rc = llcrypt_prepare_readdir(parent);
3014 if (rc && rc != -ENOKEY) {
3021 if (llcrypt_has_encryption_key(parent))
3027 llcrypt_policy_has_filename_enc(parent);
3030 /* Even if names are not encrypted, we still need to call
3031 * ll_fname_disk_to_usr in order to decode names as they are
3032 * coming from the wire.
3034 rc = llcrypt_fname_alloc_buffer(parent, NAME_MAX + 1, &lltr);
3042 rc = ll_fname_disk_to_usr(parent, 0, 0, &de_name,
3045 llcrypt_fname_free_buffer(&lltr);
3049 lltr.name[lltr.len] = '\0';
3051 if (lltr.len <= len_orig && gfpathlen >= lltr.len + 1) {
3052 memcpy(ptr, lltr.name, lltr.len);
3057 gfpathlen -= lltr.len + 1;
3061 llcrypt_fname_free_buffer(&lltr);
3063 if (rc == -EOVERFLOW) {
3070 /* We reached the end of the string, which means
3071 * we are dealing with the last component in the path.
3072 * So skip a useless lookup and exit.
3078 if (enckey == 0 || nameenc == 0)
3081 ll_inode_lock(parent);
3082 de = lookup_one_len(p, de_parent, len);
3083 ll_inode_unlock(parent);
3084 if (IS_ERR_OR_NULL(de) || !de->d_inode) {
3090 parent = de->d_inode;
3097 if (!IS_ERR_OR_NULL(de))
3102 int __ll_fid2path(struct inode *inode, struct getinfo_fid2path *gfout,
3103 size_t outsize, __u32 pathlen_orig)
3105 struct obd_export *exp = ll_i2mdexp(inode);
3108 /* Append the root FID after gfout to let the MDT know the root FID so
3109 * that it can look up the correct path; this is mainly for fileset.
3110 * Old servers without fileset mount support will ignore this.
3112 *gfout->gf_u.gf_root_fid = *ll_inode2fid(inode);
3114 /* Call mdc_iocontrol */
3115 rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
3117 if (!rc && gfout->gf_pathlen && gfout->gf_u.gf_path[0] == '/') {
3118 /* by convention, server side (mdt_path_current()) puts
3119 * a leading '/' to tell client that we are dealing with
3122 rc = fid2path_for_enc_file(inode, gfout->gf_u.gf_path,
3124 if (!rc && strlen(gfout->gf_u.gf_path) > pathlen_orig)
3131 int ll_fid2path(struct inode *inode, void __user *arg)
3133 const struct getinfo_fid2path __user *gfin = arg;
3134 __u32 pathlen, pathlen_orig;
3135 struct getinfo_fid2path *gfout;
3141 if (!capable(CAP_DAC_READ_SEARCH) &&
3142 !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
3145 /* Only need to get the buflen */
3146 if (get_user(pathlen, &gfin->gf_pathlen))
3149 if (pathlen > PATH_MAX)
3151 pathlen_orig = pathlen;
3154 outsize = sizeof(*gfout) + pathlen;
3155 OBD_ALLOC(gfout, outsize);
3159 if (copy_from_user(gfout, arg, sizeof(*gfout)))
3160 GOTO(gf_free, rc = -EFAULT);
3162 gfout->gf_pathlen = pathlen;
3163 rc = __ll_fid2path(inode, gfout, outsize, pathlen_orig);
3167 if (copy_to_user(arg, gfout, sizeof(*gfout) + pathlen_orig))
3171 OBD_FREE(gfout, outsize);
3172 if (rc == -ENAMETOOLONG) {
3173 pathlen += PATH_MAX;
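/* e.g. a call that asked for a 1024-byte path and failed with
 * -ENAMETOOLONG is retried with a 1024 + PATH_MAX byte buffer
 */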
3180 ll_ioc_data_version(struct inode *inode, struct ioc_data_version *ioc)
3182 struct cl_object *obj = ll_i2info(inode)->lli_clob;
3190 ioc->idv_version = 0;
3191 ioc->idv_layout_version = UINT_MAX;
3193 /* If no file object is initialized, we consider its version to be 0. */
3197 env = cl_env_get(&refcheck);
3199 RETURN(PTR_ERR(env));
3201 io = vvp_env_thread_io(env);
3203 io->u.ci_data_version.dv_data_version = 0;
3204 io->u.ci_data_version.dv_layout_version = UINT_MAX;
3205 io->u.ci_data_version.dv_flags = ioc->idv_flags;
3208 if (cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj) == 0)
3209 result = cl_io_loop(env, io);
3211 result = io->ci_result;
3213 ioc->idv_version = io->u.ci_data_version.dv_data_version;
3214 ioc->idv_layout_version = io->u.ci_data_version.dv_layout_version;
3215 cl_io_fini(env, io);
3217 if (unlikely(io->ci_need_restart))
3220 cl_env_put(env, &refcheck);
3226 * Read the data_version for inode.
3228 * This value is computed using the stripe object version on the OST.
3229 * The version is computed using server-side locking.
3231 * @param flags whether to do a sync on the OST side;
3233 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
3234 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
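 *
 * Illustrative use (see ll_hsm_release() below): to sample a version that
 * is stable against cached writes, a caller does
 *	ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
 * so all cached pages are dropped under LCK_PW before the version is read.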
3236 int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
3238 struct ioc_data_version ioc = { .idv_flags = flags };
3241 rc = ll_ioc_data_version(inode, &ioc);
3243 *data_version = ioc.idv_version;
3249 * Trigger a HSM release request for the provided inode.
3251 int ll_hsm_release(struct inode *inode)
3254 struct obd_client_handle *och = NULL;
3255 __u64 data_version = 0;
3261 CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
3262 ll_i2sbi(inode)->ll_fsname,
3263 PFID(&ll_i2info(inode)->lli_fid));
3265 och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
3267 GOTO(out, rc = PTR_ERR(och));
3269 /* Grab latest data_version and [am]time values */
3270 rc = ll_data_version(inode, &data_version,
3271 LL_DV_WR_FLUSH | LL_DV_SZ_UPDATE);
3275 env = cl_env_get(&refcheck);
3277 GOTO(out, rc = PTR_ERR(env));
3279 rc = ll_merge_attr(env, inode);
3280 cl_env_put(env, &refcheck);
3282 /* If an error happened, we have the wrong size for the file.
3288 /* Release the file.
3289 * NB: lease lock handle is released in mdc_hsm_release_pack() because
3290 * we still need it to pack l_remote_handle to MDT. */
3291 rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
3297 if (och != NULL && !IS_ERR(och)) /* close the file */
3298 ll_lease_close(och, inode, NULL);
3303 struct ll_swap_stack {
3306 struct inode *inode1;
3307 struct inode *inode2;
3312 static int ll_swap_layouts(struct file *file1, struct file *file2,
3313 struct lustre_swap_layouts *lsl)
3315 struct mdc_swap_layouts msl;
3316 struct md_op_data *op_data;
3319 struct ll_swap_stack *llss = NULL;
3322 OBD_ALLOC_PTR(llss);
3326 llss->inode1 = file_inode(file1);
3327 llss->inode2 = file_inode(file2);
3329 rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
3333 /* we use 2 bools because they are easier to swap than 2 bits */
3334 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
3335 llss->check_dv1 = true;
3337 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
3338 llss->check_dv2 = true;
3340 /* we cannot use lsl->sl_dvX directly because we may swap them */
3341 llss->dv1 = lsl->sl_dv1;
3342 llss->dv2 = lsl->sl_dv2;
3344 rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
3345 if (rc == 0) /* same file, done! */
3348 if (rc < 0) { /* sequentialize it */
3349 swap(llss->inode1, llss->inode2);
3351 swap(llss->dv1, llss->dv2);
3352 swap(llss->check_dv1, llss->check_dv2);
3356 if (gid != 0) { /* application asks to flush dirty cache */
3357 rc = ll_get_grouplock(llss->inode1, file1, gid);
3361 rc = ll_get_grouplock(llss->inode2, file2, gid);
3363 ll_put_grouplock(llss->inode1, file1, gid);
3368 /* ultimate check: before swapping the layouts we check whether
3369 * the data version has changed (if requested) */
3370 if (llss->check_dv1) {
3371 rc = ll_data_version(llss->inode1, &dv, 0);
3374 if (dv != llss->dv1)
3375 GOTO(putgl, rc = -EAGAIN);
3378 if (llss->check_dv2) {
3379 rc = ll_data_version(llss->inode2, &dv, 0);
3382 if (dv != llss->dv2)
3383 GOTO(putgl, rc = -EAGAIN);
3386 /* struct md_op_data is used to send the swap args to the mdt;
3387 * only the flags are missing, so we pass struct mdc_swap_layouts
3388 * through md_op_data->op_data */
3389 /* flags from user space have to be converted before they are sent to
3390 * the server; no flag is sent today, they are only used on the client */
3393 op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
3394 0, LUSTRE_OPC_ANY, &msl);
3395 if (IS_ERR(op_data))
3396 GOTO(free, rc = PTR_ERR(op_data));
3398 rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
3399 sizeof(*op_data), op_data, NULL);
3400 ll_finish_md_op_data(op_data);
3407 ll_put_grouplock(llss->inode2, file2, gid);
3408 ll_put_grouplock(llss->inode1, file1, gid);
3418 int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
3420 struct obd_export *exp = ll_i2mdexp(inode);
3421 struct md_op_data *op_data;
3425 /* Detect out-of-range masks */
3426 if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
3429 /* Non-root users are forbidden to set or clear flags which are
3430 * NOT defined in HSM_USER_MASK. */
3431 if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
3432 !capable(CAP_SYS_ADMIN))
3435 if (!exp_connect_archive_id_array(exp)) {
3436 /* Detect out-of-range archive id */
3437 if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
3438 (hss->hss_archive_id > LL_HSM_ORIGIN_MAX_ARCHIVE))
3442 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3443 LUSTRE_OPC_ANY, hss);
3444 if (IS_ERR(op_data))
3445 RETURN(PTR_ERR(op_data));
3447 rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, exp, sizeof(*op_data),
3450 ll_finish_md_op_data(op_data);
3455 static int ll_hsm_import(struct inode *inode, struct file *file,
3456 struct hsm_user_import *hui)
3458 struct hsm_state_set *hss = NULL;
3459 struct iattr *attr = NULL;
3463 if (!S_ISREG(inode->i_mode))
3469 GOTO(out, rc = -ENOMEM);
3471 hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
3472 hss->hss_archive_id = hui->hui_archive_id;
3473 hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
3474 rc = ll_hsm_state_set(inode, hss);
3478 OBD_ALLOC_PTR(attr);
3480 GOTO(out, rc = -ENOMEM);
3482 attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
3483 attr->ia_mode |= S_IFREG;
3484 attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
3485 attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
3486 attr->ia_size = hui->hui_size;
3487 attr->ia_mtime.tv_sec = hui->hui_mtime;
3488 attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
3489 attr->ia_atime.tv_sec = hui->hui_atime;
3490 attr->ia_atime.tv_nsec = hui->hui_atime_ns;
3492 attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
3493 ATTR_UID | ATTR_GID |
3494 ATTR_MTIME | ATTR_MTIME_SET |
3495 ATTR_ATIME | ATTR_ATIME_SET;
3498 /* inode lock owner set in ll_setattr_raw() */
3499 rc = ll_setattr_raw(file_dentry(file), attr, 0, true);
3502 inode_unlock(inode);
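/* Map a kernel fmode to the user-visible lease type bits; e.g. a lease held
 * with FMODE_READ|FMODE_WRITE reports LL_LEASE_RDLCK|LL_LEASE_WRLCK.
 */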
3514 static inline long ll_lease_type_from_fmode(fmode_t fmode)
3516 return ((fmode & FMODE_READ) ? LL_LEASE_RDLCK : 0) |
3517 ((fmode & FMODE_WRITE) ? LL_LEASE_WRLCK : 0);
3520 static int ll_file_futimes_3(struct file *file, const struct ll_futimes_3 *lfu)
3522 struct inode *inode = file_inode(file);
3524 .ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
3525 ATTR_MTIME | ATTR_MTIME_SET |
3528 .tv_sec = lfu->lfu_atime_sec,
3529 .tv_nsec = lfu->lfu_atime_nsec,
3532 .tv_sec = lfu->lfu_mtime_sec,
3533 .tv_nsec = lfu->lfu_mtime_nsec,
3536 .tv_sec = lfu->lfu_ctime_sec,
3537 .tv_nsec = lfu->lfu_ctime_nsec,
3543 if (!capable(CAP_SYS_ADMIN))
3546 if (!S_ISREG(inode->i_mode))
3550 /* inode lock owner set in ll_setattr_raw() */
3551 rc = ll_setattr_raw(file_dentry(file), &ia, OP_XVALID_CTIME_SET,
3553 inode_unlock(inode);
3558 static enum cl_lock_mode cl_mode_user_to_kernel(enum lock_mode_user mode)
3561 case MODE_READ_USER:
3563 case MODE_WRITE_USER:
3570 static const char *const user_lockname[] = LOCK_MODE_NAMES;
3572 /* Used to allow the upper layers of the client to request an LDLM lock
3573 * without doing an actual read or write.
3575 * Used for ladvise lockahead to manually request specific locks.
3577 * \param[in] file file this ladvise lock request is on
3578 * \param[in] ladvise ladvise struct describing this lock request
3580 * \retval 0 success, no detailed result available (sync requests
3581 * and requests sent to the server [not handled locally]
3582 * cannot return detailed results)
3583 * \retval LLA_RESULT_{SAME,DIFFERENT} - detailed result of the lock request,
3584 * see definitions for details.
3585 * \retval negative negative errno on error
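 *
 * Worked example (assuming 4096-byte pages): a lock request for bytes
 * [0, 1048575] is converted below to the page range [0, 255] before the
 * DLM lock is requested.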
3587 int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise)
3589 struct lu_env *env = NULL;
3590 struct cl_io *io = NULL;
3591 struct cl_lock *lock = NULL;
3592 struct cl_lock_descr *descr = NULL;
3593 struct dentry *dentry = file->f_path.dentry;
3594 struct inode *inode = dentry->d_inode;
3595 enum cl_lock_mode cl_mode;
3596 off_t start = ladvise->lla_start;
3597 off_t end = ladvise->lla_end;
3604 "Lock request: file=%pd, inode=%p, mode=%s start=%llu, end=%llu\n",
3605 dentry, dentry->d_inode,
3606 user_lockname[ladvise->lla_lockahead_mode], (__u64) start,
3609 cl_mode = cl_mode_user_to_kernel(ladvise->lla_lockahead_mode);
3611 GOTO(out, result = cl_mode);
3613 /* Get IO environment */
3614 result = cl_io_get(inode, &env, &io, &refcheck);
3618 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
3621 * nothing to do for this io. This currently happens when
3622 * stripe sub-objects are not yet created.
3624 result = io->ci_result;
3625 } else if (result == 0) {
3626 lock = vvp_env_lock(env);
3627 descr = &lock->cll_descr;
3629 descr->cld_obj = io->ci_obj;
3630 /* Convert byte offsets to pages */
3631 descr->cld_start = start >> PAGE_SHIFT;
3632 descr->cld_end = end >> PAGE_SHIFT;
3633 descr->cld_mode = cl_mode;
3634 /* CEF_MUST is used because we do not want to convert a
3635 * lockahead request to a lockless lock */
3636 descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND;
3638 if (ladvise->lla_peradvice_flags & LF_ASYNC)
3639 descr->cld_enq_flags |= CEF_SPECULATIVE;
3641 result = cl_lock_request(env, io, lock);
3643 /* On success, we need to release the lock */
3645 cl_lock_release(env, lock);
3647 cl_io_fini(env, io);
3648 cl_env_put(env, &refcheck);
3650 /* -ECANCELED indicates a matching lock with a different extent
3651 * was already present, and -EEXIST indicates a matching lock
3652 * on exactly the same extent was already present.
3653 * We convert them to positive values for userspace to make
3654 * recognizing true errors easier.
3655 * Note we can only return these detailed results on async requests,
3656 * as sync requests look the same as i/o requests for locking. */
3657 if (result == -ECANCELED)
3658 result = LLA_RESULT_DIFFERENT;
3659 else if (result == -EEXIST)
3660 result = LLA_RESULT_SAME;
3665 static const char *const ladvise_names[] = LU_LADVISE_NAMES;
3667 static int ll_ladvise_sanity(struct inode *inode,
3668 struct llapi_lu_ladvise *ladvise)
3670 struct ll_sb_info *sbi = ll_i2sbi(inode);
3671 enum lu_ladvise_type advice = ladvise->lla_advice;
3672 /* Note the per-advice flags field is 32 bits wide, so per-advice flags
3673 * must be in the first 32 bits of enum ladvise_flags */
3674 __u32 flags = ladvise->lla_peradvice_flags;
3675 /* 3 lines at 80 characters per line, should be plenty */
3678 if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) {
3681 "%s: advice with value '%d' not recognized, last supported advice is %s (value '%d'): rc = %d\n",
3682 sbi->ll_fsname, advice,
3683 ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc);
3687 /* Per-advice checks */
3689 case LU_LADVISE_LOCKNOEXPAND:
3690 if (flags & ~LF_LOCKNOEXPAND_MASK) {
3692 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
3693 "rc = %d\n", sbi->ll_fsname, flags,
3694 ladvise_names[advice], rc);
3698 case LU_LADVISE_LOCKAHEAD:
3699 /* Currently only READ and WRITE modes can be requested */
3700 if (ladvise->lla_lockahead_mode >= MODE_MAX_USER ||
3701 ladvise->lla_lockahead_mode == 0) {
3703 CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: "
3704 "rc = %d\n", sbi->ll_fsname,
3705 ladvise->lla_lockahead_mode,
3706 ladvise_names[advice], rc);
3710 case LU_LADVISE_WILLREAD:
3711 case LU_LADVISE_DONTNEED:
3713 /* Note the fall-through above - these checks apply to all advices
3714 * except LOCKNOEXPAND */
3715 if (flags & ~LF_DEFAULT_MASK) {
3717 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
3718 "rc = %d\n", sbi->ll_fsname, flags,
3719 ladvise_names[advice], rc);
3722 if (ladvise->lla_start >= ladvise->lla_end) {
3724 CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) "
3725 "for %s: rc = %d\n", sbi->ll_fsname,
3726 ladvise->lla_start, ladvise->lla_end,
3727 ladvise_names[advice], rc);
3739 * Give file access advice
3741 * The ladvise interface is similar to the Linux fadvise() system call,
3742 * except it forwards the advice directly from the Lustre client to the
3743 * server. The server-side code will apply appropriate read-ahead and
3744 * caching techniques for the corresponding files.
3746 * A typical workload for ladvise is e.g. a bunch of different clients are
3747 * doing small random reads of a file, so prefetching pages into OSS cache
3748 * with big linear reads before the random IO is a net benefit. Fetching
3749 * all that data into each client cache with fadvise() may not be, due to
3750 * much more data being sent to the client.
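 *
 * Illustrative flow (hypothetical values): userspace fills a
 * struct llapi_ladvise_hdr with lah_count advices, e.g. a single
 * LU_LADVISE_WILLREAD advice covering [lla_start, lla_end), and issues
 * the LL_IOC_LADVISE ioctl; each advice is sanity-checked and then
 * submitted through a CIT_LADVISE cl_io as done here.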
3752 static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags,
3753 struct llapi_lu_ladvise *ladvise)
3757 struct cl_ladvise_io *lio;
3762 env = cl_env_get(&refcheck);
3764 RETURN(PTR_ERR(env));
3766 io = vvp_env_thread_io(env);
3767 io->ci_obj = ll_i2info(inode)->lli_clob;
3769 /* initialize parameters for ladvise */
3770 lio = &io->u.ci_ladvise;
3771 lio->li_start = ladvise->lla_start;
3772 lio->li_end = ladvise->lla_end;
3773 lio->li_fid = ll_inode2fid(inode);
3774 lio->li_advice = ladvise->lla_advice;
3775 lio->li_flags = flags;
3777 if (cl_io_init(env, io, CIT_LADVISE, io->ci_obj) == 0)
3778 rc = cl_io_loop(env, io);
3782 cl_io_fini(env, io);
3783 cl_env_put(env, &refcheck);
3787 static int ll_lock_noexpand(struct file *file, int flags)
3789 struct ll_file_data *fd = file->private_data;
3791 fd->ll_lock_no_expand = !(flags & LF_UNSET);
3796 #ifndef HAVE_FILEATTR_GET
3797 int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
3800 struct fsxattr fsxattr;
3802 if (copy_from_user(&fsxattr, uarg, sizeof(fsxattr)))
3805 fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
3806 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
3807 fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
3808 fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
3809 if (copy_to_user(uarg, &fsxattr, sizeof(fsxattr)))
3816 int ll_ioctl_check_project(struct inode *inode, __u32 xflags,
3820 * Project Quota ID state is only allowed to change from within the init
3821 * namespace. Enforce that restriction only if we are trying to change
3822 * the quota ID state. Everything else is allowed in user namespaces.
3824 if (current_user_ns() == &init_user_ns) {
3826 * The caller is allowed to change the project ID. If it is being
3827 * changed, make sure that the new value is valid.
3829 if (ll_i2info(inode)->lli_projid != projid &&
3830 !projid_valid(make_kprojid(&init_user_ns, projid)))
3836 if (ll_i2info(inode)->lli_projid != projid)
3839 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
3840 if (!(xflags & FS_XFLAG_PROJINHERIT))
3843 if (xflags & FS_XFLAG_PROJINHERIT)
3850 int ll_set_project(struct inode *inode, __u32 xflags, __u32 projid)
3852 struct ptlrpc_request *req = NULL;
3853 struct md_op_data *op_data;
3854 struct cl_object *obj;
3855 unsigned int inode_flags;
3858 CDEBUG(D_QUOTA, DFID" xflags=%x projid=%u\n",
3859 PFID(ll_inode2fid(inode)), xflags, projid);
3860 rc = ll_ioctl_check_project(inode, xflags, projid);
3864 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3865 LUSTRE_OPC_ANY, NULL);
3866 if (IS_ERR(op_data))
3867 RETURN(PTR_ERR(op_data));
3869 inode_flags = ll_xflags_to_inode_flags(xflags);
3870 op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags);
3871 if (xflags & FS_XFLAG_PROJINHERIT)
3872 op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
3874 /* pass projid to md_op_data */
3875 op_data->op_projid = projid;
3877 op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
3878 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
3879 ptlrpc_req_finished(req);
3881 GOTO(out_fsxattr, rc);
3882 ll_update_inode_flags(inode, op_data->op_attr_flags);
3884 /* Avoid an OST RPC if this ioctl is only setting the project inherit flag */
3885 if (xflags == 0 || xflags == FS_XFLAG_PROJINHERIT)
3886 GOTO(out_fsxattr, rc);
3888 obj = ll_i2info(inode)->lli_clob;
3890 struct iattr attr = { 0 };
3892 rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS, xflags);
3896 ll_finish_md_op_data(op_data);
3900 #ifndef HAVE_FILEATTR_GET
3901 int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
3904 struct fsxattr fsxattr;
3908 if (copy_from_user(&fsxattr, uarg, sizeof(fsxattr)))
3911 RETURN(ll_set_project(inode, fsxattr.fsx_xflags,
3912 fsxattr.fsx_projid));
3916 int ll_ioctl_project(struct file *file, unsigned int cmd, void __user *uarg)
3918 struct lu_project lu_project;
3919 struct dentry *dentry = file_dentry(file);
3920 struct inode *inode = file_inode(file);
3921 struct dentry *child_dentry = NULL;
3922 int rc = 0, name_len;
3924 if (copy_from_user(&lu_project, uarg, sizeof(lu_project)))
3927 /* apply child dentry if name is valid */
3928 name_len = strnlen(lu_project.project_name, NAME_MAX);
3929 if (name_len > 0 && name_len <= NAME_MAX) {
3930 ll_inode_lock(inode);
3931 child_dentry = lookup_one_len(lu_project.project_name,
3933 ll_inode_unlock(inode);
3934 if (IS_ERR(child_dentry)) {
3935 rc = PTR_ERR(child_dentry);
3938 inode = child_dentry->d_inode;
3943 } else if (name_len > NAME_MAX) {
3948 switch (lu_project.project_type) {
3949 case LU_PROJECT_SET:
3950 rc = ll_set_project(inode, lu_project.project_xflags,
3951 lu_project.project_id);
3953 case LU_PROJECT_GET:
3954 lu_project.project_xflags =
3955 ll_inode_flags_to_xflags(inode->i_flags);
3956 if (test_bit(LLIF_PROJECT_INHERIT,
3957 &ll_i2info(inode)->lli_flags))
3958 lu_project.project_xflags |= FS_XFLAG_PROJINHERIT;
3959 lu_project.project_id = ll_i2info(inode)->lli_projid;
3960 if (copy_to_user(uarg, &lu_project, sizeof(lu_project))) {
3970 if (!IS_ERR_OR_NULL(child_dentry))
3975 static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
3978 struct inode *inode = file_inode(file);
3979 struct ll_file_data *fd = file->private_data;
3980 struct ll_inode_info *lli = ll_i2info(inode);
3981 struct obd_client_handle *och = NULL;
3982 struct split_param sp;
3983 struct pcc_param param;
3984 bool lease_broken = false;
3986 enum mds_op_bias bias = 0;
3988 struct file *layout_file = NULL;
3990 size_t data_size = 0;
3991 bool attached = false;
3996 mutex_lock(&lli->lli_och_mutex);
3997 if (fd->fd_lease_och != NULL) {
3998 och = fd->fd_lease_och;
3999 fd->fd_lease_och = NULL;
4001 mutex_unlock(&lli->lli_och_mutex);
4006 fmode = och->och_flags;
4008 switch (ioc->lil_flags) {
4009 case LL_LEASE_RESYNC_DONE:
4010 if (ioc->lil_count > IOC_IDS_MAX)
4011 GOTO(out_lease_close, rc = -EINVAL);
4013 data_size = offsetof(typeof(*ioc), lil_ids[ioc->lil_count]);
4014 OBD_ALLOC(data, data_size);
4016 GOTO(out_lease_close, rc = -ENOMEM);
4018 if (copy_from_user(data, uarg, data_size))
4019 GOTO(out_lease_close, rc = -EFAULT);
4021 bias = MDS_CLOSE_RESYNC_DONE;
4023 case LL_LEASE_LAYOUT_MERGE:
4024 if (ioc->lil_count != 1)
4025 GOTO(out_lease_close, rc = -EINVAL);
4027 uarg += sizeof(*ioc);
4028 if (copy_from_user(&fdv, uarg, sizeof(fdv)))
4029 GOTO(out_lease_close, rc = -EFAULT);
4031 layout_file = fget(fdv);
4033 GOTO(out_lease_close, rc = -EBADF);
4035 if ((file->f_flags & O_ACCMODE) == O_RDONLY ||
4036 (layout_file->f_flags & O_ACCMODE) == O_RDONLY)
4037 GOTO(out_lease_close, rc = -EPERM);
4039 data = file_inode(layout_file);
4040 bias = MDS_CLOSE_LAYOUT_MERGE;
4042 case LL_LEASE_LAYOUT_SPLIT: {
4045 if (ioc->lil_count != 2)
4046 GOTO(out_lease_close, rc = -EINVAL);
4048 uarg += sizeof(*ioc);
4049 if (copy_from_user(&fdv, uarg, sizeof(fdv)))
4050 GOTO(out_lease_close, rc = -EFAULT);
4052 uarg += sizeof(fdv);
4053 if (copy_from_user(&mirror_id, uarg, sizeof(mirror_id)))
4054 GOTO(out_lease_close, rc = -EFAULT);
4055 if (mirror_id >= MIRROR_ID_NEG)
4056 GOTO(out_lease_close, rc = -EINVAL);
4058 layout_file = fget(fdv);
4060 GOTO(out_lease_close, rc = -EBADF);
4062 /* if layout_file == file, it means to destroy the mirror */
4063 sp.sp_inode = file_inode(layout_file);
4064 sp.sp_mirror_id = (__u16)mirror_id;
4066 bias = MDS_CLOSE_LAYOUT_SPLIT;
4069 case LL_LEASE_PCC_ATTACH:
4070 if (ioc->lil_count != 1)
4073 if (IS_ENCRYPTED(inode))
4074 RETURN(-EOPNOTSUPP);
4076 uarg += sizeof(*ioc);
4077 if (copy_from_user(&param.pa_archive_id, uarg, sizeof(__u32)))
4078 GOTO(out_lease_close, rc2 = -EFAULT);
4080 rc2 = pcc_readwrite_attach(file, inode, param.pa_archive_id);
4082 GOTO(out_lease_close, rc2);
4085 /* Grab latest data version */
4086 rc2 = ll_data_version(inode, &param.pa_data_version,
4089 GOTO(out_lease_close, rc2);
4092 bias = MDS_PCC_ATTACH;
4095 /* without close intent */
4100 rc = ll_lease_close_intent(och, inode, &lease_broken, bias, data);
4104 rc = ll_lease_och_release(inode, file);
4113 if (ioc->lil_flags == LL_LEASE_RESYNC_DONE && data)
4114 OBD_FREE(data, data_size);
4119 if (ioc->lil_flags == LL_LEASE_PCC_ATTACH) {
4122 rc = pcc_readwrite_attach_fini(file, inode,
4123 param.pa_layout_gen,
4128 ll_layout_refresh(inode, &fd->fd_layout_version);
4131 rc = ll_lease_type_from_fmode(fmode);
4135 static long ll_file_set_lease(struct file *file, struct ll_ioc_lease *ioc,
4138 struct inode *inode = file_inode(file);
4139 struct ll_inode_info *lli = ll_i2info(inode);
4140 struct ll_file_data *fd = file->private_data;
4141 struct obd_client_handle *och = NULL;
4142 __u64 open_flags = 0;
4148 switch (ioc->lil_mode) {
4149 case LL_LEASE_WRLCK:
4150 if (!(file->f_mode & FMODE_WRITE))
4152 fmode = FMODE_WRITE;
4154 case LL_LEASE_RDLCK:
4155 if (!(file->f_mode & FMODE_READ))
4159 case LL_LEASE_UNLCK:
4160 RETURN(ll_file_unlock_lease(file, ioc, uarg));
4165 CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
4167 /* apply for lease */
4168 if (ioc->lil_flags & LL_LEASE_RESYNC)
4169 open_flags = MDS_OPEN_RESYNC;
4170 och = ll_lease_open(inode, file, fmode, open_flags);
4172 RETURN(PTR_ERR(och));
4174 if (ioc->lil_flags & LL_LEASE_RESYNC) {
4175 rc = ll_lease_file_resync(och, inode, uarg);
4177 ll_lease_close(och, inode, NULL);
4180 rc = ll_layout_refresh(inode, &fd->fd_layout_version);
4182 ll_lease_close(och, inode, NULL);
4188 mutex_lock(&lli->lli_och_mutex);
4189 if (fd->fd_lease_och == NULL) {
4190 fd->fd_lease_och = och;
4193 mutex_unlock(&lli->lli_och_mutex);
4195 /* cannot happen, since only exclusive leases are supported for now */
4196 ll_lease_close(och, inode, &lease_broken);
4202 static void ll_heat_get(struct inode *inode, struct lu_heat *heat)
4204 struct ll_inode_info *lli = ll_i2info(inode);
4205 struct ll_sb_info *sbi = ll_i2sbi(inode);
4206 __u64 now = ktime_get_real_seconds();
4209 spin_lock(&lli->lli_heat_lock);
4210 heat->lh_flags = lli->lli_heat_flags;
4211 for (i = 0; i < heat->lh_count; i++)
4212 heat->lh_heat[i] = obd_heat_get(&lli->lli_heat_instances[i],
4213 now, sbi->ll_heat_decay_weight,
4214 sbi->ll_heat_period_second);
4215 spin_unlock(&lli->lli_heat_lock);
4218 static int ll_heat_set(struct inode *inode, enum lu_heat_flag flags)
4220 struct ll_inode_info *lli = ll_i2info(inode);
4223 spin_lock(&lli->lli_heat_lock);
4224 if (flags & LU_HEAT_FLAG_CLEAR)
4225 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
4227 if (flags & LU_HEAT_FLAG_OFF)
4228 lli->lli_heat_flags |= LU_HEAT_FLAG_OFF;
4230 lli->lli_heat_flags &= ~LU_HEAT_FLAG_OFF;
4232 spin_unlock(&lli->lli_heat_lock);
4238 ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4240 struct inode *inode = file_inode(file);
4241 struct ll_file_data *fd = file->private_data;
4242 void __user *uarg = (void __user *)arg;
4246 CDEBUG(D_VFSTRACE|D_IOCTL, "VFS Op:inode="DFID"(%pK) cmd=%x arg=%lx\n",
4247 PFID(ll_inode2fid(inode)), inode, cmd, arg);
4248 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
4250 /* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
4251 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
4254 /* can't do a generic karg == NULL check here, since it is too noisy and
4255 * we need to return -ENOTTY for unsupported ioctls instead of -EINVAL.
4258 case LL_IOC_GETFLAGS:
4259 /* Get the current value of the file flags */
4260 return put_user(fd->fd_flags, (int __user *)arg);
4261 case LL_IOC_SETFLAGS:
4262 case LL_IOC_CLRFLAGS:
4263 /* Set or clear specific file flags */
4264 /* XXX This probably needs checks to ensure the flags are
4265 * not abused, and to handle any flag side effects.
4267 if (get_user(flags, (int __user *)arg))
4270 if (cmd == LL_IOC_SETFLAGS) {
4271 if ((flags & LL_FILE_IGNORE_LOCK) &&
4272 !(file->f_flags & O_DIRECT)) {
4274 CERROR("%s: unable to disable locking on non-O_DIRECT file "DFID": rc = %d\n",
4275 current->comm, PFID(ll_inode2fid(inode)),
4280 fd->fd_flags |= flags;
4282 fd->fd_flags &= ~flags;
4285 case LL_IOC_LOV_SETSTRIPE:
4286 case LL_IOC_LOV_SETSTRIPE_NEW:
4287 RETURN(ll_lov_setstripe(inode, file, uarg));
4288 case LL_IOC_LOV_SETEA:
4289 RETURN(ll_lov_setea(inode, file, uarg));
4290 case LL_IOC_LOV_SWAP_LAYOUTS: {
4292 struct lustre_swap_layouts lsl;
4294 if (copy_from_user(&lsl, uarg, sizeof(lsl)))
4297 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
4300 file2 = fget(lsl.sl_fd);
4304 /* O_WRONLY or O_RDWR */
4305 if ((file2->f_flags & O_ACCMODE) == O_RDONLY)
4306 GOTO(out, rc = -EPERM);
4308 if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
4309 struct obd_client_handle *och = NULL;
4310 struct ll_inode_info *lli;
4311 struct inode *inode2;
4313 lli = ll_i2info(inode);
4314 mutex_lock(&lli->lli_och_mutex);
4315 if (fd->fd_lease_och != NULL) {
4316 och = fd->fd_lease_och;
4317 fd->fd_lease_och = NULL;
4319 mutex_unlock(&lli->lli_och_mutex);
4321 GOTO(out, rc = -ENOLCK);
4322 inode2 = file_inode(file2);
4323 rc = ll_swap_layouts_close(och, inode, inode2);
4325 rc = ll_swap_layouts(file, file2, &lsl);
4331 case LL_IOC_LOV_GETSTRIPE:
4332 case LL_IOC_LOV_GETSTRIPE_NEW:
4333 RETURN(ll_file_getstripe(inode, uarg, 0));
4334 case LL_IOC_GROUP_LOCK:
4335 RETURN(ll_get_grouplock(inode, file, arg));
4336 case LL_IOC_GROUP_UNLOCK:
4337 RETURN(ll_put_grouplock(inode, file, arg));
4338 case LL_IOC_DATA_VERSION: {
4339 struct ioc_data_version idv;
4342 if (copy_from_user(&idv, uarg, sizeof(idv)))
4345 idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
4346 rc = ll_ioc_data_version(inode, &idv);
4348 if (rc == 0 && copy_to_user(uarg, &idv, sizeof(idv)))
4353 case LL_IOC_HSM_STATE_GET: {
4354 struct md_op_data *op_data;
4355 struct hsm_user_state *hus;
4358 if (!ll_access_ok(uarg, sizeof(*hus)))
4365 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4366 LUSTRE_OPC_ANY, hus);
4367 if (IS_ERR(op_data)) {
4368 rc = PTR_ERR(op_data);
4370 rc = obd_iocontrol(cmd, ll_i2mdexp(inode),
4371 sizeof(*op_data), op_data, NULL);
4373 if (copy_to_user(uarg, hus, sizeof(*hus)))
4376 ll_finish_md_op_data(op_data);
4381 case LL_IOC_HSM_STATE_SET: {
4382 struct hsm_state_set *hss;
4389 if (copy_from_user(hss, uarg, sizeof(*hss)))
4392 rc = ll_hsm_state_set(inode, hss);
4397 case LL_IOC_HSM_ACTION: {
4398 struct md_op_data *op_data;
4399 struct hsm_current_action *hca;
4403 if (!ll_access_ok(uarg, sizeof(*hca)))
4410 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4411 LUSTRE_OPC_ANY, hca);
4412 if (IS_ERR(op_data)) {
4414 RETURN(PTR_ERR(op_data));
4417 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
4420 GOTO(skip_copy, rc);
4422 /* The hsm_current_action retrieved from the server could
4423 * contain corrupt information. If the data is incorrect, collect
4424 * debug information. We still send the data, even if incorrect,
4425 * to userland to handle.
4427 action = hsm_user_action2name(hca->hca_action);
4428 if (strcmp(action, "UNKNOWN") == 0 ||
4429 hca->hca_state > HPS_DONE) {
4431 "HSM current state %s action %s, offset = %llu, length %llu\n",
4432 hsm_progress_state2name(hca->hca_state), action,
4433 hca->hca_location.offset, hca->hca_location.length);
4436 if (copy_to_user(uarg, hca, sizeof(*hca)))
4439 ll_finish_md_op_data(op_data);
4443 case LL_IOC_SET_LEASE_OLD: {
4444 struct ll_ioc_lease ioc = { .lil_mode = arg };
4446 RETURN(ll_file_set_lease(file, &ioc, 0));
4448 case LL_IOC_SET_LEASE: {
4449 struct ll_ioc_lease ioc;
4451 if (copy_from_user(&ioc, uarg, sizeof(ioc)))
4454 RETURN(ll_file_set_lease(file, &ioc, uarg));
4456 case LL_IOC_GET_LEASE: {
4457 struct ll_inode_info *lli = ll_i2info(inode);
4458 struct ldlm_lock *lock = NULL;
4461 mutex_lock(&lli->lli_och_mutex);
4462 if (fd->fd_lease_och != NULL) {
4463 struct obd_client_handle *och = fd->fd_lease_och;
4465 lock = ldlm_handle2lock(&och->och_lease_handle);
4467 lock_res_and_lock(lock);
4468 if (!ldlm_is_cancel(lock))
4469 fmode = och->och_flags;
4471 unlock_res_and_lock(lock);
4472 LDLM_LOCK_PUT(lock);
4475 mutex_unlock(&lli->lli_och_mutex);
4477 RETURN(ll_lease_type_from_fmode(fmode));
4479 case LL_IOC_HSM_IMPORT: {
4480 struct hsm_user_import *hui;
4486 if (copy_from_user(hui, uarg, sizeof(*hui)))
4489 rc = ll_hsm_import(inode, file, hui);
4494 case LL_IOC_FUTIMES_3: {
4495 struct ll_futimes_3 lfu;
4497 if (copy_from_user(&lfu, uarg, sizeof(lfu)))
4500 RETURN(ll_file_futimes_3(file, &lfu));
4502 case LL_IOC_LADVISE: {
4503 struct llapi_ladvise_hdr *k_ladvise_hdr;
4504 struct llapi_ladvise_hdr __user *u_ladvise_hdr;
4507 int alloc_size = sizeof(*k_ladvise_hdr);
4510 u_ladvise_hdr = uarg;
4511 OBD_ALLOC_PTR(k_ladvise_hdr);
4512 if (k_ladvise_hdr == NULL)
4515 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
4516 GOTO(out_ladvise, rc = -EFAULT);
4518 if (k_ladvise_hdr->lah_magic != LADVISE_MAGIC ||
4519 k_ladvise_hdr->lah_count < 1)
4520 GOTO(out_ladvise, rc = -EINVAL);
4522 num_advise = k_ladvise_hdr->lah_count;
4523 if (num_advise >= LAH_COUNT_MAX)
4524 GOTO(out_ladvise, rc = -EFBIG);
4526 OBD_FREE_PTR(k_ladvise_hdr);
4527 alloc_size = offsetof(typeof(*k_ladvise_hdr),
4528 lah_advise[num_advise]);
4529 OBD_ALLOC(k_ladvise_hdr, alloc_size);
4530 if (k_ladvise_hdr == NULL)
4534 * TODO: submit multiple advices to one server in a single RPC
4536 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
4537 GOTO(out_ladvise, rc = -EFAULT);
4539 for (i = 0; i < num_advise; i++) {
4540 struct llapi_lu_ladvise *k_ladvise =
4541 &k_ladvise_hdr->lah_advise[i];
4542 struct llapi_lu_ladvise __user *u_ladvise =
4543 &u_ladvise_hdr->lah_advise[i];
4545 rc = ll_ladvise_sanity(inode, k_ladvise);
4547 GOTO(out_ladvise, rc);
4549 switch (k_ladvise->lla_advice) {
4550 case LU_LADVISE_LOCKNOEXPAND:
4551 rc = ll_lock_noexpand(file,
4552 k_ladvise->lla_peradvice_flags);
4553 GOTO(out_ladvise, rc);
4554 case LU_LADVISE_LOCKAHEAD:
4556 rc = ll_file_lock_ahead(file, k_ladvise);
4559 GOTO(out_ladvise, rc);
4562 &u_ladvise->lla_lockahead_result))
4563 GOTO(out_ladvise, rc = -EFAULT);
4566 rc = ll_ladvise(inode, file,
4567 k_ladvise_hdr->lah_flags,
4570 GOTO(out_ladvise, rc);
4577 OBD_FREE(k_ladvise_hdr, alloc_size);
4580 case LL_IOC_FLR_SET_MIRROR: {
4581 /* mirror I/O must be direct to avoid polluting page cache
4583 if (!(file->f_flags & O_DIRECT))
4586 fd->fd_designated_mirror = arg;
4589 case LL_IOC_HEAT_GET: {
4590 struct lu_heat uheat;
4591 struct lu_heat *heat;
4594 if (copy_from_user(&uheat, uarg, sizeof(uheat)))
4597 if (uheat.lh_count > OBD_HEAT_COUNT)
4598 uheat.lh_count = OBD_HEAT_COUNT;
4600 size = offsetof(typeof(uheat), lh_heat[uheat.lh_count]);
4601 OBD_ALLOC(heat, size);
4605 heat->lh_count = uheat.lh_count;
4606 ll_heat_get(inode, heat);
4607 rc = copy_to_user(uarg, heat, size);
4608 OBD_FREE(heat, size);
4609 RETURN(rc ? -EFAULT : 0);
4611 case LL_IOC_HEAT_SET: {
4614 if (copy_from_user(&flags, uarg, sizeof(flags)))
4617 rc = ll_heat_set(inode, flags);
4620 case LL_IOC_PCC_DETACH: {
4621 struct lu_pcc_detach *detach;
4623 OBD_ALLOC_PTR(detach);
4627 if (copy_from_user(detach, uarg, sizeof(*detach)))
4628 GOTO(out_detach_free, rc = -EFAULT);
4630 if (!S_ISREG(inode->i_mode))
4631 GOTO(out_detach_free, rc = -EINVAL);
4633 if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
4634 GOTO(out_detach_free, rc = -EPERM);
4636 rc = pcc_ioctl_detach(inode, detach->pccd_opt);
4638 OBD_FREE_PTR(detach);
4641 case LL_IOC_PCC_STATE: {
4642 struct lu_pcc_state __user *ustate = uarg;
4643 struct lu_pcc_state *state;
4645 OBD_ALLOC_PTR(state);
4649 if (copy_from_user(state, ustate, sizeof(*state)))
4650 GOTO(out_state, rc = -EFAULT);
4652 rc = pcc_ioctl_state(file, inode, state);
4654 GOTO(out_state, rc);
4656 if (copy_to_user(ustate, state, sizeof(*state)))
4657 GOTO(out_state, rc = -EFAULT);
4660 OBD_FREE_PTR(state);
4664 rc = ll_iocontrol(inode, file, cmd, uarg);
4667 RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL, uarg));
4671 static loff_t ll_lseek(struct file *file, loff_t offset, int whence)
4673 struct inode *inode = file_inode(file);
4676 struct cl_lseek_io *lsio;
4683 env = cl_env_get(&refcheck);
4685 RETURN(PTR_ERR(env));
4687 io = vvp_env_thread_io(env);
4688 io->ci_obj = ll_i2info(inode)->lli_clob;
4689 ll_io_set_mirror(io, file);
4691 lsio = &io->u.ci_lseek;
4692 lsio->ls_start = offset;
4693 lsio->ls_whence = whence;
4694 lsio->ls_result = -ENXIO;
4697 rc = cl_io_init(env, io, CIT_LSEEK, io->ci_obj);
4699 struct vvp_io *vio = vvp_env_io(env);
4701 vio->vui_fd = file->private_data;
4702 rc = cl_io_loop(env, io);
4706 retval = rc ? : lsio->ls_result;
4707 cl_io_fini(env, io);
4708 } while (unlikely(io->ci_need_restart));
4710 cl_env_put(env, &refcheck);
4712 /* Without the key, the SEEK_HOLE return value has to be
4713 * rounded up to the next LUSTRE_ENCRYPTION_UNIT_SIZE boundary.
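 * Worked example (assuming a LUSTRE_ENCRYPTION_UNIT_SIZE of 4096): a raw
 * SEEK_HOLE result of 5000 is reported to userspace as 8192.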
4715 if (llcrypt_require_key(inode) == -ENOKEY && whence == SEEK_HOLE)
4716 retval = round_up(retval, LUSTRE_ENCRYPTION_UNIT_SIZE);
4721 static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
4723 struct inode *inode = file_inode(file);
4724 loff_t retval = offset, eof = 0;
4725 ktime_t kstart = ktime_get();
4729 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
4730 PFID(ll_inode2fid(inode)), inode, retval, retval,
4733 if (origin == SEEK_END) {
4734 retval = ll_glimpse_size(inode);
4737 eof = i_size_read(inode);
4740 if (origin == SEEK_HOLE || origin == SEEK_DATA) {
4744 /* flush local cache first if any */
4745 cl_sync_file_range(inode, offset, OBD_OBJECT_EOF,
4748 retval = ll_lseek(file, offset, origin);
4751 retval = vfs_setpos(file, retval, ll_file_maxbytes(inode));
4753 retval = generic_file_llseek_size(file, offset, origin,
4754 ll_file_maxbytes(inode), eof);
4757 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
4758 ktime_us_delta(ktime_get(), kstart));
4762 static int ll_flush(struct file *file, fl_owner_t id)
4764 struct inode *inode = file_inode(file);
4765 struct ll_inode_info *lli = ll_i2info(inode);
4766 struct ll_file_data *fd = file->private_data;
4769 LASSERT(!S_ISDIR(inode->i_mode));
4771 /* catch async errors that were recorded back when async writeback
4772 * failed for pages in this mapping. */
4773 rc = lli->lli_async_rc;
4774 lli->lli_async_rc = 0;
4775 if (lli->lli_clob != NULL) {
4776 err = lov_read_and_clear_async_rc(lli->lli_clob);
4781 /* The application has already been told about the write failure;
4782 * do not report it again. */
4783 if (fd->fd_write_failed)
4785 return rc ? -EIO : 0;
4789 * Called to make sure a portion of a file has been written out.
4790 * If @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to the OST.
4792 * Return how many pages have been written.
4794 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
4795 enum cl_fsync_mode mode, int ignore_layout)
4799 struct cl_fsync_io *fio;
4804 if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
4805 mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
4808 env = cl_env_get(&refcheck);
4810 RETURN(PTR_ERR(env));
4812 io = vvp_env_thread_io(env);
4813 io->ci_obj = ll_i2info(inode)->lli_clob;
4814 io->ci_ignore_layout = ignore_layout;
4816 /* initialize parameters for sync */
4817 fio = &io->u.ci_fsync;
4818 fio->fi_start = start;
4820 fio->fi_fid = ll_inode2fid(inode);
4821 fio->fi_mode = mode;
4822 fio->fi_nr_written = 0;
4824 if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
4825 result = cl_io_loop(env, io);
4827 result = io->ci_result;
4829 result = fio->fi_nr_written;
4830 cl_io_fini(env, io);
4831 cl_env_put(env, &refcheck);
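/*
 * In-tree usage sketch: callers flush cached pages for a range before
 * operations that must observe stable data; the SEEK_HOLE/SEEK_DATA
 * path above does exactly this before issuing the lseek IO:
 *
 *	rc = cl_sync_file_range(inode, offset, OBD_OBJECT_EOF,
 *				CL_FSYNC_LOCAL, 0);
 *
 * With CL_FSYNC_LOCAL only local writeback is performed; per the
 * comment above, any other mode additionally sends OST_SYNC RPCs to
 * the OSTs.
 */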
4837 * When dentry is provided (the 'else' case), file_dentry() may be
4838 * null and dentry must be used directly rather than pulled from
4839 * file_dentry() as is done otherwise.
4842 int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
4844 struct dentry *dentry = file_dentry(file);
4845 struct inode *inode = dentry->d_inode;
4846 struct ll_inode_info *lli = ll_i2info(inode);
4847 struct ptlrpc_request *req;
4848 ktime_t kstart = ktime_get();
4854 "VFS Op:inode="DFID"(%p), start %lld, end %lld, datasync %d\n",
4855 PFID(ll_inode2fid(inode)), inode, start, end, datasync);
4857 /* fsync's caller has already called _fdata{sync,write}; we want
4858 * that IO to finish before calling the osc and mdc sync methods */
4859 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
4861 /* catch async errors that were recorded back when async writeback
4862 * failed for pages in this mapping. */
4863 if (!S_ISDIR(inode->i_mode)) {
4864 err = lli->lli_async_rc;
4865 lli->lli_async_rc = 0;
4868 if (lli->lli_clob != NULL) {
4869 err = lov_read_and_clear_async_rc(lli->lli_clob);
4875 if (S_ISREG(inode->i_mode) && !lli->lli_synced_to_mds) {
4877 * only the first sync on MDS makes sense,
4878 * everything else is stored on OSTs
4880 err = md_fsync(ll_i2sbi(inode)->ll_md_exp,
4881 ll_inode2fid(inode), &req);
4885 lli->lli_synced_to_mds = true;
4886 ptlrpc_req_finished(req);
4890 if (S_ISREG(inode->i_mode)) {
4891 struct ll_file_data *fd = file->private_data;
4894 /* Sync metadata on MDT first, and then sync the cached data
4897 err = pcc_fsync(file, start, end, datasync, &cached);
4899 err = cl_sync_file_range(inode, start, end,
4901 if (rc == 0 && err < 0)
4904 fd->fd_write_failed = true;
4906 fd->fd_write_failed = false;
4910 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
4911 ktime_us_delta(ktime_get(), kstart));
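/*
 * Userspace view (sketch, standard syscalls only): fsync(2) and
 * fdatasync(2) both land here.  Per the comments above, page cache
 * writeback is waited for first, metadata is synced on the MDT, and
 * the cached data is then flushed to the OSTs.
 *
 *	#include <unistd.h>
 *
 *	int write_durably(int fd, const void *buf, size_t len)
 *	{
 *		if (write(fd, buf, len) < 0)
 *			return -1;
 *		// triggers ll_fsync(): MDS sync plus OST data flush
 *		return fsync(fd);
 *	}
 */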
4916 ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
4918 struct inode *inode = file_inode(file);
4919 struct ll_sb_info *sbi = ll_i2sbi(inode);
4920 struct ldlm_enqueue_info einfo = {
4921 .ei_type = LDLM_FLOCK,
4922 .ei_cb_cp = ldlm_flock_completion_ast,
4923 .ei_cbdata = file_lock,
4925 struct md_op_data *op_data;
4926 struct lustre_handle lockh = { 0 };
4927 union ldlm_policy_data flock = { { 0 } };
4928 int fl_type = file_lock->fl_type;
4929 ktime_t kstart = ktime_get();
4935 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
4936 PFID(ll_inode2fid(inode)), file_lock);
4938 if (file_lock->fl_flags & FL_FLOCK) {
4939 LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
4940 /* flocks are whole-file locks */
4941 flock.l_flock.end = OFFSET_MAX;
4942 /* For flocks, the owner is determined by the local file descriptor */
4943 flock.l_flock.owner = (unsigned long)file_lock->fl_file;
4944 } else if (file_lock->fl_flags & FL_POSIX) {
4945 flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
4946 flock.l_flock.start = file_lock->fl_start;
4947 flock.l_flock.end = file_lock->fl_end;
4951 flock.l_flock.pid = file_lock->fl_pid;
4953 #if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner)
4954 /* Somewhat ugly workaround for svc lockd.
4955 * lockd installs a custom fl_lmops->lm_compare_owner that checks
4956 * that the fl_owner is the same (which it always is on the local
4957 * node, I guess, between lockd processes) and then compares the pid.
4958 * As such we assign the pid to the owner field to make it all work;
4959 * a conflict with normal locks is unlikely since the pid space and
4960 * the pointer space for current->files do not intersect */
4961 if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
4962 flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
4967 einfo.ei_mode = LCK_PR;
4970 /* An unlock request may or may not have any relation to
4971 * existing locks so we may not be able to pass a lock handle
4972 * via a normal ldlm_lock_cancel() request. The request may even
4973 * unlock a byte range in the middle of an existing lock. In
4974 * order to process an unlock request we need all of the same
4975 * information that is given with a normal read or write record
4976 * lock request. To avoid creating another ldlm unlock (cancel)
4977 * message we'll treat a LCK_NL flock request as an unlock. */
4978 einfo.ei_mode = LCK_NL;
4981 einfo.ei_mode = LCK_PW;
4985 CERROR("%s: fcntl from '%s' unknown lock type=%d: rc = %d\n",
4986 sbi->ll_fsname, current->comm, fl_type, rc);
5001 flags = LDLM_FL_BLOCK_NOWAIT;
5007 flags = LDLM_FL_TEST_LOCK;
5011 CERROR("%s: fcntl from '%s' unknown lock command=%d: rc = %d\n",
5012 sbi->ll_fsname, current->comm, cmd, rc);
5016 /* Save the old mode so that if the mode in the lock changes we
5017 * can decrement the appropriate reader or writer refcount. */
5018 file_lock->fl_type = einfo.ei_mode;
5020 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
5021 LUSTRE_OPC_ANY, NULL);
5022 if (IS_ERR(op_data))
5023 RETURN(PTR_ERR(op_data));
5025 CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, "
5026 "start=%llu, end=%llu\n", PFID(ll_inode2fid(inode)),
5027 flock.l_flock.pid, flags, einfo.ei_mode,
5028 flock.l_flock.start, flock.l_flock.end);
5030 rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, &lockh,
5033 /* Restore the file lock type if not TEST lock. */
5034 if (!(flags & LDLM_FL_TEST_LOCK))
5035 file_lock->fl_type = fl_type;
5037 #ifdef HAVE_LOCKS_LOCK_FILE_WAIT
5038 if ((rc == 0 || file_lock->fl_type == F_UNLCK) &&
5039 !(flags & LDLM_FL_TEST_LOCK))
5040 rc2 = locks_lock_file_wait(file, file_lock);
5042 if ((file_lock->fl_flags & FL_FLOCK) &&
5043 (rc == 0 || file_lock->fl_type == F_UNLCK))
5044 rc2 = flock_lock_file_wait(file, file_lock);
5045 if ((file_lock->fl_flags & FL_POSIX) &&
5046 (rc == 0 || file_lock->fl_type == F_UNLCK) &&
5047 !(flags & LDLM_FL_TEST_LOCK))
5048 rc2 = posix_lock_file_wait(file, file_lock);
5049 #endif /* HAVE_LOCKS_LOCK_FILE_WAIT */
5051 if (rc2 && file_lock->fl_type != F_UNLCK) {
5052 einfo.ei_mode = LCK_NL;
5053 md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data,
5058 ll_finish_md_op_data(op_data);
5061 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK,
5062 ktime_us_delta(ktime_get(), kstart));
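/*
 * Userspace sketch (requires a '-o flock' or '-o localflock' mount,
 * see the file_operations tables near the end of this file): both
 * flock(2) and POSIX fcntl(2) locks funnel into ll_file_flock().
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int lock_whole_file(int fd)
 *	{
 *		struct flock fl = {
 *			.l_type   = F_WRLCK,	// write lock, LCK_PW above
 *			.l_whence = SEEK_SET,
 *			.l_start  = 0,
 *			.l_len    = 0,		// 0 means the whole file
 *		};
 *
 *		// blocks until the DLM grants the lock
 *		return fcntl(fd, F_SETLKW, &fl);
 *	}
 */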
5066 int ll_get_fid_by_name(struct inode *parent, const char *name,
5067 int namelen, struct lu_fid *fid,
5068 struct inode **inode)
5070 struct md_op_data *op_data = NULL;
5071 struct mdt_body *body;
5072 struct ptlrpc_request *req;
5076 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
5077 LUSTRE_OPC_ANY, NULL);
5078 if (IS_ERR(op_data))
5079 RETURN(PTR_ERR(op_data));
5081 op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
5082 rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
5083 ll_finish_md_op_data(op_data);
5087 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
5089 GOTO(out_req, rc = -EFAULT);
5091 *fid = body->mbo_fid1;
5094 rc = ll_prep_inode(inode, &req->rq_pill, parent->i_sb, NULL);
5096 ptlrpc_req_finished(req);
5100 int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum,
5101 const char *name, __u32 flags)
5103 struct dentry *dchild = NULL;
5104 struct inode *child_inode = NULL;
5105 struct md_op_data *op_data;
5106 struct ptlrpc_request *request = NULL;
5107 struct obd_client_handle *och = NULL;
5109 struct mdt_body *body;
5110 __u64 data_version = 0;
5111 size_t namelen = strlen(name);
5112 int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
5116 CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
5117 PFID(ll_inode2fid(parent)), name,
5118 lum->lum_stripe_offset, lum->lum_stripe_count);
5120 if (lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC) &&
5121 lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC_SPECIFIC))
5122 lustre_swab_lmv_user_md(lum);
5124 /* Get child FID first */
5125 qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
5128 dchild = d_lookup(file_dentry(file), &qstr);
5130 if (dchild->d_inode)
5131 child_inode = igrab(dchild->d_inode);
5136 rc = ll_get_fid_by_name(parent, name, namelen, NULL,
5145 if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
5146 OBD_CONNECT2_DIR_MIGRATE)) {
5147 if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
5148 ll_dir_striped(child_inode)) {
5149 CERROR("%s: MDT doesn't support stripe directory "
5150 "migration!\n", ll_i2sbi(parent)->ll_fsname);
5151 GOTO(out_iput, rc = -EOPNOTSUPP);
5156 * lfs migrate command needs to be blocked on the client
5157 * by checking the migrate FID against the FID of the
5160 if (is_root_inode(child_inode))
5161 GOTO(out_iput, rc = -EINVAL);
5163 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
5164 child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
5165 if (IS_ERR(op_data))
5166 GOTO(out_iput, rc = PTR_ERR(op_data));
5168 ll_inode_lock(child_inode);
5169 op_data->op_fid3 = *ll_inode2fid(child_inode);
5170 if (!fid_is_sane(&op_data->op_fid3)) {
5171 CERROR("%s: migrate %s, but FID "DFID" is insane\n",
5172 ll_i2sbi(parent)->ll_fsname, name,
5173 PFID(&op_data->op_fid3));
5174 GOTO(out_unlock, rc = -EINVAL);
5177 op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
5178 op_data->op_data = lum;
5179 op_data->op_data_size = lumlen;
5181 /* migrate dirent only for subdirs if MDS_MIGRATE_NSONLY set */
5182 if (S_ISDIR(child_inode->i_mode) && (flags & MDS_MIGRATE_NSONLY) &&
5183 lmv_dir_layout_changing(op_data->op_lso1))
5184 op_data->op_bias |= MDS_MIGRATE_NSONLY;
5187 if (S_ISREG(child_inode->i_mode)) {
5188 och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
5192 GOTO(out_unlock, rc);
5195 rc = ll_data_version(child_inode, &data_version,
5198 GOTO(out_close, rc);
5200 op_data->op_open_handle = och->och_open_handle;
5201 op_data->op_data_version = data_version;
5202 op_data->op_lease_handle = och->och_lease_handle;
5203 op_data->op_bias |= MDS_CLOSE_MIGRATE;
5205 spin_lock(&och->och_mod->mod_open_req->rq_lock);
5206 och->och_mod->mod_open_req->rq_replay = 0;
5207 spin_unlock(&och->och_mod->mod_open_req->rq_lock);
5210 rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data,
5211 op_data->op_name, op_data->op_namelen,
5212 op_data->op_name, op_data->op_namelen, &request);
5214 LASSERT(request != NULL);
5215 ll_update_times(request, parent);
5218 if (rc == 0 || rc == -EAGAIN) {
5219 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
5220 LASSERT(body != NULL);
5222 /* If the server did release the layout lock, then clean up
5223 * the client och here; otherwise release it in out_close: */
5224 if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
5225 obd_mod_put(och->och_mod);
5226 md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
5228 och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
5234 if (request != NULL) {
5235 ptlrpc_req_finished(request);
5239 /* Try again if the lease has been cancelled. */
5240 if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
5245 ll_lease_close(och, child_inode, NULL);
5247 clear_nlink(child_inode);
5249 ll_inode_unlock(child_inode);
5250 ll_finish_md_op_data(op_data);
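/*
 * Administrative sketch (illustrative only): this path is normally
 * driven by the 'lfs migrate' command mentioned above, e.g.:
 *
 *	lfs migrate -m 1 /mnt/lustre/testdir    # move testdir to MDT0001
 *
 * The child is resolved by name, a write lease is taken on regular
 * files, and the operation is retried while the server keeps returning
 * -EAGAIN after the lease has been cancelled.
 */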
5257 ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
5259 struct ll_file_data *fd = file->private_data;
5263 * To avoid a flood of warning messages, print only one message
5264 * per file. The overall message rate on the client is further
5265 * limited by CDEBUG_LIMIT.
5267 if (!(fd->fd_flags & LL_FILE_FLOCK_WARNING)) {
5268 fd->fd_flags |= LL_FILE_FLOCK_WARNING;
5269 CDEBUG_LIMIT(D_CONSOLE,
5270 "flock disabled, mount with '-o [local]flock' to enable\r\n");
5276 * Test if locks matching the given bits and l_req_mode are acquired:
5277 * - the bits may be spread across different locks
5278 * - the bits that are found are cleared from *bits
5279 * - the bits that are not found are kept in *bits
5281 * \param bits [IN] searched lock bits
5282 * \param l_req_mode [IN] searched lock mode
5283 * \retval boolean, true iff all bits are found
5285 int ll_have_md_lock(struct obd_export *exp, struct inode *inode, __u64 *bits,
5286 enum ldlm_mode l_req_mode)
5288 struct lustre_handle lockh;
5289 union ldlm_policy_data policy;
5290 enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
5291 (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
5300 fid = &ll_i2info(inode)->lli_fid;
5301 CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
5302 ldlm_lockname[mode]);
5304 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
5305 for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
5306 policy.l_inodebits.bits = *bits & BIT(i);
5307 if (policy.l_inodebits.bits == 0)
5310 if (md_lock_match(exp, flags, fid, LDLM_IBITS, &policy, mode,
5312 struct ldlm_lock *lock;
5314 lock = ldlm_handle2lock(&lockh);
5317 ~(lock->l_policy_data.l_inodebits.bits);
5318 LDLM_LOCK_PUT(lock);
5320 *bits &= ~policy.l_inodebits.bits;
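/*
 * Caller-side sketch (hypothetical, per the description above): test
 * whether the UPDATE and LOOKUP ibits are already held in any mode:
 *
 *	__u64 bits = MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP;
 *
 *	if (ll_have_md_lock(ll_i2mdexp(inode), inode, &bits, LCK_MINMODE))
 *		// all requested bits were found, possibly spread across
 *		// several locks; otherwise *bits keeps the missing ones
 *		;
 */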
5327 enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
5328 struct lustre_handle *lockh, __u64 flags,
5329 enum ldlm_mode mode)
5331 union ldlm_policy_data policy = { .l_inodebits = { bits } };
5336 fid = &ll_i2info(inode)->lli_fid;
5337 CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
5339 rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
5340 fid, LDLM_IBITS, &policy, mode, lockh);
5345 static int ll_inode_revalidate_fini(struct inode *inode, int rc)
5347 /* Already unlinked. Just update nlink and return success */
5348 if (rc == -ENOENT) {
5350 /* If it is a striped directory with a bad stripe,
5351 * let's revalidate the dentry again instead of returning
5353 if (ll_dir_striped(inode))
5356 /* This path cannot be hit for regular files, except in
5357 * the case of obscure races, so there is no need to validate
5359 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
5361 } else if (rc != 0) {
5362 CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
5363 "%s: revalidate FID "DFID" error: rc = %d\n",
5364 ll_i2sbi(inode)->ll_fsname,
5365 PFID(ll_inode2fid(inode)), rc);
5371 static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op)
5373 struct inode *parent;
5374 struct inode *inode = dentry->d_inode;
5375 struct obd_export *exp = ll_i2mdexp(inode);
5376 struct lookup_intent oit = {
5379 struct ptlrpc_request *req = NULL;
5380 struct md_op_data *op_data;
5381 const char *name = NULL;
5386 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
5387 PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
5389 if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) {
5390 parent = dentry->d_parent->d_inode;
5391 name = dentry->d_name.name;
5392 namelen = dentry->d_name.len;
5397 op_data = ll_prep_md_op_data(NULL, parent, inode, name, namelen, 0,
5398 LUSTRE_OPC_ANY, NULL);
5399 if (IS_ERR(op_data))
5400 RETURN(PTR_ERR(op_data));
5402 /* Call getattr by fid */
5403 if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID)
5404 op_data->op_flags = MF_GETATTR_BY_FID;
5405 rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0);
5406 ll_finish_md_op_data(op_data);
5408 rc = ll_inode_revalidate_fini(inode, rc);
5412 rc = ll_revalidate_it_finish(req, &oit, dentry);
5414 ll_intent_release(&oit);
5418 /* Unlinked? Unhash dentry, so it is not picked up later by
5419 * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
5420 * here to preserve get_cwd functionality on 2.6.
5422 if (!dentry->d_inode->i_nlink)
5423 d_lustre_invalidate(dentry);
5425 ll_lookup_finish_locks(&oit, dentry);
5427 ptlrpc_req_finished(req);
5432 static int ll_merge_md_attr(struct inode *inode)
5434 struct ll_inode_info *lli = ll_i2info(inode);
5435 struct lmv_stripe_object *lsm_obj;
5436 struct cl_attr attr = { 0 };
5439 if (!ll_dir_striped(inode))
5442 down_read(&lli->lli_lsm_sem);
5443 LASSERT(lli->lli_lsm_obj != NULL);
5445 lsm_obj = lmv_stripe_object_get(lli->lli_lsm_obj);
5446 up_read(&lli->lli_lsm_sem);
5448 rc = md_merge_attr(ll_i2mdexp(inode), lsm_obj,
5449 &attr, ll_md_blocking_ast);
5450 lmv_stripe_object_put(&lsm_obj);
5454 spin_lock(&inode->i_lock);
5455 set_nlink(inode, attr.cat_nlink);
5456 spin_unlock(&inode->i_lock);
5458 inode->i_blocks = attr.cat_blocks;
5459 i_size_write(inode, attr.cat_size);
5461 ll_i2info(inode)->lli_atime = attr.cat_atime;
5462 ll_i2info(inode)->lli_mtime = attr.cat_mtime;
5463 ll_i2info(inode)->lli_ctime = attr.cat_ctime;
5468 int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
5469 unsigned int flags, bool foreign)
5471 struct inode *inode = de->d_inode;
5472 struct ll_sb_info *sbi = ll_i2sbi(inode);
5473 struct ll_inode_info *lli = ll_i2info(inode);
5474 struct inode *dir = de->d_parent->d_inode;
5475 bool need_glimpse = true;
5476 ktime_t kstart = ktime_get();
5479 /* The OST object(s) determine the file size, blocks and mtime. */
5480 if (!(request_mask & STATX_SIZE || request_mask & STATX_BLOCKS ||
5481 request_mask & STATX_MTIME))
5482 need_glimpse = false;
5484 if (dentry_may_statahead(dir, de))
5485 ll_start_statahead(dir, de, need_glimpse &&
5486 !(flags & AT_STATX_DONT_SYNC));
5488 if (flags & AT_STATX_DONT_SYNC)
5489 GOTO(fill_attr, rc = 0);
5491 rc = ll_inode_revalidate(de, IT_GETATTR);
5495 /* foreign files/dirs are always of zero length, so there is
5496 * no need to validate the size.
5498 if (S_ISREG(inode->i_mode) && !foreign) {
5502 GOTO(fill_attr, rc);
5504 rc = pcc_inode_getattr(inode, request_mask, flags, &cached);
5505 if (cached && rc < 0)
5509 GOTO(fill_attr, rc);
5512 * If the returned attr is masked with OBD_MD_FLSIZE &
5513 * OBD_MD_FLBLOCKS & OBD_MD_FLMTIME, then the file size and
5514 * blocks obtained from the MDT are strictly correct, the file
5515 * is usually not being modified by clients, and the [a|m|c]time
5516 * obtained from the MDT is also strictly correct.
5517 * Under these circumstances there is no need to send glimpse
5518 * RPCs to the OSTs for file attributes such as size and blocks.
5520 if (lli->lli_attr_valid & OBD_MD_FLSIZE &&
5521 lli->lli_attr_valid & OBD_MD_FLBLOCKS &&
5522 lli->lli_attr_valid & OBD_MD_FLMTIME) {
5523 inode->i_mtime.tv_sec = lli->lli_mtime;
5524 if (lli->lli_attr_valid & OBD_MD_FLATIME)
5525 inode->i_atime.tv_sec = lli->lli_atime;
5526 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
5527 inode->i_ctime.tv_sec = lli->lli_ctime;
5528 GOTO(fill_attr, rc);
5531 /* In case of restore, the MDT has the right size and has
5532 * already sent it back without granting the layout lock;
5533 * the inode is up-to-date, so glimpse is useless.
5534 * Also, to glimpse we need the layout; during a running
5535 * restore the MDT holds the layout lock, so the glimpse will
5536 * block until the end of the restore (getattr will block)
5538 if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
5539 rc = ll_glimpse_size(inode);
5544 /* If the object isn't a regular file then don't validate its size. */
5545 /* a foreign dir is not a striped dir */
5547 rc = ll_merge_md_attr(inode);
5552 if (lli->lli_attr_valid & OBD_MD_FLATIME)
5553 inode->i_atime.tv_sec = lli->lli_atime;
5554 if (lli->lli_attr_valid & OBD_MD_FLMTIME)
5555 inode->i_mtime.tv_sec = lli->lli_mtime;
5556 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
5557 inode->i_ctime.tv_sec = lli->lli_ctime;
5561 CFS_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
5563 if (ll_need_32bit_api(sbi)) {
5564 stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
5565 stat->dev = ll_compat_encode_dev(inode->i_sb->s_dev);
5566 stat->rdev = ll_compat_encode_dev(inode->i_rdev);
5568 stat->ino = inode->i_ino;
5569 stat->dev = inode->i_sb->s_dev;
5570 stat->rdev = inode->i_rdev;
5573 /* foreign symlink to be exposed as a real symlink */
5575 stat->mode = inode->i_mode;
5577 stat->mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
5579 stat->uid = inode->i_uid;
5580 stat->gid = inode->i_gid;
5581 stat->atime = inode->i_atime;
5582 stat->mtime = inode->i_mtime;
5583 stat->ctime = inode->i_ctime;
5584 /* stat->blksize is used to report the preferred IO size */
5585 if (sbi->ll_stat_blksize)
5586 stat->blksize = sbi->ll_stat_blksize;
5587 else if (S_ISREG(inode->i_mode))
5588 stat->blksize = min(PTLRPC_MAX_BRW_SIZE,
5589 1U << LL_MAX_BLKSIZE_BITS);
5590 else if (S_ISDIR(inode->i_mode))
5591 stat->blksize = min(MD_MAX_BRW_SIZE,
5592 1U << LL_MAX_BLKSIZE_BITS);
5594 stat->blksize = 1 << inode->i_sb->s_blocksize_bits;
5596 stat->nlink = inode->i_nlink;
5597 stat->size = i_size_read(inode);
5598 stat->blocks = inode->i_blocks;
5600 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
5601 if (flags & AT_STATX_DONT_SYNC) {
5602 if (stat->size == 0 &&
5603 lli->lli_attr_valid & OBD_MD_FLLAZYSIZE)
5604 stat->size = lli->lli_lazysize;
5605 if (stat->blocks == 0 &&
5606 lli->lli_attr_valid & OBD_MD_FLLAZYBLOCKS)
5607 stat->blocks = lli->lli_lazyblocks;
5610 if (lli->lli_attr_valid & OBD_MD_FLBTIME) {
5611 stat->result_mask |= STATX_BTIME;
5612 stat->btime.tv_sec = lli->lli_btime;
5615 stat->attributes_mask = STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
5616 #ifdef HAVE_LUSTRE_CRYPTO
5617 stat->attributes_mask |= STATX_ATTR_ENCRYPTED;
5619 stat->attributes |= ll_inode_to_ext_flags(inode->i_flags);
5620 /* if the Lustre-specific LUSTRE_ENCRYPT_FL flag is set, also set
5621 * the ext4 equivalent to please statx
5623 if (stat->attributes & LUSTRE_ENCRYPT_FL)
5624 stat->attributes |= STATX_ATTR_ENCRYPTED;
5625 stat->result_mask &= request_mask;
5628 ll_stats_ops_tally(sbi, LPROC_LL_GETATTR,
5629 ktime_us_delta(ktime_get(), kstart));
5634 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
5635 int ll_getattr(struct mnt_idmap *map, const struct path *path,
5636 struct kstat *stat, u32 request_mask, unsigned int flags)
5638 return ll_getattr_dentry(path->dentry, stat, request_mask, flags,
5642 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
5644 return ll_getattr_dentry(de, stat, STATX_BASIC_STATS,
5645 AT_STATX_SYNC_AS_STAT, false);
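/*
 * Userspace sketch (statx(2)): the AT_STATX_DONT_SYNC handling above
 * means a "lazy" query skips revalidation and glimpse RPCs and may
 * report the cached lazy size/blocks instead:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *
 *	int lazy_size(const char *path, long long *size)
 *	{
 *		struct statx stx;
 *
 *		// no RPCs to the OSTs; cheap but possibly stale
 *		if (statx(AT_FDCWD, path, AT_STATX_DONT_SYNC,
 *			  STATX_SIZE, &stx) < 0)
 *			return -1;
 *		*size = (long long)stx.stx_size;
 *		return 0;
 *	}
 */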
5649 static int cl_falloc(struct file *file, struct inode *inode, int mode,
5650 loff_t offset, loff_t len)
5652 loff_t size = i_size_read(inode);
5660 env = cl_env_get(&refcheck);
5662 RETURN(PTR_ERR(env));
5664 io = vvp_env_thread_io(env);
5665 io->ci_obj = ll_i2info(inode)->lli_clob;
5666 ll_io_set_mirror(io, file);
5668 io->ci_verify_layout = 1;
5669 io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu);
5670 io->u.ci_setattr.sa_falloc_mode = mode;
5671 io->u.ci_setattr.sa_falloc_offset = offset;
5672 io->u.ci_setattr.sa_falloc_end = offset + len;
5673 io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE;
5675 CDEBUG(D_INODE, "UID %u GID %u PRJID %u\n",
5676 from_kuid(&init_user_ns, inode->i_uid),
5677 from_kgid(&init_user_ns, inode->i_gid),
5678 ll_i2info(inode)->lli_projid);
5680 io->u.ci_setattr.sa_falloc_uid = from_kuid(&init_user_ns, inode->i_uid);
5681 io->u.ci_setattr.sa_falloc_gid = from_kgid(&init_user_ns, inode->i_gid);
5682 io->u.ci_setattr.sa_falloc_projid = ll_i2info(inode)->lli_projid;
5684 if (io->u.ci_setattr.sa_falloc_end > size) {
5685 loff_t newsize = io->u.ci_setattr.sa_falloc_end;
5687 /* Check new size against VFS/VM file size limit and rlimit */
5688 rc = inode_newsize_ok(inode, newsize);
5691 if (newsize > ll_file_maxbytes(inode)) {
5692 CDEBUG(D_INODE, "file size too large %llu > %llu\n",
5693 (unsigned long long)newsize,
5694 ll_file_maxbytes(inode));
5701 rc = cl_io_init(env, io, CIT_SETATTR, io->ci_obj);
5703 rc = cl_io_loop(env, io);
5706 cl_io_fini(env, io);
5707 } while (unlikely(io->ci_need_restart));
5710 cl_env_put(env, &refcheck);
5714 static long ll_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
5716 struct inode *inode = file_inode(filp);
5719 if (offset < 0 || len <= 0)
5722 * Encrypted inodes can't handle collapse range or zero range or insert
5723 * range since we would need to re-encrypt blocks with a different IV or
5724 * XTS tweak (which are based on the logical block number).
5725 * Similar to what ext4 does.
5727 if (IS_ENCRYPTED(inode) &&
5728 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
5729 FALLOC_FL_ZERO_RANGE)))
5730 RETURN(-EOPNOTSUPP);
5733 * Only mode == 0 (standard preallocation) and PUNCH_HOLE are
5734 * supported; the rest of the mode options are not supported yet.
5736 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
5737 RETURN(-EOPNOTSUPP);
5739 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1);
5741 rc = cl_falloc(filp, inode, mode, offset, len);
5743 * ENOTSUPP (524) is an NFSv3-specific error code erroneously
5744 * used by Lustre in several places. Returning it here would
5745 * confuse applications that explicitly test for EOPNOTSUPP
5746 * (95) and fall back to ftruncate().
5748 if (rc == -ENOTSUPP)
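/*
 * Userspace sketch matching the mode checks above: only plain
 * preallocation (mode 0, optionally FALLOC_FL_KEEP_SIZE) and
 * FALLOC_FL_PUNCH_HOLE are accepted; everything else fails with
 * EOPNOTSUPP.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// preallocate 1 MiB at offset 0 without changing i_size
 *	rc = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 *	// punch a hole; PUNCH_HOLE requires KEEP_SIZE by VFS convention
 *	rc = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		       0, 1 << 20);
 */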
5754 static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5755 __u64 start, __u64 len)
5759 struct fiemap *fiemap;
5760 unsigned int extent_count = fieinfo->fi_extents_max;
5762 num_bytes = sizeof(*fiemap) + (extent_count *
5763 sizeof(struct fiemap_extent));
5764 OBD_ALLOC_LARGE(fiemap, num_bytes);
5769 fiemap->fm_flags = fieinfo->fi_flags;
5770 fiemap->fm_extent_count = fieinfo->fi_extents_max;
5771 fiemap->fm_start = start;
5772 fiemap->fm_length = len;
5773 if (extent_count > 0 &&
5774 copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
5775 sizeof(struct fiemap_extent)) != 0)
5776 GOTO(out, rc = -EFAULT);
5778 rc = ll_do_fiemap(inode, fiemap, num_bytes);
5780 if (IS_ENCRYPTED(inode)) {
5783 for (i = 0; i < fiemap->fm_mapped_extents; i++)
5784 fiemap->fm_extents[i].fe_flags |=
5785 FIEMAP_EXTENT_DATA_ENCRYPTED |
5786 FIEMAP_EXTENT_ENCODED;
5789 fieinfo->fi_flags = fiemap->fm_flags;
5790 fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
5791 if (extent_count > 0 &&
5792 copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
5793 fiemap->fm_mapped_extents *
5794 sizeof(struct fiemap_extent)) != 0)
5795 GOTO(out, rc = -EFAULT);
5797 OBD_FREE_LARGE(fiemap, num_bytes);
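/*
 * Userspace sketch (standard FS_IOC_FIEMAP): the handler above copies
 * the caller's fiemap header and extent array in and out around
 * ll_do_fiemap(); on encrypted files every extent additionally gets
 * the DATA_ENCRYPTED/ENCODED flags.
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	struct fiemap *map_extents(int fd, unsigned int count)
 *	{
 *		struct fiemap *fm;
 *
 *		fm = calloc(1, sizeof(*fm) +
 *			    count * sizeof(struct fiemap_extent));
 *		if (!fm)
 *			return NULL;
 *		fm->fm_length = FIEMAP_MAX_OFFSET;	// whole file
 *		fm->fm_extent_count = count;
 *		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
 *			free(fm);
 *			return NULL;
 *		}
 *		return fm;	// fm_mapped_extents extents are valid
 *	}
 */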
5801 int ll_inode_permission(struct mnt_idmap *idmap, struct inode *inode, int mask)
5804 struct ll_sb_info *sbi;
5805 struct root_squash_info *squash;
5806 struct cred *cred = NULL;
5807 const struct cred *old_cred = NULL;
5808 bool squash_id = false;
5809 ktime_t kstart = ktime_get();
5813 if (mask & MAY_NOT_BLOCK)
5817 * As the root inode is NOT validated during the lookup operation,
5818 * we need to revalidate PERM before the permission check.
5820 if (is_root_inode(inode)) {
5821 rc = ll_inode_revalidate(inode->i_sb->s_root, IT_GETATTR);
5826 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
5827 PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
5829 /* squash fsuid/fsgid if needed */
5830 sbi = ll_i2sbi(inode);
5831 squash = &sbi->ll_squash;
5832 if (unlikely(squash->rsi_uid != 0 &&
5833 uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
5834 !test_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags))) {
5838 CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
5839 __kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
5840 squash->rsi_uid, squash->rsi_gid);
5842 /* update current process's credentials
5843 * and FS capability */
5844 cred = prepare_creds();
5848 cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
5849 cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
5850 cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective);
5851 cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
5853 old_cred = override_creds(cred);
5856 rc = generic_permission(idmap, inode, mask);
5857 /* restore current process's credentials and FS capability */
5859 revert_creds(old_cred);
5864 ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM,
5865 ktime_us_delta(ktime_get(), kstart));
5870 /* -o localflock - only provides locally consistent flock locks */
5871 static const struct file_operations ll_file_operations = {
5872 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5873 # ifdef HAVE_SYNC_READ_WRITE
5874 .read = new_sync_read,
5875 .write = new_sync_write,
5877 .read_iter = ll_file_read_iter,
5878 .write_iter = ll_file_write_iter,
5879 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5880 .read = ll_file_read,
5881 .aio_read = ll_file_aio_read,
5882 .write = ll_file_write,
5883 .aio_write = ll_file_aio_write,
5884 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5885 .unlocked_ioctl = ll_file_ioctl,
5886 .open = ll_file_open,
5887 .release = ll_file_release,
5888 .mmap = ll_file_mmap,
5889 .llseek = ll_file_seek,
5890 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5891 .splice_read = generic_file_splice_read,
5893 .splice_read = pcc_file_splice_read,
5895 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
5896 .splice_write = iter_file_splice_write,
5900 .fallocate = ll_fallocate,
5903 static const struct file_operations ll_file_operations_flock = {
5904 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5905 # ifdef HAVE_SYNC_READ_WRITE
5906 .read = new_sync_read,
5907 .write = new_sync_write,
5908 # endif /* HAVE_SYNC_READ_WRITE */
5909 .read_iter = ll_file_read_iter,
5910 .write_iter = ll_file_write_iter,
5911 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5912 .read = ll_file_read,
5913 .aio_read = ll_file_aio_read,
5914 .write = ll_file_write,
5915 .aio_write = ll_file_aio_write,
5916 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5917 .unlocked_ioctl = ll_file_ioctl,
5918 .open = ll_file_open,
5919 .release = ll_file_release,
5920 .mmap = ll_file_mmap,
5921 .llseek = ll_file_seek,
5922 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5923 .splice_read = generic_file_splice_read,
5925 .splice_read = pcc_file_splice_read,
5927 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
5928 .splice_write = iter_file_splice_write,
5932 .flock = ll_file_flock,
5933 .lock = ll_file_flock,
5934 .fallocate = ll_fallocate,
5937 /* These are for -o noflock - to return ENOSYS on flock calls */
5938 static const struct file_operations ll_file_operations_noflock = {
5939 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5940 # ifdef HAVE_SYNC_READ_WRITE
5941 .read = new_sync_read,
5942 .write = new_sync_write,
5943 # endif /* HAVE_SYNC_READ_WRITE */
5944 .read_iter = ll_file_read_iter,
5945 .write_iter = ll_file_write_iter,
5946 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5947 .read = ll_file_read,
5948 .aio_read = ll_file_aio_read,
5949 .write = ll_file_write,
5950 .aio_write = ll_file_aio_write,
5951 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5952 .unlocked_ioctl = ll_file_ioctl,
5953 .open = ll_file_open,
5954 .release = ll_file_release,
5955 .mmap = ll_file_mmap,
5956 .llseek = ll_file_seek,
5957 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5958 .splice_read = generic_file_splice_read,
5960 .splice_read = pcc_file_splice_read,
5962 #ifdef HAVE_ITER_FILE_SPLICE_WRITE
5963 .splice_write = iter_file_splice_write,
5967 .flock = ll_file_noflock,
5968 .lock = ll_file_noflock,
5969 .fallocate = ll_fallocate,
5972 const struct inode_operations ll_file_inode_operations = {
5973 .setattr = ll_setattr,
5974 .getattr = ll_getattr,
5975 .permission = ll_inode_permission,
5976 #ifdef HAVE_IOP_XATTR
5977 .setxattr = ll_setxattr,
5978 .getxattr = ll_getxattr,
5979 .removexattr = ll_removexattr,
5981 .listxattr = ll_listxattr,
5982 .fiemap = ll_fiemap,
5983 .get_acl = ll_get_acl,
5984 #ifdef HAVE_IOP_SET_ACL
5985 .set_acl = ll_set_acl,
5987 #ifdef HAVE_FILEATTR_GET
5988 .fileattr_get = ll_fileattr_get,
5989 .fileattr_set = ll_fileattr_set,
5993 const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi)
5995 const struct file_operations *fops = &ll_file_operations_noflock;
5997 if (test_bit(LL_SBI_FLOCK, sbi->ll_flags))
5998 fops = &ll_file_operations_flock;
5999 else if (test_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags))
6000 fops = &ll_file_operations;
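/*
 * Selection summary (a sketch of the mapping implemented above):
 *
 *	mount -o flock      -> ll_file_operations_flock   (coherent flock/fcntl)
 *	mount -o localflock -> ll_file_operations          (local-only flock)
 *	default (noflock)   -> ll_file_operations_noflock  (flock calls return
 *	                                                    ENOSYS)
 */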
6005 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
6007 struct ll_inode_info *lli = ll_i2info(inode);
6008 struct cl_object *obj = lli->lli_clob;
6017 env = cl_env_get(&refcheck);
6019 RETURN(PTR_ERR(env));
6021 rc = cl_conf_set(env, lli->lli_clob, conf);
6025 if (conf->coc_opc == OBJECT_CONF_SET) {
6026 struct ldlm_lock *lock = conf->coc_lock;
6027 struct cl_layout cl = {
6031 LASSERT(lock != NULL);
6032 LASSERT(ldlm_has_layout(lock));
6034 /* the lock can only be allowed to match after the layout is
6035 * applied to the inode, otherwise a false layout would be
6036 * seen. Applying the layout should happen before dropping
6037 * the intent lock. */
6038 ldlm_lock_allow_match(lock);
6040 rc = cl_object_layout_get(env, obj, &cl);
6045 DFID": layout version change: %u -> %u\n",
6046 PFID(&lli->lli_fid), ll_layout_version_get(lli),
6048 ll_layout_version_set(lli, cl.cl_layout_gen);
6052 cl_env_put(env, &refcheck);
6054 RETURN(rc < 0 ? rc : 0);
6057 /* Fetch layout from MDT with getxattr request, if it's not ready yet */
6058 static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
6061 struct ll_sb_info *sbi = ll_i2sbi(inode);
6062 struct ptlrpc_request *req;
6069 CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
6070 PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
6071 lock->l_lvb_data, lock->l_lvb_len);
6073 if (lock->l_lvb_data != NULL)
6076 /* if the layout lock was granted right away, the layout is returned
6077 * within the DLM_LVB of the DLM reply; otherwise, if the lock was ever
6078 * blocked and then granted via a completion AST, we have to fetch the
6079 * layout here. Please note that we can't use the LVB buffer in the
6080 * completion AST because it isn't large enough */
6081 rc = ll_get_default_mdsize(sbi, &lmmsize);
6085 rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), OBD_MD_FLXATTR,
6086 XATTR_NAME_LOV, lmmsize, &req);
6089 GOTO(out, rc = 0); /* empty layout */
6096 if (lmmsize == 0) /* empty layout */
6099 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
6101 GOTO(out, rc = -EFAULT);
6103 OBD_ALLOC_LARGE(lvbdata, lmmsize);
6104 if (lvbdata == NULL)
6105 GOTO(out, rc = -ENOMEM);
6107 memcpy(lvbdata, lmm, lmmsize);
6108 lock_res_and_lock(lock);
6109 if (unlikely(lock->l_lvb_data == NULL)) {
6110 lock->l_lvb_type = LVB_T_LAYOUT;
6111 lock->l_lvb_data = lvbdata;
6112 lock->l_lvb_len = lmmsize;
6115 unlock_res_and_lock(lock);
6118 OBD_FREE_LARGE(lvbdata, lmmsize);
6123 ptlrpc_req_finished(req);
6128 * Apply the layout to the inode. Layout lock is held and will be released
6131 static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
6132 struct inode *inode, bool try)
6134 struct ll_inode_info *lli = ll_i2info(inode);
6135 struct ll_sb_info *sbi = ll_i2sbi(inode);
6136 struct ldlm_lock *lock;
6137 struct cl_object_conf conf;
6140 bool wait_layout = false;
6143 LASSERT(lustre_handle_is_used(lockh));
6145 lock = ldlm_handle2lock(lockh);
6146 LASSERT(lock != NULL);
6148 if (!ldlm_has_layout(lock))
6149 GOTO(out, rc = -EAGAIN);
6151 LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
6152 PFID(&lli->lli_fid), inode);
6154 /* in case this is a cached lock, reinstate it with the new inode */
6155 md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
6157 lock_res_and_lock(lock);
6158 lvb_ready = ldlm_is_lvb_ready(lock);
6159 unlock_res_and_lock(lock);
6161 /* checking lvb_ready is racy, but this is okay. The worst case is
6162 * that multiple processes may configure the file at the same time. */
6166 rc = ll_layout_fetch(inode, lock);
6170 /* for a layout lock, the lmm is stored in the lock's LVB;
6171 * lvb_data is immutable while the lock is held, so it's safe to access
6174 * set the layout on the file. This is unlikely to fail as the old
6175 * layout was surely eliminated */
6176 memset(&conf, 0, sizeof conf);
6177 conf.coc_opc = OBJECT_CONF_SET;
6178 conf.coc_inode = inode;
6179 conf.coc_lock = lock;
6181 conf.u.coc_layout.lb_buf = lock->l_lvb_data;
6182 conf.u.coc_layout.lb_len = lock->l_lvb_len;
6183 rc = ll_layout_conf(inode, &conf);
6185 /* refresh layout failed, need to wait */
6186 wait_layout = rc == -EBUSY;
6189 LDLM_LOCK_PUT(lock);
6190 ldlm_lock_decref(lockh, mode);
6192 /* wait for IO to complete if the layout is still being used. */
6194 CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
6195 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
6197 memset(&conf, 0, sizeof conf);
6198 conf.coc_opc = OBJECT_CONF_WAIT;
6199 conf.coc_inode = inode;
6200 rc = ll_layout_conf(inode, &conf);
6204 CDEBUG(D_INODE, "%s file="DFID" waiting layout return: %d\n",
6205 sbi->ll_fsname, PFID(&lli->lli_fid), rc);
6208 if (rc == -ERESTARTSYS) {
6211 struct cl_object *obj = lli->lli_clob;
6213 env = cl_env_get(&refcheck);
6215 RETURN(PTR_ERR(env));
6217 CDEBUG(D_INODE, "prune without lock "DFID"\n",
6218 PFID(lu_object_fid(&obj->co_lu)));
6220 trunc_sem_down_write(&lli->lli_trunc_sem);
6221 cl_object_prune(env, obj);
6222 trunc_sem_up_write(&lli->lli_trunc_sem);
6223 cl_env_put(env, &refcheck);
6232 * Issue layout intent RPC to MDS.
6233 * \param inode [in] file inode
6234 * \param intent [in] layout intent
6236 * \retval 0 on success
6237 * \retval < 0 error code
6239 static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
6241 struct ll_inode_info *lli = ll_i2info(inode);
6242 struct ll_sb_info *sbi = ll_i2sbi(inode);
6243 struct md_op_data *op_data;
6244 struct lookup_intent it;
6245 struct ptlrpc_request *req;
6249 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
6250 0, 0, LUSTRE_OPC_ANY, NULL);
6251 if (IS_ERR(op_data))
6252 RETURN(PTR_ERR(op_data));
6254 op_data->op_data = intent;
6255 op_data->op_data_size = sizeof(*intent);
6257 memset(&it, 0, sizeof(it));
6258 it.it_op = IT_LAYOUT;
6259 if (intent->li_opc == LAYOUT_INTENT_WRITE ||
6260 intent->li_opc == LAYOUT_INTENT_TRUNC)
6261 it.it_flags = FMODE_WRITE;
6263 LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
6264 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
6266 rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
6267 &ll_md_blocking_ast, 0);
6268 if (it.it_request != NULL)
6269 ptlrpc_req_finished(it.it_request);
6270 it.it_request = NULL;
6272 ll_finish_md_op_data(op_data);
6274 /* set lock data in case this is a new lock */
6276 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
6278 ll_intent_drop_lock(&it);
6284 * This function checks if a LAYOUT lock exists on the client side,
6285 * or enqueues one if it is not already cached.
6287 * This function will not hold the layout lock, so it may be revoked any time
6288 * after it returns. Any operation that depends on the layout should be redone
6291 * This function should be called before lov_io_init() to get an up-to-date
6292 * layout version; the caller should save the version number, and after the IO
6293 * is finished this function should be called again to verify that the layout
6294 * has not changed during the IO.
6296 int ll_layout_refresh(struct inode *inode, __u32 *gen)
6298 struct ll_inode_info *lli = ll_i2info(inode);
6299 struct ll_sb_info *sbi = ll_i2sbi(inode);
6300 struct lustre_handle lockh;
6301 struct layout_intent intent = {
6302 .li_opc = LAYOUT_INTENT_ACCESS,
6304 enum ldlm_mode mode;
6309 *gen = ll_layout_version_get(lli);
6310 if (!test_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags) ||
6311 *gen != CL_LAYOUT_GEN_NONE)
6315 LASSERT(fid_is_sane(ll_inode2fid(inode)));
6316 LASSERT(S_ISREG(inode->i_mode));
6318 /* take layout lock mutex to enqueue layout lock exclusively. */
6319 mutex_lock(&lli->lli_layout_mutex);
6320 lli->lli_layout_lock_owner = current;
6323 /* the layout lock is mostly cached on the local side, so try to
6324 * match it before grabbing the layout lock mutex. */
6325 mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
6326 LCK_CR | LCK_CW | LCK_PR |
6328 if (mode != 0) { /* hit cached lock */
6329 rc = ll_layout_lock_set(&lockh, mode, inode, try);
6336 rc = ll_layout_intent(inode, &intent);
6342 *gen = ll_layout_version_get(lli);
6343 lli->lli_layout_lock_owner = NULL;
6344 mutex_unlock(&lli->lli_layout_mutex);
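/*
 * Caller pattern sketch (hypothetical names, following the comment
 * above ll_layout_refresh()): fetch the layout generation before
 * starting IO, then verify it afterwards and redo the IO on change:
 *
 *	__u32 gen_before, gen_after;
 *	int rc;
 *
 *	rc = ll_layout_refresh(inode, &gen_before);
 *	// ... perform the IO against this layout ...
 *	rc = ll_layout_refresh(inode, &gen_after);
 *	if (rc == 0 && gen_after != gen_before)
 *		// the layout changed during the IO; redo it
 *		;
 */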
6350 * Issue layout intent RPC indicating where in a file an IO is about to write.
6352 * \param[in] inode file inode.
6353 * \param[in] ext write range with the start offset of the file in bytes
6354 * where an IO is about to write, and the exclusive end offset in
6357 * \retval 0 on success
6358 * \retval < 0 error code
6360 int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
6361 struct lu_extent *ext)
6363 struct layout_intent intent = {
6365 .li_extent.e_start = ext->e_start,
6366 .li_extent.e_end = ext->e_end,
6371 rc = ll_layout_intent(inode, &intent);
6377 * This function sends a restore request to the MDT
6379 int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
6381 struct hsm_user_request *hur;
6385 len = sizeof(struct hsm_user_request) +
6386 sizeof(struct hsm_user_item);
6387 OBD_ALLOC(hur, len);
6391 hur->hur_request.hr_action = HUA_RESTORE;
6392 hur->hur_request.hr_archive_id = 0;
6393 hur->hur_request.hr_flags = 0;
6394 memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
6395 sizeof(hur->hur_user_item[0].hui_fid));
6396 hur->hur_user_item[0].hui_extent.offset = offset;
6397 hur->hur_user_item[0].hui_extent.length = length;
6398 hur->hur_request.hr_itemcount = 1;
6399 rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,