/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_dlm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/user_namespace.h>
#include <linux/uidgid.h>
#include <linux/falloc.h>

#include <uapi/linux/lustre/lustre_ioctl.h>
#include <uapi/linux/llcrypt.h>
#include <lustre_swab.h>

#include "cl_object.h"
#include "llite_internal.h"
#include "vvp_internal.h"

struct split_param {
	struct inode	*sp_inode;
	__u16		 sp_mirror_id;
};

struct pcc_param {
	__u64	pa_data_version;
	__u32	pa_archive_id;
	__u32	pa_layout_gen;
};

static int
ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);

static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
			  bool *lease_broken);

static struct ll_file_data *ll_file_data_get(void)
{
	struct ll_file_data *fd;

	OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, GFP_NOFS);
	if (fd == NULL)
		return NULL;

	fd->fd_write_failed = false;
	pcc_file_init(&fd->fd_pcc_file);

	return fd;
}

static void ll_file_data_put(struct ll_file_data *fd)
{
	if (fd != NULL)
		OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
}

/**
 * Packs all the attributes into @op_data for the CLOSE rpc.
 */
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
			     struct obd_client_handle *och)
{
	ll_prep_md_op_data(op_data, inode, NULL, NULL,
			   0, 0, LUSTRE_OPC_ANY, NULL);

	op_data->op_attr.ia_mode = inode->i_mode;
	op_data->op_attr.ia_atime = inode->i_atime;
	op_data->op_attr.ia_mtime = inode->i_mtime;
	op_data->op_attr.ia_ctime = inode->i_ctime;
	op_data->op_attr.ia_size = i_size_read(inode);
	op_data->op_attr.ia_valid |= (ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
				      ATTR_MTIME | ATTR_MTIME_SET |
				      ATTR_CTIME);
	op_data->op_xvalid |= OP_XVALID_CTIME_SET;
	op_data->op_attr_blocks = inode->i_blocks;
	op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
	if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
		op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
	op_data->op_open_handle = och->och_open_handle;

	if (och->och_flags & FMODE_WRITE &&
	    test_and_clear_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags))
		/* For HSM: if inode data has been modified, pack it so that
		 * MDT can set data dirty flag in the archive. */
		op_data->op_bias |= MDS_DATA_MODIFIED;
}

/**
 * Perform a close, possibly with a bias.
 * The meaning of "data" depends on the value of "bias".
 *
 * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
 * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
 * swap layouts with.
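 * If \a bias is MDS_CLOSE_LAYOUT_SPLIT then \a data is a pointer to a
 * struct split_param carrying the victim inode and mirror id.
 * If \a bias is MDS_CLOSE_RESYNC_DONE then \a data is a pointer to a
 * struct ll_ioc_lease holding the resynced mirror ids.
 * If \a bias is MDS_PCC_ATTACH then \a data is a pointer to a struct pcc_param
 * with the PCC archive id and data version.
 */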
static int ll_close_inode_openhandle(struct inode *inode,
				     struct obd_client_handle *och,
				     enum mds_op_bias bias, void *data)
{
	struct obd_export *md_exp = ll_i2mdexp(inode);
	const struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc;

	if (class_exp2obd(md_exp) == NULL) {
		CERROR("%s: invalid MDC connection handle closing "DFID"\n",
		       ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
		GOTO(out, rc = 0);
	}

	OBD_ALLOC_PTR(op_data);
	/* We leak openhandle and request here on error, but not much to be
	 * done in OOM case since app won't retry close on error either. */
	if (op_data == NULL)
		GOTO(out, rc = -ENOMEM);

	ll_prepare_close(inode, op_data, och);
	switch (bias) {
	case MDS_CLOSE_LAYOUT_MERGE:
		/* merge blocks from the victim inode */
		op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		fallthrough;
	case MDS_CLOSE_LAYOUT_SPLIT:
	case MDS_CLOSE_LAYOUT_SWAP: {
		struct split_param *sp = data;

		LASSERT(data != NULL);
		op_data->op_bias |= bias;
		op_data->op_data_version = 0;
		op_data->op_lease_handle = och->och_lease_handle;
		if (bias == MDS_CLOSE_LAYOUT_SPLIT) {
			op_data->op_fid2 = *ll_inode2fid(sp->sp_inode);
			op_data->op_mirror_id = sp->sp_mirror_id;
		} else {
			op_data->op_fid2 = *ll_inode2fid(data);
		}
		break;
	}

	case MDS_CLOSE_RESYNC_DONE: {
		struct ll_ioc_lease *ioc = data;

		LASSERT(data != NULL);
		op_data->op_attr_blocks +=
			ioc->lil_count * op_data->op_attr_blocks;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		op_data->op_bias |= MDS_CLOSE_RESYNC_DONE;

		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_data = &ioc->lil_ids[0];
		op_data->op_data_size =
			ioc->lil_count * sizeof(ioc->lil_ids[0]);
		break;
	}

	case MDS_PCC_ATTACH: {
		struct pcc_param *param = data;

		LASSERT(data != NULL);
		op_data->op_bias |= MDS_HSM_RELEASE | MDS_PCC_ATTACH;
		op_data->op_archive_id = param->pa_archive_id;
		op_data->op_data_version = param->pa_data_version;
		op_data->op_lease_handle = och->och_lease_handle;
		break;
	}

	case MDS_HSM_RELEASE:
		LASSERT(data != NULL);
		op_data->op_bias |= MDS_HSM_RELEASE;
		op_data->op_data_version = *(__u64 *)data;
		op_data->op_lease_handle = och->och_lease_handle;
		op_data->op_attr.ia_valid |= ATTR_SIZE;
		op_data->op_xvalid |= OP_XVALID_BLOCKS;
		break;

	default:
		LASSERT(data == NULL);
		break;
	}

	if (!(op_data->op_attr.ia_valid & ATTR_SIZE))
		op_data->op_xvalid |= OP_XVALID_LAZYSIZE;
	if (!(op_data->op_xvalid & OP_XVALID_BLOCKS))
		op_data->op_xvalid |= OP_XVALID_LAZYBLOCKS;

	rc = md_close(md_exp, op_data, och->och_mod, &req);
	if (rc != 0 && rc != -EINTR)
		CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
		       md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc);

	if (rc == 0 && op_data->op_bias & bias) {
		struct mdt_body *body;

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
			rc = -EBUSY;

		if (bias & MDS_PCC_ATTACH) {
			struct pcc_param *param = data;

			param->pa_layout_gen = body->mbo_layout_gen;
		}
	}

	ll_finish_md_op_data(op_data);

out:
	md_clear_open_replay_data(md_exp, och);
	och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
	OBD_FREE_PTR(och);

	ptlrpc_req_finished(req);	/* This is close request */

	RETURN(rc);
}

int ll_md_real_close(struct inode *inode, fmode_t fmode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct obd_client_handle **och_p;
	struct obd_client_handle *och;
	__u64 *och_usecount;
	int rc = 0;

	if (fmode & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else if (fmode & FMODE_EXEC) {
		och_p = &lli->lli_mds_exec_och;
		och_usecount = &lli->lli_open_fd_exec_count;
	} else {
		LASSERT(fmode & FMODE_READ);
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	mutex_lock(&lli->lli_och_mutex);
	if (*och_usecount > 0) {
		/* There are still users of this handle, so skip
		 * freeing it. */
		mutex_unlock(&lli->lli_och_mutex);
		RETURN(0);
	}

	och = *och_p;
	*och_p = NULL;
	mutex_unlock(&lli->lli_och_mutex);

	if (och != NULL) {
		/* There might be a race and this handle may already
		 * be closed. */
		rc = ll_close_inode_openhandle(inode, och, 0, NULL);
	}

	RETURN(rc);
}

static int ll_md_close(struct inode *inode, struct file *file)
{
	union ldlm_policy_data policy = {
		.l_inodebits = { MDS_INODELOCK_OPEN },
	};
	__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
	struct ll_file_data *fd = file->private_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lustre_handle lockh;
	enum ldlm_mode lockmode;
	int rc = 0;

	/* clear group lock, if present */
	if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
		ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);

	if (fd->fd_lease_och != NULL) {
		bool lease_broken;

		/* Usually the lease is not released when the
		 * application crashed, we need to release here. */
		rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
		CDEBUG_LIMIT(rc ? D_ERROR : D_INODE,
			     "Clean up lease "DFID" %d/%d\n",
			     PFID(&lli->lli_fid), rc, lease_broken);

		fd->fd_lease_och = NULL;
	}

	if (fd->fd_och != NULL) {
		rc = ll_close_inode_openhandle(inode, fd->fd_och, 0, NULL);
		fd->fd_och = NULL;
		GOTO(out, rc);
	}

	/* Let's see if we have good enough OPEN lock on the file and if
	 * we can skip talking to MDS */
	mutex_lock(&lli->lli_och_mutex);
	if (fd->fd_omode & FMODE_WRITE) {
		lockmode = LCK_CW;
		LASSERT(lli->lli_open_fd_write_count);
		lli->lli_open_fd_write_count--;
	} else if (fd->fd_omode & FMODE_EXEC) {
		lockmode = LCK_PR;
		LASSERT(lli->lli_open_fd_exec_count);
		lli->lli_open_fd_exec_count--;
	} else {
		lockmode = LCK_CR;
		LASSERT(lli->lli_open_fd_read_count);
		lli->lli_open_fd_read_count--;
	}
	mutex_unlock(&lli->lli_och_mutex);

	/* LU-4398: do not cache write open lock if the file has exec bit */
	if ((lockmode == LCK_CW && inode->i_mode & S_IXUGO) ||
	    !md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
			   LDLM_IBITS, &policy, lockmode, &lockh))
		rc = ll_md_real_close(inode, fd->fd_omode);

out:
	file->private_data = NULL;
	ll_file_data_put(fd);

	RETURN(rc);
}

/* While this returns an error code, the caller (fput()) does not check it,
 * so we need to make every effort to clean up all of our state here. Also,
 * applications rarely check close errors and even if an error is returned
 * they will not re-try the close call.
 */
int ll_file_release(struct inode *inode, struct file *file)
{
	struct ll_file_data *fd;
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_inode_info *lli = ll_i2info(inode);
	ktime_t kstart = ktime_get();
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(ll_inode2fid(inode)), inode);

	fd = file->private_data;
	LASSERT(fd != NULL);

	/* The last ref on @file may not be the owner pid of statahead,
	 * because parent and child process can share the same file handle. */
	if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
		ll_deauthorize_statahead(inode, fd);

	if (is_root_inode(inode)) {
		file->private_data = NULL;
		ll_file_data_put(fd);
		GOTO(out, rc = 0);
	}

	pcc_file_release(inode, file);

	if (!S_ISDIR(inode->i_mode)) {
		if (lli->lli_clob != NULL)
			lov_read_and_clear_async_rc(lli->lli_clob);
		lli->lli_async_rc = 0;
	}

	rc = ll_md_close(inode, file);

	if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
		libcfs_debug_dumplog();

out:
	if (!rc && !is_root_inode(inode))
		ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
				   ktime_us_delta(ktime_get(), kstart));
	RETURN(rc);
}

static inline int ll_dom_readpage(void *data, struct page *page)
{
	struct niobuf_local *lnb = data;
	void *kaddr;
	int rc = 0;
	struct inode *inode = page2inode(page);

	kaddr = kmap_atomic(page);
	memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
	if (lnb->lnb_len < PAGE_SIZE)
		memset(kaddr + lnb->lnb_len, 0,
		       PAGE_SIZE - lnb->lnb_len);
	flush_dcache_page(page);
	SetPageUptodate(page);
	kunmap_atomic(kaddr);

	if (inode && IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
		if (!llcrypt_has_encryption_key(inode)) {
			CDEBUG(D_SEC, "no enc key for "DFID"\n",
			       PFID(ll_inode2fid(inode)));
		} else {
			unsigned int offs = 0;

			while (offs < PAGE_SIZE) {
				/* decrypt only if page is not empty */
				if (memcmp(page_address(page) + offs,
					   page_address(ZERO_PAGE(0)),
					   LUSTRE_ENCRYPTION_UNIT_SIZE) == 0)
					break;

				rc = llcrypt_decrypt_pagecache_blocks(page,
						LUSTRE_ENCRYPTION_UNIT_SIZE,
						offs);
				if (rc)
					break;

				offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
			}
		}
	}
	unlock_page(page);

	return rc;
}
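
/*
 * Note: ll_dom_readpage() runs as the read_cache_page() "filler" in
 * ll_dom_finish_open() below; the niobuf_local passed via @data carries a
 * window into the inline reply buffer, so each call copies (and, for
 * encrypted files, decrypts) one page worth of read-on-open data.
 */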

void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req)
{
	struct lu_env *env;
	struct cl_io *io;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct address_space *mapping = inode->i_mapping;
	struct page *vmpage;
	struct niobuf_remote *rnb;
	struct mdt_body *body;
	char *data;
	unsigned long index, start;
	struct niobuf_local lnb;
	__u16 refcheck;
	int rc;

	if (obj == NULL)
		RETURN_EXIT;

	if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
				       RCL_SERVER))
		RETURN_EXIT;

	rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
	if (rnb == NULL || rnb->rnb_len == 0)
		RETURN_EXIT;

	/* LU-11595: the server may return the whole file, which is always OK,
	 * or it may return just the file tail, whose offset must be aligned
	 * with the client PAGE_SIZE to be usable on this client; if the
	 * server's PAGE_SIZE is smaller, then the offset may be unaligned and
	 * that data is just ignored.
	 */
	if (rnb->rnb_offset & ~PAGE_MASK)
		RETURN_EXIT;

	/* Server returns whole file or just file tail if it fills in reply
	 * buffer, in both cases total size should be equal to the file size.
	 */
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size &&
	    !(inode && IS_ENCRYPTED(inode))) {
		CERROR("%s: server returns off/len %llu/%u but size %llu\n",
		       ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
		       rnb->rnb_len, body->mbo_dom_size);
		RETURN_EXIT;
	}

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN_EXIT;
	io = vvp_env_thread_io(env);
	io->ci_obj = obj;
	io->ci_ignore_layout = 1;
	rc = cl_io_init(env, io, CIT_MISC, obj);
	if (rc)
		GOTO(out_io, rc);

	CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
	       rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);

	data = (char *)rnb + sizeof(*rnb);

	lnb.lnb_file_offset = rnb->rnb_offset;
	start = lnb.lnb_file_offset >> PAGE_SHIFT;
	index = 0;
	LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
	lnb.lnb_page_offset = 0;
	do {
		struct cl_page *page;

		lnb.lnb_data = data + (index << PAGE_SHIFT);
		lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
		if (lnb.lnb_len > PAGE_SIZE)
			lnb.lnb_len = PAGE_SIZE;

		vmpage = read_cache_page(mapping, index + start,
					 ll_dom_readpage, &lnb);
		if (IS_ERR(vmpage)) {
			CWARN("%s: cannot fill page %lu for "DFID
			      " with data: rc = %li\n",
			      ll_i2sbi(inode)->ll_fsname, index + start,
			      PFID(lu_object_fid(&obj->co_lu)),
			      PTR_ERR(vmpage));
			break;
		}
		lock_page(vmpage);
		if (vmpage->mapping == NULL) {
			unlock_page(vmpage);
			put_page(vmpage);
			/* page was truncated */
			break;
		}
		/* attach VM page to CL page cache */
		page = cl_page_find(env, obj, vmpage->index, vmpage,
				    CPT_CACHEABLE);
		if (IS_ERR(page)) {
			ClearPageUptodate(vmpage);
			unlock_page(vmpage);
			put_page(vmpage);
			break;
		}
		cl_page_export(env, page, 1);
		cl_page_put(env, page);
		unlock_page(vmpage);
		put_page(vmpage);
		index++;
	} while (rnb->rnb_len > (index << PAGE_SHIFT));

out_io:
	cl_io_fini(env, io);
	cl_env_put(env, &refcheck);

	EXIT;
}
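
/*
 * With Data-on-MDT the server can pack the start (or tail) of the file into
 * the open reply; the loop above stuffs those bytes straight into the page
 * cache, so the first read(2) after open can be served locally without an
 * extra read RPC.
 */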

static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
			       struct lookup_intent *itp)
{
	struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
	struct dentry *parent = de->d_parent;
	char *name = NULL;
	int len = 0;
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc;

	LASSERT(parent != NULL);
	LASSERT(itp->it_flags & MDS_OPEN_BY_FID);

	/* if server supports open-by-fid, or file name is invalid, don't pack
	 * name in open request */
	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_OPEN_BY_NAME) ||
	    !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID)) {
retry:
		len = de->d_name.len;
		name = kmalloc(len + 1, GFP_NOFS);
		if (!name)
			RETURN(-ENOMEM);

		/* race here */
		spin_lock(&de->d_lock);
		if (len != de->d_name.len) {
			spin_unlock(&de->d_lock);
			kfree(name);
			goto retry;
		}
		memcpy(name, de->d_name.name, len);
		name[len] = '\0';
		spin_unlock(&de->d_lock);

		if (!lu_name_is_valid_2(name, len)) {
			kfree(name);
			RETURN(-ESTALE);
		}
	}

	op_data = ll_prep_md_op_data(NULL, parent->d_inode, de->d_inode,
				     name, len, 0, LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		kfree(name);
		RETURN(PTR_ERR(op_data));
	}
	op_data->op_data = lmm;
	op_data->op_data_size = lmmsize;

	rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
			    &ll_md_blocking_ast, 0);
	kfree(name);
	ll_finish_md_op_data(op_data);
	if (rc == -ESTALE) {
		/* reason for keeping our own exit path - don't flood the log
		 * with messages about -ESTALE errors.
		 */
		if (!it_disposition(itp, DISP_OPEN_OPEN) ||
		    it_open_error(DISP_OPEN_OPEN, itp))
			GOTO(out, rc);
		ll_release_openhandle(de, itp);
		GOTO(out, rc);
	}

	if (it_disposition(itp, DISP_LOOKUP_NEG))
		GOTO(out, rc = -ENOENT);

	if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
		rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
		CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
		GOTO(out, rc);
	}

	rc = ll_prep_inode(&de->d_inode, req, NULL, itp);

	if (!rc && itp->it_lock_mode) {
		__u64 bits = 0;

		/* If we got a lock back and it has a LOOKUP bit set,
		 * make sure the dentry is marked as valid so we can find it.
		 * We don't need to care about actual hashing since other bits
		 * of kernel will deal with that later.
		 */
		ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
		if (bits & MDS_INODELOCK_LOOKUP)
			d_lustre_revalidate(de);
		/* if DoM bit returned along with LAYOUT bit then there
		 * can be read-on-open data returned.
		 */
		if (bits & MDS_INODELOCK_DOM && bits & MDS_INODELOCK_LAYOUT)
			ll_dom_finish_open(de->d_inode, req);
	}

out:
	ptlrpc_req_finished(req);
	ll_intent_drop_lock(itp);

	/* We did open by fid, but by the time we got to the server, the object
	 * disappeared. If this is a create, we cannot really tell the
	 * userspace that the file it was trying to create does not exist.
	 * Instead let's return -ESTALE, and the VFS will retry the create with
	 * LOOKUP_REVAL that we are going to catch in ll_revalidate_dentry()
	 * and use lookup then.
	 */
	if (rc == -ENOENT && itp->it_op & IT_CREAT)
		rc = -ESTALE;

	RETURN(rc);
}

static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
		       struct obd_client_handle *och)
{
	struct mdt_body *body;

	body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
	och->och_open_handle = body->mbo_open_handle;
	och->och_fid = body->mbo_fid1;
	och->och_lease_handle.cookie = it->it_lock_handle;
	och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
	och->och_flags = it->it_flags;

	return md_set_open_replay_data(md_exp, och, it);
}

static int ll_local_open(struct file *file, struct lookup_intent *it,
			 struct ll_file_data *fd, struct obd_client_handle *och)
{
	struct inode *inode = file_inode(file);

	LASSERT(!file->private_data);

	LASSERT(fd != NULL);

	if (och) {
		int rc;

		rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
		if (rc != 0)
			RETURN(rc);
	}

	file->private_data = fd;
	ll_readahead_init(inode, &fd->fd_ras);
	fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
	/* turn off the kernel's read-ahead */
	file->f_ra.ra_pages = 0;

	/* ll_cl_context initialize */
	rwlock_init(&fd->fd_lock);
	INIT_LIST_HEAD(&fd->fd_lccs);

	RETURN(0);
}

/* Open a file, and (for the very first open) create objects on the OSTs at
 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
 * creation or open until ll_lov_setstripe() ioctl is called.
 *
 * If we already have the stripe MD locally then we don't request it in
 * md_open(), by passing a lmm_size = 0.
 *
 * It is up to the application to ensure no other processes open this file
 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
 * used. We might be able to avoid races of that sort by getting lli_open_sem
 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
 */
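/*
 * Illustrative userspace sequence for the O_LOV_DELAY_CREATE path described
 * above (a sketch; the exact lov_user_md fields depend on the layout wanted):
 *
 *	fd = open(path, O_CREAT | O_WRONLY | O_LOV_DELAY_CREATE, 0644);
 *	// ... fill in a struct lov_user_md lum ...
 *	ioctl(fd, LL_IOC_LOV_SETSTRIPE, &lum);	// creates the OST objects
 */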
int ll_file_open(struct inode *inode, struct file *file)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lookup_intent *it, oit = { .it_op = IT_OPEN,
					  .it_flags = file->f_flags };
	struct obd_client_handle **och_p = NULL;
	__u64 *och_usecount = NULL;
	struct ll_file_data *fd;
	ktime_t kstart = ktime_get();
	int rc = 0;

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
	       PFID(ll_inode2fid(inode)), inode, file->f_flags);

	it = file->private_data; /* XXX: compat macro */
	file->private_data = NULL; /* prevent ll_local_open assertion */

	if (S_ISREG(inode->i_mode)) {
		rc = llcrypt_file_open(inode, file);
		if (rc)
			GOTO(out_nofiledata, rc);
	}

	fd = ll_file_data_get();
	if (fd == NULL)
		GOTO(out_nofiledata, rc = -ENOMEM);

	if (S_ISDIR(inode->i_mode))
		ll_authorize_statahead(inode, fd);

	if (is_root_inode(inode)) {
		file->private_data = fd;
		RETURN(0);
	}

	if (!it || !it->it_disposition) {
		/* Convert f_flags into access mode. We cannot use file->f_mode,
		 * because everything but O_ACCMODE mask was stripped from
		 * there */
		if ((oit.it_flags + 1) & O_ACCMODE)
			oit.it_flags++;
		if (file->f_flags & O_TRUNC)
			oit.it_flags |= FMODE_WRITE;

		/* kernel only call f_op->open in dentry_open. filp_open calls
		 * dentry_open after call to open_namei that checks permissions.
		 * Only nfsd_open call dentry_open directly without checking
		 * permissions and because of that this code below is safe.
		 */
		if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
			oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;

		/* We do not want O_EXCL here, presumably we opened the file
		 * already? XXX - NFS implications? */
		oit.it_flags &= ~O_EXCL;

		/* bug20584, if "it_flags" contains O_CREAT, the file will be
		 * created if necessary, then "IT_CREAT" should be set to keep
		 * consistent with it */
		if (oit.it_flags & O_CREAT)
			oit.it_op |= IT_CREAT;

		it = &oit;
	}

restart:
	/* Let's see if we have file open on MDS already. */
	if (it->it_flags & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else if (it->it_flags & FMODE_EXEC) {
		och_p = &lli->lli_mds_exec_och;
		och_usecount = &lli->lli_open_fd_exec_count;
	} else {
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	mutex_lock(&lli->lli_och_mutex);
	if (*och_p) { /* Open handle is present */
		if (it_disposition(it, DISP_OPEN_OPEN)) {
			/* Well, there's extra open request that we do not need,
			 * let's close it somehow. This will decref request. */
			rc = it_open_error(DISP_OPEN_OPEN, it);
			if (rc) {
				mutex_unlock(&lli->lli_och_mutex);
				GOTO(out_openerr, rc);
			}

			ll_release_openhandle(file_dentry(file), it);
		}
		(*och_usecount)++;

		rc = ll_local_open(file, it, fd, NULL);
		if (rc) {
			(*och_usecount)--;
			mutex_unlock(&lli->lli_och_mutex);
			GOTO(out_openerr, rc);
		}
	} else {
		LASSERT(*och_usecount == 0);
		if (!it->it_disposition) {
			struct dentry *dentry = file_dentry(file);
			struct ll_dentry_data *ldd;

			/* We cannot just request lock handle now, new ELC code
			 * means that one of other OPEN locks for this file
			 * could be cancelled, and since blocking ast handler
			 * would attempt to grab och_mutex as well, that would
			 * result in a deadlock
			 */
			mutex_unlock(&lli->lli_och_mutex);
			/*
			 * Normally called under two situations:
			 * 1. NFS export.
			 * 2. A race/condition on MDS resulting in no open
			 *    handle to be returned from LOOKUP|OPEN request,
			 *    for example if the target entry was a symlink.
			 *
			 * Only fetch MDS_OPEN_LOCK if this is in NFS path,
			 * marked by a bit set in ll_iget_for_nfs. Clear the
			 * bit so that it's not confusing later callers.
			 *
			 * NB; when ldd is NULL, it must have come via normal
			 * lookup path only, since ll_iget_for_nfs always calls
			 * ll_d_init().
			 */
			ldd = ll_d2d(dentry);
			if (ldd && ldd->lld_nfs_dentry) {
				ldd->lld_nfs_dentry = 0;
				if (!filename_is_volatile(dentry->d_name.name,
							  dentry->d_name.len,
							  NULL))
					it->it_flags |= MDS_OPEN_LOCK;
			}

			/*
			 * Always specify MDS_OPEN_BY_FID because we don't want
			 * to get file with different fid.
			 */
			it->it_flags |= MDS_OPEN_BY_FID;
			rc = ll_intent_file_open(dentry, NULL, 0, it);
			if (rc)
				GOTO(out_openerr, rc);

			goto restart;
		}
		OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
		if (!*och_p)
			GOTO(out_och_free, rc = -ENOMEM);

		(*och_usecount)++;

		/* md_intent_lock() didn't get a request ref if there was an
		 * open error, so don't do cleanup on the request here
		 */
		/* XXX (green): Should not we bail out on any error here, not
		 * just open error? */
		rc = it_open_error(DISP_OPEN_OPEN, it);
		if (rc != 0)
			GOTO(out_och_free, rc);

		LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
			 "inode %p: disposition %x, status %d\n", inode,
			 it_disposition(it, ~0), it->it_status);

		rc = ll_local_open(file, it, fd, *och_p);
		if (rc)
			GOTO(out_och_free, rc);
	}

	rc = pcc_file_open(inode, file);
	if (rc)
		GOTO(out_och_free, rc);

	mutex_unlock(&lli->lli_och_mutex);

	fd = NULL;

	/* Must do this outside lli_och_mutex lock to prevent deadlock where
	 * different kind of OPEN lock for this same inode gets cancelled
	 * by ldlm_cancel_lru
	 */
	if (!S_ISREG(inode->i_mode))
		GOTO(out_och_free, rc);
	cl_lov_delay_create_clear(&file->f_flags);
	GOTO(out_och_free, rc);

out_och_free:
	if (rc) {
		if (och_p && *och_p) {
			OBD_FREE(*och_p, sizeof(struct obd_client_handle));
			*och_p = NULL; /* OBD_FREE writes some magic there */
			(*och_usecount)--;
		}
		mutex_unlock(&lli->lli_och_mutex);

out_openerr:
		if (lli->lli_opendir_key == fd)
			ll_deauthorize_statahead(inode, fd);

		if (fd != NULL)
			ll_file_data_put(fd);
	} else {
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN,
				   ktime_us_delta(ktime_get(), kstart));
	}

out_nofiledata:
	if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
		ptlrpc_req_finished(it->it_request);
		it_clear_disposition(it, DISP_ENQ_OPEN_REF);
	}

	return rc;
}

static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
				    struct ldlm_lock_desc *desc,
				    void *data, int flag)
{
	struct lustre_handle lockh;
	int rc;

	switch (flag) {
	case LDLM_CB_BLOCKING:
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		if (rc < 0) {
			CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
			RETURN(rc);
		}
		break;
	case LDLM_CB_CANCELING:
		/* do nothing */
		break;
	}
	RETURN(0);
}

/**
 * When setting a lease on a file, we take ownership of the lli_mds_*_och
 * and save it as fd->fd_och so as to force client to reopen the file even
 * if it has an open lock in cache already.
 */
static int ll_lease_och_acquire(struct inode *inode, struct file *file,
				struct lustre_handle *old_open_handle)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_file_data *fd = file->private_data;
	struct obd_client_handle **och_p;
	__u64 *och_usecount;
	int rc = 0;

	/* Get the openhandle of the file */
	mutex_lock(&lli->lli_och_mutex);
	if (fd->fd_lease_och != NULL)
		GOTO(out_unlock, rc = -EBUSY);

	if (fd->fd_och == NULL) {
		if (file->f_mode & FMODE_WRITE) {
			LASSERT(lli->lli_mds_write_och != NULL);
			och_p = &lli->lli_mds_write_och;
			och_usecount = &lli->lli_open_fd_write_count;
		} else {
			LASSERT(lli->lli_mds_read_och != NULL);
			och_p = &lli->lli_mds_read_och;
			och_usecount = &lli->lli_open_fd_read_count;
		}

		if (*och_usecount > 1)
			GOTO(out_unlock, rc = -EBUSY);

		fd->fd_och = *och_p;
		*och_p = NULL;
		*och_usecount = 0;
	}

	*old_open_handle = fd->fd_och->och_open_handle;

out_unlock:
	mutex_unlock(&lli->lli_och_mutex);
	RETURN(rc);
}

/**
 * Release ownership on lli_mds_*_och when putting back a file lease.
 */
static int ll_lease_och_release(struct inode *inode, struct file *file)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_file_data *fd = file->private_data;
	struct obd_client_handle **och_p;
	struct obd_client_handle *old_och = NULL;
	__u64 *och_usecount;
	int rc = 0;

	mutex_lock(&lli->lli_och_mutex);
	if (file->f_mode & FMODE_WRITE) {
		och_p = &lli->lli_mds_write_och;
		och_usecount = &lli->lli_open_fd_write_count;
	} else {
		och_p = &lli->lli_mds_read_och;
		och_usecount = &lli->lli_open_fd_read_count;
	}

	/* The file may have been open by another process (broken lease) so
	 * *och_p is not NULL. In this case we should simply increase usecount
	 * and close fd_och.
	 */
	if (*och_p != NULL) {
		old_och = fd->fd_och;
		(*och_usecount)++;
	} else {
		*och_p = fd->fd_och;
		*och_usecount = 1;
	}
	fd->fd_och = NULL;
	mutex_unlock(&lli->lli_och_mutex);

	if (old_och != NULL)
		rc = ll_close_inode_openhandle(inode, old_och, 0, NULL);

	RETURN(rc);
}

/**
 * Acquire a lease and open the file.
 */
static struct obd_client_handle *
ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
	      __u64 open_flags)
{
	struct lookup_intent it = { .it_op = IT_OPEN };
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	struct lustre_handle old_open_handle = { 0 };
	struct obd_client_handle *och = NULL;
	int rc;
	int rc2;

	if (fmode != FMODE_WRITE && fmode != FMODE_READ)
		RETURN(ERR_PTR(-EINVAL));

	if (file != NULL) {
		if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
			RETURN(ERR_PTR(-EPERM));

		rc = ll_lease_och_acquire(inode, file, &old_open_handle);
		if (rc)
			RETURN(ERR_PTR(rc));
	}

	OBD_ALLOC_PTR(och);
	if (och == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		GOTO(out, rc = PTR_ERR(op_data));

	/* To tell the MDT this openhandle is from the same owner */
	op_data->op_open_handle = old_open_handle;

	it.it_flags = fmode | open_flags;
	it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
	rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
			    &ll_md_blocking_lease_ast,
	/* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise
	 * it can be cancelled which may mislead applications that the lease is
	 * broken;
	 * LDLM_FL_EXCL: Set this flag so that it won't be matched by normal
	 * open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast
	 * doesn't deal with openhandle, so normal openhandle will be leaked. */
			    LDLM_FL_NO_LRU | LDLM_FL_EXCL);
	ll_finish_md_op_data(op_data);
	ptlrpc_req_finished(req);
	if (rc < 0)
		GOTO(out_release_it, rc);

	if (it_disposition(&it, DISP_LOOKUP_NEG))
		GOTO(out_release_it, rc = -ENOENT);

	rc = it_open_error(DISP_OPEN_OPEN, &it);
	if (rc)
		GOTO(out_release_it, rc);

	LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
	rc = ll_och_fill(sbi->ll_md_exp, &it, och);
	if (rc)
		GOTO(out_release_it, rc);

	if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
		GOTO(out_close, rc = -EOPNOTSUPP);

	/* already get lease, handle lease lock */
	ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
	if (!it.it_lock_mode ||
	    !(it.it_lock_bits & MDS_INODELOCK_OPEN)) {
		/* open lock must return for lease */
		CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
		       PFID(ll_inode2fid(inode)), it.it_lock_mode,
		       it.it_lock_bits);
		GOTO(out_close, rc = -EPROTO);
	}

	ll_intent_release(&it);
	RETURN(och);

out_close:
	/* Cancel open lock */
	if (it.it_lock_mode != 0) {
		ldlm_lock_decref_and_cancel(&och->och_lease_handle,
					    it.it_lock_mode);
		it.it_lock_mode = 0;
		och->och_lease_handle.cookie = 0ULL;
	}
	rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
	if (rc2 < 0)
		CERROR("%s: error closing file "DFID": %d\n",
		       sbi->ll_fsname, PFID(&ll_i2info(inode)->lli_fid), rc2);
	och = NULL; /* och has been freed in ll_close_inode_openhandle() */
out_release_it:
	ll_intent_release(&it);
out:
	if (och != NULL)
		OBD_FREE_PTR(och);
	RETURN(ERR_PTR(rc));
}
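
/*
 * File leases are normally driven from userspace through the lease ioctls
 * (LL_IOC_SET_LEASE and friends), e.g. by tools such as "lfs migrate" and
 * "lfs mirror" that need to detect concurrent opens of the file they are
 * rewriting.
 */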

/**
 * Check whether a layout swap can be done between two inodes.
 *
 * \param[in] inode1	First inode to check
 * \param[in] inode2	Second inode to check
 *
 * \retval 0 on success, layout swap can be performed between both inodes
 * \retval negative error code if requirements are not met
 */
static int ll_check_swap_layouts_validity(struct inode *inode1,
					  struct inode *inode2)
{
	if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
		return -EINVAL;

	if (inode_permission(inode1, MAY_WRITE) ||
	    inode_permission(inode2, MAY_WRITE))
		return -EPERM;

	if (inode1->i_sb != inode2->i_sb)
		return -EXDEV;

	return 0;
}

static int ll_swap_layouts_close(struct obd_client_handle *och,
				 struct inode *inode, struct inode *inode2)
{
	const struct lu_fid *fid1 = ll_inode2fid(inode);
	const struct lu_fid *fid2;
	int rc;

	CDEBUG(D_INODE, "%s: biased close of file "DFID"\n",
	       ll_i2sbi(inode)->ll_fsname, PFID(fid1));

	rc = ll_check_swap_layouts_validity(inode, inode2);
	if (rc < 0)
		GOTO(out_free_och, rc);

	/* We now know that inode2 is a lustre inode */
	fid2 = ll_inode2fid(inode2);

	rc = lu_fid_cmp(fid1, fid2);
	if (rc == 0)
		GOTO(out_free_och, rc = -EINVAL);

	/* Close the file and {swap,merge} layouts between inode & inode2.
	 * NB: lease lock handle is released in mdc_close_layout_swap_pack()
	 * because we still need it to pack l_remote_handle to MDT. */
	rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP,
				       inode2);

	och = NULL; /* freed in ll_close_inode_openhandle() */

out_free_och:
	if (och != NULL)
		OBD_FREE_PTR(och);

	RETURN(rc);
}

/**
 * Release lease and close the file.
 * It will check if the lease has ever broken.
 */
static int ll_lease_close_intent(struct obd_client_handle *och,
				 struct inode *inode,
				 bool *lease_broken, enum mds_op_bias bias,
				 void *data)
{
	struct ldlm_lock *lock;
	bool cancelled = true;
	int rc;

	lock = ldlm_handle2lock(&och->och_lease_handle);
	if (lock != NULL) {
		lock_res_and_lock(lock);
		cancelled = ldlm_is_cancel(lock);
		unlock_res_and_lock(lock);
		LDLM_LOCK_PUT(lock);
	}

	CDEBUG(D_INODE, "lease for "DFID" broken? %d, bias: %x\n",
	       PFID(&ll_i2info(inode)->lli_fid), cancelled, bias);

	if (lease_broken != NULL)
		*lease_broken = cancelled;

	if (!cancelled && !bias)
		ldlm_cli_cancel(&och->och_lease_handle, 0);

	if (cancelled) { /* no need to execute intent */
		bias = 0;
		data = NULL;
	}

	rc = ll_close_inode_openhandle(inode, och, bias, data);
	RETURN(rc);
}

static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
			  bool *lease_broken)
{
	return ll_lease_close_intent(och, inode, lease_broken, 0, NULL);
}

/**
 * After lease is taken, send the RPC MDS_REINT_RESYNC to the MDT
 */
static int ll_lease_file_resync(struct obd_client_handle *och,
				struct inode *inode, unsigned long arg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ll_ioc_lease_id ioc;
	__u64 data_version_unused;
	int rc;

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		RETURN(PTR_ERR(op_data));

	if (copy_from_user(&ioc, (struct ll_ioc_lease_id __user *)arg,
			   sizeof(ioc)))
		GOTO(out, rc = -EFAULT);

	/* before starting file resync, it's necessary to clean up page cache
	 * in client memory, otherwise once the layout version is increased,
	 * writing back cached data will be denied by the OSTs. */
	rc = ll_data_version(inode, &data_version_unused, LL_DV_WR_FLUSH);
	if (rc)
		GOTO(out, rc);

	op_data->op_lease_handle = och->och_lease_handle;
	op_data->op_mirror_id = ioc.lil_mirror_id;
	rc = md_file_resync(sbi->ll_md_exp, op_data);
	if (rc)
		GOTO(out, rc);

out:
	ll_finish_md_op_data(op_data);
	return rc;
}
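
/*
 * In practice this resync path is driven by "lfs mirror resync", which takes
 * a lease on the mirrored file and then issues the resync ioctl that lands
 * here; the MDT bumps the layout version so stale mirrors can be detected.
 */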

int ll_merge_attr(const struct lu_env *env, struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct cl_attr *attr = vvp_env_thread_attr(env);
	s64 atime;
	s64 mtime;
	s64 ctime;
	int rc = 0;

	ll_inode_size_lock(inode);

	/* Merge timestamps the most recently obtained from MDS with
	 * timestamps obtained from OSTs.
	 *
	 * Do not overwrite atime of inode because it may be refreshed
	 * by file_accessed() function. If the read was served by cache
	 * data, there is no RPC to be sent so that atime may not be
	 * transferred to OSTs at all. MDT only updates atime at close time
	 * if it's at least 'mdd.*.atime_diff' older.
	 * All in all, the atime in Lustre does not strictly comply with
	 * POSIX. Solving this problem needs to send an RPC to MDT for each
	 * read, this will hurt performance.
	 */
	if (test_and_clear_bit(LLIF_UPDATE_ATIME, &lli->lli_flags) ||
	    inode->i_atime.tv_sec < lli->lli_atime)
		inode->i_atime.tv_sec = lli->lli_atime;

	inode->i_mtime.tv_sec = lli->lli_mtime;
	inode->i_ctime.tv_sec = lli->lli_ctime;

	mtime = inode->i_mtime.tv_sec;
	atime = inode->i_atime.tv_sec;
	ctime = inode->i_ctime.tv_sec;

	cl_object_attr_lock(obj);
	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_MERGE))
		rc = -EINVAL;
	else
		rc = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);

	if (rc != 0)
		GOTO(out_size_unlock, rc = (rc == -ENODATA ? 0 : rc));

	if (atime < attr->cat_atime)
		atime = attr->cat_atime;

	if (ctime < attr->cat_ctime)
		ctime = attr->cat_ctime;

	if (mtime < attr->cat_mtime)
		mtime = attr->cat_mtime;

	CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n",
	       PFID(&lli->lli_fid), attr->cat_size);

	i_size_write(inode, attr->cat_size);
	inode->i_blocks = attr->cat_blocks;

	inode->i_mtime.tv_sec = mtime;
	inode->i_atime.tv_sec = atime;
	inode->i_ctime.tv_sec = ctime;

out_size_unlock:
	ll_inode_size_unlock(inode);

	RETURN(rc);
}

/**
 * Set designated mirror for I/O.
 *
 * So far only read, write, and truncate can support issuing I/O to a
 * designated mirror.
 */
void ll_io_set_mirror(struct cl_io *io, const struct file *file)
{
	struct ll_file_data *fd = file->private_data;

	/* clear layout version for generic(non-resync) I/O in case it carries
	 * stale layout version due to I/O restart */
	io->ci_layout_version = 0;

	/* FLR: disable non-delay for designated mirror I/O because obviously
	 * only one mirror is available */
	if (fd->fd_designated_mirror > 0) {
		io->ci_ndelay = 0;
		io->ci_designated_mirror = fd->fd_designated_mirror;
		io->ci_layout_version = fd->fd_layout_version;
	}

	CDEBUG(D_VFSTRACE, "%s: designated mirror: %d\n",
	       file->f_path.dentry->d_name.name, io->ci_designated_mirror);
}
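
/*
 * The designated mirror itself is chosen from userspace: tools such as
 * "lfs mirror read"/"lfs mirror write" use the LL_IOC_FLR_SET_MIRROR ioctl
 * to store a mirror id in fd_designated_mirror, which the I/O path above
 * then honours.
 */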

static bool file_is_noatime(const struct file *file)
{
	const struct vfsmount *mnt = file->f_path.mnt;
	const struct inode *inode = file_inode((struct file *)file);

	/* Adapted from file_accessed() and touch_atime(). */
	if (file->f_flags & O_NOATIME)
		return true;

	if (inode->i_flags & S_NOATIME)
		return true;

	if (IS_NOATIME(inode))
		return true;

	if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
		return true;

	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return true;

	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
		return true;

	return false;
}

void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
		struct vvp_io_args *args)
{
	struct inode *inode = file_inode(file);
	struct ll_file_data *fd = file->private_data;

	io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
	io->ci_lock_no_expand = fd->ll_lock_no_expand;

	if (iot == CIT_WRITE) {
		io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
		io->u.ci_wr.wr_sync = !!(file->f_flags & O_SYNC ||
					 file->f_flags & O_DIRECT ||
					 IS_SYNC(inode));
#ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
		io->u.ci_wr.wr_sync |= !!(args &&
					  (args->u.normal.via_iocb->ki_flags &
					   IOCB_DSYNC));
#endif
	}

	io->ci_obj = ll_i2info(inode)->lli_clob;
	io->ci_lockreq = CILR_MAYBE;
	if (ll_file_nolock(file)) {
		io->ci_lockreq = CILR_NEVER;
		io->ci_no_srvlock = 1;
	} else if (file->f_flags & O_APPEND) {
		io->ci_lockreq = CILR_MANDATORY;
	}
	io->ci_noatime = file_is_noatime(file);
	io->ci_async_readahead = false;

	/* FLR: only use non-delay I/O for read as there is only one
	 * available mirror for write. */
	io->ci_ndelay = !(iot == CIT_WRITE);

	ll_io_set_mirror(io, file);
}

static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
			ssize_t count)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	enum obd_heat_type sample_type;
	enum obd_heat_type iobyte_type;
	__u64 now = ktime_get_real_seconds();

	if (!ll_sbi_has_file_heat(sbi) ||
	    lli->lli_heat_flags & LU_HEAT_FLAG_OFF)
		return;

	if (iot == CIT_READ) {
		sample_type = OBD_HEAT_READSAMPLE;
		iobyte_type = OBD_HEAT_READBYTE;
	} else if (iot == CIT_WRITE) {
		sample_type = OBD_HEAT_WRITESAMPLE;
		iobyte_type = OBD_HEAT_WRITEBYTE;
	} else {
		return;
	}

	spin_lock(&lli->lli_heat_lock);
	obd_heat_add(&lli->lli_heat_instances[sample_type], now, 1,
		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
	obd_heat_add(&lli->lli_heat_instances[iobyte_type], now, count,
		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
	spin_unlock(&lli->lli_heat_lock);
}
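
/*
 * The heat accumulated here is exposed back to userspace: per-file values
 * can be read (e.g. with "lfs heat_get"), and the feature is toggled and
 * tuned through llite parameters such as file_heat, heat_decay_percentage
 * and heat_period_second.
 */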

static ssize_t
ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
		   struct file *file, enum cl_io_type iot,
		   loff_t *ppos, size_t count)
{
	struct vvp_io *vio = vvp_env_io(env);
	struct inode *inode = file_inode(file);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_file_data *fd = file->private_data;
	struct range_lock range;
	struct cl_io *io;
	ssize_t result = 0;
	int rc = 0;
	unsigned int retried = 0, dio_lock = 0;
	bool is_aio = false;
	struct cl_dio_aio *ci_aio = NULL;
	size_t per_bytes;
	bool partial_io = false;
	size_t max_io_pages, max_cached_pages;

	CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, count: %zu\n",
	       file_dentry(file)->d_name.name,
	       iot == CIT_READ ? "read" : "write", *ppos, count);

	max_io_pages = PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT;
	max_cached_pages = sbi->ll_cache->ccc_lru_max;
	if (max_io_pages > (max_cached_pages >> 2))
		max_io_pages = max_cached_pages >> 2;

	io = vvp_env_thread_io(env);
	if (file->f_flags & O_DIRECT) {
		if (!is_sync_kiocb(args->u.normal.via_iocb))
			is_aio = true;
		ci_aio = cl_aio_alloc(args->u.normal.via_iocb);
		if (!ci_aio)
			GOTO(out, rc = -ENOMEM);
	}

restart:
	/**
	 * IO block size need be aware of cached page limit, otherwise
	 * if we have small max_cached_mb but large block IO issued, io
	 * could not be finished and blocked whole client.
	 */
	if (file->f_flags & O_DIRECT)
		per_bytes = count;
	else
		per_bytes = min(max_io_pages << PAGE_SHIFT, count);
	partial_io = per_bytes < count;
	io = vvp_env_thread_io(env);
	ll_io_init(io, file, iot, args);
	io->ci_aio = ci_aio;
	io->ci_dio_lock = dio_lock;
	io->ci_ndelay_tried = retried;

	if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
		bool range_locked = false;

		if (file->f_flags & O_APPEND)
			range_lock_init(&range, 0, LUSTRE_EOF);
		else
			range_lock_init(&range, *ppos, *ppos + per_bytes - 1);

		vio->vui_fd = file->private_data;
		vio->vui_iter = args->u.normal.via_iter;
		vio->vui_iocb = args->u.normal.via_iocb;
		/* Direct IO reads must also take range lock,
		 * or multiple reads will try to work on the same pages
		 * See LU-6227 for details.
		 */
		if (((iot == CIT_WRITE) ||
		     (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
		    !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
			CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
			       RL_PARA(&range));
			rc = range_lock(&lli->lli_write_tree, &range);
			if (rc < 0)
				GOTO(out, rc);

			range_locked = true;
		}

		ll_cl_add(file, env, io, LCC_RW);
		rc = cl_io_loop(env, io);
		ll_cl_remove(file, env);

		if (range_locked) {
			CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
			       RL_PARA(&range));
			range_unlock(&lli->lli_write_tree, &range);
		}
	} else {
		/* cl_io_rw_init() handled IO */
		rc = io->ci_result;
	}

	/**
	 * In order to move AIO forward, ci_nob was increased, but that
	 * doesn't mean the IO has finished; it just means the IO has been
	 * submitted, and we will always return -EIOCBQUEUED to the caller,
	 * so we can only return the number of bytes in the non-AIO case.
	 */
	if (io->ci_nob > 0) {
		if (!is_aio) {
			result += io->ci_nob;
			*ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
		}
		count -= io->ci_nob;

		/* prepare IO restart */
		if (count > 0)
			args->u.normal.via_iter = vio->vui_iter;

		if (partial_io) {
			/**
			 * Reexpand iov count because it was zeroed by
			 * the finished IO.
			 */
			iov_iter_reexpand(vio->vui_iter, count);
			if (per_bytes == io->ci_nob)
				io->ci_need_restart = 1;
		}
	}
out:
	cl_io_fini(env, io);

	CDEBUG(D_VFSTRACE,
	       "%s: %d io complete with rc: %d, result: %zd, restart: %d\n",
	       file->f_path.dentry->d_name.name,
	       iot, rc, result, io->ci_need_restart);

	if ((rc == 0 || rc == -ENODATA || rc == -ENOLCK) &&
	    count > 0 && io->ci_need_restart) {
		CDEBUG(D_VFSTRACE,
		       "%s: restart %s from %lld, count: %zu, ret: %zd, rc: %d\n",
		       file_dentry(file)->d_name.name,
		       iot == CIT_READ ? "read" : "write",
		       *ppos, count, result, rc);
		/* preserve the tried count for FLR */
		retried = io->ci_ndelay_tried;
		dio_lock = io->ci_dio_lock;
		goto restart;
	}

	if (io->ci_aio) {
		/*
		 * VFS will call aio_complete() if no -EIOCBQUEUED
		 * is returned for AIO, so we can not call aio_complete()
		 * in our end_io().
		 */
		if (rc != -EIOCBQUEUED)
			io->ci_aio->cda_no_aio_complete = 1;
		/**
		 * Drop one extra reference so that end_io() could be
		 * called for this IO context; we could call it after
		 * we make sure all AIO requests have been processed.
		 */
		cl_sync_io_note(env, &io->ci_aio->cda_sync,
				rc == -EIOCBQUEUED ? 0 : rc);
		if (!is_aio) {
			cl_aio_free(io->ci_aio);
			io->ci_aio = NULL;
		}
	}

	if (iot == CIT_READ) {
		if (result > 0)
			ll_stats_ops_tally(ll_i2sbi(inode),
					   LPROC_LL_READ_BYTES, result);
	} else if (iot == CIT_WRITE) {
		if (result > 0) {
			ll_stats_ops_tally(ll_i2sbi(inode),
					   LPROC_LL_WRITE_BYTES, result);
			fd->fd_write_failed = false;
		} else if (result == 0 && rc == 0) {
			rc = io->ci_result;
			if (rc < 0)
				fd->fd_write_failed = true;
			else
				fd->fd_write_failed = false;
		} else if (rc != -ERESTARTSYS) {
			fd->fd_write_failed = true;
		}
	}

	CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
	if (result > 0)
		ll_heat_add(inode, iot, result);

	RETURN(result > 0 ? result : rc);
}
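
/*
 * Two things drive the restart loop above: FLR reads are retried against
 * another mirror (io->ci_ndelay_tried carries the attempt count across
 * restarts), and large buffered IO is deliberately chunked to at most a
 * quarter of max_cached_mb per pass so that a single syscall cannot pin the
 * whole client page cache.
 */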

/**
 * The purpose of fast read is to overcome per I/O overhead and improve IOPS
 * especially for small I/O.
 *
 * To serve a read request, CLIO has to create and initialize a cl_io and
 * then request a DLM lock. This has turned out to have significant overhead
 * and affects the performance of small I/O dramatically.
 *
 * It's not necessary to create a cl_io for each I/O. With the help of read
 * ahead, most of the pages being read are already in memory cache and we can
 * read those pages directly because if the pages exist, the corresponding DLM
 * lock must exist so that page content must be valid.
 *
 * In fast read implementation, the llite speculatively finds and reads pages
 * in memory cache. There are three scenarios for fast read:
 *   - If the page exists and is uptodate, kernel VM will provide the data and
 *     CLIO won't be intervened;
 *   - If the page was brought into memory by read ahead, it will be exported
 *     and read ahead parameters will be updated;
 *   - Otherwise the page is not in memory, we can't do fast read. Therefore,
 *     it will go back and invoke normal read, i.e., a cl_io will be created
 *     and DLM lock will be requested.
 *
 * POSIX compliance: posix standard states that read is intended to be atomic.
 * Lustre read implementation is in line with Linux kernel read implementation
 * and neither of them complies with POSIX standard in this matter. Fast read
 * doesn't make the situation worse on single node but it may interleave write
 * results from multiple nodes due to short read handling in ll_file_aio_read().
 *
 * \param iocb - kiocb from kernel
 * \param iter - user space buffers where the data will be copied
 *
 * \retval - number of bytes have been read, or error code if error occurred.
 */
static ssize_t
ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t result;

	if (!ll_sbi_has_fast_read(ll_i2sbi(file_inode(iocb->ki_filp))))
		return 0;

	/* NB: we can't do direct IO for fast read because it will need a lock
	 * to make IO engine happy. */
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return 0;

	result = generic_file_read_iter(iocb, iter);

	/* If the first page is not in cache, generic_file_aio_read() will be
	 * returned with -ENODATA.
	 * See corresponding code in ll_readpage(). */
	if (result == -ENODATA)
		result = 0;

	if (result > 0) {
		ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
		ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
				   LPROC_LL_READ_BYTES, result);
	}

	return result;
}
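
/*
 * Fast read can be turned off per mount: clearing the llite "fast_read"
 * tunable (checked via ll_sbi_has_fast_read() above) forces every read back
 * through the full cl_io path.
 */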

/*
 * Read from a file (through the page cache).
 */
static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct lu_env *env;
	struct vvp_io_args *args;
	struct file *file = iocb->ki_filp;
	ssize_t result;
	ssize_t rc2;
	__u16 refcheck;
	ktime_t kstart = ktime_get();
	bool cached;

	if (!iov_iter_count(to))
		return 0;

	/**
	 * Currently when PCC read failed, we do not fall back to the
	 * normal read path, just return the error.
	 * The reason is that: for RW-PCC, the file data may be modified
	 * in the PCC and inconsistent with the data on OSTs (or file
	 * data has been removed from the Lustre file system), at this
	 * time, fallback to the normal read path may read the wrong
	 * data.
	 * TODO: for RO-PCC (readonly PCC), fall back to normal read
	 * path: read data from data copy on OSTs.
	 */
	result = pcc_file_read_iter(iocb, to, &cached);
	if (cached)
		GOTO(out, result);

	ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to));

	result = ll_do_fast_read(iocb, to);
	if (result < 0 || iov_iter_count(to) == 0)
		GOTO(out, result);

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	args = ll_env_args(env);
	args->u.normal.via_iter = to;
	args->u.normal.via_iocb = iocb;

	rc2 = ll_file_io_generic(env, args, file, CIT_READ,
				 &iocb->ki_pos, iov_iter_count(to));
	if (rc2 > 0)
		result += rc2;
	else if (result == 0)
		result = rc2;

	cl_env_put(env, &refcheck);
out:
	if (result > 0) {
		ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
				  file->private_data, iocb->ki_pos, result,
				  READ);
		ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ,
				   ktime_us_delta(ktime_get(), kstart));
	}

	RETURN(result);
}

/**
 * Similar trick to ll_do_fast_read, this improves write speed for tiny writes.
 * If a page is already in the page cache and dirty (and some other things -
 * See ll_tiny_write_begin for the instantiation of these rules), then we can
 * write to it without doing a full I/O, because Lustre already knows about it
 * and will write it out. This saves a lot of processing time.
 *
 * All writes here are within one page, so exclusion is handled by the page
 * lock on the vm page. We do not do tiny writes for writes which touch
 * multiple pages because it's very unlikely multiple sequential pages are
 * already dirty.
 *
 * We limit these to < PAGE_SIZE because PAGE_SIZE writes are relatively common
 * and are unlikely to be to already dirty pages.
 *
 * Attribute updates are important here, we do them in ll_tiny_write_end.
 */
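/*
 * Like fast read, tiny writes can be disabled per mount through the llite
 * "tiny_write" tunable (checked via ll_sbi_has_tiny_write() in the write
 * path below).
 */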
static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t count = iov_iter_count(iter);
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	bool lock_inode = !IS_NOSEC(inode);
	ssize_t result = 0;

	/* Restrict writes to single page and < PAGE_SIZE. See comment at top
	 * of function for why.
	 */
	if (count >= PAGE_SIZE ||
	    (iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
		RETURN(0);

	if (unlikely(lock_inode))
		inode_lock(inode);
	result = __generic_file_write_iter(iocb, iter);

	if (unlikely(lock_inode))
		inode_unlock(inode);

	/* If the page is not already dirty, ll_tiny_write_begin returns
	 * -ENODATA. We continue on to normal write.
	 */
	if (result == -ENODATA)
		result = 0;

	if (result > 0) {
		ll_heat_add(inode, CIT_WRITE, result);
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES,
				   result);
		set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
	}

	CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);

	RETURN(result);
}

/*
 * Write to a file (through the page cache).
 */
static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct vvp_io_args *args;
	struct lu_env *env;
	ssize_t rc_tiny = 0, rc_normal;
	struct file *file = iocb->ki_filp;
	__u16 refcheck;
	bool cached;
	ktime_t kstart = ktime_get();
	ssize_t result;

	if (!iov_iter_count(from))
		GOTO(out, rc_normal = 0);

	/**
	 * When PCC write failed, we usually do not fall back to the normal
	 * write path, just return the error. But there is a special case when
	 * returned error code is -ENOSPC due to running out of space on PCC HSM
	 * backend. At this time, it will fall back to normal I/O path and
	 * retry the I/O. As the file is in HSM released state, it will restore
	 * the file data to OSTs first and redo the write again. And the
	 * restore process will revoke the layout lock and detach the file
	 * from PCC cache automatically.
	 */
	result = pcc_file_write_iter(iocb, from, &cached);
	if (cached && result != -ENOSPC && result != -EDQUOT)
		GOTO(out, rc_normal = result);

	/* NB: we can't do direct IO for tiny writes because they use the page
	 * cache, we can't do sync writes because tiny writes can't flush
	 * pages, and we can't do append writes because we can't guarantee the
	 * required DLM locks are held to protect file size.
	 */
	if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
	    !(file->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
		rc_tiny = ll_do_tiny_write(iocb, from);

	/* In case of error, go on and try normal write - Only stop if tiny
	 * write completed I/O.
	 */
	if (iov_iter_count(from) == 0)
		GOTO(out, rc_normal = rc_tiny);

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	args = ll_env_args(env);
	args->u.normal.via_iter = from;
	args->u.normal.via_iocb = iocb;

	rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE,
				       &iocb->ki_pos, iov_iter_count(from));

	/* On success, combine bytes written. */
	if (rc_tiny >= 0 && rc_normal > 0)
		rc_normal += rc_tiny;
	/* On error, only return error from normal write if tiny write did not
	 * write any bytes. Otherwise return bytes written by tiny write.
	 */
	else if (rc_tiny > 0)
		rc_normal = rc_tiny;

	cl_env_put(env, &refcheck);
out:
	if (rc_normal > 0) {
		ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
				  file->private_data, iocb->ki_pos,
				  rc_normal, WRITE);
		ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE,
				   ktime_us_delta(ktime_get(), kstart));
	}

	RETURN(rc_normal);
}

#ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
/*
 * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
 */
static int ll_file_get_iov_count(const struct iovec *iov,
				 unsigned long *nr_segs, size_t *count,
				 int access_flags)
{
	size_t cnt = 0;
	unsigned long seg;

	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}

static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	struct iov_iter to;
	size_t iov_count;
	ssize_t result;

	result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_READ);
	if (result)
		RETURN(result);

	if (!iov_count)
		RETURN(0);

# ifdef HAVE_IOV_ITER_INIT_DIRECTION
	iov_iter_init(&to, READ, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
	iov_iter_init(&to, iov, nr_segs, iov_count, 0);
# endif /* HAVE_IOV_ITER_INIT_DIRECTION */

	result = ll_file_read_iter(iocb, &to);

	RETURN(result);
}

static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = count };
	struct kiocb kiocb;
	ssize_t result;

	if (!count)
		RETURN(0);

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
	kiocb.ki_left = count;
#elif defined(HAVE_KI_NBYTES)
	kiocb.ki_nbytes = count;
#endif

	result = ll_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
	*ppos = kiocb.ki_pos;

	RETURN(result);
}

/*
 * Write to a file (through the page cache).
 */
static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				 unsigned long nr_segs, loff_t pos)
{
	struct iov_iter from;
	size_t iov_count;
	ssize_t result;

	result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_WRITE);
	if (result)
		RETURN(result);

	if (!iov_count)
		RETURN(0);

# ifdef HAVE_IOV_ITER_INIT_DIRECTION
	iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
	iov_iter_init(&from, iov, nr_segs, iov_count, 0);
# endif /* HAVE_IOV_ITER_INIT_DIRECTION */

	result = ll_file_write_iter(iocb, &from);

	RETURN(result);
}

static ssize_t ll_file_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf,
			     .iov_len = count };
	struct kiocb kiocb;
	ssize_t result;

	if (!count)
		RETURN(0);

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
	kiocb.ki_left = count;
#elif defined(HAVE_KI_NBYTES)
	kiocb.ki_nbytes = count;
#endif

	result = ll_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
	*ppos = kiocb.ki_pos;

	RETURN(result);
}
#endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */

int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
			     __u64 flags, struct lov_user_md *lum, int lum_size)
{
	struct lookup_intent oit = {
		.it_op = IT_OPEN,
		.it_flags = flags | MDS_OPEN_BY_FID,
	};
	int rc;

	if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
	    le32_to_cpu(LOV_MAGIC_MAGIC)) {
		/* this code will only exist for big-endian systems */
		lustre_swab_lov_user_md(lum, 0);
	}

	ll_inode_size_lock(inode);
	rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
	if (rc < 0)
		GOTO(out_unlock, rc);

	ll_release_openhandle(dentry, &oit);

out_unlock:
	ll_inode_size_unlock(inode);
	ll_intent_release(&oit);

	RETURN(rc);
}
2177 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
2178 struct lov_mds_md **lmmp, int *lmm_size,
2179 struct ptlrpc_request **request)
2181 struct ll_sb_info *sbi = ll_i2sbi(inode);
2182 struct mdt_body *body;
2183 struct lov_mds_md *lmm = NULL;
2184 struct ptlrpc_request *req = NULL;
2185 struct md_op_data *op_data;
2190 rc = ll_get_default_mdsize(sbi, &lmmsize);
2194 op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
2195 strlen(filename), lmmsize,
2196 LUSTRE_OPC_ANY, NULL);
2197 if (IS_ERR(op_data))
2198 RETURN(PTR_ERR(op_data));
2200 op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
2201 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
2202 ll_finish_md_op_data(op_data);
2204 CDEBUG(D_INFO, "md_getattr_name failed "
2205 "on %s: rc %d\n", filename, rc);
2209 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2210 LASSERT(body != NULL); /* checked by mdc_getattr_name */
2212 lmmsize = body->mbo_eadatasize;
2214 if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
2216 GOTO(out, rc = -ENODATA);
2218 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
2219 LASSERT(lmm != NULL);
2221 if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
2222 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
2223 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_COMP_V1) &&
2224 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_FOREIGN))
2225 GOTO(out, rc = -EPROTO);
2228 * This is coming from the MDS, so is probably in
2229 * little endian. We convert it to host endian before
2230 * passing it to userspace.
2232 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
2233 int stripe_count = 0;
2235 if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
2236 lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
2237 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
2238 if (le32_to_cpu(lmm->lmm_pattern) &
2239 LOV_PATTERN_F_RELEASED)
2241 lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
2243 /* if this function is called for a directory, we should
2244 * avoid swabbing non-existent lsm objects
2246 if (lmm->lmm_magic == LOV_MAGIC_V1 &&
2247 S_ISREG(body->mbo_mode))
2248 lustre_swab_lov_user_md_objects(
2249 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
2251 else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
2252 S_ISREG(body->mbo_mode))
2253 lustre_swab_lov_user_md_objects(
2254 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
2256 } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
2257 lustre_swab_lov_comp_md_v1(
2258 (struct lov_comp_md_v1 *)lmm);
2262 if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) {
2263 struct lov_comp_md_v1 *comp_v1 = NULL;
2264 struct lov_comp_md_entry_v1 *ent;
2265 struct lov_user_md_v1 *v1;
2269 comp_v1 = (struct lov_comp_md_v1 *)lmm;
2270 /* Dump the striping information */
2271 for (; i < comp_v1->lcm_entry_count; i++) {
2272 ent = &comp_v1->lcm_entries[i];
2273 off = ent->lcme_offset;
2274 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2276 "comp[%d]: stripe_count=%u, stripe_size=%u\n",
2277 i, v1->lmm_stripe_count, v1->lmm_stripe_size);
2281 * Return valid stripe_count and stripe_size instead of 0 for
2282 * DoM files to avoid divide-by-zero for older userspace that
2283 * calls this ioctl, e.g. lustre ADIO driver.
2285 if (lmm->lmm_stripe_count == 0)
2286 lmm->lmm_stripe_count = 1;
2287 if (lmm->lmm_stripe_size == 0) {
2288 /* Since the first component of the file data is placed
2289 * on the MDT for faster access, report the stripe_size of
2290 * the second component, which is what applications doing large IOs use.
2293 if (lmm->lmm_pattern == LOV_PATTERN_MDT)
2294 i = comp_v1->lcm_entry_count > 1 ? 1 : 0;
2296 i = comp_v1->lcm_entry_count > 1 ?
2297 comp_v1->lcm_entry_count - 1 : 0;
2298 ent = &comp_v1->lcm_entries[i];
2299 off = ent->lcme_offset;
2300 v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
2301 lmm->lmm_stripe_size = v1->lmm_stripe_size;
2306 *lmm_size = lmmsize;
2311 static int ll_lov_setea(struct inode *inode, struct file *file,
2314 __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
2315 struct lov_user_md *lump;
2316 int lum_size = sizeof(struct lov_user_md) +
2317 sizeof(struct lov_user_ost_data);
2321 if (!capable(CAP_SYS_ADMIN))
2324 OBD_ALLOC_LARGE(lump, lum_size);
2328 if (copy_from_user(lump, arg, lum_size))
2329 GOTO(out_lump, rc = -EFAULT);
2331 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, lump,
2333 cl_lov_delay_create_clear(&file->f_flags);
2336 OBD_FREE_LARGE(lump, lum_size);
2340 static int ll_file_getstripe(struct inode *inode, void __user *lum, size_t size)
2347 env = cl_env_get(&refcheck);
2349 RETURN(PTR_ERR(env));
2351 rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum, size);
2352 cl_env_put(env, &refcheck);
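/*
 * Userspace sketch (guarded out, not compiled here): reading striping back
 * through the LL_IOC_LOV_GETSTRIPE ioctl served by ll_file_getstripe()
 * above. The buffer sizing below is an assumption for illustration; llapi
 * normally sizes the buffer from the maximum possible EA size.
 */
#if 0
	struct lov_user_md *lum;
	size_t lum_size = sizeof(*lum) +
			  LOV_MAX_STRIPE_COUNT * sizeof(struct lov_user_ost_data);

	lum = calloc(1, lum_size);
	lum->lmm_magic = LOV_USER_MAGIC_V1;
	lum->lmm_stripe_count = LOV_MAX_STRIPE_COUNT;
	if (ioctl(fd, LL_IOC_LOV_GETSTRIPE, lum) == 0)
		printf("stripe_count=%u stripe_size=%u\n",
		       lum->lmm_stripe_count, lum->lmm_stripe_size);
	free(lum);
#endif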
2356 static int ll_lov_setstripe(struct inode *inode, struct file *file,
2359 struct lov_user_md __user *lum = (struct lov_user_md __user *)arg;
2360 struct lov_user_md *klum;
2362 __u64 flags = FMODE_WRITE;
2365 rc = ll_copy_user_md(lum, &klum);
2370 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, klum,
2375 rc = put_user(0, &lum->lmm_stripe_count);
2379 rc = ll_layout_refresh(inode, &gen);
2383 rc = ll_file_getstripe(inode, arg, lum_size);
2384 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2385 ll_i2info(inode)->lli_clob) {
2386 struct iattr attr = { 0 };
2388 rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, &attr,
2389 OP_XVALID_FLAGS, LUSTRE_ENCRYPT_FL);
2392 cl_lov_delay_create_clear(&file->f_flags);
2395 OBD_FREE_LARGE(klum, lum_size);
2401 ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
2403 struct ll_inode_info *lli = ll_i2info(inode);
2404 struct cl_object *obj = lli->lli_clob;
2405 struct ll_file_data *fd = file->private_data;
2406 struct ll_grouplock grouplock;
2411 CWARN("group id for group lock must not be 0\n");
2415 if (ll_file_nolock(file))
2416 RETURN(-EOPNOTSUPP);
2418 if (file->f_flags & O_NONBLOCK) {
2419 if (!mutex_trylock(&lli->lli_group_mutex))
2422 mutex_lock(&lli->lli_group_mutex);
2424 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
2425 CWARN("group lock already existed with gid %lu\n",
2426 fd->fd_grouplock.lg_gid);
2427 GOTO(out, rc = -EINVAL);
2429 if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
2430 if (file->f_flags & O_NONBLOCK)
2431 GOTO(out, rc = -EAGAIN);
2432 mutex_unlock(&lli->lli_group_mutex);
2433 wait_var_event(&lli->lli_group_users, !lli->lli_group_users);
2434 GOTO(retry, rc = 0);
2436 LASSERT(fd->fd_grouplock.lg_lock == NULL);
2439 * XXX: group lock needs to protect all OST objects while PFL
2440 * can add new OST objects during the IO, so we should instantiate
2441 * all OST objects before taking the group lock.
2446 struct cl_layout cl = {
2447 .cl_is_composite = false,
2449 struct lu_extent ext = {
2451 .e_end = OBD_OBJECT_EOF,
2454 env = cl_env_get(&refcheck);
2456 GOTO(out, rc = PTR_ERR(env));
2458 rc = cl_object_layout_get(env, obj, &cl);
2459 if (rc >= 0 && cl.cl_is_composite)
2460 rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
2463 cl_env_put(env, &refcheck);
2468 rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
2469 arg, (file->f_flags & O_NONBLOCK), &grouplock);
2474 fd->fd_flags |= LL_FILE_GROUP_LOCKED;
2475 fd->fd_grouplock = grouplock;
2476 if (lli->lli_group_users == 0)
2477 lli->lli_group_gid = grouplock.lg_gid;
2478 lli->lli_group_users++;
2480 CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
2482 mutex_unlock(&lli->lli_group_mutex);
2487 static int ll_put_grouplock(struct inode *inode, struct file *file,
2490 struct ll_inode_info *lli = ll_i2info(inode);
2491 struct ll_file_data *fd = file->private_data;
2492 struct ll_grouplock grouplock;
2496 mutex_lock(&lli->lli_group_mutex);
2497 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
2498 CWARN("no group lock held\n");
2499 GOTO(out, rc = -EINVAL);
2502 LASSERT(fd->fd_grouplock.lg_lock != NULL);
2504 if (fd->fd_grouplock.lg_gid != arg) {
2505 CWARN("group lock %lu doesn't match current id %lu\n",
2506 arg, fd->fd_grouplock.lg_gid);
2507 GOTO(out, rc = -EINVAL);
2510 grouplock = fd->fd_grouplock;
2511 memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
2512 fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
2514 cl_put_grouplock(&grouplock);
2516 lli->lli_group_users--;
2517 if (lli->lli_group_users == 0) {
2518 lli->lli_group_gid = 0;
2519 wake_up_var(&lli->lli_group_users);
2521 CDEBUG(D_INFO, "group lock %lu released\n", arg);
2524 mutex_unlock(&lli->lli_group_mutex);
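/*
 * Userspace sketch (guarded out): taking and dropping a group lock around
 * I/O via the LL_IOC_GROUP_LOCK/LL_IOC_GROUP_UNLOCK ioctls handled above.
 * The gid value is arbitrary; all cooperating processes must pass the same
 * non-zero gid.
 */
#if 0
	unsigned long gid = 1234;

	if (ioctl(fd, LL_IOC_GROUP_LOCK, gid) == 0) {
		/* ... perform I/O shared with other holders of gid ... */
		ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
	}
#endif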
2530 * Close inode open handle
2532 * \param dentry [in] dentry which contains the inode
2533 * \param it [in,out] intent which contains open info and result
2536 * \retval <0 failure
2538 int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
2540 struct inode *inode = dentry->d_inode;
2541 struct obd_client_handle *och;
2547 /* Root? Do nothing. */
2548 if (is_root_inode(inode))
2551 /* No open handle to close? Move away */
2552 if (!it_disposition(it, DISP_OPEN_OPEN))
2555 LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
2557 OBD_ALLOC(och, sizeof(*och));
2559 GOTO(out, rc = -ENOMEM);
2561 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
2565 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
2567 /* this one is in place of ll_file_open */
2568 if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
2569 ptlrpc_req_finished(it->it_request);
2570 it_clear_disposition(it, DISP_ENQ_OPEN_REF);
2576 * Get the size of the inode for which the FIEMAP mapping is requested.
2577 * Make the FIEMAP get_info call and return the result.
2578 * \param fiemap kernel buffer to hold extents
2579 * \param num_bytes kernel buffer size
2581 static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
2587 struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
2590 /* Checks for fiemap flags */
2591 if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
2592 fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
2596 /* Check for FIEMAP_FLAG_SYNC */
2597 if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
2598 rc = filemap_fdatawrite(inode->i_mapping);
2603 env = cl_env_get(&refcheck);
2605 RETURN(PTR_ERR(env));
2607 if (i_size_read(inode) == 0) {
2608 rc = ll_glimpse_size(inode);
2613 fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
2614 obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
2615 obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
2617 /* If the file size is 0, then there are no objects for mapping */
2618 if (fmkey.lfik_oa.o_size == 0) {
2619 fiemap->fm_mapped_extents = 0;
2623 fmkey.lfik_fiemap = *fiemap;
2625 rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
2626 &fmkey, fiemap, &num_bytes);
2628 cl_env_put(env, &refcheck);
2632 int ll_fid2path(struct inode *inode, void __user *arg)
2634 struct obd_export *exp = ll_i2mdexp(inode);
2635 const struct getinfo_fid2path __user *gfin = arg;
2637 struct getinfo_fid2path *gfout;
2643 if (!capable(CAP_DAC_READ_SEARCH) &&
2644 !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
2647 /* Only need to get the buflen */
2648 if (get_user(pathlen, &gfin->gf_pathlen))
2651 if (pathlen > PATH_MAX)
2654 outsize = sizeof(*gfout) + pathlen;
2655 OBD_ALLOC(gfout, outsize);
2659 if (copy_from_user(gfout, arg, sizeof(*gfout)))
2660 GOTO(gf_free, rc = -EFAULT);
2661 /* append root FID after gfout to let MDT know the root FID so that
2662 * it can look up the correct path; this is mainly for filesets.
2663 * Old servers without fileset mount support will ignore this. */
2664 *gfout->gf_u.gf_root_fid = *ll_inode2fid(inode);
2666 /* Call mdc_iocontrol */
2667 rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
2671 if (copy_to_user(arg, gfout, outsize))
2675 OBD_FREE(gfout, outsize);
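/*
 * Userspace sketch (guarded out): resolving a FID to a path relative to the
 * mount point through OBD_IOC_FID2PATH, as handled by ll_fid2path() above.
 * 'mnt_fd' is assumed to be an fd opened on the client mount; gf_recno = -1
 * is the usual "no specific changelog position".
 */
#if 0
	struct getinfo_fid2path *gf;
	size_t outsize = sizeof(*gf) + PATH_MAX;

	gf = calloc(1, outsize);
	gf->gf_fid = fid;		/* FID to resolve */
	gf->gf_recno = -1;
	gf->gf_linkno = 0;		/* first hard link */
	gf->gf_pathlen = PATH_MAX;
	if (ioctl(mnt_fd, OBD_IOC_FID2PATH, gf) == 0)
		printf("%s\n", gf->gf_u.gf_path);
	free(gf);
#endif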
2680 ll_ioc_data_version(struct inode *inode, struct ioc_data_version *ioc)
2682 struct cl_object *obj = ll_i2info(inode)->lli_clob;
2690 ioc->idv_version = 0;
2691 ioc->idv_layout_version = UINT_MAX;
2693 /* If no file object has been initialized, we consider its version to be 0. */
2697 env = cl_env_get(&refcheck);
2699 RETURN(PTR_ERR(env));
2701 io = vvp_env_thread_io(env);
2703 io->u.ci_data_version.dv_data_version = 0;
2704 io->u.ci_data_version.dv_layout_version = UINT_MAX;
2705 io->u.ci_data_version.dv_flags = ioc->idv_flags;
2708 if (cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj) == 0)
2709 result = cl_io_loop(env, io);
2711 result = io->ci_result;
2713 ioc->idv_version = io->u.ci_data_version.dv_data_version;
2714 ioc->idv_layout_version = io->u.ci_data_version.dv_layout_version;
2716 cl_io_fini(env, io);
2718 if (unlikely(io->ci_need_restart))
2721 cl_env_put(env, &refcheck);
2727 * Read the data_version for inode.
2729 * This value is computed from the stripe object versions on the OSTs.
2730 * The version is computed using server-side locking.
2732 * @param flags whether to sync on the OST side;
2734 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
2735 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
2737 int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
2739 struct ioc_data_version ioc = { .idv_flags = flags };
2742 rc = ll_ioc_data_version(inode, &ioc);
2744 *data_version = ioc.idv_version;
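/*
 * Userspace sketch (guarded out): sampling the data version through
 * LL_IOC_DATA_VERSION, which ends up in ll_ioc_data_version() above. HSM
 * copytools compare versions before and after a copy to detect concurrent
 * modification.
 */
#if 0
	struct ioc_data_version idv = { .idv_flags = LL_DV_RD_FLUSH };

	if (ioctl(fd, LL_IOC_DATA_VERSION, &idv) == 0)
		printf("version=%llu\n",
		       (unsigned long long)idv.idv_version);
#endif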
2750 * Trigger an HSM release request for the provided inode.
2752 int ll_hsm_release(struct inode *inode)
2755 struct obd_client_handle *och = NULL;
2756 __u64 data_version = 0;
2761 CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
2762 ll_i2sbi(inode)->ll_fsname,
2763 PFID(&ll_i2info(inode)->lli_fid));
2765 och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
2767 GOTO(out, rc = PTR_ERR(och));
2769 /* Grab latest data_version and [am]time values */
2770 rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
2774 env = cl_env_get(&refcheck);
2776 GOTO(out, rc = PTR_ERR(env));
2778 rc = ll_merge_attr(env, inode);
2779 cl_env_put(env, &refcheck);
2781 /* If an error happened, we have the wrong size for the file.
2787 /* Release the file.
2788 * NB: lease lock handle is released in mdc_hsm_release_pack() because
2789 * we still need it to pack l_remote_handle to MDT. */
2790 rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
2796 if (och != NULL && !IS_ERR(och)) /* close the file */
2797 ll_lease_close(och, inode, NULL);
2802 struct ll_swap_stack {
2805 struct inode *inode1;
2806 struct inode *inode2;
2811 static int ll_swap_layouts(struct file *file1, struct file *file2,
2812 struct lustre_swap_layouts *lsl)
2814 struct mdc_swap_layouts msl;
2815 struct md_op_data *op_data;
2818 struct ll_swap_stack *llss = NULL;
2821 OBD_ALLOC_PTR(llss);
2825 llss->inode1 = file_inode(file1);
2826 llss->inode2 = file_inode(file2);
2828 rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
2832 /* we use 2 bools because they are easier to swap than 2 bits */
2833 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
2834 llss->check_dv1 = true;
2836 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
2837 llss->check_dv2 = true;
2839 /* we cannot use lsl->sl_dvX directly because we may swap them */
2840 llss->dv1 = lsl->sl_dv1;
2841 llss->dv2 = lsl->sl_dv2;
2843 rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
2844 if (rc == 0) /* same file, done! */
2847 if (rc < 0) { /* sequentialize it */
2848 swap(llss->inode1, llss->inode2);
2850 swap(llss->dv1, llss->dv2);
2851 swap(llss->check_dv1, llss->check_dv2);
2855 if (gid != 0) { /* application asks to flush dirty cache */
2856 rc = ll_get_grouplock(llss->inode1, file1, gid);
2860 rc = ll_get_grouplock(llss->inode2, file2, gid);
2862 ll_put_grouplock(llss->inode1, file1, gid);
2867 /* ultimate check: before swapping the layouts we check whether
2868 * the data version has changed (if requested) */
2869 if (llss->check_dv1) {
2870 rc = ll_data_version(llss->inode1, &dv, 0);
2873 if (dv != llss->dv1)
2874 GOTO(putgl, rc = -EAGAIN);
2877 if (llss->check_dv2) {
2878 rc = ll_data_version(llss->inode2, &dv, 0);
2881 if (dv != llss->dv2)
2882 GOTO(putgl, rc = -EAGAIN);
2885 /* struct md_op_data is used to send the swap args to the MDT;
2886 * only the flags are missing, so we pass struct mdc_swap_layouts
2887 * through md_op_data->op_data */
2888 /* flags from user space have to be converted before they are sent to
2889 * the server; no flag is sent today, they are only used on the client */
2892 op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
2893 0, LUSTRE_OPC_ANY, &msl);
2894 if (IS_ERR(op_data))
2895 GOTO(free, rc = PTR_ERR(op_data));
2897 rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
2898 sizeof(*op_data), op_data, NULL);
2899 ll_finish_md_op_data(op_data);
2906 ll_put_grouplock(llss->inode2, file2, gid);
2907 ll_put_grouplock(llss->inode1, file1, gid);
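/*
 * Userspace sketch (guarded out): swapping the layouts of two open files
 * via LL_IOC_LOV_SWAP_LAYOUTS, which lands in ll_swap_layouts() above.
 * Values are illustrative; a non-zero sl_gid makes the client flush dirty
 * cache under a group lock first.
 */
#if 0
	struct lustre_swap_layouts lsl = {
		.sl_fd	  = fd2,	/* swap partner */
		.sl_gid	  = 5678,	/* non-zero: flush under group lock */
		.sl_flags = SWAP_LAYOUTS_CHECK_DV1,
		.sl_dv1	  = dv1,	/* expected data version of fd1 */
	};

	rc = ioctl(fd1, LL_IOC_LOV_SWAP_LAYOUTS, &lsl);
#endif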
2917 int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
2919 struct obd_export *exp = ll_i2mdexp(inode);
2920 struct md_op_data *op_data;
2924 /* Detect out-of-range masks */
2925 if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
2928 /* Non-root users are forbidden to set or clear flags which are
2929 * NOT defined in HSM_USER_MASK. */
2930 if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
2931 !capable(CAP_SYS_ADMIN))
2934 if (!exp_connect_archive_id_array(exp)) {
2935 /* Detect out-of-range archive id */
2936 if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
2937 (hss->hss_archive_id > LL_HSM_ORIGIN_MAX_ARCHIVE))
2941 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2942 LUSTRE_OPC_ANY, hss);
2943 if (IS_ERR(op_data))
2944 RETURN(PTR_ERR(op_data));
2946 rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, exp, sizeof(*op_data),
2949 ll_finish_md_op_data(op_data);
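/*
 * Userspace sketch (guarded out): setting an HSM flag through
 * LL_IOC_HSM_STATE_SET, served by ll_hsm_state_set() above. HS_DIRTY is
 * within HSM_USER_MASK, so per the checks above no CAP_SYS_ADMIN is
 * needed for it.
 */
#if 0
	struct hsm_state_set hss = {
		.hss_valid   = HSS_SETMASK,
		.hss_setmask = HS_DIRTY,
	};

	rc = ioctl(fd, LL_IOC_HSM_STATE_SET, &hss);
#endif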
2954 static int ll_hsm_import(struct inode *inode, struct file *file,
2955 struct hsm_user_import *hui)
2957 struct hsm_state_set *hss = NULL;
2958 struct iattr *attr = NULL;
2962 if (!S_ISREG(inode->i_mode))
2968 GOTO(out, rc = -ENOMEM);
2970 hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
2971 hss->hss_archive_id = hui->hui_archive_id;
2972 hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
2973 rc = ll_hsm_state_set(inode, hss);
2977 OBD_ALLOC_PTR(attr);
2979 GOTO(out, rc = -ENOMEM);
2981 attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
2982 attr->ia_mode |= S_IFREG;
2983 attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
2984 attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
2985 attr->ia_size = hui->hui_size;
2986 attr->ia_mtime.tv_sec = hui->hui_mtime;
2987 attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
2988 attr->ia_atime.tv_sec = hui->hui_atime;
2989 attr->ia_atime.tv_nsec = hui->hui_atime_ns;
2991 attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
2992 ATTR_UID | ATTR_GID |
2993 ATTR_MTIME | ATTR_MTIME_SET |
2994 ATTR_ATIME | ATTR_ATIME_SET;
2998 rc = ll_setattr_raw(file_dentry(file), attr, 0, true);
3002 inode_unlock(inode);
3014 static inline long ll_lease_type_from_fmode(fmode_t fmode)
3016 return ((fmode & FMODE_READ) ? LL_LEASE_RDLCK : 0) |
3017 ((fmode & FMODE_WRITE) ? LL_LEASE_WRLCK : 0);
3020 static int ll_file_futimes_3(struct file *file, const struct ll_futimes_3 *lfu)
3022 struct inode *inode = file_inode(file);
3024 .ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
3025 ATTR_MTIME | ATTR_MTIME_SET |
3028 .tv_sec = lfu->lfu_atime_sec,
3029 .tv_nsec = lfu->lfu_atime_nsec,
3032 .tv_sec = lfu->lfu_mtime_sec,
3033 .tv_nsec = lfu->lfu_mtime_nsec,
3036 .tv_sec = lfu->lfu_ctime_sec,
3037 .tv_nsec = lfu->lfu_ctime_nsec,
3043 if (!capable(CAP_SYS_ADMIN))
3046 if (!S_ISREG(inode->i_mode))
3050 rc = ll_setattr_raw(file_dentry(file), &ia, OP_XVALID_CTIME_SET,
3052 inode_unlock(inode);
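/*
 * Userspace sketch (guarded out): restoring all three timestamps,
 * including ctime which utimes(2) cannot set, via LL_IOC_FUTIMES_3.
 * Requires CAP_SYS_ADMIN, as enforced above; 'st' is assumed to be a
 * previously saved struct stat.
 */
#if 0
	struct ll_futimes_3 lfu = {
		.lfu_atime_sec = st.st_atime,
		.lfu_mtime_sec = st.st_mtime,
		.lfu_ctime_sec = st.st_ctime,
	};

	rc = ioctl(fd, LL_IOC_FUTIMES_3, &lfu);
#endif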
3057 static enum cl_lock_mode cl_mode_user_to_kernel(enum lock_mode_user mode)
3060 case MODE_READ_USER:
3062 case MODE_WRITE_USER:
3069 static const char *const user_lockname[] = LOCK_MODE_NAMES;
3071 /* Used to allow the upper layers of the client to request an LDLM lock
3072 * without doing an actual read or write.
3074 * Used for ladvise lockahead to manually request specific locks.
3076 * \param[in] file file this ladvise lock request is on
3077 * \param[in] ladvise ladvise struct describing this lock request
3079 * \retval 0 success, no detailed result available (sync requests
3080 * and requests sent to the server [not handled locally]
3081 * cannot return detailed results)
3082 * \retval LLA_RESULT_{SAME,DIFFERENT} - detailed result of the lock request,
3083 * see definitions for details.
3084 * \retval negative negative errno on error
3086 int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise)
3088 struct lu_env *env = NULL;
3089 struct cl_io *io = NULL;
3090 struct cl_lock *lock = NULL;
3091 struct cl_lock_descr *descr = NULL;
3092 struct dentry *dentry = file->f_path.dentry;
3093 struct inode *inode = dentry->d_inode;
3094 enum cl_lock_mode cl_mode;
3095 off_t start = ladvise->lla_start;
3096 off_t end = ladvise->lla_end;
3103 "Lock request: file=%pd, inode=%p, mode=%s start=%llu, end=%llu\n",
3104 dentry, dentry->d_inode,
3105 user_lockname[ladvise->lla_lockahead_mode], (__u64) start,
3108 cl_mode = cl_mode_user_to_kernel(ladvise->lla_lockahead_mode);
3110 GOTO(out, result = cl_mode);
3112 /* Get IO environment */
3113 result = cl_io_get(inode, &env, &io, &refcheck);
3117 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
3120 * nothing to do for this io. This currently happens when
3121 * stripe sub-objects are not yet created.
3123 result = io->ci_result;
3124 } else if (result == 0) {
3125 lock = vvp_env_lock(env);
3126 descr = &lock->cll_descr;
3128 descr->cld_obj = io->ci_obj;
3129 /* Convert byte offsets to pages */
3130 descr->cld_start = cl_index(io->ci_obj, start);
3131 descr->cld_end = cl_index(io->ci_obj, end);
3132 descr->cld_mode = cl_mode;
3133 /* CEF_MUST is used because we do not want to convert a
3134 * lockahead request to a lockless lock */
3135 descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND |
3138 if (ladvise->lla_peradvice_flags & LF_ASYNC)
3139 descr->cld_enq_flags |= CEF_SPECULATIVE;
3141 result = cl_lock_request(env, io, lock);
3143 /* On success, we need to release the lock */
3145 cl_lock_release(env, lock);
3147 cl_io_fini(env, io);
3148 cl_env_put(env, &refcheck);
3150 /* -ECANCELED indicates a matching lock with a different extent
3151 * was already present, and -EEXIST indicates a matching lock
3152 * on exactly the same extent was already present.
3153 * We convert them to positive values for userspace to make
3154 * recognizing true errors easier.
3155 * Note we can only return these detailed results on async requests,
3156 * as sync requests look the same as i/o requests for locking. */
3157 if (result == -ECANCELED)
3158 result = LLA_RESULT_DIFFERENT;
3159 else if (result == -EEXIST)
3160 result = LLA_RESULT_SAME;
3165 static const char *const ladvise_names[] = LU_LADVISE_NAMES;
3167 static int ll_ladvise_sanity(struct inode *inode,
3168 struct llapi_lu_ladvise *ladvise)
3170 struct ll_sb_info *sbi = ll_i2sbi(inode);
3171 enum lu_ladvise_type advice = ladvise->lla_advice;
3172 /* Note the peradvice flags is a 32-bit field, so per-advice flags
3173 * must be in the first 32 bits of enum ladvise_flags */
3174 __u32 flags = ladvise->lla_peradvice_flags;
3175 /* 3 lines at 80 characters per line, should be plenty */
3178 if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) {
3181 "%s: advice with value '%d' not recognized, last supported advice is %s (value '%d'): rc = %d\n",
3182 sbi->ll_fsname, advice,
3183 ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc);
3187 /* Per-advice checks */
3189 case LU_LADVISE_LOCKNOEXPAND:
3190 if (flags & ~LF_LOCKNOEXPAND_MASK) {
3192 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: rc = %d\n",
3193 sbi->ll_fsname, flags, ladvise_names[advice], rc);
3198 case LU_LADVISE_LOCKAHEAD:
3199 /* Currently only READ and WRITE modes can be requested */
3200 if (ladvise->lla_lockahead_mode >= MODE_MAX_USER ||
3201 ladvise->lla_lockahead_mode == 0) {
3203 CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: rc = %d\n",
3204 sbi->ll_fsname, ladvise->lla_lockahead_mode,
3205 ladvise_names[advice], rc);
3210 case LU_LADVISE_WILLREAD:
3211 case LU_LADVISE_DONTNEED:
3213 /* Note the fall-through above - these checks apply to all advices
3214 * except LOCKNOEXPAND */
3215 if (flags & ~LF_DEFAULT_MASK) {
3217 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: rc = %d\n",
3218 sbi->ll_fsname, flags, ladvise_names[advice], rc);
3222 if (ladvise->lla_start >= ladvise->lla_end) {
3224 CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) for %s: rc = %d\n",
3225 sbi->ll_fsname, ladvise->lla_start, ladvise->lla_end,
3226 ladvise_names[advice], rc);
3239 * Give file access advices
3241 * The ladvise interface is similar to the Linux fadvise() system call,
3242 * except it forwards the advice directly from the Lustre client to the
3243 * server. The server-side code will apply the appropriate read-ahead
3244 * and caching techniques for the corresponding files.
3246 * A typical workload for ladvise is e.g. many different clients doing
3247 * small random reads of a file, so prefetching pages into OSS cache
3248 * with big linear reads before the random IO is a net benefit. Fetching
3249 * all that data into each client cache with fadvise() may not be, due
3250 * to much more data being sent to the client.
3252 static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags,
3253 struct llapi_lu_ladvise *ladvise)
3257 struct cl_ladvise_io *lio;
3262 env = cl_env_get(&refcheck);
3264 RETURN(PTR_ERR(env));
3266 io = vvp_env_thread_io(env);
3267 io->ci_obj = ll_i2info(inode)->lli_clob;
3269 /* initialize parameters for ladvise */
3270 lio = &io->u.ci_ladvise;
3271 lio->li_start = ladvise->lla_start;
3272 lio->li_end = ladvise->lla_end;
3273 lio->li_fid = ll_inode2fid(inode);
3274 lio->li_advice = ladvise->lla_advice;
3275 lio->li_flags = flags;
3277 if (cl_io_init(env, io, CIT_LADVISE, io->ci_obj) == 0)
3278 rc = cl_io_loop(env, io);
3282 cl_io_fini(env, io);
3283 cl_env_put(env, &refcheck);
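/*
 * Userspace sketch (guarded out): asking the OSS to prefetch the first
 * 1GiB of a file via LL_IOC_LADVISE (llapi_ladvise() wraps this). The
 * advice count and range are arbitrary illustration values.
 */
#if 0
	struct llapi_ladvise_hdr *hdr;
	size_t size = offsetof(struct llapi_ladvise_hdr, lah_advise[1]);

	hdr = calloc(1, size);
	hdr->lah_magic = LADVISE_MAGIC;
	hdr->lah_count = 1;
	hdr->lah_advise[0].lla_advice = LU_LADVISE_WILLREAD;
	hdr->lah_advise[0].lla_start = 0;
	hdr->lah_advise[0].lla_end = 1ULL << 30;
	rc = ioctl(fd, LL_IOC_LADVISE, hdr);
	free(hdr);
#endif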
3287 static int ll_lock_noexpand(struct file *file, int flags)
3289 struct ll_file_data *fd = file->private_data;
3291 fd->ll_lock_no_expand = !(flags & LF_UNSET);
3296 int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
3299 struct fsxattr fsxattr;
3301 if (copy_from_user(&fsxattr,
3302 (const struct fsxattr __user *)arg,
3306 fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
3307 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
3308 fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
3309 fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
3310 if (copy_to_user((struct fsxattr __user *)arg,
3311 &fsxattr, sizeof(fsxattr)))
3317 int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
3320 * Project Quota ID state is only allowed to change from within the init
3321 * namespace. Enforce that restriction only if we are trying to change
3322 * the quota ID state. Everything else is allowed in user namespaces.
3324 if (current_user_ns() == &init_user_ns)
3327 if (ll_i2info(inode)->lli_projid != fa->fsx_projid)
3330 if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
3331 if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
3334 if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
3341 int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
3344 struct md_op_data *op_data;
3345 struct ptlrpc_request *req = NULL;
3346 struct fsxattr fsxattr;
3347 struct cl_object *obj;
3348 unsigned int inode_flags;
3351 if (copy_from_user(&fsxattr,
3352 (const struct fsxattr __user *)arg,
3356 rc = ll_ioctl_check_project(inode, &fsxattr);
3360 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3361 LUSTRE_OPC_ANY, NULL);
3362 if (IS_ERR(op_data))
3363 RETURN(PTR_ERR(op_data));
3365 inode_flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
3366 op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags);
3367 if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
3368 op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
3369 op_data->op_projid = fsxattr.fsx_projid;
3370 op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
3371 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
3372 ptlrpc_req_finished(req);
3374 GOTO(out_fsxattr, rc);
3375 ll_update_inode_flags(inode, op_data->op_attr_flags);
3377 /* Avoid an OST RPC if this ioctl only sets the project inherit flag */
3378 if (fsxattr.fsx_xflags == 0 ||
3379 fsxattr.fsx_xflags == FS_XFLAG_PROJINHERIT)
3380 GOTO(out_fsxattr, rc);
3382 obj = ll_i2info(inode)->lli_clob;
3384 struct iattr attr = { 0 };
3386 rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS,
3387 fsxattr.fsx_xflags);
3391 ll_finish_md_op_data(op_data);
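/*
 * Userspace sketch (guarded out): setting a project ID and the inherit
 * flag through the standard FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR pair
 * handled above (this is what 'lfs project' does). The project ID is an
 * arbitrary example value.
 */
#if 0
	struct fsxattr fsx;

	ioctl(fd, FS_IOC_FSGETXATTR, &fsx);
	fsx.fsx_projid = 1000;
	fsx.fsx_xflags |= FS_XFLAG_PROJINHERIT;
	rc = ioctl(fd, FS_IOC_FSSETXATTR, &fsx);
#endif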
3395 static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
3398 struct inode *inode = file_inode(file);
3399 struct ll_file_data *fd = file->private_data;
3400 struct ll_inode_info *lli = ll_i2info(inode);
3401 struct obd_client_handle *och = NULL;
3402 struct split_param sp;
3403 struct pcc_param param;
3404 bool lease_broken = false;
3406 enum mds_op_bias bias = 0;
3407 struct file *layout_file = NULL;
3409 size_t data_size = 0;
3410 bool attached = false;
3415 mutex_lock(&lli->lli_och_mutex);
3416 if (fd->fd_lease_och != NULL) {
3417 och = fd->fd_lease_och;
3418 fd->fd_lease_och = NULL;
3420 mutex_unlock(&lli->lli_och_mutex);
3425 fmode = och->och_flags;
3427 switch (ioc->lil_flags) {
3428 case LL_LEASE_RESYNC_DONE:
3429 if (ioc->lil_count > IOC_IDS_MAX)
3430 GOTO(out_lease_close, rc = -EINVAL);
3432 data_size = offsetof(typeof(*ioc), lil_ids[ioc->lil_count]);
3433 OBD_ALLOC(data, data_size);
3435 GOTO(out_lease_close, rc = -ENOMEM);
3437 if (copy_from_user(data, (void __user *)arg, data_size))
3438 GOTO(out_lease_close, rc = -EFAULT);
3440 bias = MDS_CLOSE_RESYNC_DONE;
3442 case LL_LEASE_LAYOUT_MERGE: {
3445 if (ioc->lil_count != 1)
3446 GOTO(out_lease_close, rc = -EINVAL);
3448 arg += sizeof(*ioc);
3449 if (copy_from_user(&fd, (void __user *)arg, sizeof(__u32)))
3450 GOTO(out_lease_close, rc = -EFAULT);
3452 layout_file = fget(fd);
3454 GOTO(out_lease_close, rc = -EBADF);
3456 if ((file->f_flags & O_ACCMODE) == O_RDONLY ||
3457 (layout_file->f_flags & O_ACCMODE) == O_RDONLY)
3458 GOTO(out_lease_close, rc = -EPERM);
3460 data = file_inode(layout_file);
3461 bias = MDS_CLOSE_LAYOUT_MERGE;
3464 case LL_LEASE_LAYOUT_SPLIT: {
3468 if (ioc->lil_count != 2)
3469 GOTO(out_lease_close, rc = -EINVAL);
3471 arg += sizeof(*ioc);
3472 if (copy_from_user(&fdv, (void __user *)arg, sizeof(__u32)))
3473 GOTO(out_lease_close, rc = -EFAULT);
3475 arg += sizeof(__u32);
3476 if (copy_from_user(&mirror_id, (void __user *)arg,
3478 GOTO(out_lease_close, rc = -EFAULT);
3480 layout_file = fget(fdv);
3482 GOTO(out_lease_close, rc = -EBADF);
3484 /* if layout_file == file, it means to destroy the mirror */
3485 sp.sp_inode = file_inode(layout_file);
3486 sp.sp_mirror_id = (__u16)mirror_id;
3488 bias = MDS_CLOSE_LAYOUT_SPLIT;
3491 case LL_LEASE_PCC_ATTACH:
3492 if (ioc->lil_count != 1)
3495 arg += sizeof(*ioc);
3496 if (copy_from_user(&param.pa_archive_id, (void __user *)arg,
3498 GOTO(out_lease_close, rc2 = -EFAULT);
3500 rc2 = pcc_readwrite_attach(file, inode, param.pa_archive_id);
3502 GOTO(out_lease_close, rc2);
3505 /* Grab latest data version */
3506 rc2 = ll_data_version(inode, &param.pa_data_version,
3509 GOTO(out_lease_close, rc2);
3512 bias = MDS_PCC_ATTACH;
3515 /* without close intent */
3520 rc = ll_lease_close_intent(och, inode, &lease_broken, bias, data);
3524 rc = ll_lease_och_release(inode, file);
3533 switch (ioc->lil_flags) {
3534 case LL_LEASE_RESYNC_DONE:
3536 OBD_FREE(data, data_size);
3538 case LL_LEASE_LAYOUT_MERGE:
3539 case LL_LEASE_LAYOUT_SPLIT:
3543 case LL_LEASE_PCC_ATTACH:
3546 rc = pcc_readwrite_attach_fini(file, inode,
3547 param.pa_layout_gen,
3554 rc = ll_lease_type_from_fmode(fmode);
3558 static long ll_file_set_lease(struct file *file, struct ll_ioc_lease *ioc,
3561 struct inode *inode = file_inode(file);
3562 struct ll_inode_info *lli = ll_i2info(inode);
3563 struct ll_file_data *fd = file->private_data;
3564 struct obd_client_handle *och = NULL;
3565 __u64 open_flags = 0;
3571 switch (ioc->lil_mode) {
3572 case LL_LEASE_WRLCK:
3573 if (!(file->f_mode & FMODE_WRITE))
3575 fmode = FMODE_WRITE;
3577 case LL_LEASE_RDLCK:
3578 if (!(file->f_mode & FMODE_READ))
3582 case LL_LEASE_UNLCK:
3583 RETURN(ll_file_unlock_lease(file, ioc, arg));
3588 CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
3590 /* apply for lease */
3591 if (ioc->lil_flags & LL_LEASE_RESYNC)
3592 open_flags = MDS_OPEN_RESYNC;
3593 och = ll_lease_open(inode, file, fmode, open_flags);
3595 RETURN(PTR_ERR(och));
3597 if (ioc->lil_flags & LL_LEASE_RESYNC) {
3598 rc = ll_lease_file_resync(och, inode, arg);
3600 ll_lease_close(och, inode, NULL);
3603 rc = ll_layout_refresh(inode, &fd->fd_layout_version);
3605 ll_lease_close(och, inode, NULL);
3611 mutex_lock(&lli->lli_och_mutex);
3612 if (fd->fd_lease_och == NULL) {
3613 fd->fd_lease_och = och;
3616 mutex_unlock(&lli->lli_och_mutex);
3618 /* currently impossible since only exclusive leases are supported */
3619 ll_lease_close(och, inode, &lease_broken);
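/*
 * Userspace sketch (guarded out): taking a write lease and querying it
 * again via LL_IOC_SET_LEASE/LL_IOC_GET_LEASE. LL_IOC_GET_LEASE returns
 * the LL_LEASE_* type still held (0 once the lease has been broken).
 */
#if 0
	struct ll_ioc_lease ioc = { .lil_mode = LL_LEASE_WRLCK };

	rc = ioctl(fd, LL_IOC_SET_LEASE, &ioc);
	/* ... */
	rc = ioctl(fd, LL_IOC_GET_LEASE);	/* LL_LEASE_WRLCK if intact */
#endif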
3625 static void ll_heat_get(struct inode *inode, struct lu_heat *heat)
3627 struct ll_inode_info *lli = ll_i2info(inode);
3628 struct ll_sb_info *sbi = ll_i2sbi(inode);
3629 __u64 now = ktime_get_real_seconds();
3632 spin_lock(&lli->lli_heat_lock);
3633 heat->lh_flags = lli->lli_heat_flags;
3634 for (i = 0; i < heat->lh_count; i++)
3635 heat->lh_heat[i] = obd_heat_get(&lli->lli_heat_instances[i],
3636 now, sbi->ll_heat_decay_weight,
3637 sbi->ll_heat_period_second);
3638 spin_unlock(&lli->lli_heat_lock);
3641 static int ll_heat_set(struct inode *inode, enum lu_heat_flag flags)
3643 struct ll_inode_info *lli = ll_i2info(inode);
3646 spin_lock(&lli->lli_heat_lock);
3647 if (flags & LU_HEAT_FLAG_CLEAR)
3648 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
3650 if (flags & LU_HEAT_FLAG_OFF)
3651 lli->lli_heat_flags |= LU_HEAT_FLAG_OFF;
3653 lli->lli_heat_flags &= ~LU_HEAT_FLAG_OFF;
3655 spin_unlock(&lli->lli_heat_lock);
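/*
 * Userspace sketch (guarded out): reading the per-inode heat counters
 * through LL_IOC_HEAT_GET, which calls ll_heat_get() above. There are
 * OBD_HEAT_COUNT counters (read/write samples and bytes).
 */
#if 0
	struct lu_heat *heat;
	size_t size = offsetof(struct lu_heat, lh_heat[OBD_HEAT_COUNT]);

	heat = calloc(1, size);
	heat->lh_count = OBD_HEAT_COUNT;
	if (ioctl(fd, LL_IOC_HEAT_GET, heat) == 0)
		printf("read samples: %llu\n",
		       (unsigned long long)heat->lh_heat[OBD_HEAT_READSAMPLE]);
	free(heat);
#endif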
3661 ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3663 struct inode *inode = file_inode(file);
3664 struct ll_file_data *fd = file->private_data;
3668 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%x\n",
3669 PFID(ll_inode2fid(inode)), inode, cmd);
3670 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
3672 /* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
3673 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
3677 case LL_IOC_GETFLAGS:
3678 /* Get the current value of the file flags */
3679 return put_user(fd->fd_flags, (int __user *)arg);
3680 case LL_IOC_SETFLAGS:
3681 case LL_IOC_CLRFLAGS:
3682 /* Set or clear specific file flags */
3683 /* XXX This probably needs checks to ensure the flags are
3684 * not abused, and to handle any flag side effects.
3686 if (get_user(flags, (int __user *) arg))
3689 if (cmd == LL_IOC_SETFLAGS) {
3690 if ((flags & LL_FILE_IGNORE_LOCK) &&
3691 !(file->f_flags & O_DIRECT)) {
3692 CERROR("%s: unable to disable locking on non-O_DIRECT file\n",
3693 current->comm);
3697 fd->fd_flags |= flags;
3699 fd->fd_flags &= ~flags;
3702 case LL_IOC_LOV_SETSTRIPE:
3703 case LL_IOC_LOV_SETSTRIPE_NEW:
3704 RETURN(ll_lov_setstripe(inode, file, (void __user *)arg));
3705 case LL_IOC_LOV_SETEA:
3706 RETURN(ll_lov_setea(inode, file, (void __user *)arg));
3707 case LL_IOC_LOV_SWAP_LAYOUTS: {
3709 struct lustre_swap_layouts lsl;
3711 if (copy_from_user(&lsl, (char __user *)arg,
3712 sizeof(struct lustre_swap_layouts)))
3715 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
3718 file2 = fget(lsl.sl_fd);
3722 /* O_WRONLY or O_RDWR */
3723 if ((file2->f_flags & O_ACCMODE) == O_RDONLY)
3724 GOTO(out, rc = -EPERM);
3726 if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
3727 struct inode *inode2;
3728 struct ll_inode_info *lli;
3729 struct obd_client_handle *och = NULL;
3731 lli = ll_i2info(inode);
3732 mutex_lock(&lli->lli_och_mutex);
3733 if (fd->fd_lease_och != NULL) {
3734 och = fd->fd_lease_och;
3735 fd->fd_lease_och = NULL;
3737 mutex_unlock(&lli->lli_och_mutex);
3739 GOTO(out, rc = -ENOLCK);
3740 inode2 = file_inode(file2);
3741 rc = ll_swap_layouts_close(och, inode, inode2);
3743 rc = ll_swap_layouts(file, file2, &lsl);
3749 case LL_IOC_LOV_GETSTRIPE:
3750 case LL_IOC_LOV_GETSTRIPE_NEW:
3751 RETURN(ll_file_getstripe(inode, (void __user *)arg, 0));
3752 case FS_IOC_GETFLAGS:
3753 case FS_IOC_SETFLAGS:
3754 RETURN(ll_iocontrol(inode, file, cmd, arg));
3755 case FSFILT_IOC_GETVERSION:
3756 case FS_IOC_GETVERSION:
3757 RETURN(put_user(inode->i_generation, (int __user *)arg));
3758 /* We need to special case any other ioctls we want to handle,
3759 * to send them to the MDS/OST as appropriate and to properly
3760 * network encode the arg field. */
3761 case FS_IOC_SETVERSION:
3764 case LL_IOC_GROUP_LOCK:
3765 RETURN(ll_get_grouplock(inode, file, arg));
3766 case LL_IOC_GROUP_UNLOCK:
3767 RETURN(ll_put_grouplock(inode, file, arg));
3768 case IOC_OBD_STATFS:
3769 RETURN(ll_obd_statfs(inode, (void __user *)arg));
3771 case LL_IOC_FLUSHCTX:
3772 RETURN(ll_flush_ctx(inode));
3773 case LL_IOC_PATH2FID: {
3774 if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
3775 sizeof(struct lu_fid)))
3780 case LL_IOC_GETPARENT:
3781 RETURN(ll_getparent(file, (struct getparent __user *)arg));
3783 case OBD_IOC_FID2PATH:
3784 RETURN(ll_fid2path(inode, (void __user *)arg));
3785 case LL_IOC_DATA_VERSION: {
3786 struct ioc_data_version idv;
3789 if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
3792 idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
3793 rc = ll_ioc_data_version(inode, &idv);
3796 copy_to_user((char __user *)arg, &idv, sizeof(idv)))
3802 case LL_IOC_GET_MDTIDX: {
3805 mdtidx = ll_get_mdt_idx(inode);
3809 if (put_user((int)mdtidx, (int __user *)arg))
3814 case OBD_IOC_GETNAME_OLD:
3816 case OBD_IOC_GETDTNAME:
3818 case OBD_IOC_GETMDNAME:
3819 RETURN(ll_get_obd_name(inode, cmd, arg));
3820 case LL_IOC_HSM_STATE_GET: {
3821 struct md_op_data *op_data;
3822 struct hsm_user_state *hus;
3829 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3830 LUSTRE_OPC_ANY, hus);
3831 if (IS_ERR(op_data)) {
3833 RETURN(PTR_ERR(op_data));
3836 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
3839 if (copy_to_user((void __user *)arg, hus, sizeof(*hus)))
3842 ll_finish_md_op_data(op_data);
3846 case LL_IOC_HSM_STATE_SET: {
3847 struct hsm_state_set *hss;
3854 if (copy_from_user(hss, (char __user *)arg, sizeof(*hss))) {
3859 rc = ll_hsm_state_set(inode, hss);
3864 case LL_IOC_HSM_ACTION: {
3865 struct md_op_data *op_data;
3866 struct hsm_current_action *hca;
3873 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3874 LUSTRE_OPC_ANY, hca);
3875 if (IS_ERR(op_data)) {
3877 RETURN(PTR_ERR(op_data));
3880 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
3883 if (copy_to_user((char __user *)arg, hca, sizeof(*hca)))
3886 ll_finish_md_op_data(op_data);
3890 case LL_IOC_SET_LEASE_OLD: {
3891 struct ll_ioc_lease ioc = { .lil_mode = (__u32)arg };
3893 RETURN(ll_file_set_lease(file, &ioc, 0));
3895 case LL_IOC_SET_LEASE: {
3896 struct ll_ioc_lease ioc;
3898 if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
3901 RETURN(ll_file_set_lease(file, &ioc, arg));
3903 case LL_IOC_GET_LEASE: {
3904 struct ll_inode_info *lli = ll_i2info(inode);
3905 struct ldlm_lock *lock = NULL;
3908 mutex_lock(&lli->lli_och_mutex);
3909 if (fd->fd_lease_och != NULL) {
3910 struct obd_client_handle *och = fd->fd_lease_och;
3912 lock = ldlm_handle2lock(&och->och_lease_handle);
3914 lock_res_and_lock(lock);
3915 if (!ldlm_is_cancel(lock))
3916 fmode = och->och_flags;
3918 unlock_res_and_lock(lock);
3919 LDLM_LOCK_PUT(lock);
3922 mutex_unlock(&lli->lli_och_mutex);
3924 RETURN(ll_lease_type_from_fmode(fmode));
3926 case LL_IOC_HSM_IMPORT: {
3927 struct hsm_user_import *hui;
3933 if (copy_from_user(hui, (void __user *)arg, sizeof(*hui))) {
3938 rc = ll_hsm_import(inode, file, hui);
3943 case LL_IOC_FUTIMES_3: {
3944 struct ll_futimes_3 lfu;
3946 if (copy_from_user(&lfu,
3947 (const struct ll_futimes_3 __user *)arg,
3951 RETURN(ll_file_futimes_3(file, &lfu));
3953 case LL_IOC_LADVISE: {
3954 struct llapi_ladvise_hdr *k_ladvise_hdr;
3955 struct llapi_ladvise_hdr __user *u_ladvise_hdr;
3958 int alloc_size = sizeof(*k_ladvise_hdr);
3961 u_ladvise_hdr = (void __user *)arg;
3962 OBD_ALLOC_PTR(k_ladvise_hdr);
3963 if (k_ladvise_hdr == NULL)
3966 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
3967 GOTO(out_ladvise, rc = -EFAULT);
3969 if (k_ladvise_hdr->lah_magic != LADVISE_MAGIC ||
3970 k_ladvise_hdr->lah_count < 1)
3971 GOTO(out_ladvise, rc = -EINVAL);
3973 num_advise = k_ladvise_hdr->lah_count;
3974 if (num_advise >= LAH_COUNT_MAX)
3975 GOTO(out_ladvise, rc = -EFBIG);
3977 OBD_FREE_PTR(k_ladvise_hdr);
3978 alloc_size = offsetof(typeof(*k_ladvise_hdr),
3979 lah_advise[num_advise]);
3980 OBD_ALLOC(k_ladvise_hdr, alloc_size);
3981 if (k_ladvise_hdr == NULL)
3985 * TODO: submit multiple advices to one server in a single RPC
3987 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
3988 GOTO(out_ladvise, rc = -EFAULT);
3990 for (i = 0; i < num_advise; i++) {
3991 struct llapi_lu_ladvise *k_ladvise =
3992 &k_ladvise_hdr->lah_advise[i];
3993 struct llapi_lu_ladvise __user *u_ladvise =
3994 &u_ladvise_hdr->lah_advise[i];
3996 rc = ll_ladvise_sanity(inode, k_ladvise);
3998 GOTO(out_ladvise, rc);
4000 switch (k_ladvise->lla_advice) {
4001 case LU_LADVISE_LOCKNOEXPAND:
4002 rc = ll_lock_noexpand(file,
4003 k_ladvise->lla_peradvice_flags);
4004 GOTO(out_ladvise, rc);
4005 case LU_LADVISE_LOCKAHEAD:
4007 rc = ll_file_lock_ahead(file, k_ladvise);
4010 GOTO(out_ladvise, rc);
4013 &u_ladvise->lla_lockahead_result))
4014 GOTO(out_ladvise, rc = -EFAULT);
4017 rc = ll_ladvise(inode, file,
4018 k_ladvise_hdr->lah_flags,
4021 GOTO(out_ladvise, rc);
4028 OBD_FREE(k_ladvise_hdr, alloc_size);
4031 case LL_IOC_FLR_SET_MIRROR: {
4032 /* mirror I/O must be direct to avoid polluting page cache
4034 if (!(file->f_flags & O_DIRECT))
4037 fd->fd_designated_mirror = (__u32)arg;
4040 case FS_IOC_FSGETXATTR:
4041 RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
4042 case FS_IOC_FSSETXATTR:
4043 RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
4045 RETURN(put_user(PAGE_SIZE, (int __user *)arg));
4046 case LL_IOC_HEAT_GET: {
4047 struct lu_heat uheat;
4048 struct lu_heat *heat;
4051 if (copy_from_user(&uheat, (void __user *)arg, sizeof(uheat)))
4054 if (uheat.lh_count > OBD_HEAT_COUNT)
4055 uheat.lh_count = OBD_HEAT_COUNT;
4057 size = offsetof(typeof(uheat), lh_heat[uheat.lh_count]);
4058 OBD_ALLOC(heat, size);
4062 heat->lh_count = uheat.lh_count;
4063 ll_heat_get(inode, heat);
4064 rc = copy_to_user((char __user *)arg, heat, size);
4065 OBD_FREE(heat, size);
4066 RETURN(rc ? -EFAULT : 0);
4068 case LL_IOC_HEAT_SET: {
4071 if (copy_from_user(&flags, (void __user *)arg, sizeof(flags)))
4074 rc = ll_heat_set(inode, flags);
4077 case LL_IOC_PCC_DETACH: {
4078 struct lu_pcc_detach *detach;
4080 OBD_ALLOC_PTR(detach);
4084 if (copy_from_user(detach,
4085 (const struct lu_pcc_detach __user *)arg,
4087 GOTO(out_detach_free, rc = -EFAULT);
4089 if (!S_ISREG(inode->i_mode))
4090 GOTO(out_detach_free, rc = -EINVAL);
4092 if (!inode_owner_or_capable(inode))
4093 GOTO(out_detach_free, rc = -EPERM);
4095 rc = pcc_ioctl_detach(inode, detach->pccd_opt);
4097 OBD_FREE_PTR(detach);
4100 case LL_IOC_PCC_STATE: {
4101 struct lu_pcc_state __user *ustate =
4102 (struct lu_pcc_state __user *)arg;
4103 struct lu_pcc_state *state;
4105 OBD_ALLOC_PTR(state);
4109 if (copy_from_user(state, ustate, sizeof(*state)))
4110 GOTO(out_state, rc = -EFAULT);
4112 rc = pcc_ioctl_state(file, inode, state);
4114 GOTO(out_state, rc);
4116 if (copy_to_user(ustate, state, sizeof(*state)))
4117 GOTO(out_state, rc = -EFAULT);
4120 OBD_FREE_PTR(state);
4123 #ifdef HAVE_LUSTRE_CRYPTO
4124 case LL_IOC_SET_ENCRYPTION_POLICY:
4125 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4127 return llcrypt_ioctl_set_policy(file, (const void __user *)arg);
4128 case LL_IOC_GET_ENCRYPTION_POLICY_EX:
4129 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4131 return llcrypt_ioctl_get_policy_ex(file, (void __user *)arg);
4132 case LL_IOC_ADD_ENCRYPTION_KEY:
4133 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4135 return llcrypt_ioctl_add_key(file, (void __user *)arg);
4136 case LL_IOC_REMOVE_ENCRYPTION_KEY:
4137 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4139 return llcrypt_ioctl_remove_key(file, (void __user *)arg);
4140 case LL_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4141 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4143 return llcrypt_ioctl_remove_key_all_users(file,
4144 (void __user *)arg);
4145 case LL_IOC_GET_ENCRYPTION_KEY_STATUS:
4146 if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
4148 return llcrypt_ioctl_get_key_status(file, (void __user *)arg);
4151 case LL_IOC_UNLOCK_FOREIGN: {
4152 struct dentry *dentry = file_dentry(file);
4154 /* if not a foreign symlink do nothing */
4155 if (ll_foreign_is_removable(dentry, true)) {
4157 "prevent unlink of non-foreign file ("DFID")\n",
4158 PFID(ll_inode2fid(inode)));
4159 RETURN(-EOPNOTSUPP);
4165 RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
4166 (void __user *)arg));
4170 loff_t ll_lseek(struct file *file, loff_t offset, int whence)
4172 struct inode *inode = file_inode(file);
4175 struct cl_lseek_io *lsio;
4182 env = cl_env_get(&refcheck);
4184 RETURN(PTR_ERR(env));
4186 io = vvp_env_thread_io(env);
4187 io->ci_obj = ll_i2info(inode)->lli_clob;
4188 ll_io_set_mirror(io, file);
4190 lsio = &io->u.ci_lseek;
4191 lsio->ls_start = offset;
4192 lsio->ls_whence = whence;
4193 lsio->ls_result = -ENXIO;
4196 rc = cl_io_init(env, io, CIT_LSEEK, io->ci_obj);
4198 struct vvp_io *vio = vvp_env_io(env);
4200 vio->vui_fd = file->private_data;
4201 rc = cl_io_loop(env, io);
4205 retval = rc ? : lsio->ls_result;
4206 cl_io_fini(env, io);
4207 } while (unlikely(io->ci_need_restart));
4209 cl_env_put(env, &refcheck);
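/*
 * Userspace sketch (guarded out): walking the allocated extents of a
 * sparse file with SEEK_DATA/SEEK_HOLE, which ll_lseek() services via a
 * CIT_LSEEK io as above.
 */
#if 0
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: [%jd, %jd)\n", (intmax_t)data, (intmax_t)hole);
		data = hole;
	}
#endif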
4214 static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
4216 struct inode *inode = file_inode(file);
4217 loff_t retval = offset, eof = 0;
4218 ktime_t kstart = ktime_get();
4222 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
4223 PFID(ll_inode2fid(inode)), inode, retval, retval,
4226 if (origin == SEEK_END) {
4227 retval = ll_glimpse_size(inode);
4230 eof = i_size_read(inode);
4233 if (origin == SEEK_HOLE || origin == SEEK_DATA) {
4237 /* flush local cache first if any */
4238 cl_sync_file_range(inode, offset, OBD_OBJECT_EOF,
4241 retval = ll_lseek(file, offset, origin);
4244 retval = vfs_setpos(file, retval, ll_file_maxbytes(inode));
4246 retval = generic_file_llseek_size(file, offset, origin,
4247 ll_file_maxbytes(inode), eof);
4250 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
4251 ktime_us_delta(ktime_get(), kstart));
4255 static int ll_flush(struct file *file, fl_owner_t id)
4257 struct inode *inode = file_inode(file);
4258 struct ll_inode_info *lli = ll_i2info(inode);
4259 struct ll_file_data *fd = file->private_data;
4262 LASSERT(!S_ISDIR(inode->i_mode));
4264 /* catch async errors that were recorded back when async writeback
4265 * failed for pages in this mapping. */
4266 rc = lli->lli_async_rc;
4267 lli->lli_async_rc = 0;
4268 if (lli->lli_clob != NULL) {
4269 err = lov_read_and_clear_async_rc(lli->lli_clob);
4274 /* The application has already been told about the write failure.
4275 * Do not report failure again. */
4276 if (fd->fd_write_failed)
4278 return rc ? -EIO : 0;
4282 * Called to make sure a portion of the file has been written out.
4283 * If @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to OST.
4285 * Return how many pages have been written.
4287 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
4288 enum cl_fsync_mode mode, int ignore_layout)
4292 struct cl_fsync_io *fio;
4297 if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
4298 mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
4301 env = cl_env_get(&refcheck);
4303 RETURN(PTR_ERR(env));
4305 io = vvp_env_thread_io(env);
4306 io->ci_obj = ll_i2info(inode)->lli_clob;
4307 io->ci_ignore_layout = ignore_layout;
4309 /* initialize parameters for sync */
4310 fio = &io->u.ci_fsync;
4311 fio->fi_start = start;
4313 fio->fi_fid = ll_inode2fid(inode);
4314 fio->fi_mode = mode;
4315 fio->fi_nr_written = 0;
4317 if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
4318 result = cl_io_loop(env, io);
4320 result = io->ci_result;
4322 result = fio->fi_nr_written;
4323 cl_io_fini(env, io);
4324 cl_env_put(env, &refcheck);
4330 * When dentry is provided (the 'else' case), file_dentry() may be
4331 * null and dentry must be used directly rather than pulled from
4332 * file_dentry() as is done otherwise.
4335 int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
4337 struct dentry *dentry = file_dentry(file);
4338 struct inode *inode = dentry->d_inode;
4339 struct ll_inode_info *lli = ll_i2info(inode);
4340 struct ptlrpc_request *req;
4341 ktime_t kstart = ktime_get();
4347 "VFS Op:inode="DFID"(%p), start %lld, end %lld, datasync %d\n",
4348 PFID(ll_inode2fid(inode)), inode, start, end, datasync);
4350 /* fsync's caller has already called _fdata{sync,write}, we want
4351 * that IO to finish before calling the osc and mdc sync methods */
4352 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
4355 /* catch async errors that were recorded back when async writeback
4356 * failed for pages in this mapping. */
4357 if (!S_ISDIR(inode->i_mode)) {
4358 err = lli->lli_async_rc;
4359 lli->lli_async_rc = 0;
4362 if (lli->lli_clob != NULL) {
4363 err = lov_read_and_clear_async_rc(lli->lli_clob);
4369 err = md_fsync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
4373 ptlrpc_req_finished(req);
4375 if (S_ISREG(inode->i_mode)) {
4376 struct ll_file_data *fd = file->private_data;
4379 /* Sync metadata on MDT first, and then sync the cached data
4382 err = pcc_fsync(file, start, end, datasync, &cached);
4384 err = cl_sync_file_range(inode, start, end,
4386 if (rc == 0 && err < 0)
4389 fd->fd_write_failed = true;
4391 fd->fd_write_failed = false;
4394 inode_unlock(inode);
4397 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
4398 ktime_us_delta(ktime_get(), kstart));
4403 ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
4405 struct inode *inode = file_inode(file);
4406 struct ll_sb_info *sbi = ll_i2sbi(inode);
4407 struct ldlm_enqueue_info einfo = {
4408 .ei_type = LDLM_FLOCK,
4409 .ei_cb_cp = ldlm_flock_completion_ast,
4410 .ei_cbdata = file_lock,
4412 struct md_op_data *op_data;
4413 struct lustre_handle lockh = { 0 };
4414 union ldlm_policy_data flock = { { 0 } };
4415 int fl_type = file_lock->fl_type;
4416 ktime_t kstart = ktime_get();
4422 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
4423 PFID(ll_inode2fid(inode)), file_lock);
4425 if (file_lock->fl_flags & FL_FLOCK) {
4426 LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
4427 /* flocks are whole-file locks */
4428 flock.l_flock.end = OFFSET_MAX;
4429 /* For flocks the owner is determined by the local file descriptor */
4430 flock.l_flock.owner = (unsigned long)file_lock->fl_file;
4431 } else if (file_lock->fl_flags & FL_POSIX) {
4432 flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
4433 flock.l_flock.start = file_lock->fl_start;
4434 flock.l_flock.end = file_lock->fl_end;
4438 flock.l_flock.pid = file_lock->fl_pid;
4440 #if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner)
4441 /* Somewhat ugly workaround for svc lockd.
4442 * lockd installs a custom fl_lmops->lm_compare_owner that checks
4443 * that the fl_owner is the same (which it always is on the local
4444 * node, I guess between lockd processes) and then compares pids.
4445 * As such we assign the pid to the owner field to make it all work;
4446 * conflicts with normal locks are unlikely since the pid space and
4447 * the pointer space for current->files do not intersect. */
4448 if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
4449 flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
4454 einfo.ei_mode = LCK_PR;
4457 /* An unlock request may or may not have any relation to
4458 * existing locks so we may not be able to pass a lock handle
4459 * via a normal ldlm_lock_cancel() request. The request may even
4460 * unlock a byte range in the middle of an existing lock. In
4461 * order to process an unlock request we need all of the same
4462 * information that is given with a normal read or write record
4463 * lock request. To avoid creating another ldlm unlock (cancel)
4464 * message we'll treat a LCK_NL flock request as an unlock. */
4465 einfo.ei_mode = LCK_NL;
4468 einfo.ei_mode = LCK_PW;
4471 CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", fl_type);
4486 flags = LDLM_FL_BLOCK_NOWAIT;
4492 flags = LDLM_FL_TEST_LOCK;
4495 CERROR("unknown fcntl lock command: %d\n", cmd);
4499 /* Save the old mode so that if the mode in the lock changes we
4500 * can decrement the appropriate reader or writer refcount. */
4501 file_lock->fl_type = einfo.ei_mode;
4503 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4504 LUSTRE_OPC_ANY, NULL);
4505 if (IS_ERR(op_data))
4506 RETURN(PTR_ERR(op_data));
4508 CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
4509 PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags,
4510 einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
4513 rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, &lockh,
4516 /* Restore the file lock type if not TEST lock. */
4517 if (!(flags & LDLM_FL_TEST_LOCK))
4518 file_lock->fl_type = fl_type;
4520 #ifdef HAVE_LOCKS_LOCK_FILE_WAIT
4521 if ((rc == 0 || file_lock->fl_type == F_UNLCK) &&
4522 !(flags & LDLM_FL_TEST_LOCK))
4523 rc2 = locks_lock_file_wait(file, file_lock);
4525 if ((file_lock->fl_flags & FL_FLOCK) &&
4526 (rc == 0 || file_lock->fl_type == F_UNLCK))
4527 rc2 = flock_lock_file_wait(file, file_lock);
4528 if ((file_lock->fl_flags & FL_POSIX) &&
4529 (rc == 0 || file_lock->fl_type == F_UNLCK) &&
4530 !(flags & LDLM_FL_TEST_LOCK))
4531 rc2 = posix_lock_file_wait(file, file_lock);
4532 #endif /* HAVE_LOCKS_LOCK_FILE_WAIT */
4534 if (rc2 && file_lock->fl_type != F_UNLCK) {
4535 einfo.ei_mode = LCK_NL;
4536 md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data,
4541 ll_finish_md_op_data(op_data);
4544 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK,
4545 ktime_us_delta(ktime_get(), kstart));
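/*
 * Userspace sketch (guarded out): a POSIX write lock on the first 4KiB
 * that reaches ll_file_flock() as an LDLM_FLOCK enqueue. Requires a
 * flock-enabled mount ('-o flock').
 */
#if 0
	struct flock fl = {
		.l_type	  = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len	  = 4096,
	};

	rc = fcntl(fd, F_SETLKW, &fl);
#endif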
4549 int ll_get_fid_by_name(struct inode *parent, const char *name,
4550 int namelen, struct lu_fid *fid,
4551 struct inode **inode)
4553 struct md_op_data *op_data = NULL;
4554 struct mdt_body *body;
4555 struct ptlrpc_request *req;
4559 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
4560 LUSTRE_OPC_ANY, NULL);
4561 if (IS_ERR(op_data))
4562 RETURN(PTR_ERR(op_data));
4564 op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
4565 rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
4566 ll_finish_md_op_data(op_data);
4570 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
4572 GOTO(out_req, rc = -EFAULT);
4574 *fid = body->mbo_fid1;
4577 rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
4579 ptlrpc_req_finished(req);
4583 int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum,
4586 struct dentry *dchild = NULL;
4587 struct inode *child_inode = NULL;
4588 struct md_op_data *op_data;
4589 struct ptlrpc_request *request = NULL;
4590 struct obd_client_handle *och = NULL;
4592 struct mdt_body *body;
4593 __u64 data_version = 0;
4594 size_t namelen = strlen(name);
4595 int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
4599 CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
4600 PFID(ll_inode2fid(parent)), name,
4601 lum->lum_stripe_offset, lum->lum_stripe_count);
4603 if (lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC) &&
4604 lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC_SPECIFIC))
4605 lustre_swab_lmv_user_md(lum);
4607 /* Get child FID first */
4608 qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
4611 dchild = d_lookup(file_dentry(file), &qstr);
4613 if (dchild->d_inode)
4614 child_inode = igrab(dchild->d_inode);
4619 rc = ll_get_fid_by_name(parent, name, namelen, NULL,
4628 if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
4629 OBD_CONNECT2_DIR_MIGRATE)) {
4630 if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
4631 ll_dir_striped(child_inode)) {
4632 CERROR("%s: MDT doesn't support striped directory migration!\n",
4633 ll_i2sbi(parent)->ll_fsname);
4634 GOTO(out_iput, rc = -EOPNOTSUPP);
4639 * lfs migrate command needs to be blocked on the client
4640 * by checking the migrate FID against the FID of the
4643 if (is_root_inode(child_inode))
4644 GOTO(out_iput, rc = -EINVAL);
4646 if (IS_ENCRYPTED(child_inode)) {
4647 rc = llcrypt_get_encryption_info(child_inode);
4650 if (!llcrypt_has_encryption_key(child_inode)) {
4651 CDEBUG(D_SEC, "no enc key for "DFID"\n",
4652 PFID(ll_inode2fid(child_inode)));
4653 GOTO(out_iput, rc = -ENOKEY);
4657 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
4658 child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
4659 if (IS_ERR(op_data))
4660 GOTO(out_iput, rc = PTR_ERR(op_data));
4662 inode_lock(child_inode);
4663 op_data->op_fid3 = *ll_inode2fid(child_inode);
4664 if (!fid_is_sane(&op_data->op_fid3)) {
4665 CERROR("%s: migrate %s, but FID "DFID" is insane\n",
4666 ll_i2sbi(parent)->ll_fsname, name,
4667 PFID(&op_data->op_fid3));
4668 GOTO(out_unlock, rc = -EINVAL);
4671 op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
4672 op_data->op_data = lum;
4673 op_data->op_data_size = lumlen;
4676 if (S_ISREG(child_inode->i_mode)) {
4677 och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
4681 GOTO(out_unlock, rc);
4684 rc = ll_data_version(child_inode, &data_version,
4687 GOTO(out_close, rc);
4689 op_data->op_open_handle = och->och_open_handle;
4690 op_data->op_data_version = data_version;
4691 op_data->op_lease_handle = och->och_lease_handle;
4692 op_data->op_bias |= MDS_CLOSE_MIGRATE;
4694 spin_lock(&och->och_mod->mod_open_req->rq_lock);
4695 och->och_mod->mod_open_req->rq_replay = 0;
4696 spin_unlock(&och->och_mod->mod_open_req->rq_lock);
4699 rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name, namelen,
4700 name, namelen, &request);
4702 LASSERT(request != NULL);
4703 ll_update_times(request, parent);
4706 if (rc == 0 || rc == -EAGAIN) {
4707 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
4708 LASSERT(body != NULL);
4710 /* If the server did release the layout lock, then we clean up
4711 * the client och here; otherwise release it in out_close: */
4712 if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
4713 obd_mod_put(och->och_mod);
4714 md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
4716 och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
4722 if (request != NULL) {
4723 ptlrpc_req_finished(request);
4727 /* Try again if the lease has been cancelled. */
4728 if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
4733 ll_lease_close(och, child_inode, NULL);
4735 clear_nlink(child_inode);
4737 inode_unlock(child_inode);
4738 ll_finish_md_op_data(op_data);
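/*
 * Illustrative sketch (not part of the original source): ll_migrate()
 * above services metadata migration requests, typically issued from
 * userspace with the standard lfs tool, e.g.:
 *
 *	lfs migrate -m 1 /mnt/lustre/testdir	# move testdir to MDT0001
 *
 * For a regular file, the FMODE_WRITE lease and the data version
 * sampled above let the MDT detect concurrent modification and fail
 * the migration with -EAGAIN, which is retried here.
 */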
4745 ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
4747 struct ll_file_data *fd = file->private_data;
4751 * To avoid a flood of warning messages, only print one message
4752 * per file. The overall message rate on the client is also
4753 * limited by CDEBUG_LIMIT.
4755 if (!(fd->fd_flags & LL_FILE_FLOCK_WARNING)) {
4756 fd->fd_flags |= LL_FILE_FLOCK_WARNING;
4757 CDEBUG_LIMIT(D_TTY | D_CONSOLE,
4758 "flock disabled, mount with '-o [local]flock' to enable\r\n");
4764 * Test if some locks matching bits and l_req_mode are acquired:
4765 * - the bits can be spread across different locks
4766 * - if found, clear the common lock bits in *bits
4767 * - the bits not found are kept in *bits
4769 * \param bits [IN] searched lock bits
4770 * \param l_req_mode [IN] searched lock mode
4771 * \retval boolean, true iff all bits are found
4773 int ll_have_md_lock(struct inode *inode, __u64 *bits, enum ldlm_mode l_req_mode)
4775 struct lustre_handle lockh;
4776 union ldlm_policy_data policy;
4777 enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
4778 (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
4787 fid = &ll_i2info(inode)->lli_fid;
4788 CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
4789 ldlm_lockname[mode]);
4791 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
4792 for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
4793 policy.l_inodebits.bits = *bits & BIT(i);
4794 if (policy.l_inodebits.bits == 0)
4797 if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
4798 &policy, mode, &lockh)) {
4799 struct ldlm_lock *lock;
4801 lock = ldlm_handle2lock(&lockh);
4802 if (lock != NULL) {
4803 *bits &=
4804 ~(lock->l_policy_data.l_inodebits.bits);
4805 LDLM_LOCK_PUT(lock);
4806 } else {
4807 *bits &= ~policy.l_inodebits.bits;
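/*
 * Illustrative usage sketch (not from the original source): a caller can
 * probe which inodebits are already cached locally, e.g. before deciding
 * whether a getattr RPC can be skipped. Call sites vary; this only shows
 * the in/out contract of ll_have_md_lock() described above.
 *
 *	__u64 bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
 *
 *	if (ll_have_md_lock(inode, &bits, LCK_MINMODE)) {
 *		// all requested bits are covered by cached locks
 *	} else {
 *		// 'bits' now holds only the bits that were NOT found
 *	}
 */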
4814 enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
4815 struct lustre_handle *lockh, __u64 flags,
4816 enum ldlm_mode mode)
4818 union ldlm_policy_data policy = { .l_inodebits = { bits } };
4823 fid = &ll_i2info(inode)->lli_fid;
4824 CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
4826 rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
4827 fid, LDLM_IBITS, &policy, mode, lockh);
4832 static int ll_inode_revalidate_fini(struct inode *inode, int rc)
4834 /* Already unlinked. Just update nlink and return success */
4835 if (rc == -ENOENT) {
4837 /* If it is a striped directory and there is a bad stripe,
4838 * let's revalidate the dentry again instead of returning an error.
4840 if (ll_dir_striped(inode))
4843 /* This path cannot be hit for regular files unless in
4844 * case of obscure races, so no need to validate it. */
4846 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
4848 } else if (rc != 0) {
4849 CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
4850 "%s: revalidate FID "DFID" error: rc = %d\n",
4851 ll_i2sbi(inode)->ll_fsname,
4852 PFID(ll_inode2fid(inode)), rc);
4858 static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op)
4860 struct inode *parent;
4861 struct inode *inode = dentry->d_inode;
4862 struct obd_export *exp = ll_i2mdexp(inode);
4863 struct lookup_intent oit = {
4866 struct ptlrpc_request *req = NULL;
4867 struct md_op_data *op_data;
4868 const char *name = NULL;
4873 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
4874 PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
4876 if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) {
4877 parent = dentry->d_parent->d_inode;
4878 name = dentry->d_name.name;
4879 namelen = dentry->d_name.len;
4884 op_data = ll_prep_md_op_data(NULL, parent, inode, name, namelen, 0,
4885 LUSTRE_OPC_ANY, NULL);
4886 if (IS_ERR(op_data))
4887 RETURN(PTR_ERR(op_data));
4889 /* Call getattr by fid */
4890 if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID)
4891 op_data->op_flags = MF_GETATTR_BY_FID;
4892 rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0);
4893 ll_finish_md_op_data(op_data);
4895 rc = ll_inode_revalidate_fini(inode, rc);
4899 rc = ll_revalidate_it_finish(req, &oit, dentry);
4901 ll_intent_release(&oit);
4905 /* Unlinked? Unhash the dentry, so it is not picked up later by
4906 * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
4907 * here, as that would break get_cwd() functionality on 2.6.
4909 if (!dentry->d_inode->i_nlink)
4910 d_lustre_invalidate(dentry);
4912 ll_lookup_finish_locks(&oit, dentry);
4914 ptlrpc_req_finished(req);
4919 static int ll_merge_md_attr(struct inode *inode)
4921 struct ll_inode_info *lli = ll_i2info(inode);
4922 struct cl_attr attr = { 0 };
4925 LASSERT(lli->lli_lsm_md != NULL);
4927 if (!lmv_dir_striped(lli->lli_lsm_md))
4930 down_read(&lli->lli_lsm_sem);
4931 rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
4932 &attr, ll_md_blocking_ast);
4933 up_read(&lli->lli_lsm_sem);
4937 set_nlink(inode, attr.cat_nlink);
4938 inode->i_blocks = attr.cat_blocks;
4939 i_size_write(inode, attr.cat_size);
4941 ll_i2info(inode)->lli_atime = attr.cat_atime;
4942 ll_i2info(inode)->lli_mtime = attr.cat_mtime;
4943 ll_i2info(inode)->lli_ctime = attr.cat_ctime;
4948 int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
4949 unsigned int flags, bool foreign)
4951 struct inode *inode = de->d_inode;
4952 struct ll_sb_info *sbi = ll_i2sbi(inode);
4953 struct ll_inode_info *lli = ll_i2info(inode);
4954 struct inode *dir = de->d_parent->d_inode;
4955 bool need_glimpse = true;
4956 ktime_t kstart = ktime_get();
4959 /* The OST object(s) determine the file size, blocks and mtime. */
4960 if (!(request_mask & STATX_SIZE || request_mask & STATX_BLOCKS ||
4961 request_mask & STATX_MTIME))
4962 need_glimpse = false;
4964 if (dentry_may_statahead(dir, de))
4965 ll_start_statahead(dir, de, need_glimpse &&
4966 !(flags & AT_STATX_DONT_SYNC));
4968 if (flags & AT_STATX_DONT_SYNC)
4969 GOTO(fill_attr, rc = 0);
4971 rc = ll_inode_revalidate(de, IT_GETATTR);
4975 /* foreign files/dirs are always of zero length, so there is no
4976 * need to validate the size.
4978 if (S_ISREG(inode->i_mode) && !foreign) {
4982 GOTO(fill_attr, rc);
4984 rc = pcc_inode_getattr(inode, request_mask, flags, &cached);
4985 if (cached && rc < 0)
4989 GOTO(fill_attr, rc);
4992 * If the returned attr is masked with OBD_MD_FLSIZE,
4993 * OBD_MD_FLBLOCKS and OBD_MD_FLMTIME, then the file size
4994 * and blocks obtained from the MDT are strictly correct, the file
4995 * is usually not being modified by other clients, and the
4996 * [a|m|c]time obtained from the MDT is also strictly correct.
4997 * In this case there is no need to send glimpse
4998 * RPCs to the OSTs for file attributes such as size and blocks.
5000 if (lli->lli_attr_valid & OBD_MD_FLSIZE &&
5001 lli->lli_attr_valid & OBD_MD_FLBLOCKS &&
5002 lli->lli_attr_valid & OBD_MD_FLMTIME) {
5003 inode->i_mtime.tv_sec = lli->lli_mtime;
5004 if (lli->lli_attr_valid & OBD_MD_FLATIME)
5005 inode->i_atime.tv_sec = lli->lli_atime;
5006 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
5007 inode->i_ctime.tv_sec = lli->lli_ctime;
5008 GOTO(fill_attr, rc);
5011 /* In case of restore, the MDT has the right size and has
5012 * already sent it back without granting the layout lock;
5013 * the inode is up-to-date, so a glimpse is useless.
5014 * Also, to glimpse we need the layout; during a running
5015 * restore the MDT holds the layout lock, so the glimpse will
5016 * block until the end of the restore (getattr will block)
5018 if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
5019 rc = ll_glimpse_size(inode);
5024 /* If the object isn't a regular file then don't validate its size. */
5025 /* a foreign dir is not a striped dir */
5026 if (ll_dir_striped(inode) && !foreign) {
5027 rc = ll_merge_md_attr(inode);
5032 if (lli->lli_attr_valid & OBD_MD_FLATIME)
5033 inode->i_atime.tv_sec = lli->lli_atime;
5034 if (lli->lli_attr_valid & OBD_MD_FLMTIME)
5035 inode->i_mtime.tv_sec = lli->lli_mtime;
5036 if (lli->lli_attr_valid & OBD_MD_FLCTIME)
5037 inode->i_ctime.tv_sec = lli->lli_ctime;
5041 OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
5043 if (ll_need_32bit_api(sbi)) {
5044 stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
5045 stat->dev = ll_compat_encode_dev(inode->i_sb->s_dev);
5046 stat->rdev = ll_compat_encode_dev(inode->i_rdev);
5048 stat->ino = inode->i_ino;
5049 stat->dev = inode->i_sb->s_dev;
5050 stat->rdev = inode->i_rdev;
5053 /* foreign symlink to be exposed as a real symlink */
5054 if (!foreign)
5055 stat->mode = inode->i_mode;
5056 else
5057 stat->mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
5059 stat->uid = inode->i_uid;
5060 stat->gid = inode->i_gid;
5061 stat->atime = inode->i_atime;
5062 stat->mtime = inode->i_mtime;
5063 stat->ctime = inode->i_ctime;
5064 /* stat->blksize is used to advertise the preferred IO size */
5065 if (sbi->ll_stat_blksize)
5066 stat->blksize = sbi->ll_stat_blksize;
5067 else if (S_ISREG(inode->i_mode))
5068 stat->blksize = 1 << min(PTLRPC_MAX_BRW_BITS + 1,
5069 LL_MAX_BLKSIZE_BITS);
5071 stat->blksize = 1 << inode->i_sb->s_blocksize_bits;
5073 stat->nlink = inode->i_nlink;
5074 stat->size = i_size_read(inode);
5075 stat->blocks = inode->i_blocks;
5077 #ifdef HAVE_INODEOPS_ENHANCED_GETATTR
5078 if (flags & AT_STATX_DONT_SYNC) {
5079 if (stat->size == 0 &&
5080 lli->lli_attr_valid & OBD_MD_FLLAZYSIZE)
5081 stat->size = lli->lli_lazysize;
5082 if (stat->blocks == 0 &&
5083 lli->lli_attr_valid & OBD_MD_FLLAZYBLOCKS)
5084 stat->blocks = lli->lli_lazyblocks;
5087 if (lli->lli_attr_valid & OBD_MD_FLBTIME) {
5088 stat->result_mask |= STATX_BTIME;
5089 stat->btime.tv_sec = lli->lli_btime;
5092 stat->attributes_mask = STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
5093 stat->attributes |= ll_inode_to_ext_flags(inode->i_flags);
5094 stat->result_mask &= request_mask;
5097 ll_stats_ops_tally(sbi, LPROC_LL_GETATTR,
5098 ktime_us_delta(ktime_get(), kstart));
5103 #ifdef HAVE_INODEOPS_ENHANCED_GETATTR
5104 int ll_getattr(const struct path *path, struct kstat *stat,
5105 u32 request_mask, unsigned int flags)
5107 return ll_getattr_dentry(path->dentry, stat, request_mask, flags,
5111 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
5113 return ll_getattr_dentry(de, stat, STATX_BASIC_STATS,
5114 AT_STATX_SYNC_AS_STAT, false);
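/*
 * Illustrative userspace sketch (not part of this file): the
 * AT_STATX_DONT_SYNC handling above lets a caller ask for cheap,
 * possibly stale attributes without triggering glimpse RPCs. This is
 * just the generic statx(2) interface, nothing Lustre-specific:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <stdio.h>
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "/mnt/lustre/file", AT_STATX_DONT_SYNC,
 *		  STATX_SIZE | STATX_BLOCKS, &stx) == 0)
 *		printf("lazy size=%llu blocks=%llu\n",
 *		       (unsigned long long)stx.stx_size,
 *		       (unsigned long long)stx.stx_blocks);
 */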
5118 int cl_falloc(struct inode *inode, int mode, loff_t offset, loff_t len)
5124 loff_t size = i_size_read(inode);
5128 env = cl_env_get(&refcheck);
5130 RETURN(PTR_ERR(env));
5132 io = vvp_env_thread_io(env);
5133 io->ci_obj = ll_i2info(inode)->lli_clob;
5134 io->ci_verify_layout = 1;
5135 io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu);
5136 io->u.ci_setattr.sa_falloc_mode = mode;
5137 io->u.ci_setattr.sa_falloc_offset = offset;
5138 io->u.ci_setattr.sa_falloc_end = offset + len;
5139 io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE;
5140 if (io->u.ci_setattr.sa_falloc_end > size) {
5141 loff_t newsize = io->u.ci_setattr.sa_falloc_end;
5143 /* Check new size against VFS/VM file size limit and rlimit */
5144 rc = inode_newsize_ok(inode, newsize);
5147 if (newsize > ll_file_maxbytes(inode)) {
5148 CDEBUG(D_INODE, "file size too large %llu > %llu\n",
5149 (unsigned long long)newsize,
5150 ll_file_maxbytes(inode));
5157 rc = cl_io_init(env, io, CIT_SETATTR, io->ci_obj);
5159 rc = cl_io_loop(env, io);
5162 cl_io_fini(env, io);
5163 } while (unlikely(io->ci_need_restart));
5166 cl_env_put(env, &refcheck);
5170 long ll_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
5172 struct inode *inode = filp->f_path.dentry->d_inode;
5175 if (offset < 0 || len <= 0)
5176 RETURN(-EINVAL);
5178 * Encrypted inodes can't handle collapse range or zero range or insert
5179 * range since we would need to re-encrypt blocks with a different IV or
5180 * XTS tweak (which are based on the logical block number).
5181 * Similar to what ext4 does.
5183 if (IS_ENCRYPTED(inode) &&
5184 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
5185 FALLOC_FL_ZERO_RANGE)))
5186 RETURN(-EOPNOTSUPP);
5189 * Only mode == 0 (which is standard prealloc) and PUNCH is supported.
5190 * The rest of the mode options are not supported yet.
5192 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
5193 RETURN(-EOPNOTSUPP);
5195 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1);
5197 rc = cl_falloc(inode, mode, offset, len);
5199 * ENOTSUPP (524) is an NFSv3-specific error code erroneously
5200 * used by Lustre in several places. Returning it here would
5201 * confuse applications that explicitly test for EOPNOTSUPP
5202 * (95) and fall back to ftruncate().
5204 if (rc == -ENOTSUPP)
5205 rc = -EOPNOTSUPP;
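/*
 * Illustrative userspace sketch (not part of this file): given the mode
 * checks above, only default preallocation and hole punching reach the
 * OSTs. Both use the standard fallocate(2) interface:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// preallocate 1 MiB at offset 0 (mode == 0)
 *	rc = fallocate(fd, 0, 0, 1 << 20);
 *
 *	// punch a 1 MiB hole without changing the file size
 *	rc = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		       0, 1 << 20);
 *
 * Any other mode (COLLAPSE_RANGE, ZERO_RANGE, INSERT_RANGE) is rejected
 * with -EOPNOTSUPP above.
 */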
5210 static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5211 __u64 start, __u64 len)
5215 struct fiemap *fiemap;
5216 unsigned int extent_count = fieinfo->fi_extents_max;
5218 num_bytes = sizeof(*fiemap) + (extent_count *
5219 sizeof(struct fiemap_extent));
5220 OBD_ALLOC_LARGE(fiemap, num_bytes);
5225 fiemap->fm_flags = fieinfo->fi_flags;
5226 fiemap->fm_extent_count = fieinfo->fi_extents_max;
5227 fiemap->fm_start = start;
5228 fiemap->fm_length = len;
5229 if (extent_count > 0 &&
5230 copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
5231 sizeof(struct fiemap_extent)) != 0)
5232 GOTO(out, rc = -EFAULT);
5234 rc = ll_do_fiemap(inode, fiemap, num_bytes);
5236 if (IS_ENCRYPTED(inode)) {
5239 for (i = 0; i < fiemap->fm_mapped_extents; i++)
5240 fiemap->fm_extents[i].fe_flags |=
5241 FIEMAP_EXTENT_DATA_ENCRYPTED |
5242 FIEMAP_EXTENT_ENCODED;
5245 fieinfo->fi_flags = fiemap->fm_flags;
5246 fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
5247 if (extent_count > 0 &&
5248 copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
5249 fiemap->fm_mapped_extents *
5250 sizeof(struct fiemap_extent)) != 0)
5251 GOTO(out, rc = -EFAULT);
5253 OBD_FREE_LARGE(fiemap, num_bytes);
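/*
 * Illustrative userspace sketch (not part of this file): ll_fiemap()
 * above backs the generic FS_IOC_FIEMAP ioctl, so extent maps can be
 * read with the standard interface:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	char buf[sizeof(struct fiemap) + 32 * sizeof(struct fiemap_extent)];
 *	struct fiemap *fm = (struct fiemap *)buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 32;
 *	rc = ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * On encrypted files every returned extent carries
 * FIEMAP_EXTENT_DATA_ENCRYPTED, as set above.
 */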
5257 int ll_inode_permission(struct inode *inode, int mask)
5260 struct ll_sb_info *sbi;
5261 struct root_squash_info *squash;
5262 struct cred *cred = NULL;
5263 const struct cred *old_cred = NULL;
5264 bool squash_id = false;
5265 ktime_t kstart = ktime_get();
5269 if (mask & MAY_NOT_BLOCK)
5273 * As the root inode is NOT validated during the lookup
5274 * operation, we need to do it before the permission check.
5277 if (is_root_inode(inode)) {
5278 rc = ll_inode_revalidate(inode->i_sb->s_root, IT_LOOKUP);
5283 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
5284 PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
5286 /* squash fsuid/fsgid if needed */
5287 sbi = ll_i2sbi(inode);
5288 squash = &sbi->ll_squash;
5289 if (unlikely(squash->rsi_uid != 0 &&
5290 uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
5291 !(sbi->ll_flags & LL_SBI_NOROOTSQUASH))) {
5295 CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
5296 __kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
5297 squash->rsi_uid, squash->rsi_gid);
5299 /* update current process's credentials
5300 * and FS capability */
5301 cred = prepare_creds();
5305 cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
5306 cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
5307 cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective);
5308 cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
5310 old_cred = override_creds(cred);
5313 rc = generic_permission(inode, mask);
5314 /* restore current process's credentials and FS capability */
5316 revert_creds(old_cred);
5321 ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM,
5322 ktime_us_delta(ktime_get(), kstart));
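/*
 * Illustrative admin-side sketch (not from this file): the credential
 * squashing above is driven by the MDT root_squash tunables, e.g.
 * (names assume the standard Lustre parameters):
 *
 *	lctl set_param mdt.<fsname>-MDT0000.root_squash="65534:65534"
 *	lctl set_param mdt.<fsname>-MDT0000.nosquash_nids="10.0.0.2@tcp"
 *
 * Clients matching nosquash_nids get LL_SBI_NOROOTSQUASH set and skip
 * the override_creds() path.
 */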
5327 /* -o localflock - only provides locally consistent flock locks */
5328 static const struct file_operations ll_file_operations = {
5329 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5330 # ifdef HAVE_SYNC_READ_WRITE
5331 .read = new_sync_read,
5332 .write = new_sync_write,
5334 .read_iter = ll_file_read_iter,
5335 .write_iter = ll_file_write_iter,
5336 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5337 .read = ll_file_read,
5338 .aio_read = ll_file_aio_read,
5339 .write = ll_file_write,
5340 .aio_write = ll_file_aio_write,
5341 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5342 .unlocked_ioctl = ll_file_ioctl,
5343 .open = ll_file_open,
5344 .release = ll_file_release,
5345 .mmap = ll_file_mmap,
5346 .llseek = ll_file_seek,
5347 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5348 .splice_read = generic_file_splice_read,
5350 .splice_read = pcc_file_splice_read,
5354 .fallocate = ll_fallocate,
5357 static const struct file_operations ll_file_operations_flock = {
5358 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5359 # ifdef HAVE_SYNC_READ_WRITE
5360 .read = new_sync_read,
5361 .write = new_sync_write,
5362 # endif /* HAVE_SYNC_READ_WRITE */
5363 .read_iter = ll_file_read_iter,
5364 .write_iter = ll_file_write_iter,
5365 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5366 .read = ll_file_read,
5367 .aio_read = ll_file_aio_read,
5368 .write = ll_file_write,
5369 .aio_write = ll_file_aio_write,
5370 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5371 .unlocked_ioctl = ll_file_ioctl,
5372 .open = ll_file_open,
5373 .release = ll_file_release,
5374 .mmap = ll_file_mmap,
5375 .llseek = ll_file_seek,
5376 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5377 .splice_read = generic_file_splice_read,
5379 .splice_read = pcc_file_splice_read,
5383 .flock = ll_file_flock,
5384 .lock = ll_file_flock,
5385 .fallocate = ll_fallocate,
5388 /* These are for -o noflock - to return ENOSYS on flock calls */
5389 static const struct file_operations ll_file_operations_noflock = {
5390 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5391 # ifdef HAVE_SYNC_READ_WRITE
5392 .read = new_sync_read,
5393 .write = new_sync_write,
5394 # endif /* HAVE_SYNC_READ_WRITE */
5395 .read_iter = ll_file_read_iter,
5396 .write_iter = ll_file_write_iter,
5397 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5398 .read = ll_file_read,
5399 .aio_read = ll_file_aio_read,
5400 .write = ll_file_write,
5401 .aio_write = ll_file_aio_write,
5402 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5403 .unlocked_ioctl = ll_file_ioctl,
5404 .open = ll_file_open,
5405 .release = ll_file_release,
5406 .mmap = ll_file_mmap,
5407 .llseek = ll_file_seek,
5408 #ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
5409 .splice_read = generic_file_splice_read,
5411 .splice_read = pcc_file_splice_read,
5415 .flock = ll_file_noflock,
5416 .lock = ll_file_noflock,
5417 .fallocate = ll_fallocate,
5420 const struct inode_operations ll_file_inode_operations = {
5421 .setattr = ll_setattr,
5422 .getattr = ll_getattr,
5423 .permission = ll_inode_permission,
5424 #ifdef HAVE_IOP_XATTR
5425 .setxattr = ll_setxattr,
5426 .getxattr = ll_getxattr,
5427 .removexattr = ll_removexattr,
5429 .listxattr = ll_listxattr,
5430 .fiemap = ll_fiemap,
5431 .get_acl = ll_get_acl,
5432 #ifdef HAVE_IOP_SET_ACL
5433 .set_acl = ll_set_acl,
5437 const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi)
5439 const struct file_operations *fops = &ll_file_operations_noflock;
5441 if (sbi->ll_flags & LL_SBI_FLOCK)
5442 fops = &ll_file_operations_flock;
5443 else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
5444 fops = &ll_file_operations;
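/*
 * Illustrative sketch (not from this file): the selection above follows
 * the client mount options, e.g.:
 *
 *	mount -t lustre -o flock      mgs@tcp:/fsname /mnt/client  # coherent flock
 *	mount -t lustre -o localflock mgs@tcp:/fsname /mnt/client  # client-local flock
 *	mount -t lustre -o noflock    mgs@tcp:/fsname /mnt/client  # flock() -> -ENOSYS
 */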
5449 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
5451 struct ll_inode_info *lli = ll_i2info(inode);
5452 struct cl_object *obj = lli->lli_clob;
5461 env = cl_env_get(&refcheck);
5463 RETURN(PTR_ERR(env));
5465 rc = cl_conf_set(env, lli->lli_clob, conf);
5469 if (conf->coc_opc == OBJECT_CONF_SET) {
5470 struct ldlm_lock *lock = conf->coc_lock;
5471 struct cl_layout cl = {
5475 LASSERT(lock != NULL);
5476 LASSERT(ldlm_has_layout(lock));
5478 /* it can only be allowed to match after the layout is
5479 * applied to the inode, otherwise a false layout would be
5480 * seen. Applying the layout should happen before dropping
5481 * the intent lock. */
5482 ldlm_lock_allow_match(lock);
5484 rc = cl_object_layout_get(env, obj, &cl);
5489 DFID": layout version change: %u -> %u\n",
5490 PFID(&lli->lli_fid), ll_layout_version_get(lli),
5492 ll_layout_version_set(lli, cl.cl_layout_gen);
5496 cl_env_put(env, &refcheck);
5498 RETURN(rc < 0 ? rc : 0);
5501 /* Fetch layout from MDT with getxattr request, if it's not ready yet */
5502 static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
5505 struct ll_sb_info *sbi = ll_i2sbi(inode);
5506 struct ptlrpc_request *req;
5513 CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
5514 PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
5515 lock->l_lvb_data, lock->l_lvb_len);
5517 if (lock->l_lvb_data != NULL)
5520 /* if the layout lock was granted right away, the layout is returned
5521 * within the DLM_LVB of the DLM reply; otherwise, if the lock was ever
5522 * blocked and then granted via a completion AST, we have to fetch the
5523 * layout here. Please note that we can't use the LVB buffer in the
5524 * completion AST because it doesn't have a large enough buffer */
5525 rc = ll_get_default_mdsize(sbi, &lmmsize);
5529 rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), OBD_MD_FLXATTR,
5530 XATTR_NAME_LOV, lmmsize, &req);
5533 GOTO(out, rc = 0); /* empty layout */
5540 if (lmmsize == 0) /* empty layout */
5541 GOTO(out, rc = 0);
5543 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
5545 GOTO(out, rc = -EFAULT);
5547 OBD_ALLOC_LARGE(lvbdata, lmmsize);
5548 if (lvbdata == NULL)
5549 GOTO(out, rc = -ENOMEM);
5551 memcpy(lvbdata, lmm, lmmsize);
5552 lock_res_and_lock(lock);
5553 if (unlikely(lock->l_lvb_data == NULL)) {
5554 lock->l_lvb_type = LVB_T_LAYOUT;
5555 lock->l_lvb_data = lvbdata;
5556 lock->l_lvb_len = lmmsize;
5559 unlock_res_and_lock(lock);
5562 OBD_FREE_LARGE(lvbdata, lmmsize);
5567 ptlrpc_req_finished(req);
5572 * Apply the layout to the inode. The layout lock is held and will be
5573 * released before this function returns.
5575 static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
5576 struct inode *inode)
5578 struct ll_inode_info *lli = ll_i2info(inode);
5579 struct ll_sb_info *sbi = ll_i2sbi(inode);
5580 struct ldlm_lock *lock;
5581 struct cl_object_conf conf;
5584 bool wait_layout = false;
5587 LASSERT(lustre_handle_is_used(lockh));
5589 lock = ldlm_handle2lock(lockh);
5590 LASSERT(lock != NULL);
5591 LASSERT(ldlm_has_layout(lock));
5593 LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
5594 PFID(&lli->lli_fid), inode);
5596 /* in case this is a cached lock, reinstate it with the new inode */
5597 md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
5599 lock_res_and_lock(lock);
5600 lvb_ready = ldlm_is_lvb_ready(lock);
5601 unlock_res_and_lock(lock);
5603 /* checking lvb_ready is racy, but this is okay. The worst case is
5604 * that multiple processes may configure the file at the same time. */
5608 rc = ll_layout_fetch(inode, lock);
5612 /* for layout lock, the lmm is stored in the lock's LVB;
5613 * lvb_data is immutable while the lock is held, so it's safe to
5614 * access it without taking the resource lock.
5616 * set the layout on the file. This is unlikely to fail, as the old
5617 * layout was surely eliminated */
5618 memset(&conf, 0, sizeof conf);
5619 conf.coc_opc = OBJECT_CONF_SET;
5620 conf.coc_inode = inode;
5621 conf.coc_lock = lock;
5622 conf.u.coc_layout.lb_buf = lock->l_lvb_data;
5623 conf.u.coc_layout.lb_len = lock->l_lvb_len;
5624 rc = ll_layout_conf(inode, &conf);
5626 /* refresh layout failed, need to wait */
5627 wait_layout = rc == -EBUSY;
5630 LDLM_LOCK_PUT(lock);
5631 ldlm_lock_decref(lockh, mode);
5633 /* wait for IO to complete if the layout is still being used. */
5635 CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
5636 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
5638 memset(&conf, 0, sizeof conf);
5639 conf.coc_opc = OBJECT_CONF_WAIT;
5640 conf.coc_inode = inode;
5641 rc = ll_layout_conf(inode, &conf);
5645 CDEBUG(D_INODE, "%s file="DFID" waiting layout return: %d\n",
5646 sbi->ll_fsname, PFID(&lli->lli_fid), rc);
5652 * Issue layout intent RPC to MDS.
5653 * \param inode [in] file inode
5654 * \param intent [in] layout intent
5656 * \retval 0 on success
5657 * \retval < 0 error code
5659 static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
5661 struct ll_inode_info *lli = ll_i2info(inode);
5662 struct ll_sb_info *sbi = ll_i2sbi(inode);
5663 struct md_op_data *op_data;
5664 struct lookup_intent it;
5665 struct ptlrpc_request *req;
5669 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
5670 0, 0, LUSTRE_OPC_ANY, NULL);
5671 if (IS_ERR(op_data))
5672 RETURN(PTR_ERR(op_data));
5674 op_data->op_data = intent;
5675 op_data->op_data_size = sizeof(*intent);
5677 memset(&it, 0, sizeof(it));
5678 it.it_op = IT_LAYOUT;
5679 if (intent->li_opc == LAYOUT_INTENT_WRITE ||
5680 intent->li_opc == LAYOUT_INTENT_TRUNC)
5681 it.it_flags = FMODE_WRITE;
5683 LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
5684 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
5686 rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
5687 &ll_md_blocking_ast, 0);
5688 if (it.it_request != NULL)
5689 ptlrpc_req_finished(it.it_request);
5690 it.it_request = NULL;
5692 ll_finish_md_op_data(op_data);
5694 /* set lock data in case this is a new lock */
5696 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
5698 ll_intent_drop_lock(&it);
5704 * This function checks if there exists a LAYOUT lock on the client side,
5705 * or enqueues it if it doesn't have one in cache.
5707 * This function does not hold the layout lock, so it may be revoked any
5708 * time after this function returns. Any operation that depends on the
5709 * layout should be redone in that case.
5711 * This function should be called before lov_io_init() to get an uptodate
5712 * layout version, the caller should save the version number and after IO
5713 * is finished, this function should be called again to verify that layout
5714 * is not changed during IO time.
5716 int ll_layout_refresh(struct inode *inode, __u32 *gen)
5718 struct ll_inode_info *lli = ll_i2info(inode);
5719 struct ll_sb_info *sbi = ll_i2sbi(inode);
5720 struct lustre_handle lockh;
5721 struct layout_intent intent = {
5722 .li_opc = LAYOUT_INTENT_ACCESS,
5724 enum ldlm_mode mode;
5728 *gen = ll_layout_version_get(lli);
5729 if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE)
5733 LASSERT(fid_is_sane(ll_inode2fid(inode)));
5734 LASSERT(S_ISREG(inode->i_mode));
5736 /* take layout lock mutex to enqueue layout lock exclusively. */
5737 mutex_lock(&lli->lli_layout_mutex);
5740 /* mostly the layout lock is cached on the local side, so try to
5741 * match it before grabbing the layout lock mutex. */
5742 mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
5743 LCK_CR | LCK_CW | LCK_PR |
5745 if (mode != 0) { /* hit cached lock */
5746 rc = ll_layout_lock_set(&lockh, mode, inode);
5752 rc = ll_layout_intent(inode, &intent);
5758 *gen = ll_layout_version_get(lli);
5759 mutex_unlock(&lli->lli_layout_mutex);
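/*
 * Illustrative usage sketch (not from this file), following the contract
 * described above: save the generation before starting IO and verify it
 * afterwards, redoing the operation if the layout changed underneath.
 *
 *	__u32 gen_before, gen_after;
 *	int rc;
 *
 *	rc = ll_layout_refresh(inode, &gen_before);
 *	if (rc == 0) {
 *		// ... perform the IO against this layout version ...
 *		rc = ll_layout_refresh(inode, &gen_after);
 *		if (rc == 0 && gen_after != gen_before) {
 *			// layout changed during IO; redo the operation
 *		}
 *	}
 */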
5765 * Issue layout intent RPC indicating where in a file an IO is about to write.
5767 * \param[in] inode file inode.
5768 * \param[in] ext write range: start offset of the file in bytes where
5769 * an IO is about to write, and exclusive end offset in
5770 * bytes.
5772 * \retval 0 on success
5773 * \retval < 0 error code
5775 int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
5776 struct lu_extent *ext)
5778 struct layout_intent intent = {
5780 .li_extent.e_start = ext->e_start,
5781 .li_extent.e_end = ext->e_end,
5786 rc = ll_layout_intent(inode, &intent);
5792 * This function sends a restore request to the MDT.
5794 int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
5796 struct hsm_user_request *hur;
5800 len = sizeof(struct hsm_user_request) +
5801 sizeof(struct hsm_user_item);
5802 OBD_ALLOC(hur, len);
5806 hur->hur_request.hr_action = HUA_RESTORE;
5807 hur->hur_request.hr_archive_id = 0;
5808 hur->hur_request.hr_flags = 0;
5809 memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
5810 sizeof(hur->hur_user_item[0].hui_fid));
5811 hur->hur_user_item[0].hui_extent.offset = offset;
5812 hur->hur_user_item[0].hui_extent.length = length;
5813 hur->hur_request.hr_itemcount = 1;
5814 rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,