4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
34 * Author: Peter Braam <braam@clusterfs.com>
35 * Author: Phil Schwan <phil@clusterfs.com>
36 * Author: Andreas Dilger <adilger@clusterfs.com>
39 #define DEBUG_SUBSYSTEM S_LLITE
40 #include <lustre_dlm.h>
41 #include <linux/pagemap.h>
42 #include <linux/file.h>
43 #include <linux/sched.h>
44 #include <linux/user_namespace.h>
45 #include <linux/uidgid.h>
47 #include <uapi/linux/lustre/lustre_ioctl.h>
48 #include <lustre_swab.h>
50 #include "cl_object.h"
51 #include "llite_internal.h"
52 #include "vvp_internal.h"
55 struct inode *sp_inode;
60 __u64 pa_data_version;
66 ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
68 static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
71 static struct ll_file_data *ll_file_data_get(void)
73 struct ll_file_data *fd;
75 OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, GFP_NOFS);
79 fd->fd_write_failed = false;
80 pcc_file_init(&fd->fd_pcc_file);
85 static void ll_file_data_put(struct ll_file_data *fd)
88 OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
92 * Packs all the attributes into @op_data for the CLOSE rpc.
94 static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
95 struct obd_client_handle *och)
99 ll_prep_md_op_data(op_data, inode, NULL, NULL,
100 0, 0, LUSTRE_OPC_ANY, NULL);
102 op_data->op_attr.ia_mode = inode->i_mode;
103 op_data->op_attr.ia_atime = inode->i_atime;
104 op_data->op_attr.ia_mtime = inode->i_mtime;
105 op_data->op_attr.ia_ctime = inode->i_ctime;
106 op_data->op_attr.ia_size = i_size_read(inode);
107 op_data->op_attr.ia_valid |= (ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
108 ATTR_MTIME | ATTR_MTIME_SET |
110 op_data->op_xvalid |= OP_XVALID_CTIME_SET;
111 op_data->op_attr_blocks = inode->i_blocks;
112 op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
113 if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
114 op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
115 op_data->op_open_handle = och->och_open_handle;
117 if (och->och_flags & FMODE_WRITE &&
118 ll_file_test_and_clear_flag(ll_i2info(inode), LLIF_DATA_MODIFIED))
119 /* For HSM: if inode data has been modified, pack it so that the
120 * MDT can set the data-dirty flag in the archive. */
121 op_data->op_bias |= MDS_DATA_MODIFIED;
127 * Perform a close, possibly with a bias.
128 * The meaning of "data" depends on the value of "bias".
130 * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
131 * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
134 static int ll_close_inode_openhandle(struct inode *inode,
135 struct obd_client_handle *och,
136 enum mds_op_bias bias, void *data)
138 struct obd_export *md_exp = ll_i2mdexp(inode);
139 const struct ll_inode_info *lli = ll_i2info(inode);
140 struct md_op_data *op_data;
141 struct ptlrpc_request *req = NULL;
145 if (class_exp2obd(md_exp) == NULL) {
146 CERROR("%s: invalid MDC connection handle closing "DFID"\n",
147 ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
151 OBD_ALLOC_PTR(op_data);
152 /* We leak the openhandle and request here on error, but not much can be
153 * done in the OOM case since the app won't retry the close on error either. */
155 GOTO(out, rc = -ENOMEM);
157 ll_prepare_close(inode, op_data, och);
159 case MDS_CLOSE_LAYOUT_MERGE:
160 /* merge blocks from the victim inode */
161 op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
162 op_data->op_attr.ia_valid |= ATTR_SIZE;
163 op_data->op_xvalid |= OP_XVALID_BLOCKS;
165 case MDS_CLOSE_LAYOUT_SPLIT:
166 case MDS_CLOSE_LAYOUT_SWAP: {
167 struct split_param *sp = data;
169 LASSERT(data != NULL);
170 op_data->op_bias |= bias;
171 op_data->op_data_version = 0;
172 op_data->op_lease_handle = och->och_lease_handle;
173 if (bias == MDS_CLOSE_LAYOUT_SPLIT) {
174 op_data->op_fid2 = *ll_inode2fid(sp->sp_inode);
175 op_data->op_mirror_id = sp->sp_mirror_id;
177 op_data->op_fid2 = *ll_inode2fid(data);
182 case MDS_CLOSE_RESYNC_DONE: {
183 struct ll_ioc_lease *ioc = data;
185 LASSERT(data != NULL);
186 op_data->op_attr_blocks +=
187 ioc->lil_count * op_data->op_attr_blocks;
188 op_data->op_attr.ia_valid |= ATTR_SIZE;
189 op_data->op_xvalid |= OP_XVALID_BLOCKS;
190 op_data->op_bias |= MDS_CLOSE_RESYNC_DONE;
192 op_data->op_lease_handle = och->och_lease_handle;
193 op_data->op_data = &ioc->lil_ids[0];
194 op_data->op_data_size =
195 ioc->lil_count * sizeof(ioc->lil_ids[0]);
199 case MDS_PCC_ATTACH: {
200 struct pcc_param *param = data;
202 LASSERT(data != NULL);
203 op_data->op_bias |= MDS_HSM_RELEASE | MDS_PCC_ATTACH;
204 op_data->op_archive_id = param->pa_archive_id;
205 op_data->op_data_version = param->pa_data_version;
206 op_data->op_lease_handle = och->och_lease_handle;
210 case MDS_HSM_RELEASE:
211 LASSERT(data != NULL);
212 op_data->op_bias |= MDS_HSM_RELEASE;
213 op_data->op_data_version = *(__u64 *)data;
214 op_data->op_lease_handle = och->och_lease_handle;
215 op_data->op_attr.ia_valid |= ATTR_SIZE;
216 op_data->op_xvalid |= OP_XVALID_BLOCKS;
220 LASSERT(data == NULL);
224 if (!(op_data->op_attr.ia_valid & ATTR_SIZE))
225 op_data->op_xvalid |= OP_XVALID_LAZYSIZE;
226 if (!(op_data->op_xvalid & OP_XVALID_BLOCKS))
227 op_data->op_xvalid |= OP_XVALID_LAZYBLOCKS;
229 rc = md_close(md_exp, op_data, och->och_mod, &req);
230 if (rc != 0 && rc != -EINTR)
231 CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
232 md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc);
234 if (rc == 0 && op_data->op_bias & bias) {
235 struct mdt_body *body;
237 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
238 if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
241 if (bias & MDS_PCC_ATTACH) {
242 struct pcc_param *param = data;
244 param->pa_layout_gen = body->mbo_layout_gen;
248 ll_finish_md_op_data(op_data);
252 md_clear_open_replay_data(md_exp, och);
253 och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
256 ptlrpc_req_finished(req); /* This is close request */
260 int ll_md_real_close(struct inode *inode, fmode_t fmode)
262 struct ll_inode_info *lli = ll_i2info(inode);
263 struct obd_client_handle **och_p;
264 struct obd_client_handle *och;
269 if (fmode & FMODE_WRITE) {
270 och_p = &lli->lli_mds_write_och;
271 och_usecount = &lli->lli_open_fd_write_count;
272 } else if (fmode & FMODE_EXEC) {
273 och_p = &lli->lli_mds_exec_och;
274 och_usecount = &lli->lli_open_fd_exec_count;
276 LASSERT(fmode & FMODE_READ);
277 och_p = &lli->lli_mds_read_och;
278 och_usecount = &lli->lli_open_fd_read_count;
281 mutex_lock(&lli->lli_och_mutex);
282 if (*och_usecount > 0) {
283 /* There are still users of this handle, so skip
285 mutex_unlock(&lli->lli_och_mutex);
291 mutex_unlock(&lli->lli_och_mutex);
294 /* There might be a race and this handle may already
296 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
302 static int ll_md_close(struct inode *inode, struct file *file)
304 union ldlm_policy_data policy = {
305 .l_inodebits = { MDS_INODELOCK_OPEN },
307 __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
308 struct ll_file_data *fd = file->private_data;
309 struct ll_inode_info *lli = ll_i2info(inode);
310 struct lustre_handle lockh;
311 enum ldlm_mode lockmode;
315 /* clear group lock, if present */
316 if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
317 ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
319 if (fd->fd_lease_och != NULL) {
322 /* Usually the lease is not released when the
323 * application crashes, so we need to release it here. */
324 rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
325 CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
326 PFID(&lli->lli_fid), rc, lease_broken);
328 fd->fd_lease_och = NULL;
331 if (fd->fd_och != NULL) {
332 rc = ll_close_inode_openhandle(inode, fd->fd_och, 0, NULL);
337 /* Let's see if we have a good enough OPEN lock on the file and if
338 we can skip talking to the MDS */
339 mutex_lock(&lli->lli_och_mutex);
340 if (fd->fd_omode & FMODE_WRITE) {
342 LASSERT(lli->lli_open_fd_write_count);
343 lli->lli_open_fd_write_count--;
344 } else if (fd->fd_omode & FMODE_EXEC) {
346 LASSERT(lli->lli_open_fd_exec_count);
347 lli->lli_open_fd_exec_count--;
350 LASSERT(lli->lli_open_fd_read_count);
351 lli->lli_open_fd_read_count--;
353 mutex_unlock(&lli->lli_och_mutex);
355 /* LU-4398: do not cache write open lock if the file has exec bit */
356 if ((lockmode == LCK_CW && inode->i_mode & S_IXUGO) ||
357 !md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
358 LDLM_IBITS, &policy, lockmode, &lockh))
359 rc = ll_md_real_close(inode, fd->fd_omode);
362 file->private_data = NULL;
363 ll_file_data_put(fd);
368 /* While this returns an error code, the caller fput() ignores it, so we need
369 * to make every effort to clean up all of our state here. Also, applications
370 * rarely check close errors, and even if an error is returned they will not
371 * retry the close call.
373 int ll_file_release(struct inode *inode, struct file *file)
375 struct ll_file_data *fd;
376 struct ll_sb_info *sbi = ll_i2sbi(inode);
377 struct ll_inode_info *lli = ll_i2info(inode);
378 ktime_t kstart = ktime_get();
383 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
384 PFID(ll_inode2fid(inode)), inode);
386 fd = file->private_data;
389 /* The last ref on @file, maybe not from the owner pid of statahead,
390 * because parent and child processes can share the same file handle. */
391 if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
392 ll_deauthorize_statahead(inode, fd);
394 if (inode->i_sb->s_root == file_dentry(file)) {
395 file->private_data = NULL;
396 ll_file_data_put(fd);
400 pcc_file_release(inode, file);
402 if (!S_ISDIR(inode->i_mode)) {
403 if (lli->lli_clob != NULL)
404 lov_read_and_clear_async_rc(lli->lli_clob);
405 lli->lli_async_rc = 0;
408 rc = ll_md_close(inode, file);
410 if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
411 libcfs_debug_dumplog();
414 if (!rc && inode->i_sb->s_root != file_dentry(file))
415 ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
416 ktime_us_delta(ktime_get(), kstart));
420 static inline int ll_dom_readpage(void *data, struct page *page)
422 struct niobuf_local *lnb = data;
425 kaddr = ll_kmap_atomic(page, KM_USER0);
426 memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
427 if (lnb->lnb_len < PAGE_SIZE)
428 memset(kaddr + lnb->lnb_len, 0,
429 PAGE_SIZE - lnb->lnb_len);
430 flush_dcache_page(page);
431 SetPageUptodate(page);
432 ll_kunmap_atomic(kaddr, KM_USER0);
438 void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req,
439 struct lookup_intent *it)
441 struct ll_inode_info *lli = ll_i2info(inode);
442 struct cl_object *obj = lli->lli_clob;
443 struct address_space *mapping = inode->i_mapping;
445 struct niobuf_remote *rnb;
446 struct mdt_body *body;
448 unsigned long index, start;
449 struct niobuf_local lnb;
456 if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
460 rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
461 if (rnb == NULL || rnb->rnb_len == 0)
464 /* LU-11595: The server may return the whole file, which is always OK, or
465 * it may return just the file tail, whose offset must be aligned with the
466 * client PAGE_SIZE to be usable on this client; if the server's PAGE_SIZE is
467 * smaller, the offset may not be aligned and that data is simply ignored.
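 *
 * Illustrative example (not in the original source): a server built with
 * 4KiB pages may return the file tail starting at offset 92KiB; a client
 * built with 64KiB pages cannot map that buffer onto whole pages because
 * 92KiB is not a multiple of 64KiB, so the alignment check below simply
 * ignores the inline data and the read is served by normal I/O later.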
469 if (rnb->rnb_offset & ~PAGE_MASK)
472 /* The server returns the whole file or just the file tail if it fits in the
473 * reply buffer; in both cases the total size should equal the file size.
475 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
476 if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size) {
477 CERROR("%s: server returns off/len %llu/%u but size %llu\n",
478 ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
479 rnb->rnb_len, body->mbo_dom_size);
483 CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
484 rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);
486 data = (char *)rnb + sizeof(*rnb);
488 lnb.lnb_file_offset = rnb->rnb_offset;
489 start = lnb.lnb_file_offset >> PAGE_SHIFT;
491 LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
492 lnb.lnb_page_offset = 0;
494 lnb.lnb_data = data + (index << PAGE_SHIFT);
495 lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
496 if (lnb.lnb_len > PAGE_SIZE)
497 lnb.lnb_len = PAGE_SIZE;
499 vmpage = read_cache_page(mapping, index + start,
500 ll_dom_readpage, &lnb);
501 if (IS_ERR(vmpage)) {
502 CWARN("%s: cannot fill page %lu for "DFID
503 " with data: rc = %li\n",
504 ll_i2sbi(inode)->ll_fsname, index + start,
505 PFID(lu_object_fid(&obj->co_lu)),
511 } while (rnb->rnb_len > (index << PAGE_SHIFT));
515 static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
516 struct lookup_intent *itp)
518 struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
519 struct dentry *parent = de->d_parent;
522 struct md_op_data *op_data;
523 struct ptlrpc_request *req = NULL;
527 LASSERT(parent != NULL);
528 LASSERT(itp->it_flags & MDS_OPEN_BY_FID);
530 /* if server supports open-by-fid, or file name is invalid, don't pack
531 * name in open request */
532 if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_OPEN_BY_NAME) ||
533 !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID)) {
535 len = de->d_name.len;
536 name = kmalloc(len + 1, GFP_NOFS);
541 spin_lock(&de->d_lock);
542 if (len != de->d_name.len) {
543 spin_unlock(&de->d_lock);
547 memcpy(name, de->d_name.name, len);
549 spin_unlock(&de->d_lock);
551 if (!lu_name_is_valid_2(name, len)) {
557 op_data = ll_prep_md_op_data(NULL, parent->d_inode, de->d_inode,
558 name, len, 0, LUSTRE_OPC_ANY, NULL);
559 if (IS_ERR(op_data)) {
561 RETURN(PTR_ERR(op_data));
563 op_data->op_data = lmm;
564 op_data->op_data_size = lmmsize;
566 rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
567 &ll_md_blocking_ast, 0);
569 ll_finish_md_op_data(op_data);
571 /* reason for keeping our own exit path - don't flood the log
572 * with -ESTALE error messages.
574 if (!it_disposition(itp, DISP_OPEN_OPEN) ||
575 it_open_error(DISP_OPEN_OPEN, itp))
577 ll_release_openhandle(de, itp);
581 if (it_disposition(itp, DISP_LOOKUP_NEG))
582 GOTO(out, rc = -ENOENT);
584 if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
585 rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
586 CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
590 rc = ll_prep_inode(&de->d_inode, req, NULL, itp);
592 if (!rc && itp->it_lock_mode) {
593 struct lustre_handle handle = {.cookie = itp->it_lock_handle};
594 struct ldlm_lock *lock;
595 bool has_dom_bit = false;
597 /* If we got a lock back and it has a LOOKUP bit set,
598 * make sure the dentry is marked as valid so we can find it.
599 * We don't need to care about actual hashing since other parts
600 * of the kernel will deal with that later.
602 lock = ldlm_handle2lock(&handle);
604 has_dom_bit = ldlm_has_dom(lock);
605 if (lock->l_policy_data.l_inodebits.bits &
606 MDS_INODELOCK_LOOKUP)
607 d_lustre_revalidate(de);
611 ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, NULL);
613 ll_dom_finish_open(de->d_inode, req, itp);
617 ptlrpc_req_finished(req);
618 ll_intent_drop_lock(itp);
620 /* We did open by fid, but by the time we got to the server,
621 * the object disappeared. If this is a create, we cannot really
622 * tell the userspace that the file it was trying to create
623 * does not exist. Instead let's return -ESTALE, and the VFS will
624 * retry the create with LOOKUP_REVAL, which we are going to catch
625 * in ll_revalidate_dentry() and fall back to lookup then.
627 if (rc == -ENOENT && itp->it_op & IT_CREAT)
633 static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
634 struct obd_client_handle *och)
636 struct mdt_body *body;
638 body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
639 och->och_open_handle = body->mbo_open_handle;
640 och->och_fid = body->mbo_fid1;
641 och->och_lease_handle.cookie = it->it_lock_handle;
642 och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
643 och->och_flags = it->it_flags;
645 return md_set_open_replay_data(md_exp, och, it);
648 static int ll_local_open(struct file *file, struct lookup_intent *it,
649 struct ll_file_data *fd, struct obd_client_handle *och)
651 struct inode *inode = file_inode(file);
654 LASSERT(!file->private_data);
661 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
666 file->private_data = fd;
667 ll_readahead_init(inode, &fd->fd_ras);
668 fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
670 /* ll_cl_context initialize */
671 rwlock_init(&fd->fd_lock);
672 INIT_LIST_HEAD(&fd->fd_lccs);
677 /* Open a file, and (for the very first open) create objects on the OSTs at
678 * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
679 * creation or open until ll_lov_setstripe() ioctl is called.
681 * If we already have the stripe MD locally then we don't request it in
682 * md_open(), by passing a lmm_size = 0.
684 * It is up to the application to ensure no other processes open this file
685 * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
686 * used. We might be able to avoid races of that sort by getting lli_open_sem
687 * before returning in the O_LOV_DELAY_CREATE case and dropping it here
688 * or in ll_file_release(), but I'm not sure that is desirable/necessary.
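 *
 * Illustrative userspace sketch (not part of the original file; the header
 * and constant names are the usual lustre_user.h ones and are stated here
 * as assumptions): an application that wants to pick its own striping
 * opens with O_LOV_DELAY_CREATE and sets the layout before the first write:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/lustre/lustre_user.h>
 *
 *	int fd = open(path, O_CREAT | O_WRONLY | O_LOV_DELAY_CREATE, 0644);
 *	struct lov_user_md_v1 lum = {
 *		.lmm_magic	   = LOV_USER_MAGIC_V1,
 *		.lmm_pattern	   = LOV_PATTERN_RAID0,
 *		.lmm_stripe_size   = 1048576,
 *		.lmm_stripe_count  = 4,
 *		.lmm_stripe_offset = (__u16)-1,
 *	};
 *	if (fd >= 0 && ioctl(fd, LL_IOC_LOV_SETSTRIPE, &lum) < 0)
 *		perror("LL_IOC_LOV_SETSTRIPE");
 *
 * Here lmm_stripe_size is in bytes (1 MiB) and a stripe offset of -1 lets
 * the MDS pick the starting OST; any other process opening the file before
 * the ioctl would trigger default striping, as noted above.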
690 int ll_file_open(struct inode *inode, struct file *file)
692 struct ll_inode_info *lli = ll_i2info(inode);
693 struct lookup_intent *it, oit = { .it_op = IT_OPEN,
694 .it_flags = file->f_flags };
695 struct obd_client_handle **och_p = NULL;
696 __u64 *och_usecount = NULL;
697 struct ll_file_data *fd;
698 ktime_t kstart = ktime_get();
702 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
703 PFID(ll_inode2fid(inode)), inode, file->f_flags);
705 it = file->private_data; /* XXX: compat macro */
706 file->private_data = NULL; /* prevent ll_local_open assertion */
708 fd = ll_file_data_get();
710 GOTO(out_nofiledata, rc = -ENOMEM);
713 if (S_ISDIR(inode->i_mode))
714 ll_authorize_statahead(inode, fd);
716 if (inode->i_sb->s_root == file_dentry(file)) {
717 file->private_data = fd;
721 if (!it || !it->it_disposition) {
722 /* Convert f_flags into access mode. We cannot use file->f_mode,
723 * because everything but O_ACCMODE mask was stripped from
725 if ((oit.it_flags + 1) & O_ACCMODE)
727 if (file->f_flags & O_TRUNC)
728 oit.it_flags |= FMODE_WRITE;
730 /* The kernel only calls f_op->open in dentry_open. filp_open calls
731 * dentry_open after a call to open_namei that checks permissions.
732 * Only nfsd_open calls dentry_open directly without checking
733 * permissions, and because of that the code below is safe.
735 if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
736 oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
738 /* We do not want O_EXCL here, presumably we opened the file
739 * already? XXX - NFS implications? */
740 oit.it_flags &= ~O_EXCL;
742 /* bug20584: if "it_flags" contains O_CREAT, the file will be
743 * created if necessary, so "IT_CREAT" should be set to stay
744 * consistent with it */
745 if (oit.it_flags & O_CREAT)
746 oit.it_op |= IT_CREAT;
752 /* Let's see if we have file open on MDS already. */
753 if (it->it_flags & FMODE_WRITE) {
754 och_p = &lli->lli_mds_write_och;
755 och_usecount = &lli->lli_open_fd_write_count;
756 } else if (it->it_flags & FMODE_EXEC) {
757 och_p = &lli->lli_mds_exec_och;
758 och_usecount = &lli->lli_open_fd_exec_count;
760 och_p = &lli->lli_mds_read_och;
761 och_usecount = &lli->lli_open_fd_read_count;
764 mutex_lock(&lli->lli_och_mutex);
765 if (*och_p) { /* Open handle is present */
766 if (it_disposition(it, DISP_OPEN_OPEN)) {
767 /* Well, there's an extra open request that we do not need,
768 * so let's close it somehow. This will decref the request. */
769 rc = it_open_error(DISP_OPEN_OPEN, it);
771 mutex_unlock(&lli->lli_och_mutex);
772 GOTO(out_openerr, rc);
775 ll_release_openhandle(file_dentry(file), it);
779 rc = ll_local_open(file, it, fd, NULL);
782 mutex_unlock(&lli->lli_och_mutex);
783 GOTO(out_openerr, rc);
786 LASSERT(*och_usecount == 0);
787 if (!it->it_disposition) {
788 struct dentry *dentry = file_dentry(file);
789 struct ll_dentry_data *ldd;
791 /* We cannot just request a lock handle now; the new ELC code
792 * means that one of the other OPEN locks for this file
793 * could be cancelled, and since the blocking ast handler
794 * would attempt to grab och_mutex as well, that would
795 * result in a deadlock
797 mutex_unlock(&lli->lli_och_mutex);
799 * Normally called under two situations:
801 * 2. A race/condition on MDS resulting in no open
802 * handle to be returned from LOOKUP|OPEN request,
803 * for example if the target entry was a symlink.
805 * Only fetch MDS_OPEN_LOCK if this is in the NFS path,
806 * marked by a bit set in ll_iget_for_nfs. Clear the
807 * bit so that it does not confuse later callers.
809 * NB: when ldd is NULL, it must have come via the normal
810 * lookup path only, since ll_iget_for_nfs always calls
813 ldd = ll_d2d(dentry);
814 if (ldd && ldd->lld_nfs_dentry) {
815 ldd->lld_nfs_dentry = 0;
816 if (!filename_is_volatile(dentry->d_name.name,
819 it->it_flags |= MDS_OPEN_LOCK;
823 * Always specify MDS_OPEN_BY_FID because we don't want
824 * to get a file with a different fid.
826 it->it_flags |= MDS_OPEN_BY_FID;
827 rc = ll_intent_file_open(dentry, NULL, 0, it);
829 GOTO(out_openerr, rc);
833 OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
835 GOTO(out_och_free, rc = -ENOMEM);
839 /* md_intent_lock() didn't get a request ref if there was an
840 * open error, so don't do cleanup on the request here
842 /* XXX (green): Shouldn't we bail out on any error here, not
843 * just an open error? */
844 rc = it_open_error(DISP_OPEN_OPEN, it);
846 GOTO(out_och_free, rc);
848 LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
849 "inode %p: disposition %x, status %d\n", inode,
850 it_disposition(it, ~0), it->it_status);
852 rc = ll_local_open(file, it, fd, *och_p);
854 GOTO(out_och_free, rc);
857 rc = pcc_file_open(inode, file);
859 GOTO(out_och_free, rc);
861 mutex_unlock(&lli->lli_och_mutex);
863 /* lockless for direct IO so that it can do IO in parallel */
864 if (file->f_flags & O_DIRECT)
865 fd->fd_flags |= LL_FILE_LOCKLESS_IO;
868 /* Must do this outside lli_och_mutex lock to prevent deadlock where
869 a different kind of OPEN lock for this same inode gets cancelled
870 by ldlm_cancel_lru */
871 if (!S_ISREG(inode->i_mode))
872 GOTO(out_och_free, rc);
873 cl_lov_delay_create_clear(&file->f_flags);
874 GOTO(out_och_free, rc);
878 if (och_p && *och_p) {
879 OBD_FREE(*och_p, sizeof(struct obd_client_handle));
880 *och_p = NULL; /* OBD_FREE writes some magic there */
883 mutex_unlock(&lli->lli_och_mutex);
886 if (lli->lli_opendir_key == fd)
887 ll_deauthorize_statahead(inode, fd);
890 ll_file_data_put(fd);
892 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN,
893 ktime_us_delta(ktime_get(), kstart));
897 if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
898 ptlrpc_req_finished(it->it_request);
899 it_clear_disposition(it, DISP_ENQ_OPEN_REF);
905 static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
906 struct ldlm_lock_desc *desc, void *data, int flag)
909 struct lustre_handle lockh;
913 case LDLM_CB_BLOCKING:
914 ldlm_lock2handle(lock, &lockh);
915 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
917 CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
921 case LDLM_CB_CANCELING:
929 * When setting a lease on a file, we take ownership of the lli_mds_*_och
930 * and save it as fd->fd_och so as to force the client to reopen the file even
931 * if it has an open lock in cache already.
933 static int ll_lease_och_acquire(struct inode *inode, struct file *file,
934 struct lustre_handle *old_open_handle)
936 struct ll_inode_info *lli = ll_i2info(inode);
937 struct ll_file_data *fd = file->private_data;
938 struct obd_client_handle **och_p;
943 /* Get the openhandle of the file */
944 mutex_lock(&lli->lli_och_mutex);
945 if (fd->fd_lease_och != NULL)
946 GOTO(out_unlock, rc = -EBUSY);
948 if (fd->fd_och == NULL) {
949 if (file->f_mode & FMODE_WRITE) {
950 LASSERT(lli->lli_mds_write_och != NULL);
951 och_p = &lli->lli_mds_write_och;
952 och_usecount = &lli->lli_open_fd_write_count;
954 LASSERT(lli->lli_mds_read_och != NULL);
955 och_p = &lli->lli_mds_read_och;
956 och_usecount = &lli->lli_open_fd_read_count;
959 if (*och_usecount > 1)
960 GOTO(out_unlock, rc = -EBUSY);
967 *old_open_handle = fd->fd_och->och_open_handle;
971 mutex_unlock(&lli->lli_och_mutex);
976 * Release ownership on lli_mds_*_och when putting back a file lease.
978 static int ll_lease_och_release(struct inode *inode, struct file *file)
980 struct ll_inode_info *lli = ll_i2info(inode);
981 struct ll_file_data *fd = file->private_data;
982 struct obd_client_handle **och_p;
983 struct obd_client_handle *old_och = NULL;
988 mutex_lock(&lli->lli_och_mutex);
989 if (file->f_mode & FMODE_WRITE) {
990 och_p = &lli->lli_mds_write_och;
991 och_usecount = &lli->lli_open_fd_write_count;
993 och_p = &lli->lli_mds_read_och;
994 och_usecount = &lli->lli_open_fd_read_count;
997 /* The file may have been opened by another process (broken lease), so
998 * *och_p is not NULL. In this case we should simply increase the usecount
1001 if (*och_p != NULL) {
1002 old_och = fd->fd_och;
1005 *och_p = fd->fd_och;
1009 mutex_unlock(&lli->lli_och_mutex);
1011 if (old_och != NULL)
1012 rc = ll_close_inode_openhandle(inode, old_och, 0, NULL);
1018 * Acquire a lease and open the file.
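 *
 * Illustrative sketch (not in the original file): userspace normally reaches
 * this through liblustreapi before an operation such as HSM release or
 * mirror resync; the wrapper names below are assumptions about the usual
 * llapi interface:
 *
 *	int fd = open(path, O_RDONLY);
 *	int rc = llapi_lease_acquire(fd, LL_LEASE_RDLCK);
 *	if (rc == 0) {
 *		... work while the lease is held ...
 *		rc = llapi_lease_release(fd);
 *	}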
1020 static struct obd_client_handle *
1021 ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
1024 struct lookup_intent it = { .it_op = IT_OPEN };
1025 struct ll_sb_info *sbi = ll_i2sbi(inode);
1026 struct md_op_data *op_data;
1027 struct ptlrpc_request *req = NULL;
1028 struct lustre_handle old_open_handle = { 0 };
1029 struct obd_client_handle *och = NULL;
1034 if (fmode != FMODE_WRITE && fmode != FMODE_READ)
1035 RETURN(ERR_PTR(-EINVAL));
1038 if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
1039 RETURN(ERR_PTR(-EPERM));
1041 rc = ll_lease_och_acquire(inode, file, &old_open_handle);
1043 RETURN(ERR_PTR(rc));
1048 RETURN(ERR_PTR(-ENOMEM));
1050 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
1051 LUSTRE_OPC_ANY, NULL);
1052 if (IS_ERR(op_data))
1053 GOTO(out, rc = PTR_ERR(op_data));
1055 /* To tell the MDT this openhandle is from the same owner */
1056 op_data->op_open_handle = old_open_handle;
1058 it.it_flags = fmode | open_flags;
1059 it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
1060 rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
1061 &ll_md_blocking_lease_ast,
1062 /* LDLM_FL_NO_LRU: do not put the lease lock into the LRU list, otherwise
1063 * it can be cancelled, which may mislead applications into thinking the lease is
1065 * LDLM_FL_EXCL: set this flag so that it won't be matched by a normal
1066 * open in ll_md_blocking_ast(). Otherwise, as ll_md_blocking_lease_ast
1067 * doesn't deal with the openhandle, the normal openhandle would be leaked. */
1068 LDLM_FL_NO_LRU | LDLM_FL_EXCL);
1069 ll_finish_md_op_data(op_data);
1070 ptlrpc_req_finished(req);
1072 GOTO(out_release_it, rc);
1074 if (it_disposition(&it, DISP_LOOKUP_NEG))
1075 GOTO(out_release_it, rc = -ENOENT);
1077 rc = it_open_error(DISP_OPEN_OPEN, &it);
1079 GOTO(out_release_it, rc);
1081 LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
1082 rc = ll_och_fill(sbi->ll_md_exp, &it, och);
1084 GOTO(out_release_it, rc);
1086 if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
1087 GOTO(out_close, rc = -EOPNOTSUPP);
1089 /* already got the lease; handle the lease lock */
1090 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
1091 if (it.it_lock_mode == 0 ||
1092 it.it_lock_bits != MDS_INODELOCK_OPEN) {
1093 /* an open lock must be returned for a lease */
1094 CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
1095 PFID(ll_inode2fid(inode)), it.it_lock_mode,
1097 GOTO(out_close, rc = -EPROTO);
1100 ll_intent_release(&it);
1104 /* Cancel open lock */
1105 if (it.it_lock_mode != 0) {
1106 ldlm_lock_decref_and_cancel(&och->och_lease_handle,
1108 it.it_lock_mode = 0;
1109 och->och_lease_handle.cookie = 0ULL;
1111 rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
1113 CERROR("%s: error closing file "DFID": %d\n",
1114 sbi->ll_fsname, PFID(&ll_i2info(inode)->lli_fid), rc2);
1115 och = NULL; /* och has been freed in ll_close_inode_openhandle() */
1117 ll_intent_release(&it);
1121 RETURN(ERR_PTR(rc));
1125 * Check whether a layout swap can be done between two inodes.
1127 * \param[in] inode1 First inode to check
1128 * \param[in] inode2 Second inode to check
1130 * \retval 0 on success, layout swap can be performed between both inodes
1131 * \retval negative error code if requirements are not met
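 *
 * Illustrative sketch (not in the original file): userspace usually reaches
 * this check through the swap-layouts ioctl, e.g. via the liblustreapi
 * wrapper (name and signature stated as assumptions):
 *
 *	rc = llapi_swap_layouts("/mnt/lustre/a", "/mnt/lustre/b",
 *				0, 0, SWAP_LAYOUTS_CHECK_DV1);
 *
 * Both files must be regular, writable by the caller and live on the same
 * Lustre mount, otherwise the checks below reject the swap.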
1133 static int ll_check_swap_layouts_validity(struct inode *inode1,
1134 struct inode *inode2)
1136 if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
1139 if (inode_permission(inode1, MAY_WRITE) ||
1140 inode_permission(inode2, MAY_WRITE))
1143 if (inode1->i_sb != inode2->i_sb)
1149 static int ll_swap_layouts_close(struct obd_client_handle *och,
1150 struct inode *inode, struct inode *inode2)
1152 const struct lu_fid *fid1 = ll_inode2fid(inode);
1153 const struct lu_fid *fid2;
1157 CDEBUG(D_INODE, "%s: biased close of file "DFID"\n",
1158 ll_i2sbi(inode)->ll_fsname, PFID(fid1));
1160 rc = ll_check_swap_layouts_validity(inode, inode2);
1162 GOTO(out_free_och, rc);
1164 /* We now know that inode2 is a lustre inode */
1165 fid2 = ll_inode2fid(inode2);
1167 rc = lu_fid_cmp(fid1, fid2);
1169 GOTO(out_free_och, rc = -EINVAL);
1171 /* Close the file and {swap,merge} layouts between inode & inode2.
1172 * NB: lease lock handle is released in mdc_close_layout_swap_pack()
1173 * because we still need it to pack l_remote_handle to MDT. */
1174 rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP,
1177 och = NULL; /* freed in ll_close_inode_openhandle() */
1187 * Release the lease and close the file.
1188 * It will check whether the lease has ever been broken.
1190 static int ll_lease_close_intent(struct obd_client_handle *och,
1191 struct inode *inode,
1192 bool *lease_broken, enum mds_op_bias bias,
1195 struct ldlm_lock *lock;
1196 bool cancelled = true;
1200 lock = ldlm_handle2lock(&och->och_lease_handle);
1202 lock_res_and_lock(lock);
1203 cancelled = ldlm_is_cancel(lock);
1204 unlock_res_and_lock(lock);
1205 LDLM_LOCK_PUT(lock);
1208 CDEBUG(D_INODE, "lease for "DFID" broken? %d, bias: %x\n",
1209 PFID(&ll_i2info(inode)->lli_fid), cancelled, bias);
1211 if (lease_broken != NULL)
1212 *lease_broken = cancelled;
1214 if (!cancelled && !bias)
1215 ldlm_cli_cancel(&och->och_lease_handle, 0);
1217 if (cancelled) { /* no need to execute intent */
1222 rc = ll_close_inode_openhandle(inode, och, bias, data);
1226 static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
1229 return ll_lease_close_intent(och, inode, lease_broken, 0, NULL);
1233 * After the lease is taken, send an MDS_REINT_RESYNC RPC to the MDT
1235 static int ll_lease_file_resync(struct obd_client_handle *och,
1236 struct inode *inode, unsigned long arg)
1238 struct ll_sb_info *sbi = ll_i2sbi(inode);
1239 struct md_op_data *op_data;
1240 struct ll_ioc_lease_id ioc;
1241 __u64 data_version_unused;
1245 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1246 LUSTRE_OPC_ANY, NULL);
1247 if (IS_ERR(op_data))
1248 RETURN(PTR_ERR(op_data));
1250 if (copy_from_user(&ioc, (struct ll_ioc_lease_id __user *)arg,
1254 /* before starting file resync, it's necessary to clean up the page cache
1255 * in client memory, otherwise once the layout version is increased,
1256 * writing back cached data will be denied by the OSTs. */
1257 rc = ll_data_version(inode, &data_version_unused, LL_DV_WR_FLUSH);
1261 op_data->op_lease_handle = och->och_lease_handle;
1262 op_data->op_mirror_id = ioc.lil_mirror_id;
1263 rc = md_file_resync(sbi->ll_md_exp, op_data);
1269 ll_finish_md_op_data(op_data);
1273 int ll_merge_attr(const struct lu_env *env, struct inode *inode)
1275 struct ll_inode_info *lli = ll_i2info(inode);
1276 struct cl_object *obj = lli->lli_clob;
1277 struct cl_attr *attr = vvp_env_thread_attr(env);
1285 ll_inode_size_lock(inode);
1287 /* Merge the timestamps most recently obtained from the MDS with
1288 * timestamps obtained from the OSTs.
1290 * Do not overwrite the inode's atime because it may be refreshed
1291 * by the file_accessed() function. If the read was served from cached
1292 * data, there is no RPC to be sent, so the atime may not be
1293 * transferred to the OSTs at all. The MDT only updates atime at close time
1294 * if it's at least 'mdd.*.atime_diff' older.
1295 * All in all, the atime in Lustre does not strictly comply with
1296 * POSIX. Solving this would require sending an RPC to the MDT for each
1297 * read, which would hurt performance.
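 *
 * Illustrative example (not in the original file): if the MDS reported
 * mtime 1000 but the OST attributes carry cat_mtime 1005, the merged
 * mtime below becomes 1005; atime is likewise only ever moved forward,
 * never back.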
1299 if (ll_file_test_and_clear_flag(lli, LLIF_UPDATE_ATIME) ||
1300 inode->i_atime.tv_sec < lli->lli_atime)
1301 inode->i_atime.tv_sec = lli->lli_atime;
1303 inode->i_mtime.tv_sec = lli->lli_mtime;
1304 inode->i_ctime.tv_sec = lli->lli_ctime;
1306 mtime = inode->i_mtime.tv_sec;
1307 atime = inode->i_atime.tv_sec;
1308 ctime = inode->i_ctime.tv_sec;
1310 cl_object_attr_lock(obj);
1311 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_MERGE))
1314 rc = cl_object_attr_get(env, obj, attr);
1315 cl_object_attr_unlock(obj);
1318 GOTO(out_size_unlock, rc = (rc == -ENODATA ? 0 : rc));
1320 if (atime < attr->cat_atime)
1321 atime = attr->cat_atime;
1323 if (ctime < attr->cat_ctime)
1324 ctime = attr->cat_ctime;
1326 if (mtime < attr->cat_mtime)
1327 mtime = attr->cat_mtime;
1329 CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n",
1330 PFID(&lli->lli_fid), attr->cat_size);
1332 i_size_write(inode, attr->cat_size);
1333 inode->i_blocks = attr->cat_blocks;
1335 inode->i_mtime.tv_sec = mtime;
1336 inode->i_atime.tv_sec = atime;
1337 inode->i_ctime.tv_sec = ctime;
1340 ll_inode_size_unlock(inode);
1346 * Set the designated mirror for I/O.
1348 * So far only read, write, and truncate support issuing I/O to a
1349 * designated mirror.
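 *
 * Illustrative sketch (not in the original file): the designated mirror is
 * normally selected from userspace before a resync-style read or write,
 * e.g. (ioctl name stated as an assumption):
 *
 *	__u16 mirror_id = 2;
 *	ioctl(fd, LL_IOC_FLR_SET_MIRROR, mirror_id);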
1351 void ll_io_set_mirror(struct cl_io *io, const struct file *file)
1353 struct ll_file_data *fd = file->private_data;
1355 /* clear the layout version for generic (non-resync) I/O in case it carries
1356 * a stale layout version due to an I/O restart */
1357 io->ci_layout_version = 0;
1359 /* FLR: disable non-delay for designated mirror I/O because obviously
1360 * only one mirror is available */
1361 if (fd->fd_designated_mirror > 0) {
1363 io->ci_designated_mirror = fd->fd_designated_mirror;
1364 io->ci_layout_version = fd->fd_layout_version;
1367 CDEBUG(D_VFSTRACE, "%s: designated mirror: %d\n",
1368 file->f_path.dentry->d_name.name, io->ci_designated_mirror);
1371 static bool file_is_noatime(const struct file *file)
1373 const struct vfsmount *mnt = file->f_path.mnt;
1374 const struct inode *inode = file_inode((struct file *)file);
1376 /* Adapted from file_accessed() and touch_atime().*/
1377 if (file->f_flags & O_NOATIME)
1380 if (inode->i_flags & S_NOATIME)
1383 if (IS_NOATIME(inode))
1386 if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
1389 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1392 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1398 void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
1399 struct vvp_io_args *args)
1401 struct inode *inode = file_inode(file);
1402 struct ll_file_data *fd = file->private_data;
1404 io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
1405 io->ci_lock_no_expand = fd->ll_lock_no_expand;
1407 if (iot == CIT_WRITE) {
1408 io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
1409 io->u.ci_wr.wr_sync = !!(file->f_flags & O_SYNC ||
1410 file->f_flags & O_DIRECT ||
1412 #ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
1413 io->u.ci_wr.wr_sync |= !!(args &&
1414 args->via_io_subtype == IO_NORMAL &&
1415 args->u.normal.via_iocb->ki_flags & IOCB_DSYNC);
1419 io->ci_obj = ll_i2info(inode)->lli_clob;
1420 io->ci_lockreq = CILR_MAYBE;
1421 if (ll_file_nolock(file)) {
1422 io->ci_lockreq = CILR_NEVER;
1423 io->ci_no_srvlock = 1;
1424 } else if (file->f_flags & O_APPEND) {
1425 io->ci_lockreq = CILR_MANDATORY;
1427 io->ci_noatime = file_is_noatime(file);
1428 io->ci_async_readahead = false;
1430 /* FLR: only use non-delay I/O for read as there is only one
1431 * available mirror for write. */
1432 io->ci_ndelay = !(iot == CIT_WRITE);
1434 ll_io_set_mirror(io, file);
1437 static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
1440 struct ll_inode_info *lli = ll_i2info(inode);
1441 struct ll_sb_info *sbi = ll_i2sbi(inode);
1442 enum obd_heat_type sample_type;
1443 enum obd_heat_type iobyte_type;
1444 __u64 now = ktime_get_real_seconds();
1446 if (!ll_sbi_has_file_heat(sbi) ||
1447 lli->lli_heat_flags & LU_HEAT_FLAG_OFF)
1450 if (iot == CIT_READ) {
1451 sample_type = OBD_HEAT_READSAMPLE;
1452 iobyte_type = OBD_HEAT_READBYTE;
1453 } else if (iot == CIT_WRITE) {
1454 sample_type = OBD_HEAT_WRITESAMPLE;
1455 iobyte_type = OBD_HEAT_WRITEBYTE;
1460 spin_lock(&lli->lli_heat_lock);
1461 obd_heat_add(&lli->lli_heat_instances[sample_type], now, 1,
1462 sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
1463 obd_heat_add(&lli->lli_heat_instances[iobyte_type], now, count,
1464 sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
1465 spin_unlock(&lli->lli_heat_lock);
1469 ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
1470 struct file *file, enum cl_io_type iot,
1471 loff_t *ppos, size_t count)
1473 struct vvp_io *vio = vvp_env_io(env);
1474 struct inode *inode = file_inode(file);
1475 struct ll_inode_info *lli = ll_i2info(inode);
1476 struct ll_file_data *fd = file->private_data;
1477 struct range_lock range;
1481 unsigned retried = 0;
1482 unsigned ignore_lockless = 0;
1486 CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, count: %zu\n",
1487 file_dentry(file)->d_name.name,
1488 iot == CIT_READ ? "read" : "write", *ppos, count);
1491 io = vvp_env_thread_io(env);
1492 ll_io_init(io, file, iot, args);
1493 io->ci_ignore_lockless = ignore_lockless;
1494 io->ci_ndelay_tried = retried;
1496 if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
1497 bool range_locked = false;
1499 if (file->f_flags & O_APPEND)
1500 range_lock_init(&range, 0, LUSTRE_EOF);
1502 range_lock_init(&range, *ppos, *ppos + count - 1);
1504 vio->vui_fd = file->private_data;
1505 vio->vui_io_subtype = args->via_io_subtype;
1507 switch (vio->vui_io_subtype) {
1509 vio->vui_iter = args->u.normal.via_iter;
1510 vio->vui_iocb = args->u.normal.via_iocb;
1511 /* Direct IO reads must also take the range lock,
1512 * or multiple reads will try to work on the same pages.
1513 * See LU-6227 for details. */
1514 if (((iot == CIT_WRITE) ||
1515 (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
1516 !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1517 CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
1519 rc = range_lock(&lli->lli_write_tree, &range);
1523 range_locked = true;
1527 vio->u.splice.vui_pipe = args->u.splice.via_pipe;
1528 vio->u.splice.vui_flags = args->u.splice.via_flags;
1531 CERROR("unknown IO subtype %u\n", vio->vui_io_subtype);
1535 ll_cl_add(file, env, io, LCC_RW);
1536 rc = cl_io_loop(env, io);
1537 ll_cl_remove(file, env);
1540 CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
1542 range_unlock(&lli->lli_write_tree, &range);
1545 /* cl_io_rw_init() handled IO */
1549 if (io->ci_nob > 0) {
1550 result += io->ci_nob;
1551 count -= io->ci_nob;
1552 *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
1554 /* prepare IO restart */
1555 if (count > 0 && args->via_io_subtype == IO_NORMAL)
1556 args->u.normal.via_iter = vio->vui_iter;
1559 cl_io_fini(env, io);
1562 "%s: %d io complete with rc: %d, result: %zd, restart: %d\n",
1563 file->f_path.dentry->d_name.name,
1564 iot, rc, result, io->ci_need_restart);
1566 if ((rc == 0 || rc == -ENODATA || rc == -ENOLCK) &&
1567 count > 0 && io->ci_need_restart) {
1569 "%s: restart %s from %lld, count: %zu, ret: %zd, rc: %d\n",
1570 file_dentry(file)->d_name.name,
1571 iot == CIT_READ ? "read" : "write",
1572 *ppos, count, result, rc);
1573 /* preserve the tried count for FLR */
1574 retried = io->ci_ndelay_tried;
1575 ignore_lockless = io->ci_ignore_lockless;
1579 if (iot == CIT_READ) {
1581 ll_stats_ops_tally(ll_i2sbi(inode),
1582 LPROC_LL_READ_BYTES, result);
1583 } else if (iot == CIT_WRITE) {
1585 ll_stats_ops_tally(ll_i2sbi(inode),
1586 LPROC_LL_WRITE_BYTES, result);
1587 fd->fd_write_failed = false;
1588 } else if (result == 0 && rc == 0) {
1591 fd->fd_write_failed = true;
1593 fd->fd_write_failed = false;
1594 } else if (rc != -ERESTARTSYS) {
1595 fd->fd_write_failed = true;
1599 CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
1601 ll_heat_add(inode, iot, result);
1603 RETURN(result > 0 ? result : rc);
1607 * The purpose of fast read is to overcome per I/O overhead and improve IOPS
1608 * especially for small I/O.
1610 * To serve a read request, CLIO has to create and initialize a cl_io and
1611 * then request a DLM lock. This has turned out to have significant overhead
1612 * and affects the performance of small I/O dramatically.
1614 * It's not necessary to create a cl_io for each I/O. Under the help of read
1615 * ahead, most of the pages being read are already in memory cache and we can
1616 * read those pages directly because if the pages exist, the corresponding DLM
1617 * lock must exist so that page content must be valid.
1619 * In the fast read implementation, llite speculatively finds and reads pages
1620 * in the memory cache. There are three scenarios for fast read:
1621 * - If the page exists and is uptodate, kernel VM will provide the data and
1622 * CLIO won't be intervened;
1623 * - If the page was brought into memory by read ahead, it will be exported
1624 * and read ahead parameters will be updated;
1625 * - Otherwise the page is not in memory, we can't do fast read. Therefore,
1626 * it will go back and invoke normal read, i.e., a cl_io will be created
1627 * and DLM lock will be requested.
1629 * POSIX compliance: posix standard states that read is intended to be atomic.
1630 * Lustre read implementation is in line with Linux kernel read implementation
1631 * and neither of them complies with POSIX standard in this matter. Fast read
1632 * doesn't make the situation worse on single node but it may interleave write
1633 * results from multiple nodes due to short read handling in ll_file_aio_read().
1635 * \param env - lu_env
1636 * \param iocb - kiocb from kernel
1637 * \param iter - user space buffers where the data will be copied
1639 * \retval - number of bytes read, or an error code if an error occurred.
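 *
 * Illustrative note (not in the original file): callers treat this as an
 * optimistic first pass, roughly:
 *
 *	result = ll_do_fast_read(iocb, to);
 *	if (result < 0 || iov_iter_count(to) == 0)
 *		return result;
 *	result = ... full cl_io read path for the remaining bytes ...
 *
 * which is what ll_file_read_iter() below does.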
1642 ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter)
1646 if (!ll_sbi_has_fast_read(ll_i2sbi(file_inode(iocb->ki_filp))))
1649 /* NB: we can't do direct IO for fast read because it will need a lock
1650 * to make IO engine happy. */
1651 if (iocb->ki_filp->f_flags & O_DIRECT)
1654 result = generic_file_read_iter(iocb, iter);
1656 /* If the first page is not in cache, generic_file_read_iter() will
1657 * return with -ENODATA.
1658 * See the corresponding code in ll_readpage(). */
1659 if (result == -ENODATA)
1663 ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
1664 ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
1665 LPROC_LL_READ_BYTES, result);
1672 * Read from a file (through the page cache).
1674 static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1677 struct vvp_io_args *args;
1678 struct file *file = iocb->ki_filp;
1682 ktime_t kstart = ktime_get();
1685 if (!iov_iter_count(to))
1689 * Currently when PCC read failed, we do not fall back to the
1690 * normal read path, just return the error.
1691 * The resaon is that: for RW-PCC, the file data may be modified
1692 * in the PCC and inconsistent with the data on OSTs (or file
1693 * data has been removed from the Lustre file system), at this
1694 * time, fallback to the normal read path may read the wrong
1696 * TODO: for RO-PCC (readonly PCC), fall back to normal read
1697 * path: read data from data copy on OSTs.
1699 result = pcc_file_read_iter(iocb, to, &cached);
1703 ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to));
1705 result = ll_do_fast_read(iocb, to);
1706 if (result < 0 || iov_iter_count(to) == 0)
1709 env = cl_env_get(&refcheck);
1711 return PTR_ERR(env);
1713 args = ll_env_args(env, IO_NORMAL);
1714 args->u.normal.via_iter = to;
1715 args->u.normal.via_iocb = iocb;
1717 rc2 = ll_file_io_generic(env, args, file, CIT_READ,
1718 &iocb->ki_pos, iov_iter_count(to));
1721 else if (result == 0)
1724 cl_env_put(env, &refcheck);
1727 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
1728 file->private_data, iocb->ki_pos, result,
1730 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ,
1731 ktime_us_delta(ktime_get(), kstart));
1738 * Similar trick to ll_do_fast_read, this improves write speed for tiny writes.
1739 * If a page is already in the page cache and dirty (and some other things -
1740 * See ll_tiny_write_begin for the instantiation of these rules), then we can
1741 * write to it without doing a full I/O, because Lustre already knows about it
1742 * and will write it out. This saves a lot of processing time.
1744 * All writes here are within one page, so exclusion is handled by the page
1745 * lock on the vm page. We do not do tiny writes for writes which touch
1746 * multiple pages because it's very unlikely multiple sequential pages
1747 * are already dirty.
1749 * We limit these to < PAGE_SIZE because PAGE_SIZE writes are relatively common
1750 * and are unlikely to be to already dirty pages.
1752 * Attribute updates are important here, we do them in ll_tiny_write_end.
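 *
 * Illustrative example (not in the original file): with 4KiB pages, a
 * 300-byte write at offset 100 stays inside one page and may take the tiny
 * write path, while the same 300 bytes at offset 4000 span two pages
 * ((4000 & 4095) + 300 > 4096) and fall back to the normal write path.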
1754 static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter)
1756 ssize_t count = iov_iter_count(iter);
1757 struct file *file = iocb->ki_filp;
1758 struct inode *inode = file_inode(file);
1759 bool lock_inode = !IS_NOSEC(inode);
1764 /* Restrict writes to a single page and < PAGE_SIZE. See the comment at the
1765 * top of the function for why.
1767 if (count >= PAGE_SIZE ||
1768 (iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
1771 if (unlikely(lock_inode))
1773 result = __generic_file_write_iter(iocb, iter);
1775 if (unlikely(lock_inode))
1776 inode_unlock(inode);
1778 /* If the page is not already dirty, ll_tiny_write_begin returns
1779 * -ENODATA. We continue on to normal write.
1781 if (result == -ENODATA)
1785 ll_heat_add(inode, CIT_WRITE, result);
1786 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES,
1788 ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
1791 CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);
1797 * Write to a file (through the page cache).
1799 static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1801 struct vvp_io_args *args;
1803 ssize_t rc_tiny = 0, rc_normal;
1804 struct file *file = iocb->ki_filp;
1807 ktime_t kstart = ktime_get();
1812 if (!iov_iter_count(from))
1813 GOTO(out, rc_normal = 0);
1816 * When a PCC write fails, we usually do not fall back to the normal
1817 * write path, we just return the error. But there is a special case when
1818 * the returned error code is -ENOSPC due to running out of space on the PCC HSM
1819 * backend. In that case, it will fall back to the normal I/O path and
1820 * retry the I/O. As the file is in the HSM released state, it will restore
1821 * the file data to the OSTs first and then redo the write. The
1822 * restore process will revoke the layout lock and detach the file
1823 * from the PCC cache automatically.
1825 result = pcc_file_write_iter(iocb, from, &cached);
1826 if (cached && result != -ENOSPC && result != -EDQUOT)
1827 GOTO(out, rc_normal = result);
1829 /* NB: we can't do direct IO for tiny writes because they use the page
1830 * cache, we can't do sync writes because tiny writes can't flush
1831 * pages, and we can't do append writes because we can't guarantee the
1832 * required DLM locks are held to protect file size.
1834 if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
1835 !(file->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
1836 rc_tiny = ll_do_tiny_write(iocb, from);
1838 /* In case of error, go on and try the normal write - only stop if the tiny
1839 * write completed the I/O.
1841 if (iov_iter_count(from) == 0)
1842 GOTO(out, rc_normal = rc_tiny);
1844 env = cl_env_get(&refcheck);
1846 return PTR_ERR(env);
1848 args = ll_env_args(env, IO_NORMAL);
1849 args->u.normal.via_iter = from;
1850 args->u.normal.via_iocb = iocb;
1852 rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE,
1853 &iocb->ki_pos, iov_iter_count(from));
1855 /* On success, combine bytes written. */
1856 if (rc_tiny >= 0 && rc_normal > 0)
1857 rc_normal += rc_tiny;
1858 /* On error, only return error from normal write if tiny write did not
1859 * write any bytes. Otherwise return bytes written by tiny write.
1861 else if (rc_tiny > 0)
1862 rc_normal = rc_tiny;
1864 cl_env_put(env, &refcheck);
1866 if (rc_normal > 0) {
1867 ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
1868 file->private_data, iocb->ki_pos,
1870 ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE,
1871 ktime_us_delta(ktime_get(), kstart));
1877 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
1879 * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
1881 static int ll_file_get_iov_count(const struct iovec *iov,
1882 unsigned long *nr_segs, size_t *count)
1887 for (seg = 0; seg < *nr_segs; seg++) {
1888 const struct iovec *iv = &iov[seg];
1891 * If any segment has a negative length, or the cumulative
1892 * length ever wraps negative then return -EINVAL.
1895 if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1897 if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
1902 cnt -= iv->iov_len; /* This segment is no good */
1909 static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1910 unsigned long nr_segs, loff_t pos)
1917 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count);
1924 # ifdef HAVE_IOV_ITER_INIT_DIRECTION
1925 iov_iter_init(&to, READ, iov, nr_segs, iov_count);
1926 # else /* !HAVE_IOV_ITER_INIT_DIRECTION */
1927 iov_iter_init(&to, iov, nr_segs, iov_count, 0);
1928 # endif /* HAVE_IOV_ITER_INIT_DIRECTION */
1930 result = ll_file_read_iter(iocb, &to);
1935 static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
1938 struct iovec iov = { .iov_base = buf, .iov_len = count };
1947 init_sync_kiocb(&kiocb, file);
1948 kiocb.ki_pos = *ppos;
1949 #ifdef HAVE_KIOCB_KI_LEFT
1950 kiocb.ki_left = count;
1951 #elif defined(HAVE_KI_NBYTES)
1952 kiocb.ki_nbytes = count;
1955 result = ll_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
1956 *ppos = kiocb.ki_pos;
1962 * Write to a file (through the page cache).
1965 static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
1966 unsigned long nr_segs, loff_t pos)
1968 struct iov_iter from;
1973 result = ll_file_get_iov_count(iov, &nr_segs, &iov_count);
1980 # ifdef HAVE_IOV_ITER_INIT_DIRECTION
1981 iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
1982 # else /* !HAVE_IOV_ITER_INIT_DIRECTION */
1983 iov_iter_init(&from, iov, nr_segs, iov_count, 0);
1984 # endif /* HAVE_IOV_ITER_INIT_DIRECTION */
1986 result = ll_file_write_iter(iocb, &from);
1991 static ssize_t ll_file_write(struct file *file, const char __user *buf,
1992 size_t count, loff_t *ppos)
1994 struct iovec iov = { .iov_base = (void __user *)buf,
2004 init_sync_kiocb(&kiocb, file);
2005 kiocb.ki_pos = *ppos;
2006 #ifdef HAVE_KIOCB_KI_LEFT
2007 kiocb.ki_left = count;
2008 #elif defined(HAVE_KI_NBYTES)
2009 kiocb.ki_nbytes = count;
2012 result = ll_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
2013 *ppos = kiocb.ki_pos;
2017 #endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
2020 * Send file content (through pagecache) somewhere with helper
2022 static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
2023 struct pipe_inode_info *pipe, size_t count,
2027 struct vvp_io_args *args;
2034 result = pcc_file_splice_read(in_file, ppos, pipe,
2035 count, flags, &cached);
2039 ll_ras_enter(in_file, *ppos, count);
2041 env = cl_env_get(&refcheck);
2043 RETURN(PTR_ERR(env));
2045 args = ll_env_args(env, IO_SPLICE);
2046 args->u.splice.via_pipe = pipe;
2047 args->u.splice.via_flags = flags;
2049 result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
2050 cl_env_put(env, &refcheck);
2053 ll_rw_stats_tally(ll_i2sbi(file_inode(in_file)), current->pid,
2054 in_file->private_data, *ppos, result,
2059 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
2060 __u64 flags, struct lov_user_md *lum, int lum_size)
2062 struct lookup_intent oit = {
2064 .it_flags = flags | MDS_OPEN_BY_FID,
2069 if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
2070 le32_to_cpu(LOV_MAGIC_MAGIC)) {
2071 /* this code will only exist for big-endian systems */
2072 lustre_swab_lov_user_md(lum, 0);
2075 ll_inode_size_lock(inode);
2076 rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
2078 GOTO(out_unlock, rc);
2080 ll_release_openhandle(dentry, &oit);
2083 ll_inode_size_unlock(inode);
2084 ll_intent_release(&oit);
2089 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
2090 struct lov_mds_md **lmmp, int *lmm_size,
2091 struct ptlrpc_request **request)
2093 struct ll_sb_info *sbi = ll_i2sbi(inode);
2094 struct mdt_body *body;
2095 struct lov_mds_md *lmm = NULL;
2096 struct ptlrpc_request *req = NULL;
2097 struct md_op_data *op_data;
2100 rc = ll_get_default_mdsize(sbi, &lmmsize);
2104 op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
2105 strlen(filename), lmmsize,
2106 LUSTRE_OPC_ANY, NULL);
2107 if (IS_ERR(op_data))
2108 RETURN(PTR_ERR(op_data));
2110 op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
2111 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
2112 ll_finish_md_op_data(op_data);
2114 CDEBUG(D_INFO, "md_getattr_name failed "
2115 "on %s: rc %d\n", filename, rc);
2119 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2120 LASSERT(body != NULL); /* checked by mdc_getattr_name */
2122 lmmsize = body->mbo_eadatasize;
2124 if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
2126 GOTO(out, rc = -ENODATA);
2129 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
2130 LASSERT(lmm != NULL);
2132 if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
2133 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
2134 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_COMP_V1) &&
2135 lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_FOREIGN))
2136 GOTO(out, rc = -EPROTO);
2139 * This is coming from the MDS, so it is probably in
2140 * little endian. We convert it to host endian before
2141 * passing it to userspace.
2143 if ((lmm->lmm_magic & __swab32(LOV_MAGIC_MAGIC)) ==
2144 __swab32(LOV_MAGIC_MAGIC)) {
2145 int stripe_count = 0;
2147 if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
2148 lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
2149 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
2150 if (le32_to_cpu(lmm->lmm_pattern) &
2151 LOV_PATTERN_F_RELEASED)
2155 lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
2157 /* if the function is called for a directory, we should
2158 * avoid swabbing non-existent lsm objects */
2159 if (lmm->lmm_magic == LOV_MAGIC_V1 && S_ISREG(body->mbo_mode))
2160 lustre_swab_lov_user_md_objects(
2161 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
2163 else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
2164 S_ISREG(body->mbo_mode))
2165 lustre_swab_lov_user_md_objects(
2166 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
2172 *lmm_size = lmmsize;
2177 static int ll_lov_setea(struct inode *inode, struct file *file,
2180 __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
2181 struct lov_user_md *lump;
2182 int lum_size = sizeof(struct lov_user_md) +
2183 sizeof(struct lov_user_ost_data);
2187 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2190 OBD_ALLOC_LARGE(lump, lum_size);
2194 if (copy_from_user(lump, arg, lum_size))
2195 GOTO(out_lump, rc = -EFAULT);
2197 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, lump,
2199 cl_lov_delay_create_clear(&file->f_flags);
2202 OBD_FREE_LARGE(lump, lum_size);
2206 static int ll_file_getstripe(struct inode *inode, void __user *lum, size_t size)
2213 env = cl_env_get(&refcheck);
2215 RETURN(PTR_ERR(env));
2217 rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum, size);
2218 cl_env_put(env, &refcheck);
2222 static int ll_lov_setstripe(struct inode *inode, struct file *file,
2225 struct lov_user_md __user *lum = (struct lov_user_md __user *)arg;
2226 struct lov_user_md *klum;
2228 __u64 flags = FMODE_WRITE;
2231 rc = ll_copy_user_md(lum, &klum);
2236 rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, klum,
2241 rc = put_user(0, &lum->lmm_stripe_count);
2245 rc = ll_layout_refresh(inode, &gen);
2249 rc = ll_file_getstripe(inode, arg, lum_size);
2251 cl_lov_delay_create_clear(&file->f_flags);
2254 OBD_FREE_LARGE(klum, lum_size);
2260 ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
2262 struct ll_inode_info *lli = ll_i2info(inode);
2263 struct cl_object *obj = lli->lli_clob;
2264 struct ll_file_data *fd = file->private_data;
2265 struct ll_grouplock grouplock;
2270 CWARN("group id for group lock must not be 0\n");
2274 if (ll_file_nolock(file))
2275 RETURN(-EOPNOTSUPP);
2277 if (file->f_flags & O_NONBLOCK) {
2278 if (!mutex_trylock(&lli->lli_group_mutex))
2281 mutex_lock(&lli->lli_group_mutex);
2283 if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
2284 CWARN("group lock already exists with gid %lu\n",
2285 fd->fd_grouplock.lg_gid);
2286 GOTO(out, rc = -EINVAL);
2288 if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
2289 if (file->f_flags & O_NONBLOCK)
2290 GOTO(out, rc = -EAGAIN);
2291 mutex_unlock(&lli->lli_group_mutex);
2292 wait_var_event(&lli->lli_group_users, !lli->lli_group_users);
2293 GOTO(retry, rc = 0);
2295 LASSERT(fd->fd_grouplock.lg_lock == NULL);
2298 * XXX: a group lock needs to protect all OST objects, but PFL
2299 * may add new OST objects during the IO, so we instantiate
2300 * all OST objects before taking the group lock.
2305 struct cl_layout cl = {
2306 .cl_is_composite = false,
2308 struct lu_extent ext = {
2310 .e_end = OBD_OBJECT_EOF,
2313 env = cl_env_get(&refcheck);
2315 GOTO(out, rc = PTR_ERR(env));
2317 rc = cl_object_layout_get(env, obj, &cl);
2318 if (!rc && cl.cl_is_composite)
2319 rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
2322 cl_env_put(env, &refcheck);
2327 rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
2328 arg, (file->f_flags & O_NONBLOCK), &grouplock);
2333 fd->fd_flags |= LL_FILE_GROUP_LOCKED;
2334 fd->fd_grouplock = grouplock;
2335 if (lli->lli_group_users == 0)
2336 lli->lli_group_gid = grouplock.lg_gid;
2337 lli->lli_group_users++;
2339 CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
2341 mutex_unlock(&lli->lli_group_mutex);
2346 static int ll_put_grouplock(struct inode *inode, struct file *file,
2349 struct ll_inode_info *lli = ll_i2info(inode);
2350 struct ll_file_data *fd = file->private_data;
2351 struct ll_grouplock grouplock;
2355 mutex_lock(&lli->lli_group_mutex);
2356 if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
2357 CWARN("no group lock held\n");
2358 GOTO(out, rc = -EINVAL);
2361 LASSERT(fd->fd_grouplock.lg_lock != NULL);
2363 if (fd->fd_grouplock.lg_gid != arg) {
2364 CWARN("group lock %lu doesn't match current id %lu\n",
2365 arg, fd->fd_grouplock.lg_gid);
2366 GOTO(out, rc = -EINVAL);
2369 grouplock = fd->fd_grouplock;
2370 memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
2371 fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
2373 cl_put_grouplock(&grouplock);
2375 lli->lli_group_users--;
2376 if (lli->lli_group_users == 0) {
2377 lli->lli_group_gid = 0;
2378 wake_up_var(&lli->lli_group_users);
2380 CDEBUG(D_INFO, "group lock %lu released\n", arg);
2383 mutex_unlock(&lli->lli_group_mutex);
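/*
 * Illustrative sketch, not part of the original source: a userspace
 * application would typically bracket its shared I/O with the
 * LL_IOC_GROUP_LOCK/LL_IOC_GROUP_UNLOCK ioctls handled above, passing a
 * non-zero group id of its own choosing, e.g.:
 *
 *	unsigned long gid = 1234;	// any non-zero id shared by the group
 *	ioctl(fd, LL_IOC_GROUP_LOCK, gid);
 *	... group I/O ...
 *	ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);
 *
 * All openers using the same gid share the lock; a request with a different
 * gid waits (or fails with -EAGAIN when the file was opened O_NONBLOCK),
 * matching ll_get_grouplock() above.
 */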
2389 * Close inode open handle
2391 * \param dentry [in] dentry which contains the inode
2392 * \param it [in,out] intent which contains open info and result
2395 * \retval <0 failure
2397 int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
2399 struct inode *inode = dentry->d_inode;
2400 struct obd_client_handle *och;
2406 /* Root ? Do nothing. */
2407 if (dentry->d_inode->i_sb->s_root == dentry)
2410 /* No open handle to close? Move away */
2411 if (!it_disposition(it, DISP_OPEN_OPEN))
2414 LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
2416 OBD_ALLOC(och, sizeof(*och));
2418 GOTO(out, rc = -ENOMEM);
2420 rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
2424 rc = ll_close_inode_openhandle(inode, och, 0, NULL);
2426 /* this one is in place of ll_file_open */
2427 if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
2428 ptlrpc_req_finished(it->it_request);
2429 it_clear_disposition(it, DISP_ENQ_OPEN_REF);
2435 * Get size for inode for which FIEMAP mapping is requested.
2436 * Make the FIEMAP get_info call and return the result.
2437 * \param fiemap kernel buffer to hold extents
2438 * \param num_bytes kernel buffer size
2440 static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
2446 struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
2449 /* Checks for fiemap flags */
2450 if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
2451 fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
2455 /* Check for FIEMAP_FLAG_SYNC */
2456 if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
2457 rc = filemap_fdatawrite(inode->i_mapping);
2462 env = cl_env_get(&refcheck);
2464 RETURN(PTR_ERR(env));
2466 if (i_size_read(inode) == 0) {
2467 rc = ll_glimpse_size(inode);
2472 fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
2473 obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
2474 obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
2476 /* If filesize is 0, then there would be no objects for mapping */
2477 if (fmkey.lfik_oa.o_size == 0) {
2478 fiemap->fm_mapped_extents = 0;
2482 fmkey.lfik_fiemap = *fiemap;
2484 rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
2485 &fmkey, fiemap, &num_bytes);
2487 cl_env_put(env, &refcheck);
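/*
 * Illustrative note, not part of the original source: callers are expected
 * to size the fiemap buffer from the requested extent count, the same way
 * ll_fiemap() at the end of this file does:
 *
 *	num_bytes = sizeof(*fiemap) +
 *		    extent_count * sizeof(struct fiemap_extent);
 *	OBD_ALLOC_LARGE(fiemap, num_bytes);
 *	...
 *	rc = ll_do_fiemap(inode, fiemap, num_bytes);
 */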
2491 int ll_fid2path(struct inode *inode, void __user *arg)
2493 struct obd_export *exp = ll_i2mdexp(inode);
2494 const struct getinfo_fid2path __user *gfin = arg;
2496 struct getinfo_fid2path *gfout;
2502 if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
2503 !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
2506 /* Only need to get the buflen */
2507 if (get_user(pathlen, &gfin->gf_pathlen))
2510 if (pathlen > PATH_MAX)
2513 outsize = sizeof(*gfout) + pathlen;
2514 OBD_ALLOC(gfout, outsize);
2518 if (copy_from_user(gfout, arg, sizeof(*gfout)))
2519 GOTO(gf_free, rc = -EFAULT);
2520 /* Append the root FID after gfout to let the MDT know the root FID so
2521 * that it can look up the correct path; this is mainly for fileset.
2522 * An old server without fileset mount support will ignore this. */
2523 *gfout->gf_u.gf_root_fid = *ll_inode2fid(inode);
2525 /* Call mdc_iocontrol */
2526 rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
2530 if (copy_to_user(arg, gfout, outsize))
2534 OBD_FREE(gfout, outsize);
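/*
 * Illustrative sketch, not part of the original source (field use beyond
 * gf_pathlen is an assumption): the userspace side of OBD_IOC_FID2PATH
 * reserves gf_pathlen bytes of path space after the header, matching the
 * outsize computed in ll_fid2path() above:
 *
 *	outsize = sizeof(*gfout) + pathlen;
 *	gfout = calloc(1, outsize);
 *	gfout->gf_pathlen = pathlen;
 *	ioctl(fd, OBD_IOC_FID2PATH, gfout);
 */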
2539 ll_ioc_data_version(struct inode *inode, struct ioc_data_version *ioc)
2541 struct cl_object *obj = ll_i2info(inode)->lli_clob;
2549 ioc->idv_version = 0;
2550 ioc->idv_layout_version = UINT_MAX;
2552 /* If no file object is initialized, we consider its version to be 0. */
2556 env = cl_env_get(&refcheck);
2558 RETURN(PTR_ERR(env));
2560 io = vvp_env_thread_io(env);
2562 io->u.ci_data_version.dv_data_version = 0;
2563 io->u.ci_data_version.dv_layout_version = UINT_MAX;
2564 io->u.ci_data_version.dv_flags = ioc->idv_flags;
2567 if (cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj) == 0)
2568 result = cl_io_loop(env, io);
2570 result = io->ci_result;
2572 ioc->idv_version = io->u.ci_data_version.dv_data_version;
2573 ioc->idv_layout_version = io->u.ci_data_version.dv_layout_version;
2575 cl_io_fini(env, io);
2577 if (unlikely(io->ci_need_restart))
2580 cl_env_put(env, &refcheck);
2586 * Read the data_version for inode.
2588 * This value is computed using the stripe object versions on the OSTs.
2589 * The version is computed using server-side locking.
2591 * @param flags whether to do a sync on the OST side;
2593 * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
2594 * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
2596 int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
2598 struct ioc_data_version ioc = { .idv_flags = flags };
2601 rc = ll_ioc_data_version(inode, &ioc);
2603 *data_version = ioc.idv_version;
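/*
 * Illustrative sketch, not part of the original source: an in-kernel caller
 * that needs a stable version after flushing dirty data would pass
 * LL_DV_WR_FLUSH, as ll_hsm_release() below does:
 *
 *	__u64 data_version = 0;
 *
 *	rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
 *	if (rc == 0)
 *		... compare data_version before and after the operation ...
 */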
2609 * Trigger a HSM release request for the provided inode.
2611 int ll_hsm_release(struct inode *inode)
2614 struct obd_client_handle *och = NULL;
2615 __u64 data_version = 0;
2620 CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
2621 ll_i2sbi(inode)->ll_fsname,
2622 PFID(&ll_i2info(inode)->lli_fid));
2624 och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
2626 GOTO(out, rc = PTR_ERR(och));
2628 /* Grab latest data_version and [am]time values */
2629 rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
2633 env = cl_env_get(&refcheck);
2635 GOTO(out, rc = PTR_ERR(env));
2637 rc = ll_merge_attr(env, inode);
2638 cl_env_put(env, &refcheck);
2640 /* If an error happens, we have the wrong size for a file.
2646 /* Release the file.
2647 * NB: lease lock handle is released in mdc_hsm_release_pack() because
2648 * we still need it to pack l_remote_handle to MDT. */
2649 rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
2655 if (och != NULL && !IS_ERR(och)) /* close the file */
2656 ll_lease_close(och, inode, NULL);
2661 struct ll_swap_stack {
2664 struct inode *inode1;
2665 struct inode *inode2;
2670 static int ll_swap_layouts(struct file *file1, struct file *file2,
2671 struct lustre_swap_layouts *lsl)
2673 struct mdc_swap_layouts msl;
2674 struct md_op_data *op_data;
2677 struct ll_swap_stack *llss = NULL;
2680 OBD_ALLOC_PTR(llss);
2684 llss->inode1 = file_inode(file1);
2685 llss->inode2 = file_inode(file2);
2687 rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
2691 /* we use 2 bools because they are easier to swap than 2 bits */
2692 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
2693 llss->check_dv1 = true;
2695 if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
2696 llss->check_dv2 = true;
2698 /* we cannot use lsl->sl_dvX directly because we may swap them */
2699 llss->dv1 = lsl->sl_dv1;
2700 llss->dv2 = lsl->sl_dv2;
2702 rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
2703 if (rc == 0) /* same file, done! */
2706 if (rc < 0) { /* sequentialize it */
2707 swap(llss->inode1, llss->inode2);
2709 swap(llss->dv1, llss->dv2);
2710 swap(llss->check_dv1, llss->check_dv2);
2714 if (gid != 0) { /* application asks to flush dirty cache */
2715 rc = ll_get_grouplock(llss->inode1, file1, gid);
2719 rc = ll_get_grouplock(llss->inode2, file2, gid);
2721 ll_put_grouplock(llss->inode1, file1, gid);
2726 /* ultimate check: before swapping the layouts we check whether
2727 * the data version has changed (if requested) */
2728 if (llss->check_dv1) {
2729 rc = ll_data_version(llss->inode1, &dv, 0);
2732 if (dv != llss->dv1)
2733 GOTO(putgl, rc = -EAGAIN);
2736 if (llss->check_dv2) {
2737 rc = ll_data_version(llss->inode2, &dv, 0);
2740 if (dv != llss->dv2)
2741 GOTO(putgl, rc = -EAGAIN);
2744 /* struct md_op_data is used to send the swap args to the MDT;
2745 * only the flags are missing, so we pass struct mdc_swap_layouts
2746 * through md_op_data->op_data */
2747 /* flags from user space have to be converted before they are sent to
2748 * the server; no flag is sent today, they are only used on the client */
2751 op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
2752 0, LUSTRE_OPC_ANY, &msl);
2753 if (IS_ERR(op_data))
2754 GOTO(free, rc = PTR_ERR(op_data));
2756 rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
2757 sizeof(*op_data), op_data, NULL);
2758 ll_finish_md_op_data(op_data);
2765 ll_put_grouplock(llss->inode2, file2, gid);
2766 ll_put_grouplock(llss->inode1, file1, gid);
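/*
 * Illustrative sketch, not part of the original source (sl_gid is assumed
 * to carry the group id used above): userspace drives the swap through
 * LL_IOC_LOV_SWAP_LAYOUTS with a struct lustre_swap_layouts describing the
 * second file and the optional data-version checks:
 *
 *	struct lustre_swap_layouts lsl = {
 *		.sl_fd    = fd2,
 *		.sl_gid   = 1234,		// non-zero: flush via group lock
 *		.sl_flags = SWAP_LAYOUTS_CHECK_DV1,
 *		.sl_dv1   = dv1,		// expected data version of file 1
 *	};
 *
 *	ioctl(fd1, LL_IOC_LOV_SWAP_LAYOUTS, &lsl);
 */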
2776 int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
2778 struct obd_export *exp = ll_i2mdexp(inode);
2779 struct md_op_data *op_data;
2783 /* Detect out-of-range masks */
2784 if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
2787 /* Non-root users are forbidden to set or clear flags which are
2788 * NOT defined in HSM_USER_MASK. */
2789 if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
2790 !cfs_capable(CFS_CAP_SYS_ADMIN))
2793 if (!exp_connect_archive_id_array(exp)) {
2794 /* Detect out-of-range archive id */
2795 if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
2796 (hss->hss_archive_id > LL_HSM_ORIGIN_MAX_ARCHIVE))
2800 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2801 LUSTRE_OPC_ANY, hss);
2802 if (IS_ERR(op_data))
2803 RETURN(PTR_ERR(op_data));
2805 rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, exp, sizeof(*op_data),
2808 ll_finish_md_op_data(op_data);
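/*
 * Illustrative sketch, not part of the original source: ll_hsm_import()
 * below builds its request the same way a direct caller would, e.g. to mark
 * a file as archived in archive 1:
 *
 *	struct hsm_state_set hss = {
 *		.hss_valid      = HSS_SETMASK | HSS_ARCHIVE_ID,
 *		.hss_archive_id = 1,
 *		.hss_setmask    = HS_ARCHIVED | HS_EXISTS,
 *	};
 *
 *	rc = ll_hsm_state_set(inode, &hss);
 */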
2813 static int ll_hsm_import(struct inode *inode, struct file *file,
2814 struct hsm_user_import *hui)
2816 struct hsm_state_set *hss = NULL;
2817 struct iattr *attr = NULL;
2821 if (!S_ISREG(inode->i_mode))
2827 GOTO(out, rc = -ENOMEM);
2829 hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
2830 hss->hss_archive_id = hui->hui_archive_id;
2831 hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
2832 rc = ll_hsm_state_set(inode, hss);
2836 OBD_ALLOC_PTR(attr);
2838 GOTO(out, rc = -ENOMEM);
2840 attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
2841 attr->ia_mode |= S_IFREG;
2842 attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
2843 attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
2844 attr->ia_size = hui->hui_size;
2845 attr->ia_mtime.tv_sec = hui->hui_mtime;
2846 attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
2847 attr->ia_atime.tv_sec = hui->hui_atime;
2848 attr->ia_atime.tv_nsec = hui->hui_atime_ns;
2850 attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
2851 ATTR_UID | ATTR_GID |
2852 ATTR_MTIME | ATTR_MTIME_SET |
2853 ATTR_ATIME | ATTR_ATIME_SET;
2857 rc = ll_setattr_raw(file_dentry(file), attr, 0, true);
2861 inode_unlock(inode);
2873 static inline long ll_lease_type_from_fmode(fmode_t fmode)
2875 return ((fmode & FMODE_READ) ? LL_LEASE_RDLCK : 0) |
2876 ((fmode & FMODE_WRITE) ? LL_LEASE_WRLCK : 0);
2879 static int ll_file_futimes_3(struct file *file, const struct ll_futimes_3 *lfu)
2881 struct inode *inode = file_inode(file);
2883 .ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
2884 ATTR_MTIME | ATTR_MTIME_SET |
2887 .tv_sec = lfu->lfu_atime_sec,
2888 .tv_nsec = lfu->lfu_atime_nsec,
2891 .tv_sec = lfu->lfu_mtime_sec,
2892 .tv_nsec = lfu->lfu_mtime_nsec,
2895 .tv_sec = lfu->lfu_ctime_sec,
2896 .tv_nsec = lfu->lfu_ctime_nsec,
2902 if (!capable(CAP_SYS_ADMIN))
2905 if (!S_ISREG(inode->i_mode))
2909 rc = ll_setattr_raw(file_dentry(file), &ia, OP_XVALID_CTIME_SET,
2911 inode_unlock(inode);
2916 static enum cl_lock_mode cl_mode_user_to_kernel(enum lock_mode_user mode)
2919 case MODE_READ_USER:
2921 case MODE_WRITE_USER:
2928 static const char *const user_lockname[] = LOCK_MODE_NAMES;
2930 /* Used to allow the upper layers of the client to request an LDLM lock
2931 * without doing an actual read or write.
2933 * Used for ladvise lockahead to manually request specific locks.
2935 * \param[in] file file this ladvise lock request is on
2936 * \param[in] ladvise ladvise struct describing this lock request
2938 * \retval 0 success, no detailed result available (sync requests
2939 * and requests sent to the server [not handled locally]
2940 * cannot return detailed results)
2941 * \retval LLA_RESULT_{SAME,DIFFERENT} - detailed result of the lock request,
2942 * see definitions for details.
2943 * \retval negative negative errno on error
2945 int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise)
2947 struct lu_env *env = NULL;
2948 struct cl_io *io = NULL;
2949 struct cl_lock *lock = NULL;
2950 struct cl_lock_descr *descr = NULL;
2951 struct dentry *dentry = file->f_path.dentry;
2952 struct inode *inode = dentry->d_inode;
2953 enum cl_lock_mode cl_mode;
2954 off_t start = ladvise->lla_start;
2955 off_t end = ladvise->lla_end;
2961 CDEBUG(D_VFSTRACE, "Lock request: file=%.*s, inode=%p, mode=%s "
2962 "start=%llu, end=%llu\n", dentry->d_name.len,
2963 dentry->d_name.name, dentry->d_inode,
2964 user_lockname[ladvise->lla_lockahead_mode], (__u64) start,
2967 cl_mode = cl_mode_user_to_kernel(ladvise->lla_lockahead_mode);
2969 GOTO(out, result = cl_mode);
2971 /* Get IO environment */
2972 result = cl_io_get(inode, &env, &io, &refcheck);
2976 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
2979 * nothing to do for this io. This currently happens when
2980 * the stripe sub-objects are not yet created.
2982 result = io->ci_result;
2983 } else if (result == 0) {
2984 lock = vvp_env_lock(env);
2985 descr = &lock->cll_descr;
2987 descr->cld_obj = io->ci_obj;
2988 /* Convert byte offsets to pages */
2989 descr->cld_start = cl_index(io->ci_obj, start);
2990 descr->cld_end = cl_index(io->ci_obj, end);
2991 descr->cld_mode = cl_mode;
2992 /* CEF_MUST is used because we do not want to convert a
2993 * lockahead request to a lockless lock */
2994 descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND |
2997 if (ladvise->lla_peradvice_flags & LF_ASYNC)
2998 descr->cld_enq_flags |= CEF_SPECULATIVE;
3000 result = cl_lock_request(env, io, lock);
3002 /* On success, we need to release the lock */
3004 cl_lock_release(env, lock);
3006 cl_io_fini(env, io);
3007 cl_env_put(env, &refcheck);
3009 /* -ECANCELED indicates a matching lock with a different extent
3010 * was already present, and -EEXIST indicates a matching lock
3011 * on exactly the same extent was already present.
3012 * We convert them to positive values for userspace to make
3013 * recognizing true errors easier.
3014 * Note we can only return these detailed results on async requests,
3015 * as sync requests look the same as i/o requests for locking. */
3016 if (result == -ECANCELED)
3017 result = LLA_RESULT_DIFFERENT;
3018 else if (result == -EEXIST)
3019 result = LLA_RESULT_SAME;
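/*
 * Illustrative sketch, not part of the original source: a lockahead advice
 * as consumed by ll_file_lock_ahead() above carries the lock mode and byte
 * range in the llapi_lu_ladvise fields used there:
 *
 *	struct llapi_lu_ladvise ladvise = {
 *		.lla_advice          = LU_LADVISE_LOCKAHEAD,
 *		.lla_lockahead_mode  = MODE_WRITE_USER,
 *		.lla_peradvice_flags = LF_ASYNC,	// ask for a speculative lock
 *		.lla_start           = 0,
 *		.lla_end             = 1 << 20,		// one-megabyte range, for example
 *	};
 *
 *	result = ll_file_lock_ahead(file, &ladvise);
 */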
3024 static const char *const ladvise_names[] = LU_LADVISE_NAMES;
3026 static int ll_ladvise_sanity(struct inode *inode,
3027 struct llapi_lu_ladvise *ladvise)
3029 struct ll_sb_info *sbi = ll_i2sbi(inode);
3030 enum lu_ladvise_type advice = ladvise->lla_advice;
3031 /* Note lla_peradvice_flags is a 32-bit field, so per-advice flags must
3032 * be in the first 32 bits of enum ladvise_flags */
3033 __u32 flags = ladvise->lla_peradvice_flags;
3034 /* 3 lines at 80 characters per line, should be plenty */
3037 if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) {
3039 CDEBUG(D_VFSTRACE, "%s: advice with value '%d' not recognized,"
3040 "last supported advice is %s (value '%d'): rc = %d\n",
3041 sbi->ll_fsname, advice,
3042 ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc);
3046 /* Per-advice checks */
3048 case LU_LADVISE_LOCKNOEXPAND:
3049 if (flags & ~LF_LOCKNOEXPAND_MASK) {
3051 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
3052 "rc = %d\n", sbi->ll_fsname, flags,
3053 ladvise_names[advice], rc);
3057 case LU_LADVISE_LOCKAHEAD:
3058 /* Currently only READ and WRITE modes can be requested */
3059 if (ladvise->lla_lockahead_mode >= MODE_MAX_USER ||
3060 ladvise->lla_lockahead_mode == 0) {
3062 CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: "
3063 "rc = %d\n", sbi->ll_fsname,
3064 ladvise->lla_lockahead_mode,
3065 ladvise_names[advice], rc);
3069 case LU_LADVISE_WILLREAD:
3070 case LU_LADVISE_DONTNEED:
3072 /* Note fall through above - These checks apply to all advices
3073 * except LOCKNOEXPAND */
3074 if (flags & ~LF_DEFAULT_MASK) {
3076 CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
3077 "rc = %d\n", sbi->ll_fsname, flags,
3078 ladvise_names[advice], rc);
3081 if (ladvise->lla_start >= ladvise->lla_end) {
3083 CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) "
3084 "for %s: rc = %d\n", sbi->ll_fsname,
3085 ladvise->lla_start, ladvise->lla_end,
3086 ladvise_names[advice], rc);
3098 * Give file access advices
3100 * The ladvise interface is similar to the Linux fadvise() system call, except
3101 * it forwards the advice directly from the Lustre client to the server. The
3102 * server-side code will apply appropriate read-ahead and caching techniques
3103 * for the corresponding files.
3105 * A typical workload for ladvise is e.g. a bunch of different clients
3106 * doing small random reads of a file, so prefetching pages into OSS cache
3107 * with big linear reads before the random IO is a net benefit. Fetching
3108 * all that data into each client cache with fadvise() may not be, due to
3109 * much more data being sent to the client.
3111 static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags,
3112 struct llapi_lu_ladvise *ladvise)
3116 struct cl_ladvise_io *lio;
3121 env = cl_env_get(&refcheck);
3123 RETURN(PTR_ERR(env));
3125 io = vvp_env_thread_io(env);
3126 io->ci_obj = ll_i2info(inode)->lli_clob;
3128 /* initialize parameters for ladvise */
3129 lio = &io->u.ci_ladvise;
3130 lio->li_start = ladvise->lla_start;
3131 lio->li_end = ladvise->lla_end;
3132 lio->li_fid = ll_inode2fid(inode);
3133 lio->li_advice = ladvise->lla_advice;
3134 lio->li_flags = flags;
3136 if (cl_io_init(env, io, CIT_LADVISE, io->ci_obj) == 0)
3137 rc = cl_io_loop(env, io);
3141 cl_io_fini(env, io);
3142 cl_env_put(env, &refcheck);
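/*
 * Illustrative sketch, not part of the original source: the LL_IOC_LADVISE
 * handler in ll_file_ioctl() below expects a header followed by the advice
 * array, roughly:
 *
 *	size = offsetof(struct llapi_ladvise_hdr, lah_advise[count]);
 *	hdr = calloc(1, size);
 *	hdr->lah_magic = LADVISE_MAGIC;
 *	hdr->lah_count = count;
 *	hdr->lah_advise[0].lla_advice = LU_LADVISE_WILLREAD;
 *	hdr->lah_advise[0].lla_start  = 0;
 *	hdr->lah_advise[0].lla_end    = len;
 *	ioctl(fd, LL_IOC_LADVISE, hdr);
 */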
3146 static int ll_lock_noexpand(struct file *file, int flags)
3148 struct ll_file_data *fd = file->private_data;
3150 fd->ll_lock_no_expand = !(flags & LF_UNSET);
3155 int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
3158 struct fsxattr fsxattr;
3160 if (copy_from_user(&fsxattr,
3161 (const struct fsxattr __user *)arg,
3165 fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
3166 if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
3167 fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
3168 fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
3169 if (copy_to_user((struct fsxattr __user *)arg,
3170 &fsxattr, sizeof(fsxattr)))
3176 int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
3179 * Project Quota ID state is only allowed to change from within the init
3180 * namespace. Enforce that restriction only if we are trying to change
3181 * the quota ID state. Everything else is allowed in user namespaces.
3183 if (current_user_ns() == &init_user_ns)
3186 if (ll_i2info(inode)->lli_projid != fa->fsx_projid)
3189 if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT)) {
3190 if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
3193 if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
3200 int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
3204 struct md_op_data *op_data;
3205 struct ptlrpc_request *req = NULL;
3207 struct fsxattr fsxattr;
3208 struct cl_object *obj;
3212 if (copy_from_user(&fsxattr,
3213 (const struct fsxattr __user *)arg,
3217 rc = ll_ioctl_check_project(inode, &fsxattr);
3221 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3222 LUSTRE_OPC_ANY, NULL);
3223 if (IS_ERR(op_data))
3224 RETURN(PTR_ERR(op_data));
3226 flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
3227 op_data->op_attr_flags = ll_inode_to_ext_flags(flags);
3228 if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
3229 op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
3230 op_data->op_projid = fsxattr.fsx_projid;
3231 op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
3232 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL,
3234 ptlrpc_req_finished(req);
3236 GOTO(out_fsxattr, rc);
3237 ll_update_inode_flags(inode, op_data->op_attr_flags);
3238 obj = ll_i2info(inode)->lli_clob;
3240 GOTO(out_fsxattr, rc);
3242 OBD_ALLOC_PTR(attr);
3244 GOTO(out_fsxattr, rc = -ENOMEM);
3246 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS,
3247 fsxattr.fsx_xflags);
3250 ll_finish_md_op_data(op_data);
3254 static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
3257 struct inode *inode = file_inode(file);
3258 struct ll_file_data *fd = file->private_data;
3259 struct ll_inode_info *lli = ll_i2info(inode);
3260 struct obd_client_handle *och = NULL;
3261 struct split_param sp;
3262 struct pcc_param param;
3263 bool lease_broken = false;
3265 enum mds_op_bias bias = 0;
3266 struct file *layout_file = NULL;
3268 size_t data_size = 0;
3269 bool attached = false;
3274 mutex_lock(&lli->lli_och_mutex);
3275 if (fd->fd_lease_och != NULL) {
3276 och = fd->fd_lease_och;
3277 fd->fd_lease_och = NULL;
3279 mutex_unlock(&lli->lli_och_mutex);
3284 fmode = och->och_flags;
3286 switch (ioc->lil_flags) {
3287 case LL_LEASE_RESYNC_DONE:
3288 if (ioc->lil_count > IOC_IDS_MAX)
3289 GOTO(out_lease_close, rc = -EINVAL);
3291 data_size = offsetof(typeof(*ioc), lil_ids[ioc->lil_count]);
3292 OBD_ALLOC(data, data_size);
3294 GOTO(out_lease_close, rc = -ENOMEM);
3296 if (copy_from_user(data, (void __user *)arg, data_size))
3297 GOTO(out_lease_close, rc = -EFAULT);
3299 bias = MDS_CLOSE_RESYNC_DONE;
3301 case LL_LEASE_LAYOUT_MERGE: {
3304 if (ioc->lil_count != 1)
3305 GOTO(out_lease_close, rc = -EINVAL);
3307 arg += sizeof(*ioc);
3308 if (copy_from_user(&fd, (void __user *)arg, sizeof(__u32)))
3309 GOTO(out_lease_close, rc = -EFAULT);
3311 layout_file = fget(fd);
3313 GOTO(out_lease_close, rc = -EBADF);
3315 if ((file->f_flags & O_ACCMODE) == O_RDONLY ||
3316 (layout_file->f_flags & O_ACCMODE) == O_RDONLY)
3317 GOTO(out_lease_close, rc = -EPERM);
3319 data = file_inode(layout_file);
3320 bias = MDS_CLOSE_LAYOUT_MERGE;
3323 case LL_LEASE_LAYOUT_SPLIT: {
3327 if (ioc->lil_count != 2)
3328 GOTO(out_lease_close, rc = -EINVAL);
3330 arg += sizeof(*ioc);
3331 if (copy_from_user(&fdv, (void __user *)arg, sizeof(__u32)))
3332 GOTO(out_lease_close, rc = -EFAULT);
3334 arg += sizeof(__u32);
3335 if (copy_from_user(&mirror_id, (void __user *)arg,
3337 GOTO(out_lease_close, rc = -EFAULT);
3339 layout_file = fget(fdv);
3341 GOTO(out_lease_close, rc = -EBADF);
3343 sp.sp_inode = file_inode(layout_file);
3344 sp.sp_mirror_id = (__u16)mirror_id;
3346 bias = MDS_CLOSE_LAYOUT_SPLIT;
3349 case LL_LEASE_PCC_ATTACH:
3350 if (ioc->lil_count != 1)
3353 arg += sizeof(*ioc);
3354 if (copy_from_user(&param.pa_archive_id, (void __user *)arg,
3356 GOTO(out_lease_close, rc2 = -EFAULT);
3358 rc2 = pcc_readwrite_attach(file, inode, param.pa_archive_id);
3360 GOTO(out_lease_close, rc2);
3363 /* Grab latest data version */
3364 rc2 = ll_data_version(inode, &param.pa_data_version,
3367 GOTO(out_lease_close, rc2);
3370 bias = MDS_PCC_ATTACH;
3373 /* without close intent */
3378 rc = ll_lease_close_intent(och, inode, &lease_broken, bias, data);
3382 rc = ll_lease_och_release(inode, file);
3391 switch (ioc->lil_flags) {
3392 case LL_LEASE_RESYNC_DONE:
3394 OBD_FREE(data, data_size);
3396 case LL_LEASE_LAYOUT_MERGE:
3397 case LL_LEASE_LAYOUT_SPLIT:
3401 case LL_LEASE_PCC_ATTACH:
3404 rc = pcc_readwrite_attach_fini(file, inode,
3405 param.pa_layout_gen,
3412 rc = ll_lease_type_from_fmode(fmode);
3416 static long ll_file_set_lease(struct file *file, struct ll_ioc_lease *ioc,
3419 struct inode *inode = file_inode(file);
3420 struct ll_inode_info *lli = ll_i2info(inode);
3421 struct ll_file_data *fd = file->private_data;
3422 struct obd_client_handle *och = NULL;
3423 __u64 open_flags = 0;
3429 switch (ioc->lil_mode) {
3430 case LL_LEASE_WRLCK:
3431 if (!(file->f_mode & FMODE_WRITE))
3433 fmode = FMODE_WRITE;
3435 case LL_LEASE_RDLCK:
3436 if (!(file->f_mode & FMODE_READ))
3440 case LL_LEASE_UNLCK:
3441 RETURN(ll_file_unlock_lease(file, ioc, arg));
3446 CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
3448 /* apply for lease */
3449 if (ioc->lil_flags & LL_LEASE_RESYNC)
3450 open_flags = MDS_OPEN_RESYNC;
3451 och = ll_lease_open(inode, file, fmode, open_flags);
3453 RETURN(PTR_ERR(och));
3455 if (ioc->lil_flags & LL_LEASE_RESYNC) {
3456 rc = ll_lease_file_resync(och, inode, arg);
3458 ll_lease_close(och, inode, NULL);
3461 rc = ll_layout_refresh(inode, &fd->fd_layout_version);
3463 ll_lease_close(och, inode, NULL);
3469 mutex_lock(&lli->lli_och_mutex);
3470 if (fd->fd_lease_och == NULL) {
3471 fd->fd_lease_och = och;
3474 mutex_unlock(&lli->lli_och_mutex);
3476 /* this should not happen since only exclusive leases are supported for now */
3477 ll_lease_close(och, inode, &lease_broken);
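/*
 * Illustrative sketch, not part of the original source: userspace takes a
 * read lease through LL_IOC_SET_LEASE and later queries or releases it, in
 * line with the lil_mode handling above:
 *
 *	struct ll_ioc_lease ioc = { .lil_mode = LL_LEASE_RDLCK };
 *
 *	ioctl(fd, LL_IOC_SET_LEASE, &ioc);
 *	...
 *	ioctl(fd, LL_IOC_GET_LEASE);	// returns LL_LEASE_RDLCK while held
 *	ioc.lil_mode = LL_LEASE_UNLCK;
 *	ioctl(fd, LL_IOC_SET_LEASE, &ioc);
 */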
3483 static void ll_heat_get(struct inode *inode, struct lu_heat *heat)
3485 struct ll_inode_info *lli = ll_i2info(inode);
3486 struct ll_sb_info *sbi = ll_i2sbi(inode);
3487 __u64 now = ktime_get_real_seconds();
3490 spin_lock(&lli->lli_heat_lock);
3491 heat->lh_flags = lli->lli_heat_flags;
3492 for (i = 0; i < heat->lh_count; i++)
3493 heat->lh_heat[i] = obd_heat_get(&lli->lli_heat_instances[i],
3494 now, sbi->ll_heat_decay_weight,
3495 sbi->ll_heat_period_second);
3496 spin_unlock(&lli->lli_heat_lock);
3499 static int ll_heat_set(struct inode *inode, enum lu_heat_flag flags)
3501 struct ll_inode_info *lli = ll_i2info(inode);
3504 spin_lock(&lli->lli_heat_lock);
3505 if (flags & LU_HEAT_FLAG_CLEAR)
3506 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
3508 if (flags & LU_HEAT_FLAG_OFF)
3509 lli->lli_heat_flags |= LU_HEAT_FLAG_OFF;
3511 lli->lli_heat_flags &= ~LU_HEAT_FLAG_OFF;
3513 spin_unlock(&lli->lli_heat_lock);
3519 ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3521 struct inode *inode = file_inode(file);
3522 struct ll_file_data *fd = file->private_data;
3526 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%x\n",
3527 PFID(ll_inode2fid(inode)), inode, cmd);
3528 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
3530 /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
3531 if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
3535 case LL_IOC_GETFLAGS:
3536 /* Get the current value of the file flags */
3537 return put_user(fd->fd_flags, (int __user *)arg);
3538 case LL_IOC_SETFLAGS:
3539 case LL_IOC_CLRFLAGS:
3540 /* Set or clear specific file flags */
3541 /* XXX This probably needs checks to ensure the flags are
3542 * not abused, and to handle any flag side effects.
3544 if (get_user(flags, (int __user *) arg))
3547 if (cmd == LL_IOC_SETFLAGS) {
3548 if ((flags & LL_FILE_IGNORE_LOCK) &&
3549 !(file->f_flags & O_DIRECT)) {
3550 CERROR("%s: unable to disable locking on "
3551 "non-O_DIRECT file\n", current->comm);
3555 fd->fd_flags |= flags;
3557 fd->fd_flags &= ~flags;
3560 case LL_IOC_LOV_SETSTRIPE:
3561 case LL_IOC_LOV_SETSTRIPE_NEW:
3562 RETURN(ll_lov_setstripe(inode, file, (void __user *)arg));
3563 case LL_IOC_LOV_SETEA:
3564 RETURN(ll_lov_setea(inode, file, (void __user *)arg));
3565 case LL_IOC_LOV_SWAP_LAYOUTS: {
3567 struct lustre_swap_layouts lsl;
3569 if (copy_from_user(&lsl, (char __user *)arg,
3570 sizeof(struct lustre_swap_layouts)))
3573 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
3576 file2 = fget(lsl.sl_fd);
3580 /* O_WRONLY or O_RDWR */
3581 if ((file2->f_flags & O_ACCMODE) == O_RDONLY)
3582 GOTO(out, rc = -EPERM);
3584 if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
3585 struct inode *inode2;
3586 struct ll_inode_info *lli;
3587 struct obd_client_handle *och = NULL;
3589 lli = ll_i2info(inode);
3590 mutex_lock(&lli->lli_och_mutex);
3591 if (fd->fd_lease_och != NULL) {
3592 och = fd->fd_lease_och;
3593 fd->fd_lease_och = NULL;
3595 mutex_unlock(&lli->lli_och_mutex);
3597 GOTO(out, rc = -ENOLCK);
3598 inode2 = file_inode(file2);
3599 rc = ll_swap_layouts_close(och, inode, inode2);
3601 rc = ll_swap_layouts(file, file2, &lsl);
3607 case LL_IOC_LOV_GETSTRIPE:
3608 case LL_IOC_LOV_GETSTRIPE_NEW:
3609 RETURN(ll_file_getstripe(inode, (void __user *)arg, 0));
3610 case FS_IOC_GETFLAGS:
3611 case FS_IOC_SETFLAGS:
3612 RETURN(ll_iocontrol(inode, file, cmd, arg));
3613 case FSFILT_IOC_GETVERSION:
3614 case FS_IOC_GETVERSION:
3615 RETURN(put_user(inode->i_generation, (int __user *)arg));
3616 /* We need to special case any other ioctls we want to handle,
3617 * to send them to the MDS/OST as appropriate and to properly
3618 * network encode the arg field. */
3619 case FS_IOC_SETVERSION:
3622 case LL_IOC_GROUP_LOCK:
3623 RETURN(ll_get_grouplock(inode, file, arg));
3624 case LL_IOC_GROUP_UNLOCK:
3625 RETURN(ll_put_grouplock(inode, file, arg));
3626 case IOC_OBD_STATFS:
3627 RETURN(ll_obd_statfs(inode, (void __user *)arg));
3629 case LL_IOC_FLUSHCTX:
3630 RETURN(ll_flush_ctx(inode));
3631 case LL_IOC_PATH2FID: {
3632 if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
3633 sizeof(struct lu_fid)))
3638 case LL_IOC_GETPARENT:
3639 RETURN(ll_getparent(file, (struct getparent __user *)arg));
3641 case OBD_IOC_FID2PATH:
3642 RETURN(ll_fid2path(inode, (void __user *)arg));
3643 case LL_IOC_DATA_VERSION: {
3644 struct ioc_data_version idv;
3647 if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
3650 idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
3651 rc = ll_ioc_data_version(inode, &idv);
3654 copy_to_user((char __user *)arg, &idv, sizeof(idv)))
3660 case LL_IOC_GET_MDTIDX: {
3663 mdtidx = ll_get_mdt_idx(inode);
3667 if (put_user((int)mdtidx, (int __user *)arg))
3672 case OBD_IOC_GETDTNAME:
3673 case OBD_IOC_GETMDNAME:
3674 RETURN(ll_get_obd_name(inode, cmd, arg));
3675 case LL_IOC_HSM_STATE_GET: {
3676 struct md_op_data *op_data;
3677 struct hsm_user_state *hus;
3684 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3685 LUSTRE_OPC_ANY, hus);
3686 if (IS_ERR(op_data)) {
3688 RETURN(PTR_ERR(op_data));
3691 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
3694 if (copy_to_user((void __user *)arg, hus, sizeof(*hus)))
3697 ll_finish_md_op_data(op_data);
3701 case LL_IOC_HSM_STATE_SET: {
3702 struct hsm_state_set *hss;
3709 if (copy_from_user(hss, (char __user *)arg, sizeof(*hss))) {
3714 rc = ll_hsm_state_set(inode, hss);
3719 case LL_IOC_HSM_ACTION: {
3720 struct md_op_data *op_data;
3721 struct hsm_current_action *hca;
3728 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3729 LUSTRE_OPC_ANY, hca);
3730 if (IS_ERR(op_data)) {
3732 RETURN(PTR_ERR(op_data));
3735 rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
3738 if (copy_to_user((char __user *)arg, hca, sizeof(*hca)))
3741 ll_finish_md_op_data(op_data);
3745 case LL_IOC_SET_LEASE_OLD: {
3746 struct ll_ioc_lease ioc = { .lil_mode = (__u32)arg };
3748 RETURN(ll_file_set_lease(file, &ioc, 0));
3750 case LL_IOC_SET_LEASE: {
3751 struct ll_ioc_lease ioc;
3753 if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
3756 RETURN(ll_file_set_lease(file, &ioc, arg));
3758 case LL_IOC_GET_LEASE: {
3759 struct ll_inode_info *lli = ll_i2info(inode);
3760 struct ldlm_lock *lock = NULL;
3763 mutex_lock(&lli->lli_och_mutex);
3764 if (fd->fd_lease_och != NULL) {
3765 struct obd_client_handle *och = fd->fd_lease_och;
3767 lock = ldlm_handle2lock(&och->och_lease_handle);
3769 lock_res_and_lock(lock);
3770 if (!ldlm_is_cancel(lock))
3771 fmode = och->och_flags;
3773 unlock_res_and_lock(lock);
3774 LDLM_LOCK_PUT(lock);
3777 mutex_unlock(&lli->lli_och_mutex);
3779 RETURN(ll_lease_type_from_fmode(fmode));
3781 case LL_IOC_HSM_IMPORT: {
3782 struct hsm_user_import *hui;
3788 if (copy_from_user(hui, (void __user *)arg, sizeof(*hui))) {
3793 rc = ll_hsm_import(inode, file, hui);
3798 case LL_IOC_FUTIMES_3: {
3799 struct ll_futimes_3 lfu;
3801 if (copy_from_user(&lfu,
3802 (const struct ll_futimes_3 __user *)arg,
3806 RETURN(ll_file_futimes_3(file, &lfu));
3808 case LL_IOC_LADVISE: {
3809 struct llapi_ladvise_hdr *k_ladvise_hdr;
3810 struct llapi_ladvise_hdr __user *u_ladvise_hdr;
3813 int alloc_size = sizeof(*k_ladvise_hdr);
3816 u_ladvise_hdr = (void __user *)arg;
3817 OBD_ALLOC_PTR(k_ladvise_hdr);
3818 if (k_ladvise_hdr == NULL)
3821 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
3822 GOTO(out_ladvise, rc = -EFAULT);
3824 if (k_ladvise_hdr->lah_magic != LADVISE_MAGIC ||
3825 k_ladvise_hdr->lah_count < 1)
3826 GOTO(out_ladvise, rc = -EINVAL);
3828 num_advise = k_ladvise_hdr->lah_count;
3829 if (num_advise >= LAH_COUNT_MAX)
3830 GOTO(out_ladvise, rc = -EFBIG);
3832 OBD_FREE_PTR(k_ladvise_hdr);
3833 alloc_size = offsetof(typeof(*k_ladvise_hdr),
3834 lah_advise[num_advise]);
3835 OBD_ALLOC(k_ladvise_hdr, alloc_size);
3836 if (k_ladvise_hdr == NULL)
3840 * TODO: submit multiple advices to one server in a single RPC
3842 if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
3843 GOTO(out_ladvise, rc = -EFAULT);
3845 for (i = 0; i < num_advise; i++) {
3846 struct llapi_lu_ladvise *k_ladvise =
3847 &k_ladvise_hdr->lah_advise[i];
3848 struct llapi_lu_ladvise __user *u_ladvise =
3849 &u_ladvise_hdr->lah_advise[i];
3851 rc = ll_ladvise_sanity(inode, k_ladvise);
3853 GOTO(out_ladvise, rc);
3855 switch (k_ladvise->lla_advice) {
3856 case LU_LADVISE_LOCKNOEXPAND:
3857 rc = ll_lock_noexpand(file,
3858 k_ladvise->lla_peradvice_flags);
3859 GOTO(out_ladvise, rc);
3860 case LU_LADVISE_LOCKAHEAD:
3862 rc = ll_file_lock_ahead(file, k_ladvise);
3865 GOTO(out_ladvise, rc);
3868 &u_ladvise->lla_lockahead_result))
3869 GOTO(out_ladvise, rc = -EFAULT);
3872 rc = ll_ladvise(inode, file,
3873 k_ladvise_hdr->lah_flags,
3876 GOTO(out_ladvise, rc);
3883 OBD_FREE(k_ladvise_hdr, alloc_size);
3886 case LL_IOC_FLR_SET_MIRROR: {
3887 /* mirror I/O must be direct to avoid polluting page cache
3889 if (!(file->f_flags & O_DIRECT))
3892 fd->fd_designated_mirror = (__u32)arg;
3895 case LL_IOC_FSGETXATTR:
3896 RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
3897 case LL_IOC_FSSETXATTR:
3898 RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
3900 RETURN(put_user(PAGE_SIZE, (int __user *)arg));
3901 case LL_IOC_HEAT_GET: {
3902 struct lu_heat uheat;
3903 struct lu_heat *heat;
3906 if (copy_from_user(&uheat, (void __user *)arg, sizeof(uheat)))
3909 if (uheat.lh_count > OBD_HEAT_COUNT)
3910 uheat.lh_count = OBD_HEAT_COUNT;
3912 size = offsetof(typeof(uheat), lh_heat[uheat.lh_count]);
3913 OBD_ALLOC(heat, size);
3917 heat->lh_count = uheat.lh_count;
3918 ll_heat_get(inode, heat);
3919 rc = copy_to_user((char __user *)arg, heat, size);
3920 OBD_FREE(heat, size);
3921 RETURN(rc ? -EFAULT : 0);
3923 case LL_IOC_HEAT_SET: {
3926 if (copy_from_user(&flags, (void __user *)arg, sizeof(flags)))
3929 rc = ll_heat_set(inode, flags);
3932 case LL_IOC_PCC_DETACH: {
3933 struct lu_pcc_detach *detach;
3935 OBD_ALLOC_PTR(detach);
3939 if (copy_from_user(detach,
3940 (const struct lu_pcc_detach __user *)arg,
3942 GOTO(out_detach_free, rc = -EFAULT);
3944 if (!S_ISREG(inode->i_mode))
3945 GOTO(out_detach_free, rc = -EINVAL);
3947 if (!inode_owner_or_capable(inode))
3948 GOTO(out_detach_free, rc = -EPERM);
3950 rc = pcc_ioctl_detach(inode, detach->pccd_opt);
3952 OBD_FREE_PTR(detach);
3955 case LL_IOC_PCC_STATE: {
3956 struct lu_pcc_state __user *ustate =
3957 (struct lu_pcc_state __user *)arg;
3958 struct lu_pcc_state *state;
3960 OBD_ALLOC_PTR(state);
3964 if (copy_from_user(state, ustate, sizeof(*state)))
3965 GOTO(out_state, rc = -EFAULT);
3967 rc = pcc_ioctl_state(file, inode, state);
3969 GOTO(out_state, rc);
3971 if (copy_to_user(ustate, state, sizeof(*state)))
3972 GOTO(out_state, rc = -EFAULT);
3975 OBD_FREE_PTR(state);
3979 RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
3980 (void __user *)arg));
3984 #ifndef HAVE_FILE_LLSEEK_SIZE
3985 static inline loff_t
3986 llseek_execute(struct file *file, loff_t offset, loff_t maxsize)
3988 if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
3990 if (offset > maxsize)
3993 if (offset != file->f_pos) {
3994 file->f_pos = offset;
3995 file->f_version = 0;
4001 generic_file_llseek_size(struct file *file, loff_t offset, int origin,
4002 loff_t maxsize, loff_t eof)
4004 struct inode *inode = file_inode(file);
4012 * Here we special-case the lseek(fd, 0, SEEK_CUR)
4013 * position-querying operation. Avoid rewriting the "same"
4014 * f_pos value back to the file because a concurrent read(),
4015 * write() or lseek() might have altered it
4020 * f_lock protects against read/modify/write race with other
4021 * SEEK_CURs. Note that parallel writes and reads behave
4025 offset = llseek_execute(file, file->f_pos + offset, maxsize);
4026 inode_unlock(inode);
4030 * In the generic case the entire file is data, so as long as
4031 * offset isn't at the end of the file then the offset is data.
4038 * There is a virtual hole at the end of the file, so as long as
4039 * offset isn't i_size or larger, return i_size.
4047 return llseek_execute(file, offset, maxsize);
4051 static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
4053 struct inode *inode = file_inode(file);
4054 loff_t retval, eof = 0;
4055 ktime_t kstart = ktime_get();
4058 retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
4059 (origin == SEEK_CUR) ? file->f_pos : 0);
4060 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
4061 PFID(ll_inode2fid(inode)), inode, retval, retval,
4064 if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
4065 retval = ll_glimpse_size(inode);
4068 eof = i_size_read(inode);
4071 retval = ll_generic_file_llseek_size(file, offset, origin,
4072 ll_file_maxbytes(inode), eof);
4074 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
4075 ktime_us_delta(ktime_get(), kstart));
4079 static int ll_flush(struct file *file, fl_owner_t id)
4081 struct inode *inode = file_inode(file);
4082 struct ll_inode_info *lli = ll_i2info(inode);
4083 struct ll_file_data *fd = file->private_data;
4086 LASSERT(!S_ISDIR(inode->i_mode));
4088 /* catch async errors that were recorded back when async writeback
4089 * failed for pages in this mapping. */
4090 rc = lli->lli_async_rc;
4091 lli->lli_async_rc = 0;
4092 if (lli->lli_clob != NULL) {
4093 err = lov_read_and_clear_async_rc(lli->lli_clob);
4098 /* The application has already been told about the write failure.
4099 * Do not report the failure again. */
4100 if (fd->fd_write_failed)
4102 return rc ? -EIO : 0;
4106 * Called to make sure a portion of file has been written out.
4107 * if @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to OST.
4109 * Return how many pages have been written.
4111 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
4112 enum cl_fsync_mode mode, int ignore_layout)
4116 struct cl_fsync_io *fio;
4121 if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
4122 mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
4125 env = cl_env_get(&refcheck);
4127 RETURN(PTR_ERR(env));
4129 io = vvp_env_thread_io(env);
4130 io->ci_obj = ll_i2info(inode)->lli_clob;
4131 io->ci_ignore_layout = ignore_layout;
4133 /* initialize parameters for sync */
4134 fio = &io->u.ci_fsync;
4135 fio->fi_start = start;
4137 fio->fi_fid = ll_inode2fid(inode);
4138 fio->fi_mode = mode;
4139 fio->fi_nr_written = 0;
4141 if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
4142 result = cl_io_loop(env, io);
4144 result = io->ci_result;
4146 result = fio->fi_nr_written;
4147 cl_io_fini(env, io);
4148 cl_env_put(env, &refcheck);
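/*
 * Illustrative sketch, not part of the original source (the OBD_OBJECT_EOF
 * end offset is an assumption): a whole-file sync that also sends OST_SYNC
 * RPCs would look like:
 *
 *	rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_ALL, 0);
 *	if (rc >= 0)
 *		... rc is the number of pages written ...
 */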
4154 * When dentry is provided (the 'else' case), file_dentry() may be
4155 * null and dentry must be used directly rather than pulled from
4156 * file_dentry() as is done otherwise.
4159 int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
4161 struct dentry *dentry = file_dentry(file);
4162 struct inode *inode = dentry->d_inode;
4163 struct ll_inode_info *lli = ll_i2info(inode);
4164 struct ptlrpc_request *req;
4165 ktime_t kstart = ktime_get();
4170 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), start %lld, end %lld,"
4172 PFID(ll_inode2fid(inode)), inode, start, end, datasync);
4174 /* fsync's caller has already called _fdata{sync,write}, we want
4175 * that IO to finish before calling the osc and mdc sync methods */
4176 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
4179 /* catch async errors that were recorded back when async writeback
4180 * failed for pages in this mapping. */
4181 if (!S_ISDIR(inode->i_mode)) {
4182 err = lli->lli_async_rc;
4183 lli->lli_async_rc = 0;
4186 if (lli->lli_clob != NULL) {
4187 err = lov_read_and_clear_async_rc(lli->lli_clob);
4193 err = md_fsync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
4197 ptlrpc_req_finished(req);
4199 if (S_ISREG(inode->i_mode)) {
4200 struct ll_file_data *fd = file->private_data;
4203 /* Sync metadata on MDT first, and then sync the cached data
4206 err = pcc_fsync(file, start, end, datasync, &cached);
4208 err = cl_sync_file_range(inode, start, end,
4210 if (rc == 0 && err < 0)
4213 fd->fd_write_failed = true;
4215 fd->fd_write_failed = false;
4218 inode_unlock(inode);
4221 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
4222 ktime_us_delta(ktime_get(), kstart));
4227 ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
4229 struct inode *inode = file_inode(file);
4230 struct ll_sb_info *sbi = ll_i2sbi(inode);
4231 struct ldlm_enqueue_info einfo = {
4232 .ei_type = LDLM_FLOCK,
4233 .ei_cb_cp = ldlm_flock_completion_ast,
4234 .ei_cbdata = file_lock,
4236 struct md_op_data *op_data;
4237 struct lustre_handle lockh = { 0 };
4238 union ldlm_policy_data flock = { { 0 } };
4239 int fl_type = file_lock->fl_type;
4240 ktime_t kstart = ktime_get();
4246 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
4247 PFID(ll_inode2fid(inode)), file_lock);
4249 if (file_lock->fl_flags & FL_FLOCK) {
4250 LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
4251 /* flocks are whole-file locks */
4252 flock.l_flock.end = OFFSET_MAX;
4253 /* For flocks the owner is determined by the local file descriptor */
4254 flock.l_flock.owner = (unsigned long)file_lock->fl_file;
4255 } else if (file_lock->fl_flags & FL_POSIX) {
4256 flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
4257 flock.l_flock.start = file_lock->fl_start;
4258 flock.l_flock.end = file_lock->fl_end;
4262 flock.l_flock.pid = file_lock->fl_pid;
4264 #if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner)
4265 /* Somewhat ugly workaround for svc lockd.
4266 * lockd installs a custom fl_lmops->lm_compare_owner that checks
4267 * that the fl_owner is the same (which it always is on the local node,
4268 * presumably between lockd processes) and then compares the pid.
4269 * As such we assign the pid to the owner field to make it all work;
4270 * a conflict with normal locks is unlikely since the pid space and
4271 * the pointer space for current->files do not intersect */
4272 if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
4273 flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
4278 einfo.ei_mode = LCK_PR;
4281 /* An unlock request may or may not have any relation to
4282 * existing locks so we may not be able to pass a lock handle
4283 * via a normal ldlm_lock_cancel() request. The request may even
4284 * unlock a byte range in the middle of an existing lock. In
4285 * order to process an unlock request we need all of the same
4286 * information that is given with a normal read or write record
4287 * lock request. To avoid creating another ldlm unlock (cancel)
4288 * message we'll treat a LCK_NL flock request as an unlock. */
4289 einfo.ei_mode = LCK_NL;
4292 einfo.ei_mode = LCK_PW;
4295 CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", fl_type);
4310 flags = LDLM_FL_BLOCK_NOWAIT;
4316 flags = LDLM_FL_TEST_LOCK;
4319 CERROR("unknown fcntl lock command: %d\n", cmd);
4323 /* Save the old mode so that if the mode in the lock changes we
4324 * can decrement the appropriate reader or writer refcount. */
4325 file_lock->fl_type = einfo.ei_mode;
4327 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
4328 LUSTRE_OPC_ANY, NULL);
4329 if (IS_ERR(op_data))
4330 RETURN(PTR_ERR(op_data));
4332 CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, "
4333 "start=%llu, end=%llu\n", PFID(ll_inode2fid(inode)),
4334 flock.l_flock.pid, flags, einfo.ei_mode,
4335 flock.l_flock.start, flock.l_flock.end);
4337 rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, &lockh,
4340 /* Restore the file lock type if not TEST lock. */
4341 if (!(flags & LDLM_FL_TEST_LOCK))
4342 file_lock->fl_type = fl_type;
4344 #ifdef HAVE_LOCKS_LOCK_FILE_WAIT
4345 if ((rc == 0 || file_lock->fl_type == F_UNLCK) &&
4346 !(flags & LDLM_FL_TEST_LOCK))
4347 rc2 = locks_lock_file_wait(file, file_lock);
4349 if ((file_lock->fl_flags & FL_FLOCK) &&
4350 (rc == 0 || file_lock->fl_type == F_UNLCK))
4351 rc2 = flock_lock_file_wait(file, file_lock);
4352 if ((file_lock->fl_flags & FL_POSIX) &&
4353 (rc == 0 || file_lock->fl_type == F_UNLCK) &&
4354 !(flags & LDLM_FL_TEST_LOCK))
4355 rc2 = posix_lock_file_wait(file, file_lock);
4356 #endif /* HAVE_LOCKS_LOCK_FILE_WAIT */
4358 if (rc2 && file_lock->fl_type != F_UNLCK) {
4359 einfo.ei_mode = LCK_NL;
4360 md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data,
4365 ll_finish_md_op_data(op_data);
4368 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK,
4369 ktime_us_delta(ktime_get(), kstart));
4373 int ll_get_fid_by_name(struct inode *parent, const char *name,
4374 int namelen, struct lu_fid *fid,
4375 struct inode **inode)
4377 struct md_op_data *op_data = NULL;
4378 struct mdt_body *body;
4379 struct ptlrpc_request *req;
4383 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
4384 LUSTRE_OPC_ANY, NULL);
4385 if (IS_ERR(op_data))
4386 RETURN(PTR_ERR(op_data));
4388 op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
4389 rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
4390 ll_finish_md_op_data(op_data);
4394 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
4396 GOTO(out_req, rc = -EFAULT);
4398 *fid = body->mbo_fid1;
4401 rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
4403 ptlrpc_req_finished(req);
4407 int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum,
4410 struct dentry *dchild = NULL;
4411 struct inode *child_inode = NULL;
4412 struct md_op_data *op_data;
4413 struct ptlrpc_request *request = NULL;
4414 struct obd_client_handle *och = NULL;
4416 struct mdt_body *body;
4417 __u64 data_version = 0;
4418 size_t namelen = strlen(name);
4419 int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
4423 CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
4424 PFID(ll_inode2fid(parent)), name,
4425 lum->lum_stripe_offset, lum->lum_stripe_count);
4427 if (lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC) &&
4428 lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC_SPECIFIC))
4429 lustre_swab_lmv_user_md(lum);
4431 /* Get child FID first */
4432 qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
4435 dchild = d_lookup(file_dentry(file), &qstr);
4437 if (dchild->d_inode)
4438 child_inode = igrab(dchild->d_inode);
4443 rc = ll_get_fid_by_name(parent, name, namelen, NULL,
4452 if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
4453 OBD_CONNECT2_DIR_MIGRATE)) {
4454 if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
4455 ll_dir_striped(child_inode)) {
4456 CERROR("%s: MDT doesn't support striped directory "
4457 "migration!\n", ll_i2sbi(parent)->ll_fsname);
4458 GOTO(out_iput, rc = -EOPNOTSUPP);
4463 * the lfs migrate command needs to be blocked on the client
4464 * by checking the migrate FID against the FID of the filesystem root.
4467 if (child_inode == parent->i_sb->s_root->d_inode)
4468 GOTO(out_iput, rc = -EINVAL);
4470 op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
4471 child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
4472 if (IS_ERR(op_data))
4473 GOTO(out_iput, rc = PTR_ERR(op_data));
4475 inode_lock(child_inode);
4476 op_data->op_fid3 = *ll_inode2fid(child_inode);
4477 if (!fid_is_sane(&op_data->op_fid3)) {
4478 CERROR("%s: migrate %s, but FID "DFID" is insane\n",
4479 ll_i2sbi(parent)->ll_fsname, name,
4480 PFID(&op_data->op_fid3));
4481 GOTO(out_unlock, rc = -EINVAL);
4484 op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
4485 op_data->op_data = lum;
4486 op_data->op_data_size = lumlen;
4489 if (S_ISREG(child_inode->i_mode)) {
4490 och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
4494 GOTO(out_unlock, rc);
4497 rc = ll_data_version(child_inode, &data_version,
4500 GOTO(out_close, rc);
4502 op_data->op_open_handle = och->och_open_handle;
4503 op_data->op_data_version = data_version;
4504 op_data->op_lease_handle = och->och_lease_handle;
4505 op_data->op_bias |= MDS_CLOSE_MIGRATE;
4507 spin_lock(&och->och_mod->mod_open_req->rq_lock);
4508 och->och_mod->mod_open_req->rq_replay = 0;
4509 spin_unlock(&och->och_mod->mod_open_req->rq_lock);
4512 rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name, namelen,
4513 name, namelen, &request);
4515 LASSERT(request != NULL);
4516 ll_update_times(request, parent);
4519 if (rc == 0 || rc == -EAGAIN) {
4520 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
4521 LASSERT(body != NULL);
4523 /* If the server does release the layout lock, then we clean up
4524 * the client och here; otherwise release it in out_close: */
4525 if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
4526 obd_mod_put(och->och_mod);
4527 md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
4529 och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
4535 if (request != NULL) {
4536 ptlrpc_req_finished(request);
4540 /* Try again if the lease has been cancelled. */
4541 if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
4546 ll_lease_close(och, child_inode, NULL);
4548 clear_nlink(child_inode);
4550 inode_unlock(child_inode);
4551 ll_finish_md_op_data(op_data);
4558 ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
4560 struct ll_file_data *fd = file->private_data;
4564 * In order to avoid a flood of warning messages, only print one message
4565 * per file. The overall message rate on the client is also limited
4566 * by CDEBUG_LIMIT.
4568 if (!(fd->fd_flags & LL_FILE_FLOCK_WARNING)) {
4569 fd->fd_flags |= LL_FILE_FLOCK_WARNING;
4570 CDEBUG_LIMIT(D_TTY | D_CONSOLE,
4571 "flock disabled, mount with '-o [local]flock' to enable\r\n");
4577 * Test if some locks matching bits and l_req_mode are acquired:
4578 * - bits can be in different locks
4579 * - if found, clear the common lock bits in *bits
4580 * - the bits not found are kept in *bits
4582 * \param bits [IN] searched lock bits
4583 * \param l_req_mode [IN] searched lock mode
4584 * \retval boolean, true iff all bits are found
4586 int ll_have_md_lock(struct inode *inode, __u64 *bits, enum ldlm_mode l_req_mode)
4588 struct lustre_handle lockh;
4589 union ldlm_policy_data policy;
4590 enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
4591 (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
4600 fid = &ll_i2info(inode)->lli_fid;
4601 CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
4602 ldlm_lockname[mode]);
4604 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
4605 for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
4606 policy.l_inodebits.bits = *bits & (1 << i);
4607 if (policy.l_inodebits.bits == 0)
4610 if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
4611 &policy, mode, &lockh)) {
4612 struct ldlm_lock *lock;
4614 lock = ldlm_handle2lock(&lockh);
4617 ~(lock->l_policy_data.l_inodebits.bits);
4618 LDLM_LOCK_PUT(lock);
4620 *bits &= ~policy.l_inodebits.bits;
4627 enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
4628 struct lustre_handle *lockh, __u64 flags,
4629 enum ldlm_mode mode)
4631 union ldlm_policy_data policy = { .l_inodebits = { bits } };
4636 fid = &ll_i2info(inode)->lli_fid;
4637 CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
4639 rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
4640 fid, LDLM_IBITS, &policy, mode, lockh);
4645 static int ll_inode_revalidate_fini(struct inode *inode, int rc)
4647 /* Already unlinked. Just update nlink and return success */
4648 if (rc == -ENOENT) {
4650 /* If it is a striped directory and there is a bad stripe,
4651 * let's revalidate the dentry again instead of returning
4653 if (ll_dir_striped(inode))
4656 /* This path cannot be hit for regular files unless in
4657 * case of obscure races, so no need to validate
4659 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
4661 } else if (rc != 0) {
4662 CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
4663 "%s: revalidate FID "DFID" error: rc = %d\n",
4664 ll_i2sbi(inode)->ll_fsname,
4665 PFID(ll_inode2fid(inode)), rc);
4671 static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op)
4673 struct inode *inode = dentry->d_inode;
4674 struct obd_export *exp = ll_i2mdexp(inode);
4675 struct lookup_intent oit = {
4678 struct ptlrpc_request *req = NULL;
4679 struct md_op_data *op_data;
4683 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
4684 PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
4686 /* Call getattr by fid, so do not provide name at all. */
4687 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
4688 LUSTRE_OPC_ANY, NULL);
4689 if (IS_ERR(op_data))
4690 RETURN(PTR_ERR(op_data));
4692 rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0);
4693 ll_finish_md_op_data(op_data);
4695 rc = ll_inode_revalidate_fini(inode, rc);
4699 rc = ll_revalidate_it_finish(req, &oit, dentry);
4701 ll_intent_release(&oit);
4705 /* Unlinked? Unhash the dentry, so it is not picked up later by
4706 * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
4707 * here, to preserve get_cwd() functionality on 2.6 kernels.
4709 if (!dentry->d_inode->i_nlink) {
4710 spin_lock(&inode->i_lock);
4711 d_lustre_invalidate(dentry, 0);
4712 spin_unlock(&inode->i_lock);
4715 ll_lookup_finish_locks(&oit, dentry);
4717 ptlrpc_req_finished(req);
4722 static int ll_merge_md_attr(struct inode *inode)
4724 struct ll_inode_info *lli = ll_i2info(inode);
4725 struct cl_attr attr = { 0 };
4728 LASSERT(lli->lli_lsm_md != NULL);
4730 if (!lmv_dir_striped(lli->lli_lsm_md))
4733 down_read(&lli->lli_lsm_sem);
4734 rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
4735 &attr, ll_md_blocking_ast);
4736 up_read(&lli->lli_lsm_sem);
4740 set_nlink(inode, attr.cat_nlink);
4741 inode->i_blocks = attr.cat_blocks;
4742 i_size_write(inode, attr.cat_size);
4744 ll_i2info(inode)->lli_atime = attr.cat_atime;
4745 ll_i2info(inode)->lli_mtime = attr.cat_mtime;
4746 ll_i2info(inode)->lli_ctime = attr.cat_ctime;
4751 int ll_getattr_dentry(struct dentry *de, struct kstat *stat)
4753 struct inode *inode = de->d_inode;
4754 struct ll_sb_info *sbi = ll_i2sbi(inode);
4755 struct ll_inode_info *lli = ll_i2info(inode);
4756 ktime_t kstart = ktime_get();
4759 rc = ll_inode_revalidate(de, IT_GETATTR);
4763 if (S_ISREG(inode->i_mode)) {
4766 rc = pcc_inode_getattr(inode, &cached);
4767 if (cached && rc < 0)
4770 /* In case of restore, the MDT has the right size and has
4771 * already sent it back without granting the layout lock;
4772 * the inode is up-to-date, so a glimpse is useless.
4773 * Also, to glimpse we need the layout; in case of a running
4774 * restore the MDT holds the layout lock, so the glimpse will
4775 * block until the end of the restore (getattr will block)
4777 if (!cached && !ll_file_test_flag(lli, LLIF_FILE_RESTORING)) {
4778 rc = ll_glimpse_size(inode);
4783 /* If the object isn't a regular file then don't validate its size. */
4784 if (ll_dir_striped(inode)) {
4785 rc = ll_merge_md_attr(inode);
4790 inode->i_atime.tv_sec = lli->lli_atime;
4791 inode->i_mtime.tv_sec = lli->lli_mtime;
4792 inode->i_ctime.tv_sec = lli->lli_ctime;
4795 OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
4797 if (ll_need_32bit_api(sbi)) {
4798 stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
4799 stat->dev = ll_compat_encode_dev(inode->i_sb->s_dev);
4800 stat->rdev = ll_compat_encode_dev(inode->i_rdev);
4802 stat->ino = inode->i_ino;
4803 stat->dev = inode->i_sb->s_dev;
4804 stat->rdev = inode->i_rdev;
4807 stat->mode = inode->i_mode;
4808 stat->uid = inode->i_uid;
4809 stat->gid = inode->i_gid;
4810 stat->atime = inode->i_atime;
4811 stat->mtime = inode->i_mtime;
4812 stat->ctime = inode->i_ctime;
4813 stat->blksize = sbi->ll_stat_blksize ?: 1 << inode->i_blkbits;
4815 stat->nlink = inode->i_nlink;
4816 stat->size = i_size_read(inode);
4817 stat->blocks = inode->i_blocks;
4819 ll_stats_ops_tally(sbi, LPROC_LL_GETATTR,
4820 ktime_us_delta(ktime_get(), kstart));
4825 #ifdef HAVE_INODEOPS_ENHANCED_GETATTR
4826 int ll_getattr(const struct path *path, struct kstat *stat,
4827 u32 request_mask, unsigned int flags)
4829 struct dentry *de = path->dentry;
4831 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
4834 return ll_getattr_dentry(de, stat);
4837 static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4838 __u64 start, __u64 len)
4842 struct fiemap *fiemap;
4843 unsigned int extent_count = fieinfo->fi_extents_max;
4845 num_bytes = sizeof(*fiemap) + (extent_count *
4846 sizeof(struct fiemap_extent));
4847 OBD_ALLOC_LARGE(fiemap, num_bytes);
4852 fiemap->fm_flags = fieinfo->fi_flags;
4853 fiemap->fm_extent_count = fieinfo->fi_extents_max;
4854 fiemap->fm_start = start;
4855 fiemap->fm_length = len;
4856 if (extent_count > 0 &&
4857 copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
4858 sizeof(struct fiemap_extent)) != 0)
4859 GOTO(out, rc = -EFAULT);
4861 rc = ll_do_fiemap(inode, fiemap, num_bytes);
4863 fieinfo->fi_flags = fiemap->fm_flags;
4864 fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
4865 if (extent_count > 0 &&
4866 copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
4867 fiemap->fm_mapped_extents *
4868 sizeof(struct fiemap_extent)) != 0)
4869 GOTO(out, rc = -EFAULT);
4871 OBD_FREE_LARGE(fiemap, num_bytes);
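/*
 * Illustrative user-space sketch (assumption, not from the original source):
 * the request reaching ll_fiemap() is the standard FS_IOC_FIEMAP ioctl, with
 * a struct fiemap header followed by fm_extent_count extent slots:
 *
 *	#include <linux/fiemap.h>   // struct fiemap, FIEMAP_MAX_OFFSET
 *	#include <linux/fs.h>       // FS_IOC_FIEMAP
 *	#include <sys/ioctl.h>
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   32 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 32;
 *	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
 *		printf("%u extents mapped\n", fm->fm_mapped_extents);
 */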
4875 struct posix_acl *ll_get_acl(struct inode *inode, int type)
4877 struct ll_inode_info *lli = ll_i2info(inode);
4878 struct posix_acl *acl = NULL;
4881 spin_lock(&lli->lli_lock);
4882 /* VFS' acl_permission_check->check_acl will release the refcount */
4883 acl = posix_acl_dup(lli->lli_posix_acl);
4884 spin_unlock(&lli->lli_lock);
4889 #ifdef HAVE_IOP_SET_ACL
4890 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
4891 int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4893 struct ll_sb_info *sbi = ll_i2sbi(inode);
4894 struct ptlrpc_request *req = NULL;
4895 const char *name = NULL;
4897 size_t value_size = 0;
4902 case ACL_TYPE_ACCESS:
4903 name = XATTR_NAME_POSIX_ACL_ACCESS;
4905 rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4908 case ACL_TYPE_DEFAULT:
4909 name = XATTR_NAME_POSIX_ACL_DEFAULT;
4910 if (!S_ISDIR(inode->i_mode))
4911 rc = acl ? -EACCES : 0;
4922 value_size = posix_acl_xattr_size(acl->a_count);
4923 value = kmalloc(value_size, GFP_NOFS);
4925 GOTO(out, rc = -ENOMEM);
4927 rc = posix_acl_to_xattr(&init_user_ns, acl, value, value_size);
4929 GOTO(out_value, rc);
4932 rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
4933 value ? OBD_MD_FLXATTR : OBD_MD_FLXATTRRM,
4934 name, value, value_size, 0, 0, &req);
4936 ptlrpc_req_finished(req);
4941 forget_cached_acl(inode, type);
4943 set_cached_acl(inode, type, acl);
4946 #endif /* CONFIG_LUSTRE_FS_POSIX_ACL */
4947 #endif /* HAVE_IOP_SET_ACL */
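/*
 * Illustrative sketch (assumption, not from the original source): a
 * user-space ACL change on a Lustre mount, e.g.
 *
 *	setfacl -m u:alice:rw /mnt/lustre/file
 *
 * (the user "alice" and the path are hypothetical) reaches ll_set_acl()
 * through the VFS; the ACL is converted to the "system.posix_acl_access"
 * xattr with posix_acl_to_xattr() and forwarded to the MDS via md_setxattr()
 * as shown above.
 */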
4949 int ll_inode_permission(struct inode *inode, int mask)
4952 struct ll_sb_info *sbi;
4953 struct root_squash_info *squash;
4954 struct cred *cred = NULL;
4955 const struct cred *old_cred = NULL;
4957 bool squash_id = false;
4958 ktime_t kstart = ktime_get();
4961 if (mask & MAY_NOT_BLOCK)
4964 /* as the root inode is NOT validated during lookup operations,
4965 * it needs to be done here, before the permission check. */
4967 if (inode == inode->i_sb->s_root->d_inode) {
4968 rc = ll_inode_revalidate(inode->i_sb->s_root, IT_LOOKUP);
4973 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
4974 PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
4976 /* squash fsuid/fsgid if needed */
4977 sbi = ll_i2sbi(inode);
4978 squash = &sbi->ll_squash;
4979 if (unlikely(squash->rsi_uid != 0 &&
4980 uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
4981 !(sbi->ll_flags & LL_SBI_NOROOTSQUASH))) {
4985 CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
4986 __kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
4987 squash->rsi_uid, squash->rsi_gid);
4989 /* update current process's credentials
4990 * and FS capability */
4991 cred = prepare_creds();
4995 cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
4996 cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
4997 for (cap = 0; cap < sizeof(cfs_cap_t) * 8; cap++) {
4998 if ((1 << cap) & CFS_CAP_FS_MASK)
4999 cap_lower(cred->cap_effective, cap);
5001 old_cred = override_creds(cred);
5004 rc = generic_permission(inode, mask);
5005 /* restore current process's credentials and FS capability */
5007 revert_creds(old_cred);
5012 ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM,
5013 ktime_us_delta(ktime_get(), kstart));
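/*
 * Illustrative sketch (assumption, not from the original source): root squash
 * is configured on the server side, for example
 *
 *	lctl set_param mdt.<fsname>-MDT0000.root_squash=65534:65534
 *	lctl set_param mdt.<fsname>-MDT0000.nosquash_nids="<client NID>"
 *
 * With squashing active, a request issued by root (fsuid/fsgid 0:0) is
 * re-evaluated above with the squashed credentials and with the filesystem
 * capabilities in CFS_CAP_FS_MASK dropped.
 */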
5018 /* -o localflock - only provides locally consistent flock locks */
5019 struct file_operations ll_file_operations = {
5020 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5021 # ifdef HAVE_SYNC_READ_WRITE
5022 .read = new_sync_read,
5023 .write = new_sync_write,
5025 .read_iter = ll_file_read_iter,
5026 .write_iter = ll_file_write_iter,
5027 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5028 .read = ll_file_read,
5029 .aio_read = ll_file_aio_read,
5030 .write = ll_file_write,
5031 .aio_write = ll_file_aio_write,
5032 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5033 .unlocked_ioctl = ll_file_ioctl,
5034 .open = ll_file_open,
5035 .release = ll_file_release,
5036 .mmap = ll_file_mmap,
5037 .llseek = ll_file_seek,
5038 .splice_read = ll_file_splice_read,
5043 struct file_operations ll_file_operations_flock = {
5044 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5045 # ifdef HAVE_SYNC_READ_WRITE
5046 .read = new_sync_read,
5047 .write = new_sync_write,
5048 # endif /* HAVE_SYNC_READ_WRITE */
5049 .read_iter = ll_file_read_iter,
5050 .write_iter = ll_file_write_iter,
5051 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5052 .read = ll_file_read,
5053 .aio_read = ll_file_aio_read,
5054 .write = ll_file_write,
5055 .aio_write = ll_file_aio_write,
5056 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5057 .unlocked_ioctl = ll_file_ioctl,
5058 .open = ll_file_open,
5059 .release = ll_file_release,
5060 .mmap = ll_file_mmap,
5061 .llseek = ll_file_seek,
5062 .splice_read = ll_file_splice_read,
5065 .flock = ll_file_flock,
5066 .lock = ll_file_flock
5069 /* These are for -o noflock - to return ENOSYS on flock calls */
5070 struct file_operations ll_file_operations_noflock = {
5071 #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
5072 # ifdef HAVE_SYNC_READ_WRITE
5073 .read = new_sync_read,
5074 .write = new_sync_write,
5075 # endif /* HAVE_SYNC_READ_WRITE */
5076 .read_iter = ll_file_read_iter,
5077 .write_iter = ll_file_write_iter,
5078 #else /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5079 .read = ll_file_read,
5080 .aio_read = ll_file_aio_read,
5081 .write = ll_file_write,
5082 .aio_write = ll_file_aio_write,
5083 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
5084 .unlocked_ioctl = ll_file_ioctl,
5085 .open = ll_file_open,
5086 .release = ll_file_release,
5087 .mmap = ll_file_mmap,
5088 .llseek = ll_file_seek,
5089 .splice_read = ll_file_splice_read,
5092 .flock = ll_file_noflock,
5093 .lock = ll_file_noflock
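/*
 * Illustrative sketch (assumption, not from the original source): which of
 * the three file_operations tables is used follows from the mount options,
 * e.g.
 *
 *	mount -t lustre -o flock      <mgsnode>:/<fsname> /mnt/lustre
 *	mount -t lustre -o localflock <mgsnode>:/<fsname> /mnt/lustre
 *	mount -t lustre -o noflock    <mgsnode>:/<fsname> /mnt/lustre
 *
 * giving cluster-wide flock, locally consistent flock, or ENOSYS on flock
 * calls respectively.
 */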
5096 struct inode_operations ll_file_inode_operations = {
5097 .setattr = ll_setattr,
5098 .getattr = ll_getattr,
5099 .permission = ll_inode_permission,
5100 #ifdef HAVE_IOP_XATTR
5101 .setxattr = ll_setxattr,
5102 .getxattr = ll_getxattr,
5103 .removexattr = ll_removexattr,
5105 .listxattr = ll_listxattr,
5106 .fiemap = ll_fiemap,
5107 #ifdef HAVE_IOP_GET_ACL
5108 .get_acl = ll_get_acl,
5110 #ifdef HAVE_IOP_SET_ACL
5111 .set_acl = ll_set_acl,
5115 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
5117 struct ll_inode_info *lli = ll_i2info(inode);
5118 struct cl_object *obj = lli->lli_clob;
5127 env = cl_env_get(&refcheck);
5129 RETURN(PTR_ERR(env));
5131 rc = cl_conf_set(env, lli->lli_clob, conf);
5135 if (conf->coc_opc == OBJECT_CONF_SET) {
5136 struct ldlm_lock *lock = conf->coc_lock;
5137 struct cl_layout cl = {
5141 LASSERT(lock != NULL);
5142 LASSERT(ldlm_has_layout(lock));
5144 /* matching can only be allowed after the layout has been
5145 * applied to the inode, otherwise an incorrect layout could be
5146 * seen. Applying the layout should happen before dropping
5147 * the intent lock. */
5148 ldlm_lock_allow_match(lock);
5150 rc = cl_object_layout_get(env, obj, &cl);
5155 DFID": layout version change: %u -> %u\n",
5156 PFID(&lli->lli_fid), ll_layout_version_get(lli),
5158 ll_layout_version_set(lli, cl.cl_layout_gen);
5162 cl_env_put(env, &refcheck);
5167 /* Fetch layout from MDT with getxattr request, if it's not ready yet */
5168 static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
5171 struct ll_sb_info *sbi = ll_i2sbi(inode);
5172 struct ptlrpc_request *req;
5179 CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
5180 PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
5181 lock->l_lvb_data, lock->l_lvb_len);
5183 if (lock->l_lvb_data != NULL)
5186 /* if the layout lock was granted right away, the layout is returned
5187 * within the DLM LVB of the DLM reply; otherwise, if the lock was ever
5188 * blocked and then granted via completion AST, we have to fetch the
5189 * layout here. Please note that we can't use the LVB buffer in the
5190 * completion AST because that buffer is not large enough */
5191 rc = ll_get_default_mdsize(sbi, &lmmsize);
5195 rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), OBD_MD_FLXATTR,
5196 XATTR_NAME_LOV, lmmsize, &req);
5199 GOTO(out, rc = 0); /* empty layout */
5206 if (lmmsize == 0) /* empty layout */
5209 lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
5211 GOTO(out, rc = -EFAULT);
5213 OBD_ALLOC_LARGE(lvbdata, lmmsize);
5214 if (lvbdata == NULL)
5215 GOTO(out, rc = -ENOMEM);
5217 memcpy(lvbdata, lmm, lmmsize);
5218 lock_res_and_lock(lock);
5219 if (unlikely(lock->l_lvb_data == NULL)) {
5220 lock->l_lvb_type = LVB_T_LAYOUT;
5221 lock->l_lvb_data = lvbdata;
5222 lock->l_lvb_len = lmmsize;
5225 unlock_res_and_lock(lock);
5228 OBD_FREE_LARGE(lvbdata, lmmsize);
5233 ptlrpc_req_finished(req);
5238 * Apply the layout to the inode. Layout lock is held and will be released
5241 static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
5242 struct inode *inode)
5244 struct ll_inode_info *lli = ll_i2info(inode);
5245 struct ll_sb_info *sbi = ll_i2sbi(inode);
5246 struct ldlm_lock *lock;
5247 struct cl_object_conf conf;
5250 bool wait_layout = false;
5253 LASSERT(lustre_handle_is_used(lockh));
5255 lock = ldlm_handle2lock(lockh);
5256 LASSERT(lock != NULL);
5257 LASSERT(ldlm_has_layout(lock));
5259 LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
5260 PFID(&lli->lli_fid), inode);
5262 /* in case this is a caching lock, reinstate it with the new inode */
5263 md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
5265 lock_res_and_lock(lock);
5266 lvb_ready = ldlm_is_lvb_ready(lock);
5267 unlock_res_and_lock(lock);
5269 /* checking lvb_ready is racy, but this is okay. The worst case is
5270 * that multiple processes may configure the file at the same time. */
5274 rc = ll_layout_fetch(inode, lock);
5278 /* for layout lock, lmm is stored in lock's lvb.
5279 * lvb_data is immutable if the lock is held so it's safe to access it
5282 * Set the layout for the file. It is unlikely that this will fail, as the
5283 * old layout has surely been eliminated. */
5284 memset(&conf, 0, sizeof conf);
5285 conf.coc_opc = OBJECT_CONF_SET;
5286 conf.coc_inode = inode;
5287 conf.coc_lock = lock;
5288 conf.u.coc_layout.lb_buf = lock->l_lvb_data;
5289 conf.u.coc_layout.lb_len = lock->l_lvb_len;
5290 rc = ll_layout_conf(inode, &conf);
5292 /* refresh layout failed, need to wait */
5293 wait_layout = rc == -EBUSY;
5296 LDLM_LOCK_PUT(lock);
5297 ldlm_lock_decref(lockh, mode);
5299 /* wait for IO to complete if the old layout is still being used. */
5301 CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
5302 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
5304 memset(&conf, 0, sizeof conf);
5305 conf.coc_opc = OBJECT_CONF_WAIT;
5306 conf.coc_inode = inode;
5307 rc = ll_layout_conf(inode, &conf);
5311 CDEBUG(D_INODE, "%s file="DFID" waiting layout return: %d\n",
5312 sbi->ll_fsname, PFID(&lli->lli_fid), rc);
5318 * Issue layout intent RPC to MDS.
5319 * \param inode [in] file inode
5320 * \param intent [in] layout intent
5322 * \retval 0 on success
5323 * \retval < 0 error code
5325 static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
5327 struct ll_inode_info *lli = ll_i2info(inode);
5328 struct ll_sb_info *sbi = ll_i2sbi(inode);
5329 struct md_op_data *op_data;
5330 struct lookup_intent it;
5331 struct ptlrpc_request *req;
5335 op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
5336 0, 0, LUSTRE_OPC_ANY, NULL);
5337 if (IS_ERR(op_data))
5338 RETURN(PTR_ERR(op_data));
5340 op_data->op_data = intent;
5341 op_data->op_data_size = sizeof(*intent);
5343 memset(&it, 0, sizeof(it));
5344 it.it_op = IT_LAYOUT;
5345 if (intent->li_opc == LAYOUT_INTENT_WRITE ||
5346 intent->li_opc == LAYOUT_INTENT_TRUNC)
5347 it.it_flags = FMODE_WRITE;
5349 LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
5350 sbi->ll_fsname, PFID(&lli->lli_fid), inode);
5352 rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
5353 &ll_md_blocking_ast, 0);
5354 if (it.it_request != NULL)
5355 ptlrpc_req_finished(it.it_request);
5356 it.it_request = NULL;
5358 ll_finish_md_op_data(op_data);
5360 /* set lock data in case this is a new lock */
5362 ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
5364 ll_intent_drop_lock(&it);
5370 * This function checks whether a LAYOUT lock exists on the client side,
5371 * and enqueues one if none is cached.
5373 * The layout lock is not held on return, so it may be revoked at any time
5374 * after this function returns; any operation that depends on the layout
5375 * should be redone in that case.
5377 * This function should be called before lov_io_init() to get an up-to-date
5378 * layout version; the caller should save the version number and, after the
5379 * IO has finished, call this function again to verify that the layout
5380 * was not changed while the IO was in flight.
5382 int ll_layout_refresh(struct inode *inode, __u32 *gen)
5384 struct ll_inode_info *lli = ll_i2info(inode);
5385 struct ll_sb_info *sbi = ll_i2sbi(inode);
5386 struct lustre_handle lockh;
5387 struct layout_intent intent = {
5388 .li_opc = LAYOUT_INTENT_ACCESS,
5390 enum ldlm_mode mode;
5394 *gen = ll_layout_version_get(lli);
5395 if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE)
5399 LASSERT(fid_is_sane(ll_inode2fid(inode)));
5400 LASSERT(S_ISREG(inode->i_mode));
5402 /* take layout lock mutex to enqueue layout lock exclusively. */
5403 mutex_lock(&lli->lli_layout_mutex);
5406 /* the layout lock is mostly cached on the local side, so try to
5407 * match it first before enqueuing a new lock from the MDT. */
5408 mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
5409 LCK_CR | LCK_CW | LCK_PR |
5411 if (mode != 0) { /* hit cached lock */
5412 rc = ll_layout_lock_set(&lockh, mode, inode);
5418 rc = ll_layout_intent(inode, &intent);
5424 *gen = ll_layout_version_get(lli);
5425 mutex_unlock(&lli->lli_layout_mutex);
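/*
 * Illustrative usage sketch (not from the original source), following the
 * protocol described above ll_layout_refresh(): sample the layout generation
 * before the IO and verify it afterwards, restarting the IO if it changed:
 *
 *	__u32 gen_before, gen_after;
 *	int rc;
 *
 *	rc = ll_layout_refresh(inode, &gen_before);
 *	if (rc != 0)
 *		return rc;
 *	// ... submit the IO against the layout of generation gen_before ...
 *	rc = ll_layout_refresh(inode, &gen_after);
 *	if (rc == 0 && gen_after != gen_before) {
 *		// the layout changed while the IO was in flight - restart it
 *	}
 */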
5431 * Issue layout intent RPC indicating where in a file an IO is about to write.
5433 * \param[in] inode file inode.
5434 * \param[in] ext write range, with the start offset of the file in bytes where
5435 * an IO is about to write, and the exclusive end offset in
5438 * \retval 0 on success
5439 * \retval < 0 error code
5441 int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
5442 struct lu_extent *ext)
5444 struct layout_intent intent = {
5446 .li_extent.e_start = ext->e_start,
5447 .li_extent.e_end = ext->e_end,
5452 rc = ll_layout_intent(inode, &intent);
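/*
 * Illustrative usage sketch (assumption, not from the original source):
 * before writing a byte range that the current layout may not cover yet,
 * tell the MDT what is about to be written:
 *
 *	struct lu_extent ext = {
 *		.e_start = pos,
 *		.e_end   = pos + count,
 *	};
 *
 *	rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE, &ext);
 */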
5458 * This function sends a restore request to the MDT
5460 int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
5462 struct hsm_user_request *hur;
5466 len = sizeof(struct hsm_user_request) +
5467 sizeof(struct hsm_user_item);
5468 OBD_ALLOC(hur, len);
5472 hur->hur_request.hr_action = HUA_RESTORE;
5473 hur->hur_request.hr_archive_id = 0;
5474 hur->hur_request.hr_flags = 0;
5475 memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
5476 sizeof(hur->hur_user_item[0].hui_fid));
5477 hur->hur_user_item[0].hui_extent.offset = offset;
5478 hur->hur_user_item[0].hui_extent.length = length;
5479 hur->hur_request.hr_itemcount = 1;
5480 rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,