4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * Implementation of cl_io for VVP layer.
33 * Author: Nikita Danilov <nikita.danilov@sun.com>
34 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
37 #define DEBUG_SUBSYSTEM S_LLITE
40 #include <linux/pagevec.h>
41 #include <linux/memcontrol.h>
42 #include <linux/falloc.h>
44 #include "llite_internal.h"
45 #include "vvp_internal.h"
46 #include <libcfs/linux/linux-misc.h>
48 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
49 const struct cl_io_slice *slice)
53 vio = container_of(slice, struct vvp_io, vui_cl);
54 LASSERT(vio == vvp_env_io(env));
59 /* For swapping layout. The file's layout may have changed.
60 * To avoid populating pages into a wrong stripe, we have to verify the
61 * correctness of the layout. This works because processes swapping the
62 * layout have to acquire the group lock.
64 static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
67 struct ll_inode_info *lli = ll_i2info(inode);
68 struct vvp_io *vio = vvp_env_io(env);
71 switch (io->ci_type) {
74 /* no lock is needed here to check lli_layout_gen, as we hold the
75 * extent lock and the GROUP lock has to be held to swap the layout
77 if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
78 CFS_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
79 io->ci_need_restart = 1;
80 /* this will cause a short read/write */
85 /* fault is okay because we already have a page. */
93 static void vvp_object_size_lock(struct cl_object *obj)
95 struct inode *inode = vvp_object_inode(obj);
97 ll_inode_size_lock(inode);
98 cl_object_attr_lock(obj);
101 static void vvp_object_size_unlock(struct cl_object *obj)
103 struct inode *inode = vvp_object_inode(obj);
105 cl_object_attr_unlock(obj);
106 ll_inode_size_unlock(inode);
110 * Helper function that adjusts the file size (inode->i_size), if necessary,
111 * when the position at offset \a pos is accessed. The file size can be
112 * arbitrarily stale on a Lustre client, but the client at least knows the KMS.
113 * If the accessed area is inside [0, KMS], set the file size to KMS; otherwise, glimpse the file size.
115 * Locking: i_size_lock is used to serialize changes to inode size and to
116 * protect consistency between inode size and cl_object
117 * attributes. cl_object_size_lock() protects consistency between cl_attr's of
118 * top-object and sub-objects.
120 static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
121 struct cl_io *io, loff_t start, size_t bytes,
124 struct cl_attr *attr = vvp_env_thread_attr(env);
125 struct inode *inode = vvp_object_inode(obj);
126 loff_t pos = start + bytes - 1;
131 * Consistency guarantees: following possibilities exist for the
132 * relation between region being accessed and real file size at this
135 * (A): the region is completely inside of the file;
137 * (B-x): x bytes of region are inside of the file, the rest is
140 * (C): the region is completely outside of the file.
142 * This classification is stable under the DLM lock already acquired by
143 * the caller, because to change the class, another client has to take
144 * a DLM lock conflicting with our lock. Also, any updates to ->i_size
145 * by other threads on this client are serialized by
146 * ll_inode_size_lock(). This guarantees that short reads are handled
147 * correctly in the face of concurrent writes and truncates.
149 vvp_object_size_lock(obj);
150 result = cl_object_attr_get(env, obj, attr);
153 if (pos > kms || !attr->cat_kms_valid) {
155 * A glimpse is necessary to determine whether we
156 * return a short read (B) or some zeroes at the end
159 vvp_object_size_unlock(obj);
160 result = cl_glimpse_lock(env, io, inode, obj, 0);
161 if (result == 0 && exceed != NULL) {
162 /* If the requested page index exceeds the end-of-file
163 * page index, return directly. Do not expect the
164 * kernel to check such a case correctly.
166 loff_t size = i_size_read(inode);
167 unsigned long cur_index = start >>
170 if ((size == 0 && cur_index != 0) ||
171 (((size - 1) >> PAGE_SHIFT) <
179 * region is within kms and, hence, within real file
180 * size (A). We need to increase i_size to cover the
181 * read region so that generic_file_read() will do its
182 * job, but that doesn't mean the kms size is
183 * _correct_, it is only the _minimum_ size. If
184 * someone does a stat they will get the correct size
185 * which will always be >= the kms value here.
188 if (i_size_read(inode) < kms) {
189 i_size_write(inode, kms);
191 DFID" updating i_size %llu\n",
192 PFID(lu_object_fid(&obj->co_lu)),
193 (__u64)i_size_read(inode));
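/* Illustrative example (not in the original source, assuming 4 KiB pages):
 * if the client's cached i_size is 4096 but the lock reports kms = 8192 and
 * the read covers [0, 8191], then pos <= kms, so no glimpse is needed;
 * i_size is raised to 8192 above so the generic read path does not cut the
 * read short, while a stat still returns the authoritative size >= kms.
 */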
197 vvp_object_size_unlock(obj);
203 static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
204 __u32 enqflags, enum cl_lock_mode mode,
205 pgoff_t start, pgoff_t end)
207 struct vvp_io *vio = vvp_env_io(env);
208 struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
209 struct cl_object *obj = io->ci_obj;
211 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
214 CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
216 memset(&vio->vui_link, 0, sizeof(vio->vui_link));
218 if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
219 descr->cld_mode = CLM_GROUP;
220 descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
221 enqflags |= CEF_LOCK_MATCH;
223 descr->cld_mode = mode;
226 descr->cld_obj = obj;
227 descr->cld_start = start;
228 descr->cld_end = end;
229 descr->cld_enq_flags = enqflags;
231 cl_io_lock_add(env, io, &vio->vui_link);
236 static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
237 __u32 enqflags, enum cl_lock_mode mode,
238 loff_t start, loff_t end)
240 return vvp_io_one_lock_index(env, io, enqflags, mode,
241 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
244 static int vvp_io_write_iter_init(const struct lu_env *env,
245 const struct cl_io_slice *ios)
247 struct vvp_io *vio = cl2vvp_io(env, ios);
249 cl_page_list_init(&vio->u.readwrite.vui_queue);
250 vio->u.readwrite.vui_written = 0;
251 vio->u.readwrite.vui_from = 0;
252 vio->u.readwrite.vui_to = PAGE_SIZE;
257 static int vvp_io_read_iter_init(const struct lu_env *env,
258 const struct cl_io_slice *ios)
260 struct vvp_io *vio = cl2vvp_io(env, ios);
262 vio->u.readwrite.vui_read = 0;
267 static void vvp_io_write_iter_fini(const struct lu_env *env,
268 const struct cl_io_slice *ios)
270 struct vvp_io *vio = cl2vvp_io(env, ios);
272 LASSERT(vio->u.readwrite.vui_queue.pl_nr == 0);
275 static int vvp_io_fault_iter_init(const struct lu_env *env,
276 const struct cl_io_slice *ios)
278 struct vvp_io *vio = cl2vvp_io(env, ios);
279 struct inode *inode = vvp_object_inode(ios->cis_obj);
281 LASSERT(inode == file_inode(vio->vui_fd->fd_file));
286 static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
288 struct cl_io *io = ios->cis_io;
289 struct cl_object *obj = io->ci_obj;
290 struct vvp_io *vio = cl2vvp_io(env, ios);
291 struct inode *inode = vvp_object_inode(obj);
297 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
299 CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d need write layout %d, restore needed %d, invalidate_lock %d\n",
300 PFID(lu_object_fid(&obj->co_lu)),
301 io->ci_ignore_layout, io->ci_verify_layout,
302 vio->vui_layout_gen, io->ci_need_write_intent,
303 io->ci_restore_needed, io->ci_invalidate_page_cache);
305 #ifdef HAVE_INVALIDATE_LOCK
306 if (io->ci_invalidate_page_cache) {
307 filemap_invalidate_unlock(inode->i_mapping);
308 io->ci_invalidate_page_cache = 0;
310 #endif /* HAVE_INVALIDATE_LOCK */
312 if (io->ci_restore_needed) {
313 /* the file was detected released, we need to restore it
314 * before finishing the io
316 rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
317 /* if restore registration failed, no restart,
318 * we will return -ENODATA
320 * The layout will change after restore, so we need to
321 * block on the layout lock held by the MDT;
322 * as the MDT will not send the new layout in the lvb (see LU-3124),
323 * we have to fetch it explicitly, and all this will be done
324 * by ll_layout_refresh().
325 * Even if ll_layout_restore() returns zero, it doesn't mean
326 * that restore has been successful. Therefore it sets
327 * ci_verify_layout so that it will check layout at the end
331 io->ci_restore_needed = 1;
332 io->ci_need_restart = 0;
333 io->ci_verify_layout = 0;
338 io->ci_restore_needed = 0;
340 /* Even if ll_layout_restore() returns zero, it doesn't mean
341 * that restore has been successful. Therefore it should verify
342 * whether there was a layout change and restart the I/O accordingly.
344 ll_layout_refresh(inode, &gen);
345 io->ci_need_restart = vio->vui_layout_gen != gen;
346 if (io->ci_need_restart) {
348 DFID" layout changed from %d to %d.\n",
349 PFID(lu_object_fid(&obj->co_lu)),
350 vio->vui_layout_gen, gen);
351 /* today successful restore is the only possible case */
352 /* restore was done, clear restoring state */
353 clear_bit(LLIF_FILE_RESTORING,
354 &ll_i2info(vvp_object_inode(obj))->lli_flags);
359 /* dynamic layout change needed, send layout intent RPC. */
360 if (io->ci_need_write_intent || io->ci_need_pccro_clear) {
361 enum layout_intent_opc opc = LAYOUT_INTENT_WRITE;
363 io->ci_need_write_intent = 0;
365 LASSERT(io->ci_type == CIT_WRITE || cl_io_is_fallocate(io) ||
366 cl_io_is_trunc(io) || cl_io_is_mkwrite(io));
368 CDEBUG(D_VFSTRACE, DFID" write layout, type %u "DEXT"\n",
369 PFID(lu_object_fid(&obj->co_lu)), io->ci_type,
370 PEXT(&io->ci_write_intent));
372 if (cl_io_is_trunc(io))
373 opc = LAYOUT_INTENT_TRUNC;
375 if (io->ci_need_pccro_clear) {
376 io->ci_need_pccro_clear = 0;
377 opc = LAYOUT_INTENT_PCCRO_CLEAR;
380 rc = ll_layout_write_intent(inode, opc, &io->ci_write_intent);
383 io->ci_need_restart = 1;
387 if (!io->ci_need_restart &&
388 !io->ci_ignore_layout && io->ci_verify_layout) {
389 /* check layout version */
390 ll_layout_refresh(inode, &gen);
391 io->ci_need_restart = vio->vui_layout_gen != gen;
392 if (io->ci_need_restart) {
394 DFID" layout changed from %d to %d.\n",
395 PFID(lu_object_fid(&obj->co_lu)),
396 vio->vui_layout_gen, gen);
404 static void vvp_io_fault_fini(const struct lu_env *env,
405 const struct cl_io_slice *ios)
407 struct cl_io *io = ios->cis_io;
408 struct cl_page *page = io->u.ci_fault.ft_page;
410 CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
413 lu_ref_del(&page->cp_reference, "fault", io);
414 cl_page_put(env, page);
415 io->u.ci_fault.ft_page = NULL;
417 vvp_io_fini(env, ios);
420 static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
422 /* we only want to hold PW locks if the mmap() can generate
423 * writes back to the file, and that only happens in shared mappings
426 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
431 static int vvp_mmap_locks(const struct lu_env *env,
432 struct vvp_io *vio, struct cl_io *io)
434 struct vvp_thread_info *vti = vvp_env_info(env);
435 struct mm_struct *mm = current->mm;
436 struct vm_area_struct *vma;
437 struct cl_lock_descr *descr = &vti->vti_descr;
438 union ldlm_policy_data policy;
447 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
449 /* NFS or loopback device write */
450 if (vio->vui_iter == NULL)
453 /* No MM (e.g. NFS)? Then no VMAs either. */
457 if (!iter_is_iovec(vio->vui_iter) && !iov_iter_is_kvec(vio->vui_iter))
460 for (i = *vio->vui_iter;
462 iov_iter_advance(&i, iov.iov_len)) {
463 iov = iov_iter_iovec(&i);
464 addr = (unsigned long)iov.iov_base;
470 bytes += addr & ~PAGE_MASK;
474 while ((vma = our_vma(mm, addr, bytes)) != NULL) {
475 struct dentry *de = file_dentry(vma->vm_file);
476 struct inode *inode = de->d_inode;
477 int flags = CEF_MUST;
479 if (ll_file_nolock(vma->vm_file)) {
480 /* mmap is not allowed in the nolock case */
486 * XXX: The required lock mode can be weakened: CIT_WRITE
487 * io only ever reads the user-level buffer, and CIT_READ only ever writes to it.
490 policy_from_vma(&policy, vma, addr, bytes);
491 descr->cld_mode = vvp_mode_from_vma(vma);
492 descr->cld_obj = ll_i2info(inode)->lli_clob;
493 descr->cld_start = policy.l_extent.start >> PAGE_SHIFT;
494 descr->cld_end = policy.l_extent.end >> PAGE_SHIFT;
495 descr->cld_enq_flags = flags;
496 result = cl_io_lock_alloc_add(env, io, descr);
498 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
499 descr->cld_mode, descr->cld_start,
505 if (vma->vm_end - addr >= bytes)
508 bytes -= vma->vm_end - addr;
511 mmap_read_unlock(mm);
518 static void vvp_io_advance(const struct lu_env *env,
519 const struct cl_io_slice *ios, size_t bytes)
521 struct cl_object *obj = ios->cis_io->ci_obj;
522 struct vvp_io *vio = cl2vvp_io(env, ios);
524 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
527 * Since 3.16 (26978b8b4) the VFS reverts the iov_iter to its
528 * original position even if the io succeeded, so instead
529 * of relying on the VFS, we advance the iov_iter ourselves.
531 iov_iter_advance(vio->vui_iter, bytes);
532 CDEBUG(D_VFSTRACE, "advancing %ld bytes\n", bytes);
533 vio->vui_tot_bytes -= bytes;
534 iov_iter_reexpand(vio->vui_iter, vio->vui_tot_bytes);
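/* Illustrative sketch (not in the original source): assuming a 16384-byte
 * write processed in 4096-byte chunks, vvp_io_update_iov() first truncates
 * the iter to the 4096-byte chunk; once the chunk completes, the advance
 * above consumes 4096 bytes, vui_tot_bytes drops to 12288, and
 * iov_iter_reexpand() restores the iter count to the remaining 12288 bytes.
 */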
537 static void vvp_io_update_iov(const struct lu_env *env,
538 struct vvp_io *vio, struct cl_io *io)
540 size_t size = io->u.ci_rw.crw_bytes;
545 iov_iter_truncate(vio->vui_iter, size);
548 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
549 enum cl_lock_mode mode, loff_t start, loff_t end)
551 struct vvp_io *vio = vvp_env_io(env);
555 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
558 vvp_io_update_iov(env, vio, io);
560 if (io->u.ci_rw.crw_nonblock)
561 ast_flags |= CEF_NONBLOCK;
562 if (io->ci_lock_no_expand)
563 ast_flags |= CEF_LOCK_NO_EXPAND;
567 /* Group lock held means no lockless io any more */
568 if (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
571 flags = iocb_ki_flags_get(vio->vui_iocb->ki_filp,
573 if (ll_file_nolock(vio->vui_fd->fd_file) ||
574 (iocb_ki_flags_check(flags, DIRECT) &&
576 ast_flags |= CEF_NEVER;
579 result = vvp_mmap_locks(env, vio, io);
581 result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
586 static int vvp_io_read_lock(const struct lu_env *env,
587 const struct cl_io_slice *ios)
589 struct cl_io *io = ios->cis_io;
590 struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
594 result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
595 rd->crw_pos + rd->crw_bytes - 1);
599 static int vvp_io_fault_lock(const struct lu_env *env,
600 const struct cl_io_slice *ios)
602 struct cl_io *io = ios->cis_io;
603 struct vvp_io *vio = cl2vvp_io(env, ios);
604 /* XXX LDLM_FL_CBPENDING */
605 return vvp_io_one_lock_index(env,
607 vvp_mode_from_vma(vio->u.fault.ft_vma),
608 io->u.ci_fault.ft_index,
609 io->u.ci_fault.ft_index);
612 static int vvp_io_write_lock(const struct lu_env *env,
613 const struct cl_io_slice *ios)
615 struct cl_io *io = ios->cis_io;
619 if (io->u.ci_wr.wr_append) {
621 end = OBD_OBJECT_EOF;
623 start = io->u.ci_wr.wr.crw_pos;
624 end = start + io->u.ci_wr.wr.crw_bytes - 1;
627 RETURN(vvp_io_rw_lock(env, io, CLM_WRITE, start, end));
630 static int vvp_io_setattr_iter_init(const struct lu_env *env,
631 const struct cl_io_slice *ios)
638 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
640 * Handles "lockless io" mode when extent locking is done by server.
642 static int vvp_io_setattr_lock(const struct lu_env *env,
643 const struct cl_io_slice *ios)
645 struct cl_io *io = ios->cis_io;
646 __u64 lock_start = 0;
647 __u64 lock_end = OBD_OBJECT_EOF;
650 if (cl_io_is_trunc(io)) {
651 struct inode *inode = vvp_object_inode(io->ci_obj);
653 /* set enqueue flags to CEF_MUST in case of encrypted file,
654 * to prevent lockless truncate
656 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
658 else if (io->u.ci_setattr.sa_attr.lvb_size == 0)
659 enqflags = CEF_DISCARD_DATA;
660 } else if (cl_io_is_fallocate(io)) {
661 lock_start = io->u.ci_setattr.sa_falloc_offset;
662 lock_end = io->u.ci_setattr.sa_falloc_end - 1;
664 unsigned int valid = io->u.ci_setattr.sa_avalid;
666 if (!(valid & TIMES_SET_FLAGS))
669 if ((!(valid & ATTR_MTIME) ||
670 io->u.ci_setattr.sa_attr.lvb_mtime >=
671 io->u.ci_setattr.sa_attr.lvb_ctime) &&
672 (!(valid & ATTR_ATIME) ||
673 io->u.ci_setattr.sa_attr.lvb_atime >=
674 io->u.ci_setattr.sa_attr.lvb_ctime))
678 return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
679 lock_start, lock_end);
682 static int vvp_do_vmtruncate(struct inode *inode, size_t size)
686 /* Only ll_inode_size_lock is taken at this level. */
687 ll_inode_size_lock(inode);
688 result = inode_newsize_ok(inode, size);
690 ll_inode_size_unlock(inode);
693 i_size_write(inode, size);
695 ll_truncate_pagecache(inode, size);
696 ll_inode_size_unlock(inode);
700 static int vvp_io_setattr_time(const struct lu_env *env,
701 const struct cl_io_slice *ios)
703 struct cl_io *io = ios->cis_io;
704 struct cl_object *obj = io->ci_obj;
705 struct cl_attr *attr = vvp_env_thread_attr(env);
707 unsigned int valid = CAT_CTIME;
709 cl_object_attr_lock(obj);
710 attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
711 if (io->u.ci_setattr.sa_avalid & ATTR_ATIME_SET) {
712 attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
715 if (io->u.ci_setattr.sa_avalid & ATTR_MTIME_SET) {
716 attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
719 result = cl_object_attr_update(env, obj, attr, valid);
720 cl_object_attr_unlock(obj);
725 static int vvp_io_setattr_start(const struct lu_env *env,
726 const struct cl_io_slice *ios)
728 struct cl_io *io = ios->cis_io;
729 struct inode *inode = vvp_object_inode(io->ci_obj);
730 struct ll_inode_info *lli = ll_i2info(inode);
731 int mode = io->u.ci_setattr.sa_falloc_mode;
733 if (cl_io_is_trunc(io)) {
734 trunc_sem_down_write(&lli->lli_trunc_sem);
735 mutex_lock(&lli->lli_setattr_mutex);
736 inode_dio_wait(inode);
737 } else if (cl_io_is_fallocate(io)) {
740 trunc_sem_down_write(&lli->lli_trunc_sem);
741 mutex_lock(&lli->lli_setattr_mutex);
742 inode_dio_wait(inode);
744 ll_merge_attr(env, inode);
745 size = i_size_read(inode);
746 if (io->u.ci_setattr.sa_falloc_end > size &&
747 !(mode & FALLOC_FL_KEEP_SIZE)) {
748 size = io->u.ci_setattr.sa_falloc_end;
749 io->u.ci_setattr.sa_avalid |= ATTR_SIZE;
751 io->u.ci_setattr.sa_attr.lvb_size = size;
753 mutex_lock(&lli->lli_setattr_mutex);
756 if (io->u.ci_setattr.sa_avalid & TIMES_SET_FLAGS)
757 return vvp_io_setattr_time(env, ios);
762 static void vvp_io_setattr_end(const struct lu_env *env,
763 const struct cl_io_slice *ios)
765 struct cl_io *io = ios->cis_io;
766 struct inode *inode = vvp_object_inode(io->ci_obj);
767 struct ll_inode_info *lli = ll_i2info(inode);
768 loff_t size = io->u.ci_setattr.sa_attr.lvb_size;
770 if (cl_io_is_trunc(io)) {
771 /* Truncate in-memory pages - they must be clean pages
772 * because osc has already been notified to destroy the osc_extents.
774 vvp_do_vmtruncate(inode, size);
775 mutex_unlock(&lli->lli_setattr_mutex);
776 trunc_sem_up_write(&lli->lli_trunc_sem);
778 /* Update size and blocks for LSOM */
779 if (!io->ci_ignore_layout)
780 ll_merge_attr(env, inode);
781 } else if (cl_io_is_fallocate(io)) {
782 int mode = io->u.ci_setattr.sa_falloc_mode;
784 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
785 size > i_size_read(inode)) {
786 ll_inode_size_lock(inode);
787 i_size_write(inode, size);
788 ll_inode_size_unlock(inode);
790 inode->i_ctime = current_time(inode);
791 mutex_unlock(&lli->lli_setattr_mutex);
792 trunc_sem_up_write(&lli->lli_trunc_sem);
794 mutex_unlock(&lli->lli_setattr_mutex);
798 static void vvp_io_setattr_fini(const struct lu_env *env,
799 const struct cl_io_slice *ios)
801 bool restore_needed = ios->cis_io->ci_restore_needed;
802 struct inode *inode = vvp_object_inode(ios->cis_obj);
804 vvp_io_fini(env, ios);
806 if (restore_needed && !ios->cis_io->ci_restore_needed) {
807 /* restore finished, set data modified flag for HSM */
808 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
812 static int vvp_io_read_start(const struct lu_env *env,
813 const struct cl_io_slice *ios)
815 struct vvp_io *vio = cl2vvp_io(env, ios);
816 struct cl_io *io = ios->cis_io;
817 struct cl_object *obj = io->ci_obj;
818 struct inode *inode = vvp_object_inode(obj);
819 struct ll_inode_info *lli = ll_i2info(inode);
820 struct file *file = vio->vui_fd->fd_file;
821 loff_t pos = io->u.ci_rd.rd.crw_pos;
822 size_t crw_bytes = io->u.ci_rd.rd.crw_bytes;
823 size_t tot_bytes = vio->vui_tot_bytes;
824 struct ll_cl_context *lcc;
828 int total_bytes_read = 0;
829 struct iov_iter iter;
835 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
837 CDEBUG(D_VFSTRACE, "%s: read [%llu, %llu)\n",
838 file_dentry(file)->d_name.name,
839 pos, pos + crw_bytes);
841 trunc_sem_down_read(&lli->lli_trunc_sem);
843 if (io->ci_async_readahead) {
848 if (!can_populate_pages(env, io, inode))
851 flags = iocb_ki_flags_get(file, vio->vui_iocb);
852 if (!iocb_ki_flags_check(flags, DIRECT)) {
853 result = cl_io_lru_reserve(env, io, pos, crw_bytes);
858 /* Unless this is a read of a sparse file, the lock has already
859 * been acquired, so vvp_prep_size() is an empty op.
861 result = vvp_prep_size(env, obj, io, pos, crw_bytes, &exceed);
864 else if (exceed != 0)
867 LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
868 "Read ino %lu, %zu bytes, offset %lld, size %llu\n",
869 inode->i_ino, crw_bytes, pos, i_size_read(inode));
871 /* initialize read-ahead window once per syscall */
872 if (!vio->vui_ra_valid) {
873 vio->vui_ra_valid = true;
874 vio->vui_ra_start_idx = pos >> PAGE_SHIFT;
875 vio->vui_ra_pages = 0;
876 page_offset = pos & ~PAGE_MASK;
879 if (tot_bytes > PAGE_SIZE - page_offset)
880 tot_bytes -= (PAGE_SIZE - page_offset);
884 vio->vui_ra_pages += (tot_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
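/* Worked example (illustrative, assuming 4 KiB pages): with 10240 bytes
 * remaining after the partial first page has been accounted for, the line
 * above adds (10240 + 4095) >> 12 = 3 read-ahead pages.
 */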
886 CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
887 vio->vui_tot_bytes, vio->vui_ra_start_idx,
893 LASSERT(vio->vui_iocb->ki_pos == pos);
894 iter = *vio->vui_iter;
896 lcc = ll_cl_find(inode);
897 lcc->lcc_end_index = DIV_ROUND_UP(pos + iter.count, PAGE_SIZE);
898 CDEBUG(D_VFSTRACE, "count:%ld iocb pos:%lld\n", iter.count, pos);
900 /* this seqlock lets us notice if a page has been deleted on this inode
901 * during the fault process, allowing us to catch an erroneous short
902 * read or EIO. See LU-16160
905 seq = read_seqbegin(&ll_i2info(inode)->lli_page_inv_lock);
906 result = generic_file_read_iter(vio->vui_iocb, &iter);
908 io->ci_bytes += result;
909 total_bytes_read += result;
911 /* if we got a short read or -EIO and raced with page invalidation, retry */
912 } while (read_seqretry(&ll_i2info(inode)->lli_page_inv_lock, seq) &&
913 ((result >= 0 && iov_iter_count(&iter) > 0)
918 if (total_bytes_read < crw_bytes)
921 } else if (result == -EIOCBQUEUED) {
922 io->ci_bytes += vio->u.readwrite.vui_read;
923 vio->vui_iocb->ki_pos = pos + vio->u.readwrite.vui_read;
929 static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
930 struct cl_page_list *plist, int from, int to)
932 struct cl_2queue *queue = &io->ci_queue;
933 struct cl_page *page;
934 unsigned int bytes = 0;
939 if (plist->pl_nr == 0)
942 if (from > 0 || to != PAGE_SIZE) {
943 page = cl_page_list_first(plist);
944 if (plist->pl_nr == 1) {
945 cl_page_clip(env, page, from, to);
948 cl_page_clip(env, page, from, PAGE_SIZE);
949 if (to != PAGE_SIZE) {
950 page = cl_page_list_last(plist);
951 cl_page_clip(env, page, 0, to);
956 cl_2queue_init(queue);
957 cl_page_list_splice(plist, &queue->c2_qin);
958 rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
960 /* plist is not sorted any more */
961 cl_page_list_splice(&queue->c2_qin, plist);
962 cl_page_list_splice(&queue->c2_qout, plist);
963 cl_2queue_fini(env, queue);
966 /* calculate bytes */
967 bytes = plist->pl_nr << PAGE_SHIFT;
968 bytes -= from + PAGE_SIZE - to;
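/* Worked example (illustrative, assuming 4 KiB pages): with 3 pages,
 * from = 512 and to = 1024, bytes = 3 * 4096 - (512 + 4096 - 1024) = 8704,
 * i.e. 3584 bytes of the first page, one full middle page, and 1024 bytes
 * of the last page.
 */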
970 while (plist->pl_nr > 0) {
971 page = cl_page_list_first(plist);
972 cl_page_list_del(env, plist, page, true);
974 cl_page_clip(env, page, 0, PAGE_SIZE);
976 SetPageUptodate(cl_page_vmpage(page));
977 cl_page_disown(env, io, page);
979 /* held in ll_cl_init() */
980 lu_ref_del(&page->cp_reference, "cl_io", io);
981 cl_page_put(env, page);
985 RETURN(bytes > 0 ? bytes : rc);
989 * From kernel v4.19-rc5-248-g9b89a0355144 use XArray
990 * Prior kernels use radix_tree for tags
992 static inline void ll_page_tag_dirty(struct page *page,
993 struct address_space *mapping)
995 #ifndef HAVE_RADIX_TREE_TAG_SET
996 __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
998 radix_tree_tag_set(&mapping->page_tree, page_index(page),
999 PAGECACHE_TAG_DIRTY);
1004 * Kernels 4.2 - 4.5 pass memcg argument to account_page_dirtied()
1005 * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
1007 static inline void ll_account_page_dirtied(struct page *page,
1008 struct address_space *mapping)
1010 #ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
1011 struct mem_cgroup *memcg = mem_cgroup_begin_page_stat(page);
1013 account_page_dirtied(page, mapping, memcg);
1014 mem_cgroup_end_page_stat(memcg);
1015 #elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT)
1016 account_page_dirtied(page, mapping);
1018 vvp_account_page_dirtied(page, mapping);
1020 ll_page_tag_dirty(page, mapping);
1023 /* Taken from kernel set_page_dirty, __set_page_dirty_nobuffers
1024 * Last change to this area: b93b016313b3ba8003c3b8bb71f569af91f19fc7
1026 * Current with Linus tip of tree (7/13/2019):
1027 * v5.2-rc4-224-ge01e060fe0
1029 * Backwards compat for 3.x, 5.x kernels relating to memcg handling
1030 * & rename of radix tree to xarray.
1032 static void vvp_set_pagevec_dirty(struct pagevec *pvec)
1034 struct page *page = pvec->pages[0];
1035 int count = pagevec_count(pvec);
1037 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
1038 struct address_space *mapping = page->mapping;
1039 unsigned long flags;
1040 unsigned long skip_pages = 0;
1046 BUILD_BUG_ON(PAGEVEC_SIZE > BITS_PER_LONG);
1047 LASSERTF(page->mapping,
1048 "mapping must be set. page %px, page->private (cl_page) %px\n",
1049 page, (void *) page->private);
1052 * kernels without HAVE_KALLSYMS_LOOKUP_NAME also don't have
1053 * account_page_dirtied exported, and if we can't access that symbol,
1054 * we can't do page dirtying in batch (taking the xarray lock only once)
1055 * so we just fall back to a looped call to __set_page_dirty_nobuffers
1057 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
1058 if (!vvp_account_page_dirtied) {
1059 for (i = 0; i < count; i++)
1060 __set_page_dirty_nobuffers(pvec->pages[i]);
1065 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
1066 for (i = 0; i < count; i++) {
1067 page = pvec->pages[i];
1069 ClearPageReclaim(page);
1071 vvp_lock_page_memcg(page);
1072 if (TestSetPageDirty(page)) {
1073 /* page is already dirty .. no extra work needed
1074 * set a flag for the i'th page to be skipped
1076 vvp_unlock_page_memcg(page);
1077 skip_pages |= (1 << i);
1081 ll_xa_lock_irqsave(&mapping->i_pages, flags);
1083 /* Notes on differences with __set_page_dirty_nobuffers:
1084 * 1. We don't need to call page_mapping because we know this is a page cache page.
1086 * 2. We have the pages locked, so there is no need for the careful
1087 * mapping/mapping2 dance.
1088 * 3. A NULL mapping is impossible. (The race w/truncate mentioned in
1089 * dirty_nobuffers should be impossible because we hold the page lock.)
1090 * 4. All mappings are the same because the io is only to one file.
1092 for (i = 0; i < count; i++) {
1093 page = pvec->pages[i];
1094 /* if the i'th page was found already dirty above, skip it here */
1095 if ((skip_pages >> i) & 1)
1098 LASSERTF(page->mapping == mapping,
1099 "all pages must have the same mapping. page %px, mapping %px, first mapping %px\n",
1100 page, page->mapping, mapping);
1101 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1102 ll_account_page_dirtied(page, mapping);
1104 vvp_unlock_page_memcg(page);
1106 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
1108 CDEBUG(D_VFSTRACE, "mapping %p, count %d, dirtied %d\n", mapping,
1111 if (mapping->host && dirtied) {
1112 /* !PageAnon && !swapper_space */
1113 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1119 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
1120 struct pagevec *pvec)
1127 count = pagevec_count(pvec);
1130 for (i = 0; i < count; i++) {
1131 struct page *vmpage = pvec->pages[i];
1133 SetPageUptodate(vmpage);
1136 vvp_set_pagevec_dirty(pvec);
1138 for (i = 0; i < count; i++) {
1139 struct page *vmpage = pvec->pages[i];
1140 struct cl_page *page = (struct cl_page *) vmpage->private;
1142 cl_page_disown(env, io, page);
1143 lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
1144 cl_page_put(env, page);
1150 /* make sure the page list is contiguous */
1151 static bool page_list_sanity_check(struct cl_object *obj,
1152 struct cl_page_list *plist)
1154 struct cl_page *page;
1155 pgoff_t index = CL_PAGE_EOF;
1157 cl_page_list_for_each(page, plist) {
1158 if (index == CL_PAGE_EOF) {
1159 index = cl_page_index(page);
1164 if (index == cl_page_index(page))
1172 /* Return how many bytes have been queued or written */
1173 int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
1175 struct cl_object *obj = io->ci_obj;
1176 struct inode *inode = vvp_object_inode(obj);
1177 struct vvp_io *vio = vvp_env_io(env);
1178 struct cl_page_list *queue = &vio->u.readwrite.vui_queue;
1179 struct cl_page *page;
1182 unsigned int npages = vio->u.readwrite.vui_queue.pl_nr;
1189 CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
1190 npages, vio->u.readwrite.vui_from, vio->u.readwrite.vui_to);
1192 LASSERT(page_list_sanity_check(obj, queue));
1194 /* submit IO with async write */
1195 rc = cl_io_commit_async(env, io, queue,
1196 vio->u.readwrite.vui_from,
1197 vio->u.readwrite.vui_to,
1198 write_commit_callback);
1199 npages -= queue->pl_nr; /* already committed pages */
1201 /* calculate how many bytes were written */
1202 bytes = npages << PAGE_SHIFT;
1205 bytes -= vio->u.readwrite.vui_from;
1206 if (queue->pl_nr == 0) /* last page */
1207 bytes -= PAGE_SIZE - vio->u.readwrite.vui_to;
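/* Worked example (illustrative, assuming 4 KiB pages): if 2 pages were
 * committed with vui_from = 1024, vui_to = 2048 and nothing is left in
 * the queue, bytes = 2 * 4096 - 1024 - (4096 - 2048) = 5120.
 */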
1208 LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
1210 vio->u.readwrite.vui_written += bytes;
1212 CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
1213 npages, bytes, vio->u.readwrite.vui_written);
1215 /* the first page must have been written. */
1216 vio->u.readwrite.vui_from = 0;
1218 LASSERT(page_list_sanity_check(obj, queue));
1219 LASSERT(ergo(rc == 0, queue->pl_nr == 0));
1221 /* out of quota, try sync write */
1222 if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
1223 struct ll_inode_info *lli = ll_i2info(inode);
1225 rc = vvp_io_commit_sync(env, io, queue,
1226 vio->u.readwrite.vui_from,
1227 vio->u.readwrite.vui_to);
1229 vio->u.readwrite.vui_written += rc;
1232 if (lli->lli_clob != NULL)
1233 lov_read_and_clear_async_rc(lli->lli_clob);
1234 lli->lli_async_rc = 0;
1237 /* update inode size */
1238 ll_merge_attr(env, inode);
1240 /* The pages remaining in the queue failed to commit, so discard them
1241 * unless they were dirtied before.
1243 while (queue->pl_nr > 0) {
1244 page = cl_page_list_first(queue);
1245 cl_page_list_del(env, queue, page, true);
1247 if (!PageDirty(cl_page_vmpage(page)))
1248 cl_page_discard(env, io, page);
1250 cl_page_disown(env, io, page);
1252 /* held in ll_cl_init() */
1253 lu_ref_del(&page->cp_reference, "cl_io", io);
1254 cl_page_put(env, page);
1256 cl_page_list_fini(env, queue);
1261 static int vvp_io_write_start(const struct lu_env *env,
1262 const struct cl_io_slice *ios)
1264 struct vvp_io *vio = cl2vvp_io(env, ios);
1265 struct cl_io *io = ios->cis_io;
1266 struct cl_object *obj = io->ci_obj;
1267 struct inode *inode = vvp_object_inode(obj);
1268 struct ll_inode_info *lli = ll_i2info(inode);
1269 struct file *file = vio->vui_fd->fd_file;
1271 loff_t pos = io->u.ci_wr.wr.crw_pos;
1272 size_t crw_bytes = io->u.ci_wr.wr.crw_bytes;
1273 bool lock_inode = !IS_NOSEC(inode);
1274 size_t ci_bytes = io->ci_bytes;
1275 struct iov_iter iter;
1281 trunc_sem_down_read(&lli->lli_trunc_sem);
1283 if (!can_populate_pages(env, io, inode))
1286 if (cl_io_is_append(io)) {
1288 * PARALLEL IO This has to be changed for parallel IO doing
1289 * out-of-order writes.
1291 ll_merge_attr(env, inode);
1292 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
1293 vio->vui_iocb->ki_pos = pos;
1295 LASSERTF(vio->vui_iocb->ki_pos == pos,
1296 "ki_pos %lld [%lld, %lld)\n",
1297 vio->vui_iocb->ki_pos,
1298 pos, pos + crw_bytes);
1301 CDEBUG(D_VFSTRACE, "%s: write [%llu, %llu)\n",
1302 file_dentry(file)->d_name.name, pos, pos + crw_bytes);
1304 /* The maximum Lustre file size is variable, based on the OST maximum
1305 * object size and number of stripes. This needs another check in
1306 * addition to the VFS checks earlier.
1308 if (pos + crw_bytes > ll_file_maxbytes(inode)) {
1310 "%s: file %s ("DFID") offset %llu > maxbytes %llu\n",
1311 ll_i2sbi(inode)->ll_fsname,
1312 file_dentry(file)->d_name.name,
1313 PFID(ll_inode2fid(inode)), pos + crw_bytes,
1314 ll_file_maxbytes(inode));
1318 /* Tests to verify we take the i_mutex correctly */
1319 if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_SEC) && !lock_inode)
1322 if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_NOSEC) && lock_inode)
1325 flags = iocb_ki_flags_get(file, vio->vui_iocb);
1326 if (!iocb_ki_flags_check(flags, DIRECT)) {
1327 result = cl_io_lru_reserve(env, io, pos, crw_bytes);
1332 if (vio->vui_iter == NULL) {
1333 /* from a temp io in ll_cl_init(). */
1337 * When using the locked AIO function (generic_file_aio_write())
1338 * testing has shown the inode mutex to be a limiting factor
1339 * with multi-threaded single shared file performance. To get
1340 * around this, we now use the lockless version. To maintain
1341 * consistency, proper locking to protect against writes,
1342 * truncates, etc. is handled in the higher layers of lustre.
1344 lock_inode = !IS_NOSEC(inode);
1345 iter = *vio->vui_iter;
1347 if (unlikely(lock_inode))
1348 ll_inode_lock(inode);
1349 result = __generic_file_write_iter(vio->vui_iocb, &iter);
1350 if (unlikely(lock_inode))
1351 ll_inode_unlock(inode);
1355 #ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
1356 result = generic_write_sync(vio->vui_iocb, result);
1361 err = generic_write_sync(vio->vui_iocb->ki_filp, pos,
1363 if (err < 0 && result > 0)
1370 result = vvp_io_write_commit(env, io);
1371 /* Simulate short commit */
1372 if (CFS_FAULT_CHECK(OBD_FAIL_LLITE_SHORT_COMMIT)) {
1373 vio->u.readwrite.vui_written >>= 1;
1374 if (vio->u.readwrite.vui_written > 0)
1375 io->ci_need_restart = 1;
1377 if (vio->u.readwrite.vui_written > 0) {
1378 result = vio->u.readwrite.vui_written;
1379 CDEBUG(D_VFSTRACE, "%s: write bytes %zd, result: %zd\n",
1380 file_dentry(file)->d_name.name,
1381 io->ci_bytes, result);
1382 io->ci_bytes += result;
1384 io->ci_continue = 0;
1387 if (vio->vui_iocb->ki_pos != (pos + io->ci_bytes - ci_bytes)) {
1389 "%s: write position mismatch: ki_pos %lld vs. pos %lld, written %zd, commit %ld: rc = %zd\n",
1390 file_dentry(file)->d_name.name,
1391 vio->vui_iocb->ki_pos, pos + io->ci_bytes - ci_bytes,
1392 written, io->ci_bytes - ci_bytes, result);
1394 * Rewind ki_pos and vui_iter to where the data has been
1395 * successfully committed.
1397 vio->vui_iocb->ki_pos = pos + io->ci_bytes - ci_bytes;
1399 if (result > 0 || result == -EIOCBQUEUED) {
1400 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
1402 if (result != -EIOCBQUEUED && result < crw_bytes)
1403 io->ci_continue = 0;
1407 if (result == -EIOCBQUEUED) {
1408 io->ci_bytes += vio->u.readwrite.vui_written;
1409 vio->vui_iocb->ki_pos = pos +
1410 vio->u.readwrite.vui_written;
1417 static void vvp_io_rw_end(const struct lu_env *env,
1418 const struct cl_io_slice *ios)
1420 struct inode *inode = vvp_object_inode(ios->cis_obj);
1421 struct ll_inode_info *lli = ll_i2info(inode);
1423 trunc_sem_up_read(&lli->lli_trunc_sem);
1426 static void vvp_io_write_end(const struct lu_env *env,
1427 const struct cl_io_slice *ios)
1429 struct inode *inode = vvp_object_inode(ios->cis_obj);
1430 struct cl_io *io = ios->cis_io;
1432 vvp_io_rw_end(env, ios);
1434 /* Update size and blocks for LSOM (best effort) */
1435 if (!io->ci_ignore_layout && cl_io_is_sync_write(io))
1436 ll_merge_attr_try(env, inode);
1440 static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
1442 struct vm_fault *vmf = cfio->ft_vmf;
1444 cfio->ft_flags = ll_filemap_fault(cfio->ft_vma, vmf);
1445 cfio->ft_flags_valid = 1;
1448 /* success, vmpage is locked */
1449 LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
1450 get_vmf_address(vmf));
1451 if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
1452 lock_page(vmf->page);
1453 cfio->ft_flags |= VM_FAULT_LOCKED;
1456 cfio->ft_vmpage = vmf->page;
1461 if (cfio->ft_flags & VM_FAULT_SIGBUS) {
1462 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", get_vmf_address(vmf));
1466 if (cfio->ft_flags & VM_FAULT_OOM) {
1467 CDEBUG(D_PAGE, "got addr %p - OOM\n", get_vmf_address(vmf));
1471 if (cfio->ft_flags & VM_FAULT_RETRY)
1474 CERROR("unknown error in page fault %d\n", cfio->ft_flags);
1479 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
1480 struct pagevec *pvec)
1482 vvp_set_pagevec_dirty(pvec);
1485 static int vvp_io_fault_start(const struct lu_env *env,
1486 const struct cl_io_slice *ios)
1488 struct vvp_io *vio = cl2vvp_io(env, ios);
1489 struct cl_io *io = ios->cis_io;
1490 struct cl_object *obj = io->ci_obj;
1491 struct inode *inode = vvp_object_inode(obj);
1492 struct ll_inode_info *lli = ll_i2info(inode);
1493 struct cl_fault_io *fio = &io->u.ci_fault;
1494 struct vvp_fault_io *cfio = &vio->u.fault;
1497 struct page *vmpage = NULL;
1498 struct cl_page *page;
1504 trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
1506 /* offset of the last byte on the page */
1507 offset = ((fio->ft_index + 1) << PAGE_SHIFT) - 1;
1508 LASSERT((offset >> PAGE_SHIFT) == fio->ft_index);
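/* Illustrative example (assuming 4 KiB pages): for ft_index == 3,
 * offset = (4 << 12) - 1 = 16383, i.e. the last byte of page 3, so
 * offset >> PAGE_SHIFT == 3 == ft_index, as asserted above.
 */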
1509 result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
1513 /* must return locked page */
1514 if (fio->ft_mkwrite) {
1515 LASSERT(cfio->ft_vmpage != NULL);
1516 vmpage = cfio->ft_vmpage;
1519 * the page was truncated and the lock was cancelled, return ENODATA
1520 * so that VM_FAULT_NOPAGE will be returned to handle_mm_fault()
1521 * XXX: cannot return VM_FAULT_RETRY to vfs since we cannot
1522 * release mmap_lock and VM_FAULT_RETRY implies that the
1523 * mmap_lock is released.
1525 if (!PageUptodate(vmpage))
1526 GOTO(out, result = -ENODATA);
1528 result = vvp_io_kernel_fault(cfio);
1533 vmpage = cfio->ft_vmpage;
1534 LASSERT(PageLocked(vmpage));
1536 if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
1537 generic_error_remove_page(vmpage->mapping, vmpage);
1539 size = i_size_read(inode);
1540 /* Though we already hold a cl_lock on this page,
1541 * it can still be truncated locally.
1543 if (unlikely((vmpage->mapping != inode->i_mapping) ||
1544 (page_offset(vmpage) > size))) {
1545 CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
1547 /* return +1 to stop cl_io_loop() and ll_fault() will catch
1550 GOTO(out, result = + 1);
1553 last_index = (size - 1) >> PAGE_SHIFT;
1555 if (fio->ft_mkwrite) {
1557 * Capture the size while holding the lli_trunc_sem from above;
1558 * we want to make sure that we complete the mkwrite action
1559 * while holding this lock. We also need to make sure that we are
1560 * not past the end of the file.
1562 if (last_index < fio->ft_index) {
1564 "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
1565 vmpage->mapping, fio->ft_index, last_index);
1567 * We need to return if we are
1568 * past the end of the file. This will propagate
1569 * up the call stack to ll_page_mkwrite where
1570 * we will return VM_FAULT_NOPAGE. Any non-negative
1571 * value returned here will be silently
1572 * converted to 0. If the vmpage->mapping is null
1573 * the error code would be converted back to ENODATA
1574 * in ll_page_mkwrite0. Thus we return -ENODATA
1575 * to handle both cases
1577 GOTO(out, result = -ENODATA);
1581 page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
1583 GOTO(out, result = PTR_ERR(page));
1585 /* if the page will be written, then add it into the cache earlier. */
1586 if (fio->ft_mkwrite) {
1587 wait_on_page_writeback(vmpage);
1588 if (!PageDirty(vmpage)) {
1589 struct cl_page_list *plist = &vio->u.fault.ft_queue;
1592 /* vvp_page_assume() calls wait_on_page_writeback(). */
1593 cl_page_assume(env, io, page);
1595 cl_page_list_init(plist);
1596 cl_page_list_add(plist, page, true);
1599 if (last_index == cl_page_index(page))
1600 to = ((size - 1) & ~PAGE_MASK) + 1;
1602 /* Do not set Dirty bit here so that in case IO is
1603 * started before the page is really made dirty, we
1604 * still have a chance to detect it.
1606 result = cl_io_commit_async(env, io, plist, 0, to,
1607 mkwrite_commit_callback);
1608 /* Got the over-quota flag, so try a sync write to check
1609 * whether we are indeed out of quota
1611 if (result == -EDQUOT) {
1613 result = vvp_io_commit_sync(env, io,
1617 cl_page_own(env, io, page);
1618 cl_page_list_add(plist, page, true);
1619 lu_ref_add(&page->cp_reference,
1621 result = cl_io_commit_async(env, io,
1623 mkwrite_commit_callback);
1626 cl_page_put(env, page);
1630 LASSERT(cl_page_is_owned(page, io));
1631 cl_page_list_fini(env, plist);
1635 cl_page_discard(env, io, page);
1636 cl_page_disown(env, io, page);
1638 cl_page_put(env, page);
1640 /* we're in big trouble, what can we do now? */
1641 if (result == -EDQUOT)
1645 cl_page_disown(env, io, page);
1651 * The ft_index is only used in the case of mkwrite action. We need to
1652 * check our assertions are correct, since we should have caught this
1655 LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
1656 if (fio->ft_index == last_index)
1657 /* Last page is mapped partially. */
1658 fio->ft_bytes = size - (fio->ft_index << PAGE_SHIFT);
1660 fio->ft_bytes = PAGE_SIZE;
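/* Worked example (illustrative, assuming 4 KiB pages): for a file of
 * size 10000 bytes, last_index = (10000 - 1) >> 12 = 2; a fault on
 * ft_index == 2 yields ft_bytes = 10000 - (2 << 12) = 1808, while faults
 * on earlier pages get a full PAGE_SIZE.
 */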
1662 lu_ref_add(&page->cp_reference, "fault", io);
1663 fio->ft_page = page;
1667 /* return unlocked vmpage to avoid deadlocking */
1669 unlock_page(vmpage);
1671 cfio->ft_flags &= ~VM_FAULT_LOCKED;
1676 static void vvp_io_fault_end(const struct lu_env *env,
1677 const struct cl_io_slice *ios)
1679 struct inode *inode = vvp_object_inode(ios->cis_obj);
1680 struct ll_inode_info *lli = ll_i2info(inode);
1682 CLOBINVRNT(env, ios->cis_io->ci_obj,
1683 vvp_object_invariant(ios->cis_io->ci_obj));
1684 trunc_sem_up_read(&lli->lli_trunc_sem);
1687 static int vvp_io_fsync_start(const struct lu_env *env,
1688 const struct cl_io_slice *ios)
1690 /* mark the TOWRITE bit on each dirty page in the radix tree to verify
1691 * the pages have been written, but this is difficult because of races.
1696 static void vvp_io_fsync_end(const struct lu_env *env,
1697 const struct cl_io_slice *ios)
1699 struct inode *inode = vvp_object_inode(ios->cis_obj);
1700 struct cl_io *io = ios->cis_io;
1702 /* Update size and blocks for LSOM (best effort) */
1703 if (!io->ci_ignore_layout)
1704 ll_merge_attr_try(env, inode);
1707 static int vvp_io_read_ahead(const struct lu_env *env,
1708 const struct cl_io_slice *ios,
1709 pgoff_t start, struct cl_read_ahead *ra)
1715 if (ios->cis_io->ci_type == CIT_READ ||
1716 ios->cis_io->ci_type == CIT_FAULT) {
1717 struct vvp_io *vio = cl2vvp_io(env, ios);
1719 if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1720 ra->cra_end_idx = CL_PAGE_EOF;
1721 result = 1; /* no need to call down */
1728 static int vvp_io_lseek_lock(const struct lu_env *env,
1729 const struct cl_io_slice *ios)
1731 struct cl_io *io = ios->cis_io;
1732 __u64 lock_start = io->u.ci_lseek.ls_start;
1733 __u64 lock_end = OBD_OBJECT_EOF;
1734 __u32 enqflags = CEF_MUST; /* always take client lock */
1736 return vvp_io_one_lock(env, io, enqflags, CLM_READ,
1737 lock_start, lock_end);
1740 static int vvp_io_lseek_start(const struct lu_env *env,
1741 const struct cl_io_slice *ios)
1743 struct cl_io *io = ios->cis_io;
1744 struct inode *inode = vvp_object_inode(io->ci_obj);
1745 __u64 start = io->u.ci_lseek.ls_start;
1747 ll_inode_lock(inode);
1748 inode_dio_wait(inode);
1750 /* At the moment we hold a DLM lock, so just update the inode
1751 * to know the file size.
1753 ll_merge_attr(env, inode);
1754 if (start >= i_size_read(inode)) {
1755 io->u.ci_lseek.ls_result = -ENXIO;
1761 static void vvp_io_lseek_end(const struct lu_env *env,
1762 const struct cl_io_slice *ios)
1764 struct cl_io *io = ios->cis_io;
1765 struct inode *inode = vvp_object_inode(io->ci_obj);
1767 if (io->u.ci_lseek.ls_result > i_size_read(inode))
1768 io->u.ci_lseek.ls_result = -ENXIO;
1770 ll_inode_unlock(inode);
1773 static const struct cl_io_operations vvp_io_ops = {
1776 .cio_fini = vvp_io_fini,
1777 .cio_iter_init = vvp_io_read_iter_init,
1778 .cio_lock = vvp_io_read_lock,
1779 .cio_start = vvp_io_read_start,
1780 .cio_end = vvp_io_rw_end,
1781 .cio_advance = vvp_io_advance,
1784 .cio_fini = vvp_io_fini,
1785 .cio_iter_init = vvp_io_write_iter_init,
1786 .cio_iter_fini = vvp_io_write_iter_fini,
1787 .cio_lock = vvp_io_write_lock,
1788 .cio_start = vvp_io_write_start,
1789 .cio_end = vvp_io_write_end,
1790 .cio_advance = vvp_io_advance,
1793 .cio_fini = vvp_io_setattr_fini,
1794 .cio_iter_init = vvp_io_setattr_iter_init,
1795 .cio_lock = vvp_io_setattr_lock,
1796 .cio_start = vvp_io_setattr_start,
1797 .cio_end = vvp_io_setattr_end
1800 .cio_fini = vvp_io_fault_fini,
1801 .cio_iter_init = vvp_io_fault_iter_init,
1802 .cio_lock = vvp_io_fault_lock,
1803 .cio_start = vvp_io_fault_start,
1804 .cio_end = vvp_io_fault_end,
1807 .cio_start = vvp_io_fsync_start,
1808 .cio_fini = vvp_io_fini,
1809 .cio_end = vvp_io_fsync_end,
1812 .cio_fini = vvp_io_fini
1815 .cio_fini = vvp_io_fini
1818 .cio_fini = vvp_io_fini
1821 .cio_fini = vvp_io_fini,
1822 .cio_lock = vvp_io_lseek_lock,
1823 .cio_start = vvp_io_lseek_start,
1824 .cio_end = vvp_io_lseek_end,
1827 .cio_read_ahead = vvp_io_read_ahead
1830 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
1833 struct vvp_io *vio = vvp_env_io(env);
1834 struct inode *inode = vvp_object_inode(obj);
1837 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
1840 CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d restore needed %d\n",
1841 PFID(lu_object_fid(&obj->co_lu)),
1842 io->ci_ignore_layout, io->ci_verify_layout,
1843 vio->vui_layout_gen, io->ci_restore_needed);
1845 CL_IO_SLICE_CLEAN(vio, vui_cl);
1846 cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
1847 vio->vui_ra_valid = false;
1849 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
1851 struct ll_inode_info *lli = ll_i2info(inode);
1853 bytes = io->u.ci_rw.crw_bytes;
1854 /* "If nbyte is 0, read() will return 0 and have no other
1855 * results." -- Single Unix Spec
1860 vio->vui_tot_bytes = bytes;
1862 /* for read/write, we store the process jobid/gid/uid in the
1863 * inode, and it'll be fetched by osc when building RPC.
1865 * it's not accurate if the file is shared by different
1868 lustre_get_jobid(lli->lli_jobid, sizeof(lli->lli_jobid));
1869 lli->lli_uid = from_kuid(&init_user_ns, current_uid());
1870 lli->lli_gid = from_kgid(&init_user_ns, current_gid());
1871 } else if (io->ci_type == CIT_SETATTR) {
1872 if (!cl_io_is_trunc(io))
1873 io->ci_lockreq = CILR_MANDATORY;
1876 /* Enqueue the layout lock and get the layout version. We need to do this
1877 * even for operations requiring the file to be open, such as read and write,
1878 * because the layout lock might not be granted in IT_OPEN.
1880 if (result == 0 && !io->ci_ignore_layout) {
1881 result = ll_layout_refresh(inode, &vio->vui_layout_gen);
1882 if (result == -ENOENT)
1883 /* If the inode on MDS has been removed, but the objects
1884 * on OSTs haven't been destroyed (async unlink), layout
1885 * fetch will return -ENOENT; we ignore this error
1886 * and continue with dirty flush. LU-3230.
1890 CERROR("%s: refresh file layout " DFID " error %d.\n",
1891 ll_i2sbi(inode)->ll_fsname,
1892 PFID(lu_object_fid(&obj->co_lu)), result);
1895 #ifdef HAVE_INVALIDATE_LOCK
1896 if (io->ci_invalidate_page_cache)
1897 filemap_invalidate_lock(inode->i_mapping);
1898 #endif /* HAVE_INVALIDATE_LOCK */
1900 io->ci_result = result < 0 ? result : 0;