4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * Implementation of cl_io for VVP layer.
33 * Author: Nikita Danilov <nikita.danilov@sun.com>
34 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
37 #define DEBUG_SUBSYSTEM S_LLITE
40 #include <linux/pagevec.h>
41 #include <linux/memcontrol.h>
42 #include <linux/falloc.h>
44 #include "llite_internal.h"
45 #include "vvp_internal.h"
46 #include <lustre_compat.h>
47 #include <libcfs/linux/linux-misc.h>
49 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
50 const struct cl_io_slice *slice)
54 vio = container_of(slice, struct vvp_io, vui_cl);
55 LASSERT(vio == vvp_env_io(env));
60 /* For layout swap: the file's layout may have changed. To avoid
61 * populating pages on the wrong stripe, we have to verify that the
62 * layout is still valid. This works because layout-swapping processes
63 * must hold the group lock.
64 */
65 static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
68 struct ll_inode_info *lli = ll_i2info(inode);
69 struct vvp_io *vio = vvp_env_io(env);
72 switch (io->ci_type) {
75 /* no lock is needed here to check lli_layout_gen, as we hold the
76 * extent lock, and the GROUP lock must be held to swap the layout
77 */
78 if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
79 CFS_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
80 io->ci_need_restart = 1;
81 /* this will cause a short read/write */
86 /* fault is okay because we've already had a page. */
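/* When this returns false, the caller gives up on the current iteration
 * and cl_io_loop() restarts the whole IO against the new layout, driven
 * by the io->ci_need_restart flag set above.
 */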
94 static void vvp_object_size_lock(struct cl_object *obj)
96 struct inode *inode = vvp_object_inode(obj);
98 ll_inode_size_lock(inode);
99 cl_object_attr_lock(obj);
102 static void vvp_object_size_unlock(struct cl_object *obj)
104 struct inode *inode = vvp_object_inode(obj);
106 cl_object_attr_unlock(obj);
107 ll_inode_size_unlock(inode);
110 /**
111 * Helper function that adjusts the file size (inode->i_size), if necessary,
112 * when the position at offset \a pos is accessed. The file size can be
113 * arbitrarily stale on a Lustre client, but the client at least knows the
114 * KMS. If the accessed area is inside [0, KMS], set the file size to KMS;
115 * otherwise glimpse the file size.
116 *
117 * Locking: i_size_lock is used to serialize changes to the inode size and to
118 * protect consistency between the inode size and the cl_object
119 * attributes. cl_object_size_lock() protects consistency between cl_attr's of
120 * the top-object and sub-objects.
121 */
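/* In outline, the decision below amounts to the following sketch
 * (illustrative only, not a verbatim excerpt of the code):
 *
 *	if (pos > kms || !attr->cat_kms_valid)
 *		glimpse the file size under a DLM lock;	// cases (B)/(C)
 *	else if (i_size_read(inode) < kms)
 *		i_size_write(inode, kms);		// case (A)
 */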
121 static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
122 struct cl_io *io, loff_t start, size_t bytes,
125 struct cl_attr *attr = vvp_env_thread_attr(env);
126 struct inode *inode = vvp_object_inode(obj);
127 loff_t pos = start + bytes - 1;
131 /*
132 * Consistency guarantees: the following possibilities exist for the
133 * relation between the region being accessed and the real file size
134 * at this moment:
135 *
136 * (A): the region is completely inside the file;
137 *
138 * (B-x): x bytes of the region are inside the file, the rest is
139 * outside;
140 *
141 * (C): the region is completely outside the file.
142 *
143 * This classification is stable under the DLM lock already acquired by
144 * the caller, because to change the class, another client has to take a
145 * DLM lock conflicting with ours. Also, any updates to ->i_size
146 * by other threads on this client are serialized by
147 * ll_inode_size_lock(). This guarantees that short reads are handled
148 * correctly in the face of concurrent writes and truncates.
149 */
150 vvp_object_size_lock(obj);
151 result = cl_object_attr_get(env, obj, attr);
154 if (pos > kms || !attr->cat_kms_valid) {
155 /*
156 * A glimpse is necessary to determine whether we
157 * return a short read (B) or some zeroes at the end
158 * of the file (C).
159 */
160 vvp_object_size_unlock(obj);
161 result = cl_glimpse_lock(env, io, inode, obj, 0);
162 if (result == 0 && exceed != NULL) {
163 /* If the objective page index exceeds the end-of-file
164 * page index, return directly. Do not expect the
165 * kernel to check such a case correctly.
166 */
167 loff_t size = i_size_read(inode);
168 unsigned long cur_index = start >>
171 if ((size == 0 && cur_index != 0) ||
172 (((size - 1) >> PAGE_SHIFT) <
179 /*
180 * The region is within kms and, hence, within the real file
181 * size (A). We need to increase i_size to cover the
182 * read region so that generic_file_read() will do its
183 * job, but that doesn't mean the kms size is
184 * _correct_; it is only the _minimum_ size. If
185 * someone does a stat they will get the correct size,
186 * which will always be >= the kms value here.
187 */
189 if (i_size_read(inode) < kms) {
190 i_size_write(inode, kms);
192 DFID" updating i_size %llu\n",
193 PFID(lu_object_fid(&obj->co_lu)),
194 (__u64)i_size_read(inode));
198 vvp_object_size_unlock(obj);
204 static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
205 __u32 enqflags, enum cl_lock_mode mode,
206 pgoff_t start, pgoff_t end)
208 struct vvp_io *vio = vvp_env_io(env);
209 struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
210 struct cl_object *obj = io->ci_obj;
212 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
215 CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
217 memset(&vio->vui_link, 0, sizeof(vio->vui_link));
219 if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
220 descr->cld_mode = CLM_GROUP;
221 descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
222 enqflags |= CEF_LOCK_MATCH;
224 descr->cld_mode = mode;
227 descr->cld_obj = obj;
228 descr->cld_start = start;
229 descr->cld_end = end;
230 descr->cld_enq_flags = enqflags;
232 cl_io_lock_add(env, io, &vio->vui_link);
237 static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
238 __u32 enqflags, enum cl_lock_mode mode,
239 loff_t start, loff_t end)
241 return vvp_io_one_lock_index(env, io, enqflags, mode,
242 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
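/* Byte offsets are converted to page indices above, so a lock over bytes
 * [start, end] covers the whole pages [start >> PAGE_SHIFT,
 * end >> PAGE_SHIFT]; e.g. with 4 KiB pages, bytes [1000, 9000] become
 * page indices [0, 2].
 */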
245 static int vvp_io_write_iter_init(const struct lu_env *env,
246 const struct cl_io_slice *ios)
248 struct vvp_io *vio = cl2vvp_io(env, ios);
250 cl_page_list_init(&vio->u.readwrite.vui_queue);
251 vio->u.readwrite.vui_written = 0;
252 vio->u.readwrite.vui_from = 0;
253 vio->u.readwrite.vui_to = PAGE_SIZE;
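/* vui_from/vui_to default to a full page here; partial first and last
 * pages are clipped later, when the queued pages are committed (see
 * vvp_io_write_commit() and vvp_io_commit_sync() below).
 */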
258 static int vvp_io_read_iter_init(const struct lu_env *env,
259 const struct cl_io_slice *ios)
261 struct vvp_io *vio = cl2vvp_io(env, ios);
263 vio->u.readwrite.vui_read = 0;
268 static void vvp_io_write_iter_fini(const struct lu_env *env,
269 const struct cl_io_slice *ios)
271 struct vvp_io *vio = cl2vvp_io(env, ios);
273 LASSERT(vio->u.readwrite.vui_queue.pl_nr == 0);
276 static int vvp_io_fault_iter_init(const struct lu_env *env,
277 const struct cl_io_slice *ios)
279 struct vvp_io *vio = cl2vvp_io(env, ios);
280 struct inode *inode = vvp_object_inode(ios->cis_obj);
282 LASSERT(inode == file_inode(vio->vui_fd->fd_file));
287 static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
289 struct cl_io *io = ios->cis_io;
290 struct cl_object *obj = io->ci_obj;
291 struct vvp_io *vio = cl2vvp_io(env, ios);
292 struct inode *inode = vvp_object_inode(obj);
298 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
300 CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d need write layout %d, restore needed %d, invalidate_lock %d\n",
301 PFID(lu_object_fid(&obj->co_lu)),
302 io->ci_ignore_layout, io->ci_verify_layout,
303 vio->vui_layout_gen, io->ci_need_write_intent,
304 io->ci_restore_needed, io->ci_invalidate_page_cache);
306 #ifdef HAVE_INVALIDATE_LOCK
307 if (io->ci_invalidate_page_cache) {
308 filemap_invalidate_unlock(inode->i_mapping);
309 io->ci_invalidate_page_cache = 0;
311 #endif /* HAVE_INVALIDATE_LOCK */
313 if (io->ci_restore_needed) {
314 /* the file was detected as released; we need to restore it
315 * before finishing the IO
316 */
317 rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
318 /* if restore registration failed there is no restart and we will
319 * return -ENODATA.
320 *
321 * The layout will change after restore, so we need to block on the
322 * layout lock held by the MDT; as the MDT will not send the new
323 * layout in the lvb (see LU-3124), we have to fetch it explicitly,
324 * and all of this will be done by ll_layout_refresh().
325 *
326 * Even if ll_layout_restore() returns zero, it doesn't mean that the
327 * restore has been successful. Therefore it sets ci_verify_layout so
328 * that the layout will be checked at the end of this function.
329 */
332 io->ci_restore_needed = 1;
333 io->ci_need_restart = 0;
334 io->ci_verify_layout = 0;
339 io->ci_restore_needed = 0;
341 /* Even if ll_layout_restore() returns zero, it doesn't mean that
342 * the restore has been successful. Therefore verify whether there
343 * was a layout change and restart the I/O correspondingly.
344 */
345 ll_layout_refresh(inode, &gen);
346 io->ci_need_restart = vio->vui_layout_gen != gen;
347 if (io->ci_need_restart) {
349 DFID" layout changed from %d to %d.\n",
350 PFID(lu_object_fid(&obj->co_lu)),
351 vio->vui_layout_gen, gen);
352 /* today successful restore is the only possible case */
353 /* restore was done, clear restoring state */
354 clear_bit(LLIF_FILE_RESTORING,
355 &ll_i2info(vvp_object_inode(obj))->lli_flags);
360 /* dynamic layout change needed, send layout intent RPC. */
361 if (io->ci_need_write_intent || io->ci_need_pccro_clear) {
362 enum layout_intent_opc opc = LAYOUT_INTENT_WRITE;
364 io->ci_need_write_intent = 0;
366 LASSERT(io->ci_type == CIT_WRITE || cl_io_is_fallocate(io) ||
367 cl_io_is_trunc(io) || cl_io_is_mkwrite(io));
369 CDEBUG(D_VFSTRACE, DFID" write layout, type %u "DEXT"\n",
370 PFID(lu_object_fid(&obj->co_lu)), io->ci_type,
371 PEXT(&io->ci_write_intent));
373 if (cl_io_is_trunc(io))
374 opc = LAYOUT_INTENT_TRUNC;
376 if (io->ci_need_pccro_clear) {
377 io->ci_need_pccro_clear = 0;
378 opc = LAYOUT_INTENT_PCCRO_CLEAR;
381 rc = ll_layout_write_intent(inode, opc, &io->ci_write_intent);
384 io->ci_need_restart = 1;
388 if (!io->ci_need_restart &&
389 !io->ci_ignore_layout && io->ci_verify_layout) {
390 /* check layout version */
391 ll_layout_refresh(inode, &gen);
392 io->ci_need_restart = vio->vui_layout_gen != gen;
393 if (io->ci_need_restart) {
395 DFID" layout changed from %d to %d.\n",
396 PFID(lu_object_fid(&obj->co_lu)),
397 vio->vui_layout_gen, gen);
405 static void vvp_io_fault_fini(const struct lu_env *env,
406 const struct cl_io_slice *ios)
408 struct cl_io *io = ios->cis_io;
409 struct cl_page *page = io->u.ci_fault.ft_page;
411 CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
414 cl_page_put(env, page);
415 io->u.ci_fault.ft_page = NULL;
417 vvp_io_fini(env, ios);
420 static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
422 /* we only want to hold PW locks if the mmap() can generate
423 * writes back to the file, and that only happens in shared
424 * mappings
425 */
426 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
431 static int vvp_mmap_locks(const struct lu_env *env,
432 struct vvp_io *vio, struct cl_io *io)
434 struct vvp_thread_info *vti = vvp_env_info(env);
435 struct mm_struct *mm = current->mm;
436 struct vm_area_struct *vma;
437 struct cl_lock_descr *descr = &vti->vti_descr;
438 union ldlm_policy_data policy;
447 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
449 /* NFS or loopback device write */
450 if (vio->vui_iter == NULL)
453 /* No MM (e.g. NFS)? Then no VMAs either. */
457 if (!iter_is_iovec(vio->vui_iter) && !iov_iter_is_kvec(vio->vui_iter))
460 for (i = *vio->vui_iter;
462 iov_iter_advance(&i, iov.iov_len)) {
463 iov = iov_iter_iovec(&i);
464 addr = (unsigned long)iov.iov_base;
470 bytes += addr & ~PAGE_MASK;
474 while ((vma = our_vma(mm, addr, bytes)) != NULL) {
475 struct dentry *de = file_dentry(vma->vm_file);
476 struct inode *inode = de->d_inode;
477 int flags = CEF_MUST;
479 if (ll_file_nolock(vma->vm_file)) {
480 /* the nolock case is not allowed for mmap */
485 /*
486 * XXX: Required lock mode can be weakened: CIT_WRITE
487 * io only ever reads the user-level buffer, and CIT_READ
488 * only writes to it.
489 */
490 policy_from_vma(&policy, vma, addr, bytes);
491 descr->cld_mode = vvp_mode_from_vma(vma);
492 descr->cld_obj = ll_i2info(inode)->lli_clob;
493 descr->cld_start = policy.l_extent.start >> PAGE_SHIFT;
494 descr->cld_end = policy.l_extent.end >> PAGE_SHIFT;
495 descr->cld_enq_flags = flags;
496 result = cl_io_lock_alloc_add(env, io, descr);
498 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
499 descr->cld_mode, descr->cld_start,
505 if (vma->vm_end - addr >= bytes)
508 bytes -= vma->vm_end - addr;
511 mmap_read_unlock(mm);
518 static void vvp_io_advance(const struct lu_env *env,
519 const struct cl_io_slice *ios, size_t bytes)
521 struct cl_object *obj = ios->cis_io->ci_obj;
522 struct vvp_io *vio = cl2vvp_io(env, ios);
524 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
526 /*
527 * Since 3.16 (26978b8b4) the VFS reverts the iov_iter to its
528 * original position even when the IO succeeds, so instead
529 * of relying on the VFS, we advance the iov_iter ourselves.
530 */
531 iov_iter_advance(vio->vui_iter, bytes);
532 CDEBUG(D_VFSTRACE, "advancing %ld bytes\n", bytes);
533 vio->vui_tot_bytes -= bytes;
534 iov_iter_reexpand(vio->vui_iter, vio->vui_tot_bytes);
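/* Example: if a write consumed 8192 bytes of a 3-page iter, the iter is
 * advanced past those bytes and then re-expanded to the remaining
 * vui_tot_bytes, so the next cycle of the IO loop sees only the
 * unconsumed tail.
 */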
537 static void vvp_io_update_iov(const struct lu_env *env,
538 struct vvp_io *vio, struct cl_io *io)
540 size_t size = io->u.ci_rw.crw_bytes;
545 iov_iter_truncate(vio->vui_iter, size);
548 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
549 enum cl_lock_mode mode, loff_t start, loff_t end)
551 struct vvp_io *vio = vvp_env_io(env);
555 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
558 vvp_io_update_iov(env, vio, io);
560 if (io->u.ci_rw.crw_nonblock)
561 ast_flags |= CEF_NONBLOCK;
562 if (io->ci_lock_no_expand)
563 ast_flags |= CEF_LOCK_NO_EXPAND;
567 /* a group lock held means no more lockless IO */
568 if (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
571 flags = iocb_ki_flags_get(vio->vui_iocb->ki_filp,
573 if (ll_file_nolock(vio->vui_fd->fd_file) ||
574 (iocb_ki_flags_check(flags, DIRECT) &&
576 ast_flags |= CEF_NEVER;
579 result = vvp_mmap_locks(env, vio, io);
581 result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
586 static int vvp_io_read_lock(const struct lu_env *env,
587 const struct cl_io_slice *ios)
589 struct cl_io *io = ios->cis_io;
590 struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
594 result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
595 rd->crw_pos + rd->crw_bytes - 1);
599 static int vvp_io_fault_lock(const struct lu_env *env,
600 const struct cl_io_slice *ios)
602 struct cl_io *io = ios->cis_io;
603 struct vvp_io *vio = cl2vvp_io(env, ios);
604 /* XXX LDLM_FL_CBPENDING */
605 return vvp_io_one_lock_index(env,
607 vvp_mode_from_vma(vio->u.fault.ft_vma),
608 io->u.ci_fault.ft_index,
609 io->u.ci_fault.ft_index);
612 static int vvp_io_write_lock(const struct lu_env *env,
613 const struct cl_io_slice *ios)
615 struct cl_io *io = ios->cis_io;
619 if (io->u.ci_wr.wr_append) {
621 end = OBD_OBJECT_EOF;
623 start = io->u.ci_wr.wr.crw_pos;
624 end = start + io->u.ci_wr.wr.crw_bytes - 1;
627 RETURN(vvp_io_rw_lock(env, io, CLM_WRITE, start, end));
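/* Note that an append write must lock out to OBD_OBJECT_EOF: the actual
 * write position is only known once the lock is held and i_size is
 * stable.
 */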
630 static int vvp_io_setattr_iter_init(const struct lu_env *env,
631 const struct cl_io_slice *ios)
637 /**
638 * Implementation of the cl_io_operations::cio_lock() method for CIT_SETATTR io.
639 *
640 * Handles "lockless io" mode, when extent locking is done by the server.
641 */
642 static int vvp_io_setattr_lock(const struct lu_env *env,
643 const struct cl_io_slice *ios)
645 struct cl_io *io = ios->cis_io;
646 __u64 lock_start = 0;
647 __u64 lock_end = OBD_OBJECT_EOF;
650 if (cl_io_is_trunc(io)) {
651 struct inode *inode = vvp_object_inode(io->ci_obj);
653 /* set enqueue flags to CEF_MUST in case of an encrypted file,
654 * to prevent lockless truncate
655 */
656 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
658 else if (io->u.ci_setattr.sa_attr.lvb_size == 0)
659 enqflags = CEF_DISCARD_DATA;
660 } else if (cl_io_is_fallocate(io)) {
661 lock_start = io->u.ci_setattr.sa_falloc_offset;
662 lock_end = io->u.ci_setattr.sa_falloc_end - 1;
664 unsigned int valid = io->u.ci_setattr.sa_avalid;
666 if (!(valid & TIMES_SET_FLAGS))
669 if ((!(valid & ATTR_MTIME) ||
670 io->u.ci_setattr.sa_attr.lvb_mtime >=
671 io->u.ci_setattr.sa_attr.lvb_ctime) &&
672 (!(valid & ATTR_ATIME) ||
673 io->u.ci_setattr.sa_attr.lvb_atime >=
674 io->u.ci_setattr.sa_attr.lvb_ctime))
678 return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
679 lock_start, lock_end);
682 static int vvp_do_vmtruncate(struct inode *inode, size_t size)
686 /* Only ll_inode_size_lock is taken at this level. */
687 ll_inode_size_lock(inode);
688 result = inode_newsize_ok(inode, size);
690 ll_inode_size_unlock(inode);
693 i_size_write(inode, size);
695 ll_truncate_pagecache(inode, size);
696 ll_inode_size_unlock(inode);
700 static int vvp_io_setattr_time(const struct lu_env *env,
701 const struct cl_io_slice *ios)
703 struct cl_io *io = ios->cis_io;
704 struct cl_object *obj = io->ci_obj;
705 struct cl_attr *attr = vvp_env_thread_attr(env);
707 unsigned int valid = CAT_CTIME;
709 cl_object_attr_lock(obj);
710 attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
711 if (io->u.ci_setattr.sa_avalid & ATTR_ATIME_SET) {
712 attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
715 if (io->u.ci_setattr.sa_avalid & ATTR_MTIME_SET) {
716 attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
719 result = cl_object_attr_update(env, obj, attr, valid);
720 cl_object_attr_unlock(obj);
725 static int vvp_io_setattr_start(const struct lu_env *env,
726 const struct cl_io_slice *ios)
728 struct cl_io *io = ios->cis_io;
729 struct inode *inode = vvp_object_inode(io->ci_obj);
730 struct ll_inode_info *lli = ll_i2info(inode);
731 int mode = io->u.ci_setattr.sa_falloc_mode;
733 if (cl_io_is_trunc(io)) {
734 trunc_sem_down_write(&lli->lli_trunc_sem);
735 mutex_lock(&lli->lli_setattr_mutex);
736 inode_dio_wait(inode);
737 } else if (cl_io_is_fallocate(io)) {
740 trunc_sem_down_write(&lli->lli_trunc_sem);
741 mutex_lock(&lli->lli_setattr_mutex);
742 inode_dio_wait(inode);
744 ll_merge_attr(env, inode);
745 size = i_size_read(inode);
746 if (io->u.ci_setattr.sa_falloc_end > size &&
747 !(mode & FALLOC_FL_KEEP_SIZE)) {
748 size = io->u.ci_setattr.sa_falloc_end;
749 io->u.ci_setattr.sa_avalid |= ATTR_SIZE;
751 io->u.ci_setattr.sa_attr.lvb_size = size;
753 mutex_lock(&lli->lli_setattr_mutex);
756 if (io->u.ci_setattr.sa_avalid & TIMES_SET_FLAGS)
757 return vvp_io_setattr_time(env, ios);
762 static void vvp_io_setattr_end(const struct lu_env *env,
763 const struct cl_io_slice *ios)
765 struct cl_io *io = ios->cis_io;
766 struct inode *inode = vvp_object_inode(io->ci_obj);
767 struct ll_inode_info *lli = ll_i2info(inode);
768 loff_t size = io->u.ci_setattr.sa_attr.lvb_size;
770 if (cl_io_is_trunc(io)) {
771 /* Truncate in-memory pages; they must be clean pages,
772 * because osc has already been notified to destroy the osc_extents.
773 */
774 vvp_do_vmtruncate(inode, size);
775 mutex_unlock(&lli->lli_setattr_mutex);
776 trunc_sem_up_write(&lli->lli_trunc_sem);
778 /* Update size and blocks for LSOM */
779 if (!io->ci_ignore_layout)
780 ll_merge_attr(env, inode);
781 } else if (cl_io_is_fallocate(io)) {
782 int mode = io->u.ci_setattr.sa_falloc_mode;
784 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
785 size > i_size_read(inode)) {
786 ll_inode_size_lock(inode);
787 i_size_write(inode, size);
788 ll_inode_size_unlock(inode);
790 inode_set_ctime_current(inode);
791 mutex_unlock(&lli->lli_setattr_mutex);
792 trunc_sem_up_write(&lli->lli_trunc_sem);
794 mutex_unlock(&lli->lli_setattr_mutex);
798 static void vvp_io_setattr_fini(const struct lu_env *env,
799 const struct cl_io_slice *ios)
801 bool restore_needed = ios->cis_io->ci_restore_needed;
802 struct inode *inode = vvp_object_inode(ios->cis_obj);
804 vvp_io_fini(env, ios);
806 if (restore_needed && !ios->cis_io->ci_restore_needed) {
807 /* restore finished, set data modified flag for HSM */
808 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
812 static int vvp_io_read_start(const struct lu_env *env,
813 const struct cl_io_slice *ios)
815 struct vvp_io *vio = cl2vvp_io(env, ios);
816 struct cl_io *io = ios->cis_io;
817 struct cl_object *obj = io->ci_obj;
818 struct inode *inode = vvp_object_inode(obj);
819 struct ll_inode_info *lli = ll_i2info(inode);
820 struct file *file = vio->vui_fd->fd_file;
821 loff_t pos = io->u.ci_rd.rd.crw_pos;
822 size_t crw_bytes = io->u.ci_rd.rd.crw_bytes;
823 size_t tot_bytes = vio->vui_tot_bytes;
824 struct ll_cl_context *lcc;
828 int total_bytes_read = 0;
829 struct iov_iter iter;
835 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
837 CDEBUG(D_VFSTRACE, "%s: read [%llu, %llu)\n",
838 file_dentry(file)->d_name.name,
839 pos, pos + crw_bytes);
841 trunc_sem_down_read(&lli->lli_trunc_sem);
843 if (io->ci_async_readahead) {
848 if (!can_populate_pages(env, io, inode))
851 flags = iocb_ki_flags_get(file, vio->vui_iocb);
852 if (!iocb_ki_flags_check(flags, DIRECT)) {
853 result = cl_io_lru_reserve(env, io, pos, crw_bytes);
858 /* Unless this read is for a sparse file, the lock has already
859 * been acquired, so vvp_prep_size() is a no-op.
860 */
861 result = vvp_prep_size(env, obj, io, pos, crw_bytes, &exceed);
864 else if (exceed != 0)
867 LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
868 "Read ino %lu, %zu bytes, offset %lld, size %llu\n",
869 inode->i_ino, crw_bytes, pos, i_size_read(inode));
871 /* initialize read-ahead window once per syscall */
872 if (!vio->vui_ra_valid) {
873 vio->vui_ra_valid = true;
874 vio->vui_ra_start_idx = pos >> PAGE_SHIFT;
875 vio->vui_ra_pages = 0;
876 page_offset = pos & ~PAGE_MASK;
879 if (tot_bytes > PAGE_SIZE - page_offset)
880 tot_bytes -= (PAGE_SIZE - page_offset);
884 vio->vui_ra_pages += (tot_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
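/* Worked example (assuming 4 KiB pages and that the elided branch above
 * counts the first, partial page): a 10000-byte read at offset 100 has
 * page_offset = 100, leaving 10000 - 3996 = 6004 bytes after the first
 * page, which round up to 2 more pages, i.e. 3 read-ahead pages total.
 */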
886 CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
887 vio->vui_tot_bytes, vio->vui_ra_start_idx,
893 LASSERT(vio->vui_iocb->ki_pos == pos);
894 iter = *vio->vui_iter;
896 lcc = ll_cl_find(inode);
897 lcc->lcc_end_index = DIV_ROUND_UP(pos + iter.count, PAGE_SIZE);
898 CDEBUG(D_VFSTRACE, "count:%ld iocb pos:%lld\n", iter.count, pos);
900 /* this seqlock lets us notice if a page has been deleted on this inode
901 * during the fault process, allowing us to catch an erroneous short
902 * read or EIO. See LU-16160.
903 */
905 seq = read_seqbegin(&ll_i2info(inode)->lli_page_inv_lock);
906 result = generic_file_read_iter(vio->vui_iocb, &iter);
908 io->ci_bytes += result;
909 total_bytes_read += result;
911 /* got a short read or -EIO and we raced with page invalidation retry */
912 } while (read_seqretry(&ll_i2info(inode)->lli_page_inv_lock, seq) &&
913 ((result >= 0 && iov_iter_count(&iter) > 0)
918 if (total_bytes_read < crw_bytes)
921 } else if (result == -EIOCBQUEUED) {
922 io->ci_bytes += vio->u.readwrite.vui_read;
923 vio->vui_iocb->ki_pos = pos + vio->u.readwrite.vui_read;
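/* Synchronously submit the queued pages for write, clipping partial
 * first/last pages, and return the number of bytes actually written or
 * a negative errno if nothing was committed.
 */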
929 static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
930 struct cl_page_list *plist, int from, int to)
932 struct cl_2queue *queue = &io->ci_queue;
933 struct cl_page *page;
934 unsigned int bytes = 0;
939 if (plist->pl_nr == 0)
942 if (from > 0 || to != PAGE_SIZE) {
943 page = cl_page_list_first(plist);
944 if (plist->pl_nr == 1) {
945 cl_page_clip(env, page, from, to);
948 cl_page_clip(env, page, from, PAGE_SIZE);
949 if (to != PAGE_SIZE) {
950 page = cl_page_list_last(plist);
951 cl_page_clip(env, page, 0, to);
956 cl_2queue_init(queue);
957 cl_page_list_splice(plist, &queue->c2_qin);
958 rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
960 /* plist is not sorted any more */
961 cl_page_list_splice(&queue->c2_qin, plist);
962 cl_page_list_splice(&queue->c2_qout, plist);
963 cl_2queue_fini(env, queue);
966 /* calculate bytes */
967 bytes = plist->pl_nr << PAGE_SHIFT;
968 bytes -= from + PAGE_SIZE - to;
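/* e.g. 3 pages with from = 512 and to = 2048 (4 KiB pages):
 * bytes = 3 * 4096 - (512 + 4096 - 2048) = 12288 - 2560 = 9728
 */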
970 while (plist->pl_nr > 0) {
971 page = cl_page_list_first(plist);
972 cl_page_list_del(env, plist, page, true);
974 cl_page_clip(env, page, 0, PAGE_SIZE);
976 SetPageUptodate(cl_page_vmpage(page));
977 cl_page_disown(env, io, page);
979 /* held in ll_cl_init() */
980 cl_page_put(env, page);
984 RETURN(bytes > 0 ? bytes : rc);
987 /*
988 * From kernel v4.19-rc5-248-g9b89a0355144 use XArray;
989 * prior kernels use radix_tree for tags
990 */
991 static inline void ll_page_tag_dirty(struct page *page,
992 struct address_space *mapping)
994 #ifndef HAVE_RADIX_TREE_TAG_SET
995 __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
997 radix_tree_tag_set(&mapping->page_tree, page_index(page),
998 PAGECACHE_TAG_DIRTY);
1002 /*
1003 * Kernels 4.2 - 4.5 pass a memcg argument to account_page_dirtied();
1004 * kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
1005 */
1006 static inline void ll_account_page_dirtied(struct page *page,
1007 struct address_space *mapping)
1009 #ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
1010 struct mem_cgroup *memcg = mem_cgroup_begin_page_stat(page);
1012 account_page_dirtied(page, mapping, memcg);
1013 mem_cgroup_end_page_stat(memcg);
1014 #elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT)
1015 account_page_dirtied(page, mapping);
1017 vvp_account_page_dirtied(page, mapping);
1019 ll_page_tag_dirty(page, mapping);
1022 /* Taken from kernel set_page_dirty, __set_page_dirty_nobuffers
1023 * Last change to this area: b93b016313b3ba8003c3b8bb71f569af91f19fc7
1024 *
1025 * Current with Linus tip of tree (7/13/2019):
1026 * v5.2-rc4-224-ge01e060fe0
1027 *
1028 * Backwards compat for 3.x and 5.x kernels relating to memcg handling
1029 * & the rename of radix tree to xarray.
1030 */
1031 static void vvp_set_batch_dirty(struct folio_batch *fbatch)
1033 struct page *page = fbatch_at_pg(fbatch, 0, 0);
1034 int count = folio_batch_count(fbatch);
1036 #if !defined(HAVE_FOLIO_BATCH) || defined(HAVE_KALLSYMS_LOOKUP_NAME)
1039 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
1040 struct address_space *mapping = page->mapping;
1041 unsigned long flags;
1042 unsigned long skip_pages = 0;
1049 BUILD_BUG_ON(PAGEVEC_SIZE > BITS_PER_LONG);
1050 LASSERTF(page->mapping,
1051 "mapping must be set. page %px, page->private (cl_page) %px\n",
1052 page, (void *) page->private);
1054 /*
1055 * kernels without HAVE_KALLSYMS_LOOKUP_NAME also don't have
1056 * account_page_dirtied exported, and if we can't access that symbol,
1057 * we can't do page dirtying in batch (taking the xarray lock only once),
1058 * so we just fall back to a looped call to __set_page_dirty_nobuffers
1059 */
1060 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
1061 if (!vvp_account_page_dirtied) {
1062 for (i = 0; i < count; i++) {
1063 #ifdef HAVE_FOLIO_BATCH
1064 filemap_dirty_folio(page->mapping, fbatch->folios[i]);
1066 npgs = fbatch_at_npgs(fbatch, i);
1067 for (pg = 0; pg < npgs; pg++) {
1068 page = fbatch_at_pg(fbatch, i, pg);
1069 __set_page_dirty_nobuffers(page);
1077 /* account_page_dirtied is available directly or via kallsyms */
1078 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
1079 for (pgno = i = 0; i < count; i++) {
1080 npgs = fbatch_at_npgs(fbatch, i);
1081 for (pg = 0; pg < npgs; pg++) {
1082 page = fbatch_at_pg(fbatch, i, pg);
1084 ClearPageReclaim(page);
1086 vvp_lock_page_memcg(page);
1087 if (TestSetPageDirty(page)) {
1088 /* page is already dirty; no extra work needed.
1089 * set a flag for the i'th page to be skipped
1090 */
1091 vvp_unlock_page_memcg(page);
1092 skip_pages |= (1ul << pgno++);
1093 LASSERTF(pgno <= BITS_PER_LONG,
1094 "Limit exceeded pgno: %d/%d\n", pgno,
1100 ll_xa_lock_irqsave(&mapping->i_pages, flags);
1102 /* Notes on differences with __set_page_dirty_nobuffers:
1103 * 1. We don't need to call page_mapping because we know this is a
1104 * page cache page.
1105 * 2. We have the pages locked, so there is no need for the careful
1106 * mapping/mapping2 dance.
1107 * 3. No mapping is impossible. (A race w/truncate mentioned in
1108 * dirty_nobuffers should be impossible because we hold the page lock.)
1109 * 4. All mappings are the same because i/o is only to one file.
1110 */
1111 for (pgno = i = 0; i < count; i++) {
1112 npgs = fbatch_at_npgs(fbatch, i);
1113 for (pg = 0; pg < npgs; pg++) {
1114 page = fbatch_at_pg(fbatch, i, pg);
1115 /* if the i'th page was unlocked above, skip it here */
1116 if ((skip_pages >> pgno++) & 1)
1119 LASSERTF(page->mapping == mapping,
1120 "all pages must have the same mapping. page %px, mapping %px, first mapping %px\n",
1121 page, page->mapping, mapping);
1122 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1123 ll_account_page_dirtied(page, mapping);
1125 vvp_unlock_page_memcg(page);
1128 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
1130 CDEBUG(D_VFSTRACE, "mapping %p, count %d, dirtied %d\n", mapping,
1133 if (mapping->host && dirtied) {
1134 /* !PageAnon && !swapper_space */
1135 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1141 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
1142 struct folio_batch *fbatch)
1144 struct page *vmpage;
1145 struct cl_page *page;
1152 count = folio_batch_count(fbatch);
1155 for (i = 0; i < count; i++) {
1156 npgs = fbatch_at_npgs(fbatch, i);
1157 for (pg = 0; pg < npgs; pg++)
1158 SetPageUptodate(fbatch_at_pg(fbatch, i, pg));
1161 vvp_set_batch_dirty(fbatch);
1163 for (i = 0; i < count; i++) {
1164 npgs = fbatch_at_npgs(fbatch, i);
1165 for (pg = 0; pg < npgs; pg++) {
1166 vmpage = fbatch_at_pg(fbatch, i, pg);
1167 page = (struct cl_page *) vmpage->private;
1168 cl_page_disown(env, io, page);
1169 cl_page_put(env, page);
1176 /* make sure the page list is contiguous */
1177 static bool page_list_sanity_check(struct cl_object *obj,
1178 struct cl_page_list *plist)
1180 struct cl_page *page;
1181 pgoff_t index = CL_PAGE_EOF;
1183 cl_page_list_for_each(page, plist) {
1184 if (index == CL_PAGE_EOF) {
1185 index = cl_page_index(page);
1190 if (index == cl_page_index(page))
1198 /* Return how many bytes have been queued or written */
1199 int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
1201 struct cl_object *obj = io->ci_obj;
1202 struct inode *inode = vvp_object_inode(obj);
1203 struct vvp_io *vio = vvp_env_io(env);
1204 struct cl_page_list *queue = &vio->u.readwrite.vui_queue;
1205 struct cl_page *page;
1208 unsigned int npages = vio->u.readwrite.vui_queue.pl_nr;
1215 CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
1216 npages, vio->u.readwrite.vui_from, vio->u.readwrite.vui_to);
1218 LASSERT(page_list_sanity_check(obj, queue));
1220 /* submit IO with async write */
1221 rc = cl_io_commit_async(env, io, queue,
1222 vio->u.readwrite.vui_from,
1223 vio->u.readwrite.vui_to,
1224 write_commit_callback);
1225 npages -= queue->pl_nr; /* already committed pages */
1227 /* calculate how many bytes were written */
1228 bytes = npages << PAGE_SHIFT;
1231 bytes -= vio->u.readwrite.vui_from;
1232 if (queue->pl_nr == 0) /* last page */
1233 bytes -= PAGE_SIZE - vio->u.readwrite.vui_to;
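/* e.g. 2 pages all committed, with vui_from = 1024 and vui_to = 512
 * (4 KiB pages): bytes = 2 * 4096 - 1024 - (4096 - 512) = 3584 bytes
 * of user data.
 */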
1234 LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
1236 vio->u.readwrite.vui_written += bytes;
1238 CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
1239 npages, bytes, vio->u.readwrite.vui_written);
1241 /* the first page must have been written. */
1242 vio->u.readwrite.vui_from = 0;
1244 LASSERT(page_list_sanity_check(obj, queue));
1245 LASSERT(ergo(rc == 0, queue->pl_nr == 0));
1247 /* out of quota, try sync write */
1248 if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
1249 struct ll_inode_info *lli = ll_i2info(inode);
1251 rc = vvp_io_commit_sync(env, io, queue,
1252 vio->u.readwrite.vui_from,
1253 vio->u.readwrite.vui_to);
1255 vio->u.readwrite.vui_written += rc;
1258 if (lli->lli_clob != NULL)
1259 lov_read_and_clear_async_rc(lli->lli_clob);
1260 lli->lli_async_rc = 0;
1263 /* The pages remaining in the queue failed to commit; discard them
1264 * unless they were dirtied before.
1265 */
1266 while (queue->pl_nr > 0) {
1267 page = cl_page_list_first(queue);
1268 cl_page_list_del(env, queue, page, true);
1270 if (!PageDirty(cl_page_vmpage(page)))
1271 cl_page_discard(env, io, page);
1273 cl_page_disown(env, io, page);
1275 /* held in ll_cl_init() */
1276 cl_page_put(env, page);
1278 cl_page_list_fini(env, queue);
1283 static int vvp_io_write_start(const struct lu_env *env,
1284 const struct cl_io_slice *ios)
1286 struct vvp_io *vio = cl2vvp_io(env, ios);
1287 struct cl_io *io = ios->cis_io;
1288 struct cl_object *obj = io->ci_obj;
1289 struct inode *inode = vvp_object_inode(obj);
1290 struct ll_inode_info *lli = ll_i2info(inode);
1291 struct file *file = vio->vui_fd->fd_file;
1293 loff_t pos = io->u.ci_wr.wr.crw_pos;
1294 size_t crw_bytes = io->u.ci_wr.wr.crw_bytes;
1295 bool lock_inode = !IS_NOSEC(inode);
1296 size_t ci_bytes = io->ci_bytes;
1297 struct iov_iter iter;
1303 trunc_sem_down_read(&lli->lli_trunc_sem);
1305 if (!can_populate_pages(env, io, inode))
1308 if (cl_io_is_append(io)) {
1309 /*
1310 * PARALLEL IO: this has to be changed for parallel IO doing
1311 * out-of-order writes.
1312 */
1313 ll_merge_attr(env, inode);
1314 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
1315 vio->vui_iocb->ki_pos = pos;
1317 LASSERTF(vio->vui_iocb->ki_pos == pos,
1318 "ki_pos %lld [%lld, %lld)\n",
1319 vio->vui_iocb->ki_pos,
1320 pos, pos + crw_bytes);
1323 CDEBUG(D_VFSTRACE, "%s: write [%llu, %llu)\n",
1324 file_dentry(file)->d_name.name, pos, pos + crw_bytes);
1326 /* The maximum Lustre file size is variable, based on the OST maximum
1327 * object size and the number of stripes. This needs another check in
1328 * addition to the VFS checks earlier.
1329 */
1330 if (pos + crw_bytes > ll_file_maxbytes(inode)) {
1332 "%s: file %s ("DFID") offset %llu > maxbytes %llu\n",
1333 ll_i2sbi(inode)->ll_fsname,
1334 file_dentry(file)->d_name.name,
1335 PFID(ll_inode2fid(inode)), pos + crw_bytes,
1336 ll_file_maxbytes(inode));
1340 /* Tests to verify we take the i_mutex correctly */
1341 if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_SEC) && !lock_inode)
1344 if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_NOSEC) && lock_inode)
1347 flags = iocb_ki_flags_get(file, vio->vui_iocb);
1348 if (!iocb_ki_flags_check(flags, DIRECT)) {
1349 result = cl_io_lru_reserve(env, io, pos, crw_bytes);
1354 if (vio->vui_iter == NULL) {
1355 /* from a temp io in ll_cl_init(). */
1358 /*
1359 * When using the locked AIO function (generic_file_aio_write()),
1360 * testing has shown the inode mutex to be a limiting factor
1361 * with multi-threaded single-shared-file performance. To get
1362 * around this, we now use the lockless version. To maintain
1363 * consistency, proper locking to protect against writes,
1364 * truncates, etc. is handled in the higher layers of Lustre.
1365 */
1366 lock_inode = !IS_NOSEC(inode);
1367 iter = *vio->vui_iter;
1369 if (unlikely(lock_inode))
1370 ll_inode_lock(inode);
1371 result = __generic_file_write_iter(vio->vui_iocb, &iter);
1372 if (unlikely(lock_inode))
1373 ll_inode_unlock(inode);
1377 #ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
1378 result = generic_write_sync(vio->vui_iocb, result);
1383 err = generic_write_sync(vio->vui_iocb->ki_filp, pos,
1385 if (err < 0 && result > 0)
1392 result = vvp_io_write_commit(env, io);
1393 /* Simulate short commit */
1394 if (CFS_FAULT_CHECK(OBD_FAIL_LLITE_SHORT_COMMIT)) {
1395 vio->u.readwrite.vui_written >>= 1;
1396 if (vio->u.readwrite.vui_written > 0)
1397 io->ci_need_restart = 1;
1399 if (vio->u.readwrite.vui_written > 0) {
1400 result = vio->u.readwrite.vui_written;
1401 CDEBUG(D_VFSTRACE, "%s: write bytes %zd, result: %zd\n",
1402 file_dentry(file)->d_name.name,
1403 io->ci_bytes, result);
1404 io->ci_bytes += result;
1406 io->ci_continue = 0;
1409 if (vio->vui_iocb->ki_pos != (pos + io->ci_bytes - ci_bytes)) {
1411 "%s: write position mismatch: ki_pos %lld vs. pos %lld, written %zd, commit %ld: rc = %zd\n",
1412 file_dentry(file)->d_name.name,
1413 vio->vui_iocb->ki_pos, pos + io->ci_bytes - ci_bytes,
1414 written, io->ci_bytes - ci_bytes, result);
1415 /*
1416 * Rewind ki_pos and vui_iter to where the data was
1417 * successfully committed.
1418 */
1419 vio->vui_iocb->ki_pos = pos + io->ci_bytes - ci_bytes;
1421 if (result > 0 || result == -EIOCBQUEUED) {
1422 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
1424 if (result != -EIOCBQUEUED && result < crw_bytes)
1425 io->ci_continue = 0;
1429 if (result == -EIOCBQUEUED) {
1430 io->ci_bytes += vio->u.readwrite.vui_written;
1431 vio->vui_iocb->ki_pos = pos +
1432 vio->u.readwrite.vui_written;
1439 static void vvp_io_rw_end(const struct lu_env *env,
1440 const struct cl_io_slice *ios)
1442 struct inode *inode = vvp_object_inode(ios->cis_obj);
1443 struct ll_inode_info *lli = ll_i2info(inode);
1445 trunc_sem_up_read(&lli->lli_trunc_sem);
1448 static void vvp_io_write_end(const struct lu_env *env,
1449 const struct cl_io_slice *ios)
1451 struct inode *inode = vvp_object_inode(ios->cis_obj);
1452 struct cl_io *io = ios->cis_io;
1454 vvp_io_rw_end(env, ios);
1456 /* Update size and blocks for LSOM (best effort) */
1457 if (!io->ci_ignore_layout && cl_io_is_sync_write(io))
1458 ll_merge_attr_try(env, inode);
1462 static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
1464 struct vm_fault *vmf = cfio->ft_vmf;
1466 cfio->ft_flags = ll_filemap_fault(cfio->ft_vma, vmf);
1467 cfio->ft_flags_valid = 1;
1470 /* success, vmpage is locked */
1471 LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
1472 get_vmf_address(vmf));
1473 if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
1474 lock_page(vmf->page);
1475 cfio->ft_flags |= VM_FAULT_LOCKED;
1478 cfio->ft_vmpage = vmf->page;
1483 if (cfio->ft_flags & VM_FAULT_SIGBUS) {
1484 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", get_vmf_address(vmf));
1488 if (cfio->ft_flags & VM_FAULT_OOM) {
1489 CDEBUG(D_PAGE, "got addr %p - OOM\n", get_vmf_address(vmf));
1493 if (cfio->ft_flags & VM_FAULT_RETRY)
1496 CERROR("unknown error in page fault %d\n", cfio->ft_flags);
1501 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
1502 struct folio_batch *fbatch)
1504 vvp_set_batch_dirty(fbatch);
1507 static int vvp_io_fault_start(const struct lu_env *env,
1508 const struct cl_io_slice *ios)
1510 struct vvp_io *vio = cl2vvp_io(env, ios);
1511 struct cl_io *io = ios->cis_io;
1512 struct cl_object *obj = io->ci_obj;
1513 struct inode *inode = vvp_object_inode(obj);
1514 struct ll_inode_info *lli = ll_i2info(inode);
1515 struct cl_fault_io *fio = &io->u.ci_fault;
1516 struct vvp_fault_io *cfio = &vio->u.fault;
1519 struct page *vmpage = NULL;
1520 struct cl_page *page;
1526 trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
1528 /* offset of the last byte on the page */
1529 offset = ((fio->ft_index + 1) << PAGE_SHIFT) - 1;
1530 LASSERT((offset >> PAGE_SHIFT) == fio->ft_index);
1531 result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
1535 CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_FAULT_PAUSE, cfs_fail_val);
1537 /* must return locked page */
1538 if (fio->ft_mkwrite) {
1539 LASSERT(cfio->ft_vmpage != NULL);
1540 vmpage = cfio->ft_vmpage;
1542 /*
1543 * page was truncated and lock was cancelled, return ENODATA
1544 * so that VM_FAULT_NOPAGE will be returned to handle_mm_fault()
1545 * XXX: cannot return VM_FAULT_RETRY to the vfs since we cannot
1546 * release the mmap_lock, and VM_FAULT_RETRY implies that the
1547 * mmap_lock is released.
1548 */
1549 if (!PageUptodate(vmpage))
1550 GOTO(out, result = -ENODATA);
1552 result = vvp_io_kernel_fault(cfio);
1557 vmpage = cfio->ft_vmpage;
1558 LASSERT(PageLocked(vmpage));
1560 if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
1561 generic_error_remove_folio(vmpage->mapping, page_folio(vmpage));
1563 size = i_size_read(inode);
1564 /* Though we already hold a cl_lock on this page,
1565 * it can still be truncated locally.
1566 */
1567 if (unlikely((vmpage->mapping != inode->i_mapping) ||
1568 (page_offset(vmpage) > size))) {
1569 CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
1571 /* return +1 to stop cl_io_loop() and ll_fault() will catch
1572 * and retry
1573 */
1574 GOTO(out, result = +1);
1577 last_index = (size - 1) >> PAGE_SHIFT;
1579 if (fio->ft_mkwrite) {
1580 /*
1581 * Capture the size while holding the lli_trunc_sem from above;
1582 * we want to make sure that we complete the mkwrite action
1583 * while holding this lock. We need to make sure that we are
1584 * not past the end of the file.
1585 */
1586 if (last_index < fio->ft_index) {
1588 "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
1589 vmpage->mapping, fio->ft_index, last_index);
1590 /*
1591 * We need to return if we are
1592 * past the end of the file. This will propagate
1593 * up the call stack to ll_page_mkwrite where
1594 * we will return VM_FAULT_NOPAGE. Any non-negative
1595 * value returned here will be silently
1596 * converted to 0. If the vmpage->mapping is null,
1597 * the error code would be converted back to ENODATA
1598 * in ll_page_mkwrite0. Thus we return -ENODATA
1599 * to handle both cases.
1600 */
1605 page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
1607 GOTO(out, result = PTR_ERR(page));
1609 /* if the page will be written, then add it to the cache earlier. */
1610 if (fio->ft_mkwrite) {
1611 wait_on_page_writeback(vmpage);
1612 if (!PageDirty(vmpage)) {
1613 struct cl_page_list *plist = &vio->u.fault.ft_queue;
1616 /* vvp_page_assume() calls wait_on_page_writeback(). */
1617 cl_page_assume(env, io, page);
1619 cl_page_list_init(plist);
1620 cl_page_list_add(plist, page, true);
1623 if (last_index == cl_page_index(page))
1624 to = ((size - 1) & ~PAGE_MASK) + 1;
1626 /* Do not set the Dirty bit here, so that if IO is
1627 * started before the page is really made dirty, we
1628 * still have a chance to detect it.
1629 */
1630 result = cl_io_commit_async(env, io, plist, 0, to,
1631 mkwrite_commit_callback);
1632 /* The over-quota flag is set, so try a sync write to check
1633 * whether we are indeed out of quota
1634 */
1635 if (result == -EDQUOT) {
1637 result = vvp_io_commit_sync(env, io,
1641 cl_page_own(env, io, page);
1642 cl_page_list_add(plist, page, true);
1643 result = cl_io_commit_async(env, io,
1645 mkwrite_commit_callback);
1648 cl_page_put(env, page);
1652 LASSERT(cl_page_is_owned(page, io));
1653 cl_page_list_fini(env, plist);
1657 cl_page_discard(env, io, page);
1658 cl_page_disown(env, io, page);
1660 cl_page_put(env, page);
1662 /* we're in big trouble, what can we do now? */
1663 if (result == -EDQUOT)
1667 cl_page_disown(env, io, page);
1672 /*
1673 * The ft_index is only used in the case of the mkwrite action. We
1674 * need to check that our assertions are correct, since we should
1675 * have caught this above.
1676 */
1677 LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
1678 if (fio->ft_index == last_index)
1679 /* Last page is mapped partially. */
1680 fio->ft_bytes = size - (fio->ft_index << PAGE_SHIFT);
1682 fio->ft_bytes = PAGE_SIZE;
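/* e.g. with size = 10000 and 4 KiB pages, last_index = 2: a fault on
 * page 2 maps only bytes [8192, 9999], so ft_bytes = 10000 - 8192 =
 * 1808, while earlier pages map a full PAGE_SIZE.
 */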
1684 fio->ft_page = page;
1688 /* return unlocked vmpage to avoid deadlocking */
1690 unlock_page(vmpage);
1692 cfio->ft_flags &= ~VM_FAULT_LOCKED;
1697 static void vvp_io_fault_end(const struct lu_env *env,
1698 const struct cl_io_slice *ios)
1700 struct inode *inode = vvp_object_inode(ios->cis_obj);
1701 struct ll_inode_info *lli = ll_i2info(inode);
1703 CLOBINVRNT(env, ios->cis_io->ci_obj,
1704 vvp_object_invariant(ios->cis_io->ci_obj));
1705 trunc_sem_up_read(&lli->lli_trunc_sem);
1708 static int vvp_io_fsync_start(const struct lu_env *env,
1709 const struct cl_io_slice *ios)
1711 /* mark the TOWRITE bit on each dirty page in the radix tree to verify
1712 * that pages have been written, but this is difficult because of races.
1713 */
1717 static void vvp_io_fsync_end(const struct lu_env *env,
1718 const struct cl_io_slice *ios)
1720 struct inode *inode = vvp_object_inode(ios->cis_obj);
1721 struct cl_io *io = ios->cis_io;
1723 /* Update size and blocks for LSOM (best effort) */
1724 if (!io->ci_ignore_layout)
1725 ll_merge_attr_try(env, inode);
1728 static int vvp_io_read_ahead(const struct lu_env *env,
1729 const struct cl_io_slice *ios,
1730 pgoff_t start, struct cl_read_ahead *ra)
1736 if (ios->cis_io->ci_type == CIT_READ ||
1737 ios->cis_io->ci_type == CIT_FAULT) {
1738 struct vvp_io *vio = cl2vvp_io(env, ios);
1740 if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1741 ra->cra_end_idx = CL_PAGE_EOF;
1742 result = 1; /* no need to call down */
1749 static int vvp_io_lseek_lock(const struct lu_env *env,
1750 const struct cl_io_slice *ios)
1752 struct cl_io *io = ios->cis_io;
1753 __u64 lock_start = io->u.ci_lseek.ls_start;
1754 __u64 lock_end = OBD_OBJECT_EOF;
1755 __u32 enqflags = CEF_MUST; /* always take client lock */
1757 return vvp_io_one_lock(env, io, enqflags, CLM_READ,
1758 lock_start, lock_end);
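/* The lock spans [ls_start, OBD_OBJECT_EOF] so that the subsequent
 * hole/data scan sees a stable view of the file's extents.
 */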
1761 static int vvp_io_lseek_start(const struct lu_env *env,
1762 const struct cl_io_slice *ios)
1764 struct cl_io *io = ios->cis_io;
1765 struct inode *inode = vvp_object_inode(io->ci_obj);
1766 __u64 start = io->u.ci_lseek.ls_start;
1768 ll_inode_lock(inode);
1769 inode_dio_wait(inode);
1771 /* At the moment we have the DLM lock, so just update the inode
1772 * to know the file size.
1773 */
1774 ll_merge_attr(env, inode);
1775 if (start >= i_size_read(inode)) {
1776 io->u.ci_lseek.ls_result = -ENXIO;
1782 static void vvp_io_lseek_end(const struct lu_env *env,
1783 const struct cl_io_slice *ios)
1785 struct cl_io *io = ios->cis_io;
1786 struct inode *inode = vvp_object_inode(io->ci_obj);
1788 if (io->u.ci_lseek.ls_result > i_size_read(inode))
1789 io->u.ci_lseek.ls_result = -ENXIO;
1791 ll_inode_unlock(inode);
1794 static const struct cl_io_operations vvp_io_ops = {
1797 .cio_fini = vvp_io_fini,
1798 .cio_iter_init = vvp_io_read_iter_init,
1799 .cio_lock = vvp_io_read_lock,
1800 .cio_start = vvp_io_read_start,
1801 .cio_end = vvp_io_rw_end,
1802 .cio_advance = vvp_io_advance,
1805 .cio_fini = vvp_io_fini,
1806 .cio_iter_init = vvp_io_write_iter_init,
1807 .cio_iter_fini = vvp_io_write_iter_fini,
1808 .cio_lock = vvp_io_write_lock,
1809 .cio_start = vvp_io_write_start,
1810 .cio_end = vvp_io_write_end,
1811 .cio_advance = vvp_io_advance,
1814 .cio_fini = vvp_io_setattr_fini,
1815 .cio_iter_init = vvp_io_setattr_iter_init,
1816 .cio_lock = vvp_io_setattr_lock,
1817 .cio_start = vvp_io_setattr_start,
1818 .cio_end = vvp_io_setattr_end
1821 .cio_fini = vvp_io_fault_fini,
1822 .cio_iter_init = vvp_io_fault_iter_init,
1823 .cio_lock = vvp_io_fault_lock,
1824 .cio_start = vvp_io_fault_start,
1825 .cio_end = vvp_io_fault_end,
1828 .cio_start = vvp_io_fsync_start,
1829 .cio_fini = vvp_io_fini,
1830 .cio_end = vvp_io_fsync_end,
1833 .cio_fini = vvp_io_fini
1836 .cio_fini = vvp_io_fini
1839 .cio_fini = vvp_io_fini
1842 .cio_fini = vvp_io_fini,
1843 .cio_lock = vvp_io_lseek_lock,
1844 .cio_start = vvp_io_lseek_start,
1845 .cio_end = vvp_io_lseek_end,
1848 .cio_read_ahead = vvp_io_read_ahead
1851 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
1854 struct vvp_io *vio = vvp_env_io(env);
1855 struct inode *inode = vvp_object_inode(obj);
1858 CLOBINVRNT(env, obj, vvp_object_invariant(obj));
1861 CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d restore needed %d\n",
1862 PFID(lu_object_fid(&obj->co_lu)),
1863 io->ci_ignore_layout, io->ci_verify_layout,
1864 vio->vui_layout_gen, io->ci_restore_needed);
1866 CL_IO_SLICE_CLEAN(vio, vui_cl);
1867 cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
1868 vio->vui_ra_valid = false;
1870 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
1872 struct ll_inode_info *lli = ll_i2info(inode);
1875 bytes = io->u.ci_rw.crw_bytes;
1876 /* "If nbyte is 0, read() will return 0 and have no other
1877 * results." -- Single Unix Spec
1882 vio->vui_tot_bytes = bytes;
1884 /* this might sleep */
1885 lustre_get_jobid(ji.ji_jobid, sizeof(ji.ji_jobid));
1886 ji.ji_uid = from_kuid(&init_user_ns, current_uid());
1887 ji.ji_gid = from_kgid(&init_user_ns, current_gid());
1889 /* for read/write, we store the process jobid/gid/uid in the
1890 * inode, and it'll be fetched by osc when building the RPC.
1891 *
1892 * It's not accurate if the file is shared by different
1893 * jobs.
1894 */
1895 write_seqlock(&lli->lli_jobinfo_seqlock);
1896 memcpy(&lli->lli_jobinfo, &ji, sizeof(ji));
1897 write_sequnlock(&lli->lli_jobinfo_seqlock);
1898 } else if (io->ci_type == CIT_SETATTR) {
1899 if (!cl_io_is_trunc(io))
1900 io->ci_lockreq = CILR_MANDATORY;
1903 /* Enqueue the layout lock and get the layout version. We need to do
1904 * this even for operations requiring an open file, such as read and
1905 * write, because IT_OPEN might not grant the layout lock.
1906 */
1907 if (result == 0 && !io->ci_ignore_layout) {
1908 result = ll_layout_refresh(inode, &vio->vui_layout_gen);
1909 if (result == -ENOENT)
1910 /* If the inode on the MDS has been removed, but the objects
1911 * on the OSTs haven't been destroyed (async unlink), layout
1912 * fetch will return -ENOENT; we ignore this error
1913 * and continue with the dirty flush. LU-3230.
1914 */
1917 CERROR("%s: refresh file layout " DFID " error %d.\n",
1918 ll_i2sbi(inode)->ll_fsname,
1919 PFID(lu_object_fid(&obj->co_lu)), result);
1922 #ifdef HAVE_INVALIDATE_LOCK
1923 if (io->ci_invalidate_page_cache)
1924 filemap_invalidate_lock(inode->i_mapping);
1925 #endif /* HAVE_INVALIDATE_LOCK */
1927 io->ci_result = result < 0 ? result : 0;