/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */

#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/file.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include <lustre_compat.h>

static const struct vm_operations_struct ll_file_vm_ops;
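
/*
 * Translate the page-aligned range [addr, addr + bytes) of a mapped region
 * into the corresponding byte extent of the backing file, expressed as an
 * LDLM lock policy: start is rounded down and end rounded up to page
 * boundaries, since locking is done on whole pages.
 */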
void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
		     unsigned long addr, size_t bytes)
{
	policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
				 (vma->vm_pgoff << PAGE_SHIFT);
	policy->l_extent.end = (policy->l_extent.start + bytes - 1) |
			       ~PAGE_MASK;
}

/*
 * Linux commit v6.0-rc3-225-gf39af05949a4
 *   mm: add VMA iterator
 */
#define vma_iterator vm_area_struct *
#define vma_iter_init(vmip, mm, addr) *(vmip) = find_vma(mm, addr)
#define for_each_vma(vmi, vma) \
	for (vma = vmi; vma != NULL; vma = vma->vm_next)
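
/*
 * Kernels with the maple-tree VMA iterator (v6.1+) provide vma_iterator,
 * vma_iter_init() and for_each_vma() natively; the macros above emulate
 * them on older kernels by starting from find_vma() and walking the
 * vm_next list.
 */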

struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t bytes)
{
	struct vm_area_struct *vma, *ret = NULL;
	struct vma_iterator vmi;

	/* mmap_lock must have been held by caller (a successful
	 * mmap_write_trylock() would mean it was not).
	 */
	LASSERT(!mmap_write_trylock(mm));

	vma_iter_init(&vmi, mm, addr);
	for_each_vma(vmi, vma) {
		/* VMAs are sorted by address; stop once past the range */
		if (vma->vm_start >= (addr + bytes))
			break;
		if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
		    vma->vm_flags & VM_SHARED) {
			ret = vma;
			break;
		}
	}

	RETURN(ret);
}

/**
 * API-independent part of page fault initialization.
 *
 * \param env - corresponding lu_env for processing
 * \param vma - virtual memory area addressed by the page fault
 * \param index - page index corresponding to the fault
 * \param mkwrite - whether it is an mmap write
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
		 pgoff_t index, bool mkwrite)
{
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct cl_fault_io *fio;
	struct cl_io *io;
	int rc;

	/* mmap IO needs DLM locking, which a "nolock" mount cannot provide */
	if (ll_file_nolock(file))
		RETURN(ERR_PTR(-EOPNOTSUPP));

	io = vvp_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj != NULL);

	fio = &io->u.ci_fault;
	fio->ft_index = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;
	if (mkwrite)
		fio->ft_writable = 1;

	if (vma->vm_flags & VM_SEQ_READ)
		io->ci_seq_read = 1;
	else if (vma->vm_flags & VM_RAND_READ)
		io->ci_rand_read = 1;

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct vvp_io *vio = vvp_env_io(env);
		struct ll_file_data *fd = file->private_data;

		LASSERT(vio->vui_cl.cis_io == io);

		/* mmap lock must be MANDATORY as the io has to be able to
		 * cache pages
		 */
		io->ci_lockreq = CILR_MANDATORY;
		vio->vui_fd = fd;
	} else {
		/* if io init requested a restart (e.g. a layout change),
		 * return -ENODATA so the fault is retried
		 */
		if (io->ci_need_restart)
			rc = -ENODATA;
		cl_io_fini(env, io);
		io = ERR_PTR(rc);
	}

	return io;
}

/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio;
	struct inode *inode = NULL;
	struct ll_inode_info *lli;
	sigset_t old, new;
	int result;
	__u16 refcheck;

	LASSERT(vmpage != NULL);
	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));

	io = ll_fault_io_init(env, vma, vmpage->index, true);
	if (IS_ERR(io))
		GOTO(out, result = PTR_ERR(io));

	result = io->ci_result;
	if (result < 0)
		GOTO(out_io, result);

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma = vma;
	vio->u.fault.ft_vmpage = vmpage;
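
	/* Block every signal except SIGKILL and SIGTERM for the duration of
	 * the io, so the task remains killable by the admin but cannot be
	 * interrupted mid-IO by ordinary signals.
	 */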
	siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
	sigprocmask(SIG_BLOCK, &new, &old);

	inode = vvp_object_inode(io->ci_obj);
	lli = ll_i2info(inode);

	result = cl_io_loop(env, io);

	sigprocmask(SIG_SETMASK, &old, NULL);

	if (result == 0) {
		lock_page(vmpage);
		if (vmpage->mapping == NULL) {
			unlock_page(vmpage);

			/* page was truncated and lock was cancelled, return
			 * ENODATA so that VM_FAULT_NOPAGE will be returned
			 * to handle_mm_fault()
			 */
			result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* race, the page has been cleaned by ptlrpcd after
			 * it was unlocked, it has to be added into dirty
			 * cache again otherwise this soon-to-dirty page won't
			 * consume any grants, even worse if this page is
			 * being transferred because it will break RPC
			 * checksum
			 */
			unlock_page(vmpage);
			result = -EAGAIN;

			CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);
		}
	}

	/* check if the mkwrite needs to be redone from scratch */
	if (io->ci_need_restart)
		*retry = true;

	if (result == 0)
		set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);

out_io:
	cl_io_fini(env, io);
out:
	cl_env_put(env, &refcheck);
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	LASSERT(ergo(result == 0, PageLocked(vmpage)));

	/* if page has been unmapped, presumably due to lock reclaim under
	 * concurrent usage, add some delay before retrying to prevent
	 * entering a live-lock situation with competitors
	 */
	if (result == -ENODATA && inode != NULL) {
		CDEBUG(D_MMAP, "delaying new page-fault for inode %p to prevent live-lock\n",
		       inode);
		msleep(10); /* back-off delay; the exact value is illustrative */
	}

	return result;
}
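
/*
 * Translate a cl_io result code into the VM_FAULT_* bits expected by the
 * MM layer: 0 means the page is returned locked, -ENOMEM maps to
 * VM_FAULT_OOM, anything else to VM_FAULT_SIGBUS.
 */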
static inline int to_fault_error(int result)
{
	switch (result) {
	case 0:
		result = VM_FAULT_LOCKED;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}
	return result;
}

int ll_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	vm_fault_t ret;
	unsigned int seq;

	/* this seqlock lets us notice if a page has been deleted on this
	 * inode during the fault process, allowing us to catch an erroneous
	 * SIGBUS and retry the fault
	 */
	do {
		seq = read_seqbegin(&ll_i2info(inode)->lli_page_inv_lock);
		ret = __ll_filemap_fault(vma, vmf);
	} while (read_seqretry(&ll_i2info(inode)->lli_page_inv_lock, seq) &&
		 (ret & VM_FAULT_SIGBUS));

	return ret;
}
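
/*
 * Design note: the sequence counter keeps the common fault path lock-free;
 * the invalidation path presumably bumps lli_page_inv_lock, so only a fault
 * that raced with an invalidation (and may have returned a spurious SIGBUS)
 * is retried.
 */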

/**
 * Lustre implementation of a vm_operations_struct::fault() method, called by
 * the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area related to the page fault
 * \param vmf - structure describing the fault type and address
 *
 * \return allocated and filled _locked_ page for the faulting address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM when out of memory for a new page
 */
static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio = NULL;
	struct page *vmpage;
	int result = 0;
	vm_fault_t fault_ret = 0;
	__u16 refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));

	if (ll_sbi_has_fast_read(ll_i2sbi(inode))) {
		/* fast fault path */
		bool allow_retry = vmf->flags & FAULT_FLAG_ALLOW_RETRY;
		bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;

		/* To avoid loops, instruct downstream to not drop mmap_sem:
		 * FAULT_FLAG_ALLOW_RETRY is only needed prior to Linux 5.1
		 * (6b4c9f4469819), where FAULT_FLAG_RETRY_NOWAIT is enough
		 * to not drop mmap_sem when failing to lock the page.
		 */
		vmf->flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
		ll_cl_add(inode, env, NULL, LCC_MMAP);
		fault_ret = ll_filemap_fault(vma, vmf);
		ll_cl_remove(inode, env);
		if (!has_retry)
			vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		if (!allow_retry)
			vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;

		/* - If there is no error, then the page was found in cache
		 *   and uptodate;
		 * - If VM_FAULT_RETRY is set, the page existed but failed to
		 *   lock. We will try slow path to avoid loops.
		 * - Otherwise, it should try normal fault under DLM lock.
		 */
		if (!(fault_ret & VM_FAULT_RETRY) &&
		    !(fault_ret & VM_FAULT_ERROR))
			GOTO(out, result = 0);
	}

	io = ll_fault_io_init(env, vma, vmf->pgoff, false);
	if (IS_ERR(io))
		GOTO(out, result = PTR_ERR(io));

	result = io->ci_result;
	if (result == 0) {
		vio = vvp_env_io(env);
		vio->u.fault.ft_vma = vma;
		vio->u.fault.ft_vmpage = NULL;
		vio->u.fault.ft_vmf = vmf;
		vio->u.fault.ft_flags = 0;
		vio->u.fault.ft_flags_valid = 0;

		/* May call ll_readpage() */
		ll_cl_add(inode, env, io, LCC_MMAP);

		result = cl_io_loop(env, io);

		ll_cl_remove(inode, env);

		/* ft_flags are only valid if we reached ll_filemap_fault() */
		if (vio->u.fault.ft_flags_valid)
			fault_ret = vio->u.fault.ft_flags;

		vmpage = vio->u.fault.ft_vmpage;
		if (result != 0 && vmpage != NULL) {
			/* on error, drop the page reference taken below us
			 * and clear the stale page pointer
			 */
			put_page(vmpage);
			vmf->page = NULL;
		}
	}
	cl_io_fini(env, io);

out:
	cl_env_put(env, &refcheck);

	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
		fault_ret |= to_fault_error(result);

	CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
	RETURN(fault_ret);
}

#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
	int count = 0;
	bool printed = false;
	bool cached;
	vm_fault_t result;
	ktime_t kstart = ktime_get();
	sigset_t old, new;

	result = pcc_fault(vma, vmf, &cached);
	if (cached)
		goto out;

	CDEBUG(D_MMAP|D_IOTRACE,
	       "START file %s:"DFID", vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
	       file_dentry(vma->vm_file)->d_name.name,
	       PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
	       vma, vma->vm_start, vma->vm_end, vma->vm_flags, vmf->pgoff);

	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
	 * so that the task can be killed by the admin but other signals
	 * do not cause a segfault
	 */
	siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
	sigprocmask(SIG_BLOCK, &new, &old);

	/* make sure offset is not a negative number */
	if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
		return VM_FAULT_SIGBUS;

restart:
	result = ll_fault0(vma, vmf);
	if (vmf->page &&
	    !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
		struct page *vmpage = vmf->page;

		/* lock the page, then check if this page has been truncated
		 * or deleted from Lustre and retry if so
		 */
		lock_page(vmpage);
		if (unlikely(vmpage->mapping == NULL) ||
		    vmpage->private == 0) { /* unlucky */
			unlock_page(vmpage);
			put_page(vmpage);
			vmf->page = NULL;

			if (!printed && ++count > 16) {
				struct inode *inode = file_inode(vma->vm_file);

				CWARN("%s: FID "DFID" under heavy mmap contention by '%s', consider revising IO pattern\n",
				      ll_i2sbi(inode)->ll_fsname,
				      PFID(&ll_i2info(inode)->lli_fid),
				      current->comm);
				printed = true;
			}

			goto restart;
		}

		result |= VM_FAULT_LOCKED;
	}
	sigprocmask(SIG_SETMASK, &old, NULL);

out:
	if (vmf->page && result == VM_FAULT_LOCKED) {
		ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
				  current->pid, vma->vm_file->private_data,
				  vmf->page->index << PAGE_SHIFT, PAGE_SIZE,
				  READ);
		ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
				   LPROC_LL_FAULT,
				   ktime_us_delta(ktime_get(), kstart));
	}

	CDEBUG(D_MMAP|D_IOTRACE,
	       "COMPLETED: "DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu, rc %d\n",
	       PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
	       vma, vma->vm_start, vma->vm_end, vma->vm_flags, vmf->pgoff,
	       result);

	return result;
}

#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
#endif
	int count = 0;
	bool printed = false;
	bool retry;
	bool cached;
	vm_fault_t result;
	ktime_t kstart = ktime_get();

	CDEBUG(D_MMAP|D_IOTRACE,
	       "START file %s:"DFID", vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
	       file_dentry(vma->vm_file)->d_name.name,
	       PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid), vma,
	       vma->vm_start, vma->vm_end, vma->vm_flags, vmf->page->index);

	result = pcc_page_mkwrite(vma, vmf, &cached);
	if (cached)
		goto out;

	file_update_time(vma->vm_file);
	do {
		retry = false;
		result = ll_page_mkwrite0(vma, vmf->page, &retry);

		if (!printed && ++count > 16) {
			const struct dentry *de = file_dentry(vma->vm_file);

			CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
			      current->comm, vmf->pgoff,
			      PFID(ll_inode2fid(de->d_inode)));
			printed = true;
		}
	} while (retry);

	switch (result) {
	case 0:
		LASSERT(PageLocked(vmf->page));
		result = VM_FAULT_LOCKED;
		break;
	case -ENODATA:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	case -EAGAIN:
		result = VM_FAULT_RETRY;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}

out:
	if (result == VM_FAULT_LOCKED) {
		ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
				  current->pid, vma->vm_file->private_data,
				  vmf->page->index << PAGE_SHIFT, PAGE_SIZE,
				  WRITE);
		ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
				   LPROC_LL_MKWRITE,
				   ktime_us_delta(ktime_get(), kstart));
	}

	CDEBUG(D_MMAP|D_IOTRACE,
	       "COMPLETED: "DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu, rc %d\n",
	       PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
	       vma, vma->vm_start, vma->vm_end, vma->vm_flags,
	       vmf->page->index, result);

	return result;
}

/**
 * To avoid cancelling the locks that cover a mmapped region under lock
 * cache pressure, we track the number of mapped vmas in
 * vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct vvp_object *vob = cl_inode2vvp(inode);

	LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
	atomic_inc(&vob->vob_mmap_cnt);
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct vvp_object *vob = cl_inode2vvp(inode);

	atomic_dec(&vob->vob_mmap_cnt);
	LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}
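
/*
 * Note that the kernel also calls ->open()/->close() when a vma is
 * duplicated or torn down (fork(), vma splits, munmap()), so vob_mmap_cnt
 * counts every live mapping of the object, not just explicit mmap() calls.
 */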

static const struct vm_operations_struct ll_file_vm_ops = {
	.fault		= ll_fault,
	.page_mkwrite	= ll_page_mkwrite,
	.open		= ll_vm_open,
	.close		= ll_vm_close,
};

int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	ktime_t kstart = ktime_get();
	bool cached;
	int rc;

	CDEBUG(D_VFSTRACE | D_MMAP,
	       "VFS_Op: fid="DFID" vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
	       PFID(&ll_i2info(inode)->lli_fid),
	       vma, vma->vm_start, vma->vm_end, vma->vm_flags);

	if (ll_file_nolock(file))
		RETURN(-EOPNOTSUPP);

	rc = pcc_file_mmap(file, vma, &cached);
	if (cached && rc != 0)
		RETURN(rc);

	rc = generic_file_mmap(file, vma);
	if (rc == 0) {
		vma->vm_ops = &ll_file_vm_ops;
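		/* the kernel only calls ->open() automatically when a vma
		 * is duplicated (fork, vma split), not at mmap() time, so
		 * invoke it by hand for the initial mapping
		 */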
		vma->vm_ops->open(vma);

		/* update the inode's size and mtime */
		if (!cached)
			rc = ll_glimpse_size(inode);
	}

	if (rc == 0)
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
				   ktime_us_delta(ktime_get(), kstart));

	RETURN(rc);
}