/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include <lustre_compat.h>
static const struct vm_operations_struct ll_file_vm_ops;
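/*
 * policy_from_vma() computes the ldlm lock extent, in file-offset bytes,
 * that covers a fault at @addr in @vma. Worked example (illustrative,
 * assuming 4 KiB pages): for a vma whose vm_pgoff is 0x10 (file offset
 * 0x10000) and a fault at vm_start + 0x1234 with count == 1,
 * start = 0x1000 + 0x10000 = 0x11000 and end = 0x11000 | ~PAGE_MASK =
 * 0x11fff, i.e. the extent is rounded out to cover the whole page
 * containing the fault.
 */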
void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
                     unsigned long addr, size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* mmap_sem must have been held by caller. */
        LASSERT(!down_write_trylock(&mm->mmap_sem));

        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        RETURN(ret);
}
/**
 * API-independent part of page fault initialization.
 * \param env - corresponding lu_env for processing
 * \param vma - virtual memory area the fault is addressed to
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return the initialized cl_io, or an ERR_PTR() carrying the error code
 *         from cl_io_init()
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
                 pgoff_t index, unsigned long *ra_flags)
{
        struct file *file = vma->vm_file;
        struct inode *inode = file_inode(file);
        struct cl_io *io;
        struct cl_fault_io *fio;
        int rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. we do our readahead in ll_readpage.
         */
        if (ra_flags != NULL)
                *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY; it has to cache
                 * pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;
                io = ERR_PTR(rc);
        }

        RETURN(io);
}
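/*
 * Note that ll_fault_io_init() loops back to restart cl_io_init() when
 * ci_need_restart is set, which presumably happens when the file's layout
 * changed while the io was being initialized; the new attempt then runs
 * against the refreshed layout.
 */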
/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio;
        int result;
        __u16 refcheck;
        sigset_t set;
        struct inode *inode = NULL;
        struct ll_inode_info *lli;
        ENTRY;

        LASSERT(vmpage != NULL);
        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io = ll_fault_io_init(env, vma, vmpage->index, NULL);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result < 0)
                GOTO(out_io, result);

        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma = vma;
        vio->u.fault.ft_vmpage = vmpage;

        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        cfs_restore_sigs(set);

        if (result == 0) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);
                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault(). */
                        result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* race, the page has been cleaned by ptlrpcd after
                         * it was unlocked, it has to be added into dirty
                         * cache again otherwise this soon-to-dirty page won't
                         * consume any grants, even worse if this page is being
                         * transferred because it will break RPC checksum.
                         */
                        unlock_page(vmpage);
                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
                               "been written out, retry.\n",
                               vmpage, vmpage->index);
                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0)
                        ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
        }

out_io:
        cl_io_fini(env, io);
out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        /* if page has been unmapped, presumably due to lock reclaim for
         * concurrent usage, add some delay before retrying to prevent
         * entering live-lock situation with competitors
         */
        if (result == -ENODATA && inode != NULL) {
                CDEBUG(D_MMAP, "delaying new page-fault for inode %p to "
                       "prevent live-lock\n", inode);
                msleep(10);
        }

        RETURN(result);
}
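/*
 * Contract of ll_page_mkwrite0(): on success (result == 0) the page is
 * returned locked (see the LASSERT above) and re-entered into the dirty
 * cache; any non-zero result is translated into a VM_FAULT_* code by the
 * ll_page_mkwrite() wrapper below.
 */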
static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}
/**
 * Lustre implementation of a vm_operations_struct::fault() method, called by
 * the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area the fault is related to
 * \param vmf - structure describing the fault type and address
 *
 * \return the allocated and filled-in _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval VM_FAULT_OOM if there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio = NULL;
        struct page *vmpage;
        unsigned long ra_flags;
        int result = 0;
        int fault_ret = 0;
        __u16 refcheck;
        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
                /* do fast fault */
                ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
                fault_ret = ll_filemap_fault(vma, vmf);
                ll_cl_remove(vma->vm_file, env);

                /* - If there is no error, then the page was found in cache
                 *   and is uptodate;
                 * - If VM_FAULT_RETRY is set, the page existed but failed to
                 *   lock. It will return to the kernel and retry;
                 * - Otherwise, it should try a normal fault under DLM lock. */
                if ((fault_ret & VM_FAULT_RETRY) ||
                    !(fault_ret & VM_FAULT_ERROR))
                        GOTO(out, result = 0);

                fault_ret = 0;
        }

        io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma = vma;
                vio->u.fault.ft_vmpage = NULL;
                vio->u.fault.ft_vmf = vmf;
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = 0;

                /* May call ll_readpage() */
                ll_cl_add(vma->vm_file, env, io, LCC_MMAP);

                result = cl_io_loop(env, io);

                ll_cl_remove(vma->vm_file, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage != NULL) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);

        vma->vm_flags |= ra_flags;

out:
        cl_env_put(env, &refcheck);
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
        RETURN(fault_ret);
}
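/*
 * Kernels 4.11+ pass only a vm_fault to ->fault() and ->page_mkwrite()
 * (the vma is reachable as vmf->vma), so the HAVE_VM_OPS_USE_VM_FAULT_ONLY
 * configure check selects the matching prototype below.
 */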
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static int ll_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        int result;
        sigset_t set;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
         * so that it can be killed by admin but not cause segfault by
         * other signals. */
        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

        ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                           LPROC_LL_FAULT, 1);

restart:
        result = ll_fault0(vma, vmf);
        if (!(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, "
                                      "maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }
                        goto restart;
                }

                result |= VM_FAULT_LOCKED;
        }
        cfs_restore_sigs(set);
        return result;
}
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static int ll_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool retry;
        int result;

        ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                           LPROC_LL_MKWRITE, 1);

        file_update_time(vma->vm_file);
        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = file_dentry(vma->vm_file);

                        CWARN("app(%s): the page %lu of file "DFID" is under"
                              " heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        case -EAGAIN:
                result = VM_FAULT_RETRY;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}
/**
 * To avoid cancelling the locks covering an mmapped region under lock cache
 * pressure, we track the mapped vma count in vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        ENTRY;
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
        EXIT;
}
/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        ENTRY;
        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        EXIT;
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last %llu first %llu\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}
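/*
 * Note on ll_teardown_mmaps(): unmap_mapping_range() removes the user
 * page-table entries covering [first, last] of this mapping, so the next
 * access refaults through ll_fault() and re-acquires a covering DLM lock.
 * Passing first + PAGE_SIZE - 1 as the hole start rounds it up to a page
 * boundary, so a page only partially inside the range stays mapped.
 */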
static const struct vm_operations_struct ll_file_vm_ops = {
        .fault          = ll_fault,
        .page_mkwrite   = ll_page_mkwrite,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
};
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        int rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                /* the VM calls ->open() only when a vma is duplicated;
                 * call it by hand to account for the initial mapping */
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(inode);
        }

        RETURN(rc);
}
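/*
 * ll_file_mmap() is reached through the ->mmap method of llite's
 * regular-file file_operations. A minimal sketch of the wiring (the full
 * initializer lives in llite's file.c):
 *
 *      const struct file_operations ll_file_operations = {
 *              ...
 *              .mmap   = ll_file_mmap,
 *              ...
 *      };
 *
 * so an application's mmap(2) on a Lustre file lands here after the
 * generic VFS checks, and every later fault on the mapping goes through
 * ll_file_vm_ops above.
 */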