/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include <lustre_compat.h>

static const struct vm_operations_struct ll_file_vm_ops;

void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
                     unsigned long addr, size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}
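
/*
 * Worked example (illustrative only, not from the original source): with
 * PAGE_SHIFT = 12, a vma whose vm_pgoff is 4, and a fault at
 * addr = vma->vm_start + 0x3001 with count = 1:
 *
 *	l_extent.start = (0x3001 & PAGE_MASK) + (4 << PAGE_SHIFT)
 *	               = 0x3000 + 0x4000 = 0x7000
 *	l_extent.end   = (0x7000 + 1 - 1) | ~PAGE_MASK = 0x7fff
 *
 * i.e. the requested DLM extent is rounded out to whole pages in file
 * offset space.
 */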

struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* mmap_lock must have been held by caller. */
        LASSERT(!mmap_write_trylock(mm));

        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count);
             vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        RETURN(ret);
}
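
/*
 * Illustrative usage (hypothetical caller, not from the original source):
 *
 *	mmap_read_lock(mm);
 *	vma = our_vma(mm, addr, bytes);
 *	if (vma != NULL)
 *		policy_from_vma(&policy, vma, addr, bytes);
 *	mmap_read_unlock(mm);
 *
 * i.e. with mmap_lock held, find the Lustre-backed VM_SHARED mapping that
 * covers [addr, addr + bytes) and derive the DLM lock extent from it.
 */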

/**
 * API-independent part of page fault initialization.
 *
 * \param env - corresponding lu_env for processing
 * \param vma - virtual memory area addressed by the page fault
 * \param index - page index corresponding to the fault
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma, pgoff_t index)
{
        struct file *file = vma->vm_file;
        struct inode *inode = file_inode(file);
        struct cl_io *io;
        struct cl_fault_io *fio;
        int rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        CDEBUG(D_MMAP,
               DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
               PFID(&ll_i2info(inode)->lli_fid), vma, vma->vm_start,
               vma->vm_end, vma->vm_flags, fio->ft_index);

        if (vma->vm_flags & VM_SEQ_READ)
                io->ci_seq_read = 1;
        else if (vma->vm_flags & VM_RAND_READ)
                io->ci_rand_read = 1;

        if (vma->vm_flags & VM_WRITE)
                fio->ft_writable = 1;

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = file->private_data;

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because the io needs to
                 * cache pages.
                 */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                io = ERR_PTR(rc);
        }

        RETURN(io);
}
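
/*
 * Illustrative sketch (not part of the original source): the fault paths
 * below all drive the cl_io returned here through the same lifecycle:
 *
 *	io = ll_fault_io_init(env, vma, index);
 *	if (IS_ERR(io))
 *		GOTO(out, result = PTR_ERR(io));
 *	result = io->ci_result;
 *	if (result == 0)
 *		result = cl_io_loop(env, io);	// run the CIT_FAULT state machine
 *	cl_io_fini(env, io);
 */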

/* Shared guts of the page_mkwrite() method (originally split out to share
 * code between rhel5 and rhel6 kernels)
 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio;
        int result;
        __u16 refcheck;
        sigset_t old, new;
        struct inode *inode = NULL;
        struct ll_inode_info *lli;
        ENTRY;

        LASSERT(vmpage != NULL);
        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io = ll_fault_io_init(env, vma, vmpage->index);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result < 0)
                GOTO(out_io, result);

        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma = vma;
        vio->u.fault.ft_vmpage = vmpage;

        /* Block all signals except SIGKILL and SIGTERM while faulting. */
        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        sigprocmask(SIG_SETMASK, &old, NULL);

        if (result == 0) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);

                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault()
                         */
                        result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* race, the page has been cleaned by ptlrpcd after
                         * it was unlocked, it has to be added into dirty
                         * cache again otherwise this soon-to-dirty page won't
                         * consume any grants, even worse if this page is being
                         * transferred because it will break RPC checksum
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP,
                               "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0)
                        set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
        }
        EXIT;

out_io:
        cl_io_fini(env, io);
out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        /* if page has been unmapped, presumably due to lock reclaim for
         * concurrent usage, add some delay before retrying to prevent
         * entering live-lock situation with competitors
         */
        if (result == -ENODATA && inode != NULL) {
                CDEBUG(D_MMAP,
                       "delaying new page-fault for inode %p to prevent live-lock\n",
                       inode);
                msleep(10);
        }

        return result;
}

static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

/**
 * Lustre implementation of a vm_operations_struct::fault() method, called
 * by the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual area struct related to the page fault
 * \param vmf - structure describing the type and address of the fault
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio = NULL;
        struct page *vmpage;
        int result = 0;
        int fault_ret = 0;
        __u16 refcheck;
        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
                /* do fast fault */
                bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;

                /* To avoid loops, instruct downstream to not drop mmap_sem */
                vmf->flags |= FAULT_FLAG_RETRY_NOWAIT;
                ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
                fault_ret = ll_filemap_fault(vma, vmf);
                ll_cl_remove(vma->vm_file, env);
                if (!has_retry)
                        vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;

                /* - If there is no error, then the page was found in cache
                 *   and is uptodate;
                 * - If VM_FAULT_RETRY is set, the page existed but failed to
                 *   lock. We will try slow path to avoid loops.
                 * - Otherwise, it should try normal fault under DLM lock.
                 */
                if (!(fault_ret & VM_FAULT_RETRY) &&
                    !(fault_ret & VM_FAULT_ERROR))
                        GOTO(out, result = 0);

                fault_ret = 0;
        }

        io = ll_fault_io_init(env, vma, vmf->pgoff);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma = vma;
                vio->u.fault.ft_vmpage = NULL;
                vio->u.fault.ft_vmf = vmf;
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = 0;

                /* May call ll_readpage() */
                ll_cl_add(vma->vm_file, env, io, LCC_MMAP);

                result = cl_io_loop(env, io);

                ll_cl_remove(vma->vm_file, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault
                 */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage != NULL) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);

out:
        cl_env_put(env, &refcheck);
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
        RETURN(fault_ret);
}

#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool cached;
        vm_fault_t result;
        ktime_t kstart = ktime_get();
        sigset_t old, new;

        result = pcc_fault(vma, vmf, &cached);
        if (cached)
                goto out;

        CDEBUG(D_MMAP, DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
               PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags);

        /* make sure offset is not a negative number */
        if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
                return VM_FAULT_SIGBUS;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
         * so that the process can be killed by an admin but does not
         * segfault on other signals.
         */
        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

restart:
        result = ll_fault0(vma, vmf);
        if (vmf->page &&
            !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result |= VM_FAULT_LOCKED;
        }
        sigprocmask(SIG_SETMASK, &old, NULL);

out:
        if (vmf->page && result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
                                  READ);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_FAULT,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        return result;
}

#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
                                  struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool retry;
        bool cached;
        ktime_t kstart = ktime_get();
        vm_fault_t result;

        result = pcc_page_mkwrite(vma, vmf, &cached);
        if (cached)
                goto out;

        file_update_time(vma->vm_file);
        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = file_dentry(vma->vm_file);

                        CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        case -EAGAIN:
                result = VM_FAULT_RETRY;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

out:
        if (result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
                                  WRITE);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_MKWRITE,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        return result;
}

/**
 * To avoid cancelling the locks that cover an mmapped region under lock
 * cache pressure, we track the number of mapped vmas in
 * vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        ENTRY;
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
        pcc_vm_open(vma);
        EXIT;
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        ENTRY;
        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        pcc_vm_close(vma);
        EXIT;
}

static const struct vm_operations_struct ll_file_vm_ops = {
        .fault          = ll_fault,
        .page_mkwrite   = ll_page_mkwrite,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
};

int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        ktime_t kstart = ktime_get();
        bool cached;
        int rc;

        ENTRY;
        CDEBUG(D_VFSTRACE | D_MMAP,
               "VFS_Op: fid="DFID" vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
               PFID(&ll_i2info(inode)->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags);

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        rc = pcc_file_mmap(file, vma, &cached);
        if (cached && rc != 0)
                RETURN(rc);

        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                if (!cached)
                        rc = ll_glimpse_size(inode);
        }

        if (rc == 0)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
                                   ktime_us_delta(ktime_get(), kstart));

        RETURN(rc);
}
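
/*
 * Note (illustrative, not from this file): ll_file_mmap() is installed as
 * the ->mmap() method of the llite file_operations tables, e.g.
 *
 *	const struct file_operations ll_file_operations = {
 *		...
 *		.mmap		= ll_file_mmap,
 *	};
 *
 * so an mmap() of a Lustre file ends up installing ll_file_vm_ops above,
 * wiring the fault/page_mkwrite/open/close hooks into the VM.
 */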