 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);

static struct vm_operations_struct ll_file_vm_ops;

void policy_from_vma(ldlm_policy_data_t *policy,
                     struct vm_area_struct *vma, unsigned long addr,
                     size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}
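
/*
 * Illustrative example (editorial sketch, assuming 4K pages, i.e.
 * CFS_PAGE_SHIFT == 12 and CFS_PAGE_MASK == ~(PAGE_SIZE - 1)):
 * for a vma with vm_start = 0x10000000 and vm_pgoff = 2 (the mapping starts
 * at file offset 0x2000), addr = 0x10001234 and count = 1 give
 *
 *      l_extent.start = (0x1234 & CFS_PAGE_MASK) + (2 << 12) = 0x3000
 *      l_extent.end   = (0x3000 + 1 - 1) | ~CFS_PAGE_MASK    = 0x3fff
 *
 * i.e. the lock extent is widened to cover the whole page(s) touched by the
 * mapped range.
 */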

struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* mmap_sem must have been held by caller. */
        LASSERT(!down_write_trylock(&mm->mmap_sem));

        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        RETURN(ret);
}
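
/*
 * Hypothetical usage sketch (not part of the original file): a caller must
 * already hold mmap_sem, as the LASSERT above checks, for example:
 *
 *      down_read(&mm->mmap_sem);
 *      vma = our_vma(mm, addr, count);
 *      if (vma != NULL)
 *              policy_from_vma(&policy, vma, addr, count);
 *      up_read(&mm->mmap_sem);
 */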

/**
 * API independent part for page fault initialization.
 * \param vma - virtual memory area addressed to page fault
 * \param env_ret - corresponding lu_env to process the fault with
 * \param nest - nesting level
 * \param index - page index corresponding to the fault
 * \param ra_flags - saved vma readahead flags
 *
 * \return allocated and initialized env for the fault operation
 * \retval EINVAL if the env cannot be allocated
 * \return other error codes from cl_io_init.
 */
struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
                               struct lu_env **env_ret,
                               struct cl_env_nest *nest,
                               pgoff_t index, unsigned long *ra_flags)
{
        struct file        *file  = vma->vm_file;
        struct inode       *inode = file->f_dentry->d_inode;
        struct cl_io       *io;
        struct cl_fault_io *fio;
        struct lu_env      *env;
        ENTRY;

        *env_ret = NULL;
        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

        /*
         * page fault can be called when lustre IO is
         * already active for the current thread, e.g., when doing read/write
         * against user level buffer mapped from Lustre buffer. To avoid
         * stomping on existing context, optionally force an allocation of a
         * new one.
         */
        env = cl_env_nested_get(nest);
        if (IS_ERR(env))
                RETURN(ERR_PTR(-EINVAL));

        *env_ret = env;

        io = ccc_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. we do our readahead in ll_readpage.
         */
        if (ra_flags != NULL)
                *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
                struct ccc_io *cio = ccc_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(cio->cui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because
                 * it has to cache pages. */
                io->ci_lockreq = CILR_MANDATORY;

                cio->cui_fd = fd;
        }

        return io;
}
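
/*
 * Usage sketch (editorial note, mirroring the callers later in this file):
 * ll_fault_io_init() is paired with cl_io_loop() and cl_env_nested_put(),
 * roughly as follows:
 *
 *      io = ll_fault_io_init(vma, &env, &nest, index, &ra_flags);
 *      if (IS_ERR(io))
 *              return an error to the VM;
 *      vio = vvp_env_io(env);          fill in the vvp_io fault details
 *      result = cl_io_loop(env, io);   run the CIT_FAULT io
 *      cl_env_nested_put(&nest, env);  release the nested environment
 */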

/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env      *env;
        struct cl_io       *io;
        struct vvp_io      *vio;
        struct cl_env_nest  nest;
        int                 result;
        cfs_sigset_t        set;
        ENTRY;

        LASSERT(vmpage != NULL);

        io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;

        /* Don't enqueue new locks for page_mkwrite().
         * If the lock has been cancelled then the page must have been
         * truncated; in that case the kernel will handle it. */
        io->ci_lockreq = CILR_PEEK;
        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma = vma;
        vio->u.fault.ft_vmpage = vmpage;

        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
        result = cl_io_loop(env, io);
        cfs_restore_sigs(set);

        if (result == -ENODATA) /* peek failed, no lock caching. */
                CDEBUG(D_MMAP, "race on page_mkwrite: %lx (%lu %p)\n",
                       vma->vm_flags, io->u.ci_fault.ft_index, vmpage);

        if (result == 0 || result == -ENODATA) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);

                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault(). */
                        if (result == 0)
                                result = -ENODATA;
                } else if (result == -ENODATA) {
                        /* Invalidate it if the cl_lock is being revoked.
                         * This piece of code is definitely needed for RHEL5,
                         * otherwise, SIGBUS will be wrongly returned to
                         * applications. */
                        write_one_page(vmpage, 1);
                        lock_page(vmpage);
                        if (vmpage->mapping != NULL) {
                                ll_invalidate_page(vmpage);
                                LASSERT(vmpage->mapping == NULL);
                        }
                        unlock_page(vmpage);
                } else if (!PageDirty(vmpage)) {
                        /* race, the page has been cleaned by ptlrpcd after
                         * it was unlocked, it has to be added into dirty
                         * cache again otherwise this soon-to-dirty page won't
                         * consume any grants, even worse if this page is being
                         * transferred because it will break RPC checksum. */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
                               "been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }
        }

out:
        cl_env_nested_put(&nest, env);

        CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        return result;
}

#ifndef HAVE_VM_OP_FAULT
/**
 * Lustre implementation of a vm_operations_struct::nopage() method, called by
 * the VM to service a page fault (both in kernel and user space).
 *
 * This function sets up a CIT_FAULT cl_io that does the job.
 *
 * \param vma - virtual memory area corresponding to the page fault
 * \param address - the faulting address
 * \param type - type of the fault
 *
 * \return allocated and filled _unlocked_ page for the address
 * \retval NOPAGE_SIGBUS if no page exists at this address
 * \retval NOPAGE_OOM if there is not enough memory to allocate a new page
 */
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
{
        struct lu_env      *env;
        struct cl_io       *io;
        struct cl_env_nest  nest;
        struct page        *page = NOPAGE_SIGBUS;
        struct vvp_io      *vio = NULL;
        unsigned long       ra_flags;
        pgoff_t             pg_offset;
        int                 result;
        cfs_sigset_t        set;
        const unsigned long writable = VM_SHARED|VM_WRITE;
        ENTRY;

        pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        io = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags);
        if (IS_ERR(io))
                return NOPAGE_SIGBUS;

        result = io->ci_result;

        io->u.ci_fault.ft_writable = (vma->vm_flags & writable) == writable;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma = vma;
        vio->u.fault.nopage.ft_address = address;
        vio->u.fault.nopage.ft_type = type;
        vio->u.fault.ft_vmpage = NULL;

        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
        result = cl_io_loop(env, io);
        cfs_restore_sigs(set);

        page = vio->u.fault.ft_vmpage;
        if (result != 0 && page != NULL) {
                page_cache_release(page);
                page = NOPAGE_SIGBUS;
        }

        if (result == -ENOMEM)
                page = NOPAGE_OOM;

        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= ra_flags;

        cl_env_nested_put(&nest, env);

        RETURN(page);
}

#else

static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

/**
 * Lustre implementation of a vm_operations_struct::fault() method, called by
 * the VM to service a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area corresponding to the page fault
 * \param vmf - structure describing the type and address of the fault
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval VM_FAULT_OOM if there is not enough memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env      *env;
        struct cl_io       *io;
        struct vvp_io      *vio = NULL;
        struct page        *vmpage;
        unsigned long       ra_flags;
        struct cl_env_nest  nest;
        int                 result;
        int                 fault_ret = 0;
        ENTRY;

        io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
        if (IS_ERR(io))
                RETURN(to_fault_error(PTR_ERR(io)));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma = vma;
                vio->u.fault.ft_vmpage = NULL;
                vio->u.fault.fault.ft_vmf = vmf;

                result = cl_io_loop(env, io);

                fault_ret = vio->u.fault.fault.ft_flags;
                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage != NULL) {
                        page_cache_release(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);

        vma->vm_flags |= ra_flags;
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n",
               cfs_current()->comm, fault_ret, result);

        RETURN(fault_ret);
}

static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int count = 0;
        bool printed = false;
        int result;
        cfs_sigset_t set;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
         * so that the process can be killed by the admin but does not get
         * a segfault from other signals. */
        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
        result = ll_fault0(vma, vmf);
        LASSERT(!(result & VM_FAULT_LOCKED));
        if (result == 0) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
                        unlock_page(vmpage);
                        page_cache_release(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, "
                                      "maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result |= VM_FAULT_LOCKED;
        }
        cfs_restore_sigs(set);
        return result;
}
#endif

#ifndef HAVE_PGMKWRITE_USE_VMFAULT
static int ll_page_mkwrite(struct vm_area_struct *vma, struct page *vmpage)
{
        int count = 0;
        bool printed = false;
        bool retry;
        int result;

        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmpage, &retry);

                if (!printed && ++count > 16) {
                        CWARN("app(%s): the page %lu of file %lu is under heavy"
                              " contention.\n",
                              current->comm, page_index(vmpage),
                              vma->vm_file->f_dentry->d_inode->i_ino);
                        printed = true;
                }
        } while (retry);

        if (result == 0)
                unlock_page(vmpage);
        else if (result == -ENODATA)
                result = 0; /* kernel will know truncate has happened and
                             * retry the fault handling. */

        return result;
}
#else
static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int count = 0;
        bool printed = false;
        bool retry;
        int result;

        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        CWARN("app(%s): the page %lu of file %lu is under heavy"
                              " contention.\n",
                              current->comm, vmf->pgoff,
                              vma->vm_file->f_dentry->d_inode->i_ino);
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        case -EAGAIN:
                result = VM_FAULT_RETRY;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

        return result;
}
#endif

/**
 * To avoid cancelling the locks covering a mmapped region due to lock cache
 * pressure, we track the number of mapped vmas in ccc_object::cob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ccc_object *vob = cl_inode2ccc(inode);

        LASSERT(vma->vm_file);
        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
        cfs_atomic_inc(&vob->cob_mmap_cnt);
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct ccc_object *vob = cl_inode2ccc(inode);

        LASSERT(vma->vm_file);
        cfs_atomic_dec(&vob->cob_mmap_cnt);
        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
}

#ifndef HAVE_VM_OP_FAULT
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area, unsigned long address,
                               unsigned long len, pgprot_t prot,
                               unsigned long pgoff, int nonblock);
#endif

static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock to true to avoid page readahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);

        RETURN(rc);
}
#endif

/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}
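
/*
 * Illustrative example (editorial sketch, assuming 4K pages): file_to_user()
 * inverts the offset mapping used in policy_from_vma().  With
 * vm_start = 0x10000000 and vm_pgoff = 2, the file byte 0x3000 maps back to
 * the user address 0x10000000 + (0x3000 - 0x2000) = 0x10001000.
 */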

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}

static struct vm_operations_struct ll_file_vm_ops = {
#ifndef HAVE_VM_OP_FAULT
        .nopage                 = ll_nopage,
        .populate               = ll_populate,
#else
        .fault                  = ll_fault,
#endif
#ifndef HAVE_PGMKWRITE_COMPACT
        .page_mkwrite           = ll_page_mkwrite,
#else
        ._pmkw.page_mkwrite     = ll_page_mkwrite,
#endif
        .open                   = ll_vm_open,
        .close                  = ll_vm_close,
};

int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(inode);
        }

        RETURN(rc);
}