/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

/* Forward declaration: our_vma() below compares vma->vm_ops against it. */
static const struct vm_operations_struct ll_file_vm_ops;
void policy_from_vma(ldlm_policy_data_t *policy,
                     struct vm_area_struct *vma, unsigned long addr,
                     size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}
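
/*
 * Illustrative example, assuming 4 KiB pages and vm_pgoff == 0: a fault
 * at addr == vma->vm_start + 0x2400 with count == 1 yields
 * l_extent.start == 0x2000 and l_extent.end == 0x2fff, i.e. the lock
 * extent is exactly the page containing the faulting byte, expanded to
 * page boundaries by CFS_PAGE_MASK.
 */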
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* mmap_sem must have been held by caller. */
        LASSERT(!down_write_trylock(&mm->mmap_sem));

        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        RETURN(ret);
}
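
/*
 * Note: only vmas whose vm_ops point at ll_file_vm_ops and that carry
 * VM_SHARED qualify, so our_vma() returns the first shared Lustre file
 * mapping overlapping [addr, addr + count), or NULL if there is none.
 */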
/**
 * API independent part for page fault initialization.
 * \param vma - virtual memory area addressed to page fault
 * \param env_ret - corresponding lu_env for processing
 * \param nest - nested level
 * \param index - page index corresponding to fault.
 * \param ra_flags - vma readahead flags.
 *
 * \return allocated and initialized env for fault operation.
 * \retval EINVAL if env can't be allocated
 * \return other error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
                 struct cl_env_nest *nest, pgoff_t index,
                 unsigned long *ra_flags)
{
        struct file        *file = vma->vm_file;
        struct inode       *inode = file->f_dentry->d_inode;
        struct cl_io       *io;
        struct cl_fault_io *fio;
        struct lu_env      *env;
        int                 rc;
        ENTRY;

        *env_ret = NULL;
        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

        /*
         * page fault can be called when lustre IO is
         * already active for the current thread, e.g., when doing read/write
         * against user level buffer mapped from Lustre buffer. To avoid
         * stomping on existing context, optionally force an allocation of a
         * new one.
         */
        env = cl_env_nested_get(nest);
        if (IS_ERR(env))
                RETURN(ERR_PTR(-EINVAL));

        *env_ret = env;

        io = ccc_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index      = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. We do our readahead in ll_readpage.
         */
        if (ra_flags != NULL)
                *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct ccc_io *cio = ccc_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(cio->cui_cl.cis_io == io);

                /* mmap lock must be MANDATORY; the io has to cache pages. */
                io->ci_lockreq = CILR_MANDATORY;

                cio->cui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                cl_env_nested_put(nest, env);
                io = ERR_PTR(rc);
        }

        RETURN(io);
}
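
/*
 * Note: on success the caller owns both the cl_io and the nested env and
 * must release them with cl_io_fini() and cl_env_nested_put(), as
 * ll_fault0() and ll_page_mkwrite0() below do; on failure everything is
 * released here and an ERR_PTR is returned instead.
 */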
/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env        *env;
        struct cl_io         *io;
        struct vvp_io        *vio;
        struct cl_env_nest    nest;
        int                   result;
        cfs_sigset_t          set;
        struct inode         *inode;
        struct ll_inode_info *lli;
        ENTRY;

        LASSERT(vmpage != NULL);

        io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result < 0)
                GOTO(out_io, result);

        io->u.ci_fault.ft_mkwrite  = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma    = vma;
        vio->u.fault.ft_vmpage = vmpage;

        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

        /* we grab lli_trunc_sem to exclude truncate case.
         * Otherwise, we could add dirty pages into osc cache
         * while truncate is on-going. */
        inode = ccc_object_inode(io->ci_obj);
        lli = ll_i2info(inode);
        down_read(&lli->lli_trunc_sem);

        result = cl_io_loop(env, io);

        up_read(&lli->lli_trunc_sem);
        cfs_restore_sigs(set);

        if (result == 0) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);

                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault(). */
                        result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* race, the page has been cleaned by ptlrpcd after
                         * it was unlocked, it has to be added into dirty
                         * cache again otherwise this soon-to-dirty page won't
                         * consume any grants, even worse if this page is being
                         * transferred because it will break RPC checksum.
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
                               "been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0) {
                        spin_lock(&lli->lli_lock);
                        lli->lli_flags |= LLIF_DATA_MODIFIED;
                        spin_unlock(&lli->lli_lock);
                }
        }

out_io:
        cl_io_fini(env, io);
out:
        cl_env_nested_put(&nest, env);

        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        return result;
}
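
/*
 * Note on the contract above: 0 means the page is returned locked with
 * LLIF_DATA_MODIFIED recorded; -ENODATA means the page was truncated
 * under us and maps to VM_FAULT_NOPAGE; -EAGAIN with *retry set means
 * writeback cleaned the page concurrently and the caller must redo the
 * whole mkwrite.
 */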
static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:       result = VM_FAULT_LOCKED; break;
        case -EFAULT: result = VM_FAULT_NOPAGE; break;
        case -ENOMEM: result = VM_FAULT_OOM;    break;
        default:      result = VM_FAULT_SIGBUS; break;
        }
        return result;
}
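
/*
 * Note: both failure paths of ll_fault0() below funnel through
 * to_fault_error(), the ll_fault_io_init() error and a non-zero
 * cl_io_loop() result, so the VM only ever sees VM_FAULT_* codes and
 * never a raw negative errno.
 */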
/**
 * Lustre implementation of a vm_operations_struct::fault() method, called by
 * the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual area struct related to page fault
 * \param vmf - structure which describes the type and address of the fault
 *
 * \return allocated and filled _locked_ page for address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env      *env;
        struct cl_io       *io;
        struct vvp_io      *vio = NULL;
        struct page        *vmpage;
        unsigned long       ra_flags;
        struct cl_env_nest  nest;
        int                 result;
        int                 fault_ret = 0;
        ENTRY;

        io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
        if (IS_ERR(io))
                RETURN(to_fault_error(PTR_ERR(io)));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma       = vma;
                vio->u.fault.ft_vmpage    = NULL;
                vio->u.fault.fault.ft_vmf = vmf;
                vio->u.fault.fault.ft_flags = 0;
                vio->u.fault.fault.ft_flags_valid = 0;

                /* May call ll_readpage() */
                ll_cl_add(vma->vm_file, env, io);

                result = cl_io_loop(env, io);

                ll_cl_remove(vma->vm_file, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault */
                if (vio->u.fault.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage != NULL) {
                        page_cache_release(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);

        vma->vm_flags |= ra_flags;
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n",
               current->comm, fault_ret, result);
        RETURN(fault_ret);
}
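
/*
 * Note: when filemap_fault() reported VM_FAULT_RETRY in ft_flags, no
 * error bits are ORed in above; the kernel drops mmap_sem and will call
 * ->fault() again by itself.
 */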
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int count = 0;
        bool printed = false;
        int result;
        cfs_sigset_t set;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
         * so that the process can be killed by an admin but cannot be made
         * to segfault by other signals. */
        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
        result = ll_fault0(vma, vmf);
        LASSERT(!(result & VM_FAULT_LOCKED));
        if (result == 0) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
                        unlock_page(vmpage);
                        page_cache_release(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, "
                                      "maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }
                        goto restart;
                }
                result |= VM_FAULT_LOCKED;
        }
        cfs_restore_sigs(set);
        return result;
}
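
/*
 * Note: both the fault path above and the mkwrite path block all signals
 * except SIGKILL and SIGTERM around cl_io_loop(), so an administrator
 * can still kill a process stuck in a fault while other signals cannot
 * make the application segfault halfway through.
 */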
static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int count = 0;
        bool printed = false;
        bool retry;
        int result;

        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        CWARN("app(%s): the page %lu of file "DFID" is under"
                              " heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(vma->vm_file->f_dentry->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:  result = VM_FAULT_NOPAGE; break;
        case -ENOMEM:  result = VM_FAULT_OOM;    break;
        case -EAGAIN:  result = VM_FAULT_RETRY;  break;
        default:       result = VM_FAULT_SIGBUS; break;
        }
        return result;
}
/**
 * To avoid cancelling the locks covering a mmapped region under lock cache
 * pressure, we track the mapped vma count in ccc_object::cob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode      *inode = vma->vm_file->f_dentry->d_inode;
        struct ccc_object *vob   = cl_inode2ccc(inode);

        LASSERT(vma->vm_file);
        LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
        atomic_inc(&vob->cob_mmap_cnt);
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode      *inode = vma->vm_file->f_dentry->d_inode;
        struct ccc_object *vob   = cl_inode2ccc(inode);

        LASSERT(vma->vm_file);
        atomic_dec(&vob->cob_mmap_cnt);
        LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
}
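
/*
 * Note: the VM invokes ->open() whenever a vma on this file is
 * duplicated or split (e.g. on fork) and ->close() when one is torn
 * down, so cob_mmap_cnt tracks the number of live mappings of the
 * object, which is what the lock-cancel path consults per the comment
 * above.
 */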
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
                                    last - first + 1, 0);
        }
        RETURN(rc);
}
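
/*
 * Note: unmap_mapping_range() works in whole pages; the
 * PAGE_CACHE_SIZE - 1 bias makes the start round up to a page boundary
 * once the kernel shifts it down, last - first + 1 is the hole length
 * in bytes, and even_cows == 0 leaves private COW copies of the pages
 * in place.
 */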
static const struct vm_operations_struct ll_file_vm_ops = {
        .fault          = ll_fault,
        .page_mkwrite   = ll_page_mkwrite,
        .open           = ll_vm_open,
        .close          = ll_vm_close,
};
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
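                /*
                 * Note: the VM only calls vm_ops->open() when an existing vma
                 * is duplicated or split (e.g. on fork), not for the initial
                 * mmap(), so the first mapping is accounted in cob_mmap_cnt
                 * by invoking ->open() explicitly here.
                 */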
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(inode);
        }

        RETURN(rc);
}