/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>
#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
#define VMA_DEBUG(vma, fmt, arg...)                                          \
        CDEBUG(D_MMAP, "vma(%p) start(%lu) end(%lu) pgoff(%lu) inode(%p) "   \
               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
               vma->vm_file->f_dentry->d_inode->i_ino,                       \
               vma->vm_file->f_dentry->d_iname, ## arg)
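
/*
 * Illustrative usage sketch (not from the original source): VMA_DEBUG takes
 * a printf-style format plus arguments and is filtered by CDEBUG under the
 * D_MMAP debug mask, so a call site would look like this fragment.
 */
#if 0
        VMA_DEBUG(vma, "mapping with flags %lx\n", vma->vm_flags);
#endif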
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type);

static struct vm_operations_struct ll_file_vm_ops;
void policy_from_vma(ldlm_policy_data_t *policy,
                     struct vm_area_struct *vma, unsigned long addr,
                     size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~CFS_PAGE_MASK;
}
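
/*
 * Worked example (illustrative, assuming 4 KiB pages, CFS_PAGE_SHIFT == 12):
 * for a vma with vm_start = 0x8000 and vm_pgoff = 4, a fault at
 * addr = 0x9000 with count = 8192 yields
 *   l_extent.start = ((0x9000 - 0x8000) & CFS_PAGE_MASK) + (4 << 12)
 *                  = 0x1000 + 0x4000 = 0x5000
 *   l_extent.end   = (0x5000 + 8192 - 1) | ~CFS_PAGE_MASK = 0x6fff
 * i.e. the lock extent covers the accessed file range rounded out to page
 * boundaries.
 */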
struct vm_area_struct *our_vma(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* No MM (e.g. NFS)? Then there are no VMAs either. */
        if (!mm)
                RETURN(NULL);

        spin_lock(&mm->page_table_lock);
        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        spin_unlock(&mm->page_table_lock);

        RETURN(ret);
}
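
/*
 * Usage sketch (hypothetical caller, not part of this file): an I/O path can
 * use our_vma() to detect that a user buffer is itself a shared mapping of a
 * Lustre file served by these vm operations, e.g. to adjust locking and
 * avoid self-deadlock.
 */
#if 0
static int buffer_is_lustre_mmap(const char __user *buf, size_t count)
{
        return our_vma((unsigned long)buf, count) != NULL;
}
#endif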
/**
 * API-independent part of page fault initialization.
 * \param vma - virtual memory area addressed by the page fault
 * \param env_ret - the corresponding lu_env, allocated here and returned
 *                  to the caller
 * \param nest - nesting level
 * \param index - page index corresponding to the fault
 * \param ra_flags - the vma readahead flags, saved for later restoration
 *
 * \return 0 on success, with an allocated and initialized env for the
 *         fault operation stored in \a env_ret
 * \retval -EINVAL if the env can't be allocated
 * \return other error codes from cl_io_init.
 */
int ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
                     struct cl_env_nest *nest, pgoff_t index,
                     unsigned long *ra_flags)
{
        struct file        *file  = vma->vm_file;
        struct inode       *inode = file->f_dentry->d_inode;
        const unsigned long writable = VM_SHARED|VM_WRITE;
        struct cl_io       *io;
        struct cl_fault_io *fio;
        struct lu_env      *env;
        ENTRY;

        *env_ret = NULL;
        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        /*
         * page fault can be called when lustre IO is
         * already active for the current thread, e.g., when doing read/write
         * against user level buffer mapped from Lustre buffer. To avoid
         * stomping on existing context, optionally force an allocation of a
         * new one.
         */
        env = cl_env_nested_get(nest);
        if (IS_ERR(env))
                RETURN(-EINVAL);

        *env_ret = env;

        io = &ccc_env_info(env)->cti_io;
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index      = index;
        fio->ft_writable   = (vma->vm_flags & writable) == writable;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage.  We do our readahead in ll_readpage.
         */
        *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_INFO, "vm_flags: %lx (%lu %i %i)\n", vma->vm_flags,
               fio->ft_index, fio->ft_writable, fio->ft_executable);

        if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
                struct ccc_io *cio = ccc_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(cio->cui_cl.cis_io == io);

                /* mmap lock must be MANDATORY,
                 * as the io has to cache pages. */
                io->ci_lockreq = CILR_MANDATORY;
                cio->cui_fd    = fd;
        }

        return io->ci_result;
}
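
/*
 * Caller-side sketch (a fragment mirroring ll_nopage()/ll_fault() below;
 * names and error handling elided). Once *env_ret comes back non-NULL the
 * caller owns cleanup: run the io with cl_io_loop(), then cl_io_fini() and
 * cl_env_nested_put(), and restore the vm_flags saved in ra_flags.
 */
#if 0
        result = ll_fault_io_init(vma, &env, &nest, index, &ra_flags);
        if (env != NULL) {
                if (result == 0)
                        result = cl_io_loop(env, &ccc_env_info(env)->cti_io);
                vma->vm_flags &= ~VM_RAND_READ;
                vma->vm_flags |= ra_flags;
                cl_io_fini(env, &ccc_env_info(env)->cti_io);
                cl_env_nested_put(&nest, env);
        }
#endif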
#ifndef HAVE_VM_OP_FAULT
/**
 * Lustre implementation of a vm_operations_struct::nopage() method, called
 * by the VM to service a page fault (in both kernel and user space).
 *
 * This function sets up a CIT_FAULT cl_io that does the job.
 *
 * \param vma - virtual memory area the page fault hit
 * \param address - address at which the fault hit
 * \param type - type of the fault
 *
 * \return allocated and filled _unlocked_ page for the address
 * \retval NOPAGE_SIGBUS if no page exists at this address
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                       int *type)
{
        struct lu_env *env;
        struct cl_env_nest nest;
        struct cl_io *io;
        struct page *page = NOPAGE_SIGBUS;
        struct vvp_io *vio = NULL;
        unsigned long ra_flags;
        pgoff_t pg_offset;
        int result;
        ENTRY;

        pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        result = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags);
        if (env == NULL)
                return NOPAGE_SIGBUS;

        io = &ccc_env_info(env)->cti_io;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma            = vma;
                vio->u.fault.nopage.ft_address = address;
                vio->u.fault.nopage.ft_type    = type;

                result = cl_io_loop(env, io);
        }

        if (result == 0) {
                LASSERT(io->u.ci_fault.ft_page != NULL);
                page = vio->u.fault.ft_vmpage;
        } else if (result == -ENOMEM) {
                page = NOPAGE_OOM;
        }

        vma->vm_flags &= ~VM_RAND_READ;
        vma->vm_flags |= ra_flags;

        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);
        RETURN(page);
}
#else
/**
 * Lustre implementation of a vm_operations_struct::fault() method, called
 * by the VM to service a page fault (in both kernel and user space).
 *
 * \param vma - virtual memory area the page fault hit
 * \param vmf - structure describing the type and address of the fault
 *
 * \return fault flags; on success the allocated and filled _locked_ page
 *         for the address is returned through \a vmf
 * \retval VM_FAULT_ERROR on general error
 * \retval VM_FAULT_OOM if there is no memory to allocate a new page
 */
int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio = NULL;
        struct cl_env_nest nest;
        unsigned long ra_flags;
        int result, fault_ret = 0;
        ENTRY;

        result = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
        if (env == NULL)
                RETURN(VM_FAULT_ERROR);

        io = &ccc_env_info(env)->cti_io;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma       = vma;
                vio->u.fault.fault.ft_vmf = vmf;

                result = cl_io_loop(env, io);
                fault_ret = vio->u.fault.fault.ft_flags;
        }

        if (result != 0)
                fault_ret |= VM_FAULT_ERROR;

        vma->vm_flags |= ra_flags;

        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);
        RETURN(fault_ret);
}
#endif
/**
 * To avoid cancelling locks that cover an mmapped region under lock cache
 * pressure, we track the mapped vma count in ccc_object::cob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode    = vma->vm_file->f_dentry->d_inode;
        struct ccc_object *vob = cl_inode2ccc(inode);

        ENTRY;
        LASSERT(vma->vm_file);
        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
        cfs_atomic_inc(&vob->cob_mmap_cnt);
        EXIT;
}
/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode    = vma->vm_file->f_dentry->d_inode;
        struct ccc_object *vob = cl_inode2ccc(inode);

        ENTRY;
        LASSERT(vma->vm_file);
        cfs_atomic_dec(&vob->cob_mmap_cnt);
        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
        EXIT;
}
#ifndef HAVE_VM_OP_FAULT
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct *area,
                               unsigned long address, unsigned long len,
                               pgprot_t prot, unsigned long pgoff,
                               int nonblock);
#endif
static int ll_populate(struct vm_area_struct *area, unsigned long address,
                       unsigned long len, pgprot_t prot, unsigned long pgoff,
                       int nonblock)
{
        int rc = 0;
        ENTRY;

        /* always set nonblock as true to avoid page read ahead */
        rc = filemap_populate(area, address, len, prot, pgoff, 1);
        RETURN(rc);
}
#endif
/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
        return vma->vm_start +
               (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
}
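
/*
 * Worked example (illustrative, assuming 4 KiB pages, CFS_PAGE_SHIFT == 12):
 * with vm_start = 0x10000 and vm_pgoff = 2 (the mapping starts at file
 * offset 0x2000), file byte 0x2f00 maps to user address
 *   0x10000 + (0x2f00 - (2 << 12)) = 0x10f00.
 */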
/* XXX put a nice comment here.  Talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte. */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}
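
/*
 * Usage sketch (hypothetical, not from this file): a DLM blocking callback
 * about to cancel an extent lock can strip the page table entries covering
 * the lock's byte range, so later accesses re-fault and re-acquire a lock.
 * lock_start and lock_end are hypothetical names for the lock's extent.
 */
#if 0
        if (ll_teardown_mmaps(inode->i_mapping, lock_start, lock_end) == 0)
                CDEBUG(D_MMAP, "unmapped ptes in ["LPU64", "LPU64"]\n",
                       lock_start, lock_end);
#endif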
static struct vm_operations_struct ll_file_vm_ops = {
#ifndef HAVE_VM_OP_FAULT
        .nopage   = ll_nopage,
        .populate = ll_populate,
#else
        .fault    = ll_fault,
#endif
        .open     = ll_vm_open,
        .close    = ll_vm_close,
};
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
                if (!filemap_populate)
                        filemap_populate = vma->vm_ops->populate;
#endif
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = cl_glimpse_size(inode);
        }

        RETURN(rc);
}
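
/*
 * Wiring sketch (hypothetical, simplified): ll_file_mmap is meant to be
 * installed as the .mmap method of the llite file_operations, so mmap(2)
 * on a Lustre file reaches it through generic VFS dispatch.
 */
#if 0
static struct file_operations ll_file_operations_example = {
        .mmap = ll_file_mmap,
};
#endif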