struct cl_io *io;
struct cl_fault_io *fio;
int rc;
+
ENTRY;
if (ll_file_nolock(file))
LASSERT(vio->vui_cl.cis_io == io);
- /* mmap lock must be MANDATORY it has to cache
- * pages. */
+	/* mmap lock must be MANDATORY as it has to cache pages. */
io->ci_lockreq = CILR_MANDATORY;
vio->vui_fd = fd;
} else {
/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
- bool *retry)
+ bool *retry)
{
struct lu_env *env;
struct cl_io *io;
sigset_t old, new;
struct inode *inode = NULL;
struct ll_inode_info *lli;
+
ENTRY;
LASSERT(vmpage != NULL);
sigprocmask(SIG_SETMASK, &old, NULL);
- if (result == 0) {
- lock_page(vmpage);
- if (vmpage->mapping == NULL) {
- unlock_page(vmpage);
-
- /* page was truncated and lock was cancelled, return
- * ENODATA so that VM_FAULT_NOPAGE will be returned
- * to handle_mm_fault(). */
- if (result == 0)
- result = -ENODATA;
- } else if (!PageDirty(vmpage)) {
- /* race, the page has been cleaned by ptlrpcd after
- * it was unlocked, it has to be added into dirty
- * cache again otherwise this soon-to-dirty page won't
- * consume any grants, even worse if this page is being
- * transferred because it will break RPC checksum.
- */
- unlock_page(vmpage);
-
- CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
- "been written out, retry.\n",
- vmpage, vmpage->index);
-
- *retry = true;
- result = -EAGAIN;
- }
+ if (result == 0) {
+ lock_page(vmpage);
+ if (vmpage->mapping == NULL) {
+ unlock_page(vmpage);
+
+ /* page was truncated and lock was cancelled, return
+ * ENODATA so that VM_FAULT_NOPAGE will be returned
+ * to handle_mm_fault().
+ */
+ if (result == 0)
+ result = -ENODATA;
+ } else if (!PageDirty(vmpage)) {
+ /* race, the page has been cleaned by ptlrpcd after
+ * it was unlocked, it has to be added into dirty
+ * cache again otherwise this soon-to-dirty page won't
+ * consume any grants, even worse if this page is being
+ * transferred because it will break RPC checksum.
+ */
+ unlock_page(vmpage);
+
+ CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
+ vmpage, vmpage->index);
+
+ *retry = true;
+ result = -EAGAIN;
+ }
if (result == 0)
set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
- }
- EXIT;
+ }
+ EXIT;
out_io:
cl_io_fini(env, io);
* entering live-lock situation with competitors
*/
if (result == -ENODATA && inode != NULL) {
- CDEBUG(D_MMAP, "delaying new page-fault for inode %p to "
- "prevent live-lock\n", inode);
+ CDEBUG(D_MMAP, "delaying new page-fault for inode %p to prevent live-lock\n",
+ inode);
msleep(10);
}
static inline int to_fault_error(int result)
{
- switch(result) {
+ switch (result) {
case 0:
result = VM_FAULT_LOCKED;
break;
int result = 0;
int fault_ret = 0;
__u16 refcheck;
+
ENTRY;
env = cl_env_get(&refcheck);
* uptodate;
* - If VM_FAULT_RETRY is set, the page existed but failed to
* lock. We will try slow path to avoid loops.
- * - Otherwise, it should try normal fault under DLM lock. */
+ * - Otherwise, it should try normal fault under DLM lock.
+ */
if (!(fault_ret & VM_FAULT_RETRY) &&
!(fault_ret & VM_FAULT_ERROR))
GOTO(out, result = 0);
ll_cl_remove(inode, env);
- /* ft_flags are only valid if we reached
- * the call to filemap_fault */
+ /* ft_flags are only valid if we reached ll_filemap_fault() */
if (vio->u.fault.ft_flags_valid)
fault_ret = vio->u.fault.ft_flags;
put_page(vmpage);
vmf->page = NULL;
}
- }
+ }
cl_io_fini(env, io);
out:
CDEBUG(D_MMAP|D_IOTRACE,
"START file %s:"DFID", vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
file_dentry(vma->vm_file)->d_name.name,
- PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
- vma, vma->vm_start, vma->vm_end, vma->vm_flags, vmf->page->index);
+ PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid), vma,
+ vma->vm_start, vma->vm_end, vma->vm_flags, vmf->page->index);
result = pcc_page_mkwrite(vma, vmf, &cached);
if (cached)
* To avoid cancel the locks covering mmapped region for lock cache pressure,
* we track the mapped vma count in vvp_object::vob_mmap_cnt.
*/
-static void ll_vm_open(struct vm_area_struct * vma)
+static void ll_vm_open(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
struct vvp_object *vob = cl_inode2vvp(inode);
.close = ll_vm_close,
};
-int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
+int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
ktime_t kstart = ktime_get();