X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fllite_mmap.c;h=72acd3dbeab53d365f960a68d55c590d6820c445;hb=4533271278;hp=aea44c7bd3b68b0066535ad088da287f7339c2a6;hpb=9765c6174ef580fb4deef4e7faea6d5ed634b00f;p=fs%2Flustre-release.git

diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index aea44c7..72acd3d 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -87,46 +87,27 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
 /**
  * API independent part for page fault initialization.
- * \param vma - virtual memory area addressed to page fault
  * \param env - corespondent lu_env to processing
- * \param nest - nested level
+ * \param vma - virtual memory area addressed to page fault
  * \param index - page index corespondent to fault.
  * \parm ra_flags - vma readahead flags.
  *
- * \return allocated and initialized env for fault operation.
- * \retval EINVAL if env can't allocated
- * \return other error codes from cl_io_init.
+ * \return error codes from cl_io_init.
  */
 static struct cl_io *
-ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
-                 struct cl_env_nest *nest, pgoff_t index,
-                 unsigned long *ra_flags)
+ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
+                 pgoff_t index, unsigned long *ra_flags)
 {
         struct file *file = vma->vm_file;
         struct inode *inode = file->f_path.dentry->d_inode;
         struct cl_io *io;
         struct cl_fault_io *fio;
-        struct lu_env *env;
         int rc;
         ENTRY;
 
-        *env_ret = NULL;
         if (ll_file_nolock(file))
                 RETURN(ERR_PTR(-EOPNOTSUPP));
 
-        /*
-         * page fault can be called when lustre IO is
-         * already active for the current thread, e.g., when doing read/write
-         * against user level buffer mapped from Lustre buffer. To avoid
-         * stomping on existing context, optionally force an allocation of a new
-         * one.
-         */
-        env = cl_env_nested_get(nest);
-        if (IS_ERR(env))
-                RETURN(ERR_PTR(-EINVAL));
-
-        *env_ret = env;
-
 restart:
         io = vvp_env_thread_io(env);
         io->ci_obj = ll_i2info(inode)->lli_clob;
@@ -166,7 +147,6 @@ restart:
                 if (io->ci_need_restart)
                         goto restart;
 
-                cl_env_nested_put(nest, env);
                 io = ERR_PTR(rc);
         }
 
@@ -180,16 +160,19 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
         struct lu_env *env;
         struct cl_io *io;
         struct vvp_io *vio;
-        struct cl_env_nest nest;
         int result;
+        __u16 refcheck;
         sigset_t set;
         struct inode *inode;
         struct ll_inode_info *lli;
         ENTRY;
 
         LASSERT(vmpage != NULL);
+        env = cl_env_get(&refcheck);
+        if (IS_ERR(env))
+                RETURN(PTR_ERR(env));
 
-        io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+        io = ll_fault_io_init(env, vma, vmpage->index, NULL);
         if (IS_ERR(io))
                 GOTO(out, result = PTR_ERR(io));
 
@@ -247,8 +230,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 
 out_io:
         cl_io_fini(env, io);
-        cl_env_nested_put(&nest, env);
 out:
+        cl_env_put(env, &refcheck);
         CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
         LASSERT(ergo(result == 0, PageLocked(vmpage)));
 
@@ -292,14 +275,18 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
         struct vvp_io *vio = NULL;
         struct page *vmpage;
         unsigned long ra_flags;
-        struct cl_env_nest nest;
-        int result;
+        int result = 0;
         int fault_ret = 0;
+        __u16 refcheck;
         ENTRY;
 
-        io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
+        env = cl_env_get(&refcheck);
+        if (IS_ERR(env))
+                RETURN(PTR_ERR(env));
+
+        io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
         if (IS_ERR(io))
-                RETURN(to_fault_error(PTR_ERR(io)));
+                GOTO(out, result = PTR_ERR(io));
 
         result = io->ci_result;
         if (result == 0) {
@@ -329,14 +316,15 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
                 }
         }
         cl_io_fini(env, io);
-        cl_env_nested_put(&nest, env);
 
         vma->vm_flags |= ra_flags;
+
+out:
+        cl_env_put(env, &refcheck);
         if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                 fault_ret |= to_fault_error(result);
 
-        CDEBUG(D_MMAP, "%s fault %d/%d\n",
-               current->comm, fault_ret, result);
+        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
         RETURN(fault_ret);
 }
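
For context, the patch replaces the nested environment helpers (cl_env_nested_get()/cl_env_nested_put() paired with a struct cl_env_nest on the stack) with the reference-checked helpers cl_env_get()/cl_env_put(), and ll_fault_io_init() now receives the env as an argument instead of returning one through env_ret. The sketch below is only an illustration of that usage pattern as it appears in the diff; ll_do_fault_work() is a hypothetical placeholder, not a real Lustre function, and the exact headers needed depend on the tree.

        /*
         * Minimal sketch of the cl_env_get()/cl_env_put() pattern adopted
         * by this patch. Only the calls visible in the diff are assumed;
         * ll_do_fault_work() is hypothetical.
         */
        static int ll_env_pattern_example(struct vm_area_struct *vma,
                                          pgoff_t index)
        {
                struct lu_env *env;
                __u16 refcheck;
                int rc;

                /* Take a reference-checked env instead of a nested one. */
                env = cl_env_get(&refcheck);
                if (IS_ERR(env))
                        return PTR_ERR(env);

                rc = ll_do_fault_work(env, vma, index); /* hypothetical */

                /* Release with the matching refcheck cookie. */
                cl_env_put(env, &refcheck);
                return rc;
        }

With cl_env_nest gone, each caller obtains and releases its own env and pairs it with a refcheck cookie, which is why ll_page_mkwrite0() and ll_fault0() both gain the cl_env_get()/cl_env_put() calls and an out: label for the error path.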