-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#define DEBUG_SUBSYSTEM S_LLITE
-//#include <lustre_mdc.h>
-#include <lustre_lite.h>
#include "llite_internal.h"
-#include <linux/lustre_compat25.h>
+#include <lustre_compat.h>
-#define VMA_DEBUG(vma, fmt, arg...) \
- CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) " \
- "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end, \
- vma->vm_pgoff, vma->vm_file->f_dentry->d_inode, \
- vma->vm_file->f_dentry->d_inode->i_ino, \
- vma->vm_file->f_dentry->d_iname, ## arg); \
+static const struct vm_operations_struct ll_file_vm_ops;
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int *type);
-
-void policy_from_vma(ldlm_policy_data_t *policy,
- struct vm_area_struct *vma, unsigned long addr,
- size_t count)
+void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
+ unsigned long addr, size_t count)
{
- policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
- (vma->vm_pgoff << CFS_PAGE_SHIFT);
- policy->l_extent.end = (policy->l_extent.start + count - 1) |
- ~CFS_PAGE_MASK;
+ policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
+ (vma->vm_pgoff << PAGE_SHIFT);
+ policy->l_extent.end = (policy->l_extent.start + count - 1) |
+ ~PAGE_MASK;
}
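
A short worked example of the extent arithmetic above (the numbers are hypothetical, assuming 4 KiB pages):

/* Hypothetical: vm_pgoff = 16 (mapping starts at file offset 64 KiB),
 * a fault at addr = vma->vm_start + 0x1234, count = 1:
 *
 *      start = (0x1234 & PAGE_MASK) + (16 << PAGE_SHIFT)
 *            = 0x1000 + 0x10000 = 0x11000
 *      end   = (0x11000 + 1 - 1) | ~PAGE_MASK = 0x11fff
 *
 * i.e. the lock extent is rounded out to the whole file page that
 * contains the faulting address.
 */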
-struct vm_area_struct * our_vma(unsigned long addr, size_t count)
+struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
+ size_t count)
{
- struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *ret = NULL;
ENTRY;
- /* No MM (e.g. NFS)? No vmas too. */
- if (!mm)
- RETURN(NULL);
+ /* mmap_sem must be held by the caller. */
+ LASSERT(!down_write_trylock(&mm->mmap_sem));
- spin_lock(&mm->page_table_lock);
for(vma = find_vma(mm, addr);
vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
- if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
+ if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
vma->vm_flags & VM_SHARED) {
ret = vma;
break;
}
}
- spin_unlock(&mm->page_table_lock);
RETURN(ret);
}
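
The reworked our_vma() no longer takes page_table_lock itself; it asserts via down_write_trylock() that the caller already holds mmap_sem. A minimal sketch of the expected calling pattern (example_caller is hypothetical, not part of this patch):

static void example_caller(unsigned long addr, size_t count)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);       /* satisfies the LASSERT above */
        vma = our_vma(mm, addr, count);
        if (vma != NULL) {
                /* a shared Lustre mapping covers [addr, addr + count) */
        }
        up_read(&mm->mmap_sem);
}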
/**
- * Lustre implementation of a vm_operations_struct::nopage() method, called by
- * VM to server page fault (both in kernel and user space).
+ * API-independent part of page fault initialization.
+ * \param env - lu_env corresponding to the processing thread
+ * \param vma - virtual memory area hit by the page fault
+ * \param index - page index corresponding to the fault
+ * \param ra_flags - vma readahead flags
*
- * This function sets up CIT_FAULT cl_io that does the job.
- *
- * \param vma - is virtiual area struct related to page fault
- * \param address - address when hit fault
- * \param type - of fault
- *
- * XXX newer 2.6 kernels provide vm_operations_struct::fault() method with
- * slightly different semantics instead.
- *
- * \return allocated and filled page for address
- * \retval NOPAGE_SIGBUS if page not exist on this address
- * \retval NOPAGE_OOM not have memory for allocate new page
+ * \return the initialized cl_io, or an ERR_PTR() holding a negative errno.
*/
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int *type)
+static struct cl_io *
+ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
+ pgoff_t index, unsigned long *ra_flags)
{
- struct file *file = vma->vm_file;
- struct inode *inode = file->f_dentry->d_inode;
- struct lu_env *env;
- struct cl_io *io;
- struct page *page = NULL;
- struct cl_env_nest nest;
- int result;
-
- ENTRY;
+ struct file *file = vma->vm_file;
+ struct inode *inode = file_inode(file);
+ struct cl_io *io;
+ struct cl_fault_io *fio;
+ int rc;
+ ENTRY;
if (ll_file_nolock(file))
RETURN(ERR_PTR(-EOPNOTSUPP));
+restart:
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ LASSERT(io->ci_obj != NULL);
+
+ fio = &io->u.ci_fault;
+ fio->ft_index = index;
+ fio->ft_executable = vma->vm_flags & VM_EXEC;
+
/*
- * vm_operations_struct::nopage() can be called when lustre IO is
- * already active for the current thread, e.g., when doing read/write
- * against user level buffer mapped from Lustre buffer. To avoid
- * stomping on existing context, optionally force an allocation of a new
- * one.
+ * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
+ * the kernel will not read other pages not covered by ldlm in
+ * filemap_nopage. we do our readahead in ll_readpage.
*/
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- pgoff_t pg_offset;
- const unsigned long writable = VM_SHARED|VM_WRITE;
- unsigned long ra_flags;
- struct cl_fault_io *fio;
-
- io = &ccc_env_info(env)->cti_io;
- memset(io, 0, sizeof(*io));
- io->ci_obj = ll_i2info(inode)->lli_clob;
- LASSERT(io->ci_obj != NULL);
-
- fio = &io->u.ci_fault;
- pg_offset = (address - vma->vm_start) >> PAGE_SHIFT;
- fio->ft_index = pg_offset + vma->vm_pgoff;
- fio->ft_writable = (vma->vm_flags&writable) == writable;
- fio->ft_executable = vma->vm_flags&VM_EXEC;
-
- /*
- * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
- * the kernel will not read other pages not covered by ldlm in
- * filemap_nopage. we do our readahead in ll_readpage.
- */
- ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
- vma->vm_flags &= ~VM_SEQ_READ;
- vma->vm_flags |= VM_RAND_READ;
-
- CDEBUG(D_INFO, "vm_flags: %lx (%lu %i %i)\n", vma->vm_flags,
- fio->ft_index, fio->ft_writable, fio->ft_executable);
-
- if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
- struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(cio->cui_cl.cis_io == io);
-
- /* mmap lock must be MANDATORY. */
- io->ci_lockreq = CILR_MANDATORY;
- vio->u.fault.ft_vma = vma;
- vio->u.fault.ft_address = address;
- vio->u.fault.ft_type = type;
- cio->cui_fd = fd;
-
- result = cl_io_loop(env, io);
- if (result == 0) {
- LASSERT(fio->ft_page != NULL);
- page = cl_page_vmpage(env, fio->ft_page);
- } else if (result == -EFAULT) {
- page = NOPAGE_SIGBUS;
- } else if (result == -ENOMEM) {
- page = NOPAGE_OOM;
- }
- } else
- result = io->ci_result;
+ if (ra_flags != NULL)
+ *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
+ vma->vm_flags &= ~VM_SEQ_READ;
+ vma->vm_flags |= VM_RAND_READ;
+
+ CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
+ fio->ft_index, fio->ft_executable);
+
+ rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+ if (rc == 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(vio->vui_cl.cis_io == io);
+
+ /* mmap lock must be MANDATORY because it has to cache
+ * pages. */
+ io->ci_lockreq = CILR_MANDATORY;
+ vio->vui_fd = fd;
+ } else {
+ LASSERT(rc < 0);
+ cl_io_fini(env, io);
+ if (io->ci_need_restart)
+ goto restart;
+
+ io = ERR_PTR(rc);
+ }
+
+ RETURN(io);
+}
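
Both callers below wrap this helper in the same cl_io lifecycle; condensed into a sketch (error handling elided, all names as used in this file):

/*
 *      env = cl_env_get(&refcheck);
 *      io  = ll_fault_io_init(env, vma, index, &ra_flags);
 *      if (!IS_ERR(io) && io->ci_result == 0) {
 *              vio = vvp_env_io(env);
 *              ... fill vio->u.fault with the vma/vmf details ...
 *              result = cl_io_loop(env, io);
 *      }
 *      cl_io_fini(env, io);
 *      cl_env_put(env, &refcheck);
 *
 * The restart label covers the case where cl_io_init() sets
 * ci_need_restart (e.g. after a layout change) and the io must be
 * rebuilt from scratch.
 */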
- vma->vm_flags &= ~VM_RAND_READ;
- vma->vm_flags |= ra_flags;
+/* Code shared by the page_mkwrite() implementations for RHEL5 and RHEL6 */
+static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
+ bool *retry)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio;
+ int result;
+ __u16 refcheck;
+ sigset_t set;
+ struct inode *inode;
+ struct ll_inode_info *lli;
+ ENTRY;
+
+ LASSERT(vmpage != NULL);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ io = ll_fault_io_init(env, vma, vmpage->index, NULL);
+ if (IS_ERR(io))
+ GOTO(out, result = PTR_ERR(io));
+
+ result = io->ci_result;
+ if (result < 0)
+ GOTO(out_io, result);
+
+ io->u.ci_fault.ft_mkwrite = 1;
+ io->u.ci_fault.ft_writable = 1;
+
+ vio = vvp_env_io(env);
+ vio->u.fault.ft_vma = vma;
+ vio->u.fault.ft_vmpage = vmpage;
+
+ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+
+ inode = vvp_object_inode(io->ci_obj);
+ lli = ll_i2info(inode);
+
+ result = cl_io_loop(env, io);
+
+ cfs_restore_sigs(set);
+
+ if (result == 0) {
+ lock_page(vmpage);
+ if (vmpage->mapping == NULL) {
+ unlock_page(vmpage);
+
+ /* The page was truncated and the lock was cancelled;
+ * return -ENODATA so that VM_FAULT_NOPAGE will be
+ * returned to handle_mm_fault(). */
+ if (result == 0)
+ result = -ENODATA;
+ } else if (!PageDirty(vmpage)) {
+ /* Race: the page was cleaned by ptlrpcd after it was
+ * unlocked, so it must be added back into the dirty
+ * cache; otherwise this soon-to-be-dirty page won't
+ * consume any grants, and worse, if the page is in
+ * flight it will break the RPC checksum.
+ */
+ unlock_page(vmpage);
+
+ CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
+ "been written out, retry.\n",
+ vmpage, vmpage->index);
+
+ *retry = true;
+ result = -EAGAIN;
+ }
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ if (result == 0)
+ ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
}
- RETURN(page);
+ EXIT;
+
+out_io:
+ cl_io_fini(env, io);
+out:
+ cl_env_put(env, &refcheck);
+ CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
+ LASSERT(ergo(result == 0, PageLocked(vmpage)));
+
+ return result;
+}
+
+static inline int to_fault_error(int result)
+{
+ switch (result) {
+ case 0:
+ result = VM_FAULT_LOCKED;
+ break;
+ case -EFAULT:
+ result = VM_FAULT_NOPAGE;
+ break;
+ case -ENOMEM:
+ result = VM_FAULT_OOM;
+ break;
+ default:
+ result = VM_FAULT_SIGBUS;
+ break;
+ }
+ return result;
}
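
For reference, the translation performed by this helper (merely restating the switch above; -EIO stands in for any unlisted errno):

/*
 *      to_fault_error(0)       == VM_FAULT_LOCKED  (page returned locked)
 *      to_fault_error(-EFAULT) == VM_FAULT_NOPAGE
 *      to_fault_error(-ENOMEM) == VM_FAULT_OOM
 *      to_fault_error(-EIO)    == VM_FAULT_SIGBUS
 */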
/**
- * To avoid cancel the locks covering mmapped region for lock cache pressure,
- * we track the mapped vma count in ccc_object::cob_mmap_cnt.
+ * Lustre implementation of the vm_operations_struct::fault() method, called
+ * by the VM to serve a page fault (in both kernel and user space).
+ *
+ * \param vma - virtual memory area related to the page fault
+ * \param vmf - structure describing the fault type and address
+ *
+ * \return allocated and filled _locked_ page for the address
+ * \retval VM_FAULT_ERROR on general error
+ * \retval VM_FAULT_OOM when out of memory to allocate a new page
*/
-static void ll_vm_open(struct vm_area_struct * vma)
+static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio = NULL;
+ struct page *vmpage;
+ unsigned long ra_flags;
+ int result = 0;
+ int fault_ret = 0;
+ __u16 refcheck;
+ ENTRY;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
+ /* do fast fault */
+ ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
+ fault_ret = ll_filemap_fault(vma, vmf);
+ ll_cl_remove(vma->vm_file, env);
+
+ /* - If there is no error, then the page was found in cache and
+ * uptodate;
+ * - If VM_FAULT_RETRY is set, the page existed but failed to
+ * lock. It will return to kernel and retry;
+ * - Otherwise, it should try normal fault under DLM lock. */
+ if ((fault_ret & VM_FAULT_RETRY) ||
+ !(fault_ret & VM_FAULT_ERROR))
+ GOTO(out, result = 0);
+
+ fault_ret = 0;
+ }
+
+ io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
+ if (IS_ERR(io))
+ GOTO(out, result = PTR_ERR(io));
+
+ result = io->ci_result;
+ if (result == 0) {
+ vio = vvp_env_io(env);
+ vio->u.fault.ft_vma = vma;
+ vio->u.fault.ft_vmpage = NULL;
+ vio->u.fault.ft_vmf = vmf;
+ vio->u.fault.ft_flags = 0;
+ vio->u.fault.ft_flags_valid = 0;
+
+ /* May call ll_readpage() */
+ ll_cl_add(vma->vm_file, env, io, LCC_MMAP);
+
+ result = cl_io_loop(env, io);
+
+ ll_cl_remove(vma->vm_file, env);
+
+ /* ft_flags are only valid if we reached
+ * the call to filemap_fault */
+ if (vio->u.fault.ft_flags_valid)
+ fault_ret = vio->u.fault.ft_flags;
+
+ vmpage = vio->u.fault.ft_vmpage;
+ if (result != 0 && vmpage != NULL) {
+ put_page(vmpage);
+ vmf->page = NULL;
+ }
+ }
+ cl_io_fini(env, io);
- ENTRY;
- LASSERT(vma->vm_file);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
- atomic_inc(&vob->cob_mmap_cnt);
- EXIT;
+ vma->vm_flags |= ra_flags;
+
+out:
+ cl_env_put(env, &refcheck);
+ if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
+ fault_ret |= to_fault_error(result);
+
+ CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
+ RETURN(fault_ret);
}
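
The control flow of ll_fault0() above, summarized as a sketch:

/*
 *      if fast read is enabled:
 *              try ll_filemap_fault() without taking DLM locks;
 *              done if the page was found uptodate in cache, or if
 *              VM_FAULT_RETRY asks the kernel to retry the fault
 *      otherwise fall back to a full CIT_FAULT cl_io under a
 *      mandatory DLM lock, which may enter ll_readpage() and do
 *      Lustre readahead
 */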
-/**
- * Dual to ll_vm_open().
- */
-static void ll_vm_close(struct vm_area_struct *vma)
+#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
+static int ll_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+#else
+static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct ccc_object *vob = cl_inode2ccc(inode);
+#endif
+ int count = 0;
+ bool printed = false;
+ int result;
+ sigset_t set;
+
+ /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
+ * so that the process can be killed by the admin, while other
+ * signals do not cause a segfault. */
+ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+
+ ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
+ LPROC_LL_FAULT, 1);
+
+restart:
+ result = ll_fault0(vma, vmf);
+ if (!(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
+ struct page *vmpage = vmf->page;
+
+ /* check if this page has been truncated */
+ lock_page(vmpage);
+ if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+ unlock_page(vmpage);
+ put_page(vmpage);
+ vmf->page = NULL;
+
+ if (!printed && ++count > 16) {
+ CWARN("the page is under heavy contention,"
+ "maybe your app(%s) needs revising :-)\n",
+ current->comm);
+ printed = true;
+ }
- ENTRY;
- LASSERT(vma->vm_file);
- atomic_dec(&vob->cob_mmap_cnt);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
- EXIT;
+ goto restart;
+ }
+
+ result |= VM_FAULT_LOCKED;
+ }
+ cfs_restore_sigs(set);
+ return result;
}
-#ifndef HAVE_FILEMAP_POPULATE
-static int (*filemap_populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
+static int ll_page_mkwrite(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+#else
+static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
#endif
-static int ll_populate(struct vm_area_struct *area, unsigned long address,
- unsigned long len, pgprot_t prot, unsigned long pgoff,
- int nonblock)
+ int count = 0;
+ bool printed = false;
+ bool retry;
+ int result;
+
+ ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
+ LPROC_LL_MKWRITE, 1);
+
+ file_update_time(vma->vm_file);
+ do {
+ retry = false;
+ result = ll_page_mkwrite0(vma, vmf->page, &retry);
+
+ if (!printed && ++count > 16) {
+ const struct dentry *de = file_dentry(vma->vm_file);
+
+ CWARN("app(%s): the page %lu of file "DFID" is under"
+ " heavy contention\n",
+ current->comm, vmf->pgoff,
+ PFID(ll_inode2fid(de->d_inode)));
+ printed = true;
+ }
+ } while (retry);
+
+ switch (result) {
+ case 0:
+ LASSERT(PageLocked(vmf->page));
+ result = VM_FAULT_LOCKED;
+ break;
+ case -ENODATA:
+ case -EFAULT:
+ result = VM_FAULT_NOPAGE;
+ break;
+ case -ENOMEM:
+ result = VM_FAULT_OOM;
+ break;
+ case -EAGAIN:
+ result = VM_FAULT_RETRY;
+ break;
+ default:
+ result = VM_FAULT_SIGBUS;
+ break;
+ }
+
+ return result;
+}
+
+/**
+ * To avoid cancelling the locks covering an mmapped region under lock cache
+ * pressure, we track the mapped vma count in vvp_object::vob_mmap_cnt.
+ */
+static void ll_vm_open(struct vm_area_struct * vma)
{
- int rc = 0;
- ENTRY;
+ struct inode *inode = file_inode(vma->vm_file);
+ struct vvp_object *vob = cl_inode2vvp(inode);
- /* always set nonblock as true to avoid page read ahead */
- rc = filemap_populate(area, address, len, prot, pgoff, 1);
- RETURN(rc);
+ ENTRY;
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+ atomic_inc(&vob->vob_mmap_cnt);
+ EXIT;
}
-/* return the user space pointer that maps to a file offset via a vma */
-static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
+/**
+ * Dual to ll_vm_open().
+ */
+static void ll_vm_close(struct vm_area_struct *vma)
{
- return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+ struct inode *inode = file_inode(vma->vm_file);
+ struct vvp_object *vob = cl_inode2vvp(inode);
+ ENTRY;
+ atomic_dec(&vob->vob_mmap_cnt);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+ EXIT;
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
int rc = -ENOENT;
ENTRY;
- LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
+ LASSERTF(last > first, "last %llu first %llu\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
+ unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
last - first + 1, 0);
}
RETURN(rc);
}
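
A worked example of the rounding in the unmap_mapping_range() call above (illustrative, assuming 4 KiB pages): the kernel truncates holebegin down to a page boundary and rounds the length up, so passing first + PAGE_SIZE - 1 skips a first page that is only partially covered:

/* first = 0x1000, last = 0x3fff:
 *      holebegin = 0x1000 + 0xfff = 0x1fff -> page index 1
 *      holelen   = 0x3000 bytes            -> 3 pages
 * pages 1..3 are unmapped: exactly the pages lying wholly inside
 * [first, last].
 */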
-static struct vm_operations_struct ll_file_vm_ops = {
- .nopage = ll_nopage,
- .open = ll_vm_open,
- .close = ll_vm_close,
- .populate = ll_populate,
+static const struct vm_operations_struct ll_file_vm_ops = {
+ .fault = ll_fault,
+ .page_mkwrite = ll_page_mkwrite,
+ .open = ll_vm_open,
+ .close = ll_vm_close,
};
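
For orientation, the handlers wired up above are driven from userspace roughly as follows (a minimal sketch; the path is a placeholder for a file on a Lustre mount):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/lustre/file", O_RDWR);      /* placeholder */
        char *p;

        if (fd < 0)
                return 1;
        /* MAP_SHARED matches the VM_SHARED check in our_vma() and
         * routes the first write through ll_page_mkwrite(). */
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;
        (void)p[0];     /* read fault  -> ll_fault()         */
        p[0] = 'x';     /* write fault -> ll_page_mkwrite()  */
        munmap(p, 4096);
        close(fd);
        return 0;
}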
int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
int rc;
ENTRY;
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
rc = generic_file_mmap(file, vma);
if (rc == 0) {
-#if !defined(HAVE_FILEMAP_POPULATE)
- if (!filemap_populate)
- filemap_populate = vma->vm_ops->populate;
-#endif
vma->vm_ops = &ll_file_vm_ops;
vma->vm_ops->open(vma);
/* update the inode's size and mtime */
- rc = cl_glimpse_size(inode);
+ rc = ll_glimpse_size(inode);
}
RETURN(rc);