described in CVE-2007-4573. This problem could allow untrusted
local users to gain root access.
+Severity : normal
+Bugzilla : 13568
+Description: Remove 2.4 kernel checks from lustre 1.8
+Details    : Remove 2.5.0 checks from dcache.c dir.c file.c llite_internal.h
+             llite_lib.c llite_mmap.c lproc_llite.c namei.c rw.c symlink.c
+             in the llite subsystem.
+
--------------------------------------------------------------------------------
2007-08-10 Cluster File Systems, Inc. <info@clusterfs.com>
* sys_getcwd() could return -ENOENT -bzzz */
#ifdef LUSTRE_KERNEL_VERSION
dentry->d_flags |= DCACHE_LUSTRE_INVALID;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- __d_drop(dentry);
- if (dentry->d_inode) {
- /* Put positive dentries to orphan list */
- list_add(&dentry->d_hash,
- &ll_i2sbi(dentry->d_inode)->ll_orphan_dentry_list);
- }
-#else
- if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
- __d_drop(dentry);
#endif
-#else
if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
__d_drop(dentry);
-#endif
}
unlock_dentry(dentry);
/* drop lookup or getattr locks immediately */
if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) {
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
/* on 2.6 there are situation when several lookups and
* revalidations may be requested during single operation.
* therefore, we don't release intent here -bzzz */
ll_intent_drop_lock(it);
-#else
- ll_intent_release(it);
-#endif
}
}
void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
{
struct lookup_intent *it = *itp;
-#if defined(LUSTRE_KERNEL_VERSION)&&(LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+#if defined(LUSTRE_KERNEL_VERSION)
if (it) {
LASSERTF(it->it_magic == INTENT_MAGIC,
"%p has bad intent magic: %x\n",
return;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
#ifdef LUSTRE_KERNEL_VERSION
static int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
{
RETURN(rc);
}
#endif
-#endif
struct dentry_operations ll_d_ops = {
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
.d_revalidate = ll_revalidate_nd,
-#else
- .d_revalidate_it = ll_revalidate_it,
-#endif
.d_release = ll_release,
.d_delete = ll_ddelete,
#ifdef LUSTRE_KERNEL_VERSION
#include <linux/version.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-# include <linux/locks.h> // for wait_on_buffer
-#else
-# include <linux/buffer_head.h> // for wait_on_buffer
-#endif
+#include <linux/buffer_head.h> // for wait_on_buffer
#define DEBUG_SUBSYSTEM S_LLITE
#include <lustre_mdc.h>
#include <linux/pagemap.h>
#include <linux/file.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include <linux/lustre_compat25.h>
-#endif
#include "llite_internal.h"
/* also used by llite/special.c:ll_special_open() */
inode->i_ino, chunk, *ppos, i_size_read(inode));
/* turn off the kernel's read-ahead */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- file->f_ramax = 0;
-#else
file->f_ra.ra_pages = 0;
-#endif
+
/* initialize read-ahead window once per syscall */
if (ra == 0) {
ra = 1;
/*
* Send file content (through pagecache) somewhere with helper
*/
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
static ssize_t ll_file_sendfile(struct file *in_file, loff_t *ppos,size_t count,
read_actor_t actor, void *target)
{
ll_tree_unlock(&tree);
RETURN(retval);
}
-#endif
static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
unsigned long arg)
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s\n",
inode->i_ino, inode->i_generation, inode, dentry->d_name.name);
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REVALIDATE, 1);
-#endif
exp = ll_i2mdexp(inode);
return rc;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
struct lookup_intent *it, struct kstat *stat)
{
return ll_getattr_it(mnt, de, &it, stat);
}
-#endif
static
int lustre_check_acl(struct inode *inode, int mask)
return generic_permission(inode, mask, lustre_check_acl);
}
#else
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
-#else
-int ll_inode_permission(struct inode *inode, int mask)
-#endif
{
int mode = inode->i_mode;
int rc;
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
.sendfile = ll_file_sendfile,
-#endif
.fsync = ll_fsync,
};
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
.sendfile = ll_file_sendfile,
-#endif
.fsync = ll_fsync,
#ifdef HAVE_F_OP_FLOCK
.flock = ll_file_flock,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
.sendfile = ll_file_sendfile,
-#endif
.fsync = ll_fsync,
#ifdef HAVE_F_OP_FLOCK
.flock = ll_file_noflock,
#endif
.setattr = ll_setattr,
.truncate = ll_truncate,
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
.getattr = ll_getattr,
-#else
- .revalidate_it = ll_inode_revalidate_it,
-#endif
.permission = ll_inode_permission,
.setxattr = ll_setxattr,
.getxattr = ll_getxattr,
#ifdef LUSTRE_KERNEL_VERSION
static inline struct lookup_intent *ll_nd2it(struct nameidata *nd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
return &nd->intent;
-#else
- return nd->intent;
-#endif
}
#endif
struct obd_client_handle *lli_mds_exec_och;
__u64 lli_open_fd_exec_count;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct inode lli_vfs_inode;
-#endif
/* identifying fields for both metadata and data stacks. */
struct lu_fid lli_fid;
// static inline struct ll_inode_info *LL_I(struct inode *inode)
static inline struct ll_inode_info *ll_i2info(struct inode *inode)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
return container_of(inode, struct ll_inode_info, lli_vfs_inode);
-#else
- CLASSERT(sizeof(inode->u) >= sizeof(struct ll_inode_info));
- return (struct ll_inode_info *)&(inode->u.generic_ip);
-#endif
}
/* default to about 40meg of readahead on a given system. That much tied
static inline struct inode *ll_info2i(struct ll_inode_info *lli)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
return &lli->lli_vfs_inode;
-#else
- return list_entry(lli, struct inode, u.generic_ip);
-#endif
}
struct it_cb_data {
struct lustre_handle *fh);
extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid, struct file
*file, size_t count, int rw);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
struct lookup_intent *it, struct kstat *stat);
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
-#endif
struct ll_file_data *ll_file_data_get(void);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd);
-#else
-int ll_inode_permission(struct inode *inode, int mask);
-#endif
int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
int flags, struct lov_user_md *lum,
int lum_size);
int ll_close_thread_start(struct ll_close_queue **lcq_ret);
/* llite/llite_mmap.c */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
typedef struct rb_root rb_root_t;
typedef struct rb_node rb_node_t;
-#endif
struct ll_lock_tree_node;
struct ll_lock_tree {
#define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
void __d_rehash(struct dentry * entry, int lock);
static inline __u64 ll_ts2u64(struct timespec *time)
{
__u64 t = time->tv_sec;
return t;
}
-#else /* 2.4 here */
-static inline __u64 ll_ts2u64(time_t *time)
-{
- return *time;
-}
-#endif
/* don't need an addref as the sb_info should be holding one */
static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
sbi->ll_root_fid = rootfid;
sb->s_op = &lustre_super_operations;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
sb->s_export_op = &lustre_export_operations;
-#endif
/* make root inode
* XXX: move this to after cbd setup? */
err = obd_set_info_async(sbi->ll_dt_exp, strlen("checksum"),"checksum",
sizeof(checksum), &checksum, NULL);
- /* making vm readahead 0 for 2.4.x. In the case of 2.6.x,
- backing dev info assigned to inode mapping is used for
- determining maximal readahead. */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
- !defined(KERNEL_HAS_AS_MAX_READAHEAD)
- /* bug 2805 - set VM readahead to zero */
- vm_max_readahead = vm_min_readahead = 0;
-#endif
-
sb->s_root = d_alloc_root(root);
if (data != NULL)
OBD_FREE(data, sizeof(*data));
}
}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-void lustre_throw_orphan_dentries(struct super_block *sb)
-{
- struct dentry *dentry, *next;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
-
- /* Do this to get rid of orphaned dentries. That is not really trw. */
- list_for_each_entry_safe(dentry, next, &sbi->ll_orphan_dentry_list,
- d_hash) {
- CWARN("found orphan dentry %.*s (%p->%p) at unmount, dumping "
- "before and after shrink_dcache_parent\n",
- dentry->d_name.len, dentry->d_name.name, dentry, next);
- lustre_dump_dentry(dentry, 1);
- shrink_dcache_parent(dentry);
- lustre_dump_dentry(dentry, 1);
- }
-}
-#else
-#define lustre_throw_orphan_dentries(sb)
-#endif
-
#ifdef HAVE_EXPORT___IGET
static void prune_dir_dentries(struct inode *inode)
{
UP_WRITE_I_ALLOC_SEM(inode);
rc = ll_extent_lock(NULL, inode, lsm, LCK_PW, &policy, &lockh,
ast_flags);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- DOWN_WRITE_I_ALLOC_SEM(inode);
- LOCK_INODE_MUTEX(inode);
-#else
LOCK_INODE_MUTEX(inode);
DOWN_WRITE_I_ALLOC_SEM(inode);
-#endif
+
if (rc != 0)
GOTO(out, rc);
if (body->valid & OBD_MD_FLNLINK)
inode->i_nlink = body->nlink;
if (body->valid & OBD_MD_FLRDEV)
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- inode->i_rdev = body->rdev;
-#else
inode->i_rdev = old_decode_dev(body->rdev);
-#endif
if (body->valid & OBD_MD_FLSIZE) {
if (ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) {
if (lli->lli_flags & (LLIF_DONE_WRITING |
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
static struct backing_dev_info ll_backing_dev_info = {
.ra_pages = 0, /* No readahead */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
.memory_backed = 0, /* Does contribute to dirty memory */
#endif
};
-#endif
void ll_read_inode2(struct inode *inode, void *opaque)
{
} else {
inode->i_op = &ll_special_inode_operations;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
init_special_inode(inode, inode->i_mode,
kdev_t_to_nr(inode->i_rdev));
/* initializing backing dev info. */
inode->i_mapping->backing_dev_info = &ll_backing_dev_info;
-#else
- init_special_inode(inode, inode->i_mode, inode->i_rdev);
-#endif
+
EXIT;
}
}
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include <linux/iobuf.h>
-#endif
#define DEBUG_SUBSYSTEM S_LLITE
int lt_get_mmap_locks(struct ll_lock_tree *tree,
unsigned long addr, size_t count);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
int *type);
-#else
-
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int unused);
-#endif
struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
__u64 end, ldlm_mode_t mode)
*
* In 2.6, the truncate_count of address_space can cover this race.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
int *type)
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
- int type /* unused */)
-#endif
{
struct file *filp = vma->vm_file;
struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#ifndef HAVE_FILEMAP_POPULATE
static int (*filemap_populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
#endif
rc = filemap_populate(area, address, len, prot, pgoff, 1);
RETURN(rc);
}
-#endif
/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-/* [first, last] are the byte offsets affected.
- * vm_{start, end} are user addresses of the first byte of the mapping and
- * the next byte beyond it
- * vm_pgoff is the page index of the first byte in the mapping */
-static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
- __u64 last)
-{
- unsigned long address, len;
- for (; vma ; vma = vma->vm_next_share) {
- if (last >> CFS_PAGE_SHIFT < vma->vm_pgoff)
- continue;
- if (first >> CFS_PAGE_SHIFT >= (vma->vm_pgoff +
- ((vma->vm_end - vma->vm_start) >> CFS_PAGE_SHIFT)))
- continue;
-
- /* XXX in case of unmap the cow pages of a running file,
- * don't unmap these private writeable mapping here!
- * though that will break private mappping a little.
- *
- * the clean way is to check the mapping of every page
- * and just unmap the non-cow pages, just like
- * unmap_mapping_range() with even_cow=0 in kernel 2.6.
- */
- if (!(vma->vm_flags & VM_SHARED) &&
- (vma->vm_flags & VM_WRITE))
- continue;
-
- address = max((unsigned long)vma->vm_start,
- file_to_user(vma, first));
- len = min((unsigned long)vma->vm_end,
- file_to_user(vma, last) + 1) - address;
-
- VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
- "address=%ld len=%ld]\n", first, last, address, len);
- LASSERT(len > 0);
- ll_zap_page_range(vma, address, len);
- }
-}
-#endif
-
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
* nopage's reference passing to the pte */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
ENTRY;
LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
if (mapping_mapped(mapping)) {
rc = 0;
unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
last - first + 1, 0);
}
-#else
- spin_lock(&mapping->i_shared_lock);
- if (mapping->i_mmap != NULL) {
- rc = 0;
- teardown_vmas(mapping->i_mmap, first, last);
- }
- if (mapping->i_mmap_shared != NULL) {
- rc = 0;
- teardown_vmas(mapping->i_mmap_shared, first, last);
- }
- spin_unlock(&mapping->i_shared_lock);
-#endif
+
RETURN(rc);
}
.nopage = ll_nopage,
.open = ll_vm_open,
.close = ll_vm_close,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
.populate = ll_populate,
-#endif
};
int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
rc = generic_file_mmap(file, vma);
if (rc == 0) {
-#if !defined(HAVE_FILEMAP_POPULATE) && \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+#if !defined(HAVE_FILEMAP_POPULATE)
if (!filemap_populate)
filemap_populate = vma->vm_ops->populate;
#endif
{ LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" },
{ LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" },
{ LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" },
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
{ LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" },
-#else
- { LPROC_LL_REVALIDATE, LPROCFS_TYPE_REGS, "getattr" },
-#endif
/* special inode operation */
{ LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" },
{ LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
* Get an inode by inode number (already instantiated by the intent lookup).
* Returns inode or NULL
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct inode *ll_iget(struct super_block *sb, ino_t hash,
struct lustre_md *md)
{
return inode;
}
-#else
-struct inode *ll_iget(struct super_block *sb, ino_t hash,
- struct lustre_md *md)
-{
- struct inode *inode;
- LASSERT(hash != 0);
-
- inode = iget4(sb, hash, NULL, md);
- if (inode) {
- if (!(inode->i_state & (I_FREEING | I_CLEAR)))
- ll_update_inode(inode, md);
-
- CDEBUG(D_VFSTRACE, "inode: %lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
- }
- return inode;
-}
-#endif
static void ll_drop_negative_dentry(struct inode *dir)
{
return retval;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
#ifdef LUSTRE_KERNEL_VERSION
static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
struct nameidata *nd)
RETURN(de);
}
#endif
-#endif
/* We depend on "mode" being set with the proper file type/umask by now */
static struct inode *ll_create_node(struct inode *dir, const char *name,
RETURN(err);
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
#ifndef LUSTRE_KERNEL_VERSION
static int ll_create_nd(struct inode *dir, struct dentry *dentry,
int mode, struct nameidata *nd)
return ll_create_it(dir, dentry, mode, &nd->intent);
}
#endif
-#endif
static int ll_symlink_generic(struct inode *dir, struct qstr *name,
const char *tgt, struct dentry *dchild)
old_encode_dev(rdev), dchild);
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
static int ll_unlink(struct inode * dir, struct dentry *dentry)
{
return ll_unlink_generic(dir, NULL, dentry, &dentry->d_name);
new_dir, NULL, new_dentry,
&new_dentry->d_name);
}
-#endif
struct inode_operations ll_dir_inode_operations = {
#ifdef LUSTRE_KERNEL_VERSION
.setattr_raw = ll_setattr_raw,
#endif
.mknod = ll_mknod,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- .create_it = ll_create_it,
- .lookup_it = ll_lookup_it,
- .revalidate_it = ll_inode_revalidate_it,
-#else
.lookup = ll_lookup_nd,
.create = ll_create_nd,
/* We need all these non-raw things for NFSD, to not patch it. */
.rename = ll_rename,
.setattr = ll_setattr,
.getattr = ll_getattr,
-#endif
.permission = ll_inode_permission,
.setxattr = ll_setxattr,
.getxattr = ll_getxattr,
.setattr_raw = ll_setattr_raw,
#endif
.setattr = ll_setattr,
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
.getattr = ll_getattr,
-#else
- .revalidate_it = ll_inode_revalidate_it,
-#endif
.permission = ll_inode_permission,
.setxattr = ll_setxattr,
.getxattr = ll_getxattr,
* we got the page cache list we'd create a lock inversion
* with the removepage path which gets the page lock then the
* cli lock */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
- clear_page_dirty(page);
-#else
LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
page->mapping->host->i_ino, page->index);
clear_page_dirty_for_io(page);
/* This actually clears the dirty bit in the radix tree.*/
set_page_writeback(page);
-#endif
LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
page_cache_get(page);
llap->llap_defer_uptodate = 0;
}
SetPageError(page);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
if (rc == -ENOSPC)
set_bit(AS_ENOSPC, &page->mapping->flags);
else
set_bit(AS_EIO, &page->mapping->flags);
-#else
- page->mapping->gfp_mask |= AS_EIO_MASK;
-#endif
}
unlock_page(page);
#ifdef HAVE_COOKIE_FOLLOW_LINK
.put_link = ll_put_link,
#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- .revalidate_it = ll_inode_revalidate_it,
-#else
.getattr = ll_getattr,
-#endif
.permission = ll_inode_permission,
.setxattr = ll_setxattr,
.getxattr = ll_getxattr,