size_t size,
__u64 valid);
-int ll_init_security(struct dentry *dentry,
- struct inode *inode,
- struct inode *dir);
+int ll_dentry_init_security(struct dentry *dentry, int mode, struct qstr *name,
+ const char **secctx_name, void **secctx,
+ __u32 *secctx_size);
+int ll_inode_init_security(struct dentry *dentry, struct inode *inode,
+ struct inode *dir);
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
}
/* default to about 64M of readahead on a given system. */
-#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_SHIFT))
/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
enum ra_stat {
RA_STAT_HIT = 0,
#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server
* suppress_pings */
#define LL_SBI_FAST_READ 0x400000 /* fast read support */
+#define LL_SBI_FILE_SECCTX 0x800000 /* set file security context at create */
#define LL_SBI_FLAGS { \
"nolck", \
"norootsquash", \
"always_ping", \
"fast_read", \
+ "file_secctx", \
}
/* This is embedded into llite super-blocks to keep track of connect
struct list_head fd_lccs; /* list of ll_cl_context */
};
-extern spinlock_t inode_lock;
-
extern struct proc_dir_entry *proc_lustre_fs_root;
static inline struct inode *ll_info2i(struct ll_inode_info *lli)
#if BITS_PER_LONG == 32
return 1;
#elif defined(CONFIG_COMPAT)
- return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API));
+ return unlikely(in_compat_syscall() || (sbi->ll_flags & LL_SBI_32BIT_API));
#else
return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
#endif
static inline void ll_invalidate_page(struct page *vmpage)
{
struct address_space *mapping = vmpage->mapping;
-	loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+	loff_t offset = (loff_t)vmpage->index << PAGE_SHIFT;
LASSERT(PageLocked(vmpage));
if (mapping == NULL)
* truncate_complete_page() calls
* a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
*/
- ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+ ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
truncate_complete_page(mapping, vmpage);
}
if (it->it_remote_lock_mode) {
handle.cookie = it->it_remote_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID
- "(%p) for remote lock "LPX64"\n",
+ "(%p) for remote lock %#llx\n",
PFID(ll_inode2fid(inode)), inode,
handle.cookie);
md_set_lock_data(exp, &handle, inode, NULL);
handle.cookie = it->it_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)"
- " for lock "LPX64"\n",
+ " for lock %#llx\n",
PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
}
#endif
+#ifndef HAVE_IS_SXID
+static inline bool is_sxid(umode_t mode)
+{
+ return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+}
+#endif
+
+#ifndef IS_NOSEC
+/* fallback for kernels lacking IS_NOSEC(); parenthesize the macro arg */
+#define IS_NOSEC(inode) (!is_sxid((inode)->i_mode))
+#endif
+
#ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
}
static inline ssize_t
-generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+__generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct iovec iov;
struct iov_iter i;