#include <linux/compat.h>
#include <linux/aio.h>
#include <lustre_compat.h>
+#include <lustre_crypto.h>
#include "vvp_internal.h"
#include "range_lock.h"
#define FMODE_EXEC 0
#endif
-#ifndef VM_FAULT_RETRY
+#ifndef HAVE_VM_FAULT_RETRY
#define VM_FAULT_RETRY 0
#endif
char *lli_symlink_name;
struct ll_trunc_sem lli_trunc_sem;
struct range_lock_tree lli_write_tree;
+ struct mutex lli_setattr_mutex;
struct rw_semaphore lli_glimpse_sem;
ktime_t lli_glimpse_time;
};
};
- /* XXX: For following frequent used members, although they maybe special
- * used for non-directory object, it is some time-wasting to check
- * whether the object is directory or not before using them. On the
- * other hand, currently, sizeof(f) > sizeof(d), it cannot reduce
- * the "ll_inode_info" size even if moving those members into u.f.
- * So keep them out side.
- *
- * In the future, if more members are added only for directory,
- * some of the following members can be moved into u.f.
- */
+	/* XXX: Although the following frequently used members may only be
+	 * meaningful for non-directory objects, checking whether the object
+	 * is a directory before using them wastes time. On the other hand,
+	 * since sizeof(f) > sizeof(d) at present, moving these members into
+	 * u.f would not reduce the "ll_inode_info" size anyway, so keep
+	 * them outside the union.
+	 *
+	 * In the future, if more members are added only for directories,
+	 * some of the following members could be moved into u.f.
+	 */
struct cl_object *lli_clob;
/* mutex to request for layout lock exclusively. */
wake_up_var(&sem->ll_trunc_readers);
}
+#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
+/* Release the inode's cached POSIX ACL reference, if one is held.
+ * NOTE(review): runs without lli_lock, unlike lli_replace_acl() —
+ * presumably callers hold an exclusive reference; confirm at call sites.
+ */
+static inline void lli_clear_acl(struct ll_inode_info *lli)
+{
+	if (lli->lli_posix_acl) {
+		posix_acl_release(lli->lli_posix_acl);
+		lli->lli_posix_acl = NULL;
+	}
+}
+
+/* Swap in the ACL carried by @md under lli_lock: the previously cached
+ * ACL (if any) is released, and ownership of md->posix_acl transfers to
+ * the inode — no extra reference is taken here.
+ */
+static inline void lli_replace_acl(struct ll_inode_info *lli,
+				   struct lustre_md *md)
+{
+	spin_lock(&lli->lli_lock);
+	if (lli->lli_posix_acl)
+		posix_acl_release(lli->lli_posix_acl);
+	lli->lli_posix_acl = md->posix_acl;
+	spin_unlock(&lli->lli_lock);
+}
+#else
+/* No-op stubs when POSIX ACL support is compiled out. */
+static inline void lli_clear_acl(struct ll_inode_info *lli)
+{
+}
+
+static inline void lli_replace_acl(struct ll_inode_info *lli,
+				   struct lustre_md *md)
+{
+}
+#endif
+
static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
{
__u32 gen;
int ll_xattr_cache_destroy(struct inode *inode);
int ll_xattr_cache_get(struct inode *inode,
- const char *name,
- char *buffer,
- size_t size,
- __u64 valid);
+ const char *name,
+ char *buffer,
+ size_t size,
+ __u64 valid);
+
+int ll_xattr_cache_insert(struct inode *inode,
+ const char *name,
+ char *buffer,
+ size_t size);
static inline bool obd_connect_has_secctx(struct obd_connect_data *data)
{
#ifdef CONFIG_SECURITY
return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
- data->ocd_connect_flags2 & OBD_CONNECT2_FILE_SECCTX;
+ data->ocd_connect_flags2 & OBD_CONNECT2_FILE_SECCTX;
#else
return false;
#endif
int ll_listsecurity(struct inode *inode, char *secctx_name,
size_t secctx_name_size);
+/* True when the server advertised client-side encryption support:
+ * both the FLAGS2 extension bit and OBD_CONNECT2_ENCRYPT must be set.
+ * Always false when the client was built without HAVE_LUSTRE_CRYPTO.
+ */
+static inline bool obd_connect_has_enc(struct obd_connect_data *data)
+{
+#ifdef HAVE_LUSTRE_CRYPTO
+	return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
+		data->ocd_connect_flags2 & OBD_CONNECT2_ENCRYPT;
+#else
+	return false;
+#endif
+}
+
+/* Request encryption support in the connect data sent to the server.
+ * No-op when the client was built without HAVE_LUSTRE_CRYPTO.
+ * NOTE(review): does not set OBD_CONNECT_FLAGS2 in ocd_connect_flags —
+ * presumably the caller sets it; confirm against obd_connect_has_enc().
+ */
+static inline void obd_connect_set_enc(struct obd_connect_data *data)
+{
+#ifdef HAVE_LUSTRE_CRYPTO
+	data->ocd_connect_flags2 |= OBD_CONNECT2_ENCRYPT;
+#endif
+}
+
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
* consistency between file size and KMS.
/* default to use at least 16M for fast read if possible */
#define RA_REMAIN_WINDOW_MIN MiB_TO_PAGES(16UL)
-/* default readahead on a given system. */
-#define SBI_DEFAULT_READ_AHEAD_MAX MiB_TO_PAGES(64UL)
+/* default read-ahead on a given client mountpoint. */
+#define SBI_DEFAULT_READ_AHEAD_MAX MiB_TO_PAGES(1024UL)
+
+/* default read-ahead for a single file descriptor */
+#define SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX MiB_TO_PAGES(256UL)
/* default read-ahead full files smaller than limit on the second read */
#define SBI_DEFAULT_READ_AHEAD_WHOLE_MAX MiB_TO_PAGES(2UL)
2.10, abandoned */
#define LL_SBI_TINY_WRITE 0x2000000 /* tiny write support */
#define LL_SBI_FILE_HEAT 0x4000000 /* file heat support */
+#define LL_SBI_TEST_DUMMY_ENCRYPTION 0x8000000 /* test dummy encryption */
+#define LL_SBI_ENCRYPT 0x10000000 /* client side encryption */
#define LL_SBI_FLAGS { \
"nolck", \
"checksum", \
"pio", \
"tiny_write", \
"file_heat", \
+ "test_dummy_encryption", \
+ "noencrypt", \
}
/* This is embedded into llite super-blocks to keep track of connect
extern const struct dentry_operations ll_d_ops;
void ll_intent_drop_lock(struct lookup_intent *);
void ll_intent_release(struct lookup_intent *);
-void ll_invalidate_aliases(struct inode *);
+void ll_prune_aliases(struct inode *inode);
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
int ll_revalidate_it_finish(struct ptlrpc_request *request,
struct lookup_intent *it, struct dentry *de);
struct lov_user_md **kbuf);
void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req);
-void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req,
- struct lookup_intent *it);
+void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req);
/* Compute expected user md size when passing in a md from user space */
static inline ssize_t ll_lov_user_md_size(const struct lov_user_md *lum)
*/
struct vvp_io_args {
/** normal/sendfile/splice */
- enum vvp_io_subtype via_io_subtype;
-
union {
struct {
struct kiocb *via_iocb;
struct iov_iter *via_iter;
} normal;
- struct {
- struct pipe_inode_info *via_pipe;
- unsigned int via_flags;
- } splice;
} u;
};
return lti;
}
-static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
-					      enum vvp_io_subtype type)
+/* Return the per-environment preallocated vvp_io_args. The io-subtype
+ * parameter is gone: with the splice member removed from vvp_io_args
+ * there is no subtype left to record, so this is a plain accessor.
+ */
+static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
 {
-	struct vvp_io_args *via = &ll_env_info(env)->lti_args;
-
-	via->via_io_subtype = type;
-
-	return via;
+	return &ll_env_info(env)->lti_args;
 }
void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
/*
* Mark dentry INVALID, if dentry refcount is zero (this is normally case for
- * ll_md_blocking_ast), unhash this dentry, and let dcache to reclaim it later;
- * else dput() of the last refcount will unhash this dentry and kill it.
+ * ll_md_blocking_ast), it will be pruned by ll_prune_aliases() and
+ * ll_prune_negative_children(); otherwise dput() of the last refcount will
+ * unhash this dentry and kill it.
*/
-static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
+static inline void d_lustre_invalidate(struct dentry *dentry)
 {
 	CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
 	       dentry, dentry,
 	       dentry->d_parent, dentry->d_inode, ll_d_count(dentry));
-	spin_lock_nested(&dentry->d_lock,
-			 nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
+	/* Plain spin_lock: the nested-locking variant and its 'nested'
+	 * parameter are dropped here — NOTE(review): confirm no remaining
+	 * caller invalidates a child while holding the parent's d_lock.
+	 */
+	spin_lock(&dentry->d_lock);
 	__d_lustre_invalidate(dentry);
-	/*
-	 * We should be careful about dentries created by d_obtain_alias().
-	 * These dentries are not put in the dentry tree, instead they are
-	 * linked to sb->s_anon through dentry->d_hash.
-	 * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
-	 * If we unhashed such a dentry, unmount would not be able to find
-	 * it and busy inodes would be reported.
-	 */
-	if (ll_d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
-		__d_drop(dentry);
+	/* The dentry is only marked invalid, no longer unhashed here:
+	 * per the comment above, pruning is left to ll_prune_aliases()
+	 * and ll_prune_negative_children(), or to the final dput().
+	 */
 	spin_unlock(&dentry->d_lock);
 }
return ll_i2pccs(ll_info2i(lli));
}
+#ifdef HAVE_LUSTRE_CRYPTO
+/* crypto.c */
+extern const struct llcrypt_operations lustre_cryptops;
+#endif
+
#endif /* LLITE_INTERNAL_H */