#define LL_MAX_BLKSIZE_BITS 22
#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
-#define LUSTRE_FPRIVATE(file) ((file)->private_data)
struct ll_dentry_data {
struct lookup_intent *lld_it;
unsigned long lld_gid;
};
+/* See comment on trunc_sem_down_read_nowait */
+struct ll_trunc_sem {
+ /* when positive, this is a count of readers; when -1, the semaphore
+ * is held for write; 0 means unlocked
+ */
+ atomic_t ll_trunc_readers;
+ /* this tracks a count of waiting writers */
+ atomic_t ll_trunc_waiters;
+};
+
struct ll_inode_info {
__u32 lli_inode_magic;
spinlock_t lli_lock;
s64 lli_ctime;
spinlock_t lli_agl_lock;
- /* update atime from MDS no matter if it's older than
- * local inode atime. */
- unsigned int lli_update_atime:1;
-
/* Try to keep the d::member and f::member aligned. Before using
* these members, make clear whether it is a directory or not. */
union {
struct {
struct mutex lli_size_mutex;
char *lli_symlink_name;
- /*
- * struct rw_semaphore {
- * signed long count; // align d.d_def_acl
- * spinlock_t wait_lock; // align d.d_sa_lock
- * struct list_head wait_list;
- * }
- */
- struct rw_semaphore lli_trunc_sem;
+ struct ll_trunc_sem lli_trunc_sem;
struct range_lock_tree lli_write_tree;
struct rw_semaphore lli_glimpse_sem;
struct mutex lli_pcc_lock;
enum lu_pcc_state_flags lli_pcc_state;
+ /*
+ * @lli_pcc_generation saves the global PCC generation
+ * at the time the file was successfully attached into
+ * PCC. The flags of the PCC dataset are saved in
+ * @lli_pcc_dsflags.
+ * The global PCC generation is increased when a PCC
+ * backend is added or deleted, or when the PCC
+ * configuration parameters are changed.
+ * If @lli_pcc_generation is the same as the global PCC
+ * generation, the saved PCC dataset flags can be used
+ * to decide safely whether to try auto attach.
+ */
+ __u64 lli_pcc_generation;
+ enum pcc_dataset_flags lli_pcc_dsflags;
struct pcc_inode *lli_pcc_inode;
- struct mutex lli_group_mutex;
- __u64 lli_group_users;
- unsigned long lli_group_gid;
+
+ struct mutex lli_group_mutex;
+ __u64 lli_group_users;
+ unsigned long lli_group_gid;
};
};
struct list_head lli_xattrs; /* ll_xattr_entry->xe_list */
};
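+/* Initialize to unlocked: no readers, no waiting writers. */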
+static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem)
+{
+ atomic_set(&sem->ll_trunc_readers, 0);
+ atomic_set(&sem->ll_trunc_waiters, 0);
+}
+
+/* This version of down read ignores waiting writers, meaning if the semaphore
+ * is already held for read, this down_read will 'join' that reader and also
+ * take the semaphore.
+ *
+ * This lets us avoid an unusual deadlock.
+ *
+ * We must take lli_trunc_sem in read mode on entry into various I/O paths
+ * in Lustre, in order to exclude truncates. Some of these paths then need to
+ * take the mmap_sem, while still holding the trunc_sem. The problem is that
+ * page faults hold the mmap_sem when calling into Lustre, and then must also
+ * take the trunc_sem to exclude truncate.
+ *
+ * This means the locking order for trunc_sem and mmap_sem is sometimes AB,
+ * sometimes BA. This is almost OK because in both cases, we take
+ * trunc_sem for read, so it doesn't block.
+ *
+ * However, if a write-mode user (truncate, a setattr op) arrives in the
+ * middle of this, the second reader on trunc_sem will wait behind that
+ * writer.
+ *
+ * So we have, on our trunc_sem, in order (where 'reader' and 'writer'
+ * refer to the mode in which they take the semaphore):
+ * reader (holding mmap_sem, needs trunc_sem)
+ * writer
+ * reader (holding trunc_sem, waiting for mmap_sem)
+ *
+ * And so the readers deadlock.
+ *
+ * The solution is this modified semaphore, where this down_read ignores
+ * waiting write operations, and all waiters are woken up at once, so readers
+ * using down_read_nowait cannot get stuck behind waiting writers, regardless
+ * of the order they arrived in.
+ *
+ * down_read_nowait is only used in the page fault case, where we already hold
+ * the mmap_sem. This is because otherwise repeated read and write
+ * operations (which take trunc_sem) could prevent a truncate from ever
+ * starting.
+ * This could still happen with page faults, but without an even more complex
+ * mechanism, this is unavoidable.
+ *
+ * LU-12460
+ */
+static inline void trunc_sem_down_read_nowait(struct ll_trunc_sem *sem)
+{
+ wait_var_event(&sem->ll_trunc_readers,
+ atomic_inc_unless_negative(&sem->ll_trunc_readers));
+}
+
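+/* The fair variant: wait until there are no waiting writers before
+ * taking a read hold, so a stream of readers cannot starve a truncate.
+ */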
+static inline void trunc_sem_down_read(struct ll_trunc_sem *sem)
+{
+ wait_var_event(&sem->ll_trunc_readers,
+ atomic_read(&sem->ll_trunc_waiters) == 0 &&
+ atomic_inc_unless_negative(&sem->ll_trunc_readers));
+}
+
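+/* Drop a read hold; the last reader out wakes any waiting writers. */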
+static inline void trunc_sem_up_read(struct ll_trunc_sem *sem)
+{
+ if (atomic_dec_return(&sem->ll_trunc_readers) == 0 &&
+ atomic_read(&sem->ll_trunc_waiters))
+ wake_up_var(&sem->ll_trunc_readers);
+}
+
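+/* Announce a waiting writer (blocking new fair readers), then wait to
+ * atomically claim the semaphore for write (readers 0 -> -1).
+ */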
+static inline void trunc_sem_down_write(struct ll_trunc_sem *sem)
+{
+ atomic_inc(&sem->ll_trunc_waiters);
+ wait_var_event(&sem->ll_trunc_readers,
+ atomic_cmpxchg(&sem->ll_trunc_readers, 0, -1) == 0);
+ atomic_dec(&sem->ll_trunc_waiters);
+}
+
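+/* Release the write hold (-1 -> 0) and wake all waiters at once; see
+ * the comment on trunc_sem_down_read_nowait for why readers and writers
+ * are woken together.
+ */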
+static inline void trunc_sem_up_write(struct ll_trunc_sem *sem)
+{
+ atomic_set(&sem->ll_trunc_readers, 0);
+ wake_up_var(&sem->ll_trunc_readers);
+}
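+
+/* A usage sketch (illustrative only; the real call sites live in the
+ * llite I/O and page-fault paths, not in this header):
+ *
+ *	// normal I/O path: fair, waits behind any queued writer
+ *	trunc_sem_down_read(&lli->lli_trunc_sem);
+ *	// ... do I/O, possibly taking mmap_sem ...
+ *	trunc_sem_up_read(&lli->lli_trunc_sem);
+ *
+ *	// truncate/setattr path: exclusive access
+ *	trunc_sem_down_write(&lli->lli_trunc_sem);
+ *	// ... truncate pages ...
+ *	trunc_sem_up_write(&lli->lli_trunc_sem);
+ *
+ *	// page-fault path: mmap_sem already held, so join any existing
+ *	// readers and ignore waiting writers, avoiding the AB/BA
+ *	// deadlock described above
+ *	trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
+ */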
+
static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
{
__u32 gen;
LLIF_XATTR_CACHE = 2,
/* Project inherit */
LLIF_PROJECT_INHERIT = 3,
+ /* update atime from MDS even if it's older than local inode atime. */
+ LLIF_UPDATE_ATIME = 4,
};
static inline void ll_file_set_flag(struct ll_inode_info *lli,
* counted by page index.
*/
struct ra_io_arg {
- pgoff_t ria_start; /* start offset of read-ahead*/
- pgoff_t ria_end; /* end offset of read-ahead*/
- unsigned long ria_reserved; /* reserved pages for read-ahead */
- pgoff_t ria_end_min; /* minimum end to cover current read */
- bool ria_eof; /* reach end of file */
- /* If stride read pattern is detected, ria_stoff means where
- * stride read is started. Note: for normal read-ahead, the
+ pgoff_t ria_start_idx; /* start page index of read-ahead */
+ pgoff_t ria_end_idx; /* end page index of read-ahead */
+ unsigned long ria_reserved; /* reserved pages for read-ahead */
+ pgoff_t ria_end_idx_min; /* minimum end to cover current read */
+ bool ria_eof; /* reached end of file */
+ /* If stride read pattern is detected, ria_stoff is the byte offset
+ * where stride read is started. Note: for normal read-ahead, the
* value here is meaningless and it will not be accessed */
- unsigned long ria_stoff;
+ loff_t ria_stoff;
- /* ria_length and ria_bytes are the length and pages length in the
- * stride I/O mode. And they will also be used to check whether
- * it is stride I/O read-ahead in the read-ahead pages*/
- unsigned long ria_length;
- unsigned long ria_bytes;
+ /* ria_length and ria_bytes are the stride length and the number of
+ * bytes read within each stride, in stride I/O mode. They are also
+ * used to check whether the read-ahead is in stride I/O mode. */
+ loff_t ria_length;
+ loff_t ria_bytes;
};
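+
+/* Example (illustrative numbers): an application that reads 1 MiB of
+ * data every 4 MiB of the file would be described by ria_stoff = byte
+ * offset of the first chunk, ria_length = 4 MiB (data plus gap, one
+ * whole stride) and ria_bytes = 1 MiB (data read within each stride).
+ */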
/* LL_HIST_MAX=32 causes an overflow */
* per file-descriptor read-ahead data.
*/
struct ll_readahead_state {
- spinlock_t ras_lock;
+ spinlock_t ras_lock;
/* End byte that read(2) tries to read. */
- unsigned long ras_last_read_end;
+ loff_t ras_last_read_end_bytes;
/*
* number of bytes read after last read-ahead window reset. As window
* is reset on each seek, this is effectively a number of consecutive
* case, it probably doesn't make sense to expand window to
* PTLRPC_MAX_BRW_PAGES on the third access.
*/
- unsigned long ras_consecutive_bytes;
+ loff_t ras_consecutive_bytes;
/*
* number of read requests after the last read-ahead window reset.
* As window is reset on each seek, this is effectively the number
* of consecutive read requests and is used to trigger read-ahead.
*/
- unsigned long ras_consecutive_requests;
+ unsigned long ras_consecutive_requests;
/*
* Parameters of current read-ahead window. Handled by
* ras_update(). On the initial access to the file or after a seek,
* expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
* PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
*/
- pgoff_t ras_window_start, ras_window_len;
+ pgoff_t ras_window_start_idx;
+ pgoff_t ras_window_pages;
/*
- * Optimal RPC size. It decides how many pages will be sent
- * for each read-ahead.
+ * Optimal RPC size in pages.
+ * It decides how many pages will be sent for each read-ahead.
*/
- unsigned long ras_rpc_size;
+ unsigned long ras_rpc_pages;
/*
* Where the next read-ahead should start. This lies within the
* read-ahead window. The read-ahead window is read in pieces rather
* than at once
* ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
* not covered by DLM lock.
*/
- pgoff_t ras_next_readahead;
+ pgoff_t ras_next_readahead_idx;
/*
* Total number of ll_file_read requests issued; reads originating
* due to mmap are not counted in this total. This value is used to
* trigger full file read-ahead after multiple reads to a small file.
*/
- unsigned long ras_requests;
- /*
- * Page index with respect to the current request, these value
- * will not be accurate when dealing with reads issued via mmap.
- */
- unsigned long ras_request_index;
+ unsigned long ras_requests;
/*
* The following 3 items are used for detecting the stride I/O
* mode.
* ras_stride_bytes = stride_bytes;
* Note: all three items are counted in bytes.
*/
- unsigned long ras_stride_length;
- unsigned long ras_stride_bytes;
- unsigned long ras_stride_offset;
+ loff_t ras_stride_offset;
+ loff_t ras_stride_length;
+ loff_t ras_stride_bytes;
/*
* number of consecutive stride requests; similar to
* ras_consecutive_requests, but used for stride I/O mode.
* Note: stride read-ahead is enabled only after more than two
* consecutive stride requests are detected.
*/
- unsigned long ras_consecutive_stride_requests;
+ unsigned long ras_consecutive_stride_requests;
/* index of the last page at which async readahead starts */
- pgoff_t ras_async_last_readpage;
+ pgoff_t ras_async_last_readpage_idx;
+ /* whether we should increase readahead window */
+ bool ras_need_increase_window;
+ /* whether ra miss check should be skipped */
+ bool ras_no_miss_check;
};
struct ll_readahead_work {
/** File to readahead */
struct file *lrw_file;
- /** Start bytes */
- unsigned long lrw_start;
- /** End bytes */
- unsigned long lrw_end;
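+ /** Start page index of read-ahead */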
+ pgoff_t lrw_start_idx;
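+ /** End page index of read-ahead */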
+ pgoff_t lrw_end_idx;
/* async worker to handle read */
struct work_struct lrw_readahead_work;
return !!(sbi->ll_flags & LL_SBI_FILE_HEAT);
}
-void ll_ras_enter(struct file *f);
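+/* ll_ras_enter() now takes the read position and byte count so per-fd
+ * readahead state can be updated in bytes rather than pages (my reading
+ * of the new signature; the definition is in llite/rw.c).
+ */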
+void ll_ras_enter(struct file *f, loff_t pos, size_t count);
/* llite/lcommon_misc.c */
int cl_ocd_update(struct obd_device *host, struct obd_device *watched,
};
/* llite/dir.c */
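+/* how a directory's default layout is looked up: GET_DEFAULT_LAYOUT_ROOT
+ * requests the filesystem-wide default stored on the ROOT inode as well
+ * (inferred from the name; this hunk does not show the users of the enum)
+ */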
+enum get_default_layout_type {
+ GET_DEFAULT_LAYOUT_ROOT = 1,
+};
+
struct ll_dir_chain {
};
struct ptlrpc_request **request);
int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
int set_default);
-int ll_dir_getstripe(struct inode *inode, void **lmmp,
- int *lmm_size, struct ptlrpc_request **request,
- u64 valid);
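+/* ll_dir_getstripe_default() also consults the default layout stored on
+ * the filesystem ROOT; when that layout is the one returned,
+ * *root_request carries the MDS reply it came from (inferred from the
+ * signature; see llite/dir.c for the actual semantics)
+ */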
+int ll_dir_getstripe_default(struct inode *inode, void **lmmp,
+ int *lmm_size, struct ptlrpc_request **request,
+ struct ptlrpc_request **root_request, u64 valid);
+int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
+ struct ptlrpc_request **request, u64 valid);
int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
int ll_merge_attr(const struct lu_env *env, struct inode *inode);
int ll_fid2path(struct inode *inode, void __user *arg);
extern struct super_operations lustre_super_operations;
void ll_lli_init(struct ll_inode_info *lli);
-int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
+int ll_fill_super(struct super_block *sb);
void ll_put_super(struct super_block *sb);
void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
static inline int ll_file_nolock(const struct file *file)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct inode *inode = file_inode((struct file *)file);
- LASSERT(fd != NULL);
- return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
- (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
+ LASSERT(fd != NULL);
+ return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
+ (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
}
static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
u32 cl_fid_build_gen(const struct lu_fid *fid);
+static inline struct pcc_super *ll_i2pccs(struct inode *inode)
+{
+ return &ll_i2sbi(inode)->ll_pcc_super;
+}
+
+static inline struct pcc_super *ll_info2pccs(struct ll_inode_info *lli)
+{
+ return ll_i2pccs(ll_info2i(lli));
+}
+
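+/* A minimal sketch of the auto-attach check described at
+ * @lli_pcc_generation above, assuming the pcc.h names pccs_generation
+ * and PCC_DATASET_AUTO_ATTACH (the real logic lives in llite/pcc.c):
+ *
+ *	static inline bool ll_pcc_may_auto_attach(struct ll_inode_info *lli)
+ *	{
+ *		if (lli->lli_pcc_generation !=
+ *		    ll_info2pccs(lli)->pccs_generation)
+ *			return false;	// config changed, flags are stale
+ *		return lli->lli_pcc_dsflags & PCC_DATASET_AUTO_ATTACH;
+ *	}
+ */
+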
#endif /* LLITE_INTERNAL_H */