4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #ifndef LLITE_INTERNAL_H
34 #define LLITE_INTERNAL_H
35 #include <lustre_debug.h>
36 #include <lustre_disk.h> /* for s2sbi */
37 #include <lustre_eacl.h>
38 #include <lustre_linkea.h>
40 /* for struct cl_lock_descr and struct cl_io */
41 #include <cl_object.h>
42 #include <lustre_lmv.h>
43 #include <lustre_mdc.h>
44 #include <lustre_intent.h>
45 #include <linux/compat.h>
46 #include <linux/aio.h>
47 #include <lustre_compat.h>
49 #include "vvp_internal.h"
50 #include "range_lock.h"
57 #ifndef VM_FAULT_RETRY
58 #define VM_FAULT_RETRY 0
61 /* Kernel 3.1 removed LOOKUP_CONTINUE; LOOKUP_PARENT is equivalent to it.
62  * See kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0. */
63 #ifndef LOOKUP_CONTINUE
64 #define LOOKUP_CONTINUE LOOKUP_PARENT
67 /** Only used on client-side for indicating the tail of dir hash/offset. */
68 #define LL_DIR_END_OFF 0x7fffffffffffffffULL
69 #define LL_DIR_END_OFF_32BIT 0x7fffffffUL
71 /* 2^22 == 4UL * 1024 * 1024, i.e. a 4 MiB maximum block size */
72 #define LL_MAX_BLKSIZE_BITS 22
74 #define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
76 #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
78 struct ll_dentry_data {
79 unsigned int lld_sa_generation;
80 unsigned int lld_invalid:1;
81 unsigned int lld_nfs_dentry:1;
82 struct rcu_head lld_rcu_head;
85 #define ll_d2d(de) ((struct ll_dentry_data*)((de)->d_fsdata))
87 #define LLI_INODE_MAGIC 0x111d0de5
88 #define LLI_INODE_DEAD 0xdeadd00d
90 struct ll_getname_data {
91 #ifdef HAVE_DIR_CONTEXT
92 struct dir_context ctx;
94 char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
95 struct lu_fid lgd_fid; /* target fid we are looking for */
96 int lgd_found; /* inode matched? */
100 struct lu_env *lg_env;
102 struct cl_lock *lg_lock;
103 unsigned long lg_gid;
106 /* See comment on trunc_sem_down_read_nowait */
107 struct ll_trunc_sem {
108 /* when positive, this is a count of readers; when -1, it indicates
109  * the semaphore is held for write; 0 means unlocked. */
111 atomic_t ll_trunc_readers;
112 /* this tracks a count of waiting writers */
113 atomic_t ll_trunc_waiters;
116 struct ll_inode_info {
117 __u32 lli_inode_magic;
120 volatile unsigned long lli_flags;
121 struct posix_acl *lli_posix_acl;
123 /* identifying fields for both metadata and data stacks. */
124 struct lu_fid lli_fid;
125 /* master inode fid for stripe directory */
126 struct lu_fid lli_pfid;
128 /* We need all three because every inode may be opened in different modes. */
130 struct obd_client_handle *lli_mds_read_och;
131 struct obd_client_handle *lli_mds_write_och;
132 struct obd_client_handle *lli_mds_exec_och;
133 __u64 lli_open_fd_read_count;
134 __u64 lli_open_fd_write_count;
135 __u64 lli_open_fd_exec_count;
136 /* Protects access to och pointers and their usage counters */
137 struct mutex lli_och_mutex;
139 struct inode lli_vfs_inode;
141 /* the most recent timestamps obtained from mds */
146 spinlock_t lli_agl_lock;
148 /* Try to keep the d::member and f::member aligned. Before using
149  * these members, make clear whether the inode is a directory or not. */
153 /* metadata statahead */
154 /* since parent and child processes can share the same @file
155  * struct, "opendir_key" is the token used at dir close to
156  * decide who should clean up the dir statahead when the
157  * parent exits before the child. */
158 void *lli_opendir_key;
159 struct ll_statahead_info *lli_sai;
160 /* protect statahead stuff. */
161 spinlock_t lli_sa_lock;
162 /* "opendir_pid" is the token set at lookup/revalidate time
163  * -- it identifies the owner of the dir statahead. */
164 pid_t lli_opendir_pid;
165 /* stat will try to access statahead entries or start
166  * statahead if this flag is set; the flag is set upon dir
167  * open, and cleared when the dir is closed, the statahead
168  * hit ratio is too low, or starting the statahead thread failed. */
170 unsigned int lli_sa_enabled:1;
171 /* generation for statahead */
172 unsigned int lli_sa_generation;
173 /* rw lock protects lli_lsm_md */
174 struct rw_semaphore lli_lsm_sem;
175 /* directory stripe information */
176 struct lmv_stripe_md *lli_lsm_md;
177 /* directory default LMV */
178 struct lmv_stripe_md *lli_default_lsm_md;
181 /* for non-directory */
183 struct mutex lli_size_mutex;
184 char *lli_symlink_name;
185 struct ll_trunc_sem lli_trunc_sem;
186 struct range_lock_tree lli_write_tree;
188 struct rw_semaphore lli_glimpse_sem;
189 ktime_t lli_glimpse_time;
190 struct list_head lli_agl_list;
193 /* for writepage() only to communicate to fsync */
196 /* protect the file heat fields */
197 spinlock_t lli_heat_lock;
198 __u32 lli_heat_flags;
199 struct obd_heat_instance lli_heat_instances[OBD_HEAT_COUNT];
202  * Whenever a process tries to read/write the file, the
203  * jobid of the process will be saved here, and it'll
204  * be packed into the write RPC when flushed later.
206  * So the per-jobid read/write statistics will not be
207  * accurate if the file is shared by different jobs.
209 char lli_jobid[LUSTRE_JOBID_SIZE];
211 struct mutex lli_pcc_lock;
212 enum lu_pcc_state_flags lli_pcc_state;
214  * @lli_pcc_generation saves the global PCC generation
215  * when the file was successfully attached into PCC.
216  * The flags of the PCC dataset are saved in @lli_pcc_dsflags.
218  * The global PCC generation will be increased when a PCC
219  * backend is added or deleted, or when the PCC configuration
220  * parameters are changed.
221  * If @lli_pcc_generation is the same as the global PCC
222  * generation, we can use the saved flags of the PCC
223  * dataset to determine whether to try auto attach. */
226 __u64 lli_pcc_generation;
227 enum pcc_dataset_flags lli_pcc_dsflags;
228 struct pcc_inode *lli_pcc_inode;
230 struct mutex lli_group_mutex;
231 __u64 lli_group_users;
232 unsigned long lli_group_gid;
234 __u64 lli_attr_valid;
236 __u64 lli_lazyblocks;
240 /* XXX: For the following frequently used members, although they may only
241  * be used for non-directory objects, it is somewhat wasteful to check
242  * whether the object is a directory or not before using them. On the
243  * other hand, since currently sizeof(f) > sizeof(d), moving those members
244  * into u.f would not reduce the "ll_inode_info" size anyway.
245  * So keep them outside.
247  * In the future, if more members are added only for directories,
248  * some of the following members can be moved into u.f.
250 struct cl_object *lli_clob;
252 /* mutex used to request the layout lock exclusively. */
253 struct mutex lli_layout_mutex;
254 /* Layout version, protected by lli_layout_lock */
255 __u32 lli_layout_gen;
256 spinlock_t lli_layout_lock;
258 __u32 lli_projid; /* project id */
260 struct rw_semaphore lli_xattrs_list_rwsem;
261 struct mutex lli_xattrs_enq_lock;
262 struct list_head lli_xattrs; /* ll_xattr_entry->xe_list */
265 static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem)
267 atomic_set(&sem->ll_trunc_readers, 0);
268 atomic_set(&sem->ll_trunc_waiters, 0);
271 /* This version of down read ignores waiting writers, meaning if the semaphore
272 * is already held for read, this down_read will 'join' that reader and also
273 * take the semaphore.
275 * This lets us avoid an unusual deadlock.
277  * We must take lli_trunc_sem in read mode on entry into various I/O paths
278  * in Lustre, in order to exclude truncates. Some of these paths then need to
279  * take the mmap_sem, while still holding the trunc_sem. The problem is that
280  * page faults hold the mmap_sem when calling into Lustre, and then must also
281  * take the trunc_sem to exclude truncate.
283 * This means the locking order for trunc_sem and mmap_sem is sometimes AB,
284 * sometimes BA. This is almost OK because in both cases, we take the trunc
285 * sem for read, so it doesn't block.
287  * However, if a write mode user (truncate, a setattr op) arrives in the
288  * middle of this, the second reader on the truncate_sem will wait behind that
289  * writer.
291  * So we have, on our truncate sem, in order (where 'reader' and 'writer' refer
292  * to the mode in which they take the semaphore):
293  * reader (holding mmap_sem, needs truncate_sem)
294  * writer
295  * reader (holding truncate sem, waiting for mmap_sem)
297 * And so the readers deadlock.
299 * The solution is this modified semaphore, where this down_read ignores
300 * waiting write operations, and all waiters are woken up at once, so readers
301 * using down_read_nowait cannot get stuck behind waiting writers, regardless
302 * of the order they arrived in.
304 * down_read_nowait is only used in the page fault case, where we already hold
305 * the mmap_sem. This is because otherwise repeated read and write operations
306 * (which take the truncate sem) could prevent a truncate from ever starting.
307 * This could still happen with page faults, but without an even more complex
308 * mechanism, this is unavoidable.
312 static inline void trunc_sem_down_read_nowait(struct ll_trunc_sem *sem)
314 wait_var_event(&sem->ll_trunc_readers,
315 atomic_inc_unless_negative(&sem->ll_trunc_readers));
318 static inline void trunc_sem_down_read(struct ll_trunc_sem *sem)
320 wait_var_event(&sem->ll_trunc_readers,
321 atomic_read(&sem->ll_trunc_waiters) == 0 &&
322 atomic_inc_unless_negative(&sem->ll_trunc_readers));
325 static inline void trunc_sem_up_read(struct ll_trunc_sem *sem)
327 if (atomic_dec_return(&sem->ll_trunc_readers) == 0 &&
328 atomic_read(&sem->ll_trunc_waiters))
329 wake_up_var(&sem->ll_trunc_readers);
332 static inline void trunc_sem_down_write(struct ll_trunc_sem *sem)
334 atomic_inc(&sem->ll_trunc_waiters);
335 wait_var_event(&sem->ll_trunc_readers,
336 atomic_cmpxchg(&sem->ll_trunc_readers, 0, -1) == 0);
337 atomic_dec(&sem->ll_trunc_waiters);
340 static inline void trunc_sem_up_write(struct ll_trunc_sem *sem)
342 atomic_set(&sem->ll_trunc_readers, 0);
343 wake_up_var(&sem->ll_trunc_readers);
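/*
 * Illustrative sketch only (not part of the build): how the three lock modes
 * described above are expected to pair up.  A regular I/O path excludes
 * truncate with trunc_sem_down_read(), the page-fault path (which already
 * holds mmap_sem) uses the _nowait variant, and truncate itself takes the
 * semaphore in write mode.  The function names other than the trunc_sem_*
 * helpers are hypothetical.
 */
#if 0 /* example only */
static void example_regular_io(struct ll_inode_info *lli)
{
	trunc_sem_down_read(&lli->lli_trunc_sem);
	/* ... perform read/write that must not race with truncate ... */
	trunc_sem_up_read(&lli->lli_trunc_sem);
}

static void example_page_fault(struct ll_inode_info *lli)
{
	/* mmap_sem is already held here, so ignore waiting writers */
	trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
	/* ... fault the page in ... */
	trunc_sem_up_read(&lli->lli_trunc_sem);
}

static void example_truncate(struct ll_inode_info *lli)
{
	trunc_sem_down_write(&lli->lli_trunc_sem);
	/* ... change the file size ... */
	trunc_sem_up_write(&lli->lli_trunc_sem);
}
#endif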
346 static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
350 spin_lock(&lli->lli_layout_lock);
351 gen = lli->lli_layout_gen;
352 spin_unlock(&lli->lli_layout_lock);
357 static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen)
359 spin_lock(&lli->lli_layout_lock);
360 lli->lli_layout_gen = gen;
361 spin_unlock(&lli->lli_layout_lock);
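/*
 * Illustrative sketch only (not part of the build): a caller that cached a
 * layout generation earlier can detect a layout change by refreshing the
 * layout (ll_layout_refresh() is declared later in this header) and comparing
 * generations.  The function name is hypothetical.
 */
#if 0 /* example only */
static int example_layout_changed(struct inode *inode, __u32 old_gen)
{
	__u32 gen;
	int rc;

	/* fetch (and, if necessary, re-acquire) the current layout generation */
	rc = ll_layout_refresh(inode, &gen);
	if (rc != 0)
		return rc;

	return gen != old_gen;
}
#endif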
365 /* File data is modified. */
366 LLIF_DATA_MODIFIED = 0,
367 /* File is being restored */
368 LLIF_FILE_RESTORING = 1,
369 /* Xattr cache is attached to the file */
370 LLIF_XATTR_CACHE = 2,
371 /* Project inherit */
372 LLIF_PROJECT_INHERIT = 3,
373 /* update atime from MDS even if it's older than local inode atime. */
374 LLIF_UPDATE_ATIME = 4,
378 static inline void ll_file_set_flag(struct ll_inode_info *lli,
379 enum ll_file_flags flag)
381 set_bit(flag, &lli->lli_flags);
384 static inline void ll_file_clear_flag(struct ll_inode_info *lli,
385 enum ll_file_flags flag)
387 clear_bit(flag, &lli->lli_flags);
390 static inline bool ll_file_test_flag(struct ll_inode_info *lli,
391 enum ll_file_flags flag)
393 return test_bit(flag, &lli->lli_flags);
396 static inline bool ll_file_test_and_clear_flag(struct ll_inode_info *lli,
397 enum ll_file_flags flag)
399 return test_and_clear_bit(flag, &lli->lli_flags);
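/*
 * Illustrative sketch only (not part of the build): the helpers above are
 * thin wrappers around the atomic bit operations on lli_flags, e.g. marking
 * a file dirty after a write and consuming that fact exactly once later.
 * The function names are hypothetical.
 */
#if 0 /* example only */
static void example_mark_modified(struct ll_inode_info *lli)
{
	ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
}

static void example_consume_modified(struct ll_inode_info *lli)
{
	if (ll_file_test_and_clear_flag(lli, LLIF_DATA_MODIFIED)) {
		/* ... data was modified since the flag was last cleared ... */
	}
}
#endif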
402 int ll_xattr_cache_destroy(struct inode *inode);
404 int ll_xattr_cache_get(struct inode *inode,
410 static inline bool obd_connect_has_secctx(struct obd_connect_data *data)
412 #ifdef CONFIG_SECURITY
413 return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
414 data->ocd_connect_flags2 & OBD_CONNECT2_FILE_SECCTX;
420 static inline void obd_connect_set_secctx(struct obd_connect_data *data)
422 #ifdef CONFIG_SECURITY
423 data->ocd_connect_flags2 |= OBD_CONNECT2_FILE_SECCTX;
427 int ll_dentry_init_security(struct dentry *dentry, int mode, struct qstr *name,
428 const char **secctx_name, void **secctx,
430 int ll_inode_init_security(struct dentry *dentry, struct inode *inode,
433 int ll_listsecurity(struct inode *inode, char *secctx_name,
434 size_t secctx_name_size);
437 * Locking to guarantee consistency of non-atomic updates to long long i_size,
438 * consistency between file size and KMS.
440 * Implemented by ->lli_size_mutex and ->lsm_lock, nested in that order.
443 void ll_inode_size_lock(struct inode *inode);
444 void ll_inode_size_unlock(struct inode *inode);
446 static inline struct ll_inode_info *ll_i2info(struct inode *inode)
448 return container_of(inode, struct ll_inode_info, lli_vfs_inode);
451 static inline struct pcc_inode *ll_i2pcci(struct inode *inode)
453 return ll_i2info(inode)->lli_pcc_inode;
456 /* default to use at least 16M for fast read if possible */
457 #define RA_REMAIN_WINDOW_MIN MiB_TO_PAGES(16UL)
459 /* default readahead on a given system. */
460 #define SBI_DEFAULT_READ_AHEAD_MAX MiB_TO_PAGES(64UL)
462 /* by default, read ahead whole files smaller than this limit on the second read */
463 #define SBI_DEFAULT_READ_AHEAD_WHOLE_MAX MiB_TO_PAGES(2UL)
468 RA_STAT_DISTANT_READPAGE,
469 RA_STAT_MISS_IN_WINDOW,
470 RA_STAT_FAILED_GRAB_PAGE,
471 RA_STAT_FAILED_MATCH,
476 RA_STAT_MAX_IN_FLIGHT,
477 RA_STAT_WRONG_GRAB_PAGE,
478 RA_STAT_FAILED_REACH_END,
480 RA_STAT_FAILED_FAST_READ,
485 atomic_t ra_cur_pages;
486 unsigned long ra_max_pages;
487 unsigned long ra_max_pages_per_file;
488 unsigned long ra_max_read_ahead_whole_pages;
489 struct workqueue_struct *ll_readahead_wq;
491  * Max number of active works that can be triggered
492 * for async readahead.
494 unsigned int ra_async_max_active;
495 /* how many async readahead triggered in flight */
496 atomic_t ra_async_inflight;
497 /* Threshold to control when to trigger async readahead */
498 unsigned long ra_async_pages_per_file_threshold;
501 /* ra_io_arg is filled at the beginning of ll_readahead while holding
502  * ras_lock; ll_read_ahead_pages then reads RA pages according to this
503  * arg. All the items in this structure are counted in units of page
504  * index.
507 pgoff_t ria_start_idx; /* start offset of read-ahead*/
508 pgoff_t ria_end_idx; /* end offset of read-ahead*/
509 unsigned long ria_reserved; /* reserved pages for read-ahead */
510 pgoff_t ria_end_idx_min;/* minimum end to cover current read */
511 bool ria_eof; /* reach end of file */
512 /* If a stride read pattern is detected, ria_stoff is the byte offset
513  * where the stride read starts. Note: for normal read-ahead the
514  * value here is meaningless and will not be accessed. */
516 /* ria_length and ria_bytes are the stride length and the data length
517  * within a stride in stride I/O mode. They are also used to check
518  * whether the read-ahead pages belong to a stride I/O read-ahead. */
523 /* LL_HIST_MAX=32 causes an overflow */
524 #define LL_HIST_MAX 28
525 #define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
526 #define LL_PROCESS_HIST_MAX 10
527 struct per_process_info {
529 struct obd_histogram pp_r_hist;
530 struct obd_histogram pp_w_hist;
533 /* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
534 struct ll_rw_extents_info {
535 struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
538 #define LL_OFFSET_HIST_MAX 100
539 struct ll_rw_process_info {
542 loff_t rw_range_start;
544 loff_t rw_last_file_pos;
546 size_t rw_smallest_extent;
547 size_t rw_largest_extent;
548 struct ll_file_data *rw_last_file;
551 enum stats_track_type {
552 STATS_TRACK_ALL = 0, /* track all processes */
553 STATS_TRACK_PID, /* track process with this pid */
554 STATS_TRACK_PPID, /* track processes with this ppid */
555 STATS_TRACK_GID, /* track processes with this gid */
559 /* flags for sbi->ll_flags */
560 #define LL_SBI_NOLCK 0x01 /* DLM locking disabled (directio-only) */
561 #define LL_SBI_CHECKSUM 0x02 /* checksum each page as it's written */
562 #define LL_SBI_FLOCK 0x04
563 #define LL_SBI_USER_XATTR 0x08 /* support user xattr */
564 #define LL_SBI_ACL 0x10 /* support ACL */
565 /* LL_SBI_RMT_CLIENT 0x40 remote client */
566 #define LL_SBI_MDS_CAPA 0x80 /* support mds capa, obsolete */
567 #define LL_SBI_OSS_CAPA 0x100 /* support oss capa, obsolete */
568 #define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
569 #define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
570 #define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
571 /* LL_SBI_SOM_PREVIEW 0x1000 SOM preview mount option, obsolete */
572 #define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */
573 #define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */
574 #define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */
575 #define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */
576 #define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */
577 #define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
578 #define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
579 #define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
580 #define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if the server suppresses pings */
582 #define LL_SBI_FAST_READ 0x400000 /* fast read support */
583 #define LL_SBI_FILE_SECCTX 0x800000 /* set file security context at create */
584 /* LL_SBI_PIO 0x1000000 parallel IO support, introduced in
586 #define LL_SBI_TINY_WRITE 0x2000000 /* tiny write support */
587 #define LL_SBI_FILE_HEAT 0x4000000 /* file heat support */
588 #define LL_SBI_FLAGS { \
618 /* This is embedded into llite super-blocks to keep track of connect
619  * flags (capabilities) supported by all imports a given mount is
620  * connected to. */
621 struct lustre_client_ocd {
622 /* This is the conjunction of connect_flags across all imports
623 * (LOVs) this mount is connected to. This field is updated by
624 * cl_ocd_update() under ->lco_lock. */
626 struct mutex lco_lock;
627 struct obd_export *lco_md_exp;
628 struct obd_export *lco_dt_exp;
632 /* this protects pglist and ra_info. It isn't safe to
633  * grab it from interrupt context. */
635 spinlock_t ll_pp_extent_lock; /* pp_extent entry*/
636 spinlock_t ll_process_lock; /* ll_rw_process_info */
637 struct obd_uuid ll_sb_uuid;
638 struct obd_export *ll_md_exp;
639 struct obd_export *ll_dt_exp;
640 struct obd_device *ll_md_obd;
641 struct obd_device *ll_dt_obd;
642 struct dentry *ll_debugfs_entry;
643 struct lu_fid ll_root_fid; /* root object fid */
646 unsigned int ll_xattr_cache_enabled:1,
647 ll_xattr_cache_set:1, /* already set to 0/1 */
648 ll_client_common_fill_super_succeeded:1,
651 struct lustre_client_ocd ll_lco;
653 struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
655 /* Used to track "unstable" pages on a client, and maintain an
656  * LRU list of clean pages. An "unstable" page is defined as
657 * any page which is sent to a server as part of a bulk request,
658 * but is uncommitted to stable storage. */
659 struct cl_client_cache *ll_cache;
661 struct lprocfs_stats *ll_ra_stats;
663 struct ll_ra_info ll_ra_info;
664 unsigned int ll_namelen;
665 struct file_operations *ll_fop;
667 struct lu_site *ll_site;
668 struct cl_device *ll_cl;
670 struct ll_rw_extents_info ll_rw_extents_info;
671 int ll_extent_process_count;
672 struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
673 unsigned int ll_offset_process_count;
674 struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
675 unsigned int ll_rw_offset_entry_count;
676 int ll_stats_track_id;
677 enum stats_track_type ll_stats_track_type;
680 /* metadata stat-ahead */
681 unsigned int ll_sa_running_max;/* max concurrent
682 * statahead instances */
683 unsigned int ll_sa_max; /* max statahead RPCs */
684 atomic_t ll_sa_total; /* statahead thread started
686 atomic_t ll_sa_wrong; /* statahead thread stopped for
688 atomic_t ll_sa_running; /* running statahead thread
690 atomic_t ll_agl_total; /* AGL thread started count */
692 dev_t ll_sdev_orig; /* save s_dev before assign for
695 struct root_squash_info ll_squash;
698 /* st_blksize returned by stat(2), when non-zero */
699 unsigned int ll_stat_blksize;
701 /* maximum relative age of cached statfs results */
702 unsigned int ll_statfs_max_age;
704 struct kset ll_kset; /* sysfs object */
705 struct completion ll_kobj_unregister;
708 unsigned int ll_heat_decay_weight;
709 unsigned int ll_heat_period_second;
711 /* filesystem fsname */
712 char ll_fsname[LUSTRE_MAXFSNAME + 1];
714 /* Persistent Client Cache */
715 struct pcc_super ll_pcc_super;
718 #define SBI_DEFAULT_HEAT_DECAY_WEIGHT ((80 * 256 + 50) / 100)
719 #define SBI_DEFAULT_HEAT_PERIOD_SECOND (60)
721 * per file-descriptor read-ahead data.
723 struct ll_readahead_state {
725 /* End byte that read(2) tries to read. */
726 loff_t ras_last_read_end_bytes;
728  * number of bytes read after the last read-ahead window reset. As the
729  * window is reset on each seek, this is effectively the number of
730  * consecutive accesses. Maybe ->ras_accessed_in_window is a better name.
732 * XXX nikita: window is also reset (by ras_update()) when Lustre
733 * believes that memory pressure evicts read-ahead pages. In that
734 * case, it probably doesn't make sense to expand window to
735 * PTLRPC_MAX_BRW_PAGES on the third access.
737 loff_t ras_consecutive_bytes;
739  * number of read requests after the last read-ahead window reset.
740  * As the window is reset on each seek, this is effectively the number
741  * of consecutive read requests and is used to trigger read-ahead.
743 unsigned long ras_consecutive_requests;
745 * Parameters of current read-ahead window. Handled by
746 * ras_update(). On the initial access to the file or after a seek,
747 * window is reset to 0. After 3 consecutive accesses, window is
748 * expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
749 * PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
751 pgoff_t ras_window_start_idx;
752 pgoff_t ras_window_pages;
754 * Optimal RPC size in pages.
755 * It decides how many pages will be sent for each read-ahead.
757 unsigned long ras_rpc_pages;
759  * Where the next read-ahead should start. This lies within the
760  * read-ahead window. The read-ahead window is read in pieces rather
761  * than at once because: 1. Lustre limits the total number of pages
762  * under read-ahead by ->ra_max_pages (see ll_ra_count_get()), 2. the
763  * client cannot read pages not covered by a DLM lock.
765 pgoff_t ras_next_readahead_idx;
767  * Total number of ll_file_read requests issued; reads originating
768  * from mmap are not counted in this total. This value is used to
769  * trigger full-file read-ahead after multiple reads of a small file.
771 unsigned long ras_requests;
773  * The following 3 items are used for detecting the stride I/O mode.
775 * In stride I/O mode,
776 * ...............|-----data-----|****gap*****|--------|******|....
777 * offset |-stride_bytes-|-stride_gap-|
778 * ras_stride_offset = offset;
779 * ras_stride_length = stride_bytes + stride_gap;
780 * ras_stride_bytes = stride_bytes;
781  * Note: all three items are counted in bytes (see the worked example after this struct). */
783 loff_t ras_stride_offset;
784 loff_t ras_stride_length;
785 loff_t ras_stride_bytes;
787  * count of consecutive stride requests; it is similar to
788  * ras_consecutive_requests, but used for stride I/O mode.
789  * Note: stride read-ahead is only enabled after more than 2
790  * consecutive stride requests have been detected.
792 unsigned long ras_consecutive_stride_requests;
793 /* index of the last page that async readahead starts */
794 pgoff_t ras_async_last_readpage_idx;
795 /* whether we should increase readahead window */
796 bool ras_need_increase_window;
797 /* whether ra miss check should be skipped */
798 bool ras_no_miss_check;
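/*
 * Illustrative sketch only (not part of the build): a worked example of the
 * stride fields described above.  Assume an application that reads 1 MiB of
 * data, skips 3 MiB, reads 1 MiB, and so on, starting at byte 0; then
 * stride_bytes is 1 MiB and stride_gap is 3 MiB.  The function name is
 * hypothetical.
 */
#if 0 /* example only */
static void example_stride_fields(struct ll_readahead_state *ras)
{
	ras->ras_stride_offset = 0;		/* byte offset where the stride read starts */
	ras->ras_stride_bytes  = 1 << 20;	/* stride_bytes: 1 MiB of data per stride */
	ras->ras_stride_length = 4 << 20;	/* stride_bytes + stride_gap: 4 MiB total */
}
#endif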
801 struct ll_readahead_work {
802 /** File to readahead */
803 struct file *lrw_file;
804 pgoff_t lrw_start_idx;
807 /* async worker to handle read */
808 struct work_struct lrw_readahead_work;
809 char lrw_jobid[LUSTRE_JOBID_SIZE];
812 extern struct kmem_cache *ll_file_data_slab;
813 struct lustre_handle;
814 struct ll_file_data {
815 struct ll_readahead_state fd_ras;
816 struct ll_grouplock fd_grouplock;
820 /* openhandle if lease exists for this file.
821 * Borrow lli->lli_och_mutex to protect assignment */
822 struct obd_client_handle *fd_lease_och;
823 struct obd_client_handle *fd_och;
824 struct file *fd_file;
825 /* Indicates whether a failure needs to be reported on close.
826  * true: the failure is already known, do not report it again.
827  * false: the failure is unknown and should be reported. */
828 bool fd_write_failed;
829 bool ll_lock_no_expand;
830 rwlock_t fd_lock; /* protect lcc list */
831 struct list_head fd_lccs; /* list of ll_cl_context */
832 /* Used by a mirrored file to direct I/Os to a specific mirror,
833  * usually for mirror resync. 0 means default. */
834 __u32 fd_designated_mirror;
835 /* The layout version when resync starts. Resync I/O should carry this
836 * layout version for verification to OST objects */
837 __u32 fd_layout_version;
838 struct pcc_file fd_pcc_file;
841 void llite_tunables_unregister(void);
842 int llite_tunables_register(void);
844 static inline struct inode *ll_info2i(struct ll_inode_info *lli)
846 return &lli->lli_vfs_inode;
849 __u32 ll_i2suppgid(struct inode *i);
850 void ll_i2gids(__u32 *suppgids, struct inode *i1,struct inode *i2);
852 static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
854 #if BITS_PER_LONG == 32
856 #elif defined(CONFIG_COMPAT)
857 if (unlikely(sbi->ll_flags & LL_SBI_32BIT_API))
860 # ifdef CONFIG_X86_X32
861 /* in_compat_syscall() returns true when called from a kthread
862 * and CONFIG_X86_X32 is enabled, which is wrong. So check
863  * whether the caller comes from a syscall (i.e. not a kthread)
864 * before calling in_compat_syscall(). */
865 if (current->flags & PF_KTHREAD)
869 return unlikely(in_compat_syscall());
871 return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
875 static inline bool ll_sbi_has_fast_read(struct ll_sb_info *sbi)
877 return !!(sbi->ll_flags & LL_SBI_FAST_READ);
880 static inline bool ll_sbi_has_tiny_write(struct ll_sb_info *sbi)
882 return !!(sbi->ll_flags & LL_SBI_TINY_WRITE);
885 static inline bool ll_sbi_has_file_heat(struct ll_sb_info *sbi)
887 return !!(sbi->ll_flags & LL_SBI_FILE_HEAT);
890 void ll_ras_enter(struct file *f, loff_t pos, size_t count);
892 /* llite/lcommon_misc.c */
893 int cl_ocd_update(struct obd_device *host, struct obd_device *watched,
894 enum obd_notify_event ev, void *owner);
895 int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
896 struct ll_grouplock *lg);
897 void cl_put_grouplock(struct ll_grouplock *lg);
899 /* llite/lproc_llite.c */
900 int ll_debugfs_register_super(struct super_block *sb, const char *name);
901 void ll_debugfs_unregister_super(struct super_block *sb);
902 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, long count);
906 LPROC_LL_WRITE_BYTES,
933 LPROC_LL_GETXATTR_HITS,
935 LPROC_LL_REMOVEXATTR,
938 LPROC_LL_FILE_OPCODES
942 enum get_default_layout_type {
943 GET_DEFAULT_LAYOUT_ROOT = 1,
946 struct ll_dir_chain {
949 static inline void ll_dir_chain_init(struct ll_dir_chain *chain)
953 static inline void ll_dir_chain_fini(struct ll_dir_chain *chain)
957 extern const struct file_operations ll_dir_operations;
958 extern const struct inode_operations ll_dir_inode_operations;
959 #ifdef HAVE_DIR_CONTEXT
960 int ll_dir_read(struct inode *inode, __u64 *pos, struct md_op_data *op_data,
961 struct dir_context *ctx);
963 int ll_dir_read(struct inode *inode, __u64 *pos, struct md_op_data *op_data,
964 void *cookie, filldir_t filldir);
966 int ll_get_mdt_idx(struct inode *inode);
967 int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid);
968 struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
969 __u64 offset, struct ll_dir_chain *chain);
970 void ll_release_page(struct inode *inode, struct page *page, bool remove);
973 extern const struct inode_operations ll_special_inode_operations;
975 struct inode *ll_iget(struct super_block *sb, ino_t hash,
976 struct lustre_md *lic);
977 int ll_test_inode_by_fid(struct inode *inode, void *opaque);
978 int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
979 void *data, int flag);
980 struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
981 int ll_rmdir_entry(struct inode *dir, char *name, int namelen);
982 void ll_update_times(struct ptlrpc_request *request, struct inode *inode);
985 int ll_writepage(struct page *page, struct writeback_control *wbc);
986 int ll_writepages(struct address_space *, struct writeback_control *wbc);
987 int ll_readpage(struct file *file, struct page *page);
988 int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
989 struct cl_page *page, struct file *file);
990 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
991 int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
994 void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io,
996 void ll_cl_remove(struct file *file, const struct lu_env *env);
997 struct ll_cl_context *ll_cl_find(struct file *file);
999 extern const struct address_space_operations ll_aops;
1002 extern struct file_operations ll_file_operations;
1003 extern struct file_operations ll_file_operations_flock;
1004 extern struct file_operations ll_file_operations_noflock;
1005 extern struct inode_operations ll_file_inode_operations;
1006 extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
1007 enum ldlm_mode l_req_mode);
1008 extern enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
1009 struct lustre_handle *lockh, __u64 flags,
1010 enum ldlm_mode mode);
1012 int ll_file_open(struct inode *inode, struct file *file);
1013 int ll_file_release(struct inode *inode, struct file *file);
1014 int ll_release_openhandle(struct dentry *, struct lookup_intent *);
1015 int ll_md_real_close(struct inode *inode, fmode_t fmode);
1016 extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1017 struct ll_file_data *file, loff_t pos,
1018 size_t count, int rw);
1019 #ifdef HAVE_INODEOPS_ENHANCED_GETATTR
1020 int ll_getattr(const struct path *path, struct kstat *stat,
1021 u32 request_mask, unsigned int flags);
1023 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
1025 int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
1026 unsigned int flags);
1027 struct posix_acl *ll_get_acl(struct inode *inode, int type);
1028 #ifdef HAVE_IOP_SET_ACL
1029 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
1030 int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type);
1031 #else /* !CONFIG_LUSTRE_FS_POSIX_ACL */
1032 #define ll_set_acl NULL
1033 #endif /* CONFIG_LUSTRE_FS_POSIX_ACL */
1037 static inline int ll_xflags_to_inode_flags(int xflags)
1039 return ((xflags & FS_XFLAG_SYNC) ? S_SYNC : 0) |
1040 ((xflags & FS_XFLAG_NOATIME) ? S_NOATIME : 0) |
1041 ((xflags & FS_XFLAG_APPEND) ? S_APPEND : 0) |
1042 ((xflags & FS_XFLAG_IMMUTABLE) ? S_IMMUTABLE : 0);
1045 static inline int ll_inode_flags_to_xflags(int flags)
1047 return ((flags & S_SYNC) ? FS_XFLAG_SYNC : 0) |
1048 ((flags & S_NOATIME) ? FS_XFLAG_NOATIME : 0) |
1049 ((flags & S_APPEND) ? FS_XFLAG_APPEND : 0) |
1050 ((flags & S_IMMUTABLE) ? FS_XFLAG_IMMUTABLE : 0);
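/*
 * Illustrative sketch only (not part of the build): the two converters above
 * are inverses for the four supported bits, e.g. when handling an
 * FS_IOC_FSSETXATTR-style request.  The function name is hypothetical.
 */
#if 0 /* example only */
static int example_round_trip(int xflags)
{
	int iflags = ll_xflags_to_inode_flags(xflags);

	/* only the SYNC, NOATIME, APPEND and IMMUTABLE bits survive the
	 * round trip; any other FS_XFLAG_* bit is dropped by the conversion */
	return ll_inode_flags_to_xflags(iflags);
}
#endif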
1053 int ll_migrate(struct inode *parent, struct file *file,
1054 struct lmv_user_md *lum, const char *name);
1055 int ll_get_fid_by_name(struct inode *parent, const char *name,
1056 int namelen, struct lu_fid *fid, struct inode **inode);
1057 int ll_inode_permission(struct inode *inode, int mask);
1058 int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa);
1059 int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
1061 int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
1064 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
1065 __u64 flags, struct lov_user_md *lum,
1067 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
1068 struct lov_mds_md **lmm, int *lmm_size,
1069 struct ptlrpc_request **request);
1070 int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
1072 int ll_dir_getstripe_default(struct inode *inode, void **lmmp,
1073 int *lmm_size, struct ptlrpc_request **request,
1074 struct ptlrpc_request **root_request, u64 valid);
1075 int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
1076 struct ptlrpc_request **request, u64 valid);
1077 int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
1078 int ll_merge_attr(const struct lu_env *env, struct inode *inode);
1079 int ll_fid2path(struct inode *inode, void __user *arg);
1080 int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
1081 int ll_hsm_release(struct inode *inode);
1082 int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss);
1083 void ll_io_set_mirror(struct cl_io *io, const struct file *file);
1085 /* llite/dcache.c */
1087 int ll_d_init(struct dentry *de);
1088 extern const struct dentry_operations ll_d_ops;
1089 void ll_intent_drop_lock(struct lookup_intent *);
1090 void ll_intent_release(struct lookup_intent *);
1091 void ll_invalidate_aliases(struct inode *);
1092 void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
1093 int ll_revalidate_it_finish(struct ptlrpc_request *request,
1094 struct lookup_intent *it, struct dentry *de);
1096 /* llite/llite_lib.c */
1097 extern struct super_operations lustre_super_operations;
1099 void ll_lli_init(struct ll_inode_info *lli);
1100 int ll_fill_super(struct super_block *sb);
1101 void ll_put_super(struct super_block *sb);
1102 void ll_kill_super(struct super_block *sb);
1103 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
1104 void ll_dir_clear_lsm_md(struct inode *inode);
1105 void ll_clear_inode(struct inode *inode);
1106 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
1107 enum op_xvalid xvalid, bool hsm_import);
1108 int ll_setattr(struct dentry *de, struct iattr *attr);
1109 int ll_statfs(struct dentry *de, struct kstatfs *sfs);
1110 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
1112 int ll_update_inode(struct inode *inode, struct lustre_md *md);
1113 void ll_update_inode_flags(struct inode *inode, int ext_flags);
1114 int ll_read_inode2(struct inode *inode, void *opaque);
1115 void ll_delete_inode(struct inode *inode);
1116 int ll_iocontrol(struct inode *inode, struct file *file,
1117 unsigned int cmd, unsigned long arg);
1118 int ll_flush_ctx(struct inode *inode);
1119 void ll_umount_begin(struct super_block *sb);
1120 int ll_remount_fs(struct super_block *sb, int *flags, char *data);
1121 int ll_show_options(struct seq_file *seq, struct dentry *dentry);
1122 void ll_dirty_page_discard_warn(struct page *page, int ioret);
1123 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
1124 struct super_block *, struct lookup_intent *);
1125 int ll_obd_statfs(struct inode *inode, void __user *arg);
1126 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
1127 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
1128 int ll_set_default_mdsize(struct ll_sb_info *sbi, int default_mdsize);
1130 void ll_unlock_md_op_lsm(struct md_op_data *op_data);
1131 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
1132 struct inode *i1, struct inode *i2,
1133 const char *name, size_t namelen,
1134 __u32 mode, enum md_op_code opc,
1136 void ll_finish_md_op_data(struct md_op_data *op_data);
1137 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
1138 void ll_compute_rootsquash_state(struct ll_sb_info *sbi);
1139 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
1140 struct lov_user_md **kbuf);
1141 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req);
1143 void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req,
1144 struct lookup_intent *it);
1146 /* Compute the expected user md size when passing in an md from user space */
1147 static inline ssize_t ll_lov_user_md_size(const struct lov_user_md *lum)
1149 switch (lum->lmm_magic) {
1150 case LOV_USER_MAGIC_V1:
1151 return sizeof(struct lov_user_md_v1);
1152 case LOV_USER_MAGIC_V3:
1153 return sizeof(struct lov_user_md_v3);
1154 case LOV_USER_MAGIC_SPECIFIC:
1155 if (lum->lmm_stripe_count > LOV_MAX_STRIPE_COUNT)
1158 return lov_user_md_size(lum->lmm_stripe_count,
1159 LOV_USER_MAGIC_SPECIFIC);
1160 case LOV_USER_MAGIC_COMP_V1:
1161 return ((struct lov_comp_md_v1 *)lum)->lcm_size;
1162 case LOV_USER_MAGIC_FOREIGN:
1163 return foreign_size(lum);
1164 default:
1165 return -EINVAL;
1166 }
1167 }
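/*
 * Illustrative sketch only (not part of the build): the intended calling
 * pattern is to copy just the fixed-size header from user space, let
 * ll_lov_user_md_size() size the full buffer from lmm_magic, and only then
 * copy the complete md (compare ll_copy_user_md() declared above).  The
 * function name is hypothetical and error handling is abbreviated.
 */
#if 0 /* example only */
static ssize_t example_user_md_size(const struct lov_user_md __user *umd)
{
	struct lov_user_md head;

	if (copy_from_user(&head, umd, sizeof(head)))
		return -EFAULT;

	return ll_lov_user_md_size(&head);
}
#endif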
1169 /* llite/llite_nfs.c */
1170 extern struct export_operations lustre_export_operations;
1171 __u32 get_uuid2int(const char *name, int len);
1172 struct inode *search_inode_for_lustre(struct super_block *sb,
1173 const struct lu_fid *fid);
1174 int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);
1176 /* llite/symlink.c */
1177 extern struct inode_operations ll_fast_symlink_inode_operations;
1180 * IO arguments for various VFS I/O interfaces.
1182 struct vvp_io_args {
1183 /** normal/sendfile/splice */
1184 enum vvp_io_subtype via_io_subtype;
1188 struct kiocb *via_iocb;
1189 struct iov_iter *via_iter;
1192 struct pipe_inode_info *via_pipe;
1193 unsigned int via_flags;
1203 struct ll_cl_context {
1204 struct list_head lcc_list;
1206 const struct lu_env *lcc_env;
1207 struct cl_io *lcc_io;
1208 struct cl_page *lcc_page;
1209 enum lcc_type lcc_type;
1212 struct ll_thread_info {
1213 struct vvp_io_args lti_args;
1214 struct ra_io_arg lti_ria;
1215 struct ll_cl_context lti_io_ctx;
1218 extern struct lu_context_key ll_thread_key;
1220 static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
1222 struct ll_thread_info *lti;
1224 lti = lu_context_key_get(&env->le_ctx, &ll_thread_key);
1225 LASSERT(lti != NULL);
1230 static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
1231 enum vvp_io_subtype type)
1233 struct vvp_io_args *via = &ll_env_info(env)->lti_args;
1235 via->via_io_subtype = type;
1240 void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
1241 struct vvp_io_args *args);
1243 /* llite/llite_mmap.c */
1245 int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
1246 int ll_file_mmap(struct file * file, struct vm_area_struct * vma);
1247 void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
1248 unsigned long addr, size_t count);
1249 struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
1252 #define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi)
1254 /* don't need an addref as the sb_info should be holding one */
1255 static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
1257 return ll_s2sbi(sb)->ll_dt_exp;
1260 /* don't need an addref as the sb_info should be holding one */
1261 static inline struct obd_export *ll_s2mdexp(struct super_block *sb)
1263 return ll_s2sbi(sb)->ll_md_exp;
1266 static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
1268 struct obd_device *obd = sbi->ll_md_exp->exp_obd;
1274 // FIXME: replace the name of this with LL_SB to conform to kernel stuff
1275 static inline struct ll_sb_info *ll_i2sbi(struct inode *inode)
1277 return ll_s2sbi(inode->i_sb);
1280 static inline struct obd_export *ll_i2dtexp(struct inode *inode)
1282 return ll_s2dtexp(inode->i_sb);
1285 static inline struct obd_export *ll_i2mdexp(struct inode *inode)
1287 return ll_s2mdexp(inode->i_sb);
1290 static inline struct lu_fid *ll_inode2fid(struct inode *inode)
1294 LASSERT(inode != NULL);
1295 fid = &ll_i2info(inode)->lli_fid;
1300 static inline bool ll_dir_striped(struct inode *inode)
1303 return S_ISDIR(inode->i_mode) &&
1304 lmv_dir_striped(ll_i2info(inode)->lli_lsm_md);
1307 static inline loff_t ll_file_maxbytes(struct inode *inode)
1309 struct cl_object *obj = ll_i2info(inode)->lli_clob;
1312 return MAX_LFS_FILESIZE;
1314 return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE);
1318 extern const struct xattr_handler *ll_xattr_handlers[];
1320 #define XATTR_USER_T 1
1321 #define XATTR_TRUSTED_T 2
1322 #define XATTR_SECURITY_T 3
1323 #define XATTR_ACL_ACCESS_T 4
1324 #define XATTR_ACL_DEFAULT_T 5
1325 #define XATTR_LUSTRE_T 6
1326 #define XATTR_OTHER_T 7
1328 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
1329 int ll_xattr_list(struct inode *inode, const char *name, int type,
1330 void *buffer, size_t size, u64 valid);
1331 const struct xattr_handler *get_xattr_type(const char *name);
1334 * Common IO arguments for various VFS I/O interfaces.
1336 int cl_sb_init(struct super_block *sb);
1337 int cl_sb_fini(struct super_block *sb);
1339 enum ras_update_flags {
1343 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
1344 void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
1348 #define LL_SA_RPC_MIN 2
1349 #define LL_SA_RPC_DEF 32
1350 #define LL_SA_RPC_MAX 512
1352 /* XXX: If we want to support more concurrent statahead instances,
1353  * please consider decentralizing the RPC lists attached
1354  * to the related import, such as imp_{sending,delayed}_list.
1356 #define LL_SA_RUNNING_MAX 256
1357 #define LL_SA_RUNNING_DEF 16
1359 #define LL_SA_CACHE_BIT 5
1360 #define LL_SA_CACHE_SIZE (1 << LL_SA_CACHE_BIT)
1361 #define LL_SA_CACHE_MASK (LL_SA_CACHE_SIZE - 1)
1363 /* per inode struct, for dir only */
1364 struct ll_statahead_info {
1365 struct dentry *sai_dentry;
1366 atomic_t sai_refcount; /* hold a refcount when accessing this struct */
1368 unsigned int sai_max; /* max ahead of lookup */
1369 __u64 sai_sent; /* stat requests sent count */
1370 __u64 sai_replied; /* stat requests which received a reply */
1372 __u64 sai_index; /* index of statahead entry */
1373 __u64 sai_index_wait; /* index of the entry which the
1374  * caller is waiting for */
1375 __u64 sai_hit; /* hit count */
1376 __u64 sai_miss; /* miss count:
1377 * for "ls -al" case, includes
1378 * hidden dentry miss;
1379 * for "ls -l" case, it does not
1380 * include hidden dentry miss.
1381  * "sai_miss_hidden" is used for the latter case. */
1384 unsigned int sai_consecutive_miss; /* consecutive miss */
1385 unsigned int sai_miss_hidden;/* "ls -al", but first dentry
1386 * is not a hidden one */
1387 unsigned int sai_skip_hidden;/* skipped hidden dentry count */
1389 unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for hidden entries */
1391 sai_agl_valid:1,/* AGL is valid for the dir */
1392 sai_in_readpage:1;/* statahead is in readdir()*/
1393 wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
1394 struct task_struct *sai_task; /* stat-ahead thread */
1395 struct task_struct *sai_agl_task; /* AGL thread */
1396 struct list_head sai_interim_entries; /* entries which got async
1397 * stat reply, but not
1399 struct list_head sai_entries; /* completed entries */
1400 struct list_head sai_agls; /* AGLs to be sent */
1401 struct list_head sai_cache[LL_SA_CACHE_SIZE];
1402 spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
1403 atomic_t sai_cache_count; /* entry count in cache */
1406 int ll_revalidate_statahead(struct inode *dir, struct dentry **dentry,
1408 int ll_start_statahead(struct inode *dir, struct dentry *dentry, bool agl);
1409 void ll_authorize_statahead(struct inode *dir, void *key);
1410 void ll_deauthorize_statahead(struct inode *dir, void *key);
1413 blkcnt_t dirty_cnt(struct inode *inode);
1415 int cl_glimpse_size0(struct inode *inode, int agl);
1416 int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
1417 struct inode *inode, struct cl_object *clob, int agl);
1419 static inline int cl_glimpse_size(struct inode *inode)
1421 return cl_glimpse_size0(inode, 0);
1424 /* AGL is 'asynchronous glimpse lock', which is a speculative lock taken as
1425 * part of statahead */
1426 static inline int cl_agl(struct inode *inode)
1428 return cl_glimpse_size0(inode, 1);
1431 int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise);
1433 int cl_io_get(struct inode *inode, struct lu_env **envout,
1434 struct cl_io **ioout, __u16 *refcheck);
1436 static inline int ll_glimpse_size(struct inode *inode)
1438 struct ll_inode_info *lli = ll_i2info(inode);
1441 down_read(&lli->lli_glimpse_sem);
1442 rc = cl_glimpse_size(inode);
1443 lli->lli_glimpse_time = ktime_get();
1444 up_read(&lli->lli_glimpse_sem);
1448 /* a dentry can use statahead when statahead is enabled, the current process has
1449  * opened the parent directory, and this dentry hasn't accessed the statahead cache before */
1451 dentry_may_statahead(struct inode *dir, struct dentry *dentry)
1453 struct ll_inode_info *lli;
1454 struct ll_dentry_data *ldd;
1456 if (ll_i2sbi(dir)->ll_sa_max == 0)
1459 lli = ll_i2info(dir);
1461 /* statahead is not allowed for this dir; there may be three causes:
1462  * 1. the dir is not opened.
1463  * 2. the statahead hit ratio is too low.
1464  * 3. a previous stat failed to start the statahead thread. */
1465 if (!lli->lli_sa_enabled)
1468 /* not the same process, don't statahead */
1469 if (lli->lli_opendir_pid != current->pid)
1473  * When stat'ing a dentry, the kernel may trigger 'revalidate' or 'lookup'
1474  * multiple times, e.g. for 'getattr', 'getxattr', etc.
1475  * For a patchless client, the lookup intent is not accurate, which may
1476  * misguide statahead. For example:
1477  * the 'revalidate' calls for 'getattr' and 'getxattr' of a dentry will
1478  * have the same intent -- IT_GETATTR -- while a dentry should access the
1479  * statahead cache only once, otherwise the statahead window gets messed up.
1480  * The solution is as follows:
1481  * assign 'lld_sa_generation' from 'lli_sa_generation' when a dentry is
1482  * processed with IT_GETATTR for the first time; subsequent IT_GETATTR will
1483  * bypass interacting with the statahead cache by checking
1484  * 'lld_sa_generation == lli->lli_sa_generation'.
1486 ldd = ll_d2d(dentry);
1487 if (ldd != NULL && lli->lli_sa_generation &&
1488 ldd->lld_sa_generation == lli->lli_sa_generation)
1489 return false;
1491 return true;
1492 }
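/*
 * Illustrative sketch only (not part of the build): a hedged sketch of the
 * intended call site in a lookup/revalidate path -- only consult or kick off
 * statahead when the dentry qualifies, otherwise fall back to a plain stat.
 * The surrounding function is hypothetical.
 */
#if 0 /* example only */
static int example_lookup_path(struct inode *dir, struct dentry *dentry)
{
	if (!dentry_may_statahead(dir, dentry))
		return 0;	/* fall back to a normal stat */

	/* kick off (or join) statahead for this directory; the third
	 * argument also requests asynchronous glimpse locks (AGL) */
	return ll_start_statahead(dir, dentry, true);
}
#endif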
1494 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
1495 enum cl_fsync_mode mode, int ignore_layout);
1497 static inline int ll_file_nolock(const struct file *file)
1499 struct ll_file_data *fd = file->private_data;
1500 struct inode *inode = file_inode((struct file *)file);
1502 LASSERT(fd != NULL);
1503 return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
1504 (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
1507 static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
1508 struct lookup_intent *it, __u64 *bits)
1510 if (!it->it_lock_set) {
1511 struct lustre_handle handle;
1513 /* If this inode is a remote object, it will get two
1514  * separate locks in different namespaces: the master MDT,
1515  * where the name entry is, will grant a LOOKUP lock, and the
1516  * remote MDT, where the object is, will grant an
1517  * UPDATE|PERM lock. The inode will be attached to both the
1518  * LOOKUP and PERM locks, so revoking either lock will
1519  * cause the dcache to be cleared. */
1520 if (it->it_remote_lock_mode) {
1521 handle.cookie = it->it_remote_lock_handle;
1522 CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID
1523 "(%p) for remote lock %#llx\n",
1524 PFID(ll_inode2fid(inode)), inode,
1526 md_set_lock_data(exp, &handle, inode, NULL);
1529 handle.cookie = it->it_lock_handle;
1531 CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)"
1532 " for lock %#llx\n",
1533 PFID(ll_inode2fid(inode)), inode, handle.cookie);
1535 md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
1536 it->it_lock_set = 1;
1540 *bits = it->it_lock_bits;
1543 static inline int d_lustre_invalid(const struct dentry *dentry)
1545 struct ll_dentry_data *lld = ll_d2d(dentry);
1547 return (lld == NULL) || lld->lld_invalid;
1550 static inline void __d_lustre_invalidate(struct dentry *dentry)
1552 struct ll_dentry_data *lld = ll_d2d(dentry);
1555 lld->lld_invalid = 1;
1559  * Mark the dentry INVALID. If the dentry refcount is zero (this is normally the
1560  * case for ll_md_blocking_ast), unhash the dentry and let the dcache reclaim it
1561  * later; otherwise the dput() of the last refcount will unhash the dentry and kill it.
1563 static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
1565 CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
1567 dentry->d_parent, dentry->d_inode, ll_d_count(dentry));
1569 spin_lock_nested(&dentry->d_lock,
1570 nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
1571 __d_lustre_invalidate(dentry);
1573 * We should be careful about dentries created by d_obtain_alias().
1574  * These dentries are not put in the dentry tree; instead they are
1575 * linked to sb->s_anon through dentry->d_hash.
1576 * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
1577 * If we unhashed such a dentry, unmount would not be able to find
1578 * it and busy inodes would be reported.
1580 if (ll_d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
1582 spin_unlock(&dentry->d_lock);
1585 static inline void d_lustre_revalidate(struct dentry *dentry)
1587 spin_lock(&dentry->d_lock);
1588 LASSERT(ll_d2d(dentry) != NULL);
1589 ll_d2d(dentry)->lld_invalid = 0;
1590 spin_unlock(&dentry->d_lock);
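/*
 * Illustrative sketch only (not part of the build): how a ->d_revalidate()
 * style caller is expected to use the helpers above -- reject a dentry that
 * was invalidated by a blocking AST so the VFS performs a fresh lookup, and
 * re-mark it valid once the MD locks have been checked.  The function name
 * is hypothetical.
 */
#if 0 /* example only */
static int example_revalidate(struct dentry *dentry)
{
	if (d_lustre_invalid(dentry))
		return 0;	/* invalid: force a new lookup */

	/* ... verify that the needed MD locks are still held ... */

	d_lustre_revalidate(dentry);
	return 1;		/* still valid */
}
#endif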
1593 static inline dev_t ll_compat_encode_dev(dev_t dev)
1595 /* The compat_sys_*stat*() syscalls will fail unless the
1596 * device majors and minors are both less than 256. Note that
1597 * the value returned here will be passed through
1598 * old_encode_dev() in cp_compat_stat(). And so we are not
1599 * trying to return a valid compat (u16) device number, just
1600 * one that will pass the old_valid_dev() check. */
1602 return MKDEV(MAJOR(dev) & 0xff, MINOR(dev) & 0xff);
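/*
 * Worked example (illustrative only): with the masking above, a device number
 * such as MKDEV(300, 17) is reported to compat userspace as MKDEV(44, 17),
 * since 300 & 0xff == 44.  That is enough to pass the old_valid_dev() check
 * even though the real major number is lost.
 */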
1605 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
1606 int ll_layout_refresh(struct inode *inode, __u32 *gen);
1607 int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
1608 int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
1609 struct lu_extent *ext);
1611 int ll_xattr_init(void);
1612 void ll_xattr_fini(void);
1614 int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
1615 struct cl_page *page, enum cl_req_type crt);
1617 int ll_getparent(struct file *file, struct getparent __user *arg);
1620 int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
1621 enum op_xvalid xvalid, unsigned int attr_flags);
1623 extern struct lu_env *cl_inode_fini_env;
1624 extern __u16 cl_inode_fini_refcheck;
1626 int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
1627 void cl_inode_fini(struct inode *inode);
1629 u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
1630 u32 cl_fid_build_gen(const struct lu_fid *fid);
1632 static inline struct pcc_super *ll_i2pccs(struct inode *inode)
1634 return &ll_i2sbi(inode)->ll_pcc_super;
1637 static inline struct pcc_super *ll_info2pccs(struct ll_inode_info *lli)
1639 return ll_i2pccs(ll_info2i(lli));
1642 #endif /* LLITE_INTERNAL_H */