/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef LLITE_INTERNAL_H
#define LLITE_INTERNAL_H
#include <lustre_debug.h>
#include <lustre_disk.h>	/* for s2sbi */
#include <lustre_eacl.h>
#include <lustre_linkea.h>

/* for struct cl_lock_descr and struct cl_io */
#include <cl_object.h>
#include <lustre_lmv.h>
#include <lustre_mdc.h>
#include <lustre_intent.h>
#include <linux/compat.h>
#include <linux/aio.h>
#include <lustre_compat.h>

#include "vvp_internal.h"
#include "range_lock.h"

#ifndef VM_FAULT_RETRY
#define VM_FAULT_RETRY 0
#endif

/* Kernel 3.1 kills LOOKUP_CONTINUE; LOOKUP_PARENT is equivalent to it.
 * See kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0. */
#ifndef LOOKUP_CONTINUE
#define LOOKUP_CONTINUE LOOKUP_PARENT
#endif

/** Only used on client-side for indicating the tail of dir hash/offset. */
#define LL_DIR_END_OFF		0x7fffffffffffffffULL
#define LL_DIR_END_OFF_32BIT	0x7fffffffUL

/* 4UL * 1024 * 1024 */
#define LL_MAX_BLKSIZE_BITS	22

#define LL_IT2STR(it)	((it) ? ldlm_it2str((it)->it_op) : "0")

#define TIMES_SET_FLAGS	(ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)

struct ll_dentry_data {
	unsigned int	lld_sa_generation;
	unsigned int	lld_invalid:1;
	unsigned int	lld_nfs_dentry:1;
	struct rcu_head	lld_rcu_head;
};

#define ll_d2d(de) ((struct ll_dentry_data *)((de)->d_fsdata))

#define LLI_INODE_MAGIC		0x111d0de5
#define LLI_INODE_DEAD		0xdeadd00d

struct ll_getname_data {
#ifdef HAVE_DIR_CONTEXT
	struct dir_context	ctx;
#endif
	char		*lgd_name;	/* points to a buffer of NAME_MAX + 1 size */
	struct lu_fid	 lgd_fid;	/* target fid we are looking for */
	int		 lgd_found;	/* inode matched? */
};

struct ll_grouplock {
	struct lu_env	*lg_env;
	struct cl_io	*lg_io;
	struct cl_lock	*lg_lock;
	unsigned long	 lg_gid;
};

/* See comment on trunc_sem_down_read_nowait */
struct ll_trunc_sem {
	/* when positive, this is a count of readers; when -1, the semaphore
	 * is held for write; 0 means unlocked */
	atomic_t	ll_trunc_readers;
	/* this tracks a count of waiting writers */
	atomic_t	ll_trunc_waiters;
};

struct ll_inode_info {
	__u32				lli_inode_magic;

	volatile unsigned long		lli_flags;
	struct posix_acl		*lli_posix_acl;

	/* identifying fields for both metadata and data stacks. */
	struct lu_fid			lli_fid;
	/* master inode fid for stripe directory */
	struct lu_fid			lli_pfid;

	/* We need all three because every inode may be opened in different
	 * modes */
	struct obd_client_handle	*lli_mds_read_och;
	struct obd_client_handle	*lli_mds_write_och;
	struct obd_client_handle	*lli_mds_exec_och;
	__u64				lli_open_fd_read_count;
	__u64				lli_open_fd_write_count;
	__u64				lli_open_fd_exec_count;
	/* Protects access to och pointers and their usage counters */
	struct mutex			lli_och_mutex;

	struct inode			lli_vfs_inode;

	/* the most recent timestamps obtained from mds */
	s64				lli_atime;
	s64				lli_mtime;
	s64				lli_ctime;
	spinlock_t			lli_agl_lock;

	/* Try to keep the d::member and f::member aligned. Before using
	 * these members, make clear whether it is a directory or not. */
	union {
		/* for directory */
		struct {
			/* metadata statahead */
			/* since parent and child threads can share the same
			 * @file struct, "opendir_key" is the token used at
			 * dir close to decide which thread should clean up
			 * the dir statahead when the parent exits before the
			 * child. */
			void			       *lli_opendir_key;
			struct ll_statahead_info       *lli_sai;
			/* protect statahead stuff. */
			spinlock_t			lli_sa_lock;
			/* "opendir_pid" is the token set at lookup/revalidate
			 * -- I am the owner of dir statahead. */
			pid_t				lli_opendir_pid;
			/* stat will try to access statahead entries or start
			 * statahead if this flag is set; the flag is set upon
			 * dir open, and cleared when the dir is closed, the
			 * statahead hit ratio is too low, or starting the
			 * statahead thread failed. */
			unsigned int			lli_sa_enabled:1;
			/* generation for statahead */
			unsigned int			lli_sa_generation;
			/* rw lock protects lli_lsm_md */
			struct rw_semaphore		lli_lsm_sem;
			/* directory stripe information */
			struct lmv_stripe_md		*lli_lsm_md;
			/* directory default LMV */
			struct lmv_stripe_md		*lli_default_lsm_md;
		};

		/* for non-directory */
		struct {
			struct mutex		lli_size_mutex;
			char			*lli_symlink_name;
			struct ll_trunc_sem	lli_trunc_sem;
			struct range_lock_tree	lli_write_tree;

			struct rw_semaphore	lli_glimpse_sem;
			ktime_t			lli_glimpse_time;
			struct list_head	lli_agl_list;

			/* for writepage() only to communicate to fsync */
			int			lli_async_rc;

			/* protect the file heat fields */
			spinlock_t			lli_heat_lock;
			__u32				lli_heat_flags;
			struct obd_heat_instance	lli_heat_instances[OBD_HEAT_COUNT];

			/*
			 * Whenever a process tries to read/write the file, the
			 * jobid of the process will be saved here, and it will
			 * be packed into the write RPC when flushed later.
			 *
			 * So the read/write statistics for a jobid will not be
			 * accurate if the file is shared by different jobs.
			 */
			char			lli_jobid[LUSTRE_JOBID_SIZE];

			struct mutex		 lli_pcc_lock;
			enum lu_pcc_state_flags	 lli_pcc_state;
			/*
			 * @lli_pcc_generation saves the global PCC generation
			 * when the file was successfully attached into PCC.
			 * The flags of the PCC dataset are saved in
			 * @lli_pcc_dsflags.
			 * The global PCC generation will be increased when a
			 * PCC backend is added or deleted, or when the
			 * configuration parameters for PCC change.
			 * If @lli_pcc_generation is the same as the global PCC
			 * generation, we can use the saved flags of the PCC
			 * dataset to determine whether to try auto attach
			 * on the client side.
			 */
			__u64			 lli_pcc_generation;
			enum pcc_dataset_flags	 lli_pcc_dsflags;
			struct pcc_inode	*lli_pcc_inode;

			struct mutex		 lli_group_mutex;
			__u64			 lli_group_users;
			unsigned long		 lli_group_gid;
		};
	};

	/* XXX: Although some of the following frequently used members are
	 * only meaningful for non-directory objects, it would waste time to
	 * check whether the object is a directory before using them. On the
	 * other hand, since currently sizeof(f) > sizeof(d), moving them into
	 * u.f would not reduce the size of "ll_inode_info" anyway. So keep
	 * them outside.
	 *
	 * In the future, if more members are added only for directories,
	 * some of the following members can be moved into u.f.
	 */
	struct cl_object		*lli_clob;

	/* mutex to request for layout lock exclusively. */
	struct mutex			lli_layout_mutex;
	/* Layout version, protected by lli_layout_lock */
	__u32				lli_layout_gen;
	spinlock_t			lli_layout_lock;

	__u32				lli_projid;	/* project id */

	struct rw_semaphore		lli_xattrs_list_rwsem;
	struct mutex			lli_xattrs_enq_lock;
	struct list_head		lli_xattrs;	/* ll_xattr_entry->xe_list */
};

static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem)
{
	atomic_set(&sem->ll_trunc_readers, 0);
	atomic_set(&sem->ll_trunc_waiters, 0);
}

/* This version of down read ignores waiting writers, meaning if the semaphore
 * is already held for read, this down_read will 'join' that reader and also
 * take the semaphore.
 *
 * This lets us avoid an unusual deadlock.
 *
 * We must take lli_trunc_sem in read mode on entry in to various i/o paths
 * in Lustre, in order to exclude truncates.  Some of these paths then need to
 * take the mmap_sem, while still holding the trunc_sem.  The problem is that
 * page faults hold the mmap_sem when calling in to Lustre, and then must also
 * take the trunc_sem to exclude truncate.
 *
 * This means the locking order for trunc_sem and mmap_sem is sometimes AB,
 * sometimes BA.  This is almost OK because in both cases, we take the trunc
 * sem for read, so it doesn't block.
 *
 * However, if a write mode user (truncate, a setattr op) arrives in the
 * middle of this, the second reader on the truncate_sem will wait behind that
 * writer.
 *
 * So we have, on our truncate sem, in order (where 'reader' and 'writer' refer
 * to the mode in which they take the semaphore):
 * reader (holding mmap_sem, needs truncate_sem)
 * writer
 * reader (holding truncate sem, waiting for mmap_sem)
 *
 * And so the readers deadlock.
 *
 * The solution is this modified semaphore, where this down_read ignores
 * waiting write operations, and all waiters are woken up at once, so readers
 * using down_read_nowait cannot get stuck behind waiting writers, regardless
 * of the order they arrived in.
 *
 * down_read_nowait is only used in the page fault case, where we already hold
 * the mmap_sem.  This is because otherwise repeated read and write operations
 * (which take the truncate sem) could prevent a truncate from ever starting.
 * This could still happen with page faults, but without an even more complex
 * mechanism, this is unavoidable.
 */

static inline void trunc_sem_down_read_nowait(struct ll_trunc_sem *sem)
{
	wait_var_event(&sem->ll_trunc_readers,
		       atomic_inc_unless_negative(&sem->ll_trunc_readers));
}

static inline void trunc_sem_down_read(struct ll_trunc_sem *sem)
{
	wait_var_event(&sem->ll_trunc_readers,
		       atomic_read(&sem->ll_trunc_waiters) == 0 &&
		       atomic_inc_unless_negative(&sem->ll_trunc_readers));
}

static inline void trunc_sem_up_read(struct ll_trunc_sem *sem)
{
	if (atomic_dec_return(&sem->ll_trunc_readers) == 0 &&
	    atomic_read(&sem->ll_trunc_waiters))
		wake_up_var(&sem->ll_trunc_readers);
}

static inline void trunc_sem_down_write(struct ll_trunc_sem *sem)
{
	atomic_inc(&sem->ll_trunc_waiters);
	wait_var_event(&sem->ll_trunc_readers,
		       atomic_cmpxchg(&sem->ll_trunc_readers, 0, -1) == 0);
	atomic_dec(&sem->ll_trunc_waiters);
}

static inline void trunc_sem_up_write(struct ll_trunc_sem *sem)
{
	atomic_set(&sem->ll_trunc_readers, 0);
	wake_up_var(&sem->ll_trunc_readers);
}

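/*
 * Usage sketch (illustrative only, not called anywhere in this header;
 * "do_io", "fault_in_pages" and "truncate_the_file" are hypothetical
 * helpers): regular I/O takes the semaphore in read mode, the page fault
 * path uses the _nowait variant because it already holds mmap_sem, and
 * truncate/setattr take it in write mode.
 */
#if 0
	/* regular read/write path */
	trunc_sem_down_read(&lli->lli_trunc_sem);
	rc = do_io(...);
	trunc_sem_up_read(&lli->lli_trunc_sem);

	/* page fault path: mmap_sem is already held, so ignore writers */
	trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
	rc = fault_in_pages(...);
	trunc_sem_up_read(&lli->lli_trunc_sem);

	/* truncate/setattr path */
	trunc_sem_down_write(&lli->lli_trunc_sem);
	truncate_the_file(...);
	trunc_sem_up_write(&lli->lli_trunc_sem);
#endif
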
static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
{
	__u32 gen;

	spin_lock(&lli->lli_layout_lock);
	gen = lli->lli_layout_gen;
	spin_unlock(&lli->lli_layout_lock);

	return gen;
}

static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen)
{
	spin_lock(&lli->lli_layout_lock);
	lli->lli_layout_gen = gen;
	spin_unlock(&lli->lli_layout_lock);
}

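/*
 * Sketch (illustrative): because both helpers take lli_layout_lock, a
 * caller can detect a concurrent layout refresh with a simple
 * before/after comparison of the generation ("redo_io" is hypothetical):
 */
#if 0
	__u32 gen = ll_layout_version_get(lli);
	/* ... I/O runs; the MDS may push a new layout meanwhile ... */
	if (ll_layout_version_get(lli) != gen)
		redo_io();
#endif
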
enum ll_file_flags {
	/* File data is modified. */
	LLIF_DATA_MODIFIED	= 0,
	/* File is being restored */
	LLIF_FILE_RESTORING	= 1,
	/* Xattr cache is attached to the file */
	LLIF_XATTR_CACHE	= 2,
	/* Project inherit */
	LLIF_PROJECT_INHERIT	= 3,
	/* update atime from MDS even if it's older than the local inode atime. */
	LLIF_UPDATE_ATIME	= 4,
};

static inline void ll_file_set_flag(struct ll_inode_info *lli,
				    enum ll_file_flags flag)
{
	set_bit(flag, &lli->lli_flags);
}

static inline void ll_file_clear_flag(struct ll_inode_info *lli,
				      enum ll_file_flags flag)
{
	clear_bit(flag, &lli->lli_flags);
}

static inline bool ll_file_test_flag(struct ll_inode_info *lli,
				     enum ll_file_flags flag)
{
	return test_bit(flag, &lli->lli_flags);
}

static inline bool ll_file_test_and_clear_flag(struct ll_inode_info *lli,
					       enum ll_file_flags flag)
{
	return test_and_clear_bit(flag, &lli->lli_flags);
}

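/*
 * Usage sketch (illustrative; "send_data_modified_hint" is a hypothetical
 * helper): set the flag after a write and consume it exactly once later,
 * relying on the atomic test-and-clear:
 */
#if 0
	ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
	/* ... later, e.g. on close ... */
	if (ll_file_test_and_clear_flag(ll_i2info(inode), LLIF_DATA_MODIFIED))
		send_data_modified_hint(inode);
#endif
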
int ll_xattr_cache_destroy(struct inode *inode);

int ll_xattr_cache_get(struct inode *inode,
		       const char *name,
		       char *buffer,
		       size_t size,
		       __u64 valid);

static inline bool obd_connect_has_secctx(struct obd_connect_data *data)
{
#ifdef CONFIG_SECURITY
	return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
	       data->ocd_connect_flags2 & OBD_CONNECT2_FILE_SECCTX;
#else
	return false;
#endif
}

static inline void obd_connect_set_secctx(struct obd_connect_data *data)
{
#ifdef CONFIG_SECURITY
	data->ocd_connect_flags2 |= OBD_CONNECT2_FILE_SECCTX;
#endif
}

int ll_dentry_init_security(struct dentry *dentry, int mode, struct qstr *name,
			    const char **secctx_name, void **secctx,
			    __u32 *secctx_size);
int ll_inode_init_security(struct dentry *dentry, struct inode *inode,
			   struct inode *dir);

int ll_listsecurity(struct inode *inode, char *secctx_name,
		    size_t secctx_name_size);

/**
 * Locking to guarantee consistency of non-atomic updates to long long i_size,
 * consistency between file size and KMS.
 *
 * Implemented by ->lli_size_mutex and ->lsm_lock, nested in that order.
 */
void ll_inode_size_lock(struct inode *inode);
void ll_inode_size_unlock(struct inode *inode);

static inline struct ll_inode_info *ll_i2info(struct inode *inode)
{
	return container_of(inode, struct ll_inode_info, lli_vfs_inode);
}

static inline struct pcc_inode *ll_i2pcci(struct inode *inode)
{
	return ll_i2info(inode)->lli_pcc_inode;
}

/* default to use at least 16M for fast read if possible */
#define RA_REMAIN_WINDOW_MIN			MiB_TO_PAGES(16UL)

/* default readahead on a given system. */
#define SBI_DEFAULT_READ_AHEAD_MAX		MiB_TO_PAGES(64UL)

/* default read-ahead full files smaller than limit on the second read */
#define SBI_DEFAULT_READ_AHEAD_WHOLE_MAX	MiB_TO_PAGES(2UL)

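/* With the common 4 KiB PAGE_SIZE these defaults work out to
 * MiB_TO_PAGES(16UL) = 4096 pages, MiB_TO_PAGES(64UL) = 16384 pages and
 * MiB_TO_PAGES(2UL) = 512 pages. */
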
enum ra_stat {
	RA_STAT_HIT = 0,
	RA_STAT_MISS,
	RA_STAT_DISTANT_READPAGE,
	RA_STAT_MISS_IN_WINDOW,
	RA_STAT_FAILED_GRAB_PAGE,
	RA_STAT_FAILED_MATCH,
	RA_STAT_DISCARDED,
	RA_STAT_ZERO_LEN,
	RA_STAT_ZERO_WINDOW,
	RA_STAT_EOF,
	RA_STAT_MAX_IN_FLIGHT,
	RA_STAT_WRONG_GRAB_PAGE,
	RA_STAT_FAILED_REACH_END,
	RA_STAT_ASYNC,
	RA_STAT_FAILED_FAST_READ,
	_NR_RA_STAT,
};

struct ll_ra_info {
	atomic_t	ra_cur_pages;
	unsigned long	ra_max_pages;
	unsigned long	ra_max_pages_per_file;
	unsigned long	ra_max_read_ahead_whole_pages;
	struct workqueue_struct *ll_readahead_wq;
	/*
	 * Max number of active works that could be triggered
	 * for async readahead.
	 */
	unsigned int	ra_async_max_active;
	/* how many async readaheads are in flight */
	atomic_t	ra_async_inflight;
	/* Threshold to control when to trigger async readahead */
	unsigned long	ra_async_pages_per_file_threshold;
};

/* ra_io_arg is filled at the beginning of ll_readahead with the ras_lock
 * held; the following ll_read_ahead_pages will then read RA pages according
 * to this arg. All the items in this structure are counted by page index.
 */
struct ra_io_arg {
	pgoff_t		ria_start_idx;	/* start offset of read-ahead */
	pgoff_t		ria_end_idx;	/* end offset of read-ahead */
	unsigned long	ria_reserved;	/* reserved pages for read-ahead */
	pgoff_t		ria_end_idx_min;/* minimum end to cover current read */
	bool		ria_eof;	/* reached end of file */
	/* If a stride read pattern is detected, ria_stoff is the byte offset
	 * where the stride read starts. Note: for normal read-ahead, this
	 * value is meaningless and is never accessed. */
	loff_t		ria_stoff;
	/* ria_length and ria_bytes are the stride length and the length of
	 * the data within a stride, in the stride I/O mode. They are also
	 * used to check whether the read-ahead pages belong to a stride I/O
	 * read-ahead. */
	loff_t		ria_length;
	loff_t		ria_bytes;
};

/* LL_HIST_MAX=32 causes an overflow */
#define LL_HIST_MAX 28
#define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
#define LL_PROCESS_HIST_MAX 10
struct per_process_info {
	pid_t pid;
	struct obd_histogram pp_r_hist;
	struct obd_histogram pp_w_hist;
};

/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
struct ll_rw_extents_info {
	struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
};

#define LL_OFFSET_HIST_MAX 100
struct ll_rw_process_info {
	pid_t			rw_pid;
	int			rw_op;
	loff_t			rw_range_start;
	loff_t			rw_range_end;
	loff_t			rw_last_file_pos;
	loff_t			rw_offset;
	size_t			rw_smallest_extent;
	size_t			rw_largest_extent;
	struct ll_file_data	*rw_last_file;
};

enum stats_track_type {
	STATS_TRACK_ALL = 0,	/* track all processes */
	STATS_TRACK_PID,	/* track process with this pid */
	STATS_TRACK_PPID,	/* track processes with this ppid */
	STATS_TRACK_GID,	/* track processes with this gid */
	STATS_TRACK_LAST,
};

/* flags for sbi->ll_flags */
#define LL_SBI_NOLCK		0x01 /* DLM locking disabled (directio-only) */
#define LL_SBI_CHECKSUM		0x02 /* checksum each page as it's written */
#define LL_SBI_FLOCK		0x04
#define LL_SBI_USER_XATTR	0x08 /* support user xattr */
#define LL_SBI_ACL		0x10 /* support ACL */
/*	LL_SBI_RMT_CLIENT	0x40    remote client */
#define LL_SBI_MDS_CAPA		0x80 /* support mds capa, obsolete */
#define LL_SBI_OSS_CAPA		0x100 /* support oss capa, obsolete */
#define LL_SBI_LOCALFLOCK	0x200 /* local flocks support by kernel */
#define LL_SBI_LRU_RESIZE	0x400 /* lru resize support */
#define LL_SBI_LAZYSTATFS	0x800 /* lazystatfs mount option */
/*	LL_SBI_SOM_PREVIEW	0x1000  SOM preview mount option, obsolete */
#define LL_SBI_32BIT_API	0x2000 /* generate 32 bit inodes. */
#define LL_SBI_64BIT_HASH	0x4000 /* support 64-bit dir hash/offset */
#define LL_SBI_AGL_ENABLED	0x8000 /* enable agl */
#define LL_SBI_VERBOSE		0x10000 /* verbose mount/umount */
#define LL_SBI_LAYOUT_LOCK	0x20000 /* layout lock support */
#define LL_SBI_USER_FID2PATH	0x40000 /* allow fid2path by unprivileged users */
#define LL_SBI_XATTR_CACHE	0x80000 /* support for xattr cache */
#define LL_SBI_NOROOTSQUASH	0x100000 /* do not apply root squash */
#define LL_SBI_ALWAYS_PING	0x200000 /* always ping even if server
					  * suppress_pings is set */
#define LL_SBI_FAST_READ	0x400000 /* fast read support */
#define LL_SBI_FILE_SECCTX	0x800000 /* set file security context at create */
/*	LL_SBI_PIO		0x1000000    parallel IO support, introduced in
					     2.10, abandoned */
#define LL_SBI_TINY_WRITE	0x2000000 /* tiny write support */
#define LL_SBI_FILE_HEAT	0x4000000 /* file heat support */
#define LL_SBI_FLAGS {	\
	"nolck",	\
	"checksum",	\
	"flock",	\
	"user_xattr",	\
	"acl",		\
	"???",		\
	"???",		\
	"mds_capa",	\
	"oss_capa",	\
	"flock",	\
	"lru_resize",	\
	"lazy_statfs",	\
	"som",		\
	"32bit_api",	\
	"64bit_hash",	\
	"agl",		\
	"verbose",	\
	"layout",	\
	"user_fid2path",\
	"xattr_cache",	\
	"norootsquash",	\
	"always_ping",	\
	"fast_read",	\
	"file_secctx",	\
	"pio",		\
	"tiny_write",	\
	"file_heat",	\
}

/* This is embedded into llite super-blocks to keep track of connect
 * flags (capabilities) supported by all imports given mount is
 * connected to. */
struct lustre_client_ocd {
	/* This is conjunction of connect_flags across all imports
	 * (LOVs) this mount is connected to. This field is updated by
	 * cl_ocd_update() under ->lco_lock. */
	__u64			 lco_flags;
	struct mutex		 lco_lock;
	struct obd_export	*lco_md_exp;
	struct obd_export	*lco_dt_exp;
};

struct ll_sb_info {
	/* this protects pglist and ra_info. It isn't safe to
	 * grab from interrupt contexts */
	spinlock_t		  ll_lock;
	spinlock_t		  ll_pp_extent_lock; /* pp_extent entry */
	spinlock_t		  ll_process_lock; /* ll_rw_process_info */
	struct obd_uuid		  ll_sb_uuid;
	struct obd_export	 *ll_md_exp;
	struct obd_export	 *ll_dt_exp;
	struct obd_device	 *ll_md_obd;
	struct obd_device	 *ll_dt_obd;
	struct dentry		 *ll_debugfs_entry;
	struct lu_fid		  ll_root_fid;	/* root object fid */

	int			  ll_flags;
	unsigned int		  ll_xattr_cache_enabled:1,
				  ll_xattr_cache_set:1, /* already set to 0/1 */
				  ll_client_common_fill_super_succeeded:1,
				  ll_checksum_set:1;

	struct lustre_client_ocd  ll_lco;

	struct lprocfs_stats	 *ll_stats;	/* lprocfs stats counter */

	/* Used to track "unstable" pages on a client, and maintain a
	 * LRU list of clean pages. An "unstable" page is defined as
	 * any page which is sent to a server as part of a bulk request,
	 * but is uncommitted to stable storage. */
	struct cl_client_cache	 *ll_cache;

	struct lprocfs_stats	 *ll_ra_stats;

	struct ll_ra_info	  ll_ra_info;
	unsigned int		  ll_namelen;
	struct file_operations	 *ll_fop;

	struct lu_site		 *ll_site;
	struct cl_device	 *ll_cl;

	struct ll_rw_extents_info ll_rw_extents_info;
	int			  ll_extent_process_count;
	struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
	unsigned int		  ll_offset_process_count;
	struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
	unsigned int		  ll_rw_offset_entry_count;
	int			  ll_stats_track_id;
	enum stats_track_type	  ll_stats_track_type;
	int			  ll_rw_stats_on;

	/* metadata stat-ahead */
	unsigned int		  ll_sa_running_max; /* max concurrent
						      * statahead instances */
	unsigned int		  ll_sa_max;	 /* max statahead RPCs */
	atomic_t		  ll_sa_total;	 /* statahead thread started
						  * count */
	atomic_t		  ll_sa_wrong;	 /* statahead thread stopped for
						  * low hit ratio */
	atomic_t		  ll_sa_running; /* running statahead thread
						  * count */
	atomic_t		  ll_agl_total;  /* AGL thread started count */

	dev_t			  ll_sdev_orig; /* save s_dev before assign for
						 * clustered NFS */
	struct root_squash_info	  ll_squash;
	struct path		  ll_mnt;

	/* st_blksize returned by stat(2), when non-zero */
	unsigned int		  ll_stat_blksize;

	/* maximum relative age of cached statfs results */
	unsigned int		  ll_statfs_max_age;

	struct kset		  ll_kset;	/* sysfs object */
	struct completion	  ll_kobj_unregister;

	/* File heat */
	unsigned int		  ll_heat_decay_weight;
	unsigned int		  ll_heat_period_second;

	/* filesystem fsname */
	char			  ll_fsname[LUSTRE_MAXFSNAME + 1];

	/* Persistent Client Cache */
	struct pcc_super	  ll_pcc_super;
};

#define SBI_DEFAULT_HEAT_DECAY_WEIGHT	((80 * 256 + 50) / 100)
#define SBI_DEFAULT_HEAT_PERIOD_SECOND	(60)

/**
 * per file-descriptor read-ahead data.
 */
struct ll_readahead_state {
	spinlock_t	ras_lock;
	/* End byte that read(2) tried to read. */
	loff_t		ras_last_read_end_bytes;
	/*
	 * number of bytes read after last read-ahead window reset. As window
	 * is reset on each seek, this is effectively a number of consecutive
	 * accesses. Maybe ->ras_accessed_in_window is a better name.
	 *
	 * XXX nikita: window is also reset (by ras_update()) when Lustre
	 * believes that memory pressure evicts read-ahead pages. In that
	 * case, it probably doesn't make sense to expand window to
	 * PTLRPC_MAX_BRW_PAGES on the third access.
	 */
	loff_t		ras_consecutive_bytes;
	/*
	 * number of read requests after the last read-ahead window reset.
	 * As the window is reset on each seek, this is effectively the number
	 * of consecutive read requests and is used to trigger read-ahead.
	 */
	unsigned long	ras_consecutive_requests;
	/*
	 * Parameters of the current read-ahead window. Handled by
	 * ras_update(). On the initial access to the file or after a seek,
	 * window is reset to 0. After 3 consecutive accesses, window is
	 * expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
	 * PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
	 */
	pgoff_t		ras_window_start_idx;
	pgoff_t		ras_window_pages;
	/*
	 * Optimal RPC size in pages.
	 * It decides how many pages will be sent for each read-ahead.
	 */
	unsigned long	ras_rpc_pages;
	/*
	 * Where next read-ahead should start at. This lies within read-ahead
	 * window. Read-ahead window is read in pieces rather than at once
	 * because: 1. Lustre limits total number of pages under read-ahead by
	 * ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
	 * not covered by DLM lock.
	 */
	pgoff_t		ras_next_readahead_idx;
	/*
	 * Total number of ll_file_read requests issued; reads originating
	 * from mmap are not counted in this total. This value is used to
	 * trigger full file read-ahead after multiple reads to a small file.
	 */
	unsigned long	ras_requests;
	/*
	 * The following 3 items are used for detecting the stride I/O
	 * mode.
	 * In stride I/O mode,
	 * ...............|-----data-----|****gap*****|--------|******|....
	 *    offset      |-stride_bytes-|-stride_gap-|
	 * ras_stride_offset = offset;
	 * ras_stride_length = stride_bytes + stride_gap;
	 * ras_stride_bytes = stride_bytes;
	 * Note: all three items are counted in bytes.
	 */
	loff_t		ras_stride_offset;
	loff_t		ras_stride_length;
	loff_t		ras_stride_bytes;
	/*
	 * Count of consecutive stride requests; similar to
	 * ras_consecutive_requests, but used in stride I/O mode.
	 * Note: stride read-ahead is enabled only after more than two
	 * consecutive stride requests are detected.
	 */
	unsigned long	ras_consecutive_stride_requests;
	/* index of the last page that async readahead starts */
	pgoff_t		ras_async_last_readpage_idx;
	/* whether we should increase readahead window */
	bool		ras_need_increase_window;
	/* whether ra miss check should be skipped */
	bool		ras_no_miss_check;
};

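/*
 * Worked example (illustrative): an application that reads 1 MiB of data
 * at the start of every 4 MiB (i.e. 1 MiB of data followed by a 3 MiB
 * gap), starting at byte 0, is described by:
 */
#if 0
	ras->ras_stride_offset = 0;		/* offset of the first read */
	ras->ras_stride_bytes  = 1 << 20;	/* stride_bytes: 1 MiB of data */
	ras->ras_stride_length = 4 << 20;	/* stride_bytes + 3 MiB gap */
#endif
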
struct ll_readahead_work {
	/** File to readahead */
	struct file		*lrw_file;
	pgoff_t			 lrw_start_idx;
	pgoff_t			 lrw_end_idx;
	/* async worker to handle read */
	struct work_struct	 lrw_readahead_work;
	char			 lrw_jobid[LUSTRE_JOBID_SIZE];
};

extern struct kmem_cache *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
	struct ll_readahead_state fd_ras;
	struct ll_grouplock fd_grouplock;
	__u64 lfd_pos;
	__u32 fd_flags;
	fmode_t fd_omode;
	/* openhandle if lease exists for this file.
	 * Borrow lli->lli_och_mutex to protect assignment */
	struct obd_client_handle *fd_lease_och;
	struct obd_client_handle *fd_och;
	struct file *fd_file;
	/* Indicate whether we need to report a failure on close.
	 * true: failure is known, no need to report again.
	 * false: unknown failure, should report. */
	bool fd_write_failed;
	bool ll_lock_no_expand;
	rwlock_t fd_lock; /* protect lcc list */
	struct list_head fd_lccs; /* list of ll_cl_context */
	/* Used by mirrored files to lead IOs to a specific mirror, usually
	 * for mirror resync. 0 means default. */
	__u32 fd_designated_mirror;
	/* The layout version when resync starts. Resync I/O should carry this
	 * layout version for verification to OST objects */
	__u32 fd_layout_version;
	struct pcc_file fd_pcc_file;
};

void llite_tunables_unregister(void);
int llite_tunables_register(void);

static inline struct inode *ll_info2i(struct ll_inode_info *lli)
{
	return &lli->lli_vfs_inode;
}

__u32 ll_i2suppgid(struct inode *i);
void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2);

static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
{
#if BITS_PER_LONG == 32
	return 1;
#elif defined(CONFIG_COMPAT)
	if (unlikely(sbi->ll_flags & LL_SBI_32BIT_API))
		return true;

# ifdef CONFIG_X86_X32
	/* in_compat_syscall() returns true when called from a kthread
	 * and CONFIG_X86_X32 is enabled, which is wrong. So check
	 * whether the caller comes from a syscall (i.e. not a kthread)
	 * before calling in_compat_syscall(). */
	if (current->flags & PF_KTHREAD)
		return false;
# endif

	return unlikely(in_compat_syscall());
#else
	return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
#endif
}

static inline bool ll_sbi_has_fast_read(struct ll_sb_info *sbi)
{
	return !!(sbi->ll_flags & LL_SBI_FAST_READ);
}

static inline bool ll_sbi_has_tiny_write(struct ll_sb_info *sbi)
{
	return !!(sbi->ll_flags & LL_SBI_TINY_WRITE);
}

static inline bool ll_sbi_has_file_heat(struct ll_sb_info *sbi)
{
	return !!(sbi->ll_flags & LL_SBI_FILE_HEAT);
}

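/*
 * Sketch (illustrative; "fast_read" is a hypothetical local helper): the
 * read path can gate its lockless fast path on the per-superblock flag:
 */
#if 0
	if (ll_sbi_has_fast_read(ll_i2sbi(inode)))
		bytes = fast_read(iocb, iter);
#endif
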
void ll_ras_enter(struct file *f, loff_t pos, size_t count);

/* llite/lcommon_misc.c */
int cl_ocd_update(struct obd_device *host, struct obd_device *watched,
		  enum obd_notify_event ev, void *owner);
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ll_grouplock *lg);
void cl_put_grouplock(struct ll_grouplock *lg);

/* llite/lproc_llite.c */
int ll_debugfs_register_super(struct super_block *sb, const char *name);
void ll_debugfs_unregister_super(struct super_block *sb);
void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, long count);

enum {
	LPROC_LL_READ_BYTES,
	LPROC_LL_WRITE_BYTES,
	LPROC_LL_READ,
	LPROC_LL_WRITE,
	LPROC_LL_IOCTL,
	LPROC_LL_OPEN,
	LPROC_LL_RELEASE,
	LPROC_LL_MMAP,
	LPROC_LL_FAULT,
	LPROC_LL_MKWRITE,
	LPROC_LL_LLSEEK,
	LPROC_LL_FSYNC,
	LPROC_LL_READDIR,
	LPROC_LL_SETATTR,
	LPROC_LL_TRUNC,
	LPROC_LL_FLOCK,
	LPROC_LL_GETATTR,
	LPROC_LL_CREATE,
	LPROC_LL_LINK,
	LPROC_LL_UNLINK,
	LPROC_LL_SYMLINK,
	LPROC_LL_MKDIR,
	LPROC_LL_RMDIR,
	LPROC_LL_MKNOD,
	LPROC_LL_RENAME,
	LPROC_LL_STATFS,
	LPROC_LL_SETXATTR,
	LPROC_LL_GETXATTR,
	LPROC_LL_GETXATTR_HITS,
	LPROC_LL_LISTXATTR,
	LPROC_LL_REMOVEXATTR,
	LPROC_LL_INODE_PERM,
	LPROC_LL_FILE_OPCODES
};

enum get_default_layout_type {
	GET_DEFAULT_LAYOUT_ROOT = 1,
};

struct ll_dir_chain {
};

static inline void ll_dir_chain_init(struct ll_dir_chain *chain)
{
}

static inline void ll_dir_chain_fini(struct ll_dir_chain *chain)
{
}

extern const struct file_operations ll_dir_operations;
extern const struct inode_operations ll_dir_inode_operations;
#ifdef HAVE_DIR_CONTEXT
int ll_dir_read(struct inode *inode, __u64 *pos, struct md_op_data *op_data,
		struct dir_context *ctx);
#else
int ll_dir_read(struct inode *inode, __u64 *pos, struct md_op_data *op_data,
		void *cookie, filldir_t filldir);
#endif
int ll_get_mdt_idx(struct inode *inode);
int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid);
struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
			     __u64 offset, struct ll_dir_chain *chain);
void ll_release_page(struct inode *inode, struct page *page, bool remove);

/* llite/namei.c */
extern const struct inode_operations ll_special_inode_operations;

struct inode *ll_iget(struct super_block *sb, ino_t hash,
		      struct lustre_md *lic);
int ll_test_inode_by_fid(struct inode *inode, void *opaque);
int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
		       void *data, int flag);
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
int ll_rmdir_entry(struct inode *dir, char *name, int namelen);
void ll_update_times(struct ptlrpc_request *request, struct inode *inode);

/* llite/rw.c */
int ll_writepage(struct page *page, struct writeback_control *wbc);
int ll_writepages(struct address_space *, struct writeback_control *wbc);
int ll_readpage(struct file *file, struct page *page);
int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
		    struct cl_page *page, struct file *file);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);

void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io,
	       enum lcc_type type);
void ll_cl_remove(struct file *file, const struct lu_env *env);
struct ll_cl_context *ll_cl_find(struct file *file);

extern const struct address_space_operations ll_aops;

/* llite/file.c */
extern struct file_operations ll_file_operations;
extern struct file_operations ll_file_operations_flock;
extern struct file_operations ll_file_operations_noflock;
extern struct inode_operations ll_file_inode_operations;
extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
			   enum ldlm_mode l_req_mode);
extern enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
				      struct lustre_handle *lockh, __u64 flags,
				      enum ldlm_mode mode);

int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
int ll_release_openhandle(struct dentry *, struct lookup_intent *);
int ll_md_real_close(struct inode *inode, fmode_t fmode);
extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
			      struct ll_file_data *file, loff_t pos,
			      size_t count, int rw);

#ifdef HAVE_INODEOPS_ENHANCED_GETATTR
int ll_getattr(const struct path *path, struct kstat *stat,
	       u32 request_mask, unsigned int flags);
#else
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
#endif
int ll_getattr_dentry(struct dentry *de, struct kstat *stat);
struct posix_acl *ll_get_acl(struct inode *inode, int type);
#ifdef HAVE_IOP_SET_ACL
#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type);
#else  /* !CONFIG_LUSTRE_FS_POSIX_ACL */
#define ll_set_acl NULL
#endif /* CONFIG_LUSTRE_FS_POSIX_ACL */
#endif /* HAVE_IOP_SET_ACL */

int ll_migrate(struct inode *parent, struct file *file,
	       struct lmv_user_md *lum, const char *name);
int ll_get_fid_by_name(struct inode *parent, const char *name,
		       int namelen, struct lu_fid *fid, struct inode **inode);
int ll_inode_permission(struct inode *inode, int mask);
int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa);
int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
			void __user *arg);
int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
			void __user *arg);

int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
			     __u64 flags, struct lov_user_md *lum,
			     int lum_size);
int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
			     struct lov_mds_md **lmm, int *lmm_size,
			     struct ptlrpc_request **request);
int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
		     int set_default);
int ll_dir_getstripe_default(struct inode *inode, void **lmmp,
			     int *lmm_size, struct ptlrpc_request **request,
			     struct ptlrpc_request **root_request, u64 valid);
int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
		     struct ptlrpc_request **request, u64 valid);
int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
int ll_merge_attr(const struct lu_env *env, struct inode *inode);
int ll_fid2path(struct inode *inode, void __user *arg);
int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
int ll_hsm_release(struct inode *inode);
int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss);
void ll_io_set_mirror(struct cl_io *io, const struct file *file);

/* llite/dcache.c */

int ll_d_init(struct dentry *de);
extern const struct dentry_operations ll_d_ops;
void ll_intent_drop_lock(struct lookup_intent *);
void ll_intent_release(struct lookup_intent *);
void ll_invalidate_aliases(struct inode *);
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
int ll_revalidate_it_finish(struct ptlrpc_request *request,
			    struct lookup_intent *it, struct dentry *de);

/* llite/llite_lib.c */
extern struct super_operations lustre_super_operations;

void ll_lli_init(struct ll_inode_info *lli);
int ll_fill_super(struct super_block *sb);
void ll_put_super(struct super_block *sb);
void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
void ll_dir_clear_lsm_md(struct inode *inode);
void ll_clear_inode(struct inode *inode);
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
		   enum op_xvalid xvalid, bool hsm_import);
int ll_setattr(struct dentry *de, struct iattr *attr);
int ll_statfs(struct dentry *de, struct kstatfs *sfs);
int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
		       u64 flags);
int ll_update_inode(struct inode *inode, struct lustre_md *md);
void ll_update_inode_flags(struct inode *inode, int ext_flags);
int ll_read_inode2(struct inode *inode, void *opaque);
void ll_delete_inode(struct inode *inode);
int ll_iocontrol(struct inode *inode, struct file *file,
		 unsigned int cmd, unsigned long arg);
int ll_flush_ctx(struct inode *inode);
void ll_umount_begin(struct super_block *sb);
int ll_remount_fs(struct super_block *sb, int *flags, char *data);
int ll_show_options(struct seq_file *seq, struct dentry *dentry);
void ll_dirty_page_discard_warn(struct page *page, int ioret);
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
		  struct super_block *, struct lookup_intent *);
int ll_obd_statfs(struct inode *inode, void __user *arg);
int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
int ll_set_default_mdsize(struct ll_sb_info *sbi, int default_mdsize);

void ll_unlock_md_op_lsm(struct md_op_data *op_data);
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
				      struct inode *i1, struct inode *i2,
				      const char *name, size_t namelen,
				      __u32 mode, enum md_op_code opc,
				      void *data);
void ll_finish_md_op_data(struct md_op_data *op_data);
int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
void ll_compute_rootsquash_state(struct ll_sb_info *sbi);
ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
			struct lov_user_md **kbuf);
void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req);

void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req,
			struct lookup_intent *it);

/* Compute expected user md size when passing in a md from user space */
static inline ssize_t ll_lov_user_md_size(const struct lov_user_md *lum)
{
	switch (lum->lmm_magic) {
	case LOV_USER_MAGIC_V1:
		return sizeof(struct lov_user_md_v1);
	case LOV_USER_MAGIC_V3:
		return sizeof(struct lov_user_md_v3);
	case LOV_USER_MAGIC_SPECIFIC:
		if (lum->lmm_stripe_count > LOV_MAX_STRIPE_COUNT)
			return -EINVAL;

		return lov_user_md_size(lum->lmm_stripe_count,
					LOV_USER_MAGIC_SPECIFIC);
	case LOV_USER_MAGIC_COMP_V1:
		return ((struct lov_comp_md_v1 *)lum)->lcm_size;
	case LOV_USER_MAGIC_FOREIGN:
		return foreign_size(lum);
	}

	return -EINVAL;
}

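/*
 * Sketch (illustrative): a typical caller validates a user-supplied md
 * buffer against the size implied by its magic before using it
 * ("user_buf_len" is a hypothetical variable):
 */
#if 0
	ssize_t lum_size = ll_lov_user_md_size(lum);

	if (lum_size < 0 || user_buf_len < (size_t)lum_size)
		return -ERANGE;
#endif
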
/* llite/llite_nfs.c */
extern struct export_operations lustre_export_operations;
__u32 get_uuid2int(const char *name, int len);
struct inode *search_inode_for_lustre(struct super_block *sb,
				      const struct lu_fid *fid);
int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);

/* llite/symlink.c */
extern struct inode_operations ll_fast_symlink_inode_operations;

/**
 * IO arguments for various VFS I/O interfaces.
 */
struct vvp_io_args {
	/** normal/sendfile/splice */
	enum vvp_io_subtype via_io_subtype;

	union {
		struct {
			struct kiocb		*via_iocb;
			struct iov_iter		*via_iter;
		} normal;
		struct {
			struct pipe_inode_info	*via_pipe;
			unsigned int		 via_flags;
		} splice;
	} u;
};

enum lcc_type {
	LCC_RW = 1,
	LCC_MMAP
};

struct ll_cl_context {
	struct list_head	 lcc_list;
	void			*lcc_cookie;
	const struct lu_env	*lcc_env;
	struct cl_io		*lcc_io;
	struct cl_page		*lcc_page;
	enum lcc_type		 lcc_type;
};

struct ll_thread_info {
	struct vvp_io_args	lti_args;
	struct ra_io_arg	lti_ria;
	struct ll_cl_context	lti_io_ctx;
};

extern struct lu_context_key ll_thread_key;

static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
{
	struct ll_thread_info *lti;

	lti = lu_context_key_get(&env->le_ctx, &ll_thread_key);
	LASSERT(lti != NULL);

	return lti;
}

static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
					      enum vvp_io_subtype type)
{
	struct vvp_io_args *via = &ll_env_info(env)->lti_args;

	via->via_io_subtype = type;

	return via;
}

void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
		struct vvp_io_args *args);

/* llite/llite_mmap.c */

int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
		     unsigned long addr, size_t count);
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t count);

#define ll_s2sbi(sb)	(s2lsi(sb)->lsi_llsbi)

/* don't need an addref as the sb_info should be holding one */
static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
{
	return ll_s2sbi(sb)->ll_dt_exp;
}

/* don't need an addref as the sb_info should be holding one */
static inline struct obd_export *ll_s2mdexp(struct super_block *sb)
{
	return ll_s2sbi(sb)->ll_md_exp;
}

static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
{
	struct obd_device *obd = sbi->ll_md_exp->exp_obd;

	if (obd == NULL)
		LBUG();
	return &obd->u.cli;
}

/* FIXME: replace the name of this with LL_SB to conform to kernel stuff */
static inline struct ll_sb_info *ll_i2sbi(struct inode *inode)
{
	return ll_s2sbi(inode->i_sb);
}

static inline struct obd_export *ll_i2dtexp(struct inode *inode)
{
	return ll_s2dtexp(inode->i_sb);
}

static inline struct obd_export *ll_i2mdexp(struct inode *inode)
{
	return ll_s2mdexp(inode->i_sb);
}

static inline struct lu_fid *ll_inode2fid(struct inode *inode)
{
	struct lu_fid *fid;

	LASSERT(inode != NULL);
	fid = &ll_i2info(inode)->lli_fid;

	return fid;
}

static inline bool ll_dir_striped(struct inode *inode)
{
	LASSERT(inode);
	return S_ISDIR(inode->i_mode) &&
	       lmv_dir_striped(ll_i2info(inode)->lli_lsm_md);
}

static inline loff_t ll_file_maxbytes(struct inode *inode)
{
	struct cl_object *obj = ll_i2info(inode)->lli_clob;

	if (obj == NULL)
		return MAX_LFS_FILESIZE;

	return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE);
}

/* llite/xattr.c */
extern const struct xattr_handler *ll_xattr_handlers[];

#define XATTR_USER_T		1
#define XATTR_TRUSTED_T		2
#define XATTR_SECURITY_T	3
#define XATTR_ACL_ACCESS_T	4
#define XATTR_ACL_DEFAULT_T	5
#define XATTR_LUSTRE_T		6
#define XATTR_OTHER_T		7

ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_xattr_list(struct inode *inode, const char *name, int type,
		  void *buffer, size_t size, u64 valid);
const struct xattr_handler *get_xattr_type(const char *name);

/**
 * Common IO arguments for various VFS I/O interfaces.
 */
int cl_sb_init(struct super_block *sb);
int cl_sb_fini(struct super_block *sb);

enum ras_update_flags {
	LL_RAS_HIT  = 0x1,
	LL_RAS_MMAP = 0x2
};
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);

/* statahead.c */

#define LL_SA_RPC_MIN		2
#define LL_SA_RPC_DEF		32
#define LL_SA_RPC_MAX		512

/* XXX: If we want to support more concurrent statahead instances,
 * please consider decentralizing the RPC lists attached
 * on the related import, such as imp_{sending,delayed}_list.
 */
#define LL_SA_RUNNING_MAX	256
#define LL_SA_RUNNING_DEF	16

#define LL_SA_CACHE_BIT		5
#define LL_SA_CACHE_SIZE	(1 << LL_SA_CACHE_BIT)
#define LL_SA_CACHE_MASK	(LL_SA_CACHE_SIZE - 1)

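/*
 * Sketch (illustrative): statahead entries are distributed over
 * LL_SA_CACHE_SIZE buckets by masking a name hash down to a bucket index
 * ("hash" is a hypothetical variable):
 */
#if 0
	int bucket = (int)(hash & LL_SA_CACHE_MASK);	/* 0 .. 31 */
#endif
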
/* per inode struct, for dir only */
struct ll_statahead_info {
	struct dentry	       *sai_dentry;
	atomic_t		sai_refcount;	/* when accessing this struct,
						 * hold a refcount */
	unsigned int		sai_max;	/* max ahead of lookup */
	__u64			sai_sent;	/* stat requests sent count */
	__u64			sai_replied;	/* stat requests which received
						 * reply */
	__u64			sai_index;	/* index of statahead entry */
	__u64			sai_index_wait;	/* index of entry which the
						 * caller is waiting for */
	__u64			sai_hit;	/* hit count */
	__u64			sai_miss;	/* miss count:
						 * for "ls -al" case, includes
						 * hidden dentry miss;
						 * for "ls -l" case, it does not
						 * include hidden dentry miss.
						 * "sai_miss_hidden" is used for
						 * the latter case.
						 */
	unsigned int		sai_consecutive_miss; /* consecutive miss */
	unsigned int		sai_miss_hidden;/* "ls -al", but first dentry
						 * is not a hidden one */
	unsigned int		sai_skip_hidden;/* skipped hidden dentry count
						 */
	unsigned int		sai_ls_all:1,	/* "ls -al", do stat-ahead for
						 * hidden entries */
				sai_agl_valid:1,/* AGL is valid for the dir */
				sai_in_readpage:1;/* statahead is in readdir() */
	wait_queue_head_t	sai_waitq;	/* stat-ahead wait queue */
	struct task_struct     *sai_task;	/* stat-ahead thread */
	struct task_struct     *sai_agl_task;	/* AGL thread */
	struct list_head	sai_interim_entries; /* entries which got async
						      * stat reply, but not
						      * instantiated */
	struct list_head	sai_entries;	/* completed entries */
	struct list_head	sai_agls;	/* AGLs to be sent */
	struct list_head	sai_cache[LL_SA_CACHE_SIZE];
	spinlock_t		sai_cache_lock[LL_SA_CACHE_SIZE];
	atomic_t		sai_cache_count; /* entry count in cache */
};

int ll_statahead(struct inode *dir, struct dentry **dentry, bool unplug);
void ll_authorize_statahead(struct inode *dir, void *key);
void ll_deauthorize_statahead(struct inode *dir, void *key);

blkcnt_t dirty_cnt(struct inode *inode);

int cl_glimpse_size0(struct inode *inode, int agl);
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl);

static inline int cl_glimpse_size(struct inode *inode)
{
	return cl_glimpse_size0(inode, 0);
}

/* AGL stands for 'asynchronous glimpse lock', a speculative lock taken as
 * part of statahead */
static inline int cl_agl(struct inode *inode)
{
	return cl_glimpse_size0(inode, 1);
}

int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise);

int cl_io_get(struct inode *inode, struct lu_env **envout,
	      struct cl_io **ioout, __u16 *refcheck);

static inline int ll_glimpse_size(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc;

	down_read(&lli->lli_glimpse_sem);
	rc = cl_glimpse_size(inode);
	lli->lli_glimpse_time = ktime_get();
	up_read(&lli->lli_glimpse_sem);

	return rc;
}

/* a dentry may be statahead-ed when statahead is enabled, the current process
 * has opened the parent directory, and this dentry hasn't accessed the
 * statahead cache before */
static inline bool
dentry_may_statahead(struct inode *dir, struct dentry *dentry)
{
	struct ll_inode_info  *lli;
	struct ll_dentry_data *ldd;

	if (ll_i2sbi(dir)->ll_sa_max == 0)
		return false;

	lli = ll_i2info(dir);

	/* statahead is not allowed for this dir; there may be three causes:
	 * 1. dir is not opened.
	 * 2. statahead hit ratio is too low.
	 * 3. previous stat started statahead thread failed. */
	if (!lli->lli_sa_enabled)
		return false;

	/* not the same process, don't statahead */
	if (lli->lli_opendir_pid != current->pid)
		return false;

	/*
	 * When stating a dentry, the kernel may trigger 'revalidate' or
	 * 'lookup' multiple times, e.g. for 'getattr' and 'getxattr'.
	 * For a patchless client, the lookup intent is not accurate, which
	 * may misguide statahead. For example:
	 * The 'revalidate' call for 'getattr' and 'getxattr' of a dentry will
	 * have the same intent -- IT_GETATTR, while one dentry should access
	 * the statahead cache only once, otherwise the statahead window gets
	 * messed up. The solution is as follows:
	 * Assign 'lld_sa_generation' from 'lli_sa_generation' when a dentry
	 * gets IT_GETATTR for the first time; subsequent IT_GETATTR will
	 * bypass interacting with the statahead cache by checking
	 * 'lld_sa_generation == lli->lli_sa_generation'.
	 */
	ldd = ll_d2d(dentry);
	if (ldd != NULL && ldd->lld_sa_generation == lli->lli_sa_generation)
		return false;

	return true;
}

int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
		       enum cl_fsync_mode mode, int ignore_layout);

static inline int ll_file_nolock(const struct file *file)
{
	struct ll_file_data *fd = file->private_data;
	struct inode *inode = file_inode((struct file *)file);

	LASSERT(fd != NULL);
	return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
		(ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
}

static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
				    struct lookup_intent *it, __u64 *bits)
{
	if (!it->it_lock_set) {
		struct lustre_handle handle;

		/* If this inode is a remote object, it will get two
		 * separate locks in different namespaces: the master MDT,
		 * where the name entry is, will grant a LOOKUP lock, and the
		 * remote MDT, where the object is, will grant an
		 * UPDATE|PERM lock. The inode will be attached to both
		 * LOOKUP and PERM locks, so revoking either lock will
		 * cause the dcache to be cleared. */
		if (it->it_remote_lock_mode) {
			handle.cookie = it->it_remote_lock_handle;
			CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID
			       "(%p) for remote lock %#llx\n",
			       PFID(ll_inode2fid(inode)), inode,
			       handle.cookie);
			md_set_lock_data(exp, &handle, inode, NULL);
		}

		handle.cookie = it->it_lock_handle;

		CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)"
		       " for lock %#llx\n",
		       PFID(ll_inode2fid(inode)), inode, handle.cookie);

		md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
		it->it_lock_set = 1;
	}

	if (bits != NULL)
		*bits = it->it_lock_bits;
}

static inline int d_lustre_invalid(const struct dentry *dentry)
{
	struct ll_dentry_data *lld = ll_d2d(dentry);

	return (lld == NULL) || lld->lld_invalid;
}

static inline void __d_lustre_invalidate(struct dentry *dentry)
{
	struct ll_dentry_data *lld = ll_d2d(dentry);

	if (lld != NULL)
		lld->lld_invalid = 1;
}

/**
 * Mark dentry INVALID. If dentry refcount is zero (this is the normal case
 * for ll_md_blocking_ast), unhash this dentry and let the dcache reclaim it
 * later; else dput() of the last refcount will unhash this dentry and kill it.
 */
static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
{
	CDEBUG(D_DENTRY, "invalidate dentry %.*s (%p) parent %p inode %p "
	       "refc %d\n", dentry->d_name.len, dentry->d_name.name, dentry,
	       dentry->d_parent, dentry->d_inode, ll_d_count(dentry));

	spin_lock_nested(&dentry->d_lock,
			 nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
	__d_lustre_invalidate(dentry);
	/*
	 * We should be careful about dentries created by d_obtain_alias().
	 * These dentries are not put in the dentry tree, instead they are
	 * linked to sb->s_anon through dentry->d_hash.
	 * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
	 * If we unhashed such a dentry, unmount would not be able to find
	 * it and busy inodes would be reported.
	 */
	if (ll_d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}

static inline void d_lustre_revalidate(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	LASSERT(ll_d2d(dentry) != NULL);
	ll_d2d(dentry)->lld_invalid = 0;
	spin_unlock(&dentry->d_lock);
}

static inline dev_t ll_compat_encode_dev(dev_t dev)
{
	/* The compat_sys_*stat*() syscalls will fail unless the
	 * device majors and minors are both less than 256. Note that
	 * the value returned here will be passed through
	 * old_encode_dev() in cp_compat_stat(). And so we are not
	 * trying to return a valid compat (u16) device number, just
	 * one that will pass the old_valid_dev() check. */

	return MKDEV(MAJOR(dev) & 0xff, MINOR(dev) & 0xff);
}

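/* Worked example (assuming the usual MKDEV()/MAJOR()/MINOR() semantics):
 * a device with major 300 and minor 5 is folded to
 * MKDEV(300 & 0xff, 5 & 0xff) = MKDEV(44, 5), which passes the
 * old_valid_dev() check even though it is not the real device number. */
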
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
			   struct lu_extent *ext);

int ll_xattr_init(void);
void ll_xattr_fini(void);

int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
		    struct cl_page *page, enum cl_req_type crt);

int ll_getparent(struct file *file, struct getparent __user *arg);

/* lcommon_cl.c */
int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
		   enum op_xvalid xvalid, unsigned int attr_flags);

extern struct lu_env *cl_inode_fini_env;
extern __u16 cl_inode_fini_refcheck;

int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);

u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
u32 cl_fid_build_gen(const struct lu_fid *fid);

static inline struct pcc_super *ll_i2pccs(struct inode *inode)
{
	return &ll_i2sbi(inode)->ll_pcc_super;
}

static inline struct pcc_super *ll_info2pccs(struct ll_inode_info *lli)
{
	return ll_i2pccs(ll_info2i(lli));
}

#endif /* LLITE_INTERNAL_H */