4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #ifndef LLITE_INTERNAL_H
33 #define LLITE_INTERNAL_H
35 #include <lustre_disk.h> /* for s2sbi */
36 #include <lustre_linkea.h>
38 /* for struct cl_lock_descr and struct cl_io */
39 #include <cl_object.h>
40 #include <lustre_lmv.h>
41 #include <lustre_mdc.h>
42 #include <lustre_intent.h>
43 #include <linux/compat.h>
44 #include <linux/aio.h>
45 #include <linux/parser.h>
46 #include <lustre_compat.h>
47 #include <lustre_crypto.h>
48 #include <range_lock.h>
50 #include "vvp_internal.h"
52 #include "foreign_symlink.h"
58 #ifndef HAVE_VM_FAULT_RETRY
59 #define VM_FAULT_RETRY 0
62 /* Kernel 3.1 kills LOOKUP_CONTINUE; LOOKUP_PARENT is equivalent to it.
63 * See kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0 */
64 #ifndef LOOKUP_CONTINUE
65 #define LOOKUP_CONTINUE LOOKUP_PARENT
68 /** Only used on client-side for indicating the tail of dir hash/offset. */
69 #define LL_DIR_END_OFF 0x7fffffffffffffffULL
70 #define LL_DIR_END_OFF_32BIT 0x7fffffffUL
72 /* 4UL * 1024 * 1024 */
73 #define LL_MAX_BLKSIZE_BITS 22
75 #define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
77 #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
79 struct ll_dentry_data {
80 unsigned int lld_sa_generation;
81 unsigned int lld_invalid:1;
82 unsigned int lld_nfs_dentry:1;
83 struct rcu_head lld_rcu_head;
86 #define ll_d2d(de) ((struct ll_dentry_data*)((de)->d_fsdata))
88 #define LLI_INODE_MAGIC 0x111d0de5
89 #define LLI_INODE_DEAD 0xdeadd00d
91 struct ll_getname_data {
92 #ifdef HAVE_DIR_CONTEXT
93 struct dir_context ctx;
95 char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
96 struct lu_fid lgd_fid; /* target fid we are looking for */
97 int lgd_found; /* inode matched? */
100 struct ll_grouplock {
101 struct lu_env *lg_env;
103 struct cl_lock *lg_lock;
104 unsigned long lg_gid;
107 /* See comment on trunc_sem_down_read_nowait */
108 struct ll_trunc_sem {
109 /* when positive, this is a count of readers; when -1, the semaphore
110 * is held for write; 0 means unlocked
112 atomic_t ll_trunc_readers;
113 /* this tracks a count of waiting writers */
114 atomic_t ll_trunc_waiters;
117 struct ll_inode_info {
118 __u32 lli_inode_magic;
121 volatile unsigned long lli_flags;
122 struct posix_acl *lli_posix_acl;
124 /* identifying fields for both metadata and data stacks. */
125 struct lu_fid lli_fid;
126 /* master inode fid for stripe directory */
127 struct lu_fid lli_pfid;
129 /* We need all three because every inode may be opened in different modes. */
131 struct obd_client_handle *lli_mds_read_och;
132 struct obd_client_handle *lli_mds_write_och;
133 struct obd_client_handle *lli_mds_exec_och;
134 __u64 lli_open_fd_read_count;
135 __u64 lli_open_fd_write_count;
136 __u64 lli_open_fd_exec_count;
138 /* Number of times this inode was opened */
139 u64 lli_open_fd_count;
140 /* When last close was performed on this inode */
141 ktime_t lli_close_fd_time;
143 /* Protects access to och pointers and their usage counters */
144 struct mutex lli_och_mutex;
146 struct inode lli_vfs_inode;
148 /* the most recent timestamps obtained from mds */
153 spinlock_t lli_agl_lock;
155 /* Try to keep the d::member and f::member aligned. Before using
156 * these members, make clear whether the object is a directory or not. */
160 /* metadata statahead */
161 /* since parent and child threads can share the same @file struct,
162 * "opendir_key" is the token used at dir close to decide, in the case
163 * where the parent exits before the child, which one should clean up
164 * the dir readahead. */
165 void *lli_opendir_key;
166 struct ll_statahead_info *lli_sai;
167 /* protect statahead stuff. */
168 spinlock_t lli_sa_lock;
169 /* "opendir_pid" is the token when lookup/revalid
170 * -- I am the owner of dir statahead. */
171 pid_t lli_opendir_pid;
172 /* directory depth to ROOT */
173 unsigned short lli_dir_depth;
174 /* stat will try to access statahead entries or start statahead
175 * if this flag is set; the flag is set upon dir open, and cleared
176 * when the dir is closed, when the statahead hit ratio is too low,
177 * or when starting the statahead thread failed. */
179 unsigned short lli_sa_enabled:1;
180 /* generation for statahead */
181 unsigned int lli_sa_generation;
182 /* rw lock protects lli_lsm_md */
183 struct rw_semaphore lli_lsm_sem;
184 /* directory stripe information */
185 struct lmv_stripe_md *lli_lsm_md;
186 /* directory default LMV */
187 struct lmv_stripe_md *lli_default_lsm_md;
190 /* for non-directory */
192 struct mutex lli_size_mutex;
193 char *lli_symlink_name;
194 struct ll_trunc_sem lli_trunc_sem;
195 struct range_lock_tree lli_write_tree;
196 struct mutex lli_setattr_mutex;
198 struct rw_semaphore lli_glimpse_sem;
199 ktime_t lli_glimpse_time;
200 struct list_head lli_agl_list;
203 /* for writepage() only to communicate to fsync */
206 /* protect the file heat fields */
207 spinlock_t lli_heat_lock;
208 __u32 lli_heat_flags;
209 struct obd_heat_instance lli_heat_instances[OBD_HEAT_COUNT];
212 * Whenever a process tries to read/write the file, its jobid
213 * is saved here, and it will be packed into the write RPC
214 * when flushed later.
216 * So the read/write statistics for a jobid will not be
217 * accurate if the file is shared by different jobs.
219 char lli_jobid[LUSTRE_JOBID_SIZE];
221 struct mutex lli_pcc_lock;
222 enum lu_pcc_state_flags lli_pcc_state;
224 * @lli_pcc_generation saves the global PCC generation
225 * when the file was successfully attached into PCC.
226 * The flags of the PCC dataset are saved in @lli_pcc_dsflags.
228 * The global PCC generation will be increased when a
229 * PCC backend is added or deleted, or the configuration
230 * parameters for PCC are changed.
231 * If @lli_pcc_generation is the same as the global PCC
232 * generation, we can use the saved flags of the PCC
233 * dataset to determine whether to try auto attach; a hypothetical sketch follows this struct.
236 __u64 lli_pcc_generation;
237 enum pcc_dataset_flags lli_pcc_dsflags;
238 struct pcc_inode *lli_pcc_inode;
240 struct mutex lli_group_mutex;
241 __u64 lli_group_users;
242 unsigned long lli_group_gid;
244 __u64 lli_attr_valid;
246 __u64 lli_lazyblocks;
250 /* XXX: Although the following frequently used members may only be
251 * meaningful for non-directory objects, it would waste time to check
252 * whether the object is a directory or not before using them. On the
253 * other hand, since sizeof(f) > sizeof(d) currently, moving those members
254 * into u.f would not reduce the "ll_inode_info" size.
255 * So keep them outside.
257 * In the future, if more members are added only for directories,
258 * some of the following members can be moved into u.f.
260 struct cl_object *lli_clob;
262 /* mutex to request for layout lock exclusively. */
263 struct mutex lli_layout_mutex;
264 /* Layout version, protected by lli_layout_lock */
265 __u32 lli_layout_gen;
266 spinlock_t lli_layout_lock;
268 __u32 lli_projid; /* project id */
270 struct rw_semaphore lli_xattrs_list_rwsem;
271 struct mutex lli_xattrs_enq_lock;
272 struct list_head lli_xattrs; /* ll_xattr_entry->xe_list */
273 struct list_head lli_lccs; /* list of ll_cl_context */
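/*
 * Illustrative sketch only (not part of Lustre's PCC implementation): a
 * minimal example of the generation check described in the
 * @lli_pcc_generation comment above. The helper name and the "dsflags != 0"
 * test are placeholders; the real code checks specific attach-mode flags.
 */
#if 0
static bool example_pcc_may_auto_attach(struct ll_inode_info *lli,
					__u64 global_pcc_generation)
{
	/* If the global PCC generation changed since the file was attached,
	 * the cached dataset flags can no longer be trusted. */
	if (lli->lli_pcc_generation != global_pcc_generation)
		return false;

	/* Otherwise the saved dataset flags tell us whether any auto-attach
	 * mode was configured for the dataset. */
	return lli->lli_pcc_dsflags != 0;
}
#endif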
276 #ifndef HAVE_USER_NAMESPACE_ARG
277 #define inode_permission(ns, inode, mask) inode_permission(inode, mask)
278 #define generic_permission(ns, inode, mask) generic_permission(inode, mask)
279 #define simple_setattr(ns, de, iattr) simple_setattr(de, iattr)
280 #define ll_inode_permission(ns, inode, mask) ll_inode_permission(inode, mask)
281 #ifdef HAVE_INODEOPS_ENHANCED_GETATTR
282 #define ll_getattr(ns, path, stat, mask, fl) ll_getattr(path, stat, mask, fl)
283 #endif /* HAVE_INODEOPS_ENHANCED_GETATTR */
284 #define ll_setattr(ns, de, attr) ll_setattr(de, attr)
287 static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem)
289 atomic_set(&sem->ll_trunc_readers, 0);
290 atomic_set(&sem->ll_trunc_waiters, 0);
293 /* This version of down read ignores waiting writers, meaning if the semaphore
294 * is already held for read, this down_read will 'join' that reader and also
295 * take the semaphore.
297 * This lets us avoid an unusual deadlock.
299 * We must take lli_trunc_sem in read mode on entry into various I/O paths
300 * in Lustre, in order to exclude truncates. Some of these paths then need to
301 * take the mmap_lock, while still holding the trunc_sem. The problem is that
302 * page faults hold the mmap_lock when calling in to Lustre, and then must also
303 * take the trunc_sem to exclude truncate.
305 * This means the locking order for trunc_sem and mmap_lock is sometimes AB,
306 * sometimes BA. This is almost OK because in both cases, we take the trunc
307 * sem for read, so it doesn't block.
309 * However, if a write mode user (truncate, a setattr op) arrives in the
310 * middle of this, the second reader on the truncate_sem will wait behind that
311 * writer.
313 * So we have, on our truncate sem, in order (where 'reader' and 'writer' refer
314 * to the mode in which they take the semaphore):
315 * reader (holding mmap_lock, needs truncate_sem)
316 * writer
317 * reader (holding truncate sem, waiting for mmap_lock)
319 * And so the readers deadlock.
321 * The solution is this modified semaphore, where this down_read ignores
322 * waiting write operations, and all waiters are woken up at once, so readers
323 * using down_read_nowait cannot get stuck behind waiting writers, regardless
324 * of the order they arrived in.
326 * down_read_nowait is only used in the page fault case, where we already hold
327 * the mmap_lock. This is because otherwise repeated read and write operations
328 * (which take the truncate sem) could prevent a truncate from ever starting.
329 * This could still happen with page faults, but without an even more complex
330 * mechanism, this is unavoidable. (A usage sketch follows the helpers below.)
334 static inline void trunc_sem_down_read_nowait(struct ll_trunc_sem *sem)
336 wait_var_event(&sem->ll_trunc_readers,
337 atomic_inc_unless_negative(&sem->ll_trunc_readers));
340 static inline void trunc_sem_down_read(struct ll_trunc_sem *sem)
342 wait_var_event(&sem->ll_trunc_readers,
343 atomic_read(&sem->ll_trunc_waiters) == 0 &&
344 atomic_inc_unless_negative(&sem->ll_trunc_readers));
347 static inline void trunc_sem_up_read(struct ll_trunc_sem *sem)
349 if (atomic_dec_return(&sem->ll_trunc_readers) == 0 &&
350 atomic_read(&sem->ll_trunc_waiters))
351 wake_up_var(&sem->ll_trunc_readers);
354 static inline void trunc_sem_down_write(struct ll_trunc_sem *sem)
356 atomic_inc(&sem->ll_trunc_waiters);
357 wait_var_event(&sem->ll_trunc_readers,
358 atomic_cmpxchg(&sem->ll_trunc_readers, 0, -1) == 0);
359 atomic_dec(&sem->ll_trunc_waiters);
362 static inline void trunc_sem_up_write(struct ll_trunc_sem *sem)
364 atomic_set(&sem->ll_trunc_readers, 0);
365 /* match the smp_mb() in wait_var_event()->prepare_to_wait() */
367 wake_up_var(&sem->ll_trunc_readers);
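/*
 * Usage sketch only (hypothetical callers, not Lustre's actual I/O paths):
 * how the trunc_sem helpers above are intended to pair up. Page faults,
 * which already hold mmap_lock, use the _nowait variant so they never queue
 * behind a waiting writer and re-create the deadlock described above.
 */
#if 0
static void example_io_path(struct ll_inode_info *lli)
{
	/* normal read/write path: may wait behind a pending truncate */
	trunc_sem_down_read(&lli->lli_trunc_sem);
	/* ... do I/O, possibly taking mmap_lock while holding the sem ... */
	trunc_sem_up_read(&lli->lli_trunc_sem);
}

static void example_fault_path(struct ll_inode_info *lli)
{
	/* page fault path: mmap_lock is already held, so ignore waiters */
	trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
	/* ... fill the faulted page ... */
	trunc_sem_up_read(&lli->lli_trunc_sem);
}

static void example_truncate_path(struct ll_inode_info *lli)
{
	/* truncate/setattr path: exclusive access */
	trunc_sem_down_write(&lli->lli_trunc_sem);
	/* ... change i_size, discard cached pages ... */
	trunc_sem_up_write(&lli->lli_trunc_sem);
}
#endif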
370 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
371 static inline void lli_clear_acl(struct ll_inode_info *lli)
373 if (lli->lli_posix_acl) {
374 posix_acl_release(lli->lli_posix_acl);
375 lli->lli_posix_acl = NULL;
379 static inline void lli_replace_acl(struct ll_inode_info *lli,
380 struct lustre_md *md)
382 write_lock(&lli->lli_lock);
383 if (lli->lli_posix_acl)
384 posix_acl_release(lli->lli_posix_acl);
385 lli->lli_posix_acl = md->posix_acl;
386 write_unlock(&lli->lli_lock);
389 static inline void lli_clear_acl(struct ll_inode_info *lli)
393 static inline void lli_replace_acl(struct ll_inode_info *lli,
394 struct lustre_md *md)
399 static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
403 spin_lock(&lli->lli_layout_lock);
404 gen = lli->lli_layout_gen;
405 spin_unlock(&lli->lli_layout_lock);
410 static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen)
412 spin_lock(&lli->lli_layout_lock);
413 lli->lli_layout_gen = gen;
414 spin_unlock(&lli->lli_layout_lock);
418 /* File data is modified. */
419 LLIF_DATA_MODIFIED = 0,
420 /* File is being restored */
421 LLIF_FILE_RESTORING = 1,
422 /* Xattr cache is attached to the file */
423 LLIF_XATTR_CACHE = 2,
424 /* Project inherit */
425 LLIF_PROJECT_INHERIT = 3,
426 /* update atime from MDS even if it's older than local inode atime. */
427 LLIF_UPDATE_ATIME = 4,
428 /* foreign file/dir can be unlinked unconditionally */
429 LLIF_FOREIGN_REMOVABLE = 5,
430 /* Xattr cache is filled */
431 LLIF_XATTR_CACHE_FILLED = 7,
435 int ll_xattr_cache_destroy(struct inode *inode);
436 int ll_xattr_cache_empty(struct inode *inode);
438 int ll_xattr_cache_get(struct inode *inode,
444 int ll_xattr_cache_insert(struct inode *inode,
449 static inline bool obd_connect_has_secctx(struct obd_connect_data *data)
451 #ifdef CONFIG_SECURITY
452 return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
453 data->ocd_connect_flags2 & OBD_CONNECT2_FILE_SECCTX;
459 static inline void obd_connect_set_secctx(struct obd_connect_data *data)
461 #ifdef CONFIG_SECURITY
462 data->ocd_connect_flags2 |= OBD_CONNECT2_FILE_SECCTX;
466 int ll_dentry_init_security(struct dentry *dentry, int mode, struct qstr *name,
467 const char **secctx_name, void **secctx,
469 int ll_inode_init_security(struct dentry *dentry, struct inode *inode,
472 int ll_listsecurity(struct inode *inode, char *secctx_name,
473 size_t secctx_name_size);
475 static inline bool obd_connect_has_enc(struct obd_connect_data *data)
477 #ifdef HAVE_LUSTRE_CRYPTO
478 return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
479 data->ocd_connect_flags2 & OBD_CONNECT2_ENCRYPT;
485 static inline void obd_connect_set_enc(struct obd_connect_data *data)
487 #ifdef HAVE_LUSTRE_CRYPTO
488 data->ocd_connect_flags2 |= OBD_CONNECT2_ENCRYPT;
493 * Locking to guarantee consistency of non-atomic updates to long long i_size,
494 * and consistency between file size and KMS.
496 * Implemented by ->lli_size_mutex and ->lsm_lock, nested in that order.
499 void ll_inode_size_lock(struct inode *inode);
500 void ll_inode_size_unlock(struct inode *inode);
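/*
 * Minimal sketch (hypothetical caller) of the lock/update/unlock pattern
 * implied by the comment above; the real callers live in the llite and vvp
 * code, this only illustrates the intended nesting.
 */
#if 0
static void example_update_i_size(struct inode *inode, loff_t new_size)
{
	ll_inode_size_lock(inode);
	/* i_size is a long long; the lock makes the update appear atomic to
	 * concurrent size/KMS readers, even on 32-bit kernels */
	i_size_write(inode, new_size);
	ll_inode_size_unlock(inode);
}
#endif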
502 static inline struct ll_inode_info *ll_i2info(struct inode *inode)
504 return container_of(inode, struct ll_inode_info, lli_vfs_inode);
507 static inline struct pcc_inode *ll_i2pcci(struct inode *inode)
509 return ll_i2info(inode)->lli_pcc_inode;
512 /* default to use at least 16M for fast read if possible */
513 #define RA_REMAIN_WINDOW_MIN MiB_TO_PAGES(16UL)
515 /* default read-ahead on a given client mountpoint. */
516 #define SBI_DEFAULT_READ_AHEAD_MAX MiB_TO_PAGES(1024UL)
518 /* default read-ahead for a single file descriptor */
519 #define SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX MiB_TO_PAGES(256UL)
521 /* default read-ahead full files smaller than limit on the second read */
522 #define SBI_DEFAULT_READ_AHEAD_WHOLE_MAX MiB_TO_PAGES(2UL)
524 /* default range pages */
525 #define SBI_DEFAULT_RA_RANGE_PAGES MiB_TO_PAGES(1ULL)
527 /* Min range pages */
528 #define RA_MIN_MMAP_RANGE_PAGES 16UL
533 RA_STAT_DISTANT_READPAGE,
534 RA_STAT_MISS_IN_WINDOW,
535 RA_STAT_FAILED_GRAB_PAGE,
536 RA_STAT_FAILED_MATCH,
541 RA_STAT_MAX_IN_FLIGHT,
542 RA_STAT_WRONG_GRAB_PAGE,
543 RA_STAT_FAILED_REACH_END,
545 RA_STAT_FAILED_FAST_READ,
546 RA_STAT_MMAP_RANGE_READ,
551 atomic_t ra_cur_pages;
552 unsigned long ra_max_pages;
553 unsigned long ra_max_pages_per_file;
554 unsigned long ra_range_pages;
555 unsigned long ra_max_read_ahead_whole_pages;
556 struct workqueue_struct *ll_readahead_wq;
558 * Max number of active works that can be triggered
559 * for async readahead.
561 unsigned int ra_async_max_active;
562 /* how many async readahead triggered in flight */
563 atomic_t ra_async_inflight;
564 /* Threshold to control when to trigger async readahead */
565 unsigned long ra_async_pages_per_file_threshold;
568 /* ra_io_arg will be filled at the beginning of ll_readahead with
569 * ras_lock held, then the following ll_read_ahead_pages will read RA
570 * pages according to this arg; all the items in this structure are
571 * counted by page index.
574 pgoff_t ria_start_idx; /* start offset of read-ahead*/
575 pgoff_t ria_end_idx; /* end offset of read-ahead*/
576 unsigned long ria_reserved; /* reserved pages for read-ahead */
577 pgoff_t ria_end_idx_min;/* minimum end to cover current read */
578 bool ria_eof; /* reach end of file */
579 /* If a stride read pattern is detected, ria_stoff is the byte offset
580 * where the stride read starts. Note: for normal read-ahead, the
581 * value here is meaningless and will not be accessed. */
583 /* ria_length and ria_bytes are the stride length and data bytes in
584 * stride I/O mode. They are also used to check whether
585 * the read-ahead pages belong to a stride I/O read-ahead. */
590 /* LL_HIST_MAX=32 causes an overflow */
591 #define LL_HIST_MAX 28
592 #define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
593 #define LL_PROCESS_HIST_MAX 10
594 struct per_process_info {
596 struct obd_histogram pp_r_hist;
597 struct obd_histogram pp_w_hist;
600 /* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
601 struct ll_rw_extents_info {
603 struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
606 #define LL_OFFSET_HIST_MAX 100
607 struct ll_rw_process_info {
610 loff_t rw_range_start;
612 loff_t rw_last_file_pos;
614 size_t rw_smallest_extent;
615 size_t rw_largest_extent;
616 struct ll_file_data *rw_last_file;
619 enum stats_track_type {
620 STATS_TRACK_ALL = 0, /* track all processes */
621 STATS_TRACK_PID, /* track process with this pid */
622 STATS_TRACK_PPID, /* track processes with this ppid */
623 STATS_TRACK_GID, /* track processes with this gid */
627 /* flags for sbi->ll_flags */
629 LL_SBI_NOLCK, /* DLM locking disabled directio-only */
630 LL_SBI_CHECKSUM, /* checksum each page as it's written */
631 LL_SBI_LOCALFLOCK, /* local flocks instead of fs-wide */
632 LL_SBI_FLOCK, /* flock enabled */
633 LL_SBI_USER_XATTR, /* support user xattr */
634 LL_SBI_LRU_RESIZE, /* lru resize support */
635 LL_SBI_LAZYSTATFS, /* lazystatfs mount option */
636 LL_SBI_32BIT_API, /* generate 32 bit inodes. */
637 LL_SBI_USER_FID2PATH, /* fid2path by unprivileged users */
638 LL_SBI_VERBOSE, /* verbose mount/umount */
639 LL_SBI_ALWAYS_PING, /* ping even if server suppress_pings */
640 LL_SBI_TEST_DUMMY_ENCRYPTION, /* test dummy encryption */
641 LL_SBI_ENCRYPT, /* client side encryption */
642 LL_SBI_FOREIGN_SYMLINK, /* foreign fake-symlink support */
643 LL_SBI_FOREIGN_SYMLINK_UPCALL, /* foreign fake-symlink upcall set */
644 LL_SBI_NUM_MOUNT_OPT,
646 LL_SBI_ACL, /* support ACL */
647 LL_SBI_AGL_ENABLED, /* enable agl */
648 LL_SBI_64BIT_HASH, /* support 64-bits dir hash/offset */
649 LL_SBI_LAYOUT_LOCK, /* layout lock support */
650 LL_SBI_XATTR_CACHE, /* support for xattr cache */
651 LL_SBI_NOROOTSQUASH, /* do not apply root squash */
652 LL_SBI_FAST_READ, /* fast read support */
653 LL_SBI_FILE_SECCTX, /* file security context at create */
654 LL_SBI_TINY_WRITE, /* tiny write support */
655 LL_SBI_FILE_HEAT, /* file heat support */
656 LL_SBI_PARALLEL_DIO, /* parallel (async) O_DIRECT RPCs */
660 int ll_sbi_flags_seq_show(struct seq_file *m, void *v);
662 /* This is embedded into llite super-blocks to keep track of connect
663 * flags (capabilities) supported by all imports a given mount is
664 * connected to. */
665 struct lustre_client_ocd {
666 /* This is the conjunction of connect_flags across all imports
667 * (LOVs) this mount is connected to. This field is updated by
668 * cl_ocd_update() under ->lco_lock. */
670 struct mutex lco_lock;
671 struct obd_export *lco_md_exp;
672 struct obd_export *lco_dt_exp;
676 /* this protects pglist and ra_info. It isn't safe to
677 * grab from interrupt contexts */
679 spinlock_t ll_pp_extent_lock; /* pp_extent entry*/
680 spinlock_t ll_process_lock; /* ll_rw_process_info */
681 struct obd_uuid ll_sb_uuid;
682 struct obd_export *ll_md_exp;
683 struct obd_export *ll_dt_exp;
684 struct obd_device *ll_md_obd;
685 struct obd_device *ll_dt_obd;
686 struct dentry *ll_debugfs_entry;
687 struct lu_fid ll_root_fid; /* root object fid */
688 struct mnt_namespace *ll_mnt_ns;
690 DECLARE_BITMAP(ll_flags, LL_SBI_NUM_FLAGS); /* enum ll_sbi_flags */
691 unsigned int ll_xattr_cache_enabled:1,
692 ll_xattr_cache_set:1, /* already set to 0/1 */
693 ll_client_common_fill_super_succeeded:1,
695 ll_inode_cache_enabled:1;
697 struct lustre_client_ocd ll_lco;
699 struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
701 /* Used to track "unstable" pages on a client, and maintain a
702 * LRU list of clean pages. An "unstable" page is defined as
703 * any page which is sent to a server as part of a bulk request,
704 * but is uncommitted to stable storage. */
705 struct cl_client_cache *ll_cache;
707 struct lprocfs_stats *ll_ra_stats;
709 struct ll_ra_info ll_ra_info;
710 unsigned int ll_namelen;
711 const struct file_operations *ll_fop;
713 struct lu_site *ll_site;
714 struct cl_device *ll_cl;
717 struct ll_rw_extents_info *ll_rw_extents_info;
718 int ll_extent_process_count;
719 unsigned int ll_offset_process_count;
720 struct ll_rw_process_info *ll_rw_process_info;
721 struct ll_rw_process_info *ll_rw_offset_info;
722 ktime_t ll_process_stats_init;
723 unsigned int ll_rw_offset_entry_count;
724 int ll_stats_track_id;
725 enum stats_track_type ll_stats_track_type;
728 /* metadata stat-ahead */
729 unsigned int ll_sa_running_max;/* max concurrent
730 * statahead instances */
731 unsigned int ll_sa_max; /* max statahead RPCs */
732 atomic_t ll_sa_total; /* statahead thread started
734 atomic_t ll_sa_wrong; /* statahead thread stopped for
736 atomic_t ll_sa_running; /* running statahead thread
738 atomic_t ll_agl_total; /* AGL thread started count */
740 dev_t ll_sdev_orig; /* save s_dev before assign for
743 struct root_squash_info ll_squash;
746 /* st_blksize returned by stat(2), when non-zero */
747 unsigned int ll_stat_blksize;
749 /* maximum relative age of cached statfs results */
750 unsigned int ll_statfs_max_age;
752 struct kset ll_kset; /* sysfs object */
753 struct completion ll_kobj_unregister;
756 unsigned int ll_heat_decay_weight;
757 unsigned int ll_heat_period_second;
759 /* Opens of the same inode before we start requesting open lock */
760 u32 ll_oc_thrsh_count;
762 /* Time in ms between last inode close and next open for them to be
763 * considered instant back-to-back, which would trigger an open lock request
767 /* Time in ms after the last file close beyond which prior opens are no longer counted (see the sketch after the opencache defines below) */
770 /* filesystem fsname */
771 char ll_fsname[LUSTRE_MAXFSNAME + 1];
773 /* Persistent Client Cache */
774 struct pcc_super ll_pcc_super;
776 /* to protect vs updates in all following foreign symlink fields */
777 struct rw_semaphore ll_foreign_symlink_sem;
778 /* foreign symlink path prefix */
779 char *ll_foreign_symlink_prefix;
780 /* full prefix size including leading '\0' */
781 size_t ll_foreign_symlink_prefix_size;
782 /* foreign symlink path upcall */
783 char *ll_foreign_symlink_upcall;
784 /* foreign symlink path upcall infos */
785 struct ll_foreign_symlink_upcall_item *ll_foreign_symlink_upcall_items;
786 /* foreign symlink path upcall nb infos */
787 unsigned int ll_foreign_symlink_upcall_nb_items;
790 #define SBI_DEFAULT_HEAT_DECAY_WEIGHT ((80 * 256 + 50) / 100)
791 #define SBI_DEFAULT_HEAT_PERIOD_SECOND (60)
793 #define SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT (5)
794 #define SBI_DEFAULT_OPENCACHE_THRESHOLD_MS (100) /* 0.1 second */
795 #define SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS (60000) /* 1 minute */
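/*
 * Illustrative sketch only (hypothetical helper, simplified to the fields
 * visible in this header): the open-cache heuristic suggested by
 * ll_oc_thrsh_count and the SBI_DEFAULT_OPENCACHE_* defaults. The real
 * decision is made in the file open path together with ll_track_file_opens().
 */
#if 0
static bool example_should_request_open_lock(struct ll_inode_info *lli,
					     struct ll_sb_info *sbi)
{
	/* back-to-back reopen: the last close was only a few ms ago */
	if (ktime_ms_delta(ktime_get(), lli->lli_close_fd_time) <
	    SBI_DEFAULT_OPENCACHE_THRESHOLD_MS)
		return true;

	/* or the inode has simply been opened often enough */
	return lli->lli_open_fd_count >= sbi->ll_oc_thrsh_count;
}
#endif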
798 * per file-descriptor read-ahead data.
800 struct ll_readahead_state {
802 /* End byte that read(2) tries to read. */
803 loff_t ras_last_read_end_bytes;
805 * number of bytes read after last read-ahead window reset. As window
806 * is reset on each seek, this is effectively a number of consecutive
807 * accesses. Maybe ->ras_accessed_in_window is better name.
809 * XXX nikita: window is also reset (by ras_update()) when Lustre
810 * believes that memory pressure evicts read-ahead pages. In that
811 * case, it probably doesn't make sense to expand window to
812 * PTLRPC_MAX_BRW_PAGES on the third access.
814 loff_t ras_consecutive_bytes;
816 * number of read requests after the last read-ahead window reset.
817 * As the window is reset on each seek, this is effectively the number
818 * of consecutive read requests and is used to trigger read-ahead.
820 unsigned long ras_consecutive_requests;
822 * Parameters of current read-ahead window. Handled by
823 * ras_update(). On the initial access to the file or after a seek,
824 * window is reset to 0. After 3 consecutive accesses, window is
825 * expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
826 * PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
828 pgoff_t ras_window_start_idx;
829 pgoff_t ras_window_pages;
831 /* Page index where min range read starts */
832 pgoff_t ras_range_min_start_idx;
833 /* Page index where mmap range read ends */
834 pgoff_t ras_range_max_end_idx;
835 /* number of mmap pages where last time detected */
836 pgoff_t ras_last_range_pages;
837 /* number of mmap range requests */
838 pgoff_t ras_range_requests;
841 * Optimal RPC size in pages.
842 * It decides how many pages will be sent for each read-ahead.
844 unsigned long ras_rpc_pages;
846 * Where the next read-ahead should start. This lies within the read-ahead
847 * window. Read-ahead window is read in pieces rather than at once
848 * because: 1. lustre limits total number of pages under read-ahead by
849 * ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
850 * not covered by DLM lock.
852 pgoff_t ras_next_readahead_idx;
854 * Total number of ll_file_read requests issued, reads originating
855 * due to mmap are not counted in this total. This value is used to
856 * trigger full file read-ahead after multiple reads to a small file.
858 unsigned long ras_requests;
860 * The following 3 items are used for detecting the stride I/O mode (a hypothetical sketch follows this struct).
862 * In stride I/O mode,
863 * ...............|-----data-----|****gap*****|--------|******|....
864 * offset |-stride_bytes-|-stride_gap-|
865 * ras_stride_offset = offset;
866 * ras_stride_length = stride_bytes + stride_gap;
867 * ras_stride_bytes = stride_bytes;
868 * Note: all these three items are counted by bytes.
870 loff_t ras_stride_offset;
871 loff_t ras_stride_length;
872 loff_t ras_stride_bytes;
874 * number of consecutive stride requests; it is similar to
875 * ras_consecutive_requests, but used for stride I/O mode.
876 * Note: stride read-ahead will be enabled only after more than 2
877 * consecutive stride requests are detected.
879 unsigned long ras_consecutive_stride_requests;
880 /* index of the last page that async readahead starts */
881 pgoff_t ras_async_last_readpage_idx;
882 /* whether we should increase readahead window */
883 bool ras_need_increase_window;
884 /* whether ra miss check should be skipped */
885 bool ras_no_miss_check;
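/*
 * Hypothetical sketch (not Lustre's actual stride-detection code) of how a
 * file position can be tested against the stride window described by
 * ras_stride_offset/ras_stride_length/ras_stride_bytes above.
 */
#if 0
static bool example_offset_in_stride_data(struct ll_readahead_state *ras,
					  loff_t pos)
{
	loff_t off;

	if (ras->ras_stride_length == 0 || pos < ras->ras_stride_offset)
		return false;

	/* position within the current stride period */
	off = (pos - ras->ras_stride_offset) % ras->ras_stride_length;

	/* the first ras_stride_bytes of each period carry data,
	 * the remainder is the gap */
	return off < ras->ras_stride_bytes;
}
#endif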
888 struct ll_readahead_work {
889 /** File to readahead */
890 struct file *lrw_file;
891 pgoff_t lrw_start_idx;
895 /* async worker to handle read */
896 struct work_struct lrw_readahead_work;
897 char lrw_jobid[LUSTRE_JOBID_SIZE];
900 extern struct kmem_cache *ll_file_data_slab;
901 struct lustre_handle;
902 struct ll_file_data {
903 struct ll_readahead_state fd_ras;
904 struct ll_grouplock fd_grouplock;
908 /* openhandle if lease exists for this file.
909 * Borrow lli->lli_och_mutex to protect assignment */
910 struct obd_client_handle *fd_lease_och;
911 struct obd_client_handle *fd_och;
912 struct file *fd_file;
913 /* Indicates whether a failure needs to be reported on close.
914 * true: the failure is already known, do not report it again.
915 * false: unknown failure, should report. */
916 bool fd_write_failed;
917 bool ll_lock_no_expand;
918 /* Used by mirrored file to lead IOs to a specific mirror, usually
919 * for mirror resync. 0 means default. */
920 __u32 fd_designated_mirror;
921 /* The layout version when resync starts. Resync I/O should carry this
922 * layout version for verification to OST objects */
923 __u32 fd_layout_version;
924 struct pcc_file fd_pcc_file;
925 /* a striped directory may be read partially if some stripe is inaccessible;
926 * -errno is saved here, and will be returned to userspace in close().
928 int fd_partial_readdir_rc;
931 void llite_tunables_unregister(void);
932 int llite_tunables_register(void);
934 static inline struct inode *ll_info2i(struct ll_inode_info *lli)
936 return &lli->lli_vfs_inode;
939 __u32 ll_i2suppgid(struct inode *i);
940 void ll_i2gids(__u32 *suppgids, struct inode *i1,struct inode *i2);
942 static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
944 #if BITS_PER_LONG == 32
946 #elif defined(CONFIG_COMPAT)
947 if (unlikely(test_bit(LL_SBI_32BIT_API, sbi->ll_flags)))
950 # ifdef CONFIG_X86_X32
951 /* in_compat_syscall() returns true when called from a kthread
952 * and CONFIG_X86_X32 is enabled, which is wrong. So check
953 * whether the caller comes from a syscall (ie. not a kthread)
954 * before calling in_compat_syscall(). */
955 if (current->flags & PF_KTHREAD)
959 return unlikely(in_compat_syscall());
961 return unlikely(test_bit(LL_SBI_32BIT_API, sbi->ll_flags));
965 static inline bool ll_sbi_has_fast_read(struct ll_sb_info *sbi)
967 return test_bit(LL_SBI_FAST_READ, sbi->ll_flags);
970 static inline bool ll_sbi_has_tiny_write(struct ll_sb_info *sbi)
972 return test_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
975 static inline bool ll_sbi_has_file_heat(struct ll_sb_info *sbi)
977 return test_bit(LL_SBI_FILE_HEAT, sbi->ll_flags);
980 static inline bool ll_sbi_has_foreign_symlink(struct ll_sb_info *sbi)
982 return test_bit(LL_SBI_FOREIGN_SYMLINK, sbi->ll_flags);
985 static inline bool ll_sbi_has_parallel_dio(struct ll_sb_info *sbi)
987 return test_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
990 void ll_ras_enter(struct file *f, loff_t pos, size_t count);
992 /* llite/lcommon_misc.c */
993 int cl_ocd_update(struct obd_device *host, struct obd_device *watched,
994 enum obd_notify_event ev, void *owner);
995 int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
996 struct ll_grouplock *lg);
997 void cl_put_grouplock(struct ll_grouplock *lg);
999 /* llite/lproc_llite.c */
1000 int ll_debugfs_register_super(struct super_block *sb, const char *name);
1001 void ll_debugfs_unregister_super(struct super_block *sb);
1002 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, long count);
1003 void ll_free_rw_stats_info(struct ll_sb_info *sbi);
1006 LPROC_LL_READ_BYTES,
1007 LPROC_LL_WRITE_BYTES,
1034 LPROC_LL_GETXATTR_HITS,
1036 LPROC_LL_REMOVEXATTR,
1037 LPROC_LL_INODE_PERM,
1039 LPROC_LL_INODE_OCOUNT,
1040 LPROC_LL_INODE_OPCLTM,
1041 LPROC_LL_FILE_OPCODES
1045 enum get_default_layout_type {
1046 GET_DEFAULT_LAYOUT_ROOT = 1,
1049 extern const struct file_operations ll_dir_operations;
1050 extern const struct inode_operations ll_dir_inode_operations;
1051 #ifdef HAVE_DIR_CONTEXT
1052 int ll_dir_read(struct inode *inode, __u64 *pos, struct md_op_data *op_data,
1053 struct dir_context *ctx, int *partial_readdir_rc);
1055 int ll_dir_read(struct inode *inode, __u64 *pos, struct md_op_data *op_data,
1056 void *cookie, filldir_t filldir, int *partial_readdir_rc);
1058 int ll_get_mdt_idx(struct inode *inode);
1059 int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid);
1060 struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
1061 __u64 offset, int *partial_readdir_rc);
1062 void ll_release_page(struct inode *inode, struct page *page, bool remove);
1063 int quotactl_ioctl(struct super_block *sb, struct if_quotactl *qctl);
1066 extern const struct inode_operations ll_special_inode_operations;
1068 struct inode *ll_iget(struct super_block *sb, ino_t hash,
1069 struct lustre_md *lic);
1070 int ll_test_inode_by_fid(struct inode *inode, void *opaque);
1071 int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
1072 void *data, int flag);
1073 struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
1074 int ll_rmdir_entry(struct inode *dir, char *name, int namelen);
1075 void ll_update_times(struct ptlrpc_request *request, struct inode *inode);
1078 int ll_writepage(struct page *page, struct writeback_control *wbc);
1079 int ll_writepages(struct address_space *, struct writeback_control *wbc);
1080 int ll_readpage(struct file *file, struct page *page);
1081 int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
1082 struct cl_page *page, struct file *file);
1083 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
1084 int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
1087 void ll_cl_add(struct inode *inode, const struct lu_env *env, struct cl_io *io,
1088 enum lcc_type type);
1089 void ll_cl_remove(struct inode *inode, const struct lu_env *env);
1090 struct ll_cl_context *ll_cl_find(struct inode *inode);
1092 extern const struct address_space_operations ll_aops;
1095 extern const struct inode_operations ll_file_inode_operations;
1096 const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi);
1097 extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
1098 enum ldlm_mode l_req_mode);
1099 extern enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
1100 struct lustre_handle *lockh, __u64 flags,
1101 enum ldlm_mode mode);
1103 int ll_file_open(struct inode *inode, struct file *file);
1104 int ll_file_release(struct inode *inode, struct file *file);
1105 int ll_release_openhandle(struct dentry *, struct lookup_intent *);
1106 int ll_md_real_close(struct inode *inode, fmode_t fmode);
1107 void ll_track_file_opens(struct inode *inode);
1108 extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1109 struct ll_file_data *file, loff_t pos,
1110 size_t count, int rw);
1111 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
1112 int ll_getattr(struct user_namespace *mnt_userns, const struct path *path,
1113 struct kstat *stat, u32 request_mask, unsigned int flags);
1115 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
1116 #endif /* HAVE_USER_NAMESPACE_ARG */
1117 int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
1118 unsigned int flags, bool foreign);
1119 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
1120 struct posix_acl *ll_get_acl(struct inode *inode, int type
1121 #ifdef HAVE_GET_ACL_RCU_ARG
1123 #endif /* HAVE_GET_ACL_RCU_ARG */
1125 int ll_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
1126 struct posix_acl *acl, int type);
1127 #else /* !CONFIG_LUSTRE_FS_POSIX_ACL */
1128 #define ll_get_acl NULL
1129 #define ll_set_acl NULL
1130 #endif /* CONFIG_LUSTRE_FS_POSIX_ACL */
1132 static inline int ll_xflags_to_inode_flags(int xflags)
1134 return ((xflags & FS_XFLAG_SYNC) ? S_SYNC : 0) |
1135 ((xflags & FS_XFLAG_NOATIME) ? S_NOATIME : 0) |
1136 ((xflags & FS_XFLAG_APPEND) ? S_APPEND : 0) |
1137 ((xflags & FS_XFLAG_IMMUTABLE) ? S_IMMUTABLE : 0);
1140 static inline int ll_inode_flags_to_xflags(int inode_flags)
1142 return ((inode_flags & S_SYNC) ? FS_XFLAG_SYNC : 0) |
1143 ((inode_flags & S_NOATIME) ? FS_XFLAG_NOATIME : 0) |
1144 ((inode_flags & S_APPEND) ? FS_XFLAG_APPEND : 0) |
1145 ((inode_flags & S_IMMUTABLE) ? FS_XFLAG_IMMUTABLE : 0);
1148 int ll_migrate(struct inode *parent, struct file *file,
1149 struct lmv_user_md *lum, const char *name, __u32 flags);
1150 int ll_get_fid_by_name(struct inode *parent, const char *name,
1151 int namelen, struct lu_fid *fid, struct inode **inode);
1152 int ll_inode_permission(struct user_namespace *mnt_userns, struct inode *inode,
1154 int ll_ioctl_check_project(struct inode *inode, __u32 xflags, __u32 projid);
1155 int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
1157 int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
1159 int ll_ioctl_project(struct file *file, unsigned int cmd,
1162 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
1163 __u64 flags, struct lov_user_md *lum,
1165 int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
1166 struct lov_mds_md **lmm, int *lmm_size,
1167 struct ptlrpc_request **request);
1168 int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
1170 int ll_dir_get_default_layout(struct inode *inode, void **plmm, int *plmm_size,
1171 struct ptlrpc_request **request, u64 valid,
1172 enum get_default_layout_type type);
1173 int ll_dir_getstripe_default(struct inode *inode, void **lmmp,
1174 int *lmm_size, struct ptlrpc_request **request,
1175 struct ptlrpc_request **root_request, u64 valid);
1176 int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
1177 struct ptlrpc_request **request, u64 valid);
1178 int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
1179 int ll_merge_attr(const struct lu_env *env, struct inode *inode);
1180 int ll_fid2path(struct inode *inode, void __user *arg);
1181 int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
1182 int ll_hsm_release(struct inode *inode);
1183 int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss);
1184 void ll_io_set_mirror(struct cl_io *io, const struct file *file);
1186 /* llite/dcache.c */
1188 extern const struct dentry_operations ll_d_ops;
1190 bool ll_d_setup(struct dentry *de, bool do_put);
1192 static inline bool lld_is_init(struct dentry *dentry)
1194 return ll_d2d(dentry);
1197 #define ll_d_setup(de, do_put) (true)
1198 #define lld_is_init(dentry) (true)
1201 void ll_intent_drop_lock(struct lookup_intent *);
1202 void ll_intent_release(struct lookup_intent *);
1203 void ll_prune_aliases(struct inode *inode);
1204 void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
1205 int ll_revalidate_it_finish(struct ptlrpc_request *request,
1206 struct lookup_intent *it, struct dentry *de);
1208 /* llite/llite_lib.c */
1209 extern const struct super_operations lustre_super_operations;
1211 void ll_lli_init(struct ll_inode_info *lli);
1212 int ll_fill_super(struct super_block *sb);
1213 void ll_put_super(struct super_block *sb);
1214 void ll_kill_super(struct super_block *sb);
1215 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
1216 void ll_dir_clear_lsm_md(struct inode *inode);
1217 void ll_clear_inode(struct inode *inode);
1218 int volatile_ref_file(const char *volatile_name, int volatile_len,
1219 struct file **ref_file);
1220 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
1221 enum op_xvalid xvalid, bool hsm_import);
1222 int ll_setattr(struct user_namespace *mnt_userns, struct dentry *de,
1223 struct iattr *attr);
1224 int ll_statfs(struct dentry *de, struct kstatfs *sfs);
1225 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
1227 int ll_update_inode(struct inode *inode, struct lustre_md *md);
1228 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags);
1229 void ll_update_dir_depth(struct inode *dir, struct inode *inode);
1230 int ll_read_inode2(struct inode *inode, void *opaque);
1231 void ll_truncate_inode_pages_final(struct inode *inode);
1232 void ll_delete_inode(struct inode *inode);
1233 int ll_iocontrol(struct inode *inode, struct file *file,
1234 unsigned int cmd, unsigned long arg);
1235 int ll_flush_ctx(struct inode *inode);
1236 void ll_umount_begin(struct super_block *sb);
1237 int ll_remount_fs(struct super_block *sb, int *flags, char *data);
1238 int ll_show_options(struct seq_file *seq, struct dentry *dentry);
1239 void ll_dirty_page_discard_warn(struct inode *inode, int ioret);
1240 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
1241 struct super_block *sb, struct lookup_intent *it);
1242 int ll_obd_statfs(struct inode *inode, void __user *arg);
1243 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
1244 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
1245 int ll_set_default_mdsize(struct ll_sb_info *sbi, int default_mdsize);
1247 void ll_unlock_md_op_lsm(struct md_op_data *op_data);
1248 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
1249 struct inode *i1, struct inode *i2,
1250 const char *name, size_t namelen,
1251 __u32 mode, enum md_op_code opc,
1253 void ll_finish_md_op_data(struct md_op_data *op_data);
1254 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
1255 void ll_compute_rootsquash_state(struct ll_sb_info *sbi);
1256 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
1257 struct lov_user_md **kbuf);
1258 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill);
1260 void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req);
1262 /* Compute expected user md size when passing in a md from user space */
1263 static inline ssize_t ll_lov_user_md_size(const struct lov_user_md *lum)
1265 switch (lum->lmm_magic) {
1266 case LOV_USER_MAGIC_V1:
1267 return sizeof(struct lov_user_md_v1);
1268 case LOV_USER_MAGIC_V3:
1269 return sizeof(struct lov_user_md_v3);
1270 case LOV_USER_MAGIC_SPECIFIC:
1271 if (lum->lmm_stripe_count > LOV_MAX_STRIPE_COUNT)
1274 return lov_user_md_size(lum->lmm_stripe_count,
1275 LOV_USER_MAGIC_SPECIFIC);
1276 case LOV_USER_MAGIC_COMP_V1:
1277 return ((struct lov_comp_md_v1 *)lum)->lcm_size;
1278 case LOV_USER_MAGIC_FOREIGN:
1279 return foreign_size(lum);
1285 /* llite/llite_nfs.c */
1286 extern const struct export_operations lustre_export_operations;
1287 __u32 get_uuid2int(const char *name, int len);
1288 struct inode *search_inode_for_lustre(struct super_block *sb,
1289 const struct lu_fid *fid);
1290 int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);
1292 /* llite/symlink.c */
1293 extern const struct inode_operations ll_fast_symlink_inode_operations;
1296 * IO arguments for various VFS I/O interfaces.
1298 struct vvp_io_args {
1299 /** normal/sendfile/splice */
1302 struct kiocb *via_iocb;
1303 struct iov_iter *via_iter;
1313 struct ll_cl_context {
1314 struct list_head lcc_list;
1316 const struct lu_env *lcc_env;
1317 struct cl_io *lcc_io;
1318 struct cl_page *lcc_page;
1319 enum lcc_type lcc_type;
1322 struct ll_thread_info {
1323 struct vvp_io_args lti_args;
1324 struct ra_io_arg lti_ria;
1325 struct ll_cl_context lti_io_ctx;
1328 extern struct lu_context_key ll_thread_key;
1330 static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
1332 struct ll_thread_info *lti;
1334 lti = lu_context_key_get(&env->le_ctx, &ll_thread_key);
1335 LASSERT(lti != NULL);
1340 static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
1342 return &ll_env_info(env)->lti_args;
1345 void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
1346 struct vvp_io_args *args);
1348 /* llite/llite_mmap.c */
1350 int ll_file_mmap(struct file * file, struct vm_area_struct * vma);
1351 void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
1352 unsigned long addr, size_t count);
1353 struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
1356 #define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi)
1358 /* don't need an addref as the sb_info should be holding one */
1359 static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
1361 return ll_s2sbi(sb)->ll_dt_exp;
1364 /* don't need an addref as the sb_info should be holding one */
1365 static inline struct obd_export *ll_s2mdexp(struct super_block *sb)
1367 return ll_s2sbi(sb)->ll_md_exp;
1370 static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
1372 struct obd_device *obd = sbi->ll_md_exp->exp_obd;
1378 // FIXME: replace the name of this with LL_SB to conform to kernel stuff
1379 static inline struct ll_sb_info *ll_i2sbi(struct inode *inode)
1381 return ll_s2sbi(inode->i_sb);
1384 static inline struct obd_export *ll_i2dtexp(struct inode *inode)
1386 return ll_s2dtexp(inode->i_sb);
1389 static inline struct obd_export *ll_i2mdexp(struct inode *inode)
1391 return ll_s2mdexp(inode->i_sb);
1394 static inline struct lu_fid *ll_inode2fid(struct inode *inode)
1398 LASSERT(inode != NULL);
1399 fid = &ll_i2info(inode)->lli_fid;
1404 static inline bool ll_dir_striped(struct inode *inode)
1406 struct ll_inode_info *lli;
1410 if (!S_ISDIR(inode->i_mode))
1413 lli = ll_i2info(inode);
1414 if (!lli->lli_lsm_md)
1417 down_read(&lli->lli_lsm_sem);
1418 rc = lmv_dir_striped(lli->lli_lsm_md);
1419 up_read(&lli->lli_lsm_sem);
1424 static inline loff_t ll_file_maxbytes(struct inode *inode)
1426 struct cl_object *obj = ll_i2info(inode)->lli_clob;
1429 return MAX_LFS_FILESIZE;
1431 return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE);
1435 extern const struct xattr_handler *ll_xattr_handlers[];
1437 #define XATTR_USER_T 1
1438 #define XATTR_TRUSTED_T 2
1439 #define XATTR_SECURITY_T 3
1440 #define XATTR_ACL_ACCESS_T 4
1441 #define XATTR_ACL_DEFAULT_T 5
1442 #define XATTR_LUSTRE_T 6
1443 #define XATTR_OTHER_T 7
1444 #define XATTR_ENCRYPTION_T 9
1446 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
1447 int ll_xattr_list(struct inode *inode, const char *name, int type,
1448 void *buffer, size_t size, u64 valid);
1449 const struct xattr_handler *get_xattr_type(const char *name);
1452 * Common IO arguments for various VFS I/O interfaces.
1454 int cl_sb_init(struct super_block *sb);
1455 int cl_sb_fini(struct super_block *sb);
1457 enum ras_update_flags {
1461 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
1462 void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
1466 #define LL_SA_RPC_MIN 2
1467 #define LL_SA_RPC_DEF 32
1468 #define LL_SA_RPC_MAX 512
1470 /* XXX: If we want to support more concurrent statahead instances,
1471 * please consider decentralizing the RPC lists attached
1472 * to the related import, such as imp_{sending,delayed}_list.
1474 #define LL_SA_RUNNING_MAX 256
1475 #define LL_SA_RUNNING_DEF 16
1477 #define LL_SA_CACHE_BIT 5
1478 #define LL_SA_CACHE_SIZE (1 << LL_SA_CACHE_BIT)
1479 #define LL_SA_CACHE_MASK (LL_SA_CACHE_SIZE - 1)
1481 /* per inode struct, for dir only */
1482 struct ll_statahead_info {
1483 struct dentry *sai_dentry;
1484 atomic_t sai_refcount; /* when access this struct, hold
1486 unsigned int sai_max; /* max ahead of lookup */
1487 __u64 sai_sent; /* stat requests sent count */
1488 __u64 sai_replied; /* stat requests which received
1490 __u64 sai_index; /* index of statahead entry */
1491 __u64 sai_index_wait; /* index of the entry that the
1492 * caller is waiting for */
1493 __u64 sai_hit; /* hit count */
1494 __u64 sai_miss; /* miss count:
1495 * for "ls -al" case, includes
1496 * hidden dentry miss;
1497 * for "ls -l" case, it does not
1498 * include hidden dentry miss.
1499 * "sai_miss_hidden" is used for
1502 unsigned int sai_consecutive_miss; /* consecutive miss */
1503 unsigned int sai_miss_hidden;/* "ls -al", but first dentry
1504 * is not a hidden one */
1505 unsigned int sai_skip_hidden;/* skipped hidden dentry count
1507 unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
1509 sai_in_readpage:1;/* statahead is in readdir()*/
1510 wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
1511 struct task_struct *sai_task; /* stat-ahead thread */
1512 struct task_struct *sai_agl_task; /* AGL thread */
1513 struct list_head sai_interim_entries; /* entries which got async
1514 * stat reply, but not
1516 struct list_head sai_entries; /* completed entries */
1517 struct list_head sai_agls; /* AGLs to be sent */
1518 struct list_head sai_cache[LL_SA_CACHE_SIZE];
1519 spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
1520 atomic_t sai_cache_count; /* entry count in cache */
1523 int ll_revalidate_statahead(struct inode *dir, struct dentry **dentry,
1525 int ll_start_statahead(struct inode *dir, struct dentry *dentry, bool agl);
1526 void ll_authorize_statahead(struct inode *dir, void *key);
1527 void ll_deauthorize_statahead(struct inode *dir, void *key);
1530 blkcnt_t dirty_cnt(struct inode *inode);
1532 int cl_glimpse_size0(struct inode *inode, int agl);
1533 int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
1534 struct inode *inode, struct cl_object *clob, int agl);
1536 static inline int cl_glimpse_size(struct inode *inode)
1538 return cl_glimpse_size0(inode, 0);
1541 /* AGL is 'asynchronous glimpse lock', which is a speculative lock taken as
1542 * part of statahead */
1543 static inline int cl_agl(struct inode *inode)
1545 return cl_glimpse_size0(inode, 1);
1548 int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise);
1550 int cl_io_get(struct inode *inode, struct lu_env **envout,
1551 struct cl_io **ioout, __u16 *refcheck);
1553 static inline int ll_glimpse_size(struct inode *inode)
1555 struct ll_inode_info *lli = ll_i2info(inode);
1558 down_read(&lli->lli_glimpse_sem);
1559 rc = cl_glimpse_size(inode);
1560 lli->lli_glimpse_time = ktime_get();
1561 up_read(&lli->lli_glimpse_sem);
1565 /* a dentry may use statahead when statahead is enabled, the current process has
1566 * opened the parent directory, and this dentry hasn't accessed the statahead cache before */
1568 dentry_may_statahead(struct inode *dir, struct dentry *dentry)
1570 struct ll_inode_info *lli;
1571 struct ll_dentry_data *ldd;
1573 if (ll_i2sbi(dir)->ll_sa_max == 0)
1576 lli = ll_i2info(dir);
1578 /* statahead is not allowed for this dir, there may be three causes:
1579 * 1. dir is not opened.
1580 * 2. statahead hit ratio is too low.
1581 * 3. a statahead thread started by a previous stat failed. */
1582 if (!lli->lli_sa_enabled)
1585 /* not the same process, don't statahead */
1586 if (lli->lli_opendir_pid != current->pid)
1590 * When stating a dentry, the kernel may trigger 'revalidate' or 'lookup'
1591 * multiple times, e.g. for 'getattr', 'getxattr', etc.
1592 * For a patchless client, the lookup intent is not accurate, which may
1593 * misguide statahead. For example:
1594 * the 'revalidate' calls for 'getattr' and 'getxattr' of a dentry will
1595 * have the same intent -- IT_GETATTR, while a dentry should access the
1596 * statahead cache only once, otherwise the statahead window is messed up.
1597 * The solution is as follows:
1598 * assign 'lld_sa_generation' from 'lli_sa_generation' when a dentry gets
1599 * IT_GETATTR for the first time, and subsequent IT_GETATTRs will
1600 * bypass interacting with the statahead cache by checking
1601 * 'lld_sa_generation == lli->lli_sa_generation'.
1603 ldd = ll_d2d(dentry);
1604 if (ldd != NULL && lli->lli_sa_generation &&
1605 ldd->lld_sa_generation == lli->lli_sa_generation)
1611 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
1612 enum cl_fsync_mode mode, int ignore_layout);
1614 static inline int ll_file_nolock(const struct file *file)
1616 struct ll_file_data *fd = file->private_data;
1617 struct inode *inode = file_inode((struct file *)file);
1619 LASSERT(fd != NULL);
1620 return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
1621 test_bit(LL_SBI_NOLCK, ll_i2sbi(inode)->ll_flags));
1624 static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
1625 struct lookup_intent *it, __u64 *bits)
1627 if (!it->it_lock_set) {
1628 struct lustre_handle handle;
1630 /* If this inode is a remote object, it will get two
1631 * separate locks in different namespaces: the master MDT,
1632 * where the name entry is, will grant a LOOKUP lock, and the
1633 * remote MDT, where the object is, will grant an
1634 * UPDATE|PERM lock. The inode will be attached to both
1635 * LOOKUP and PERM locks, so revoking either lock will
1636 * cause the dcache to be cleared */
1637 if (it->it_remote_lock_mode) {
1638 handle.cookie = it->it_remote_lock_handle;
1639 CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID
1640 "(%p) for remote lock %#llx\n",
1641 PFID(ll_inode2fid(inode)), inode,
1643 md_set_lock_data(exp, &handle, inode, NULL);
1646 handle.cookie = it->it_lock_handle;
1648 CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)"
1649 " for lock %#llx\n",
1650 PFID(ll_inode2fid(inode)), inode, handle.cookie);
1652 md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
1653 it->it_lock_set = 1;
1657 *bits = it->it_lock_bits;
1660 static inline int d_lustre_invalid(const struct dentry *dentry)
1662 return !ll_d2d(dentry) || ll_d2d(dentry)->lld_invalid;
1666 * Mark dentry INVALID; if the dentry refcount is zero (this is normally the case for
1667 * ll_md_blocking_ast), it will be pruned by ll_prune_aliases() and
1668 * ll_prune_negative_children(); otherwise dput() of the last refcount will
1669 * unhash this dentry and kill it.
1671 static inline void d_lustre_invalidate(struct dentry *dentry)
1673 CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
1675 dentry->d_parent, dentry->d_inode, ll_d_count(dentry));
1677 spin_lock(&dentry->d_lock);
1678 if (lld_is_init(dentry))
1679 ll_d2d(dentry)->lld_invalid = 1;
1680 spin_unlock(&dentry->d_lock);
1683 static inline void d_lustre_revalidate(struct dentry *dentry)
1685 spin_lock(&dentry->d_lock);
1686 LASSERT(ll_d2d(dentry));
1687 ll_d2d(dentry)->lld_invalid = 0;
1688 spin_unlock(&dentry->d_lock);
1691 static inline dev_t ll_compat_encode_dev(dev_t dev)
1693 /* The compat_sys_*stat*() syscalls will fail unless the
1694 * device majors and minors are both less than 256. Note that
1695 * the value returned here will be passed through
1696 * old_encode_dev() in cp_compat_stat(). And so we are not
1697 * trying to return a valid compat (u16) device number, just
1698 * one that will pass the old_valid_dev() check. */
1700 return MKDEV(MAJOR(dev) & 0xff, MINOR(dev) & 0xff);
1703 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
1704 int ll_layout_refresh(struct inode *inode, __u32 *gen);
1705 int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
1706 int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
1707 struct lu_extent *ext);
1709 int ll_xattr_init(void);
1710 void ll_xattr_fini(void);
1712 int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
1713 struct cl_page *page, enum cl_req_type crt);
1715 int ll_getparent(struct file *file, struct getparent __user *arg);
1718 int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
1719 enum op_xvalid xvalid, unsigned int attr_flags);
1721 extern struct lu_env *cl_inode_fini_env;
1722 extern __u16 cl_inode_fini_refcheck;
1724 int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
1725 void cl_inode_fini(struct inode *inode);
1727 u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
1728 u32 cl_fid_build_gen(const struct lu_fid *fid);
1730 static inline struct pcc_super *ll_i2pccs(struct inode *inode)
1732 return &ll_i2sbi(inode)->ll_pcc_super;
1735 static inline struct pcc_super *ll_info2pccs(struct ll_inode_info *lli)
1737 return ll_i2pccs(ll_info2i(lli));
1741 /* The digested form is made of a FID (16 bytes) followed by the second-to-last
1742 * ciphertext block (16 bytes), so a total length of 32 bytes.
1743 * That way, llcrypt does not compute a digested form of this digest.
1745 struct ll_digest_filename {
1746 struct lu_fid ldf_fid;
1747 char ldf_excerpt[LL_CRYPTO_BLOCK_SIZE];
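/*
 * Compile-time sanity sketch (illustrative only) matching the 16 + 16 = 32
 * byte layout described above; it assumes sizeof(struct lu_fid) == 16 and
 * LL_CRYPTO_BLOCK_SIZE == 16 as stated in the comment.
 */
#if 0
static inline void example_check_digest_filename_size(void)
{
	BUILD_BUG_ON(sizeof(struct ll_digest_filename) !=
		     sizeof(struct lu_fid) + LL_CRYPTO_BLOCK_SIZE);
}
#endif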
1750 int ll_setup_filename(struct inode *dir, const struct qstr *iname,
1751 int lookup, struct llcrypt_name *fname,
1752 struct lu_fid *fid);
1753 int ll_fname_disk_to_usr(struct inode *inode,
1754 u32 hash, u32 minor_hash,
1755 struct llcrypt_str *iname, struct llcrypt_str *oname,
1756 struct lu_fid *fid);
1757 int ll_revalidate_d_crypto(struct dentry *dentry, unsigned int flags);
1758 int ll_file_open_encrypt(struct inode *inode, struct file *filp);
1759 #ifdef HAVE_LUSTRE_CRYPTO
1760 extern const struct llcrypt_operations lustre_cryptops;
1763 /* llite/llite_foreign.c */
1764 int ll_manage_foreign(struct inode *inode, struct lustre_md *lmd);
1765 bool ll_foreign_is_openable(struct dentry *dentry, unsigned int flags);
1766 bool ll_foreign_is_removable(struct dentry *dentry, bool unset);
1768 #endif /* LLITE_INTERNAL_H */