X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Fllite%2Fllite_internal.h;h=7b89ca99ad222a197cc058dd13999a152ebb61a6;hb=b2d0facce07e734e4aa14653d0ef637dc553cb4a;hp=099c62cfbab2ca8fd819da19e7cfbbc57576f791;hpb=9e4f0079e94b54194a869c3e56b4f0cf7426f285;p=fs%2Flustre-release.git

diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h
index 099c62c..7b89ca9 100644
--- a/lustre/llite/llite_internal.h
+++ b/lustre/llite/llite_internal.h
@@ -26,10 +26,13 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
+ * Copyright (c) 2011 Whamcloud, Inc.
+ */
+/*
  * This file is part of Lustre, http://www.lustre.org/
  * Lustre is a trademark of Sun Microsystems, Inc.
  */
@@ -47,33 +50,34 @@
 /* for struct cl_lock_descr and struct cl_io */
 #include
 #include
+#include
+#include
 
 #ifndef FMODE_EXEC
 #define FMODE_EXEC 0
 #endif
 
+#ifndef VM_FAULT_RETRY
+#define VM_FAULT_RETRY 0
+#endif
+
+/** Only used on client-side for indicating the tail of dir hash/offset. */
+#define LL_DIR_END_OFF          0x7fffffffffffffffULL
+#define LL_DIR_END_OFF_32BIT    0x7fffffffUL
+
 #ifndef DCACHE_LUSTRE_INVALID
-#define DCACHE_LUSTRE_INVALID 0x100
+#define DCACHE_LUSTRE_INVALID 0x4000000
 #endif
 
 #define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
 #define LUSTRE_FPRIVATE(file) ((file)->private_data)
 
-#ifdef HAVE_VFS_INTENT_PATCHES
-static inline struct lookup_intent *ll_nd2it(struct nameidata *nd)
-{
-        return &nd->intent;
-}
-#endif
-
 struct ll_dentry_data {
         int                      lld_cwd_count;
         int                      lld_mnt_count;
         struct obd_client_handle lld_cwd_och;
         struct obd_client_handle lld_mnt_och;
-#ifndef HAVE_VFS_INTENT_PATCHES
         struct lookup_intent    *lld_it;
-#endif
         unsigned int             lld_sa_generation;
 };
 
@@ -87,9 +91,15 @@ extern struct file_operations ll_pgcache_seq_fops;
 /* remote client permission cache */
 #define REMOTE_PERM_HASHSIZE 16
 
+struct ll_getname_data {
+        char           *lgd_name;   /* points to a buffer with NAME_MAX+1 size */
+        struct lu_fid   lgd_fid;    /* target fid we are looking for */
+        int             lgd_found;  /* inode matched? */
+};
+
 /* llite setxid/access permission for user on remote client */
 struct ll_remote_perm {
-        struct hlist_node       lrp_list;
+        cfs_hlist_node_t        lrp_list;
         uid_t                   lrp_uid;
         gid_t                   lrp_gid;
         uid_t                   lrp_fsuid;
@@ -118,18 +128,18 @@ enum lli_flags {
 struct ll_inode_info {
         int                     lli_inode_magic;
-        struct semaphore        lli_size_sem;           /* protect open and change size */
+        cfs_semaphore_t         lli_size_sem;           /* protect open and change size */
         void                   *lli_size_sem_owner;
-        struct semaphore        lli_write_sem;
+        cfs_semaphore_t         lli_write_sem;
+        cfs_rw_semaphore_t      lli_trunc_sem;
         char                   *lli_symlink_name;
         __u64                   lli_maxbytes;
         __u64                   lli_ioepoch;
         unsigned long           lli_flags;
-        cfs_time_t              lli_contention_time;
 
         /* this lock protects posix_acl, pending_write_llaps, mmap_cnt */
-        spinlock_t              lli_lock;
-        struct list_head        lli_close_list;
+        cfs_spinlock_t          lli_lock;
+        cfs_list_t              lli_close_list;
         /* handle is to be sent to MDS later on done_writing and setattr.
          * Open handle data are needed for the recovery to reconstruct
          * the inode state on the MDS. XXX: recovery is not ready yet.
          */
@@ -137,18 +147,21 @@ struct ll_inode_info {
 
         /* for writepage() only to communicate to fsync */
         int                     lli_async_rc;
+        int                     lli_write_rc;
 
         struct posix_acl       *lli_posix_acl;
 
         /* remote permission hash */
-        struct hlist_head      *lli_remote_perms;
+        cfs_hlist_head_t       *lli_remote_perms;
         unsigned long           lli_rmtperm_utime;
-        struct semaphore        lli_rmtperm_sem;
+        cfs_semaphore_t         lli_rmtperm_sem;
 
-        struct list_head        lli_dead_list;
+        cfs_list_t              lli_dead_list;
 
-        struct semaphore        lli_och_sem; /* Protects access to och pointers
-                                                and their usage counters */
+        cfs_semaphore_t         lli_och_sem; /* Protects access to och pointers
+                                                and their usage counters, also
+                                                atomicity of check-update of
+                                                lli_smd */
         /* We need all three because every inode may be opened in different
          * modes */
         struct obd_client_handle *lli_mds_read_och;
@@ -162,16 +175,22 @@ struct ll_inode_info {
 
         /* identifying fields for both metadata and data stacks. */
         struct lu_fid           lli_fid;
+        /* Parent fid for accessing default stripe data on parent directory
+         * for allocating OST objects after a mknod() and later open-by-FID. */
+        struct lu_fid           lli_pfid;
         struct lov_stripe_md   *lli_smd;
 
         /* fid capability */
         /* open count currently used by capability only, indicate whether
          * capability needs renewal */
-        atomic_t                lli_open_count;
+        cfs_atomic_t            lli_open_count;
         struct obd_capa        *lli_mds_capa;
-        struct list_head        lli_oss_capas;
+        cfs_list_t              lli_oss_capas;
 
-        /* metadata stat-ahead */
+        /* metadata statahead */
+        /* protect statahead stuff: lli_opendir_pid, lli_opendir_key, lli_sai,
+         * and so on. */
+        cfs_spinlock_t          lli_sa_lock;
         /*
          * "opendir_pid" is the token when lookup/revalid -- I am the owner of
          * dir statahead.
@@ -183,7 +202,14 @@ struct ll_inode_info {
          * before child -- it is me should cleanup the dir readahead. */
         void                   *lli_opendir_key;
         struct ll_statahead_info *lli_sai;
+        __u64                   lli_sa_pos;
         struct cl_object       *lli_clob;
+        /* the most recent timestamps obtained from mds */
+        struct ost_lvb          lli_lvb;
+        /**
+         * serialize normal readdir and statahead-readdir
+         */
+        cfs_semaphore_t         lli_readdir_sem;
 };
 
 /*
@@ -228,7 +254,7 @@ enum ra_stat {
 };
 
 struct ll_ra_info {
-        atomic_t                  ra_cur_pages;
+        cfs_atomic_t              ra_cur_pages;
         unsigned long             ra_max_pages;
         unsigned long             ra_max_pages_per_file;
         unsigned long             ra_max_read_ahead_whole_pages;
@@ -301,6 +327,9 @@ enum stats_track_type {
 #define LL_SBI_LOCALFLOCK       0x200 /* Local flocks support by kernel */
 #define LL_SBI_LRU_RESIZE       0x400 /* lru resize support */
 #define LL_SBI_LAZYSTATFS       0x800 /* lazystatfs mount option */
+#define LL_SBI_SOM_PREVIEW     0x1000 /* SOM preview mount option */
+#define LL_SBI_32BIT_API       0x2000 /* generate 32 bit inodes. */
+#define LL_SBI_64BIT_HASH      0x4000 /* support 64-bits dir hash/offset */
 
 /* default value for ll_sb_info->contention_time */
 #define SBI_DEFAULT_CONTENTION_SECONDS     60
@@ -309,20 +338,20 @@ enum stats_track_type {
 #define RCE_HASHES      32
 
 struct rmtacl_ctl_entry {
-        struct list_head       rce_list;
+        cfs_list_t             rce_list;
         pid_t                  rce_key; /* hash key */
         int                    rce_ops; /* acl operation type */
 };
 
 struct rmtacl_ctl_table {
-        spinlock_t             rct_lock;
-        struct list_head       rct_entries[RCE_HASHES];
+        cfs_spinlock_t         rct_lock;
+        cfs_list_t             rct_entries[RCE_HASHES];
 };
 
 #define EE_HASHES       32
 
 struct eacl_entry {
-        struct list_head       ee_list;
+        cfs_list_t             ee_list;
         pid_t                  ee_key; /* hash key */
         struct lu_fid          ee_fid;
         int                    ee_type; /* ACL type for ACCESS or DEFAULT */
@@ -330,17 +359,17 @@ struct eacl_entry {
 };
 
 struct eacl_table {
-        spinlock_t             et_lock;
-        struct list_head       et_entries[EE_HASHES];
+        cfs_spinlock_t         et_lock;
+        cfs_list_t             et_entries[EE_HASHES];
 };
 
 struct ll_sb_info {
-        struct list_head          ll_list;
+        cfs_list_t                ll_list;
         /* this protects pglist and ra_info.  It isn't safe to
          * grab from interrupt contexts */
-        spinlock_t                ll_lock;
-        spinlock_t                ll_pp_extent_lock; /* Lock for pp_extent entries */
-        spinlock_t                ll_process_lock; /* Lock for ll_rw_process_info */
+        cfs_spinlock_t            ll_lock;
+        cfs_spinlock_t            ll_pp_extent_lock; /* Lock for pp_extent entries */
+        cfs_spinlock_t            ll_process_lock; /* Lock for ll_rw_process_info */
         struct obd_uuid           ll_sb_uuid;
         struct obd_export        *ll_md_exp;
         struct obd_export        *ll_dt_exp;
@@ -348,10 +377,10 @@ struct ll_sb_info {
         struct lu_fid             ll_root_fid; /* root object fid */
 
         int                       ll_flags;
-        struct list_head          ll_conn_chain; /* per-conn chain of SBs */
+        cfs_list_t                ll_conn_chain; /* per-conn chain of SBs */
         struct lustre_client_ocd  ll_lco;
 
-        struct list_head          ll_orphan_dentry_list; /*please don't ask -p*/
+        cfs_list_t                ll_orphan_dentry_list; /*please don't ask -p*/
         struct ll_close_queue    *ll_lcq;
 
         struct lprocfs_stats     *ll_stats; /* lprocfs stats counter */
@@ -365,13 +394,10 @@ struct ll_sb_info {
         unsigned int              ll_namelen;
         struct file_operations   *ll_fop;
 
-#ifdef HAVE_EXPORT___IGET
-        struct list_head          ll_deathrow; /* inodes to be destroyed (b1443) */
-        spinlock_t                ll_deathrow_lock;
-#endif
         /* =0 - hold lock over whole read/write
          * >0 - max. chunk to be read/written w/o lock re-acquiring */
         unsigned long             ll_max_rw_chunk;
+        unsigned int              ll_md_brw_size; /* used by readdir */
 
         struct lu_site           *ll_site;
         struct cl_device         *ll_cl;
@@ -388,20 +414,16 @@ struct ll_sb_info {
 
         /* metadata stat-ahead */
         unsigned int              ll_sa_max;     /* max statahead RPCs */
-        unsigned int              ll_sa_wrong;   /* statahead thread stopped for
-                                                  * low hit ratio */
-        unsigned int              ll_sa_total;   /* statahead thread started
+        atomic_t                  ll_sa_total;   /* statahead thread started
                                                   * count */
-        unsigned long long        ll_sa_blocked; /* ls count waiting for
-                                                  * statahead */
-        unsigned long long        ll_sa_cached;  /* ls count got in cache */
-        unsigned long long        ll_sa_hit;     /* hit count */
-        unsigned long long        ll_sa_miss;    /* miss count */
+        atomic_t                  ll_sa_wrong;   /* statahead thread stopped for
+                                                  * low hit ratio */
 
         dev_t                     ll_sdev_orig; /* save s_dev before assign for
                                                  * clustred nfs */
         struct rmtacl_ctl_table   ll_rct;
         struct eacl_table         ll_et;
+        struct vfsmount          *ll_mnt;
 };
 
 #define LL_DEFAULT_MAX_RW_CHUNK      (32 * 1024 * 1024)
@@ -410,14 +432,14 @@ struct ll_ra_read {
         pgoff_t             lrr_start;
         pgoff_t             lrr_count;
         struct task_struct *lrr_reader;
-        struct list_head    lrr_linkage;
+        cfs_list_t          lrr_linkage;
 };
 
 /*
  * per file-descriptor read-ahead data.
  */
 struct ll_readahead_state {
-        spinlock_t      ras_lock;
+        cfs_spinlock_t  ras_lock;
         /*
          * index of the last page that read(2) needed and that wasn't in the
          * cache. Used by ras_update() to detect seeks.
@@ -475,7 +497,7 @@ struct ll_readahead_state {
          * progress against this file descriptor. Used by read-ahead code,
          * protected by ->ras_lock.
          */
-        struct list_head    ras_read_beads;
+        cfs_list_t          ras_read_beads;
         /*
          * The following 3 items are used for detecting the stride I/O
          * mode.
@@ -487,19 +509,21 @@ struct ll_readahead_state {
          * ras_stride_pages = stride_pages;
          * Note: all these three items are counted by pages.
          */
-        unsigned long ras_stride_length;
-        unsigned long ras_stride_pages;
-        pgoff_t ras_stride_offset;
+        unsigned long   ras_stride_length;
+        unsigned long   ras_stride_pages;
+        pgoff_t         ras_stride_offset;
         /*
          * number of consecutive stride request count, and it is similar as
         * ras_consecutive_requests, but used for stride I/O mode.
         * Note: only more than 2 consecutive stride request are detected,
         * stride read-ahead will be enable
         */
-        unsigned long ras_consecutive_stride_requests;
+        unsigned long   ras_consecutive_stride_requests;
 };
 
 struct ll_file_dir {
+        __u64 lfd_pos;
+        __u64 lfd_next;
 };
 
 extern cfs_mem_cache_t *ll_file_data_slab;
@@ -515,7 +539,7 @@ struct ll_file_data {
 
 struct lov_stripe_md;
 
-extern spinlock_t inode_lock;
+extern cfs_spinlock_t inode_lock;
 
 extern struct proc_dir_entry *proc_lustre_fs_root;
 
@@ -525,14 +549,23 @@ static inline struct inode *ll_info2i(struct ll_inode_info *lli)
 }
 
 struct it_cb_data {
-        struct inode *icbd_parent;
+        struct inode  *icbd_parent;
         struct dentry **icbd_childp;
-        obd_id hash;
+        obd_id         hash;
 };
 
 __u32 ll_i2suppgid(struct inode *i);
 void ll_i2gids(__u32 *suppgids, struct inode *i1,struct inode *i2);
 
+static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
+{
+#if BITS_PER_LONG == 32
+        return 1;
+#else
+        return unlikely(cfs_curproc_is_32bit() || (sbi->ll_flags & LL_SBI_32BIT_API));
+#endif
+}
+
 #define LLAP_MAGIC 98764321
 
 extern cfs_mem_cache_t *ll_async_page_slab;
@@ -562,16 +595,14 @@ static void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
 
 /* llite/dir.c */
-static inline void ll_put_page(struct page *page)
-{
-        kunmap(page);
-        page_cache_release(page);
-}
-
+void ll_release_page(struct page *page, int remove);
 extern struct file_operations ll_dir_operations;
 extern struct inode_operations ll_dir_inode_operations;
-struct page *ll_get_dir_page(struct inode *dir, __u64 hash, int exact,
+struct page *ll_get_dir_page(struct file *filp, struct inode *dir, __u64 hash,
                              struct ll_dir_chain *chain);
+int ll_readdir(struct file *filp, void *cookie, filldir_t filldir);
+
+int ll_get_mdt_idx(struct inode *inode);
 
 /* llite/namei.c */
 int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir);
@@ -579,13 +610,11 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
                       struct lustre_md *lic);
 int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                        void *data, int flag);
-#ifndef HAVE_VFS_INTENT_PATCHES
 struct lookup_intent *ll_convert_intent(struct open_intent *oit,
                                         int lookup_flags);
-#endif
 int ll_lookup_it_finish(struct ptlrpc_request *request,
                         struct lookup_intent *it, void *data);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+struct dentry *ll_find_alias(struct inode *inode, struct dentry *de);
 
 /* llite/rw.c */
 int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
@@ -599,19 +628,23 @@ int ll_file_punch(struct inode *, loff_t, int);
 ssize_t ll_file_lockless_io(struct file *, char *, size_t, loff_t *, int);
 void ll_clear_file_contended(struct inode*);
 int ll_sync_page_range(struct inode *, struct address_space *, loff_t, size_t);
-int ll_readahead(const struct lu_env *env, struct cl_io *io, struct ll_readahead_state *ras,
-                 struct address_space *mapping, struct cl_page_list *queue, int flags);
+int ll_readahead(const struct lu_env *env, struct cl_io *io,
+                 struct ll_readahead_state *ras, struct address_space *mapping,
+                 struct cl_page_list *queue, int flags);
 
 /* llite/file.c */
 extern struct file_operations ll_file_operations;
 extern struct file_operations ll_file_operations_flock;
 extern struct file_operations ll_file_operations_noflock;
 extern struct inode_operations ll_file_inode_operations;
-extern int ll_inode_revalidate_it(struct dentry *, struct lookup_intent *);
-extern int ll_have_md_lock(struct inode *inode, __u64 bits);
+extern int ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
+                                  __u64);
+extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
+                           ldlm_mode_t l_req_mode);
 extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
                                    struct lustre_handle *lockh);
-int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *, __u64 bits);
+int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
+                             __u64 bits);
 int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd);
 int ll_file_open(struct inode *inode, struct file *file);
 int ll_file_release(struct inode *inode, struct file *file);
@@ -625,11 +658,12 @@ int ll_release_openhandle(struct dentry *, struct lookup_intent *);
 int ll_md_close(struct obd_export *md_exp, struct inode *inode,
                 struct file *file);
 int ll_md_real_close(struct inode *inode, int flags);
-void ll_epoch_close(struct inode *inode, struct md_op_data *op_data,
-                    struct obd_client_handle **och, unsigned long flags);
-int ll_sizeonmds_update(struct inode *inode, struct lustre_handle *fh,
-                        __u64 ioepoch);
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo);
+void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
+                      struct obd_client_handle **och, unsigned long flags);
+void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
+int ll_som_update(struct inode *inode, struct md_op_data *op_data);
+int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
+                     __u64 ioepoch, int sync);
 int ll_md_setattr(struct inode *inode, struct md_op_data *op_data,
                   struct md_open_data **mod);
 void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
@@ -665,17 +699,12 @@ int ll_put_grouplock(struct inode *inode, struct file *file,
                      unsigned long arg);
 int ll_fid2path(struct obd_export *exp, void *arg);
 
 /* llite/dcache.c */
-/* llite/namei.c */
-/**
- * protect race ll_find_aliases vs ll_revalidate_it vs ll_unhash_aliases
- */
-extern spinlock_t ll_lookup_lock;
+int ll_dops_init(struct dentry *de, int block, int init_sa);
+extern cfs_spinlock_t ll_lookup_lock;
 extern struct dentry_operations ll_d_ops;
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
 int ll_drop_dentry(struct dentry *dentry);
-extern void ll_set_dd(struct dentry *de);
-int ll_drop_dentry(struct dentry *dentry);
 void ll_unhash_aliases(struct inode *);
 void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft);
 void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
@@ -688,10 +717,9 @@ extern struct super_operations lustre_super_operations;
 
 char *ll_read_opt(const char *opt, char *data);
 void ll_lli_init(struct ll_inode_info *lli);
-int ll_fill_super(struct super_block *sb);
+int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
 void ll_put_super(struct super_block *sb);
 void ll_kill_super(struct super_block *sb);
-int ll_shrink_cache(int nr_to_scan, gfp_t gfp_mask);
 struct inode *ll_inode_from_lock(struct ldlm_lock *lock);
 void ll_clear_inode(struct inode *inode);
 int ll_setattr_raw(struct inode *inode, struct iattr *attr);
@@ -747,11 +775,11 @@ extern struct inode_operations ll_fast_symlink_inode_operations;
 
 /* llite/llite_close.c */
 struct ll_close_queue {
-        spinlock_t              lcq_lock;
-        struct list_head        lcq_head;
-        wait_queue_head_t       lcq_waitq;
-        struct completion       lcq_comp;
-        atomic_t                lcq_stop;
+        cfs_spinlock_t          lcq_lock;
+        cfs_list_t              lcq_head;
+        cfs_waitq_t             lcq_waitq;
+        cfs_completion_t        lcq_comp;
+        cfs_atomic_t            lcq_stop;
 };
 
 struct ccc_object *cl_inode2ccc(struct inode *inode);
@@ -830,15 +858,6 @@ struct vvp_io {
          */
         int                  cui_ra_window_set;
         /**
-         * If IO was created directly in low level method like
-         * ->prepare_write(), this field stores the number of method calls
-         * that constitute this IO. This field is decremented by ll_cl_fini(),
-         * and cl_io is destroyed, when it reaches 0. When oneshot IO
-         * completes, this fields is set to -1.
-         */
-
-        int                  cui_oneshot;
-        /**
          * Partially truncated page, that vvp_io_trunc_start() keeps locked
          * across truncate.
          */
@@ -871,6 +890,15 @@ struct vvp_io_args {
         } u;
 };
 
+struct ll_cl_context {
+        void           *lcc_cookie;
+        struct cl_io   *lcc_io;
+        struct cl_page *lcc_page;
+        struct lu_env  *lcc_env;
+        int             lcc_refcheck;
+        int             lcc_created;
+};
+
 struct vvp_thread_info {
         struct ost_lvb       vti_lvb;
         struct cl_2queue     vti_queue;
@@ -878,6 +906,7 @@ struct vvp_thread_info {
         struct vvp_io_args   vti_args;
         struct ra_io_arg     vti_ria;
         struct kiocb         vti_kiocb;
+        struct ll_cl_context vti_io_ctx;
 };
 
 static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
@@ -901,7 +930,7 @@ static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
 }
 
 struct vvp_session {
-        struct vvp_io         vs_ios;
+        struct vvp_io vs_ios;
 };
 
 static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
@@ -930,8 +959,8 @@ typedef struct rb_node rb_node_t;
 struct ll_lock_tree_node;
 struct ll_lock_tree {
         rb_root_t                       lt_root;
-        struct list_head                lt_locked_list;
-        struct ll_file_data            *lt_fd;
+        cfs_list_t                      lt_locked_list;
+        struct ll_file_data             *lt_fd;
 };
 
 int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
@@ -944,12 +973,6 @@ struct vm_area_struct *our_vma(unsigned long addr, size_t count);
 
 #define ll_s2sbi(sb)        (s2lsi(sb)->lsi_llsbi)
 
-static inline __u64 ll_ts2u64(struct timespec *time)
-{
-        __u64 t = time->tv_sec;
-        return t;
-}
-
 /* don't need an addref as the sb_info should be holding one */
 static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
 {
@@ -1017,8 +1040,8 @@ int ll_removexattr(struct dentry *dentry, const char *name);
 
 extern cfs_mem_cache_t *ll_remote_perm_cachep;
 extern cfs_mem_cache_t *ll_rmtperm_hash_cachep;
 
-struct hlist_head *alloc_rmtperm_hash(void);
-void free_rmtperm_hash(struct hlist_head *hash);
+cfs_hlist_head_t *alloc_rmtperm_hash(void);
+void free_rmtperm_hash(cfs_hlist_head_t *hash);
 int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
 int lustre_check_remote_perm(struct inode *inode, int mask);
@@ -1085,25 +1108,29 @@ void et_fini(struct eacl_table *et);
 
 /* statahead.c */
-#define LL_SA_RPC_MIN   2
-#define LL_SA_RPC_DEF   32
-#define LL_SA_RPC_MAX   8192
+#define LL_SA_RPC_MIN           2
+#define LL_SA_RPC_DEF           32
+#define LL_SA_RPC_MAX           8192
+
+#define LL_SA_CACHE_BIT         5
+#define LL_SA_CACHE_SIZE        (1 << LL_SA_CACHE_BIT)
+#define LL_SA_CACHE_MASK        (LL_SA_CACHE_SIZE - 1)
 
 /* per inode struct, for dir only */
 struct ll_statahead_info {
         struct inode           *sai_inode;
-        unsigned int            sai_generation; /* generation for statahead */
-        atomic_t                sai_refcount;   /* when access this struct, hold
+        cfs_atomic_t            sai_refcount;   /* when access this struct, hold
                                                  * refcount */
-        unsigned int            sai_sent;       /* stat requests sent count */
-        unsigned int            sai_replied;    /* stat requests which received
-                                                 * reply */
+        unsigned int            sai_generation; /* generation for statahead */
         unsigned int            sai_max;        /* max ahead of lookup */
-        unsigned int            sai_index;      /* index of statahead entry */
-        unsigned int            sai_index_next; /* index for the next statahead
-                                                 * entry to be stated */
-        unsigned int            sai_hit;        /* hit count */
-        unsigned int            sai_miss;       /* miss count:
+        __u64                   sai_sent;       /* stat requests sent count */
+        __u64                   sai_replied;    /* stat requests which received
+                                                 * reply */
+        __u64                   sai_index;      /* index of statahead entry */
+        __u64                   sai_index_wait; /* index of entry which is the
+                                                 * caller is waiting for */
+        __u64                   sai_hit;        /* hit count */
+        __u64                   sai_miss;       /* miss count:
                                                  * for "ls -al" case, it includes
                                                  * hidden dentry miss;
                                                  * for "ls -l" case, it does not
@@ -1115,48 +1142,43 @@ struct ll_statahead_info {
         unsigned int            sai_miss_hidden;/* "ls -al", but first dentry
                                                  * is not a hidden one */
         unsigned int            sai_skip_hidden;/* skipped hidden dentry count */
-        unsigned int            sai_ls_all:1;   /* "ls -al", do stat-ahead for
+        unsigned int            sai_ls_all:1,   /* "ls -al", do stat-ahead for
                                                  * hidden entries */
+                                sai_in_readpage:1;/* statahead is in readdir()*/
         cfs_waitq_t             sai_waitq;      /* stat-ahead wait queue */
         struct ptlrpc_thread    sai_thread;     /* stat-ahead thread */
-        struct list_head        sai_entries_sent;     /* entries sent out */
-        struct list_head        sai_entries_received; /* entries returned */
-        struct list_head        sai_entries_stated;   /* entries stated */
+        cfs_list_t              sai_entries_sent;     /* entries sent out */
+        cfs_list_t              sai_entries_received; /* entries returned */
+        cfs_list_t              sai_entries_stated;   /* entries stated */
+        cfs_list_t              sai_cache[LL_SA_CACHE_SIZE];
+        cfs_spinlock_t          sai_cache_lock[LL_SA_CACHE_SIZE];
+        cfs_atomic_t            sai_cache_count; /* entry count in cache */
 };
 
-int do_statahead_enter(struct inode *dir, struct dentry **dentry, int lookup);
-void ll_statahead_exit(struct inode *dir, struct dentry *dentry, int result);
-void ll_stop_statahead(struct inode *inode, void *key);
+int do_statahead_enter(struct inode *dir, struct dentry **dentry,
+                       int only_unplug);
+void ll_stop_statahead(struct inode *dir, void *key);
 
-static inline
-void ll_statahead_mark(struct inode *dir, struct dentry *dentry)
+static inline void
+ll_statahead_mark(struct inode *dir, struct dentry *dentry)
 {
-        struct ll_inode_info  *lli;
-        struct ll_dentry_data *ldd = ll_d2d(dentry);
+        struct ll_inode_info     *lli = ll_i2info(dir);
+        struct ll_statahead_info *sai = lli->lli_sai;
+        struct ll_dentry_data    *ldd = ll_d2d(dentry);
 
-        /* dentry has been move to other directory, no need mark */
-        if (unlikely(dir != dentry->d_parent->d_inode))
-                return;
-
-        lli = ll_i2info(dir);
         /* not the same process, don't mark */
         if (lli->lli_opendir_pid != cfs_curproc_pid())
                 return;
 
-        spin_lock(&lli->lli_lock);
-        if (likely(lli->lli_sai != NULL && ldd != NULL))
-                ldd->lld_sa_generation = lli->lli_sai->sai_generation;
-        spin_unlock(&lli->lli_lock);
+        if (sai != NULL && ldd != NULL)
+                ldd->lld_sa_generation = sai->sai_generation;
 }
 
-static inline
-int ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup)
+static inline int
+ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
 {
         struct ll_inode_info  *lli;
-        struct ll_dentry_data *ldd = ll_d2d(*dentryp);
-
-        if (unlikely(dir == NULL))
-                return -EAGAIN;
+        struct ll_dentry_data *ldd;
 
         if (ll_i2sbi(dir)->ll_sa_max == 0)
                 return -ENOTSUPP;
@@ -1166,11 +1188,12 @@ int ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup)
         if (lli->lli_opendir_pid != cfs_curproc_pid())
                 return -EAGAIN;
 
+        ldd = ll_d2d(*dentryp);
         /*
-         * When "ls" a dentry, the system trigger more than once "revalidate" or
-         * "lookup", for "getattr", for "getxattr", and maybe for others.
+         * When stats a dentry, the system trigger more than once "revalidate"
+         * or "lookup", for "getattr", for "getxattr", and maybe for others.
          * Under patchless client mode, the operation intent is not accurate,
-         * it maybe misguide the statahead thread. For example:
+         * which maybe misguide the statahead thread. For example:
         * The "revalidate" call for "getattr" and "getxattr" of a dentry maybe
         * have the same operation intent -- "IT_GETATTR".
         * In fact, one dentry should has only one chance to interact with the
@@ -1185,22 +1208,7 @@ int ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup)
             ldd->lld_sa_generation == lli->lli_sai->sai_generation)
                 return -EAGAIN;
 
-        return do_statahead_enter(dir, dentryp, lookup);
-}
-
-static void inline ll_dops_init(struct dentry *de, int block)
-{
-        struct ll_dentry_data *lld = ll_d2d(de);
-
-        if (lld == NULL && block != 0) {
-                ll_set_dd(de);
-                lld = ll_d2d(de);
-        }
-
-        if (lld != NULL)
-                lld->lld_sa_generation = 0;
-
-        de->d_op = &ll_d_ops;
+        return do_statahead_enter(dir, dentryp, only_unplug);
 }
 
 /* llite ioctl register support rountine */
@@ -1255,9 +1263,6 @@ void ll_iocontrol_unregister(void *magic);
 #define cl_i2info(info) ll_i2info(info)
 #define cl_inode_mode(inode) ((inode)->i_mode)
 #define cl_i2sbi ll_i2sbi
-#define cl_isize_read(inode) i_size_read(inode)
-#define cl_isize_write(inode,kms) i_size_write(inode, kms)
-#define cl_isize_write_nolock(inode,kms) do {(inode)->i_size=(kms);}while(0)
 
 static inline void cl_isize_lock(struct inode *inode, int lsmlock)
 {
@@ -1269,6 +1274,21 @@ static inline void cl_isize_unlock(struct inode *inode, int lsmlock)
         ll_inode_size_unlock(inode, lsmlock);
 }
 
+static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
+{
+        LASSERT_SEM_LOCKED(&ll_i2info(inode)->lli_size_sem);
+        i_size_write(inode, kms);
+}
+
+static inline void cl_isize_write(struct inode *inode, loff_t kms)
+{
+        ll_inode_size_lock(inode, 0);
+        i_size_write(inode, kms);
+        ll_inode_size_unlock(inode, 0);
+}
+
+#define cl_isize_read(inode) i_size_read(inode)
+
 static inline int cl_merge_lvb(struct inode *inode)
 {
         return ll_merge_lvb(inode);
@@ -1297,6 +1317,15 @@ struct ll_dio_pages {
         int           ldp_nr;
 };
 
+static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
+                                  int rc)
+{
+        int opc = (crt == CIT_READ) ? LPROC_LL_OSC_READ :
+                                      LPROC_LL_OSC_WRITE;
+
+        ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
+}
+
 extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                                   int rw, struct inode *inode,
                                   struct ll_dio_pages *pv);
@@ -1310,4 +1339,62 @@ static inline int ll_file_nolock(const struct file *file)
         return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
                 (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
 }
+
+static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
+                                    struct lookup_intent *it, __u64 *bits)
+{
+        if (!it->d.lustre.it_lock_set) {
+                CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
+                       inode, inode->i_ino, inode->i_generation);
+                md_set_lock_data(exp, &it->d.lustre.it_lock_handle,
+                                 inode, &it->d.lustre.it_lock_bits);
+                it->d.lustre.it_lock_set = 1;
+        }
+
+        if (bits != NULL)
+                *bits = it->d.lustre.it_lock_bits;
+}
+
+static inline void ll_dentry_rehash(struct dentry *dentry, int locked)
+{
+        if (!locked) {
+                cfs_spin_lock(&ll_lookup_lock);
+                spin_lock(&dcache_lock);
+        }
+        if (d_unhashed(dentry))
+                d_rehash_cond(dentry, 0);
+        if (!locked) {
+                spin_unlock(&dcache_lock);
+                cfs_spin_unlock(&ll_lookup_lock);
+        }
+}
+
+static inline void ll_dentry_reset_flags(struct dentry *dentry, __u64 bits)
+{
+        if (bits & MDS_INODELOCK_LOOKUP &&
+            dentry->d_flags & DCACHE_LUSTRE_INVALID) {
+                lock_dentry(dentry);
+                dentry->d_flags &= ~DCACHE_LUSTRE_INVALID;
+                unlock_dentry(dentry);
+        }
+}
+
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2,7,50,0)
+/* Compatibility for old (1.8) compiled userspace quota code */
+struct if_quotactl_18 {
+        __u32                   qc_cmd;
+        __u32                   qc_type;
+        __u32                   qc_id;
+        __u32                   qc_stat;
+        struct obd_dqinfo       qc_dqinfo;
+        struct obd_dqblk        qc_dqblk;
+        char                    obd_type[16];
+        struct obd_uuid         obd_uuid;
+};
+#define LL_IOC_QUOTACTL_18              _IOWR('f', 162, struct if_quotactl_18 *)
+/* End compatibility for old (1.8) compiled userspace quota code */
+#else
+#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
+#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2,7,50,0) */
+
 #endif /* LLITE_INTERNAL_H */