X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fllite_internal.h;h=b03ce4b28b3101e71579cb3d37dac71aaa118c73;hp=c061183c1bc6af2125e61aec274c22c709a0fc15;hb=2d337a3da6bee76d66450fedda8386f9ede035fa;hpb=e0419ec1b09478d871b1457f787ff1dc92cf6c89 diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h index c061183..b03ce4b 100644 --- a/lustre/llite/llite_internal.h +++ b/lustre/llite/llite_internal.h @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -26,8 +24,10 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2012, Whamcloud, Inc. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -47,33 +47,34 @@ /* for struct cl_lock_descr and struct cl_io */ #include #include +#include +#include #ifndef FMODE_EXEC #define FMODE_EXEC 0 #endif +#ifndef VM_FAULT_RETRY +#define VM_FAULT_RETRY 0 +#endif + +/** Only used on client-side for indicating the tail of dir hash/offset. */ +#define LL_DIR_END_OFF 0x7fffffffffffffffULL +#define LL_DIR_END_OFF_32BIT 0x7fffffffUL + #ifndef DCACHE_LUSTRE_INVALID -#define DCACHE_LUSTRE_INVALID 0x100 +#define DCACHE_LUSTRE_INVALID 0x4000000 #endif #define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0") #define LUSTRE_FPRIVATE(file) ((file)->private_data) -#ifdef HAVE_VFS_INTENT_PATCHES -static inline struct lookup_intent *ll_nd2it(struct nameidata *nd) -{ - return &nd->intent; -} -#endif - struct ll_dentry_data { int lld_cwd_count; int lld_mnt_count; struct obd_client_handle lld_cwd_och; struct obd_client_handle lld_mnt_och; -#ifndef HAVE_VFS_INTENT_PATCHES struct lookup_intent *lld_it; -#endif unsigned int lld_sa_generation; }; @@ -87,9 +88,15 @@ extern struct file_operations ll_pgcache_seq_fops; /* remote client permission cache */ #define REMOTE_PERM_HASHSIZE 16 +struct ll_getname_data { + char *lgd_name; /* points to a buffer with NAME_MAX+1 size */ + struct lu_fid lgd_fid; /* target fid we are looking for */ + int lgd_found; /* inode matched? */ +}; + /* llite setxid/access permission for user on remote client */ struct ll_remote_perm { - struct hlist_node lrp_list; + cfs_hlist_node_t lrp_list; uid_t lrp_uid; gid_t lrp_gid; uid_t lrp_fsuid; @@ -110,80 +117,155 @@ enum lli_flags { * be sent to MDS. */ LLIF_SOM_DIRTY = (1 << 3), /* File is contented */ - LLIF_CONTENDED = (1 << 4), + LLIF_CONTENDED = (1 << 4), /* Truncate uses server lock for this file */ - LLIF_SRVLOCK = (1 << 5) + LLIF_SRVLOCK = (1 << 5), }; struct ll_inode_info { - int lli_inode_magic; - struct semaphore lli_size_sem; /* protect open and change size */ - void *lli_size_sem_owner; - struct semaphore lli_write_sem; - char *lli_symlink_name; - __u64 lli_maxbytes; - __u64 lli_ioepoch; - unsigned long lli_flags; - cfs_time_t lli_contention_time; - - /* this lock protects posix_acl, pending_write_llaps, mmap_cnt */ - spinlock_t lli_lock; - struct list_head lli_close_list; - /* handle is to be sent to MDS later on done_writing and setattr. - * Open handle data are needed for the recovery to reconstruct - * the inode state on the MDS. XXX: recovery is not ready yet. 
*/ - struct obd_client_handle *lli_pending_och; - - /* for writepage() only to communicate to fsync */ - int lli_async_rc; - - struct posix_acl *lli_posix_acl; + __u32 lli_inode_magic; + __u32 lli_flags; + __u64 lli_ioepoch; - /* remote permission hash */ - struct hlist_head *lli_remote_perms; - unsigned long lli_rmtperm_utime; - struct semaphore lli_rmtperm_sem; + cfs_spinlock_t lli_lock; + struct posix_acl *lli_posix_acl; - struct list_head lli_dead_list; - - struct semaphore lli_och_sem; /* Protects access to och pointers - and their usage counters */ - /* We need all three because every inode may be opened in different - modes */ - struct obd_client_handle *lli_mds_read_och; - __u64 lli_open_fd_read_count; - struct obd_client_handle *lli_mds_write_och; - __u64 lli_open_fd_write_count; - struct obd_client_handle *lli_mds_exec_och; - __u64 lli_open_fd_exec_count; - - struct inode lli_vfs_inode; + cfs_hlist_head_t *lli_remote_perms; + cfs_mutex_t lli_rmtperm_mutex; /* identifying fields for both metadata and data stacks. */ - struct lu_fid lli_fid; - struct lov_stripe_md *lli_smd; + struct lu_fid lli_fid; + /* Parent fid for accessing default stripe data on parent directory + * for allocating OST objects after a mknod() and later open-by-FID. */ + struct lu_fid lli_pfid; - /* fid capability */ + cfs_list_t lli_close_list; + cfs_list_t lli_oss_capas; /* open count currently used by capability only, indicate whether * capability needs renewal */ - atomic_t lli_open_count; - struct obd_capa *lli_mds_capa; - struct list_head lli_oss_capas; + cfs_atomic_t lli_open_count; + struct obd_capa *lli_mds_capa; + cfs_time_t lli_rmtperm_time; - /* metadata stat-ahead */ - /* - * "opendir_pid" is the token when lookup/revalid -- I am the owner of - * dir statahead. + /* handle is to be sent to MDS later on done_writing and setattr. + * Open handle data are needed for the recovery to reconstruct + * the inode state on the MDS. XXX: recovery is not ready yet. */ + struct obd_client_handle *lli_pending_och; + + /* We need all three because every inode may be opened in different + * modes */ + struct obd_client_handle *lli_mds_read_och; + struct obd_client_handle *lli_mds_write_och; + struct obd_client_handle *lli_mds_exec_och; + __u64 lli_open_fd_read_count; + __u64 lli_open_fd_write_count; + __u64 lli_open_fd_exec_count; + /* Protects access to och pointers and their usage counters, also + * atomicity of check-update of lli_smd */ + cfs_mutex_t lli_och_mutex; + + struct inode lli_vfs_inode; + + /* the most recent timestamps obtained from mds */ + struct ost_lvb lli_lvb; + cfs_spinlock_t lli_agl_lock; + + /* Try to make the d::member and f::member are aligned. Before using + * these members, make clear whether it is directory or not. */ + union { + /* for directory */ + struct { + /* serialize normal readdir and statahead-readdir. */ + cfs_mutex_t d_readdir_mutex; + + /* metadata statahead */ + /* since parent-child threads can share the same @file + * struct, "opendir_key" is the token when dir close for + * case of parent exit before child -- it is me should + * cleanup the dir readahead. */ + void *d_opendir_key; + struct ll_statahead_info *d_sai; + __u64 d_sa_pos; + struct posix_acl *d_def_acl; + /* protect statahead stuff. */ + cfs_spinlock_t d_sa_lock; + /* "opendir_pid" is the token when lookup/revalid + * -- I am the owner of dir statahead. 
*/ + pid_t d_opendir_pid; + } d; + +#define lli_readdir_mutex u.d.d_readdir_mutex +#define lli_opendir_key u.d.d_opendir_key +#define lli_sai u.d.d_sai +#define lli_sa_pos u.d.d_sa_pos +#define lli_def_acl u.d.d_def_acl +#define lli_sa_lock u.d.d_sa_lock +#define lli_opendir_pid u.d.d_opendir_pid + + /* for non-directory */ + struct { + cfs_semaphore_t f_size_sem; + void *f_size_sem_owner; + char *f_symlink_name; + __u64 f_maxbytes; + /* + * cfs_rw_semaphore_t { + * signed long count; // align u.d.d_def_acl + * cfs_spinlock_t wait_lock; // align u.d.d_sa_lock + * struct list_head wait_list; + * } + */ + cfs_rw_semaphore_t f_trunc_sem; + cfs_mutex_t f_write_mutex; + + /* for writepage() only to communicate to fsync */ + int f_async_rc; + int f_write_rc; + + cfs_rw_semaphore_t f_glimpse_sem; + cfs_time_t f_glimpse_time; + cfs_list_t f_agl_list; + __u64 f_agl_index; + /* + * whenever a process try to read/write the file, the + * jobid of the process will be saved here, and it'll + * be packed into the write PRC when flush later. + * + * so the read/write statistics for jobid will not be + * accurate if the file is shared by different jobs. + */ + char f_jobid[JOBSTATS_JOBID_SIZE]; + } f; + +#define lli_size_sem u.f.f_size_sem +#define lli_size_sem_owner u.f.f_size_sem_owner +#define lli_symlink_name u.f.f_symlink_name +#define lli_maxbytes u.f.f_maxbytes +#define lli_trunc_sem u.f.f_trunc_sem +#define lli_write_mutex u.f.f_write_mutex +#define lli_async_rc u.f.f_async_rc +#define lli_write_rc u.f.f_write_rc +#define lli_glimpse_sem u.f.f_glimpse_sem +#define lli_glimpse_time u.f.f_glimpse_time +#define lli_agl_list u.f.f_agl_list +#define lli_agl_index u.f.f_agl_index +#define lli_jobid u.f.f_jobid + + } u; + + /* XXX: For following frequent used members, although they maybe special + * used for non-directory object, it is some time-wasting to check + * whether the object is directory or not before using them. On the + * other hand, currently, sizeof(f) > sizeof(d), it cannot reduce + * the "ll_inode_info" size even if moving those members into u.f. + * So keep them out side. + * + * In the future, if more members are added only for directory, + * some of the following members can be moved into u.f. */ - pid_t lli_opendir_pid; - /* - * since parent-child threads can share the same @file struct, - * "opendir_key" is the token when dir close for case of parent exit - * before child -- it is me should cleanup the dir readahead. */ - void *lli_opendir_key; - struct ll_statahead_info *lli_sai; - struct cl_object *lli_clob; + struct lov_stripe_md *lli_smd; + struct cl_object *lli_clob; }; /* @@ -228,7 +310,7 @@ enum ra_stat { }; struct ll_ra_info { - atomic_t ra_cur_pages; + cfs_atomic_t ra_cur_pages; unsigned long ra_max_pages; unsigned long ra_max_pages_per_file; unsigned long ra_max_read_ahead_whole_pages; @@ -295,13 +377,17 @@ enum stats_track_type { #define LL_SBI_FLOCK 0x04 #define LL_SBI_USER_XATTR 0x08 /* support user xattr */ #define LL_SBI_ACL 0x10 /* support ACL */ -#define LL_SBI_JOIN 0x20 /* support JOIN */ #define LL_SBI_RMT_CLIENT 0x40 /* remote client */ #define LL_SBI_MDS_CAPA 0x80 /* support mds capa */ #define LL_SBI_OSS_CAPA 0x100 /* support oss capa */ #define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */ #define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */ #define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */ +#define LL_SBI_SOM_PREVIEW 0x1000 /* SOM preview mount option */ +#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. 
*/ +#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */ +#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */ +#define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */ /* default value for ll_sb_info->contention_time */ #define SBI_DEFAULT_CONTENTION_SECONDS 60 @@ -310,20 +396,20 @@ enum stats_track_type { #define RCE_HASHES 32 struct rmtacl_ctl_entry { - struct list_head rce_list; + cfs_list_t rce_list; pid_t rce_key; /* hash key */ int rce_ops; /* acl operation type */ }; struct rmtacl_ctl_table { - spinlock_t rct_lock; - struct list_head rct_entries[RCE_HASHES]; + cfs_spinlock_t rct_lock; + cfs_list_t rct_entries[RCE_HASHES]; }; #define EE_HASHES 32 struct eacl_entry { - struct list_head ee_list; + cfs_list_t ee_list; pid_t ee_key; /* hash key */ struct lu_fid ee_fid; int ee_type; /* ACL type for ACCESS or DEFAULT */ @@ -331,17 +417,17 @@ struct eacl_entry { }; struct eacl_table { - spinlock_t et_lock; - struct list_head et_entries[EE_HASHES]; + cfs_spinlock_t et_lock; + cfs_list_t et_entries[EE_HASHES]; }; struct ll_sb_info { - struct list_head ll_list; + cfs_list_t ll_list; /* this protects pglist and ra_info. It isn't safe to * grab from interrupt contexts */ - spinlock_t ll_lock; - spinlock_t ll_pp_extent_lock; /* Lock for pp_extent entries */ - spinlock_t ll_process_lock; /* Lock for ll_rw_process_info */ + cfs_spinlock_t ll_lock; + cfs_spinlock_t ll_pp_extent_lock; /* Lock for pp_extent entries */ + cfs_spinlock_t ll_process_lock; /* Lock for ll_rw_process_info */ struct obd_uuid ll_sb_uuid; struct obd_export *ll_md_exp; struct obd_export *ll_dt_exp; @@ -349,10 +435,10 @@ struct ll_sb_info { struct lu_fid ll_root_fid; /* root object fid */ int ll_flags; - struct list_head ll_conn_chain; /* per-conn chain of SBs */ + cfs_list_t ll_conn_chain; /* per-conn chain of SBs */ struct lustre_client_ocd ll_lco; - struct list_head ll_orphan_dentry_list; /*please don't ask -p*/ + cfs_list_t ll_orphan_dentry_list; /*please don't ask -p*/ struct ll_close_queue *ll_lcq; struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ @@ -366,13 +452,10 @@ struct ll_sb_info { unsigned int ll_namelen; struct file_operations *ll_fop; -#ifdef HAVE_EXPORT___IGET - struct list_head ll_deathrow; /* inodes to be destroyed (b1443) */ - spinlock_t ll_deathrow_lock; -#endif /* =0 - hold lock over whole read/write * >0 - max. 
chunk to be read/written w/o lock re-acquiring */ unsigned long ll_max_rw_chunk; + unsigned int ll_md_brw_size; /* used by readdir */ struct lu_site *ll_site; struct cl_device *ll_cl; @@ -383,26 +466,23 @@ struct ll_sb_info { unsigned int ll_offset_process_count; struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX]; unsigned int ll_rw_offset_entry_count; - enum stats_track_type ll_stats_track_type; int ll_stats_track_id; + enum stats_track_type ll_stats_track_type; int ll_rw_stats_on; /* metadata stat-ahead */ unsigned int ll_sa_max; /* max statahead RPCs */ - unsigned int ll_sa_wrong; /* statahead thread stopped for - * low hit ratio */ - unsigned int ll_sa_total; /* statahead thread started + atomic_t ll_sa_total; /* statahead thread started * count */ - unsigned long long ll_sa_blocked; /* ls count waiting for - * statahead */ - unsigned long long ll_sa_cached; /* ls count got in cache */ - unsigned long long ll_sa_hit; /* hit count */ - unsigned long long ll_sa_miss; /* miss count */ + atomic_t ll_sa_wrong; /* statahead thread stopped for + * low hit ratio */ + atomic_t ll_agl_total; /* AGL thread started count */ dev_t ll_sdev_orig; /* save s_dev before assign for * clustred nfs */ struct rmtacl_ctl_table ll_rct; struct eacl_table ll_et; + struct vfsmount *ll_mnt; }; #define LL_DEFAULT_MAX_RW_CHUNK (32 * 1024 * 1024) @@ -411,14 +491,14 @@ struct ll_ra_read { pgoff_t lrr_start; pgoff_t lrr_count; struct task_struct *lrr_reader; - struct list_head lrr_linkage; + cfs_list_t lrr_linkage; }; /* * per file-descriptor read-ahead data. */ struct ll_readahead_state { - spinlock_t ras_lock; + cfs_spinlock_t ras_lock; /* * index of the last page that read(2) needed and that wasn't in the * cache. Used by ras_update() to detect seeks. @@ -476,7 +556,7 @@ struct ll_readahead_state { * progress against this file descriptor. Used by read-ahead code, * protected by ->ras_lock. */ - struct list_head ras_read_beads; + cfs_list_t ras_read_beads; /* * The following 3 items are used for detecting the stride I/O * mode. @@ -488,19 +568,21 @@ struct ll_readahead_state { * ras_stride_pages = stride_pages; * Note: all these three items are counted by pages. */ - unsigned long ras_stride_length; - unsigned long ras_stride_pages; - pgoff_t ras_stride_offset; + unsigned long ras_stride_length; + unsigned long ras_stride_pages; + pgoff_t ras_stride_offset; /* * number of consecutive stride request count, and it is similar as * ras_consecutive_requests, but used for stride I/O mode. 
* Note: only more than 2 consecutive stride request are detected, * stride read-ahead will be enable */ - unsigned long ras_consecutive_stride_requests; + unsigned long ras_consecutive_stride_requests; }; struct ll_file_dir { + __u64 lfd_pos; + __u64 lfd_next; }; extern cfs_mem_cache_t *ll_file_data_slab; @@ -516,7 +598,7 @@ struct ll_file_data { struct lov_stripe_md; -extern spinlock_t inode_lock; +extern cfs_spinlock_t inode_lock; extern struct proc_dir_entry *proc_lustre_fs_root; @@ -526,14 +608,23 @@ static inline struct inode *ll_info2i(struct ll_inode_info *lli) } struct it_cb_data { - struct inode *icbd_parent; + struct inode *icbd_parent; struct dentry **icbd_childp; - obd_id hash; + obd_id hash; }; __u32 ll_i2suppgid(struct inode *i); void ll_i2gids(__u32 *suppgids, struct inode *i1,struct inode *i2); +static inline int ll_need_32bit_api(struct ll_sb_info *sbi) +{ +#if BITS_PER_LONG == 32 + return 1; +#else + return unlikely(cfs_curproc_is_32bit() || (sbi->ll_flags & LL_SBI_32BIT_API)); +#endif +} + #define LLAP_MAGIC 98764321 extern cfs_mem_cache_t *ll_async_page_slab; @@ -563,16 +654,14 @@ static void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars) /* llite/dir.c */ -static inline void ll_put_page(struct page *page) -{ - kunmap(page); - page_cache_release(page); -} - +void ll_release_page(struct page *page, int remove); extern struct file_operations ll_dir_operations; extern struct inode_operations ll_dir_inode_operations; -struct page *ll_get_dir_page(struct inode *dir, __u64 hash, int exact, +struct page *ll_get_dir_page(struct file *filp, struct inode *dir, __u64 hash, struct ll_dir_chain *chain); +int ll_readdir(struct file *filp, void *cookie, filldir_t filldir); + +int ll_get_mdt_idx(struct inode *inode); /* llite/namei.c */ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir); @@ -580,13 +669,11 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash, struct lustre_md *lic); int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *, void *data, int flag); -#ifndef HAVE_VFS_INTENT_PATCHES struct lookup_intent *ll_convert_intent(struct open_intent *oit, int lookup_flags); -#endif int ll_lookup_it_finish(struct ptlrpc_request *request, struct lookup_intent *it, void *data); -void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry); +struct dentry *ll_find_alias(struct inode *inode, struct dentry *de); /* llite/rw.c */ int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to); @@ -600,19 +687,23 @@ int ll_file_punch(struct inode *, loff_t, int); ssize_t ll_file_lockless_io(struct file *, char *, size_t, loff_t *, int); void ll_clear_file_contended(struct inode*); int ll_sync_page_range(struct inode *, struct address_space *, loff_t, size_t); -int ll_readahead(const struct lu_env *env, struct cl_io *io, struct ll_readahead_state *ras, - struct address_space *mapping, struct cl_page_list *queue, int flags); +int ll_readahead(const struct lu_env *env, struct cl_io *io, + struct ll_readahead_state *ras, struct address_space *mapping, + struct cl_page_list *queue, int flags); /* llite/file.c */ extern struct file_operations ll_file_operations; extern struct file_operations ll_file_operations_flock; extern struct file_operations ll_file_operations_noflock; extern struct inode_operations ll_file_inode_operations; -extern int ll_inode_revalidate_it(struct dentry *, struct lookup_intent *); -extern int ll_have_md_lock(struct inode *inode, __u64 bits); +extern int ll_inode_revalidate_it(struct dentry 
*, struct lookup_intent *, + __u64); +extern int ll_have_md_lock(struct inode *inode, __u64 *bits, + ldlm_mode_t l_req_mode); extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits, struct lustre_handle *lockh); -int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *, __u64 bits); +int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *, + __u64 bits); int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd); int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); @@ -626,12 +717,13 @@ int ll_release_openhandle(struct dentry *, struct lookup_intent *); int ll_md_close(struct obd_export *md_exp, struct inode *inode, struct file *file); int ll_md_real_close(struct inode *inode, int flags); -void ll_epoch_close(struct inode *inode, struct md_op_data *op_data, - struct obd_client_handle **och, unsigned long flags); -int ll_sizeonmds_update(struct inode *inode, struct lustre_handle *fh, - __u64 ioepoch); -int ll_inode_getattr(struct inode *inode, struct obdo *obdo); -int ll_md_setattr(struct inode *inode, struct md_op_data *op_data, +void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, + struct obd_client_handle **och, unsigned long flags); +void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data); +int ll_som_update(struct inode *inode, struct md_op_data *op_data); +int ll_inode_getattr(struct inode *inode, struct obdo *obdo, + __u64 ioepoch, int sync); +int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, struct md_open_data **mod); void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data, struct lustre_handle *fh); @@ -642,7 +734,15 @@ int ll_getattr_it(struct vfsmount *mnt, struct dentry *de, struct lookup_intent *it, struct kstat *stat); int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat); struct ll_file_data *ll_file_data_get(void); +#ifdef HAVE_GENERIC_PERMISSION_4ARGS +int ll_inode_permission(struct inode *inode, int mask, unsigned int flags); +#else +# ifndef HAVE_INODE_PERMISION_2ARGS int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd); +# else +int ll_inode_permission(struct inode *inode, int mask); +# endif +#endif int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file, int flags, struct lov_user_md *lum, int lum_size); @@ -653,8 +753,14 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, int set_default); int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmm, int *lmm_size, struct ptlrpc_request **request); +#ifdef HAVE_FILE_FSYNC_4ARGS +int ll_fsync(struct file *file, loff_t start, loff_t end, int data); +#elif defined(HAVE_FILE_FSYNC_2ARGS) +int ll_fsync(struct file *file, int data); +#else int ll_fsync(struct file *file, struct dentry *dentry, int data); -int ll_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, +#endif +int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, int num_bytes); int ll_merge_lvb(struct inode *inode); int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg); @@ -662,17 +768,12 @@ int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg); int ll_fid2path(struct obd_export *exp, void *arg); /* llite/dcache.c */ -/* llite/namei.c */ -/** - * protect race ll_find_aliases vs ll_revalidate_it vs ll_unhash_aliases - */ -extern spinlock_t ll_lookup_lock; +int ll_dops_init(struct dentry *de, int block, int init_sa); +extern 
cfs_spinlock_t ll_lookup_lock; extern struct dentry_operations ll_d_ops; void ll_intent_drop_lock(struct lookup_intent *); void ll_intent_release(struct lookup_intent *); int ll_drop_dentry(struct dentry *dentry); -extern void ll_set_dd(struct dentry *de); -int ll_drop_dentry(struct dentry *dentry); void ll_unhash_aliases(struct inode *); void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft); void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry); @@ -685,13 +786,12 @@ extern struct super_operations lustre_super_operations; char *ll_read_opt(const char *opt, char *data); void ll_lli_init(struct ll_inode_info *lli); -int ll_fill_super(struct super_block *sb); +int ll_fill_super(struct super_block *sb, struct vfsmount *mnt); void ll_put_super(struct super_block *sb); void ll_kill_super(struct super_block *sb); -int ll_shrink_cache(int nr_to_scan, gfp_t gfp_mask); struct inode *ll_inode_from_lock(struct ldlm_lock *lock); void ll_clear_inode(struct inode *inode); -int ll_setattr_raw(struct inode *inode, struct iattr *attr); +int ll_setattr_raw(struct dentry *dentry, struct iattr *attr); int ll_setattr(struct dentry *de, struct iattr *attr); #ifndef HAVE_STATFS_DENTRY_PARAM int ll_statfs(struct super_block *sb, struct kstatfs *sfs); @@ -725,13 +825,11 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, const char *name, int namelen, int mode, __u32 opc, void *data); void ll_finish_md_op_data(struct md_op_data *op_data); +int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg); /* llite/llite_nfs.c */ extern struct export_operations lustre_export_operations; __u32 get_uuid2int(const char *name, int len); -struct dentry *ll_fh_to_dentry(struct super_block *sb, __u32 *data, int len, - int fhtype, int parent); -int ll_dentry_to_fh(struct dentry *, __u32 *datap, int *lenp, int need_parent); /* llite/special.c */ extern struct inode_operations ll_special_inode_operations; @@ -747,43 +845,43 @@ extern struct inode_operations ll_fast_symlink_inode_operations; /* llite/llite_close.c */ struct ll_close_queue { - spinlock_t lcq_lock; - struct list_head lcq_head; - wait_queue_head_t lcq_waitq; - struct completion lcq_comp; - atomic_t lcq_stop; -}; - -struct vvp_thread_info { - struct ost_lvb vti_lvb; - struct cl_2queue vti_queue; - struct iovec vti_local_iov; - struct ccc_io_args vti_args; - struct ra_io_arg vti_ria; - struct kiocb vti_kiocb; + cfs_spinlock_t lcq_lock; + cfs_list_t lcq_head; + cfs_waitq_t lcq_waitq; + cfs_completion_t lcq_comp; + cfs_atomic_t lcq_stop; }; struct ccc_object *cl_inode2ccc(struct inode *inode); -static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env) -{ - extern struct lu_context_key vvp_key; - struct vvp_thread_info *info; - - info = lu_context_key_get(&env->le_ctx, &vvp_key); - LASSERT(info != NULL); - return info; -} void vvp_write_pending (struct ccc_object *club, struct ccc_page *page); void vvp_write_complete(struct ccc_object *club, struct ccc_page *page); +/* specific achitecture can implement only part of this list */ +enum vvp_io_subtype { + /** normal IO */ + IO_NORMAL, + /** io called from .sendfile */ + IO_SENDFILE, + /** io started from splice_{read|write} */ + IO_SPLICE +}; + +/* IO subtypes */ struct vvp_io { + /** io subtype */ + enum vvp_io_subtype cui_io_subtype; + union { struct { read_actor_t cui_actor; void *cui_target; - } read; + } sendfile; + struct { + struct pipe_inode_info *cui_pipe; + unsigned int cui_flags; + } splice; struct vvp_fault_io { /** 
* Inode modification time that is checked across DLM @@ -792,13 +890,33 @@ struct vvp_io { time_t ft_mtime; struct vm_area_struct *ft_vma; /** - * Virtual address at which fault occurred. + * locked page returned from vvp_io */ - unsigned long ft_address; - /** - * Fault type, as to be supplied to filemap_nopage(). - */ - int *ft_type; + cfs_page_t *ft_vmpage; +#ifndef HAVE_VM_OP_FAULT + struct vm_nopage_api { + /** + * Virtual address at which fault occurred. + */ + unsigned long ft_address; + /** + * Fault type, as to be supplied to + * filemap_nopage(). + */ + int *ft_type; + } nopage; +#else + struct vm_fault_api { + /** + * kernel fault info + */ + struct vm_fault *ft_vmf; + /** + * fault API used bitflags for return code. + */ + unsigned int ft_flags; + } fault; +#endif } fault; } u; /** @@ -810,23 +928,79 @@ struct vvp_io { */ int cui_ra_window_set; /** - * If IO was created directly in low level method like - * ->prepare_write(), this field stores the number of method calls - * that constitute this IO. This field is decremented by ll_cl_fini(), - * and cl_io is destroyed, when it reaches 0. When oneshot IO - * completes, this fields is set to -1. - */ - - int cui_oneshot; - /** * Partially truncated page, that vvp_io_trunc_start() keeps locked * across truncate. */ struct cl_page *cui_partpage; }; +/** + * IO arguments for various VFS I/O interfaces. + */ +struct vvp_io_args { + /** normal/sendfile/splice */ + enum vvp_io_subtype via_io_subtype; + + union { + struct { +#ifndef HAVE_FILE_WRITEV + struct kiocb *via_iocb; +#endif + struct iovec *via_iov; + unsigned long via_nrsegs; + } normal; + struct { + read_actor_t via_actor; + void *via_target; + } sendfile; + struct { + struct pipe_inode_info *via_pipe; + unsigned int via_flags; + } splice; + } u; +}; + +struct ll_cl_context { + void *lcc_cookie; + struct cl_io *lcc_io; + struct cl_page *lcc_page; + struct lu_env *lcc_env; + int lcc_refcheck; + int lcc_created; +}; + +struct vvp_thread_info { + struct ost_lvb vti_lvb; + struct cl_2queue vti_queue; + struct iovec vti_local_iov; + struct vvp_io_args vti_args; + struct ra_io_arg vti_ria; + struct kiocb vti_kiocb; + struct ll_cl_context vti_io_ctx; +}; + +static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env) +{ + extern struct lu_context_key vvp_key; + struct vvp_thread_info *info; + + info = lu_context_key_get(&env->le_ctx, &vvp_key); + LASSERT(info != NULL); + return info; +} + +static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env, + enum vvp_io_subtype type) +{ + struct vvp_io_args *ret = &vvp_env_info(env)->vti_args; + + ret->via_io_subtype = type; + + return ret; +} + struct vvp_session { - struct vvp_io vs_ios; + struct vvp_io vs_ios; }; static inline struct vvp_session *vvp_env_session(const struct lu_env *env) @@ -855,8 +1029,8 @@ typedef struct rb_node rb_node_t; struct ll_lock_tree_node; struct ll_lock_tree { rb_root_t lt_root; - struct list_head lt_locked_list; - struct ll_file_data *lt_fd; + cfs_list_t lt_locked_list; + struct ll_file_data *lt_fd; }; int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last); @@ -865,16 +1039,24 @@ struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start, __u64 end, ldlm_mode_t mode); void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, unsigned long addr, size_t count); -struct vm_area_struct *our_vma(unsigned long addr, size_t count); - -#define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi) +struct vm_area_struct *our_vma(struct mm_struct 
*mm, unsigned long addr, + size_t count); -static inline __u64 ll_ts2u64(struct timespec *time) +static inline void ll_invalidate_page(struct page *vmpage) { - __u64 t = time->tv_sec; - return t; + struct address_space *mapping = vmpage->mapping; + loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; + + LASSERT(PageLocked(vmpage)); + if (mapping == NULL) + return; + + ll_teardown_mmaps(mapping, offset, offset + CFS_PAGE_SIZE); + truncate_complete_page(mapping, vmpage); } +#define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi) + /* don't need an addref as the sb_info should be holding one */ static inline struct obd_export *ll_s2dtexp(struct super_block *sb) { @@ -942,8 +1124,8 @@ int ll_removexattr(struct dentry *dentry, const char *name); extern cfs_mem_cache_t *ll_remote_perm_cachep; extern cfs_mem_cache_t *ll_rmtperm_hash_cachep; -struct hlist_head *alloc_rmtperm_hash(void); -void free_rmtperm_hash(struct hlist_head *hash); +cfs_hlist_head_t *alloc_rmtperm_hash(void); +void free_rmtperm_hash(cfs_hlist_head_t *hash); int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm); int lustre_check_remote_perm(struct inode *inode, int mask); @@ -973,12 +1155,8 @@ extern struct lu_device_type vvp_device_type; /** * Common IO arguments for various VFS I/O interfaces. */ - int cl_sb_init(struct super_block *sb); int cl_sb_fini(struct super_block *sb); -int cl_inode_init(struct inode *inode, struct lustre_md *md); -void cl_inode_fini(struct inode *inode); - enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma); void ll_io_init(struct cl_io *io, const struct file *file, int write); @@ -1010,25 +1188,29 @@ void et_fini(struct eacl_table *et); /* statahead.c */ -#define LL_SA_RPC_MIN 2 -#define LL_SA_RPC_DEF 32 -#define LL_SA_RPC_MAX 8192 +#define LL_SA_RPC_MIN 2 +#define LL_SA_RPC_DEF 32 +#define LL_SA_RPC_MAX 8192 + +#define LL_SA_CACHE_BIT 5 +#define LL_SA_CACHE_SIZE (1 << LL_SA_CACHE_BIT) +#define LL_SA_CACHE_MASK (LL_SA_CACHE_SIZE - 1) /* per inode struct, for dir only */ struct ll_statahead_info { struct inode *sai_inode; - unsigned int sai_generation; /* generation for statahead */ - atomic_t sai_refcount; /* when access this struct, hold + cfs_atomic_t sai_refcount; /* when access this struct, hold * refcount */ - unsigned int sai_sent; /* stat requests sent count */ - unsigned int sai_replied; /* stat requests which received - * reply */ + unsigned int sai_generation; /* generation for statahead */ unsigned int sai_max; /* max ahead of lookup */ - unsigned int sai_index; /* index of statahead entry */ - unsigned int sai_index_next; /* index for the next statahead - * entry to be stated */ - unsigned int sai_hit; /* hit count */ - unsigned int sai_miss; /* miss count: + __u64 sai_sent; /* stat requests sent count */ + __u64 sai_replied; /* stat requests which received + * reply */ + __u64 sai_index; /* index of statahead entry */ + __u64 sai_index_wait; /* index of entry which is the + * caller is waiting for */ + __u64 sai_hit; /* hit count */ + __u64 sai_miss; /* miss count: * for "ls -al" case, it includes * hidden dentry miss; * for "ls -l" case, it does not @@ -1040,62 +1222,73 @@ struct ll_statahead_info { unsigned int sai_miss_hidden;/* "ls -al", but first dentry * is not a hidden one */ unsigned int sai_skip_hidden;/* skipped hidden dentry count */ - unsigned int sai_ls_all:1; /* "ls -al", do stat-ahead for + unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for * hidden entries */ + sai_in_readpage:1,/* statahead is in readdir()*/ + sai_agl_valid:1;/* AGL is valid 
for the dir */ cfs_waitq_t sai_waitq; /* stat-ahead wait queue */ struct ptlrpc_thread sai_thread; /* stat-ahead thread */ - struct list_head sai_entries_sent; /* entries sent out */ - struct list_head sai_entries_received; /* entries returned */ - struct list_head sai_entries_stated; /* entries stated */ + struct ptlrpc_thread sai_agl_thread; /* AGL thread */ + cfs_list_t sai_entries_sent; /* entries sent out */ + cfs_list_t sai_entries_received; /* entries returned */ + cfs_list_t sai_entries_stated; /* entries stated */ + cfs_list_t sai_entries_agl; /* AGL entries to be sent */ + cfs_list_t sai_cache[LL_SA_CACHE_SIZE]; + cfs_spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE]; + cfs_atomic_t sai_cache_count; /* entry count in cache */ }; -int do_statahead_enter(struct inode *dir, struct dentry **dentry, int lookup); -void ll_statahead_exit(struct inode *dir, struct dentry *dentry, int result); -void ll_stop_statahead(struct inode *inode, void *key); +int do_statahead_enter(struct inode *dir, struct dentry **dentry, + int only_unplug); +void ll_stop_statahead(struct inode *dir, void *key); -static inline -void ll_statahead_mark(struct inode *dir, struct dentry *dentry) +static inline int ll_glimpse_size(struct inode *inode) { - struct ll_inode_info *lli; - struct ll_dentry_data *ldd = ll_d2d(dentry); + struct ll_inode_info *lli = ll_i2info(inode); + int rc; + + cfs_down_read(&lli->lli_glimpse_sem); + rc = cl_glimpse_size(inode); + lli->lli_glimpse_time = cfs_time_current(); + cfs_up_read(&lli->lli_glimpse_sem); + return rc; +} - /* dentry has been move to other directory, no need mark */ - if (unlikely(dir != dentry->d_parent->d_inode)) - return; +static inline void +ll_statahead_mark(struct inode *dir, struct dentry *dentry) +{ + struct ll_inode_info *lli = ll_i2info(dir); + struct ll_statahead_info *sai = lli->lli_sai; + struct ll_dentry_data *ldd = ll_d2d(dentry); - lli = ll_i2info(dir); /* not the same process, don't mark */ if (lli->lli_opendir_pid != cfs_curproc_pid()) return; - spin_lock(&lli->lli_lock); - if (likely(lli->lli_sai != NULL && ldd != NULL)) - ldd->lld_sa_generation = lli->lli_sai->sai_generation; - spin_unlock(&lli->lli_lock); + if (sai != NULL && ldd != NULL) + ldd->lld_sa_generation = sai->sai_generation; } -static inline -int ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup) +static inline int +ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug) { struct ll_inode_info *lli; - struct ll_dentry_data *ldd = ll_d2d(*dentryp); - - if (unlikely(dir == NULL)) - return -EAGAIN; + struct ll_dentry_data *ldd; if (ll_i2sbi(dir)->ll_sa_max == 0) - return -ENOTSUPP; + return -EAGAIN; lli = ll_i2info(dir); /* not the same process, don't statahead */ if (lli->lli_opendir_pid != cfs_curproc_pid()) return -EAGAIN; + ldd = ll_d2d(*dentryp); /* - * When "ls" a dentry, the system trigger more than once "revalidate" or - * "lookup", for "getattr", for "getxattr", and maybe for others. + * When stats a dentry, the system trigger more than once "revalidate" + * or "lookup", for "getattr", for "getxattr", and maybe for others. * Under patchless client mode, the operation intent is not accurate, - * it maybe misguide the statahead thread. For example: + * which maybe misguide the statahead thread. For example: * The "revalidate" call for "getattr" and "getxattr" of a dentry maybe * have the same operation intent -- "IT_GETATTR". 
* In fact, one dentry should has only one chance to interact with the @@ -1110,22 +1303,7 @@ int ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup) ldd->lld_sa_generation == lli->lli_sai->sai_generation) return -EAGAIN; - return do_statahead_enter(dir, dentryp, lookup); -} - -static void inline ll_dops_init(struct dentry *de, int block) -{ - struct ll_dentry_data *lld = ll_d2d(de); - - if (lld == NULL && block != 0) { - ll_set_dd(de); - lld = ll_d2d(de); - } - - if (lld != NULL) - lld->lld_sa_generation = 0; - - de->d_op = &ll_d_ops; + return do_statahead_enter(dir, dentryp, only_unplug); } /* llite ioctl register support rountine */ @@ -1180,9 +1358,13 @@ void ll_iocontrol_unregister(void *magic); #define cl_i2info(info) ll_i2info(info) #define cl_inode_mode(inode) ((inode)->i_mode) #define cl_i2sbi ll_i2sbi -#define cl_isize_read(inode) i_size_read(inode) -#define cl_isize_write(inode,kms) i_size_write(inode, kms) -#define cl_isize_write_nolock(inode,kms) do {(inode)->i_size=(kms);}while(0) + +static inline struct ll_file_data *cl_iattr2fd(struct inode *inode, + const struct iattr *attr) +{ + LASSERT(attr->ia_valid & ATTR_FILE); + return LUSTRE_FPRIVATE(attr->ia_file); +} static inline void cl_isize_lock(struct inode *inode, int lsmlock) { @@ -1194,6 +1376,21 @@ static inline void cl_isize_unlock(struct inode *inode, int lsmlock) ll_inode_size_unlock(inode, lsmlock); } +static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms) +{ + LASSERT_SEM_LOCKED(&ll_i2info(inode)->lli_size_sem); + i_size_write(inode, kms); +} + +static inline void cl_isize_write(struct inode *inode, loff_t kms) +{ + ll_inode_size_lock(inode, 0); + i_size_write(inode, kms); + ll_inode_size_unlock(inode, 0); +} + +#define cl_isize_read(inode) i_size_read(inode) + static inline int cl_merge_lvb(struct inode *inode) { return ll_merge_lvb(inode); @@ -1205,6 +1402,8 @@ static inline int cl_merge_lvb(struct inode *inode) struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt); +int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end); + /** direct write pages */ struct ll_dio_pages { /** page array to be written. we don't support @@ -1222,6 +1421,15 @@ struct ll_dio_pages { int ldp_nr; }; +static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt, + int rc) +{ + int opc = (crt == CRT_READ) ? 
LPROC_LL_OSC_READ : + LPROC_LL_OSC_WRITE; + + ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc); +} + extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, int rw, struct inode *inode, struct ll_dio_pages *pv); @@ -1235,4 +1443,62 @@ static inline int ll_file_nolock(const struct file *file) return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) || (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK)); } + +static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, + struct lookup_intent *it, __u64 *bits) +{ + if (!it->d.lustre.it_lock_set) { + CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n", + inode, inode->i_ino, inode->i_generation); + md_set_lock_data(exp, &it->d.lustre.it_lock_handle, + inode, &it->d.lustre.it_lock_bits); + it->d.lustre.it_lock_set = 1; + } + + if (bits != NULL) + *bits = it->d.lustre.it_lock_bits; +} + +static inline void ll_dentry_rehash(struct dentry *dentry, int locked) +{ + if (!locked) { + cfs_spin_lock(&ll_lookup_lock); + spin_lock(&dcache_lock); + } + if (d_unhashed(dentry)) + d_rehash_cond(dentry, 0); + if (!locked) { + spin_unlock(&dcache_lock); + cfs_spin_unlock(&ll_lookup_lock); + } +} + +static inline void ll_dentry_reset_flags(struct dentry *dentry, __u64 bits) +{ + if (bits & MDS_INODELOCK_LOOKUP && + dentry->d_flags & DCACHE_LUSTRE_INVALID) { + lock_dentry(dentry); + dentry->d_flags &= ~DCACHE_LUSTRE_INVALID; + unlock_dentry(dentry); + } +} + +#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2,7,50,0) +/* Compatibility for old (1.8) compiled userspace quota code */ +struct if_quotactl_18 { + __u32 qc_cmd; + __u32 qc_type; + __u32 qc_id; + __u32 qc_stat; + struct obd_dqinfo qc_dqinfo; + struct obd_dqblk qc_dqblk; + char obd_type[16]; + struct obd_uuid obd_uuid; +}; +#define LL_IOC_QUOTACTL_18 _IOWR('f', 162, struct if_quotactl_18 *) +/* End compatibility for old (1.8) compiled userspace quota code */ +#else +#warning "remove old LL_IOC_QUOTACTL_18 compatibility code" +#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2,7,50,0) */ + #endif /* LLITE_INTERNAL_H */
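
The largest structural change in this patch is the split of ll_inode_info into a union of directory-only (u.d.*) and regular-file-only (u.f.*) members, with #define aliases so existing call sites keep using the old lli_* names. The standalone sketch below (not the Lustre code itself; demo_inode_info and its members are simplified stand-ins) shows the same pattern in miniature: fields for the two object kinds share storage, and macro accessors hide the union path.

#include <stdio.h>
#include <sys/types.h>

struct demo_inode_info {
        unsigned int lli_flags;
        union {
                struct {                        /* valid only for directories */
                        pid_t    d_opendir_pid;
                        void    *d_opendir_key;
                } d;
                struct {                        /* valid only for regular files */
                        unsigned long long f_maxbytes;
                        int                f_async_rc;
                } f;
        } u;
};

/* same aliasing trick as lli_opendir_pid / lli_maxbytes in the patch */
#define lli_opendir_pid u.d.d_opendir_pid
#define lli_maxbytes    u.f.f_maxbytes

int main(void)
{
        struct demo_inode_info dir = { 0 }, file = { 0 };

        dir.lli_opendir_pid = 1234;         /* directory view of the union */
        file.lli_maxbytes   = 1ULL << 44;   /* file view of the same storage */

        printf("sizeof(struct demo_inode_info) = %zu\n",
               sizeof(struct demo_inode_info));
        printf("dir pid = %d, file maxbytes = %llu\n",
               (int)dir.lli_opendir_pid, file.lli_maxbytes);
        return 0;
}

As the in-tree comment notes, members only move under u.d/u.f when every caller already knows the object type; frequently used fields such as lli_smd and lli_clob stay outside the union because sizeof(f) > sizeof(d), so moving them would not shrink the structure anyway.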
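
The patch also widens the kernel-API compatibility ladders: ll_fsync() now has three possible prototypes (HAVE_FILE_FSYNC_4ARGS, HAVE_FILE_FSYNC_2ARGS, or the older dentry-based form), and ll_inode_permission() similarly varies with HAVE_GENERIC_PERMISSION_4ARGS / HAVE_INODE_PERMISION_2ARGS. The user-space mock below (my_fsync and the fake struct file are illustration-only; the real HAVE_* macros come from the Lustre configure checks) shows the intended pattern: exactly one probe macro is defined, the header selects the matching prototype, and a single function body serves every kernel generation.

#include <stdio.h>

struct file { int fd; };                /* stand-in for the kernel's struct file */

#define HAVE_FILE_FSYNC_4ARGS 1         /* pretend configure detected the newest API */

#if defined(HAVE_FILE_FSYNC_4ARGS)
static int my_fsync(struct file *file, long long start, long long end, int datasync)
#elif defined(HAVE_FILE_FSYNC_2ARGS)
static int my_fsync(struct file *file, int datasync)
#else
static int my_fsync(struct file *file, void *dentry, int datasync)
#endif
{
        /* common body: only the prototype differs between kernel versions */
        printf("fsync fd=%d datasync=%d\n", file->fd, datasync);
        return 0;
}

int main(void)
{
        struct file f = { .fd = 3 };

        /* with HAVE_FILE_FSYNC_4ARGS defined above, this is the 4-argument call */
        return my_fsync(&f, 0, -1, 1);
}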
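
LL_SBI_32BIT_API, ll_need_32bit_api() and the LL_DIR_END_OFF / LL_DIR_END_OFF_32BIT sentinels are all new in this patch. The sketch below is only a guess at how they are meant to combine: a client that must present a 32-bit interface (32-bit kernel, compat task, or the 32bitapi mount flag) reports the 32-bit end-of-directory offset. need_32bit_api(), demo_sb_info and dir_end_off() are simplified stand-ins; in the kernel the compat-task test is cfs_curproc_is_32bit().

#include <stdio.h>
#include <stdint.h>

#define LL_DIR_END_OFF          0x7fffffffffffffffULL
#define LL_DIR_END_OFF_32BIT    0x7fffffffUL
#define LL_SBI_32BIT_API        0x2000

struct demo_sb_info { int ll_flags; };

static int need_32bit_api(const struct demo_sb_info *sbi, int compat_task)
{
        if (sizeof(long) == 4)          /* 32-bit kernel: always 32-bit API */
                return 1;
        return compat_task || (sbi->ll_flags & LL_SBI_32BIT_API);
}

static uint64_t dir_end_off(const struct demo_sb_info *sbi, int compat_task)
{
        return need_32bit_api(sbi, compat_task) ?
               LL_DIR_END_OFF_32BIT : LL_DIR_END_OFF;
}

int main(void)
{
        struct demo_sb_info plain  = { 0 };
        struct demo_sb_info forced = { LL_SBI_32BIT_API };

        printf("native client:   end = 0x%llx\n",
               (unsigned long long)dir_end_off(&plain, 0));
        printf("32bitapi client: end = 0x%llx\n",
               (unsigned long long)dir_end_off(&forced, 0));
        return 0;
}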