X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fllite_internal.h;h=78f6b9730d7a142b7ef1fc1f098733c0eb4e0179;hp=4363d09622d30ee170359d34c9f2ff047a755b42;hb=7d5d004506650c3739898e70d72c9a86b8aeeb88;hpb=e95eca236471cf23083ef281ef204a5920e4db9b diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h index 4363d09..78f6b97 100644 --- a/lustre/llite/llite_internal.h +++ b/lustre/llite/llite_internal.h @@ -27,14 +27,12 @@ */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. */ #ifndef LLITE_INTERNAL_H #define LLITE_INTERNAL_H -#include +#include #include /* for s2sbi */ -#include #include /* for struct cl_lock_descr and struct cl_io */ @@ -46,10 +44,11 @@ #include #include #include +#include #include "vvp_internal.h" -#include "range_lock.h" #include "pcc.h" +#include "foreign_symlink.h" #ifndef FMODE_EXEC #define FMODE_EXEC 0 @@ -134,6 +133,12 @@ struct ll_inode_info { __u64 lli_open_fd_read_count; __u64 lli_open_fd_write_count; __u64 lli_open_fd_exec_count; + + /* Number of times this inode was opened */ + u64 lli_open_fd_count; + /* When last close was performed on this inode */ + ktime_t lli_close_fd_time; + /* Protects access to och pointers and their usage counters */ struct mutex lli_och_mutex; @@ -239,16 +244,16 @@ struct ll_inode_info { }; }; - /* XXX: For following frequent used members, although they maybe special - * used for non-directory object, it is some time-wasting to check - * whether the object is directory or not before using them. On the - * other hand, currently, sizeof(f) > sizeof(d), it cannot reduce - * the "ll_inode_info" size even if moving those members into u.f. - * So keep them out side. - * - * In the future, if more members are added only for directory, - * some of the following members can be moved into u.f. - */ + /* XXX: For following frequent used members, although they maybe special + * used for non-directory object, it is some time-wasting to check + * whether the object is directory or not before using them. On the + * other hand, currently, sizeof(f) > sizeof(d), it cannot reduce + * the "ll_inode_info" size even if moving those members into u.f. + * So keep them out side. + * + * In the future, if more members are added only for directory, + * some of the following members can be moved into u.f. + */ struct cl_object *lli_clob; /* mutex to request for layout lock exclusively. */ @@ -278,11 +283,11 @@ static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem) * * We must take lli_trunc_sem in read mode on entry in to various i/o paths * in Lustre, in order to exclude truncates. Some of these paths then need to - * take the mmap_sem, while still holding the trunc_sem. The problem is that - * page faults hold the mmap_sem when calling in to Lustre, and then must also + * take the mmap_lock, while still holding the trunc_sem. The problem is that + * page faults hold the mmap_lock when calling in to Lustre, and then must also * take the trunc_sem to exclude truncate. * - * This means the locking order for trunc_sem and mmap_sem is sometimes AB, + * This means the locking order for trunc_sem and mmap_lock is sometimes AB, * sometimes BA. This is almost OK because in both cases, we take the trunc * sem for read, so it doesn't block. 
* @@ -292,9 +297,9 @@ static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem) * * So we have, on our truncate sem, in order (where 'reader' and 'writer' refer * to the mode in which they take the semaphore): - * reader (holding mmap_sem, needs truncate_sem) + * reader (holding mmap_lock, needs truncate_sem) * writer - * reader (holding truncate sem, waiting for mmap_sem) + * reader (holding truncate sem, waiting for mmap_lock) * * And so the readers deadlock. * @@ -304,7 +309,7 @@ static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem) * of the order they arrived in. * * down_read_nowait is only used in the page fault case, where we already hold - * the mmap_sem. This is because otherwise repeated read and write operations + * the mmap_lock. This is because otherwise repeated read and write operations * (which take the truncate sem) could prevent a truncate from ever starting. * This could still happen with page faults, but without an even more complex * mechanism, this is unavoidable. @@ -403,40 +408,20 @@ enum ll_file_flags { LLIF_PROJECT_INHERIT = 3, /* update atime from MDS even if it's older than local inode atime. */ LLIF_UPDATE_ATIME = 4, + /* foreign file/dir can be unlinked unconditionnaly */ + LLIF_FOREIGN_REMOVABLE = 5, + /* setting encryption context in progress */ + LLIF_SET_ENC_CTX = 6, }; -static inline void ll_file_set_flag(struct ll_inode_info *lli, - enum ll_file_flags flag) -{ - set_bit(flag, &lli->lli_flags); -} - -static inline void ll_file_clear_flag(struct ll_inode_info *lli, - enum ll_file_flags flag) -{ - clear_bit(flag, &lli->lli_flags); -} - -static inline bool ll_file_test_flag(struct ll_inode_info *lli, - enum ll_file_flags flag) -{ - return test_bit(flag, &lli->lli_flags); -} - -static inline bool ll_file_test_and_clear_flag(struct ll_inode_info *lli, - enum ll_file_flags flag) -{ - return test_and_clear_bit(flag, &lli->lli_flags); -} - int ll_xattr_cache_destroy(struct inode *inode); int ll_xattr_cache_get(struct inode *inode, - const char *name, - char *buffer, - size_t size, - __u64 valid); + const char *name, + char *buffer, + size_t size, + __u64 valid); int ll_xattr_cache_insert(struct inode *inode, const char *name, @@ -447,7 +432,7 @@ static inline bool obd_connect_has_secctx(struct obd_connect_data *data) { #ifdef CONFIG_SECURITY return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 && - data->ocd_connect_flags2 & OBD_CONNECT2_FILE_SECCTX; + data->ocd_connect_flags2 & OBD_CONNECT2_FILE_SECCTX; #else return false; #endif @@ -509,12 +494,21 @@ static inline struct pcc_inode *ll_i2pcci(struct inode *inode) /* default to use at least 16M for fast read if possible */ #define RA_REMAIN_WINDOW_MIN MiB_TO_PAGES(16UL) -/* default readahead on a given system. */ -#define SBI_DEFAULT_READ_AHEAD_MAX MiB_TO_PAGES(64UL) +/* default read-ahead on a given client mountpoint. 
*/ +#define SBI_DEFAULT_READ_AHEAD_MAX MiB_TO_PAGES(1024UL) + +/* default read-ahead for a single file descriptor */ +#define SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX MiB_TO_PAGES(256UL) /* default read-ahead full files smaller than limit on the second read */ #define SBI_DEFAULT_READ_AHEAD_WHOLE_MAX MiB_TO_PAGES(2UL) +/* default range pages */ +#define SBI_DEFAULT_RA_RANGE_PAGES MiB_TO_PAGES(1ULL) + +/* Min range pages */ +#define RA_MIN_MMAP_RANGE_PAGES 16UL + enum ra_stat { RA_STAT_HIT = 0, RA_STAT_MISS, @@ -531,6 +525,7 @@ enum ra_stat { RA_STAT_FAILED_REACH_END, RA_STAT_ASYNC, RA_STAT_FAILED_FAST_READ, + RA_STAT_MMAP_RANGE_READ, _NR_RA_STAT, }; @@ -538,6 +533,7 @@ struct ll_ra_info { atomic_t ra_cur_pages; unsigned long ra_max_pages; unsigned long ra_max_pages_per_file; + unsigned long ra_range_pages; unsigned long ra_max_read_ahead_whole_pages; struct workqueue_struct *ll_readahead_wq; /* @@ -640,6 +636,9 @@ enum stats_track_type { #define LL_SBI_FILE_HEAT 0x4000000 /* file heat support */ #define LL_SBI_TEST_DUMMY_ENCRYPTION 0x8000000 /* test dummy encryption */ #define LL_SBI_ENCRYPT 0x10000000 /* client side encryption */ +#define LL_SBI_FOREIGN_SYMLINK 0x20000000 /* foreign fake-symlink support */ +/* foreign fake-symlink upcall registered */ +#define LL_SBI_FOREIGN_SYMLINK_UPCALL 0x40000000 #define LL_SBI_FLAGS { \ "nolck", \ "checksum", \ @@ -670,6 +669,8 @@ enum stats_track_type { "file_heat", \ "test_dummy_encryption", \ "noencrypt", \ + "foreign_symlink", \ + "foreign_symlink_upcall", \ } /* This is embedded into llite super-blocks to keep track of connect @@ -699,15 +700,15 @@ struct ll_sb_info { struct dentry *ll_debugfs_entry; struct lu_fid ll_root_fid; /* root object fid */ - int ll_flags; + int ll_flags; unsigned int ll_xattr_cache_enabled:1, ll_xattr_cache_set:1, /* already set to 0/1 */ ll_client_common_fill_super_succeeded:1, ll_checksum_set:1; - struct lustre_client_ocd ll_lco; + struct lustre_client_ocd ll_lco; - struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ + struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ /* Used to track "unstable" pages on a client, and maintain a * LRU list of clean pages. An "unstable" page is defined as @@ -715,24 +716,24 @@ struct ll_sb_info { * but is uncommitted to stable storage. 
*/ struct cl_client_cache *ll_cache; - struct lprocfs_stats *ll_ra_stats; - - struct ll_ra_info ll_ra_info; - unsigned int ll_namelen; - struct file_operations *ll_fop; - - struct lu_site *ll_site; - struct cl_device *ll_cl; - /* Statistics */ - struct ll_rw_extents_info ll_rw_extents_info; - int ll_extent_process_count; - struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX]; - unsigned int ll_offset_process_count; - struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX]; - unsigned int ll_rw_offset_entry_count; - int ll_stats_track_id; - enum stats_track_type ll_stats_track_type; - int ll_rw_stats_on; + struct lprocfs_stats *ll_ra_stats; + + struct ll_ra_info ll_ra_info; + unsigned int ll_namelen; + const struct file_operations *ll_fop; + + struct lu_site *ll_site; + struct cl_device *ll_cl; + /* Statistics */ + struct ll_rw_extents_info ll_rw_extents_info; + int ll_extent_process_count; + struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX]; + unsigned int ll_offset_process_count; + struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX]; + unsigned int ll_rw_offset_entry_count; + int ll_stats_track_id; + enum stats_track_type ll_stats_track_type; + int ll_rw_stats_on; /* metadata stat-ahead */ unsigned int ll_sa_running_max;/* max concurrent @@ -765,15 +766,44 @@ struct ll_sb_info { unsigned int ll_heat_decay_weight; unsigned int ll_heat_period_second; + /* Opens of the same inode before we start requesting open lock */ + u32 ll_oc_thrsh_count; + + /* Time in ms between last inode close and next open to be considered + * instant back to back and would trigger an open lock request + */ + u32 ll_oc_thrsh_ms; + + /* Time in ms after last file close that we no longer count prior opens*/ + u32 ll_oc_max_ms; + /* filesystem fsname */ char ll_fsname[LUSTRE_MAXFSNAME + 1]; /* Persistent Client Cache */ struct pcc_super ll_pcc_super; + + /* to protect vs updates in all following foreign symlink fields */ + struct rw_semaphore ll_foreign_symlink_sem; + /* foreign symlink path prefix */ + char *ll_foreign_symlink_prefix; + /* full prefix size including leading '\0' */ + size_t ll_foreign_symlink_prefix_size; + /* foreign symlink path upcall */ + char *ll_foreign_symlink_upcall; + /* foreign symlink path upcall infos */ + struct ll_foreign_symlink_upcall_item *ll_foreign_symlink_upcall_items; + /* foreign symlink path upcall nb infos */ + unsigned int ll_foreign_symlink_upcall_nb_items; }; #define SBI_DEFAULT_HEAT_DECAY_WEIGHT ((80 * 256 + 50) / 100) #define SBI_DEFAULT_HEAT_PERIOD_SECOND (60) + +#define SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT (5) +#define SBI_DEFAULT_OPENCACHE_THRESHOLD_MS (100) /* 0.1 second */ +#define SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS (60000) /* 1 minute */ + /* * per file-descriptor read-ahead data. */ @@ -807,6 +837,16 @@ struct ll_readahead_state { */ pgoff_t ras_window_start_idx; pgoff_t ras_window_pages; + + /* Page index where min range read starts */ + pgoff_t ras_range_min_start_idx; + /* Page index where mmap range read ends */ + pgoff_t ras_range_max_end_idx; + /* number of mmap pages where last time detected */ + pgoff_t ras_last_range_pages; + /* number of mmap range requests */ + pgoff_t ras_range_requests; + /* * Optimal RPC size in pages. * It decides how many pages will be sent for each read-ahead. 
@@ -944,6 +984,11 @@ static inline bool ll_sbi_has_file_heat(struct ll_sb_info *sbi) return !!(sbi->ll_flags & LL_SBI_FILE_HEAT); } +static inline bool ll_sbi_has_foreign_symlink(struct ll_sb_info *sbi) +{ + return !!(sbi->ll_flags & LL_SBI_FOREIGN_SYMLINK); +} + void ll_ras_enter(struct file *f, loff_t pos, size_t count); /* llite/lcommon_misc.c */ @@ -992,6 +1037,8 @@ enum { LPROC_LL_REMOVEXATTR, LPROC_LL_INODE_PERM, LPROC_LL_FALLOCATE, + LPROC_LL_INODE_OCOUNT, + LPROC_LL_INODE_OPCLTM, LPROC_LL_FILE_OPCODES }; @@ -1000,17 +1047,6 @@ enum get_default_layout_type { GET_DEFAULT_LAYOUT_ROOT = 1, }; -struct ll_dir_chain { -}; - -static inline void ll_dir_chain_init(struct ll_dir_chain *chain) -{ -} - -static inline void ll_dir_chain_fini(struct ll_dir_chain *chain) -{ -} - extern const struct file_operations ll_dir_operations; extern const struct inode_operations ll_dir_inode_operations; #ifdef HAVE_DIR_CONTEXT @@ -1023,8 +1059,9 @@ int ll_dir_read(struct inode *inode, __u64 *pos, struct md_op_data *op_data, int ll_get_mdt_idx(struct inode *inode); int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid); struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data, - __u64 offset, struct ll_dir_chain *chain); + __u64 offset); void ll_release_page(struct inode *inode, struct page *page, bool remove); +int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl); /* llite/namei.c */ extern const struct inode_operations ll_special_inode_operations; @@ -1056,10 +1093,8 @@ struct ll_cl_context *ll_cl_find(struct file *file); extern const struct address_space_operations ll_aops; /* llite/file.c */ -extern struct file_operations ll_file_operations; -extern struct file_operations ll_file_operations_flock; -extern struct file_operations ll_file_operations_noflock; -extern struct inode_operations ll_file_inode_operations; +extern const struct inode_operations ll_file_inode_operations; +const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi); extern int ll_have_md_lock(struct inode *inode, __u64 *bits, enum ldlm_mode l_req_mode); extern enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, @@ -1070,6 +1105,7 @@ int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); int ll_release_openhandle(struct dentry *, struct lookup_intent *); int ll_md_real_close(struct inode *inode, fmode_t fmode); +void ll_track_file_opens(struct inode *inode); extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid, struct ll_file_data *file, loff_t pos, size_t count, int rw); @@ -1080,17 +1116,15 @@ int ll_getattr(const struct path *path, struct kstat *stat, int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat); #endif int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask, - unsigned int flags); -struct posix_acl *ll_get_acl(struct inode *inode, int type); -#ifdef HAVE_IOP_SET_ACL + unsigned int flags, bool foreign); #ifdef CONFIG_LUSTRE_FS_POSIX_ACL +struct posix_acl *ll_get_acl(struct inode *inode, int type); int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type); #else /* !CONFIG_LUSTRE_FS_POSIX_ACL */ +#define ll_get_acl NULL #define ll_set_acl NULL #endif /* CONFIG_LUSTRE_FS_POSIX_ACL */ -#endif - static inline int ll_xflags_to_inode_flags(int xflags) { return ((xflags & FS_XFLAG_SYNC) ? S_SYNC : 0) | @@ -1099,12 +1133,12 @@ static inline int ll_xflags_to_inode_flags(int xflags) ((xflags & FS_XFLAG_IMMUTABLE) ? 
S_IMMUTABLE : 0); } -static inline int ll_inode_flags_to_xflags(int flags) +static inline int ll_inode_flags_to_xflags(int inode_flags) { - return ((flags & S_SYNC) ? FS_XFLAG_SYNC : 0) | - ((flags & S_NOATIME) ? FS_XFLAG_NOATIME : 0) | - ((flags & S_APPEND) ? FS_XFLAG_APPEND : 0) | - ((flags & S_IMMUTABLE) ? FS_XFLAG_IMMUTABLE : 0); + return ((inode_flags & S_SYNC) ? FS_XFLAG_SYNC : 0) | + ((inode_flags & S_NOATIME) ? FS_XFLAG_NOATIME : 0) | + ((inode_flags & S_APPEND) ? FS_XFLAG_APPEND : 0) | + ((inode_flags & S_IMMUTABLE) ? FS_XFLAG_IMMUTABLE : 0); } int ll_migrate(struct inode *parent, struct file *file, @@ -1145,13 +1179,13 @@ int ll_d_init(struct dentry *de); extern const struct dentry_operations ll_d_ops; void ll_intent_drop_lock(struct lookup_intent *); void ll_intent_release(struct lookup_intent *); -void ll_invalidate_aliases(struct inode *); +void ll_prune_aliases(struct inode *inode); void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry); int ll_revalidate_it_finish(struct ptlrpc_request *request, struct lookup_intent *it, struct dentry *de); /* llite/llite_lib.c */ -extern struct super_operations lustre_super_operations; +extern const struct super_operations lustre_super_operations; void ll_lli_init(struct ll_inode_info *lli); int ll_fill_super(struct super_block *sb); @@ -1167,8 +1201,9 @@ int ll_statfs(struct dentry *de, struct kstatfs *sfs); int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs, u32 flags); int ll_update_inode(struct inode *inode, struct lustre_md *md); -void ll_update_inode_flags(struct inode *inode, int ext_flags); +void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags); int ll_read_inode2(struct inode *inode, void *opaque); +void ll_truncate_inode_pages_final(struct inode *inode); void ll_delete_inode(struct inode *inode); int ll_iocontrol(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); @@ -1223,31 +1258,25 @@ static inline ssize_t ll_lov_user_md_size(const struct lov_user_md *lum) } /* llite/llite_nfs.c */ -extern struct export_operations lustre_export_operations; +extern const struct export_operations lustre_export_operations; __u32 get_uuid2int(const char *name, int len); struct inode *search_inode_for_lustre(struct super_block *sb, const struct lu_fid *fid); int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid); /* llite/symlink.c */ -extern struct inode_operations ll_fast_symlink_inode_operations; +extern const struct inode_operations ll_fast_symlink_inode_operations; /** * IO arguments for various VFS I/O interfaces. 
*/ struct vvp_io_args { /** normal/sendfile/splice */ - enum vvp_io_subtype via_io_subtype; - union { struct { struct kiocb *via_iocb; struct iov_iter *via_iter; } normal; - struct { - struct pipe_inode_info *via_pipe; - unsigned int via_flags; - } splice; } u; }; @@ -1283,14 +1312,9 @@ static inline struct ll_thread_info *ll_env_info(const struct lu_env *env) return lti; } -static inline struct vvp_io_args *ll_env_args(const struct lu_env *env, - enum vvp_io_subtype type) +static inline struct vvp_io_args *ll_env_args(const struct lu_env *env) { - struct vvp_io_args *via = &ll_env_info(env)->lti_args; - - via->via_io_subtype = type; - - return via; + return &ll_env_info(env)->lti_args; } void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot, @@ -1298,7 +1322,6 @@ void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot, /* llite/llite_mmap.c */ -int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last); int ll_file_mmap(struct file * file, struct vm_area_struct * vma); void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma, unsigned long addr, size_t count); @@ -1444,7 +1467,6 @@ struct ll_statahead_info { */ unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for * hidden entries */ - sai_agl_valid:1,/* AGL is valid for the dir */ sai_in_readpage:1;/* statahead is in readdir()*/ wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ struct task_struct *sai_task; /* stat-ahead thread */ @@ -1613,28 +1635,18 @@ static inline void __d_lustre_invalidate(struct dentry *dentry) /* * Mark dentry INVALID, if dentry refcount is zero (this is normally case for - * ll_md_blocking_ast), unhash this dentry, and let dcache to reclaim it later; - * else dput() of the last refcount will unhash this dentry and kill it. + * ll_md_blocking_ast), it will be pruned by ll_prune_aliases() and + * ll_prune_negative_children(); otherwise dput() of the last refcount will + * unhash this dentry and kill it. */ -static inline void d_lustre_invalidate(struct dentry *dentry, int nested) +static inline void d_lustre_invalidate(struct dentry *dentry) { CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n", dentry, dentry, dentry->d_parent, dentry->d_inode, ll_d_count(dentry)); - spin_lock_nested(&dentry->d_lock, - nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL); + spin_lock(&dentry->d_lock); __d_lustre_invalidate(dentry); - /* - * We should be careful about dentries created by d_obtain_alias(). - * These dentries are not put in the dentry tree, instead they are - * linked to sb->s_anon through dentry->d_hash. - * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list. - * If we unhashed such a dentry, unmount would not be able to find - * it and busy inodes would be reported. - */ - if (ll_d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED)) - __d_drop(dentry); spin_unlock(&dentry->d_lock); } @@ -1699,5 +1711,9 @@ static inline struct pcc_super *ll_info2pccs(struct ll_inode_info *lli) /* crypto.c */ extern const struct llcrypt_operations lustre_cryptops; #endif +/* llite/llite_foreign.c */ +int ll_manage_foreign(struct inode *inode, struct lustre_md *lmd); +bool ll_foreign_is_openable(struct dentry *dentry, unsigned int flags); +bool ll_foreign_is_removable(struct dentry *dentry, bool unset); #endif /* LLITE_INTERNAL_H */
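
The open-cache fields added by this patch (ll_oc_thrsh_count, ll_oc_thrsh_ms and ll_oc_max_ms in struct ll_sb_info, lli_open_fd_count and lli_close_fd_time in struct ll_inode_info, plus the ll_track_file_opens() declaration) appear here only as declarations; the policy itself lives in llite/file.c, outside this hunk. As a rough illustration of how such counters and thresholds can be combined, here is a minimal sketch, assuming the usual ll_i2info()/ll_i2sbi() helpers from this header; the function name ll_should_request_open_lock() is hypothetical and this is not the code from the patch:

/* Hypothetical sketch, not from this patch: decide whether repeated opens
 * of an inode justify requesting an open lock from the MDS.  Only fields
 * declared in this header are used; the helper name is invented.
 */
static bool ll_should_request_open_lock(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	bool want_lock = false;

	mutex_lock(&lli->lli_och_mutex);
	/* Opens separated by more than ll_oc_max_ms are treated as
	 * unrelated: forget the accumulated history.
	 */
	if (ktime_ms_delta(ktime_get(), lli->lli_close_fd_time) >
	    sbi->ll_oc_max_ms)
		lli->lli_open_fd_count = 0;

	lli->lli_open_fd_count++;

	/* Request an open lock on rapid close-to-open cycles, or once the
	 * inode has been opened often enough (defaults above: 100 ms, 5 opens).
	 */
	if (ktime_ms_delta(ktime_get(), lli->lli_close_fd_time) <
	    sbi->ll_oc_thrsh_ms ||
	    lli->lli_open_fd_count >= sbi->ll_oc_thrsh_count)
		want_lock = true;
	mutex_unlock(&lli->lli_och_mutex);

	return want_lock;
}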