LU-1330 obdclass: splits server-side object stack from client
[fs/lustre-release.git] lustre/llite/llite_internal.h
index 8b3ab1b..52e40b5 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -36,9 +36,6 @@
 
 #ifndef LLITE_INTERNAL_H
 #define LLITE_INTERNAL_H
-
-#include <lustre_acl.h>
-
 #include <lustre_debug.h>
 #include <lustre_ver.h>
 #include <lustre_disk.h>  /* for s2sbi */
@@ -124,19 +121,20 @@ enum lli_flags {
         LLIF_CONTENDED          = (1 << 4),
         /* Truncate uses server lock for this file */
         LLIF_SRVLOCK            = (1 << 5),
-
+       /* File data is modified. */
+       LLIF_DATA_MODIFIED      = (1 << 6),
 };
 
 struct ll_inode_info {
-        __u32                           lli_inode_magic;
-        __u32                           lli_flags;
-        __u64                           lli_ioepoch;
+       __u32                           lli_inode_magic;
+       __u32                           lli_flags;
+       __u64                           lli_ioepoch;
 
-        cfs_spinlock_t                  lli_lock;
-        struct posix_acl               *lli_posix_acl;
+       spinlock_t                      lli_lock;
+       struct posix_acl                *lli_posix_acl;
 
-        cfs_hlist_head_t               *lli_remote_perms;
-        cfs_mutex_t                     lli_rmtperm_mutex;
+       cfs_hlist_head_t                *lli_remote_perms;
+       struct mutex                            lli_rmtperm_mutex;
 
         /* identifying fields for both metadata and data stacks. */
         struct lu_fid                   lli_fid;
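
A note on the new LLIF_DATA_MODIFIED bit: lli_flags is a plain bit mask, so updates are expected to happen under lli_lock, which this hunk converts to a native spinlock_t. A minimal sketch of how such a flag might be set and consumed (the helper names are hypothetical, not part of this patch):

    /* Hypothetical helpers, shown for illustration only. */
    static inline void ll_data_modified_set(struct ll_inode_info *lli)
    {
            spin_lock(&lli->lli_lock);
            lli->lli_flags |= LLIF_DATA_MODIFIED;
            spin_unlock(&lli->lli_lock);
    }

    /* Returns true if the flag was set, clearing it under lli_lock. */
    static inline bool ll_data_modified_test_and_clear(struct ll_inode_info *lli)
    {
            bool was_set;

            spin_lock(&lli->lli_lock);
            was_set = (lli->lli_flags & LLIF_DATA_MODIFIED) != 0;
            lli->lli_flags &= ~LLIF_DATA_MODIFIED;
            spin_unlock(&lli->lli_lock);
            return was_set;
    }
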
@@ -165,23 +163,22 @@ struct ll_inode_info {
         __u64                           lli_open_fd_read_count;
         __u64                           lli_open_fd_write_count;
         __u64                           lli_open_fd_exec_count;
-        /* Protects access to och pointers and their usage counters, also
-        * atomicity of check-update of lli_has_smd */
-        cfs_mutex_t                     lli_och_mutex;
+        /* Protects access to och pointers and their usage counters */
+       struct mutex                    lli_och_mutex;
 
-        struct inode                    lli_vfs_inode;
+       struct inode                    lli_vfs_inode;
 
-        /* the most recent timestamps obtained from mds */
-        struct ost_lvb                  lli_lvb;
-        cfs_spinlock_t                  lli_agl_lock;
+       /* the most recent timestamps obtained from mds */
+       struct ost_lvb                  lli_lvb;
+       spinlock_t                      lli_agl_lock;
 
-        /* Try to make the d::member and f::member are aligned. Before using
-         * these members, make clear whether it is directory or not. */
-        union {
-                /* for directory */
-                struct {
-                        /* serialize normal readdir and statahead-readdir. */
-                        cfs_mutex_t                     d_readdir_mutex;
+       /* Try to keep the d::member and f::member aligned. Before using
+        * these members, make clear whether it is a directory or not. */
+       union {
+               /* for directory */
+               struct {
+                       /* serialize normal readdir and statahead-readdir. */
+                       struct mutex                    d_readdir_mutex;
 
                         /* metadata statahead */
                         /* since parent-child threads can share the same @file
@@ -192,11 +189,11 @@ struct ll_inode_info {
                         struct ll_statahead_info       *d_sai;
                         struct posix_acl               *d_def_acl;
                         /* protect statahead stuff. */
-                        cfs_spinlock_t                  d_sa_lock;
-                        /* "opendir_pid" is the token when lookup/revalid
-                         * -- I am the owner of dir statahead. */
-                        pid_t                           d_opendir_pid;
-                } d;
+                       spinlock_t                      d_sa_lock;
+                       /* "opendir_pid" is the token when lookup/revalidate
+                        * -- I am the owner of dir statahead. */
+                       pid_t                           d_opendir_pid;
+               } d;
 
 #define lli_readdir_mutex       u.d.d_readdir_mutex
 #define lli_opendir_key         u.d.d_opendir_key
@@ -205,30 +202,35 @@ struct ll_inode_info {
 #define lli_sa_lock             u.d.d_sa_lock
 #define lli_opendir_pid         u.d.d_opendir_pid
 
-                /* for non-directory */
-                struct {
-                        cfs_semaphore_t                 f_size_sem;
-                        void                           *f_size_sem_owner;
-                        char                           *f_symlink_name;
-                        __u64                           f_maxbytes;
-                        /*
-                         * cfs_rw_semaphore_t {
-                         *    signed long      count;     // align u.d.d_def_acl
-                         *    cfs_spinlock_t   wait_lock; // align u.d.d_sa_lock
-                         *    struct list_head wait_list;
-                         * }
-                         */
-                        cfs_rw_semaphore_t              f_trunc_sem;
-                        cfs_mutex_t                     f_write_mutex;
+               /* for non-directory */
+               struct {
+                       struct semaphore                f_size_sem;
+                       void                            *f_size_sem_owner;
+                       char                            *f_symlink_name;
+                       __u64                           f_maxbytes;
+                       /*
+                        * struct rw_semaphore {
+                        *    signed long       count;     // align d.d_def_acl
+                        *    spinlock_t        wait_lock; // align d.d_sa_lock
+                        *    struct list_head wait_list;
+                        * }
+                        */
+                       struct rw_semaphore             f_trunc_sem;
+                       struct mutex                    f_write_mutex;
 
-                       cfs_rw_semaphore_t              f_glimpse_sem;
+                       struct rw_semaphore             f_glimpse_sem;
                        cfs_time_t                      f_glimpse_time;
                        cfs_list_t                      f_agl_list;
                        __u64                           f_agl_index;
 
-                        /* for writepage() only to communicate to fsync */
-                        int                            f_async_rc;
+                       /* for writepage() only to communicate to fsync */
+                       int                             f_async_rc;
 
+                       /* the volatile file criterion is based on the file
+                        * name; this flag keeps the test result so the
+                        * strcmp is done only once
+                        */
+                       bool                            f_volatile;
                        /*
                         * whenever a process try to read/write the file, the
                         * jobid of the process will be saved here, and it'll
@@ -252,6 +254,7 @@ struct ll_inode_info {
 #define lli_agl_index          u.f.f_agl_index
 #define lli_async_rc           u.f.f_async_rc
 #define lli_jobid              u.f.f_jobid
+#define lli_volatile           u.f.f_volatile
 
        } u;
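
On f_volatile/lli_volatile: the idea in the comment above is to run the name-based test once at open time and cache the boolean, so later code reads lli_volatile instead of repeating the strcmp. A rough sketch of such a test, assuming a placeholder name prefix (the real volatile-file naming convention is not shown in this header):

    /* Illustration only: ".volatile-" is a placeholder prefix, not the
     * actual Lustre volatile-file naming convention. */
    #define EXAMPLE_VOLATILE_PREFIX ".volatile-"

    static bool example_name_is_volatile(const char *name, int namelen)
    {
            const int plen = sizeof(EXAMPLE_VOLATILE_PREFIX) - 1;

            return namelen >= plen &&
                   strncmp(name, EXAMPLE_VOLATILE_PREFIX, plen) == 0;
    }

The result would be stored in lli->lli_volatile when the file is opened and only read afterwards.
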
 
@@ -269,7 +272,9 @@ struct ll_inode_info {
        struct cl_object               *lli_clob;
 
        /* mutex to request for layout lock exclusively. */
-       cfs_mutex_t                     lli_layout_mutex;
+       struct mutex                    lli_layout_mutex;
+       /* valid only inside LAYOUT ibits lock, protected by lli_layout_mutex */
+       __u32                           lli_layout_gen;
 };
 
 /*
@@ -394,6 +399,26 @@ enum stats_track_type {
 #define LL_SBI_LAYOUT_LOCK    0x20000 /* layout lock support */
 #define LL_SBI_USER_FID2PATH  0x40000 /* allow fid2path by unprivileged users */
 
+#define LL_SBI_FLAGS {         \
+       "nolck",        \
+       "checksum",     \
+       "flock",        \
+       "xattr",        \
+       "acl",          \
+       "rmt_client",   \
+       "mds_capa",     \
+       "oss_capa",     \
+       "flock",        \
+       "lru_resize",   \
+       "lazy_statfs",  \
+       "som",          \
+       "32bit_api",    \
+       "64bit_hash",   \
+       "agl",          \
+       "verbose",      \
+       "layout",       \
+       "user_fid2path" }
+
 /* default value for ll_sb_info->contention_time */
 #define SBI_DEFAULT_CONTENTION_SECONDS     60
 /* default value for lockless_truncate_enable */
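
The LL_SBI_FLAGS table pairs one human-readable name with each LL_SBI_* bit, in bit order, which is handy when turning the ll_flags mask into a mount-option style string (for example in ll_show_options()). A standalone sketch of that mapping, assuming bit n corresponds to names[n]:

    #include <stdio.h>

    /* Local copy of the name table above; bit n of the flags mask maps
     * to names[n]. */
    static const char *names[] = {
            "nolck", "checksum", "flock", "xattr", "acl", "rmt_client",
            "mds_capa", "oss_capa", "flock", "lru_resize", "lazy_statfs",
            "som", "32bit_api", "64bit_hash", "agl", "verbose", "layout",
            "user_fid2path"
    };

    static void print_sbi_flags(unsigned int flags)
    {
            int printed = 0;
            unsigned int i;

            for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                    if (flags & (1u << i)) {
                            printf("%s%s", printed ? "," : "", names[i]);
                            printed = 1;
                    }
            }
            printf("\n");
    }

    int main(void)
    {
            /* bits 1 and 9 set: prints "checksum,lru_resize" */
            print_sbi_flags((1u << 1) | (1u << 9));
            return 0;
    }
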
@@ -407,8 +432,8 @@ struct rmtacl_ctl_entry {
 };
 
 struct rmtacl_ctl_table {
-        cfs_spinlock_t   rct_lock;
-        cfs_list_t       rct_entries[RCE_HASHES];
+       spinlock_t      rct_lock;
+       cfs_list_t      rct_entries[RCE_HASHES];
 };
 
 #define EE_HASHES       32
@@ -422,17 +447,17 @@ struct eacl_entry {
 };
 
 struct eacl_table {
-        cfs_spinlock_t   et_lock;
-        cfs_list_t       et_entries[EE_HASHES];
+       spinlock_t      et_lock;
+       cfs_list_t      et_entries[EE_HASHES];
 };
 
 struct ll_sb_info {
-        cfs_list_t                ll_list;
-        /* this protects pglist and ra_info.  It isn't safe to
-         * grab from interrupt contexts */
-        cfs_spinlock_t            ll_lock;
-        cfs_spinlock_t            ll_pp_extent_lock; /* Lock for pp_extent entries */
-        cfs_spinlock_t            ll_process_lock; /* Lock for ll_rw_process_info */
+       cfs_list_t                ll_list;
+       /* this protects pglist and ra_info.  It isn't safe to
+        * grab from interrupt contexts */
+       spinlock_t                ll_lock;
+       spinlock_t                ll_pp_extent_lock; /* pp_extent entries */
+       spinlock_t                ll_process_lock; /* ll_rw_process_info */
         struct obd_uuid           ll_sb_uuid;
         struct obd_export        *ll_md_exp;
         struct obd_export        *ll_dt_exp;
@@ -501,7 +526,7 @@ struct ll_ra_read {
  * per file-descriptor read-ahead data.
  */
 struct ll_readahead_state {
-        cfs_spinlock_t  ras_lock;
+       spinlock_t  ras_lock;
         /*
          * index of the last page that read(2) needed and that wasn't in the
          * cache. Used by ras_update() to detect seeks.
@@ -600,7 +625,7 @@ struct ll_file_data {
 
 struct lov_stripe_md;
 
-extern cfs_spinlock_t inode_lock;
+extern spinlock_t inode_lock;
 
 extern struct proc_dir_entry *proc_lustre_fs_root;
 
@@ -672,11 +697,12 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
                       struct lustre_md *lic);
 int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                        void *data, int flag);
+#ifndef HAVE_IOP_ATOMIC_OPEN
 struct lookup_intent *ll_convert_intent(struct open_intent *oit,
                                         int lookup_flags);
-int ll_lookup_it_finish(struct ptlrpc_request *request,
-                        struct lookup_intent *it, void *data);
+#endif
 struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
+int ll_rmdir_entry(struct inode *dir, char *name, int namelen);
 
 /* llite/rw.c */
 int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
@@ -704,10 +730,14 @@ extern int ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
 extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
                            ldlm_mode_t l_req_mode);
 extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
-                                   struct lustre_handle *lockh);
+                                   struct lustre_handle *lockh, __u64 flags);
 int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
                              __u64 bits);
+#ifdef HAVE_IOP_ATOMIC_OPEN
+int ll_revalidate_nd(struct dentry *dentry, unsigned int flags);
+#else
 int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd);
+#endif
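
The paired prototypes above track the kernel change that removed the nameidata argument from ->d_revalidate; the definition in dcache.c is expected to carry the same HAVE_IOP_ATOMIC_OPEN conditional. A hedged sketch of that shape (ll_revalidate_dentry() is a hypothetical common helper, not declared in this header):

    #ifdef HAVE_IOP_ATOMIC_OPEN
    int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
    {
            /* newer kernels pass the lookup flags directly */
            return ll_revalidate_dentry(dentry, flags);
    }
    #else
    int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd)
    {
            /* older kernels carry the lookup flags inside the nameidata */
            return ll_revalidate_dentry(dentry, nd != NULL ? nd->flags : 0);
    }
    #endif
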
 int ll_file_open(struct inode *inode, struct file *file);
 int ll_file_release(struct inode *inode, struct file *file);
 int ll_glimpse_ioctl(struct ll_sb_info *sbi,
@@ -757,7 +787,7 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
                              struct ptlrpc_request **request);
 int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
                      int set_default);
-int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmm,
+int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
                      int *lmm_size, struct ptlrpc_request **request);
 #ifdef HAVE_FILE_FSYNC_4ARGS
 int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
@@ -768,10 +798,11 @@ int ll_fsync(struct file *file, struct dentry *dentry, int data);
 #endif
 int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
               int num_bytes);
-int ll_merge_lvb(struct inode *inode);
+int ll_merge_lvb(const struct lu_env *env, struct inode *inode);
 int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg);
 int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
 int ll_fid2path(struct inode *inode, void *arg);
+int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
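
Since ll_merge_lvb() (and the cl_merge_lvb() wrapper further down) now take a struct lu_env, callers have to supply an environment. A minimal sketch of a caller, assuming the usual cl_env_get()/cl_env_put() helpers from the cl_object API and with error handling trimmed to the essentials:

    static int example_merge_lvb(struct inode *inode)
    {
            struct lu_env *env;
            int refcheck;
            int rc;

            env = cl_env_get(&refcheck);
            if (IS_ERR(env))
                    return PTR_ERR(env);

            /* merge size/timestamps from the DLM lvb into the inode */
            rc = ll_merge_lvb(env, inode);

            cl_env_put(env, &refcheck);
            return rc;
    }
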
 
 /* llite/dcache.c */
 
@@ -800,6 +831,7 @@ void ll_lli_init(struct ll_inode_info *lli);
 int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
 void ll_put_super(struct super_block *sb);
 void ll_kill_super(struct super_block *sb);
+struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
 struct inode *ll_inode_from_lock(struct ldlm_lock *lock);
 void ll_clear_inode(struct inode *inode);
 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr);
@@ -819,10 +851,14 @@ void ll_umount_begin(struct vfsmount *vfsmnt, int flags);
 void ll_umount_begin(struct super_block *sb);
 #endif
 int ll_remount_fs(struct super_block *sb, int *flags, char *data);
+#ifdef HAVE_SUPEROPS_USE_DENTRY
+int ll_show_options(struct seq_file *seq, struct dentry *dentry);
+#else
 int ll_show_options(struct seq_file *seq, struct vfsmount *vfs);
+#endif
 void ll_dirty_page_discard_warn(cfs_page_t *page, int ioret);
 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
-                  struct super_block *);
+                 struct super_block *, struct lookup_intent *);
 void lustre_dump_dentry(struct dentry *, int recur);
 void lustre_dump_inode(struct inode *);
 int ll_obd_statfs(struct inode *inode, void *arg);
@@ -839,6 +875,8 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
 /* llite/llite_nfs.c */
 extern struct export_operations lustre_export_operations;
 __u32 get_uuid2int(const char *name, int len);
+struct inode *search_inode_for_lustre(struct super_block *sb,
+                                     const struct lu_fid *fid);
 
 /* llite/special.c */
 extern struct inode_operations ll_special_inode_operations;
@@ -854,11 +892,11 @@ extern struct inode_operations ll_fast_symlink_inode_operations;
 
 /* llite/llite_close.c */
 struct ll_close_queue {
-        cfs_spinlock_t          lcq_lock;
-        cfs_list_t              lcq_head;
-        cfs_waitq_t             lcq_waitq;
-        cfs_completion_t        lcq_comp;
-        cfs_atomic_t            lcq_stop;
+       spinlock_t              lcq_lock;
+       cfs_list_t              lcq_head;
+       cfs_waitq_t             lcq_waitq;
+       struct completion       lcq_comp;
+       cfs_atomic_t            lcq_stop;
 };
 
 struct ccc_object *cl_inode2ccc(struct inode *inode);
@@ -1105,9 +1143,10 @@ static inline struct obd_export *ll_i2mdexp(struct inode *inode)
 static inline struct lu_fid *ll_inode2fid(struct inode *inode)
 {
         struct lu_fid *fid;
+
         LASSERT(inode != NULL);
         fid = &ll_i2info(inode)->lli_fid;
-        LASSERT(fid_is_igif(fid) || fid_ver(fid) == 0);
+
         return fid;
 }
 
@@ -1243,8 +1282,8 @@ struct ll_statahead_info {
         cfs_list_t              sai_entries_stated;   /* entries stated */
         cfs_list_t              sai_entries_agl; /* AGL entries to be sent */
         cfs_list_t              sai_cache[LL_SA_CACHE_SIZE];
-        cfs_spinlock_t          sai_cache_lock[LL_SA_CACHE_SIZE];
-        cfs_atomic_t            sai_cache_count;      /* entry count in cache */
+       spinlock_t              sai_cache_lock[LL_SA_CACHE_SIZE];
+       cfs_atomic_t            sai_cache_count; /* entry count in cache */
 };
 
 int do_statahead_enter(struct inode *dir, struct dentry **dentry,
@@ -1253,14 +1292,14 @@ void ll_stop_statahead(struct inode *dir, void *key);
 
 static inline int ll_glimpse_size(struct inode *inode)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
-        int rc;
-
-        cfs_down_read(&lli->lli_glimpse_sem);
-        rc = cl_glimpse_size(inode);
-        lli->lli_glimpse_time = cfs_time_current();
-        cfs_up_read(&lli->lli_glimpse_sem);
-        return rc;
+       struct ll_inode_info *lli = ll_i2info(inode);
+       int rc;
+
+       down_read(&lli->lli_glimpse_sem);
+       rc = cl_glimpse_size(inode);
+       lli->lli_glimpse_time = cfs_time_current();
+       up_read(&lli->lli_glimpse_sem);
+       return rc;
 }
 
 static inline void
@@ -1279,40 +1318,56 @@ ll_statahead_mark(struct inode *dir, struct dentry *dentry)
 }
 
 static inline int
-ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
+ll_need_statahead(struct inode *dir, struct dentry *dentryp)
 {
-        struct ll_inode_info  *lli;
-        struct ll_dentry_data *ldd;
-
-        if (ll_i2sbi(dir)->ll_sa_max == 0)
-                return -EAGAIN;
+       struct ll_inode_info  *lli;
+       struct ll_dentry_data *ldd;
+
+       if (ll_i2sbi(dir)->ll_sa_max == 0)
+               return -EAGAIN;
+
+       lli = ll_i2info(dir);
+       /* not the same process, don't statahead */
+       if (lli->lli_opendir_pid != cfs_curproc_pid())
+               return -EAGAIN;
+
+       /* statahead has been stopped */
+       if (lli->lli_opendir_key == NULL)
+               return -EAGAIN;
+
+       ldd = ll_d2d(dentryp);
+       /*
+        * When stat'ing a dentry, the system triggers "revalidate" or
+        * "lookup" more than once: for "getattr", "getxattr", and maybe others.
+        * Under patchless client mode, the operation intent is not accurate,
+        * which may misguide the statahead thread. For example:
+        * the "revalidate" calls for "getattr" and "getxattr" of a dentry may
+        * carry the same operation intent -- "IT_GETATTR".
+        * In fact, one dentry should have only one chance to interact with the
+        * statahead thread, otherwise the statahead window will be confused.
+        * The solution is as follows:
+        * Assign "lld_sa_generation" from "sai_generation" when a dentry gets
+        * "IT_GETATTR" for the first time; any subsequent "IT_GETATTR" will
+        * bypass the statahead thread by checking:
+        * "lld_sa_generation == lli_sai->sai_generation".
+        */
+       if (ldd && lli->lli_sai &&
+           ldd->lld_sa_generation == lli->lli_sai->sai_generation)
+               return -EAGAIN;
+
+       return 1;
+}
 
-        lli = ll_i2info(dir);
-        /* not the same process, don't statahead */
-        if (lli->lli_opendir_pid != cfs_curproc_pid())
-                return -EAGAIN;
+static inline int
+ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
+{
+       int ret;
 
-        ldd = ll_d2d(*dentryp);
-        /*
-         * When stats a dentry, the system trigger more than once "revalidate"
-         * or "lookup", for "getattr", for "getxattr", and maybe for others.
-         * Under patchless client mode, the operation intent is not accurate,
-         * which maybe misguide the statahead thread. For example:
-         * The "revalidate" call for "getattr" and "getxattr" of a dentry maybe
-         * have the same operation intent -- "IT_GETATTR".
-         * In fact, one dentry should has only one chance to interact with the
-         * statahead thread, otherwise the statahead windows will be confused.
-         * The solution is as following:
-         * Assign "lld_sa_generation" with "sai_generation" when a dentry
-         * "IT_GETATTR" for the first time, and the subsequent "IT_GETATTR"
-         * will bypass interacting with statahead thread for checking:
-         * "lld_sa_generation == lli_sai->sai_generation"
-         */
-        if (ldd && lli->lli_sai &&
-            ldd->lld_sa_generation == lli->lli_sai->sai_generation)
-                return -EAGAIN;
+       ret = ll_need_statahead(dir, *dentryp);
+       if (ret <= 0)
+               return ret;
 
-        return do_statahead_enter(dir, dentryp, only_unplug);
+       return do_statahead_enter(dir, dentryp, only_unplug);
 }
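
With ll_statahead_enter() now reduced to ll_need_statahead() plus do_statahead_enter(), a caller only reaches the statahead thread once per dentry generation. A hedged sketch of a revalidate-style caller (the caller shape is an assumption; only the helpers above are real):

    static int example_revalidate_statahead(struct inode *dir,
                                            struct dentry **dentryp)
    {
            int rc;

            /* -EAGAIN: statahead disabled, owned by another process, already
             * stopped, or already consulted for this dentry generation. */
            rc = ll_statahead_enter(dir, dentryp, 0);
            if (rc == -EAGAIN)
                    return 0;       /* fall back to the normal lookup path */

            return rc;
    }
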
 
 /* llite ioctl register support routine */
@@ -1400,9 +1455,9 @@ static inline void cl_isize_write(struct inode *inode, loff_t kms)
 
 #define cl_isize_read(inode)             i_size_read(inode)
 
-static inline int cl_merge_lvb(struct inode *inode)
+static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
 {
-        return ll_merge_lvb(inode);
+       return ll_merge_lvb(env, inode);
 }
 
 #define cl_inode_atime(inode) LTIME_S((inode)->i_atime)
@@ -1457,16 +1512,38 @@ static inline int ll_file_nolock(const struct file *file)
 static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
                                     struct lookup_intent *it, __u64 *bits)
 {
-        if (!it->d.lustre.it_lock_set) {
-                CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
-                       inode, inode->i_ino, inode->i_generation);
-                md_set_lock_data(exp, &it->d.lustre.it_lock_handle,
-                                 inode, &it->d.lustre.it_lock_bits);
-                it->d.lustre.it_lock_set = 1;
-        }
-
-        if (bits != NULL)
-                *bits = it->d.lustre.it_lock_bits;
+       if (!it->d.lustre.it_lock_set) {
+               struct lustre_handle handle;
+
+               /* If this inode is a remote object, it will get two
+                * separate locks in different namespaces: the master MDT,
+                * where the name entry is, will grant a LOOKUP lock, and
+                * the remote MDT, where the object is, will grant an
+                * UPDATE|PERM lock. The inode will be attached to both
+                * LOOKUP and PERM locks, so revoking either lock will
+                * cause the dcache to be cleared. */
+               if (it->d.lustre.it_remote_lock_mode) {
+                       handle.cookie = it->d.lustre.it_remote_lock_handle;
+                       CDEBUG(D_DLMTRACE, "setting l_data to inode %p"
+                              "(%lu/%u) for remote lock "LPX64"\n", inode,
+                              inode->i_ino, inode->i_generation,
+                              handle.cookie);
+                       md_set_lock_data(exp, &handle.cookie, inode, NULL);
+               }
+
+               handle.cookie = it->d.lustre.it_lock_handle;
+
+               CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)"
+                      " for lock "LPX64"\n", inode, inode->i_ino,
+                      inode->i_generation, handle.cookie);
+
+               md_set_lock_data(exp, &handle.cookie, inode,
+                                &it->d.lustre.it_lock_bits);
+               it->d.lustre.it_lock_set = 1;
+       }
+
+       if (bits != NULL)
+               *bits = it->d.lustre.it_lock_bits;
 }
 
 static inline void ll_lock_dcache(struct inode *inode)
@@ -1546,6 +1623,11 @@ struct if_quotactl_18 {
 #warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
 #endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
 
+enum {
+       LL_LAYOUT_GEN_NONE  = ((__u32)-2),      /* layout lock was cancelled */
+       LL_LAYOUT_GEN_EMPTY = ((__u32)-1)       /* for empty layout */
+};
+
 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
 int ll_layout_refresh(struct inode *inode, __u32 *gen);
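
The two sentinels reserve __u32 values that can never be real layout generations: LL_LAYOUT_GEN_NONE after the layout lock is cancelled, LL_LAYOUT_GEN_EMPTY for a file without a layout. A hedged sketch of how a caller might use ll_layout_refresh() with them (usage pattern assumed, not defined in this header):

    /* Returns 1 if the file has a layout, 0 if it is empty, negative on error. */
    static int example_file_has_layout(struct inode *inode)
    {
            __u32 gen = LL_LAYOUT_GEN_NONE;
            int rc;

            rc = ll_layout_refresh(inode, &gen);
            if (rc != 0)
                    return rc;

            return gen == LL_LAYOUT_GEN_EMPTY ? 0 : 1;
    }
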