/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
33 #ifndef _LUSTRE_COMPAT_H
34 #define _LUSTRE_COMPAT_H
36 #include <linux/aio.h>
38 #include <linux/fs_struct.h>
39 #include <linux/namei.h>
40 #include <linux/pagemap.h>
41 #include <linux/bio.h>
42 #include <linux/xattr.h>
43 #include <linux/workqueue.h>
44 #include <linux/blkdev.h>
45 #include <linux/slab.h>
47 #include <libcfs/linux/linux-fs.h>
48 #include <obd_support.h>
/* The supplementary-group list moved under current_cred(); these wrappers
 * let older call sites keep using the historical names. */
#define current_ngroups current_cred()->group_info->ngroups
#define current_groups current_cred()->group_info->small_block
53 #ifdef HAVE_4ARGS_VFS_SYMLINK
54 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
55 vfs_symlink(dir, dentry, path, mode)
57 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
58 vfs_symlink(dir, dentry, path)
61 #if !defined(HAVE_FILE_LLSEEK_SIZE) || defined(HAVE_FILE_LLSEEK_SIZE_5ARGS)
62 #define ll_generic_file_llseek_size(file, offset, origin, maxbytes, eof) \
63 generic_file_llseek_size(file, offset, origin, maxbytes, eof);
65 #define ll_generic_file_llseek_size(file, offset, origin, maxbytes, eof) \
66 generic_file_llseek_size(file, offset, origin, maxbytes);
/* Kernels with inode_dio_wait() need no unlock step; older kernels
 * emulate the direct-I/O write lock with i_alloc_sem. */
#ifdef HAVE_INODE_DIO_WAIT
/* inode_dio_wait(i) use as-is for write lock */
# define inode_dio_write_done(i)	do {} while (0) /* for write unlock */
#else
# define inode_dio_wait(i)		down_write(&(i)->i_alloc_sem)
# define inode_dio_write_done(i)	up_write(&(i)->i_alloc_sem)
#endif
77 #ifndef HAVE_INIT_LIST_HEAD_RCU
78 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
80 WRITE_ONCE(list->next, list);
81 WRITE_ONCE(list->prev, list);
/* The bio iterator fields moved into bi_iter when HAVE_BVEC_ITER;
 * provide uniform accessors for both layouts. */
#ifdef HAVE_BVEC_ITER
#define bio_idx(bio)			(bio->bi_iter.bi_idx)
#define bio_set_sector(bio, sector)	(bio->bi_iter.bi_sector = sector)
#define bvl_to_page(bvl)		(bvl->bv_page)
#else
#define bio_idx(bio)			(bio->bi_idx)
#define bio_set_sector(bio, sector)	(bio->bi_sector = sector)
#define bio_sectors(bio)		((bio)->bi_size >> 9)
#ifndef HAVE_BIO_END_SECTOR
#define bio_end_sector(bio)		(bio->bi_sector + bio_sectors(bio))
#endif
#define bvl_to_page(bvl)		(bvl->bv_page)
#endif

#ifdef HAVE_BVEC_ITER
#define bio_start_sector(bio)		(bio->bi_iter.bi_sector)
#else
#define bio_start_sector(bio)		(bio->bi_sector)
#endif
105 #ifdef HAVE_KMAP_ATOMIC_HAS_1ARG
106 #define ll_kmap_atomic(a, b) kmap_atomic(a)
107 #define ll_kunmap_atomic(a, b) kunmap_atomic(a)
109 #define ll_kmap_atomic(a, b) kmap_atomic(a, b)
110 #define ll_kunmap_atomic(a, b) kunmap_atomic(a, b)
113 #ifndef HAVE_CLEAR_INODE
114 #define clear_inode(i) end_writeback(i)
117 #ifndef HAVE_DENTRY_D_CHILD
118 #define d_child d_u.d_child
121 #ifdef HAVE_DENTRY_D_U_D_ALIAS
122 #define d_alias d_u.d_alias
125 #ifndef DATA_FOR_LLITE_IS_LIST
126 #define ll_d_hlist_node hlist_node
127 #define ll_d_hlist_empty(list) hlist_empty(list)
128 #define ll_d_hlist_entry(ptr, type, name) hlist_entry(ptr.first, type, name)
129 #define ll_d_hlist_for_each(tmp, i_dentry) hlist_for_each(tmp, i_dentry)
130 # ifdef HAVE_HLIST_FOR_EACH_3ARG
131 # define ll_d_hlist_for_each_entry(dentry, p, i_dentry) \
132 p = NULL; hlist_for_each_entry(dentry, i_dentry, d_alias)
134 # define ll_d_hlist_for_each_entry(dentry, p, i_dentry) \
135 hlist_for_each_entry(dentry, p, i_dentry, d_alias)
137 #define DECLARE_LL_D_HLIST_NODE_PTR(name) struct ll_d_hlist_node *name
139 #define ll_d_hlist_node list_head
140 #define ll_d_hlist_empty(list) list_empty(list)
141 #define ll_d_hlist_entry(ptr, type, name) list_entry(ptr.next, type, name)
142 #define ll_d_hlist_for_each(tmp, i_dentry) list_for_each(tmp, i_dentry)
143 #define ll_d_hlist_for_each_entry(dentry, p, i_dentry) \
144 list_for_each_entry(dentry, i_dentry, d_alias)
145 #define DECLARE_LL_D_HLIST_NODE_PTR(name) /* nothing */
146 #endif /* !DATA_FOR_LLITE_IS_LIST */
#ifndef HAVE_D_IN_LOOKUP
static inline int d_in_lookup(struct dentry *dentry)
{
	/* NOTE(review): body reconstructed — kernels without parallel
	 * lookups have no in-lookup state, so report false; verify
	 * against the upstream compat header. */
	return 0;
}
#endif

#ifndef NO_QUOTA
# define NO_QUOTA (-EDQUOT)
#endif

#ifndef SEEK_DATA
#define SEEK_DATA	3	/* seek to the next data */
#endif
#ifndef SEEK_HOLE
#define SEEK_HOLE	4	/* seek to the next hole */
#endif

#ifndef FMODE_UNSIGNED_OFFSET
#define FMODE_UNSIGNED_OFFSET	((__force fmode_t)0x2000)
#endif
/* Map the removed ext2_* bitops onto the generic little-endian ones. */
#if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
# define ext2_set_bit             __test_and_set_bit_le
# define ext2_clear_bit           __test_and_clear_bit_le
# define ext2_test_bit            test_bit_le
# define ext2_find_first_zero_bit find_first_zero_bit_le
# define ext2_find_next_zero_bit  find_next_zero_bit_le
#endif

/* ATTR_TIMES_SET only exists on some kernels; fold it in when present. */
#ifdef ATTR_TIMES_SET
# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
#else
# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET)
#endif

#ifndef XATTR_NAME_POSIX_ACL_ACCESS
# define XATTR_NAME_POSIX_ACL_ACCESS POSIX_ACL_XATTR_ACCESS
#endif

#ifndef XATTR_NAME_POSIX_ACL_DEFAULT
# define XATTR_NAME_POSIX_ACL_DEFAULT POSIX_ACL_XATTR_DEFAULT
#endif

#ifndef HAVE_LM_XXX_LOCK_MANAGER_OPS
# define lm_compare_owner	fl_compare_owner
#endif
200 * After 3.1, kernel's nameidata.intent.open.flags is different
201 * with lustre's lookup_intent.it_flags, as lustre's it_flags'
202 * lower bits equal to FMODE_xxx while kernel doesn't transliterate
203 * lower bits of nameidata.intent.open.flags to FMODE_xxx.
205 #include <linux/version.h>
206 static inline int ll_namei_to_lookup_intent_flag(int flag)
208 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
209 flag = (flag & ~O_ACCMODE) | OPEN_FMODE(flag);
214 #include <linux/fs.h>
215 #ifndef HAVE_PROTECT_I_NLINK
216 static inline void set_nlink(struct inode *inode, unsigned int nlink)
218 inode->i_nlink = nlink;
222 #ifdef HAVE_INODEOPS_USE_UMODE_T
223 # define ll_umode_t umode_t
225 # define ll_umode_t int
228 #ifndef HAVE_VM_FAULT_T
229 #define vm_fault_t int
232 #include <linux/dcache.h>
233 #ifndef HAVE_D_MAKE_ROOT
234 static inline struct dentry *d_make_root(struct inode *root)
236 struct dentry *res = d_alloc_root(root);
238 if (res == NULL && root)
245 #ifdef HAVE_DIRTY_INODE_HAS_FLAG
246 # define ll_dirty_inode(inode, flag) (inode)->i_sb->s_op->dirty_inode((inode), flag)
248 # define ll_dirty_inode(inode, flag) (inode)->i_sb->s_op->dirty_inode((inode))
251 #ifdef HAVE_FILE_F_INODE
252 # define set_file_inode(file, inode) (file)->f_inode = inode
254 # define set_file_inode(file, inode)
257 #ifndef HAVE_FILE_INODE
258 static inline struct inode *file_inode(const struct file *file)
260 return file->f_path.dentry->d_inode;
264 #ifdef HAVE_OLDSIZE_TRUNCATE_PAGECACHE
265 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, 0, size)
267 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, size)
/* vfs_rename() gained a delegated-inode pointer and then a flags
 * argument; pass benign defaults (NULL delegation, no flags). */
#ifdef HAVE_VFS_RENAME_5ARGS
#define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL)
#elif defined HAVE_VFS_RENAME_6ARGS
#define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL, 0)
#else
#define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d)
#endif

#ifdef HAVE_VFS_UNLINK_3ARGS
#define ll_vfs_unlink(a, b) vfs_unlink(a, b, NULL)
#else
#define ll_vfs_unlink(a, b) vfs_unlink(a, b)
#endif

#ifndef HAVE_INODE_OWNER_OR_CAPABLE
#define inode_owner_or_capable(inode) is_owner_or_cap(inode)
#endif
288 static inline int ll_vfs_getattr(struct path *path, struct kstat *st)
292 #ifdef HAVE_INODEOPS_ENHANCED_GETATTR
293 rc = vfs_getattr(path, st, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
294 #elif defined HAVE_VFS_GETATTR_2ARGS
295 rc = vfs_getattr(path, st);
297 rc = vfs_getattr(path->mnt, path->dentry, st);
302 #ifndef HAVE_D_IS_POSITIVE
303 static inline bool d_is_positive(const struct dentry *dentry)
305 return dentry->d_inode != NULL;
309 #ifdef HAVE_VFS_CREATE_USE_NAMEIDATA
310 # define LL_VFS_CREATE_FALSE NULL
312 # define LL_VFS_CREATE_FALSE false
315 #ifndef HAVE_INODE_LOCK
316 # define inode_lock(inode) mutex_lock(&(inode)->i_mutex)
317 # define inode_unlock(inode) mutex_unlock(&(inode)->i_mutex)
318 # define inode_trylock(inode) mutex_trylock(&(inode)->i_mutex)
321 #ifndef HAVE_RADIX_EXCEPTION_ENTRY
322 static inline int radix_tree_exceptional_entry(void *arg)
328 #ifndef HAVE_TRUNCATE_INODE_PAGES_FINAL
329 static inline void truncate_inode_pages_final(struct address_space *map)
331 truncate_inode_pages(map, 0);
335 #ifndef HAVE_PTR_ERR_OR_ZERO
336 static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
346 #define SIZE_MAX (~(size_t)0)
/* security_inode_init_security() has taken three different argument
 * sets over time (initxattrs callback, qstr name, raw name/value);
 * present one 7-argument wrapper to callers.
 * NOTE(review): continuation lines restored — verify against upstream. */
#ifdef HAVE_SECURITY_IINITSEC_CALLBACK
# define ll_security_inode_init_security(inode, dir, name, value, len, \
					 initxattrs, dentry)		\
	 security_inode_init_security(inode, dir, &((dentry)->d_name),	\
				      initxattrs, dentry)
#elif defined HAVE_SECURITY_IINITSEC_QSTR
# define ll_security_inode_init_security(inode, dir, name, value, len, \
					 initxattrs, dentry)		\
	 security_inode_init_security(inode, dir, &((dentry)->d_name),	\
				      name, value, len)
#else /* !HAVE_SECURITY_IINITSEC_CALLBACK && !HAVE_SECURITY_IINITSEC_QSTR */
# define ll_security_inode_init_security(inode, dir, name, value, len, \
					 initxattrs, dentry)		\
	 security_inode_init_security(inode, dir, name, value, len)
#endif
#ifndef bio_for_each_segment_all /* since kernel version 3.9 */
#ifdef HAVE_BVEC_ITER
#define bio_for_each_segment_all(bv, bio, it) \
	for (it = 0, bv = (bio)->bi_io_vec; it < (bio)->bi_vcnt; it++, bv++)
#else
#define bio_for_each_segment_all(bv, bio, it) bio_for_each_segment(bv, bio, it)
#endif
#endif

/* The child pid namespace pointer was renamed in nsproxy. */
#ifdef HAVE_PID_NS_FOR_CHILDREN
# define ll_task_pid_ns(task)	((task)->nsproxy->pid_ns_for_children)
#else
# define ll_task_pid_ns(task)	((task)->nsproxy->pid_ns)
#endif

/* full_name_hash() gained a salt pointer as its first argument. */
#ifdef HAVE_FULL_NAME_HASH_3ARGS
# define ll_full_name_hash(salt, name, len) full_name_hash(salt, name, len)
#else
# define ll_full_name_hash(salt, name, len) full_name_hash(name, len)
#endif
/* The posix_acl_xattr typedefs were removed upstream; map the old names
 * onto the struct tags and provide a uniform first-entry accessor. */
#ifdef HAVE_STRUCT_POSIX_ACL_XATTR
# define posix_acl_xattr_header struct posix_acl_xattr_header
# define posix_acl_xattr_entry  struct posix_acl_xattr_entry
# define GET_POSIX_ACL_XATTR_ENTRY(head) ((void *)((head) + 1))
#else
# define GET_POSIX_ACL_XATTR_ENTRY(head) ((head)->a_entries)
#endif

#ifdef HAVE_IOP_XATTR
#define ll_setxattr     generic_setxattr
#define ll_getxattr     generic_getxattr
#define ll_removexattr  generic_removexattr
#endif /* HAVE_IOP_XATTR */
400 #ifndef HAVE_VFS_SETXATTR
401 const struct xattr_handler *get_xattr_type(const char *name);
404 __vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
405 const void *value, size_t size, int flags)
407 const struct xattr_handler *handler;
410 handler = get_xattr_type(name);
414 # if defined(HAVE_XATTR_HANDLER_INODE_PARAM)
415 rc = handler->set(handler, dentry, inode, name, value, size, flags);
416 # elif defined(HAVE_XATTR_HANDLER_SIMPLIFIED)
417 rc = handler->set(handler, dentry, name, value, size, flags);
419 rc = handler->set(dentry, name, value, size, flags, handler->flags);
420 # endif /* !HAVE_XATTR_HANDLER_INODE_PARAM */
423 #endif /* HAVE_VFS_SETXATTR */
#ifdef HAVE_IOP_SET_ACL
#ifdef CONFIG_FS_POSIX_ACL
#ifndef HAVE_POSIX_ACL_UPDATE_MODE
/* Backport of posix_acl_update_mode(): fold the ACL into the file mode,
 * drop a redundant ACL, and clear SGID when the caller is neither in the
 * owning group nor privileged.
 * NOTE(review): error handling and the mode write-back reconstructed —
 * verify against the upstream kernel implementation. */
static inline int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
					struct posix_acl **acl)
{
	umode_t mode = inode->i_mode;
	int error;

	error = posix_acl_equiv_mode(*acl, &mode);
	if (error < 0)
		return error;

	if (error == 0)
		*acl = NULL;

	if (!in_group_p(inode->i_gid) &&
	    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
		mode &= ~S_ISGID;
	*mode_p = mode;
	return 0;
}
#endif /* HAVE_POSIX_ACL_UPDATE_MODE */
#endif /* CONFIG_FS_POSIX_ACL */
#endif /* HAVE_IOP_SET_ACL */
449 #ifndef HAVE_IOV_ITER_TRUNCATE
450 static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
452 if (i->count > count)
458 static inline bool is_sxid(umode_t mode)
460 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
465 #define IS_NOSEC(inode) (!is_sxid(inode->i_mode))
/*
 * mount MS_* flags split from superblock SB_* flags
 * if the SB_* flags are not available use the MS_* flags
 */
#if !defined(SB_RDONLY) && defined(MS_RDONLY)
# define SB_RDONLY MS_RDONLY
#endif
#if !defined(SB_ACTIVE) && defined(MS_ACTIVE)
# define SB_ACTIVE MS_ACTIVE
#endif
#if !defined(SB_NOSEC) && defined(MS_NOSEC)
# define SB_NOSEC MS_NOSEC
#endif
#if !defined(SB_POSIXACL) && defined(MS_POSIXACL)
# define SB_POSIXACL MS_POSIXACL
#endif
#if !defined(SB_NODIRATIME) && defined(MS_NODIRATIME)
# define SB_NODIRATIME MS_NODIRATIME
#endif
488 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
489 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
494 static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
496 return (struct iovec) {
497 .iov_base = iter->iov->iov_base + iter->iov_offset,
498 .iov_len = min(iter->count,
499 iter->iov->iov_len - iter->iov_offset),
503 #define iov_for_each(iov, iter, start) \
504 for (iter = (start); \
505 (iter).count && ((iov = iov_iter_iovec(&(iter))), 1); \
506 iov_iter_advance(&(iter), (iov).iov_len))
508 static inline ssize_t
509 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
515 iov_for_each(iov, i, *iter) {
518 res = generic_file_aio_read(iocb, &iov, 1, iocb->ki_pos);
526 if (res < iov.iov_len)
531 iov_iter_advance(iter, bytes);
535 static inline ssize_t
536 __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
542 /* Since LLITE updates file size at the end of I/O in
543 * vvp_io_commit_write(), append write has to be done in atomic when
544 * there are multiple segments because otherwise each iteration to
545 * __generic_file_aio_write() will see original file size */
546 if (unlikely(iocb->ki_filp->f_flags & O_APPEND && iter->nr_segs > 1)) {
547 struct iovec *iov_copy;
550 OBD_ALLOC(iov_copy, sizeof(*iov_copy) * iter->nr_segs);
554 iov_for_each(iov, i, *iter)
555 iov_copy[count++] = iov;
557 bytes = __generic_file_aio_write(iocb, iov_copy, count,
559 OBD_FREE(iov_copy, sizeof(*iov_copy) * iter->nr_segs);
562 iov_iter_advance(iter, bytes);
566 iov_for_each(iov, i, *iter) {
569 res = __generic_file_aio_write(iocb, &iov, 1, &iocb->ki_pos);
577 if (res < iov.iov_len)
582 iov_iter_advance(iter, bytes);
585 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
587 static inline void __user *get_vmf_address(struct vm_fault *vmf)
589 #ifdef HAVE_VM_FAULT_ADDRESS
590 return (void __user *)vmf->address;
592 return vmf->virtual_address;
596 #ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
597 # define ll_filemap_fault(vma, vmf) filemap_fault(vmf)
599 # define ll_filemap_fault(vma, vmf) filemap_fault(vma, vmf)
602 #ifndef HAVE_CURRENT_TIME
603 static inline struct timespec current_time(struct inode *inode)
#ifndef time_after32
/**
 * time_after32 - compare two 32-bit relative times
 * @a: the time which may be after @b
 * @b: the time which may be before @a
 *
 * time_after32(a, b) returns true if the time @a is after time @b.
 * time_before32(b, a) returns true if the time @b is before time @a.
 *
 * Similar to time_after(), compare two 32-bit timestamps for relative
 * times. This is useful for comparing 32-bit seconds values that can't
 * be converted to 64-bit values (e.g. due to disk format or wire protocol
 * issues) when it is known that the times are less than 68 years apart.
 */
#define time_after32(a, b)	((s32)((u32)(b) - (u32)(a)) < 0)
#define time_before32(b, a)	time_after32(a, b)
#endif

#ifndef alloc_workqueue
#define alloc_workqueue(name, flags, max_active) create_workqueue(name)
#endif

#ifndef smp_store_mb
#define smp_store_mb(var, value)	set_mb(var, value)
#endif

#ifndef READ_ONCE
#define READ_ONCE ACCESS_ONCE
#endif
644 #if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
645 static inline unsigned short blk_integrity_interval(struct blk_integrity *bi)
647 #ifdef HAVE_INTERVAL_EXP_BLK_INTEGRITY
648 return bi->interval_exp ? 1 << bi->interval_exp : 0;
649 #elif defined(HAVE_INTERVAL_BLK_INTEGRITY)
652 return bi->sector_size;
653 #endif /* !HAVE_INTERVAL_EXP_BLK_INTEGRITY */
656 static inline const char *blk_integrity_name(struct blk_integrity *bi)
658 #ifdef HAVE_INTERVAL_EXP_BLK_INTEGRITY
659 return bi->profile->name;
665 static inline unsigned int bip_size(struct bio_integrity_payload *bip)
667 #ifdef HAVE_BIP_ITER_BIO_INTEGRITY_PAYLOAD
668 return bip->bip_iter.bi_size;
670 return bip->bip_size;
673 #else /* !CONFIG_BLK_DEV_INTEGRITY */
674 static inline unsigned short blk_integrity_interval(struct blk_integrity *bi)
678 static inline const char *blk_integrity_name(struct blk_integrity *bi)
680 /* gcc8 dislikes when strcmp() is called against NULL */
683 #endif /* !CONFIG_BLK_DEV_INTEGRITY */
/* Map Lustre's integrity flag names onto the kernel's verify/generate
 * flags when the kernel does not define them. */
#ifndef INTEGRITY_FLAG_READ
#define INTEGRITY_FLAG_READ BLK_INTEGRITY_VERIFY
#endif

#ifndef INTEGRITY_FLAG_WRITE
#define INTEGRITY_FLAG_WRITE BLK_INTEGRITY_GENERATE
#endif
693 static inline bool bdev_integrity_enabled(struct block_device *bdev, int rw)
695 #if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
696 struct blk_integrity *bi = bdev_get_integrity(bdev);
701 #ifdef HAVE_INTERVAL_EXP_BLK_INTEGRITY
702 if (rw == 0 && bi->profile->verify_fn != NULL &&
703 (bi->flags & INTEGRITY_FLAG_READ))
706 if (rw == 1 && bi->profile->generate_fn != NULL &&
707 (bi->flags & INTEGRITY_FLAG_WRITE))
710 if (rw == 0 && bi->verify_fn != NULL &&
711 (bi->flags & INTEGRITY_FLAG_READ))
714 if (rw == 1 && bi->generate_fn != NULL &&
715 (bi->flags & INTEGRITY_FLAG_WRITE))
717 #endif /* !HAVE_INTERVAL_EXP_BLK_INTEGRITY */
718 #endif /* !CONFIG_BLK_DEV_INTEGRITY */
/* pagevec_init() dropped its second (cold-page) argument. */
#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
#define ll_pagevec_init(pvec, n) pagevec_init(pvec)
#else
#define ll_pagevec_init(pvec, n) pagevec_init(pvec, n)
#endif

/* d_count() accessor replaced direct d_count field reads. */
#ifdef HAVE_D_COUNT
# define ll_d_count(d)		d_count(d)
#else
# define ll_d_count(d)		((d)->d_count)
#endif /* HAVE_D_COUNT */

#ifndef HAVE_IN_COMPAT_SYSCALL
#define in_compat_syscall	is_compat_task
#endif
/* The address_space page tree and its lock were folded into i_pages
 * with xa_lock helpers; emulate whichever naming the kernel lacks. */
#ifdef HAVE_I_PAGES
#define page_tree i_pages
#else
#define i_pages tree_lock
#define xa_lock_irq(lockp) spin_lock_irq(lockp)
#define xa_unlock_irq(lockp) spin_unlock_irq(lockp)
#endif

/* Without usercopy whitelisting support, fall back to a plain cache
 * and ignore the useroffset/usersize window. */
#ifndef KMEM_CACHE_USERCOPY
#define kmem_cache_create_usercopy(name, size, align, flags, useroffset, \
				   usersize, ctor)			 \
	kmem_cache_create(name, size, align, flags, ctor)
#endif

#ifndef HAVE_LINUX_SELINUX_IS_ENABLED
#define selinux_is_enabled() 1
#endif
757 #endif /* _LUSTRE_COMPAT_H */