4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #ifndef _LUSTRE_COMPAT_H
34 #define _LUSTRE_COMPAT_H
36 #include <linux/aio.h>
38 #include <linux/fs_struct.h>
39 #include <linux/namei.h>
40 #include <linux/pagemap.h>
41 #include <linux/posix_acl_xattr.h>
42 #include <linux/bio.h>
43 #include <linux/xattr.h>
44 #include <linux/workqueue.h>
45 #include <linux/blkdev.h>
46 #include <linux/slab.h>
48 #include <libcfs/linux/linux-fs.h>
49 #include <obd_support.h>
/*
 * ll_vfs_symlink(): wrap vfs_symlink(), which takes a mode argument on
 * kernels where HAVE_4ARGS_VFS_SYMLINK is set and omits it otherwise.
 * The mnt argument is accepted and dropped in both variants.
 * NOTE(review): the #else/#endif lines of this conditional are missing
 * from this listing — confirm against the full file.
 */
51 #ifdef HAVE_4ARGS_VFS_SYMLINK
52 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
53 vfs_symlink(dir, dentry, path, mode)
55 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
56 vfs_symlink(dir, dentry, path)
/*
 * INIT_LIST_HEAD_RCU(): fallback for kernels that lack it.  Initializes
 * both pointers of a list head to itself using WRITE_ONCE stores.
 * NOTE(review): the function braces and closing #endif are elided here.
 */
59 #ifndef HAVE_INIT_LIST_HEAD_RCU
60 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
62 WRITE_ONCE(list->next, list);
63 WRITE_ONCE(list->prev, list);
/*
 * struct bio field accessors: newer kernels keep the iteration state in
 * bio->bi_iter (first branch); older ones have bi_idx/bi_sector/bi_size
 * directly on struct bio (second branch), where bio_sectors() converts
 * the byte count bi_size to 512-byte sectors via ">> 9".
 * NOTE(review): the #ifdef/#else/#endif lines selecting between the two
 * layouts are missing from this listing.
 */
68 #define bio_idx(bio) (bio->bi_iter.bi_idx)
69 #define bio_set_sector(bio, sector) (bio->bi_iter.bi_sector = sector)
70 #define bvl_to_page(bvl) (bvl->bv_page)
72 #define bio_idx(bio) (bio->bi_idx)
73 #define bio_set_sector(bio, sector) (bio->bi_sector = sector)
74 #define bio_sectors(bio) ((bio)->bi_size >> 9)
75 #define bvl_to_page(bvl) (bvl->bv_page)
/* bio_start_sector(): same bi_iter vs. direct-field split as above. */
79 #define bio_start_sector(bio) (bio->bi_iter.bi_sector)
81 #define bio_start_sector(bio) (bio->bi_sector)
/*
 * dentry layout compat: on some kernels d_child/d_alias live inside the
 * d_u union; these aliases let the rest of the code use the bare names.
 */
84 #ifndef HAVE_DENTRY_D_CHILD
85 #define d_child d_u.d_child
88 #ifdef HAVE_DENTRY_D_U_D_ALIAS
89 #define d_alias d_u.d_alias
/*
 * d_in_lookup(): stub for kernels without it.
 * NOTE(review): the function body is elided in this listing —
 * presumably it just returns 0; verify against the full file.
 */
92 #ifndef HAVE_D_IN_LOOKUP
93 static inline int d_in_lookup(struct dentry *dentry)
/* vm_fault_t: older kernels return plain int from fault handlers. */
99 #ifndef HAVE_VM_FAULT_T
100 #define vm_fault_t int
/* iterate_shared: fall back to the non-shared iterate fop member. */
103 #ifndef HAVE_FOP_ITERATE_SHARED
104 #define iterate_shared iterate
/*
 * ll_truncate_pagecache(): older truncate_pagecache() took an extra
 * "old size" argument (passed as 0 here); newer kernels take only the
 * new size.  NOTE(review): the #else/#endif lines are elided.
 */
107 #ifdef HAVE_OLDSIZE_TRUNCATE_PAGECACHE
108 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, 0, size)
110 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, size)
/*
 * ll_vfs_rename()/ll_vfs_unlink(): paper over the extra delegated-inode
 * (and rename-flags) parameters that newer vfs_rename()/vfs_unlink()
 * grew; the wrappers always pass NULL/0 for them.
 * NOTE(review): the #else/#endif lines are elided in this listing.
 */
113 #ifdef HAVE_VFS_RENAME_5ARGS
114 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL)
115 #elif defined HAVE_VFS_RENAME_6ARGS
116 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL, 0)
118 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d)
121 #ifdef HAVE_VFS_UNLINK_3ARGS
122 #define ll_vfs_unlink(a, b) vfs_unlink(a, b, NULL)
124 #define ll_vfs_unlink(a, b) vfs_unlink(a, b)
/*
 * ll_vfs_getattr(): call vfs_getattr() with the statx-style extra
 * arguments (STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT) on kernels that
 * have them, plain two-argument form otherwise.
 * NOTE(review): braces, the "int rc;" declaration, the #else/#endif and
 * the return statement are all elided in this listing.
 */
127 static inline int ll_vfs_getattr(struct path *path, struct kstat *st)
131 #ifdef HAVE_INODEOPS_ENHANCED_GETATTR
132 rc = vfs_getattr(path, st, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
134 rc = vfs_getattr(path, st);
/*
 * d_is_positive(): fallback — a dentry is positive when it has an
 * attached inode.  NOTE(review): braces/#endif elided in this listing.
 */
139 #ifndef HAVE_D_IS_POSITIVE
140 static inline bool d_is_positive(const struct dentry *dentry)
142 return dentry->d_inode != NULL;
/*
 * inode_lock()/inode_unlock()/inode_trylock(): before these helpers
 * existed the inode was protected by the i_mutex mutex directly.
 */
146 #ifndef HAVE_INODE_LOCK
147 # define inode_lock(inode) mutex_lock(&(inode)->i_mutex)
148 # define inode_unlock(inode) mutex_unlock(&(inode)->i_mutex)
149 # define inode_trylock(inode) mutex_trylock(&(inode)->i_mutex)
/*
 * xa_is_value(): pre-XArray kernels called these "exceptional" radix
 * tree entries.  NOTE(review): braces/#endif elided.
 */
152 #ifndef HAVE_XA_IS_VALUE
153 static inline bool xa_is_value(void *entry)
155 return radix_tree_exceptional_entry(entry);
/*
 * truncate_inode_pages_final(): fallback — truncate everything from
 * offset 0.  NOTE(review): braces/#endif elided.
 */
159 #ifndef HAVE_TRUNCATE_INODE_PAGES_FINAL
160 static inline void truncate_inode_pages_final(struct address_space *map)
162 truncate_inode_pages(map, 0);
/*
 * PTR_ERR_OR_ZERO(): fallback declaration for older kernels.
 * NOTE(review): the body (IS_ERR/PTR_ERR logic) is elided here.
 */
166 #ifndef HAVE_PTR_ERR_OR_ZERO
167 static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
/*
 * ll_task_pid_ns(): fetch the task's PID namespace, guarding against a
 * NULL nsproxy; the member was renamed pid_ns -> pid_ns_for_children.
 * NOTE(review): #else/#endif lines are elided in this listing.
 */
176 #ifdef HAVE_PID_NS_FOR_CHILDREN
177 # define ll_task_pid_ns(task) \
178 ((task)->nsproxy ? ((task)->nsproxy->pid_ns_for_children) : NULL)
180 # define ll_task_pid_ns(task) \
181 ((task)->nsproxy ? ((task)->nsproxy->pid_ns) : NULL)
/*
 * ll_full_name_hash(): newer full_name_hash() takes a salt pointer as
 * its first argument; older kernels take only (name, len).
 */
184 #ifdef HAVE_FULL_NAME_HASH_3ARGS
185 # define ll_full_name_hash(salt, name, len) full_name_hash(salt, name, len)
187 # define ll_full_name_hash(salt, name, len) full_name_hash(name, len)
/*
 * POSIX ACL xattr layout compat: when only the struct tags exist, alias
 * the old typedef names to them and locate the entry array immediately
 * after the header; otherwise use the a_entries member directly.
 */
190 #ifdef HAVE_STRUCT_POSIX_ACL_XATTR
191 # define posix_acl_xattr_header struct posix_acl_xattr_header
192 # define posix_acl_xattr_entry struct posix_acl_xattr_entry
193 # define GET_POSIX_ACL_XATTR_ENTRY(head) ((void *)((head) + 1))
195 # define GET_POSIX_ACL_XATTR_ENTRY(head) ((head)->a_entries)
/*
 * xattr inode-operation aliases for kernels that still route xattrs
 * through per-inode operations (HAVE_IOP_XATTR).
 */
198 #ifdef HAVE_IOP_XATTR
199 #define ll_setxattr generic_setxattr
200 #define ll_getxattr generic_getxattr
201 #define ll_removexattr generic_removexattr
202 #endif /* HAVE_IOP_XATTR */
/*
 * __vfs_setxattr(): emulation for kernels without it.  Looks up the
 * xattr handler for @name via get_xattr_type() and invokes its ->set
 * callback, whose signature differs across kernel generations:
 *   - INODE_PARAM:  set(handler, dentry, inode, name, value, size, flags)
 *   - SIMPLIFIED:   set(handler, dentry, name, value, size, flags)
 *   - oldest:       set(dentry, name, value, size, flags, handler->flags)
 * NOTE(review): braces, the rc declaration, the NULL-handler check and
 * the return statement are elided in this listing.
 */
204 #ifndef HAVE_VFS_SETXATTR
205 const struct xattr_handler *get_xattr_type(const char *name);
208 __vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
209 const void *value, size_t size, int flags)
211 const struct xattr_handler *handler;
214 handler = get_xattr_type(name);
218 # if defined(HAVE_XATTR_HANDLER_INODE_PARAM)
219 rc = handler->set(handler, dentry, inode, name, value, size, flags);
220 # elif defined(HAVE_XATTR_HANDLER_SIMPLIFIED)
221 rc = handler->set(handler, dentry, name, value, size, flags);
223 rc = handler->set(dentry, name, value, size, flags, handler->flags);
224 # endif /* !HAVE_XATTR_HANDLER_INODE_PARAM */
227 #endif /* HAVE_VFS_SETXATTR */
/*
 * posix_acl_valid(): newer kernels take a user namespace as the first
 * argument; on older kernels drop it and pass only the ACL.
 */
229 #ifndef HAVE_POSIX_ACL_VALID_USER_NS
230 #define posix_acl_valid(a,b) posix_acl_valid(b)
/*
 * posix_acl_update_mode(): backport for kernels lacking it.  Derives
 * the equivalent mode bits from the ACL via posix_acl_equiv_mode() and,
 * when the caller is neither in the owning group nor privileged
 * (capable_wrt_inode_uidgid(CAP_FSETID)), the setgid bit handling
 * applies — this mirrors the upstream CVE fix for ACL/mode updates.
 * NOTE(review): braces, the error declaration, several statements and
 * the return path are elided in this listing; do not modify without
 * consulting the full file and the upstream helper.
 */
233 #ifdef HAVE_IOP_SET_ACL
234 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
235 #ifndef HAVE_POSIX_ACL_UPDATE_MODE
236 static inline int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
237 struct posix_acl **acl)
239 umode_t mode = inode->i_mode;
242 error = posix_acl_equiv_mode(*acl, &mode);
247 if (!in_group_p(inode->i_gid) &&
248 !capable_wrt_inode_uidgid(inode, CAP_FSETID))
253 #endif /* HAVE_POSIX_ACL_UPDATE_MODE */
/*
 * iov_iter_truncate(): fallback — clamp the iterator's remaining count
 * to @count.  NOTE(review): braces, the assignment under the if, and
 * the #endif are elided in this listing.
 */
257 #ifndef HAVE_IOV_ITER_TRUNCATE
258 static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
260 if (i->count > count)
266 * mount MS_* flags split from superblock SB_* flags
267 * if the SB_* flags are not available use the MS_* flags
/*
 * Map each SB_* superblock flag to its legacy MS_* mount-flag value on
 * kernels that predate the SB_*/MS_* split.  Each mapping is guarded so
 * it only applies when SB_* is absent and MS_* exists.
 * NOTE(review): the #endif line for each #if is elided in this listing.
 */
269 #if !defined(SB_RDONLY) && defined(MS_RDONLY)
270 # define SB_RDONLY MS_RDONLY
272 #if !defined(SB_ACTIVE) && defined(MS_ACTIVE)
273 # define SB_ACTIVE MS_ACTIVE
275 #if !defined(SB_NOSEC) && defined(MS_NOSEC)
276 # define SB_NOSEC MS_NOSEC
278 #if !defined(SB_POSIXACL) && defined(MS_POSIXACL)
279 # define SB_POSIXACL MS_POSIXACL
281 #if !defined(SB_NODIRATIME) && defined(MS_NODIRATIME)
282 # define SB_NODIRATIME MS_NODIRATIME
/*
 * Emulation of the read_iter/write_iter file operations for kernels
 * that only provide the old aio_read/aio_write interface.  The helpers
 * below walk an iov_iter one iovec at a time and forward each segment
 * to generic_file_aio_read()/__generic_file_aio_write().
 * NOTE(review): this whole region is heavily elided in this listing —
 * function braces, local declarations (iov, i, bytes, res, count),
 * early-exit checks and return statements are missing.  Treat it as
 * read-only reference; consult the full file before any change.
 */
285 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
286 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
/*
 * iov_iter_iovec(): materialize the current position of @iter as a
 * struct iovec, with the length clamped to the iterator's remaining
 * count.
 */
291 static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
293 return (struct iovec) {
294 .iov_base = iter->iov->iov_base + iter->iov_offset,
295 .iov_len = min(iter->count,
296 iter->iov->iov_len - iter->iov_offset),
/*
 * iov_for_each(): iterate @iter from @start, yielding one iovec per
 * pass and advancing by that iovec's length; stops when count hits 0.
 */
300 #define iov_for_each(iov, iter, start) \
301 for (iter = (start); \
302 (iter).count && ((iov = iov_iter_iovec(&(iter))), 1); \
303 iov_iter_advance(&(iter), (iov).iov_len))
/*
 * generic_file_read_iter(): read each segment via the old aio entry
 * point, stopping early on a short read (res < iov.iov_len), then
 * advance the caller's iterator by the total bytes consumed.
 */
305 static inline ssize_t
306 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
312 iov_for_each(iov, i, *iter) {
315 res = generic_file_aio_read(iocb, &iov, 1, iocb->ki_pos);
323 if (res < iov.iov_len)
328 iov_iter_advance(iter, bytes);
/*
 * __generic_file_write_iter(): write path.  For O_APPEND with multiple
 * segments the iovecs are first copied into one array so the whole
 * write is issued atomically (see the in-code comment about LLITE size
 * updates); otherwise segments are written one at a time with early
 * exit on a short write.
 */
332 static inline ssize_t
333 __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
339 /* Since LLITE updates file size at the end of I/O in
340 * vvp_io_commit_write(), append write has to be done in atomic when
341 * there are multiple segments because otherwise each iteration to
342 * __generic_file_aio_write() will see original file size */
343 if (unlikely(iocb->ki_filp->f_flags & O_APPEND && iter->nr_segs > 1)) {
344 struct iovec *iov_copy;
347 OBD_ALLOC(iov_copy, sizeof(*iov_copy) * iter->nr_segs);
351 iov_for_each(iov, i, *iter)
352 iov_copy[count++] = iov;
354 bytes = __generic_file_aio_write(iocb, iov_copy, count,
356 OBD_FREE(iov_copy, sizeof(*iov_copy) * iter->nr_segs);
359 iov_iter_advance(iter, bytes);
363 iov_for_each(iov, i, *iter) {
366 res = __generic_file_aio_write(iocb, &iov, 1, &iocb->ki_pos);
374 if (res < iov.iov_len)
379 iov_iter_advance(iter, bytes);
382 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
/*
 * get_vmf_address(): return the faulting user address, which moved from
 * vmf->virtual_address to vmf->address in newer kernels.
 * NOTE(review): braces and the #else/#endif lines are elided here.
 */
384 static inline void __user *get_vmf_address(struct vm_fault *vmf)
386 #ifdef HAVE_VM_FAULT_ADDRESS
387 return (void __user *)vmf->address;
389 return vmf->virtual_address;
/*
 * ll_filemap_fault(): newer fault handlers take only the vm_fault; the
 * vma argument is dropped or forwarded depending on the kernel.
 */
393 #ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
394 # define ll_filemap_fault(vma, vmf) filemap_fault(vmf)
396 # define ll_filemap_fault(vma, vmf) filemap_fault(vma, vmf)
/*
 * current_time(): fallback for kernels without it.
 * NOTE(review): the body is elided — presumably it derives the time
 * from the inode's granularity (e.g. current_fs_time/CURRENT_TIME);
 * verify against the full file.
 */
399 #ifndef HAVE_CURRENT_TIME
400 static inline struct timespec current_time(struct inode *inode)
/* Documentation block for time_after32()/time_before32() below. */
408 * time_after32 - compare two 32-bit relative times
409 * @a: the time which may be after @b
410 * @b: the time which may be before @a
412 * Needed for kernels earlier than v4.14-rc1~134^2
414 * time_after32(a, b) returns true if the time @a is after time @b.
415 * time_before32(b, a) returns true if the time @b is before time @a.
417 * Similar to time_after(), compare two 32-bit timestamps for relative
418 * times. This is useful for comparing 32-bit seconds values that can't
419 * be converted to 64-bit values (e.g. due to disk format or wire protocol
420 * issues) when it is known that the times are less than 68 years apart.
422 #define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
423 #define time_before32(b, a) time_after32(a, b)
/* smp_store_mb(): alias to the older set_mb() barrier-store helper. */
428 #define smp_store_mb(var, value) set_mb(var, value)
/*
 * Block-layer integrity helpers, with the real implementations when
 * CONFIG_BLK_DEV_INTEGRITY is enabled and stubs otherwise (see the
 * #else arm near the end).  Field names changed across kernels:
 * interval_exp (log2) -> interval -> sector_size.
 * NOTE(review): function braces, #else arms and several return
 * statements are elided throughout this region.
 */
431 #if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
432 static inline unsigned short blk_integrity_interval(struct blk_integrity *bi)
434 #ifdef HAVE_INTERVAL_EXP_BLK_INTEGRITY
435 return bi->interval_exp ? 1 << bi->interval_exp : 0;
436 #elif defined(HAVE_INTERVAL_BLK_INTEGRITY)
439 return bi->sector_size;
440 #endif /* !HAVE_INTERVAL_EXP_BLK_INTEGRITY */
/*
 * blk_integrity_name(): the profile name moved into bi->profile on
 * newer kernels; the older access path is elided from this listing.
 */
443 static inline const char *blk_integrity_name(struct blk_integrity *bi)
445 #ifdef HAVE_INTERVAL_EXP_BLK_INTEGRITY
446 return bi->profile->name;
/*
 * bip_size(): integrity payload size, kept in bip_iter.bi_size on
 * newer kernels and bip_size on older ones.
 */
452 static inline unsigned int bip_size(struct bio_integrity_payload *bip)
454 #ifdef HAVE_BIP_ITER_BIO_INTEGRITY_PAYLOAD
455 return bip->bip_iter.bi_size;
457 return bip->bip_size;
/*
 * Stubs for kernels built without block integrity support; the name
 * stub must return a non-NULL string (see the gcc8/strcmp note).
 */
460 #else /* !CONFIG_BLK_DEV_INTEGRITY */
461 static inline unsigned short blk_integrity_interval(struct blk_integrity *bi)
465 static inline const char *blk_integrity_name(struct blk_integrity *bi)
467 /* gcc8 dislikes when strcmp() is called against NULL */
470 #endif /* !CONFIG_BLK_DEV_INTEGRITY */
/*
 * Map the old INTEGRITY_FLAG_* names onto the current BLK_INTEGRITY_*
 * flag values when the old names are absent.
 */
472 #ifndef INTEGRITY_FLAG_READ
473 #define INTEGRITY_FLAG_READ BLK_INTEGRITY_VERIFY
476 #ifndef INTEGRITY_FLAG_WRITE
477 #define INTEGRITY_FLAG_WRITE BLK_INTEGRITY_GENERATE
/*
 * bdev_integrity_enabled(): true when the block device has an integrity
 * profile with the matching callback and flag for the direction —
 * rw == 0 checks the verify (read) side, rw == 1 the generate (write)
 * side.  The callbacks live on bi->profile on newer kernels and on bi
 * directly on older ones.  Without CONFIG_BLK_DEV_INTEGRITY this
 * presumably always returns false.
 * NOTE(review): braces, the NULL-bi check, "return true/false" lines
 * and the final return are elided in this listing.
 */
480 static inline bool bdev_integrity_enabled(struct block_device *bdev, int rw)
482 #if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
483 struct blk_integrity *bi = bdev_get_integrity(bdev);
488 #ifdef HAVE_INTERVAL_EXP_BLK_INTEGRITY
489 if (rw == 0 && bi->profile->verify_fn != NULL &&
490 (bi->flags & INTEGRITY_FLAG_READ))
493 if (rw == 1 && bi->profile->generate_fn != NULL &&
494 (bi->flags & INTEGRITY_FLAG_WRITE))
497 if (rw == 0 && bi->verify_fn != NULL &&
498 (bi->flags & INTEGRITY_FLAG_READ))
501 if (rw == 1 && bi->generate_fn != NULL &&
502 (bi->flags & INTEGRITY_FLAG_WRITE))
504 #endif /* !HAVE_INTERVAL_EXP_BLK_INTEGRITY */
505 #endif /* !CONFIG_BLK_DEV_INTEGRITY */
/*
 * ll_pagevec_init(): pagevec_init() dropped its cold-page argument on
 * newer kernels; the wrapper accepts and discards it there.
 */
510 #ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
511 #define ll_pagevec_init(pvec, n) pagevec_init(pvec)
513 #define ll_pagevec_init(pvec, n) pagevec_init(pvec, n)
/*
 * ll_d_count(): dentry refcount accessor — d_count() helper on newer
 * kernels, direct d_count field on older ones.
 * NOTE(review): the #ifdef HAVE_D_COUNT/#else lines are elided here.
 */
517 # define ll_d_count(d) d_count(d)
519 # define ll_d_count(d) ((d)->d_count)
520 #endif /* HAVE_D_COUNT */
/* in_compat_syscall(): older kernels called this is_compat_task(). */
522 #ifndef HAVE_IN_COMPAT_SYSCALL
523 #define in_compat_syscall is_compat_task
/*
 * Address-space page-cache renames: page_tree -> i_pages, and the
 * per-mapping tree_lock exposed under the i_pages name.
 * NOTE(review): the guarding #ifdef/#else lines are elided; as written
 * these two defines would conflict — consult the full file.
 */
527 #define page_tree i_pages
529 #define i_pages tree_lock
/*
 * xa_lock_irqsave()/xa_unlock_irqrestore(): pre-XArray fallback onto
 * the plain spinlock primitives.
 */
532 #ifndef xa_lock_irqsave
533 #define xa_lock_irqsave(lockp, flags) spin_lock_irqsave(lockp, flags)
534 #define xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
/* lock_page_memcg()/unlock_page_memcg(): no-ops when unsupported. */
537 #ifndef HAVE_LOCK_PAGE_MEMCG
538 #define lock_page_memcg(page) do {} while (0)
539 #define unlock_page_memcg(page) do {} while (0)
/*
 * kmem_cache_create_usercopy(): fall back to plain kmem_cache_create(),
 * dropping the usercopy whitelist region arguments.
 * NOTE(review): the continuation line naming the remaining parameters
 * (usersize, ctor) is elided in this listing.
 */
542 #ifndef KMEM_CACHE_USERCOPY
543 #define kmem_cache_create_usercopy(name, size, align, flags, useroffset, \
545 kmem_cache_create(name, size, align, flags, ctor)
/*
 * selinux_is_enabled(): removed upstream; assume enabled (1) when the
 * helper no longer exists.
 */
548 #ifndef HAVE_LINUX_SELINUX_IS_ENABLED
549 #define selinux_is_enabled() 1
552 #endif /* _LUSTRE_COMPAT_H */