4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #ifndef _LUSTRE_COMPAT_H
33 #define _LUSTRE_COMPAT_H
35 #include <linux/aio.h>
37 #include <linux/namei.h>
38 #include <linux/pagemap.h>
39 #include <linux/posix_acl_xattr.h>
40 #include <linux/bio.h>
41 #include <linux/xattr.h>
42 #include <linux/workqueue.h>
43 #include <linux/blkdev.h>
44 #include <linux/slab.h>
45 #include <linux/security.h>
46 #include <libcfs/linux/linux-fs.h>
47 #include <obd_support.h>
/*
 * ll_vfs_symlink(): one 5-argument call site mapped onto whichever
 * vfs_symlink() signature the kernel provides.  The "mnt" argument is
 * always dropped; "mode" is only passed on the 4-argument variant.
 * NOTE(review): the #else/#endif lines of this conditional are not
 * visible in this excerpt.
 */
49 #ifdef HAVE_4ARGS_VFS_SYMLINK
50 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
51 vfs_symlink(dir, dentry, path, mode)
53 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
54 vfs_symlink(dir, dentry, path)
/*
 * struct bio compatibility: newer kernels keep the iteration state in
 * bio->bi_iter (bi_idx/bi_sector), older ones have the fields directly
 * on struct bio.  These macros give both layouts one spelling.
 * NOTE(review): the #ifdef/#else/#endif lines selecting between the two
 * groups are elided from this excerpt.
 */
58 #define bio_idx(bio) (bio->bi_iter.bi_idx)
59 #define bio_set_sector(bio, sector) (bio->bi_iter.bi_sector = sector)
60 #define bvl_to_page(bvl) (bvl->bv_page)
62 #define bio_idx(bio) (bio->bi_idx)
63 #define bio_set_sector(bio, sector) (bio->bi_sector = sector)
64 #define bio_sectors(bio) ((bio)->bi_size >> 9)
65 #define bvl_to_page(bvl) (bvl->bv_page)
69 #define bio_start_sector(bio) (bio->bi_iter.bi_sector)
71 #define bio_start_sector(bio) (bio->bi_sector)
/*
 * struct dentry field renames and small API back-fills:
 * - d_child/d_alias moved in and out of the d_u union across kernels;
 * - d_in_lookup(), vm_fault_t and ->iterate_shared did not exist on
 *   older kernels, so provide trivial stand-ins;
 * - truncate_pagecache() lost its "oldsize" argument upstream, hidden
 *   behind ll_truncate_pagecache().
 * NOTE(review): the d_in_lookup() body and several #else/#endif lines
 * are not visible in this excerpt.
 */
74 #ifndef HAVE_DENTRY_D_CHILD
75 #define d_child d_u.d_child
78 #ifdef HAVE_DENTRY_D_U_D_ALIAS
79 #define d_alias d_u.d_alias
82 #ifndef HAVE_D_IN_LOOKUP
83 static inline int d_in_lookup(struct dentry *dentry)
89 #ifndef HAVE_VM_FAULT_T
90 #define vm_fault_t int
93 #ifndef HAVE_FOP_ITERATE_SHARED
94 #define iterate_shared iterate
97 #ifdef HAVE_OLDSIZE_TRUNCATE_PAGECACHE
98 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, 0, size)
100 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, size)
/*
 * vfs_rename()/vfs_unlink() grew extra arguments (delegated_inode,
 * flags, user namespace) over time.  ll_vfs_rename() always takes 4
 * args and passes NULL/0 for the extras; vfs_unlink() is redefined so
 * all Lustre call sites can uniformly pass a namespace argument that
 * is dropped on kernels that do not take it.
 * NOTE(review): the #else/#endif lines are elided from this excerpt.
 */
103 #ifdef HAVE_VFS_RENAME_5ARGS
104 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL)
105 #elif defined HAVE_VFS_RENAME_6ARGS
106 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL, 0)
108 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d)
111 #ifdef HAVE_USER_NAMESPACE_ARG
112 #define vfs_unlink(ns, dir, de) vfs_unlink(ns, dir, de, NULL)
113 #elif defined HAVE_VFS_UNLINK_3ARGS
114 #define vfs_unlink(ns, dir, de) vfs_unlink(dir, de, NULL)
116 #define vfs_unlink(ns, dir, de) vfs_unlink(dir, de)
/*
 * ll_vfs_getattr(): stat a path via whichever vfs_getattr() signature
 * the kernel provides (4-arg enhanced form vs. legacy 2-arg form, which
 * ignores request_mask/flags).
 * NOTE(review): the function body is only partially visible here — the
 * opening brace, rc declaration, #else/#endif and return are elided.
 */
119 static inline int ll_vfs_getattr(struct path *path, struct kstat *st,
120 u32 request_mask, unsigned int flags)
124 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
125 rc = vfs_getattr(path, st, request_mask, flags);
127 rc = vfs_getattr(path, st);
/*
 * d_is_positive(): back-fill for kernels without it — a dentry is
 * positive when it has an attached inode.
 * inode_lock()/unlock()/trylock(): older kernels expose i_mutex
 * directly instead of these wrappers.
 */
132 #ifndef HAVE_D_IS_POSITIVE
133 static inline bool d_is_positive(const struct dentry *dentry)
135 return dentry->d_inode != NULL;
139 #ifndef HAVE_INODE_LOCK
140 # define inode_lock(inode) mutex_lock(&(inode)->i_mutex)
141 # define inode_unlock(inode) mutex_unlock(&(inode)->i_mutex)
142 # define inode_trylock(inode) mutex_trylock(&(inode)->i_mutex)
145 /* Old kernels lacked both Xarray support and the page cache
146 * using Xarrays. Our back ported Xarray support introduces
147 * the real xa_is_value() but we need a wrapper as well for
148 * the page cache interaction. Let's keep xa_is_value() separate
149 * in old kernels for Xarray support and page cache handling.
151 #ifndef HAVE_XARRAY_SUPPORT
/* On pre-Xarray kernels, "value" entries are radix-tree exceptional entries. */
152 static inline bool ll_xa_is_value(void *entry)
154 return radix_tree_exceptional_entry(entry);
157 #define ll_xa_is_value xa_is_value
/*
 * truncate_inode_pages_final(): back-fill for old kernels — simply
 * truncate the whole mapping from offset 0.
 * NOTE(review): braces/#endif lines of both definitions above are
 * elided from this excerpt.
 */
160 #ifndef HAVE_TRUNCATE_INODE_PAGES_FINAL
161 static inline void truncate_inode_pages_final(struct address_space *map)
163 truncate_inode_pages(map, 0);
/*
 * PTR_ERR_OR_ZERO(): back-fill returning the error code of an ERR_PTR
 * or 0 for a valid pointer.
 * NOTE(review): its body is not visible in this excerpt.
 */
167 #ifndef HAVE_PTR_ERR_OR_ZERO
168 static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
/*
 * ll_task_pid_ns(): the nsproxy member holding the pid namespace was
 * renamed to pid_ns_for_children; returns NULL when the task has no
 * nsproxy (exiting task).
 */
177 #ifdef HAVE_PID_NS_FOR_CHILDREN
178 # define ll_task_pid_ns(task) \
179 ((task)->nsproxy ? ((task)->nsproxy->pid_ns_for_children) : NULL)
181 # define ll_task_pid_ns(task) \
182 ((task)->nsproxy ? ((task)->nsproxy->pid_ns) : NULL)
/* full_name_hash() gained a salt argument; ignore it on old kernels. */
185 #ifdef HAVE_FULL_NAME_HASH_3ARGS
186 # define ll_full_name_hash(salt, name, len) full_name_hash(salt, name, len)
188 # define ll_full_name_hash(salt, name, len) full_name_hash(name, len)
/*
 * POSIX ACL xattr layout: newer kernels use plain struct tags and the
 * entries follow the header directly; older kernels typedef the names
 * and expose an a_entries flexible array.
 */
191 #ifdef HAVE_STRUCT_POSIX_ACL_XATTR
192 # define posix_acl_xattr_header struct posix_acl_xattr_header
193 # define posix_acl_xattr_entry struct posix_acl_xattr_entry
194 # define GET_POSIX_ACL_XATTR_ENTRY(head) ((void *)((head) + 1))
196 # define GET_POSIX_ACL_XATTR_ENTRY(head) ((head)->a_entries)
/* Map Lustre's xattr ops onto the generic_* helpers where they exist. */
199 #ifdef HAVE_IOP_XATTR
200 #define ll_setxattr generic_setxattr
201 #define ll_getxattr generic_getxattr
202 #define ll_removexattr generic_removexattr
203 #endif /* HAVE_IOP_XATTR */
/* posix_acl_valid() gained a user-namespace first argument; drop it. */
205 #ifndef HAVE_POSIX_ACL_VALID_USER_NS
206 #define posix_acl_valid(a,b) posix_acl_valid(b)
/*
 * posix_acl_update_mode(): back-port of the upstream helper for kernels
 * with ->set_acl but without the helper itself.  Derives the mode bits
 * equivalent to the ACL and clears SGID when the caller is not in the
 * owning group and lacks CAP_FSETID.
 * NOTE(review): only fragments of the body are visible here — the
 * braces, error handling and the *mode_p/*acl updates are elided, so
 * the full control flow cannot be confirmed from this excerpt.
 */
209 #ifdef HAVE_IOP_SET_ACL
210 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
211 #if !defined(HAVE_USER_NAMESPACE_ARG) && !defined(HAVE_POSIX_ACL_UPDATE_MODE)
212 static inline int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
213 struct posix_acl **acl)
215 umode_t mode = inode->i_mode;
218 error = posix_acl_equiv_mode(*acl, &mode);
223 if (!in_group_p(inode->i_gid) &&
224 !capable_wrt_inode_uidgid(inode, CAP_FSETID))
229 #endif /* HAVE_POSIX_ACL_UPDATE_MODE */
/*
 * iov_iter_truncate(): back-fill — clamp the iterator's remaining byte
 * count to "count" (only shrinks, never grows).
 * NOTE(review): braces/assignment line are elided from this excerpt.
 */
233 #ifndef HAVE_IOV_ITER_TRUNCATE
234 static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
236 if (i->count > count)
/* (comment continues below as in the original) */
242 * mount MS_* flags split from superblock SB_* flags
243 * if the SB_* flags are not available use the MS_* flags
245 #if !defined(SB_RDONLY) && defined(MS_RDONLY)
246 # define SB_RDONLY MS_RDONLY
248 #if !defined(SB_ACTIVE) && defined(MS_ACTIVE)
249 # define SB_ACTIVE MS_ACTIVE
251 #if !defined(SB_NOSEC) && defined(MS_NOSEC)
252 # define SB_NOSEC MS_NOSEC
254 #if !defined(SB_POSIXACL) && defined(MS_POSIXACL)
255 # define SB_POSIXACL MS_POSIXACL
257 #if !defined(SB_NODIRATIME) && defined(MS_NODIRATIME)
258 # define SB_NODIRATIME MS_NODIRATIME
/*
 * iov_iter-based read/write emulation for kernels that predate
 * ->read_iter/->write_iter: generic_file_read_iter() and
 * __generic_file_write_iter() are synthesized on top of the old
 * generic_file_aio_read()/__generic_file_aio_write() one-iovec-at-a-time
 * APIs, with iov_for_each() walking the iterator's segments.
 * NOTE(review): large parts of both function bodies (braces, local
 * declarations, error paths, loop exits) are elided from this excerpt;
 * the code below must not be restructured without the full text.
 */
261 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
262 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
/* iov_iter_iovec(): current segment of the iterator, offset applied. */
267 static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
269 return (struct iovec) {
270 .iov_base = iter->iov->iov_base + iter->iov_offset,
271 .iov_len = min(iter->count,
272 iter->iov->iov_len - iter->iov_offset),
/* Walk each iovec segment of "start", advancing by the segment length. */
276 #define iov_for_each(iov, iter, start) \
277 for (iter = (start); \
278 (iter).count && ((iov = iov_iter_iovec(&(iter))), 1); \
279 iov_iter_advance(&(iter), (iov).iov_len))
/* Emulated generic_file_read_iter(): one aio_read per segment. */
281 static inline ssize_t
282 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
288 iov_for_each(iov, i, *iter) {
291 res = generic_file_aio_read(iocb, &iov, 1, iocb->ki_pos);
299 if (res < iov.iov_len)
304 iov_iter_advance(iter, bytes);
/* Emulated __generic_file_write_iter(): see append-mode note below. */
308 static inline ssize_t
309 __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
315 /* Since LLITE updates file size at the end of I/O in
316 * vvp_io_commit_write(), append write has to be done in atomic when
317 * there are multiple segments because otherwise each iteration to
318 * __generic_file_aio_write() will see original file size */
319 if (unlikely(iocb->ki_filp->f_flags & O_APPEND && iter->nr_segs > 1)) {
320 struct iovec *iov_copy;
323 OBD_ALLOC_PTR_ARRAY(iov_copy, iter->nr_segs);
327 iov_for_each(iov, i, *iter)
328 iov_copy[count++] = iov;
330 bytes = __generic_file_aio_write(iocb, iov_copy, count,
332 OBD_FREE_PTR_ARRAY(iov_copy, iter->nr_segs);
335 iov_iter_advance(iter, bytes);
339 iov_for_each(iov, i, *iter) {
342 res = __generic_file_aio_write(iocb, &iov, 1, &iocb->ki_pos);
350 if (res < iov.iov_len)
355 iov_iter_advance(iter, bytes);
358 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
/*
 * get_vmf_address(): the faulting user address moved from
 * vmf->virtual_address to vmf->address; hide the rename.
 * ll_filemap_fault(): filemap_fault() dropped its vma argument.
 * NOTE(review): braces/#else/#endif lines are elided from this excerpt.
 */
360 static inline void __user *get_vmf_address(struct vm_fault *vmf)
362 #ifdef HAVE_VM_FAULT_ADDRESS
363 return (void __user *)vmf->address;
365 return vmf->virtual_address;
369 #ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
370 # define ll_filemap_fault(vma, vmf) filemap_fault(vmf)
372 # define ll_filemap_fault(vma, vmf) filemap_fault(vma, vmf)
/*
 * current_time(): back-fill for kernels without it.
 * NOTE(review): its body is not visible in this excerpt.
 */
375 #ifndef HAVE_CURRENT_TIME
376 static inline struct timespec current_time(struct inode *inode)
384 * time_after32 - compare two 32-bit relative times
385 * @a: the time which may be after @b
386 * @b: the time which may be before @a
388 * Needed for kernels earlier than v4.14-rc1~134^2
390 * time_after32(a, b) returns true if the time @a is after time @b.
391 * time_before32(b, a) returns true if the time @b is before time @a.
393 * Similar to time_after(), compare two 32-bit timestamps for relative
394 * times. This is useful for comparing 32-bit seconds values that can't
395 * be converted to 64-bit values (e.g. due to disk format or wire protocol
396 * issues) when it is known that the times are less than 68 years apart.
398 #define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
399 #define time_before32(b, a) time_after32(a, b)
/* smp_store_mb() replaced set_mb(); pagevec_init() lost its cold arg. */
404 #define smp_store_mb(var, value) set_mb(var, value)
407 #ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
408 #define ll_pagevec_init(pvec, n) pagevec_init(pvec)
410 #define ll_pagevec_init(pvec, n) pagevec_init(pvec, n)
/* ll_d_count(): d_count() helper vs. direct d_count field access. */
414 # define ll_d_count(d) d_count(d)
416 # define ll_d_count(d) ((d)->d_count)
417 #endif /* HAVE_D_COUNT */
419 #ifndef HAVE_IN_COMPAT_SYSCALL
420 #define in_compat_syscall is_compat_task
/*
 * Page-cache tree naming/locking: on i_pages kernels the old page_tree
 * name maps to i_pages and locking goes through the xa_lock helpers;
 * on older kernels i_pages maps back to tree_lock, taken as a plain
 * spinlock.  NOTE(review): the #ifdef/#else lines selecting between the
 * two groups are elided from this excerpt.
 */
424 #define page_tree i_pages
425 #define ll_xa_lock_irqsave(lockp, flags) xa_lock_irqsave(lockp, flags)
426 #define ll_xa_unlock_irqrestore(lockp, flags) xa_unlock_irqrestore(lockp, flags)
428 #define i_pages tree_lock
429 #define ll_xa_lock_irqsave(lockp, flags) spin_lock_irqsave(lockp, flags)
430 #define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
/* lock_page_memcg()/unlock_page_memcg(): no-ops where unsupported. */
433 #ifndef HAVE_LOCK_PAGE_MEMCG
434 #define lock_page_memcg(page) do {} while (0)
435 #define unlock_page_memcg(page) do {} while (0)
/* Without usercopy whitelisting, fall back to plain kmem_cache_create(). */
438 #ifndef KMEM_CACHE_USERCOPY
439 #define kmem_cache_create_usercopy(name, size, align, flags, useroffset, \
441 kmem_cache_create(name, size, align, flags, ctor)
/* selinux_is_enabled() was removed upstream; assume enabled. */
444 #ifndef HAVE_LINUX_SELINUX_IS_ENABLED
445 #define selinux_is_enabled() 1
/*
 * ll_vfs_getxattr(): read xattr "name" from @inode via the variant the
 * kernel provides — vfs_getxattr() with &init_user_ns, __vfs_getxattr(),
 * or the raw inode_operations method as last resort.
 * NOTE(review): braces, the "name" parameter line, #else/#endif and the
 * error return for a missing ->getxattr are elided from this excerpt.
 */
448 static inline int ll_vfs_getxattr(struct dentry *dentry, struct inode *inode,
450 void *value, size_t size)
452 #ifdef HAVE_USER_NAMESPACE_ARG
453 return vfs_getxattr(&init_user_ns, dentry, name, value, size);
454 #elif defined(HAVE_VFS_SETXATTR)
455 return __vfs_getxattr(dentry, inode, name, value, size);
457 if (unlikely(!inode->i_op->getxattr))
460 return inode->i_op->getxattr(dentry, name, value, size);
/*
 * ll_vfs_setxattr(): same three-way dispatch as ll_vfs_getxattr(), for
 * setting an xattr.  Same elisions apply to this excerpt.
 */
464 static inline int ll_vfs_setxattr(struct dentry *dentry, struct inode *inode,
466 const void *value, size_t size, int flags)
468 #ifdef HAVE_USER_NAMESPACE_ARG
469 return vfs_setxattr(&init_user_ns, dentry, name, value, size, flags);
470 #elif defined(HAVE_VFS_SETXATTR)
471 return __vfs_setxattr(dentry, inode, name, value, size, flags);
473 if (unlikely(!inode->i_op->setxattr))
476 return inode->i_op->setxattr(dentry, name, value, size, flags);
480 static inline int ll_vfs_removexattr(struct dentry *dentry, struct inode *inode,
483 #ifdef HAVE_USER_NAMESPACE_ARG
484 return vfs_removexattr(&init_user_ns, dentry, name);
485 #elif defined(HAVE_VFS_SETXATTR)
486 return __vfs_removexattr(dentry, name);
488 if (unlikely(!inode->i_op->setxattr))
491 return inode->i_op->removexattr(dentry, name);
/* FALLOC_FL_* values missing from old uapi headers; values match uapi. */
495 #ifndef FALLOC_FL_COLLAPSE_RANGE
496 #define FALLOC_FL_COLLAPSE_RANGE 0x08 /* remove a range of a file */
499 #ifndef FALLOC_FL_ZERO_RANGE
500 #define FALLOC_FL_ZERO_RANGE 0x10 /* convert range to zeros */
503 #ifndef FALLOC_FL_INSERT_RANGE
504 #define FALLOC_FL_INSERT_RANGE 0x20 /* insert space within file */
/* raw_cpu_ptr() was named __this_cpu_ptr() on old kernels. */
508 #define raw_cpu_ptr(p) __this_cpu_ptr(p)
/*
 * is_root_inode(): back-fill — true when the inode is the superblock's
 * root dentry's inode.  NOTE(review): braces/#endif elided here.
 */
511 #ifndef HAVE_IS_ROOT_INODE
512 static inline bool is_root_inode(struct inode *inode)
514 return inode == inode->i_sb->s_root->d_inode;
/*
 * register_shrinker() became int-returning; on void-returning kernels
 * wrap it in a comma expression that always yields 0 (success).
 * (Macro self-reference is safe: it does not re-expand recursively.)
 */
518 #ifndef HAVE_REGISTER_SHRINKER_RET
519 #define register_shrinker(_s) (register_shrinker(_s), 0)
/* "fallthrough" pseudo-keyword: attribute on GCC >= 7, no-op otherwise. */
523 # if defined(__GNUC__) && __GNUC__ >= 7
524 # define fallthrough __attribute__((fallthrough)) /* fallthrough */
526 # define fallthrough do {} while (0) /* fallthrough */
/*
 * ll_security_release_secctx(): newer kernels take a struct lsmcontext
 * built via lsmcontext_init(); older ones take (secdata, seclen)
 * directly.  NOTE(review): braces/#else/#endif elided in this excerpt.
 */
530 static inline void ll_security_release_secctx(char *secdata, u32 seclen)
532 #ifdef HAVE_SEC_RELEASE_SECCTX_1ARG
533 struct lsmcontext context = { };
535 lsmcontext_init(&context, secdata, seclen, 0);
536 return security_release_secctx(&context);
538 return security_release_secctx(secdata, seclen);
/*
 * On kernels without the user-namespace argument, redefine these VFS
 * entry points so all Lustre call sites can uniformly pass a namespace
 * that is simply dropped.
 */
542 #ifndef HAVE_USER_NAMESPACE_ARG
543 #define posix_acl_update_mode(ns, inode, mode, acl) \
544 posix_acl_update_mode(inode, mode, acl)
545 #define notify_change(ns, de, attr, inode) notify_change(de, attr, inode)
546 #define inode_owner_or_capable(ns, inode) inode_owner_or_capable(inode)
547 #define vfs_create(ns, dir, de, mode, ex) vfs_create(dir, de, mode, ex)
548 #define vfs_mkdir(ns, dir, de, mode) vfs_mkdir(dir, de, mode)
549 #define ll_set_acl(ns, inode, acl, type) ll_set_acl(inode, acl, type)
552 #endif /* _LUSTRE_COMPAT_H */