4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #ifndef _LUSTRE_COMPAT_H
33 #define _LUSTRE_COMPAT_H
35 #include <linux/aio.h>
37 #include <linux/namei.h>
38 #include <linux/pagemap.h>
39 #include <linux/posix_acl_xattr.h>
40 #include <linux/bio.h>
41 #include <linux/xattr.h>
42 #include <linux/workqueue.h>
43 #include <linux/blkdev.h>
44 #include <linux/backing-dev.h>
45 #include <linux/slab.h>
46 #include <linux/security.h>
47 #include <linux/pagevec.h>
48 #include <linux/workqueue.h>
49 #include <libcfs/linux/linux-fs.h>
50 #ifdef HAVE_XARRAY_SUPPORT
51 #include <linux/xarray.h>
53 #include <libcfs/linux/xarray.h>
55 #include <obd_support.h>
/*
 * ll_vfs_symlink(): paper over the vfs_symlink() signature change — newer
 * kernels take a umode_t, older ones do not.  The mnt arg is accepted and
 * dropped in both branches so callers need not care.
 * NOTE(review): the "#else" between the two definitions and the closing
 * "#endif" appear to have been lost from this extract — restore from
 * upstream before building.
 */
57 #ifdef HAVE_4ARGS_VFS_SYMLINK
58 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
59 vfs_symlink(dir, dentry, path, mode)
/* NOTE(review): missing "#else" line here in this extract */
61 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
62 vfs_symlink(dir, dentry, path)
/*
 * bio accessor compat: kernels with struct bvec_iter (v3.14+) keep the
 * cursor in bio->bi_iter; older kernels expose bi_idx/bi_sector/bi_size
 * directly on the bio.
 * NOTE(review): the surrounding "#ifdef/#else/#endif" lines were lost in
 * this extract — as written, bio_idx/bio_set_sector/bvl_to_page are
 * defined twice unconditionally.
 */
66 #define bio_idx(bio) (bio->bi_iter.bi_idx)
67 #define bio_set_sector(bio, sector) (bio->bi_iter.bi_sector = sector)
68 #define bvl_to_page(bvl) (bvl->bv_page)
/* NOTE(review): legacy (pre-bvec_iter) branch — conditional lines missing */
70 #define bio_idx(bio) (bio->bi_idx)
71 #define bio_set_sector(bio, sector) (bio->bi_sector = sector)
72 #define bio_sectors(bio) ((bio)->bi_size >> 9)
73 #define bvl_to_page(bvl) (bvl->bv_page)
/* bio_start_sector(): same split — bi_iter variant vs. legacy bi_sector */
77 #define bio_start_sector(bio) (bio->bi_iter.bi_sector)
79 #define bio_start_sector(bio) (bio->bi_sector)
/*
 * bio device/disk/queue accessors: older kernels keep a bdev pointer in
 * bio->bi_bdev; after v4.14 the bio carries bi_disk instead.
 * NOTE(review): the enclosing "#ifdef HAVE_BIO_BI_BDEV"-style conditional
 * and its "#else"/"#endif" lines are missing from this extract.
 */
83 # define bio_get_dev(bio) ((bio)->bi_bdev)
84 # define bio_get_disk(bio) (bio_get_dev(bio)->bd_disk)
85 # define bio_get_queue(bio) bdev_get_queue(bio_get_dev(bio))
/* kernels with bi_bdev but no bio_set_dev() helper: emulate it */
87 # ifndef HAVE_BIO_SET_DEV
88 # define bio_set_dev(bio, bdev) (bio_get_dev(bio) = (bdev))
/* NOTE(review): bi_disk branch below — its "#else" line is missing */
91 # define bio_get_disk(bio) ((bio)->bi_disk)
92 # define bio_get_queue(bio) (bio_get_disk(bio)->queue)
/*
 * cfs_bio_alloc(): allocate a bio bound to @bdev for @op, compat across the
 * v5.18 bio_alloc() signature change (bdev/op moved into the allocator).
 * NOTE(review): this extract lost the function's braces, the local
 * "struct bio *bio;" declaration, the "#else", and the "return bio;" —
 * incomplete as shown; restore from upstream.
 */
99 static inline struct bio *cfs_bio_alloc(struct block_device *bdev,
100 unsigned short nr_vecs,
101 __u32 op, gfp_t gfp_mask)
104 #ifdef HAVE_BIO_ALLOC_WITH_BDEV
105 bio = bio_alloc(bdev, nr_vecs, op, gfp_mask);
/* NOTE(review): missing "#else" — legacy path allocates then binds bdev */
107 bio = bio_alloc(gfp_mask, nr_vecs);
109 bio_set_dev(bio, bdev);
112 #endif /* HAVE_BIO_ALLOC_WITH_BDEV */
/* dentry sibling pointer moved out of the d_u union in newer kernels */
116 #ifndef HAVE_DENTRY_D_CHILD
117 #define d_child d_u.d_child
/*
 * d_in_lookup() stub for kernels predating the real helper.
 * NOTE(review): body (braces / "return 0;") missing from this extract.
 */
120 #ifndef HAVE_D_IN_LOOKUP
121 static inline int d_in_lookup(struct dentry *dentry)
/*
 * Child-dentry iteration: v6.8+ uses the d_children hlist, older kernels
 * the d_subdirs list.  NOTE(review): "#else"/"#endif" lines missing below.
 */
127 #ifdef HAVE_DENTRY_D_CHILDREN
128 #define d_no_children(dentry) (hlist_empty(&(dentry)->d_children))
129 #define d_for_each_child(child, dentry) \
130 hlist_for_each_entry((child), &(dentry)->d_children, d_sib)
132 #define d_no_children(dentry) (list_empty(&(dentry)->d_subdirs))
133 #define d_for_each_child(child, dentry) \
134 list_for_each_entry((child), &(dentry)->d_subdirs, d_child)
/* vm_fault_t was plain int before v4.17 */
137 #ifndef HAVE_VM_FAULT_T
138 #define vm_fault_t int
/* pre-v4.7 kernels have no ->iterate_shared; fall back to ->iterate */
141 #ifndef HAVE_FOP_ITERATE_SHARED
142 #define iterate_shared iterate
/* truncate_pagecache() dropped its old-size argument in v3.12 */
145 #ifdef HAVE_OLDSIZE_TRUNCATE_PAGECACHE
146 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, 0, size)
148 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, size)
/*
 * ll_vfs_rename(): normalize vfs_rename() across the 4/5/6-argument eras
 * (delegated-inode and flags parameters were added over time).
 * NOTE(review): trailing "#else"/"#endif" lines lost in this extract.
 */
151 #ifdef HAVE_VFS_RENAME_5ARGS
152 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL)
153 #elif defined HAVE_VFS_RENAME_6ARGS
154 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL, 0)
156 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d)
/* vfs_unlink(): absorb the user-namespace and delegated-inode arguments */
159 #ifdef HAVE_USER_NAMESPACE_ARG
160 #define vfs_unlink(ns, dir, de) vfs_unlink(ns, dir, de, NULL)
161 #elif defined HAVE_VFS_UNLINK_3ARGS
162 #define vfs_unlink(ns, dir, de) vfs_unlink(dir, de, NULL)
164 #define vfs_unlink(ns, dir, de) vfs_unlink(dir, de)
/* pre-mnt_idmap kernels: alias the idmap type/constant to user_namespace */
167 #ifndef HAVE_MNT_IDMAP_ARG
168 #define mnt_idmap user_namespace
169 #define nop_mnt_idmap init_user_ns
/*
 * ll_vfs_getattr(): wrap vfs_getattr() across the request_mask/flags
 * signature change.  NOTE(review): braces, "int rc;", "#else", and
 * "return rc;" lines were lost from this extract.
 */
172 static inline int ll_vfs_getattr(struct path *path, struct kstat *st,
173 u32 request_mask, unsigned int flags)
177 #if defined(HAVE_USER_NAMESPACE_ARG) || defined(HAVE_INODEOPS_ENHANCED_GETATTR)
178 rc = vfs_getattr(path, st, request_mask, flags);
180 rc = vfs_getattr(path, st);
/*
 * d_is_positive(): true when the dentry has an inode attached.
 * NOTE(review): function braces and trailing "#endif" missing here.
 */
185 #ifndef HAVE_D_IS_POSITIVE
186 static inline bool d_is_positive(const struct dentry *dentry)
188 return dentry->d_inode != NULL;
/* pre-v4.5 kernels expose i_mutex directly instead of inode_lock() */
192 #ifndef HAVE_INODE_LOCK
193 # define inode_lock(inode) mutex_lock(&(inode)->i_mutex)
194 # define inode_unlock(inode) mutex_unlock(&(inode)->i_mutex)
195 # define inode_trylock(inode) mutex_trylock(&(inode)->i_mutex)
/* fall back to grab_cache_page_nowait(); fp/gfp are ignored on old kernels */
198 #ifndef HAVE_PAGECACHE_GET_PAGE
199 #define pagecache_get_page(mapping, index, fp, gfp) \
200 grab_cache_page_nowait(mapping, index)
/* NOTE(review): the comment below lost its closing "* /" in this extract */
203 /* Old kernels lacked both Xarray support and the page cache
204 * using Xarrays. Our back ported Xarray support introduces
205 * the real xa_is_value() but we need a wrapper as well for
206 * the page cache interaction. Lets keep xa_is_value() separate
207 * in old kernels for Xarray support and page cache handling.
209 #ifndef HAVE_XARRAY_SUPPORT
210 static inline bool ll_xa_is_value(void *entry)
212 return radix_tree_exceptional_entry(entry);
215 #define ll_xa_is_value xa_is_value
218 /* Linux kernel version v5.0 commit fd9dc93e36231fb6d520e0edd467058fad4fd12d
219 * ("XArray: Change xa_insert to return -EBUSY")
222 static inline int __must_check ll_xa_insert(struct xarray *xa,
224 void *entry, gfp_t gpf)
226 int rc = xa_insert(xa, index, entry, gpf);
/*
 * truncate_inode_pages_final(): emulate on old kernels by truncating from 0.
 * NOTE(review): function braces and trailing "#endif" missing in extract.
 */
233 #ifndef HAVE_TRUNCATE_INODE_PAGES_FINAL
234 static inline void truncate_inode_pages_final(struct address_space *map)
236 truncate_inode_pages(map, 0);
/*
 * Capability word access: v6.3+ stores kernel_cap_t as a single u64 (.val);
 * older kernels use a u32 array (.cap[0]).
 */
240 #ifdef HAVE_U64_CAPABILITY
241 #define ll_capability_u32(kcap) \
242 ((kcap).val & 0xFFFFFFFF)
243 #define ll_set_capability_u32(kcap, val32) \
244 ((kcap)->val = ((kcap)->val & 0xffffffff00000000ull) | (val32))
/* NOTE(review): the continuation body of the next macro (likely
 * "((kcap).cap[0])") was lost — as shown, the backslash wrongly
 * continues into the following #define. Restore before building. */
246 #define ll_capability_u32(kcap) \
248 #define ll_set_capability_u32(kcap, val32) \
249 ((kcap)->cap[0] = val32)
/*
 * PTR_ERR_OR_ZERO() backport.  NOTE(review): the function body
 * (IS_ERR check / return) was lost from this extract.
 */
252 #ifndef HAVE_PTR_ERR_OR_ZERO
253 static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
/* pid namespace field was renamed pid_ns -> pid_ns_for_children in v3.11 */
262 #ifdef HAVE_PID_NS_FOR_CHILDREN
263 # define ll_task_pid_ns(task) \
264 ((task)->nsproxy ? ((task)->nsproxy->pid_ns_for_children) : NULL)
266 # define ll_task_pid_ns(task) \
267 ((task)->nsproxy ? ((task)->nsproxy->pid_ns) : NULL)
/* full_name_hash() gained a salt argument in v4.8; drop it on old kernels */
270 #ifdef HAVE_FULL_NAME_HASH_3ARGS
271 # define ll_full_name_hash(salt, name, len) full_name_hash(salt, name, len)
273 # define ll_full_name_hash(salt, name, len) full_name_hash(name, len)
/*
 * posix acl xattr layout: with struct types, entries follow the header
 * contiguously; older kernels expose an a_entries flexible array.
 * NOTE(review): "#else"/"#endif" lines missing around 281.
 */
276 #ifdef HAVE_STRUCT_POSIX_ACL_XATTR
277 # define posix_acl_xattr_header struct posix_acl_xattr_header
278 # define posix_acl_xattr_entry struct posix_acl_xattr_entry
279 # define GET_POSIX_ACL_XATTR_ENTRY(head) ((void *)((head) + 1))
281 # define GET_POSIX_ACL_XATTR_ENTRY(head) ((head)->a_entries)
/* map ll_*xattr to the generic_* inode-op helpers where they exist */
284 #ifdef HAVE_IOP_XATTR
285 #define ll_setxattr generic_setxattr
286 #define ll_getxattr generic_getxattr
287 #define ll_removexattr generic_removexattr
288 #endif /* HAVE_IOP_XATTR */
/* posix_acl_valid() gained a user-namespace argument; drop it when absent */
290 #ifndef HAVE_POSIX_ACL_VALID_USER_NS
291 #define posix_acl_valid(a, b) posix_acl_valid(b)
/*
 * posix_acl_update_mode() backport for kernels that have ->set_acl but not
 * the helper: derive the equivalent mode from the ACL and clear SGID when
 * the caller is neither in the owning group nor CAP_FSETID-capable.
 * NOTE(review): this extract lost the braces, "int error;", the error
 * checks, and the closing "#endif" lines — incomplete as shown.
 */
294 #ifdef HAVE_IOP_SET_ACL
295 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
296 #if !defined(HAVE_USER_NAMESPACE_ARG) && \
297 !defined(HAVE_POSIX_ACL_UPDATE_MODE) && \
298 !defined(HAVE_MNT_IDMAP_ARG)
299 static inline int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
300 struct posix_acl **acl)
302 umode_t mode = inode->i_mode;
/* translate the ACL into an equivalent plain mode (if representable) */
305 error = posix_acl_equiv_mode(*acl, &mode);
310 if (!in_group_p(inode->i_gid) &&
311 !capable_wrt_inode_uidgid(inode, CAP_FSETID))
316 #endif /* HAVE_POSIX_ACL_UPDATE_MODE */
/*
 * iov_iter_truncate() backport: clamp the iterator's remaining count.
 * NOTE(review): braces, the assignment body, and "#endif" were lost here.
 */
320 #ifndef HAVE_IOV_ITER_TRUNCATE
321 static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
323 if (i->count > count)
/* NOTE(review): opening "/" of this comment block was lost in extract */
329 * mount MS_* flags split from superblock SB_* flags
330 * if the SB_* flags are not available use the MS_* flags
332 #if !defined(SB_RDONLY) && defined(MS_RDONLY)
333 # define SB_RDONLY MS_RDONLY
335 #if !defined(SB_ACTIVE) && defined(MS_ACTIVE)
336 # define SB_ACTIVE MS_ACTIVE
338 #if !defined(SB_NOSEC) && defined(MS_NOSEC)
339 # define SB_NOSEC MS_NOSEC
341 #if !defined(SB_POSIXACL) && defined(MS_POSIXACL)
342 # define SB_POSIXACL MS_POSIXACL
344 #if !defined(SB_NODIRATIME) && defined(MS_NODIRATIME)
345 # define SB_NODIRATIME MS_NODIRATIME
347 #if !defined(SB_KERNMOUNT) && defined(MS_KERNMOUNT)
348 # define SB_KERNMOUNT MS_KERNMOUNT
/*
 * iov_iter_iovec() backport (removed upstream in v6.4): materialize the
 * iterator's current segment as a struct iovec, offset applied and length
 * clamped to the remaining count.
 * NOTE(review): braces and the closing "};" / "#endif" lines missing.
 */
351 #ifndef HAVE_IOV_ITER_IOVEC
352 static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
354 return (struct iovec) {
355 .iov_base = iter->__iov->iov_base + iter->iov_offset,
356 .iov_len = min(iter->count,
357 iter->__iov->iov_len - iter->iov_offset),
/*
 * Pre-read_iter/write_iter kernels: iov_iter_reexpand() stub plus a helper
 * to walk every iovec segment of an iterator, advancing as it goes.
 */
362 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
363 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
368 #define iov_for_each(iov, iter, start) \
369 for (iter = (start); \
370 (iter).count && ((iov = iov_iter_iovec(&(iter))), 1); \
371 iov_iter_advance(&(iter), (iov).iov_len))
/*
 * generic_file_read_iter() emulation for aio-only kernels: feed each iovec
 * segment to generic_file_aio_read(), stop early on error or short read,
 * then advance the caller's iterator by the total bytes transferred.
 * NOTE(review): extract lost the local declarations (iov, i, bytes, res),
 * braces, and return statements for both functions below.
 */
373 static inline ssize_t
374 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
380 iov_for_each(iov, i, *iter) {
383 res = generic_file_aio_read(iocb, &iov, 1, iocb->ki_pos);
/* short read => no more data; stop iterating */
391 if (res < iov.iov_len)
396 iov_iter_advance(iter, bytes);
/*
 * __generic_file_write_iter() emulation: O_APPEND with multiple segments
 * must go down in one aio call (see comment below); otherwise each segment
 * is written individually like the read path above.
 */
400 static inline ssize_t
401 __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
407 /* Since LLITE updates file size at the end of I/O in
408 * vvp_io_commit_write(), append write has to be done in atomic when
409 * there are multiple segments because otherwise each iteration to
410 * __generic_file_aio_write() will see original file size
412 if (unlikely(iocb->ki_filp->f_flags & O_APPEND && iter->nr_segs > 1)) {
413 struct iovec *iov_copy;
416 OBD_ALLOC_PTR_ARRAY(iov_copy, iter->nr_segs);
420 iov_for_each(iov, i, *iter)
421 iov_copy[count++] = iov;
423 bytes = __generic_file_aio_write(iocb, iov_copy, count,
425 OBD_FREE_PTR_ARRAY(iov_copy, iter->nr_segs);
428 iov_iter_advance(iter, bytes);
432 iov_for_each(iov, i, *iter) {
435 res = __generic_file_aio_write(iocb, &iov, 1, &iocb->ki_pos);
/* short write => stop; remaining segments are not attempted */
443 if (res < iov.iov_len)
448 iov_iter_advance(iter, bytes);
451 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
/*
 * get_vmf_address(): the faulting address moved from vmf->virtual_address
 * to vmf->address in v4.10.  NOTE(review): braces, "#else", "#endif" lines
 * were lost from this extract.
 */
453 static inline void __user *get_vmf_address(struct vm_fault *vmf)
455 #ifdef HAVE_VM_FAULT_ADDRESS
456 return (void __user *)vmf->address;
458 return vmf->virtual_address;
/* ->fault() dropped its vma argument in v4.11; normalize the call */
462 #ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
463 # define __ll_filemap_fault(vma, vmf) filemap_fault(vmf)
465 # define __ll_filemap_fault(vma, vmf) filemap_fault(vma, vmf)
/*
 * current_time() backport.  NOTE(review): the body (CURRENT_TIME /
 * current_fs_time fallback) is missing from this extract.
 */
468 #ifndef HAVE_CURRENT_TIME
469 static inline struct timespec current_time(struct inode *inode)
/* NOTE(review): the kernel-doc below lost its opening "/" and closing
 * terminator in this extract */
477 * time_after32 - compare two 32-bit relative times
478 * @a: the time which may be after @b
479 * @b: the time which may be before @a
481 * Needed for kernels earlier than v4.14-rc1~134^2
483 * time_after32(a, b) returns true if the time @a is after time @b.
484 * time_before32(b, a) returns true if the time @b is before time @a.
486 * Similar to time_after(), compare two 32-bit timestamps for relative
487 * times. This is useful for comparing 32-bit seconds values that can't
488 * be converted to 64-bit values (e.g. due to disk format or wire protocol
489 * issues) when it is known that the times are less than 68 years apart.
491 #define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
492 #define time_before32(b, a) time_after32(a, b)
496 /* kernel version less than 4.2, smp_store_mb is not defined, use set_mb */
498 #define smp_store_mb(var, value) set_mb(var, value) /* set full mem barrier */
/* d_count() helper appeared in v3.11; read d_count field directly before.
 * NOTE(review): the "#ifdef HAVE_D_COUNT"/"#else" lines are missing. */
502 # define ll_d_count(d) d_count(d)
504 # define ll_d_count(d) ((d)->d_count)
505 #endif /* HAVE_D_COUNT */
/* in_compat_syscall() replaced is_compat_task() in v4.6 */
507 #ifndef HAVE_IN_COMPAT_SYSCALL
508 #define in_compat_syscall is_compat_task
/*
 * Page-cache tree naming/locking: v4.17 renamed page_tree -> i_pages and
 * switched the protection to the xarray lock; older kernels use the
 * mapping spinlock.  NOTE(review): the surrounding conditional lines are
 * missing here, and "516 #define i_pages tree_lock" likely read
 * "#define i_pages_lock tree_lock" (or similar) upstream — verify.
 */
512 #define page_tree i_pages
513 #define ll_xa_lock_irqsave(lockp, flags) xa_lock_irqsave(lockp, flags)
514 #define ll_xa_unlock_irqrestore(lockp, flags) xa_unlock_irqrestore(lockp, flags)
516 #define i_pages tree_lock
517 #define ll_xa_lock_irqsave(lockp, flags) spin_lock_irqsave(lockp, flags)
518 #define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, \
522 /* Linux commit v5.15-12273-gab2f9d2d3626
523 * mm: unexport {,un}lock_page_memcg
525 * Note that the functions are still defined or declared breaking
526 * the simple approach of just defining the missing functions here
528 #ifdef HAVE_LOCK_PAGE_MEMCG
529 #define vvp_lock_page_memcg(page) lock_page_memcg((page))
530 #define vvp_unlock_page_memcg(page) unlock_page_memcg((page))
532 #define vvp_lock_page_memcg(page)
533 #define vvp_unlock_page_memcg(page)
/* usercopy-hardening variant unavailable: drop the region arguments */
536 #ifndef KMEM_CACHE_USERCOPY
537 #define kmem_cache_create_usercopy(name, size, align, flags, useroffset, \
539 kmem_cache_create(name, size, align, flags, ctor)
/*
 * ll_security_xattr_wanted(): true when an LSM blob is attached to both
 * the inode and its superblock (i.e. security xattrs are meaningful).
 * NOTE(review): braces, the "#else return false;" branch and "#endif"
 * are missing from this extract.
 */
542 static inline bool ll_security_xattr_wanted(struct inode *in)
544 #ifdef CONFIG_SECURITY
545 return in->i_security && in->i_sb->s_security;
/*
 * ll_vfs_getxattr(): three eras — idmap/userns argument, the
 * __vfs_getxattr() helper, or raw inode_operations dispatch.
 * NOTE(review): braces, the "const char *name" parameter line, the
 * "return -EOPNOTSUPP;" and "#endif" lines were lost in this extract.
 */
551 static inline int ll_vfs_getxattr(struct dentry *dentry, struct inode *inode,
553 void *value, size_t size)
555 #if defined(HAVE_MNT_IDMAP_ARG) || defined(HAVE_USER_NAMESPACE_ARG)
556 return vfs_getxattr(&nop_mnt_idmap, dentry, name, value, size);
557 #elif defined(HAVE_VFS_SETXATTR)
558 return __vfs_getxattr(dentry, inode, name, value, size);
560 if (unlikely(!inode->i_op->getxattr))
563 return inode->i_op->getxattr(dentry, name, value, size);
/* ll_vfs_setxattr(): same three-way dispatch for setting an xattr */
567 static inline int ll_vfs_setxattr(struct dentry *dentry, struct inode *inode,
569 const void *value, size_t size, int flags)
571 #if defined(HAVE_MNT_IDMAP_ARG) || defined(HAVE_USER_NAMESPACE_ARG)
572 return __vfs_setxattr(&nop_mnt_idmap, dentry, inode, name,
573 VFS_SETXATTR_VALUE(value), size, flags);
574 #elif defined(HAVE_VFS_SETXATTR)
575 return __vfs_setxattr(dentry, inode, name, value, size, flags);
577 if (unlikely(!inode->i_op->setxattr))
580 return inode->i_op->setxattr(dentry, name, value, size, flags);
/* ll_vfs_removexattr(): same dispatch for removal */
584 static inline int ll_vfs_removexattr(struct dentry *dentry, struct inode *inode,
587 #if defined(HAVE_MNT_IDMAP_ARG) || defined(HAVE_USER_NAMESPACE_ARG)
588 return __vfs_removexattr(&nop_mnt_idmap, dentry, name);
589 #elif defined(HAVE_VFS_SETXATTR)
590 return __vfs_removexattr(dentry, name);
/* NOTE(review): tests i_op->setxattr but then calls i_op->removexattr —
 * looks like a copy-paste slip; should presumably check removexattr.
 * Confirm against upstream before changing. */
592 if (unlikely(!inode->i_op->setxattr))
595 return inode->i_op->removexattr(dentry, name);
/*
 * BDI capability flags: define any flag absent on this kernel to 0 so
 * LL_BDI_CAP_FLAGS below can OR them unconditionally.
 * NOTE(review): the "#endif" after each "#ifndef" block is missing from
 * this extract.
 */
599 /* until v3.19-rc5-3-gb4caecd48005 */
600 #ifndef BDI_CAP_MAP_COPY
601 #define BDI_CAP_MAP_COPY 0
604 /* from v4.1-rc2-56-g89e9b9e07a39, until v5.9-rc3-161-gf56753ac2a90 */
605 #ifndef BDI_CAP_CGROUP_WRITEBACK
606 #define BDI_CAP_CGROUP_WRITEBACK 0
609 /* from v5.9-rc3-161-gf56753ac2a90 */
610 #ifndef BDI_CAP_WRITEBACK
611 #define BDI_CAP_WRITEBACK 0
614 /* from v5.9-rc3-161-gf56753ac2a90 */
615 #ifndef BDI_CAP_WRITEBACK_ACCT
616 #define BDI_CAP_WRITEBACK_ACCT 0
/* union of every BDI capability Lustre cares about on this kernel */
619 #define LL_BDI_CAP_FLAGS (BDI_CAP_CGROUP_WRITEBACK | BDI_CAP_MAP_COPY | \
620 BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT)
/* fallocate mode bits for kernels that predate them (values match UAPI) */
622 #ifndef FALLOC_FL_COLLAPSE_RANGE
623 #define FALLOC_FL_COLLAPSE_RANGE 0x08 /* remove a range of a file */
626 #ifndef FALLOC_FL_ZERO_RANGE
627 #define FALLOC_FL_ZERO_RANGE 0x10 /* convert range to zeros */
630 #ifndef FALLOC_FL_INSERT_RANGE
631 #define FALLOC_FL_INSERT_RANGE 0x20 /* insert space within file */
/* raw_cpu_ptr() was named __this_cpu_ptr() before v3.15
 * NOTE(review): enclosing "#ifndef"/"#endif" lines missing here */
635 #define raw_cpu_ptr(p) __this_cpu_ptr(p)
/*
 * is_root_inode(): true for the superblock's root inode.
 * NOTE(review): function braces and "#endif" missing from extract.
 */
638 #ifndef HAVE_IS_ROOT_INODE
639 static inline bool is_root_inode(struct inode *inode)
641 return inode == inode->i_sb->s_root->d_inode;
/* iov_iter_get_pages_alloc2() (v5.19): fall back to the v1 helper */
645 #ifndef HAVE_IOV_ITER_GET_PAGES_ALLOC2
646 #define iov_iter_get_pages_alloc2(i, p, m, s) \
647 iov_iter_get_pages_alloc((i), (p), (m), (s))
/* a_ops ->migratepage became ->migrate_folio in v6.0; alias the names */
650 #ifdef HAVE_AOPS_MIGRATE_FOLIO
651 #define folio_migr folio
653 #define folio_migr page
654 #define migrate_folio migratepage
/*
 * Kernel-version-neutral shrinker callback table: count/scan split on
 * modern kernels, single ->shrink() on old ones; consumed by
 * ll_shrinker_create().  NOTE(review): the "#else"/"#endif" around the
 * two callback styles and the closing "};" were lost in this extract.
 */
657 struct ll_shrinker_ops {
658 #ifdef HAVE_SHRINKER_COUNT
659 unsigned long (*count_objects)(struct shrinker *,
660 struct shrink_control *sc);
661 unsigned long (*scan_objects)(struct shrinker *,
662 struct shrink_control *sc);
664 int (*shrink)(struct shrinker *, struct shrink_control *sc);
666 int seeks; /* seeks to recreate an obj */
/*
 * shrinker_free() backport for kernels without shrinker_alloc():
 * unregister then release the OBD-allocated shrinker.
 * NOTE(review): braces and "#endif" lost from this extract.
 */
669 #ifndef HAVE_SHRINKER_ALLOC
670 static inline void shrinker_free(struct shrinker *shrinker)
672 unregister_shrinker(shrinker);
673 OBD_FREE_PTR(shrinker);
677 /* allocate and register a shrinker, return should be checked with IS_ERR() */
678 static inline struct shrinker *
679 ll_shrinker_create(struct ll_shrinker_ops *ops, unsigned int flags,
680 const char *fmt, ...)
682 struct shrinker *shrinker;
/* the printf-style name is only consumable on kernels with named APIs */
685 #if defined(HAVE_REGISTER_SHRINKER_FORMAT_NAMED) || defined(HAVE_SHRINKER_ALLOC)
686 struct va_format vaf;
690 #ifdef HAVE_SHRINKER_ALLOC
694 shrinker = shrinker_alloc(flags, "%pV", &vaf);
697 OBD_ALLOC_PTR(shrinker);
700 return ERR_PTR(-ENOMEM);
/* copy the caller's callbacks into whichever fields this kernel has */
702 #ifdef HAVE_SHRINKER_COUNT
703 shrinker->count_objects = ops->count_objects;
704 shrinker->scan_objects = ops->scan_objects;
706 shrinker->shrink = ops->shrink;
708 shrinker->seeks = ops->seeks;
/* registration: shrinker_register / register_shrinker (named, ret, void) */
710 #ifdef HAVE_SHRINKER_ALLOC
711 shrinker_register(shrinker);
713 #ifdef HAVE_REGISTER_SHRINKER_FORMAT_NAMED
717 rc = register_shrinker(shrinker, "%pV", &vaf);
719 #elif defined(HAVE_REGISTER_SHRINKER_RET)
720 rc = register_shrinker(shrinker);
722 register_shrinker(shrinker);
/* error path: release by whichever mechanism allocated the shrinker */
726 #ifdef HAVE_SHRINKER_ALLOC
727 shrinker_free(shrinker);
729 OBD_FREE_PTR(shrinker);
731 shrinker = ERR_PTR(rc);
/* fallthrough marker for pre-v5.4 kernels: real attribute on GCC >= 7,
 * harmless no-op statement otherwise
 * NOTE(review): enclosing "#ifndef fallthrough", "#else", "#endif" lines
 * are missing from this extract */
737 # if defined(__GNUC__) && __GNUC__ >= 7
738 # define fallthrough __attribute__((fallthrough)) /* fallthrough */
740 # define fallthrough do {} while (0) /* fallthrough */
/* access_ok() lost its type argument in v5.0 */
744 #ifdef VERIFY_WRITE /* removed in kernel commit v4.20-10979-g96d4f267e40f */
745 #define ll_access_ok(ptr, len) access_ok(VERIFY_WRITE, ptr, len)
747 #define ll_access_ok(ptr, len) access_ok(ptr, len)
/* __add_wb_stat() was renamed wb_stat_mod() in v5.16 */
750 #ifdef HAVE_WB_STAT_MOD
751 #define __add_wb_stat(wb, item, amount) wb_stat_mod(wb, item, amount)
/*
 * lsmcontext_init() shim for kernels whose security_release_secctx() takes
 * a single struct lsmcontext but that lack the init helper.
 * NOTE(review): parameter list tail (len/slot), braces, and remaining
 * field assignments were lost from this extract.
 */
754 #ifdef HAVE_SEC_RELEASE_SECCTX_1ARG
755 #ifndef HAVE_LSMCONTEXT_INIT
757 static inline void lsmcontext_init(struct lsmcontext *cp, char *context,
761 cp->context = context;
/*
 * ll_security_release_secctx(): release an LSM security context via
 * whichever signature this kernel exposes (lsmcontext vs. data+len).
 * NOTE(review): the "slot" parameter line, braces, "#else" and "#endif"
 * are missing from this extract.
 */
767 static inline void ll_security_release_secctx(char *secdata, u32 seclen,
770 #ifdef HAVE_SEC_RELEASE_SECCTX_1ARG
771 struct lsmcontext context = { };
773 lsmcontext_init(&context, secdata, seclen, slot);
774 return security_release_secctx(&context);
776 return security_release_secctx(secdata, seclen);
/*
 * On kernels without the user-namespace / mnt-idmap VFS argument, redefine
 * these calls to silently drop the leading ns argument so all call sites
 * can pass it unconditionally.
 * NOTE(review): closing "#endif" missing from this extract.
 */
780 #if !defined(HAVE_USER_NAMESPACE_ARG) && !defined(HAVE_MNT_IDMAP_ARG)
781 #define posix_acl_update_mode(ns, inode, mode, acl) \
782 posix_acl_update_mode(inode, mode, acl)
783 #define notify_change(ns, de, attr, inode) notify_change(de, attr, inode)
784 #define inode_owner_or_capable(ns, inode) inode_owner_or_capable(inode)
785 #define vfs_create(ns, dir, de, mode, ex) vfs_create(dir, de, mode, ex)
786 #define vfs_mkdir(ns, dir, de, mode) vfs_mkdir(dir, de, mode)
787 #define ll_set_acl(ns, inode, acl, type) ll_set_acl(inode, acl, type)
/*
 * generic_error_remove_folio() backport: on folio-less kernels treat a
 * "folio" as a single page; on folio kernels without the renamed helper,
 * loop generic_error_remove_page() over every page of the folio.
 * NOTE(review): "#else" between the two branches, loop braces, "int err;",
 * and the return statement are missing from this extract.
 */
790 #ifndef HAVE_GENERIC_ERROR_REMOVE_FOLIO
791 #ifdef HAVE_FOLIO_BATCH
792 #define generic_folio folio
794 #define generic_folio page
795 #define folio_page(page, n) (page)
796 #define folio_nr_pages(page) (1)
797 #define page_folio(page) (page)
799 static inline int generic_error_remove_folio(struct address_space *mapping,
800 struct generic_folio *folio)
802 int pg, npgs = folio_nr_pages(folio);
805 for (pg = 0; pg < npgs; pg++) {
806 err = generic_error_remove_page(mapping, folio_page(folio, pg));
/* NOTE(review): opening "/" of this comment lost in extract */
815 * delete_from_page_cache is not exported anymore
817 #ifdef HAVE_DELETE_FROM_PAGE_CACHE
818 #define cfs_delete_from_page_cache(page) delete_from_page_cache((page))
/*
 * cfs_delete_from_page_cache(): drop a locked page from the cache using
 * exported helpers only — error-remove for regular files, range truncate
 * for everything else.
 */
820 static inline void cfs_delete_from_page_cache(struct page *page)
824 LASSERT(PageLocked(page));
827 /* on entry page is locked */
828 if (S_ISREG(page->mapping->host->i_mode)) {
829 generic_error_remove_folio(page->mapping, page_folio(page));
/* NOTE(review): page->index is pgoff_t (unsigned long); the shift is done
 * before widening to loff_t and may overflow on 32-bit — confirm upstream
 * casts to loff_t first */
831 loff_t lstart = page->index << PAGE_SHIFT;
832 loff_t lend = lstart + PAGE_SIZE - 1;
834 truncate_inode_pages_range(page->mapping, lstart, lend);
/*
 * ll_read_cache_page(): read_cache_page() compat — since v5.7 the filler's
 * private argument is a struct file, so smuggle @data via a stack dummy
 * file's private_data.
 * NOTE(review): the "void *data" parameter line and braces were lost from
 * this extract.  Also note dummy_file is otherwise uninitialized stack
 * memory; only f_ra.ra_pages and private_data are set — presumably the
 * filler touches nothing else, but confirm.
 */
841 static inline struct page *ll_read_cache_page(struct address_space *mapping,
842 pgoff_t index, filler_t *filler,
845 #ifdef HAVE_READ_CACHE_PAGE_WANTS_FILE
846 struct file dummy_file;
848 dummy_file.f_ra.ra_pages = 32; /* unused, modified on ra error */
849 dummy_file.private_data = data;
850 return read_cache_page(mapping, index, filler, &dummy_file);
852 return read_cache_page(mapping, index, filler, data);
853 #endif /* HAVE_READ_CACHE_PAGE_WANTS_FILE */
/*
 * Batch-of-pages abstraction: on folio kernels wrap struct folio_batch;
 * on older kernels map every folio_batch_* name onto struct pagevec so
 * callers can use one API.  fbatch_at*() index into the batch.
 */
856 #ifdef HAVE_FOLIO_BATCH
857 # define ll_folio_batch_init(batch, n) folio_batch_init(batch)
858 # define fbatch_at(fbatch, f) ((fbatch)->folios[(f)])
859 # define fbatch_at_npgs(fbatch, f) folio_nr_pages((fbatch)->folios[(f)])
860 # define fbatch_at_pg(fbatch, f, pg) folio_page((fbatch)->folios[(f)], (pg))
861 # define folio_batch_add_page(fbatch, page) \
862 folio_batch_add(fbatch, page_folio(page))
/*
 * folio_batch_reinit() backport.  NOTE(review): its body (resetting
 * fbatch->nr) was lost from this extract.
 */
863 # ifndef HAVE_FOLIO_BATCH_REINIT
864 static inline void folio_batch_reinit(struct folio_batch *fbatch)
868 # endif /* HAVE_FOLIO_BATCH_REINIT */
870 #else /* !HAVE_FOLIO_BATCH */
/* pagevec-backed emulation of the folio_batch API */
873 # define folio_batch pagevec
875 # define folio_batch_init(pvec) pagevec_init(pvec)
876 # define folio_batch_reinit(pvec) pagevec_reinit(pvec)
877 # define folio_batch_count(pvec) pagevec_count(pvec)
878 # define folio_batch_space(pvec) pagevec_space(pvec)
879 # define folio_batch_add_page(pvec, page) \
880 pagevec_add(pvec, page)
881 # define folio_batch_release(pvec) \
882 pagevec_release(((struct pagevec *)pvec))
/* pagevec_init() lost its cold argument in v4.15 */
883 # ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
884 # define ll_folio_batch_init(pvec, n) pagevec_init(pvec)
886 # define ll_folio_batch_init(pvec, n) pagevec_init(pvec, n)
/* without folios, every batch entry is exactly one page */
888 # define fbatch_at(pvec, n) ((pvec)->pages[(n)])
889 # define fbatch_at_npgs(pvec, n) 1
890 # define fbatch_at_pg(pvec, n, pg) ((pvec)->pages[(n)])
891 #endif /* HAVE_FOLIO_BATCH */
/* __flush_workqueue() fallback: note flush_scheduled_work() ignores @wq
 * and flushes the global system workqueue instead */
893 #ifndef HAVE_FLUSH___WORKQUEUE
894 #define __flush_workqueue(wq) flush_scheduled_work()
/* nsproxy->count switched from atomic_t to refcount_t in v5.9 */
897 #ifdef HAVE_NSPROXY_COUNT_AS_REFCOUNT
898 #define nsproxy_dec(ns) refcount_dec(&(ns)->count)
900 #define nsproxy_dec(ns) atomic_dec(&(ns)->count)
/*
 * v6.6 ctime accessor backports: operate directly on inode->i_ctime.
 * NOTE(review): inode_set_ctime()'s braces and the .tv_nsec initializer
 * line are missing from this extract.
 */
903 #ifndef HAVE_INODE_GET_CTIME
904 #define inode_get_ctime(i) ((i)->i_ctime)
905 #define inode_set_ctime_to_ts(i, ts) ((i)->i_ctime = ts)
906 #define inode_set_ctime_current(i) \
907 inode_set_ctime_to_ts((i), current_time((i)))
909 static inline struct timespec64 inode_set_ctime(struct inode *inode,
910 time64_t sec, long nsec)
912 struct timespec64 ts = { .tv_sec = sec,
915 return inode_set_ctime_to_ts(inode, ts);
917 #endif /* !HAVE_INODE_GET_CTIME */
/*
 * v6.7 atime/mtime accessor backports, mirroring the ctime block above:
 * macros read/write i_atime / i_mtime directly; the inline setters build a
 * timespec64 and delegate to the *_to_ts macros.
 * NOTE(review): both setters lost their braces and .tv_nsec initializer
 * lines in this extract.
 */
919 #ifndef HAVE_INODE_GET_MTIME_SEC
921 #define inode_get_ctime_sec(i) (inode_get_ctime((i)).tv_sec)
923 #define inode_get_atime(i) ((i)->i_atime)
924 #define inode_get_atime_sec(i) ((i)->i_atime.tv_sec)
925 #define inode_set_atime_to_ts(i, ts) ((i)->i_atime = ts)
927 static inline struct timespec64 inode_set_atime(struct inode *inode,
928 time64_t sec, long nsec)
930 struct timespec64 ts = { .tv_sec = sec,
932 return inode_set_atime_to_ts(inode, ts);
935 #define inode_get_mtime(i) ((i)->i_mtime)
936 #define inode_get_mtime_sec(i) ((i)->i_mtime.tv_sec)
937 #define inode_set_mtime_to_ts(i, ts) ((i)->i_mtime = ts)
939 static inline struct timespec64 inode_set_mtime(struct inode *inode,
940 time64_t sec, long nsec)
942 struct timespec64 ts = { .tv_sec = sec,
944 return inode_set_mtime_to_ts(inode, ts);
946 #endif /* !HAVE_INODE_GET_MTIME_SEC */
948 #endif /* _LUSTRE_COMPAT_H */