4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #ifndef _LUSTRE_COMPAT_H
34 #define _LUSTRE_COMPAT_H
36 #include <linux/aio.h>
38 #include <linux/fs_struct.h>
39 #include <linux/namei.h>
40 #include <linux/pagemap.h>
41 #include <linux/bio.h>
42 #include <linux/xattr.h>
43 #include <linux/workqueue.h>
45 #include <libcfs/linux/linux-fs.h>
46 #include <lustre_patchless_compat.h>
47 #include <obd_support.h>
49 #ifdef HAVE_FS_STRUCT_RWLOCK
50 # define LOCK_FS_STRUCT(fs) write_lock(&(fs)->lock)
51 # define UNLOCK_FS_STRUCT(fs) write_unlock(&(fs)->lock)
53 # define LOCK_FS_STRUCT(fs) spin_lock(&(fs)->lock)
54 # define UNLOCK_FS_STRUCT(fs) spin_unlock(&(fs)->lock)
57 #ifdef HAVE_FS_STRUCT_SEQCOUNT
58 # define WRITE_FS_SEQ_BEGIN(fs) write_seqcount_begin(&(fs)->seq)
59 # define WRITE_FS_SEQ_END(fs) write_seqcount_end(&(fs)->seq)
61 # define WRITE_FS_SEQ_BEGIN(fs)
62 # define WRITE_FS_SEQ_END(fs)
64 static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
65 struct dentry *dentry)
74 WRITE_FS_SEQ_BEGIN(fs);
85 * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
86 * ATTR_* attributes (see bug 13828)
88 #define ATTR_BLOCKS (1 << 27)
90 #define current_ngroups current_cred()->group_info->ngroups
91 #define current_groups current_cred()->group_info->small_block
94 * OBD need working random driver, thus all our
95 * initialization routines must be called after device
96 * driver initialization
100 #define module_init(a) late_initcall(a)
103 #ifndef MODULE_ALIAS_FS
104 #define MODULE_ALIAS_FS(name)
/* Seconds field of a timespec-like time value.  The argument is
 * parenthesized so expression arguments (e.g. *ptr) expand correctly:
 * without it LTIME_S(*p) would expand to *(p.tv_sec). */
#define LTIME_S(time) ((time).tv_sec)
109 #ifdef HAVE_GENERIC_PERMISSION_2ARGS
110 # define ll_generic_permission(inode, mask, flags, check_acl) \
111 generic_permission(inode, mask)
112 #elif defined HAVE_GENERIC_PERMISSION_4ARGS
113 # define ll_generic_permission(inode, mask, flags, check_acl) \
114 generic_permission(inode, mask, flags, check_acl)
116 # define ll_generic_permission(inode, mask, flags, check_acl) \
117 generic_permission(inode, mask, check_acl)
120 #ifdef HAVE_4ARGS_VFS_SYMLINK
121 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
122 vfs_symlink(dir, dentry, path, mode)
124 #define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
125 vfs_symlink(dir, dentry, path)
128 #if !defined(HAVE_FILE_LLSEEK_SIZE) || defined(HAVE_FILE_LLSEEK_SIZE_5ARGS)
/* Kernel provides generic_file_llseek_size() with an explicit EOF
 * argument; forward all five arguments.  No trailing semicolon in the
 * expansion, so the macro can be used as an expression (e.g. in a
 * return statement or assignment). */
#define ll_generic_file_llseek_size(file, offset, origin, maxbytes, eof) \
	generic_file_llseek_size(file, offset, origin, maxbytes, eof)
/* Kernel's generic_file_llseek_size() takes no EOF argument; drop it.
 * No trailing semicolon in the expansion, so the macro can be used as
 * an expression (e.g. in a return statement or assignment). */
#define ll_generic_file_llseek_size(file, offset, origin, maxbytes, eof) \
	generic_file_llseek_size(file, offset, origin, maxbytes)
136 #ifdef HAVE_INODE_DIO_WAIT
137 /* inode_dio_wait(i) use as-is for write lock */
138 # define inode_dio_write_done(i) do {} while (0) /* for write unlock */
140 # define inode_dio_wait(i) down_write(&(i)->i_alloc_sem)
141 # define inode_dio_write_done(i) up_write(&(i)->i_alloc_sem)
144 #ifndef FS_HAS_FIEMAP
145 #define FS_HAS_FIEMAP (0)
148 #ifndef HAVE_SIMPLE_SETATTR
149 #define simple_setattr(dentry, ops) inode_setattr((dentry)->d_inode, ops)
152 #ifndef SLAB_DESTROY_BY_RCU
153 #define SLAB_DESTROY_BY_RCU 0
156 #ifndef HAVE_DQUOT_SUSPEND
157 # define ll_vfs_dq_init vfs_dq_init
158 # define ll_vfs_dq_drop vfs_dq_drop
159 # define ll_vfs_dq_transfer vfs_dq_transfer
160 # define ll_vfs_dq_off(sb, remount) vfs_dq_off(sb, remount)
162 # define ll_vfs_dq_init dquot_initialize
163 # define ll_vfs_dq_drop dquot_drop
164 # define ll_vfs_dq_transfer dquot_transfer
165 # define ll_vfs_dq_off(sb, remount) dquot_suspend(sb, -1)
168 #ifndef HAVE_BLKDEV_GET_BY_DEV
169 # define blkdev_get_by_dev(dev, mode, holder) open_by_devnum(dev, mode)
172 #ifdef HAVE_BVEC_ITER
/* Kernels with HAVE_BVEC_ITER keep the bio cursor in bio->bi_iter.
 * Macro arguments are parenthesized so expression arguments (casts,
 * pointer arithmetic) expand safely. */
#define bio_idx(bio)			((bio)->bi_iter.bi_idx)
#define bio_set_sector(bio, sector)	((bio)->bi_iter.bi_sector = (sector))
#define bvl_to_page(bvl)		((bvl)->bv_page)
/* Pre-bvec_iter kernels keep the cursor fields directly in struct bio.
 * Macro arguments are parenthesized so expression arguments expand
 * safely; bio_sectors() converts bi_size (bytes) to 512-byte sectors. */
#define bio_idx(bio)			((bio)->bi_idx)
#define bio_set_sector(bio, sector)	((bio)->bi_sector = (sector))
#define bio_sectors(bio)		((bio)->bi_size >> 9)
180 #ifndef HAVE_BIO_END_SECTOR
/* First sector past the end of the bio; 'bio' parenthesized for safety. */
#define bio_end_sector(bio) ((bio)->bi_sector + bio_sectors(bio))
/* Page backing a bio_vec segment; 'bvl' parenthesized for safety. */
#define bvl_to_page(bvl)		((bvl)->bv_page)
186 #ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
187 #define blk_queue_max_segments(rq, seg) \
188 do { blk_queue_max_phys_segments(rq, seg); \
189 blk_queue_max_hw_segments(rq, seg); } while (0)
191 #define queue_max_phys_segments(rq) queue_max_segments(rq)
192 #define queue_max_hw_segments(rq) queue_max_segments(rq)
196 #define DECLARE_PLUG(plug) struct blk_plug plug
197 #else /* !HAVE_BLK_PLUG */
198 #define DECLARE_PLUG(name)
199 #define blk_start_plug(plug) do {} while (0)
200 #define blk_finish_plug(plug) do {} while (0)
203 #ifdef HAVE_KMAP_ATOMIC_HAS_1ARG
204 #define ll_kmap_atomic(a, b) kmap_atomic(a)
205 #define ll_kunmap_atomic(a, b) kunmap_atomic(a)
207 #define ll_kmap_atomic(a, b) kmap_atomic(a, b)
208 #define ll_kunmap_atomic(a, b) kunmap_atomic(a, b)
211 #ifndef HAVE_CLEAR_INODE
212 #define clear_inode(i) end_writeback(i)
215 #ifndef HAVE_DENTRY_D_CHILD
216 #define d_child d_u.d_child
219 #ifdef HAVE_DENTRY_D_U_D_ALIAS
220 #define d_alias d_u.d_alias
223 #ifndef DATA_FOR_LLITE_IS_LIST
224 #define ll_d_hlist_node hlist_node
225 #define ll_d_hlist_empty(list) hlist_empty(list)
/* Parenthesize 'ptr' so expression arguments expand correctly
 * before the .first member access. */
#define ll_d_hlist_entry(ptr, type, name) hlist_entry((ptr).first, type, name)
227 #define ll_d_hlist_for_each(tmp, i_dentry) hlist_for_each(tmp, i_dentry)
228 # ifdef HAVE_HLIST_FOR_EACH_3ARG
229 # define ll_d_hlist_for_each_entry(dentry, p, i_dentry) \
230 p = NULL; hlist_for_each_entry(dentry, i_dentry, d_alias)
232 # define ll_d_hlist_for_each_entry(dentry, p, i_dentry) \
233 hlist_for_each_entry(dentry, p, i_dentry, d_alias)
235 #define DECLARE_LL_D_HLIST_NODE_PTR(name) struct ll_d_hlist_node *name
237 #define ll_d_hlist_node list_head
238 #define ll_d_hlist_empty(list) list_empty(list)
/* Parenthesize 'ptr' so expression arguments expand correctly
 * before the .next member access. */
#define ll_d_hlist_entry(ptr, type, name) list_entry((ptr).next, type, name)
240 #define ll_d_hlist_for_each(tmp, i_dentry) list_for_each(tmp, i_dentry)
241 #define ll_d_hlist_for_each_entry(dentry, p, i_dentry) \
242 list_for_each_entry(dentry, i_dentry, d_alias)
243 #define DECLARE_LL_D_HLIST_NODE_PTR(name) /* nothing */
244 #endif /* !DATA_FOR_LLITE_IS_LIST */
250 # define NO_QUOTA (-EDQUOT)
254 #define SEEK_DATA 3 /* seek to the next data */
257 #define SEEK_HOLE 4 /* seek to the next hole */
260 #ifndef FMODE_UNSIGNED_OFFSET
261 #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
264 #if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
265 # define ext2_set_bit __test_and_set_bit_le
266 # define ext2_clear_bit __test_and_clear_bit_le
267 # define ext2_test_bit test_bit_le
268 # define ext2_find_first_zero_bit find_first_zero_bit_le
269 # define ext2_find_next_zero_bit find_next_zero_bit_le
272 #ifdef ATTR_TIMES_SET
273 # define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
275 # define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET)
278 #ifndef XATTR_NAME_POSIX_ACL_ACCESS
279 # define XATTR_NAME_POSIX_ACL_ACCESS POSIX_ACL_XATTR_ACCESS
282 #ifndef XATTR_NAME_POSIX_ACL_DEFAULT
283 # define XATTR_NAME_POSIX_ACL_DEFAULT POSIX_ACL_XATTR_DEFAULT
286 #ifndef HAVE_LM_XXX_LOCK_MANAGER_OPS
287 # define lm_compare_owner fl_compare_owner
291 * After 3.1, kernel's nameidata.intent.open.flags is different
292 * with lustre's lookup_intent.it_flags, as lustre's it_flags'
293 * lower bits equal to FMODE_xxx while kernel doesn't transliterate
294 * lower bits of nameidata.intent.open.flags to FMODE_xxx.
296 #include <linux/version.h>
297 static inline int ll_namei_to_lookup_intent_flag(int flag)
299 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
300 flag = (flag & ~O_ACCMODE) | OPEN_FMODE(flag);
305 #include <linux/fs.h>
306 #ifndef HAVE_PROTECT_I_NLINK
307 static inline void set_nlink(struct inode *inode, unsigned int nlink)
309 inode->i_nlink = nlink;
313 #ifdef HAVE_INODEOPS_USE_UMODE_T
314 # define ll_umode_t umode_t
316 # define ll_umode_t int
319 #include <linux/dcache.h>
320 #ifndef HAVE_D_MAKE_ROOT
321 static inline struct dentry *d_make_root(struct inode *root)
323 struct dentry *res = d_alloc_root(root);
325 if (res == NULL && root)
332 #ifdef HAVE_DIRTY_INODE_HAS_FLAG
333 # define ll_dirty_inode(inode, flag) (inode)->i_sb->s_op->dirty_inode((inode), flag)
335 # define ll_dirty_inode(inode, flag) (inode)->i_sb->s_op->dirty_inode((inode))
338 #ifdef HAVE_FILE_F_INODE
339 # define set_file_inode(file, inode) (file)->f_inode = inode
341 # define set_file_inode(file, inode)
344 #ifndef HAVE_FILE_INODE
345 static inline struct inode *file_inode(const struct file *file)
347 return file->f_path.dentry->d_inode;
351 #ifdef HAVE_OLDSIZE_TRUNCATE_PAGECACHE
352 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, 0, size)
354 #define ll_truncate_pagecache(inode, size) truncate_pagecache(inode, size)
357 #ifdef HAVE_VFS_RENAME_5ARGS
358 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL)
359 #elif defined HAVE_VFS_RENAME_6ARGS
360 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d, NULL, 0)
362 #define ll_vfs_rename(a, b, c, d) vfs_rename(a, b, c, d)
365 #ifdef HAVE_VFS_UNLINK_3ARGS
366 #define ll_vfs_unlink(a, b) vfs_unlink(a, b, NULL)
368 #define ll_vfs_unlink(a, b) vfs_unlink(a, b)
#ifndef HAVE_INODE_LOCK
/* Older kernels have no inode_lock()/inode_unlock()/inode_trylock();
 * emulate them by operating on the inode's i_mutex directly. */
# define inode_lock(inode) mutex_lock(&(inode)->i_mutex)
# define inode_unlock(inode) mutex_unlock(&(inode)->i_mutex)
# define inode_trylock(inode) mutex_trylock(&(inode)->i_mutex)
377 #ifndef HAVE_RADIX_EXCEPTION_ENTRY
378 static inline int radix_tree_exceptional_entry(void *arg)
384 #ifndef HAVE_TRUNCATE_INODE_PAGES_FINAL
385 static inline void truncate_inode_pages_final(struct address_space *map)
387 truncate_inode_pages(map, 0);
388 /* Workaround for LU-118 */
390 spin_lock_irq(&map->tree_lock);
391 spin_unlock_irq(&map->tree_lock);
392 } /* Workaround end */
396 #ifndef HAVE_PTR_ERR_OR_ZERO
397 static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
407 #define SIZE_MAX (~(size_t)0)
410 #ifdef HAVE_SECURITY_IINITSEC_CALLBACK
411 # define ll_security_inode_init_security(inode, dir, name, value, len, \
412 initxattrs, dentry) \
413 security_inode_init_security(inode, dir, &((dentry)->d_name), \
415 #elif defined HAVE_SECURITY_IINITSEC_QSTR
416 # define ll_security_inode_init_security(inode, dir, name, value, len, \
417 initxattrs, dentry) \
418 security_inode_init_security(inode, dir, &((dentry)->d_name), \
420 #else /* !HAVE_SECURITY_IINITSEC_CALLBACK && !HAVE_SECURITY_IINITSEC_QSTR */
421 # define ll_security_inode_init_security(inode, dir, name, value, len, \
422 initxattrs, dentry) \
423 security_inode_init_security(inode, dir, name, value, len)
426 #ifndef bio_for_each_segment_all /* since kernel version 3.9 */
427 #ifdef HAVE_BVEC_ITER
428 #define bio_for_each_segment_all(bv, bio, it) \
429 for (it = 0, bv = (bio)->bi_io_vec; it < (bio)->bi_vcnt; it++, bv++)
431 #define bio_for_each_segment_all(bv, bio, it) bio_for_each_segment(bv, bio, it)
435 #ifdef HAVE_PID_NS_FOR_CHILDREN
436 # define ll_task_pid_ns(task) ((task)->nsproxy->pid_ns_for_children)
438 # define ll_task_pid_ns(task) ((task)->nsproxy->pid_ns)
441 #ifdef HAVE_FULL_NAME_HASH_3ARGS
442 # define ll_full_name_hash(salt, name, len) full_name_hash(salt, name, len)
444 # define ll_full_name_hash(salt, name, len) full_name_hash(name, len)
447 #ifdef HAVE_STRUCT_POSIX_ACL_XATTR
448 # define posix_acl_xattr_header struct posix_acl_xattr_header
449 # define posix_acl_xattr_entry struct posix_acl_xattr_entry
450 # define GET_POSIX_ACL_XATTR_ENTRY(head) ((void *)((head) + 1))
452 # define GET_POSIX_ACL_XATTR_ENTRY(head) ((head)->a_entries)
455 #ifdef HAVE_IOP_XATTR
456 #ifdef HAVE_XATTR_HANDLER_FLAGS
457 #define ll_setxattr generic_setxattr
458 #define ll_getxattr generic_getxattr
459 #define ll_removexattr generic_removexattr
461 int ll_setxattr(struct dentry *dentry, const char *name,
462 const void *value, size_t size, int flags);
463 ssize_t ll_getxattr(struct dentry *dentry, const char *name,
464 void *buf, size_t buf_size);
465 int ll_removexattr(struct dentry *dentry, const char *name);
466 #endif /* ! HAVE_XATTR_HANDLER_FLAGS */
467 #endif /* HAVE_IOP_XATTR */
469 #ifndef HAVE_VFS_SETXATTR
470 const struct xattr_handler *get_xattr_type(const char *name);
473 __vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
474 const void *value, size_t size, int flags)
476 # ifdef HAVE_XATTR_HANDLER_FLAGS
477 const struct xattr_handler *handler;
480 handler = get_xattr_type(name);
484 # if defined(HAVE_XATTR_HANDLER_INODE_PARAM)
485 rc = handler->set(handler, dentry, inode, name, value, size, flags);
486 # elif defined(HAVE_XATTR_HANDLER_SIMPLIFIED)
487 rc = handler->set(handler, dentry, name, value, size, flags);
489 rc = handler->set(dentry, name, value, size, flags, handler->flags);
490 # endif /* !HAVE_XATTR_HANDLER_INODE_PARAM */
492 # else /* !HAVE_XATTR_HANDLER_FLAGS */
493 return ll_setxattr(dentry, name, value, size, flags);
494 # endif /* HAVE_XATTR_HANDLER_FLAGS */
496 #endif /* HAVE_VFS_SETXATTR */
498 #ifdef HAVE_IOP_SET_ACL
499 #ifdef CONFIG_FS_POSIX_ACL
500 #ifndef HAVE_POSIX_ACL_UPDATE_MODE
501 static inline int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
502 struct posix_acl **acl)
504 umode_t mode = inode->i_mode;
507 error = posix_acl_equiv_mode(*acl, &mode);
512 if (!in_group_p(inode->i_gid) &&
513 !capable_wrt_inode_uidgid(inode, CAP_FSETID))
518 #endif /* HAVE_POSIX_ACL_UPDATE_MODE */
522 #ifndef HAVE_IOV_ITER_TRUNCATE
523 static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
525 if (i->count > count)
531 static inline bool is_sxid(umode_t mode)
533 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
538 #define IS_NOSEC(inode) (!is_sxid(inode->i_mode))
542 static inline void inode_has_no_xattr(struct inode *inode)
548 #ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
549 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
554 static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
556 return (struct iovec) {
557 .iov_base = iter->iov->iov_base + iter->iov_offset,
558 .iov_len = min(iter->count,
559 iter->iov->iov_len - iter->iov_offset),
563 #define iov_for_each(iov, iter, start) \
564 for (iter = (start); \
565 (iter).count && ((iov = iov_iter_iovec(&(iter))), 1); \
566 iov_iter_advance(&(iter), (iov).iov_len))
568 static inline ssize_t
569 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
575 iov_for_each(iov, i, *iter) {
578 res = generic_file_aio_read(iocb, &iov, 1, iocb->ki_pos);
586 if (res < iov.iov_len)
591 iov_iter_advance(iter, bytes);
595 static inline ssize_t
596 __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
602 /* Since LLITE updates file size at the end of I/O in
603 * vvp_io_commit_write(), append write has to be done in atomic when
604 * there are multiple segments because otherwise each iteration to
605 * __generic_file_aio_write() will see original file size */
606 if (unlikely(iocb->ki_filp->f_flags & O_APPEND && iter->nr_segs > 1)) {
607 struct iovec *iov_copy;
610 OBD_ALLOC(iov_copy, sizeof(*iov_copy) * iter->nr_segs);
614 iov_for_each(iov, i, *iter)
615 iov_copy[count++] = iov;
617 bytes = __generic_file_aio_write(iocb, iov_copy, count,
619 OBD_FREE(iov_copy, sizeof(*iov_copy) * iter->nr_segs);
622 iov_iter_advance(iter, bytes);
626 iov_for_each(iov, i, *iter) {
629 res = __generic_file_aio_write(iocb, &iov, 1, &iocb->ki_pos);
637 if (res < iov.iov_len)
642 iov_iter_advance(iter, bytes);
645 #endif /* HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
647 static inline void __user *get_vmf_address(struct vm_fault *vmf)
649 #ifdef HAVE_VM_FAULT_ADDRESS
650 return (void __user *)vmf->address;
652 return vmf->virtual_address;
656 #ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
657 # define ll_filemap_fault(vma, vmf) filemap_fault(vmf)
659 # define ll_filemap_fault(vma, vmf) filemap_fault(vma, vmf)
662 #ifndef HAVE_CURRENT_TIME
663 static inline struct timespec current_time(struct inode *inode)
671 * time_after32 - compare two 32-bit relative times
672 * @a: the time which may be after @b
673 * @b: the time which may be before @a
675 * time_after32(a, b) returns true if the time @a is after time @b.
676 * time_before32(b, a) returns true if the time @b is before time @a.
678 * Similar to time_after(), compare two 32-bit timestamps for relative
679 * times. This is useful for comparing 32-bit seconds values that can't
680 * be converted to 64-bit values (e.g. due to disk format or wire protocol
681 * issues) when it is known that the times are less than 68 years apart.
683 #define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
684 #define time_before32(b, a) time_after32(a, b)
692 #ifndef alloc_workqueue
693 #define alloc_workqueue(name, flags, max_active) create_workqueue(name)
697 #define READ_ONCE ACCESS_ONCE
700 #endif /* _LUSTRE_COMPAT_H */