1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #ifndef _LINUX_COMPAT25_H
24 #define _LINUX_COMPAT25_H
28 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
29 #error sorry, lustre requires at least 2.5.69
32 #include <libcfs/linux/portals_compat25.h>
34 #include <linux/lustre_patchless_compat.h>
/*
 * ll_iattr_struct: on kernels >= 2.6.14 Lustre wraps struct iattr together
 * with its own ia_attr_flags word; on older kernels the name is simply an
 * alias for plain struct iattr.
 * NOTE(review): this chunk is a numbered listing with lines dropped — the
 * struct's embedded iattr member, its closing "};" and the "#else"
 * (original lines 38, 40-41) appear to be missing; restore from the
 * pristine header before this can compile.
 */
36 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
37 struct ll_iattr_struct {
39 unsigned int ia_attr_flags;
42 #define ll_iattr_struct iattr
43 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */
45 #ifndef HAVE_SET_FS_PWD
/*
 * ll_set_fs_pwd(): switch the task's current working directory in
 * fs_struct to mnt/dentry under fs->lock, taking fresh references via
 * mntget()/dget().
 * NOTE(review): listing gaps — the opening brace, the save of the old
 * fs->pwd into old_pwd, and (presumably) the trailing release of the
 * previous mnt/dentry plus the closing brace and "#else" (original lines
 * 48, 51, 53, 58-64) are missing here; confirm against the pristine file.
 */
46 static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
47 struct dentry *dentry)
49 struct dentry *old_pwd;
50 struct vfsmount *old_pwdmnt;
52 write_lock(&fs->lock);
54 old_pwdmnt = fs->pwdmnt;
55 fs->pwdmnt = mntget(mnt);
56 fs->pwd = dget(dentry);
57 write_unlock(&fs->lock);
/* when the kernel itself exports set_fs_pwd(), use it directly */
65 #define ll_set_fs_pwd set_fs_pwd
66 #endif /* HAVE_SET_FS_PWD */
/*
 * Inode-lock compatibility: newer kernels use inode->i_mutex, older ones a
 * semaphore (i_sem); TRYLOCK returns nonzero on success in both variants.
 * NOTE(review): the "#else" separating the two macro sets (original line
 * 72) is missing from this listing, as is the "#endif" that should close
 * the 2.6.15 d_u block below (original line 81).
 */
68 #ifdef HAVE_INODE_I_MUTEX
69 #define UNLOCK_INODE_MUTEX(inode) do {mutex_unlock(&(inode)->i_mutex); } while(0)
70 #define LOCK_INODE_MUTEX(inode) do {mutex_lock(&(inode)->i_mutex); } while(0)
71 #define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
73 #define UNLOCK_INODE_MUTEX(inode) do {up(&(inode)->i_sem); } while(0)
74 #define LOCK_INODE_MUTEX(inode) do {down(&(inode)->i_sem); } while(0)
75 #define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
76 #endif /* HAVE_INODE_I_MUTEX */
/* 2.6.15 moved d_child/d_rcu into the d_u union of struct dentry */
78 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
79 #define d_child d_u.d_child
80 #define d_rcu d_u.d_rcu
/*
 * Quota on/off serialization: a mutex on newer kernels, a semaphore
 * (dqonoff_sem) before that.
 * NOTE(review): the "#else" between the two variants (original line 86)
 * is missing from this listing.
 */
83 #ifdef HAVE_DQUOTOFF_MUTEX
84 #define UNLOCK_DQONOFF_MUTEX(dqopt) do {mutex_unlock(&(dqopt)->dqonoff_mutex); } while(0)
85 #define LOCK_DQONOFF_MUTEX(dqopt) do {mutex_lock(&(dqopt)->dqonoff_mutex); } while(0)
87 #define UNLOCK_DQONOFF_MUTEX(dqopt) do {up(&(dqopt)->dqonoff_sem); } while(0)
88 #define LOCK_DQONOFF_MUTEX(dqopt) do {down(&(dqopt)->dqonoff_sem); } while(0)
89 #endif /* HAVE_DQUOTOFF_MUTEX */
/*
 * Supplementary-groups compatibility: kernels < 2.6.4 store groups directly
 * in task_struct (current->ngroups / current->groups); newer kernels go
 * through current->group_info.
 * NOTE(review): listing gaps — a local struct group_info definition around
 * the small_block member, the "#else", and surrounding context (original
 * lines 95-98, 100-102, 105, 108-109, 112) are absent here.
 */
92 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
93 #define NGROUPS_SMALL NGROUPS
94 #define NGROUPS_PER_BLOCK ((int)(EXEC_PAGESIZE / sizeof(gid_t)))
99 gid_t small_block[NGROUPS_SMALL];
103 #define current_ngroups current->ngroups
104 #define current_groups current->groups
106 struct group_info *groups_alloc(int gidsetsize);
107 void groups_free(struct group_info *ginfo);
110 #define current_ngroups current->group_info->ngroups
111 #define current_groups current->group_info->small_block
113 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) */
/* page->private accessors predating the kernel's own page_private()
 * helpers.  NOTE(review): a guard around these (original lines 114-115)
 * appears to be missing from this listing. */
116 #define page_private(page) ((page)->private)
117 #define set_page_private(page, v) ((page)->private = (v))
/*
 * 2.6-kernel (2.5+) branch: thin aliases mapping Lustre's ll_* helpers and
 * legacy 2.4 names onto native 2.6 equivalents; BKL (lock_24kernel) helpers
 * are no-ops here.
 * NOTE(review): this listing drops lines throughout this section (original
 * lines 125, 128, 132-133, 137-139, 141-142, 147, 149, 153, 162, 165, 170).
 */
124 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
126 #define lock_dentry(___dentry) spin_lock(&(___dentry)->d_lock)
127 #define unlock_dentry(___dentry) spin_unlock(&(___dentry)->d_lock)
129 #define lock_24kernel() do {} while (0)
130 #define unlock_24kernel() do {} while (0)
131 #define ll_kernel_locked() kernel_locked()
/* NOTE(review): the next three lines are the body of a comment whose
 * opening and closing delimiters (original lines 133 and 137) were lost in
 * this listing; it explains the late_initcall redirect below. */
134 * OBD need working random driver, thus all our
135 * initialization routines must be called after device
136 * driver initialization
140 #define module_init(a) late_initcall(a)
143 /* XXX our code should be using the 2.6 calls, not the other way around */
144 #define TryLockPage(page) TestSetPageLocked(page)
145 #define Page_Uptodate(page) PageUptodate(page)
146 #define ll_redirty_page(page) set_page_dirty(page)
148 #define KDEVT_INIT(val) (val)
150 #define LTIME_S(time) (time.tv_sec)
151 #define ll_path_lookup path_lookup
152 #define ll_permission(inode,mask,nd) permission(inode,mask,nd)
/* per-mapping pagecache lock on 2.6 (contrast the global lock in the 2.4
 * branch further down) */
154 #define ll_pgcache_lock(mapping) spin_lock(&mapping->page_lock)
155 #define ll_pgcache_unlock(mapping) spin_unlock(&mapping->page_lock)
156 #define ll_call_writepage(inode, page) \
157 (inode)->i_mapping->a_ops->writepage(page, NULL)
158 #define ll_invalidate_inode_pages(inode) \
159 invalidate_inode_pages((inode)->i_mapping)
160 #define ll_truncate_complete_page(page) \
161 truncate_complete_page(page->mapping, page)
163 #define ll_vfs_create(a,b,c,d) vfs_create(a,b,c,d)
164 #define ll_dev_t dev_t
/* kdev_t conversions are identity on 2.6 */
166 #define to_kdev_t(dev) (dev)
167 #define kdev_t_to_nr(dev) (dev)
168 #define val_to_kdev(dev) (dev)
169 #define ILOOKUP(sb, ino, test, data) ilookup5(sb, ino, test, data);
171 #include <linux/writeback.h>
/*
 * cleanup_group_info() (2.6 flavour): drop all supplementary groups by
 * installing an empty group_info via set_current_groups(), then release
 * the local reference with put_group_info().
 * NOTE(review): braces, a groups_alloc() failure check and the return
 * statement (original lines 174, 176, 178-180, 183-186) are missing from
 * this listing.
 */
173 static inline int cleanup_group_info(void)
175 struct group_info *ginfo;
177 ginfo = groups_alloc(0);
181 set_current_groups(ginfo);
182 put_group_info(ginfo);
/*
 * Attach/detach a Lustre llap cookie to a page through page->private,
 * holding a page-cache reference and PG_private while it is set.
 * NOTE(review): the "do {" / "} while(0)" wrappers of these multi-line
 * macros (original lines 188, 192, 194, 198) are absent from this listing.
 */
187 #define __set_page_ll_data(page, llap) \
189 page_cache_get(page); \
190 SetPagePrivate(page); \
191 set_page_private(page, (unsigned long)llap); \
193 #define __clear_page_ll_data(page) \
195 ClearPagePrivate(page); \
196 set_page_private(page, 0); \
197 page_cache_release(page); \
202 #include <linux/proc_fs.h>
/* 2.6.11+ provides d_rehash_cond() for rehashing with/without the lock */
204 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
205 #define __d_rehash(dentry, lock) d_rehash_cond(dentry, lock)
/*
 * flock_lock_file_wait() gained/lost a can_sleep argument across kernel
 * versions; normalize behind a three-argument ll_ wrapper.
 * NOTE(review): the "#endif"/"#else" lines between these variants
 * (original lines 206-207, 211, 214-215) are missing from this listing.
 */
208 #ifdef HAVE_CAN_SLEEP_ARG
209 #define ll_flock_lock_file_wait(file, lock, can_sleep) \
210 flock_lock_file_wait(file, lock, can_sleep)
212 #define ll_flock_lock_file_wait(file, lock, can_sleep) \
213 flock_lock_file_wait(file, lock)
/* evaluates false when the page is not under writeback yet the command is
 * an OBD write — i.e. a sanity check for the write path */
216 #define CheckWriteback(page, cmd) \
217 (!(!PageWriteback(page) && cmd == OBD_BRW_WRITE))
/*
 * 2.4-kernel branch starts here.
 * NOTE(review): the separator introducing the 2.4 branch and the
 * continuation line of the ll_flock_lock_file_wait stub (original lines
 * 218-220, 222-223) are missing from this listing.
 */
221 #define ll_flock_lock_file_wait(file, lock, can_sleep) \
224 #define lock_dentry(___dentry)
225 #define unlock_dentry(___dentry)
/* on 2.4 the big kernel lock really is taken and released */
227 #define lock_24kernel() lock_kernel()
228 #define unlock_24kernel() unlock_kernel()
229 #define ll_kernel_locked() (current->lock_depth >= 0)
231 /* 2.4 kernels have HZ=100 on i386/x86_64, this should be reasonably safe */
232 #define get_jiffies_64() (__u64)jiffies
/*
 * 2.4 compatibility names: smaller-signature VFS calls, hlist emulation on
 * top of list_head, and module refcounting via the old __MOD_*_USE_COUNT.
 * NOTE(review): several "#endif" lines for the #ifdef/#ifndef blocks below
 * (original lines 236-238, 240-241, 246, 248, 259-260, 263-264, 267-268,
 * 271-272, 278) are missing from this listing.
 */
234 #ifdef HAVE_MM_INLINE
235 #include <linux/mm_inline.h>
239 #define pgoff_t unsigned long
/* 2.4 variants take fewer arguments; the extra parameters are dropped */
242 #define ll_vfs_create(a,b,c,d) vfs_create(a,b,c)
243 #define ll_permission(inode,mask,nd) permission(inode,mask)
244 #define ILOOKUP(sb, ino, test, data) ilookup4(sb, ino, test, data);
245 #define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
247 #define old_encode_dev(dev) (dev)
249 /* 2.5 uses hlists for some things, like the d_hash. we'll treat them
250 * as 2.5 and let macros drop back.. */
251 #ifndef HLIST_HEAD /* until we get a kernel newer than l28 */
252 #define hlist_entry list_entry
253 #define hlist_head list_head
254 #define hlist_node list_head
255 #define HLIST_HEAD LIST_HEAD
256 #define INIT_HLIST_HEAD INIT_LIST_HEAD
257 #define hlist_del_init list_del_init
258 #define hlist_add_head list_add
261 #ifndef INIT_HLIST_NODE
262 #define INIT_HLIST_NODE(p) ((p)->next = NULL, (p)->prev = NULL)
265 #ifndef hlist_for_each
266 #define hlist_for_each list_for_each
269 #ifndef hlist_for_each_safe
270 #define hlist_for_each_safe list_for_each_safe
273 #define KDEVT_INIT(val) (val)
274 #define ext3_xattr_set_handle ext3_xattr_set
/* old-style module refcounting maps onto try_module_get/module_put names */
275 #define try_module_get __MOD_INC_USE_COUNT
276 #define module_put __MOD_DEC_USE_COUNT
277 #define LTIME_S(time) (time)
/* cpu_online() fallback for 2.4 kernels that lack it */
279 #if !defined(CONFIG_RH_2_4_20) && !defined(cpu_online)
280 #define cpu_online(cpu) test_bit(cpu, &(cpu_online_map))
/*
 * ll_path_lookup(): 2.4 emulation of path_lookup() built from
 * path_init() + path_walk().
 * NOTE(review): the closing "#endif" of the block above, this function's
 * braces, the declaration/initialization of "error" and its return
 * (original lines 281-282, 285-286, 289-290) are missing from this listing.
 */
283 static inline int ll_path_lookup(const char *path, unsigned flags,
284 struct nameidata *nd)
287 if (path_init(path, flags, nd))
288 error = path_walk(path, nd);
291 #define ll_permission(inode,mask,nd) permission(inode,mask)
292 typedef long sector_t;
/* 2.4 has one global pagecache_lock rather than a per-mapping lock; the
 * mapping argument is ignored */
294 #define ll_pgcache_lock(mapping) spin_lock(&pagecache_lock)
295 #define ll_pgcache_unlock(mapping) spin_unlock(&pagecache_lock)
296 #define ll_call_writepage(inode, page) \
297 (inode)->i_mapping->a_ops->writepage(page)
298 #define ll_invalidate_inode_pages(inode) invalidate_inode_pages(inode)
299 #define ll_truncate_complete_page(page) truncate_complete_page(page)
/* clear_page_dirty(): 2.4 helper that just drops the dirty bit.
 * NOTE(review): its braces (original lines 302-303, 305) are missing from
 * this listing. */
301 static inline void clear_page_dirty(struct page *page)
304 ClearPageDirty(page);
/*
 * clear_page_dirty_for_io(): 2.4 emulation — if the page is dirty and has
 * a mapping, clear the dirty bit and move the page onto the mapping's
 * locked_pages list under the pagecache lock.
 * NOTE(review): braces, result bookkeeping and the return statements
 * (original lines 308, 310, 317-321) are missing from this listing.
 */
307 static inline int clear_page_dirty_for_io(struct page *page)
309 struct address_space *mapping = page->mapping;
311 if (page->mapping && PageDirty(page)) {
312 ClearPageDirty(page);
313 ll_pgcache_lock(mapping);
314 list_del(&page->list);
315 list_add(&page->list, &mapping->locked_pages);
316 ll_pgcache_unlock(mapping);
/* ll_redirty_page() (2.4): clears the launder bit; NOTE(review): braces and
 * presumably a SetPageDirty() call (original lines 323-324, 326) are
 * missing from this listing */
322 static inline void ll_redirty_page(struct page *page)
325 ClearPageLaunder(page);
/* __d_drop(): 2.4 fallback — unhash the dentry by unlinking d_hash.
 * NOTE(review): braces (original lines 329, 331) are missing here. */
328 static inline void __d_drop(struct dentry *dentry)
330 list_del_init(&dentry->d_hash);
/* cleanup_group_info() (2.4 flavour): zero the in-task groups array.
 * NOTE(review): braces and the return (original lines 334, 338-340) are
 * missing here. */
333 static inline int cleanup_group_info(void)
335 /* Get rid of unneeded supplementary groups */
336 current->ngroups = 0;
337 memset(current->groups, 0, sizeof(current->groups));
/* cond_resched() fallback for kernels lacking it.
 * NOTE(review): presumably a schedule() call plus closing braces and the
 * "#endif" (original lines 343, 346-350) are missing from this listing. */
341 #ifndef HAVE_COND_RESCHED
342 static inline void cond_resched(void)
344 if (unlikely(need_resched())) {
345 set_current_state(TASK_RUNNING);
351 /* to find proc_dir_entry from inode. 2.6 has native one -bzzz */
353 #define PDE(ii) ((ii)->u.generic_ip)
/* 2.4: no PagePrivate refcount games — just stash/clear the llap cookie */
356 #define __set_page_ll_data(page, llap) set_page_private(page, (unsigned long)llap)
357 #define __clear_page_ll_data(page) set_page_private(page, 0)
/* 2.4 has no page-writeback state; stub all of it out */
358 #define PageWriteback(page) 0
359 #define CheckWriteback(page, cmd) 1
360 #define set_page_writeback(page) do {} while (0)
361 #define end_page_writeback(page) do {} while (0)
/*
 * mapping_mapped(): 2.4 approximation using the mapping's mmap lists.
 * NOTE(review): braces, a companion i_mmap check and the returns (original
 * lines 364, 366-371) are missing from this listing.
 */
363 static inline int mapping_mapped(struct address_space *mapping)
365 if (mapping->i_mmap_shared)
/* zap_page_range() signature differs across 2.4 vendor kernels.
 * NOTE(review): the "#else"/"#endif" of this block (original lines 374,
 * 376-377) are missing from this listing. */
372 #ifdef ZAP_PAGE_RANGE_VMA
373 #define ll_zap_page_range(vma, addr, len) zap_page_range(vma, addr, len)
375 #define ll_zap_page_range(vma, addr, len) zap_page_range(vma->vm_mm, addr, len)
378 #ifndef HAVE_PAGE_MAPPED
379 /* Poor man's page_mapped. substract from page count, counts from
380 buffers/pagecache and our own count (we are supposed to hold one reference).
381 What is left are user mappings and also others who work with this page now,
382 but there are supposedly none. */
/* NOTE(review): the function's braces (original lines 384, 386) are
 * missing from this listing. */
383 static inline int page_mapped(struct page *page)
385 return page_count(page) - !!page->mapping - !!page->buffers - 1;
387 #endif /* !HAVE_PAGE_MAPPED */
/* touch_atime() 2.4 fallback: update the inode atime; mnt is unused.
 * NOTE(review): braces (original lines 390, 392) are missing here. */
389 static inline void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
391 update_atime(dentry->d_inode);
/* file_accessed(): skip the atime update when the file was opened with
 * O_NOATIME.  NOTE(review): braces and intervening lines (original lines
 * 395-396, 398-399, 401-403) are missing from this listing. */
394 static inline void file_accessed(struct file *file)
397 if (file->f_flags & O_NOATIME)
400 touch_atime(file->f_vfsmnt, file->f_dentry);
/*
 * typecheck(type,x): compile-time assertion that x has the given type;
 * the two starred lines below are the body of its original comment whose
 * delimiters were lost in this listing, along with parts of the macro
 * (original lines 404, 407, 409, 412-415).
 */
405 * Check at compile time that something is of a particular type.
406 * Always evaluates to 1 so you may use it easily in comparisons.
408 #define typecheck(type,x) \
410 typeof(x) __dummy2; \
411 (void)(&__dummy == &__dummy2); \
416 #endif /* end of 2.4 compat macros */
/*
 * mapping_has_pages(): true if the address_space holds any pages.
 * 2.4 (HAVE_PAGE_LIST): inspect the three per-mapping page lists under the
 * pagecache lock; 2.6: simply test nrpages.
 * NOTE(review): braces, result bookkeeping, the "#else" and returns
 * (original lines 420-422, 427-428, 430-433, 435, 437-439) are missing
 * from this listing.
 */
418 #ifdef HAVE_PAGE_LIST
419 static inline int mapping_has_pages(struct address_space *mapping)
423 ll_pgcache_lock(mapping);
424 if (list_empty(&mapping->dirty_pages) &&
425 list_empty(&mapping->clean_pages) &&
426 list_empty(&mapping->locked_pages)) {
429 ll_pgcache_unlock(mapping);
434 static inline int mapping_has_pages(struct address_space *mapping)
436 return mapping->nrpages > 0;
/* the kiobuf block array was renamed between kernel versions.
 * NOTE(review): the "#else"/"#endif" lines (original lines 442, 444-445)
 * are missing from this listing. */
440 #ifdef HAVE_KIOBUF_KIO_BLOCKS
441 #define KIOBUF_GET_BLOCKS(k) ((k)->kio_blocks)
443 #define KIOBUF_GET_BLOCKS(k) ((k)->blocks)
/*
 * Pre-2.6.7: dentries carried d_vfs_flags and vfs_symlink() took no mode;
 * later kernels flip bits in d_flags under d_lock and pass mode through.
 * NOTE(review): the "#else"/"#endif" of this block and the "} while(0)"
 * closing the locked ll_set_dflags variant (original lines 449, 454,
 * 456-458) are missing from this listing.
 */
446 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
447 #define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
448 #define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
450 #define ll_set_dflags(dentry, flags) do { \
451 spin_lock(&dentry->d_lock); \
452 dentry->d_flags |= flags; \
453 spin_unlock(&dentry->d_lock); \
455 #define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
/* standard container_of(): recover the enclosing struct from a member
 * pointer; provided here for kernels that lack it */
459 #define container_of(ptr, type, member) ({ \
460 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
461 (type *)( (char *)__mptr - offsetof(type,member) );})
/*
 * i_alloc_sem compatibility: real up/down rwsem wrappers when the inode
 * has an i_alloc_sem, no-ops otherwise.  The LASSERT variants verify the
 * lock is held by requiring a non-blocking acquire of the opposite mode
 * to fail (trylock == 0).
 * NOTE(review): the "#else" separators of this block (original lines 468,
 * 472, 476, 480-481) are missing from this listing.
 */
464 #ifdef HAVE_I_ALLOC_SEM
465 #define UP_WRITE_I_ALLOC_SEM(i) do { up_write(&(i)->i_alloc_sem); } while (0)
466 #define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while(0)
467 #define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
469 #define UP_READ_I_ALLOC_SEM(i) do { up_read(&(i)->i_alloc_sem); } while (0)
470 #define DOWN_READ_I_ALLOC_SEM(i) do { down_read(&(i)->i_alloc_sem); } while (0)
471 #define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
473 #define UP_READ_I_ALLOC_SEM(i) do { } while (0)
474 #define DOWN_READ_I_ALLOC_SEM(i) do { } while (0)
475 #define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) do { } while (0)
477 #define UP_WRITE_I_ALLOC_SEM(i) do { } while (0)
478 #define DOWN_WRITE_I_ALLOC_SEM(i) do { } while (0)
479 #define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) do { } while (0)
/* older kernels have no gfp-taking variant; drop the gfp argument */
482 #ifndef HAVE_GRAB_CACHE_PAGE_NOWAIT_GFP
483 #define grab_cache_page_nowait_gfp(x, y, z) grab_cache_page_nowait((x), (y))
/* filemap_fdatawrite() was called filemap_fdatasync() before 2.6 */
486 #ifndef HAVE_FILEMAP_FDATAWRITE
487 #define filemap_fdatawrite(mapping) filemap_fdatasync(mapping)
/*
 * ll_kern_mount(): mount fstype via vfs_kern_mount() on kernels that have
 * it, falling back to do_kern_mount() otherwise.  Returns ERR_PTR(-ENODEV)
 * when the filesystem type is unknown.
 * NOTE(review): the function's return-type line, its braces, the !type
 * check, and presumably a put_filesystem()/return tail plus the "#else"
 * (original lines 491-492, 494, 497, 500-502, 504-505) are missing from
 * this listing.
 */
490 #ifdef HAVE_VFS_KERN_MOUNT
493 ll_kern_mount(const char *fstype, int flags, const char *name, void *data)
495 struct file_system_type *type = get_fs_type(fstype);
496 struct vfsmount *mnt;
498 return ERR_PTR(-ENODEV);
499 mnt = vfs_kern_mount(type, flags, name, data);
503 #define ll_kern_mount(fstype, flags, name, data) do_kern_mount((fstype), (flags), (name), (data))
/*
 * generic_file_read()/generic_file_write() emulation on top of the aio
 * entry points, for kernels that dropped the synchronous variants: build a
 * one-element iovec, run a synchronous kiocb, propagate the updated file
 * position back through *ppos.
 * NOTE(review): braces, the local kiocb/ret declarations and presumably a
 * wait-on -EIOCBQUEUED plus return tails (original lines 507-508, 510,
 * 512-514, 517-518, 521-524, 526-527, 529, 531-533, 536-537, 540-544) are
 * missing from this listing.
 */
506 #ifndef HAVE_GENERIC_FILE_READ
509 generic_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
511 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
515 init_sync_kiocb(&kiocb, filp);
516 kiocb.ki_pos = *ppos;
519 ret = generic_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
520 *ppos = kiocb.ki_pos;
525 #ifndef HAVE_GENERIC_FILE_WRITE
528 generic_file_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
530 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
534 init_sync_kiocb(&kiocb, filp);
535 kiocb.ki_pos = *ppos;
538 ret = generic_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
539 *ppos = kiocb.ki_pos;
/* super_operations::statfs takes a dentry on newer kernels, a superblock
 * on older ones.  NOTE(review): the "#else"/"#endif" lines around these
 * blocks (original lines 547, 549-551, 554-555) are missing from this
 * listing. */
545 #ifdef HAVE_STATFS_DENTRY_PARAM
546 #define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb)->s_root, (sfs))
548 #define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb), (sfs))
/* the task_struct parent pointer was renamed from p_pptr to parent */
552 #ifndef HAVE_TASK_PPTR
553 #define p_pptr parent
556 #endif /* __KERNEL__ */
557 #endif /* _COMPAT25_H */