1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
/* Include guard for the Linux 2.5/2.6 kernel compatibility layer. */
23 #ifndef _LINUX_COMPAT25_H
24 #define _LINUX_COMPAT25_H
/* Refuse to build on early 2.5 development kernels; only >= 2.5.69 is
 * supported by this compat layer. */
28 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
29 #error sorry, lustre requires at least 2.5.69
32 #include <libcfs/linux/portals_compat25.h>
34 #include <linux/lustre_patchless_compat.h>
/* On >= 2.6.14 the kernel's struct iattr lost ia_attr_flags; provide a
 * Lustre-local wrapper carrying it.
 * NOTE(review): the surrounding `struct ll_iattr { struct iattr ... }`
 * definition and the #else branch appear to have been stripped from this
 * extraction (embedded numbering jumps 36->39->42) — restore from the
 * original file before use. */
36 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
39 unsigned int ia_attr_flags;
42 #define ll_iattr iattr
43 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */
/* ll_set_fs_pwd(): change the calling task's current working directory
 * to (mnt, dentry), taking references on the new mount and dentry under
 * fs->lock.  Used on kernels whose fs layer does not export set_fs_pwd().
 * NOTE(review): lines are missing from this extraction — the opening
 * brace, the save of fs->pwd into old_pwd, and the post-unlock release
 * of the old pwd (dput/mntput) plus the closing brace and the #else
 * (embedded numbering jumps 47->49, 57->65).  Restore before use. */
45 #ifndef HAVE_SET_FS_PWD
46 static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
47 struct dentry *dentry)
49 struct dentry *old_pwd;
50 struct vfsmount *old_pwdmnt;
52 write_lock(&fs->lock);
54 old_pwdmnt = fs->pwdmnt;
55 fs->pwdmnt = mntget(mnt);
56 fs->pwd = dget(dentry);
57 write_unlock(&fs->lock);
/* Kernel already provides set_fs_pwd(); just alias it. */
65 #define ll_set_fs_pwd set_fs_pwd
66 #endif /* HAVE_SET_FS_PWD */
/* iattr validity flag for i_blocks updates (not defined by older kernels). */
68 #define ATTR_BLOCKS 0x4000
/* Inode locking shims: newer kernels replaced the inode semaphore
 * (i_sem, down/up) with a mutex (i_mutex).  These macros present one
 * API over both.
 * NOTE(review): the #else separating the two branches is missing from
 * this extraction (embedded numbering jumps 73->75). */
70 #if HAVE_INODE_I_MUTEX
71 #define UNLOCK_INODE_MUTEX(inode) do {mutex_unlock(&(inode)->i_mutex); } while(0)
72 #define LOCK_INODE_MUTEX(inode) do {mutex_lock(&(inode)->i_mutex); } while(0)
73 #define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
75 #define UNLOCK_INODE_MUTEX(inode) do {up(&(inode)->i_sem); } while(0)
76 #define LOCK_INODE_MUTEX(inode) do {down(&(inode)->i_sem); } while(0)
/* down_trylock() returns 0 on success, so invert to match
 * mutex_trylock()'s 1-on-success convention. */
77 #define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
78 #endif /* HAVE_INODE_I_MUTEX */
/* 2.6.15 moved d_child/d_rcu into the d_u union of struct dentry;
 * alias the old field names onto the union members.
 * NOTE(review): the closing #endif for this version check appears to be
 * missing from this extraction (embedded numbering jumps 82->85). */
80 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
81 #define d_child d_u.d_child
82 #define d_rcu d_u.d_rcu
/* Quota on/off serialization: mutex (dqonoff_mutex) on newer kernels,
 * semaphore (dqonoff_sem) on older ones.
 * NOTE(review): the #else between the two branches is missing here
 * (embedded numbering jumps 87->89). */
85 #ifdef HAVE_DQUOTOFF_MUTEX
86 #define UNLOCK_DQONOFF_MUTEX(dqopt) do {mutex_unlock(&(dqopt)->dqonoff_mutex); } while(0)
87 #define LOCK_DQONOFF_MUTEX(dqopt) do {mutex_lock(&(dqopt)->dqonoff_mutex); } while(0)
89 #define UNLOCK_DQONOFF_MUTEX(dqopt) do {up(&(dqopt)->dqonoff_sem); } while(0)
90 #define LOCK_DQONOFF_MUTEX(dqopt) do {down(&(dqopt)->dqonoff_sem); } while(0)
91 #endif /* HAVE_DQUOTOFF_MUTEX */
/* group_info compatibility: kernels < 2.6.4 store supplementary groups
 * directly in task_struct (->ngroups/->groups); newer kernels use a
 * refcounted struct group_info.  Emulate the new API on old kernels.
 * NOTE(review): this extraction is missing the struct group_info
 * definition that should surround small_block, and the #else before the
 * >= 2.6.4 aliases (embedded numbering jumps 96->101, 109->112). */
94 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
95 #define NGROUPS_SMALL NGROUPS
96 #define NGROUPS_PER_BLOCK ((int)(EXEC_PAGESIZE / sizeof(gid_t)))
101 gid_t small_block[NGROUPS_SMALL];
105 #define current_ngroups current->ngroups
106 #define current_groups current->groups
/* Prototypes for the group_info emulation implemented elsewhere. */
108 struct group_info *groups_alloc(int gidsetsize);
109 void groups_free(struct group_info *ginfo);
112 #define current_ngroups current->group_info->ngroups
113 #define current_groups current->group_info->small_block
115 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) */
/* page->private accessors for kernels predating page_private()/
 * set_page_private().
 * NOTE(review): presumably these sat under an #ifndef page_private-style
 * guard (embedded numbering jumps 115->118) — confirm against the
 * original file. */
118 #define page_private(page) ((page)->private)
119 #define set_page_private(page, v) ((page)->private = (v))
/* 2.5/2.6 branch of the page-cache / dentry / device-number compat
 * macros.  The matching 2.4 branch is not visible in this extraction. */
126 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
/* Per-dentry spinlock (d_lock) exists on 2.6; no BKL needed. */
128 #define lock_dentry(___dentry) spin_lock(&(___dentry)->d_lock)
129 #define unlock_dentry(___dentry) spin_unlock(&(___dentry)->d_lock)
/* Big-kernel-lock wrappers are no-ops on 2.6. */
131 #define lock_24kernel() do {} while (0)
132 #define unlock_24kernel() do {} while (0)
133 #define ll_kernel_locked() kernel_locked()
/* NOTE(review): the following three lines are the interior of a block
 * comment whose opening and closing delimiters were stripped from this
 * extraction (embedded numbering jumps 133->136 and 138->142). */
136 * OBD need working random driver, thus all our
137 * initialization routines must be called after device
138 * driver initialization
/* Defer module init until late in boot so the random driver is ready. */
142 #define module_init(a) late_initcall(a)
145 /* XXX our code should be using the 2.6 calls, not the other way around */
146 #define TryLockPage(page) TestSetPageLocked(page)
147 #define Page_Uptodate(page) PageUptodate(page)
148 #define ll_redirty_page(page) set_page_dirty(page)
/* kdev_t went away on 2.6: device numbers are plain dev_t integers, so
 * all the conversion macros are identities. */
150 #define KDEVT_INIT(val) (val)
/* Extract seconds from a struct timespec-valued inode time field. */
152 #define LTIME_S(time) (time.tv_sec)
153 #define ll_path_lookup path_lookup
154 #define ll_permission(inode,mask,nd) permission(inode,mask,nd)
/* Page-cache lock lives in the address_space on these kernels. */
156 #define ll_pgcache_lock(mapping) spin_lock(&mapping->page_lock)
157 #define ll_pgcache_unlock(mapping) spin_unlock(&mapping->page_lock)
158 #define ll_call_writepage(inode, page) \
159 (inode)->i_mapping->a_ops->writepage(page, NULL)
160 #define ll_invalidate_inode_pages(inode) \
161 invalidate_inode_pages((inode)->i_mapping)
162 #define ll_truncate_complete_page(page) \
163 truncate_complete_page(page->mapping, page)
165 #define ll_vfs_create(a,b,c,d) vfs_create(a,b,c,d)
166 #define ll_dev_t dev_t
168 #define to_kdev_t(dev) (dev)
169 #define kdev_t_to_nr(dev) (dev)
170 #define val_to_kdev(dev) (dev)
/* NOTE(review): the trailing semicolon in this expansion makes ILOOKUP
 * unusable inside larger expressions — verify against callers before
 * removing it. */
171 #define ILOOKUP(sb, ino, test, data) ilookup5(sb, ino, test, data);
173 #include <linux/writeback.h>
/* cleanup_group_info(): drop all supplementary groups of the current
 * task by installing an empty group_info, then release the local ref
 * (set_current_groups() takes its own).
 * NOTE(review): this extraction is missing the opening brace, the
 * groups_alloc() failure check/return, and the final return/closing
 * brace (embedded numbering jumps 179->183, 184->189).  Restore before
 * use. */
175 static inline int cleanup_group_info(void)
177 struct group_info *ginfo;
179 ginfo = groups_alloc(0);
183 set_current_groups(ginfo);
184 put_group_info(ginfo);
/* Attach a Lustre llap cookie to a page: pin the page, mark it private,
 * and stash the pointer in page->private.
 * NOTE(review): the do { ... } while(0) wrapper lines of both macros
 * are missing from this extraction (numbering jumps 189->191 etc.). */
189 #define __set_page_ll_data(page, llap) \
191 page_cache_get(page); \
192 SetPagePrivate(page); \
193 set_page_private(page, (unsigned long)llap); \
/* Inverse of __set_page_ll_data: clear the cookie and unpin the page. */
195 #define __clear_page_ll_data(page) \
197 ClearPagePrivate(page); \
198 set_page_private(page, 0); \
199 page_cache_release(page); \
204 #include <linux/proc_fs.h>
/* >= 2.6.11 provides d_rehash_cond() taking an explicit lock flag.
 * NOTE(review): the pre-2.6.11 #else branch and #endif appear stripped
 * (embedded numbering jumps 207->210). */
206 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
207 #define __d_rehash(dentry, lock) d_rehash_cond(dentry, lock)
/* flock_lock_file_wait() grew/lost a can_sleep argument across kernel
 * versions; present a uniform three-argument wrapper.
 * NOTE(review): the #else between the branches is missing here
 * (embedded numbering jumps 212->214). */
210 #ifdef HAVE_CAN_SLEEP_ARG
211 #define ll_flock_lock_file_wait(file, lock, can_sleep) \
212 flock_lock_file_wait(file, lock, can_sleep)
214 #define ll_flock_lock_file_wait(file, lock, can_sleep) \
215 flock_lock_file_wait(file, lock)
/* True when the page state is consistent with the intended RPC: a write
 * (cmd == OBD_BRW_WRITE) requires PageWriteback to be set; i.e. the
 * expression is PageWriteback(page) || cmd != OBD_BRW_WRITE. */
218 #define CheckWriteback(page, cmd) \
219 (!(!PageWriteback(page) && cmd == OBD_BRW_WRITE))
/* mapping_has_pages(): report whether an address_space currently holds
 * any pages.  Old kernels (HAVE_PAGE_LIST) keep pages on three lists
 * that must be inspected under the page-cache lock; new kernels expose
 * an nrpages counter.
 * NOTE(review): this extraction is missing the function braces, the
 * result variable and returns, and the #else separating the two
 * variants (embedded numbering jumps 224->228, 234->239).  Restore
 * before use. */
223 #ifdef HAVE_PAGE_LIST
224 static inline int mapping_has_pages(struct address_space *mapping)
228 ll_pgcache_lock(mapping);
229 if (list_empty(&mapping->dirty_pages) &&
230 list_empty(&mapping->clean_pages) &&
231 list_empty(&mapping->locked_pages)) {
234 ll_pgcache_unlock(mapping);
239 static inline int mapping_has_pages(struct address_space *mapping)
241 return mapping->nrpages > 0;
/* The kiobuf block array was renamed kio_blocks -> blocks; pick the
 * right field name.
 * NOTE(review): #else between the branches is missing (246->248). */
245 #ifdef HAVE_KIOBUF_KIO_BLOCKS
246 #define KIOBUF_GET_BLOCKS(k) ((k)->kio_blocks)
248 #define KIOBUF_GET_BLOCKS(k) ((k)->blocks)
/* Pre-2.6.7: dentry flags live in d_vfs_flags with no locking
 * convention, and vfs_symlink() takes no mode argument.
 * NOTE(review): the #else (2.6.7+) marker, the closing `} while(0)` of
 * the second ll_set_dflags, and the region's #endif are missing from
 * this extraction (embedded numbering jumps 253->255, 258->260,
 * 260->264). */
251 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
252 #define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
253 #define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
/* 2.6.7+: d_flags is protected by d_lock, and vfs_symlink() takes the
 * mode. */
255 #define ll_set_dflags(dentry, flags) do { \
256 spin_lock(&dentry->d_lock); \
257 dentry->d_flags |= flags; \
258 spin_unlock(&dentry->d_lock); \
260 #define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
/* Standard container_of: recover the enclosing struct from a member
 * pointer.  Presumably guarded by #ifndef container_of in the original
 * (numbering jumps 260->264) — confirm against the original file. */
264 #define container_of(ptr, type, member) ({ \
265 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
266 (type *)( (char *)__mptr - offsetof(type,member) );})
/* i_alloc_sem wrappers: on kernels that have inode->i_alloc_sem, map
 * the UP/DOWN macros onto the rwsem and assert lock state by probing
 * with the opposite trylock; on kernels without it, everything is a
 * no-op.
 * NOTE(review): the #else separating the two branches and the closing
 * #endif are missing from this extraction (embedded numbering jumps
 * 276->278 and 284->287). */
269 #ifdef HAVE_I_ALLOC_SEM
270 #define UP_WRITE_I_ALLOC_SEM(i) do { up_write(&(i)->i_alloc_sem); } while (0)
271 #define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while(0)
/* Write-locked iff a read trylock fails. */
272 #define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
274 #define UP_READ_I_ALLOC_SEM(i) do { up_read(&(i)->i_alloc_sem); } while (0)
275 #define DOWN_READ_I_ALLOC_SEM(i) do { down_read(&(i)->i_alloc_sem); } while (0)
/* Read-locked iff a write trylock fails. */
276 #define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
278 #define UP_READ_I_ALLOC_SEM(i) do { } while (0)
279 #define DOWN_READ_I_ALLOC_SEM(i) do { } while (0)
280 #define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) do { } while (0)
282 #define UP_WRITE_I_ALLOC_SEM(i) do { } while (0)
283 #define DOWN_WRITE_I_ALLOC_SEM(i) do { } while (0)
284 #define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) do { } while (0)
/* Fall back to the gfp-less grab_cache_page_nowait(); the gfp argument
 * is simply dropped.  NOTE(review): each of these two #ifndef regions
 * is missing its #endif in this extraction (numbering jumps 288->291,
 * 292->295). */
287 #ifndef HAVE_GRAB_CACHE_PAGE_NOWAIT_GFP
288 #define grab_cache_page_nowait_gfp(x, y, z) grab_cache_page_nowait((x), (y))
/* Old name for starting mapping writeback. */
291 #ifndef HAVE_FILEMAP_FDATAWRITE
292 #define filemap_fdatawrite(mapping) filemap_fdatasync(mapping)
/* ll_kern_mount(): mount a filesystem by type name.  Looks up the
 * file_system_type (taking a module reference), mounts, then drops the
 * module reference — the mount itself keeps the fs pinned.
 * NOTE(review): this extraction is missing the function's return-type
 * line and opening brace, the NULL check on `type` guarding the
 * ERR_PTR(-ENODEV) return, the final `return mnt;`/closing brace, and
 * the #else before the do_kern_mount() fallback (numbering jumps
 * 295->298, 301->303, 305->309).  Restore before use. */
295 #ifdef HAVE_VFS_KERN_MOUNT
298 ll_kern_mount(const char *fstype, int flags, const char *name, void *data)
300 struct file_system_type *type = get_fs_type(fstype);
301 struct vfsmount *mnt;
303 return ERR_PTR(-ENODEV);
304 mnt = vfs_kern_mount(type, flags, name, data);
305 module_put(type->owner);
309 #define ll_kern_mount(fstype, flags, name, data) do_kern_mount((fstype), (flags), (name), (data))
/* Back-port of generic_file_read(): emulate the removed synchronous
 * read entry point on top of generic_file_aio_read() using a
 * synchronous kiocb, then propagate the updated file position.
 * NOTE(review): this extraction is missing the return-type line,
 * braces, the kiocb/ret declarations, the `return ret;`, and the
 * #endif (embedded numbering jumps 315->317, 317->321, 326->331).
 * Restore before use. */
312 #ifndef HAVE_GENERIC_FILE_READ
315 generic_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
317 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
321 init_sync_kiocb(&kiocb, filp);
322 kiocb.ki_pos = *ppos;
325 ret = generic_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
326 *ppos = kiocb.ki_pos;
/* Same back-port for the write side via generic_file_aio_write().
 * NOTE(review): same stripped lines as the read variant (numbering
 * jumps 334->336, 336->340, 345->351). */
331 #ifndef HAVE_GENERIC_FILE_WRITE
334 generic_file_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
336 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
340 init_sync_kiocb(&kiocb, filp);
341 kiocb.ki_pos = *ppos;
344 ret = generic_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
345 *ppos = kiocb.ki_pos;
/* ->statfs() changed its first parameter from the superblock to the
 * root dentry; dispatch on which signature this kernel uses.
 * NOTE(review): the #else between the branches, its #endif, and the
 * #endif of the p_pptr region are missing from this extraction
 * (embedded numbering jumps 352->354, 354->358, 359->362). */
351 #ifdef HAVE_STATFS_DENTRY_PARAM
352 #define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb)->s_root, (sfs))
354 #define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb), (sfs))
/* task_struct renamed p_pptr to parent on newer kernels. */
358 #ifndef HAVE_TASK_PPTR
359 #define p_pptr parent
362 #endif /* __KERNEL__ */
/* NOTE(review): guard comment says _COMPAT25_H but the guard opened as
 * _LINUX_COMPAT25_H — cosmetic mismatch only. */
363 #endif /* _COMPAT25_H */