/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#ifndef _LINUX_COMPAT25_H
#define _LINUX_COMPAT25_H

#ifdef __KERNEL__

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
#error sorry, lustre requires at least 2.5.69
#endif

#include <libcfs/linux/portals_compat25.h>

#include <linux/lustre_patchless_compat.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
struct ll_iattr_struct {
        struct iattr    iattr;
        unsigned int    ia_attr_flags;
};
#else
#define ll_iattr_struct iattr
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */
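
/*
 * Hedged usage sketch (hypothetical helper, not part of the original
 * header): because struct iattr is the first member of the wrapper, a
 * cast gives portable access to the common fields, while ia_attr_flags
 * resolves either to the wrapper field or, on older kernels, to the
 * field still present in struct iattr itself.
 */
static inline void ll_example_init_attr(struct ll_iattr_struct *attr,
                                        unsigned int flags)
{
        ((struct iattr *)attr)->ia_valid = 0;   /* common iattr field */
        attr->ia_attr_flags = flags;            /* compat-dependent field */
}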

#ifndef HAVE_SET_FS_PWD
static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
                struct dentry *dentry)
{
        struct dentry *old_pwd;
        struct vfsmount *old_pwdmnt;

        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        old_pwdmnt = fs->pwdmnt;
        fs->pwdmnt = mntget(mnt);
        fs->pwd = dget(dentry);
        write_unlock(&fs->lock);

        if (old_pwd) {
                dput(old_pwd);
                mntput(old_pwdmnt);
        }
}
#else
#define ll_set_fs_pwd set_fs_pwd
#endif /* HAVE_SET_FS_PWD */
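
/*
 * Hedged usage sketch (hypothetical caller, not part of the original
 * header): a kernel service thread can pin its working directory to
 * the root of a mount so that relative lookups resolve inside that
 * filesystem, on kernels with and without a native set_fs_pwd().
 */
static inline void ll_example_chdir_to_mnt_root(struct vfsmount *mnt)
{
        ll_set_fs_pwd(current->fs, mnt, mnt->mnt_root);
}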

#ifdef HAVE_INODE_I_MUTEX
#define UNLOCK_INODE_MUTEX(inode) do { mutex_unlock(&(inode)->i_mutex); } while (0)
#define LOCK_INODE_MUTEX(inode) do { mutex_lock(&(inode)->i_mutex); } while (0)
#define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
#else
#define UNLOCK_INODE_MUTEX(inode) do { up(&(inode)->i_sem); } while (0)
#define LOCK_INODE_MUTEX(inode) do { down(&(inode)->i_sem); } while (0)
#define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
#endif /* HAVE_INODE_I_MUTEX */
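
/*
 * Hedged usage sketch (hypothetical helper, not part of the original
 * header): callers pair the macros as shown and the right primitive
 * (mutex_lock/mutex_unlock or down/up on i_sem) is used underneath.
 */
static inline loff_t ll_example_locked_i_size(struct inode *inode)
{
        loff_t size;

        LOCK_INODE_MUTEX(inode);
        size = inode->i_size;
        UNLOCK_INODE_MUTEX(inode);
        return size;
}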

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
#define d_child d_u.d_child
#define d_rcu d_u.d_rcu
#endif

#ifdef HAVE_DQUOTOFF_MUTEX
#define UNLOCK_DQONOFF_MUTEX(dqopt) do { mutex_unlock(&(dqopt)->dqonoff_mutex); } while (0)
#define LOCK_DQONOFF_MUTEX(dqopt) do { mutex_lock(&(dqopt)->dqonoff_mutex); } while (0)
#else
#define UNLOCK_DQONOFF_MUTEX(dqopt) do { up(&(dqopt)->dqonoff_sem); } while (0)
#define LOCK_DQONOFF_MUTEX(dqopt) do { down(&(dqopt)->dqonoff_sem); } while (0)
#endif /* HAVE_DQUOTOFF_MUTEX */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
#define NGROUPS_SMALL           NGROUPS
#define NGROUPS_PER_BLOCK       ((int)(EXEC_PAGESIZE / sizeof(gid_t)))

struct group_info {
        int        ngroups;
        atomic_t   usage;
        gid_t      small_block[NGROUPS_SMALL];
        int        nblocks;
        gid_t     *blocks[0];
};
#define current_ngroups current->ngroups
#define current_groups current->groups

struct group_info *groups_alloc(int gidsetsize);
void groups_free(struct group_info *ginfo);
#else /* >= 2.6.4 */

#define current_ngroups current->group_info->ngroups
#define current_groups current->group_info->small_block

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) */
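
/*
 * Hedged usage sketch (hypothetical helper, not part of the original
 * header): with the definitions above, a supplementary-group scan
 * compiles on both sides of 2.6.4.  Note that on newer kernels
 * current_groups names only the small_block array, so this covers at
 * most NGROUPS_SMALL entries; larger sets would need to walk
 * group_info->blocks[].
 */
static inline int ll_example_in_suppl_group(gid_t gid)
{
        int i;

        for (i = 0; i < current_ngroups && i < NGROUPS_SMALL; i++)
                if (current_groups[i] == gid)
                        return 1;
        return 0;
}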

#ifndef page_private
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
#endif

#ifndef HAVE_GFP_T
#define gfp_t int
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)

#define lock_dentry(___dentry)          spin_lock(&(___dentry)->d_lock)
#define unlock_dentry(___dentry)        spin_unlock(&(___dentry)->d_lock)

#define lock_24kernel()         do {} while (0)
#define unlock_24kernel()       do {} while (0)
#define ll_kernel_locked()      kernel_locked()

/*
 * OBD needs a working random driver, so all of our initialization
 * routines must be called after device driver initialization.
 */
#ifndef MODULE
#undef module_init
#define module_init(a)     late_initcall(a)
#endif
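
#if 0   /* hypothetical illustration only, not part of the original header */
/* With the redefinition above, a built-in (non-modular) unit declared
 * the usual way is registered as a late_initcall(), so it runs after
 * the device drivers, including the random driver, have initialized. */
static int __init ll_example_init(void)
{
        return 0;
}
module_init(ll_example_init);   /* expands to late_initcall(ll_example_init) */
#endif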

/* XXX our code should be using the 2.6 calls, not the other way around */
#define TryLockPage(page)               TestSetPageLocked(page)
#define Page_Uptodate(page)             PageUptodate(page)
#define ll_redirty_page(page)           set_page_dirty(page)

#define KDEVT_INIT(val)                 (val)

#define LTIME_S(time)                   (time.tv_sec)
#define ll_path_lookup                  path_lookup
#define ll_permission(inode,mask,nd)    permission(inode,mask,nd)

#define ll_pgcache_lock(mapping)          spin_lock(&mapping->page_lock)
#define ll_pgcache_unlock(mapping)        spin_unlock(&mapping->page_lock)
#define ll_call_writepage(inode, page)  \
                                (inode)->i_mapping->a_ops->writepage(page, NULL)
#define ll_invalidate_inode_pages(inode) \
                                invalidate_inode_pages((inode)->i_mapping)
#define ll_truncate_complete_page(page) \
                                truncate_complete_page(page->mapping, page)

#define ll_vfs_create(a,b,c,d)          vfs_create(a,b,c,d)
#define ll_dev_t                        dev_t
#define kdev_t                          dev_t
#define to_kdev_t(dev)                  (dev)
#define kdev_t_to_nr(dev)               (dev)
#define val_to_kdev(dev)                (dev)
#define ILOOKUP(sb, ino, test, data)    ilookup5(sb, ino, test, data)

#include <linux/writeback.h>

/* Drop all supplementary groups by installing an empty group_info */
static inline int cleanup_group_info(void)
{
        struct group_info *ginfo;

        ginfo = groups_alloc(0);
        if (!ginfo)
                return -ENOMEM;

        set_current_groups(ginfo);
        put_group_info(ginfo);

        return 0;
}

#define __set_page_ll_data(page, llap) \
        do {       \
                page_cache_get(page); \
                SetPagePrivate(page); \
                set_page_private(page, (unsigned long)llap); \
        } while (0)
#define __clear_page_ll_data(page) \
        do {       \
                ClearPagePrivate(page); \
                set_page_private(page, 0); \
                page_cache_release(page); \
        } while (0)
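
/*
 * Hedged usage sketch (hypothetical helper, not part of the original
 * header): attaching per-page data pins the page via page_cache_get(),
 * and the matching clear drops that reference again, so set and clear
 * must always be balanced.
 */
static inline void ll_example_page_data_cycle(struct page *page, void *llap)
{
        __set_page_ll_data(page, llap);
        /* ... the page now answers PagePrivate() and page_private() ... */
        __clear_page_ll_data(page);
}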

#define kiobuf bio

#include <linux/proc_fs.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
#define __d_rehash(dentry, lock) d_rehash_cond(dentry, lock)
#endif

#ifdef HAVE_CAN_SLEEP_ARG
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        flock_lock_file_wait(file, lock, can_sleep)
#else
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        flock_lock_file_wait(file, lock)
#endif

#else /* 2.4 kernels */

#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        do {} while (0)

#define lock_dentry(___dentry)
#define unlock_dentry(___dentry)

#define lock_24kernel()         lock_kernel()
#define unlock_24kernel()       unlock_kernel()
#define ll_kernel_locked()      (current->lock_depth >= 0)

/* 2.4 kernels have HZ=100 on i386/x86_64, so 32-bit jiffies only wraps
 * after about 497 days (2^32 / 100 ticks/sec); treating it as a 64-bit
 * value should be reasonably safe */
#define get_jiffies_64()        (__u64)jiffies

#ifdef HAVE_MM_INLINE
#include <linux/mm_inline.h>
#endif

#ifndef pgoff_t
#define pgoff_t unsigned long
#endif

#define ll_vfs_create(a,b,c,d)              vfs_create(a,b,c)
#define ll_permission(inode,mask,nd)        permission(inode,mask)
#define ILOOKUP(sb, ino, test, data)        ilookup4(sb, ino, test, data)
#define DCACHE_DISCONNECTED                 DCACHE_NFSD_DISCONNECTED
#define ll_dev_t                            int
#define old_encode_dev(dev)                 (dev)

/* 2.5 uses hlists for some things, like the d_hash.  We'll treat them
 * as 2.5 and let the macros fall back to plain list heads. */
#ifndef HLIST_HEAD /* until we get a kernel newer than l28 */
#define hlist_entry                     list_entry
#define hlist_head                      list_head
#define hlist_node                      list_head
#define HLIST_HEAD                      LIST_HEAD
#define INIT_HLIST_HEAD                 INIT_LIST_HEAD
#define hlist_del_init                  list_del_init
#define hlist_add_head                  list_add
#endif

#ifndef INIT_HLIST_NODE
#define INIT_HLIST_NODE(p)              ((p)->next = NULL, (p)->prev = NULL)
#endif

#ifndef hlist_for_each
#define hlist_for_each                  list_for_each
#endif

#ifndef hlist_for_each_safe
#define hlist_for_each_safe             list_for_each_safe
#endif

#define KDEVT_INIT(val)                 (val)
#define ext3_xattr_set_handle           ext3_xattr_set
#define try_module_get                  __MOD_INC_USE_COUNT
#define module_put                      __MOD_DEC_USE_COUNT
#define LTIME_S(time)                   (time)

#if !defined(CONFIG_RH_2_4_20) && !defined(cpu_online)
#define cpu_online(cpu)                 test_bit(cpu, &(cpu_online_map))
#endif

static inline int ll_path_lookup(const char *path, unsigned flags,
                                 struct nameidata *nd)
{
        int error = 0;
        if (path_init(path, flags, nd))
                error = path_walk(path, nd);
        return error;
}
typedef long sector_t;

#define ll_pgcache_lock(mapping)        spin_lock(&pagecache_lock)
#define ll_pgcache_unlock(mapping)      spin_unlock(&pagecache_lock)
#define ll_call_writepage(inode, page)  \
                               (inode)->i_mapping->a_ops->writepage(page)
#define ll_invalidate_inode_pages(inode) invalidate_inode_pages(inode)
#define ll_truncate_complete_page(page) truncate_complete_page(page)

static inline void clear_page_dirty(struct page *page)
{
        if (PageDirty(page))
                ClearPageDirty(page);
}

/* Clear the dirty bit and move the page onto the mapping's locked_pages
 * list for writeout, mirroring the 2.6 clear_page_dirty_for_io()
 * semantics; returns 1 if the page was dirty. */
static inline int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (page->mapping && PageDirty(page)) {
                ClearPageDirty(page);
                ll_pgcache_lock(mapping);
                list_del(&page->list);
                list_add(&page->list, &mapping->locked_pages);
                ll_pgcache_unlock(mapping);
                return 1;
        }
        return 0;
}

static inline void ll_redirty_page(struct page *page)
{
        SetPageDirty(page);
        ClearPageLaunder(page);
}

static inline void __d_drop(struct dentry *dentry)
{
        list_del_init(&dentry->d_hash);
}

static inline int cleanup_group_info(void)
{
        /* Get rid of unneeded supplementary groups */
        current->ngroups = 0;
        memset(current->groups, 0, sizeof(current->groups));
        return 0;
}

#ifndef HAVE_COND_RESCHED
static inline void cond_resched(void)
{
        if (unlikely(need_resched())) {
                set_current_state(TASK_RUNNING);
                schedule();
        }
}
#endif

/* to find a proc_dir_entry from an inode; 2.6 has a native one -bzzz */
#ifndef HAVE_PDE
#define PDE(ii)         ((ii)->u.generic_ip)
#endif

#define __set_page_ll_data(page, llap) set_page_private(page, (unsigned long)llap)
#define __clear_page_ll_data(page) set_page_private(page, 0)
#define PageWriteback(page) 0
#define set_page_writeback(page) do {} while (0)
#define end_page_writeback(page) do {} while (0)

static inline int mapping_mapped(struct address_space *mapping)
{
        if (mapping->i_mmap_shared)
                return 1;
        if (mapping->i_mmap)
                return 1;
        return 0;
}

#ifdef ZAP_PAGE_RANGE_VMA
#define ll_zap_page_range(vma, addr, len)  zap_page_range(vma, addr, len)
#else
#define ll_zap_page_range(vma, addr, len)  zap_page_range(vma->vm_mm, addr, len)
#endif

#ifndef HAVE_PAGE_MAPPED
/* Poor man's page_mapped: subtract from the page count the references
   held by the pagecache and the buffers, plus our own reference (we are
   supposed to hold one).  What is left are user mappings, plus anyone
   else working with the page right now, of which there are supposedly
   none. */
static inline int page_mapped(struct page *page)
{
        return page_count(page) - !!page->mapping - !!page->buffers - 1;
}
#endif /* !HAVE_PAGE_MAPPED */

static inline void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
        update_atime(dentry->d_inode);
}

static inline void file_accessed(struct file *file)
{
#ifdef O_NOATIME
        if (file->f_flags & O_NOATIME)
                return;
#endif
        touch_atime(file->f_vfsmnt, file->f_dentry);
}

#endif /* end of 2.4 compat macros */

#ifdef HAVE_PAGE_LIST
static inline int mapping_has_pages(struct address_space *mapping)
{
        int rc = 1;

        ll_pgcache_lock(mapping);
        if (list_empty(&mapping->dirty_pages) &&
            list_empty(&mapping->clean_pages) &&
            list_empty(&mapping->locked_pages)) {
                rc = 0;
        }
        ll_pgcache_unlock(mapping);

        return rc;
}
#else
static inline int mapping_has_pages(struct address_space *mapping)
{
        return mapping->nrpages > 0;
}
#endif

#ifdef HAVE_KIOBUF_KIO_BLOCKS
#define KIOBUF_GET_BLOCKS(k) ((k)->kio_blocks)
#else
#define KIOBUF_GET_BLOCKS(k) ((k)->blocks)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
#define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while (0)
#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
#else
#define ll_set_dflags(dentry, flags) do { \
                spin_lock(&dentry->d_lock); \
                dentry->d_flags |= flags; \
                spin_unlock(&dentry->d_lock); \
        } while (0)
#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
#endif

#ifndef container_of
#define container_of(ptr, type, member) ({                      \
                const typeof( ((type *)0)->member ) *__mptr = (ptr); \
                (type *)( (char *)__mptr - offsetof(type,member) );})
#endif
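
/*
 * Hedged usage sketch (hypothetical types, not part of the original
 * header): container_of() recovers the enclosing structure from a
 * pointer to one of its members, here a list linkage.
 */
struct ll_example_item {
        int              value;
        struct list_head link;
};

static inline struct ll_example_item *
ll_example_item_from_link(struct list_head *link)
{
        return container_of(link, struct ll_example_item, link);
}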

#ifdef HAVE_I_ALLOC_SEM
#define UP_WRITE_I_ALLOC_SEM(i)   do { up_write(&(i)->i_alloc_sem); } while (0)
#define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while (0)
#define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)

#define UP_READ_I_ALLOC_SEM(i)    do { up_read(&(i)->i_alloc_sem); } while (0)
#define DOWN_READ_I_ALLOC_SEM(i)  do { down_read(&(i)->i_alloc_sem); } while (0)
#define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
#else
#define UP_READ_I_ALLOC_SEM(i)              do { } while (0)
#define DOWN_READ_I_ALLOC_SEM(i)            do { } while (0)
#define LASSERT_I_ALLOC_SEM_READ_LOCKED(i)  do { } while (0)

#define UP_WRITE_I_ALLOC_SEM(i)             do { } while (0)
#define DOWN_WRITE_I_ALLOC_SEM(i)           do { } while (0)
#define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) do { } while (0)
#endif

#ifndef HAVE_GRAB_CACHE_PAGE_NOWAIT_GFP
#define grab_cache_page_nowait_gfp(x, y, z) grab_cache_page_nowait((x), (y))
#endif

#ifndef HAVE_FILEMAP_FDATAWRITE
#define filemap_fdatawrite(mapping)      filemap_fdatasync(mapping)
#endif

#ifdef HAVE_VFS_KERN_MOUNT
static inline struct vfsmount *
ll_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
        struct file_system_type *type = get_fs_type(fstype);
        struct vfsmount *mnt;

        if (!type)
                return ERR_PTR(-ENODEV);
        mnt = vfs_kern_mount(type, flags, name, data);
        return mnt;
}
#else
#define ll_kern_mount(fstype, flags, name, data) do_kern_mount((fstype), (flags), (name), (data))
#endif
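
/*
 * Hedged usage sketch (hypothetical caller, not part of the original
 * header; "ldiskfs" is only an illustrative type name): both code
 * paths follow the ERR_PTR convention, so callers check the result
 * with IS_ERR() and release it with mntput().
 */
static inline int ll_example_try_mount(const char *dev)
{
        struct vfsmount *mnt = ll_kern_mount("ldiskfs", 0, dev, NULL);

        if (IS_ERR(mnt))
                return PTR_ERR(mnt);
        mntput(mnt);
        return 0;
}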

#ifndef HAVE_GENERIC_FILE_READ
static inline ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
        kiocb.ki_left = len;

        ret = generic_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        *ppos = kiocb.ki_pos;
        return ret;
}
#endif

#ifndef HAVE_GENERIC_FILE_WRITE
static inline ssize_t
generic_file_write(struct file *filp, const char __user *buf, size_t len,
                   loff_t *ppos)
{
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
        kiocb.ki_left = len;

        ret = generic_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
        *ppos = kiocb.ki_pos;

        return ret;
}
#endif
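
/*
 * Hedged usage sketch (hypothetical helper, not part of the original
 * header): the emulated calls keep the classic synchronous signature,
 * so a positional read works unchanged on kernels that only provide
 * the AIO entry points.
 */
static inline ssize_t ll_example_pread(struct file *filp, char __user *buf,
                                       size_t len, loff_t pos)
{
        loff_t off = pos;

        return generic_file_read(filp, buf, len, &off);
}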

#ifdef HAVE_STATFS_DENTRY_PARAM
#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb)->s_root, (sfs))
#else
#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb), (sfs))
#endif

/* task_struct */
#ifndef HAVE_TASK_PPTR
#define p_pptr parent
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_COMPAT25_H */