/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#ifndef _LINUX_COMPAT25_H
#define _LINUX_COMPAT25_H

#ifdef __KERNEL__

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
#error sorry, lustre requires at least 2.5.69
#endif

#include <libcfs/linux/portals_compat25.h>

#include <linux/lustre_patchless_compat.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
struct ll_iattr_struct {
        struct iattr    iattr;
        unsigned int    ia_attr_flags;
};
#else
#define ll_iattr_struct iattr
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */
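
/*
 * Usage sketch (illustrative, not part of the original header; assumes
 * the caller really allocated a struct ll_iattr_struct): since struct
 * iattr lost ia_attr_flags in 2.6.14+, callers reach the flags through
 * the wrapper with a cast that is valid on both branches:
 *
 *      ((struct ll_iattr_struct *)attr)->ia_attr_flags = flags;
 */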

#ifndef HAVE_SET_FS_PWD
static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
                struct dentry *dentry)
{
        struct dentry *old_pwd;
        struct vfsmount *old_pwdmnt;

        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        old_pwdmnt = fs->pwdmnt;
        fs->pwdmnt = mntget(mnt);
        fs->pwd = dget(dentry);
        write_unlock(&fs->lock);

        if (old_pwd) {
                dput(old_pwd);
                mntput(old_pwdmnt);
        }
}
#else
#define ll_set_fs_pwd set_fs_pwd
#endif /* HAVE_SET_FS_PWD */
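
/*
 * Usage sketch (illustrative): ll_set_fs_pwd() changes a process's
 * working directory while taking new references before dropping the
 * old ones, e.g. to pin a mountpoint for a kernel thread:
 *
 *      ll_set_fs_pwd(current->fs, mnt, mnt->mnt_root);
 */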

#ifdef HAVE_INODE_I_MUTEX
#define UNLOCK_INODE_MUTEX(inode) do { mutex_unlock(&(inode)->i_mutex); } while (0)
#define LOCK_INODE_MUTEX(inode) do { mutex_lock(&(inode)->i_mutex); } while (0)
#define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
#else
#define UNLOCK_INODE_MUTEX(inode) do { up(&(inode)->i_sem); } while (0)
#define LOCK_INODE_MUTEX(inode) do { down(&(inode)->i_sem); } while (0)
#define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
#endif /* HAVE_INODE_I_MUTEX */
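
/*
 * Usage sketch (illustrative): the LOCK/UNLOCK pair keeps callers
 * source-compatible across the i_sem (semaphore) to i_mutex rename:
 *
 *      LOCK_INODE_MUTEX(inode);
 *      ... update the inode under the lock ...
 *      UNLOCK_INODE_MUTEX(inode);
 */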

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
#define d_child d_u.d_child
#define d_rcu d_u.d_rcu
#endif

#ifdef HAVE_DQUOTOFF_MUTEX
#define UNLOCK_DQONOFF_MUTEX(dqopt) do { mutex_unlock(&(dqopt)->dqonoff_mutex); } while (0)
#define LOCK_DQONOFF_MUTEX(dqopt) do { mutex_lock(&(dqopt)->dqonoff_mutex); } while (0)
#else
#define UNLOCK_DQONOFF_MUTEX(dqopt) do { up(&(dqopt)->dqonoff_sem); } while (0)
#define LOCK_DQONOFF_MUTEX(dqopt) do { down(&(dqopt)->dqonoff_sem); } while (0)
#endif /* HAVE_DQUOTOFF_MUTEX */


#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
#define NGROUPS_SMALL           NGROUPS
#define NGROUPS_PER_BLOCK       ((int)(EXEC_PAGESIZE / sizeof(gid_t)))

struct group_info {
        int        ngroups;
        atomic_t   usage;
        gid_t      small_block[NGROUPS_SMALL];
        int        nblocks;
        gid_t     *blocks[0];
};
#define current_ngroups current->ngroups
#define current_groups current->groups

struct group_info *groups_alloc(int gidsetsize);
void groups_free(struct group_info *ginfo);
#else /* >= 2.6.4 */

#define current_ngroups current->group_info->ngroups
#define current_groups current->group_info->small_block

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) */

#ifndef page_private
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
#endif

#ifndef HAVE_GFP_T
#define gfp_t int
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)

#define lock_dentry(___dentry)          spin_lock(&(___dentry)->d_lock)
#define unlock_dentry(___dentry)        spin_unlock(&(___dentry)->d_lock)

#define lock_24kernel()         do {} while (0)
#define unlock_24kernel()       do {} while (0)
#define ll_kernel_locked()      kernel_locked()

/*
 * OBD needs a working random driver, so all of our initialization
 * routines must run after device-driver initialization.
 */
#ifndef MODULE
#undef module_init
#define module_init(a)     late_initcall(a)
#endif

/* XXX our code should be using the 2.6 calls, not the other way around */
#define TryLockPage(page)               TestSetPageLocked(page)
#define Page_Uptodate(page)             PageUptodate(page)
#define ll_redirty_page(page)           set_page_dirty(page)

#define KDEVT_INIT(val)                 (val)

#define LTIME_S(time)                   (time.tv_sec)
#define ll_path_lookup                  path_lookup
#define ll_permission(inode,mask,nd)    permission(inode,mask,nd)

#define ll_pgcache_lock(mapping)          spin_lock(&mapping->page_lock)
#define ll_pgcache_unlock(mapping)        spin_unlock(&mapping->page_lock)
#define ll_call_writepage(inode, page)  \
                                (inode)->i_mapping->a_ops->writepage(page, NULL)
#define ll_invalidate_inode_pages(inode) \
                                invalidate_inode_pages((inode)->i_mapping)
#define ll_truncate_complete_page(page) \
                                truncate_complete_page(page->mapping, page)

#define ll_vfs_create(a,b,c,d)          vfs_create(a,b,c,d)
#define ll_dev_t                        dev_t
#define kdev_t                          dev_t
#define to_kdev_t(dev)                  (dev)
#define kdev_t_to_nr(dev)               (dev)
#define val_to_kdev(dev)                (dev)
#define ILOOKUP(sb, ino, test, data)    ilookup5(sb, ino, test, data)

#include <linux/writeback.h>

static inline int cleanup_group_info(void)
{
        struct group_info *ginfo;

        ginfo = groups_alloc(0);
        if (!ginfo)
                return -ENOMEM;

        set_current_groups(ginfo);
        put_group_info(ginfo);

        return 0;
}
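
/*
 * Usage sketch (illustrative): a kernel thread acting on behalf of
 * remote clients might drop its inherited supplementary groups before
 * doing filesystem work:
 *
 *      if (cleanup_group_info())
 *              return -ENOMEM;
 */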

#define __set_page_ll_data(page, llap) \
        do {       \
                page_cache_get(page); \
                SetPagePrivate(page); \
                set_page_private(page, (unsigned long)llap); \
        } while (0)
#define __clear_page_ll_data(page) \
        do {       \
                ClearPagePrivate(page); \
                set_page_private(page, 0); \
                page_cache_release(page); \
        } while (0)
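
/*
 * Usage sketch (illustrative): these two macros bracket the lifetime of
 * lustre's per-page bookkeeping ("llap").  On 2.6 they also take and
 * drop a page reference and toggle PG_private, so the VM calls back
 * into the filesystem on truncate/release:
 *
 *      __set_page_ll_data(page, llap);
 *      ... page in use by lustre ...
 *      __clear_page_ll_data(page);
 */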

#define kiobuf bio

#include <linux/proc_fs.h>

#ifndef HAVE___D_REHASH
#define __d_rehash(dentry, lock) d_rehash_cond(dentry, lock)
#endif

#ifdef HAVE_CAN_SLEEP_ARG
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        flock_lock_file_wait(file, lock, can_sleep)
#else
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        flock_lock_file_wait(file, lock)
#endif

#define CheckWriteback(page, cmd) \
        (!(!PageWriteback(page) && cmd == OBD_BRW_WRITE))
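
/* CheckWriteback() simplifies, by De Morgan, to
 *      PageWriteback(page) || cmd != OBD_BRW_WRITE
 * i.e. it is false only when a write targets a page that has not been
 * marked for writeback. */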
218
#else /* 2.4.. */

#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        do {} while (0)

#define lock_dentry(___dentry)
#define unlock_dentry(___dentry)

#define lock_24kernel()         lock_kernel()
#define unlock_24kernel()       unlock_kernel()
#define ll_kernel_locked()      (current->lock_depth >= 0)
/* 2.4 kernels have HZ=100 on i386/x86_64; this should be reasonably safe */
#define get_jiffies_64()        (__u64)jiffies

#ifdef HAVE_MM_INLINE
#include <linux/mm_inline.h>
#endif

#ifndef pgoff_t
#define pgoff_t unsigned long
#endif

#define ll_vfs_create(a,b,c,d)              vfs_create(a,b,c)
#define ll_permission(inode,mask,nd)        permission(inode,mask)
#define ILOOKUP(sb, ino, test, data)        ilookup4(sb, ino, test, data)
#define DCACHE_DISCONNECTED                 DCACHE_NFSD_DISCONNECTED
#define ll_dev_t                            int
#define old_encode_dev(dev)                 (dev)

/* 2.5 uses hlists for some things, like the d_hash.  we code against the
 * 2.5 names and let these macros fall back to the 2.4 list primitives. */
#ifndef HLIST_HEAD /* until we get a kernel newer than l28 */
#define hlist_entry                     list_entry
#define hlist_head                      list_head
#define hlist_node                      list_head
#define HLIST_HEAD                      LIST_HEAD
#define INIT_HLIST_HEAD                 INIT_LIST_HEAD
#define hlist_del_init                  list_del_init
#define hlist_add_head                  list_add
#endif

#ifndef INIT_HLIST_NODE
#define INIT_HLIST_NODE(p)              ((p)->next = NULL, (p)->prev = NULL)
#endif

#ifndef hlist_for_each
#define hlist_for_each                  list_for_each
#endif

#ifndef hlist_for_each_safe
#define hlist_for_each_safe             list_for_each_safe
#endif

#define KDEVT_INIT(val)                 (val)
#define ext3_xattr_set_handle           ext3_xattr_set
#define try_module_get                  __MOD_INC_USE_COUNT
#define module_put                      __MOD_DEC_USE_COUNT
#define LTIME_S(time)                   (time)

#if !defined(CONFIG_RH_2_4_20) && !defined(cpu_online)
#define cpu_online(cpu)                 test_bit(cpu, &(cpu_online_map))
#endif

static inline int ll_path_lookup(const char *path, unsigned flags,
                                 struct nameidata *nd)
{
        int error = 0;
        if (path_init(path, flags, nd))
                error = path_walk(path, nd);
        return error;
}
#define ll_permission(inode,mask,nd)    permission(inode,mask)
typedef long sector_t;

#define ll_pgcache_lock(mapping)        spin_lock(&pagecache_lock)
#define ll_pgcache_unlock(mapping)      spin_unlock(&pagecache_lock)
#define ll_call_writepage(inode, page)  \
                               (inode)->i_mapping->a_ops->writepage(page)
#define ll_invalidate_inode_pages(inode) invalidate_inode_pages(inode)
#define ll_truncate_complete_page(page) truncate_complete_page(page)

static inline void clear_page_dirty(struct page *page)
{
        if (PageDirty(page))
                ClearPageDirty(page);
}

static inline int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (page->mapping && PageDirty(page)) {
                ClearPageDirty(page);
                ll_pgcache_lock(mapping);
                list_del(&page->list);
                list_add(&page->list, &mapping->locked_pages);
                ll_pgcache_unlock(mapping);
                return 1;
        }
        return 0;
}

static inline void ll_redirty_page(struct page *page)
{
        SetPageDirty(page);
        ClearPageLaunder(page);
}

static inline void __d_drop(struct dentry *dentry)
{
        list_del_init(&dentry->d_hash);
}

static inline int cleanup_group_info(void)
{
        /* Get rid of unneeded supplementary groups */
        current->ngroups = 0;
        memset(current->groups, 0, sizeof(current->groups));
        return 0;
}

#ifndef HAVE_COND_RESCHED
static inline void cond_resched(void)
{
        if (unlikely(need_resched())) {
                set_current_state(TASK_RUNNING);
                schedule();
        }
}
#endif

/* to find proc_dir_entry from inode. 2.6 has a native one -bzzz */
#ifndef HAVE_PDE
#define PDE(ii)         ((ii)->u.generic_ip)
#endif

#define __set_page_ll_data(page, llap) set_page_private(page, (unsigned long)llap)
#define __clear_page_ll_data(page) set_page_private(page, 0)
#define PageWriteback(page) 0
#define CheckWriteback(page, cmd) 1
#define set_page_writeback(page) do {} while (0)
#define end_page_writeback(page) do {} while (0)
#define wait_on_page_writeback(page) do {} while (0)

static inline int mapping_mapped(struct address_space *mapping)
{
        if (mapping->i_mmap_shared)
                return 1;
        if (mapping->i_mmap)
                return 1;
        return 0;
}

#ifdef ZAP_PAGE_RANGE_VMA
#define ll_zap_page_range(vma, addr, len)  zap_page_range(vma, addr, len)
#else
#define ll_zap_page_range(vma, addr, len)  zap_page_range(vma->vm_mm, addr, len)
#endif

#ifndef HAVE_PAGE_MAPPED
/* Poor man's page_mapped: subtract from the page count the references
   held by buffers/pagecache plus our own (we are supposed to hold one).
   What is left are user mappings, and anyone else working with this page
   right now, but there are supposedly none. */
static inline int page_mapped(struct page *page)
{
        return page_count(page) - !!page->mapping - !!page->buffers - 1;
}
#endif /* !HAVE_PAGE_MAPPED */

static inline void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
        update_atime(dentry->d_inode);
}

static inline void file_accessed(struct file *file)
{
#ifdef O_NOATIME
        if (file->f_flags & O_NOATIME)
                return;
#endif
        touch_atime(file->f_vfsmnt, file->f_dentry);
}

#ifndef typecheck
/*
 * Check at compile time that something is of a particular type.
 * Always evaluates to 1 so you may use it easily in comparisons.
 */
#define typecheck(type,x) \
({      type __dummy; \
        typeof(x) __dummy2; \
        (void)(&__dummy == &__dummy2); \
        1; \
})
#endif
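
/*
 * Usage sketch (illustrative; jiffies_after() is hypothetical):
 * typecheck() guards macros that would otherwise silently accept
 * mismatched types, mirroring how the kernel's own time_after() is
 * written:
 *
 *      #define jiffies_after(a, b) \
 *              (typecheck(unsigned long, a) && \
 *               typecheck(unsigned long, b) && \
 *               time_after(a, b))
 */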

#endif /* end of 2.4 compat macros */

#ifdef HAVE_PAGE_LIST
static inline int mapping_has_pages(struct address_space *mapping)
{
        int rc = 1;

        ll_pgcache_lock(mapping);
        if (list_empty(&mapping->dirty_pages) &&
            list_empty(&mapping->clean_pages) &&
            list_empty(&mapping->locked_pages)) {
                rc = 0;
        }
        ll_pgcache_unlock(mapping);

        return rc;
}
#else
static inline int mapping_has_pages(struct address_space *mapping)
{
        return mapping->nrpages > 0;
}
#endif

#ifdef HAVE_KIOBUF_KIO_BLOCKS
#define KIOBUF_GET_BLOCKS(k) ((k)->kio_blocks)
#else
#define KIOBUF_GET_BLOCKS(k) ((k)->blocks)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
#define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while (0)
#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
#else
#define ll_set_dflags(dentry, flags) do { \
                spin_lock(&dentry->d_lock); \
                dentry->d_flags |= flags; \
                spin_unlock(&dentry->d_lock); \
        } while (0)
#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
#endif

#ifndef container_of
#define container_of(ptr, type, member) ({                      \
                const typeof( ((type *)0)->member ) *__mptr = (ptr); \
                (type *)( (char *)__mptr - offsetof(type,member) );})
#endif
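
/*
 * Usage sketch (illustrative; my_inode_info is a hypothetical wrapper
 * type): container_of() recovers the enclosing object from a pointer to
 * one of its members:
 *
 *      struct my_inode_info {
 *              int             flags;
 *              struct inode    vfs_inode;
 *      };
 *
 *      static inline struct my_inode_info *MY_I(struct inode *inode)
 *      {
 *              return container_of(inode, struct my_inode_info, vfs_inode);
 *      }
 */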

#ifdef HAVE_I_ALLOC_SEM
#define UP_WRITE_I_ALLOC_SEM(i)   do { up_write(&(i)->i_alloc_sem); } while (0)
#define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while (0)
#define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)

#define UP_READ_I_ALLOC_SEM(i)    do { up_read(&(i)->i_alloc_sem); } while (0)
#define DOWN_READ_I_ALLOC_SEM(i)  do { down_read(&(i)->i_alloc_sem); } while (0)
#define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
#else
#define UP_READ_I_ALLOC_SEM(i)              do { } while (0)
#define DOWN_READ_I_ALLOC_SEM(i)            do { } while (0)
#define LASSERT_I_ALLOC_SEM_READ_LOCKED(i)  do { } while (0)

#define UP_WRITE_I_ALLOC_SEM(i)             do { } while (0)
#define DOWN_WRITE_I_ALLOC_SEM(i)           do { } while (0)
#define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) do { } while (0)
#endif
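
/* Note on the LASSERT variants above: down_read_trylock() can only fail
 * (return 0) while the semaphore is write-locked, and down_write_trylock()
 * can only fail while the semaphore is held at all, so a trylock returning
 * 0 is used as evidence that the expected lock is held; if the trylock
 * succeeds instead, the assertion fires anyway. */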

#ifndef HAVE_GRAB_CACHE_PAGE_NOWAIT_GFP
#define grab_cache_page_nowait_gfp(x, y, z) grab_cache_page_nowait((x), (y))
#endif

#ifndef HAVE_FILEMAP_FDATAWRITE
#define filemap_fdatawrite(mapping)      filemap_fdatasync(mapping)
#endif

#ifdef HAVE_VFS_KERN_MOUNT
static inline struct vfsmount *
ll_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
        struct file_system_type *type = get_fs_type(fstype);
        struct vfsmount *mnt;

        if (!type)
                return ERR_PTR(-ENODEV);
        mnt = vfs_kern_mount(type, flags, name, data);
        module_put(type->owner);
        return mnt;
}
#else
#define ll_kern_mount(fstype, flags, name, data) do_kern_mount((fstype), (flags), (name), (data))
#endif
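
/*
 * Usage sketch (illustrative; the fstype and device names are
 * hypothetical).  Both branches return a vfsmount or an ERR_PTR:
 *
 *      struct vfsmount *mnt = ll_kern_mount("ldiskfs", 0, "/dev/sda1", NULL);
 *      if (IS_ERR(mnt))
 *              return PTR_ERR(mnt);
 *      ...
 *      mntput(mnt);
 */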

#ifndef HAVE_GENERIC_FILE_READ
static inline ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
        kiocb.ki_left = len;

        ret = generic_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        *ppos = kiocb.ki_pos;
        return ret;
}
#endif

#ifndef HAVE_GENERIC_FILE_WRITE
static inline ssize_t
generic_file_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
        kiocb.ki_left = len;

        ret = generic_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
        *ppos = kiocb.ki_pos;

        return ret;
}
#endif

#ifdef HAVE_STATFS_DENTRY_PARAM
#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb)->s_root, (sfs))
#else
#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb), (sfs))
#endif
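
/*
 * Usage sketch (illustrative; assumes a 2.6-style struct kstatfs):
 * ll_do_statfs() hides the change of ->statfs()'s first argument from
 * the superblock to its root dentry:
 *
 *      struct kstatfs sfs;
 *      int rc = ll_do_statfs(sb, &sfs);
 */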

/* task_struct */
#ifndef HAVE_TASK_PPTR
#define p_pptr parent
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_COMPAT25_H */