Whamcloud - gitweb
(1) Drop unnecessary permission check for name_{insert,remove}.
[fs/lustre-release.git] / lustre / include / linux / lustre_compat25.h
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #ifndef _LINUX_COMPAT25_H
24 #define _LINUX_COMPAT25_H
25
26 #ifdef __KERNEL__
27
28 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
29 #error sorry, lustre requires at least 2.5.69
30 #endif
31
32 #include <libcfs/linux/portals_compat25.h>
33
34 #include <linux/lustre_patchless_compat.h>
35
36 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
37 struct ll_iattr {
38         struct iattr    iattr;
39         unsigned int    ia_attr_flags;
40 };
41 #else
42 #define ll_iattr iattr
43 #endif
44
45 #ifndef HAVE_SET_FS_PWD
46 static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
47                 struct dentry *dentry)
48 {
49         struct dentry *old_pwd;
50         struct vfsmount *old_pwdmnt;
51
52         write_lock(&fs->lock);
53         old_pwd = fs->pwd;
54         old_pwdmnt = fs->pwdmnt;
55         fs->pwdmnt = mntget(mnt);
56         fs->pwd = dget(dentry);
57         write_unlock(&fs->lock);
58
59         if (old_pwd) {
60                 dput(old_pwd);
61                 mntput(old_pwdmnt);
62         }
63 }
64 #else
65 #define ll_set_fs_pwd set_fs_pwd
66 #endif
67
68 #define ATTR_BLOCKS     0x4000
69
70 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
71 #define UNLOCK_INODE_MUTEX(inode) do {mutex_unlock(&(inode)->i_mutex); } while(0)
72 #define LOCK_INODE_MUTEX(inode) do {mutex_lock(&(inode)->i_mutex); } while(0)
73 #define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
74 #define d_child d_u.d_child
75 #define d_rcu d_u.d_rcu
76 #else
77 #define UNLOCK_INODE_MUTEX(inode) do {up(&(inode)->i_sem); } while(0)
78 #define LOCK_INODE_MUTEX(inode) do {down(&(inode)->i_sem); } while(0)
79 #define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
80 #endif
81
82 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
83 #define UNLOCK_DQONOFF_MUTEX(dqopt) do {mutex_unlock(&(dqopt)->dqonoff_mutex); } while(0)
84 #define LOCK_DQONOFF_MUTEX(dqopt) do {mutex_lock(&(dqopt)->dqonoff_mutex); } while(0)
85 #else
86 #define UNLOCK_DQONOFF_MUTEX(dqopt) do {up(&(dqopt)->dqonoff_sem); } while(0)
87 #define LOCK_DQONOFF_MUTEX(dqopt) do {down(&(dqopt)->dqonoff_sem); } while(0)
88 #endif
89
90
91 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
92 #define NGROUPS_SMALL           NGROUPS
93 #define NGROUPS_PER_BLOCK       ((int)(EXEC_PAGESIZE / sizeof(gid_t)))
94
95 struct group_info {
96         int        ngroups;
97         atomic_t   usage;
98         gid_t      small_block[NGROUPS_SMALL];
99         int        nblocks;
100         gid_t     *blocks[0];
101 };
102 #define current_ngroups current->ngroups
103 #define current_groups current->groups
104
105 struct group_info *groups_alloc(int gidsetsize);
106 void groups_free(struct group_info *ginfo);
107 #else /* >= 2.6.4 */
108
109 #define current_ngroups current->group_info->ngroups
110 #define current_groups current->group_info->small_block
111
112 #endif
113
114 #ifndef page_private
115 #define page_private(page) ((page)->private)
116 #define set_page_private(page, v) ((page)->private = (v))
117 #endif
118
119 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
120 #define gfp_t int
121 #endif
122
123 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
124
125 #define lock_dentry(___dentry)          spin_lock(&(___dentry)->d_lock)
126 #define unlock_dentry(___dentry)        spin_unlock(&(___dentry)->d_lock)
127
128 #define lock_24kernel()         do {} while (0)
129 #define unlock_24kernel()       do {} while (0)
130 #define ll_kernel_locked()      kernel_locked()
131
132 /*
133  * OBD need working random driver, thus all our
134  * initialization routines must be called after device
135  * driver initialization
136  */
137 #ifndef MODULE
138 #undef module_init
139 #define module_init(a)     late_initcall(a)
140 #endif
141
142 /* XXX our code should be using the 2.6 calls, not the other way around */
143 #define TryLockPage(page)               TestSetPageLocked(page)
144 #define Page_Uptodate(page)             PageUptodate(page)
145 #define ll_redirty_page(page)           set_page_dirty(page)
146
147 #define KDEVT_INIT(val)                 (val)
148
149 #define LTIME_S(time)                   (time.tv_sec)
150 #define ll_path_lookup                  path_lookup
151 #define ll_permission(inode,mask,nd)    permission(inode,mask,nd)
152
153 #define ll_pgcache_lock(mapping)          spin_lock(&mapping->page_lock)
154 #define ll_pgcache_unlock(mapping)        spin_unlock(&mapping->page_lock)
155 #define ll_call_writepage(inode, page)  \
156                                 (inode)->i_mapping->a_ops->writepage(page, NULL)
157 #define ll_invalidate_inode_pages(inode) \
158                                 invalidate_inode_pages((inode)->i_mapping)
159 #define ll_truncate_complete_page(page) \
160                                 truncate_complete_page(page->mapping, page)
161
#define ll_vfs_create(a,b,c,d)          vfs_create(a,b,c,d)
#define ll_dev_t                        dev_t
#define kdev_t                          dev_t
#define to_kdev_t(dev)                  (dev)
#define kdev_t_to_nr(dev)               (dev)
#define val_to_kdev(dev)                (dev)
/* No trailing ';' in a function-like macro: it breaks use as an
 * expression and "if (...) ILOOKUP(...); else ..." statements. */
#define ILOOKUP(sb, ino, test, data)    ilookup5(sb, ino, test, data)
169
170 #include <linux/writeback.h>
171
172 static inline int cleanup_group_info(void)
173 {
174         struct group_info *ginfo;
175
176         ginfo = groups_alloc(0);
177         if (!ginfo)
178                 return -ENOMEM;
179
180         set_current_groups(ginfo);
181         put_group_info(ginfo);
182
183         return 0;
184 }
185
186 #define __set_page_ll_data(page, llap) \
187         do {       \
188                 page_cache_get(page); \
189                 SetPagePrivate(page); \
190                 set_page_private(page, (unsigned long)llap); \
191         } while (0)
192 #define __clear_page_ll_data(page) \
193         do {       \
194                 ClearPagePrivate(page); \
195                 set_page_private(page, 0); \
196                 page_cache_release(page); \
197         } while(0)
198
199 #define kiobuf bio
200
201 #include <linux/proc_fs.h>
202
203 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
204 #define __d_rehash(dentry, lock) d_rehash_cond(dentry, lock)
205 #endif
206
207 #else /* 2.4.. */
208
209 #define lock_dentry(___dentry)
210 #define unlock_dentry(___dentry)
211
212 #define lock_24kernel()         lock_kernel()
213 #define unlock_24kernel()       unlock_kernel()
214 #define ll_kernel_locked()      (current->lock_depth >= 0)
215
216 /* 2.4 kernels have HZ=100 on i386/x86_64, this should be reasonably safe */
217 #define get_jiffies_64()        (__u64)jiffies
218
219 #ifdef HAVE_MM_INLINE
220 #include <linux/mm_inline.h>
221 #endif
222
223 #ifndef pgoff_t
224 #define pgoff_t unsigned long
225 #endif
226
#define ll_vfs_create(a,b,c,d)              vfs_create(a,b,c)
#define ll_permission(inode,mask,nd)        permission(inode,mask)
/* No trailing ';' in a function-like macro: it breaks use as an
 * expression and "if (...) ILOOKUP(...); else ..." statements. */
#define ILOOKUP(sb, ino, test, data)        ilookup4(sb, ino, test, data)
#define DCACHE_DISCONNECTED                 DCACHE_NFSD_DISCONNECTED
#define ll_dev_t                            int
#define old_encode_dev(dev)                 (dev)
233
234 /* 2.5 uses hlists for some things, like the d_hash.  we'll treat them
235  * as 2.5 and let macros drop back.. */
236 #ifndef HLIST_HEAD /* until we get a kernel newer than l28 */
237 #define hlist_entry                     list_entry
238 #define hlist_head                      list_head
239 #define hlist_node                      list_head
240 #define HLIST_HEAD                      LIST_HEAD
241 #define INIT_HLIST_HEAD                 INIT_LIST_HEAD
242 #define hlist_del_init                  list_del_init
243 #define hlist_add_head                  list_add
244 #endif
245 #ifndef INIT_HLIST_NODE
246 #define INIT_HLIST_NODE(p)              ((p)->next = NULL, (p)->prev = NULL)
247 #endif
248 #ifndef hlist_for_each
249 #define hlist_for_each                  list_for_each
250 #endif
251 #ifndef hlist_for_each_safe
252 #define hlist_for_each_safe             list_for_each_safe
253 #endif
254 #define KDEVT_INIT(val)                 (val)
255 #define ext3_xattr_set_handle           ext3_xattr_set
256 #define try_module_get                  __MOD_INC_USE_COUNT
257 #define module_put                      __MOD_DEC_USE_COUNT
258 #define LTIME_S(time)                   (time)
259 #if !defined(CONFIG_RH_2_4_20) && !defined(cpu_online)
260 #define cpu_online(cpu)                 test_bit(cpu, &(cpu_online_map))
261 #endif
262
/* 2.4 stand-in for the 2.6 path_lookup(): resolve @path into @nd.
 * Returns 0 on success or the error from path_walk(). */
static inline int ll_path_lookup(const char *path, unsigned flags,
                                 struct nameidata *nd)
{
        /* path_init() returning 0 means the lookup finished already. */
        if (!path_init(path, flags, nd))
                return 0;
        return path_walk(path, nd);
}
/* ll_permission() is already defined identically earlier in this 2.4
 * branch; the redundant redefinition was removed. */
272 typedef long sector_t;
273
274 #define ll_pgcache_lock(mapping)        spin_lock(&pagecache_lock)
275 #define ll_pgcache_unlock(mapping)      spin_unlock(&pagecache_lock)
276 #define ll_call_writepage(inode, page)  \
277                                (inode)->i_mapping->a_ops->writepage(page)
278 #define ll_invalidate_inode_pages(inode) invalidate_inode_pages(inode)
279 #define ll_truncate_complete_page(page) truncate_complete_page(page)
280
/* Clear PG_dirty on @page, but only touch the flag word when the bit
 * is actually set. */
static inline void clear_page_dirty(struct page *page)
{
        if (!PageDirty(page))
                return;
        ClearPageDirty(page);
}
286
/* 2.4 replacement for the 2.6 clear_page_dirty_for_io(): clear the
 * dirty bit and re-file the page onto the mapping's locked_pages list
 * so the caller may write it out.
 * Returns 1 if the page was dirty (and is now queued), 0 otherwise. */
static inline int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (page->mapping && PageDirty(page)) {
                ClearPageDirty(page);
                /* Move the page between per-mapping lists under the
                 * pagecache lock; list_del + list_add preserves the
                 * page on exactly one list at all times. */
                ll_pgcache_lock(mapping);
                list_del(&page->list);
                list_add(&page->list, &mapping->locked_pages);
                ll_pgcache_unlock(mapping);
                return 1;
        }
        return 0;
}
301
/* Mark @page dirty again (e.g. after an aborted writeout) and clear
 * PG_launder.  NOTE(review): PG_launder semantics inferred from the
 * flag name -- confirm against the 2.4 mm before relying on this. */
static inline void ll_redirty_page(struct page *page)
{
        SetPageDirty(page);
        ClearPageLaunder(page);
}
307
/* 2.4 stand-in for the 2.6 __d_drop(): unhash @dentry by unlinking it
 * from the dentry hash chain (d_hash is a plain list_head on 2.4). */
static inline void __d_drop(struct dentry *dentry)
{
        list_del_init(&dentry->d_hash);
}
312
/* 2.4 version: drop all supplementary groups of the current task by
 * zeroing the task_struct fields directly.  Always returns 0; the 2.6
 * variant can fail with -ENOMEM because it must allocate. */
static inline int cleanup_group_info(void)
{
        /* Get rid of unneeded supplementary groups */
        current->ngroups = 0;
        memset(current->groups, 0, sizeof(current->groups));
        return 0;
}
320
321 #ifndef HAVE_COND_RESCHED
/* Fallback cond_resched() for kernels lacking HAVE_COND_RESCHED:
 * voluntarily yield the CPU when a reschedule is pending. */
static inline void cond_resched(void)
{
        if (unlikely(need_resched())) {
                /* schedule() must be entered in TASK_RUNNING state. */
                set_current_state(TASK_RUNNING);
                schedule();
        }
}
329 #endif
330
331 /* to find proc_dir_entry from inode. 2.6 has native one -bzzz */
332 #ifndef HAVE_PDE
333 #define PDE(ii)         ((ii)->u.generic_ip)
334 #endif
335
336 #define __set_page_ll_data(page, llap) set_page_private(page, (unsigned long)llap)
337 #define __clear_page_ll_data(page) set_page_private(page, 0)
338 #define PageWriteback(page) 0
339 #define set_page_writeback(page) do {} while (0)
340 #define end_page_writeback(page) do {} while (0)
341
342 static inline int mapping_mapped(struct address_space *mapping)
343 {
344         if (mapping->i_mmap_shared)
345                 return 1;
346         if (mapping->i_mmap)
347                 return 1;
348         return 0;
349 }
350
351 #ifdef ZAP_PAGE_RANGE_VMA
352 #define ll_zap_page_range(vma, addr, len)  zap_page_range(vma, addr, len)
353 #else
354 #define ll_zap_page_range(vma, addr, len)  zap_page_range(vma->vm_mm, addr, len)
355 #endif
356
357 #ifndef HAVE_PAGE_MAPPED
/* Poor man's page_mapped(): subtract from the page count the references
   held by the pagecache/buffers and our own (we are supposed to hold one).
   What is left are user mappings plus anyone else working with this page
   now, but there are supposedly none. */
362 static inline int page_mapped(struct page *page)
363 {
364         return page_count(page) - !!page->mapping - !!page->buffers - 1;
365 }
366 #endif /* !HAVE_PAGE_MAPPED */
367
/* 2.4 stand-in for the 2.6 touch_atime(); @mnt is accepted only for
 * interface compatibility and is ignored -- the 2.4 update_atime()
 * needs just the inode. */
static inline void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
        update_atime(dentry->d_inode);
}
372
/* 2.4 stand-in for the 2.6 file_accessed(): update the access time of
 * @file unless it was opened with O_NOATIME (when the kernel defines
 * that flag). */
static inline void file_accessed(struct file *file)
{
#ifdef O_NOATIME
        if (file->f_flags & O_NOATIME)
                return;
#endif
        touch_atime(file->f_vfsmnt, file->f_dentry);
}
381
382 #endif /* end of 2.4 compat macros */
383
384 #ifdef HAVE_PAGE_LIST
385 static inline int mapping_has_pages(struct address_space *mapping)
386 {
387         int rc = 1;
388
389         ll_pgcache_lock(mapping);
390         if (list_empty(&mapping->dirty_pages) &&
391             list_empty(&mapping->clean_pages) &&
392             list_empty(&mapping->locked_pages)) {
393                 rc = 0;
394         }
395         ll_pgcache_unlock(mapping);
396
397         return rc;
398 }
399 #else
400 static inline int mapping_has_pages(struct address_space *mapping)
401 {
402         return mapping->nrpages > 0;
403 }
404 #endif
405
406 #ifdef HAVE_KIOBUF_KIO_BLOCKS
407 #define KIOBUF_GET_BLOCKS(k) ((k)->kio_blocks)
408 #else
409 #define KIOBUF_GET_BLOCKS(k) ((k)->blocks)
410 #endif
411
412 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
413 #define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
414 #define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
415 #else
416 #define ll_set_dflags(dentry, flags) do { \
417                 spin_lock(&dentry->d_lock); \
418                 dentry->d_flags |= flags; \
419                 spin_unlock(&dentry->d_lock); \
420         } while(0)
421 #define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
422 #endif
423
424 #ifndef container_of
425 #define container_of(ptr, type, member) ({                      \
426                 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
427                 (type *)( (char *)__mptr - offsetof(type,member) );})
428 #endif
429
430 #ifdef HAVE_I_ALLOC_SEM
431 #define UP_WRITE_I_ALLOC_SEM(i)   do { up_write(&(i)->i_alloc_sem); } while (0)
432 #define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while(0)
433 #define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
434
435 #define UP_READ_I_ALLOC_SEM(i)    do { up_read(&(i)->i_alloc_sem); } while (0)
436 #define DOWN_READ_I_ALLOC_SEM(i)  do { down_read(&(i)->i_alloc_sem); } while (0)
437 #define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
438 #else
439 #define UP_READ_I_ALLOC_SEM(i)              do { } while (0)
440 #define DOWN_READ_I_ALLOC_SEM(i)            do { } while (0)
441 #define LASSERT_I_ALLOC_SEM_READ_LOCKED(i)  do { } while (0)
442
443 #define UP_WRITE_I_ALLOC_SEM(i)             do { } while (0)
444 #define DOWN_WRITE_I_ALLOC_SEM(i)           do { } while (0)
445 #define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) do { } while (0)
446 #endif
447
448 #ifndef HAVE_GRAB_CACHE_PAGE_NOWAIT_GFP
449 #define grab_cache_page_nowait_gfp(x, y, z) grab_cache_page_nowait((x), (y))
450 #endif
451
452 #ifndef HAVE_FILEMAP_FDATAWRITE
453 #define filemap_fdatawrite(mapping)      filemap_fdatasync(mapping)
454 #endif
455
456 #endif /* __KERNEL__ */
#endif /* _LINUX_COMPAT25_H */