/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2011 Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef _LINUX_COMPAT25_H
#define _LINUX_COMPAT25_H

#ifdef __KERNEL__

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
#error sorry, lustre requires linux kernel 2.6.9 or later
#endif

#include <linux/fs_struct.h>
#include <libcfs/linux/portals_compat25.h>

#include <linux/lustre_patchless_compat.h>

/* Some old kernels (like 2.6.9) do not define the SEEK_XXX constants, so
 * defining them here lets the lustre client build on more platforms. */
#ifndef SEEK_SET
 #define SEEK_SET 0
 #define SEEK_CUR 1
 #define SEEK_END 2
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
struct ll_iattr {
        struct iattr    iattr;
        unsigned int    ia_attr_flags;
};
#else
#define ll_iattr iattr
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */

#ifdef HAVE_FS_STRUCT_USE_PATH
static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
                                 struct dentry *dentry)
{
        struct path path;
        struct path old_pwd;

        path.mnt = mnt;
        path.dentry = dentry;
        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        path_get(&path);
        fs->pwd = path;
        write_unlock(&fs->lock);

        if (old_pwd.dentry)
                path_put(&old_pwd);
}

#else

static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
                                 struct dentry *dentry)
{
        struct dentry *old_pwd;
        struct vfsmount *old_pwdmnt;

        cfs_write_lock(&fs->lock);
        old_pwd = fs->pwd;
        old_pwdmnt = fs->pwdmnt;
        fs->pwdmnt = mntget(mnt);
        fs->pwd = dget(dentry);
        cfs_write_unlock(&fs->lock);

        if (old_pwd) {
                dput(old_pwd);
                mntput(old_pwdmnt);
        }
}
#endif
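
/*
 * Usage sketch (the mount and dentry here are placeholders): a caller that
 * wants to point the current process's working directory at the root of a
 * just-mounted filesystem would do something like
 *
 *      ll_set_fs_pwd(current->fs, mnt, mnt->mnt_root);
 *
 * Both variants above take their own references on the new mount and dentry
 * and drop the references held by the previous pwd.
 */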

/*
 * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
 * ATTR_* attributes (see bug 13828)
 */
#define ATTR_BLOCKS    (1 << 27)

#if HAVE_INODE_I_MUTEX
#define UNLOCK_INODE_MUTEX(inode) \
do { cfs_mutex_unlock(&(inode)->i_mutex); } while(0)
#define LOCK_INODE_MUTEX(inode) \
do { cfs_mutex_lock(&(inode)->i_mutex); } while(0)
#define LOCK_INODE_MUTEX_PARENT(inode) \
do { cfs_mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
#define TRYLOCK_INODE_MUTEX(inode) cfs_mutex_trylock(&(inode)->i_mutex)
#else
#define UNLOCK_INODE_MUTEX(inode) do { cfs_up(&(inode)->i_sem); } while(0)
#define LOCK_INODE_MUTEX(inode) do { cfs_down(&(inode)->i_sem); } while(0)
#define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
#define LOCK_INODE_MUTEX_PARENT(inode) LOCK_INODE_MUTEX(inode)
#endif /* HAVE_INODE_I_MUTEX */
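
/*
 * Usage sketch: callers take the inode mutex through these wrappers so the
 * same code builds on both i_mutex and i_sem kernels:
 *
 *      LOCK_INODE_MUTEX(inode);
 *      ... update inode state ...
 *      UNLOCK_INODE_MUTEX(inode);
 */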

#ifdef HAVE_SEQ_LOCK
#define LL_SEQ_LOCK(seq) cfs_mutex_lock(&(seq)->lock)
#define LL_SEQ_UNLOCK(seq) cfs_mutex_unlock(&(seq)->lock)
#else
#define LL_SEQ_LOCK(seq) cfs_down(&(seq)->sem)
#define LL_SEQ_UNLOCK(seq) cfs_up(&(seq)->sem)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
#define d_child d_u.d_child
#define d_rcu d_u.d_rcu
#endif

#ifdef HAVE_DQUOTOFF_MUTEX
#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_unlock(&(dqopt)->dqonoff_mutex)
#define LOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_lock(&(dqopt)->dqonoff_mutex)
#else
#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_up(&(dqopt)->dqonoff_sem)
#define LOCK_DQONOFF_MUTEX(dqopt) cfs_down(&(dqopt)->dqonoff_sem)
#endif /* HAVE_DQUOTOFF_MUTEX */

#define current_ngroups current_cred()->group_info->ngroups
#define current_groups current_cred()->group_info->small_block

#ifndef page_private
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
#endif

#ifndef HAVE_GFP_T
#define gfp_t int
#endif

#define lock_dentry(___dentry)          cfs_spin_lock(&(___dentry)->d_lock)
#define unlock_dentry(___dentry)        cfs_spin_unlock(&(___dentry)->d_lock)

#define ll_kernel_locked()      kernel_locked()

/*
 * OBD needs a working random driver, so all of our initialization routines
 * must be called after device driver initialization.
 */
#ifndef MODULE
#undef module_init
#define module_init(a)     late_initcall(a)
#endif

/* XXX our code should be using the 2.6 calls, not the other way around */
#ifdef HAVE_TRYLOCK_PAGE
#define TestSetPageLocked(page)         (!trylock_page(page))
#endif

#define Page_Uptodate(page)             PageUptodate(page)
#define ll_redirty_page(page)           set_page_dirty(page)

#define KDEVT_INIT(val)                 (val)

#define LTIME_S(time)                   (time.tv_sec)
#define ll_path_lookup                  path_lookup

#ifdef HAVE_EXPORT_INODE_PERMISSION
#define ll_permission(inode,mask,nd)    inode_permission(inode,mask)
#else
#define ll_permission(inode,mask,nd)    permission(inode,mask,nd)
#endif

#define ll_pgcache_lock(mapping)          cfs_spin_lock(&mapping->page_lock)
#define ll_pgcache_unlock(mapping)        cfs_spin_unlock(&mapping->page_lock)
#define ll_call_writepage(inode, page)  \
                                (inode)->i_mapping->a_ops->writepage(page, NULL)
#define ll_invalidate_inode_pages(inode) \
                                invalidate_inode_pages((inode)->i_mapping)
#define ll_truncate_complete_page(page) \
                                truncate_complete_page(page->mapping, page)

#define ll_vfs_create(a,b,c,d)          vfs_create(a,b,c,d)
#define ll_dev_t                        dev_t
#define kdev_t                          dev_t
#define to_kdev_t(dev)                  (dev)
#define kdev_t_to_nr(dev)               (dev)
#define val_to_kdev(dev)                (dev)

#ifdef HAVE_BLKDEV_PUT_2ARGS
#define ll_blkdev_put(a, b) blkdev_put(a, b)
#else
#define ll_blkdev_put(a, b) blkdev_put(a)
#endif

#ifdef HAVE_DENTRY_OPEN_4ARGS
#define ll_dentry_open(a, b, c, d) dentry_open(a, b, c, d)
#else
#define ll_dentry_open(a, b, c, d) dentry_open(a, b, c)
#endif

#include <linux/writeback.h>

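/*
 * Drop all supplementary groups from the current task by installing a
 * freshly allocated, empty group_info.  set_current_groups() takes its
 * own reference, so the local one is released before returning.
 */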
static inline int cfs_cleanup_group_info(void)
{
        struct group_info *ginfo;

        ginfo = groups_alloc(0);
        if (!ginfo)
                return -ENOMEM;

        set_current_groups(ginfo);
        put_group_info(ginfo);

        return 0;
}

#define __set_page_ll_data(page, llap) \
        do {                           \
                page_cache_get(page);  \
                SetPagePrivate(page);  \
                set_page_private(page, (unsigned long)llap); \
        } while (0)
#define __clear_page_ll_data(page) \
        do {                       \
                ClearPagePrivate(page);    \
                set_page_private(page, 0); \
                page_cache_release(page);  \
        } while(0)

#define kiobuf bio

#include <linux/proc_fs.h>

#if !defined(HAVE_D_REHASH_COND) && defined(HAVE___D_REHASH)
#define d_rehash_cond(dentry, lock) __d_rehash(dentry, lock)
extern void __d_rehash(struct dentry *dentry, int lock);
#endif

#ifdef HAVE_CAN_SLEEP_ARG
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        flock_lock_file_wait(file, lock, can_sleep)
#else
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
        flock_lock_file_wait(file, lock)
#endif

#define CheckWriteback(page, cmd) \
        ((!PageWriteback(page) && (cmd & OBD_BRW_READ)) || \
         (PageWriteback(page) && (cmd & OBD_BRW_WRITE)))

#ifdef HAVE_PAGE_LIST
static inline int mapping_has_pages(struct address_space *mapping)
{
        int rc = 1;

        ll_pgcache_lock(mapping);
        if (cfs_list_empty(&mapping->dirty_pages) &&
            cfs_list_empty(&mapping->clean_pages) &&
            cfs_list_empty(&mapping->locked_pages)) {
                rc = 0;
        }
        ll_pgcache_unlock(mapping);

        return rc;
}
#else
static inline int mapping_has_pages(struct address_space *mapping)
{
        return mapping->nrpages > 0;
}
#endif

#ifdef HAVE_KIOBUF_KIO_BLOCKS
#define KIOBUF_GET_BLOCKS(k) ((k)->kio_blocks)
#else
#define KIOBUF_GET_BLOCKS(k) ((k)->blocks)
#endif

#ifdef HAVE_SECURITY_PLUG
#define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
                vfs_symlink(dir, dentry, mnt, path, mode)
#else
#ifdef HAVE_4ARGS_VFS_SYMLINK
#define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
                vfs_symlink(dir, dentry, path, mode)
#else
#define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
                       vfs_symlink(dir, dentry, path)
#endif

#define ll_set_dflags(dentry, flags) do { \
                cfs_spin_lock(&dentry->d_lock); \
                dentry->d_flags |= flags; \
                cfs_spin_unlock(&dentry->d_lock); \
        } while(0)
#endif

#ifndef container_of
#define container_of(ptr, type, member) ({                      \
                const typeof( ((type *)0)->member ) *__mptr = (ptr); \
                (type *)( (char *)__mptr - offsetof(type,member) );})
#endif

#define UP_WRITE_I_ALLOC_SEM(i)   up_write(&(i)->i_alloc_sem)
#define DOWN_WRITE_I_ALLOC_SEM(i) down_write(&(i)->i_alloc_sem)
#define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)

#define UP_READ_I_ALLOC_SEM(i)    up_read(&(i)->i_alloc_sem)
#define DOWN_READ_I_ALLOC_SEM(i)  down_read(&(i)->i_alloc_sem)
#define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)

#ifndef HAVE_GRAB_CACHE_PAGE_NOWAIT_GFP
#define grab_cache_page_nowait_gfp(x, y, z) grab_cache_page_nowait((x), (y))
#endif

#include <linux/mpage.h>        /* for generic_writepages */
#ifndef HAVE_FILEMAP_FDATAWRITE_RANGE
#include <linux/backing-dev.h>  /* for mapping->backing_dev_info */
static inline int filemap_fdatawrite_range(struct address_space *mapping,
                                           loff_t start, loff_t end)
{
        int rc;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = (end - start + PAGE_SIZE - 1) >> PAGE_SHIFT,
        };

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
        wbc.range_start = start;
        wbc.range_end = end;
#else
        wbc.start = start;
        wbc.end = end;
#endif

#ifdef HAVE_MAPPING_CAP_WRITEBACK_DIRTY
        if (!mapping_cap_writeback_dirty(mapping))
                rc = 0;
#else
        if (mapping->backing_dev_info->memory_backed)
                rc = 0;
#endif
        /* do_writepages() */
        else if (mapping->a_ops->writepages)
                rc = mapping->a_ops->writepages(mapping, &wbc);
        else
                rc = generic_writepages(mapping, &wbc);
        return rc;
}
#else
int filemap_fdatawrite_range(struct address_space *mapping,
                             loff_t start, loff_t end);
#endif /* HAVE_FILEMAP_FDATAWRITE_RANGE */
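
/*
 * Usage sketch (the range values are illustrative): start writeback on the
 * first megabyte of an inode's dirty pages:
 *
 *      rc = filemap_fdatawrite_range(inode->i_mapping, 0, (1 << 20) - 1);
 *
 * WB_SYNC_ALL in the fallback above means no dirty page in the range is
 * skipped; waiting for the writes to complete is still a separate step
 * (filemap_fdatawait()).
 */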

#ifdef HAVE_VFS_KERN_MOUNT
static inline struct vfsmount *
ll_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
        struct file_system_type *type = get_fs_type(fstype);
        struct vfsmount *mnt;

        if (!type)
                return ERR_PTR(-ENODEV);
        mnt = vfs_kern_mount(type, flags, name, data);
        cfs_module_put(type->owner);
        return mnt;
}
#else
#define ll_kern_mount(fstype, flags, name, data) do_kern_mount((fstype), (flags), (name), (data))
#endif
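
/*
 * Usage sketch (the fstype and options are placeholders):
 *
 *      struct vfsmount *mnt = ll_kern_mount("lustre", 0, devname, opts);
 *
 *      if (IS_ERR(mnt))
 *              return PTR_ERR(mnt);
 *
 * get_fs_type() pins the filesystem module; once vfs_kern_mount() has taken
 * its own references the extra one is dropped, hence the cfs_module_put().
 */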

#ifdef HAVE_STATFS_DENTRY_PARAM
#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb)->s_root, (sfs))
#else
#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb), (sfs))
#endif

#ifndef HAVE_SB_TIME_GRAN
#ifndef HAVE_S_TIME_GRAN
#error Need s_time_gran patch!
#endif
static inline u32 get_sb_time_gran(struct super_block *sb)
{
        return sb->s_time_gran;
}
#endif

#ifdef HAVE_RW_TREE_LOCK
#define TREE_READ_LOCK_IRQ(mapping)     read_lock_irq(&(mapping)->tree_lock)
#define TREE_READ_UNLOCK_IRQ(mapping)   read_unlock_irq(&(mapping)->tree_lock)
#else
#define TREE_READ_LOCK_IRQ(mapping)     cfs_spin_lock_irq(&(mapping)->tree_lock)
#define TREE_READ_UNLOCK_IRQ(mapping)   cfs_spin_unlock_irq(&(mapping)->tree_lock)
#endif

#ifdef HAVE_UNREGISTER_BLKDEV_RETURN_INT
#define ll_unregister_blkdev(a,b)       unregister_blkdev((a),(b))
#else
static inline
int ll_unregister_blkdev(unsigned int dev, const char *name)
{
        unregister_blkdev(dev, name);
        return 0;
}
#endif

#ifdef HAVE_INVALIDATE_BDEV_2ARG
#define ll_invalidate_bdev(a,b)         invalidate_bdev((a),(b))
#else
#define ll_invalidate_bdev(a,b)         invalidate_bdev((a))
#endif

#ifdef HAVE_INODE_BLKSIZE
#define ll_inode_blksize(a)     (a)->i_blksize
#else
#define ll_inode_blksize(a)     (1<<(a)->i_blkbits)
#endif

#ifdef HAVE_FS_RENAME_DOES_D_MOVE
#define LL_RENAME_DOES_D_MOVE   FS_RENAME_DOES_D_MOVE
#else
#define LL_RENAME_DOES_D_MOVE   FS_ODD_RENAME
#endif

#ifndef HAVE_D_OBTAIN_ALIAS
/* Unlike d_obtain_alias(), the old d_alloc_anon() did not free the inode
 * reference on error.  Hide that difference/inconvenience here. */
static inline struct dentry *d_obtain_alias(struct inode *inode)
{
        struct dentry *anon = d_alloc_anon(inode);

        if (anon == NULL) {
                iput(inode);
                anon = ERR_PTR(-ENOMEM);
        }

        return anon;
}
#endif

/* add a lustre-compatible layer for the crypto API */
#include <linux/crypto.h>
#ifdef HAVE_ASYNC_BLOCK_CIPHER
#define ll_crypto_hash          crypto_hash
#define ll_crypto_cipher        crypto_blkcipher
#define ll_crypto_alloc_hash(name, type, mask)  crypto_alloc_hash(name, type, mask)
#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
#define ll_crypto_hash_init(desc)               crypto_hash_init(desc)
#define ll_crypto_hash_update(desc, sl, bytes)  crypto_hash_update(desc, sl, bytes)
#define ll_crypto_hash_final(desc, out)         crypto_hash_final(desc, out)
#define ll_crypto_alloc_blkcipher(name, type, mask) \
                crypto_alloc_blkcipher(name, type, mask)
#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
                crypto_blkcipher_setkey(tfm, key, keylen)
#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
                crypto_blkcipher_set_iv(tfm, src, len)
#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
                crypto_blkcipher_get_iv(tfm, dst, len)
#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
                crypto_blkcipher_encrypt(desc, dst, src, bytes)
#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
                crypto_blkcipher_decrypt(desc, dst, src, bytes)
#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
                crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
                crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)

static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
                                 u8 *key, unsigned int *keylen,
                                 struct scatterlist *sg,
                                 unsigned int size, u8 *result)
{
        struct hash_desc desc;
        int              rv;

        desc.tfm   = tfm;
        desc.flags = 0;
        rv = crypto_hash_setkey(desc.tfm, key, *keylen);
        if (rv) {
                CERROR("crypto_hash_setkey() failed: %d\n", rv);
                return rv;
        }
        return crypto_hash_digest(&desc, sg, size, result);
}
static inline
unsigned int ll_crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
{
        return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
}

static inline
unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
{
        return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
}

#define ll_crypto_hash_blocksize(tfm)       crypto_hash_blocksize(tfm)
#define ll_crypto_hash_digestsize(tfm)      crypto_hash_digestsize(tfm)
#define ll_crypto_blkcipher_ivsize(tfm)     crypto_blkcipher_ivsize(tfm)
#define ll_crypto_blkcipher_blocksize(tfm)  crypto_blkcipher_blocksize(tfm)
#define ll_crypto_free_hash(tfm)            crypto_free_hash(tfm)
#define ll_crypto_free_blkcipher(tfm)       crypto_free_blkcipher(tfm)
#else /* HAVE_ASYNC_BLOCK_CIPHER */
#include <linux/scatterlist.h>
#define ll_crypto_hash          crypto_tfm
#define ll_crypto_cipher        crypto_tfm
#ifndef HAVE_STRUCT_HASH_DESC
struct hash_desc {
        struct ll_crypto_hash *tfm;
        u32                    flags;
};
#endif
#ifndef HAVE_STRUCT_BLKCIPHER_DESC
struct blkcipher_desc {
        struct ll_crypto_cipher *tfm;
        void                    *info;
        u32                      flags;
};
#endif
#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
        crypto_cipher_setkey(tfm, key, keylen)
#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
        crypto_cipher_set_iv(tfm, src, len)
#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
        crypto_cipher_get_iv(tfm, dst, len)
#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
        crypto_cipher_encrypt((desc)->tfm, dst, src, bytes)
#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
        crypto_cipher_decrypt((desc)->tfm, dst, src, bytes)
#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
        crypto_cipher_decrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
        crypto_cipher_encrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)

static inline
struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char *algname,
                                                   u32 type, u32 mask)
{
        char        buf[CRYPTO_MAX_ALG_NAME + 1];
        const char *pan = algname;
        u32         flag = 0;

        if (strncmp("cbc(", algname, 4) == 0)
                flag |= CRYPTO_TFM_MODE_CBC;
        else if (strncmp("ecb(", algname, 4) == 0)
                flag |= CRYPTO_TFM_MODE_ECB;
        if (flag) {
                char *vp = strnchr(algname, CRYPTO_MAX_ALG_NAME, ')');

                if (vp) {
                        memcpy(buf, algname + 4, vp - algname - 4);
                        buf[vp - algname - 4] = '\0';
                        pan = buf;
                } else {
                        flag = 0;
                }
        }
        return crypto_alloc_tfm(pan, flag);
}

static inline
struct ll_crypto_hash *ll_crypto_alloc_hash(const char *alg, u32 type, u32 mask)
{
        char        buf[CRYPTO_MAX_ALG_NAME + 1];
        const char *pan = alg;

        if (strncmp("hmac(", alg, 5) == 0) {
                char *vp = strnchr(alg, CRYPTO_MAX_ALG_NAME, ')');

                if (vp) {
                        memcpy(buf, alg + 5, vp - alg - 5);
                        buf[vp - alg - 5] = '\0';
                        pan = buf;
                }
        }
        return crypto_alloc_tfm(pan, 0);
}

static inline int ll_crypto_hash_init(struct hash_desc *desc)
{
        crypto_digest_init(desc->tfm);
        return 0;
}

static inline int ll_crypto_hash_update(struct hash_desc *desc,
                                        struct scatterlist *sg,
                                        unsigned int nbytes)
{
        struct scatterlist *sl = sg;
        unsigned int        count;

        /*
         * This is fragile: the scatterlist entries must add up to exactly
         * nbytes.  If they fall short, the loop walks off the end of the
         * array; since whole entries are digested, any excess in the final
         * entry is hashed as well.  The upstream crypto_hash_update()
         * likewise derives the entry count from nbytes.  The old interface,
         * which took an explicit count, was safer, but it is gone.
         */
        for (count = 0; nbytes > 0; count++, sl++)
                nbytes -= sl->length;

        crypto_digest_update(desc->tfm, sg, count);
        return 0;
}

static inline int ll_crypto_hash_final(struct hash_desc *desc, u8 *out)
{
        crypto_digest_final(desc->tfm, out);
        return 0;
}

static inline int ll_crypto_hmac(struct crypto_tfm *tfm,
                                 u8 *key, unsigned int *keylen,
                                 struct scatterlist *sg,
                                 unsigned int nbytes,
                                 u8 *out)
{
        struct scatterlist *sl = sg;
        int                 count;

        /* same caveat as in ll_crypto_hash_update() above */
        for (count = 0; nbytes > 0; count++, sl++)
                nbytes -= sl->length;

        crypto_hmac(tfm, key, keylen, sg, count, out);
        return 0;
}

#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_digest_setkey(tfm, key, keylen)
#define ll_crypto_blkcipher_blocksize(tfm)      crypto_tfm_alg_blocksize(tfm)
#define ll_crypto_blkcipher_ivsize(tfm)         crypto_tfm_alg_ivsize(tfm)
#define ll_crypto_hash_digestsize(tfm)          crypto_tfm_alg_digestsize(tfm)
#define ll_crypto_hash_blocksize(tfm)           crypto_tfm_alg_blocksize(tfm)
#define ll_crypto_free_hash(tfm)                crypto_free_tfm(tfm)
#define ll_crypto_free_blkcipher(tfm)           crypto_free_tfm(tfm)
#define ll_crypto_tfm_alg_min_keysize           crypto_tfm_alg_min_keysize
#define ll_crypto_tfm_alg_max_keysize           crypto_tfm_alg_max_keysize
#endif /* HAVE_ASYNC_BLOCK_CIPHER */
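
/*
 * Usage sketch (the algorithm name and buffer size are illustrative):
 * a digest computed through the compat names looks the same on either
 * side of the #ifdef above:
 *
 *      struct ll_crypto_hash *tfm = ll_crypto_alloc_hash("sha1", 0, 0);
 *      struct hash_desc desc = { .tfm = tfm, .flags = 0 };
 *      u8 digest[20];
 *
 *      ll_crypto_hash_init(&desc);
 *      ll_crypto_hash_update(&desc, sg, nbytes);
 *      ll_crypto_hash_final(&desc, digest);
 *      ll_crypto_free_hash(tfm);
 */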

#ifndef HAVE_SYNCHRONIZE_RCU
/* Linux 2.6.32 provides the define when !CONFIG_TREE_PREEMPT_RCU */
#ifndef synchronize_rcu
#define synchronize_rcu() synchronize_kernel()
#endif
#endif

#ifdef HAVE_FILE_REMOVE_SUID
# define ll_remove_suid(file, mnt)      file_remove_suid(file)
#else
# ifdef HAVE_SECURITY_PLUG
#  define ll_remove_suid(file,mnt)      remove_suid(file->f_dentry,mnt)
# else
#  define ll_remove_suid(file,mnt)      remove_suid(file->f_dentry)
# endif
#endif

#ifdef HAVE_SECURITY_PLUG
#define ll_vfs_rmdir(dir,entry,mnt)             vfs_rmdir(dir,entry,mnt)
#define ll_vfs_mkdir(inode,dir,mnt,mode)        vfs_mkdir(inode,dir,mnt,mode)
#define ll_vfs_link(old,mnt,dir,new,mnt1)       vfs_link(old,mnt,dir,new,mnt1)
#define ll_vfs_unlink(inode,entry,mnt)          vfs_unlink(inode,entry,mnt)
#define ll_vfs_mknod(dir,entry,mnt,mode,dev)            \
                vfs_mknod(dir,entry,mnt,mode,dev)
#define ll_security_inode_unlink(dir,entry,mnt)         \
                security_inode_unlink(dir,entry,mnt)
#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
                vfs_rename(old,old_dir,mnt,new,new_dir,mnt1)
#else
#define ll_vfs_rmdir(dir,entry,mnt)             vfs_rmdir(dir,entry)
#define ll_vfs_mkdir(inode,dir,mnt,mode)        vfs_mkdir(inode,dir,mode)
#define ll_vfs_link(old,mnt,dir,new,mnt1)       vfs_link(old,dir,new)
#define ll_vfs_unlink(inode,entry,mnt)          vfs_unlink(inode,entry)
#define ll_vfs_mknod(dir,entry,mnt,mode,dev)    vfs_mknod(dir,entry,mode,dev)
#define ll_security_inode_unlink(dir,entry,mnt) security_inode_unlink(dir,entry)
#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
                vfs_rename(old,old_dir,new,new_dir)
#endif /* HAVE_SECURITY_PLUG */

#ifdef for_each_possible_cpu
#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
#elif defined(for_each_cpu)
#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
#endif

#ifndef cpu_to_node
#define cpu_to_node(cpu)         0
#endif

#ifdef HAVE_BIO_ENDIO_2ARG
#define cfs_bio_io_error(a,b)   bio_io_error((a))
#define cfs_bio_endio(a,b,c)    bio_endio((a),(c))
#else
#define cfs_bio_io_error(a,b)   bio_io_error((a),(b))
#define cfs_bio_endio(a,b,c)    bio_endio((a),(b),(c))
#endif

#ifdef HAVE_FS_STRUCT_USE_PATH
#define cfs_fs_pwd(fs)       ((fs)->pwd.dentry)
#define cfs_fs_mnt(fs)       ((fs)->pwd.mnt)
#define cfs_path_put(nd)     path_put(&(nd)->path)
#else
#define cfs_fs_pwd(fs)       ((fs)->pwd)
#define cfs_fs_mnt(fs)       ((fs)->pwdmnt)
#define cfs_path_put(nd)     path_release(nd)
#endif

#ifndef abs
static inline int abs(int x)
{
        return (x < 0) ? -x : x;
}
#endif

#ifndef labs
static inline long labs(long x)
{
        return (x < 0) ? -x : x;
}
#endif /* labs */

#ifdef HAVE_INVALIDATE_INODE_PAGES
#define invalidate_mapping_pages(mapping,s,e) invalidate_inode_pages(mapping)
#endif

#ifdef HAVE_INODE_IPRIVATE
#define INODE_PRIVATE_DATA(inode)       ((inode)->i_private)
#else
#define INODE_PRIVATE_DATA(inode)       ((inode)->u.generic_ip)
#endif

#ifndef SLAB_DESTROY_BY_RCU
#define CFS_SLAB_DESTROY_BY_RCU 0
#else
#define CFS_SLAB_DESTROY_BY_RCU SLAB_DESTROY_BY_RCU
#endif

#ifdef HAVE_SB_HAS_QUOTA_ACTIVE
#define ll_sb_has_quota_active(sb, type) sb_has_quota_active(sb, type)
#else
#define ll_sb_has_quota_active(sb, type) sb_has_quota_enabled(sb, type)
#endif

#ifdef HAVE_SB_ANY_QUOTA_ACTIVE
#define ll_sb_any_quota_active(sb) sb_any_quota_active(sb)
#else
#define ll_sb_any_quota_active(sb) sb_any_quota_enabled(sb)
#endif

static inline int
ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
{
        if (sb->s_qcop->quota_on) {
                return sb->s_qcop->quota_on(sb, off, ver, name
#ifdef HAVE_QUOTA_ON_5ARGS
                                            , remount
#endif
                                           );
        } else {
                return -ENOSYS;
        }
}

static inline int ll_quota_off(struct super_block *sb, int off, int remount)
{
        if (sb->s_qcop->quota_off) {
                return sb->s_qcop->quota_off(sb, off
#ifdef HAVE_QUOTA_OFF_3ARGS
                                             , remount
#endif
                                            );
        } else {
                return -ENOSYS;
        }
}
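
/*
 * Usage sketch (the quota type, format, and file name are illustrative):
 *
 *      rc = ll_quota_on(sb, USRQUOTA, QFMT_VFS_V0, "lquota.user", 0);
 *
 * The remount argument is simply dropped on kernels whose quota_on()
 * and quota_off() methods do not take it.
 */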

#ifndef HAVE_BLK_QUEUE_LOG_BLK_SIZE /* added in 2.6.31 */
#define blk_queue_logical_block_size(q, sz) blk_queue_hardsect_size(q, sz)
#endif

#ifndef HAVE_VFS_DQ_OFF
# define ll_vfs_dq_init             DQUOT_INIT
# define ll_vfs_dq_drop             DQUOT_DROP
# define ll_vfs_dq_transfer         DQUOT_TRANSFER
# define ll_vfs_dq_off(sb, remount) DQUOT_OFF(sb)
#else
# define ll_vfs_dq_init             vfs_dq_init
# define ll_vfs_dq_drop             vfs_dq_drop
# define ll_vfs_dq_transfer         vfs_dq_transfer
# define ll_vfs_dq_off(sb, remount) vfs_dq_off(sb, remount)
#endif

#ifdef HAVE_BDI_INIT
#define ll_bdi_init(bdi)    bdi_init(bdi)
#define ll_bdi_destroy(bdi) bdi_destroy(bdi)
#else
#define ll_bdi_init(bdi)    0
#define ll_bdi_destroy(bdi) do { } while(0)
#endif

#ifdef HAVE_NEW_BACKING_DEV_INFO
# define ll_bdi_wb_cnt(bdi) ((bdi).wb_cnt)
#else
# define ll_bdi_wb_cnt(bdi) 1
#endif

#ifdef HAVE_BLK_QUEUE_MAX_SECTORS /* removed in rhel6 */
#define blk_queue_max_hw_sectors(q, sect) blk_queue_max_sectors(q, sect)
#endif

#ifndef HAVE_REQUEST_QUEUE_LIMITS
#define queue_max_sectors(rq)             ((rq)->max_sectors)
#define queue_max_hw_sectors(rq)          ((rq)->max_hw_sectors)
#define queue_max_phys_segments(rq)       ((rq)->max_phys_segments)
#define queue_max_hw_segments(rq)         ((rq)->max_hw_segments)
#endif

#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
#define blk_queue_max_segments(rq, seg)                      \
        do { blk_queue_max_phys_segments(rq, seg);           \
             blk_queue_max_hw_segments(rq, seg); } while (0)
#else
#define queue_max_phys_segments(rq)       queue_max_segments(rq)
#define queue_max_hw_segments(rq)         queue_max_segments(rq)
#endif

#ifndef HAVE_BI_HW_SEGMENTS
#define bio_hw_segments(q, bio) 0
#endif

#ifndef HAVE_PAGEVEC_LRU_ADD_FILE
#define pagevec_lru_add_file pagevec_lru_add
#endif

#ifdef HAVE_ADD_TO_PAGE_CACHE_LRU
#define ll_add_to_page_cache_lru(pg, mapping, off, gfp) \
        add_to_page_cache_lru(pg, mapping, off, gfp)
#define ll_pagevec_init(pv, cold)       do {} while (0)
#define ll_pagevec_add(pv, pg)          (0)
#define ll_pagevec_lru_add_file(pv)     do {} while (0)
#else
#define ll_add_to_page_cache_lru(pg, mapping, off, gfp) \
        add_to_page_cache(pg, mapping, off, gfp)
#define ll_pagevec_init(pv, cold)       pagevec_init((pv), (cold))
#define ll_pagevec_add(pv, pg)          pagevec_add((pv), (pg))
#define ll_pagevec_lru_add_file(pv)     pagevec_lru_add_file(pv)
#endif
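
/*
 * Usage sketch (page, mapping, and index are placeholders): populate the
 * page cache with a batch of pages, draining the pagevec when it fills and
 * once more at the end.  On kernels with add_to_page_cache_lru() the
 * pagevec calls compile away to no-ops:
 *
 *      struct pagevec lru_pvec;
 *
 *      ll_pagevec_init(&lru_pvec, 0);
 *      ...
 *      if (ll_add_to_page_cache_lru(page, mapping, index, GFP_KERNEL) == 0)
 *              if (!ll_pagevec_add(&lru_pvec, page))
 *                      ll_pagevec_lru_add_file(&lru_pvec);
 *      ...
 *      ll_pagevec_lru_add_file(&lru_pvec);
 */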

#endif /* __KERNEL__ */
#endif /* _LINUX_COMPAT25_H */