#error sorry, lustre requires at least 2.5.69
#endif
-#include <linux/portals_compat25.h>
+#include <libcfs/linux/portals_compat25.h>
+
+/*
+ * groups_info related stuff
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
+
+#define NGROUPS_SMALL NGROUPS
+#define NGROUPS_PER_BLOCK ((int)(EXEC_PAGESIZE / sizeof(gid_t)))
+struct group_info {
+ int ngroups;
+ atomic_t usage;
+ gid_t small_block[NGROUPS_SMALL];
+ int nblocks;
+ gid_t *blocks[0];
+};
+#define current_ngroups current->ngroups
+
+struct group_info *groups_alloc(int gidsetsize);
+void groups_free(struct group_info *ginfo);
+int groups_search(struct group_info *ginfo, gid_t grp);
+
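+/* reference counting for group_info; the final put frees it */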
+#define get_group_info(group_info) \
+ do { \
+ atomic_inc(&(group_info)->usage); \
+ } while (0)
+
+#define put_group_info(group_info) \
+ do { \
+ if (atomic_dec_and_test(&(group_info)->usage)) \
+ groups_free(group_info); \
+ } while (0)
+
+#define groups_sort(gi) do {} while (0)
+#define GROUP_AT(gi, i) ((gi)->small_block[(i)])
+
+static inline int cleanup_group_info(void)
+{
+ /* Get rid of unneeded supplementary groups */
+ current->ngroups = 0;
+ memset(current->groups, 0, sizeof(current->groups));
+ return 0;
+}
+
+#else /* >= 2.6.4 */
+
+#define current_ngroups current->group_info->ngroups
+
+void groups_sort(struct group_info *ginfo);
+int groups_search(struct group_info *ginfo, gid_t grp);
+
+static inline int cleanup_group_info(void)
+{
+ struct group_info *ginfo;
+
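+        /* install an empty group set to drop all supplementary groups */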
+ ginfo = groups_alloc(0);
+ if (!ginfo)
+ return -ENOMEM;
+
+ set_current_groups(ginfo);
+ put_group_info(ginfo);
+
+ return 0;
+}
+#endif /* end of groups_info stuff */
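+
+/*
+ * Either way cleanup_group_info() has the same contract: drop all
+ * supplementary groups, returning 0 or -ENOMEM.  A minimal usage
+ * sketch (hypothetical caller):
+ *
+ *         if (cleanup_group_info())
+ *                 CERROR("cannot drop supplementary groups\n");
+ */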
+
+/*
+ * This define is from the same namespace as the other lookup flags in
+ * Linux and will go away once COBD medium switching is done in a more
+ * correct manner.  For now it is a dirty hack: the flag lets the MDC
+ * layer know that it does not have to check whether the requested id
+ * is the same as the one received from the MDS in mdc_intent_lock()
+ * --umka
+ */
+#define LOOKUP_COBD 4096
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+/* New (actually old) intent naming */
+#define lookup_intent open_intent
+
+/* And internals */
+#define it_flags flags
+#define it_op op
+#define it_magic magic
+#define it_op_release op_release
+#define it_create_mode create_mode
+
/*
* OBD need working random driver, thus all our
* initialization routines must be called after device
* driver initialization
*/
+#ifndef MODULE
+#undef module_init
#define module_init(a) late_initcall(a)
+#endif
/* XXX our code should be using the 2.6 calls, not the other way around */
#define TryLockPage(page) TestSetPageLocked(page)
-#define filemap_fdatasync(mapping) filemap_fdatawrite(mapping)
#define Page_Uptodate(page) PageUptodate(page)
#define KDEVT_INIT(val) (val)
#define LTIME_S(time) (time.tv_sec)
#define ll_path_lookup path_lookup
-#define ll_permission permission
+#define ll_permission(inode,mask,nd) permission(inode,mask,nd)
#define ll_pgcache_lock(mapping) spin_lock(&mapping->page_lock)
#define ll_pgcache_unlock(mapping) spin_unlock(&mapping->page_lock)
+#define ll_call_writepage(inode, page) \
+ (inode)->i_mapping->a_ops->writepage(page, NULL)
+#define ll_invalidate_inode_pages(inode) \
+ invalidate_inode_pages((inode)->i_mapping)
+#define ll_truncate_complete_page(page) \
+ truncate_complete_page(page->mapping, page)
#define ll_vfs_create(a,b,c,d) vfs_create(a,b,c,d)
{
LASSERT(current->signal != NULL);
current->signal->session = 1;
- current->signal->pgrp = 1;
+        if (current->group_leader)
+                current->group_leader->signal->pgrp = 1;
+        else
+                CERROR("current task has no group leader\n");
current->signal->tty = NULL;
}
-#define rb_node_s rb_node
-#define rb_root_s rb_root
-typedef struct rb_root_s rb_root_t;
-typedef struct rb_node_s rb_node_t;
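+/*
+ * Attach/detach the Lustre per-page data (llap) via page->private,
+ * pinning the page and marking it PagePrivate while attached.
+ */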
+#define __set_page_ll_data(page, llap) \
+ do { \
+ page_cache_get(page); \
+ SetPagePrivate(page); \
+ page->private = (unsigned long)llap; \
+ } while (0)
+#define __clear_page_ll_data(page) \
+ do { \
+ ClearPagePrivate(page); \
+ page_cache_release(page); \
+ page->private = 0; \
+ } while(0)
+
+#ifndef smp_num_cpus
+#define smp_num_cpus num_online_cpus()
+#endif
+
+#define kiobuf bio
+
+#include <linux/proc_fs.h>
#else /* 2.4.. */
#define ll_vfs_create(a,b,c,d) vfs_create(a,b,c)
-#define ll_permission(a,b,c) permission(a,b)
+#define ll_permission(inode,mask,nd) permission(inode,mask)
#define ILOOKUP(sb, ino, test, data) ilookup4(sb, ino, test, data);
#define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
#define ll_dev_t int
#define try_module_get __MOD_INC_USE_COUNT
#define module_put __MOD_DEC_USE_COUNT
#define LTIME_S(time) (time)
-#ifndef CONFIG_RH_2_4_20
+#if !defined(CONFIG_RH_2_4_20) && !defined(cpu_online)
#define cpu_online(cpu) (cpu_online_map & (1<<cpu))
#endif
-static inline int ll_path_lookup(const char *path, unsigned flags,
- struct nameidata *nd)
+static inline int ll_path_lookup(const char *path, unsigned flags,
+ struct nameidata *nd)
{
int error = 0;
if (path_init(path, flags, nd))
error = path_walk(path, nd);
return error;
}
-#define ll_permission(a,b,c) permission(a,b)
typedef long sector_t;
#define ll_pgcache_lock(mapping) spin_lock(&pagecache_lock)
#define ll_pgcache_unlock(mapping) spin_unlock(&pagecache_lock)
+#define ll_call_writepage(inode, page) \
+ (inode)->i_mapping->a_ops->writepage(page)
+#define filemap_fdatawrite(mapping) filemap_fdatasync(mapping)
+#define ll_invalidate_inode_pages(inode) invalidate_inode_pages(inode)
+#define ll_truncate_complete_page(page) truncate_complete_page(page)
static inline void __d_drop(struct dentry *dentry)
{
current->tty = NULL;
}
-#ifndef conditional_schedule
-#define conditional_schedule() if (unlikely(need_resched())) schedule()
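+/* cond_resched() replaces the old conditional_schedule(); supply it on
+ * kernels that lack it */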
+#ifndef HAVE_COND_RESCHED
+static inline void cond_resched(void)
+{
+ if (unlikely(need_resched())) {
+ set_current_state(TASK_RUNNING);
+ schedule();
+ }
+}
+#endif
+
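+/* 2.6-style mapping_mapped(): true when any process has this mapping
+ * mmap()ed, shared or private */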
+static inline int mapping_mapped(struct address_space *mapping)
+{
+ if (mapping->i_mmap_shared)
+ return 1;
+ if (mapping->i_mmap)
+ return 1;
+ return 0;
+}
+
+/* to find proc_dir_entry from inode. 2.6 has native one -bzzz */
+#ifndef HAVE_PDE
+#define PDE(ii) ((ii)->u.generic_ip)
+#endif
+
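+/* 2.4 keeps llap directly in page->private and has no writeback bit,
+ * so the writeback helpers collapse to no-ops */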
+#define __set_page_ll_data(page, llap) page->private = (unsigned long)llap
+#define __clear_page_ll_data(page) page->private = 0
+#define PageWriteback(page) 0
+#define set_page_writeback(page) do {} while (0)
+#define end_page_writeback(page) do {} while (0)
+
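+/* zap_page_range() takes a vma when ZAP_PAGE_RANGE_VMA is defined and
+ * an mm otherwise */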
+#ifdef ZAP_PAGE_RANGE_VMA
+#define ll_zap_page_range(vma, addr, len) zap_page_range(vma, addr, len)
+#else
+#define ll_zap_page_range(vma, addr, len) zap_page_range(vma->vm_mm, addr, len)
#endif
#endif /* end of 2.4 compat macros */
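+/*
+ * With per-mapping page lists (HAVE_PAGE_LIST, 2.4-style VM) a mapping
+ * has pages iff one of its dirty/clean/locked lists is non-empty;
+ * newer kernels just keep an nrpages count.
+ */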
+#ifdef HAVE_PAGE_LIST
+static inline int mapping_has_pages(struct address_space *mapping)
+{
+ int rc = 1;
+
+ ll_pgcache_lock(mapping);
+ if (list_empty(&mapping->dirty_pages) &&
+ list_empty(&mapping->clean_pages) &&
+ list_empty(&mapping->locked_pages)) {
+ rc = 0;
+ }
+ ll_pgcache_unlock(mapping);
+
+ return rc;
+}
+
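+/* 2.6-style clear_page_dirty_for_io(): clear the dirty bit and move
+ * the page onto the mapping's locked list before writeout */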
+static inline int clear_page_dirty_for_io(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+
+ if (page->mapping && PageDirty(page)) {
+ ClearPageDirty(page);
+ ll_pgcache_lock(mapping);
+ list_del(&page->list);
+ list_add(&page->list, &mapping->locked_pages);
+ ll_pgcache_unlock(mapping);
+ return 1;
+ }
+ return 0;
+}
+#else
+static inline int mapping_has_pages(struct address_space *mapping)
+{
+ return mapping->nrpages > 0;
+}
+#endif
+
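+/* 2.6.7 moved the dentry flags under d_lock and added a mode argument
+ * to vfs_symlink() */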
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
+#define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
+#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
+#else
+#define ll_set_dflags(dentry, flags) do { \
+ spin_lock(&dentry->d_lock); \
+ dentry->d_flags |= flags; \
+ spin_unlock(&dentry->d_lock); \
+ } while(0)
+#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
+#endif
+
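+/* container_of() for kernels that predate it */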
+#ifndef container_of
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+#endif
+
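+/*
+ * MDS orphan handling serializes on i_alloc_sem where the kernel
+ * provides it and falls back to i_sem otherwise; MDS_PACK_MD_LOCK
+ * tells callers which variant is in use.
+ */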
+#ifdef HAVE_I_ALLOC_SEM
+#define UP_WRITE_I_ALLOC_SEM(i) do { up_write(&(i)->i_alloc_sem); } while (0)
+#define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while(0)
+#define LASSERT_MDS_ORPHAN_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
+
+#define UP_READ_I_ALLOC_SEM(i) do { up_read(&(i)->i_alloc_sem); } while (0)
+#define DOWN_READ_I_ALLOC_SEM(i) do { down_read(&(i)->i_alloc_sem); } while (0)
+#define LASSERT_MDS_ORPHAN_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
+#define MDS_PACK_MD_LOCK 1
+#else
+#define UP_READ_I_ALLOC_SEM(i) do { up(&(i)->i_sem); } while (0)
+#define DOWN_READ_I_ALLOC_SEM(i) do { down(&(i)->i_sem); } while (0)
+#define LASSERT_MDS_ORPHAN_READ_LOCKED(i) LASSERT(down_trylock(&(i)->i_sem) != 0)
+
+#define UP_WRITE_I_ALLOC_SEM(i) do { up(&(i)->i_sem); } while (0)
+#define DOWN_WRITE_I_ALLOC_SEM(i) do { down(&(i)->i_sem); } while (0)
+#define LASSERT_MDS_ORPHAN_WRITE_LOCKED(i) LASSERT(down_trylock(&(i)->i_sem) != 0)
+#define MDS_PACK_MD_LOCK 0
+#endif
+
#endif /* __KERNEL__ */
#endif /* _COMPAT25_H */