static inline struct ll_sb_info *ll_s2sbi(struct super_block *sb)
{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+ return (struct ll_sb_info *)(sb->s_fs_info);
+#else
return (struct ll_sb_info *)(sb->u.generic_sbp);
+#endif
}
static inline struct lustre_handle *ll_s2obdconn(struct super_block *sb)
int (*bp_cb)(struct ptlrpc_bulk_page *);
};
+
struct ptlrpc_bulk_desc {
int bd_flags;
struct ptlrpc_connection *bd_connection;
__u32 bd_page_count;
atomic_t bd_refcount;
void *bd_desc_private;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+ struct work_struct bd_queue;
+#else
struct tq_struct bd_queue;
+#endif
ptl_md_t bd_md;
ptl_handle_md_t bd_md_h;
lock_kernel();
daemonize();
- spin_lock_irq(&current->sigmask_lock);
- sigfillset(&current->blocked);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+ sigfillset(&current->blocked);
recalc_sigpending();
#else
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
recalc_sigpending(current);
-#endif
spin_unlock_irq(&current->sigmask_lock);
+#endif
sprintf(current->comm, "ldlm_test");
unlock_kernel();
lock_kernel();
daemonize();
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
spin_lock_irq(&current->sigmask_lock);
sigfillset(&current->blocked);
our_recalc_sigpending(current);
spin_unlock_irq(&current->sigmask_lock);
+#else
+ sigfillset(&current->blocked);
+ our_recalc_sigpending(current);
+#endif
sprintf(current->comm, "lustre_commitcbd");
unlock_kernel();
CDEBUG(D_INODE, "invalidating obdo/inode %ld\n", inode->i_ino);
/* FIXME: do something better than throwing away everything */
//down(&inode->i_sem);
- invalidate_inode_pages(inode);
+ ll_invalidate_inode_pages(inode);
//up(&inode->i_sem);
break;
default:
namelen, name, rc);
RETURN(ERR_PTR(rc));
}
- invalidate_inode_pages(dir);
+ ll_invalidate_inode_pages(dir);
request = it->it_data;
body = lustre_msg_buf(request->rq_repmsg, 1);
lic.lic_lmm = NULL;
ext2_inc_count(inode);
atomic_inc(&inode->i_count);
d_instantiate(dentry, inode);
- invalidate_inode_pages(dir);
+ ll_invalidate_inode_pages(dir);
RETURN(0);
}
if (it && it->it_disposition) {
rc = it->it_status;
- invalidate_inode_pages(dir);
+ ll_invalidate_inode_pages(dir);
if (rc)
GOTO(out, rc);
GOTO(out_dec, 0);
/* AED: not sure if needed - directory lock revocation should do it
* in the case where the client has cached it for non-intent ops.
*/
- invalidate_inode_pages(dir);
+ ll_invalidate_inode_pages(dir);
inode->i_ctime = dir->i_ctime;
out_dec:
tgt_inode->i_ctime = CURRENT_TIME;
tgt_inode->i_nlink--;
}
- invalidate_inode_pages(old_dir);
- invalidate_inode_pages(new_dir);
+ ll_invalidate_inode_pages(old_dir);
+ ll_invalidate_inode_pages(new_dir);
GOTO(out, err = it->it_status);
}
generate_random_uuid(uuid);
class_uuid_unparse(uuid, sbi->ll_sb_uuid);
- sb->u.generic_sbp = sbi;
+ sb->s_fs_info = sbi;
ll_options(data, &osc, &mdc, &sbi->ll_flags);
if (S_ISDIR(inode->i_mode)) {
CDEBUG(D_INODE, "invalidating inode %ld\n",
inode->i_ino);
- invalidate_inode_pages(inode);
+ ll_invalidate_inode_pages(inode);
}
break;
default:
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lustre_dlm.h>
+#include <linux/workqueue.h>
#include <linux/lustre_mds.h> /* for mds_objid */
#include <linux/obd_ost.h>
#include <linux/obd_lov.h>
/* We can't kunmap the desc from interrupt context, so we do it from
* the bottom half above. */
- INIT_TQUEUE(&desc->bd_queue, 0, 0);
- PREPARE_TQUEUE(&desc->bd_queue, unmap_and_decref_bulk_desc, desc);
- schedule_task(&desc->bd_queue);
+ prepare_work(&desc->bd_queue, unmap_and_decref_bulk_desc, desc);
+ schedule_work(&desc->bd_queue);
EXIT;
}
lock_kernel();
daemonize();
- spin_lock_irq(&current->sigmask_lock);
- sigfillset(&current->blocked);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+ sigfillset(&current->blocked);
recalc_sigpending();
#else
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
recalc_sigpending(current);
-#endif
spin_unlock_irq(&current->sigmask_lock);
+#endif
sprintf(current->comm, "lustre_recovd");
unlock_kernel();
lock_kernel();
daemonize();
- spin_lock_irq(&current->sigmask_lock);
- sigfillset(&current->blocked);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+ sigfillset(&current->blocked);
recalc_sigpending();
#else
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
recalc_sigpending(current);
-#endif
spin_unlock_irq(&current->sigmask_lock);
+#endif
strcpy(current->comm, data->name);
unlock_kernel();