* Client support for unpatched kernels:
(see http://wiki.lustre.org/index.php?title=Patchless_Client)
2.6.9-42.0.10.EL (RHEL 4)
- 2.6.16 - 2.6.21 vanilla (kernel.org)
+ 2.6.16 - 2.6.22 vanilla (kernel.org)
* Recommended e2fsprogs version: 1.39.cfs7
* Note that reiserfs quotas are disabled on SLES 10 in this kernel.
* bug fixes
Severity : enhancement
Bugzilla : 12605
Description: add #ifdef HAVE_KERNEL_CONFIG_H
-Details : kernels from 2.6.21 not need include linux/config.h, but add include
-           linux/autoconf.h in commpiler command line.
+Details : kernels from 2.6.19 on no longer need to include linux/config.h;
+           linux/autoconf.h is passed on the compiler command line instead.
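
           As an aside (not part of the patch), the guard named in the
           Description above is typically used as in the sketch below;
           HAVE_KERNEL_CONFIG_H is the autoconf symbol this entry adds,
           defined when the target kernel still ships linux/config.h:

               /* Kernels before 2.6.19 provide linux/config.h; from 2.6.19
                * on, linux/autoconf.h is passed on the compiler command
                * line, so including config.h must be conditional. */
               #ifdef HAVE_KERNEL_CONFIG_H
               # include <linux/config.h>
               #endif
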
+Severity : enhancement
+Bugzilla : 12764
+Description: patchless client support for 2.6.22 kernel
+Details : 2.6.22 has only one visible change: the SLAB_CTOR_* constants are
+          removed. To handle this we drop the OS-dependent kmem_cache
+          interface and use the cfs_mem_cache API instead.
+
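For reference, a minimal sketch of the resulting portable pattern, using only
the calls visible in the hunks below (cfs_mem_cache_create takes the cache
name, object size, offset and flags; cfs_mem_cache_destroy returns an int
suitable for LASSERTF). struct foo and the foo_init/foo_fini names are
hypothetical, for illustration only:

    struct foo { int bar; };                /* hypothetical payload */
    static cfs_mem_cache_t *foo_slab;

    int foo_init(void)
    {
            /* portable replacement for kmem_cache_create() */
            foo_slab = cfs_mem_cache_create("foo_slab",
                                            sizeof(struct foo), 0, 0);
            return foo_slab == NULL ? -ENOMEM : 0;
    }

    void foo_fini(void)
    {
            /* destroy always returns int, so no
             * HAVE_KMEM_CACHE_DESTROY_INT ifdef is needed */
            int rc = cfs_mem_cache_destroy(foo_slab);
            LASSERTF(rc == 0, "couldn't destroy foo_slab\n");
    }
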
--------------------------------------------------------------------------------
2007-05-03 Cluster File Systems, Inc. <info@clusterfs.com>
struct list_head ras_read_beads;
};
-extern kmem_cache_t *ll_file_data_slab;
+extern cfs_mem_cache_t *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
struct ll_readahead_state fd_ras;
#define LLAP_MAGIC 98764321
-extern kmem_cache_t *ll_async_page_slab;
+extern cfs_mem_cache_t *ll_async_page_slab;
extern size_t ll_async_page_slab_size;
struct ll_async_page {
int llap_magic;
#include <lustre_param.h>
#include "llite_internal.h"
-kmem_cache_t *ll_file_data_slab;
+cfs_mem_cache_t *ll_file_data_slab;
LIST_HEAD(ll_super_blocks);
spinlock_t ll_sb_lock = SPIN_LOCK_UNLOCKED;
if (!ll_async_page_slab) {
ll_async_page_slab_size =
size_round(sizeof(struct ll_async_page)) + err;
- ll_async_page_slab = kmem_cache_create("ll_async_page",
- ll_async_page_slab_size,
- 0, 0, NULL, NULL);
+ ll_async_page_slab = cfs_mem_cache_create("ll_async_page",
+ ll_async_page_slab_size,
+ 0, 0);
if (!ll_async_page_slab)
GOTO(out_osc, -ENOMEM);
}
pos = n, n = pos->prev )
#endif
-kmem_cache_t *ll_async_page_slab = NULL;
+cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;
/* SYNCHRONOUS I/O to object storage for an inode */
printk(KERN_INFO "Lustre: Lustre Client File System; "
"info@clusterfs.com\n");
- ll_file_data_slab = kmem_cache_create("ll_file_data",
- sizeof(struct ll_file_data), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ ll_file_data_slab = cfs_mem_cache_create("ll_file_data",
+ sizeof(struct ll_file_data), 0,
+ SLAB_HWCACHE_ALIGN);
if (ll_file_data_slab == NULL)
return -ENOMEM;
ll_unregister_cache(&ll_cache_definition);
- rc = kmem_cache_destroy(ll_file_data_slab);
+ rc = cfs_mem_cache_destroy(ll_file_data_slab);
LASSERTF(rc == 0, "couldn't destroy ll_file_data slab\n");
if (ll_async_page_slab) {
- rc = kmem_cache_destroy(ll_async_page_slab);
+ rc = cfs_mem_cache_destroy(ll_async_page_slab);
LASSERTF(rc == 0, "couldn't destroy ll_async_page slab\n");
}
#include <lprocfs_status.h>
#include "llite_internal.h"
-static kmem_cache_t *ll_inode_cachep;
+static cfs_mem_cache_t *ll_inode_cachep;
static struct inode *ll_alloc_inode(struct super_block *sb)
{
OBD_SLAB_FREE(ptr, ll_inode_cachep, sizeof(*ptr));
}
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
-{
- struct ll_inode_info *lli = foo;
-
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
- inode_init_once(&lli->lli_vfs_inode);
-}
-
int ll_init_inodecache(void)
{
- ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
- sizeof(struct ll_inode_info),
- 0, SLAB_HWCACHE_ALIGN,
- init_once, NULL);
+ ll_inode_cachep = cfs_mem_cache_create("lustre_inode_cache",
+ sizeof(struct ll_inode_info),
+ 0, SLAB_HWCACHE_ALIGN);
if (ll_inode_cachep == NULL)
return -ENOMEM;
return 0;
void ll_destroy_inodecache(void)
{
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
int rc;
-
- rc = kmem_cache_destroy(ll_inode_cachep);
+
+ rc = cfs_mem_cache_destroy(ll_inode_cachep);
LASSERTF(rc == 0, "ll_inode_cache: not all structures were freed\n");
-#else
- kmem_cache_destroy(ll_inode_cachep);
-#endif
}
/* exported operations */
rc = ll_init_inodecache();
if (rc)
return -ENOMEM;
- ll_file_data_slab = kmem_cache_create("ll_file_data",
- sizeof(struct ll_file_data), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ ll_file_data_slab = cfs_mem_cache_create("ll_file_data",
+ sizeof(struct ll_file_data), 0,
+ SLAB_HWCACHE_ALIGN);
if (ll_file_data_slab == NULL) {
ll_destroy_inodecache();
return -ENOMEM;
static void __exit exit_lustre_lite(void)
{
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
int rc;
-#endif
lustre_register_client_fill_super(NULL);
lustre_register_client_process_config(NULL);
ll_unregister_cache(&ll_cache_definition);
ll_destroy_inodecache();
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
- rc = kmem_cache_destroy(ll_file_data_slab);
+ rc = cfs_mem_cache_destroy(ll_file_data_slab);
LASSERTF(rc == 0, "couldn't destroy ll_file_data slab\n");
-#else
- kmem_cache_destroy(ll_file_data_slab);
-#endif
+
if (ll_async_page_slab) {
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
- rc = kmem_cache_destroy(ll_async_page_slab);
+ rc = cfs_mem_cache_destroy(ll_async_page_slab);
LASSERTF(rc == 0, "couldn't destroy ll_async_page slab\n");
-#else
- kmem_cache_destroy(ll_async_page_slab);
-#endif
}
if (proc_lustre_fs_root)
#define fsfilt_ext3_journal_stop(handle) ext3_journal_stop(handle)
#endif
-static kmem_cache_t *fcb_cache;
+static cfs_mem_cache_t *fcb_cache;
struct fsfilt_cb_data {
struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
{
int rc;
- fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
- sizeof(struct fsfilt_cb_data), 0,
- 0, NULL, NULL);
+ fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
+ sizeof(struct fsfilt_cb_data), 0, 0);
if (!fcb_cache) {
CERROR("error allocating fsfilt journal callback cache\n");
GOTO(out, rc = -ENOMEM);
rc = fsfilt_register_ops(&fsfilt_ext3_ops);
if (rc) {
- int err = kmem_cache_destroy(fcb_cache);
+ int err = cfs_mem_cache_destroy(fcb_cache);
LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
}
out:
int rc;
fsfilt_unregister_ops(&fsfilt_ext3_ops);
- rc = kmem_cache_destroy(fcb_cache);
+ rc = cfs_mem_cache_destroy(fcb_cache);
LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
}
LASSERT(obd_device_cachep == NULL);
obd_device_cachep = cfs_mem_cache_create("ll_obd_dev_cache",
- sizeof(struct obd_device), 0, 0);
+ sizeof(struct obd_device),
+ 0, 0);
if (!obd_device_cachep)
GOTO(out, -ENOMEM);
LASSERT(obdo_cachep == NULL);
obdo_cachep = cfs_mem_cache_create("ll_obdo_cache", sizeof(struct obdo),
- 0, 0);
+ 0, 0);
if (!obdo_cachep)
GOTO(out, -ENOMEM);
LASSERT(import_cachep == NULL);
import_cachep = cfs_mem_cache_create("ll_import_cache",
- sizeof(struct obd_import),
- 0, 0);
+ sizeof(struct obd_import),
+ 0, 0);
if (!import_cachep)
GOTO(out, -ENOMEM);
#include "filter_internal.h"
static struct lvfs_callback_ops filter_lvfs_ops;
-kmem_cache_t *ll_fmd_cachep;
+cfs_mem_cache_t *ll_fmd_cachep;
static void filter_commit_cb(struct obd_device *obd, __u64 transno,
void *cb_data, int error)
if (obdfilter_created_scratchpad == NULL)
return -ENOMEM;
- ll_fmd_cachep = kmem_cache_create("ll_fmd_cache",
- sizeof(struct filter_mod_data),
- 0, 0, NULL, NULL);
+ ll_fmd_cachep = cfs_mem_cache_create("ll_fmd_cache",
+ sizeof(struct filter_mod_data),
+ 0, 0);
if (!ll_fmd_cachep)
GOTO(out, rc = -ENOMEM);
if (rc) {
int err;
- err = kmem_cache_destroy(ll_fmd_cachep);
+ err = cfs_mem_cache_destroy(ll_fmd_cachep);
LASSERTF(err == 0, "Cannot destroy ll_fmd_cachep: rc %d\n",err);
ll_fmd_cachep = NULL;
out:
PORTAL_SYMBOL_PUT(filter_quota_interface);
if (ll_fmd_cachep) {
- int rc = kmem_cache_destroy(ll_fmd_cachep);
+ int rc = cfs_mem_cache_destroy(ll_fmd_cachep);
LASSERTF(rc == 0, "Cannot destroy ll_fmd_cachep: rc %d\n", rc);
ll_fmd_cachep = NULL;
}
unsigned long default_iunit_sz = 5000; /* 5000 inodes */
unsigned long default_itune_ratio = 50; /* 50 percentage */
-kmem_cache_t *qunit_cachep = NULL;
+cfs_mem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;
spin_unlock(&qunit_hash_lock);
if (qunit_cachep) {
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
int rc;
- rc = kmem_cache_destroy(qunit_cachep);
+ rc = cfs_mem_cache_destroy(qunit_cachep);
LASSERTF(rc == 0, "couldn't destory qunit_cache slab\n");
-#else
- kmem_cache_destroy(qunit_cachep);
-#endif
qunit_cachep = NULL;
}
EXIT;
ENTRY;
LASSERT(qunit_cachep == NULL);
- qunit_cachep = kmem_cache_create("ll_qunit_cache",
- sizeof(struct lustre_qunit),
- 0, 0, NULL, NULL);
+ qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
+ sizeof(struct lustre_qunit),
+ 0, 0);
if (!qunit_cachep)
RETURN(-ENOMEM);
LASSERT(qinfo_cachep == NULL);
qinfo_cachep = cfs_mem_cache_create("osc_quota_info",
- sizeof(struct osc_quota_info),
- 0, 0);
+ sizeof(struct osc_quota_info),
+ 0, 0);
if (!qinfo_cachep)
RETURN(-ENOMEM);
static struct list_head lustre_dquot_hash[NR_DQHASH];
static spinlock_t dquot_hash_lock = SPIN_LOCK_UNLOCKED;
-kmem_cache_t *lustre_dquot_cachep;
+cfs_mem_cache_t *lustre_dquot_cachep;
int lustre_dquot_init(void)
{
ENTRY;
LASSERT(lustre_dquot_cachep == NULL);
- lustre_dquot_cachep = kmem_cache_create("lustre_dquot_cache",
- sizeof(struct lustre_dquot),
- 0, 0, NULL, NULL);
+ lustre_dquot_cachep = cfs_mem_cache_create("lustre_dquot_cache",
+ sizeof(struct lustre_dquot),
+ 0, 0);
if (!lustre_dquot_cachep)
return (-ENOMEM);
LASSERT(list_empty(lustre_dquot_hash + i));
}
if (lustre_dquot_cachep) {
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
int rc;
- rc = kmem_cache_destroy(lustre_dquot_cachep);
+ rc = cfs_mem_cache_destroy(lustre_dquot_cachep);
LASSERTF(rc == 0,"couldn't destroy lustre_dquot_cachep slab\n");
-#else
- kmem_cache_destroy(lustre_dquot_cachep);
-#endif
lustre_dquot_cachep = NULL;
}
EXIT;