/*
 * Patch hunk ('-' removed, '+' added): CPT-aware allocator prototypes.
 * The allocation-flags parameter is converted from 'unsigned int' to
 * gfp_t, the kernel's dedicated allocation-flag type, so static type
 * checking can catch flag misuse.  cfs_cpt_vzalloc takes no flags and
 * is unchanged.
 */
extern void *cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
- size_t nr_bytes, unsigned int flags);
+ size_t nr_bytes, gfp_t flags);
extern void *cfs_cpt_vzalloc(struct cfs_cpt_table *cptab, int cpt,
size_t nr_bytes);
extern struct page *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
- int cpt, unsigned int flags);
+ int cpt, gfp_t flags);
extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep,
struct cfs_cpt_table *cptab,
- int cpt, unsigned int flags);
+ int cpt, gfp_t flags);
/*
* Shrinker
/*
 * Patch hunk: cfs_cpt_malloc() definition brought in line with the new
 * prototype — 'flags' becomes gfp_t.  The flags are passed straight
 * through to kmalloc_node().  NOTE(review): this hunk is truncated;
 * the function's closing brace and the intervening cfs_cpt_vzalloc()
 * body are elided before the EXPORT_SYMBOL line below.
 */
void *
cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
- size_t nr_bytes, unsigned int flags)
+ size_t nr_bytes, gfp_t flags)
{
return kmalloc_node(nr_bytes, flags,
cfs_cpt_spread_node(cptab, cpt));
EXPORT_SYMBOL(cfs_cpt_vzalloc);
/*
 * Patch hunk: cfs_page_cpt_alloc() — 'flags' converted to gfp_t.
 * The flags are forwarded unchanged to alloc_pages_node() (order 0,
 * i.e. a single page) on the node chosen by cfs_cpt_spread_node().
 */
struct page *
-cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, unsigned int flags)
+cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, gfp_t flags)
{
return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
}
/*
 * Patch hunk: cfs_mem_cache_cpt_alloc() — 'flags' converted to gfp_t
 * to match kmem_cache_alloc_node(), which it forwards the flags to.
 */
void *
cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
- int cpt, unsigned int flags)
+ int cpt, gfp_t flags)
{
return kmem_cache_alloc_node(cachep, flags,
cfs_cpt_spread_node(cptab, cpt));
}
/*
 * Patch hunk: kportal_memhog_alloc() — 'flags' converted from int to
 * gfp_t; the replacement also drops the space before '(' (kernel
 * coding style) and rewraps the now-longer signature.  Body truncated
 * by the hunk boundary after the local declarations.
 */
int
-kportal_memhog_alloc (struct libcfs_device_userstate *ldu, int npages, int flags)
+kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
+ gfp_t flags)
{
struct page **level0p;
struct page **level1p;
/*
 * Patch hunk: cfs_tage_alloc() — the 'gfp' parameter becomes gfp_t
 * instead of int.  The surrounding lines (the list_entry() return
 * above and the list_move_tail() below) are tails of neighbouring
 * functions left as hunk context; both are truncated here.
 */
return list_entry(list, struct cfs_trace_page, linkage);
}
-static struct cfs_trace_page *cfs_tage_alloc(int gfp)
+static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
struct page *page;
struct cfs_trace_page *tage;
list_move_tail(&tage->linkage, queue);
}
/*
 * Patch hunks: cfs_trace_refill_stock() — 'gfp' converted from int to
 * gfp_t in both the definition (first hunk, body truncated) and the
 * header declaration (second hunk), keeping the two in sync.
 */
-int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
struct list_head *stock)
{
int i;
put_cpu();
}
-int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
struct list_head *stock);
/*
 * Patch hunks: LDLM pool API — the 'gfp_mask' argument of the
 * po_shrink operation callback and of the ldlm_pool_shrink()
 * prototype is converted from 'unsigned int' to gfp_t; both
 * replacements also collapse onto a single line now that they fit.
 */
/** Recalculate pool \a pl usage */
int (*po_recalc)(struct ldlm_pool *pl);
/** Cancel at least \a nr locks from pool \a pl */
- int (*po_shrink)(struct ldlm_pool *pl, int nr,
- unsigned int gfp_mask);
+ int (*po_shrink)(struct ldlm_pool *pl, int nr, gfp_t gfp_mask);
int (*po_setup)(struct ldlm_pool *pl, int limit);
};
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
int idx, ldlm_side_t client);
-int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
- unsigned int gfp_mask);
+int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask);
void ldlm_pool_fini(struct ldlm_pool *pl);
int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
int ldlm_pool_recalc(struct ldlm_pool *pl);
/*
 * Patch hunks: the server-side (ldlm_srv_pool_shrink), client-side
 * (ldlm_cli_pool_shrink) implementations of the po_shrink callback,
 * and the ldlm_pool_shrink() dispatcher all switch 'gfp_mask' from
 * 'unsigned int' to gfp_t, matching the updated ldlm_pool_ops
 * signature.  All three bodies are truncated by the hunk boundaries.
 */
* locks smaller in next 10h.
*/
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
- int nr, unsigned int gfp_mask)
+ int nr, gfp_t gfp_mask)
{
__u32 limit;
* passed \a pl according to \a nr and \a gfp_mask.
*/
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
- int nr, unsigned int gfp_mask)
+ int nr, gfp_t gfp_mask)
{
struct ldlm_namespace *ns;
int unused;
* Pool shrink wrapper. Will call either client or server pool recalc callback
* depending what pool \a pl is used.
*/
-int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
- unsigned int gfp_mask)
+int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
int cancel = 0;
/*
 * Patch hunks: the pools-wide shrinker helpers — ldlm_pools_count(),
 * ldlm_pools_scan(), ldlm_pools_shrink() — take gfp_t instead of
 * 'unsigned int' for gfp_mask.  The final hunk updates the stub
 * ldlm_pool_shrink() (presumably the no-shrinker build variant — the
 * guarding #ifdef is outside this view) to the same signature.  All
 * bodies are truncated by the hunk boundaries.
 */
* count locks from all namespaces (if possible). Returns number of
* cached locks.
*/
-static unsigned long ldlm_pools_count(ldlm_side_t client, unsigned int gfp_mask)
+static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
int total = 0, nr_ns;
struct ldlm_namespace *ns;
}
static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr,
- unsigned int gfp_mask)
+ gfp_t gfp_mask)
{
unsigned long freed = 0;
int tmp, nr_ns;
* cancel approximately equal amount of locks to keep balancing.
*/
static int ldlm_pools_shrink(ldlm_side_t client, int nr,
- unsigned int gfp_mask)
+ gfp_t gfp_mask)
{
unsigned int total = 0;
EXPORT_SYMBOL(ldlm_pool_recalc);
int ldlm_pool_shrink(struct ldlm_pool *pl,
- int nr, unsigned int gfp_mask)
+ int nr, gfp_t gfp_mask)
{
return 0;
}
/*
 * Patch hunks: the loop-device state — the 'old_gfp_mask' struct field
 * and the local 'gfp' that is initialized from it in the (truncated)
 * I/O helper below — both become gfp_t, so the saved mask keeps its
 * proper type end to end.
 */
struct block_device *lo_device;
unsigned lo_blocksize;
- int old_gfp_mask;
+ gfp_t old_gfp_mask;
spinlock_t lo_lock;
struct bio *lo_bio;
int count)
{
struct file *filp = lo->lo_backing_file;
- int gfp = lo->old_gfp_mask;
+ gfp_t gfp = lo->old_gfp_mask;
if (lo->lo_state != LLOOP_BOUND)
return -ENXIO;
/*
 * Patch hunks: four local 'gfp_mask' declarations converted from
 * int/unsigned int to gfp_t.  The second and third hunks show the
 * values actually stored — GFP_HIGHUSER vs GFP_IOFS selected by an
 * object-id / page-index parity test — which are GFP flag constants,
 * confirming gfp_t is the correct type.  Every enclosing function
 * body is truncated by the hunk boundaries.
 */
struct page *vmpage;
struct cl_page *page;
enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
int rc = 0;
const char *msg = NULL;
struct niobuf_remote *nb, int *pages,
struct niobuf_local *lb, int cmd, int *left)
{
- int gfp_mask = (ostid_id(&obj->ioo_oid) & 1) ?
+ gfp_t gfp_mask = (ostid_id(&obj->ioo_oid) & 1) ?
GFP_HIGHUSER : GFP_IOFS;
int ispersistent = ostid_id(&obj->ioo_oid) == ECHO_PERSISTENT_OBJID;
int debug_setup = (!ispersistent &&
int i;
for (i = 0; i < ECHO_PERSISTENT_PAGES; i++) {
- int gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ?
+ gfp_t gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ?
GFP_IOFS : GFP_HIGHUSER;
OBD_PAGE_ALLOC(pg, gfp_mask);
int i;
int rc;
int verify;
- int gfp_mask;
+ gfp_t gfp_mask;
int brw_flags = 0;
ENTRY;
/*
 * Patch hunks: ptlrpc_request_cache_alloc() — 'flags' converted from
 * int to gfp_t in both the definition (body truncated) and the header
 * prototype, keeping them in sync.  The kmem_cache_destroy() line
 * above is the tail of the neighbouring cache-fini function, kept as
 * hunk context.
 */
kmem_cache_destroy(request_cache);
}
-struct ptlrpc_request *ptlrpc_request_cache_alloc(int flags)
+struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
{
struct ptlrpc_request *req;
unsigned type, unsigned portal);
int ptlrpc_request_cache_init(void);
void ptlrpc_request_cache_fini(void);
-struct ptlrpc_request *ptlrpc_request_cache_alloc(int flags);
+struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
void ptlrpc_request_cache_free(struct ptlrpc_request *req);
void ptlrpc_init_xid(void);