Do not use kernel GFP masks directly when calling cfs_alloc;
instead, use the Lustre-specific allocation flags to control the
allocator's behavior.
cfs_alloc converts these Lustre flags into the corresponding
kernel GFP masks, then calls kmalloc with those masks.
Signed-off-by: Fan Yong <yong.fan@whamcloud.com>
Change-Id: I3189e143ff0cf65a08a1bdf2b8476ab151dc308e
Reviewed-on: http://review.whamcloud.com/3118
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
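For context, the conversion described above might look roughly like the
sketch below. This is an illustrative assumption rather than the verbatim
libcfs code: the helper name cfs_alloc_flags_to_gfp and the exact
flag-to-mask mapping are inferred from the flag names in this patch, and
flags not visible in the hunks (e.g. atomic allocations) are omitted.

/* Hypothetical sketch of the Lustre-flag -> GFP-mask conversion
 * performed inside cfs_alloc; the helper name and mapping are
 * assumptions inferred from this patch, not the verbatim libcfs code. */
static unsigned int cfs_alloc_flags_to_gfp(unsigned int flags)
{
	unsigned int mflags = 0;

	if (flags & CFS_ALLOC_WAIT)
		mflags |= __GFP_WAIT;	/* allocation is allowed to sleep */
	if (flags & CFS_ALLOC_IO)
		mflags |= __GFP_IO;	/* allocator may start disk I/O */
	if (flags & CFS_ALLOC_FS)
		mflags |= __GFP_FS;	/* allocator may call back into the FS */
	if (flags & CFS_ALLOC_ZERO)
		mflags |= __GFP_ZERO;	/* return zero-filled memory */
	return mflags;
}

void *cfs_alloc(size_t nr_bytes, unsigned int flags)
{
	return kmalloc(nr_bytes, cfs_alloc_flags_to_gfp(flags));
}

Under this mapping, CFS_ALLOC_NOFS (WAIT | IO) corresponds to GFP_NOFS and
CFS_ALLOC_KERNEL (WAIT | IO | FS) to GFP_KERNEL on kernels of this era,
which matches the substitutions in the hunks below.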
/* standard allocator flag combination */
CFS_ALLOC_STD = CFS_ALLOC_FS | CFS_ALLOC_IO,
CFS_ALLOC_USER = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO,
+ CFS_ALLOC_NOFS = CFS_ALLOC_WAIT | CFS_ALLOC_IO,
+ CFS_ALLOC_KERNEL = CFS_ALLOC_WAIT | CFS_ALLOC_IO | CFS_ALLOC_FS,
};
/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
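To illustrate when each new combination applies (the calling contexts
below are invented for this example): code that can be reached from
filesystem or memory-reclaim paths should pass CFS_ALLOC_NOFS so the
allocator cannot re-enter the filesystem, while ordinary process context
can use the full CFS_ALLOC_KERNEL mask.

/* Illustrative only; the surrounding contexts are hypothetical. */
void *jbuf = cfs_alloc(bufsize, CFS_ALLOC_NOFS);   /* under an FS lock: no __GFP_FS */
void *kbuf = cfs_alloc(bufsize, CFS_ALLOC_KERNEL); /* normal process context */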
Eoverflow:
m->op->stop(m, p);
cfs_free(m->buf);
- m->buf = cfs_alloc(m->size <<= 1, GFP_KERNEL | CFS_ALLOC_ZERO);
+ m->buf = cfs_alloc(m->size <<= 1, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO);
return !m->buf ? -ENOMEM : -EAGAIN;
}
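Note that CFS_ALLOC_ZERO composes with the new combinations like any
other flag bit; under the sketch above, the replacement line would expand
as follows (illustrative, assuming that mapping):

/* CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO
 *   -> __GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_ZERO
 *   == GFP_KERNEL | __GFP_ZERO	(sleeping, zero-filled allocation) */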
void *private;
struct seq_file *seq;
- private = cfs_alloc(psize, GFP_KERNEL | CFS_ALLOC_ZERO);
+ private = cfs_alloc(psize, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO);
if (private == NULL)
goto out;
for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
cfs_trace_data[i] =
cfs_alloc(sizeof(union cfs_trace_data_union) * \
- CFS_NR_CPUS, GFP_KERNEL);
+ CFS_NR_CPUS, CFS_ALLOC_KERNEL);
if (cfs_trace_data[i] == NULL)
goto out;
}
for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
cfs_trace_console_buffers[i][j] =
cfs_alloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
- GFP_KERNEL);
+ CFS_ALLOC_KERNEL);
if (cfs_trace_console_buffers[i][j] == NULL)
goto out;
static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
{
- struct ll_remote_perm *lrp;
+ struct ll_remote_perm *lrp;
- OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL);
- if (lrp)
- CFS_INIT_HLIST_NODE(&lrp->lrp_list);
- return lrp;
+ OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, CFS_ALLOC_KERNEL);
+ if (lrp)
+ CFS_INIT_HLIST_NODE(&lrp->lrp_list);
+ return lrp;
}
static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
(char *)&ddquot[i], dqblk_sz))
continue;
- OBD_ALLOC_GFP(dqid, sizeof(*dqid), GFP_NOFS);
+ OBD_ALLOC_GFP(dqid, sizeof(*dqid), CFS_ALLOC_NOFS);
if (!dqid)
GOTO(out_free, rc = -ENOMEM);
OBD_ALLOC_GFP(filter->fo_iobuf_pool, OSS_THREADS_MAX * sizeof(*pool),
- GFP_NOFS);
+ CFS_ALLOC_NOFS);
if (filter->fo_iobuf_pool == NULL)
RETURN(-ENOMEM);