GFP_IOFS was intended to be shorthand for clearing two flags, not a set
of allocation flags. Lustre is now the only user of this flag
combination, and there appears to be no reason why Lustre had to be
protected from reclaim stalls. As none of the call sites appear to be
atomic, this patch simply deletes GFP_IOFS and converts Lustre to using
GFP_KERNEL, GFP_NOFS or GFP_NOIO as appropriate.
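
For context, the flag composition in <linux/gfp.h> of this era makes the
problem visible. The sketch below is illustrative only, with the
underlying bit values elided (around v4.4 the __GFP_WAIT bit was later
reworked into __GFP_RECLAIM):

#define GFP_NOIO	(__GFP_WAIT)                       /* may sleep; no block I/O */
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)            /* may sleep and issue I/O;
                                                            * no FS recursion */
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS) /* normal sleeping allocation */
#define GFP_IOFS	(__GFP_IO | __GFP_FS)              /* deleted by this patch */

Because GFP_IOFS omitted __GFP_WAIT, allocations using it could neither
sleep nor enter direct reclaim, so they behaved like atomic allocations
by accident; that is the "protection" from reclaim stalls noted above.
Since these call sites may sleep, the full masks are the correct
replacements.
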
Linux-commit: 40113370836e8e79befa585277296ed42781ef31
Change-Id: Ie17d225583f9b48ec2558a85bb6e4e9957010c14
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: http://review.whamcloud.com/18956
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
# include <linux/mm_inline.h>
#endif
-/* GFP_IOFS was added in 2.6.33 kernel */
-#ifndef GFP_IOFS
-#define GFP_IOFS (__GFP_IO | __GFP_FS)
-#endif
-
/*
* Shrinker
*/
if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
return -EINVAL;
- *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
+ *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
if (*str == NULL)
return -ENOMEM;
for (i = 0; i < npages; i++) {
page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
- __GFP_ZERO | GFP_IOFS);
+ GFP_KERNEL | __GFP_ZERO);
if (page == NULL) {
while (--i >= 0)
__free_page(rb->rb_kiov[i].kiov_page);
bulk->bk_iovs[i].kiov_offset = 0;
bulk->bk_iovs[i].kiov_len = len;
bulk->bk_iovs[i].kiov_page =
- alloc_page(GFP_IOFS);
+ alloc_page(GFP_KERNEL);
if (bulk->bk_iovs[i].kiov_page == NULL) {
lstcon_rpc_put(*crpc);
struct page *pg;
int nob;
- pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_IOFS);
+ pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_KERNEL);
if (pg == NULL) {
CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
srpc_free_bulk(bk);
OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep,
REMOTE_PERM_HASHSIZE * sizeof(*hash),
- GFP_IOFS);
+ GFP_NOFS);
if (!hash)
return NULL;
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++) {
- pages[i] = alloc_page(GFP_IOFS);
+ pages[i] = alloc_page(GFP_KERNEL);
if (pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
if (units_in_page == 0) {
/* allocate a new page */
- pages[index] = alloc_page(GFP_IOFS);
+ pages[index] = alloc_page(GFP_KERNEL);
if (pages[index] == NULL) {
rc = -ENOMEM;
break;
struct niobuf_local *lb, int cmd, int *left)
{
gfp_t gfp_mask = (ostid_id(&obj->ioo_oid) & 1) ?
- GFP_HIGHUSER : GFP_IOFS;
+ GFP_HIGHUSER : GFP_KERNEL;
int ispersistent = ostid_id(&obj->ioo_oid) == ECHO_PERSISTENT_OBJID;
int debug_setup = (!ispersistent &&
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
for (i = 0; i < ECHO_PERSISTENT_PAGES; i++) {
gfp_t gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ?
- GFP_IOFS : GFP_HIGHUSER;
+ GFP_KERNEL : GFP_HIGHUSER;
pg = alloc_page(gfp_mask);
if (pg == NULL) {
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
(oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
- gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_IOFS : GFP_HIGHUSER;
+ gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER;
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
{
struct osc_extent *ext;
- OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_IOFS);
+ OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_NOFS);
if (ext == NULL)
return NULL;
if (opts != NULL && strstr(opts, "force_over_256tb") != NULL)
force_over_256tb = 1;
- __page = alloc_page(GFP_IOFS);
+ __page = alloc_page(GFP_KERNEL);
if (__page == NULL)
GOTO(out, rc = -ENOMEM);
page = (unsigned long)page_address(__page);
{
ENTRY;
- tgt_page_to_corrupt = alloc_page(GFP_IOFS);
+ tgt_page_to_corrupt = alloc_page(GFP_KERNEL);
tgt_key_init_generic(&tgt_thread_key, NULL);
lu_context_key_register_many(&tgt_thread_key, NULL);