* default allocator
*/
#define LIBCFS_ALLOC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, __GFP_IO)
+ LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)
/**
 * non-sleeping allocator
 */
/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
- LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
+ LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
#define LIBCFS_FREE(ptr, size) \
do { \
for (i = 0; i < LNET_MAX_IOV; i++) {
src[i].kiov_offset = 0;
src[i].kiov_len = PAGE_SIZE;
- src[i].kiov_page = alloc_page(__GFP_IO | __GFP_FS | __GFP_ZERO);
+ src[i].kiov_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (src[i].kiov_page == NULL) {
CERROR("couldn't allocate page %d\n", i);
dest[i].kiov_offset = 0;
dest[i].kiov_len = PAGE_SIZE;
- dest[i].kiov_page = alloc_page(__GFP_IO | __GFP_FS | __GFP_ZERO);
+ dest[i].kiov_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (dest[i].kiov_page == NULL) {
CERROR("couldn't allocate page %d\n", i);
for (i = 0; i < npages; i++) {
p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
- __GFP_IO);
+ GFP_NOFS);
if (p->ibp_pages[i] == NULL) {
CERROR("Can't allocate page %d of %d\n", i, npages);
kiblnd_free_pages(p);
#define OBDO_ALLOC(ptr) \
do { \
- OBD_SLAB_ALLOC_PTR_GFP((ptr), obdo_cachep, __GFP_IO); \
+ OBD_SLAB_ALLOC_PTR_GFP((ptr), obdo_cachep, GFP_NOFS); \
} while(0)
#define OBDO_FREE(ptr) \
#define OBD_ALLOC_GFP(ptr, size, gfp_mask) \
__OBD_MALLOC_VERBOSE(ptr, NULL, 0, size, gfp_mask)
-#define OBD_ALLOC(ptr, size) OBD_ALLOC_GFP(ptr, size, __GFP_IO)
-#define OBD_ALLOC_WAIT(ptr, size) OBD_ALLOC_GFP(ptr, size, GFP_IOFS)
+#define OBD_ALLOC(ptr, size) OBD_ALLOC_GFP(ptr, size, GFP_NOFS)
+#define OBD_ALLOC_WAIT(ptr, size) OBD_ALLOC_GFP(ptr, size, GFP_KERNEL)
#define OBD_ALLOC_PTR(ptr) OBD_ALLOC(ptr, sizeof *(ptr))
#define OBD_ALLOC_PTR_WAIT(ptr) OBD_ALLOC_WAIT(ptr, sizeof *(ptr))
__OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, gfp_mask)
#define OBD_CPT_ALLOC(ptr, cptab, cpt, size) \
- OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
+ OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
#define OBD_CPT_ALLOC_PTR(ptr, cptab, cpt) \
OBD_CPT_ALLOC(ptr, cptab, cpt, sizeof *(ptr))
} while(0)
#define OBD_SLAB_ALLOC(ptr, slab, size) \
- OBD_SLAB_ALLOC_GFP(ptr, slab, size, __GFP_IO)
+ OBD_SLAB_ALLOC_GFP(ptr, slab, size, GFP_NOFS)
#define OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, size) \
- OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, __GFP_IO)
+ OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, GFP_NOFS)
#define OBD_SLAB_ALLOC_PTR(ptr, slab) \
OBD_SLAB_ALLOC(ptr, slab, sizeof *(ptr))
*
*/
-void *ccc_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
{
- struct ccc_thread_info *info;
+ struct ccc_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, __GFP_IO);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
+ OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, GFP_NOFS);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
}
void ccc_key_fini(const struct lu_context *ctx,
}
void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct ccc_session *session;
+ struct ccc_session *session;
- OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, __GFP_IO);
- if (session == NULL)
- session = ERR_PTR(-ENOMEM);
- return session;
+ OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, GFP_NOFS);
+ if (session == NULL)
+ session = ERR_PTR(-ENOMEM);
+ return session;
}
void ccc_session_key_fini(const struct lu_context *ctx,
}
int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
+ struct cl_req *req)
{
- struct ccc_req *vrq;
- int result;
+ struct ccc_req *vrq;
+ int result;
- OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, __GFP_IO);
- if (vrq != NULL) {
- cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
+ OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, GFP_NOFS);
+ if (vrq != NULL) {
+ cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
}
/**
*/
struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops)
+ const struct lu_object_header *unused,
+ struct lu_device *dev,
+ const struct cl_object_operations *clops,
+ const struct lu_object_operations *luops)
{
- struct ccc_object *vob;
- struct lu_object *obj;
+ struct ccc_object *vob;
+ struct lu_object *obj;
- OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, __GFP_IO);
- if (vob != NULL) {
- struct cl_object_header *hdr;
+ OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, GFP_NOFS);
+ if (vob != NULL) {
+ struct cl_object_header *hdr;
- obj = ccc2lu(vob);
- hdr = &vob->cob_header;
- cl_object_header_init(hdr);
+ obj = ccc2lu(vob);
+ hdr = &vob->cob_header;
+ cl_object_header_init(hdr);
hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
- vob->cob_cl.co_ops = clops;
- obj->lo_ops = luops;
- } else
- obj = NULL;
- return obj;
+ vob->cob_cl.co_ops = clops;
+ obj->lo_ops = luops;
+ } else
+ obj = NULL;
+ return obj;
}
int ccc_object_init0(const struct lu_env *env,
}
int ccc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused,
- const struct cl_lock_operations *lkops)
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *unused,
+ const struct cl_lock_operations *lkops)
{
- struct ccc_lock *clk;
- int result;
+ struct ccc_lock *clk;
+ int result;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, __GFP_IO);
- if (clk != NULL) {
- cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
+ OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, GFP_NOFS);
+ if (clk != NULL) {
+ cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
}
int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
struct kmem_cache *ldlm_interval_slab;
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
- struct ldlm_interval *node;
- ENTRY;
+ struct ldlm_interval *node;
+ ENTRY;
- LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
- if (node == NULL)
- RETURN(NULL);
+ LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
+ if (node == NULL)
+ RETURN(NULL);
- CFS_INIT_LIST_HEAD(&node->li_group);
- ldlm_interval_attach(node, lock);
- RETURN(node);
+ CFS_INIT_LIST_HEAD(&node->li_group);
+ ldlm_interval_attach(node, lock);
+ RETURN(node);
}
void ldlm_interval_free(struct ldlm_interval *node)
*/
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
- struct ldlm_lock *lock;
- ENTRY;
+ struct ldlm_lock *lock;
+ ENTRY;
- if (resource == NULL)
- LBUG();
+ if (resource == NULL)
+ LBUG();
- OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, __GFP_IO);
- if (lock == NULL)
- RETURN(NULL);
+ OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS);
+ if (lock == NULL)
+ RETURN(NULL);
spin_lock_init(&lock->l_lock);
- lock->l_resource = resource;
- lu_ref_add(&resource->lr_reference, "lock", lock);
+ lock->l_resource = resource;
+ lu_ref_add(&resource->lr_reference, "lock", lock);
atomic_set(&lock->l_refc, 2);
CFS_INIT_LIST_HEAD(&lock->l_res_link);
}
}
- /* For a replaying lock, it might be already in granted list. So
- * unlinking the lock will cause the interval node to be freed, we
- * have to allocate the interval node early otherwise we can't regrant
- * this lock in the future. - jay */
- if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
+ /* For a replaying lock, it might be already in granted list. So
+ * unlinking the lock will cause the interval node to be freed, we
+ * have to allocate the interval node early otherwise we can't regrant
+ * this lock in the future. - jay */
+ if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
lock_res_and_lock(lock);
if (local && lock->l_req_mode == lock->l_granted_mode) {
* pages on a file.
*/
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
- __u32 *flags)
+ __u32 *flags)
{
- CFS_LIST_HEAD(rpc_list);
- struct ldlm_resource *res;
- struct ldlm_namespace *ns;
- int granted = 0;
+ CFS_LIST_HEAD(rpc_list);
+ struct ldlm_resource *res;
+ struct ldlm_namespace *ns;
+ int granted = 0;
#ifdef HAVE_SERVER_SUPPORT
int old_mode;
struct sl_insert_point prev;
#endif
- struct ldlm_interval *node;
- ENTRY;
+ struct ldlm_interval *node;
+ ENTRY;
/* Just return if mode is unchanged. */
if (new_mode == lock->l_granted_mode) {
- *flags |= LDLM_FL_BLOCK_GRANTED;
- RETURN(lock->l_resource);
- }
+ *flags |= LDLM_FL_BLOCK_GRANTED;
+ RETURN(lock->l_resource);
+ }
- /* I can't check the type of lock here because the bitlock of lock
- * is not held here, so do the allocation blindly. -jay */
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
+ /* I can't check the type of lock here because the bitlock of lock
+ * is not held here, so do the allocation blindly. -jay */
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
if (node == NULL) /* Actually, this causes EDEADLOCK to be returned */
- RETURN(NULL);
+ RETURN(NULL);
- LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
- "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
+ LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
+ "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
- lock_res_and_lock(lock);
+ lock_res_and_lock(lock);
- res = lock->l_resource;
- ns = ldlm_res_to_ns(res);
+ res = lock->l_resource;
+ ns = ldlm_res_to_ns(res);
#ifdef HAVE_SERVER_SUPPORT
old_mode = lock->l_req_mode;
/** Create and initialize new resource. */
static struct ldlm_resource *ldlm_resource_new(void)
{
- struct ldlm_resource *res;
- int idx;
+ struct ldlm_resource *res;
+ int idx;
- OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, __GFP_IO);
- if (res == NULL)
- return NULL;
+ OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
+ if (res == NULL)
+ return NULL;
- CFS_INIT_LIST_HEAD(&res->lr_granted);
- CFS_INIT_LIST_HEAD(&res->lr_converting);
- CFS_INIT_LIST_HEAD(&res->lr_waiting);
+ CFS_INIT_LIST_HEAD(&res->lr_granted);
+ CFS_INIT_LIST_HEAD(&res->lr_converting);
+ CFS_INIT_LIST_HEAD(&res->lr_waiting);
/* Initialize interval trees for each lock mode. */
for (idx = 0; idx < LCK_MODE_NUM; idx++) {
{
struct ll_file_data *fd;
- OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, GFP_NOFS);
if (fd == NULL)
return NULL;
{
struct ll_inode_info *lli;
ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1);
- OBD_SLAB_ALLOC_PTR_GFP(lli, ll_inode_cachep, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lli, ll_inode_cachep, GFP_NOFS);
if (lli == NULL)
return NULL;
{
struct vvp_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, GFP_NOFS);
if (info == NULL)
info = ERR_PTR(-ENOMEM);
return info;
{
struct vvp_session *session;
- OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, GFP_NOFS);
if (session == NULL)
session = ERR_PTR(-ENOMEM);
return session;
RETURN(-EPROTO);
}
- OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, GFP_NOFS);
if (xattr == NULL) {
CDEBUG(D_CACHE, "failed to allocate xattr\n");
RETURN(-ENOMEM);
struct lu_object *lu_obj;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lod_obj, lod_object_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lod_obj, lod_object_kmem, GFP_NOFS);
if (lod_obj == NULL)
RETURN(ERR_PTR(-ENOMEM));
*/
static void *lov_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct lov_thread_info *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, __GFP_IO);
- if (info != NULL)
- CFS_INIT_LIST_HEAD(&info->lti_closure.clc_list);
- else
- info = ERR_PTR(-ENOMEM);
- return info;
+ struct lov_thread_info *info;
+
+ OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, GFP_NOFS);
+ if (info != NULL)
+ CFS_INIT_LIST_HEAD(&info->lti_closure.clc_list);
+ else
+ info = ERR_PTR(-ENOMEM);
+ return info;
}
static void lov_key_fini(const struct lu_context *ctx,
};
static void *lov_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct lov_session *info;
+ struct lov_session *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, lov_session_kmem, __GFP_IO);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
+ OBD_SLAB_ALLOC_PTR_GFP(info, lov_session_kmem, GFP_NOFS);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
}
static void lov_session_key_fini(const struct lu_context *ctx,
}
static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
+ struct cl_req *req)
{
- struct lov_req *lr;
- int result;
-
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, __GFP_IO);
- if (lr != NULL) {
- cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- RETURN(result);
+ struct lov_req *lr;
+ int result;
+
+ ENTRY;
+ OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, GFP_NOFS);
+ if (lr != NULL) {
+ cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ RETURN(result);
}
static const struct cl_device_operations lov_cl_ops = {
struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
{
- struct lov_stripe_md *lsm;
- struct lov_oinfo *loi;
- int i, oinfo_ptrs_size;
+ struct lov_stripe_md *lsm;
+ struct lov_oinfo *loi;
+ int i, oinfo_ptrs_size;
- LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
+ LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
- oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
- *size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size;
+ oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
+ *size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size;
- OBD_ALLOC_LARGE(lsm, *size);
- if (!lsm)
- return NULL;;
+ OBD_ALLOC_LARGE(lsm, *size);
+ if (!lsm)
+ return NULL;
- for (i = 0; i < stripe_count; i++) {
- OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, __GFP_IO);
- if (loi == NULL)
- goto err;
- lsm->lsm_oinfo[i] = loi;
- }
- lsm->lsm_stripe_count = stripe_count;
- return lsm;
+ for (i = 0; i < stripe_count; i++) {
+ OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
+ if (loi == NULL)
+ goto err;
+ lsm->lsm_oinfo[i] = loi;
+ }
+ lsm->lsm_stripe_count = stripe_count;
+ return lsm;
err:
- while (--i >= 0)
- OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab, sizeof(*loi));
- OBD_FREE_LARGE(lsm, *size);
- return NULL;
+ while (--i >= 0)
+ OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab, sizeof(*loi));
+ OBD_FREE_LARGE(lsm, *size);
+ return NULL;
}
void lsm_free_plain(struct lov_stripe_md *lsm)
}
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
- const struct cl_io *io,
- struct lov_lock *lck,
- int idx, struct lov_lock_link **out)
+ const struct cl_io *io,
+ struct lov_lock *lck,
+ int idx, struct lov_lock_link **out)
{
- struct cl_lock *sublock;
- struct cl_lock *parent;
- struct lov_lock_link *link;
+ struct cl_lock *sublock;
+ struct cl_lock *parent;
+ struct lov_lock_link *link;
- LASSERT(idx < lck->lls_nr);
- ENTRY;
+ LASSERT(idx < lck->lls_nr);
+ ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, __GFP_IO);
- if (link != NULL) {
- struct lov_sublock_env *subenv;
- struct lov_lock_sub *lls;
- struct cl_lock_descr *descr;
-
- parent = lck->lls_cl.cls_lock;
- lls = &lck->lls_sub[idx];
- descr = &lls->sub_got;
-
- subenv = lov_sublock_env_get(env, parent, lls);
- if (!IS_ERR(subenv)) {
- /* CAVEAT: Don't try to add a field in lov_lock_sub
- * to remember the subio. This is because lock is able
- * to be cached, but this is not true for IO. This
- * further means a sublock might be referenced in
- * different io context. -jay */
-
- sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
- descr, "lov-parent", parent);
- lov_sublock_env_put(subenv);
- } else {
- /* error occurs. */
- sublock = (void*)subenv;
- }
+ OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, GFP_NOFS);
+ if (link != NULL) {
+ struct lov_sublock_env *subenv;
+ struct lov_lock_sub *lls;
+ struct cl_lock_descr *descr;
+
+ parent = lck->lls_cl.cls_lock;
+ lls = &lck->lls_sub[idx];
+ descr = &lls->sub_got;
+
+ subenv = lov_sublock_env_get(env, parent, lls);
+ if (!IS_ERR(subenv)) {
+ /* CAVEAT: Don't try to add a field in lov_lock_sub
+ * to remember the subio. This is because lock is able
+ * to be cached, but this is not true for IO. This
+ * further means a sublock might be referenced in
+ * different io context. -jay */
+
+ sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
+ descr, "lov-parent", parent);
+ lov_sublock_env_put(subenv);
+ } else {
+ /* error occurs. */
+ sublock = (void *)subenv;
+ }
- if (!IS_ERR(sublock))
- *out = link;
- else
- OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
- } else
- sublock = ERR_PTR(-ENOMEM);
- RETURN(sublock);
+ if (!IS_ERR(sublock))
+ *out = link;
+ else
+ OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
+ } else
+ sublock = ERR_PTR(-ENOMEM);
+ RETURN(sublock);
}
static void lov_sublock_unlock(const struct lu_env *env,
};
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+ struct cl_lock *lock, const struct cl_io *io)
{
- struct lov_lock *lck;
- int result;
+ struct lov_lock *lck;
+ int result;
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
- if (lck != NULL) {
- cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
- result = lov_lock_sub_init(env, lck, io);
- } else
- result = -ENOMEM;
- RETURN(result);
+ ENTRY;
+ OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
+ if (lck != NULL) {
+ cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
+ result = lov_lock_sub_init(env, lck, io);
+ } else
+ result = -ENOMEM;
+ RETURN(result);
}
static void lov_empty_lock_fini(const struct lu_env *env,
};
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+ struct cl_lock *lock, const struct cl_io *io)
{
struct lov_lock *lck;
int result = -ENOMEM;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
if (lck != NULL) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
lck->lls_orig = lock->cll_descr;
};
struct lu_object *lov_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
+ const struct lu_object_header *unused,
+ struct lu_device *dev)
{
- struct lov_object *lov;
- struct lu_object *obj;
+ struct lov_object *lov;
+ struct lu_object *obj;
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, __GFP_IO);
- if (lov != NULL) {
- obj = lov2lu(lov);
- lu_object_init(obj, NULL, dev);
- lov->lo_cl.co_ops = &lov_ops;
- lov->lo_type = -1; /* invalid, to catch uninitialized type */
- /*
- * object io operation vector (cl_object::co_iop) is installed
- * later in lov_object_init(), as different vectors are used
- * for object with different layouts.
- */
- obj->lo_ops = &lov_lu_obj_ops;
- } else
- obj = NULL;
- RETURN(obj);
+ ENTRY;
+ OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
+ if (lov != NULL) {
+ obj = lov2lu(lov);
+ lu_object_init(obj, NULL, dev);
+ lov->lo_cl.co_ops = &lov_ops;
+ lov->lo_type = -1; /* invalid, to catch uninitialized type */
+ /*
+ * object io operation vector (cl_object::co_iop) is installed
+ * later in lov_object_init(), as different vectors are used
+ * for object with different layouts.
+ */
+ obj->lo_ops = &lov_lu_obj_ops;
+ } else
+ obj = NULL;
+ RETURN(obj);
}
struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
}
static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
+ struct cl_req *req)
{
- struct lovsub_req *lsr;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, __GFP_IO);
- if (lsr != NULL) {
- cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
+ struct lovsub_req *lsr;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, GFP_NOFS);
+ if (lsr != NULL) {
+ cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
}
static const struct lu_device_operations lovsub_lu_ops = {
};
int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+ struct cl_lock *lock, const struct cl_io *io)
{
- struct lovsub_lock *lsk;
- int result;
+ struct lovsub_lock *lsk;
+ int result;
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, __GFP_IO);
- if (lsk != NULL) {
- CFS_INIT_LIST_HEAD(&lsk->lss_parents);
- cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
- result = 0;
- } else
- result = -ENOMEM;
- RETURN(result);
+ ENTRY;
+ OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, GFP_NOFS);
+ if (lsk != NULL) {
+ CFS_INIT_LIST_HEAD(&lsk->lss_parents);
+ cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ RETURN(result);
}
/** @} lov */
};
struct lu_object *lovsub_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
+ const struct lu_object_header *unused,
+ struct lu_device *dev)
{
- struct lovsub_object *los;
- struct lu_object *obj;
-
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, __GFP_IO);
- if (los != NULL) {
- struct cl_object_header *hdr;
-
- obj = lovsub2lu(los);
- hdr = &los->lso_header;
- cl_object_header_init(hdr);
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
- los->lso_cl.co_ops = &lovsub_ops;
- obj->lo_ops = &lovsub_lu_obj_ops;
- } else
- obj = NULL;
- RETURN(obj);
+ struct lovsub_object *los;
+ struct lu_object *obj;
+
+ ENTRY;
+ OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, GFP_NOFS);
+ if (los != NULL) {
+ struct cl_object_header *hdr;
+
+ obj = lovsub2lu(los);
+ hdr = &los->lso_header;
+ cl_object_header_init(hdr);
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
+ los->lso_cl.co_ops = &lovsub_ops;
+ obj->lo_ops = &lovsub_lu_obj_ops;
+ } else
+ obj = NULL;
+ RETURN(obj);
}
/** @} lov */
}
struct lu_object *mdd_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *d)
+ const struct lu_object_header *hdr,
+ struct lu_device *d)
{
- struct mdd_object *mdd_obj;
-
- OBD_SLAB_ALLOC_PTR_GFP(mdd_obj, mdd_object_kmem, __GFP_IO);
- if (mdd_obj != NULL) {
- struct lu_object *o;
-
- o = mdd2lu_obj(mdd_obj);
- lu_object_init(o, NULL, d);
- mdd_obj->mod_obj.mo_ops = &mdd_obj_ops;
- mdd_obj->mod_obj.mo_dir_ops = &mdd_dir_ops;
- mdd_obj->mod_count = 0;
- o->lo_ops = &mdd_lu_obj_ops;
- return o;
- } else {
- return NULL;
- }
+ struct mdd_object *mdd_obj;
+
+ OBD_SLAB_ALLOC_PTR_GFP(mdd_obj, mdd_object_kmem, GFP_NOFS);
+ if (mdd_obj != NULL) {
+ struct lu_object *o;
+
+ o = mdd2lu_obj(mdd_obj);
+ lu_object_init(o, NULL, d);
+ mdd_obj->mod_obj.mo_ops = &mdd_obj_ops;
+ mdd_obj->mod_obj.mo_dir_ops = &mdd_dir_ops;
+ mdd_obj->mod_count = 0;
+ o->lo_ops = &mdd_lu_obj_ops;
+ return o;
+ } else {
+ return NULL;
+ }
}
static int mdd_object_init(const struct lu_env *env, struct lu_object *o,
}
static struct lu_object *mdt_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *d)
+ const struct lu_object_header *hdr,
+ struct lu_device *d)
{
- struct mdt_object *mo;
+ struct mdt_object *mo;
- ENTRY;
+ ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(mo, mdt_object_kmem, __GFP_IO);
- if (mo != NULL) {
- struct lu_object *o;
- struct lu_object_header *h;
+ OBD_SLAB_ALLOC_PTR_GFP(mo, mdt_object_kmem, GFP_NOFS);
+ if (mo != NULL) {
+ struct lu_object *o;
+ struct lu_object_header *h;
o = &mo->mot_obj;
- h = &mo->mot_header;
- lu_object_header_init(h);
- lu_object_init(o, h, d);
- lu_object_add_top(h, o);
- o->lo_ops = &mdt_obj_ops;
+ h = &mo->mot_header;
+ lu_object_header_init(h);
+ lu_object_init(o, h, d);
+ lu_object_add_top(h, o);
+ o->lo_ops = &mdt_obj_ops;
mutex_init(&mo->mot_ioepoch_mutex);
mutex_init(&mo->mot_lov_mutex);
init_rwsem(&mo->mot_open_sem);
}
static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *descr)
+ struct cl_object *obj,
+ const struct cl_io *io,
+ const struct cl_lock_descr *descr)
{
- struct cl_lock *lock;
- struct lu_object_header *head;
+ struct cl_lock *lock;
+ struct lu_object_header *head;
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
- if (lock != NULL) {
+ ENTRY;
+ OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, GFP_NOFS);
+ if (lock != NULL) {
atomic_set(&lock->cll_ref, 1);
- lock->cll_descr = *descr;
- lock->cll_state = CLS_NEW;
- cl_object_get(obj);
+ lock->cll_descr = *descr;
+ lock->cll_state = CLS_NEW;
+ cl_object_get(obj);
lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
lock);
CFS_INIT_LIST_HEAD(&lock->cll_layers);
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
- struct lu_env *env;
- struct cl_env *cle;
+ struct lu_env *env;
+ struct cl_env *cle;
- OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, __GFP_IO);
- if (cle != NULL) {
- int rc;
+ OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS);
+ if (cle != NULL) {
+ int rc;
- CFS_INIT_LIST_HEAD(&cle->ce_linkage);
- cle->ce_magic = &cl_env_init0;
- env = &cle->ce_lu;
- rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
- if (rc == 0) {
- rc = lu_context_init(&cle->ce_ses,
- LCT_SESSION | ses_tags);
- if (rc == 0) {
- lu_context_enter(&cle->ce_ses);
- env->le_ses = &cle->ce_ses;
- cl_env_init0(cle, debug);
- } else
- lu_env_fini(env);
- }
- if (rc != 0) {
- OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
- env = ERR_PTR(rc);
- } else {
- CL_ENV_INC(create);
- CL_ENV_INC(total);
- }
- } else
- env = ERR_PTR(-ENOMEM);
- return env;
+ CFS_INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ env = &cle->ce_lu;
+ rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses,
+ LCT_SESSION | ses_tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ cl_env_init0(cle, debug);
+ } else
+ lu_env_fini(env);
+ }
+ if (rc != 0) {
+ OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
+ env = ERR_PTR(rc);
+ } else {
+ CL_ENV_INC(create);
+ CL_ENV_INC(total);
+ }
+ } else
+ env = ERR_PTR(-ENOMEM);
+ return env;
}
static void cl_env_fini(struct cl_env *cle)
ENTRY;
OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
- __GFP_IO);
+ GFP_NOFS);
if (page != NULL) {
int result = 0;
atomic_set(&page->cp_ref, 1);
{
struct obd_device *obd;
- OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, GFP_NOFS);
if (obd != NULL) {
obd->obd_magic = OBD_DEVICE_MAGIC;
}
}
static int echo_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *unused)
{
- struct echo_lock *el;
- ENTRY;
+ struct echo_lock *el;
+ ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, __GFP_IO);
- if (el != NULL) {
- cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
- el->el_object = cl2echo_obj(obj);
- CFS_INIT_LIST_HEAD(&el->el_chain);
+ OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, GFP_NOFS);
+ if (el != NULL) {
+ cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
+ el->el_object = cl2echo_obj(obj);
+ CFS_INIT_LIST_HEAD(&el->el_chain);
atomic_set(&el->el_refcount, 0);
- }
- RETURN(el == NULL ? -ENOMEM : 0);
+ }
+ RETURN(el == NULL ? -ENOMEM : 0);
}
static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
* @{
*/
static struct lu_object *echo_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev)
+ const struct lu_object_header *hdr,
+ struct lu_device *dev)
{
- struct echo_object *eco;
- struct lu_object *obj = NULL;
- ENTRY;
+ struct echo_object *eco;
+ struct lu_object *obj = NULL;
+ ENTRY;
- /* we're the top dev. */
- LASSERT(hdr == NULL);
- OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, __GFP_IO);
- if (eco != NULL) {
- struct cl_object_header *hdr = &eco->eo_hdr;
+ /* we're the top dev. */
+ LASSERT(hdr == NULL);
+ OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, GFP_NOFS);
+ if (eco != NULL) {
+ struct cl_object_header *hdr = &eco->eo_hdr;
- obj = &echo_obj2cl(eco)->co_lu;
- cl_object_header_init(hdr);
+ obj = &echo_obj2cl(eco)->co_lu;
+ cl_object_header_init(hdr);
hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
- eco->eo_cl.co_ops = &echo_cl_obj_ops;
- obj->lo_ops = &echo_lu_obj_ops;
- }
- RETURN(obj);
+ eco->eo_cl.co_ops = &echo_cl_obj_ops;
+ obj->lo_ops = &echo_lu_obj_ops;
+ }
+ RETURN(obj);
}
static struct lu_device_operations echo_device_lu_ops = {
}
static void *echo_thread_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct echo_thread_info *info;
+ struct echo_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, __GFP_IO);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
+ OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, GFP_NOFS);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
}
static void echo_thread_key_fini(const struct lu_context *ctx,
};
static void *echo_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct echo_session_info *session;
+ struct echo_session_info *session;
- OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, __GFP_IO);
- if (session == NULL)
- session = ERR_PTR(-ENOMEM);
- return session;
+ OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, GFP_NOFS);
+ if (session == NULL)
+ session = ERR_PTR(-ENOMEM);
+ return session;
}
static void echo_session_key_fini(const struct lu_context *ctx,
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(of, ofd_object_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(of, ofd_object_kmem, GFP_NOFS);
if (of != NULL) {
struct lu_object *o;
struct lu_object_header *h;
{
struct osc_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, osc_thread_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(info, osc_thread_kmem, GFP_NOFS);
if (info == NULL)
info = ERR_PTR(-ENOMEM);
return info;
{
struct osc_session *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, osc_session_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(info, osc_session_kmem, GFP_NOFS);
if (info == NULL)
info = ERR_PTR(-ENOMEM);
return info;
}
int osc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
+ struct cl_req *req)
{
- struct osc_req *or;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, __GFP_IO);
- if (or != NULL) {
- cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
+ struct osc_req *or;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, GFP_NOFS);
+ if (or != NULL) {
+ cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
}
/** @} osc */
struct osc_lock *clk;
int result;
- OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, GFP_NOFS);
if (clk != NULL) {
__u32 enqflags = lock->cll_descr.cld_enq_flags;
};
struct lu_object *osc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
+ const struct lu_object_header *unused,
+ struct lu_device *dev)
{
- struct osc_object *osc;
- struct lu_object *obj;
-
- OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, __GFP_IO);
- if (osc != NULL) {
- obj = osc2lu(osc);
- lu_object_init(obj, NULL, dev);
- osc->oo_cl.co_ops = &osc_ops;
- obj->lo_ops = &osc_lu_obj_ops;
- } else
- obj = NULL;
- return obj;
+ struct osc_object *osc;
+ struct lu_object *obj;
+
+ OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, GFP_NOFS);
+ if (osc != NULL) {
+ obj = osc2lu(osc);
+ lu_object_init(obj, NULL, dev);
+ osc->oo_cl.co_ops = &osc_ops;
+ obj->lo_ops = &osc_lu_obj_ops;
+ } else
+ obj = NULL;
+ return obj;
}
/** @} osc */
/* on pending IO in this thread should left from prev. request */
LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
- th = ERR_PTR(-ENOMEM);
- OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO);
- if (oh != NULL) {
+ th = ERR_PTR(-ENOMEM);
+ OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
+ if (oh != NULL) {
oh->ot_quota_trans = &oti->oti_quota_trans;
memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
- th = &oh->ot_super;
- th->th_dev = d;
- th->th_result = 0;
- th->th_tags = LCT_TX_HANDLE;
- oh->ot_credits = 0;
+ th = &oh->ot_super;
+ th->th_dev = d;
+ th->th_result = 0;
+ th->th_tags = LCT_TX_HANDLE;
+ oh->ot_credits = 0;
atomic_set(&th->th_refc, 1);
th->th_alloc_size = sizeof(*oh);
oti->oti_dev = osd_dt_dev(d);
- CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
- osd_th_alloced(oh);
+ CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
+ osd_th_alloced(oh);
memset(oti->oti_declare_ops, 0,
sizeof(oti->oti_declare_ops));
memset(oti->oti_declare_ops_cred, 0,
sizeof(oti->oti_declare_ops_cred));
oti->oti_rollback = false;
- }
- RETURN(th);
+ }
+ RETURN(th);
}
/*
static inline dqbuf_t getdqbuf(void)
{
- dqbuf_t buf = kmalloc(LUSTRE_DQBLKSIZE, __GFP_IO);
+ dqbuf_t buf = kmalloc(LUSTRE_DQBLKSIZE, GFP_NOFS);
if (!buf)
CWARN("Not enough memory for quota buffers.\n");
return buf;
{
struct osd_object *mo;
- OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
if (mo != NULL) {
struct lu_object *l;
struct osp_object *o;
struct lu_object *l;
- OBD_SLAB_ALLOC_PTR_GFP(o, osp_object_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(o, osp_object_kmem, GFP_NOFS);
if (o != NULL) {
l = &o->opo_obj.do_lu;
struct lustre_msg *msg;
spin_unlock(&pool->prp_lock);
- req = ptlrpc_request_cache_alloc(__GFP_IO);
+ req = ptlrpc_request_cache_alloc(GFP_NOFS);
if (!req)
return;
OBD_ALLOC_LARGE(msg, size);
*/
static inline
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
- struct ptlrpc_request_pool *pool)
+ struct ptlrpc_request_pool *pool)
{
struct ptlrpc_request *request = NULL;
request = ptlrpc_prep_req_from_pool(pool);
if (!request)
- request = ptlrpc_request_cache_alloc(__GFP_IO);
+ request = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (request) {
- LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
- LASSERT(imp != LP_POISON);
- LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
- imp->imp_client);
- LASSERT(imp->imp_client != LP_POISON);
+ if (request) {
+ LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
+ LASSERT(imp != LP_POISON);
+ LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
+ imp->imp_client);
+ LASSERT(imp->imp_client != LP_POISON);
- request->rq_import = class_import_get(imp);
- } else {
- CERROR("request allocation out of memory\n");
- }
+ request->rq_import = class_import_get(imp);
+ } else {
+ CERROR("request allocation out of memory\n");
+ }
- return request;
+ return request;
}
/**
if (cb == NULL)
RETURN(ERR_PTR(-EINVAL));
- /* copy some code from deprecated fakereq. */
- req = ptlrpc_request_cache_alloc(__GFP_IO);
- if (req == NULL) {
- CERROR("ptlrpc: run out of memory!\n");
- RETURN(ERR_PTR(-ENOMEM));
- }
+ /* copy some code from deprecated fakereq. */
+ req = ptlrpc_request_cache_alloc(GFP_NOFS);
+ if (req == NULL) {
+ CERROR("ptlrpc: run out of memory!\n");
+ RETURN(ERR_PTR(-ENOMEM));
+ }
- req->rq_send_state = LUSTRE_IMP_FULL;
- req->rq_type = PTL_RPC_MSG_REQUEST;
- req->rq_import = class_import_get(imp);
- req->rq_export = NULL;
- req->rq_interpret_reply = work_interpreter;
- /* don't want reply */
- req->rq_receiving_reply = 0;
- req->rq_must_unlink = 0;
- req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_send_state = LUSTRE_IMP_FULL;
+ req->rq_type = PTL_RPC_MSG_REQUEST;
+ req->rq_import = class_import_get(imp);
+ req->rq_export = NULL;
+ req->rq_interpret_reply = work_interpreter;
+ /* don't want reply */
+ req->rq_receiving_reply = 0;
+ req->rq_must_unlink = 0;
+ req->rq_no_delay = req->rq_no_resend = 1;
req->rq_pill.rc_fmt = (void *)&worker_format;
spin_lock_init(&req->rq_lock);
LASSERT(desc->pd_compat != NULL);
OBD_CPT_ALLOC_GFP(policy, svcpt->scp_service->srv_cptable,
- svcpt->scp_cpt, sizeof(*policy), __GFP_IO);
+ svcpt->scp_cpt, sizeof(*policy), GFP_NOFS);
if (policy == NULL)
RETURN(-ENOMEM);
goto out;
OBD_CPT_ALLOC_GFP(cli, nrs_pol2cptab(policy), nrs_pol2cptid(policy),
- sizeof(*cli), moving_req ? GFP_ATOMIC :
- __GFP_IO);
+ sizeof(*cli), moving_req ? GFP_ATOMIC : GFP_NOFS);
if (cli == NULL)
return -ENOMEM;
OBD_SLAB_CPT_ALLOC_PTR_GFP(orro, orrd->od_cache,
nrs_pol2cptab(policy), nrs_pol2cptid(policy),
- moving_req ? GFP_ATOMIC : __GFP_IO);
+ moving_req ? GFP_ATOMIC : GFP_NOFS);
if (orro == NULL)
RETURN(-ENOMEM);
RETURN(-EACCES);
}
- req = ptlrpc_request_cache_alloc(__GFP_IO);
+ req = ptlrpc_request_cache_alloc(GFP_NOFS);
if (!req)
RETURN(-ENOMEM);
* changed at any time, no matter we're holding rq_lock or not. For this reason
* we allocate a separate ptlrpc_request and reply buffer for early reply
* processing.
- *
+ *
* \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
* Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
* \a *req_ret to release it.
* \retval -ev error number, and \a req_ret will not be set.
*/
int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
- struct ptlrpc_request **req_ret)
+ struct ptlrpc_request **req_ret)
{
struct ptlrpc_request *early_req;
char *early_buf;
int rc;
ENTRY;
- early_req = ptlrpc_request_cache_alloc(__GFP_IO);
- if (early_req == NULL)
- RETURN(-ENOMEM);
+ early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
+ if (early_req == NULL)
+ RETURN(-ENOMEM);
- early_size = req->rq_nob_received;
- early_bufsz = size_roundup_power2(early_size);
- OBD_ALLOC_LARGE(early_buf, early_bufsz);
- if (early_buf == NULL)
- GOTO(err_req, rc = -ENOMEM);
+ early_size = req->rq_nob_received;
+ early_bufsz = size_roundup_power2(early_size);
+ OBD_ALLOC_LARGE(early_buf, early_bufsz);
+ if (early_buf == NULL)
+ GOTO(err_req, rc = -ENOMEM);
- /* sanity checkings and copy data out, do it inside spinlock */
+ /* sanity checkings and copy data out, do it inside spinlock */
spin_lock(&req->rq_lock);
if (req->rq_replied) {
goto out_pools;
for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
- pools[i][j] = alloc_page(__GFP_IO |
- __GFP_HIGHMEM);
+ pools[i][j] = alloc_page(GFP_NOFS |
+ __GFP_HIGHMEM);
if (pools[i][j] == NULL)
goto out_pools;
}
newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
- reqcopy = ptlrpc_request_cache_alloc(__GFP_IO);
+ reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
if (reqcopy == NULL)
RETURN(-ENOMEM);
OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
RETURN(lqe);
}
- OBD_SLAB_ALLOC_PTR_GFP(new, lqe_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(new, lqe_kmem, GFP_NOFS);
if (new == NULL) {
CERROR("Fail to allocate lqe for id:"LPU64", "
"hash:%s\n", qid->qid_uid, site->lqs_hash->hs_name);
{
struct qsd_upd_rec *upd;
- OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, GFP_NOFS);
if (upd == NULL) {
CERROR("Failed to allocate upd");
return NULL;