'strcpy', 'strncpy',
'strcat', 'strncat',
'tempnam', 'mkstemp',
- 'OBD_FREE_LARGE', 'OBD_FREE',
'f_dentry', 'f_path.dentry',
);
* operation, and updates on each MDTs are linked to
* dtr_sub_list */
struct distribute_txn_replay_req {
- /* update record */
+ /* update record, may be vmalloc'd */
struct llog_update_record *dtrq_lur;
int dtrq_lur_size;
/**
* Temporary storage for a LVB received during an enqueue operation.
+ * May be vmalloc'd, so needs to be freed with OBD_FREE_LARGE().
*/
__u32 l_lvb_len;
void *l_lvb_data;
struct rw_semaphore lgh_lock;
struct mutex lgh_hdr_mutex; /* protect lgh_hdr data */
struct llog_logid lgh_id; /* id of this log */
- struct llog_log_hdr *lgh_hdr;
+ struct llog_log_hdr *lgh_hdr; /* may be vmalloc'd */
size_t lgh_hdr_size;
struct dt_object *lgh_obj;
/* For a Catalog, is the last/newest used index for a plain slot.
struct ptlrpc_svc_ctx *sr_svc_ctx;
/** (server side), pointed directly into req buffer */
struct ptlrpc_user_desc *sr_user_desc;
- /** separated reply state */
+ /** separated reply state, may be vmalloc'd */
struct ptlrpc_reply_state *sr_reply_state;
/** server-side hp handlers */
struct ptlrpc_hpreq_ops *sr_ops;
/** various buffer pointers */
- struct lustre_msg *rq_reqbuf; /**< req wrapper */
- char *rq_repbuf; /**< rep buffer */
- struct lustre_msg *rq_repdata; /**< rep wrapper msg */
+ struct lustre_msg *rq_reqbuf; /**< req wrapper, may be vmalloc'd */
+ char *rq_repbuf; /**< rep buffer, may be vmalloc'd */
+ struct lustre_msg *rq_repdata; /**< rep wrapper msg */
/** only in priv mode */
struct lustre_msg *rq_clrbuf;
int rq_reqbuf_len; /* req wrapper buf len */
* Definition of server service thread structure
*/
struct ptlrpc_thread {
- /**
- * List of active threads in svc->srv_threads
- */
+ /**
+ * List of active threads in svc->srv_threads
+ */
struct list_head t_link;
- /**
- * thread-private data (preallocated memory)
- */
- void *t_data;
- __u32 t_flags;
- /**
- * service thread index, from ptlrpc_start_threads
- */
- unsigned int t_id;
- /**
- * service thread pid
- */
+ /**
+ * thread-private data (preallocated vmalloc'd memory)
+ */
+ void *t_data;
+ __u32 t_flags;
+ /**
+ * service thread index, from ptlrpc_start_threads
+ */
+ unsigned int t_id;
+ /**
+ * service thread pid
+ */
pid_t t_pid;
- /**
- * put watchdog in the structure per thread b=14840
- */
- struct lc_watchdog *t_watchdog;
- /**
- * the svc this thread belonged to b=18582
- */
+ /**
+ * put watchdog in the structure per thread b=14840
+ */
+ struct lc_watchdog *t_watchdog;
+ /**
+ * the svc this thread belonged to b=18582
+ */
struct ptlrpc_service_part *t_svcpt;
wait_queue_head_t t_ctl_waitq;
struct lu_env *t_env;
* distribution.
*/
struct thandle_update_records {
- /* All of updates for the cross-MDT operation. */
+ /* All of the updates for the cross-MDT operation, vmalloc'd. */
struct llog_update_record *tur_update_records;
size_t tur_update_records_buf_size;
- /* All of parameters for the cross-MDT operation */
+ /* All of the parameters for the cross-MDT operation, vmalloc'd */
struct update_params *tur_update_params;
unsigned int tur_update_param_count;
size_t tur_update_params_buf_size;
OBD_CPT_VMALLOC(ptr, cptab, cpt, size); \
} while (0)
-#define OBD_FREE_LARGE(ptr, size) OBD_FREE(ptr, size)
-
#ifdef CONFIG_DEBUG_SLAB
#define POISON(ptr, c, s) do {} while (0)
#define POISON_PTR(ptr) ((void)0)
#endif
#ifdef POISON_BULK
-#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_CACHE_SIZE); \
+#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_CACHE_SIZE); \
kunmap(page); } while (0)
#else
#define POISON_PAGE(page, val) do { } while (0)
#endif
-#define OBD_FREE(ptr, size) \
-do { \
- if (is_vmalloc_addr(ptr)) { \
+#define OBD_FREE(ptr, size) \
+do { \
+ OBD_FREE_PRE(ptr, size, "kfreed"); \
+ kfree(ptr); \
+ POISON_PTR(ptr); \
+} while (0)
+
+#define OBD_FREE_LARGE(ptr, size) \
+do { \
+ if (is_vmalloc_addr(ptr)) { \
OBD_FREE_PRE(ptr, size, "vfreed"); \
- vfree(ptr); \
- } else { \
- OBD_FREE_PRE(ptr, size, "kfreed"); \
- kfree(ptr); \
+ vfree(ptr); \
+ POISON_PTR(ptr); \
+ } else { \
+ OBD_FREE(ptr, size); \
} \
- POISON_PTR(ptr); \
-} while(0)
+} while (0)
#define OBD_FREE_RCU(ptr, size, handle) \
do { \
ll_inline:1,
ll_failed:1,
ll_ignore:1;
- struct lfsck_slave_lmv_rec *ll_lslr;
+ struct lfsck_slave_lmv_rec *ll_lslr; /* may be vmalloc'd */
};
/* If the namespace LFSCK finds that the master MDT-object of a striped
RETURN(-ENOMEM);
/* Copy the whole struct */
- if (copy_from_user(hur, (void __user *)arg, totalsize)) {
- OBD_FREE_LARGE(hur, totalsize);
- RETURN(-EFAULT);
- }
+ if (copy_from_user(hur, (void __user *)arg, totalsize))
+ GOTO(out_hur, rc = -EFAULT);
if (hur->hur_request.hr_action == HUA_RELEASE) {
const struct lu_fid *fid;
hur, NULL);
}
+out_hur:
OBD_FREE_LARGE(hur, totalsize);
RETURN(rc);
if (lump == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size)) {
- OBD_FREE_LARGE(lump, lum_size);
- RETURN(-EFAULT);
- }
+ if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size))
+ GOTO(out_lump, rc = -EFAULT);
rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
+out_lump:
OBD_FREE_LARGE(lump, lum_size);
RETURN(rc);
}
}
unlock_res_and_lock(lock);
- if (lvbdata != NULL)
+ if (lvbdata)
OBD_FREE_LARGE(lvbdata, lmmsize);
EXIT;
#if defined(HAVE_DIRECTIO_ITER) || defined(HAVE_IOV_ITER_RW)
kvfree(pages);
#else
- OBD_FREE(pages, npages * sizeof(*pages));
+ OBD_FREE_LARGE(pages, npages * sizeof(*pages));
#endif
}
NULL);
up_read(¤t->mm->mmap_sem);
if (unlikely(result <= 0))
- OBD_FREE(*pages, *max_pages * sizeof(**pages));
+ OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
}
return result;
};
struct lod_thread_info {
- /* per-thread buffer for LOV EA */
+ /* per-thread buffer for LOV EA, may be vmalloc'd */
void *lti_ea_store;
__u32 lti_ea_store_size;
/* per-thread buffer for LMV EA */
if (info->lti_ea_store == NULL)
RETURN(-ENOMEM);
info->lti_ea_store_size = round;
+
RETURN(0);
}
* When top-object is destroyed (lov_delete_raid0())
* it releases its reference to a sub-object and waits
* until the latter is finally destroyed.
+ *
+ * May be vmalloc'd, must be freed with OBD_FREE_LARGE().
*/
struct lovsub_object **lo_sub;
/**
* (stripe), used by ci_io_loop().
*/
loff_t lis_pos;
- /**
- * end position with in a file, for the current stripe io. This is
- * exclusive (i.e., next offset after last byte affected by io).
- */
+ /**
+ * end position within a file, for the current stripe io. This is
+ * exclusive (i.e., next offset after last byte affected by io).
+ */
loff_t lis_endpos;
- int lis_mem_frozen;
- int lis_stripe_count;
- int lis_active_subios;
+ int lis_mem_frozen;
+ int lis_stripe_count;
+ int lis_active_subios;
- /**
- * the index of ls_single_subio in ls_subios array
- */
- int lis_single_subio_index;
- struct cl_io lis_single_subio;
+ /**
+ * the index of ls_single_subio in ls_subios array
+ */
+ int lis_single_subio_index;
+ struct cl_io lis_single_subio;
- /**
- * size of ls_subios array, actually the highest stripe #
- */
- int lis_nr_subios;
- struct lov_io_sub *lis_subs;
- /**
- * List of active sub-io's.
- */
+ /**
+ * size of ls_subios array, actually the highest stripe #.
+ * lis_subs may be vmalloc'd and must be freed with OBD_FREE_LARGE().
+ */
+ int lis_nr_subios;
+ struct lov_io_sub *lis_subs;
+ /**
+ * List of active sub-io's.
+ */
struct list_head lis_active;
};
void lsm_free_plain(struct lov_stripe_md *lsm)
{
- __u16 stripe_count = lsm->lsm_stripe_count;
- int i;
-
- for (i = 0; i < stripe_count; i++)
- OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
- sizeof(struct lov_oinfo));
- OBD_FREE_LARGE(lsm, sizeof(struct lov_stripe_md) +
- stripe_count * sizeof(struct lov_oinfo *));
+ __u16 stripe_count = lsm->lsm_stripe_count;
+ int i;
+
+ for (i = 0; i < stripe_count; i++)
+ OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
+ sizeof(struct lov_oinfo));
+ OBD_FREE_LARGE(lsm, sizeof(struct lov_stripe_md) +
+ stripe_count * sizeof(struct lov_oinfo *));
}
/* Find minimum stripe maxbytes value. For inactive or
LASSERT(lio->lis_object != NULL);
lsm = lio->lis_object->lo_lsm;
- /*
- * Need to be optimized, we can't afford to allocate a piece of memory
- * when writing a page. -jay
- */
- OBD_ALLOC_LARGE(lio->lis_subs,
- lsm->lsm_stripe_count * sizeof lio->lis_subs[0]);
- if (lio->lis_subs != NULL) {
- lio->lis_nr_subios = lio->lis_stripe_count;
- lio->lis_single_subio_index = -1;
- lio->lis_active_subios = 0;
- result = 0;
- } else
- result = -ENOMEM;
- RETURN(result);
+ /*
+ * Need to be optimized, we can't afford to allocate a piece of memory
+ * when writing a page. -jay
+ */
+ OBD_ALLOC_LARGE(lio->lis_subs,
+ lsm->lsm_stripe_count * sizeof lio->lis_subs[0]);
+ if (lio->lis_subs != NULL) {
+ lio->lis_nr_subios = lio->lis_stripe_count;
+ lio->lis_single_subio_index = -1;
+ lio->lis_active_subios = 0;
+ result = 0;
+ } else
+ result = -ENOMEM;
+
+ RETURN(result);
}
static int lov_io_slice_init(struct lov_io *lio,
*/
if (lsm->lsm_stripe_count > 1 && !(fiemap->fm_flags &
FIEMAP_FLAG_DEVICE_ORDER))
- GOTO(out, rc = -ENOTSUPP);
+ GOTO(out_lsm, rc = -ENOTSUPP);
if (lsm_is_released(lsm)) {
if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
fiemap->fm_extents[0].fe_flags |=
FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
}
- GOTO(out, rc = 0);
+ GOTO(out_lsm, rc = 0);
}
if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
OBD_ALLOC_LARGE(fm_local, buffer_size);
if (fm_local == NULL)
- GOTO(out, rc = -ENOMEM);
+ GOTO(out_lsm, rc = -ENOMEM);
lcl_fm_ext = &fm_local->fm_extents[0];
count_local = fiemap_size_to_count(buffer_size);
fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end,
&start_stripe);
if (fm_end_offset == -EINVAL)
- GOTO(out, rc = -EINVAL);
+ GOTO(out_fm_local, rc = -EINVAL);
/**
* Requested extent count exceeds the fiemap buffer size, shrink our
continue;
if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe]))
- GOTO(out, rc = -EIO);
+ GOTO(out_fm_local, rc = -EIO);
/* If this is a continuation FIEMAP call and we are on
* starting stripe then lun_start needs to be set to
subobj = lov_find_subobj(env, cl2lov(obj), lsm,
cur_stripe);
if (IS_ERR(subobj))
- GOTO(out, rc = PTR_ERR(subobj));
+ GOTO(out_fm_local, rc = PTR_ERR(subobj));
/* If the output buffer is very large and the objects have many
* extents we may need to loop on a single OST repeatedly */
ost_eof = false;
obj_put:
if (subobj != NULL)
cl_object_put(env, subobj);
-out:
- if (fm_local != NULL)
- OBD_FREE_LARGE(fm_local, buffer_size);
+out_fm_local:
+ OBD_FREE_LARGE(fm_local, buffer_size);
+out_lsm:
lov_lsm_put(lsm);
return rc;
/* Ops object filename */
struct lu_name mti_name;
- /* per-thread values, can be re-used */
+ /* per-thread values, can be re-used, may be vmalloc'd */
void *mti_big_lmm;
int mti_big_lmmsize;
/* big_lmm buffer was used and must be used in reply */
struct md_attr eti_ma;
struct lu_name eti_lname;
/* per-thread values, can be re-used */
- void *eti_big_lmm;
+ void *eti_big_lmm; /* may be vmalloc'd */
int eti_big_lmmsize;
char eti_name[20];
struct lu_buf eti_buf;
};
struct osp_update_request_sub {
- struct object_update_request *ours_req;
+ struct object_update_request *ours_req; /* may be vmalloc'd */
size_t ours_req_size;
/* Linked to osp_update_request->our_req_list */
struct list_head ours_list;
list_for_each_entry_safe(ours, tmp, &our->our_req_list, ours_list) {
list_del(&ours->ours_list);
if (ours->ours_req != NULL)
- OBD_FREE(ours->ours_req, ours->ours_req_size);
+ OBD_FREE_LARGE(ours->ours_req, ours->ours_req_size);
OBD_FREE_PTR(ours);
}
OBD_FREE_PTR(our);
if (!msg) {
ptlrpc_request_cache_free(req);
return i;
- }
- req->rq_reqbuf = msg;
- req->rq_reqbuf_len = size;
- req->rq_pool = pool;
+ }
+ req->rq_reqbuf = msg;
+ req->rq_reqbuf_len = size;
+ req->rq_pool = pool;
spin_lock(&pool->prp_lock);
list_add_tail(&req->rq_list, &pool->prp_req_list);
}
cfs_time_t epp_st_max_wait; /* in jeffies */
unsigned long epp_st_outofmem; /* # of out of mem requests */
/*
- * pointers to pools
+ * pointers to pools, may be vmalloc'd
*/
struct page ***epp_pools;
} page_pools;
static inline void enc_pools_alloc(void)
{
- LASSERT(page_pools.epp_max_pools);
- OBD_ALLOC_LARGE(page_pools.epp_pools,
- page_pools.epp_max_pools *
- sizeof(*page_pools.epp_pools));
+ LASSERT(page_pools.epp_max_pools);
+ OBD_ALLOC_LARGE(page_pools.epp_pools,
+ page_pools.epp_max_pools *
+ sizeof(*page_pools.epp_pools));
}
static inline void enc_pools_free(void)
{
- LASSERT(page_pools.epp_max_pools);
- LASSERT(page_pools.epp_pools);
+ LASSERT(page_pools.epp_max_pools);
+ LASSERT(page_pools.epp_pools);
- OBD_FREE_LARGE(page_pools.epp_pools,
- page_pools.epp_max_pools *
- sizeof(*page_pools.epp_pools));
+ OBD_FREE_LARGE(page_pools.epp_pools,
+ page_pools.epp_max_pools *
+ sizeof(*page_pools.epp_pools));
}
int sptlrpc_enc_pool_init(void)
struct ptlrpc_request *req,
int msgsize)
{
- if (!req->rq_reqbuf) {
- int alloc_size = size_roundup_power2(msgsize);
-
- LASSERT(!req->rq_pool);
- OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_size);
- if (!req->rq_reqbuf)
- return -ENOMEM;
-
- req->rq_reqbuf_len = alloc_size;
- } else {
- LASSERT(req->rq_pool);
- LASSERT(req->rq_reqbuf_len >= msgsize);
- memset(req->rq_reqbuf, 0, msgsize);
- }
+ if (!req->rq_reqbuf) {
+ int alloc_size = size_roundup_power2(msgsize);
+
+ LASSERT(!req->rq_pool);
+ OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_size);
+ if (!req->rq_reqbuf)
+ return -ENOMEM;
+
+ req->rq_reqbuf_len = alloc_size;
+ } else {
+ LASSERT(req->rq_pool);
+ LASSERT(req->rq_reqbuf_len >= msgsize);
+ memset(req->rq_reqbuf, 0, msgsize);
+ }
- req->rq_reqmsg = req->rq_reqbuf;
- return 0;
+ req->rq_reqmsg = req->rq_reqbuf;
+ return 0;
}
static
struct ptlrpc_request *req,
int msgsize)
{
- /* add space for early replied */
- msgsize += lustre_msg_early_size();
+ /* add space for early replied */
+ msgsize += lustre_msg_early_size();
- msgsize = size_roundup_power2(msgsize);
+ msgsize = size_roundup_power2(msgsize);
- OBD_ALLOC_LARGE(req->rq_repbuf, msgsize);
- if (!req->rq_repbuf)
- return -ENOMEM;
+ OBD_ALLOC_LARGE(req->rq_repbuf, msgsize);
+ if (!req->rq_repbuf)
+ return -ENOMEM;
- req->rq_repbuf_len = msgsize;
- return 0;
+ req->rq_repbuf_len = msgsize;
+ return 0;
}
static
/* request from pool should always have enough buffer */
LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size);
- if (req->rq_reqbuf_len < newmsg_size) {
- alloc_size = size_roundup_power2(newmsg_size);
+ if (req->rq_reqbuf_len < newmsg_size) {
+ alloc_size = size_roundup_power2(newmsg_size);
- OBD_ALLOC_LARGE(newbuf, alloc_size);
- if (newbuf == NULL)
- return -ENOMEM;
+ OBD_ALLOC_LARGE(newbuf, alloc_size);
+ if (newbuf == NULL)
+ return -ENOMEM;
/* Must lock this, so that otherwise unprotected change of
* rq_reqmsg is not racing with parallel processing of
* there */
if (req->rq_import)
spin_lock(&req->rq_import->imp_lock);
- memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
+ memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
- OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
- req->rq_reqbuf = req->rq_reqmsg = newbuf;
- req->rq_reqbuf_len = alloc_size;
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = req->rq_reqmsg = newbuf;
+ req->rq_reqbuf_len = alloc_size;
if (req->rq_import)
spin_unlock(&req->rq_import->imp_lock);
/* pre-allocated */
LASSERT(rs->rs_size >= rs_size);
} else {
- OBD_ALLOC_LARGE(rs, rs_size);
- if (rs == NULL)
- return -ENOMEM;
+ OBD_ALLOC_LARGE(rs, rs_size);
+ if (rs == NULL)
+ return -ENOMEM;
- rs->rs_size = rs_size;
- }
+ rs->rs_size = rs_size;
+ }
rs->rs_svc_ctx = req->rq_svc_ctx;
atomic_inc(&req->rq_svc_ctx->sc_refcount);
alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
- if (!req->rq_reqbuf) {
- LASSERT(!req->rq_pool);
+ if (!req->rq_reqbuf) {
+ LASSERT(!req->rq_pool);
- alloc_len = size_roundup_power2(alloc_len);
- OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
- if (!req->rq_reqbuf)
- RETURN(-ENOMEM);
+ alloc_len = size_roundup_power2(alloc_len);
+ OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
+ if (!req->rq_reqbuf)
+ RETURN(-ENOMEM);
- req->rq_reqbuf_len = alloc_len;
+ req->rq_reqbuf_len = alloc_len;
} else {
LASSERT(req->rq_pool);
LASSERT(req->rq_reqbuf_len >= alloc_len);
void plain_free_reqbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
- ENTRY;
- if (!req->rq_pool) {
- OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
- }
- EXIT;
+ ENTRY;
+ if (!req->rq_pool) {
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+ }
+ EXIT;
}
static
alloc_len = size_roundup_power2(alloc_len);
- OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
- if (!req->rq_repbuf)
- RETURN(-ENOMEM);
+ OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
+ if (!req->rq_repbuf)
+ RETURN(-ENOMEM);
- req->rq_repbuf_len = alloc_len;
- RETURN(0);
+ req->rq_repbuf_len = alloc_len;
+ RETURN(0);
}
static
/* request from pool should always have enough buffer */
LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
- if (req->rq_reqbuf_len < newbuf_size) {
- newbuf_size = size_roundup_power2(newbuf_size);
+ if (req->rq_reqbuf_len < newbuf_size) {
+ newbuf_size = size_roundup_power2(newbuf_size);
- OBD_ALLOC_LARGE(newbuf, newbuf_size);
- if (newbuf == NULL)
- RETURN(-ENOMEM);
+ OBD_ALLOC_LARGE(newbuf, newbuf_size);
+ if (newbuf == NULL)
+ RETURN(-ENOMEM);
/* Must lock this, so that otherwise unprotected change of
* rq_reqmsg is not racing with parallel processing of
if (req->rq_import)
spin_lock(&req->rq_import->imp_lock);
- memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
+ memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
- OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
- req->rq_reqbuf = newbuf;
- req->rq_reqbuf_len = newbuf_size;
- req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_MSG_OFF, 0);
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = newbuf;
+ req->rq_reqbuf_len = newbuf_size;
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
+ PLAIN_PACK_MSG_OFF, 0);
if (req->rq_import)
spin_unlock(&req->rq_import->imp_lock);
- }
+ }
- _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
- newmsg_size);
- _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
+ newmsg_size);
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
- req->rq_reqlen = newmsg_size;
- RETURN(0);
+ req->rq_reqlen = newmsg_size;
+ RETURN(0);
}
/****************************************
rs = req->rq_reply_state;
- if (rs) {
- /* pre-allocated */
- LASSERT(rs->rs_size >= rs_size);
- } else {
- OBD_ALLOC_LARGE(rs, rs_size);
- if (rs == NULL)
- RETURN(-ENOMEM);
+ if (rs) {
+ /* pre-allocated */
+ LASSERT(rs->rs_size >= rs_size);
+ } else {
+ OBD_ALLOC_LARGE(rs, rs_size);
+ if (rs == NULL)
+ RETURN(-ENOMEM);
- rs->rs_size = rs_size;
- }
+ rs->rs_size = rs_size;
+ }
rs->rs_svc_ctx = req->rq_svc_ctx;
atomic_inc(&req->rq_svc_ctx->sc_refcount);
goto out_srv_fini;
}
- /* Alloc reply state structure for this one */
- OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
- if (!rs) {
- rc = -ENOMEM;
- goto out_srv_fini;
- }
+ /* Alloc reply state structure for this one */
+ OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
+ if (!rs) {
+ rc = -ENOMEM;
+ goto out_srv_fini;
+ }
spin_lock(&svcpt->scp_lock);
* we do not need lock here */
if (replace_record) {
/* Replace the update record and master transno */
- OBD_FREE(dtrq->dtrq_lur, dtrq->dtrq_lur_size);
+ OBD_FREE_LARGE(dtrq->dtrq_lur, dtrq->dtrq_lur_size);
dtrq->dtrq_lur = NULL;
dtrq->dtrq_lur_size = llog_update_record_size(lur);
OBD_ALLOC_LARGE(dtrq->dtrq_lur, dtrq->dtrq_lur_size);