* GPL HEADER END
*/
/*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* lustre/target/tgt_handler.c
#define DEBUG_SUBSYSTEM S_CLASS
+#include <linux/user_namespace.h>
+#ifdef HAVE_UIDGID_HEADER
+# include <linux/uidgid.h>
+#endif
+
#include <obd.h>
#include <obd_class.h>
#include <obd_cksum.h>
#include <md_object.h>
#include <lustre_lfsck.h>
+#include <lustre_nodemap.h>
#include "tgt_internal.h"
tsi->tsi_mdt_body = body;
- if (!(body->valid & OBD_MD_FLID))
+ if (!(body->mbo_valid & OBD_MD_FLID))
RETURN(0);
/* mdc_pack_body() doesn't check if fid is zero and set OBD_ML_FID
* in any case in pre-2.5 clients. Fix that here if needed */
- if (unlikely(fid_is_zero(&body->fid1)))
+ if (unlikely(fid_is_zero(&body->mbo_fid1)))
RETURN(0);
- if (!fid_is_sane(&body->fid1)) {
+ if (!fid_is_sane(&body->mbo_fid1)) {
CERROR("%s: invalid FID: "DFID"\n", tgt_name(tsi->tsi_tgt),
- PFID(&body->fid1));
+ PFID(&body->mbo_fid1));
RETURN(-EINVAL);
}
obj = lu_object_find(tsi->tsi_env,
&tsi->tsi_tgt->lut_bottom->dd_lu_dev,
- &body->fid1, NULL);
+ &body->mbo_fid1, NULL);
if (!IS_ERR(obj)) {
if ((flags & HABEO_CORPUS) && !lu_object_exists(obj)) {
lu_object_put(tsi->tsi_env, obj);
- /* for capability renew ENOENT will be handled in
- * mdt_renew_capa */
- if (body->valid & OBD_MD_FLOSSCAPA)
- rc = 0;
- else
- rc = -ENOENT;
+ rc = -ENOENT;
} else {
tsi->tsi_corpus = obj;
rc = 0;
rc = PTR_ERR(obj);
}
- tsi->tsi_fid = body->fid1;
+ tsi->tsi_fid = body->mbo_fid1;
RETURN(rc);
}
int tgt_validate_obdo(struct tgt_session_info *tsi, struct obdo *oa)
{
struct ost_id *oi = &oa->o_oi;
- obd_seq seq = ostid_seq(oi);
- obd_id id = ostid_id(oi);
+ u64 seq = ostid_seq(oi);
+ u64 id = ostid_id(oi);
int rc;
ENTRY;
{
struct ost_body *body;
struct req_capsule *pill = tsi->tsi_pill;
- struct lustre_capa *capa;
+ struct lu_nodemap *nodemap;
int rc;
ENTRY;
if (rc)
RETURN(rc);
- if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
- capa = req_capsule_client_get(pill, &RMF_CAPA1);
- if (capa == NULL) {
- CERROR("%s: OSSCAPA flag is set without capability\n",
- tgt_name(tsi->tsi_tgt));
- RETURN(-EFAULT);
- }
- }
+ nodemap = tsi->tsi_exp->exp_target_data.ted_nodemap;
+
+ body->oa.o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
+ NODEMAP_CLIENT_TO_FS,
+ body->oa.o_uid);
+ body->oa.o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
+ NODEMAP_CLIENT_TO_FS,
+ body->oa.o_gid);
tsi->tsi_ost_body = body;
tsi->tsi_fid = body->oa.o_oi.oi_fid;
RCL_SERVER))
req_capsule_set_size(tsi->tsi_pill, &RMF_MDT_MD,
RCL_SERVER,
- tsi->tsi_mdt_body->eadatasize);
+ tsi->tsi_mdt_body->mbo_eadatasize);
if (req_capsule_has_field(tsi->tsi_pill, &RMF_LOGCOOKIES,
RCL_SERVER))
req_capsule_set_size(tsi->tsi_pill, &RMF_LOGCOOKIES,
*process = 1;
RETURN(0);
case MDS_CLOSE:
- case MDS_DONE_WRITING:
case MDS_SYNC: /* used in unmounting */
case OBD_PING:
case MDS_REINT:
case OUT_UPDATE:
case SEQ_QUERY:
case FLD_QUERY:
+ case FLD_READ:
case LDLM_ENQUEUE:
case OST_CREATE:
case OST_DESTROY:
case OST_SETATTR:
case OST_SYNC:
case OST_WRITE:
+ case MDS_HSM_PROGRESS:
+ case MDS_HSM_STATE_SET:
+ case MDS_HSM_REQUEST:
*process = target_queue_recovery_request(req, obd);
RETURN(0);
* -ve: abort immediately with the given error code;
* 0: send reply with error code in req->rq_status;
*/
-int tgt_handle_recovery(struct ptlrpc_request *req, int reply_fail_id)
+static int tgt_handle_recovery(struct ptlrpc_request *req, int reply_fail_id)
{
ENTRY;
ENTRY;
tgt = class_exp2tgt(req->rq_export);
+ if (unlikely(tgt == NULL)) {
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ class_exp2obd(req->rq_export)->obd_name);
+ RETURN(ERR_PTR(-EINVAL));
+ }
for (s = tgt->lut_slice; s->tos_hs != NULL; s++)
if (s->tos_opc_start <= opc && opc < s->tos_opc_end)
else
tsi->tsi_jobid = NULL;
+ if (tgt == NULL) {
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ class_exp2obd(req->rq_export)->obd_name);
+ req->rq_status = -EINVAL;
+ rc = ptlrpc_error(req);
+ GOTO(out, rc);
+ }
+
request_fail_id = tgt->lut_request_fail_id;
tsi->tsi_reply_fail_id = tgt->lut_reply_fail_id;
+/* Strip the deprecated remote-client bits from the connect reply when no
+ * security flavour applies.  NOTE(review): the MDS/OSS capability bits are
+ * no longer masked here — presumably because capability handling is being
+ * removed by this change (see the OBD_MD_FLOSSCAPA/RMF_CAPA1 deletions
+ * elsewhere in this patch); confirm no caller still expects them cleared
+ * in this helper. */
static inline void tgt_init_sec_none(struct obd_connect_data *reply)
{
reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |
-				      OBD_CONNECT_RMT_CLIENT_FORCE |
-				      OBD_CONNECT_MDS_CAPA |
-				      OBD_CONNECT_OSS_CAPA);
+				      OBD_CONNECT_RMT_CLIENT_FORCE);
}
static int tgt_init_sec_level(struct ptlrpc_request *req)
{
struct lu_target *tgt = class_exp2tgt(req->rq_export);
- char *client = libcfs_nid2str(req->rq_peer.nid);
+ char *client;
struct obd_connect_data *data, *reply;
int rc = 0;
bool remote;
-
ENTRY;
data = req_capsule_client_get(&req->rq_pill, &RMF_CONNECT_DATA);
RETURN(0);
}
+ if (unlikely(tgt == NULL)) {
+ DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
+ class_exp2obd(req->rq_export)->obd_name);
+ RETURN(-EINVAL);
+ }
+
+ client = libcfs_nid2str(req->rq_peer.nid);
/* no GSS support case */
if (!req->rq_auth_gss) {
if (tgt->lut_sec_level > LUSTRE_SEC_NONE) {
"as remote by default.\n", client, tgt_name(tgt));
}
- if (remote) {
- if (!tgt->lut_oss_capa) {
- CDEBUG(D_SEC,
- "client %s -> target %s is set as remote,"
- " but OSS capabilities are not enabled: %d.\n",
- client, tgt_name(tgt), tgt->lut_oss_capa);
- RETURN(-EACCES);
- }
- } else {
+ if (remote == 0) {
if (!uid_valid(make_kuid(&init_user_ns, req->rq_auth_uid))) {
CDEBUG(D_SEC, "client %s -> target %s: user is not "
"authenticated!\n", client, tgt_name(tgt));
break;
reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |
OBD_CONNECT_RMT_CLIENT_FORCE);
- if (!tgt->lut_oss_capa)
- reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
- if (!tgt->lut_mds_capa)
- reply->ocd_connect_flags &= ~OBD_CONNECT_MDS_CAPA;
+ reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
+ reply->ocd_connect_flags &= ~OBD_CONNECT_MDS_CAPA;
break;
default:
RETURN(-EINVAL);
struct sptlrpc_rule_set tmp_rset;
int rc;
+ if (unlikely(tgt == NULL)) {
+ CERROR("No target passed");
+ return -EINVAL;
+ }
+
sptlrpc_rule_set_init(&tmp_rset);
rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset, initial);
if (rc) {
{
return err_serious(-EOPNOTSUPP);
}
-EXPORT_SYMBOL(tgt_obd_log_cancel);
-
-int tgt_obd_qc_callback(struct tgt_session_info *tsi)
-{
- return err_serious(-EOPNOTSUPP);
-}
-EXPORT_SYMBOL(tgt_obd_qc_callback);
int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob)
{
/*
* OBD_IDX_READ handler
*/
-int tgt_obd_idx_read(struct tgt_session_info *tsi)
+static int tgt_obd_idx_read(struct tgt_session_info *tsi)
{
struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
struct lu_rdpg *rdpg = &tti->tti_u.rdpg.tti_rdpg;
}
return rc;
}
-EXPORT_SYMBOL(tgt_obd_idx_read);
+/* Dispatch table for generic OBD requests served by the target layer.
+ * The OBD_QC_CALLBACK entry is dropped together with the removal of
+ * tgt_obd_qc_callback() earlier in this patch. */
struct tgt_handler tgt_obd_handlers[] = {
TGT_OBD_HDL    (0,	OBD_PING,		tgt_obd_ping),
TGT_OBD_HDL_VAR(0,	OBD_LOG_CANCEL,		tgt_obd_log_cancel),
-TGT_OBD_HDL_VAR(0,	OBD_QC_CALLBACK,	tgt_obd_qc_callback),
TGT_OBD_HDL    (0,	OBD_IDX_READ,		tgt_obd_idx_read)
};
EXPORT_SYMBOL(tgt_obd_handlers);
int tgt_sync(const struct lu_env *env, struct lu_target *tgt,
- struct dt_object *obj)
+ struct dt_object *obj, __u64 start, __u64 end)
{
int rc = 0;
rc = dt_sync(env, tgt->lut_bottom);
} else if (dt_version_get(env, obj) >
tgt->lut_obd->obd_last_committed) {
- rc = dt_object_sync(env, obj);
+ rc = dt_object_sync(env, obj, start, end);
}
RETURN(rc);
/* Ensure that data and metadata are synced to the disk when lock is cancelled
* (if requested) */
-int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
+static int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
{
struct lu_env env;
struct lu_target *tgt;
tgt = class_exp2tgt(lock->l_export);
+ if (unlikely(tgt == NULL)) {
+ CDEBUG(D_ERROR, "%s: No target for connected export\n",
+ class_exp2obd(lock->l_export)->obd_name);
+ RETURN(-EINVAL);
+ }
+
if (flag == LDLM_CB_CANCELING &&
(lock->l_granted_mode & (LCK_PW | LCK_GROUP)) &&
(tgt->lut_sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
(tgt->lut_sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL &&
lock->l_flags & LDLM_FL_CBPENDING))) {
+ __u64 start = 0;
+ __u64 end = OBD_OBJECT_EOF;
+
rc = lu_env_init(&env, LCT_DT_THREAD);
if (unlikely(rc != 0))
RETURN(rc);
if (!dt_object_exists(obj))
GOTO(err_put, rc = -ENOENT);
- rc = tgt_sync(&env, tgt, obj);
+ if (lock->l_resource->lr_type == LDLM_EXTENT) {
+ start = lock->l_policy_data.l_extent.start;
+ end = lock->l_policy_data.l_extent.end;
+ }
+
+ rc = tgt_sync(&env, tgt, obj, start, end);
if (rc < 0) {
CERROR("%s: syncing "DFID" ("LPU64"-"LPU64") on lock "
"cancel: rc = %d\n",
RETURN(rc);
}
-struct ldlm_callback_suite tgt_dlm_cbs = {
+static struct ldlm_callback_suite tgt_dlm_cbs = {
.lcs_completion = ldlm_server_completion_ast,
.lcs_blocking = tgt_blocking_ast,
.lcs_glimpse = ldlm_server_glimpse_ast
if (rc)
RETURN(err_serious(rc));
+ switch (LUT_FAIL_CLASS(tsi->tsi_reply_fail_id)) {
+ case LUT_FAIL_MDT:
+ tsi->tsi_reply_fail_id = OBD_FAIL_MDS_LDLM_REPLY_NET;
+ break;
+ case LUT_FAIL_OST:
+ tsi->tsi_reply_fail_id = OBD_FAIL_OST_LDLM_REPLY_NET;
+ break;
+ case LUT_FAIL_MGT:
+ tsi->tsi_reply_fail_id = OBD_FAIL_MGS_LDLM_REPLY_NET;
+ break;
+ default:
+ tsi->tsi_reply_fail_id = OBD_FAIL_LDLM_REPLY;
+ break;
+ }
RETURN(req->rq_status);
}
EXPORT_SYMBOL(tgt_enqueue);
RETURN(req->rq_status);
}
-EXPORT_SYMBOL(tgt_convert);
+/* Stub handler (presumably the LDLM blocking-callback request): always
+ * reports a serious -EOPNOTSUPP, as servers do not accept this request.
+ * NOTE(review): EXPORT_SYMBOL is removed below — verify no out-of-file
+ * user of tgt_bl_callback remains before landing. */
int tgt_bl_callback(struct tgt_session_info *tsi)
{
return err_serious(-EOPNOTSUPP);
}
-EXPORT_SYMBOL(tgt_bl_callback);
+/* Stub handler (presumably the LDLM completion-callback request): always
+ * reports a serious -EOPNOTSUPP, as servers do not accept this request.
+ * NOTE(review): EXPORT_SYMBOL is removed below — verify no out-of-file
+ * user of tgt_cp_callback remains before landing. */
int tgt_cp_callback(struct tgt_session_info *tsi)
{
return err_serious(-EOPNOTSUPP);
}
-EXPORT_SYMBOL(tgt_cp_callback);
/* generic LDLM target handler */
struct tgt_handler tgt_dlm_handlers[] = {
RETURN(rc);
}
-EXPORT_SYMBOL(tgt_llog_destroy);
int tgt_llog_read_header(struct tgt_session_info *tsi)
{
* sec context handlers
*/
/* XXX: Implement based on mdt_sec_ctx_handle()? */
+/* Security-context request handler: currently a no-op that just reports
+ * success (see the XXX above — a real implementation is still pending).
+ * Made static here; NOTE(review): assumes the only references are the
+ * handler tables in this file — confirm before landing. */
-int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
+static int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
{
return 0;
}
int (*tgt_lfsck_in_notify)(const struct lu_env *env,
struct dt_device *key,
- struct lfsck_request *lr) = NULL;
+ struct lfsck_request *lr,
+ struct thandle *th) = NULL;
+/* Install the LFSCK in-notify hook stored in tgt_lfsck_in_notify.  The
+ * callback signature gains a struct thandle * argument so a notification
+ * can run inside an existing transaction; the in-file call site updated
+ * by this patch passes NULL for it (no enclosing transaction). */
void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
struct dt_device *,
-					       struct lfsck_request *))
+					       struct lfsck_request *,
+					       struct thandle *))
{
tgt_lfsck_in_notify = notify;
}
if (lr == NULL)
RETURN(-EPROTO);
- rc = tgt_lfsck_in_notify(env, key, lr);
+ rc = tgt_lfsck_in_notify(env, key, lr, NULL);
RETURN(rc);
}
{
struct lfsck_request *request;
struct lfsck_reply *reply;
- int rc = 0;
+ int rc;
ENTRY;
request = req_capsule_client_get(tsi->tsi_pill, &RMF_LFSCK_REQUEST);
if (reply == NULL)
RETURN(-ENOMEM);
- reply->lr_status = tgt_lfsck_query(tsi->tsi_env,
- tsi->tsi_tgt->lut_bottom, request);
- if (reply->lr_status < 0)
- rc = reply->lr_status;
+ rc = tgt_lfsck_query(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, request);
+ reply->lr_status = rc;
- RETURN(rc);
+ RETURN(rc < 0 ? rc : 0);
}
struct tgt_handler tgt_lfsck_handlers[] = {
LASSERT(!lustre_handle_is_used(lh));
policy.l_extent.gid = 0;
- policy.l_extent.start = start & CFS_PAGE_MASK;
+ policy.l_extent.start = start & PAGE_MASK;
/*
* If ->o_blocks is EOF it means "lock till the end of the file".
if (end == OBD_OBJECT_EOF || end < start)
policy.l_extent.end = OBD_OBJECT_EOF;
else
- policy.l_extent.end = end | ~CFS_PAGE_MASK;
+ policy.l_extent.end = end | ~PAGE_MASK;
rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_EXTENT, &policy, mode,
flags, ldlm_blocking_ast,
LASSERT(mode == LCK_PR || mode == LCK_PW);
LASSERT(!lustre_handle_is_used(lh));
- if (nrbufs == 0 || !(nb[0].flags & OBD_BRW_SRVLOCK))
+ if (nrbufs == 0 || !(nb[0].rnb_flags & OBD_BRW_SRVLOCK))
RETURN(0);
for (i = 1; i < nrbufs; i++)
- if (!(nb[i].flags & OBD_BRW_SRVLOCK))
+ if (!(nb[i].rnb_flags & OBD_BRW_SRVLOCK))
RETURN(-EFAULT);
- RETURN(tgt_extent_lock(ns, res_id, nb[0].offset,
- nb[nrbufs - 1].offset + nb[nrbufs - 1].len - 1,
+ RETURN(tgt_extent_lock(ns, res_id, nb[0].rnb_offset,
+ nb[nrbufs - 1].rnb_offset +
+ nb[nrbufs - 1].rnb_len - 1,
lh, mode, &flags));
}
-EXPORT_SYMBOL(tgt_brw_lock);
void tgt_brw_unlock(struct obd_ioobj *obj, struct niobuf_remote *niob,
struct lustre_handle *lh, int mode)
ENTRY;
LASSERT(mode == LCK_PR || mode == LCK_PW);
- LASSERT((obj->ioo_bufcnt > 0 && (niob[0].flags & OBD_BRW_SRVLOCK)) ==
+ LASSERT((obj->ioo_bufcnt > 0 &&
+ (niob[0].rnb_flags & OBD_BRW_SRVLOCK)) ==
lustre_handle_is_used(lh));
+
if (lustre_handle_is_used(lh))
tgt_extent_unlock(lh, mode);
EXIT;
}
-EXPORT_SYMBOL(tgt_brw_unlock);
static __u32 tgt_checksum_bulk(struct lu_target *tgt,
struct ptlrpc_bulk_desc *desc, int opc,
* simulate a client->OST data error */
if (i == 0 && opc == OST_WRITE &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
- int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ int off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
int len = desc->bd_iov[i].kiov_len;
struct page *np = tgt_page_to_corrupt;
char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
}
}
cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
+ desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
desc->bd_iov[i].kiov_len);
/* corrupt the data after we compute the checksum, to
* simulate an OST->client data error */
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
- int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ int off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
int len = desc->bd_iov[i].kiov_len;
struct page *np = tgt_page_to_corrupt;
char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
}
}
- bufsize = 4;
+ bufsize = sizeof(cksum);
err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
return cksum;
}
struct ost_body *body, *repbody;
struct l_wait_info lwi;
struct lustre_handle lockh = { 0 };
- int niocount, npages, nob = 0, rc, i;
- int no_reply = 0;
+ int npages, nob = 0, rc, i, no_reply = 0;
struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
ENTRY;
ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
LASSERT(ioo != NULL); /* must exists after tgt_ost_body_unpack */
- niocount = ioo->ioo_bufcnt;
remote_nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
LASSERT(remote_nb != NULL); /* must exists after tgt_ost_body_unpack */
npages = PTLRPC_MAX_BRW_PAGES;
rc = obd_preprw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1,
- ioo, remote_nb, &npages, local_nb, NULL, BYPASS_CAPA);
+ ioo, remote_nb, &npages, local_nb, NULL);
if (rc != 0)
GOTO(out_lock, rc);
nob = 0;
for (i = 0; i < npages; i++) {
- int page_rc = local_nb[i].rc;
+ int page_rc = local_nb[i].lnb_rc;
if (page_rc < 0) {
rc = page_rc;
nob += page_rc;
if (page_rc != 0) { /* some data! */
- LASSERT(local_nb[i].page != NULL);
- ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
+ LASSERT(local_nb[i].lnb_page != NULL);
+ ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].lnb_page,
local_nb[i].lnb_page_offset,
page_rc);
}
- if (page_rc != local_nb[i].len) { /* short read */
+ if (page_rc != local_nb[i].lnb_len) { /* short read */
/* All subsequent pages should be 0 */
while (++i < npages)
- LASSERT(local_nb[i].rc == 0);
+ LASSERT(local_nb[i].lnb_rc == 0);
break;
}
}
static void tgt_warn_on_cksum(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc,
struct niobuf_local *local_nb, int npages,
- obd_count client_cksum, obd_count server_cksum,
+ u32 client_cksum, u32 server_cksum,
bool mmap)
{
struct obd_export *exp = req->rq_export;
struct ost_body *body;
- char *router;
- char *via;
+ char *router = "";
+ char *via = "";
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body != NULL);
- if (req->rq_peer.nid == desc->bd_sender) {
- via = router = "";
- } else {
+ if (req->rq_peer.nid != desc->bd_sender) {
via = " via ";
router = libcfs_nid2str(desc->bd_sender);
}
POSTID(&body->oa.o_oi),
local_nb[0].lnb_file_offset,
local_nb[npages-1].lnb_file_offset +
- local_nb[npages-1].len - 1,
+ local_nb[npages - 1].lnb_len - 1,
client_cksum, server_cksum);
}
sizeof(*remote_nb))
RETURN(err_serious(-EPROTO));
- if ((remote_nb[0].flags & OBD_BRW_MEMALLOC) &&
+ if ((remote_nb[0].rnb_flags & OBD_BRW_MEMALLOC) &&
(exp->exp_connection->c_peer.nid == exp->exp_connection->c_self))
memory_pressure_set();
npages = PTLRPC_MAX_BRW_PAGES;
rc = obd_preprw(tsi->tsi_env, OBD_BRW_WRITE, exp, &repbody->oa,
- objcount, ioo, remote_nb, &npages, local_nb, NULL,
- BYPASS_CAPA);
+ objcount, ioo, remote_nb, &npages, local_nb, NULL);
if (rc < 0)
GOTO(out_lock, rc);
/* NB Having prepped, we must commit... */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
+ ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].lnb_page,
local_nb[i].lnb_page_offset,
- local_nb[i].len);
+ local_nb[i].lnb_len);
rc = sptlrpc_svc_prep_bulk(req, desc);
if (rc != 0)
/* set per-requested niobuf return codes */
for (i = j = 0; i < niocount; i++) {
- int len = remote_nb[i].len;
+ int len = remote_nb[i].rnb_len;
nob += len;
rcs[i] = 0;
do {
LASSERT(j < npages);
- if (local_nb[j].rc < 0)
- rcs[i] = local_nb[j].rc;
- len -= local_nb[j].len;
+ if (local_nb[j].lnb_rc < 0)
+ rcs[i] = local_nb[j].lnb_rc;
+ len -= local_nb[j].lnb_len;
j++;
} while (len > 0);
LASSERT(len == 0);