CFS_MODULE_PARM(oss_io_cpts, "s", charp, 0444,
"CPU partitions OSS IO threads should run on");
+/*
+ * This page is allocated statically when the module is initialized.
+ * It is used to simulate data corruption; see ost_checksum_bulk()
+ * for details. As the original pages provided by the layers below
+ * can remain in the internal cache, we do not want to modify
+ * them.
+ */
+static struct page *ost_page_to_corrupt = NULL;
+
/**
* Do not return server-side uid/gid to remote client
*/
if (req->rq_status != 0)
CERROR("ost: statfs failed: rc %d\n", req->rq_status);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OST_STATFS_EINPROGRESS))
+ req->rq_status = -EINPROGRESS;
+
RETURN(0);
}
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
	int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
	int len = desc->bd_iov[i].kiov_len;
+	struct page *np = ost_page_to_corrupt;
	char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
-	memcpy(ptr, "bad3", min(4, len));
-	kunmap(desc->bd_iov[i].kiov_page);
+
+	if (np) {
+		char *ptr2 = kmap(np) + off;
+
+		/* copy the original data into the scratch page and
+		 * corrupt only the copy, so the cached source page
+		 * is left untouched */
+		memcpy(ptr2, ptr, len);
+		memcpy(ptr2, "bad3", min(4, len));
+		kunmap(np);
+	} else {
+		CERROR("can't alloc page for corruption\n");
+	}
+	/* balance the kmap() above; must be done before kiov_page
+	 * is replaced, or the original page pointer is lost */
+	kunmap(desc->bd_iov[i].kiov_page);
+	if (np)
+		desc->bd_iov[i].kiov_page = np;
}
cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
	int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
	int len = desc->bd_iov[i].kiov_len;
+	struct page *np = ost_page_to_corrupt;
	char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
-	memcpy(ptr, "bad4", min(4, len));
-	kunmap(desc->bd_iov[i].kiov_page);
-	/* nobody should use corrupted page again */
-	ClearPageUptodate(desc->bd_iov[i].kiov_page);
+
+	if (np) {
+		char *ptr2 = kmap(np) + off;
+
+		/* corrupt a private copy instead of the cached page,
+		 * so ClearPageUptodate() is no longer needed */
+		memcpy(ptr2, ptr, len);
+		memcpy(ptr2, "bad4", min(4, len));
+		kunmap(np);
+	} else {
+		CERROR("can't alloc page for corruption\n");
+	}
+	/* balance the kmap() above; must be done before kiov_page
+	 * is replaced, or the original page pointer is lost */
+	kunmap(desc->bd_iov[i].kiov_page);
+	if (np)
+		desc->bd_iov[i].kiov_page = np;
}
}
nob += page_rc;
if (page_rc != 0) { /* some data! */
LASSERT (local_nb[i].page != NULL);
- ptlrpc_prep_bulk_page(desc, local_nb[i].page,
- local_nb[i].offset & ~CFS_PAGE_MASK,
- page_rc);
+ ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
+ local_nb[i].lnb_page_offset,
+ page_rc);
}
if (page_rc != local_nb[i].len) { /* short read */
ost_tls_put(req);
out_bulk:
if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_nopin(desc);
out:
LASSERT(rc <= 0);
if (rc == 0) {
/* NB Having prepped, we must commit... */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page(desc, local_nb[i].page,
- local_nb[i].offset & ~CFS_PAGE_MASK,
- local_nb[i].len);
+ ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
+ local_nb[i].lnb_page_offset,
+ local_nb[i].len);
rc = sptlrpc_svc_prep_bulk(req, desc);
if (rc != 0)
body->oa.o_id,
body->oa.o_valid & OBD_MD_FLGROUP ?
body->oa.o_seq : (__u64)0,
- local_nb[0].offset,
- local_nb[npages-1].offset +
+ local_nb[0].lnb_file_offset,
+ local_nb[npages-1].lnb_file_offset +
local_nb[npages-1].len - 1 );
CERROR("client csum %x, original server csum %x, "
"server csum now %x\n",
ost_tls_put(req);
out_bulk:
if (desc)
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_nopin(desc);
out:
if (rc == 0) {
oti_to_request(oti, req);
RETURN(rc);
}
-#ifdef HAVE_QUOTA_SUPPORT
static int ost_handle_quotactl(struct ptlrpc_request *req)
{
struct obd_quotactl *oqctl, *repoqc;
if (rc)
RETURN(-ENOMEM);
- req->rq_status = obd_quotacheck(req->rq_export, oqctl);
- RETURN(0);
+ /* deprecated, not used any more */
+ req->rq_status = -EOPNOTSUPP;
+ RETURN(-EOPNOTSUPP);
}
-static int ost_handle_quota_adjust_qunit(struct ptlrpc_request *req)
-{
- struct quota_adjust_qunit *oqaq, *repoqa;
- struct lustre_quota_ctxt *qctxt;
- int rc;
- ENTRY;
-
- qctxt = &req->rq_export->exp_obd->u.obt.obt_qctxt;
- oqaq = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_ADJUST_QUNIT);
- if (oqaq == NULL)
- GOTO(out, rc = -EPROTO);
-
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc)
- GOTO(out, rc);
-
- repoqa = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_ADJUST_QUNIT);
- req->rq_status = obd_quota_adjust_qunit(req->rq_export, oqaq, qctxt, NULL);
- *repoqa = *oqaq;
-
- out:
- RETURN(rc);
-}
-#endif
-
static int ost_llog_handle_connect(struct obd_export *exp,
struct ptlrpc_request *req)
{
oa->o_seq = lock->l_resource->lr_name.name[1];
oa->o_valid = OBD_MD_FLID|OBD_MD_FLGROUP;
oinfo->oi_oa = oa;
+ oinfo->oi_capa = BYPASS_CAPA;
rc = obd_sync(&env, lock->l_export, oinfo,
lock->l_policy_data.l_extent.start,
case OST_SYNC:
case OST_SET_INFO:
case OST_GET_INFO:
-#ifdef HAVE_QUOTA_SUPPORT
case OST_QUOTACHECK:
case OST_QUOTACTL:
case OST_QUOTA_ADJUST_QUNIT:
-#endif
rc = lustre_msg_check_version(msg, LUSTRE_OST_VERSION);
if (rc)
CERROR("bad opc %u version %08x, expecting %08x\n",
req_capsule_set(&req->rq_pill, &RQF_OST_GET_INFO_GENERIC);
rc = ost_get_info(req->rq_export, req);
break;
-#ifdef HAVE_QUOTA_SUPPORT
case OST_QUOTACHECK:
CDEBUG(D_INODE, "quotacheck\n");
req_capsule_set(&req->rq_pill, &RQF_OST_QUOTACHECK);
RETURN(0);
rc = ost_handle_quotactl(req);
break;
- case OST_QUOTA_ADJUST_QUNIT:
- CDEBUG(D_INODE, "quota_adjust_qunit\n");
- req_capsule_set(&req->rq_pill, &RQF_OST_QUOTA_ADJUST_QUNIT);
- rc = ost_handle_quota_adjust_qunit(req);
- break;
-#endif
case OBD_PING:
DEBUG_REQ(D_INODE, req, "ping");
req_capsule_set(&req->rq_pill, &RQF_OBD_PING);
.psc_ops = {
.so_req_handler = ost_handle,
.so_req_printer = target_print_req,
+ .so_hpreq_handler = ptlrpc_hpreq_handler,
},
};
ost->ost_service = ptlrpc_register_service(&svc_conf,
.psc_ops = {
.so_req_handler = ost_handle,
.so_req_printer = target_print_req,
+ .so_hpreq_handler = NULL,
},
};
ost->ost_create_service = ptlrpc_register_service(&svc_conf,
.so_req_handler = ost_handle,
.so_hpreq_handler = ost_hpreq_handler,
.so_req_printer = target_print_req,
},
};
ost->ost_io_service = ptlrpc_register_service(&svc_conf,
int rc;
ENTRY;
+ ost_page_to_corrupt = cfs_alloc_page(CFS_ALLOC_STD);
+
lprocfs_ost_init_vars(&lvars);
rc = class_register_type(&ost_obd_ops, NULL, lvars.module_vars,
LUSTRE_OSS_NAME, NULL);
static void /*__exit*/ ost_exit(void)
{
+	/* drop the fault-injection page allocated at module-init time;
+	 * it may be NULL if that allocation failed */
+	if (ost_page_to_corrupt)
+		page_cache_release(ost_page_to_corrupt);
+
	class_unregister_type(LUSTRE_OSS_NAME);
}