#include <linux/init.h>
#include <lprocfs_status.h>
#include <libcfs/list.h>
-#include <lustre_quota.h>
#include "ost_internal.h"
static int oss_num_threads;
CFS_MODULE_PARM(oss_io_cpts, "s", charp, 0444,
"CPU partitions OSS IO threads should run on");
+/*
+ * This page is allocated statically when the module is initialized.
+ * It is used to simulate data corruption; see ost_checksum_bulk()
+ * for details. Since the original pages provided by the layers below
+ * may remain in an internal cache, we do not want to modify them.
+ */
+static struct page *ost_page_to_corrupt = NULL;
+
/**
* Do not return server-side uid/gid to remote client
*/
static int ost_lock_get(struct obd_export *exp, struct obdo *oa,
__u64 start, __u64 count, struct lustre_handle *lh,
- int mode, int flags)
+ int mode, __u64 flags)
{
struct ldlm_res_id res_id;
ldlm_policy_data_t policy;
struct obd_trans_info *oti)
{
struct ost_body *body, *repbody;
- int rc, flags = 0;
+ __u64 flags = 0;
struct lustre_handle lh = {0,};
+ int rc;
ENTRY;
/* check that we do support OBD_CONNECT_TRUNCLOCK. */
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
int len = desc->bd_iov[i].kiov_len;
+ struct page *np = ost_page_to_corrupt;
char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
- memcpy(ptr, "bad3", min(4, len));
- kunmap(desc->bd_iov[i].kiov_page);
+
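+		/*
+		 * Corrupt a private copy of the data rather than the
+		 * original page, which may still be referenced by the
+		 * cache of the layers below (see the comment above
+		 * ost_page_to_corrupt).
+		 */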
+		if (np) {
+			char *ptr2 = kmap(np) + off;
+
+			memcpy(ptr2, ptr, len);
+			memcpy(ptr2, "bad3", min(4, len));
+			kunmap(np);
+			/* unmap the original page before swapping the
+			 * corrupted copy into the bulk descriptor */
+			kunmap(desc->bd_iov[i].kiov_page);
+			desc->bd_iov[i].kiov_page = np;
+		} else {
+			CERROR("can't alloc page for corruption\n");
+			kunmap(desc->bd_iov[i].kiov_page);
+		}
}
cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
int len = desc->bd_iov[i].kiov_len;
+ struct page *np = ost_page_to_corrupt;
char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
- memcpy(ptr, "bad4", min(4, len));
- kunmap(desc->bd_iov[i].kiov_page);
- /* nobody should use corrupted page again */
- ClearPageUptodate(desc->bd_iov[i].kiov_page);
+
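+		/* As above, corrupt a copy so that the original cached
+		 * page is left untouched. */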
+		if (np) {
+			char *ptr2 = kmap(np) + off;
+
+			memcpy(ptr2, ptr, len);
+			memcpy(ptr2, "bad4", min(4, len));
+			kunmap(np);
+			/* unmap the original page before replacing it */
+			kunmap(desc->bd_iov[i].kiov_page);
+			desc->bd_iov[i].kiov_page = np;
+		} else {
+			CERROR("can't alloc page for corruption\n");
+			kunmap(desc->bd_iov[i].kiov_page);
+		}
}
}
struct obd_ioobj *obj, struct niobuf_remote *nb,
struct lustre_handle *lh)
{
- int flags = 0;
+ __u64 flags = 0;
int nrbufs = obj->ioo_bufcnt;
struct ldlm_res_id res_id;
ldlm_policy_data_t policy;
	/* In normal mode of operation an I/O request is serviced only
	 * by ll_ost_io threads, each of which has its own tls buffers allocated by
-	 * ost_thread_init().
+	 * ost_io_thread_init().
	 * During recovery, an I/O request may be queued until any of the ost
	 * service threads picks it up, not necessarily one of the
	 * ll_ost_io threads. In that case we dynamically allocate tls
nob += page_rc;
if (page_rc != 0) { /* some data! */
LASSERT (local_nb[i].page != NULL);
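+			/* the _nopin bulk helpers skip the extra page
+			 * get/put: the backend keeps these pages alive
+			 * until the bulk transfer has completed */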
- ptlrpc_prep_bulk_page(desc, local_nb[i].page,
- local_nb[i].lnb_page_offset,
- page_rc);
+ ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
+ local_nb[i].lnb_page_offset,
+ page_rc);
}
if (page_rc != local_nb[i].len) { /* short read */
ost_tls_put(req);
out_bulk:
if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_nopin(desc);
out:
LASSERT(rc <= 0);
if (rc == 0) {
lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
l_wait_event(waitq, 0, &lwi1);
rc = target_bulk_io(exp, desc, &lwi);
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_nopin(desc);
}
RETURN(rc);
/* NB Having prepped, we must commit... */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page(desc, local_nb[i].page,
- local_nb[i].lnb_page_offset,
- local_nb[i].len);
+ ptlrpc_prep_bulk_page_nopin(desc, local_nb[i].page,
+ local_nb[i].lnb_page_offset,
+ local_nb[i].len);
rc = sptlrpc_svc_prep_bulk(req, desc);
if (rc != 0)
ost_tls_put(req);
out_bulk:
if (desc)
- ptlrpc_free_bulk(desc);
+ ptlrpc_free_bulk_nopin(desc);
out:
if (rc == 0) {
oti_to_request(oti, req);
case OST_GET_INFO:
case OST_QUOTACHECK:
case OST_QUOTACTL:
- case OST_QUOTA_ADJUST_QUNIT:
rc = lustre_msg_check_version(msg, LUSTRE_OST_VERSION);
if (rc)
CERROR("bad opc %u version %08x, expecting %08x\n",
lustre_msg_get_version(msg),
LUSTRE_LOG_VERSION);
break;
+ case OST_QUOTA_ADJUST_QUNIT:
+ rc = -ENOTSUPP;
+ CERROR("Quota adjust is deprecated as of 2.4.0\n");
+ break;
default:
CERROR("Unexpected opcode %d\n", lustre_msg_get_opc(msg));
rc = -ENOTSUPP;
	}
/** Assign high priority operations to the request if needed. */
-static int ost_hpreq_handler(struct ptlrpc_request *req)
+static int ost_io_hpreq_handler(struct ptlrpc_request *req)
{
ENTRY;
if (req->rq_export) {
RETURN(0);
if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOSPC))
GOTO(out, rc = -ENOSPC);
- if (OBD_FAIL_CHECK(OBD_FAIL_OST_EROFS))
+ if (OBD_FAIL_TIMEOUT(OBD_FAIL_OST_EROFS, 1))
GOTO(out, rc = -EROFS);
rc = ost_brw_write(req, oti);
LASSERT(current->journal_info == NULL);
if (rc)
RETURN(rc);
RETURN(ptlrpc_reply(req));
- case LDLM_ENQUEUE:
- CDEBUG(D_INODE, "enqueue\n");
- req_capsule_set(&req->rq_pill, &RQF_LDLM_ENQUEUE);
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE))
- RETURN(0);
- rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
- ost_blocking_ast,
- ldlm_server_glimpse_ast);
- fail = OBD_FAIL_OST_LDLM_REPLY_NET;
- break;
- case LDLM_CONVERT:
- CDEBUG(D_INODE, "convert\n");
- req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CONVERT))
- RETURN(0);
- rc = ldlm_handle_convert(req);
- break;
- case LDLM_CANCEL:
- CDEBUG(D_INODE, "cancel\n");
- req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
- RETURN(0);
- rc = ldlm_handle_cancel(req);
- break;
+ case LDLM_ENQUEUE:
+ CDEBUG(D_INODE, "enqueue\n");
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_ENQUEUE);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_NET))
+ RETURN(0);
+ rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
+ ost_blocking_ast,
+ ldlm_server_glimpse_ast);
+ fail = OBD_FAIL_OST_LDLM_REPLY_NET;
+ break;
+ case LDLM_CONVERT:
+ CDEBUG(D_INODE, "convert\n");
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_CONVERT);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CONVERT_NET))
+ RETURN(0);
+ rc = ldlm_handle_convert(req);
+ break;
+ case LDLM_CANCEL:
+ CDEBUG(D_INODE, "cancel\n");
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_NET))
+ RETURN(0);
+ rc = ldlm_handle_cancel(req);
+ break;
case LDLM_BL_CALLBACK:
case LDLM_CP_CALLBACK:
CDEBUG(D_INODE, "callback\n");
return 0;
}
EXPORT_SYMBOL(ost_handle);
+
/*
- * free per-thread pool created by ost_thread_init().
+ * free per-thread pool created by ost_io_thread_init().
*/
-static void ost_thread_done(struct ptlrpc_thread *thread)
+static void ost_io_thread_done(struct ptlrpc_thread *thread)
{
struct ost_thread_local_cache *tls; /* TLS stands for Thread-Local
* Storage */
/*
* be prepared to handle partially-initialized pools (because this is
- * called from ost_thread_init() for cleanup.
+	 * called from ost_io_thread_init() for cleanup).
*/
tls = thread->t_data;
if (tls != NULL) {
/*
* initialize per-thread page pool (bug 5137).
*/
-static int ost_thread_init(struct ptlrpc_thread *thread)
+static int ost_io_thread_init(struct ptlrpc_thread *thread)
{
struct ost_thread_local_cache *tls;
.psc_ops = {
.so_req_handler = ost_handle,
.so_req_printer = target_print_req,
- .so_hpreq_handler = NULL,
},
};
ost->ost_create_service = ptlrpc_register_service(&svc_conf,
oss_io_cpts : NULL,
},
.psc_ops = {
- .so_thr_init = ost_thread_init,
- .so_thr_done = ost_thread_done,
+ .so_thr_init = ost_io_thread_init,
+ .so_thr_done = ost_io_thread_done,
.so_req_handler = ost_handle,
- .so_hpreq_handler = ost_hpreq_handler,
+ .so_hpreq_handler = ost_io_hpreq_handler,
.so_req_printer = target_print_req,
- .so_hpreq_handler = NULL,
},
};
ost->ost_io_service = ptlrpc_register_service(&svc_conf,
int rc;
ENTRY;
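+	/* allocation failure is tolerated here: the checksum
+	 * fault-injection path checks ost_page_to_corrupt for NULL
+	 * and only logs an error in that case */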
+ ost_page_to_corrupt = cfs_alloc_page(CFS_ALLOC_STD);
+
lprocfs_ost_init_vars(&lvars);
rc = class_register_type(&ost_obd_ops, NULL, lvars.module_vars,
LUSTRE_OSS_NAME, NULL);
static void /*__exit*/ ost_exit(void)
{
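+	/* release the page allocated for checksum fault injection
+	 * at module-init time */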
+ if (ost_page_to_corrupt)
+ page_cache_release(ost_page_to_corrupt);
+
class_unregister_type(LUSTRE_OSS_NAME);
}