1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5 * Use is subject to license terms.
7 * Copyright (c) 2012, 2017, Intel Corporation.
11 * This file is part of Lustre, http://www.lustre.org/
13 * Author: Isaac Huang <isaac@clusterfs.com>
/* Number of server-side work items preallocated for the BRW service;
 * tunable at module load / runtime (0644). */
static int brw_srv_workitems = SFW_TEST_WI_MAX;
module_param(brw_srv_workitems, int, 0644);
MODULE_PARM_DESC(brw_srv_workitems, "# BRW server workitems");

/* When > 0, brw_inject_one_error() consumes these credits to randomly
 * corrupt the magic stamp written into bulk pages (test-error injection).
 * Zero (the default) disables injection. */
static int brw_inject_errors;
module_param(brw_inject_errors, int, 0644);
MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
/* Bulk verification stamps: POISON pre-fills sink buffers (must be
 * overwritten by the transfer), MAGIC marks valid payload.  BRW_MSIZE is
 * the granularity (one __u64) at which pages are stamped and checked. */
#define BRW_POISON 0xbeefbeefbeefbeefULL
#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
#define BRW_MSIZE sizeof(__u64)
31 brw_client_fini(struct sfw_test_instance *tsi)
33 struct srpc_bulk *bulk;
34 struct sfw_test_unit *tsu;
36 LASSERT(tsi->tsi_is_client);
38 list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
39 bulk = tsu->tsu_private;
44 tsu->tsu_private = NULL;
49 brw_client_init(struct sfw_test_instance *tsi)
51 struct sfw_session *sn = tsi->tsi_batch->bat_session;
56 struct srpc_bulk *bulk;
57 struct sfw_test_unit *tsu;
60 LASSERT(tsi->tsi_is_client);
62 if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
63 struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
66 flags = breq->blk_flags;
67 /* NB: this is not going to work for variable page size,
68 * but we have to keep it for compatibility */
69 len = breq->blk_npg * PAGE_SIZE;
73 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
75 /* I should never get this step if it's unknown feature
76 * because make_session will reject unknown feature */
77 LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
80 flags = breq->blk_flags;
82 off = breq->blk_offset & ~PAGE_MASK;
85 if (off % BRW_MSIZE != 0)
91 if (opc != LST_BRW_READ && opc != LST_BRW_WRITE)
94 if (flags != LST_BRW_CHECK_NONE &&
95 flags != LST_BRW_CHECK_FULL && flags != LST_BRW_CHECK_SIMPLE)
98 list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
99 bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid, NULL),
102 brw_client_fini(tsi);
105 srpc_init_bulk(bulk, off, len, opc == LST_BRW_READ);
107 tsu->tsu_private = bulk;
/* NB: BRW_POISON, BRW_MAGIC and BRW_MSIZE are defined once near the top of
 * this file (they are needed by brw_client_init).  The identical duplicate
 * definitions that used to sit here were redundant and have been removed. */
117 static int brw_inject_one_error(void)
119 struct timespec64 ts;
121 if (brw_inject_errors <= 0) return 0;
125 if (((ts.tv_nsec / NSEC_PER_USEC) & 1) == 0)
128 return brw_inject_errors--;
132 brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic)
134 char *addr = page_address(pg) + off;
137 LASSERT(addr != NULL);
138 LASSERT(off % BRW_MSIZE == 0 && len % BRW_MSIZE == 0);
140 if (pattern == LST_BRW_CHECK_NONE)
143 if (magic == BRW_MAGIC)
144 magic += brw_inject_one_error();
146 if (pattern == LST_BRW_CHECK_SIMPLE) {
147 memcpy(addr, &magic, BRW_MSIZE);
148 if (len > BRW_MSIZE) {
149 addr += len - BRW_MSIZE;
150 memcpy(addr, &magic, BRW_MSIZE);
155 if (pattern == LST_BRW_CHECK_FULL) {
156 for (i = 0; i < len; i += BRW_MSIZE)
157 memcpy(addr + i, &magic, BRW_MSIZE);
164 brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic)
166 char *addr = page_address(pg) + off;
167 __u64 data = 0; /* make compiler happy */
170 LASSERT(addr != NULL);
171 LASSERT(off % BRW_MSIZE == 0 && len % BRW_MSIZE == 0);
173 if (pattern == LST_BRW_CHECK_NONE)
176 if (pattern == LST_BRW_CHECK_SIMPLE) {
177 data = *((__u64 *) addr);
181 if (len > BRW_MSIZE) {
182 addr += len - BRW_MSIZE;
183 data = *((__u64 *) addr);
190 if (pattern == LST_BRW_CHECK_FULL) {
191 for (i = 0; i < len; i += BRW_MSIZE) {
192 data = *(__u64 *)(addr + i);
202 CERROR ("Bad data in page %p: %#llx, %#llx expected\n",
208 brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
213 for (i = 0; i < bk->bk_niov; i++) {
217 pg = bk->bk_iovs[i].bv_page;
218 off = bk->bk_iovs[i].bv_offset;
219 len = bk->bk_iovs[i].bv_len;
220 brw_fill_page(pg, off, len, pattern, magic);
225 brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
230 for (i = 0; i < bk->bk_niov; i++) {
234 pg = bk->bk_iovs[i].bv_page;
235 off = bk->bk_iovs[i].bv_offset;
236 len = bk->bk_iovs[i].bv_len;
237 if (brw_check_page(pg, off, len, pattern, magic) != 0) {
238 CERROR("Bulk page %p (%d/%d) is corrupted!\n",
248 brw_client_prep_rpc(struct sfw_test_unit *tsu, struct lnet_process_id dest,
249 struct srpc_client_rpc **rpcpp)
251 struct srpc_bulk *bulk = tsu->tsu_private;
252 struct sfw_test_instance *tsi = tsu->tsu_instance;
253 struct sfw_session *sn = tsi->tsi_batch->bat_session;
254 struct srpc_client_rpc *rpc;
255 struct srpc_brw_reqst *req;
264 LASSERT(bulk != NULL);
266 if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
267 struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
270 flags = breq->blk_flags;
271 len = breq->blk_npg * PAGE_SIZE;
275 struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
278 /* I should never get this step if it's unknown feature
279 * because make_session will reject unknown feature */
280 LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
283 flags = breq->blk_flags;
285 off = breq->blk_offset;
287 npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
289 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
293 unsafe_memcpy(&rpc->crpc_bulk, bulk,
294 offsetof(struct srpc_bulk, bk_iovs[npg]),
296 if (opc == LST_BRW_WRITE)
297 brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
299 brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_POISON);
301 req = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
302 req->brw_flags = flags;
311 brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
313 __u64 magic = BRW_MAGIC;
314 struct sfw_test_instance *tsi = tsu->tsu_instance;
315 struct sfw_session *sn = tsi->tsi_batch->bat_session;
316 struct srpc_msg *msg = &rpc->crpc_replymsg;
317 struct srpc_brw_reply *reply = &msg->msg_body.brw_reply;
318 struct srpc_brw_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
322 if (rpc->crpc_status != 0) {
323 CERROR("BRW RPC to %s failed with %d\n",
324 libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
325 if (!tsi->tsi_stopping) /* rpc could have been aborted */
326 atomic_inc(&sn->sn_brw_errors);
330 if (msg->msg_magic != SRPC_MSG_MAGIC) {
332 __swab32s(&reply->brw_status);
335 CDEBUG(reply->brw_status ? D_WARNING : D_NET,
336 "BRW RPC to %s finished with brw_status: %d\n",
337 libcfs_id2str(rpc->crpc_dest), reply->brw_status);
339 if (reply->brw_status != 0) {
340 atomic_inc(&sn->sn_brw_errors);
341 rpc->crpc_status = -(int)reply->brw_status;
345 if (reqst->brw_rw == LST_BRW_WRITE)
348 if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
349 CERROR("Bulk data from %s is corrupted!\n",
350 libcfs_id2str(rpc->crpc_dest));
351 atomic_inc(&sn->sn_brw_errors);
352 rpc->crpc_status = -EBADMSG;
357 brw_server_rpc_done(struct srpc_server_rpc *rpc)
359 struct srpc_bulk *blk = rpc->srpc_bulk;
364 if (rpc->srpc_status != 0)
365 CERROR("Bulk transfer %s %s has failed: %d\n",
366 blk->bk_sink ? "from" : "to",
367 libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
369 CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n",
370 blk->bk_niov, blk->bk_sink ? "from" : "to",
371 libcfs_id2str(rpc->srpc_peer));
375 brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
377 __u64 magic = BRW_MAGIC;
378 struct srpc_brw_reply *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
379 struct srpc_brw_reqst *reqst;
380 struct srpc_msg *reqstmsg;
382 LASSERT (rpc->srpc_bulk != NULL);
383 LASSERT (rpc->srpc_reqstbuf != NULL);
385 reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
386 reqst = &reqstmsg->msg_body.brw_reqst;
389 CERROR ("BRW bulk %s failed for RPC from %s: %d\n",
390 reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
391 libcfs_id2str(rpc->srpc_peer), status);
395 if (reqst->brw_rw == LST_BRW_READ)
398 if (reqstmsg->msg_magic != SRPC_MSG_MAGIC)
401 if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) {
402 CERROR ("Bulk data from %s is corrupted!\n",
403 libcfs_id2str(rpc->srpc_peer));
404 reply->brw_status = EBADMSG;
411 brw_server_handle(struct srpc_server_rpc *rpc)
413 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
414 struct srpc_msg *replymsg = &rpc->srpc_replymsg;
415 struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
416 struct srpc_brw_reply *reply = &replymsg->msg_body.brw_reply;
417 struct srpc_brw_reqst *reqst = &reqstmsg->msg_body.brw_reqst;
419 LASSERT (sv->sv_id == SRPC_SERVICE_BRW);
421 if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
422 LASSERT (reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
424 __swab32s(&reqst->brw_rw);
425 __swab32s(&reqst->brw_len);
426 __swab32s(&reqst->brw_flags);
427 __swab64s(&reqst->brw_rpyid);
428 __swab64s(&reqst->brw_bulkid);
430 LASSERT (reqstmsg->msg_type == (__u32)srpc_service2request(sv->sv_id));
432 reply->brw_status = 0;
433 rpc->srpc_done = brw_server_rpc_done;
435 if ((reqst->brw_rw != LST_BRW_READ && reqst->brw_rw != LST_BRW_WRITE) ||
436 (reqst->brw_flags != LST_BRW_CHECK_NONE &&
437 reqst->brw_flags != LST_BRW_CHECK_FULL &&
438 reqst->brw_flags != LST_BRW_CHECK_SIMPLE)) {
439 reply->brw_status = EINVAL;
443 if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
444 replymsg->msg_ses_feats = LST_FEATS_MASK;
445 reply->brw_status = EPROTO;
449 if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
450 /* compat with old version */
451 if ((reqst->brw_len & ~PAGE_MASK) != 0) {
452 reply->brw_status = EINVAL;
457 replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
459 if (reqst->brw_len == 0 || reqst->brw_len > LNET_MTU) {
460 reply->brw_status = EINVAL;
464 srpc_init_bulk(rpc->srpc_bulk, 0, reqst->brw_len,
465 reqst->brw_rw == LST_BRW_WRITE);
467 if (reqst->brw_rw == LST_BRW_READ)
468 brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_MAGIC);
470 brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_POISON);
476 brw_srpc_init(struct srpc_server_rpc *rpc, int cpt)
478 /* just alloc a maximal size - actual values will be adjusted later */
479 rpc->srpc_bulk = srpc_alloc_bulk(cpt, LNET_MTU);
480 if (rpc->srpc_bulk == NULL)
483 srpc_init_bulk(rpc->srpc_bulk, 0, 0, 0);
489 brw_srpc_fini(struct srpc_server_rpc *rpc)
491 srpc_free_bulk(rpc->srpc_bulk);
492 rpc->srpc_bulk = NULL;
495 struct sfw_test_client_ops brw_test_client = {
496 .tso_init = brw_client_init,
497 .tso_fini = brw_client_fini,
498 .tso_prep_rpc = brw_client_prep_rpc,
499 .tso_done_rpc = brw_client_done_rpc,
502 struct srpc_service brw_test_service = {
503 .sv_id = SRPC_SERVICE_BRW,
504 .sv_name = "brw_test",
505 .sv_handler = brw_server_handle,
506 .sv_bulk_ready = brw_bulk_ready,
508 .sv_srpc_init = brw_srpc_init,
509 .sv_srpc_fini = brw_srpc_fini,
512 void brw_init_test_service(void)
514 unsigned long cache_size = cfs_totalram_pages() >> 4;
516 /* brw prealloc cache should don't eat more than half memory */
517 cache_size /= ((LNET_MTU >> PAGE_SHIFT) + 1) ;
519 brw_test_service.sv_wi_total = brw_srv_workitems;
521 if (brw_test_service.sv_wi_total > cache_size)
522 brw_test_service.sv_wi_total = cache_size;