/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
__swab64s(&(lc).route_length); \
} while (0)
-#define sfw_test_active(t) (cfs_atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b) (cfs_atomic_read(&(b)->bat_nactive) != 0)
+#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0)
+#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
struct smoketest_framework {
- cfs_list_t fw_zombie_rpcs; /* RPCs to be recycled */
- cfs_list_t fw_zombie_sessions; /* stopping sessions */
- cfs_list_t fw_tests; /* registered test cases */
- cfs_atomic_t fw_nzombies; /* # zombie sessions */
- cfs_spinlock_t fw_lock; /* serialise */
- sfw_session_t *fw_session; /* _the_ session */
- int fw_shuttingdown; /* shutdown in progress */
- srpc_server_rpc_t *fw_active_srpc; /* running RPC */
+ cfs_list_t fw_zombie_rpcs; /* RPCs to be recycled */
+ cfs_list_t fw_zombie_sessions; /* stopping sessions */
+ cfs_list_t fw_tests; /* registered test cases */
+ atomic_t fw_nzombies; /* # zombie sessions */
+ spinlock_t fw_lock; /* serialise */
+ sfw_session_t *fw_session; /* _the_ session */
+ int fw_shuttingdown; /* shutdown in progress */
+ srpc_server_rpc_t *fw_active_srpc; /* running RPC */
} sfw_data;
/* forward ref's */
LASSERT (!sn->sn_timer_active);
sfw_data.fw_session = NULL;
- cfs_atomic_inc(&sfw_data.fw_nzombies);
+ atomic_inc(&sfw_data.fw_nzombies);
cfs_list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
- cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
- sfw_test_case_t, tsc_list) {
- srpc_abort_service(tsc->tsc_srv_service);
- }
+ cfs_list_for_each_entry_typed(tsc, &sfw_data.fw_tests,
+ sfw_test_case_t, tsc_list) {
+ srpc_abort_service(tsc->tsc_srv_service);
+ }
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
sfw_batch_t, bat_list) {
return; /* wait for active batches to stop */
cfs_list_del_init(&sn->sn_list);
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
- sfw_destroy_session(sn);
+ sfw_destroy_session(sn);
- cfs_spin_lock(&sfw_data.fw_lock);
- return;
+ spin_lock(&sfw_data.fw_lock);
}
#ifndef __KERNEL__
void
sfw_session_expired (void *data)
{
- sfw_session_t *sn = data;
+ sfw_session_t *sn = data;
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
LASSERT (sn->sn_timer_active);
LASSERT (sn == sfw_data.fw_session);
sn->sn_timer_active = 0;
sfw_deactivate_session();
- cfs_spin_unlock(&sfw_data.fw_lock);
- return;
+ spin_unlock(&sfw_data.fw_lock);
}
static inline void
-sfw_init_session (sfw_session_t *sn, lst_sid_t sid, const char *name)
+sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
+ unsigned features, const char *name)
{
stt_timer_t *timer = &sn->sn_timer;
memset(sn, 0, sizeof(sfw_session_t));
CFS_INIT_LIST_HEAD(&sn->sn_list);
CFS_INIT_LIST_HEAD(&sn->sn_batches);
- cfs_atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
- cfs_atomic_set(&sn->sn_brw_errors, 0);
- cfs_atomic_set(&sn->sn_ping_errors, 0);
- strncpy(&sn->sn_name[0], name, LST_NAME_SIZE);
+ atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
+ atomic_set(&sn->sn_brw_errors, 0);
+ atomic_set(&sn->sn_ping_errors, 0);
+ strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
sn->sn_timer_active = 0;
sn->sn_id = sid;
+ sn->sn_features = features;
sn->sn_timeout = session_timeout;
sn->sn_started = cfs_time_current();
{
LASSERT (rpc->crpc_bulk.bk_niov == 0);
LASSERT (cfs_list_empty(&rpc->crpc_list));
- LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif
swi_state2str(rpc->crpc_wi.swi_state),
rpc->crpc_aborted, rpc->crpc_status);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- /* my callers must finish all RPCs before shutting me down */
- LASSERT (!sfw_data.fw_shuttingdown);
- cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
+ /* my callers must finish all RPCs before shutting me down */
+ LASSERT(!sfw_data.fw_shuttingdown);
+ cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
- cfs_spin_unlock(&sfw_data.fw_lock);
- return;
+ spin_unlock(&sfw_data.fw_lock);
}
sfw_batch_t *
bat->bat_error = 0;
bat->bat_session = sn;
bat->bat_id = bid;
- cfs_atomic_set(&bat->bat_nactive, 0);
+ atomic_set(&bat->bat_nactive, 0);
CFS_INIT_LIST_HEAD(&bat->bat_tests);
cfs_list_add_tail(&bat->bat_list, &sn->sn_batches);
sn->sn_started), &tv);
cnt->running_ms = (__u32)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
- cnt->brw_errors = cfs_atomic_read(&sn->sn_brw_errors);
- cnt->ping_errors = cfs_atomic_read(&sn->sn_ping_errors);
- cnt->zombie_sessions = cfs_atomic_read(&sfw_data.fw_nzombies);
+ cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
+ cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
+ cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
cnt->active_batches = 0;
cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
sfw_batch_t, bat_list) {
- if (cfs_atomic_read(&bat->bat_nactive) > 0)
+ if (atomic_read(&bat->bat_nactive) > 0)
cnt->active_batches++;
}
}
int
-sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
+sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ sfw_session_t *sn = sfw_data.fw_session;
+ srpc_msg_t *msg = container_of(request, srpc_msg_t,
+ msg_body.mksn_reqst);
+ int cplen = 0;
if (request->mksn_sid.ses_nid == LNET_NID_ANY) {
reply->mksn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
reply->mksn_timeout = sn->sn_timeout;
if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
- cfs_atomic_inc(&sn->sn_refcount);
+ atomic_inc(&sn->sn_refcount);
return 0;
}
if (!request->mksn_force) {
reply->mksn_status = EBUSY;
- strncpy(&reply->mksn_name[0], &sn->sn_name[0], LST_NAME_SIZE);
+ cplen = strlcpy(&reply->mksn_name[0], &sn->sn_name[0],
+ sizeof(reply->mksn_name));
+ if (cplen >= sizeof(reply->mksn_name))
+ return -E2BIG;
return 0;
}
}
+ /* reject the request if it requires unknown features.
+ * NB: the old version will always accept all features because it is
+ * not aware of srpc_msg_t::msg_ses_feats; this is a defect, but it is
+ * harmless because it will return a zero feature mask to the console,
+ * and it is the console's responsibility to make sure all nodes in a
+ * session have the same feature mask. */
+ if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ reply->mksn_status = EPROTO;
+ return 0;
+ }
+
/* brand new or create by force */
LIBCFS_ALLOC(sn, sizeof(sfw_session_t));
if (sn == NULL) {
return -ENOMEM;
}
- sfw_init_session(sn, request->mksn_sid, &request->mksn_name[0]);
+ sfw_init_session(sn, request->mksn_sid,
+ msg->msg_ses_feats, &request->mksn_name[0]);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- sfw_deactivate_session();
- LASSERT (sfw_data.fw_session == NULL);
- sfw_data.fw_session = sn;
+ sfw_deactivate_session();
+ LASSERT(sfw_data.fw_session == NULL);
+ sfw_data.fw_session = sn;
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
- reply->mksn_status = 0;
- reply->mksn_sid = sn->sn_id;
- reply->mksn_timeout = sn->sn_timeout;
- return 0;
+ reply->mksn_status = 0;
+ reply->mksn_sid = sn->sn_id;
+ reply->mksn_timeout = sn->sn_timeout;
+ return 0;
}
int
return 0;
}
- if (!cfs_atomic_dec_and_test(&sn->sn_refcount)) {
+ if (!atomic_dec_and_test(&sn->sn_refcount)) {
reply->rmsn_status = 0;
return 0;
}
- cfs_spin_lock(&sfw_data.fw_lock);
- sfw_deactivate_session();
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
+ sfw_deactivate_session();
+ spin_unlock(&sfw_data.fw_lock);
- reply->rmsn_status = 0;
- reply->rmsn_sid = LST_INVALID_SID;
- LASSERT (sfw_data.fw_session == NULL);
- return 0;
+ reply->rmsn_status = 0;
+ reply->rmsn_sid = LST_INVALID_SID;
+ LASSERT(sfw_data.fw_session == NULL);
+ return 0;
}
int
reply->dbg_status = 0;
reply->dbg_sid = sn->sn_id;
reply->dbg_timeout = sn->sn_timeout;
- strncpy(reply->dbg_name, &sn->sn_name[0], LST_NAME_SIZE);
+ if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name))
+ >= sizeof(reply->dbg_name))
+ return -E2BIG;
return 0;
}
int
sfw_load_test(struct sfw_test_instance *tsi)
{
- struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
- struct srpc_service *svc = tsc->tsc_srv_service;
- int nbuf = sfw_test_buffers(tsi);
+ struct sfw_test_case *tsc;
+ struct srpc_service *svc;
+ int nbuf;
int rc;
+ LASSERT(tsi != NULL);
+ tsc = sfw_find_test_case(tsi->tsi_service);
+ nbuf = sfw_test_buffers(tsi);
LASSERT(tsc != NULL);
+ svc = tsc->tsc_srv_service;
if (tsi->tsi_is_client) {
tsi->tsi_ops = tsc->tsc_cli_ops;
}
LIBCFS_FREE(sn, sizeof(*sn));
- cfs_atomic_dec(&sfw_data.fw_nzombies);
+ atomic_dec(&sfw_data.fw_nzombies);
return;
}
void
-sfw_unpack_test_req (srpc_msg_t *msg)
+sfw_unpack_addtest_req(srpc_msg_t *msg)
{
srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
LASSERT (msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
- if (req->tsr_service == SRPC_SERVICE_BRW) {
- test_bulk_req_t *bulk = &req->tsr_u.bulk;
+ if (req->tsr_service == SRPC_SERVICE_BRW) {
+ if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+ test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
- __swab32s(&bulk->blk_opc);
- __swab32s(&bulk->blk_npg);
- __swab32s(&bulk->blk_flags);
- return;
- }
+ __swab32s(&bulk->blk_opc);
+ __swab32s(&bulk->blk_npg);
+ __swab32s(&bulk->blk_flags);
+
+ } else {
+ test_bulk_req_v1_t *bulk = &req->tsr_u.bulk_v1;
+
+ __swab16s(&bulk->blk_opc);
+ __swab16s(&bulk->blk_flags);
+ __swab32s(&bulk->blk_offset);
+ __swab32s(&bulk->blk_len);
+ }
+
+ return;
+ }
if (req->tsr_service == SRPC_SERVICE_PING) {
test_ping_req_t *ping = &req->tsr_u.ping;
}
memset(tsi, 0, sizeof(*tsi));
- cfs_spin_lock_init(&tsi->tsi_lock);
- cfs_atomic_set(&tsi->tsi_nactive, 0);
+ spin_lock_init(&tsi->tsi_lock);
+ atomic_set(&tsi->tsi_nactive, 0);
CFS_INIT_LIST_HEAD(&tsi->tsi_units);
CFS_INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
CFS_INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
LASSERT (bk->bk_pages != NULL);
#endif
LASSERT (bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
- LASSERT ((unsigned int)bk->bk_len >= sizeof(lnet_process_id_t) * ndest);
+ LASSERT((unsigned int)bk->bk_len >=
+ sizeof(lnet_process_id_packed_t) * ndest);
- sfw_unpack_test_req(msg);
+ sfw_unpack_addtest_req(msg);
memcpy(&tsi->tsi_u, &req->tsr_u, sizeof(tsi->tsi_u));
for (i = 0; i < ndest; i++) {
int j;
#ifdef __KERNEL__
- dests = cfs_page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
- LASSERT (dests != NULL); /* my pages are within KVM always */
+ dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
+ LASSERT (dests != NULL); /* my pages are within KVM always */
#else
- dests = cfs_page_address(bk->bk_pages[i / SFW_ID_PER_PAGE]);
+ dests = page_address(bk->bk_pages[i / SFW_ID_PER_PAGE]);
#endif
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
LASSERT (sfw_test_active(tsi));
- if (!cfs_atomic_dec_and_test(&tsi->tsi_nactive))
+ if (!atomic_dec_and_test(&tsi->tsi_nactive))
return;
/* the test instance is done */
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
- tsi->tsi_stopping = 0;
+ tsi->tsi_stopping = 0;
- cfs_spin_unlock(&tsi->tsi_lock);
+ spin_unlock(&tsi->tsi_lock);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
- sn == sfw_data.fw_session) { /* sn also active */
- cfs_spin_unlock(&sfw_data.fw_lock);
+ if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
+ sn == sfw_data.fw_session) { /* sn also active */
+ spin_unlock(&sfw_data.fw_lock);
return;
}
cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
sfw_batch_t, bat_list) {
if (sfw_batch_active(tsb)) {
- cfs_spin_unlock(&sfw_data.fw_lock);
- return;
- }
- }
+ spin_unlock(&sfw_data.fw_lock);
+ return;
+ }
+ }
- cfs_list_del_init(&sn->sn_list);
- cfs_spin_unlock(&sfw_data.fw_lock);
+ cfs_list_del_init(&sn->sn_list);
+ spin_unlock(&sfw_data.fw_lock);
- sfw_destroy_session(sn);
- return;
+ sfw_destroy_session(sn);
+ return;
}
void
int done = 0;
tsi->tsi_ops->tso_done_rpc(tsu, rpc);
-
- cfs_spin_lock(&tsi->tsi_lock);
+
+ spin_lock(&tsi->tsi_lock);
LASSERT (sfw_test_active(tsi));
LASSERT (!cfs_list_empty(&rpc->crpc_list));
/* dec ref for poster */
srpc_client_rpc_decref(rpc);
- cfs_spin_unlock(&tsi->tsi_lock);
+ spin_unlock(&tsi->tsi_lock);
if (!done) {
swi_schedule_workitem(&tsu->tsu_worker);
}
int
-sfw_create_test_rpc (sfw_test_unit_t *tsu, lnet_process_id_t peer,
- int nblk, int blklen, srpc_client_rpc_t **rpcpp)
+sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
+ unsigned features, int nblk, int blklen,
+ srpc_client_rpc_t **rpcpp)
{
- srpc_client_rpc_t *rpc = NULL;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ srpc_client_rpc_t *rpc = NULL;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
LASSERT (sfw_test_active(tsi));
cfs_list_del_init(&rpc->crpc_list);
}
- cfs_spin_unlock(&tsi->tsi_lock);
+ spin_unlock(&tsi->tsi_lock);
if (rpc == NULL) {
rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
sfw_test_rpc_fini, tsu);
}
- if (rpc == NULL) {
- CERROR ("Can't create rpc for test %d\n", tsi->tsi_service);
- return -ENOMEM;
- }
+ if (rpc == NULL) {
+ CERROR("Can't create rpc for test %d\n", tsi->tsi_service);
+ return -ENOMEM;
+ }
- *rpcpp = rpc;
- return 0;
+ rpc->crpc_reqstmsg.msg_ses_feats = features;
+ *rpcpp = rpc;
+
+ return 0;
}
int
LASSERT (rpc != NULL);
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
- if (tsi->tsi_stopping) {
- cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
- cfs_spin_unlock(&tsi->tsi_lock);
- goto test_done;
- }
-
- if (tsu->tsu_loop > 0)
- tsu->tsu_loop--;
+ if (tsi->tsi_stopping) {
+ cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+ spin_unlock(&tsi->tsi_lock);
+ goto test_done;
+ }
- cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
- cfs_spin_unlock(&tsi->tsi_lock);
+ if (tsu->tsu_loop > 0)
+ tsu->tsu_loop--;
- rpc->crpc_timeout = rpc_timeout;
+ cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
+ spin_unlock(&tsi->tsi_lock);
- cfs_spin_lock(&rpc->crpc_lock);
- srpc_post_rpc(rpc);
- cfs_spin_unlock(&rpc->crpc_lock);
- return 0;
+ spin_lock(&rpc->crpc_lock);
+ rpc->crpc_timeout = rpc_timeout;
+ srpc_post_rpc(rpc);
+ spin_unlock(&rpc->crpc_lock);
+ return 0;
test_done:
/*
if (sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch already active: "LPU64" (%d)\n",
- tsb->bat_id.bat_id, cfs_atomic_read(&tsb->bat_nactive));
+ tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
return 0;
}
LASSERT (!tsi->tsi_stopping);
LASSERT (!sfw_test_active(tsi));
- cfs_atomic_inc(&tsb->bat_nactive);
+ atomic_inc(&tsb->bat_nactive);
cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
sfw_test_unit_t, tsu_list) {
- cfs_atomic_inc(&tsi->tsi_nactive);
+ atomic_inc(&tsi->tsi_nactive);
tsu->tsu_loop = tsi->tsi_loop;
wi = &tsu->tsu_worker;
swi_init_workitem(wi, tsu, sfw_run_test,
cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
sfw_test_instance_t, tsi_list) {
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
- if (!tsi->tsi_is_client ||
- !sfw_test_active(tsi) || tsi->tsi_stopping) {
- cfs_spin_unlock(&tsi->tsi_lock);
- continue;
- }
+ if (!tsi->tsi_is_client ||
+ !sfw_test_active(tsi) || tsi->tsi_stopping) {
+ spin_unlock(&tsi->tsi_lock);
+ continue;
+ }
- tsi->tsi_stopping = 1;
+ tsi->tsi_stopping = 1;
- if (!force) {
- cfs_spin_unlock(&tsi->tsi_lock);
- continue;
- }
+ if (!force) {
+ spin_unlock(&tsi->tsi_lock);
+ continue;
+ }
- /* abort launched rpcs in the test */
- cfs_list_for_each_entry_typed (rpc, &tsi->tsi_active_rpcs,
- srpc_client_rpc_t, crpc_list) {
- cfs_spin_lock(&rpc->crpc_lock);
+ /* abort launched rpcs in the test */
+ cfs_list_for_each_entry_typed(rpc, &tsi->tsi_active_rpcs,
+ srpc_client_rpc_t, crpc_list) {
+ spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, -EINTR);
+ srpc_abort_rpc(rpc, -EINTR);
- cfs_spin_unlock(&rpc->crpc_lock);
- }
+ spin_unlock(&rpc->crpc_lock);
+ }
- cfs_spin_unlock(&tsi->tsi_lock);
- }
+ spin_unlock(&tsi->tsi_lock);
+ }
- return 0;
+ return 0;
}
int
return -EINVAL;
if (testidx == 0) {
- reply->bar_active = cfs_atomic_read(&tsb->bat_nactive);
+ reply->bar_active = atomic_read(&tsb->bat_nactive);
return 0;
}
if (testidx-- > 1)
continue;
- reply->bar_active = cfs_atomic_read(&tsi->tsi_nactive);
+ reply->bar_active = atomic_read(&tsi->tsi_nactive);
return 0;
}
}
int
-sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int sink)
+sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
+ int sink)
{
LASSERT(rpc->srpc_bulk == NULL);
LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
- rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, sink);
+ rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
if (rpc->srpc_bulk == NULL)
return -ENOMEM;
if (request->tsr_is_client && rpc->srpc_bulk == NULL) {
/* rpc will be resumed later in sfw_bulk_ready */
- return sfw_alloc_pages(rpc, CFS_CPT_ANY,
- sfw_id_pages(request->tsr_ndest), 1);
+ int npg = sfw_id_pages(request->tsr_ndest);
+ int len;
+
+ if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+ len = npg * PAGE_CACHE_SIZE;
+
+ } else {
+ len = sizeof(lnet_process_id_packed_t) *
+ request->tsr_ndest;
+ }
+
+ return sfw_alloc_pages(rpc, CFS_CPT_ANY, npg, len, 1);
}
rc = sfw_add_test_instance(bat, rpc);
sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reply = &rpc->srpc_replymsg;
- srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg;
- int rc = 0;
+ srpc_msg_t *reply = &rpc->srpc_replymsg;
+ srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg;
+ unsigned features = LST_FEATS_MASK;
+ int rc = 0;
- LASSERT (sfw_data.fw_active_srpc == NULL);
- LASSERT (sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+ LASSERT(sfw_data.fw_active_srpc == NULL);
+ LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- if (sfw_data.fw_shuttingdown) {
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -ESHUTDOWN;
- }
+ if (sfw_data.fw_shuttingdown) {
+ spin_unlock(&sfw_data.fw_lock);
+ return -ESHUTDOWN;
+ }
- /* Remove timer to avoid racing with it or expiring active session */
- if (sfw_del_session_timer() != 0) {
- CERROR ("Dropping RPC (%s) from %s: racing with expiry timer.",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -EAGAIN;
- }
+ /* Remove timer to avoid racing with it or expiring active session */
+ if (sfw_del_session_timer() != 0) {
+ CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer));
+ spin_unlock(&sfw_data.fw_lock);
+ return -EAGAIN;
+ }
+
+ sfw_data.fw_active_srpc = rpc;
+ spin_unlock(&sfw_data.fw_lock);
+
+ sfw_unpack_message(request);
+ LASSERT(request->msg_type == srpc_service2request(sv->sv_id));
- sfw_data.fw_active_srpc = rpc;
- cfs_spin_unlock(&sfw_data.fw_lock);
+ /* rpc module should have checked this */
+ LASSERT(request->msg_version == SRPC_MSG_VERSION);
+
+ if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
+ sv->sv_id != SRPC_SERVICE_DEBUG) {
+ sfw_session_t *sn = sfw_data.fw_session;
+
+ if (sn != NULL &&
+ sn->sn_features != request->msg_ses_feats) {
+ CNETERR("Features of framework RPC don't match "
+ "features of current session: %x/%x\n",
+ request->msg_ses_feats, sn->sn_features);
+ reply->msg_body.reply.status = EPROTO;
+ reply->msg_body.reply.sid = sn->sn_id;
+ goto out;
+ }
- sfw_unpack_message(request);
- LASSERT (request->msg_type == srpc_service2request(sv->sv_id));
+ } else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ /* NB: at this point, an old version will ignore features and
+ * create a new session anyway, so the console should be able
+ * to handle this */
+ reply->msg_body.reply.status = EPROTO;
+ goto out;
+ }
switch(sv->sv_id) {
default:
break;
}
- rpc->srpc_done = sfw_server_rpc_done;
- cfs_spin_lock(&sfw_data.fw_lock);
+ if (sfw_data.fw_session != NULL)
+ features = sfw_data.fw_session->sn_features;
+ out:
+ reply->msg_ses_feats = features;
+ rpc->srpc_done = sfw_server_rpc_done;
+ spin_lock(&sfw_data.fw_lock);
#ifdef __KERNEL__
- if (!sfw_data.fw_shuttingdown)
- sfw_add_session_timer();
+ if (!sfw_data.fw_shuttingdown)
+ sfw_add_session_timer();
#else
- LASSERT (!sfw_data.fw_shuttingdown);
- sfw_add_session_timer();
+ LASSERT(!sfw_data.fw_shuttingdown);
+ sfw_add_session_timer();
#endif
- sfw_data.fw_active_srpc = NULL;
- cfs_spin_unlock(&sfw_data.fw_lock);
- return rc;
+ sfw_data.fw_active_srpc = NULL;
+ spin_unlock(&sfw_data.fw_lock);
+ return rc;
}
int
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
int rc;
- LASSERT (rpc->srpc_bulk != NULL);
- LASSERT (sv->sv_id == SRPC_SERVICE_TEST);
- LASSERT (sfw_data.fw_active_srpc == NULL);
- LASSERT (rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
+ LASSERT(rpc->srpc_bulk != NULL);
+ LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
+ LASSERT(sfw_data.fw_active_srpc == NULL);
+ LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- if (status != 0) {
- CERROR ("Bulk transfer failed for RPC: "
- "service %s, peer %s, status %d\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -EIO;
- }
+ if (status != 0) {
+ CERROR("Bulk transfer failed for RPC: "
+ "service %s, peer %s, status %d\n",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
+ spin_unlock(&sfw_data.fw_lock);
+ return -EIO;
+ }
- if (sfw_data.fw_shuttingdown) {
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -ESHUTDOWN;
- }
+ if (sfw_data.fw_shuttingdown) {
+ spin_unlock(&sfw_data.fw_lock);
+ return -ESHUTDOWN;
+ }
- if (sfw_del_session_timer() != 0) {
- CERROR ("Dropping RPC (%s) from %s: racing with expiry timer",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -EAGAIN;
- }
+ if (sfw_del_session_timer() != 0) {
+ CERROR("Dropping RPC (%s) from %s: racing with expiry timer",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer));
+ spin_unlock(&sfw_data.fw_lock);
+ return -EAGAIN;
+ }
- sfw_data.fw_active_srpc = rpc;
- cfs_spin_unlock(&sfw_data.fw_lock);
+ sfw_data.fw_active_srpc = rpc;
+ spin_unlock(&sfw_data.fw_lock);
- rc = sfw_add_test(rpc);
+ rc = sfw_add_test(rpc);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
#ifdef __KERNEL__
- if (!sfw_data.fw_shuttingdown)
- sfw_add_session_timer();
+ if (!sfw_data.fw_shuttingdown)
+ sfw_add_session_timer();
#else
- LASSERT (!sfw_data.fw_shuttingdown);
- sfw_add_session_timer();
+ LASSERT(!sfw_data.fw_shuttingdown);
+ sfw_add_session_timer();
#endif
- sfw_data.fw_active_srpc = NULL;
- cfs_spin_unlock(&sfw_data.fw_lock);
- return rc;
+ sfw_data.fw_active_srpc = NULL;
+ spin_unlock(&sfw_data.fw_lock);
+ return rc;
}
srpc_client_rpc_t *
-sfw_create_rpc (lnet_process_id_t peer, int service,
- int nbulkiov, int bulklen,
- void (*done) (srpc_client_rpc_t *), void *priv)
+sfw_create_rpc(lnet_process_id_t peer, int service,
+ unsigned features, int nbulkiov, int bulklen,
+ void (*done)(srpc_client_rpc_t *), void *priv)
{
- srpc_client_rpc_t *rpc;
+ srpc_client_rpc_t *rpc = NULL;
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
LASSERT (!sfw_data.fw_shuttingdown);
LASSERT (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
rpc = cfs_list_entry(sfw_data.fw_zombie_rpcs.next,
srpc_client_rpc_t, crpc_list);
cfs_list_del(&rpc->crpc_list);
- cfs_spin_unlock(&sfw_data.fw_lock);
srpc_init_client_rpc(rpc, peer, service, 0, 0,
done, sfw_client_rpc_fini, priv);
- return rpc;
}
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
+
+ if (rpc == NULL) {
+ rpc = srpc_create_client_rpc(peer, service,
+ nbulkiov, bulklen, done,
+ nbulkiov != 0 ? NULL :
+ sfw_client_rpc_fini,
+ priv);
+ }
- rpc = srpc_create_client_rpc(peer, service, nbulkiov, bulklen, done,
- nbulkiov != 0 ? NULL : sfw_client_rpc_fini,
- priv);
- return rpc;
+ if (rpc != NULL) /* "session" is a concept in the framework */
+ rpc->crpc_reqstmsg.msg_ses_feats = features;
+
+ return rpc;
}
void
if (msg->msg_magic == SRPC_MSG_MAGIC)
return; /* no flipping needed */
+ /* srpc module should guarantee I wouldn't get crap */
LASSERT (msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
- __swab32s(&msg->msg_type);
-
if (msg->msg_type == SRPC_MSG_STAT_REQST) {
srpc_stat_reqst_t *req = &msg->msg_body.stat_reqst;
void
sfw_abort_rpc (srpc_client_rpc_t *rpc)
{
- LASSERT (cfs_atomic_read(&rpc->crpc_refcount) > 0);
- LASSERT (rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+ LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
+ LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- cfs_spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, -EINTR);
- cfs_spin_unlock(&rpc->crpc_lock);
- return;
+ spin_lock(&rpc->crpc_lock);
+ srpc_abort_rpc(rpc, -EINTR);
+ spin_unlock(&rpc->crpc_lock);
+ return;
}
void
sfw_post_rpc (srpc_client_rpc_t *rpc)
{
- cfs_spin_lock(&rpc->crpc_lock);
+ spin_lock(&rpc->crpc_lock);
LASSERT (!rpc->crpc_closed);
LASSERT (!rpc->crpc_aborted);
rpc->crpc_timeout = rpc_timeout;
srpc_post_rpc(rpc);
- cfs_spin_unlock(&rpc->crpc_lock);
- return;
+ spin_unlock(&rpc->crpc_lock);
+ return;
}
static srpc_service_t sfw_services[] =
sfw_data.fw_session = NULL;
sfw_data.fw_active_srpc = NULL;
- cfs_spin_lock_init(&sfw_data.fw_lock);
- cfs_atomic_set(&sfw_data.fw_nzombies, 0);
+ spin_lock_init(&sfw_data.fw_lock);
+ atomic_set(&sfw_data.fw_nzombies, 0);
CFS_INIT_LIST_HEAD(&sfw_data.fw_tests);
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
void
sfw_shutdown (void)
{
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
- int i;
+ srpc_service_t *sv;
+ sfw_test_case_t *tsc;
+ int i;
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
sfw_data.fw_shuttingdown = 1;
#ifdef __KERNEL__
"waiting for session timer to explode.\n");
sfw_deactivate_session();
- lst_wait_until(cfs_atomic_read(&sfw_data.fw_nzombies) == 0,
+ lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
sfw_data.fw_lock,
"waiting for %d zombie sessions to die.\n",
- cfs_atomic_read(&sfw_data.fw_nzombies));
+ atomic_read(&sfw_data.fw_nzombies));
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
for (i = 0; ; i++) {
sv = &sfw_services[i];