* - create lu_object, corresponding to the fid in mdt_body, and save it in
* @tsi;
*
- * - if HABEO_CORPUS flag is set for this request type check whether object
+ * - if HAS_BODY flag is set for this request type check whether object
* actually exists on storage (lu_object_exists()).
*
*/
&tsi->tsi_tgt->lut_bottom->dd_lu_dev,
&body->mbo_fid1, NULL);
if (!IS_ERR(obj)) {
- if ((flags & HABEO_CORPUS) && !lu_object_exists(obj)) {
+ if ((flags & HAS_BODY) && !lu_object_exists(obj)) {
lu_object_put(tsi->tsi_env, obj);
rc = -ENOENT;
} else {
}
if (!(body->oa.o_valid & OBD_MD_FLID)) {
- if (flags & HABEO_CORPUS) {
- CERROR("%s: OBD_MD_FLID flag is not set in ost_body "
- "but OID/FID is mandatory with HABEO_CORPUS\n",
+ if (flags & HAS_BODY) {
+ CERROR("%s: OBD_MD_FLID flag is not set in ost_body but OID/FID is mandatory with HAS_BODY\n",
tgt_name(tsi->tsi_tgt));
RETURN(-EPROTO);
} else {
LASSERT(h->th_opc == lustre_msg_get_opc(req->rq_reqmsg));
LASSERT(current->journal_info == NULL);
- LASSERT(ergo(flags & (HABEO_CORPUS | HABEO_REFERO),
+ LASSERT(ergo(flags & (HAS_BODY | HAS_REPLY),
h->th_fmt != NULL));
if (h->th_fmt != NULL) {
req_capsule_set(pill, h->th_fmt);
}
}
- if (flags & MUTABOR && tgt_conn_flags(tsi) & OBD_CONNECT_RDONLY)
+ if (flags & IS_MUTABLE && tgt_conn_flags(tsi) & OBD_CONNECT_RDONLY)
RETURN(-EROFS);
- if (flags & HABEO_CLAVIS) {
+ if (flags & HAS_KEY) {
struct ldlm_request *dlm_req;
LASSERT(h->th_fmt != NULL);
rc = tgt_request_preprocess(tsi, h, req);
/* pack reply if reply format is fixed */
- if (rc == 0 && h->th_flags & HABEO_REFERO) {
+ if (rc == 0 && h->th_flags & HAS_REPLY) {
/* Pack reply */
if (req_capsule_has_field(tsi->tsi_pill, &RMF_MDT_MD,
RCL_SERVER))
LASSERTF(h->th_opc == opc, "opcode mismatch %d != %d\n",
h->th_opc, opc);
- if (CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))
+ if ((cfs_fail_val == 0 || cfs_fail_val == opc) &&
+ CFS_FAIL_CHECK_ORSET(request_fail_id, CFS_FAIL_ONCE))
GOTO(out, rc = 0);
rc = lustre_msg_check_version(msg, h->th_version);
reply = req_capsule_server_get(tsi->tsi_pill, &RMF_CONNECT_DATA);
spin_lock(&tsi->tsi_exp->exp_lock);
*exp_connect_flags_ptr(tsi->tsi_exp) = reply->ocd_connect_flags;
+ if (reply->ocd_connect_flags & OBD_CONNECT_FLAGS2)
+ *exp_connect_flags2_ptr(tsi->tsi_exp) =
+ reply->ocd_connect_flags2;
tsi->tsi_exp->exp_connect_data.ocd_brw_size = reply->ocd_brw_size;
spin_unlock(&tsi->tsi_exp->exp_lock);
+ if (strcmp(tsi->tsi_exp->exp_obd->obd_type->typ_name,
+ LUSTRE_MDT_NAME) == 0) {
+ rc = req_check_sepol(tsi->tsi_pill);
+ if (rc)
+ GOTO(out, rc);
+ }
+
RETURN(0);
out:
obd_disconnect(class_export_get(tsi->tsi_exp));
ENTRY;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_DISCONNECT_DELAY, cfs_fail_val);
+
rc = target_handle_disconnect(tgt_ses_req(tsi));
if (rc)
RETURN(err_serious(rc));
ENTRY;
- rc = target_handle_ping(tgt_ses_req(tsi));
+ /* The target-specific part of OBD_PING request handling.
+ * It controls Filter Modification Data (FMD) expiration each time
+ * PING is received.
+ *
+ * Valid only for replayable targets, e.g. MDT and OFD
+ */
+ if (tsi->tsi_exp->exp_obd->obd_replayable)
+ tgt_fmd_expire(tsi->tsi_exp);
+
+ rc = req_capsule_server_pack(tsi->tsi_pill);
if (rc)
RETURN(err_serious(rc));
rc = lu_env_init(&env, LCT_DT_THREAD);
if (unlikely(rc != 0))
- RETURN(rc);
+ GOTO(err, rc);
ost_fid_from_resid(&fid, &lock->l_resource->lr_name,
tgt->lut_lsd.lsd_osd_index);
err_env:
lu_env_fini(&env);
}
-
+err:
rc = ldlm_server_blocking_ast(lock, desc, data, flag);
RETURN(rc);
}
/* generic LDLM target handler */
struct tgt_handler tgt_dlm_handlers[] = {
-TGT_DLM_HDL (HABEO_CLAVIS, LDLM_ENQUEUE, tgt_enqueue),
-TGT_DLM_HDL (HABEO_CLAVIS, LDLM_CONVERT, tgt_convert),
-TGT_DLM_HDL_VAR(0, LDLM_BL_CALLBACK, tgt_bl_callback),
-TGT_DLM_HDL_VAR(0, LDLM_CP_CALLBACK, tgt_cp_callback)
+TGT_DLM_HDL(HAS_KEY, LDLM_ENQUEUE, tgt_enqueue),
+TGT_DLM_HDL(HAS_KEY, LDLM_CONVERT, tgt_convert),
+TGT_DLM_HDL_VAR(0, LDLM_BL_CALLBACK, tgt_bl_callback),
+TGT_DLM_HDL_VAR(0, LDLM_CP_CALLBACK, tgt_cp_callback)
};
EXPORT_SYMBOL(tgt_dlm_handlers);
}
struct tgt_handler tgt_lfsck_handlers[] = {
-TGT_LFSCK_HDL(HABEO_REFERO, LFSCK_NOTIFY, tgt_handle_lfsck_notify),
-TGT_LFSCK_HDL(HABEO_REFERO, LFSCK_QUERY, tgt_handle_lfsck_query),
+TGT_LFSCK_HDL(HAS_REPLY, LFSCK_NOTIFY, tgt_handle_lfsck_notify),
+TGT_LFSCK_HDL(HAS_REPLY, LFSCK_QUERY, tgt_handle_lfsck_query),
};
EXPORT_SYMBOL(tgt_lfsck_handlers);
LASSERT(ns != NULL);
LASSERT(!lustre_handle_is_used(lh));
- rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, &policy, mode,
+ rc = ldlm_cli_enqueue_local(NULL, ns, res_id, LDLM_IBITS, &policy, mode,
flags, ldlm_blocking_ast,
ldlm_completion_ast, ldlm_glimpse_ast,
NULL, 0, LVB_T_NONE, NULL, lh);
* Helper function for getting server side [start, start+count] DLM lock
* if asked by client.
*/
-int tgt_extent_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
- __u64 start, __u64 end, struct lustre_handle *lh,
- int mode, __u64 *flags)
+int tgt_extent_lock(const struct lu_env *env, struct ldlm_namespace *ns,
+ struct ldlm_res_id *res_id, __u64 start, __u64 end,
+ struct lustre_handle *lh, int mode, __u64 *flags)
{
union ldlm_policy_data policy;
int rc;
else
policy.l_extent.end = end | ~PAGE_MASK;
- rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_EXTENT, &policy, mode,
- flags, ldlm_blocking_ast,
+ rc = ldlm_cli_enqueue_local(env, ns, res_id, LDLM_EXTENT, &policy,
+ mode, flags, ldlm_blocking_ast,
ldlm_completion_ast, ldlm_glimpse_ast,
NULL, 0, LVB_T_NONE, NULL, lh);
RETURN(rc == ELDLM_OK ? 0 : -EIO);
}
EXPORT_SYMBOL(tgt_extent_unlock);
-static int tgt_brw_lock(struct obd_export *exp, struct ldlm_res_id *res_id,
- struct obd_ioobj *obj, struct niobuf_remote *nb,
- struct lustre_handle *lh, enum ldlm_mode mode)
+static int tgt_brw_lock(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, struct obd_ioobj *obj,
+ struct niobuf_remote *nb, struct lustre_handle *lh,
+ enum ldlm_mode mode)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
__u64 flags = 0;
if (exp->exp_connect_data.ocd_connect_flags & OBD_CONNECT_IBITS)
rc = tgt_mdt_data_lock(ns, res_id, lh, mode, &flags);
else
- rc = tgt_extent_lock(ns, res_id, nb[0].rnb_offset,
+ rc = tgt_extent_lock(env, ns, res_id, nb[0].rnb_offset,
nb[nrbufs - 1].rnb_offset +
nb[nrbufs - 1].rnb_len - 1,
lh, mode, &flags);
int opc, enum cksum_types cksum_type,
__u32 *cksum)
{
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
unsigned int bufsize;
int i, err;
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
CERROR("%s: unable to initialize checksum hash %s\n",
tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
+ return PTR_ERR(req);
}
CDEBUG(D_INFO, "Checksum for algo %s\n", cfs_crypto_hash_name(cfs_alg));
* display in dump_all_bulk_pages() */
np->index = i;
- cfs_crypto_hash_update_page(hdesc, np, off,
+ cfs_crypto_hash_update_page(req, np, off,
len);
continue;
} else {
tgt_name(tgt));
}
}
- cfs_crypto_hash_update_page(hdesc, local_nb[i].lnb_page,
+ cfs_crypto_hash_update_page(req, local_nb[i].lnb_page,
local_nb[i].lnb_page_offset & ~PAGE_MASK,
local_nb[i].lnb_len);
* display in dump_all_bulk_pages() */
np->index = i;
- cfs_crypto_hash_update_page(hdesc, np, off,
+ cfs_crypto_hash_update_page(req, np, off,
len);
continue;
} else {
}
bufsize = sizeof(*cksum);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)cksum, &bufsize);
+ err = cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);
return 0;
}
kunmap(local_nb[i].lnb_page);
}
- rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
+ rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
if (rc)
CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
filp_close(filp, NULL);
int sector_size,
u32 *check_sum)
{
+ enum cksum_types t10_cksum_type = tgt->lut_dt_conf.ddp_t10_cksum_type;
unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
const char *obd_name = tgt->lut_obd->obd_name;
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
unsigned int bufsize;
unsigned char *buffer;
struct page *__page;
if (__page == NULL)
return -ENOMEM;
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
CERROR("%s: unable to initialize checksum hash %s\n",
tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
+ return PTR_ERR(req);
}
buffer = kmap(__page);
* display in dump_all_bulk_pages() */
np->index = i;
- cfs_crypto_hash_update_page(hdesc, np, off,
+ cfs_crypto_hash_update_page(req, np, off,
len);
continue;
} else {
* The left guard number should be able to hold checksums of a
* whole page
*/
- rc = obd_page_dif_generate_buffer(obd_name,
- local_nb[i].lnb_page,
- local_nb[i].lnb_page_offset & ~PAGE_MASK,
- local_nb[i].lnb_len, guard_start + used_number,
- guard_number - used_number, &used, sector_size,
- fn);
- if (rc)
- break;
+ if (t10_cksum_type && opc == OST_READ &&
+ local_nb[i].lnb_guard_disk) {
+ used = DIV_ROUND_UP(local_nb[i].lnb_len, sector_size);
+ if (used > (guard_number - used_number)) {
+ rc = -E2BIG;
+ break;
+ }
+ memcpy(guard_start + used_number,
+ local_nb[i].lnb_guards,
+ used * sizeof(*local_nb[i].lnb_guards));
+ } else {
+ rc = obd_page_dif_generate_buffer(obd_name,
+ local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
+ local_nb[i].lnb_len, guard_start + used_number,
+ guard_number - used_number, &used, sector_size,
+ fn);
+ if (rc)
+ break;
+ }
+
+ LASSERT(used <= MAX_GUARD_NUMBER);
+ /*
+	 * If the disk supports T10PI checksums, copy guards to local_nb.
+ * If the write is partial page, do not use the guards for bio
+ * submission since the data might not be full-sector. The bio
+ * guards will be generated later based on the full sectors. If
+ * the sector size is 512B rather than 4 KB, or the page size
+ * is larger than 4KB, this might drop some useful guards for
+ * partial page write, but it will only add minimal extra time
+ * of checksum calculation.
+ */
+ if (t10_cksum_type && opc == OST_WRITE &&
+ local_nb[i].lnb_len == PAGE_SIZE) {
+ local_nb[i].lnb_guard_rpc = 1;
+ memcpy(local_nb[i].lnb_guards,
+ guard_start + used_number,
+ used * sizeof(*local_nb[i].lnb_guards));
+ }
used_number += used;
if (used_number == guard_number) {
- cfs_crypto_hash_update_page(hdesc, __page, 0,
+ cfs_crypto_hash_update_page(req, __page, 0,
used_number * sizeof(*guard_start));
used_number = 0;
}
* display in dump_all_bulk_pages() */
np->index = i;
- cfs_crypto_hash_update_page(hdesc, np, off,
+ cfs_crypto_hash_update_page(req, np, off,
len);
continue;
} else {
GOTO(out, rc);
if (used_number != 0)
- cfs_crypto_hash_update_page(hdesc, __page, 0,
+ cfs_crypto_hash_update_page(req, __page, 0,
used_number * sizeof(*guard_start));
bufsize = sizeof(cksum);
- rc = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+ rc = cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);
if (rc == 0)
*check_sum = cksum;
local_nb = tbc->local;
- rc = tgt_brw_lock(exp, &tsi->tsi_resid, ioo, remote_nb, &lockh,
- LCK_PR);
+ rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
+ &lockh, LCK_PR);
if (rc != 0)
RETURN(rc);
EXPORT_SYMBOL(tgt_brw_read);
static int tgt_shortio2pages(struct niobuf_local *local, int npages,
- unsigned char *buf, int size)
+ unsigned char *buf, unsigned int size)
{
int i, off, len;
char *ptr;
local_nb = tbc->local;
- rc = tgt_brw_lock(exp, &tsi->tsi_resid, ioo, remote_nb, &lockh,
- LCK_PW);
+ rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
+ &lockh, LCK_PW);
if (rc != 0)
GOTO(out, rc);
if (rc < 0)
GOTO(out_lock, rc);
if (body->oa.o_flags & OBD_FL_SHORT_IO) {
- int short_io_size;
+ unsigned int short_io_size;
unsigned char *short_io_buf;
short_io_size = req_capsule_get_size(&req->rq_pill,