LASSERT(ns != NULL);
LASSERT(!lustre_handle_is_used(lh));
- rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, &policy, mode,
+ rc = ldlm_cli_enqueue_local(NULL, ns, res_id, LDLM_IBITS, &policy, mode,
flags, ldlm_blocking_ast,
ldlm_completion_ast, ldlm_glimpse_ast,
NULL, 0, LVB_T_NONE, NULL, lh);
 * Helper function for getting a server side [start, end] DLM extent lock
 * if asked by client.
*/
-int tgt_extent_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
- __u64 start, __u64 end, struct lustre_handle *lh,
- int mode, __u64 *flags)
+int tgt_extent_lock(const struct lu_env *env, struct ldlm_namespace *ns,
+ struct ldlm_res_id *res_id, __u64 start, __u64 end,
+ struct lustre_handle *lh, int mode, __u64 *flags)
{
union ldlm_policy_data policy;
int rc;
else
policy.l_extent.end = end | ~PAGE_MASK;
- rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_EXTENT, &policy, mode,
- flags, ldlm_blocking_ast,
+ rc = ldlm_cli_enqueue_local(env, ns, res_id, LDLM_EXTENT, &policy,
+ mode, flags, ldlm_blocking_ast,
ldlm_completion_ast, ldlm_glimpse_ast,
NULL, 0, LVB_T_NONE, NULL, lh);
RETURN(rc == ELDLM_OK ? 0 : -EIO);
}
EXPORT_SYMBOL(tgt_extent_unlock);
-static int tgt_brw_lock(struct obd_export *exp, struct ldlm_res_id *res_id,
- struct obd_ioobj *obj, struct niobuf_remote *nb,
- struct lustre_handle *lh, enum ldlm_mode mode)
+static int tgt_brw_lock(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, struct obd_ioobj *obj,
+ struct niobuf_remote *nb, struct lustre_handle *lh,
+ enum ldlm_mode mode)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
__u64 flags = 0;
if (exp->exp_connect_data.ocd_connect_flags & OBD_CONNECT_IBITS)
rc = tgt_mdt_data_lock(ns, res_id, lh, mode, &flags);
else
- rc = tgt_extent_lock(ns, res_id, nb[0].rnb_offset,
+ rc = tgt_extent_lock(env, ns, res_id, nb[0].rnb_offset,
nb[nrbufs - 1].rnb_offset +
nb[nrbufs - 1].rnb_len - 1,
lh, mode, &flags);
int opc, enum cksum_types cksum_type,
__u32 *cksum)
{
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
unsigned int bufsize;
int i, err;
unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
CERROR("%s: unable to initialize checksum hash %s\n",
tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
+ return PTR_ERR(req);
}
CDEBUG(D_INFO, "Checksum for algo %s\n", cfs_crypto_hash_name(cfs_alg));
* display in dump_all_bulk_pages() */
np->index = i;
- cfs_crypto_hash_update_page(hdesc, np, off,
+ cfs_crypto_hash_update_page(req, np, off,
len);
continue;
} else {
tgt_name(tgt));
}
}
- cfs_crypto_hash_update_page(hdesc, local_nb[i].lnb_page,
+ cfs_crypto_hash_update_page(req, local_nb[i].lnb_page,
local_nb[i].lnb_page_offset & ~PAGE_MASK,
local_nb[i].lnb_len);
* display in dump_all_bulk_pages() */
np->index = i;
- cfs_crypto_hash_update_page(hdesc, np, off,
+ cfs_crypto_hash_update_page(req, np, off,
len);
continue;
} else {
}
bufsize = sizeof(*cksum);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)cksum, &bufsize);
+ err = cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);
return 0;
}
int sector_size,
u32 *check_sum)
{
+ enum cksum_types t10_cksum_type = tgt->lut_dt_conf.ddp_t10_cksum_type;
unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
const char *obd_name = tgt->lut_obd->obd_name;
- struct cfs_crypto_hash_desc *hdesc;
+ struct ahash_request *req;
unsigned int bufsize;
unsigned char *buffer;
struct page *__page;
if (__page == NULL)
return -ENOMEM;
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
+ req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(req)) {
CERROR("%s: unable to initialize checksum hash %s\n",
tgt_name(tgt), cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
+ return PTR_ERR(req);
}
buffer = kmap(__page);
* display in dump_all_bulk_pages() */
np->index = i;
- cfs_crypto_hash_update_page(hdesc, np, off,
+ cfs_crypto_hash_update_page(req, np, off,
len);
continue;
} else {
* The left guard number should be able to hold checksums of a
* whole page
*/
- rc = obd_page_dif_generate_buffer(obd_name,
- local_nb[i].lnb_page,
- local_nb[i].lnb_page_offset & ~PAGE_MASK,
- local_nb[i].lnb_len, guard_start + used_number,
- guard_number - used_number, &used, sector_size,
- fn);
- if (rc)
- break;
+ if (t10_cksum_type && opc == OST_READ &&
+ local_nb[i].lnb_guard_disk) {
+ used = DIV_ROUND_UP(local_nb[i].lnb_len, sector_size);
+ if (used > (guard_number - used_number)) {
+ rc = -E2BIG;
+ break;
+ }
+ memcpy(guard_start + used_number,
+ local_nb[i].lnb_guards,
+ used * sizeof(*local_nb[i].lnb_guards));
+ } else {
+ rc = obd_page_dif_generate_buffer(obd_name,
+ local_nb[i].lnb_page,
+ local_nb[i].lnb_page_offset & ~PAGE_MASK,
+ local_nb[i].lnb_len, guard_start + used_number,
+ guard_number - used_number, &used, sector_size,
+ fn);
+ if (rc)
+ break;
+ }
+
+ LASSERT(used <= MAX_GUARD_NUMBER);
+	/*
+	 * If the disk supports T10-PI checksums, copy the guards to local_nb.
+	 * If the write is a partial page, do not use the guards for bio
+	 * submission since the data might not be full-sector. The bio
+	 * guards will be generated later based on the full sectors. If
+	 * the sector size is 512B rather than 4KB, or the page size
+	 * is larger than 4KB, this might drop some useful guards for
+	 * partial page writes, but it will only add minimal extra time
+	 * to the checksum calculation.
+	 */
+ if (t10_cksum_type && opc == OST_WRITE &&
+ local_nb[i].lnb_len == PAGE_SIZE) {
+ local_nb[i].lnb_guard_rpc = 1;
+ memcpy(local_nb[i].lnb_guards,
+ guard_start + used_number,
+ used * sizeof(*local_nb[i].lnb_guards));
+ }
used_number += used;
if (used_number == guard_number) {
- cfs_crypto_hash_update_page(hdesc, __page, 0,
+ cfs_crypto_hash_update_page(req, __page, 0,
used_number * sizeof(*guard_start));
used_number = 0;
}
* display in dump_all_bulk_pages() */
np->index = i;
- cfs_crypto_hash_update_page(hdesc, np, off,
+ cfs_crypto_hash_update_page(req, np, off,
len);
continue;
} else {
GOTO(out, rc);
if (used_number != 0)
- cfs_crypto_hash_update_page(hdesc, __page, 0,
+ cfs_crypto_hash_update_page(req, __page, 0,
used_number * sizeof(*guard_start));
bufsize = sizeof(cksum);
- rc = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+ rc = cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);
if (rc == 0)
*check_sum = cksum;
local_nb = tbc->local;
- rc = tgt_brw_lock(exp, &tsi->tsi_resid, ioo, remote_nb, &lockh,
- LCK_PR);
+ rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
+ &lockh, LCK_PR);
if (rc != 0)
RETURN(rc);
local_nb = tbc->local;
- rc = tgt_brw_lock(exp, &tsi->tsi_resid, ioo, remote_nb, &lockh,
- LCK_PW);
+ rc = tgt_brw_lock(tsi->tsi_env, exp, &tsi->tsi_resid, ioo, remote_nb,
+ &lockh, LCK_PW);
if (rc != 0)
GOTO(out, rc);