* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2013, 2016, Intel Corporation.
*/
/*
* lustre/target/tgt_handler.c
#include <obd.h>
#include <obd_class.h>
#include <obd_cksum.h>
-#include <md_object.h>
#include <lustre_lfsck.h>
#include <lustre_nodemap.h>
+#include <lustre_acl.h>
#include "tgt_internal.h"
dlm_req = req_capsule_client_get(pill, &RMF_DLM_REQ);
if (dlm_req != NULL) {
+ union ldlm_wire_policy_data *policy =
+ &dlm_req->lock_desc.l_policy_data;
+
if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
LDLM_IBITS &&
- dlm_req->lock_desc.l_policy_data.\
- l_inodebits.bits == 0)) {
+ (policy->l_inodebits.bits |
+ policy->l_inodebits.try_bits) == 0)) {
/*
* Lock without inodebits makes no sense and
* will oops later in ldlm. If client miss to
RCL_SERVER))
req_capsule_set_size(tsi->tsi_pill, &RMF_LOGCOOKIES,
RCL_SERVER, 0);
+ if (req_capsule_has_field(tsi->tsi_pill, &RMF_ACL, RCL_SERVER))
+ req_capsule_set_size(tsi->tsi_pill,
+ &RMF_ACL, RCL_SERVER,
+ LUSTRE_POSIX_ACL_MAX_SIZE_OLD);
rc = req_capsule_server_pack(tsi->tsi_pill);
}
if (req_can_reconstruct(req, NULL)) {
if (!(lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_RESENT | MSG_REPLAY))) {
- DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches "
+ DEBUG_REQ(D_WARNING, req, "rq_xid %llu matches "
"saved xid, expected REPLAY or RESENT flag "
"(%x)", req->rq_xid,
lustre_msg_get_flags(req->rq_reqmsg));
* Unified target generic handlers.
*/
-/*
- * Security functions
- */
-static inline void tgt_init_sec_none(struct obd_connect_data *reply)
-{
-}
-
-static int tgt_init_sec_level(struct ptlrpc_request *req)
-{
- struct lu_target *tgt = class_exp2tgt(req->rq_export);
- char *client;
- struct obd_connect_data *data, *reply;
- int rc = 0;
- ENTRY;
-
- data = req_capsule_client_get(&req->rq_pill, &RMF_CONNECT_DATA);
- reply = req_capsule_server_get(&req->rq_pill, &RMF_CONNECT_DATA);
- if (data == NULL || reply == NULL)
- RETURN(-EFAULT);
-
- /* connection from MDT is always trusted */
- if (req->rq_auth_usr_mdt) {
- tgt_init_sec_none(reply);
- RETURN(0);
- }
-
- if (unlikely(tgt == NULL)) {
- DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
- class_exp2obd(req->rq_export)->obd_name);
- RETURN(-EINVAL);
- }
-
- client = libcfs_nid2str(req->rq_peer.nid);
- /* no GSS support case */
- if (!req->rq_auth_gss) {
- if (tgt->lut_sec_level > LUSTRE_SEC_NONE) {
- CWARN("client %s -> target %s does not use GSS, "
- "can not run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- RETURN(-EACCES);
- } else {
- tgt_init_sec_none(reply);
- RETURN(0);
- }
- }
-
- /* old version case */
- if (unlikely(!(data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) ||
- !(data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA))) {
- if (tgt->lut_sec_level > LUSTRE_SEC_NONE) {
- CWARN("client %s -> target %s uses old version, "
- "can not run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- RETURN(-EACCES);
- } else {
- CWARN("client %s -> target %s uses old version, "
- "run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- tgt_init_sec_none(reply);
- RETURN(0);
- }
- }
-
- if (!uid_valid(make_kuid(&init_user_ns, req->rq_auth_uid))) {
- CDEBUG(D_SEC, "client %s -> target %s: user is not "
- "authenticated!\n", client, tgt_name(tgt));
- RETURN(-EACCES);
- }
-
- switch (tgt->lut_sec_level) {
- case LUSTRE_SEC_NONE:
- case LUSTRE_SEC_REMOTE:
- tgt_init_sec_none(reply);
- break;
- case LUSTRE_SEC_ALL:
- reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
- reply->ocd_connect_flags &= ~OBD_CONNECT_MDS_CAPA;
- break;
- default:
- RETURN(-EINVAL);
- }
-
- RETURN(rc);
-}
-
int tgt_connect_check_sptlrpc(struct ptlrpc_request *req, struct obd_export *exp)
{
struct lu_target *tgt = class_exp2tgt(exp);
return rc;
}
-int tgt_adapt_sptlrpc_conf(struct lu_target *tgt, int initial)
+int tgt_adapt_sptlrpc_conf(struct lu_target *tgt)
{
struct sptlrpc_rule_set tmp_rset;
int rc;
}
sptlrpc_rule_set_init(&tmp_rset);
- rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset, initial);
+ rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset);
if (rc) {
CERROR("%s: failed get sptlrpc rules: rc = %d\n",
tgt_name(tgt), rc);
ENTRY;
- rc = tgt_init_sec_level(req);
- if (rc != 0)
- GOTO(out, rc);
-
/* XXX: better to call this check right after getting new export but
* before last_rcvd slot allocation to avoid server load upon insecure
* connects. This is to be fixed after unifiyng all targets.
RETURN(-ENOMEM);
if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
- /* old client requires reply size in it's PAGE_CACHE_SIZE,
+	/* old client requires reply size in its PAGE_SIZE,
* which is rdpg->rp_count */
nob = rdpg->rp_count;
for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
i++, tmpcount -= tmpsize) {
- tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
+ tmpsize = min_t(int, tmpcount, PAGE_SIZE);
desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0,
tmpsize);
}
GOTO(out, rc = -EFAULT);
rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
exp_max_brw_size(tsi->tsi_exp));
- rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE -1) >> PAGE_CACHE_SHIFT;
+ rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* allocate pages to store the containers */
OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
if (rdpg->rp_pages == NULL)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < rdpg->rp_npages; i++) {
- rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
+ rdpg->rp_pages[i] = alloc_page(GFP_NOFS);
if (rdpg->rp_pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
rc = tgt_sync(&env, tgt, obj, start, end);
if (rc < 0) {
- CERROR("%s: syncing "DFID" ("LPU64"-"LPU64") on lock "
+ CERROR("%s: syncing "DFID" (%llu-%llu) on lock "
"cancel: rc = %d\n",
tgt_name(tgt), PFID(&fid),
lock->l_policy_data.l_extent.start,
}
err_put:
if (obj != NULL)
- lu_object_put(&env, &obj->do_lu);
+ dt_object_put(&env, obj);
err_env:
lu_env_fini(&env);
}
};
EXPORT_SYMBOL(tgt_sec_ctx_handlers);
+/* Hook for delivering local LFSCK notification events (struct
+ * lfsck_req_local) together with a transaction handle; NULL until a
+ * provider registers one via tgt_register_lfsck_in_notify_local().
+ * NOTE(review): callers presumably check for NULL before invoking —
+ * confirm at the call sites. */
+int (*tgt_lfsck_in_notify_local)(const struct lu_env *env,
+				 struct dt_device *key,
+				 struct lfsck_req_local *lrl,
+				 struct thandle *th) = NULL;
+
+/* Install the local LFSCK in-notify hook declared above. */
+void tgt_register_lfsck_in_notify_local(int (*notify)(const struct lu_env *,
+						      struct dt_device *,
+						      struct lfsck_req_local *,
+						      struct thandle *))
+{
+	tgt_lfsck_in_notify_local = notify;
+}
+EXPORT_SYMBOL(tgt_register_lfsck_in_notify_local);
+
int (*tgt_lfsck_in_notify)(const struct lu_env *env,
struct dt_device *key,
- struct lfsck_request *lr,
- struct thandle *th) = NULL;
+ struct lfsck_request *lr) = NULL;
void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
struct dt_device *,
- struct lfsck_request *,
- struct thandle *))
+ struct lfsck_request *))
{
tgt_lfsck_in_notify = notify;
}
if (lr == NULL)
RETURN(-EPROTO);
- rc = tgt_lfsck_in_notify(env, key, lr, NULL);
+ rc = tgt_lfsck_in_notify(env, key, lr);
RETURN(rc);
}
LASSERT(mode == LCK_PR || mode == LCK_PW);
LASSERT(!lustre_handle_is_used(lh));
+ if (ns->ns_obd->obd_recovering)
+ RETURN(0);
+
if (nrbufs == 0 || !(nb[0].rnb_flags & OBD_BRW_SRVLOCK))
RETURN(0);
memcpy(ptr2, ptr, len);
memcpy(ptr2, "bad3", min(4, len));
kunmap(np);
+
+ /* LU-8376 to preserve original index for
+ * display in dump_all_bulk_pages() */
+ np->index = BD_GET_KIOV(desc,
+ i).kiov_page->index;
+
BD_GET_KIOV(desc, i).kiov_page = np;
} else {
CERROR("%s: can't alloc page for corruption\n",
memcpy(ptr2, ptr, len);
memcpy(ptr2, "bad4", min(4, len));
kunmap(np);
+
+ /* LU-8376 to preserve original index for
+ * display in dump_all_bulk_pages() */
+ np->index = BD_GET_KIOV(desc,
+ i).kiov_page->index;
+
BD_GET_KIOV(desc, i).kiov_page = np;
} else {
CERROR("%s: can't alloc page for corruption\n",
return cksum;
}
+/* Name of the most recent checksum-error dump file; global so the name
+ * can be referenced in log messages after the dump is written. */
+char dbgcksum_file_name[PATH_MAX];
+
+/* Dump every page of a bulk I/O (iov[0..count-1]) to a file under the
+ * debug file path so that a checksum mismatch can be analyzed offline.
+ * The file name encodes the parent FID, byte range and both checksums;
+ * opening with O_EXCL keeps only the first dump for a given range and
+ * checksum pair (later resends/retries hit -EEXIST and are skipped).
+ * Best effort: every failure is logged and otherwise ignored. */
+static void dump_all_bulk_pages(struct obdo *oa, int count,
+				lnet_kiov_t *iov, __u32 server_cksum,
+				__u32 client_cksum)
+{
+	struct file *filp;
+	int rc, i;
+	unsigned int len;
+	char *buf;
+	mm_segment_t oldfs;
+
+	/* will only keep dump of pages on first error for the same range in
+	 * file/fid, not during the resends/retries. */
+	snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
+		 "%s-checksum_dump-ost-"DFID":[%llu-%llu]-%x-%x",
+		 (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0 ?
+			libcfs_debug_file_path_arr :
+			LIBCFS_DEBUG_FILE_PATH_DEFAULT),
+		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
+		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
+		 (__u64)iov[0].kiov_page->index << PAGE_SHIFT,
+		 ((__u64)iov[count - 1].kiov_page->index << PAGE_SHIFT) +
+		 iov[count - 1].kiov_len - 1, client_cksum, server_cksum);
+	filp = filp_open(dbgcksum_file_name,
+			 O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
+	if (IS_ERR(filp)) {
+		rc = PTR_ERR(filp);
+		if (rc == -EEXIST)
+			CDEBUG(D_INFO, "%s: can't open to dump pages with "
+			       "checksum error: rc = %d\n", dbgcksum_file_name,
+			       rc);
+		else
+			CERROR("%s: can't open to dump pages with checksum "
+			       "error: rc = %d\n", dbgcksum_file_name, rc);
+		return;
+	}
+
+	/* vfs_write() is handed kernel buffers here, so temporarily lift
+	 * the user-space address-limit check around the write loop. */
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	for (i = 0; i < count; i++) {
+		len = iov[i].kiov_len;
+		buf = kmap(iov[i].kiov_page);
+		while (len != 0) {
+			/* vfs_write() may write fewer bytes than asked;
+			 * loop until the whole page is out or an error hits */
+			rc = vfs_write(filp, (__force const char __user *)buf,
+				       len, &filp->f_pos);
+			if (rc < 0) {
+				CERROR("%s: wanted to write %u but got %d "
+				       "error\n", dbgcksum_file_name, len, rc);
+				break;
+			}
+			len -= rc;
+			buf += rc;
+			CDEBUG(D_INFO, "%s: wrote %d bytes\n",
+			       dbgcksum_file_name, rc);
+		}
+		kunmap(iov[i].kiov_page);
+	}
+	set_fs(oldfs);
+
+	/* flush the dump to stable storage before closing */
+	rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
+	if (rc)
+		CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
+	filp_close(filp, NULL);
+	return;
+}
+
+/* Re-verify a READ bulk checksum on a client resend.
+ *
+ * Called when a resent read carries OBD_FL_RECOV_RESEND: the checksum the
+ * client computed for the previous attempt (client_cksum) is compared with
+ * the freshly computed server checksum.  Returns 0 when they now match
+ * (the earlier mismatch was transient); otherwise logs a BAD READ CHECKSUM
+ * console error — optionally dumping the bulk pages for offline analysis —
+ * and returns 1. */
+static int check_read_checksum(struct ptlrpc_bulk_desc *desc, struct obdo *oa,
+			       const lnet_process_id_t *peer,
+			       __u32 client_cksum, __u32 server_cksum,
+			       cksum_type_t server_cksum_type)
+{
+	char *msg;
+	cksum_type_t cksum_type;
+
+	/* a match on the resend is the expected (if unlikely) outcome:
+	 * the original checksum failure did not repeat */
+	if (unlikely(server_cksum == client_cksum)) {
+		CDEBUG(D_PAGE, "checksum %x confirmed upon retry\n",
+		       client_cksum);
+		return 0;
+	}
+
+	/* preserve the raw pages for offline analysis when enabled */
+	if (desc->bd_export->exp_obd->obd_checksum_dump)
+		dump_all_bulk_pages(oa, desc->bd_iov_count,
+				    &BD_GET_KIOV(desc, 0), server_cksum,
+				    client_cksum);
+
+	cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
+				       oa->o_flags : 0);
+
+	/* distinguish a checksum-type mismatch (protocol issue) from real
+	 * data corruption on the client or in transit */
+	if (cksum_type != server_cksum_type)
+		msg = "the server may have not used the checksum type specified"
+		      " in the original request - likely a protocol problem";
+	else
+		msg = "should have changed on the client or in transit";
+
+	LCONSOLE_ERROR_MSG(0x132, "%s: BAD READ CHECKSUM: %s: from %s inode "
+		DFID " object "DOSTID" extent [%llu-%llu], client returned csum"
+		" %x (type %x), server csum %x (type %x)\n",
+		desc->bd_export->exp_obd->obd_name,
+		msg, libcfs_nid2str(peer->nid),
+		oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
+		oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+		oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
+		POSTID(&oa->o_oi),
+		(__u64)BD_GET_KIOV(desc, 0).kiov_page->index << PAGE_SHIFT,
+		((__u64)BD_GET_KIOV(desc,
+				    desc->bd_iov_count - 1).kiov_page->index
+		 << PAGE_SHIFT) +
+		BD_GET_KIOV(desc, desc->bd_iov_count - 1).kiov_len - 1,
+		client_cksum, cksum_type, server_cksum, server_cksum_type);
+	return 1;
+}
+
int tgt_brw_read(struct tgt_session_info *tsi)
{
struct ptlrpc_request *req = tgt_ses_req(tsi);
* If getting the lock took more time than
* client was willing to wait, drop it. b=11330
*/
- if (cfs_time_current_sec() > req->rq_deadline ||
+ if (ktime_get_real_seconds() > req->rq_deadline ||
OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
no_reply = 1;
- CERROR("Dropping timed-out read from %s because locking"
- "object "DOSTID" took %ld seconds (limit was %ld).\n",
+ CERROR("Dropping timed-out read from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
libcfs_id2str(req->rq_peer), POSTID(&ioo->ioo_oid),
- cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
req->rq_deadline - req->rq_arrival_time.tv_sec);
GOTO(out_lock, rc = -ETIMEDOUT);
}
cksum_type_t cksum_type =
cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
body->oa.o_flags : 0);
+
repbody->oa.o_flags = cksum_type_pack(cksum_type);
repbody->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
repbody->oa.o_cksum = tgt_checksum_bulk(tsi->tsi_tgt, desc,
OST_READ, cksum_type);
CDEBUG(D_PAGE, "checksum at read origin: %x\n",
repbody->oa.o_cksum);
+
+ /* if a resend it could be for a cksum error, so check Server
+ * cksum with returned Client cksum (this should even cover
+ * zero-cksum case) */
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) &&
+ (body->oa.o_flags & OBD_FL_RECOV_RESEND))
+ check_read_checksum(desc, &body->oa, &req->rq_peer,
+ body->oa.o_cksum,
+ repbody->oa.o_cksum, cksum_type);
} else {
repbody->oa.o_valid = 0;
}
router = libcfs_nid2str(desc->bd_sender);
}
+ if (exp->exp_obd->obd_checksum_dump)
+ dump_all_bulk_pages(&body->oa, desc->bd_iov_count,
+ &BD_GET_KIOV(desc, 0), server_cksum,
+ client_cksum);
+
if (mmap) {
CDEBUG_LIMIT(D_INFO, "client csum %x, server csum %x\n",
client_cksum, server_cksum);
return;
}
- LCONSOLE_ERROR_MSG(0x168, "BAD WRITE CHECKSUM: %s from %s%s%s inode "
- DFID" object "DOSTID" extent ["LPU64"-"LPU64
+ LCONSOLE_ERROR_MSG(0x168, "%s: BAD WRITE CHECKSUM: from %s%s%s inode "
+ DFID" object "DOSTID" extent [%llu-%llu"
"]: client csum %x, server csum %x\n",
exp->exp_obd->obd_name, libcfs_id2str(req->rq_peer),
via, router,
cksum_type_t cksum_type = OBD_CKSUM_CRC32;
bool no_reply = false, mmap;
struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
+ bool wait_sync = false;
ENTRY;
RETURN(err_serious(-EPROTO));
if ((remote_nb[0].rnb_flags & OBD_BRW_MEMALLOC) &&
- (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self))
+ ptlrpc_connection_is_local(exp->exp_connection))
memory_pressure_set();
req_capsule_set_size(&req->rq_pill, &RMF_RCS, RCL_SERVER,
* If getting the lock took more time than
* client was willing to wait, drop it. b=11330
*/
- if (cfs_time_current_sec() > req->rq_deadline ||
+ if (ktime_get_real_seconds() > req->rq_deadline ||
OBD_FAIL_CHECK(OBD_FAIL_OST_DROP_REQ)) {
no_reply = true;
- CERROR("%s: Dropping timed-out write from %s because locking "
- "object "DOSTID" took %ld seconds (limit was %ld).\n",
+ CERROR("%s: Dropping timed-out write from %s because locking object " DOSTID " took %lld seconds (limit was %lld).\n",
tgt_name(tsi->tsi_tgt), libcfs_id2str(req->rq_peer),
POSTID(&ioo->ioo_oid),
- cfs_time_current_sec() - req->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - req->rq_arrival_time.tv_sec,
req->rq_deadline - req->rq_arrival_time.tv_sec);
GOTO(out_lock, rc = -ETIMEDOUT);
}
* has timed out the request already */
no_reply = true;
+ for (i = 0; i < niocount; i++) {
+ if (!(local_nb[i].lnb_flags & OBD_BRW_ASYNC)) {
+ wait_sync = true;
+ break;
+ }
+ }
/*
* Disable sending mtime back to the client. If the client locked the
* whole object, then it has already updated the mtime on its side,
if (desc)
ptlrpc_free_bulk(desc);
out:
- if (no_reply) {
+ if (unlikely(no_reply || (exp->exp_obd->obd_no_transno && wait_sync))) {
req->rq_no_reply = 1;
/* reply out callback would free */
ptlrpc_req_drop_rs(req);
- LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s), "
- "client will retry: rc %d\n",
- exp->exp_obd->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp), rc);
+ if (!exp->exp_obd->obd_no_transno)
+ LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s),"
+ " client will retry: rc = %d\n",
+ exp->exp_obd->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp), rc);
}
memory_pressure_clr();
RETURN(rc);