* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2013, 2016, Intel Corporation.
*/
/*
* lustre/target/tgt_handler.c
#include <obd.h>
#include <obd_class.h>
#include <obd_cksum.h>
-#include <md_object.h>
#include <lustre_lfsck.h>
#include <lustre_nodemap.h>
if (rc)
RETURN(rc);
- nodemap = tsi->tsi_exp->exp_target_data.ted_nodemap;
+ nodemap = nodemap_get_from_exp(tsi->tsi_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
body->oa.o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS,
body->oa.o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
NODEMAP_CLIENT_TO_FS,
body->oa.o_gid);
+ nodemap_putref(nodemap);
tsi->tsi_ost_body = body;
tsi->tsi_fid = body->oa.o_oi.oi_fid;
if (req_can_reconstruct(req, NULL)) {
if (!(lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_RESENT | MSG_REPLAY))) {
- DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches "
+ DEBUG_REQ(D_WARNING, req, "rq_xid %llu matches "
"saved xid, expected REPLAY or RESENT flag "
"(%x)", req->rq_xid,
lustre_msg_get_flags(req->rq_reqmsg));
RETURN(h);
}
+/**
+ * Check an incoming request's xid against the export's last_xid and
+ * release reply data already acknowledged by the client.
+ *
+ * Bumps exp_last_xid from the request's last_xid field, then rejects a
+ * request whose xid is zero or not greater than exp_last_xid, unless it
+ * carries MSG_REPLAY.  For multi-modifying-RPC clients this also releases
+ * in-memory reply data up to the acknowledged xid and, for requests that
+ * are neither resent nor replayed, processes the request tag.
+ *
+ * \param req	incoming request; req->rq_export must be valid
+ *
+ * \retval 0		xid is acceptable
+ * \retval -EPROTO	xid is inconsistent with the export's last_xid
+ */
+static int process_req_last_xid(struct ptlrpc_request *req)
+{
+	__u64	last_xid;
+	ENTRY;
+
+	/* check request's xid is consistent with export's last_xid */
+	last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
+	if (last_xid > req->rq_export->exp_last_xid)
+		req->rq_export->exp_last_xid = last_xid;
+
+	if (req->rq_xid == 0 ||
+	    (req->rq_xid <= req->rq_export->exp_last_xid)) {
+		DEBUG_REQ(D_ERROR, req, "Unexpected xid %llx vs. "
+			  "last_xid %llx\n", req->rq_xid,
+			  req->rq_export->exp_last_xid);
+		/* Some requests are allowed to be sent during replay,
+		 * such as OUT update requests and FLD requests, so it
+		 * is possible that replay requests have a smaller XID
+		 * than the exp_last_xid.
+		 *
+		 * Some non-replay requests may have a smaller XID as
+		 * well:
+		 *
+		 * - Client sends a no_resend RPC, like statfs;
+		 * - The RPC timed out (or hit some other error) on the
+		 *   client, so it was removed from the unreplied list;
+		 * - Client sends some other request to bump the
+		 *   exp_last_xid on the server;
+		 * - The former RPC finally gets a chance to be
+		 *   processed.
+		 */
+		if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))
+			RETURN(-EPROTO);
+	}
+
+	/* try to release in-memory reply data */
+	if (tgt_is_multimodrpcs_client(req->rq_export)) {
+		tgt_handle_received_xid(req->rq_export,
+				lustre_msg_get_last_xid(req->rq_reqmsg));
+		if (!(lustre_msg_get_flags(req->rq_reqmsg) &
+		      (MSG_RESENT | MSG_REPLAY)))
+			tgt_handle_tag(req->rq_export,
+				       lustre_msg_get_tag(req->rq_reqmsg));
+	}
+	RETURN(0);
+}
+
int tgt_request_handle(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi = tgt_ses_info(req->rq_svc_thread->t_env);
struct lu_target *tgt;
int request_fail_id = 0;
__u32 opc = lustre_msg_get_opc(msg);
+ struct obd_device *obd;
int rc;
-
+ bool is_connect = false;
ENTRY;
/* Refill the context, to make sure all thread keys are allocated */
	 * target, otherwise that should be a connect operation */
if (opc == MDS_CONNECT || opc == OST_CONNECT ||
opc == MGS_CONNECT) {
+ is_connect = true;
req_capsule_set(&req->rq_pill, &RQF_CONNECT);
rc = target_handle_connect(req);
if (rc != 0) {
GOTO(out, rc);
}
- /* check request's xid is consistent with export's last_xid */
- if (req->rq_export != NULL) {
- __u64 last_xid = lustre_msg_get_last_xid(req->rq_reqmsg);
- if (last_xid != 0)
- req->rq_export->exp_last_xid = last_xid;
- if (req->rq_xid == 0 ||
- req->rq_xid <= req->rq_export->exp_last_xid) {
- DEBUG_REQ(D_ERROR, req,
- "Unexpected xid %llx vs. last_xid %llx\n",
- req->rq_xid, req->rq_export->exp_last_xid);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 93, 0)
- LBUG();
-#endif
- req->rq_status = -EPROTO;
+	/* Skip last_xid processing for the recovery thread; otherwise the
+	 * last_xid on the same request could be processed twice: first when
+	 * the incoming request is processed, and again when the request is
+	 * handled by the recovery thread. */
+ obd = class_exp2obd(req->rq_export);
+ if (is_connect) {
+ /* reset the exp_last_xid on each connection. */
+ req->rq_export->exp_last_xid = 0;
+ } else if (obd->obd_recovery_data.trd_processing_task !=
+ current_pid()) {
+ rc = process_req_last_xid(req);
+ if (rc) {
+ req->rq_status = rc;
rc = ptlrpc_error(req);
GOTO(out, rc);
}
request_fail_id = tgt->lut_request_fail_id;
tsi->tsi_reply_fail_id = tgt->lut_reply_fail_id;
- /* try to release in-memory reply data */
- if (tgt_is_multimodrpcs_client(req->rq_export)) {
- tgt_handle_received_xid(req->rq_export,
- lustre_msg_get_last_xid(req->rq_reqmsg));
- if (!(lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_RESENT | MSG_REPLAY)))
- tgt_handle_tag(req->rq_export,
- lustre_msg_get_tag(req->rq_reqmsg));
- }
-
h = tgt_handler_find_check(req);
if (IS_ERR(h)) {
req->rq_status = PTR_ERR(h);
* Unified target generic handlers.
*/
-/*
- * Security functions
- */
-static inline void tgt_init_sec_none(struct obd_connect_data *reply)
-{
- reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |
- OBD_CONNECT_RMT_CLIENT_FORCE);
-}
-
-static int tgt_init_sec_level(struct ptlrpc_request *req)
-{
- struct lu_target *tgt = class_exp2tgt(req->rq_export);
- char *client;
- struct obd_connect_data *data, *reply;
- int rc = 0;
- bool remote;
- ENTRY;
-
- data = req_capsule_client_get(&req->rq_pill, &RMF_CONNECT_DATA);
- reply = req_capsule_server_get(&req->rq_pill, &RMF_CONNECT_DATA);
- if (data == NULL || reply == NULL)
- RETURN(-EFAULT);
-
- /* connection from MDT is always trusted */
- if (req->rq_auth_usr_mdt) {
- tgt_init_sec_none(reply);
- RETURN(0);
- }
-
- if (unlikely(tgt == NULL)) {
- DEBUG_REQ(D_ERROR, req, "%s: No target for connected export\n",
- class_exp2obd(req->rq_export)->obd_name);
- RETURN(-EINVAL);
- }
-
- client = libcfs_nid2str(req->rq_peer.nid);
- /* no GSS support case */
- if (!req->rq_auth_gss) {
- if (tgt->lut_sec_level > LUSTRE_SEC_NONE) {
- CWARN("client %s -> target %s does not use GSS, "
- "can not run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- RETURN(-EACCES);
- } else {
- tgt_init_sec_none(reply);
- RETURN(0);
- }
- }
-
- /* old version case */
- if (unlikely(!(data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) ||
- !(data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) ||
- !(data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA))) {
- if (tgt->lut_sec_level > LUSTRE_SEC_NONE) {
- CWARN("client %s -> target %s uses old version, "
- "can not run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- RETURN(-EACCES);
- } else {
- CWARN("client %s -> target %s uses old version, "
- "run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- tgt_init_sec_none(reply);
- RETURN(0);
- }
- }
-
- remote = data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT_FORCE;
- if (remote) {
- if (!req->rq_auth_remote)
- CDEBUG(D_SEC, "client (local realm) %s -> target %s "
- "asked to be remote.\n", client, tgt_name(tgt));
- } else if (req->rq_auth_remote) {
- remote = true;
- CDEBUG(D_SEC, "client (remote realm) %s -> target %s is set "
- "as remote by default.\n", client, tgt_name(tgt));
- }
-
- if (remote == 0) {
- if (!uid_valid(make_kuid(&init_user_ns, req->rq_auth_uid))) {
- CDEBUG(D_SEC, "client %s -> target %s: user is not "
- "authenticated!\n", client, tgt_name(tgt));
- RETURN(-EACCES);
- }
- }
-
-
- switch (tgt->lut_sec_level) {
- case LUSTRE_SEC_NONE:
- if (remote) {
- CDEBUG(D_SEC,
- "client %s -> target %s is set as remote, "
- "can not run under security level %d.\n",
- client, tgt_name(tgt), tgt->lut_sec_level);
- RETURN(-EACCES);
- }
- tgt_init_sec_none(reply);
- break;
- case LUSTRE_SEC_REMOTE:
- if (!remote)
- tgt_init_sec_none(reply);
- break;
- case LUSTRE_SEC_ALL:
- if (remote)
- break;
- reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |
- OBD_CONNECT_RMT_CLIENT_FORCE);
- reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
- reply->ocd_connect_flags &= ~OBD_CONNECT_MDS_CAPA;
- break;
- default:
- RETURN(-EINVAL);
- }
-
- RETURN(rc);
-}
-
int tgt_connect_check_sptlrpc(struct ptlrpc_request *req, struct obd_export *exp)
{
struct lu_target *tgt = class_exp2tgt(exp);
return rc;
}
-int tgt_adapt_sptlrpc_conf(struct lu_target *tgt, int initial)
+int tgt_adapt_sptlrpc_conf(struct lu_target *tgt)
{
struct sptlrpc_rule_set tmp_rset;
int rc;
}
sptlrpc_rule_set_init(&tmp_rset);
- rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset, initial);
+ rc = sptlrpc_conf_target_get_rules(tgt->lut_obd, &tmp_rset);
if (rc) {
CERROR("%s: failed get sptlrpc rules: rc = %d\n",
tgt_name(tgt), rc);
ENTRY;
- rc = tgt_init_sec_level(req);
- if (rc != 0)
- GOTO(out, rc);
-
/* XXX: better to call this check right after getting new export but
* before last_rcvd slot allocation to avoid server load upon insecure
	 * connects. This is to be fixed after unifying all targets.
RETURN(-ENOMEM);
if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
- /* old client requires reply size in it's PAGE_CACHE_SIZE,
+		/* old client requires reply size in its PAGE_SIZE,
* which is rdpg->rp_count */
nob = rdpg->rp_count;
for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
i++, tmpcount -= tmpsize) {
- tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
+ tmpsize = min_t(int, tmpcount, PAGE_SIZE);
desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0,
tmpsize);
}
GOTO(out, rc = -EFAULT);
rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
exp_max_brw_size(tsi->tsi_exp));
- rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE -1) >> PAGE_CACHE_SHIFT;
+ rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* allocate pages to store the containers */
OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
if (rdpg->rp_pages == NULL)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < rdpg->rp_npages; i++) {
- rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
+ rdpg->rp_pages[i] = alloc_page(GFP_NOFS);
if (rdpg->rp_pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
tgt->lut_obd->obd_last_committed) {
rc = dt_object_sync(env, obj, start, end);
}
+ atomic_inc(&tgt->lut_sync_count);
RETURN(rc);
}
* Unified target DLM handlers.
*/
-/* Ensure that data and metadata are synced to the disk when lock is cancelled
- * (if requested) */
+/**
+ * Unified target BAST
+ *
+ * Ensure data and metadata are synced to disk when lock is canceled if Sync on
+ * Cancel (SOC) is enabled. If it's extent lock, normally sync obj is enough,
+ * but if it's cross-MDT lock, because remote object version is not set, a
+ * filesystem sync is needed.
+ *
+ * \param lock server side lock
+ * \param desc lock desc
+ * \param data ldlm_cb_set_arg
+ * \param flag indicates whether this cancelling or blocking callback
+ * \retval 0 on success
+ * \retval negative number on error
+ */
static int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
struct lu_env env;
struct lu_target *tgt;
- struct dt_object *obj;
+ struct dt_object *obj = NULL;
struct lu_fid fid;
int rc = 0;
}
if (flag == LDLM_CB_CANCELING &&
- (lock->l_granted_mode & (LCK_PW | LCK_GROUP)) &&
+ (lock->l_granted_mode & (LCK_EX | LCK_PW | LCK_GROUP)) &&
(tgt->lut_sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL ||
(tgt->lut_sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL &&
- lock->l_flags & LDLM_FL_CBPENDING))) {
+ ldlm_is_cbpending(lock))) &&
+ ((exp_connect_flags(lock->l_export) & OBD_CONNECT_MDS_MDS) ||
+ lock->l_resource->lr_type == LDLM_EXTENT)) {
__u64 start = 0;
__u64 end = OBD_OBJECT_EOF;
ost_fid_from_resid(&fid, &lock->l_resource->lr_name,
tgt->lut_lsd.lsd_osd_index);
- obj = dt_locate(&env, tgt->lut_bottom, &fid);
- if (IS_ERR(obj))
- GOTO(err_env, rc = PTR_ERR(obj));
-
- if (!dt_object_exists(obj))
- GOTO(err_put, rc = -ENOENT);
if (lock->l_resource->lr_type == LDLM_EXTENT) {
+ obj = dt_locate(&env, tgt->lut_bottom, &fid);
+ if (IS_ERR(obj))
+ GOTO(err_env, rc = PTR_ERR(obj));
+
+ if (!dt_object_exists(obj))
+ GOTO(err_put, rc = -ENOENT);
+
start = lock->l_policy_data.l_extent.start;
end = lock->l_policy_data.l_extent.end;
}
rc = tgt_sync(&env, tgt, obj, start, end);
if (rc < 0) {
- CERROR("%s: syncing "DFID" ("LPU64"-"LPU64") on lock "
+ CERROR("%s: syncing "DFID" (%llu-%llu) on lock "
"cancel: rc = %d\n",
tgt_name(tgt), PFID(&fid),
lock->l_policy_data.l_extent.start,
lock->l_policy_data.l_extent.end, rc);
}
err_put:
- lu_object_put(&env, &obj->do_lu);
+ if (obj != NULL)
+ dt_object_put(&env, obj);
err_env:
lu_env_fini(&env);
}
};
EXPORT_SYMBOL(tgt_sec_ctx_handlers);
+/* Hook installed by the LFSCK layer: local (same-node) notification
+ * handler, invoked under the given transaction handle.  Remains NULL
+ * until the LFSCK module registers itself. */
+int (*tgt_lfsck_in_notify_local)(const struct lu_env *env,
+				 struct dt_device *key,
+				 struct lfsck_req_local *lrl,
+				 struct thandle *th) = NULL;
+
+/**
+ * Register the local LFSCK notification callback.
+ *
+ * \param notify	handler stored in tgt_lfsck_in_notify_local
+ */
+void tgt_register_lfsck_in_notify_local(int (*notify)(const struct lu_env *,
+						      struct dt_device *,
+						      struct lfsck_req_local *,
+						      struct thandle *))
+{
+	tgt_lfsck_in_notify_local = notify;
+}
+EXPORT_SYMBOL(tgt_register_lfsck_in_notify_local);
+
int (*tgt_lfsck_in_notify)(const struct lu_env *env,
struct dt_device *key,
- struct lfsck_request *lr,
- struct thandle *th) = NULL;
+ struct lfsck_request *lr) = NULL;
void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
struct dt_device *,
- struct lfsck_request *,
- struct thandle *))
+ struct lfsck_request *))
{
tgt_lfsck_in_notify = notify;
}
static int (*tgt_lfsck_query)(const struct lu_env *env,
struct dt_device *key,
- struct lfsck_request *lr) = NULL;
+ struct lfsck_request *req,
+ struct lfsck_reply *rep,
+ struct lfsck_query *que) = NULL;
void tgt_register_lfsck_query(int (*query)(const struct lu_env *,
struct dt_device *,
- struct lfsck_request *))
+ struct lfsck_request *,
+ struct lfsck_reply *,
+ struct lfsck_query *))
{
tgt_lfsck_query = query;
}
if (lr == NULL)
RETURN(-EPROTO);
- rc = tgt_lfsck_in_notify(env, key, lr, NULL);
+ rc = tgt_lfsck_in_notify(env, key, lr);
RETURN(rc);
}
if (reply == NULL)
RETURN(-ENOMEM);
- rc = tgt_lfsck_query(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, request);
- reply->lr_status = rc;
+ rc = tgt_lfsck_query(tsi->tsi_env, tsi->tsi_tgt->lut_bottom,
+ request, reply, NULL);
RETURN(rc < 0 ? rc : 0);
}
LASSERT(mode == LCK_PR || mode == LCK_PW);
LASSERT(!lustre_handle_is_used(lh));
+ if (ns->ns_obd->obd_recovering)
+ RETURN(0);
+
if (nrbufs == 0 || !(nb[0].rnb_flags & OBD_BRW_SRVLOCK))
RETURN(0);
break;
}
}
+ if (OBD_FAIL_CHECK(OBD_FAIL_OST_READ_SIZE) &&
+ nob != cfs_fail_val)
+ rc = -E2BIG;
if (body->oa.o_valid & OBD_MD_FLCKSUM) {
cksum_type_t cksum_type =
/* Check if client was evicted while we were doing i/o before touching
* network */
if (likely(rc == 0 &&
- !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
+ !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2) &&
+ !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_BULK))) {
rc = target_bulk_io(exp, desc, &lwi);
no_reply = rc != 0;
}
/* Must commit after prep above in all cases */
rc = obd_commitrw(tsi->tsi_env, OBD_BRW_READ, exp, &repbody->oa, 1, ioo,
remote_nb, npages, local_nb, rc);
- if (rc == 0)
- tgt_drop_id(exp, &repbody->oa);
out_lock:
tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PR);
}
LCONSOLE_ERROR_MSG(0x168, "BAD WRITE CHECKSUM: %s from %s%s%s inode "
- DFID" object "DOSTID" extent ["LPU64"-"LPU64
+ DFID" object "DOSTID" extent [%llu-%llu"
"]: client csum %x, server csum %x\n",
exp->exp_obd->obd_name, libcfs_id2str(req->rq_peer),
via, router,
cksum_type_t cksum_type = OBD_CKSUM_CRC32;
bool no_reply = false, mmap;
struct tgt_thread_big_cache *tbc = req->rq_svc_thread->t_data;
+ bool wait_sync = false;
ENTRY;
* has timed out the request already */
no_reply = true;
+ for (i = 0; i < niocount; i++) {
+ if (!(local_nb[i].lnb_flags & OBD_BRW_ASYNC)) {
+ wait_sync = true;
+ break;
+ }
+ }
/*
* Disable sending mtime back to the client. If the client locked the
* whole object, then it has already updated the mtime on its side,
}
LASSERT(j == npages);
ptlrpc_lprocfs_brw(req, nob);
-
- tgt_drop_id(exp, &repbody->oa);
}
out_lock:
tgt_brw_unlock(ioo, remote_nb, &lockh, LCK_PW);
if (desc)
ptlrpc_free_bulk(desc);
out:
- if (no_reply) {
+ if (unlikely(no_reply || (exp->exp_obd->obd_no_transno && wait_sync))) {
req->rq_no_reply = 1;
/* reply out callback would free */
ptlrpc_req_drop_rs(req);
- LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s), "
- "client will retry: rc %d\n",
- exp->exp_obd->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp), rc);
+ if (!exp->exp_obd->obd_no_transno)
+ LCONSOLE_WARN("%s: Bulk IO write error with %s (at %s),"
+ " client will retry: rc = %d\n",
+ exp->exp_obd->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp), rc);
}
memory_pressure_clr();
RETURN(rc);