X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fost%2Fost_handler.c;h=25b8bf86ce17a62bcb60ff531f466ae45c8d4f13;hb=4980567857699c7f902ebda336ea98fdc4b83100;hp=4bbf6c54d37ba3bdbc84b7acd8ce4868bbd0bc4e;hpb=f95393b0d0a59cf3dc2f29cffc35dcc4cc9d7728;p=fs%2Flustre-release.git

diff --git a/lustre/ost/ost_handler.c b/lustre/ost/ost_handler.c
index 4bbf6c5..25b8bf8 100644
--- a/lustre/ost/ost_handler.c
+++ b/lustre/ost/ost_handler.c
@@ -28,6 +28,8 @@
 /*
  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -72,7 +74,7 @@ CFS_MODULE_PARM(oss_num_create_threads, "i", int, 0444,
 /**
  * Do not return server-side uid/gid to remote client
  */
-static void ost_drop_id(struct obd_export *exp, struct obdo *oa)
+static void ost_drop_id(struct obd_export *exp, struct obdo *oa)
 {
         if (exp_connect_rmtclient(exp)) {
                 oa->o_uid = -1;
@@ -83,23 +85,21 @@ static void ost_drop_id(struct obd_export *exp, struct obdo *oa)
 
 /**
  * Validate oa from client.
- * 1. If the request comes from 1.8 clients, it will reset o_seq with MDT0.
- * 2. If the request comes from 2.0 clients, currently only RSVD seq and IDIF
- *    req are valid.
- *    a. for single MDS seq = FID_SEQ_OST_MDT0,
- *    b. for CMD, seq = FID_SEQ_OST_MDT0, FID_SEQ_OST_MDT1 - FID_SEQ_OST_MAX
+ * If the request comes from 2.0 clients, currently only RSVD seq and IDIF
+ * req are valid.
+ *    a. for single MDS seq = FID_SEQ_OST_MDT0,
+ *    b. for CMD, seq = FID_SEQ_OST_MDT0, FID_SEQ_OST_MDT1 - FID_SEQ_OST_MAX
  */
 static int ost_validate_obdo(struct obd_export *exp, struct obdo *oa,
                              struct obd_ioobj *ioobj)
 {
-        if (oa != NULL && (!(oa->o_valid & OBD_MD_FLGROUP) ||
-                           !(exp->exp_connect_flags & OBD_CONNECT_FULL20))) {
+        if (oa != NULL && !(oa->o_valid & OBD_MD_FLGROUP)) {
                 oa->o_seq = FID_SEQ_OST_MDT0;
                 if (ioobj)
                         ioobj->ioo_seq = FID_SEQ_OST_MDT0;
         /* remove fid_seq_is_rsvd() after FID-on-OST allows SEQ > 9 */
-        } else if (oa == NULL ||
-                   !(fid_seq_is_rsvd(oa->o_seq) || fid_seq_is_idif(oa->o_seq))) {
+        } else if (oa == NULL || !(fid_seq_is_rsvd(oa->o_seq) ||
+                                   fid_seq_is_mdt0(oa->o_seq))) {
                 CERROR("%s: client %s sent invalid object "POSTID"\n",
                        exp->exp_obd->obd_name, obd_export_nid2str(exp),
                        oa ? oa->o_id : -1, oa ? 
oa->o_seq : -1); @@ -189,7 +189,7 @@ static int ost_destroy(struct obd_export *exp, struct ptlrpc_request *req, memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); /* Do the destroy and set the reply status accordingly */ - req->rq_status = obd_destroy(exp, &body->oa, NULL, oti, NULL, capa); + req->rq_status = obd_destroy(exp, &repbody->oa, NULL, oti, NULL, capa); RETURN(0); } @@ -249,8 +249,9 @@ static void ost_lock_put(struct obd_export *exp, static int ost_getattr(struct obd_export *exp, struct ptlrpc_request *req) { struct ost_body *body, *repbody; - struct obd_info oinfo = { { { 0 } } }; + struct obd_info *oinfo; struct lustre_handle lh = { 0 }; + struct lustre_capa *capa = NULL; int rc; ENTRY; @@ -262,6 +263,14 @@ static int ost_getattr(struct obd_export *exp, struct ptlrpc_request *req) if (rc) RETURN(rc); + if (body->oa.o_valid & OBD_MD_FLOSSCAPA) { + capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1); + if (capa == NULL) { + CERROR("Missing capability for OST GETATTR"); + RETURN(-EFAULT); + } + } + rc = req_capsule_server_pack(&req->rq_pill); if (rc) RETURN(rc); @@ -269,25 +278,25 @@ static int ost_getattr(struct obd_export *exp, struct ptlrpc_request *req) repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); repbody->oa = body->oa; - rc = ost_lock_get(exp, &body->oa, 0, OBD_OBJECT_EOF, &lh, LCK_PR, 0); + rc = ost_lock_get(exp, &repbody->oa, 0, OBD_OBJECT_EOF, &lh, LCK_PR, 0); if (rc) RETURN(rc); - oinfo.oi_oa = &repbody->oa; - if (oinfo.oi_oa->o_valid & OBD_MD_FLOSSCAPA) { - oinfo.oi_capa = req_capsule_client_get(&req->rq_pill, - &RMF_CAPA1); - if (oinfo.oi_capa == NULL) { - CERROR("Missing capability for OST GETATTR"); - RETURN (-EFAULT); - } - } + OBD_ALLOC_PTR(oinfo); + if (!oinfo) + GOTO(unlock, rc = -ENOMEM); + oinfo->oi_oa = &repbody->oa; + oinfo->oi_capa = capa; - req->rq_status = obd_getattr(exp, &oinfo); - ost_lock_put(exp, &lh, LCK_PR); + req->rq_status = obd_getattr(exp, oinfo); + + OBD_FREE_PTR(oinfo); ost_drop_id(exp, &repbody->oa); - RETURN(0); + +unlock: + ost_lock_put(exp, &lh, LCK_PR); + RETURN(rc); } static int ost_statfs(struct ptlrpc_request *req) @@ -303,7 +312,8 @@ static int ost_statfs(struct ptlrpc_request *req) osfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); req->rq_status = obd_statfs(req->rq_export->exp_obd, osfs, - cfs_time_current_64() - CFS_HZ, 0); + cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), + 0); if (req->rq_status != 0) CERROR("ost: statfs failed: rc %d\n", req->rq_status); @@ -330,8 +340,8 @@ static int ost_create(struct obd_export *exp, struct ptlrpc_request *req, RETURN(rc); repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); - oti->oti_logcookies = &repbody->oa.o_lcookie; + repbody->oa = body->oa; + oti->oti_logcookies = &body->oa.o_lcookie; req->rq_status = obd_create(exp, &repbody->oa, NULL, oti); //obd_log_cancel(conn, NULL, 1, oti->oti_logcookies, 0); @@ -341,7 +351,6 @@ static int ost_create(struct obd_export *exp, struct ptlrpc_request *req, static int ost_punch(struct obd_export *exp, struct ptlrpc_request *req, struct obd_trans_info *oti) { - struct obd_info oinfo = { { { 0 } } }; struct ost_body *body, *repbody; int rc, flags = 0; struct lustre_handle lh = {0,}; @@ -358,11 +367,7 @@ static int ost_punch(struct obd_export *exp, struct ptlrpc_request *req, if (rc) RETURN(rc); - oinfo.oi_oa = &body->oa; - oinfo.oi_policy.l_extent.start = oinfo.oi_oa->o_size; - oinfo.oi_policy.l_extent.end = oinfo.oi_oa->o_blocks; - - if 
((oinfo.oi_oa->o_valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) != + if ((body->oa.o_valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) != (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) RETURN(-EPROTO); @@ -372,34 +377,51 @@ static int ost_punch(struct obd_export *exp, struct ptlrpc_request *req, /* standard truncate optimization: if file body is completely * destroyed, don't send data back to the server. */ - if (oinfo.oi_oa->o_size == 0) + if (body->oa.o_size == 0) flags |= LDLM_AST_DISCARD_DATA; repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - rc = ost_lock_get(exp, oinfo.oi_oa, oinfo.oi_oa->o_size, - oinfo.oi_oa->o_blocks, &lh, LCK_PW, flags); + repbody->oa = body->oa; + + rc = ost_lock_get(exp, &repbody->oa, repbody->oa.o_size, + repbody->oa.o_blocks, &lh, LCK_PW, flags); if (rc == 0) { - if (oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS && - oinfo.oi_oa->o_flags == OBD_FL_SRVLOCK) + struct obd_info *oinfo; + struct lustre_capa *capa = NULL; + + if (repbody->oa.o_valid & OBD_MD_FLFLAGS && + repbody->oa.o_flags == OBD_FL_SRVLOCK) /* * If OBD_FL_SRVLOCK is the only bit set in * ->o_flags, clear OBD_MD_FLFLAGS to avoid falling * through filter_setattr() to filter_iocontrol(). */ - oinfo.oi_oa->o_valid &= ~OBD_MD_FLFLAGS; + repbody->oa.o_valid &= ~OBD_MD_FLFLAGS; - if (oinfo.oi_oa->o_valid & OBD_MD_FLOSSCAPA) { - oinfo.oi_capa = req_capsule_client_get(&req->rq_pill, - &RMF_CAPA1); - if (oinfo.oi_capa == NULL) { + if (repbody->oa.o_valid & OBD_MD_FLOSSCAPA) { + capa = req_capsule_client_get(&req->rq_pill, + &RMF_CAPA1); + if (capa == NULL) { CERROR("Missing capability for OST PUNCH"); - RETURN (-EFAULT); + GOTO(unlock, rc = -EFAULT); } } - req->rq_status = obd_punch(exp, &oinfo, oti, NULL); + + OBD_ALLOC_PTR(oinfo); + if (!oinfo) + GOTO(unlock, rc = -ENOMEM); + oinfo->oi_oa = &repbody->oa; + oinfo->oi_policy.l_extent.start = oinfo->oi_oa->o_size; + oinfo->oi_policy.l_extent.end = oinfo->oi_oa->o_blocks; + oinfo->oi_capa = capa; + oinfo->oi_flags = OBD_FL_PUNCH; + + req->rq_status = obd_punch(exp, oinfo, oti, NULL); + OBD_FREE_PTR(oinfo); +unlock: ost_lock_put(exp, &lh, LCK_PW); } - repbody->oa = *oinfo.oi_oa; + ost_drop_id(exp, &repbody->oa); RETURN(rc); } @@ -407,6 +429,7 @@ static int ost_punch(struct obd_export *exp, struct ptlrpc_request *req, static int ost_sync(struct obd_export *exp, struct ptlrpc_request *req) { struct ost_body *body, *repbody; + struct obd_info *oinfo; struct lustre_capa *capa = NULL; int rc; ENTRY; @@ -432,9 +455,18 @@ static int ost_sync(struct obd_export *exp, struct ptlrpc_request *req) RETURN(rc); repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - memcpy(&repbody->oa, &body->oa, sizeof(body->oa)); - req->rq_status = obd_sync(exp, &repbody->oa, NULL, repbody->oa.o_size, - repbody->oa.o_blocks, capa); + repbody->oa = body->oa; + + OBD_ALLOC_PTR(oinfo); + if (!oinfo) + RETURN(-ENOMEM); + + oinfo->oi_oa = &repbody->oa; + oinfo->oi_capa = capa; + req->rq_status = obd_sync(exp, oinfo, repbody->oa.o_size, + repbody->oa.o_blocks, NULL); + OBD_FREE_PTR(oinfo); + ost_drop_id(exp, &repbody->oa); RETURN(0); } @@ -443,8 +475,9 @@ static int ost_setattr(struct obd_export *exp, struct ptlrpc_request *req, struct obd_trans_info *oti) { struct ost_body *body, *repbody; + struct obd_info *oinfo; + struct lustre_capa *capa = NULL; int rc; - struct obd_info oinfo = { { { 0 } } }; ENTRY; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); @@ -459,32 +492,31 @@ static int ost_setattr(struct obd_export *exp, struct ptlrpc_request *req, if (rc) RETURN(rc); - repbody = 
req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - repbody->oa = body->oa; - - oinfo.oi_oa = &repbody->oa; - if (oinfo.oi_oa->o_valid & OBD_MD_FLOSSCAPA) { - oinfo.oi_capa = req_capsule_client_get(&req->rq_pill, - &RMF_CAPA1); - if (oinfo.oi_capa == NULL) { + if (body->oa.o_valid & OBD_MD_FLOSSCAPA) { + capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1); + if (capa == NULL) { CERROR("Missing capability for OST SETATTR"); RETURN (-EFAULT); } } - req->rq_status = obd_setattr(exp, &oinfo, oti); + + repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); + repbody->oa = body->oa; + + OBD_ALLOC_PTR(oinfo); + if (!oinfo) + RETURN(-ENOMEM); + oinfo->oi_oa = &repbody->oa; + oinfo->oi_capa = capa; + + req->rq_status = obd_setattr(exp, oinfo, oti); + + OBD_FREE_PTR(oinfo); + ost_drop_id(exp, &repbody->oa); RETURN(0); } -static int ost_bulk_timeout(void *data) -{ - ENTRY; - /* We don't fail the connection here, because having the export - * killed makes the (vital) call to commitrw very sad. - */ - RETURN(1); -} - static __u32 ost_checksum_bulk(struct ptlrpc_bulk_desc *desc, int opc, cksum_type_t cksum_type) { @@ -515,7 +547,7 @@ static __u32 ost_checksum_bulk(struct ptlrpc_bulk_desc *desc, int opc, kunmap(page); } - return cksum; + return fini_checksum(cksum, cksum_type); } static int ost_brw_lock_get(int mode, struct obd_export *exp, @@ -564,124 +596,6 @@ static void ost_brw_lock_put(int mode, EXIT; } -struct ost_prolong_data { - struct obd_export *opd_exp; - ldlm_policy_data_t opd_policy; - struct obdo *opd_oa; - ldlm_mode_t opd_mode; - int opd_lock_match; - int opd_timeout; -}; - -static int ost_prolong_locks_iter(struct ldlm_lock *lock, void *data) -{ - struct ost_prolong_data *opd = data; - - LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); - - if (lock->l_req_mode != lock->l_granted_mode) { - /* scan granted locks only */ - return LDLM_ITER_STOP; - } - - if (lock->l_export != opd->opd_exp) { - /* prolong locks only for given client */ - return LDLM_ITER_CONTINUE; - } - - if (!(lock->l_granted_mode & opd->opd_mode)) { - /* we aren't interesting in all type of locks */ - return LDLM_ITER_CONTINUE; - } - - if (lock->l_policy_data.l_extent.end < opd->opd_policy.l_extent.start || - lock->l_policy_data.l_extent.start > opd->opd_policy.l_extent.end) { - /* the request doesn't cross the lock, skip it */ - return LDLM_ITER_CONTINUE; - } - - /* Fill the obdo with the matched lock handle. - * XXX: it is possible in some cases the IO RPC is covered by several - * locks, even for the write case, so it may need to be a lock list. */ - if (opd->opd_oa && !(opd->opd_oa->o_valid & OBD_MD_FLHANDLE)) { - opd->opd_oa->o_handle.cookie = lock->l_handle.h_cookie; - opd->opd_oa->o_valid |= OBD_MD_FLHANDLE; - } - - if (!(lock->l_flags & LDLM_FL_AST_SENT)) { - /* ignore locks not being cancelled */ - return LDLM_ITER_CONTINUE; - } - - CDEBUG(D_DLMTRACE,"refresh lock: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n", - lock->l_resource->lr_name.name[0], - lock->l_resource->lr_name.name[1], - opd->opd_policy.l_extent.start, opd->opd_policy.l_extent.end); - /* OK. 
this is a possible lock the user holds doing I/O - * let's refresh eviction timer for it */ - ldlm_refresh_waiting_lock(lock, opd->opd_timeout); - opd->opd_lock_match = 1; - - return LDLM_ITER_CONTINUE; -} - -static int ost_rw_prolong_locks(struct ptlrpc_request *req, struct obd_ioobj *obj, - struct niobuf_remote *nb, struct obdo *oa, - ldlm_mode_t mode) -{ - struct ldlm_res_id res_id; - int nrbufs = obj->ioo_bufcnt; - struct ost_prolong_data opd = { 0 }; - ENTRY; - - osc_build_res_name(obj->ioo_id, obj->ioo_seq, &res_id); - - opd.opd_mode = mode; - opd.opd_exp = req->rq_export; - opd.opd_policy.l_extent.start = nb[0].offset & CFS_PAGE_MASK; - opd.opd_policy.l_extent.end = (nb[nrbufs - 1].offset + - nb[nrbufs - 1].len - 1) | ~CFS_PAGE_MASK; - - /* prolong locks for the current service time of the corresponding - * portal (= OST_IO_PORTAL) */ - opd.opd_timeout = AT_OFF ? obd_timeout / 2: - max(at_est2timeout(at_get(&req->rq_rqbd-> - rqbd_service->srv_at_estimate)), ldlm_timeout); - - CDEBUG(D_INFO,"refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n", - res_id.name[0], res_id.name[1], opd.opd_policy.l_extent.start, - opd.opd_policy.l_extent.end); - - if (oa->o_valid & OBD_MD_FLHANDLE) { - struct ldlm_lock *lock; - - lock = ldlm_handle2lock(&oa->o_handle); - if (lock != NULL) { - ost_prolong_locks_iter(lock, &opd); - if (opd.opd_lock_match) { - LDLM_LOCK_PUT(lock); - RETURN(1); - } - - /* Check if the lock covers the whole IO region, - * otherwise iterate through the resource. */ - if (lock->l_policy_data.l_extent.end >= - opd.opd_policy.l_extent.end && - lock->l_policy_data.l_extent.start <= - opd.opd_policy.l_extent.start) { - LDLM_LOCK_PUT(lock); - RETURN(0); - } - LDLM_LOCK_PUT(lock); - } - } - - opd.opd_oa = oa; - ldlm_resource_iterate(req->rq_export->exp_obd->obd_namespace, &res_id, - ost_prolong_locks_iter, &opd); - RETURN(opd.opd_lock_match); -} - /* Allocate thread local buffers if needed */ static struct ost_thread_local_cache *ost_tls_get(struct ptlrpc_request *r) { @@ -813,8 +727,11 @@ static int ost_brw_read(struct ptlrpc_request *req, struct obd_trans_info *oti) GOTO(out_lock, rc = -ETIMEDOUT); } + repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); + memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa)); + npages = OST_THREAD_POOL_SIZE; - rc = obd_preprw(OBD_BRW_READ, exp, &body->oa, 1, ioo, + rc = obd_preprw(OBD_BRW_READ, exp, &repbody->oa, 1, ioo, remote_nb, &npages, local_nb, oti, capa); if (rc != 0) GOTO(out_lock, rc); @@ -822,13 +739,7 @@ static int ost_brw_read(struct ptlrpc_request *req, struct obd_trans_info *oti) desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE, OST_BULK_PORTAL); if (desc == NULL) - GOTO(out_lock, rc = -ENOMEM); - - if (!lustre_handle_is_used(&lockh)) - /* no needs to try to prolong lock if server is asked - * to handle locking (= OBD_BRW_SRVLOCK) */ - ost_rw_prolong_locks(req, ioo, remote_nb, &body->oa, - LCK_PW | LCK_PR); + GOTO(out_commitrw, rc = -ENOMEM); nob = 0; for (i = 0; i < npages; i++) { @@ -856,111 +767,41 @@ static int ost_brw_read(struct ptlrpc_request *req, struct obd_trans_info *oti) } if (body->oa.o_valid & OBD_MD_FLCKSUM) { - cksum_type_t cksum_type = OBD_CKSUM_CRC32; - - if (body->oa.o_valid & OBD_MD_FLFLAGS) - cksum_type = cksum_type_unpack(body->oa.o_flags); - body->oa.o_flags = cksum_type_pack(cksum_type); - body->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS; - body->oa.o_cksum = ost_checksum_bulk(desc, OST_READ, cksum_type); - CDEBUG(D_PAGE,"checksum at read origin: %x\n",body->oa.o_cksum); + cksum_type_t 
cksum_type = + cksum_type_unpack(repbody->oa.o_valid & OBD_MD_FLFLAGS ? + repbody->oa.o_flags : 0); + repbody->oa.o_flags = cksum_type_pack(cksum_type); + repbody->oa.o_valid = OBD_MD_FLCKSUM | OBD_MD_FLFLAGS; + repbody->oa.o_cksum = ost_checksum_bulk(desc, OST_READ,cksum_type); + CDEBUG(D_PAGE, "checksum at read origin: %x\n", + repbody->oa.o_cksum); } else { - body->oa.o_valid = 0; + repbody->oa.o_valid = 0; } /* We're finishing using body->oa as an input variable */ /* Check if client was evicted while we were doing i/o before touching network */ if (rc == 0) { - /* Check if there is eviction in progress, and if so, wait for - * it to finish */ - if (unlikely(cfs_atomic_read(&exp->exp_obd-> - obd_evict_inprogress))) { - lwi = LWI_INTR(NULL, NULL); - rc = l_wait_event(exp->exp_obd-> - obd_evict_inprogress_waitq, - !cfs_atomic_read(&exp->exp_obd-> - obd_evict_inprogress), - &lwi); - } - /* Check if client was evicted or tried to reconnect already */ - if (exp->exp_failed || exp->exp_abort_active_req) - rc = -ENOTCONN; - else { - rc = sptlrpc_svc_wrap_bulk(req, desc); - if (rc == 0) - rc = ptlrpc_start_bulk_transfer(desc); - } - - if (rc == 0) { - time_t start = cfs_time_current_sec(); - do { - long timeoutl = req->rq_deadline - - cfs_time_current_sec(); - cfs_duration_t timeout = timeoutl <= 0 ? - CFS_TICK : cfs_time_seconds(timeoutl); - lwi = LWI_TIMEOUT_INTERVAL(timeout, - cfs_time_seconds(1), - ost_bulk_timeout, - desc); - rc = l_wait_event(desc->bd_waitq, - !ptlrpc_server_bulk_active(desc) || - exp->exp_failed || - exp->exp_abort_active_req, - &lwi); - LASSERT(rc == 0 || rc == -ETIMEDOUT); - /* Wait again if we changed deadline */ - } while ((rc == -ETIMEDOUT) && - (req->rq_deadline > cfs_time_current_sec())); - - if (rc == -ETIMEDOUT) { - DEBUG_REQ(D_ERROR, req, - "timeout on bulk PUT after %ld%+lds", - req->rq_deadline - start, - cfs_time_current_sec() - - req->rq_deadline); - ptlrpc_abort_bulk(desc); - } else if (exp->exp_failed) { - DEBUG_REQ(D_ERROR, req, "Eviction on bulk PUT"); - rc = -ENOTCONN; - ptlrpc_abort_bulk(desc); - } else if (exp->exp_abort_active_req) { - DEBUG_REQ(D_ERROR, req, "Reconnect on bulk PUT"); - /* we don't reply anyway */ - rc = -ETIMEDOUT; - ptlrpc_abort_bulk(desc); - } else if (!desc->bd_success || - desc->bd_nob_transferred != desc->bd_nob) { - DEBUG_REQ(D_ERROR, req, "%s bulk PUT %d(%d)", - desc->bd_success ? - "truncated" : "network error on", - desc->bd_nob_transferred, - desc->bd_nob); - /* XXX should this be a different errno? 
*/ - rc = -ETIMEDOUT; - } - } else { - DEBUG_REQ(D_ERROR, req, "bulk PUT failed: rc %d", rc); - } + if (likely(!CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) + rc = target_bulk_io(exp, desc, &lwi); no_reply = rc != 0; } +out_commitrw: /* Must commit after prep above in all cases */ - rc = obd_commitrw(OBD_BRW_READ, exp, &body->oa, 1, ioo, + rc = obd_commitrw(OBD_BRW_READ, exp, &repbody->oa, 1, ioo, remote_nb, npages, local_nb, oti, rc); - if (rc == 0) { - repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa)); + if (rc == 0) ost_drop_id(exp, &repbody->oa); - } out_lock: ost_brw_lock_put(LCK_PR, ioo, remote_nb, &lockh); out_tls: ost_tls_put(req); out_bulk: - if (desc) + if (desc && !CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2)) ptlrpc_free_bulk(desc); out: LASSERT(rc <= 0); @@ -984,6 +825,20 @@ out: exp->exp_connection->c_remote_uuid.uuid, libcfs_id2str(req->rq_peer)); } + /* send a bulk after reply to simulate a network delay or reordering + * by a router */ + if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) { + cfs_waitq_t waitq; + struct l_wait_info lwi1; + + CDEBUG(D_INFO, "reorder BULK\n"); + cfs_waitq_init(&waitq); + + lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL); + l_wait_event(waitq, 0, &lwi1); + rc = target_bulk_io(exp, desc, &lwi); + ptlrpc_free_bulk(desc); + } RETURN(rc); } @@ -1004,7 +859,7 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) int rc, i, j; obd_count client_cksum = 0, server_cksum = 0; cksum_type_t cksum_type = OBD_CKSUM_CRC32; - int no_reply = 0; + int no_reply = 0, mmap = 0; __u32 o_uid = 0, o_gid = 0; struct ost_thread_local_cache *tls; ENTRY; @@ -1019,27 +874,12 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) /* pause before transaction has been started */ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK, (obd_timeout + 1) / 4); - /* Check if there is eviction in progress, and if so, wait for it to - * finish */ - if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) { - lwi = LWI_INTR(NULL, NULL); // We do not care how long it takes - rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq, - !cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress), - &lwi); - } - if (exp->exp_failed) - GOTO(out, rc = -ENOTCONN); - /* ost_body, ioobj & noibuf_remote are verified and swabbed in * ost_rw_hpreq_check(). 
*/ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); if (body == NULL) GOTO(out, rc = -EFAULT); - if ((body->oa.o_flags & OBD_BRW_MEMALLOC) && - (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)) - libcfs_memory_pressure_set(); - objcount = req_capsule_get_size(&req->rq_pill, &RMF_OBD_IOOBJ, RCL_CLIENT) / sizeof(*ioo); ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ); @@ -1063,6 +903,10 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) &RMF_NIOBUF_REMOTE, RCL_CLIENT) / sizeof(*remote_nb))) GOTO(out, rc = -EFAULT); + if ((remote_nb[0].flags & OBD_BRW_MEMALLOC) && + (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)) + cfs_memory_pressure_set(); + if (body->oa.o_valid & OBD_MD_FLOSSCAPA) { capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1); if (capa == NULL) { @@ -1076,7 +920,7 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) rc = req_capsule_server_pack(&req->rq_pill); if (rc != 0) GOTO(out, rc); - OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_PACK, obd_fail_val); + CFS_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_PACK, cfs_fail_val); rcs = req_capsule_server_get(&req->rq_pill, &RMF_RCS); tls = ost_tls_get(req); @@ -1103,17 +947,14 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) GOTO(out_lock, rc = -ETIMEDOUT); } - if (!lustre_handle_is_used(&lockh)) - /* no needs to try to prolong lock if server is asked - * to handle locking (= OBD_BRW_SRVLOCK) */ - ost_rw_prolong_locks(req, ioo, remote_nb,&body->oa, LCK_PW); - /* obd_preprw clobbers oa->valid, so save what we need */ if (body->oa.o_valid & OBD_MD_FLCKSUM) { client_cksum = body->oa.o_cksum; if (body->oa.o_valid & OBD_MD_FLFLAGS) cksum_type = cksum_type_unpack(body->oa.o_flags); } + if (body->oa.o_valid & OBD_MD_FLFLAGS && body->oa.o_flags & OBD_FL_MMAP) + mmap = 1; /* Because we already sync grant info with client when reconnect, * grant info will be cleared for resent req, then fed_grant and @@ -1127,8 +968,12 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) o_uid = body->oa.o_uid; o_gid = body->oa.o_gid; } + + repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); + memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa)); + npages = OST_THREAD_POOL_SIZE; - rc = obd_preprw(OBD_BRW_WRITE, exp, &body->oa, objcount, + rc = obd_preprw(OBD_BRW_WRITE, exp, &repbody->oa, objcount, ioo, remote_nb, &npages, local_nb, oti, capa); if (rc != 0) GOTO(out_lock, rc); @@ -1136,7 +981,7 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) desc = ptlrpc_prep_bulk_exp(req, npages, BULK_GET_SINK, OST_BULK_PORTAL); if (desc == NULL) - GOTO(out_lock, rc = -ENOMEM); + GOTO(skip_transfer, rc = -ENOMEM); /* NB Having prepped, we must commit... */ @@ -1149,64 +994,11 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) if (rc != 0) GOTO(out_lock, rc); - /* Check if client was evicted or tried to reconnect while we - * were doing i/o before touching network */ - if (desc->bd_export->exp_failed || - desc->bd_export->exp_abort_active_req) - rc = -ENOTCONN; - else - rc = ptlrpc_start_bulk_transfer(desc); - if (rc == 0) { - time_t start = cfs_time_current_sec(); - do { - long timeoutl = req->rq_deadline - - cfs_time_current_sec(); - cfs_duration_t timeout = timeoutl <= 0 ? 
- CFS_TICK : cfs_time_seconds(timeoutl); - lwi = LWI_TIMEOUT_INTERVAL(timeout, cfs_time_seconds(1), - ost_bulk_timeout, desc); - rc = l_wait_event(desc->bd_waitq, - !ptlrpc_server_bulk_active(desc) || - desc->bd_export->exp_failed || - desc->bd_export->exp_abort_active_req, - &lwi); - LASSERT(rc == 0 || rc == -ETIMEDOUT); - /* Wait again if we changed deadline */ - } while ((rc == -ETIMEDOUT) && - (req->rq_deadline > cfs_time_current_sec())); - - if (rc == -ETIMEDOUT) { - DEBUG_REQ(D_ERROR, req, - "timeout on bulk GET after %ld%+lds", - req->rq_deadline - start, - cfs_time_current_sec() - - req->rq_deadline); - ptlrpc_abort_bulk(desc); - } else if (desc->bd_export->exp_failed) { - DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET"); - rc = -ENOTCONN; - ptlrpc_abort_bulk(desc); - } else if (desc->bd_export->exp_abort_active_req) { - DEBUG_REQ(D_ERROR, req, "Reconnect on bulk GET"); - /* we don't reply anyway */ - rc = -ETIMEDOUT; - ptlrpc_abort_bulk(desc); - } else if (!desc->bd_success) { - DEBUG_REQ(D_ERROR, req, "network error on bulk GET"); - /* XXX should this be a different errno? */ - rc = -ETIMEDOUT; - } else { - rc = sptlrpc_svc_unwrap_bulk(req, desc); - } - } else { - DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc); - } + rc = target_bulk_io(exp, desc, &lwi); no_reply = rc != 0; - repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa)); - - if (unlikely(client_cksum != 0 && rc == 0)) { +skip_transfer: + if (client_cksum != 0 && rc == 0) { static int cksum_counter; repbody->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS; repbody->oa.o_flags &= ~OBD_FL_CKSUM_ALL; @@ -1215,8 +1007,9 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) repbody->oa.o_cksum = server_cksum; cksum_counter++; if (unlikely(client_cksum != server_cksum)) { - CERROR("client csum %x, server csum %x\n", - client_cksum, server_cksum); + CDEBUG_LIMIT(mmap ? D_INFO : D_ERROR, + "client csum %x, server csum %x\n", + client_cksum, server_cksum); cksum_counter = 0; } else if ((cksum_counter & (-cksum_counter)) == cksum_counter){ CDEBUG(D_INFO, "Checksum %u from %s OK: %x\n", @@ -1246,7 +1039,7 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) */ repbody->oa.o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLATIME); - if (unlikely(client_cksum != server_cksum && rc == 0)) { + if (unlikely(client_cksum != server_cksum && rc == 0 && !mmap)) { int new_cksum = ost_checksum_bulk(desc, OST_WRITE, cksum_type); char *msg; char *via; @@ -1319,14 +1112,6 @@ out_bulk: if (desc) ptlrpc_free_bulk(desc); out: - /* XXX: don't send reply if obd rdonly mode, this can cause data loss - * on client, see bug 22190. Remove this when async bulk will be done. - * Meanwhile, if this is umount then don't reply anything. 
*/ - if (req->rq_export->exp_obd->obd_no_transno) { - no_reply = req->rq_export->exp_obd->obd_stopping; - rc = -EIO; - } - if (rc == 0) { oti_to_request(oti, req); target_committed_to_req(req); @@ -1346,7 +1131,7 @@ out: exp->exp_connection->c_remote_uuid.uuid, libcfs_id2str(req->rq_peer)); } - libcfs_memory_pressure_clr(); + cfs_memory_pressure_clr(); RETURN(rc); } @@ -1532,7 +1317,7 @@ static int ost_handle_quota_adjust_qunit(struct ptlrpc_request *req) GOTO(out, rc); repoqa = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_ADJUST_QUNIT); - req->rq_status = obd_quota_adjust_qunit(req->rq_export, oqaq, qctxt); + req->rq_status = obd_quota_adjust_qunit(req->rq_export, oqaq, qctxt, NULL); *repoqa = *oqaq; out: @@ -1727,6 +1512,56 @@ static int ost_connect_check_sptlrpc(struct ptlrpc_request *req) return rc; } +/* Ensure that data and metadata are synced to the disk when lock is cancelled + * (if requested) */ +int ost_blocking_ast(struct ldlm_lock *lock, + struct ldlm_lock_desc *desc, + void *data, int flag) +{ + __u32 sync_lock_cancel = 0; + __u32 len = sizeof(sync_lock_cancel); + int rc = 0; + ENTRY; + + rc = obd_get_info(lock->l_export, sizeof(KEY_SYNC_LOCK_CANCEL), + KEY_SYNC_LOCK_CANCEL, &len, &sync_lock_cancel, NULL); + + if (!rc && flag == LDLM_CB_CANCELING && + (lock->l_granted_mode & (LCK_PW|LCK_GROUP)) && + (sync_lock_cancel == ALWAYS_SYNC_ON_CANCEL || + (sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL && + lock->l_flags & LDLM_FL_CBPENDING))) { + struct obd_info *oinfo; + struct obdo *oa; + int rc; + + OBD_ALLOC_PTR(oinfo); + if (!oinfo) + RETURN(-ENOMEM); + OBDO_ALLOC(oa); + if (!oa) { + OBD_FREE_PTR(oinfo); + RETURN(-ENOMEM); + } + oa->o_id = lock->l_resource->lr_name.name[0]; + oa->o_seq = lock->l_resource->lr_name.name[1]; + oa->o_valid = OBD_MD_FLID|OBD_MD_FLGROUP; + oinfo->oi_oa = oa; + + rc = obd_sync(lock->l_export, oinfo, + lock->l_policy_data.l_extent.start, + lock->l_policy_data.l_extent.end, NULL); + if (rc) + CERROR("Error %d syncing data on lock cancel\n", rc); + + OBDO_FREE(oa); + OBD_FREE_PTR(oinfo); + } + + rc = ldlm_server_blocking_ast(lock, desc, data, flag); + RETURN(rc); +} + static int ost_filter_recovery_request(struct ptlrpc_request *req, struct obd_device *obd, int *process) { @@ -1824,6 +1659,101 @@ int ost_msg_check_version(struct lustre_msg *msg) return rc; } +struct ost_prolong_data { + struct ptlrpc_request *opd_req; + struct obd_export *opd_exp; + struct obdo *opd_oa; + struct ldlm_res_id opd_resid; + struct ldlm_extent opd_extent; + ldlm_mode_t opd_mode; + unsigned int opd_locks; + int opd_timeout; +}; + +/* prolong locks for the current service time of the corresponding + * portal (= OST_IO_PORTAL) + */ +static inline int prolong_timeout(struct ptlrpc_request *req) +{ + struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service; + + if (AT_OFF) + return obd_timeout / 2; + + return max(at_est2timeout(at_get(&svc->srv_at_estimate)), ldlm_timeout); +} + +static void ost_prolong_lock_one(struct ost_prolong_data *opd, + struct ldlm_lock *lock) +{ + LASSERT(lock->l_req_mode == lock->l_granted_mode); + LASSERT(lock->l_export == opd->opd_exp); + + /* XXX: never try to grab resource lock here because we're inside + * exp_bl_list_lock; in ldlm_lockd.c to handle waiting list we take + * res lock and then exp_bl_list_lock. 
*/ + + if (!(lock->l_flags & LDLM_FL_AST_SENT)) + /* ignore locks not being cancelled */ + return; + + LDLM_DEBUG(lock, + "refreshed for req x"LPU64" ext("LPU64"->"LPU64") to %ds.\n", + opd->opd_req->rq_xid, opd->opd_extent.start, + opd->opd_extent.end, opd->opd_timeout); + + /* OK. this is a possible lock the user holds doing I/O + * let's refresh eviction timer for it */ + ldlm_refresh_waiting_lock(lock, opd->opd_timeout); + ++opd->opd_locks; +} + +static void ost_prolong_locks(struct ost_prolong_data *data) +{ + struct obd_export *exp = data->opd_exp; + struct obdo *oa = data->opd_oa; + struct ldlm_lock *lock; + ENTRY; + + if (oa->o_valid & OBD_MD_FLHANDLE) { + /* mostly a request should be covered by only one lock, try + * fast path. */ + lock = ldlm_handle2lock(&oa->o_handle); + if (lock != NULL) { + /* Fast path to check if the lock covers the whole IO + * region exclusively. */ + if (lock->l_granted_mode == LCK_PW && + ldlm_extent_contain(&lock->l_policy_data.l_extent, + &data->opd_extent)) { + /* bingo */ + ost_prolong_lock_one(data, lock); + LDLM_LOCK_PUT(lock); + RETURN_EXIT; + } + LDLM_LOCK_PUT(lock); + } + } + + + cfs_spin_lock_bh(&exp->exp_bl_list_lock); + cfs_list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) { + LASSERT(lock->l_flags & LDLM_FL_AST_SENT); + LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); + + if (!ldlm_res_eq(&data->opd_resid, &lock->l_resource->lr_name)) + continue; + + if (!ldlm_extent_overlap(&lock->l_policy_data.l_extent, + &data->opd_extent)) + continue; + + ost_prolong_lock_one(data, lock); + } + cfs_spin_unlock_bh(&exp->exp_bl_list_lock); + + EXIT; +} + /** * Returns 1 if the given PTLRPC matches the given LDLM locks, or 0 if it does * not. @@ -1833,61 +1763,35 @@ static int ost_rw_hpreq_lock_match(struct ptlrpc_request *req, { struct niobuf_remote *nb; struct obd_ioobj *ioo; - struct ost_body *body; - int objcount, niocount; - int mode, opc, i, rc; - __u64 start, end; + int mode, opc; + struct ldlm_extent ext; ENTRY; opc = lustre_msg_get_opc(req->rq_reqmsg); LASSERT(opc == OST_READ || opc == OST_WRITE); - /* As the request may be covered by several locks, do not look at - * o_handle, look at the RPC IO region. 
*/ - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) - RETURN(0); - - objcount = req_capsule_get_size(&req->rq_pill, &RMF_OBD_IOOBJ, - RCL_CLIENT) / sizeof(*ioo); ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ); - if (ioo == NULL) - RETURN(0); - - rc = ost_validate_obdo(req->rq_export, &body->oa, ioo); - if (rc) - RETURN(rc); - - for (niocount = i = 0; i < objcount; i++) - niocount += ioo[i].ioo_bufcnt; + LASSERT(ioo != NULL); nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE); - if (nb == NULL || - niocount != (req_capsule_get_size(&req->rq_pill, &RMF_NIOBUF_REMOTE, - RCL_CLIENT) / sizeof(*nb))) - RETURN(0); + LASSERT(nb != NULL); - mode = LCK_PW; - if (opc == OST_READ) - mode |= LCK_PR; - - start = nb[0].offset & CFS_PAGE_MASK; - end = (nb[ioo->ioo_bufcnt - 1].offset + - nb[ioo->ioo_bufcnt - 1].len - 1) | ~CFS_PAGE_MASK; + ext.start = nb->offset; + nb += ioo->ioo_bufcnt - 1; + ext.end = nb->offset + nb->len - 1; LASSERT(lock->l_resource != NULL); if (!osc_res_name_eq(ioo->ioo_id, ioo->ioo_seq, &lock->l_resource->lr_name)) RETURN(0); + mode = LCK_PW; + if (opc == OST_READ) + mode |= LCK_PR; if (!(lock->l_granted_mode & mode)) RETURN(0); - if (lock->l_policy_data.l_extent.end < start || - lock->l_policy_data.l_extent.start > end) - RETURN(0); - - RETURN(1); + RETURN(ldlm_extent_overlap(&lock->l_policy_data.l_extent, &ext)); } /** @@ -1902,78 +1806,62 @@ static int ost_rw_hpreq_lock_match(struct ptlrpc_request *req, */ static int ost_rw_hpreq_check(struct ptlrpc_request *req) { - struct niobuf_remote *nb; - struct obd_ioobj *ioo; + struct obd_device *obd = req->rq_export->exp_obd; struct ost_body *body; - int objcount, niocount; - int mode, opc, i, rc; + struct obd_ioobj *ioo; + struct niobuf_remote *nb; + struct ost_prolong_data opd = { 0 }; + int mode, opc; ENTRY; + /* + * Use LASSERT to do sanity check because malformed RPCs should have + * been filtered out in ost_hpreq_handler(). 
+ */ opc = lustre_msg_get_opc(req->rq_reqmsg); LASSERT(opc == OST_READ || opc == OST_WRITE); body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) - RETURN(-EFAULT); + LASSERT(body != NULL); - objcount = req_capsule_get_size(&req->rq_pill, &RMF_OBD_IOOBJ, - RCL_CLIENT) / sizeof(*ioo); ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ); - if (ioo == NULL) - RETURN(-EFAULT); + LASSERT(ioo != NULL); - rc = ost_validate_obdo(req->rq_export, &body->oa, ioo); - if (rc) - RETURN(rc); - - for (niocount = i = 0; i < objcount; i++) - niocount += ioo[i].ioo_bufcnt; nb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE); - if (nb == NULL || - niocount != (req_capsule_get_size(&req->rq_pill, &RMF_NIOBUF_REMOTE, - RCL_CLIENT) / sizeof(*nb))) - RETURN(-EFAULT); - if (niocount != 0 && (nb[0].flags & OBD_BRW_SRVLOCK)) - RETURN(-EFAULT); + LASSERT(nb != NULL); + LASSERT(!(nb->flags & OBD_BRW_SRVLOCK)); + + osc_build_res_name(ioo->ioo_id, ioo->ioo_seq, &opd.opd_resid); + opd.opd_req = req; mode = LCK_PW; if (opc == OST_READ) mode |= LCK_PR; - RETURN(ost_rw_prolong_locks(req, ioo, nb, &body->oa, mode)); -} + opd.opd_mode = mode; + opd.opd_exp = req->rq_export; + opd.opd_oa = &body->oa; + opd.opd_extent.start = nb->offset; + nb += ioo->ioo_bufcnt - 1; + opd.opd_extent.end = nb->offset + nb->len - 1; + opd.opd_timeout = prolong_timeout(req); -static int ost_punch_prolong_locks(struct ptlrpc_request *req, struct obdo *oa) -{ - struct ldlm_res_id res_id = { .name = { oa->o_id } }; - struct ost_prolong_data opd = { 0 }; - __u64 start, end; - ENTRY; + DEBUG_REQ(D_RPCTRACE, req, + "%s %s: refresh rw locks: " LPU64"/"LPU64" ("LPU64"->"LPU64")\n", + obd->obd_name, cfs_current()->comm, + opd.opd_resid.name[0], opd.opd_resid.name[1], + opd.opd_extent.start, opd.opd_extent.end); - start = oa->o_size; - end = start + oa->o_blocks; + ost_prolong_locks(&opd); - opd.opd_mode = LCK_PW; - opd.opd_exp = req->rq_export; - opd.opd_policy.l_extent.start = start & CFS_PAGE_MASK; - if (oa->o_blocks == OBD_OBJECT_EOF || end < start) - opd.opd_policy.l_extent.end = OBD_OBJECT_EOF; - else - opd.opd_policy.l_extent.end = end | ~CFS_PAGE_MASK; - - /* prolong locks for the current service time of the corresponding - * portal (= OST_IO_PORTAL) */ - opd.opd_timeout = AT_OFF ? 
obd_timeout / 2: - max(at_est2timeout(at_get(&req->rq_rqbd-> - rqbd_service->srv_at_estimate)), ldlm_timeout); - - CDEBUG(D_DLMTRACE,"refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n", - res_id.name[0], res_id.name[1], opd.opd_policy.l_extent.start, - opd.opd_policy.l_extent.end); - - opd.opd_oa = oa; - ldlm_resource_iterate(req->rq_export->exp_obd->obd_namespace, &res_id, - ost_prolong_locks_iter, &opd); - RETURN(opd.opd_lock_match); + CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n", + obd->obd_name, opd.opd_locks, req); + + RETURN(opd.opd_locks); +} + +static void ost_rw_hpreq_fini(struct ptlrpc_request *req) +{ + (void)ost_rw_hpreq_check(req); } /** @@ -1983,20 +1871,15 @@ static int ost_punch_hpreq_lock_match(struct ptlrpc_request *req, struct ldlm_lock *lock) { struct ost_body *body; - int rc; ENTRY; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) - RETURN(0); /* can't return -EFAULT here */ - - rc = ost_validate_obdo(req->rq_export, &body->oa, NULL); - if (rc) - RETURN(rc); + LASSERT(body != NULL); if (body->oa.o_valid & OBD_MD_FLHANDLE && body->oa.o_handle.cookie == lock->l_handle.h_cookie) RETURN(1); + RETURN(0); } @@ -2005,31 +1888,64 @@ static int ost_punch_hpreq_lock_match(struct ptlrpc_request *req, */ static int ost_punch_hpreq_check(struct ptlrpc_request *req) { + struct obd_device *obd = req->rq_export->exp_obd; struct ost_body *body; - int rc; + struct obdo *oa; + struct ost_prolong_data opd = { 0 }; + __u64 start, end; + ENTRY; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) - RETURN(-EFAULT); + LASSERT(body != NULL); - rc = ost_validate_obdo(req->rq_export, &body->oa, NULL); - if (rc) - RETURN(rc); + oa = &body->oa; + LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS) || + !(oa->o_flags & OBD_FL_SRVLOCK)); + + start = oa->o_size; + end = start + oa->o_blocks; + + opd.opd_req = req; + opd.opd_mode = LCK_PW; + opd.opd_exp = req->rq_export; + opd.opd_oa = oa; + opd.opd_extent.start = start; + opd.opd_extent.end = end; + if (oa->o_blocks == OBD_OBJECT_EOF) + opd.opd_extent.end = OBD_OBJECT_EOF; + opd.opd_timeout = prolong_timeout(req); + + osc_build_res_name(oa->o_id, oa->o_seq, &opd.opd_resid); + + CDEBUG(D_DLMTRACE, + "%s: refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n", + obd->obd_name, + opd.opd_resid.name[0], opd.opd_resid.name[1], + opd.opd_extent.start, opd.opd_extent.end); - LASSERT(!(body->oa.o_valid & OBD_MD_FLFLAGS) || - !(body->oa.o_flags & OBD_FL_SRVLOCK)); + ost_prolong_locks(&opd); - RETURN(ost_punch_prolong_locks(req, &body->oa)); + CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n", + obd->obd_name, opd.opd_locks, req); + + RETURN(opd.opd_locks > 0); +} + +static void ost_punch_hpreq_fini(struct ptlrpc_request *req) +{ + (void)ost_punch_hpreq_check(req); } struct ptlrpc_hpreq_ops ost_hpreq_rw = { - .hpreq_lock_match = ost_rw_hpreq_lock_match, - .hpreq_check = ost_rw_hpreq_check, + .hpreq_lock_match = ost_rw_hpreq_lock_match, + .hpreq_check = ost_rw_hpreq_check, + .hpreq_fini = ost_rw_hpreq_fini }; struct ptlrpc_hpreq_ops ost_hpreq_punch = { - .hpreq_lock_match = ost_punch_hpreq_lock_match, - .hpreq_check = ost_punch_hpreq_check, + .hpreq_lock_match = ost_punch_hpreq_lock_match, + .hpreq_check = ost_punch_hpreq_check, + .hpreq_fini = ost_punch_hpreq_fini }; /** Assign high priority operations to the request if needed. 
*/ @@ -2044,6 +1960,7 @@ static int ost_hpreq_handler(struct ptlrpc_request *req) struct niobuf_remote *nb; struct obd_ioobj *ioo; int objcount, niocount; + int rc; int i; /* RPCs on the H-P queue can be inspected before @@ -2087,6 +2004,12 @@ static int ost_hpreq_handler(struct ptlrpc_request *req) RETURN(-EFAULT); } + rc = ost_validate_obdo(req->rq_export, &body->oa, ioo); + if (rc) { + CERROR("invalid object ids\n"); + RETURN(rc); + } + for (niocount = i = 0; i < objcount; i++) { if (ioo[i].ioo_bufcnt == 0) { CERROR("ioo[%d] has zero bufcnt\n", i); @@ -2362,7 +2285,7 @@ int ost_handle(struct ptlrpc_request *req) if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE)) RETURN(0); rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast, - ldlm_server_blocking_ast, + ost_blocking_ast, ldlm_server_glimpse_ast); fail = OBD_FAIL_OST_LDLM_REPLY_NET; break; @@ -2473,7 +2396,7 @@ static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg) lprocfs_ost_init_vars(&lvars); lprocfs_obd_setup(obd, lvars.obd_vars); - cfs_sema_init(&ost->ost_health_sem, 1); + cfs_mutex_init(&ost->ost_health_mutex); if (oss_num_threads) { /* If oss_num_threads is set, it is the min and the max. */ @@ -2508,7 +2431,7 @@ static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg) GOTO(out_lprocfs, rc = -ENOMEM); } - rc = ptlrpc_start_threads(obd, ost->ost_service); + rc = ptlrpc_start_threads(ost->ost_service); if (rc) GOTO(out_service, rc = -EINVAL); @@ -2537,7 +2460,7 @@ static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg) GOTO(out_service, rc = -ENOMEM); } - rc = ptlrpc_start_threads(obd, ost->ost_create_service); + rc = ptlrpc_start_threads(ost->ost_create_service); if (rc) GOTO(out_create, rc = -EINVAL); @@ -2557,7 +2480,7 @@ static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg) ost->ost_io_service->srv_init = ost_thread_init; ost->ost_io_service->srv_done = ost_thread_done; ost->ost_io_service->srv_cpu_affinity = 1; - rc = ptlrpc_start_threads(obd, ost->ost_io_service); + rc = ptlrpc_start_threads(ost->ost_io_service); if (rc) GOTO(out_io, rc = -EINVAL); @@ -2587,20 +2510,16 @@ static int ost_cleanup(struct obd_device *obd) ping_evictor_stop(); - cfs_spin_lock_bh(&obd->obd_processing_task_lock); - if (obd->obd_recovering) { - target_cancel_recovery_timer(obd); - obd->obd_recovering = 0; - } - cfs_spin_unlock_bh(&obd->obd_processing_task_lock); - - cfs_down(&ost->ost_health_sem); + /* there is no recovery for OST OBD, all recovery is controlled by + * obdfilter OBD */ + LASSERT(obd->obd_recovering == 0); + cfs_mutex_lock(&ost->ost_health_mutex); ptlrpc_unregister_service(ost->ost_service); ptlrpc_unregister_service(ost->ost_create_service); ptlrpc_unregister_service(ost->ost_io_service); ost->ost_service = NULL; ost->ost_create_service = NULL; - cfs_up(&ost->ost_health_sem); + cfs_mutex_unlock(&ost->ost_health_mutex); lprocfs_obd_cleanup(obd); @@ -2612,11 +2531,11 @@ static int ost_health_check(struct obd_device *obd) struct ost_obd *ost = &obd->u.ost; int rc = 0; - cfs_down(&ost->ost_health_sem); + cfs_mutex_lock(&ost->ost_health_mutex); rc |= ptlrpc_service_health_check(ost->ost_service); rc |= ptlrpc_service_health_check(ost->ost_create_service); rc |= ptlrpc_service_health_check(ost->ost_io_service); - cfs_up(&ost->ost_health_sem); + cfs_mutex_unlock(&ost->ost_health_mutex); /* * health_check to return 0 on healthy