diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c index 6b270a6..76a046f 100644 --- a/lustre/osc/osc_request.c +++ b/lustre/osc/osc_request.c @@ -26,7 +26,7 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. */ /* @@ -96,9 +96,9 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, if (lsm) { LASSERT(lsm->lsm_object_id); - LASSERT_MDS_GROUP(lsm->lsm_object_gr); + LASSERT_SEQ_IS_MDT(lsm->lsm_object_seq); (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id); - (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr); + (*lmmp)->lmm_object_seq = cpu_to_le64(lsm->lsm_object_seq); } RETURN(lmm_size); @@ -151,9 +151,9 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, if (lmm != NULL) { /* XXX zero *lsmp? */ (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id); - (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr); + (*lsmp)->lsm_object_seq = le64_to_cpu (lmm->lmm_object_seq); LASSERT((*lsmp)->lsm_object_id); - LASSERT_MDS_GROUP((*lsmp)->lsm_object_gr); + LASSERT_SEQ_IS_MDT((*lsmp)->lsm_object_seq); } (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES; @@ -185,7 +185,7 @@ static inline void osc_pack_req_body(struct ptlrpc_request *req, body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); - body->oa = *oinfo->oi_oa; + lustre_set_wire_obdo(&body->oa, oinfo->oi_oa); osc_pack_capa(req, body, oinfo->oi_capa); } @@ -210,11 +210,10 @@ static int osc_getattr_interpret(const struct lu_env *env, if (rc != 0) GOTO(out, rc); - body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body), - lustre_swab_ost_body); + body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); if (body) { CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode); - memcpy(aa->aa_oi->oi_oa, &body->oa, sizeof(*aa->aa_oi->oi_oa)); + lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa); /* This should really be sent by the OST */ aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE; @@ -292,7 +291,7 @@ static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo) GOTO(out, rc = -EPROTO); CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode); - *oinfo->oi_oa = body->oa; + lustre_get_wire_obdo(oinfo->oi_oa, &body->oa); /* This should really be sent by the OST */ oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE; @@ -312,10 +311,7 @@ static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo, int rc; ENTRY; - LASSERTF(!(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP) || - CHECK_MDS_GROUP(oinfo->oi_oa->o_gr), - "oinfo->oi_oa->o_valid="LPU64" oinfo->oi_oa->o_gr="LPU64"\n", - oinfo->oi_oa->o_valid, oinfo->oi_oa->o_gr); + LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); if (req == NULL) @@ -340,7 +336,7 @@ static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo, if (body == NULL) GOTO(out, rc = -EPROTO); - *oinfo->oi_oa = body->oa; + lustre_get_wire_obdo(oinfo->oi_oa, &body->oa); EXIT; out: @@ -350,7 +346,7 @@ out: static int osc_setattr_interpret(const struct lu_env *env, struct ptlrpc_request *req, - struct osc_async_args *aa, int rc) +
struct osc_setattr_args *sa, int rc) { struct ost_body *body; ENTRY; @@ -362,19 +358,20 @@ static int osc_setattr_interpret(const struct lu_env *env, if (body == NULL) GOTO(out, rc = -EPROTO); - *aa->aa_oi->oi_oa = body->oa; + lustre_get_wire_obdo(sa->sa_oa, &body->oa); out: - rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc); + rc = sa->sa_upcall(sa->sa_cookie, rc); RETURN(rc); } -static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo, - struct obd_trans_info *oti, - struct ptlrpc_request_set *rqset) +int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, + struct obd_trans_info *oti, + obd_enqueue_update_f upcall, void *cookie, + struct ptlrpc_request_set *rqset) { - struct ptlrpc_request *req; - struct osc_async_args *aa; - int rc; + struct ptlrpc_request *req; + struct osc_setattr_args *sa; + int rc; ENTRY; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); @@ -388,15 +385,13 @@ static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo, RETURN(rc); } + if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) + oinfo->oi_oa->o_lcookie = *oti->oti_logcookies; + osc_pack_req_body(req, oinfo); ptlrpc_request_set_replen(req); - if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) { - LASSERT(oti); - oinfo->oi_oa->o_lcookie = *oti->oti_logcookies; - } - /* do mds to ost setattr asynchronously */ if (!rqset) { /* Do not wait for response. */ @@ -405,16 +400,29 @@ static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo, req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret; - CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - aa->aa_oi = oinfo; + CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args)); + sa = ptlrpc_req_async_args(req); + sa->sa_oa = oinfo->oi_oa; + sa->sa_upcall = upcall; + sa->sa_cookie = cookie; - ptlrpc_set_add_req(rqset, req); + if (rqset == PTLRPCD_SET) + ptlrpcd_add_req(req, PSCOPE_OTHER); + else + ptlrpc_set_add_req(rqset, req); } RETURN(0); } +static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo, + struct obd_trans_info *oti, + struct ptlrpc_request_set *rqset) +{ + return osc_setattr_async_base(exp, oinfo, oti, + oinfo->oi_cb_up, oinfo, rqset); +} + int osc_real_create(struct obd_export *exp, struct obdo *oa, struct lov_stripe_md **ea, struct obd_trans_info *oti) { @@ -446,7 +454,7 @@ int osc_real_create(struct obd_export *exp, struct obdo *oa, body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); - body->oa = *oa; + lustre_set_wire_obdo(&body->oa, oa); ptlrpc_request_set_replen(req); @@ -466,7 +474,7 @@ int osc_real_create(struct obd_export *exp, struct obdo *oa, if (body == NULL) GOTO(out_req, rc = -EPROTO); - *oa = body->oa; + lustre_get_wire_obdo(oa, &body->oa); /* This should really be sent by the OST */ oa->o_blksize = PTLRPC_MAX_BRW_SIZE; @@ -477,7 +485,7 @@ int osc_real_create(struct obd_export *exp, struct obdo *oa, * This needs to be fixed in a big way. 
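
The hunks above consistently replace direct structure assignment (body->oa = *oa) with lustre_set_wire_obdo()/lustre_get_wire_obdo(), so the obdo that travels on the wire is kept distinct from the client's in-memory copy. A minimal sketch of that pattern, assuming a cut-down obdo_sketch type and an SK_FL_LOCAL_MASK of client-only flag bits (both illustrative stand-ins, not the real Lustre definitions; OBD_FL_LOCAL_MASK plays this role later in the patch):

/* Sketch only: the real lustre_set_wire_obdo()/lustre_get_wire_obdo()
 * live in the wire-protocol code and cover many more fields. */
struct obdo_sketch {
        unsigned long long o_valid;     /* which fields are meaningful */
        unsigned long long o_id;
        unsigned int       o_flags;
};

#define SK_FL_LOCAL_MASK 0xf0000000u    /* assumed client-only flag bits */

static void sketch_set_wire_obdo(struct obdo_sketch *wobdo,
                                 const struct obdo_sketch *obdo)
{
        *wobdo = *obdo;
        /* never put flags on the wire that only mean something here */
        wobdo->o_flags &= ~SK_FL_LOCAL_MASK;
}

static void sketch_get_wire_obdo(struct obdo_sketch *obdo,
                                 const struct obdo_sketch *wobdo)
{
        unsigned int local = obdo->o_flags & SK_FL_LOCAL_MASK;

        *obdo = *wobdo;
        /* a reply must not clobber the client-local bits */
        obdo->o_flags = (obdo->o_flags & ~SK_FL_LOCAL_MASK) | local;
}

Keeping the filter in one helper pair means every request and reply path treats client-local flags the same way, instead of each call site remembering to mask them.
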
*/ lsm->lsm_object_id = oa->o_id; - lsm->lsm_object_gr = oa->o_gr; + lsm->lsm_object_seq = oa->o_seq; *ea = lsm; if (oti != NULL) { @@ -500,42 +508,21 @@ out: RETURN(rc); } -static int osc_punch_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - struct osc_punch_args *aa, int rc) -{ - struct ost_body *body; - ENTRY; - - if (rc != 0) - GOTO(out, rc); - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) - GOTO(out, rc = -EPROTO); - - *aa->pa_oa = body->oa; -out: - rc = aa->pa_upcall(aa->pa_cookie, rc); - RETURN(rc); -} - -int osc_punch_base(struct obd_export *exp, struct obdo *oa, - struct obd_capa *capa, +int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, obd_enqueue_update_f upcall, void *cookie, struct ptlrpc_request_set *rqset) { - struct ptlrpc_request *req; - struct osc_punch_args *aa; - struct ost_body *body; - int rc; + struct ptlrpc_request *req; + struct osc_setattr_args *sa; + struct ost_body *body; + int rc; ENTRY; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH); if (req == NULL) RETURN(-ENOMEM); - osc_set_capa_size(req, &RMF_CAPA1, capa); + osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa); rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH); if (rc) { ptlrpc_request_free(req); @@ -546,18 +533,18 @@ int osc_punch_base(struct obd_export *exp, struct obdo *oa, body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); - body->oa = *oa; - osc_pack_capa(req, body, capa); + lustre_set_wire_obdo(&body->oa, oinfo->oi_oa); + osc_pack_capa(req, body, oinfo->oi_capa); ptlrpc_request_set_replen(req); - req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_punch_interpret; - CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - aa->pa_oa = oa; - aa->pa_upcall = upcall; - aa->pa_cookie = cookie; + req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret; + CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args)); + sa = ptlrpc_req_async_args(req); + sa->sa_oa = oinfo->oi_oa; + sa->sa_upcall = upcall; + sa->sa_cookie = cookie; if (rqset == PTLRPCD_SET) ptlrpcd_add_req(req, PSCOPE_OTHER); else @@ -573,7 +560,7 @@ static int osc_punch(struct obd_export *exp, struct obd_info *oinfo, oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start; oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end; oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; - return osc_punch_base(exp, oinfo->oi_oa, oinfo->oi_capa, + return osc_punch_base(exp, oinfo, oinfo->oi_cb_up, oinfo, rqset); } @@ -605,7 +592,7 @@ static int osc_sync(struct obd_export *exp, struct obdo *oa, /* overload the size and blocks fields in the oa with start/end */ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); - body->oa = *oa; + lustre_set_wire_obdo(&body->oa, oa); body->oa.o_size = start; body->oa.o_blocks = end; body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS); @@ -621,7 +608,7 @@ static int osc_sync(struct obd_export *exp, struct obdo *oa, if (body == NULL) GOTO(out, rc = -EPROTO); - *oa = body->oa; + lustre_get_wire_obdo(oa, &body->oa); EXIT; out: @@ -633,8 +620,8 @@ static int osc_sync(struct obd_export *exp, struct obdo *oa, * @objid. Found locks are added into @cancel list. Returns the amount of * locks added to @cancels list. 
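
osc_punch_base() above drops its private osc_punch_interpret() and reuses osc_setattr_interpret(), since both paths only need to stash an upcall and an opaque cookie in the request's async-args area and invoke them from the reply callback. A sketch of that upcall/cookie convention, with simplified types standing in for osc_setattr_args and obd_enqueue_update_f (the real args are carried in req->rq_async_args):

typedef int (*update_upcall_t)(void *cookie, int rc);

struct setattr_args_sketch {
        update_upcall_t sa_upcall;
        void           *sa_cookie;
};

static int setattr_interpret_sketch(struct setattr_args_sketch *sa, int rc)
{
        /* on success the reply body would be unpacked into the cached
         * obdo here, exactly as osc_setattr_interpret() does */
        return sa->sa_upcall(sa->sa_cookie, rc);
}

osc_setattr_async() and osc_punch() keep their old interfaces by passing oinfo->oi_cb_up as the upcall and oinfo itself as the cookie.
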
*/ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, - struct list_head *cancels, ldlm_mode_t mode, - int lock_flags) + cfs_list_t *cancels, + ldlm_mode_t mode, int lock_flags) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; struct ldlm_res_id res_id; @@ -642,7 +629,7 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, int count; ENTRY; - osc_build_res_name(oa->o_id, oa->o_gr, &res_id); + osc_build_res_name(oa->o_id, oa->o_seq, &res_id); res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); if (res == NULL) RETURN(0); @@ -661,19 +648,19 @@ static int osc_destroy_interpret(const struct lu_env *env, { struct client_obd *cli = &req->rq_import->imp_obd->u.cli; - atomic_dec(&cli->cl_destroy_in_flight); + cfs_atomic_dec(&cli->cl_destroy_in_flight); cfs_waitq_signal(&cli->cl_destroy_waitq); return 0; } static int osc_can_send_destroy(struct client_obd *cli) { - if (atomic_inc_return(&cli->cl_destroy_in_flight) <= + if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <= cli->cl_max_rpcs_in_flight) { /* The destroy request can be sent */ return 1; } - if (atomic_dec_return(&cli->cl_destroy_in_flight) < + if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) < cli->cl_max_rpcs_in_flight) { /* * The counter has been modified between the two atomic @@ -728,27 +715,31 @@ static int osc_destroy(struct obd_export *exp, struct obdo *oa, } req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ - req->rq_interpret_reply = osc_destroy_interpret; ptlrpc_at_set_req_timeout(req); if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) oa->o_lcookie = *oti->oti_logcookies; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); - body->oa = *oa; + lustre_set_wire_obdo(&body->oa, oa); osc_pack_capa(req, body, (struct obd_capa *)capa); ptlrpc_request_set_replen(req); - if (!osc_can_send_destroy(cli)) { - struct l_wait_info lwi = { 0 }; - - /* - * Wait until the number of on-going destroy RPCs drops - * under max_rpc_in_flight - */ - l_wait_event_exclusive(cli->cl_destroy_waitq, - osc_can_send_destroy(cli), &lwi); + /* don't throttle destroy RPCs for the MDT */ + if (!(cli->cl_import->imp_connect_flags_orig & OBD_CONNECT_MDS)) { + req->rq_interpret_reply = osc_destroy_interpret; + if (!osc_can_send_destroy(cli)) { + struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, + NULL); + + /* + * Wait until the number of on-going destroy RPCs drops + * under max_rpc_in_flight + */ + l_wait_event_exclusive(cli->cl_destroy_waitq, + osc_can_send_destroy(cli), &lwi); + } } /* Do not wait for response */ @@ -770,11 +761,15 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, CERROR("dirty %lu - %lu > dirty_max %lu\n", cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max); oa->o_undirty = 0; - } else if (atomic_read(&obd_dirty_pages) - - atomic_read(&obd_dirty_transit_pages) > obd_max_dirty_pages){ + } else if (cfs_atomic_read(&obd_dirty_pages) - + cfs_atomic_read(&obd_dirty_transit_pages) > + obd_max_dirty_pages + 1){ + /* The cfs_atomic_read() allowing the cfs_atomic_inc() are + * not covered by a lock thus they may safely race and trip + * this CERROR() unless we add in a small fudge factor (+1). 
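
osc_can_send_destroy() above reserves a destroy-RPC slot optimistically: it increments the in-flight counter first, and only if that overshoots cl_max_rpcs_in_flight does it back the increment out, waking a waiter if the counter meanwhile dropped below the limit. A self-contained sketch of the same pattern using C11 atomics in place of cfs_atomic_t (the counter and limit names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int destroys_in_flight;
static int max_rpcs_in_flight = 8;

static bool can_send_destroy_sketch(void)
{
        /* optimistically take a slot: inc first, check after */
        if (atomic_fetch_add(&destroys_in_flight, 1) + 1 <=
            max_rpcs_in_flight)
                return true;

        /* overshot: give the slot back, and if the counter dropped
         * below the limit between the two operations, a sleeper can
         * be woken now (cfs_waitq_signal() in the real code) */
        if (atomic_fetch_sub(&destroys_in_flight, 1) - 1 <
            max_rpcs_in_flight) {
                /* wake up cl_destroy_waitq here */
        }
        return false;
}

The second comparison is what closes the race the in-tree comment describes: a concurrent decrement landing between the two atomic operations would otherwise strand a sleeper.
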
*/ CERROR("dirty %d - %d > system dirty_max %d\n", - atomic_read(&obd_dirty_pages), - atomic_read(&obd_dirty_transit_pages), + cfs_atomic_read(&obd_dirty_pages), + cfs_atomic_read(&obd_dirty_transit_pages), obd_max_dirty_pages); oa->o_undirty = 0; } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) { @@ -792,20 +787,31 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, client_obd_list_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n", oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant); + +} + +static void osc_update_next_shrink(struct client_obd *cli) +{ + cli->cl_next_shrink_grant = + cfs_time_shift(cli->cl_grant_shrink_interval); + CDEBUG(D_CACHE, "next time %ld to shrink grant \n", + cli->cl_next_shrink_grant); } /* caller must hold loi_list_lock */ static void osc_consume_write_grant(struct client_obd *cli, struct brw_page *pga) { + LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock); LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); - atomic_inc(&obd_dirty_pages); + cfs_atomic_inc(&obd_dirty_pages); cli->cl_dirty += CFS_PAGE_SIZE; cli->cl_avail_grant -= CFS_PAGE_SIZE; pga->flag |= OBD_BRW_FROM_GRANT; CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", CFS_PAGE_SIZE, pga, pga->pg); LASSERT(cli->cl_avail_grant >= 0); + osc_update_next_shrink(cli); } /* the companion to osc_consume_write_grant, called when a brw has completed. @@ -816,17 +822,18 @@ static void osc_release_write_grant(struct client_obd *cli, int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096; ENTRY; + LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock); if (!(pga->flag & OBD_BRW_FROM_GRANT)) { EXIT; return; } pga->flag &= ~OBD_BRW_FROM_GRANT; - atomic_dec(&obd_dirty_pages); + cfs_atomic_dec(&obd_dirty_pages); cli->cl_dirty -= CFS_PAGE_SIZE; if (pga->flag & OBD_BRW_NOCACHE) { pga->flag &= ~OBD_BRW_NOCACHE; - atomic_dec(&obd_dirty_transit_pages); + cfs_atomic_dec(&obd_dirty_transit_pages); cli->cl_dirty_transit -= CFS_PAGE_SIZE; } if (!sent) { @@ -860,14 +867,15 @@ static unsigned long rpcs_in_flight(struct client_obd *cli) /* caller must hold loi_list_lock */ void osc_wake_cache_waiters(struct client_obd *cli) { - struct list_head *l, *tmp; + cfs_list_t *l, *tmp; struct osc_cache_waiter *ocw; ENTRY; - list_for_each_safe(l, tmp, &cli->cl_cache_waiters) { + cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) { /* if we can't dirty more, we must wait until some is written */ if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) || - (atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) { + (cfs_atomic_read(&obd_dirty_pages) + 1 > + obd_max_dirty_pages)) { CDEBUG(D_CACHE, "no dirty room: dirty: %ld " "osc max %ld, sys max %d\n", cli->cl_dirty, cli->cl_dirty_max, obd_max_dirty_pages); @@ -882,8 +890,8 @@ void osc_wake_cache_waiters(struct client_obd *cli) return; } - ocw = list_entry(l, struct osc_cache_waiter, ocw_entry); - list_del_init(&ocw->ocw_entry); + ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry); + cfs_list_del_init(&ocw->ocw_entry); if (cli->cl_avail_grant < CFS_PAGE_SIZE) { /* no more RPCs in flight to return grant, do sync IO */ ocw->ocw_rc = -EDQUOT; @@ -899,25 +907,205 @@ void osc_wake_cache_waiters(struct client_obd *cli) EXIT; } -static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) +static void __osc_update_grant(struct client_obd *cli, obd_size grant) { client_obd_list_lock(&cli->cl_loi_list_lock); - cli->cl_avail_grant = ocd->ocd_grant; + 
cli->cl_avail_grant += grant; client_obd_list_unlock(&cli->cl_loi_list_lock); - - CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld\n", - cli->cl_avail_grant, cli->cl_lost_grant); - LASSERT(cli->cl_avail_grant >= 0); } static void osc_update_grant(struct client_obd *cli, struct ost_body *body) { + if (body->oa.o_valid & OBD_MD_FLGRANT) { + CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant); + __osc_update_grant(cli, body->oa.o_grant); + } +} + +static int osc_set_info_async(struct obd_export *exp, obd_count keylen, + void *key, obd_count vallen, void *val, + struct ptlrpc_request_set *set); + +static int osc_shrink_grant_interpret(const struct lu_env *env, + struct ptlrpc_request *req, + void *aa, int rc) +{ + struct client_obd *cli = &req->rq_import->imp_obd->u.cli; + struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa; + struct ost_body *body; + + if (rc != 0) { + __osc_update_grant(cli, oa->o_grant); + GOTO(out, rc); + } + + body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); + LASSERT(body); + osc_update_grant(cli, body); +out: + OBDO_FREE(oa); + return rc; +} + +static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) +{ client_obd_list_lock(&cli->cl_loi_list_lock); - CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant); - if (body->oa.o_valid & OBD_MD_FLGRANT) - cli->cl_avail_grant += body->oa.o_grant; - /* waiters are woken in brw_interpret */ + oa->o_grant = cli->cl_avail_grant / 4; + cli->cl_avail_grant -= oa->o_grant; client_obd_list_unlock(&cli->cl_loi_list_lock); + oa->o_flags |= OBD_FL_SHRINK_GRANT; + osc_update_next_shrink(cli); +} + +/* Shrink the current grant, either from some large amount to enough for a + * full set of in-flight RPCs, or if we have already shrunk to that limit + * then to enough for a single RPC. This avoids keeping more grant than + * needed, and avoids shrinking the grant piecemeal. */ +static int osc_shrink_grant(struct client_obd *cli) +{ + long target = (cli->cl_max_rpcs_in_flight + 1) * + cli->cl_max_pages_per_rpc; + + client_obd_list_lock(&cli->cl_loi_list_lock); + if (cli->cl_avail_grant <= target) + target = cli->cl_max_pages_per_rpc; + client_obd_list_unlock(&cli->cl_loi_list_lock); + + return osc_shrink_grant_to_target(cli, target); +} + +int osc_shrink_grant_to_target(struct client_obd *cli, long target) +{ + int rc = 0; + struct ost_body *body; + ENTRY; + + client_obd_list_lock(&cli->cl_loi_list_lock); + /* Don't shrink if we are already above or below the desired limit + * We don't want to shrink below a single RPC, as that will negatively + * impact block allocation and long-term performance. 
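
The shrink target above is computed in two steps: a large surplus is first cut to enough grant for a full set of in-flight RPCs, and only a later pass cuts it to a single RPC, so grant is never trickled back piecemeal. A sketch of that arithmetic, with every quantity expressed in one unit for clarity (the real code tracks cl_avail_grant in bytes):

/* Returns how much grant to hand back to the server. */
static long grant_to_return_sketch(long avail, long max_rpcs, long per_rpc)
{
        long target = (max_rpcs + 1) * per_rpc;

        if (avail <= target)            /* already past the first step, */
                target = per_rpc;       /* so aim for a single RPC next */
        if (target < per_rpc)           /* but never below one RPC */
                target = per_rpc;
        if (target >= avail)            /* nothing worth returning */
                return 0;
        return avail - target;          /* becomes body->oa.o_grant */
}
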
*/ + if (target < cli->cl_max_pages_per_rpc) + target = cli->cl_max_pages_per_rpc; + + if (target >= cli->cl_avail_grant) { + client_obd_list_unlock(&cli->cl_loi_list_lock); + RETURN(0); + } + client_obd_list_unlock(&cli->cl_loi_list_lock); + + OBD_ALLOC_PTR(body); + if (!body) + RETURN(-ENOMEM); + + osc_announce_cached(cli, &body->oa, 0); + + client_obd_list_lock(&cli->cl_loi_list_lock); + body->oa.o_grant = cli->cl_avail_grant - target; + cli->cl_avail_grant = target; + client_obd_list_unlock(&cli->cl_loi_list_lock); + body->oa.o_flags |= OBD_FL_SHRINK_GRANT; + osc_update_next_shrink(cli); + + rc = osc_set_info_async(cli->cl_import->imp_obd->obd_self_export, + sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK, + sizeof(*body), body, NULL); + if (rc != 0) + __osc_update_grant(cli, body->oa.o_grant); + OBD_FREE_PTR(body); + RETURN(rc); +} + +#define GRANT_SHRINK_LIMIT PTLRPC_MAX_BRW_SIZE +static int osc_should_shrink_grant(struct client_obd *client) +{ + cfs_time_t time = cfs_time_current(); + cfs_time_t next_shrink = client->cl_next_shrink_grant; + + if ((client->cl_import->imp_connect_data.ocd_connect_flags & + OBD_CONNECT_GRANT_SHRINK) == 0) + return 0; + + if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) { + if (client->cl_import->imp_state == LUSTRE_IMP_FULL && + client->cl_avail_grant > GRANT_SHRINK_LIMIT) + return 1; + else + osc_update_next_shrink(client); + } + return 0; +} + +static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data) +{ + struct client_obd *client; + + cfs_list_for_each_entry(client, &item->ti_obd_list, + cl_grant_shrink_list) { + if (osc_should_shrink_grant(client)) + osc_shrink_grant(client); + } + return 0; +} + +static int osc_add_shrink_grant(struct client_obd *client) +{ + int rc; + + rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval, + TIMEOUT_GRANT, + osc_grant_shrink_grant_cb, NULL, + &client->cl_grant_shrink_list); + if (rc) { + CERROR("add grant client %s error %d\n", + client->cl_import->imp_obd->obd_name, rc); + return rc; + } + CDEBUG(D_CACHE, "add grant client %s \n", + client->cl_import->imp_obd->obd_name); + osc_update_next_shrink(client); + return 0; +} + +static int osc_del_shrink_grant(struct client_obd *client) +{ + return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list, + TIMEOUT_GRANT); +} + +static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) +{ + /* + * ocd_grant is the total grant amount we're expect to hold: if we've + * been evicted, it's the new avail_grant amount, cl_dirty will drop + * to 0 as inflight RPCs fail out; otherwise, it's avail_grant + dirty. + * + * race is tolerable here: if we're evicted, but imp_state already + * left EVICTED state, then cl_dirty must be 0 already. 
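
osc_should_shrink_grant() gates all of this: shrinking only happens if the server negotiated OBD_CONNECT_GRANT_SHRINK, the shrink interval has elapsed, the import is fully connected, and the available grant exceeds GRANT_SHRINK_LIMIT; otherwise the timer is simply re-armed. A sketch of that decision using time_t and hard-coded stand-ins for the constants, and ignoring the small CFS_TICK slack the real check allows:

#include <stdbool.h>
#include <time.h>

#define SHRINK_LIMIT_SKETCH    (1L << 20)  /* stands in for GRANT_SHRINK_LIMIT */
#define SHRINK_INTERVAL_SKETCH 60          /* cl_grant_shrink_interval */

struct grant_client_sketch {
        time_t next_shrink;       /* re-armed after each shrink */
        long   avail_grant;
        bool   import_full;       /* LUSTRE_IMP_FULL */
        bool   shrink_negotiated; /* OBD_CONNECT_GRANT_SHRINK */
};

static bool should_shrink_sketch(struct grant_client_sketch *cl, time_t now)
{
        if (!cl->shrink_negotiated)
                return false;           /* server can't take grant back */
        if (now < cl->next_shrink)
                return false;           /* interval not yet elapsed */
        if (cl->import_full && cl->avail_grant > SHRINK_LIMIT_SKETCH)
                return true;
        /* connection unusable or little surplus: just re-arm the timer */
        cl->next_shrink = now + SHRINK_INTERVAL_SKETCH;
        return false;
}
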
+ */ + client_obd_list_lock(&cli->cl_loi_list_lock); + if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED) + cli->cl_avail_grant = ocd->ocd_grant; + else + cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty; + + if (cli->cl_avail_grant < 0) { + CWARN("%s: available grant < 0, the OSS is probably not running" + " with patch from bug20278 (%ld) \n", + cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant); + /* workaround for 1.6 servers which do not have + * the patch from bug20278 */ + cli->cl_avail_grant = ocd->ocd_grant; + } + + client_obd_list_unlock(&cli->cl_loi_list_lock); + + CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld \n", + cli->cl_import->imp_obd->obd_name, + cli->cl_avail_grant, cli->cl_lost_grant); + + if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK && + cfs_list_empty(&cli->cl_grant_shrink_list)) + osc_add_shrink_grant(cli); } /* We assume that the reason this OSC got a short read is because it read @@ -963,19 +1151,18 @@ static int check_write_rcs(struct ptlrpc_request *req, int requested_nob, int niocount, obd_count page_count, struct brw_page **pga) { - int *remote_rcs, i; + int i; + __u32 *remote_rcs; - /* return error if any niobuf was in error */ - remote_rcs = lustre_swab_repbuf(req, REQ_REC_OFF + 1, - sizeof(*remote_rcs) * niocount, NULL); + remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS, + sizeof(*remote_rcs) * + niocount); if (remote_rcs == NULL) { CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n"); return(-EPROTO); } - if (lustre_msg_swabbed(req->rq_repmsg)) - for (i = 0; i < niocount; i++) - __swab32s(&remote_rcs[i]); + /* return error if any niobuf was in error */ for (i = 0; i < niocount; i++) { if (remote_rcs[i] < 0) return(remote_rcs[i]); @@ -999,7 +1186,8 @@ static int check_write_rcs(struct ptlrpc_request *req, static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) { if (p1->flag != p2->flag) { - unsigned mask = ~(OBD_BRW_FROM_GRANT|OBD_BRW_NOCACHE); + unsigned mask = ~(OBD_BRW_FROM_GRANT| + OBD_BRW_NOCACHE|OBD_BRW_SYNC|OBD_BRW_ASYNC); /* warn if we try to combine flags that we don't know to be * safe to combine */ @@ -1074,10 +1262,10 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa, opc = OST_WRITE; req = ptlrpc_request_alloc_pool(cli->cl_import, cli->cl_import->imp_rq_pool, - &RQF_OST_BRW); + &RQF_OST_BRW_WRITE); } else { opc = OST_READ; - req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW); + req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ); } if (req == NULL) RETURN(-ENOMEM); @@ -1088,6 +1276,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa, } pill = &req->rq_pill; + req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT, + sizeof(*ioobj)); req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT, niocount * sizeof(*niobuf)); osc_set_capa_size(req, &RMF_CAPA1, ocapa); @@ -1114,9 +1304,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa, body = req_capsule_client_get(pill, &RMF_OST_BODY); ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ); niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE); - LASSERT(body && ioobj && niobuf); + LASSERT(body != NULL && ioobj != NULL && niobuf != NULL); - body->oa = *oa; + lustre_set_wire_obdo(&body->oa, oa); obdo_to_ioobj(oa, ioobj); ioobj->ioo_bufcnt = niocount; @@ -1161,24 +1351,26 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa, } LASSERTF((void *)(niobuf - niocount) == - 
lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 2, - niocount * sizeof(*niobuf)), - "want %p - real %p\n", lustre_msg_buf(req->rq_reqmsg, - REQ_REC_OFF + 2, niocount * sizeof(*niobuf)), - (void *)(niobuf - niocount)); + req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE), + "want %p - real %p\n", req_capsule_client_get(&req->rq_pill, + &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount)); osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0); + if (osc_should_shrink_grant(cli)) + osc_shrink_grant_local(cli, &body->oa); /* size[REQ_REC_OFF] still sizeof (*body) */ if (opc == OST_WRITE) { if (unlikely(cli->cl_checksum) && - req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) { + !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { /* store cl_cksum_type in a local variable since * it can be changed via lprocfs */ cksum_type_t cksum_type = cli->cl_cksum_type; - if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) - oa->o_flags = body->oa.o_flags = 0; + if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) { + oa->o_flags &= OBD_FL_LOCAL_MASK; + body->oa.o_flags = 0; + } body->oa.o_flags |= cksum_type_pack(cksum_type); body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS; body->oa.o_cksum = osc_checksum_bulk(requested_nob, @@ -1197,18 +1389,16 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa, } oa->o_cksum = body->oa.o_cksum; /* 1 RC per niobuf */ - req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER, + req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER, sizeof(__u32) * niocount); } else { if (unlikely(cli->cl_checksum) && - req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) { + !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) body->oa.o_flags = 0; body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type); body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS; } - req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER, 0); - /* 1 RC for the whole I/O */ } ptlrpc_request_set_replen(req); @@ -1247,6 +1437,10 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer, return 0; } + /* If this is mmaped file - it can be changed at any time */ + if (oa->o_valid & OBD_MD_FLFLAGS && oa->o_flags & OBD_FL_MMAP) + return 1; + if (oa->o_valid & OBD_MD_FLFLAGS) cksum_type = cksum_type_unpack(oa->o_flags); else @@ -1267,15 +1461,14 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer, msg = "changed in transit AND doesn't match the original - " "likely false positive due to mmap IO (bug 11742)"; - LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inum " - LPU64"/"LPU64" object "LPU64"/"LPU64" extent " - "["LPU64"-"LPU64"]\n", + LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID + " object "LPU64"/"LPU64" extent ["LPU64"-"LPU64"]\n", msg, libcfs_nid2str(peer->nid), - oa->o_valid & OBD_MD_FLFID ? oa->o_fid : (__u64)0, - oa->o_valid & OBD_MD_FLFID ? oa->o_generation : - (__u64)0, + oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0, + oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0, + oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0, oa->o_id, - oa->o_valid & OBD_MD_FLGROUP ? oa->o_gr : (__u64)0, + oa->o_valid & OBD_MD_FLGROUP ? 
oa->o_seq : (__u64)0, pga[0]->off, pga[page_count-1]->off + pga[page_count-1]->count - 1); CERROR("original client csum %x (type %x), server csum %x (type %x), " @@ -1295,23 +1488,33 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) __u32 client_cksum = 0; ENTRY; - if (rc < 0 && rc != -EDQUOT) + if (rc < 0 && rc != -EDQUOT) { + DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc); RETURN(rc); + } LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc); - body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body), - lustre_swab_ost_body); + body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); if (body == NULL) { - CDEBUG(D_INFO, "Can't unpack body\n"); + DEBUG_REQ(D_INFO, req, "Can't unpack body\n"); RETURN(-EPROTO); } +#ifdef HAVE_QUOTA_SUPPORT /* set/clear over quota flag for a uid/gid */ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && - body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) - lquota_setdq(quota_interface, cli, body->oa.o_uid, - body->oa.o_gid, body->oa.o_valid, + body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) { + unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid }; + + CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n", + body->oa.o_uid, body->oa.o_gid, body->oa.o_valid, + body->oa.o_flags); + lquota_setdq(quota_interface, cli, qid, body->oa.o_valid, body->oa.o_flags); + } +#endif + + osc_update_grant(cli, body); if (rc < 0) RETURN(rc); @@ -1319,8 +1522,6 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM) client_cksum = aa->aa_oa->o_cksum; /* save for later */ - osc_update_grant(cli, body); - if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) { if (rc > 0) { CERROR("Unexpected +ve rc %d\n", rc); @@ -1328,6 +1529,9 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) } LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob); + if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk)) + RETURN(-EAGAIN); + if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum && check_write_checksum(&body->oa, peer, client_cksum, body->oa.o_cksum, aa->aa_requested_nob, @@ -1335,15 +1539,18 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) cksum_type_unpack(aa->aa_oa->o_flags))) RETURN(-EAGAIN); - if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk)) - RETURN(-EAGAIN); - rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count, aa->aa_page_count, aa->aa_ppga); GOTO(out, rc); } /* The rest of this function executes only for OST_READs */ + + /* if unwrap_bulk failed, return -EAGAIN to retry */ + rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc); + if (rc < 0) + GOTO(out, rc = -EAGAIN); + if (rc > aa->aa_requested_nob) { CERROR("Unexpected rc %d (%d requested)\n", rc, aa->aa_requested_nob); @@ -1359,10 +1566,6 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) if (rc < aa->aa_requested_nob) handle_short_read(rc, aa->aa_page_count, aa->aa_ppga); - if (sptlrpc_cli_unwrap_bulk_read(req, rc, aa->aa_page_count, - aa->aa_ppga)) - GOTO(out, rc = -EAGAIN); - if (body->oa.o_valid & OBD_MD_FLCKSUM) { static int cksum_counter; __u32 server_cksum = body->oa.o_cksum; @@ -1392,19 +1595,21 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) libcfs_nid2str(peer->nid)); } else if (server_cksum != client_cksum) { LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from " - "%s%s%s inum "LPU64"/"LPU64" object " + "%s%s%s inode "DFID" object " LPU64"/"LPU64" extent " 
"["LPU64"-"LPU64"]\n", req->rq_import->imp_obd->obd_name, libcfs_nid2str(peer->nid), via, router, body->oa.o_valid & OBD_MD_FLFID ? - body->oa.o_fid : (__u64)0, + body->oa.o_parent_seq : (__u64)0, + body->oa.o_valid & OBD_MD_FLFID ? + body->oa.o_parent_oid : 0, body->oa.o_valid & OBD_MD_FLFID ? - body->oa.o_generation :(__u64)0, + body->oa.o_parent_ver : 0, body->oa.o_id, body->oa.o_valid & OBD_MD_FLGROUP ? - body->oa.o_gr : (__u64)0, + body->oa.o_seq : (__u64)0, aa->aa_ppga[0]->off, aa->aa_ppga[aa->aa_page_count-1]->off + aa->aa_ppga[aa->aa_page_count-1]->count - @@ -1431,7 +1636,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) } out: if (rc >= 0) - *aa->aa_oa = body->oa; + lustre_get_wire_obdo(aa->aa_oa, &body->oa); RETURN(rc); } @@ -1495,7 +1700,7 @@ int osc_brw_redo_request(struct ptlrpc_request *request, ENTRY; if (!osc_should_resend(aa->aa_resends, aa->aa_cli)) { - CERROR("too many resend retries, returning error\n"); + CERROR("too many resent retries, returning error\n"); RETURN(-EIO); } @@ -1512,7 +1717,7 @@ int osc_brw_redo_request(struct ptlrpc_request *request, client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock); - list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { + cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { if (oap->oap_request != NULL) { LASSERTF(request == oap->oap_request, "request %p != oap_request %p\n", @@ -1534,10 +1739,10 @@ int osc_brw_redo_request(struct ptlrpc_request *request, new_aa = ptlrpc_req_async_args(new_req); CFS_INIT_LIST_HEAD(&new_aa->aa_oaps); - list_splice(&aa->aa_oaps, &new_aa->aa_oaps); + cfs_list_splice(&aa->aa_oaps, &new_aa->aa_oaps); CFS_INIT_LIST_HEAD(&aa->aa_oaps); - list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) { + cfs_list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) { if (oap->oap_request) { ptlrpc_req_finished(oap->oap_request); oap->oap_request = ptlrpc_request_addref(new_req); @@ -1643,15 +1848,18 @@ static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo, struct obdo *saved_oa = NULL; struct brw_page **ppga, **orig; struct obd_import *imp = class_exp2cliimp(exp); - struct client_obd *cli = &imp->imp_obd->u.cli; + struct client_obd *cli; int rc, page_count_orig; ENTRY; + LASSERT((imp != NULL) && (imp->imp_obd != NULL)); + cli = &imp->imp_obd->u.cli; + if (cmd & OBD_BRW_CHECK) { /* The caller just wants to know if there's a chance that this * I/O can succeed */ - if (imp == NULL || imp->imp_invalid) + if (imp->imp_invalid) RETURN(-EIO); RETURN(0); } @@ -1740,7 +1948,7 @@ static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop, * queued. this is our cheap solution for good batching in the case * where writepage marks some random page in the middle of the file * as urgent because of, say, memory pressure */ - if (!list_empty(&lop->lop_urgent)) { + if (!cfs_list_empty(&lop->lop_urgent)) { CDEBUG(D_CACHE, "urgent request forcing RPC\n"); RETURN(1); } @@ -1749,8 +1957,8 @@ static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop, if (cmd & OBD_BRW_WRITE) { /* trigger a write rpc stream as long as there are dirtiers * waiting for space. as they're waiting, they're not going to - * create more pages to coallesce with what's waiting.. */ - if (!list_empty(&cli->cl_cache_waiters)) { + * create more pages to coalesce with what's waiting.. 
*/ + if (!cfs_list_empty(&cli->cl_cache_waiters)) { CDEBUG(D_CACHE, "cache waiters forcing RPC\n"); RETURN(1); } @@ -1766,22 +1974,49 @@ static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop, RETURN(0); } -static void on_list(struct list_head *item, struct list_head *list, +static int lop_makes_hprpc(struct loi_oap_pages *lop) +{ + struct osc_async_page *oap; + ENTRY; + + if (cfs_list_empty(&lop->lop_urgent)) + RETURN(0); + + oap = cfs_list_entry(lop->lop_urgent.next, + struct osc_async_page, oap_urgent_item); + + if (oap->oap_async_flags & ASYNC_HP) { + CDEBUG(D_CACHE, "hp request forcing RPC\n"); + RETURN(1); + } + + RETURN(0); +} + +static void on_list(cfs_list_t *item, cfs_list_t *list, int should_be_on) { - if (list_empty(item) && should_be_on) - list_add_tail(item, list); - else if (!list_empty(item) && !should_be_on) - list_del_init(item); + if (cfs_list_empty(item) && should_be_on) + cfs_list_add_tail(item, list); + else if (!cfs_list_empty(item) && !should_be_on) + cfs_list_del_init(item); } /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc * can find pages to build into rpcs quickly */ void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi) { - on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list, - lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) || - lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)); + if (lop_makes_hprpc(&loi->loi_write_lop) || + lop_makes_hprpc(&loi->loi_read_lop)) { + /* HP rpc */ + on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0); + on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1); + } else { + on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0); + on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, + lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)|| + lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)); + } on_list(&loi->loi_write_item, &cli->cl_loi_write_list, loi->loi_write_lop.lop_num_pending); @@ -1829,9 +2064,9 @@ int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap) * page completion may be called only if ->cpo_prep() method was * executed by osc_io_submit(), that also adds page the to pending list */ - if (!list_empty(&oap->oap_pending_item)) { - list_del_init(&oap->oap_pending_item); - list_del_init(&oap->oap_urgent_item); + if (!cfs_list_empty(&oap->oap_pending_item)) { + cfs_list_del_init(&oap->oap_pending_item); + cfs_list_del_init(&oap->oap_urgent_item); loi = oap->oap_loi; lop = (oap->oap_cmd & OBD_BRW_WRITE) ? 
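
loi_list_maint() below now checks for high-priority pages first, so an object with ASYNC_HP pages is parked on cl_loi_hp_ready_list and taken off the ordinary ready list, and vice versa; on_list() keeps the "on the list iff the condition holds" invariant. A sketch with a minimal circular list standing in for cfs_list_t:

struct node_sketch {
        struct node_sketch *prev, *next;
};

static void list_init_sk(struct node_sketch *n) { n->prev = n->next = n; }
static int  list_empty_sk(const struct node_sketch *n) { return n->next == n; }

static void list_del_init_sk(struct node_sketch *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init_sk(n);
}

static void list_add_tail_sk(struct node_sketch *n, struct node_sketch *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static void on_list_sketch(struct node_sketch *item, struct node_sketch *list,
                           int should_be_on)
{
        if (list_empty_sk(item) && should_be_on)
                list_add_tail_sk(item, list);
        else if (!list_empty_sk(item) && !should_be_on)
                list_del_init_sk(item);
}

An object is therefore on at most one of the hp-ready and ready lists at a time, and osc_next_loi() drains cl_loi_hp_ready_list first, which is what lets pages under a blocking lock jump the queue.
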
@@ -1877,9 +2112,11 @@ void osc_oap_to_pending(struct osc_async_page *oap) else lop = &oap->oap_loi->loi_read_lop; - if (oap->oap_async_flags & ASYNC_URGENT) - list_add(&oap->oap_urgent_item, &lop->lop_urgent); - list_add_tail(&oap->oap_pending_item, &lop->lop_pending); + if (oap->oap_async_flags & ASYNC_HP) + cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent); + else if (oap->oap_async_flags & ASYNC_URGENT) + cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent); + cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending); lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1); } @@ -1898,7 +2135,9 @@ static void osc_ap_completion(const struct lu_env *env, oap->oap_request = NULL; } + cfs_spin_lock(&oap->oap_lock); oap->oap_async_flags = 0; + cfs_spin_unlock(&oap->oap_lock); oap->oap_interrupted = 0; if (oap->oap_cmd & OBD_BRW_WRITE) { @@ -1942,9 +2181,20 @@ static int brw_interpret(const struct lu_env *env, rc = osc_brw_fini_request(req, rc); CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc); if (osc_recoverable_error(rc)) { - rc = osc_brw_redo_request(req, aa); - if (rc == 0) - RETURN(0); + /* Only retry once for mmaped files since the mmaped page + * might be modified at anytime. We have to retry at least + * once in case there WAS really a corruption of the page + * on the network, that was not caused by mmap() modifying + * the page. Bug11742 */ + if ((rc == -EAGAIN) && (aa->aa_resends > 0) && + aa->aa_oa->o_valid & OBD_MD_FLFLAGS && + aa->aa_oa->o_flags & OBD_FL_MMAP) { + rc = 0; + } else { + rc = osc_brw_redo_request(req, aa); + if (rc == 0) + RETURN(0); + } } if (aa->aa_ocapa) { @@ -1964,18 +2214,19 @@ static int brw_interpret(const struct lu_env *env, else cli->cl_r_in_flight--; - async = list_empty(&aa->aa_oaps); + async = cfs_list_empty(&aa->aa_oaps); if (!async) { /* from osc_send_oap_rpc() */ struct osc_async_page *oap, *tmp; /* the caller may re-use the oap after the completion call so * we need to clean it up a little */ - list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) { - list_del_init(&oap->oap_rpc_item); + cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, + oap_rpc_item) { + cfs_list_del_init(&oap->oap_rpc_item); osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc); } OBDO_FREE(aa->aa_oa); } else { /* from async_internal() */ - int i; + obd_count i; for (i = 0; i < aa->aa_page_count; i++) osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1); } @@ -1985,12 +2236,13 @@ static int brw_interpret(const struct lu_env *env, if (!async) cl_req_completion(env, aa->aa_clerq, rc); osc_release_ppga(aa->aa_ppga, aa->aa_page_count); + RETURN(rc); } static struct ptlrpc_request *osc_build_req(const struct lu_env *env, struct client_obd *cli, - struct list_head *rpc_list, + cfs_list_t *rpc_list, int page_count, int cmd) { struct ptlrpc_request *req; @@ -2006,10 +2258,13 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env, enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? 
CRT_WRITE : CRT_READ; struct ldlm_lock *lock = NULL; struct cl_req_attr crattr; - int i, rc; + int i, rc, mpflag = 0; ENTRY; - LASSERT(!list_empty(rpc_list)); + LASSERT(!cfs_list_empty(rpc_list)); + + if (cmd & OBD_BRW_MEMALLOC) + mpflag = cfs_memory_pressure_get_and_set(); memset(&crattr, 0, sizeof crattr); OBD_ALLOC(pga, sizeof(*pga) * page_count); @@ -2021,7 +2276,7 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env, GOTO(out, req = ERR_PTR(-ENOMEM)); i = 0; - list_for_each_entry(oap, rpc_list, oap_rpc_item) { + cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) { struct cl_page *page = osc_oap2cl_page(oap); if (ops == NULL) { ops = oap->oap_caller_ops; @@ -2066,6 +2321,9 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env, GOTO(out, req = ERR_PTR(rc)); } + if (cmd & OBD_BRW_MEMALLOC) + req->rq_memalloc = 1; + /* Need to update the timestamps after the request is built in case * we race with setattr (locally or in queue at OST). If OST gets * later setattr before earlier BRW (as determined by the request xid), @@ -2078,10 +2336,13 @@ static struct ptlrpc_request *osc_build_req(const struct lu_env *env, CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); aa = ptlrpc_req_async_args(req); CFS_INIT_LIST_HEAD(&aa->aa_oaps); - list_splice(rpc_list, &aa->aa_oaps); + cfs_list_splice(rpc_list, &aa->aa_oaps); CFS_INIT_LIST_HEAD(rpc_list); aa->aa_clerq = clerq; out: + if (cmd & OBD_BRW_MEMALLOC) + cfs_memory_pressure_restore(mpflag); + capa_put(crattr.cra_capa); if (IS_ERR(req)) { if (oa) @@ -2091,8 +2352,8 @@ out: /* this should happen rarely and is pretty bad, it makes the * pending list not follow the dirty order */ client_obd_list_lock(&cli->cl_loi_list_lock); - list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) { - list_del_init(&oap->oap_rpc_item); + cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) { + cfs_list_del_init(&oap->oap_rpc_item); /* queued sync pages can be torn down while the pages * were between the pending list and the rpc */ @@ -2113,13 +2374,12 @@ out: /** * prepare pages for ASYNC io and put pages in send queue. * - * \param cli - - * \param loi - - * \param cmd - OBD_BRW_* macroses - * \param lop - pending pages + * \param cmd OBD_BRW_* macroses + * \param lop pending pages * - * \return zero if pages successfully add to send queue. - * \return not zere if error occurring. + * \return zero if no page added to send queue. + * \return 1 if pages successfully added to send queue. + * \return negative on errors. */ static int osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, @@ -2132,18 +2392,35 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, struct osc_brw_async_args *aa; const struct obd_async_page_ops *ops; CFS_LIST_HEAD(rpc_list); + CFS_LIST_HEAD(tmp_list); unsigned int ending_offset; unsigned starting_offset = 0; - int srvlock = 0; + int srvlock = 0, mem_tight = 0; struct cl_object *clob = NULL; ENTRY; + /* ASYNC_HP pages first. At present, when the lock the pages is + * to be canceled, the pages covered by the lock will be sent out + * with ASYNC_HP. We have to send out them as soon as possible. 
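
Alongside the ASYNC_HP reordering here, writes issued under memory pressure are tagged OBD_BRW_MEMALLOC (a hint that the I/O comes from kswapd, per bug 19529 later in the patch), and osc_build_req() brackets request construction with cfs_memory_pressure_get_and_set()/cfs_memory_pressure_restore() so allocations made while freeing memory may dip into reserves. A sketch of that save/set/restore shape, with a plain flag standing in for the task's PF_MEMALLOC state:

static int task_memalloc_sketch;

static int mem_pressure_get_and_set_sketch(void)
{
        int old = task_memalloc_sketch;

        task_memalloc_sketch = 1;       /* allocations may use reserves */
        return old;
}

static void mem_pressure_restore_sketch(int old)
{
        task_memalloc_sketch = old;     /* restore, don't blindly clear */
}

/* usage, mirroring osc_build_req():
 *
 *      int mpflag = 0;
 *      if (cmd & OBD_BRW_MEMALLOC)
 *              mpflag = mem_pressure_get_and_set_sketch();
 *      ... build the request, set req->rq_memalloc ...
 *      if (cmd & OBD_BRW_MEMALLOC)
 *              mem_pressure_restore_sketch(mpflag);
 */

Restoring the saved value rather than clearing the flag matters when the caller was already running under memory pressure.
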
*/ + cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) { + if (oap->oap_async_flags & ASYNC_HP) + cfs_list_move(&oap->oap_pending_item, &tmp_list); + else + cfs_list_move_tail(&oap->oap_pending_item, &tmp_list); + if (++page_count >= cli->cl_max_pages_per_rpc) + break; + } + + cfs_list_splice(&tmp_list, &lop->lop_pending); + page_count = 0; + /* first we find the pages we're allowed to work with */ - list_for_each_entry_safe(oap, tmp, &lop->lop_pending, - oap_pending_item) { + cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending, + oap_pending_item) { ops = oap->oap_caller_ops; - LASSERT(oap->oap_magic == OAP_MAGIC); + LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, " + "magic 0x%x\n", oap, oap->oap_magic); if (clob == NULL) { /* pin object in memory, so that completion call-backs @@ -2159,11 +2436,18 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, oap, oap->oap_brw_page.pg, (unsigned)!srvlock); break; } + + /* If there is a gap at the start of this page, it can't merge + * with any previous page, so we'll hand the network a + * "fragmented" page array that it can't transfer in 1 RDMA */ + if (page_count != 0 && oap->oap_page_off != 0) + break; + /* in llite being 'ready' equates to the page being locked * until completion unlocks it. commit_write submits a page * as not ready because its unlock will happen unconditionally * as the call returns. if we race with commit_write giving - * us that page we dont' want to create a hole in the page + * us that page we don't want to create a hole in the page * stream, so we stop and leave the rpc to be fired by * another dirtier or kupdated interval (the not ready page * will still be on the dirty list). we could call in @@ -2188,11 +2472,15 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, case -EINTR: /* the io isn't needed.. 
tell the checks * below to complete the rpc with EINTR */ + cfs_spin_lock(&oap->oap_lock); oap->oap_async_flags |= ASYNC_COUNT_STABLE; + cfs_spin_unlock(&oap->oap_lock); oap->oap_count = -EINTR; break; case 0: + cfs_spin_lock(&oap->oap_lock); oap->oap_async_flags |= ASYNC_READY; + cfs_spin_unlock(&oap->oap_lock); break; default: LASSERTF(0, "oap %p page %p returned %d " @@ -2224,16 +2512,11 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, } } #endif - /* If there is a gap at the start of this page, it can't merge - * with any previous page, so we'll hand the network a - * "fragmented" page array that it can't transfer in 1 RDMA */ - if (page_count != 0 && oap->oap_page_off != 0) - break; /* take the page out of our book-keeping */ - list_del_init(&oap->oap_pending_item); + cfs_list_del_init(&oap->oap_pending_item); lop_update_pending(cli, lop, cmd, -1); - list_del_init(&oap->oap_urgent_item); + cfs_list_del_init(&oap->oap_urgent_item); if (page_count == 0) starting_offset = (oap->oap_obj_off+oap->oap_page_off) & @@ -2255,7 +2538,9 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, } /* now put the page back in our accounting */ - list_add_tail(&oap->oap_rpc_item, &rpc_list); + cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list); + if (oap->oap_brw_flags & OBD_BRW_MEMALLOC) + mem_tight = 1; if (page_count == 0) srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK); if (++page_count >= cli->cl_max_pages_per_rpc) @@ -2291,9 +2576,10 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, RETURN(0); } - req = osc_build_req(env, cli, &rpc_list, page_count, cmd); + req = osc_build_req(env, cli, &rpc_list, page_count, + mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd); if (IS_ERR(req)) { - LASSERT(list_empty(&rpc_list)); + LASSERT(cfs_list_empty(&rpc_list)); loi_list_maint(cli, loi); RETURN(PTR_ERR(req)); } @@ -2324,7 +2610,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, /* queued sync pages can be torn down while the pages * were between the pending list and the rpc */ tmp = NULL; - list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { + cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { /* only one oap gets a request reference */ if (tmp == NULL) tmp = oap; @@ -2347,11 +2633,12 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, #define LOI_DEBUG(LOI, STR, args...) 
\ CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \ - !list_empty(&(LOI)->loi_cli_item), \ + !cfs_list_empty(&(LOI)->loi_ready_item) || \ + !cfs_list_empty(&(LOI)->loi_hp_ready_item), \ (LOI)->loi_write_lop.lop_num_pending, \ - !list_empty(&(LOI)->loi_write_lop.lop_urgent), \ + !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent), \ (LOI)->loi_read_lop.lop_num_pending, \ - !list_empty(&(LOI)->loi_read_lop.lop_urgent), \ + !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent), \ args) \ /* This is called by osc_check_rpcs() to find which objects have pages that @@ -2359,34 +2646,60 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, struct lov_oinfo *osc_next_loi(struct client_obd *cli) { ENTRY; - /* first return all objects which we already know to have - * pages ready to be stuffed into rpcs */ - if (!list_empty(&cli->cl_loi_ready_list)) - RETURN(list_entry(cli->cl_loi_ready_list.next, - struct lov_oinfo, loi_cli_item)); + + /* First return objects that have blocked locks so that they + * will be flushed quickly and other clients can get the lock, + * then objects which have pages ready to be stuffed into RPCs */ + if (!cfs_list_empty(&cli->cl_loi_hp_ready_list)) + RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next, + struct lov_oinfo, loi_hp_ready_item)); + if (!cfs_list_empty(&cli->cl_loi_ready_list)) + RETURN(cfs_list_entry(cli->cl_loi_ready_list.next, + struct lov_oinfo, loi_ready_item)); /* then if we have cache waiters, return all objects with queued * writes. This is especially important when many small files * have filled up the cache and not been fired into rpcs because * they don't pass the nr_pending/object threshhold */ - if (!list_empty(&cli->cl_cache_waiters) && - !list_empty(&cli->cl_loi_write_list)) - RETURN(list_entry(cli->cl_loi_write_list.next, - struct lov_oinfo, loi_write_item)); + if (!cfs_list_empty(&cli->cl_cache_waiters) && + !cfs_list_empty(&cli->cl_loi_write_list)) + RETURN(cfs_list_entry(cli->cl_loi_write_list.next, + struct lov_oinfo, loi_write_item)); /* then return all queued objects when we have an invalid import * so that they get flushed */ if (cli->cl_import == NULL || cli->cl_import->imp_invalid) { - if (!list_empty(&cli->cl_loi_write_list)) - RETURN(list_entry(cli->cl_loi_write_list.next, - struct lov_oinfo, loi_write_item)); - if (!list_empty(&cli->cl_loi_read_list)) - RETURN(list_entry(cli->cl_loi_read_list.next, - struct lov_oinfo, loi_read_item)); + if (!cfs_list_empty(&cli->cl_loi_write_list)) + RETURN(cfs_list_entry(cli->cl_loi_write_list.next, + struct lov_oinfo, + loi_write_item)); + if (!cfs_list_empty(&cli->cl_loi_read_list)) + RETURN(cfs_list_entry(cli->cl_loi_read_list.next, + struct lov_oinfo, loi_read_item)); } RETURN(NULL); } +static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi) +{ + struct osc_async_page *oap; + int hprpc = 0; + + if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) { + oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next, + struct osc_async_page, oap_urgent_item); + hprpc = !!(oap->oap_async_flags & ASYNC_HP); + } + + if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) { + oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next, + struct osc_async_page, oap_urgent_item); + hprpc = !!(oap->oap_async_flags & ASYNC_HP); + } + + return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc; +} + /* called with the loi list lock held */ void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) { @@ -2397,7 +2710,7 @@ void osc_check_rpcs(const struct 
lu_env *env, struct client_obd *cli) while ((loi = osc_next_loi(cli)) != NULL) { LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli)); - if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight) + if (osc_max_rpc_in_flight(cli, loi)) break; /* attempt some read/write balancing by alternating between @@ -2409,8 +2722,28 @@ void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) { rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE, &loi->loi_write_lop); - if (rc < 0) - break; + if (rc < 0) { + CERROR("Write request failed with %d\n", rc); + + /* osc_send_oap_rpc failed, mostly because of + * memory pressure. + * + * It can't break here, because if: + * - a page was submitted by osc_io_submit, so + * page locked; + * - no request in flight + * - no subsequent request + * The system will be in live-lock state, + * because there is no chance to call + * osc_io_unplug() and osc_check_rpcs() any + * more. pdflush can't help in this case, + * because it might be blocked at grabbing + * the page lock as we mentioned. + * + * Anyway, continue to drain pages. */ + /* break; */ + } + if (rc > 0) race_counter = 0; else @@ -2420,21 +2753,24 @@ void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ, &loi->loi_read_lop); if (rc < 0) - break; + CERROR("Read request failed with %d\n", rc); + if (rc > 0) race_counter = 0; else race_counter++; } - /* attempt some inter-object balancing by issueing rpcs + /* attempt some inter-object balancing by issuing rpcs * for each object in turn */ - if (!list_empty(&loi->loi_cli_item)) - list_del_init(&loi->loi_cli_item); - if (!list_empty(&loi->loi_write_item)) - list_del_init(&loi->loi_write_item); - if (!list_empty(&loi->loi_read_item)) - list_del_init(&loi->loi_read_item); + if (!cfs_list_empty(&loi->loi_hp_ready_item)) + cfs_list_del_init(&loi->loi_hp_ready_item); + if (!cfs_list_empty(&loi->loi_ready_item)) + cfs_list_del_init(&loi->loi_ready_item); + if (!cfs_list_empty(&loi->loi_write_item)) + cfs_list_del_init(&loi->loi_write_item); + if (!cfs_list_empty(&loi->loi_read_item)) + cfs_list_del_init(&loi->loi_read_item); loi_list_maint(cli, loi); @@ -2462,7 +2798,7 @@ static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw) int rc; ENTRY; client_obd_list_lock(&cli->cl_loi_list_lock); - rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0; + rc = cfs_list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0; client_obd_list_unlock(&cli->cl_loi_list_lock); RETURN(rc); }; @@ -2482,7 +2818,7 @@ int osc_enter_cache_try(const struct lu_env *env, osc_consume_write_grant(cli, &oap->oap_brw_page); if (transient) { cli->cl_dirty_transit += CFS_PAGE_SIZE; - atomic_inc(&obd_dirty_transit_pages); + cfs_atomic_inc(&obd_dirty_transit_pages); oap->oap_brw_flags |= OBD_BRW_NOCACHE; } } @@ -2501,7 +2837,7 @@ static int osc_enter_cache(const struct lu_env *env, ENTRY; CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu " - "grant: %lu\n", cli->cl_dirty, atomic_read(&obd_dirty_pages), + "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages), cli->cl_dirty_max, obd_max_dirty_pages, cli->cl_lost_grant, cli->cl_avail_grant); @@ -2513,15 +2849,19 @@ static int osc_enter_cache(const struct lu_env *env, /* Hopefully normal case - cache space and write credits available */ if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max && - atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages && + 
cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages && osc_enter_cache_try(env, cli, loi, oap, 0)) RETURN(0); - /* Make sure that there are write rpcs in flight to wait for. This - * is a little silly as this object may not have any pending but - * other objects sure might. */ - if (cli->cl_w_in_flight) { - list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters); + /* It is safe to block as a cache waiter as long as there is grant + * space available or the hope of additional grant being returned + * when an in flight write completes. Using the write back cache + * if possible is preferable to sending the data synchronously + * because write pages can then be merged in to large requests. + * The addition of this cache waiter will causing pending write + * pages to be sent immediately. */ + if (cli->cl_w_in_flight || cli->cl_avail_grant >= CFS_PAGE_SIZE) { + cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters); cfs_waitq_init(&ocw.ocw_waitq); ocw.ocw_oap = oap; ocw.ocw_rc = 0; @@ -2534,8 +2874,8 @@ static int osc_enter_cache(const struct lu_env *env, l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi); client_obd_list_lock(&cli->cl_loi_list_lock); - if (!list_empty(&ocw.ocw_entry)) { - list_del(&ocw.ocw_entry); + if (!cfs_list_empty(&ocw.ocw_entry)) { + cfs_list_del(&ocw.ocw_entry); RETURN(-EINTR); } RETURN(ocw.ocw_rc); @@ -2556,7 +2896,7 @@ int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm, ENTRY; if (!page) - return size_round(sizeof(*oap)); + return cfs_size_round(sizeof(*oap)); oap = *res; oap->oap_magic = OAP_MAGIC; @@ -2579,7 +2919,7 @@ int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm, CFS_INIT_LIST_HEAD(&oap->oap_rpc_item); CFS_INIT_LIST_HEAD(&oap->oap_page_list); - spin_lock_init(&oap->oap_lock); + cfs_spin_lock_init(&oap->oap_lock); CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset); RETURN(0); } @@ -2610,15 +2950,16 @@ int osc_queue_async_io(const struct lu_env *env, if (cli->cl_import == NULL || cli->cl_import->imp_invalid) RETURN(-EIO); - if (!list_empty(&oap->oap_pending_item) || - !list_empty(&oap->oap_urgent_item) || - !list_empty(&oap->oap_rpc_item)) + if (!cfs_list_empty(&oap->oap_pending_item) || + !cfs_list_empty(&oap->oap_urgent_item) || + !cfs_list_empty(&oap->oap_rpc_item)) RETURN(-EBUSY); /* check if the file's owner/group is over quota */ if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) { struct cl_object *obj; struct cl_attr attr; /* XXX put attr into thread info */ + unsigned int qid[MAXQUOTAS]; obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj); @@ -2626,8 +2967,10 @@ int osc_queue_async_io(const struct lu_env *env, rc = cl_object_attr_get(env, obj, &attr); cl_object_attr_unlock(obj); - if (rc == 0 && lquota_chkdq(quota_interface, cli, attr.cat_uid, - attr.cat_gid) == NO_QUOTA) + qid[USRQUOTA] = attr.cat_uid; + qid[GRPQUOTA] = attr.cat_gid; + if (rc == 0 && + lquota_chkdq(quota_interface, cli, qid) == NO_QUOTA) rc = -EDQUOT; if (rc) RETURN(rc); @@ -2643,7 +2986,12 @@ int osc_queue_async_io(const struct lu_env *env, oap->oap_page_off = off; oap->oap_count = count; oap->oap_brw_flags = brw_flags; + /* Give a hint to OST that requests are coming from kswapd - bug19529 */ + if (cfs_memory_pressure_get()) + oap->oap_brw_flags |= OBD_BRW_MEMALLOC; + cfs_spin_lock(&oap->oap_lock); oap->oap_async_flags = async_flags; + cfs_spin_unlock(&oap->oap_lock); if (cmd & OBD_BRW_WRITE) { rc = osc_enter_cache(env, cli, loi, oap); @@ -2673,10 +3021,10 @@ int osc_set_async_flags_base(struct 
@@ -2556,7 +2896,7 @@ int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm, ENTRY; if (!page) - return size_round(sizeof(*oap)); + return cfs_size_round(sizeof(*oap)); oap = *res; oap->oap_magic = OAP_MAGIC; @@ -2579,7 +2919,7 @@ int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm, CFS_INIT_LIST_HEAD(&oap->oap_rpc_item); CFS_INIT_LIST_HEAD(&oap->oap_page_list); - spin_lock_init(&oap->oap_lock); + cfs_spin_lock_init(&oap->oap_lock); CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset); RETURN(0); } @@ -2610,15 +2950,16 @@ int osc_queue_async_io(const struct lu_env *env, if (cli->cl_import == NULL || cli->cl_import->imp_invalid) RETURN(-EIO); - if (!list_empty(&oap->oap_pending_item) || - !list_empty(&oap->oap_urgent_item) || - !list_empty(&oap->oap_rpc_item)) + if (!cfs_list_empty(&oap->oap_pending_item) || + !cfs_list_empty(&oap->oap_urgent_item) || + !cfs_list_empty(&oap->oap_rpc_item)) RETURN(-EBUSY); /* check if the file's owner/group is over quota */ if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) { struct cl_object *obj; struct cl_attr attr; /* XXX put attr into thread info */ + unsigned int qid[MAXQUOTAS]; obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj); @@ -2626,8 +2967,10 @@ int osc_queue_async_io(const struct lu_env *env, rc = cl_object_attr_get(env, obj, &attr); cl_object_attr_unlock(obj); - if (rc == 0 && lquota_chkdq(quota_interface, cli, attr.cat_uid, - attr.cat_gid) == NO_QUOTA) + qid[USRQUOTA] = attr.cat_uid; + qid[GRPQUOTA] = attr.cat_gid; + if (rc == 0 && + lquota_chkdq(quota_interface, cli, qid) == NO_QUOTA) rc = -EDQUOT; if (rc) RETURN(rc); } @@ -2643,7 +2986,12 @@ oap->oap_cmd = cmd; oap->oap_page_off = off; oap->oap_count = count; oap->oap_brw_flags = brw_flags; + /* Give a hint to OST that requests are coming from kswapd - bug19529 */ + if (cfs_memory_pressure_get()) + oap->oap_brw_flags |= OBD_BRW_MEMALLOC; + cfs_spin_lock(&oap->oap_lock); oap->oap_async_flags = async_flags; + cfs_spin_unlock(&oap->oap_lock); if (cmd & OBD_BRW_WRITE) { rc = osc_enter_cache(env, cli, loi, oap); @@ -2673,10 +3021,10 @@ int osc_set_async_flags_base(struct client_obd *cli, obd_flag async_flags) { struct loi_oap_pages *lop; + int flags = 0; ENTRY; - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) - RETURN(-EIO); + LASSERT(!cfs_list_empty(&oap->oap_pending_item)); if (oap->oap_cmd & OBD_BRW_WRITE) { lop = &loi->loi_write_lop; @@ -2684,21 +3032,25 @@ int osc_set_async_flags_base(struct client_obd *cli, lop = &loi->loi_read_lop; } - if (list_empty(&oap->oap_pending_item)) - RETURN(-EINVAL); - if ((oap->oap_async_flags & async_flags) == async_flags) RETURN(0); if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY)) - oap->oap_async_flags |= ASYNC_READY; + flags |= ASYNC_READY; - if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) { - if (list_empty(&oap->oap_rpc_item)) { - list_add(&oap->oap_urgent_item, &lop->lop_urgent); - loi_list_maint(cli, loi); - } + if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) && + cfs_list_empty(&oap->oap_rpc_item)) { + if (oap->oap_async_flags & ASYNC_HP) + cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent); + else + cfs_list_add_tail(&oap->oap_urgent_item, + &lop->lop_urgent); + flags |= ASYNC_URGENT; + loi_list_maint(cli, loi); } + cfs_spin_lock(&oap->oap_lock); + oap->oap_async_flags |= flags; + cfs_spin_unlock(&oap->oap_lock); LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page, oap->oap_async_flags); @@ -2730,18 +3082,20 @@ int osc_teardown_async_page(struct obd_export *exp, client_obd_list_lock(&cli->cl_loi_list_lock); - if (!list_empty(&oap->oap_rpc_item)) + if (!cfs_list_empty(&oap->oap_rpc_item)) GOTO(out, rc = -EBUSY); osc_exit_cache(cli, oap, 0); osc_wake_cache_waiters(cli); - if (!list_empty(&oap->oap_urgent_item)) { - list_del_init(&oap->oap_urgent_item); - oap->oap_async_flags &= ~ASYNC_URGENT; + if (!cfs_list_empty(&oap->oap_urgent_item)) { + cfs_list_del_init(&oap->oap_urgent_item); + cfs_spin_lock(&oap->oap_lock); + oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP); + cfs_spin_unlock(&oap->oap_lock); } - if (!list_empty(&oap->oap_pending_item)) { - list_del_init(&oap->oap_pending_item); + if (!cfs_list_empty(&oap->oap_pending_item)) { + cfs_list_del_init(&oap->oap_pending_item); lop_update_pending(cli, lop, oap->oap_cmd, -1); } loi_list_maint(cli, loi); @@ -2764,10 +3118,10 @@ static void osc_set_lock_data_with_check(struct ldlm_lock *lock, LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl); lock_res_and_lock(lock); - spin_lock(&osc_ast_guard); + cfs_spin_lock(&osc_ast_guard); LASSERT(lock->l_ast_data == NULL || lock->l_ast_data == data); lock->l_ast_data = data; - spin_unlock(&osc_ast_guard); + cfs_spin_unlock(&osc_ast_guard); unlock_res_and_lock(lock); } @@ -2791,11 +3145,31 @@ static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, struct ldlm_res_id res_id; struct obd_device *obd = class_exp2obd(exp); - osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_gr, &res_id); + osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id); ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data); return 0; } +/* Find any ldlm lock of the inode in osc. + * Returns: 0 if no lock is found, + * 1 if a lock is found, + * < 0 on error. */ +static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, + ldlm_iterator_t replace, void *data) +{ + struct ldlm_res_id res_id; + struct obd_device *obd = class_exp2obd(exp); + int rc = 0; + + osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id); + rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data); + if (rc == LDLM_ITER_STOP) + return(1); + if (rc == LDLM_ITER_CONTINUE) + return(0); + return(rc); +} +
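osc_find_cbdata() above folds the LDLM iterator protocol into a tri-state result (0 none, 1 found, negative on error). A hypothetical caller checking whether any lock is still cached for a stripe could look like this; lock_matches_cb is illustrative, not part of the patch:

        /* Illustrative only: stop on the first lock the iterator visits. */
        static int lock_matches_cb(struct ldlm_lock *lock, void *data)
        {
                return LDLM_ITER_STOP;
        }

        static int stripe_has_lock(struct obd_export *exp,
                                   struct lov_stripe_md *lsm)
        {
                int rc = osc_find_cbdata(exp, lsm, lock_matches_cb, NULL);

                if (rc < 0)
                        return rc;      /* iteration failed */
                return rc;              /* 0: no lock, 1: found one */
        }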
static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb, obd_enqueue_update_f upcall, void *cookie, int *flags, int rc) @@ -2850,14 +3224,19 @@ static int osc_enqueue_interpret(const struct lu_env *env, * osc_enqueue_fini(). */ ldlm_lock_addref(&handle, mode); + /* Let the CP AST grant the lock first. */ + OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1); + /* Complete the lock acquisition. */ rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1, mode, aa->oa_flags, aa->oa_lvb, - sizeof(*aa->oa_lvb), lustre_swab_ost_lvb, - &handle, rc); + sizeof(*aa->oa_lvb), &handle, rc); /* Complete osc stuff. */ rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie, aa->oa_flags, rc); + + OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10); + /* Release the lock for async request. */ if (lustre_handle_is_used(&handle) && rc == ELDLM_OK) /* @@ -3018,7 +3397,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, *flags &= ~LDLM_FL_BLOCK_GRANTED; rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb, - sizeof(*lvb), lustre_swab_ost_lvb, lockh, async); + sizeof(*lvb), lockh, async); if (rqset) { if (!rc) { struct osc_enqueue_args *aa; @@ -3060,7 +3439,7 @@ static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo, ENTRY; osc_build_res_name(oinfo->oi_md->lsm_object_id, - oinfo->oi_md->lsm_object_gr, &res_id); + oinfo->oi_md->lsm_object_seq, &res_id); rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy, &oinfo->oi_md->lsm_oinfo[0]->loi_lvb, @@ -3095,7 +3474,7 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, rc = mode; if (mode == LCK_PR) rc |= LCK_PW; - rc = ldlm_lock_match(obd->obd_namespace, lflags | LDLM_FL_LVB_READY, + rc = ldlm_lock_match(obd->obd_namespace, lflags, res_id, type, policy, rc, lockh, unref); if (rc) { if (data != NULL) @@ -3129,7 +3508,8 @@ static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md, } static int osc_cancel_unused(struct obd_export *exp, - struct lov_stripe_md *lsm, int flags, + struct lov_stripe_md *lsm, + ldlm_cancel_flags_t flags, void *opaque) { struct obd_device *obd = class_exp2obd(exp); @@ -3137,7 +3517,7 @@ static int osc_cancel_unused(struct obd_export *exp, if (lsm != NULL) { resp = osc_build_res_name(lsm->lsm_object_id, - lsm->lsm_object_gr, &res_id); + lsm->lsm_object_seq, &res_id); } return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque); @@ -3147,9 +3527,23 @@ static int osc_statfs_interpret(const struct lu_env *env, struct ptlrpc_request *req, struct osc_async_args *aa, int rc) { + struct client_obd *cli = &req->rq_import->imp_obd->u.cli; struct obd_statfs *msfs; + __u64 used; ENTRY; + if (rc == -EBADR) + /* The request has in fact never been sent + * due to issues at a higher level (LOV). + * Exit immediately since the caller is + * aware of the problem and takes care + * of the cleanup */ + RETURN(rc); + + if ((rc == -ENOTCONN || rc == -EAGAIN) && + (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) + GOTO(out, rc = 0); + if (rc != 0) GOTO(out, rc); @@ -3158,6 +3552,44 @@ static int osc_statfs_interpret(const struct lu_env *env, GOTO(out, rc = -EPROTO); } + /* Reinitialize the RDONLY and DEGRADED flags at the client + * on each statfs, so they don't stay set permanently. 
*/ + cfs_spin_lock(&cli->cl_oscc.oscc_lock); + + if (unlikely(msfs->os_state & OS_STATE_DEGRADED)) + cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED; + else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED)) + cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED; + + if (unlikely(msfs->os_state & OS_STATE_READONLY)) + cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY; + else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY)) + cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY; + + /* Add a bit of hysteresis so this flag isn't continually flapping, + * and ensure that new files don't get extremely fragmented due to + * only a small amount of available space in the filesystem. + * We want to set the NOSPC flag when there is less than ~0.1% free + * and clear it when there is at least ~0.2% free space, so: + * avail < ~0.1% max max = avail + used + * 1025 * avail < avail + used used = blocks - free + * 1024 * avail < used + * 1024 * avail < blocks - free + * avail < ((blocks - free) >> 10) + * + * On a very large disk, say 16 TB, 0.1% will be 16 GB. We don't want + * to lose that much space, so in those cases we report no space left + * if there is less than 1 GB free. */ + used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30); + if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) && + ((msfs->os_ffree < 32) || (msfs->os_bavail < used)))) + cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC; + else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) && + (msfs->os_ffree > 64) && (msfs->os_bavail > (used << 1)))) + cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_NOSPC; + + cfs_spin_unlock(&cli->cl_oscc.oscc_lock); + *aa->aa_oi->oi_osfs = *msfs; out: rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
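The hysteresis arithmetic in the hunk above is easiest to check with concrete numbers. Take an OST with os_blocks = 2^28 and os_bfree = 2^20: used space is 268435456 - 1048576 blocks, so the set threshold is (268435456 - 1048576) >> 10 = 261120 blocks (~0.1% of used), and the flag is cleared only once available space exceeds twice that. A standalone sketch of the same logic, with the 1ULL << 30 clamp mirroring the patch's cap:

        #include <stdint.h>

        /* Standalone sketch of the NOSPC hysteresis in
         * osc_statfs_interpret() above; field names shortened. */
        static int update_nospc_flag(int nospc, uint64_t blocks,
                                     uint64_t bfree, uint64_t bavail,
                                     uint64_t ffree)
        {
                uint64_t used = (blocks - bfree) >> 10; /* ~0.1% of used */

                if (used > (1ULL << 30))
                        used = 1ULL << 30;      /* cap the reserved slack */

                if (!nospc && (ffree < 32 || bavail < used))
                        return 1;               /* set: nearly full */
                if (nospc && ffree > 64 && bavail > (used << 1))
                        return 0;               /* clear: ~0.2% free again */
                return nospc;                   /* in between: hysteresis */
        }

Because the set and clear thresholds differ by a factor of two, a filesystem hovering near the boundary cannot make the flag flap on every statfs reply.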
@@ -3217,10 +3649,10 @@ static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs, /* Since the request might also come from lprocfs, we need to * sync this with client_disconnect_export (bug 15684) */ - down_read(&obd->u.cli.cl_sem); + cfs_down_read(&obd->u.cli.cl_sem); if (obd->u.cli.cl_import) imp = class_import_get(obd->u.cli.cl_import); - up_read(&obd->u.cli.cl_sem); + cfs_up_read(&obd->u.cli.cl_sem); if (!imp) RETURN(-ENODEV); @@ -3289,7 +3721,7 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) /* we only need the header part from user space to get lmm_magic and * lmm_stripe_count, (the header part is common to v1 and v3) */ lum_size = sizeof(struct lov_user_md_v1); - if (copy_from_user(&lum, lump, lum_size)) + if (cfs_copy_from_user(&lum, lump, lum_size)) RETURN(-EFAULT); if ((lum.lmm_magic != LOV_USER_MAGIC_V1) && @@ -3320,10 +3752,10 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) } lumk->lmm_object_id = lsm->lsm_object_id; - lumk->lmm_object_gr = lsm->lsm_object_gr; + lumk->lmm_object_seq = lsm->lsm_object_seq; lumk->lmm_stripe_count = 1; - if (copy_to_user(lump, lumk, lum_size)) + if (cfs_copy_to_user(lump, lumk, lum_size)) rc = -EFAULT; if (lumk != &lum) @@ -3341,7 +3773,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, int err = 0; ENTRY; - if (!try_module_get(THIS_MODULE)) { + if (!cfs_try_module_get(THIS_MODULE)) { CERROR("Can't get module. Is it alive?"); return -EINVAL; } @@ -3379,7 +3811,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid)); - err = copy_to_user((void *)uarg, buf, len); + err = cfs_copy_to_user((void *)uarg, buf, len); if (err) err = -EFAULT; obd_ioctl_freedata(buf, len); @@ -3416,7 +3848,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, GOTO(out, err = -ENOTTY); } out: - module_put(THIS_MODULE); + cfs_module_put(THIS_MODULE); return err; } @@ -3455,6 +3887,7 @@ static int osc_get_info(struct obd_export *exp, obd_count keylen, tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); memcpy(tmp, key, keylen); + req->rq_no_delay = req->rq_no_resend = 1; ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); if (rc) @@ -3516,36 +3949,40 @@ static int osc_get_info(struct obd_export *exp, obd_count keylen, RETURN(-EINVAL); } -static int osc_setinfo_mds_conn_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - void *aa, int rc) +static int osc_setinfo_mds_connect_import(struct obd_import *imp) { struct llog_ctxt *ctxt; - struct obd_import *imp = req->rq_import; + int rc = 0; ENTRY; - if (rc != 0) - RETURN(rc); - ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT); if (ctxt) { - if (rc == 0) - rc = llog_initiator_connect(ctxt); - else - CERROR("cannot establish connection for " - "ctxt %p: %d\n", ctxt, rc); + rc = llog_initiator_connect(ctxt); + llog_ctxt_put(ctxt); + } else { + /* XXX return an error? skip setting below flags? */ } - llog_ctxt_put(ctxt); - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); imp->imp_server_timeout = 1; imp->imp_pingable = 1; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd)); RETURN(rc); } +static int osc_setinfo_mds_conn_interpret(const struct lu_env *env, + struct ptlrpc_request *req, + void *aa, int rc) +{ + ENTRY; + if (rc != 0) + RETURN(rc); + + RETURN(osc_setinfo_mds_connect_import(req->rq_import)); +} + static int osc_set_info_async(struct obd_export *exp, obd_count keylen, void *key, obd_count vallen, void *val, struct ptlrpc_request_set *set) @@ -3560,11 +3997,24 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10); if (KEY_IS(KEY_NEXT_ID)) { + obd_id new_val; + struct osc_creator *oscc = &obd->u.cli.cl_oscc; + if (vallen != sizeof(obd_id)) RETURN(-ERANGE); if (val == NULL) RETURN(-EINVAL); - obd->u.cli.cl_oscc.oscc_next_id = *((obd_id*)val) + 1; + + if (vallen != sizeof(obd_id)) + RETURN(-EINVAL); + + /* avoid a race between allocating a new object and setting + * the next id from the ll_sync thread */ + cfs_spin_lock(&oscc->oscc_lock); + new_val = *((obd_id*)val) + 1; + if (new_val > oscc->oscc_next_id) + oscc->oscc_next_id = new_val; + cfs_spin_unlock(&oscc->oscc_lock); CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n", exp->exp_obd->obd_name, obd->u.cli.cl_oscc.oscc_next_id); @@ -3572,26 +4022,6 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, RETURN(0); } - if (KEY_IS(KEY_UNLINKED)) { struct osc_creator *oscc = &obd->u.cli.cl_oscc; - spin_lock(&oscc->oscc_lock); - oscc->oscc_flags &= ~OSCC_FLAG_NOSPC; - spin_unlock(&oscc->oscc_lock); - RETURN(0); - } - - if (KEY_IS(KEY_INIT_RECOV)) { if (vallen != sizeof(int)) RETURN(-EINVAL); - spin_lock(&imp->imp_lock); - imp->imp_initial_recov = *(int *)val; - spin_unlock(&imp->imp_lock); - CDEBUG(D_HA, "%s: set 
imp_initial_recov = %d\n", - exp->exp_obd->obd_name, - imp->imp_initial_recov); - RETURN(0); - } - if (KEY_IS(KEY_CHECKSUM)) { if (vallen != sizeof(int)) RETURN(-EINVAL); @@ -3609,7 +4039,7 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, RETURN(0); } - if (!set) + if (!set && !KEY_IS(KEY_GRANT_SHRINK)) RETURN(-EINVAL); /* We pass all other commands directly to OST. Since nobody calls osc @@ -3619,8 +4049,11 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, Even if something bad goes through, we'd get a -EINVAL from OST anyway. */ + if (KEY_IS(KEY_GRANT_SHRINK)) + req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO); + else + req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO); - req = ptlrpc_request_alloc(imp, &RQF_OST_SET_INFO); if (req == NULL) RETURN(-ENOMEM); @@ -3642,15 +4075,34 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, if (KEY_IS(KEY_MDS_CONN)) { struct osc_creator *oscc = &obd->u.cli.cl_oscc; - oscc->oscc_oa.o_gr = (*(__u32 *)val); + oscc->oscc_oa.o_seq = (*(__u32 *)val); oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP; - LASSERT_MDS_GROUP(oscc->oscc_oa.o_gr); + LASSERT_SEQ_IS_MDT(oscc->oscc_oa.o_seq); + req->rq_no_delay = req->rq_no_resend = 1; req->rq_interpret_reply = osc_setinfo_mds_conn_interpret; + } else if (KEY_IS(KEY_GRANT_SHRINK)) { + struct osc_grant_args *aa; + struct obdo *oa; + + CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); + aa = ptlrpc_req_async_args(req); + OBDO_ALLOC(oa); + if (!oa) { + ptlrpc_req_finished(req); + RETURN(-ENOMEM); + } + *oa = ((struct ost_body *)val)->oa; + aa->aa_oa = oa; + req->rq_interpret_reply = osc_shrink_grant_interpret; } ptlrpc_request_set_replen(req); - ptlrpc_set_add_req(set, req); - ptlrpc_check_set(NULL, set); + if (!KEY_IS(KEY_GRANT_SHRINK)) { + LASSERT(set != NULL); + ptlrpc_set_add_req(set, req); + ptlrpc_check_set(NULL, set); + } else + ptlrpcd_add_req(req, PSCOPE_OTHER); RETURN(0); } @@ -3661,32 +4113,21 @@ static struct llog_operations osc_size_repl_logops = { }; static struct llog_operations osc_mds_ost_orig_logops; -static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg, - struct obd_device *tgt, int count, - struct llog_catid *catid, struct obd_uuid *uuid) + +static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg, + struct obd_device *tgt, struct llog_catid *catid) { int rc; ENTRY; - LASSERT(olg == &obd->obd_olg); - spin_lock(&obd->obd_dev_lock); - if (osc_mds_ost_orig_logops.lop_setup != llog_obd_origin_setup) { - osc_mds_ost_orig_logops = llog_lvfs_ops; - osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup; - osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup; - osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add; - osc_mds_ost_orig_logops.lop_connect = llog_origin_connect; - } - spin_unlock(&obd->obd_dev_lock); - - rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, count, + rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1, &catid->lci_logid, &osc_mds_ost_orig_logops); if (rc) { CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n"); - GOTO (out, rc); + GOTO(out, rc); } - rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, count, + rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1, NULL, &osc_size_repl_logops); if (rc) { struct llog_ctxt *ctxt = @@ -3698,14 +4139,53 @@ static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg, GOTO(out, rc); out: if (rc) { - CERROR("osc '%s' tgt '%s' cnt %d catid %p rc=%d\n", - 
obd->obd_name, tgt->obd_name, count, catid, rc); + CERROR("osc '%s' tgt '%s' catid %p rc=%d\n", + obd->obd_name, tgt->obd_name, catid, rc); CERROR("logid "LPX64":0x%x\n", catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen); } return rc; } +static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg, + struct obd_device *disk_obd, int *index) +{ + struct llog_catid catid; + static char name[32] = CATLIST; + int rc; + ENTRY; + + LASSERT(olg == &obd->obd_olg); + + cfs_mutex_down(&olg->olg_cat_processing); + rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid); + if (rc) { + CERROR("rc: %d\n", rc); + GOTO(out, rc); + } + + CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n", + obd->obd_name, *index, catid.lci_logid.lgl_oid, + catid.lci_logid.lgl_oseq, catid.lci_logid.lgl_ogen); + + rc = __osc_llog_init(obd, olg, disk_obd, &catid); + if (rc) { + CERROR("rc: %d\n", rc); + GOTO(out, rc); + } + + rc = llog_put_cat_list(disk_obd, name, *index, 1, &catid); + if (rc) { + CERROR("rc: %d\n", rc); + GOTO(out, rc); + } + + out: + cfs_mutex_up(&olg->olg_cat_processing); + + return rc; +} + static int osc_llog_finish(struct obd_device *obd, int count) { struct llog_ctxt *ctxt; @@ -3737,15 +4217,15 @@ static int osc_reconnect(const struct lu_env *env, long lost_grant; client_obd_list_lock(&cli->cl_loi_list_lock); - data->ocd_grant = cli->cl_avail_grant ?: + data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?: 2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT; lost_grant = cli->cl_lost_grant; cli->cl_lost_grant = 0; client_obd_list_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld " - "cl_lost_grant: %ld\n", data->ocd_grant, - cli->cl_avail_grant, lost_grant); + "cl_dirty: %ld cl_lost_grant: %ld\n", data->ocd_grant, + cli->cl_avail_grant, cli->cl_dirty, lost_grant); CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d" " ocd_grant: %d\n", data->ocd_connect_flags, data->ocd_version, data->ocd_grant); @@ -3763,17 +4243,36 @@ static int osc_disconnect(struct obd_export *exp) ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT); if (ctxt) { if (obd->u.cli.cl_conn_count == 1) { - /* Flush any remaining cancel messages out to the + /* Flush any remaining cancel messages out to the * target */ llog_sync(ctxt, exp); } llog_ctxt_put(ctxt); } else { - CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n", + CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n", obd); } rc = client_disconnect_export(exp); + /** + * Initially we put del_shrink_grant before disconnect_export, but it + * causes the following problem if setup (connect) and cleanup + * (disconnect) are tangled together. + * connect p1 disconnect p2 + * ptlrpc_connect_import + * ............... class_manual_cleanup + * osc_disconnect + * del_shrink_grant + * ptlrpc_connect_interrupt + * init_grant_shrink + * add this client to shrink list + * cleanup_osc + * Bang! The pinger triggers the shrink. + * So the osc should be removed from the shrink list only after we + * are sure the import has been destroyed. BUG18662 + */ + if (obd->u.cli.cl_import == NULL) + osc_del_shrink_grant(&obd->u.cli); return rc; }
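The comment above describes an ordering bug (BUG18662) between connect and cleanup paths. The rule it arrives at is small enough to state as code: only drop the client from the grant-shrink list once the import pointer is gone, because until then a racing reconnect can re-register the client and the pinger may fire a shrink against a half-torn-down device. A sketch of that guard, in the same shape as the fix above:

        /* Sketch of the BUG18662 ordering rule from osc_disconnect(). */
        static void osc_shrink_guard(struct obd_device *obd)
        {
                /* cl_import is NULL only once the import is destroyed,
                 * so a racing reconnect can no longer re-add us. */
                if (obd->u.cli.cl_import == NULL)
                        osc_del_shrink_grant(&obd->u.cli);
                /* otherwise the final cleanup path runs this again
                 * after the import really is gone */
        }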
@@ -3793,9 +4292,9 @@ static int osc_import_event(struct obd_device *obd, if (imp->imp_server_timeout) { struct osc_creator *oscc = &obd->u.cli.cl_oscc; - spin_lock(&oscc->oscc_lock); + cfs_spin_lock(&oscc->oscc_lock); oscc->oscc_flags |= OSCC_FLAG_RECOVERING; - spin_unlock(&oscc->oscc_lock); + cfs_spin_unlock(&oscc->oscc_lock); } cli = &obd->u.cli; client_obd_list_lock(&cli->cl_loi_list_lock); @@ -3834,9 +4333,9 @@ static int osc_import_event(struct obd_device *obd, if (imp->imp_server_timeout) { struct osc_creator *oscc = &obd->u.cli.cl_oscc; - spin_lock(&oscc->oscc_lock); + cfs_spin_lock(&oscc->oscc_lock); oscc->oscc_flags &= ~OSCC_FLAG_NOSPC; - spin_unlock(&oscc->oscc_lock); + cfs_spin_unlock(&oscc->oscc_lock); } rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL); break; @@ -3878,6 +4377,7 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) struct lprocfs_static_vars lvars = { 0 }; struct client_obd *cli = &obd->u.cli; + cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL; lprocfs_osc_init_vars(&lvars); if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) { lproc_osc_attach_seqstat(obd); @@ -3895,6 +4395,9 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2, OST_MAXREQSIZE, ptlrpc_add_rqs_to_pool); + + CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list); + cfs_sema_init(&cli->cl_grant_sem, 1); } RETURN(rc); @@ -3912,9 +4415,9 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name); /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */ ptlrpc_deactivate_import(imp); - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); imp->imp_pingable = 0; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); break; } case OBD_CLEANUP_EXPORTS: { @@ -3922,7 +4425,7 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) client import will not have been cleaned. 
*/ if (obd->u.cli.cl_import) { struct obd_import *imp; - down_write(&obd->u.cli.cl_sem); + cfs_down_write(&obd->u.cli.cl_sem); imp = obd->u.cli.cl_import; CDEBUG(D_CONFIG, "%s: client import never connected\n", obd->obd_name); @@ -3932,32 +4435,26 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) imp->imp_rq_pool = NULL; } class_destroy_import(imp); - up_write(&obd->u.cli.cl_sem); + cfs_up_write(&obd->u.cli.cl_sem); obd->u.cli.cl_import = NULL; } rc = obd_llog_finish(obd, 0); if (rc != 0) CERROR("failed to cleanup llogging subsystems\n"); break; - } + } } RETURN(rc); } int osc_cleanup(struct obd_device *obd) { - struct osc_creator *oscc = &obd->u.cli.cl_oscc; int rc; ENTRY; ptlrpc_lprocfs_unregister_obd(obd); lprocfs_obd_cleanup(obd); - spin_lock(&oscc->oscc_lock); - oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING; - oscc->oscc_flags |= OSCC_FLAG_EXITING; - spin_unlock(&oscc->oscc_lock); - /* free memory of osc quota cache */ lquota_cleanup(quota_interface, obd); @@ -3978,8 +4475,8 @@ int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg) default: rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars, lcfg, obd); - if (rc > 0) - rc = 0; + if (rc > 0) + rc = 0; break; } @@ -4007,6 +4504,7 @@ struct obd_ops osc_obd_ops = { .o_unpackmd = osc_unpackmd, .o_precreate = osc_precreate, .o_create = osc_create, + .o_create_async = osc_create_async, .o_destroy = osc_destroy, .o_getattr = osc_getattr, .o_getattr_async = osc_getattr_async, @@ -4017,6 +4515,7 @@ struct obd_ops osc_obd_ops = { .o_sync = osc_sync, .o_enqueue = osc_enqueue, .o_change_cbdata = osc_change_cbdata, + .o_find_cbdata = osc_find_cbdata, .o_cancel = osc_cancel, .o_cancel_unused = osc_cancel_unused, .o_iocontrol = osc_iocontrol, @@ -4028,9 +4527,9 @@ struct obd_ops osc_obd_ops = { .o_process_config = osc_process_config, }; -extern struct lu_kmem_descr osc_caches[]; -extern spinlock_t osc_ast_guard; -extern struct lock_class_key osc_ast_guard_class; +extern struct lu_kmem_descr osc_caches[]; +extern cfs_spinlock_t osc_ast_guard; +extern cfs_lock_class_key_t osc_ast_guard_class; int __init osc_init(void) { @@ -4047,7 +4546,7 @@ int __init osc_init(void) lprocfs_osc_init_vars(&lvars); - request_module("lquota"); + cfs_request_module("lquota"); quota_interface = PORTAL_SYMBOL_GET(osc_quota_interface); lquota_init(quota_interface); init_obd_quota_ops(quota_interface, &osc_obd_ops); @@ -4061,8 +4560,14 @@ int __init osc_init(void) RETURN(rc); } - spin_lock_init(&osc_ast_guard); - lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class); + cfs_spin_lock_init(&osc_ast_guard); + cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class); + + osc_mds_ost_orig_logops = llog_lvfs_ops; + osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup; + osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup; + osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add; + osc_mds_ost_orig_logops.lop_connect = llog_origin_connect; RETURN(rc); }
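osc_init() now installs the MDS-to-OST origin llog operations once at module load, instead of lazily under obd_dev_lock as the old osc_llog_init() did. The pattern is a plain copy-then-override of an ops table; in isolation (the lop_* hooks are the real ones from the hunk above, the wrapper function is only a sketch):

        /* One-time init of the origin llog ops, as done in osc_init(). */
        static struct llog_operations osc_mds_ost_orig_logops;

        static void osc_init_logops(void)
        {
                /* Start from the generic lvfs implementation... */
                osc_mds_ost_orig_logops = llog_lvfs_ops;
                /* ...then override only the origin-side entry points. */
                osc_mds_ost_orig_logops.lop_setup   = llog_obd_origin_setup;
                osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
                osc_mds_ost_orig_logops.lop_add     = llog_obd_origin_add;
                osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
        }

Doing this once at module init lets __osc_llog_init() hand the table straight to llog_setup(), removing the check-and-fill of lop_setup that the old per-device path had to guard with obd_dev_lock.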