X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fosc%2Fosc_request.c;h=cd7fac640428ab0d4a1cba2c43325f35c2158b1d;hp=d8d5be7c78f3c1d866c691df0d75f78540b8455f;hb=42c04e8ee918adb6ce658334c12610e925466752;hpb=c9842fdc5244e38593f0b12468e87f23853dba9f

diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c
index d8d5be7..cd7fac6 100644
--- a/lustre/osc/osc_request.c
+++ b/lustre/osc/osc_request.c
@@ -1,32 +1,37 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (C) 2001-2003 Cluster File Systems, Inc.
- * Author Peter Braam
+ * GPL HEADER START
  *
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * You may have signed or agreed to another license before downloading
- * this software.  If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you.  See the
- * LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
- * For testing and management it is treated as an obd_device,
- * although * it does not export a full OBD method table (the
- * requests are coming * in over the wire, so object target modules
- * do not have a full * method table.)
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
  *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
 #ifndef EXPORT_SYMTAB
@@ -34,16 +39,16 @@
 #endif
 
 #define DEBUG_SUBSYSTEM S_OSC
 
-#ifdef __KERNEL__
-# include
-#else /* __KERNEL__ */
+#include
+
+#ifndef __KERNEL__
 # include
 #endif
 
 #include
-#include
 #include
 #include
+#include
 #include
 #include
 
@@ -62,6 +67,9 @@ static quota_interface_t *quota_interface = NULL;
 extern quota_interface_t osc_quota_interface;
 
 static void osc_release_ppga(struct brw_page **ppga, obd_count count);
+static int brw_interpret(const struct lu_env *env,
+                         struct ptlrpc_request *req, void *data, int rc);
+int osc_cleanup(struct obd_device *obd);
 
 /* Pack OSC object metadata for disk storage (LE byte order). */
 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
@@ -88,7 +96,7 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
 
         if (lsm) {
                 LASSERT(lsm->lsm_object_id);
-                LASSERT(lsm->lsm_object_gr);
+                LASSERT_MDS_GROUP(lsm->lsm_object_gr);
                 (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
                 (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
         }
@@ -145,7 +153,7 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
                 (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
                 LASSERT((*lsmp)->lsm_object_id);
-                LASSERT((*lsmp)->lsm_object_gr);
+                LASSERT_MDS_GROUP((*lsmp)->lsm_object_gr);
         }
 
         (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
@@ -177,7 +185,7 @@ static inline void osc_pack_req_body(struct ptlrpc_request *req,
 
         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
         LASSERT(body);
-        body->oa = *oinfo->oi_oa;
+        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
         osc_pack_capa(req, body, oinfo->oi_capa);
 }
 
@@ -192,7 +200,8 @@ static inline void osc_set_capa_size(struct ptlrpc_request *req,
                 ;
 }
 
-static int osc_getattr_interpret(struct ptlrpc_request *req,
+static int osc_getattr_interpret(const struct lu_env *env,
+                                 struct ptlrpc_request *req,
                                  struct osc_async_args *aa, int rc)
 {
         struct ost_body *body;
@@ -201,11 +210,10 @@ static int osc_getattr_interpret(struct ptlrpc_request *req,
         if (rc != 0)
                 GOTO(out, rc);
 
-        body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
-                                  lustre_swab_ost_body);
+        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
         if (body) {
                 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
-                memcpy(aa->aa_oi->oi_oa, &body->oa, sizeof(*aa->aa_oi->oi_oa));
+                lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);
 
                 /* This should really be sent by the OST */
                 aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
@@ -242,10 +250,10 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
         osc_pack_req_body(req, oinfo);
 
         ptlrpc_request_set_replen(req);
-        req->rq_interpret_reply = osc_getattr_interpret;
+        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
 
         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-        aa = (struct osc_async_args *)&req->rq_async_args;
+        aa = ptlrpc_req_async_args(req);
         aa->aa_oi = oinfo;
 
         ptlrpc_set_add_req(set, req);
@@ -273,7 +281,7 @@ static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
         osc_pack_req_body(req, oinfo);
 
         ptlrpc_request_set_replen(req);
-
+
         rc = ptlrpc_queue_wait(req);
         if (rc)
                 GOTO(out, rc);
@@ -283,7 +291,7 @@ static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
                 GOTO(out, rc = -EPROTO);
 
         CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
-        *oinfo->oi_oa = body->oa;
+        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);
 
         /* This should really be sent by the OST */
         oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
@@ -303,8 +311,7 @@ static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
         int rc;
         ENTRY;
 
-        LASSERT(!(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP) ||
-                oinfo->oi_oa->o_gr > 0);
+        LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
 
         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
         if (req == NULL)
@@ -320,7 +327,6 @@ static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
 
         osc_pack_req_body(req, oinfo);
 
         ptlrpc_request_set_replen(req);
-
         rc = ptlrpc_queue_wait(req);
         if (rc)
@@ -330,7 +336,7 @@ static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
         if (body == NULL)
                 GOTO(out, rc = -EPROTO);
 
-        *oinfo->oi_oa = body->oa;
+        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);
 
         EXIT;
 out:
@@ -338,7 +344,8 @@ out:
         RETURN(rc);
 }
 
-static int osc_setattr_interpret(struct ptlrpc_request *req,
+static int osc_setattr_interpret(const struct lu_env *env,
+                                 struct ptlrpc_request *req,
                                  struct osc_async_args *aa, int rc)
 {
         struct ost_body *body;
@@ -351,7 +358,7 @@ static int osc_setattr_interpret(struct ptlrpc_request *req,
         if (body == NULL)
                 GOTO(out, rc = -EPROTO);
 
-        *aa->aa_oi->oi_oa = body->oa;
+        lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);
 out:
         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
         RETURN(rc);
@@ -377,24 +384,23 @@ static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                 RETURN(rc);
         }
 
+        if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
+                oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
+
         osc_pack_req_body(req, oinfo);
 
         ptlrpc_request_set_replen(req);
 
-        if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
-                LASSERT(oti);
-                *obdo_logcookie(oinfo->oi_oa) = *oti->oti_logcookies;
-        }
 
-        /* do mds to ost setattr asynchronouly */
+        /* do mds to ost setattr asynchronously */
         if (!rqset) {
                 /* Do not wait for response. */
-                ptlrpcd_add_req(req);
+                ptlrpcd_add_req(req, PSCOPE_OTHER);
         } else {
-                req->rq_interpret_reply = osc_setattr_interpret;
+                req->rq_interpret_reply =
+                        (ptlrpc_interpterer_t)osc_setattr_interpret;
 
                 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
-                aa = (struct osc_async_args *)&req->rq_async_args;
+                aa = ptlrpc_req_async_args(req);
                 aa->aa_oi = oinfo;
 
                 ptlrpc_set_add_req(rqset, req);
@@ -434,13 +440,12 @@ int osc_real_create(struct obd_export *exp, struct obdo *oa,
 
         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
         LASSERT(body);
-        body->oa = *oa;
+        lustre_set_wire_obdo(&body->oa, oa);
 
         ptlrpc_request_set_replen(req);
 
-        if (oa->o_valid & OBD_MD_FLINLINE) {
-                LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
-                        oa->o_flags == OBD_FL_DELORPHAN);
+        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
+            oa->o_flags == OBD_FL_DELORPHAN) {
                 DEBUG_REQ(D_HA, req,
                           "delorphan from OST integration");
                 /* Don't resend the delorphan req */
@@ -455,7 +460,7 @@ int osc_real_create(struct obd_export *exp, struct obdo *oa,
         if (body == NULL)
                 GOTO(out_req, rc = -EPROTO);
 
-        *oa = body->oa;
+        lustre_get_wire_obdo(oa, &body->oa);
 
         /* This should really be sent by the OST */
         oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
@@ -475,7 +480,7 @@ int osc_real_create(struct obd_export *exp, struct obdo *oa,
                 if (oa->o_valid & OBD_MD_FLCOOKIE) {
                         if (!oti->oti_logcookies)
                                 oti_alloc_cookies(oti, 1);
-                        *oti->oti_logcookies = *obdo_logcookie(oa);
+                        *oti->oti_logcookies = oa->o_lcookie;
                 }
         }
 
@@ -489,8 +494,9 @@ out:
         RETURN(rc);
 }
 
-static int osc_punch_interpret(struct ptlrpc_request *req,
-                               struct osc_async_args *aa, int rc)
+static int osc_punch_interpret(const struct lu_env *env,
+                               struct ptlrpc_request *req,
+                               struct osc_punch_args *aa, int rc)
 {
         struct ost_body *body;
         ENTRY;
@@ -502,58 +508,69 @@ static int osc_punch_interpret(struct ptlrpc_request *req,
         if (body == NULL)
                 GOTO(out, rc = -EPROTO);
 
-        *aa->aa_oi->oi_oa = body->oa;
+        lustre_get_wire_obdo(aa->pa_oa, &body->oa);
 out:
-        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
+        rc = aa->pa_upcall(aa->pa_cookie, rc);
         RETURN(rc);
 }
 
-static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
-                     struct obd_trans_info *oti,
-                     struct ptlrpc_request_set *rqset)
+int osc_punch_base(struct obd_export *exp, struct obdo *oa,
+                   struct obd_capa *capa,
+                   obd_enqueue_update_f upcall, void *cookie,
+                   struct ptlrpc_request_set *rqset)
 {
         struct ptlrpc_request *req;
-        struct osc_async_args *aa;
+        struct osc_punch_args *aa;
         struct ost_body *body;
         int rc;
         ENTRY;
 
-        if (!oinfo->oi_oa) {
-                CDEBUG(D_INFO, "oa NULL\n");
-                RETURN(-EINVAL);
-        }
-
         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
         if (req == NULL)
                 RETURN(-ENOMEM);
 
-        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+        osc_set_capa_size(req, &RMF_CAPA1, capa);
         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
         if (rc) {
                 ptlrpc_request_free(req);
                 RETURN(rc);
         }
         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
-        osc_pack_req_body(req, oinfo);
+        ptlrpc_at_set_req_timeout(req);
 
-        /* overload the size and blocks fields in the oa with start/end */
         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
         LASSERT(body);
-        body->oa.o_size = oinfo->oi_policy.l_extent.start;
-        body->oa.o_blocks = oinfo->oi_policy.l_extent.end;
-        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
+        lustre_set_wire_obdo(&body->oa, oa);
+        osc_pack_capa(req, body, capa);
 
         ptlrpc_request_set_replen(req);
 
-        req->rq_interpret_reply = osc_punch_interpret;
+        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_punch_interpret;
         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
-        aa = (struct osc_async_args *)&req->rq_async_args;
-        aa->aa_oi = oinfo;
-        ptlrpc_set_add_req(rqset, req);
+        aa = ptlrpc_req_async_args(req);
+        aa->pa_oa = oa;
+        aa->pa_upcall = upcall;
+        aa->pa_cookie = cookie;
+        if (rqset == PTLRPCD_SET)
+                ptlrpcd_add_req(req, PSCOPE_OTHER);
+        else
+                ptlrpc_set_add_req(rqset, req);
 
         RETURN(0);
 }
 
+static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
+                     struct obd_trans_info *oti,
+                     struct ptlrpc_request_set *rqset)
+{
+        oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
+        oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
+        oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+        return osc_punch_base(exp, oinfo->oi_oa, oinfo->oi_capa,
+                              oinfo->oi_cb_up, oinfo, rqset);
+}
+
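Editor's sketch — the new osc_punch() wrapper above keeps the old convention of carrying the truncate extent in the obdo's size/blocks fields before delegating to osc_punch_base(). A minimal userspace model of that packing follows; struct toy_obdo and the two flag values are invented stand-ins for the sketch, not the real struct obdo or OBD_MD_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_MD_FLSIZE   (1ULL << 0)     /* stand-in for OBD_MD_FLSIZE */
    #define TOY_MD_FLBLOCKS (1ULL << 1)     /* stand-in for OBD_MD_FLBLOCKS */

    struct toy_obdo {                       /* simplified struct obdo */
            uint64_t o_valid;
            uint64_t o_size;                /* punch: extent start */
            uint64_t o_blocks;              /* punch: extent end */
    };

    /* pack an extent the way osc_punch() does before osc_punch_base() */
    static void pack_punch_extent(struct toy_obdo *oa,
                                  uint64_t start, uint64_t end)
    {
            oa->o_size = start;
            oa->o_blocks = end;
            oa->o_valid |= TOY_MD_FLSIZE | TOY_MD_FLBLOCKS;
    }

    int main(void)
    {
            struct toy_obdo oa = { 0, 0, 0 };

            pack_punch_extent(&oa, 4096, ~0ULL); /* truncate from 4K to EOF */
            printf("start=%llu end=%llu valid=%#llx\n",
                   (unsigned long long)oa.o_size,
                   (unsigned long long)oa.o_blocks,
                   (unsigned long long)oa.o_valid);
            return 0;
    }

The server can only interpret o_size/o_blocks as an extent when both flag bits are present, which is why the wrapper ORs them into o_valid unconditionally.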
 static int osc_sync(struct obd_export *exp, struct obdo *oa,
                     struct lov_stripe_md *md, obd_size start, obd_size end,
                     void *capa)
@@ -582,7 +599,7 @@ static int osc_sync(struct obd_export *exp, struct obdo *oa,
         /* overload the size and blocks fields in the oa with start/end */
         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
         LASSERT(body);
-        body->oa = *oa;
+        lustre_set_wire_obdo(&body->oa, oa);
         body->oa.o_size = start;
         body->oa.o_blocks = end;
         body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
@@ -598,7 +615,7 @@ static int osc_sync(struct obd_export *exp, struct obdo *oa,
         if (body == NULL)
                 GOTO(out, rc = -EPROTO);
 
-        *oa = body->oa;
+        lustre_get_wire_obdo(oa, &body->oa);
 
         EXIT;
 out:
@@ -614,21 +631,26 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                    int lock_flags)
 {
         struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
-        struct ldlm_res_id res_id = { .name = { oa->o_id, 0, oa->o_gr, 0 } };
-        struct ldlm_resource *res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
+        struct ldlm_res_id res_id;
+        struct ldlm_resource *res;
         int count;
         ENTRY;
 
+        osc_build_res_name(oa->o_id, oa->o_gr, &res_id);
+        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
         if (res == NULL)
                 RETURN(0);
 
+        LDLM_RESOURCE_ADDREF(res);
         count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                            lock_flags, 0, NULL);
+        LDLM_RESOURCE_DELREF(res);
         ldlm_resource_putref(res);
         RETURN(count);
 }
 
-static int osc_destroy_interpret(struct ptlrpc_request *req, void *data,
+static int osc_destroy_interpret(const struct lu_env *env,
+                                 struct ptlrpc_request *req, void *data,
                                  int rc)
 {
         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
@@ -668,7 +690,7 @@ static int osc_can_send_destroy(struct client_obd *cli)
  * cookies to the MDS after committing destroy transactions. */
 static int osc_destroy(struct obd_export *exp, struct obdo *oa,
                        struct lov_stripe_md *ea, struct obd_trans_info *oti,
-                       struct obd_export *md_export)
+                       struct obd_export *md_export, void *capa)
 {
         struct client_obd *cli = &exp->exp_obd->u.cli;
         struct ptlrpc_request *req;
@@ -691,7 +713,8 @@ static int osc_destroy(struct obd_export *exp, struct obdo *oa,
                 RETURN(-ENOMEM);
         }
 
-        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
+        osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
+        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY, 0,
                                &cancels, count);
         if (rc) {
                 ptlrpc_request_free(req);
@@ -699,30 +722,35 @@ static int osc_destroy(struct obd_export *exp, struct obdo *oa,
         }
 
         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
-        req->rq_interpret_reply = osc_destroy_interpret;
+        ptlrpc_at_set_req_timeout(req);
 
         if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
-                memcpy(obdo_logcookie(oa), oti->oti_logcookies,
-                       sizeof(*oti->oti_logcookies));
+                oa->o_lcookie = *oti->oti_logcookies;
+
         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
         LASSERT(body);
-        body->oa = *oa;
+        lustre_set_wire_obdo(&body->oa, oa);
 
+        osc_pack_capa(req, body, (struct obd_capa *)capa);
         ptlrpc_request_set_replen(req);
 
-        if (!osc_can_send_destroy(cli)) {
-                struct l_wait_info lwi = { 0 };
-
-                /*
-                 * Wait until the number of on-going destroy RPCs drops
-                 * under max_rpc_in_flight
-                 */
-                l_wait_event_exclusive(cli->cl_destroy_waitq,
-                                       osc_can_send_destroy(cli), &lwi);
+        /* don't throttle destroy RPCs for the MDT */
+        if (!(cli->cl_import->imp_connect_flags_orig & OBD_CONNECT_MDS)) {
+                req->rq_interpret_reply = osc_destroy_interpret;
+                if (!osc_can_send_destroy(cli)) {
+                        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
+                                                          NULL);
+
+                        /*
+                         * Wait until the number of on-going destroy RPCs drops
+                         * under max_rpc_in_flight
+                         */
+                        l_wait_event_exclusive(cli->cl_destroy_waitq,
+                                               osc_can_send_destroy(cli), &lwi);
+                }
         }
 
         /* Do not wait for response */
-        ptlrpcd_add_req(req);
+        ptlrpcd_add_req(req, PSCOPE_OTHER);
         RETURN(0);
 }
 
@@ -736,13 +764,19 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
 
         oa->o_valid |= bits;
         client_obd_list_lock(&cli->cl_loi_list_lock);
         oa->o_dirty = cli->cl_dirty;
-        if (cli->cl_dirty > cli->cl_dirty_max) {
-                CERROR("dirty %lu > dirty_max %lu\n",
-                       cli->cl_dirty, cli->cl_dirty_max);
+        if (cli->cl_dirty - cli->cl_dirty_transit > cli->cl_dirty_max) {
+                CERROR("dirty %lu - %lu > dirty_max %lu\n",
+                       cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
                 oa->o_undirty = 0;
-        } else if (atomic_read(&obd_dirty_pages) > obd_max_dirty_pages) {
-                CERROR("dirty %d > system dirty_max %d\n",
-                       atomic_read(&obd_dirty_pages), obd_max_dirty_pages);
+        } else if (atomic_read(&obd_dirty_pages) -
+                   atomic_read(&obd_dirty_transit_pages) >
+                   obd_max_dirty_pages + 1) {
+                /* The atomic_read() allowing the atomic_inc() are not covered
+                 * by a lock thus they may safely race and trip this CERROR()
+                 * unless we add in a small fudge factor (+1). */
+                CERROR("dirty %d - %d > system dirty_max %d\n",
+                       atomic_read(&obd_dirty_pages),
+                       atomic_read(&obd_dirty_transit_pages),
+                       obd_max_dirty_pages);
                 oa->o_undirty = 0;
         } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
                 CERROR("dirty %lu - dirty_max %lu too big???\n",
@@ -759,12 +793,23 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
         client_obd_list_unlock(&cli->cl_loi_list_lock);
         CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
                oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
+
+}
+
+static void osc_update_next_shrink(struct client_obd *cli)
+{
+        cli->cl_next_shrink_grant =
+                cfs_time_shift(cli->cl_grant_shrink_interval);
+        CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
+               cli->cl_next_shrink_grant);
 }
 
 /* caller must hold loi_list_lock */
 static void osc_consume_write_grant(struct client_obd *cli,
                                     struct brw_page *pga)
 {
+        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
+        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
         atomic_inc(&obd_dirty_pages);
         cli->cl_dirty += CFS_PAGE_SIZE;
         cli->cl_avail_grant -= CFS_PAGE_SIZE;
@@ -772,6 +817,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
         CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
                CFS_PAGE_SIZE, pga, pga->pg);
         LASSERT(cli->cl_avail_grant >= 0);
+        osc_update_next_shrink(cli);
 }
 
 /* the companion to osc_consume_write_grant, called when a brw has completed.
@@ -782,6 +828,7 @@ static void osc_release_write_grant(struct client_obd *cli,
         int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
         ENTRY;
 
+        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
         if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
                 EXIT;
                 return;
@@ -790,6 +837,11 @@ static void osc_release_write_grant(struct client_obd *cli,
         pga->flag &= ~OBD_BRW_FROM_GRANT;
         atomic_dec(&obd_dirty_pages);
         cli->cl_dirty -= CFS_PAGE_SIZE;
+        if (pga->flag & OBD_BRW_NOCACHE) {
+                pga->flag &= ~OBD_BRW_NOCACHE;
+                atomic_dec(&obd_dirty_transit_pages);
+                cli->cl_dirty_transit -= CFS_PAGE_SIZE;
+        }
         if (!sent) {
                 cli->cl_lost_grant += CFS_PAGE_SIZE;
                 CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
@@ -860,25 +912,189 @@ void osc_wake_cache_waiters(struct client_obd *cli)
         EXIT;
 }
 
-static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
+static void __osc_update_grant(struct client_obd *cli, obd_size grant)
 {
         client_obd_list_lock(&cli->cl_loi_list_lock);
-        cli->cl_avail_grant = ocd->ocd_grant;
+        cli->cl_avail_grant += grant;
         client_obd_list_unlock(&cli->cl_loi_list_lock);
-
-        CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
-               cli->cl_avail_grant, cli->cl_lost_grant);
-        LASSERT(cli->cl_avail_grant >= 0);
 }
 
 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
 {
+        if (body->oa.o_valid & OBD_MD_FLGRANT) {
+                CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
+                __osc_update_grant(cli, body->oa.o_grant);
+        }
+}
+
+static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
+                              void *key, obd_count vallen, void *val,
+                              struct ptlrpc_request_set *set);
+
+static int osc_shrink_grant_interpret(const struct lu_env *env,
+                                      struct ptlrpc_request *req,
+                                      void *aa, int rc)
+{
+        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+        struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
+        struct ost_body *body;
+
+        if (rc != 0) {
+                __osc_update_grant(cli, oa->o_grant);
+                GOTO(out, rc);
+        }
+
+        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+        LASSERT(body);
+        osc_update_grant(cli, body);
+out:
+        OBD_FREE_PTR(oa);
+        return rc;
+}
+
+static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
+{
+        client_obd_list_lock(&cli->cl_loi_list_lock);
+        oa->o_grant = cli->cl_avail_grant / 4;
+        cli->cl_avail_grant -= oa->o_grant;
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
+        oa->o_flags |= OBD_FL_SHRINK_GRANT;
+        osc_update_next_shrink(cli);
+}
+
+/* Shrink the current grant, either from some large amount to enough for a
+ * full set of in-flight RPCs, or if we have already shrunk to that limit
+ * then to enough for a single RPC.  This avoids keeping more grant than
+ * needed, and avoids shrinking the grant piecemeal. */
+static int osc_shrink_grant(struct client_obd *cli)
+{
+        long target = (cli->cl_max_rpcs_in_flight + 1) *
+                      cli->cl_max_pages_per_rpc;
+
+        client_obd_list_lock(&cli->cl_loi_list_lock);
+        if (cli->cl_avail_grant <= target)
+                target = cli->cl_max_pages_per_rpc;
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+        return osc_shrink_grant_to_target(cli, target);
+}
+
+int osc_shrink_grant_to_target(struct client_obd *cli, long target)
+{
+        int rc = 0;
+        struct ost_body *body;
+        ENTRY;
+
+        client_obd_list_lock(&cli->cl_loi_list_lock);
+        /* Don't shrink if we are already above or below the desired limit
+         * We don't want to shrink below a single RPC, as that will negatively
+         * impact block allocation and long-term performance. */
+        if (target < cli->cl_max_pages_per_rpc)
+                target = cli->cl_max_pages_per_rpc;
+
+        if (target >= cli->cl_avail_grant) {
+                client_obd_list_unlock(&cli->cl_loi_list_lock);
+                RETURN(0);
+        }
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+        OBD_ALLOC_PTR(body);
+        if (!body)
+                RETURN(-ENOMEM);
+
+        osc_announce_cached(cli, &body->oa, 0);
+
+        client_obd_list_lock(&cli->cl_loi_list_lock);
+        body->oa.o_grant = cli->cl_avail_grant - target;
+        cli->cl_avail_grant = target;
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
+        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
+        osc_update_next_shrink(cli);
+
+        rc = osc_set_info_async(cli->cl_import->imp_obd->obd_self_export,
+                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
+                                sizeof(*body), body, NULL);
+        if (rc != 0)
+                __osc_update_grant(cli, body->oa.o_grant);
+        OBD_FREE_PTR(body);
+        RETURN(rc);
+}
+
+#define GRANT_SHRINK_LIMIT PTLRPC_MAX_BRW_SIZE
+static int osc_should_shrink_grant(struct client_obd *client)
+{
+        cfs_time_t time = cfs_time_current();
+        cfs_time_t next_shrink = client->cl_next_shrink_grant;
+        if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
+                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
+                    client->cl_avail_grant > GRANT_SHRINK_LIMIT)
+                        return 1;
+                else
+                        osc_update_next_shrink(client);
+        }
+        return 0;
+}
+
+static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
+{
+        struct client_obd *client;
+
+        list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
+                if (osc_should_shrink_grant(client))
+                        osc_shrink_grant(client);
+        }
+        return 0;
+}
+
+static int osc_add_shrink_grant(struct client_obd *client)
+{
+        int rc;
+
+        rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
+                                       TIMEOUT_GRANT,
+                                       osc_grant_shrink_grant_cb, NULL,
+                                       &client->cl_grant_shrink_list);
+        if (rc) {
+                CERROR("add grant client %s error %d\n",
+                       client->cl_import->imp_obd->obd_name, rc);
+                return rc;
+        }
+        CDEBUG(D_CACHE, "add grant client %s \n",
+               client->cl_import->imp_obd->obd_name);
+        osc_update_next_shrink(client);
+        return 0;
+}
+
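Editor's sketch — the shrink policy coded in osc_shrink_grant()/osc_shrink_grant_to_target() above reduces the cached grant in two stages. This standalone model shows just the target arithmetic under the same assumptions (grant counted in pages; the constants are example values, not taken from the patch):

    #include <stdio.h>

    /* First shrink to enough grant for (max_rpcs_in_flight + 1) full RPCs;
     * once at or below that level, shrink the rest of the way to a single
     * RPC, mirroring the logic in osc_shrink_grant() above. */
    static long shrink_target(long avail_grant, long max_rpcs_in_flight,
                              long max_pages_per_rpc)
    {
            long target = (max_rpcs_in_flight + 1) * max_pages_per_rpc;

            if (avail_grant <= target)      /* already at the RPC-set level */
                    target = max_pages_per_rpc;
            return target;
    }

    int main(void)
    {
            /* e.g. 8 RPCs in flight, 256 pages per RPC */
            printf("%ld\n", shrink_target(100000, 8, 256)); /* -> 2304 */
            printf("%ld\n", shrink_target(2000, 8, 256));   /* -> 256 */
            return 0;
    }

osc_shrink_grant_to_target() then refuses to go below one RPC's worth of grant, since over-shrinking would hurt block allocation and long-term performance on the OST.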
+static int osc_del_shrink_grant(struct client_obd *client)
+{
+        return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
+                                         TIMEOUT_GRANT);
+}
+
+static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
+{
+        /*
+         * ocd_grant is the total grant amount we expect to hold: if we have
+         * been evicted, it's the new avail_grant amount, and cl_dirty will
+         * drop to 0 as inflight RPCs fail out; otherwise, it's
+         * avail_grant + dirty.
+         *
+         * race is tolerable here: if we're evicted, but imp_state already
+         * left EVICTED state, then cl_dirty must be 0 already.
+         */
         client_obd_list_lock(&cli->cl_loi_list_lock);
-        CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
-        if (body->oa.o_valid & OBD_MD_FLGRANT)
-                cli->cl_avail_grant += body->oa.o_grant;
-        /* waiters are woken in brw_interpret_oap */
+        if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
+                cli->cl_avail_grant = ocd->ocd_grant;
+        else
+                cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
         client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+        CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
+               cli->cl_avail_grant, cli->cl_lost_grant);
+        LASSERT(cli->cl_avail_grant >= 0);
+
+        if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
+            list_empty(&cli->cl_grant_shrink_list))
+                osc_add_shrink_grant(cli);
 }
 
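Editor's sketch — the eviction special case in osc_init_grant() above is easy to get backwards, so here is the accounting reduced to one function (the enum and units are simplifications invented for the sketch):

    #include <stdio.h>

    enum toy_imp_state { TOY_IMP_EVICTED, TOY_IMP_FULL };

    /* ocd_grant from the server covers avail + dirty on a live import;
     * after an eviction the dirty pages are being failed out, so the whole
     * amount becomes available, as in osc_init_grant() above. */
    static long initial_avail_grant(enum toy_imp_state state, long ocd_grant,
                                    long cl_dirty)
    {
            if (state == TOY_IMP_EVICTED)
                    return ocd_grant;
            return ocd_grant - cl_dirty;
    }

    int main(void)
    {
            printf("%ld\n", initial_avail_grant(TOY_IMP_FULL, 1 << 20, 4096));
            printf("%ld\n", initial_avail_grant(TOY_IMP_EVICTED, 1 << 20, 0));
            return 0;
    }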
 /* We assume that the reason this OSC got a short read is because it read
@@ -924,19 +1140,18 @@ static int check_write_rcs(struct ptlrpc_request *req,
                            int requested_nob, int niocount,
                            obd_count page_count, struct brw_page **pga)
 {
-        int    *remote_rcs, i;
+        int     i;
+        __u32  *remote_rcs;
 
-        /* return error if any niobuf was in error */
-        remote_rcs = lustre_swab_repbuf(req, REQ_REC_OFF + 1,
-                                        sizeof(*remote_rcs) * niocount, NULL);
+        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
+                                                  sizeof(*remote_rcs) *
+                                                  niocount);
         if (remote_rcs == NULL) {
                 CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                 return(-EPROTO);
         }
-        if (lustre_msg_swabbed(req->rq_repmsg))
-                for (i = 0; i < niocount; i++)
-                        __swab32s(&remote_rcs[i]);
 
+        /* return error if any niobuf was in error */
         for (i = 0; i < niocount; i++) {
                 if (remote_rcs[i] < 0)
                         return(remote_rcs[i]);
@@ -950,7 +1165,7 @@ static int check_write_rcs(struct ptlrpc_request *req,
 
         if (req->rq_bulk->bd_nob_transferred != requested_nob) {
                 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
-                       requested_nob, req->rq_bulk->bd_nob_transferred);
+                       req->rq_bulk->bd_nob_transferred, requested_nob);
                 return(-EPROTO);
         }
 
@@ -960,7 +1175,8 @@ static int check_write_rcs(struct ptlrpc_request *req,
 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
 {
         if (p1->flag != p2->flag) {
-                unsigned mask = ~OBD_BRW_FROM_GRANT;
+                unsigned mask = ~(OBD_BRW_FROM_GRANT|
+                                  OBD_BRW_NOCACHE|OBD_BRW_SYNC);
 
                 /* warn if we try to combine flags that we don't know to be
                  * safe to combine */
@@ -974,12 +1190,14 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
 }
 
 static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
-                                   struct brw_page **pga, int opc)
+                                   struct brw_page **pga, int opc,
+                                   cksum_type_t cksum_type)
 {
-        __u32 cksum = ~0;
+        __u32 cksum;
         int i = 0;
 
         LASSERT (pg_count > 0);
+        cksum = init_checksum(cksum_type);
         while (nob > 0 && pg_count > 0) {
                 unsigned char *ptr = cfs_kmap(pga[i]->pg);
                 int off = pga[i]->off & ~CFS_PAGE_MASK;
@@ -990,7 +1208,7 @@ static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
                 if (i == 0 && opc == OST_READ &&
                     OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                         memcpy(ptr + off, "bad1", min(4, nob));
-                cksum = crc32_le(cksum, ptr + off, count);
+                cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
                 cfs_kunmap(pga[i]->pg);
                 LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
                                off, cksum);
@@ -1009,9 +1227,9 @@ static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
 
 static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
                                 struct lov_stripe_md *lsm, obd_count page_count,
-                                struct brw_page **pga,
+                                struct brw_page **pga,
                                 struct ptlrpc_request **reqp,
-                                struct obd_capa *ocapa)
+                                struct obd_capa *ocapa, int reserve)
 {
         struct ptlrpc_request *req;
         struct ptlrpc_bulk_desc *desc;
@@ -1021,6 +1239,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
         int niocount, i, requested_nob, opc, rc;
         struct osc_brw_async_args *aa;
         struct req_capsule *pill;
+        struct brw_page *pg_prev;
 
         ENTRY;
         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
@@ -1030,14 +1249,13 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
 
         if ((cmd & OBD_BRW_WRITE) != 0) {
                 opc = OST_WRITE;
-                req = ptlrpc_request_alloc_pool(cli->cl_import,
+                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                 cli->cl_import->imp_rq_pool,
                                                 &RQF_OST_BRW);
         } else {
                 opc = OST_READ;
                 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW);
         }
-
         if (req == NULL)
                 RETURN(-ENOMEM);
 
@@ -1047,6 +1265,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
         }
 
         pill = &req->rq_pill;
+        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
+                             sizeof(*ioobj));
         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                              niocount * sizeof(*niobuf));
         osc_set_capa_size(req, &RMF_CAPA1, ocapa);
@@ -1057,6 +1277,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
                 RETURN(rc);
         }
         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+        ptlrpc_at_set_req_timeout(req);
 
         if (opc == OST_WRITE)
                 desc = ptlrpc_prep_bulk_imp(req, page_count,
@@ -1072,17 +1293,17 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
         body = req_capsule_client_get(pill, &RMF_OST_BODY);
         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
-        LASSERT(body && ioobj && niobuf);
+        LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
 
-        body->oa = *oa;
+        lustre_set_wire_obdo(&body->oa, oa);
 
         obdo_to_ioobj(oa, ioobj);
         ioobj->ioo_bufcnt = niocount;
         osc_pack_capa(req, body, ocapa);
         LASSERT (page_count > 0);
+        pg_prev = pga[0];
         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                 struct brw_page *pg = pga[i];
-                struct brw_page *pg_prev = pga[i - 1];
 
                 LASSERT(pg->count > 0);
                 LASSERTF((pg->off & ~CFS_PAGE_MASK) + pg->count <= CFS_PAGE_SIZE,
@@ -1115,25 +1336,41 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
                         niobuf->len    = pg->count;
                         niobuf->flags  = pg->flag;
                 }
+                pg_prev = pg;
         }
 
-        LASSERT((void *)(niobuf - niocount) ==
-                lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 2,
-                               niocount * sizeof(*niobuf)));
+        LASSERTF((void *)(niobuf - niocount) ==
+                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
+                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
+                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
+
         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ?
                             requested_nob:0);
+        if (osc_should_shrink_grant(cli))
+                osc_shrink_grant_local(cli, &body->oa);
 
         /* size[REQ_REC_OFF] still sizeof (*body) */
         if (opc == OST_WRITE) {
                 if (unlikely(cli->cl_checksum) &&
-                    req->rq_flvr.sf_bulk_csum == BULK_CSUM_ALG_NULL) {
-                        body->oa.o_valid |= OBD_MD_FLCKSUM;
+                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
+                        /* store cl_cksum_type in a local variable since
+                         * it can be changed via lprocfs */
+                        cksum_type_t cksum_type = cli->cl_cksum_type;
+
+                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
+                                oa->o_flags &= OBD_FL_LOCAL_MASK;
+                                body->oa.o_flags = 0;
+                        }
+                        body->oa.o_flags |= cksum_type_pack(cksum_type);
+                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                         body->oa.o_cksum = osc_checksum_bulk(requested_nob,
                                                              page_count, pga,
-                                                             OST_WRITE);
+                                                             OST_WRITE,
+                                                             cksum_type);
                         CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                                body->oa.o_cksum);
                         /* save this in 'oa', too, for later checking */
-                        oa->o_valid |= OBD_MD_FLCKSUM;
+                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
+                        oa->o_flags |= cksum_type_pack(cksum_type);
                 } else {
                         /* clear out the checksum flag, in case this is a
                          * resend but cl_checksum is no longer set. b=11238 */
@@ -1141,19 +1378,23 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
                 }
                 oa->o_cksum = body->oa.o_cksum;
                 /* 1 RC per niobuf */
-                req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER,
+                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
                                      sizeof(__u32) * niocount);
         } else {
                 if (unlikely(cli->cl_checksum) &&
-                    req->rq_flvr.sf_bulk_csum == BULK_CSUM_ALG_NULL)
-                        body->oa.o_valid |= OBD_MD_FLCKSUM;
-                req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER, 0);
+                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
+                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
+                                body->oa.o_flags = 0;
+                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
+                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
+                }
+                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER, 0);
                 /* 1 RC for the whole I/O */
         }
         ptlrpc_request_set_replen(req);
 
         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-        aa = (struct osc_brw_async_args *)&req->rq_async_args;
+        aa = ptlrpc_req_async_args(req);
         aa->aa_oa = oa;
         aa->aa_requested_nob = requested_nob;
         aa->aa_nio_count = niocount;
@@ -1161,7 +1402,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
         aa->aa_resends = 0;
         aa->aa_ppga = pga;
         aa->aa_cli = cli;
-        INIT_LIST_HEAD(&aa->aa_oaps);
+        CFS_INIT_LIST_HEAD(&aa->aa_oaps);
+        if (ocapa && reserve)
+                aa->aa_ocapa = capa_get(ocapa);
 
         *reqp = req;
         RETURN(0);
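Editor's sketch — the write path above now carries the checksum algorithm in o_flags via cksum_type_pack(), alongside OBD_MD_FLCKSUM in o_valid, so the server can verify with the same algorithm. The real pack/unpack helpers live in obd_cksum.h; this model uses invented bit positions purely to illustrate the round trip:

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_CKSUM_SHIFT 16              /* invented for the sketch */
    #define TOY_CKSUM_MASK  (0xfu << TOY_CKSUM_SHIFT)
    #define TOY_CKSUM_CRC32 0x1u
    #define TOY_CKSUM_ADLER 0x2u

    /* model of cksum_type_pack(): fold the type into spare o_flags bits */
    static uint32_t toy_cksum_pack(uint32_t type)
    {
            return (type << TOY_CKSUM_SHIFT) & TOY_CKSUM_MASK;
    }

    /* model of cksum_type_unpack(): recover the type on the other side */
    static uint32_t toy_cksum_unpack(uint32_t o_flags)
    {
            return (o_flags & TOY_CKSUM_MASK) >> TOY_CKSUM_SHIFT;
    }

    int main(void)
    {
            uint32_t o_flags = 0;

            o_flags |= toy_cksum_pack(TOY_CKSUM_ADLER);
            printf("unpacked type: %u\n", toy_cksum_unpack(o_flags)); /* 2 */
            return 0;
    }

This also explains the FLFLAGS handling above: when o_flags was not previously valid it is zeroed before the type bits are ORed in, so stale bits cannot masquerade as a checksum type.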
 
@@ -1172,21 +1415,31 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
 }
 
 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
-                                __u32 client_cksum, __u32 server_cksum,
-                                int nob, obd_count page_count,
-                                struct brw_page **pga)
+                                __u32 client_cksum, __u32 server_cksum, int nob,
+                                obd_count page_count, struct brw_page **pga,
+                                cksum_type_t client_cksum_type)
 {
         __u32 new_cksum;
         char *msg;
+        cksum_type_t cksum_type;
 
         if (server_cksum == client_cksum) {
                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
                 return 0;
         }
 
-        new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE);
+        if (oa->o_valid & OBD_MD_FLFLAGS)
+                cksum_type = cksum_type_unpack(oa->o_flags);
+        else
+                cksum_type = OBD_CKSUM_CRC32;
+
+        new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
+                                      cksum_type);
 
-        if (new_cksum == server_cksum)
+        if (cksum_type != client_cksum_type)
+                msg = "the server did not use the checksum type specified in "
+                      "the original request - likely a protocol problem";
+        else if (new_cksum == server_cksum)
                 msg = "changed on the client after we checksummed it - "
                       "likely false positive due to mmap IO (bug 11742)";
         else if (new_cksum == client_cksum)
@@ -1200,15 +1453,16 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
                            "["LPU64"-"LPU64"]\n",
                            msg, libcfs_nid2str(peer->nid),
                            oa->o_valid & OBD_MD_FLFID ? oa->o_fid : (__u64)0,
-                           oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
+                           oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
                                                         (__u64)0,
                            oa->o_id,
                            oa->o_valid & OBD_MD_FLGROUP ? oa->o_gr : (__u64)0,
                            pga[0]->off,
                            pga[page_count-1]->off + pga[page_count-1]->count - 1);
-        CERROR("original client csum %x, server csum %x, client csum now %x\n",
-               client_cksum, server_cksum, new_cksum);
-        return 1;
+        CERROR("original client csum %x (type %x), server csum %x (type %x), "
+               "client csum now %x\n", client_cksum, client_cksum_type,
+               server_cksum, cksum_type, new_cksum);
+        return 1;
 }
 
 /* Note rc enters this function as number of bytes transferred */
@@ -1226,8 +1480,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
                 RETURN(rc);
 
         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
-        body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
-                                  lustre_swab_ost_body);
+        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
         if (body == NULL) {
                 CDEBUG(D_INFO, "Can't unpack body\n");
                 RETURN(-EPROTO);
@@ -1235,15 +1488,17 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
         }
 
         /* set/clear over quota flag for a uid/gid */
         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
-            body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA))
-                lquota_setdq(quota_interface, cli, body->oa.o_uid,
-                             body->oa.o_gid, body->oa.o_valid,
+            body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
+                unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
+
+                lquota_setdq(quota_interface, cli, qid, body->oa.o_valid,
                              body->oa.o_flags);
+        }
 
         if (rc < 0)
                 RETURN(rc);
 
-        if (unlikely(aa->aa_oa->o_valid & OBD_MD_FLCKSUM))
+        if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
 
         osc_update_grant(cli, body);
@@ -1255,16 +1510,14 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
         }
 
         LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
-        if (unlikely((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) &&
-                     client_cksum &&
-                     check_write_checksum(&body->oa, peer, client_cksum,
-                                          body->oa.o_cksum,
-                                          aa->aa_requested_nob,
-                                          aa->aa_page_count,
-                                          aa->aa_ppga)))
+        if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
                 RETURN(-EAGAIN);
 
-        if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
+        if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
+            check_write_checksum(&body->oa, peer, client_cksum,
+                                 body->oa.o_cksum, aa->aa_requested_nob,
+                                 aa->aa_page_count, aa->aa_ppga,
+                                 cksum_type_unpack(aa->aa_oa->o_flags)))
                 RETURN(-EAGAIN);
 
         rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
@@ -1273,6 +1526,12 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
         }
 
         /* The rest of this function executes only for OST_READs */
+
+        /* if unwrap_bulk failed, return -EAGAIN to retry */
+        rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
+        if (rc < 0)
+                GOTO(out, rc = -EAGAIN);
+
         if (rc > aa->aa_requested_nob) {
                 CERROR("Unexpected rc %d (%d requested)\n", rc,
                        aa->aa_requested_nob);
@@ -1288,18 +1547,20 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
 
         if (rc < aa->aa_requested_nob)
                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
 
-        if (sptlrpc_cli_unwrap_bulk_read(req, rc, aa->aa_page_count,
-                                         aa->aa_ppga))
-                GOTO(out, rc = -EAGAIN);
-
-        if (unlikely(body->oa.o_valid & OBD_MD_FLCKSUM)) {
+        if (body->oa.o_valid & OBD_MD_FLCKSUM) {
                 static int cksum_counter;
                 __u32      server_cksum = body->oa.o_cksum;
                 char      *via;
                 char      *router;
+                cksum_type_t cksum_type;
 
+                if (body->oa.o_valid & OBD_MD_FLFLAGS)
+                        cksum_type = cksum_type_unpack(body->oa.o_flags);
+                else
+                        cksum_type = OBD_CKSUM_CRC32;
                 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
-                                                 aa->aa_ppga, OST_READ);
+                                                 aa->aa_ppga, OST_READ,
+                                                 cksum_type);
 
                 if (peer->nid == req->rq_bulk->bd_sender) {
                         via = router = "";
@@ -1311,7 +1572,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
                 if (server_cksum == ~0 && rc > 0) {
                         CERROR("Protocol error: server %s set the 'checksum' "
                                "bit, but didn't send a checksum.  Not fatal, "
-                               "but please tell CFS.\n",
+                               "but please notify on http://bugzilla.lustre.org/\n",
                                libcfs_nid2str(peer->nid));
                 } else if (server_cksum != client_cksum) {
                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
@@ -1332,8 +1593,8 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
                                            aa->aa_ppga[aa->aa_page_count-1]->off +
                                            aa->aa_ppga[aa->aa_page_count-1]->count - 1);
-                        CERROR("client %x, server %x\n",
-                               client_cksum, server_cksum);
+                        CERROR("client %x, server %x, cksum_type %x\n",
+                               client_cksum, server_cksum, cksum_type);
                         cksum_counter = 0;
                         aa->aa_oa->o_cksum = client_cksum;
                         rc = -EAGAIN;
@@ -1354,7 +1615,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
         }
 out:
         if (rc >= 0)
-                *aa->aa_oa = body->oa;
+                lustre_get_wire_obdo(aa->aa_oa, &body->oa);
 
         RETURN(rc);
 }
 
@@ -1376,7 +1637,7 @@ static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
 
 restart_bulk:
         rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
-                                  page_count, pga, &req, ocapa);
+                                  page_count, pga, &req, ocapa, 0);
         if (rc != 0)
                 return (rc);
 
@@ -1403,7 +1664,7 @@ restart_bulk:
                 goto restart_bulk;
         }
-
+
         RETURN (rc);
 }
 
@@ -1421,25 +1682,20 @@ int osc_brw_redo_request(struct ptlrpc_request *request,
                 CERROR("too many resend retries, returning error\n");
                 RETURN(-EIO);
         }
-
+
         DEBUG_REQ(D_ERROR, request, "redo for recoverable error");
-/*
-        body = lustre_msg_buf(request->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
-        if (body->oa.o_valid & OBD_MD_FLOSSCAPA)
-                ocapa = lustre_unpack_capa(request->rq_reqmsg,
-                                           REQ_REC_OFF + 3);
-*/
+
         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
                                           OST_WRITE ?
                                           OBD_BRW_WRITE : OBD_BRW_READ,
                                   aa->aa_cli, aa->aa_oa,
                                   NULL /* lsm unused by osc currently */,
-                                 aa->aa_page_count, aa->aa_ppga,
-                                 &new_req, NULL /* ocapa */);
+                                 aa->aa_page_count, aa->aa_ppga,
+                                 &new_req, aa->aa_ocapa, 0);
         if (rc)
                 RETURN(rc);
 
         client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
-
+
         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
                 if (oap->oap_request != NULL) {
                         LASSERTF(request == oap->oap_request,
@@ -1447,7 +1703,7 @@ int osc_brw_redo_request(struct ptlrpc_request *request,
                                  request, oap->oap_request);
                         if (oap->oap_interrupted) {
                                 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
-                                ptlrpc_req_finished(new_req);
+                                ptlrpc_req_finished(new_req);
                                 RETURN(-EINTR);
                         }
                 }
@@ -1457,13 +1713,13 @@ int osc_brw_redo_request(struct ptlrpc_request *request,
         aa->aa_resends++;
         new_req->rq_interpret_reply = request->rq_interpret_reply;
         new_req->rq_async_args = request->rq_async_args;
-        new_req->rq_sent = CURRENT_SECONDS + aa->aa_resends;
+        new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
 
-        new_aa = (struct osc_brw_async_args *)&new_req->rq_async_args;
+        new_aa = ptlrpc_req_async_args(new_req);
 
-        INIT_LIST_HEAD(&new_aa->aa_oaps);
+        CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
         list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
-        INIT_LIST_HEAD(&aa->aa_oaps);
+        CFS_INIT_LIST_HEAD(&aa->aa_oaps);
 
         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
                 if (oap->oap_request) {
@@ -1472,9 +1728,12 @@ int osc_brw_redo_request(struct ptlrpc_request *request,
                 }
         }
 
-        /* use ptlrpc_set_add_req is safe because interpret functions work
-         * in check_set context. only one way exist with access to request
-         * from different thread got -EINTR - this way protected with
+        new_aa->aa_ocapa = aa->aa_ocapa;
+        aa->aa_ocapa = NULL;
+
+        /* use ptlrpc_set_add_req is safe because interpret functions work
+         * in check_set context. only one way exist with access to request
+         * from different thread got -EINTR - this way protected with
          * cl_loi_list_lock */
         ptlrpc_set_add_req(set, new_req);
@@ -1484,88 +1743,6 @@ int osc_brw_redo_request(struct ptlrpc_request *request,
         RETURN(0);
 }
 
-static int brw_interpret(struct ptlrpc_request *req, void *data, int rc)
-{
-        struct osc_brw_async_args *aa = data;
-        int i;
-        ENTRY;
-
-        rc = osc_brw_fini_request(req, rc);
-        if (osc_recoverable_error(rc)) {
-                rc = osc_brw_redo_request(req, aa);
-                if (rc == 0)
-                        RETURN(0);
-        }
-
-        client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
-        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
-                aa->aa_cli->cl_w_in_flight--;
-        else
-                aa->aa_cli->cl_r_in_flight--;
-        for (i = 0; i < aa->aa_page_count; i++)
-                osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
-        client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
-
-        osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
-
-        RETURN(rc);
-}
-
-static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
-                          struct lov_stripe_md *lsm, obd_count page_count,
-                          struct brw_page **pga, struct ptlrpc_request_set *set,
-                          struct obd_capa *ocapa)
-{
-        struct ptlrpc_request *req;
-        struct client_obd *cli = &exp->exp_obd->u.cli;
-        int rc, i;
-        struct osc_brw_async_args *aa;
-        ENTRY;
-
-        /* Consume write credits even if doing a sync write -
-         * otherwise we may run out of space on OST due to grant. */
-        if (cmd == OBD_BRW_WRITE) {
-                spin_lock(&cli->cl_loi_list_lock);
-                for (i = 0; i < page_count; i++) {
-                        if (cli->cl_avail_grant >= CFS_PAGE_SIZE)
-                                osc_consume_write_grant(cli, pga[i]);
-                }
-                spin_unlock(&cli->cl_loi_list_lock);
-        }
-
-        rc = osc_brw_prep_request(cmd, cli, oa, lsm, page_count, pga,
-                                  &req, ocapa);
-
-        aa = (struct osc_brw_async_args *)&req->rq_async_args;
-        if (cmd == OBD_BRW_READ) {
-                lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
-                lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
-                ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
-        } else {
-                lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
-                lprocfs_oh_tally(&cli->cl_write_rpc_hist,
-                                 cli->cl_w_in_flight);
-                ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);
-        }
-
-        if (rc == 0) {
-                req->rq_interpret_reply = brw_interpret;
-                ptlrpc_set_add_req(set, req);
-                client_obd_list_lock(&cli->cl_loi_list_lock);
-                if (cmd == OBD_BRW_READ)
-                        cli->cl_r_in_flight++;
-                else
-                        cli->cl_w_in_flight++;
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-        } else if (cmd == OBD_BRW_WRITE) {
-                client_obd_list_lock(&cli->cl_loi_list_lock);
-                for (i = 0; i < page_count; i++)
-                        osc_release_write_grant(cli, pga[i], 0);
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-        }
-        RETURN (rc);
-}
-
 /*
  * ugh, we want disk allocation on the target to happen in offset order.  we'll
  * follow sedgewicks advice and stick to the dead simple shellsort -- it'll do
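Editor's sketch — the comment above refers to the classic Sedgewick shellsort; the body of sort_brw_pages() is not shown in this hunk, so here is the same gap-sequence pattern reduced to sorting raw page offsets (a compilable stand-in, not the actual function):

    #include <stdint.h>
    #include <stdio.h>

    static void shellsort_offsets(uint64_t *off, int count)
    {
            int gap, i, j;
            uint64_t tmp;

            /* Sedgewick/Knuth gap sequence 1, 4, 13, 40, ... */
            for (gap = 1; gap <= count / 9; gap = 3 * gap + 1)
                    ;
            for (; gap > 0; gap /= 3) {
                    for (i = gap; i < count; i++) {
                            tmp = off[i];
                            for (j = i; j >= gap && off[j - gap] > tmp; j -= gap)
                                    off[j] = off[j - gap];
                            off[j] = tmp;
                    }
            }
    }

    int main(void)
    {
            uint64_t off[] = { 40960, 0, 8192, 4096, 12288 };
            int i;

            shellsort_offsets(off, 5);
            for (i = 0; i < 5; i++)         /* 0 4096 8192 12288 40960 */
                    printf("%llu ", (unsigned long long)off[i]);
            printf("\n");
            return 0;
    }

Offset order matters because the OST allocates blocks as the niobufs arrive, so an ascending page array keeps allocation (and later reads) sequential.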
@@ -1650,15 +1827,18 @@ static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
         struct obdo *saved_oa = NULL;
         struct brw_page **ppga, **orig;
         struct obd_import *imp = class_exp2cliimp(exp);
-        struct client_obd *cli = &imp->imp_obd->u.cli;
+        struct client_obd *cli;
         int rc, page_count_orig;
         ENTRY;
 
+        LASSERT((imp != NULL) && (imp->imp_obd != NULL));
+        cli = &imp->imp_obd->u.cli;
+
         if (cmd & OBD_BRW_CHECK) {
                 /* The caller just wants to know if there's a chance that this
                  * I/O can succeed */
 
-                if (imp == NULL || imp->imp_invalid)
+                if (imp->imp_invalid)
                         RETURN(-EIO);
                 RETURN(0);
         }
@@ -1714,76 +1894,6 @@ out:
         RETURN(rc);
 }
 
-static int osc_brw_async(int cmd, struct obd_export *exp,
-                         struct obd_info *oinfo, obd_count page_count,
-                         struct brw_page *pga, struct obd_trans_info *oti,
-                         struct ptlrpc_request_set *set)
-{
-        struct brw_page **ppga, **orig;
-        struct client_obd *cli = &exp->exp_obd->u.cli;
-        int page_count_orig;
-        int rc = 0;
-        ENTRY;
-
-        if (cmd & OBD_BRW_CHECK) {
-                struct obd_import *imp = class_exp2cliimp(exp);
-                /* The caller just wants to know if there's a chance that this
-                 * I/O can succeed */
-
-                if (imp == NULL || imp->imp_invalid)
-                        RETURN(-EIO);
-                RETURN(0);
-        }
-
-        orig = ppga = osc_build_ppga(pga, page_count);
-        if (ppga == NULL)
-                RETURN(-ENOMEM);
-        page_count_orig = page_count;
-
-        sort_brw_pages(ppga, page_count);
-        while (page_count) {
-                struct brw_page **copy;
-                obd_count pages_per_brw;
-
-                pages_per_brw = min_t(obd_count, page_count,
-                                      cli->cl_max_pages_per_rpc);
-
-                pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
-
-                /* use ppga only if single RPC is going to fly */
-                if (pages_per_brw != page_count_orig || ppga != orig) {
-                        OBD_ALLOC(copy, sizeof(*copy) * pages_per_brw);
-                        if (copy == NULL)
-                                GOTO(out, rc = -ENOMEM);
-                        memcpy(copy, ppga, sizeof(*copy) * pages_per_brw);
-                } else
-                        copy = ppga;
-
-                rc = async_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
-                                    pages_per_brw, copy, set, oinfo->oi_capa);
-
-                if (rc != 0) {
-                        if (copy != ppga)
-                                OBD_FREE(copy, sizeof(*copy) * pages_per_brw);
-                        break;
-                }
-                if (copy == orig) {
-                        /* we passed it to async_internal() which is
-                         * now responsible for releasing memory */
-                        orig = NULL;
-                }
-
-                page_count -= pages_per_brw;
-                ppga += pages_per_brw;
-        }
-out:
-        if (orig)
-                osc_release_ppga(orig, page_count_orig);
-        RETURN(rc);
-}
-
-static void osc_check_rpcs(struct client_obd *cli);
-
 /* The companion to osc_enter_cache(), called when @oap is no longer part of
  * the dirty accounting.  Writeback completes or truncate happens before
  * writing starts.  Must be called with the loi lock held. */
@@ -1843,10 +1953,29 @@ static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
         RETURN(0);
 }
 
-static void on_list(struct list_head *item, struct list_head *list,
-                    int should_be_on)
+static int lop_makes_hprpc(struct loi_oap_pages *lop)
 {
-        if (list_empty(item) && should_be_on)
+        struct osc_async_page *oap;
+        ENTRY;
+
+        if (list_empty(&lop->lop_urgent))
+                RETURN(0);
+
+        oap = list_entry(lop->lop_urgent.next,
+                         struct osc_async_page, oap_urgent_item);
+
+        if (oap->oap_async_flags & ASYNC_HP) {
+                CDEBUG(D_CACHE, "hp request forcing RPC\n");
+                RETURN(1);
+        }
+
+        RETURN(0);
+}
+
+static void on_list(struct list_head *item, struct list_head *list,
+                    int should_be_on)
+{
+        if (list_empty(item) && should_be_on)
                 list_add_tail(item, list);
         else if (!list_empty(item) && !should_be_on)
                 list_del_init(item);
@@ -1854,11 +1983,19 @@ static void on_list(struct list_head *item, struct list_head *list,
 
 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
  * can find pages to build into rpcs quickly */
-static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
+void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
 {
-        on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
-                lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
-                lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
+        if (lop_makes_hprpc(&loi->loi_write_lop) ||
+            lop_makes_hprpc(&loi->loi_read_lop)) {
+                /* HP rpc */
+                on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
+                on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
+        } else {
+                on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
+                on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
+                        lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
+                        lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
+        }
 
         on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
                 loi->loi_write_lop.lop_num_pending);
@@ -1877,34 +2014,35 @@ static void lop_update_pending(struct client_obd *cli,
                 cli->cl_pending_r_pages += delta;
 }
 
-/* this is called when a sync waiter receives an interruption.  Its job is to
+/**
+ * this is called when a sync waiter receives an interruption.  Its job is to
  * get the caller woken as soon as possible.  If its page hasn't been put in an
  * rpc yet it can dequeue immediately.  Otherwise it has to mark the rpc as
  * desiring interruption which will forcefully complete the rpc once the rpc
- * has timed out */
-static void osc_occ_interrupted(struct oig_callback_context *occ)
+ * has timed out.
+ */
+int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap)
 {
-        struct osc_async_page *oap;
         struct loi_oap_pages *lop;
         struct lov_oinfo *loi;
+        int rc = -EBUSY;
         ENTRY;
 
-        /* XXX member_of() */
-        oap = list_entry(occ, struct osc_async_page, oap_occ);
-
-        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
-
+        LASSERT(!oap->oap_interrupted);
         oap->oap_interrupted = 1;
 
         /* ok, it's been put in an rpc. only one oap gets a request reference */
         if (oap->oap_request != NULL) {
                 ptlrpc_mark_interrupted(oap->oap_request);
                 ptlrpcd_wake(oap->oap_request);
-                GOTO(unlock, 0);
+                ptlrpc_req_finished(oap->oap_request);
+                oap->oap_request = NULL;
         }
 
-        /* we don't get interruption callbacks until osc_trigger_group_io()
-         * has been called and put the sync oaps in the pending/urgent lists.*/
+        /*
+         * page completion may be called only if ->cpo_prep() method was
+         * executed by osc_io_submit(), that also adds page the to pending list
+         */
         if (!list_empty(&oap->oap_pending_item)) {
                 list_del_init(&oap->oap_pending_item);
                 list_del_init(&oap->oap_urgent_item);
@@ -1914,13 +2052,12 @@ static void osc_occ_interrupted(struct oig_callback_context *occ)
                       &loi->loi_write_lop : &loi->loi_read_lop;
                 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
                 loi_list_maint(oap->oap_cli, oap->oap_loi);
-
-                oig_complete_one(oap->oap_oig, &oap->oap_occ, -EINTR);
-                oap->oap_oig = NULL;
+                rc = oap->oap_caller_ops->ap_completion(env,
+                                                oap->oap_caller_data,
+                                                oap->oap_cmd, NULL, -EINTR);
         }
 
-unlock:
-        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
+        RETURN(rc);
 }
 
 /* this is trying to propogate async writeback errors back up to the
@@ -1945,7 +2082,7 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
                 ar->ar_force_sync = 0;
 }
 
-static void osc_oap_to_pending(struct osc_async_page *oap)
+void osc_oap_to_pending(struct osc_async_page *oap)
 {
         struct loi_oap_pages *lop;
 
@@ -1954,15 +2091,18 @@ static void osc_oap_to_pending(struct osc_async_page *oap)
         else
                 lop = &oap->oap_loi->loi_read_lop;
 
-        if (oap->oap_async_flags & ASYNC_URGENT)
+        if (oap->oap_async_flags & ASYNC_HP)
                 list_add(&oap->oap_urgent_item, &lop->lop_urgent);
+        else if (oap->oap_async_flags & ASYNC_URGENT)
+                list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
         list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
         lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
 }
 
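Editor's sketch — the queueing rule added to osc_oap_to_pending() above puts ASYNC_HP pages at the head of lop_urgent (list_add) and ordinary urgent pages at the tail (list_add_tail). A toy array-backed model of that ordering, with invented flag values:

    #include <stdio.h>
    #include <string.h>

    #define TOY_URGENT 0x1                  /* invented flag values */
    #define TOY_HP     0x2

    struct toy_queue { int flags[16]; int len; };

    static void toy_enqueue(struct toy_queue *q, int flags)
    {
            if (flags & TOY_HP) {           /* list_add(): head */
                    memmove(&q->flags[1], &q->flags[0],
                            q->len * sizeof(int));
                    q->flags[0] = flags;
            } else {                        /* list_add_tail(): tail */
                    q->flags[q->len] = flags;
            }
            q->len++;
    }

    int main(void)
    {
            struct toy_queue q = { { 0 }, 0 };
            int i;

            toy_enqueue(&q, TOY_URGENT);
            toy_enqueue(&q, TOY_HP | TOY_URGENT);
            toy_enqueue(&q, TOY_URGENT);
            for (i = 0; i < q.len; i++)     /* prints: 3 1 1 */
                    printf("%d ", q.flags[i]);
            printf("\n");
            return 0;
    }

Since osc_send_oap_rpc() drains lop_urgent from the front, lock-cancel (HP) pages are always the first candidates for the next RPC.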
 /* this must be called holding the loi list lock to give coverage to exit_cache,
  * async_flag maintenance, and oap_request */
-static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
+static void osc_ap_completion(const struct lu_env *env,
+                              struct client_obd *cli, struct obdo *oa,
                               struct osc_async_page *oap, int sent, int rc)
 {
         __u64 xid = 0;
 
@@ -1974,7 +2114,9 @@ static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
                 oap->oap_request = NULL;
         }
 
+        spin_lock(&oap->oap_lock);
         oap->oap_async_flags = 0;
+        spin_unlock(&oap->oap_lock);
         oap->oap_interrupted = 0;
 
         if (oap->oap_cmd & OBD_BRW_WRITE) {
@@ -1993,15 +2135,7 @@ static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
                 oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
         }
 
-        if (oap->oap_oig) {
-                osc_exit_cache(cli, oap, sent);
-                oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
-                oap->oap_oig = NULL;
-                EXIT;
-                return;
-        }
-
-        rc = oap->oap_caller_ops->ap_completion(oap->oap_caller_data,
+        rc = oap->oap_caller_ops->ap_completion(env, oap->oap_caller_data,
                                                 oap->oap_cmd, oa, rc);
 
         /* ll_ap_completion (from llite) drops PG_locked. so, a new
@@ -2015,11 +2149,12 @@ static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
         EXIT;
 }
 
-static int brw_interpret_oap(struct ptlrpc_request *req, void *data, int rc)
+static int brw_interpret(const struct lu_env *env,
+                         struct ptlrpc_request *req, void *data, int rc)
 {
-        struct osc_async_page *oap, *tmp;
         struct osc_brw_async_args *aa = data;
         struct client_obd *cli;
+        int async;
         ENTRY;
 
         rc = osc_brw_fini_request(req, rc);
@@ -2030,6 +2165,11 @@ static int brw_interpret_oap(struct ptlrpc_request *req, void *data, int rc)
                 RETURN(0);
         }
 
+        if (aa->aa_ocapa) {
+                capa_put(aa->aa_ocapa);
+                aa->aa_ocapa = NULL;
+        }
+
         cli = aa->aa_cli;
 
         client_obd_list_lock(&cli->cl_loi_list_lock);
 
@@ -2042,25 +2182,35 @@ static int brw_interpret_oap(struct ptlrpc_request *req, void *data, int rc)
         else
                 cli->cl_r_in_flight--;
 
-        /* the caller may re-use the oap after the completion call so
-         * we need to clean it up a little */
-        list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
-                list_del_init(&oap->oap_rpc_item);
-                osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
-        }
+        async = list_empty(&aa->aa_oaps);
+        if (!async) { /* from osc_send_oap_rpc() */
+                struct osc_async_page *oap, *tmp;
+                /* the caller may re-use the oap after the completion call so
+                 * we need to clean it up a little */
+                list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
+                        list_del_init(&oap->oap_rpc_item);
+                        osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
+                }
+                OBDO_FREE(aa->aa_oa);
+        } else { /* from async_internal() */
+                int i;
+                for (i = 0; i < aa->aa_page_count; i++)
+                        osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
+                if (aa->aa_oa->o_flags & OBD_FL_TEMPORARY)
+                        OBDO_FREE(aa->aa_oa);
+        }
         osc_wake_cache_waiters(cli);
-        osc_check_rpcs(cli);
-
+        osc_check_rpcs(env, cli);
         client_obd_list_unlock(&cli->cl_loi_list_lock);
-
-        OBDO_FREE(aa->aa_oa);
-
+        if (!async)
+                cl_req_completion(env, aa->aa_clerq, rc);
         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
 
         RETURN(rc);
 }
 
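Editor's sketch — with brw_interpret_oap() and the old brw_interpret() merged above, one callback now finishes both RPC flavors and distinguishes them by whether any osc_async_page was attached. A minimal model of the dispatch (names invented; list handling reduced to a counter):

    #include <stdio.h>

    struct toy_brw_args { int n_oaps; };    /* stand-in for osc_brw_async_args */

    static void toy_brw_interpret(const struct toy_brw_args *aa)
    {
            if (aa->n_oaps != 0)            /* from osc_send_oap_rpc() */
                    printf("complete %d oaps, free the obdo\n", aa->n_oaps);
            else                            /* from async_internal() */
                    printf("release per-page write grants\n");
    }

    int main(void)
    {
            struct toy_brw_args cached = { 4 }, direct = { 0 };

            toy_brw_interpret(&cached);
            toy_brw_interpret(&direct);
            return 0;
    }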
CRT_WRITE : CRT_READ; + struct ldlm_lock *lock = NULL; + struct cl_req_attr crattr; int i, rc; ENTRY; LASSERT(!list_empty(rpc_list)); + memset(&crattr, 0, sizeof crattr); OBD_ALLOC(pga, sizeof(*pga) * page_count); if (pga == NULL) - RETURN(ERR_PTR(-ENOMEM)); + GOTO(out, req = ERR_PTR(-ENOMEM)); OBDO_ALLOC(oa); if (oa == NULL) @@ -2087,26 +2243,45 @@ static struct ptlrpc_request *osc_build_req(struct client_obd *cli, i = 0; list_for_each_entry(oap, rpc_list, oap_rpc_item) { + struct cl_page *page = osc_oap2cl_page(oap); if (ops == NULL) { ops = oap->oap_caller_ops; caller_data = oap->oap_caller_data; + + clerq = cl_req_alloc(env, page, crt, + 1 /* only 1-object rpcs for + * now */); + if (IS_ERR(clerq)) + GOTO(out, req = (void *)clerq); + lock = oap->oap_ldlm_lock; } pga[i] = &oap->oap_brw_page; pga[i]->off = oap->oap_obj_off + oap->oap_page_off; CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n", pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag); i++; + cl_req_page_add(env, clerq, page); } /* always get the data for the obdo for the rpc */ LASSERT(ops != NULL); - ops->ap_fill_obdo(caller_data, cmd, oa); - ocapa = ops->ap_lookup_capa(caller_data, cmd); + crattr.cra_oa = oa; + crattr.cra_capa = NULL; + cl_req_attr_set(env, clerq, &crattr, ~0ULL); + if (lock) { + oa->o_handle = lock->l_remote_handle; + oa->o_valid |= OBD_MD_FLHANDLE; + } + + rc = cl_req_prep(env, clerq); + if (rc != 0) { + CERROR("cl_req_prep failed: %d\n", rc); + GOTO(out, req = ERR_PTR(rc)); + } sort_brw_pages(pga, page_count); rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count, - pga, &req, ocapa); - capa_put(ocapa); + pga, &req, crattr.cra_capa, 1); if (rc != 0) { CERROR("prep_req failed: %d\n", rc); GOTO(out, req = ERR_PTR(rc)); @@ -2117,46 +2292,115 @@ static struct ptlrpc_request *osc_build_req(struct client_obd *cli, * later setattr before earlier BRW (as determined by the request xid), * the OST will not use BRW timestamps. Sadly, there is no obvious * way to do this in a single call. 
bug 10150 */ - ops->ap_update_obdo(caller_data, cmd, oa, - OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME); + body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); + cl_req_attr_set(env, clerq, &crattr, + OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME); CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); - aa = (struct osc_brw_async_args *)&req->rq_async_args; - INIT_LIST_HEAD(&aa->aa_oaps); + aa = ptlrpc_req_async_args(req); + CFS_INIT_LIST_HEAD(&aa->aa_oaps); list_splice(rpc_list, &aa->aa_oaps); - INIT_LIST_HEAD(rpc_list); - + CFS_INIT_LIST_HEAD(rpc_list); + aa->aa_clerq = clerq; out: + capa_put(crattr.cra_capa); if (IS_ERR(req)) { if (oa) OBDO_FREE(oa); if (pga) OBD_FREE(pga, sizeof(*pga) * page_count); + /* this should happen rarely and is pretty bad, it makes the + * pending list not follow the dirty order */ + client_obd_list_lock(&cli->cl_loi_list_lock); + list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) { + list_del_init(&oap->oap_rpc_item); + + /* queued sync pages can be torn down while the pages + * were between the pending list and the rpc */ + if (oap->oap_interrupted) { + CDEBUG(D_INODE, "oap %p interrupted\n", oap); + osc_ap_completion(env, cli, NULL, oap, 0, + oap->oap_count); + continue; + } + osc_ap_completion(env, cli, NULL, oap, 0, PTR_ERR(req)); + } + if (clerq && !IS_ERR(clerq)) + cl_req_completion(env, clerq, PTR_ERR(req)); } RETURN(req); } -/* the loi lock is held across this function but it's allowed to release - * and reacquire it during its work */ -static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi, - int cmd, struct loi_oap_pages *lop) +/** + * Prepare pages for ASYNC I/O and put them in the send queue. + * + * \param cmd OBD_BRW_* macros + * \param lop pending pages + * + * \return zero if pages were successfully added to the send queue. + * \return non-zero if an error occurred. + */ +static int +osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli, + struct lov_oinfo *loi, + int cmd, struct loi_oap_pages *lop) { struct ptlrpc_request *req; obd_count page_count = 0; struct osc_async_page *oap = NULL, *tmp; struct osc_brw_async_args *aa; - struct obd_async_page_ops *ops; + const struct obd_async_page_ops *ops; CFS_LIST_HEAD(rpc_list); + CFS_LIST_HEAD(tmp_list); unsigned int ending_offset; unsigned starting_offset = 0; + int srvlock = 0; + struct cl_object *clob = NULL; ENTRY; + /* ASYNC_HP pages first. At present, when the lock covering the pages + * is to be canceled, the pages it covers will be sent out + * with ASYNC_HP. We have to send them out as soon as possible. */ + list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) { + if (oap->oap_async_flags & ASYNC_HP) + list_move(&oap->oap_pending_item, &tmp_list); + else + list_move_tail(&oap->oap_pending_item, &tmp_list); + if (++page_count >= cli->cl_max_pages_per_rpc) + break; + } + + list_splice(&tmp_list, &lop->lop_pending); + page_count = 0; + /* first we find the pages we're allowed to work with */ list_for_each_entry_safe(oap, tmp, &lop->lop_pending, oap_pending_item) { ops = oap->oap_caller_ops; - LASSERT(oap->oap_magic == OAP_MAGIC); + LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, " + "magic 0x%x\n", oap, oap->oap_magic); + + if (clob == NULL) { + /* pin object in memory, so that completion call-backs + * can be safely called under client_obd_list lock.
*/ + clob = osc_oap2cl_page(oap)->cp_obj; + cl_object_get(clob); + } + + if (page_count != 0 && + srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) { + CDEBUG(D_PAGE, "SRVLOCK flag mismatch," + " oap %p, page %p, srvlock %u\n", + oap, oap->oap_brw_page.pg, (unsigned)!srvlock); + break; + } + + /* If there is a gap at the start of this page, it can't merge + * with any previous page, so we'll hand the network a + * "fragmented" page array that it can't transfer in 1 RDMA */ + if (page_count != 0 && oap->oap_page_off != 0) + break; /* in llite being 'ready' equates to the page being locked * until completion unlocks it. commit_write submits a page @@ -2168,7 +2412,8 @@ static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi, * will still be on the dirty list). we could call in * at the end of ll_file_write to process the queue again. */ if (!(oap->oap_async_flags & ASYNC_READY)) { - int rc = ops->ap_make_ready(oap->oap_caller_data, cmd); + int rc = ops->ap_make_ready(env, oap->oap_caller_data, + cmd); if (rc < 0) CDEBUG(D_INODE, "oap %p page %p returned %d " "instead of ready\n", oap, @@ -2186,11 +2431,15 @@ static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi, case -EINTR: /* the io isn't needed.. tell the checks * below to complete the rpc with EINTR */ + spin_lock(&oap->oap_lock); oap->oap_async_flags |= ASYNC_COUNT_STABLE; + spin_unlock(&oap->oap_lock); oap->oap_count = -EINTR; break; case 0: + spin_lock(&oap->oap_lock); oap->oap_async_flags |= ASYNC_READY; + spin_unlock(&oap->oap_lock); break; default: LASSERTF(0, "oap %p page %p returned %d " @@ -2204,18 +2453,24 @@ static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi, /* * Page submitted for IO has to be locked. Either by * ->ap_make_ready() or by higher layers. - * - * XXX nikita: this assertion should be adjusted when lustre - * starts using PG_writeback for pages being written out. */ #if defined(__KERNEL__) && defined(__linux__) - LASSERT(PageLocked(oap->oap_page)); + { + struct cl_page *page; + + page = osc_oap2cl_page(oap); + + if (page->cp_type == CPT_CACHEABLE && + !(PageLocked(oap->oap_page) && + (CheckWriteback(oap->oap_page, cmd)))) { + CDEBUG(D_PAGE, "page %p lost wb %lx/%x\n", + oap->oap_page, + (long)oap->oap_page->flags, + oap->oap_async_flags); + LBUG(); + } + } #endif - /* If there is a gap at the start of this page, it can't merge - * with any previous page, so we'll hand the network a - * "fragmented" page array that it can't transfer in 1 RDMA */ - if (page_count != 0 && oap->oap_page_off != 0) - break; /* take the page out of our book-keeping */ list_del_init(&oap->oap_pending_item); @@ -2227,18 +2482,24 @@ static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi, (PTLRPC_MAX_BRW_SIZE - 1); /* ask the caller for the size of the io as the rpc leaves. 
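 * (a worked example, added for clarity and assuming 4096-byte pages: if a
 * file ends at offset 5000, its second page carries only 5000 - 4096 = 904
 * valid bytes; ap_refresh_count() is re-queried here, at RPC-build time,
 * because a truncate or append after the page was queued can change that
 * count, and the new LASSERT below only holds while oap_page_off plus
 * oap_count still fits within one CFS_PAGE_SIZE page)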
*/ - if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) + if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { oap->oap_count = - ops->ap_refresh_count(oap->oap_caller_data,cmd); + ops->ap_refresh_count(env, oap->oap_caller_data, + cmd); + LASSERT(oap->oap_page_off + oap->oap_count <= CFS_PAGE_SIZE); + } if (oap->oap_count <= 0) { CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap, oap->oap_count); - osc_ap_completion(cli, NULL, oap, 0, oap->oap_count); + osc_ap_completion(env, cli, NULL, + oap, 0, oap->oap_count); continue; } /* now put the page back in our accounting */ list_add_tail(&oap->oap_rpc_item, &rpc_list); + if (page_count == 0) + srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK); if (++page_count >= cli->cl_max_pages_per_rpc) break; @@ -2260,51 +2521,40 @@ static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi, osc_wake_cache_waiters(cli); - if (page_count == 0) - RETURN(0); - loi_list_maint(cli, loi); client_obd_list_unlock(&cli->cl_loi_list_lock); - req = osc_build_req(cli, &rpc_list, page_count, cmd); - if (IS_ERR(req)) { - /* this should happen rarely and is pretty bad, it makes the - * pending list not follow the dirty order */ + if (clob != NULL) + cl_object_put(env, clob); + + if (page_count == 0) { client_obd_list_lock(&cli->cl_loi_list_lock); - list_for_each_entry_safe(oap, tmp, &rpc_list, oap_rpc_item) { - list_del_init(&oap->oap_rpc_item); + RETURN(0); + } - /* queued sync pages can be torn down while the pages - * were between the pending list and the rpc */ - if (oap->oap_interrupted) { - CDEBUG(D_INODE, "oap %p interrupted\n", oap); - osc_ap_completion(cli, NULL, oap, 0, - oap->oap_count); - continue; - } - osc_ap_completion(cli, NULL, oap, 0, PTR_ERR(req)); - } + req = osc_build_req(env, cli, &rpc_list, page_count, cmd); + if (IS_ERR(req)) { + LASSERT(list_empty(&rpc_list)); loi_list_maint(cli, loi); RETURN(PTR_ERR(req)); } - aa = (struct osc_brw_async_args *)&req->rq_async_args; + aa = ptlrpc_req_async_args(req); if (cmd == OBD_BRW_READ) { lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight); lprocfs_oh_tally_log2(&cli->cl_read_offset_hist, (starting_offset >> CFS_PAGE_SHIFT) + 1); - ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob); } else { lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count); lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight); lprocfs_oh_tally_log2(&cli->cl_write_offset_hist, (starting_offset >> CFS_PAGE_SHIFT) + 1); - ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob); } + ptlrpc_lprocfs_brw(req, aa->aa_requested_nob); client_obd_list_lock(&cli->cl_loi_list_lock); @@ -2332,14 +2582,15 @@ static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi, DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight", page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight); - req->rq_interpret_reply = brw_interpret_oap; - ptlrpcd_add_req(req); + req->rq_interpret_reply = brw_interpret; + ptlrpcd_add_req(req, PSCOPE_BRW); RETURN(1); } #define LOI_DEBUG(LOI, STR, args...) 
\ CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \ - !list_empty(&(LOI)->loi_cli_item), \ + !list_empty(&(LOI)->loi_ready_item) || \ + !list_empty(&(LOI)->loi_hp_ready_item), \ (LOI)->loi_write_lop.lop_num_pending, \ !list_empty(&(LOI)->loi_write_lop.lop_urgent), \ (LOI)->loi_read_lop.lop_num_pending, \ !list_empty(&(LOI)->loi_read_lop.lop_urgent), \ @@ -2351,11 +2602,16 @@ static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi, struct lov_oinfo *osc_next_loi(struct client_obd *cli) { ENTRY; - /* first return all objects which we already know to have - * pages ready to be stuffed into rpcs */ + + /* First return objects that have blocked locks so that they + * will be flushed quickly and other clients can get the lock, + * then objects which have pages ready to be stuffed into RPCs */ + if (!list_empty(&cli->cl_loi_hp_ready_list)) + RETURN(list_entry(cli->cl_loi_hp_ready_list.next, + struct lov_oinfo, loi_hp_ready_item)); if (!list_empty(&cli->cl_loi_ready_list)) RETURN(list_entry(cli->cl_loi_ready_list.next, - struct lov_oinfo, loi_cli_item)); + struct lov_oinfo, loi_ready_item)); /* then if we have cache waiters, return all objects with queued * writes. This is especially important when many small files @@ -2379,8 +2635,28 @@ struct lov_oinfo *osc_next_loi(struct client_obd *cli) RETURN(NULL); } +static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi) +{ + struct osc_async_page *oap; + int hprpc = 0; + + if (!list_empty(&loi->loi_write_lop.lop_urgent)) { + oap = list_entry(loi->loi_write_lop.lop_urgent.next, + struct osc_async_page, oap_urgent_item); + hprpc = !!(oap->oap_async_flags & ASYNC_HP); + } + + if (!hprpc && !list_empty(&loi->loi_read_lop.lop_urgent)) { + oap = list_entry(loi->loi_read_lop.lop_urgent.next, + struct osc_async_page, oap_urgent_item); + hprpc = !!(oap->oap_async_flags & ASYNC_HP); + } + + return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc; +} + /* called with the loi list lock held */ -static void osc_check_rpcs(struct client_obd *cli) +void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) { struct lov_oinfo *loi; int rc = 0, race_counter = 0; @@ -2389,7 +2665,7 @@ static void osc_check_rpcs(struct client_obd *cli) while ((loi = osc_next_loi(cli)) != NULL) { LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli)); - if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight) + if (osc_max_rpc_in_flight(cli, loi)) break; /* attempt some read/write balancing by alternating between @@ -2399,20 +2675,41 @@ static void osc_check_rpcs(struct client_obd *cli) * partial read pending queue when we're given this object to * do io on writes while there are cache waiters */ if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) { - rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE, + rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE, &loi->loi_write_lop); - if (rc < 0) - break; + if (rc < 0) { + CERROR("Write request failed with %d\n", rc); + + /* osc_send_oap_rpc failed, mostly because of + * memory pressure. + * + * We can't break here, because if: + * - a page was submitted by osc_io_submit, so + * the page is locked; + * - no request in flight + * - no subsequent request + * the system will be in a live-lock state, + * because there is no chance to call + * osc_io_unplug() and osc_check_rpcs() any + * more. pdflush can't help in this case, + * because it might be blocked grabbing + * the page lock, as we mentioned. + * + * Anyway, continue to drain pages.
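 *
 * One concrete sequence, added here purely as an illustration of the
 * analysis above: osc_io_submit() locks a page and queues it; this send
 * path then fails with -ENOMEM; had we broken out, no RPC would be in
 * flight to complete the page, nothing would re-enter osc_check_rpcs()
 * for this object, and pdflush itself would eventually block on that
 * same page lock -- no progress could ever be made.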
*/ + /* break; */ + } + if (rc > 0) race_counter = 0; else race_counter++; } if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) { - rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ, + rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ, &loi->loi_read_lop); if (rc < 0) - break; + CERROR("Read request failed with %d\n", rc); + if (rc > 0) race_counter = 0; else @@ -2421,8 +2718,10 @@ static void osc_check_rpcs(struct client_obd *cli) /* attempt some inter-object balancing by issuing rpcs * for each object in turn */ - if (!list_empty(&loi->loi_cli_item)) - list_del_init(&loi->loi_cli_item); + if (!list_empty(&loi->loi_hp_ready_item)) + list_del_init(&loi->loi_hp_ready_item); + if (!list_empty(&loi->loi_ready_item)) + list_del_init(&loi->loi_ready_item); if (!list_empty(&loi->loi_write_item)) list_del_init(&loi->loi_write_item); if (!list_empty(&loi->loi_read_item)) @@ -2459,9 +2758,32 @@ static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw) RETURN(rc); }; +/** + * Non-blocking version of osc_enter_cache() that consumes grant only when it + * is available. + */ +int osc_enter_cache_try(const struct lu_env *env, + struct client_obd *cli, struct lov_oinfo *loi, + struct osc_async_page *oap, int transient) +{ + int has_grant; + + has_grant = cli->cl_avail_grant >= CFS_PAGE_SIZE; + if (has_grant) { + osc_consume_write_grant(cli, &oap->oap_brw_page); + if (transient) { + cli->cl_dirty_transit += CFS_PAGE_SIZE; + atomic_inc(&obd_dirty_transit_pages); + oap->oap_brw_flags |= OBD_BRW_NOCACHE; + } + } + return has_grant; +} + /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for * grant or cache space. */ -static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi, +static int osc_enter_cache(const struct lu_env *env, + struct client_obd *cli, struct lov_oinfo *loi, struct osc_async_page *oap) { struct osc_cache_waiter ocw; @@ -2481,13 +2803,10 @@ static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi, RETURN(-EDQUOT); /* Hopefully normal case - cache space and write credits available */ - if ((cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max) && - (atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) && - (cli->cl_avail_grant >= CFS_PAGE_SIZE)) { - /* account for ourselves */ - osc_consume_write_grant(cli, &oap->oap_brw_page); + if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max && + atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages && + osc_enter_cache_try(env, cli, loi, oap, 0)) RETURN(0); - } /* Make sure that there are write rpcs in flight to wait for.
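 * (as a numeric sketch of the admission test just above, assuming
 * CFS_PAGE_SIZE is 4096 and that osc_consume_write_grant() debits one
 * page of grant per call: with cl_avail_grant = 8192, the non-blocking
 * osc_enter_cache_try() admits at most two more pages before writers
 * start sleeping here)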
This * is a little silly as this object may not have any pending but @@ -2499,7 +2818,7 @@ static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi, ocw.ocw_rc = 0; loi_list_maint(cli, loi); - osc_check_rpcs(cli); + osc_check_rpcs(env, cli); client_obd_list_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "sleeping for cache space\n"); @@ -2516,12 +2835,15 @@ static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi, RETURN(-EDQUOT); } + int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm, struct lov_oinfo *loi, cfs_page_t *page, - obd_off offset, struct obd_async_page_ops *ops, - void *data, void **res) + obd_off offset, const struct obd_async_page_ops *ops, + void *data, void **res, int nocache, + struct lustre_handle *lockh) { struct osc_async_page *oap; + ENTRY; if (!page) @@ -2537,13 +2859,18 @@ int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm, oap->oap_page = page; oap->oap_obj_off = offset; + if (!client_is_remote(exp) && + cfs_capable(CFS_CAP_SYS_RESOURCE)) + oap->oap_brw_flags = OBD_BRW_NOQUOTA; + + LASSERT(!(offset & ~CFS_PAGE_MASK)); CFS_INIT_LIST_HEAD(&oap->oap_pending_item); CFS_INIT_LIST_HEAD(&oap->oap_urgent_item); CFS_INIT_LIST_HEAD(&oap->oap_rpc_item); + CFS_INIT_LIST_HEAD(&oap->oap_page_list); - oap->oap_occ.occ_interrupted = osc_occ_interrupted; - + spin_lock_init(&oap->oap_lock); CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset); RETURN(0); } @@ -2556,10 +2883,11 @@ struct osc_async_page *oap_from_cookie(void *cookie) return oap; }; -static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm, - struct lov_oinfo *loi, void *cookie, - int cmd, obd_off off, int count, - obd_flag brw_flags, enum async_flags async_flags) +int osc_queue_async_io(const struct lu_env *env, + struct obd_export *exp, struct lov_stripe_md *lsm, + struct lov_oinfo *loi, void *cookie, + int cmd, obd_off off, int count, + obd_flag brw_flags, enum async_flags async_flags) { struct client_obd *cli = &exp->exp_obd->u.cli; struct osc_async_page *oap; @@ -2579,40 +2907,45 @@ static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm, RETURN(-EBUSY); /* check if the file's owner/group is over quota */ -#ifdef HAVE_QUOTA_SUPPORT - if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)){ - struct obd_async_page_ops *ops; - struct obdo *oa; + if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) { + struct cl_object *obj; + struct cl_attr attr; /* XXX put attr into thread info */ + unsigned int qid[MAXQUOTAS]; - OBDO_ALLOC(oa); - if (oa == NULL) - RETURN(-ENOMEM); + obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj); - ops = oap->oap_caller_ops; - ops->ap_fill_obdo(oap->oap_caller_data, cmd, oa); - if (lquota_chkdq(quota_interface, cli, oa->o_uid, oa->o_gid) == - NO_QUOTA) - rc = -EDQUOT; + cl_object_attr_lock(obj); + rc = cl_object_attr_get(env, obj, &attr); + cl_object_attr_unlock(obj); - OBDO_FREE(oa); + qid[USRQUOTA] = attr.cat_uid; + qid[GRPQUOTA] = attr.cat_gid; + if (rc == 0 && + lquota_chkdq(quota_interface, cli, qid) == NO_QUOTA) + rc = -EDQUOT; if (rc) RETURN(rc); } -#endif if (loi == NULL) loi = lsm->lsm_oinfo[0]; client_obd_list_lock(&cli->cl_loi_list_lock); + LASSERT(off + count <= CFS_PAGE_SIZE); oap->oap_cmd = cmd; oap->oap_page_off = off; oap->oap_count = count; oap->oap_brw_flags = brw_flags; + /* Give a hint to OST that requests are coming from kswapd - bug19529 */ + if (libcfs_memory_pressure_get()) + oap->oap_brw_flags |= OBD_BRW_MEMALLOC; + 
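 /* A note on the locking pattern this patch introduces (a sketch, based
  * only on the hunks above and below): oap_async_flags is now written
  * exclusively under oap->oap_lock, which osc_prep_async_page() above
  * initializes with spin_lock_init() --
  *
  *         spin_lock(&oap->oap_lock);
  *         oap->oap_async_flags = async_flags;   (or |= ASYNC_READY, or = 0)
  *         spin_unlock(&oap->oap_lock);
  *
  * -- while readers such as the ASYNC_HP tests in osc_send_oap_rpc()
  * still check individual bits without taking the lock. */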
spin_lock(&oap->oap_lock); oap->oap_async_flags = async_flags; + spin_unlock(&oap->oap_lock); if (cmd & OBD_BRW_WRITE) { - rc = osc_enter_cache(cli, loi, oap); + rc = osc_enter_cache(env, cli, loi, oap); if (rc) { client_obd_list_unlock(&cli->cl_loi_list_lock); RETURN(rc); @@ -2625,7 +2958,7 @@ static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm, LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page, cmd); - osc_check_rpcs(cli); + osc_check_rpcs(env, cli); client_obd_list_unlock(&cli->cl_loi_list_lock); RETURN(0); @@ -2634,36 +2967,15 @@ static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm, /* aka (~was & now & flag), but this is more clear :) */ #define SETTING(was, now, flag) (!(was & flag) && (now & flag)) -static int osc_set_async_flags(struct obd_export *exp, - struct lov_stripe_md *lsm, - struct lov_oinfo *loi, void *cookie, - obd_flag async_flags) +int osc_set_async_flags_base(struct client_obd *cli, + struct lov_oinfo *loi, struct osc_async_page *oap, + obd_flag async_flags) { - struct client_obd *cli = &exp->exp_obd->u.cli; struct loi_oap_pages *lop; - struct osc_async_page *oap; - int rc = 0; + int flags = 0; ENTRY; - oap = oap_from_cookie(cookie); - if (IS_ERR(oap)) - RETURN(PTR_ERR(oap)); - - /* - * bug 7311: OST-side locking is only supported for liblustre for now - * (and liblustre never calls obd_set_async_flags(). I hope.), generic - * implementation has to handle case where OST-locked page was picked - * up by, e.g., ->writepage(). - */ - LASSERT(!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)); - LASSERT(!LIBLUSTRE_CLIENT); /* check that liblustre angels do fear to - * tread here. */ - - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) - RETURN(-EIO); - - if (loi == NULL) - loi = lsm->lsm_oinfo[0]; + LASSERT(!list_empty(&oap->oap_pending_item)); if (oap->oap_cmd & OBD_BRW_WRITE) { lop = &loi->loi_write_lop; @@ -2671,126 +2983,33 @@ static int osc_set_async_flags(struct obd_export *exp, lop = &loi->loi_read_lop; } - client_obd_list_lock(&cli->cl_loi_list_lock); - - if (list_empty(&oap->oap_pending_item)) - GOTO(out, rc = -EINVAL); - if ((oap->oap_async_flags & async_flags) == async_flags) - GOTO(out, rc = 0); + RETURN(0); if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY)) - oap->oap_async_flags |= ASYNC_READY; + flags |= ASYNC_READY; - if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) { - if (list_empty(&oap->oap_rpc_item)) { + if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) && + list_empty(&oap->oap_rpc_item)) { + if (oap->oap_async_flags & ASYNC_HP) list_add(&oap->oap_urgent_item, &lop->lop_urgent); - loi_list_maint(cli, loi); - } + else + list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent); + flags |= ASYNC_URGENT; + loi_list_maint(cli, loi); } + spin_lock(&oap->oap_lock); + oap->oap_async_flags |= flags; + spin_unlock(&oap->oap_lock); LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page, oap->oap_async_flags); -out: - osc_check_rpcs(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); - RETURN(rc); -} - -static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm, - struct lov_oinfo *loi, - struct obd_io_group *oig, void *cookie, - int cmd, obd_off off, int count, - obd_flag brw_flags, - obd_flag async_flags) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - struct osc_async_page *oap; - struct loi_oap_pages *lop; - int rc = 0; - ENTRY; - - oap = oap_from_cookie(cookie); - if (IS_ERR(oap)) - RETURN(PTR_ERR(oap)); - - 
if (cli->cl_import == NULL || cli->cl_import->imp_invalid) - RETURN(-EIO); - - if (!list_empty(&oap->oap_pending_item) || - !list_empty(&oap->oap_urgent_item) || - !list_empty(&oap->oap_rpc_item)) - RETURN(-EBUSY); - - if (loi == NULL) - loi = lsm->lsm_oinfo[0]; - - client_obd_list_lock(&cli->cl_loi_list_lock); - - oap->oap_cmd = cmd; - oap->oap_page_off = off; - oap->oap_count = count; - oap->oap_brw_flags = brw_flags; - oap->oap_async_flags = async_flags; - - if (cmd & OBD_BRW_WRITE) - lop = &loi->loi_write_lop; - else - lop = &loi->loi_read_lop; - - list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group); - if (oap->oap_async_flags & ASYNC_GROUP_SYNC) { - oap->oap_oig = oig; - rc = oig_add_one(oig, &oap->oap_occ); - } - - LOI_DEBUG(loi, "oap %p page %p on group pending: rc %d\n", - oap, oap->oap_page, rc); - - client_obd_list_unlock(&cli->cl_loi_list_lock); - - RETURN(rc); -} - -static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi, - struct loi_oap_pages *lop, int cmd) -{ - struct list_head *pos, *tmp; - struct osc_async_page *oap; - - list_for_each_safe(pos, tmp, &lop->lop_pending_group) { - oap = list_entry(pos, struct osc_async_page, oap_pending_item); - list_del(&oap->oap_pending_item); - osc_oap_to_pending(oap); - } - loi_list_maint(cli, loi); -} - -static int osc_trigger_group_io(struct obd_export *exp, - struct lov_stripe_md *lsm, - struct lov_oinfo *loi, - struct obd_io_group *oig) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - ENTRY; - - if (loi == NULL) - loi = lsm->lsm_oinfo[0]; - - client_obd_list_lock(&cli->cl_loi_list_lock); - - osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE); - osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ); - - osc_check_rpcs(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); - RETURN(0); } -static int osc_teardown_async_page(struct obd_export *exp, - struct lov_stripe_md *lsm, - struct lov_oinfo *loi, void *cookie) +int osc_teardown_async_page(struct obd_export *exp, + struct lov_stripe_md *lsm, + struct lov_oinfo *loi, void *cookie) { struct client_obd *cli = &exp->exp_obd->u.cli; struct loi_oap_pages *lop; @@ -2821,67 +3040,71 @@ static int osc_teardown_async_page(struct obd_export *exp, if (!list_empty(&oap->oap_urgent_item)) { list_del_init(&oap->oap_urgent_item); - oap->oap_async_flags &= ~ASYNC_URGENT; + spin_lock(&oap->oap_lock); + oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP); + spin_unlock(&oap->oap_lock); } if (!list_empty(&oap->oap_pending_item)) { list_del_init(&oap->oap_pending_item); lop_update_pending(cli, lop, oap->oap_cmd, -1); } loi_list_maint(cli, loi); - LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page); out: client_obd_list_unlock(&cli->cl_loi_list_lock); RETURN(rc); } -static void osc_set_data_with_check(struct lustre_handle *lockh, void *data, - int flags) +static void osc_set_lock_data_with_check(struct ldlm_lock *lock, + struct ldlm_enqueue_info *einfo, + int flags) { - struct ldlm_lock *lock = ldlm_handle2lock(lockh); + void *data = einfo->ei_cbdata; + + LASSERT(lock != NULL); + LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl); + LASSERT(lock->l_resource->lr_type == einfo->ei_type); + LASSERT(lock->l_completion_ast == einfo->ei_cb_cp); + LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl); - if (lock == NULL) { - CERROR("lockh %p, data %p - client evicted?\n", lockh, data); - return; - } lock_res_and_lock(lock); -#if defined (__KERNEL__) && defined (__linux__) - /* Liang XXX: Darwin and Winnt checking should be added */ - if 
(lock->l_ast_data && lock->l_ast_data != data) { - struct inode *new_inode = data; - struct inode *old_inode = lock->l_ast_data; - if (!(old_inode->i_state & I_FREEING)) - LDLM_ERROR(lock, "inconsistent l_ast_data found"); - LASSERTF(old_inode->i_state & I_FREEING, - "Found existing inode %p/%lu/%u state %lu in lock: " - "setting data to %p/%lu/%u\n", old_inode, - old_inode->i_ino, old_inode->i_generation, - old_inode->i_state, - new_inode, new_inode->i_ino, new_inode->i_generation); - } -#endif + spin_lock(&osc_ast_guard); + LASSERT(lock->l_ast_data == NULL || lock->l_ast_data == data); lock->l_ast_data = data; - lock->l_flags |= (flags & LDLM_FL_NO_LRU); + spin_unlock(&osc_ast_guard); unlock_res_and_lock(lock); - LDLM_LOCK_PUT(lock); +} + +static void osc_set_data_with_check(struct lustre_handle *lockh, + struct ldlm_enqueue_info *einfo, + int flags) +{ + struct ldlm_lock *lock = ldlm_handle2lock(lockh); + + if (lock != NULL) { + osc_set_lock_data_with_check(lock, einfo, flags); + LDLM_LOCK_PUT(lock); + } else + CERROR("lockh %p, data %p - client evicted?\n", + lockh, einfo->ei_cbdata); } static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, ldlm_iterator_t replace, void *data) { - struct ldlm_res_id res_id = { .name = {0} }; + struct ldlm_res_id res_id; struct obd_device *obd = class_exp2obd(exp); - res_id.name[0] = lsm->lsm_object_id; - res_id.name[2] = lsm->lsm_object_gr; - + osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_gr, &res_id); ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data); return 0; } -static int osc_enqueue_fini(struct ptlrpc_request *req, struct obd_info *oinfo, - int intent, int rc) +static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb, + obd_enqueue_update_f upcall, void *cookie, + int *flags, int rc) { + int intent = *flags & LDLM_FL_HAS_INTENT; ENTRY; if (intent) { @@ -2898,50 +3121,103 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct obd_info *oinfo, } if ((intent && rc == ELDLM_LOCK_ABORTED) || !rc) { + *flags |= LDLM_FL_LVB_READY; CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n", - oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_size, - oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_blocks, - oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_mtime); + lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime); } /* Call the update callback. */ - rc = oinfo->oi_cb_up(oinfo, rc); + rc = (*upcall)(cookie, rc); RETURN(rc); } -static int osc_enqueue_interpret(struct ptlrpc_request *req, +static int osc_enqueue_interpret(const struct lu_env *env, + struct ptlrpc_request *req, struct osc_enqueue_args *aa, int rc) { - int intent = aa->oa_oi->oi_flags & LDLM_FL_HAS_INTENT; - struct lov_stripe_md *lsm = aa->oa_oi->oi_md; struct ldlm_lock *lock; + struct lustre_handle handle; + __u32 mode; + + /* Make a local copy of a lock handle and a mode, because aa->oa_* + * might be freed anytime after lock upcall has been called. */ + lustre_handle_copy(&handle, aa->oa_lockh); + mode = aa->oa_ei->ei_mode; /* ldlm_cli_enqueue is holding a reference on the lock, so it must * be valid. */ - lock = ldlm_handle2lock(aa->oa_oi->oi_lockh); + lock = ldlm_handle2lock(&handle); + + /* Take an additional reference so that a blocking AST that + * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed + * to arrive after an upcall has been executed by + * osc_enqueue_fini(). */ + ldlm_lock_addref(&handle, mode); /* Complete obtaining the lock procedure. 
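 * (a note on the reference discipline, as this diff reads: the
 * ldlm_lock_addref() taken just above pairs with the second, unconditional
 * ldlm_lock_decref() further below, so a blocking AST that
 * ldlm_cli_enqueue_fini() might post for a failed lock cannot be delivered
 * before osc_enqueue_fini() has executed the upcall)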
*/ rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1, - aa->oa_ei->ei_mode, - &aa->oa_oi->oi_flags, - &lsm->lsm_oinfo[0]->loi_lvb, - sizeof(lsm->lsm_oinfo[0]->loi_lvb), - lustre_swab_ost_lvb, - aa->oa_oi->oi_lockh, rc); - + mode, aa->oa_flags, aa->oa_lvb, + sizeof(*aa->oa_lvb), &handle, rc); /* Complete osc stuff. */ - rc = osc_enqueue_fini(req, aa->oa_oi, intent, rc); + rc = osc_enqueue_fini(req, aa->oa_lvb, + aa->oa_upcall, aa->oa_cookie, aa->oa_flags, rc); + + OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10); /* Release the lock for async request. */ - if (lustre_handle_is_used(aa->oa_oi->oi_lockh) && rc == ELDLM_OK) - ldlm_lock_decref(aa->oa_oi->oi_lockh, aa->oa_ei->ei_mode); + if (lustre_handle_is_used(&handle) && rc == ELDLM_OK) + /* + * Releases a reference taken by ldlm_cli_enqueue(), if it is + * not already released by + * ldlm_cli_enqueue_fini()->failed_lock_cleanup() + */ + ldlm_lock_decref(&handle, mode); LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n", - aa->oa_oi->oi_lockh, req, aa); + aa->oa_lockh, req, aa); + ldlm_lock_decref(&handle, mode); LDLM_LOCK_PUT(lock); return rc; } +void osc_update_enqueue(struct lustre_handle *lov_lockhp, + struct lov_oinfo *loi, int flags, + struct ost_lvb *lvb, __u32 mode, int rc) +{ + if (rc == ELDLM_OK) { + struct ldlm_lock *lock = ldlm_handle2lock(lov_lockhp); + __u64 tmp; + + LASSERT(lock != NULL); + loi->loi_lvb = *lvb; + tmp = loi->loi_lvb.lvb_size; + /* Extend KMS up to the end of this lock and no further + * A lock on [x,y] means a KMS of up to y + 1 bytes! */ + if (tmp > lock->l_policy_data.l_extent.end) + tmp = lock->l_policy_data.l_extent.end + 1; + if (tmp >= loi->loi_kms) { + LDLM_DEBUG(lock, "lock acquired, setting rss="LPU64 + ", kms="LPU64, loi->loi_lvb.lvb_size, tmp); + loi_kms_set(loi, tmp); + } else { + LDLM_DEBUG(lock, "lock acquired, setting rss=" + LPU64"; leaving kms="LPU64", end="LPU64, + loi->loi_lvb.lvb_size, loi->loi_kms, + lock->l_policy_data.l_extent.end); + } + ldlm_lock_allow_match(lock); + LDLM_LOCK_PUT(lock); + } else if (rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT)) { + loi->loi_lvb = *lvb; + CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving" + " kms="LPU64"\n", loi->loi_lvb.lvb_size, loi->loi_kms); + rc = ELDLM_OK; + } +} +EXPORT_SYMBOL(osc_update_enqueue); + +struct ptlrpc_request_set *PTLRPCD_SET = (void *)1; + /* When enqueuing asynchronously, locks are not ordered, we can obtain a lock * from the 2nd OSC before a lock from the 1st one. This does not deadlock with * other synchronous requests, however keeping some locks and trying to obtain @@ -2949,28 +3225,33 @@ static int osc_enqueue_interpret(struct ptlrpc_request *req, * when other sync requests do not get released lock from a client, the client * is excluded from the cluster -- such scenarios make life difficult, so * release locks just after they are obtained.
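 *
 * (a worked example for osc_update_enqueue() above, added for clarity:
 * if the OST reports lvb_size = 2097152 but the granted lock only covers
 * the extent [0, 1048575], tmp is clamped to l_extent.end + 1 = 1048576
 * and kms is raised to at most that -- the client may only trust a
 * "known minimum size" as far as the byte range it actually holds under
 * lock)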
*/ -static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo, - struct ldlm_enqueue_info *einfo, - struct ptlrpc_request_set *rqset) +int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, + int *flags, ldlm_policy_data_t *policy, + struct ost_lvb *lvb, int kms_valid, + obd_enqueue_update_f upcall, void *cookie, + struct ldlm_enqueue_info *einfo, + struct lustre_handle *lockh, + struct ptlrpc_request_set *rqset, int async) { - struct ldlm_res_id res_id = { .name = {0} }; struct obd_device *obd = exp->exp_obd; struct ptlrpc_request *req = NULL; - int intent = oinfo->oi_flags & LDLM_FL_HAS_INTENT; + int intent = *flags & LDLM_FL_HAS_INTENT; ldlm_mode_t mode; int rc; ENTRY; - res_id.name[0] = oinfo->oi_md->lsm_object_id; - res_id.name[2] = oinfo->oi_md->lsm_object_gr; - /* Filesystem lock extents are extended to page boundaries so that * dealing with the page cache is a little smoother. */ - oinfo->oi_policy.l_extent.start -= - oinfo->oi_policy.l_extent.start & ~CFS_PAGE_MASK; - oinfo->oi_policy.l_extent.end |= ~CFS_PAGE_MASK; + policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; + policy->l_extent.end |= ~CFS_PAGE_MASK; - if (oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid == 0) + /* + * kms is not valid when either object is completely fresh (so that no + * locks are cached), or object was evicted. In the latter case cached + * lock cannot be used, because it would prime inode state with + * potentially stale LVB. + */ + if (!kms_valid) goto no_match; /* Next, search for already existing extent locks that will cover us */ @@ -2989,32 +3270,37 @@ static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo, if (einfo->ei_mode == LCK_PR) mode |= LCK_PW; mode = ldlm_lock_match(obd->obd_namespace, - oinfo->oi_flags | LDLM_FL_LVB_READY, &res_id, - einfo->ei_type, &oinfo->oi_policy, mode, - oinfo->oi_lockh); + *flags | LDLM_FL_LVB_READY, res_id, + einfo->ei_type, policy, mode, lockh, 0); if (mode) { - /* addref the lock only if not async requests and PW lock is - * matched whereas we asked for PR. */ - if (!rqset && einfo->ei_mode != mode) - ldlm_lock_addref(oinfo->oi_lockh, LCK_PR); - osc_set_data_with_check(oinfo->oi_lockh, einfo->ei_cbdata, - oinfo->oi_flags); - if (intent) { - /* I would like to be able to ASSERT here that rss <= - * kms, but I can't, for reasons which are explained in - * lov_enqueue() */ - } - - /* We already have a lock, and it's referenced */ - oinfo->oi_cb_up(oinfo, ELDLM_OK); + struct ldlm_lock *matched = ldlm_handle2lock(lockh); + + if (matched->l_ast_data == NULL || + matched->l_ast_data == einfo->ei_cbdata) { + /* addref the lock only if not async requests and PW + * lock is matched whereas we asked for PR. */ + if (!rqset && einfo->ei_mode != mode) + ldlm_lock_addref(lockh, LCK_PR); + osc_set_lock_data_with_check(matched, einfo, *flags); + if (intent) { + /* I would like to be able to ASSERT here that + * rss <= kms, but I can't, for reasons which + * are explained in lov_enqueue() */ + } - /* For async requests, decref the lock. */ - if (einfo->ei_mode != mode) - ldlm_lock_decref(oinfo->oi_lockh, LCK_PW); - else if (rqset) - ldlm_lock_decref(oinfo->oi_lockh, einfo->ei_mode); + /* We already have a lock, and it's referenced */ + (*upcall)(cookie, ELDLM_OK); - RETURN(ELDLM_OK); + /* For async requests, decref the lock. 
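 * (one reading of the two branches below, offered as interpretation
 * rather than taken from the original comments: a PR request may match an
 * existing PW lock, leaving a PW reference from ldlm_lock_match();
 * synchronous callers converted it to a PR reference via the addref above,
 * so the PW one is dropped here, while async callers drop the matched
 * reference outright because the upcall has already run)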
*/ + if (einfo->ei_mode != mode) + ldlm_lock_decref(lockh, LCK_PW); + else if (rqset) + ldlm_lock_decref(lockh, einfo->ei_mode); + LDLM_LOCK_PUT(matched); + RETURN(ELDLM_OK); + } else + ldlm_lock_decref(lockh, mode); + LDLM_LOCK_PUT(matched); } no_match: @@ -3030,56 +3316,76 @@ static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo, RETURN(rc); req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, - sizeof(oinfo->oi_md->lsm_oinfo[0]->loi_lvb)); + sizeof *lvb); ptlrpc_request_set_replen(req); } /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */ - oinfo->oi_flags &= ~LDLM_FL_BLOCK_GRANTED; + *flags &= ~LDLM_FL_BLOCK_GRANTED; - rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, - &oinfo->oi_policy, &oinfo->oi_flags, - &oinfo->oi_md->lsm_oinfo[0]->loi_lvb, - sizeof(oinfo->oi_md->lsm_oinfo[0]->loi_lvb), - lustre_swab_ost_lvb, oinfo->oi_lockh, - rqset ? 1 : 0); + rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb, + sizeof(*lvb), lockh, async); if (rqset) { if (!rc) { struct osc_enqueue_args *aa; CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args)); - aa = (struct osc_enqueue_args *)&req->rq_async_args; - aa->oa_oi = oinfo; + aa = ptlrpc_req_async_args(req); aa->oa_ei = einfo; aa->oa_exp = exp; - - req->rq_interpret_reply = osc_enqueue_interpret; - ptlrpc_set_add_req(rqset, req); + aa->oa_flags = flags; + aa->oa_upcall = upcall; + aa->oa_cookie = cookie; + aa->oa_lvb = lvb; + aa->oa_lockh = lockh; + + req->rq_interpret_reply = + (ptlrpc_interpterer_t)osc_enqueue_interpret; + if (rqset == PTLRPCD_SET) + ptlrpcd_add_req(req, PSCOPE_OTHER); + else + ptlrpc_set_add_req(rqset, req); } else if (intent) { ptlrpc_req_finished(req); } RETURN(rc); } - rc = osc_enqueue_fini(req, oinfo, intent, rc); + rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, rc); if (intent) ptlrpc_req_finished(req); RETURN(rc); } -static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm, - __u32 type, ldlm_policy_data_t *policy, __u32 mode, - int *flags, void *data, struct lustre_handle *lockh) +static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo, + struct ldlm_enqueue_info *einfo, + struct ptlrpc_request_set *rqset) +{ + struct ldlm_res_id res_id; + int rc; + ENTRY; + + osc_build_res_name(oinfo->oi_md->lsm_object_id, + oinfo->oi_md->lsm_object_gr, &res_id); + + rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy, + &oinfo->oi_md->lsm_oinfo[0]->loi_lvb, + oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid, + oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh, + rqset, rqset != NULL); + RETURN(rc); +} + +int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, + __u32 type, ldlm_policy_data_t *policy, __u32 mode, + int *flags, void *data, struct lustre_handle *lockh, + int unref) { - struct ldlm_res_id res_id = { .name = {0} }; struct obd_device *obd = exp->exp_obd; int lflags = *flags; ldlm_mode_t rc; ENTRY; - res_id.name[0] = lsm->lsm_object_id; - res_id.name[2] = lsm->lsm_object_gr; - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH)) RETURN(-EIO); @@ -3095,10 +3401,11 @@ static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm, rc = mode; if (mode == LCK_PR) rc |= LCK_PW; - rc = ldlm_lock_match(obd->obd_namespace, lflags | LDLM_FL_LVB_READY, - &res_id, type, policy, rc, lockh); + rc = ldlm_lock_match(obd->obd_namespace, lflags, + res_id, type, policy, rc, lockh, unref); if (rc) { - osc_set_data_with_check(lockh, data, lflags); + if (data != NULL) + osc_set_data_with_check(lockh, data, lflags); if (!(lflags & 
LDLM_FL_TEST_LOCK) && mode != rc) { ldlm_lock_addref(lockh, LCK_PR); ldlm_lock_decref(lockh, LCK_PW); @@ -3108,8 +3415,7 @@ static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm, RETURN(rc); } -static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md, - __u32 mode, struct lustre_handle *lockh) +int osc_cancel_base(struct lustre_handle *lockh, __u32 mode) { ENTRY; @@ -3121,43 +3427,49 @@ static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md, RETURN(0); } +static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md, + __u32 mode, struct lustre_handle *lockh) +{ + ENTRY; + RETURN(osc_cancel_base(lockh, mode)); +} + static int osc_cancel_unused(struct obd_export *exp, struct lov_stripe_md *lsm, int flags, void *opaque) { struct obd_device *obd = class_exp2obd(exp); - struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL; + struct ldlm_res_id res_id, *resp = NULL; if (lsm != NULL) { - res_id.name[0] = lsm->lsm_object_id; - res_id.name[2] = lsm->lsm_object_gr; - resp = &res_id; + resp = osc_build_res_name(lsm->lsm_object_id, + lsm->lsm_object_gr, &res_id); } return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque); } -static int osc_join_lru(struct obd_export *exp, - struct lov_stripe_md *lsm, int join) -{ - struct obd_device *obd = class_exp2obd(exp); - struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL; - - if (lsm != NULL) { - res_id.name[0] = lsm->lsm_object_id; - res_id.name[2] = lsm->lsm_object_gr; - resp = &res_id; - } - - return ldlm_cli_join_lru(obd->obd_namespace, resp, join); -} - -static int osc_statfs_interpret(struct ptlrpc_request *req, +static int osc_statfs_interpret(const struct lu_env *env, + struct ptlrpc_request *req, struct osc_async_args *aa, int rc) { + struct client_obd *cli = &req->rq_import->imp_obd->u.cli; struct obd_statfs *msfs; + __u64 used; ENTRY; + if (rc == -EBADR) + /* The request has in fact never been sent + * due to issues at a higher level (LOV). + * Exit immediately since the caller is + * aware of the problem and takes care + * of the clean up */ + RETURN(rc); + + if ((rc == -ENOTCONN || rc == -EAGAIN) && + (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) + GOTO(out, rc = 0); + if (rc != 0) GOTO(out, rc); @@ -3166,6 +3478,44 @@ static int osc_statfs_interpret(struct ptlrpc_request *req, GOTO(out, rc = -EPROTO); } + /* Reinitialize the RDONLY and DEGRADED flags at the client + * on each statfs, so they don't stay set permanently. */ + spin_lock(&cli->cl_oscc.oscc_lock); + + if (unlikely(msfs->os_state & OS_STATE_DEGRADED)) + cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED; + else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED)) + cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED; + + if (unlikely(msfs->os_state & OS_STATE_READONLY)) + cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY; + else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY)) + cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY; + + /* Add a bit of hysteresis so this flag isn't continually flapping, + * and ensure that new files don't get extremely fragmented due to + * only a small amount of available space in the filesystem. + * We want to set the NOSPC flag when there is less than ~0.1% free + * and clear it when there is at least ~0.2% free space, so: + * avail < ~0.1% max max = avail + used + * 1025 * avail < avail + used used = blocks - free + * 1024 * avail < used + * 1024 * avail < blocks - free + * avail < ((blocks - free) >> 10) + * + * On a very large disk, say 16TB, 0.1% will be 16 GB. We don't want to
We don't want to + * lose that amount of space so in those cases we report no space left + * if their is less than 1 GB left. */ + used = min_t(__u64,(msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30); + if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) && + ((msfs->os_ffree < 32) || (msfs->os_bavail < used)))) + cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC; + else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) && + (msfs->os_ffree > 64) && (msfs->os_bavail > (used << 1)))) + cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_NOSPC; + + spin_unlock(&cli->cl_oscc.oscc_lock); + *aa->aa_oi->oi_osfs = *msfs; out: rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc); @@ -3196,11 +3546,18 @@ static int osc_statfs_async(struct obd_device *obd, struct obd_info *oinfo, RETURN(rc); } ptlrpc_request_set_replen(req); - req->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249 + req->rq_request_portal = OST_CREATE_PORTAL; + ptlrpc_at_set_req_timeout(req); + + if (oinfo->oi_flags & OBD_STATFS_NODELAY) { + /* procfs requests not want stat in wait for avoid deadlock */ + req->rq_no_resend = 1; + req->rq_no_delay = 1; + } - req->rq_interpret_reply = osc_statfs_interpret; + req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret; CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args)); - aa = (struct osc_async_args *)&req->rq_async_args; + aa = ptlrpc_req_async_args(req); aa->aa_oi = oinfo; ptlrpc_set_add_req(rqset, req); @@ -3208,20 +3565,33 @@ static int osc_statfs_async(struct obd_device *obd, struct obd_info *oinfo, } static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs, - __u64 max_age) + __u64 max_age, __u32 flags) { struct obd_statfs *msfs; struct ptlrpc_request *req; + struct obd_import *imp = NULL; int rc; ENTRY; + /*Since the request might also come from lprocfs, so we need + *sync this with client_disconnect_export Bug15684*/ + down_read(&obd->u.cli.cl_sem); + if (obd->u.cli.cl_import) + imp = class_import_get(obd->u.cli.cl_import); + up_read(&obd->u.cli.cl_sem); + if (!imp) + RETURN(-ENODEV); + /* We could possibly pass max_age in the request (as an absolute * timestamp or a "seconds.usec ago") so the target can avoid doing * extra calls into the filesystem if that isn't necessary (e.g. * during mount that would help a bit). Having relative timestamps * is not so great if request processing is slow, while absolute * timestamps are not ideal because they need time synchronization. 
*/ - req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS); + req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS); + + class_import_put(imp); + if (req == NULL) RETURN(-ENOMEM); @@ -3231,7 +3601,14 @@ static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs, RETURN(rc); } ptlrpc_request_set_replen(req); - req->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249 + req->rq_request_portal = OST_CREATE_PORTAL; + ptlrpc_at_set_req_timeout(req); + + if (flags & OBD_STATFS_NODELAY) { + /* procfs requests should not wait, to avoid deadlock */ + req->rq_no_resend = 1; + req->rq_no_delay = 1; + } rc = ptlrpc_queue_wait(req); if (rc) @@ -3258,29 +3635,45 @@ static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs, */ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) { - struct lov_user_md lum, *lumk; + /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */ + struct lov_user_md_v3 lum, *lumk; + struct lov_user_ost_data_v1 *lmm_objects; int rc = 0, lum_size; ENTRY; if (!lsm) RETURN(-ENODATA); - if (copy_from_user(&lum, lump, sizeof(lum))) + /* we only need the header part from user space to get lmm_magic and + * lmm_stripe_count, (the header part is common to v1 and v3) */ + lum_size = sizeof(struct lov_user_md_v1); + if (copy_from_user(&lum, lump, lum_size)) RETURN(-EFAULT); - if (lum.lmm_magic != LOV_USER_MAGIC) + if ((lum.lmm_magic != LOV_USER_MAGIC_V1) && + (lum.lmm_magic != LOV_USER_MAGIC_V3)) RETURN(-EINVAL); + /* lov_user_md_vX and lov_mds_md_vX must have the same size */ + LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1)); + LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3)); + LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0])); + + /* we can use lov_mds_md_size() to compute lum_size + * because lov_user_md_vX and lov_mds_md_vX have the same size */ if (lum.lmm_stripe_count > 0) { - lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]); + lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic); OBD_ALLOC(lumk, lum_size); if (!lumk) RETURN(-ENOMEM); - lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id; - lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr; + if (lum.lmm_magic == LOV_USER_MAGIC_V1) + lmm_objects = &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]); + else + lmm_objects = &(lumk->lmm_objects[0]); + lmm_objects->l_object_id = lsm->lsm_object_id; } else { - lum_size = sizeof(lum); + lum_size = lov_mds_md_size(0, lum.lmm_magic); lumk = &lum; } @@ -3372,6 +3765,9 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, err = lquota_poll_check(quota_interface, exp, (struct if_quotacheck *)karg); GOTO(out, err); + case OBD_IOC_PING_TARGET: + err = ptlrpc_obd_ping(obd); + GOTO(out, err); default: CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", cmd, cfs_curproc_comm()); @@ -3383,7 +3779,8 @@ out: } static int osc_get_info(struct obd_export *exp, obd_count keylen, - void *key, __u32 *vallen, void *val) + void *key, __u32 *vallen, void *val, + struct lov_stripe_md *lsm) { ENTRY; if (!vallen || !val) @@ -3416,6 +3813,7 @@ static int osc_get_info(struct obd_export *exp, obd_count keylen, tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); memcpy(tmp, key, keylen); + req->rq_no_delay = req->rq_no_resend = 1; ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); if (rc) @@ -3429,30 +3827,68 @@ static int osc_get_info(struct obd_export *exp, obd_count keylen, out: ptlrpc_req_finished(req);
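 /* The KEY_FIEMAP branch that follows works much like KEY_LAST_ID above;
  * a sketch of its buffer contract, as assumed from the code: the caller's
  * val buffer is copied into the request, the server-side reply buffer is
  * sized to the same *vallen, and the returned ll_user_fiemap is copied
  * back over val -- so val must already be large enough for the extent
  * array the caller expects. */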
RETURN(rc); + } else if (KEY_IS(KEY_FIEMAP)) { + struct ptlrpc_request *req; + struct ll_user_fiemap *reply; + char *tmp; + int rc; + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), + &RQF_OST_GET_INFO_FIEMAP); + if (req == NULL) + RETURN(-ENOMEM); + + req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, + RCL_CLIENT, keylen); + req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, + RCL_CLIENT, *vallen); + req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, + RCL_SERVER, *vallen); + + rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO); + if (rc) { + ptlrpc_request_free(req); + RETURN(rc); + } + + tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY); + memcpy(tmp, key, keylen); + tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL); + memcpy(tmp, val, *vallen); + + ptlrpc_request_set_replen(req); + rc = ptlrpc_queue_wait(req); + if (rc) + GOTO(out1, rc); + + reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL); + if (reply == NULL) + GOTO(out1, rc = -EPROTO); + + memcpy(val, reply, *vallen); + out1: + ptlrpc_req_finished(req); + + RETURN(rc); } + RETURN(-EINVAL); } -static int osc_setinfo_mds_conn_interpret(struct ptlrpc_request *req, - void *aa, int rc) +static int osc_setinfo_mds_connect_import(struct obd_import *imp) { struct llog_ctxt *ctxt; - struct obd_import *imp = req->rq_import; + int rc = 0; ENTRY; - if (rc != 0) - RETURN(rc); - ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT); if (ctxt) { - if (rc == 0) - rc = llog_initiator_connect(ctxt); - else - CERROR("cannot establish connection for " - "ctxt %p: %d\n", ctxt, rc); + rc = llog_initiator_connect(ctxt); + llog_ctxt_put(ctxt); + } else { + /* XXX return an error? skip setting below flags? */ } - llog_ctxt_put(ctxt); spin_lock(&imp->imp_lock); imp->imp_server_timeout = 1; imp->imp_pingable = 1; @@ -3462,6 +3898,17 @@ static int osc_setinfo_mds_conn_interpret(struct ptlrpc_request *req, RETURN(rc); } +static int osc_setinfo_mds_conn_interpret(const struct lu_env *env, + struct ptlrpc_request *req, + void *aa, int rc) +{ + ENTRY; + if (rc != 0) + RETURN(rc); + + RETURN(osc_setinfo_mds_connect_import(req->rq_import)); +} + static int osc_set_info_async(struct obd_export *exp, obd_count keylen, void *key, obd_count vallen, void *val, struct ptlrpc_request_set *set) @@ -3476,11 +3923,24 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10); if (KEY_IS(KEY_NEXT_ID)) { + obd_id new_val; + struct osc_creator *oscc = &obd->u.cli.cl_oscc; + if (vallen != sizeof(obd_id)) RETURN(-ERANGE); if (val == NULL) RETURN(-EINVAL); - obd->u.cli.cl_oscc.oscc_next_id = *((obd_id*)val) + 1; + + if (vallen != sizeof(obd_id)) + RETURN(-EINVAL); + + /* avoid a race between allocating a new object and setting + * the next id from the ll_sync thread */ + spin_lock(&oscc->oscc_lock); + new_val = *((obd_id*)val) + 1; + if (new_val > oscc->oscc_next_id) + oscc->oscc_next_id = new_val; + spin_unlock(&oscc->oscc_lock); CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n", exp->exp_obd->obd_name, obd->u.cli.cl_oscc.oscc_next_id); @@ -3488,14 +3948,6 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, RETURN(0); } - if (KEY_IS("unlinked")) { - struct osc_creator *oscc = &obd->u.cli.cl_oscc; - spin_lock(&oscc->oscc_lock); - oscc->oscc_flags &= ~OSCC_FLAG_NOSPC; - spin_unlock(&oscc->oscc_lock); - RETURN(0); - } - if (KEY_IS(KEY_INIT_RECOV)) { if (vallen != sizeof(int)) RETURN(-EINVAL); @@ -3508,19 +3960,24 @@ static int osc_set_info_async(struct
obd_export *exp, obd_count keylen, RETURN(0); } - if (KEY_IS("checksum")) { + if (KEY_IS(KEY_CHECKSUM)) { if (vallen != sizeof(int)) RETURN(-EINVAL); exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0; RETURN(0); } + if (KEY_IS(KEY_SPTLRPC_CONF)) { + sptlrpc_conf_client_adapt(obd); + RETURN(0); + } + if (KEY_IS(KEY_FLUSH_CTX)) { sptlrpc_import_flush_my_ctx(imp); RETURN(0); } - if (!set) + if (!set && !KEY_IS(KEY_GRANT_SHRINK)) RETURN(-EINVAL); /* We pass all other commands directly to OST. Since nobody calls osc @@ -3530,8 +3987,11 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, Even if something bad goes through, we'd get a -EINVAL from OST anyway. */ + if (KEY_IS(KEY_GRANT_SHRINK)) + req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO); + else + req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO); - req = ptlrpc_request_alloc(imp, &RQF_OST_SET_INFO); if (req == NULL) RETURN(-ENOMEM); @@ -3555,13 +4015,32 @@ static int osc_set_info_async(struct obd_export *exp, obd_count keylen, oscc->oscc_oa.o_gr = (*(__u32 *)val); oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP; - LASSERT(oscc->oscc_oa.o_gr > 0); + LASSERT_MDS_GROUP(oscc->oscc_oa.o_gr); + req->rq_no_delay = req->rq_no_resend = 1; req->rq_interpret_reply = osc_setinfo_mds_conn_interpret; + } else if (KEY_IS(KEY_GRANT_SHRINK)) { + struct osc_grant_args *aa; + struct obdo *oa; + + CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); + aa = ptlrpc_req_async_args(req); + OBD_ALLOC_PTR(oa); + if (!oa) { + ptlrpc_req_finished(req); + RETURN(-ENOMEM); + } + *oa = ((struct ost_body *)val)->oa; + aa->aa_oa = oa; + req->rq_interpret_reply = osc_shrink_grant_interpret; } ptlrpc_request_set_replen(req); - ptlrpc_set_add_req(set, req); - ptlrpc_check_set(set); + if (!KEY_IS(KEY_GRANT_SHRINK)) { + LASSERT(set != NULL); + ptlrpc_set_add_req(set, req); + ptlrpc_check_set(NULL, set); + } else + ptlrpcd_add_req(req, PSCOPE_OTHER); RETURN(0); } @@ -3572,42 +4051,77 @@ static struct llog_operations osc_size_repl_logops = { }; static struct llog_operations osc_mds_ost_orig_logops; -static int osc_llog_init(struct obd_device *obd, int group, - struct obd_device *tgt, int count, - struct llog_catid *catid, struct obd_uuid *uuid) + +static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg, + struct obd_device *tgt, struct llog_catid *catid) { int rc; ENTRY; - LASSERT(group == OBD_LLOG_GROUP); - spin_lock(&obd->obd_dev_lock); - if (osc_mds_ost_orig_logops.lop_setup != llog_obd_origin_setup) { - osc_mds_ost_orig_logops = llog_lvfs_ops; - osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup; - osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup; - osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add; - osc_mds_ost_orig_logops.lop_connect = llog_origin_connect; - } - spin_unlock(&obd->obd_dev_lock); - - rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, count, + + rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1, &catid->lci_logid, &osc_mds_ost_orig_logops); if (rc) { CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n"); - GOTO (out, rc); + GOTO(out, rc); } - rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, count, + rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1, NULL, &osc_size_repl_logops); - if (rc) + if (rc) { + struct llog_ctxt *ctxt = + llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT); + if (ctxt) + llog_cleanup(ctxt); CERROR("failed LLOG_SIZE_REPL_CTXT\n"); + } + GOTO(out, rc); out: if (rc) { - CERROR("osc '%s' tgt '%s' cnt %d catid %p rc=%d\n", - 
@@ -3572,42 +4051,77 @@ static struct llog_operations osc_size_repl_logops = {
 };
 
 static struct llog_operations osc_mds_ost_orig_logops;
-static int osc_llog_init(struct obd_device *obd, int group,
-                         struct obd_device *tgt, int count,
-                         struct llog_catid *catid, struct obd_uuid *uuid)
+
+static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+                           struct obd_device *tgt, struct llog_catid *catid)
 {
         int rc;
         ENTRY;
 
-        LASSERT(group == OBD_LLOG_GROUP);
-        spin_lock(&obd->obd_dev_lock);
-        if (osc_mds_ost_orig_logops.lop_setup != llog_obd_origin_setup) {
-                osc_mds_ost_orig_logops = llog_lvfs_ops;
-                osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
-                osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
-                osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
-                osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
-        }
-        spin_unlock(&obd->obd_dev_lock);
-
-        rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, count,
+
+        rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1,
                         &catid->lci_logid, &osc_mds_ost_orig_logops);
         if (rc) {
                 CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n");
-                GOTO (out, rc);
+                GOTO(out, rc);
         }
 
-        rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, count,
+        rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1,
                         NULL, &osc_size_repl_logops);
-        if (rc)
+        if (rc) {
+                struct llog_ctxt *ctxt =
+                        llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+                if (ctxt)
+                        llog_cleanup(ctxt);
                 CERROR("failed LLOG_SIZE_REPL_CTXT\n");
+        }
+
+        GOTO(out, rc);
 out:
         if (rc) {
-                CERROR("osc '%s' tgt '%s' cnt %d catid %p rc=%d\n",
-                       obd->obd_name, tgt->obd_name, count, catid, rc);
+                CERROR("osc '%s' tgt '%s' catid %p rc=%d\n",
+                       obd->obd_name, tgt->obd_name, catid, rc);
                 CERROR("logid "LPX64":0x%x\n",
                        catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
         }
-        RETURN(rc);
+        return rc;
+}
+
+static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+                         struct obd_device *disk_obd, int *index)
+{
+        struct llog_catid catid;
+        static char name[32] = CATLIST;
+        int rc;
+        ENTRY;
+
+        LASSERT(olg == &obd->obd_olg);
+
+        mutex_down(&olg->olg_cat_processing);
+        rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
+        if (rc) {
+                CERROR("rc: %d\n", rc);
+                GOTO(out, rc);
+        }
+
+        CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
+               obd->obd_name, *index, catid.lci_logid.lgl_oid,
+               catid.lci_logid.lgl_ogr, catid.lci_logid.lgl_ogen);
+
+        rc = __osc_llog_init(obd, olg, disk_obd, &catid);
+        if (rc) {
+                CERROR("rc: %d\n", rc);
+                GOTO(out, rc);
+        }
+
+        rc = llog_put_cat_list(disk_obd, name, *index, 1, &catid);
+        if (rc) {
+                CERROR("rc: %d\n", rc);
+                GOTO(out, rc);
+        }
+
+ out:
+        mutex_up(&olg->olg_cat_processing);
+
+        return rc;
 }
 
 static int osc_llog_finish(struct obd_device *obd, int count)
@@ -3632,7 +4146,8 @@ static int osc_llog_finish(struct obd_device *obd, int count)
 
 static int osc_reconnect(const struct lu_env *env,
                          struct obd_export *exp, struct obd_device *obd,
                          struct obd_uuid *cluuid,
-                         struct obd_connect_data *data)
+                         struct obd_connect_data *data,
+                         void *localdata)
 {
         struct client_obd *cli = &obd->u.cli;
 
@@ -3640,15 +4155,15 @@ static int osc_reconnect(const struct lu_env *env,
                 long lost_grant;
 
                 client_obd_list_lock(&cli->cl_loi_list_lock);
-                data->ocd_grant = cli->cl_avail_grant ?:
+                data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
                                   2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
                 lost_grant = cli->cl_lost_grant;
                 cli->cl_lost_grant = 0;
                 client_obd_list_unlock(&cli->cl_loi_list_lock);
 
                 CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
-                       "cl_lost_grant: %ld\n", data->ocd_grant,
-                       cli->cl_avail_grant, lost_grant);
+                       "cl_dirty: %ld cl_lost_grant: %ld\n", data->ocd_grant,
+                       cli->cl_avail_grant, cli->cl_dirty, lost_grant);
                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
                        " ocd_grant: %d\n", data->ocd_connect_flags,
                        data->ocd_version, data->ocd_grant);
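
The grant hunk above widens what a reconnecting client asks for: its available
grant plus the bytes of dirty cached data, falling back to two full RPCs' worth
of pages when both are zero. The `?:` with the middle operand omitted is a GNU C
extension (reuse the tested value as the result). A compilable userspace sketch
with assumed constants (4 KiB pages, 256 pages per RPC; not Lustre code):

/* gcc -o grant grant.c */
#include <stdio.h>

#define PAGE_SHIFT 12                    /* 4 KiB pages, an assumption */

int main(void)
{
        long avail_grant = 0;            /* bytes of unused grant held */
        long dirty       = 0;            /* bytes of dirty cached data */
        long max_pages_per_rpc = 256;    /* 1 MiB RPCs at 4 KiB pages */
        long ocd_grant;

        /* same shape as the patched line in osc_reconnect() */
        ocd_grant = (avail_grant + dirty) ?:
                    2 * max_pages_per_rpc << PAGE_SHIFT;

        /* with both inputs zero this requests 2 * 256 * 4096 = 2 MiB */
        printf("requesting %ld bytes of grant\n", ocd_grant);
        return 0;
}
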
@@ -3660,16 +4175,42 @@
 static int osc_disconnect(struct obd_export *exp)
 {
         struct obd_device *obd = class_exp2obd(exp);
-        struct llog_ctxt  *ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+        struct llog_ctxt  *ctxt;
         int rc;
 
-        if (obd->u.cli.cl_conn_count == 1)
-                /* flush any remaining cancel messages out to the target */
-                llog_sync(ctxt, exp);
-
-        llog_ctxt_put(ctxt);
+        ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+        if (ctxt) {
+                if (obd->u.cli.cl_conn_count == 1) {
+                        /* Flush any remaining cancel messages out to the
+                         * target */
+                        llog_sync(ctxt, exp);
+                }
+                llog_ctxt_put(ctxt);
+        } else {
+                CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
+                       obd);
+        }
 
         rc = client_disconnect_export(exp);
+        /**
+         * Initially we put del_shrink_grant before disconnect_export, but it
+         * causes the following problem if setup (connect) and cleanup
+         * (disconnect) are tangled together.
+         *      connect p1                     disconnect p2
+         *   ptlrpc_connect_import
+         *     ...............                class_manual_cleanup
+         *                                      osc_disconnect
+         *                                      del_shrink_grant
+         *   ptlrpc_connect_interpret
+         *     init_grant_shrink
+         *       add this client to shrink list
+         *                                      cleanup_osc
+         * Bang! the pinger triggers the shrink.
+         * So the osc should be removed from the shrink list only after we
+         * are sure the import has been destroyed (see bug 18662).
+         */
+        if (obd->u.cli.cl_import == NULL)
+                osc_del_shrink_grant(&obd->u.cli);
         return rc;
 }
 
@@ -3706,16 +4247,23 @@ static int osc_import_event(struct obd_device *obd,
         }
         case IMP_EVENT_INVALIDATE: {
                 struct ldlm_namespace *ns = obd->obd_namespace;
+                struct lu_env *env;
+                int refcheck;
+
+                env = cl_env_get(&refcheck);
+                if (!IS_ERR(env)) {
+                        /* Reset grants */
+                        cli = &obd->u.cli;
+                        client_obd_list_lock(&cli->cl_loi_list_lock);
+                        /* all pages go to failing rpcs due to the invalid
+                         * import */
+                        osc_check_rpcs(env, cli);
+                        client_obd_list_unlock(&cli->cl_loi_list_lock);
 
-                /* Reset grants */
-                cli = &obd->u.cli;
-                client_obd_list_lock(&cli->cl_loi_list_lock);
-                /* all pages go to failing rpcs due to the invalid import */
-                osc_check_rpcs(cli);
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-
-                ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
-
+                        ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
+                        cl_env_put(env, &refcheck);
+                } else
+                        rc = PTR_ERR(env);
                 break;
         }
         case IMP_EVENT_ACTIVE: {
@@ -3767,6 +4315,7 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
                 struct lprocfs_static_vars lvars = { 0 };
                 struct client_obd *cli = &obd->u.cli;
 
+                cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
                 lprocfs_osc_init_vars(&lvars);
                 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
                         lproc_osc_attach_seqstat(obd);
@@ -3776,7 +4325,7 @@
                 oscc_init(obd);
                 /* We need to allocate a few requests more, because
-                   brw_interpret_oap tries to create new requests before freeing
+                   brw_interpret tries to create new requests before freeing
                    previous ones. Ideally we want to have 2x max_rpcs_in_flight
                    reserved, but I afraid that might be too much wasted RAM
                    in fact, so 2 is just my guess and still should work. */
@@ -3784,6 +4333,9 @@
                 ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
                                     OST_MAXREQSIZE,
                                     ptlrpc_add_rqs_to_pool);
+
+                CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
+                sema_init(&cli->cl_grant_sem, 1);
         }
 
         RETURN(rc);
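
The comment above about reserving cl_max_rpcs_in_flight + 2 requests is the kind
of sizing argument that is easier to see executed: a completion handler may
allocate its follow-on request before the request it is completing returns to
the pool, so a pool sized exactly to the flight limit could momentarily run dry.
A toy, self-contained illustration (all values assumed, not Lustre code):

/* gcc -o pool pool.c */
#include <assert.h>
#include <stdio.h>

#define MAX_RPCS_IN_FLIGHT 8
#define POOL_SIZE (MAX_RPCS_IN_FLIGHT + 2)   /* headroom for the interpret path */

static int pool_free = POOL_SIZE;

static int rq_alloc(void)  { assert(pool_free > 0); return --pool_free; }
static void rq_free(void)  { ++pool_free; }

int main(void)
{
        int i;

        for (i = 0; i < MAX_RPCS_IN_FLIGHT; i++)
                rq_alloc();              /* fill the pipe */

        /* completion path: allocate the follow-on request first ... */
        rq_alloc();
        /* ... and only then release the completed one */
        rq_free();

        printf("free requests left: %d\n", pool_free);
        return 0;
}
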
@@ -3811,41 +4363,36 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
            client import will not have been cleaned. */
         if (obd->u.cli.cl_import) {
                 struct obd_import *imp;
+                down_write(&obd->u.cli.cl_sem);
                 imp = obd->u.cli.cl_import;
                 CDEBUG(D_CONFIG, "%s: client import never connected\n",
                        obd->obd_name);
                 ptlrpc_invalidate_import(imp);
-                ptlrpc_free_rq_pool(imp->imp_rq_pool);
+                if (imp->imp_rq_pool) {
+                        ptlrpc_free_rq_pool(imp->imp_rq_pool);
+                        imp->imp_rq_pool = NULL;
+                }
                 class_destroy_import(imp);
+                up_write(&obd->u.cli.cl_sem);
                 obd->u.cli.cl_import = NULL;
         }
-                break;
-        }
-        case OBD_CLEANUP_SELF_EXP:
                 rc = obd_llog_finish(obd, 0);
                 if (rc != 0)
                         CERROR("failed to cleanup llogging subsystems\n");
                 break;
-        case OBD_CLEANUP_OBD:
-                break;
+        }
         }
 
         RETURN(rc);
 }
 
 int osc_cleanup(struct obd_device *obd)
 {
-        struct osc_creator *oscc = &obd->u.cli.cl_oscc;
         int rc;
         ENTRY;
 
         ptlrpc_lprocfs_unregister_obd(obd);
         lprocfs_obd_cleanup(obd);
 
-        spin_lock(&oscc->oscc_lock);
-        oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
-        oscc->oscc_flags |= OSCC_FLAG_EXITING;
-        spin_unlock(&oscc->oscc_lock);
-
         /* free memory of osc quota cache */
         lquota_cleanup(quota_interface, obd);
 
@@ -3855,27 +4402,30 @@ int osc_cleanup(struct obd_device *obd)
         RETURN(rc);
 }
 
-static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
+int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
 {
-        struct lustre_cfg *lcfg = buf;
         struct lprocfs_static_vars lvars = { 0 };
         int rc = 0;
 
         lprocfs_osc_init_vars(&lvars);
 
         switch (lcfg->lcfg_command) {
-        case LCFG_SPTLRPC_CONF:
-                rc = sptlrpc_cliobd_process_config(obd, lcfg);
-                break;
         default:
                 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
                                               lcfg, obd);
+                if (rc > 0)
+                        rc = 0;
                 break;
         }
 
         return(rc);
}
 
+static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
+{
+        return osc_process_config_base(obd, buf);
+}
+
 struct obd_ops osc_obd_ops = {
         .o_owner                = THIS_MODULE,
         .o_setup                = osc_setup,
@@ -3892,27 +4442,19 @@ struct obd_ops osc_obd_ops = {
         .o_unpackmd             = osc_unpackmd,
         .o_precreate            = osc_precreate,
         .o_create               = osc_create,
+        .o_create_async         = osc_create_async,
         .o_destroy              = osc_destroy,
         .o_getattr              = osc_getattr,
         .o_getattr_async        = osc_getattr_async,
         .o_setattr              = osc_setattr,
         .o_setattr_async        = osc_setattr_async,
         .o_brw                  = osc_brw,
-        .o_brw_async            = osc_brw_async,
-        .o_prep_async_page      = osc_prep_async_page,
-        .o_queue_async_io       = osc_queue_async_io,
-        .o_set_async_flags      = osc_set_async_flags,
-        .o_queue_group_io       = osc_queue_group_io,
-        .o_trigger_group_io     = osc_trigger_group_io,
-        .o_teardown_async_page  = osc_teardown_async_page,
         .o_punch                = osc_punch,
         .o_sync                 = osc_sync,
         .o_enqueue              = osc_enqueue,
-        .o_match                = osc_match,
         .o_change_cbdata        = osc_change_cbdata,
         .o_cancel               = osc_cancel,
         .o_cancel_unused        = osc_cancel_unused,
-        .o_join_lru             = osc_join_lru,
         .o_iocontrol            = osc_iocontrol,
         .o_get_info             = osc_get_info,
         .o_set_info_async       = osc_set_info_async,
@@ -3921,12 +4463,24 @@ struct obd_ops osc_obd_ops = {
         .o_llog_finish          = osc_llog_finish,
         .o_process_config       = osc_process_config,
 };
+
+extern struct lu_kmem_descr osc_caches[];
+extern spinlock_t osc_ast_guard;
+extern struct lock_class_key osc_ast_guard_class;
+
 int __init osc_init(void)
 {
         struct lprocfs_static_vars lvars = { 0 };
         int rc;
         ENTRY;
 
+        /* print an address of _any_ initialized kernel symbol from this
+         * module, to allow debugging with gdb that doesn't support data
+         * symbols from modules.*/
+        CDEBUG(D_CONSOLE, "Lustre OSC module (%p).\n", &osc_caches);
+
+        rc = lu_kmem_init(osc_caches);
+
         lprocfs_osc_init_vars(&lvars);
 
         request_module("lquota");
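
The new CDEBUG in osc_init() above prints the address of one module object so a
debugger that lacks module data symbols can still locate the module's data. A
userspace analogue of the same trick (assumption: any initialized module-local
symbol works as the anchor; not Lustre code):

/* gcc -o anchor anchor.c */
#include <stdio.h>

static int module_anchor[4];   /* any initialized module-local symbol */

int main(void)
{
        /* compare this runtime address with the symbol's link-time offset
         * to derive the module's load bias inside the debugger */
        printf("module anchor at %p\n", (void *)module_anchor);
        return 0;
}
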
@@ -3935,27 +4489,40 @@ int __init osc_init(void)
         init_obd_quota_ops(quota_interface, &osc_obd_ops);
 
         rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
-                                 LUSTRE_OSC_NAME, NULL);
+                                 LUSTRE_OSC_NAME, &osc_device_type);
         if (rc) {
                 if (quota_interface)
                         PORTAL_SYMBOL_PUT(osc_quota_interface);
+                lu_kmem_fini(osc_caches);
                 RETURN(rc);
         }
 
+        spin_lock_init(&osc_ast_guard);
+        lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+
+        osc_mds_ost_orig_logops = llog_lvfs_ops;
+        osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
+        osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
+        osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
+        osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
+
         RETURN(rc);
 }
 
 #ifdef __KERNEL__
 static void /*__exit*/ osc_exit(void)
 {
+        lu_device_type_fini(&osc_device_type);
+
         lquota_exit(quota_interface);
         if (quota_interface)
                 PORTAL_SYMBOL_PUT(osc_quota_interface);
 
         class_unregister_type(LUSTRE_OSC_NAME);
+        lu_kmem_fini(osc_caches);
 }
 
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
 MODULE_LICENSE("GPL");
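
Taken together, osc_init() and osc_exit() above pair every setup step with a
teardown in roughly reverse order, and the error path in osc_init() now unwinds
lu_kmem_init() when type registration fails. A compilable sketch of that unwind
discipline (invented names, not Lustre code):

/* gcc -o unwind unwind.c */
#include <stdio.h>

static int caches_up, type_up;

static int  kmem_init(void)  { caches_up = 1; puts("caches up");         return 0; }
static void kmem_fini(void)  { caches_up = 0; puts("caches down"); }
static int  type_reg(void)   { type_up = 1;   puts("type registered");   return 0; }
static void type_unreg(void) { type_up = 0;   puts("type unregistered"); }

static int mod_init(void)
{
        int rc = kmem_init();
        if (rc)
                return rc;
        rc = type_reg();
        if (rc)
                kmem_fini();     /* unwind the earlier step on failure */
        return rc;
}

static void mod_exit(void)
{
        type_unreg();            /* reverse order of mod_init() */
        kmem_fini();
}

int main(void)
{
        if (mod_init() == 0)
                mod_exit();
        return 0;
}
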