/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (C) 2001-2003 Cluster File Systems, Inc.
- * Author Peter Braam <braam@clusterfs.com>
+ * GPL HEADER START
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * For testing and management it is treated as an obd_device,
- * although it does not export a full OBD method table (the
- * requests are coming in over the wire, so object target modules
- * do not have a full method table.)
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
*/
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSC
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-#else /* __KERNEL__ */
+#include <libcfs/libcfs.h>
+
+#ifndef __KERNEL__
# include <liblustre.h>
#endif
#include <lustre_dlm.h>
-#include <libcfs/kp30.h>
#include <lustre_net.h>
#include <lustre/lustre_user.h>
+#include <obd_cksum.h>
#include <obd_ost.h>
#include <obd_lov.h>
extern quota_interface_t osc_quota_interface;
static void osc_release_ppga(struct brw_page **ppga, obd_count count);
+static int brw_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc);
+int osc_cleanup(struct obd_device *obd);
/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
if (lsm) {
LASSERT(lsm->lsm_object_id);
- LASSERT(lsm->lsm_object_gr);
+ LASSERT_MDS_GROUP(lsm->lsm_object_gr);
(*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
(*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
}
(*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
(*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
LASSERT((*lsmp)->lsm_object_id);
- LASSERT((*lsmp)->lsm_object_gr);
+ LASSERT_MDS_GROUP((*lsmp)->lsm_object_gr);
}
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
RETURN(lsm_size);
}
-static inline void osc_pack_capa(struct ptlrpc_request *req, int offset,
+static inline void osc_pack_capa(struct ptlrpc_request *req,
struct ost_body *body, void *capa)
{
struct obd_capa *oc = (struct obd_capa *)capa;
if (!capa)
return;
- c = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*c));
+ c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
LASSERT(c);
capa_cpy(c, oc);
body->oa.o_valid |= OBD_MD_FLOSSCAPA;
DEBUG_CAPA(D_SEC, c, "pack");
}
-static inline void osc_pack_req_body(struct ptlrpc_request *req, int offset,
+static inline void osc_pack_req_body(struct ptlrpc_request *req,
struct obd_info *oinfo)
{
struct ost_body *body;
- body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
- body->oa = *oinfo->oi_oa;
- osc_pack_capa(req, offset + 1, body, oinfo->oi_capa);
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+
+ lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
+ osc_pack_capa(req, body, oinfo->oi_capa);
+}
+
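+/* If no capability is being sent, shrink the RMF_CAPA1 request field to
+ * zero bytes; otherwise the request format's default size of one
+ * struct obd_capa already fits and nothing needs to change. */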
+static inline void osc_set_capa_size(struct ptlrpc_request *req,
+ const struct req_msg_field *field,
+ struct obd_capa *oc)
+{
+ if (oc == NULL)
+ req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
+ else
+ /* it is already calculated as sizeof struct obd_capa */
+ ;
}
-static int osc_getattr_interpret(struct ptlrpc_request *req,
+static int osc_getattr_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
struct ost_body *body;
lustre_swab_ost_body);
if (body) {
CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- memcpy(aa->aa_oi->oi_oa, &body->oa, sizeof(*aa->aa_oi->oi_oa));
+ lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);
/* This should really be sent by the OST */
aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
} else {
- CERROR("can't unpack ost_body\n");
+ CDEBUG(D_INFO, "can't unpack ost_body\n");
rc = -EPROTO;
aa->aa_oi->oi_oa->o_valid = 0;
}
struct ptlrpc_request_set *set)
{
struct ptlrpc_request *req;
- struct ost_body *body;
- int size[3] = { sizeof(struct ptlrpc_body), sizeof(*body) };
struct osc_async_args *aa;
+ int rc;
ENTRY;
- size[REQ_REC_OFF + 1] = oinfo->oi_capa ? sizeof(struct lustre_capa) : 0;
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_GETATTR, 3, size,NULL);
- if (!req)
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+ if (req == NULL)
RETURN(-ENOMEM);
- osc_pack_req_body(req, REQ_REC_OFF, oinfo);
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ osc_pack_req_body(req, oinfo);
- ptlrpc_req_set_repsize(req, 2, size);
- req->rq_interpret_reply = osc_getattr_interpret;
+ ptlrpc_request_set_replen(req);
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(set, req);
- RETURN (0);
+ RETURN(0);
}
static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
{
struct ptlrpc_request *req;
- struct ost_body *body;
- int rc, size[3] = { sizeof(struct ptlrpc_body), sizeof(*body) };
+ struct ost_body *body;
+ int rc;
ENTRY;
- size[REQ_REC_OFF + 1] = oinfo->oi_capa ? sizeof(struct lustre_capa) : 0;
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_GETATTR, 3, size, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+ if (req == NULL)
RETURN(-ENOMEM);
- osc_pack_req_body(req, REQ_REC_OFF, oinfo);
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ osc_pack_req_body(req, oinfo);
- ptlrpc_req_set_repsize(req, 2, size);
+ ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
- if (rc) {
- CERROR("%s failed: rc = %d\n", __FUNCTION__, rc);
+ if (rc)
GOTO(out, rc);
- }
- body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
- lustre_swab_ost_body);
- if (body == NULL) {
- CERROR ("can't unpack ost_body\n");
- GOTO (out, rc = -EPROTO);
- }
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EPROTO);
CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- *oinfo->oi_oa = body->oa;
+ lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);
/* This should really be sent by the OST */
oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
struct obd_trans_info *oti)
{
struct ptlrpc_request *req;
- struct ost_body *body;
- int rc, size[3] = { sizeof(struct ptlrpc_body), sizeof(*body) };
+ struct ost_body *body;
+ int rc;
ENTRY;
- LASSERT(!(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP) ||
- oinfo->oi_oa->o_gr > 0);
- size[REQ_REC_OFF + 1] = oinfo->oi_capa ? sizeof(struct lustre_capa) : 0;
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_SETATTR, 3, size, NULL);
- if (!req)
+ LASSERTF(!(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP) ||
+ CHECK_MDS_GROUP(oinfo->oi_oa->o_gr),
+ "oinfo->oi_oa->o_valid="LPU64" oinfo->oi_oa->o_gr="LPU64"\n",
+ oinfo->oi_oa->o_valid, oinfo->oi_oa->o_gr);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
+ if (req == NULL)
RETURN(-ENOMEM);
- osc_pack_req_body(req, REQ_REC_OFF, oinfo);
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ osc_pack_req_body(req, oinfo);
- ptlrpc_req_set_repsize(req, 2, size);
+ ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (rc)
GOTO(out, rc);
- body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
- lustre_swab_ost_body);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
if (body == NULL)
GOTO(out, rc = -EPROTO);
- *oinfo->oi_oa = body->oa;
+ lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);
EXIT;
out:
RETURN(rc);
}
-static int osc_setattr_interpret(struct ptlrpc_request *req,
+static int osc_setattr_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
struct ost_body *body;
if (rc != 0)
GOTO(out, rc);
- body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
- lustre_swab_ost_body);
- if (body == NULL) {
- CERROR("can't unpack ost_body\n");
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
GOTO(out, rc = -EPROTO);
- }
- *aa->aa_oi->oi_oa = body->oa;
+ lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);
out:
rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
RETURN(rc);
struct ptlrpc_request_set *rqset)
{
struct ptlrpc_request *req;
- int size[3] = { sizeof(struct ptlrpc_body), sizeof(struct ost_body) };
struct osc_async_args *aa;
+ int rc;
ENTRY;
- size[REQ_REC_OFF + 1] = oinfo->oi_capa ? sizeof(struct lustre_capa) : 0;
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_SETATTR, 3, size, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
+ if (req == NULL)
RETURN(-ENOMEM);
- osc_pack_req_body(req, REQ_REC_OFF, oinfo);
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ osc_pack_req_body(req, oinfo);
+
+ ptlrpc_request_set_replen(req);
+
if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
LASSERT(oti);
- *obdo_logcookie(oinfo->oi_oa) = *oti->oti_logcookies;
+ oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
}
- ptlrpc_req_set_repsize(req, 2, size);
- /* do mds to ost setattr asynchronouly */
+ /* do mds to ost setattr asynchronously */
if (!rqset) {
/* Do not wait for response. */
- ptlrpcd_add_req(req);
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
} else {
- req->rq_interpret_reply = osc_setattr_interpret;
+ req->rq_interpret_reply =
+ (ptlrpc_interpterer_t)osc_setattr_interpret;
CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(rqset, req);
struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
struct ptlrpc_request *req;
- struct ost_body *body;
- struct lov_stripe_md *lsm;
- int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
+ struct ost_body *body;
+ struct lov_stripe_md *lsm;
+ int rc;
ENTRY;
LASSERT(oa);
RETURN(rc);
}
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_CREATE, 2, size, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
+ if (req == NULL)
GOTO(out, rc = -ENOMEM);
- body = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
- body->oa = *oa;
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
+ if (rc) {
+ ptlrpc_request_free(req);
+ GOTO(out, rc);
+ }
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ lustre_set_wire_obdo(&body->oa, oa);
- ptlrpc_req_set_repsize(req, 2, size);
- if (oa->o_valid & OBD_MD_FLINLINE) {
- LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
- oa->o_flags == OBD_FL_DELORPHAN);
+ ptlrpc_request_set_replen(req);
+
+ if ((oa->o_valid & OBD_MD_FLFLAGS) &&
+ oa->o_flags == OBD_FL_DELORPHAN) {
DEBUG_REQ(D_HA, req,
"delorphan from OST integration");
/* Don't resend the delorphan req */
if (rc)
GOTO(out_req, rc);
- body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
- lustre_swab_ost_body);
- if (body == NULL) {
- CERROR ("can't unpack ost_body\n");
- GOTO (out_req, rc = -EPROTO);
- }
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ GOTO(out_req, rc = -EPROTO);
- *oa = body->oa;
+ lustre_get_wire_obdo(oa, &body->oa);
/* This should really be sent by the OST */
oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
if (oa->o_valid & OBD_MD_FLCOOKIE) {
if (!oti->oti_logcookies)
oti_alloc_cookies(oti, 1);
- *oti->oti_logcookies = *obdo_logcookie(oa);
+ *oti->oti_logcookies = oa->o_lcookie;
}
}
RETURN(rc);
}
-static int osc_punch_interpret(struct ptlrpc_request *req,
- struct osc_async_args *aa, int rc)
+static int osc_punch_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ struct osc_punch_args *aa, int rc)
{
struct ost_body *body;
ENTRY;
if (rc != 0)
GOTO(out, rc);
- body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof (*body),
- lustre_swab_ost_body);
- if (body == NULL) {
- CERROR ("can't unpack ost_body\n");
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
GOTO(out, rc = -EPROTO);
- }
- *aa->aa_oi->oi_oa = body->oa;
+ lustre_get_wire_obdo(aa->pa_oa, &body->oa);
out:
- rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
+ rc = aa->pa_upcall(aa->pa_cookie, rc);
RETURN(rc);
}
-static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset)
+int osc_punch_base(struct obd_export *exp, struct obdo *oa,
+ struct obd_capa *capa,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset)
{
struct ptlrpc_request *req;
- struct osc_async_args *aa;
- struct ost_body *body;
- int size[3] = { sizeof(struct ptlrpc_body), sizeof(*body) };
+ struct osc_punch_args *aa;
+ struct ost_body *body;
+ int rc;
ENTRY;
- if (!oinfo->oi_oa) {
- CERROR("oa NULL\n");
- RETURN(-EINVAL);
- }
-
- size[REQ_REC_OFF + 1] = oinfo->oi_capa? sizeof(struct lustre_capa) : 0;
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_PUNCH, 3, size, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
+ if (req == NULL)
RETURN(-ENOMEM);
+ osc_set_capa_size(req, &RMF_CAPA1, capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ ptlrpc_at_set_req_timeout(req);
- osc_pack_req_body(req, REQ_REC_OFF, oinfo);
- /* overload the size and blocks fields in the oa with start/end */
- body = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
- body->oa.o_size = oinfo->oi_policy.l_extent.start;
- body->oa.o_blocks = oinfo->oi_policy.l_extent.end;
- body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ lustre_set_wire_obdo(&body->oa, oa);
+ osc_pack_capa(req, body, capa);
- ptlrpc_req_set_repsize(req, 2, size);
+ ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = osc_punch_interpret;
+
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_punch_interpret;
CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_async_args *)&req->rq_async_args;
- aa->aa_oi = oinfo;
- ptlrpc_set_add_req(rqset, req);
+ aa = ptlrpc_req_async_args(req);
+ aa->pa_oa = oa;
+ aa->pa_upcall = upcall;
+ aa->pa_cookie = cookie;
+ if (rqset == PTLRPCD_SET)
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
+ else
+ ptlrpc_set_add_req(rqset, req);
RETURN(0);
}
+static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset)
+{
+ oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
+ oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
+ oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+ return osc_punch_base(exp, oinfo->oi_oa, oinfo->oi_capa,
+ oinfo->oi_cb_up, oinfo, rqset);
+}
+
static int osc_sync(struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md *md, obd_size start, obd_size end,
void *capa)
{
struct ptlrpc_request *req;
- struct ost_body *body;
- int rc, size[3] = { sizeof(struct ptlrpc_body), sizeof(*body) };
+ struct ost_body *body;
+ int rc;
ENTRY;
if (!oa) {
- CERROR("oa NULL\n");
+ CDEBUG(D_INFO, "oa NULL\n");
RETURN(-EINVAL);
}
- size[REQ_REC_OFF + 1] = capa ? sizeof(struct lustre_capa) : 0;
-
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_SYNC, 3, size, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
+ if (req == NULL)
RETURN(-ENOMEM);
+ osc_set_capa_size(req, &RMF_CAPA1, capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
/* overload the size and blocks fields in the oa with start/end */
- body = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
- body->oa = *oa;
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ lustre_set_wire_obdo(&body->oa, oa);
body->oa.o_size = start;
body->oa.o_blocks = end;
body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
+ osc_pack_capa(req, body, capa);
- osc_pack_capa(req, REQ_REC_OFF + 1, body, capa);
-
- ptlrpc_req_set_repsize(req, 2, size);
+ ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (rc)
GOTO(out, rc);
- body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
- lustre_swab_ost_body);
- if (body == NULL) {
- CERROR ("can't unpack ost_body\n");
- GOTO (out, rc = -EPROTO);
- }
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EPROTO);
- *oa = body->oa;
+ lustre_get_wire_obdo(oa, &body->oa);
EXIT;
out:
int lock_flags)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct ldlm_res_id res_id = { .name = { oa->o_id, 0, oa->o_gr, 0 } };
- struct ldlm_resource *res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
+ struct ldlm_res_id res_id;
+ struct ldlm_resource *res;
int count;
ENTRY;
+ osc_build_res_name(oa->o_id, oa->o_gr, &res_id);
+ res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
if (res == NULL)
RETURN(0);
+ LDLM_RESOURCE_ADDREF(res);
count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
lock_flags, 0, NULL);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
RETURN(count);
}
+static int osc_destroy_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data,
+ int rc)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+
+ atomic_dec(&cli->cl_destroy_in_flight);
+ cfs_waitq_signal(&cli->cl_destroy_waitq);
+ return 0;
+}
+
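+/* Throttle destroys: claim a slot by bumping cl_destroy_in_flight. If that
+ * exceeds cl_max_rpcs_in_flight, release the slot again; if the counter
+ * dropped below the limit between the two atomic operations, wake a waiter
+ * so it does not sleep on a stale count. */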
+static int osc_can_send_destroy(struct client_obd *cli)
+{
+ if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
+ cli->cl_max_rpcs_in_flight) {
+ /* The destroy request can be sent */
+ return 1;
+ }
+ if (atomic_dec_return(&cli->cl_destroy_in_flight) <
+ cli->cl_max_rpcs_in_flight) {
+ /*
+ * The counter has been modified between the two atomic
+ * operations.
+ */
+ cfs_waitq_signal(&cli->cl_destroy_waitq);
+ }
+ return 0;
+}
+
/* Destroy requests can always be async on the client, and we don't even really
* care about the return code since the client cannot do anything at all about
* a destroy failure.
* cookies to the MDS after committing destroy transactions. */
static int osc_destroy(struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md *ea, struct obd_trans_info *oti,
- struct obd_export *md_export)
+ struct obd_export *md_export, void *capa)
{
- CFS_LIST_HEAD(cancels);
+ struct client_obd *cli = &exp->exp_obd->u.cli;
struct ptlrpc_request *req;
- struct ost_body *body;
- int size[3] = { sizeof(struct ptlrpc_body), sizeof(*body), 0 };
- int count, bufcount = 2;
+ struct ost_body *body;
+ CFS_LIST_HEAD(cancels);
+ int rc, count;
ENTRY;
if (!oa) {
- CERROR("oa NULL\n");
+ CDEBUG(D_INFO, "oa NULL\n");
RETURN(-EINVAL);
}
count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
LDLM_FL_DISCARD_DATA);
- if (exp_connect_cancelset(exp) && count) {
- bufcount = 3;
- size[REQ_REC_OFF + 1] = ldlm_request_bufsize(count,
- OST_DESTROY);
- }
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_DESTROY, bufcount, size, NULL);
- if (exp_connect_cancelset(exp) && req)
- ldlm_cli_cancel_list(&cancels, count, req, REQ_REC_OFF + 1, 0);
- else
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- if (!req)
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
RETURN(-ENOMEM);
+ }
+
+ osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
+ rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
+ 0, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ ptlrpc_at_set_req_timeout(req);
- body = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
- memcpy(obdo_logcookie(oa), oti->oti_logcookies,
- sizeof(*oti->oti_logcookies));
- body->oa = *oa;
-
- ptlrpc_req_set_repsize(req, 2, size);
+ oa->o_lcookie = *oti->oti_logcookies;
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ lustre_set_wire_obdo(&body->oa, oa);
+
+ osc_pack_capa(req, body, (struct obd_capa *)capa);
+ ptlrpc_request_set_replen(req);
+
+ /* don't throttle destroy RPCs for the MDT */
+ if (!(cli->cl_import->imp_connect_flags_orig & OBD_CONNECT_MDS)) {
+ req->rq_interpret_reply = osc_destroy_interpret;
+ if (!osc_can_send_destroy(cli)) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
+ NULL);
+
+ /*
+ * Wait until the number of on-going destroy RPCs drops
+ * below cl_max_rpcs_in_flight
+ */
+ l_wait_event_exclusive(cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli), &lwi);
+ }
+ }
- ptlrpcd_add_req(req);
+ /* Do not wait for response */
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
RETURN(0);
}
oa->o_valid |= bits;
client_obd_list_lock(&cli->cl_loi_list_lock);
oa->o_dirty = cli->cl_dirty;
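+ /* pages flagged OBD_BRW_NOCACHE are accounted in cl_dirty_transit
+ * while in flight, so exclude them from the dirty-limit checks below */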
- if (cli->cl_dirty > cli->cl_dirty_max) {
- CERROR("dirty %lu > dirty_max %lu\n",
- cli->cl_dirty, cli->cl_dirty_max);
+ if (cli->cl_dirty - cli->cl_dirty_transit > cli->cl_dirty_max) {
+ CERROR("dirty %lu - %lu > dirty_max %lu\n",
+ cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
oa->o_undirty = 0;
- } else if (atomic_read(&obd_dirty_pages) > obd_max_dirty_pages) {
- CERROR("dirty %d > system dirty_max %d\n",
- atomic_read(&obd_dirty_pages), obd_max_dirty_pages);
+ } else if (atomic_read(&obd_dirty_pages) -
+ atomic_read(&obd_dirty_transit_pages) > obd_max_dirty_pages){
+ CERROR("dirty %d - %d > system dirty_max %d\n",
+ atomic_read(&obd_dirty_pages),
+ atomic_read(&obd_dirty_transit_pages),
+ obd_max_dirty_pages);
oa->o_undirty = 0;
} else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
CERROR("dirty %lu - dirty_max %lu too big???\n",
client_obd_list_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
+
+}
+
+static void osc_update_next_shrink(struct client_obd *cli)
+{
+ cli->cl_next_shrink_grant =
+ cfs_time_shift(cli->cl_grant_shrink_interval);
+ CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
+ cli->cl_next_shrink_grant);
}
/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
+ LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock);
+ LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
cli->cl_dirty += CFS_PAGE_SIZE;
cli->cl_avail_grant -= CFS_PAGE_SIZE;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
CFS_PAGE_SIZE, pga, pga->pg);
LASSERT(cli->cl_avail_grant >= 0);
+ osc_update_next_shrink(cli);
}
/* the companion to osc_consume_write_grant, called when a brw has completed.
int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
ENTRY;
+ LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock);
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
EXIT;
return;
pga->flag &= ~OBD_BRW_FROM_GRANT;
atomic_dec(&obd_dirty_pages);
cli->cl_dirty -= CFS_PAGE_SIZE;
+ if (pga->flag & OBD_BRW_NOCACHE) {
+ pga->flag &= ~OBD_BRW_NOCACHE;
+ atomic_dec(&obd_dirty_transit_pages);
+ cli->cl_dirty_transit -= CFS_PAGE_SIZE;
+ }
if (!sent) {
cli->cl_lost_grant += CFS_PAGE_SIZE;
CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
EXIT;
}
-static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
+static void __osc_update_grant(struct client_obd *cli, obd_size grant)
{
client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_avail_grant = ocd->ocd_grant;
+ cli->cl_avail_grant += grant;
client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
- cli->cl_avail_grant, cli->cl_lost_grant);
- LASSERT(cli->cl_avail_grant >= 0);
}
static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
+ if (body->oa.o_valid & OBD_MD_FLGRANT) {
+ CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
+ __osc_update_grant(cli, body->oa.o_grant);
+ }
+}
+
+static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
+ void *key, obd_count vallen, void *val,
+ struct ptlrpc_request_set *set);
+
+static int osc_shrink_grant_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *aa, int rc)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
+ struct ost_body *body;
+
+ if (rc != 0) {
+ __osc_update_grant(cli, oa->o_grant);
+ GOTO(out, rc);
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ osc_update_grant(cli, body);
+out:
+ OBD_FREE_PTR(oa);
+ return rc;
+}
+
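+/* Hand back a quarter of the available grant, piggybacked on the brw
+ * request being prepared: the amount goes in oa->o_grant, tagged with
+ * OBD_FL_SHRINK_GRANT so the server knows to reclaim it. */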
+static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
+{
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ oa->o_grant = cli->cl_avail_grant / 4;
+ cli->cl_avail_grant -= oa->o_grant;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ oa->o_flags |= OBD_FL_SHRINK_GRANT;
+ osc_update_next_shrink(cli);
+}
+
+/* Shrink the current grant, either from some large amount to enough for a
+ * full set of in-flight RPCs, or if we have already shrunk to that limit
+ * then to enough for a single RPC. This avoids keeping more grant than
+ * needed, and avoids shrinking the grant piecemeal. */
+static int osc_shrink_grant(struct client_obd *cli)
+{
+ long target = (cli->cl_max_rpcs_in_flight + 1) *
+ cli->cl_max_pages_per_rpc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_avail_grant <= target)
+ target = cli->cl_max_pages_per_rpc;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ return osc_shrink_grant_to_target(cli, target);
+}
+
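+/* Shrink the locally available grant down to @target bytes, returning the
+ * excess to the server through an asynchronous KEY_GRANT_SHRINK set_info
+ * RPC; if sending fails, the grant is restored locally. */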
+int osc_shrink_grant_to_target(struct client_obd *cli, long target)
+{
+ int rc = 0;
+ struct ost_body *body;
+ ENTRY;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ /* Don't shrink if we are already above or below the desired limit.
+ * We don't want to shrink below a single RPC, as that will negatively
+ * impact block allocation and long-term performance. */
+ if (target < cli->cl_max_pages_per_rpc)
+ target = cli->cl_max_pages_per_rpc;
+
+ if (target >= cli->cl_avail_grant) {
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ RETURN(0);
+ }
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ OBD_ALLOC_PTR(body);
+ if (!body)
+ RETURN(-ENOMEM);
+
+ osc_announce_cached(cli, &body->oa, 0);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ body->oa.o_grant = cli->cl_avail_grant - target;
+ cli->cl_avail_grant = target;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
+ osc_update_next_shrink(cli);
+
+ rc = osc_set_info_async(cli->cl_import->imp_obd->obd_self_export,
+ sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
+ sizeof(*body), body, NULL);
+ if (rc != 0)
+ __osc_update_grant(cli, body->oa.o_grant);
+ OBD_FREE_PTR(body);
+ RETURN(rc);
+}
+
+#define GRANT_SHRINK_LIMIT PTLRPC_MAX_BRW_SIZE
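+/* Shrinking is worthwhile only once the deadline is (nearly) due, the
+ * import is fully connected, and more than GRANT_SHRINK_LIMIT bytes of
+ * grant are held; if the deadline passes without those conditions, just
+ * re-arm the timer. */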
+static int osc_should_shrink_grant(struct client_obd *client)
+{
+ cfs_time_t time = cfs_time_current();
+ cfs_time_t next_shrink = client->cl_next_shrink_grant;
+ if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
+ if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
+ client->cl_avail_grant > GRANT_SHRINK_LIMIT)
+ return 1;
+ else
+ osc_update_next_shrink(client);
+ }
+ return 0;
+}
+
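+/* Periodic timeout callback: walk every client registered on the grant
+ * timeout list and shrink those whose deadline is due. */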
+static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
+{
+ struct client_obd *client;
+
+ list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
+ if (osc_should_shrink_grant(client))
+ osc_shrink_grant(client);
+ }
+ return 0;
+}
+
+static int osc_add_shrink_grant(struct client_obd *client)
+{
+ int rc;
+
+ rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
+ TIMEOUT_GRANT,
+ osc_grant_shrink_grant_cb, NULL,
+ &client->cl_grant_shrink_list);
+ if (rc) {
+ CERROR("add grant client %s error %d\n",
+ client->cl_import->imp_obd->obd_name, rc);
+ return rc;
+ }
+ CDEBUG(D_CACHE, "add grant client %s \n",
+ client->cl_import->imp_obd->obd_name);
+ osc_update_next_shrink(client);
+ return 0;
+}
+
+static int osc_del_shrink_grant(struct client_obd *client)
+{
+ return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
+ TIMEOUT_GRANT);
+}
+
+static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
+{
client_obd_list_lock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
- if (body->oa.o_valid & OBD_MD_FLGRANT)
- cli->cl_avail_grant += body->oa.o_grant;
- /* waiters are woken in brw_interpret_oap */
+ cli->cl_avail_grant = ocd->ocd_grant;
client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
+ list_empty(&cli->cl_grant_shrink_list))
+ osc_add_shrink_grant(cli);
+
+ CDEBUG(D_CACHE, "setting cl_avail_grant: %ld cl_lost_grant: %ld \n",
+ cli->cl_avail_grant, cli->cl_lost_grant);
+ LASSERT(cli->cl_avail_grant >= 0);
}
/* We assume that the reason this OSC got a short read is because it read
remote_rcs = lustre_swab_repbuf(req, REQ_REC_OFF + 1,
sizeof(*remote_rcs) * niocount, NULL);
if (remote_rcs == NULL) {
- CERROR("Missing/short RC vector on BRW_WRITE reply\n");
+ CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
return(-EPROTO);
}
if (lustre_msg_swabbed(req->rq_repmsg))
return(remote_rcs[i]);
if (remote_rcs[i] != 0) {
- CERROR("rc[%d] invalid (%d) req %p\n",
+ CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
i, remote_rcs[i], req);
return(-EPROTO);
}
if (req->rq_bulk->bd_nob_transferred != requested_nob) {
CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
- requested_nob, req->rq_bulk->bd_nob_transferred);
+ req->rq_bulk->bd_nob_transferred, requested_nob);
return(-EPROTO);
}
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
if (p1->flag != p2->flag) {
- unsigned mask = ~OBD_BRW_FROM_GRANT;
+ unsigned mask = ~(OBD_BRW_FROM_GRANT|
+ OBD_BRW_NOCACHE|OBD_BRW_SYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine */
}
static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
- struct brw_page **pga, int opc)
+ struct brw_page **pga, int opc,
+ cksum_type_t cksum_type)
{
- __u32 cksum = ~0;
+ __u32 cksum;
int i = 0;
LASSERT (pg_count > 0);
+ cksum = init_checksum(cksum_type);
while (nob > 0 && pg_count > 0) {
- char *ptr = cfs_kmap(pga[i]->pg);
+ unsigned char *ptr = cfs_kmap(pga[i]->pg);
int off = pga[i]->off & ~CFS_PAGE_MASK;
int count = pga[i]->count > nob ? nob : pga[i]->count;
/* corrupt the data before we compute the checksum, to
* simulate an OST->client data error */
if (i == 0 && opc == OST_READ &&
- OBD_FAIL_CHECK_ONCE(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
+ OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
memcpy(ptr + off, "bad1", min(4, nob));
- cksum = crc32_le(cksum, ptr + off, count);
+ cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
cfs_kunmap(pga[i]->pg);
LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
off, cksum);
}
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo */
- if (opc == OST_WRITE && OBD_FAIL_CHECK_ONCE(OBD_FAIL_OSC_CHECKSUM_SEND))
+ if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
cksum++;
return cksum;
static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
struct lov_stripe_md *lsm, obd_count page_count,
- struct brw_page **pga,
+ struct brw_page **pga,
struct ptlrpc_request **reqp,
- struct obd_capa *ocapa)
+ struct obd_capa *ocapa, int reserve)
{
struct ptlrpc_request *req;
struct ptlrpc_bulk_desc *desc;
struct ost_body *body;
struct obd_ioobj *ioobj;
struct niobuf_remote *niobuf;
- int size[5] = { sizeof(struct ptlrpc_body), sizeof(*body) };
int niocount, i, requested_nob, opc, rc;
- struct ptlrpc_request_pool *pool;
- struct lustre_capa *capa;
struct osc_brw_async_args *aa;
+ struct req_capsule *pill;
+ struct brw_page *pg_prev;
ENTRY;
- OBD_FAIL_RETURN(OBD_FAIL_OSC_BRW_PREP_REQ, -ENOMEM); /* Recoverable */
- OBD_FAIL_RETURN(OBD_FAIL_OSC_BRW_PREP_REQ2, -EINVAL); /* Fatal */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
+ RETURN(-ENOMEM); /* Recoverable */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
+ RETURN(-EINVAL); /* Fatal */
if ((cmd & OBD_BRW_WRITE) != 0) {
opc = OST_WRITE;
- pool = cli->cl_import->imp_rq_pool;
+ req = ptlrpc_request_alloc_pool(cli->cl_import,
+ cli->cl_import->imp_rq_pool,
+ &RQF_OST_BRW);
} else {
opc = OST_READ;
- pool = NULL;
+ req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW);
}
+ if (req == NULL)
+ RETURN(-ENOMEM);
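+ /* pages that can_merge_pages() declares contiguous share one
+ * niobuf_remote entry, so count how many entries are really needed */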
for (niocount = i = 1; i < page_count; i++) {
if (!can_merge_pages(pga[i - 1], pga[i]))
niocount++;
}
- size[REQ_REC_OFF + 1] = sizeof(*ioobj);
- size[REQ_REC_OFF + 2] = niocount * sizeof(*niobuf);
- if (ocapa)
- size[REQ_REC_OFF + 3] = sizeof(*capa);
-
- req = ptlrpc_prep_req_pool(cli->cl_import, LUSTRE_OST_VERSION, opc, 5,
- size, NULL, pool, NULL);
- if (req == NULL)
- RETURN (-ENOMEM);
+ pill = &req->rq_pill;
+ req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
+ niocount * sizeof(*niobuf));
+ osc_set_capa_size(req, &RMF_CAPA1, ocapa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ ptlrpc_at_set_req_timeout(req);
if (opc == OST_WRITE)
- desc = ptlrpc_prep_bulk_imp (req, page_count,
- BULK_GET_SOURCE, OST_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp(req, page_count,
+ BULK_GET_SOURCE, OST_BULK_PORTAL);
else
- desc = ptlrpc_prep_bulk_imp (req, page_count,
- BULK_PUT_SINK, OST_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp(req, page_count,
+ BULK_PUT_SINK, OST_BULK_PORTAL);
+
if (desc == NULL)
GOTO(out, rc = -ENOMEM);
/* NB request now owns desc and will free it when it gets freed */
- body = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
- ioobj = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, sizeof(*ioobj));
- niobuf = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 2,
- niocount * sizeof(*niobuf));
+ body = req_capsule_client_get(pill, &RMF_OST_BODY);
+ ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
+ niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
+ LASSERT(body && ioobj && niobuf);
- body->oa = *oa;
+ lustre_set_wire_obdo(&body->oa, oa);
obdo_to_ioobj(oa, ioobj);
ioobj->ioo_bufcnt = niocount;
- if (ocapa) {
- capa = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 3,
- sizeof(*capa));
- capa_cpy(capa, ocapa);
- body->oa.o_valid |= OBD_MD_FLOSSCAPA;
- }
-
+ osc_pack_capa(req, body, ocapa);
LASSERT (page_count > 0);
+ pg_prev = pga[0];
for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
struct brw_page *pg = pga[i];
- struct brw_page *pg_prev = pga[i - 1];
LASSERT(pg->count > 0);
LASSERTF((pg->off & ~CFS_PAGE_MASK) + pg->count <= CFS_PAGE_SIZE,
"i: %d pg: %p off: "LPU64", count: %u\n", i, pg,
pg->off, pg->count);
-#ifdef __LINUX__
+#ifdef __linux__
LASSERTF(i == 0 || pg->off > pg_prev->off,
"i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
" prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
niobuf->len = pg->count;
niobuf->flags = pg->flag;
}
+ pg_prev = pg;
}
- LASSERT((void *)(niobuf - niocount) ==
+ LASSERTF((void *)(niobuf - niocount) ==
lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 2,
- niocount * sizeof(*niobuf)));
+ niocount * sizeof(*niobuf)),
+ "want %p - real %p\n", lustre_msg_buf(req->rq_reqmsg,
+ REQ_REC_OFF + 2, niocount * sizeof(*niobuf)),
+ (void *)(niobuf - niocount));
+
osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
+ if (osc_should_shrink_grant(cli))
+ osc_shrink_grant_local(cli, &body->oa);
/* size[REQ_REC_OFF] still sizeof (*body) */
if (opc == OST_WRITE) {
- if (unlikely(cli->cl_checksum)) {
- body->oa.o_valid |= OBD_MD_FLCKSUM;
+ if (unlikely(cli->cl_checksum) &&
+ !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
+ /* store cl_cksum_type in a local variable since
+ * it can be changed via lprocfs */
+ cksum_type_t cksum_type = cli->cl_cksum_type;
+
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
+ oa->o_flags &= OBD_FL_LOCAL_MASK;
+ body->oa.o_flags = 0;
+ }
+ body->oa.o_flags |= cksum_type_pack(cksum_type);
+ body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
body->oa.o_cksum = osc_checksum_bulk(requested_nob,
page_count, pga,
- OST_WRITE);
+ OST_WRITE,
+ cksum_type);
CDEBUG(D_PAGE, "checksum at write origin: %x\n",
body->oa.o_cksum);
/* save this in 'oa', too, for later checking */
- oa->o_valid |= OBD_MD_FLCKSUM;
+ oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
+ oa->o_flags |= cksum_type_pack(cksum_type);
} else {
/* clear out the checksum flag, in case this is a
* resend but cl_checksum is no longer set. b=11238 */
}
oa->o_cksum = body->oa.o_cksum;
/* 1 RC per niobuf */
- size[REPLY_REC_OFF + 1] = sizeof(__u32) * niocount;
- ptlrpc_req_set_repsize(req, 3, size);
+ req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER,
+ sizeof(__u32) * niocount);
} else {
- if (unlikely(cli->cl_checksum))
- body->oa.o_valid |= OBD_MD_FLCKSUM;
+ if (unlikely(cli->cl_checksum) &&
+ !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
+ body->oa.o_flags = 0;
+ body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
+ body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
+ }
+ req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER, 0);
/* 1 RC for the whole I/O */
- ptlrpc_req_set_repsize(req, 2, size);
}
+ ptlrpc_request_set_replen(req);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_brw_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oa = oa;
aa->aa_requested_nob = requested_nob;
aa->aa_nio_count = niocount;
aa->aa_resends = 0;
aa->aa_ppga = pga;
aa->aa_cli = cli;
- INIT_LIST_HEAD(&aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
+ if (ocapa && reserve)
+ aa->aa_ocapa = capa_get(ocapa);
*reqp = req;
- RETURN (0);
+ RETURN(0);
out:
- ptlrpc_req_finished (req);
- RETURN (rc);
+ ptlrpc_req_finished(req);
+ RETURN(rc);
}
static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
- __u32 client_cksum, __u32 server_cksum,
- int nob, obd_count page_count,
- struct brw_page **pga)
+ __u32 client_cksum, __u32 server_cksum, int nob,
+ obd_count page_count, struct brw_page **pga,
+ cksum_type_t client_cksum_type)
{
__u32 new_cksum;
char *msg;
+ cksum_type_t cksum_type;
if (server_cksum == client_cksum) {
CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
return 0;
}
- new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE);
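+ /* a reply without OBD_MD_FLFLAGS carries no checksum type, which
+ * presumably means an older server that only knows CRC32 */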
+ if (oa->o_valid & OBD_MD_FLFLAGS)
+ cksum_type = cksum_type_unpack(oa->o_flags);
+ else
+ cksum_type = OBD_CKSUM_CRC32;
+
+ new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
+ cksum_type);
- if (new_cksum == server_cksum)
+ if (cksum_type != client_cksum_type)
+ msg = "the server did not use the checksum type specified in "
+ "the original request - likely a protocol problem";
+ else if (new_cksum == server_cksum)
msg = "changed on the client after we checksummed it - "
"likely false positive due to mmap IO (bug 11742)";
else if (new_cksum == client_cksum)
"["LPU64"-"LPU64"]\n",
msg, libcfs_nid2str(peer->nid),
oa->o_valid & OBD_MD_FLFID ? oa->o_fid : (__u64)0,
- oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
+ oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
(__u64)0,
oa->o_id,
oa->o_valid & OBD_MD_FLGROUP ? oa->o_gr : (__u64)0,
pga[0]->off,
pga[page_count-1]->off + pga[page_count-1]->count - 1);
- CERROR("original client csum %x, server csum %x, client csum now %x\n",
- client_cksum, server_cksum, new_cksum);
- return 1;
+ CERROR("original client csum %x (type %x), server csum %x (type %x), "
+ "client csum now %x\n", client_cksum, client_cksum_type,
+ server_cksum, cksum_type, new_cksum);
+ return 1;
}
/* Note rc enters this function as number of bytes transferred */
body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
lustre_swab_ost_body);
if (body == NULL) {
- CERROR ("Can't unpack body\n");
+ CDEBUG(D_INFO, "Can't unpack body\n");
RETURN(-EPROTO);
}
/* set/clear over quota flag for a uid/gid */
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
- body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA))
- lquota_setdq(quota_interface, cli, body->oa.o_uid,
- body->oa.o_gid, body->oa.o_valid,
- body->oa.o_flags);
+ body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
+ unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
+
+ lquota_setdq(quota_interface, cli, qid, body->oa.o_valid,
+ body->oa.o_flags);
+ }
if (rc < 0)
RETURN(rc);
- if (unlikely(aa->aa_oa->o_valid & OBD_MD_FLCKSUM))
+ if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
client_cksum = aa->aa_oa->o_cksum; /* save for later */
osc_update_grant(cli, body);
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
if (rc > 0) {
- CERROR ("Unexpected +ve rc %d\n", rc);
+ CERROR("Unexpected +ve rc %d\n", rc);
RETURN(-EPROTO);
}
LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
- if (unlikely((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) &&
- client_cksum &&
- check_write_checksum(&body->oa, peer, client_cksum,
- body->oa.o_cksum,
- aa->aa_requested_nob,
- aa->aa_page_count,
- aa->aa_ppga)))
+ if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
RETURN(-EAGAIN);
- if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
+ if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
+ check_write_checksum(&body->oa, peer, client_cksum,
+ body->oa.o_cksum, aa->aa_requested_nob,
+ aa->aa_page_count, aa->aa_ppga,
+ cksum_type_unpack(aa->aa_oa->o_flags)))
RETURN(-EAGAIN);
rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
}
/* The rest of this function executes only for OST_READs */
+
+ /* if unwrap_bulk failed, return -EAGAIN to retry */
+ rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
+ if (rc < 0)
+ GOTO(out, rc = -EAGAIN);
+
if (rc > aa->aa_requested_nob) {
CERROR("Unexpected rc %d (%d requested)\n", rc,
aa->aa_requested_nob);
if (rc < aa->aa_requested_nob)
handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
- if (sptlrpc_cli_unwrap_bulk_read(req, rc, aa->aa_page_count,
- aa->aa_ppga))
- GOTO(out, rc = -EAGAIN);
-
- if (unlikely(body->oa.o_valid & OBD_MD_FLCKSUM)) {
+ if (body->oa.o_valid & OBD_MD_FLCKSUM) {
static int cksum_counter;
__u32 server_cksum = body->oa.o_cksum;
char *via;
char *router;
+ cksum_type_t cksum_type;
+ if (body->oa.o_valid & OBD_MD_FLFLAGS)
+ cksum_type = cksum_type_unpack(body->oa.o_flags);
+ else
+ cksum_type = OBD_CKSUM_CRC32;
client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
- aa->aa_ppga, OST_READ);
+ aa->aa_ppga, OST_READ,
+ cksum_type);
if (peer->nid == req->rq_bulk->bd_sender) {
via = router = "";
if (server_cksum == ~0 && rc > 0) {
CERROR("Protocol error: server %s set the 'checksum' "
"bit, but didn't send a checksum. Not fatal, "
- "but please tell CFS.\n",
+ "but please notify on http://bugzilla.lustre.org/\n",
libcfs_nid2str(peer->nid));
} else if (server_cksum != client_cksum) {
LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
aa->aa_ppga[aa->aa_page_count-1]->off +
aa->aa_ppga[aa->aa_page_count-1]->count -
1);
- CERROR("client %x, server %x\n",
- client_cksum, server_cksum);
+ CERROR("client %x, server %x, cksum_type %x\n",
+ client_cksum, server_cksum, cksum_type);
cksum_counter = 0;
aa->aa_oa->o_cksum = client_cksum;
rc = -EAGAIN;
}
out:
if (rc >= 0)
- *aa->aa_oa = body->oa;
+ lustre_get_wire_obdo(aa->aa_oa, &body->oa);
RETURN(rc);
}
restart_bulk:
rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
- page_count, pga, &req, ocapa);
+ page_count, pga, &req, ocapa, 0);
if (rc != 0)
return (rc);
goto restart_bulk;
}
-
+
RETURN (rc);
}
CERROR("too many resend retries, returning error\n");
RETURN(-EIO);
}
-
+
DEBUG_REQ(D_ERROR, request, "redo for recoverable error");
-/*
- body = lustre_msg_buf(request->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
- if (body->oa.o_valid & OBD_MD_FLOSSCAPA)
- ocapa = lustre_unpack_capa(request->rq_reqmsg,
- REQ_REC_OFF + 3);
-*/
+
rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
aa->aa_cli, aa->aa_oa,
NULL /* lsm unused by osc currently */,
- aa->aa_page_count, aa->aa_ppga,
- &new_req, NULL /* ocapa */);
+ aa->aa_page_count, aa->aa_ppga,
+ &new_req, aa->aa_ocapa, 0);
if (rc)
RETURN(rc);
client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
-
+
list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
if (oap->oap_request != NULL) {
LASSERTF(request == oap->oap_request,
request, oap->oap_request);
if (oap->oap_interrupted) {
client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
- ptlrpc_req_finished(new_req);
+ ptlrpc_req_finished(new_req);
RETURN(-EINTR);
}
}
aa->aa_resends++;
new_req->rq_interpret_reply = request->rq_interpret_reply;
new_req->rq_async_args = request->rq_async_args;
- new_req->rq_sent = CURRENT_SECONDS + aa->aa_resends;
+ new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
- new_aa = (struct osc_brw_async_args *)&new_req->rq_async_args;
+ new_aa = ptlrpc_req_async_args(new_req);
- INIT_LIST_HEAD(&new_aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
- INIT_LIST_HEAD(&aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
if (oap->oap_request) {
oap->oap_request = ptlrpc_request_addref(new_req);
}
}
- client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
- DEBUG_REQ(D_INFO, new_req, "new request");
+ new_aa->aa_ocapa = aa->aa_ocapa;
+ aa->aa_ocapa = NULL;
+ /* Using ptlrpc_set_add_req() here is safe because the interpret
+ * callbacks run in check_set context. The only other path with access
+ * to the request from a different thread is the -EINTR case above,
+ * and that is protected by cl_loi_list_lock. */
ptlrpc_set_add_req(set, new_req);
- RETURN(0);
-}
-
-static int brw_interpret(struct ptlrpc_request *req, void *data, int rc)
-{
- struct osc_brw_async_args *aa = data;
- int i;
- int nob = rc;
- ENTRY;
-
- rc = osc_brw_fini_request(req, rc);
- if (osc_recoverable_error(rc)) {
- rc = osc_brw_redo_request(req, aa);
- if (rc == 0)
- RETURN(0);
- }
- if ((rc >= 0) && req->rq_set && req->rq_set->set_countp)
- atomic_add(nob, (atomic_t *)req->rq_set->set_countp);
-
- client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
- aa->aa_cli->cl_w_in_flight--;
- else
- aa->aa_cli->cl_r_in_flight--;
- for (i = 0; i < aa->aa_page_count; i++)
- osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
- osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
-
- RETURN(rc);
-}
-
-static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *lsm, obd_count page_count,
- struct brw_page **pga, struct ptlrpc_request_set *set,
- struct obd_capa *ocapa)
-{
- struct ptlrpc_request *req;
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc, i;
- struct osc_brw_async_args *aa;
- ENTRY;
-
- /* Consume write credits even if doing a sync write -
- * otherwise we may run out of space on OST due to grant. */
- if (cmd == OBD_BRW_WRITE) {
- spin_lock(&cli->cl_loi_list_lock);
- for (i = 0; i < page_count; i++) {
- if (cli->cl_avail_grant >= CFS_PAGE_SIZE)
- osc_consume_write_grant(cli, pga[i]);
- }
- spin_unlock(&cli->cl_loi_list_lock);
- }
-
- rc = osc_brw_prep_request(cmd, cli, oa, lsm, page_count, pga,
- &req, ocapa);
-
- aa = (struct osc_brw_async_args *)&req->rq_async_args;
- if (cmd == OBD_BRW_READ) {
- lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
- lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
- ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
- } else {
- lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
- lprocfs_oh_tally(&cli->cl_write_rpc_hist,
- cli->cl_w_in_flight);
- ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);
- }
-
- if (rc == 0) {
- req->rq_interpret_reply = brw_interpret;
- ptlrpc_set_add_req(set, req);
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cmd == OBD_BRW_READ)
- cli->cl_r_in_flight++;
- else
- cli->cl_w_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- } else if (cmd == OBD_BRW_WRITE) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
- for (i = 0; i < page_count; i++)
- osc_release_write_grant(cli, pga[i], 0);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
- RETURN (rc);
+ DEBUG_REQ(D_INFO, new_req, "new request");
+ RETURN(0);
}
/*
struct obdo *saved_oa = NULL;
struct brw_page **ppga, **orig;
struct obd_import *imp = class_exp2cliimp(exp);
- struct client_obd *cli = &imp->imp_obd->u.cli;
+ struct client_obd *cli;
int rc, page_count_orig;
ENTRY;
+ LASSERT((imp != NULL) && (imp->imp_obd != NULL));
+ cli = &imp->imp_obd->u.cli;
+
if (cmd & OBD_BRW_CHECK) {
/* The caller just wants to know if there's a chance that this
* I/O can succeed */
- if (imp == NULL || imp->imp_invalid)
+ if (imp->imp_invalid)
RETURN(-EIO);
RETURN(0);
}
RETURN(rc);
}
-static int osc_brw_async(int cmd, struct obd_export *exp,
- struct obd_info *oinfo, obd_count page_count,
- struct brw_page *pga, struct obd_trans_info *oti,
- struct ptlrpc_request_set *set)
-{
- struct brw_page **ppga, **orig;
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int page_count_orig;
- int rc = 0;
- ENTRY;
-
- if (cmd & OBD_BRW_CHECK) {
- struct obd_import *imp = class_exp2cliimp(exp);
- /* The caller just wants to know if there's a chance that this
- * I/O can succeed */
-
- if (imp == NULL || imp->imp_invalid)
- RETURN(-EIO);
- RETURN(0);
- }
-
- orig = ppga = osc_build_ppga(pga, page_count);
- if (ppga == NULL)
- RETURN(-ENOMEM);
- page_count_orig = page_count;
-
- sort_brw_pages(ppga, page_count);
- while (page_count) {
- struct brw_page **copy;
- obd_count pages_per_brw;
-
- pages_per_brw = min_t(obd_count, page_count,
- cli->cl_max_pages_per_rpc);
-
- pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
-
- /* use ppga only if single RPC is going to fly */
- if (pages_per_brw != page_count_orig || ppga != orig) {
- OBD_ALLOC(copy, sizeof(*copy) * pages_per_brw);
- if (copy == NULL)
- GOTO(out, rc = -ENOMEM);
- memcpy(copy, ppga, sizeof(*copy) * pages_per_brw);
- } else
- copy = ppga;
-
- rc = async_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
- pages_per_brw, copy, set, oinfo->oi_capa);
-
- if (rc != 0) {
- if (copy != ppga)
- OBD_FREE(copy, sizeof(*copy) * pages_per_brw);
- break;
- }
- if (copy == orig) {
- /* we passed it to async_internal() which is
- * now responsible for releasing memory */
- orig = NULL;
- }
-
- page_count -= pages_per_brw;
- ppga += pages_per_brw;
- }
-out:
- if (orig)
- osc_release_ppga(orig, page_count_orig);
- RETURN(rc);
-}
-
-static void osc_check_rpcs(struct client_obd *cli);
-
/* The companion to osc_enter_cache(), called when @oap is no longer part of
* the dirty accounting. Writeback completes or truncate happens before
* writing starts. Must be called with the loi lock held. */
RETURN(0);
}
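+/* An oap at the head of the urgent list with ASYNC_HP set forces a
+ * high-priority RPC ahead of the normal batching heuristics. */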
+static int lop_makes_hprpc(struct loi_oap_pages *lop)
+{
+ struct osc_async_page *oap;
+ ENTRY;
+
+ if (list_empty(&lop->lop_urgent))
+ RETURN(0);
+
+ oap = list_entry(lop->lop_urgent.next,
+ struct osc_async_page, oap_urgent_item);
+
+ if (oap->oap_async_flags & ASYNC_HP) {
+ CDEBUG(D_CACHE, "hp request forcing RPC\n");
+ RETURN(1);
+ }
+
+ RETURN(0);
+}
+
static void on_list(struct list_head *item, struct list_head *list,
int should_be_on)
{
/* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
* can find pages to build into rpcs quickly */
-static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
+void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
{
- on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
- lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
- lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
+ if (lop_makes_hprpc(&loi->loi_write_lop) ||
+ lop_makes_hprpc(&loi->loi_read_lop)) {
+ /* HP rpc */
+ on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
+ on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
+ } else {
+ on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
+ on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
+ lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
+ lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
+ }
on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
loi->loi_write_lop.lop_num_pending);
cli->cl_pending_r_pages += delta;
}
-/* this is called when a sync waiter receives an interruption. Its job is to
+/**
+ * this is called when a sync waiter receives an interruption. Its job is to
* get the caller woken as soon as possible. If its page hasn't been put in an
* rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
* desiring interruption which will forcefully complete the rpc once the rpc
- * has timed out */
-static void osc_occ_interrupted(struct oig_callback_context *occ)
+ * has timed out.
+ */
+int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap)
{
- struct osc_async_page *oap;
struct loi_oap_pages *lop;
struct lov_oinfo *loi;
+ int rc = -EBUSY;
ENTRY;
- /* XXX member_of() */
- oap = list_entry(occ, struct osc_async_page, oap_occ);
-
- client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
-
+ LASSERT(!oap->oap_interrupted);
oap->oap_interrupted = 1;
/* ok, it's been put in an rpc. only one oap gets a request reference */
if (oap->oap_request != NULL) {
ptlrpc_mark_interrupted(oap->oap_request);
ptlrpcd_wake(oap->oap_request);
- GOTO(unlock, 0);
+ ptlrpc_req_finished(oap->oap_request);
+ oap->oap_request = NULL;
}
- /* we don't get interruption callbacks until osc_trigger_group_io()
- * has been called and put the sync oaps in the pending/urgent lists.*/
+ /*
+ * page completion may be called only if the ->cpo_prep() method was
+ * executed by osc_io_submit(), which also adds the page to the pending list
+ */
if (!list_empty(&oap->oap_pending_item)) {
list_del_init(&oap->oap_pending_item);
list_del_init(&oap->oap_urgent_item);
&loi->loi_write_lop : &loi->loi_read_lop;
lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
loi_list_maint(oap->oap_cli, oap->oap_loi);
-
- oig_complete_one(oap->oap_oig, &oap->oap_occ, -EINTR);
- oap->oap_oig = NULL;
+ rc = oap->oap_caller_ops->ap_completion(env,
+ oap->oap_caller_data,
+ oap->oap_cmd, NULL, -EINTR);
}
-unlock:
- client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
+ RETURN(rc);
}
/* this is trying to propagate async writeback errors back up to the
ar->ar_force_sync = 0;
}
-static void osc_oap_to_pending(struct osc_async_page *oap)
+void osc_oap_to_pending(struct osc_async_page *oap)
{
struct loi_oap_pages *lop;
else
lop = &oap->oap_loi->loi_read_lop;
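+ /* Keep lop_urgent ordered: HP pages go to the head so they are
+ * serviced first, plain urgent pages are appended in arrival order. */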
- if (oap->oap_async_flags & ASYNC_URGENT)
+ if (oap->oap_async_flags & ASYNC_HP)
list_add(&oap->oap_urgent_item, &lop->lop_urgent);
+ else if (oap->oap_async_flags & ASYNC_URGENT)
+ list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
}
/* this must be called holding the loi list lock to give coverage to exit_cache,
* async_flag maintenance, and oap_request */
-static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
+static void osc_ap_completion(const struct lu_env *env,
+ struct client_obd *cli, struct obdo *oa,
struct osc_async_page *oap, int sent, int rc)
{
__u64 xid = 0;
oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
}
- if (oap->oap_oig) {
- osc_exit_cache(cli, oap, sent);
- oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
- oap->oap_oig = NULL;
- EXIT;
- return;
- }
-
- rc = oap->oap_caller_ops->ap_completion(oap->oap_caller_data,
+ rc = oap->oap_caller_ops->ap_completion(env, oap->oap_caller_data,
oap->oap_cmd, oa, rc);
/* ll_ap_completion (from llite) drops PG_locked. so, a new
EXIT;
}
-static int brw_interpret_oap(struct ptlrpc_request *req, void *data, int rc)
+static int brw_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc)
{
- struct osc_async_page *oap, *tmp;
struct osc_brw_async_args *aa = data;
struct client_obd *cli;
+ int async;
ENTRY;
rc = osc_brw_fini_request(req, rc);
RETURN(0);
}
+ if (aa->aa_ocapa) {
+ capa_put(aa->aa_ocapa);
+ aa->aa_ocapa = NULL;
+ }
+
cli = aa->aa_cli;
client_obd_list_lock(&cli->cl_loi_list_lock);
else
cli->cl_r_in_flight--;
- /* the caller may re-use the oap after the completion call so
- * we need to clean it up a little */
- list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
- list_del_init(&oap->oap_rpc_item);
- osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
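+ /* Requests built via osc_build_req() carry their oaps on aa_oaps;
+ * requests from the raw brw path (async_internal) leave it empty,
+ * which is how the two callers are told apart here. */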
+ async = list_empty(&aa->aa_oaps);
+ if (!async) { /* from osc_send_oap_rpc() */
+ struct osc_async_page *oap, *tmp;
+ /* the caller may re-use the oap after the completion call so
+ * we need to clean it up a little */
+ list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
+ list_del_init(&oap->oap_rpc_item);
+ osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
+ }
+ OBDO_FREE(aa->aa_oa);
+ } else { /* from async_internal() */
+ int i;
+ for (i = 0; i < aa->aa_page_count; i++)
+ osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
+
+ if (aa->aa_oa->o_flags & OBD_FL_TEMPORARY)
+ OBDO_FREE(aa->aa_oa);
}
-
osc_wake_cache_waiters(cli);
- osc_check_rpcs(cli);
-
+ osc_check_rpcs(env, cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- OBDO_FREE(aa->aa_oa);
-
+ if (!async)
+ cl_req_completion(env, aa->aa_clerq, rc);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
RETURN(rc);
}
-static struct ptlrpc_request *osc_build_req(struct client_obd *cli,
+static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
+ struct client_obd *cli,
struct list_head *rpc_list,
int page_count, int cmd)
{
struct brw_page **pga = NULL;
struct osc_brw_async_args *aa;
struct obdo *oa = NULL;
- struct obd_async_page_ops *ops = NULL;
+ const struct obd_async_page_ops *ops = NULL;
void *caller_data = NULL;
- struct obd_capa *ocapa;
struct osc_async_page *oap;
+ struct osc_async_page *tmp;
+ struct ost_body *body;
+ struct cl_req *clerq = NULL;
+ enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
+ struct ldlm_lock *lock = NULL;
+ struct cl_req_attr crattr;
int i, rc;
ENTRY;
LASSERT(!list_empty(rpc_list));
+ memset(&crattr, 0, sizeof crattr);
OBD_ALLOC(pga, sizeof(*pga) * page_count);
if (pga == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ GOTO(out, req = ERR_PTR(-ENOMEM));
OBDO_ALLOC(oa);
if (oa == NULL)
i = 0;
list_for_each_entry(oap, rpc_list, oap_rpc_item) {
+ struct cl_page *page = osc_oap2cl_page(oap);
if (ops == NULL) {
ops = oap->oap_caller_ops;
caller_data = oap->oap_caller_data;
+
+ clerq = cl_req_alloc(env, page, crt,
+ 1 /* only 1-object RPCs for now */);
+ if (IS_ERR(clerq))
+ GOTO(out, req = (void *)clerq);
+ lock = oap->oap_ldlm_lock;
}
pga[i] = &oap->oap_brw_page;
pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag);
i++;
+ cl_req_page_add(env, clerq, page);
}
/* always get the data for the obdo for the rpc */
LASSERT(ops != NULL);
- ops->ap_fill_obdo(caller_data, cmd, oa);
- ocapa = ops->ap_lookup_capa(caller_data, cmd);
+ crattr.cra_oa = oa;
+ crattr.cra_capa = NULL;
+ cl_req_attr_set(env, clerq, &crattr, ~0ULL);
+ if (lock) {
+ oa->o_handle = lock->l_remote_handle;
+ oa->o_valid |= OBD_MD_FLHANDLE;
+ }
+
+ rc = cl_req_prep(env, clerq);
+ if (rc != 0) {
+ CERROR("cl_req_prep failed: %d\n", rc);
+ GOTO(out, req = ERR_PTR(rc));
+ }
sort_brw_pages(pga, page_count);
rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
- pga, &req, ocapa);
- capa_put(ocapa);
+ pga, &req, crattr.cra_capa, 1);
if (rc != 0) {
CERROR("prep_req failed: %d\n", rc);
GOTO(out, req = ERR_PTR(rc));
* later setattr before earlier BRW (as determined by the request xid),
* the OST will not use BRW timestamps. Sadly, there is no obvious
* way to do this in a single call. bug 10150 */
- ops->ap_update_obdo(caller_data, cmd, oa,
- OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ cl_req_attr_set(env, clerq, &crattr,
+ OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_brw_async_args *)&req->rq_async_args;
- INIT_LIST_HEAD(&aa->aa_oaps);
+ aa = ptlrpc_req_async_args(req);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
list_splice(rpc_list, &aa->aa_oaps);
- INIT_LIST_HEAD(rpc_list);
-
+ CFS_INIT_LIST_HEAD(rpc_list);
+ aa->aa_clerq = clerq;
out:
+ capa_put(crattr.cra_capa);
if (IS_ERR(req)) {
if (oa)
OBDO_FREE(oa);
if (pga)
OBD_FREE(pga, sizeof(*pga) * page_count);
+ /* this should happen rarely and is pretty bad; it makes the
+ * pending list not follow the dirty order */
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
+ list_del_init(&oap->oap_rpc_item);
+
+ /* queued sync pages can be torn down while the pages
+ * were between the pending list and the rpc */
+ if (oap->oap_interrupted) {
+ CDEBUG(D_INODE, "oap %p interrupted\n", oap);
+ osc_ap_completion(env, cli, NULL, oap, 0,
+ oap->oap_count);
+ continue;
+ }
+ osc_ap_completion(env, cli, NULL, oap, 0, PTR_ERR(req));
+ }
+ if (clerq && !IS_ERR(clerq))
+ cl_req_completion(env, clerq, PTR_ERR(req));
}
RETURN(req);
}
-/* the loi lock is held across this function but it's allowed to release
- * and reacquire it during its work */
-static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
- int cmd, struct loi_oap_pages *lop)
+/**
+ * Prepare pages for async I/O and put them on the send queue.
+ *
+ * \param cli  client obd to send the RPC through
+ * \param loi  object the pages belong to
+ * \param cmd  OBD_BRW_* macros
+ * \param lop  pending pages
+ *
+ * \return zero if the pages were successfully added to the send queue.
+ * \return non-zero if an error occurred.
+ */
+static int
+osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
+ struct lov_oinfo *loi,
+ int cmd, struct loi_oap_pages *lop)
{
struct ptlrpc_request *req;
obd_count page_count = 0;
struct osc_async_page *oap = NULL, *tmp;
struct osc_brw_async_args *aa;
- struct obd_async_page_ops *ops;
+ const struct obd_async_page_ops *ops;
CFS_LIST_HEAD(rpc_list);
unsigned int ending_offset;
unsigned starting_offset = 0;
+ int srvlock = 0;
+ struct cl_object *clob = NULL;
ENTRY;
+ /* If there are HP OAPs we need to handle at least one of them;
+ * move it to the beginning of the pending list for that. */
+ if (!list_empty(&lop->lop_urgent)) {
+ oap = list_entry(lop->lop_urgent.next,
+ struct osc_async_page, oap_urgent_item);
+ if (oap->oap_async_flags & ASYNC_HP)
+ list_move(&oap->oap_pending_item, &lop->lop_pending);
+ }
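+ /* list_move() re-queues the HP oap at the head of lop_pending, so
+ * the scan below picks it up first even if older pages are pending. */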
+
/* first we find the pages we're allowed to work with */
list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
oap_pending_item) {
ops = oap->oap_caller_ops;
- LASSERT(oap->oap_magic == OAP_MAGIC);
+ LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
+ "magic 0x%x\n", oap, oap->oap_magic);
+ if (clob == NULL) {
+ /* pin object in memory, so that completion call-backs
+ * can be safely called under client_obd_list lock. */
+ clob = osc_oap2cl_page(oap)->cp_obj;
+ cl_object_get(clob);
+ }
+
+ if (page_count != 0 &&
+ srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
+ CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
+ " oap %p, page %p, srvlock %u\n",
+ oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
+ break;
+ }
/* in llite being 'ready' equates to the page being locked
* until completion unlocks it. commit_write submits a page
* as not ready because its unlock will happen unconditionally
* will still be on the dirty list). we could call in
* at the end of ll_file_write to process the queue again. */
if (!(oap->oap_async_flags & ASYNC_READY)) {
- int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
+ int rc = ops->ap_make_ready(env, oap->oap_caller_data,
+ cmd);
if (rc < 0)
CDEBUG(D_INODE, "oap %p page %p returned %d "
"instead of ready\n", oap,
/*
* Page submitted for IO has to be locked. Either by
* ->ap_make_ready() or by higher layers.
- *
- * XXX nikita: this assertion should be adjusted when lustre
- * starts using PG_writeback for pages being written out.
*/
-#if defined(__KERNEL__) && defined(__LINUX__)
- LASSERT(PageLocked(oap->oap_page));
+#if defined(__KERNEL__) && defined(__linux__)
+ {
+ struct cl_page *page;
+
+ page = osc_oap2cl_page(oap);
+
+ if (page->cp_type == CPT_CACHEABLE &&
+ !(PageLocked(oap->oap_page) &&
+ (CheckWriteback(oap->oap_page, cmd)))) {
+ CDEBUG(D_PAGE, "page %p lost wb %lx/%x\n",
+ oap->oap_page,
+ (long)oap->oap_page->flags,
+ oap->oap_async_flags);
+ LBUG();
+ }
+ }
#endif
/* If there is a gap at the start of this page, it can't merge
* with any previous page, so we'll hand the network a
(PTLRPC_MAX_BRW_SIZE - 1);
/* ask the caller for the size of the io as the rpc leaves. */
- if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
+ if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
oap->oap_count =
- ops->ap_refresh_count(oap->oap_caller_data,cmd);
+ ops->ap_refresh_count(env, oap->oap_caller_data,
+ cmd);
+ LASSERT(oap->oap_page_off + oap->oap_count <= CFS_PAGE_SIZE);
+ }
if (oap->oap_count <= 0) {
CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
oap->oap_count);
- osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
+ osc_ap_completion(env, cli, NULL,
+ oap, 0, oap->oap_count);
continue;
}
/* now put the page back in our accounting */
list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ if (page_count == 0)
+ srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
if (++page_count >= cli->cl_max_pages_per_rpc)
break;
osc_wake_cache_waiters(cli);
- if (page_count == 0)
- RETURN(0);
-
loi_list_maint(cli, loi);
client_obd_list_unlock(&cli->cl_loi_list_lock);
- req = osc_build_req(cli, &rpc_list, page_count, cmd);
- if (IS_ERR(req)) {
- /* this should happen rarely and is pretty bad, it makes the
- * pending list not follow the dirty order */
+ if (clob != NULL)
+ cl_object_put(env, clob);
+
+ if (page_count == 0) {
client_obd_list_lock(&cli->cl_loi_list_lock);
- list_for_each_entry_safe(oap, tmp, &rpc_list, oap_rpc_item) {
- list_del_init(&oap->oap_rpc_item);
+ RETURN(0);
+ }
- /* queued sync pages can be torn down while the pages
- * were between the pending list and the rpc */
- if (oap->oap_interrupted) {
- CDEBUG(D_INODE, "oap %p interrupted\n", oap);
- osc_ap_completion(cli, NULL, oap, 0,
- oap->oap_count);
- continue;
- }
- osc_ap_completion(cli, NULL, oap, 0, PTR_ERR(req));
- }
+ req = osc_build_req(env, cli, &rpc_list, page_count, cmd);
+ if (IS_ERR(req)) {
+ LASSERT(list_empty(&rpc_list));
loi_list_maint(cli, loi);
RETURN(PTR_ERR(req));
}
- aa = (struct osc_brw_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
if (cmd == OBD_BRW_READ) {
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
(starting_offset >> CFS_PAGE_SHIFT) + 1);
- ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
} else {
lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_write_rpc_hist,
cli->cl_w_in_flight);
lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
(starting_offset >> CFS_PAGE_SHIFT) + 1);
- ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);
}
+ ptlrpc_lprocfs_brw(req, aa->aa_requested_nob);
client_obd_list_lock(&cli->cl_loi_list_lock);
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
- req->rq_interpret_reply = brw_interpret_oap;
- ptlrpcd_add_req(req);
+ req->rq_interpret_reply = brw_interpret;
+ ptlrpcd_add_req(req, PSCOPE_BRW);
RETURN(1);
}
#define LOI_DEBUG(LOI, STR, args...) \
CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
- !list_empty(&(LOI)->loi_cli_item), \
+ !list_empty(&(LOI)->loi_ready_item) || \
+ !list_empty(&(LOI)->loi_hp_ready_item), \
(LOI)->loi_write_lop.lop_num_pending, \
!list_empty(&(LOI)->loi_write_lop.lop_urgent), \
(LOI)->loi_read_lop.lop_num_pending, \
struct lov_oinfo *osc_next_loi(struct client_obd *cli)
{
ENTRY;
- /* first return all objects which we already know to have
- * pages ready to be stuffed into rpcs */
+
+ /* First return objects that have blocked locks so that they
+ * will be flushed quickly and other clients can get the lock,
+ * then objects which have pages ready to be stuffed into RPCs */
+ if (!list_empty(&cli->cl_loi_hp_ready_list))
+ RETURN(list_entry(cli->cl_loi_hp_ready_list.next,
+ struct lov_oinfo, loi_hp_ready_item));
if (!list_empty(&cli->cl_loi_ready_list))
RETURN(list_entry(cli->cl_loi_ready_list.next,
- struct lov_oinfo, loi_cli_item));
+ struct lov_oinfo, loi_ready_item));
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
RETURN(NULL);
}
+static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi)
+{
+ struct osc_async_page *oap;
+ int hprpc = 0;
+
+ if (!list_empty(&loi->loi_write_lop.lop_urgent)) {
+ oap = list_entry(loi->loi_write_lop.lop_urgent.next,
+ struct osc_async_page, oap_urgent_item);
+ hprpc = !!(oap->oap_async_flags & ASYNC_HP);
+ }
+
+ if (!hprpc && !list_empty(&loi->loi_read_lop.lop_urgent)) {
+ oap = list_entry(loi->loi_read_lop.lop_urgent.next,
+ struct osc_async_page, oap_urgent_item);
+ hprpc = !!(oap->oap_async_flags & ASYNC_HP);
+ }
+
+ return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
+}
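+
+/* The "+ hprpc" slack above admits exactly one extra RPC when a high-priority
+ * page is waiting: with cl_max_rpcs_in_flight == 8 and 8 RPCs already out, an
+ * ASYNC_HP oap still gets a 9th RPC sent rather than queueing behind them. */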
+
/* called with the loi list lock held */
-static void osc_check_rpcs(struct client_obd *cli)
+void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
{
struct lov_oinfo *loi;
int rc = 0, race_counter = 0;
while ((loi = osc_next_loi(cli)) != NULL) {
LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
- if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
+ if (osc_max_rpc_in_flight(cli, loi))
break;
/* attempt some read/write balancing by alternating between
* partial read pending queue when we're given this object to
* do io on writes while there are cache waiters */
if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
- rc = osc_send_oap_rpc(cli, loi, OBD_BRW_WRITE,
+ rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE,
&loi->loi_write_lop);
if (rc < 0)
break;
race_counter++;
}
if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
- rc = osc_send_oap_rpc(cli, loi, OBD_BRW_READ,
+ rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ,
&loi->loi_read_lop);
if (rc < 0)
break;
/* attempt some inter-object balancing by issuing rpcs
 * for each object in turn */
- if (!list_empty(&loi->loi_cli_item))
- list_del_init(&loi->loi_cli_item);
+ if (!list_empty(&loi->loi_hp_ready_item))
+ list_del_init(&loi->loi_hp_ready_item);
+ if (!list_empty(&loi->loi_ready_item))
+ list_del_init(&loi->loi_ready_item);
if (!list_empty(&loi->loi_write_item))
list_del_init(&loi->loi_write_item);
if (!list_empty(&loi->loi_read_item))
RETURN(rc);
};
+/**
+ * Non-blocking version of osc_enter_cache() that consumes grant only when it
+ * is available.
+ */
+int osc_enter_cache_try(const struct lu_env *env,
+ struct client_obd *cli, struct lov_oinfo *loi,
+ struct osc_async_page *oap, int transient)
+{
+ int has_grant;
+
+ has_grant = cli->cl_avail_grant >= CFS_PAGE_SIZE;
+ if (has_grant) {
+ osc_consume_write_grant(cli, &oap->oap_brw_page);
+ if (transient) {
+ cli->cl_dirty_transit += CFS_PAGE_SIZE;
+ atomic_inc(&obd_dirty_transit_pages);
+ oap->oap_brw_flags |= OBD_BRW_NOCACHE;
+ }
+ }
+ return has_grant;
+}
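+
+/* A minimal caller sketch (hypothetical, for illustration; real callers in
+ * this file already hold cli->cl_loi_list_lock):
+ *
+ *      client_obd_list_lock(&cli->cl_loi_list_lock);
+ *      if (!osc_enter_cache_try(env, cli, loi, oap, 0))
+ *              rc = osc_enter_cache(env, cli, loi, oap); // may drop lock
+ *      client_obd_list_unlock(&cli->cl_loi_list_lock);
+ */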
+
/* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
* grant or cache space. */
-static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
+static int osc_enter_cache(const struct lu_env *env,
+ struct client_obd *cli, struct lov_oinfo *loi,
struct osc_async_page *oap)
{
struct osc_cache_waiter ocw;
RETURN(-EDQUOT);
/* Hopefully normal case - cache space and write credits available */
- if ((cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max) &&
- (atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) &&
- (cli->cl_avail_grant >= CFS_PAGE_SIZE)) {
- /* account for ourselves */
- osc_consume_write_grant(cli, &oap->oap_brw_page);
+ if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
+ atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
+ osc_enter_cache_try(env, cli, loi, oap, 0))
RETURN(0);
- }
/* Make sure that there are write rpcs in flight to wait for. This
* is a little silly as this object may not have any pending but
ocw.ocw_rc = 0;
loi_list_maint(cli, loi);
- osc_check_rpcs(cli);
+ osc_check_rpcs(env, cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "sleeping for cache space\n");
RETURN(-EDQUOT);
}
+
int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
struct lov_oinfo *loi, cfs_page_t *page,
- obd_off offset, struct obd_async_page_ops *ops,
- void *data, void **res)
+ obd_off offset, const struct obd_async_page_ops *ops,
+ void *data, void **res, int nocache,
+ struct lustre_handle *lockh)
{
struct osc_async_page *oap;
+
ENTRY;
if (!page)
oap->oap_page = page;
oap->oap_obj_off = offset;
+ if (!client_is_remote(exp) &&
+ cfs_capable(CFS_CAP_SYS_RESOURCE))
+ oap->oap_brw_flags = OBD_BRW_NOQUOTA;
+
+ LASSERT(!(offset & ~CFS_PAGE_MASK));
CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
+ CFS_INIT_LIST_HEAD(&oap->oap_page_list);
- oap->oap_occ.occ_interrupted = osc_occ_interrupted;
-
+ spin_lock_init(&oap->oap_lock);
CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
RETURN(0);
}
return oap;
};
-static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie,
- int cmd, obd_off off, int count,
- obd_flag brw_flags, enum async_flags async_flags)
+int osc_queue_async_io(const struct lu_env *env,
+ struct obd_export *exp, struct lov_stripe_md *lsm,
+ struct lov_oinfo *loi, void *cookie,
+ int cmd, obd_off off, int count,
+ obd_flag brw_flags, enum async_flags async_flags)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct osc_async_page *oap;
RETURN(-EBUSY);
/* check if the file's owner/group is over quota */
-#ifdef HAVE_QUOTA_SUPPORT
- if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)){
- struct obd_async_page_ops *ops;
- struct obdo *oa;
+ if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) {
+ struct cl_object *obj;
+ struct cl_attr attr; /* XXX put attr into thread info */
+ unsigned int qid[MAXQUOTAS];
- OBDO_ALLOC(oa);
- if (oa == NULL)
- RETURN(-ENOMEM);
+ obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj);
- ops = oap->oap_caller_ops;
- ops->ap_fill_obdo(oap->oap_caller_data, cmd, oa);
- if (lquota_chkdq(quota_interface, cli, oa->o_uid, oa->o_gid) ==
- NO_QUOTA)
- rc = -EDQUOT;
+ cl_object_attr_lock(obj);
+ rc = cl_object_attr_get(env, obj, &attr);
+ cl_object_attr_unlock(obj);
- OBDO_FREE(oa);
+ qid[USRQUOTA] = attr.cat_uid;
+ qid[GRPQUOTA] = attr.cat_gid;
+ if (rc == 0 &&
+ lquota_chkdq(quota_interface, cli, qid) == NO_QUOTA)
+ rc = -EDQUOT;
if (rc)
RETURN(rc);
}
-#endif
if (loi == NULL)
loi = lsm->lsm_oinfo[0];
client_obd_list_lock(&cli->cl_loi_list_lock);
+ LASSERT(off + count <= CFS_PAGE_SIZE);
oap->oap_cmd = cmd;
oap->oap_page_off = off;
oap->oap_count = count;
oap->oap_brw_flags = brw_flags;
+ /* Give a hint to OST that requests are coming from kswapd - bug19529 */
+ if (libcfs_memory_pressure_get())
+ oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
oap->oap_async_flags = async_flags;
if (cmd & OBD_BRW_WRITE) {
- rc = osc_enter_cache(cli, loi, oap);
+ rc = osc_enter_cache(env, cli, loi, oap);
if (rc) {
client_obd_list_unlock(&cli->cl_loi_list_lock);
RETURN(rc);
LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
cmd);
- osc_check_rpcs(cli);
+ osc_check_rpcs(env, cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
RETURN(0);
/* aka (~was & now & flag), but this is more clear :) */
#define SETTING(was, now, flag) (!(was & flag) && (now & flag))
-static int osc_set_async_flags(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie,
- obd_flag async_flags)
+int osc_set_async_flags_base(struct client_obd *cli,
+ struct lov_oinfo *loi, struct osc_async_page *oap,
+ obd_flag async_flags)
{
- struct client_obd *cli = &exp->exp_obd->u.cli;
struct loi_oap_pages *lop;
- struct osc_async_page *oap;
- int rc = 0;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
-
- /*
- * bug 7311: OST-side locking is only supported for liblustre for now
- * (and liblustre never calls obd_set_async_flags(). I hope.), generic
- * implementation has to handle case where OST-locked page was picked
- * up by, e.g., ->writepage().
- */
- LASSERT(!(oap->oap_brw_flags & OBD_BRW_SRVLOCK));
- LASSERT(!LIBLUSTRE_CLIENT); /* check that liblustre angels do fear to
- * tread here. */
-
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
- RETURN(-EIO);
-
- if (loi == NULL)
- loi = lsm->lsm_oinfo[0];
+ LASSERT(!list_empty(&oap->oap_pending_item));
if (oap->oap_cmd & OBD_BRW_WRITE) {
lop = &loi->loi_write_lop;
lop = &loi->loi_read_lop;
}
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- if (list_empty(&oap->oap_pending_item))
- GOTO(out, rc = -EINVAL);
-
if ((oap->oap_async_flags & async_flags) == async_flags)
- GOTO(out, rc = 0);
+ RETURN(0);
if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
oap->oap_async_flags |= ASYNC_READY;
- if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT)) {
- if (list_empty(&oap->oap_rpc_item)) {
+ if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
+ list_empty(&oap->oap_rpc_item)) {
+ if (oap->oap_async_flags & ASYNC_HP)
list_add(&oap->oap_urgent_item, &lop->lop_urgent);
- loi_list_maint(cli, loi);
- }
+ else
+ list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
+ oap->oap_async_flags |= ASYNC_URGENT;
+ loi_list_maint(cli, loi);
}
LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
oap->oap_async_flags);
-out:
- osc_check_rpcs(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- RETURN(rc);
-}
-
-static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lov_oinfo *loi,
- struct obd_io_group *oig, void *cookie,
- int cmd, obd_off off, int count,
- obd_flag brw_flags,
- obd_flag async_flags)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct osc_async_page *oap;
- struct loi_oap_pages *lop;
- int rc = 0;
- ENTRY;
-
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
-
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
- RETURN(-EIO);
-
- if (!list_empty(&oap->oap_pending_item) ||
- !list_empty(&oap->oap_urgent_item) ||
- !list_empty(&oap->oap_rpc_item))
- RETURN(-EBUSY);
-
- if (loi == NULL)
- loi = lsm->lsm_oinfo[0];
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- oap->oap_cmd = cmd;
- oap->oap_page_off = off;
- oap->oap_count = count;
- oap->oap_brw_flags = brw_flags;
- oap->oap_async_flags = async_flags;
-
- if (cmd & OBD_BRW_WRITE)
- lop = &loi->loi_write_lop;
- else
- lop = &loi->loi_read_lop;
-
- list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
- if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
- oap->oap_oig = oig;
- rc = oig_add_one(oig, &oap->oap_occ);
- }
-
- LOI_DEBUG(loi, "oap %p page %p on group pending: rc %d\n",
- oap, oap->oap_page, rc);
-
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- RETURN(rc);
-}
-
-static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
- struct loi_oap_pages *lop, int cmd)
-{
- struct list_head *pos, *tmp;
- struct osc_async_page *oap;
-
- list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
- oap = list_entry(pos, struct osc_async_page, oap_pending_item);
- list_del(&oap->oap_pending_item);
- osc_oap_to_pending(oap);
- }
- loi_list_maint(cli, loi);
-}
-
-static int osc_trigger_group_io(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi,
- struct obd_io_group *oig)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- ENTRY;
-
- if (loi == NULL)
- loi = lsm->lsm_oinfo[0];
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
- osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
-
- osc_check_rpcs(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
RETURN(0);
}
-static int osc_teardown_async_page(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie)
+int osc_teardown_async_page(struct obd_export *exp,
+ struct lov_stripe_md *lsm,
+ struct lov_oinfo *loi, void *cookie)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct loi_oap_pages *lop;
if (!list_empty(&oap->oap_urgent_item)) {
list_del_init(&oap->oap_urgent_item);
- oap->oap_async_flags &= ~ASYNC_URGENT;
+ oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
}
if (!list_empty(&oap->oap_pending_item)) {
list_del_init(&oap->oap_pending_item);
lop_update_pending(cli, lop, oap->oap_cmd, -1);
}
loi_list_maint(cli, loi);
-
LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
out:
client_obd_list_unlock(&cli->cl_loi_list_lock);
RETURN(rc);
}
-static void osc_set_data_with_check(struct lustre_handle *lockh, void *data,
- int flags)
+static void osc_set_lock_data_with_check(struct ldlm_lock *lock,
+ struct ldlm_enqueue_info *einfo,
+ int flags)
{
- struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+ void *data = einfo->ei_cbdata;
+
+ LASSERT(lock != NULL);
+ LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
+ LASSERT(lock->l_resource->lr_type == einfo->ei_type);
+ LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
+ LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
- if (lock == NULL) {
- CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
- return;
- }
lock_res_and_lock(lock);
-#ifdef __KERNEL__
-#ifdef __LINUX__
- /* Liang XXX: Darwin and Winnt checking should be added */
- if (lock->l_ast_data && lock->l_ast_data != data) {
- struct inode *new_inode = data;
- struct inode *old_inode = lock->l_ast_data;
- if (!(old_inode->i_state & I_FREEING))
- LDLM_ERROR(lock, "inconsistent l_ast_data found");
- LASSERTF(old_inode->i_state & I_FREEING,
- "Found existing inode %p/%lu/%u state %lu in lock: "
- "setting data to %p/%lu/%u\n", old_inode,
- old_inode->i_ino, old_inode->i_generation,
- old_inode->i_state,
- new_inode, new_inode->i_ino, new_inode->i_generation);
- }
-#endif
-#endif
+ spin_lock(&osc_ast_guard);
+ LASSERT(lock->l_ast_data == NULL || lock->l_ast_data == data);
lock->l_ast_data = data;
- lock->l_flags |= (flags & LDLM_FL_NO_LRU);
+ spin_unlock(&osc_ast_guard);
unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
+}
+
+static void osc_set_data_with_check(struct lustre_handle *lockh,
+ struct ldlm_enqueue_info *einfo,
+ int flags)
+{
+ struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+
+ if (lock != NULL) {
+ osc_set_lock_data_with_check(lock, einfo, flags);
+ LDLM_LOCK_PUT(lock);
+ } else
+ CERROR("lockh %p, data %p - client evicted?\n",
+ lockh, einfo->ei_cbdata);
}
static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
ldlm_iterator_t replace, void *data)
{
- struct ldlm_res_id res_id = { .name = {0} };
+ struct ldlm_res_id res_id;
struct obd_device *obd = class_exp2obd(exp);
- res_id.name[0] = lsm->lsm_object_id;
- res_id.name[2] = lsm->lsm_object_gr;
-
+ osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_gr, &res_id);
ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
return 0;
}
-static int osc_enqueue_fini(struct ptlrpc_request *req, struct obd_info *oinfo,
- int intent, int rc)
+static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
+ obd_enqueue_update_f upcall, void *cookie,
+ int *flags, int rc)
{
+ int intent = *flags & LDLM_FL_HAS_INTENT;
ENTRY;
if (intent) {
/* The request was created before ldlm_cli_enqueue call. */
if (rc == ELDLM_LOCK_ABORTED) {
struct ldlm_reply *rep;
+ rep = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_REP);
- /* swabbed by ldlm_cli_enqueue() */
- LASSERT(lustre_rep_swabbed(req, DLM_LOCKREPLY_OFF));
- rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
- sizeof(*rep));
LASSERT(rep != NULL);
if (rep->lock_policy_res1)
rc = rep->lock_policy_res1;
}
if ((intent && rc == ELDLM_LOCK_ABORTED) || !rc) {
+ *flags |= LDLM_FL_LVB_READY;
CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
- oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_size,
- oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_blocks,
- oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_mtime);
+ lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
}
/* Call the update callback. */
- rc = oinfo->oi_cb_up(oinfo, rc);
+ rc = (*upcall)(cookie, rc);
RETURN(rc);
}
-static int osc_enqueue_interpret(struct ptlrpc_request *req,
+static int osc_enqueue_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_enqueue_args *aa, int rc)
{
- int intent = aa->oa_oi->oi_flags & LDLM_FL_HAS_INTENT;
- struct lov_stripe_md *lsm = aa->oa_oi->oi_md;
struct ldlm_lock *lock;
+ struct lustre_handle handle;
+ __u32 mode;
+
+ /* Make a local copy of the lock handle and mode, because aa->oa_*
+ * might be freed at any time after the lock upcall has been called. */
+ lustre_handle_copy(&handle, aa->oa_lockh);
+ mode = aa->oa_ei->ei_mode;
/* ldlm_cli_enqueue is holding a reference on the lock, so it must
* be valid. */
- lock = ldlm_handle2lock(aa->oa_oi->oi_lockh);
+ lock = ldlm_handle2lock(&handle);
+
+ /* Take an additional reference so that a blocking AST that
+ * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
+ * to arrive after the upcall has been executed by
+ * osc_enqueue_fini(). */
+ ldlm_lock_addref(&handle, mode);
/* Complete obtaining the lock procedure. */
rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
- aa->oa_ei->ei_mode,
- &aa->oa_oi->oi_flags,
- &lsm->lsm_oinfo[0]->loi_lvb,
- sizeof(lsm->lsm_oinfo[0]->loi_lvb),
- lustre_swab_ost_lvb,
- aa->oa_oi->oi_lockh, rc);
-
+ mode, aa->oa_flags, aa->oa_lvb,
+ sizeof(*aa->oa_lvb), lustre_swab_ost_lvb,
+ &handle, rc);
/* Complete osc stuff. */
- rc = osc_enqueue_fini(req, aa->oa_oi, intent, rc);
+ rc = osc_enqueue_fini(req, aa->oa_lvb,
+ aa->oa_upcall, aa->oa_cookie, aa->oa_flags, rc);
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
/* Release the lock for async request. */
- if (lustre_handle_is_used(aa->oa_oi->oi_lockh) && rc == ELDLM_OK)
- ldlm_lock_decref(aa->oa_oi->oi_lockh, aa->oa_ei->ei_mode);
+ if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
+ /*
+ * Releases a reference taken by ldlm_cli_enqueue(), if it is
+ * not already released by
+ * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
+ */
+ ldlm_lock_decref(&handle, mode);
LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
- aa->oa_oi->oi_lockh, req, aa);
+ aa->oa_lockh, req, aa);
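+ /* Drop the additional reference taken before ldlm_cli_enqueue_fini()
+ * above, now that the upcall has run. */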
+ ldlm_lock_decref(&handle, mode);
LDLM_LOCK_PUT(lock);
return rc;
}
+void osc_update_enqueue(struct lustre_handle *lov_lockhp,
+ struct lov_oinfo *loi, int flags,
+ struct ost_lvb *lvb, __u32 mode, int rc)
+{
+ if (rc == ELDLM_OK) {
+ struct ldlm_lock *lock = ldlm_handle2lock(lov_lockhp);
+ __u64 tmp;
+
+ LASSERT(lock != NULL);
+ loi->loi_lvb = *lvb;
+ tmp = loi->loi_lvb.lvb_size;
+ /* Extend KMS up to the end of this lock and no further.
+ * A lock on [x,y] means a KMS of up to y + 1 bytes! */
+ if (tmp > lock->l_policy_data.l_extent.end)
+ tmp = lock->l_policy_data.l_extent.end + 1;
+ if (tmp >= loi->loi_kms) {
+ LDLM_DEBUG(lock, "lock acquired, setting rss="LPU64
+ ", kms="LPU64, loi->loi_lvb.lvb_size, tmp);
+ loi_kms_set(loi, tmp);
+ } else {
+ LDLM_DEBUG(lock, "lock acquired, setting rss="
+ LPU64"; leaving kms="LPU64", end="LPU64,
+ loi->loi_lvb.lvb_size, loi->loi_kms,
+ lock->l_policy_data.l_extent.end);
+ }
+ ldlm_lock_allow_match(lock);
+ LDLM_LOCK_PUT(lock);
+ } else if (rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT)) {
+ loi->loi_lvb = *lvb;
+ CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
+ " kms="LPU64"\n", loi->loi_lvb.lvb_size, loi->loi_kms);
+ rc = ELDLM_OK;
+ }
+}
+EXPORT_SYMBOL(osc_update_enqueue);
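+
+/* Worked example of the KMS update above: a granted lock on extent [0, 65535]
+ * with lvb_size = 1MB caps tmp at l_extent.end + 1 = 65536, so kms becomes
+ * 65536; a lock covering [0, OBD_OBJECT_EOF] lets kms grow to the full
+ * lvb_size. */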
+
+struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
+
/* When enqueuing asynchronously, locks are not ordered, so we can obtain a lock
 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
 * other synchronous requests, however keeping some locks and trying to obtain
 * others may take a considerable amount of time if an OST fails; and
 * when other sync requests cannot get a lock released by a client, that client
 * is excluded from the cluster -- such scenarios make life difficult, so
 * release locks just after they are obtained. */
-static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
- struct ldlm_enqueue_info *einfo,
- struct ptlrpc_request_set *rqset)
+int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
+ int *flags, ldlm_policy_data_t *policy,
+ struct ost_lvb *lvb, int kms_valid,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ldlm_enqueue_info *einfo,
+ struct lustre_handle *lockh,
+ struct ptlrpc_request_set *rqset, int async)
{
- struct ldlm_res_id res_id = { .name = {0} };
struct obd_device *obd = exp->exp_obd;
- struct ldlm_reply *rep;
struct ptlrpc_request *req = NULL;
- int intent = oinfo->oi_flags & LDLM_FL_HAS_INTENT;
+ int intent = *flags & LDLM_FL_HAS_INTENT;
+ ldlm_mode_t mode;
int rc;
ENTRY;
- res_id.name[0] = oinfo->oi_md->lsm_object_id;
- res_id.name[2] = oinfo->oi_md->lsm_object_gr;
-
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother. */
- oinfo->oi_policy.l_extent.start -=
- oinfo->oi_policy.l_extent.start & ~CFS_PAGE_MASK;
- oinfo->oi_policy.l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
+ policy->l_extent.end |= ~CFS_PAGE_MASK;
- if (oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid == 0)
+ /*
+ * kms is not valid when either the object is completely fresh (so that
+ * no locks are cached), or the object was evicted. In the latter case a
+ * cached lock cannot be used, because it would prime the inode state
+ * with a potentially stale LVB.
+ */
+ if (!kms_valid)
goto no_match;
/* Next, search for already existing extent locks that will cover us */
- rc = ldlm_lock_match(obd->obd_namespace,
- oinfo->oi_flags | LDLM_FL_LVB_READY, &res_id,
- einfo->ei_type, &oinfo->oi_policy, einfo->ei_mode,
- oinfo->oi_lockh);
- if (rc == 1) {
- osc_set_data_with_check(oinfo->oi_lockh, einfo->ei_cbdata,
- oinfo->oi_flags);
- if (intent) {
- /* I would like to be able to ASSERT here that rss <=
- * kms, but I can't, for reasons which are explained in
- * lov_enqueue() */
- }
-
- /* We already have a lock, and it's referenced */
- oinfo->oi_cb_up(oinfo, ELDLM_OK);
-
- /* For async requests, decref the lock. */
- if (rqset)
- ldlm_lock_decref(oinfo->oi_lockh, einfo->ei_mode);
-
- RETURN(ELDLM_OK);
- }
-
/* If we're trying to read, we also search for an existing PW lock. The
* VFS and page cache already protect us locally, so lots of readers/
* writers can share a single PW lock.
* At some point we should cancel the read lock instead of making them
* send us a blocking callback, but there are problems with canceling
* locks out from other users right now, too. */
+ mode = einfo->ei_mode;
+ if (einfo->ei_mode == LCK_PR)
+ mode |= LCK_PW;
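+ /* e.g. an LCK_PR request matches against LCK_PR|LCK_PW, since an
+ * existing PW lock already covers reads locally. */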
+ mode = ldlm_lock_match(obd->obd_namespace,
+ *flags | LDLM_FL_LVB_READY, res_id,
+ einfo->ei_type, policy, mode, lockh, 0);
+ if (mode) {
+ struct ldlm_lock *matched = ldlm_handle2lock(lockh);
+
+ if (matched->l_ast_data == NULL ||
+ matched->l_ast_data == einfo->ei_cbdata) {
+ /* addref the lock only for non-async requests when a PW
+ * lock was matched although we asked for PR. */
+ if (!rqset && einfo->ei_mode != mode)
+ ldlm_lock_addref(lockh, LCK_PR);
+ osc_set_lock_data_with_check(matched, einfo, *flags);
+ if (intent) {
+ /* I would like to be able to ASSERT here that
+ * rss <= kms, but I can't, for reasons which
+ * are explained in lov_enqueue() */
+ }
- if (einfo->ei_mode == LCK_PR) {
- rc = ldlm_lock_match(obd->obd_namespace,
- oinfo->oi_flags | LDLM_FL_LVB_READY,
- &res_id, einfo->ei_type, &oinfo->oi_policy,
- LCK_PW, oinfo->oi_lockh);
- if (rc == 1) {
- /* FIXME: This is not incredibly elegant, but it might
- * be more elegant than adding another parameter to
- * lock_match. I want a second opinion. */
- /* addref the lock only if not async requests. */
- if (!rqset)
- ldlm_lock_addref(oinfo->oi_lockh, LCK_PR);
- osc_set_data_with_check(oinfo->oi_lockh,
- einfo->ei_cbdata,
- oinfo->oi_flags);
- oinfo->oi_cb_up(oinfo, ELDLM_OK);
- ldlm_lock_decref(oinfo->oi_lockh, LCK_PW);
+ /* We already have a lock, and it's referenced */
+ (*upcall)(cookie, ELDLM_OK);
+
+ /* For async requests, decref the lock. */
+ if (einfo->ei_mode != mode)
+ ldlm_lock_decref(lockh, LCK_PW);
+ else if (rqset)
+ ldlm_lock_decref(lockh, einfo->ei_mode);
+ LDLM_LOCK_PUT(matched);
RETURN(ELDLM_OK);
- }
+ } else
+ ldlm_lock_decref(lockh, mode);
+ LDLM_LOCK_PUT(matched);
}
no_match:
if (intent) {
- int size[3] = {
- [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
- [DLM_LOCKREQ_OFF] = sizeof(struct ldlm_request),
- [DLM_LOCKREQ_OFF + 1] = 0 };
-
- req = ldlm_prep_enqueue_req(exp, 2, size, NULL, 0);
+ CFS_LIST_HEAD(cancels);
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_LDLM_ENQUEUE_LVB);
if (req == NULL)
RETURN(-ENOMEM);
- size[DLM_LOCKREPLY_OFF] = sizeof(*rep);
- size[DLM_REPLY_REC_OFF] =
- sizeof(oinfo->oi_md->lsm_oinfo[0]->loi_lvb);
- ptlrpc_req_set_repsize(req, 3, size);
+ rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
+ if (rc)
+ RETURN(rc);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ sizeof *lvb);
+ ptlrpc_request_set_replen(req);
}
/* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
- oinfo->oi_flags &= ~LDLM_FL_BLOCK_GRANTED;
+ *flags &= ~LDLM_FL_BLOCK_GRANTED;
- rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id,
- &oinfo->oi_policy, &oinfo->oi_flags,
- &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
- sizeof(oinfo->oi_md->lsm_oinfo[0]->loi_lvb),
- lustre_swab_ost_lvb, oinfo->oi_lockh,
- rqset ? 1 : 0);
+ rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
+ sizeof(*lvb), lustre_swab_ost_lvb, lockh, async);
if (rqset) {
if (!rc) {
struct osc_enqueue_args *aa;
CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_enqueue_args *)&req->rq_async_args;
- aa->oa_oi = oinfo;
+ aa = ptlrpc_req_async_args(req);
aa->oa_ei = einfo;
aa->oa_exp = exp;
-
- req->rq_interpret_reply = osc_enqueue_interpret;
- ptlrpc_set_add_req(rqset, req);
+ aa->oa_flags = flags;
+ aa->oa_upcall = upcall;
+ aa->oa_cookie = cookie;
+ aa->oa_lvb = lvb;
+ aa->oa_lockh = lockh;
+
+ req->rq_interpret_reply =
+ (ptlrpc_interpterer_t)osc_enqueue_interpret;
+ if (rqset == PTLRPCD_SET)
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
+ else
+ ptlrpc_set_add_req(rqset, req);
} else if (intent) {
ptlrpc_req_finished(req);
}
RETURN(rc);
}
- rc = osc_enqueue_fini(req, oinfo, intent, rc);
+ rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, rc);
if (intent)
ptlrpc_req_finished(req);
RETURN(rc);
}
-static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
- int *flags, void *data, struct lustre_handle *lockh)
+static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
+ struct ldlm_enqueue_info *einfo,
+ struct ptlrpc_request_set *rqset)
{
- struct ldlm_res_id res_id = { .name = {0} };
- struct obd_device *obd = exp->exp_obd;
+ struct ldlm_res_id res_id;
int rc;
- int lflags = *flags;
ENTRY;
- res_id.name[0] = lsm->lsm_object_id;
- res_id.name[2] = lsm->lsm_object_gr;
+ osc_build_res_name(oinfo->oi_md->lsm_object_id,
+ oinfo->oi_md->lsm_object_gr, &res_id);
+
+ rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
+ &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
+ oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid,
+ oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh,
+ rqset, rqset != NULL);
+ RETURN(rc);
+}
+
+int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
+ __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ int *flags, void *data, struct lustre_handle *lockh,
+ int unref)
+{
+ struct obd_device *obd = exp->exp_obd;
+ int lflags = *flags;
+ ldlm_mode_t rc;
+ ENTRY;
- OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
+ RETURN(-EIO);
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother */
policy->l_extent.end |= ~CFS_PAGE_MASK;
/* Next, search for already existing extent locks that will cover us */
- rc = ldlm_lock_match(obd->obd_namespace, lflags | LDLM_FL_LVB_READY,
- &res_id, type, policy, mode, lockh);
- if (rc) {
- //if (!(*flags & LDLM_FL_TEST_LOCK))
- osc_set_data_with_check(lockh, data, lflags);
- RETURN(rc);
- }
/* If we're trying to read, we also search for an existing PW lock. The
* VFS and page cache already protect us locally, so lots of readers/
* writers can share a single PW lock. */
- if (mode == LCK_PR) {
- rc = ldlm_lock_match(obd->obd_namespace,
- lflags | LDLM_FL_LVB_READY, &res_id,
- type, policy, LCK_PW, lockh);
- if (rc == 1 && !(lflags & LDLM_FL_TEST_LOCK)) {
- /* FIXME: This is not incredibly elegant, but it might
- * be more elegant than adding another parameter to
- * lock_match. I want a second opinion. */
+ rc = mode;
+ if (mode == LCK_PR)
+ rc |= LCK_PW;
+ rc = ldlm_lock_match(obd->obd_namespace, lflags | LDLM_FL_LVB_READY,
+ res_id, type, policy, rc, lockh, unref);
+ if (rc) {
+ if (data != NULL)
osc_set_data_with_check(lockh, data, lflags);
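+ /* A PW lock was matched for a PR request: keep it, but hold
+ * the reference as PR, so swap the reference modes below. */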
+ if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
ldlm_lock_addref(lockh, LCK_PR);
ldlm_lock_decref(lockh, LCK_PW);
}
+ RETURN(rc);
}
RETURN(rc);
}
-static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
- __u32 mode, struct lustre_handle *lockh)
+int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
{
ENTRY;
RETURN(0);
}
+static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
+ __u32 mode, struct lustre_handle *lockh)
+{
+ ENTRY;
+ RETURN(osc_cancel_base(lockh, mode));
+}
+
static int osc_cancel_unused(struct obd_export *exp,
struct lov_stripe_md *lsm, int flags,
void *opaque)
{
struct obd_device *obd = class_exp2obd(exp);
- struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
+ struct ldlm_res_id res_id, *resp = NULL;
if (lsm != NULL) {
- res_id.name[0] = lsm->lsm_object_id;
- res_id.name[2] = lsm->lsm_object_gr;
- resp = &res_id;
+ resp = osc_build_res_name(lsm->lsm_object_id,
+ lsm->lsm_object_gr, &res_id);
}
return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
}
-static int osc_join_lru(struct obd_export *exp,
- struct lov_stripe_md *lsm, int join)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
-
- if (lsm != NULL) {
- res_id.name[0] = lsm->lsm_object_id;
- res_id.name[2] = lsm->lsm_object_gr;
- resp = &res_id;
- }
-
- return ldlm_cli_join_lru(obd->obd_namespace, resp, join);
-}
-
-static int osc_statfs_interpret(struct ptlrpc_request *req,
+static int osc_statfs_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
struct obd_statfs *msfs;
ENTRY;
+ if ((rc == -ENOTCONN || rc == -EAGAIN) &&
+ (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
+ GOTO(out, rc = 0);
+
if (rc != 0)
GOTO(out, rc);
- msfs = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*msfs),
- lustre_swab_obd_statfs);
+ msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
if (msfs == NULL) {
- CERROR("Can't unpack obd_statfs\n");
GOTO(out, rc = -EPROTO);
}
- memcpy(aa->aa_oi->oi_osfs, msfs, sizeof(*msfs));
+ *aa->aa_oi->oi_osfs = *msfs;
out:
rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
RETURN(rc);
{
struct ptlrpc_request *req;
struct osc_async_args *aa;
- int size[2] = { sizeof(struct ptlrpc_body), sizeof(*oinfo->oi_osfs) };
+ int rc;
ENTRY;
/* We could possibly pass max_age in the request (as an absolute
* during mount that would help a bit). Having relative timestamps
* is not so great if request processing is slow, while absolute
* timestamps are not ideal because they need time synchronization. */
- req = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OST_VERSION,
- OST_STATFS, 1, NULL, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
+ if (req == NULL)
RETURN(-ENOMEM);
- ptlrpc_req_set_repsize(req, 2, size);
- req->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ ptlrpc_request_set_replen(req);
+ req->rq_request_portal = OST_CREATE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
- req->rq_interpret_reply = osc_statfs_interpret;
+ if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
+ /* procfs requests should not wait for recovery, to avoid deadlock */
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ }
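+ /* With no_resend/no_delay set, a stuck import fails the RPC with
+ * -ENOTCONN/-EAGAIN, which osc_statfs_interpret() above maps back
+ * to rc = 0 for OBD_STATFS_NODELAY callers. */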
+
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(rqset, req);
}
static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- __u64 max_age)
+ __u64 max_age, __u32 flags)
{
- struct obd_statfs *msfs;
+ struct obd_statfs *msfs;
struct ptlrpc_request *req;
- int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*osfs) };
+ struct obd_import *imp = NULL;
+ int rc;
ENTRY;
+ /* Since the request might also come from lprocfs, we need to
+ * sync this with client_disconnect_export() (bug 15684). */
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import)
+ imp = class_import_get(obd->u.cli.cl_import);
+ up_read(&obd->u.cli.cl_sem);
+ if (!imp)
+ RETURN(-ENODEV);
+
/* We could possibly pass max_age in the request (as an absolute
* timestamp or a "seconds.usec ago") so the target can avoid doing
* extra calls into the filesystem if that isn't necessary (e.g.
* during mount that would help a bit). Having relative timestamps
* is not so great if request processing is slow, while absolute
* timestamps are not ideal because they need time synchronization. */
- req = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OST_VERSION,
- OST_STATFS, 1, NULL, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
+
+ class_import_put(imp);
+
+ if (req == NULL)
RETURN(-ENOMEM);
- ptlrpc_req_set_repsize(req, 2, size);
- req->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ ptlrpc_request_set_replen(req);
+ req->rq_request_portal = OST_CREATE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
+ if (flags & OBD_STATFS_NODELAY) {
+ /* procfs requests should not wait for recovery, to avoid deadlock */
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ }
rc = ptlrpc_queue_wait(req);
if (rc)
GOTO(out, rc);
- msfs = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*msfs),
- lustre_swab_obd_statfs);
+ msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
if (msfs == NULL) {
- CERROR("Can't unpack obd_statfs\n");
GOTO(out, rc = -EPROTO);
}
- memcpy(osfs, msfs, sizeof(*osfs));
+ *osfs = *msfs;
EXIT;
out:
*/
static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
{
- struct lov_user_md lum, *lumk;
+ /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
+ struct lov_user_md_v3 lum, *lumk;
+ struct lov_user_ost_data_v1 *lmm_objects;
int rc = 0, lum_size;
ENTRY;
if (!lsm)
RETURN(-ENODATA);
- if (copy_from_user(&lum, lump, sizeof(lum)))
+ /* we only need the header part from user space to get lmm_magic and
+ * lmm_stripe_count (the header part is common to v1 and v3) */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (copy_from_user(&lum, lump, lum_size))
RETURN(-EFAULT);
- if (lum.lmm_magic != LOV_USER_MAGIC)
+ if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
+ (lum.lmm_magic != LOV_USER_MAGIC_V3))
RETURN(-EINVAL);
+ /* lov_user_md_vX and lov_mds_md_vX must have the same size */
+ LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
+ LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
+ LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
+
+ /* we can use lov_mds_md_size() to compute lum_size
+ * because lov_user_md_vX and lov_mds_md_vX have the same size */
if (lum.lmm_stripe_count > 0) {
- lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
+ lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
OBD_ALLOC(lumk, lum_size);
if (!lumk)
RETURN(-ENOMEM);
- lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
- lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
+ if (lum.lmm_magic == LOV_USER_MAGIC_V1)
+ lmm_objects = &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
+ else
+ lmm_objects = &(lumk->lmm_objects[0]);
+ lmm_objects->l_object_id = lsm->lsm_object_id;
} else {
- lum_size = sizeof(lum);
+ lum_size = lov_mds_md_size(0, lum.lmm_magic);
lumk = &lum;
}
int err = 0;
ENTRY;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- MOD_INC_USE_COUNT;
-#else
if (!try_module_get(THIS_MODULE)) {
CERROR("Can't get module. Is it alive?");
return -EINVAL;
}
-#endif
switch (cmd) {
case OBD_IOC_LOV_GET_CONFIG: {
char *buf;
err = lquota_poll_check(quota_interface, exp,
(struct if_quotacheck *)karg);
GOTO(out, err);
+ case OBD_IOC_PING_TARGET:
+ err = ptlrpc_obd_ping(obd);
+ GOTO(out, err);
default:
CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
cmd, cfs_curproc_comm());
GOTO(out, err = -ENOTTY);
}
out:
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- MOD_DEC_USE_COUNT;
-#else
module_put(THIS_MODULE);
-#endif
return err;
}
static int osc_get_info(struct obd_export *exp, obd_count keylen,
- void *key, __u32 *vallen, void *val)
+ void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
{
ENTRY;
if (!vallen || !val)
RETURN(-EFAULT);
- if (keylen > strlen("lock_to_stripe") &&
- strcmp(key, "lock_to_stripe") == 0) {
+ if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
__u32 *stripe = val;
*vallen = sizeof(*stripe);
*stripe = 0;
RETURN(0);
- } else if (keylen >= strlen("last_id") && strcmp(key, "last_id") == 0) {
+ } else if (KEY_IS(KEY_LAST_ID)) {
struct ptlrpc_request *req;
- obd_id *reply;
- char *bufs[2] = { NULL, key };
- int rc, size[2] = { sizeof(struct ptlrpc_body), keylen };
+ obd_id *reply;
+ char *tmp;
+ int rc;
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OST_VERSION,
- OST_GET_INFO, 2, size, bufs);
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_OST_GET_INFO_LAST_ID);
if (req == NULL)
RETURN(-ENOMEM);
- size[REPLY_REC_OFF] = *vallen;
- ptlrpc_req_set_repsize(req, 2, size);
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
+ RCL_CLIENT, keylen);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
+ memcpy(tmp, key, keylen);
+
+ req->rq_no_delay = req->rq_no_resend = 1;
+ ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (rc)
GOTO(out, rc);
- reply = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*reply),
- lustre_swab_ost_last_id);
- if (reply == NULL) {
- CERROR("Can't unpack OST last ID\n");
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
+ if (reply == NULL)
GOTO(out, rc = -EPROTO);
- }
+
*((obd_id *)val) = *reply;
out:
ptlrpc_req_finished(req);
RETURN(rc);
+ } else if (KEY_IS(KEY_FIEMAP)) {
+ struct ptlrpc_request *req;
+ struct ll_user_fiemap *reply;
+ char *tmp;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_OST_GET_INFO_FIEMAP);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
+ RCL_CLIENT, keylen);
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
+ RCL_CLIENT, *vallen);
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
+ RCL_SERVER, *vallen);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ memcpy(tmp, val, *vallen);
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out1, rc);
+
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ if (reply == NULL)
+ GOTO(out1, rc = -EPROTO);
+
+ memcpy(val, reply, *vallen);
+ out1:
+ ptlrpc_req_finished(req);
+
+ RETURN(rc);
}
+
RETURN(-EINVAL);
}
-static int osc_setinfo_mds_conn_interpret(struct ptlrpc_request *req,
- void *aa, int rc)
+static int osc_setinfo_mds_connect_import(struct obd_import *imp)
{
struct llog_ctxt *ctxt;
- struct obd_import *imp = req->rq_import;
+ int rc = 0;
ENTRY;
- if (rc != 0)
- RETURN(rc);
-
ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
if (ctxt) {
- if (rc == 0)
- rc = llog_initiator_connect(ctxt);
- else
- CERROR("cannot establish connection for "
- "ctxt %p: %d\n", ctxt, rc);
+ rc = llog_initiator_connect(ctxt);
+ llog_ctxt_put(ctxt);
+ } else {
+ /* XXX return an error? skip setting below flags? */
}
spin_lock(&imp->imp_lock);
RETURN(rc);
}
+static int osc_setinfo_mds_conn_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *aa, int rc)
+{
+ ENTRY;
+ if (rc != 0)
+ RETURN(rc);
+
+ RETURN(osc_setinfo_mds_connect_import(req->rq_import));
+}
+
static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
void *key, obd_count vallen, void *val,
struct ptlrpc_request_set *set)
{
struct ptlrpc_request *req;
- struct obd_device *obd = exp->exp_obd;
- struct obd_import *imp = class_exp2cliimp(exp);
- int size[3] = { sizeof(struct ptlrpc_body), keylen, vallen };
- char *bufs[3] = { NULL, key, val };
+ struct obd_device *obd = exp->exp_obd;
+ struct obd_import *imp = class_exp2cliimp(exp);
+ char *tmp;
+ int rc;
ENTRY;
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
if (KEY_IS(KEY_NEXT_ID)) {
if (vallen != sizeof(obd_id))
+ RETURN(-ERANGE);
+ if (val == NULL)
RETURN(-EINVAL);
obd->u.cli.cl_oscc.oscc_next_id = *((obd_id*)val) + 1;
CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
RETURN(0);
}
- if (KEY_IS("unlinked")) {
+ if (KEY_IS(KEY_UNLINKED)) {
struct osc_creator *oscc = &obd->u.cli.cl_oscc;
spin_lock(&oscc->oscc_lock);
oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
RETURN(0);
}
- if (KEY_IS("checksum")) {
+ if (KEY_IS(KEY_CHECKSUM)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
RETURN(0);
}
+ if (KEY_IS(KEY_SPTLRPC_CONF)) {
+ sptlrpc_conf_client_adapt(obd);
+ RETURN(0);
+ }
+
if (KEY_IS(KEY_FLUSH_CTX)) {
sptlrpc_import_flush_my_ctx(imp);
RETURN(0);
}
- if (!set)
+ if (!set && !KEY_IS(KEY_GRANT_SHRINK))
RETURN(-EINVAL);
/* We pass all other commands directly to OST. Since nobody calls osc
Even if something bad goes through, we'd get a -EINVAL from OST
anyway. */
- req = ptlrpc_prep_req(imp, LUSTRE_OST_VERSION, OST_SET_INFO, 3, size,
- bufs);
+ if (KEY_IS(KEY_GRANT_SHRINK))
+ req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
+ else
+ req = ptlrpc_request_alloc(imp, &RQF_OST_SET_INFO);
+
if (req == NULL)
RETURN(-ENOMEM);
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
+ RCL_CLIENT, keylen);
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
+ RCL_CLIENT, vallen);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
+ memcpy(tmp, val, vallen);
+
if (KEY_IS(KEY_MDS_CONN)) {
struct osc_creator *oscc = &obd->u.cli.cl_oscc;
oscc->oscc_oa.o_gr = (*(__u32 *)val);
oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
- LASSERT(oscc->oscc_oa.o_gr > 0);
+ LASSERT_MDS_GROUP(oscc->oscc_oa.o_gr);
+ req->rq_no_delay = req->rq_no_resend = 1;
req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
+ } else if (KEY_IS(KEY_GRANT_SHRINK)) {
+ struct osc_grant_args *aa;
+ struct obdo *oa;
+
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ OBD_ALLOC_PTR(oa);
+ if (!oa) {
+ ptlrpc_req_finished(req);
+ RETURN(-ENOMEM);
+ }
+ *oa = ((struct ost_body *)val)->oa;
+ aa->aa_oa = oa;
+ req->rq_interpret_reply = osc_shrink_grant_interpret;
}
- ptlrpc_req_set_repsize(req, 1, NULL);
- ptlrpc_set_add_req(set, req);
- ptlrpc_check_set(set);
+ ptlrpc_request_set_replen(req);
+ if (!KEY_IS(KEY_GRANT_SHRINK)) {
+ LASSERT(set != NULL);
+ ptlrpc_set_add_req(set, req);
+ ptlrpc_check_set(NULL, set);
+        } else {
+                ptlrpcd_add_req(req, PSCOPE_OTHER);
+        }
RETURN(0);
}
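+
+#if 0
+/* Illustration only (not part of this patch): a caller toggling wire
+ * checksums through the path above.  The helper name is hypothetical;
+ * obd_set_info_async() is the standard class-layer wrapper for the
+ * o_set_info_async method.  Passing a stack variable is safe because the
+ * value is copied into the request (or consumed) before this call
+ * returns. */
+static int example_toggle_checksum(struct obd_export *exp,
+                                   struct ptlrpc_request_set *set, int on)
+{
+        /* KEY_CHECKSUM requires vallen == sizeof(int), see above */
+        return obd_set_info_async(exp, sizeof(KEY_CHECKSUM) - 1, KEY_CHECKSUM,
+                                  sizeof(on), &on, set);
+}
+#endif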
};
static struct llog_operations osc_mds_ost_orig_logops;
-static int osc_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
+static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
struct obd_device *tgt, int count,
struct llog_catid *catid, struct obd_uuid *uuid)
{
int rc;
ENTRY;
+ LASSERT(olg == &obd->obd_olg);
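+        /* two contexts are set up here: LLOG_MDS_OST_ORIG_CTXT originates
+         * MDS->OST records, LLOG_SIZE_REPL_CTXT replicates size changes */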
spin_lock(&obd->obd_dev_lock);
        if (osc_mds_ost_orig_logops.lop_setup != llog_obd_origin_setup) {
                osc_mds_ost_orig_logops = llog_lvfs_ops;
                osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
                osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
                osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
                osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
        }
spin_unlock(&obd->obd_dev_lock);
- rc = llog_setup(obd, llogs, LLOG_MDS_OST_ORIG_CTXT, tgt, count,
+ rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, count,
&catid->lci_logid, &osc_mds_ost_orig_logops);
if (rc) {
CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n");
- GOTO (out, rc);
+ GOTO(out, rc);
}
- rc = llog_setup(obd, llogs, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
- &osc_size_repl_logops);
- if (rc)
+ rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, count,
+ NULL, &osc_size_repl_logops);
+ if (rc) {
+ struct llog_ctxt *ctxt =
+ llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+ if (ctxt)
+ llog_cleanup(ctxt);
CERROR("failed LLOG_SIZE_REPL_CTXT\n");
+ }
+ GOTO(out, rc);
out:
if (rc) {
CERROR("osc '%s' tgt '%s' cnt %d catid %p rc=%d\n",
CERROR("logid "LPX64":0x%x\n",
catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
}
- RETURN(rc);
+ return rc;
}
static int osc_llog_finish(struct obd_device *obd, int count)
RETURN(rc);
}
-static int osc_reconnect(struct obd_export *exp, struct obd_device *obd,
+static int osc_reconnect(const struct lu_env *env,
+ struct obd_export *exp, struct obd_device *obd,
struct obd_uuid *cluuid,
- struct obd_connect_data *data)
+ struct obd_connect_data *data,
+ void *localdata)
{
struct client_obd *cli = &obd->u.cli;
static int osc_disconnect(struct obd_export *exp)
{
struct obd_device *obd = class_exp2obd(exp);
- struct llog_ctxt *ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+ struct llog_ctxt *ctxt;
int rc;
- if (obd->u.cli.cl_conn_count == 1)
- /* flush any remaining cancel messages out to the target */
- llog_sync(ctxt, exp);
+ ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+ if (ctxt) {
+ if (obd->u.cli.cl_conn_count == 1) {
+ /* Flush any remaining cancel messages out to the
+ * target */
+ llog_sync(ctxt, exp);
+ }
+ llog_ctxt_put(ctxt);
+ } else {
+ CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
+ obd);
+ }
rc = client_disconnect_export(exp);
+        /**
+         * Initially we put del_shrink_grant before disconnect_export, but
+         * that ordering is racy when setup (connect) and cleanup
+         * (disconnect) run concurrently:
+         *
+         *   connect (process 1)            disconnect (process 2)
+         *   ptlrpc_connect_import
+         *     ...                          class_manual_cleanup
+         *                                    osc_disconnect
+         *                                      del_shrink_grant
+         *   ptlrpc_connect_interpret
+         *     init_grant_shrink
+         *       (adds this client to
+         *        the shrink list)
+         *                                  osc_cleanup
+         * Bang! the pinger triggers a shrink on a cleaned-up client.
+         *
+         * So the osc must be removed from the shrink list only after we
+         * are sure the import has been destroyed. BUG18662
+         */
+ if (obd->u.cli.cl_import == NULL)
+ osc_del_shrink_grant(&obd->u.cli);
return rc;
}
}
case IMP_EVENT_INVALIDATE: {
struct ldlm_namespace *ns = obd->obd_namespace;
+ struct lu_env *env;
+ int refcheck;
+
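+                /* osc_check_rpcs() takes a lu_env now (CLIO), so obtain one
+                 * before pushing the pending pages out as failing rpcs */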
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ /* Reset grants */
+ cli = &obd->u.cli;
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ /* all pages go to failing rpcs due to the invalid
+ * import */
+ osc_check_rpcs(env, cli);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
- /* Reset grants */
- cli = &obd->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
- /* all pages go to failing rpcs due to the invalid import */
- osc_check_rpcs(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
-
+ ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
+ cl_env_put(env, &refcheck);
+                } else {
+                        rc = PTR_ERR(env);
+                }
break;
}
case IMP_EVENT_ACTIVE: {
if (rc) {
ptlrpcd_decref();
} else {
- struct lprocfs_static_vars lvars;
+ struct lprocfs_static_vars lvars = { 0 };
struct client_obd *cli = &obd->u.cli;
- lprocfs_init_vars(osc, &lvars);
+ cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
+ lprocfs_osc_init_vars(&lvars);
if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
lproc_osc_attach_seqstat(obd);
+ sptlrpc_lprocfs_cliobd_attach(obd);
ptlrpc_lprocfs_register_obd(obd);
}
oscc_init(obd);
                /* We need to allocate a few requests more, because
-                   brw_interpret_oap tries to create new requests before freeing
+                   brw_interpret tries to create new requests before freeing
                   previous ones. Ideally we want to have 2x max_rpcs_in_flight
                   reserved, but that might waste too much RAM in practice, so
                   2 extra requests is a guess that should still work. */
ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
OST_MAXREQSIZE,
ptlrpc_add_rqs_to_pool);
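+                /* e.g. assuming the usual default of 8 rpcs in flight (check
+                 * cl_max_rpcs_in_flight in this tree), this preallocates a
+                 * pool of 10 OST_MAXREQSIZE requests */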
+
+ CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
+ sema_init(&cli->cl_grant_sem, 1);
}
RETURN(rc);
        /* If we set up but never connected, the
           client import will not have been cleaned. */
if (obd->u.cli.cl_import) {
struct obd_import *imp;
+ down_write(&obd->u.cli.cl_sem);
imp = obd->u.cli.cl_import;
CDEBUG(D_CONFIG, "%s: client import never connected\n",
obd->obd_name);
ptlrpc_invalidate_import(imp);
- ptlrpc_free_rq_pool(imp->imp_rq_pool);
+ if (imp->imp_rq_pool) {
+ ptlrpc_free_rq_pool(imp->imp_rq_pool);
+ imp->imp_rq_pool = NULL;
+ }
class_destroy_import(imp);
+ up_write(&obd->u.cli.cl_sem);
obd->u.cli.cl_import = NULL;
}
- break;
- }
- case OBD_CLEANUP_SELF_EXP:
rc = obd_llog_finish(obd, 0);
if (rc != 0)
CERROR("failed to cleanup llogging subsystems\n");
break;
- case OBD_CLEANUP_OBD:
- break;
+ }
}
RETURN(rc);
}
int osc_cleanup(struct obd_device *obd)
{
- struct osc_creator *oscc = &obd->u.cli.cl_oscc;
int rc;
ENTRY;
ptlrpc_lprocfs_unregister_obd(obd);
lprocfs_obd_cleanup(obd);
- spin_lock(&oscc->oscc_lock);
- oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
- oscc->oscc_flags |= OSCC_FLAG_EXITING;
- spin_unlock(&oscc->oscc_lock);
-
        /* free memory of osc quota cache */
        lquota_cleanup(quota_interface, obd);

        rc = client_obd_cleanup(obd);

        ptlrpcd_decref();
        RETURN(rc);
}
-static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
+int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
{
- struct lustre_cfg *lcfg = buf;
- struct lprocfs_static_vars lvars;
+ struct lprocfs_static_vars lvars = { 0 };
int rc = 0;
- lprocfs_init_vars(osc, &lvars);
+ lprocfs_osc_init_vars(&lvars);
+
+ switch (lcfg->lcfg_command) {
+ default:
+ rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
+ lcfg, obd);
+ if (rc > 0)
+ rc = 0;
+ break;
+ }
- rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars, lcfg, obd);
return(rc);
}
+static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
+{
+ return osc_process_config_base(obd, buf);
+}
+
struct obd_ops osc_obd_ops = {
.o_owner = THIS_MODULE,
.o_setup = osc_setup,
.o_unpackmd = osc_unpackmd,
.o_precreate = osc_precreate,
.o_create = osc_create,
+ .o_create_async = osc_create_async,
.o_destroy = osc_destroy,
.o_getattr = osc_getattr,
.o_getattr_async = osc_getattr_async,
.o_setattr = osc_setattr,
.o_setattr_async = osc_setattr_async,
.o_brw = osc_brw,
- .o_brw_async = osc_brw_async,
- .o_prep_async_page = osc_prep_async_page,
- .o_queue_async_io = osc_queue_async_io,
- .o_set_async_flags = osc_set_async_flags,
- .o_queue_group_io = osc_queue_group_io,
- .o_trigger_group_io = osc_trigger_group_io,
- .o_teardown_async_page = osc_teardown_async_page,
.o_punch = osc_punch,
.o_sync = osc_sync,
.o_enqueue = osc_enqueue,
- .o_match = osc_match,
.o_change_cbdata = osc_change_cbdata,
.o_cancel = osc_cancel,
.o_cancel_unused = osc_cancel_unused,
- .o_join_lru = osc_join_lru,
.o_iocontrol = osc_iocontrol,
.o_get_info = osc_get_info,
.o_set_info_async = osc_set_info_async,
.o_llog_finish = osc_llog_finish,
.o_process_config = osc_process_config,
};
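+
+/* The async page methods dropped from this table above (o_prep_async_page,
+ * o_queue_async_io, ...) are not gone from the client: with the CLIO
+ * rework they are reached through cl_object/cl_page operations instead of
+ * obd_ops. */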
+
+extern struct lu_kmem_descr osc_caches[];
+extern spinlock_t osc_ast_guard;
+extern struct lock_class_key osc_ast_guard_class;
+
int __init osc_init(void)
{
- struct lprocfs_static_vars lvars;
+ struct lprocfs_static_vars lvars = { 0 };
int rc;
ENTRY;
- lprocfs_init_vars(osc, &lvars);
+        /* Print the address of _any_ initialized kernel symbol from this
+         * module, to allow debugging with a gdb that doesn't support data
+         * symbols from modules. */
+ CDEBUG(D_CONSOLE, "Lustre OSC module (%p).\n", &osc_caches);
+
+        rc = lu_kmem_init(osc_caches);
+        if (rc)
+                RETURN(rc);
+
+ lprocfs_osc_init_vars(&lvars);
request_module("lquota");
quota_interface = PORTAL_SYMBOL_GET(osc_quota_interface);
init_obd_quota_ops(quota_interface, &osc_obd_ops);
rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
- LUSTRE_OSC_NAME, NULL);
+ LUSTRE_OSC_NAME, &osc_device_type);
if (rc) {
if (quota_interface)
PORTAL_SYMBOL_PUT(osc_quota_interface);
+ lu_kmem_fini(osc_caches);
RETURN(rc);
}
+ spin_lock_init(&osc_ast_guard);
+ lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+
RETURN(rc);
}
#ifdef __KERNEL__
static void /*__exit*/ osc_exit(void)
{
+ lu_device_type_fini(&osc_device_type);
+
lquota_exit(quota_interface);
if (quota_interface)
PORTAL_SYMBOL_PUT(osc_quota_interface);
class_unregister_type(LUSTRE_OSC_NAME);
+ lu_kmem_fini(osc_caches);
}
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
MODULE_LICENSE("GPL");