/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (C) 2001-2003 Cluster File Systems, Inc.
- * Author Peter Braam <braam@clusterfs.com>
+ * GPL HEADER START
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * For testing and management it is treated as an obd_device,
- * although * it does not export a full OBD method table (the
- * requests are coming * in over the wire, so object target modules
- * do not have a full * method table.)
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
*/
#ifndef EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSC
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-#else /* __KERNEL__ */
+#include <libcfs/libcfs.h>
+
+#ifndef __KERNEL__
# include <liblustre.h>
#endif
#include <lustre_dlm.h>
-#include <libcfs/kp30.h>
#include <lustre_net.h>
#include <lustre/lustre_user.h>
+#include <obd_cksum.h>
#include <obd_ost.h>
#include <obd_lov.h>
#include <lustre_log.h>
#include <lustre_debug.h>
#include <lustre_param.h>
+#include <lustre_cache.h>
#include "osc_internal.h"
static quota_interface_t *quota_interface = NULL;
extern quota_interface_t osc_quota_interface;
static void osc_release_ppga(struct brw_page **ppga, obd_count count);
+static int brw_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc);
+int osc_cleanup(struct obd_device *obd);
/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
;
}
-static int osc_getattr_interpret(struct ptlrpc_request *req,
+static int osc_getattr_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
struct ost_body *body;
osc_pack_req_body(req, oinfo);
ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = osc_getattr_interpret;
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(set, req);
osc_pack_req_body(req, oinfo);
ptlrpc_request_set_replen(req);
-
+
rc = ptlrpc_queue_wait(req);
if (rc)
GOTO(out, rc);
osc_pack_req_body(req, oinfo);
ptlrpc_request_set_replen(req);
-
rc = ptlrpc_queue_wait(req);
if (rc)
RETURN(rc);
}
-static int osc_setattr_interpret(struct ptlrpc_request *req,
+static int osc_setattr_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
struct ost_body *body;
osc_pack_req_body(req, oinfo);
ptlrpc_request_set_replen(req);
-
+
if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
LASSERT(oti);
- *obdo_logcookie(oinfo->oi_oa) = *oti->oti_logcookies;
+ oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
}
- /* do mds to ost setattr asynchronouly */
+ /* do mds to ost setattr asynchronously */
if (!rqset) {
/* Do not wait for response. */
ptlrpcd_add_req(req);
} else {
- req->rq_interpret_reply = osc_setattr_interpret;
+ req->rq_interpret_reply =
+ (ptlrpc_interpterer_t)osc_setattr_interpret;
CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(rqset, req);
ptlrpc_request_set_replen(req);
- if (oa->o_valid & OBD_MD_FLINLINE) {
- LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
- oa->o_flags == OBD_FL_DELORPHAN);
+ if ((oa->o_valid & OBD_MD_FLFLAGS) &&
+ oa->o_flags == OBD_FL_DELORPHAN) {
DEBUG_REQ(D_HA, req,
"delorphan from OST integration");
/* Don't resend the delorphan req */
if (oa->o_valid & OBD_MD_FLCOOKIE) {
if (!oti->oti_logcookies)
oti_alloc_cookies(oti, 1);
- *oti->oti_logcookies = *obdo_logcookie(oa);
+ *oti->oti_logcookies = oa->o_lcookie;
}
}
RETURN(rc);
}
-static int osc_punch_interpret(struct ptlrpc_request *req,
+static int osc_punch_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
struct ost_body *body;
RETURN(rc);
}
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ ptlrpc_at_set_req_timeout(req);
osc_pack_req_body(req, oinfo);
/* overload the size and blocks fields in the oa with start/end */
ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = osc_punch_interpret;
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_punch_interpret;
CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(rqset, req);
int lock_flags)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct ldlm_res_id res_id = { .name = { oa->o_id, 0, oa->o_gr, 0 } };
- struct ldlm_resource *res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
+ struct ldlm_res_id res_id;
+ struct ldlm_resource *res;
int count;
ENTRY;
+ osc_build_res_name(oa->o_id, oa->o_gr, &res_id);
+ res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
if (res == NULL)
RETURN(0);
+ LDLM_RESOURCE_ADDREF(res);
count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
lock_flags, 0, NULL);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
RETURN(count);
}
-static int osc_destroy_interpret(struct ptlrpc_request *req, void *data,
+static int osc_destroy_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data,
int rc)
{
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
RETURN(-ENOMEM);
}
- rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
+ rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
0, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
req->rq_interpret_reply = osc_destroy_interpret;
+ ptlrpc_at_set_req_timeout(req);
if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
- memcpy(obdo_logcookie(oa), oti->oti_logcookies,
- sizeof(*oti->oti_logcookies));
+ oa->o_lcookie = *oti->oti_logcookies;
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
body->oa = *oa;
CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
if (body->oa.o_valid & OBD_MD_FLGRANT)
cli->cl_avail_grant += body->oa.o_grant;
- /* waiters are woken in brw_interpret_oap */
+ /* waiters are woken in brw_interpret */
client_obd_list_unlock(&cli->cl_loi_list_lock);
}
if (req->rq_bulk->bd_nob_transferred != requested_nob) {
CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
- requested_nob, req->rq_bulk->bd_nob_transferred);
+ req->rq_bulk->bd_nob_transferred, requested_nob);
return(-EPROTO);
}
static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
struct lov_stripe_md *lsm, obd_count page_count,
- struct brw_page **pga,
+ struct brw_page **pga,
struct ptlrpc_request **reqp,
struct obd_capa *ocapa)
{
int niocount, i, requested_nob, opc, rc;
struct osc_brw_async_args *aa;
struct req_capsule *pill;
+ struct brw_page *pg_prev;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
if ((cmd & OBD_BRW_WRITE) != 0) {
opc = OST_WRITE;
- req = ptlrpc_request_alloc_pool(cli->cl_import,
+ req = ptlrpc_request_alloc_pool(cli->cl_import,
cli->cl_import->imp_rq_pool,
&RQF_OST_BRW);
} else {
RETURN(rc);
}
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ ptlrpc_at_set_req_timeout(req);
if (opc == OST_WRITE)
desc = ptlrpc_prep_bulk_imp(req, page_count,
ioobj->ioo_bufcnt = niocount;
osc_pack_capa(req, body, ocapa);
LASSERT (page_count > 0);
+ pg_prev = pga[0];
for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
struct brw_page *pg = pga[i];
- struct brw_page *pg_prev = pga[i - 1];
LASSERT(pg->count > 0);
LASSERTF((pg->off & ~CFS_PAGE_MASK) + pg->count <= CFS_PAGE_SIZE,
niobuf->len = pg->count;
niobuf->flags = pg->flag;
}
+ pg_prev = pg;
}
- LASSERT((void *)(niobuf - niocount) ==
+ LASSERTF((void *)(niobuf - niocount) ==
lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 2,
- niocount * sizeof(*niobuf)));
+ niocount * sizeof(*niobuf)),
+ "want %p - real %p\n", lustre_msg_buf(req->rq_reqmsg,
+ REQ_REC_OFF + 2, niocount * sizeof(*niobuf)),
+ (void *)(niobuf - niocount));
+
osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
/* size[REQ_REC_OFF] still sizeof (*body) */
ptlrpc_request_set_replen(req);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_brw_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oa = oa;
aa->aa_requested_nob = requested_nob;
aa->aa_nio_count = niocount;
aa->aa_resends = 0;
aa->aa_ppga = pga;
aa->aa_cli = cli;
- INIT_LIST_HEAD(&aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
*reqp = req;
RETURN(0);
"["LPU64"-"LPU64"]\n",
msg, libcfs_nid2str(peer->nid),
oa->o_valid & OBD_MD_FLFID ? oa->o_fid : (__u64)0,
- oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
+ oa->o_valid & OBD_MD_FLFID ? oa->o_generation :
(__u64)0,
oa->o_id,
oa->o_valid & OBD_MD_FLGROUP ? oa->o_gr : (__u64)0,
CERROR("original client csum %x (type %x), server csum %x (type %x), "
"client csum now %x\n", client_cksum, client_cksum_type,
server_cksum, cksum_type, new_cksum);
- return 1;
+ return 1;
}
/* Note rc enters this function as number of bytes transferred */
if (server_cksum == ~0 && rc > 0) {
CERROR("Protocol error: server %s set the 'checksum' "
"bit, but didn't send a checksum. Not fatal, "
- "but please tell CFS.\n",
+ "but please notify on http://bugzilla.lustre.org/\n",
libcfs_nid2str(peer->nid));
} else if (server_cksum != client_cksum) {
LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
goto restart_bulk;
}
-
+
RETURN (rc);
}
OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
aa->aa_cli, aa->aa_oa,
NULL /* lsm unused by osc currently */,
- aa->aa_page_count, aa->aa_ppga,
+ aa->aa_page_count, aa->aa_ppga,
&new_req, NULL /* ocapa */);
if (rc)
RETURN(rc);
aa->aa_resends++;
new_req->rq_interpret_reply = request->rq_interpret_reply;
new_req->rq_async_args = request->rq_async_args;
- new_req->rq_sent = CURRENT_SECONDS + aa->aa_resends;
+ new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
- new_aa = (struct osc_brw_async_args *)&new_req->rq_async_args;
+ new_aa = ptlrpc_req_async_args(new_req);
- INIT_LIST_HEAD(&new_aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
- INIT_LIST_HEAD(&aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
if (oap->oap_request) {
}
}
- /* use ptlrpc_set_add_req is safe because interpret functions work
- * in check_set context. only one way exist with access to request
- * from different thread got -EINTR - this way protected with
+ /* use ptlrpc_set_add_req is safe because interpret functions work
+ * in check_set context. only one way exist with access to request
+ * from different thread got -EINTR - this way protected with
* cl_loi_list_lock */
ptlrpc_set_add_req(set, new_req);
RETURN(0);
}
-static int brw_interpret(struct ptlrpc_request *req, void *data, int rc)
-{
- struct osc_brw_async_args *aa = data;
- int i;
- ENTRY;
-
- rc = osc_brw_fini_request(req, rc);
- if (osc_recoverable_error(rc)) {
- rc = osc_brw_redo_request(req, aa);
- if (rc == 0)
- RETURN(0);
- }
-
- client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
- aa->aa_cli->cl_w_in_flight--;
- else
- aa->aa_cli->cl_r_in_flight--;
- for (i = 0; i < aa->aa_page_count; i++)
- osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
- client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
-
- osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
-
- RETURN(rc);
-}
-
static int async_internal(int cmd, struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md *lsm, obd_count page_count,
struct brw_page **pga, struct ptlrpc_request_set *set,
rc = osc_brw_prep_request(cmd, cli, oa, lsm, page_count, pga,
&req, ocapa);
- aa = (struct osc_brw_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
if (cmd == OBD_BRW_READ) {
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
- ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
} else {
- lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
+ lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_write_rpc_hist,
cli->cl_w_in_flight);
- ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);
}
+ ptlrpc_lprocfs_brw(req, aa->aa_requested_nob);
+ LASSERT(list_empty(&aa->aa_oaps));
if (rc == 0) {
req->rq_interpret_reply = brw_interpret;
ptlrpc_set_add_req(set, req);
else
cli->cl_w_in_flight++;
client_obd_list_unlock(&cli->cl_loi_list_lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DIO_PAUSE, 3);
} else if (cmd == OBD_BRW_WRITE) {
client_obd_list_lock(&cli->cl_loi_list_lock);
for (i = 0; i < page_count; i++)
osc_release_write_grant(cli, pga[i], 0);
+ osc_wake_cache_waiters(cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
}
RETURN (rc);
EXIT;
}
-static int brw_interpret_oap(struct ptlrpc_request *req, void *data, int rc)
+static int brw_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc)
{
- struct osc_async_page *oap, *tmp;
struct osc_brw_async_args *aa = data;
struct client_obd *cli;
ENTRY;
else
cli->cl_r_in_flight--;
- /* the caller may re-use the oap after the completion call so
- * we need to clean it up a little */
- list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
- list_del_init(&oap->oap_rpc_item);
- osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
+ if (!list_empty(&aa->aa_oaps)) { /* from osc_send_oap_rpc() */
+ struct osc_async_page *oap, *tmp;
+ /* the caller may re-use the oap after the completion call so
+ * we need to clean it up a little */
+ list_for_each_entry_safe(oap, tmp, &aa->aa_oaps, oap_rpc_item) {
+ list_del_init(&oap->oap_rpc_item);
+ osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
+ }
+ OBDO_FREE(aa->aa_oa);
+ } else { /* from async_internal() */
+ int i;
+ for (i = 0; i < aa->aa_page_count; i++)
+ osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
}
-
osc_wake_cache_waiters(cli);
osc_check_rpcs(cli);
-
client_obd_list_unlock(&cli->cl_loi_list_lock);
- OBDO_FREE(aa->aa_oa);
-
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
RETURN(rc);
}
void *caller_data = NULL;
struct obd_capa *ocapa;
struct osc_async_page *oap;
+ struct ldlm_lock *lock = NULL;
int i, rc;
ENTRY;
if (ops == NULL) {
ops = oap->oap_caller_ops;
caller_data = oap->oap_caller_data;
+ lock = oap->oap_ldlm_lock;
}
pga[i] = &oap->oap_brw_page;
pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
LASSERT(ops != NULL);
ops->ap_fill_obdo(caller_data, cmd, oa);
ocapa = ops->ap_lookup_capa(caller_data, cmd);
+ if (lock) {
+ oa->o_handle = lock->l_remote_handle;
+ oa->o_valid |= OBD_MD_FLHANDLE;
+ }
sort_brw_pages(pga, page_count);
rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_brw_async_args *)&req->rq_async_args;
- INIT_LIST_HEAD(&aa->aa_oaps);
+ aa = ptlrpc_req_async_args(req);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
list_splice(rpc_list, &aa->aa_oaps);
- INIT_LIST_HEAD(rpc_list);
+ CFS_INIT_LIST_HEAD(rpc_list);
out:
if (IS_ERR(req)) {
/* the loi lock is held across this function but it's allowed to release
* and reacquire it during its work */
+/**
+ * Prepare pages for ASYNC I/O and put them on the send queue.
+ *
+ * \param cli - client obd device
+ * \param loi - lov object info
+ * \param cmd - OBD_BRW_* macros
+ * \param lop - pending pages
+ *
+ * \return zero if pages were successfully added to the send queue.
+ * \return non-zero if an error occurred.
+ */
static int osc_send_oap_rpc(struct client_obd *cli, struct lov_oinfo *loi,
int cmd, struct loi_oap_pages *lop)
{
CFS_LIST_HEAD(rpc_list);
unsigned int ending_offset;
unsigned starting_offset = 0;
+ int srvlock = 0;
ENTRY;
/* first we find the pages we're allowed to work with */
LASSERT(oap->oap_magic == OAP_MAGIC);
+ if (page_count != 0 &&
+ srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
+ CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
+ " oap %p, page %p, srvlock %u\n",
+ oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
+ break;
+ }
/* in llite being 'ready' equates to the page being locked
* until completion unlocks it. commit_write submits a page
* as not ready because its unlock will happen unconditionally
/*
* Page submitted for IO has to be locked. Either by
* ->ap_make_ready() or by higher layers.
- *
- * XXX nikita: this assertion should be adjusted when lustre
- * starts using PG_writeback for pages being written out.
*/
#if defined(__KERNEL__) && defined(__linux__)
- LASSERT(PageLocked(oap->oap_page));
+ if(!(PageLocked(oap->oap_page) &&
+ (CheckWriteback(oap->oap_page, cmd) || oap->oap_oig !=NULL))) {
+ CDEBUG(D_PAGE, "page %p lost wb %lx/%x\n",
+ oap->oap_page, (long)oap->oap_page->flags, oap->oap_async_flags);
+ LBUG();
+ }
#endif
/* If there is a gap at the start of this page, it can't merge
* with any previous page, so we'll hand the network a
/* now put the page back in our accounting */
list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ if (page_count == 0)
+ srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
if (++page_count >= cli->cl_max_pages_per_rpc)
break;
RETURN(PTR_ERR(req));
}
- aa = (struct osc_brw_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
if (cmd == OBD_BRW_READ) {
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
(starting_offset >> CFS_PAGE_SHIFT) + 1);
- ptlrpc_lprocfs_brw(req, OST_READ, aa->aa_requested_nob);
} else {
lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_write_rpc_hist,
cli->cl_w_in_flight);
lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
(starting_offset >> CFS_PAGE_SHIFT) + 1);
- ptlrpc_lprocfs_brw(req, OST_WRITE, aa->aa_requested_nob);
}
+ ptlrpc_lprocfs_brw(req, aa->aa_requested_nob);
client_obd_list_lock(&cli->cl_loi_list_lock);
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
- req->rq_interpret_reply = brw_interpret_oap;
+ req->rq_interpret_reply = brw_interpret;
ptlrpcd_add_req(req);
RETURN(1);
}
RETURN(-EDQUOT);
}
+/**
+ * Checks if requested extent lock is compatible with a lock under the page.
+ *
+ * Checks if the lock under \a page is compatible with a read or write lock
+ * (specified by \a rw) for an extent [\a start , \a end].
+ *
+ * \param exp osc export
+ * \param lsm striping information for the file
+ * \param res osc_async_page placeholder
+ * \param rw OBD_BRW_READ if requested for reading,
+ * OBD_BRW_WRITE if requested for writing
+ * \param start start of the requested extent
+ * \param end end of the requested extent
+ * \param cookie transparent parameter for passing locking context
+ *
+ * \post result == 1, *cookie == context, appropriate lock is referenced or
+ * \post result == 0
+ *
+ * \retval 1 owned lock is reused for the request
+ * \retval 0 no lock reused for the request
+ *
+ * \see osc_release_short_lock
+ */
+static int osc_reget_short_lock(struct obd_export *exp,
+ struct lov_stripe_md *lsm,
+ void **res, int rw,
+ obd_off start, obd_off end,
+ void **cookie)
+{
+ struct osc_async_page *oap = *res;
+ int rc;
+
+ ENTRY;
+
+ spin_lock(&oap->oap_lock);
+ rc = ldlm_lock_fast_match(oap->oap_ldlm_lock, rw,
+ start, end, cookie);
+ spin_unlock(&oap->oap_lock);
+
+ RETURN(rc);
+}
+
+/**
+ * Releases a reference to a lock taken in a "fast" way.
+ *
+ * Releases a read or a write (specified by \a rw) lock
+ * referenced by \a cookie.
+ *
+ * \param exp osc export
+ * \param lsm striping information for the file
+ * \param end end of the locked extent
+ * \param rw OBD_BRW_READ if requested for reading,
+ * OBD_BRW_WRITE if requested for writing
+ * \param cookie transparent parameter for passing locking context
+ *
+ * \post appropriate lock is dereferenced
+ *
+ * \see osc_reget_short_lock
+ */
+static int osc_release_short_lock(struct obd_export *exp,
+ struct lov_stripe_md *lsm, obd_off end,
+ void *cookie, int rw)
+{
+ ENTRY;
+ ldlm_lock_fast_release(cookie, rw);
+ /* no error could have happened at this layer */
+ RETURN(0);
+}
+
int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
struct lov_oinfo *loi, cfs_page_t *page,
obd_off offset, struct obd_async_page_ops *ops,
- void *data, void **res)
+ void *data, void **res, int nocache,
+ struct lustre_handle *lockh)
{
struct osc_async_page *oap;
+ struct ldlm_res_id oid;
+ int rc = 0;
ENTRY;
if (!page)
CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
+ CFS_INIT_LIST_HEAD(&oap->oap_page_list);
oap->oap_occ.occ_interrupted = osc_occ_interrupted;
+ spin_lock_init(&oap->oap_lock);
+
+ /* If the page was marked as notcacheable - don't add to any locks */
+ if (!nocache) {
+ osc_build_res_name(loi->loi_id, loi->loi_gr, &oid);
+ /* This is the only place where we can call cache_add_extent
+ without oap_lock, because this page is locked now, and
+ the lock we are adding it to is referenced, so cannot lose
+ any pages either. */
+ rc = cache_add_extent(oap->oap_cli->cl_cache, &oid, oap, lockh);
+ if (rc)
+ RETURN(rc);
+ }
+
CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
RETURN(0);
}
lop_update_pending(cli, lop, oap->oap_cmd, -1);
}
loi_list_maint(cli, loi);
+ cache_remove_extent(cli->cl_cache, oap);
LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
out:
RETURN(rc);
}
+int osc_extent_blocking_cb(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *new, void *data,
+ int flag)
+{
+ struct lustre_handle lockh = { 0 };
+ int rc;
+ ENTRY;
+
+ if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
+ LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
+ LBUG();
+ }
+
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh);
+ if (rc != ELDLM_OK)
+ CERROR("ldlm_cli_cancel failed: %d\n", rc);
+ break;
+ case LDLM_CB_CANCELING: {
+
+ ldlm_lock2handle(lock, &lockh);
+ /* This lock wasn't granted, don't try to do anything */
+ if (lock->l_req_mode != lock->l_granted_mode)
+ RETURN(0);
+
+ cache_remove_lock(lock->l_conn_export->exp_obd->u.cli.cl_cache,
+ &lockh);
+
+ if (lock->l_conn_export->exp_obd->u.cli.cl_ext_lock_cancel_cb)
+ lock->l_conn_export->exp_obd->u.cli.cl_ext_lock_cancel_cb(
+ lock, new, data,flag);
+ break;
+ }
+ default:
+ LBUG();
+ }
+
+ RETURN(0);
+}
+EXPORT_SYMBOL(osc_extent_blocking_cb);
+
static void osc_set_data_with_check(struct lustre_handle *lockh, void *data,
int flags)
{
}
#endif
lock->l_ast_data = data;
- lock->l_flags |= (flags & LDLM_FL_NO_LRU);
unlock_res_and_lock(lock);
LDLM_LOCK_PUT(lock);
}
static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
ldlm_iterator_t replace, void *data)
{
- struct ldlm_res_id res_id = { .name = {0} };
+ struct ldlm_res_id res_id;
struct obd_device *obd = class_exp2obd(exp);
- res_id.name[0] = lsm->lsm_object_id;
- res_id.name[2] = lsm->lsm_object_gr;
-
+ osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_gr, &res_id);
ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
return 0;
}
-static int osc_enqueue_fini(struct ptlrpc_request *req, struct obd_info *oinfo,
- int intent, int rc)
+static int osc_enqueue_fini(struct obd_device *obd, struct ptlrpc_request *req,
+ struct obd_info *oinfo, int intent, int rc)
{
ENTRY;
oinfo->oi_md->lsm_oinfo[0]->loi_lvb.lvb_mtime);
}
+ if (!rc)
+ cache_add_lock(obd->u.cli.cl_cache, oinfo->oi_lockh);
+
/* Call the update callback. */
rc = oinfo->oi_cb_up(oinfo, rc);
RETURN(rc);
}
-static int osc_enqueue_interpret(struct ptlrpc_request *req,
+static int osc_enqueue_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_enqueue_args *aa, int rc)
{
int intent = aa->oa_oi->oi_flags & LDLM_FL_HAS_INTENT;
aa->oa_oi->oi_lockh, rc);
/* Complete osc stuff. */
- rc = osc_enqueue_fini(req, aa->oa_oi, intent, rc);
+ rc = osc_enqueue_fini(aa->oa_exp->exp_obd, req, aa->oa_oi, intent, rc);
/* Release the lock for async request. */
if (lustre_handle_is_used(aa->oa_oi->oi_lockh) && rc == ELDLM_OK)
struct ldlm_enqueue_info *einfo,
struct ptlrpc_request_set *rqset)
{
- struct ldlm_res_id res_id = { .name = {0} };
+ struct ldlm_res_id res_id;
struct obd_device *obd = exp->exp_obd;
struct ptlrpc_request *req = NULL;
int intent = oinfo->oi_flags & LDLM_FL_HAS_INTENT;
int rc;
ENTRY;
- res_id.name[0] = oinfo->oi_md->lsm_object_id;
- res_id.name[2] = oinfo->oi_md->lsm_object_gr;
+ osc_build_res_name(oinfo->oi_md->lsm_object_id,
+ oinfo->oi_md->lsm_object_gr, &res_id);
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother. */
oinfo->oi_policy.l_extent.start -=
if (!rc) {
struct osc_enqueue_args *aa;
CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_enqueue_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->oa_oi = oinfo;
aa->oa_ei = einfo;
aa->oa_exp = exp;
- req->rq_interpret_reply = osc_enqueue_interpret;
+ req->rq_interpret_reply =
+ (ptlrpc_interpterer_t)osc_enqueue_interpret;
ptlrpc_set_add_req(rqset, req);
} else if (intent) {
ptlrpc_req_finished(req);
RETURN(rc);
}
- rc = osc_enqueue_fini(req, oinfo, intent, rc);
+ rc = osc_enqueue_fini(obd, req, oinfo, intent, rc);
if (intent)
ptlrpc_req_finished(req);
__u32 type, ldlm_policy_data_t *policy, __u32 mode,
int *flags, void *data, struct lustre_handle *lockh)
{
- struct ldlm_res_id res_id = { .name = {0} };
+ struct ldlm_res_id res_id;
struct obd_device *obd = exp->exp_obd;
int lflags = *flags;
ldlm_mode_t rc;
ENTRY;
- res_id.name[0] = lsm->lsm_object_id;
- res_id.name[2] = lsm->lsm_object_gr;
+ osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_gr, &res_id);
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
RETURN(-EIO);
void *opaque)
{
struct obd_device *obd = class_exp2obd(exp);
- struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
+ struct ldlm_res_id res_id, *resp = NULL;
if (lsm != NULL) {
- res_id.name[0] = lsm->lsm_object_id;
- res_id.name[2] = lsm->lsm_object_gr;
- resp = &res_id;
+ resp = osc_build_res_name(lsm->lsm_object_id,
+ lsm->lsm_object_gr, &res_id);
}
return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
}
-static int osc_join_lru(struct obd_export *exp,
- struct lov_stripe_md *lsm, int join)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
-
- if (lsm != NULL) {
- res_id.name[0] = lsm->lsm_object_id;
- res_id.name[2] = lsm->lsm_object_gr;
- resp = &res_id;
- }
-
- return ldlm_cli_join_lru(obd->obd_namespace, resp, join);
-}
-
-static int osc_statfs_interpret(struct ptlrpc_request *req,
+static int osc_statfs_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)
{
struct obd_statfs *msfs;
RETURN(rc);
}
ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
+ req->rq_request_portal = OST_CREATE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
/* procfs requests not want stat in wait for avoid deadlock */
req->rq_no_resend = 1;
req->rq_no_delay = 1;
}
- req->rq_interpret_reply = osc_statfs_interpret;
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct osc_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(rqset, req);
{
struct obd_statfs *msfs;
struct ptlrpc_request *req;
+ struct obd_import *imp = NULL;
int rc;
ENTRY;
+ /* Since the request might also come from lprocfs, we need to
+ * sync this with client_disconnect_export (bug 15684) */
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import)
+ imp = class_import_get(obd->u.cli.cl_import);
+ up_read(&obd->u.cli.cl_sem);
+ if (!imp)
+ RETURN(-ENODEV);
+
/* We could possibly pass max_age in the request (as an absolute
* timestamp or a "seconds.usec ago") so the target can avoid doing
* extra calls into the filesystem if that isn't necessary (e.g.
* during mount that would help a bit). Having relative timestamps
* is not so great if request processing is slow, while absolute
* timestamps are not ideal because they need time synchronization. */
- req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
+ req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
+
+ class_import_put(imp);
+
if (req == NULL)
RETURN(-ENOMEM);
RETURN(rc);
}
ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL; //XXX FIXME bug 249
+ req->rq_request_portal = OST_CREATE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
if (flags & OBD_STATFS_NODELAY) {
/* procfs requests not want stat in wait for avoid deadlock */
*/
static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
{
- struct lov_user_md lum, *lumk;
+ /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
+ struct lov_user_md_v3 lum, *lumk;
+ struct lov_user_ost_data_v1 *lmm_objects;
int rc = 0, lum_size;
ENTRY;
if (!lsm)
RETURN(-ENODATA);
- if (copy_from_user(&lum, lump, sizeof(lum)))
+ /* we only need the header part from user space to get lmm_magic and
+ * lmm_stripe_count, (the header part is common to v1 and v3) */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (copy_from_user(&lum, lump, lum_size))
RETURN(-EFAULT);
- if (lum.lmm_magic != LOV_USER_MAGIC)
+ if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
+ (lum.lmm_magic != LOV_USER_MAGIC_V3))
RETURN(-EINVAL);
+ /* lov_user_md_vX and lov_mds_md_vX must have the same size */
+ LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
+ LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
+ LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
+
+ /* we can use lov_mds_md_size() to compute lum_size
+ * because lov_user_md_vX and lov_mds_md_vX have the same size */
if (lum.lmm_stripe_count > 0) {
- lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
+ lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
OBD_ALLOC(lumk, lum_size);
if (!lumk)
RETURN(-ENOMEM);
- lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
- lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
+ if (lum.lmm_magic == LOV_USER_MAGIC_V1)
+ lmm_objects = &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
+ else
+ lmm_objects = &(lumk->lmm_objects[0]);
+ lmm_objects->l_object_id = lsm->lsm_object_id;
} else {
- lum_size = sizeof(lum);
+ lum_size = lov_mds_md_size(0, lum.lmm_magic);
lumk = &lum;
}
}
static int osc_get_info(struct obd_export *exp, obd_count keylen,
- void *key, __u32 *vallen, void *val)
+ void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
{
ENTRY;
if (!vallen || !val)
out:
ptlrpc_req_finished(req);
RETURN(rc);
+ } else if (KEY_IS(KEY_FIEMAP)) {
+ struct ptlrpc_request *req;
+ struct ll_user_fiemap *reply;
+ char *tmp;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_OST_GET_INFO_FIEMAP);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
+ RCL_CLIENT, keylen);
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
+ RCL_CLIENT, *vallen);
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
+ RCL_SERVER, *vallen);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ memcpy(tmp, val, *vallen);
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out1, rc);
+
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ if (reply == NULL)
+ GOTO(out1, rc = -EPROTO);
+
+ memcpy(val, reply, *vallen);
+ out1:
+ ptlrpc_req_finished(req);
+
+ RETURN(rc);
}
+
RETURN(-EINVAL);
}
-static int osc_setinfo_mds_conn_interpret(struct ptlrpc_request *req,
+static int osc_setinfo_mds_conn_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
void *aa, int rc)
{
struct llog_ctxt *ctxt;
RETURN(0);
}
- if (KEY_IS("unlinked")) {
+ if (KEY_IS(KEY_UNLINKED)) {
struct osc_creator *oscc = &obd->u.cli.cl_oscc;
spin_lock(&oscc->oscc_lock);
oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
RETURN(0);
}
- if (KEY_IS("checksum")) {
+ if (KEY_IS(KEY_CHECKSUM)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
ptlrpc_request_set_replen(req);
ptlrpc_set_add_req(set, req);
- ptlrpc_check_set(set);
+ ptlrpc_check_set(NULL, set);
RETURN(0);
}
};
static struct llog_operations osc_mds_ost_orig_logops;
-static int osc_llog_init(struct obd_device *obd, int group,
+static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
struct obd_device *tgt, int count,
struct llog_catid *catid, struct obd_uuid *uuid)
{
int rc;
ENTRY;
- LASSERT(group == OBD_LLOG_GROUP);
+
+ LASSERT(olg == &obd->obd_olg);
spin_lock(&obd->obd_dev_lock);
if (osc_mds_ost_orig_logops.lop_setup != llog_obd_origin_setup) {
osc_mds_ost_orig_logops = llog_lvfs_ops;
rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, count,
NULL, &osc_size_repl_logops);
- if (rc)
+ if (rc) {
+ struct llog_ctxt *ctxt =
+ llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+ if (ctxt)
+ llog_cleanup(ctxt);
CERROR("failed LLOG_SIZE_REPL_CTXT\n");
+ }
+ GOTO(out, rc);
out:
if (rc) {
CERROR("osc '%s' tgt '%s' cnt %d catid %p rc=%d\n",
CERROR("logid "LPX64":0x%x\n",
catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
}
- RETURN(rc);
+ return rc;
}
static int osc_llog_finish(struct obd_device *obd, int count)
static int osc_reconnect(const struct lu_env *env,
struct obd_export *exp, struct obd_device *obd,
struct obd_uuid *cluuid,
- struct obd_connect_data *data)
+ struct obd_connect_data *data,
+ void *localdata)
{
struct client_obd *cli = &obd->u.cli;
oscc_init(obd);
/* We need to allocate a few requests more, because
- brw_interpret_oap tries to create new requests before freeing
+ brw_interpret tries to create new requests before freeing
previous ones. Ideally we want to have 2x max_rpcs_in_flight
reserved, but I afraid that might be too much wasted RAM
in fact, so 2 is just my guess and still should work. */
ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
OST_MAXREQSIZE,
ptlrpc_add_rqs_to_pool);
+ cli->cl_cache = cache_create(obd);
+ if (!cli->cl_cache) {
+ osc_cleanup(obd);
+ rc = -ENOMEM;
+ }
}
RETURN(rc);
class_destroy_import(imp);
obd->u.cli.cl_import = NULL;
}
- break;
- }
- case OBD_CLEANUP_SELF_EXP:
rc = obd_llog_finish(obd, 0);
if (rc != 0)
CERROR("failed to cleanup llogging subsystems\n");
break;
- case OBD_CLEANUP_OBD:
- break;
+ }
}
RETURN(rc);
}
/* free memory of osc quota cache */
lquota_cleanup(quota_interface, obd);
+ cache_destroy(obd->u.cli.cl_cache);
rc = client_obd_cleanup(obd);
ptlrpcd_decref();
RETURN(rc);
}
+/* Register a page-removal callback @func (with its extent-pin callback
+ * @pin_cb) on this export's client cache, so upper layers can be notified
+ * before cached pages are torn down. Delegates to the cache layer; returns
+ * its status (presumably 0 on success — confirm against cache_add_extent_removal_cb). */
+static int osc_register_page_removal_cb(struct obd_export *exp,
+ obd_page_removal_cb_t func,
+ obd_pin_extent_cb pin_cb)
+{
+ return cache_add_extent_removal_cb(exp->exp_obd->u.cli.cl_cache, func,
+ pin_cb);
+}
+
+/* Remove a previously registered page-removal callback @func from this
+ * export's client cache. Returns the cache layer's status code. */
+static int osc_unregister_page_removal_cb(struct obd_export *exp,
+ obd_page_removal_cb_t func)
+{
+ return cache_del_extent_removal_cb(exp->exp_obd->u.cli.cl_cache, func);
+}
+
+/* Install @cb as the extent-lock cancel callback for this export's client
+ * obd. Only one callback slot exists, so registering over an existing one
+ * is a caller bug — hence the LASSERT. Always returns 0. */
+static int osc_register_lock_cancel_cb(struct obd_export *exp,
+ obd_lock_cancel_cb cb)
+{
+ LASSERT(exp->exp_obd->u.cli.cl_ext_lock_cancel_cb == NULL);
+
+ exp->exp_obd->u.cli.cl_ext_lock_cancel_cb = cb;
+ return 0;
+}
+
+/* Clear the extent-lock cancel callback on this export's client obd.
+ * @cb must be the callback currently registered; otherwise the call is a
+ * mismatch (caller bug) and we fail with -EINVAL without touching the slot.
+ * Returns 0 on success. */
+static int osc_unregister_lock_cancel_cb(struct obd_export *exp,
+ obd_lock_cancel_cb cb)
+{
+ if (exp->exp_obd->u.cli.cl_ext_lock_cancel_cb != cb) {
+ CERROR("Unregistering cancel cb %p, while only %p was "
+ "registered\n", cb,
+ exp->exp_obd->u.cli.cl_ext_lock_cancel_cb);
+ /* plain return, not the RETURN() exit-trace macro: this
+ * function has no matching ENTRY, and its sibling callbacks
+ * all use plain return — RETURN() here would unbalance the
+ * libcfs debug trace */
+ return -EINVAL;
+ }
+
+ exp->exp_obd->u.cli.cl_ext_lock_cancel_cb = NULL;
+ return 0;
+}
+
static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
{
struct lustre_cfg *lcfg = buf;
.o_brw = osc_brw,
.o_brw_async = osc_brw_async,
.o_prep_async_page = osc_prep_async_page,
+ .o_reget_short_lock = osc_reget_short_lock,
+ .o_release_short_lock = osc_release_short_lock,
.o_queue_async_io = osc_queue_async_io,
.o_set_async_flags = osc_set_async_flags,
.o_queue_group_io = osc_queue_group_io,
.o_change_cbdata = osc_change_cbdata,
.o_cancel = osc_cancel,
.o_cancel_unused = osc_cancel_unused,
- .o_join_lru = osc_join_lru,
.o_iocontrol = osc_iocontrol,
.o_get_info = osc_get_info,
.o_set_info_async = osc_set_info_async,
.o_llog_init = osc_llog_init,
.o_llog_finish = osc_llog_finish,
.o_process_config = osc_process_config,
+ .o_register_page_removal_cb = osc_register_page_removal_cb,
+ .o_unregister_page_removal_cb = osc_unregister_page_removal_cb,
+ .o_register_lock_cancel_cb = osc_register_lock_cancel_cb,
+ .o_unregister_lock_cancel_cb = osc_unregister_lock_cancel_cb,
};
+
int __init osc_init(void)
{
struct lprocfs_static_vars lvars = { 0 };
class_unregister_type(LUSTRE_OSC_NAME);
}
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
MODULE_LICENSE("GPL");