# include <linux/module.h>
# include <linux/mm.h>
# include <linux/highmem.h>
-# include <linux/lustre_dlm.h>
+# include <linux/ctype.h>
+# include <linux/init.h>
# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
# include <linux/workqueue.h>
# include <linux/smp_lock.h>
# include <liblustre.h>
#endif
-#include <linux/kp30.h>
+#include <linux/lustre_dlm.h>
+#include <libcfs/kp30.h>
#include <linux/lustre_net.h>
-#include <linux/lustre_user.h>
+#include <linux/lustre_sec.h>
+#include <lustre/lustre_user.h>
#include <linux/obd_ost.h>
#include <linux/obd_lov.h>
-#ifndef __CYGWIN__
-# include <linux/ctype.h>
-# include <linux/init.h>
-#else
+#ifdef __CYGWIN__
# include <ctype.h>
#endif
#include <linux/lustre_log.h>
#include "osc_internal.h"
-
-static int osc_attach(struct obd_device *dev, obd_count len, void *data)
-{
- struct lprocfs_static_vars lvars;
- int rc;
- ENTRY;
-
- lprocfs_init_vars(osc,&lvars);
- rc = lprocfs_obd_attach(dev, lvars.obd_vars);
- if (rc < 0)
- RETURN(rc);
-
- rc = lproc_osc_attach_seqstat(dev);
- if (rc < 0) {
- lprocfs_obd_detach(dev);
- RETURN(rc);
- }
-
- ptlrpc_lprocfs_register_obd(dev);
- RETURN(0);
-}
-
-static int osc_detach(struct obd_device *dev)
-{
- ptlrpc_lprocfs_unregister_obd(dev);
- return lprocfs_obd_detach(dev);
-}
-
-
/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
struct lov_stripe_md *lsm)
if (lsm) {
LASSERT(lsm->lsm_object_id);
+ LASSERT(lsm->lsm_object_gr);
(*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
+ (*lmmp)->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
}
RETURN(lmm_size);
if (lmm != NULL) {
/* XXX zero *lsmp? */
(*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
+ (*lsmp)->lsm_object_gr = le64_to_cpu (lmm->lmm_object_gr);
LASSERT((*lsmp)->lsm_object_id);
+ LASSERT((*lsmp)->lsm_object_gr);
}
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
memcpy(aa->aa_oa, &body->oa, sizeof(*aa->aa_oa));
/* This should really be sent by the OST */
- aa->aa_oa->o_blksize = OSC_BRW_MAX_SIZE;
+ aa->aa_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
aa->aa_oa->o_valid |= OBD_MD_FLBLKSZ;
} else {
CERROR("can't unpack ost_body\n");
struct osc_getattr_async_args *aa;
ENTRY;
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GETATTR, 1,
- &size, NULL);
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_GETATTR, 1, &size, NULL);
if (!request)
RETURN(-ENOMEM);
int rc, size = sizeof(*body);
ENTRY;
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GETATTR, 1,
- &size, NULL);
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_GETATTR, 1, &size, NULL);
if (!request)
RETURN(-ENOMEM);
memcpy(oa, &body->oa, sizeof(*oa));
/* This should really be sent by the OST */
- oa->o_blksize = OSC_BRW_MAX_SIZE;
+ oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
oa->o_valid |= OBD_MD_FLBLKSZ;
EXIT;
int rc, size = sizeof(*body);
ENTRY;
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SETATTR, 1, &size,
- NULL);
+ LASSERT(!(oa->o_valid & OBD_MD_FLGROUP) || oa->o_gr > 0);
+
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_SETATTR, 1, &size, NULL);
if (!request)
RETURN(-ENOMEM);
request->rq_replen = lustre_msg_size(1, &size);
- rc = ptlrpc_queue_wait(request);
- if (rc)
- GOTO(out, rc);
-
- body = lustre_swab_repbuf(request, 0, sizeof(*body),
- lustre_swab_ost_body);
- if (body == NULL)
- GOTO(out, rc = -EPROTO);
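+        /* if the caller flagged this setattr as asynchronous, hand the
+         * request to ptlrpcd and return without waiting for the reply */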
+ if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
+ ptlrpcd_add_req(request);
+ rc = 0;
+ } else {
+ rc = ptlrpc_queue_wait(request);
+ if (rc)
+ GOTO(out, rc);
- memcpy(oa, &body->oa, sizeof(*oa));
+ body = lustre_swab_repbuf(request, 0, sizeof(*body),
+ lustre_swab_ost_body);
+ if (body == NULL)
+ GOTO(out, rc = -EPROTO);
+ memcpy(oa, &body->oa, sizeof(*oa));
+ }
EXIT;
out:
ptlrpc_req_finished(request);
int osc_real_create(struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
+ struct osc_creator *oscc = &exp->exp_obd->u.cli.cl_oscc;
struct ptlrpc_request *request;
struct ost_body *body;
struct lov_stripe_md *lsm;
RETURN(rc);
}
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_CREATE, 1, &size,
- NULL);
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_CREATE, 1, &size, NULL);
if (!request)
GOTO(out, rc = -ENOMEM);
LASSERT((oa->o_valid & OBD_MD_FLFLAGS) &&
oa->o_flags == OBD_FL_DELORPHAN);
DEBUG_REQ(D_HA, request,
- "delorphan from OST integration; level == RECOVER");
- request->rq_send_state = LUSTRE_IMP_RECOVER;
+ "delorphan from OST integration");
+ /* Don't resend the delorphan request */
+ request->rq_no_resend = request->rq_no_delay = 1;
}
rc = ptlrpc_queue_wait(request);
GOTO (out_req, rc = -EPROTO);
}
+ if ((oa->o_valid & OBD_MD_FLFLAGS) && oa->o_flags == OBD_FL_DELORPHAN) {
+ struct obd_import *imp = class_exp2cliimp(exp);
+                /* MDS declares last known object, OSS responds
+                 * with the next possible object -bzzz */
+ spin_lock(&oscc->oscc_lock);
+ oscc->oscc_next_id = body->oa.o_id;
+ spin_unlock(&oscc->oscc_lock);
+ CDEBUG(D_HA, "%s: set nextid "LPD64" after recovery\n",
+ imp->imp_target_uuid.uuid, oa->o_id);
+ }
memcpy(oa, &body->oa, sizeof(*oa));
/* This should really be sent by the OST */
- oa->o_blksize = OSC_BRW_MAX_SIZE;
+ oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
oa->o_valid |= OBD_MD_FLBLKSZ;
/* XXX LOV STACKING: the lsm that is passed to us from LOV does not
* This needs to be fixed in a big way.
*/
lsm->lsm_object_id = oa->o_id;
+ lsm->lsm_object_gr = oa->o_gr;
*ea = lsm;
if (oti != NULL) {
RETURN(-EINVAL);
}
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_PUNCH, 1, &size,
- NULL);
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_PUNCH, 1, &size, NULL);
if (!request)
RETURN(-ENOMEM);
}
static int osc_sync(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *md, obd_size start, obd_size end)
+ struct lov_stripe_md *md, obd_size start,
+ obd_size end)
{
struct ptlrpc_request *request;
struct ost_body *body;
RETURN(-EINVAL);
}
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SYNC, 1, &size,
- NULL);
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_SYNC, 1, &size, NULL);
if (!request)
RETURN(-ENOMEM);
RETURN(-EINVAL);
}
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_DESTROY, 1,
- &size, NULL);
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_DESTROY, 1, &size, NULL);
if (!request)
RETURN(-ENOMEM);
memcpy(&body->oa, oa, sizeof(*oa));
request->rq_replen = lustre_msg_size(1, &size);
- rc = ptlrpc_queue_wait(request);
- if (rc)
- GOTO(out, rc);
+ if (oti != NULL && (oti->oti_flags & OBD_MODE_ASYNC)) {
+ ptlrpcd_add_req(request);
+ rc = 0;
+ } else {
+ rc = ptlrpc_queue_wait(request);
+
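+                /* destroying an object that is already gone is not an error */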
+ if (rc == -ENOENT)
+ rc = 0;
- body = lustre_swab_repbuf(request, 0, sizeof(*body),
- lustre_swab_ost_body);
- if (body == NULL) {
- CERROR ("Can't unpack body\n");
- GOTO (out, rc = -EPROTO);
- }
+ if (rc) {
+ ptlrpc_req_finished(request);
+ RETURN(rc);
+ }
- memcpy(oa, &body->oa, sizeof(*oa));
+ body = lustre_swab_repbuf(request, 0, sizeof(*body),
+ lustre_swab_ost_body);
+ if (body == NULL) {
+ CERROR ("Can't unpack body\n");
+ ptlrpc_req_finished(request);
+ RETURN(-EPROTO);
+ }
- EXIT;
- out:
- ptlrpc_req_finished(request);
- return rc;
+ memcpy(oa, &body->oa, sizeof(*oa));
+ ptlrpc_req_finished(request);
+ }
+ RETURN(rc);
}
-static void osc_announce_cached(struct client_obd *cli, struct ost_body *body)
+static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
+ long writing_bytes)
{
- obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLRDEV;
+ obd_valid bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
- LASSERT(!(body->oa.o_valid & bits));
+ LASSERT(!(oa->o_valid & bits));
- body->oa.o_valid |= bits;
- down(&cli->cl_dirty_sem);
- body->oa.o_blocks = cli->cl_dirty;
- body->oa.o_rdev = cli->cl_dirty_granted;
- up(&cli->cl_dirty_sem);
- CDEBUG(D_INODE, "announcing "LPU64" dirty "LPU64" granted\n",
- cli->cl_dirty, cli->cl_dirty_granted);
+ oa->o_valid |= bits;
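+        /* report how much we have dirtied, how much headroom remains below
+         * cl_dirty_max, the grant we still hold and any grant we lost, so
+         * the OST can adjust our grant */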
+ spin_lock(&cli->cl_loi_list_lock);
+ oa->o_dirty = cli->cl_dirty;
+ oa->o_undirty = cli->cl_dirty_max - oa->o_dirty;
+ oa->o_grant = cli->cl_avail_grant;
+ oa->o_dropped = cli->cl_lost_grant;
+ cli->cl_lost_grant = 0;
+ spin_unlock(&cli->cl_loi_list_lock);
+ CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
+ oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}
-static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
+/* caller must hold loi_list_lock */
+static void osc_consume_write_grant(struct client_obd *cli,
+ struct osc_async_page *oap)
{
- if(!(body->oa.o_valid & OBD_MD_FLRDEV)) {
- if (cli->cl_ost_can_grant) {
- CDEBUG(D_INODE, "%s can't grant\n",
- cli->cl_import->imp_target_uuid.uuid);
+ cli->cl_dirty += PAGE_SIZE;
+ cli->cl_avail_grant -= PAGE_SIZE;
+ oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
+ CDEBUG(D_CACHE, "using %lu grant credits for oap %p\n", PAGE_SIZE, oap);
+ LASSERT(cli->cl_avail_grant >= 0);
+}
+
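+/* total number of BRW read and write RPCs this client has in flight */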
+static unsigned long rpcs_in_flight(struct client_obd *cli)
+{
+ return cli->cl_r_in_flight + cli->cl_w_in_flight;
+}
+
+/* caller must hold loi_list_lock */
+void osc_wake_cache_waiters(struct client_obd *cli)
+{
+ struct list_head *l, *tmp;
+ struct osc_cache_waiter *ocw;
+
+ list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+ /* if we can't dirty more, we must wait until some is written */
+ if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
+ CDEBUG(D_CACHE, "no dirty room: dirty: %ld max %ld\n",
+ cli->cl_dirty, cli->cl_dirty_max);
+ return;
}
- cli->cl_ost_can_grant = 0;
- return;
+
+ /* if still dirty cache but no grant wait for pending RPCs that
+ * may yet return us some grant before doing sync writes */
+ if (cli->cl_w_in_flight && cli->cl_avail_grant < PAGE_SIZE) {
+ CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
+ cli->cl_w_in_flight);
+ }
+ ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
+ list_del_init(&ocw->ocw_entry);
+ if (cli->cl_avail_grant < PAGE_SIZE) {
+ /* no more RPCs in flight to return grant, do sync IO */
+ ocw->ocw_rc = -EDQUOT;
+ CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
+ } else {
+ osc_consume_write_grant(cli, ocw->ocw_oap);
+ }
+
+ wake_up(&ocw->ocw_waitq);
}
- CDEBUG(D_ERROR, "got "LPU64" grant\n", body->oa.o_rdev);
- down(&cli->cl_dirty_sem);
- cli->cl_dirty_granted = body->oa.o_rdev;
- /* XXX check for over-run and wake up the io thread that
- * doesn't exist yet */
- up(&cli->cl_dirty_sem);
+ EXIT;
+}
+
+static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
+{
+ spin_lock(&cli->cl_loi_list_lock);
+ CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
+ cli->cl_avail_grant += body->oa.o_grant;
+ /* waiters are woken in brw_interpret_oap */
+ spin_unlock(&cli->cl_loi_list_lock);
}
/* We assume that the reason this OSC got a short read is because it read
if (pga->count > nob_read) {
/* EOF inside this page */
- ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
+ ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
memset(ptr + nob_read, 0, pga->count - nob_read);
kunmap(pga->pg);
page_count--;
/* zero remaining pages */
while (page_count-- > 0) {
- ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
+ ptr = kmap(pga->pg) + (pga->page_offset & ~PAGE_MASK);
memset(ptr, 0, pga->count);
kunmap(pga->pg);
pga++;
}
}
-static int check_write_rcs(struct ptlrpc_request *request, int niocount,
+static int check_write_rcs(struct ptlrpc_request *request,
+ int requested_nob, int niocount,
obd_count page_count, struct brw_page *pga)
{
- int i;
- int *remote_rcs;
+ int *remote_rcs, i;
/* return error if any niobuf was in error */
remote_rcs = lustre_swab_repbuf(request, 1,
sizeof(*remote_rcs) * niocount, NULL);
if (remote_rcs == NULL) {
- CERROR ("Missing/short RC vector on BRW_WRITE reply\n");
- return (-EPROTO);
+ CERROR("Missing/short RC vector on BRW_WRITE reply\n");
+ return(-EPROTO);
}
- if (lustre_msg_swabbed (request->rq_repmsg))
+ if (lustre_msg_swabbed(request->rq_repmsg))
for (i = 0; i < niocount; i++)
- __swab32s (&remote_rcs[i]);
+ __swab32s((__u32 *)&remote_rcs[i]);
for (i = 0; i < niocount; i++) {
if (remote_rcs[i] < 0)
- return (remote_rcs[i]);
+ return(remote_rcs[i]);
if (remote_rcs[i] != 0) {
- CERROR ("rc[%d] invalid (%d) req %p\n",
+ CERROR("rc[%d] invalid (%d) req %p\n",
i, remote_rcs[i], request);
- return (-EPROTO);
+ return(-EPROTO);
}
}
+ if (request->rq_bulk->bd_nob_transferred != requested_nob) {
+ CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
+                       request->rq_bulk->bd_nob_transferred, requested_nob);
+ return(-EPROTO);
+ }
+
return (0);
}
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
if (p1->flag != p2->flag) {
- unsigned mask = ~(OBD_BRW_CREATE|OBD_BRW_FROM_GRANT);
+ unsigned mask = ~OBD_BRW_FROM_GRANT;
/* warn if we try to combine flags that we don't know to be
* safe to combine */
return 0;
}
- return (p1->off + p1->count == p2->off);
+ return (p1->disk_offset + p1->count == p2->disk_offset);
}
#if CHECKSUM_BULK
struct ost_body *body;
struct obd_ioobj *ioobj;
struct niobuf_remote *niobuf;
- unsigned long flags;
int niocount;
int size[3];
int i;
opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
for (niocount = i = 1; i < page_count; i++)
- if (!can_merge_pages (&pga[i - 1], &pga[i]))
+ if (!can_merge_pages(&pga[i - 1], &pga[i]))
niocount++;
size[0] = sizeof(*body);
size[1] = sizeof(*ioobj);
size[2] = niocount * sizeof(*niobuf);
- req = ptlrpc_prep_req(imp, opc, 3, size, NULL);
+ req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, opc, 3, size, NULL);
if (req == NULL)
return (-ENOMEM);
if (opc == OST_WRITE)
- desc = ptlrpc_prep_bulk_imp(req, BULK_GET_SOURCE,
- OST_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp (req, page_count,
+ BULK_GET_SOURCE, OST_BULK_PORTAL);
else
- desc = ptlrpc_prep_bulk_imp(req, BULK_PUT_SINK,
- OST_BULK_PORTAL);
+ desc = ptlrpc_prep_bulk_imp (req, page_count,
+ BULK_PUT_SINK, OST_BULK_PORTAL);
if (desc == NULL)
GOTO(out, rc = -ENOMEM);
/* NB request now owns desc and will free it when it gets freed */
ioobj->ioo_bufcnt = niocount;
LASSERT (page_count > 0);
+
for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
struct brw_page *pg = &pga[i];
struct brw_page *pg_prev = pg - 1;
LASSERT(pg->count > 0);
- LASSERT((pg->off & ~PAGE_MASK) + pg->count <= PAGE_SIZE);
- LASSERTF(i == 0 || pg->off > pg_prev->off,
+ LASSERTF((pg->page_offset & ~PAGE_MASK)+ pg->count <= PAGE_SIZE,
+ "i: %d pg: %p pg_off: "LPU64", count: %u\n", i, pg,
+ pg->page_offset, pg->count);
+ LASSERTF(i == 0 || pg->disk_offset > pg_prev->disk_offset,
"i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
- " prev_pg %p [pri %lu ind %lu] off "LPU64,
+ " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
i, page_count,
- pg->pg, pg->pg->private, pg->pg->index, pg->off,
+ pg->pg, pg->pg->private, pg->pg->index, pg->disk_offset,
pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
- pg_prev->off);
-
- rc = ptlrpc_prep_bulk_page(desc, pg->pg, pg->off & ~PAGE_MASK,
- pg->count);
- if (rc != 0)
- GOTO(out, rc);
+ pg_prev->disk_offset);
+ ptlrpc_prep_bulk_page(desc, pg->pg,
+ pg->page_offset & ~PAGE_MASK, pg->count);
requested_nob += pg->count;
if (i > 0 && can_merge_pages(pg_prev, pg)) {
niobuf--;
niobuf->len += pg->count;
} else {
- niobuf->offset = pg->off;
+ niobuf->offset = pg->disk_offset;
niobuf->len = pg->count;
niobuf->flags = pg->flag;
}
LASSERT((void *)(niobuf - niocount) ==
lustre_msg_buf(req->rq_reqmsg, 2, niocount * sizeof(*niobuf)));
- osc_announce_cached(cli, body);
- spin_lock_irqsave(&req->rq_lock, flags);
- req->rq_no_resend = 1;
- spin_unlock_irqrestore(&req->rq_lock, flags);
+ osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
/* size[0] still sizeof (*body) */
if (opc == OST_WRITE) {
#if CHECKSUM_BULK
body->oa.o_valid |= OBD_MD_FLCKSUM;
- body->oa.o_nlink = cksum_pages(requested_nob, page_count, pga);
+ body->oa.o_cksum = cksum_pages(requested_nob, page_count, pga);
#endif
/* 1 RC per niobuf */
size[1] = sizeof(__u32) * niocount;
{
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
struct ost_body *body;
+ ENTRY;
if (rc < 0)
- return (rc);
+ RETURN(rc);
body = lustre_swab_repbuf(req, 0, sizeof(*body), lustre_swab_ost_body);
if (body == NULL) {
CERROR ("Can't unpack body\n");
- return (-EPROTO);
+ RETURN(-EPROTO);
}
osc_update_grant(cli, body);
+ memcpy(oa, &body->oa, sizeof(*oa));
if (req->rq_reqmsg->opc == OST_WRITE) {
if (rc > 0) {
CERROR ("Unexpected +ve rc %d\n", rc);
- return (-EPROTO);
+ RETURN(-EPROTO);
}
+ LASSERT (req->rq_bulk->bd_nob == requested_nob);
- return(check_write_rcs(req, niocount, page_count, pga));
+ RETURN(check_write_rcs(req, requested_nob, niocount,
+ page_count, pga));
}
if (rc > requested_nob) {
CERROR("Unexpected rc %d (%d requested)\n", rc, requested_nob);
+ RETURN(-EPROTO);
+ }
+
+ if (rc != req->rq_bulk->bd_nob_transferred) {
+ CERROR ("Unexpected rc %d (%d transferred)\n",
+ rc, req->rq_bulk->bd_nob_transferred);
return (-EPROTO);
}
if (rc < requested_nob)
handle_short_read(rc, page_count, pga);
- memcpy(oa, &body->oa, sizeof(*oa));
-
#if CHECKSUM_BULK
if (oa->o_valid & OBD_MD_FLCKSUM) {
const struct ptlrpc_peer *peer =
&req->rq_import->imp_connection->c_peer;
static int cksum_counter;
- obd_count server_cksum = oa->o_nlink;
+ obd_count server_cksum = oa->o_cksum;
obd_count cksum = cksum_pages(rc, page_count, pga);
char str[PTL_NALFMT_SIZE];
- portals_nid2str(peer->peer_ni->pni_number, peer->peer_nid, str);
+ ptlrpc_peernid2str(peer, str);
cksum_counter++;
if (server_cksum != cksum) {
CERROR("Bad checksum: server %x, client %x, server NID "
LPX64" (%s)\n", server_cksum, cksum,
- peer->peer_nid, str);
+ peer->peer_id.nid, str);
cksum_counter = 0;
- oa->o_nlink = cksum;
+ oa->o_cksum = cksum;
} else if ((cksum_counter & (-cksum_counter)) == cksum_counter){
CWARN("Checksum %u from "LPX64" (%s) OK: %x\n",
- cksum_counter, peer->peer_nid, str, cksum);
+ cksum_counter, peer->peer_id.nid, str, cksum);
}
} else {
static int cksum_missed;
if ((cksum_missed & (-cksum_missed)) == cksum_missed)
CERROR("Request checksum %u from "LPX64", no reply\n",
cksum_missed,
- req->rq_import->imp_connection->c_peer.peer_nid);
+ req->rq_import->imp_connection->c_peer.peer_id.nid);
}
#endif
- return (0);
+ RETURN(0);
}
static int osc_brw_internal(int cmd, struct obd_export *exp,struct obdo *oa,
rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
page_count, pga, &requested_nob, &niocount,
&request);
- /* NB ^ sets rq_no_resend */
-
if (rc != 0)
return (rc);
struct brw_page *pga = aa->aa_pga;
ENTRY;
- /* XXX bug 937 here */
- if (rc == -ETIMEDOUT && request->rq_resend) {
- DEBUG_REQ(D_HA, request, "BULK TIMEOUT");
- LBUG(); /* re-send. later. */
- //goto restart_bulk;
- }
-
rc = osc_brw_fini_request(request, oa, requested_nob, niocount,
page_count, pga, rc);
RETURN (rc);
rc = osc_brw_prep_request(cmd, class_exp2cliimp(exp), oa, lsm,
page_count, pga, &requested_nob, &nio_count,
&request);
- /* NB ^ sets rq_no_resend */
-
if (rc == 0) {
LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
aa = (struct osc_brw_async_args *)&request->rq_async_args;
for (i = stride ; i < num ; i++) {
tmp = array[i];
j = i;
- while (j >= stride && array[j - stride].off > tmp.off) {
+ while (j >= stride && array[j - stride].disk_offset >
+ tmp.disk_offset) {
array[j] = array[j - stride];
j -= stride;
}
}
static int osc_brw(int cmd, struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *md, obd_count page_count,
+ struct lov_stripe_md *lsm, obd_count page_count,
struct brw_page *pga, struct obd_trans_info *oti)
{
ENTRY;
obd_count pages_per_brw;
int rc;
- if (page_count > OSC_BRW_MAX_IOV)
- pages_per_brw = OSC_BRW_MAX_IOV;
+ if (page_count > PTLRPC_MAX_BRW_PAGES)
+ pages_per_brw = PTLRPC_MAX_BRW_PAGES;
else
pages_per_brw = page_count;
sort_brw_pages(pga, pages_per_brw);
pages_per_brw = check_elan_limit(pga, pages_per_brw);
- rc = osc_brw_internal(cmd, exp, oa, md, pages_per_brw, pga);
+ rc = osc_brw_internal(cmd, exp, oa, lsm, pages_per_brw, pga);
if (rc != 0)
RETURN(rc);
}
static int osc_brw_async(int cmd, struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *md, obd_count page_count,
+ struct lov_stripe_md *lsm, obd_count page_count,
struct brw_page *pga, struct ptlrpc_request_set *set,
struct obd_trans_info *oti)
{
obd_count pages_per_brw;
int rc;
- if (page_count > OSC_BRW_MAX_IOV)
- pages_per_brw = OSC_BRW_MAX_IOV;
+ if (page_count > PTLRPC_MAX_BRW_PAGES)
+ pages_per_brw = PTLRPC_MAX_BRW_PAGES;
else
pages_per_brw = page_count;
sort_brw_pages(pga, pages_per_brw);
pages_per_brw = check_elan_limit(pga, pages_per_brw);
- rc = async_internal(cmd, exp, oa, md, pages_per_brw, pga, set);
+ rc = async_internal(cmd, exp, oa, lsm, pages_per_brw, pga, set);
if (rc != 0)
RETURN(rc);
}
static void osc_check_rpcs(struct client_obd *cli);
-static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap);
+static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
+ int sent);
static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi);
+static void lop_update_pending(struct client_obd *cli,
+ struct loi_oap_pages *lop, int cmd, int delta);
-static void osc_complete_oap(struct client_obd *cli,
- struct osc_async_page *oap, int rc)
+/* this is called when a sync waiter receives an interruption. Its job is to
+ * get the caller woken as soon as possible. If its page hasn't been put in an
+ * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
+ * desiring interruption which will forcefully complete the rpc once the rpc
+ * has timed out */
+static void osc_occ_interrupted(struct oig_callback_context *occ)
{
+ struct osc_async_page *oap;
+ struct loi_oap_pages *lop;
+ struct lov_oinfo *loi;
ENTRY;
- osc_exit_cache(cli, oap);
+
+ /* XXX member_of() */
+ oap = list_entry(occ, struct osc_async_page, oap_occ);
+
+ spin_lock(&oap->oap_cli->cl_loi_list_lock);
+
+ oap->oap_interrupted = 1;
+
+ /* ok, it's been put in an rpc. */
+ if (oap->oap_request != NULL) {
+ ptlrpc_mark_interrupted(oap->oap_request);
+ ptlrpcd_wake(oap->oap_request);
+ GOTO(unlock, 0);
+ }
+
+ /* we don't get interruption callbacks until osc_trigger_sync_io()
+ * has been called and put the sync oaps in the pending/urgent lists.*/
+ if (!list_empty(&oap->oap_pending_item)) {
+ list_del_init(&oap->oap_pending_item);
+ if (oap->oap_async_flags & ASYNC_URGENT)
+ list_del_init(&oap->oap_urgent_item);
+
+ loi = oap->oap_loi;
+ lop = (oap->oap_cmd == OBD_BRW_WRITE) ?
+ &loi->loi_write_lop : &loi->loi_read_lop;
+ lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
+ loi_list_maint(oap->oap_cli, oap->oap_loi);
+
+ oig_complete_one(oap->oap_oig, &oap->oap_occ, 0);
+ oap->oap_oig = NULL;
+ }
+
+unlock:
+ spin_unlock(&oap->oap_cli->cl_loi_list_lock);
+}
+
+/* this must be called holding the loi list lock to give coverage to exit_cache,
+ * async_flag maintenance, and oap_request */
+static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
+ struct osc_async_page *oap, int sent, int rc)
+{
+ osc_exit_cache(cli, oap, sent);
oap->oap_async_flags = 0;
- if (oap->oap_osic) {
- osic_complete_one(oap->oap_osic, rc);
- oap->oap_osic = NULL;
+ oap->oap_interrupted = 0;
+
+ if (oap->oap_request != NULL) {
+ ptlrpc_req_finished(oap->oap_request);
+ oap->oap_request = NULL;
+ }
+
+ if (rc == 0 && oa != NULL)
+ oap->oap_loi->loi_blocks = oa->o_blocks;
+
+ if (oap->oap_oig) {
+ oig_complete_one(oap->oap_oig, &oap->oap_occ, rc);
+ oap->oap_oig = NULL;
EXIT;
return;
}
oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
- rc);
- EXIT;
+ oa, rc);
}
static int brw_interpret_oap(struct ptlrpc_request *request,
struct osc_async_page *oap;
struct client_obd *cli;
struct list_head *pos, *n;
+ struct timeval now;
ENTRY;
- CDEBUG(D_INODE, "request %p aa %p\n", request, aa);
-
+ do_gettimeofday(&now);
rc = osc_brw_fini_request(request, aa->aa_oa, aa->aa_requested_nob,
aa->aa_nio_count, aa->aa_page_count,
aa->aa_pga, rc);
+ CDEBUG(D_INODE, "request %p aa %p rc %d\n", request, aa, rc);
+
cli = aa->aa_cli;
/* in failout recovery we ignore writeback failure and want
* to just tell llite to unlock the page and continue */
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
+ if (request->rq_reqmsg->opc == OST_WRITE &&
+ (cli->cl_import == NULL || cli->cl_import->imp_invalid)) {
+ CDEBUG(D_INODE, "flipping to rc 0 imp %p inv %d\n",
+ cli->cl_import,
+ cli->cl_import ? cli->cl_import->imp_invalid : -1);
rc = 0;
+ }
spin_lock(&cli->cl_loi_list_lock);
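+        /* update the per-direction RPC time stats from this request's
+         * rq_rpcd_start timestamp */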
+ if (request->rq_reqmsg->opc == OST_WRITE)
+ lprocfs_stime_record(&cli->cl_write_stime, &now,
+ &request->rq_rpcd_start);
+ else
+ lprocfs_stime_record(&cli->cl_read_stime, &now,
+ &request->rq_rpcd_start);
+
+ /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
+ * is called so we know whether to go to sync BRWs or wait for more
+ * RPCs to complete */
+ if (request->rq_reqmsg->opc == OST_WRITE)
+ cli->cl_w_in_flight--;
+ else
+ cli->cl_r_in_flight--;
+
/* the caller may re-use the oap after the completion call so
* we need to clean it up a little */
list_for_each_safe(pos, n, &aa->aa_oaps) {
//oap->oap_page, oap->oap_page->index, oap);
list_del_init(&oap->oap_rpc_item);
- osc_complete_oap(cli, oap, rc);
+ osc_ap_completion(cli, aa->aa_oa, oap, 1, rc);
}
- cli->cl_brw_in_flight--;
+ osc_wake_cache_waiters(cli);
osc_check_rpcs(cli);
-
spin_unlock(&cli->cl_loi_list_lock);
obdo_free(aa->aa_oa);
ops = oap->oap_caller_ops;
caller_data = oap->oap_caller_data;
}
- pga[i].off = oap->oap_obj_off + oap->oap_page_off;
+ pga[i].disk_offset = oap->oap_obj_off + oap->oap_page_off;
+ pga[i].page_offset = pga[i].disk_offset;
pga[i].pg = oap->oap_page;
pga[i].count = oap->oap_count;
pga[i].flag = oap->oap_brw_flags;
- //CDEBUG(D_INODE, "putting page %p index %lu oap %p into pga\n",
- //pga[i].pg, oap->oap_page->index, oap);
+ CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
+ pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
i++;
}
oap = list_entry(pos, struct osc_async_page, oap_pending_item);
ops = oap->oap_caller_ops;
+ LASSERT(oap->oap_magic == OAP_MAGIC);
+
/* in llite being 'ready' equates to the page being locked
* until completion unlocks it. commit_write submits a page
* as not ready because its unlock will happen unconditionally
int rc = ops->ap_make_ready(oap->oap_caller_data, cmd);
if (rc < 0)
CDEBUG(D_INODE, "oap %p page %p returned %d "
- "instead of ready\n", oap,
+ "instead of ready\n", oap,
oap->oap_page, rc);
switch (rc) {
case -EAGAIN:
/* llite is telling us that the page is still
* in commit_write and that we should try
- * and put it in an rpc again later. we
+ * and put it in an rpc again later. we
* break out of the loop so we don't create
- * a whole in the sequence of pages in
- * the rpc stream.*/
+ * a hole in the sequence of pages in the rpc
+ * stream.*/
pos = NULL;
break;
case -EINTR:
break;
default:
LASSERTF(0, "oap %p page %p returned %d "
- "from make_ready\n", oap,
+ "from make_ready\n", oap,
oap->oap_page, rc);
break;
}
/* take the page out of our book-keeping */
list_del_init(&oap->oap_pending_item);
lop_update_pending(cli, lop, cmd, -1);
- if (!list_empty(&oap->oap_urgent_item))
- list_del_init(&oap->oap_urgent_item);
+ list_del_init(&oap->oap_urgent_item);
/* ask the caller for the size of the io as the rpc leaves. */
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE))
- oap->oap_count = ops->ap_refresh_count(
- oap->oap_caller_data,
- cmd);
+ oap->oap_count =
+ ops->ap_refresh_count(oap->oap_caller_data,cmd);
if (oap->oap_count <= 0) {
- CDEBUG(D_INODE, "oap %p count %d, completing\n", oap,
+ CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
oap->oap_count);
- osc_complete_oap(cli, oap, oap->oap_count);
+ osc_ap_completion(cli, NULL, oap, 0, oap->oap_count);
continue;
}
break;
}
+ osc_wake_cache_waiters(cli);
+
if (page_count == 0)
RETURN(0);
oap = list_entry(pos, struct osc_async_page,
oap_rpc_item);
list_del_init(&oap->oap_rpc_item);
+
+ /* queued sync pages can be torn down while the pages
+ * were between the pending list and the rpc */
+ if (oap->oap_interrupted) {
+ CDEBUG(D_INODE, "oap %p interrupted\n", oap);
+ osc_ap_completion(cli, NULL, oap, 0,
+ oap->oap_count);
+ continue;
+ }
+
+ /* put the page back in the loi/lop lists */
list_add_tail(&oap->oap_pending_item,
&lop->lop_pending);
lop_update_pending(cli, lop, cmd, 1);
list_splice(&rpc_list, &aa->aa_oaps);
INIT_LIST_HEAD(&rpc_list);
- if (cmd == OBD_BRW_READ)
+#ifdef __KERNEL__
+ if (cmd == OBD_BRW_READ) {
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
- else
+ lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
+ } else {
lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
+ lprocfs_oh_tally(&cli->cl_write_rpc_hist,
+ cli->cl_w_in_flight);
+ }
+#endif
spin_lock(&cli->cl_loi_list_lock);
+
if (cmd == OBD_BRW_READ)
- lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_brw_in_flight);
- else
- lprocfs_oh_tally(&cli->cl_write_rpc_hist,
- cli->cl_brw_in_flight);
+ cli->cl_r_in_flight++;
+ else
+ cli->cl_w_in_flight++;
+ /* queued sync pages can be torn down while the pages
+ * were between the pending list and the rpc */
+ list_for_each(pos, &aa->aa_oaps) {
+ oap = list_entry(pos, struct osc_async_page, oap_rpc_item);
+ if (oap->oap_interrupted) {
+ CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
+ oap, request);
+ ptlrpc_mark_interrupted(request);
+ break;
+ }
+ }
- cli->cl_brw_in_flight++;
- CDEBUG(D_INODE, "req %p: %d pages, aa %p. now %d in flight\n", request,
- page_count, aa, cli->cl_brw_in_flight);
+ CDEBUG(D_INODE, "req %p: %d pages, aa %p. now %dr/%dw in flight\n",
+ request, page_count, aa, cli->cl_r_in_flight,
+ cli->cl_w_in_flight);
+ oap->oap_request = ptlrpc_request_addref(request);
request->rq_interpret_reply = brw_interpret_oap;
ptlrpcd_add_req(request);
RETURN(1);
if (lop->lop_num_pending == 0)
RETURN(0);
+ /* if we have an invalid import we want to drain the queued pages
+ * by forcing them through rpcs that immediately fail and complete
+ * the pages. recovery relies on this to empty the queued pages
+ * before canceling the locks and evicting down the llite pages */
+ if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
+ RETURN(1);
+
/* stream rpcs in queue order as long as as there is an urgent page
* queued. this is our cheap solution for good batching in the case
* where writepage marks some random page in the middle of the file as
* that are being queued but which can't be made ready until
* the queuer finishes with the page. this is a wart for
* llite::commit_write() */
- optimal *= 2;
+ optimal += 16;
}
if (lop->lop_num_pending >= optimal)
RETURN(1);
RETURN(0);
}
-static void on_list(struct list_head *item, struct list_head *list,
+static void on_list(struct list_head *item, struct list_head *list,
int should_be_on)
{
if (list_empty(item) && should_be_on)
* can find pages to build into rpcs quickly */
static void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
{
- on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
+ on_list(&loi->loi_cli_item, &cli->cl_loi_ready_list,
lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE) ||
lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
- on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
+ on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
loi->loi_write_lop.lop_num_pending);
+
+ on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
+ loi->loi_read_lop.lop_num_pending);
}
-#define LOI_DEBUG(LOI, STR, args...) \
- CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
- !list_empty(&(LOI)->loi_cli_item), \
+#define LOI_DEBUG(LOI, STR, args...) \
+ CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR, \
+ !list_empty(&(LOI)->loi_cli_item), \
(LOI)->loi_write_lop.lop_num_pending, \
- !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
+ !list_empty(&(LOI)->loi_write_lop.lop_urgent), \
(LOI)->loi_read_lop.lop_num_pending, \
- !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
- args) \
+ !list_empty(&(LOI)->loi_read_lop.lop_urgent), \
+ args) \
struct lov_oinfo *osc_next_loi(struct client_obd *cli)
{
ENTRY;
/* first return all objects which we already know to have
- * pages ready to be stuffed into rpcs */
+ * pages ready to be stuffed into rpcs */
if (!list_empty(&cli->cl_loi_ready_list))
- RETURN(list_entry(cli->cl_loi_ready_list.next,
+ RETURN(list_entry(cli->cl_loi_ready_list.next,
struct lov_oinfo, loi_cli_item));
-
- /* then if we have cache waiters, return all objects with queued
+
+ /* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
* have filled up the cache and not been fired into rpcs because
         * they don't pass the nr_pending/object threshold */
if (!list_empty(&cli->cl_cache_waiters) &&
!list_empty(&cli->cl_loi_write_list))
- RETURN(list_entry(cli->cl_loi_write_list.next,
+ RETURN(list_entry(cli->cl_loi_write_list.next,
struct lov_oinfo, loi_write_item));
+
+ /* then return all queued objects when we have an invalid import
+ * so that they get flushed */
+ if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
+ if (!list_empty(&cli->cl_loi_write_list))
+ RETURN(list_entry(cli->cl_loi_write_list.next,
+ struct lov_oinfo, loi_write_item));
+ if (!list_empty(&cli->cl_loi_read_list))
+ RETURN(list_entry(cli->cl_loi_read_list.next,
+ struct lov_oinfo, loi_read_item));
+ }
RETURN(NULL);
}
ENTRY;
while ((loi = osc_next_loi(cli)) != NULL) {
-
- LOI_DEBUG(loi, "%d in flight", cli->cl_brw_in_flight);
-
- if (cli->cl_brw_in_flight >= cli->cl_max_rpcs_in_flight)
+ LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
+
+ if (rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight)
break;
/* attempt some read/write balancing by alternating between
list_del_init(&loi->loi_cli_item);
if (!list_empty(&loi->loi_write_item))
list_del_init(&loi->loi_write_item);
+ if (!list_empty(&loi->loi_read_item))
+ list_del_init(&loi->loi_read_item);
loi_list_maint(cli, loi);
/* we're trying to queue a page in the osc so we're subject to the
* 'cl_dirty_max' limit on the number of pages that can be queued in the osc.
* If the osc's queued pages are already at that limit, then we want to sleep
- * until there is space in the osc's queue for us. we need this goofy
- * little struct to really tell that our allocation was fulfilled in
- * the presence of pending signals */
-struct osc_cache_waiter {
- struct list_head ocw_entry;
- wait_queue_head_t ocw_waitq;
-};
+ * until there is space in the osc's queue for us. We also may be waiting for
+ * write credits from the OST if there are RPCs in flight that may return some
+ * before we fall back to sync writes.
+ *
+ * We need this to know our allocation was granted in the presence of signals */
static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
int rc;
ENTRY;
spin_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&ocw->ocw_entry);
+ rc = list_empty(&ocw->ocw_entry) || rpcs_in_flight(cli) == 0;
spin_unlock(&cli->cl_loi_list_lock);
RETURN(rc);
};
+
+/* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
+ * grant or cache space. */
static int osc_enter_cache(struct client_obd *cli, struct lov_oinfo *loi,
struct osc_async_page *oap)
{
struct osc_cache_waiter ocw;
- struct l_wait_info lwi = {0};
- int rc = 0;
- ENTRY;
+ struct l_wait_info lwi = { 0 };
+ struct timeval start, stop;
+
+ CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
+ cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
+ cli->cl_avail_grant);
- /* XXX check for ost grants here as well.. for now we ignore them. */
if (cli->cl_dirty_max < PAGE_SIZE)
- RETURN(-EDQUOT);
+ return(-EDQUOT);
- /* if we fail this test then cl_dirty contains at least one page
- * that will have to be completed after we release the lock */
- if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max) {
+ /* Hopefully normal case - cache space and write credits available */
+ if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
+ cli->cl_avail_grant >= PAGE_SIZE) {
/* account for ourselves */
- cli->cl_dirty += PAGE_SIZE;
- GOTO(out, rc = 0);
+ osc_consume_write_grant(cli, oap);
+ return(0);
}
- init_waitqueue_head(&ocw.ocw_waitq);
- list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
+ /* Make sure that there are write rpcs in flight to wait for. This
+         * is a little silly, as this object may not have any pending
+         * writes, but other objects sure might. */
+ if (cli->cl_w_in_flight) {
+ list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
+ init_waitqueue_head(&ocw.ocw_waitq);
+ ocw.ocw_oap = oap;
+ ocw.ocw_rc = 0;
- /* make sure that there are write rpcs in flight to wait for. this
- * is a little silly as this object may not have any pending
- * but other objects sure might. this should probably be cleaned. */
- loi_list_maint(cli, loi);
- osc_check_rpcs(cli);
- spin_unlock(&cli->cl_loi_list_lock);
-
- CDEBUG(D_INODE, "sleeping for cache space\n");
- l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
+ loi_list_maint(cli, loi);
+ osc_check_rpcs(cli);
+ spin_unlock(&cli->cl_loi_list_lock);
- spin_lock(&cli->cl_loi_list_lock);
- if (!list_empty(&ocw.ocw_entry)) {
- rc = -EINTR;
- list_del(&ocw.ocw_entry);
+ CDEBUG(0, "sleeping for cache space\n");
+ do_gettimeofday(&start);
+ l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
+ do_gettimeofday(&stop);
+ spin_lock(&cli->cl_loi_list_lock);
+ lprocfs_stime_record(&cli->cl_enter_stime, &stop, &start);
+ if (!list_empty(&ocw.ocw_entry)) {
+ list_del(&ocw.ocw_entry);
+ RETURN(-EINTR);
+ }
+ RETURN(ocw.ocw_rc);
}
- GOTO(out, rc);
-out:
- if (rc == 0)
- oap->oap_brw_flags |= OBD_BRW_FROM_GRANT;
- return rc;
+
+ RETURN(-EDQUOT);
}
-/* the companion to enter_cache, called when an oap is now longer part of the
+/* the companion to enter_cache, called when an oap is no longer part of the
* dirty accounting.. so writeback completes or truncate happens before writing
* starts. must be called with the loi lock held. */
-static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
+static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
+ int sent)
{
- struct osc_cache_waiter *ocw;
ENTRY;
if (!(oap->oap_brw_flags & OBD_BRW_FROM_GRANT)) {
return;
}
- if (list_empty(&cli->cl_cache_waiters)) {
- cli->cl_dirty -= PAGE_SIZE;
- } else {
- ocw = list_entry(cli->cl_cache_waiters.next,
- struct osc_cache_waiter, ocw_entry);
- list_del_init(&ocw->ocw_entry);
- wake_up(&ocw->ocw_waitq);
+ oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
+ cli->cl_dirty -= PAGE_SIZE;
+ if (!sent) {
+ cli->cl_lost_grant += PAGE_SIZE;
+ CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
+ cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
}
- oap->oap_brw_flags &= ~OBD_BRW_FROM_GRANT;
EXIT;
}
return -ENOMEM;
oap->oap_magic = OAP_MAGIC;
+ oap->oap_cli = &exp->exp_obd->u.cli;
+ oap->oap_loi = loi;
+
oap->oap_caller_ops = ops;
oap->oap_caller_data = data;
INIT_LIST_HEAD(&oap->oap_urgent_item);
INIT_LIST_HEAD(&oap->oap_rpc_item);
+ oap->oap_occ.occ_interrupted = osc_occ_interrupted;
+
CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
*res = oap;
RETURN(0);
}
-struct osc_async_page *oap_from_cookie(void *cookie)
-{
- struct osc_async_page *oap = cookie;
- if (oap->oap_magic != OAP_MAGIC)
- return ERR_PTR(-EINVAL);
- return oap;
-};
-
static int osc_queue_async_io(struct obd_export *exp, struct lov_stripe_md *lsm,
struct lov_oinfo *loi, void *cookie,
int cmd, obd_off off, int count,
- obd_flag brw_flags, enum async_flags async_flags)
+ obd_flags brw_flags, enum async_flags async_flags)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct osc_async_page *oap;
int rc;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
+ oap = OAP_FROM_COOKIE(cookie);
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
RETURN(-EIO);
static int osc_set_async_flags(struct obd_export *exp,
struct lov_stripe_md *lsm,
struct lov_oinfo *loi, void *cookie,
- obd_flag async_flags)
+ obd_flags async_flags)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct loi_oap_pages *lop;
int rc = 0;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
+ oap = OAP_FROM_COOKIE(cookie);
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
RETURN(-EIO);
RETURN(rc);
}
-static int osc_queue_sync_io(struct obd_export *exp, struct lov_stripe_md *lsm,
+static int osc_queue_group_io(struct obd_export *exp, struct lov_stripe_md *lsm,
struct lov_oinfo *loi,
- struct obd_sync_io_container *osic, void *cookie,
+ struct obd_io_group *oig, void *cookie,
int cmd, obd_off off, int count,
- obd_flag brw_flags)
+ obd_flags brw_flags,
+ obd_flags async_flags)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct osc_async_page *oap;
struct loi_oap_pages *lop;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
+ oap = OAP_FROM_COOKIE(cookie);
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
RETURN(-EIO);
oap->oap_page_off = off;
oap->oap_count = count;
oap->oap_brw_flags = brw_flags;
+ oap->oap_async_flags = async_flags;
if (cmd == OBD_BRW_WRITE)
lop = &loi->loi_write_lop;
else
lop = &loi->loi_read_lop;
- list_add_tail(&oap->oap_pending_item, &lop->lop_pending_sync);
- oap->oap_osic = osic;
- osic_add_one(osic);
+ list_add_tail(&oap->oap_pending_item, &lop->lop_pending_group);
+ if (oap->oap_async_flags & ASYNC_GROUP_SYNC) {
+ oap->oap_oig = oig;
+ oig_add_one(oig, &oap->oap_occ);
+ }
- LOI_DEBUG(loi, "oap %p page %p on sync pending\n", oap, oap->oap_page);
+ LOI_DEBUG(loi, "oap %p page %p on group pending\n", oap, oap->oap_page);
spin_unlock(&cli->cl_loi_list_lock);
RETURN(0);
}
-static void osc_sync_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
- struct loi_oap_pages *lop, int cmd)
+static void osc_group_to_pending(struct client_obd *cli, struct lov_oinfo *loi,
+ struct loi_oap_pages *lop, int cmd)
{
struct list_head *pos, *tmp;
struct osc_async_page *oap;
- list_for_each_safe(pos, tmp, &lop->lop_pending_sync) {
+ list_for_each_safe(pos, tmp, &lop->lop_pending_group) {
oap = list_entry(pos, struct osc_async_page, oap_pending_item);
list_del(&oap->oap_pending_item);
- oap->oap_async_flags |= ASYNC_READY | ASYNC_URGENT |
- ASYNC_COUNT_STABLE;
list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
list_add(&oap->oap_urgent_item, &lop->lop_urgent);
lop_update_pending(cli, lop, cmd, 1);
loi_list_maint(cli, loi);
}
-static int osc_trigger_sync_io(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi,
- struct obd_sync_io_container *osic)
+static int osc_trigger_group_io(struct obd_export *exp,
+ struct lov_stripe_md *lsm,
+ struct lov_oinfo *loi,
+ struct obd_io_group *oig)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
ENTRY;
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
- RETURN(-EIO);
-
if (loi == NULL)
loi = &lsm->lsm_oinfo[0];
spin_lock(&cli->cl_loi_list_lock);
- osc_sync_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
- osc_sync_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
+ osc_group_to_pending(cli, loi, &loi->loi_write_lop, OBD_BRW_WRITE);
+ osc_group_to_pending(cli, loi, &loi->loi_read_lop, OBD_BRW_READ);
osc_check_rpcs(cli);
spin_unlock(&cli->cl_loi_list_lock);
int rc = 0;
ENTRY;
- oap = oap_from_cookie(cookie);
- if (IS_ERR(oap))
- RETURN(PTR_ERR(oap));
+ oap = OAP_FROM_COOKIE(cookie);
if (loi == NULL)
loi = &lsm->lsm_oinfo[0];
spin_lock(&cli->cl_loi_list_lock);
- osc_exit_cache(cli, oap);
-
if (!list_empty(&oap->oap_rpc_item))
GOTO(out, rc = -EBUSY);
+ osc_exit_cache(cli, oap, 0);
+ osc_wake_cache_waiters(cli);
+
if (!list_empty(&oap->oap_urgent_item)) {
list_del_init(&oap->oap_urgent_item);
oap->oap_async_flags &= ~ASYNC_URGENT;
LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
out:
spin_unlock(&cli->cl_loi_list_lock);
- OBD_FREE(oap, sizeof(*oap));
+ if (rc == 0)
+ OBD_FREE(oap, sizeof(*oap));
RETURN(rc);
}
size[1] = sizeof(struct obd_ioobj);
size[2] = page_count * sizeof(*nioptr);
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SAN_READ, 3,
- size, NULL);
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_SAN_READ, 3, size, NULL);
if (!request)
RETURN(-ENOMEM);
for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
LASSERT(PageLocked(pga[mapped].pg));
- LASSERT(mapped == 0 || pga[mapped].off > pga[mapped - 1].off);
+ LASSERT(mapped == 0 ||
+ pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
- nioptr->offset = pga[mapped].off;
+ nioptr->offset = pga[mapped].disk_offset;
nioptr->len = pga[mapped].count;
nioptr->flags = pga[mapped].flag;
}
size[1] = sizeof(struct obd_ioobj);
size[2] = page_count * sizeof(*nioptr);
- request = ptlrpc_prep_req(class_exp2cliimp(exp), OST_SAN_WRITE,
- 3, size, NULL);
+ request = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_SAN_WRITE, 3, size, NULL);
if (!request)
RETURN(-ENOMEM);
/* pack request */
for (mapped = 0; mapped < page_count; mapped++, nioptr++) {
LASSERT(PageLocked(pga[mapped].pg));
- LASSERT(mapped == 0 || pga[mapped].off > pga[mapped - 1].off);
+ LASSERT(mapped == 0 ||
+ pga[mapped].disk_offset > pga[mapped - 1].disk_offset);
- nioptr->offset = pga[mapped].off;
+ nioptr->offset = pga[mapped].disk_offset;
nioptr->len = pga[mapped].count;
nioptr->flags = pga[mapped].flag;
}
obd_count pages_per_brw;
int rc;
- if (page_count > OSC_BRW_MAX_IOV)
- pages_per_brw = OSC_BRW_MAX_IOV;
+ if (page_count > PTLRPC_MAX_BRW_PAGES)
+ pages_per_brw = PTLRPC_MAX_BRW_PAGES;
else
pages_per_brw = page_count;
{
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- LASSERT(lock != NULL);
+ if (lock == NULL) {
+ CERROR("lockh %p, data %p - client evicted?\n", lockh, data);
+ return;
+ }
+
l_lock(&lock->l_resource->lr_namespace->ns_lock);
#ifdef __KERNEL__
if (lock->l_ast_data && lock->l_ast_data != data) {
struct inode *new_inode = data;
struct inode *old_inode = lock->l_ast_data;
- unsigned long state = old_inode->i_state & I_FREEING;
- CERROR("Found existing inode %p/%lu/%u state %lu in lock: "
- "setting data to %p/%lu/%u\n", old_inode,
- old_inode->i_ino, old_inode->i_generation, state,
- new_inode, new_inode->i_ino, new_inode->i_generation);
- LASSERT(state);
+ if (!(old_inode->i_state & I_FREEING))
+ LDLM_ERROR(lock, "inconsistent l_ast_data found");
+ LASSERTF(old_inode->i_state & I_FREEING,
+ "Found existing inode %p/%lu/%u state %lu in lock: "
+ "setting data to %p/%lu/%u\n", old_inode,
+ old_inode->i_ino, old_inode->i_generation,
+ old_inode->i_state,
+ new_inode, new_inode->i_ino, new_inode->i_generation);
}
#endif
lock->l_ast_data = data;
static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
ldlm_iterator_t replace, void *data)
{
- struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
+ struct ldlm_res_id res_id = { .name = {0} };
struct obd_device *obd = class_exp2obd(exp);
+ res_id.name[0] = lsm->lsm_object_id;
+ res_id.name[2] = lsm->lsm_object_gr;
ldlm_change_cbdata(obd->obd_namespace, &res_id, replace, data);
return 0;
}
static int osc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lustre_handle *parent_lock,
- __u32 type, void *extentp, int extent_len, __u32 mode,
- int *flags, void *callback, void *data,
+ __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
+ void *data, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh)
{
- struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
struct obd_device *obd = exp->exp_obd;
- struct ldlm_extent *extent = extentp;
+ struct ldlm_res_id res_id = { .name = {0} };
+ struct ost_lvb lvb;
+ struct ldlm_reply *rep;
+ struct ptlrpc_request *req = NULL;
int rc;
ENTRY;
+ res_id.name[0] = lsm->lsm_object_id;
+ res_id.name[2] = lsm->lsm_object_gr;
+
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother. */
- extent->start -= extent->start & ~PAGE_MASK;
- extent->end |= ~PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
+
+ if (lsm->lsm_oinfo->loi_kms_valid == 0)
+ goto no_match;
/* Next, search for already existing extent locks that will cover us */
- rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id,
- type, extent, sizeof(*extent), mode, lockh);
+ rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type, policy, mode,
+ lockh);
if (rc == 1) {
+ if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
+ /* return immediately if no credential held */
+ ldlm_lock_decref(lockh, mode);
+ RETURN(-EACCES);
+ }
+
osc_set_data_with_check(lockh, data);
+ if (*flags & LDLM_FL_HAS_INTENT) {
+ /* I would like to be able to ASSERT here that rss <=
+ * kms, but I can't, for reasons which are explained in
+ * lov_enqueue() */
+ }
/* We already have a lock, and it's referenced */
RETURN(ELDLM_OK);
}
if (mode == LCK_PR) {
rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
- extent, sizeof(*extent), LCK_PW, lockh);
+ policy, LCK_PW, lockh);
if (rc == 1) {
+ if (ptlrpcs_check_cred(obd->u.cli.cl_import)) {
+ /* return immediately if no credential held */
+ ldlm_lock_decref(lockh, LCK_PW);
+ RETURN(-EACCES);
+ }
+
/* FIXME: This is not incredibly elegant, but it might
* be more elegant than adding another parameter to
* lock_match. I want a second opinion. */
RETURN(ELDLM_OK);
}
}
+ if (mode == LCK_PW) {
+ rc = ldlm_lock_match(obd->obd_namespace, 0, &res_id, type,
+ policy, LCK_PR, lockh);
+ if (rc == 1) {
+ rc = ldlm_cli_convert(lockh, mode, flags);
+ if (!rc) {
+ /* Update readers/writers accounting */
+ ldlm_lock_addref(lockh, LCK_PW);
+ ldlm_lock_decref(lockh, LCK_PR);
+ osc_set_data_with_check(lockh, data);
+ RETURN(ELDLM_OK);
+ }
+ /* If the conversion failed, we need to drop refcount
+ on matched lock before we get new one */
+ /* XXX Won't it save us some efforts if we cancel PR
+ lock here? We are going to take PW lock anyway and it
+ will invalidate PR lock */
+ ldlm_lock_decref(lockh, LCK_PR);
+ if (rc != EDEADLOCK) {
+ RETURN(rc);
+ }
+ }
+ }
+
+ no_match:
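+        /* for intent enqueues we build the request ourselves so the reply
+         * can carry an ldlm_reply and the OST's size/blocks lvb */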
+ if (*flags & LDLM_FL_HAS_INTENT) {
+ int size[2] = {0, sizeof(struct ldlm_request)};
+
+ req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
+ LDLM_ENQUEUE, 2, size, NULL);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ size[0] = sizeof(*rep);
+ size[1] = sizeof(lvb);
+ req->rq_replen = lustre_msg_size(2, size);
+ }
+ rc = ldlm_cli_enqueue(exp, req, obd->obd_namespace, res_id, type,
+ policy, mode, flags, bl_cb, cp_cb, gl_cb, data,
+ &lvb, sizeof(lvb), lustre_swab_ost_lvb, lockh);
+ if (req != NULL) {
+ if (rc == ELDLM_LOCK_ABORTED) {
+ /* swabbed by ldlm_cli_enqueue() */
+ LASSERT_REPSWABBED(req, 0);
+ rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
+ LASSERT(rep != NULL);
+ if (rep->lock_policy_res1)
+ rc = rep->lock_policy_res1;
+ }
+ ptlrpc_req_finished(req);
+ }
+
+ if ((*flags & LDLM_FL_HAS_INTENT && rc == ELDLM_LOCK_ABORTED) || !rc) {
+ CDEBUG(D_INODE, "received kms == "LPU64", blocks == "LPU64"\n",
+ lvb.lvb_size, lvb.lvb_blocks);
+ lsm->lsm_oinfo->loi_rss = lvb.lvb_size;
+ lsm->lsm_oinfo->loi_blocks = lvb.lvb_blocks;
+ }
- rc = ldlm_cli_enqueue(exp, NULL, obd->obd_namespace, parent_lock,
- res_id, type, extent, sizeof(*extent), mode,
- flags,ldlm_completion_ast, callback, data, lockh);
RETURN(rc);
}
static int osc_match(struct obd_export *exp, struct lov_stripe_md *lsm,
- __u32 type, void *extentp, int extent_len, __u32 mode,
+ __u32 type, ldlm_policy_data_t *policy, __u32 mode,
int *flags, void *data, struct lustre_handle *lockh)
{
- struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
+ struct ldlm_res_id res_id = { .name = {0} };
struct obd_device *obd = exp->exp_obd;
- struct ldlm_extent *extent = extentp;
int rc;
ENTRY;
+ res_id.name[0] = lsm->lsm_object_id;
+ res_id.name[2] = lsm->lsm_object_gr;
+
OBD_FAIL_RETURN(OBD_FAIL_OSC_MATCH, -EIO);
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother */
- extent->start -= extent->start & ~PAGE_MASK;
- extent->end |= ~PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
/* Next, search for already existing extent locks that will cover us */
rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
- extent, sizeof(*extent), mode, lockh);
+ policy, mode, lockh);
if (rc) {
- osc_set_data_with_check(lockh, data);
+ // if (!(*flags & LDLM_FL_TEST_LOCK))
+ osc_set_data_with_check(lockh, data);
RETURN(rc);
}
/* If we're trying to read, we also search for an existing PW lock. The
* writers can share a single PW lock. */
if (mode == LCK_PR) {
rc = ldlm_lock_match(obd->obd_namespace, *flags, &res_id, type,
- extent, sizeof(*extent), LCK_PW, lockh);
- if (rc == 1) {
+ policy, LCK_PW, lockh);
+ if (rc == 1 && !(*flags & LDLM_FL_TEST_LOCK)) {
/* FIXME: This is not incredibly elegant, but it might
* be more elegant than adding another parameter to
* lock_match. I want a second opinion. */
{
ENTRY;
- ldlm_lock_decref(lockh, mode);
+ if (mode == LCK_GROUP)
+ ldlm_lock_decref_and_cancel(lockh, mode);
+ else
+ ldlm_lock_decref(lockh, mode);
RETURN(0);
}
static int osc_cancel_unused(struct obd_export *exp,
- struct lov_stripe_md *lsm, int flags, void *opaque)
+ struct lov_stripe_md *lsm,
+ int flags, void *opaque)
{
struct obd_device *obd = class_exp2obd(exp);
- struct ldlm_res_id res_id = { .name = {lsm->lsm_object_id} };
+ struct ldlm_res_id res_id = { .name = {0} }, *resp = NULL;
+
+ if (lsm != NULL) {
+ res_id.name[0] = lsm->lsm_object_id;
+ res_id.name[2] = lsm->lsm_object_gr;
+ resp = &res_id;
+ }
- return ldlm_cli_cancel_unused(obd->obd_namespace, &res_id, flags,
- opaque);
+ return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
}
static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
* during mount that would help a bit). Having relative timestamps
* is not so great if request processing is slow, while absolute
* timestamps are not ideal because they need time synchronization. */
- request = ptlrpc_prep_req(obd->u.cli.cl_import, OST_STATFS,0,NULL,NULL);
+ request = ptlrpc_prep_req(obd->u.cli.cl_import, LUSTRE_OBD_VERSION,
+ OST_STATFS, 0, NULL, NULL);
if (!request)
RETURN(-ENOMEM);
*/
static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
{
- struct lov_user_md lum;
- struct lov_mds_md *lmmk;
- int rc, lmm_size;
+ struct lov_user_md lum, *lumk;
+ int rc, lum_size;
ENTRY;
if (!lsm)
if (lum.lmm_magic != LOV_USER_MAGIC)
RETURN(-EINVAL);
- if (lum.lmm_stripe_count < 1)
- RETURN(-EOVERFLOW);
+ if (lum.lmm_stripe_count > 0) {
+ lum_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
+ OBD_ALLOC(lumk, lum_size);
+ if (!lumk)
+ RETURN(-ENOMEM);
- lmm_size = sizeof(lum) + sizeof(lum.lmm_objects[0]);
- OBD_ALLOC(lmmk, lmm_size);
- if (!lmmk)
- RETURN(-ENOMEM);
+ lumk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
+ lumk->lmm_objects[0].l_object_gr = lsm->lsm_object_gr;
+ } else {
+ lum_size = sizeof(lum);
+ lumk = &lum;
+ }
- lmmk->lmm_stripe_count = 1;
- lmmk->lmm_object_id = lsm->lsm_object_id;
- lmmk->lmm_objects[0].l_object_id = lsm->lsm_object_id;
+ lumk->lmm_object_id = lsm->lsm_object_id;
+ lumk->lmm_object_gr = lsm->lsm_object_gr;
+ lumk->lmm_stripe_count = 1;
- if (copy_to_user(lump, lmmk, lmm_size))
+ if (copy_to_user(lump, lumk, lum_size))
rc = -EFAULT;
- OBD_FREE(lmmk, lmm_size);
+ if (lumk != &lum)
+ OBD_FREE(lumk, lum_size);
RETURN(rc);
}
int err = 0;
ENTRY;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+ MOD_INC_USE_COUNT;
+#else
+ if (!try_module_get(THIS_MODULE)) {
+                CERROR("Can't get module. Is it alive?\n");
+ return -EINVAL;
+ }
+#endif
switch (cmd) {
case OBD_IOC_LOV_GET_CONFIG: {
char *buf;
GOTO(out, err = -EINVAL);
}
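+                /* a __u32 is written to inlbuf3 below; make sure the caller
+                 * left room for it */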
+ if (data->ioc_inllen3 < sizeof(__u32)) {
+ OBD_FREE(buf, len);
+ GOTO(out, err = -EINVAL);
+ }
+
desc = (struct lov_desc *)data->ioc_inlbuf1;
desc->ld_tgt_count = 1;
desc->ld_active_tgt_count = 1;
desc->ld_default_stripe_offset = 0;
desc->ld_pattern = 0;
memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
-
memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
+ *((__u32 *)data->ioc_inlbuf3) = 1;
err = copy_to_user((void *)uarg, buf, len);
if (err)
err = ptlrpc_set_import_active(obd->u.cli.cl_import,
data->ioc_offset);
GOTO(out, err);
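+ /* Drive recovery on the client import on or off; as with the set-active
+ * ioctl just above, ioc_offset presumably carries the requested state. */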
+ case IOC_OSC_CTL_RECOVERY:
+ err = ptlrpc_import_control_recovery(obd->u.cli.cl_import,
+ data->ioc_offset);
+ GOTO(out, err);
default:
CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", cmd, current->comm);
GOTO(out, err = -ENOTTY);
}
out:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+ MOD_DEC_USE_COUNT;
+#else
+ module_put(THIS_MODULE);
+#endif
return err;
}
-static int osc_get_info(struct obd_export *exp, obd_count keylen,
+static int osc_get_info(struct obd_export *exp, __u32 keylen,
void *key, __u32 *vallen, void *val)
{
ENTRY;
obd_id *reply;
char *bufs[1] = {key};
int rc;
- req = ptlrpc_prep_req(class_exp2cliimp(exp), OST_GET_INFO, 1,
- &keylen, bufs);
+ req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_OBD_VERSION,
+ OST_GET_INFO, 1, (int *)&keylen, bufs);
if (req == NULL)
RETURN(-ENOMEM);
- req->rq_replen = lustre_msg_size(1, vallen);
+ req->rq_replen = lustre_msg_size(1, (int *)vallen);
rc = ptlrpc_queue_wait(req);
if (rc)
GOTO(out, rc);
ptlrpc_req_finished(req);
RETURN(rc);
}
- RETURN(-EINVAL);
+ RETURN(-EPROTO);
}
static int osc_set_info(struct obd_export *exp, obd_count keylen,
void *key, obd_count vallen, void *val)
{
- struct ptlrpc_request *req;
+ struct obd_device *obd = exp->exp_obd;
struct obd_import *imp = class_exp2cliimp(exp);
struct llog_ctxt *ctxt;
- int rc, size = keylen;
- char *bufs[1] = {key};
+ int rc = 0;
ENTRY;
- if (keylen == strlen("next_id") &&
- memcmp(key, "next_id", strlen("next_id")) == 0) {
- if (vallen != sizeof(obd_id))
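+ /* "unlinked" and "unrecovery" clear the creator's NOSPC and RECOVERING
+ * flags respectively, presumably re-enabling object precreation. */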
+ if (keylen == strlen("unlinked") &&
+ memcmp(key, "unlinked", keylen) == 0) {
+ struct osc_creator *oscc = &obd->u.cli.cl_oscc;
+ spin_lock(&oscc->oscc_lock);
+ oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
+ spin_unlock(&oscc->oscc_lock);
+ RETURN(0);
+ }
+ if (keylen == strlen("unrecovery") &&
+ memcmp(key, "unrecovery", keylen) == 0) {
+ struct osc_creator *oscc = &obd->u.cli.cl_oscc;
+ spin_lock(&oscc->oscc_lock);
+ oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
+ spin_unlock(&oscc->oscc_lock);
+ RETURN(0);
+ }
+ if (keylen == strlen("initial_recov") &&
+ memcmp(key, "initial_recov", strlen("initial_recov")) == 0) {
+ struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
+ if (vallen != sizeof(int))
RETURN(-EINVAL);
- exp->u.eu_osc_data.oed_oscc.oscc_next_id = *((obd_id*)val) + 1;
- CDEBUG(D_INODE, "%s: set oscc_next_id = "LPU64"\n",
+ imp->imp_initial_recov = *(int *)val;
+ CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
exp->exp_obd->obd_name,
- exp->u.eu_osc_data.oed_oscc.oscc_next_id);
-
+ imp->imp_initial_recov);
RETURN(0);
}
- if (keylen == strlen("growth_count") &&
- memcmp(key, "growth_count", strlen("growth_count")) == 0) {
+ if (keylen == strlen("async") &&
+ memcmp(key, "async", keylen) == 0) {
+ struct client_obd *cl = &obd->u.cli;
if (vallen != sizeof(int))
RETURN(-EINVAL);
- exp->u.eu_osc_data.oed_oscc.oscc_grow_count = *((int*)val);
+ cl->cl_async = *(int *)val;
+ CDEBUG(D_HA, "%s: set async = %d\n",
+ obd->obd_name, cl->cl_async);
RETURN(0);
}
- if (keylen == strlen("unlinked") &&
- memcmp(key, "unlinked", keylen) == 0) {
- struct osc_creator *oscc = &exp->u.eu_osc_data.oed_oscc;
- spin_lock(&oscc->oscc_lock);
- oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
- spin_unlock(&oscc->oscc_lock);
- RETURN(0);
- }
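+ /* "sec" selects the RPC security flavor for this client: null, krb5i
+ * (GSS with integrity) or krb5p (GSS with privacy). */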
+ if (keylen == strlen("sec") &&
+ memcmp(key, "sec", keylen) == 0) {
+ struct client_obd *cli = &exp->exp_obd->u.cli;
- if (keylen < strlen("mds_conn") ||
- memcmp(key, "mds_conn", strlen("mds_conn")) != 0)
+ if (vallen == strlen("null") &&
+ memcmp(val, "null", vallen) == 0) {
+ cli->cl_sec_flavor = PTLRPC_SEC_NULL;
+ cli->cl_sec_subflavor = 0;
+ RETURN(0);
+ }
+ if (vallen == strlen("krb5i") &&
+ memcmp(val, "krb5i", vallen) == 0) {
+ cli->cl_sec_flavor = PTLRPC_SEC_GSS;
+ cli->cl_sec_subflavor = PTLRPC_SEC_GSS_KRB5I;
+ RETURN(0);
+ }
+ if (vallen == strlen("krb5p") &&
+ memcmp(val, "krb5p", vallen) == 0) {
+ cli->cl_sec_flavor = PTLRPC_SEC_GSS;
+ cli->cl_sec_subflavor = PTLRPC_SEC_GSS_KRB5P;
+ RETURN(0);
+ }
+ CERROR("unrecognized security type %s\n", (char*) val);
RETURN(-EINVAL);
+ }
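+ /* "flush_cred" asks the security layer to drop any cached credentials
+ * for the given uid on this import. */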
+ if (keylen == strlen("flush_cred") &&
+ memcmp(key, "flush_cred", keylen) == 0) {
+ struct client_obd *cli = &exp->exp_obd->u.cli;
- req = ptlrpc_prep_req(imp, OST_SET_INFO, 1, &size, bufs);
- if (req == NULL)
- RETURN(-ENOMEM);
+ if (cli->cl_import)
+ ptlrpcs_import_flush_creds(cli->cl_import,
+ *((uid_t *) val));
+ RETURN(0);
+ }
- req->rq_replen = lustre_msg_size(0, NULL);
- rc = ptlrpc_queue_wait(req);
- ptlrpc_req_finished(req);
+ if (keylen < strlen("mds_conn") ||
+ memcmp(key, "mds_conn", strlen("mds_conn")) != 0)
+ RETURN(-EINVAL);
- ctxt = llog_get_context(exp->exp_obd, LLOG_UNLINK_ORIG_CTXT);
+ ctxt = llog_get_context(&exp->exp_obd->obd_llogs,
+ LLOG_UNLINK_ORIG_CTXT);
if (ctxt) {
- rc = llog_initiator_connect(ctxt);
- if (rc)
- RETURN(rc);
+ if (rc == 0)
+ rc = llog_initiator_connect(ctxt);
+ else
+ CERROR("cannot establish the connect for "
+ "ctxt %p: %d\n", ctxt, rc);
}
imp->imp_server_timeout = 1;
CDEBUG(D_HA, "pinging OST %s\n", imp->imp_target_uuid.uuid);
- ptlrpc_pinger_add_import(imp);
+ imp->imp_pingable = 1;
RETURN(rc);
}
};
static struct llog_operations osc_unlink_orig_logops;
-static int osc_llog_init(struct obd_device *obd, struct obd_device *tgt,
- int count, struct llog_logid *logid)
+
+static int osc_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
+ struct obd_device *tgt, int count,
+ struct llog_catid *catid)
{
int rc;
ENTRY;
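+ /* Unlink records are now managed through the llog catalog interface
+ * (llog_catalog_add/cleanup); the initial catalog id comes from *catid. */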
osc_unlink_orig_logops = llog_lvfs_ops;
osc_unlink_orig_logops.lop_setup = llog_obd_origin_setup;
- osc_unlink_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
- osc_unlink_orig_logops.lop_add = llog_obd_origin_add;
+ osc_unlink_orig_logops.lop_cleanup = llog_catalog_cleanup;
+ osc_unlink_orig_logops.lop_add = llog_catalog_add;
osc_unlink_orig_logops.lop_connect = llog_origin_connect;
- rc = llog_setup(obd, LLOG_UNLINK_ORIG_CTXT, tgt, count, logid,
- &osc_unlink_orig_logops);
+ rc = obd_llog_setup(obd, llogs, LLOG_UNLINK_ORIG_CTXT, tgt, count,
+ &catid->lci_logid, &osc_unlink_orig_logops);
if (rc)
RETURN(rc);
- rc = llog_setup(obd, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
- &osc_size_repl_logops);
+ rc = obd_llog_setup(obd, llogs, LLOG_SIZE_REPL_CTXT, tgt, count, NULL,
+ &osc_size_repl_logops);
RETURN(rc);
}
-static int osc_llog_finish(struct obd_device *obd, int count)
+static int osc_llog_finish(struct obd_device *obd,
+ struct obd_llogs *llogs, int count)
{
int rc;
ENTRY;
- rc = llog_cleanup(llog_get_context(obd, LLOG_UNLINK_ORIG_CTXT));
+ rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_UNLINK_ORIG_CTXT));
if (rc)
RETURN(rc);
- rc = llog_cleanup(llog_get_context(obd, LLOG_SIZE_REPL_CTXT));
+ rc = obd_llog_cleanup(llog_get_context(llogs, LLOG_SIZE_REPL_CTXT));
RETURN(rc);
}
-
static int osc_connect(struct lustre_handle *exph,
- struct obd_device *obd, struct obd_uuid *cluuid)
+ struct obd_device *obd, struct obd_uuid *cluuid,
+ struct obd_connect_data *data,
+ unsigned long connect_flags)
{
int rc;
- struct obd_export *exp;
-
- rc = client_connect_import(exph, obd, cluuid);
-
- if (obd->u.cli.cl_conn_count == 1) {
- exp = class_conn2export(exph);
- oscc_init(exp);
- }
-
- return rc;
+ ENTRY;
+ rc = client_connect_import(exph, obd, cluuid, data, connect_flags);
+ RETURN(rc);
}
-static int osc_disconnect(struct obd_export *exp, int flags)
+static int osc_disconnect(struct obd_export *exp, unsigned long flags)
{
struct obd_device *obd = class_exp2obd(exp);
- struct llog_ctxt *ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+ struct llog_ctxt *ctxt;
int rc;
+ ENTRY;
- if (obd->u.cli.cl_conn_count == 1) {
+ ctxt = llog_get_context(&obd->obd_llogs, LLOG_SIZE_REPL_CTXT);
+ if (obd->u.cli.cl_conn_count == 1)
/* flush any remaining cancel messages out to the target */
llog_sync(ctxt, exp);
+
+ rc = client_disconnect_export(exp, flags);
+ RETURN(rc);
+}
+
+static int osc_import_event(struct obd_device *obd,
+ struct obd_import *imp,
+ enum obd_import_event event)
+{
+ struct client_obd *cli;
+ int rc = 0;
+
+ LASSERT(imp->imp_obd == obd);
+
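+ /* React to import state changes: mark the creator as recovering on
+ * disconnect, clear its no-space flag and notify the observer on
+ * activation, notify on deactivation, and drop grants and cached locks
+ * when the import is invalidated. */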
+ switch (event) {
+ case IMP_EVENT_DISCON: {
+ /* Only do this on the MDS OSCs */
+ if (imp->imp_server_timeout) {
+ struct osc_creator *oscc = &obd->u.cli.cl_oscc;
+
+ spin_lock(&oscc->oscc_lock);
+ oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
+ spin_unlock(&oscc->oscc_lock);
+ }
+ break;
+ }
+ case IMP_EVENT_INACTIVE: {
+ if (obd->obd_observer)
+ rc = obd_notify(obd->obd_observer, obd, 0, 0);
+ break;
+ }
+ case IMP_EVENT_INVALIDATE: {
+ struct ldlm_namespace *ns = obd->obd_namespace;
+
+ /* Reset grants */
+ cli = &obd->u.cli;
+ spin_lock(&cli->cl_loi_list_lock);
+ cli->cl_avail_grant = 0;
+ cli->cl_lost_grant = 0;
+ /* all pages go to failing rpcs due to the invalid import */
+ osc_check_rpcs(cli);
+ spin_unlock(&cli->cl_loi_list_lock);
- /* balance the conn2export for oscc in osc_connect */
- class_export_put(exp);
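+ /* LDLM_FL_LOCAL_ONLY drops every lock in this namespace locally, without
+ * sending cancels to the (now unreachable) server. */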
+ ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
+
+ break;
}
+ case IMP_EVENT_ACTIVE: {
+ /* Only do this on the MDS OSCs */
+ if (imp->imp_server_timeout) {
+ struct osc_creator *oscc = &obd->u.cli.cl_oscc;
- rc = client_disconnect_export(exp, flags);
- return rc;
+ spin_lock(&oscc->oscc_lock);
+ oscc->oscc_flags &= ~OSCC_FLAG_NOSPC;
+ spin_unlock(&oscc->oscc_lock);
+ }
+
+ if (obd->obd_observer)
+ rc = obd_notify(obd->obd_observer, obd, 1, 0);
+ break;
+ }
+ default:
+ CERROR("Unknown import event %d\n", event);
+ LBUG();
+ }
+ RETURN(rc);
}
-static int osc_lock_contains(struct obd_export *exp, struct lov_stripe_md *lsm,
- struct ldlm_lock *lock, obd_off offset)
+static int osc_attach(struct obd_device *dev, obd_count len, void *data)
{
+ struct lprocfs_static_vars lvars;
+ int rc;
ENTRY;
- if (exp == NULL)
- RETURN(-ENODEV);
- if (lock->l_policy_data.l_extent.start <= offset &&
- lock->l_policy_data.l_extent.end >= offset)
- RETURN(1);
+ lprocfs_init_vars(osc,&lvars);
+ rc = lprocfs_obd_attach(dev, lvars.obd_vars);
+ if (rc < 0)
+ RETURN(rc);
+
+ rc = lproc_osc_attach_seqstat(dev);
+ if (rc < 0) {
+ lprocfs_obd_detach(dev);
+ RETURN(rc);
+ }
+
+ ptlrpc_lprocfs_register_obd(dev);
RETURN(0);
}
-static int osc_invalidate_import(struct obd_device *obd,
- struct obd_import *imp)
+static int osc_detach(struct obd_device *dev)
{
- LASSERT(imp->imp_obd == obd);
- /* this used to try and tear down queued pages, but it was
- * not correctly implemented. We'll have to do it again once
- * we call obd_invalidate_import() agian */
- LBUG();
- RETURN(0);
+ ptlrpc_lprocfs_unregister_obd(dev);
+ return lprocfs_obd_detach(dev);
}
-int osc_setup(struct obd_device *obd, obd_count len, void *buf)
+static int osc_setup(struct obd_device *obd, obd_count len, void *buf)
{
int rc;
-
+ ENTRY;
rc = ptlrpcd_addref();
if (rc)
- return rc;
+ RETURN(rc);
rc = client_obd_setup(obd, len, buf);
if (rc)
ptlrpcd_decref();
+ else
+ oscc_init(obd);
+
RETURN(rc);
}
-int osc_cleanup(struct obd_device *obd, int flags)
+static int osc_cleanup(struct obd_device *obd, int flags)
{
+ struct osc_creator *oscc = &obd->u.cli.cl_oscc;
int rc;
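+ /* Before the generic client cleanup, cancel whatever unused locks remain
+ * (flagged as a config change) and mark the creator as exiting,
+ * presumably so no further precreates are started. */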
+ rc = ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
+ LDLM_FL_CONFIG_CHANGE, NULL);
+ if (rc)
+ RETURN(rc);
+
+ spin_lock(&oscc->oscc_lock);
+ oscc->oscc_flags &= ~OSCC_FLAG_RECOVERING;
+ oscc->oscc_flags |= OSCC_FLAG_EXITING;
+ spin_unlock(&oscc->oscc_lock);
+
rc = client_obd_cleanup(obd, flags);
ptlrpcd_decref();
RETURN(rc);
}
-
struct obd_ops osc_obd_ops = {
- o_owner: THIS_MODULE,
- o_attach: osc_attach,
- o_detach: osc_detach,
- o_setup: osc_setup,
- o_cleanup: osc_cleanup,
- o_connect: osc_connect,
- o_disconnect: osc_disconnect,
- o_statfs: osc_statfs,
- o_packmd: osc_packmd,
- o_unpackmd: osc_unpackmd,
- o_create: osc_create,
- o_destroy: osc_destroy,
- o_getattr: osc_getattr,
- o_getattr_async:osc_getattr_async,
- o_setattr: osc_setattr,
- o_brw: osc_brw,
- o_brw_async: osc_brw_async,
- .o_prep_async_page = osc_prep_async_page,
- .o_queue_async_io = osc_queue_async_io,
- .o_set_async_flags = osc_set_async_flags,
- .o_queue_sync_io = osc_queue_sync_io,
- .o_trigger_sync_io = osc_trigger_sync_io,
- .o_teardown_async_page = osc_teardown_async_page,
- o_punch: osc_punch,
- o_sync: osc_sync,
- o_enqueue: osc_enqueue,
- o_match: osc_match,
- o_change_cbdata:osc_change_cbdata,
- o_cancel: osc_cancel,
- o_cancel_unused:osc_cancel_unused,
- o_iocontrol: osc_iocontrol,
- o_get_info: osc_get_info,
- o_set_info: osc_set_info,
- o_lock_contains:osc_lock_contains,
- o_invalidate_import: osc_invalidate_import,
- o_llog_init: osc_llog_init,
- o_llog_finish: osc_llog_finish,
+ .o_owner = THIS_MODULE,
+ .o_attach = osc_attach,
+ .o_detach = osc_detach,
+ .o_setup = osc_setup,
+ .o_cleanup = osc_cleanup,
+ .o_add_conn = client_import_add_conn,
+ .o_del_conn = client_import_del_conn,
+ .o_connect = osc_connect,
+ .o_disconnect = osc_disconnect,
+ .o_statfs = osc_statfs,
+ .o_packmd = osc_packmd,
+ .o_unpackmd = osc_unpackmd,
+ .o_create = osc_create,
+ .o_destroy = osc_destroy,
+ .o_getattr = osc_getattr,
+ .o_getattr_async = osc_getattr_async,
+ .o_setattr = osc_setattr,
+ .o_brw = osc_brw,
+ .o_brw_async = osc_brw_async,
+ .o_prep_async_page = osc_prep_async_page,
+ .o_queue_async_io = osc_queue_async_io,
+ .o_set_async_flags = osc_set_async_flags,
+ .o_queue_group_io = osc_queue_group_io,
+ .o_trigger_group_io = osc_trigger_group_io,
+ .o_teardown_async_page = osc_teardown_async_page,
+ .o_punch = osc_punch,
+ .o_sync = osc_sync,
+ .o_enqueue = osc_enqueue,
+ .o_match = osc_match,
+ .o_change_cbdata = osc_change_cbdata,
+ .o_cancel = osc_cancel,
+ .o_cancel_unused = osc_cancel_unused,
+ .o_iocontrol = osc_iocontrol,
+ .o_get_info = osc_get_info,
+ .o_set_info = osc_set_info,
+ .o_import_event = osc_import_event,
+ .o_llog_init = osc_llog_init,
+ .o_llog_finish = osc_llog_finish,
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
struct obd_ops sanosc_obd_ops = {
- o_owner: THIS_MODULE,
- o_attach: osc_attach,
- o_detach: osc_detach,
- o_cleanup: client_obd_cleanup,
- o_connect: osc_connect,
- o_disconnect: client_disconnect_export,
- o_statfs: osc_statfs,
- o_packmd: osc_packmd,
- o_unpackmd: osc_unpackmd,
- o_create: osc_real_create,
- o_destroy: osc_destroy,
- o_getattr: osc_getattr,
- o_getattr_async:osc_getattr_async,
- o_setattr: osc_setattr,
- o_setup: client_sanobd_setup,
- o_brw: sanosc_brw,
- o_punch: osc_punch,
- o_sync: osc_sync,
- o_enqueue: osc_enqueue,
- o_match: osc_match,
- o_change_cbdata:osc_change_cbdata,
- o_cancel: osc_cancel,
- o_cancel_unused:osc_cancel_unused,
- o_iocontrol: osc_iocontrol,
- o_lock_contains:osc_lock_contains,
- o_invalidate_import: osc_invalidate_import,
- o_llog_init: osc_llog_init,
- o_llog_finish: osc_llog_finish,
+ .o_owner = THIS_MODULE,
+ .o_attach = osc_attach,
+ .o_detach = osc_detach,
+ .o_cleanup = client_obd_cleanup,
+ .o_add_conn = client_import_add_conn,
+ .o_del_conn = client_import_del_conn,
+ .o_connect = osc_connect,
+ .o_disconnect = client_disconnect_export,
+ .o_statfs = osc_statfs,
+ .o_packmd = osc_packmd,
+ .o_unpackmd = osc_unpackmd,
+ .o_create = osc_real_create,
+ .o_destroy = osc_destroy,
+ .o_getattr = osc_getattr,
+ .o_getattr_async = osc_getattr_async,
+ .o_setattr = osc_setattr,
+ .o_setup = client_sanobd_setup,
+ .o_brw = sanosc_brw,
+ .o_punch = osc_punch,
+ .o_sync = osc_sync,
+ .o_enqueue = osc_enqueue,
+ .o_match = osc_match,
+ .o_change_cbdata = osc_change_cbdata,
+ .o_cancel = osc_cancel,
+ .o_cancel_unused = osc_cancel_unused,
+ .o_iocontrol = osc_iocontrol,
+ .o_import_event = osc_import_event,
+ .o_llog_init = osc_llog_init,
+ .o_llog_finish = osc_llog_finish,
};
#endif
int __init osc_init(void)
{
- struct lprocfs_static_vars lvars, sanlvars;
+ struct lprocfs_static_vars lvars;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+ struct lprocfs_static_vars sanlvars;
+#endif
int rc;
ENTRY;
lprocfs_init_vars(osc, &lvars);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
lprocfs_init_vars(osc, &sanlvars);
+#endif
- rc = class_register_type(&osc_obd_ops, lvars.module_vars,
+ rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
LUSTRE_OSC_NAME);
if (rc)
RETURN(rc);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- rc = class_register_type(&sanosc_obd_ops, sanlvars.module_vars,
+ rc = class_register_type(&sanosc_obd_ops, NULL, sanlvars.module_vars,
LUSTRE_SANOSC_NAME);
if (rc)
class_unregister_type(LUSTRE_OSC_NAME);
RETURN(rc);
}
+#ifdef __KERNEL__
static void /*__exit*/ osc_exit(void)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
class_unregister_type(LUSTRE_OSC_NAME);
}
-#ifdef __KERNEL__
MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
MODULE_LICENSE("GPL");