/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* Per-page private state of the echo client; one per cl_page. */
struct echo_page {
struct cl_page_slice ep_cl;	/* layered cl_page slice for this layer */
+ cfs_mutex_t ep_lock;	/* added by this patch: serializes page ownership, see echo_page_own()/echo_page_disown() */
cfs_page_t *ep_vmpage;	/* backing VM page; a reference is taken via page_cache_get() at init */
};
return cl2echo_page(slice)->ep_vmpage;
}
+/*
+ * cpo_own method: take ownership of an echo page for I/O @io.
+ *
+ * Blocking path acquires ep_lock unconditionally; when @nonblock is
+ * set, a trylock is attempted instead and -EAGAIN is returned if the
+ * page is already owned.  Returns 0 on success.
+ */
+static int echo_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, int nonblock)
+{
+ struct echo_page *ep = cl2echo_page(slice);
+
+ if (!nonblock)
+ cfs_mutex_lock(&ep->ep_lock);
+ else if (!cfs_mutex_trylock(&ep->ep_lock))
+ return -EAGAIN;
+ return 0;
+}
+
+/*
+ * cpo_disown method: release page ownership taken by echo_page_own().
+ * Asserts ep_lock is actually held before dropping it.
+ */
+static void echo_page_disown(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct echo_page *ep = cl2echo_page(slice);
+
+ LASSERT(cfs_mutex_is_locked(&ep->ep_lock));
+ cfs_mutex_unlock(&ep->ep_lock);
+}
+
static void echo_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
static int echo_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- return 1;
+ /* Report the real lock state of the page instead of a hard-coded 1,
+ * now that ownership is tracked through ep_lock. */
+ return cfs_mutex_is_locked(&cl2echo_page(slice)->ep_lock);
}
static void echo_page_completion(const struct lu_env *env,
{
struct echo_page *ep = cl2echo_page(slice);
- (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p vm@%p\n",
- ep, ep->ep_vmpage);
+ (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
+ ep, cfs_mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
return 0;
}
static const struct cl_page_operations echo_page_ops = {
+ .cpo_own = echo_page_own,
+ .cpo_disown = echo_page_disown,
.cpo_discard = echo_page_discard,
.cpo_vmpage = echo_page_vmpage,
.cpo_fini = echo_page_fini,
struct echo_object *eco = cl2echo_obj(obj);
ep->ep_vmpage = vmpage;
page_cache_get(vmpage);
+ cfs_mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
cfs_atomic_inc(&eco->eo_npages);
}
struct echo_object *eco;
struct lu_device *next = ed->ed_next;
- CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n", ed, next);
-
- /* destroy locks */
- cfs_spin_lock(&ec->ec_lock);
- while (!cfs_list_empty(&ec->ec_locks)) {
- struct echo_lock *ecl = cfs_list_entry(ec->ec_locks.next,
- struct echo_lock,
- el_chain);
- int still_used = 0;
-
- if (cfs_atomic_dec_and_test(&ecl->el_refcount))
- cfs_list_del_init(&ecl->el_chain);
- else
- still_used = 1;
- cfs_spin_unlock(&ec->ec_lock);
-
- CERROR("echo client: pending lock %p refs %d\n",
- ecl, cfs_atomic_read(&ecl->el_refcount));
-
- echo_lock_release(env, ecl, still_used);
- cfs_spin_lock(&ec->ec_lock);
- }
- cfs_spin_unlock(&ec->ec_lock);
+ CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
+ ed, next);
LASSERT(ed->ed_site);
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
}
cfs_spin_unlock(&ec->ec_lock);
+ LASSERT(cfs_list_empty(&ec->ec_locks));
+
CDEBUG(D_INFO, "No object exists, exiting...\n");
echo_client_cleanup(d->ld_obd);
struct lu_object_header *loh = obj->co_lu.lo_header;
LASSERT(&eco->eo_hdr == luh2coh(loh));
cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
- cl_object_prune(env, obj);
}
cl_object_put(env, obj);
if (lsm->lsm_stripe_size == 0)
lsm->lsm_stripe_size = CFS_PAGE_SIZE;
- idx = ll_rand();
+ idx = cfs_rand();
/* setup stripes: indices + default ids if required */
for (i = 0; i < lsm->lsm_stripe_count; i++) {
int rc;
int verify;
int gfp_mask;
+ int brw_flags = 0;
ENTRY;
verify = ((oa->o_id) != ECHO_PERSISTENT_OBJID &&
/* XXX think again with misaligned I/O */
npages = count >> CFS_PAGE_SHIFT;
+ if (rw == OBD_BRW_WRITE)
+ brw_flags = OBD_BRW_ASYNC;
+
OBD_ALLOC(pga, npages * sizeof(*pga));
if (pga == NULL)
RETURN(-ENOMEM);
pages[i] = pgp->pg;
pgp->count = CFS_PAGE_SIZE;
pgp->off = off;
- pgp->flag = 0;
+ pgp->flag = brw_flags;
if (verify)
echo_client_page_debug_setup(lsm, pgp->pg, rw,
struct obd_device *obd = class_exp2obd(exp);
struct echo_device *ed = obd2echo_dev(obd);
struct echo_client_obd *ec = ed->ed_ec;
- struct obd_trans_info dummy_oti = { .oti_thread = NULL };
+ struct obd_trans_info dummy_oti = { 0 };
struct obdo *oa = &data->ioc_obdo1;
struct echo_object *eco;
int rc;
struct obd_trans_info dummy_oti;
struct oti_req_ack_lock *ack_lock;
struct obdo *oa;
+ struct lu_fid fid;
int rw = OBD_BRW_READ;
int rc = 0;
int i;
ENTRY;
+#ifndef HAVE_UNLOCKED_IOCTL
cfs_unlock_kernel();
+#endif
memset(&dummy_oti, 0, sizeof(dummy_oti));
oa->o_valid |= OBD_MD_FLGROUP;
oa->o_seq = FID_SEQ_ECHO;
}
- /* assume we can touch filter native objects with echo device. */
- /* LASSERT(oa->o_seq == FID_SEQ_ECHO); */
+
+ /* This FID is unpacked just for validation at this point */
+ rc = fid_ostid_unpack(&fid, &oa->o_oi, 0);
+ if (rc < 0)
+ RETURN(rc);
switch (cmd) {
case OBD_IOC_CREATE: /* may create echo object */
ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
}
+#ifndef HAVE_UNLOCKED_IOCTL
cfs_lock_kernel();
+#endif
return rc;
}
}
ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
- OBD_CONNECT_GRANT | OBD_CONNECT_FULL20;
+ OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
+ OBD_CONNECT_64BITHASH;
ocd->ocd_version = LUSTRE_VERSION_CODE;
ocd->ocd_group = FID_SEQ_ECHO;
int rc;
lprocfs_echo_init_vars(&lvars);
- rc = class_register_type(&echo_obd_ops, NULL, lvars.module_vars,
- LUSTRE_ECHO_CLIENT_NAME, &echo_device_type);
+
+ rc = lu_kmem_init(echo_caches);
if (rc == 0)
- lu_kmem_init(echo_caches);
+ rc = class_register_type(&echo_obd_ops, NULL,
+ lvars.module_vars,
+ LUSTRE_ECHO_CLIENT_NAME,
+ &echo_device_type);
+ if (rc)
+ lu_kmem_fini(echo_caches);
+
return rc;
}