* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct echo_page {
struct cl_page_slice ep_cl;
- cfs_mutex_t ep_lock;
+ /* Serializes page ownership: taken in echo_page_own() (blocking or
+ * trylock), released in echo_page_disown(); also queried by
+ * is_vmlocked and the page printer. */
+ struct mutex ep_lock;
cfs_page_t *ep_vmpage;
};
unsigned long dummy;
};
-static cfs_mem_cache_t *echo_page_kmem;
static cfs_mem_cache_t *echo_lock_kmem;
static cfs_mem_cache_t *echo_object_kmem;
static cfs_mem_cache_t *echo_thread_kmem;
static struct lu_kmem_descr echo_caches[] = {
{
- .ckd_cache = &echo_page_kmem,
- .ckd_name = "echo_page_kmem",
- .ckd_size = sizeof (struct echo_page)
- },
- {
.ckd_cache = &echo_lock_kmem,
.ckd_name = "echo_lock_kmem",
.ckd_size = sizeof (struct echo_lock)
struct echo_page *ep = cl2echo_page(slice);
if (!nonblock)
- cfs_mutex_lock(&ep->ep_lock);
- else if (!cfs_mutex_trylock(&ep->ep_lock))
+ mutex_lock(&ep->ep_lock);
+ else if (!mutex_trylock(&ep->ep_lock))
return -EAGAIN;
return 0;
}
{
struct echo_page *ep = cl2echo_page(slice);
- LASSERT(cfs_mutex_is_locked(&ep->ep_lock));
- cfs_mutex_unlock(&ep->ep_lock);
+ LASSERT(mutex_is_locked(&ep->ep_lock));
+ mutex_unlock(&ep->ep_lock);
}
static void echo_page_discard(const struct lu_env *env,
static int echo_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- if (cfs_mutex_is_locked(&cl2echo_page(slice)->ep_lock))
+ if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
return -EBUSY;
return -ENODATA;
}
cfs_atomic_dec(&eco->eo_npages);
page_cache_release(vmpage);
- OBD_SLAB_FREE_PTR(ep, echo_page_kmem);
EXIT;
}
struct echo_page *ep = cl2echo_page(slice);
(*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
- ep, cfs_mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
+ ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
return 0;
}
*
* @{
*/
-static struct cl_page *echo_page_init(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+/* Initialize the echo layer's slice of a cl_page.
+ *
+ * The slice memory is now pre-allocated by the cl framework (the object
+ * registers sizeof(struct echo_page) via cl_object_page_init() at object
+ * init), so the former per-page OBD_SLAB_ALLOC from echo_page_kmem is gone
+ * and this function can no longer fail: it returns 0 unconditionally
+ * instead of a cl_page pointer / ERR_PTR. */
+static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct echo_page *ep;
+ struct echo_page *ep = cl_object_page_slice(obj, page);
+ struct echo_object *eco = cl2echo_obj(obj);
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(ep, echo_page_kmem, CFS_ALLOC_IO);
- if (ep != NULL) {
- struct echo_object *eco = cl2echo_obj(obj);
- ep->ep_vmpage = vmpage;
- page_cache_get(vmpage);
- cfs_mutex_init(&ep->ep_lock);
- cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
- cfs_atomic_inc(&eco->eo_npages);
- }
- RETURN(ERR_PTR(ep ? 0 : -ENOMEM));
+ ep->ep_vmpage = vmpage;
+ /* Hold a reference on the VM page for the slice's lifetime; it is
+ * dropped with page_cache_release() when the page is freed. */
+ page_cache_get(vmpage);
+ mutex_init(&ep->ep_lock);
+ cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
+ /* eo_npages is asserted to reach 0 before the object is torn down. */
+ cfs_atomic_inc(&eco->eo_npages);
+ RETURN(0);
}
static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
eco->eo_dev = ed;
cfs_atomic_set(&eco->eo_npages, 0);
+ cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
- cfs_spin_lock(&ec->ec_lock);
- cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
- cfs_spin_unlock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
+ cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+ spin_unlock(&ec->ec_lock);
- RETURN(0);
+ RETURN(0);
}
/* taken from osc_unpackmd() */
LASSERT(cfs_atomic_read(&eco->eo_npages) == 0);
- cfs_spin_lock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
cfs_list_del_init(&eco->eo_obj_chain);
- cfs_spin_unlock(&ec->ec_lock);
+ spin_unlock(&ec->ec_lock);
lu_object_fini(obj);
lu_object_header_fini(obj->lo_header);
#define ECHO_SEQ_WIDTH 0xffffffff
static int echo_fid_init(struct echo_device *ed, char *obd_name,
- struct md_site *ms)
+ struct seq_server_site *ss)
{
char *prefix;
int rc;
snprintf(prefix, MAX_OBD_NAME + 5, "srv-%s", obd_name);
- /* Init client side sequence-manager */
- rc = seq_client_init(ed->ed_cl_seq, NULL,
- LUSTRE_SEQ_METADATA,
- prefix, ms->ms_server_seq);
+ /* Init client side sequence-manager */
+ rc = seq_client_init(ed->ed_cl_seq, NULL,
+ LUSTRE_SEQ_METADATA,
+ prefix, ss->ss_server_seq);
ed->ed_cl_seq->lcs_width = ECHO_SEQ_WIDTH;
OBD_FREE(prefix, MAX_OBD_NAME + 5);
if (rc)
ls = next->ld_site;
- cfs_spin_lock(&ls->ls_ld_lock);
- cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
- if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
- found = 1;
- break;
- }
- }
- cfs_spin_unlock(&ls->ls_ld_lock);
+ spin_lock(&ls->ls_ld_lock);
+ cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
+ if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ls->ls_ld_lock);
if (found == 0) {
CERROR("%s is not lu device type!\n",
ed->ed_site_myself.cs_lu = *ls;
ed->ed_site = &ed->ed_site_myself;
ed->ed_cl.cd_lu_dev.ld_site = &ed->ed_site_myself.cs_lu;
- rc = echo_fid_init(ed, obd->obd_name, lu_site2md(ls));
- if (rc) {
- CERROR("echo fid init error %d\n", rc);
- GOTO(out, rc);
- }
+ rc = echo_fid_init(ed, obd->obd_name, lu_site2seq(ls));
+ if (rc) {
+ CERROR("echo fid init error %d\n", rc);
+ GOTO(out, rc);
+ }
} else {
/* if echo client is to be stacked upon ost device, the next is
* NULL since ost is not a clio device so far */
* all of cached objects. Anyway, probably the echo device is being
* parallelly accessed.
*/
- cfs_spin_lock(&ec->ec_lock);
- cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
- eco->eo_deleted = 1;
- cfs_spin_unlock(&ec->ec_lock);
-
- /* purge again */
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
-
- CDEBUG(D_INFO,
- "Waiting for the reference of echo object to be dropped\n");
-
- /* Wait for the last reference to be dropped. */
- cfs_spin_lock(&ec->ec_lock);
- while (!cfs_list_empty(&ec->ec_objects)) {
- cfs_spin_unlock(&ec->ec_lock);
- CERROR("echo_client still has objects at cleanup time, "
- "wait for 1 second\n");
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
- cfs_time_seconds(1));
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
- cfs_spin_lock(&ec->ec_lock);
- }
- cfs_spin_unlock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
+ cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+ eco->eo_deleted = 1;
+ spin_unlock(&ec->ec_lock);
+
+ /* purge again */
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+
+ CDEBUG(D_INFO,
+ "Waiting for the reference of echo object to be dropped\n");
+
+ /* Wait for the last reference to be dropped. */
+ spin_lock(&ec->ec_lock);
+ while (!cfs_list_empty(&ec->ec_objects)) {
+ spin_unlock(&ec->ec_lock);
+ CERROR("echo_client still has objects at cleanup time, "
+ "wait for 1 second\n");
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ cfs_time_seconds(1));
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ spin_lock(&ec->ec_lock);
+ }
+ spin_unlock(&ec->ec_lock);
LASSERT(cfs_list_empty(&ec->ec_locks));
if (eco->eo_deleted) {
struct lu_object_header *loh = obj->co_lu.lo_header;
LASSERT(&eco->eo_hdr == luh2coh(loh));
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
}
cl_object_put(env, obj);
rc = cl_wait(env, lck);
if (rc == 0) {
el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- cfs_spin_lock(&ec->ec_lock);
- if (cfs_list_empty(&el->el_chain)) {
- cfs_list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- cfs_atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- cfs_spin_unlock(&ec->ec_lock);
- } else
- cl_lock_release(env, lck, "ec enqueue", cfs_current());
- }
- RETURN(rc);
+ spin_lock(&ec->ec_lock);
+ if (cfs_list_empty(&el->el_chain)) {
+ cfs_list_add(&el->el_chain, &ec->ec_locks);
+ el->el_cookie = ++ec->ec_unique;
+ }
+ cfs_atomic_inc(&el->el_refcount);
+ *cookie = el->el_cookie;
+ spin_unlock(&ec->ec_lock);
+ } else {
+ cl_lock_release(env, lck, "ec enqueue", cfs_current());
+ }
+ }
+ RETURN(rc);
}
static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
ENTRY;
LASSERT(ec != NULL);
- cfs_spin_lock (&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
cfs_list_for_each (el, &ec->ec_locks) {
ecl = cfs_list_entry (el, struct echo_lock, el_chain);
CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
break;
}
}
- cfs_spin_unlock (&ec->ec_lock);
+ spin_unlock(&ec->ec_lock);
if (!found)
RETURN(-ENOENT);
ENTRY;
+ if (ec_parent == NULL)
+ return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
RETURN(-ENXIO);
RETURN(ERR_PTR(rc));
}
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
RETURN(child);
ENTRY;
+ if (ec_parent == NULL)
+ return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
RETURN(-ENXIO);
ENTRY;
+ if (ec_parent == NULL)
+ return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
RETURN(-ENXIO);
int rc = 0;
int i;
+ if (ec_parent == NULL)
+ return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
return -ENXIO;
RETURN(ERR_PTR(rc));
}
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
parent = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
if (IS_ERR(parent)) {
CERROR("Can not find the parent "DFID": rc = %ld\n",
RETURN(parent);
}
+/* Populate the environment's lu_ucred from the current process credentials
+ * so md operations issued below run with the caller's identity. Marked
+ * UCRED_INVALID while being filled, UCRED_NEW once complete; undone by
+ * echo_ucred_fini(). */
+static void echo_ucred_init(struct lu_env *env)
+{
+ struct lu_ucred *uc = lu_ucred(env);
+
+ uc->uc_valid = UCRED_INVALID;
+
+ uc->uc_suppgids[0] = -1;
+ uc->uc_suppgids[1] = -1;
+
+ uc->uc_uid = cfs_curproc_uid();
+ uc->uc_o_uid = uc->uc_uid;
+ uc->uc_gid = cfs_curproc_gid();
+ uc->uc_o_gid = uc->uc_gid;
+ uc->uc_fsuid = cfs_curproc_fsuid();
+ uc->uc_o_fsuid = uc->uc_fsuid;
+ uc->uc_fsgid = cfs_curproc_fsgid();
+ uc->uc_o_fsgid = uc->uc_fsgid;
+ uc->uc_cap = cfs_curproc_cap_pack();
+
+ /* remove fs privilege for non-root user. */
+ if (uc->uc_fsuid)
+ uc->uc_cap &= ~CFS_CAP_FS_MASK;
+ uc->uc_valid = UCRED_NEW;
+}
+
+/* Invalidate the credentials installed by echo_ucred_init() by resetting
+ * the ucred state back to UCRED_INIT. */
+static void echo_ucred_fini(struct lu_env *env)
+{
+ lu_ucred(env)->uc_valid = UCRED_INIT;
+}
+
#define ECHO_MD_CTX_TAG (LCT_REMEMBER | LCT_MD_THREAD)
#define ECHO_MD_SES_TAG (LCT_REMEMBER | LCT_SESSION)
static int echo_md_handler(struct echo_device *ed, int command,
if (IS_ERR(parent)) {
CERROR("Can not resolve the path %s: rc = %ld\n", path,
PTR_ERR(parent));
- GOTO(out_free, PTR_ERR(parent));
+ GOTO(out_free, rc = PTR_ERR(parent));
}
if (namelen > 0) {
GOTO(out_name, rc = -EFAULT);
}
+ echo_ucred_init(env);
+
switch (command) {
case ECHO_MD_CREATE:
case ECHO_MD_MKDIR: {
rc = -EINVAL;
break;
}
+ echo_ucred_fini(env);
+
out_name:
if (name != NULL)
OBD_FREE(name, namelen + 1);
if (cfs_copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
return -EFAULT;
- max_count = LUSTRE_SEQ_MAX_WIDTH;
- if (cfs_copy_to_user(data->ioc_pbuf2, &max_count,
- data->ioc_plen2))
- return -EFAULT;
- GOTO(out, rc);
+ max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH;
+ if (cfs_copy_to_user(data->ioc_pbuf2, &max_count,
+ data->ioc_plen2))
+ return -EFAULT;
+ GOTO(out, rc);
}
case OBD_IOC_DESTROY:
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EINVAL);
}
- cfs_spin_lock_init (&ec->ec_lock);
+ spin_lock_init(&ec->ec_lock);
CFS_INIT_LIST_HEAD (&ec->ec_objects);
CFS_INIT_LIST_HEAD (&ec->ec_locks);
ec->ec_unique = 0;
ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
- OBD_CONNECT_64BITHASH;
+ OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE;
ocd->ocd_version = LUSTRE_VERSION_CODE;
ocd->ocd_group = FID_SEQ_ECHO;
rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
if (rc == 0) {
/* Turn off pinger because it connects to tgt obd directly. */
- cfs_spin_lock(&tgt->obd_dev_lock);
- cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
- cfs_spin_unlock(&tgt->obd_dev_lock);
+ spin_lock(&tgt->obd_dev_lock);
+ cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+ spin_unlock(&tgt->obd_dev_lock);
}
OBD_FREE(ocd, sizeof(*ocd));