* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Mikhail Pershin <mike.tappro@intel.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_MDS
#include "osp_internal.h"
+/* Return true when this OSP device talks to an OST (opd_connect_mdt not
+ * set), i.e. the object is an OST-object rather than a remote MDT-object. */
+static inline bool is_ost_obj(struct lu_object *lo)
+{
+ return !lu2osp_dev(lo->lo_dev)->opd_connect_mdt;
+}
+
static void osp_object_assign_fid(const struct lu_env *env,
struct osp_device *d, struct osp_object *o)
{
lu_object_assign_fid(env, &o->opo_obj.do_lu, &osi->osi_fid);
}
+/* Lazily allocate obj->opo_ooa, the per-object attr/xattr cache anchor.
+ * Race-safe: if another thread installed one first, the local copy is
+ * freed. Returns 0 on success (including "already present"), -ENOMEM
+ * on allocation failure. */
+static int osp_oac_init(struct osp_object *obj)
+{
+ struct osp_object_attr *ooa;
+
+ OBD_ALLOC_PTR(ooa);
+ if (ooa == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ooa->ooa_xattr_list);
+ spin_lock(&obj->opo_lock);
+ if (likely(obj->opo_ooa == NULL)) {
+ obj->opo_ooa = ooa;
+ spin_unlock(&obj->opo_lock);
+ } else {
+ /* Lost the race: someone else initialized it; drop ours. */
+ spin_unlock(&obj->opo_lock);
+ OBD_FREE_PTR(ooa);
+ }
+
+ return 0;
+}
+
+/* Find a cached xattr entry by (name, namelen). Caller must hold
+ * obj->opo_lock. With unlink=true the entry is taken off the list and
+ * NO reference is added (the list reference is handed to the caller);
+ * otherwise a reference is taken for the caller. Returns NULL if the
+ * name is not cached. */
+static struct osp_xattr_entry *
+osp_oac_xattr_find_locked(struct osp_object_attr *ooa,
+ const char *name, int namelen, bool unlink)
+{
+ struct osp_xattr_entry *oxe;
+
+ list_for_each_entry(oxe, &ooa->ooa_xattr_list, oxe_list) {
+ if (namelen == oxe->oxe_namelen &&
+ strncmp(name, oxe->oxe_buf, namelen) == 0) {
+ if (unlink)
+ list_del_init(&oxe->oxe_list);
+ else
+ atomic_inc(&oxe->oxe_ref);
+
+ return oxe;
+ }
+ }
+
+ return NULL;
+}
+
+/* Locked wrapper around osp_oac_xattr_find_locked(): returns a referenced
+ * entry (release with osp_oac_xattr_put()) or NULL when the name is not
+ * cached or the cache anchor has not been initialized yet. */
+static struct osp_xattr_entry *osp_oac_xattr_find(struct osp_object *obj,
+ const char *name)
+{
+ struct osp_xattr_entry *oxe = NULL;
+
+ spin_lock(&obj->opo_lock);
+ if (obj->opo_ooa != NULL)
+ oxe = osp_oac_xattr_find_locked(obj->opo_ooa, name,
+ strlen(name), false);
+ spin_unlock(&obj->opo_lock);
+
+ return oxe;
+}
+
+/* Return a referenced cache entry for 'name', creating one with room for
+ * a value of 'len' bytes if absent. The new entry starts with two refs:
+ * one for the caller, one for its place on the ooa_xattr_list. Requires
+ * obj->opo_ooa to be initialized. Returns NULL on allocation failure. */
+static struct osp_xattr_entry *
+osp_oac_xattr_find_or_add(struct osp_object *obj, const char *name, int len)
+{
+ struct osp_object_attr *ooa = obj->opo_ooa;
+ struct osp_xattr_entry *oxe;
+ struct osp_xattr_entry *tmp = NULL;
+ int namelen = strlen(name);
+ int size = sizeof(*oxe) + namelen + 1 + len;
+
+ LASSERT(ooa != NULL);
+
+ oxe = osp_oac_xattr_find(obj, name);
+ if (oxe != NULL)
+ return oxe;
+
+ OBD_ALLOC(oxe, size);
+ if (unlikely(oxe == NULL))
+ return NULL;
+
+ INIT_LIST_HEAD(&oxe->oxe_list);
+ oxe->oxe_buflen = size;
+ oxe->oxe_namelen = namelen;
+ memcpy(oxe->oxe_buf, name, namelen);
+ oxe->oxe_value = oxe->oxe_buf + namelen + 1;
+ /* One ref is for the caller, the other is for the entry on the list. */
+ atomic_set(&oxe->oxe_ref, 2);
+
+ spin_lock(&obj->opo_lock);
+ /* Re-check under the lock: another thread may have added the same
+ * name after our unlocked lookup above. */
+ tmp = osp_oac_xattr_find_locked(ooa, name, namelen, false);
+ if (tmp == NULL)
+ list_add_tail(&oxe->oxe_list, &ooa->ooa_xattr_list);
+ spin_unlock(&obj->opo_lock);
+
+ if (tmp != NULL) {
+ /* Raced: keep the winner's entry, drop ours. */
+ OBD_FREE(oxe, size);
+ oxe = tmp;
+ }
+
+ return oxe;
+}
+
+/* Replace the cached entry *poxe with a fresh, larger one sized for a
+ * 'len'-byte value (same name). The old entry is unlinked from the list
+ * (its list reference is transferred back through *poxe for the caller
+ * to drop) and the new entry takes its place. Returns the new entry
+ * (caller ref held) or NULL on allocation failure, in which case the
+ * list is left untouched. */
+static struct osp_xattr_entry *
+osp_oac_xattr_replace(struct osp_object *obj,
+ struct osp_xattr_entry **poxe, int len)
+{
+ struct osp_object_attr *ooa = obj->opo_ooa;
+ struct osp_xattr_entry *old = *poxe;
+ struct osp_xattr_entry *oxe;
+ struct osp_xattr_entry *tmp = NULL;
+ int namelen = old->oxe_namelen;
+ int size = sizeof(*oxe) + namelen + 1 + len;
+
+ LASSERT(ooa != NULL);
+
+ OBD_ALLOC(oxe, size);
+ if (unlikely(oxe == NULL))
+ return NULL;
+
+ INIT_LIST_HEAD(&oxe->oxe_list);
+ oxe->oxe_buflen = size;
+ oxe->oxe_namelen = namelen;
+ memcpy(oxe->oxe_buf, old->oxe_buf, namelen);
+ oxe->oxe_value = oxe->oxe_buf + namelen + 1;
+ /* One ref is for the caller, the other is for the entry on the list. */
+ atomic_set(&oxe->oxe_ref, 2);
+
+ spin_lock(&obj->opo_lock);
+ /* unlink=true: pull the old entry off the list, keeping its ref. */
+ tmp = osp_oac_xattr_find_locked(ooa, oxe->oxe_buf, namelen, true);
+ list_add_tail(&oxe->oxe_list, &ooa->ooa_xattr_list);
+ spin_unlock(&obj->opo_lock);
+
+ *poxe = tmp;
+ LASSERT(tmp != NULL);
+
+ return oxe;
+}
+
+/* Drop one reference on a cached xattr entry; free it when the last
+ * reference goes away (it must already be off the list by then). */
+static inline void osp_oac_xattr_put(struct osp_xattr_entry *oxe)
+{
+ if (atomic_dec_and_test(&oxe->oxe_ref)) {
+ LASSERT(list_empty(&oxe->oxe_list));
+
+ OBD_FREE(oxe, oxe->oxe_buflen);
+ }
+}
+
+/* Extract the wire obdo at 'index' from an OUT reply, swab it if the
+ * reply needs byte-swapping, and convert it into lu_attr form. The
+ * result is stored into the object's cached attributes (if the cache
+ * anchor exists) and/or copied into *attr when attr != NULL.
+ * Returns 0 on success, -EPROTO on a malformed reply buffer. */
+static int osp_get_attr_from_reply(const struct lu_env *env,
+ struct object_update_reply *reply,
+ struct ptlrpc_request *req,
+ struct lu_attr *attr,
+ struct osp_object *obj, int index)
+{
+ struct osp_thread_info *osi = osp_env_info(env);
+ struct lu_buf *rbuf = &osi->osi_lb2;
+ struct obdo *lobdo = &osi->osi_obdo;
+ struct obdo *wobdo;
+ int rc;
+
+ rc = object_update_result_data_get(reply, rbuf, index);
+ if (rc < 0)
+ return rc;
+
+ wobdo = rbuf->lb_buf;
+ if (rbuf->lb_len != sizeof(*wobdo))
+ return -EPROTO;
+
+ LASSERT(req != NULL);
+ if (ptlrpc_req_need_swab(req))
+ lustre_swab_obdo(wobdo);
+
+ lustre_get_wire_obdo(NULL, lobdo, wobdo);
+ spin_lock(&obj->opo_lock);
+ if (obj->opo_ooa != NULL) {
+ la_from_obdo(&obj->opo_ooa->ooa_attr, lobdo, lobdo->o_valid);
+ if (attr != NULL)
+ *attr = obj->opo_ooa->ooa_attr;
+ } else {
+ /* No cache: the caller must have supplied an output buffer. */
+ LASSERT(attr != NULL);
+
+ la_from_obdo(attr, lobdo, lobdo->o_valid);
+ }
+ spin_unlock(&obj->opo_lock);
+
+ return 0;
+}
+
+/* Interpreter callback for async OUT_ATTR_GET ("interpterer" spelling is
+ * historical and matches the registration site). On success, marks the
+ * object existing and caches the returned attributes; on -ENOENT, marks
+ * it non-existent. Any failure invalidates the cached attr by clearing
+ * la_valid under opo_lock. Always returns 0 for non-success rc so the
+ * batch continues. */
+static int osp_attr_get_interpterer(const struct lu_env *env,
+ struct object_update_reply *reply,
+ struct ptlrpc_request *req,
+ struct osp_object *obj,
+ void *data, int index, int rc)
+{
+ struct lu_attr *attr = data;
+
+ LASSERT(obj->opo_ooa != NULL);
+
+ if (rc == 0) {
+ osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
+ obj->opo_non_exist = 0;
+
+ return osp_get_attr_from_reply(env, reply, req, NULL, obj,
+ index);
+ } else {
+ if (rc == -ENOENT) {
+ osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
+ obj->opo_non_exist = 1;
+ }
+
+ /* Invalidate the cached attribute on any failure. */
+ spin_lock(&obj->opo_lock);
+ attr->la_valid = 0;
+ spin_unlock(&obj->opo_lock);
+ }
+
+ return 0;
+}
+
+/* dt_object_operations::do_declare_attr_get - queue an async OUT_ATTR_GET
+ * into the device-wide batched update request; the result lands in the
+ * object's attr cache via osp_attr_get_interpterer. Initializes the
+ * cache anchor on first use. */
+static int osp_declare_attr_get(const struct lu_env *env, struct dt_object *dt,
+ struct lustre_capa *capa)
+{
+ struct osp_object *obj = dt2osp_obj(dt);
+ struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
+ struct dt_update_request *update;
+ int rc = 0;
+
+ if (obj->opo_ooa == NULL) {
+ rc = osp_oac_init(obj);
+ if (rc != 0)
+ return rc;
+ }
+
+ mutex_lock(&osp->opd_async_requests_mutex);
+ update = osp_find_or_create_async_update_request(osp);
+ if (IS_ERR(update))
+ rc = PTR_ERR(update);
+ else
+ rc = osp_insert_async_update(env, update, OUT_ATTR_GET, obj, 0,
+ NULL, NULL,
+ &obj->opo_ooa->ooa_attr,
+ osp_attr_get_interpterer);
+ mutex_unlock(&osp->opd_async_requests_mutex);
+
+ return rc;
+}
+
+/* dt_object_operations::do_attr_get - return object attributes, from the
+ * local cache when valid, otherwise via a synchronous OUT_ATTR_GET RPC
+ * to the remote target. Updates existence state (LOHA_EXISTS /
+ * opo_non_exist) from the RPC outcome. */
+int osp_attr_get(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *attr, struct lustre_capa *capa)
+{
+ struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
+ struct osp_object *obj = dt2osp_obj(dt);
+ struct dt_device *dev = &osp->opd_dt_dev;
+ struct dt_update_request *update;
+ struct object_update_reply *reply;
+ struct ptlrpc_request *req = NULL;
+ int rc = 0;
+ ENTRY;
+
+ if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
+ RETURN(-ENOENT);
+
+ /* Fast path: serve from the cached attributes when valid. */
+ if (obj->opo_ooa != NULL) {
+ spin_lock(&obj->opo_lock);
+ if (obj->opo_ooa->ooa_attr.la_valid != 0) {
+ *attr = obj->opo_ooa->ooa_attr;
+ spin_unlock(&obj->opo_lock);
+
+ RETURN(0);
+ }
+ spin_unlock(&obj->opo_lock);
+ }
+
+ update = out_create_update_req(dev);
+ if (IS_ERR(update))
+ RETURN(PTR_ERR(update));
+
+ rc = out_insert_update(env, update, OUT_ATTR_GET,
+ lu_object_fid(&dt->do_lu), 0, NULL, NULL);
+ if (rc != 0) {
+ CERROR("%s: Insert update error "DFID": rc = %d\n",
+ dev->dd_lu_dev.ld_obd->obd_name,
+ PFID(lu_object_fid(&dt->do_lu)), rc);
+
+ GOTO(out, rc);
+ }
+
+ rc = out_remote_sync(env, osp->opd_obd->u.cli.cl_import, update, &req);
+ if (rc != 0) {
+ if (rc == -ENOENT) {
+ /* Remote object is gone: remember that to short-cut
+ * future lookups. */
+ osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
+ obj->opo_non_exist = 1;
+ } else {
+ CERROR("%s:osp_attr_get update error "DFID": rc = %d\n",
+ dev->dd_lu_dev.ld_obd->obd_name,
+ PFID(lu_object_fid(&dt->do_lu)), rc);
+ }
+
+ GOTO(out, rc);
+ }
+
+ osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
+ obj->opo_non_exist = 0;
+ reply = req_capsule_server_sized_get(&req->rq_pill,
+ &RMF_OUT_UPDATE_REPLY,
+ OUT_UPDATE_REPLY_SIZE);
+ if (reply == NULL || reply->ourp_magic != UPDATE_REPLY_MAGIC)
+ GOTO(out, rc = -EPROTO);
+
+ rc = osp_get_attr_from_reply(env, reply, req, attr, obj, 0);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ if (!is_ost_obj(&dt->do_lu)) {
+ /* NOTE(review): la_flags == 1 appears to be a remote
+ * "object has content" indicator - confirm against the
+ * server-side OUT handler before relying on it. */
+ if (attr->la_flags == 1)
+ obj->opo_empty = 0;
+ else
+ obj->opo_empty = 1;
+ }
+
+ GOTO(out, rc = 0);
+
+out:
+ if (req != NULL)
+ ptlrpc_req_finished(req);
+
+ out_destroy_update_req(update);
+
+ return rc;
+}
+
+/* dt_object_operations::do_declare_attr_set - declare a UID/GID change.
+ * For normal transactions the change is journalled via the OSP sync llog;
+ * for remote-only transactions (LFSCK) it goes straight to the OST-object.
+ * On success the new ownership is also folded into the attr cache. */
static int osp_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
 const struct lu_attr *attr, struct thandle *th)
{
 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
 struct osp_object *o = dt2osp_obj(dt);
+ struct lu_attr *la;
 int rc = 0;
 ENTRY;
 if (!(attr->la_valid & (LA_UID | LA_GID)))
 RETURN(0);
- /*
- * track all UID/GID changes via llog
- */
- rc = osp_sync_declare_add(env, o, MDS_SETATTR64_REC, th);
+ if (!is_only_remote_trans(th))
+ /*
+ * track all UID/GID changes via llog
+ */
+ rc = osp_sync_declare_add(env, o, MDS_SETATTR64_REC, th);
+ else
+ /* It is for OST-object attr_set directly without updating
+ * local MDT-object attribute. It is usually used by LFSCK. */
+ rc = osp_md_declare_attr_set(env, dt, attr, th);
+
+ if (rc != 0 || o->opo_ooa == NULL)
+ RETURN(rc);
- RETURN(rc);
+ /* Mirror the declared UID/GID into the cached attributes. */
+ la = &o->opo_ooa->ooa_attr;
+ spin_lock(&o->opo_lock);
+ if (attr->la_valid & LA_UID) {
+ la->la_uid = attr->la_uid;
+ la->la_valid |= LA_UID;
+ }
+
+ if (attr->la_valid & LA_GID) {
+ la->la_gid = attr->la_gid;
+ la->la_valid |= LA_GID;
+ }
+ spin_unlock(&o->opo_lock);
+
+ RETURN(0);
}
static int osp_attr_set(const struct lu_env *env, struct dt_object *dt,
RETURN(0);
}
- /*
- * once transaction is committed put proper command on
- * the queue going to our OST
- */
- rc = osp_sync_add(env, o, MDS_SETATTR64_REC, th, attr);
-
- /* XXX: send new uid/gid to OST ASAP? */
+ if (!is_only_remote_trans(th))
+ /*
+ * once transaction is committed put proper command on
+ * the queue going to our OST
+ */
+ rc = osp_sync_add(env, o, MDS_SETATTR64_REC, th, attr);
+ /* XXX: send new uid/gid to OST ASAP? */
+ else
+ /* It is for OST-object attr_set directly without updating
+ * local MDT-object attribute. It is usually used by LFSCK. */
+ rc = osp_md_attr_set(env, dt, attr, th, capa);
RETURN(rc);
}
+/* Interpreter callback for async OUT_XATTR_GET. On success, copies the
+ * returned value into the cache entry 'data' and marks it ready; -ENOENT
+ * and -ENODATA cache a negative ("no such xattr") result; other errors
+ * leave the entry not-ready. Drops the reference taken at insert time
+ * on every path. */
+static int osp_xattr_get_interpterer(const struct lu_env *env,
+ struct object_update_reply *reply,
+ struct ptlrpc_request *req,
+ struct osp_object *obj,
+ void *data, int index, int rc)
+{
+ struct osp_object_attr *ooa = obj->opo_ooa;
+ struct osp_xattr_entry *oxe = data;
+ struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
+
+ LASSERT(ooa != NULL);
+
+ if (rc == 0) {
+ /* 'len' is the non-value portion of the entry buffer. */
+ int len = sizeof(*oxe) + oxe->oxe_namelen + 1;
+
+ rc = object_update_result_data_get(reply, rbuf, index);
+ if (rc < 0 || rbuf->lb_len > (oxe->oxe_buflen - len)) {
+ /* Value missing or larger than the reserved space. */
+ spin_lock(&obj->opo_lock);
+ oxe->oxe_ready = 0;
+ spin_unlock(&obj->opo_lock);
+ osp_oac_xattr_put(oxe);
+
+ return rc < 0 ? rc : -ERANGE;
+ }
+
+ spin_lock(&obj->opo_lock);
+ oxe->oxe_vallen = rbuf->lb_len;
+ memcpy(oxe->oxe_value, rbuf->lb_buf, rbuf->lb_len);
+ oxe->oxe_exist = 1;
+ oxe->oxe_ready = 1;
+ spin_unlock(&obj->opo_lock);
+ } else if (rc == -ENOENT || rc == -ENODATA) {
+ /* Cache the negative result. */
+ spin_lock(&obj->opo_lock);
+ oxe->oxe_exist = 0;
+ oxe->oxe_ready = 1;
+ spin_unlock(&obj->opo_lock);
+ } else {
+ spin_lock(&obj->opo_lock);
+ oxe->oxe_ready = 0;
+ spin_unlock(&obj->opo_lock);
+ }
+
+ osp_oac_xattr_put(oxe);
+
+ return 0;
+}
+
+/* dt_object_operations::do_declare_xattr_get - queue an async
+ * OUT_XATTR_GET for 'name' so a later osp_xattr_get() can be served
+ * from cache. Also opportunistically flushes ("unplugs") the pending
+ * batched async request if it already has updates queued. */
+static int osp_declare_xattr_get(const struct lu_env *env, struct dt_object *dt,
+ struct lu_buf *buf, const char *name,
+ struct lustre_capa *capa)
+{
+ struct osp_object *obj = dt2osp_obj(dt);
+ struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
+ struct dt_update_request *update;
+ struct osp_xattr_entry *oxe;
+ int namelen = strlen(name);
+ int rc = 0;
+
+ LASSERT(buf != NULL);
+ LASSERT(name != NULL);
+
+ /* If only for xattr size, return directly. */
+ if (unlikely(buf->lb_len == 0))
+ return 0;
+
+ if (obj->opo_ooa == NULL) {
+ rc = osp_oac_init(obj);
+ if (rc != 0)
+ return rc;
+ }
+
+ oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
+ if (oxe == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&osp->opd_async_requests_mutex);
+ update = osp_find_or_create_async_update_request(osp);
+ if (IS_ERR(update)) {
+ rc = PTR_ERR(update);
+ mutex_unlock(&osp->opd_async_requests_mutex);
+ osp_oac_xattr_put(oxe);
+ } else {
+ /* On success the oxe reference is consumed by the
+ * interpreter callback. */
+ rc = osp_insert_async_update(env, update, OUT_XATTR_GET, obj,
+ 1, &namelen, &name, oxe,
+ osp_xattr_get_interpterer);
+ if (rc != 0) {
+ mutex_unlock(&osp->opd_async_requests_mutex);
+ osp_oac_xattr_put(oxe);
+ } else {
+ /* XXX: Currently, we trigger the batched async OUT
+ * RPC via dt_declare_xattr_get(). It is not
+ * perfect solution, but works well now.
+ *
+ * We will improve it in the future. */
+ update = osp->opd_async_requests;
+ if (update != NULL && update->dur_req != NULL &&
+ update->dur_req->ourq_count > 0) {
+ osp->opd_async_requests = NULL;
+ mutex_unlock(&osp->opd_async_requests_mutex);
+ rc = osp_unplug_async_update(env, osp, update);
+ } else {
+ mutex_unlock(&osp->opd_async_requests_mutex);
+ }
+ }
+ }
+
+ return rc;
+}
+
+/* dt_object_operations::do_xattr_get - fetch an xattr value, serving
+ * from the local cache when the entry is ready, otherwise through a
+ * synchronous OUT_XATTR_GET RPC. A fetched value is written back into
+ * the cache (growing the entry via osp_oac_xattr_replace() if needed).
+ * Returns the value length on success, -ENODATA/-ERANGE/-EPROTO/... on
+ * failure. */
+int osp_xattr_get(const struct lu_env *env, struct dt_object *dt,
+ struct lu_buf *buf, const char *name,
+ struct lustre_capa *capa)
+{
+ struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
+ struct osp_object *obj = dt2osp_obj(dt);
+ struct dt_device *dev = &osp->opd_dt_dev;
+ struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
+ struct dt_update_request *update = NULL;
+ struct ptlrpc_request *req = NULL;
+ struct object_update_reply *reply;
+ struct osp_xattr_entry *oxe = NULL;
+ const char *dname = dt->do_lu.lo_dev->ld_obd->obd_name;
+ int namelen;
+ int rc = 0;
+ ENTRY;
+
+ LASSERT(buf != NULL);
+ LASSERT(name != NULL);
+
+ if (unlikely(obj->opo_non_exist))
+ RETURN(-ENOENT);
+
+ /* Fast path: answer from the cache when the entry is ready. */
+ oxe = osp_oac_xattr_find(obj, name);
+ if (oxe != NULL) {
+ spin_lock(&obj->opo_lock);
+ if (oxe->oxe_ready) {
+ if (!oxe->oxe_exist)
+ GOTO(unlock, rc = -ENODATA);
+
+ if (buf->lb_buf == NULL)
+ GOTO(unlock, rc = oxe->oxe_vallen);
+
+ if (buf->lb_len < oxe->oxe_vallen)
+ GOTO(unlock, rc = -ERANGE);
+
+ memcpy(buf->lb_buf, oxe->oxe_value, oxe->oxe_vallen);
+
+ GOTO(unlock, rc = oxe->oxe_vallen);
+
+unlock:
+ spin_unlock(&obj->opo_lock);
+ osp_oac_xattr_put(oxe);
+
+ return rc;
+ }
+ spin_unlock(&obj->opo_lock);
+ }
+
+ update = out_create_update_req(dev);
+ if (IS_ERR(update))
+ GOTO(out, rc = PTR_ERR(update));
+
+ namelen = strlen(name) + 1;
+ rc = out_insert_update(env, update, OUT_XATTR_GET,
+ lu_object_fid(&dt->do_lu), 1, &namelen, &name);
+ if (rc != 0) {
+ CERROR("%s: Insert update error "DFID": rc = %d\n",
+ dname, PFID(lu_object_fid(&dt->do_lu)), rc);
+
+ GOTO(out, rc);
+ }
+
+ rc = out_remote_sync(env, osp->opd_obd->u.cli.cl_import, update, &req);
+ if (rc != 0) {
+ if (obj->opo_ooa == NULL)
+ GOTO(out, rc);
+
+ if (oxe == NULL)
+ oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
+
+ if (oxe == NULL) {
+ CWARN("%s: Fail to add xattr (%s) to cache for "
+ DFID" (1): rc = %d\n", dname, name,
+ PFID(lu_object_fid(&dt->do_lu)), rc);
+
+ GOTO(out, rc);
+ }
+
+ /* Cache the negative result on -ENOENT/-ENODATA, otherwise
+ * mark the entry not-ready so the next caller refetches. */
+ spin_lock(&obj->opo_lock);
+ if (rc == -ENOENT || rc == -ENODATA) {
+ oxe->oxe_exist = 0;
+ oxe->oxe_ready = 1;
+ } else {
+ oxe->oxe_ready = 0;
+ }
+ spin_unlock(&obj->opo_lock);
+
+ GOTO(out, rc);
+ }
+
+ reply = req_capsule_server_sized_get(&req->rq_pill,
+ &RMF_OUT_UPDATE_REPLY,
+ OUT_UPDATE_REPLY_SIZE);
+ /* The capsule get may return NULL on a short/invalid reply; do not
+ * dereference it unchecked (matches the check in osp_attr_get()). */
+ if (reply == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
+ CERROR("%s: Wrong version %x expected %x "DFID": rc = %d\n",
+ dname, reply->ourp_magic, UPDATE_REPLY_MAGIC,
+ PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
+
+ GOTO(out, rc = -EPROTO);
+ }
+
+ rc = object_update_result_data_get(reply, rbuf, 0);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (buf->lb_buf == NULL)
+ GOTO(out, rc = rbuf->lb_len);
+
+ if (unlikely(buf->lb_len < rbuf->lb_len))
+ GOTO(out, rc = -ERANGE);
+
+ memcpy(buf->lb_buf, rbuf->lb_buf, rbuf->lb_len);
+ rc = rbuf->lb_len;
+ if (obj->opo_ooa == NULL)
+ GOTO(out, rc);
+
+ if (oxe == NULL) {
+ oxe = osp_oac_xattr_find_or_add(obj, name, rbuf->lb_len);
+ if (oxe == NULL) {
+ CWARN("%s: Fail to add xattr (%s) to "
+ "cache for "DFID" (2): rc = %d\n",
+ dname, name, PFID(lu_object_fid(&dt->do_lu)), rc);
+
+ GOTO(out, rc);
+ }
+ }
+
+ if (oxe->oxe_buflen - oxe->oxe_namelen - 1 < rbuf->lb_len) {
+ struct osp_xattr_entry *old = oxe;
+ struct osp_xattr_entry *tmp;
+
+ tmp = osp_oac_xattr_replace(obj, &old, rbuf->lb_len);
+ osp_oac_xattr_put(oxe);
+ oxe = tmp;
+ if (tmp == NULL) {
+ CWARN("%s: Fail to update xattr (%s) to "
+ "cache for "DFID": rc = %d\n",
+ dname, name, PFID(lu_object_fid(&dt->do_lu)), rc);
+ /* 'oxe' is NULL here; the original entry 'old' is
+ * still on the list holding its own reference, so
+ * flag staleness through 'old', not 'oxe'. */
+ spin_lock(&obj->opo_lock);
+ old->oxe_ready = 0;
+ spin_unlock(&obj->opo_lock);
+
+ GOTO(out, rc);
+ }
+
+ /* Drop the ref for entry on list. */
+ osp_oac_xattr_put(old);
+ }
+
+ spin_lock(&obj->opo_lock);
+ oxe->oxe_vallen = rbuf->lb_len;
+ memcpy(oxe->oxe_value, rbuf->lb_buf, rbuf->lb_len);
+ oxe->oxe_exist = 1;
+ oxe->oxe_ready = 1;
+ spin_unlock(&obj->opo_lock);
+
+ GOTO(out, rc);
+
+out:
+ if (req != NULL)
+ ptlrpc_req_finished(req);
+
+ if (update != NULL && !IS_ERR(update))
+ out_destroy_update_req(update);
+
+ if (oxe != NULL)
+ osp_oac_xattr_put(oxe);
+
+ return rc;
+}
+
+/* dt_object_operations::do_declare_xattr_set - pack the OUT_XATTR_SET
+ * update (name, value, flags) into the transaction's update request and
+ * mirror the new value into the local xattr cache. Cache failures are
+ * logged but never fail the operation. */
+int osp_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_buf *buf, const char *name,
+ int flag, struct thandle *th)
+{
+ struct osp_object *o = dt2osp_obj(dt);
+ struct dt_update_request *update;
+ struct lu_fid *fid;
+ struct osp_xattr_entry *oxe;
+ int sizes[3] = {strlen(name), buf->lb_len,
+ sizeof(int)};
+ char *bufs[3] = {(char *)name, (char *)buf->lb_buf };
+ int rc;
+
+ LASSERT(buf->lb_len > 0 && buf->lb_buf != NULL);
+
+ update = out_find_create_update_loc(th, dt);
+ if (IS_ERR(update)) {
+ CERROR("%s: Get OSP update buf failed "DFID": rc = %d\n",
+ dt->do_lu.lo_dev->ld_obd->obd_name,
+ PFID(lu_object_fid(&dt->do_lu)),
+ (int)PTR_ERR(update));
+
+ return PTR_ERR(update);
+ }
+
+ /* Flags travel on the wire in little-endian form. */
+ flag = cpu_to_le32(flag);
+ bufs[2] = (char *)&flag;
+
+ fid = (struct lu_fid *)lu_object_fid(&dt->do_lu);
+ rc = out_insert_update(env, update, OUT_XATTR_SET, fid,
+ ARRAY_SIZE(sizes), sizes, (const char **)bufs);
+ if (rc != 0 || o->opo_ooa == NULL)
+ return rc;
+
+ /* Keep the cache coherent with the value being set. */
+ oxe = osp_oac_xattr_find_or_add(o, name, buf->lb_len);
+ if (oxe == NULL) {
+ CWARN("%s: Fail to add xattr (%s) to cache for "DFID
+ ": rc = %d\n", dt->do_lu.lo_dev->ld_obd->obd_name,
+ name, PFID(lu_object_fid(&dt->do_lu)), rc);
+
+ return 0;
+ }
+
+ if (oxe->oxe_buflen - oxe->oxe_namelen - 1 < buf->lb_len) {
+ struct osp_xattr_entry *old = oxe;
+ struct osp_xattr_entry *tmp;
+
+ tmp = osp_oac_xattr_replace(o, &old, buf->lb_len);
+ osp_oac_xattr_put(oxe);
+ oxe = tmp;
+ if (tmp == NULL) {
+ CWARN("%s: Fail to update xattr (%s) to cache for "DFID
+ ": rc = %d\n", dt->do_lu.lo_dev->ld_obd->obd_name,
+ name, PFID(lu_object_fid(&dt->do_lu)), rc);
+ /* 'oxe' is NULL here; the original entry 'old' is
+ * still on the list holding its own reference, so
+ * flag staleness through 'old', not 'oxe'. */
+ spin_lock(&o->opo_lock);
+ old->oxe_ready = 0;
+ spin_unlock(&o->opo_lock);
+
+ return 0;
+ }
+
+ /* Drop the ref for entry on list. */
+ osp_oac_xattr_put(old);
+ }
+
+ spin_lock(&o->opo_lock);
+ oxe->oxe_vallen = buf->lb_len;
+ memcpy(oxe->oxe_value, buf->lb_buf, buf->lb_len);
+ oxe->oxe_exist = 1;
+ oxe->oxe_ready = 1;
+ spin_unlock(&o->opo_lock);
+ osp_oac_xattr_put(oxe);
+
+ return 0;
+}
+
+/* dt_object_operations::do_xattr_set - intentionally a no-op apart from
+ * logging: the actual set was already packed into the update request by
+ * osp_declare_xattr_set() above. */
+int osp_xattr_set(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_buf *buf, const char *name, int fl,
+ struct thandle *th, struct lustre_capa *capa)
+{
+ CDEBUG(D_INFO, "xattr %s set object "DFID"\n", name,
+ PFID(&dt->do_lu.lo_header->loh_fid));
+
+ return 0;
+}
+
static int osp_declare_object_create(const struct lu_env *env,
struct dt_object *dt,
struct lu_attr *attr,
struct osp_thread_info *osi = osp_env_info(env);
struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
struct osp_object *o = dt2osp_obj(dt);
- const struct lu_fid *fid;
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
int rc = 0;
ENTRY;
+ if (is_only_remote_trans(th)) {
+ LASSERT(fid_is_sane(fid));
+
+ rc = osp_md_declare_object_create(env, dt, attr, hint, dof, th);
+
+ RETURN(rc);
+ }
+
/* should happen to non-0 OSP only so that at least one object
* has been already declared in the scenario and LOD should
* cleanup that */
RETURN(-ENOSPC);
LASSERT(d->opd_last_used_oid_file);
- fid = lu_object_fid(&dt->do_lu);
/*
* There can be gaps in precreated ids and record to unlink llog
if (unlikely(!fid_is_zero(fid))) {
/* replay case: caller knows fid */
osi->osi_off = sizeof(osi->osi_id) * d->opd_index;
+ osi->osi_lb.lb_len = sizeof(osi->osi_id);
+ osi->osi_lb.lb_buf = NULL;
rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
- sizeof(osi->osi_id), osi->osi_off,
- th);
+ &osi->osi_lb, osi->osi_off, th);
RETURN(rc);
}
/* common for all OSPs file hystorically */
osi->osi_off = sizeof(osi->osi_id) * d->opd_index;
+ osi->osi_lb.lb_len = sizeof(osi->osi_id);
+ osi->osi_lb.lb_buf = NULL;
rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
- sizeof(osi->osi_id), osi->osi_off,
- th);
+ &osi->osi_lb, osi->osi_off, th);
} else {
/* not needed in the cache anymore */
set_bit(LU_OBJECT_HEARD_BANSHEE,
struct lu_fid *fid = &osi->osi_fid;
ENTRY;
+ if (is_only_remote_trans(th)) {
+ LASSERT(fid_is_sane(lu_object_fid(&dt->do_lu)));
+
+ rc = osp_md_object_create(env, dt, attr, hint, dof, th);
+ if (rc == 0)
+ o->opo_non_exist = 0;
+
+ RETURN(rc);
+ }
+
+ o->opo_non_exist = 0;
if (o->opo_reserved) {
/* regular case, fid is assigned holding trunsaction open */
osp_object_assign_fid(env, d, o);
memcpy(fid, lu_object_fid(&dt->do_lu), sizeof(*fid));
- LASSERTF(fid_is_sane(fid), "fid for osp_obj %p is insane"DFID"!\n",
- osp_obj, PFID(fid));
+ LASSERTF(fid_is_sane(fid), "fid for osp_object %p is insane"DFID"!\n",
+ o, PFID(fid));
if (!o->opo_reserved) {
/* special case, id was assigned outside of transaction
* see comments in osp_declare_attr_set */
+ LASSERT(d->opd_pre != NULL);
spin_lock(&d->opd_pre_lock);
osp_update_last_fid(d, fid);
spin_unlock(&d->opd_pre_lock);
}
- CDEBUG(D_INODE, "fid for osp_obj %p is "DFID"!\n", osp_obj, PFID(fid));
+ CDEBUG(D_INODE, "fid for osp_object %p is "DFID"\n", o, PFID(fid));
/* If the precreate ends, it means it will be ready to rollover to
* the new sequence soon, all the creation should be synchronized,
/* we might have lost precreated objects */
if (unlikely(d->opd_gap_count) > 0) {
+ LASSERT(d->opd_pre != NULL);
spin_lock(&d->opd_pre_lock);
if (d->opd_gap_count > 0) {
int count = d->opd_gap_count;
- osi->osi_oi.oi_id = fid_oid(&d->opd_gap_start_fid);
+
+ ostid_set_id(&osi->osi_oi,
+ fid_oid(&d->opd_gap_start_fid));
d->opd_gap_count = 0;
spin_unlock(&d->opd_pre_lock);
RETURN(rc);
}
-static int osp_declare_object_destroy(const struct lu_env *env,
- struct dt_object *dt, struct thandle *th)
+int osp_declare_object_destroy(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th)
{
struct osp_object *o = dt2osp_obj(dt);
int rc = 0;
RETURN(rc);
}
-static int osp_object_destroy(const struct lu_env *env, struct dt_object *dt,
- struct thandle *th)
+int osp_object_destroy(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
struct osp_object *o = dt2osp_obj(dt);
int rc = 0;
ENTRY;
+ o->opo_non_exist = 1;
/*
* once transaction is committed put proper command on
* the queue going to our OST
RETURN(rc);
}
+/* Iterator state for walking the remote orphan index in bulk batches:
+ * ooi_pos0 indexes the current page within the fetched batch, ooi_pos1
+ * the lu_idxpage within that page, ooi_pos2 the entry within the
+ * lu_idxpage; ooi_next is the server-side cookie for the next fetch and
+ * ooi_swab records whether replies need byte-swapping. */
+struct osp_orphan_it {
+ int ooi_pos0;
+ int ooi_pos1;
+ int ooi_pos2;
+ int ooi_total_npages;
+ int ooi_valid_npages;
+ unsigned int ooi_swab:1;
+ __u64 ooi_next;
+ struct dt_object *ooi_obj;
+ struct lu_orphan_ent *ooi_ent;
+ struct page *ooi_cur_page;
+ struct lu_idxpage *ooi_cur_idxpage;
+ struct page **ooi_pages;
+};
+
+/* The orphan index is read-only on this side: lookup and all mutating
+ * index operations are unsupported and only the iterator interface
+ * below is functional. */
+static int osp_orphan_index_lookup(const struct lu_env *env,
+ struct dt_object *dt,
+ struct dt_rec *rec,
+ const struct dt_key *key,
+ struct lustre_capa *capa)
+{
+ return -EOPNOTSUPP;
+}
+
+static int osp_orphan_index_declare_insert(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_rec *rec,
+ const struct dt_key *key,
+ struct thandle *handle)
+{
+ return -EOPNOTSUPP;
+}
+
+static int osp_orphan_index_insert(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_rec *rec,
+ const struct dt_key *key,
+ struct thandle *handle,
+ struct lustre_capa *capa,
+ int ignore_quota)
+{
+ return -EOPNOTSUPP;
+}
+
+static int osp_orphan_index_declare_delete(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_key *key,
+ struct thandle *handle)
+{
+ return -EOPNOTSUPP;
+}
+
+static int osp_orphan_index_delete(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_key *key,
+ struct thandle *handle,
+ struct lustre_capa *capa)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Allocate and initialize an orphan-index iterator. ooi_pos2 starts at
+ * -1 so the first ->next() advances to entry 0 of the first lu_page. */
+static struct dt_it *osp_orphan_it_init(const struct lu_env *env,
+ struct dt_object *dt,
+ __u32 attr,
+ struct lustre_capa *capa)
+{
+ struct osp_orphan_it *it;
+
+ OBD_ALLOC_PTR(it);
+ if (it == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ it->ooi_pos2 = -1;
+ it->ooi_obj = dt;
+
+ return (struct dt_it *)it;
+}
+
+/* Tear down an orphan iterator: unmap the currently kmapped page (if
+ * any), release every page of the batch, then free the page array and
+ * the iterator itself. */
+static void osp_orphan_it_fini(const struct lu_env *env,
+ struct dt_it *di)
+{
+ struct osp_orphan_it *it = (struct osp_orphan_it *)di;
+ struct page **pages = it->ooi_pages;
+ int npages = it->ooi_total_npages;
+ int i;
+
+ if (pages != NULL) {
+ for (i = 0; i < npages; i++) {
+ if (pages[i] != NULL) {
+ if (pages[i] == it->ooi_cur_page) {
+ /* Balance the kmap() done in next(). */
+ kunmap(pages[i]);
+ it->ooi_cur_page = NULL;
+ }
+ __free_page(pages[i]);
+ }
+ }
+ OBD_FREE(pages, npages * sizeof(*pages));
+ }
+ OBD_FREE_PTR(it);
+}
+
+/* Fetch the next batch of orphan-index pages from the remote target via
+ * an OBD_IDX_READ bulk RPC (up to 1MB). On success, records the number
+ * of valid pages, whether replies need swabbing, and the continuation
+ * cookie (ii_hash_end) for the next fetch. Pages allocated here are
+ * released by osp_orphan_it_next()/_fini() on the partial-failure paths. */
+static int osp_orphan_it_fetch(const struct lu_env *env,
+ struct osp_orphan_it *it)
+{
+ struct lu_device *dev = it->ooi_obj->do_lu.lo_dev;
+ struct osp_device *osp = lu2osp_dev(dev);
+ struct page **pages;
+ struct ptlrpc_request *req = NULL;
+ struct ptlrpc_bulk_desc *desc;
+ struct idx_info *ii;
+ int npages;
+ int rc;
+ int i;
+ ENTRY;
+
+ /* 1MB bulk */
+ npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
+ npages /= PAGE_CACHE_SIZE;
+
+ OBD_ALLOC(pages, npages * sizeof(*pages));
+ if (pages == NULL)
+ RETURN(-ENOMEM);
+
+ /* Publish into 'it' immediately so _fini() can clean up on the
+ * early-return paths below. */
+ it->ooi_pages = pages;
+ it->ooi_total_npages = npages;
+ for (i = 0; i < npages; i++) {
+ pages[i] = alloc_page(GFP_IOFS);
+ if (pages[i] == NULL)
+ RETURN(-ENOMEM);
+ }
+
+ req = ptlrpc_request_alloc(osp->opd_obd->u.cli.cl_import,
+ &RQF_OBD_IDX_READ);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
+ if (rc != 0) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ req->rq_request_portal = OUT_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
+ desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
+ MDS_BULK_PORTAL);
+ if (desc == NULL) {
+ ptlrpc_request_free(req);
+ RETURN(-ENOMEM);
+ }
+
+ for (i = 0; i < npages; i++)
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+
+ /* NOTE(review): the "fid" here encodes the index identity
+ * (FID_SEQ_LAYOUT_RBTREE + OST index), not a real object FID. */
+ ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
+ memset(ii, 0, sizeof(*ii));
+ ii->ii_fid.f_seq = FID_SEQ_LAYOUT_RBTREE;
+ ii->ii_fid.f_oid = osp->opd_index;
+ ii->ii_fid.f_ver = 0;
+ ii->ii_magic = IDX_INFO_MAGIC;
+ ii->ii_flags = II_FL_NOHASH;
+ ii->ii_count = npages * LU_PAGE_COUNT;
+ ii->ii_hash_start = it->ooi_next;
+ ii->ii_attrs =
+ osp->opd_storage->dd_lu_dev.ld_site->ld_seq_site->ss_node_id;
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
+ req->rq_bulk->bd_nob_transferred);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
+ if (ii->ii_magic != IDX_INFO_MAGIC)
+ GOTO(out, rc = -EPROTO);
+
+ npages = (ii->ii_count + LU_PAGE_COUNT - 1) >>
+ (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT);
+ if (npages > it->ooi_total_npages) {
+ CERROR("%s: returned more pages than expected, %u > %u\n",
+ osp->opd_obd->obd_name, npages, it->ooi_total_npages);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ it->ooi_valid_npages = npages;
+ if (ptlrpc_rep_need_swab(req))
+ it->ooi_swab = 1;
+
+ it->ooi_next = ii->ii_hash_end;
+
+ GOTO(out, rc = 0);
+
+out:
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
+
+/* Advance the iterator to the next orphan entry. Walks three nested
+ * levels via the again2/again1/again0 labels: entries within a
+ * lu_idxpage, lu_idxpages within a kernel page, and pages within the
+ * fetched batch; when the batch is exhausted, frees it and fetches the
+ * next one. Returns 0 with ooi_ent set, +1 at end-of-index, or -ve on
+ * error. */
+static int osp_orphan_it_next(const struct lu_env *env,
+ struct dt_it *di)
+{
+ struct osp_orphan_it *it = (struct osp_orphan_it *)di;
+ struct lu_idxpage *idxpage;
+ struct page **pages;
+ int rc;
+ int i;
+ ENTRY;
+
+again2:
+ idxpage = it->ooi_cur_idxpage;
+ if (idxpage != NULL) {
+ /* An empty lu_page means no more entries at all. */
+ if (idxpage->lip_nr == 0)
+ RETURN(1);
+
+ it->ooi_pos2++;
+ if (it->ooi_pos2 < idxpage->lip_nr) {
+ it->ooi_ent =
+ (struct lu_orphan_ent *)idxpage->lip_entries +
+ it->ooi_pos2;
+ if (it->ooi_swab)
+ lustre_swab_orphan_ent(it->ooi_ent);
+ RETURN(0);
+ }
+
+ it->ooi_cur_idxpage = NULL;
+ it->ooi_pos1++;
+
+again1:
+ if (it->ooi_pos1 < LU_PAGE_COUNT) {
+ it->ooi_cur_idxpage = (void *)it->ooi_cur_page +
+ LU_PAGE_SIZE * it->ooi_pos1;
+ if (it->ooi_swab)
+ lustre_swab_lip_header(it->ooi_cur_idxpage);
+ if (it->ooi_cur_idxpage->lip_magic != LIP_MAGIC) {
+ struct osp_device *osp =
+ lu2osp_dev(it->ooi_obj->do_lu.lo_dev);
+
+ CERROR("%s: invalid magic (%x != %x) for page "
+ "%d/%d while read layout orphan index\n",
+ osp->opd_obd->obd_name,
+ it->ooi_cur_idxpage->lip_magic,
+ LIP_MAGIC, it->ooi_pos0, it->ooi_pos1);
+ /* Skip this lu_page next time. */
+ it->ooi_pos2 = idxpage->lip_nr - 1;
+ RETURN(-EINVAL);
+ }
+ it->ooi_pos2 = -1;
+ goto again2;
+ }
+
+ kunmap(it->ooi_cur_page);
+ it->ooi_cur_page = NULL;
+ it->ooi_pos0++;
+
+again0:
+ pages = it->ooi_pages;
+ if (it->ooi_pos0 < it->ooi_valid_npages) {
+ it->ooi_cur_page = kmap(pages[it->ooi_pos0]);
+ it->ooi_pos1 = 0;
+ goto again1;
+ }
+
+ /* Batch exhausted: release it and reset iterator state
+ * before (possibly) fetching the next batch. */
+ for (i = 0; i < it->ooi_total_npages; i++) {
+ if (pages[i] != NULL)
+ __free_page(pages[i]);
+ }
+ OBD_FREE(pages, it->ooi_total_npages * sizeof(*pages));
+
+ it->ooi_pos0 = 0;
+ it->ooi_total_npages = 0;
+ it->ooi_valid_npages = 0;
+ it->ooi_swab = 0;
+ it->ooi_ent = NULL;
+ it->ooi_cur_page = NULL;
+ it->ooi_cur_idxpage = NULL;
+ it->ooi_pages = NULL;
+ }
+
+ if (it->ooi_next == II_END_OFF)
+ RETURN(1);
+
+ rc = osp_orphan_it_fetch(env, it);
+ if (rc == 0)
+ goto again0;
+
+ RETURN(rc);
+}
+
+/* Positioning by key is not supported on the orphan index. */
+static int osp_orphan_it_get(const struct lu_env *env,
+ struct dt_it *di,
+ const struct dt_key *key)
+{
+ return -ENOSYS;
+}
+
+static void osp_orphan_it_put(const struct lu_env *env,
+ struct dt_it *di)
+{
+}
+
+/* Key of the current entry (a FID), or NULL when not positioned. */
+static struct dt_key *osp_orphan_it_key(const struct lu_env *env,
+ const struct dt_it *di)
+{
+ struct osp_orphan_it *it = (struct osp_orphan_it *)di;
+ struct lu_orphan_ent *ent = it->ooi_ent;
+
+ if (likely(ent != NULL))
+ return (struct dt_key *)(&ent->loe_key);
+
+ return NULL;
+}
+
+static int osp_orphan_it_key_size(const struct lu_env *env,
+ const struct dt_it *di)
+{
+ return sizeof(struct lu_fid);
+}
+
+/* Copy out the record of the current entry; -EINVAL when not positioned. */
+static int osp_orphan_it_rec(const struct lu_env *env,
+ const struct dt_it *di,
+ struct dt_rec *rec,
+ __u32 attr)
+{
+ struct osp_orphan_it *it = (struct osp_orphan_it *)di;
+ struct lu_orphan_ent *ent = it->ooi_ent;
+
+ if (likely(ent != NULL)) {
+ *(struct lu_orphan_rec *)rec = ent->loe_rec;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* The resumption cookie is the server-side hash of the next fetch. */
+static __u64 osp_orphan_it_store(const struct lu_env *env,
+ const struct dt_it *di)
+{
+ struct osp_orphan_it *it = (struct osp_orphan_it *)di;
+
+ return it->ooi_next;
+}
+
+/**
+ * \retval +1: locate to the exactly position
+ * \retval 0: cannot locate to the exactly position,
+ * call next() to move to a valid position.
+ * \retval -ve: on error
+ */
+static int osp_orphan_it_load(const struct lu_env *env,
+ const struct dt_it *di,
+ __u64 hash)
+{
+ struct osp_orphan_it *it = (struct osp_orphan_it *)di;
+ int rc;
+
+ it->ooi_next = hash;
+ /* next() returns 0 on "positioned" which maps to +1 here, and
+ * +1 on end-of-index which maps to 0 ("call next()"). */
+ rc = osp_orphan_it_next(env, (struct dt_it *)di);
+ if (rc == 1)
+ return 0;
+
+ if (rc == 0)
+ return 1;
+
+ return rc;
+}
+
+static int osp_orphan_it_key_rec(const struct lu_env *env,
+ const struct dt_it *di,
+ void *key_rec)
+{
+ return 0;
+}
+
+/* Index operations for the remote orphan index: all mutating operations
+ * are stubs; only the iterator interface is functional. */
+static const struct dt_index_operations osp_orphan_index_ops = {
+ .dio_lookup = osp_orphan_index_lookup,
+ .dio_declare_insert = osp_orphan_index_declare_insert,
+ .dio_insert = osp_orphan_index_insert,
+ .dio_declare_delete = osp_orphan_index_declare_delete,
+ .dio_delete = osp_orphan_index_delete,
+ .dio_it = {
+ .init = osp_orphan_it_init,
+ .fini = osp_orphan_it_fini,
+ .next = osp_orphan_it_next,
+ .get = osp_orphan_it_get,
+ .put = osp_orphan_it_put,
+ .key = osp_orphan_it_key,
+ .key_size = osp_orphan_it_key_size,
+ .rec = osp_orphan_it_rec,
+ .store = osp_orphan_it_store,
+ .load = osp_orphan_it_load,
+ .key_rec = osp_orphan_it_key_rec,
+ }
+};
+
+/* dt_object_operations::do_index_try - only the LAST_ID object with an
+ * IDIF fid is usable as an index here, and it gets the orphan-index
+ * (iterator-only) operations. */
+static int osp_index_try(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_index_features *feat)
+{
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+
+ if (fid_is_last_id(fid) && fid_is_idif(fid)) {
+ dt->do_index_ops = &osp_orphan_index_ops;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* dt_object operation table for OST-objects seen through OSP; the new
+ * attr/xattr getters are cache-backed, setters pack OUT updates. */
struct dt_object_operations osp_obj_ops = {
+ .do_declare_attr_get = osp_declare_attr_get,
+ .do_attr_get = osp_attr_get,
 .do_declare_attr_set = osp_declare_attr_set,
 .do_attr_set = osp_attr_set,
+ .do_declare_xattr_get = osp_declare_xattr_get,
+ .do_xattr_get = osp_xattr_get,
+ .do_declare_xattr_set = osp_declare_xattr_set,
+ .do_xattr_set = osp_xattr_set,
 .do_declare_create = osp_declare_object_create,
 .do_create = osp_object_create,
 .do_declare_destroy = osp_declare_object_destroy,
 .do_destroy = osp_object_destroy,
+ .do_index_try = osp_index_try,
};
-static int is_ost_obj(struct lu_object *lo)
-{
- struct osp_device *osp = lu2osp_dev(lo->lo_dev);
-
- return !osp->opd_connect_mdt;
-}
-
static int osp_object_init(const struct lu_env *env, struct lu_object *o,
const struct lu_object_conf *conf)
{
int rc = 0;
ENTRY;
+ spin_lock_init(&po->opo_lock);
+ o->lo_header->loh_attr |= LOHA_REMOTE;
+
if (is_ost_obj(o)) {
po->opo_obj.do_ops = &osp_obj_ops;
} else {
- struct lu_attr *la = &osp_env_info(env)->osi_attr;
+ struct lu_attr *la = &osp_env_info(env)->osi_attr;
po->opo_obj.do_ops = &osp_md_obj_ops;
- o->lo_header->loh_attr |= LOHA_REMOTE;
- po->opo_obj.do_lock_ops = &osp_md_lock_ops;
+ po->opo_obj.do_body_ops = &osp_md_body_ops;
rc = po->opo_obj.do_ops->do_attr_get(env, lu2dt_obj(o),
la, NULL);
if (rc == 0)
o->lo_header->loh_attr |=
LOHA_EXISTS | (la->la_mode & S_IFMT);
- if (rc == -ENOENT)
+ if (rc == -ENOENT) {
+ po->opo_non_exist = 1;
rc = 0;
+ }
+ init_rwsem(&po->opo_sem);
}
RETURN(rc);
}
dt_object_fini(&obj->opo_obj);
lu_object_header_fini(h);
+ if (obj->opo_ooa != NULL) {
+ struct osp_xattr_entry *oxe;
+ struct osp_xattr_entry *tmp;
+ int count;
+
+ list_for_each_entry_safe(oxe, tmp,
+ &obj->opo_ooa->ooa_xattr_list,
+ oxe_list) {
+ list_del(&oxe->oxe_list);
+ count = atomic_read(&oxe->oxe_ref);
+ LASSERTF(count == 1,
+ "Still has %d users on the xattr entry %.*s\n",
+ count - 1, oxe->oxe_namelen, oxe->oxe_buf);
+
+ OBD_FREE(oxe, oxe->oxe_buflen);
+ }
+ OBD_FREE_PTR(obj->opo_ooa);
+ }
OBD_SLAB_FREE_PTR(obj, osp_object_kmem);
}
* this may require lu_object_put() in LOD
*/
if (unlikely(po->opo_reserved)) {
+ LASSERT(d->opd_pre != NULL);
LASSERT(d->opd_pre_reserved > 0);
spin_lock(&d->opd_pre_lock);
d->opd_pre_reserved--;
/* not needed in cache any more */
set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
}
+
+ if (is_ost_obj(o))
+ /* XXX: Currently, NOT cache OST-object on MDT because:
+ * 1. it is not often accessed on MDT.
+ * 2. avoid up layer (such as LFSCK) to load too many
+ * once-used OST-objects. */
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
+
EXIT;
}