* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_FLD
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/jbd.h>
-#else /* __KERNEL__ */
-# include <liblustre.h>
-#endif
-
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_ver.h>
+#include <libcfs/libcfs.h>
+#include <linux/module.h>
#include <obd_support.h>
-#include <lprocfs_status.h>
-
#include <dt_object.h>
-#include <md_object.h>
-#include <lustre_mdc.h>
#include <lustre_fid.h>
#include <lustre_fld.h>
#include "fld_internal.h"
-const char fld_index_name[] = "fld";
+/* Name of the FLD index object looked up / created on the OSD. */
+static const char fld_index_name[] = "fld";
/*
 * Range describing IGIF (inode/generation FID) sequences.  Only lsr_start
 * and lsr_flags are set here; the remaining fields are zero-initialized,
 * so .lsr_index defaults to 0 (MDT0000).
 * NOTE(review): .lsr_end is not visible in this hunk - presumably set
 * elsewhere or elided by the diff; confirm against the full file.
 */
static const struct lu_seq_range IGIF_FLD_RANGE = {
	.lsr_start = FID_SEQ_IGIF,
	.lsr_flags = LU_SEQ_RANGE_MDT
};
-const struct dt_index_features fld_index_features = {
+static const struct dt_index_features fld_index_features = {
.dif_flags = DT_IND_UPDATE,
- .dif_keysize_min = sizeof(seqno_t),
- .dif_keysize_max = sizeof(seqno_t),
+ .dif_keysize_min = sizeof(u64),
+ .dif_keysize_max = sizeof(u64),
.dif_recsize_min = sizeof(struct lu_seq_range),
.dif_recsize_max = sizeof(struct lu_seq_range),
.dif_ptrsize = 4
fld->lsf_name, PRANGE(range), rc);
GOTO(out, rc);
}
- memcpy(tmp, new_range, sizeof(*new_range));
+ *tmp = *new_range;
tmp->lsr_start = range->lsr_start;
} else {
- memcpy(tmp, new_range, sizeof(*new_range));
+ *tmp = *new_range;
}
range_cpu_to_be(tmp, tmp);
* changed between declare and create.
* Because the fld entry can only be increamental, so we will only check
* whether it can be merged from the left.
+ *
+ * Caller must hold fld->lsf_lock
**/
int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
const struct lu_seq_range *new_range, struct thandle *th)
info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- LASSERT_MUTEX_LOCKED(&fld->lsf_lock);
+ LASSERT(mutex_is_locked(&fld->lsf_lock));
range = &info->fti_lrange;
memset(range, 0, sizeof(*range));
range_compare_loc(new_range, range) == 0) {
range_cpu_to_be(tmp, range);
rc = dt_delete(env, fld->lsf_obj,
- (struct dt_key *)&tmp->lsr_start, th,
- BYPASS_CAPA);
+ (struct dt_key *)&tmp->lsr_start, th);
if (rc != 0)
GOTO(out, rc);
- memcpy(tmp, new_range, sizeof(*new_range));
+ *tmp = *new_range;
tmp->lsr_start = range->lsr_start;
deleted = 1;
} else {
- memcpy(tmp, new_range, sizeof(*new_range));
+ *tmp = *new_range;
}
range_cpu_to_be(tmp, tmp);
rc = dt_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
- (struct dt_key *)&tmp->lsr_start, th, BYPASS_CAPA, 1);
+ (struct dt_key *)&tmp->lsr_start, th, 1);
if (rc != 0) {
CERROR("%s: insert range "DRANGE" failed: rc = %d\n",
fld->lsf_name, PRANGE(new_range), rc);
* \retval -ve other error;
*/
int fld_index_lookup(const struct lu_env *env, struct lu_server_fld *fld,
- seqno_t seq, struct lu_seq_range *range)
+ u64 seq, struct lu_seq_range *range)
{
struct lu_seq_range *fld_rec;
struct fld_thread_info *info;
RETURN(rc);
}
+/**
+ * insert entry in fld store.
+ *
+ * \param env relevant lu_env
+ * \param fld fld store
+ * \param range range to be inserted
+ *
+ * \retval 0 success
+ * \retval -ve error
+ *
+ * Caller must hold fld->lsf_lock
+ **/
+
int fld_insert_entry(const struct lu_env *env,
struct lu_server_fld *fld,
const struct lu_seq_range *range)
int rc;
ENTRY;
+ LASSERT(mutex_is_locked(&fld->lsf_lock));
+
th = dt_trans_create(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev));
if (IS_ERR(th))
RETURN(PTR_ERR(th));
}
int fld_index_init(const struct lu_env *env, struct lu_server_fld *fld,
- struct dt_device *dt)
+ struct dt_device *dt, int type)
{
struct dt_object *dt_obj = NULL;
struct lu_fid fid;
struct dt_it *it;
const struct dt_it_ops *iops;
int rc;
+ __u32 index;
ENTRY;
info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
dof.dof_type = DFT_INDEX;
dof.u.dof_idx.di_feat = &fld_index_features;
- dt_obj = dt_find_or_create(env, dt, &fid, &dof, attr);
+ dt_obj = dt_locate(env, dt, &fid);
if (IS_ERR(dt_obj)) {
rc = PTR_ERR(dt_obj);
- CERROR("%s: Can't find \"%s\" obj %d\n", fld->lsf_name,
- fld_index_name, rc);
dt_obj = NULL;
GOTO(out, rc);
}
+ LASSERT(dt_obj != NULL);
+ if (!dt_object_exists(dt_obj)) {
+ lu_object_put(env, &dt_obj->do_lu);
+ dt_obj = dt_find_or_create(env, dt, &fid, &dof, attr);
+ fld->lsf_new = 1;
+ if (IS_ERR(dt_obj)) {
+ rc = PTR_ERR(dt_obj);
+ CERROR("%s: Can't find \"%s\" obj %d\n", fld->lsf_name,
+ fld_index_name, rc);
+ dt_obj = NULL;
+ GOTO(out, rc);
+ }
+ }
+
fld->lsf_obj = dt_obj;
rc = dt_obj->do_ops->do_index_try(env, dt_obj, &fld_index_features);
if (rc != 0) {
range = &info->fti_rec;
/* Load fld entry to cache */
iops = &dt_obj->do_index_ops->dio_it;
- it = iops->init(env, dt_obj, 0, NULL);
+ it = iops->init(env, dt_obj, 0);
if (IS_ERR(it))
GOTO(out, rc = PTR_ERR(it));
GOTO(out_it_put, rc);
rc = iops->next(env, it);
} while (rc == 0);
+ } else {
+ fld->lsf_new = 1;
}
- /* Note: fld_insert_entry will detect whether these
- * special entries already exist inside FLDB */
- mutex_lock(&fld->lsf_lock);
- rc = fld_insert_special_entries(env, fld);
- mutex_unlock(&fld->lsf_lock);
- if (rc != 0) {
- CERROR("%s: insert special entries failed!: rc = %d\n",
- fld->lsf_name, rc);
+ rc = fld_name_to_index(fld->lsf_name, &index);
+ if (rc < 0)
GOTO(out_it_put, rc);
- }
+ else
+ rc = 0;
+ if (index == 0 && type == LU_SEQ_RANGE_MDT) {
+ /* Note: fld_insert_entry will detect whether these
+ * special entries already exist inside FLDB */
+ mutex_lock(&fld->lsf_lock);
+ rc = fld_insert_special_entries(env, fld);
+ mutex_unlock(&fld->lsf_lock);
+ if (rc != 0) {
+ CERROR("%s: insert special entries failed!: rc = %d\n",
+ fld->lsf_name, rc);
+ GOTO(out_it_put, rc);
+ }
+ }
out_it_put:
iops->put(env, it);
out_it_fini:
if (attr != NULL)
OBD_FREE_PTR(attr);
- if (rc != 0) {
+ if (rc < 0) {
if (dt_obj != NULL)
lu_object_put(env, &dt_obj->do_lu);
fld->lsf_obj = NULL;
}
EXIT;
}
+
+/**
+ * Read FLD entries following \a range into \a data.
+ *
+ * Walks the FLD index starting at the key range->lsr_end and copies every
+ * entry that matches range's lsr_index/lsr_flags and whose lsr_start lies
+ * past range->lsr_start into the lu_seq_range_array at \a data.  The array
+ * is converted to little-endian before returning.
+ *
+ * \param env		relevant lu_env
+ * \param fld		fld store to read from
+ * \param range		last range already known to the caller; selects the
+ *			index/flags to match and the starting key
+ * \param data		output buffer, treated as struct lu_seq_range_array
+ * \param data_len	size of \a data in bytes
+ *
+ * \retval 0		success (lsra_count may be 0)
+ * \retval -EAGAIN	\a data filled up; partial result stored, caller
+ *			should retry from the last returned range
+ * \retval -ve		other error
+ **/
+int fld_server_read(const struct lu_env *env, struct lu_server_fld *fld,
+		    struct lu_seq_range *range, void *data, int data_len)
+{
+	struct lu_seq_range_array *lsra = data;
+	struct fld_thread_info *info;
+	struct dt_object *dt_obj = fld->lsf_obj;
+	struct lu_seq_range *entry;
+	struct dt_it *it;
+	const struct dt_it_ops *iops;
+	int rc;
+
+	ENTRY;
+
+	lsra->lsra_count = 0;
+	iops = &dt_obj->do_index_ops->dio_it;
+	it = iops->init(env, dt_obj, 0);
+	if (IS_ERR(it))
+		RETURN(PTR_ERR(it));
+
+	/* Position the iterator at the caller's last known end-of-range key.
+	 * rc <= 0: nothing at/after the key, or an error - bail out without
+	 * touching the output array (lsra_count is already 0). */
+	rc = iops->load(env, it, range->lsr_end);
+	if (rc <= 0)
+		GOTO(out_it_fini, rc);
+
+	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
+	LASSERT(info != NULL);
+	entry = &info->fti_rec;
+	do {
+		rc = iops->rec(env, it, (struct dt_rec *)entry, 0);
+		if (rc != 0)
+			GOTO(out_it_put, rc);
+
+		/* Would the next entry overflow the caller's buffer?  Return
+		 * the partial array and let the caller come back for more. */
+		if (offsetof(typeof(*lsra), lsra_lsr[lsra->lsra_count + 1]) >
+		    data_len)
+			GOTO(out, rc = -EAGAIN);
+
+		/* Stored records are big-endian; convert before comparing. */
+		range_be_to_cpu(entry, entry);
+		if (entry->lsr_index == range->lsr_index &&
+		    entry->lsr_flags == range->lsr_flags &&
+		    entry->lsr_start > range->lsr_start) {
+			lsra->lsra_lsr[lsra->lsra_count] = *entry;
+			lsra->lsra_count++;
+		}
+
+		rc = iops->next(env, it);
+	} while (rc == 0);
+	/* rc > 0 from ->next means end of index - not an error. */
+	if (rc > 0)
+		rc = 0;
+out:
+	/* Reply wire format is little-endian; note this label is reached by
+	 * normal fall-through on success as well as by the -EAGAIN path. */
+	range_array_cpu_to_le(lsra, lsra);
+out_it_put:
+	iops->put(env, it);
+out_it_fini:
+	iops->fini(env, it);
+
+	RETURN(rc);
+}