-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: WangDi <wangdi@clusterfs.com>
* Author: Yury Umanets <umka@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
+
#define DEBUG_SUBSYSTEM S_FLD
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/jbd.h>
-#else /* __KERNEL__ */
-# include <liblustre.h>
-#endif
-
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_ver.h>
+#include <libcfs/libcfs.h>
+#include <linux/module.h>
#include <obd_support.h>
-#include <lprocfs_status.h>
-
#include <dt_object.h>
-#include <md_object.h>
-#include <lustre_mdc.h>
#include <lustre_fid.h>
#include <lustre_fld.h>
#include "fld_internal.h"
-const char fld_index_name[] = "fld";
+static const char fld_index_name[] = "fld";
static const struct lu_seq_range IGIF_FLD_RANGE = {
- .lsr_start = 1,
- .lsr_end = FID_SEQ_IDIF,
- .lsr_mdt = 0
+ .lsr_start = FID_SEQ_IGIF,
+ .lsr_end = FID_SEQ_IGIF_MAX + 1,
+ .lsr_index = 0,
+ .lsr_flags = LU_SEQ_RANGE_MDT
};
-const struct dt_index_features fld_index_features = {
- .dif_flags = DT_IND_UPDATE,
- .dif_keysize_min = sizeof(seqno_t),
- .dif_keysize_max = sizeof(seqno_t),
- .dif_recsize_min = sizeof(struct lu_seq_range),
- .dif_recsize_max = sizeof(struct lu_seq_range),
- .dif_ptrsize = 4
+static const struct lu_seq_range DOT_LUSTRE_FLD_RANGE = {
+ .lsr_start = FID_SEQ_DOT_LUSTRE,
+ .lsr_end = FID_SEQ_DOT_LUSTRE + 1,
+ .lsr_index = 0,
+ .lsr_flags = LU_SEQ_RANGE_MDT
};
-extern struct lu_context_key fld_thread_key;
-
-static struct dt_key *fld_key(const struct lu_env *env,
- const seqno_t seq)
-{
- struct fld_thread_info *info;
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- LASSERT(info != NULL);
-
- info->fti_key = cpu_to_be64(seq);
- RETURN((void *)&info->fti_key);
-}
-
-static struct dt_rec *fld_rec(const struct lu_env *env,
- const struct lu_seq_range *range)
-{
- struct fld_thread_info *info;
- struct lu_seq_range *rec;
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- LASSERT(info != NULL);
- rec = &info->fti_rec;
-
- range_cpu_to_be(rec, range);
- RETURN((void *)rec);
-}
-
-struct thandle* fld_trans_start(struct lu_server_fld *fld,
- const struct lu_env *env, int credit)
-{
- struct fld_thread_info *info;
- struct dt_device *dt_dev;
- struct txn_param *p;
+static const struct lu_seq_range ROOT_FLD_RANGE = {
+ .lsr_start = FID_SEQ_ROOT,
+ .lsr_end = FID_SEQ_ROOT + 1,
+ .lsr_index = 0,
+ .lsr_flags = LU_SEQ_RANGE_MDT
+};
- dt_dev = lu2dt_dev(fld->lsf_obj->do_lu.lo_dev);
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- p = &info->fti_txn_param;
- txn_param_init(p, credit);
+static const struct dt_index_features fld_index_features = {
+ .dif_flags = DT_IND_UPDATE,
+ .dif_keysize_min = sizeof(u64),
+ .dif_keysize_max = sizeof(u64),
+ .dif_recsize_min = sizeof(struct lu_seq_range),
+ .dif_recsize_max = sizeof(struct lu_seq_range),
+ .dif_ptrsize = 4
+};
- return dt_dev->dd_ops->dt_trans_start(env, dt_dev, p);
-}
+extern struct lu_context_key fld_thread_key;
-void fld_trans_stop(struct lu_server_fld *fld,
- const struct lu_env *env, struct thandle* th)
+int fld_declare_index_create(const struct lu_env *env,
+ struct lu_server_fld *fld,
+ const struct lu_seq_range *new_range,
+ struct thandle *th)
{
- struct dt_device *dt_dev;
-
- dt_dev = lu2dt_dev(fld->lsf_obj->do_lu.lo_dev);
- dt_dev->dd_ops->dt_trans_stop(env, th);
+ struct lu_seq_range *tmp;
+ struct lu_seq_range *range;
+ struct fld_thread_info *info;
+ int rc = 0;
+
+ ENTRY;
+
+ info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
+ range = &info->fti_lrange;
+ tmp = &info->fti_irange;
+ memset(range, 0, sizeof(*range));
+
+ rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
+ if (rc == 0) {
+ /* In case of a duplicate entry, the location must be the same */
+ LASSERT((range_compare_loc(new_range, range) == 0));
+ GOTO(out, rc = -EEXIST);
+ }
+
+ if (rc != -ENOENT) {
+ CERROR("%s: lookup range "DRANGE" error: rc = %d\n",
+ fld->lsf_name, PRANGE(range), rc);
+ GOTO(out, rc);
+ }
+
+ /* Check for the merge case; since fld entries can only be incremental,
+ * we only need to check whether it can be merged from the left. */
+ if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
+ range_compare_loc(new_range, range) == 0) {
+ range_cpu_to_be(tmp, range);
+ rc = dt_declare_delete(env, fld->lsf_obj,
+ (struct dt_key *)&tmp->lsr_start, th);
+ if (rc) {
+ CERROR("%s: declare record "DRANGE" failed: rc = %d\n",
+ fld->lsf_name, PRANGE(range), rc);
+ GOTO(out, rc);
+ }
+ *tmp = *new_range;
+ tmp->lsr_start = range->lsr_start;
+ } else {
+ *tmp = *new_range;
+ }
+
+ range_cpu_to_be(tmp, tmp);
+ rc = dt_declare_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
+ (struct dt_key *)&tmp->lsr_start, th);
+out:
+ RETURN(rc);
}
/**
*
* \retval 0 success
* \retval -ve error
- */
-
-int fld_index_create(struct lu_server_fld *fld,
- const struct lu_env *env,
- const struct lu_seq_range *range,
- struct thandle *th)
-{
- struct dt_object *dt_obj = fld->lsf_obj;
- struct dt_device *dt_dev;
- seqno_t start;
- int rc;
-
- ENTRY;
-
- start = range->lsr_start;
- LASSERT(range_is_sane(range));
- dt_dev = lu2dt_dev(fld->lsf_obj->do_lu.lo_dev);
-
- rc = dt_obj->do_index_ops->dio_insert(env, dt_obj,
- fld_rec(env, range),
- fld_key(env, start),
- th, BYPASS_CAPA, 1);
-
- CDEBUG(D_INFO, "%s: insert given range : "DRANGE" rc = %d\n",
- fld->lsf_name, PRANGE(range), rc);
- RETURN(rc);
-}
-
-/**
- * delete range in fld store.
*
- * \param range range to be deleted
- * \param th transaction
+ * The whole fld index insertion is protected by seq->lss_mutex (see
+ * seq_server_alloc_super), i.e. only one thread will access fldb at a
+ * time, so we do not need to worry about the fld file and cache being
+ * changed between declare and create.
+ * Because fld entries can only be incremental, we only check whether
+ * the new entry can be merged from the left.
*
- * \retval 0 success
- * \retval -ve error
- */
-
-int fld_index_delete(struct lu_server_fld *fld,
- const struct lu_env *env,
- struct lu_seq_range *range,
- struct thandle *th)
+ * Caller must hold fld->lsf_lock
+ **/
+int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
+ const struct lu_seq_range *new_range, struct thandle *th)
{
- struct dt_object *dt_obj = fld->lsf_obj;
- struct dt_device *dt_dev;
- seqno_t seq = range->lsr_start;
- int rc;
-
- ENTRY;
-
- dt_dev = lu2dt_dev(fld->lsf_obj->do_lu.lo_dev);
- rc = dt_obj->do_index_ops->dio_delete(env, dt_obj,
- fld_key(env, seq), th,
- BYPASS_CAPA);
-
- CDEBUG(D_INFO, "%s: delete given range : "DRANGE" rc = %d\n",
- fld->lsf_name, PRANGE(range), rc);
-
- RETURN(rc);
+ struct lu_seq_range *range;
+ struct lu_seq_range *tmp;
+ struct fld_thread_info *info;
+ int rc = 0;
+ int deleted = 0;
+ struct fld_cache_entry *flde;
+ ENTRY;
+
+ info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
+
+ LASSERT(mutex_is_locked(&fld->lsf_lock));
+
+ range = &info->fti_lrange;
+ memset(range, 0, sizeof(*range));
+ tmp = &info->fti_irange;
+ rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
+ if (rc != -ENOENT) {
+ rc = rc == 0 ? -EEXIST : rc;
+ GOTO(out, rc);
+ }
+
+ if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
+ range_compare_loc(new_range, range) == 0) {
+ range_cpu_to_be(tmp, range);
+ rc = dt_delete(env, fld->lsf_obj,
+ (struct dt_key *)&tmp->lsr_start, th);
+ if (rc != 0)
+ GOTO(out, rc);
+ *tmp = *new_range;
+ tmp->lsr_start = range->lsr_start;
+ deleted = 1;
+ } else {
+ *tmp = *new_range;
+ }
+
+ range_cpu_to_be(tmp, tmp);
+ rc = dt_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
+ (struct dt_key *)&tmp->lsr_start, th, 1);
+ if (rc != 0) {
+ CERROR("%s: insert range "DRANGE" failed: rc = %d\n",
+ fld->lsf_name, PRANGE(new_range), rc);
+ GOTO(out, rc);
+ }
+
+ flde = fld_cache_entry_create(new_range);
+ if (IS_ERR(flde))
+ GOTO(out, rc = PTR_ERR(flde));
+
+ write_lock(&fld->lsf_cache->fci_lock);
+ if (deleted)
+ fld_cache_delete_nolock(fld->lsf_cache, new_range);
+ rc = fld_cache_insert_nolock(fld->lsf_cache, flde);
+ write_unlock(&fld->lsf_cache->fci_lock);
+ if (rc)
+ OBD_FREE_PTR(flde);
+out:
+ RETURN(rc);
}
/**
- * lookup range for a seq passed
+ * Lookup the range for a given seq. Note that we only care about start/end
+ * here; the caller should handle the attached location data (flags, index).
*
- * \param seq seq for lookup.
- * \param range result of lookup.
+ * \param seq seq for lookup.
+ * \param range result of lookup.
*
- * \retval 0 success
- * \retval -ve error
+ * \retval 0 found, \a range is the matched range;
+ * \retval -ENOENT not found, \a range is the left-side range;
+ * \retval -ve other error;
*/
-
-int fld_index_lookup(struct lu_server_fld *fld,
- const struct lu_env *env,
- seqno_t seq,
- struct lu_seq_range *range)
+int fld_index_lookup(const struct lu_env *env, struct lu_server_fld *fld,
+ u64 seq, struct lu_seq_range *range)
{
- struct dt_object *dt_obj = fld->lsf_obj;
struct lu_seq_range *fld_rec;
- struct dt_key *key = fld_key(env, seq);
struct fld_thread_info *info;
int rc;
ENTRY;
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- fld_rec = &info->fti_rec;
-
- rc = dt_obj->do_index_ops->dio_lookup(env, dt_obj,
- (struct dt_rec*) fld_rec,
- key, BYPASS_CAPA);
+ info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
+ fld_rec = &info->fti_rec;
- if (rc >= 0) {
- range_be_to_cpu(fld_rec, fld_rec);
+ rc = fld_cache_lookup(fld->lsf_cache, seq, fld_rec);
+ if (rc == 0) {
*range = *fld_rec;
if (range_within(range, seq))
rc = 0;
RETURN(rc);
}
-static int fld_insert_igif_fld(struct lu_server_fld *fld,
- const struct lu_env *env)
-{
- struct thandle *th;
- int rc;
+/**
+ * insert entry in fld store.
+ *
+ * \param env relevant lu_env
+ * \param fld fld store
+ * \param range range to be inserted
+ *
+ * \retval 0 success
+ * \retval -ve error
+ *
+ * Caller must hold fld->lsf_lock
+ **/
- ENTRY;
- th = fld_trans_start(fld, env, FLD_TXN_INDEX_INSERT_CREDITS);
- if (IS_ERR(th))
- RETURN(PTR_ERR(th));
-
- rc = fld_index_create(fld, env, &IGIF_FLD_RANGE, th);
- fld_trans_stop(fld, env, th);
- if (rc == -EEXIST)
- rc = 0;
- RETURN(rc);
+int fld_insert_entry(const struct lu_env *env,
+ struct lu_server_fld *fld,
+ const struct lu_seq_range *range)
+{
+ struct thandle *th;
+ int rc;
+ ENTRY;
+
+ LASSERT(mutex_is_locked(&fld->lsf_lock));
+
+ th = dt_trans_create(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev));
+ if (IS_ERR(th))
+ RETURN(PTR_ERR(th));
+
+ rc = fld_declare_index_create(env, fld, range, th);
+ if (rc != 0) {
+ if (rc == -EEXIST)
+ rc = 0;
+ GOTO(out, rc);
+ }
+
+ rc = dt_trans_start_local(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev),
+ th);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = fld_index_create(env, fld, range, th);
+ if (rc == -EEXIST)
+ rc = 0;
+out:
+ dt_trans_stop(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev), th);
+ RETURN(rc);
}
+EXPORT_SYMBOL(fld_insert_entry);
-int fld_index_init(struct lu_server_fld *fld,
- const struct lu_env *env,
- struct dt_device *dt)
+static int fld_insert_special_entries(const struct lu_env *env,
+ struct lu_server_fld *fld)
{
- struct dt_object *dt_obj;
- struct lu_fid fid;
- int rc;
- ENTRY;
+ int rc;
- dt_obj = dt_store_open(env, dt, "", fld_index_name, &fid);
- if (!IS_ERR(dt_obj)) {
- fld->lsf_obj = dt_obj;
- rc = dt_obj->do_ops->do_index_try(env, dt_obj,
- &fld_index_features);
- if (rc == 0) {
- LASSERT(dt_obj->do_index_ops != NULL);
- rc = fld_insert_igif_fld(fld, env);
-
- if (rc != 0) {
- CERROR("insert igif in fld! = %d\n", rc);
- lu_object_put(env, &dt_obj->do_lu);
- fld->lsf_obj = NULL;
- }
- } else
- CERROR("%s: File \"%s\" is not an index!\n",
- fld->lsf_name, fld_index_name);
-
-
- } else {
- CERROR("%s: Can't find \"%s\" obj %d\n",
- fld->lsf_name, fld_index_name, (int)PTR_ERR(dt_obj));
- rc = PTR_ERR(dt_obj);
- }
+ rc = fld_insert_entry(env, fld, &IGIF_FLD_RANGE);
+ if (rc != 0)
+ RETURN(rc);
- RETURN(rc);
+ rc = fld_insert_entry(env, fld, &DOT_LUSTRE_FLD_RANGE);
+ if (rc != 0)
+ RETURN(rc);
+
+ rc = fld_insert_entry(env, fld, &ROOT_FLD_RANGE);
+
+ RETURN(rc);
}
-void fld_index_fini(struct lu_server_fld *fld,
- const struct lu_env *env)
+int fld_index_init(const struct lu_env *env, struct lu_server_fld *fld,
+ struct dt_device *dt, int type)
{
- ENTRY;
- if (fld->lsf_obj != NULL) {
- if (!IS_ERR(fld->lsf_obj))
- lu_object_put(env, &fld->lsf_obj->do_lu);
- fld->lsf_obj = NULL;
- }
- EXIT;
+ struct dt_object *dt_obj = NULL;
+ struct lu_fid fid;
+ struct lu_attr *attr = NULL;
+ struct lu_seq_range *range = NULL;
+ struct fld_thread_info *info;
+ struct dt_object_format dof;
+ struct dt_it *it;
+ const struct dt_it_ops *iops;
+ int rc;
+ __u32 index;
+ ENTRY;
+
+ info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
+ LASSERT(info != NULL);
+
+ lu_local_obj_fid(&fid, FLD_INDEX_OID);
+ OBD_ALLOC_PTR(attr);
+ if (attr == NULL)
+ RETURN(-ENOMEM);
+
+ memset(attr, 0, sizeof(*attr));
+ attr->la_valid = LA_MODE;
+ attr->la_mode = S_IFREG | 0666;
+ dof.dof_type = DFT_INDEX;
+ dof.u.dof_idx.di_feat = &fld_index_features;
+
+ dt_obj = dt_locate(env, dt, &fid);
+ if (IS_ERR(dt_obj)) {
+ rc = PTR_ERR(dt_obj);
+ dt_obj = NULL;
+ GOTO(out, rc);
+ }
+
+ LASSERT(dt_obj != NULL);
+ if (!dt_object_exists(dt_obj)) {
+ lu_object_put(env, &dt_obj->do_lu);
+ dt_obj = dt_find_or_create(env, dt, &fid, &dof, attr);
+ fld->lsf_new = 1;
+ if (IS_ERR(dt_obj)) {
+ rc = PTR_ERR(dt_obj);
+ CERROR("%s: Can't find \"%s\" obj %d\n", fld->lsf_name,
+ fld_index_name, rc);
+ dt_obj = NULL;
+ GOTO(out, rc);
+ }
+ }
+
+ fld->lsf_obj = dt_obj;
+ rc = dt_obj->do_ops->do_index_try(env, dt_obj, &fld_index_features);
+ if (rc != 0) {
+ CERROR("%s: File \"%s\" is not an index: rc = %d!\n",
+ fld->lsf_name, fld_index_name, rc);
+ GOTO(out, rc);
+ }
+
+ range = &info->fti_rec;
+ /* Load fld entry to cache */
+ iops = &dt_obj->do_index_ops->dio_it;
+ it = iops->init(env, dt_obj, 0);
+ if (IS_ERR(it))
+ GOTO(out, rc = PTR_ERR(it));
+
+ rc = iops->load(env, it, 0);
+ if (rc < 0)
+ GOTO(out_it_fini, rc);
+
+ if (rc > 0) {
+ /* Load FLD entry into server cache */
+ do {
+ rc = iops->rec(env, it, (struct dt_rec *)range, 0);
+ if (rc != 0)
+ GOTO(out_it_put, rc);
+ LASSERT(range != NULL);
+ range_be_to_cpu(range, range);
+ rc = fld_cache_insert(fld->lsf_cache, range);
+ if (rc != 0)
+ GOTO(out_it_put, rc);
+ rc = iops->next(env, it);
+ } while (rc == 0);
+ } else {
+ fld->lsf_new = 1;
+ }
+
+ rc = fld_name_to_index(fld->lsf_name, &index);
+ if (rc < 0)
+ GOTO(out_it_put, rc);
+ else
+ rc = 0;
+
+ if (index == 0 && type == LU_SEQ_RANGE_MDT) {
+ /* Note: fld_insert_entry will detect whether these
+ * special entries already exist inside FLDB */
+ mutex_lock(&fld->lsf_lock);
+ rc = fld_insert_special_entries(env, fld);
+ mutex_unlock(&fld->lsf_lock);
+ if (rc != 0) {
+ CERROR("%s: insert special entries failed!: rc = %d\n",
+ fld->lsf_name, rc);
+ GOTO(out_it_put, rc);
+ }
+ }
+out_it_put:
+ iops->put(env, it);
+out_it_fini:
+ iops->fini(env, it);
+out:
+ if (attr != NULL)
+ OBD_FREE_PTR(attr);
+
+ if (rc < 0) {
+ if (dt_obj != NULL)
+ lu_object_put(env, &dt_obj->do_lu);
+ fld->lsf_obj = NULL;
+ }
+ RETURN(rc);
+}
+
+void fld_index_fini(const struct lu_env *env, struct lu_server_fld *fld)
+{
+ ENTRY;
+ if (fld->lsf_obj != NULL) {
+ if (!IS_ERR(fld->lsf_obj))
+ lu_object_put(env, &fld->lsf_obj->do_lu);
+ fld->lsf_obj = NULL;
+ }
+ EXIT;
+}
+
+int fld_server_read(const struct lu_env *env, struct lu_server_fld *fld,
+ struct lu_seq_range *range, void *data, int data_len)
+{
+ struct lu_seq_range_array *lsra = data;
+ struct fld_thread_info *info;
+ struct dt_object *dt_obj = fld->lsf_obj;
+ struct lu_seq_range *entry;
+ struct dt_it *it;
+ const struct dt_it_ops *iops;
+ int rc;
+
+ ENTRY;
+
+ lsra->lsra_count = 0;
+ iops = &dt_obj->do_index_ops->dio_it;
+ it = iops->init(env, dt_obj, 0);
+ if (IS_ERR(it))
+ RETURN(PTR_ERR(it));
+
+ rc = iops->load(env, it, range->lsr_end);
+ if (rc <= 0)
+ GOTO(out_it_fini, rc);
+
+ info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
+ LASSERT(info != NULL);
+ entry = &info->fti_rec;
+ do {
+ rc = iops->rec(env, it, (struct dt_rec *)entry, 0);
+ if (rc != 0)
+ GOTO(out_it_put, rc);
+
+ if (offsetof(typeof(*lsra), lsra_lsr[lsra->lsra_count + 1]) >
+ data_len)
+ GOTO(out, rc = -EAGAIN);
+
+ range_be_to_cpu(entry, entry);
+ if (entry->lsr_index == range->lsr_index &&
+ entry->lsr_flags == range->lsr_flags &&
+ entry->lsr_start > range->lsr_start) {
+ lsra->lsra_lsr[lsra->lsra_count] = *entry;
+ lsra->lsra_count++;
+ }
+
+ rc = iops->next(env, it);
+ } while (rc == 0);
+ if (rc > 0)
+ rc = 0;
+out:
+ range_array_cpu_to_le(lsra, lsra);
+out_it_put:
+ iops->put(env, it);
+out_it_fini:
+ iops->fini(env, it);
+
+ RETURN(rc);
}