- fld_iam.c renamed to fld_index.c because it does not contain any iam-related code; it only calls the underlying layer to work with the index, and the index itself may be implemented in different ways.
#include "cmm_internal.h"
#include "mdc_internal.h"
-/* XXX: fix layter this hack. It exists because OSD produces fids with like
- this: seq = ROOT_SEQ + 1, etc. */
+/* XXX: fix this hack later. It exists because OSD produces fids like this:
+ * seq = ROOT_SEQ + 1, etc. */
static int cmm_special_fid(const struct lu_fid *fid)
{
- if (fid_seq(fid) < LUSTRE_SEQ_SPACE_START)
- return 1;
- return 0;
+ struct lu_range *space = (struct lu_range *)&LUSTRE_SEQ_SPACE_RANGE;
+ return !range_within(space, fid_seq(fid));
}
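For illustration only (not part of the patch, assuming the usual lu_fid layout with f_seq/f_oid/f_ver): with LUSTRE_SEQ_SPACE_RANGE covering [0x400, ~0ULL], any fid whose sequence lies below 0x400 is reported as special:

        struct lu_fid fid = { .f_seq = 1, .f_oid = 1, .f_ver = 0 };
        LASSERT(cmm_special_fid(&fid) == 1);  /* seq 1 < 0x400 -> special */
        fid.f_seq = 0x400;
        LASSERT(cmm_special_fid(&fid) == 0);  /* first ordinary sequence  */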
static int cmm_fld_lookup(struct cmm_device *cm,
#include "fid_internal.h"
#ifdef __KERNEL__
-/* server side seq mgr stuff */
-static const struct lu_range LUSTRE_SEQ_SPACE_INIT = {
- LUSTRE_SEQ_SPACE_START,
- LUSTRE_SEQ_SPACE_END
+/* sequence space starts at 0x400 so that the first 0x400 sequences are
+ * reserved for special purposes. */
+const struct lu_range LUSTRE_SEQ_SPACE_RANGE = {
+ (0x400),
+ ((__u64)~0ULL)
};
+EXPORT_SYMBOL(LUSTRE_SEQ_SPACE_RANGE);
-static const struct lu_range LUSTRE_SEQ_SUPER_INIT = {
+/* zero range, used for init and other purposes */
+const struct lu_range LUSTRE_SEQ_ZERO_RANGE = {
0,
0
};
+EXPORT_SYMBOL(LUSTRE_SEQ_ZERO_RANGE);
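Illustration only (not part of the patch): LUSTRE_SEQ_SPACE_RANGE describes the whole allocatable space [0x400, ~0ULL], while LUSTRE_SEQ_ZERO_RANGE is the empty [0, 0] range a server's super-sequence starts from before its first allocation (see the server seq init hunk below). Both pass the lu_range sanity check; range_space() (presumably lr_end - lr_start) is zero for the latter:

        LASSERT(range_is_sane((struct lu_range *)&LUSTRE_SEQ_SPACE_RANGE));
        LASSERT(range_is_sane((struct lu_range *)&LUSTRE_SEQ_ZERO_RANGE));
        LASSERT(range_space((struct lu_range *)&LUSTRE_SEQ_ZERO_RANGE) == 0);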
static int
seq_server_write_state(struct lu_server_seq *seq,
LASSERT(range_is_sane(space));
- if (range_space(space) < LUSTRE_SEQ_SUPER_CHUNK) {
+ if (range_space(space) < LUSTRE_SEQ_SUPER_WIDTH) {
CWARN("sequences space is going to exhauste soon. "
"Only can allocate "LPU64" sequences\n",
space->lr_end - space->lr_start);
CERROR("sequences space is exhausted\n");
rc = -ENOSPC;
} else {
- range->lr_start = space->lr_start;
- space->lr_start += LUSTRE_SEQ_SUPER_CHUNK;
- range->lr_end = space->lr_start;
+ range_alloc(range, space, LUSTRE_SEQ_SUPER_WIDTH);
rc = 0;
}
struct lu_range *range)
{
struct lu_range *super = &seq->seq_super;
- int rc;
+ int rc = 0;
ENTRY;
LASSERT(range_is_sane(super));
/* saving new range into allocation space. */
*super = seq->seq_cli->seq_range;
LASSERT(range_is_sane(super));
- } else {
- rc = 0;
}
- range->lr_start = super->lr_start;
- super->lr_start += LUSTRE_SEQ_META_CHUNK;
- range->lr_end = super->lr_start;
+ range_alloc(range, super, LUSTRE_SEQ_META_WIDTH);
if (rc == 0) {
CDEBUG(D_INFO|D_WARNING, "SEQ-MGR(srv): allocated meta-sequence "
seq->seq_flags = flags;
sema_init(&seq->seq_sem, 1);
- seq->seq_space = LUSTRE_SEQ_SPACE_INIT;
- seq->seq_super = LUSTRE_SEQ_SUPER_INIT;
+ seq->seq_space = LUSTRE_SEQ_SPACE_RANGE;
+ seq->seq_super = LUSTRE_SEQ_ZERO_RANGE;
lu_device_get(&seq->seq_dev->dd_lu_dev);
MODULES := fld
-fld-objs := fld_handler.o fld_request.o fld_iam.o
+fld-objs := fld_handler.o fld_request.o fld_index.o
EXTRA_PRE_CFLAGS := -I@LUSTRE@ -I@LUSTRE@/ldiskfs
if LIBLUSTRE
noinst_LIBRARIES = libfld.a
-libfld_a_SOURCES = fld_handler.c fld_request.c fld_iam.c fld_internal.h
+libfld_a_SOURCES = fld_handler.c fld_request.c fld_index.c fld_internal.h
libfld_a_CPPFLAGS = $(LLCPPFLAGS)
libfld_a_CFLAGS = $(LLCFLAGS)
endif
switch (opts) {
case FLD_CREATE:
- rc = fld_handle_insert(fld, ctx, mf->mf_seq, mf->mf_mds);
+ rc = fld_index_handle_insert(fld, ctx,
+ mf->mf_seq, mf->mf_mds);
break;
case FLD_DELETE:
- rc = fld_handle_delete(fld, ctx, mf->mf_seq);
+ rc = fld_index_handle_delete(fld, ctx, mf->mf_seq);
break;
case FLD_LOOKUP:
- rc = fld_handle_lookup(fld, ctx, mf->mf_seq, &mf->mf_mds);
+ rc = fld_index_handle_lookup(fld, ctx,
+ mf->mf_seq, &mf->mf_mds);
break;
default:
rc = -EINVAL;
INIT_LIST_HEAD(&fld_list_head.fld_list);
spin_lock_init(&fld_list_head.fld_lock);
- rc = fld_iam_init(fld, ctx);
+ rc = fld_index_init(fld, ctx);
if (rc == 0) {
fld->fld_service =
spin_unlock(&fld_list_head.fld_lock);
if (fld->fld_dt != NULL) {
lu_device_put(&fld->fld_dt->dd_lu_dev);
- fld_iam_fini(fld, ctx);
+ fld_index_fini(fld, ctx);
fld->fld_dt = NULL;
}
CDEBUG(D_INFO|D_WARNING, "Server FLD\n");
/* -*- MODE: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * fld/fld.c
+ * fld/fld_index.c
*
* Copyright (C) 2006 Cluster File Systems, Inc.
* Author: WangDi <wangdi@clusterfs.com>
+ * Yury Umanets <umka@clusterfs.com>
*
* This file is part of the Lustre file system, http://www.lustre.org
* Lustre is a trademark of Cluster File Systems, Inc.
RETURN((void *)&info->fti_rec);
}
-int fld_handle_insert(struct lu_server_fld *fld,
- const struct lu_context *ctx,
- fidseq_t seq, mdsno_t mds)
+int fld_index_handle_insert(struct lu_server_fld *fld,
+ const struct lu_context *ctx,
+ fidseq_t seq, mdsno_t mds)
{
struct dt_device *dt = fld->fld_dt;
struct dt_object *dt_obj = fld->fld_obj;
return 0;
}
-int fld_handle_delete(struct lu_server_fld *fld,
- const struct lu_context *ctx,
- fidseq_t seq)
+int fld_index_handle_delete(struct lu_server_fld *fld,
+ const struct lu_context *ctx,
+ fidseq_t seq)
{
struct dt_device *dt = fld->fld_dt;
struct dt_object *dt_obj = fld->fld_obj;
return 0;
}
-int fld_handle_lookup(struct lu_server_fld *fld,
- const struct lu_context *ctx,
- fidseq_t seq, mdsno_t *mds)
+int fld_index_handle_lookup(struct lu_server_fld *fld,
+ const struct lu_context *ctx,
+ fidseq_t seq, mdsno_t *mds)
{
struct dt_object *dt_obj = fld->fld_obj;
struct dt_rec *rec = fld_rec(ctx, 0);
return 0;
}
-int fld_iam_init(struct lu_server_fld *fld,
- const struct lu_context *ctx)
+int fld_index_init(struct lu_server_fld *fld,
+ const struct lu_context *ctx)
{
struct dt_device *dt = fld->fld_dt;
struct dt_object *dt_obj;
RETURN(rc);
}
-void fld_iam_fini(struct lu_server_fld *fld,
- const struct lu_context *ctx)
+void fld_index_fini(struct lu_server_fld *fld,
+ const struct lu_context *ctx)
{
ENTRY;
if (fld->fld_obj != NULL) {
#define FLD_SERVICE_WATCHDOG_TIMEOUT (obd_timeout * 1000)
-int fld_handle_insert(struct lu_server_fld *fld,
- const struct lu_context *ctx,
- fidseq_t seq, mdsno_t mds);
+int fld_index_handle_insert(struct lu_server_fld *fld,
+ const struct lu_context *ctx,
+ fidseq_t seq, mdsno_t mds);
-int fld_handle_delete(struct lu_server_fld *fld,
- const struct lu_context *ctx,
- fidseq_t seq);
+int fld_index_handle_delete(struct lu_server_fld *fld,
+ const struct lu_context *ctx,
+ fidseq_t seq);
-int fld_handle_lookup(struct lu_server_fld *fld,
- const struct lu_context *ctx,
- fidseq_t seq, mdsno_t *mds);
+int fld_index_handle_lookup(struct lu_server_fld *fld,
+ const struct lu_context *ctx,
+ fidseq_t seq, mdsno_t *mds);
-int fld_iam_init(struct lu_server_fld *fld,
- const struct lu_context *ctx);
+int fld_index_init(struct lu_server_fld *fld,
+ const struct lu_context *ctx);
-void fld_iam_fini(struct lu_server_fld *fld,
- const struct lu_context *ctx);
+void fld_index_fini(struct lu_server_fld *fld,
+ const struct lu_context *ctx);
#endif
r->lr_start = r->lr_end = 0;
}
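+/* true if sequence @s falls within range @r */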
+static inline int range_within(struct lu_range *r,
+ __u64 s)
+{
+ return s >= r->lr_start && s <= r->lr_end;
+}
+
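+/* carve a sub-range of width @w off the front of @s, storing it in @r */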
+static inline void range_alloc(struct lu_range *r,
+ struct lu_range *s,
+ __u64 w)
+{
+ r->lr_start = s->lr_start;
+ r->lr_end = s->lr_start + w;
+ s->lr_start += w;
+}
+
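For illustration only (not part of the patch): range_alloc() carves a window of the requested width off the front of the source range and advances the source, which is exactly how the server-side allocators above hand out super- and meta-sequence chunks:

        struct lu_range space = { 0x400, ~0ULL };  /* local copy of the seq space */
        struct lu_range super;

        range_alloc(&super, &space, LUSTRE_SEQ_SUPER_WIDTH);
        LASSERT(super.lr_start == 0x400);
        LASSERT(super.lr_end == space.lr_start);   /* space now begins where super ends */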
static inline int range_is_sane(struct lu_range *r)
{
if (r->lr_end >= r->lr_start)
struct lu_site;
struct lu_context;
-/* start seq number */
-#define LUSTRE_SEQ_SPACE_START 0x400
-
-/* maximal posible seq number */
-#define LUSTRE_SEQ_SPACE_END ((__u64)~0ULL)
+/* whole sequence space range and zero range definitions */
+extern const struct lu_range LUSTRE_SEQ_SPACE_RANGE;
+extern const struct lu_range LUSTRE_SEQ_ZERO_RANGE;
/* this is how many FIDs may be allocated in one sequence. */
-#define LUSTRE_SEQ_WIDTH 0x00000000000002800
+#define LUSTRE_SEQ_WIDTH 0x00000000000002800
/* how many sequences may be allocated for a meta-sequence (this is 10240
 * sequences). */
-#define LUSTRE_SEQ_META_CHUNK 0x00000000000002800
+#define LUSTRE_SEQ_META_WIDTH 0x00000000000002800
-/* how many sequences may be allocate for super-sequence (this is 10240 * 10240
- * sequences), what means that one alloaction for super-sequence allows to
- * allocate 10240 meta-sequences and each of them may have 10240 sequences. */
-#define LUSTRE_SEQ_SUPER_CHUNK (LUSTRE_SEQ_META_CHUNK * LUSTRE_SEQ_META_CHUNK)
+/* this is how many sequences (10240 * 10240) may be in one super-sequence
+ * allocated to MDTs. */
+#define LUSTRE_SEQ_SUPER_WIDTH (LUSTRE_SEQ_META_WIDTH * LUSTRE_SEQ_META_WIDTH)
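For reference, 0x2800 = 10240: one meta-sequence grant covers 10240 sequences, one super-sequence grant covers 10240 * 10240 = 104857600 sequences, and each sequence may in turn hold LUSTRE_SEQ_WIDTH (also 0x2800 = 10240) fids.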
/* client sequence manager interface */
struct lu_client_seq {