RETURN(rc);
}
+/* assigns client to sequence controller node */
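+/* Returns 0 on success, or -EINVAL if a controller client is already assigned. */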
+int
+seq_server_set_ctlr(struct lu_server_seq *seq,
+ struct lu_client_seq *cli,
+ const struct lu_context *ctx)
+{
+ int rc = 0;
+ ENTRY;
+
+ LASSERT(cli != NULL);
+
+ if (seq->seq_cli) {
+ CERROR("SEQ-MGR(srv): sequence-controller "
+ "is already assigned\n");
+ RETURN(-EINVAL);
+ }
+
+ CDEBUG(D_INFO|D_WARNING, "SEQ-MGR(srv): assign "
+ "sequence controller client %s\n",
+ cli->seq_exp->exp_client_uuid.uuid);
+
+ down(&seq->seq_sem);
+
+ /* assign controller */
+ seq->seq_cli = cli;
+
+ /* get a new range from the controller only if the super-sequence has
+ * not already been initialized (e.g. from the backing store). */
+ if (range_is_zero(&seq->seq_super)) {
+ /* release the semaphore to avoid a deadlock in case we are asking
+ * ourselves. */
+ up(&seq->seq_sem);
+ rc = seq_client_alloc_super(cli);
+ down(&seq->seq_sem);
+
+ if (rc) {
+ CERROR("can't allocate super-sequence, "
+ "rc %d\n", rc);
+ /* do not return with seq_sem held */
+ up(&seq->seq_sem);
+ RETURN(rc);
+ }
+
+ /* take super-seq from client seq mgr */
+ LASSERT(range_is_sane(&cli->seq_range));
+
+ seq->seq_super = cli->seq_range;
+
+ /* save init seq to backing store. */
+ rc = seq_server_write_state(seq, ctx);
+ if (rc) {
+ CERROR("can't write sequence state, "
+ "rc = %d\n", rc);
+ }
+ }
+ up(&seq->seq_sem);
+ RETURN(rc);
+}
+EXPORT_SYMBOL(seq_server_set_ctlr);
+
/* on controller node, allocate new super sequence for regular sequence
* server. */
static int
return 0;
}
-/* assigns client to sequence controller node */
-int
-seq_server_controller(struct lu_server_seq *seq,
- struct lu_client_seq *cli,
- const struct lu_context *ctx)
-{
- int rc = 0;
- ENTRY;
-
- LASSERT(cli != NULL);
-
- if (seq->seq_cli) {
- CERROR("SEQ-MGR(srv): sequence-controller "
- "is already assigned\n");
- RETURN(-EINVAL);
- }
-
- CDEBUG(D_INFO|D_WARNING, "SEQ-MGR(srv): assign "
- "sequence controller client %s\n",
- cli->seq_exp->exp_client_uuid.uuid);
-
- down(&seq->seq_sem);
-
- /* assign controller */
- seq->seq_cli = cli;
-
- /* get new range from controller only if super-sequence is not yet
- * initialized from backing store or something else. */
- if (range_is_zero(&seq->seq_super)) {
- /* release sema to avoid deadlock for case we're asking our
- * selves. */
- up(&seq->seq_sem);
- rc = seq_client_alloc_super(cli);
- down(&seq->seq_sem);
-
- if (rc) {
- CERROR("can't allocate super-sequence, "
- "rc %d\n", rc);
- RETURN(rc);
- }
-
- /* take super-seq from client seq mgr */
- LASSERT(range_is_sane(&cli->seq_range));
-
- seq->seq_super = cli->seq_range;
-
- /* save init seq to backing store. */
- rc = seq_server_write_state(seq, ctx);
- if (rc) {
- CERROR("can't write sequence state, "
- "rc = %d\n", rc);
- }
- }
- up(&seq->seq_sem);
- RETURN(rc);
-}
-EXPORT_SYMBOL(seq_server_controller);
-
#ifdef LPROCFS
static cfs_proc_dir_entry_t *seq_type_proc_dir = NULL;
return rc;
}
-static struct fld_cache *
-fld_cache_lookup(struct fld_cache_info *fld_cache, __u64 seq)
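+/* removes the cache entry for @seq from its hash chain, if it is present */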
+static void
+fld_cache_delete(struct fld_cache_info *fld_cache, __u64 seq)
{
struct hlist_head *bucket;
struct hlist_node *scan;
spin_lock(&fld_cache->fld_lock);
hlist_for_each_entry(fld, scan, bucket, fld_list) {
if (fld->fld_seq == seq) {
- spin_unlock(&fld_cache->fld_lock);
- RETURN(fld);
+ hlist_del_init(&fld->fld_list);
+ GOTO(out_unlock, 0);
}
}
- spin_unlock(&fld_cache->fld_lock);
- RETURN(NULL);
+ EXIT;
+out_unlock:
+ spin_unlock(&fld_cache->fld_lock);
+ return;
}
-static void
-fld_cache_delete(struct fld_cache_info *fld_cache, __u64 seq)
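+/* returns the cache entry for @seq, or NULL if it is not cached */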
+static struct fld_cache *
+fld_cache_lookup(struct fld_cache_info *fld_cache, __u64 seq)
{
struct hlist_head *bucket;
struct hlist_node *scan;
spin_lock(&fld_cache->fld_lock);
hlist_for_each_entry(fld, scan, bucket, fld_list) {
if (fld->fld_seq == seq) {
- hlist_del_init(&fld->fld_list);
- GOTO(out_unlock, 0);
+ spin_unlock(&fld_cache->fld_lock);
+ RETURN(fld);
}
}
-
- EXIT;
-out_unlock:
spin_unlock(&fld_cache->fld_lock);
- return;
+
+ RETURN(NULL);
}
#endif
void seq_server_fini(struct lu_server_seq *seq,
const struct lu_context *ctx);
-int seq_server_controller(struct lu_server_seq *seq,
- struct lu_client_seq *cli,
- const struct lu_context *ctx);
+int seq_server_set_ctlr(struct lu_server_seq *seq,
+ struct lu_client_seq *cli,
+ const struct lu_context *ctx);
#endif
int seq_client_init(struct lu_client_seq *seq,
}
/* XXX: this is ugly, should be something else */
-static int mdt_controller_init(const struct lu_context *ctx,
- struct mdt_device *m,
- struct lustre_cfg *cfg)
+static int mdt_seq_ctlr_init(const struct lu_context *ctx,
+ struct mdt_device *m,
+ struct lustre_cfg *cfg)
{
struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site;
struct obd_device *mdc;
LASSERT(ls->ls_server_seq != NULL);
- rc = seq_server_controller(ls->ls_server_seq,
- ls->ls_client_seq,
- ctx);
+ rc = seq_server_set_ctlr(ls->ls_server_seq,
+ ls->ls_client_seq,
+ ctx);
}
}
RETURN(rc);
}
-static void mdt_controller_fini(struct mdt_device *m)
+static void mdt_seq_ctlr_fini(struct mdt_device *m)
{
struct lu_site *ls;
mdt_fld_fini(&ctx, m);
mdt_seq_fini(&ctx, m);
- mdt_controller_fini(m);
+ mdt_seq_ctlr_fini(m);
LASSERT(atomic_read(&d->ld_ref) == 0);
md_device_fini(&m->mdt_md_dev);
case LCFG_ADD_MDC:
/* add mdc hook to get first MDT uuid and connect it to
* ls->controller to use for seq manager. */
- err = mdt_controller_init(ctx, mdt_dev(d), cfg);
+ err = mdt_seq_ctlr_init(ctx, mdt_dev(d), cfg);
if (err) {
CERROR("can't initialize controller export, "
"rc %d\n", err);