* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include <lustre_mdc.h>
#include "fid_internal.h"
-static int seq_client_rpc(struct lu_client_seq *seq, struct lu_seq_range *input,
+static int seq_client_rpc(struct lu_client_seq *seq,
struct lu_seq_range *output, __u32 opc,
const char *opcname)
{
struct obd_export *exp = seq->lcs_exp;
struct ptlrpc_request *req;
- struct lu_seq_range *out, *in;
+ struct lu_seq_range *out, *in;
__u32 *op;
int rc;
ENTRY;
/* Zero out input range, this is not recovery yet. */
in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
- if (input != NULL)
- *in = *input;
- else
- range_init(in);
+ range_init(in);
ptlrpc_request_set_replen(req);
- if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
- req->rq_request_portal = (opc == SEQ_ALLOC_SUPER) ?
- SEQ_CONTROLLER_PORTAL : SEQ_METADATA_PORTAL;
- /* update mdt field of *in, it is required for fld update
- * on super sequence allocator node. */
- if (opc == SEQ_ALLOC_SUPER)
- in->lsr_mdt = seq->lcs_space.lsr_mdt;
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
+ req->rq_request_portal = SEQ_METADATA_PORTAL;
+ in->lsr_flags = LU_SEQ_RANGE_MDT;
} else {
- LASSERT(opc == SEQ_ALLOC_META);
+ LASSERTF(seq->lcs_type == LUSTRE_SEQ_DATA,
+ "unknown lcs_type %u\n", seq->lcs_type);
req->rq_request_portal = SEQ_DATA_PORTAL;
+ in->lsr_flags = LU_SEQ_RANGE_OST;
}
+
+ if (opc == SEQ_ALLOC_SUPER) {
+ /* Update index field of *in, it is required for
+ * FLD update on super sequence allocator node. */
+ in->lsr_index = seq->lcs_space.lsr_index;
+ req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
+ } else {
+ /* Only SEQ_ALLOC_SUPER and SEQ_ALLOC_META are valid here;
+ * fixed misplaced quote: ", opc" was inside the format
+ * string, so the message printed the literal text ", opc". */
+ LASSERTF(opc == SEQ_ALLOC_META,
+ "unknown opcode %u\n", opc);
+ }
+
ptlrpc_at_set_req_timeout(req);
mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
DRANGE"]\n", seq->lcs_name, PRANGE(output));
GOTO(out_req, rc = -EINVAL);
}
- *in = *out;
CDEBUG(D_INFO, "%s: Allocated %s-sequence "DRANGE"]\n",
seq->lcs_name, opcname, PRANGE(output));
}
/* Request sequence-controller node to allocate new super-sequence. */
-int seq_client_replay_super(struct lu_client_seq *seq,
- struct lu_seq_range *range,
- const struct lu_env *env)
+int seq_client_alloc_super(struct lu_client_seq *seq,
+ const struct lu_env *env)
{
int rc;
ENTRY;
#ifdef __KERNEL__
if (seq->lcs_srv) {
LASSERT(env != NULL);
- rc = seq_server_alloc_super(seq->lcs_srv, range,
- &seq->lcs_space, env);
+ rc = seq_server_alloc_super(seq->lcs_srv, &seq->lcs_space,
+ env);
} else {
#endif
- rc = seq_client_rpc(seq, range, &seq->lcs_space,
+ rc = seq_client_rpc(seq, &seq->lcs_space,
SEQ_ALLOC_SUPER, "super");
#ifdef __KERNEL__
}
RETURN(rc);
}
-/* Request sequence-controller node to allocate new super-sequence. */
-int seq_client_alloc_super(struct lu_client_seq *seq,
- const struct lu_env *env)
-{
- ENTRY;
- RETURN(seq_client_replay_super(seq, NULL, env));
-}
-
/* Request sequence-controller node to allocate new meta-sequence. */
static int seq_client_alloc_meta(struct lu_client_seq *seq,
const struct lu_env *env)
#ifdef __KERNEL__
if (seq->lcs_srv) {
LASSERT(env != NULL);
- rc = seq_server_alloc_meta(seq->lcs_srv, NULL,
- &seq->lcs_space, env);
+ rc = seq_server_alloc_meta(seq->lcs_srv, &seq->lcs_space, env);
} else {
#endif
- rc = seq_client_rpc(seq, NULL, &seq->lcs_space,
+ rc = seq_client_rpc(seq, &seq->lcs_space,
SEQ_ALLOC_META, "meta");
#ifdef __KERNEL__
}
RETURN(rc);
}
+/* Claim the right to allocate a new sequence, or wait for the thread
+ * that currently holds it.  Called with lcs_sem held; always returns
+ * with lcs_sem in the state the return value implies:
+ *   -EAGAIN - another thread was mid-allocation; we slept on lcs_waitq
+ *             and re-took lcs_sem, caller must re-check state and retry.
+ *   0       - this thread now owns the update (lcs_update set) and
+ *             lcs_sem has been DROPPED; pair with seq_fid_alloc_fini(). */
+static int seq_fid_alloc_prep(struct lu_client_seq *seq,
+ cfs_waitlink_t *link)
+{
+ if (seq->lcs_update) {
+ /* Another thread is allocating: queue on lcs_waitq and
+ * release lcs_sem before sleeping so the owner can finish. */
+ cfs_waitq_add(&seq->lcs_waitq, link);
+ cfs_set_current_state(CFS_TASK_UNINT);
+ cfs_up(&seq->lcs_sem);
+
+ cfs_waitq_wait(link, CFS_TASK_UNINT);
+
+ cfs_down(&seq->lcs_sem);
+ cfs_waitq_del(&seq->lcs_waitq, link);
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ /* State may have changed while we slept; force a retry. */
+ return -EAGAIN;
+ }
+ /* We own the allocation now; drop lcs_sem so the (possibly slow)
+ * RPC in seq_client_alloc_seq() runs without holding the lock. */
+ ++seq->lcs_update;
+ cfs_up(&seq->lcs_sem);
+ return 0;
+}
+
+/* Release the allocation ownership taken in seq_fid_alloc_prep():
+ * re-acquire lcs_sem, clear lcs_update and wake any thread sleeping
+ * on lcs_waitq.  Returns with lcs_sem HELD (caller releases it).
+ * NOTE(review): lcs_update is asserted before taking lcs_sem; this is
+ * safe only because the owning thread is its sole writer here. */
+static void seq_fid_alloc_fini(struct lu_client_seq *seq)
+{
+ LASSERT(seq->lcs_update == 1);
+ cfs_down(&seq->lcs_sem);
+ --seq->lcs_update;
+ cfs_waitq_signal(&seq->lcs_waitq);
+}
+
/* Allocate new fid on passed client @seq and save it to @fid. */
int seq_client_alloc_fid(struct lu_client_seq *seq, struct lu_fid *fid)
{
+ cfs_waitlink_t link;
int rc;
ENTRY;
LASSERT(seq != NULL);
LASSERT(fid != NULL);
+ cfs_waitlink_init(&link);
cfs_down(&seq->lcs_sem);
- if (fid_is_zero(&seq->lcs_fid) ||
- fid_oid(&seq->lcs_fid) >= seq->lcs_width)
- {
+ while (1) {
seqno_t seqnr;
+ if (!fid_is_zero(&seq->lcs_fid) &&
+ fid_oid(&seq->lcs_fid) < seq->lcs_width) {
+ /* Just bump last allocated fid and return to caller. */
+ seq->lcs_fid.f_oid += 1;
+ rc = 0;
+ break;
+ }
+
+ rc = seq_fid_alloc_prep(seq, &link);
+ if (rc)
+ continue;
+
rc = seq_client_alloc_seq(seq, &seqnr);
if (rc) {
CERROR("%s: Can't allocate new sequence, "
"rc %d\n", seq->lcs_name, rc);
+ seq_fid_alloc_fini(seq);
cfs_up(&seq->lcs_sem);
RETURN(rc);
}
* to setup FLD for it.
*/
rc = 1;
- } else {
- /* Just bump last allocated fid and return to caller. */
- seq->lcs_fid.f_oid += 1;
- rc = 0;
+
+ seq_fid_alloc_fini(seq);
+ break;
}
*fid = seq->lcs_fid;
*/
void seq_client_flush(struct lu_client_seq *seq)
{
+ cfs_waitlink_t link;
+
LASSERT(seq != NULL);
+ cfs_waitlink_init(&link);
cfs_down(&seq->lcs_sem);
+
+ while (seq->lcs_update) {
+ cfs_waitq_add(&seq->lcs_waitq, &link);
+ cfs_set_current_state(CFS_TASK_UNINT);
+ cfs_up(&seq->lcs_sem);
+
+ cfs_waitq_wait(&link, CFS_TASK_UNINT);
+
+ cfs_down(&seq->lcs_sem);
+ cfs_waitq_del(&seq->lcs_waitq, &link);
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ }
+
fid_zero(&seq->lcs_fid);
/**
* this id shld not be used for seq range allocation.
* set to -1 for dgb check.
*/
- seq->lcs_space.lsr_mdt = -1;
+ seq->lcs_space.lsr_index = -1;
range_init(&seq->lcs_space);
cfs_up(&seq->lcs_sem);
seq->lcs_type = type;
cfs_sema_init(&seq->lcs_sem, 1);
seq->lcs_width = LUSTRE_SEQ_MAX_WIDTH;
+ cfs_waitq_init(&seq->lcs_waitq);
/* Make sure that things are clear before work is started. */
seq_client_flush(seq);