/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/fid/fid_handler.c
 *
 * Lustre Sequence Manager
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */
43 #define DEBUG_SUBSYSTEM S_FID
45 #include <libcfs/libcfs.h>
46 #include <linux/module.h>
48 #include <obd_class.h>
49 #include <dt_object.h>
50 #include <md_object.h>
51 #include <obd_support.h>
52 #include <lustre_req_layout.h>
53 #include <lustre_fid.h>
54 #include "fid_internal.h"
56 static void seq_server_proc_fini(struct lu_server_seq *seq);
58 /* Assigns client to sequence controller node. */
59 int seq_server_set_cli(struct lu_server_seq *seq,
60 struct lu_client_seq *cli,
61 const struct lu_env *env)
67 * Ask client for new range, assign that range to ->seq_space and write
68 * seq state to backing store should be atomic.
70 mutex_lock(&seq->lss_mutex);
73 CDEBUG(D_INFO, "%s: Detached sequence client %s\n",
74 seq->lss_name, cli->lcs_name);
79 if (seq->lss_cli != NULL) {
80 CDEBUG(D_HA, "%s: Sequence controller is already "
81 "assigned\n", seq->lss_name);
82 GOTO(out_up, rc = -EEXIST);
85 CDEBUG(D_INFO, "%s: Attached sequence controller %s\n",
86 seq->lss_name, cli->lcs_name);
89 cli->lcs_space.lsr_index = seq->lss_site->ss_node_id;
92 mutex_unlock(&seq->lss_mutex);
95 EXPORT_SYMBOL(seq_server_set_cli);
97 * allocate \a w units of sequence from range \a from.
99 static inline void range_alloc(struct lu_seq_range *to,
100 struct lu_seq_range *from,
103 width = min(range_space(from), width);
104 to->lsr_start = from->lsr_start;
105 to->lsr_end = from->lsr_start + width;
106 from->lsr_start += width;
110 * On controller node, allocate new super sequence for regular sequence server.
111 * As this super sequence controller, this node suppose to maintain fld
113 * \a out range always has currect mds node number of requester.
116 static int __seq_server_alloc_super(struct lu_server_seq *seq,
117 struct lu_seq_range *out,
118 const struct lu_env *env)
120 struct lu_seq_range *space = &seq->lss_space;
124 LASSERT(range_is_sane(space));
126 if (range_is_exhausted(space)) {
127 CERROR("%s: Sequences space is exhausted\n",
131 range_alloc(out, space, seq->lss_width);
134 rc = seq_store_update(env, seq, out, 1 /* sync */);
136 LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n",
137 seq->lss_name, rc, PRANGE(out));
142 int seq_server_alloc_super(struct lu_server_seq *seq,
143 struct lu_seq_range *out,
144 const struct lu_env *env)
149 mutex_lock(&seq->lss_mutex);
150 rc = __seq_server_alloc_super(seq, out, env);
151 mutex_unlock(&seq->lss_mutex);
156 static int __seq_set_init(const struct lu_env *env,
157 struct lu_server_seq *seq)
159 struct lu_seq_range *space = &seq->lss_space;
162 range_alloc(&seq->lss_lowater_set, space, seq->lss_set_width);
163 range_alloc(&seq->lss_hiwater_set, space, seq->lss_set_width);
165 rc = seq_store_update(env, seq, NULL, 1);
171 * This function implements new seq allocation algorithm using async
172 * updates to seq file on disk. ref bug 18857 for details.
173 * there are four variable to keep track of this process
175 * lss_space; - available lss_space
176 * lss_lowater_set; - lu_seq_range for all seqs before barrier, i.e. safe to use
177 * lss_hiwater_set; - lu_seq_range after barrier, i.e. allocated but may be
180 * when lss_lowater_set reaches the end it is replaced with hiwater one and
181 * a write operation is initiated to allocate new hiwater range.
182 * if last seq write opearion is still not commited, current operation is
183 * flaged as sync write op.
185 static int range_alloc_set(const struct lu_env *env,
186 struct lu_seq_range *out,
187 struct lu_server_seq *seq)
189 struct lu_seq_range *space = &seq->lss_space;
190 struct lu_seq_range *loset = &seq->lss_lowater_set;
191 struct lu_seq_range *hiset = &seq->lss_hiwater_set;
194 if (range_is_zero(loset))
195 __seq_set_init(env, seq);
197 if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
198 loset->lsr_start = loset->lsr_end;
200 if (range_is_exhausted(loset)) {
201 /* reached high water mark. */
202 struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
203 int obd_num_clients = dev->ld_obd->obd_num_exports;
206 /* calculate new seq width based on number of clients */
207 set_sz = max(seq->lss_set_width,
208 obd_num_clients * seq->lss_width);
209 set_sz = min(range_space(space), set_sz);
211 /* Switch to hiwater range now */
213 /* allocate new hiwater range */
214 range_alloc(hiset, space, set_sz);
216 /* update ondisk seq with new *space */
217 rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
220 LASSERTF(!range_is_exhausted(loset) || range_is_sane(loset),
221 DRANGE"\n", PRANGE(loset));
224 range_alloc(out, loset, seq->lss_width);
229 static int __seq_server_alloc_meta(struct lu_server_seq *seq,
230 struct lu_seq_range *out,
231 const struct lu_env *env)
233 struct lu_seq_range *space = &seq->lss_space;
238 LASSERT(range_is_sane(space));
240 /* Check if available space ends and allocate new super seq */
241 if (range_is_exhausted(space)) {
243 CERROR("%s: No sequence controller is attached.\n",
248 rc = seq_client_alloc_super(seq->lss_cli, env);
250 CERROR("%s: Can't allocate super-sequence, rc %d\n",
255 /* Saving new range to allocation space. */
256 *space = seq->lss_cli->lcs_space;
257 LASSERT(range_is_sane(space));
260 rc = range_alloc_set(env, out, seq);
262 CERROR("%s: Allocated meta-sequence failed: rc = %d\n",
267 CDEBUG(D_INFO, "%s: Allocated meta-sequence " DRANGE"\n",
268 seq->lss_name, PRANGE(out));
273 int seq_server_alloc_meta(struct lu_server_seq *seq,
274 struct lu_seq_range *out,
275 const struct lu_env *env)
280 mutex_lock(&seq->lss_mutex);
281 rc = __seq_server_alloc_meta(seq, out, env);
282 mutex_unlock(&seq->lss_mutex);
286 EXPORT_SYMBOL(seq_server_alloc_meta);
288 static int seq_server_handle(struct lu_site *site,
289 const struct lu_env *env,
290 __u32 opc, struct lu_seq_range *out)
293 struct seq_server_site *ss_site;
296 ss_site = lu_site2seq(site);
300 if (!ss_site->ss_server_seq) {
301 CERROR("Sequence server is not "
305 rc = seq_server_alloc_meta(ss_site->ss_server_seq, out, env);
307 case SEQ_ALLOC_SUPER:
308 if (!ss_site->ss_control_seq) {
309 CERROR("Sequence controller is not "
313 rc = seq_server_alloc_super(ss_site->ss_control_seq, out, env);
323 static int seq_handler(struct tgt_session_info *tsi)
325 struct lu_seq_range *out, *tmp;
326 struct lu_site *site;
332 LASSERT(!(lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY));
333 site = tsi->tsi_exp->exp_obd->obd_lu_dev->ld_site;
334 LASSERT(site != NULL);
336 opc = req_capsule_client_get(tsi->tsi_pill, &RMF_SEQ_OPC);
338 out = req_capsule_server_get(tsi->tsi_pill, &RMF_SEQ_RANGE);
340 RETURN(err_serious(-EPROTO));
342 tmp = req_capsule_client_get(tsi->tsi_pill, &RMF_SEQ_RANGE);
344 /* seq client passed mdt id, we need to pass that using out
347 out->lsr_index = tmp->lsr_index;
348 out->lsr_flags = tmp->lsr_flags;
349 rc = seq_server_handle(site, tsi->tsi_env, *opc, out);
351 rc = err_serious(-EPROTO);
357 struct tgt_handler seq_handlers[] = {
358 TGT_SEQ_HDL(HABEO_REFERO, SEQ_QUERY, seq_handler),
360 EXPORT_SYMBOL(seq_handlers);
362 /* context key constructor/destructor: seq_key_init, seq_key_fini */
363 LU_KEY_INIT_FINI(seq, struct seq_thread_info);
365 /* context key: seq_thread_key */
366 LU_CONTEXT_KEY_DEFINE(seq, LCT_MD_THREAD | LCT_DT_THREAD);
/*
 * Register the /proc directory and variables for this sequence manager.
 * On lprocfs_add_vars() failure the partially created directory is torn
 * down again via seq_server_proc_fini().
 *
 * NOTE(review): reconstructed; the LPROCFS guard follows the usual Lustre
 * pattern for this file -- confirm against the build configuration.
 */
static int seq_server_proc_init(struct lu_server_seq *seq)
{
#ifdef LPROCFS
	int rc;
	ENTRY;

	seq->lss_proc_dir = lprocfs_register(seq->lss_name,
					     seq_type_proc_dir,
					     NULL, NULL);
	if (IS_ERR(seq->lss_proc_dir)) {
		rc = PTR_ERR(seq->lss_proc_dir);
		RETURN(rc);
	}

	rc = lprocfs_add_vars(seq->lss_proc_dir,
			      seq_server_proc_list, seq);
	if (rc) {
		CERROR("%s: Can't init sequence manager "
		       "proc, rc %d\n", seq->lss_name, rc);
		GOTO(out_cleanup, rc);
	}

	RETURN(0);

out_cleanup:
	seq_server_proc_fini(seq);
	RETURN(rc);
#else /* !LPROCFS */
	return 0;
#endif /* LPROCFS */
}
/*
 * Remove this sequence manager's /proc directory, if it was registered.
 * Safe to call when registration failed (lss_proc_dir holds an ERR_PTR)
 * or never happened (NULL).
 */
static void seq_server_proc_fini(struct lu_server_seq *seq)
{
#ifdef LPROCFS
	ENTRY;
	if (seq->lss_proc_dir != NULL) {
		if (!IS_ERR(seq->lss_proc_dir))
			lprocfs_remove(&seq->lss_proc_dir);
		seq->lss_proc_dir = NULL;
	}
	EXIT;
#endif /* LPROCFS */
}
414 int seq_server_init(struct lu_server_seq *seq,
415 struct dt_device *dev,
417 enum lu_mgr_type type,
418 struct seq_server_site *ss,
419 const struct lu_env *env)
421 int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
424 LASSERT(dev != NULL);
425 LASSERT(prefix != NULL);
427 LASSERT(ss->ss_lu != NULL);
430 seq->lss_type = type;
432 range_init(&seq->lss_space);
434 range_init(&seq->lss_lowater_set);
435 range_init(&seq->lss_hiwater_set);
436 seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;
438 mutex_init(&seq->lss_mutex);
440 seq->lss_width = is_srv ?
441 LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
443 snprintf(seq->lss_name, sizeof(seq->lss_name),
444 "%s-%s", (is_srv ? "srv" : "ctl"), prefix);
446 rc = seq_store_init(seq, env, dev);
449 /* Request backing store for saved sequence info. */
450 rc = seq_store_read(seq, env);
451 if (rc == -ENODATA) {
453 /* Nothing is read, init by default value. */
454 seq->lss_space = is_srv ?
455 LUSTRE_SEQ_ZERO_RANGE:
456 LUSTRE_SEQ_SPACE_RANGE;
459 seq->lss_space.lsr_index = ss->ss_node_id;
460 LCONSOLE_INFO("%s: No data found "
461 "on store. Initialize space\n",
464 rc = seq_store_update(env, seq, NULL, 0);
466 CERROR("%s: Can't write space data, "
467 "rc %d\n", seq->lss_name, rc);
470 CERROR("%s: Can't read space data, rc %d\n",
476 LASSERT(range_is_sane(&seq->lss_space));
478 LASSERT(!range_is_zero(&seq->lss_space) &&
479 range_is_sane(&seq->lss_space));
482 rc = seq_server_proc_init(seq);
489 seq_server_fini(seq, env);
492 EXPORT_SYMBOL(seq_server_init);
494 void seq_server_fini(struct lu_server_seq *seq,
495 const struct lu_env *env)
499 seq_server_proc_fini(seq);
500 seq_store_fini(seq, env);
504 EXPORT_SYMBOL(seq_server_fini);
506 int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss)
511 if (ss->ss_server_seq) {
512 seq_server_fini(ss->ss_server_seq, env);
513 OBD_FREE_PTR(ss->ss_server_seq);
514 ss->ss_server_seq = NULL;
517 if (ss->ss_control_seq) {
518 seq_server_fini(ss->ss_control_seq, env);
519 OBD_FREE_PTR(ss->ss_control_seq);
520 ss->ss_control_seq = NULL;
523 if (ss->ss_client_seq) {
524 seq_client_fini(ss->ss_client_seq);
525 OBD_FREE_PTR(ss->ss_client_seq);
526 ss->ss_client_seq = NULL;
531 EXPORT_SYMBOL(seq_site_fini);
533 int fid_server_mod_init(void)
535 LU_CONTEXT_KEY_INIT(&seq_thread_key);
536 return lu_context_key_register(&seq_thread_key);
539 void fid_server_mod_exit(void)
541 lu_context_key_degister(&seq_thread_key);