4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/fid/fid_handler.c
38 * Lustre Sequence Manager
40 * Author: Yury Umanets <umka@clusterfs.com>
43 #define DEBUG_SUBSYSTEM S_FID
45 #include <libcfs/libcfs.h>
46 #include <linux/module.h>
48 #include <obd_class.h>
49 #include <dt_object.h>
50 #include <obd_support.h>
51 #include <lustre_req_layout.h>
52 #include <lustre_fid.h>
53 #include "fid_internal.h"
55 static void seq_server_proc_fini(struct lu_server_seq *seq);
57 /* Assigns client to sequence controller node. */
/*
 * NOTE(review): attaches controller client @cli to server sequence @seq,
 * or detaches the current one (see the "Detached" message below) -- the
 * NULL-cli branch and ENTRY/RETURN lines are not visible in this excerpt.
 * Returns -EEXIST if a controller is already attached.
 */
58 int seq_server_set_cli(const struct lu_env *env, struct lu_server_seq *seq,
59 		       struct lu_client_seq *cli)
65 	 * Ask client for new range, assign that range to ->seq_space and write
66 	 * seq state to backing store should be atomic.
/* lss_mutex serializes attach/detach against the allocation paths */
68 	mutex_lock(&seq->lss_mutex);
71 		CDEBUG(D_INFO, "%s: Detached sequence client\n", seq->lss_name);
/* only a single controller client may be attached at a time */
76 	if (seq->lss_cli != NULL) {
77 		CDEBUG(D_HA, "%s: Sequence controller is already "
78 		       "assigned\n", seq->lss_name);
79 		GOTO(out_up, rc = -EEXIST);
82 	CDEBUG(D_INFO, "%s: Attached sequence controller %s\n",
83 	       seq->lss_name, cli->lcs_name);
/* record this server's node id in the client's space descriptor */
86 	cli->lcs_space.lsr_index = seq->lss_site->ss_node_id;
89 	mutex_unlock(&seq->lss_mutex);
92 EXPORT_SYMBOL(seq_server_set_cli);
94  * allocate \a width units of sequence from range \a from.
/*
 * NOTE(review): carves the first min(remaining, width) sequences out of
 * @from into @to; @from is advanced past the allocated chunk.  The third
 * parameter line of the prototype is not visible in this excerpt.
 */
96 static inline void range_alloc(struct lu_seq_range *to,
97 			       struct lu_seq_range *from,
/* never hand out more than remains in the source range */
100 	width = min(lu_seq_range_space(from), width);
101 	to->lsr_start = from->lsr_start;
102 	to->lsr_end = from->lsr_start + width;
/* shrink the source range by the amount just allocated */
103 	from->lsr_start += width;
107  * On controller node, allocate new super sequence for regular sequence server.
108  * As the super sequence controller, this node is supposed to maintain fld
110  * \a out range always has the correct mds node number of the requester.
/* NOTE(review): caller must hold seq->lss_mutex (see seq_server_alloc_super). */
113 static int __seq_server_alloc_super(struct lu_server_seq *seq,
114 				    struct lu_seq_range *out,
115 				    const struct lu_env *env)
117 	struct lu_seq_range *space = &seq->lss_space;
121 	LASSERT(lu_seq_range_is_sane(space));
/* nothing left to hand out -- error path lines are not all visible here */
123 	if (lu_seq_range_is_exhausted(space)) {
124 		CERROR("%s: Sequences space is exhausted\n",
/* carve lss_width sequences out of the controller space for the requester */
128 	range_alloc(out, space, seq->lss_width);
/* persist the new state synchronously before acknowledging the allocation */
131 	rc = seq_store_update(env, seq, out, 1 /* sync */);
133 	LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n",
134 		      seq->lss_name, rc, PRANGE(out));
/*
 * Locked wrapper: takes lss_mutex and delegates super-sequence allocation
 * to __seq_server_alloc_super().
 */
139 int seq_server_alloc_super(struct lu_server_seq *seq,
140 			   struct lu_seq_range *out,
141 			   const struct lu_env *env)
146 	mutex_lock(&seq->lss_mutex);
147 	rc = __seq_server_alloc_super(seq, out, env);
148 	mutex_unlock(&seq->lss_mutex);
/*
 * Manually allocate the specific range @spec from this server's free space,
 * e.g. during disaster recovery.  Validation rejects empty/backward ranges
 * and flags other than MDT/OST; the error-return lines of the checks are
 * not visible in this excerpt.
 */
153 int seq_server_alloc_spec(struct lu_server_seq *seq,
154 			  struct lu_seq_range *spec,
155 			  const struct lu_env *env)
157 	struct lu_seq_range *space = &seq->lss_space;
162 	 * In some cases (like recovery after a disaster)
163 	 * we may need to allocate sequences manually
164 	 * Notice some sequences can be lost if requested
165 	 * range doesn't start at the beginning of current
166 	 * free space. Also notice it's not possible now
167 	 * to allocate sequences out of natural order.
169 	if (spec->lsr_start >= spec->lsr_end)
171 	if (spec->lsr_flags != LU_SEQ_RANGE_MDT &&
172 	    spec->lsr_flags != LU_SEQ_RANGE_OST)
175 	mutex_lock(&seq->lss_mutex);
/* skip free space up to the end of the requested range (may lose seqs) */
176 	if (spec->lsr_start >= space->lsr_start) {
177 		space->lsr_start = spec->lsr_end;
/* persist synchronously; recovery allocations must not be lost */
178 		rc = seq_store_update(env, seq, spec, 1 /* sync */);
180 		LCONSOLE_INFO("%s: "DRANGE" sequences allocated: rc = %d \n",
181 			      seq->lss_name, PRANGE(spec), rc);
183 	mutex_unlock(&seq->lss_mutex);
/*
 * Initialize the low/high water mark sets (see range_alloc_set() for the
 * algorithm): carve two lss_set_width-wide ranges out of the free space
 * and persist the shrunken space synchronously.
 */
188 static int __seq_set_init(const struct lu_env *env,
189 			  struct lu_server_seq *seq)
191 	struct lu_seq_range *space = &seq->lss_space;
194 	range_alloc(&seq->lss_lowater_set, space, seq->lss_set_width);
195 	range_alloc(&seq->lss_hiwater_set, space, seq->lss_set_width);
/* sync write: the on-disk space must reflect both carved-out sets */
197 	rc = seq_store_update(env, seq, NULL, 1);
203  * This function implements new seq allocation algorithm using async
204  * updates to seq file on disk. ref bug 18857 for details.
205  * there are four variables to keep track of this process
207  * lss_space; - available lss_space
208  * lss_lowater_set; - lu_seq_range for all seqs before barrier, i.e. safe to use
209  * lss_hiwater_set; - lu_seq_range after barrier, i.e. allocated but may be
212  * when lss_lowater_set reaches the end it is replaced with hiwater one and
213  * a write operation is initiated to allocate new hiwater range.
214  * if last seq write operation is still not committed, current operation is
215  * flagged as a sync write op.
/* NOTE(review): caller is expected to hold lss_mutex -- TODO confirm against
 * the callers (__seq_server_alloc_meta runs under it). */
217 static int range_alloc_set(const struct lu_env *env,
218 			   struct lu_seq_range *out,
219 			   struct lu_server_seq *seq)
221 	struct lu_seq_range *space = &seq->lss_space;
222 	struct lu_seq_range *loset = &seq->lss_lowater_set;
223 	struct lu_seq_range *hiset = &seq->lss_hiwater_set;
/* first use: carve the initial low/high water sets out of free space */
226 	if (lu_seq_range_is_zero(loset))
227 		__seq_set_init(env, seq);
/* fault-injection hook for testing the refill path */
229 	if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
230 		loset->lsr_start = loset->lsr_end;
232 	if (lu_seq_range_is_exhausted(loset)) {
233 		/* reached high water mark. */
234 		struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
235 		int obd_num_clients = dev->ld_obd->obd_num_exports;
238 		/* calculate new seq width based on number of clients */
239 		set_sz = max(seq->lss_set_width,
240 			     obd_num_clients * seq->lss_width);
/* but never larger than what is actually left in the free space */
241 		set_sz = min(lu_seq_range_space(space), set_sz);
243 		/* Switch to hiwater range now */
245 		/* allocate new hiwater range */
246 		range_alloc(hiset, space, set_sz);
248 		/* update ondisk seq with new *space */
249 		rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
252 	LASSERTF(!lu_seq_range_is_exhausted(loset) ||
253 		 lu_seq_range_is_sane(loset),
254 		 DRANGE"\n", PRANGE(loset));
/* hand out lss_width sequences from the safe (lowater) set */
257 	range_alloc(out, loset, seq->lss_width);
263  * Check if the sequence server has sequence available
265  * Check if the sequence server has sequence available, if not, then
266  * allocate a super sequence from the sequence manager (MDT0).
268  * \param[in] env	execution environment
269  * \param[in] seq	server sequence
271  * \retval	negative errno if allocating new sequence fails
272  * \retval	0 if there is enough sequence or allocating
273  *		new sequence succeeds
275 int seq_server_check_and_alloc_super(const struct lu_env *env,
276 				     struct lu_server_seq *seq)
278 	struct lu_seq_range *space = &seq->lss_space;
283 	/* Check if available space ends and allocate new super seq */
284 	if (lu_seq_range_is_exhausted(space)) {
/* cannot refill without an attached controller client (lss_cli) */
286 			CERROR("%s: No sequence controller is attached.\n",
/* ask the controller (MDT0) for a fresh super sequence */
291 		rc = seq_client_alloc_super(seq->lss_cli, env);
293 			CDEBUG(D_HA, "%s: Can't allocate super-sequence:"
294 			       " rc %d\n", seq->lss_name, rc);
298 		/* Saving new range to allocation space. */
299 		*space = seq->lss_cli->lcs_space;
300 		LASSERT(lu_seq_range_is_sane(space));
/* lcs_srv == NULL means the controller is remote, so the new range is
 * not yet known to the local FLDB -- insert it here */
301 		if (seq->lss_cli->lcs_srv == NULL) {
302 			struct lu_server_fld *fld;
304 			/* Insert it to the local FLDB */
305 			fld = seq->lss_site->ss_server_fld;
306 			mutex_lock(&fld->lsf_lock);
307 			rc = fld_insert_entry(env, fld, space);
308 			mutex_unlock(&fld->lsf_lock);
/* (re)initialize the low/high water sets if they were never set up */
312 	if (lu_seq_range_is_zero(&seq->lss_lowater_set))
313 		__seq_set_init(env, seq);
317 EXPORT_SYMBOL(seq_server_check_and_alloc_super);
/*
 * Allocate a meta-sequence range into @out: top up the super-sequence space
 * if needed, then carve a set-based allocation from it.  Caller must hold
 * seq->lss_mutex (see seq_server_alloc_meta).
 */
319 static int __seq_server_alloc_meta(struct lu_server_seq *seq,
320 				   struct lu_seq_range *out,
321 				   const struct lu_env *env)
323 	struct lu_seq_range *space = &seq->lss_space;
328 	LASSERT(lu_seq_range_is_sane(space));
/* refill lss_space from the controller if it is exhausted */
330 	rc = seq_server_check_and_alloc_super(env, seq);
332 		CERROR("%s: Allocated super-sequence failed: rc = %d\n",
/* hand out the actual meta range from the low-water set */
337 	rc = range_alloc_set(env, out, seq);
339 		CERROR("%s: Allocated meta-sequence failed: rc = %d\n",
344 	CDEBUG(D_INFO, "%s: Allocated meta-sequence " DRANGE"\n",
345 	       seq->lss_name, PRANGE(out));
/*
 * Locked wrapper: takes lss_mutex and delegates meta-sequence allocation
 * to __seq_server_alloc_meta().
 */
350 int seq_server_alloc_meta(struct lu_server_seq *seq,
351 			  struct lu_seq_range *out,
352 			  const struct lu_env *env)
357 	mutex_lock(&seq->lss_mutex);
358 	rc = __seq_server_alloc_meta(seq, out, env);
359 	mutex_unlock(&seq->lss_mutex);
363 EXPORT_SYMBOL(seq_server_alloc_meta);
/*
 * Dispatch a sequence request by opcode: meta allocation goes to the
 * per-site server sequence, super allocation to the controller sequence.
 * The switch statement and the SEQ_ALLOC_META case label are not visible
 * in this excerpt; only comments are added here.
 */
365 static int seq_server_handle(struct lu_site *site,
366 			     const struct lu_env *env,
367 			     __u32 opc, struct lu_seq_range *out)
370 	struct seq_server_site *ss_site;
373 	ss_site = lu_site2seq(site);
/* meta allocation requires a local server sequence to exist */
377 		if (!ss_site->ss_server_seq) {
378 			CERROR("Sequence server is not "
382 		rc = seq_server_alloc_meta(ss_site->ss_server_seq, out, env);
384 	case SEQ_ALLOC_SUPER:
/* super allocation requires this node to be the controller */
385 		if (!ss_site->ss_control_seq) {
386 			CERROR("Sequence controller is not "
390 		rc = seq_server_alloc_super(ss_site->ss_control_seq, out, env);
/*
 * Target request handler for SEQ_QUERY: pull the opcode and requested range
 * from the request capsule, run the allocation via seq_server_handle() and
 * return the result range in the reply buffer.  -EPROTO is returned (as a
 * serious error) when a required capsule field is missing.
 */
400 static int seq_handler(struct tgt_session_info *tsi)
402 	struct lu_seq_range *out, *tmp;
403 	struct lu_site *site;
/* sequence allocations are not replayed */
409 	LASSERT(!(lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY));
410 	site = tsi->tsi_exp->exp_obd->obd_lu_dev->ld_site;
411 	LASSERT(site != NULL);
413 	opc = req_capsule_client_get(tsi->tsi_pill, &RMF_SEQ_OPC);
415 		out = req_capsule_server_get(tsi->tsi_pill, &RMF_SEQ_RANGE);
417 			RETURN(err_serious(-EPROTO));
419 		tmp = req_capsule_client_get(tsi->tsi_pill, &RMF_SEQ_RANGE);
421 		/* seq client passed mdt id, we need to pass that using out
/* propagate the requester's index/flags into the reply range */
424 		out->lsr_index = tmp->lsr_index;
425 		out->lsr_flags = tmp->lsr_flags;
426 		rc = seq_server_handle(site, tsi->tsi_env, *opc, out);
428 		rc = err_serious(-EPROTO);
/* Target handler table: routes SEQ_QUERY requests to seq_handler(). */
434 struct tgt_handler seq_handlers[] = {
435 TGT_SEQ_HDL(HABEO_REFERO,	SEQ_QUERY,	seq_handler),
437 EXPORT_SYMBOL(seq_handlers);
439 /* context key constructor/destructor: seq_key_init, seq_key_fini */
440 LU_KEY_INIT_FINI(seq, struct seq_thread_info);
442 /* context key: seq_thread_key */
443 LU_CONTEXT_KEY_DEFINE(seq, LCT_MD_THREAD | LCT_DT_THREAD);
445 extern const struct file_operations seq_fld_proc_seq_fops;
/*
 * Create the /proc tree for this sequence manager: register a directory
 * named after lss_name, add the standard variables, and for the controller
 * (LUSTRE_SEQ_CONTROLLER) additionally expose the "fldb" file.  On any
 * failure the partially-built tree is torn down via seq_server_proc_fini().
 * Compiled out entirely when CONFIG_PROC_FS is not set.
 */
447 static int seq_server_proc_init(struct lu_server_seq *seq)
449 #ifdef CONFIG_PROC_FS
453 	seq->lss_proc_dir = lprocfs_register(seq->lss_name,
456 	if (IS_ERR(seq->lss_proc_dir)) {
457 		rc = PTR_ERR(seq->lss_proc_dir);
461 	rc = lprocfs_add_vars(seq->lss_proc_dir, seq_server_proc_list, seq);
463 		CERROR("%s: Can't init sequence manager "
464 		       "proc, rc %d\n", seq->lss_name, rc);
465 		GOTO(out_cleanup, rc);
/* only the controller owns a FLDB worth exposing */
468 	if (seq->lss_type == LUSTRE_SEQ_CONTROLLER) {
469 		rc = lprocfs_seq_create(seq->lss_proc_dir, "fldb", 0644,
470 					&seq_fld_proc_seq_fops, seq);
472 			CERROR("%s: Can't create fldb for sequence manager "
473 			       "proc: rc = %d\n", seq->lss_name, rc);
474 			GOTO(out_cleanup, rc);
481 	seq_server_proc_fini(seq);
483 #else /* !CONFIG_PROC_FS */
485 #endif /* CONFIG_PROC_FS */
/*
 * Tear down the /proc tree created by seq_server_proc_init().  Safe to call
 * with a NULL or error-valued lss_proc_dir (e.g. after a failed init).
 */
488 static void seq_server_proc_fini(struct lu_server_seq *seq)
490 #ifdef CONFIG_PROC_FS
492 	if (seq->lss_proc_dir != NULL) {
493 		if (!IS_ERR(seq->lss_proc_dir))
494 			lprocfs_remove(&seq->lss_proc_dir);
/* clear the pointer so a repeated fini is a no-op */
495 		seq->lss_proc_dir = NULL;
498 #endif /* CONFIG_PROC_FS */
/*
 * Initialize a server-side sequence manager of the given @type
 * (server or controller): set up ranges, widths, the name
 * ("srv-<prefix>" / "ctl-<prefix>"), the backing store, and /proc entries.
 * Previously stored state is read back; if none exists (-ENODATA), the
 * space is initialized to the default range and written out.  On failure
 * seq_server_fini() undoes the partial initialization.  Some parameter and
 * error-path lines are not visible in this excerpt.
 */
501 int seq_server_init(const struct lu_env *env,
502 		    struct lu_server_seq *seq,
503 		    struct dt_device *dev,
505 		    enum lu_mgr_type type,
506 		    struct seq_server_site *ss)
508 	int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
511 	LASSERT(dev != NULL);
512 	LASSERT(prefix != NULL);
514 	LASSERT(ss->ss_lu != NULL);
516 	/* A compile-time check for FIDs that used to be in lustre_idl.h
517 	 * but is moved here to remove CLASSERT/LASSERT in that header.
518 	 * Check all lu_fid fields are converted in fid_cpu_to_le() and friends
519 	 * and that there is no padding added by compiler to the struct. */
523 	CLASSERT(sizeof(tst) == sizeof(tst.f_seq) +
524 		 sizeof(tst.f_oid) + sizeof(tst.f_ver));
528 	seq->lss_type = type;
530 	lu_seq_range_init(&seq->lss_space);
532 	lu_seq_range_init(&seq->lss_lowater_set);
533 	lu_seq_range_init(&seq->lss_hiwater_set);
534 	seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;
536 	mutex_init(&seq->lss_mutex);
/* servers hand out meta-width ranges; controllers hand out super-width */
538 	seq->lss_width = is_srv ?
539 		LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
541 	snprintf(seq->lss_name, sizeof(seq->lss_name),
542 		 "%s-%s", (is_srv ? "srv" : "ctl"), prefix);
544 	rc = seq_store_init(seq, env, dev);
547 	/* Request backing store for saved sequence info. */
548 	rc = seq_store_read(seq, env);
549 	if (rc == -ENODATA) {
551 		/* Nothing is read, init by default value. */
552 		seq->lss_space = is_srv ?
553 			LUSTRE_SEQ_ZERO_RANGE:
554 			LUSTRE_SEQ_SPACE_RANGE;
556 		seq->lss_space.lsr_index = ss->ss_node_id;
557 		LCONSOLE_INFO("%s: No data found "
558 			      "on store. Initialize space\n",
/* write the freshly initialized default space to disk */
561 		rc = seq_store_update(env, seq, NULL, 0);
563 			CERROR("%s: Can't write space data, "
564 			       "rc %d\n", seq->lss_name, rc);
567 		CERROR("%s: Can't read space data, rc %d\n",
573 		LASSERT(lu_seq_range_is_sane(&seq->lss_space));
575 		LASSERT(!lu_seq_range_is_zero(&seq->lss_space) &&
576 			lu_seq_range_is_sane(&seq->lss_space));
579 	rc = seq_server_proc_init(seq);
586 		seq_server_fini(seq, env);
589 EXPORT_SYMBOL(seq_server_init);
/*
 * Finalize a server sequence: remove /proc entries, then release the
 * backing store.  Counterpart of seq_server_init().
 */
591 void seq_server_fini(struct lu_server_seq *seq,
592 		     const struct lu_env *env)
596 	seq_server_proc_fini(seq);
597 	seq_store_fini(seq, env);
601 EXPORT_SYMBOL(seq_server_fini);
/*
 * Release all sequence objects owned by the site: server sequence,
 * controller sequence and client sequence.  Each pointer is NULLed after
 * freeing so repeated calls are safe.
 */
603 int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss)
608 	if (ss->ss_server_seq) {
609 		seq_server_fini(ss->ss_server_seq, env);
610 		OBD_FREE_PTR(ss->ss_server_seq);
611 		ss->ss_server_seq = NULL;
614 	if (ss->ss_control_seq) {
615 		seq_server_fini(ss->ss_control_seq, env);
616 		OBD_FREE_PTR(ss->ss_control_seq);
617 		ss->ss_control_seq = NULL;
620 	if (ss->ss_client_seq) {
621 		seq_client_fini(ss->ss_client_seq);
622 		OBD_FREE_PTR(ss->ss_client_seq);
623 		ss->ss_client_seq = NULL;
628 EXPORT_SYMBOL(seq_site_fini);
/*
 * Module init: register the per-thread sequence context key.
 * Returns the lu_context_key_register() result.
 */
630 int fid_server_mod_init(void)
632 	LU_CONTEXT_KEY_INIT(&seq_thread_key);
633 	return lu_context_key_register(&seq_thread_key);
/* Module exit: deregister the per-thread sequence context key. */
636 void fid_server_mod_exit(void)
638 	lu_context_key_degister(&seq_thread_key);