4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/fid/fid_handler.c
38 * Lustre Sequence Manager
40 * Author: Yury Umanets <umka@clusterfs.com>
43 #define DEBUG_SUBSYSTEM S_FID
45 #include <libcfs/libcfs.h>
46 #include <linux/module.h>
48 #include <obd_class.h>
49 #include <dt_object.h>
50 #include <md_object.h>
51 #include <obd_support.h>
52 #include <lustre_req_layout.h>
53 #include <lustre_fid.h>
54 #include <lustre_mdt.h> /* err_serious() */
55 #include "fid_internal.h"
/* Forward declaration: used by seq_server_proc_init()'s error path before
 * the definition further down in this file. */
57 static void seq_server_proc_fini(struct lu_server_seq *seq);
59 /* Assigns client to sequence controller node. */
60 int seq_server_set_cli(struct lu_server_seq *seq,
61 struct lu_client_seq *cli,
62 const struct lu_env *env)
/* NOTE(review): the embedded line numbers jump (62->68, 75->80, 83->86,
 * 90->93), so this listing is missing lines: at least the opening brace,
 * a local 'rc', the 'cli == NULL' detach branch that the first CDEBUG
 * below belongs to, the closing of the -EEXIST branch, the 'lss_cli'
 * assignment, and the 'out_up' label -- confirm against the full file. */
68 * Ask client for new range, assign that range to ->seq_space and write
69 * seq state to backing store should be atomic.
/* Serialize attach/detach against concurrent sequence allocation. */
71 mutex_lock(&seq->lss_mutex);
74 CDEBUG(D_INFO, "%s: Detached sequence client %s\n",
75 seq->lss_name, cli->lcs_name);
/* Only one controller client may be attached to a server sequence. */
80 if (seq->lss_cli != NULL) {
81 CDEBUG(D_HA, "%s: Sequence controller is already "
82 "assigned\n", seq->lss_name);
83 GOTO(out_up, rc = -EEXIST);
86 CDEBUG(D_INFO, "%s: Attached sequence controller %s\n",
87 seq->lss_name, cli->lcs_name);
/* Tag the client's space with this node's id so allocated ranges carry
 * the requester's node index. */
90 cli->lcs_space.lsr_index = seq->lss_site->ss_node_id;
93 mutex_unlock(&seq->lss_mutex);
96 EXPORT_SYMBOL(seq_server_set_cli);
98 * allocate \a w units of sequence from range \a from.
100 static inline void range_alloc(struct lu_seq_range *to,
101 struct lu_seq_range *from,
/* NOTE(review): the listing skips lines 102-103 here -- the 'width'
 * parameter declaration and the opening brace are missing from this
 * extraction. */
/* Never hand out more than 'from' still contains. */
104 width = min(range_space(from), width);
/* Carve [lsr_start, lsr_start + width) off the front of 'from' into
 * 'to', advancing 'from' past the allocated span. */
105 to->lsr_start = from->lsr_start;
106 to->lsr_end = from->lsr_start + width;
107 from->lsr_start += width;
111 * On controller node, allocate new super sequence for regular sequence server.
112 * As the super sequence controller, this node is supposed to maintain fld
114 * \a out range always has the correct mds node number of the requester.
/* Internal worker for super-sequence allocation; presumably the caller
 * holds seq->lss_mutex (see seq_server_alloc_super below) -- confirm. */
117 static int __seq_server_alloc_super(struct lu_server_seq *seq,
118 struct lu_seq_range *out,
119 const struct lu_env *env)
/* NOTE(review): embedded numbers jump (119->121, 121->125, 128->132),
 * so the opening brace, an 'rc' declaration, and the error-return of the
 * exhausted branch are missing from this extraction. */
121 struct lu_seq_range *space = &seq->lss_space;
125 LASSERT(range_is_sane(space));
/* Nothing left to carve super sequences from: hard failure. */
127 if (range_is_exhausted(space)) {
128 CERROR("%s: Sequences space is exhausted\n",
/* Take lss_width units off the front of the remaining space. */
132 range_alloc(out, space, seq->lss_width);
/* Persist the shrunken space synchronously before handing the range
 * out, so a crash cannot re-issue the same super sequence. */
135 rc = seq_store_update(env, seq, out, 1 /* sync */);
137 LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n",
138 seq->lss_name, rc, PRANGE(out));
/* Public entry: allocate a super sequence for a regular sequence server.
 * Takes lss_mutex and delegates to __seq_server_alloc_super(). */
143 int seq_server_alloc_super(struct lu_server_seq *seq,
144 struct lu_seq_range *out,
145 const struct lu_env *env)
/* NOTE(review): opening brace and 'rc' declaration (lines 146-149) are
 * missing from this extraction, as is the trailing RETURN/brace. */
150 mutex_lock(&seq->lss_mutex);
151 rc = __seq_server_alloc_super(seq, out, env);
152 mutex_unlock(&seq->lss_mutex);
/* First-use initialization of the low/high water sets: carve both from
 * lss_space and persist the new space synchronously. */
157 static int __seq_set_init(const struct lu_env *env,
158 struct lu_server_seq *seq)
/* NOTE(review): opening brace and an 'rc' declaration are missing from
 * this extraction (embedded numbers jump 158->160, 166->172). */
160 struct lu_seq_range *space = &seq->lss_space;
163 range_alloc(&seq->lss_lowater_set, space, seq->lss_set_width);
164 range_alloc(&seq->lss_hiwater_set, space, seq->lss_set_width);
/* Sync write: both sets were just taken out of *space on disk. */
166 rc = seq_store_update(env, seq, NULL, 1);
172 * This function implements new seq allocation algorithm using async
173 * updates to seq file on disk. ref bug 18857 for details.
174 * there are four variables to keep track of this process:
176 * lss_space; - available lss_space
177 * lss_lowater_set; - lu_seq_range for all seqs before barrier, i.e. safe to use
178 * lss_hiwater_set; - lu_seq_range after barrier, i.e. allocated but may be
181 * when lss_lowater_set reaches the end it is replaced with hiwater one and
182 * a write operation is initiated to allocate new hiwater range.
183 * if the last seq write operation is still not committed, the current
184 * operation is flagged as a sync write op.
/* Allocate one lss_width-wide range for a client out of the low-water
 * set, refilling low/high water sets from lss_space as described in the
 * comment block above. Presumably called with lss_mutex held (all
 * callers go through seq_server_alloc_meta) -- confirm. */
186 static int range_alloc_set(const struct lu_env *env,
187 struct lu_seq_range *out,
188 struct lu_server_seq *seq)
/* NOTE(review): embedded line numbers jump throughout (188->190,
 * 192->195, 210->212, 215->217, 218->221, 222->225); the opening brace,
 * 'rc'/'set_sz' declarations, the loset = *hiset switch-over, and the
 * closing of the exhausted-branch are missing from this extraction. */
190 struct lu_seq_range *space = &seq->lss_space;
191 struct lu_seq_range *loset = &seq->lss_lowater_set;
192 struct lu_seq_range *hiset = &seq->lss_hiwater_set;
/* Lazy first-time setup of both water-mark sets. */
195 if (range_is_zero(loset))
196 __seq_set_init(env, seq);
198 if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
199 loset->lsr_start = loset->lsr_end;
201 if (range_is_exhausted(loset)) {
202 /* reached high water mark. */
203 struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
204 int obd_num_clients = dev->ld_obd->obd_num_exports;
207 /* calculate new seq width based on number of clients */
208 set_sz = max(seq->lss_set_width,
209 obd_num_clients * seq->lss_width);
/* Clamp to what is actually left in the space. */
210 set_sz = min(range_space(space), set_sz);
212 /* Switch to hiwater range now */
214 /* allocate new hiwater range */
215 range_alloc(hiset, space, set_sz);
217 /* update ondisk seq with new *space */
218 rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
221 LASSERTF(!range_is_exhausted(loset) || range_is_sane(loset),
222 DRANGE"\n", PRANGE(loset));
/* Hand the caller one client-sized range from the safe (low) set. */
225 range_alloc(out, loset, seq->lss_width);
/* Internal worker for meta-sequence allocation; refills lss_space from
 * the controller via the attached client when exhausted, then carves a
 * range with range_alloc_set(). Caller holds lss_mutex (see
 * seq_server_alloc_meta below). */
230 static int __seq_server_alloc_meta(struct lu_server_seq *seq,
231 struct lu_seq_range *out,
232 const struct lu_env *env)
/* NOTE(review): embedded numbers jump (232->234, 234->239, 244->249,
 * 251->256, 258->261, 263->268); the opening brace, 'rc', the
 * 'seq->lss_cli == NULL' check guarding the CERROR below, and the error
 * returns are missing from this extraction. */
234 struct lu_seq_range *space = &seq->lss_space;
239 LASSERT(range_is_sane(space));
241 /* Check if available space ends and allocate new super seq */
242 if (range_is_exhausted(space)) {
244 CERROR("%s: No sequence controller is attached.\n",
/* Ask the controller node for a fresh super sequence. */
249 rc = seq_client_alloc_super(seq->lss_cli, env);
251 CERROR("%s: Can't allocate super-sequence, rc %d\n",
256 /* Saving new range to allocation space. */
257 *space = seq->lss_cli->lcs_space;
258 LASSERT(range_is_sane(space));
261 rc = range_alloc_set(env, out, seq);
263 CERROR("%s: Allocated meta-sequence failed: rc = %d\n",
268 CDEBUG(D_INFO, "%s: Allocated meta-sequence " DRANGE"\n",
269 seq->lss_name, PRANGE(out));
/* Public entry: allocate a meta sequence for a client. Takes lss_mutex
 * and delegates to __seq_server_alloc_meta(). */
274 int seq_server_alloc_meta(struct lu_server_seq *seq,
275 struct lu_seq_range *out,
276 const struct lu_env *env)
/* NOTE(review): opening brace and 'rc' declaration (lines 277-280) plus
 * the trailing RETURN/brace are missing from this extraction. */
281 mutex_lock(&seq->lss_mutex);
282 rc = __seq_server_alloc_meta(seq, out, env);
283 mutex_unlock(&seq->lss_mutex);
287 EXPORT_SYMBOL(seq_server_alloc_meta);
/* Dispatch a sequence RPC opcode to the matching allocator on this
 * site: SEQ_ALLOC_META -> server seq, SEQ_ALLOC_SUPER -> control seq. */
289 static int seq_server_handle(struct lu_site *site,
290 const struct lu_env *env,
291 __u32 opc, struct lu_seq_range *out)
/* NOTE(review): embedded numbers jump (291->294, 297->301, 302->306,
 * 306->308, 310->314); the opening brace, 'rc', the 'switch (opc) {'
 * line, the SEQ_ALLOC_META case label, CERROR continuations, break
 * statements, and the default case are missing from this extraction. */
294 struct seq_server_site *ss_site;
297 ss_site = lu_site2seq(site);
/* Meta allocation requires a server sequence on this site. */
301 if (!ss_site->ss_server_seq) {
302 CERROR("Sequence server is not "
306 rc = seq_server_alloc_meta(ss_site->ss_server_seq, out, env);
308 case SEQ_ALLOC_SUPER:
/* Super allocation requires this node to be the controller. */
309 if (!ss_site->ss_control_seq) {
310 CERROR("Sequence controller is not "
314 rc = seq_server_alloc_super(ss_site->ss_control_seq, out, env);
/* Unpack a SEQ_QUERY RPC, copy the requester's node index/flags into the
 * reply range, and run the allocation via seq_server_handle(). */
324 static int seq_req_handle(struct ptlrpc_request *req,
325 const struct lu_env *env,
326 struct seq_thread_info *info)
/* NOTE(review): embedded numbers jump (326->328, 329->334, 336->338,
 * 338->340, 340->342, 342->344, 344->346, 346->348, 350->353, 355->357);
 * the opening brace, '__u32 *opc' and 'rc' declarations, the 'rc != 0'
 * / NULL-pointer checks guarding the RETURNs below, and the 'opc != NULL'
 * else-branch are missing from this extraction. */
328 struct lu_seq_range *out, *tmp;
329 struct lu_site *site;
/* Sequence RPCs are never replayed (transno is cleared in seq_handle). */
334 LASSERT(!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY));
335 site = req->rq_export->exp_obd->obd_lu_dev->ld_site;
336 LASSERT(site != NULL);
338 rc = req_capsule_server_pack(info->sti_pill);
340 RETURN(err_serious(rc));
342 opc = req_capsule_client_get(info->sti_pill, &RMF_SEQ_OPC);
344 out = req_capsule_server_get(info->sti_pill, &RMF_SEQ_RANGE);
346 RETURN(err_serious(-EPROTO));
348 tmp = req_capsule_client_get(info->sti_pill, &RMF_SEQ_RANGE);
350 /* seq client passed mdt id, we need to pass that using out
353 out->lsr_index = tmp->lsr_index;
354 out->lsr_flags = tmp->lsr_flags;
355 rc = seq_server_handle(site, env, *opc, out);
/* Missing/garbled opcode in the request: protocol error. */
357 rc = err_serious(-EPROTO);
362 /* context key constructor/destructor: seq_key_init, seq_key_fini */
363 LU_KEY_INIT_FINI(seq, struct seq_thread_info);
/* Defines 'seq_thread_key': per-thread seq_thread_info storage, valid
 * in both MD and DT service thread contexts. */
365 /* context key: seq_thread_key */
366 LU_CONTEXT_KEY_DEFINE(seq, LCT_MD_THREAD | LCT_DT_THREAD);
/* Bind the per-thread info to this request's capsule and set the
 * SEQ_QUERY request format for unpacking. */
368 static void seq_thread_info_init(struct ptlrpc_request *req,
369 struct seq_thread_info *info)
/* NOTE(review): opening brace (line 370) and closing brace are missing
 * from this extraction. */
371 info->sti_pill = &req->rq_pill;
372 /* Init request capsule */
373 req_capsule_init(info->sti_pill, req, RCL_SERVER);
374 req_capsule_set(info->sti_pill, &RQF_SEQ_QUERY);
/* Release the request capsule set up by seq_thread_info_init(). */
377 static void seq_thread_info_fini(struct seq_thread_info *info)
/* NOTE(review): opening/closing braces (lines 378/380) are missing from
 * this extraction. */
379 req_capsule_fini(info->sti_pill);
/* Top-level ptlrpc handler for sequence RPCs: fetch the per-thread info
 * from the env context, run seq_req_handle(), and clear the transno so
 * the request is never replayed. */
382 int seq_handle(struct ptlrpc_request *req)
/* NOTE(review): opening brace, 'rc' declaration, and the trailing
 * RETURN/brace (embedded numbers jump 385->388, 399->403) are missing
 * from this extraction. */
384 const struct lu_env *env;
385 struct seq_thread_info *info;
388 env = req->rq_svc_thread->t_env;
389 LASSERT(env != NULL);
391 info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
392 LASSERT(info != NULL);
394 seq_thread_info_init(req, info);
395 rc = seq_req_handle(req, env, info);
396 /* XXX: we don't need replay but MDT assign transno in any case,
397 * remove it manually before reply*/
398 lustre_msg_set_transno(req->rq_repmsg, 0);
399 seq_thread_info_fini(info);
403 EXPORT_SYMBOL(seq_handle);
406 * Entry point for handling FLD RPCs called from MDT.
408 int seq_query(struct com_thread_info *info)
/* Thin wrapper: forward the embedded ptlrpc request to seq_handle().
 * NOTE(review): opening/closing braces are missing from this extraction. */
410 return seq_handle(info->cti_pill->rc_req);
412 EXPORT_SYMBOL(seq_query);
/* Register the lprocfs directory and variables for this server sequence.
 * On lprocfs_add_vars failure, tears everything down via
 * seq_server_proc_fini(). */
414 static int seq_server_proc_init(struct lu_server_seq *seq)
/* NOTE(review): embedded numbers jump (414->421, 421->424, 425->429,
 * 434->440); the opening brace, 'rc', the lprocfs_register parent/data
 * arguments, the error CERROR for registration failure, the success
 * RETURN, and the out_cleanup label are missing from this extraction. */
421 seq->lss_proc_dir = lprocfs_register(seq->lss_name,
424 if (IS_ERR(seq->lss_proc_dir)) {
425 rc = PTR_ERR(seq->lss_proc_dir);
429 rc = lprocfs_add_vars(seq->lss_proc_dir,
430 seq_server_proc_list, seq);
432 CERROR("%s: Can't init sequence manager "
433 "proc, rc %d\n", seq->lss_name, rc);
434 GOTO(out_cleanup, rc);
/* Error path: undo the directory registration above. */
440 seq_server_proc_fini(seq);
/* Remove this sequence's lprocfs directory, tolerating both the
 * never-registered (NULL) and failed-registration (IS_ERR) states. */
447 static void seq_server_proc_fini(struct lu_server_seq *seq)
/* NOTE(review): opening/closing braces are missing from this
 * extraction (embedded numbers jump 447->451, 454->460). */
451 if (seq->lss_proc_dir != NULL) {
452 if (!IS_ERR(seq->lss_proc_dir))
453 lprocfs_remove(&seq->lss_proc_dir);
/* Reset so a repeated fini is a no-op. */
454 seq->lss_proc_dir = NULL;
/* Initialize a server sequence: zero its ranges, set widths, create the
 * backing store, read (or default-initialize) the persisted space, and
 * register the proc entries. On failure, cleans up via seq_server_fini(). */
460 int seq_server_init(struct lu_server_seq *seq,
461 struct dt_device *dev,
463 enum lu_mgr_type type,
464 struct seq_server_site *ss,
465 const struct lu_env *env)
/* NOTE(review): embedded numbers jump in many places (461->463, 465->467,
 * 471->473, 473->476, 476->478, 478->480, 490->492, 492->495, 502->505,
 * 507->510, 513->516, 516->522, 528->535); the opening brace, the 'seq'
 * and 'ss' LASSERTs, lss_cli/lss_site initialization, several GOTO error
 * branches, the RETURN, and the closing brace are missing from this
 * extraction -- confirm against the full file. */
467 int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
470 LASSERT(dev != NULL);
471 LASSERT(prefix != NULL);
473 LASSERT(ss->ss_lu != NULL);
476 seq->lss_type = type;
/* Start with empty ranges; real values come from the store below. */
478 range_init(&seq->lss_space);
480 range_init(&seq->lss_lowater_set);
481 range_init(&seq->lss_hiwater_set);
482 seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;
484 mutex_init(&seq->lss_mutex);
/* A server hands out meta-width ranges; a controller, super-width. */
486 seq->lss_width = is_srv ?
487 LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
489 snprintf(seq->lss_name, sizeof(seq->lss_name),
490 "%s-%s", (is_srv ? "srv" : "ctl"), prefix);
492 rc = seq_store_init(seq, env, dev);
495 /* Request backing store for saved sequence info. */
496 rc = seq_store_read(seq, env);
497 if (rc == -ENODATA) {
499 /* Nothing is read, init by default value. */
500 seq->lss_space = is_srv ?
501 LUSTRE_SEQ_ZERO_RANGE:
502 LUSTRE_SEQ_SPACE_RANGE;
505 seq->lss_space.lsr_index = ss->ss_node_id;
506 LCONSOLE_INFO("%s: No data found "
507 "on store. Initialize space\n",
/* Persist the freshly defaulted space (async is fine here). */
510 rc = seq_store_update(env, seq, NULL, 0);
512 CERROR("%s: Can't write space data, "
513 "rc %d\n", seq->lss_name, rc);
516 CERROR("%s: Can't read space data, rc %d\n",
522 LASSERT(range_is_sane(&seq->lss_space));
/* A controller's space must additionally be non-empty. */
524 LASSERT(!range_is_zero(&seq->lss_space) &&
525 range_is_sane(&seq->lss_space));
528 rc = seq_server_proc_init(seq);
/* Error path: release everything initialized so far. */
535 seq_server_fini(seq, env);
/* Tear down a server sequence: remove proc entries, then release the
 * backing store. Safe counterpart of seq_server_init(). */
540 void seq_server_fini(struct lu_server_seq *seq,
541 const struct lu_env *env)
/* NOTE(review): opening brace, ENTRY/EXIT, and closing brace are missing
 * from this extraction (embedded numbers jump 541->545, 546->550). */
545 seq_server_proc_fini(seq);
546 seq_store_fini(seq, env);
550 EXPORT_SYMBOL(seq_server_fini);
/* Release every sequence object attached to a site (server, controller,
 * client), freeing each and NULLing the pointer to prevent reuse. */
552 int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss)
/* NOTE(review): opening brace, the 'ss == NULL' guard, the return value,
 * and the closing brace are missing from this extraction (embedded
 * numbers jump 552->557, 560->563, 566->569, 572->577). */
557 if (ss->ss_server_seq) {
558 seq_server_fini(ss->ss_server_seq, env);
559 OBD_FREE_PTR(ss->ss_server_seq);
560 ss->ss_server_seq = NULL;
563 if (ss->ss_control_seq) {
564 seq_server_fini(ss->ss_control_seq, env);
565 OBD_FREE_PTR(ss->ss_control_seq);
566 ss->ss_control_seq = NULL;
569 if (ss->ss_client_seq) {
570 seq_client_fini(ss->ss_client_seq);
571 OBD_FREE_PTR(ss->ss_client_seq);
572 ss->ss_client_seq = NULL;
577 EXPORT_SYMBOL(seq_site_fini);
/* Module init: register the per-thread context key used by seq_handle().
 * Returns the lu_context_key_register() result. */
579 int fid_server_mod_init(void)
/* NOTE(review): opening/closing braces (lines 580/583) are missing from
 * this extraction. */
581 LU_CONTEXT_KEY_INIT(&seq_thread_key);
582 return lu_context_key_register(&seq_thread_key);
/* Module exit: unregister the per-thread context key registered in
 * fid_server_mod_init(). */
585 void fid_server_mod_exit(void)
/* NOTE(review): opening/closing braces (lines 586/588) are missing from
 * this extraction. */
587 lu_context_key_degister(&seq_thread_key);