X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Ffid%2Ffid_handler.c;h=537ea6d152d2bc8254a16f6ff1504a79e4ee53e6;hb=cc6ef11d2f972ebc440013bddda87a536a09750c;hp=73a20a7538656d1484303677599ce751212ee277;hpb=fd908da92ccd9aab4ffc3d2463301831260c0474;p=fs%2Flustre-release.git

diff --git a/lustre/fid/fid_handler.c b/lustre/fid/fid_handler.c
index 73a20a7..537ea6d 100644
--- a/lustre/fid/fid_handler.c
+++ b/lustre/fid/fid_handler.c
@@ -1,57 +1,58 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * lustre/fid/fid_handler.c
- * Lustre Sequence Manager
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * Copyright (c) 2006 Cluster File Systems, Inc.
- * Author: Yury Umanets
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
  *
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * Copyright (c) 2011, 2015, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
  *
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * lustre/fid/fid_handler.c
  *
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * Lustre Sequence Manager
  *
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * Author: Yury Umanets
  */
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_FID
-#ifdef __KERNEL__
-# include
-# include
-#else /* __KERNEL__ */
-# include
-#endif
-
+#include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include "fid_internal.h"
-#ifdef __KERNEL__
+static void seq_server_proc_fini(struct lu_server_seq *seq);
+
 /* Assigns client to sequence controller node. 
*/ -int seq_server_set_cli(struct lu_server_seq *seq, - struct lu_client_seq *cli, - const struct lu_env *env) +int seq_server_set_cli(const struct lu_env *env, struct lu_server_seq *seq, + struct lu_client_seq *cli) { int rc = 0; ENTRY; @@ -60,381 +61,440 @@ int seq_server_set_cli(struct lu_server_seq *seq, * Ask client for new range, assign that range to ->seq_space and write * seq state to backing store should be atomic. */ - down(&seq->lss_sem); + mutex_lock(&seq->lss_mutex); - if (cli == NULL) { - CDEBUG(D_INFO, "%s: Detached sequence client %s\n", - seq->lss_name, cli->lcs_name); - seq->lss_cli = cli; - GOTO(out_up, rc = 0); - } + if (cli == NULL) { + CDEBUG(D_INFO, "%s: Detached sequence client\n", seq->lss_name); + seq->lss_cli = NULL; + GOTO(out_up, rc = 0); + } - if (seq->lss_cli != NULL) { - CERROR("%s: Sequence controller is already " - "assigned\n", seq->lss_name); - GOTO(out_up, rc = -EINVAL); - } + if (seq->lss_cli != NULL) { + CDEBUG(D_HA, "%s: Sequence controller is already " + "assigned\n", seq->lss_name); + GOTO(out_up, rc = -EEXIST); + } CDEBUG(D_INFO, "%s: Attached sequence controller %s\n", seq->lss_name, cli->lcs_name); - seq->lss_cli = cli; - EXIT; + seq->lss_cli = cli; + cli->lcs_space.lsr_index = seq->lss_site->ss_node_id; + EXIT; out_up: - up(&seq->lss_sem); + mutex_unlock(&seq->lss_mutex); return rc; } EXPORT_SYMBOL(seq_server_set_cli); - /* + * allocate \a w units of sequence from range \a from. + */ +static inline void range_alloc(struct lu_seq_range *to, + struct lu_seq_range *from, + __u64 width) +{ + width = min(lu_seq_range_space(from), width); + to->lsr_start = from->lsr_start; + to->lsr_end = from->lsr_start + width; + from->lsr_start += width; +} + +/** * On controller node, allocate new super sequence for regular sequence server. + * As this super sequence controller, this node suppose to maintain fld + * and update index. + * \a out range always has currect mds node number of requester. */ + static int __seq_server_alloc_super(struct lu_server_seq *seq, - struct lu_range *in, - struct lu_range *out, + struct lu_seq_range *out, const struct lu_env *env) { - struct lu_range *space = &seq->lss_space; - int rc; - ENTRY; - - LASSERT(range_is_sane(space)); - - if (in != NULL) { - CDEBUG(D_INFO, "%s: Input seq range: " - DRANGE"\n", seq->lss_name, PRANGE(in)); - - if (in->lr_end > space->lr_start) - space->lr_start = in->lr_end; - *out = *in; - - CDEBUG(D_INFO, "%s: Recovered space: "DRANGE"\n", - seq->lss_name, PRANGE(space)); - } else { - if (range_space(space) < seq->lss_width) { - CWARN("%s: Sequences space to be exhausted soon. 
" - "Only "LPU64" sequences left\n", seq->lss_name, - range_space(space)); - *out = *space; - space->lr_start = space->lr_end; - } else if (range_is_exhausted(space)) { - CERROR("%s: Sequences space is exhausted\n", - seq->lss_name); - RETURN(-ENOSPC); - } else { - range_alloc(out, space, seq->lss_width); - } - } + struct lu_seq_range *space = &seq->lss_space; + int rc; + ENTRY; + + LASSERT(lu_seq_range_is_sane(space)); + + if (lu_seq_range_is_exhausted(space)) { + CERROR("%s: Sequences space is exhausted\n", + seq->lss_name); + RETURN(-ENOSPC); + } else { + range_alloc(out, space, seq->lss_width); + } - rc = seq_store_write(seq, env); - if (rc) { - CERROR("%s: Can't write space data, rc %d\n", - seq->lss_name, rc); - RETURN(rc); - } + rc = seq_store_update(env, seq, out, 1 /* sync */); - CDEBUG(D_INFO, "%s: Allocated super-sequence " - DRANGE"\n", seq->lss_name, PRANGE(out)); + LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n", + seq->lss_name, rc, PRANGE(out)); - RETURN(rc); + RETURN(rc); } int seq_server_alloc_super(struct lu_server_seq *seq, - struct lu_range *in, - struct lu_range *out, + struct lu_seq_range *out, const struct lu_env *env) { int rc; ENTRY; - down(&seq->lss_sem); - rc = __seq_server_alloc_super(seq, in, out, env); - up(&seq->lss_sem); + mutex_lock(&seq->lss_mutex); + rc = __seq_server_alloc_super(seq, out, env); + mutex_unlock(&seq->lss_mutex); RETURN(rc); } -static int __seq_server_alloc_meta(struct lu_server_seq *seq, - struct lu_range *in, - struct lu_range *out, - const struct lu_env *env) +int seq_server_alloc_spec(struct lu_server_seq *seq, + struct lu_seq_range *spec, + const struct lu_env *env) { - struct lu_range *space = &seq->lss_space; - int rc = 0; - ENTRY; - - LASSERT(range_is_sane(space)); - - /* - * This is recovery case. Adjust super range if input range looks like - * it is allocated from new super. - */ - if (in != NULL) { - CDEBUG(D_INFO, "%s: Input seq range: " - DRANGE"\n", seq->lss_name, PRANGE(in)); - - if (range_is_exhausted(space)) { - /* - * Server cannot send empty range to client, this is why - * we check here that range from client is "newer" than - * exhausted super. - */ - LASSERT(in->lr_end > space->lr_start); - - /* - * Start is set to end of last allocated, because it - * *is* already allocated so we take that into account - * and do not use for other allocations. - */ - space->lr_start = in->lr_end; - - /* - * End is set to in->lr_start + super sequence - * allocation unit. That is because in->lr_start is - * first seq in new allocated range from controller - * before failure. - */ - space->lr_end = in->lr_start + LUSTRE_SEQ_SUPER_WIDTH; - - if (!seq->lss_cli) { - CERROR("%s: No sequence controller " - "is attached.\n", seq->lss_name); - RETURN(-ENODEV); - } - - /* - * Let controller know that this is recovery and last - * obtained range from it was @space. - */ - rc = seq_client_replay_super(seq->lss_cli, space, env); - if (rc) { - CERROR("%s: Can't replay super-sequence, " - "rc %d\n", seq->lss_name, rc); - RETURN(rc); - } - } else { - /* - * Update super start by end from client's range. Super - * end should not be changed if range was not exhausted. 
- */ - if (in->lr_end > space->lr_start) - space->lr_start = in->lr_end; - } + struct lu_seq_range *space = &seq->lss_space; + int rc = -ENOSPC; + ENTRY; + + /* + * In some cases (like recovery after a disaster) + * we may need to allocate sequences manually + * Notice some sequences can be lost if requested + * range doesn't start at the beginning of current + * free space. Also notice it's not possible now + * to allocate sequences out of natural order. + */ + if (spec->lsr_start >= spec->lsr_end) + RETURN(-EINVAL); + if (spec->lsr_flags != LU_SEQ_RANGE_MDT && + spec->lsr_flags != LU_SEQ_RANGE_OST) + RETURN(-EINVAL); + + mutex_lock(&seq->lss_mutex); + if (spec->lsr_start >= space->lsr_start) { + space->lsr_start = spec->lsr_end; + rc = seq_store_update(env, seq, spec, 1 /* sync */); + + LCONSOLE_INFO("%s: "DRANGE" sequences allocated: rc = %d \n", + seq->lss_name, PRANGE(spec), rc); + } + mutex_unlock(&seq->lss_mutex); - *out = *in; - - CDEBUG(D_INFO, "%s: Recovered space: "DRANGE"\n", - seq->lss_name, PRANGE(space)); - } else { - /* - * XXX: Avoid cascading RPCs using kind of async preallocation - * when meta-sequence is close to exhausting. - */ - if (range_is_exhausted(space)) { - if (!seq->lss_cli) { - CERROR("%s: No sequence controller " - "is attached.\n", seq->lss_name); - RETURN(-ENODEV); - } - - rc = seq_client_alloc_super(seq->lss_cli, env); - if (rc) { - CERROR("%s: Can't allocate super-sequence, " - "rc %d\n", seq->lss_name, rc); - RETURN(rc); - } - - /* Saving new range to allocation space. */ - *space = seq->lss_cli->lcs_space; - LASSERT(range_is_sane(space)); - } + RETURN(rc); +} - range_alloc(out, space, seq->lss_width); - } +static int __seq_set_init(const struct lu_env *env, + struct lu_server_seq *seq) +{ + struct lu_seq_range *space = &seq->lss_space; + int rc; - rc = seq_store_write(seq, env); - if (rc) { - CERROR("%s: Can't write space data, rc %d\n", - seq->lss_name, rc); - } + range_alloc(&seq->lss_lowater_set, space, seq->lss_set_width); + range_alloc(&seq->lss_hiwater_set, space, seq->lss_set_width); - if (rc == 0) { - CDEBUG(D_INFO, "%s: Allocated meta-sequence " - DRANGE"\n", seq->lss_name, PRANGE(out)); - } + rc = seq_store_update(env, seq, NULL, 1); - RETURN(rc); + return rc; } -int seq_server_alloc_meta(struct lu_server_seq *seq, - struct lu_range *in, - struct lu_range *out, - const struct lu_env *env) +/* + * This function implements new seq allocation algorithm using async + * updates to seq file on disk. ref bug 18857 for details. + * there are four variable to keep track of this process + * + * lss_space; - available lss_space + * lss_lowater_set; - lu_seq_range for all seqs before barrier, i.e. safe to use + * lss_hiwater_set; - lu_seq_range after barrier, i.e. allocated but may be + * not yet committed + * + * when lss_lowater_set reaches the end it is replaced with hiwater one and + * a write operation is initiated to allocate new hiwater range. + * if last seq write opearion is still not committed, current operation is + * flaged as sync write op. 
+ */ +static int range_alloc_set(const struct lu_env *env, + struct lu_seq_range *out, + struct lu_server_seq *seq) { - int rc; - ENTRY; + struct lu_seq_range *space = &seq->lss_space; + struct lu_seq_range *loset = &seq->lss_lowater_set; + struct lu_seq_range *hiset = &seq->lss_hiwater_set; + int rc = 0; + + if (lu_seq_range_is_zero(loset)) + __seq_set_init(env, seq); + + if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */ + loset->lsr_start = loset->lsr_end; + + if (lu_seq_range_is_exhausted(loset)) { + /* reached high water mark. */ + struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev; + int obd_num_clients = dev->ld_obd->obd_num_exports; + __u64 set_sz; + + /* calculate new seq width based on number of clients */ + set_sz = max(seq->lss_set_width, + obd_num_clients * seq->lss_width); + set_sz = min(lu_seq_range_space(space), set_sz); + + /* Switch to hiwater range now */ + *loset = *hiset; + /* allocate new hiwater range */ + range_alloc(hiset, space, set_sz); + + /* update ondisk seq with new *space */ + rc = seq_store_update(env, seq, NULL, seq->lss_need_sync); + } - down(&seq->lss_sem); - rc = __seq_server_alloc_meta(seq, in, out, env); - up(&seq->lss_sem); + LASSERTF(!lu_seq_range_is_exhausted(loset) || + lu_seq_range_is_sane(loset), + DRANGE"\n", PRANGE(loset)); - RETURN(rc); + if (rc == 0) + range_alloc(out, loset, seq->lss_width); + + RETURN(rc); } -EXPORT_SYMBOL(seq_server_alloc_meta); -static int seq_server_handle(struct lu_site *site, - const struct lu_env *env, - __u32 opc, struct lu_range *in, - struct lu_range *out) +/** + * Check if the sequence server has sequence avaible + * + * Check if the sequence server has sequence avaible, if not, then + * allocating super sequence from sequence manager (MDT0). + * + * \param[in] env execution environment + * \param[in] seq server sequence + * + * \retval negative errno if allocating new sequence fails + * \retval 0 if there is enough sequence or allocating + * new sequence succeeds + */ +int seq_server_check_and_alloc_super(const struct lu_env *env, + struct lu_server_seq *seq) { - int rc; - ENTRY; + struct lu_seq_range *space = &seq->lss_space; + int rc = 0; + + ENTRY; + + /* Check if available space ends and allocate new super seq */ + if (lu_seq_range_is_exhausted(space)) { + if (!seq->lss_cli) { + CERROR("%s: No sequence controller is attached.\n", + seq->lss_name); + RETURN(-ENODEV); + } + + rc = seq_client_alloc_super(seq->lss_cli, env); + if (rc) { + CDEBUG(D_HA, "%s: Can't allocate super-sequence:" + " rc %d\n", seq->lss_name, rc); + RETURN(rc); + } + + /* Saving new range to allocation space. 
*/ + *space = seq->lss_cli->lcs_space; + LASSERT(lu_seq_range_is_sane(space)); + if (seq->lss_cli->lcs_srv == NULL) { + struct lu_server_fld *fld; + + /* Insert it to the local FLDB */ + fld = seq->lss_site->ss_server_fld; + mutex_lock(&fld->lsf_lock); + rc = fld_insert_entry(env, fld, space); + mutex_unlock(&fld->lsf_lock); + } + } - switch (opc) { - case SEQ_ALLOC_META: - if (!site->ls_server_seq) { - CERROR("Sequence server is not " - "initialized\n"); - RETURN(-EINVAL); - } - rc = seq_server_alloc_meta(site->ls_server_seq, - in, out, env); - break; - case SEQ_ALLOC_SUPER: - if (!site->ls_control_seq) { - CERROR("Sequence controller is not " - "initialized\n"); - RETURN(-EINVAL); - } - rc = seq_server_alloc_super(site->ls_control_seq, - in, out, env); - break; - default: - rc = -EINVAL; - break; - } + if (lu_seq_range_is_zero(&seq->lss_lowater_set)) + __seq_set_init(env, seq); - RETURN(rc); + RETURN(rc); } +EXPORT_SYMBOL(seq_server_check_and_alloc_super); -static int seq_req_handle(struct ptlrpc_request *req, - const struct lu_env *env, - struct seq_thread_info *info) +static int __seq_server_alloc_meta(struct lu_server_seq *seq, + struct lu_seq_range *out, + const struct lu_env *env) { - struct lu_range *out, *in = NULL; - struct lu_site *site; - int rc = -EPROTO; - __u32 *opc; - ENTRY; + struct lu_seq_range *space = &seq->lss_space; + int rc = 0; - site = req->rq_export->exp_obd->obd_lu_dev->ld_site; - LASSERT(site != NULL); - - rc = req_capsule_server_pack(info->sti_pill); - if (rc) - RETURN(err_serious(rc)); + ENTRY; - opc = req_capsule_client_get(info->sti_pill, &RMF_SEQ_OPC); - if (opc != NULL) { - out = req_capsule_server_get(info->sti_pill, &RMF_SEQ_RANGE); - if (out == NULL) - RETURN(err_serious(-EPROTO)); + LASSERT(lu_seq_range_is_sane(space)); - if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) { - in = req_capsule_client_get(info->sti_pill, - &RMF_SEQ_RANGE); + rc = seq_server_check_and_alloc_super(env, seq); + if (rc < 0) { + CERROR("%s: Allocated super-sequence failed: rc = %d\n", + seq->lss_name, rc); + RETURN(rc); + } - LASSERT(!range_is_zero(in) && range_is_sane(in)); - } + rc = range_alloc_set(env, out, seq); + if (rc != 0) { + CERROR("%s: Allocated meta-sequence failed: rc = %d\n", + seq->lss_name, rc); + RETURN(rc); + } - rc = seq_server_handle(site, env, *opc, in, out); - } else - rc = err_serious(-EPROTO); + CDEBUG(D_INFO, "%s: Allocated meta-sequence " DRANGE"\n", + seq->lss_name, PRANGE(out)); - RETURN(rc); + RETURN(rc); } -/* context key constructor/destructor: seq_key_init, seq_key_fini */ -LU_KEY_INIT_FINI(seq, struct seq_thread_info); +int seq_server_alloc_meta(struct lu_server_seq *seq, + struct lu_seq_range *out, + const struct lu_env *env) +{ + int rc; + ENTRY; -/* context key: seq_thread_key */ -LU_CONTEXT_KEY_DEFINE(seq, LCT_MD_THREAD); + mutex_lock(&seq->lss_mutex); + rc = __seq_server_alloc_meta(seq, out, env); + mutex_unlock(&seq->lss_mutex); -static void seq_thread_info_init(struct ptlrpc_request *req, - struct seq_thread_info *info) -{ - info->sti_pill = &req->rq_pill; - /* Init request capsule */ - req_capsule_init(info->sti_pill, req, RCL_SERVER); - req_capsule_set(info->sti_pill, &RQF_SEQ_QUERY); + RETURN(rc); } +EXPORT_SYMBOL(seq_server_alloc_meta); -static void seq_thread_info_fini(struct seq_thread_info *info) +static int seq_server_handle(struct lu_site *site, + const struct lu_env *env, + __u32 opc, struct lu_seq_range *out) { - req_capsule_fini(info->sti_pill); + int rc; + struct seq_server_site *ss_site; + struct dt_device *dev; + 
ENTRY; + + ss_site = lu_site2seq(site); + + switch (opc) { + case SEQ_ALLOC_META: + if (!ss_site->ss_server_seq) { + CERROR("Sequence server is not " + "initialized\n"); + RETURN(-EINVAL); + } + + dev = lu2dt_dev(ss_site->ss_server_seq->lss_obj->do_lu.lo_dev); + if (dev->dd_rdonly) + RETURN(-EROFS); + + rc = seq_server_alloc_meta(ss_site->ss_server_seq, out, env); + break; + case SEQ_ALLOC_SUPER: + if (!ss_site->ss_control_seq) { + CERROR("Sequence controller is not " + "initialized\n"); + RETURN(-EINVAL); + } + + dev = lu2dt_dev(ss_site->ss_control_seq->lss_obj->do_lu.lo_dev); + if (dev->dd_rdonly) + RETURN(-EROFS); + + rc = seq_server_alloc_super(ss_site->ss_control_seq, out, env); + break; + default: + rc = -EINVAL; + break; + } + + RETURN(rc); } -static int seq_handle(struct ptlrpc_request *req) +static int seq_handler(struct tgt_session_info *tsi) { - const struct lu_env *env; - struct seq_thread_info *info; - int rc; + struct lu_seq_range *out, *tmp; + struct lu_site *site; + int rc; + __u32 *opc; - env = req->rq_svc_thread->t_env; - LASSERT(env != NULL); + ENTRY; - info = lu_context_key_get(&env->le_ctx, &seq_thread_key); - LASSERT(info != NULL); + LASSERT(!(lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY)); + site = tsi->tsi_exp->exp_obd->obd_lu_dev->ld_site; + LASSERT(site != NULL); - seq_thread_info_init(req, info); - rc = seq_req_handle(req, env, info); - seq_thread_info_fini(info); + opc = req_capsule_client_get(tsi->tsi_pill, &RMF_SEQ_OPC); + if (opc != NULL) { + out = req_capsule_server_get(tsi->tsi_pill, &RMF_SEQ_RANGE); + if (out == NULL) + RETURN(err_serious(-EPROTO)); - return rc; -} + tmp = req_capsule_client_get(tsi->tsi_pill, &RMF_SEQ_RANGE); -/* - * Entry point for handling FLD RPCs called from MDT. - */ -int seq_query(struct com_thread_info *info) -{ - return seq_handle(info->cti_pill->rc_req); + /* seq client passed mdt id, we need to pass that using out + * range parameter */ + + out->lsr_index = tmp->lsr_index; + out->lsr_flags = tmp->lsr_flags; + rc = seq_server_handle(site, tsi->tsi_env, *opc, out); + } else { + rc = err_serious(-EPROTO); + } + + RETURN(rc); } -EXPORT_SYMBOL(seq_query); -static void seq_server_proc_fini(struct lu_server_seq *seq); +struct tgt_handler seq_handlers[] = { +TGT_SEQ_HDL(HABEO_REFERO, SEQ_QUERY, seq_handler), +}; +EXPORT_SYMBOL(seq_handlers); + +/* context key constructor/destructor: seq_key_init, seq_key_fini */ +LU_KEY_INIT_FINI(seq, struct seq_thread_info); + +/* context key: seq_thread_key */ +LU_CONTEXT_KEY_DEFINE(seq, LCT_MD_THREAD | LCT_DT_THREAD); + +extern const struct file_operations seq_fld_proc_seq_fops; -#ifdef LPROCFS static int seq_server_proc_init(struct lu_server_seq *seq) { - int rc; - ENTRY; +#ifdef CONFIG_PROC_FS + int rc; + ENTRY; + + seq->lss_proc_dir = lprocfs_register(seq->lss_name, + seq_type_proc_dir, + NULL, NULL); + if (IS_ERR(seq->lss_proc_dir)) { + rc = PTR_ERR(seq->lss_proc_dir); + RETURN(rc); + } - seq->lss_proc_dir = lprocfs_register(seq->lss_name, - seq_type_proc_dir, - NULL, NULL); - if (IS_ERR(seq->lss_proc_dir)) { - rc = PTR_ERR(seq->lss_proc_dir); - RETURN(rc); - } + rc = lprocfs_add_vars(seq->lss_proc_dir, seq_server_proc_list, seq); + if (rc) { + CERROR("%s: Can't init sequence manager " + "proc, rc %d\n", seq->lss_name, rc); + GOTO(out_cleanup, rc); + } - rc = lprocfs_add_vars(seq->lss_proc_dir, - seq_server_proc_list, seq); - if (rc) { - CERROR("%s: Can't init sequence manager " - "proc, rc %d\n", seq->lss_name, rc); - GOTO(out_cleanup, rc); - } + if (seq->lss_type == 
LUSTRE_SEQ_CONTROLLER) { + rc = lprocfs_seq_create(seq->lss_proc_dir, "fldb", 0644, + &seq_fld_proc_seq_fops, seq); + if (rc) { + CERROR("%s: Can't create fldb for sequence manager " + "proc: rc = %d\n", seq->lss_name, rc); + GOTO(out_cleanup, rc); + } + } - RETURN(0); + RETURN(0); out_cleanup: - seq_server_proc_fini(seq); - return rc; + seq_server_proc_fini(seq); + return rc; +#else /* !CONFIG_PROC_FS */ + return 0; +#endif /* CONFIG_PROC_FS */ } static void seq_server_proc_fini(struct lu_server_seq *seq) { +#ifdef CONFIG_PROC_FS ENTRY; if (seq->lss_proc_dir != NULL) { if (!IS_ERR(seq->lss_proc_dir)) @@ -442,35 +502,45 @@ static void seq_server_proc_fini(struct lu_server_seq *seq) seq->lss_proc_dir = NULL; } EXIT; +#endif /* CONFIG_PROC_FS */ } -#else -static int seq_server_proc_init(struct lu_server_seq *seq) -{ - return 0; -} - -static void seq_server_proc_fini(struct lu_server_seq *seq) -{ - return; -} -#endif -int seq_server_init(struct lu_server_seq *seq, - struct dt_device *dev, - const char *prefix, - enum lu_mgr_type type, - const struct lu_env *env) +int seq_server_init(const struct lu_env *env, + struct lu_server_seq *seq, + struct dt_device *dev, + const char *prefix, + enum lu_mgr_type type, + struct seq_server_site *ss) { - int rc, is_srv = (type == LUSTRE_SEQ_SERVER); - ENTRY; + int rc, is_srv = (type == LUSTRE_SEQ_SERVER); + ENTRY; LASSERT(dev != NULL); - LASSERT(prefix != NULL); + LASSERT(prefix != NULL); + LASSERT(ss != NULL); + LASSERT(ss->ss_lu != NULL); + + /* A compile-time check for FIDs that used to be in lustre_idl.h + * but is moved here to remove CLASSERT/LASSERT in that header. + * Check all lu_fid fields are converted in fid_cpu_to_le() and friends + * and that there is no padding added by compiler to the struct. */ + { + struct lu_fid tst; + + CLASSERT(sizeof(tst) == sizeof(tst.f_seq) + + sizeof(tst.f_oid) + sizeof(tst.f_ver)); + } + + seq->lss_cli = NULL; + seq->lss_type = type; + seq->lss_site = ss; + lu_seq_range_init(&seq->lss_space); - seq->lss_cli = NULL; - seq->lss_type = type; - range_zero(&seq->lss_space); - sema_init(&seq->lss_sem, 1); + lu_seq_range_init(&seq->lss_lowater_set); + lu_seq_range_init(&seq->lss_hiwater_set); + seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH; + + mutex_init(&seq->lss_mutex); seq->lss_width = is_srv ? LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH; @@ -481,7 +551,6 @@ int seq_server_init(struct lu_server_seq *seq, rc = seq_store_init(seq, env, dev); if (rc) GOTO(out, rc); - /* Request backing store for saved sequence info. */ rc = seq_store_read(seq, env); if (rc == -ENODATA) { @@ -491,38 +560,38 @@ int seq_server_init(struct lu_server_seq *seq, LUSTRE_SEQ_ZERO_RANGE: LUSTRE_SEQ_SPACE_RANGE; - CDEBUG(D_INFO, "%s: No data found " - "on store. Initialize space\n", - seq->lss_name); + seq->lss_space.lsr_index = ss->ss_node_id; + LCONSOLE_INFO("%s: No data found " + "on store. Initialize space\n", + seq->lss_name); - /* Save default controller value to store. 
*/ - rc = seq_store_write(seq, env); + rc = seq_store_update(env, seq, NULL, 0); if (rc) { CERROR("%s: Can't write space data, " "rc %d\n", seq->lss_name, rc); } } else if (rc) { - CERROR("%s: Can't read space data, rc %d\n", - seq->lss_name, rc); - GOTO(out, rc); - } - - if (is_srv) { - LASSERT(range_is_sane(&seq->lss_space)); - } else { - LASSERT(!range_is_zero(&seq->lss_space) && - range_is_sane(&seq->lss_space)); + CERROR("%s: Can't read space data, rc %d\n", + seq->lss_name, rc); + GOTO(out, rc); } + if (is_srv) { + LASSERT(lu_seq_range_is_sane(&seq->lss_space)); + } else { + LASSERT(!lu_seq_range_is_zero(&seq->lss_space) && + lu_seq_range_is_sane(&seq->lss_space)); + } + rc = seq_server_proc_init(seq); if (rc) - GOTO(out, rc); + GOTO(out, rc); - EXIT; + EXIT; out: - if (rc) - seq_server_fini(seq, env); - return rc; + if (rc) + seq_server_fini(seq, env); + return rc; } EXPORT_SYMBOL(seq_server_init); @@ -538,33 +607,40 @@ void seq_server_fini(struct lu_server_seq *seq, } EXPORT_SYMBOL(seq_server_fini); -cfs_proc_dir_entry_t *seq_type_proc_dir = NULL; - -static int __init fid_mod_init(void) +int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss) { - seq_type_proc_dir = lprocfs_register(LUSTRE_SEQ_NAME, - proc_lustre_root, - NULL, NULL); - if (IS_ERR(seq_type_proc_dir)) - return PTR_ERR(seq_type_proc_dir); - - LU_CONTEXT_KEY_INIT(&seq_thread_key); - lu_context_key_register(&seq_thread_key); - return 0; + if (ss == NULL) + RETURN(0); + + if (ss->ss_server_seq) { + seq_server_fini(ss->ss_server_seq, env); + OBD_FREE_PTR(ss->ss_server_seq); + ss->ss_server_seq = NULL; + } + + if (ss->ss_control_seq) { + seq_server_fini(ss->ss_control_seq, env); + OBD_FREE_PTR(ss->ss_control_seq); + ss->ss_control_seq = NULL; + } + + if (ss->ss_client_seq) { + seq_client_fini(ss->ss_client_seq); + OBD_FREE_PTR(ss->ss_client_seq); + ss->ss_client_seq = NULL; + } + + RETURN(0); } +EXPORT_SYMBOL(seq_site_fini); -static void __exit fid_mod_exit(void) +int fid_server_mod_init(void) { - lu_context_key_degister(&seq_thread_key); - if (seq_type_proc_dir != NULL && !IS_ERR(seq_type_proc_dir)) { - lprocfs_remove(&seq_type_proc_dir); - seq_type_proc_dir = NULL; - } + LU_CONTEXT_KEY_INIT(&seq_thread_key); + return lu_context_key_register(&seq_thread_key); } -MODULE_AUTHOR("Cluster File Systems, Inc. "); -MODULE_DESCRIPTION("Lustre FID Module"); -MODULE_LICENSE("GPL"); - -cfs_module(fid, "0.1.0", fid_mod_init, fid_mod_exit); -#endif +void fid_server_mod_exit(void) +{ + lu_context_key_degister(&seq_thread_key); +}
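
The comment block introduced above range_alloc_set() describes the new allocation scheme: sequences are handed out from a low-water set (lss_lowater_set); when that set is exhausted it is replaced by the pre-reserved high-water set (lss_hiwater_set) and a store update, normally asynchronous, reserves the next one, falling back to a synchronous write only if the previous update has not committed. Below is a minimal, self-contained sketch of that idea under simplified assumptions; struct seq_range, alloc_from_range(), alloc_from_set() and the store_update() stub are illustrative stand-ins, not the Lustre lu_seq_range/range_alloc()/seq_store_update() API.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct lu_seq_range: [start, end) of sequences. */
struct seq_range {
        uint64_t start;
        uint64_t end;
};

static uint64_t range_space(const struct seq_range *r)
{
        return r->end - r->start;
}

/* Carve at most `width` sequences off the front of `from` into `to`
 * (mirrors the width-limited range_alloc() in the patch). */
static void alloc_from_range(struct seq_range *to, struct seq_range *from,
                             uint64_t width)
{
        if (width > range_space(from))
                width = range_space(from);
        to->start = from->start;
        to->end = from->start + width;
        from->start += width;
}

/* Stub for the on-disk update; the real code syncs only when the previous
 * write has not committed yet. */
static int store_update(int sync)
{
        printf("  store update (%s)\n", sync ? "sync" : "async");
        return 0;
}

struct seq_server {
        struct seq_range space;     /* whole free sequence space       */
        struct seq_range lowater;   /* safe to hand out immediately    */
        struct seq_range hiwater;   /* reserved, possibly uncommitted  */
        uint64_t         width;     /* per-client sequence width       */
        uint64_t         set_width; /* size of each pre-reserved set   */
};

/* Hand out one `width`-sized range; refill from the high-water set when the
 * low-water set runs dry. */
static int alloc_from_set(struct seq_server *s, struct seq_range *out)
{
        if (range_space(&s->lowater) < s->width) {
                /* low-water mark reached: promote hiwater, reserve new one */
                s->lowater = s->hiwater;
                alloc_from_range(&s->hiwater, &s->space, s->set_width);
                if (store_update(0))
                        return -1;
        }
        alloc_from_range(out, &s->lowater, s->width);
        return 0;
}

int main(void)
{
        struct seq_server s = {
                .space     = { .start = 0x400, .end = 0x10000 },
                .width     = 0x10,
                .set_width = 0x40,
        };
        struct seq_range out;

        /* initial sets, as in __seq_set_init() */
        alloc_from_range(&s.lowater, &s.space, s.set_width);
        alloc_from_range(&s.hiwater, &s.space, s.set_width);

        for (int i = 0; i < 6; i++) {
                if (alloc_from_set(&s, &out) == 0)
                        printf("client %d gets [%#lx, %#lx)\n", i,
                               (unsigned long)out.start,
                               (unsigned long)out.end);
        }
        return 0;
}

Built with a C99 compiler, the demo serves the first four ranges from the initial low-water set; the fifth allocation triggers the swap to the high-water set and the (stubbed) store update, mirroring the barrier described in the patch comment.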
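
The reworked seq_server_set_cli() near the top of the patch defines a small attach/detach contract: a NULL client detaches the current controller, attaching while one is already set now returns -EEXIST (logged at D_HA rather than CERROR), and the operation is serialized by lss_mutex instead of the old semaphore. A compact stand-alone model of just that contract, using hypothetical seq_srv/seq_set_cli names rather than the Lustre types:

#include <errno.h>
#include <stdio.h>
#include <pthread.h>

struct seq_cli { const char *name; };

struct seq_srv {
        pthread_mutex_t lock;  /* stands in for lss_mutex */
        struct seq_cli *cli;   /* stands in for lss_cli   */
};

/* NULL detaches; attaching twice fails with -EEXIST, as in the patched code. */
static int seq_set_cli(struct seq_srv *srv, struct seq_cli *cli)
{
        int rc = 0;

        pthread_mutex_lock(&srv->lock);
        if (cli == NULL)
                srv->cli = NULL;
        else if (srv->cli != NULL)
                rc = -EEXIST;
        else
                srv->cli = cli;
        pthread_mutex_unlock(&srv->lock);
        return rc;
}

int main(void)
{
        struct seq_srv srv = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct seq_cli a = { "ctl-a" }, b = { "ctl-b" };

        printf("attach a: %d\n", seq_set_cli(&srv, &a));   /* 0       */
        printf("attach b: %d\n", seq_set_cli(&srv, &b));   /* -EEXIST */
        printf("detach  : %d\n", seq_set_cli(&srv, NULL)); /* 0       */
        return 0;
}

The real function additionally stamps the requester's node id into cli->lcs_space.lsr_index on attach; that detail is omitted from the sketch.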