RETURN(rc);
}
-struct kmem_cache *request_cache;
+static struct kmem_cache *request_cache;
int ptlrpc_request_cache_init(void)
{
/* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
* so it sets rq_intr regardless of individual rpc
- * timeouts. The synchronous IO waiting path sets
+ * timeouts. The synchronous IO waiting path sets
* rq_intr irrespective of whether ptlrpcd
* has seen a timeout. Our policy is to only interpret
* interrupted rpcs after they have timed out, so we
* We still want to block for a limited time,
* so we allow interrupts during the timeout.
*/
- lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
+ lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
ptlrpc_expired_set,
ptlrpc_interrupted_set, set);
else
/*
* At least one request is in flight, so no
* interrupts are allowed. Wait until all
- * complete, or an in-flight req times out.
+ * complete, or an in-flight req times out.
*/
lwi = LWI_TIMEOUT(cfs_time_seconds(timeout? timeout : 1),
ptlrpc_expired_set, set);
#include "ptlrpc_internal.h"
-struct ll_rpc_opcode {
+static struct ll_rpc_opcode {
__u32 opcode;
const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
{ LFSCK_QUERY, "lfsck_query" },
};
-struct ll_eopcode {
+static struct ll_eopcode {
__u32 opcode;
const char *opname;
} ll_eopcode_table[EXTRA_LAST_OPC] = {
return ll_rpc_opcode_table[offset].opname;
}
-const char* ll_eopcode2str(__u32 opcode)
+static const char *ll_eopcode2str(__u32 opcode)
{
LASSERT(ll_eopcode_table[opcode].opcode == opcode);
return ll_eopcode_table[opcode].opname;
}
+
#ifdef LPROCFS
-void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
+static void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
char *name, struct proc_dir_entry **procroot_ret,
struct lprocfs_stats **stats_ret)
{
LPROC_SEQ_FOPS(ptlrpc_lprocfs_threads_max);
/**
- * \addtogoup nrs
- * @{
- */
-extern struct nrs_core nrs_core;
-
-/**
* Translates \e ptlrpc_nrs_pol_state values to human-readable strings.
*
* \param[in] state The policy state
struct ptlrpc_request *srhi_req;
};
-int
+static int
ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
struct ptlrpc_srh_iterator *srhi,
__u64 seq)
RETURN(rc);
}
-
-/* ptlrpc/nrs_fifo.c */
-extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo;
-#ifdef HAVE_SERVER_SUPPORT
-/* ptlrpc/nrs_crr.c */
-extern struct ptlrpc_nrs_pol_conf nrs_conf_crrn;
-/* ptlrpc/nrs_orr.c */
-extern struct ptlrpc_nrs_pol_conf nrs_conf_orr;
-extern struct ptlrpc_nrs_pol_conf nrs_conf_trr;
-extern struct ptlrpc_nrs_pol_conf nrs_conf_tbf;
-#endif /* HAVE_SERVER_SUPPORT */
-
/**
* Adds all policies that ship with the ptlrpc module, to NRS core's list of
* policies \e nrs_core.nrs_policies.
* \retval 0 operation carried out successfully
* \retval -ve error
*/
-int nrs_crrn_ctl(struct ptlrpc_nrs_policy *policy, enum ptlrpc_nrs_ctl opc,
- void *arg)
+static int nrs_crrn_ctl(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc,
+ void *arg)
{
assert_spin_locked(&policy->pol_nrs->nrs_lock);
*
* \see nrs_resource_get_safe()
*/
-int nrs_crrn_res_get(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp, bool moving_req)
+static int nrs_crrn_res_get(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp, bool moving_req)
{
struct nrs_crrn_net *net;
struct nrs_crrn_client *cli;
* \retval 0 success
* \retval != 0 error
*/
-int nrs_crrn_lprocfs_init(struct ptlrpc_service *svc)
+static int nrs_crrn_lprocfs_init(struct ptlrpc_service *svc)
{
struct lprocfs_seq_vars nrs_crrn_lprocfs_vars[] = {
{ .name = "nrs_crrn_quantum",
*
* \param[in] svc the service
*/
-void nrs_crrn_lprocfs_fini(struct ptlrpc_service *svc)
+static void nrs_crrn_lprocfs_fini(struct ptlrpc_service *svc)
{
if (svc->srv_procroot == NULL)
return;
* \retval 0 operation carried successfully
* \retval -ve error
*/
-int nrs_orr_ctl(struct ptlrpc_nrs_policy *policy, enum ptlrpc_nrs_ctl opc,
- void *arg)
+static int nrs_orr_ctl(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc, void *arg)
{
assert_spin_locked(&policy->pol_nrs->nrs_lock);
*
* \see nrs_resource_get_safe()
*/
-int nrs_orr_res_get(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp, bool moving_req)
+static int nrs_orr_res_get(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp, bool moving_req)
{
struct nrs_orr_data *orrd;
struct nrs_orr_object *orro;
* so that lprocfs read/write functions can be used by both the ORR and TRR
* policies.
*/
-struct nrs_lprocfs_orr_data {
+static struct nrs_lprocfs_orr_data {
struct ptlrpc_service *svc;
char *name;
} lprocfs_orr_data = {
}
LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs_orr_supported);
-int nrs_orr_lprocfs_init(struct ptlrpc_service *svc)
+static int nrs_orr_lprocfs_init(struct ptlrpc_service *svc)
{
int i;
return lprocfs_seq_add_vars(svc->srv_procroot, nrs_orr_lprocfs_vars, NULL);
}
-void nrs_orr_lprocfs_fini(struct ptlrpc_service *svc)
+static void nrs_orr_lprocfs_fini(struct ptlrpc_service *svc)
{
if (svc->srv_procroot == NULL)
return;
#ifdef LPROCFS
-int nrs_trr_lprocfs_init(struct ptlrpc_service *svc)
+static int nrs_trr_lprocfs_init(struct ptlrpc_service *svc)
{
int rc;
int i;
return rc;
}
-void nrs_trr_lprocfs_fini(struct ptlrpc_service *svc)
+static void nrs_trr_lprocfs_fini(struct ptlrpc_service *svc)
{
if (svc->srv_procroot == NULL)
return;
#define NRS_POL_NAME_TBF "tbf"
-int tbf_jobid_cache_size = 8192;
+static int tbf_jobid_cache_size = 8192;
CFS_MODULE_PARM(tbf_jobid_cache_size, "i", int, 0644,
"The size of jobid cache");
-int tbf_rate = 10000;
+static int tbf_rate = 10000;
CFS_MODULE_PARM(tbf_rate, "i", int, 0644,
"Default rate limit in RPCs/s");
-int tbf_depth = 3;
+static int tbf_depth = 3;
CFS_MODULE_PARM(tbf_depth, "i", int, 0644,
"How many tokens that a client can save up");
OBD_FREE(rule->tr_jobids_str, strlen(rule->tr_jobids_str) + 1);
}
-struct nrs_tbf_ops nrs_tbf_jobid_ops = {
+static struct nrs_tbf_ops nrs_tbf_jobid_ops = {
.o_name = NRS_TBF_TYPE_JOBID,
.o_startup = nrs_tbf_jobid_startup,
.o_cli_find = nrs_tbf_jobid_cli_find,
return 0;
}
-struct nrs_tbf_ops nrs_tbf_nid_ops = {
+static struct nrs_tbf_ops nrs_tbf_nid_ops = {
.o_name = NRS_TBF_TYPE_NID,
.o_startup = nrs_tbf_nid_startup,
.o_cli_find = nrs_tbf_nid_cli_find,
* \retval 0 operation carried out successfully
* \retval -ve error
*/
-int nrs_tbf_ctl(struct ptlrpc_nrs_policy *policy, enum ptlrpc_nrs_ctl opc,
- void *arg)
+static int nrs_tbf_ctl(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc,
+ void *arg)
{
int rc = 0;
ENTRY;
* \retval 0 success
* \retval != 0 error
*/
-int nrs_tbf_lprocfs_init(struct ptlrpc_service *svc)
+static int nrs_tbf_lprocfs_init(struct ptlrpc_service *svc)
{
struct lprocfs_seq_vars nrs_tbf_lprocfs_vars[] = {
{ .name = "nrs_tbf_rule",
*
* \param[in] svc the service
*/
-void nrs_tbf_lprocfs_fini(struct ptlrpc_service *svc)
+static void nrs_tbf_lprocfs_fini(struct ptlrpc_service *svc)
{
if (svc->srv_procroot == NULL)
return;
#include <obd_cksum.h>
#include <lustre/ll_fiemap.h>
+#include "ptlrpc_internal.h"
+
static inline int lustre_msg_hdr_size_v2(int count)
{
return cfs_size_round(offsetof(struct lustre_msg_v2,
}
EXPORT_SYMBOL(lustre_msg_buf);
-int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, int segment,
- unsigned int newlen, int move_data)
+static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, int segment,
+ unsigned int newlen, int move_data)
{
char *tail = NULL, *newpos;
int tail_len = 0, n;
}
EXPORT_SYMBOL(lustre_swab_fid2path);
-void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
+static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
{
__swab64s(&fm_extent->fe_logical);
__swab64s(&fm_extent->fe_physical);
}
EXPORT_SYMBOL(lustre_swab_hsm_state_set);
-void lustre_swab_hsm_extent(struct hsm_extent *extent)
+static void lustre_swab_hsm_extent(struct hsm_extent *extent)
{
__swab64s(&extent->offset);
__swab64s(&extent->length);
}
EXPORT_SYMBOL(ptlrpc_obd_ping);
-int ptlrpc_ping(struct obd_import *imp)
+static int ptlrpc_ping(struct obd_import *imp)
{
struct ptlrpc_request *req;
ENTRY;
RETURN(0);
}
-void ptlrpc_update_next_ping(struct obd_import *imp, int soon)
+static void ptlrpc_update_next_ping(struct obd_import *imp, int soon)
{
#ifdef ENABLE_PINGER
int time = soon ? PING_INTERVAL_SHORT : PING_INTERVAL;
return cfs_time_shift(obd_timeout);
}
-cfs_duration_t pinger_check_timeout(cfs_time_t time)
+static cfs_duration_t pinger_check_timeout(cfs_time_t time)
{
struct timeout_item *item;
cfs_time_t timeout = PING_INTERVAL;
* Register a timeout callback to the pinger list, and the callback will
* be called when timeout happens.
*/
-struct timeout_item* ptlrpc_new_timeout(int time, enum timeout_event event,
- timeout_cb_t cb, void *data)
+static struct timeout_item *ptlrpc_new_timeout(int time,
+ enum timeout_event event,
+ timeout_cb_t cb, void *data)
{
struct timeout_item *ti;
static int pet_refcount = 0;
static int pet_state;
static wait_queue_head_t pet_waitq;
-struct list_head pet_list;
+static struct list_head pet_list;
static DEFINE_SPINLOCK(pet_lock);
int ping_evictor_wake(struct obd_export *exp)
extern int test_req_buffer_pressure;
extern struct list_head ptlrpc_all_services;
extern struct mutex ptlrpc_all_services_mutex;
+extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo;
+
+#ifdef HAVE_SERVER_SUPPORT
+extern struct ptlrpc_nrs_pol_conf nrs_conf_crrn;
+extern struct ptlrpc_nrs_pol_conf nrs_conf_orr;
+extern struct ptlrpc_nrs_pol_conf nrs_conf_trr;
+extern struct ptlrpc_nrs_pol_conf nrs_conf_tbf;
+#endif /* HAVE_SERVER_SUPPORT */
+
+/**
+ * \addtogroup nrs
+ * @{
+ */
+extern struct nrs_core nrs_core;
+
+extern struct mutex ptlrpcd_mutex;
+extern struct mutex pinger_mutex;
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);
/* ptlrpcd.c */
#if RS_DEBUG
extern spinlock_t ptlrpc_rs_debug_lock;
#endif
-extern struct mutex pinger_mutex;
-extern struct mutex ptlrpcd_mutex;
-__init int ptlrpc_init(void)
+static __init int ptlrpc_init(void)
{
int rc;
/*
* memory shrinker
*/
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
+static const int pools_shrinker_seeks = DEFAULT_SEEKS;
static struct shrinker *pools_shrinker;
return err;
}
EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
-
-
#include <linux/fs.h>
#include <libcfs/libcfs.h>
#include <lvfs.h>
+#include <obd_class.h>
+
+#include "ptlrpc_internal.h"
/* refine later and change to seqlock or simlar from libcfs */
/* Debugging check only needed during development */
#include <lustre_net.h>
#include <lustre_sec.h>
+#include "ptlrpc_internal.h"
+
#define SEC_GC_INTERVAL (30 * 60)
l_wait_event(sec_gc_thread.t_ctl_waitq,
thread_is_stopped(&sec_gc_thread), &lwi);
}
-
struct proc_dir_entry *sptlrpc_proc_root = NULL;
EXPORT_SYMBOL(sptlrpc_proc_root);
-char *sec_flags2str(unsigned long flags, char *buf, int bufsize)
+static char *sec_flags2str(unsigned long flags, char *buf, int bufsize)
{
buf[0] = '\0';
sptlrpc_proc_root = NULL;
}
}
-
#include <lustre_net.h>
#include <lustre_sec.h>
+#include "ptlrpc_internal.h"
+
static struct ptlrpc_sec_policy null_policy;
static struct ptlrpc_sec null_sec;
static struct ptlrpc_cli_ctx null_cli_ctx;
#include <lustre_net.h>
#include <lustre_sec.h>
+#include "ptlrpc_internal.h"
+
struct plain_sec {
struct ptlrpc_sec pls_base;
rwlock_t pls_lock;
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;
-struct ptlrpc_request_buffer_desc *
+static struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_service *svc = svcpt->scp_service;
return rqbd;
}
-void
+static void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
OBD_FREE_PTR(rqbd);
}
-int
+static int
ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
{
struct ptlrpc_service *svc = svcpt->scp_service;
* Right now, it just checks to make sure that requests aren't languishing
* in the queue. We'll use this health check to govern whether a node needs
* to be shot, so it's intentionally non-aggressive. */
-int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
+static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_request *request = NULL;
struct timeval right_now;
#include <lustre_net.h>
#include <lustre/lustre_lfsck_user.h>
#include <lustre_disk.h>
+
+#include "ptlrpc_internal.h"
+
void lustre_assert_wire_constants(void)
{
/* Wire protocol assertions generated by 'wirecheck'
LASSERTF((int)sizeof(((struct lfsck_reply *)0)->lr_padding_2) == 8, "found %lld\n",
(long long)(int)sizeof(((struct lfsck_reply *)0)->lr_padding_2));
}
-
* the following executing phase succeed in anyway, so these undo
* should be useless for most of the time in Phase I
*/
-int out_tx_create_undo(const struct lu_env *env, struct thandle *th,
- struct tx_arg *arg)
+static int out_tx_create_undo(const struct lu_env *env, struct thandle *th,
+ struct tx_arg *arg)
{
int rc;
return rc;
}
-int out_tx_create_exec(const struct lu_env *env, struct thandle *th,
- struct tx_arg *arg)
+static int out_tx_create_exec(const struct lu_env *env, struct thandle *th,
+ struct tx_arg *arg)
{
struct dt_object *dt_obj = arg->object;
int rc;
DEF_OUT_HNDL(OUT_WRITE, "out_write", MUTABOR | HABEO_REFERO, out_write),
};
-struct tgt_handler *out_handler_find(__u32 opc)
+static struct tgt_handler *out_handler_find(__u32 opc)
{
struct tgt_handler *h;
return rc;
}
-int out_tx_end(const struct lu_env *env, struct thandle_exec_args *ta,
- int declare_ret)
+static int out_tx_end(const struct lu_env *env, struct thandle_exec_args *ta,
+ int declare_ret)
{
struct tgt_session_info *tsi = tgt_ses_info(env);
int i;
* -ve: abort immediately with the given error code;
* 0: send reply with error code in req->rq_status;
*/
-int tgt_handle_recovery(struct ptlrpc_request *req, int reply_fail_id)
+static int tgt_handle_recovery(struct ptlrpc_request *req, int reply_fail_id)
{
ENTRY;
/*
* OBD_IDX_READ handler
*/
-int tgt_obd_idx_read(struct tgt_session_info *tsi)
+static int tgt_obd_idx_read(struct tgt_session_info *tsi)
{
struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env);
struct lu_rdpg *rdpg = &tti->tti_u.rdpg.tti_rdpg;
/* Ensure that data and metadata are synced to the disk when lock is cancelled
* (if requested) */
-int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
+static int tgt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
{
struct lu_env env;
struct lu_target *tgt;
RETURN(rc);
}
-struct ldlm_callback_suite tgt_dlm_cbs = {
+static struct ldlm_callback_suite tgt_dlm_cbs = {
.lcs_completion = ldlm_server_completion_ast,
.lcs_blocking = tgt_blocking_ast,
.lcs_glimpse = ldlm_server_glimpse_ast
* sec context handlers
*/
/* XXX: Implement based on mdt_sec_ctx_handle()? */
-int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
+static int tgt_sec_ctx_handle(struct tgt_session_info *tsi)
{
return 0;
}
/**
* Update client data in last_rcvd
*/
-int tgt_client_data_update(const struct lu_env *env, struct obd_export *exp)
+static int tgt_client_data_update(const struct lu_env *env,
+ struct obd_export *exp)
{
struct tg_export_data *ted = &exp->exp_target_data;
struct lu_target *tgt = class_exp2tgt(exp);
}
EXPORT_SYMBOL(tgt_truncate_last_rcvd);
-void tgt_client_epoch_update(const struct lu_env *env, struct obd_export *exp)
+static void tgt_client_epoch_update(const struct lu_env *env,
+ struct obd_export *exp)
{
struct lsd_client_data *lcd = exp->exp_target_data.ted_lcd;
struct lu_target *tgt = class_exp2tgt(exp);
__u64 llcc_transno;
};
-void tgt_cb_last_committed(struct lu_env *env, struct thandle *th,
- struct dt_txn_commit_cb *cb, int err)
+static void tgt_cb_last_committed(struct lu_env *env, struct thandle *th,
+ struct dt_txn_commit_cb *cb, int err)
{
struct tgt_last_committed_callback *ccb;
struct obd_export *lncc_exp;
};
-void tgt_cb_new_client(struct lu_env *env, struct thandle *th,
- struct dt_txn_commit_cb *cb, int err)
+static void tgt_cb_new_client(struct lu_env *env, struct thandle *th,
+ struct dt_txn_commit_cb *cb, int err)
{
struct tgt_new_client_callback *ccb;
/*
* last_rcvd & last_committed update callbacks
*/
-int tgt_last_rcvd_update(const struct lu_env *env, struct lu_target *tgt,
- struct dt_object *obj, __u64 opdata,
- struct thandle *th, struct ptlrpc_request *req)
+static int tgt_last_rcvd_update(const struct lu_env *env, struct lu_target *tgt,
+ struct dt_object *obj, __u64 opdata,
+ struct thandle *th, struct ptlrpc_request *req)
{
struct tgt_thread_info *tti = tgt_th_info(env);
struct tg_export_data *ted;
* It updates last_rcvd client slot and version of object in
* simple way but with all locks to simulate all drawbacks
*/
-int tgt_last_rcvd_update_echo(const struct lu_env *env, struct lu_target *tgt,
- struct dt_object *obj, struct thandle *th,
- struct obd_export *exp)
+static int tgt_last_rcvd_update_echo(const struct lu_env *env,
+ struct lu_target *tgt,
+ struct dt_object *obj,
+ struct thandle *th,
+ struct obd_export *exp)
{
struct tgt_thread_info *tti = tgt_th_info(env);
struct tg_export_data *ted = &exp->exp_target_data;
RETURN(rc);
}
-int tgt_clients_data_init(const struct lu_env *env, struct lu_target *tgt,
- unsigned long last_size)
+static int tgt_clients_data_init(const struct lu_env *env,
+ struct lu_target *tgt,
+ unsigned long last_size)
{
struct obd_device *obd = tgt->lut_obd;
struct lr_server_data *lsd = &tgt->lut_lsd;
#include <obd.h>
#include "tgt_internal.h"
+#include "../ptlrpc/ptlrpc_internal.h"
int tgt_init(const struct lu_env *env, struct lu_target *lut,
struct obd_device *obd, struct dt_device *dt,