struct ptlrpc_request rqbd_req;
};
-typedef int (*svc_thr_init_t)(struct ptlrpc_thread *thread);
-typedef void (*svc_thr_done_t)(struct ptlrpc_thread *thread);
typedef int (*svc_handler_t)(struct ptlrpc_request *req);
-typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
-typedef void (*svc_req_printfn_t)(void *, struct ptlrpc_request *);
+
+struct ptlrpc_service_ops {
+ /**
+ * if non-NULL called during thread creation (ptlrpc_start_thread())
+ * to initialize service specific per-thread state.
+ */
+ int (*so_thr_init)(struct ptlrpc_thread *thr);
+ /**
+ * if non-NULL called during thread shutdown (ptlrpc_main()) to
+ * destruct state created by ->so_thr_init().
+ */
+ void (*so_thr_done)(struct ptlrpc_thread *thr);
+ /**
+ * Handler function for incoming requests for this service
+ */
+ int (*so_req_handler)(struct ptlrpc_request *req);
+ /**
+ * function to determine priority of the request, it's called
+ * on every new request
+ */
+ int (*so_hpreq_handler)(struct ptlrpc_request *);
+ /**
+ * service-specific print fn
+ */
+ void (*so_req_printer)(void *, struct ptlrpc_request *);
+};
#ifndef __cfs_cacheline_aligned
/* NB: put it here for reducing patche dependence */
/** most often accessed fields */
/** chain thru all services */
cfs_list_t srv_list;
+ /** service operations table */
+ struct ptlrpc_service_ops srv_ops;
/** only statically allocated strings here; we don't clean them */
char *srv_name;
/** only statically allocated strings here; we don't clean them */
/** # running threads */
int srv_threads_running;
- /** service operations, move to ptlrpc_svc_ops_t in the future */
- /** @{ */
- /**
- * if non-NULL called during thread creation (ptlrpc_start_thread())
- * to initialize service specific per-thread state.
- */
- svc_thr_init_t srv_init;
- /**
- * if non-NULL called during thread shutdown (ptlrpc_main()) to
- * destruct state created by ->srv_init().
- */
- svc_thr_done_t srv_done;
- /** Handler function for incoming requests for this service */
- svc_handler_t srv_handler;
- /** hp request handler */
- svc_hpreq_handler_t srv_hpreq_handler;
- /** service-specific print fn */
- svc_req_printfn_t srv_req_printfn;
- /** @} */
-
/** Root of /proc dir tree for this service */
cfs_proc_dir_entry_t *srv_procroot;
/** Pointer to statistic data for this service */
int ptlrpcd_queue_work(void *handler);
/** @} */
+struct ptlrpc_service_buf_conf {
+ /* nbufs is how many buffers to post */
+ unsigned int bc_nbufs;
+ /* buffer size to post */
+ unsigned int bc_buf_size;
+ /* portal to listen for requests on */
+ unsigned int bc_req_portal;
+ /* portal of where to send replies to */
+ unsigned int bc_rep_portal;
+ /* maximum request size to be accepted for this service */
+ unsigned int bc_req_max_size;
+ /* maximum reply size this service can ever send */
+ unsigned int bc_rep_max_size;
+};
+
+struct ptlrpc_service_thr_conf {
+ /* threadname should be 8 characters or less - 6 will be added on */
+ char *tc_thr_name;
+ /* min number of service threads to start */
+ unsigned int tc_nthrs_min;
+ /* max number of service threads to start */
+ unsigned int tc_nthrs_max;
+ /* set NUMA node affinity for service threads */
+ unsigned int tc_cpu_affinity;
+ /* Tags for lu_context associated with service thread */
+ __u32 tc_ctx_tags;
+};
struct ptlrpc_service_conf {
- int psc_nbufs;
- int psc_bufsize;
- int psc_max_req_size;
- int psc_max_reply_size;
- int psc_req_portal;
- int psc_rep_portal;
- int psc_watchdog_factor;
- int psc_min_threads;
- int psc_max_threads;
- __u32 psc_ctx_tags;
+ /* service name */
+ char *psc_name;
+ /* soft watchdog timeout multiplier to print stuck service traces */
+ unsigned int psc_watchdog_factor;
+ /* buffer information */
+ struct ptlrpc_service_buf_conf psc_buf;
+ /* thread information */
+ struct ptlrpc_service_thr_conf psc_thr;
+ /* function table */
+ struct ptlrpc_service_ops psc_ops;
};
/* ptlrpc/service.c */
void ptlrpc_commit_replies(struct obd_export *exp);
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
-struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
- svc_handler_t h, char *name,
- struct proc_dir_entry *proc_entry,
- svc_req_printfn_t prntfn,
- char *threadname);
-
-struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
- int max_reply_size,
- int req_portal, int rep_portal,
- int watchdog_factor,
- svc_handler_t, char *name,
- cfs_proc_dir_entry_t *proc_entry,
- svc_req_printfn_t,
- int min_threads, int max_threads,
- char *threadname, __u32 ctx_tags,
- svc_hpreq_handler_t);
+struct ptlrpc_service *ptlrpc_register_service(
+ struct ptlrpc_service_conf *conf,
+ struct proc_dir_entry *proc_entry);
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
int ptlrpc_start_threads(struct ptlrpc_service *svc);
static int ldlm_setup(void)
{
- struct ldlm_bl_pool *blp;
+ static struct ptlrpc_service_conf conf;
+ struct ldlm_bl_pool *blp = NULL;
int rc = 0;
int ldlm_min_threads = LDLM_THREADS_AUTO_MIN;
int ldlm_max_threads = LDLM_THREADS_AUTO_MAX;
#ifdef LPROCFS
rc = ldlm_proc_setup();
if (rc != 0)
- GOTO(out_free, rc);
+ GOTO(out, rc);
#endif
#ifdef __KERNEL__
ldlm_min_threads = ldlm_max_threads = ldlm_num_threads;
}
#endif
-
- ldlm_state->ldlm_cb_service =
- ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
- LDLM_MAXREPSIZE, LDLM_CB_REQUEST_PORTAL,
- LDLM_CB_REPLY_PORTAL, 2,
- ldlm_callback_handler, "ldlm_cbd",
- ldlm_svc_proc_dir, NULL,
- ldlm_min_threads, ldlm_max_threads,
- "ldlm_cb",
- LCT_MD_THREAD|LCT_DT_THREAD, NULL);
-
- if (!ldlm_state->ldlm_cb_service) {
- CERROR("failed to start service\n");
- GOTO(out_proc, rc = -ENOMEM);
- }
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = "ldlm_cbd",
+ .psc_watchdog_factor = 2,
+ .psc_buf = {
+ .bc_nbufs = LDLM_NBUFS,
+ .bc_buf_size = LDLM_BUFSIZE,
+ .bc_req_max_size = LDLM_MAXREQSIZE,
+ .bc_rep_max_size = LDLM_MAXREPSIZE,
+ .bc_req_portal = LDLM_CB_REQUEST_PORTAL,
+ .bc_rep_portal = LDLM_CB_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "ldlm_cb",
+ .tc_nthrs_min = ldlm_min_threads,
+ .tc_nthrs_max = ldlm_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD | \
+ LCT_DT_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = ldlm_callback_handler,
+ },
+ };
+ ldlm_state->ldlm_cb_service = \
+ ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
+ if (IS_ERR(ldlm_state->ldlm_cb_service)) {
+ CERROR("failed to start service\n");
+ rc = PTR_ERR(ldlm_state->ldlm_cb_service);
+ ldlm_state->ldlm_cb_service = NULL;
+ GOTO(out, rc);
+ }
#ifdef HAVE_SERVER_SUPPORT
- ldlm_state->ldlm_cancel_service =
- ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
- LDLM_MAXREPSIZE, LDLM_CANCEL_REQUEST_PORTAL,
- LDLM_CANCEL_REPLY_PORTAL, 6,
- ldlm_cancel_handler, "ldlm_canceld",
- ldlm_svc_proc_dir, NULL,
- ldlm_min_threads, ldlm_max_threads,
- "ldlm_cn",
- LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
- ldlm_hpreq_handler);
-
- if (!ldlm_state->ldlm_cancel_service) {
- CERROR("failed to start service\n");
- GOTO(out_proc, rc = -ENOMEM);
- }
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = "ldlm_canceld",
+ .psc_watchdog_factor = 6,
+ .psc_buf = {
+ .bc_nbufs = LDLM_NBUFS,
+ .bc_buf_size = LDLM_BUFSIZE,
+ .bc_req_max_size = LDLM_MAXREQSIZE,
+ .bc_rep_max_size = LDLM_MAXREPSIZE,
+ .bc_req_portal = LDLM_CANCEL_REQUEST_PORTAL,
+ .bc_rep_portal = LDLM_CANCEL_REPLY_PORTAL,
+
+ },
+ .psc_thr = {
+ .tc_thr_name = "ldlm_cn",
+ .tc_nthrs_min = ldlm_min_threads,
+ .tc_nthrs_max = ldlm_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD | \
+ LCT_DT_THREAD | \
+ LCT_CL_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = ldlm_cancel_handler,
+ .so_hpreq_handler = ldlm_hpreq_handler,
+ },
+ };
+ ldlm_state->ldlm_cancel_service = \
+ ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
+ if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
+ CERROR("failed to start service\n");
+ rc = PTR_ERR(ldlm_state->ldlm_cancel_service);
+ ldlm_state->ldlm_cancel_service = NULL;
+ GOTO(out, rc);
+ }
#endif
- OBD_ALLOC(blp, sizeof(*blp));
- if (blp == NULL)
- GOTO(out_proc, rc = -ENOMEM);
+ OBD_ALLOC(blp, sizeof(*blp));
+ if (blp == NULL)
+ GOTO(out, rc = -ENOMEM);
ldlm_state->ldlm_bl_pool = blp;
cfs_spin_lock_init(&blp->blp_lock);
for (i = 0; i < blp->blp_min_threads; i++) {
rc = ldlm_bl_thread_start(blp);
if (rc < 0)
- GOTO(out_thread, rc);
+ GOTO(out, rc);
}
-# ifdef HAVE_SERVER_SUPPORT
- rc = ptlrpc_start_threads(ldlm_state->ldlm_cancel_service);
- if (rc)
- GOTO(out_thread, rc);
-# endif
-
- rc = ptlrpc_start_threads(ldlm_state->ldlm_cb_service);
- if (rc)
- GOTO(out_thread, rc);
-
CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
expired_lock_thread.elt_state = ELT_STOPPED;
cfs_waitq_init(&expired_lock_thread.elt_waitq);
rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
if (rc < 0) {
CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
- GOTO(out_thread, rc);
+ GOTO(out, rc);
}
cfs_wait_event(expired_lock_thread.elt_waitq,
expired_lock_thread.elt_state == ELT_READY);
-#endif
-#ifdef __KERNEL__
rc = ldlm_pools_init();
if (rc)
- GOTO(out_thread, rc);
+ GOTO(out, rc);
#endif
RETURN(0);
-#ifdef __KERNEL__
- out_thread:
-# ifdef HAVE_SERVER_SUPPORT
- ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
-# endif
- ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
-#endif
-
- out_proc:
-#ifdef LPROCFS
- ldlm_proc_cleanup();
- out_free:
-#endif
- OBD_FREE(ldlm_state, sizeof(*ldlm_state));
- ldlm_state = NULL;
+ out:
+ ldlm_cleanup();
return rc;
}
static int ldlm_cleanup(void)
{
-#ifdef __KERNEL__
- struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
-#endif
ENTRY;
if (!cfs_list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
#ifdef __KERNEL__
ldlm_pools_fini();
-#endif
-#ifdef __KERNEL__
- while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
- struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
+ if (ldlm_state->ldlm_bl_pool != NULL) {
+ struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
- cfs_init_completion(&blp->blp_comp);
+ while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
+ struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
- cfs_spin_lock(&blp->blp_lock);
- cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
- cfs_waitq_signal(&blp->blp_waitq);
- cfs_spin_unlock(&blp->blp_lock);
+ cfs_init_completion(&blp->blp_comp);
- cfs_wait_for_completion(&blp->blp_comp);
- }
- OBD_FREE(blp, sizeof(*blp));
+ cfs_spin_lock(&blp->blp_lock);
+ cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
+ cfs_waitq_signal(&blp->blp_waitq);
+ cfs_spin_unlock(&blp->blp_lock);
- ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
-# ifdef HAVE_SERVER_SUPPORT
- ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
-# endif
- ldlm_proc_cleanup();
+ cfs_wait_for_completion(&blp->blp_comp);
+ }
- expired_lock_thread.elt_state = ELT_TERMINATE;
- cfs_waitq_signal(&expired_lock_thread.elt_waitq);
- cfs_wait_event(expired_lock_thread.elt_waitq,
- expired_lock_thread.elt_state == ELT_STOPPED);
-#else /* !__KERNEL__ */
- ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
+ OBD_FREE(blp, sizeof(*blp));
+ }
+#endif /* __KERNEL__ */
+
+ if (ldlm_state->ldlm_cb_service != NULL)
+ ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
# ifdef HAVE_SERVER_SUPPORT
- ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
+ if (ldlm_state->ldlm_cancel_service != NULL)
+ ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
# endif
+#ifdef __KERNEL__
+ ldlm_proc_cleanup();
+
+ if (expired_lock_thread.elt_state != ELT_STOPPED) {
+ expired_lock_thread.elt_state = ELT_TERMINATE;
+ cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+ cfs_wait_event(expired_lock_thread.elt_waitq,
+ expired_lock_thread.elt_state == ELT_STOPPED);
+ }
#endif /* __KERNEL__ */
OBD_FREE(ldlm_state, sizeof(*ldlm_state));
static int mdt_start_ptlrpc_service(struct mdt_device *m)
{
- int rc;
static struct ptlrpc_service_conf conf;
cfs_proc_dir_entry_t *procfs_entry;
- ENTRY;
-
- procfs_entry = m->mdt_md_dev.md_lu_dev.ld_obd->obd_proc_entry;
-
- conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = MDS_MAXREQSIZE,
- .psc_max_reply_size = MDS_MAXREPSIZE,
- .psc_req_portal = MDS_REQUEST_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- /*
- * We'd like to have a mechanism to set this on a per-device
- * basis, but alas...
- */
- .psc_min_threads = mdt_min_threads,
- .psc_max_threads = mdt_max_threads,
- .psc_ctx_tags = LCT_MD_THREAD
- };
-
- m->mdt_ldlm_client = &m->mdt_md_dev.md_lu_dev.ld_obd->obd_ldlm_client;
- ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
- "mdt_ldlm_client", m->mdt_ldlm_client);
-
- m->mdt_regular_service =
- ptlrpc_init_svc_conf(&conf, mdt_regular_handle, LUSTRE_MDT_NAME,
- procfs_entry, target_print_req,
- LUSTRE_MDT_NAME);
- if (m->mdt_regular_service == NULL)
- RETURN(-ENOMEM);
-
- rc = ptlrpc_start_threads(m->mdt_regular_service);
- if (rc)
- GOTO(err_mdt_svc, rc);
-
- /*
- * readpage service configuration. Parameters have to be adjusted,
- * ideally.
- */
- conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = MDS_MAXREQSIZE,
- .psc_max_reply_size = MDS_MAXREPSIZE,
- .psc_req_portal = MDS_READPAGE_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = mdt_min_threads,
- .psc_max_threads = mdt_max_threads,
- .psc_ctx_tags = LCT_MD_THREAD
- };
- m->mdt_readpage_service =
- ptlrpc_init_svc_conf(&conf, mdt_readpage_handle,
- LUSTRE_MDT_NAME "_readpage",
- procfs_entry, target_print_req,"mdt_rdpg");
+ int rc = 0;
+ ENTRY;
+
+ m->mdt_ldlm_client = &m->mdt_md_dev.md_lu_dev.ld_obd->obd_ldlm_client;
+ ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
+ "mdt_ldlm_client", m->mdt_ldlm_client);
+
+ procfs_entry = m->mdt_md_dev.md_lu_dev.ld_obd->obd_proc_entry;
+
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MDT_NAME,
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = MDS_NBUFS,
+ .bc_buf_size = MDS_BUFSIZE,
+ .bc_req_max_size = MDS_MAXREQSIZE,
+ .bc_rep_max_size = MDS_MAXREPSIZE,
+ .bc_req_portal = MDS_REQUEST_PORTAL,
+ .bc_rep_portal = MDC_REPLY_PORTAL,
+ },
+ /*
+ * We'd like to have a mechanism to set this on a per-device
+ * basis, but alas...
+ */
+ .psc_thr = {
+ .tc_thr_name = LUSTRE_MDT_NAME,
+ .tc_nthrs_min = mdt_min_threads,
+ .tc_nthrs_max = mdt_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mdt_regular_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
+ m->mdt_regular_service = ptlrpc_register_service(&conf, procfs_entry);
+ if (IS_ERR(m->mdt_regular_service)) {
+ rc = PTR_ERR(m->mdt_regular_service);
+ CERROR("failed to start regular mdt service: %d\n", rc);
+ m->mdt_regular_service = NULL;
+
+ RETURN(rc);
+ }
- if (m->mdt_readpage_service == NULL) {
- CERROR("failed to start readpage service\n");
- GOTO(err_mdt_svc, rc = -ENOMEM);
+ /*
+ * readpage service configuration. Parameters have to be adjusted,
+ * ideally.
+ */
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MDT_NAME "_readpage",
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = MDS_NBUFS,
+ .bc_buf_size = MDS_BUFSIZE,
+ .bc_req_max_size = MDS_MAXREQSIZE,
+ .bc_rep_max_size = MDS_MAXREPSIZE,
+ .bc_req_portal = MDS_READPAGE_PORTAL,
+ .bc_rep_portal = MDC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "mdt_rdpg",
+ .tc_nthrs_min = mdt_min_threads,
+ .tc_nthrs_max = mdt_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mdt_readpage_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
+ m->mdt_readpage_service = ptlrpc_register_service(&conf, procfs_entry);
+ if (IS_ERR(m->mdt_readpage_service)) {
+ rc = PTR_ERR(m->mdt_readpage_service);
+ CERROR("failed to start readpage service: %d\n", rc);
+ m->mdt_readpage_service = NULL;
+
+ GOTO(err_mdt_svc, rc);
}
- rc = ptlrpc_start_threads(m->mdt_readpage_service);
-
/*
* setattr service configuration.
*
* preserve this portal for a certain time, it should be removed
* eventually. LU-617.
*/
- conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = MDS_MAXREQSIZE,
- .psc_max_reply_size = MDS_MAXREPSIZE,
- .psc_req_portal = MDS_SETATTR_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = mdt_min_threads,
- .psc_max_threads = mdt_max_threads,
- .psc_ctx_tags = LCT_MD_THREAD
- };
-
- m->mdt_setattr_service =
- ptlrpc_init_svc_conf(&conf, mdt_regular_handle,
- LUSTRE_MDT_NAME "_setattr",
- procfs_entry, target_print_req,"mdt_attr");
-
- if (!m->mdt_setattr_service) {
- CERROR("failed to start setattr service\n");
- GOTO(err_mdt_svc, rc = -ENOMEM);
- }
-
- rc = ptlrpc_start_threads(m->mdt_setattr_service);
- if (rc)
- GOTO(err_mdt_svc, rc);
-
- /*
- * sequence controller service configuration
- */
- conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = SEQ_MAXREQSIZE,
- .psc_max_reply_size = SEQ_MAXREPSIZE,
- .psc_req_portal = SEQ_CONTROLLER_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = mdt_min_threads,
- .psc_max_threads = mdt_max_threads,
- .psc_ctx_tags = LCT_MD_THREAD
- };
-
- m->mdt_mdsc_service =
- ptlrpc_init_svc_conf(&conf, mdt_mdsc_handle,
- LUSTRE_MDT_NAME"_mdsc",
- procfs_entry, target_print_req,"mdt_mdsc");
- if (!m->mdt_mdsc_service) {
- CERROR("failed to start seq controller service\n");
- GOTO(err_mdt_svc, rc = -ENOMEM);
- }
-
- rc = ptlrpc_start_threads(m->mdt_mdsc_service);
- if (rc)
- GOTO(err_mdt_svc, rc);
-
- /*
- * metadata sequence server service configuration
- */
- conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = SEQ_MAXREQSIZE,
- .psc_max_reply_size = SEQ_MAXREPSIZE,
- .psc_req_portal = SEQ_METADATA_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = mdt_min_threads,
- .psc_max_threads = mdt_max_threads,
- .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
- };
-
- m->mdt_mdss_service =
- ptlrpc_init_svc_conf(&conf, mdt_mdss_handle,
- LUSTRE_MDT_NAME"_mdss",
- procfs_entry, target_print_req,"mdt_mdss");
- if (!m->mdt_mdss_service) {
- CERROR("failed to start metadata seq server service\n");
- GOTO(err_mdt_svc, rc = -ENOMEM);
- }
-
- rc = ptlrpc_start_threads(m->mdt_mdss_service);
- if (rc)
- GOTO(err_mdt_svc, rc);
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MDT_NAME "_setattr",
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = MDS_NBUFS,
+ .bc_buf_size = MDS_BUFSIZE,
+ .bc_req_max_size = MDS_MAXREQSIZE,
+ .bc_rep_max_size = MDS_MAXREPSIZE,
+ .bc_req_portal = MDS_SETATTR_PORTAL,
+ .bc_rep_portal = MDC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "mdt_attr",
+ .tc_nthrs_min = mdt_min_threads,
+ .tc_nthrs_max = mdt_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mdt_regular_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
+ m->mdt_setattr_service = ptlrpc_register_service(&conf, procfs_entry);
+ if (IS_ERR(m->mdt_setattr_service)) {
+ rc = PTR_ERR(m->mdt_setattr_service);
+ CERROR("failed to start setattr service: %d\n", rc);
+ m->mdt_setattr_service = NULL;
+
+ GOTO(err_mdt_svc, rc);
+ }
+ /*
+ * sequence controller service configuration
+ */
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MDT_NAME "_mdsc",
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = MDS_NBUFS,
+ .bc_buf_size = MDS_BUFSIZE,
+ .bc_req_max_size = SEQ_MAXREQSIZE,
+ .bc_rep_max_size = SEQ_MAXREPSIZE,
+ .bc_req_portal = SEQ_CONTROLLER_PORTAL,
+ .bc_rep_portal = MDC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "mdt_mdsc",
+ .tc_nthrs_min = mdt_min_threads,
+ .tc_nthrs_max = mdt_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mdt_mdsc_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
+ m->mdt_mdsc_service = ptlrpc_register_service(&conf, procfs_entry);
+ if (IS_ERR(m->mdt_mdsc_service)) {
+ rc = PTR_ERR(m->mdt_mdsc_service);
+ CERROR("failed to start seq controller service: %d\n", rc);
+ m->mdt_mdsc_service = NULL;
+
+ GOTO(err_mdt_svc, rc);
+ }
- /*
- * Data sequence server service configuration. We want to have really
- * cluster-wide sequences space. This is why we start only one sequence
- * controller which manages space.
- */
- conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = SEQ_MAXREQSIZE,
- .psc_max_reply_size = SEQ_MAXREPSIZE,
- .psc_req_portal = SEQ_DATA_PORTAL,
- .psc_rep_portal = OSC_REPLY_PORTAL,
- .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = mdt_min_threads,
- .psc_max_threads = mdt_max_threads,
- .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
+ /*
+ * metadata sequence server service configuration
+ */
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MDT_NAME "_mdss",
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = MDS_NBUFS,
+ .bc_buf_size = MDS_BUFSIZE,
+ .bc_req_max_size = SEQ_MAXREQSIZE,
+ .bc_rep_max_size = SEQ_MAXREPSIZE,
+ .bc_req_portal = SEQ_METADATA_PORTAL,
+ .bc_rep_portal = MDC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "mdt_mdss",
+ .tc_nthrs_min = mdt_min_threads,
+ .tc_nthrs_max = mdt_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD | \
+ LCT_DT_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mdt_mdss_handle,
+ .so_req_printer = target_print_req,
+ },
};
+ m->mdt_mdss_service = ptlrpc_register_service(&conf, procfs_entry);
+ if (IS_ERR(m->mdt_mdss_service)) {
+ rc = PTR_ERR(m->mdt_mdss_service);
+ CERROR("failed to start metadata seq server service: %d\n", rc);
+ m->mdt_mdss_service = NULL;
- m->mdt_dtss_service =
- ptlrpc_init_svc_conf(&conf, mdt_dtss_handle,
- LUSTRE_MDT_NAME"_dtss",
- procfs_entry, target_print_req,"mdt_dtss");
- if (!m->mdt_dtss_service) {
- CERROR("failed to start data seq server service\n");
- GOTO(err_mdt_svc, rc = -ENOMEM);
- }
+ GOTO(err_mdt_svc, rc);
+ }
- rc = ptlrpc_start_threads(m->mdt_dtss_service);
- if (rc)
- GOTO(err_mdt_svc, rc);
-
- /* FLD service start */
- conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = FLD_MAXREQSIZE,
- .psc_max_reply_size = FLD_MAXREPSIZE,
- .psc_req_portal = FLD_REQUEST_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
- .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = mdt_min_threads,
- .psc_max_threads = mdt_max_threads,
- .psc_ctx_tags = LCT_DT_THREAD|LCT_MD_THREAD
+ /*
+ * Data sequence server service configuration. We want to have really
+ * cluster-wide sequences space. This is why we start only one sequence
+ * controller which manages space.
+ */
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MDT_NAME "_dtss",
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = MDS_NBUFS,
+ .bc_buf_size = MDS_BUFSIZE,
+ .bc_req_max_size = SEQ_MAXREQSIZE,
+ .bc_rep_max_size = SEQ_MAXREPSIZE,
+ .bc_req_portal = SEQ_DATA_PORTAL,
+ .bc_rep_portal = OSC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "mdt_dtss",
+ .tc_nthrs_min = mdt_min_threads,
+ .tc_nthrs_max = mdt_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD | \
+ LCT_DT_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mdt_dtss_handle,
+ .so_req_printer = target_print_req,
+ },
};
+ m->mdt_dtss_service = ptlrpc_register_service(&conf, procfs_entry);
+ if (IS_ERR(m->mdt_dtss_service)) {
+ rc = PTR_ERR(m->mdt_dtss_service);
+ CERROR("failed to start data seq server service: %d\n", rc);
+ m->mdt_dtss_service = NULL;
- m->mdt_fld_service =
- ptlrpc_init_svc_conf(&conf, mdt_fld_handle,
- LUSTRE_MDT_NAME"_fld",
- procfs_entry, target_print_req, "mdt_fld");
- if (!m->mdt_fld_service) {
- CERROR("failed to start fld service\n");
- GOTO(err_mdt_svc, rc = -ENOMEM);
- }
-
- rc = ptlrpc_start_threads(m->mdt_fld_service);
- if (rc)
- GOTO(err_mdt_svc, rc);
+ GOTO(err_mdt_svc, rc);
+ }
- /*
- * mds-mds service configuration. Separate portal is used to allow
- * mds-mds requests be not blocked during recovery.
- */
- conf = (typeof(conf)) {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = MDS_MAXREQSIZE,
- .psc_max_reply_size = MDS_MAXREPSIZE,
- .psc_req_portal = MDS_MDS_PORTAL,
- .psc_rep_portal = MDC_REPLY_PORTAL,
+ /* FLD service start */
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MDT_NAME "_fld",
.psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = mdt_min_threads,
- .psc_max_threads = mdt_max_threads,
- .psc_ctx_tags = LCT_MD_THREAD
- };
- m->mdt_xmds_service =
- ptlrpc_init_svc_conf(&conf, mdt_xmds_handle,
- LUSTRE_MDT_NAME "_mds",
- procfs_entry, target_print_req,"mdt_xmds");
+ .psc_buf = {
+ .bc_nbufs = MDS_NBUFS,
+ .bc_buf_size = MDS_BUFSIZE,
+ .bc_req_max_size = FLD_MAXREQSIZE,
+ .bc_rep_max_size = FLD_MAXREPSIZE,
+ .bc_req_portal = FLD_REQUEST_PORTAL,
+ .bc_rep_portal = MDC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "mdt_fld",
+ .tc_nthrs_min = mdt_min_threads,
+ .tc_nthrs_max = mdt_max_threads,
+ .tc_ctx_tags = LCT_DT_THREAD | \
+ LCT_MD_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mdt_fld_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
+ m->mdt_fld_service = ptlrpc_register_service(&conf, procfs_entry);
+ if (IS_ERR(m->mdt_fld_service)) {
+ rc = PTR_ERR(m->mdt_fld_service);
+ CERROR("failed to start fld service: %d\n", rc);
+ m->mdt_fld_service = NULL;
+
+ GOTO(err_mdt_svc, rc);
+ }
- if (m->mdt_xmds_service == NULL) {
- CERROR("failed to start xmds service\n");
- GOTO(err_mdt_svc, rc = -ENOMEM);
+ /*
+ * mds-mds service configuration. Separate portal is used to allow
+ * mds-mds requests be not blocked during recovery.
+ */
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MDT_NAME "_mds",
+ .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = MDS_NBUFS,
+ .bc_buf_size = MDS_BUFSIZE,
+ .bc_req_max_size = MDS_MAXREQSIZE,
+ .bc_rep_max_size = MDS_MAXREPSIZE,
+ .bc_req_portal = MDS_MDS_PORTAL,
+ .bc_rep_portal = MDC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "mdt_mds",
+ .tc_nthrs_min = mdt_min_threads,
+ .tc_nthrs_max = mdt_max_threads,
+ .tc_ctx_tags = LCT_MD_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mdt_xmds_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
+ m->mdt_xmds_service = ptlrpc_register_service(&conf, procfs_entry);
+ if (IS_ERR(m->mdt_xmds_service)) {
+ rc = PTR_ERR(m->mdt_xmds_service);
+ CERROR("failed to start xmds service: %d\n", rc);
+ m->mdt_xmds_service = NULL;
+
+ GOTO(err_mdt_svc, rc);
}
- rc = ptlrpc_start_threads(m->mdt_xmds_service);
- if (rc)
- GOTO(err_mdt_svc, rc);
-
EXIT;
err_mdt_svc:
if (rc)
/* Start the MGS obd */
static int mgs_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
+ static struct ptlrpc_service_conf conf;
struct lprocfs_static_vars lvars;
struct mgs_obd *mgs = &obd->u.mgs;
struct lustre_mount_info *lmi;
GOTO(err_llog, rc);
}
+ conf = (typeof(conf)) {
+ .psc_name = LUSTRE_MGS_NAME,
+ .psc_watchdog_factor = MGS_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = MGS_NBUFS,
+ .bc_buf_size = MGS_BUFSIZE,
+ .bc_req_max_size = MGS_MAXREQSIZE,
+ .bc_rep_max_size = MGS_MAXREPSIZE,
+ .bc_req_portal = MGS_REQUEST_PORTAL,
+ .bc_rep_portal = MGC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "ll_mgs",
+ .tc_nthrs_min = MGS_THREADS_AUTO_MIN,
+ .tc_nthrs_max = MGS_THREADS_AUTO_MAX,
+ .tc_ctx_tags = LCT_MD_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = mgs_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
/* Start the service threads */
- mgs->mgs_service =
- ptlrpc_init_svc(MGS_NBUFS, MGS_BUFSIZE, MGS_MAXREQSIZE,
- MGS_MAXREPSIZE, MGS_REQUEST_PORTAL,
- MGC_REPLY_PORTAL, 2,
- mgs_handle, LUSTRE_MGS_NAME,
- obd->obd_proc_entry, target_print_req,
- MGS_THREADS_AUTO_MIN, MGS_THREADS_AUTO_MAX,
- "ll_mgs", LCT_MD_THREAD, NULL);
-
- if (!mgs->mgs_service) {
- CERROR("failed to start service\n");
- GOTO(err_llog, rc = -ENOMEM);
+ mgs->mgs_service = ptlrpc_register_service(&conf, obd->obd_proc_entry);
+ if (IS_ERR(mgs->mgs_service)) {
+ rc = PTR_ERR(mgs->mgs_service);
+ CERROR("failed to start service: %d\n", rc);
+ GOTO(err_llog, rc);
}
- rc = ptlrpc_start_threads(mgs->mgs_service);
- if (rc)
- GOTO(err_thread, rc);
-
ping_evictor_start();
CDEBUG(D_INFO, "MGS %s started\n", obd->obd_name);
RETURN(0);
-err_thread:
- ptlrpc_unregister_service(mgs->mgs_service);
err_llog:
lproc_mgs_cleanup(obd);
obd_llog_finish(obd, 0);
#define MGSSELF_NAME "_mgs"
+#define MGS_SERVICE_WATCHDOG_FACTOR 2
+
/* -- imperative recovery control data structures -- */
/**
* restarting targets.
/* Sigh - really, this is an OSS, the _server_, not the _target_ */
static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
{
+ static struct ptlrpc_service_conf svc_conf;
struct ost_obd *ost = &obd->u.ost;
struct lprocfs_static_vars lvars;
int oss_min_threads;
oss_max_threads = min(OSS_THREADS_MAX, oss_min_threads * 4 + 1);
}
- ost->ost_service =
- ptlrpc_init_svc(OST_NBUFS, OST_BUFSIZE, OST_MAXREQSIZE,
- OST_MAXREPSIZE, OST_REQUEST_PORTAL,
- OSC_REPLY_PORTAL, OSS_SERVICE_WATCHDOG_FACTOR,
- ost_handle, LUSTRE_OSS_NAME,
- obd->obd_proc_entry, target_print_req,
- oss_min_threads, oss_max_threads,
- "ll_ost", LCT_DT_THREAD, NULL);
- if (ost->ost_service == NULL) {
- CERROR("failed to start service\n");
- GOTO(out_lprocfs, rc = -ENOMEM);
+ svc_conf = (typeof(svc_conf)) {
+ .psc_name = LUSTRE_OSS_NAME,
+ .psc_watchdog_factor = OSS_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = OST_NBUFS,
+ .bc_buf_size = OST_BUFSIZE,
+ .bc_req_max_size = OST_MAXREQSIZE,
+ .bc_rep_max_size = OST_MAXREPSIZE,
+ .bc_req_portal = OST_REQUEST_PORTAL,
+ .bc_rep_portal = OSC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "ll_ost",
+ .tc_nthrs_min = oss_min_threads,
+ .tc_nthrs_max = oss_max_threads,
+ .tc_ctx_tags = LCT_DT_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = ost_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
+ ost->ost_service = ptlrpc_register_service(&svc_conf,
+ obd->obd_proc_entry);
+ if (IS_ERR(ost->ost_service)) {
+ rc = PTR_ERR(ost->ost_service);
+ CERROR("failed to start service: %d\n", rc);
+ GOTO(out_lprocfs, rc);
}
- rc = ptlrpc_start_threads(ost->ost_service);
- if (rc)
- GOTO(out_service, rc = -EINVAL);
-
if (oss_num_create_threads) {
if (oss_num_create_threads > OSS_MAX_CREATE_THREADS)
oss_num_create_threads = OSS_MAX_CREATE_THREADS;
oss_max_create_threads = OSS_MAX_CREATE_THREADS;
}
- ost->ost_create_service =
- ptlrpc_init_svc(OST_NBUFS, OST_BUFSIZE, OST_MAXREQSIZE,
- OST_MAXREPSIZE, OST_CREATE_PORTAL,
- OSC_REPLY_PORTAL, OSS_SERVICE_WATCHDOG_FACTOR,
- ost_handle, "ost_create",
- obd->obd_proc_entry, target_print_req,
- oss_min_create_threads, oss_max_create_threads,
- "ll_ost_creat", LCT_DT_THREAD, NULL);
- if (ost->ost_create_service == NULL) {
- CERROR("failed to start OST create service\n");
- GOTO(out_service, rc = -ENOMEM);
+ svc_conf = (typeof(svc_conf)) {
+ .psc_name = "ost_create",
+ .psc_watchdog_factor = OSS_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = OST_NBUFS,
+ .bc_buf_size = OST_BUFSIZE,
+ .bc_req_max_size = OST_MAXREQSIZE,
+ .bc_rep_max_size = OST_MAXREPSIZE,
+ .bc_req_portal = OST_CREATE_PORTAL,
+ .bc_rep_portal = OSC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "ll_ost_create",
+ .tc_nthrs_min = oss_min_create_threads,
+ .tc_nthrs_max = oss_max_create_threads,
+ .tc_ctx_tags = LCT_DT_THREAD,
+ },
+ .psc_ops = {
+ .so_req_handler = ost_handle,
+ .so_req_printer = target_print_req,
+ },
+ };
+ ost->ost_create_service = ptlrpc_register_service(&svc_conf,
+ obd->obd_proc_entry);
+ if (IS_ERR(ost->ost_create_service)) {
+ rc = PTR_ERR(ost->ost_create_service);
+ CERROR("failed to start OST create service: %d\n", rc);
+ GOTO(out_service, rc);
}
- rc = ptlrpc_start_threads(ost->ost_create_service);
- if (rc)
- GOTO(out_create, rc = -EINVAL);
-
- ost->ost_io_service =
- ptlrpc_init_svc(OST_NBUFS, OST_BUFSIZE, OST_MAXREQSIZE,
- OST_MAXREPSIZE, OST_IO_PORTAL,
- OSC_REPLY_PORTAL, OSS_SERVICE_WATCHDOG_FACTOR,
- ost_handle, "ost_io",
- obd->obd_proc_entry, target_print_req,
- oss_min_threads, oss_max_threads,
- "ll_ost_io", LCT_DT_THREAD, ost_hpreq_handler);
- if (ost->ost_io_service == NULL) {
- CERROR("failed to start OST I/O service\n");
- GOTO(out_create, rc = -ENOMEM);
+ svc_conf = (typeof(svc_conf)) {
+ .psc_name = "ost_io",
+ .psc_watchdog_factor = OSS_SERVICE_WATCHDOG_FACTOR,
+ .psc_buf = {
+ .bc_nbufs = OST_NBUFS,
+ .bc_buf_size = OST_BUFSIZE,
+ .bc_req_max_size = OST_MAXREQSIZE,
+ .bc_rep_max_size = OST_MAXREPSIZE,
+ .bc_req_portal = OST_IO_PORTAL,
+ .bc_rep_portal = OSC_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "ll_ost_io",
+ .tc_nthrs_min = oss_min_threads,
+ .tc_nthrs_max = oss_max_threads,
+ .tc_cpu_affinity = 1,
+ .tc_ctx_tags = LCT_DT_THREAD,
+ },
+ .psc_ops = {
+ .so_thr_init = ost_thread_init,
+ .so_thr_done = ost_thread_done,
+ .so_req_handler = ost_handle,
+ .so_hpreq_handler = ost_hpreq_handler,
+ .so_req_printer = target_print_req,
+ },
+ };
+ ost->ost_io_service = ptlrpc_register_service(&svc_conf,
+ obd->obd_proc_entry);
+ if (IS_ERR(ost->ost_io_service)) {
+ rc = PTR_ERR(ost->ost_io_service);
+ CERROR("failed to start OST I/O service: %d\n", rc);
+ ost->ost_io_service = NULL;
+ GOTO(out_create, rc);
}
- ost->ost_io_service->srv_init = ost_thread_init;
- ost->ost_io_service->srv_done = ost_thread_done;
- ost->ost_io_service->srv_cpu_affinity = 1;
- rc = ptlrpc_start_threads(ost->ost_io_service);
- if (rc)
- GOTO(out_io, rc = -EINVAL);
-
ping_evictor_start();
RETURN(0);
-out_io:
- ptlrpc_unregister_service(ost->ost_io_service);
- ost->ost_io_service = NULL;
out_create:
ptlrpc_unregister_service(ost->ost_create_service);
ost->ost_create_service = NULL;
ptlrpc_unregister_service(ost->ost_io_service);
ost->ost_service = NULL;
ost->ost_create_service = NULL;
+ ost->ost_io_service = NULL;
+
cfs_mutex_unlock(&ost->ost_health_mutex);
lprocfs_obd_cleanup(obd);
return srhi;
}
-/* common ost/mdt srv_req_printfn */
+/* common ost/mdt so_req_printer */
void target_print_req(void *seq_file, struct ptlrpc_request *req)
{
/* Called holding srv_lock with irqs disabled.
req->rq_arrival_time.tv_sec,
req->rq_sent - req->rq_arrival_time.tv_sec,
req->rq_sent - req->rq_deadline);
- if (svc->srv_req_printfn == NULL)
- seq_printf(s, "\n");
- else
- svc->srv_req_printfn(s, srhi->srhi_req);
+ if (svc->srv_ops.so_req_printer == NULL)
+ seq_printf(s, "\n");
+ else
+ svc->srv_ops.so_req_printer(s, srhi->srhi_req);
}
cfs_spin_unlock(&svc->srv_lock);
EXPORT_SYMBOL(ptlrpc_save_lock);
EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);
EXPORT_SYMBOL(ptlrpc_commit_replies);
-EXPORT_SYMBOL(ptlrpc_init_svc);
+EXPORT_SYMBOL(ptlrpc_register_service);
EXPORT_SYMBOL(ptlrpc_stop_all_threads);
EXPORT_SYMBOL(ptlrpc_start_threads);
EXPORT_SYMBOL(ptlrpc_start_thread);
return (-1);
}
-/**
- * Start a service with parameters from struct ptlrpc_service_conf \a c
- * as opposed to directly calling ptlrpc_init_svc with tons of arguments.
- */
-struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
- svc_handler_t h, char *name,
- struct proc_dir_entry *proc_entry,
- svc_req_printfn_t prntfn,
- char *threadname)
-{
- return ptlrpc_init_svc(c->psc_nbufs, c->psc_bufsize,
- c->psc_max_req_size, c->psc_max_reply_size,
- c->psc_req_portal, c->psc_rep_portal,
- c->psc_watchdog_factor,
- h, name, proc_entry,
- prntfn, c->psc_min_threads, c->psc_max_threads,
- threadname, c->psc_ctx_tags, NULL);
-}
-EXPORT_SYMBOL(ptlrpc_init_svc_conf);
-
static void ptlrpc_at_timer(unsigned long castmeharder)
{
struct ptlrpc_service *svc = (struct ptlrpc_service *)castmeharder;
* Initialize service on a given portal.
* This includes starting serving threads , allocating and posting rqbds and
* so on.
- * \a nbufs is how many buffers to post
- * \a bufsize is buffer size to post
- * \a max_req_size - maximum request size to be accepted for this service
- * \a max_reply_size maximum reply size this service can ever send
- * \a req_portal - portal to listed for requests on
- * \a rep_portal - portal of where to send replies to
- * \a watchdog_factor soft watchdog timeout multiplifier to print stuck service traces.
- * \a handler - function to process every new request
- * \a name - service name
- * \a proc_entry - entry in the /proc tree for sttistics reporting
- * \a min_threads \a max_threads - min/max number of service threads to start.
- * \a threadname should be 11 characters or less - 3 will be added on
- * \a hp_handler - function to determine priority of the request, also called
- * on every new request.
*/
struct ptlrpc_service *
-ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size,
- int req_portal, int rep_portal, int watchdog_factor,
- svc_handler_t handler, char *name,
- cfs_proc_dir_entry_t *proc_entry,
- svc_req_printfn_t svcreq_printfn,
- int min_threads, int max_threads,
- char *threadname, __u32 ctx_tags,
- svc_hpreq_handler_t hp_handler)
-{
- int rc;
- struct ptlrpc_at_array *array;
- struct ptlrpc_service *service;
- unsigned int size, index;
- ENTRY;
+ptlrpc_register_service(struct ptlrpc_service_conf *conf,
+ cfs_proc_dir_entry_t *proc_entry)
+{
+ struct ptlrpc_service *service;
+ struct ptlrpc_at_array *array;
+ unsigned int index;
+ unsigned int size;
+ int rc;
+ ENTRY;
- LASSERT (nbufs > 0);
- LASSERT (bufsize >= max_req_size + SPTLRPC_MAX_PAYLOAD);
- LASSERT (ctx_tags != 0);
+ LASSERT(conf->psc_buf.bc_nbufs > 0);
+ LASSERT(conf->psc_buf.bc_buf_size >=
+ conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
+ LASSERT(conf->psc_thr.tc_ctx_tags != 0);
- OBD_ALLOC_PTR(service);
- if (service == NULL)
- RETURN(NULL);
+ OBD_ALLOC_PTR(service);
+ if (service == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
/* First initialise enough for early teardown */
- service->srv_name = name;
cfs_spin_lock_init(&service->srv_lock);
cfs_spin_lock_init(&service->srv_rq_lock);
cfs_spin_lock_init(&service->srv_rs_lock);
CFS_INIT_LIST_HEAD(&service->srv_threads);
cfs_waitq_init(&service->srv_waitq);
- service->srv_nbuf_per_group = test_req_buffer_pressure ? 1 : nbufs;
- service->srv_max_req_size = max_req_size + SPTLRPC_MAX_PAYLOAD;
- service->srv_buf_size = bufsize;
- service->srv_rep_portal = rep_portal;
- service->srv_req_portal = req_portal;
- service->srv_watchdog_factor = watchdog_factor;
- service->srv_handler = handler;
- service->srv_req_printfn = svcreq_printfn;
- service->srv_request_seq = 1; /* valid seq #s start at 1 */
- service->srv_request_max_cull_seq = 0;
- service->srv_threads_min = min_threads;
- service->srv_threads_max = max_threads;
- service->srv_thread_name = threadname;
- service->srv_ctx_tags = ctx_tags;
- service->srv_hpreq_handler = hp_handler;
- service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
- service->srv_hpreq_count = 0;
- service->srv_n_active_hpreq = 0;
+ service->srv_name = conf->psc_name;
+ service->srv_watchdog_factor = conf->psc_watchdog_factor;
+ service->srv_nbuf_per_group = test_req_buffer_pressure ?
+ 1 : conf->psc_buf.bc_nbufs;
+ service->srv_max_req_size = conf->psc_buf.bc_req_max_size +
+ SPTLRPC_MAX_PAYLOAD;
+ service->srv_buf_size = conf->psc_buf.bc_buf_size;
+ service->srv_rep_portal = conf->psc_buf.bc_rep_portal;
+ service->srv_req_portal = conf->psc_buf.bc_req_portal;
+ service->srv_request_seq = 1; /* valid seq #s start at 1 */
+ service->srv_request_max_cull_seq = 0;
+ service->srv_threads_min = conf->psc_thr.tc_nthrs_min;
+ service->srv_threads_max = conf->psc_thr.tc_nthrs_max;
+ service->srv_thread_name = conf->psc_thr.tc_thr_name;
+ service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags;
+ service->srv_cpu_affinity = !!conf->psc_thr.tc_cpu_affinity;
+ service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
+ service->srv_hpreq_count = 0;
+ service->srv_n_active_hpreq = 0;
+ service->srv_ops = conf->psc_ops;
rc = LNetSetLazyPortal(service->srv_req_portal);
LASSERT (rc == 0);
/* allocate memory for srv_at_array (ptlrpc_at_array) */
OBD_ALLOC(array->paa_reqs_array, sizeof(cfs_list_t) * size);
if (array->paa_reqs_array == NULL)
- GOTO(failed, NULL);
+ GOTO(failed, rc = -ENOMEM);
- for (index = 0; index < size; index++)
- CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
+ for (index = 0; index < size; index++)
+ CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
- OBD_ALLOC(array->paa_reqs_count, sizeof(__u32) * size);
- if (array->paa_reqs_count == NULL)
- GOTO(failed, NULL);
+ OBD_ALLOC(array->paa_reqs_count, sizeof(__u32) * size);
+ if (array->paa_reqs_count == NULL)
+ GOTO(failed, rc = -ENOMEM);
cfs_timer_init(&service->srv_at_timer, ptlrpc_at_timer, service);
/* At SOW, service time should be quick; 10s seems generous. If client
/* We shouldn't be under memory pressure at startup, so
* fail if we can't post all our buffers at this time. */
if (rc != 0)
- GOTO(failed, NULL);
+ GOTO(failed, rc = -ENOMEM);
/* Now allocate pool of reply buffers */
/* Increase max reply size to next power of two */
service->srv_max_reply_size = 1;
while (service->srv_max_reply_size <
- max_reply_size + SPTLRPC_MAX_PAYLOAD)
+ conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
service->srv_max_reply_size <<= 1;
if (proc_entry != NULL)
CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
service->srv_name, service->srv_req_portal);
- RETURN(service);
+#ifdef __KERNEL__
+ rc = ptlrpc_start_threads(service);
+ if (rc != 0) {
+ CERROR("Failed to start threads for service %s: %d\n",
+ service->srv_name, rc);
+ GOTO(failed, rc);
+ }
+#endif
+
+ RETURN(service);
failed:
- ptlrpc_unregister_service(service);
- return NULL;
+ ptlrpc_unregister_service(service);
+ RETURN(ERR_PTR(rc));
}
/**
int rc = 0;
ENTRY;
- if (svc->srv_hpreq_handler) {
- rc = svc->srv_hpreq_handler(req);
+ if (svc->srv_ops.so_hpreq_handler) {
+ rc = svc->srv_ops.so_hpreq_handler(req);
if (rc)
RETURN(rc);
}
if (svc->srv_n_active_reqs >= svc->srv_threads_running - 1)
return 0;
- return svc->srv_n_active_hpreq > 0 || svc->srv_hpreq_handler == NULL;
+ return svc->srv_n_active_hpreq > 0 ||
+ svc->srv_ops.so_hpreq_handler == NULL;
}
static int ptlrpc_server_normal_pending(struct ptlrpc_service *svc, int force)
if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
- rc = svc->srv_handler(request);
+ rc = svc->srv_ops.so_req_handler(request);
ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
static inline int
ptlrpc_threads_enough(struct ptlrpc_service *svc)
{
- return svc->srv_n_active_reqs <
- svc->srv_threads_running - 1 - (svc->srv_hpreq_handler != NULL);
+ return svc->srv_n_active_reqs <
+ svc->srv_threads_running - 1 -
+ (svc->srv_ops.so_hpreq_handler != NULL);
}
/**
cfs_put_group_info(ginfo);
#endif
- if (svc->srv_init != NULL) {
- rc = svc->srv_init(thread);
+ if (svc->srv_ops.so_thr_init != NULL) {
+ rc = svc->srv_ops.so_thr_init(thread);
if (rc)
goto out;
}
/*
* deconstruct service specific state created by ptlrpc_start_thread()
*/
- if (svc->srv_done != NULL)
- svc->srv_done(thread);
+ if (svc->srv_ops.so_thr_done != NULL)
+ svc->srv_ops.so_thr_done(thread);
if (env != NULL) {
lu_context_fini(&env->le_ctx);