{
int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
- struct ptlrpc_service_conf seq_md_conf = {
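+ /* a single static conf, refilled below for each service; kept off the
+ * stack to cut stack consumption (service setup is assumed to be
+ * serialized, so sharing one static struct is safe) */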
+ static struct ptlrpc_service_conf seq_conf;
+ seq_conf = (typeof(seq_conf)) {
.psc_nbufs = MDS_NBUFS,
.psc_bufsize = MDS_BUFSIZE,
.psc_max_req_size = SEQ_MAXREQSIZE,
.psc_num_threads = SEQ_NUM_THREADS,
.psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
};
- struct ptlrpc_service_conf seq_dt_conf = {
- .psc_nbufs = MDS_NBUFS,
- .psc_bufsize = MDS_BUFSIZE,
- .psc_max_req_size = SEQ_MAXREQSIZE,
- .psc_max_reply_size = SEQ_MAXREPSIZE,
- .psc_req_portal = SEQ_DATA_PORTAL,
- .psc_rep_portal = OSC_REPLY_PORTAL,
- .psc_watchdog_timeout = SEQ_SERVICE_WATCHDOG_TIMEOUT,
- .psc_num_threads = SEQ_NUM_THREADS,
- .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
- };
+
ENTRY;
LASSERT(dev != NULL);
if (rc)
GOTO(out, rc);
- seq->lss_md_service = ptlrpc_init_svc_conf(&seq_md_conf,
+ seq->lss_md_service = ptlrpc_init_svc_conf(&seq_conf,
seq_req_handle,
LUSTRE_SEQ_NAME"_md",
seq->lss_proc_entry,
* we want to have really cluster-wide sequences space. This is why we
* start only one sequence controller which manages space.
*/
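+ /* refill the shared static conf, this time for the "_dt" service */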
+ seq_conf = (typeof(seq_conf)) {
+ .psc_nbufs = MDS_NBUFS,
+ .psc_bufsize = MDS_BUFSIZE,
+ .psc_max_req_size = SEQ_MAXREQSIZE,
+ .psc_max_reply_size = SEQ_MAXREPSIZE,
+ .psc_req_portal = SEQ_DATA_PORTAL,
+ .psc_rep_portal = OSC_REPLY_PORTAL,
+ .psc_watchdog_timeout = SEQ_SERVICE_WATCHDOG_TIMEOUT,
+ .psc_num_threads = SEQ_NUM_THREADS,
+ .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
+ };
if (is_srv) {
- seq->lss_dt_service = ptlrpc_init_svc_conf(&seq_dt_conf,
+ seq->lss_dt_service = ptlrpc_init_svc_conf(&seq_conf,
seq_req_handle,
LUSTRE_SEQ_NAME"_dt",
seq->lss_proc_entry,
osfs = req_capsule_server_get(&info->mti_pill, &RMF_OBD_STATFS);
/* XXX max_age optimisation is needed here. See mds_statfs */
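+ /* the kstatfs scratch buffer lives in the per-thread mti_u union */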
result = next->md_ops->mdo_statfs(info->mti_ctxt,
- next, &info->mti_sfs);
- statfs_pack(osfs, &info->mti_sfs);
+ next, &info->mti_u.ksfs);
+ statfs_pack(osfs, &info->mti_u.ksfs);
}
RETURN(result);
{
struct ptlrpc_request *req = mdt_info_req(info);
struct ptlrpc_bulk_desc *desc;
- struct l_wait_info lwi;
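+ /* the wait info sits in the per-thread union instead of on the stack */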
+ struct l_wait_info *lwi = &info->mti_u.rdpg.mti_wait_info;
int tmpcount;
int tmpsize;
int i;
if (MDT_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
GOTO(abort_bulk, rc);
- lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
- rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), &lwi);
+ *lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
+ rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), lwi);
LASSERT (rc == 0 || rc == -ETIMEDOUT);
if (rc == 0) {
static int mdt_readpage(struct mdt_thread_info *info)
{
struct mdt_object *object = info->mti_object;
- struct lu_rdpg *rdpg = &info->mti_rdpg;
+ struct lu_rdpg *rdpg = &info->mti_u.rdpg.mti_rdpg;
struct mdt_body *reqbody;
struct mdt_body *repbody;
int rc;
static int mdt_start_ptlrpc_service(struct mdt_device *m)
{
int rc;
- struct ptlrpc_service_conf conf = {
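+ /* too large for the stack; static is safe here on the assumption that
+ * mdt setup runs serialized */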
+ static struct ptlrpc_service_conf conf;
+ ENTRY;
+
+ conf = (typeof(conf)) {
.psc_nbufs = MDS_NBUFS,
.psc_bufsize = MDS_BUFSIZE,
.psc_max_req_size = MDS_MAXREQSIZE,
.psc_ctx_tags = LCT_MD_THREAD
};
- ENTRY;
-
m->mdt_ldlm_client = &m->mdt_md_dev.md_lu_dev.ld_obd->obd_ldlm_client;
ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
"mdt_ldlm_client", m->mdt_ldlm_client);
static void mdt_stack_fini(const struct lu_context *ctx,
struct mdt_device *m, struct lu_device *top)
{
- struct lu_device *d = top, *n;
- struct lustre_cfg_bufs bufs;
- struct lustre_cfg *lcfg;
+ struct lu_device *d = top, *n;
+ struct lustre_cfg_bufs *bufs;
+ struct lustre_cfg *lcfg;
+ struct mdt_thread_info *info;
+ ENTRY;
+
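+ /* borrow per-thread storage so lustre_cfg_bufs is not on the stack */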
+ info = lu_context_key_get(ctx, &mdt_thread_key);
+ LASSERT(info != NULL);
+ bufs = &info->mti_u.bufs;
/* process cleanup */
- lustre_cfg_bufs_reset(&bufs, NULL);
- lcfg = lustre_cfg_new(LCFG_CLEANUP, &bufs);
+ lustre_cfg_bufs_reset(bufs, NULL);
+ lcfg = lustre_cfg_new(LCFG_CLEANUP, bufs);
if (!lcfg) {
CERROR("Cannot alloc lcfg!\n");
return;
struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
struct mdt_thread_info *info;
struct lu_context ctxt;
- struct md_attr ma;
int rc = 0;
ENTRY;
info = lu_context_key_get(&ctxt, &mdt_thread_key);
LASSERT(info != NULL);
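+ /* clear the whole info so that mti_attr, handed to mdt_mfd_close()
+ * below, starts out zeroed */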
+ memset(info, 0, sizeof(*info));
/* Close any open files (which may also cause orphan unlinking). */
spin_lock(&med->med_open_lock);
while (!list_empty(&med->med_open_head)) {
class_handle_unhash(&mfd->mfd_handle);
list_del_init(&mfd->mfd_list);
spin_unlock(&med->med_open_lock);
- mdt_mfd_close(&ctxt, mdt, mfd, &ma);
+ mdt_mfd_close(&ctxt, mdt, mfd, &info->mti_attr);
/* TODO: if we close the unlinked file,
* we need to remove its objects from the OST */
mdt_object_put(&ctxt, o);
* for req-layout interface.
*/
struct req_capsule mti_pill;
- /*
- * buffer for mdt_statfs().
- *
- * XXX this is probably huge overkill, because statfs is not that
- * frequent.
- */
+ /* transaction number of current request */
+ __u64 mti_transno;
+ __u32 mti_trans_flags;
- struct kstatfs mti_sfs;
- /* temporary stuff used by thread */
+ /* temporary stuff used by the thread to save stack consumption;
+ * where fields share a union, their users must never overlap */
+
struct lu_fid mti_tmp_fid1;
struct lu_fid mti_tmp_fid2;
- ldlm_policy_data_t mti_policy;
- struct ldlm_res_id mti_res_id;
+ ldlm_policy_data_t mti_policy; /* for mdt_object_lock() */
+ struct ldlm_res_id mti_res_id; /* for mdt_object_lock() */
union {
- struct obd_uuid uuid;
- char ns_name[48];
+ struct obd_uuid uuid; /* for mdt_seq_init_cli() */
+ char ns_name[48]; /* for mdt_init0() */
+ struct lustre_cfg_bufs bufs; /* for mdt_stack_fini() */
+ struct kstatfs ksfs; /* for mdt_statfs() */
+ struct {
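+ /* rdpg and the wait info share one struct rather than
+ * separate union arms: mdt_readpage() still uses the rdpg
+ * while mdt_sendpage() needs the wait info */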
+ /* for mdt_readpage() */
+ struct lu_rdpg mti_rdpg;
+ /* for mdt_sendpage() */
+ struct l_wait_info mti_wait_info;
+ } rdpg;
} mti_u;
- /* transaction number of current request */
- __u64 mti_transno;
- __u32 mti_trans_flags;
-
- /* readdir hint structure */
- struct lu_rdpg mti_rdpg;
};
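+
+ /*
+ * Illustrative union discipline (not enforced by the compiler): each
+ * handler owns exactly one mti_u member for the duration of a request,
+ * e.g. mdt_statfs() uses mti_u.ksfs and mdt_readpage() uses mti_u.rdpg;
+ * no call path may have two members live at once.
+ */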
/*
* Info allocated per-transaction.