* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
+ * Copyright (c) 2011 Whamcloud, Inc.
+ */
+/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define REQUEST_MINOR 244
+/* Async arguments for a capa-renewal RPC, stored in the request's
+ * rq_async_args and consumed by mdc_interpret_renew_capa(). */
+struct mdc_renew_capa_args {
+ struct obd_capa *ra_oc; /* capa being renewed; handed to ra_cb */
+ renew_capa_cb_t ra_cb; /* completion callback invoked with the result */
+};
+
static quota_interface_t *quota_interface;
extern quota_interface_t mdc_quota_interface;
struct md_open_data *mod = och->och_mod;
ENTRY;
- LASSERT(mod != LP_POISON && mod != NULL);
+ /**
+ * It is possible to not have \var mod in a case of eviction between
+ * lookup and ll_file_open().
+ **/
+ if (mod == NULL)
+ RETURN(0);
+
+ LASSERT(mod != LP_POISON);
mod->mod_och = NULL;
och->och_mod = NULL;
* exists and return no error in that case
*/
if (mod) {
+ DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
LASSERT(mod->mod_open_req != NULL);
if (mod->mod_open_req->rq_committed)
rc = 0;
#endif
int mdc_readpage(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, __u64 offset, struct page *page,
- struct ptlrpc_request **request)
+ struct obd_capa *oc, __u64 offset, struct page **pages,
+ unsigned npages, struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
struct ptlrpc_bulk_desc *desc;
+ int i;
+ cfs_waitq_t waitq; /* only used as a timed-sleep anchor below */
+ int resends = 0;
+ struct l_wait_info lwi;
int rc;
ENTRY;
*request = NULL;
+ cfs_waitq_init(&waitq);
+
+/* On a bulk timeout the whole request is rebuilt and resent from here */
+restart_bulk:
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
if (req == NULL)
RETURN(-ENOMEM);
req->rq_request_portal = MDS_READPAGE_PORTAL;
ptlrpc_at_set_req_timeout(req);
- desc = ptlrpc_prep_bulk_imp(req, 1, BULK_PUT_SINK, MDS_BULK_PORTAL);
+ /* one bulk descriptor now carries all npages pages */
+ desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK,
+ MDS_BULK_PORTAL);
if (desc == NULL) {
ptlrpc_request_free(req);
RETURN(-ENOMEM);
}
/* NB req now owns desc and will free it when it gets freed */
- ptlrpc_prep_bulk_page(desc, page, 0, CFS_PAGE_SIZE);
- mdc_readdir_pack(req, offset, CFS_PAGE_SIZE, fid, oc);
+ for (i = 0; i < npages; i++)
+ ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
+
+ mdc_readdir_pack(req, offset, CFS_PAGE_SIZE * npages, fid, oc);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (rc) {
ptlrpc_req_finished(req);
- RETURN(rc);
+ /* only timeouts are retried; any other failure is final */
+ if (rc != -ETIMEDOUT)
+ RETURN(rc);
+
+ resends++;
+ if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
+ CERROR("too many resend retries, returning error\n");
+ RETURN(-EIO);
+ }
+ /* back off ~resends seconds (condition 0 => pure timed sleep) */
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
+ l_wait_event(waitq, 0, &lwi);
+
+ goto restart_bulk;
}
rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
RETURN(rc);
}
- if (req->rq_bulk->bd_nob_transferred != CFS_PAGE_SIZE) {
+ /* presumably LU_PAGE_MASK checks LU_PAGE alignment of the transfer
+ * size -- TODO confirm against its definition */
+ if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
- req->rq_bulk->bd_nob_transferred, CFS_PAGE_SIZE);
+ req->rq_bulk->bd_nob_transferred,
+ /* NOTE(review): verify %ld matches the type of this product */
+ CFS_PAGE_SIZE * npages);
ptlrpc_req_finished(req);
RETURN(-EPROTO);
}
return rc;
}
+/**
+ * Overlay a kuc_hdr at the start of \a buf and fill it in for a
+ * changelog message of total length \a len (header included).
+ *
+ * \a len is asserted to fit CR_MAXSIZE, the size callers allocate for
+ * the staging buffer.  Returns \a buf cast to the header type.
+ */
+static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
+{
+ struct kuc_hdr *lh = (struct kuc_hdr *)buf;
+
+ LASSERT(len <= CR_MAXSIZE);
+
+ lh->kuc_magic = KUC_MAGIC;
+ lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
+ lh->kuc_flags = flags;
+ lh->kuc_msgtype = CL_RECORD; /* callers may override, e.g. CL_EOF */
+ lh->kuc_msglen = len;
+ return lh;
+}
+
+/* debug mask for changelog CDEBUGs below (0 = disabled by default) */
+#define D_CHANGELOG 0
+
+/* State shared between mdc_ioc_changelog_send(), the send thread and
+ * the llog callback that streams records to userspace. */
+struct changelog_show {
+ __u64 cs_startrec; /* first record index the caller wants */
+ __u32 cs_flags; /* kuc_flags stamped on every message */
+ cfs_file_t *cs_fp; /* write end of the userspace pipe */
+ char *cs_buf; /* staging buffer, CR_MAXSIZE bytes */
+ struct obd_device *cs_obd;
+};
+
+/**
+ * Callback for llog_cat_process_flags(): deliver one changelog record
+ * to userspace.
+ *
+ * Records older than cs->cs_startrec are skipped (returns 0);
+ * otherwise the record plus its name is copied after a kuc_hdr into
+ * cs->cs_buf and written to the pipe, returning the
+ * libcfs_kkuc_msg_put() result.
+ */
+static int changelog_show_cb(struct llog_handle *llh, struct llog_rec_hdr *hdr,
+ void *data)
+{
+ struct changelog_show *cs = data;
+ struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr;
+ struct kuc_hdr *lh;
+ int len, rc;
+ ENTRY;
+
+ /* reject records of the wrong llog type or with a bogus type code */
+ if ((rec->cr_hdr.lrh_type != CHANGELOG_REC) ||
+ (rec->cr.cr_type >= CL_LAST)) {
+ CERROR("Not a changelog rec %d/%d\n", rec->cr_hdr.lrh_type,
+ rec->cr.cr_type);
+ RETURN(-EINVAL);
+ }
+
+ if (rec->cr.cr_index < cs->cs_startrec) {
+ /* Skip entries earlier than what we are interested in */
+ CDEBUG(D_CHANGELOG, "rec="LPU64" start="LPU64"\n",
+ rec->cr.cr_index, cs->cs_startrec);
+ RETURN(0);
+ }
+
+ CDEBUG(D_CHANGELOG, LPU64" %02d%-5s "LPU64" 0x%x t="DFID" p="DFID
+ " %.*s\n", rec->cr.cr_index, rec->cr.cr_type,
+ changelog_type2str(rec->cr.cr_type), rec->cr.cr_time,
+ rec->cr.cr_flags & CLF_FLAGMASK,
+ PFID(&rec->cr.cr_tfid), PFID(&rec->cr.cr_pfid),
+ rec->cr.cr_namelen, rec->cr.cr_name);
+
+ /* header + fixed record + variable-length name */
+ len = sizeof(*lh) + sizeof(rec->cr) + rec->cr.cr_namelen;
+
+ /* Set up the message */
+ lh = changelog_kuc_hdr(cs->cs_buf, len, cs->cs_flags);
+ memcpy(lh + 1, &rec->cr, len - sizeof(*lh));
+
+ rc = libcfs_kkuc_msg_put(cs->cs_fp, lh);
+ CDEBUG(D_CHANGELOG, "kucmsg fp %p len %d rc %d\n", cs->cs_fp, len,rc);
+
+ RETURN(rc);
+}
+
+/**
+ * Kernel thread that streams changelog records from the replicated
+ * changelog catalog into the userspace pipe carried in \a csdata.
+ *
+ * On all paths it drops the pipe file reference, frees the staging
+ * buffer and the changelog_show state allocated by the caller, then
+ * sends a CL_EOF message (when setup got far enough) before exiting.
+ */
+static int mdc_changelog_send_thread(void *csdata)
+{
+ struct changelog_show *cs = csdata;
+ struct llog_ctxt *ctxt = NULL;
+ struct llog_handle *llh = NULL;
+ struct kuc_hdr *kuch;
+ int rc;
+
+ CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
+ cs->cs_fp, cs->cs_startrec);
+
+ /*
+ * It's important to daemonize here to close unused FDs.
+ * The write fd from pipe is already opened by the caller,
+ * so it's fine to clear all files here
+ */
+ cfs_daemonize("mdc_clg_send_thread");
+
+ OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+ if (cs->cs_buf == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ /* Set up the remote catalog handle */
+ ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT);
+ if (ctxt == NULL)
+ GOTO(out, rc = -ENOENT);
+ rc = llog_create(ctxt, &llh, NULL, CHANGELOG_CATALOG);
+ if (rc) {
+ CERROR("llog_create() failed %d\n", rc);
+ GOTO(out, rc);
+ }
+ rc = llog_init_handle(llh, LLOG_F_IS_CAT, NULL);
+ if (rc) {
+ CERROR("llog_init_handle failed %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ /* walk every record, pushing each one via changelog_show_cb() */
+ rc = llog_cat_process_flags(llh, changelog_show_cb, cs, 0, 0, 0);
+
+ /* Send EOF no matter what our result */
+ if ((kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch),
+ cs->cs_flags))) {
+ kuch->kuc_msgtype = CL_EOF;
+ libcfs_kkuc_msg_put(cs->cs_fp, kuch);
+ }
+
+out:
+ cfs_put_file(cs->cs_fp); /* matches cfs_get_fd() in mdc_ioc_changelog_send() */
+ if (llh)
+ llog_cat_put(llh);
+ if (ctxt)
+ llog_ctxt_put(ctxt);
+ if (cs->cs_buf)
+ OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE_PTR(cs); /* allocated in mdc_ioc_changelog_send() */
+ /* detach from parent process so we get cleaned up */
+ cfs_daemonize("cl_send");
+ return rc;
+}
+
+/**
+ * OBD_IOC_CHANGELOG_SEND handler: spawn a thread that streams
+ * changelog records to the pipe fd named in \a icc, so the ioctl can
+ * return to the user application before anything is written.
+ *
+ * \retval 0 once the thread is launched (the thread then owns and
+ *           frees \a cs); negative errno if it could not be started
+ */
+static int mdc_ioc_changelog_send(struct obd_device *obd,
+ struct ioc_changelog *icc)
+{
+ struct changelog_show *cs;
+ int rc;
+
+ /* Freed in mdc_changelog_send_thread */
+ OBD_ALLOC_PTR(cs);
+ if (!cs)
+ return -ENOMEM;
+
+ cs->cs_obd = obd;
+ cs->cs_startrec = icc->icc_recno;
+ /* matching cfs_put_file in mdc_changelog_send_thread */
+ cs->cs_fp = cfs_get_fd(icc->icc_id);
+ /* NOTE(review): cfs_get_fd() result is not checked for NULL here --
+ * confirm the send thread tolerates a NULL cs_fp */
+ cs->cs_flags = icc->icc_flags;
+
+ /* New thread because we should return to user app before
+ writing into our pipe */
+ rc = cfs_create_thread(mdc_changelog_send_thread, cs, CFS_DAEMON_FLAGS);
+ if (rc >= 0) {
+ CDEBUG(D_CHANGELOG, "start changelog thread: %d\n", rc);
+ return 0;
+ }
+
+ CERROR("Failed to start changelog thread: %d\n", rc);
+ OBD_FREE_PTR(cs);
+ return rc;
+}
+
+static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
+ struct lustre_kernelcomm *lk);
+
static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg)
{
return -EINVAL;
}
switch (cmd) {
+ case LL_IOC_HSM_CT_START:
+ rc = mdc_ioc_hsm_ct_start(exp, karg);
+ GOTO(out, rc);
+ case OBD_IOC_CHANGELOG_SEND:
+ rc = mdc_ioc_changelog_send(obd, karg);
+ GOTO(out, rc);
case OBD_IOC_CHANGELOG_CLEAR: {
- struct ioc_changelog_clear *icc = karg;
+ struct ioc_changelog *icc = karg;
struct changelog_setinfo cs =
- {icc->icc_recno, icc->icc_id};
+ {.cs_recno = icc->icc_recno, .cs_id = icc->icc_id};
rc = obd_set_info_async(exp, strlen(KEY_CHANGELOG_CLEAR),
KEY_CHANGELOG_CLEAR, sizeof(cs), &cs,
NULL);
if (*((__u32 *) data->ioc_inlbuf2) != 0)
GOTO(out, rc = -ENODEV);
+ /* copy UUID */
+ if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
+ min((int) data->ioc_plen2,
+ (int) sizeof(struct obd_uuid))))
+ GOTO(out, rc = -EFAULT);
+
rc = mdc_statfs(obd, &stat_buf,
- cfs_time_current_64() - CFS_HZ, 0);
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ 0);
if (rc != 0)
GOTO(out, rc);
if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
- data->ioc_plen1))
- GOTO(out, rc = -EFAULT);
- if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
- data->ioc_plen2))
+ min((int) data->ioc_plen1,
+ (int) sizeof(stat_buf))))
GOTO(out, rc = -EFAULT);
GOTO(out, rc = 0);
}
+ case LL_IOC_GET_CONNECT_FLAGS: {
+ if (cfs_copy_to_user(uarg, &exp->exp_connect_flags,
+ sizeof(__u64)))
+ GOTO(out, rc = -EFAULT);
+ else
+ GOTO(out, rc = 0);
+ }
default:
CERROR("mdc_ioctl(): unrecognised ioctl %#x\n", cmd);
GOTO(out, rc = -ENOTTY);
__swab32s(&h->hai_action);
lustre_swab_lu_fid(&h->hai_fid);
__swab64s(&h->hai_cookie);
- __swab64s(&h->hai_extent_start);
- __swab64s(&h->hai_extent_end);
+ __swab64s(&h->hai_extent.offset);
+ __swab64s(&h->hai_extent.length);
__swab64s(&h->hai_gid);
}
}
}
+/* Byte-swap a kuc_hdr received with foreign endianness; kuc_transport
+ * is a single byte and needs no swabbing. */
+static void lustre_swab_kuch(struct kuc_hdr *l)
+{
+ __swab16s(&l->kuc_magic);
+ /* __u8 l->kuc_transport */
+ __swab16s(&l->kuc_msgtype);
+ __swab16s(&l->kuc_msglen);
+}
+
+/**
+ * LL_IOC_HSM_CT_START handler: register an HSM copytool as a listener
+ * on the KUC_GRP_HSM kernel-user-comm group, or unregister it when
+ * LK_FLG_STOP is set in \a lk->lk_flags.
+ *
+ * \param lk copytool registration data; lk_wfd is the pipe fd the
+ *           kernel will write to
+ * \retval 0 on success, negative errno on failure
+ */
+static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
+ struct lustre_kernelcomm *lk)
+{
+ int rc = 0;
+
+ if (lk->lk_group != KUC_GRP_HSM) {
+ CERROR("Bad copytool group %d\n", lk->lk_group);
+ return -EINVAL;
+ }
+
+ CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, lk->lk_wfd,
+ lk->lk_uid, lk->lk_group, lk->lk_flags);
+
+ if (lk->lk_flags & LK_FLG_STOP)
+ rc = libcfs_kkuc_group_rem(lk->lk_uid,lk->lk_group);
+ else {
+ cfs_file_t *fp = cfs_get_fd(lk->lk_wfd);
+ rc = libcfs_kkuc_group_add(fp, lk->lk_uid,lk->lk_group,
+ lk->lk_data);
+ /* on failure drop the fd reference we just took */
+ if (rc && fp)
+ cfs_put_file(fp);
+ }
+
+ /* lk_data is archive number mask */
+ /* TODO: register archive num with mdt so coordinator can choose
+ correct agent. */
+
+ return rc;
+}
+
/**
- * Send a message to any listening copytools, nonblocking
- * @param val LNL message (lnl_hdr + hsm_action_list)
+ * Send a message to any listening copytools
+ * @param val KUC message (kuc_hdr + hsm_action_list)
* @param len total length of message
*/
static int mdc_hsm_copytool_send(int len, void *val)
{
- struct lnl_hdr *lh = (struct lnl_hdr *)val;
+ struct kuc_hdr *lh = (struct kuc_hdr *)val;
struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
int rc;
ENTRY;
(int) (sizeof(*lh) + sizeof(*hal)));
RETURN(-EPROTO);
}
- if (lh->lnl_magic == __swab16(LNL_MAGIC)) {
- lustre_swab_lnlh(lh);
+ if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
+ lustre_swab_kuch(lh);
lustre_swab_hal(hal);
- } else if (lh->lnl_magic != LNL_MAGIC) {
- CERROR("Bad magic %x!=%x\n", lh->lnl_magic, LNL_MAGIC);
+ } else if (lh->kuc_magic != KUC_MAGIC) {
+ CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
RETURN(-EPROTO);
}
- CDEBUG(D_IOCTL, " Received message mg=%x t=%d m=%d l=%d actions=%d\n",
- lh->lnl_magic, lh->lnl_transport, lh->lnl_msgtype,
- lh->lnl_msglen, hal->hal_count);
+ CDEBUG(D_HSM, " Received message mg=%x t=%d m=%d l=%d actions=%d\n",
+ lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype,
+ lh->kuc_msglen, hal->hal_count);
/* Broadcast to HSM listeners */
- rc = libcfs_klnl_msg_put(0, LNL_GRP_HSM, lh);
+ rc = libcfs_kkuc_group_put(KUC_GRP_HSM, lh);
RETURN(rc);
}
int rc = -EINVAL;
ENTRY;
- if (KEY_IS(KEY_INIT_RECOV)) {
- if (vallen != sizeof(int))
- RETURN(-EINVAL);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_initial_recov = *(int *)val;
- cfs_spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "%s: set imp_initial_recov = %d\n",
- exp->exp_obd->obd_name, imp->imp_initial_recov);
- RETURN(0);
- }
- /* Turn off initial_recov after we try all backup servers once */
- if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
- if (vallen != sizeof(int))
- RETURN(-EINVAL);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_initial_recov_bk = *(int *)val;
- if (imp->imp_initial_recov_bk)
- imp->imp_initial_recov = 1;
- cfs_spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "%s: set imp_initial_recov_bk = %d\n",
- exp->exp_obd->obd_name, imp->imp_initial_recov_bk);
- RETURN(0);
- }
if (KEY_IS(KEY_READ_ONLY)) {
if (vallen != sizeof(int))
RETURN(-EINVAL);
}
cfs_spin_unlock(&imp->imp_lock);
- rc = target_set_info_rpc(imp, MDS_SET_INFO,
- keylen, key, vallen, val, set);
+ rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
+ keylen, key, vallen, val, set);
RETURN(rc);
}
if (KEY_IS(KEY_SPTLRPC_CONF)) {
RETURN(0);
}
if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
- rc = target_set_info_rpc(imp, MDS_SET_INFO,
- keylen, key, vallen, val, set);
+ rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
+ keylen, key, vallen, val, set);
RETURN(rc);
}
if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
/*
* Flush current sequence to make client obtain new one
* from server in case of disconnect/reconnect.
- * If range is already empty then no need to flush it.
*/
- if (cli->cl_seq != NULL &&
- !range_is_exhausted(&cli->cl_seq->lcs_space)) {
+ if (cli->cl_seq != NULL)
seq_client_flush(cli->cl_seq);
- }
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
break;
case IMP_EVENT_OCD:
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
break;
-
+ case IMP_EVENT_DEACTIVATE:
+ case IMP_EVENT_ACTIVATE:
+ break;
default:
CERROR("Unknown import event %x\n", event);
LBUG();
return &cli->cl_target_uuid;
}
+/**
+ * Decide whether a lock may be canceled rather than replayed during
+ * recovery.
+ *
+ * \retval non-zero the lock can safely be canceled
+ * \retval 0        the lock must be replayed (non-IBITS locks and
+ *                  inodebits locks covering MDS_INODELOCK_OPEN)
+ */
+static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
+{
+ if (lock->l_resource->lr_type != LDLM_IBITS)
+ RETURN(0);
+
+ /* FIXME: if we ever get into a situation where there are too many
+ * opened files with open locks on a single node, then we really
+ * should replay these open locks to reget it */
+ if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
+ RETURN(0);
+
+ RETURN(1);
+}
+
static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
struct client_obd *cli = &obd->u.cli;
sptlrpc_lprocfs_cliobd_attach(obd);
ptlrpc_lprocfs_register_obd(obd);
+ ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
+
rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
if (rc) {
mdc_cleanup(obd);
CERROR("failed to setup llogging subsystems\n");
}
- /* ignore errors */
- libcfs_klnl_start(LNL_TRANSPORT_HSM);
- libcfs_klnl_start(LNL_TRANSPORT_CHANGELOG);
-
RETURN(rc);
err_close_lock:
switch (stage) {
case OBD_CLEANUP_EARLY:
+ break;
case OBD_CLEANUP_EXPORTS:
- /* If we set up but never connected, the
- client import will not have been cleaned. */
- if (obd->u.cli.cl_import) {
- struct obd_import *imp;
- cfs_down_write(&obd->u.cli.cl_sem);
- imp = obd->u.cli.cl_import;
- CERROR("client import never connected\n");
- ptlrpc_invalidate_import(imp);
- class_destroy_import(imp);
- cfs_up_write(&obd->u.cli.cl_sem);
- obd->u.cli.cl_import = NULL;
- }
+ /* Failsafe, ok if racy */
+ if (obd->obd_type->typ_refcnt <= 1)
+ libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
+
+ obd_cleanup_client_import(obd);
+
rc = obd_llog_finish(obd, 0);
if (rc != 0)
CERROR("failed to cleanup llogging subsystems\n");
{
struct client_obd *cli = &obd->u.cli;
- libcfs_klnl_stop(LNL_TRANSPORT_HSM, LNL_GRP_HSM);
- libcfs_klnl_stop(LNL_TRANSPORT_CHANGELOG, 0);
-
OBD_FREE(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
OBD_FREE(cli->cl_setattr_lock, sizeof (*cli->cl_setattr_lock));
OBD_FREE(cli->cl_close_lock, sizeof (*cli->cl_close_lock));
}
static int mdc_interpret_renew_capa(const struct lu_env *env,
- struct ptlrpc_request *req, void *unused,
+ struct ptlrpc_request *req, void *args,
int status)
{
- struct obd_capa *oc = req->rq_async_args.pointer_arg[0];
- renew_capa_cb_t cb = req->rq_async_args.pointer_arg[1];
+ struct mdc_renew_capa_args *ra = args;
struct mdt_body *body = NULL;
struct lustre_capa *capa;
ENTRY;
GOTO(out, capa = ERR_PTR(-EFAULT));
EXIT;
out:
- cb(oc, capa);
+ ra->ra_cb(ra->ra_oc, capa);
return 0;
}
renew_capa_cb_t cb)
{
struct ptlrpc_request *req;
+ struct mdc_renew_capa_args *ra;
ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETATTR,
mdc_pack_body(req, &oc->c_capa.lc_fid, oc, OBD_MD_FLOSSCAPA, 0, -1, 0);
ptlrpc_request_set_replen(req);
- req->rq_async_args.pointer_arg[0] = oc;
- req->rq_async_args.pointer_arg[1] = cb;
+ CLASSERT(sizeof(*ra) <= sizeof(req->rq_async_args));
+ ra = ptlrpc_req_async_args(req);
+ ra->ra_oc = oc;
+ ra->ra_cb = cb;
req->rq_interpret_reply = mdc_interpret_renew_capa;
ptlrpcd_add_req(req, PSCOPE_OTHER);
RETURN(0);
struct md_ops mdc_md_ops = {
.m_getstatus = mdc_getstatus,
.m_change_cbdata = mdc_change_cbdata,
+ .m_find_cbdata = mdc_find_cbdata,
.m_close = mdc_close,
.m_create = mdc_create,
.m_done_writing = mdc_done_writing,