* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * lustre/osp/osp_sync.c
+ * lustre/osp/osp_precreate.c
*
* Lustre OST Proxy Device
*
* = import is disconnected =
*
* = import is inactive =
- * in this case osp_declare_object_create() returns an error
+ * in this case osp_declare_create() returns an error
*
*/
*/
static inline int osp_statfs_need_update(struct osp_device *d)
{
- return !cfs_time_before(cfs_time_current(),
- d->opd_statfs_fresh_till);
+ return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
}
/*
return !!(d->opd_pre_thread.t_flags & SVC_STOPPED);
}
-static void osp_statfs_timer_cb(unsigned long _d)
+static void osp_statfs_timer_cb(cfs_timer_cb_arg_t data)
{
- struct osp_device *d = (struct osp_device *) _d;
+ struct osp_device *d = cfs_from_timer(d, data, opd_statfs_timer);
LASSERT(d);
- if (d->opd_pre != NULL && osp_precreate_running(d))
+ if (osp_precreate_running(d))
wake_up(&d->opd_pre_waitq);
}
* \retval negative negated errno on error
*/
static int osp_statfs_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- union ptlrpc_async_args *aa, int rc)
+ struct ptlrpc_request *req, void *args, int rc)
{
- struct obd_import *imp = req->rq_import;
- struct obd_statfs *msfs;
- struct osp_device *d;
+ union ptlrpc_async_args *aa = args;
+ struct obd_import *imp = req->rq_import;
+ struct obd_statfs *msfs;
+ struct osp_device *d;
+ u64 maxage_ns;
ENTRY;
d->opd_statfs = *msfs;
- osp_pre_update_status(d, rc);
+ if (d->opd_pre)
+ osp_pre_update_status(d, rc);
/* schedule next update */
- d->opd_statfs_fresh_till = cfs_time_shift(d->opd_statfs_maxage);
- cfs_timer_arm(&d->opd_statfs_timer, d->opd_statfs_fresh_till);
+ maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
+ d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
+ mod_timer(&d->opd_statfs_timer,
+ jiffies + cfs_time_seconds(d->opd_statfs_maxage));
d->opd_statfs_update_in_progress = 0;
CDEBUG(D_CACHE, "updated statfs %p\n", d);
RETURN(0);
out:
- /* couldn't update statfs, try again as soon as possible */
+ /* couldn't update statfs, try again with a small delay */
+ d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), 10 * NSEC_PER_SEC);
+ d->opd_statfs_update_in_progress = 0;
if (d->opd_pre != NULL && osp_precreate_running(d))
wake_up(&d->opd_pre_waitq);
*
* \param[in] d OSP device
*/
-static int osp_statfs_update(struct osp_device *d)
+static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
{
+ u64 expire = obd_timeout * 1000 * NSEC_PER_SEC;
struct ptlrpc_request *req;
struct obd_import *imp;
union ptlrpc_async_args *aa;
- int rc;
+ int rc;
ENTRY;
imp = d->opd_obd->u.cli.cl_import;
LASSERT(imp);
- req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
+ req = ptlrpc_request_alloc(imp,
+ d->opd_pre ? &RQF_OST_STATFS : &RQF_MDS_STATFS);
if (req == NULL)
RETURN(-ENOMEM);
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
+ rc = ptlrpc_request_pack(req,
+ d->opd_pre ? LUSTRE_OST_VERSION : LUSTRE_MDS_VERSION,
+ d->opd_pre ? OST_STATFS : MDS_STATFS);
if (rc) {
ptlrpc_request_free(req);
RETURN(rc);
}
ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL;
+ if (d->opd_pre)
+ req->rq_request_portal = OST_CREATE_PORTAL;
ptlrpc_at_set_req_timeout(req);
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osp_statfs_interpret;
+ req->rq_interpret_reply = osp_statfs_interpret;
aa = ptlrpc_req_async_args(req);
aa->pointer_arg[0] = d;
/*
* no updates till reply
*/
- cfs_timer_disarm(&d->opd_statfs_timer);
- d->opd_statfs_fresh_till = cfs_time_shift(obd_timeout * 1000);
+ del_timer(&d->opd_statfs_timer);
+ d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
d->opd_statfs_update_in_progress = 1;
ptlrpcd_add_req(req);
+ /* we still want to sync changes if no new changes are coming */
+ if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
+ GOTO(out, rc);
+
+ if (atomic_read(&d->opd_sync_changes)) {
+ struct thandle *th;
+
+ th = dt_trans_create(env, d->opd_storage);
+ if (IS_ERR(th)) {
+ CERROR("%s: can't sync\n", d->opd_obd->obd_name);
+ GOTO(out, rc);
+ }
+ rc = dt_trans_start_local(env, d->opd_storage, th);
+ if (rc == 0) {
+ CDEBUG(D_OTHER, "%s: sync forced, %d changes\n",
+ d->opd_obd->obd_name,
+ atomic_read(&d->opd_sync_changes));
+ osp_sync_add_commit_cb_1s(env, d, th);
+ dt_trans_stop(env, d->opd_storage, th);
+ }
+ }
+
+out:
RETURN(0);
}
* then we should poll OST immediately once object destroy
* is replied
*/
- d->opd_statfs_fresh_till = cfs_time_shift(-1);
- cfs_timer_disarm(&d->opd_statfs_timer);
+ d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
+ del_timer(&d->opd_statfs_timer);
wake_up(&d->opd_pre_waitq);
}
}
{
int rc;
+ if (d->opd_pre == NULL)
+ return 0;
+
/* XXX: do we really need locking here? */
spin_lock(&d->opd_pre_lock);
rc = osp_precreate_near_empty_nolock(env, d);
struct lu_buf *lb_oid = &oti->osi_lb;
struct lu_buf *lb_oseq = &oti->osi_lb2;
loff_t oid_off;
+ u64 oid;
loff_t oseq_off;
struct thandle *th;
int rc;
ENTRY;
+ if (osp->opd_storage->dd_rdonly)
+ RETURN(0);
+
	/* Note: though f_oid is only 32 bits, it will also write 64 bits
	 * for oid to keep compatibility with the previous version. */
- lb_oid->lb_buf = &fid->f_oid;
- lb_oid->lb_len = sizeof(u64);
- oid_off = sizeof(u64) * osp->opd_index;
+ oid = fid->f_oid;
+ osp_objid_buf_prep(lb_oid, &oid_off,
+ &oid, osp->opd_index);
- lb_oseq->lb_buf = &fid->f_seq;
- lb_oseq->lb_len = sizeof(u64);
- oseq_off = sizeof(u64) * osp->opd_index;
+ osp_objseq_buf_prep(lb_oseq, &oseq_off,
+ &fid->f_seq, osp->opd_index);
th = dt_trans_create(env, osp->opd_storage);
if (IS_ERR(th))
/* Update last_xxx to the new seq */
spin_lock(&osp->opd_pre_lock);
osp->opd_last_used_fid = *fid;
+ osp_fid_to_obdid(fid, &osp->opd_last_id);
osp->opd_gap_start_fid = *fid;
osp->opd_pre_used_fid = *fid;
osp->opd_pre_last_created_fid = *fid;
if (fid_is_idif(fid)) {
struct lu_fid *last_fid;
struct ost_id *oi = &osi->osi_oi;
+ int rc;
spin_lock(&osp->opd_pre_lock);
last_fid = &osp->opd_pre_last_created_fid;
fid_to_ostid(last_fid, oi);
end = min(ostid_id(oi) + *grow, IDIF_MAX_OID);
*grow = end - ostid_id(oi);
- ostid_set_id(oi, ostid_id(oi) + *grow);
+ rc = ostid_set_id(oi, ostid_id(oi) + *grow);
spin_unlock(&osp->opd_pre_lock);
- if (*grow == 0)
+ if (*grow == 0 || rc)
return 1;
ostid_to_fid(fid, oi, osp->opd_index);
RETURN(rc);
}
- LASSERT(d->opd_pre->osp_pre_delorphan_sent != 0);
spin_lock(&d->opd_pre_lock);
if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
*fid = d->opd_pre_last_created_fid;
rc = osp_precreate_fids(env, d, fid, &grow);
- if (rc == 1) {
+ if (rc == 1)
/* Current seq has been used up*/
- if (!osp_is_fid_client(d)) {
- osp_pre_update_status(d, -ENOSPC);
- rc = -ENOSPC;
- }
- wake_up(&d->opd_pre_waitq);
- GOTO(out_req, rc);
- }
+ GOTO(out_req, rc = -ENOSPC);
if (!osp_is_fid_client(d)) {
/* Non-FID client will always send seq 0 because of
ready:
if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
- CERROR("%s: precreate fid "DFID" < local used fid "DFID
+ CERROR("%s: precreate fid "DFID" <= local used fid "DFID
": rc = %d\n", d->opd_obd->obd_name,
PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
GOTO(out_req, rc = -ESTALE);
int update_status = 0;
int rc;
int diff;
- struct lu_fid fid;
ENTRY;
/*
- * wait for local recovery to finish, so we can cleanup orphans.
- * orphans are all objects since "last used" (assigned).
- * consider reserved objects as created otherwise we can get into
- * a livelock when one blocked thread holding a reservation can
- * block recovery. see LU-8367 for the details. in some cases this
- * can result in gaps (i.e. leaked objects), but we've got LFSCK...
- *
- * do not allow new reservations because they may end up getting
- * orphans being cleaned up below. so we block new reservations.
+ * wait for local recovery to finish, so we can cleanup orphans
+ * orphans are all objects since "last used" (assigned), but
+ * there might be objects reserved and in some cases they won't
+ * be used. we can't cleanup them till we're sure they won't be
	 * used. also we can't allow new reservations because they may
	 * end up getting orphans being cleaned up below. so we block
	 * new reservations and wait till all reserved objects either
	 * used or released.
*/
spin_lock(&d->opd_pre_lock);
d->opd_pre_recovering = 1;
* catch all osp_precreate_reserve() calls who find
* "!opd_pre_recovering".
*/
- l_wait_event(d->opd_pre_waitq, d->opd_recovery_completed ||
+ l_wait_event(d->opd_pre_waitq,
+ (!d->opd_pre_reserved && d->opd_recovery_completed) ||
!osp_precreate_running(d) || d->opd_got_disconnected,
&lwi);
if (!osp_precreate_running(d) || d->opd_got_disconnected)
GOTO(out, rc = -EAGAIN);
+ CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
+ d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));
+
*last_fid = d->opd_last_used_fid;
/* The OSP should already get the valid seq now */
LASSERT(!fid_is_zero(last_fid));
body->oa.o_flags = OBD_FL_DELORPHAN;
body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
- /* unless this is the very first DELORPHAN (when we really
- * can destroy some orphans), just tell OST to recreate
- * missing objects in our precreate pool */
- spin_lock(&d->opd_pre_lock);
- if (d->opd_pre->osp_pre_delorphan_sent)
- fid = d->opd_pre_last_created_fid;
- else
- fid = d->opd_last_used_fid;
- spin_unlock(&d->opd_pre_lock);
- fid_to_ostid(&fid, &body->oa.o_oi);
-
- CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
- d->opd_obd->obd_name, PFID(&fid));
+ fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);
ptlrpc_request_set_replen(req);
ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
spin_lock(&d->opd_pre_lock);
- diff = osp_fid_diff(&fid, last_fid);
+ diff = osp_fid_diff(&d->opd_last_used_fid, last_fid);
if (diff > 0) {
d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
- d->opd_pre_last_created_fid = *last_fid;
+ d->opd_pre_last_created_fid = d->opd_last_used_fid;
} else {
d->opd_pre_create_count = OST_MIN_PRECREATE;
d->opd_pre_last_created_fid = *last_fid;
*/
LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
LUSTRE_DATA_SEQ_MAX_WIDTH);
- if (d->opd_pre->osp_pre_delorphan_sent == 0)
- d->opd_pre_used_fid = d->opd_pre_last_created_fid;
+ d->opd_pre_used_fid = d->opd_pre_last_created_fid;
d->opd_pre_create_slow = 0;
spin_unlock(&d->opd_pre_lock);
- d->opd_pre->osp_pre_delorphan_sent = 1;
CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
"last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
if (req)
ptlrpc_req_finished(req);
- spin_lock(&d->opd_pre_lock);
- d->opd_pre_recovering = 0;
- spin_unlock(&d->opd_pre_lock);
-
/*
* If rc is zero, the pre-creation window should have been emptied.
* Since waking up the herd would be useless without pre-created
} else {
wake_up(&d->opd_pre_user_waitq);
}
+ } else {
+ spin_lock(&d->opd_pre_lock);
+ d->opd_pre_recovering = 0;
+ spin_unlock(&d->opd_pre_lock);
}
RETURN(rc);
else if (msfs->os_ffree > 64)
msfs->os_state &= ~OS_STATE_ENOINO;
+ CDEBUG(D_INFO, "%s: status: %llu blocks, %llu "
+ "free, %llu avail, %llu MB avail, %u "
+ "hwm -> %d: rc = %d\n",
+ d->opd_obd->obd_name, msfs->os_blocks,
+ msfs->os_bfree, msfs->os_bavail,
+ available, d->opd_reserved_mb_high,
+ d->opd_pre_status, rc);
if (available < d->opd_reserved_mb_low)
msfs->os_state |= OS_STATE_ENOSPC;
else if (available > d->opd_reserved_mb_high)
d->opd_pre_status, rc);
CDEBUG(D_INFO,
"non-committed changes: %u, in progress: %u\n",
- atomic_read(&d->opd_syn_changes),
- atomic_read(&d->opd_syn_rpc_in_progress));
+ atomic_read(&d->opd_sync_changes),
+ atomic_read(&d->opd_sync_rpcs_in_progress));
} else if (unlikely(old == -ENOSPC)) {
d->opd_pre_status = 0;
spin_lock(&d->opd_pre_lock);
available, d->opd_reserved_mb_low,
d->opd_pre_status, rc);
}
+
+ /* Object precreation is skipped on the OST with
+ * max_create_count=0. */
+ if (d->opd_pre_max_create_count == 0)
+ msfs->os_state |= OS_STATE_NOPRECREATE;
+ else
+ msfs->os_state &= ~OS_STATE_NOPRECREATE;
}
out:
wake_up(&d->opd_pre_user_waitq);
if (rc) {
CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
rc);
+
+ spin_lock(&d->opd_pre_lock);
+ thread->t_flags = SVC_STOPPED;
+ spin_unlock(&d->opd_pre_lock);
+ wake_up(&thread->t_ctl_waitq);
+
RETURN(rc);
}
* need to be connected to OST
*/
while (osp_precreate_running(d)) {
+ if ((d->opd_pre == NULL || d->opd_pre_recovering) &&
+ d->opd_imp_connected &&
+ !d->opd_got_disconnected)
+ break;
l_wait_event(d->opd_pre_waitq,
!osp_precreate_running(d) ||
d->opd_new_connection,
if (!osp_precreate_running(d))
break;
- LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
- /* Sigh, fid client is not ready yet */
- if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL)
- continue;
+ if (d->opd_pre) {
+ LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
+ /* Sigh, fid client is not ready yet */
+ if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL)
+ continue;
- /* Init fid for osp_precreate if necessary */
- rc = osp_init_pre_fid(d);
- if (rc != 0) {
- class_export_put(d->opd_exp);
- d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
- CERROR("%s: init pre fid error: rc = %d\n",
- d->opd_obd->obd_name, rc);
- continue;
+ /* Init fid for osp_precreate if necessary */
+ rc = osp_init_pre_fid(d);
+ if (rc != 0) {
+ class_export_put(d->opd_exp);
+ d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
+ CERROR("%s: init pre fid error: rc = %d\n",
+ d->opd_obd->obd_name, rc);
+ continue;
+ }
}
- if (osp_statfs_update(d)) {
+ if (osp_statfs_update(&env, d)) {
l_wait_event(d->opd_pre_waitq,
!osp_precreate_running(d), &lwi2);
continue;
}
- /*
- * Clean up orphans or recreate missing objects.
- */
- rc = osp_precreate_cleanup_orphans(&env, d);
- if (rc != 0)
- continue;
+ if (d->opd_pre) {
+ /*
+ * Clean up orphans or recreate missing objects.
+ */
+ rc = osp_precreate_cleanup_orphans(&env, d);
+ if (rc != 0) {
+ schedule_timeout_interruptible(cfs_time_seconds(1));
+ continue;
+ }
+ }
+
/*
* connected, can handle precreates now
*/
break;
if (osp_statfs_need_update(d))
- if (osp_statfs_update(d))
+ if (osp_statfs_update(&env, d))
break;
+ if (d->opd_pre == NULL)
+ continue;
+
/* To avoid handling different seq in precreate/orphan
* cleanup, it will hold precreate until current seq is
* used up. */
return 1;
/* ready if OST reported no space and no destroys in progress */
- if (atomic_read(&d->opd_syn_changes) +
- atomic_read(&d->opd_syn_rpc_in_progress) == 0 &&
+ if (atomic_read(&d->opd_sync_changes) +
+ atomic_read(&d->opd_sync_rpcs_in_progress) == 0 &&
d->opd_pre_status == -ENOSPC)
return 1;
struct osp_device *d = data;
CDEBUG(D_HA, "%s: slow creates, last="DFID", next="DFID", "
- "reserved=%llu, syn_changes=%u, "
- "syn_rpc_in_progress=%d, status=%d\n",
+ "reserved=%llu, sync_changes=%u, "
+ "sync_rpcs_in_progress=%d, status=%d\n",
d->opd_obd->obd_name, PFID(&d->opd_pre_last_created_fid),
PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
- atomic_read(&d->opd_syn_changes),
- atomic_read(&d->opd_syn_rpc_in_progress),
+ atomic_read(&d->opd_sync_changes),
+ atomic_read(&d->opd_sync_rpcs_in_progress),
d->opd_pre_status);
return 1;
*/
int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
{
- struct l_wait_info lwi;
- cfs_time_t expire = cfs_time_shift(obd_timeout);
- int precreated, rc;
+ time64_t expire = ktime_get_seconds() + obd_timeout;
+ struct l_wait_info lwi;
+ int precreated, rc, synced = 0;
ENTRY;
if (d->opd_pre_max_create_count == 0)
RETURN(-ENOBUFS);
- if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_OSP_PRECREATE_WAIT)) {
- if (d->opd_index == cfs_fail_val)
- OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_OSP_PRECREATE_WAIT,
- obd_timeout);
- }
-
/*
* wait till:
* - preallocation is done
* wait till that is done - some space might be released
*/
if (unlikely(rc == -ENOSPC)) {
- if (atomic_read(&d->opd_syn_changes)) {
+ if (atomic_read(&d->opd_sync_changes) && synced == 0) {
/* force local commit to release space */
dt_commit_async(env, d->opd_storage);
+ osp_sync_check_for_work(d);
+ synced = 1;
}
- if (atomic_read(&d->opd_syn_rpc_in_progress)) {
+ if (atomic_read(&d->opd_sync_rpcs_in_progress)) {
/* just wait till destroys are done */
/* see l_wait_even() few lines below */
}
- if (atomic_read(&d->opd_syn_changes) +
- atomic_read(&d->opd_syn_rpc_in_progress) == 0) {
+ if (atomic_read(&d->opd_sync_changes) +
+ atomic_read(&d->opd_sync_rpcs_in_progress) == 0) {
/* no hope for free space */
break;
}
/* XXX: don't wake up if precreation is in progress */
wake_up(&d->opd_pre_waitq);
- lwi = LWI_TIMEOUT(expire - cfs_time_current(),
- osp_precreate_timeout_condition, d);
- if (cfs_time_aftereq(cfs_time_current(), expire)) {
+ lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
+ osp_precreate_timeout_condition, d);
+ if (ktime_get_seconds() >= expire) {
rc = -ETIMEDOUT;
break;
}
int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
struct lu_fid *fid)
{
+ struct lu_fid *pre_used_fid = &d->opd_pre_used_fid;
/* grab next id from the pool */
spin_lock(&d->opd_pre_lock);
PFID(&d->opd_pre_used_fid),
PFID(&d->opd_pre_last_created_fid));
+ /*
+ * When sequence is used up, new one should be allocated in
+ * osp_precreate_rollover_new_seq. So ASSERT here to avoid
+ * objid overflow.
+ */
+ LASSERTF(osp_fid_end_seq(env, pre_used_fid) == 0,
+ "next fid "DFID" last created fid "DFID"\n",
+ PFID(&d->opd_pre_used_fid),
+ PFID(&d->opd_pre_last_created_fid));
+	/* Non-IDIF fids shouldn't get here with oid == 0xFFFFFFFF. */
+ if (fid_is_idif(pre_used_fid) &&
+ unlikely(fid_oid(pre_used_fid) == LUSTRE_DATA_SEQ_MAX_WIDTH))
+ pre_used_fid->f_seq++;
+
d->opd_pre_used_fid.f_oid++;
memcpy(fid, &d->opd_pre_used_fid, sizeof(*fid));
d->opd_pre_reserved--;
* all reservations are released, see comment in
* osp_precreate_thread() just before orphan cleanup
*/
- if (unlikely(d->opd_pre_reserved == 0 && d->opd_pre_status))
+ if (unlikely(d->opd_pre_reserved == 0 &&
+ (d->opd_pre_recovering || d->opd_pre_status)))
wake_up(&d->opd_pre_waitq);
return 0;
if (req == NULL)
RETURN(-ENOMEM);
- /* XXX: capa support? */
- /* osc_set_capa_size(req, &RMF_CAPA1, capa); */
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
if (rc) {
ptlrpc_request_free(req);
*/
int osp_init_precreate(struct osp_device *d)
{
- struct l_wait_info lwi = { 0 };
- struct task_struct *task;
-
ENTRY;
OBD_ALLOC_PTR(d->opd_pre);
RETURN(-ENOMEM);
/* initially precreation isn't ready */
+ init_waitqueue_head(&d->opd_pre_user_waitq);
d->opd_pre_status = -EAGAIN;
fid_zero(&d->opd_pre_used_fid);
d->opd_pre_used_fid.f_oid = 1;
fid_zero(&d->opd_pre_last_created_fid);
d->opd_pre_last_created_fid.f_oid = 1;
+ d->opd_last_id = 0;
d->opd_pre_reserved = 0;
d->opd_got_disconnected = 1;
d->opd_pre_create_slow = 0;
d->opd_reserved_mb_high = 0;
d->opd_reserved_mb_low = 0;
+ RETURN(0);
+}
+
+/**
+ * Finish precreate functionality of OSP
+ *
+ *
+ * Asks all the activity (the thread, update timer) to stop, then
+ * wait till that is done.
+ *
+ * \param[in] d OSP device
+ */
+void osp_precreate_fini(struct osp_device *d)
+{
+ ENTRY;
+
+ if (d->opd_pre == NULL)
+ RETURN_EXIT;
+
+ OBD_FREE_PTR(d->opd_pre);
+ d->opd_pre = NULL;
+
+ EXIT;
+}
+
+int osp_init_statfs(struct osp_device *d)
+{
+ struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
+
+ ENTRY;
+
spin_lock_init(&d->opd_pre_lock);
init_waitqueue_head(&d->opd_pre_waitq);
- init_waitqueue_head(&d->opd_pre_user_waitq);
+ thread_set_flags(&d->opd_pre_thread, SVC_INIT);
init_waitqueue_head(&d->opd_pre_thread.t_ctl_waitq);
/*
* Initialize statfs-related things
*/
- d->opd_statfs_maxage = 5; /* default update interval */
- d->opd_statfs_fresh_till = cfs_time_shift(-1000);
- CDEBUG(D_OTHER, "current %llu, fresh till %llu\n",
- (unsigned long long)cfs_time_current(),
- (unsigned long long)d->opd_statfs_fresh_till);
- cfs_timer_init(&d->opd_statfs_timer, osp_statfs_timer_cb, d);
+	d->opd_statfs_maxage = 5; /* default update interval */
+ d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
+ 1000 * NSEC_PER_SEC);
+ CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
+ ktime_get_ns(),
+ ktime_to_ns(d->opd_statfs_fresh_till));
+ cfs_timer_setup(&d->opd_statfs_timer, osp_statfs_timer_cb,
+ (unsigned long)d, 0);
+
+ if (d->opd_storage->dd_rdonly)
+ RETURN(0);
/*
* start thread handling precreation and statfs updates
RETURN(0);
}
-/**
- * Finish precreate functionality of OSP
- *
- *
- * Asks all the activity (the thread, update timer) to stop, then
- * wait till that is done.
- *
- * \param[in] d OSP device
- */
-void osp_precreate_fini(struct osp_device *d)
+void osp_statfs_fini(struct osp_device *d)
{
- struct ptlrpc_thread *thread;
-
+ struct ptlrpc_thread *thread = &d->opd_pre_thread;
ENTRY;
- cfs_timer_disarm(&d->opd_statfs_timer);
-
- if (d->opd_pre == NULL)
- RETURN_EXIT;
-
- thread = &d->opd_pre_thread;
-
- thread->t_flags = SVC_STOPPING;
- wake_up(&d->opd_pre_waitq);
+ del_timer(&d->opd_statfs_timer);
- wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
-
- OBD_FREE_PTR(d->opd_pre);
- d->opd_pre = NULL;
+ if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
+ thread->t_flags = SVC_STOPPING;
+ wake_up(&d->opd_pre_waitq);
+ wait_event(thread->t_ctl_waitq, thread_is_stopped(thread));
+ }
EXIT;
}
-