* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
obdo_from_la(&repbody->oa, &fti->fti_attr,
OFD_VALID_FLAGS | LA_UID | LA_GID);
- tgt_drop_id(tsi->tsi_exp, &repbody->oa);
/* Store object version in reply */
curr_version = dt_version_get(tsi->tsi_env,
obdo_from_la(&repbody->oa, &fti->fti_attr,
OFD_VALID_FLAGS | LA_UID | LA_GID);
- tgt_drop_id(tsi->tsi_exp, &repbody->oa);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_SETATTR,
tsi->tsi_jobid, 1);
oseq = ofd_seq_load(tsi->tsi_env, ofd, seq);
if (IS_ERR(oseq)) {
- CERROR("%s: Can't find FID Sequence "LPX64": rc = %ld\n",
+ CERROR("%s: Can't find FID Sequence %#llx: rc = %ld\n",
ofd_name(ofd), seq, PTR_ERR(oseq));
GOTO(out_sem, rc = -EINVAL);
}
(oa->o_flags & OBD_FL_RECREATE_OBJS)) {
if (!ofd_obd(ofd)->obd_recovering ||
oid > ofd_seq_last_oid(oseq)) {
- CERROR("%s: recreate objid "DOSTID" > last id "LPU64
+ CERROR("%s: recreate objid "DOSTID" > last id %llu"
"\n", ofd_name(ofd), POSTID(&oa->o_oi),
ofd_seq_last_oid(oseq));
GOTO(out_nolock, rc = -EINVAL);
oseq->os_destroys_in_progress = 1;
mutex_lock(&oseq->os_create_lock);
if (!oseq->os_destroys_in_progress) {
- CERROR("%s:["LPU64"] destroys_in_progress already"
+ CERROR("%s:[%llu] destroys_in_progress already"
" cleared\n", ofd_name(ofd), seq);
ostid_set_id(&rep_oa->o_oi, ofd_seq_last_oid(oseq));
GOTO(out, rc = 0);
}
diff = oid - ofd_seq_last_oid(oseq);
- CDEBUG(D_HA, "ofd_last_id() = "LPU64" -> diff = %d\n",
+ CDEBUG(D_HA, "ofd_last_id() = %llu -> diff = %d\n",
ofd_seq_last_oid(oseq), diff);
if (-diff > OST_MAX_PRECREATE) {
/* Let MDS know that we are so far ahead. */
if (diff < 0) {
/* LU-5648 */
CERROR("%s: invalid precreate request for "
- DOSTID", last_id " LPU64 ". "
+ DOSTID", last_id %llu. "
"Likely MDS last_id corruption\n",
ofd_name(ofd), POSTID(&oa->o_oi),
ofd_seq_last_oid(oseq));
next_id = ofd_seq_last_oid(oseq) + 1;
count = ofd_precreate_batch(ofd, diff);
- CDEBUG(D_HA, "%s: reserve %d objects in group "LPX64
- " at "LPU64"\n", ofd_name(ofd),
+ CDEBUG(D_HA, "%s: reserve %d objects in group %#llx"
+ " at %llu\n", ofd_name(ofd),
count, seq, next_id);
if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
ENTRY;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_PAUSE_PUNCH, cfs_fail_val);
+
/* check that we do support OBD_CONNECT_TRUNCLOCK. */
CLASSERT(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK);
RETURN(rc);
}
- CDEBUG(D_INODE, "calling punch for object "DFID", valid = "LPX64
- ", start = "LPD64", end = "LPD64"\n", PFID(&tsi->tsi_fid),
+ CDEBUG(D_INODE, "calling punch for object "DFID", valid = %#llx"
+ ", start = %lld, end = %lld\n", PFID(&tsi->tsi_fid),
oa->o_valid, start, end);
fo = ofd_object_find_exists(tsi->tsi_env, ofd_exp(tsi->tsi_exp),
return rc;
}
+/**
+ * Prefetch object data for LU_LADVISE_WILLREAD.
+ *
+ * Walks the byte range [start, end) in chunks of at most
+ * PTLRPC_MAX_BRW_PAGES pages.  For each chunk it maps the backend
+ * buffers, runs the read preparation step on them and releases them
+ * again; the intent (per the WILLREAD advice) is to populate the
+ * server-side cache for later client reads.
+ *
+ * \param[in] env	execution environment for this thread
+ * \param[in] fo	OFD object to prefetch
+ * \param[in] lnb	preallocated local niobuf array, reused per chunk
+ * \param[in] start	start offset of the range, in bytes
+ * \param[in] end	end offset of the range, in bytes (exclusive;
+ *			clamped to the current object size)
+ *
+ * \retval		0 on success
+ * \retval		negative value on error
+ */
+static int ofd_ladvise_prefetch(const struct lu_env *env,
+				struct ofd_object *fo,
+				struct niobuf_local *lnb,
+				__u64 start, __u64 end)
+{
+	struct ofd_thread_info *info = ofd_info(env);
+	pgoff_t start_index, end_index, pages;
+	struct niobuf_remote rnb;
+	unsigned long nr_local;
+	int rc = 0;
+
+	if (end <= start)
+		RETURN(-EINVAL);
+
+	ofd_read_lock(env, fo);
+	if (!ofd_object_exists(fo))
+		GOTO(out_unlock, rc = -ENOENT);
+
+	rc = ofd_attr_get(env, fo, &info->fti_attr);
+	if (rc)
+		GOTO(out_unlock, rc);
+
+	/* Do not prefetch beyond the current end of the object. */
+	if (end > info->fti_attr.la_size)
+		end = info->fti_attr.la_size;
+
+	/* Range lies entirely beyond EOF (or object is empty): nothing
+	 * to read, succeed with rc = 0. */
+	if (end == 0)
+		GOTO(out_unlock, rc);
+
+	/* We need page aligned offset and length */
+	start_index = start >> PAGE_SHIFT;
+	end_index = (end - 1) >> PAGE_SHIFT;
+	pages = end_index - start_index + 1;
+	while (pages > 0) {
+		/* Cap each iteration at the maximum bulk I/O size. */
+		nr_local = pages <= PTLRPC_MAX_BRW_PAGES ? pages :
+			PTLRPC_MAX_BRW_PAGES;
+		rnb.rnb_offset = start_index << PAGE_SHIFT;
+		rnb.rnb_len = nr_local << PAGE_SHIFT;
+		rc = dt_bufs_get(env, ofd_object_child(fo), &rnb, lnb, 0);
+		if (unlikely(rc < 0))
+			break;
+		/* dt_bufs_get() returned the number of buffers mapped. */
+		nr_local = rc;
+		rc = dt_read_prep(env, ofd_object_child(fo), lnb, nr_local);
+		/* Buffers are dropped right away; only the cache effect
+		 * of the read is wanted. */
+		dt_bufs_put(env, ofd_object_child(fo), lnb, nr_local);
+		if (unlikely(rc))
+			break;
+		start_index += nr_local;
+		pages -= nr_local;
+	}
+
+out_unlock:
+	ofd_read_unlock(env, fo);
+	RETURN(rc);
+}
+
/**
* OFD request handler for OST_LADVISE RPC.
*
*/
static int ofd_ladvise_hdl(struct tgt_session_info *tsi)
{
- struct ptlrpc_request *req = tgt_ses_req(tsi);
- struct obd_export *exp = tsi->tsi_exp;
- struct ofd_device *ofd = ofd_exp(exp);
- struct ost_body *body, *repbody;
- struct ofd_thread_info *info;
- struct ofd_object *fo;
- const struct lu_env *env = req->rq_svc_thread->t_env;
- int rc = 0;
- struct lu_ladvise *ladvise;
- int num_advise;
- struct ladvise_hdr *ladvise_hdr;
- int i;
+ struct ptlrpc_request *req = tgt_ses_req(tsi);
+ struct obd_export *exp = tsi->tsi_exp;
+ struct ofd_device *ofd = ofd_exp(exp);
+ struct ost_body *body, *repbody;
+ struct ofd_thread_info *info;
+ struct ofd_object *fo;
+ struct ptlrpc_thread *svc_thread = req->rq_svc_thread;
+ const struct lu_env *env = svc_thread->t_env;
+ struct tgt_thread_big_cache *tbc = svc_thread->t_data;
+ int rc = 0;
+ struct lu_ladvise *ladvise;
+ int num_advise;
+ struct ladvise_hdr *ladvise_hdr;
+ struct obd_ioobj ioo;
+ struct lustre_handle lockh = { 0 };
+ __u64 flags = 0;
+ int i;
+ struct dt_object *dob;
+ __u64 start;
+ __u64 end;
ENTRY;
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OST_LADVISE_PAUSE, cfs_fail_val);
body = tsi->tsi_ost_body;
if ((body->oa.o_valid & OBD_MD_FLID) != OBD_MD_FLID)
num_advise = req_capsule_get_size(&req->rq_pill,
&RMF_OST_LADVISE, RCL_CLIENT) /
- sizeof(*ladvise);
+ sizeof(*ladvise);
if (num_advise < ladvise_hdr->lah_count)
RETURN(err_serious(-EPROTO));
RETURN(rc);
}
LASSERT(fo != NULL);
+ dob = ofd_object_child(fo);
for (i = 0; i < num_advise; i++, ladvise++) {
- if (ladvise->lla_end <= ladvise->lla_start) {
+ start = ladvise->lla_start;
+ end = ladvise->lla_end;
+ if (end <= start) {
rc = err_serious(-EPROTO);
break;
}
default:
rc = -ENOTSUPP;
break;
+ case LU_LADVISE_WILLREAD:
+ if (tbc == NULL)
+ RETURN(-ENOMEM);
+
+ ioo.ioo_oid = body->oa.o_oi;
+ ioo.ioo_bufcnt = 1;
+ rc = tgt_extent_lock(exp->exp_obd->obd_namespace,
+ &tsi->tsi_resid, start, end - 1,
+ &lockh, LCK_PR, &flags);
+ if (rc != 0)
+ break;
+
+ req->rq_status = ofd_ladvise_prefetch(env, fo,
+ tbc->local,
+ start, end);
+ tgt_extent_unlock(&lockh, LCK_PR);
+ break;
+ case LU_LADVISE_DONTNEED:
+ rc = dt_ladvise(env, dob, ladvise->lla_start,
+ ladvise->lla_end, LU_LADVISE_DONTNEED);
+ break;
}
if (rc != 0)
break;
*/
static int ofd_quotactl(struct tgt_session_info *tsi)
{
- struct obd_quotactl *oqctl, *repoqc;
- struct lu_nodemap *nodemap =
- tsi->tsi_exp->exp_target_data.ted_nodemap;
- int id;
- int rc;
+ struct obd_quotactl *oqctl, *repoqc;
+ struct lu_nodemap *nodemap;
+ int id;
+ int rc;
ENTRY;
*repoqc = *oqctl;
+ nodemap = nodemap_get_from_exp(tsi->tsi_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
id = repoqc->qc_id;
if (oqctl->qc_type == USRQUOTA)
id = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS,
repoqc->qc_id);
+ nodemap_putref(nodemap);
+
if (repoqc->qc_id != id)
swap(repoqc->qc_id, id);
*
* \retval amount of time to extend the timeout with
*/
-static inline int prolong_timeout(struct ptlrpc_request *req,
- struct ldlm_lock *lock)
+static inline int prolong_timeout(struct ptlrpc_request *req)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+ time_t req_timeout;
if (AT_OFF)
return obd_timeout / 2;
- /* We are in the middle of the process - BL AST is sent, CANCEL
- is ahead. Take half of AT + IO process time. */
- return at_est2timeout(at_get(&svcpt->scp_at_estimate)) +
- (ldlm_bl_timeout(lock) >> 1);
-}
-
-/**
- * Prolong single lock timeout.
- *
- * This is supplemental function to the ofd_prolong_locks(). It prolongs
- * a single lock.
- *
- * \param[in] tsi target session environment for this request
- * \param[in] lock LDLM lock to prolong
- * \param[in] extent related extent
- * \param[in] timeout timeout value to add
- *
- * \retval 0 if lock is not suitable for prolongation
- * \retval 1 if lock was prolonged successfully
- */
-static int ofd_prolong_one_lock(struct tgt_session_info *tsi,
- struct ldlm_lock *lock,
- struct ldlm_extent *extent)
-{
- int timeout = prolong_timeout(tgt_ses_req(tsi), lock);
-
- if (lock->l_flags & LDLM_FL_DESTROYED) /* lock already cancelled */
- return 0;
-
- /* XXX: never try to grab resource lock here because we're inside
- * exp_bl_list_lock; in ldlm_lockd.c to handle waiting list we take
- * res lock and then exp_bl_list_lock. */
-
- if (!(lock->l_flags & LDLM_FL_AST_SENT))
- /* ignore locks not being cancelled */
- return 0;
-
- LDLM_DEBUG(lock, "refreshed for req x"LPU64" ext("LPU64"->"LPU64") "
- "to %ds.\n", tgt_ses_req(tsi)->rq_xid, extent->start,
- extent->end, timeout);
-
- /* OK. this is a possible lock the user holds doing I/O
- * let's refresh eviction timer for it */
- ldlm_refresh_waiting_lock(lock, timeout);
- return 1;
+ req_timeout = req->rq_deadline - req->rq_arrival_time.tv_sec;
+ return max_t(time_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
+ req_timeout);
}
/**
* request may cover multiple locks.
*
* \param[in] tsi target session environment for this request
- * \param[in] start start of extent
- * \param[in] end end of extent
+ * \param[in] data struct of data to prolong locks
*
- * \retval number of prolonged locks
*/
-static int ofd_prolong_extent_locks(struct tgt_session_info *tsi,
- __u64 start, __u64 end)
+static void ofd_prolong_extent_locks(struct tgt_session_info *tsi,
+ struct ldlm_prolong_args *data)
{
- struct obd_export *exp = tsi->tsi_exp;
struct obdo *oa = &tsi->tsi_ost_body->oa;
- struct ldlm_extent extent = {
- .start = start,
- .end = end
- };
struct ldlm_lock *lock;
- int lock_count = 0;
ENTRY;
+ data->lpa_timeout = prolong_timeout(tgt_ses_req(tsi));
+ data->lpa_export = tsi->tsi_exp;
+ data->lpa_resid = tsi->tsi_resid;
+
+ CDEBUG(D_RPCTRACE, "Prolong locks for req %p with x%llu"
+ " ext(%llu->%llu)\n", tgt_ses_req(tsi),
+ tgt_ses_req(tsi)->rq_xid, data->lpa_extent.start,
+ data->lpa_extent.end);
+
if (oa->o_valid & OBD_MD_FLHANDLE) {
/* mostly a request should be covered by only one lock, try
* fast path. */
if (lock != NULL) {
/* Fast path to check if the lock covers the whole IO
* region exclusively. */
- if (lock->l_granted_mode == LCK_PW &&
- ldlm_extent_contain(&lock->l_policy_data.l_extent,
- &extent)) {
+ if (ldlm_extent_contain(&lock->l_policy_data.l_extent,
+ &data->lpa_extent)) {
/* bingo */
- LASSERT(lock->l_export == exp);
- lock_count = ofd_prolong_one_lock(tsi, lock,
- &extent);
+ LASSERT(lock->l_export == data->lpa_export);
+ ldlm_lock_prolong_one(lock, data);
LDLM_LOCK_PUT(lock);
- RETURN(lock_count);
+ RETURN_EXIT;
}
lock->l_last_used = cfs_time_current();
LDLM_LOCK_PUT(lock);
}
}
- spin_lock_bh(&exp->exp_bl_list_lock);
- list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) {
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
- LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
-
- /* ignore waiting locks, no more granted locks in the list */
- if (lock->l_granted_mode != lock->l_req_mode)
- break;
-
- if (!ldlm_res_eq(&tsi->tsi_resid, &lock->l_resource->lr_name))
- continue;
-
- if (!ldlm_extent_overlap(&lock->l_policy_data.l_extent,
- &extent))
- continue;
-
- lock_count += ofd_prolong_one_lock(tsi, lock, &extent);
- }
- spin_unlock_bh(&exp->exp_bl_list_lock);
-
- RETURN(lock_count);
+ ldlm_resource_prolong(data);
+ EXIT;
}
/**
if (!ostid_res_name_eq(&ioo->ioo_oid, &lock->l_resource->lr_name))
RETURN(0);
- /* a bulk write can only hold a reference on a PW extent lock */
- mode = LCK_PW;
+ /* a bulk write can only hold a reference on a PW extent lock
+ * or GROUP lock.
+ */
+ mode = LCK_PW | LCK_GROUP;
if (opc == OST_READ)
/* whereas a bulk read can be protected by either a PR or PW
* extent lock */
* Implementation of ptlrpc_hpreq_ops::hpreq_lock_check for OFD RW requests.
*
* Check for whether the given PTLRPC request (\a req) is blocking
- * an LDLM lock cancel.
+ * an LDLM lock cancel. Also checks whether the request is covered by an LDLM
+ * lock.
*
* \param[in] req the incoming request
*
* \retval 1 if \a req is blocking an LDLM lock cancel
* \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
*/
static int ofd_rw_hpreq_check(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi;
struct obd_ioobj *ioo;
struct niobuf_remote *rnb;
- __u64 start, end;
- int lock_count;
+ int opc;
+ struct ldlm_prolong_args pa = { 0 };
ENTRY;
* Use LASSERT below because malformed RPCs should have
* been filtered out in tgt_hpreq_handler().
*/
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ LASSERT(opc == OST_READ || opc == OST_WRITE);
+
ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
LASSERT(ioo != NULL);
LASSERT(rnb != NULL);
LASSERT(!(rnb->rnb_flags & OBD_BRW_SRVLOCK));
- start = rnb->rnb_offset;
+ pa.lpa_mode = LCK_PW | LCK_GROUP;
+ if (opc == OST_READ)
+ pa.lpa_mode |= LCK_PR;
+
+ pa.lpa_extent.start = rnb->rnb_offset;
rnb += ioo->ioo_bufcnt - 1;
- end = rnb->rnb_offset + rnb->rnb_len - 1;
+ pa.lpa_extent.end = rnb->rnb_offset + rnb->rnb_len - 1;
DEBUG_REQ(D_RPCTRACE, req, "%s %s: refresh rw locks: "DFID
- " ("LPU64"->"LPU64")\n",
- tgt_name(tsi->tsi_tgt), current->comm,
- PFID(&tsi->tsi_fid), start, end);
+ " (%llu->%llu)\n", tgt_name(tsi->tsi_tgt),
+ current->comm, PFID(&tsi->tsi_fid), pa.lpa_extent.start,
+ pa.lpa_extent.end);
- lock_count = ofd_prolong_extent_locks(tsi, start, end);
+ ofd_prolong_extent_locks(tsi, &pa);
CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
- tgt_name(tsi->tsi_tgt), lock_count, req);
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
- RETURN(lock_count > 0);
+ if (pa.lpa_blocks_cnt > 0)
+ RETURN(1);
+
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
}
/**
struct ldlm_lock *lock)
{
struct tgt_session_info *tsi;
+ struct obdo *oa;
+ struct ldlm_extent ext;
+
+ ENTRY;
/* Don't use tgt_ses_info() to get session info, because lock_match()
* can be called while request has no processing thread yet. */
LASSERT(tsi->tsi_ost_body != NULL);
if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLHANDLE &&
tsi->tsi_ost_body->oa.o_handle.cookie == lock->l_handle.h_cookie)
- return 1;
+ RETURN(1);
- return 0;
+ oa = &tsi->tsi_ost_body->oa;
+ ext.start = oa->o_size;
+ ext.end = oa->o_blocks;
+
+ LASSERT(lock->l_resource != NULL);
+ if (!ostid_res_name_eq(&oa->o_oi, &lock->l_resource->lr_name))
+ RETURN(0);
+
+ if (!(lock->l_granted_mode & (LCK_PW | LCK_GROUP)))
+ RETURN(0);
+
+ RETURN(ldlm_extent_overlap(&lock->l_policy_data.l_extent, &ext));
}
/**
* Implementation of ptlrpc_hpreq_ops::hpreq_lock_check for OST_PUNCH request.
*
* High-priority queue request check for whether the given punch request
- * (\a req) is blocking an LDLM lock cancel.
+ * (\a req) is blocking an LDLM lock cancel. Also checks whether the request is
+ * covered by an LDLM lock.
+ *
*
* \param[in] req the incoming request
*
* \retval 1 if \a req is blocking an LDLM lock cancel
* \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
*/
static int ofd_punch_hpreq_check(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi;
struct obdo *oa;
- int lock_count;
+ struct ldlm_prolong_args pa = { 0 };
ENTRY;
LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS &&
oa->o_flags & OBD_FL_SRVLOCK));
+ pa.lpa_mode = LCK_PW | LCK_GROUP;
+ pa.lpa_extent.start = oa->o_size;
+ pa.lpa_extent.end = oa->o_blocks;
+
CDEBUG(D_DLMTRACE,
- "%s: refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
+ "%s: refresh locks: %llu/%llu (%llu->%llu)\n",
tgt_name(tsi->tsi_tgt), tsi->tsi_resid.name[0],
- tsi->tsi_resid.name[1], oa->o_size, oa->o_blocks);
+ tsi->tsi_resid.name[1], pa.lpa_extent.start, pa.lpa_extent.end);
- lock_count = ofd_prolong_extent_locks(tsi, oa->o_size, oa->o_blocks);
+ ofd_prolong_extent_locks(tsi, &pa);
CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
- tgt_name(tsi->tsi_tgt), lock_count, req);
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
+
+ if (pa.lpa_blocks_cnt > 0)
+ RETURN(1);
- RETURN(lock_count > 0);
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
}
/**
LASSERT(rnb != NULL); /* must exist after request preprocessing */
/* no high priority if server lock is needed */
- if (rnb->rnb_flags & OBD_BRW_SRVLOCK)
+ if (rnb->rnb_flags & OBD_BRW_SRVLOCK ||
+ (lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg)
+ & MSG_REPLAY))
return;
}
tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_rw;
{
LASSERT(tsi->tsi_ost_body != NULL); /* must exists if we are here */
/* no high-priority if server lock is needed */
- if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
- tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK)
+ if ((tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
+ tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK) ||
+ tgt_conn_flags(tsi) & OBD_CONNECT_MDS ||
+ lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY)
return;
tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_punch;
}
static int ofd_init0(const struct lu_env *env, struct ofd_device *m,
struct lu_device_type *ldt, struct lustre_cfg *cfg)
{
- const char *dev = lustre_cfg_string(cfg, 0);
- struct ofd_thread_info *info = NULL;
- struct obd_device *obd;
- struct obd_statfs *osfs;
- int rc;
+ const char *dev = lustre_cfg_string(cfg, 0);
+ struct ofd_thread_info *info = NULL;
+ struct obd_device *obd;
+ struct obd_statfs *osfs;
+ struct lu_fid fid;
+ struct nm_config_file *nodemap_config;
+ int rc;
ENTRY;
CERROR("%s: can't get statfs data, rc %d\n", obd->obd_name, rc);
GOTO(err_fini_stack, rc);
}
- if (!IS_PO2(osfs->os_bsize)) {
+ if (!is_power_of_2(osfs->os_bsize)) {
CERROR("%s: blocksize (%d) is not a power of 2\n",
obd->obd_name, osfs->os_bsize);
GOTO(err_fini_stack, rc = -EPROTO);
}
m->ofd_blockbits = fls(osfs->os_bsize) - 1;
+ if (ONE_MB_BRW_SIZE < (1U << m->ofd_blockbits))
+ m->ofd_brw_size = 1U << m->ofd_blockbits;
+ else
+ m->ofd_brw_size = ONE_MB_BRW_SIZE;
+
m->ofd_precreate_batch = OFD_PRECREATE_BATCH_DEFAULT;
if (osfs->os_bsize * osfs->os_blocks < OFD_PRECREATE_SMALL_FS)
m->ofd_precreate_batch = OFD_PRECREATE_BATCH_SMALL;
if (rc)
GOTO(err_fini_lut, rc);
- rc = ofd_start_inconsistency_verification_thread(m);
+ fid.f_seq = FID_SEQ_LOCAL_NAME;
+ fid.f_oid = 1;
+ fid.f_ver = 0;
+ rc = local_oid_storage_init(env, m->ofd_osd, &fid,
+ &m->ofd_los);
if (rc != 0)
GOTO(err_fini_fs, rc);
+ nodemap_config = nm_config_file_register_tgt(env, m->ofd_osd,
+ m->ofd_los);
+ if (IS_ERR(nodemap_config))
+ GOTO(err_fini_los, rc = PTR_ERR(nodemap_config));
+
+ obd->u.obt.obt_nodemap_config_file = nodemap_config;
+
+ rc = ofd_start_inconsistency_verification_thread(m);
+ if (rc != 0)
+ GOTO(err_fini_nm, rc);
+
tgt_adapt_sptlrpc_conf(&m->ofd_lut, 1);
RETURN(0);
+err_fini_nm:
+ nm_config_file_deregister_tgt(env, obd->u.obt.obt_nodemap_config_file);
+ obd->u.obt.obt_nodemap_config_file = NULL;
+err_fini_los:
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
err_fini_fs:
ofd_fs_cleanup(env, m);
err_fini_lut:
ofd_stop_inconsistency_verification_thread(m);
lfsck_degister(env, m->ofd_osd);
ofd_fs_cleanup(env, m);
+ nm_config_file_deregister_tgt(env, obd->u.obt.obt_nodemap_config_file);
+ obd->u.obt.obt_nodemap_config_file = NULL;
+
+ if (m->ofd_los != NULL) {
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
+ }
if (m->ofd_namespace != NULL) {
ldlm_namespace_free_post(m->ofd_namespace);