* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
top->ld_ops->ldo_process_config(env, top, lcfg);
OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens));
+ if (m->ofd_los != NULL) {
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
+ }
+
lu_site_purge(env, top->ld_site, ~0);
if (!cfs_hash_is_empty(top->ld_site->ls_obj_hash)) {
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_OTHER, NULL);
lu_site_print(env, top->ld_site, &msgdata, lu_cdebug_printer);
}
EXIT;
}
+/*
+ * Send LCFG_PRE_CLEANUP down the OFD device stack.
+ *
+ * Builds a lustre_cfg carrying the LCFG_PRE_CLEANUP command and hands it
+ * to the top device's ldo_process_config() so lower layers can prepare
+ * for teardown before ofd_stack_fini() runs.  Allocation failure is
+ * logged but deliberately non-fatal: cleanup proceeds regardless.
+ *
+ * \param[in] env	execution environment
+ * \param[in] m	OFD device
+ * \param[in] top	top device of the OFD stack
+ */
+static void ofd_stack_pre_fini(const struct lu_env *env, struct ofd_device *m,
+ struct lu_device *top)
+{
+ struct lustre_cfg_bufs bufs;
+ struct lustre_cfg *lcfg;
+ ENTRY;
+
+ LASSERT(top);
+
+ lustre_cfg_bufs_reset(&bufs, ofd_name(m));
+ /* buffer 1 is intentionally empty; only the command matters here */
+ lustre_cfg_bufs_set_string(&bufs, 1, NULL);
+ OBD_ALLOC(lcfg, lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen));
+ if (!lcfg) {
+ CERROR("%s: failed to trigger LCFG_PRE_CLEANUP\n", ofd_name(m));
+ } else {
+ lustre_cfg_init(lcfg, LCFG_PRE_CLEANUP, &bufs);
+ top->ld_ops->ldo_process_config(env, top, lcfg);
+ OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount,
+ lcfg->lcfg_buflens));
+ }
+
+ EXIT;
+}
+
/* For interoperability, see mdt_interop_param[]. */
static struct cfg_interop_param ofd_interop_param[] = {
{ "ost.quota_type", NULL },
return rc;
}
-struct locked_region {
- struct list_head list;
- struct lustre_handle lh;
-};
-/**
- * Lock single extent and save lock handle in the list.
- *
- * This is supplemental function for lock_zero_regions(). It allocates
- * new locked_region structure and locks it with extent lock, then adds
- * it to the list of all such regions.
- *
- * \param[in] ns LDLM namespace
- * \param[in] res_id resource ID
- * \param[in] begin start of region
- * \param[in] end end of region
- * \param[in] locked list head of regions list
- *
- * \retval 0 if successful locking
- * \retval negative value on error
- */
-static int lock_region(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
- unsigned long long begin, unsigned long long end,
- struct list_head *locked)
+/*
+ * Take and immediately release a PR extent lock on [begin, end].
+ *
+ * Replaces the old lock_region()/unlock_zero_regions() pair: the lock is
+ * needed only long enough to force clients covering the region to flush
+ * dirty data, so it is dropped as soon as it is granted.  This removes
+ * the per-region allocation and the locked-regions list entirely.
+ *
+ * \retval 0 on success
+ * \retval negative errno from tgt_extent_lock() on failure
+ */
+static int ofd_lock_unlock_region(struct ldlm_namespace *ns,
+ struct ldlm_res_id *res_id,
+ unsigned long long begin,
+ unsigned long long end)
{
- struct locked_region *region = NULL;
 __u64 flags = 0;
 int rc;
+ struct lustre_handle lh = { 0 };
 LASSERT(begin <= end);
- OBD_ALLOC_PTR(region);
- if (region == NULL)
- return -ENOMEM;
- rc = tgt_extent_lock(ns, res_id, begin, end, &region->lh,
- LCK_PR, &flags);
+ rc = tgt_extent_lock(ns, res_id, begin, end, &lh, LCK_PR, &flags);
 if (rc != 0)
 return rc;
- CDEBUG(D_OTHER, "ost lock [%llu,%llu], lh=%p\n", begin, end,
- &region->lh);
- list_add(&region->list, locked);
+ CDEBUG(D_OTHER, "ost lock [%llu,%llu], lh=%p\n", begin, end, &lh);
+ tgt_extent_unlock(&lh, LCK_PR);
 return 0;
}
*/
static int lock_zero_regions(struct ldlm_namespace *ns,
struct ldlm_res_id *res_id,
- struct fiemap *fiemap,
- struct list_head *locked)
+ struct fiemap *fiemap)
{
__u64 begin = fiemap->fm_start;
unsigned int i;
if (fiemap_start[i].fe_logical > begin) {
CDEBUG(D_OTHER, "ost lock [%llu,%llu]\n",
begin, fiemap_start[i].fe_logical);
- rc = lock_region(ns, res_id, begin,
- fiemap_start[i].fe_logical, locked);
+ rc = ofd_lock_unlock_region(ns, res_id, begin,
+ fiemap_start[i].fe_logical);
if (rc)
RETURN(rc);
}
if (begin < (fiemap->fm_start + fiemap->fm_length)) {
CDEBUG(D_OTHER, "ost lock [%llu,%llu]\n",
begin, fiemap->fm_start + fiemap->fm_length);
- rc = lock_region(ns, res_id, begin,
- fiemap->fm_start + fiemap->fm_length, locked);
+ rc = ofd_lock_unlock_region(ns, res_id, begin,
+ fiemap->fm_start + fiemap->fm_length);
}
RETURN(rc);
}
-/**
- * Unlock all previously locked sparse areas for given resource.
- *
- * This function goes through list of locked regions, unlocking and freeing
- * them one-by-one.
- *
- * \param[in] ns LDLM namespace
- * \param[in] locked list head of regions list
- */
-static void
-unlock_zero_regions(struct ldlm_namespace *ns, struct list_head *locked)
-{
- struct locked_region *entry, *temp;
-
- list_for_each_entry_safe(entry, temp, locked, list) {
- CDEBUG(D_OTHER, "ost unlock lh=%p\n", &entry->lh);
- tgt_extent_unlock(&entry->lh, LCK_PR);
- list_del(&entry->list);
- OBD_FREE_PTR(entry);
- }
-}
/**
* OFD request handler for OST_GET_INFO RPC.
* flushed back from client, then call fiemap again. */
if (fm_key->lfik_oa.o_valid & OBD_MD_FLFLAGS &&
fm_key->lfik_oa.o_flags & OBD_FL_SRVLOCK) {
- struct list_head locked;
-
- INIT_LIST_HEAD(&locked);
ost_fid_build_resid(fid, &fti->fti_resid);
rc = lock_zero_regions(ofd->ofd_namespace,
- &fti->fti_resid, fiemap,
- &locked);
- if (rc == 0 && !list_empty(&locked)) {
+ &fti->fti_resid, fiemap);
+ if (rc == 0)
rc = ofd_fiemap_get(tsi->tsi_env, ofd, fid,
fiemap);
- unlock_zero_regions(ofd->ofd_namespace,
- &locked);
- }
}
} else if (KEY_IS(KEY_LAST_FID)) {
struct ofd_device *ofd = ofd_exp(exp);
repbody->oa.o_valid |= OBD_MD_FLDATAVERSION;
repbody->oa.o_data_version = curr_version;
}
+
+ if (fo->ofo_ff.ff_layout_version > 0) {
+ repbody->oa.o_valid |= OBD_MD_LAYOUT_VERSION;
+ repbody->oa.o_layout_version =
+ fo->ofo_ff.ff_layout_version + fo->ofo_ff.ff_range;
+
+ CDEBUG(D_INODE, DFID": get layout version: %u\n",
+ PFID(&tsi->tsi_fid),
+ repbody->oa.o_layout_version);
+ }
}
ofd_object_put(tsi->tsi_env, fo);
struct ost_body *repbody;
struct ldlm_resource *res;
struct ofd_object *fo;
- struct filter_fid *ff = NULL;
int rc = 0;
ENTRY;
la_from_obdo(&fti->fti_attr, &body->oa, body->oa.o_valid);
fti->fti_attr.la_valid &= ~LA_TYPE;
- if (body->oa.o_valid & OBD_MD_FLFID) {
- ff = &fti->fti_mds_fid;
- ofd_prepare_fidea(ff, &body->oa);
- }
-
/* setting objects attributes (including owner/group) */
- rc = ofd_attr_set(tsi->tsi_env, fo, &fti->fti_attr, ff);
+ rc = ofd_attr_set(tsi->tsi_env, fo, &fti->fti_attr, &body->oa);
if (rc != 0)
GOTO(out_put, rc);
}
}
if (diff > 0) {
- cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT);
- u64 next_id;
- int created = 0;
- int count;
+ time64_t enough_time = ktime_get_seconds() + DISK_TIMEOUT;
+ u64 next_id;
+ int created = 0;
+ int count;
if (!(oa->o_valid & OBD_MD_FLFLAGS) ||
!(oa->o_flags & OBD_FL_DELORPHAN)) {
count, seq, next_id);
if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
- && cfs_time_after(jiffies, enough_time)) {
+ && ktime_get_seconds() > enough_time) {
CDEBUG(D_HA, "%s: Slow creates, %d/%lld objects"
" created at a rate of %d/s\n",
ofd_name(ofd), created, diff + created,
ENTRY;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_STATFS_DELAY, 10);
+
osfs = req_capsule_server_get(tsi->tsi_pill, &RMF_OBD_STATFS);
rc = ofd_statfs(tsi->tsi_env, tsi->tsi_exp, osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), 0);
+ ktime_get_seconds() - OBD_STATFS_CACHE_SECONDS, 0);
if (rc != 0)
CERROR("%s: statfs failed: rc = %d\n",
tgt_name(tsi->tsi_tgt), rc);
struct ldlm_namespace *ns = tsi->tsi_tgt->lut_obd->obd_namespace;
struct ldlm_resource *res;
struct ofd_object *fo;
- struct filter_fid *ff = NULL;
__u64 flags = 0;
struct lustre_handle lh = { 0, };
int rc;
info->fti_attr.la_size = start;
info->fti_attr.la_valid |= LA_SIZE;
- if (oa->o_valid & OBD_MD_FLFID) {
- ff = &info->fti_mds_fid;
- ofd_prepare_fidea(ff, oa);
- }
-
rc = ofd_object_punch(tsi->tsi_env, fo, start, end, &info->fti_attr,
- ff, (struct obdo *)oa);
+ (struct obdo *)oa);
if (rc)
GOTO(out_put, rc);
res = ldlm_resource_get(ns, NULL, &tsi->tsi_resid,
LDLM_EXTENT, 0);
if (!IS_ERR(res)) {
+ struct ost_lvb *res_lvb;
+
ldlm_res_lvbo_update(res, NULL, 0);
+ res_lvb = res->lr_lvb_data;
+ repbody->oa.o_valid |= OBD_MD_FLBLOCKS;
+ repbody->oa.o_blocks = res_lvb->lvb_blocks;
ldlm_resource_putref(res);
}
}
*
* \retval amount of time to extend the timeout with
*/
+/*
+ * Widened from int/time_t to time64_t: keeps timeout arithmetic
+ * y2038-safe and consistent with the time64_t callers of this helper.
+ */
-static inline int prolong_timeout(struct ptlrpc_request *req)
+static inline time64_t prolong_timeout(struct ptlrpc_request *req)
{
 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- time_t req_timeout;
+ time64_t req_timeout;
 if (AT_OFF)
 return obd_timeout / 2;
 req_timeout = req->rq_deadline - req->rq_arrival_time.tv_sec;
+ /* never extend by less than the adaptive-timeout service estimate */
- return max_t(time_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
+ return max_t(time64_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
 req_timeout);
}
LASSERT(lock->l_export == data->lpa_export);
ldlm_lock_prolong_one(lock, data);
LDLM_LOCK_PUT(lock);
- RETURN_EXIT;
+ if (data->lpa_locks_cnt > 0)
+ RETURN_EXIT;
+ /* The lock was destroyed probably lets try
+ * resource tree. */
+ } else {
+ lock->l_last_used = ktime_get();
+ LDLM_LOCK_PUT(lock);
}
- lock->l_last_used = ktime_get();
- LDLM_LOCK_PUT(lock);
}
}
spin_lock_init(&m->ofd_flags_lock);
m->ofd_raid_degraded = 0;
+ m->ofd_checksum_t10pi_enforce = 0;
m->ofd_syncjournal = 0;
ofd_slc_set(m);
m->ofd_soft_sync_limit = OFD_SOFT_SYNC_LIMIT_DEFAULT;
tgd->tgd_reserved_pcnt = 0;
- if (DT_DEF_BRW_SIZE < (1U << tgd->tgd_blockbits))
- m->ofd_brw_size = 1U << tgd->tgd_blockbits;
- else
- m->ofd_brw_size = DT_DEF_BRW_SIZE;
-
- m->ofd_cksum_types_supported = cksum_types_supported_server();
+ m->ofd_brw_size = m->ofd_lut.lut_dt_conf.ddp_brw_size;
+ m->ofd_cksum_types_supported =
+ obd_cksum_types_supported_server(obd->obd_name);
m->ofd_precreate_batch = OFD_PRECREATE_BATCH_DEFAULT;
if (tgd->tgd_osfs.os_bsize * tgd->tgd_osfs.os_blocks <
OFD_PRECREATE_SMALL_FS)
stop.ls_status = LS_PAUSED;
stop.ls_flags = 0;
lfsck_stop(env, m->ofd_osd, &stop);
+ ofd_stack_pre_fini(env, m, &m->ofd_dt_dev.dd_lu_dev);
target_recovery_fini(obd);
if (m->ofd_namespace != NULL)
ldlm_namespace_free_prior(m->ofd_namespace, NULL,
nm_config_file_deregister_tgt(env, obd->u.obt.obt_nodemap_config_file);
obd->u.obt.obt_nodemap_config_file = NULL;
- if (m->ofd_los != NULL) {
- local_oid_storage_fini(env, m->ofd_los);
- m->ofd_los = NULL;
- }
-
if (m->ofd_namespace != NULL) {
ldlm_namespace_free_post(m->ofd_namespace);
d->ld_obd->obd_namespace = m->ofd_namespace = NULL;